diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000000..0c4d2c9b20b63 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +CHANGELOG.asciidoc merge=union diff --git a/.gitignore b/.gitignore index 41a151f160cfa..8b2a7335ade9d 100644 --- a/.gitignore +++ b/.gitignore @@ -20,10 +20,8 @@ nbactions.xml .gradle/ build/ -# maven stuff (to be removed when trunk becomes 4.x) -*-execution-hints.log -target/ -dependency-reduced-pom.xml +# vscode stuff +.vscode/ # testing stuff **/.local* @@ -43,4 +41,3 @@ html_docs # random old stuff that we should look at the necessity of... /tmp/ eclipse-build - diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 69e90473a7f61..03b2674a4cc8c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -107,6 +107,8 @@ We support development in the Eclipse and IntelliJ IDEs. For Eclipse, the minimum version that we support is [Eclipse Oxygen][eclipse] (version 4.7). For IntelliJ, the minimum version that we support is [IntelliJ 2017.2][intellij]. +### Configuring IDEs And Running Tests + Eclipse users can automatically configure their IDE: `./gradlew eclipse` then `File: Import: Existing Projects into Workspace`. Select the option `Search for nested projects`. Additionally you will want to @@ -144,6 +146,9 @@ For IntelliJ, go to For Eclipse, go to `Preferences->Java->Installed JREs` and add `-ea` to `VM Arguments`. + +### Java Language Formatting Guidelines + Please follow these formatting guidelines: * Java indent is 4 spaces @@ -155,6 +160,33 @@ Please follow these formatting guidelines: * IntelliJ: `Preferences/Settings->Editor->Code Style->Java->Imports`. There are two configuration options: `Class count to use import with '*'` and `Names count to use static import with '*'`. Set their values to 99999 or some other absurdly high value. * Don't worry too much about import order. Try not to change it but don't worry about fighting your IDE to stop it from doing so. +### License Headers + +We require license headers on all Java files. You will notice that all the Java files in +the top-level `x-pack` directory contain a separate license from the rest of the repository. This +directory contains commercial code that is associated with a separate license. It can be helpful +to have the IDE automatically insert the appropriate license header depending on which part of the project +contributions are made to. + +#### IntelliJ: Copyright & Scope Profiles + +To have IntelliJ insert the correct license, it is necessary to create two copyright profiles. +These may be called `apache2` and `commercial`. These can be created in +`Preferences/Settings->Editor->Copyright->Copyright Profiles`. To associate these profiles with +their respective directories, two "Scopes" will need to be created. These can be created in +`Preferences/Settings->Appearance & Behavior->Scopes`. When creating scopes, be sure to choose +the `shared` scope type. Create a scope, `apache2`, with +the associated pattern of `!file[group:x-pack]:*/`. This pattern will exclude all the files contained in +the `x-pack` directory. The other scope, `commercial`, will have the inverse pattern of `file[group:x-pack]:*/`. +The two scopes, together, should account for all the files in the project. To associate the scopes +with their copyright profiles, go into `Preferences/Settings->Editor->Copyright` and use the `+` to add +the associations `apache2/apache2` and `commercial/commercial`.
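For illustration, the two copyright profiles insert headers along the following lines; the exact text to paste into each profile should be copied from an existing Java file in the corresponding part of the tree rather than from this abbreviated sketch:

```java
// Header inserted by the "apache2" profile (sources outside x-pack) -- abbreviated here:
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. Licensed under the Apache License, Version 2.0;
 * you may not use this file except in compliance with the License.
 */

// Header inserted by the "commercial" profile (sources under x-pack):
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
```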
+ +Configuring these options in IntelliJ can be quite buggy, so do not be alarmed if you have to open/close +the settings window and/or restart IntelliJ to see your changes take effect. + +### Creating A Distribution + To create a distribution from the source, simply run: ```sh @@ -169,6 +201,8 @@ The archive distributions (tar and zip) can be found under: `./distribution/archives/(tar|zip)/build/distributions/` +### Running The Full Test Suite + Before submitting your changes, run the test suite to make sure that nothing is broken, with: ```sh diff --git a/LICENSE.txt b/LICENSE.txt index d645695673349..e601d4382ad6d 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,202 +1,13 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Source code in this repository is variously licensed under the Apache License +Version 2.0, an Apache compatible license, or the Elastic License. Outside of +the "x-pack" folder, source code in a given file is licensed under the Apache +License Version 2.0, unless otherwise noted at the beginning of the file or a +LICENSE file present in the directory subtree declares a separate license. +Within the "x-pack" folder, source code in a given file is licensed under the +Elastic License, unless otherwise noted at the beginning of the file or a +LICENSE file present in the directory subtree declares a separate license. + +The build produces two sets of binaries - one set that falls under the Elastic +License and another set that falls under Apache License Version 2.0. The +binaries that contain `-oss` in the artifact name are licensed under the Apache +License Version 2.0. 
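As a rough, non-normative sketch of the rule described above (the directory name and license names come from the text; per-file headers and per-directory LICENSE overrides are deliberately ignored), the default license for a source file could be determined like this:

```java
import java.nio.file.Path;
import java.nio.file.Paths;

public class LicenseClassifier {

    /** Returns the default license for a source file, per the repository LICENSE.txt summary. */
    static String defaultLicense(Path file) {
        for (Path part : file.normalize()) {
            if (part.toString().equals("x-pack")) {
                return "Elastic License";
            }
        }
        return "Apache License, Version 2.0";
    }

    public static void main(String[] args) {
        // Paths are illustrative only.
        System.out.println(defaultLicense(Paths.get("x-pack/plugin/core/src/main/java/Foo.java"))); // Elastic License
        System.out.println(defaultLicense(Paths.get("server/src/main/java/Bar.java")));             // Apache License, Version 2.0
    }
}
```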
diff --git a/build.gradle b/build.gradle index 8218d49fd68ff..c538c0cb898ef 100644 --- a/build.gradle +++ b/build.gradle @@ -20,6 +20,7 @@ import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.BuildPlugin +import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionCollection import org.elasticsearch.gradle.VersionProperties @@ -30,6 +31,7 @@ import org.gradle.api.tasks.wrapper.Wrapper.DistributionType import org.gradle.util.GradleVersion import org.gradle.util.DistributionLocator +import java.nio.file.Files import java.nio.file.Path import java.security.MessageDigest @@ -40,10 +42,9 @@ subprojects { description = "Elasticsearch subproject ${project.path}" } -Path rootPath = rootDir.toPath() -// setup pom license info, but only for artifacts that are part of elasticsearch -configure(subprojects.findAll { it.projectDir.toPath().startsWith(rootPath) }) { - +subprojects { + project.ext.licenseName = 'The Apache Software License, Version 2.0' + project.ext.licenseUrl = 'http://www.apache.org/licenses/LICENSE-2.0.txt' // we only use maven publish to add tasks for pom generation plugins.withType(MavenPublishPlugin).whenPluginAdded { publishing { @@ -55,8 +56,8 @@ configure(subprojects.findAll { it.projectDir.toPath().startsWith(rootPath) }) { node.appendNode('inceptionYear', '2009') Node license = node.appendNode('licenses').appendNode('license') - license.appendNode('name', 'The Apache Software License, Version 2.0') - license.appendNode('url', 'http://www.apache.org/licenses/LICENSE-2.0.txt') + license.appendNode('name', project.licenseName) + license.appendNode('url', project.licenseUrl) license.appendNode('distribution', 'repo') Node developer = node.appendNode('developers').appendNode('developer') @@ -68,7 +69,7 @@ configure(subprojects.findAll { it.projectDir.toPath().startsWith(rootPath) }) { } } plugins.withType(BuildPlugin).whenPluginAdded { - project.licenseFile = project.rootProject.file('LICENSE.txt') + project.licenseFile = project.rootProject.file('licenses/APACHE-LICENSE-2.0.txt') project.noticeFile = project.rootProject.file('NOTICE.txt') } } @@ -206,9 +207,13 @@ subprojects { "org.elasticsearch.test:framework:${version}": ':test:framework', "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:archives:integ-test-zip', "org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:archives:zip', + "org.elasticsearch.distribution.zip:elasticsearch-oss:${version}": ':distribution:archives:oss-zip', "org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:archives:tar', + "org.elasticsearch.distribution.tar:elasticsearch-oss:${version}": ':distribution:archives:oss-tar', "org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:packages:rpm', + "org.elasticsearch.distribution.rpm:elasticsearch-oss:${version}": ':distribution:packages:oss-rpm', "org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:packages:deb', + "org.elasticsearch.distribution.deb:elasticsearch-oss:${version}": ':distribution:packages:oss-deb', "org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage', // for transport client "org.elasticsearch.plugin:transport-netty4-client:${version}": ':modules:transport-netty4', @@ -228,6 +233,11 @@ subprojects { ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${snapshot}"] = snapshotProject 
ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${snapshot}"] = snapshotProject ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${snapshot}"] = snapshotProject + if (snapshot.onOrAfter('6.3.0')) { + ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch-oss:${snapshot}"] = snapshotProject + ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch-oss:${snapshot}"] = snapshotProject + ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch-oss:${snapshot}"] = snapshotProject + } } } @@ -451,6 +461,59 @@ gradle.projectsEvaluated { } +static void assertLinesInFile(final Path path, final List<String> expectedLines) { + final List<String> actualLines = Files.readAllLines(path) + int line = 0 + for (final String expectedLine : expectedLines) { + final String actualLine = actualLines.get(line) + if (expectedLine != actualLine) { + throw new GradleException("expected line [${line + 1}] in [${path}] to be [${expectedLine}] but was [${actualLine}]") + } + line++ + } +} + +/* + * Check that all generated JARs have our NOTICE.txt and an appropriate + * LICENSE.txt in them. We configure this in Gradle but we'd like to + * be extra paranoid. + */ +subprojects { project -> + project.tasks.withType(Jar).whenTaskAdded { jarTask -> + final Task extract = project.task("extract${jarTask.name.capitalize()}", type: LoggedExec) { + dependsOn jarTask + ext.destination = project.buildDir.toPath().resolve("jar-extracted/${jarTask.name}") + commandLine "${->new File(rootProject.compilerJavaHome, 'bin/jar')}", + 'xf', "${-> jarTask.outputs.files.singleFile}", 'META-INF/LICENSE.txt', 'META-INF/NOTICE.txt' + workingDir destination + doFirst { + project.delete(destination) + Files.createDirectories(destination) + } + } + + final Task checkNotice = project.task("verify${jarTask.name.capitalize()}Notice") { + dependsOn extract + doLast { + final List<String> noticeLines = Files.readAllLines(project.noticeFile.toPath()) + final Path noticePath = extract.destination.resolve('META-INF/NOTICE.txt') + assertLinesInFile(noticePath, noticeLines) + } + } + project.check.dependsOn checkNotice + + final Task checkLicense = project.task("verify${jarTask.name.capitalize()}License") { + dependsOn extract + doLast { + final List<String> licenseLines = Files.readAllLines(project.licenseFile.toPath()) + final Path licensePath = extract.destination.resolve('META-INF/LICENSE.txt') + assertLinesInFile(licensePath, licenseLines) + } + } + project.check.dependsOn checkLicense + } +} + /* Remove assemble on all qa projects because we don't need to publish * artifacts for them.
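The Gradle tasks above extract `META-INF/LICENSE.txt` and `META-INF/NOTICE.txt` with `jar xf` and compare them line by line against the project's license and notice files. A standalone Java sketch of the same check, with placeholder paths, might look like this:

```java
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;
import java.util.stream.Collectors;

public class JarLicenseCheck {

    /** Fails if the jar entry does not start with exactly the expected lines, mirroring assertLinesInFile. */
    static void assertEntryMatches(Path jar, String entryName, Path expectedFile) throws IOException {
        List<String> expected = Files.readAllLines(expectedFile, StandardCharsets.UTF_8);
        try (JarFile jarFile = new JarFile(jar.toFile())) {
            JarEntry entry = jarFile.getJarEntry(entryName);
            if (entry == null) {
                throw new IllegalStateException(entryName + " is missing from " + jar);
            }
            List<String> actual;
            try (BufferedReader reader = new BufferedReader(
                    new InputStreamReader(jarFile.getInputStream(entry), StandardCharsets.UTF_8))) {
                actual = reader.lines().collect(Collectors.toList());
            }
            for (int i = 0; i < expected.size(); i++) {
                if (i >= actual.size() || expected.get(i).equals(actual.get(i)) == false) {
                    throw new IllegalStateException("line " + (i + 1) + " of " + entryName + " in " + jar
                            + " does not match " + expectedFile);
                }
            }
        }
    }

    public static void main(String[] args) throws IOException {
        // Placeholder paths: point these at a real jar and at the repository's license/notice files.
        Path jar = Paths.get("build/distributions/example.jar");
        assertEntryMatches(jar, "META-INF/LICENSE.txt", Paths.get("licenses/APACHE-LICENSE-2.0.txt"));
        assertEntryMatches(jar, "META-INF/NOTICE.txt", Paths.get("NOTICE.txt"));
    }
}
```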
*/ gradle.projectsEvaluated { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 3103f23472ed7..a44b9c849d333 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -38,6 +38,7 @@ import org.gradle.api.artifacts.ModuleVersionIdentifier import org.gradle.api.artifacts.ProjectDependency import org.gradle.api.artifacts.ResolvedArtifact import org.gradle.api.artifacts.dsl.RepositoryHandler +import org.gradle.api.execution.TaskExecutionGraph import org.gradle.api.plugins.JavaPlugin import org.gradle.api.publish.maven.MavenPublication import org.gradle.api.publish.maven.plugins.MavenPublishPlugin @@ -221,21 +222,34 @@ class BuildPlugin implements Plugin { return System.getenv('JAVA' + version + '_HOME') } - /** - * Get Java home for the project for the specified version. If the specified version is not configured, an exception with the specified - * message is thrown. - * - * @param project the project - * @param version the version of Java home to obtain - * @param message the exception message if Java home for the specified version is not configured - * @return Java home for the specified version - * @throws GradleException if Java home for the specified version is not configured - */ - static String getJavaHome(final Project project, final int version, final String message) { - if (project.javaVersions.get(version) == null) { - throw new GradleException(message) + /** Add a check before gradle execution phase which ensures java home for the given java version is set. */ + static void requireJavaHome(Task task, int version) { + Project rootProject = task.project.rootProject // use root project for global accounting + if (rootProject.hasProperty('requiredJavaVersions') == false) { + rootProject.rootProject.ext.requiredJavaVersions = [:].withDefault{key -> return []} + rootProject.gradle.taskGraph.whenReady { TaskExecutionGraph taskGraph -> + List messages = [] + for (entry in rootProject.requiredJavaVersions) { + if (rootProject.javaVersions.get(entry.key) != null) { + continue + } + List tasks = entry.value.findAll { taskGraph.hasTask(it) }.collect { " ${it.path}" } + if (tasks.isEmpty() == false) { + messages.add("JAVA${entry.key}_HOME required to run tasks:\n${tasks.join('\n')}") + } + } + if (messages.isEmpty() == false) { + throw new GradleException(messages.join('\n')) + } + } } - return project.javaVersions.get(version) + rootProject.requiredJavaVersions.get(version).add(task) + } + + /** A convenience method for getting java home for a version of java and requiring that version for the given task to execute */ + static String getJavaHome(final Task task, final int version) { + requireJavaHome(task, version) + return task.project.javaVersions.get(version) } private static String findRuntimeJavaHome(final String compilerJavaHome) { @@ -605,6 +619,7 @@ class BuildPlugin implements Plugin { jarTask.metaInf { from(project.licenseFile.parent) { include project.licenseFile.name + rename { 'LICENSE.txt' } } from(project.noticeFile.parent) { include project.noticeFile.name diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy index 6c1857b3e7bf9..acb8f57d9d72c 100644 --- 
a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy @@ -37,10 +37,11 @@ class MetaPluginBuildPlugin implements Plugin { project.plugins.apply(RestTestPlugin) createBundleTask(project) - boolean isModule = project.path.startsWith(':modules:') + boolean isModule = project.path.startsWith(':modules:') || project.path.startsWith(':x-pack:plugin') project.integTestCluster { dependsOn(project.bundlePlugin) + distribution = 'integ-test-zip' } BuildPlugin.configurePomGeneration(project) project.afterEvaluate { @@ -49,9 +50,9 @@ class MetaPluginBuildPlugin implements Plugin { if (project.integTestCluster.distribution == 'integ-test-zip') { project.integTestCluster.module(project) } - } else { + } else { project.integTestCluster.plugin(project.path) - } + } } RunTask run = project.tasks.create('run', RunTask) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index 80cb376077ed1..28008f4313c97 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -50,7 +50,8 @@ public class PluginBuildPlugin extends BuildPlugin { // this afterEvaluate must happen before the afterEvaluate added by integTest creation, // so that the file name resolution for installing the plugin will be setup project.afterEvaluate { - boolean isModule = project.path.startsWith(':modules:') + boolean isXPackModule = project.path.startsWith(':x-pack:plugin') + boolean isModule = project.path.startsWith(':modules:') || isXPackModule String name = project.pluginProperties.extension.name project.archivesBaseName = name @@ -70,9 +71,13 @@ public class PluginBuildPlugin extends BuildPlugin { if (isModule) { project.integTestCluster.module(project) project.tasks.run.clusterConfig.module(project) + project.tasks.run.clusterConfig.distribution = 'integ-test-zip' } else { project.integTestCluster.plugin(project.path) project.tasks.run.clusterConfig.plugin(project.path) + } + + if (isModule == false || isXPackModule) { addZipPomGeneration(project) addNoticeGeneration(project) } @@ -256,6 +261,7 @@ public class PluginBuildPlugin extends BuildPlugin { if (licenseFile != null) { project.bundlePlugin.from(licenseFile.parentFile) { include(licenseFile.name) + rename { 'LICENSE.txt' } } } File noticeFile = project.pluginProperties.extension.noticeFile diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 5f9e4c49b34e9..ed066ddc96baa 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -20,6 +20,7 @@ package org.elasticsearch.gradle.test import org.apache.tools.ant.DefaultLogger import org.apache.tools.ant.taskdefs.condition.Os +import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties @@ -130,13 +131,22 @@ class ClusterFormationTasks { /** Adds a dependency on the given distribution */ static void configureDistributionDependency(Project project, String distro, Configuration configuration, 
Version elasticsearchVersion) { + if (elasticsearchVersion.before('6.3.0') && distro.startsWith('oss-')) { + distro = distro.substring('oss-'.length()) + } String packaging = distro - if (distro == 'tar') { - packaging = 'tar.gz' - } else if (distro == 'integ-test-zip') { + if (distro.contains('tar')) { + packaging = 'tar.gz' + } else if (distro.contains('zip')) { packaging = 'zip' } - project.dependencies.add(configuration.name, "org.elasticsearch.distribution.${distro}:elasticsearch:${elasticsearchVersion}@${packaging}") + String subgroup = distro + String artifactName = 'elasticsearch' + if (distro.contains('oss')) { + artifactName += '-oss' + subgroup = distro.substring('oss-'.length()) + } + project.dependencies.add(configuration.name, "org.elasticsearch.distribution.${subgroup}:${artifactName}:${elasticsearchVersion}@${packaging}") } /** Adds a dependency on a different version of the given plugin, which will be retrieved using gradle's dependency resolution */ @@ -259,6 +269,7 @@ class ClusterFormationTasks { switch (node.config.distribution) { case 'integ-test-zip': case 'zip': + case 'oss-zip': extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) { from { project.zipTree(configuration.singleFile) @@ -267,6 +278,7 @@ } break; case 'tar': + case 'oss-tar': extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) { from { project.tarTree(project.resources.gzip(configuration.singleFile)) @@ -551,16 +563,17 @@ class ClusterFormationTasks { /** Adds a task to execute a command to help setup the cluster */ static Task configureExecTask(String name, Project project, Task setup, NodeInfo node, Object[] execArgs) { - return project.tasks.create(name: name, type: LoggedExec, dependsOn: setup) { - workingDir node.cwd + return project.tasks.create(name: name, type: LoggedExec, dependsOn: setup) { Exec exec -> + exec.workingDir node.cwd + exec.environment 'JAVA_HOME', node.getJavaHome() if (Os.isFamily(Os.FAMILY_WINDOWS)) { - executable 'cmd' - args '/C', 'call' + exec.executable 'cmd' + exec.args '/C', 'call' // On Windows the comma character is considered a parameter separator: // argument are wrapped in an ExecArgWrapper that escapes commas - args execArgs.collect { a -> new EscapeCommaWrapper(arg: a) } + exec.args execArgs.collect { a -> new EscapeCommaWrapper(arg: a) } } else { - commandLine execArgs + exec.commandLine execArgs } } } @@ -607,6 +620,9 @@ class ClusterFormationTasks { } Task start = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup) + if (node.javaVersion != null) { + BuildPlugin.requireJavaHome(start, node.javaVersion) + } start.doLast(elasticsearchRunner) return start } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy index 1fc944eeec6eb..5e67dfa55cfd4 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -36,6 +36,9 @@ import static org.elasticsearch.gradle.BuildPlugin.getJavaHome * A container for the files and configuration associated with a single node in a test cluster.
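To make the distribution-to-artifact mapping above easier to follow, here is an illustrative Java translation of `configureDistributionDependency`; the version comparison is a simplified stand-in for the build's `Version` class, and the output values are only there to show the effect of the `oss-` handling:

```java
public class DistributionCoordinates {

    /** Mirrors the Groovy logic above: maps a distribution name and version to a Gradle dependency notation. */
    static String dependencyNotation(String distro, String version) {
        // OSS-specific distributions only exist from 6.3.0 onwards; older versions fall back to the default one.
        if (versionBefore(version, "6.3.0") && distro.startsWith("oss-")) {
            distro = distro.substring("oss-".length());
        }
        String packaging = distro;
        if (distro.contains("tar")) {
            packaging = "tar.gz";
        } else if (distro.contains("zip")) {
            packaging = "zip";
        }
        String subgroup = distro;
        String artifactName = "elasticsearch";
        if (distro.contains("oss")) {
            artifactName += "-oss";
            subgroup = distro.substring("oss-".length());
        }
        return "org.elasticsearch.distribution." + subgroup + ":" + artifactName + ":" + version + "@" + packaging;
    }

    // Minimal major.minor.patch comparison, standing in for the build's Version class.
    static boolean versionBefore(String version, String other) {
        String[] a = version.split("\\."), b = other.split("\\.");
        for (int i = 0; i < 3; i++) {
            int cmp = Integer.compare(Integer.parseInt(a[i]), Integer.parseInt(b[i]));
            if (cmp != 0) return cmp < 0;
        }
        return false;
    }

    public static void main(String[] args) {
        System.out.println(dependencyNotation("oss-tar", "6.3.0")); // org.elasticsearch.distribution.tar:elasticsearch-oss:6.3.0@tar.gz
        System.out.println(dependencyNotation("oss-tar", "6.2.4")); // org.elasticsearch.distribution.tar:elasticsearch:6.2.4@tar.gz
        System.out.println(dependencyNotation("zip", "6.3.0"));     // org.elasticsearch.distribution.zip:elasticsearch:6.3.0@zip
    }
}
```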
*/ class NodeInfo { + /** Gradle project this node is part of */ + Project project + /** common configuration for all nodes, including this one */ ClusterConfiguration config @@ -84,6 +87,9 @@ class NodeInfo { /** directory to install plugins from */ File pluginsTmpDir + /** Major version of java this node runs with, or {@code null} if using the runtime java version */ + Integer javaVersion + /** environment variables to start the node with */ Map env @@ -109,6 +115,7 @@ class NodeInfo { NodeInfo(ClusterConfiguration config, int nodeNum, Project project, String prefix, Version nodeVersion, File sharedDir) { this.config = config this.nodeNum = nodeNum + this.project = project this.sharedDir = sharedDir if (config.clusterName != null) { clusterName = config.clusterName @@ -165,12 +172,11 @@ class NodeInfo { args.add("${esScript}") } + if (nodeVersion.before("6.2.0")) { - env = ['JAVA_HOME': "${-> getJavaHome(project, 8, "JAVA8_HOME must be set to run BWC tests against [" + nodeVersion + "]")}"] + javaVersion = 8 } else if (nodeVersion.onOrAfter("6.2.0") && nodeVersion.before("6.3.0")) { - env = ['JAVA_HOME': "${-> getJavaHome(project, 9, "JAVA9_HOME must be set to run BWC tests against [" + nodeVersion + "]")}"] - } else { - env = ['JAVA_HOME': (String) project.runtimeJavaHome] + javaVersion = 9 } args.addAll("-E", "node.portsfile=true") @@ -182,7 +188,7 @@ class NodeInfo { // in the cluster-specific options esJavaOpts = String.join(" ", "-ea", "-esa", esJavaOpts) } - env.put('ES_JAVA_OPTS', esJavaOpts) + env = ['ES_JAVA_OPTS': esJavaOpts] for (Map.Entry property : System.properties.entrySet()) { if (property.key.startsWith('tests.es.')) { args.add("-E") @@ -242,6 +248,11 @@ class NodeInfo { return Native.toString(shortPath).substring(4) } + /** Return the java home used by this node. */ + String getJavaHome() { + return javaVersion == null ? project.runtimeJavaHome : project.javaVersions.get(javaVersion) + } + /** Returns debug string for the command that started this node. 
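The `javaVersion` bookkeeping above boils down to a small decision: nodes older than 6.2.0 need `JAVA8_HOME`, 6.2.x nodes need `JAVA9_HOME`, and anything newer runs on the build's runtime JDK. A minimal Java sketch of that decision, with hypothetical paths standing in for the configured java homes:

```java
import java.util.HashMap;
import java.util.Map;

public class BwcJavaHome {

    /** Mirrors NodeInfo: returns the required Java major version for a node version, or null to use the runtime JDK. */
    static Integer requiredJavaVersion(int major, int minor) {
        if (major < 6 || (major == 6 && minor < 2)) {
            return 8;      // before 6.2.0
        } else if (major == 6 && minor == 2) {
            return 9;      // 6.2.x, before 6.3.0
        }
        return null;       // 6.3.0 and later use the runtime java home
    }

    public static void main(String[] args) {
        Map<Integer, String> javaHomes = new HashMap<>(); // stands in for JAVA8_HOME / JAVA9_HOME
        javaHomes.put(8, "/opt/jdk8");                    // hypothetical paths
        javaHomes.put(9, "/opt/jdk9");
        String runtimeJavaHome = "/opt/jdk10";

        for (int[] v : new int[][] {{6, 1}, {6, 2}, {6, 3}}) {
            Integer required = requiredJavaVersion(v[0], v[1]);
            String javaHome = required == null ? runtimeJavaHome : javaHomes.get(required);
            System.out.println(v[0] + "." + v[1] + ".x -> " + javaHome);
        }
    }
}
```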
*/ String getCommandString() { String esCommandString = "\nNode ${nodeNum} configuration:\n" @@ -249,6 +260,7 @@ class NodeInfo { esCommandString += "| cwd: ${cwd}\n" esCommandString += "| command: ${executable} ${args.join(' ')}\n" esCommandString += '| environment:\n' + esCommandString += "| JAVA_HOME: ${javaHome}\n" env.each { k, v -> esCommandString += "| ${k}: ${v}\n" } if (config.daemonize) { esCommandString += "|\n| [${wrapperScript.name}]\n" @@ -300,6 +312,8 @@ class NodeInfo { case 'integ-test-zip': case 'zip': case 'tar': + case 'oss-zip': + case 'oss-tar': path = "elasticsearch-${nodeVersion}" break case 'rpm': @@ -316,7 +330,9 @@ class NodeInfo { switch (distro) { case 'integ-test-zip': case 'zip': + case 'oss-zip': case 'tar': + case 'oss-tar': return new File(homeDir(baseDir, distro, nodeVersion), 'config') case 'rpm': case 'deb': diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index 3c7554453b5e2..242ed45eee86e 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -24,6 +24,7 @@ import org.elasticsearch.gradle.VersionProperties import org.gradle.api.DefaultTask import org.gradle.api.Project import org.gradle.api.Task +import org.gradle.api.Transformer import org.gradle.api.execution.TaskExecutionAdapter import org.gradle.api.internal.tasks.options.Option import org.gradle.api.provider.Property diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantPropertiesExtension.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantPropertiesExtension.groovy index c6d0f1d0425d0..264a1e0f8ac17 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantPropertiesExtension.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantPropertiesExtension.groovy @@ -18,6 +18,7 @@ */ package org.elasticsearch.gradle.vagrant +import org.elasticsearch.gradle.Version import org.gradle.api.tasks.Input class VagrantPropertiesExtension { @@ -26,7 +27,7 @@ class VagrantPropertiesExtension { List boxes @Input - String upgradeFromVersion + Version upgradeFromVersion @Input List upgradeFromVersions diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index d7d1c01e7dd00..7a0b9f96781df 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -3,6 +3,7 @@ package org.elasticsearch.gradle.vagrant import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.FileContentsTask import org.elasticsearch.gradle.LoggedExec +import org.elasticsearch.gradle.Version import org.gradle.api.* import org.gradle.api.artifacts.dsl.RepositoryHandler import org.gradle.api.execution.TaskExecutionAdapter @@ -36,8 +37,15 @@ class VagrantTestPlugin implements Plugin { 'ubuntu-1404', ] - /** All onboarded archives by default, available for Bats tests even if not used **/ - static List DISTRIBUTION_ARCHIVES = ['tar', 'rpm', 'deb'] + /** All distributions to bring into test VM, whether or not they are used **/ + static List DISTRIBUTIONS = [ + 'archives:tar', + 'archives:oss-tar', + 'packages:rpm', + 'packages:oss-rpm', + 
'packages:deb', + 'packages:oss-deb' + ] /** Packages onboarded for upgrade tests **/ static List UPGRADE_FROM_ARCHIVES = ['rpm', 'deb'] @@ -105,21 +113,19 @@ class VagrantTestPlugin implements Plugin { private static void createPackagingConfiguration(Project project) { project.configurations.create(PACKAGING_CONFIGURATION) - String upgradeFromVersion = System.getProperty("tests.packaging.upgradeVersion") - if (upgradeFromVersion == null) { + String upgradeFromVersionRaw = System.getProperty("tests.packaging.upgradeVersion"); + Version upgradeFromVersion + if (upgradeFromVersionRaw == null) { String firstPartOfSeed = project.rootProject.testSeed.tokenize(':').get(0) final long seed = Long.parseUnsignedLong(firstPartOfSeed, 16) final def indexCompatVersions = project.bwcVersions.indexCompatible upgradeFromVersion = indexCompatVersions[new Random(seed).nextInt(indexCompatVersions.size())] + } else { + upgradeFromVersion = Version.fromString(upgradeFromVersionRaw) } - DISTRIBUTION_ARCHIVES.each { + DISTRIBUTIONS.each { // Adds a dependency for the current version - if (it == 'tar') { - it = 'archives:tar' - } else { - it = "packages:${it}" - } project.dependencies.add(PACKAGING_CONFIGURATION, project.dependencies.project(path: ":distribution:${it}", configuration: 'default')) } @@ -128,6 +134,10 @@ class VagrantTestPlugin implements Plugin { // The version of elasticsearch that we upgrade *from* project.dependencies.add(PACKAGING_CONFIGURATION, "org.elasticsearch.distribution.${it}:elasticsearch:${upgradeFromVersion}@${it}") + if (upgradeFromVersion.onOrAfter('6.3.0')) { + project.dependencies.add(PACKAGING_CONFIGURATION, + "org.elasticsearch.distribution.${it}:elasticsearch-oss:${upgradeFromVersion}@${it}") + } } project.extensions.esvagrant.upgradeFromVersion = upgradeFromVersion @@ -173,7 +183,17 @@ class VagrantTestPlugin implements Plugin { Task createUpgradeFromFile = project.tasks.create('createUpgradeFromFile', FileContentsTask) { dependsOn copyPackagingArchives file "${archivesDir}/upgrade_from_version" - contents project.extensions.esvagrant.upgradeFromVersion + contents project.extensions.esvagrant.upgradeFromVersion.toString() + } + + Task createUpgradeIsOssFile = project.tasks.create('createUpgradeIsOssFile', FileContentsTask) { + dependsOn copyPackagingArchives + doFirst { + project.delete("${archivesDir}/upgrade_is_oss") + } + onlyIf { project.extensions.esvagrant.upgradeFromVersion.onOrAfter('6.3.0') } + file "${archivesDir}/upgrade_is_oss" + contents '' } File batsDir = new File(packagingDir, BATS) @@ -214,7 +234,7 @@ class VagrantTestPlugin implements Plugin { Task vagrantSetUpTask = project.tasks.create('setupPackagingTest') vagrantSetUpTask.dependsOn 'vagrantCheckVersion' - vagrantSetUpTask.dependsOn copyPackagingArchives, createVersionFile, createUpgradeFromFile + vagrantSetUpTask.dependsOn copyPackagingArchives, createVersionFile, createUpgradeFromFile, createUpgradeIsOssFile vagrantSetUpTask.dependsOn copyBatsTests, copyBatsUtils } diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 2aa72f0fa7a1c..609a7cf2ea66f 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -535,7 +535,6 @@ - diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java index 4e6fcdbb8dd4a..d68d3b309af51 100644 --- 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java @@ -48,6 +48,7 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexRequest; @@ -75,6 +76,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.rankeval.RankEvalRequest; +import org.elasticsearch.rest.action.RestFieldCapabilitiesAction; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; @@ -536,6 +538,16 @@ static Request existsAlias(GetAliasesRequest getAliasesRequest) { return new Request(HttpHead.METHOD_NAME, endpoint, params.getParams(), null); } + static Request fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest) { + Params params = Params.builder(); + params.withFields(fieldCapabilitiesRequest.fields()); + params.withIndicesOptions(fieldCapabilitiesRequest.indicesOptions()); + + String[] indices = fieldCapabilitiesRequest.indices(); + String endpoint = endpoint(indices, "_field_caps"); + return new Request(HttpGet.METHOD_NAME, endpoint, params.getParams(), null); + } + static Request rankEval(RankEvalRequest rankEvalRequest) throws IOException { String endpoint = endpoint(rankEvalRequest.indices(), Strings.EMPTY_ARRAY, "_rank_eval"); Params params = Params.builder(); @@ -572,7 +584,6 @@ private static Request resize(ResizeRequest resizeRequest) throws IOException { static Request clusterPutSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest) throws IOException { Params parameters = Params.builder(); - parameters.withFlatSettings(clusterUpdateSettingsRequest.flatSettings()); parameters.withTimeout(clusterUpdateSettingsRequest.timeout()); parameters.withMasterTimeout(clusterUpdateSettingsRequest.masterNodeTimeout()); HttpEntity entity = createEntity(clusterUpdateSettingsRequest, REQUEST_BODY_CONTENT_TYPE); @@ -603,7 +614,6 @@ static Request indicesExist(GetIndexRequest request) { params.withLocal(request.local()); params.withHuman(request.humanReadable()); params.withIndicesOptions(request.indicesOptions()); - params.withFlatSettings(request.flatSettings()); params.withIncludeDefaults(request.includeDefaults()); return new Request(HttpHead.METHOD_NAME, endpoint, params.getParams(), null); } @@ -613,7 +623,6 @@ static Request indexPutSettings(UpdateSettingsRequest updateSettingsRequest) thr parameters.withTimeout(updateSettingsRequest.timeout()); parameters.withMasterTimeout(updateSettingsRequest.masterNodeTimeout()); parameters.withIndicesOptions(updateSettingsRequest.indicesOptions()); - parameters.withFlatSettings(updateSettingsRequest.flatSettings()); parameters.withPreserveExisting(updateSettingsRequest.isPreserveExisting()); String[] indices = updateSettingsRequest.indices() == null ? 
Strings.EMPTY_ARRAY : updateSettingsRequest.indices(); @@ -715,6 +724,13 @@ Params withFetchSourceContext(FetchSourceContext fetchSourceContext) { return this; } + Params withFields(String[] fields) { + if (fields != null && fields.length > 0) { + return putParam("fields", String.join(",", fields)); + } + return this; + } + Params withMasterTimeout(TimeValue masterTimeout) { return putParam("master_timeout", masterTimeout); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index bf80aa7720741..c6d5e947f2c62 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -30,6 +30,8 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.get.MultiGetRequest; @@ -501,6 +503,31 @@ public final void rankEvalAsync(RankEvalRequest rankEvalRequest, ActionListener< headers); } + /** + * Executes a request using the Field Capabilities API. + * + * See Field Capabilities API + * on elastic.co. + */ + public final FieldCapabilitiesResponse fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest, + Header... headers) throws IOException { + return performRequestAndParseEntity(fieldCapabilitiesRequest, Request::fieldCaps, + FieldCapabilitiesResponse::fromXContent, emptySet(), headers); + } + + /** + * Asynchronously executes a request using the Field Capabilities API. + * + * See Field Capabilities API + * on elastic.co. + */ + public final void fieldCapsAsync(FieldCapabilitiesRequest fieldCapabilitiesRequest, + ActionListener listener, + Header... 
headers) { + performRequestAsyncAndParseEntity(fieldCapabilitiesRequest, Request::fieldCaps, + FieldCapabilitiesResponse::fromXContent, listener, emptySet(), headers); + } + protected final Resp performRequestAndParseEntity(Req request, CheckedFunction requestConverter, CheckedFunction entityParser, diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/PingAndInfoIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/PingAndInfoIT.java index c0de571226c4c..b4d8828eb7e6f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/PingAndInfoIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/PingAndInfoIT.java @@ -42,6 +42,8 @@ public void testInfo() throws IOException { // only check node name existence, might be a different one from what was hit by low level client in multi-node cluster assertNotNull(info.getNodeName()); Map versionMap = (Map) infoAsMap.get("version"); + assertEquals(versionMap.get("build_flavor"), info.getBuild().flavor().displayName()); + assertEquals(versionMap.get("build_type"), info.getBuild().type().displayName()); assertEquals(versionMap.get("build_hash"), info.getBuild().shortHash()); assertEquals(versionMap.get("build_date"), info.getBuild().date()); assertEquals(versionMap.get("build_snapshot"), info.getBuild().isSnapshot()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java index abce180546dfc..0fdeb7555a04a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java @@ -52,6 +52,7 @@ import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkShardRequest; import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexRequest; @@ -89,6 +90,7 @@ import org.elasticsearch.index.rankeval.RankEvalSpec; import org.elasticsearch.index.rankeval.RatedRequest; import org.elasticsearch.index.rankeval.RestRankEvalAction; +import org.elasticsearch.rest.action.RestFieldCapabilitiesAction; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; @@ -108,11 +110,14 @@ import java.lang.reflect.Constructor; import java.lang.reflect.Modifier; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.StringJoiner; import java.util.function.Consumer; import java.util.function.Function; @@ -128,6 +133,8 @@ import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchRequest; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.nullValue; public class RequestTests extends ESTestCase { @@ -272,7 +279,6 @@ public void testIndicesExist() { Map expectedParams = new HashMap<>(); 
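Stepping back from the test changes for a moment, here is a minimal sketch of how the new field capabilities methods on the high-level client might be used from calling code; the host, index, and field names are placeholders:

```java
import org.apache.http.HttpHost;
import org.elasticsearch.action.fieldcaps.FieldCapabilities;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

import java.io.IOException;
import java.util.Map;

public class FieldCapsExample {
    public static void main(String[] args) throws IOException {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            FieldCapabilitiesRequest request = new FieldCapabilitiesRequest()
                    .indices("index1", "index2")
                    .fields("rating");
            FieldCapabilitiesResponse response = client.fieldCaps(request);
            // One entry per mapped type of the field across the requested indices.
            Map<String, FieldCapabilities> ratingCaps = response.getField("rating");
            ratingCaps.forEach((type, caps) -> System.out.println(
                    type + " searchable=" + caps.isSearchable() + " aggregatable=" + caps.isAggregatable()));
        }
    }
}
```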
setRandomIndicesOptions(getIndexRequest::indicesOptions, getIndexRequest::indicesOptions, expectedParams); setRandomLocal(getIndexRequest, expectedParams); - setRandomFlatSettings(getIndexRequest::flatSettings, expectedParams); setRandomHumanReadable(getIndexRequest, expectedParams); setRandomIncludeDefaults(getIndexRequest, expectedParams); @@ -1214,6 +1220,47 @@ public void testExistsAliasNoAliasNoIndex() { } } + public void testFieldCaps() { + // Create a random request. + String[] indices = randomIndicesNames(0, 5); + String[] fields = generateRandomStringArray(5, 10, false, false); + + FieldCapabilitiesRequest fieldCapabilitiesRequest = new FieldCapabilitiesRequest() + .indices(indices) + .fields(fields); + + Map indicesOptionsParams = new HashMap<>(); + setRandomIndicesOptions(fieldCapabilitiesRequest::indicesOptions, + fieldCapabilitiesRequest::indicesOptions, + indicesOptionsParams); + + Request request = Request.fieldCaps(fieldCapabilitiesRequest); + + // Verify that the resulting REST request looks as expected. + StringJoiner endpoint = new StringJoiner("/", "/", ""); + String joinedIndices = String.join(",", indices); + if (!joinedIndices.isEmpty()) { + endpoint.add(joinedIndices); + } + endpoint.add("_field_caps"); + + assertEquals(endpoint.toString(), request.getEndpoint()); + assertEquals(4, request.getParameters().size()); + + // Note that we don't check the field param value explicitly, as field names are passed through + // a hash set before being added to the request, and can appear in a non-deterministic order. + assertThat(request.getParameters(), hasKey("fields")); + String[] requestFields = Strings.splitStringByCommaToArray(request.getParameters().get("fields")); + assertEquals(new HashSet<>(Arrays.asList(fields)), + new HashSet<>(Arrays.asList(requestFields))); + + for (Map.Entry param : indicesOptionsParams.entrySet()) { + assertThat(request.getParameters(), hasEntry(param.getKey(), param.getValue())); + } + + assertNull(request.getEntity()); + } + public void testRankEval() throws Exception { RankEvalSpec spec = new RankEvalSpec( Collections.singletonList(new RatedRequest("queryId", Collections.emptyList(), new SearchSourceBuilder())), @@ -1234,7 +1281,6 @@ public void testRankEval() throws Exception { assertEquals(3, request.getParameters().size()); assertEquals(expectedParams, request.getParameters()); assertToXContentBody(spec, request.getEntity()); - } public void testSplit() throws IOException { @@ -1292,7 +1338,6 @@ private static void resizeTest(ResizeType resizeType, CheckedFunction expectedParams = new HashMap<>(); - setRandomFlatSettings(request::flatSettings, expectedParams); setRandomMasterTimeout(request, expectedParams); setRandomTimeout(request::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); @@ -1344,7 +1389,6 @@ public void testIndexPutSettings() throws IOException { String[] indices = randomBoolean() ? 
null : randomIndicesNames(0, 2); UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indices); Map expectedParams = new HashMap<>(); - setRandomFlatSettings(updateSettingsRequest::flatSettings, expectedParams); setRandomMasterTimeout(updateSettingsRequest, expectedParams); setRandomTimeout(updateSettingsRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); setRandomIndicesOptions(updateSettingsRequest::indicesOptions, updateSettingsRequest::indicesOptions, expectedParams); @@ -1627,16 +1671,6 @@ private static void setRandomTimeout(Consumer setter, TimeValue defaultT } } - private static void setRandomFlatSettings(Consumer setter, Map expectedParams) { - if (randomBoolean()) { - boolean flatSettings = randomBoolean(); - setter.accept(flatSettings); - if (flatSettings) { - expectedParams.put("flat_settings", String.valueOf(flatSettings)); - } - } - } - private static void setRandomMasterTimeout(MasterNodeRequest request, Map expectedParams) { if (randomBoolean()) { String masterTimeout = randomTimeValue(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index 01ef0598100fb..9828041332b32 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -27,6 +27,9 @@ import org.apache.http.nio.entity.NStringEntity; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.ClearScrollResponse; import org.elasticsearch.action.search.MultiSearchRequest; @@ -96,14 +99,31 @@ public void indexDocuments() throws IOException { client().performRequest(HttpPut.METHOD_NAME, "/index/type/5", Collections.emptyMap(), doc5); client().performRequest(HttpPost.METHOD_NAME, "/index/_refresh"); - StringEntity doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON); + + StringEntity doc = new StringEntity("{\"field\":\"value1\", \"rating\": 7}", ContentType.APPLICATION_JSON); client().performRequest(HttpPut.METHOD_NAME, "/index1/doc/1", Collections.emptyMap(), doc); doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON); client().performRequest(HttpPut.METHOD_NAME, "/index1/doc/2", Collections.emptyMap(), doc); - doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON); + + StringEntity mappings = new StringEntity( + "{" + + " \"mappings\": {" + + " \"doc\": {" + + " \"properties\": {" + + " \"rating\": {" + + " \"type\": \"keyword\"" + + " }" + + " }" + + " }" + + " }" + + "}}", + ContentType.APPLICATION_JSON); + client().performRequest("PUT", "/index2", Collections.emptyMap(), mappings); + doc = new StringEntity("{\"field\":\"value1\", \"rating\": \"good\"}", ContentType.APPLICATION_JSON); client().performRequest(HttpPut.METHOD_NAME, "/index2/doc/3", Collections.emptyMap(), doc); doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON); client().performRequest(HttpPut.METHOD_NAME, "/index2/doc/4", Collections.emptyMap(), doc); + doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON); 
client().performRequest(HttpPut.METHOD_NAME, "/index3/doc/5", Collections.emptyMap(), doc); doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON); @@ -713,6 +733,57 @@ public void testMultiSearch_failure() throws Exception { assertThat(multiSearchResponse.getResponses()[1].getResponse(), nullValue()); } + public void testFieldCaps() throws IOException { + FieldCapabilitiesRequest request = new FieldCapabilitiesRequest() + .indices("index1", "index2") + .fields("rating", "field"); + + FieldCapabilitiesResponse response = execute(request, + highLevelClient()::fieldCaps, highLevelClient()::fieldCapsAsync); + + // Check the capabilities for the 'rating' field. + assertTrue(response.get().containsKey("rating")); + Map ratingResponse = response.getField("rating"); + assertEquals(2, ratingResponse.size()); + + FieldCapabilities expectedKeywordCapabilities = new FieldCapabilities( + "rating", "keyword", true, true, new String[]{"index2"}, null, null); + assertEquals(expectedKeywordCapabilities, ratingResponse.get("keyword")); + + FieldCapabilities expectedLongCapabilities = new FieldCapabilities( + "rating", "long", true, true, new String[]{"index1"}, null, null); + assertEquals(expectedLongCapabilities, ratingResponse.get("long")); + + // Check the capabilities for the 'field' field. + assertTrue(response.get().containsKey("field")); + Map fieldResponse = response.getField("field"); + assertEquals(1, fieldResponse.size()); + + FieldCapabilities expectedTextCapabilities = new FieldCapabilities( + "field", "text", true, false); + assertEquals(expectedTextCapabilities, fieldResponse.get("text")); + } + + public void testFieldCapsWithNonExistentFields() throws IOException { + FieldCapabilitiesRequest request = new FieldCapabilitiesRequest() + .indices("index2") + .fields("nonexistent"); + + FieldCapabilitiesResponse response = execute(request, + highLevelClient()::fieldCaps, highLevelClient()::fieldCapsAsync); + assertTrue(response.get().isEmpty()); + } + + public void testFieldCapsWithNonExistentIndices() { + FieldCapabilitiesRequest request = new FieldCapabilitiesRequest() + .indices("non-existent") + .fields("rating"); + + ElasticsearchException exception = expectThrows(ElasticsearchException.class, + () -> execute(request, highLevelClient()::fieldCaps, highLevelClient()::fieldCapsAsync)); + assertEquals(RestStatus.NOT_FOUND, exception.status()); + } + private static void assertSearchHeader(SearchResponse searchResponse) { assertThat(searchResponse.getTook().nanos(), greaterThanOrEqualTo(0L)); assertEquals(0, searchResponse.getFailedShards()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java index 0747ca757c4b9..2e7ea1650f424 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java @@ -124,10 +124,6 @@ public void testClusterPutSettings() throws IOException { request.masterNodeTimeout("1m"); // <2> // end::put-settings-request-masterTimeout - // tag::put-settings-request-flat-settings - request.flatSettings(true); // <1> - // end::put-settings-request-flat-settings - // tag::put-settings-execute ClusterUpdateSettingsResponse response = client.cluster().putSettings(request); // end::put-settings-execute diff 
--git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index e33d1e4729b0e..24c321f87f998 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -58,7 +58,6 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RestHighLevelClient; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -114,8 +113,7 @@ public void testIndicesExist() throws IOException { request.local(false); // <1> request.humanReadable(true); // <2> request.includeDefaults(false); // <3> - request.flatSettings(false); // <4> - request.indicesOptions(indicesOptions); // <5> + request.indicesOptions(indicesOptions); // <4> // end::indices-exists-request-optionals // tag::indices-exists-response @@ -1433,9 +1431,6 @@ public void testIndexPutSettings() throws Exception { // end::put-settings-settings-source } - // tag::put-settings-request-flat-settings - request.flatSettings(true); // <1> - // end::put-settings-request-flat-settings // tag::put-settings-request-preserveExisting request.setPreserveExisting(false); // <1> // end::put-settings-request-preserveExisting diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java index bd1cf48f14195..4400d05a9f820 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java @@ -21,8 +21,13 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.ClearScrollRequest; @@ -44,6 +49,16 @@ import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.rankeval.EvalQueryQuality; +import org.elasticsearch.index.rankeval.EvaluationMetric; +import org.elasticsearch.index.rankeval.MetricDetail; +import org.elasticsearch.index.rankeval.PrecisionAtK; +import org.elasticsearch.index.rankeval.RankEvalRequest; +import org.elasticsearch.index.rankeval.RankEvalResponse; +import org.elasticsearch.index.rankeval.RankEvalSpec; +import org.elasticsearch.index.rankeval.RatedDocument; +import 
org.elasticsearch.index.rankeval.RatedRequest; +import org.elasticsearch.index.rankeval.RatedSearchHit; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.SearchHit; @@ -74,6 +89,7 @@ import org.elasticsearch.search.suggest.term.TermSuggestion; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -82,6 +98,8 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -146,6 +164,7 @@ public void testSearch() throws Exception { // tag::search-source-setter SearchRequest searchRequest = new SearchRequest(); + searchRequest.indices("posts"); searchRequest.source(sourceBuilder); // end::search-source-setter @@ -688,6 +707,136 @@ public void onFailure(Exception e) { } } + public void testFieldCaps() throws Exception { + indexSearchTestData(); + RestHighLevelClient client = highLevelClient(); + // tag::field-caps-request + FieldCapabilitiesRequest request = new FieldCapabilitiesRequest() + .fields("user") + .indices("posts", "authors", "contributors"); + // end::field-caps-request + + // tag::field-caps-request-indicesOptions + request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> + // end::field-caps-request-indicesOptions + + // tag::field-caps-execute + FieldCapabilitiesResponse response = client.fieldCaps(request); + // end::field-caps-execute + + // tag::field-caps-response + assertThat(response.get().keySet(), contains("user")); + Map userResponse = response.getField("user"); + + assertThat(userResponse.keySet(), containsInAnyOrder("keyword", "text")); // <1> + FieldCapabilities textCapabilities = userResponse.get("keyword"); + + assertTrue(textCapabilities.isSearchable()); + assertFalse(textCapabilities.isAggregatable()); + + assertArrayEquals(textCapabilities.indices(), // <2> + new String[]{"authors", "contributors"}); + assertNull(textCapabilities.nonSearchableIndices()); // <3> + assertArrayEquals(textCapabilities.nonAggregatableIndices(), // <4> + new String[]{"authors"}); + // end::field-caps-response + + // tag::field-caps-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(FieldCapabilitiesResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::field-caps-execute-listener + + // Replace the empty listener by a blocking listener for tests. 
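The `field-caps-response` snippet above shows the shape of the response: `response.get()` is keyed by field name, and each entry maps a mapping type (`keyword`, `text`, ...) to a `FieldCapabilities` object describing where that field is searchable and aggregatable. A minimal sketch of walking that structure outside of assertions (the generic types are written out here as an assumption, and the printing is illustrative only, not part of this change):

```java
// Illustrative only: iterate every field/type pair reported by the response above.
for (Map.Entry<String, Map<String, FieldCapabilities>> perField : response.get().entrySet()) {
    for (Map.Entry<String, FieldCapabilities> perType : perField.getValue().entrySet()) {
        FieldCapabilities caps = perType.getValue();
        System.out.println(perField.getKey() + " as " + perType.getKey()
                + ": searchable=" + caps.isSearchable()
                + ", aggregatable=" + caps.isAggregatable()
                // indices() may be null when all queried indices agree on the type
                + ", indices=" + Arrays.toString(caps.indices()));
    }
}
```

The asynchronous execution that follows uses the same listener-plus-latch pattern as the other documentation tests in this change.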
+ CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::field-caps-execute-async + client.fieldCapsAsync(request, listener); // <1> + // end::field-caps-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + + public void testRankEval() throws Exception { + indexSearchTestData(); + RestHighLevelClient client = highLevelClient(); + { + // tag::rank-eval-request-basic + EvaluationMetric metric = new PrecisionAtK(); // <1> + List ratedDocs = new ArrayList<>(); + ratedDocs.add(new RatedDocument("posts", "1", 1)); // <2> + SearchSourceBuilder searchQuery = new SearchSourceBuilder(); + searchQuery.query(QueryBuilders.matchQuery("user", "kimchy"));// <3> + RatedRequest ratedRequest = // <4> + new RatedRequest("kimchy_query", ratedDocs, searchQuery); + List ratedRequests = Arrays.asList(ratedRequest); + RankEvalSpec specification = + new RankEvalSpec(ratedRequests, metric); // <5> + RankEvalRequest request = // <6> + new RankEvalRequest(specification, new String[] { "posts" }); + // end::rank-eval-request-basic + + // tag::rank-eval-execute + RankEvalResponse response = client.rankEval(request); + // end::rank-eval-execute + + // tag::rank-eval-response + double evaluationResult = response.getEvaluationResult(); // <1> + assertEquals(1.0 / 3.0, evaluationResult, 0.0); + Map partialResults = + response.getPartialResults(); + EvalQueryQuality evalQuality = + partialResults.get("kimchy_query"); // <2> + assertEquals("kimchy_query", evalQuality.getId()); + double qualityLevel = evalQuality.getQualityLevel(); // <3> + assertEquals(1.0 / 3.0, qualityLevel, 0.0); + List hitsAndRatings = evalQuality.getHitsAndRatings(); + RatedSearchHit ratedSearchHit = hitsAndRatings.get(0); + assertEquals("3", ratedSearchHit.getSearchHit().getId()); // <4> + assertFalse(ratedSearchHit.getRating().isPresent()); // <5> + MetricDetail metricDetails = evalQuality.getMetricDetails(); + String metricName = metricDetails.getMetricName(); + assertEquals(PrecisionAtK.NAME, metricName); // <6> + PrecisionAtK.Detail detail = (PrecisionAtK.Detail) metricDetails; + assertEquals(1, detail.getRelevantRetrieved()); // <7> + assertEquals(3, detail.getRetrieved()); + // end::rank-eval-response + + // tag::rank-eval-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(RankEvalResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::rank-eval-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::rank-eval-execute-async + client.rankEvalAsync(request, listener); // <1> + // end::rank-eval-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testMultiSearch() throws Exception { indexSearchTestData(); RestHighLevelClient client = highLevelClient(); @@ -712,7 +861,7 @@ public void testMultiSearch() throws Exception { MultiSearchResponse.Item firstResponse = response.getResponses()[0]; // <1> assertNull(firstResponse.getFailure()); // <2> SearchResponse searchResponse = firstResponse.getResponse(); // <3> - assertEquals(3, searchResponse.getHits().getTotalHits()); + assertEquals(4, searchResponse.getHits().getTotalHits()); MultiSearchResponse.Item secondResponse = response.getResponses()[1]; // <4> assertNull(secondResponse.getFailure()); searchResponse = 
secondResponse.getResponse(); @@ -758,18 +907,35 @@ public void onFailure(Exception e) { } private void indexSearchTestData() throws IOException { - BulkRequest request = new BulkRequest(); - request.add(new IndexRequest("posts", "doc", "1") + CreateIndexRequest authorsRequest = new CreateIndexRequest("authors") + .mapping("doc", "user", "type=keyword,doc_values=false"); + CreateIndexResponse authorsResponse = highLevelClient().indices().create(authorsRequest); + assertTrue(authorsResponse.isAcknowledged()); + + CreateIndexRequest reviewersRequest = new CreateIndexRequest("contributors") + .mapping("doc", "user", "type=keyword"); + CreateIndexResponse reviewersResponse = highLevelClient().indices().create(reviewersRequest); + assertTrue(reviewersResponse.isAcknowledged()); + + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest("posts", "doc", "1") .source(XContentType.JSON, "title", "In which order are my Elasticsearch queries executed?", "user", Arrays.asList("kimchy", "luca"), "innerObject", Collections.singletonMap("key", "value"))); - request.add(new IndexRequest("posts", "doc", "2") + bulkRequest.add(new IndexRequest("posts", "doc", "2") .source(XContentType.JSON, "title", "Current status and upcoming changes in Elasticsearch", "user", Arrays.asList("kimchy", "christoph"), "innerObject", Collections.singletonMap("key", "value"))); - request.add(new IndexRequest("posts", "doc", "3") + bulkRequest.add(new IndexRequest("posts", "doc", "3") .source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch", "user", Arrays.asList("kimchy", "tanguy"), "innerObject", Collections.singletonMap("key", "value"))); - request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - BulkResponse bulkResponse = highLevelClient().bulk(request); + + bulkRequest.add(new IndexRequest("authors", "doc", "1") + .source(XContentType.JSON, "user", "kimchy")); + bulkRequest.add(new IndexRequest("contributors", "doc", "1") + .source(XContentType.JSON, "user", "tanguy")); + + + bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + BulkResponse bulkResponse = highLevelClient().bulk(bulkRequest); assertSame(RestStatus.OK, bulkResponse.status()); assertFalse(bulkResponse.hasFailures()); } diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 8e0f179634a27..bcb928495c5d2 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -20,7 +20,6 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks apply plugin: 'elasticsearch.build' -apply plugin: 'ru.vyarus.animalsniffer' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' @@ -52,8 +51,6 @@ dependencies { testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" testCompile "org.elasticsearch:securemock:${versions.securemock}" testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}" - testCompile "org.codehaus.mojo:animal-sniffer-annotations:1.15" - signature "org.codehaus.mojo.signature:java17:1.0@signature" } forbiddenApisMain { diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java index 8142fea6d259b..199b7542e62a2 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java @@ -24,7 +24,6 @@ import com.sun.net.httpserver.HttpsConfigurator; import 
com.sun.net.httpserver.HttpsServer; import org.apache.http.HttpHost; -import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement; import org.elasticsearch.mocksocket.MockHttpServer; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -46,8 +45,6 @@ /** * Integration test to validate the builder builds a client with the correct configuration */ -//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes -@IgnoreJRERequirement public class RestClientBuilderIntegTests extends RestClientTestCase { private static HttpsServer httpsServer; @@ -60,8 +57,6 @@ public static void startHttpServer() throws Exception { httpsServer.start(); } - //animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes - @IgnoreJRERequirement private static class ResponseHandler implements HttpHandler { @Override public void handle(HttpExchange httpExchange) throws IOException { diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java index da5a960c2e84c..16c192b3977a8 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java @@ -23,7 +23,6 @@ import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; import org.apache.http.HttpHost; -import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement; import org.elasticsearch.mocksocket.MockHttpServer; import org.junit.AfterClass; import org.junit.Before; @@ -48,8 +47,6 @@ * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}. * Works against real http servers, multiple hosts. Also tests failover by randomly shutting down hosts. */ -//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes -@IgnoreJRERequirement public class RestClientMultipleHostsIntegTests extends RestClientTestCase { private static HttpServer[] httpServers; @@ -90,8 +87,6 @@ private static HttpServer createHttpServer() throws Exception { return httpServer; } - //animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes - @IgnoreJRERequirement private static class ResponseHandler implements HttpHandler { private final int statusCode; diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index 59aa2baab9672..dd23dbe454fa4 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ -33,7 +33,6 @@ import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; import org.apache.http.util.EntityUtils; -import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement; import org.elasticsearch.mocksocket.MockHttpServer; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -64,8 +63,6 @@ * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}. * Works against a real http server, one single host. 
*/ -//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes -@IgnoreJRERequirement public class RestClientSingleHostIntegTests extends RestClientTestCase { private static HttpServer httpServer; @@ -91,8 +88,6 @@ private static HttpServer createHttpServer() throws Exception { return httpServer; } - //animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes - @IgnoreJRERequirement private static class ResponseHandler implements HttpHandler { private final int statusCode; diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index bb59bc84f5385..f2fc297a9e4c8 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -23,8 +23,12 @@ import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.EmptyDirTask import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.MavenFilteringHack +import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.plugin.PluginBuildPlugin +import java.nio.file.Files +import java.nio.file.Path + // need this so Zip/Tar tasks get basic defaults... apply plugin: 'base' @@ -42,23 +46,23 @@ task createPluginsDir(type: EmptyDirTask) { dirMode 0755 } -CopySpec archiveFiles(CopySpec... innerFiles) { +CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, boolean oss) { return copySpec { into("elasticsearch-${version}") { with libFiles into('config') { dirMode 0750 fileMode 0660 - with configFiles('def') + with configFiles(distributionType, oss) } into('bin') { + with binFiles(distributionType, oss) with copySpec { - with binFiles('def') from('../src/bin') { include '*.bat' filter(FixCrLfFilter, eol: FixCrLfFilter.CrLf.newInstance('crlf')) } - MavenFilteringHack.filter(it, expansionsForDistribution('def')) + MavenFilteringHack.filter(it, expansionsForDistribution(distributionType, oss)) } } into('') { @@ -73,43 +77,65 @@ CopySpec archiveFiles(CopySpec... innerFiles) { pluginsDir.getParent() } } - with commonFiles + from(rootProject.projectDir) { + include 'README.textile' + } + from(rootProject.file('licenses')) { + include oss ? 'APACHE-LICENSE-2.0.txt' : 'ELASTIC-LICENSE.txt' + rename { 'LICENSE.txt' } + } + with noticeFile from('../src') { include 'bin/*.exe' } - for (CopySpec files : innerFiles) { - with files + into('modules') { + with modulesFiles } } } } -task buildIntegTestZip(type: Zip) { +// common config across all zip/tar +tasks.withType(AbstractArchiveTask) { dependsOn createLogsDir, createPluginsDir - destinationDir = file('integ-test-zip/build/distributions') - baseName = 'elasticsearch' - with archiveFiles(transportModulesFiles) + String subdir = it.name.substring('build'.size()).replaceAll(/[A-Z]/) { '-' + it.toLowerCase() }.substring(1) + destinationDir = file("${subdir}/build/distributions") + baseName = "elasticsearch${ subdir.contains('oss') ? 
'-oss' : ''}" +} + +task buildIntegTestZip(type: Zip) { + with archiveFiles(transportModulesFiles, 'zip', false) } task buildZip(type: Zip) { - dependsOn createLogsDir, createPluginsDir - destinationDir = file('zip/build/distributions') - baseName = 'elasticsearch' - with archiveFiles(modulesFiles) + with archiveFiles(modulesFiles(false), 'zip', false) } -task buildTar(type: Tar) { - dependsOn createLogsDir, createPluginsDir - destinationDir = file('tar/build/distributions') - baseName = 'elasticsearch' +task buildOssZip(type: Zip) { + with archiveFiles(modulesFiles(true), 'zip', true) +} + +Closure commonTarConfig = { extension = 'tar.gz' compression = Compression.GZIP dirMode 0755 fileMode 0644 - with archiveFiles(modulesFiles) } +task buildTar(type: Tar) { + configure(commonTarConfig) + with archiveFiles(modulesFiles(false), 'tar', false) +} + +task buildOssTar(type: Tar) { + configure(commonTarConfig) + with archiveFiles(modulesFiles(true), 'tar', true) +} + +Closure tarExists = { it -> new File('/bin/tar').exists() || new File('/usr/bin/tar').exists() || new File('/usr/local/bin/tar').exists() } +Closure unzipExists = { it -> new File('/bin/unzip').exists() || new File('/usr/bin/unzip').exists() || new File('/usr/local/bin/unzip').exists() } + // This configures the default artifact for the distribution specific // subprojects. We have subprojects for two reasons: // 1. Gradle project substitutions can only bind to the default @@ -119,35 +145,78 @@ task buildTar(type: Tar) { subprojects { apply plugin: 'distribution' - archivesBaseName = 'elasticsearch' - String buildTask = "build${it.name.replaceAll(/-[a-z]/) { it.substring(1).toUpperCase() }.capitalize()}" ext.buildDist = parent.tasks.getByName(buildTask) artifacts { 'default' buildDist } - // sanity checks if a archives can be extracted - File extractionDir = new File(buildDir, 'extracted') - task testExtraction(type: LoggedExec) { + // sanity checks if archives can be extracted + final File archiveExtractionDir + if (project.name.contains('tar')) { + archiveExtractionDir = new File(buildDir, 'tar-extracted') + } else { + assert project.name.contains('zip') + archiveExtractionDir = new File(buildDir, 'zip-extracted') + } + task checkExtraction(type: LoggedExec) { dependsOn buildDist doFirst { - project.delete(extractionDir) - extractionDir.mkdirs() + project.delete(archiveExtractionDir) + archiveExtractionDir.mkdirs() } } - if (project.name.contains('zip')) { - testExtraction { - onlyIf { new File('/bin/unzip').exists() || new File('/usr/bin/unzip').exists() || new File('/usr/local/bin/unzip').exists() } - commandLine 'unzip', "${-> buildDist.outputs.files.singleFile}", '-d', extractionDir + check.dependsOn checkExtraction + if (project.name.contains('tar')) { + checkExtraction { + onlyIf tarExists + commandLine 'tar', '-xvzf', "${-> buildDist.outputs.files.singleFile}", '-C', archiveExtractionDir } - } else { // tar - testExtraction { - onlyIf { new File('/bin/tar').exists() || new File('/usr/bin/tar').exists() || new File('/usr/local/bin/tar').exists() } - commandLine 'tar', '-xvzf', "${-> buildDist.outputs.files.singleFile}", '-C', extractionDir + } else { + assert project.name.contains('zip') + checkExtraction { + onlyIf unzipExists + commandLine 'unzip', "${-> buildDist.outputs.files.singleFile}", '-d', archiveExtractionDir } } - check.dependsOn testExtraction + + final Closure toolExists + if (project.name.contains('tar')) { + toolExists = tarExists + } else { + assert project.name.contains('zip') + toolExists = unzipExists + 
} + + + task checkLicense { + dependsOn buildDist, checkExtraction + onlyIf toolExists + doLast { + final String licenseFilename + if (project.name.contains('oss-')) { + licenseFilename = "APACHE-LICENSE-2.0.txt" + } else { + licenseFilename = "ELASTIC-LICENSE.txt" + } + final List licenseLines = Files.readAllLines(rootDir.toPath().resolve("licenses/" + licenseFilename)) + final Path licensePath = archiveExtractionDir.toPath().resolve("elasticsearch-${VersionProperties.elasticsearch}/LICENSE.txt") + assertLinesInFile(licensePath, licenseLines) + } + } + check.dependsOn checkLicense + + task checkNotice { + dependsOn buildDist, checkExtraction + onlyIf toolExists + doLast { + final List noticeLines = Arrays.asList("Elasticsearch", "Copyright 2009-2018 Elasticsearch") + final Path noticePath = archiveExtractionDir.toPath().resolve("elasticsearch-${VersionProperties.elasticsearch}/NOTICE.txt") + assertLinesInFile(noticePath, noticeLines) + } + } + check.dependsOn checkNotice + } /***************************************************************************** @@ -158,7 +227,7 @@ configure(subprojects.findAll { it.name == 'integ-test-zip' }) { apply plugin: 'elasticsearch.rest-test' integTest { - includePackaged true + includePackaged = true } integTestCluster { @@ -190,12 +259,16 @@ configure(subprojects.findAll { it.name.contains('zip') }) { // note: the group must be correct before applying the nexus plugin, or // it will capture the wrong value... - project.group = "org.elasticsearch.distribution.${project.name}" + String subgroup = project.name == 'integ-test-zip' ? 'integ-test-zip' : 'zip' + project.group = "org.elasticsearch.distribution.${subgroup}" + + // make the pom file name use elasticsearch instead of the project name + archivesBaseName = "elasticsearch${it.name.contains('oss') ? '-oss' : ''}" publishing { publications { nebula { - artifactId 'elasticsearch' + artifactId archivesBaseName artifact buildDist } /* @@ -215,7 +288,7 @@ configure(subprojects.findAll { it.name.contains('zip') }) { * local work, since we publish to maven central externally. */ nebulaRealPom(MavenPublication) { - artifactId 'elasticsearch' + artifactId archivesBaseName pom.packaging = 'pom' pom.withXml { XmlProvider xml -> Node root = xml.asNode() @@ -229,4 +302,3 @@ configure(subprojects.findAll { it.name.contains('zip') }) { } } } - diff --git a/distribution/archives/oss-tar/build.gradle b/distribution/archives/oss-tar/build.gradle new file mode 100644 index 0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/archives/oss-tar/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. diff --git a/distribution/archives/oss-zip/build.gradle b/distribution/archives/oss-zip/build.gradle new file mode 100644 index 0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/archives/oss-zip/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. diff --git a/distribution/build.gradle b/distribution/build.gradle index 20758deb918c0..c1ab5b76148b3 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -17,17 +17,13 @@ * under the License. 
*/ - -import org.apache.tools.ant.filters.FixCrLfFilter -import org.apache.tools.ant.taskdefs.condition.Os -import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.ConcatFilesTask import org.elasticsearch.gradle.MavenFilteringHack import org.elasticsearch.gradle.NoticeTask -import org.elasticsearch.gradle.precommit.DependencyLicensesTask -import org.elasticsearch.gradle.precommit.UpdateShasTask import org.elasticsearch.gradle.test.RunTask +import java.nio.file.Path + Collection distributions = project('archives').subprojects + project('packages').subprojects /***************************************************************************** @@ -46,42 +42,156 @@ task generateDependenciesReport(type: ConcatFilesTask) { *****************************************************************************/ // integ test zip only uses server, so a different notice file is needed there -task buildCoreNotice(type: NoticeTask) { +task buildServerNotice(type: NoticeTask) { licensesDir new File(project(':server').projectDir, 'licenses') } // other distributions include notices from modules as well, which are added below later -task buildFullNotice(type: NoticeTask) { +task buildDefaultNotice(type: NoticeTask) { + licensesDir new File(project(':server').projectDir, 'licenses') +} + +// other distributions include notices from modules as well, which are added below later +task buildOssNotice(type: NoticeTask) { licensesDir new File(project(':server').projectDir, 'licenses') } /***************************************************************************** * Modules * *****************************************************************************/ +String ossOutputs = 'build/outputs/oss' +String defaultOutputs = 'build/outputs/default' +String transportOutputs = 'build/outputs/transport-only' + +task processOssOutputs(type: Sync) { + into ossOutputs +} + +task processDefaultOutputs(type: Sync) { + into defaultOutputs + from processOssOutputs +} + +// Integ tests work over the rest http layer, so we need a transport included with the integ test zip. +// All transport modules are included so that they may be randomized for testing +task processTransportOutputs(type: Sync) { + into transportOutputs +} + +// these are dummy tasks that can be used to depend on the relevant sub output dir +task buildOssModules { + dependsOn processOssOutputs + outputs.dir "${ossOutputs}/modules" +} +task buildOssBin { + dependsOn processOssOutputs + outputs.dir "${ossOutputs}/bin" +} +task buildOssConfig { + dependsOn processOssOutputs + outputs.dir "${ossOutputs}/config" +} +task buildDefaultModules { + dependsOn processDefaultOutputs + outputs.dir "${defaultOutputs}/modules" +} +task buildDefaultBin { + dependsOn processDefaultOutputs + outputs.dir "${defaultOutputs}/bin" +} +task buildDefaultConfig { + dependsOn processDefaultOutputs + outputs.dir "${defaultOutputs}/config" +} +task buildTransportModules { + dependsOn processTransportOutputs + outputs.dir "${transportOutputs}/modules" +} -task buildModules(type: Sync) { - into 'build/modules' +void copyModule(Sync copyTask, Project module) { + copyTask.configure { + dependsOn { module.bundlePlugin } + from({ zipTree(module.bundlePlugin.outputs.files.singleFile) }) { + includeEmptyDirs false + + // these are handled separately in the log4j config tasks below + exclude '*/config/log4j2.properties' + exclude 'config/log4j2.properties' + + eachFile { details -> + String name = module.plugins.hasPlugin('elasticsearch.esplugin') ? 
module.esplugin.name : module.es_meta_plugin.name + // Copy all non config/bin files + // Note these might be unde a subdirectory in the case of a meta plugin + if ((details.relativePath.pathString ==~ /([^\/]+\/)?(config|bin)\/.*/) == false) { + details.relativePath = details.relativePath.prepend('modules', name) + } else if ((details.relativePath.pathString ==~ /([^\/]+\/)(config|bin)\/.*/)) { + // this is the meta plugin case, in which we need to remove the intermediate dir + String[] segments = details.relativePath.segments + details.relativePath = new RelativePath(true, segments.takeRight(segments.length - 1)) + } + } + } + } +} + +// log4j config could be contained in modules, so we must join it together using these tasks +task buildOssLog4jConfig { + dependsOn processOssOutputs + ext.contents = [] + ext.log4jFile = file("${ossOutputs}/log4j2.properties") + outputs.file log4jFile +} +task buildDefaultLog4jConfig { + dependsOn processDefaultOutputs + ext.contents = [] + ext.log4jFile = file("${defaultOutputs}/log4j2.properties") + outputs.file log4jFile +} + +Closure writeLog4jProperties = { + String mainLog4jProperties = file('src/config/log4j2.properties').getText('UTF-8') + it.log4jFile.setText(mainLog4jProperties, 'UTF-8') + for (String moduleLog4jProperties : it.contents.reverse()) { + it.log4jFile.append(moduleLog4jProperties, 'UTF-8') + } +} +buildOssLog4jConfig.doLast(writeLog4jProperties) +buildDefaultLog4jConfig.doLast(writeLog4jProperties) + +// copy log4j2.properties from modules that have it +void copyLog4jProperties(Task buildTask, Project module) { + buildTask.doFirst { + FileTree tree = zipTree(module.bundlePlugin.outputs.files.singleFile) + FileTree filtered = tree.matching { + include 'config/log4j2.properties' + include '*/config/log4j2.properties' // could be in a bundled plugin + } + if (filtered.isEmpty() == false) { + buildTask.contents.add('\n\n' + filtered.singleFile.getText('UTF-8')) + } + } } ext.restTestExpansions = [ 'expected.modules.count': 0, ] -// we create the buildModules task above so the distribution subprojects can -// depend on it, but we don't actually configure it until here so we can do a single +// we create the buildOssModules task above but fill it here so we can do a single // loop over modules to also setup cross task dependencies and increment our modules counter project.rootProject.subprojects.findAll { it.parent.path == ':modules' }.each { Project module -> - buildFullNotice { - def defaultLicensesDir = new File(module.projectDir, 'licenses') - if (defaultLicensesDir.exists()) { - licensesDir defaultLicensesDir - } + File licenses = new File(module.projectDir, 'licenses') + if (licenses.exists()) { + buildDefaultNotice.licensesDir licenses + buildOssNotice.licensesDir licenses } - buildModules { - dependsOn({ project(module.path).bundlePlugin }) - into(module.name) { - from { zipTree(project(module.path).bundlePlugin.outputs.files.singleFile) } - } + + copyModule(processOssOutputs, module) + if (module.name.startsWith('transport-')) { + copyModule(processTransportOutputs, module) } + + copyLog4jProperties(buildOssLog4jConfig, module) + copyLog4jProperties(buildDefaultLog4jConfig, module) + // make sure the module's integration tests run after the integ-test-zip (ie rest tests) module.afterEvaluate({ module.integTest.mustRunAfter(':distribution:archives:integ-test-zip:integTest') @@ -89,20 +199,19 @@ project.rootProject.subprojects.findAll { it.parent.path == ':modules' }.each { restTestExpansions['expected.modules.count'] += 1 } -// 
Integ tests work over the rest http layer, so we need a transport included with the integ test zip. -// All transport modules are included so that they may be randomized for testing -task buildTransportModules(type: Sync) { - into 'build/transport-modules' -} - -project.rootProject.subprojects.findAll { it.path.startsWith(':modules:transport-') }.each { Project transport -> - buildTransportModules { - dependsOn({ project(transport.path).bundlePlugin }) - into(transport.name) { - from { zipTree(project(transport.path).bundlePlugin.outputs.files.singleFile) } - } +// use licenses from each of the bundled xpack plugins +Project xpack = project(':x-pack:plugin') +xpack.subprojects.findAll { it.name != 'bwc' }.each { Project xpackSubproject -> + File licenses = new File(xpackSubproject.projectDir, 'licenses') + if (licenses.exists()) { + buildDefaultNotice.licensesDir licenses } } +// but copy just the top level meta plugin to the default modules +copyModule(processDefaultOutputs, xpack) +copyLog4jProperties(buildDefaultLog4jConfig, xpack) + +// // make sure we have a clean task since we aren't a java project, but we have tasks that // put stuff in the build dir @@ -130,45 +239,71 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { from { project(':distribution:tools:plugin-cli').jar } } - modulesFiles = copySpec { - into 'modules' - from project(':distribution').buildModules + modulesFiles = { oss -> + copySpec { + eachFile { + if (it.relativePath.segments[-2] == 'bin') { + // bin files, wherever they are within modules (eg platform specific) should be executable + it.mode = 0755 + } + } + if (oss) { + from project(':distribution').buildOssModules + } else { + from project(':distribution').buildDefaultModules + } + } } transportModulesFiles = copySpec { - into "modules" from project(':distribution').buildTransportModules } - configFiles = { distributionType -> + configFiles = { distributionType, oss -> copySpec { - from '../src/config' - MavenFilteringHack.filter(it, expansionsForDistribution(distributionType)) + with copySpec { + // main config files, processed with distribution specific substitutions + from '../src/config' + exclude 'log4j2.properties' // this is handled separately below + MavenFilteringHack.filter(it, expansionsForDistribution(distributionType, oss)) + } + if (oss) { + from project(':distribution').buildOssLog4jConfig + from project(':distribution').buildOssConfig + } else { + from project(':distribution').buildDefaultLog4jConfig + from project(':distribution').buildDefaultConfig + } } } - binFiles = { distributionType -> + binFiles = { distributionType, oss -> copySpec { - // everything except windows files - from '../src/bin' - exclude '*.bat' - exclude '*.exe' - eachFile { it.setMode(0755) } - MavenFilteringHack.filter(it, expansionsForDistribution(distributionType)) + with copySpec { + // main bin files, processed with distribution specific substitutions + // everything except windows files + from '../src/bin' + exclude '*.exe' + exclude '*.bat' + eachFile { it.setMode(0755) } + MavenFilteringHack.filter(it, expansionsForDistribution(distributionType, oss)) + } + with copySpec { + eachFile { it.setMode(0755) } + if (oss) { + from project(':distribution').buildOssBin + } else { + from project(':distribution').buildDefaultBin + } + } } } - commonFiles = copySpec { - from rootProject.projectDir - include 'LICENSE.txt' - include 'README.textile' - } - noticeFile = copySpec { if (project.name == 'integ-test-zip') { - from buildCoreNotice + from 
buildServerNotice } else { - from buildFullNotice + from buildDefaultNotice } } } @@ -176,7 +311,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { } task run(type: RunTask) { - distribution = 'zip' + distribution = System.getProperty('run.distribution', 'zip') } /** @@ -210,13 +345,22 @@ task run(type: RunTask) { * */ subprojects { - ext.expansionsForDistribution = { distributionType -> + ext.expansionsForDistribution = { distributionType, oss -> final String defaultHeapSize = "1g" final String packagingPathData = "path.data: /var/lib/elasticsearch" final String pathLogs = "/var/log/elasticsearch" final String packagingPathLogs = "path.logs: ${pathLogs}" final String packagingLoggc = "${pathLogs}/gc.log" + String licenseText + if (oss) { + licenseText = rootProject.file('licenses/APACHE-LICENSE-2.0.txt').getText('UTF-8') + } else { + licenseText = rootProject.file('licenses/ELASTIC-LICENSE.txt').getText('UTF-8') + } + // license text needs to be indented with a single space + licenseText = ' ' + licenseText.replace('\n', '\n ') + String footer = "# Built for ${project.name}-${project.version} " + "(${distributionType})" Map expansions = [ @@ -281,6 +425,26 @@ subprojects { 'deb': "exit 0\n${footer}", 'def': footer ], + + 'es.distribution.flavor': [ + 'def': oss ? 'oss' : 'default' + ], + + + 'es.distribution.type': [ + 'deb': 'deb', + 'rpm': 'rpm', + 'tar': 'tar', + 'zip': 'zip' + ], + + 'license.name': [ + 'deb': oss ? 'ASL-2.0' : 'Elastic-License' + ], + + 'license.text': [ + 'deb': licenseText, + ], ] Map result = [:] expansions = expansions.each { key, value -> diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index 48b84b4036240..42412c6230fa4 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -131,25 +131,28 @@ subprojects { } } - String debDir = 'distribution/packages/deb' - String rpmDir = 'distribution/packages/rpm' - String zipDir = 'distribution/archives/zip' - if (bwcVersion.before('6.3.0')) { - debDir = 'distribution/deb' - rpmDir = 'distribution/rpm' - zipDir = 'distribution/zip' + List artifactFiles = [] + List projectDirs = [] + for (String project : ['zip', 'deb', 'rpm']) { + String baseDir = "distribution" + if (bwcVersion.onOrAfter('6.3.0')) { + baseDir += project == 'zip' ? 
'/archives' : '/packages' + // add oss variant first + projectDirs.add("${baseDir}/oss-${project}") + artifactFiles.add(file("${checkoutDir}/${baseDir}/oss-${project}/build/distributions/elasticsearch-oss-${bwcVersion}.${project}")) + } + projectDirs.add("${baseDir}/${project}") + artifactFiles.add(file("${checkoutDir}/${baseDir}/${project}/build/distributions/elasticsearch-${bwcVersion}.${project}")) } - File bwcDeb = file("${checkoutDir}/${debDir}/build/distributions/elasticsearch-${bwcVersion}.deb") - File bwcRpm = file("${checkoutDir}/${rpmDir}/build/distributions/elasticsearch-${bwcVersion}.rpm") - File bwcZip = file("${checkoutDir}/${zipDir}/build/distributions/elasticsearch-${bwcVersion}.zip") + task buildBwcVersion(type: Exec) { dependsOn checkoutBwcBranch, writeBuildMetadata workingDir = checkoutDir if (["5.6", "6.0", "6.1"].contains(bwcBranch)) { // we are building branches that are officially built with JDK 8, push JAVA8_HOME to JAVA_HOME for these builds - environment('JAVA_HOME', "${-> getJavaHome(project, 8, "JAVA8_HOME is required to build BWC versions for BWC branch [" + bwcBranch + "]")}") + environment('JAVA_HOME', getJavaHome(it, 8)) } else if ("6.2".equals(bwcBranch)) { - environment('JAVA_HOME', "${-> getJavaHome(project, 9, "JAVA9_HOME is required to build BWC versions for BWC branch [" + bwcBranch + "]")}") + environment('JAVA_HOME', getJavaHome(it, 9)) } else { environment('JAVA_HOME', project.compilerJavaHome) } @@ -159,7 +162,10 @@ subprojects { } else { executable new File(checkoutDir, 'gradlew').toString() } - args ":${debDir.replace('/', ':')}:assemble", ":${rpmDir.replace('/', ':')}:assemble", ":${zipDir.replace('/', ':')}:assemble", "-Dbuild.snapshot=true" + for (String dir : projectDirs) { + args ":${dir.replace('/', ':')}:assemble" + } + args "-Dbuild.snapshot=true" final LogLevel logLevel = gradle.startParameter.logLevel if ([LogLevel.QUIET, LogLevel.WARN, LogLevel.INFO, LogLevel.DEBUG].contains(logLevel)) { args "--${logLevel.name().toLowerCase(Locale.ENGLISH)}" @@ -172,7 +178,7 @@ subprojects { args "--full-stacktrace" } doLast { - List missing = [bwcDeb, bwcRpm, bwcZip].grep { file -> + List missing = artifactFiles.grep { file -> false == file.exists() } if (false == missing.empty) { @@ -183,8 +189,10 @@ subprojects { } artifacts { - 'default' file: bwcDeb, name: 'elasticsearch', type: 'deb', builtBy: buildBwcVersion - 'default' file: bwcRpm, name: 'elasticsearch', type: 'rpm', builtBy: buildBwcVersion - 'default' file: bwcZip, name: 'elasticsearch', type: 'zip', builtBy: buildBwcVersion + for (File artifactFile : artifactFiles) { + String artifactName = artifactFile.name.contains('oss') ? 'elasticsearch-oss' : 'elasticsearch' + String suffix = artifactFile.toString()[-3..-1] + 'default' file: artifactFile, name: artifactName, type: suffix, builtBy: buildBwcVersion + } } } diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 6c5d149a10a31..33f98386a8987 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -15,9 +15,15 @@ * KIND, either express or implied. 
See the License for the */ + import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.MavenFilteringHack +import java.nio.file.Files +import java.nio.file.Path +import java.util.regex.Matcher +import java.util.regex.Pattern + /***************************************************************************** * Deb and rpm configuration * ***************************************************************************** @@ -54,19 +60,22 @@ buildscript { } } -void addProcessFilesTask(String type) { - String packagingFiles = "build/packaging/${type}" +void addProcessFilesTask(String type, boolean oss) { + String packagingFiles = "build/packaging/${ oss ? 'oss-' : ''}${type}" - task("process${type.capitalize()}Files", type: Copy) { - from 'src/common' - from "src/${type}" + task("process${oss ? 'Oss' : ''}${type.capitalize()}Files", type: Copy) { into packagingFiles - into('config') { - from '../src/config' + with copySpec { + from 'src/common' + from "src/${type}" + MavenFilteringHack.filter(it, expansionsForDistribution(type, oss)) } - MavenFilteringHack.filter(it, expansionsForDistribution(type)) + into('config') { + with configFiles(type, oss) + } + MavenFilteringHack.filter(it, expansionsForDistribution(type, oss)) doLast { // create empty dirs, we set the permissions when configuring the packages @@ -77,19 +86,24 @@ void addProcessFilesTask(String type) { } } } -addProcessFilesTask('deb') -addProcessFilesTask('rpm') +addProcessFilesTask('deb', true) +addProcessFilesTask('deb', false) +addProcessFilesTask('rpm', true) +addProcessFilesTask('rpm', false) // Common configuration that is package dependent. This can't go in ospackage // since we have different templated files that need to be consumed, but the structure // is the same -Closure commonPackageConfig(String type) { +Closure commonPackageConfig(String type, boolean oss) { return { + dependsOn "process${oss ? 'Oss' : ''}${type.capitalize()}Files" + packageName "elasticsearch${oss ? '-oss' : ''}" // Follow elasticsearch's file naming convention - archiveName "elasticsearch-${project.version}.${type}" + archiveName "${packageName}-${project.version}.${type}" - destinationDir = file("${type}/build/distributions") - String packagingFiles = "build/packaging/${type}" + String prefix = "${oss ? 
'oss-' : ''}${type}" + destinationDir = file("${prefix}/build/distributions") + String packagingFiles = "build/packaging/${prefix}" String scripts = "${packagingFiles}/scripts" preInstall file("${scripts}/preinst") @@ -104,13 +118,40 @@ Closure commonPackageConfig(String type) { // specify it again explicitly for copying common files into('/usr/share/elasticsearch') { into('bin') { - with binFiles(type) + with binFiles(type, oss) + } + from(rootProject.projectDir) { + include 'README.textile' + } + into('modules') { + with copySpec { + with modulesFiles(oss) + // we need to specify every intermediate directory, but modules could have sub directories + // and there might not be any files as direct children of intermediates (eg platform) + // so we must iterate through the parents, but duplicate calls with the same path + // are ok (they don't show up in the built packages) + eachFile { FileCopyDetails fcp -> + String[] segments = fcp.relativePath.segments + for (int i = segments.length - 2; i > 0 && segments[i] != 'modules'; --i) { + directory('/' + segments[0..i].join('/'), 0755) + } + } + } + } + } + + // license files + if (type == 'deb') { + into("/usr/share/doc/${packageName}") { + from "${packagingFiles}/copyright" + fileMode 0644 } - with copySpec { - with commonFiles - if (type == 'deb') { - // Deb gets a copyright file instead. - exclude 'LICENSE.txt' + } else { + assert type == 'rpm' + into('/usr/share/elasticsearch') { + from(rootProject.file('licenses')) { + include oss ? 'APACHE-LICENSE-2.0.txt' : 'ELASTIC-LICENSE.txt' + rename { 'LICENSE.txt' } } } } @@ -120,7 +161,7 @@ Closure commonPackageConfig(String type) { configurationFile '/etc/elasticsearch/jvm.options' configurationFile '/etc/elasticsearch/log4j2.properties' into('/etc/elasticsearch') { - //dirMode 0750 + dirMode 0750 fileMode 0660 permissionGroup 'elasticsearch' includeEmptyDirs true @@ -128,7 +169,7 @@ Closure commonPackageConfig(String type) { fileType CONFIG | NOREPLACE from "${packagingFiles}/config" } - String envFile = expansionsForDistribution(type)['path.env'] + String envFile = expansionsForDistribution(type, false)['path.env'] configurationFile envFile into(new File(envFile).getParent()) { fileType CONFIG | NOREPLACE @@ -176,6 +217,9 @@ Closure commonPackageConfig(String type) { copyEmptyDir('/var/log/elasticsearch', 'elasticsearch', 'elasticsearch', 0750) copyEmptyDir('/var/lib/elasticsearch', 'elasticsearch', 'elasticsearch', 0750) copyEmptyDir('/usr/share/elasticsearch/plugins', 'root', 'root', 0755) + + // the oss package conflicts with the default distribution and vice versa + conflicts('elasticsearch' + (oss ? '' : '-oss')) } } @@ -183,7 +227,6 @@ apply plugin: 'nebula.ospackage-base' // this is package indepdendent configuration ospackage { - packageName 'elasticsearch' maintainer 'Elasticsearch Team ' summary ''' Elasticsearch is a distributed RESTful search engine built for the cloud. @@ -212,96 +255,88 @@ ospackage { into '/usr/share/elasticsearch' with libFiles - with modulesFiles with noticeFile } -task buildDeb(type: Deb) { - dependsOn processDebFiles - configure(commonPackageConfig('deb')) +Closure commonDebConfig(boolean oss) { + return { + configure(commonPackageConfig('deb', oss)) + + // jdeb does not provide a way to set the License control attribute, and ospackage + // silently ignores setting it. 
Instead, we set the license as "custom field" + if (oss) { + customFields['License'] = 'ASL-2.0' + } else { + customFields['License'] = 'Elastic-License' + } - version = project.version - packageGroup 'web' - requires 'bash' - requires 'libc6' - requires 'adduser' + version = project.version.replace('-', '~') + packageGroup 'web' + requires 'bash' + requires 'libc6' + requires 'adduser' - into('/usr/share/lintian/overrides') { - from('src/deb/lintian/elasticsearch') - } - into('/usr/share/doc/elasticsearch') { - from 'src/deb/copyright' - fileMode 0644 + into('/usr/share/lintian/overrides') { + from('src/deb/lintian/elasticsearch') + } } } -// task that sanity checks if the Deb archive can be extracted -task checkDeb(type: LoggedExec) { - dependsOn buildDeb - onlyIf { new File('/usr/bin/dpkg-deb').exists() || new File('/usr/local/bin/dpkg-deb').exists() } - final File debExtracted = new File("${buildDir}", 'deb-extracted') - commandLine 'dpkg-deb', '-x', "deb/build/distributions/elasticsearch-${project.version}.deb", debExtracted - doFirst { - debExtracted.deleteDir() - } +task buildDeb(type: Deb) { + configure(commonDebConfig(false)) } -task buildRpm(type: Rpm) { - dependsOn processRpmFiles - configure(commonPackageConfig('rpm')) - - packageGroup 'Application/Internet' - requires '/bin/bash' - - prefix '/usr' - packager 'Elasticsearch' - version = project.version.replace('-', '_') - release = '1' - arch 'NOARCH' - os 'LINUX' - license '2009' - distribution 'Elasticsearch' - vendor 'Elasticsearch' - // TODO ospackage doesn't support icon but we used to have one - - // without this the rpm will have parent dirs of any files we copy in, eg /etc/elasticsearch - addParentDirs false - - // Declare the folders so that the RPM package manager removes - // them when upgrading or removing the package - directory('/usr/share/elasticsearch/bin', 0755) - directory('/usr/share/elasticsearch/lib', 0755) - directory('/usr/share/elasticsearch/modules', 0755) - modulesFiles.eachFile { FileCopyDetails fcp -> - if (fcp.name == "plugin-descriptor.properties") { - directory('/usr/share/elasticsearch/modules/' + fcp.file.parentFile.name, 0755) +task buildOssDeb(type: Deb) { + configure(commonDebConfig(true)) +} + +Closure commonRpmConfig(boolean oss) { + return { + configure(commonPackageConfig('rpm', oss)) + + if (oss) { + license 'ASL 2.0' + } else { + license 'Elastic License' } + + packageGroup 'Application/Internet' + requires '/bin/bash' + + prefix '/usr' + packager 'Elasticsearch' + version = project.version.replace('-', '_') + release = '1' + arch 'NOARCH' + os 'LINUX' + distribution 'Elasticsearch' + vendor 'Elasticsearch' + // TODO ospackage doesn't support icon but we used to have one + + // without this the rpm will have parent dirs of any files we copy in, eg /etc/elasticsearch + addParentDirs false + + // Declare the folders so that the RPM package manager removes + // them when upgrading or removing the package + directory('/usr/share/elasticsearch/bin', 0755) + directory('/usr/share/elasticsearch/lib', 0755) + directory('/usr/share/elasticsearch/modules', 0755) } } -// task that sanity checks if the RPM archive can be extracted -task checkRpm(type: LoggedExec) { - dependsOn buildRpm - onlyIf { new File('/bin/rpm').exists() || new File('/usr/bin/rpm').exists() || new File('/usr/local/bin/rpm').exists() } - final File rpmDatabase = new File("${buildDir}", 'rpm-database') - final File rpmExtracted = new File("${buildDir}", 'rpm-extracted') - commandLine 'rpm', - '--badreloc', - '--nodeps', - 
'--noscripts', - '--notriggers', - '--dbpath', - rpmDatabase, - '--relocate', - "/=${rpmExtracted}", - '-i', - "rpm/build/distributions/elasticsearch-${project.version}.rpm" - doFirst { - rpmDatabase.deleteDir() - rpmExtracted.deleteDir() - } +task buildRpm(type: Rpm) { + configure(commonRpmConfig(false)) +} + +task buildOssRpm(type: Rpm) { + configure(commonRpmConfig(true)) } +Closure dpkgExists = { it -> new File('/bin/dpkg-deb').exists() || new File('/usr/bin/dpkg-deb').exists() || new File('/usr/local/bin/dpkg-deb').exists() } +Closure rpmExists = { it -> new File('/bin/rpm').exists() || new File('/usr/bin/rpm').exists() || new File('/usr/local/bin/rpm').exists() } + +Closure debFilter = { f -> f.name.endsWith('.deb') } + // This configures the default artifact for the distribution specific // subprojects. We have subprojects because Gradle project substitutions // can only bind to the default configuration of a project @@ -313,7 +348,164 @@ subprojects { artifacts { 'default' buildDist } -} -check.dependsOn checkDeb, checkRpm + // sanity checks if packages can be extracted + final File extractionDir = new File(buildDir, 'extracted') + final File packageExtractionDir + if (project.name.contains('deb')) { + packageExtractionDir = new File(extractionDir, 'deb-extracted') + } else { + assert project.name.contains('rpm') + packageExtractionDir = new File(extractionDir, 'rpm-extracted') + } + task checkExtraction(type: LoggedExec) { + dependsOn buildDist + doFirst { + project.delete(extractionDir) + extractionDir.mkdirs() + } + } + check.dependsOn checkExtraction + if (project.name.contains('deb')) { + checkExtraction { + onlyIf dpkgExists + commandLine 'dpkg-deb', '-x', "${-> buildDist.outputs.files.filter(debFilter).singleFile}", packageExtractionDir + } + } else { + assert project.name.contains('rpm') + checkExtraction { + onlyIf rpmExists + final File rpmDatabase = new File(extractionDir, 'rpm-database') + commandLine 'rpm', + '--badreloc', + '--nodeps', + '--noscripts', + '--notriggers', + '--dbpath', + rpmDatabase, + '--relocate', + "/=${packageExtractionDir}", + '-i', + "${-> buildDist.outputs.files.singleFile}" + } + } + + task checkLicense { + dependsOn buildDist, checkExtraction + } + check.dependsOn checkLicense + if (project.name.contains('deb')) { + checkLicense { + onlyIf dpkgExists + doLast { + final Path copyrightPath + final String expectedLicense + final String licenseFilename + if (project.name.contains('oss-')) { + copyrightPath = packageExtractionDir.toPath().resolve("usr/share/doc/elasticsearch-oss/copyright") + expectedLicense = "ASL-2.0" + licenseFilename = "APACHE-LICENSE-2.0.txt" + } else { + copyrightPath = packageExtractionDir.toPath().resolve("usr/share/doc/elasticsearch/copyright") + expectedLicense = "Elastic-License" + licenseFilename = "ELASTIC-LICENSE.txt" + } + final List header = Arrays.asList("Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/", + "Copyright: Elasticsearch B.V. 
", + "License: " + expectedLicense) + final List licenseLines = Files.readAllLines(rootDir.toPath().resolve("licenses/" + licenseFilename)) + final List expectedLines = header + licenseLines.collect { " " + it } + assertLinesInFile(copyrightPath, expectedLines) + } + } + } else { + assert project.name.contains('rpm') + checkLicense { + onlyIf rpmExists + doLast { + final String licenseFilename + if (project.name.contains('oss-')) { + licenseFilename = "APACHE-LICENSE-2.0.txt" + } else { + licenseFilename = "ELASTIC-LICENSE.txt" + } + final List licenseLines = Files.readAllLines(rootDir.toPath().resolve("licenses/" + licenseFilename)) + final Path licensePath = packageExtractionDir.toPath().resolve("usr/share/elasticsearch/LICENSE.txt") + assertLinesInFile(licensePath, licenseLines) + } + } + } + + task checkNotice { + dependsOn buildDist, checkExtraction + onlyIf { (project.name.contains('deb') && dpkgExists.call(it)) || (project.name.contains('rpm') && rpmExists.call(it)) } + doLast { + final List noticeLines = Arrays.asList("Elasticsearch", "Copyright 2009-2018 Elasticsearch") + final Path noticePath = packageExtractionDir.toPath().resolve("usr/share/elasticsearch/NOTICE.txt") + assertLinesInFile(noticePath, noticeLines) + } + } + check.dependsOn checkNotice + + task checkLicenseMetadata(type: LoggedExec) { + dependsOn buildDist, checkExtraction + } + check.dependsOn checkLicenseMetadata + if (project.name.contains('deb')) { + checkLicenseMetadata { LoggedExec exec -> + onlyIf dpkgExists + final ByteArrayOutputStream output = new ByteArrayOutputStream() + exec.commandLine 'dpkg-deb', '--info', "${ -> buildDist.outputs.files.filter(debFilter).singleFile}" + exec.standardOutput = output + doLast { + final String expectedLicense + if (project.name.contains('oss-')) { + expectedLicense = "ASL-2.0" + } else { + expectedLicense = "Elastic-License" + } + final Pattern pattern = Pattern.compile("\\s*License: (.+)") + final String info = output.toString('UTF-8') + final String[] actualLines = info.split("\n") + int count = 0 + for (final String actualLine : actualLines) { + final Matcher matcher = pattern.matcher(actualLine) + if (matcher.matches()) { + count++ + final String actualLicense = matcher.group(1) + if (expectedLicense != actualLicense) { + throw new GradleException("expected license [${expectedLicense} for package info but found [${actualLicense}]") + } + } + } + if (count == 0) { + throw new GradleException("expected license [${expectedLicense}] for package info but found none in:\n${info}") + } + if (count > 1) { + throw new GradleException("expected a single license for package info but found [${count}] in:\n${info}") + } + } + } + } else { + assert project.name.contains('rpm') + checkLicenseMetadata { LoggedExec exec -> + onlyIf rpmExists + final ByteArrayOutputStream output = new ByteArrayOutputStream() + exec.commandLine 'rpm', '-qp', '--queryformat', '%{License}', "${-> buildDist.outputs.files.singleFile}" + exec.standardOutput = output + doLast { + final String license = output.toString('UTF-8') + final String expectedLicense + if (project.name.contains('oss-')) { + expectedLicense = "ASL 2.0" + } else { + expectedLicense = "Elastic License" + } + if (license != expectedLicense) { + throw new GradleException("expected license [${expectedLicense}] for [${-> buildDist.outputs.files.singleFile}] but was [${license}]") + } + } + } + } +} diff --git a/distribution/packages/oss-deb/build.gradle b/distribution/packages/oss-deb/build.gradle new file mode 100644 index 
0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/packages/oss-deb/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. diff --git a/distribution/packages/oss-rpm/build.gradle b/distribution/packages/oss-rpm/build.gradle new file mode 100644 index 0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/packages/oss-rpm/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. diff --git a/distribution/packages/src/deb/copyright b/distribution/packages/src/deb/copyright index 98a923677c907..44c7582666f21 100644 --- a/distribution/packages/src/deb/copyright +++ b/distribution/packages/src/deb/copyright @@ -1,17 +1,4 @@ -Copyright 2013-2018 Elasticsearch - -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - On Debian systems, the complete text of the Apache version 2.0 license - can be found in "/usr/share/common-licenses/Apache-2.0". +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Copyright: Elasticsearch B.V. +License: ${license.name} +${license.text} diff --git a/distribution/src/bin/elasticsearch b/distribution/src/bin/elasticsearch index 11efddf6e2678..84e14eea3f6f8 100755 --- a/distribution/src/bin/elasticsearch +++ b/distribution/src/bin/elasticsearch @@ -28,6 +28,8 @@ if ! 
echo $* | grep -E '(^-d |-d$| -d |--daemonize$|--daemonize )' > /dev/null; $ES_JAVA_OPTS \ -Des.path.home="$ES_HOME" \ -Des.path.conf="$ES_PATH_CONF" \ + -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ + -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ -cp "$ES_CLASSPATH" \ org.elasticsearch.bootstrap.Elasticsearch \ "$@" @@ -37,6 +39,8 @@ else $ES_JAVA_OPTS \ -Des.path.home="$ES_HOME" \ -Des.path.conf="$ES_PATH_CONF" \ + -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ + -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ -cp "$ES_CLASSPATH" \ org.elasticsearch.bootstrap.Elasticsearch \ "$@" \ diff --git a/distribution/src/bin/elasticsearch-env b/distribution/src/bin/elasticsearch-env index cc86a10b184ae..9d58d88e7aaf1 100644 --- a/distribution/src/bin/elasticsearch-env +++ b/distribution/src/bin/elasticsearch-env @@ -77,6 +77,9 @@ fi # now make ES_PATH_CONF absolute ES_PATH_CONF=`cd "$ES_PATH_CONF"; pwd` +ES_DISTRIBUTION_FLAVOR=${es.distribution.flavor} +ES_DISTRIBUTION_TYPE=${es.distribution.type} + if [ -z "$ES_TMPDIR" ]; then set +e mktemp --version 2>&1 | grep coreutils > /dev/null diff --git a/distribution/src/bin/elasticsearch-env.bat b/distribution/src/bin/elasticsearch-env.bat index 2499c0d99a4da..b0d015924b440 100644 --- a/distribution/src/bin/elasticsearch-env.bat +++ b/distribution/src/bin/elasticsearch-env.bat @@ -53,6 +53,9 @@ if not defined ES_PATH_CONF ( rem now make ES_PATH_CONF absolute for %%I in ("%ES_PATH_CONF%..") do set ES_PATH_CONF=%%~dpfI +set ES_DISTRIBUTION_FLAVOR=${es.distribution.flavor} +set ES_DISTRIBUTION_TYPE=${es.distribution.type} + if not defined ES_TMPDIR ( set ES_TMPDIR=!TMP!\elasticsearch ) diff --git a/distribution/src/bin/elasticsearch-keystore b/distribution/src/bin/elasticsearch-keystore index 8797e7c07a613..aee62dfde50d4 100755 --- a/distribution/src/bin/elasticsearch-keystore +++ b/distribution/src/bin/elasticsearch-keystore @@ -7,6 +7,8 @@ exec \ $ES_JAVA_OPTS \ -Des.path.home="$ES_HOME" \ -Des.path.conf="$ES_PATH_CONF" \ + -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ + -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ -cp "$ES_CLASSPATH" \ org.elasticsearch.common.settings.KeyStoreCli \ "$@" diff --git a/distribution/src/bin/elasticsearch-keystore.bat b/distribution/src/bin/elasticsearch-keystore.bat index 7e131a80a1b6c..1d6616983d8cc 100644 --- a/distribution/src/bin/elasticsearch-keystore.bat +++ b/distribution/src/bin/elasticsearch-keystore.bat @@ -9,6 +9,8 @@ call "%~dp0elasticsearch-env.bat" || exit /b 1 %ES_JAVA_OPTS% ^ -Des.path.home="%ES_HOME%" ^ -Des.path.conf="%ES_PATH_CONF%" ^ + -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ + -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ -cp "%ES_CLASSPATH%" ^ org.elasticsearch.common.settings.KeyStoreCli ^ %* diff --git a/distribution/src/bin/elasticsearch-plugin b/distribution/src/bin/elasticsearch-plugin index a2e228d490af5..500fd710c1aea 100755 --- a/distribution/src/bin/elasticsearch-plugin +++ b/distribution/src/bin/elasticsearch-plugin @@ -7,6 +7,8 @@ exec \ $ES_JAVA_OPTS \ -Des.path.home="$ES_HOME" \ -Des.path.conf="$ES_PATH_CONF" \ + -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ + -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ -cp "$ES_CLASSPATH" \ org.elasticsearch.plugins.PluginCli \ "$@" diff --git a/distribution/src/bin/elasticsearch-plugin.bat b/distribution/src/bin/elasticsearch-plugin.bat index 1d059aaaceee9..b3b94a31863f1 100644 --- a/distribution/src/bin/elasticsearch-plugin.bat +++ b/distribution/src/bin/elasticsearch-plugin.bat @@ -9,6 +9,8 @@ 
call "%~dp0elasticsearch-env.bat" || exit /b 1 %ES_JAVA_OPTS% ^ -Des.path.home="%ES_HOME%" ^ -Des.path.conf="%ES_PATH_CONF%" ^ + -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ + -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ -cp "%ES_CLASSPATH%" ^ org.elasticsearch.plugins.PluginCli ^ %* diff --git a/distribution/src/bin/elasticsearch-service.bat b/distribution/src/bin/elasticsearch-service.bat index e4f3e92b084c4..a1d0f04560e70 100644 --- a/distribution/src/bin/elasticsearch-service.bat +++ b/distribution/src/bin/elasticsearch-service.bat @@ -159,7 +159,7 @@ if "%JVM_SS%" == "" ( goto:eof ) -set ES_PARAMS=-Delasticsearch;-Des.path.home="%ES_HOME%";-Des.path.conf="%ES_PATH_CONF%" +set ES_PARAMS=-Delasticsearch;-Des.path.home="%ES_HOME%";-Des.path.conf="%ES_PATH_CONF%";-Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%";-Des.distribution.type="%ES_DISTRIBUTION_TYPE%" if "%ES_START_TYPE%" == "" set ES_START_TYPE=manual if "%ES_STOP_TIMEOUT%" == "" set ES_STOP_TIMEOUT=0 diff --git a/distribution/src/bin/elasticsearch-translog b/distribution/src/bin/elasticsearch-translog index dcb52c29ea381..e176231c6f44d 100755 --- a/distribution/src/bin/elasticsearch-translog +++ b/distribution/src/bin/elasticsearch-translog @@ -7,6 +7,8 @@ exec \ $ES_JAVA_OPTS \ -Des.path.home="$ES_HOME" \ -Des.path.conf="$ES_PATH_CONF" \ + -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ + -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ -cp "$ES_CLASSPATH" \ org.elasticsearch.index.translog.TranslogToolCli \ "$@" diff --git a/distribution/src/bin/elasticsearch-translog.bat b/distribution/src/bin/elasticsearch-translog.bat index 4f15e9b379250..492c1f0831263 100644 --- a/distribution/src/bin/elasticsearch-translog.bat +++ b/distribution/src/bin/elasticsearch-translog.bat @@ -9,6 +9,8 @@ call "%~dp0elasticsearch-env.bat" || exit /b 1 %ES_JAVA_OPTS% ^ -Des.path.home="%ES_HOME%" ^ -Des.path.conf="%ES_PATH_CONF%" ^ + -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ + -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ -cp "%ES_CLASSPATH%" ^ org.elasticsearch.index.translog.TranslogToolCli ^ %* diff --git a/distribution/src/bin/elasticsearch.bat b/distribution/src/bin/elasticsearch.bat index e0f52c54c627f..6e268c9b13321 100644 --- a/distribution/src/bin/elasticsearch.bat +++ b/distribution/src/bin/elasticsearch.bat @@ -51,7 +51,7 @@ if "%MAYBE_JVM_OPTIONS_PARSER_FAILED%" == "jvm_options_parser_failed" ( ) cd /d "%ES_HOME%" -%JAVA% %ES_JAVA_OPTS% -Delasticsearch -Des.path.home="%ES_HOME%" -Des.path.conf="%ES_PATH_CONF%" -cp "%ES_CLASSPATH%" "org.elasticsearch.bootstrap.Elasticsearch" !newparams! +%JAVA% %ES_JAVA_OPTS% -Delasticsearch -Des.path.home="%ES_HOME%" -Des.path.conf="%ES_PATH_CONF%" -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" -cp "%ES_CLASSPATH%" "org.elasticsearch.bootstrap.Elasticsearch" !newparams! 
endlocal endlocal diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index e1733e478b8c2..71c57f7f10135 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -21,10 +21,9 @@ import joptsimple.OptionSet; import joptsimple.OptionSpec; - import org.apache.lucene.search.spell.LevensteinDistance; import org.apache.lucene.util.CollectionUtil; -import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.bootstrap.JarHell; import org.elasticsearch.cli.EnvironmentAwareCommand; @@ -35,7 +34,7 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.hash.MessageDigests; -import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; import java.io.BufferedReader; @@ -152,7 +151,6 @@ class InstallPluginCommand extends EnvironmentAwareCommand { plugins.add(line.trim()); line = reader.readLine(); } - plugins.add("x-pack"); OFFICIAL_PLUGINS = Collections.unmodifiableSet(plugins); } catch (IOException e) { throw new RuntimeException(e); @@ -218,11 +216,32 @@ void execute(Terminal terminal, String pluginId, boolean isBatch, Environment en throw new UserException(ExitCodes.USAGE, "plugin id is required"); } + if ("x-pack".equals(pluginId)) { + handleInstallXPack(buildFlavor()); + } + Path pluginZip = download(terminal, pluginId, env.tmpFile()); Path extractedZip = unzip(pluginZip, env.pluginsFile()); install(terminal, isBatch, extractedZip, env); } + Build.Flavor buildFlavor() { + return Build.CURRENT.flavor(); + } + + private static void handleInstallXPack(final Build.Flavor flavor) throws UserException { + switch (flavor) { + case DEFAULT: + throw new UserException(ExitCodes.CONFIG, "this distribution of Elasticsearch contains X-Pack by default"); + case OSS: + throw new UserException( + ExitCodes.CONFIG, + "X-Pack is not available with the oss distribution; to use X-Pack features use the default distribution"); + case UNKNOWN: + throw new IllegalStateException("your distribution is broken"); + } + } + /** Downloads the plugin and returns the file it was downloaded to. */ private Path download(Terminal terminal, String pluginId, Path tmpDir) throws Exception { if (OFFICIAL_PLUGINS.contains(pluginId)) { @@ -571,6 +590,9 @@ private void verifyPluginName(Path pluginPath, String pluginName, Path candidate /** Load information about the plugin, and verify it can be installed with no errors. 
*/ private PluginInfo loadPluginInfo(Terminal terminal, Path pluginRoot, boolean isBatch, Environment env) throws Exception { final PluginInfo info = PluginInfo.readFromProperties(pluginRoot); + if (info.hasNativeController()) { + throw new IllegalStateException("plugins can not have native controllers"); + } PluginsService.verifyCompatibility(info); // checking for existing version of the plugin @@ -659,19 +681,16 @@ private void installMetaPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, Set permissions = new HashSet<>(); final List pluginInfos = new ArrayList<>(); - boolean hasNativeController = false; for (Path plugin : pluginPaths) { final PluginInfo info = loadPluginInfo(terminal, plugin, isBatch, env); pluginInfos.add(info); - hasNativeController |= info.hasNativeController(); - Path policy = plugin.resolve(PluginInfo.ES_PLUGIN_POLICY); if (Files.exists(policy)) { permissions.addAll(PluginSecurity.parsePermissions(policy, env.tmpFile())); } } - PluginSecurity.confirmPolicyExceptions(terminal, permissions, hasNativeController, isBatch); + PluginSecurity.confirmPolicyExceptions(terminal, permissions, isBatch); // move support files and rename as needed to prepare the exploded plugin for its final location for (int i = 0; i < pluginPaths.size(); ++i) { @@ -704,7 +723,7 @@ private void installPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, } else { permissions = Collections.emptySet(); } - PluginSecurity.confirmPolicyExceptions(terminal, permissions, info.hasNativeController(), isBatch); + PluginSecurity.confirmPolicyExceptions(terminal, permissions, isBatch); final Path destination = env.pluginsFile().resolve(info.getName()); deleteOnFailure.add(destination); diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java index 96e009b3462f1..5931e66cb9a5d 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java @@ -22,8 +22,8 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import com.google.common.jimfs.Configuration; import com.google.common.jimfs.Jimfs; - import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.MockTerminal; @@ -35,7 +35,6 @@ import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.PathUtilsForTesting; -import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; @@ -479,6 +478,15 @@ public void testBuiltinModule() throws Exception { assertInstallCleaned(env.v2()); } + public void testBuiltinXpackModule() throws Exception { + Tuple env = createEnv(fs, temp); + Path pluginDir = createPluginDir(temp); + String pluginZip = createPluginUrl("x-pack", pluginDir); + UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); + assertTrue(e.getMessage(), e.getMessage().contains("is a system module")); + assertInstallCleaned(env.v2()); + } + public void testJarHell() throws Exception { // jar hell test needs a real filesystem assumeTrue("real filesystem", 
isReal); @@ -881,23 +889,33 @@ protected boolean addShutdownHook() { } } - public void testOfficialPluginsIncludesXpack() throws Exception { - MockTerminal terminal = new MockTerminal(); - new InstallPluginCommand() { + public void testInstallXPack() throws IOException { + runInstallXPackTest(Build.Flavor.DEFAULT, UserException.class, "this distribution of Elasticsearch contains X-Pack by default"); + runInstallXPackTest( + Build.Flavor.OSS, + UserException.class, + "X-Pack is not available with the oss distribution; to use X-Pack features use the default distribution"); + runInstallXPackTest(Build.Flavor.UNKNOWN, IllegalStateException.class, "your distribution is broken"); + } + + private void runInstallXPackTest( + final Build.Flavor flavor, final Class clazz, final String expectedMessage) throws IOException { + final InstallPluginCommand flavorCommand = new InstallPluginCommand() { @Override - protected boolean addShutdownHook() { - return false; + Build.Flavor buildFlavor() { + return flavor; } - }.main(new String[] { "--help" }, terminal); - assertTrue(terminal.getOutput(), terminal.getOutput().contains("x-pack")); + }; + + final Environment environment = createEnv(fs, temp).v2(); + final T exception = expectThrows(clazz, () -> flavorCommand.execute(terminal, "x-pack", false, environment)); + assertThat(exception, hasToString(containsString(expectedMessage))); } public void testInstallMisspelledOfficialPlugins() throws Exception { Tuple env = createEnv(fs, temp); - UserException e = expectThrows(UserException.class, () -> installPlugin("xpack", env.v1())); - assertThat(e.getMessage(), containsString("Unknown plugin xpack, did you mean [x-pack]?")); - e = expectThrows(UserException.class, () -> installPlugin("analysis-smartnc", env.v1())); + UserException e = expectThrows(UserException.class, () -> installPlugin("analysis-smartnc", env.v1())); assertThat(e.getMessage(), containsString("Unknown plugin analysis-smartnc, did you mean [analysis-smartcn]?")); e = expectThrows(UserException.class, () -> installPlugin("repository", env.v1())); @@ -1224,42 +1242,16 @@ public void testMetaPluginPolicyConfirmation() throws Exception { assertMetaPlugin("meta-plugin", "fake2", metaDir, env.v2()); } - public void testNativeControllerConfirmation() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); - String pluginZip = createPluginUrl("fake", pluginDir, "has.native.controller", "true"); - - assertPolicyConfirmation(env, pluginZip, "plugin forks a native controller"); - assertPlugin("fake", pluginDir, env.v2()); - } - - public void testMetaPluginNativeControllerConfirmation() throws Exception { - Tuple env = createEnv(fs, temp); - Path metaDir = createPluginDir(temp); - Path fake1Dir = metaDir.resolve("fake1"); - Files.createDirectory(fake1Dir); - writePlugin("fake1", fake1Dir, "has.native.controller", "true"); - Path fake2Dir = metaDir.resolve("fake2"); - Files.createDirectory(fake2Dir); - writePlugin("fake2", fake2Dir); - String pluginZip = createMetaPluginUrl("meta-plugin", metaDir); - - assertPolicyConfirmation(env, pluginZip, "plugin forks a native controller"); - assertMetaPlugin("meta-plugin", "fake1", metaDir, env.v2()); - assertMetaPlugin("meta-plugin", "fake2", metaDir, env.v2()); - } - - public void testNativeControllerAndPolicyConfirmation() throws Exception { + public void testPluginWithNativeController() throws Exception { Tuple env = createEnv(fs, temp); Path pluginDir = createPluginDir(temp); - writePluginSecurityPolicy(pluginDir, 
"setAccessible", "setFactory"); String pluginZip = createPluginUrl("fake", pluginDir, "has.native.controller", "true"); - assertPolicyConfirmation(env, pluginZip, "plugin requires additional permissions", "plugin forks a native controller"); - assertPlugin("fake", pluginDir, env.v2()); + final IllegalStateException e = expectThrows(IllegalStateException.class, () -> installPlugin(pluginZip, env.v1())); + assertThat(e, hasToString(containsString("plugins can not have native controllers"))); } - public void testMetaPluginNativeControllerAndPolicyConfirmation() throws Exception { + public void testMetaPluginWithNativeController() throws Exception { Tuple env = createEnv(fs, temp); Path metaDir = createPluginDir(temp); Path fake1Dir = metaDir.resolve("fake1"); @@ -1271,8 +1263,8 @@ public void testMetaPluginNativeControllerAndPolicyConfirmation() throws Excepti writePlugin("fake2", fake2Dir, "has.native.controller", "true"); String pluginZip = createMetaPluginUrl("meta-plugin", metaDir); - assertPolicyConfirmation(env, pluginZip, "plugin requires additional permissions", "plugin forks a native controller"); - assertMetaPlugin("meta-plugin", "fake1", metaDir, env.v2()); - assertMetaPlugin("meta-plugin", "fake2", metaDir, env.v2()); + final IllegalStateException e = expectThrows(IllegalStateException.class, () -> installPlugin(pluginZip, env.v1())); + assertThat(e, hasToString(containsString("plugins can not have native controllers"))); } + } diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc new file mode 100644 index 0000000000000..98be1db1b6d52 --- /dev/null +++ b/docs/CHANGELOG.asciidoc @@ -0,0 +1,46 @@ +// Use these for links to issue and pulls. Note issues and pulls redirect one to +// each other on Github, so don't worry too much on using the right prefix. +// :issue: https://github.com/elastic/elasticsearch/issues/ +// :pull: https://github.com/elastic/elasticsearch/pull/ + += Elasticsearch Release Notes + +== Elasticsearch 7.0.0 + +=== Breaking Changes + +<> ({pull}29609[#29609]) + +<> ({pull}29635[#29635]) + +=== Breaking Java Changes + +=== Deprecations + +=== New Features + +=== Enhancements + +=== Bug Fixes + +Fail snapshot operations early when creating or deleting a snapshot on a repository that has been +written to by an older Elasticsearch after writing to it with a newer Elasticsearch version. ({pull}30140[#30140]) + +=== Regressions + +=== Known Issues + +== Elasticsearch version 6.4.0 + +=== New Features + +=== Enhancements + +=== Bug Fixes + +=== Regressions + +=== Known Issues + + diff --git a/docs/build.gradle b/docs/build.gradle index 97094c6e79cbe..5057bead62d9b 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -20,6 +20,7 @@ apply plugin: 'elasticsearch.docs-test' integTestCluster { + distribution = 'oss-zip' /* Enable regexes in painless so our tests don't complain about example * snippets that use them. 
*/ setting 'script.painless.regex.enabled', 'true' diff --git a/docs/java-rest/high-level/cluster/put_settings.asciidoc b/docs/java-rest/high-level/cluster/put_settings.asciidoc index 74b479faa0501..dc9b1679d4717 100644 --- a/docs/java-rest/high-level/cluster/put_settings.asciidoc +++ b/docs/java-rest/high-level/cluster/put_settings.asciidoc @@ -54,13 +54,6 @@ include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-setti ==== Optional Arguments The following arguments can optionally be provided: -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-request-flat-settings] --------------------------------------------------- -<1> Whether the updated settings returned in the `ClusterUpdateSettings` should -be in a flat format - ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-request-timeout] diff --git a/docs/java-rest/high-level/indices/indices_exists.asciidoc b/docs/java-rest/high-level/indices/indices_exists.asciidoc index 4a227db49ed8c..ee744e97ce8bd 100644 --- a/docs/java-rest/high-level/indices/indices_exists.asciidoc +++ b/docs/java-rest/high-level/indices/indices_exists.asciidoc @@ -23,8 +23,7 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[indices-exists-req <1> Whether to return local information or retrieve the state from master node <2> Return result in a format suitable for humans <3> Whether to return all default setting for each of the indices -<4> Return settings in flat format -<5> Controls how unavailable indices are resolved and how wildcard expressions are expanded +<4> Controls how unavailable indices are resolved and how wildcard expressions are expanded [[java-rest-high-indices-sync]] ==== Synchronous Execution diff --git a/docs/java-rest/high-level/indices/put_settings.asciidoc b/docs/java-rest/high-level/indices/put_settings.asciidoc index 49312da82a400..c305eeaa0965b 100644 --- a/docs/java-rest/high-level/indices/put_settings.asciidoc +++ b/docs/java-rest/high-level/indices/put_settings.asciidoc @@ -55,13 +55,6 @@ include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-setti ==== Optional Arguments The following arguments can optionally be provided: -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-flat-settings] --------------------------------------------------- -<1> Whether the updated settings returned in the `UpdateSettings` should -be in a flat format - ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-preserveExisting] diff --git a/docs/java-rest/high-level/search/field-caps.asciidoc b/docs/java-rest/high-level/search/field-caps.asciidoc new file mode 100644 index 0000000000000..fef30f629ca61 --- /dev/null +++ b/docs/java-rest/high-level/search/field-caps.asciidoc @@ -0,0 +1,82 @@ +[[java-rest-high-field-caps]] +=== Field Capabilities API + +The field capabilities API allows for retrieving the capabilities of fields across multiple indices. 
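As a quick illustration of the request documented in the next section, a minimal sketch might look like the following; this is only an assumption-laden example (the field and index names are hypothetical), and it shows the request builder only, not the client call covered below:

["source","java"]
--------------------------------------------------
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.support.IndicesOptions;

public class FieldCapsRequestSketch {
    public static FieldCapabilitiesRequest buildRequest() {
        // Hypothetical field and index names, for illustration only.
        FieldCapabilitiesRequest request = new FieldCapabilitiesRequest()
                .fields("user", "text_*")             // fields to get capabilities for; wildcards are supported
                .indices("authors", "contributors");  // optional target indices; omit to run against all indices
        // Optionally control how unavailable indices and wildcard expressions are handled.
        request.indicesOptions(IndicesOptions.lenientExpandOpen());
        return request;
    }
}
--------------------------------------------------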
+ +[[java-rest-high-field-caps-request]] +==== Field Capabilities Request + +A `FieldCapabilitiesRequest` contains the list of fields for which capabilities +should be returned, plus an optional list of target indices. If no indices +are provided, the request will be executed on all indices. + +Note that the `fields` parameter supports wildcard notation. For example, providing `text_*` +will cause all fields that match the expression to be returned. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-request] +-------------------------------------------------- + +[[java-rest-high-field-caps-request-optional]] +===== Optional arguments + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-request-indicesOptions] +-------------------------------------------------- +<1> Setting `IndicesOptions` controls how unavailable indices are resolved and +how wildcard expressions are expanded. + +[[java-rest-high-field-caps-sync]] +==== Synchronous Execution + +The `fieldCaps` method executes the request synchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-execute] +-------------------------------------------------- + +[[java-rest-high-field-caps-async]] +==== Asynchronous Execution + +The `fieldCapsAsync` method executes the request asynchronously, +calling the provided `ActionListener` when the response is ready: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-execute-async] +-------------------------------------------------- +<1> The `FieldCapabilitiesRequest` to execute and the `ActionListener` to use when +the execution completes. + +The asynchronous method does not block and returns immediately. Once the request +completes, the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `FieldCapabilitiesResponse` is constructed as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. +<2> Called when the whole `FieldCapabilitiesRequest` fails. + +[[java-rest-high-field-caps-response]] +==== FieldCapabilitiesResponse + +For each requested field, the returned `FieldCapabilitiesResponse` contains its type +and whether or not it can be searched or aggregated on. The response also gives +information about how each index contributes to the field's capabilities. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-response] +-------------------------------------------------- +<1> The `user` field has two possible types, `keyword` and `text`. +<2> This field only has type `keyword` in the `authors` and `contributors` indices. +<3> Null, since the field is searchable in all indices for which it has the `keyword` type.
+<4> The `user` field is not aggregatable in the `authors` index. \ No newline at end of file diff --git a/docs/java-rest/high-level/search/rank-eval.asciidoc b/docs/java-rest/high-level/search/rank-eval.asciidoc new file mode 100644 index 0000000000000..6db0dadd00ed7 --- /dev/null +++ b/docs/java-rest/high-level/search/rank-eval.asciidoc @@ -0,0 +1,89 @@ +[[java-rest-high-rank-eval]] +=== Ranking Evaluation API + +The `rankEval` method allows you to evaluate the quality of ranked search +results over a set of search requests. Given sets of manually rated +documents for each search request, ranking evaluation performs a +<> request and calculates +information retrieval metrics like _mean reciprocal rank_, _precision_ +or _discounted cumulative gain_ on the returned results. + +[[java-rest-high-rank-eval-request]] +==== Ranking Evaluation Request + +In order to build a `RankEvalRequest`, you first need to create an +evaluation specification (`RankEvalSpec`). This specification requires +you to define the evaluation metric that is going to be calculated, as well +as a list of rated documents per search request. Creating the ranking +evaluation request then takes the specification and a list of target +indices as arguments: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[rank-eval-request-basic] +-------------------------------------------------- +<1> Define the metric used in the evaluation +<2> Add rated documents, specified by index name, id and rating +<3> Create the search query to evaluate +<4> Combine the three former parts into a `RatedRequest` +<5> Create the ranking evaluation specification +<6> Create the ranking evaluation request + +[[java-rest-high-rank-eval-sync]] +==== Synchronous Execution + +The `rankEval` method executes `RankEvalRequest`s synchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[rank-eval-execute] +-------------------------------------------------- + +[[java-rest-high-rank-eval-async]] +==== Asynchronous Execution + +The `rankEvalAsync` method executes `RankEvalRequest`s asynchronously, +calling the provided `ActionListener` when the response is ready. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[rank-eval-execute-async] +-------------------------------------------------- +<1> The `RankEvalRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it has +completed, the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `RankEvalResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[rank-eval-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. +<2> Called when the whole `RankEvalRequest` fails.
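A rough, hedged sketch of such a listener, using only the standard `ActionListener` callback interface (the actual snippet is pulled from `SearchDocumentationIT.java`; the callback bodies are left as comments), might look like:

["source","java"]
--------------------------------------------------
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.index.rankeval.RankEvalResponse;

ActionListener<RankEvalResponse> listener = new ActionListener<RankEvalResponse>() {
    @Override
    public void onResponse(RankEvalResponse response) {
        // inspect the overall evaluation score and the per-query partial results here
    }

    @Override
    public void onFailure(Exception e) {
        // the whole RankEvalRequest failed; log or rethrow as appropriate
    }
};
--------------------------------------------------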
+ +==== RankEvalResponse + +The `RankEvalResponse` that is returned by executing the request +contains information about the overall evaluation score, the +scores of each individual search request in the set of queries and +detailed information about search hits and details about the metric +calculation per partial result. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[rank-eval-response] +-------------------------------------------------- +<1> The overall evaluation result +<2> Partial results that are keyed by their query id +<3> The metric score for each partial result +<4> Rated search hits contain a fully fledged `SearchHit` +<5> Rated search hits also contain an `Optional` rating that +is not present if the document did not get a rating in the request +<6> Metric details are named after the metric used in the request +<7> After casting to the metric used in the request, the +metric details offers insight into parts of the metric calculation \ No newline at end of file diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 29052171cddc6..1c0e09c6c079e 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -32,10 +32,14 @@ The Java High Level REST Client supports the following Search APIs: * <> * <> * <> +* <> +* <> include::search/search.asciidoc[] include::search/scroll.asciidoc[] include::search/multi-search.asciidoc[] +include::search/field-caps.asciidoc[] +include::search/rank-eval.asciidoc[] == Miscellaneous APIs diff --git a/docs/painless/index.asciidoc b/docs/painless/index.asciidoc index 4898ed933363b..abfd4d4f00abe 100644 --- a/docs/painless/index.asciidoc +++ b/docs/painless/index.asciidoc @@ -5,39 +5,6 @@ include::../Versions.asciidoc[] include::painless-getting-started.asciidoc[] -// include::painless-examples.asciidoc[] - -// include::painless-design.asciidoc[] - include::painless-lang-spec.asciidoc[] -include::painless-syntax.asciidoc[] - include::painless-api-reference.asciidoc[] - -//// -Proposed Outline (WIP) -Getting Started with Painless - Accessing Doc Values - Updating Fields - Working with Dates - Using Regular Expressions - Debugging Painless Scripts - -Example Scripts - Using Painless in Script Fields - Using Painless in Watches - Using Painless in Function Score Queries - Using Painless in Script Queries - Using Painless When Updating Docs - Using Painless When Reindexing - -How Painless Works - Painless Architecture - Dispatching Functions - -Painless Language Specification -Painless API -//// - -Painless API Reference diff --git a/docs/painless/painless-api-reference.asciidoc b/docs/painless/painless-api-reference.asciidoc index 1bda6d890c859..54b1f20977b61 100644 --- a/docs/painless/painless-api-reference.asciidoc +++ b/docs/painless/painless-api-reference.asciidoc @@ -1,17 +1,13 @@ -["appendix",id="painless-api-reference"] -= Painless API Reference +[[painless-api-reference]] +== Painless API Reference -Painless has a strict whitelist for methods and -classes to make sure that all painless scripts are secure and fast. Most of -these methods are exposed directly from the JRE while others are part of -Elasticsearch or Painless itself. Below is a list of all available methods -grouped under the classes on which you can call them. Clicking on the method -name takes you to the documentation for the method. 
- -NOTE: Methods defined in the JRE also have a `(java 9)` link which can be used -to see the method's documentation in Java 9 while clicking on the method's name -goes to the Java 8 documentation. Usually these aren't different but it is -worth going to the version that matches the version of Java you are using to -run Elasticsearch just in case. +Painless has a strict whitelist for methods and classes to ensure all +Painless scripts are secure. Most of these methods are exposed directly +from the Java Runtime Environment (JRE) while others are part of +Elasticsearch or Painless itself. Below is a list of all available +classes grouped with their respective methods. Clicking on the method +name takes you to the documentation for that specific method. Methods +defined in the JRE also have a `(java 9)` link which can be used to see +the method's documentation in Java 9. include::painless-api-reference/index.asciidoc[] diff --git a/docs/painless/painless-casting.asciidoc b/docs/painless/painless-casting.asciidoc new file mode 100644 index 0000000000000..ec4f9919bd043 --- /dev/null +++ b/docs/painless/painless-casting.asciidoc @@ -0,0 +1,172 @@ +[[painless-casting]] +=== Casting + +Casting is the conversion of one type to another. Implicit casts are casts that +occur automatically, such as during an assignment operation. Explicit casts are +casts where you use the casting operator to explicitly convert one type to +another. This is necessary during operations where the cast cannot be inferred. + +To cast to a new type, precede the expression by the new type enclosed in +parentheses, for example +`(int)x`. + +The following sections specify the implicit casts that can be performed and the +explicit casts that are allowed. The only other permitted cast is casting +a single character `String` to a `char`. + +*Grammar:* +[source,ANTLR4] +---- +cast: '(' TYPE ')' expression +---- + +[[numeric-casting]] +==== Numeric Casting + +The following table shows the allowed implicit and explicit casts between +numeric types. Read the table by row. To find out if you need to explicitly +cast from type A to type B, find the row for type A and scan across to the +column for type B. + +IMPORTANT: Explicit casts between numeric types can result in some data loss. A +smaller numeric type cannot necessarily accommodate the value from a larger +numeric type. You might also lose precision when casting from integer types +to floating point types.
+ +|==== +| | byte | short | char | int | long | float | double +| byte | | implicit | implicit | implicit | implicit | implicit | implicit +| short | explicit | | explicit | implicit | implicit | implicit | implicit +| char | explicit | explicit | | implicit | implicit | implicit | implicit +| int | explicit | explicit | explicit | | implicit | implicit | implicit +| long | explicit | explicit | explicit | explicit | | implicit | implicit +| float | explicit | explicit | explicit | explicit | explicit | | implicit +| double | explicit | explicit | explicit | explicit | explicit | explicit | +|==== + + +Example(s) +[source,Java] +---- +int a = 1; // Declare int variable a and set it to the literal + // value 1 +long b = a; // Declare long variable b and set it to int variable + // a with an implicit cast to convert from int to long +short c = (short)b; // Declare short variable c, explicitly cast b to a + // short, and assign b to c +byte d = a; // ERROR: Casting an int to a byte requires an explicit + // cast +double e = (double)a; // Explicitly cast int variable a to a double and assign + // it to the double variable e. The explicit cast is + // allowed, but it is not necessary. +---- + +[[reference-casting]] +==== Reference Casting + +A reference type can be implicitly cast to another reference type as long as +the type being cast _from_ is a descendant of the type being cast _to_. A +reference type can be explicitly cast _to_ if the type being cast to is a +descendant of the type being cast _from_. + +*Examples:* +[source,Java] +---- +List x; // Declare List variable x +ArrayList y = new ArrayList(); // Declare ArrayList variable y and assign it a + // newly allocated ArrayList [1] +x = y; // Assign Arraylist y to List x using an + // implicit cast +y = (ArrayList)x; // Explicitly cast List x to an ArrayList and + // assign it to ArrayList y +x = (List)y; // Set List x to ArrayList y using an explicit + // cast (the explicit cast is not necessary) +y = x; // ERROR: List x cannot be implicitly cast to + // an ArrayList, an explicit cast is required +Map m = y; // ERROR: Cannot implicitly or explicitly cast [2] + // an ArrayList to a Map, no relationship + // exists between the two types. +---- +[1] `ArrayList` is a descendant of the `List` type. +[2] `Map` is unrelated to the `List` and `ArrayList` types. + +[[def-type-casting]] +==== def Type Casting +All primitive and reference types can always be implicitly cast to +`def`. While it is possible to explicitly cast to `def`, it is not necessary. + +However, it is not always possible to implicitly cast a `def` to other +primitive and reference types. An explicit cast is required if an explicit +cast would normally be required between the non-def types. + + +*Examples:* +[source,Java] +---- +def x; // Declare def variable x and set it to null +x = 3; // Set the def variable x to the literal 3 with an implicit + // cast from int to def +double a = x; // Declare double variable a and set it to def variable x, + // which contains a double +int b = x; // ERROR: Results in a run-time error because an explicit cast is + // required to cast from a double to an int +int c = (int)x; // Declare int variable c, explicitly cast def variable x to an + // int, and assign x to c +---- + +[[boxing-unboxing]] +==== Boxing and Unboxing + +Boxing is where a cast is used to convert a primitive type to its corresponding +reference type. Unboxing is the reverse, converting a reference type to the +corresponding primitive type. 
+ +There are two places Painless performs implicit boxing and unboxing: + +* When you call methods, Painless automatically boxes and unboxes arguments +so you can specify either primitive types or their corresponding reference +types. +* When you use the `def` type, Painless automatically boxes and unboxes as +needed when converting to and from `def`. + +The casting operator does not support any way to explicitly box a primitive +type or unbox a reference type. + +If a primitive type needs to be converted to a reference type, the Painless +reference type API supports methods that can do that. However, under normal +circumstances this should not be necessary. + +*Examples:* +[source,Java] +---- +Integer x = 1; // ERROR: not a legal implicit cast +Integer y = (Integer)1; // ERROR: not a legal explicit cast +int a = new Integer(1); // ERROR: not a legal implicit cast +int b = (int)new Integer(1); // ERROR: not a legal explicit cast +---- + +[[promotion]] +==== Promotion + +Promotion is where certain operations require types to be either a minimum +numerical type or for two (or more) types to be equivalent. +The documentation for each operation that has these requirements +includes promotion tables that describe how this is handled. + +When an operation promotes a type or types, the resultant type +of the operation is the promoted type. Types can be promoted to def +at compile-time; however, at run-time, the resultant type will be the +promotion of the types the `def` is representing. + +*Examples:* +[source,Java] +---- +2 + 2.0 // Add the literal int 2 and the literal double 2.0. The literal + // 2 is promoted to a double and the resulting value is a double. + +def x = 1; // Declare def variable x and set it to the literal int 1 through + // an implicit cast +x + 2.0F // Add def variable x and the literal float 2.0. + // At compile-time the types are promoted to def. + // At run-time the types are promoted to float. +---- diff --git a/docs/painless/painless-comments.asciidoc b/docs/painless/painless-comments.asciidoc new file mode 100644 index 0000000000000..588e464d97f78 --- /dev/null +++ b/docs/painless/painless-comments.asciidoc @@ -0,0 +1,51 @@ +[[painless-comments]] +=== Comments + +Use the `//` token anywhere on a line to specify a single-line comment. All +characters from the `//` token to the end of the line are ignored. Use an +opening `/*` token and a closing `*/` token to specify a multi-line comment. +Multi-line comments can start anywhere on a line, and all characters in between +the `/*` token and `*/` token are ignored. Comments can be included anywhere +within a script. + +*Grammar* +[source,ANTLR4] +---- +SINGLE_LINE_COMMENT: '//' .*? [\n\r]; +MULTI_LINE_COMMENT: '/*' .*? '*/'; +---- + +*Examples* + +* Single-line comments. ++ +[source,Painless] +---- +// single-line comment + +int value; // single-line comment +---- ++ +* Multi-line comments. 
++ +[source,Painless] +---- +/* multi- + line + comment */ + +int value; /* multi- + line + comment */ value = 0; + +int value; /* multi-line + comment */ + +/* multi-line + comment */ int value; + +int value; /* multi-line + comment */ value = 0; + +int value; /* multi-line comment */ value = 0; +---- diff --git a/docs/painless/painless-description.asciidoc b/docs/painless/painless-description.asciidoc index 874eab5632cfb..dfaf66ca26d4b 100644 --- a/docs/painless/painless-description.asciidoc +++ b/docs/painless/painless-description.asciidoc @@ -2,7 +2,7 @@ _Painless_ is a simple, secure scripting language designed specifically for use with Elasticsearch. It is the default scripting language for Elasticsearch and can safely be used for inline and stored scripts. For a detailed description of the Painless syntax and language features, see the -{painless}/painless-specification.html[Painless Language Specification]. +{painless}/painless-lang-spec.html[Painless Language Specification]. [[painless-features]] You can use Painless anywhere scripts can be used in Elasticsearch. Painless diff --git a/docs/painless/painless-execute-script.asciidoc b/docs/painless/painless-execute-script.asciidoc new file mode 100644 index 0000000000000..7997c87e3e45f --- /dev/null +++ b/docs/painless/painless-execute-script.asciidoc @@ -0,0 +1,53 @@ +[[painless-execute-api]] +=== Painless execute API + +The Painless execute API allows an arbitrary script to be executed and a result to be returned. + +[[painless-execute-api-parameters]] +.Parameters +[options="header"] +|====== +| Name | Required | Default | Description +| `script` | yes | - | The script to execute +| `context` | no | `execute_api_script` | The context the script should be executed in. +|====== + +==== Contexts + +Contexts control how scripts are executed, what variables are available at runtime and what the return type is. + +===== Painless test script context + +The `painless_test` context executes scripts as is and does not add any special parameters. +The only variable that is available is `params`, which can be used to access user-defined values. +The result of the script is always converted to a string. +If no context is specified then this context is used by default.
+ +==== Example + +Request: + +[source,js] +---------------------------------------------------------------- +POST /_scripts/painless/_execute +{ + "script": { + "source": "params.count / params.total", + "params": { + "count": 100.0, + "total": 1000.0 + } + } +} +---------------------------------------------------------------- +// CONSOLE + +Response: + +[source,js] +-------------------------------------------------- +{ + "result": "0.1" +} +-------------------------------------------------- +// TESTRESPONSE \ No newline at end of file diff --git a/docs/painless/painless-syntax.asciidoc b/docs/painless/painless-general-syntax.asciidoc similarity index 72% rename from docs/painless/painless-syntax.asciidoc rename to docs/painless/painless-general-syntax.asciidoc index c68ed5168c01b..114bff80bfa70 100644 --- a/docs/painless/painless-syntax.asciidoc +++ b/docs/painless/painless-general-syntax.asciidoc @@ -1,7 +1,6 @@ -[[painless-syntax]] -=== Painless Syntax +[[painless-general-syntax]] +=== General Syntax -[float] [[control-flow]] ==== Control flow @@ -17,7 +16,6 @@ for (item : list) { } --------------------------------------------------------- -[float] [[functions]] ==== Functions @@ -32,7 +30,6 @@ if (isNegative(someVar)) { } --------------------------------------------------------- -[float] [[lambda-expressions]] ==== Lambda expressions Lambda expressions and method references work the same as in https://docs.oracle.com/javase/tutorial/java/javaOO/lambdaexpressions.html[Java]. @@ -49,7 +46,6 @@ list.sort(Integer::compare); You can make method references to functions within the script with `this`, for example `list.sort(this::mycompare)`. -[float] [[patterns]] ==== Patterns @@ -62,7 +58,6 @@ are always constants and compiled efficiently a single time. 
Pattern p = /[aeiou]/ --------------------------------------------------------- -[float] [[pattern-flags]] ===== Pattern flags @@ -84,34 +79,3 @@ Pattern class] using these characters: |`u` | UNICODE_CASE | `'Ɛ' ==~ /ɛ/iu` |`x` | COMMENTS (aka extended) | `'a' ==~ /a #comment/x` |======================================================================= - -[float] -[[painless-deref]] -==== Dereferences - -Like lots of languages, Painless uses `.` to reference fields and call methods: - -[source,painless] ---------------------------------------------------------- -String foo = 'foo'; -TypeWithGetterOrPublicField bar = new TypeWithGetterOrPublicField() -return foo.length() + bar.x ---------------------------------------------------------- - -Like Groovy, Painless uses `?.` to perform null-safe references, with the -result being `null` if the left hand side is `null`: - -[source,painless] ---------------------------------------------------------- -String foo = null; -return foo?.length() // Returns null ---------------------------------------------------------- - -Unlike Groovy, Painless doesn't support writing to `null` values with this -operator: - -[source,painless] ---------------------------------------------------------- -TypeWithSetterOrPublicField foo = null; -foo?.x = 'bar' // Compile error ---------------------------------------------------------- diff --git a/docs/painless/painless-getting-started.asciidoc b/docs/painless/painless-getting-started.asciidoc index 8cf163d55d7b9..2cf91666ba48d 100644 --- a/docs/painless/painless-getting-started.asciidoc +++ b/docs/painless/painless-getting-started.asciidoc @@ -389,3 +389,5 @@ dispatch *feels* like it'd add a ton of complexity which'd make maintenance and other improvements much more difficult. include::painless-debugging.asciidoc[] + +include::painless-execute-script.asciidoc[] diff --git a/docs/painless/painless-identifiers.asciidoc b/docs/painless/painless-identifiers.asciidoc new file mode 100644 index 0000000000000..17073e3d4c415 --- /dev/null +++ b/docs/painless/painless-identifiers.asciidoc @@ -0,0 +1,29 @@ +[[painless-identifiers]] +=== Identifiers + +Specify identifiers to <>, <>, and +<> variables, <>, and +<>. <> and +<> cannot be used as identifiers. + +*Grammar* +[source,ANTLR4] +---- +ID: [_a-zA-Z] [_a-zA-Z-0-9]*; +---- + +*Examples* + +* Variations of identifiers. ++ +[source,Painless] +---- +a +Z +id +list +list0 +MAP25 +_map25 +Map_25 +---- diff --git a/docs/painless/painless-keywords.asciidoc b/docs/painless/painless-keywords.asciidoc new file mode 100644 index 0000000000000..cb3bafbd20f13 --- /dev/null +++ b/docs/painless/painless-keywords.asciidoc @@ -0,0 +1,13 @@ +[[painless-keywords]] +=== Keywords + +The keywords in the table below are reserved for built-in language +features. These keywords cannot be used as +<> or <>. + +[cols="^1,^1,^1,^1,^1"] +|==== +| if | else | while | do | for +| in | continue | break | return | new +| try | catch | throw | this | instanceof +|==== diff --git a/docs/painless/painless-lang-spec.asciidoc b/docs/painless/painless-lang-spec.asciidoc index 6544b0ad26495..ba6595000ae2f 100644 --- a/docs/painless/painless-lang-spec.asciidoc +++ b/docs/painless/painless-lang-spec.asciidoc @@ -1,73 +1,36 @@ -[[painless-specification]] +[[painless-lang-spec]] == Painless Language Specification -Painless uses a Java-style syntax that is similar to Groovy. In fact, most -Painless scripts are also valid Groovy, and simple Groovy scripts are typically -valid Painless. 
This specification assumes you have at least a passing -familiarity with Java and related languages. - -Painless is essentially a subset of Java with some additional scripting -language features that make scripts easier to write. However, there are some -important differences, particularly with the casting model. For more detailed +Painless is a scripting language designed for security and performance. +Painless syntax is similar to Java syntax along with some additional +features such as dynamic typing, Map and List accessor shortcuts, and array +initializers. As a direct comparison to Java, there are some important +differences, especially related to the casting model. For more detailed conceptual information about the basic constructs that Java and Painless share, refer to the corresponding topics in the https://docs.oracle.com/javase/specs/jls/se8/html/index.html[Java Language Specification]. Painless scripts are parsed and compiled using the http://www.antlr.org/[ANTLR4] -and http://asm.ow2.org/[ASM] libraries. Painless scripts are compiled directly -into Java byte code and executed against a standard Java Virtual Machine. This -specification uses ANTLR4 grammar notation to describe the allowed syntax. +and http://asm.ow2.org/[ASM] libraries. Scripts are compiled directly +into Java Virtual Machine (JVM) byte code and executed against a standard JVM. +This specification uses ANTLR4 grammar notation to describe the allowed syntax. However, the actual Painless grammar is more compact than what is shown here. -[float] -[[comments]] -==== Comments - -Painless supports both single-line and multi-line comments. You can include -comments anywhere within a script. - -Single-line comments are preceded by two slashes: `// comment`. They can be -placed anywhere on a line. All characters from the two slashes to the end of -the line are ignored. - -Multi-line comments are preceded by a slash-star `/*` and closed by -star-slash `*/`. Multi-line comments can start anywhere on a line. All -characters from the opening `/*` to the closing `*/` are ignored. - -*Examples:* - -[source,Java] ----- -// single-line comment - - // single-line comment +include::painless-comments.asciidoc[] -/* multi- - line - comment */ +include::painless-keywords.asciidoc[] - /* multi-line - comment */ +include::painless-literals.asciidoc[] - /* multi-line comment */ ----- +include::painless-identifiers.asciidoc[] -[float] -[[keywords]] -==== Keywords +include::painless-variables.asciidoc[] -Painless reserves the following keywords for built-in language features. -These keywords cannot be used in other contexts, such as identifiers. +include::painless-types.asciidoc[] -[cols="^1,^1,^1,^1,^1"] -|==== -| if | else | while | do | for -| in | continue | break | return | new -| try | catch | throw | this | instanceof -|==== +include::painless-casting.asciidoc[] -include::painless-literals.asciidoc[] -include::painless-variables.asciidoc[] -include::painless-types.asciidoc[] include::painless-operators.asciidoc[] + +include::painless-general-syntax.asciidoc[] diff --git a/docs/painless/painless-literals.asciidoc b/docs/painless/painless-literals.asciidoc index 43c5eb82f96a2..441cb264f1e15 100644 --- a/docs/painless/painless-literals.asciidoc +++ b/docs/painless/painless-literals.asciidoc @@ -1,94 +1,142 @@ -[[literals]] +[[painless-literals]] === Literals -Literals are values that you can specify directly in Painless scripts. +Use literals to specify different types of values directly in a script. 
[[integers]] ==== Integers -Specify integer literals in decimal, octal, or hex notation. Use the following -single letter designations to specify the primitive type: `l` for `long`, `f` -for `float`, and `d` for `double`. If not specified, the type defaults to -`int` (with the exception of certain assignments described later). +Use integer literals to specify an integer value in decimal, octal, or hex +notation of the <> `int`, `long`, `float`, +or `double`. Use the following single letter designations to specify the +<>: `l` or `L` for `long`, `f` or `F` for +`float`, and `d` or `D` for `double`. If not specified, the type defaults to +`int`. Use `0` as a prefix to specify an integer literal as octal, and use +`0x` or `0X` as a prefix to specify an integer literal as hex. -*Grammar:* +*Grammar* [source,ANTLR4] ---- INTEGER: '-'? ( '0' | [1-9] [0-9]* ) [lLfFdD]?; -OCTAL: '-'? '0' [0-7]+ [lL]?; -HEX: '-'? '0' [xX] [0-9a-fA-F]+ [lL]?; +OCTAL: '-'? '0' [0-7]+ [lL]?; +HEX: '-'? '0' [xX] [0-9a-fA-F]+ [lL]?; ---- -*Examples:* -[source,Java] +*Examples* + +* Integer literals. ++ +[source,Painless] ---- -0 // integer literal of 0 -0D // double literal of 0.0 -1234L // long literal of 1234 --90F // float literal of -90.0 --022 // integer literal of -18 specified in octal -0xF2A // integer literal of 3882 +<1> 0 +<2> 0D +<3> 1234L +<4> -90f +<5> -022 +<6> 0xF2A ---- - -[[floating-point-values]] -==== Floating Point Values - -Specify floating point literals using the following single letter designations -for the primitive type: `f` for `float` and `d` for `double`. -If not specified, the type defaults to `double`. - -*Grammar:* ++ +<1> `int 0` +<2> `double 0.0` +<3> `long 1234` +<4> `float -90.0` +<5> `int -18` in octal +<6> `int 3882` in hex + +[[floats]] +==== Floats + +Use floating point literals to specify a floating point value of the +<> `float` or `double`. Use the following +single letter designations to specify the <>: +`f` or `F` for `float` and `d` or `D` for `double`. If not specified, the type defaults +to `double`. + +*Grammar* [source,ANTLR4] ---- -DECIMAL: '-'? ( '0' | [1-9] [0-9]* ) (DOT [0-9]+)? ( [eE] [+\-]? [0-9]+ )? [fFdD]?; +DECIMAL: '-'? ( '0' | [1-9] [0-9]* ) (DOT [0-9]+)? EXPONENT? [fFdD]?; +EXPONENT: ( [eE] [+\-]? [0-9]+ ); ---- -*Examples:* -[source,Java] +*Examples* + +* Floating point literals. ++ +[source,Painless] ---- -0.0 // double value of 0.0 -1E6 // double value of 1000000 -0.977777 // double value of 0.97777 --126.34 // double value of -126.34 -89.9F // float value of 89.9 +<1> 0.0 +<2> 1E6 +<3> 0.977777 +<4> -126.34 +<5> 89.9F ---- ++ +<1> `double 0.0` +<2> `double 1000000.0` in exponent notation +<3> `double 0.977777` +<4> `double -126.34` +<5> `float 89.9` [[strings]] ==== Strings -Specify literal string with either single or double quotes. In double-quoted -literal strings, you can escape double-quotes with a backslash to include them -in the string. Similarly, you escape single quotes with a backslash in -single-quoted literal strings. Backslashes themselves also need to be -escaped with a backslash. +Use string literals to specify <> values with +either single-quotes or double-quotes. Use a `\"` token to include a +double-quote as part of a double-quoted string literal. Use a `\'` token to +include a single-quote as part of a single-quoted string literal. Use a `\\` +token to include a backslash as part of any string literal. -*Grammar:* +*Grammar* [source,ANTLR4] ---- -STRING: ( '"' ( '\\"' | '\\\\' | ~[\\"] )*? '"' ) | ( '\'' ( '\\\'' | '\\\\' | ~[\\'] )*? 
'\'' ); +STRING: ( '"' ( '\\"' | '\\\\' | ~[\\"] )*? '"' ) + | ( '\'' ( '\\\'' | '\\\\' | ~[\\'] )*? '\'' ); ---- -*Examples:* -[source,Java] +*Examples* + +* String literals using single-quotes. ++ +[source,Painless] ---- -"double-quoted String literal" -'single-quoted String literal' -"\"double-quoted String with escaped double-quotes\" and backslash: \\" -'\'single-quoted String with escaped single-quotes\' and backslash \\' -"double-quoted String with non-escaped 'single-quotes'" -'single-quoted String with non-escaped "double-quotes"' +'single-quoted string literal' +'\'single-quoted with escaped single-quotes\' and backslash \\' +'single-quoted with non-escaped "double-quotes"' ---- ++ +* String literals using double-quotes. ++ +[source,Painless] +---- +"double-quoted string literal" +"\"double-quoted with escaped double-quotes\" and backslash: \\" +"double-quoted with non-escaped 'single-quotes'" +---- + +[[characters]] +==== Characters -[[char]] -===== Char +Use the <> to convert string literals or +<> values into <> values. +<> values converted into +<> values must be exactly one character in length +or an error will occur. -You cannot directly specify character literals in Painless. However, you can -cast single-character strings to char. Attempting to cast a multi-character -string to a char throws an error. +*Examples* -*Examples:* -[source,Java] +* Casting string literals into <> values. ++ +[source,Painless] ---- (char)"C" (char)'c' ----- \ No newline at end of file +---- ++ +* Casting a <> value into a <> value. ++ +[source,Painless] +---- +String s = "s"; +char c = (char)s; +---- diff --git a/docs/painless/painless-operators.asciidoc b/docs/painless/painless-operators.asciidoc index 0d5135022ad90..915d811fa441b 100644 --- a/docs/painless/painless-operators.asciidoc +++ b/docs/painless/painless-operators.asciidoc @@ -1,3 +1,4 @@ +[[painless-operators]] === Operators The following is a table of the available operators in Painless. Each operator will have further information and examples outside of the table. Many operators will have a promotion table as described by the documentation on promotion [MARK]. @@ -703,6 +704,7 @@ e = ~d; // sets e the negation of d The cast operator can be used to explicitly convert one type to another. See casting [MARK] for more information. +[[constructor-call]] ==== Constructor Call A constructor call is a special type of method call [MARK] used to allocate a reference type instance using the new operator. The format is the new operator followed by a type, an opening parenthesis, arguments if any, and a closing parenthesis. Arguments are a series of zero-to-many expressions delimited by commas. Auto-boxing and auto-unboxing will be applied automatically for arguments passed into a constructor call. See boxing and unboxing [MARK] for more information on this topic. Constructor argument types can always be resolved at run-time; if appropriate type conversions (casting) cannot be applied an error will occur. Once a reference type instance has been allocated, its members may be used as part of other expressions. diff --git a/docs/painless/painless-types.asciidoc b/docs/painless/painless-types.asciidoc index 9e5077503b4a8..9d575a2069ae3 100644 --- a/docs/painless/painless-types.asciidoc +++ b/docs/painless/painless-types.asciidoc @@ -1,5 +1,5 @@ -[[types]] -=== Data Types +[[painless-types]] +=== Types Painless supports both dynamic and static types. Static types are split into _primitive types_ and _reference types_. 
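As a quick illustration of this split (a minimal sketch; the `def` type, declarations, and constructor calls shown here are covered in detail in the variables, casting, and operators sections), statically typed and dynamically typed variables can be contrasted as follows:

[source,Painless]
----
int count = 10;                     // static primitive type; count is always an int
ArrayList names = new ArrayList(); // static reference type; names is always an ArrayList
def value = 10;                     // dynamic type; value currently holds an int
value = new HashMap();             // the same def variable may later hold a HashMap
----

With static types, type errors are caught at compile-time; with the dynamic `def` type, type checks are deferred to run-time.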
@@ -267,176 +267,3 @@ def[] da = new def[] {i, l, f*d, s}; // Declare def array da and set it to // a def array with a size of 4 and the // values i, l, f*d, and s ---- - -[[casting]] -=== Casting - -Casting is the conversion of one type to another. Implicit casts are casts that -occur automatically, such as during an assignment operation. Explicit casts are -casts where you use the casting operator to explicitly convert one type to -another. This is necessary during operations where the cast cannot be inferred. - -To cast to a new type, precede the expression by the new type enclosed in -parentheses, for example -`(int)x`. - -The following sections specify the implicit casts that can be performed and the -explicit casts that are allowed. The only other permitted cast is casting -a single character `String` to a `char`. - -*Grammar:* -[source,ANTLR4] ----- -cast: '(' TYPE ')' expression ----- - -[[numeric-casting]] -==== Numeric Casting - -The following table shows the allowed implicit and explicit casts between -numeric types. Read the table by row. To find out if you need to explicitly -cast from type A to type B, find the row for type A and scan across to the -column for type B. - -IMPORTANT: Explicit casts between numeric types can result in some data loss. A -smaller numeric type cannot necessarily accommodate the value from a larger -numeric type. You might also lose precision when casting from integer types -to floating point types. - -|==== -| | byte | short | char | int | long | float | double -| byte | | implicit | implicit | implicit | implicit | implicit | implicit -| short | explicit | | explicit | implicit | implicit | implicit | implicit -| char | explicit | explicit | | implicit | implicit | implicit | implicit -| int | explicit | explicit | explicit | | implicit | implicit | implicit -| long | explicit | explicit | explicit | explicit | | implicit | implicit -| float | explicit | explicit | explicit | explicit | explicit | | implicit -| double | explicit | explicit | explicit | explicit | explicit | explicit | -|==== - - -Example(s) -[source,Java] ----- -int a = 1; // Declare int variable a and set it to the literal - // value 1 -long b = a; // Declare long variable b and set it to int variable - // a with an implicit cast to convert from int to long -short c = (short)b; // Declare short variable c, explicitly cast b to a - // short, and assign b to c -byte d = a; // ERROR: Casting an int to a byte requires an explicit - // cast -double e = (double)a; // Explicitly cast int variable a to a double and assign - // it to the double variable e. The explicit cast is - // allowed, but it is not necessary. ----- - -[[reference-casting]] -==== Reference Casting - -A reference type can be implicitly cast to another reference type as long as -the type being cast _from_ is a descendant of the type being cast _to_. A -reference type can be explicitly cast _to_ if the type being cast to is a -descendant of the type being cast _from_. 
- -*Examples:* -[source,Java] ----- -List x; // Declare List variable x -ArrayList y = new ArrayList(); // Declare ArrayList variable y and assign it a - // newly allocated ArrayList [1] -x = y; // Assign Arraylist y to List x using an - // implicit cast -y = (ArrayList)x; // Explicitly cast List x to an ArrayList and - // assign it to ArrayList y -x = (List)y; // Set List x to ArrayList y using an explicit - // cast (the explicit cast is not necessary) -y = x; // ERROR: List x cannot be implicitly cast to - // an ArrayList, an explicit cast is required -Map m = y; // ERROR: Cannot implicitly or explicitly cast [2] - // an ArrayList to a Map, no relationship - // exists between the two types. ----- -[1] `ArrayList` is a descendant of the `List` type. -[2] `Map` is unrelated to the `List` and `ArrayList` types. - -[[def-type-casting]] -==== def Type Casting -All primitive and reference types can always be implicitly cast to -`def`. While it is possible to explicitly cast to `def`, it is not necessary. - -However, it is not always possible to implicitly cast a `def` to other -primitive and reference types. An explicit cast is required if an explicit -cast would normally be required between the non-def types. - - -*Examples:* -[source,Java] ----- -def x; // Declare def variable x and set it to null -x = 3; // Set the def variable x to the literal 3 with an implicit - // cast from int to def -double a = x; // Declare double variable a and set it to def variable x, - // which contains a double -int b = x; // ERROR: Results in a run-time error because an explicit cast is - // required to cast from a double to an int -int c = (int)x; // Declare int variable c, explicitly cast def variable x to an - // int, and assign x to c ----- - -[[boxing-unboxing]] -==== Boxing and Unboxing - -Boxing is where a cast is used to convert a primitive type to its corresponding -reference type. Unboxing is the reverse, converting a reference type to the -corresponding primitive type. - -There are two places Painless performs implicit boxing and unboxing: - -* When you call methods, Painless automatically boxes and unboxes arguments -so you can specify either primitive types or their corresponding reference -types. -* When you use the `def` type, Painless automatically boxes and unboxes as -needed when converting to and from `def`. - -The casting operator does not support any way to explicitly box a primitive -type or unbox a reference type. - -If a primitive type needs to be converted to a reference type, the Painless -reference type API supports methods that can do that. However, under normal -circumstances this should not be necessary. - -*Examples:* -[source,Java] ----- -Integer x = 1; // ERROR: not a legal implicit cast -Integer y = (Integer)1; // ERROR: not a legal explicit cast -int a = new Integer(1); // ERROR: not a legal implicit cast -int b = (int)new Integer(1); // ERROR: not a legal explicit cast ----- - -[[promotion]] -==== Promotion - -Promotion is where certain operations require types to be either a minimum -numerical type or for two (or more) types to be equivalent. -The documentation for each operation that has these requirements -includes promotion tables that describe how this is handled. - -When an operation promotes a type or types, the resultant type -of the operation is the promoted type. Types can be promoted to def -at compile-time; however, at run-time, the resultant type will be the -promotion of the types the `def` is representing. 
- -*Examples:* -[source,Java] ----- -2 + 2.0 // Add the literal int 2 and the literal double 2.0. The literal - // 2 is promoted to a double and the resulting value is a double. - -def x = 1; // Declare def variable x and set it to the literal int 1 through - // an implicit cast -x + 2.0F // Add def variable x and the literal float 2.0. - // At compile-time the types are promoted to def. - // At run-time the types are promoted to float. ----- diff --git a/docs/painless/painless-variables.asciidoc b/docs/painless/painless-variables.asciidoc index 2177b0bb91ba8..9756676a08b5b 100644 --- a/docs/painless/painless-variables.asciidoc +++ b/docs/painless/painless-variables.asciidoc @@ -1,123 +1,130 @@ -[[variables]] +[[painless-variables]] === Variables -Variables in Painless must be declared and can be statically or <>. - -[[variable-identifiers]] -==== Variable Identifiers - -Specify variable identifiers using the following grammar. Variable identifiers -must start with a letter or underscore. You cannot use <> or -<> as identifiers. - -*Grammar:* -[source,ANTLR4] ----- -ID: [_a-zA-Z] [_a-zA-Z-0-9]*; ----- - -*Examples:* -[source,Java] ----- -_ -a -Z -id -list -list0 -MAP25 -_map25 ----- - -[[variable-declaration]] -==== Variable Declaration - -Variables must be declared before you use them. The format is `type-name -identifier-name`. To declare multiple variables of the same type, specify a -comma-separated list of identifier names. You can immediately assign a value to -a variable when you declare it. - -*Grammar:* +<> variables to <> values for +<> in expressions. Specify variables as a +<>, <>, or +<>. Variable operations follow the structure of a +standard JVM in relation to instruction execution and memory usage. + +[[declaration]] +==== Declaration + +Declare variables before use with the format of <> +<>. Specify a comma-separated list of +<> following the <> +to declare multiple variables in a single statement. Use an +<> statement combined with a declaration statement to +immediately assign a value to a variable. Variables not immediately assigned a +value will have a default value assigned implicitly based on the +<>. + +*Grammar* [source,ANTLR4] ---- +declaration : type ID assignment? (',' ID assignment?)*; type: ID ('[' ']')*; -declaration : type ID (',' ID)*; +assignment: '=' expression; ---- -*Examples:* -[source,Java] +*Examples* + +* Different variations of variable declaration. ++ +[source,Painless] ---- -int x; // Declare a variable with type int and id x -List y; // Declare a variable with type List and id y -int x, y, z; // Declare variables with type int and ids x, y, and z -def[] d; // Declare the variable d with type def[] -int i = 10; // Declare the int variable i and set it to the int literal 10 +<1> int x; +<2> List y; +<3> int x, y, z; +<4> def[] d; +<5> int i = 10; ---- ++ +<1> declare a variable of type `int` and identifier `x` +<2> declare a variable of type `List` and identifier `y` +<3> declare three variables of type `int` and identifiers `x`, `y`, `z` +<4> declare a variable of type `def[]` and identifier `d` +<5> declare a variable of type `int` and identifier `i`; + assign the integer literal `10` to `i` -[[variable-assignment]] -==== Variable Assignment +[[assignment]] +==== Assignment -Use the equals operator (`=`) to assign a value to a variable. The format is -`identifier-name = value`. Any value expression can be assigned to any variable -as long as the types match or the expression's type can be implicitly cast to -the variable's type. 
An error occurs if the types do not match. +Use the `equals` operator (`=`) to assign a value to a variable. Any expression +that produces a value can be assigned to any variable as long as the +<> are the same or the resultant +<> can be implicitly <> to +the variable <>. Otherwise, an error will occur. +<> values are shallow-copied when assigned. -*Grammar:* +*Grammar* [source,ANTLR4] ---- assignment: ID '=' expression ---- - -*Examples:* - -Assigning a literal of the appropriate type directly to a declared variable. - -[source,Java] ----- -int i;   // Declare an int i -i = 10;  // Set the int i to the int literal 10 ----- - -Immediately assigning a value when declaring a variable. - -[source,Java] ----- -int i = 10; // Declare the int variable i and set it the int literal 1 -double j = 2.0; // Declare the double variable j and set it to the double - // literal 2.0 ----- - -Assigning a variable of one primitive type to another variable of the same -type. - -[source,Java] ----- -int i = 10; // Declare the int variable i and set it to the int literal 10 -int j = i;  // Declare the int variable j and set it to the int variable i ----- - -Assigning a reference type to a new heap allocation with the `new` operator. - -[source,Java] ----- -ArrayList l = new ArrayList();  // Declare an ArrayList variable l and set it - // to a newly allocated ArrayList -Map m = new HashMap(); // Declare a Map variable m and set it - // to a newly allocated HashMap ----- - -Assigning a variable of one reference type to another variable of the same type. - -[source,Java] ----- -List l = new ArrayList(); // Declare List variable l and set it a newly - // allocated ArrayList -List k = l;  // Declare List variable k and set it to the - // value of the List variable l -List m;                   // Declare List variable m and set it the - // default value null -m = k;                    // Set the value of List variable m to the value - // of List variable k ----- +*Examples* + +* Variable assignment with an <>. ++ +[source,Painless] +---- +<1> int i; +<2> i = 10; +---- ++ +<1> declare `int i` +<2> assign `10` to `i` ++ +* <> combined with immediate variable assignment. ++ +[source,Painless] +---- +<1> int i = 10; +<2> double j = 2.0; +---- ++ +<1> declare `int i`; assign `10` to `i` +<2> declare `double j`; assign `2.0` to `j` ++ +* Assignment of one variable to another using +<>. ++ +[source,Painless] +---- +<1> int i = 10; +<2> int j = i; +---- ++ +<1> declare `int i`; assign `10` to `i` +<2> declare `int j`; assign the value of `i` to `j` ++ +* Assignment with <> using the +<>. ++ +[source,Painless] +---- +<1> ArrayList l = new ArrayList(); +<2> Map m = new HashMap(); +---- ++ +<1> declare `ArrayList l`; assign a newly-allocated `ArrayList` to `l` +<2> declare `Map m`; assign a newly-allocated `HashMap` to `m` + with an implicit cast to `Map` ++ +* Assignment of one variable to another using +<>.
++ +[source,Painless] +---- +<1> List l = new ArrayList(); +<2> List k = l; +<3> List m; +<4> m = k; +---- ++ +<1> declare `List l`; assign a newly-allocated `Arraylist` to `l` + with an implicit cast to `List` +<2> declare `List k`; assign a shallow-copy of `l` to `k` +<3> declare `List m`; +<4> assign a shallow-copy of `k` to `m` diff --git a/docs/plugins/analysis.asciidoc b/docs/plugins/analysis.asciidoc index 3c3df021de5cb..c09c48640ea3d 100644 --- a/docs/plugins/analysis.asciidoc +++ b/docs/plugins/analysis.asciidoc @@ -53,6 +53,7 @@ A number of analysis plugins have been contributed by our community: * https://github.com/duydo/elasticsearch-analysis-vietnamese[Vietnamese Analysis Plugin] (by Duy Do) * https://github.com/ofir123/elasticsearch-network-analysis[Network Addresses Analysis Plugin] (by Ofir123) * https://github.com/medcl/elasticsearch-analysis-string2int[String2Integer Analysis Plugin] (by Medcl) +* https://github.com/ZarHenry96/elasticsearch-dandelion-plugin[Dandelion Analysis Plugin] (by ZarHenry96) include::analysis-icu.asciidoc[] diff --git a/docs/plugins/discovery-azure-classic.asciidoc b/docs/plugins/discovery-azure-classic.asciidoc index f11b4018bf5d1..c56991b8f507f 100644 --- a/docs/plugins/discovery-azure-classic.asciidoc +++ b/docs/plugins/discovery-azure-classic.asciidoc @@ -372,6 +372,8 @@ This command should give you a JSON result: "cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA", "version" : { "number" : "{version}", + "build_flavor" : "oss", + "build_type" : "zip", "build_hash" : "f27399d", "build_date" : "2016-03-30T09:51:41.449Z", "build_snapshot" : false, diff --git a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc index 30ea2832a700e..c2d1614ad6e56 100644 --- a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc @@ -27,11 +27,13 @@ POST /sales/_search?size=0 // CONSOLE // TEST[setup:sales] -Available expressions for interval: `year`, `quarter`, `month`, `week`, `day`, `hour`, `minute`, `second` +Available expressions for interval: `year` (`1y`), `quarter` (`1q`), `month` (`1M`), `week` (`1w`), +`day` (`1d`), `hour` (`1h`), `minute` (`1m`), `second` (`1s`) Time values can also be specified via abbreviations supported by <> parsing. Note that fractional time values are not supported, but you can address this by shifting to another -time unit (e.g., `1.5h` could instead be specified as `90m`). +time unit (e.g., `1.5h` could instead be specified as `90m`). Also note that time intervals larger than +than days do not support arbitrary values but can only be one unit large (e.g. `1y` is valid, `2y` is not). [source,js] -------------------------------------------------- diff --git a/docs/reference/cat.asciidoc b/docs/reference/cat.asciidoc index 3dff5abc52d9a..7a2262b7962bb 100644 --- a/docs/reference/cat.asciidoc +++ b/docs/reference/cat.asciidoc @@ -93,8 +93,8 @@ Responds with: // TESTRESPONSE[s/9300 27 sLBaIGK/\\d+ \\d+ .+/ _cat] You can also request multiple columns using simple wildcards like -`/_cat/thread_pool?h=ip,bulk.*` to get all headers (or aliases) starting -with `bulk.`. +`/_cat/thread_pool?h=ip,queue*` to get all headers (or aliases) starting +with `queue`. 
[float] [[numeric-formats]] diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc index bfc5ca415c3ba..306650feb958b 100644 --- a/docs/reference/cat/thread_pool.asciidoc +++ b/docs/reference/cat/thread_pool.asciidoc @@ -14,20 +14,20 @@ Which looks like: [source,txt] -------------------------------------------------- -node-0 bulk 0 0 0 +node-0 analyze 0 0 0 node-0 fetch_shard_started 0 0 0 node-0 fetch_shard_store 0 0 0 node-0 flush 0 0 0 node-0 force_merge 0 0 0 node-0 generic 0 0 0 node-0 get 0 0 0 -node-0 index 0 0 0 node-0 listener 0 0 0 node-0 management 1 0 0 node-0 refresh 0 0 0 node-0 search 0 0 0 node-0 snapshot 0 0 0 node-0 warmer 0 0 0 +node-0 write 0 0 0 -------------------------------------------------- // TESTRESPONSE[s/\d+/\\d+/ _cat] @@ -43,20 +43,20 @@ The second column is the thread pool name [source,txt] -------------------------------------------------- name -bulk +analyze fetch_shard_started fetch_shard_store flush force_merge generic get -index listener management refresh search snapshot warmer +write -------------------------------------------------- diff --git a/docs/reference/cluster/nodes-info.asciidoc b/docs/reference/cluster/nodes-info.asciidoc index 2b91310da3a8e..6522d0f5ad68a 100644 --- a/docs/reference/cluster/nodes-info.asciidoc +++ b/docs/reference/cluster/nodes-info.asciidoc @@ -142,6 +142,8 @@ The result will look similar to: "host": "node-0.elastic.co", "ip": "192.168.17", "version": "{version}", + "build_flavor": "oss", + "build_type": "zip", "build_hash": "587409e", "roles": [ "master", @@ -235,6 +237,8 @@ The result will look similar to: "host": "node-0.elastic.co", "ip": "192.168.17", "version": "{version}", + "build_flavor": "oss", + "build_type": "zip", "build_hash": "587409e", "roles": [], "attributes": {}, diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index ec25d27d2535f..eb3abb19d1adf 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -346,7 +346,6 @@ Supported metrics are: * `search` * `segments` * `store` -* `suggest` * `translog` * `warmer` diff --git a/docs/reference/cluster/remote-info.asciidoc b/docs/reference/cluster/remote-info.asciidoc index d044f4dcad221..3dfcc201e7ac4 100644 --- a/docs/reference/cluster/remote-info.asciidoc +++ b/docs/reference/cluster/remote-info.asciidoc @@ -19,9 +19,6 @@ the configured remote cluster alias. `seeds`:: The configured initial seed transport addresses of the remote cluster. -`http_addresses`:: - The published http addresses of all connected remote nodes. - `connected`:: True if there is at least one connection to the remote cluster. diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc index be015a811e9b3..f9919483e5a47 100644 --- a/docs/reference/docs/delete-by-query.asciidoc +++ b/docs/reference/docs/delete-by-query.asciidoc @@ -284,9 +284,12 @@ executed again in order to conform to `requests_per_second`. `failures`:: -Array of all indexing failures. If this is non-empty then the request aborted -because of those failures. See `conflicts` for how to prevent version conflicts -from aborting the operation. +Array of failures if there were any unrecoverable errors during the process. If +this is non-empty then the request aborted because of those failures. 
+Delete-by-query is implemented using batches and any failure causes the entire +process to abort but all failures in the current batch are collected into the +array. You can use the `conflicts` option to prevent reindex from aborting on +version conflicts. [float] diff --git a/docs/reference/docs/delete.asciidoc b/docs/reference/docs/delete.asciidoc index 782a625586b87..49f31eb2d75fb 100644 --- a/docs/reference/docs/delete.asciidoc +++ b/docs/reference/docs/delete.asciidoc @@ -39,11 +39,14 @@ The result of the above delete operation is: [[delete-versioning]] === Versioning -Each document indexed is versioned. When deleting a document, the -`version` can be specified to make sure the relevant document we are -trying to delete is actually being deleted and it has not changed in the -meantime. Every write operation executed on a document, deletes included, -causes its version to be incremented. +Each document indexed is versioned. When deleting a document, the `version` can +be specified to make sure the relevant document we are trying to delete is +actually being deleted and it has not changed in the meantime. Every write +operation executed on a document, deletes included, causes its version to be +incremented. The version number of a deleted document remains available for a +short time after deletion to allow for control of concurrent operations. The +length of time for which a deleted document's version remains available is +determined by the `index.gc_deletes` index setting and defaults to 60 seconds. [float] [[delete-routing]] diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index 5f34371ab8467..e8283abfc2ef0 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -161,12 +161,12 @@ POST _reindex `index` and `type` in `source` can both be lists, allowing you to copy from lots of sources in one request. This will copy documents from the `_doc` and -`post` types in the `twitter` and `blog` index. The copied documents would include the -`post` type in the `twitter` index and the `_doc` type in the `blog` index. For more +`post` types in the `twitter` and `blog` index. The copied documents would include the +`post` type in the `twitter` index and the `_doc` type in the `blog` index. For more specific parameters, you can use `query`. -The Reindex API makes no effort to handle ID collisions. For such issues, the target index -will remain valid, but it's not easy to predict which document will survive because +The Reindex API makes no effort to handle ID collisions. For such issues, the target index +will remain valid, but it's not easy to predict which document will survive because the iteration order isn't well defined. [source,js] @@ -666,9 +666,11 @@ executed again in order to conform to `requests_per_second`. `failures`:: -Array of all indexing failures. If this is non-empty then the request aborted -because of those failures. See `conflicts` for how to prevent version conflicts -from aborting the operation. +Array of failures if there were any unrecoverable errors during the process. If +this is non-empty then the request aborted because of those failures. Reindex +is implemented using batches and any failure causes the entire process to abort +but all failures in the current batch are collected into the array. You can use +the `conflicts` option to prevent reindex from aborting on version conflicts. [float] [[docs-reindex-task-api]] @@ -1004,7 +1006,7 @@ number for most indices. 
If slicing manually or otherwise tuning automatic slicing, use these guidelines. Query performance is most efficient when the number of `slices` is equal to the -number of shards in the index. If that number is large (e.g. 500), +number of shards in the index. If that number is large (e.g. 500), choose a lower number as too many `slices` will hurt performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. @@ -1018,7 +1020,7 @@ documents being reindexed and cluster resources. [float] === Reindex daily indices -You can use `_reindex` in combination with <> +You can use `_reindex` in combination with <> to reindex daily indices to apply a new template to the existing documents. Assuming you have indices consisting of documents as follows: diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index 482f3d62f5d5d..1d81e4a44ff24 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -338,9 +338,13 @@ executed again in order to conform to `requests_per_second`. `failures`:: -Array of all indexing failures. If this is non-empty then the request aborted -because of those failures. See `conflicts` for how to prevent version conflicts -from aborting the operation. +Array of failures if there were any unrecoverable errors during the process. If +this is non-empty then the request aborted because of those failures. +Update-by-query is implemented using batches and any failure causes the entire +process to abort but all failures in the current batch are collected into the +array. You can use the `conflicts` option to prevent reindex from aborting on +version conflicts. + [float] diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 0ab742108b92f..ed0077a629d7c 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -214,6 +214,27 @@ specific index module: The maximum length of regex that can be used in Regexp Query. Defaults to `1000`. + `index.routing.allocation.enable`:: + + Controls shard allocation for this index. It can be set to: + * `all` (default) - Allows shard allocation for all shards. + * `primaries` - Allows shard allocation only for primary shards. + * `new_primaries` - Allows shard allocation only for newly-created primary shards. + * `none` - No shard allocation is allowed. + + `index.routing.rebalance.enable`:: + + Enables shard rebalancing for this index. It can be set to: + * `all` (default) - Allows shard rebalancing for all shards. + * `primaries` - Allows shard rebalancing only for primary shards. + * `replicas` - Allows shard rebalancing only for replica shards. + * `none` - No shard rebalancing is allowed. + + `index.gc_deletes`:: + + The length of time that a <> remains available for <>. + Defaults to `60s`. + [float] === Settings in other index modules diff --git a/docs/reference/index-modules/merge.asciidoc b/docs/reference/index-modules/merge.asciidoc index 97db09ba656c7..cc0613ec2870d 100644 --- a/docs/reference/index-modules/merge.asciidoc +++ b/docs/reference/index-modules/merge.asciidoc @@ -23,7 +23,8 @@ The merge scheduler supports the following _dynamic_ setting: `index.merge.scheduler.max_thread_count`:: - The maximum number of threads that may be merging at once. Defaults to + The maximum number of threads on a single shard that may be merging at once. 
+ Defaults to `Math.max(1, Math.min(4, Runtime.getRuntime().availableProcessors() / 2))` which works well for a good solid-state-disk (SSD). If your index is on spinning platter drives instead, decrease this to 1. diff --git a/docs/reference/index.x.asciidoc b/docs/reference/index.x.asciidoc index bbfdf515bc72d..5be21cb004331 100644 --- a/docs/reference/index.x.asciidoc +++ b/docs/reference/index.x.asciidoc @@ -4,7 +4,7 @@ :include-xpack: true :es-test-dir: {docdir}/../src/test :plugins-examples-dir: {docdir}/../../plugins/examples -:xes-repo-dir: {docdir}/../../../elasticsearch-extra/x-pack-elasticsearch/docs/{lang} +:xes-repo-dir: {docdir}/../../x-pack/docs/{lang} :es-repo-dir: {docdir} diff --git a/docs/reference/mapping/removal_of_types.asciidoc b/docs/reference/mapping/removal_of_types.asciidoc index 070d189a0fffe..95881ba83856f 100644 --- a/docs/reference/mapping/removal_of_types.asciidoc +++ b/docs/reference/mapping/removal_of_types.asciidoc @@ -258,15 +258,17 @@ Elasticsearch 6.x:: Elasticsearch 7.x:: -* The `type` parameter in URLs are optional. For instance, indexing +* The `type` parameter in URLs are deprecated. For instance, indexing a document no longer requires a document `type`. The new index APIs are `PUT {index}/_doc/{id}` in case of explicit ids and `POST {index}/_doc` for auto-generated ids. -* The `GET|PUT _mapping` APIs support a query string parameter - (`include_type_name`) which indicates whether the body should include - a layer for the type name. It defaults to `true`. 7.x indices which - don't have an explicit type will use the dummy type name `_doc`. +* The index creation, `GET|PUT _mapping` and document APIs support a query + string parameter (`include_type_name`) which indicates whether requests and + responses should include a type name. It defaults to `true`. + 7.x indices which don't have an explicit type will use the dummy type name + `_doc`. Not setting `include_type_name=false` will result in a deprecation + warning. * The `_default_` mapping type is removed. @@ -274,7 +276,8 @@ Elasticsearch 8.x:: * The `type` parameter is no longer supported in URLs. -* The `include_type_name` parameter defaults to `false`. +* The `include_type_name` parameter is deprecated, default to `false` and fails + the request when set to `true`. Elasticsearch 9.x:: @@ -421,3 +424,108 @@ POST _reindex ---- // NOTCONSOLE +[float] +=== Use `include_type_name=false` to prepare for upgrade to 8.0 + +Index creation, mappings and document APIs support the `include_type_name` +option. When set to `false`, this option enables the behavior that will become +default in 8.0 when types are removed. See some examples of interactions with +Elasticsearch with this option turned off: + +[float] +==== Index creation + +[source,js] +-------------------------------------------------- +PUT index?include_type_name=false +{ + "mappings": { + "properties": { <1> + "foo": { + "type": "keyword" + } + } + } +} +-------------------------------------------------- +// CONSOLE +<1> Mappings are included directly under the `mappings` key, without a type name. + +[float] +==== PUT and GET mappings + +[source,js] +-------------------------------------------------- +PUT index + +PUT index/_mappings?include_type_name=false +{ + "properties": { <1> + "foo": { + "type": "keyword" + } + } +} + +GET index/_mappings?include_type_name=false +-------------------------------------------------- +// CONSOLE +<1> Mappings are included directly under the `mappings` key, without a type name. 
+ + +The above call returns + +[source,js] +-------------------------------------------------- +{ + "index": { + "mappings": { + "properties": { <1> + "foo": { + "type": "keyword" + } + } + } + } +} +-------------------------------------------------- +// TESTRESPONSE +<1> Mappings are included directly under the `mappings` key, without a type name. + +[float] +==== Document APIs + +Index APIs must be call with the `{index}/_doc` path for automatic generation of +the `_id` and `{index}/_doc/{id}` with explicit ids. + +[source,js] +-------------------------------------------------- +PUT index/_doc/1?include_type_name=false +{ + "foo": "bar" +} +-------------------------------------------------- +// CONSOLE + +[source,js] +-------------------------------------------------- +{ + "_index": "index", <1> + "_id": "1", + "_version": 1, + "result": "created", + "_shards": { + "total": 2, + "successful": 1, + "failed": 0 + }, + "_seq_no": 0, + "_primary_term": 1 +} +-------------------------------------------------- +// TESTRESPONSE +<1> The response does not include a `_type`. + +Likewise the <>, <>, +<> and <> APIs do not return a `_type` +key in the response when `include_type_name` is set to `false`. diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index 57faef2dbd7db..97f2ddb52825b 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -122,6 +122,11 @@ The following parameters are accepted by `geo_point` fields: ignored. If `false`, geo-points containing any more than latitude and longitude (two dimensions) values throw an exception and reject the whole document. +<>:: + + Accepts an geopoint value which is substituted for any explicit `null` values. + Defaults to `null`, which means the field is treated as missing. + ==== Using geo-points in scripts When accessing the value of a geo-point in a script, the value is returned as diff --git a/docs/reference/mapping/types/token-count.asciidoc b/docs/reference/mapping/types/token-count.asciidoc index da4220f4bb401..6f3295fab5ebb 100644 --- a/docs/reference/mapping/types/token-count.asciidoc +++ b/docs/reference/mapping/types/token-count.asciidoc @@ -81,7 +81,7 @@ Defaults to `true`. <>:: - Should the field be searchable? Accepts `not_analyzed` (default) and `no`. + Should the field be searchable? Accepts `true` (default) and `false`. <>:: diff --git a/docs/reference/migration/migrate_6_4.asciidoc b/docs/reference/migration/migrate_6_4.asciidoc new file mode 100644 index 0000000000000..a761509597fd2 --- /dev/null +++ b/docs/reference/migration/migrate_6_4.asciidoc @@ -0,0 +1,12 @@ +[[breaking-changes-6.4]] +== Breaking changes in 6.4 + +[[breaking_64_api_changes]] +=== API changes + +==== Field capabilities request format + +In the past, `fields` could be provided either as a parameter, or as part of the request +body. Specifying `fields` in the request body is now deprecated, and instead they should +always be supplied through a request parameter. In 7.0.0, the field capabilities API will +not accept `fields` supplied in the request body. diff --git a/docs/reference/migration/migrate_7_0/api.asciidoc b/docs/reference/migration/migrate_7_0/api.asciidoc index f8b8f9670c7fa..fc037504c5128 100644 --- a/docs/reference/migration/migrate_7_0/api.asciidoc +++ b/docs/reference/migration/migrate_7_0/api.asciidoc @@ -52,3 +52,10 @@ and `size` will be populated for fixed thread pools. and Update request. 
The Update API returns `400 - Bad request` if request contains unknown parameters (instead of ignored in the previous version). +[[remove-suggest-metric]] +==== Remove support for `suggest` metric/index metric in indices stats and nodes stats APIs + +Previously, `suggest` stats were folded into `search` stats. Support for the +`suggest` metric on the indices stats and nodes stats APIs remained for +backwards compatibility. Backwards support for the `suggest` metric was +deprecated in 6.3.0 and now removed in 7.0.0. diff --git a/docs/reference/migration/migrate_7_0/settings.asciidoc b/docs/reference/migration/migrate_7_0/settings.asciidoc index 1035bc73393ac..b09cecf5a48dc 100644 --- a/docs/reference/migration/migrate_7_0/settings.asciidoc +++ b/docs/reference/migration/migrate_7_0/settings.asciidoc @@ -5,4 +5,23 @@ ==== Percolator * The deprecated `index.percolator.map_unmapped_fields_as_string` setting has been removed in favour of - the `index.percolator.map_unmapped_fields_as_text` setting. \ No newline at end of file + the `index.percolator.map_unmapped_fields_as_text` setting. + +==== Index thread pool + +* Internally, single-document index/delete/update requests are executed as bulk + requests with a single-document payload. This means that these requests are + executed on the bulk thread pool. As such, the indexing thread pool is no + longer needed and has been removed. As such, the settings + `thread_pool.index.size` and `thread_pool.index.queue_size` have been removed. + +[[write-thread-pool-fallback]] +==== Write thread pool fallback + +* The bulk thread pool was replaced by the write thread pool in 6.3.0. However, + for backwards compatibility reasons the name `bulk` was still usable as fallback + settings `thread_pool.bulk.size` and `thread_pool.bulk.queue_size` for + `thread_pool.write.size` and `thread_pool.write.queue_size`, respectively, and + the system property `es.thread_pool.write.use_bulk_as_display_name` was + available to keep the display output in APIs as `bulk` instead of `write`. + These fallback settings and this system property have been removed. diff --git a/docs/reference/modules/cross-cluster-search.asciidoc b/docs/reference/modules/cross-cluster-search.asciidoc index d3c6426f271ef..21e21edc35b57 100644 --- a/docs/reference/modules/cross-cluster-search.asciidoc +++ b/docs/reference/modules/cross-cluster-search.asciidoc @@ -222,8 +222,7 @@ GET /cluster_one:twitter/_search // TESTRESPONSE[s/"_score": 1/"_score": "$body.hits.hits.0._score"/] -In contrast to the `tribe` feature cross cluster search can also search indices with the same name on different -clusters: +Indices can also be searched with the same name on different clusters: [source,js] -------------------------------------------------- diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index ea3f99debb94e..693d537d732c1 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -44,12 +44,12 @@ If you register same snapshot repository with multiple clusters, only one cluster should have write access to the repository. All other clusters connected to that repository should set the repository to `readonly` mode. -NOTE: The snapshot format can change across major versions, so if you have -clusters on different major versions trying to write the same repository, -new snapshots written by one version will not be visible to the other. 
While -setting the repository to `readonly` on all but one of the clusters should work -with multiple clusters differing by one major version, it is not a supported -configuration. +IMPORTANT: The snapshot format can change across major versions, so if you have +clusters on different versions trying to write the same repository, snapshots +written by one version may not be visible to the other and the repository could +be corrupted. While setting the repository to `readonly` on all but one of the +clusters should work with multiple clusters differing by one major version, it +is not a supported configuration. [source,js] ----------------------------------- diff --git a/docs/reference/modules/threadpool.asciidoc b/docs/reference/modules/threadpool.asciidoc index 984bef0a3cc3c..515959e4ea580 100644 --- a/docs/reference/modules/threadpool.asciidoc +++ b/docs/reference/modules/threadpool.asciidoc @@ -13,12 +13,6 @@ There are several thread pools, but the important ones include: For generic operations (e.g., background node discovery). Thread pool type is `scaling`. -`index`:: - For index/delete operations. Thread pool type is `fixed` - with a size of `# of available processors`, - queue_size of `200`. The maximum size for this pool - is `1 + # of available processors`. - `search`:: For count/search/suggest operations. Thread pool type is `fixed_auto_queue_size` with a size of @@ -30,11 +24,13 @@ There are several thread pools, but the important ones include: with a size of `# of available processors`, queue_size of `1000`. -`bulk`:: - For bulk operations. Thread pool type is `fixed` - with a size of `# of available processors`, - queue_size of `200`. The maximum size for this pool - is `1 + # of available processors`. +`analyze`:: + For analyze requests. Thread pool type is `fixed` with a size of 1, queue size of 16. + +`write`:: + For single-document index/delete/update and bulk requests. Thread pool type + is `fixed` with a size of `# of available processors`, queue_size of `200`. + The maximum size for this pool is `1 + # of available processors`. `snapshot`:: For snapshot/restore operations. Thread pool type is `scaling` with a @@ -52,13 +48,13 @@ There are several thread pools, but the important ones include: Mainly for java client executing of action when listener threaded is set to true. Thread pool type is `scaling` with a default max of `min(10, (# of available processors)/2)`. -Changing a specific thread pool can be done by setting its type-specific parameters; for example, changing the `index` +Changing a specific thread pool can be done by setting its type-specific parameters; for example, changing the `bulk` thread pool to have more threads: [source,yaml] -------------------------------------------------- thread_pool: - index: + bulk: size: 30 -------------------------------------------------- @@ -86,7 +82,7 @@ full, it will abort the request. [source,yaml] -------------------------------------------------- thread_pool: - index: + bulk: size: 30 queue_size: 1000 -------------------------------------------------- diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc index 50c35a4a73634..b7a65d98592cc 100644 --- a/docs/reference/modules/transport.asciidoc +++ b/docs/reference/modules/transport.asciidoc @@ -41,7 +41,7 @@ addressable from the outside. Defaults to the actual port assigned via |`transport.tcp.connect_timeout` |The socket connect timeout setting (in time setting format). Defaults to `30s`. 
-|`transport.tcp.compress` |Set to `true` to enable compression (LZF) +|`transport.tcp.compress` |Set to `true` to enable compression (`DEFLATE`) between all nodes. Defaults to `false`. |`transport.ping_schedule` | Schedule a regular ping message to ensure that connections are kept alive. Defaults to `5s` in the transport client and `-1` (disabled) elsewhere. diff --git a/docs/reference/query-dsl/match-phrase-query.asciidoc b/docs/reference/query-dsl/match-phrase-query.asciidoc index 943d0e84d36db..1f4b19eedc132 100644 --- a/docs/reference/query-dsl/match-phrase-query.asciidoc +++ b/docs/reference/query-dsl/match-phrase-query.asciidoc @@ -39,3 +39,5 @@ GET /_search } -------------------------------------------------- // CONSOLE + +This query also accepts `zero_terms_query`, as explained in <>. diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index a17027fb3c335..1583726421aeb 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -489,7 +489,7 @@ Using `_index` in scripts has been replaced with writing `ScriptEngine` backends === Painless Syntax See the -{painless}/painless-specification.html[Painless Language Specification] +{painless}/painless-lang-spec.html[Painless Language Specification] in the guide to the {painless}/index.html[Painless Scripting Language]. [role="exclude",id="modules-scripting-painless-debugging"] diff --git a/docs/reference/release-notes/7.0.0-alpha1.asciidoc b/docs/reference/release-notes/7.0.0-alpha1.asciidoc index 128a9b7dd716b..1cc328f16598b 100644 --- a/docs/reference/release-notes/7.0.0-alpha1.asciidoc +++ b/docs/reference/release-notes/7.0.0-alpha1.asciidoc @@ -8,4 +8,11 @@ The changes listed below have been released for the first time in Elasticsearch === Breaking changes Core:: -* Tribe node has been removed in favor of Cross-Cluster-Search \ No newline at end of file +* Tribe node has been removed in favor of Cross-Cluster-Search + +Cross-Cluster-Search:: +* `http_addresses` has been removed from the <> API + because it is expensive to fetch and no longer needed by Kibana. + +Rest API:: +* The Clear Cache API only supports `POST` as HTTP method diff --git a/docs/reference/search/field-caps.asciidoc b/docs/reference/search/field-caps.asciidoc index 8329d96131dff..6cb483e7a256e 100644 --- a/docs/reference/search/field-caps.asciidoc +++ b/docs/reference/search/field-caps.asciidoc @@ -20,7 +20,7 @@ GET twitter/_field_caps?fields=rating // CONSOLE // TEST[setup:twitter] -Alternatively the `fields` option can also be defined in the request body: +Alternatively the `fields` option can also be defined in the request body. deprecated[6.4.0, Please use a request parameter instead.] [source,js] -------------------------------------------------- @@ -30,6 +30,7 @@ POST _field_caps } -------------------------------------------------- // CONSOLE +// TEST[warning:Specifying a request body is deprecated -- the [fields] request parameter should be used instead.] This is equivalent to the previous request. 
diff --git a/docs/reference/search/request/highlighters-internal.asciidoc b/docs/reference/search/request/highlighters-internal.asciidoc new file mode 100644 index 0000000000000..651cdf917ced0 --- /dev/null +++ b/docs/reference/search/request/highlighters-internal.asciidoc @@ -0,0 +1,194 @@ +[[highlighter-internal-work]] +==== How highlighters work internally + +Given a query and a text (the content of a document field), the goal of a +highlighter is to find the best text fragments for the query, and highlight +the query terms in the found fragments. For this, a highlighter needs to +address several questions: + +- How to break a text into fragments? +- How to find the best fragments among all fragments? +- How to highlight the query terms in a fragment? + +===== How to break a text into fragments? +Relevant settings: `fragment_size`, `fragmenter`, `type` of highlighter, +`boundary_chars`, `boundary_max_scan`, `boundary_scanner`, `boundary_scanner_locale`. + +The plain highlighter begins by analyzing the text using the given analyzer, +and creating a token stream from it. The plain highlighter uses a very simple +algorithm to break the token stream into fragments. It loops through terms in the token stream, +and every time the current term's end_offset exceeds `fragment_size` multiplied by the number of +created fragments, a new fragment is created. A little more computation is done when using the `span` +fragmenter to avoid breaking up text between highlighted terms. But overall, since the breaking is +done only by `fragment_size`, some fragments can be quite odd, e.g. beginning +with a punctuation mark. + +The unified and FVH highlighters do a better job of breaking up a text into +fragments by utilizing Java's `BreakIterator`. This ensures that a fragment +is a valid sentence as long as `fragment_size` allows for this. + + +===== How to find the best fragments? +Relevant settings: `number_of_fragments`. + +To find the best, most relevant fragments, a highlighter needs to score +each fragment with respect to the given query. The goal is to score only those +terms that participated in generating the 'hit' on the document. +For some complex queries, this is still work in progress. + +The plain highlighter creates an in-memory index from the current token stream, +and re-runs the original query criteria through Lucene's query execution planner +to get access to low-level match information for the current text. +For more complex queries the original query could be converted to a span query, +as span queries can handle phrases more accurately. Then this obtained low-level match +information is used to score each individual fragment. The scoring method of the plain +highlighter is quite simple. Each fragment is scored by the number of unique +query terms found in this fragment. The score of an individual term is equal to its boost, +which by default is 1. Thus, by default, a fragment that contains one unique query term +will get a score of 1; and a fragment that contains two unique query terms +will get a score of 2 and so on. The fragments are then sorted by their scores, +so the highest scored fragments will be output first. + +FVH doesn't need to analyze the text and build an in-memory index, as it uses +pre-indexed document term vectors, and finds among them terms that correspond to the query. +FVH scores each fragment by the number of query terms found in this fragment. +Similarly to the plain highlighter, the score of an individual term is equal to its boost value.
+In contrast to the plain highlighter, all query terms are counted, not only unique terms. + +The unified highlighter can use pre-indexed term vectors or pre-indexed term offsets, +if they are available. Otherwise, similar to the plain highlighter, it has to create +an in-memory index from the text. The unified highlighter uses the BM25 scoring model +to score fragments. + + +===== How to highlight the query terms in a fragment? +Relevant settings: `pre-tags`, `post-tags`. + +The goal is to highlight only those terms that participated in generating the 'hit' on the document. +For some complex boolean queries, this is still work in progress, as highlighters don't reflect +the boolean logic of a query and only extract leaf (terms, phrases, prefix, etc.) queries. + +The plain highlighter, given the token stream and the original text, recomposes the original text to +highlight only terms from the token stream that are contained in the low-level match information +structure from the previous step. + +The FVH and unified highlighters use intermediate data structures to represent +fragments in some raw form, and then populate them with actual text. + +A highlighter uses `pre-tags` and `post-tags` to encode highlighted terms. + + +===== An example of the work of the unified highlighter + +Let's look in more detail at how the unified highlighter works. + +First, we create an index with a text field `content` that will be indexed +using the `english` analyzer, without offsets or term vectors. + +[source,js] +-------------------------------------------------- +PUT test_index +{ + "mappings": { + "_doc": { + "properties": { + "content" : { + "type" : "text", + "analyzer" : "english" + } + } + } + } +} +-------------------------------------------------- +// NOTCONSOLE + +We put the following document into the index: + +[source,js] +-------------------------------------------------- +PUT test_index/_doc/doc1 +{ + "content" : "For you I'm only a fox like a hundred thousand other foxes. But if you tame me, we'll need each other. You'll be the only boy in the world for me. I'll be the only fox in the world for you." +} +-------------------------------------------------- +// NOTCONSOLE + + +Then we run the following query with a highlight request: + +[source,js] +-------------------------------------------------- +GET test_index/_search +{ + "query": { + "match_phrase" : {"content" : "only fox"} + }, + "highlight": { + "type" : "unified", + "number_of_fragments" : 3, + "fields": { + "content": {} + } + } +} +-------------------------------------------------- +// NOTCONSOLE + + +After `doc1` is found as a hit for this query, this hit will be passed to the +unified highlighter for highlighting the field `content` of the document.
+Since the field `content` was not indexed either with offsets or term vectors, +its raw field value will be analyzed, and an in-memory index will be built from +the terms that match the query: + + {"token":"onli","start_offset":12,"end_offset":16,"position":3}, + {"token":"fox","start_offset":19,"end_offset":22,"position":5}, + {"token":"fox","start_offset":53,"end_offset":58,"position":11}, + {"token":"onli","start_offset":117,"end_offset":121,"position":24}, + {"token":"onli","start_offset":159,"end_offset":163,"position":34}, + {"token":"fox","start_offset":164,"end_offset":167,"position":35} + +Our complex phrase query will be converted to the span query: +`spanNear([text:onli, text:fox], 0, true)`, meaning that we are looking for +terms "onli" and "fox" within 0 distance from each other, and in the given +order. The span query will be run against the previously created in-memory index +to find the following match: + + {"term":"onli", "start_offset":159, "end_offset":163}, + {"term":"fox", "start_offset":164, "end_offset":167} + +In our example, we get a single match, but there could be several matches. +Given the matches, the unified highlighter breaks the text of the field into +so-called "passages". Each passage must contain at least one match. +The unified highlighter, with the use of Java's `BreakIterator`, ensures that each +passage represents a full sentence as long as it doesn't exceed `fragment_size`. +For our example, we get a single passage with the following properties +(showing only a subset of the properties here): + + Passage: + startOffset: 147 + endOffset: 189 + score: 3.7158387 + matchStarts: [159, 164] + matchEnds: [163, 167] + numMatches: 2 + +Notice how a passage has a score, calculated using the BM25 scoring formula +adapted for passages. Scores allow us to choose the best scoring +passages if there are more passages available than the +`number_of_fragments` requested by the user. Scores also let us sort passages by +`order: "score"` if requested by the user. + +As the final step, the unified highlighter will extract from the field's text +a string corresponding to each passage: + + "I'll be the only fox in the world for you." + +and will format with the tags <em> and </em> all matches in this string +using the passage's `matchStarts` and `matchEnds` information: + + I'll be the <em>only</em> <em>fox</em> in the world for you. + +These formatted strings are the final result of the highlighter, returned +to the user. \ No newline at end of file diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index a6d7bcf1415d6..2da11c14b5804 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -7,6 +7,11 @@ When you request highlights, the response contains an additional `highlight` element for each search hit that includes the highlighted fields and the highlighted fragments. +NOTE: Highlighters don't reflect the boolean logic of a query when extracting + terms to highlight. Thus, for some complex boolean queries (e.g. nested boolean + queries, queries using `minimum_should_match`, etc.), parts of documents may be + highlighted that don't correspond to query matches. + Highlighting requires the actual content of a field. If the field is not stored (the mapping does not set `store` to `true`), the actual `_source` is loaded and the relevant field is extracted from `_source`. @@ -88,7 +93,7 @@ the highlighted documents.
 This is important if you have large fields because it doesn't require
 reanalyzing the text to be highlighted. It also requires less disk space than
 using `term_vectors`.
-* Term vectors. If `term_vector` information is provided by setting 
+* Term vectors. If `term_vector` information is provided by setting
 `term_vector` to `with_positions_offsets` in the mapping, the `unified`
 highlighter automatically uses the `term_vector` to highlight the field. It's
 fast especially for large fields (> `1MB`) and for highlighting multi-term queries like
@@ -127,7 +132,7 @@ the `fvh` highlighter.
 boundaries. The `boundary_max_scan` setting controls how far to scan for
 boundary characters. Only valid for the `fvh` highlighter.
 `sentence`::: Break highlighted fragments at the next sentence boundary, as
-determined by Java's 
+determined by Java's
 https://docs.oracle.com/javase/8/docs/api/java/text/BreakIterator.html[BreakIterator].
 You can specify the locale to use with `boundary_scanner_locale`.
 +
@@ -140,7 +145,10 @@ by Java's https://docs.oracle.com/javase/8/docs/api/java/text/BreakIterator.html
 You can specify the locale to use with `boundary_scanner_locale`.
 
 boundary_scanner_locale:: Controls which locale is used to search for sentence
-and word boundaries.
+and word boundaries. This parameter takes the form of a language tag,
+e.g. `"en-US"`, `"fr-FR"`, `"ja-JP"`. More info can be found in the
+https://docs.oracle.com/javase/8/docs/api/java/util/Locale.html#forLanguageTag-java.lang.String-[Locale Language Tag]
+documentation. The default value is https://docs.oracle.com/javase/8/docs/api/java/util/Locale.html#ROOT[ Locale.ROOT].
 
 encoder:: Indicates if the snippet should be HTML encoded: `default` (no
 encoding) or `html` (HTML-escape the snippet text and then
@@ -200,12 +208,16 @@ handy when you need to highlight short texts such as a title or address, but
 fragmentation is not required. If `number_of_fragments` is 0, `fragment_size` is
 ignored. Defaults to 5.
 
-order:: Sorts highlighted fragments by score when set to `score`. Only valid for
-the `unified` highlighter.
+order:: Sorts highlighted fragments by score when set to `score`. By default,
+fragments will be output in the order they appear in the field (order: `none`).
+Setting this option to `score` will output the most relevant fragments first.
+Each highlighter applies its own logic to compute relevancy scores. See
+the document <>
+for more details on how different highlighters find the best fragments.
 
 phrase_limit:: Controls the number of matching phrases in a document that are
 considered. Prevents the `fvh` highlighter from analyzing too many phrases
-and consuming too much memory. When using `matched_fields, `phrase_limit`
+and consuming too much memory. When using `matched_fields`, `phrase_limit`
 phrases per matched field are considered. Raising the limit increases query time
 and consumes more memory. Only supported by the `fvh` highlighter. Defaults to 256.
 
@@ -929,3 +941,6 @@ Response:
 
 If the `number_of_fragments` option is set to `0`, `NullFragmenter` is used
 which does not fragment the text at all. This is useful for highlighting the
 entire contents of a document or field.
+
+
+include::highlighters-internal.asciidoc[]
diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc
index be725aaf362f5..0fd6979ef9568 100644
--- a/docs/reference/search/request/scroll.asciidoc
+++ b/docs/reference/search/request/scroll.asciidoc
@@ -78,9 +78,9 @@ returned with each batch of results.
Each call to the `scroll` API returns the next batch of results until there are no more results left to return, ie the `hits` array is empty. -IMPORTANT: The initial search request and each subsequent scroll request -returns a new `_scroll_id` -- only the most recent `_scroll_id` should be -used. +IMPORTANT: The initial search request and each subsequent scroll request each +return a `_scroll_id`, which may change with each request -- only the most +recent `_scroll_id` should be used. NOTE: If the request specifies aggregations, only the initial search response will contain the aggregations results. diff --git a/docs/reference/setup/install/check-running.asciidoc b/docs/reference/setup/install/check-running.asciidoc index 3ec10c26346bc..0cfc4b329ecfa 100644 --- a/docs/reference/setup/install/check-running.asciidoc +++ b/docs/reference/setup/install/check-running.asciidoc @@ -19,6 +19,8 @@ which should give you a response something like this: "cluster_uuid" : "AT69_T_DTp-1qgIJlatQqA", "version" : { "number" : "{version}", + "build_flavor" : "oss", + "build_type" : "zip", "build_hash" : "f27399d", "build_date" : "2016-03-30T09:51:41.449Z", "build_snapshot" : false, diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 27768f1bbac3c..a5fe1cb94b9ee 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 8d6b7c2cbd9ba..7962563f742fe 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ -distributionUrl=https\://services.gradle.org/distributions/gradle-4.5-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-4.7-all.zip distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists zipStorePath=wrapper/dists zipStoreBase=GRADLE_USER_HOME -distributionSha256Sum=6ac2f8f9302f50241bf14cc5f4a3d88504ad20e61bb98c5fd048f7723b61397e +distributionSha256Sum=203f4537da8b8075e38c036a6d14cb71b1149de5bf0a8f6db32ac2833a1d1294 diff --git a/libs/build.gradle b/libs/build.gradle index 78eb93886243d..7f24f69eedc2e 100644 --- a/libs/build.gradle +++ b/libs/build.gradle @@ -34,6 +34,7 @@ subprojects { Project depProject = dependencyToProject(dep) if (depProject != null && false == depProject.path.equals(':libs:elasticsearch-core') + && false == isEclipse && depProject.path.startsWith(':libs')) { throw new InvalidUserDataException("projects in :libs " + "may not depend on other projects libs except " diff --git a/licenses/APACHE-LICENSE-2.0.txt b/licenses/APACHE-LICENSE-2.0.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/licenses/APACHE-LICENSE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/licenses/ELASTIC-LICENSE.txt b/licenses/ELASTIC-LICENSE.txt new file mode 100644 index 0000000000000..7376ffc3ff107 --- /dev/null +++ b/licenses/ELASTIC-LICENSE.txt @@ -0,0 +1,223 @@ +ELASTIC LICENSE AGREEMENT + +PLEASE READ CAREFULLY THIS ELASTIC LICENSE AGREEMENT (THIS "AGREEMENT"), WHICH +CONSTITUTES A LEGALLY BINDING AGREEMENT AND GOVERNS ALL OF YOUR USE OF ALL OF +THE ELASTIC SOFTWARE WITH WHICH THIS AGREEMENT IS INCLUDED ("ELASTIC SOFTWARE") +THAT IS PROVIDED IN OBJECT CODE FORMAT, AND, IN ACCORDANCE WITH SECTION 2 BELOW, +CERTAIN OF THE ELASTIC SOFTWARE THAT IS PROVIDED IN SOURCE CODE FORMAT. BY +INSTALLING OR USING ANY OF THE ELASTIC SOFTWARE GOVERNED BY THIS AGREEMENT, YOU +ARE ASSENTING TO THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE +WITH SUCH TERMS AND CONDITIONS, YOU MAY NOT INSTALL OR USE THE ELASTIC SOFTWARE +GOVERNED BY THIS AGREEMENT. IF YOU ARE INSTALLING OR USING THE SOFTWARE ON +BEHALF OF A LEGAL ENTITY, YOU REPRESENT AND WARRANT THAT YOU HAVE THE ACTUAL +AUTHORITY TO AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT ON BEHALF OF +SUCH ENTITY. + +Posted Date: April 20, 2018 + +This Agreement is entered into by and between Elasticsearch BV ("Elastic") and +You, or the legal entity on behalf of whom You are acting (as applicable, +"You"). + +1. OBJECT CODE END USER LICENSES, RESTRICTIONS AND THIRD PARTY OPEN SOURCE +SOFTWARE + + 1.1 Object Code End User License. Subject to the terms and conditions of + Section 1.2 of this Agreement, Elastic hereby grants to You, AT NO CHARGE and + for so long as you are not in breach of any provision of this Agreement, a + License to the Basic Features and Functions of the Elastic Software. + + 1.2 Reservation of Rights; Restrictions. As between Elastic and You, Elastic + and its licensors own all right, title and interest in and to the Elastic + Software, and except as expressly set forth in Sections 1.1, and 2.1 of this + Agreement, no other license to the Elastic Software is granted to You under + this Agreement, by implication, estoppel or otherwise. You agree not to: (i) + reverse engineer or decompile, decrypt, disassemble or otherwise reduce any + Elastic Software provided to You in Object Code, or any portion thereof, to + Source Code, except and only to the extent any such restriction is prohibited + by applicable law, (ii) except as expressly permitted in this Agreement, + prepare derivative works from, modify, copy or use the Elastic Software Object + Code or the Commercial Software Source Code in any manner; (iii) except as + expressly permitted in Section 1.1 above, transfer, sell, rent, lease, + distribute, sublicense, loan or otherwise transfer, Elastic Software Object + Code, in whole or in part, to any third party; (iv) use Elastic Software + Object Code for providing time-sharing services, any software-as-a-service, + service bureau services or as part of an application services provider or + other service offering (collectively, "SaaS Offering") where obtaining access + to the Elastic Software or the features and functions of the Elastic Software + is a primary reason or substantial motivation for users of the SaaS Offering + to access and/or use the SaaS Offering ("Prohibited SaaS Offering"); (v) + circumvent the limitations on use of Elastic Software provided to You in + Object Code format that are imposed or preserved by any License Key, or (vi) + alter or remove any Marks and Notices in the Elastic Software. 
If You have any + question as to whether a specific SaaS Offering constitutes a Prohibited SaaS + Offering, or are interested in obtaining Elastic's permission to engage in + commercial or non-commercial distribution of the Elastic Software, please + contact elastic_license@elastic.co. + + 1.3 Third Party Open Source Software. The Commercial Software may contain or + be provided with third party open source libraries, components, utilities and + other open source software (collectively, "Open Source Software"), which Open + Source Software may have applicable license terms as identified on a website + designated by Elastic. Notwithstanding anything to the contrary herein, use of + the Open Source Software shall be subject to the license terms and conditions + applicable to such Open Source Software, to the extent required by the + applicable licensor (which terms shall not restrict the license rights granted + to You hereunder, but may contain additional rights). To the extent any + condition of this Agreement conflicts with any license to the Open Source + Software, the Open Source Software license will govern with respect to such + Open Source Software only. Elastic may also separately provide you with + certain open source software that is licensed by Elastic. Your use of such + Elastic open source software will not be governed by this Agreement, but by + the applicable open source license terms. + +2. COMMERCIAL SOFTWARE SOURCE CODE + + 2.1 Limited License. Subject to the terms and conditions of Section 2.2 of + this Agreement, Elastic hereby grants to You, AT NO CHARGE and for so long as + you are not in breach of any provision of this Agreement, a limited, + non-exclusive, non-transferable, fully paid up royalty free right and license + to the Commercial Software in Source Code format, without the right to grant + or authorize sublicenses, to prepare Derivative Works of the Commercial + Software, provided You (i) do not hack the licensing mechanism, or otherwise + circumvent the intended limitations on the use of Elastic Software to enable + features other than Basic Features and Functions or those features You are + entitled to as part of a Subscription, and (ii) use the resulting object code + only for reasonable testing purposes. + + 2.2 Restrictions. Nothing in Section 2.1 grants You the right to (i) use the + Commercial Software Source Code other than in accordance with Section 2.1 + above, (ii) use a Derivative Work of the Commercial Software outside of a + Non-production Environment, in any production capacity, on a temporary or + permanent basis, or (iii) transfer, sell, rent, lease, distribute, sublicense, + loan or otherwise make available the Commercial Software Source Code, in whole + or in part, to any third party. Notwithstanding the foregoing, You may + maintain a copy of the repository in which the Source Code of the Commercial + Software resides and that copy may be publicly accessible, provided that you + include this Agreement with Your copy of the repository. + +3. TERMINATION + + 3.1 Termination. This Agreement will automatically terminate, whether or not + You receive notice of such Termination from Elastic, if You breach any of its + provisions. + + 3.2 Post Termination. Upon any termination of this Agreement, for any reason, + You shall promptly cease the use of the Elastic Software in Object Code format + and cease use of the Commercial Software in Source Code format. 
For the + avoidance of doubt, termination of this Agreement will not affect Your right + to use Elastic Software, in either Object Code or Source Code formats, made + available under the Apache License Version 2.0. + + 3.3 Survival. Sections 1.2, 2.2. 3.3, 4 and 5 shall survive any termination or + expiration of this Agreement. + +4. DISCLAIMER OF WARRANTIES AND LIMITATION OF LIABILITY + + 4.1 Disclaimer of Warranties. TO THE MAXIMUM EXTENT PERMITTED UNDER APPLICABLE + LAW, THE ELASTIC SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, + AND ELASTIC AND ITS LICENSORS MAKE NO WARRANTIES WHETHER EXPRESSED, IMPLIED OR + STATUTORY REGARDING OR RELATING TO THE ELASTIC SOFTWARE. TO THE MAXIMUM EXTENT + PERMITTED UNDER APPLICABLE LAW, ELASTIC AND ITS LICENSORS SPECIFICALLY + DISCLAIM ALL IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE AND NON-INFRINGEMENT WITH RESPECT TO THE ELASTIC SOFTWARE, AND WITH + RESPECT TO THE USE OF THE FOREGOING. FURTHER, ELASTIC DOES NOT WARRANT RESULTS + OF USE OR THAT THE ELASTIC SOFTWARE WILL BE ERROR FREE OR THAT THE USE OF THE + ELASTIC SOFTWARE WILL BE UNINTERRUPTED. + + 4.2 Limitation of Liability. IN NO EVENT SHALL ELASTIC OR ITS LICENSORS BE + LIABLE TO YOU OR ANY THIRD PARTY FOR ANY DIRECT OR INDIRECT DAMAGES, + INCLUDING, WITHOUT LIMITATION, FOR ANY LOSS OF PROFITS, LOSS OF USE, BUSINESS + INTERRUPTION, LOSS OF DATA, COST OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY + SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, IN CONNECTION WITH + OR ARISING OUT OF THE USE OR INABILITY TO USE THE ELASTIC SOFTWARE, OR THE + PERFORMANCE OF OR FAILURE TO PERFORM THIS AGREEMENT, WHETHER ALLEGED AS A + BREACH OF CONTRACT OR TORTIOUS CONDUCT, INCLUDING NEGLIGENCE, EVEN IF ELASTIC + HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +5. MISCELLANEOUS + + This Agreement completely and exclusively states the entire agreement of the + parties regarding the subject matter herein, and it supersedes, and its terms + govern, all prior proposals, agreements, or other communications between the + parties, oral or written, regarding such subject matter. This Agreement may be + modified by Elastic from time to time, and any such modifications will be + effective upon the "Posted Date" set forth at the top of the modified + Agreement. If any provision hereof is held unenforceable, this Agreement will + continue without said provision and be interpreted to reflect the original + intent of the parties. This Agreement and any non-contractual obligation + arising out of or in connection with it, is governed exclusively by Dutch law. + This Agreement shall not be governed by the 1980 UN Convention on Contracts + for the International Sale of Goods. All disputes arising out of or in + connection with this Agreement, including its existence and validity, shall be + resolved by the courts with jurisdiction in Amsterdam, The Netherlands, except + where mandatory law provides for the courts at another location in The + Netherlands to have jurisdiction. The parties hereby irrevocably waive any and + all claims and defenses either might otherwise have in any such action or + proceeding in any of such courts based upon any alleged lack of personal + jurisdiction, improper venue, forum non conveniens or any similar claim or + defense. 
A breach or threatened breach, by You of Section 2 may cause + irreparable harm for which damages at law may not provide adequate relief, and + therefore Elastic shall be entitled to seek injunctive relief without being + required to post a bond. You may not assign this Agreement (including by + operation of law in connection with a merger or acquisition), in whole or in + part to any third party without the prior written consent of Elastic, which + may be withheld or granted by Elastic in its sole and absolute discretion. + Any assignment in violation of the preceding sentence is void. Notices to + Elastic may also be sent to legal@elastic.co. + +6. DEFINITIONS + + The following terms have the meanings ascribed: + + 6.1 "Affiliate" means, with respect to a party, any entity that controls, is + controlled by, or which is under common control with, such party, where + "control" means ownership of at least fifty percent (50%) of the outstanding + voting shares of the entity, or the contractual right to establish policy for, + and manage the operations of, the entity. + + 6.2 "Basic Features and Functions" means those features and functions of the + Elastic Software that are eligible for use under a Basic license, as set forth + at https://www.elastic.co/subscriptions, as may be modified by Elastic from + time to time. + + 6.3 "Commercial Software" means the Elastic Software Source Code in any file + containing a header stating the contents are subject to the Elastic License or + which is contained in the repository folder labeled "x-pack", unless a LICENSE + file present in the directory subtree declares a different license. + + 6.4 "Derivative Work of the Commercial Software" means, for purposes of this + Agreement, any modification(s) or enhancement(s) to the Commercial Software, + which represent, as a whole, an original work of authorship. + + 6.5 "License" means a limited, non-exclusive, non-transferable, fully paid up, + royalty free, right and license, without the right to grant or authorize + sublicenses, solely for Your internal business operations to (i) install and + use the applicable Features and Functions of the Elastic Software in Object + Code, and (ii) permit Contractors and Your Affiliates to use the Elastic + software as set forth in (i) above, provided that such use by Contractors must + be solely for Your benefit and/or the benefit of Your Affiliates, and You + shall be responsible for all acts and omissions of such Contractors and + Affiliates in connection with their use of the Elastic software that are + contrary to the terms and conditions of this Agreement. + + 6.6 "License Key" means a sequence of bytes, including but not limited to a + JSON blob, that is used to enable certain features and functions of the + Elastic Software. + + 6.7 "Marks and Notices" means all Elastic trademarks, trade names, logos and + notices present on the Documentation as originally provided by Elastic. + + 6.8 "Non-production Environment" means an environment for development, testing + or quality assurance, where software is not used for production purposes. + + 6.9 "Object Code" means any form resulting from mechanical transformation or + translation of Source Code form, including but not limited to compiled object + code, generated documentation, and conversions to other media types. + + 6.10 "Source Code" means the preferred form of computer software for making + modifications, including but not limited to software source code, + documentation source, and configuration files. 
+ + 6.11 "Subscription" means the right to receive Support Services and a License + to the Commercial Software. diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index e0193e50313f3..a01eb52fdd498 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -67,6 +67,8 @@ import org.apache.lucene.analysis.standard.ClassicFilter; import org.apache.lucene.analysis.tr.ApostropheFilter; import org.apache.lucene.analysis.util.ElisionFilter; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.index.analysis.CharFilterFactory; import org.elasticsearch.index.analysis.PreConfiguredCharFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; @@ -88,6 +90,9 @@ import static org.elasticsearch.plugins.AnalysisPlugin.requriesAnalysisSettings; public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin { + + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(CommonAnalysisPlugin.class)); + @Override public Map> getTokenFilters() { Map> filters = new TreeMap<>(); @@ -171,8 +176,14 @@ public Map> getTokenizers() { public List getPreConfiguredCharFilters() { List filters = new ArrayList<>(); filters.add(PreConfiguredCharFilter.singleton("html_strip", false, HTMLStripCharFilter::new)); - // TODO deprecate htmlStrip - filters.add(PreConfiguredCharFilter.singleton("htmlStrip", false, HTMLStripCharFilter::new)); + filters.add(PreConfiguredCharFilter.singletonWithVersion("htmlStrip", false, (reader, version) -> { + if (version.onOrAfter(org.elasticsearch.Version.V_6_3_0)) { + DEPRECATION_LOGGER.deprecatedAndMaybeLog("htmlStrip_deprecation", + "The [htmpStrip] char filter name is deprecated and will be removed in a future version. " + + "Please change the filter name to [html_strip] instead."); + } + return new HTMLStripCharFilter(reader); + })); return filters; } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HtmlStripCharFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HtmlStripCharFilterFactoryTests.java new file mode 100644 index 0000000000000..0d5389a6d6594 --- /dev/null +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/HtmlStripCharFilterFactoryTests.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.analysis.common; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.CharFilterFactory; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; +import java.io.StringReader; +import java.util.Map; + + +public class HtmlStripCharFilterFactoryTests extends ESTestCase { + + /** + * Check that the deprecated name "htmlStrip" issues a deprecation warning for indices created since 6.3.0 + */ + public void testDeprecationWarning() throws IOException { + Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_6_3_0, Version.CURRENT)) + .build(); + + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + Map charFilters = createTestAnalysis(idxSettings, settings, commonAnalysisPlugin).charFilter; + CharFilterFactory charFilterFactory = charFilters.get("htmlStrip"); + assertNotNull(charFilterFactory.create(new StringReader("input"))); + assertWarnings("The [htmpStrip] char filter name is deprecated and will be removed in a future version. " + + "Please change the filter name to [html_strip] instead."); + } + } + + /** + * Check that the deprecated name "htmlStrip" does NOT issues a deprecation warning for indices created before 6.3.0 + */ + public void testNoDeprecationWarningPre6_3() throws IOException { + Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(IndexMetaData.SETTING_VERSION_CREATED, + VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_2_4)) + .build(); + + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + Map charFilters = createTestAnalysis(idxSettings, settings, commonAnalysisPlugin).charFilter; + CharFilterFactory charFilterFactory = charFilters.get("htmlStrip"); + assertNotNull(charFilterFactory.create(new StringReader(""))); + } + } +} diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml index cbb8f053cfbba..f8fc3acc02c4c 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml @@ -17,3 +17,56 @@ - match: { error.type: illegal_argument_exception } - match: { error.reason: "Custom normalizer may not use filter [word_delimiter]" } +--- +"htmlStrip_deprecated": + - skip: + version: " - 6.2.99" + reason: deprecated in 6.3 + features: "warnings" + + - do: + indices.create: + index: test_deprecated_htmlstrip + body: + settings: + index: + analysis: + analyzer: + my_htmlStripWithCharfilter: + tokenizer: keyword + char_filter: ["htmlStrip"] + mappings: + type: + properties: + name: + type: text + analyzer: my_htmlStripWithCharfilter + + - do: + warnings: + - 'The [htmpStrip] char filter 
name is deprecated and will be removed in a future version. Please change the filter name to [html_strip] instead.' + index: + index: test_deprecated_htmlstrip + type: type + id: 1 + body: { "name": "foo bar" } + + - do: + warnings: + - 'The [htmpStrip] char filter name is deprecated and will be removed in a future version. Please change the filter name to [html_strip] instead.' + index: + index: test_deprecated_htmlstrip + type: type + id: 2 + body: { "name": "foo baz" } + + - do: + warnings: + - 'The [htmpStrip] char filter name is deprecated and will be removed in a future version. Please change the filter name to [html_strip] instead.' + indices.analyze: + index: test_deprecated_htmlstrip + body: + analyzer: "my_htmlStripWithCharfilter" + text: "foo" + - length: { tokens: 1 } + - match: { tokens.0.token: "\nfoo\n" } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java index 758e5eb997297..bf35918ad6e24 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java @@ -128,7 +128,7 @@ public void testRenameExistingFieldNullValue() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); String fieldName = RandomDocumentPicks.randomFieldName(random()); ingestDocument.setFieldValue(fieldName, null); - String newFieldName = RandomDocumentPicks.randomFieldName(random()); + String newFieldName = randomValueOtherThanMany(ingestDocument::hasField, () -> RandomDocumentPicks.randomFieldName(random())); Processor processor = new RenameProcessor(randomAlphaOfLength(10), fieldName, newFieldName, false); processor.execute(ingestDocument); assertThat(ingestDocument.hasField(fieldName), equalTo(false)); diff --git a/modules/lang-painless/src/main/antlr/PainlessParser.g4 b/modules/lang-painless/src/main/antlr/PainlessParser.g4 index bfa4ee28dcc88..5292b4d195056 100644 --- a/modules/lang-painless/src/main/antlr/PainlessParser.g4 +++ b/modules/lang-painless/src/main/antlr/PainlessParser.g4 @@ -22,7 +22,7 @@ parser grammar PainlessParser; options { tokenVocab=PainlessLexer; } source - : function* statement* EOF + : function* statement* dstatement? EOF ; function @@ -33,23 +33,31 @@ parameters : LP ( decltype ID ( COMMA decltype ID )* )? RP ; +statement + : rstatement + | dstatement SEMICOLON + ; + // Note we use a predicate on the if/else case here to prevent the // "dangling-else" ambiguity by forcing the 'else' token to be consumed // as soon as one is found. See (https://en.wikipedia.org/wiki/Dangling_else). -statement +rstatement : IF LP expression RP trailer ( ELSE trailer | { _input.LA(1) != ELSE }? ) # if | WHILE LP expression RP ( trailer | empty ) # while - | DO block WHILE LP expression RP delimiter # do | FOR LP initializer? SEMICOLON expression? SEMICOLON afterthought? 
RP ( trailer | empty ) # for | FOR LP decltype ID COLON expression RP trailer # each | FOR LP ID IN expression RP trailer # ineach - | declaration delimiter # decl - | CONTINUE delimiter # continue - | BREAK delimiter # break - | RETURN expression delimiter # return | TRY block trap+ # try - | THROW expression delimiter # throw - | expression delimiter # expr + ; + +dstatement + : DO block WHILE LP expression RP # do + | declaration # decl + | CONTINUE # continue + | BREAK # break + | RETURN expression # return + | THROW expression # throw + | expression # expr ; trailer @@ -58,7 +66,7 @@ trailer ; block - : LBRACK statement* RBRACK + : LBRACK statement* dstatement? RBRACK ; empty @@ -90,11 +98,6 @@ trap : CATCH LP TYPE ID RP block ; -delimiter - : SEMICOLON - | EOF - ; - expression : unary # single | expression ( MUL | DIV | REM ) expression # binary @@ -169,8 +172,8 @@ braceaccess ; arrayinitializer - : NEW TYPE ( LBRACE expression RBRACE )+ ( postdot postfix* )? # newstandardarray - | NEW TYPE LBRACE RBRACE LBRACK ( expression ( COMMA expression )* )? SEMICOLON? RBRACK postfix* # newinitializedarray + : NEW TYPE ( LBRACE expression RBRACE )+ ( postdot postfix* )? # newstandardarray + | NEW TYPE LBRACE RBRACE LBRACK ( expression ( COMMA expression )* )? RBRACK postfix* # newinitializedarray ; listinitializer @@ -206,10 +209,8 @@ lamtype ; funcref - : TYPE REF ID # classfuncref // reference to a static or instance method, - // e.g. ArrayList::size or Integer::compare - | decltype REF NEW # constructorfuncref // reference to a constructor, e.g. ArrayList::new - | ID REF ID # capturingfuncref // reference to an instance method, e.g. object::toString - // currently limited to capture of a simple variable (id). - | THIS REF ID # localfuncref // reference to a local function, e.g. this::myfunc + : TYPE REF ID # classfuncref + | decltype REF NEW # constructorfuncref + | ID REF ID # capturingfuncref + | THIS REF ID # localfuncref ; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java new file mode 100644 index 0000000000000..aa650a37c4fa2 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java @@ -0,0 +1,338 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.painless; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestStatus.OK; + +public class PainlessExecuteAction extends Action { + + static final PainlessExecuteAction INSTANCE = new PainlessExecuteAction(); + private static final String NAME = "cluster:admin/scripts/painless/execute"; + + private PainlessExecuteAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest implements ToXContent { + + private static final ParseField SCRIPT_FIELD = new ParseField("script"); + private static final ParseField CONTEXT_FIELD = new ParseField("context"); + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "painless_execute_request", args -> new Request((Script) args[0], (SupportedContext) args[1])); + + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> Script.parse(p), SCRIPT_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> { + // For now only accept an empty json object: + XContentParser.Token token = p.nextToken(); + assert token == XContentParser.Token.FIELD_NAME; + String contextType = p.currentName(); + token = p.nextToken(); + assert token == XContentParser.Token.START_OBJECT; + token = p.nextToken(); + assert token == 
XContentParser.Token.END_OBJECT; + token = p.nextToken(); + assert token == XContentParser.Token.END_OBJECT; + return SupportedContext.valueOf(contextType.toUpperCase(Locale.ROOT)); + }, CONTEXT_FIELD); + } + + private Script script; + private SupportedContext context; + + static Request parse(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + Request(Script script, SupportedContext context) { + this.script = Objects.requireNonNull(script); + this.context = context != null ? context : SupportedContext.PAINLESS_TEST; + } + + Request() { + } + + public Script getScript() { + return script; + } + + public SupportedContext getContext() { + return context; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (script.getType() != ScriptType.INLINE) { + validationException = addValidationError("only inline scripts are supported", validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + script = new Script(in); + context = SupportedContext.fromId(in.readByte()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + script.writeTo(out); + out.writeByte(context.id); + } + + // For testing only: + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(SCRIPT_FIELD.getPreferredName(), script); + builder.startObject(CONTEXT_FIELD.getPreferredName()); + { + builder.startObject(context.name()); + builder.endObject(); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(script, request.script) && + context == request.context; + } + + @Override + public int hashCode() { + return Objects.hash(script, context); + } + + public enum SupportedContext { + + PAINLESS_TEST((byte) 0); + + private final byte id; + + SupportedContext(byte id) { + this.id = id; + } + + public static SupportedContext fromId(byte id) { + switch (id) { + case 0: + return PAINLESS_TEST; + default: + throw new IllegalArgumentException("unknown context [" + id + "]"); + } + } + } + + } + + public static class RequestBuilder extends ActionRequestBuilder { + + RequestBuilder(ElasticsearchClient client) { + super(client, INSTANCE, new Request()); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private Object result; + + Response() {} + + Response(Object result) { + this.result = result; + } + + public Object getResult() { + return result; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + result = in.readGenericValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeGenericValue(result); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("result", result); + return builder.endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return Objects.equals(result, response.result); + } + + @Override + public int 
hashCode() { + return Objects.hash(result); + } + } + + public abstract static class PainlessTestScript { + + private final Map params; + + public PainlessTestScript(Map params) { + this.params = params; + } + + /** Return the parameters for this script. */ + public Map getParams() { + return params; + } + + public abstract Object execute(); + + public interface Factory { + + PainlessTestScript newInstance(Map params); + + } + + public static final String[] PARAMETERS = {}; + public static final ScriptContext CONTEXT = new ScriptContext<>("painless_test", Factory.class); + + } + + public static class TransportAction extends HandledTransportAction { + + + private final ScriptService scriptService; + + @Inject + public TransportAction(Settings settings, ThreadPool threadPool, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + ScriptService scriptService) { + super(settings, NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, Request::new); + this.scriptService = scriptService; + } + @Override + protected void doExecute(Request request, ActionListener listener) { + switch (request.context) { + case PAINLESS_TEST: + PainlessTestScript.Factory factory = scriptService.compile(request.script, PainlessTestScript.CONTEXT); + PainlessTestScript painlessTestScript = factory.newInstance(request.script.getParams()); + String result = Objects.toString(painlessTestScript.execute()); + listener.onResponse(new Response(result)); + break; + default: + throw new UnsupportedOperationException("unsupported context [" + request.context + "]"); + } + } + + } + + static class RestAction extends BaseRestHandler { + + RestAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(GET, "/_scripts/painless/_execute", this); + controller.registerHandler(POST, "/_scripts/painless/_execute", this); + } + + @Override + public String getName() { + return "_scripts_painless_execute"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + final Request request = Request.parse(restRequest.contentOrSourceParamParser()); + return channel -> client.executeLocally(INSTANCE, request, new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(Response response, XContentBuilder builder) throws Exception { + response.toXContent(builder, ToXContent.EMPTY_PARAMS); + return new BytesRestResponse(OK, builder); + } + }); + } + } + +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java index 795d81bb6e058..0364ad667efc7 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java @@ -20,28 +20,40 @@ package org.elasticsearch.painless; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; import 
org.elasticsearch.painless.spi.PainlessExtension; import org.elasticsearch.painless.spi.Whitelist; +import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.ExtensiblePlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.ServiceLoader; +import java.util.function.Supplier; /** * Registers Painless as a plugin. */ -public final class PainlessPlugin extends Plugin implements ScriptPlugin, ExtensiblePlugin { +public final class PainlessPlugin extends Plugin implements ScriptPlugin, ExtensiblePlugin, ActionPlugin { private final Map, List> extendedWhitelists = new HashMap<>(); @@ -74,4 +86,24 @@ public void reloadSPI(ClassLoader loader) { } } } + + @SuppressWarnings("rawtypes") + public List getContexts() { + return Collections.singletonList(PainlessExecuteAction.PainlessTestScript.CONTEXT); + } + + @Override + public List> getActions() { + return Collections.singletonList( + new ActionHandler<>(PainlessExecuteAction.INSTANCE, PainlessExecuteAction.TransportAction.class) + ); + } + + @Override + public List getRestHandlers(Settings settings, RestController restController, ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster) { + return Collections.singletonList(new PainlessExecuteAction.RestAction(settings, restController)); + } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/EnhancedPainlessLexer.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/EnhancedPainlessLexer.java index 506ac8fcdecdb..adef4d3642571 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/EnhancedPainlessLexer.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/EnhancedPainlessLexer.java @@ -44,8 +44,7 @@ final class EnhancedPainlessLexer extends PainlessLexer { private final String sourceName; private final Definition definition; - private Token stashedNext = null; - private Token previous = null; + private Token current = null; EnhancedPainlessLexer(CharStream charStream, String sourceName, Definition definition) { super(charStream); @@ -53,27 +52,10 @@ final class EnhancedPainlessLexer extends PainlessLexer { this.definition = definition; } - public Token getPreviousToken() { - return previous; - } - @Override public Token nextToken() { - if (stashedNext != null) { - previous = stashedNext; - stashedNext = null; - return previous; - } - Token next = super.nextToken(); - if (insertSemicolon(previous, next)) { - stashedNext = next; - previous = _factory.create(new Pair(this, _input), PainlessLexer.SEMICOLON, ";", - Lexer.DEFAULT_TOKEN_CHANNEL, next.getStartIndex(), next.getStopIndex(), next.getLine(), next.getCharPositionInLine()); - return previous; - } else { - previous = next; - return next; - } + current = super.nextToken(); + return current; } @Override @@ -101,7 +83,7 @@ protected boolean isSimpleType(String name) { @Override protected boolean slashIsRegex() { - Token lastToken = getPreviousToken(); + Token lastToken = 
current; if (lastToken == null) { return true; } @@ -120,18 +102,4 @@ protected boolean slashIsRegex() { return true; } } - - private static boolean insertSemicolon(Token previous, Token next) { - if (previous == null || next.getType() != PainlessLexer.RBRACK) { - return false; - } - switch (previous.getType()) { - case PainlessLexer.RBRACK: // };} would be weird! - case PainlessLexer.SEMICOLON: // already have a semicolon, no need to add one - case PainlessLexer.LBRACK: // empty blocks don't need a semicolon - return false; - default: - return true; - } - } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java index 528a8a3d851c6..bba53d650ad32 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java @@ -30,21 +30,21 @@ class PainlessParser extends Parser { ID=82, DOTINTEGER=83, DOTID=84; public static final int RULE_source = 0, RULE_function = 1, RULE_parameters = 2, RULE_statement = 3, - RULE_trailer = 4, RULE_block = 5, RULE_empty = 6, RULE_initializer = 7, - RULE_afterthought = 8, RULE_declaration = 9, RULE_decltype = 10, RULE_declvar = 11, - RULE_trap = 12, RULE_delimiter = 13, RULE_expression = 14, RULE_unary = 15, - RULE_chain = 16, RULE_primary = 17, RULE_postfix = 18, RULE_postdot = 19, - RULE_callinvoke = 20, RULE_fieldaccess = 21, RULE_braceaccess = 22, RULE_arrayinitializer = 23, - RULE_listinitializer = 24, RULE_mapinitializer = 25, RULE_maptoken = 26, - RULE_arguments = 27, RULE_argument = 28, RULE_lambda = 29, RULE_lamtype = 30, - RULE_funcref = 31; + RULE_rstatement = 4, RULE_dstatement = 5, RULE_trailer = 6, RULE_block = 7, + RULE_empty = 8, RULE_initializer = 9, RULE_afterthought = 10, RULE_declaration = 11, + RULE_decltype = 12, RULE_declvar = 13, RULE_trap = 14, RULE_expression = 15, + RULE_unary = 16, RULE_chain = 17, RULE_primary = 18, RULE_postfix = 19, + RULE_postdot = 20, RULE_callinvoke = 21, RULE_fieldaccess = 22, RULE_braceaccess = 23, + RULE_arrayinitializer = 24, RULE_listinitializer = 25, RULE_mapinitializer = 26, + RULE_maptoken = 27, RULE_arguments = 28, RULE_argument = 29, RULE_lambda = 30, + RULE_lamtype = 31, RULE_funcref = 32; public static final String[] ruleNames = { - "source", "function", "parameters", "statement", "trailer", "block", "empty", - "initializer", "afterthought", "declaration", "decltype", "declvar", "trap", - "delimiter", "expression", "unary", "chain", "primary", "postfix", "postdot", - "callinvoke", "fieldaccess", "braceaccess", "arrayinitializer", "listinitializer", - "mapinitializer", "maptoken", "arguments", "argument", "lambda", "lamtype", - "funcref" + "source", "function", "parameters", "statement", "rstatement", "dstatement", + "trailer", "block", "empty", "initializer", "afterthought", "declaration", + "decltype", "declvar", "trap", "expression", "unary", "chain", "primary", + "postfix", "postdot", "callinvoke", "fieldaccess", "braceaccess", "arrayinitializer", + "listinitializer", "mapinitializer", "maptoken", "arguments", "argument", + "lambda", "lamtype", "funcref" }; private static final String[] _LITERAL_NAMES = { @@ -133,6 +133,9 @@ public List statement() { public StatementContext statement(int i) { return getRuleContext(StatementContext.class,i); } + public DstatementContext dstatement() { + return getRuleContext(DstatementContext.class,0); 
+ } public SourceContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @@ -152,37 +155,48 @@ public final SourceContext source() throws RecognitionException { int _alt; enterOuterAlt(_localctx, 1); { - setState(67); + setState(69); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,0,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(64); + setState(66); function(); } } } - setState(69); + setState(71); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,0,_ctx); } - setState(73); + setState(75); _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,1,_ctx); + while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { + if ( _alt==1 ) { + { + { + setState(72); + statement(); + } + } + } + setState(77); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,1,_ctx); + } + setState(79); _la = _input.LA(1); - while ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << IF) | (1L << WHILE) | (1L << DO) | (1L << FOR) | (1L << CONTINUE) | (1L << BREAK) | (1L << RETURN) | (1L << NEW) | (1L << TRY) | (1L << THROW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << DO) | (1L << CONTINUE) | (1L << BREAK) | (1L << RETURN) | (1L << NEW) | (1L << THROW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { { - { - setState(70); - statement(); - } + setState(78); + dstatement(); } - setState(75); - _errHandler.sync(this); - _la = _input.LA(1); } - setState(76); + + setState(81); match(EOF); } } @@ -225,13 +239,13 @@ public final FunctionContext function() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(78); + setState(83); decltype(); - setState(79); + setState(84); match(ID); - setState(80); + setState(85); parameters(); - setState(81); + setState(86); block(); } } @@ -281,38 +295,38 @@ public final ParametersContext parameters() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(83); + setState(88); match(LP); - setState(95); + setState(100); _la = _input.LA(1); if (_la==TYPE) { { - setState(84); + setState(89); decltype(); - setState(85); + setState(90); match(ID); - setState(92); + setState(97); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(86); + setState(91); match(COMMA); - setState(87); + setState(92); decltype(); - setState(88); + setState(93); match(ID); } } - setState(94); + setState(99); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(97); + setState(102); match(RP); } } @@ -328,43 +342,100 @@ public final ParametersContext parameters() throws RecognitionException { } public 
static class StatementContext extends ParserRuleContext { + public RstatementContext rstatement() { + return getRuleContext(RstatementContext.class,0); + } + public DstatementContext dstatement() { + return getRuleContext(DstatementContext.class,0); + } + public TerminalNode SEMICOLON() { return getToken(PainlessParser.SEMICOLON, 0); } public StatementContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_statement; } - - public StatementContext() { } - public void copyFrom(StatementContext ctx) { - super.copyFrom(ctx); + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitStatement(this); + else return visitor.visitChildren(this); } } - public static class DeclContext extends StatementContext { - public DeclarationContext declaration() { - return getRuleContext(DeclarationContext.class,0); + + public final StatementContext statement() throws RecognitionException { + StatementContext _localctx = new StatementContext(_ctx, getState()); + enterRule(_localctx, 6, RULE_statement); + try { + setState(108); + switch (_input.LA(1)) { + case IF: + case WHILE: + case FOR: + case TRY: + enterOuterAlt(_localctx, 1); + { + setState(104); + rstatement(); + } + break; + case LBRACE: + case LP: + case DO: + case CONTINUE: + case BREAK: + case RETURN: + case NEW: + case THROW: + case BOOLNOT: + case BWNOT: + case ADD: + case SUB: + case INCR: + case DECR: + case OCTAL: + case HEX: + case INTEGER: + case DECIMAL: + case STRING: + case REGEX: + case TRUE: + case FALSE: + case NULL: + case TYPE: + case ID: + enterOuterAlt(_localctx, 2); + { + setState(105); + dstatement(); + setState(106); + match(SEMICOLON); + } + break; + default: + throw new NoViableAltException(this); + } } - public DelimiterContext delimiter() { - return getRuleContext(DelimiterContext.class,0); + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); } - public DeclContext(StatementContext ctx) { copyFrom(ctx); } - @Override - public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitDecl(this); - else return visitor.visitChildren(this); + finally { + exitRule(); } + return _localctx; } - public static class BreakContext extends StatementContext { - public TerminalNode BREAK() { return getToken(PainlessParser.BREAK, 0); } - public DelimiterContext delimiter() { - return getRuleContext(DelimiterContext.class,0); + + public static class RstatementContext extends ParserRuleContext { + public RstatementContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); } - public BreakContext(StatementContext ctx) { copyFrom(ctx); } - @Override - public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitBreak(this); - else return visitor.visitChildren(this); + @Override public int getRuleIndex() { return RULE_rstatement; } + + public RstatementContext() { } + public void copyFrom(RstatementContext ctx) { + super.copyFrom(ctx); } } - public static class ForContext extends StatementContext { + public static class ForContext extends RstatementContext { public TerminalNode FOR() { return getToken(PainlessParser.FOR, 0); } public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } public List SEMICOLON() 
{ return getTokens(PainlessParser.SEMICOLON); } @@ -387,35 +458,32 @@ public ExpressionContext expression() { public AfterthoughtContext afterthought() { return getRuleContext(AfterthoughtContext.class,0); } - public ForContext(StatementContext ctx) { copyFrom(ctx); } + public ForContext(RstatementContext ctx) { copyFrom(ctx); } @Override public T accept(ParseTreeVisitor visitor) { if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitFor(this); else return visitor.visitChildren(this); } } - public static class DoContext extends StatementContext { - public TerminalNode DO() { return getToken(PainlessParser.DO, 0); } + public static class TryContext extends RstatementContext { + public TerminalNode TRY() { return getToken(PainlessParser.TRY, 0); } public BlockContext block() { return getRuleContext(BlockContext.class,0); } - public TerminalNode WHILE() { return getToken(PainlessParser.WHILE, 0); } - public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } - public ExpressionContext expression() { - return getRuleContext(ExpressionContext.class,0); + public List trap() { + return getRuleContexts(TrapContext.class); } - public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } - public DelimiterContext delimiter() { - return getRuleContext(DelimiterContext.class,0); + public TrapContext trap(int i) { + return getRuleContext(TrapContext.class,i); } - public DoContext(StatementContext ctx) { copyFrom(ctx); } + public TryContext(RstatementContext ctx) { copyFrom(ctx); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitDo(this); + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitTry(this); else return visitor.visitChildren(this); } } - public static class WhileContext extends StatementContext { + public static class WhileContext extends RstatementContext { public TerminalNode WHILE() { return getToken(PainlessParser.WHILE, 0); } public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } public ExpressionContext expression() { @@ -428,14 +496,14 @@ public TrailerContext trailer() { public EmptyContext empty() { return getRuleContext(EmptyContext.class,0); } - public WhileContext(StatementContext ctx) { copyFrom(ctx); } + public WhileContext(RstatementContext ctx) { copyFrom(ctx); } @Override public T accept(ParseTreeVisitor visitor) { if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitWhile(this); else return visitor.visitChildren(this); } } - public static class IneachContext extends StatementContext { + public static class IneachContext extends RstatementContext { public TerminalNode FOR() { return getToken(PainlessParser.FOR, 0); } public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } public TerminalNode ID() { return getToken(PainlessParser.ID, 0); } @@ -447,95 +515,14 @@ public ExpressionContext expression() { public TrailerContext trailer() { return getRuleContext(TrailerContext.class,0); } - public IneachContext(StatementContext ctx) { copyFrom(ctx); } + public IneachContext(RstatementContext ctx) { copyFrom(ctx); } @Override public T accept(ParseTreeVisitor visitor) { if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitIneach(this); else return visitor.visitChildren(this); } } - public static class EachContext extends StatementContext { - public TerminalNode FOR() { return 
getToken(PainlessParser.FOR, 0); } - public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } - public DecltypeContext decltype() { - return getRuleContext(DecltypeContext.class,0); - } - public TerminalNode ID() { return getToken(PainlessParser.ID, 0); } - public TerminalNode COLON() { return getToken(PainlessParser.COLON, 0); } - public ExpressionContext expression() { - return getRuleContext(ExpressionContext.class,0); - } - public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } - public TrailerContext trailer() { - return getRuleContext(TrailerContext.class,0); - } - public EachContext(StatementContext ctx) { copyFrom(ctx); } - @Override - public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitEach(this); - else return visitor.visitChildren(this); - } - } - public static class ThrowContext extends StatementContext { - public TerminalNode THROW() { return getToken(PainlessParser.THROW, 0); } - public ExpressionContext expression() { - return getRuleContext(ExpressionContext.class,0); - } - public DelimiterContext delimiter() { - return getRuleContext(DelimiterContext.class,0); - } - public ThrowContext(StatementContext ctx) { copyFrom(ctx); } - @Override - public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitThrow(this); - else return visitor.visitChildren(this); - } - } - public static class ContinueContext extends StatementContext { - public TerminalNode CONTINUE() { return getToken(PainlessParser.CONTINUE, 0); } - public DelimiterContext delimiter() { - return getRuleContext(DelimiterContext.class,0); - } - public ContinueContext(StatementContext ctx) { copyFrom(ctx); } - @Override - public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitContinue(this); - else return visitor.visitChildren(this); - } - } - public static class TryContext extends StatementContext { - public TerminalNode TRY() { return getToken(PainlessParser.TRY, 0); } - public BlockContext block() { - return getRuleContext(BlockContext.class,0); - } - public List trap() { - return getRuleContexts(TrapContext.class); - } - public TrapContext trap(int i) { - return getRuleContext(TrapContext.class,i); - } - public TryContext(StatementContext ctx) { copyFrom(ctx); } - @Override - public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitTry(this); - else return visitor.visitChildren(this); - } - } - public static class ExprContext extends StatementContext { - public ExpressionContext expression() { - return getRuleContext(ExpressionContext.class,0); - } - public DelimiterContext delimiter() { - return getRuleContext(DelimiterContext.class,0); - } - public ExprContext(StatementContext ctx) { copyFrom(ctx); } - @Override - public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitExpr(this); - else return visitor.visitChildren(this); - } - } - public static class IfContext extends StatementContext { + public static class IfContext extends RstatementContext { public TerminalNode IF() { return getToken(PainlessParser.IF, 0); } public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } public ExpressionContext expression() { @@ -549,66 +536,73 @@ public TrailerContext trailer(int i) { 
return getRuleContext(TrailerContext.class,i); } public TerminalNode ELSE() { return getToken(PainlessParser.ELSE, 0); } - public IfContext(StatementContext ctx) { copyFrom(ctx); } + public IfContext(RstatementContext ctx) { copyFrom(ctx); } @Override public T accept(ParseTreeVisitor visitor) { if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitIf(this); else return visitor.visitChildren(this); } } - public static class ReturnContext extends StatementContext { - public TerminalNode RETURN() { return getToken(PainlessParser.RETURN, 0); } + public static class EachContext extends RstatementContext { + public TerminalNode FOR() { return getToken(PainlessParser.FOR, 0); } + public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } + public DecltypeContext decltype() { + return getRuleContext(DecltypeContext.class,0); + } + public TerminalNode ID() { return getToken(PainlessParser.ID, 0); } + public TerminalNode COLON() { return getToken(PainlessParser.COLON, 0); } public ExpressionContext expression() { return getRuleContext(ExpressionContext.class,0); } - public DelimiterContext delimiter() { - return getRuleContext(DelimiterContext.class,0); + public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } + public TrailerContext trailer() { + return getRuleContext(TrailerContext.class,0); } - public ReturnContext(StatementContext ctx) { copyFrom(ctx); } + public EachContext(RstatementContext ctx) { copyFrom(ctx); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitReturn(this); + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitEach(this); else return visitor.visitChildren(this); } } - public final StatementContext statement() throws RecognitionException { - StatementContext _localctx = new StatementContext(_ctx, getState()); - enterRule(_localctx, 6, RULE_statement); + public final RstatementContext rstatement() throws RecognitionException { + RstatementContext _localctx = new RstatementContext(_ctx, getState()); + enterRule(_localctx, 8, RULE_rstatement); int _la; try { int _alt; - setState(185); + setState(170); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,11,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,13,_ctx) ) { case 1: _localctx = new IfContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(99); + setState(110); match(IF); - setState(100); + setState(111); match(LP); - setState(101); + setState(112); expression(0); - setState(102); + setState(113); match(RP); - setState(103); + setState(114); trailer(); - setState(107); + setState(118); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,4,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,6,_ctx) ) { case 1: { - setState(104); + setState(115); match(ELSE); - setState(105); + setState(116); trailer(); } break; case 2: { - setState(106); + setState(117); if (!( _input.LA(1) != ELSE )) throw new FailedPredicateException(this, " _input.LA(1) != ELSE "); } break; @@ -619,15 +613,15 @@ public final StatementContext statement() throws RecognitionException { _localctx = new WhileContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(109); + setState(120); match(WHILE); - setState(110); + setState(121); match(LP); - setState(111); + setState(122); expression(0); - setState(112); + setState(123); match(RP); - setState(115); + setState(126); switch 
(_input.LA(1)) { case LBRACK: case LBRACE: @@ -660,13 +654,13 @@ public final StatementContext statement() throws RecognitionException { case TYPE: case ID: { - setState(113); + setState(124); trailer(); } break; case SEMICOLON: { - setState(114); + setState(125); empty(); } break; @@ -676,67 +670,47 @@ public final StatementContext statement() throws RecognitionException { } break; case 3: - _localctx = new DoContext(_localctx); - enterOuterAlt(_localctx, 3); - { - setState(117); - match(DO); - setState(118); - block(); - setState(119); - match(WHILE); - setState(120); - match(LP); - setState(121); - expression(0); - setState(122); - match(RP); - setState(123); - delimiter(); - } - break; - case 4: _localctx = new ForContext(_localctx); - enterOuterAlt(_localctx, 4); + enterOuterAlt(_localctx, 3); { - setState(125); + setState(128); match(FOR); - setState(126); + setState(129); match(LP); - setState(128); + setState(131); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { { - setState(127); + setState(130); initializer(); } } - setState(130); + setState(133); match(SEMICOLON); - setState(132); + setState(135); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { { - setState(131); + setState(134); expression(0); } } - setState(134); + setState(137); match(SEMICOLON); - setState(136); + setState(139); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { { - setState(135); + setState(138); afterthought(); } } - setState(138); - match(RP); setState(141); + match(RP); + setState(144); switch (_input.LA(1)) { case LBRACK: case LBRACE: @@ -769,13 +743,13 @@ public final StatementContext statement() throws RecognitionException { case TYPE: case ID: { - setState(139); + setState(142); trailer(); } break; case SEMICOLON: { - setState(140); + setState(143); empty(); } break; @@ -784,99 +758,57 @@ public final StatementContext statement() throws RecognitionException { } } break; - case 5: + case 4: _localctx = new EachContext(_localctx); - enterOuterAlt(_localctx, 5); + enterOuterAlt(_localctx, 4); { - setState(143); + setState(146); match(FOR); - setState(144); + setState(147); match(LP); - setState(145); + 
setState(148); decltype(); - setState(146); + setState(149); match(ID); - setState(147); + setState(150); match(COLON); - setState(148); + setState(151); expression(0); - setState(149); + setState(152); match(RP); - setState(150); + setState(153); trailer(); } break; - case 6: + case 5: _localctx = new IneachContext(_localctx); - enterOuterAlt(_localctx, 6); + enterOuterAlt(_localctx, 5); { - setState(152); + setState(155); match(FOR); - setState(153); + setState(156); match(LP); - setState(154); + setState(157); match(ID); - setState(155); + setState(158); match(IN); - setState(156); + setState(159); expression(0); - setState(157); - match(RP); - setState(158); - trailer(); - } - break; - case 7: - _localctx = new DeclContext(_localctx); - enterOuterAlt(_localctx, 7); - { setState(160); - declaration(); + match(RP); setState(161); - delimiter(); - } - break; - case 8: - _localctx = new ContinueContext(_localctx); - enterOuterAlt(_localctx, 8); - { - setState(163); - match(CONTINUE); - setState(164); - delimiter(); - } - break; - case 9: - _localctx = new BreakContext(_localctx); - enterOuterAlt(_localctx, 9); - { - setState(165); - match(BREAK); - setState(166); - delimiter(); - } - break; - case 10: - _localctx = new ReturnContext(_localctx); - enterOuterAlt(_localctx, 10); - { - setState(167); - match(RETURN); - setState(168); - expression(0); - setState(169); - delimiter(); + trailer(); } break; - case 11: + case 6: _localctx = new TryContext(_localctx); - enterOuterAlt(_localctx, 11); + enterOuterAlt(_localctx, 6); { - setState(171); + setState(163); match(TRY); - setState(172); + setState(164); block(); - setState(174); + setState(166); _errHandler.sync(this); _alt = 1; do { @@ -884,7 +816,7 @@ public final StatementContext statement() throws RecognitionException { case 1: { { - setState(173); + setState(165); trap(); } } @@ -892,32 +824,194 @@ public final StatementContext statement() throws RecognitionException { default: throw new NoViableAltException(this); } - setState(176); + setState(168); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,10,_ctx); + _alt = getInterpreter().adaptivePredict(_input,12,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); } break; - case 12: + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class DstatementContext extends ParserRuleContext { + public DstatementContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_dstatement; } + + public DstatementContext() { } + public void copyFrom(DstatementContext ctx) { + super.copyFrom(ctx); + } + } + public static class DeclContext extends DstatementContext { + public DeclarationContext declaration() { + return getRuleContext(DeclarationContext.class,0); + } + public DeclContext(DstatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitDecl(this); + else return visitor.visitChildren(this); + } + } + public static class BreakContext extends DstatementContext { + public TerminalNode BREAK() { return getToken(PainlessParser.BREAK, 0); } + public BreakContext(DstatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( 
visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitBreak(this); + else return visitor.visitChildren(this); + } + } + public static class ThrowContext extends DstatementContext { + public TerminalNode THROW() { return getToken(PainlessParser.THROW, 0); } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public ThrowContext(DstatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitThrow(this); + else return visitor.visitChildren(this); + } + } + public static class ContinueContext extends DstatementContext { + public TerminalNode CONTINUE() { return getToken(PainlessParser.CONTINUE, 0); } + public ContinueContext(DstatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitContinue(this); + else return visitor.visitChildren(this); + } + } + public static class ExprContext extends DstatementContext { + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public ExprContext(DstatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitExpr(this); + else return visitor.visitChildren(this); + } + } + public static class DoContext extends DstatementContext { + public TerminalNode DO() { return getToken(PainlessParser.DO, 0); } + public BlockContext block() { + return getRuleContext(BlockContext.class,0); + } + public TerminalNode WHILE() { return getToken(PainlessParser.WHILE, 0); } + public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } + public DoContext(DstatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitDo(this); + else return visitor.visitChildren(this); + } + } + public static class ReturnContext extends DstatementContext { + public TerminalNode RETURN() { return getToken(PainlessParser.RETURN, 0); } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public ReturnContext(DstatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitReturn(this); + else return visitor.visitChildren(this); + } + } + + public final DstatementContext dstatement() throws RecognitionException { + DstatementContext _localctx = new DstatementContext(_ctx, getState()); + enterRule(_localctx, 10, RULE_dstatement); + try { + setState(187); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,14,_ctx) ) { + case 1: + _localctx = new DoContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(172); + match(DO); + setState(173); + block(); + setState(174); + match(WHILE); + setState(175); + match(LP); + setState(176); + expression(0); + setState(177); + match(RP); + } + break; + case 2: + _localctx = new DeclContext(_localctx); + enterOuterAlt(_localctx, 2); + { + 
setState(179); + declaration(); + } + break; + case 3: + _localctx = new ContinueContext(_localctx); + enterOuterAlt(_localctx, 3); + { + setState(180); + match(CONTINUE); + } + break; + case 4: + _localctx = new BreakContext(_localctx); + enterOuterAlt(_localctx, 4); + { + setState(181); + match(BREAK); + } + break; + case 5: + _localctx = new ReturnContext(_localctx); + enterOuterAlt(_localctx, 5); + { + setState(182); + match(RETURN); + setState(183); + expression(0); + } + break; + case 6: _localctx = new ThrowContext(_localctx); - enterOuterAlt(_localctx, 12); + enterOuterAlt(_localctx, 6); { - setState(178); + setState(184); match(THROW); - setState(179); + setState(185); expression(0); - setState(180); - delimiter(); } break; - case 13: + case 7: _localctx = new ExprContext(_localctx); - enterOuterAlt(_localctx, 13); + enterOuterAlt(_localctx, 7); { - setState(182); + setState(186); expression(0); - setState(183); - delimiter(); } break; } @@ -953,14 +1047,14 @@ public T accept(ParseTreeVisitor visitor) { public final TrailerContext trailer() throws RecognitionException { TrailerContext _localctx = new TrailerContext(_ctx, getState()); - enterRule(_localctx, 8, RULE_trailer); + enterRule(_localctx, 12, RULE_trailer); try { - setState(189); + setState(191); switch (_input.LA(1)) { case LBRACK: enterOuterAlt(_localctx, 1); { - setState(187); + setState(189); block(); } break; @@ -995,7 +1089,7 @@ public final TrailerContext trailer() throws RecognitionException { case ID: enterOuterAlt(_localctx, 2); { - setState(188); + setState(190); statement(); } break; @@ -1023,6 +1117,9 @@ public List statement() { public StatementContext statement(int i) { return getRuleContext(StatementContext.class,i); } + public DstatementContext dstatement() { + return getRuleContext(DstatementContext.class,0); + } public BlockContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @@ -1036,28 +1133,40 @@ public T accept(ParseTreeVisitor visitor) { public final BlockContext block() throws RecognitionException { BlockContext _localctx = new BlockContext(_ctx, getState()); - enterRule(_localctx, 10, RULE_block); + enterRule(_localctx, 14, RULE_block); int _la; try { + int _alt; enterOuterAlt(_localctx, 1); { - setState(191); + setState(193); match(LBRACK); - setState(195); + setState(197); _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,16,_ctx); + while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { + if ( _alt==1 ) { + { + { + setState(194); + statement(); + } + } + } + setState(199); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,16,_ctx); + } + setState(201); _la = _input.LA(1); - while ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << IF) | (1L << WHILE) | (1L << DO) | (1L << FOR) | (1L << CONTINUE) | (1L << BREAK) | (1L << RETURN) | (1L << NEW) | (1L << TRY) | (1L << THROW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { - { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << DO) | (1L << CONTINUE) | (1L << BREAK) | (1L << RETURN) | (1L << NEW) | (1L << THROW) | (1L << 
BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { { - setState(192); - statement(); - } + setState(200); + dstatement(); } - setState(197); - _errHandler.sync(this); - _la = _input.LA(1); } - setState(198); + + setState(203); match(RBRACK); } } @@ -1087,11 +1196,11 @@ public T accept(ParseTreeVisitor visitor) { public final EmptyContext empty() throws RecognitionException { EmptyContext _localctx = new EmptyContext(_ctx, getState()); - enterRule(_localctx, 12, RULE_empty); + enterRule(_localctx, 16, RULE_empty); try { enterOuterAlt(_localctx, 1); { - setState(200); + setState(205); match(SEMICOLON); } } @@ -1126,22 +1235,22 @@ public T accept(ParseTreeVisitor visitor) { public final InitializerContext initializer() throws RecognitionException { InitializerContext _localctx = new InitializerContext(_ctx, getState()); - enterRule(_localctx, 14, RULE_initializer); + enterRule(_localctx, 18, RULE_initializer); try { - setState(204); + setState(209); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,14,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,18,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(202); + setState(207); declaration(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(203); + setState(208); expression(0); } break; @@ -1175,11 +1284,11 @@ public T accept(ParseTreeVisitor visitor) { public final AfterthoughtContext afterthought() throws RecognitionException { AfterthoughtContext _localctx = new AfterthoughtContext(_ctx, getState()); - enterRule(_localctx, 16, RULE_afterthought); + enterRule(_localctx, 20, RULE_afterthought); try { enterOuterAlt(_localctx, 1); { - setState(206); + setState(211); expression(0); } } @@ -1221,28 +1330,28 @@ public T accept(ParseTreeVisitor visitor) { public final DeclarationContext declaration() throws RecognitionException { DeclarationContext _localctx = new DeclarationContext(_ctx, getState()); - enterRule(_localctx, 18, RULE_declaration); + enterRule(_localctx, 22, RULE_declaration); int _la; try { enterOuterAlt(_localctx, 1); { - setState(208); + setState(213); decltype(); - setState(209); - declvar(); setState(214); + declvar(); + setState(219); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(210); + setState(215); match(COMMA); - setState(211); + setState(216); declvar(); } } - setState(216); + setState(221); _errHandler.sync(this); _la = _input.LA(1); } @@ -1282,30 +1391,30 @@ public T accept(ParseTreeVisitor visitor) { public final DecltypeContext decltype() throws RecognitionException { DecltypeContext _localctx = new DecltypeContext(_ctx, getState()); - enterRule(_localctx, 20, RULE_decltype); + enterRule(_localctx, 24, RULE_decltype); try { int _alt; enterOuterAlt(_localctx, 1); { - setState(217); - match(TYPE); setState(222); + match(TYPE); + setState(227); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,16,_ctx); + _alt = getInterpreter().adaptivePredict(_input,20,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(218); + setState(223); match(LBRACE); - setState(219); + setState(224); match(RBRACE); } } } - 
setState(224); + setState(229); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,16,_ctx); + _alt = getInterpreter().adaptivePredict(_input,20,_ctx); } } } @@ -1339,20 +1448,20 @@ public T accept(ParseTreeVisitor visitor) { public final DeclvarContext declvar() throws RecognitionException { DeclvarContext _localctx = new DeclvarContext(_ctx, getState()); - enterRule(_localctx, 22, RULE_declvar); + enterRule(_localctx, 26, RULE_declvar); int _la; try { enterOuterAlt(_localctx, 1); { - setState(225); + setState(230); match(ID); - setState(228); + setState(233); _la = _input.LA(1); if (_la==ASSIGN) { { - setState(226); + setState(231); match(ASSIGN); - setState(227); + setState(232); expression(0); } } @@ -1392,21 +1501,21 @@ public T accept(ParseTreeVisitor visitor) { public final TrapContext trap() throws RecognitionException { TrapContext _localctx = new TrapContext(_ctx, getState()); - enterRule(_localctx, 24, RULE_trap); + enterRule(_localctx, 28, RULE_trap); try { enterOuterAlt(_localctx, 1); { - setState(230); + setState(235); match(CATCH); - setState(231); + setState(236); match(LP); - setState(232); + setState(237); match(TYPE); - setState(233); + setState(238); match(ID); - setState(234); + setState(239); match(RP); - setState(235); + setState(240); block(); } } @@ -1421,47 +1530,6 @@ public final TrapContext trap() throws RecognitionException { return _localctx; } - public static class DelimiterContext extends ParserRuleContext { - public TerminalNode SEMICOLON() { return getToken(PainlessParser.SEMICOLON, 0); } - public TerminalNode EOF() { return getToken(PainlessParser.EOF, 0); } - public DelimiterContext(ParserRuleContext parent, int invokingState) { - super(parent, invokingState); - } - @Override public int getRuleIndex() { return RULE_delimiter; } - @Override - public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitDelimiter(this); - else return visitor.visitChildren(this); - } - } - - public final DelimiterContext delimiter() throws RecognitionException { - DelimiterContext _localctx = new DelimiterContext(_ctx, getState()); - enterRule(_localctx, 26, RULE_delimiter); - int _la; - try { - enterOuterAlt(_localctx, 1); - { - setState(237); - _la = _input.LA(1); - if ( !(_la==EOF || _la==SEMICOLON) ) { - _errHandler.recoverInline(this); - } else { - consume(); - } - } - } - catch (RecognitionException re) { - _localctx.exception = re; - _errHandler.reportError(this, re); - _errHandler.recover(this, re); - } - finally { - exitRule(); - } - return _localctx; - } - public static class ExpressionContext extends ParserRuleContext { public ExpressionContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); @@ -1631,8 +1699,8 @@ private ExpressionContext expression(int _p) throws RecognitionException { int _parentState = getState(); ExpressionContext _localctx = new ExpressionContext(_ctx, _parentState); ExpressionContext _prevctx = _localctx; - int _startState = 28; - enterRecursionRule(_localctx, 28, RULE_expression, _p); + int _startState = 30; + enterRecursionRule(_localctx, 30, RULE_expression, _p); int _la; try { int _alt; @@ -1643,35 +1711,35 @@ private ExpressionContext expression(int _p) throws RecognitionException { _ctx = _localctx; _prevctx = _localctx; - setState(240); + setState(243); unary(); } _ctx.stop = _input.LT(-1); - setState(292); + setState(295); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,19,_ctx); + 
_alt = getInterpreter().adaptivePredict(_input,23,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { if ( _parseListeners!=null ) triggerExitRuleEvent(); _prevctx = _localctx; { - setState(290); + setState(293); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,18,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,22,_ctx) ) { case 1: { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(242); + setState(245); if (!(precpred(_ctx, 15))) throw new FailedPredicateException(this, "precpred(_ctx, 15)"); - setState(243); + setState(246); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << MUL) | (1L << DIV) | (1L << REM))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(244); + setState(247); expression(16); } break; @@ -1679,16 +1747,16 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(245); + setState(248); if (!(precpred(_ctx, 14))) throw new FailedPredicateException(this, "precpred(_ctx, 14)"); - setState(246); + setState(249); _la = _input.LA(1); if ( !(_la==ADD || _la==SUB) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(247); + setState(250); expression(15); } break; @@ -1696,16 +1764,16 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(248); + setState(251); if (!(precpred(_ctx, 13))) throw new FailedPredicateException(this, "precpred(_ctx, 13)"); - setState(249); + setState(252); _la = _input.LA(1); if ( !(_la==FIND || _la==MATCH) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(250); + setState(253); expression(14); } break; @@ -1713,16 +1781,16 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(251); + setState(254); if (!(precpred(_ctx, 12))) throw new FailedPredicateException(this, "precpred(_ctx, 12)"); - setState(252); + setState(255); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LSH) | (1L << RSH) | (1L << USH))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(253); + setState(256); expression(13); } break; @@ -1730,16 +1798,16 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new CompContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(254); + setState(257); if (!(precpred(_ctx, 11))) throw new FailedPredicateException(this, "precpred(_ctx, 11)"); - setState(255); + setState(258); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LT) | (1L << LTE) | (1L << GT) | (1L << GTE))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(256); + setState(259); expression(12); } break; @@ -1747,16 +1815,16 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new CompContext(new 
ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(257); + setState(260); if (!(precpred(_ctx, 9))) throw new FailedPredicateException(this, "precpred(_ctx, 9)"); - setState(258); + setState(261); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << EQ) | (1L << EQR) | (1L << NE) | (1L << NER))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(259); + setState(262); expression(10); } break; @@ -1764,11 +1832,11 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(260); + setState(263); if (!(precpred(_ctx, 8))) throw new FailedPredicateException(this, "precpred(_ctx, 8)"); - setState(261); + setState(264); match(BWAND); - setState(262); + setState(265); expression(9); } break; @@ -1776,11 +1844,11 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(263); + setState(266); if (!(precpred(_ctx, 7))) throw new FailedPredicateException(this, "precpred(_ctx, 7)"); - setState(264); + setState(267); match(XOR); - setState(265); + setState(268); expression(8); } break; @@ -1788,11 +1856,11 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(266); + setState(269); if (!(precpred(_ctx, 6))) throw new FailedPredicateException(this, "precpred(_ctx, 6)"); - setState(267); + setState(270); match(BWOR); - setState(268); + setState(271); expression(7); } break; @@ -1800,11 +1868,11 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new BoolContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(269); + setState(272); if (!(precpred(_ctx, 5))) throw new FailedPredicateException(this, "precpred(_ctx, 5)"); - setState(270); + setState(273); match(BOOLAND); - setState(271); + setState(274); expression(6); } break; @@ -1812,11 +1880,11 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new BoolContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(272); + setState(275); if (!(precpred(_ctx, 4))) throw new FailedPredicateException(this, "precpred(_ctx, 4)"); - setState(273); + setState(276); match(BOOLOR); - setState(274); + setState(277); expression(5); } break; @@ -1824,15 +1892,15 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new ConditionalContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(275); + setState(278); if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, "precpred(_ctx, 3)"); - setState(276); + setState(279); match(COND); - setState(277); + setState(280); expression(0); - setState(278); + setState(281); match(COLON); - setState(279); + setState(282); expression(3); } break; @@ -1840,11 +1908,11 @@ private ExpressionContext expression(int _p) throws 
RecognitionException { { _localctx = new ElvisContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(281); + setState(284); if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)"); - setState(282); + setState(285); match(ELVIS); - setState(283); + setState(286); expression(2); } break; @@ -1852,16 +1920,16 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new AssignmentContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(284); + setState(287); if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)"); - setState(285); + setState(288); _la = _input.LA(1); if ( !(((((_la - 60)) & ~0x3f) == 0 && ((1L << (_la - 60)) & ((1L << (ASSIGN - 60)) | (1L << (AADD - 60)) | (1L << (ASUB - 60)) | (1L << (AMUL - 60)) | (1L << (ADIV - 60)) | (1L << (AREM - 60)) | (1L << (AAND - 60)) | (1L << (AXOR - 60)) | (1L << (AOR - 60)) | (1L << (ALSH - 60)) | (1L << (ARSH - 60)) | (1L << (AUSH - 60)))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(286); + setState(289); expression(1); } break; @@ -1869,20 +1937,20 @@ private ExpressionContext expression(int _p) throws RecognitionException { { _localctx = new InstanceofContext(new ExpressionContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_expression); - setState(287); + setState(290); if (!(precpred(_ctx, 10))) throw new FailedPredicateException(this, "precpred(_ctx, 10)"); - setState(288); + setState(291); match(INSTANCEOF); - setState(289); + setState(292); decltype(); } break; } } } - setState(294); + setState(297); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,19,_ctx); + _alt = getInterpreter().adaptivePredict(_input,23,_ctx); } } } @@ -1979,24 +2047,24 @@ public T accept(ParseTreeVisitor visitor) { public final UnaryContext unary() throws RecognitionException { UnaryContext _localctx = new UnaryContext(_ctx, getState()); - enterRule(_localctx, 30, RULE_unary); + enterRule(_localctx, 32, RULE_unary); int _la; try { - setState(308); + setState(311); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,20,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,24,_ctx) ) { case 1: _localctx = new PreContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(295); + setState(298); _la = _input.LA(1); if ( !(_la==INCR || _la==DECR) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(296); + setState(299); chain(); } break; @@ -2004,9 +2072,9 @@ public final UnaryContext unary() throws RecognitionException { _localctx = new PostContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(297); + setState(300); chain(); - setState(298); + setState(301); _la = _input.LA(1); if ( !(_la==INCR || _la==DECR) ) { _errHandler.recoverInline(this); @@ -2019,7 +2087,7 @@ public final UnaryContext unary() throws RecognitionException { _localctx = new ReadContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(300); + setState(303); chain(); } break; @@ -2027,14 +2095,14 @@ public final UnaryContext unary() throws RecognitionException { _localctx = new OperatorContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(301); + setState(304); _la = _input.LA(1); if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << 
SUB))) != 0)) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(302); + setState(305); unary(); } break; @@ -2042,13 +2110,13 @@ public final UnaryContext unary() throws RecognitionException { _localctx = new CastContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(303); + setState(306); match(LP); - setState(304); + setState(307); decltype(); - setState(305); + setState(308); match(RP); - setState(306); + setState(309); unary(); } break; @@ -2127,33 +2195,33 @@ public T accept(ParseTreeVisitor visitor) { public final ChainContext chain() throws RecognitionException { ChainContext _localctx = new ChainContext(_ctx, getState()); - enterRule(_localctx, 32, RULE_chain); + enterRule(_localctx, 34, RULE_chain); try { int _alt; - setState(326); + setState(329); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,23,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,27,_ctx) ) { case 1: _localctx = new DynamicContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(310); + setState(313); primary(); - setState(314); + setState(317); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,21,_ctx); + _alt = getInterpreter().adaptivePredict(_input,25,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(311); + setState(314); postfix(); } } } - setState(316); + setState(319); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,21,_ctx); + _alt = getInterpreter().adaptivePredict(_input,25,_ctx); } } break; @@ -2161,25 +2229,25 @@ public final ChainContext chain() throws RecognitionException { _localctx = new StaticContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(317); + setState(320); decltype(); - setState(318); + setState(321); postdot(); - setState(322); + setState(325); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,22,_ctx); + _alt = getInterpreter().adaptivePredict(_input,26,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(319); + setState(322); postfix(); } } } - setState(324); + setState(327); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,22,_ctx); + _alt = getInterpreter().adaptivePredict(_input,26,_ctx); } } break; @@ -2187,7 +2255,7 @@ public final ChainContext chain() throws RecognitionException { _localctx = new NewarrayContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(325); + setState(328); arrayinitializer(); } break; @@ -2344,21 +2412,21 @@ public T accept(ParseTreeVisitor visitor) { public final PrimaryContext primary() throws RecognitionException { PrimaryContext _localctx = new PrimaryContext(_ctx, getState()); - enterRule(_localctx, 34, RULE_primary); + enterRule(_localctx, 36, RULE_primary); int _la; try { - setState(346); + setState(349); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,24,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,28,_ctx) ) { case 1: _localctx = new PrecedenceContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(328); + setState(331); match(LP); - setState(329); + setState(332); expression(0); - setState(330); + setState(333); match(RP); } break; @@ -2366,7 +2434,7 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new NumericContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(332); + setState(335); _la = _input.LA(1); if ( !(((((_la - 72)) & ~0x3f) == 0 && ((1L 
<< (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)))) != 0)) ) { _errHandler.recoverInline(this); @@ -2379,7 +2447,7 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new TrueContext(_localctx); enterOuterAlt(_localctx, 3); { - setState(333); + setState(336); match(TRUE); } break; @@ -2387,7 +2455,7 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new FalseContext(_localctx); enterOuterAlt(_localctx, 4); { - setState(334); + setState(337); match(FALSE); } break; @@ -2395,7 +2463,7 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new NullContext(_localctx); enterOuterAlt(_localctx, 5); { - setState(335); + setState(338); match(NULL); } break; @@ -2403,7 +2471,7 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new StringContext(_localctx); enterOuterAlt(_localctx, 6); { - setState(336); + setState(339); match(STRING); } break; @@ -2411,7 +2479,7 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new RegexContext(_localctx); enterOuterAlt(_localctx, 7); { - setState(337); + setState(340); match(REGEX); } break; @@ -2419,7 +2487,7 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new ListinitContext(_localctx); enterOuterAlt(_localctx, 8); { - setState(338); + setState(341); listinitializer(); } break; @@ -2427,7 +2495,7 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new MapinitContext(_localctx); enterOuterAlt(_localctx, 9); { - setState(339); + setState(342); mapinitializer(); } break; @@ -2435,7 +2503,7 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new VariableContext(_localctx); enterOuterAlt(_localctx, 10); { - setState(340); + setState(343); match(ID); } break; @@ -2443,9 +2511,9 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new CalllocalContext(_localctx); enterOuterAlt(_localctx, 11); { - setState(341); + setState(344); match(ID); - setState(342); + setState(345); arguments(); } break; @@ -2453,11 +2521,11 @@ public final PrimaryContext primary() throws RecognitionException { _localctx = new NewobjectContext(_localctx); enterOuterAlt(_localctx, 12); { - setState(343); + setState(346); match(NEW); - setState(344); + setState(347); match(TYPE); - setState(345); + setState(348); arguments(); } break; @@ -2497,29 +2565,29 @@ public T accept(ParseTreeVisitor visitor) { public final PostfixContext postfix() throws RecognitionException { PostfixContext _localctx = new PostfixContext(_ctx, getState()); - enterRule(_localctx, 36, RULE_postfix); + enterRule(_localctx, 38, RULE_postfix); try { - setState(351); + setState(354); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,25,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,29,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(348); + setState(351); callinvoke(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(349); + setState(352); fieldaccess(); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(350); + setState(353); braceaccess(); } break; @@ -2556,22 +2624,22 @@ public T accept(ParseTreeVisitor visitor) { public final PostdotContext postdot() throws RecognitionException { PostdotContext _localctx = new PostdotContext(_ctx, getState()); - enterRule(_localctx, 38, RULE_postdot); + enterRule(_localctx, 40, 
RULE_postdot); try { - setState(355); + setState(358); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,26,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,30,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { - setState(353); + setState(356); callinvoke(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(354); + setState(357); fieldaccess(); } break; @@ -2608,21 +2676,21 @@ public T accept(ParseTreeVisitor visitor) { public final CallinvokeContext callinvoke() throws RecognitionException { CallinvokeContext _localctx = new CallinvokeContext(_ctx, getState()); - enterRule(_localctx, 40, RULE_callinvoke); + enterRule(_localctx, 42, RULE_callinvoke); int _la; try { enterOuterAlt(_localctx, 1); { - setState(357); + setState(360); _la = _input.LA(1); if ( !(_la==DOT || _la==NSDOT) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(358); + setState(361); match(DOTID); - setState(359); + setState(362); arguments(); } } @@ -2655,19 +2723,19 @@ public T accept(ParseTreeVisitor visitor) { public final FieldaccessContext fieldaccess() throws RecognitionException { FieldaccessContext _localctx = new FieldaccessContext(_ctx, getState()); - enterRule(_localctx, 42, RULE_fieldaccess); + enterRule(_localctx, 44, RULE_fieldaccess); int _la; try { enterOuterAlt(_localctx, 1); { - setState(361); + setState(364); _la = _input.LA(1); if ( !(_la==DOT || _la==NSDOT) ) { _errHandler.recoverInline(this); } else { consume(); } - setState(362); + setState(365); _la = _input.LA(1); if ( !(_la==DOTINTEGER || _la==DOTID) ) { _errHandler.recoverInline(this); @@ -2706,15 +2774,15 @@ public T accept(ParseTreeVisitor visitor) { public final BraceaccessContext braceaccess() throws RecognitionException { BraceaccessContext _localctx = new BraceaccessContext(_ctx, getState()); - enterRule(_localctx, 44, RULE_braceaccess); + enterRule(_localctx, 46, RULE_braceaccess); try { enterOuterAlt(_localctx, 1); { - setState(364); + setState(367); match(LBRACE); - setState(365); + setState(368); expression(0); - setState(366); + setState(369); match(RBRACE); } } @@ -2786,7 +2854,6 @@ public List expression() { public ExpressionContext expression(int i) { return getRuleContext(ExpressionContext.class,i); } - public TerminalNode SEMICOLON() { return getToken(PainlessParser.SEMICOLON, 0); } public List postfix() { return getRuleContexts(PostfixContext.class); } @@ -2807,22 +2874,22 @@ public T accept(ParseTreeVisitor visitor) { public final ArrayinitializerContext arrayinitializer() throws RecognitionException { ArrayinitializerContext _localctx = new ArrayinitializerContext(_ctx, getState()); - enterRule(_localctx, 46, RULE_arrayinitializer); + enterRule(_localctx, 48, RULE_arrayinitializer); int _la; try { int _alt; setState(412); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,34,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,37,_ctx) ) { case 1: _localctx = new NewstandardarrayContext(_localctx); enterOuterAlt(_localctx, 1); { - setState(368); + setState(371); match(NEW); - setState(369); + setState(372); match(TYPE); - setState(374); + setState(377); _errHandler.sync(this); _alt = 1; do { @@ -2830,11 +2897,11 @@ public final ArrayinitializerContext arrayinitializer() throws RecognitionExcept case 1: { { - setState(370); + setState(373); match(LBRACE); - setState(371); + setState(374); expression(0); - setState(372); + setState(375); match(RBRACE); } } @@ -2842,32 +2909,32 @@ public final ArrayinitializerContext arrayinitializer() 
throws RecognitionExcept default: throw new NoViableAltException(this); } - setState(376); + setState(379); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,27,_ctx); + _alt = getInterpreter().adaptivePredict(_input,31,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); - setState(385); + setState(388); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,29,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,33,_ctx) ) { case 1: { - setState(378); + setState(381); postdot(); - setState(382); + setState(385); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,28,_ctx); + _alt = getInterpreter().adaptivePredict(_input,32,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { { - setState(379); + setState(382); postfix(); } } } - setState(384); + setState(387); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,28,_ctx); + _alt = getInterpreter().adaptivePredict(_input,32,_ctx); } } break; @@ -2878,55 +2945,46 @@ public final ArrayinitializerContext arrayinitializer() throws RecognitionExcept _localctx = new NewinitializedarrayContext(_localctx); enterOuterAlt(_localctx, 2); { - setState(387); + setState(390); match(NEW); - setState(388); + setState(391); match(TYPE); - setState(389); + setState(392); match(LBRACE); - setState(390); + setState(393); match(RBRACE); - setState(391); + setState(394); match(LBRACK); - setState(400); + setState(403); _la = _input.LA(1); if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { { - setState(392); + setState(395); expression(0); - setState(397); + setState(400); _errHandler.sync(this); _la = _input.LA(1); while (_la==COMMA) { { { - setState(393); + setState(396); match(COMMA); - setState(394); + setState(397); expression(0); } } - setState(399); + setState(402); _errHandler.sync(this); _la = _input.LA(1); } } } - setState(403); - _la = _input.LA(1); - if (_la==SEMICOLON) { - { - setState(402); - match(SEMICOLON); - } - } - setState(405); match(RBRACK); setState(409); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,33,_ctx); + _alt = getInterpreter().adaptivePredict(_input,36,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { if ( _alt==1 ) { { @@ -2938,7 +2996,7 @@ public final ArrayinitializerContext arrayinitializer() throws RecognitionExcept } setState(411); _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,33,_ctx); + _alt = getInterpreter().adaptivePredict(_input,36,_ctx); } } break; @@ -2981,12 +3039,12 @@ public T accept(ParseTreeVisitor visitor) { public final ListinitializerContext listinitializer() throws RecognitionException { ListinitializerContext _localctx = new ListinitializerContext(_ctx, getState()); - enterRule(_localctx, 48, RULE_listinitializer); + enterRule(_localctx, 50, RULE_listinitializer); int _la; try { setState(427); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,36,_ctx) ) { + switch ( 
getInterpreter().adaptivePredict(_input,39,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { @@ -3063,12 +3121,12 @@ public T accept(ParseTreeVisitor visitor) { public final MapinitializerContext mapinitializer() throws RecognitionException { MapinitializerContext _localctx = new MapinitializerContext(_ctx, getState()); - enterRule(_localctx, 50, RULE_mapinitializer); + enterRule(_localctx, 52, RULE_mapinitializer); int _la; try { setState(443); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,38,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,41,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { @@ -3141,7 +3199,7 @@ public T accept(ParseTreeVisitor visitor) { public final MaptokenContext maptoken() throws RecognitionException { MaptokenContext _localctx = new MaptokenContext(_ctx, getState()); - enterRule(_localctx, 52, RULE_maptoken); + enterRule(_localctx, 54, RULE_maptoken); try { enterOuterAlt(_localctx, 1); { @@ -3190,7 +3248,7 @@ public T accept(ParseTreeVisitor visitor) { public final ArgumentsContext arguments() throws RecognitionException { ArgumentsContext _localctx = new ArgumentsContext(_ctx, getState()); - enterRule(_localctx, 54, RULE_arguments); + enterRule(_localctx, 56, RULE_arguments); int _la; try { enterOuterAlt(_localctx, 1); @@ -3262,11 +3320,11 @@ public T accept(ParseTreeVisitor visitor) { public final ArgumentContext argument() throws RecognitionException { ArgumentContext _localctx = new ArgumentContext(_ctx, getState()); - enterRule(_localctx, 56, RULE_argument); + enterRule(_localctx, 58, RULE_argument); try { setState(465); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,41,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,44,_ctx) ) { case 1: enterOuterAlt(_localctx, 1); { @@ -3334,7 +3392,7 @@ public T accept(ParseTreeVisitor visitor) { public final LambdaContext lambda() throws RecognitionException { LambdaContext _localctx = new LambdaContext(_ctx, getState()); - enterRule(_localctx, 58, RULE_lambda); + enterRule(_localctx, 60, RULE_lambda); int _la; try { enterOuterAlt(_localctx, 1); @@ -3453,7 +3511,7 @@ public T accept(ParseTreeVisitor visitor) { public final LamtypeContext lamtype() throws RecognitionException { LamtypeContext _localctx = new LamtypeContext(_ctx, getState()); - enterRule(_localctx, 60, RULE_lamtype); + enterRule(_localctx, 62, RULE_lamtype); int _la; try { enterOuterAlt(_localctx, 1); @@ -3544,11 +3602,11 @@ public T accept(ParseTreeVisitor visitor) { public final FuncrefContext funcref() throws RecognitionException { FuncrefContext _localctx = new FuncrefContext(_ctx, getState()); - enterRule(_localctx, 62, RULE_funcref); + enterRule(_localctx, 64, RULE_funcref); try { setState(505); _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,47,_ctx) ) { + switch ( getInterpreter().adaptivePredict(_input,50,_ctx) ) { case 1: _localctx = new ClassfuncrefContext(_localctx); enterOuterAlt(_localctx, 1); @@ -3612,14 +3670,14 @@ public final FuncrefContext funcref() throws RecognitionException { public boolean sempred(RuleContext _localctx, int ruleIndex, int predIndex) { switch (ruleIndex) { - case 3: - return statement_sempred((StatementContext)_localctx, predIndex); - case 14: + case 4: + return rstatement_sempred((RstatementContext)_localctx, predIndex); + case 15: return expression_sempred((ExpressionContext)_localctx, predIndex); } return true; } - private boolean statement_sempred(StatementContext _localctx, int predIndex) { + private boolean 
rstatement_sempred(RstatementContext _localctx, int predIndex) { switch (predIndex) { case 0: return _input.LA(1) != ELSE ; @@ -3668,196 +3726,195 @@ private boolean expression_sempred(ExpressionContext _localctx, int predIndex) { "\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+ "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+ "\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!"+ - "\t!\3\2\7\2D\n\2\f\2\16\2G\13\2\3\2\7\2J\n\2\f\2\16\2M\13\2\3\2\3\2\3"+ - "\3\3\3\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4]\n\4\f\4\16\4`\13\4"+ - "\5\4b\n\4\3\4\3\4\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\5\5n\n\5\3\5\3\5\3\5"+ - "\3\5\3\5\3\5\5\5v\n\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\5\5"+ - "\u0083\n\5\3\5\3\5\5\5\u0087\n\5\3\5\3\5\5\5\u008b\n\5\3\5\3\5\3\5\5\5"+ - "\u0090\n\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5"+ - "\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\3\5\6\5\u00b1"+ - "\n\5\r\5\16\5\u00b2\3\5\3\5\3\5\3\5\3\5\3\5\3\5\5\5\u00bc\n\5\3\6\3\6"+ - "\5\6\u00c0\n\6\3\7\3\7\7\7\u00c4\n\7\f\7\16\7\u00c7\13\7\3\7\3\7\3\b\3"+ - "\b\3\t\3\t\5\t\u00cf\n\t\3\n\3\n\3\13\3\13\3\13\3\13\7\13\u00d7\n\13\f"+ - "\13\16\13\u00da\13\13\3\f\3\f\3\f\7\f\u00df\n\f\f\f\16\f\u00e2\13\f\3"+ - "\r\3\r\3\r\5\r\u00e7\n\r\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\17\3\17"+ - "\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20"+ - "\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20"+ - "\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20"+ - "\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\7\20\u0125\n\20\f\20\16"+ - "\20\u0128\13\20\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ - "\3\21\3\21\5\21\u0137\n\21\3\22\3\22\7\22\u013b\n\22\f\22\16\22\u013e"+ - "\13\22\3\22\3\22\3\22\7\22\u0143\n\22\f\22\16\22\u0146\13\22\3\22\5\22"+ - "\u0149\n\22\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23"+ - "\3\23\3\23\3\23\3\23\3\23\3\23\5\23\u015d\n\23\3\24\3\24\3\24\5\24\u0162"+ - "\n\24\3\25\3\25\5\25\u0166\n\25\3\26\3\26\3\26\3\26\3\27\3\27\3\27\3\30"+ - "\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\31\3\31\6\31\u0179\n\31\r\31\16"+ - "\31\u017a\3\31\3\31\7\31\u017f\n\31\f\31\16\31\u0182\13\31\5\31\u0184"+ - "\n\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31\7\31\u018e\n\31\f\31\16"+ - "\31\u0191\13\31\5\31\u0193\n\31\3\31\5\31\u0196\n\31\3\31\3\31\7\31\u019a"+ - "\n\31\f\31\16\31\u019d\13\31\5\31\u019f\n\31\3\32\3\32\3\32\3\32\7\32"+ - "\u01a5\n\32\f\32\16\32\u01a8\13\32\3\32\3\32\3\32\3\32\5\32\u01ae\n\32"+ - "\3\33\3\33\3\33\3\33\7\33\u01b4\n\33\f\33\16\33\u01b7\13\33\3\33\3\33"+ - "\3\33\3\33\3\33\5\33\u01be\n\33\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35"+ - "\7\35\u01c8\n\35\f\35\16\35\u01cb\13\35\5\35\u01cd\n\35\3\35\3\35\3\36"+ - "\3\36\3\36\5\36\u01d4\n\36\3\37\3\37\3\37\3\37\3\37\7\37\u01db\n\37\f"+ - "\37\16\37\u01de\13\37\5\37\u01e0\n\37\3\37\5\37\u01e3\n\37\3\37\3\37\3"+ - "\37\5\37\u01e8\n\37\3 \5 \u01eb\n \3 \3 \3!\3!\3!\3!\3!\3!\3!\3!\3!\3"+ - "!\3!\3!\3!\5!\u01fc\n!\3!\2\3\36\"\2\4\6\b\n\f\16\20\22\24\26\30\32\34"+ - "\36 \"$&(*,.\60\62\64\668:<>@\2\17\3\3\16\16\3\2 \"\3\2#$\3\2:;\3\2%\'"+ - "\3\2(+\3\2,/\3\2>I\3\2<=\4\2\36\37#$\3\2JM\3\2\13\f\3\2UV\u0237\2E\3\2"+ - "\2\2\4P\3\2\2\2\6U\3\2\2\2\b\u00bb\3\2\2\2\n\u00bf\3\2\2\2\f\u00c1\3\2"+ - "\2\2\16\u00ca\3\2\2\2\20\u00ce\3\2\2\2\22\u00d0\3\2\2\2\24\u00d2\3\2\2"+ - "\2\26\u00db\3\2\2\2\30\u00e3\3\2\2\2\32\u00e8\3\2\2\2\34\u00ef\3\2\2\2"+ - "\36\u00f1\3\2\2\2 
\u0136\3\2\2\2\"\u0148\3\2\2\2$\u015c\3\2\2\2&\u0161"+ - "\3\2\2\2(\u0165\3\2\2\2*\u0167\3\2\2\2,\u016b\3\2\2\2.\u016e\3\2\2\2\60"+ - "\u019e\3\2\2\2\62\u01ad\3\2\2\2\64\u01bd\3\2\2\2\66\u01bf\3\2\2\28\u01c3"+ - "\3\2\2\2:\u01d3\3\2\2\2<\u01e2\3\2\2\2>\u01ea\3\2\2\2@\u01fb\3\2\2\2B"+ - "D\5\4\3\2CB\3\2\2\2DG\3\2\2\2EC\3\2\2\2EF\3\2\2\2FK\3\2\2\2GE\3\2\2\2"+ - "HJ\5\b\5\2IH\3\2\2\2JM\3\2\2\2KI\3\2\2\2KL\3\2\2\2LN\3\2\2\2MK\3\2\2\2"+ - "NO\7\2\2\3O\3\3\2\2\2PQ\5\26\f\2QR\7T\2\2RS\5\6\4\2ST\5\f\7\2T\5\3\2\2"+ - "\2Ua\7\t\2\2VW\5\26\f\2W^\7T\2\2XY\7\r\2\2YZ\5\26\f\2Z[\7T\2\2[]\3\2\2"+ - "\2\\X\3\2\2\2]`\3\2\2\2^\\\3\2\2\2^_\3\2\2\2_b\3\2\2\2`^\3\2\2\2aV\3\2"+ - "\2\2ab\3\2\2\2bc\3\2\2\2cd\7\n\2\2d\7\3\2\2\2ef\7\17\2\2fg\7\t\2\2gh\5"+ - "\36\20\2hi\7\n\2\2im\5\n\6\2jk\7\21\2\2kn\5\n\6\2ln\6\5\2\2mj\3\2\2\2"+ - "ml\3\2\2\2n\u00bc\3\2\2\2op\7\22\2\2pq\7\t\2\2qr\5\36\20\2ru\7\n\2\2s"+ - "v\5\n\6\2tv\5\16\b\2us\3\2\2\2ut\3\2\2\2v\u00bc\3\2\2\2wx\7\23\2\2xy\5"+ - "\f\7\2yz\7\22\2\2z{\7\t\2\2{|\5\36\20\2|}\7\n\2\2}~\5\34\17\2~\u00bc\3"+ - "\2\2\2\177\u0080\7\24\2\2\u0080\u0082\7\t\2\2\u0081\u0083\5\20\t\2\u0082"+ - "\u0081\3\2\2\2\u0082\u0083\3\2\2\2\u0083\u0084\3\2\2\2\u0084\u0086\7\16"+ - "\2\2\u0085\u0087\5\36\20\2\u0086\u0085\3\2\2\2\u0086\u0087\3\2\2\2\u0087"+ - "\u0088\3\2\2\2\u0088\u008a\7\16\2\2\u0089\u008b\5\22\n\2\u008a\u0089\3"+ - "\2\2\2\u008a\u008b\3\2\2\2\u008b\u008c\3\2\2\2\u008c\u008f\7\n\2\2\u008d"+ - "\u0090\5\n\6\2\u008e\u0090\5\16\b\2\u008f\u008d\3\2\2\2\u008f\u008e\3"+ - "\2\2\2\u0090\u00bc\3\2\2\2\u0091\u0092\7\24\2\2\u0092\u0093\7\t\2\2\u0093"+ - "\u0094\5\26\f\2\u0094\u0095\7T\2\2\u0095\u0096\7\66\2\2\u0096\u0097\5"+ - "\36\20\2\u0097\u0098\7\n\2\2\u0098\u0099\5\n\6\2\u0099\u00bc\3\2\2\2\u009a"+ - "\u009b\7\24\2\2\u009b\u009c\7\t\2\2\u009c\u009d\7T\2\2\u009d\u009e\7\20"+ - "\2\2\u009e\u009f\5\36\20\2\u009f\u00a0\7\n\2\2\u00a0\u00a1\5\n\6\2\u00a1"+ - "\u00bc\3\2\2\2\u00a2\u00a3\5\24\13\2\u00a3\u00a4\5\34\17\2\u00a4\u00bc"+ - "\3\2\2\2\u00a5\u00a6\7\25\2\2\u00a6\u00bc\5\34\17\2\u00a7\u00a8\7\26\2"+ - "\2\u00a8\u00bc\5\34\17\2\u00a9\u00aa\7\27\2\2\u00aa\u00ab\5\36\20\2\u00ab"+ - "\u00ac\5\34\17\2\u00ac\u00bc\3\2\2\2\u00ad\u00ae\7\31\2\2\u00ae\u00b0"+ - "\5\f\7\2\u00af\u00b1\5\32\16\2\u00b0\u00af\3\2\2\2\u00b1\u00b2\3\2\2\2"+ - "\u00b2\u00b0\3\2\2\2\u00b2\u00b3\3\2\2\2\u00b3\u00bc\3\2\2\2\u00b4\u00b5"+ - "\7\33\2\2\u00b5\u00b6\5\36\20\2\u00b6\u00b7\5\34\17\2\u00b7\u00bc\3\2"+ - "\2\2\u00b8\u00b9\5\36\20\2\u00b9\u00ba\5\34\17\2\u00ba\u00bc\3\2\2\2\u00bb"+ - "e\3\2\2\2\u00bbo\3\2\2\2\u00bbw\3\2\2\2\u00bb\177\3\2\2\2\u00bb\u0091"+ - "\3\2\2\2\u00bb\u009a\3\2\2\2\u00bb\u00a2\3\2\2\2\u00bb\u00a5\3\2\2\2\u00bb"+ - "\u00a7\3\2\2\2\u00bb\u00a9\3\2\2\2\u00bb\u00ad\3\2\2\2\u00bb\u00b4\3\2"+ - "\2\2\u00bb\u00b8\3\2\2\2\u00bc\t\3\2\2\2\u00bd\u00c0\5\f\7\2\u00be\u00c0"+ - "\5\b\5\2\u00bf\u00bd\3\2\2\2\u00bf\u00be\3\2\2\2\u00c0\13\3\2\2\2\u00c1"+ - "\u00c5\7\5\2\2\u00c2\u00c4\5\b\5\2\u00c3\u00c2\3\2\2\2\u00c4\u00c7\3\2"+ - "\2\2\u00c5\u00c3\3\2\2\2\u00c5\u00c6\3\2\2\2\u00c6\u00c8\3\2\2\2\u00c7"+ - "\u00c5\3\2\2\2\u00c8\u00c9\7\6\2\2\u00c9\r\3\2\2\2\u00ca\u00cb\7\16\2"+ - "\2\u00cb\17\3\2\2\2\u00cc\u00cf\5\24\13\2\u00cd\u00cf\5\36\20\2\u00ce"+ - "\u00cc\3\2\2\2\u00ce\u00cd\3\2\2\2\u00cf\21\3\2\2\2\u00d0\u00d1\5\36\20"+ - "\2\u00d1\23\3\2\2\2\u00d2\u00d3\5\26\f\2\u00d3\u00d8\5\30\r\2\u00d4\u00d5"+ - "\7\r\2\2\u00d5\u00d7\5\30\r\2\u00d6\u00d4\3\2\2\2\u00d7\u00da\3\2\2\2"+ - "\u00d8\u00d6\3\2\2\2\u00d8\u00d9\3\2\2\2\u00d9\25\3\2\2\2\u00da\u00d8"+ - 
"\3\2\2\2\u00db\u00e0\7S\2\2\u00dc\u00dd\7\7\2\2\u00dd\u00df\7\b\2\2\u00de"+ - "\u00dc\3\2\2\2\u00df\u00e2\3\2\2\2\u00e0\u00de\3\2\2\2\u00e0\u00e1\3\2"+ - "\2\2\u00e1\27\3\2\2\2\u00e2\u00e0\3\2\2\2\u00e3\u00e6\7T\2\2\u00e4\u00e5"+ - "\7>\2\2\u00e5\u00e7\5\36\20\2\u00e6\u00e4\3\2\2\2\u00e6\u00e7\3\2\2\2"+ - "\u00e7\31\3\2\2\2\u00e8\u00e9\7\32\2\2\u00e9\u00ea\7\t\2\2\u00ea\u00eb"+ - "\7S\2\2\u00eb\u00ec\7T\2\2\u00ec\u00ed\7\n\2\2\u00ed\u00ee\5\f\7\2\u00ee"+ - "\33\3\2\2\2\u00ef\u00f0\t\2\2\2\u00f0\35\3\2\2\2\u00f1\u00f2\b\20\1\2"+ - "\u00f2\u00f3\5 \21\2\u00f3\u0126\3\2\2\2\u00f4\u00f5\f\21\2\2\u00f5\u00f6"+ - "\t\3\2\2\u00f6\u0125\5\36\20\22\u00f7\u00f8\f\20\2\2\u00f8\u00f9\t\4\2"+ - "\2\u00f9\u0125\5\36\20\21\u00fa\u00fb\f\17\2\2\u00fb\u00fc\t\5\2\2\u00fc"+ - "\u0125\5\36\20\20\u00fd\u00fe\f\16\2\2\u00fe\u00ff\t\6\2\2\u00ff\u0125"+ - "\5\36\20\17\u0100\u0101\f\r\2\2\u0101\u0102\t\7\2\2\u0102\u0125\5\36\20"+ - "\16\u0103\u0104\f\13\2\2\u0104\u0105\t\b\2\2\u0105\u0125\5\36\20\f\u0106"+ - "\u0107\f\n\2\2\u0107\u0108\7\60\2\2\u0108\u0125\5\36\20\13\u0109\u010a"+ - "\f\t\2\2\u010a\u010b\7\61\2\2\u010b\u0125\5\36\20\n\u010c\u010d\f\b\2"+ - "\2\u010d\u010e\7\62\2\2\u010e\u0125\5\36\20\t\u010f\u0110\f\7\2\2\u0110"+ - "\u0111\7\63\2\2\u0111\u0125\5\36\20\b\u0112\u0113\f\6\2\2\u0113\u0114"+ - "\7\64\2\2\u0114\u0125\5\36\20\7\u0115\u0116\f\5\2\2\u0116\u0117\7\65\2"+ - "\2\u0117\u0118\5\36\20\2\u0118\u0119\7\66\2\2\u0119\u011a\5\36\20\5\u011a"+ - "\u0125\3\2\2\2\u011b\u011c\f\4\2\2\u011c\u011d\7\67\2\2\u011d\u0125\5"+ - "\36\20\4\u011e\u011f\f\3\2\2\u011f\u0120\t\t\2\2\u0120\u0125\5\36\20\3"+ - "\u0121\u0122\f\f\2\2\u0122\u0123\7\35\2\2\u0123\u0125\5\26\f\2\u0124\u00f4"+ - "\3\2\2\2\u0124\u00f7\3\2\2\2\u0124\u00fa\3\2\2\2\u0124\u00fd\3\2\2\2\u0124"+ - "\u0100\3\2\2\2\u0124\u0103\3\2\2\2\u0124\u0106\3\2\2\2\u0124\u0109\3\2"+ - "\2\2\u0124\u010c\3\2\2\2\u0124\u010f\3\2\2\2\u0124\u0112\3\2\2\2\u0124"+ - "\u0115\3\2\2\2\u0124\u011b\3\2\2\2\u0124\u011e\3\2\2\2\u0124\u0121\3\2"+ - "\2\2\u0125\u0128\3\2\2\2\u0126\u0124\3\2\2\2\u0126\u0127\3\2\2\2\u0127"+ - "\37\3\2\2\2\u0128\u0126\3\2\2\2\u0129\u012a\t\n\2\2\u012a\u0137\5\"\22"+ - "\2\u012b\u012c\5\"\22\2\u012c\u012d\t\n\2\2\u012d\u0137\3\2\2\2\u012e"+ - "\u0137\5\"\22\2\u012f\u0130\t\13\2\2\u0130\u0137\5 \21\2\u0131\u0132\7"+ - "\t\2\2\u0132\u0133\5\26\f\2\u0133\u0134\7\n\2\2\u0134\u0135\5 \21\2\u0135"+ - "\u0137\3\2\2\2\u0136\u0129\3\2\2\2\u0136\u012b\3\2\2\2\u0136\u012e\3\2"+ - "\2\2\u0136\u012f\3\2\2\2\u0136\u0131\3\2\2\2\u0137!\3\2\2\2\u0138\u013c"+ - "\5$\23\2\u0139\u013b\5&\24\2\u013a\u0139\3\2\2\2\u013b\u013e\3\2\2\2\u013c"+ - "\u013a\3\2\2\2\u013c\u013d\3\2\2\2\u013d\u0149\3\2\2\2\u013e\u013c\3\2"+ - "\2\2\u013f\u0140\5\26\f\2\u0140\u0144\5(\25\2\u0141\u0143\5&\24\2\u0142"+ - "\u0141\3\2\2\2\u0143\u0146\3\2\2\2\u0144\u0142\3\2\2\2\u0144\u0145\3\2"+ - "\2\2\u0145\u0149\3\2\2\2\u0146\u0144\3\2\2\2\u0147\u0149\5\60\31\2\u0148"+ - "\u0138\3\2\2\2\u0148\u013f\3\2\2\2\u0148\u0147\3\2\2\2\u0149#\3\2\2\2"+ - "\u014a\u014b\7\t\2\2\u014b\u014c\5\36\20\2\u014c\u014d\7\n\2\2\u014d\u015d"+ - "\3\2\2\2\u014e\u015d\t\f\2\2\u014f\u015d\7P\2\2\u0150\u015d\7Q\2\2\u0151"+ - "\u015d\7R\2\2\u0152\u015d\7N\2\2\u0153\u015d\7O\2\2\u0154\u015d\5\62\32"+ - "\2\u0155\u015d\5\64\33\2\u0156\u015d\7T\2\2\u0157\u0158\7T\2\2\u0158\u015d"+ - "\58\35\2\u0159\u015a\7\30\2\2\u015a\u015b\7S\2\2\u015b\u015d\58\35\2\u015c"+ - "\u014a\3\2\2\2\u015c\u014e\3\2\2\2\u015c\u014f\3\2\2\2\u015c\u0150\3\2"+ - "\2\2\u015c\u0151\3\2\2\2\u015c\u0152\3\2\2\2\u015c\u0153\3\2\2\2\u015c"+ - 
"\u0154\3\2\2\2\u015c\u0155\3\2\2\2\u015c\u0156\3\2\2\2\u015c\u0157\3\2"+ - "\2\2\u015c\u0159\3\2\2\2\u015d%\3\2\2\2\u015e\u0162\5*\26\2\u015f\u0162"+ - "\5,\27\2\u0160\u0162\5.\30\2\u0161\u015e\3\2\2\2\u0161\u015f\3\2\2\2\u0161"+ - "\u0160\3\2\2\2\u0162\'\3\2\2\2\u0163\u0166\5*\26\2\u0164\u0166\5,\27\2"+ - "\u0165\u0163\3\2\2\2\u0165\u0164\3\2\2\2\u0166)\3\2\2\2\u0167\u0168\t"+ - "\r\2\2\u0168\u0169\7V\2\2\u0169\u016a\58\35\2\u016a+\3\2\2\2\u016b\u016c"+ - "\t\r\2\2\u016c\u016d\t\16\2\2\u016d-\3\2\2\2\u016e\u016f\7\7\2\2\u016f"+ - "\u0170\5\36\20\2\u0170\u0171\7\b\2\2\u0171/\3\2\2\2\u0172\u0173\7\30\2"+ - "\2\u0173\u0178\7S\2\2\u0174\u0175\7\7\2\2\u0175\u0176\5\36\20\2\u0176"+ - "\u0177\7\b\2\2\u0177\u0179\3\2\2\2\u0178\u0174\3\2\2\2\u0179\u017a\3\2"+ - "\2\2\u017a\u0178\3\2\2\2\u017a\u017b\3\2\2\2\u017b\u0183\3\2\2\2\u017c"+ - "\u0180\5(\25\2\u017d\u017f\5&\24\2\u017e\u017d\3\2\2\2\u017f\u0182\3\2"+ - "\2\2\u0180\u017e\3\2\2\2\u0180\u0181\3\2\2\2\u0181\u0184\3\2\2\2\u0182"+ - "\u0180\3\2\2\2\u0183\u017c\3\2\2\2\u0183\u0184\3\2\2\2\u0184\u019f\3\2"+ - "\2\2\u0185\u0186\7\30\2\2\u0186\u0187\7S\2\2\u0187\u0188\7\7\2\2\u0188"+ - "\u0189\7\b\2\2\u0189\u0192\7\5\2\2\u018a\u018f\5\36\20\2\u018b\u018c\7"+ - "\r\2\2\u018c\u018e\5\36\20\2\u018d\u018b\3\2\2\2\u018e\u0191\3\2\2\2\u018f"+ - "\u018d\3\2\2\2\u018f\u0190\3\2\2\2\u0190\u0193\3\2\2\2\u0191\u018f\3\2"+ - "\2\2\u0192\u018a\3\2\2\2\u0192\u0193\3\2\2\2\u0193\u0195\3\2\2\2\u0194"+ - "\u0196\7\16\2\2\u0195\u0194\3\2\2\2\u0195\u0196\3\2\2\2\u0196\u0197\3"+ - "\2\2\2\u0197\u019b\7\6\2\2\u0198\u019a\5&\24\2\u0199\u0198\3\2\2\2\u019a"+ + "\t!\4\"\t\"\3\2\7\2F\n\2\f\2\16\2I\13\2\3\2\7\2L\n\2\f\2\16\2O\13\2\3"+ + "\2\5\2R\n\2\3\2\3\2\3\3\3\3\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7"+ + "\4b\n\4\f\4\16\4e\13\4\5\4g\n\4\3\4\3\4\3\5\3\5\3\5\3\5\5\5o\n\5\3\6\3"+ + "\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6y\n\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6\u0081"+ + "\n\6\3\6\3\6\3\6\5\6\u0086\n\6\3\6\3\6\5\6\u008a\n\6\3\6\3\6\5\6\u008e"+ + "\n\6\3\6\3\6\3\6\5\6\u0093\n\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6"+ + "\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\6\6\u00a9\n\6\r\6\16\6\u00aa"+ + "\5\6\u00ad\n\6\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7"+ + "\3\7\5\7\u00be\n\7\3\b\3\b\5\b\u00c2\n\b\3\t\3\t\7\t\u00c6\n\t\f\t\16"+ + "\t\u00c9\13\t\3\t\5\t\u00cc\n\t\3\t\3\t\3\n\3\n\3\13\3\13\5\13\u00d4\n"+ + "\13\3\f\3\f\3\r\3\r\3\r\3\r\7\r\u00dc\n\r\f\r\16\r\u00df\13\r\3\16\3\16"+ + "\3\16\7\16\u00e4\n\16\f\16\16\16\u00e7\13\16\3\17\3\17\3\17\5\17\u00ec"+ + "\n\17\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21\3\21"+ + "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ + "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ + "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ + "\3\21\3\21\3\21\7\21\u0128\n\21\f\21\16\21\u012b\13\21\3\22\3\22\3\22"+ + "\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\5\22\u013a\n\22\3\23"+ + "\3\23\7\23\u013e\n\23\f\23\16\23\u0141\13\23\3\23\3\23\3\23\7\23\u0146"+ + "\n\23\f\23\16\23\u0149\13\23\3\23\5\23\u014c\n\23\3\24\3\24\3\24\3\24"+ + "\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24"+ + "\5\24\u0160\n\24\3\25\3\25\3\25\5\25\u0165\n\25\3\26\3\26\5\26\u0169\n"+ + "\26\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\32\3\32\3"+ + "\32\3\32\3\32\3\32\6\32\u017c\n\32\r\32\16\32\u017d\3\32\3\32\7\32\u0182"+ + "\n\32\f\32\16\32\u0185\13\32\5\32\u0187\n\32\3\32\3\32\3\32\3\32\3\32"+ + 
"\3\32\3\32\3\32\7\32\u0191\n\32\f\32\16\32\u0194\13\32\5\32\u0196\n\32"+ + "\3\32\3\32\7\32\u019a\n\32\f\32\16\32\u019d\13\32\5\32\u019f\n\32\3\33"+ + "\3\33\3\33\3\33\7\33\u01a5\n\33\f\33\16\33\u01a8\13\33\3\33\3\33\3\33"+ + "\3\33\5\33\u01ae\n\33\3\34\3\34\3\34\3\34\7\34\u01b4\n\34\f\34\16\34\u01b7"+ + "\13\34\3\34\3\34\3\34\3\34\3\34\5\34\u01be\n\34\3\35\3\35\3\35\3\35\3"+ + "\36\3\36\3\36\3\36\7\36\u01c8\n\36\f\36\16\36\u01cb\13\36\5\36\u01cd\n"+ + "\36\3\36\3\36\3\37\3\37\3\37\5\37\u01d4\n\37\3 \3 \3 \3 \3 \7 \u01db\n"+ + " \f \16 \u01de\13 \5 \u01e0\n \3 \5 \u01e3\n \3 \3 \3 \5 \u01e8\n \3!"+ + "\5!\u01eb\n!\3!\3!\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\""+ + "\5\"\u01fc\n\"\3\"\2\3 #\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&("+ + "*,.\60\62\64\668:<>@B\2\16\3\2 \"\3\2#$\3\2:;\3\2%\'\3\2(+\3\2,/\3\2>"+ + "I\3\2<=\4\2\36\37#$\3\2JM\3\2\13\f\3\2UV\u0237\2G\3\2\2\2\4U\3\2\2\2\6"+ + "Z\3\2\2\2\bn\3\2\2\2\n\u00ac\3\2\2\2\f\u00bd\3\2\2\2\16\u00c1\3\2\2\2"+ + "\20\u00c3\3\2\2\2\22\u00cf\3\2\2\2\24\u00d3\3\2\2\2\26\u00d5\3\2\2\2\30"+ + "\u00d7\3\2\2\2\32\u00e0\3\2\2\2\34\u00e8\3\2\2\2\36\u00ed\3\2\2\2 \u00f4"+ + "\3\2\2\2\"\u0139\3\2\2\2$\u014b\3\2\2\2&\u015f\3\2\2\2(\u0164\3\2\2\2"+ + "*\u0168\3\2\2\2,\u016a\3\2\2\2.\u016e\3\2\2\2\60\u0171\3\2\2\2\62\u019e"+ + "\3\2\2\2\64\u01ad\3\2\2\2\66\u01bd\3\2\2\28\u01bf\3\2\2\2:\u01c3\3\2\2"+ + "\2<\u01d3\3\2\2\2>\u01e2\3\2\2\2@\u01ea\3\2\2\2B\u01fb\3\2\2\2DF\5\4\3"+ + "\2ED\3\2\2\2FI\3\2\2\2GE\3\2\2\2GH\3\2\2\2HM\3\2\2\2IG\3\2\2\2JL\5\b\5"+ + "\2KJ\3\2\2\2LO\3\2\2\2MK\3\2\2\2MN\3\2\2\2NQ\3\2\2\2OM\3\2\2\2PR\5\f\7"+ + "\2QP\3\2\2\2QR\3\2\2\2RS\3\2\2\2ST\7\2\2\3T\3\3\2\2\2UV\5\32\16\2VW\7"+ + "T\2\2WX\5\6\4\2XY\5\20\t\2Y\5\3\2\2\2Zf\7\t\2\2[\\\5\32\16\2\\c\7T\2\2"+ + "]^\7\r\2\2^_\5\32\16\2_`\7T\2\2`b\3\2\2\2a]\3\2\2\2be\3\2\2\2ca\3\2\2"+ + "\2cd\3\2\2\2dg\3\2\2\2ec\3\2\2\2f[\3\2\2\2fg\3\2\2\2gh\3\2\2\2hi\7\n\2"+ + "\2i\7\3\2\2\2jo\5\n\6\2kl\5\f\7\2lm\7\16\2\2mo\3\2\2\2nj\3\2\2\2nk\3\2"+ + "\2\2o\t\3\2\2\2pq\7\17\2\2qr\7\t\2\2rs\5 \21\2st\7\n\2\2tx\5\16\b\2uv"+ + "\7\21\2\2vy\5\16\b\2wy\6\6\2\2xu\3\2\2\2xw\3\2\2\2y\u00ad\3\2\2\2z{\7"+ + "\22\2\2{|\7\t\2\2|}\5 \21\2}\u0080\7\n\2\2~\u0081\5\16\b\2\177\u0081\5"+ + "\22\n\2\u0080~\3\2\2\2\u0080\177\3\2\2\2\u0081\u00ad\3\2\2\2\u0082\u0083"+ + "\7\24\2\2\u0083\u0085\7\t\2\2\u0084\u0086\5\24\13\2\u0085\u0084\3\2\2"+ + "\2\u0085\u0086\3\2\2\2\u0086\u0087\3\2\2\2\u0087\u0089\7\16\2\2\u0088"+ + "\u008a\5 \21\2\u0089\u0088\3\2\2\2\u0089\u008a\3\2\2\2\u008a\u008b\3\2"+ + "\2\2\u008b\u008d\7\16\2\2\u008c\u008e\5\26\f\2\u008d\u008c\3\2\2\2\u008d"+ + "\u008e\3\2\2\2\u008e\u008f\3\2\2\2\u008f\u0092\7\n\2\2\u0090\u0093\5\16"+ + "\b\2\u0091\u0093\5\22\n\2\u0092\u0090\3\2\2\2\u0092\u0091\3\2\2\2\u0093"+ + "\u00ad\3\2\2\2\u0094\u0095\7\24\2\2\u0095\u0096\7\t\2\2\u0096\u0097\5"+ + "\32\16\2\u0097\u0098\7T\2\2\u0098\u0099\7\66\2\2\u0099\u009a\5 \21\2\u009a"+ + "\u009b\7\n\2\2\u009b\u009c\5\16\b\2\u009c\u00ad\3\2\2\2\u009d\u009e\7"+ + "\24\2\2\u009e\u009f\7\t\2\2\u009f\u00a0\7T\2\2\u00a0\u00a1\7\20\2\2\u00a1"+ + "\u00a2\5 \21\2\u00a2\u00a3\7\n\2\2\u00a3\u00a4\5\16\b\2\u00a4\u00ad\3"+ + "\2\2\2\u00a5\u00a6\7\31\2\2\u00a6\u00a8\5\20\t\2\u00a7\u00a9\5\36\20\2"+ + "\u00a8\u00a7\3\2\2\2\u00a9\u00aa\3\2\2\2\u00aa\u00a8\3\2\2\2\u00aa\u00ab"+ + "\3\2\2\2\u00ab\u00ad\3\2\2\2\u00acp\3\2\2\2\u00acz\3\2\2\2\u00ac\u0082"+ + "\3\2\2\2\u00ac\u0094\3\2\2\2\u00ac\u009d\3\2\2\2\u00ac\u00a5\3\2\2\2\u00ad"+ + "\13\3\2\2\2\u00ae\u00af\7\23\2\2\u00af\u00b0\5\20\t\2\u00b0\u00b1\7\22"+ + "\2\2\u00b1\u00b2\7\t\2\2\u00b2\u00b3\5 
\21\2\u00b3\u00b4\7\n\2\2\u00b4"+ + "\u00be\3\2\2\2\u00b5\u00be\5\30\r\2\u00b6\u00be\7\25\2\2\u00b7\u00be\7"+ + "\26\2\2\u00b8\u00b9\7\27\2\2\u00b9\u00be\5 \21\2\u00ba\u00bb\7\33\2\2"+ + "\u00bb\u00be\5 \21\2\u00bc\u00be\5 \21\2\u00bd\u00ae\3\2\2\2\u00bd\u00b5"+ + "\3\2\2\2\u00bd\u00b6\3\2\2\2\u00bd\u00b7\3\2\2\2\u00bd\u00b8\3\2\2\2\u00bd"+ + "\u00ba\3\2\2\2\u00bd\u00bc\3\2\2\2\u00be\r\3\2\2\2\u00bf\u00c2\5\20\t"+ + "\2\u00c0\u00c2\5\b\5\2\u00c1\u00bf\3\2\2\2\u00c1\u00c0\3\2\2\2\u00c2\17"+ + "\3\2\2\2\u00c3\u00c7\7\5\2\2\u00c4\u00c6\5\b\5\2\u00c5\u00c4\3\2\2\2\u00c6"+ + "\u00c9\3\2\2\2\u00c7\u00c5\3\2\2\2\u00c7\u00c8\3\2\2\2\u00c8\u00cb\3\2"+ + "\2\2\u00c9\u00c7\3\2\2\2\u00ca\u00cc\5\f\7\2\u00cb\u00ca\3\2\2\2\u00cb"+ + "\u00cc\3\2\2\2\u00cc\u00cd\3\2\2\2\u00cd\u00ce\7\6\2\2\u00ce\21\3\2\2"+ + "\2\u00cf\u00d0\7\16\2\2\u00d0\23\3\2\2\2\u00d1\u00d4\5\30\r\2\u00d2\u00d4"+ + "\5 \21\2\u00d3\u00d1\3\2\2\2\u00d3\u00d2\3\2\2\2\u00d4\25\3\2\2\2\u00d5"+ + "\u00d6\5 \21\2\u00d6\27\3\2\2\2\u00d7\u00d8\5\32\16\2\u00d8\u00dd\5\34"+ + "\17\2\u00d9\u00da\7\r\2\2\u00da\u00dc\5\34\17\2\u00db\u00d9\3\2\2\2\u00dc"+ + "\u00df\3\2\2\2\u00dd\u00db\3\2\2\2\u00dd\u00de\3\2\2\2\u00de\31\3\2\2"+ + "\2\u00df\u00dd\3\2\2\2\u00e0\u00e5\7S\2\2\u00e1\u00e2\7\7\2\2\u00e2\u00e4"+ + "\7\b\2\2\u00e3\u00e1\3\2\2\2\u00e4\u00e7\3\2\2\2\u00e5\u00e3\3\2\2\2\u00e5"+ + "\u00e6\3\2\2\2\u00e6\33\3\2\2\2\u00e7\u00e5\3\2\2\2\u00e8\u00eb\7T\2\2"+ + "\u00e9\u00ea\7>\2\2\u00ea\u00ec\5 \21\2\u00eb\u00e9\3\2\2\2\u00eb\u00ec"+ + "\3\2\2\2\u00ec\35\3\2\2\2\u00ed\u00ee\7\32\2\2\u00ee\u00ef\7\t\2\2\u00ef"+ + "\u00f0\7S\2\2\u00f0\u00f1\7T\2\2\u00f1\u00f2\7\n\2\2\u00f2\u00f3\5\20"+ + "\t\2\u00f3\37\3\2\2\2\u00f4\u00f5\b\21\1\2\u00f5\u00f6\5\"\22\2\u00f6"+ + "\u0129\3\2\2\2\u00f7\u00f8\f\21\2\2\u00f8\u00f9\t\2\2\2\u00f9\u0128\5"+ + " \21\22\u00fa\u00fb\f\20\2\2\u00fb\u00fc\t\3\2\2\u00fc\u0128\5 \21\21"+ + "\u00fd\u00fe\f\17\2\2\u00fe\u00ff\t\4\2\2\u00ff\u0128\5 \21\20\u0100\u0101"+ + "\f\16\2\2\u0101\u0102\t\5\2\2\u0102\u0128\5 \21\17\u0103\u0104\f\r\2\2"+ + "\u0104\u0105\t\6\2\2\u0105\u0128\5 \21\16\u0106\u0107\f\13\2\2\u0107\u0108"+ + "\t\7\2\2\u0108\u0128\5 \21\f\u0109\u010a\f\n\2\2\u010a\u010b\7\60\2\2"+ + "\u010b\u0128\5 \21\13\u010c\u010d\f\t\2\2\u010d\u010e\7\61\2\2\u010e\u0128"+ + "\5 \21\n\u010f\u0110\f\b\2\2\u0110\u0111\7\62\2\2\u0111\u0128\5 \21\t"+ + "\u0112\u0113\f\7\2\2\u0113\u0114\7\63\2\2\u0114\u0128\5 \21\b\u0115\u0116"+ + "\f\6\2\2\u0116\u0117\7\64\2\2\u0117\u0128\5 \21\7\u0118\u0119\f\5\2\2"+ + "\u0119\u011a\7\65\2\2\u011a\u011b\5 \21\2\u011b\u011c\7\66\2\2\u011c\u011d"+ + "\5 \21\5\u011d\u0128\3\2\2\2\u011e\u011f\f\4\2\2\u011f\u0120\7\67\2\2"+ + "\u0120\u0128\5 \21\4\u0121\u0122\f\3\2\2\u0122\u0123\t\b\2\2\u0123\u0128"+ + "\5 \21\3\u0124\u0125\f\f\2\2\u0125\u0126\7\35\2\2\u0126\u0128\5\32\16"+ + "\2\u0127\u00f7\3\2\2\2\u0127\u00fa\3\2\2\2\u0127\u00fd\3\2\2\2\u0127\u0100"+ + "\3\2\2\2\u0127\u0103\3\2\2\2\u0127\u0106\3\2\2\2\u0127\u0109\3\2\2\2\u0127"+ + "\u010c\3\2\2\2\u0127\u010f\3\2\2\2\u0127\u0112\3\2\2\2\u0127\u0115\3\2"+ + "\2\2\u0127\u0118\3\2\2\2\u0127\u011e\3\2\2\2\u0127\u0121\3\2\2\2\u0127"+ + "\u0124\3\2\2\2\u0128\u012b\3\2\2\2\u0129\u0127\3\2\2\2\u0129\u012a\3\2"+ + "\2\2\u012a!\3\2\2\2\u012b\u0129\3\2\2\2\u012c\u012d\t\t\2\2\u012d\u013a"+ + "\5$\23\2\u012e\u012f\5$\23\2\u012f\u0130\t\t\2\2\u0130\u013a\3\2\2\2\u0131"+ + "\u013a\5$\23\2\u0132\u0133\t\n\2\2\u0133\u013a\5\"\22\2\u0134\u0135\7"+ + "\t\2\2\u0135\u0136\5\32\16\2\u0136\u0137\7\n\2\2\u0137\u0138\5\"\22\2"+ + 
"\u0138\u013a\3\2\2\2\u0139\u012c\3\2\2\2\u0139\u012e\3\2\2\2\u0139\u0131"+ + "\3\2\2\2\u0139\u0132\3\2\2\2\u0139\u0134\3\2\2\2\u013a#\3\2\2\2\u013b"+ + "\u013f\5&\24\2\u013c\u013e\5(\25\2\u013d\u013c\3\2\2\2\u013e\u0141\3\2"+ + "\2\2\u013f\u013d\3\2\2\2\u013f\u0140\3\2\2\2\u0140\u014c\3\2\2\2\u0141"+ + "\u013f\3\2\2\2\u0142\u0143\5\32\16\2\u0143\u0147\5*\26\2\u0144\u0146\5"+ + "(\25\2\u0145\u0144\3\2\2\2\u0146\u0149\3\2\2\2\u0147\u0145\3\2\2\2\u0147"+ + "\u0148\3\2\2\2\u0148\u014c\3\2\2\2\u0149\u0147\3\2\2\2\u014a\u014c\5\62"+ + "\32\2\u014b\u013b\3\2\2\2\u014b\u0142\3\2\2\2\u014b\u014a\3\2\2\2\u014c"+ + "%\3\2\2\2\u014d\u014e\7\t\2\2\u014e\u014f\5 \21\2\u014f\u0150\7\n\2\2"+ + "\u0150\u0160\3\2\2\2\u0151\u0160\t\13\2\2\u0152\u0160\7P\2\2\u0153\u0160"+ + "\7Q\2\2\u0154\u0160\7R\2\2\u0155\u0160\7N\2\2\u0156\u0160\7O\2\2\u0157"+ + "\u0160\5\64\33\2\u0158\u0160\5\66\34\2\u0159\u0160\7T\2\2\u015a\u015b"+ + "\7T\2\2\u015b\u0160\5:\36\2\u015c\u015d\7\30\2\2\u015d\u015e\7S\2\2\u015e"+ + "\u0160\5:\36\2\u015f\u014d\3\2\2\2\u015f\u0151\3\2\2\2\u015f\u0152\3\2"+ + "\2\2\u015f\u0153\3\2\2\2\u015f\u0154\3\2\2\2\u015f\u0155\3\2\2\2\u015f"+ + "\u0156\3\2\2\2\u015f\u0157\3\2\2\2\u015f\u0158\3\2\2\2\u015f\u0159\3\2"+ + "\2\2\u015f\u015a\3\2\2\2\u015f\u015c\3\2\2\2\u0160\'\3\2\2\2\u0161\u0165"+ + "\5,\27\2\u0162\u0165\5.\30\2\u0163\u0165\5\60\31\2\u0164\u0161\3\2\2\2"+ + "\u0164\u0162\3\2\2\2\u0164\u0163\3\2\2\2\u0165)\3\2\2\2\u0166\u0169\5"+ + ",\27\2\u0167\u0169\5.\30\2\u0168\u0166\3\2\2\2\u0168\u0167\3\2\2\2\u0169"+ + "+\3\2\2\2\u016a\u016b\t\f\2\2\u016b\u016c\7V\2\2\u016c\u016d\5:\36\2\u016d"+ + "-\3\2\2\2\u016e\u016f\t\f\2\2\u016f\u0170\t\r\2\2\u0170/\3\2\2\2\u0171"+ + "\u0172\7\7\2\2\u0172\u0173\5 \21\2\u0173\u0174\7\b\2\2\u0174\61\3\2\2"+ + "\2\u0175\u0176\7\30\2\2\u0176\u017b\7S\2\2\u0177\u0178\7\7\2\2\u0178\u0179"+ + "\5 \21\2\u0179\u017a\7\b\2\2\u017a\u017c\3\2\2\2\u017b\u0177\3\2\2\2\u017c"+ + "\u017d\3\2\2\2\u017d\u017b\3\2\2\2\u017d\u017e\3\2\2\2\u017e\u0186\3\2"+ + "\2\2\u017f\u0183\5*\26\2\u0180\u0182\5(\25\2\u0181\u0180\3\2\2\2\u0182"+ + "\u0185\3\2\2\2\u0183\u0181\3\2\2\2\u0183\u0184\3\2\2\2\u0184\u0187\3\2"+ + "\2\2\u0185\u0183\3\2\2\2\u0186\u017f\3\2\2\2\u0186\u0187\3\2\2\2\u0187"+ + "\u019f\3\2\2\2\u0188\u0189\7\30\2\2\u0189\u018a\7S\2\2\u018a\u018b\7\7"+ + "\2\2\u018b\u018c\7\b\2\2\u018c\u0195\7\5\2\2\u018d\u0192\5 \21\2\u018e"+ + "\u018f\7\r\2\2\u018f\u0191\5 \21\2\u0190\u018e\3\2\2\2\u0191\u0194\3\2"+ + "\2\2\u0192\u0190\3\2\2\2\u0192\u0193\3\2\2\2\u0193\u0196\3\2\2\2\u0194"+ + "\u0192\3\2\2\2\u0195\u018d\3\2\2\2\u0195\u0196\3\2\2\2\u0196\u0197\3\2"+ + "\2\2\u0197\u019b\7\6\2\2\u0198\u019a\5(\25\2\u0199\u0198\3\2\2\2\u019a"+ "\u019d\3\2\2\2\u019b\u0199\3\2\2\2\u019b\u019c\3\2\2\2\u019c\u019f\3\2"+ - "\2\2\u019d\u019b\3\2\2\2\u019e\u0172\3\2\2\2\u019e\u0185\3\2\2\2\u019f"+ - "\61\3\2\2\2\u01a0\u01a1\7\7\2\2\u01a1\u01a6\5\36\20\2\u01a2\u01a3\7\r"+ - "\2\2\u01a3\u01a5\5\36\20\2\u01a4\u01a2\3\2\2\2\u01a5\u01a8\3\2\2\2\u01a6"+ - "\u01a4\3\2\2\2\u01a6\u01a7\3\2\2\2\u01a7\u01a9\3\2\2\2\u01a8\u01a6\3\2"+ - "\2\2\u01a9\u01aa\7\b\2\2\u01aa\u01ae\3\2\2\2\u01ab\u01ac\7\7\2\2\u01ac"+ - "\u01ae\7\b\2\2\u01ad\u01a0\3\2\2\2\u01ad\u01ab\3\2\2\2\u01ae\63\3\2\2"+ - "\2\u01af\u01b0\7\7\2\2\u01b0\u01b5\5\66\34\2\u01b1\u01b2\7\r\2\2\u01b2"+ - "\u01b4\5\66\34\2\u01b3\u01b1\3\2\2\2\u01b4\u01b7\3\2\2\2\u01b5\u01b3\3"+ - "\2\2\2\u01b5\u01b6\3\2\2\2\u01b6\u01b8\3\2\2\2\u01b7\u01b5\3\2\2\2\u01b8"+ - "\u01b9\7\b\2\2\u01b9\u01be\3\2\2\2\u01ba\u01bb\7\7\2\2\u01bb\u01bc\7\66"+ - 
"\2\2\u01bc\u01be\7\b\2\2\u01bd\u01af\3\2\2\2\u01bd\u01ba\3\2\2\2\u01be"+ - "\65\3\2\2\2\u01bf\u01c0\5\36\20\2\u01c0\u01c1\7\66\2\2\u01c1\u01c2\5\36"+ - "\20\2\u01c2\67\3\2\2\2\u01c3\u01cc\7\t\2\2\u01c4\u01c9\5:\36\2\u01c5\u01c6"+ - "\7\r\2\2\u01c6\u01c8\5:\36\2\u01c7\u01c5\3\2\2\2\u01c8\u01cb\3\2\2\2\u01c9"+ - "\u01c7\3\2\2\2\u01c9\u01ca\3\2\2\2\u01ca\u01cd\3\2\2\2\u01cb\u01c9\3\2"+ - "\2\2\u01cc\u01c4\3\2\2\2\u01cc\u01cd\3\2\2\2\u01cd\u01ce\3\2\2\2\u01ce"+ - "\u01cf\7\n\2\2\u01cf9\3\2\2\2\u01d0\u01d4\5\36\20\2\u01d1\u01d4\5<\37"+ - "\2\u01d2\u01d4\5@!\2\u01d3\u01d0\3\2\2\2\u01d3\u01d1\3\2\2\2\u01d3\u01d2"+ - "\3\2\2\2\u01d4;\3\2\2\2\u01d5\u01e3\5> \2\u01d6\u01df\7\t\2\2\u01d7\u01dc"+ - "\5> \2\u01d8\u01d9\7\r\2\2\u01d9\u01db\5> \2\u01da\u01d8\3\2\2\2\u01db"+ - "\u01de\3\2\2\2\u01dc\u01da\3\2\2\2\u01dc\u01dd\3\2\2\2\u01dd\u01e0\3\2"+ - "\2\2\u01de\u01dc\3\2\2\2\u01df\u01d7\3\2\2\2\u01df\u01e0\3\2\2\2\u01e0"+ - "\u01e1\3\2\2\2\u01e1\u01e3\7\n\2\2\u01e2\u01d5\3\2\2\2\u01e2\u01d6\3\2"+ - "\2\2\u01e3\u01e4\3\2\2\2\u01e4\u01e7\79\2\2\u01e5\u01e8\5\f\7\2\u01e6"+ - "\u01e8\5\36\20\2\u01e7\u01e5\3\2\2\2\u01e7\u01e6\3\2\2\2\u01e8=\3\2\2"+ - "\2\u01e9\u01eb\5\26\f\2\u01ea\u01e9\3\2\2\2\u01ea\u01eb\3\2\2\2\u01eb"+ - "\u01ec\3\2\2\2\u01ec\u01ed\7T\2\2\u01ed?\3\2\2\2\u01ee\u01ef\7S\2\2\u01ef"+ - "\u01f0\78\2\2\u01f0\u01fc\7T\2\2\u01f1\u01f2\5\26\f\2\u01f2\u01f3\78\2"+ - "\2\u01f3\u01f4\7\30\2\2\u01f4\u01fc\3\2\2\2\u01f5\u01f6\7T\2\2\u01f6\u01f7"+ - "\78\2\2\u01f7\u01fc\7T\2\2\u01f8\u01f9\7\34\2\2\u01f9\u01fa\78\2\2\u01fa"+ - "\u01fc\7T\2\2\u01fb\u01ee\3\2\2\2\u01fb\u01f1\3\2\2\2\u01fb\u01f5\3\2"+ - "\2\2\u01fb\u01f8\3\2\2\2\u01fcA\3\2\2\2\62EK^amu\u0082\u0086\u008a\u008f"+ - "\u00b2\u00bb\u00bf\u00c5\u00ce\u00d8\u00e0\u00e6\u0124\u0126\u0136\u013c"+ - "\u0144\u0148\u015c\u0161\u0165\u017a\u0180\u0183\u018f\u0192\u0195\u019b"+ - "\u019e\u01a6\u01ad\u01b5\u01bd\u01c9\u01cc\u01d3\u01dc\u01df\u01e2\u01e7"+ - "\u01ea\u01fb"; + "\2\2\u019d\u019b\3\2\2\2\u019e\u0175\3\2\2\2\u019e\u0188\3\2\2\2\u019f"+ + "\63\3\2\2\2\u01a0\u01a1\7\7\2\2\u01a1\u01a6\5 \21\2\u01a2\u01a3\7\r\2"+ + "\2\u01a3\u01a5\5 \21\2\u01a4\u01a2\3\2\2\2\u01a5\u01a8\3\2\2\2\u01a6\u01a4"+ + "\3\2\2\2\u01a6\u01a7\3\2\2\2\u01a7\u01a9\3\2\2\2\u01a8\u01a6\3\2\2\2\u01a9"+ + "\u01aa\7\b\2\2\u01aa\u01ae\3\2\2\2\u01ab\u01ac\7\7\2\2\u01ac\u01ae\7\b"+ + "\2\2\u01ad\u01a0\3\2\2\2\u01ad\u01ab\3\2\2\2\u01ae\65\3\2\2\2\u01af\u01b0"+ + "\7\7\2\2\u01b0\u01b5\58\35\2\u01b1\u01b2\7\r\2\2\u01b2\u01b4\58\35\2\u01b3"+ + "\u01b1\3\2\2\2\u01b4\u01b7\3\2\2\2\u01b5\u01b3\3\2\2\2\u01b5\u01b6\3\2"+ + "\2\2\u01b6\u01b8\3\2\2\2\u01b7\u01b5\3\2\2\2\u01b8\u01b9\7\b\2\2\u01b9"+ + "\u01be\3\2\2\2\u01ba\u01bb\7\7\2\2\u01bb\u01bc\7\66\2\2\u01bc\u01be\7"+ + "\b\2\2\u01bd\u01af\3\2\2\2\u01bd\u01ba\3\2\2\2\u01be\67\3\2\2\2\u01bf"+ + "\u01c0\5 \21\2\u01c0\u01c1\7\66\2\2\u01c1\u01c2\5 \21\2\u01c29\3\2\2\2"+ + "\u01c3\u01cc\7\t\2\2\u01c4\u01c9\5<\37\2\u01c5\u01c6\7\r\2\2\u01c6\u01c8"+ + "\5<\37\2\u01c7\u01c5\3\2\2\2\u01c8\u01cb\3\2\2\2\u01c9\u01c7\3\2\2\2\u01c9"+ + "\u01ca\3\2\2\2\u01ca\u01cd\3\2\2\2\u01cb\u01c9\3\2\2\2\u01cc\u01c4\3\2"+ + "\2\2\u01cc\u01cd\3\2\2\2\u01cd\u01ce\3\2\2\2\u01ce\u01cf\7\n\2\2\u01cf"+ + ";\3\2\2\2\u01d0\u01d4\5 \21\2\u01d1\u01d4\5> \2\u01d2\u01d4\5B\"\2\u01d3"+ + "\u01d0\3\2\2\2\u01d3\u01d1\3\2\2\2\u01d3\u01d2\3\2\2\2\u01d4=\3\2\2\2"+ + "\u01d5\u01e3\5@!\2\u01d6\u01df\7\t\2\2\u01d7\u01dc\5@!\2\u01d8\u01d9\7"+ + "\r\2\2\u01d9\u01db\5@!\2\u01da\u01d8\3\2\2\2\u01db\u01de\3\2\2\2\u01dc"+ + 
"\u01da\3\2\2\2\u01dc\u01dd\3\2\2\2\u01dd\u01e0\3\2\2\2\u01de\u01dc\3\2"+ + "\2\2\u01df\u01d7\3\2\2\2\u01df\u01e0\3\2\2\2\u01e0\u01e1\3\2\2\2\u01e1"+ + "\u01e3\7\n\2\2\u01e2\u01d5\3\2\2\2\u01e2\u01d6\3\2\2\2\u01e3\u01e4\3\2"+ + "\2\2\u01e4\u01e7\79\2\2\u01e5\u01e8\5\20\t\2\u01e6\u01e8\5 \21\2\u01e7"+ + "\u01e5\3\2\2\2\u01e7\u01e6\3\2\2\2\u01e8?\3\2\2\2\u01e9\u01eb\5\32\16"+ + "\2\u01ea\u01e9\3\2\2\2\u01ea\u01eb\3\2\2\2\u01eb\u01ec\3\2\2\2\u01ec\u01ed"+ + "\7T\2\2\u01edA\3\2\2\2\u01ee\u01ef\7S\2\2\u01ef\u01f0\78\2\2\u01f0\u01fc"+ + "\7T\2\2\u01f1\u01f2\5\32\16\2\u01f2\u01f3\78\2\2\u01f3\u01f4\7\30\2\2"+ + "\u01f4\u01fc\3\2\2\2\u01f5\u01f6\7T\2\2\u01f6\u01f7\78\2\2\u01f7\u01fc"+ + "\7T\2\2\u01f8\u01f9\7\34\2\2\u01f9\u01fa\78\2\2\u01fa\u01fc\7T\2\2\u01fb"+ + "\u01ee\3\2\2\2\u01fb\u01f1\3\2\2\2\u01fb\u01f5\3\2\2\2\u01fb\u01f8\3\2"+ + "\2\2\u01fcC\3\2\2\2\65GMQcfnx\u0080\u0085\u0089\u008d\u0092\u00aa\u00ac"+ + "\u00bd\u00c1\u00c7\u00cb\u00d3\u00dd\u00e5\u00eb\u0127\u0129\u0139\u013f"+ + "\u0147\u014b\u015f\u0164\u0168\u017d\u0183\u0186\u0192\u0195\u019b\u019e"+ + "\u01a6\u01ad\u01b5\u01bd\u01c9\u01cc\u01d3\u01dc\u01df\u01e2\u01e7\u01ea"+ + "\u01fb"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserBaseVisitor.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserBaseVisitor.java index 8c4741e672533..81e7166d9a9ae 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserBaseVisitor.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserBaseVisitor.java @@ -38,21 +38,21 @@ class PainlessParserBaseVisitor extends AbstractParseTreeVisitor implement *

* <p>The default implementation returns the result of calling
* {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitIf(PainlessParser.IfContext ctx) { return visitChildren(ctx); } + @Override public T visitStatement(PainlessParser.StatementContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *

* <p>The default implementation returns the result of calling
* {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitWhile(PainlessParser.WhileContext ctx) { return visitChildren(ctx); } + @Override public T visitIf(PainlessParser.IfContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *

* <p>The default implementation returns the result of calling
* {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitDo(PainlessParser.DoContext ctx) { return visitChildren(ctx); } + @Override public T visitWhile(PainlessParser.WhileContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * @@ -80,35 +80,42 @@ class PainlessParserBaseVisitor extends AbstractParseTreeVisitor implement *

* <p>The default implementation returns the result of calling
* {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitDecl(PainlessParser.DeclContext ctx) { return visitChildren(ctx); } + @Override public T visitTry(PainlessParser.TryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *

* <p>The default implementation returns the result of calling
* {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitContinue(PainlessParser.ContinueContext ctx) { return visitChildren(ctx); } + @Override public T visitDo(PainlessParser.DoContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *

* <p>The default implementation returns the result of calling
* {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitBreak(PainlessParser.BreakContext ctx) { return visitChildren(ctx); } + @Override public T visitDecl(PainlessParser.DeclContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *

* <p>The default implementation returns the result of calling
* {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitReturn(PainlessParser.ReturnContext ctx) { return visitChildren(ctx); } + @Override public T visitContinue(PainlessParser.ContinueContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *

* <p>The default implementation returns the result of calling
* {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitTry(PainlessParser.TryContext ctx) { return visitChildren(ctx); } + @Override public T visitBreak(PainlessParser.BreakContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

+ * <p>The default implementation returns the result of calling
+ * {@link #visitChildren} on {@code ctx}.</p>
+ */ + @Override public T visitReturn(PainlessParser.ReturnContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * @@ -186,13 +193,6 @@ class PainlessParserBaseVisitor extends AbstractParseTreeVisitor implement * {@link #visitChildren} on {@code ctx}.
*/ @Override public T visitTrap(PainlessParser.TrapContext ctx) { return visitChildren(ctx); } - /** - * {@inheritDoc} - * - *

- * <p>The default implementation returns the result of calling
- * {@link #visitChildren} on {@code ctx}.</p>
- */ - @Override public T visitDelimiter(PainlessParser.DelimiterContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserVisitor.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserVisitor.java index 47bfd4a1d05b9..ec3e251f3e9ad 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserVisitor.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParserVisitor.java @@ -29,92 +29,98 @@ interface PainlessParserVisitor extends ParseTreeVisitor { */ T visitParameters(PainlessParser.ParametersContext ctx); /** - * Visit a parse tree produced by the {@code if} - * labeled alternative in {@link PainlessParser#statement}. + * Visit a parse tree produced by {@link PainlessParser#statement}. * @param ctx the parse tree * @return the visitor result */ - T visitIf(PainlessParser.IfContext ctx); + T visitStatement(PainlessParser.StatementContext ctx); /** - * Visit a parse tree produced by the {@code while} - * labeled alternative in {@link PainlessParser#statement}. + * Visit a parse tree produced by the {@code if} + * labeled alternative in {@link PainlessParser#rstatement}. * @param ctx the parse tree * @return the visitor result */ - T visitWhile(PainlessParser.WhileContext ctx); + T visitIf(PainlessParser.IfContext ctx); /** - * Visit a parse tree produced by the {@code do} - * labeled alternative in {@link PainlessParser#statement}. + * Visit a parse tree produced by the {@code while} + * labeled alternative in {@link PainlessParser#rstatement}. * @param ctx the parse tree * @return the visitor result */ - T visitDo(PainlessParser.DoContext ctx); + T visitWhile(PainlessParser.WhileContext ctx); /** * Visit a parse tree produced by the {@code for} - * labeled alternative in {@link PainlessParser#statement}. + * labeled alternative in {@link PainlessParser#rstatement}. * @param ctx the parse tree * @return the visitor result */ T visitFor(PainlessParser.ForContext ctx); /** * Visit a parse tree produced by the {@code each} - * labeled alternative in {@link PainlessParser#statement}. + * labeled alternative in {@link PainlessParser#rstatement}. * @param ctx the parse tree * @return the visitor result */ T visitEach(PainlessParser.EachContext ctx); /** * Visit a parse tree produced by the {@code ineach} - * labeled alternative in {@link PainlessParser#statement}. + * labeled alternative in {@link PainlessParser#rstatement}. * @param ctx the parse tree * @return the visitor result */ T visitIneach(PainlessParser.IneachContext ctx); + /** + * Visit a parse tree produced by the {@code try} + * labeled alternative in {@link PainlessParser#rstatement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitTry(PainlessParser.TryContext ctx); + /** + * Visit a parse tree produced by the {@code do} + * labeled alternative in {@link PainlessParser#dstatement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitDo(PainlessParser.DoContext ctx); /** * Visit a parse tree produced by the {@code decl} - * labeled alternative in {@link PainlessParser#statement}. + * labeled alternative in {@link PainlessParser#dstatement}. * @param ctx the parse tree * @return the visitor result */ T visitDecl(PainlessParser.DeclContext ctx); /** * Visit a parse tree produced by the {@code continue} - * labeled alternative in {@link PainlessParser#statement}. 
+ * labeled alternative in {@link PainlessParser#dstatement}. * @param ctx the parse tree * @return the visitor result */ T visitContinue(PainlessParser.ContinueContext ctx); /** * Visit a parse tree produced by the {@code break} - * labeled alternative in {@link PainlessParser#statement}. + * labeled alternative in {@link PainlessParser#dstatement}. * @param ctx the parse tree * @return the visitor result */ T visitBreak(PainlessParser.BreakContext ctx); /** * Visit a parse tree produced by the {@code return} - * labeled alternative in {@link PainlessParser#statement}. + * labeled alternative in {@link PainlessParser#dstatement}. * @param ctx the parse tree * @return the visitor result */ T visitReturn(PainlessParser.ReturnContext ctx); - /** - * Visit a parse tree produced by the {@code try} - * labeled alternative in {@link PainlessParser#statement}. - * @param ctx the parse tree - * @return the visitor result - */ - T visitTry(PainlessParser.TryContext ctx); /** * Visit a parse tree produced by the {@code throw} - * labeled alternative in {@link PainlessParser#statement}. + * labeled alternative in {@link PainlessParser#dstatement}. * @param ctx the parse tree * @return the visitor result */ T visitThrow(PainlessParser.ThrowContext ctx); /** * Visit a parse tree produced by the {@code expr} - * labeled alternative in {@link PainlessParser#statement}. + * labeled alternative in {@link PainlessParser#dstatement}. * @param ctx the parse tree * @return the visitor result */ @@ -173,12 +179,6 @@ interface PainlessParserVisitor extends ParseTreeVisitor { * @return the visitor result */ T visitTrap(PainlessParser.TrapContext ctx); - /** - * Visit a parse tree produced by {@link PainlessParser#delimiter}. - * @param ctx the parse tree - * @return the visitor result - */ - T visitDelimiter(PainlessParser.DelimiterContext ctx); /** * Visit a parse tree produced by the {@code single} * labeled alternative in {@link PainlessParser#expression}. 
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index a15f87966eae2..3ac6cb7fd37c4 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -56,7 +56,6 @@ import org.elasticsearch.painless.antlr.PainlessParser.DeclarationContext; import org.elasticsearch.painless.antlr.PainlessParser.DecltypeContext; import org.elasticsearch.painless.antlr.PainlessParser.DeclvarContext; -import org.elasticsearch.painless.antlr.PainlessParser.DelimiterContext; import org.elasticsearch.painless.antlr.PainlessParser.DoContext; import org.elasticsearch.painless.antlr.PainlessParser.DynamicContext; import org.elasticsearch.painless.antlr.PainlessParser.EachContext; @@ -264,6 +263,10 @@ public ANode visitSource(SourceContext ctx) { statements.add((AStatement)visit(statement)); } + if (ctx.dstatement() != null) { + statements.add((AStatement)visit(ctx.dstatement())); + } + return new SSource(scriptClassInfo, settings, sourceName, sourceText, debugStream, (MainMethodReserved)reserved.pop(), location(ctx), functions, globals, statements); } @@ -290,6 +293,10 @@ public ANode visitFunction(FunctionContext ctx) { statements.add((AStatement)visit(statement)); } + if (ctx.block().dstatement() != null) { + statements.add((AStatement)visit(ctx.block().dstatement())); + } + return new SFunction((FunctionReserved)reserved.pop(), location(ctx), rtnType, name, paramTypes, paramNames, statements, false); } @@ -299,6 +306,17 @@ public ANode visitParameters(ParametersContext ctx) { throw location(ctx).createError(new IllegalStateException("Illegal tree structure.")); } + @Override + public ANode visitStatement(StatementContext ctx) { + if (ctx.rstatement() != null) { + return visit(ctx.rstatement()); + } else if (ctx.dstatement() != null) { + return visit(ctx.dstatement()); + } else { + throw location(ctx).createError(new IllegalStateException("Illegal tree structure.")); + } + } + @Override public ANode visitIf(IfContext ctx) { AExpression expression = (AExpression)visit(ctx.expression()); @@ -446,7 +464,7 @@ public ANode visitTrailer(TrailerContext ctx) { @Override public ANode visitBlock(BlockContext ctx) { - if (ctx.statement().isEmpty()) { + if (ctx.statement().isEmpty() && ctx.dstatement() == null) { return null; } else { List statements = new ArrayList<>(); @@ -455,6 +473,10 @@ public ANode visitBlock(BlockContext ctx) { statements.add((AStatement)visit(statement)); } + if (ctx.dstatement() != null) { + statements.add((AStatement)visit(ctx.dstatement())); + } + return new SBlock(location(ctx), statements); } } @@ -514,11 +536,6 @@ public ANode visitTrap(TrapContext ctx) { return new SCatch(location(ctx), type, name, block); } - @Override - public ANode visitDelimiter(DelimiterContext ctx) { - throw location(ctx).createError(new IllegalStateException("Illegal tree structure.")); - } - @Override public ANode visitSingle(SingleContext ctx) { return visit(ctx.unary()); @@ -1074,6 +1091,10 @@ public ANode visitLambda(LambdaContext ctx) { for (StatementContext statement : ctx.block().statement()) { statements.add((AStatement)visit(statement)); } + + if (ctx.block().dstatement() != null) { + statements.add((AStatement)visit(ctx.block().dstatement())); + } } FunctionReserved lambdaReserved = (FunctionReserved)reserved.pop(); diff --git 
a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteRequestTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteRequestTests.java new file mode 100644 index 0000000000000..488ae0e1643bc --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteRequestTests.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.painless; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +import java.io.IOException; +import java.util.Collections; + +public class PainlessExecuteRequestTests extends AbstractStreamableXContentTestCase { + + @Override + protected PainlessExecuteAction.Request createTestInstance() { + Script script = new Script(randomAlphaOfLength(10)); + PainlessExecuteAction.Request.SupportedContext context = randomBoolean() ? + PainlessExecuteAction.Request.SupportedContext.PAINLESS_TEST : null; + return new PainlessExecuteAction.Request(script, context); + } + + @Override + protected PainlessExecuteAction.Request createBlankInstance() { + return new PainlessExecuteAction.Request(); + } + + @Override + protected PainlessExecuteAction.Request doParseInstance(XContentParser parser) throws IOException { + return PainlessExecuteAction.Request.parse(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public void testValidate() { + Script script = new Script(ScriptType.STORED, null, randomAlphaOfLength(10), Collections.emptyMap()); + PainlessExecuteAction.Request request = new PainlessExecuteAction.Request(script, null); + Exception e = request.validate(); + assertNotNull(e); + assertEquals("Validation Failed: 1: only inline scripts are supported;", e.getMessage()); + } +} diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/TestRatingEnum.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteResponseTests.java similarity index 60% rename from modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/TestRatingEnum.java rename to modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteResponseTests.java index ea44c215d9214..20f3cf08e04c8 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/TestRatingEnum.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessExecuteResponseTests.java @@ -16,9 +16,19 @@ * specific language governing permissions and limitations * under the License. 
*/ +package org.elasticsearch.painless; -package org.elasticsearch.index.rankeval; +import org.elasticsearch.test.AbstractStreamableTestCase; -enum TestRatingEnum { - IRRELEVANT, RELEVANT; -} \ No newline at end of file +public class PainlessExecuteResponseTests extends AbstractStreamableTestCase { + + @Override + protected PainlessExecuteAction.Response createBlankInstance() { + return new PainlessExecuteAction.Response(); + } + + @Override + protected PainlessExecuteAction.Response createTestInstance() { + return new PainlessExecuteAction.Response(randomAlphaOfLength(10)); + } +} diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java index 0a66b67a2e8ac..911a50468cc17 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java @@ -278,6 +278,6 @@ public void testBogusRegexFlag() { IllegalArgumentException e = expectScriptThrows(IllegalArgumentException.class, () -> { exec("/asdf/b", false); // Not picky so we get a non-assertion error }); - assertEquals("unexpected token ['b'] was expecting one of [{, ';'}].", e.getMessage()); + assertEquals("invalid sequence of tokens near ['b'].", e.getMessage()); } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java index d60da7b795fbc..1bb754db84745 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java @@ -256,7 +256,7 @@ public void testRCurlyNotDelim() { // We don't want PICKY here so we get the normal error message exec("def i = 1} return 1", emptyMap(), emptyMap(), null, false); }); - assertEquals("unexpected token ['}'] was expecting one of [].", e.getMessage()); + assertEquals("invalid sequence of tokens near ['}'].", e.getMessage()); } public void testBadBoxingCast() { diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml index ce8c03afec607..ede2927b992e0 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml @@ -3,6 +3,8 @@ setup: indices.create: index: test body: + settings: + number_of_shards: 1 mappings: test: properties: diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_execute_painless_scripts.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_execute_painless_scripts.yml new file mode 100644 index 0000000000000..7b915cc38dbc0 --- /dev/null +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_execute_painless_scripts.yml @@ -0,0 +1,25 @@ +--- +"Execute with defaults": + - do: + scripts_painless_execute: + body: + script: + source: "params.count / params.total" + params: + count: 100.0 + total: 1000.0 + - match: { result: "0.1" } + +--- +"Execute with execute_api_script context": + - do: + scripts_painless_execute: + body: + script: + source: "params.var1 - params.var2" + params: + var1: 10 + var2: 100 + context: + painless_test: {} + - 
match: { result: "-90" } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java index 0f51f6d5d6369..eb20dc8c680f9 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java @@ -128,7 +128,7 @@ public EvalQueryQuality evaluate(String taskId, SearchHit[] hits, double reciprocalRank = (firstRelevant == -1) ? 0 : 1.0d / firstRelevant; EvalQueryQuality evalQueryQuality = new EvalQueryQuality(taskId, reciprocalRank); - evalQueryQuality.setMetricDetails(new Breakdown(firstRelevant)); + evalQueryQuality.setMetricDetails(new Detail(firstRelevant)); evalQueryQuality.addHitsAndRatings(ratedHits); return evalQueryQuality; } @@ -181,16 +181,16 @@ public final int hashCode() { return Objects.hash(relevantRatingThreshhold, k); } - static class Breakdown implements MetricDetail { + public static final class Detail implements MetricDetail { private final int firstRelevantRank; private static ParseField FIRST_RELEVANT_RANK_FIELD = new ParseField("first_relevant"); - Breakdown(int firstRelevantRank) { + Detail(int firstRelevantRank) { this.firstRelevantRank = firstRelevantRank; } - Breakdown(StreamInput in) throws IOException { + Detail(StreamInput in) throws IOException { this.firstRelevantRank = in.readVInt(); } @@ -206,15 +206,15 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) return builder.field(FIRST_RELEVANT_RANK_FIELD.getPreferredName(), firstRelevantRank); } - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, true, args -> { - return new Breakdown((Integer) args[0]); + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, true, args -> { + return new Detail((Integer) args[0]); }); static { PARSER.declareInt(constructorArg(), FIRST_RELEVANT_RANK_FIELD); } - public static Breakdown fromXContent(XContentParser parser) { + public static Detail fromXContent(XContentParser parser) { return PARSER.apply(parser, null); } @@ -232,24 +232,24 @@ public String getWriteableName() { * the ranking of the first relevant document, or -1 if no relevant document was * found */ - int getFirstRelevantRank() { + public int getFirstRelevantRank() { return firstRelevantRank; } @Override - public final boolean equals(Object obj) { + public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } - MeanReciprocalRank.Breakdown other = (MeanReciprocalRank.Breakdown) obj; + MeanReciprocalRank.Detail other = (MeanReciprocalRank.Detail) obj; return Objects.equals(firstRelevantRank, other.firstRelevantRank); } @Override - public final int hashCode() { + public int hashCode() { return Objects.hash(firstRelevantRank); } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/PrecisionAtK.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/PrecisionAtK.java index 15d955935eeff..136158ea5cba7 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/PrecisionAtK.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/PrecisionAtK.java @@ -181,7 +181,7 @@ public EvalQueryQuality evaluate(String taskId, SearchHit[] hits, } EvalQueryQuality evalQueryQuality = new EvalQueryQuality(taskId, precision); 
evalQueryQuality.setMetricDetails( - new PrecisionAtK.Breakdown(truePositives, truePositives + falsePositives)); + new PrecisionAtK.Detail(truePositives, truePositives + falsePositives)); evalQueryQuality.addHitsAndRatings(ratedSearchHits); return evalQueryQuality; } @@ -217,19 +217,19 @@ public final int hashCode() { return Objects.hash(relevantRatingThreshhold, ignoreUnlabeled, k); } - static class Breakdown implements MetricDetail { + public static final class Detail implements MetricDetail { private static final ParseField DOCS_RETRIEVED_FIELD = new ParseField("docs_retrieved"); private static final ParseField RELEVANT_DOCS_RETRIEVED_FIELD = new ParseField("relevant_docs_retrieved"); private int relevantRetrieved; private int retrieved; - Breakdown(int relevantRetrieved, int retrieved) { + Detail(int relevantRetrieved, int retrieved) { this.relevantRetrieved = relevantRetrieved; this.retrieved = retrieved; } - Breakdown(StreamInput in) throws IOException { + Detail(StreamInput in) throws IOException { this.relevantRetrieved = in.readVInt(); this.retrieved = in.readVInt(); } @@ -242,8 +242,8 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) return builder; } - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, true, args -> { - return new Breakdown((Integer) args[0], (Integer) args[1]); + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, true, args -> { + return new Detail((Integer) args[0], (Integer) args[1]); }); static { @@ -251,7 +251,7 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) PARSER.declareInt(constructorArg(), DOCS_RETRIEVED_FIELD); } - public static Breakdown fromXContent(XContentParser parser) { + public static Detail fromXContent(XContentParser parser) { return PARSER.apply(parser, null); } @@ -266,29 +266,29 @@ public String getWriteableName() { return NAME; } - int getRelevantRetrieved() { + public int getRelevantRetrieved() { return relevantRetrieved; } - int getRetrieved() { + public int getRetrieved() { return retrieved; } @Override - public final boolean equals(Object obj) { + public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } - PrecisionAtK.Breakdown other = (PrecisionAtK.Breakdown) obj; + PrecisionAtK.Detail other = (PrecisionAtK.Detail) obj; return Objects.equals(relevantRetrieved, other.relevantRetrieved) && Objects.equals(retrieved, other.retrieved); } @Override - public final int hashCode() { + public int hashCode() { return Objects.hash(relevantRetrieved, retrieved); } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalNamedXContentProvider.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalNamedXContentProvider.java index 54d68774a016e..c5785ca3847d4 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalNamedXContentProvider.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalNamedXContentProvider.java @@ -38,9 +38,9 @@ public List getNamedXContentParsers() { namedXContent.add(new NamedXContentRegistry.Entry(EvaluationMetric.class, new ParseField(DiscountedCumulativeGain.NAME), DiscountedCumulativeGain::fromXContent)); namedXContent.add(new NamedXContentRegistry.Entry(MetricDetail.class, new ParseField(PrecisionAtK.NAME), - PrecisionAtK.Breakdown::fromXContent)); + 
PrecisionAtK.Detail::fromXContent)); namedXContent.add(new NamedXContentRegistry.Entry(MetricDetail.class, new ParseField(MeanReciprocalRank.NAME), - MeanReciprocalRank.Breakdown::fromXContent)); + MeanReciprocalRank.Detail::fromXContent)); return namedXContent; } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalPlugin.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalPlugin.java index d4ccd7c2180fe..884cf3bafdcda 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalPlugin.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalPlugin.java @@ -60,9 +60,9 @@ public List getNamedWriteables() { namedWriteables.add(new NamedWriteableRegistry.Entry(EvaluationMetric.class, MeanReciprocalRank.NAME, MeanReciprocalRank::new)); namedWriteables.add( new NamedWriteableRegistry.Entry(EvaluationMetric.class, DiscountedCumulativeGain.NAME, DiscountedCumulativeGain::new)); - namedWriteables.add(new NamedWriteableRegistry.Entry(MetricDetail.class, PrecisionAtK.NAME, PrecisionAtK.Breakdown::new)); + namedWriteables.add(new NamedWriteableRegistry.Entry(MetricDetail.class, PrecisionAtK.NAME, PrecisionAtK.Detail::new)); namedWriteables - .add(new NamedWriteableRegistry.Entry(MetricDetail.class, MeanReciprocalRank.NAME, MeanReciprocalRank.Breakdown::new)); + .add(new NamedWriteableRegistry.Entry(MetricDetail.class, MeanReciprocalRank.NAME, MeanReciprocalRank.Detail::new)); return namedWriteables; } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java index 22c3542c0fab4..ba03a734ec760 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java @@ -253,7 +253,7 @@ private void assertParsedCorrect(String xContent, Integer expectedUnknownDocRati public static DiscountedCumulativeGain createTestItem() { boolean normalize = randomBoolean(); - Integer unknownDocRating = new Integer(randomIntBetween(0, 1000)); + Integer unknownDocRating = Integer.valueOf(randomIntBetween(0, 1000)); return new DiscountedCumulativeGain(normalize, unknownDocRating, 10); } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/EvalQueryQualityTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/EvalQueryQualityTests.java index df6de75ba2cb4..112cf4eaaf72e 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/EvalQueryQualityTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/EvalQueryQualityTests.java @@ -69,9 +69,9 @@ public static EvalQueryQuality randomEvalQueryQuality() { randomDoubleBetween(0.0, 1.0, true)); if (randomBoolean()) { if (randomBoolean()) { - evalQueryQuality.setMetricDetails(new PrecisionAtK.Breakdown(randomIntBetween(0, 1000), randomIntBetween(0, 1000))); + evalQueryQuality.setMetricDetails(new PrecisionAtK.Detail(randomIntBetween(0, 1000), randomIntBetween(0, 1000))); } else { - evalQueryQuality.setMetricDetails(new MeanReciprocalRank.Breakdown(randomIntBetween(0, 1000))); + evalQueryQuality.setMetricDetails(new MeanReciprocalRank.Detail(randomIntBetween(0, 1000))); } } evalQueryQuality.addHitsAndRatings(ratedHits); @@ -137,7 +137,7 @@ private static EvalQueryQuality 
mutateTestItem(EvalQueryQuality original) { break; case 2: if (metricDetails == null) { - metricDetails = new PrecisionAtK.Breakdown(1, 5); + metricDetails = new PrecisionAtK.Detail(1, 5); } else { metricDetails = null; } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java index 6604dbc74a065..c9ff39bbd118a 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java @@ -46,6 +46,9 @@ public class MeanReciprocalRankTests extends ESTestCase { + private static final int IRRELEVANT_RATING_0 = 0; + private static final int RELEVANT_RATING_1 = 1; + public void testParseFromXContent() throws IOException { String xContent = "{ }"; try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { @@ -84,16 +87,16 @@ public void testMaxAcceptableRank() { int relevantAt = randomIntBetween(0, searchHits); for (int i = 0; i <= searchHits; i++) { if (i == relevantAt) { - ratedDocs.add(new RatedDocument("test", Integer.toString(i), TestRatingEnum.RELEVANT.ordinal())); + ratedDocs.add(new RatedDocument("test", Integer.toString(i), RELEVANT_RATING_1)); } else { - ratedDocs.add(new RatedDocument("test", Integer.toString(i), TestRatingEnum.IRRELEVANT.ordinal())); + ratedDocs.add(new RatedDocument("test", Integer.toString(i), IRRELEVANT_RATING_0)); } } int rankAtFirstRelevant = relevantAt + 1; EvalQueryQuality evaluation = reciprocalRank.evaluate("id", hits, ratedDocs); assertEquals(1.0 / rankAtFirstRelevant, evaluation.getQualityLevel(), Double.MIN_VALUE); - assertEquals(rankAtFirstRelevant, ((MeanReciprocalRank.Breakdown) evaluation.getMetricDetails()).getFirstRelevantRank()); + assertEquals(rankAtFirstRelevant, ((MeanReciprocalRank.Detail) evaluation.getMetricDetails()).getFirstRelevantRank()); // check that if we have fewer search hits than relevant doc position, // we don't find any result and get 0.0 quality level @@ -110,15 +113,15 @@ public void testEvaluationOneRelevantInResults() { int relevantAt = randomIntBetween(0, 9); for (int i = 0; i <= 20; i++) { if (i == relevantAt) { - ratedDocs.add(new RatedDocument("test", Integer.toString(i), TestRatingEnum.RELEVANT.ordinal())); + ratedDocs.add(new RatedDocument("test", Integer.toString(i), RELEVANT_RATING_1)); } else { - ratedDocs.add(new RatedDocument("test", Integer.toString(i), TestRatingEnum.IRRELEVANT.ordinal())); + ratedDocs.add(new RatedDocument("test", Integer.toString(i), IRRELEVANT_RATING_0)); } } EvalQueryQuality evaluation = reciprocalRank.evaluate("id", hits, ratedDocs); assertEquals(1.0 / (relevantAt + 1), evaluation.getQualityLevel(), Double.MIN_VALUE); - assertEquals(relevantAt + 1, ((MeanReciprocalRank.Breakdown) evaluation.getMetricDetails()).getFirstRelevantRank()); + assertEquals(relevantAt + 1, ((MeanReciprocalRank.Detail) evaluation.getMetricDetails()).getFirstRelevantRank()); } /** @@ -138,7 +141,7 @@ public void testPrecisionAtFiveRelevanceThreshold() { MeanReciprocalRank reciprocalRank = new MeanReciprocalRank(2, 10); EvalQueryQuality evaluation = reciprocalRank.evaluate("id", hits, rated); assertEquals((double) 1 / 3, evaluation.getQualityLevel(), 0.00001); - assertEquals(3, ((MeanReciprocalRank.Breakdown) evaluation.getMetricDetails()).getFirstRelevantRank()); + assertEquals(3, ((MeanReciprocalRank.Detail) 
evaluation.getMetricDetails()).getFirstRelevantRank()); } public void testCombine() { @@ -162,7 +165,7 @@ public void testNoResults() throws Exception { SearchHit[] hits = new SearchHit[0]; EvalQueryQuality evaluated = (new MeanReciprocalRank()).evaluate("id", hits, Collections.emptyList()); assertEquals(0.0d, evaluated.getQualityLevel(), 0.00001); - assertEquals(-1, ((MeanReciprocalRank.Breakdown) evaluated.getMetricDetails()).getFirstRelevantRank()); + assertEquals(-1, ((MeanReciprocalRank.Detail) evaluated.getMetricDetails()).getFirstRelevantRank()); } public void testXContentRoundtrip() throws IOException { diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java index aa3dd5a0b7e32..3efff57920b84 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java @@ -46,26 +46,29 @@ public class PrecisionAtKTests extends ESTestCase { + private static final int IRRELEVANT_RATING_0 = 0; + private static final int RELEVANT_RATING_1 = 1; + public void testPrecisionAtFiveCalculation() { List rated = new ArrayList<>(); - rated.add(createRatedDoc("test", "0", TestRatingEnum.RELEVANT.ordinal())); + rated.add(createRatedDoc("test", "0", RELEVANT_RATING_1)); EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", toSearchHits(rated, "test"), rated); assertEquals(1, evaluated.getQualityLevel(), 0.00001); - assertEquals(1, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); - assertEquals(1, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + assertEquals(1, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(1, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRetrieved()); } public void testPrecisionAtFiveIgnoreOneResult() { List rated = new ArrayList<>(); - rated.add(createRatedDoc("test", "0", TestRatingEnum.RELEVANT.ordinal())); - rated.add(createRatedDoc("test", "1", TestRatingEnum.RELEVANT.ordinal())); - rated.add(createRatedDoc("test", "2", TestRatingEnum.RELEVANT.ordinal())); - rated.add(createRatedDoc("test", "3", TestRatingEnum.RELEVANT.ordinal())); - rated.add(createRatedDoc("test", "4", TestRatingEnum.IRRELEVANT.ordinal())); + rated.add(createRatedDoc("test", "0", RELEVANT_RATING_1)); + rated.add(createRatedDoc("test", "1", RELEVANT_RATING_1)); + rated.add(createRatedDoc("test", "2", RELEVANT_RATING_1)); + rated.add(createRatedDoc("test", "3", RELEVANT_RATING_1)); + rated.add(createRatedDoc("test", "4", IRRELEVANT_RATING_0)); EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", toSearchHits(rated, "test"), rated); assertEquals((double) 4 / 5, evaluated.getQualityLevel(), 0.00001); - assertEquals(4, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); - assertEquals(5, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + assertEquals(4, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(5, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRetrieved()); } /** @@ -83,28 +86,28 @@ public void testPrecisionAtFiveRelevanceThreshold() { PrecisionAtK precisionAtN = new PrecisionAtK(2, false, 5); EvalQueryQuality evaluated = precisionAtN.evaluate("id", toSearchHits(rated, "test"), rated); assertEquals((double) 3 / 5, 
evaluated.getQualityLevel(), 0.00001); - assertEquals(3, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); - assertEquals(5, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + assertEquals(3, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(5, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRetrieved()); } public void testPrecisionAtFiveCorrectIndex() { List rated = new ArrayList<>(); - rated.add(createRatedDoc("test_other", "0", TestRatingEnum.RELEVANT.ordinal())); - rated.add(createRatedDoc("test_other", "1", TestRatingEnum.RELEVANT.ordinal())); - rated.add(createRatedDoc("test", "0", TestRatingEnum.RELEVANT.ordinal())); - rated.add(createRatedDoc("test", "1", TestRatingEnum.RELEVANT.ordinal())); - rated.add(createRatedDoc("test", "2", TestRatingEnum.IRRELEVANT.ordinal())); + rated.add(createRatedDoc("test_other", "0", RELEVANT_RATING_1)); + rated.add(createRatedDoc("test_other", "1", RELEVANT_RATING_1)); + rated.add(createRatedDoc("test", "0", RELEVANT_RATING_1)); + rated.add(createRatedDoc("test", "1", RELEVANT_RATING_1)); + rated.add(createRatedDoc("test", "2", IRRELEVANT_RATING_0)); // the following search hits contain only the last three documents EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", toSearchHits(rated.subList(2, 5), "test"), rated); assertEquals((double) 2 / 3, evaluated.getQualityLevel(), 0.00001); - assertEquals(2, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); - assertEquals(3, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + assertEquals(2, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(3, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRetrieved()); } public void testIgnoreUnlabeled() { List rated = new ArrayList<>(); - rated.add(createRatedDoc("test", "0", TestRatingEnum.RELEVANT.ordinal())); - rated.add(createRatedDoc("test", "1", TestRatingEnum.RELEVANT.ordinal())); + rated.add(createRatedDoc("test", "0", RELEVANT_RATING_1)); + rated.add(createRatedDoc("test", "1", RELEVANT_RATING_1)); // add an unlabeled search hit SearchHit[] searchHits = Arrays.copyOf(toSearchHits(rated, "test"), 3); searchHits[2] = new SearchHit(2, "2", new Text("testtype"), Collections.emptyMap()); @@ -112,15 +115,15 @@ public void testIgnoreUnlabeled() { EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", searchHits, rated); assertEquals((double) 2 / 3, evaluated.getQualityLevel(), 0.00001); - assertEquals(2, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); - assertEquals(3, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + assertEquals(2, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(3, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRetrieved()); // also try with setting `ignore_unlabeled` PrecisionAtK prec = new PrecisionAtK(1, true, 10); evaluated = prec.evaluate("id", searchHits, rated); assertEquals((double) 2 / 2, evaluated.getQualityLevel(), 0.00001); - assertEquals(2, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); - assertEquals(2, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + assertEquals(2, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(2, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRetrieved()); } public 
void testNoRatedDocs() throws Exception { @@ -131,23 +134,23 @@ public void testNoRatedDocs() throws Exception { } EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", hits, Collections.emptyList()); assertEquals(0.0d, evaluated.getQualityLevel(), 0.00001); - assertEquals(0, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); - assertEquals(5, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + assertEquals(0, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(5, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRetrieved()); // also try with setting `ignore_unlabeled` PrecisionAtK prec = new PrecisionAtK(1, true, 10); evaluated = prec.evaluate("id", hits, Collections.emptyList()); assertEquals(0.0d, evaluated.getQualityLevel(), 0.00001); - assertEquals(0, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); - assertEquals(0, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + assertEquals(0, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(0, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRetrieved()); } public void testNoResults() throws Exception { SearchHit[] hits = new SearchHit[0]; EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", hits, Collections.emptyList()); assertEquals(0.0d, evaluated.getQualityLevel(), 0.00001); - assertEquals(0, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); - assertEquals(0, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + assertEquals(0, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(0, ((PrecisionAtK.Detail) evaluated.getMetricDetails()).getRetrieved()); } public void testParseFromXContent() throws IOException { diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java index dc0bbddeb62b1..b55c57bae2bcf 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java @@ -20,12 +20,13 @@ package org.elasticsearch.index.rankeval; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.index.rankeval.PrecisionAtK.Breakdown; +import org.elasticsearch.index.rankeval.PrecisionAtK.Detail; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -40,9 +41,15 @@ import java.util.Set; import static org.elasticsearch.index.rankeval.EvaluationMetric.filterUnknownDocuments; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.instanceOf; public class RankEvalRequestIT extends ESIntegTestCase { + + private static final String TEST_INDEX = "test"; + private static final String INDEX_ALIAS = "alias0"; + private static final int 
RELEVANT_RATING_1 = 1; + @Override protected Collection<Class<? extends Plugin>> transportClientPlugins() { return Arrays.asList(RankEvalPlugin.class); @@ -55,20 +62,23 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { @Before public void setup() { - createIndex("test"); + createIndex(TEST_INDEX); ensureGreen(); - client().prepareIndex("test", "testtype").setId("1") + client().prepareIndex(TEST_INDEX, "testtype").setId("1") .setSource("text", "berlin", "title", "Berlin, Germany", "population", 3670622).get(); - client().prepareIndex("test", "testtype").setId("2").setSource("text", "amsterdam", "population", 851573).get(); - client().prepareIndex("test", "testtype").setId("3").setSource("text", "amsterdam", "population", 851573).get(); - client().prepareIndex("test", "testtype").setId("4").setSource("text", "amsterdam", "population", 851573).get(); - client().prepareIndex("test", "testtype").setId("5").setSource("text", "amsterdam", "population", 851573).get(); - client().prepareIndex("test", "testtype").setId("6").setSource("text", "amsterdam", "population", 851573).get(); + client().prepareIndex(TEST_INDEX, "testtype").setId("2").setSource("text", "amsterdam", "population", 851573).get(); + client().prepareIndex(TEST_INDEX, "testtype").setId("3").setSource("text", "amsterdam", "population", 851573).get(); + client().prepareIndex(TEST_INDEX, "testtype").setId("4").setSource("text", "amsterdam", "population", 851573).get(); + client().prepareIndex(TEST_INDEX, "testtype").setId("5").setSource("text", "amsterdam", "population", 851573).get(); + client().prepareIndex(TEST_INDEX, "testtype").setId("6").setSource("text", "amsterdam", "population", 851573).get(); // add another index for testing closed indices etc... client().prepareIndex("test2", "testtype").setId("7").setSource("text", "amsterdam", "population", 851573).get(); refresh(); + + // set up an alias that can also be used in tests + assertAcked(client().admin().indices().prepareAliases().addAliasAction(AliasActions.add().index(TEST_INDEX).alias(INDEX_ALIAS))); } /** @@ -98,7 +108,8 @@ public void testPrecisionAtRequest() { RankEvalAction.INSTANCE, new RankEvalRequest()); builder.setRankEvalSpec(task); - RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request().indices("test")) + String indexToUse = randomBoolean() ?
TEST_INDEX : INDEX_ALIAS; + RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request().indices(indexToUse)) .actionGet(); // the expected Prec@ for the first query is 4/6 and the expected Prec@ for the // second is 1/6, divided by 2 to get the average @@ -117,7 +128,7 @@ public void testPrecisionAtRequest() { if (id.equals("1") || id.equals("6")) { assertFalse(hit.getRating().isPresent()); } else { - assertEquals(TestRatingEnum.RELEVANT.ordinal(), hit.getRating().get().intValue()); + assertEquals(RELEVANT_RATING_1, hit.getRating().get().intValue()); } } } @@ -128,7 +139,7 @@ public void testPrecisionAtRequest() { for (RatedSearchHit hit : hitsAndRatings) { String id = hit.getSearchHit().getId(); if (id.equals("1")) { - assertEquals(TestRatingEnum.RELEVANT.ordinal(), hit.getRating().get().intValue()); + assertEquals(RELEVANT_RATING_1, hit.getRating().get().intValue()); } else { assertFalse(hit.getRating().isPresent()); } @@ -140,7 +151,7 @@ public void testPrecisionAtRequest() { metric = new PrecisionAtK(1, false, 3); task = new RankEvalSpec(specifications, metric); - builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest(task, new String[] { "test" })); + builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest(task, new String[] { TEST_INDEX })); response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet(); // if we look only at top 3 documente, the expected P@3 for the first query is @@ -160,19 +171,19 @@ public void testDCGRequest() { List specifications = new ArrayList<>(); List ratedDocs = Arrays.asList( - new RatedDocument("test", "1", 3), - new RatedDocument("test", "2", 2), - new RatedDocument("test", "3", 3), - new RatedDocument("test", "4", 0), - new RatedDocument("test", "5", 1), - new RatedDocument("test", "6", 2)); + new RatedDocument(TEST_INDEX, "1", 3), + new RatedDocument(TEST_INDEX, "2", 2), + new RatedDocument(TEST_INDEX, "3", 3), + new RatedDocument(TEST_INDEX, "4", 0), + new RatedDocument(TEST_INDEX, "5", 1), + new RatedDocument(TEST_INDEX, "6", 2)); specifications.add(new RatedRequest("amsterdam_query", ratedDocs, testQuery)); DiscountedCumulativeGain metric = new DiscountedCumulativeGain(false, null, 10); RankEvalSpec task = new RankEvalSpec(specifications, metric); RankEvalRequestBuilder builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, - new RankEvalRequest(task, new String[] { "test" })); + new RankEvalRequest(task, new String[] { TEST_INDEX })); RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet(); assertEquals(DiscountedCumulativeGainTests.EXPECTED_DCG, response.getEvaluationResult(), 10E-14); @@ -181,7 +192,7 @@ public void testDCGRequest() { metric = new DiscountedCumulativeGain(false, null, 3); task = new RankEvalSpec(specifications, metric); - builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest(task, new String[] { "test" })); + builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest(task, new String[] { TEST_INDEX })); response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet(); assertEquals(12.39278926071437, response.getEvaluationResult(), 10E-14); @@ -200,7 +211,7 @@ public void testMRRRequest() { RankEvalSpec task = new RankEvalSpec(specifications, metric); RankEvalRequestBuilder builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, - new 
RankEvalRequest(task, new String[] { "test" })); + new RankEvalRequest(task, new String[] { TEST_INDEX })); RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet(); // the expected reciprocal rank for the amsterdam_query is 1/5 @@ -213,7 +224,7 @@ public void testMRRRequest() { metric = new MeanReciprocalRank(1, 3); task = new RankEvalSpec(specifications, metric); - builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest(task, new String[] { "test" })); + builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest(task, new String[] { TEST_INDEX })); response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet(); // limiting to top 3 results, the amsterdam_query has no relevant document in it @@ -244,7 +255,7 @@ public void testBadQuery() { RankEvalSpec task = new RankEvalSpec(specifications, new PrecisionAtK()); RankEvalRequestBuilder builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, - new RankEvalRequest(task, new String[] { "test" })); + new RankEvalRequest(task, new String[] { TEST_INDEX })); builder.setRankEvalSpec(task); RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request()).actionGet(); @@ -259,16 +270,16 @@ public void testBadQuery() { public void testIndicesOptions() { SearchSourceBuilder amsterdamQuery = new SearchSourceBuilder().query(new MatchAllQueryBuilder()); List relevantDocs = createRelevant("2", "3", "4", "5", "6"); - relevantDocs.add(new RatedDocument("test2", "7", TestRatingEnum.RELEVANT.ordinal())); + relevantDocs.add(new RatedDocument("test2", "7", RELEVANT_RATING_1)); List specifications = new ArrayList<>(); specifications.add(new RatedRequest("amsterdam_query", relevantDocs, amsterdamQuery)); RankEvalSpec task = new RankEvalSpec(specifications, new PrecisionAtK()); - RankEvalRequest request = new RankEvalRequest(task, new String[] { "test", "test2" }); + RankEvalRequest request = new RankEvalRequest(task, new String[] { TEST_INDEX, "test2" }); request.setRankEvalSpec(task); RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); - Breakdown details = (PrecisionAtK.Breakdown) response.getPartialResults().get("amsterdam_query").getMetricDetails(); + Detail details = (PrecisionAtK.Detail) response.getPartialResults().get("amsterdam_query").getMetricDetails(); assertEquals(7, details.getRetrieved()); assertEquals(6, details.getRelevantRetrieved()); @@ -277,7 +288,7 @@ public void testIndicesOptions() { request.indicesOptions(IndicesOptions.fromParameters(null, "true", null, SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); - details = (PrecisionAtK.Breakdown) response.getPartialResults().get("amsterdam_query").getMetricDetails(); + details = (PrecisionAtK.Detail) response.getPartialResults().get("amsterdam_query").getMetricDetails(); assertEquals(6, details.getRetrieved()); assertEquals(5, details.getRelevantRetrieved()); @@ -292,12 +303,12 @@ public void testIndicesOptions() { request = new RankEvalRequest(task, new String[] { "tes*" }); request.indicesOptions(IndicesOptions.fromParameters("none", null, null, SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); - details = (PrecisionAtK.Breakdown) response.getPartialResults().get("amsterdam_query").getMetricDetails(); + details = (PrecisionAtK.Detail) 
response.getPartialResults().get("amsterdam_query").getMetricDetails(); assertEquals(0, details.getRetrieved()); request.indicesOptions(IndicesOptions.fromParameters("open", null, null, SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); - details = (PrecisionAtK.Breakdown) response.getPartialResults().get("amsterdam_query").getMetricDetails(); + details = (PrecisionAtK.Detail) response.getPartialResults().get("amsterdam_query").getMetricDetails(); assertEquals(6, details.getRetrieved()); assertEquals(5, details.getRelevantRetrieved()); @@ -310,7 +321,7 @@ public void testIndicesOptions() { request = new RankEvalRequest(task, new String[] { "bad*" }); request.indicesOptions(IndicesOptions.fromParameters(null, null, "true", SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); - details = (PrecisionAtK.Breakdown) response.getPartialResults().get("amsterdam_query").getMetricDetails(); + details = (PrecisionAtK.Detail) response.getPartialResults().get("amsterdam_query").getMetricDetails(); assertEquals(0, details.getRetrieved()); request.indicesOptions(IndicesOptions.fromParameters(null, null, "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); @@ -322,7 +333,7 @@ public void testIndicesOptions() { private static List createRelevant(String... docs) { List relevant = new ArrayList<>(); for (String doc : docs) { - relevant.add(new RatedDocument("test", doc, TestRatingEnum.RELEVANT.ordinal())); + relevant.add(new RatedDocument("test", doc, RELEVANT_RATING_1)); } return relevant; } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java index b49811a9bcaec..e0899b451af11 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java @@ -52,7 +52,6 @@ import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.startsWith; public class RankEvalSpecTests extends ESTestCase { diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java index ad962178f581f..196b50b7f6163 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java @@ -19,8 +19,6 @@ package org.elasticsearch.index.rankeval; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; @@ -54,7 +52,6 @@ import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.startsWith; public class RatedRequestsTests extends ESTestCase { @@ -139,8 +136,8 @@ public void testXContentParsingIsNotLenient() throws IOException { 
Exception exception = expectThrows(Exception.class, () -> RatedRequest.fromXContent(parser)); if (exception instanceof XContentParseException) { XContentParseException xcpe = (XContentParseException) exception; - assertThat(ExceptionsHelper.detailedMessage(xcpe), containsString("unknown field")); - assertThat(ExceptionsHelper.detailedMessage(xcpe), containsString("parser not found")); + assertThat(xcpe.getCause().getMessage(), containsString("unknown field")); + assertThat(xcpe.getCause().getMessage(), containsString("parser not found")); } if (exception instanceof XContentParseException) { assertThat(exception.getMessage(), containsString("[request] failed to parse field")); diff --git a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml index fcf5f945a06ae..3900b1f32baa7 100644 --- a/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml +++ b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/10_basic.yml @@ -1,10 +1,4 @@ ---- -"Response format": - - - skip: - version: " - 6.2.99" - reason: response format was updated in 6.3 - +setup: - do: indices.create: index: foo @@ -43,8 +37,21 @@ - do: indices.refresh: {} + - do: + indices.put_alias: + index: foo + name: alias + +--- +"Response format": + + - skip: + version: " - 6.2.99" + reason: response format was updated in 6.3 + - do: rank_eval: + index: foo, body: { "requests" : [ { @@ -84,52 +91,43 @@ - match: { details.berlin_query.hits.0.hit._id: "doc1" } - match: { details.berlin_query.hits.0.rating: 1} - match: { details.berlin_query.hits.1.hit._id: "doc4" } - - is_false: details.berlin_query.hits.1.rating + - is_false: details.berlin_query.hits.1.rating --- -"Mean Reciprocal Rank": - - - skip: - version: " - 6.2.99" - reason: response format was updated in 6.3 +"Alias resolution": - do: - indices.create: - index: foo - body: - settings: - index: - number_of_shards: 1 - - do: - index: - index: foo - type: bar - id: doc1 - body: { "text": "berlin" } + rank_eval: + index: alias + body: { + "requests" : [ + { + "id": "amsterdam_query", + "request": { "query": { "match" : {"text" : "amsterdam" }}}, + "ratings": [ + {"_index": "foo", "_id": "doc1", "rating": 0}, + {"_index": "foo", "_id": "doc2", "rating": 1}, + {"_index": "foo", "_id": "doc3", "rating": 1}] + }, + { + "id" : "berlin_query", + "request": { "query": { "match" : { "text" : "berlin" } }, "size" : 10 }, + "ratings": [{"_index": "foo", "_id": "doc1", "rating": 1}] + } + ], + "metric" : { "precision": { "ignore_unlabeled" : true }} + } - - do: - index: - index: foo - type: bar - id: doc2 - body: { "text": "amsterdam" } + - match: { quality_level: 1} + - match: { details.amsterdam_query.quality_level: 1.0} + - match: { details.berlin_query.quality_level: 1.0} - - do: - index: - index: foo - type: bar - id: doc3 - body: { "text": "amsterdam" } - - - do: - index: - index: foo - type: bar - id: doc4 - body: { "text": "something about amsterdam and berlin" } +--- +"Mean Reciprocal Rank": - - do: - indices.refresh: {} + - skip: + version: " - 6.2.99" + reason: response format was updated in 6.3 - do: rank_eval: diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 479fe78cc8071..f34f4cf52e09c 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -17,6 +17,10 @@ * under the License. 
*/ +import org.apache.tools.ant.taskdefs.condition.Os + +import static org.elasticsearch.gradle.BuildPlugin.getJavaHome + apply plugin: 'elasticsearch.test-with-dependencies' esplugin { @@ -60,3 +64,61 @@ thirdPartyAudit.excludes = [ 'org.apache.log.Hierarchy', 'org.apache.log.Logger', ] + +// Support for testing reindex-from-remote against old Elasticsearch versions +configurations { + oldesFixture + es2 + es1 + es090 +} + +dependencies { + oldesFixture project(':test:fixtures:old-elasticsearch') + /* Right now we just test against the latest version of each major we expect + * reindex-from-remote to work against. We could randomize the versions but + * that doesn't seem worth it at this point. */ + es2 'org.elasticsearch.distribution.zip:elasticsearch:2.4.5@zip' + es1 'org.elasticsearch:elasticsearch:1.7.6@zip' + es090 'org.elasticsearch:elasticsearch:0.90.13@zip' +} + +if (Os.isFamily(Os.FAMILY_WINDOWS)) { + // we can't get the pid files in windows so we skip reindex-from-old + integTestRunner.systemProperty "tests.fromOld", "false" +} else { + integTestRunner.systemProperty "tests.fromOld", "true" + /* Set up tasks to unzip and run the old versions of ES before running the + * integration tests. */ + for (String version : ['2', '1', '090']) { + Task unzip = task("unzipEs${version}", type: Sync) { + Configuration oldEsDependency = configurations['es' + version] + dependsOn oldEsDependency + /* Use a closure here to delay resolution of the dependency until we need + * it */ + from { + oldEsDependency.collect { zipTree(it) } + } + into temporaryDir + } + Task fixture = task("oldEs${version}Fixture", + type: org.elasticsearch.gradle.test.AntFixture) { + dependsOn project.configurations.oldesFixture + dependsOn unzip + executable = new File(project.runtimeJavaHome, 'bin/java') + env 'CLASSPATH', "${ -> project.configurations.oldesFixture.asPath }" + env 'JAVA_HOME', getJavaHome(it, 7) + args 'oldes.OldElasticsearch', + baseDir, + unzip.temporaryDir, + version == '090' + } + integTest.dependsOn fixture + integTestRunner { + /* Use a closure on the string to delay evaluation until right before we + * run the integration tests so that we can be sure that the file is + * ready.
*/ + systemProperty "es${version}.port", "${ -> fixture.addressAndPort }" + } + } +} diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java index da0dbf2aae345..131c959af8afc 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java @@ -158,10 +158,10 @@ private void testCase( final Settings nodeSettings = Settings.builder() // use pools of size 1 so we can block them - .put("thread_pool.bulk.size", 1) + .put("thread_pool.write.size", 1) .put("thread_pool.search.size", 1) // use queues of size 1 because size 0 is broken and because search requests need the queue to function - .put("thread_pool.bulk.queue_size", 1) + .put("thread_pool.write.queue_size", 1) .put("thread_pool.search.queue_size", 1) .put("node.attr.color", "blue") .build(); @@ -203,7 +203,7 @@ private void testCase( assertBusy(() -> assertThat(taskStatus(action).getSearchRetries(), greaterThan(0L))); logger.info("Blocking bulk and unblocking search so we start to get bulk rejections"); - CyclicBarrier bulkBlock = blockExecutor(ThreadPool.Names.BULK, node); + CyclicBarrier bulkBlock = blockExecutor(ThreadPool.Names.WRITE, node); initialSearchBlock.await(); logger.info("Waiting for bulk rejections"); diff --git a/qa/reindex-from-old/src/test/java/org/elasticsearch/smoketest/ReindexFromOldRemoteIT.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/ReindexFromOldRemoteIT.java similarity index 95% rename from qa/reindex-from-old/src/test/java/org/elasticsearch/smoketest/ReindexFromOldRemoteIT.java rename to modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/ReindexFromOldRemoteIT.java index 459aff3439710..5d359053a6668 100644 --- a/qa/reindex-from-old/src/test/java/org/elasticsearch/smoketest/ReindexFromOldRemoteIT.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/ReindexFromOldRemoteIT.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.smoketest; +package org.elasticsearch.index.reindex.remote; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; @@ -27,6 +27,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Booleans; import org.elasticsearch.test.rest.ESRestTestCase; import java.io.IOException; @@ -38,6 +39,9 @@ public class ReindexFromOldRemoteIT extends ESRestTestCase { private void oldEsTestCase(String portPropertyName, String requestsPerSecond) throws IOException { + boolean enabled = Booleans.parseBoolean(System.getProperty("tests.fromOld")); + assumeTrue("test is disabled, probably because this is windows", enabled); + int oldEsPort = Integer.parseInt(System.getProperty(portPropertyName)); try (RestClient oldEs = RestClient.builder(new HttpHost("127.0.0.1", oldEsPort)).build()) { try { diff --git a/plugins/build.gradle b/plugins/build.gradle index 27655abf534f6..cf942148dea94 100644 --- a/plugins/build.gradle +++ b/plugins/build.gradle @@ -27,7 +27,7 @@ configure(subprojects.findAll { it.parent.path == project.path }) { // for local ES plugins, the name of the plugin is the same as the directory name project.name - licenseFile rootProject.file('LICENSE.txt') + licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') noticeFile rootProject.file('NOTICE.txt') } } diff --git a/plugins/discovery-file/build.gradle b/plugins/discovery-file/build.gradle index 145d959fa4100..529b8cbef304d 100644 --- a/plugins/discovery-file/build.gradle +++ b/plugins/discovery-file/build.gradle @@ -38,6 +38,7 @@ task setupSeedNodeAndUnicastHostsFile(type: DefaultTask) { // setup the initial cluster with one node that will serve as the seed node // for unicast discovery ClusterConfiguration config = new ClusterConfiguration(project) +config.distribution = 'integ-test-zip' config.clusterName = 'discovery-file-test-cluster' List nodes = ClusterFormationTasks.setup(project, 'initialCluster', setupSeedNodeAndUnicastHostsFile, config) File srcUnicastHostsFile = file('build/cluster/unicast_hosts.txt') diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 631157a7e175b..8231e15af200c 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -153,6 +153,7 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture', project.afterEvaluate { for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSecureHa']) { ClusterConfiguration cluster = project.extensions.getByName("${integTestTaskName}Cluster") as ClusterConfiguration + cluster.distribution = 'integ-test-zip' cluster.dependsOn(project.bundlePlugin) Task restIntegTestTask = project.tasks.getByName(integTestTaskName) diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 46988a2dd5107..23252881cd75f 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.test.AntFixture - /* * Licensed to Elasticsearch under one or more contributor * license agreements. 
See the NOTICE file distributed with @@ -66,28 +64,14 @@ test { exclude '**/*CredentialsTests.class' } -forbiddenApisTest { - // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage - bundledSignatures -= 'jdk-non-portable' - bundledSignatures += 'jdk-internal' -} - -/** A task to start the AmazonS3Fixture which emulates a S3 service **/ -task s3Fixture(type: AntFixture) { - dependsOn compileTestJava - env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" - executable = new File(project.runtimeJavaHome, 'bin/java') - args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, 'bucket_test' +check { + // also execute the QA tests when testing the plugin + dependsOn 'qa:amazon-s3:check' } integTestCluster { - dependsOn s3Fixture - keystoreSetting 's3.client.integration_test.access_key', "s3_integration_test_access_key" keystoreSetting 's3.client.integration_test.secret_key', "s3_integration_test_secret_key" - - /* Use a closure on the string to delay evaluation until tests are executed */ - setting 's3.client.integration_test.endpoint', "http://${ -> s3Fixture.addressAndPort }" } thirdPartyAudit.excludes = [ diff --git a/plugins/repository-s3/qa/amazon-s3/build.gradle b/plugins/repository-s3/qa/amazon-s3/build.gradle new file mode 100644 index 0000000000000..5e288899021a1 --- /dev/null +++ b/plugins/repository-s3/qa/amazon-s3/build.gradle @@ -0,0 +1,83 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import org.elasticsearch.gradle.MavenFilteringHack +import org.elasticsearch.gradle.test.AntFixture + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: ':plugins:repository-s3', configuration: 'runtime') +} + +integTestCluster { + plugin ':plugins:repository-s3' +} + +forbiddenApisTest { + // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' +} + +boolean useFixture = false + +String s3AccessKey = System.getenv("amazon_s3_access_key") +String s3SecretKey = System.getenv("amazon_s3_secret_key") +String s3Bucket = System.getenv("amazon_s3_bucket") +String s3BasePath = System.getenv("amazon_s3_base_path") + +if (!s3AccessKey && !s3SecretKey && !s3Bucket && !s3BasePath) { + s3AccessKey = 's3_integration_test_access_key' + s3SecretKey = 's3_integration_test_secret_key' + s3Bucket = 'bucket_test' + s3BasePath = 'integration_test' + useFixture = true +} + +/** A task to start the AmazonS3Fixture which emulates a S3 service **/ +task s3Fixture(type: AntFixture) { + dependsOn compileTestJava + env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" + executable = new File(project.runtimeJavaHome, 'bin/java') + args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, s3Bucket +} + +Map expansions = [ + 'bucket': s3Bucket, + 'base_path': s3BasePath +] +processTestResources { + inputs.properties(expansions) + MavenFilteringHack.filter(it, expansions) +} + +integTestCluster { + keystoreSetting 's3.client.integration_test.access_key', s3AccessKey + keystoreSetting 's3.client.integration_test.secret_key', s3SecretKey + + if (useFixture) { + dependsOn s3Fixture + /* Use a closure on the string to delay evaluation until tests are executed */ + setting 's3.client.integration_test.endpoint', "http://${-> s3Fixture.addressAndPort}" + } else { + println "Using an external service to test the repository-s3 plugin" + } +} \ No newline at end of file diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java similarity index 100% rename from plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java rename to plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3RepositoryClientYamlTestSuiteIT.java b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3RepositoryClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..afcc0fa353482 --- /dev/null +++ b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3RepositoryClientYamlTestSuiteIT.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.s3; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +public class AmazonS3RepositoryClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + public AmazonS3RepositoryClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } +} diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java similarity index 100% rename from plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java rename to plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository.yml b/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository.yml new file mode 100644 index 0000000000000..8b3daccf0a2d7 --- /dev/null +++ b/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository.yml @@ -0,0 +1,183 @@ +# Integration tests for repository-s3 +--- +"Snapshot/Restore with repository-s3": + + # Register repository + - do: + snapshot.create_repository: + repository: repository + body: + type: s3 + settings: + bucket: ${bucket} + client: integration_test + base_path: ${base_path} + canned_acl: private + storage_class: standard + + - match: { acknowledged: true } + + # Get repository + - do: + snapshot.get_repository: + repository: repository + + - match: { repository.settings.bucket : ${bucket} } + - match: { repository.settings.client : "integration_test" } + - match: { repository.settings.base_path : ${base_path} } + - match: { repository.settings.canned_acl : "private" } + - match: { repository.settings.storage_class : "standard" } + - is_false: repository.settings.access_key + - is_false: repository.settings.secret_key + + # Index documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 1 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 2 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 3 + - snapshot: one + + - do: + count: + index: docs + + - match: {count: 3} + + # Create a first snapshot + - do: + snapshot.create: + repository: repository + snapshot: snapshot-one + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-one } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.include_global_state: true } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.status: + repository: repository + snapshot: snapshot-one + + - 
is_true: snapshots + - match: { snapshots.0.snapshot: snapshot-one } + - match: { snapshots.0.state : SUCCESS } + + # Index more documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 4 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 5 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 6 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 7 + - snapshot: two + + - do: + count: + index: docs + + - match: {count: 7} + + # Create a second snapshot + - do: + snapshot.create: + repository: repository + snapshot: snapshot-two + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-two } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.get: + repository: repository + snapshot: snapshot-one,snapshot-two + + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } + + # Delete the index + - do: + indices.delete: + index: docs + + # Restore the second snapshot + - do: + snapshot.restore: + repository: repository + snapshot: snapshot-two + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 7} + + # Delete the index again + - do: + indices.delete: + index: docs + + # Restore the first snapshot + - do: + snapshot.restore: + repository: repository + snapshot: snapshot-one + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 3} + + # Remove the snapshots + - do: + snapshot.delete: + repository: repository + snapshot: snapshot-two + + - do: + snapshot.delete: + repository: repository + snapshot: snapshot-one + + # Remove our repository + - do: + snapshot.delete_repository: + repository: repository diff --git a/plugins/repository-s3/qa/build.gradle b/plugins/repository-s3/qa/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 09d9782aa91f8..cb4f977bae77b 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -156,7 +156,7 @@ class S3Repository extends BlobStoreRepository { String bucket = BUCKET_SETTING.get(metadata.settings()); if (bucket == null) { - throw new RepositoryException(metadata.name(), "No bucket defined for s3 gateway"); + throw new RepositoryException(metadata.name(), "No bucket defined for s3 repository"); } boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/10_basic.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/10_basic.yml index 11f4610f6f7b2..7bb65a508863d 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/10_basic.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/10_basic.yml @@ -11,183 +11,3 @@ nodes.info: {} - match: { nodes.$master.plugins.0.name: repository-s3 } ---- -"Snapshot/Restore with repository-s3": - - # Register repository - - do: - snapshot.create_repository: - repository: repository - body: - type: s3 - settings: - bucket: "bucket_test" - client: "integration_test" - canned_acl: "public-read" - storage_class: "standard" - - - match: { 
acknowledged: true } - - # Get repository - - do: - snapshot.get_repository: - repository: repository - - - match: {repository.settings.bucket : "bucket_test"} - - match: {repository.settings.client : "integration_test"} - - match: {repository.settings.canned_acl : "public-read"} - - match: {repository.settings.storage_class : "standard"} - - is_false: repository.settings.access_key - - is_false: repository.settings.secret_key - - # Index documents - - do: - bulk: - refresh: true - body: - - index: - _index: docs - _type: doc - _id: 1 - - snapshot: one - - index: - _index: docs - _type: doc - _id: 2 - - snapshot: one - - index: - _index: docs - _type: doc - _id: 3 - - snapshot: one - - - do: - count: - index: docs - - - match: {count: 3} - - # Create a first snapshot - - do: - snapshot.create: - repository: repository - snapshot: snapshot-one - wait_for_completion: true - - - match: { snapshot.snapshot: snapshot-one } - - match: { snapshot.state : SUCCESS } - - match: { snapshot.include_global_state: true } - - match: { snapshot.shards.failed : 0 } - - - do: - snapshot.status: - repository: repository - snapshot: snapshot-one - - - is_true: snapshots - - match: { snapshots.0.snapshot: snapshot-one } - - match: { snapshots.0.state : SUCCESS } - - # Index more documents - - do: - bulk: - refresh: true - body: - - index: - _index: docs - _type: doc - _id: 4 - - snapshot: two - - index: - _index: docs - _type: doc - _id: 5 - - snapshot: two - - index: - _index: docs - _type: doc - _id: 6 - - snapshot: two - - index: - _index: docs - _type: doc - _id: 7 - - snapshot: two - - - do: - count: - index: docs - - - match: {count: 7} - - # Create a second snapshot - - do: - snapshot.create: - repository: repository - snapshot: snapshot-two - wait_for_completion: true - - - match: { snapshot.snapshot: snapshot-two } - - match: { snapshot.state : SUCCESS } - - match: { snapshot.shards.failed : 0 } - - - do: - snapshot.get: - repository: repository - snapshot: snapshot-one,snapshot-two - - - is_true: snapshots - - match: { snapshots.0.state : SUCCESS } - - match: { snapshots.1.state : SUCCESS } - - # Delete the index - - do: - indices.delete: - index: docs - - # Restore the second snapshot - - do: - snapshot.restore: - repository: repository - snapshot: snapshot-two - wait_for_completion: true - - - do: - count: - index: docs - - - match: {count: 7} - - # Delete the index again - - do: - indices.delete: - index: docs - - # Restore the first snapshot - - do: - snapshot.restore: - repository: repository - snapshot: snapshot-one - wait_for_completion: true - - - do: - count: - index: docs - - - match: {count: 3} - - # Remove the snapshots - - do: - snapshot.delete: - repository: repository - snapshot: snapshot-two - - - do: - snapshot.delete: - repository: repository - snapshot: snapshot-one - - # Remove our repository - - do: - snapshot.delete_repository: - repository: repository diff --git a/qa/build.gradle b/qa/build.gradle index e69de29bb2d1d..494f6e3cd94b7 100644 --- a/qa/build.gradle +++ b/qa/build.gradle @@ -0,0 +1,10 @@ + +import org.elasticsearch.gradle.test.RestIntegTestTask + +subprojects { Project subproj -> + subproj.tasks.withType(RestIntegTestTask) { + subproj.extensions.configure("${it.name}Cluster") { cluster -> + cluster.distribution = 'oss-zip' + } + } +} diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginSecurityTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginSecurityTests.java index ba4275b54a115..e04c0543fd7bf 100644 --- 
a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginSecurityTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginSecurityTests.java @@ -19,62 +19,17 @@ package org.elasticsearch.plugins; -import org.elasticsearch.cli.MockTerminal; -import org.elasticsearch.cli.UserException; import org.elasticsearch.test.ESTestCase; -import java.io.IOException; import java.nio.file.Path; -import java.util.ArrayList; -import java.util.List; import java.util.Set; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.hasToString; -import static org.hamcrest.Matchers.not; /** Tests plugin manager security check */ public class PluginSecurityTests extends ESTestCase { - public void testHasNativeController() throws Exception { - assumeTrue( - "test cannot run with security manager enabled", - System.getSecurityManager() == null); - final MockTerminal terminal = new MockTerminal(); - terminal.addTextInput("y"); - terminal.addTextInput("y"); - final Path policyFile = this.getDataPath("security/simple-plugin-security.policy"); - Set permissions = PluginSecurity.parsePermissions(policyFile, createTempDir()); - PluginSecurity.confirmPolicyExceptions(terminal, permissions, true, false); - final String output = terminal.getOutput(); - assertThat(output, containsString("plugin forks a native controller")); - } - - public void testDeclineNativeController() throws IOException { - assumeTrue("test cannot run with security manager enabled", System.getSecurityManager() == null); - final MockTerminal terminal = new MockTerminal(); - terminal.addTextInput("y"); - terminal.addTextInput("n"); - final Path policyFile = this.getDataPath("security/simple-plugin-security.policy"); - Set permissions = PluginSecurity.parsePermissions(policyFile, createTempDir()); - UserException e = expectThrows(UserException.class, - () -> PluginSecurity.confirmPolicyExceptions(terminal, permissions, true, false)); - assertThat(e, hasToString(containsString("installation aborted by user"))); - } - - public void testDoesNotHaveNativeController() throws Exception { - assumeTrue("test cannot run with security manager enabled", System.getSecurityManager() == null); - final MockTerminal terminal = new MockTerminal(); - terminal.addTextInput("y"); - final Path policyFile = this.getDataPath("security/simple-plugin-security.policy"); - Set permissions = PluginSecurity.parsePermissions(policyFile, createTempDir()); - PluginSecurity.confirmPolicyExceptions(terminal, permissions, false, false); - final String output = terminal.getOutput(); - assertThat(output, not(containsString("plugin forks a native controller"))); - } - /** Test that we can parse the set of permissions correctly for a simple policy */ public void testParsePermissions() throws Exception { assumeTrue( diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/tribe/elasticsearch.yml b/qa/evil-tests/src/test/resources/org/elasticsearch/tribe/elasticsearch.yml deleted file mode 100644 index 19b2a7b5dd92f..0000000000000 --- a/qa/evil-tests/src/test/resources/org/elasticsearch/tribe/elasticsearch.yml +++ /dev/null @@ -1,5 +0,0 @@ -cluster.name: tribe_node_cluster -tribe.t1.cluster.name: tribe1 -tribe.t2.cluster.name: tribe2 -tribe.t1.node.id.seed: 1 -tribe.t2.node.id.seed: 2 diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java 
b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index e50e0c45c8dc9..57c6ad7ff861f 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -687,8 +687,7 @@ public void testEmptyShard() throws IOException { * Tests recovery of an index with or without a translog and the * statistics we gather about that. */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29544") - public void testRecovery() throws IOException { + public void testRecovery() throws Exception { int count; boolean shouldHaveTranslog; if (runningAgainstOldCluster) { @@ -701,7 +700,7 @@ public void testRecovery() throws IOException { indexRandomDocuments(count, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject()); // make sure all recoveries are done - ensureNoInitializingShards(); + ensureGreen(index); // Explicitly flush so we're sure to have a bunch of documents in the Lucene index client().performRequest("POST", "/_flush"); if (shouldHaveTranslog) { diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml index 04086c2f2cb16..b4487e4fefee1 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml @@ -7,7 +7,6 @@ - match: { my_remote_cluster.num_nodes_connected: 1} - match: { my_remote_cluster.max_connections_per_cluster: 1} - match: { my_remote_cluster.initial_connect_timeout: "30s" } - - is_true: my_remote_cluster.http_addresses.0 --- "Add transient remote cluster based on the preset cluster and check remote info": @@ -38,9 +37,6 @@ - do: cluster.remote_info: {} - - set: { my_remote_cluster.http_addresses.0: remote_http } - - match: { test_remote_cluster.http_addresses.0: $remote_http } - - match: { test_remote_cluster.connected: true } - match: { my_remote_cluster.connected: true } @@ -132,4 +128,3 @@ transient: search.remote.remote1.seeds: null search.remote.remote1.skip_unavailable: null - diff --git a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java index 9f895c44977fc..1afda01130ba7 100644 --- a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java +++ b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java @@ -66,7 +66,7 @@ public class SpawnerNoBootstrapTests extends LuceneTestCase { + "read SOMETHING\n"; /** - * Simplest case: a plugin with no controller daemon. + * Simplest case: a module with no controller daemon. 
*/ public void testNoControllerSpawn() throws IOException, InterruptedException { Path esHome = createTempDir().resolve("esHome"); @@ -77,7 +77,7 @@ public void testNoControllerSpawn() throws IOException, InterruptedException { Environment environment = TestEnvironment.newEnvironment(settings); // This plugin will NOT have a controller daemon - Path plugin = environment.pluginsFile().resolve("a_plugin"); + Path plugin = environment.modulesFile().resolve("a_plugin"); Files.createDirectories(environment.modulesFile()); Files.createDirectories(plugin); PluginTestUtil.writePluginProperties( @@ -91,7 +91,7 @@ public void testNoControllerSpawn() throws IOException, InterruptedException { "has.native.controller", "false"); try (Spawner spawner = new Spawner()) { - spawner.spawnNativePluginControllers(environment); + spawner.spawnNativeControllers(environment); assertThat(spawner.getProcesses(), hasSize(0)); } } @@ -100,11 +100,11 @@ public void testNoControllerSpawn() throws IOException, InterruptedException { * Two plugins - one with a controller daemon and one without. */ public void testControllerSpawn() throws Exception { - assertControllerSpawns(Environment::pluginsFile); - assertControllerSpawns(Environment::modulesFile); + assertControllerSpawns(Environment::pluginsFile, false); + assertControllerSpawns(Environment::modulesFile, true); } - private void assertControllerSpawns(Function pluginsDirFinder) throws Exception { + private void assertControllerSpawns(final Function pluginsDirFinder, boolean expectSpawn) throws Exception { /* * On Windows you can not directly run a batch file - you have to run cmd.exe with the batch * file as an argument and that's out of the remit of the controller daemon process spawner. @@ -149,33 +149,38 @@ private void assertControllerSpawns(Function pluginsDirFinder "has.native.controller", "false"); Spawner spawner = new Spawner(); - spawner.spawnNativePluginControllers(environment); + spawner.spawnNativeControllers(environment); List processes = spawner.getProcesses(); - /* - * As there should only be a reference in the list for the plugin that had the controller - * daemon, we expect one here. - */ - assertThat(processes, hasSize(1)); - Process process = processes.get(0); - final InputStreamReader in = - new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8); - try (BufferedReader stdoutReader = new BufferedReader(in)) { - String line = stdoutReader.readLine(); - assertEquals("I am alive", line); - spawner.close(); - /* - * Fail if the process does not die within one second; usually it will be even quicker - * but it depends on OS scheduling. - */ - assertTrue(process.waitFor(1, TimeUnit.SECONDS)); + + if (expectSpawn) { + // as there should only be a reference in the list for the module that had the controller daemon, we expect one here + assertThat(processes, hasSize(1)); + Process process = processes.get(0); + final InputStreamReader in = new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8); + try (BufferedReader stdoutReader = new BufferedReader(in)) { + String line = stdoutReader.readLine(); + assertEquals("I am alive", line); + spawner.close(); + // fail if the process does not die within one second; usually it will be even quicker but it depends on OS scheduling + assertTrue(process.waitFor(1, TimeUnit.SECONDS)); + } + } else { + assertThat(processes, hasSize(0)); } } /** - * Two plugins in a meta plugin - one with a controller daemon and one without. 
+ * Two plugins in a meta module - one with a controller daemon and one without. */ - public void testControllerSpawnMetaPlugin() throws IOException, InterruptedException { + public void testControllerSpawnMeta() throws Exception { + runTestControllerSpawnMeta(Environment::pluginsFile, false); + runTestControllerSpawnMeta(Environment::modulesFile, true); + } + + + private void runTestControllerSpawnMeta( + final Function pluginsDirFinder, final boolean expectSpawn) throws Exception { /* * On Windows you can not directly run a batch file - you have to run cmd.exe with the batch * file as an argument and that's out of the remit of the controller daemon process spawner. @@ -189,65 +194,64 @@ public void testControllerSpawnMetaPlugin() throws IOException, InterruptedExcep Environment environment = TestEnvironment.newEnvironment(settings); - Path metaPlugin = environment.pluginsFile().resolve("meta_plugin"); + Path metaModule = pluginsDirFinder.apply(environment).resolve("meta_module"); Files.createDirectories(environment.modulesFile()); - Files.createDirectories(metaPlugin); + Files.createDirectories(metaModule); PluginTestUtil.writeMetaPluginProperties( - metaPlugin, - "description", "test_plugin", - "name", "meta_plugin", - "plugins", "test_plugin,other_plugin"); + metaModule, + "description", "test_plugin", + "name", "meta_plugin", + "plugins", "test_plugin,other_plugin"); // this plugin will have a controller daemon - Path plugin = metaPlugin.resolve("test_plugin"); + Path plugin = metaModule.resolve("test_plugin"); Files.createDirectories(plugin); PluginTestUtil.writePluginProperties( - plugin, - "description", "test_plugin", - "version", Version.CURRENT.toString(), - "elasticsearch.version", Version.CURRENT.toString(), - "name", "test_plugin", - "java.version", "1.8", - "classname", "TestPlugin", - "has.native.controller", "true"); + plugin, + "description", "test_plugin", + "version", Version.CURRENT.toString(), + "elasticsearch.version", Version.CURRENT.toString(), + "name", "test_plugin", + "java.version", "1.8", + "classname", "TestPlugin", + "has.native.controller", "true"); Path controllerProgram = Platforms.nativeControllerPath(plugin); createControllerProgram(controllerProgram); // this plugin will not have a controller daemon - Path otherPlugin = metaPlugin.resolve("other_plugin"); + Path otherPlugin = metaModule.resolve("other_plugin"); Files.createDirectories(otherPlugin); PluginTestUtil.writePluginProperties( - otherPlugin, - "description", "other_plugin", - "version", Version.CURRENT.toString(), - "elasticsearch.version", Version.CURRENT.toString(), - "name", "other_plugin", - "java.version", "1.8", - "classname", "OtherPlugin", - "has.native.controller", "false"); + otherPlugin, + "description", "other_plugin", + "version", Version.CURRENT.toString(), + "elasticsearch.version", Version.CURRENT.toString(), + "name", "other_plugin", + "java.version", "1.8", + "classname", "OtherPlugin", + "has.native.controller", "false"); Spawner spawner = new Spawner(); - spawner.spawnNativePluginControllers(environment); + spawner.spawnNativeControllers(environment); List processes = spawner.getProcesses(); - /* - * As there should only be a reference in the list for the plugin that had the controller - * daemon, we expect one here. 
- */ - assertThat(processes, hasSize(1)); - Process process = processes.get(0); - final InputStreamReader in = - new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8); - try (BufferedReader stdoutReader = new BufferedReader(in)) { - String line = stdoutReader.readLine(); - assertEquals("I am alive", line); - spawner.close(); - /* - * Fail if the process does not die within one second; usually it will be even quicker - * but it depends on OS scheduling. - */ - assertTrue(process.waitFor(1, TimeUnit.SECONDS)); + + if (expectSpawn) { + // as there should only be a reference in the list for the plugin that had the controller daemon, we expect one here + assertThat(processes, hasSize(1)); + Process process = processes.get(0); + final InputStreamReader in = + new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8); + try (BufferedReader stdoutReader = new BufferedReader(in)) { + String line = stdoutReader.readLine(); + assertEquals("I am alive", line); + spawner.close(); + // fail if the process does not die within one second; usually it will be even quicker but it depends on OS scheduling + assertTrue(process.waitFor(1, TimeUnit.SECONDS)); + } + } else { + assertThat(processes, hasSize(0)); } } @@ -260,7 +264,7 @@ public void testControllerSpawnWithIncorrectDescriptor() throws IOException { Environment environment = TestEnvironment.newEnvironment(settings); - Path plugin = environment.pluginsFile().resolve("test_plugin"); + Path plugin = environment.modulesFile().resolve("test_plugin"); Files.createDirectories(plugin); PluginTestUtil.writePluginProperties( plugin, @@ -277,10 +281,10 @@ public void testControllerSpawnWithIncorrectDescriptor() throws IOException { Spawner spawner = new Spawner(); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> spawner.spawnNativePluginControllers(environment)); + () -> spawner.spawnNativeControllers(environment)); assertThat( e.getMessage(), - equalTo("plugin [test_plugin] does not have permission to fork native controller")); + equalTo("module [test_plugin] does not have permission to fork native controller")); } public void testSpawnerHandlingOfDesktopServicesStoreFiles() throws IOException { @@ -292,17 +296,16 @@ public void testSpawnerHandlingOfDesktopServicesStoreFiles() throws IOException Files.createDirectories(environment.modulesFile()); Files.createDirectories(environment.pluginsFile()); - final Path desktopServicesStore = environment.pluginsFile().resolve(".DS_Store"); + final Path desktopServicesStore = environment.modulesFile().resolve(".DS_Store"); Files.createFile(desktopServicesStore); final Spawner spawner = new Spawner(); if (Constants.MAC_OS_X) { // if the spawner were not skipping the Desktop Services Store files on macOS this would explode - spawner.spawnNativePluginControllers(environment); + spawner.spawnNativeControllers(environment); } else { // we do not ignore these files on non-macOS systems - final FileSystemException e = - expectThrows(FileSystemException.class, () -> spawner.spawnNativePluginControllers(environment)); + final FileSystemException e = expectThrows(FileSystemException.class, () -> spawner.spawnNativeControllers(environment)); if (Constants.WINDOWS) { assertThat(e, instanceOf(NoSuchFileException.class)); } else { diff --git a/qa/reindex-from-old/build.gradle b/qa/reindex-from-old/build.gradle deleted file mode 100644 index c4b4927a4a2b1..0000000000000 --- a/qa/reindex-from-old/build.gradle +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to Elasticsearch 
under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -description = """\ -Tests reindex-from-remote against some specific versions of -Elasticsearch prior to 5.0. Versions of Elasticsearch >= 5.0 -should be able to use the standard launching mechanism which -is more flexible and reliable. -""" - - -import org.apache.tools.ant.taskdefs.condition.Os - -import static org.elasticsearch.gradle.BuildPlugin.getJavaHome - -apply plugin: 'elasticsearch.standalone-rest-test' -apply plugin: 'elasticsearch.rest-test' - -integTestCluster { - // Whitelist reindexing from the local node so we can test it. - setting 'reindex.remote.whitelist', '127.0.0.1:*' -} - -configurations { - oldesFixture - es2 - es1 - es090 -} - -dependencies { - oldesFixture project(':test:fixtures:old-elasticsearch') - /* Right now we just test against the latest version of each major we expect - * reindex-from-remote to work against. We could randomize the versions but - * that doesn't seem worth it at this point. */ - es2 'org.elasticsearch.distribution.zip:elasticsearch:2.4.5@zip' - es1 'org.elasticsearch:elasticsearch:1.7.6@zip' - es090 'org.elasticsearch:elasticsearch:0.90.13@zip' -} - -if (Os.isFamily(Os.FAMILY_WINDOWS)) { - // we can't get the pid files in windows so we skip that - integTest.enabled = false -} else { - /* Set up tasks to unzip and run the old versions of ES before running the - * integration tests. */ - for (String version : ['2', '1', '090']) { - Task unzip = task("unzipEs${version}", type: Sync) { - Configuration oldEsDependency = configurations['es' + version] - dependsOn oldEsDependency - /* Use a closure here to delay resolution of the dependency until we need - * it */ - from { - oldEsDependency.collect { zipTree(it) } - } - into temporaryDir - } - Task fixture = task("oldEs${version}Fixture", - type: org.elasticsearch.gradle.test.AntFixture) { - dependsOn project.configurations.oldesFixture - dependsOn unzip - executable = new File(project.runtimeJavaHome, 'bin/java') - env 'CLASSPATH', "${ -> project.configurations.oldesFixture.asPath }" - env 'JAVA_HOME', "${-> getJavaHome(project, 7, "JAVA7_HOME must be set to run reindex-from-old")}" - args 'oldes.OldElasticsearch', - baseDir, - unzip.temporaryDir, - version == '090' - } - integTest.dependsOn fixture - integTestRunner { - /* Use a closure on the string to delay evaluation until right before we - * run the integration tests so that we can be sure that the file is - * ready. 
*/ - systemProperty "es${version}.port", "${ -> fixture.addressAndPort }" - } - } -} diff --git a/qa/smoke-test-plugins/build.gradle b/qa/smoke-test-plugins/build.gradle index d60216dad194f..602dfa2d6ea4f 100644 --- a/qa/smoke-test-plugins/build.gradle +++ b/qa/smoke-test-plugins/build.gradle @@ -23,9 +23,9 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' ext.pluginsCount = 0 -project.rootProject.subprojects.findAll { it.parent.path == ':plugins' }.each { subproj -> +project(':plugins').getChildProjects().each { pluginName, pluginProject -> integTestCluster { - plugin subproj.path + plugin pluginProject.path } pluginsCount += 1 } diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle index 4086cf2205785..2b1ffb280819c 100644 --- a/qa/vagrant/build.gradle +++ b/qa/vagrant/build.gradle @@ -22,7 +22,7 @@ apply plugin: 'elasticsearch.vagrant' List plugins = [] for (Project subproj : project.rootProject.subprojects) { - if (subproj.path.startsWith(':plugins:') || subproj.path.equals(':example-plugins:custom-settings')) { + if (subproj.parent.path == ':plugins' || subproj.path.equals(':example-plugins:custom-settings')) { // add plugin as a dep dependencies { packaging project(path: "${subproj.path}", configuration: 'zip') diff --git a/qa/vagrant/src/test/resources/packaging/tests/20_tar_package.bats b/qa/vagrant/src/test/resources/packaging/tests/20_tar_package.bats index 3536c2a207ddd..1a3704c33172f 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/20_tar_package.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/20_tar_package.bats @@ -55,7 +55,8 @@ setup() { } @test "[TAR] archive is available" { - count=$(find . -type f -name 'elasticsearch*.tar.gz' | wc -l) + local version=$(cat version) + count=$(find . 
-type f -name "${PACKAGE_NAME}-${version}.tar.gz" | wc -l) [ "$count" -eq 1 ] } diff --git a/qa/vagrant/src/test/resources/packaging/tests/30_deb_package.bats b/qa/vagrant/src/test/resources/packaging/tests/30_deb_package.bats index 0b06e74555394..59aaa3e8a072f 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/30_deb_package.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/30_deb_package.bats @@ -46,7 +46,12 @@ setup() { } @test "[DEB] package depends on bash" { - dpkg -I elasticsearch-$(cat version).deb | grep "Depends:.*bash.*" + dpkg -I elasticsearch-oss-$(cat version).deb | grep "Depends:.*bash.*" +} + +@test "[DEB] package conflicts" { + dpkg -I elasticsearch-oss-$(cat version).deb | grep "^ Conflicts: elasticsearch$" + dpkg -I elasticsearch-$(cat version).deb | grep "^ Conflicts: elasticsearch-oss$" } ################################## @@ -58,21 +63,21 @@ setup() { } @test "[DEB] package is available" { - count=$(ls elasticsearch-$(cat version).deb | wc -l) + count=$(ls elasticsearch-oss-$(cat version).deb | wc -l) [ "$count" -eq 1 ] } @test "[DEB] package is not installed" { - run dpkg -s 'elasticsearch' + run dpkg -s 'elasticsearch-oss' [ "$status" -eq 1 ] } @test "[DEB] install package" { - dpkg -i elasticsearch-$(cat version).deb + dpkg -i elasticsearch-oss-$(cat version).deb } @test "[DEB] package is installed" { - dpkg -s 'elasticsearch' + dpkg -s 'elasticsearch-oss' } @test "[DEB] verify package installation" { @@ -109,11 +114,11 @@ setup() { # Uninstall DEB package ################################## @test "[DEB] remove package" { - dpkg -r 'elasticsearch' + dpkg -r 'elasticsearch-oss' } @test "[DEB] package has been removed" { - run dpkg -s 'elasticsearch' + run dpkg -s 'elasticsearch-oss' [ "$status" -eq 0 ] echo "$output" | grep -i "status" | grep -i "deinstall ok" } @@ -167,7 +172,7 @@ setup() { @test "[DEB] purge package" { # User installed scripts aren't removed so we'll just get them ourselves rm -rf $ESSCRIPTS - dpkg --purge 'elasticsearch' + dpkg --purge 'elasticsearch-oss' } @test "[DEB] verify package purge" { @@ -186,21 +191,21 @@ setup() { assert_file_not_exist "/usr/share/elasticsearch" - assert_file_not_exist "/usr/share/doc/elasticsearch" - assert_file_not_exist "/usr/share/doc/elasticsearch/copyright" + assert_file_not_exist "/usr/share/doc/elasticsearch-oss" + assert_file_not_exist "/usr/share/doc/elasticsearch-oss/copyright" } @test "[DEB] package has been completly removed" { - run dpkg -s 'elasticsearch' + run dpkg -s 'elasticsearch-oss' [ "$status" -eq 1 ] } @test "[DEB] reinstall package" { - dpkg -i elasticsearch-$(cat version).deb + dpkg -i elasticsearch-oss-$(cat version).deb } @test "[DEB] package is installed by reinstall" { - dpkg -s 'elasticsearch' + dpkg -s 'elasticsearch-oss' } @test "[DEB] verify package reinstallation" { @@ -208,10 +213,10 @@ setup() { } @test "[DEB] repurge package" { - dpkg --purge 'elasticsearch' + dpkg --purge 'elasticsearch-oss' } @test "[DEB] package has been completly removed again" { - run dpkg -s 'elasticsearch' + run dpkg -s 'elasticsearch-oss' [ "$status" -eq 1 ] } diff --git a/qa/vagrant/src/test/resources/packaging/tests/40_rpm_package.bats b/qa/vagrant/src/test/resources/packaging/tests/40_rpm_package.bats index e1b171a8e4b69..52347c7ef4e41 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/40_rpm_package.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/40_rpm_package.bats @@ -45,7 +45,12 @@ setup() { } @test "[RPM] package depends on bash" { - rpm -qpR elasticsearch-$(cat 
version).rpm | grep '/bin/bash' + rpm -qpR elasticsearch-oss-$(cat version).rpm | grep '/bin/bash' +} + +@test "[RPM] package conflicts" { + rpm -qp --conflicts elasticsearch-oss-$(cat version).rpm | grep "^elasticsearch\s*$" + rpm -qp --conflicts elasticsearch-$(cat version).rpm | grep "^elasticsearch-oss\s*$" } ################################## @@ -57,21 +62,21 @@ setup() { } @test "[RPM] package is available" { - count=$(ls elasticsearch-$(cat version).rpm | wc -l) + count=$(ls elasticsearch-oss-$(cat version).rpm | wc -l) [ "$count" -eq 1 ] } @test "[RPM] package is not installed" { - run rpm -qe 'elasticsearch' + run rpm -qe 'elasticsearch-oss' [ "$status" -eq 1 ] } @test "[RPM] install package" { - rpm -i elasticsearch-$(cat version).rpm + rpm -i elasticsearch-oss-$(cat version).rpm } @test "[RPM] package is installed" { - rpm -qe 'elasticsearch' + rpm -qe 'elasticsearch-oss' } @test "[RPM] verify package installation" { @@ -103,11 +108,11 @@ setup() { @test "[RPM] remove package" { # User installed scripts aren't removed so we'll just get them ourselves rm -rf $ESSCRIPTS - rpm -e 'elasticsearch' + rpm -e 'elasticsearch-oss' } @test "[RPM] package has been removed" { - run rpm -qe 'elasticsearch' + run rpm -qe 'elasticsearch-oss' [ "$status" -eq 1 ] } @@ -143,11 +148,11 @@ setup() { } @test "[RPM] reinstall package" { - rpm -i elasticsearch-$(cat version).rpm + rpm -i elasticsearch-oss-$(cat version).rpm } @test "[RPM] package is installed by reinstall" { - rpm -qe 'elasticsearch' + rpm -qe 'elasticsearch-oss' } @test "[RPM] verify package reinstallation" { @@ -159,7 +164,7 @@ setup() { echo "# ping" >> "/etc/elasticsearch/elasticsearch.yml" echo "# ping" >> "/etc/elasticsearch/jvm.options" echo "# ping" >> "/etc/elasticsearch/log4j2.properties" - rpm -e 'elasticsearch' + rpm -e 'elasticsearch-oss' } @test "[RPM] verify preservation" { @@ -202,6 +207,6 @@ setup() { } @test "[RPM] package has been removed again" { - run rpm -qe 'elasticsearch' + run rpm -qe 'elasticsearch-oss' [ "$status" -eq 1 ] } diff --git a/qa/vagrant/src/test/resources/packaging/tests/80_upgrade.bats b/qa/vagrant/src/test/resources/packaging/tests/80_upgrade.bats index f402305156b8e..af0c1280b2dc3 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/80_upgrade.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/80_upgrade.bats @@ -47,6 +47,12 @@ setup() { if [ "$(cat upgrade_from_version)" == "$(cat version)" ]; then sameVersion="true" fi + # TODO: this needs to conditionally change based on version > 6.3.0 + if [ -f upgrade_is_oss ]; then + export PACKAGE_NAME="elasticsearch-oss" + else + skip "upgrade cannot happen from pre 6.3.0 to elasticsearch-oss" + fi } @test "[UPGRADE] install old version" { diff --git a/qa/vagrant/src/test/resources/packaging/tests/90_reinstall.bats b/qa/vagrant/src/test/resources/packaging/tests/90_reinstall.bats index 816247f95f93b..7c5f05c65e894 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/90_reinstall.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/90_reinstall.bats @@ -42,6 +42,7 @@ load $BATS_UTILS/packages.bash # Cleans everything for the 1st execution setup() { skip_not_dpkg_or_rpm + export PACKAGE_NAME="elasticsearch-oss" } @test "[REINSTALL] install" { diff --git a/qa/vagrant/src/test/resources/packaging/utils/packages.bash b/qa/vagrant/src/test/resources/packaging/utils/packages.bash index 01ad9258cdbed..a214cd6940f63 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/packages.bash +++ 
b/qa/vagrant/src/test/resources/packaging/utils/packages.bash @@ -46,6 +46,7 @@ export_elasticsearch_paths() { if is_rpm; then export ESENVFILE="/etc/sysconfig/elasticsearch" fi + export PACKAGE_NAME=${PACKAGE_NAME:-"elasticsearch-oss"} } # Install the rpm or deb package. @@ -73,9 +74,9 @@ install_package() { esac done if is_rpm; then - rpm $rpmCommand elasticsearch-$version.rpm + rpm $rpmCommand $PACKAGE_NAME-$version.rpm elif is_dpkg; then - dpkg $dpkgCommand -i elasticsearch-$version.deb + dpkg $dpkgCommand -i $PACKAGE_NAME-$version.deb else skip "Only rpm or deb supported" fi @@ -115,9 +116,10 @@ verify_package_installation() { # Env file assert_file "/etc/default/elasticsearch" f root elasticsearch 660 - # Doc files - assert_file "/usr/share/doc/elasticsearch" d root root 755 - assert_file "/usr/share/doc/elasticsearch/copyright" f root root 644 + # Machine-readable debian/copyright file + local copyrightDir=$(readlink -f /usr/share/doc/$PACKAGE_NAME) + assert_file $copyrightDir d root root 755 + assert_file "$copyrightDir/copyright" f root root 644 fi if is_rpm; then diff --git a/qa/vagrant/src/test/resources/packaging/utils/tar.bash b/qa/vagrant/src/test/resources/packaging/utils/tar.bash index 9b4bc76d841c9..4ded1f73514b2 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/tar.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/tar.bash @@ -35,10 +35,12 @@ install_archive() { export ESHOME=${1:-/tmp/elasticsearch} + local version=$(cat version) + echo "Unpacking tarball to $ESHOME" rm -rf /tmp/untar mkdir -p /tmp/untar - tar -xzpf elasticsearch*.tar.gz -C /tmp/untar + tar -xzpf "${PACKAGE_NAME}-${version}.tar.gz" -C /tmp/untar find /tmp/untar -depth -type d -name 'elasticsearch*' -exec mv {} "$ESHOME" \; > /dev/null @@ -79,6 +81,8 @@ export_elasticsearch_paths() { export ESSCRIPTS="$ESCONFIG/scripts" export ESDATA="$ESHOME/data" export ESLOG="$ESHOME/logs" + + export PACKAGE_NAME=${PACKAGE_NAME:-"elasticsearch-oss"} } # Checks that all directories & files are correctly installed diff --git a/qa/vagrant/src/test/resources/packaging/utils/utils.bash b/qa/vagrant/src/test/resources/packaging/utils/utils.bash index 2cb84528383b3..4e3fdf5bd5560 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/utils.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/utils.bash @@ -254,6 +254,7 @@ clean_before_test() { "/etc/sysconfig/elasticsearch" \ "/var/run/elasticsearch" \ "/usr/share/doc/elasticsearch" \ + "/usr/share/doc/elasticsearch-oss" \ "/tmp/elasticsearch" \ "/usr/lib/systemd/system/elasticsearch.conf" \ "/usr/lib/tmpfiles.d/elasticsearch.conf" \ @@ -288,20 +289,20 @@ clean_before_test() { purge_elasticsearch() { # Removes RPM package if is_rpm; then - rpm --quiet -e elasticsearch > /dev/null 2>&1 || true + rpm --quiet -e $PACKAGE_NAME > /dev/null 2>&1 || true fi if [ -x "`which yum 2>/dev/null`" ]; then - yum remove -y elasticsearch > /dev/null 2>&1 || true + yum remove -y $PACKAGE_NAME > /dev/null 2>&1 || true fi # Removes DEB package if is_dpkg; then - dpkg --purge elasticsearch > /dev/null 2>&1 || true + dpkg --purge $PACKAGE_NAME > /dev/null 2>&1 || true fi if [ -x "`which apt-get 2>/dev/null`" ]; then - apt-get --quiet --yes purge elasticsearch > /dev/null 2>&1 || true + apt-get --quiet --yes purge $PACKAGE_NAME > /dev/null 2>&1 || true fi } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json index c30ee70e2eb82..61fd9ba3513ca 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json @@ -16,6 +16,10 @@ } }, "params": { + "include_type_name": { + "type" : "string", + "description" : "Whether to add the type name to the response" + }, "wait_for_active_shards": { "type" : "string", "description" : "Sets the number of shard copies that must be active before proceeding with the bulk operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json index 389d00c670622..b146c34b441ea 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json @@ -4,7 +4,7 @@ "methods": ["DELETE"], "url": { "path": "/{index}/{type}/{id}", - "paths": ["/{index}/{type}/{id}"], + "paths": ["/{index}/{type}/{id}", "/{index}/_doc/{id}"], "parts": { "id": { "type" : "string", @@ -18,11 +18,14 @@ }, "type": { "type" : "string", - "required" : true, "description" : "The type of the document" } }, "params": { + "include_type_name": { + "type" : "string", + "description" : "Whether to add the type name to the response" + }, "wait_for_active_shards": { "type" : "string", "description" : "Sets the number of shard copies that must be active before proceeding with the delete operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json index 8aba39e7710af..9f26ca565b293 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json @@ -4,7 +4,7 @@ "methods": ["GET"], "url": { "path": "/{index}/{type}/{id}", - "paths": ["/{index}/{type}/{id}"], + "paths": ["/{index}/{type}/{id}", "/{index}/_doc/{id}"], "parts": { "id": { "type" : "string", @@ -18,11 +18,14 @@ }, "type": { "type" : "string", - "required" : true, "description" : "The type of the document (use `_all` to fetch the first document matching the ID across all types)" } }, "params": { + "include_type_name": { + "type" : "string", + "description" : "Whether to add the type name to the response" + }, "stored_fields": { "type": "list", "description" : "A comma-separated list of stored fields to return in the response" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json index 574206a0dc3ed..3e07ff7acfa37 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json @@ -21,6 +21,10 @@ } }, "params": { + "include_type_name": { + "type" : "string", + "description" : "Whether to add the type name to the response" + }, "wait_for_active_shards": { "type" : "string", "description" : "Sets the number of shard copies that must be active before proceeding with the index operation. Defaults to 1, meaning the primary shard only. 
Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json index 9a0f7be1d65ae..7e62371dd674d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json @@ -1,7 +1,7 @@ { "indices.clear_cache": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html", - "methods": ["POST", "GET"], + "methods": ["POST"], "url": { "path": "/_cache/clear", "paths": ["/_cache/clear", "/{index}/_cache/clear"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/scripts_painless_execute.json b/rest-api-spec/src/main/resources/rest-api-spec/api/scripts_painless_execute.json new file mode 100644 index 0000000000000..c02627cfd874c --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/scripts_painless_execute.json @@ -0,0 +1,17 @@ +{ + "scripts_painless_execute": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-api.html", + "methods": ["GET", "POST"], + "url": { + "path": "/_scripts/painless/_execute", + "paths": ["/_scripts/painless/_execute"], + "parts": { + }, + "params": { + } + }, + "body": { + "description": "The script to execute" + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index 3802747ed1b88..af2b3104a93f5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -16,6 +16,10 @@ } }, "params": { + "include_type_name": { + "type" : "string", + "description" : "Whether to add the type name to the response" + }, "analyzer": { "type" : "string", "description" : "The analyzer to use for the query string" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json index ffa99cc9dc312..a63e248d00f6e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json @@ -4,7 +4,7 @@ "methods": ["POST"], "url": { "path": "/{index}/{type}/{id}/_update", - "paths": ["/{index}/{type}/{id}/_update"], + "paths": ["/{index}/{type}/{id}/_update", "/{index}/_doc/{id}/_update"], "parts": { "id": { "type": "string", @@ -18,11 +18,14 @@ }, "type": { "type": "string", - "required": true, "description": "The type of the document" } }, "params": { + "include_type_name": { + "type" : "string", + "description" : "Whether to add the type name to the response" + }, "wait_for_active_shards": { "type": "string", "description": "Sets the number of shard copies that must be active before proceeding with the update operation. Defaults to 1, meaning the primary shard only. 
Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml index bb16ae391c46d..1ce8468cb51f9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml @@ -1,6 +1,5 @@ --- "Test cat thread_pool output": - - skip: version: " - 6.99.99" reason: this API was changed in a backwards-incompatible fashion in 7.0.0 so we need to skip in a mixed cluster @@ -33,30 +32,29 @@ - do: cat.thread_pool: - thread_pool_patterns: bulk,management,flush,index,generic,force_merge + thread_pool_patterns: write,management,flush,generic,force_merge h: id,name,active v: true - match: $body: | /^ id \s+ name \s+ active \n - (\S+\s+ bulk \s+ \d+ \n - \S+\s+ flush \s+ \d+ \n + (\S+\s+ flush \s+ \d+ \n \S+\s+ force_merge \s+ \d+ \n \S+\s+ generic \s+ \d+ \n - \S+\s+ index \s+ \d+ \n - \S+\s+ management \s+ \d+ \n)+ $/ + \S+\s+ management \s+ \d+ \n + \S+\s+ write \s+ \d+ \n)+ $/ - do: cat.thread_pool: - thread_pool_patterns: bulk - h: id,name,type,active,pool_size,queue,queue_size,rejected,largest,completed,core,max,size,keep_alive + thread_pool_patterns: write + h: id,name,type,active,size,queue,queue_size,rejected,largest,completed,min,max,keep_alive v: true - match: $body: | - /^ id \s+ name \s+ type \s+ active \s+ pool_size \s+ queue \s+ queue_size \s+ rejected \s+ largest \s+ completed \s+ core \s+ max \s+ size \s+ keep_alive \n - (\S+ \s+ bulk \s+ fixed \s+ \d+ \s+ \d+ \s+ \d+ \s+ (-1|\d+) \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \d* \s+ \S* \n)+ $/ + /^ id \s+ name \s+ type \s+ active \s+ size \s+ queue \s+ queue_size \s+ rejected \s+ largest \s+ completed \s+ max \s+ keep_alive \n + (\S+ \s+ write \s+ fixed \s+ \d+ \s+ \d+ \s+ \d+ \s+ (-1|\d+) \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \S* \n)+ $/ - do: cat.thread_pool: @@ -72,12 +70,11 @@ - do: cat.thread_pool: - thread_pool_patterns: bulk,index,search + thread_pool_patterns: write,search size: "" - match: $body: | / #node_name name active queue rejected - ^ (\S+ \s+ bulk \s+ \d+ \s+ \d+ \s+ \d+ \n - \S+ \s+ index \s+ \d+ \s+ \d+ \s+ \d+ \n - \S+ \s+ search \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/ + ^ (\S+ \s+ search \s+ \d+ \s+ \d+ \s+ \d+ \n + \S+ \s+ write \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_no_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_no_types.yml index 40effe01b080f..aa05deb326024 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_no_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_no_types.yml @@ -39,44 +39,257 @@ - match: { index.mappings.properties.foo.type: "keyword" } - match: { index.mappings.properties.bar.type: "float" } -# Explicit id +--- +"Index explicit IDs without types": + + - skip: + version: " - 6.99.99" + reason: include_type_name was introduced in 7.0.0 + - do: - index: - index: index - id: 1 - body: { foo: bar } + indices.create: + index: index + include_type_name: false -# Implicit id - do: index: + include_type_name: false index: index + id: 1 body: { foo: bar } -# Bulk with explicit id + - match: { "_index": "index" } + - is_false: _type + - do: 
bulk: index: index + include_type_name: false body: | { "index": { "_id": "2" } } { "doc": { "foo": "baz" } } -# Bulk with implicit id + - match: { "items.0.index._index": "index" } + - is_false: items.0.index._type + +--- +"Index implicit IDs without types": + + - skip: + version: " - 6.99.99" + reason: include_type_name was introduced in 7.0.0 + + - do: + indices.create: + index: index + include_type_name: false + + - do: + index: + index: index + include_type_name: false + body: { foo: bar } + + - match: { "_index": "index" } + - is_false: _type + - do: bulk: index: index + include_type_name: false body: | { "index": { } } { "doc": { "foo": "baz" } } + - match: { "items.0.index._index": "index" } + - is_false: items.0.index._type + +--- +"Mixing include_type_name=false with explicit types": + + - skip: + version: " - 6.99.99" + reason: include_type_name was introduced in 7.0.0 + + - do: + indices.create: + index: index + include_type_name: false + + - do: + catch: /illegal_argument_exception/ + index: + index: index + type: type + id: 1 + include_type_name: false + body: { foo: bar } + + - do: + catch: /illegal_argument_exception/ + index: + index: index + type: type + include_type_name: false + body: { foo: bar } + + - do: + catch: /illegal_argument_exception/ + get: + index: index + type: type + id: 1 + include_type_name: false + + - do: + catch: /illegal_argument_exception/ + update: + index: index + type: type + id: 1 + include_type_name: false + body: + doc: { foo: baz } + + - do: + catch: /illegal_argument_exception/ + delete: + index: index + type: type + id: 1 + include_type_name: false + + - do: + catch: /illegal_argument_exception/ + search: + index: index + type: type + include_type_name: false + + - do: + catch: /illegal_argument_exception/ + search: + index: index + type: _doc + include_type_name: false + +--- +"Update API without types": + + - skip: + version: " - 6.99.99" + reason: include_type_name was introduced in 7.0.0 + + - do: + indices.create: + index: index + include_type_name: false + + - do: + index: + index: index + id: 1 + include_type_name: false + body: { "foo": "bar" } + + - do: + update: + index: index + id: 1 + include_type_name: false + body: + doc: { "foo": "baz" } + + - match: { "_index": "index" } + - is_false: _type + +--- +"GET API without types": + + - skip: + version: " - 6.99.99" + reason: include_type_name was introduced in 7.0.0 + + - do: + indices.create: + index: index + include_type_name: false + + - do: + index: + index: index + id: 1 + include_type_name: false + body: { "foo": "bar" } + + - do: + get: + index: index + id: 1 + include_type_name: false + + - match: { "_index": "index" } + - is_false: _type + +--- +"Delete API without types": + + - skip: + version: " - 6.99.99" + reason: include_type_name was introduced in 7.0.0 + + - do: + indices.create: + index: index + include_type_name: false + + - do: + index: + index: index + id: 1 + include_type_name: false + body: { "foo": "bar" } + + - do: + delete: + index: index + id: 1 + include_type_name: false + + - match: { "_index": "index" } + - is_false: _type + +--- +"Search without types": + + - skip: + version: " - 6.99.99" + reason: include_type_name was introduced in 7.0.0 + + - do: + indices.create: + index: index + include_type_name: false + + - do: + index: + index: index + id: 1 + include_type_name: false + body: { "foo": "bar" } + - do: indices.refresh: - index: index + index: index - do: - count: + search: index: index + include_type_name: false - - match: { count: 4 } + - match: 
{ "hits.total": 1 } + - match: { "hits.hits.0._index": "index" } + - is_false: hits.hits.0._type --- "PUT mapping with a type and include_type_name: false": @@ -88,6 +301,7 @@ - do: indices.create: index: index + include_type_name: false - do: catch: /illegal_argument_exception/ @@ -101,7 +315,7 @@ type: float --- -"Empty index with the include_type_name=false option": +"GET mappings on empty index with the include_type_name=false option": - skip: version: " - 6.99.99" diff --git a/server/build.gradle b/server/build.gradle index ab10b7571e8a6..7e880e0dae4d2 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -163,6 +163,7 @@ forbiddenPatterns { task generateModulesList { List modules = project(':modules').subprojects.collect { it.name } + modules.add('x-pack') File modulesFile = new File(buildDir, 'generated-resources/modules.txt') processResources.from(modulesFile) inputs.property('modules', modules) diff --git a/server/src/main/java/org/elasticsearch/Build.java b/server/src/main/java/org/elasticsearch/Build.java index cc162f35c2fcb..9021e2d4c1f66 100644 --- a/server/src/main/java/org/elasticsearch/Build.java +++ b/server/src/main/java/org/elasticsearch/Build.java @@ -27,6 +27,7 @@ import java.io.IOException; import java.net.URL; import java.security.CodeSource; +import java.util.Objects; import java.util.jar.JarInputStream; import java.util.jar.Manifest; @@ -40,11 +41,83 @@ public class Build { */ public static final Build CURRENT; + public enum Flavor { + + DEFAULT("default"), + OSS("oss"), + UNKNOWN("unknown"); + + final String displayName; + + Flavor(final String displayName) { + this.displayName = displayName; + } + + public String displayName() { + return displayName; + } + + public static Flavor fromDisplayName(final String displayName) { + switch (displayName) { + case "default": + return Flavor.DEFAULT; + case "oss": + return Flavor.OSS; + case "unknown": + return Flavor.UNKNOWN; + default: + throw new IllegalStateException("unexpected distribution flavor [" + displayName + "]; your distribution is broken"); + } + } + + } + + public enum Type { + + DEB("deb"), + RPM("rpm"), + TAR("tar"), + ZIP("zip"), + UNKNOWN("unknown"); + + final String displayName; + + public String displayName() { + return displayName; + } + + Type(final String displayName) { + this.displayName = displayName; + } + + public static Type fromDisplayName(final String displayName) { + switch (displayName) { + case "deb": + return Type.DEB; + case "rpm": + return Type.RPM; + case "tar": + return Type.TAR; + case "zip": + return Type.ZIP; + case "unknown": + return Type.UNKNOWN; + default: + throw new IllegalStateException("unexpected distribution type [" + displayName + "]; your distribution is broken"); + } + } + } + static { + final Flavor flavor; + final Type type; final String shortHash; final String date; final boolean isSnapshot; + flavor = Flavor.fromDisplayName(System.getProperty("es.distribution.flavor", "unknown")); + type = Type.fromDisplayName(System.getProperty("es.distribution.type", "unknown")); + final String esPrefix = "elasticsearch-" + Version.CURRENT; final URL url = getElasticsearchCodeSourceLocation(); final String urlStr = url == null ? "" : url.toString(); @@ -76,14 +149,14 @@ public class Build { } if (shortHash == null) { throw new IllegalStateException("Error finding the build shortHash. " + - "Stopping Elasticsearch now so it doesn't run in subtly broken ways. This is likely a build bug."); + "Stopping Elasticsearch now so it doesn't run in subtly broken ways. 
This is likely a build bug."); } if (date == null) { throw new IllegalStateException("Error finding the build date. " + - "Stopping Elasticsearch now so it doesn't run in subtly broken ways. This is likely a build bug."); + "Stopping Elasticsearch now so it doesn't run in subtly broken ways. This is likely a build bug."); } - CURRENT = new Build(shortHash, date, isSnapshot); + CURRENT = new Build(flavor, type, shortHash, date, isSnapshot); } private final boolean isSnapshot; @@ -98,10 +171,14 @@ static URL getElasticsearchCodeSourceLocation() { return codeSource == null ? null : codeSource.getLocation(); } + private final Flavor flavor; + private final Type type; private final String shortHash; private final String date; - public Build(String shortHash, String date, boolean isSnapshot) { + public Build(final Flavor flavor, final Type type, final String shortHash, final String date, boolean isSnapshot) { + this.flavor = flavor; + this.type = type; this.shortHash = shortHash; this.date = date; this.isSnapshot = isSnapshot; @@ -116,25 +193,51 @@ public String date() { } public static Build readBuild(StreamInput in) throws IOException { + final Flavor flavor; + final Type type; + if (in.getVersion().onOrAfter(Version.V_6_3_0)) { + flavor = Flavor.fromDisplayName(in.readString()); + } else { + flavor = Flavor.OSS; + } + if (in.getVersion().onOrAfter(Version.V_6_3_0)) { + type = Type.fromDisplayName(in.readString()); + } else { + type = Type.UNKNOWN; + } String hash = in.readString(); String date = in.readString(); boolean snapshot = in.readBoolean(); - return new Build(hash, date, snapshot); + return new Build(flavor, type, hash, date, snapshot); } public static void writeBuild(Build build, StreamOutput out) throws IOException { + if (out.getVersion().onOrAfter(Version.V_6_3_0)) { + out.writeString(build.flavor().displayName()); + } + if (out.getVersion().onOrAfter(Version.V_6_3_0)) { + out.writeString(build.type().displayName()); + } out.writeString(build.shortHash()); out.writeString(build.date()); out.writeBoolean(build.isSnapshot()); } + public Flavor flavor() { + return flavor; + } + + public Type type() { + return type; + } + public boolean isSnapshot() { return isSnapshot; } @Override public String toString() { - return "[" + shortHash + "][" + date + "]"; + return "[" + flavor.displayName() + "][" + type.displayName + "][" + shortHash + "][" + date + "]"; } @Override @@ -148,6 +251,14 @@ public boolean equals(Object o) { Build build = (Build) o; + if (!flavor.equals(build.flavor)) { + return false; + } + + if (!type.equals(build.type)) { + return false; + } + if (isSnapshot != build.isSnapshot) { return false; } @@ -160,9 +271,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - int result = (isSnapshot ? 
1 : 0); - result = 31 * result + shortHash.hashCode(); - result = 31 * result + date.hashCode(); - return result; + return Objects.hash(flavor, type, isSnapshot, shortHash, date); } + } diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index be56f01fa2dc2..5142f1cb84b91 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -35,6 +35,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Locale; public class Version implements Comparable, ToXContentFragment { /* @@ -117,6 +118,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_5_6_8 = new Version(V_5_6_8_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); public static final int V_5_6_9_ID = 5060999; public static final Version V_5_6_9 = new Version(V_5_6_9_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); + public static final int V_5_6_10_ID = 5061099; + public static final Version V_5_6_10 = new Version(V_5_6_10_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); public static final int V_6_0_0_alpha1_ID = 6000001; public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0); @@ -163,8 +166,12 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_2_3 = new Version(V_6_2_3_ID, LUCENE_7_2_1); public static final int V_6_2_4_ID = 6020499; public static final Version V_6_2_4 = new Version(V_6_2_4_ID, LUCENE_7_2_1); + public static final int V_6_2_5_ID = 6020599; + public static final Version V_6_2_5 = new Version(V_6_2_5_ID, LUCENE_7_2_1); public static final int V_6_3_0_ID = 6030099; public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_3_0); + public static final int V_6_4_0_ID = 6040099; + public static final Version V_6_4_0 = new Version(V_6_4_0_ID, org.apache.lucene.util.Version.LUCENE_7_3_0); public static final int V_7_0_0_alpha1_ID = 7000001; public static final Version V_7_0_0_alpha1 = new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_3_0); @@ -183,8 +190,12 @@ public static Version fromId(int id) { switch (id) { case V_7_0_0_alpha1_ID: return V_7_0_0_alpha1; + case V_6_4_0_ID: + return V_6_4_0; case V_6_3_0_ID: return V_6_3_0; + case V_6_2_5_ID: + return V_6_2_5; case V_6_2_4_ID: return V_6_2_4; case V_6_2_3_ID: @@ -221,6 +232,8 @@ public static Version fromId(int id) { return V_6_0_0_alpha2; case V_6_0_0_alpha1_ID: return V_6_0_0_alpha1; + case V_5_6_10_ID: + return V_5_6_10; case V_5_6_9_ID: return V_5_6_9; case V_5_6_8_ID: @@ -494,8 +507,16 @@ public boolean isCompatible(Version version) { @SuppressForbidden(reason = "System.out.*") public static void main(String[] args) { - System.out.println("Version: " + Version.CURRENT + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date() + ", JVM: " - + JvmInfo.jvmInfo().version()); + final String versionOutput = String.format( + Locale.ROOT, + "Version: %s, Build: %s/%s/%s/%s, JVM: %s", + Version.displayVersion(Version.CURRENT, Build.CURRENT.isSnapshot()), + Build.CURRENT.flavor().displayName(), + Build.CURRENT.type().displayName(), + Build.CURRENT.shortHash(), + Build.CURRENT.date(), + JvmInfo.jvmInfo().version()); + System.out.println(versionOutput); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java 
b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 69ba6db63ef07..8fa183a843419 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -295,9 +295,11 @@ public final XContentBuilder toXContent(XContentBuilder builder, Params params) public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { ReplicationResponse.ShardInfo shardInfo = getShardInfo(); - builder.field(_INDEX, shardId.getIndexName()) - .field(_TYPE, type) - .field(_ID, id) + builder.field(_INDEX, shardId.getIndexName()); + if (params.paramAsBoolean("include_type_name", true)) { + builder.field(_TYPE, type); + } + builder.field(_ID, id) .field(_VERSION, version) .field(RESULT, getResult().getLowercase()); if (forcedRefresh) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java index 09ac7e6aa13d7..3bee722a472ec 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java @@ -66,6 +66,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("ip", nodeInfo.getNode().getHostAddress()); builder.field("version", nodeInfo.getVersion()); + builder.field("build_flavor", nodeInfo.getBuild().flavor().displayName()); + builder.field("build_type", nodeInfo.getBuild().type().displayName()); builder.field("build_hash", nodeInfo.getBuild().shortHash()); if (nodeInfo.getTotalIndexingBuffer() != null) { builder.humanReadableField("total_indexing_buffer", "total_indexing_buffer_in_bytes", nodeInfo.getTotalIndexingBuffer()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java index 0410f920c8a9a..36974633559b6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java @@ -30,6 +30,8 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import static java.util.stream.Collectors.toList; + public final class TransportRemoteInfoAction extends HandledTransportAction { private final RemoteClusterService remoteClusterService; @@ -45,7 +47,6 @@ public TransportRemoteInfoAction(Settings settings, ThreadPool threadPool, Trans @Override protected void doExecute(RemoteInfoRequest remoteInfoRequest, ActionListener listener) { - remoteClusterService.getRemoteConnectionInfos(ActionListener.wrap(remoteConnectionInfos - -> listener.onResponse(new RemoteInfoResponse(remoteConnectionInfos)), listener::onFailure)); + listener.onResponse(new RemoteInfoResponse(remoteClusterService.getRemoteConnectionInfos().collect(toList()))); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java index 38d3a9d5caf54..f13c30c53503b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java @@ -58,7 +58,6 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest r.transientSettings = t, (p, c) -> Settings.fromXContent(p), TRANSIENT); } - private boolean flatSettings = false; private Settings transientSettings = EMPTY_SETTINGS; private Settings persistentSettings = EMPTY_SETTINGS; @@ -74,29 +73,6 @@ public ActionRequestValidationException validate() { return validationException; } - /** - * Sets the value of "flat_settings". - * Used only by the high-level REST client. - * - * @param flatSettings - * value of "flat_settings" flag to be set - * @return this request - */ - public ClusterUpdateSettingsRequest flatSettings(boolean flatSettings) { - this.flatSettings = flatSettings; - return this; - } - - /** - * Return settings in flat format. - * Used only by the high-level REST client. - * - * @return true if settings need to be returned in flat format; false otherwise. - */ - public boolean flatSettings() { - return flatSettings; - } - public Settings transientSettings() { return transientSettings; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index dc13c8dab5188..949918f88a10a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -230,9 +230,9 @@ private SnapshotsStatusResponse buildResponse(SnapshotsStatusRequest request, Li SnapshotInfo snapshotInfo = snapshotsService.snapshot(repositoryName, snapshotId); List shardStatusBuilder = new ArrayList<>(); if (snapshotInfo.state().completed()) { - Map shardStatues = - snapshotsService.snapshotShards(request.repository(), snapshotInfo); - for (Map.Entry shardStatus : shardStatues.entrySet()) { + Map shardStatuses = + snapshotsService.snapshotShards(repositoryName, repositoryData, snapshotInfo); + for (Map.Entry shardStatus : shardStatuses.entrySet()) { IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardStatus.getValue().asCopy(); shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), lastSnapshotStatus)); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java index 9b82bb85aae37..35f1f725b65ad 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -85,7 +85,7 @@ public class TransportAnalyzeAction extends TransportSingleShardActiontrue
if settings need to be returned in flat format; false otherwise. - */ - public boolean flatSettings() { - return flatSettings; - } - /** * Sets the value of "include_defaults". * Used only by the high-level REST client. diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index 594564b681562..18c7d506c7275 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -55,7 +55,6 @@ public class UpdateSettingsRequest extends AcknowledgedRequesttrue
if settings need to be returned in flat format; false otherwise. - */ - public boolean flatSettings() { - return flatSettings; - } - @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java index 6379f8da21aa2..e244369c0c312 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java @@ -152,9 +152,6 @@ public CommonStats(CommonStatsFlags flags) { case Translog: translog = new TranslogStats(); break; - case Suggest: - // skip - break; case RequestCache: requestCache = new RequestCacheStats(); break; @@ -213,9 +210,6 @@ public CommonStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, C case Translog: translog = indexShard.translogStats(); break; - case Suggest: - // skip - break; case RequestCache: requestCache = indexShard.requestCache().stats(); break; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java index 7d6e7c124cd37..a53cc0b339de8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java @@ -53,7 +53,7 @@ public CommonStatsFlags(StreamInput in) throws IOException { final long longFlags = in.readLong(); flags.clear(); for (Flag flag : Flag.values()) { - if ((longFlags & (1 << flag.ordinal())) != 0) { + if ((longFlags & (1 << flag.getIndex())) != 0) { flags.add(flag); } } @@ -68,7 +68,7 @@ public CommonStatsFlags(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { long longFlags = 0; for (Flag flag : flags) { - longFlags |= (1 << flag.ordinal()); + longFlags |= (1 << flag.getIndex()); } out.writeLong(longFlags); @@ -207,34 +207,39 @@ public CommonStatsFlags clone() { } public enum Flag { - // Do not change the order of these flags we use - // the ordinal for encoding! Only append to the end! 
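This hunk replaces the ordinal-based bitmask with an explicit per-flag index, so dropping the unused Suggest flag from the middle of the enum no longer shifts the wire encoding of the flags that follow it (bit 14 stays reserved). A minimal, self-contained sketch of the same encode/decode round trip; the trimmed constant list and the FlagCodec helper are illustrative, not part of this change:

```java
import java.util.EnumSet;

enum Flag {
    // Trimmed-down constant list; index 14 stays reserved for the removed Suggest flag.
    STORE(0), INDEXING(1), TRANSLOG(13), REQUEST_CACHE(15), RECOVERY(16);

    private final int index;

    Flag(final int index) {
        this.index = index;
    }

    int getIndex() {
        return index;
    }
}

final class FlagCodec {

    // Encode the set as a bitmask keyed by the stable per-flag index, not the ordinal.
    static long encode(final EnumSet<Flag> flags) {
        long bits = 0;
        for (final Flag flag : flags) {
            bits |= 1L << flag.getIndex();
        }
        return bits;
    }

    // Decode by probing each known flag's bit; reserved or unknown bits are simply ignored.
    static EnumSet<Flag> decode(final long bits) {
        final EnumSet<Flag> flags = EnumSet.noneOf(Flag.class);
        for (final Flag flag : Flag.values()) {
            if ((bits & (1L << flag.getIndex())) != 0) {
                flags.add(flag);
            }
        }
        return flags;
    }
}
```

With ordinal-based encoding, removing a constant would silently change which bit later flags map to; pinning the index keeps old and new nodes agreeing on the meaning of each bit.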
- Store("store"), - Indexing("indexing"), - Get("get"), - Search("search"), - Merge("merge"), - Flush("flush"), - Refresh("refresh"), - QueryCache("query_cache"), - FieldData("fielddata"), - Docs("docs"), - Warmer("warmer"), - Completion("completion"), - Segments("segments"), - Translog("translog"), - Suggest("suggest"), // unused - RequestCache("request_cache"), - Recovery("recovery"); + Store("store", 0), + Indexing("indexing", 1), + Get("get", 2), + Search("search", 3), + Merge("merge", 4), + Flush("flush", 5), + Refresh("refresh", 6), + QueryCache("query_cache", 7), + FieldData("fielddata", 8), + Docs("docs", 9), + Warmer("warmer", 10), + Completion("completion", 11), + Segments("segments", 12), + Translog("translog", 13), + // 14 was previously used for Suggest + RequestCache("request_cache", 15), + Recovery("recovery", 16); private final String restName; + private final int index; - Flag(String restName) { + Flag(final String restName, final int index) { this.restName = restName; + this.index = index; } public String getRestName() { return restName; } + + private int getIndex() { + return index; + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java index e4357f7ba126c..9f401b6312c46 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java @@ -229,15 +229,6 @@ public boolean translog() { return flags.isSet(Flag.Translog); } - public IndicesStatsRequest suggest(boolean suggest) { - flags.set(Flag.Suggest, suggest); - return this; - } - - public boolean suggest() { - return flags.isSet(Flag.Suggest); - } - public IndicesStatsRequest requestCache(boolean requestCache) { flags.set(Flag.RequestCache, requestCache); return this; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java index 50d7712da11d0..eeefe793db701 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -148,9 +148,6 @@ protected ShardStats shardOperation(IndicesStatsRequest request, ShardRouting sh if (request.translog()) { flags.set(CommonStatsFlags.Flag.Translog); } - if (request.suggest()) { - flags.set(CommonStatsFlags.Flag.Suggest); - } if (request.requestCache()) { flags.set(CommonStatsFlags.Flag.RequestCache); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index f9b27a1e62040..260c75692e19b 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -83,7 +83,7 @@ public TransportShardBulkAction(Settings settings, TransportService transportSer MappingUpdatedAction mappingUpdatedAction, UpdateHelper updateHelper, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, - indexNameExpressionResolver, 
BulkShardRequest::new, BulkShardRequest::new, ThreadPool.Names.BULK); + indexNameExpressionResolver, BulkShardRequest::new, BulkShardRequest::new, ThreadPool.Names.WRITE); this.updateHelper = updateHelper; this.mappingUpdatedAction = mappingUpdatedAction; } diff --git a/server/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/server/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 3aaf4a472facf..32c599a9f5804 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/server/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -46,7 +46,7 @@ public TransportDeleteAction(Settings settings, TransportService transportServic ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, TransportBulkAction bulkAction, TransportShardBulkAction shardBulkAction) { super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, - actionFilters, indexNameExpressionResolver, DeleteRequest::new, DeleteRequest::new, ThreadPool.Names.INDEX, + actionFilters, indexNameExpressionResolver, DeleteRequest::new, DeleteRequest::new, ThreadPool.Names.WRITE, bulkAction, shardBulkAction); } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java index ec6d0902ac98a..21bb452430e7a 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java @@ -19,11 +19,14 @@ package org.elasticsearch.action.fieldcaps; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; @@ -36,6 +39,13 @@ * Describes the capabilities of a field optionally merged across multiple indices. */ public class FieldCapabilities implements Writeable, ToXContentObject { + private static final ParseField TYPE_FIELD = new ParseField("type"); + private static final ParseField SEARCHABLE_FIELD = new ParseField("searchable"); + private static final ParseField AGGREGATABLE_FIELD = new ParseField("aggregatable"); + private static final ParseField INDICES_FIELD = new ParseField("indices"); + private static final ParseField NON_SEARCHABLE_INDICES_FIELD = new ParseField("non_searchable_indices"); + private static final ParseField NON_AGGREGATABLE_INDICES_FIELD = new ParseField("non_aggregatable_indices"); + private final String name; private final String type; private final boolean isSearchable; @@ -52,7 +62,7 @@ public class FieldCapabilities implements Writeable, ToXContentObject { * @param isSearchable Whether this field is indexed for search. * @param isAggregatable Whether this field can be aggregated on. 
*/ - FieldCapabilities(String name, String type, boolean isSearchable, boolean isAggregatable) { + public FieldCapabilities(String name, String type, boolean isSearchable, boolean isAggregatable) { this(name, type, isSearchable, isAggregatable, null, null, null); } @@ -69,7 +79,7 @@ public class FieldCapabilities implements Writeable, ToXContentObject { * @param nonAggregatableIndices The list of indices where this field is not aggregatable, * or null if the field is aggregatable in all indices. */ - FieldCapabilities(String name, String type, + public FieldCapabilities(String name, String type, boolean isSearchable, boolean isAggregatable, String[] indices, String[] nonSearchableIndices, @@ -83,7 +93,7 @@ public class FieldCapabilities implements Writeable, ToXContentObject { this.nonAggregatableIndices = nonAggregatableIndices; } - FieldCapabilities(StreamInput in) throws IOException { + public FieldCapabilities(StreamInput in) throws IOException { this.name = in.readString(); this.type = in.readString(); this.isSearchable = in.readBoolean(); @@ -107,22 +117,47 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field("type", type); - builder.field("searchable", isSearchable); - builder.field("aggregatable", isAggregatable); + builder.field(TYPE_FIELD.getPreferredName(), type); + builder.field(SEARCHABLE_FIELD.getPreferredName(), isSearchable); + builder.field(AGGREGATABLE_FIELD.getPreferredName(), isAggregatable); if (indices != null) { - builder.field("indices", indices); + builder.field(INDICES_FIELD.getPreferredName(), indices); } if (nonSearchableIndices != null) { - builder.field("non_searchable_indices", nonSearchableIndices); + builder.field(NON_SEARCHABLE_INDICES_FIELD.getPreferredName(), nonSearchableIndices); } if (nonAggregatableIndices != null) { - builder.field("non_aggregatable_indices", nonAggregatableIndices); + builder.field(NON_AGGREGATABLE_INDICES_FIELD.getPreferredName(), nonAggregatableIndices); } builder.endObject(); return builder; } + public static FieldCapabilities fromXContent(String name, XContentParser parser) throws IOException { + return PARSER.parse(parser, name); + } + + @SuppressWarnings("unchecked") + private static ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "field_capabilities", + true, + (a, name) -> new FieldCapabilities(name, + (String) a[0], + (boolean) a[1], + (boolean) a[2], + a[3] != null ? ((List) a[3]).toArray(new String[0]) : null, + a[4] != null ? ((List) a[4]).toArray(new String[0]) : null, + a[5] != null ? ((List) a[5]).toArray(new String[0]) : null)); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), TYPE_FIELD); + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), SEARCHABLE_FIELD); + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), AGGREGATABLE_FIELD); + PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), INDICES_FIELD); + PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), NON_SEARCHABLE_INDICES_FIELD); + PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), NON_AGGREGATABLE_INDICES_FIELD); + } + /** * The name of the field. 
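The FieldCapabilities parser above follows the usual ConstructingObjectParser recipe: mandatory constructor arguments are declared in positional order, optional string arrays default to null, and the field name, which lives outside the object body, is threaded through as the parse context. A reduced sketch of the same recipe for a hypothetical two-property value class (SampleCaps and its properties are made up for illustration; the parser API calls mirror the hunk above):

```java
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;

// Hypothetical value class used only to illustrate the parser recipe.
final class SampleCaps {

    private static final ParseField TYPE_FIELD = new ParseField("type");
    private static final ParseField SEARCHABLE_FIELD = new ParseField("searchable");

    private final String name;
    private final String type;
    private final boolean isSearchable;

    SampleCaps(String name, String type, boolean isSearchable) {
        this.name = name;
        this.type = type;
        this.isSearchable = isSearchable;
    }

    // The field name is not part of the object body, so it travels in as the parse context.
    private static final ConstructingObjectParser<SampleCaps, String> PARSER =
        new ConstructingObjectParser<>("sample_caps", true,
            (a, name) -> new SampleCaps(name, (String) a[0], (boolean) a[1]));

    static {
        PARSER.declareString(ConstructingObjectParser.constructorArg(), TYPE_FIELD);
        PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), SEARCHABLE_FIELD);
    }

    static SampleCaps fromXContent(String name, XContentParser parser) throws IOException {
        return PARSER.parse(parser, name);
    }
}
```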
*/ diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java index b04f882076326..e91d9a703f491 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -61,14 +61,18 @@ public FieldCapabilitiesRequest() {} /** * Returns true iff the results should be merged. + * + * Note that when using the high-level REST client, results are always merged (this flag is always considered 'true'). */ boolean isMergeResults() { return mergeResults; } /** - * if set to true the response will contain only a merged view of the per index field capabilities. Otherwise only - * unmerged per index field capabilities are returned. + * If set to true the response will contain only a merged view of the per index field capabilities. + * Otherwise only unmerged per index field capabilities are returned. + * + * Note that when using the high-level REST client, results are always merged (this flag is always considered 'true'). */ void setMergeResults(boolean mergeResults) { this.mergeResults = mergeResults; @@ -158,17 +162,17 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; FieldCapabilitiesRequest that = (FieldCapabilitiesRequest) o; - - if (!Arrays.equals(indices, that.indices)) return false; - if (!indicesOptions.equals(that.indicesOptions)) return false; - return Arrays.equals(fields, that.fields); + return Arrays.equals(indices, that.indices) && + Objects.equals(indicesOptions, that.indicesOptions) && + Arrays.equals(fields, that.fields) && + Objects.equals(mergeResults, that.mergeResults); } @Override public int hashCode() { - int result = Arrays.hashCode(indices); - result = 31 * result + indicesOptions.hashCode(); - result = 31 * result + Arrays.hashCode(fields); - return result; + return Objects.hash(Arrays.hashCode(indices), + indicesOptions, + Arrays.hashCode(fields), + mergeResults); } } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java index 4b1bcf575899f..959b4e572b714 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java @@ -21,20 +21,29 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParserUtils; import java.io.IOException; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; /** * Response for {@link FieldCapabilitiesRequest} requests. 
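The reworked FieldCapabilitiesRequest equals/hashCode above wraps the array members in Arrays.equals/Arrays.hashCode before handing them to Objects.equals/Objects.hash, because an array passed directly to Objects.hash becomes a single varargs element and contributes only its identity hash. A small stand-alone demonstration (field values are arbitrary):

```java
import java.util.Arrays;
import java.util.Objects;

final class ArrayHashDemo {
    public static void main(String[] args) {
        String[] fields1 = {"rating", "title"};
        String[] fields2 = {"rating", "title"};
        boolean mergeResults = true;

        // The array becomes a single varargs element, so only its identity hash is used:
        System.out.println(Objects.hash(fields1, mergeResults)
            == Objects.hash(fields2, mergeResults));                      // false (almost always)

        // Hashing the contents first gives a value that is stable across equal copies:
        System.out.println(Objects.hash(Arrays.hashCode(fields1), mergeResults)
            == Objects.hash(Arrays.hashCode(fields2), mergeResults));     // true
    }
}
```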
*/ -public class FieldCapabilitiesResponse extends ActionResponse implements ToXContentFragment { +public class FieldCapabilitiesResponse extends ActionResponse implements ToXContentObject { + private static final ParseField FIELDS_FIELD = new ParseField("fields"); + private Map> responseMap; private List indexResponses; @@ -114,8 +123,41 @@ private static void writeField(StreamOutput out, @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field("fields", responseMap); - return builder; + return builder.startObject() + .field(FIELDS_FIELD.getPreferredName(), responseMap) + .endObject(); + } + + public static FieldCapabilitiesResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("field_capabilities_response", true, + a -> new FieldCapabilitiesResponse( + ((List>>) a[0]).stream() + .collect(Collectors.toMap(Tuple::v1, Tuple::v2)))); + + static { + PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> { + Map typeToCapabilities = parseTypeToCapabilities(p, n); + return new Tuple<>(n, typeToCapabilities); + }, FIELDS_FIELD); + } + + private static Map parseTypeToCapabilities(XContentParser parser, String name) throws IOException { + Map typeToCapabilities = new HashMap<>(); + + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation); + String type = parser.currentName(); + FieldCapabilities capabilities = FieldCapabilities.fromXContent(name, parser); + typeToCapabilities.put(type, capabilities); + } + return typeToCapabilities; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/server/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index 88a210c718019..4d5797971ca08 100644 --- a/server/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -54,7 +54,7 @@ public TransportIndexAction(Settings settings, TransportService transportService ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, TransportBulkAction bulkAction, TransportShardBulkAction shardBulkAction) { super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, - actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexRequest::new, ThreadPool.Names.INDEX, + actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexRequest::new, ThreadPool.Names.WRITE, bulkAction, shardBulkAction); } diff --git a/server/src/main/java/org/elasticsearch/action/main/MainResponse.java b/server/src/main/java/org/elasticsearch/action/main/MainResponse.java index a8bd1acbe4363..6cae1056a4b7b 100644 --- a/server/src/main/java/org/elasticsearch/action/main/MainResponse.java +++ b/server/src/main/java/org/elasticsearch/action/main/MainResponse.java @@ -107,6 +107,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("cluster_uuid", clusterUuid); builder.startObject("version") .field("number", 
version.toString()) + .field("build_flavor", build.flavor().displayName()) + .field("build_type", build.type().displayName()) .field("build_hash", build.shortHash()) .field("build_date", build.date()) .field("build_snapshot", build.isSnapshot()) @@ -128,8 +130,15 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws PARSER.declareString((response, value) -> response.clusterUuid = value, new ParseField("cluster_uuid")); PARSER.declareString((response, value) -> {}, new ParseField("tagline")); PARSER.declareObject((response, value) -> { - response.build = new Build((String) value.get("build_hash"), (String) value.get("build_date"), - (boolean) value.get("build_snapshot")); + final String buildFlavor = (String) value.get("build_flavor"); + final String buildType = (String) value.get("build_type"); + response.build = + new Build( + buildFlavor == null ? Build.Flavor.UNKNOWN : Build.Flavor.fromDisplayName(buildFlavor), + buildType == null ? Build.Type.UNKNOWN : Build.Type.fromDisplayName(buildType), + (String) value.get("build_hash"), + (String) value.get("build_date"), + (boolean) value.get("build_snapshot")); response.version = Version.fromString((String) value.get("number")); }, (parser, context) -> parser.map(), new ParseField("version")); } diff --git a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java index 4e7c66afdcaf0..c182fb24ffb11 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java @@ -60,7 +60,7 @@ public TransportResyncReplicationAction(Settings settings, TransportService tran ShardStateAction shardStateAction, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, - indexNameExpressionResolver, ResyncReplicationRequest::new, ResyncReplicationRequest::new, ThreadPool.Names.BULK); + indexNameExpressionResolver, ResyncReplicationRequest::new, ResyncReplicationRequest::new, ThreadPool.Names.WRITE); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index aad2638bd9de3..91aec1171dcd6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -37,8 +37,10 @@ import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.transport.Transport; +import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -62,6 +64,7 @@ abstract class AbstractSearchAsyncAction exten private final long clusterStateVersion; private final Map aliasFilter; private final Map concreteIndexBoosts; + private final Map> indexRoutings; private final SetOnce> shardFailures = new SetOnce<>(); private final Object shardFailuresMutex = new Object(); private final AtomicInteger successfulOps = new AtomicInteger(); @@ -72,6 +75,7 @@ abstract class AbstractSearchAsyncAction exten protected 
AbstractSearchAsyncAction(String name, Logger logger, SearchTransportService searchTransportService, BiFunction nodeIdToConnection, Map aliasFilter, Map concreteIndexBoosts, + Map> indexRoutings, Executor executor, SearchRequest request, ActionListener listener, GroupShardsIterator shardsIts, TransportSearchAction.SearchTimeProvider timeProvider, long clusterStateVersion, @@ -89,6 +93,7 @@ protected AbstractSearchAsyncAction(String name, Logger logger, SearchTransportS this.clusterStateVersion = clusterStateVersion; this.concreteIndexBoosts = concreteIndexBoosts; this.aliasFilter = aliasFilter; + this.indexRoutings = indexRoutings; this.results = resultConsumer; this.clusters = clusters; } @@ -128,17 +133,17 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha onPhaseFailure(currentPhase, "all shards failed", cause); } else { Boolean allowPartialResults = request.allowPartialSearchResults(); - assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults"; + assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults"; if (allowPartialResults == false && shardFailures.get() != null ){ if (logger.isDebugEnabled()) { final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures()); Throwable cause = shardSearchFailures.length == 0 ? null : ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; - logger.debug(() -> new ParameterizedMessage("{} shards failed for phase: [{}]", + logger.debug(() -> new ParameterizedMessage("{} shards failed for phase: [{}]", shardSearchFailures.length, getName()), cause); } - onPhaseFailure(currentPhase, "Partial shards failure", null); - } else { + onPhaseFailure(currentPhase, "Partial shards failure", null); + } else { if (logger.isTraceEnabled()) { final String resultsFrom = results.getSuccessfulResults() .map(r -> r.getSearchShardTarget().toString()).collect(Collectors.joining(",")); @@ -271,14 +276,14 @@ public final SearchRequest getRequest() { @Override public final SearchResponse buildSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId) { - + ShardSearchFailure[] failures = buildShardFailures(); Boolean allowPartialResults = request.allowPartialSearchResults(); assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults"; if (allowPartialResults == false && failures.length > 0){ - raisePhaseFailure(new SearchPhaseExecutionException("", "Shard failures", null, failures)); - } - + raisePhaseFailure(new SearchPhaseExecutionException("", "Shard failures", null, failures)); + } + return new SearchResponse(internalSearchResponse, scrollId, getNumShards(), successfulOps.get(), skippedOps.get(), buildTookInMillis(), failures, clusters); } @@ -318,8 +323,11 @@ public final ShardSearchTransportRequest buildShardSearchRequest(SearchShardIter AliasFilter filter = aliasFilter.get(shardIt.shardId().getIndex().getUUID()); assert filter != null; float indexBoost = concreteIndexBoosts.getOrDefault(shardIt.shardId().getIndex().getUUID(), DEFAULT_INDEX_BOOST); + String indexName = shardIt.shardId().getIndex().getName(); + final String[] routings = indexRoutings.getOrDefault(indexName, Collections.emptySet()) + .toArray(new String[0]); return new ShardSearchTransportRequest(shardIt.getOriginalIndices(), request, shardIt.shardId(), getNumShards(), - filter, indexBoost, timeProvider.getAbsoluteStartMillis(), clusterAlias); + filter, indexBoost, 
timeProvider.getAbsoluteStartMillis(), clusterAlias, routings); } /** diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java index fe42d50393635..0873ff40f7500 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java @@ -27,6 +27,7 @@ import org.elasticsearch.transport.Transport; import java.util.Map; +import java.util.Set; import java.util.concurrent.Executor; import java.util.function.BiFunction; import java.util.function.Function; @@ -47,6 +48,7 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction nodeIdToConnection, Map aliasFilter, Map concreteIndexBoosts, + Map> indexRoutings, Executor executor, SearchRequest request, ActionListener listener, GroupShardsIterator shardsIts, TransportSearchAction.SearchTimeProvider timeProvider, long clusterStateVersion, @@ -56,9 +58,9 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction 0) { int maxConcurrentShardRequests = Math.min(this.maxConcurrentShardRequests, shardsIts.size()); final boolean success = shardExecutionIndex.compareAndSet(0, maxConcurrentShardRequests); - assert success; + assert success; assert request.allowPartialSearchResults() != null : "SearchRequest missing setting for allowPartialSearchResults"; if (request.allowPartialSearchResults() == false) { final StringBuilder missingShards = new StringBuilder(); @@ -140,7 +140,7 @@ public final void run() throws IOException { final SearchShardIterator shardRoutings = shardsIts.get(index); if (shardRoutings.size() == 0) { if(missingShards.length() >0 ){ - missingShards.append(", "); + missingShards.append(", "); } missingShards.append(shardRoutings.shardId()); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index 9bcbe1c8e6760..0782fbb310b65 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.transport.Transport; import java.util.Map; +import java.util.Set; import java.util.concurrent.Executor; import java.util.function.BiFunction; @@ -37,11 +38,13 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction SearchDfsQueryThenFetchAsyncAction(final Logger logger, final SearchTransportService searchTransportService, final BiFunction nodeIdToConnection, final Map aliasFilter, - final Map concreteIndexBoosts, final SearchPhaseController searchPhaseController, final Executor executor, + final Map concreteIndexBoosts, final Map> indexRoutings, + final SearchPhaseController searchPhaseController, final Executor executor, final SearchRequest request, final ActionListener listener, final GroupShardsIterator shardsIts, final TransportSearchAction.SearchTimeProvider timeProvider, final long clusterStateVersion, final SearchTask task, SearchResponse.Clusters clusters) { - super("dfs", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, request, listener, + super("dfs", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, indexRoutings, + 
executor, request, listener, shardsIts, timeProvider, clusterStateVersion, task, new ArraySearchPhaseResults<>(shardsIts.size()), request.getMaxConcurrentShardRequests(), clusters); this.searchPhaseController = searchPhaseController; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index b7669312b0088..bbd84011de00b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.transport.Transport; import java.util.Map; +import java.util.Set; import java.util.concurrent.Executor; import java.util.function.BiFunction; @@ -37,13 +38,14 @@ final class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction nodeIdToConnection, final Map aliasFilter, - final Map concreteIndexBoosts, final SearchPhaseController searchPhaseController, final Executor executor, + final Map concreteIndexBoosts, final Map> indexRoutings, + final SearchPhaseController searchPhaseController, final Executor executor, final SearchRequest request, final ActionListener listener, final GroupShardsIterator shardsIts, final TransportSearchAction.SearchTimeProvider timeProvider, long clusterStateVersion, SearchTask task, SearchResponse.Clusters clusters) { - super("query", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, request, listener, - shardsIts, timeProvider, clusterStateVersion, task, searchPhaseController.newSearchPhaseResults(request, shardsIts.size()), - request.getMaxConcurrentShardRequests(), clusters); + super("query", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, indexRoutings, + executor, request, listener, shardsIts, timeProvider, clusterStateVersion, task, + searchPhaseController.newSearchPhaseResults(request, shardsIts.size()), request.getMaxConcurrentShardRequests(), clusters); this.searchPhaseController = searchPhaseController; } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index bd533ce7b097a..6b39af478f432 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -297,6 +297,7 @@ private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, Sea Map aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, indices, remoteAliasMap); Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), searchRequest.indices()); + routingMap = routingMap == null ? 
Collections.emptyMap() : Collections.unmodifiableMap(routingMap); String[] concreteIndices = new String[indices.length]; for (int i = 0; i < indices.length; i++) { concreteIndices[i] = indices[i].getName(); @@ -350,7 +351,7 @@ private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, Sea } boolean preFilterSearchShards = shouldPreFilterSearchShards(searchRequest, shardIterators); searchAsyncAction(task, searchRequest, shardIterators, timeProvider, connectionLookup, clusterState.version(), - Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, listener, preFilterSearchShards, clusters).start(); + Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, routingMap, listener, preFilterSearchShards, clusters).start(); } private boolean shouldPreFilterSearchShards(SearchRequest searchRequest, GroupShardsIterator shardIterators) { @@ -380,17 +381,20 @@ private AbstractSearchAsyncAction searchAsyncAction(SearchTask task, SearchReque GroupShardsIterator shardIterators, SearchTimeProvider timeProvider, BiFunction connectionLookup, - long clusterStateVersion, Map aliasFilter, + long clusterStateVersion, + Map aliasFilter, Map concreteIndexBoosts, - ActionListener listener, boolean preFilter, + Map> indexRoutings, + ActionListener listener, + boolean preFilter, SearchResponse.Clusters clusters) { Executor executor = threadPool.executor(ThreadPool.Names.SEARCH); if (preFilter) { return new CanMatchPreFilterSearchPhase(logger, searchTransportService, connectionLookup, - aliasFilter, concreteIndexBoosts, executor, searchRequest, listener, shardIterators, + aliasFilter, concreteIndexBoosts, indexRoutings, executor, searchRequest, listener, shardIterators, timeProvider, clusterStateVersion, task, (iter) -> { AbstractSearchAsyncAction action = searchAsyncAction(task, searchRequest, iter, timeProvider, connectionLookup, - clusterStateVersion, aliasFilter, concreteIndexBoosts, listener, false, clusters); + clusterStateVersion, aliasFilter, concreteIndexBoosts, indexRoutings, listener, false, clusters); return new SearchPhase(action.getName()) { @Override public void run() throws IOException { @@ -403,14 +407,14 @@ public void run() throws IOException { switch (searchRequest.searchType()) { case DFS_QUERY_THEN_FETCH: searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, connectionLookup, - aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, - timeProvider, clusterStateVersion, task, clusters); + aliasFilter, concreteIndexBoosts, indexRoutings, searchPhaseController, executor, searchRequest, listener, + shardIterators, timeProvider, clusterStateVersion, task, clusters); break; case QUERY_AND_FETCH: case QUERY_THEN_FETCH: searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, connectionLookup, - aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, - timeProvider, clusterStateVersion, task, clusters); + aliasFilter, concreteIndexBoosts, indexRoutings, searchPhaseController, executor, searchRequest, listener, + shardIterators, timeProvider, clusterStateVersion, task, clusters); break; default: throw new IllegalStateException("Unknown search type: [" + searchRequest.searchType() + "]"); diff --git a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index 242dfe635ec91..91911129dfac7 100644 
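With the routing map now threaded from TransportSearchAction down into the async search actions, each shard request resolves its routing values with a null-safe getOrDefault lookup before building the ShardSearchTransportRequest. A stand-alone sketch of just that lookup; the RoutingLookup class and the sample index names are illustrative:

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

final class RoutingLookup {

    // Missing entries fall back to an empty set rather than null, mirroring the getOrDefault pattern above.
    static String[] routingsFor(Map<String, Set<String>> indexRoutings, String indexName) {
        return indexRoutings.getOrDefault(indexName, Collections.emptySet()).toArray(new String[0]);
    }

    public static void main(String[] args) {
        Map<String, Set<String>> indexRoutings = new HashMap<>();
        indexRoutings.put("logs", new HashSet<>(Arrays.asList("user-1", "user-2")));

        System.out.println(Arrays.toString(routingsFor(indexRoutings, "logs")));    // [user-1, user-2] (order not guaranteed)
        System.out.println(Arrays.toString(routingsFor(indexRoutings, "metrics"))); // []
    }
}
```

Normalizing the resolved map to an unmodifiable, possibly empty map up front, as the TransportSearchAction hunk does, keeps the per-shard code free of null checks.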
--- a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -86,7 +86,7 @@ public TransportUpdateAction(Settings settings, ThreadPool threadPool, ClusterSe @Override protected String executor() { - return ThreadPool.Names.INDEX; + return ThreadPool.Names.WRITE; } @Override diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 79e5e87251a90..870c537e020c5 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -162,7 +162,7 @@ private void setup(boolean addShutdownHook, Environment environment) throws Boot Settings settings = environment.settings(); try { - spawner.spawnNativePluginControllers(environment); + spawner.spawnNativeControllers(environment); } catch (IOException e) { throw new BootstrapException(e); } diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index a0646288b1ad0..6296b611c15d9 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -38,6 +38,7 @@ import java.nio.file.Path; import java.security.Permission; import java.util.Arrays; +import java.util.Locale; /** * This class starts elasticsearch. @@ -98,9 +99,16 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th throw new UserException(ExitCodes.USAGE, "Positional arguments not allowed, found " + options.nonOptionArguments()); } if (options.has(versionOption)) { - terminal.println("Version: " + Version.displayVersion(Version.CURRENT, Build.CURRENT.isSnapshot()) - + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date() - + ", JVM: " + JvmInfo.jvmInfo().version()); + final String versionOutput = String.format( + Locale.ROOT, + "Version: %s, Build: %s/%s/%s/%s, JVM: %s", + Version.displayVersion(Version.CURRENT, Build.CURRENT.isSnapshot()), + Build.CURRENT.flavor().displayName(), + Build.CURRENT.type().displayName(), + Build.CURRENT.shortHash(), + Build.CURRENT.date(), + JvmInfo.jvmInfo().version()); + terminal.println(versionOutput); return; } diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Security.java b/server/src/main/java/org/elasticsearch/bootstrap/Security.java index 9f2790c94cce6..5b4f352ded945 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -328,7 +328,6 @@ static void addFilePermissions(Permissions policy, Environment environment) thro private static void addBindPermissions(Permissions policy, Settings settings) { addSocketPermissionForHttp(policy, settings); addSocketPermissionForTransportProfiles(policy, settings); - addSocketPermissionForTribeNodes(policy, settings); } /** @@ -374,16 +373,6 @@ private static void addSocketPermissionForTransport(final Permissions policy, fi addSocketPermissionForPortRange(policy, transportRange); } - private static void addSocketPermissionForTribeNodes(final Permissions policy, final Settings settings) { - for (final Settings tribeNodeSettings : settings.getGroups("tribe", true).values()) { - // tribe nodes have HTTP disabled by default, so we check if HTTP is enabled before granting - if 
(NetworkModule.HTTP_ENABLED.exists(tribeNodeSettings) && NetworkModule.HTTP_ENABLED.get(tribeNodeSettings)) { - addSocketPermissionForHttp(policy, tribeNodeSettings); - } - addSocketPermissionForTransport(policy, tribeNodeSettings); - } - } - /** * Add dynamic {@link SocketPermission} for the specified port range. * diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Spawner.java b/server/src/main/java/org/elasticsearch/bootstrap/Spawner.java index c0705d9f863ba..f1c6c36dc5cca 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Spawner.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Spawner.java @@ -37,8 +37,7 @@ import java.util.concurrent.atomic.AtomicBoolean; /** - * Spawns native plugin controller processes if present. Will only work prior to a system call - * filter being installed. + * Spawns native module controller processes if present. Will only work prior to a system call filter being installed. */ final class Spawner implements Closeable { @@ -54,55 +53,46 @@ public void close() throws IOException { } /** - * Spawns the native controllers for each plugin + * Spawns the native controllers for each module. * * @param environment the node environment - * @throws IOException if an I/O error occurs reading the plugins or spawning a native process + * @throws IOException if an I/O error occurs reading the module or spawning a native process */ - void spawnNativePluginControllers(final Environment environment) throws IOException { + void spawnNativeControllers(final Environment environment) throws IOException { if (!spawned.compareAndSet(false, true)) { throw new IllegalStateException("native controllers already spawned"); } - spawnControllers(environment.pluginsFile(), "plugins", environment.tmpFile()); - spawnControllers(environment.modulesFile(), "modules", environment.tmpFile()); - } - - /** Spawn controllers in plugins found within the given directory. */ - private void spawnControllers(Path pluginsDir, String type, Path tmpDir) throws IOException { - if (!Files.exists(pluginsDir)) { - throw new IllegalStateException(type + " directory [" + pluginsDir + "] not found"); + if (!Files.exists(environment.modulesFile())) { + throw new IllegalStateException("modules directory [" + environment.modulesFile() + "] not found"); } /* - * For each plugin, attempt to spawn the controller daemon. Silently ignore any plugin that - * don't include a controller for the correct platform. + * For each module, attempt to spawn the controller daemon. Silently ignore any module that doesn't include a controller for the + * correct platform. 
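Both the Version#main hunk earlier in this patch and the --version handling in Elasticsearch.java above assemble the same output through String.format with Locale.ROOT, so flavor, type, hash, and date render identically regardless of the JVM's default locale. A reduced sketch with placeholder values standing in for Version.CURRENT, Build.CURRENT, and JvmInfo:

```java
import java.util.Locale;

final class VersionOutputDemo {
    public static void main(String[] args) {
        // Placeholder values standing in for Version.CURRENT, Build.CURRENT and JvmInfo.jvmInfo().
        final String version = "7.0.0-alpha1-SNAPSHOT";
        final String flavor = "default";
        final String type = "zip";
        final String shortHash = "abc1234";
        final String date = "2018-04-01T00:00:00.000Z";
        final String jvm = "10";

        final String versionOutput = String.format(
            Locale.ROOT,
            "Version: %s, Build: %s/%s/%s/%s, JVM: %s",
            version, flavor, type, shortHash, date, jvm);
        System.out.println(versionOutput);
    }
}
```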
*/ - List paths = PluginsService.findPluginDirs(pluginsDir); - for (Path plugin : paths) { - final PluginInfo info = PluginInfo.readFromProperties(plugin); - final Path spawnPath = Platforms.nativeControllerPath(plugin); + List paths = PluginsService.findPluginDirs(environment.modulesFile()); + for (final Path modules : paths) { + final PluginInfo info = PluginInfo.readFromProperties(modules); + final Path spawnPath = Platforms.nativeControllerPath(modules); if (!Files.isRegularFile(spawnPath)) { continue; } if (!info.hasNativeController()) { final String message = String.format( Locale.ROOT, - "plugin [%s] does not have permission to fork native controller", - plugin.getFileName()); + "module [%s] does not have permission to fork native controller", + modules.getFileName()); throw new IllegalArgumentException(message); } - final Process process = spawnNativePluginController(spawnPath, tmpDir); + final Process process = spawnNativeController(spawnPath, environment.tmpFile()); processes.add(process); } } /** - * Attempt to spawn the controller daemon for a given plugin. The spawned process will remain - * connected to this JVM via its stdin, stdout, and stderr streams, but the references to these - * streams are not available to code outside this package. + * Attempt to spawn the controller daemon for a given module. The spawned process will remain connected to this JVM via its stdin, + * stdout, and stderr streams, but the references to these streams are not available to code outside this package. */ - private Process spawnNativePluginController( - final Path spawnPath, - final Path tmpPath) throws IOException { + private Process spawnNativeController(final Path spawnPath, final Path tmpPath) throws IOException { final String command; if (Constants.WINDOWS) { /* diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 41120115c792e..690cd1fbe5a3f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -19,11 +19,9 @@ package org.elasticsearch.cluster.metadata; -import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.Version; @@ -57,7 +55,6 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -78,12 +75,11 @@ import org.joda.time.DateTime; import org.joda.time.DateTimeZone; -import java.io.IOException; import java.io.UnsupportedEncodingException; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -475,9 +471,11 @@ public ClusterState execute(ClusterState currentState) throws Exception { // now, update 
the mappings with the actual source Map mappingsMetaData = new HashMap<>(); - for (DocumentMapper mapper : mapperService.docMappers(true)) { - MappingMetaData mappingMd = new MappingMetaData(mapper); - mappingsMetaData.put(mapper.type(), mappingMd); + for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) { + if (mapper != null) { + MappingMetaData mappingMd = new MappingMetaData(mapper); + mappingsMetaData.put(mapper.type(), mappingMd); + } } final IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(request.index()) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index fbeca652a97ff..b8e898cf6f5e3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -19,9 +19,7 @@ package org.elasticsearch.cluster.metadata; -import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest; @@ -49,6 +47,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -175,10 +174,13 @@ private boolean refreshIndexMapping(IndexService indexService, IndexMetaData.Bui String index = indexService.index().getName(); try { List updatedTypes = new ArrayList<>(); - for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) { - final String type = mapper.type(); - if (!mapper.mappingSource().equals(builder.mapping(type).source())) { - updatedTypes.add(type); + MapperService mapperService = indexService.mapperService(); + for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) { + if (mapper != null) { + final String type = mapper.type(); + if (!mapper.mappingSource().equals(builder.mapping(type).source())) { + updatedTypes.add(type); + } } } @@ -186,8 +188,10 @@ private boolean refreshIndexMapping(IndexService indexService, IndexMetaData.Bui if (updatedTypes.isEmpty() == false) { logger.warn("[{}] re-syncing mappings with cluster state because of types [{}]", index, updatedTypes); dirty = true; - for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) { - builder.putMapping(new MappingMetaData(mapper)); + for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) { + if (mapper != null) { + builder.putMapping(new MappingMetaData(mapper)); + } } } } catch (Exception e) { @@ -320,8 +324,10 @@ private ClusterState applyRequest(ClusterState currentState, PutMappingClusterSt IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData); // Mapping updates on a single type may have side-effects on other types so we need to // update mapping metadata on all types - for (DocumentMapper mapper : mapperService.docMappers(true)) { - indexMetaDataBuilder.putMapping(new MappingMetaData(mapper.mappingSource())); + for (DocumentMapper mapper : 
Arrays.asList(mapperService.documentMapper(), mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) { + if (mapper != null) { + indexMetaDataBuilder.putMapping(new MappingMetaData(mapper.mappingSource())); + } } builder.put(indexMetaDataBuilder); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/PlainShardsIterator.java b/server/src/main/java/org/elasticsearch/cluster/routing/PlainShardsIterator.java index 6cb1989a8dd02..e9a99b7b456c4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/PlainShardsIterator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/PlainShardsIterator.java @@ -24,7 +24,7 @@ /** * A simple {@link ShardsIterator} that iterates a list or sub-list of - * {@link ShardRouting shard routings}. + * {@link ShardRouting shard indexRoutings}. */ public class PlainShardsIterator implements ShardsIterator { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index be1213ad134f1..6a9a105b6c432 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -38,7 +38,7 @@ /** * {@link ShardRouting} immutably encapsulates information about shard - * routings like id, state, version, etc. + * indexRoutings like id, state, version, etc. */ public final class ShardRouting implements Writeable, ToXContentObject { @@ -477,7 +477,7 @@ public boolean isRelocationTargetOf(ShardRouting other) { "ShardRouting is a relocation target but current node id isn't equal to source relocating node. This [" + this + "], other [" + other + "]"; assert b == false || this.shardId.equals(other.shardId) : - "ShardRouting is a relocation target but both routings are not of the same shard id. This [" + this + "], other [" + other + "]"; + "ShardRouting is a relocation target but both indexRoutings are not of the same shard id. This [" + this + "], other [" + other + "]"; assert b == false || this.primary == other.primary : "ShardRouting is a relocation target but primary flag is different. This [" + this + "], target [" + other + "]"; @@ -504,7 +504,7 @@ public boolean isRelocationSourceOf(ShardRouting other) { "ShardRouting is a relocation source but relocating node isn't equal to other's current node. This [" + this + "], other [" + other + "]"; assert b == false || this.shardId.equals(other.shardId) : - "ShardRouting is a relocation source but both routings are not of the same shard. This [" + this + "], target [" + other + "]"; + "ShardRouting is a relocation source but both indexRoutings are not of the same shard. This [" + this + "], target [" + other + "]"; assert b == false || this.primary == other.primary : "ShardRouting is a relocation source but primary flag is different. This [" + this + "], target [" + other + "]"; diff --git a/server/src/main/java/org/elasticsearch/common/cache/Cache.java b/server/src/main/java/org/elasticsearch/common/cache/Cache.java index 91d011ba03cad..620612619104b 100644 --- a/server/src/main/java/org/elasticsearch/common/cache/Cache.java +++ b/server/src/main/java/org/elasticsearch/common/cache/Cache.java @@ -580,7 +580,8 @@ public void remove() { /** * An LRU sequencing of the values in the cache. This sequence is not protected from mutations - * to the cache. The result of iteration under mutation is undefined. + * to the cache (except for {@link Iterator#remove()}. 
The result of iteration under any other mutation is + * undefined. * * @return an LRU-ordered {@link Iterable} over the values in the cache */ @@ -597,6 +598,11 @@ public boolean hasNext() { public V next() { return iterator.next().value; } + + @Override + public void remove() { + iterator.remove(); + } }; } diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java index ce0098ea9722f..57e87e06389c4 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java @@ -24,9 +24,14 @@ import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; import org.apache.lucene.util.SloppyMath; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.DistanceUnit; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.GeoPointValues; @@ -36,6 +41,7 @@ import org.elasticsearch.index.fielddata.SortingNumericDoubleValues; import java.io.IOException; +import java.io.InputStream; public class GeoUtils { @@ -351,6 +357,36 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point) thro return parseGeoPoint(parser, point, false); } + /** + * Parses the value as a geopoint. The following types of values are supported: + *
<p> + * Object: has to contain either lat and lon or geohash fields + * <p> + * String: expected to be in "latitude, longitude" format or a geohash + * <p>
+ * Array: two or more elements, the first element is longitude, the second is latitude, the rest is ignored if ignoreZValue is true + */ + public static GeoPoint parseGeoPoint(Object value, final boolean ignoreZValue) throws ElasticsearchParseException { + try { + XContentBuilder content = JsonXContent.contentBuilder(); + content.startObject(); + content.field("null_value", value); + content.endObject(); + + try (InputStream stream = BytesReference.bytes(content).streamInput(); + XContentParser parser = JsonXContent.jsonXContent.createParser( + NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + parser.nextToken(); // start object + parser.nextToken(); // field name + parser.nextToken(); // field value + return parseGeoPoint(parser, new GeoPoint(), ignoreZValue); + } + + } catch (IOException ex) { + throw new ElasticsearchParseException("error parsing geopoint", ex); + } + } + /** * Parse a {@link GeoPoint} with a {@link XContentParser}. A geopoint has one of the following forms: * diff --git a/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java index 2b9cde65d64cc..15332d4317f34 100644 --- a/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java @@ -71,7 +71,8 @@ public final class NetworkModule { Property.NodeScope); public static final Setting HTTP_DEFAULT_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_DEFAULT_KEY, Property.NodeScope); public static final Setting HTTP_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_KEY, Property.NodeScope); - public static final Setting HTTP_ENABLED = Setting.boolSetting("http.enabled", true, Property.NodeScope); + public static final Setting HTTP_ENABLED = Setting.boolSetting("http.enabled", true, + Property.NodeScope, Property.Deprecated); public static final Setting TRANSPORT_TYPE_SETTING = Setting.simpleString(TRANSPORT_TYPE_KEY, Property.NodeScope); private final Settings settings; diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index db724112574a2..585406d01a6f6 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -697,8 +697,7 @@ private void maybeFSyncTranslogs() { if (indexSettings.getTranslogDurability() == Translog.Durability.ASYNC) { for (IndexShard shard : this.shards.values()) { try { - Translog translog = shard.getTranslog(); - if (translog.syncNeeded()) { + if (shard.isSyncNeeded()) { shard.sync(); } } catch (AlreadyClosedException ex) { diff --git a/server/src/main/java/org/elasticsearch/index/IndexWarmer.java b/server/src/main/java/org/elasticsearch/index/IndexWarmer.java index f8b9d9d2ef805..e06dc5d2e8156 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexWarmer.java +++ b/server/src/main/java/org/elasticsearch/index/IndexWarmer.java @@ -121,7 +121,8 @@ private static class FieldDataWarmer implements IndexWarmer.Listener { public TerminationHandle warmReader(final IndexShard indexShard, final Engine.Searcher searcher) { final MapperService mapperService = indexShard.mapperService(); final Map warmUpGlobalOrdinals = new HashMap<>(); - for (DocumentMapper docMapper : mapperService.docMappers(false)) { + DocumentMapper docMapper = mapperService.documentMapper(); + if (docMapper != null) { for (FieldMapper fieldMapper : 
docMapper.mappers()) { final MappedFieldType fieldType = fieldMapper.fieldType(); final String indexName = fieldType.name(); diff --git a/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredCharFilter.java b/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredCharFilter.java index a979e9e34fe4e..84eb0c4c3498c 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredCharFilter.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredCharFilter.java @@ -40,6 +40,15 @@ public static PreConfiguredCharFilter singleton(String name, boolean useFilterFo (reader, version) -> create.apply(reader)); } + /** + * Create a pre-configured char filter that may not vary at all, provide access to the elasticsearch verison + */ + public static PreConfiguredCharFilter singletonWithVersion(String name, boolean useFilterForMultitermQueries, + BiFunction create) { + return new PreConfiguredCharFilter(name, CachingStrategy.ONE, useFilterForMultitermQueries, + (reader, version) -> create.apply(reader, version)); + } + /** * Create a pre-configured token filter that may vary based on the Lucene version. */ diff --git a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index a59af29036b7d..95e5af0afccac 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -233,7 +233,8 @@ public IndexWarmer.TerminationHandle warmReader(final IndexShard indexShard, fin boolean hasNested = false; final Set warmUp = new HashSet<>(); final MapperService mapperService = indexShard.mapperService(); - for (DocumentMapper docMapper : mapperService.docMappers(false)) { + DocumentMapper docMapper = mapperService.documentMapper(); + if (docMapper != null) { if (docMapper.hasNestedObjects()) { hasNested = true; for (ObjectMapper objectMapper : docMapper.objectMappers().values()) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java b/server/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java index d2b2e24c616a8..9f094197b8d9c 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java +++ b/server/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java @@ -23,7 +23,7 @@ /** Holds a deleted version, which just adds a timestamp to {@link VersionValue} so we know when we can expire the deletion. 
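The `GeoUtils.parseGeoPoint(Object value, boolean ignoreZValue)` overload introduced in the GeoUtils hunk above accepts the same three value shapes as the parser-based variant. A minimal sketch of how a caller might exercise it; the surrounding class, method, and coordinates are illustrative and not part of this change:

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;

class GeoPointParsingSketch {
    static void examples() {
        // String: "latitude, longitude" or a geohash
        GeoPoint fromString = GeoUtils.parseGeoPoint("41.12,-71.34", false);

        // Array: [longitude, latitude]; a trailing z value is only tolerated when ignoreZValue is true
        GeoPoint fromArray = GeoUtils.parseGeoPoint(Arrays.asList(-71.34, 41.12), false);

        // Object: must contain lat and lon (or a geohash field)
        Map<String, Object> asObject = new HashMap<>();
        asObject.put("lat", 41.12);
        asObject.put("lon", -71.34);
        GeoPoint fromObject = GeoUtils.parseGeoPoint(asObject, false);
    }
}
```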
*/ -class DeleteVersionValue extends VersionValue { +final class DeleteVersionValue extends VersionValue { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DeleteVersionValue.class); diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index fab8cba468b56..4c782cb500418 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -66,6 +66,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.TranslogStats; import java.io.Closeable; import java.io.FileNotFoundException; @@ -510,8 +511,18 @@ public enum SearcherScope { EXTERNAL, INTERNAL } - /** returns the translog for this engine */ - public abstract Translog getTranslog(); + /** + * Returns the translog associated with this engine. + * Prefer to keep the translog package-private, so that an engine can control all accesses to the translog. + */ + abstract Translog getTranslog(); + + /** + * Checks if the underlying storage sync is required. + */ + public boolean isTranslogSyncNeeded() { + return getTranslog().syncNeeded(); + } /** * Ensures that all locations in the given stream have been written to the underlying storage. @@ -520,6 +531,36 @@ public enum SearcherScope { public abstract void syncTranslog() throws IOException; + public Closeable acquireTranslogRetentionLock() { + return getTranslog().acquireRetentionLock(); + } + + /** + * Creates a new translog snapshot from this engine for reading translog operations whose seq# at least the provided seq#. + * The caller has to close the returned snapshot after finishing the reading. + */ + public Translog.Snapshot newTranslogSnapshotFromMinSeqNo(long minSeqNo) throws IOException { + return getTranslog().newSnapshotFromMinSeqNo(minSeqNo); + } + + /** + * Returns the estimated number of translog operations in this engine whose seq# at least the provided seq#. + */ + public int estimateTranslogOperationsFromMinSeq(long minSeqNo) { + return getTranslog().estimateTotalOperationsFromMinSeq(minSeqNo); + } + + public TranslogStats getTranslogStats() { + return getTranslog().stats(); + } + + /** + * Returns the last location that the translog of this engine has written into. + */ + public Translog.Location getTranslogLastWriteLocation() { + return getTranslog().getLastWriteLocation(); + } + protected final void ensureOpen(Exception suppressed) { if (isClosed.get()) { AlreadyClosedException ace = new AlreadyClosedException(shardId + " engine is closed", failedEngine.get()); @@ -546,6 +587,13 @@ public CommitStats commitStats() { */ public abstract LocalCheckpointTracker getLocalCheckpointTracker(); + /** + * Returns the latest global checkpoint value that has been persisted in the underlying storage (i.e. translog's checkpoint) + */ + public long getLastSyncedGlobalCheckpoint() { + return getTranslog().getLastSyncedGlobalCheckpoint(); + } + /** * Global stats on segments. */ @@ -810,6 +858,16 @@ public final boolean refreshNeeded() { */ public abstract void trimTranslog() throws EngineException; + /** + * Tests whether or not the translog generation should be rolled to a new generation. + * This test is based on the size of the current generation compared to the configured generation threshold size. 
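The Engine hunk above makes getTranslog() package-private and adds pass-through accessors so that callers no longer reach into the Translog directly. A hedged sketch of the intended call pattern; the helper method is hypothetical, and rollTranslogGeneration() is assumed from the surrounding Engine API rather than shown in this hunk:

```java
import java.io.IOException;

import org.elasticsearch.index.engine.Engine;

class TranslogMaintenanceSketch {
    // Hypothetical caller: all translog interaction goes through the Engine accessors added above.
    static void maybeSyncAndRoll(Engine engine) throws IOException {
        if (engine.isTranslogSyncNeeded()) {          // was: engine.getTranslog().syncNeeded()
            engine.syncTranslog();
        }
        if (engine.shouldRollTranslogGeneration()) {  // was: engine.getTranslog().shouldRollGeneration()
            engine.rollTranslogGeneration();
        }
    }
}
```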
+ * + * @return {@code true} if the current generation should be rolled to a new generation + */ + public boolean shouldRollTranslogGeneration() { + return getTranslog().shouldRollGeneration(); + } + /** * Rolls the translog generation and cleans unneeded. */ diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogVersionValue.java b/server/src/main/java/org/elasticsearch/index/engine/IndexVersionValue.java similarity index 83% rename from server/src/main/java/org/elasticsearch/index/engine/TranslogVersionValue.java rename to server/src/main/java/org/elasticsearch/index/engine/IndexVersionValue.java index 67415ea6139a6..a658c84c16bbd 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/TranslogVersionValue.java +++ b/server/src/main/java/org/elasticsearch/index/engine/IndexVersionValue.java @@ -24,20 +24,20 @@ import java.util.Objects; -final class TranslogVersionValue extends VersionValue { +final class IndexVersionValue extends VersionValue { - private static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(TranslogVersionValue.class); + private static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IndexVersionValue.class); private final Translog.Location translogLocation; - TranslogVersionValue(Translog.Location translogLocation, long version, long seqNo, long term) { + IndexVersionValue(Translog.Location translogLocation, long version, long seqNo, long term) { super(version, seqNo, term); this.translogLocation = translogLocation; } @Override public long ramBytesUsed() { - return RAM_BYTES_USED; + return RAM_BYTES_USED + RamUsageEstimator.shallowSizeOf(translogLocation); } @Override @@ -45,7 +45,7 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; if (!super.equals(o)) return false; - TranslogVersionValue that = (TranslogVersionValue) o; + IndexVersionValue that = (IndexVersionValue) o; return Objects.equals(translogLocation, that.translogLocation); } @@ -56,7 +56,7 @@ public int hashCode() { @Override public String toString() { - return "TranslogVersionValue{" + + return "IndexVersionValue{" + "version=" + version + ", seqNo=" + seqNo + ", term=" + term + diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index dcd1ba65d8950..f89595c1c23a2 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -422,7 +422,7 @@ private Translog openTranslog(EngineConfig engineConfig, TranslogDeletionPolicy } @Override - public Translog getTranslog() { + Translog getTranslog() { ensureOpen(); return translog; } @@ -623,7 +623,7 @@ private VersionValue resolveDocVersion(final Operation op) throws IOException { assert incrementIndexVersionLookup(); // used for asserting in tests final long currentVersion = loadCurrentVersionFromIndex(op.uid()); if (currentVersion != Versions.NOT_FOUND) { - versionValue = new VersionValue(currentVersion, SequenceNumbers.UNASSIGNED_SEQ_NO, 0L); + versionValue = new IndexVersionValue(null, currentVersion, SequenceNumbers.UNASSIGNED_SEQ_NO, 0L); } } else if (engineConfig.isEnableGcDeletes() && versionValue.isDelete() && (engineConfig.getThreadPool().relativeTimeInMillis() - ((DeleteVersionValue)versionValue).time) > getGcDeletesInMillis()) { @@ -785,8 +785,9 @@ public IndexResult index(Index index) throws IOException { 
indexResult.setTranslogLocation(location); } if (plan.indexIntoLucene && indexResult.hasFailure() == false) { - versionMap.maybePutUnderLock(index.uid().bytes(), - getVersionValue(plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm(), indexResult.getTranslogLocation())); + final Translog.Location translogLocation = trackTranslogLocation.get() ? indexResult.getTranslogLocation() : null; + versionMap.maybePutIndexUnderLock(index.uid().bytes(), + new IndexVersionValue(translogLocation, plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm())); } if (indexResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { localCheckpointTracker.markSeqNoAsCompleted(indexResult.getSeqNo()); @@ -937,13 +938,6 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan) } } - private VersionValue getVersionValue(long version, long seqNo, long term, Translog.Location location) { - if (location != null && trackTranslogLocation.get()) { - return new TranslogVersionValue(location, version, seqNo, term); - } - return new VersionValue(version, seqNo, term); - } - /** * returns true if the indexing operation may have already be processed by this engine. * Note that it is OK to rarely return true even if this is not the case. However a `false` @@ -1193,7 +1187,7 @@ private DeleteResult deleteInLucene(Delete delete, DeletionStrategy plan) indexWriter.deleteDocuments(delete.uid()); numDocDeletes.inc(); } - versionMap.putUnderLock(delete.uid().bytes(), + versionMap.putDeleteUnderLock(delete.uid().bytes(), new DeleteVersionValue(plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), engineConfig.getThreadPool().relativeTimeInMillis())); return new DeleteResult( @@ -1275,8 +1269,10 @@ private NoOpResult innerNoOp(final NoOp noOp) throws IOException { final long seqNo = noOp.seqNo(); try { final NoOpResult noOpResult = new NoOpResult(noOp.seqNo()); - final Translog.Location location = translog.add(new Translog.NoOp(noOp.seqNo(), noOp.primaryTerm(), noOp.reason())); - noOpResult.setTranslogLocation(location); + if (noOp.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { + final Translog.Location location = translog.add(new Translog.NoOp(noOp.seqNo(), noOp.primaryTerm(), noOp.reason())); + noOpResult.setTranslogLocation(location); + } noOpResult.setTook(System.nanoTime() - noOp.startTime()); noOpResult.freeze(); return noOpResult; diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java index 7c5dcfa5c9050..18d3cedb37e60 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java @@ -268,7 +268,7 @@ VersionValue getUnderLock(final BytesRef uid) { } private VersionValue getUnderLock(final BytesRef uid, Maps currentMaps) { - assert keyedLock.isHeldByCurrentThread(uid); + assert assertKeyedLockHeldByCurrentThread(uid); // First try to get the "live" value: VersionValue value = currentMaps.current.get(uid); if (value != null) { @@ -306,44 +306,40 @@ boolean isSafeAccessRequired() { /** * Adds this uid/version to the pending adds map iff the map needs safe access. 
*/ - void maybePutUnderLock(BytesRef uid, VersionValue version) { - assert keyedLock.isHeldByCurrentThread(uid); + void maybePutIndexUnderLock(BytesRef uid, IndexVersionValue version) { + assert assertKeyedLockHeldByCurrentThread(uid); Maps maps = this.maps; if (maps.isSafeAccessMode()) { - putUnderLock(uid, version, maps); + putIndexUnderLock(uid, version); } else { + // Even though we don't store a record of the indexing operation (and mark as unsafe), + // we should still remove any previous delete for this uuid (avoid accidental accesses). + // Not this should not hurt performance because the tombstone is small (or empty) when unsafe is relevant. + removeTombstoneUnderLock(uid); maps.current.markAsUnsafe(); assert putAssertionMap(uid, version); } } - private boolean putAssertionMap(BytesRef uid, VersionValue version) { - putUnderLock(uid, version, unsafeKeysMap); - return true; + void putIndexUnderLock(BytesRef uid, IndexVersionValue version) { + assert assertKeyedLockHeldByCurrentThread(uid); + assert uid.bytes.length == uid.length : "Oversized _uid! UID length: " + uid.length + ", bytes length: " + uid.bytes.length; + maps.put(uid, version); + removeTombstoneUnderLock(uid); } - /** - * Adds this uid/version to the pending adds map. - */ - void putUnderLock(BytesRef uid, VersionValue version) { - Maps maps = this.maps; - putUnderLock(uid, version, maps); + private boolean putAssertionMap(BytesRef uid, IndexVersionValue version) { + assert assertKeyedLockHeldByCurrentThread(uid); + assert uid.bytes.length == uid.length : "Oversized _uid! UID length: " + uid.length + ", bytes length: " + uid.bytes.length; + unsafeKeysMap.put(uid, version); + return true; } - /** - * Adds this uid/version to the pending adds map. - */ - private void putUnderLock(BytesRef uid, VersionValue version, Maps maps) { - assert keyedLock.isHeldByCurrentThread(uid); + void putDeleteUnderLock(BytesRef uid, DeleteVersionValue version) { + assert assertKeyedLockHeldByCurrentThread(uid); assert uid.bytes.length == uid.length : "Oversized _uid! UID length: " + uid.length + ", bytes length: " + uid.bytes.length; - if (version.isDelete() == false) { - maps.put(uid, version); - removeTombstoneUnderLock(uid); - } else { - DeleteVersionValue versionValue = (DeleteVersionValue) version; - putTombstone(uid, versionValue); - maps.remove(uid, versionValue); - } + putTombstone(uid, version); + maps.remove(uid, version); } private void putTombstone(BytesRef uid, DeleteVersionValue version) { @@ -365,7 +361,7 @@ private void putTombstone(BytesRef uid, DeleteVersionValue version) { * Removes this uid from the pending deletes map. 
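The LiveVersionMap changes above split the former putUnderLock into type-specific entry points: index operations record an IndexVersionValue, deletes a DeleteVersionValue. A rough sketch of the two call paths as seen from InternalEngine; the values are placeholders, and because these classes are package-private the sketch would have to live in org.elasticsearch.index.engine:

```java
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.index.translog.Translog;

class VersionMapUsageSketch {
    // Sketch only: mirrors how InternalEngine above now records index and delete operations.
    static void record(LiveVersionMap versionMap, BytesRef uid, Translog.Location location,
                       long version, long seqNo, long term, long nowInMillis) {
        try (Releasable ignored = versionMap.acquireLock(uid)) {
            // indexing: may be skipped (marked unsafe) unless safe access is required
            versionMap.maybePutIndexUnderLock(uid, new IndexVersionValue(location, version, seqNo, term));
        }
        try (Releasable ignored = versionMap.acquireLock(uid)) {
            // deleting: always recorded as a tombstone
            versionMap.putDeleteUnderLock(uid, new DeleteVersionValue(version, seqNo, term, nowInMillis));
        }
    }
}
```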
*/ void removeTombstoneUnderLock(BytesRef uid) { - assert keyedLock.isHeldByCurrentThread(uid); + assert assertKeyedLockHeldByCurrentThread(uid); long uidRAMBytesUsed = BASE_BYTES_PER_BYTESREF + uid.bytes.length; final VersionValue prev = tombstones.remove(uid); if (prev != null) { @@ -465,4 +461,9 @@ Map getAllTombstones() { Releasable acquireLock(BytesRef uid) { return keyedLock.acquire(uid); } + + private boolean assertKeyedLockHeldByCurrentThread(BytesRef uid) { + assert keyedLock.isHeldByCurrentThread(uid) : "Thread [" + Thread.currentThread().getName() + "], uid [" + uid.utf8ToString() + "]"; + return true; + } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java b/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java index d63306486732e..567a7964186ad 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java +++ b/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java @@ -27,7 +27,7 @@ import java.util.Collection; import java.util.Collections; -class VersionValue implements Accountable { +abstract class VersionValue implements Accountable { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(VersionValue.class); diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java b/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java index 552ddbf9d616d..3d07a0f87aa5e 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java @@ -631,23 +631,9 @@ public String get(int index) { return values[index].get().utf8ToString(); } - public BytesRef getBytesValue() { - if (size() > 0) { - return values[0].get(); - } else { - return null; - } - } - public String getValue() { - BytesRef value = getBytesValue(); - if (value == null) { - return null; - } else { - return value.utf8ToString(); - } + return count == 0 ? null : get(0); } - } public static final class BytesRefs extends BinaryScriptDocValues { @@ -658,14 +644,16 @@ public BytesRefs(SortedBinaryDocValues in) { @Override public BytesRef get(int index) { - return values[index].get(); + /** + * We need to make a copy here because {@link BinaryScriptDocValues} might reuse the + * returned value and the same instance might be used to + * return values from multiple documents. + **/ + return values[index].toBytesRef(); } public BytesRef getValue() { - if (count == 0) { - return new BytesRef(); - } - return values[0].get(); + return count == 0 ? 
new BytesRef() : get(0); } } diff --git a/server/src/main/java/org/elasticsearch/index/get/GetResult.java b/server/src/main/java/org/elasticsearch/index/get/GetResult.java index ae59c6f507749..021e97767d840 100644 --- a/server/src/main/java/org/elasticsearch/index/get/GetResult.java +++ b/server/src/main/java/org/elasticsearch/index/get/GetResult.java @@ -252,7 +252,9 @@ public XContentBuilder toXContentEmbedded(XContentBuilder builder, Params params public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(_INDEX, index); - builder.field(_TYPE, type); + if (params.paramAsBoolean("include_type_name", true)) { + builder.field(_TYPE, type); + } builder.field(_ID, id); if (isExists()) { if (version != -1) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java index 10c0db01dc776..0cd200021701e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java @@ -60,6 +60,7 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper public static class Names { public static final String IGNORE_MALFORMED = "ignore_malformed"; public static final ParseField IGNORE_Z_VALUE = new ParseField("ignore_z_value"); + public static final String NULL_VALUE = "null_value"; } public static class Defaults { @@ -134,7 +135,7 @@ public Mapper.Builder parse(String name, Map node, ParserContext throws MapperParsingException { Builder builder = new GeoPointFieldMapper.Builder(name); parseField(builder, name, node, parserContext); - + Object nullValue = null; for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { Map.Entry entry = iterator.next(); String propName = entry.getKey(); @@ -147,9 +148,31 @@ public Mapper.Builder parse(String name, Map node, ParserContext builder.ignoreZValue(XContentMapValues.nodeBooleanValue(propNode, name + "." + Names.IGNORE_Z_VALUE.getPreferredName())); iterator.remove(); + } else if (propName.equals(Names.NULL_VALUE)) { + if (propNode == null) { + throw new MapperParsingException("Property [null_value] cannot be null."); + } + nullValue = propNode; + iterator.remove(); } } + if (nullValue != null) { + boolean ignoreZValue = builder.ignoreZValue == null ? Defaults.IGNORE_Z_VALUE.value() : builder.ignoreZValue; + boolean ignoreMalformed = builder.ignoreMalformed == null ? 
Defaults.IGNORE_MALFORMED.value() : builder.ignoreZValue; + GeoPoint point = GeoUtils.parseGeoPoint(nullValue, ignoreZValue); + if (ignoreMalformed == false) { + if (point.lat() > 90.0 || point.lat() < -90.0) { + throw new IllegalArgumentException("illegal latitude value [" + point.lat() + "]"); + } + if (point.lon() > 180.0 || point.lon() < -180) { + throw new IllegalArgumentException("illegal longitude value [" + point.lon() + "]"); + } + } else { + GeoUtils.normalizePoint(point); + } + builder.nullValue(point); + } return builder; } } @@ -318,7 +341,11 @@ public Mapper parse(ParseContext context) throws IOException { } } else if (token == XContentParser.Token.VALUE_STRING) { parse(context, sparse.resetFromString(context.parser().text(), ignoreZValue.value())); - } else if (token != XContentParser.Token.VALUE_NULL) { + } else if (token == XContentParser.Token.VALUE_NULL) { + if (fieldType.nullValue() != null) { + parse(context, (GeoPoint) fieldType.nullValue()); + } + } else { try { parse(context, GeoUtils.parseGeoPoint(context.parser(), sparse)); } catch (ElasticsearchParseException e) { @@ -337,11 +364,15 @@ public Mapper parse(ParseContext context) throws IOException { protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { super.doXContentBody(builder, includeDefaults, params); if (includeDefaults || ignoreMalformed.explicit()) { - builder.field(GeoPointFieldMapper.Names.IGNORE_MALFORMED, ignoreMalformed.value()); + builder.field(Names.IGNORE_MALFORMED, ignoreMalformed.value()); } if (includeDefaults || ignoreZValue.explicit()) { builder.field(Names.IGNORE_Z_VALUE.getPreferredName(), ignoreZValue.value()); } + + if (includeDefaults || fieldType().nullValue() != null) { + builder.field(Names.NULL_VALUE, fieldType().nullValue()); + } } public Explicit ignoreZValue() { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 2f027c0fbb998..c72187b649713 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -57,6 +57,7 @@ import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -64,13 +65,12 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.function.Function; import java.util.function.Supplier; -import java.util.stream.Collectors; import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; import static java.util.Collections.unmodifiableMap; public class MapperService extends AbstractIndexComponent implements Closeable { @@ -121,7 +121,8 @@ public enum MergeReason { private volatile String defaultMappingSource; - private volatile Map mappers = emptyMap(); + private volatile DocumentMapper mapper; + private volatile DocumentMapper defaultMapper; private volatile FieldTypeLookup fieldTypes; private volatile Map fullPathObjectMappers = emptyMap(); @@ -166,24 +167,6 @@ public boolean hasNested() { return this.hasNested; } - /** - * returns an immutable iterator over current document mappers. - * - * @param includingDefaultMapping indicates whether the iterator should contain the {@link #DEFAULT_MAPPING} document mapper. 
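The GeoPointFieldMapper changes above add a null_value parameter to geo_point mappings, so explicit nulls in documents are indexed as the configured point. A hypothetical mapping built with XContentBuilder; the field name and coordinates are illustrative only:

```java
import java.io.IOException;

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;

class GeoPointNullValueMappingSketch {
    static XContentBuilder mapping() throws IOException {
        // null_value accepts the same formats as GeoUtils.parseGeoPoint(Object, boolean):
        // an object with lat/lon, a "lat,lon" or geohash string, or a [lon, lat] array.
        return JsonXContent.contentBuilder()
            .startObject()
                .startObject("properties")
                    .startObject("location")
                        .field("type", "geo_point")
                        .field("null_value", "40.0,-70.0")
                    .endObject()
                .endObject()
            .endObject();
    }
}
```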
- * As is this not really an active type, you would typically set this to false - */ - public Iterable docMappers(final boolean includingDefaultMapping) { - return () -> { - final Collection documentMappers; - if (includingDefaultMapping) { - documentMappers = mappers.values(); - } else { - documentMappers = mappers.values().stream().filter(mapper -> !DEFAULT_MAPPING.equals(mapper.type())).collect(Collectors.toList()); - } - return Collections.unmodifiableCollection(documentMappers).iterator(); - }; - } - public IndexAnalyzers getIndexAnalyzers() { return this.indexAnalyzers; } @@ -212,7 +195,13 @@ public static Map parseMapping(NamedXContentRegistry xContentReg public boolean updateMapping(IndexMetaData indexMetaData) throws IOException { assert indexMetaData.getIndex().equals(index()) : "index mismatch: expected " + index() + " but was " + indexMetaData.getIndex(); // go over and add the relevant mappings (or update them) - final Set existingMappers = new HashSet<>(mappers.keySet()); + Set existingMappers = new HashSet<>(); + if (mapper != null) { + existingMappers.add(mapper.type()); + } + if (defaultMapper != null) { + existingMappers.add(DEFAULT_MAPPING); + } final Map updatedEntries; try { // only update entries if needed @@ -314,29 +303,32 @@ private synchronized Map internalMerge(Map documentMappers = new ArrayList<>(); + DocumentMapper documentMapper = null; for (Map.Entry entry : mappings.entrySet()) { String type = entry.getKey(); if (type.equals(DEFAULT_MAPPING)) { continue; } + if (documentMapper != null) { + throw new IllegalArgumentException("Cannot put multiple mappings: " + mappings.keySet()); + } + final boolean applyDefault = // the default was already applied if we are recovering reason != MergeReason.MAPPING_RECOVERY // only apply the default mapping if we don't have the type yet - && mappers.containsKey(type) == false; + && this.mapper == null; try { - DocumentMapper documentMapper = + documentMapper = documentParser.parse(type, entry.getValue(), applyDefault ? 
defaultMappingSourceOrLastStored : null); - documentMappers.add(documentMapper); } catch (Exception e) { throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, entry.getKey(), e.getMessage()); } } - return internalMerge(defaultMapper, defaultMappingSource, documentMappers, reason); + return internalMerge(defaultMapper, defaultMappingSource, documentMapper, reason); } static void validateTypeName(String type) { @@ -361,13 +353,12 @@ static void validateTypeName(String type) { } private synchronized Map internalMerge(@Nullable DocumentMapper defaultMapper, @Nullable String defaultMappingSource, - List documentMappers, MergeReason reason) { + DocumentMapper mapper, MergeReason reason) { boolean hasNested = this.hasNested; Map fullPathObjectMappers = this.fullPathObjectMappers; FieldTypeLookup fieldTypes = this.fieldTypes; - Map mappers = new HashMap<>(this.mappers); - Map results = new LinkedHashMap<>(documentMappers.size() + 1); + Map results = new LinkedHashMap<>(2); if (defaultMapper != null) { if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) { @@ -378,27 +369,23 @@ private synchronized Map internalMerge(@Nullable Documen "cannot have more than one type"); } assert defaultMapper.type().equals(DEFAULT_MAPPING); - mappers.put(DEFAULT_MAPPING, defaultMapper); results.put(DEFAULT_MAPPING, defaultMapper); } { - Set actualTypes = new HashSet<>(mappers.keySet()); - documentMappers.forEach(mapper -> actualTypes.add(mapper.type())); - actualTypes.remove(DEFAULT_MAPPING); - if (actualTypes.size() > 1) { + if (mapper != null && this.mapper != null && Objects.equals(this.mapper.type(), mapper.type()) == false) { throw new IllegalArgumentException( - "Rejecting mapping update to [" + index().getName() + "] as the final mapping would have more than 1 type: " + actualTypes); + "Rejecting mapping update to [" + index().getName() + "] as the final mapping would have more than 1 type: " + Arrays.asList(this.mapper.type(), mapper.type())); } } - for (DocumentMapper mapper : documentMappers) { + DocumentMapper newMapper = null; + if (mapper != null) { // check naming validateTypeName(mapper.type()); // compute the merged DocumentMapper - DocumentMapper oldMapper = mappers.get(mapper.type()); - DocumentMapper newMapper; + DocumentMapper oldMapper = this.mapper; if (oldMapper != null) { newMapper = oldMapper.merge(mapper.mapping()); } else { @@ -442,7 +429,6 @@ private synchronized Map internalMerge(@Nullable Documen } results.put(newMapper.type(), newMapper); - mappers.put(newMapper.type(), newMapper); } if (reason == MergeReason.MAPPING_UPDATE) { @@ -456,24 +442,16 @@ private synchronized Map internalMerge(@Nullable Documen } checkIndexSortCompatibility(indexSettings.getIndexSortConfig(), hasNested); - for (Map.Entry entry : mappers.entrySet()) { - if (entry.getKey().equals(DEFAULT_MAPPING)) { - continue; - } - DocumentMapper documentMapper = entry.getValue(); - // apply changes to the field types back - DocumentMapper updatedDocumentMapper = documentMapper.updateFieldType(fieldTypes.fullNameToFieldType); - if (updatedDocumentMapper != documentMapper) { + if (newMapper != null) { + DocumentMapper updatedDocumentMapper = newMapper.updateFieldType(fieldTypes.fullNameToFieldType); + if (updatedDocumentMapper != newMapper) { // update both mappers and result - entry.setValue(updatedDocumentMapper); - if (results.containsKey(updatedDocumentMapper.type())) { - results.put(updatedDocumentMapper.type(), updatedDocumentMapper); - } + newMapper = updatedDocumentMapper; + 
results.put(updatedDocumentMapper.type(), updatedDocumentMapper); } } // make structures immutable - mappers = Collections.unmodifiableMap(mappers); results = Collections.unmodifiableMap(results); // only need to immutably rewrap these if the previous reference was changed. @@ -486,7 +464,10 @@ private synchronized Map internalMerge(@Nullable Documen if (defaultMappingSource != null) { this.defaultMappingSource = defaultMappingSource; } - this.mappers = mappers; + if (newMapper != null) { + this.mapper = newMapper; + } + this.defaultMapper = defaultMapper; this.fieldTypes = fieldTypes; this.hasNested = hasNested; this.fullPathObjectMappers = fullPathObjectMappers; @@ -498,7 +479,7 @@ private synchronized Map internalMerge(@Nullable Documen } private boolean assertMappersShareSameFieldType() { - for (DocumentMapper mapper : docMappers(false)) { + if (mapper != null) { List fieldMappers = new ArrayList<>(); Collections.addAll(fieldMappers, mapper.mapping().metadataMappers); MapperUtils.collect(mapper.root(), new ArrayList<>(), fieldMappers); @@ -692,18 +673,20 @@ public DocumentMapper parse(String mappingType, CompressedXContent mappingSource return documentParser.parse(mappingType, mappingSource, applyDefault ? defaultMappingSource : null); } - public boolean hasMapping(String mappingType) { - return mappers.containsKey(mappingType); + /** + * Get the set of types. + * @deprecated Indices may have one type at most, use {@link #documentMapper()} instead. + */ + @Deprecated + public Set types() { + return mapper == null ? Collections.emptySet() : Collections.singleton(mapper.type()); } /** - * Return the set of concrete types that have a mapping. - * NOTE: this does not return the default mapping. + * Return the document mapper, or {@code null} if no mapping has been put yet. */ - public Collection types() { - final Set types = new HashSet<>(mappers.keySet()); - types.remove(DEFAULT_MAPPING); - return Collections.unmodifiableSet(types); + public DocumentMapper documentMapper() { + return mapper; } /** @@ -712,7 +695,13 @@ public Collection types() { * the default mapping. */ public DocumentMapper documentMapper(String type) { - return mappers.get(type); + if (mapper != null && type.equals(mapper.type())) { + return mapper; + } + if (DEFAULT_MAPPING.equals(type)) { + return defaultMapper; + } + return null; } /** @@ -720,7 +709,7 @@ public DocumentMapper documentMapper(String type) { * type has been dynamically created. */ public DocumentMapperForType documentMapperWithAutoCreate(String type) { - DocumentMapper mapper = mappers.get(type); + DocumentMapper mapper = documentMapper(type); if (mapper != null) { return new DocumentMapperForType(mapper, null); } @@ -836,7 +825,7 @@ protected Analyzer getWrappedAnalyzer(String fieldName) { /** Return a term that uniquely identifies the document, or {@code null} if the type is not allowed. 
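With the MapperService changes above, an index holds at most one concrete document mapper, so the removed docMappers(boolean) iterator gives way to null-checked access via documentMapper() (and documentMapper(DEFAULT_MAPPING) when the default mapping is needed). A sketch of the resulting caller pattern, matching the call sites rewritten elsewhere in this diff; the class and method are illustrative:

```java
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperService;

class SingleTypeAccessSketch {
    // Sketch: visit the fields of the single concrete type, if a mapping has been put.
    static void visitFields(MapperService mapperService) {
        DocumentMapper mapper = mapperService.documentMapper();
        if (mapper != null) {
            for (FieldMapper fieldMapper : mapper.mappers()) {
                // inspect fieldMapper.fieldType() ...
            }
        }
    }
}
```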
*/ public Term createUidTerm(String type, String id) { - if (hasMapping(type) == false) { + if (mapper == null || mapper.type().equals(type) == false) { return null; } return new Term(IdFieldMapper.NAME, Uid.encodeId(id)); diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java index 03a1f78289409..ef88db6c12ce0 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -28,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.search.MatchQuery; +import org.elasticsearch.index.search.MatchQuery.ZeroTermsQuery; import java.io.IOException; import java.util.Objects; @@ -39,6 +41,7 @@ public class MatchPhraseQueryBuilder extends AbstractQueryBuilder { public static final String NAME = "match_phrase"; public static final ParseField SLOP_FIELD = new ParseField("slop"); + public static final ParseField ZERO_TERMS_QUERY_FIELD = new ParseField("zero_terms_query"); private final String fieldName; @@ -48,6 +51,8 @@ public class MatchPhraseQueryBuilder extends AbstractQueryBuilder getValues() { return values; } @@ -116,9 +119,10 @@ public TermsSetQueryBuilder setMinimumShouldMatchScript(Script minimumShouldMatc @Override protected boolean doEquals(TermsSetQueryBuilder other) { - return Objects.equals(fieldName, this.fieldName) && Objects.equals(values, this.values) && - Objects.equals(minimumShouldMatchField, this.minimumShouldMatchField) && - Objects.equals(minimumShouldMatchScript, this.minimumShouldMatchScript); + return Objects.equals(fieldName, other.fieldName) + && Objects.equals(values, other.values) + && Objects.equals(minimumShouldMatchField, other.minimumShouldMatchField) + && Objects.equals(minimumShouldMatchScript, other.minimumShouldMatchScript); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java b/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java index 18a124d86b35c..c3a695beff083 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java @@ -89,7 +89,8 @@ public static Map parseFieldsAndWeights(List fields) { * @param field The field name to search. 
*/ public static FieldMapper getFieldMapper(MapperService mapperService, String field) { - for (DocumentMapper mapper : mapperService.docMappers(true)) { + DocumentMapper mapper = mapperService.documentMapper(); + if (mapper != null) { FieldMapper fieldMapper = mapper.mappers().smartNameFieldMapper(field); if (fieldMapper != null) { return fieldMapper; diff --git a/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java index 0ec03cb7a8f5e..9b55cff8cff9a 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncAction.java @@ -131,7 +131,7 @@ protected ReplicaResult shardOperationOnReplica(final Request request, final Ind private void maybeSyncTranslog(final IndexShard indexShard) throws IOException { if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST && - indexShard.getTranslog().getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) { + indexShard.getLastSyncedGlobalCheckpoint() < indexShard.getGlobalCheckpoint()) { indexShard.sync(); } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 520115dc30a46..def6362e334e4 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -934,7 +934,7 @@ public FieldDataStats fieldDataStats(String... fields) { } public TranslogStats translogStats() { - return getEngine().getTranslog().stats(); + return getEngine().getTranslogStats(); } public CompletionStats completionStats(String... fields) { @@ -1331,7 +1331,7 @@ private boolean assertMaxUnsafeAutoIdInCommit() throws IOException { } protected void onNewEngine(Engine newEngine) { - refreshListeners.setTranslog(newEngine.getTranslog()); + refreshListeners.setCurrentRefreshLocationSupplier(newEngine::getTranslogLastWriteLocation); } /** @@ -1563,8 +1563,7 @@ boolean shouldRollTranslogGeneration() { final Engine engine = getEngineOrNull(); if (engine != null) { try { - final Translog translog = engine.getTranslog(); - return translog.shouldRollGeneration(); + return engine.shouldRollTranslogGeneration(); } catch (final AlreadyClosedException e) { // we are already closed, no need to flush or roll } @@ -1579,9 +1578,26 @@ public void onSettingsChanged() { } } + /** + * Acquires a lock on the translog files, preventing them from being trimmed. + */ public Closeable acquireTranslogRetentionLock() { - Engine engine = getEngine(); - return engine.getTranslog().acquireRetentionLock(); + return getEngine().acquireTranslogRetentionLock(); + } + + /** + * Creates a new translog snapshot for reading translog operations whose seq# at least the provided seq#. + * The caller has to close the returned snapshot after finishing the reading. + */ + public Translog.Snapshot newTranslogSnapshotFromMinSeqNo(long minSeqNo) throws IOException { + return getEngine().newTranslogSnapshotFromMinSeqNo(minSeqNo); + } + + /** + * Returns the estimated number of operations in translog whose seq# at least the provided seq#. 
+ */ + public int estimateTranslogOperationsFromMinSeq(long minSeqNo) { + return getEngine().estimateTranslogOperationsFromMinSeq(minSeqNo); } public List segments(boolean verbose) { @@ -1592,10 +1608,6 @@ public void flushAndCloseEngine() throws IOException { getEngine().flushAndClose(); } - public Translog getTranslog() { - return getEngine().getTranslog(); - } - public String getHistoryUUID() { return getEngine().getHistoryUUID(); } @@ -1733,6 +1745,13 @@ public long getGlobalCheckpoint() { return replicationTracker.getGlobalCheckpoint(); } + /** + * Returns the latest global checkpoint value that has been persisted in the underlying storage (i.e. translog's checkpoint) + */ + public long getLastSyncedGlobalCheckpoint() { + return getEngine().getLastSyncedGlobalCheckpoint(); + } + /** * Get the local knowledge of the global checkpoints for all in-sync allocation IDs. * @@ -2308,6 +2327,13 @@ public void sync() throws IOException { getEngine().syncTranslog(); } + /** + * Checks if the underlying storage sync is required. + */ + public boolean isSyncNeeded() { + return getEngine().isTranslogSyncNeeded(); + } + /** * Returns the current translog durability mode */ @@ -2467,7 +2493,7 @@ final long getLastSearcherAccess() { } private void setRefreshPending(Engine engine) { - Translog.Location lastWriteLocation = engine.getTranslog().getLastWriteLocation(); + Translog.Location lastWriteLocation = engine.getTranslogLastWriteLocation(); Translog.Location location; do { location = this.pendingRefreshLocation.get(); diff --git a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java index 1e31eae7d417f..af8c9bdd0272f 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java +++ b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java @@ -83,7 +83,7 @@ public void resync(final IndexShard indexShard, final ActionListener ActionListener resyncListener = null; try { final long startingSeqNo = indexShard.getGlobalCheckpoint() + 1; - Translog.Snapshot snapshot = indexShard.getTranslog().newSnapshotFromMinSeqNo(startingSeqNo); + Translog.Snapshot snapshot = indexShard.newTranslogSnapshotFromMinSeqNo(startingSeqNo); resyncListener = new ActionListener() { @Override public void onResponse(final ResyncTask resyncTask) { diff --git a/server/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java b/server/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java index 17e824eb046c7..d8a51d58ad956 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java +++ b/server/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java @@ -32,6 +32,7 @@ import java.util.concurrent.Executor; import java.util.function.Consumer; import java.util.function.IntSupplier; +import java.util.function.Supplier; import static java.util.Objects.requireNonNull; @@ -153,21 +154,20 @@ public int pendingCount() { /** * Setup the translog used to find the last refreshed location. */ - public void setTranslog(Translog translog) { - this.translog = translog; + public void setCurrentRefreshLocationSupplier(Supplier currentRefreshLocationSupplier) { + this.currentRefreshLocationSupplier = currentRefreshLocationSupplier; } - // Implementation of ReferenceManager.RefreshListener that adapts Lucene's RefreshListener into Elasticsearch's refresh listeners. 
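The IndexShard methods added above (acquireTranslogRetentionLock, newTranslogSnapshotFromMinSeqNo, estimateTranslogOperationsFromMinSeq) let recovery code read translog operations without obtaining the Translog itself, which is how the RecoverySourceHandler hunk below uses them. A hedged sketch of that pattern; the class, method, and the elided replay step are illustrative:

```java
import java.io.Closeable;

import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.translog.Translog;

class TranslogReplaySketch {
    // Sketch: read all operations with seq# >= startingSeqNo through the shard-level accessors.
    static void replayFrom(IndexShard shard, long startingSeqNo) throws Exception {
        try (Closeable retentionLock = shard.acquireTranslogRetentionLock();
             Translog.Snapshot snapshot = shard.newTranslogSnapshotFromMinSeqNo(startingSeqNo)) {
            int expectedOps = shard.estimateTranslogOperationsFromMinSeq(startingSeqNo);
            Translog.Operation operation;
            while ((operation = snapshot.next()) != null) {
                // replay operation ... (expectedOps is only an estimate of how many to expect)
            }
        }
    }
}
```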
- private Translog translog; /** * Snapshot of the translog location before the current refresh if there is a refresh going on or null. Doesn't have to be volatile * because when it is used by the refreshing thread. */ private Translog.Location currentRefreshLocation; + private Supplier currentRefreshLocationSupplier; @Override public void beforeRefresh() throws IOException { - currentRefreshLocation = translog.getLastWriteLocation(); + currentRefreshLocation = currentRefreshLocationSupplier.get(); } @Override diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 710b4bc46e235..78f44ee723114 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -145,9 +145,6 @@ public RecoveryResponse recoverToTarget() throws IOException { }, shardId + " validating recovery target ["+ request.targetAllocationId() + "] registered "); try (Closeable ignored = shard.acquireTranslogRetentionLock()) { - - final Translog translog = shard.getTranslog(); - final long startingSeqNo; final long requiredSeqNoRangeStart; final boolean isSequenceNumberBasedRecovery = request.startingSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && @@ -170,7 +167,7 @@ public RecoveryResponse recoverToTarget() throws IOException { requiredSeqNoRangeStart = Long.parseLong(phase1Snapshot.getIndexCommit().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) + 1; try { - phase1(phase1Snapshot.getIndexCommit(), translog::totalOperations); + phase1(phase1Snapshot.getIndexCommit(), () -> shard.estimateTranslogOperationsFromMinSeq(startingSeqNo)); } catch (final Exception e) { throw new RecoveryEngineException(shard.shardId(), 1, "phase1 failed", e); } finally { @@ -187,7 +184,7 @@ public RecoveryResponse recoverToTarget() throws IOException { try { // For a sequence based recovery, the target can keep its local translog - prepareTargetForTranslog(isSequenceNumberBasedRecovery == false, translog.estimateTotalOperationsFromMinSeq(startingSeqNo)); + prepareTargetForTranslog(isSequenceNumberBasedRecovery == false, shard.estimateTranslogOperationsFromMinSeq(startingSeqNo)); } catch (final Exception e) { throw new RecoveryEngineException(shard.shardId(), 1, "prepare target for translog failed", e); } @@ -210,9 +207,9 @@ public RecoveryResponse recoverToTarget() throws IOException { logger.trace("all operations up to [{}] completed, which will be used as an ending sequence number", endingSeqNo); - logger.trace("snapshot translog for recovery; current size is [{}]", translog.estimateTotalOperationsFromMinSeq(startingSeqNo)); + logger.trace("snapshot translog for recovery; current size is [{}]", shard.estimateTranslogOperationsFromMinSeq(startingSeqNo)); final long targetLocalCheckpoint; - try(Translog.Snapshot snapshot = translog.newSnapshotFromMinSeqNo(startingSeqNo)) { + try(Translog.Snapshot snapshot = shard.newTranslogSnapshotFromMinSeqNo(startingSeqNo)) { targetLocalCheckpoint = phase2(startingSeqNo, requiredSeqNoRangeStart, endingSeqNo, snapshot); } catch (Exception e) { throw new RecoveryEngineException(shard.shardId(), 2, "phase2 failed", e); @@ -261,7 +258,7 @@ boolean isTranslogReadyForSequenceNumberBasedRecovery() throws IOException { // the start recovery request is initialized with the starting sequence number set to the target shard's local checkpoint plus one if 
(startingSeqNo - 1 <= localCheckpoint) { final LocalCheckpointTracker tracker = new LocalCheckpointTracker(startingSeqNo, startingSeqNo - 1); - try (Translog.Snapshot snapshot = shard.getTranslog().newSnapshotFromMinSeqNo(startingSeqNo)) { + try (Translog.Snapshot snapshot = shard.newTranslogSnapshotFromMinSeqNo(startingSeqNo)) { Translog.Operation operation; while ((operation = snapshot.next()) != null) { if (operation.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java b/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java index 39e60b5812eaf..f1062f7b5384c 100644 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java +++ b/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java @@ -56,7 +56,7 @@ public PipelineExecutionService(PipelineStore store, ThreadPool threadPool) { public void executeBulkRequest(Iterable actionRequests, BiConsumer itemFailureHandler, Consumer completionHandler) { - threadPool.executor(ThreadPool.Names.BULK).execute(new AbstractRunnable() { + threadPool.executor(ThreadPool.Names.WRITE).execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java index e9d3adba68255..45180fd1c6f0a 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java @@ -82,10 +82,8 @@ public static JvmStats jvmStats() { peakUsage.getUsed() < 0 ? 0 : peakUsage.getUsed(), peakUsage.getMax() < 0 ? 0 : peakUsage.getMax() )); - } catch (Exception ex) { - /* ignore some JVMs might barf here with: - * java.lang.InternalError: Memory Pool not found - * we just omit the pool in that case!*/ + } catch (final Exception ignored) { + } } Mem mem = new Mem(heapCommitted, heapUsed, heapMax, nonHeapCommitted, nonHeapUsed, Collections.unmodifiableList(pools)); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index b02e1614bbdea..cb7586b3d02f7 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -258,7 +258,6 @@ protected Node(final Environment environment, Collection // use temp logger just to say we are starting. 
we can't use it later on because the node name might not be set Logger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(environment.settings())); logger.info("initializing ..."); - } try { Settings tmpSettings = Settings.builder().put(environment.settings()) @@ -272,22 +271,24 @@ protected Node(final Environment environment, Collection throw new IllegalStateException("Failed to create node environment", ex); } final boolean hadPredefinedNodeName = NODE_NAME_SETTING.exists(tmpSettings); - Logger logger = Loggers.getLogger(Node.class, tmpSettings); final String nodeId = nodeEnvironment.nodeId(); tmpSettings = addNodeNameIfNeeded(tmpSettings, nodeId); + final Logger logger = Loggers.getLogger(Node.class, tmpSettings); // this must be captured after the node name is possibly added to the settings final String nodeName = NODE_NAME_SETTING.get(tmpSettings); if (hadPredefinedNodeName == false) { - logger.info("node name [{}] derived from node ID [{}]; set [{}] to override", nodeName, nodeId, NODE_NAME_SETTING.getKey()); + logger.info("node name derived from node ID [{}]; set [{}] to override", nodeId, NODE_NAME_SETTING.getKey()); } else { logger.info("node name [{}], node ID [{}]", nodeName, nodeId); } final JvmInfo jvmInfo = JvmInfo.jvmInfo(); logger.info( - "version[{}], pid[{}], build[{}/{}], OS[{}/{}/{}], JVM[{}/{}/{}/{}]", + "version[{}], pid[{}], build[{}/{}/{}/{}], OS[{}/{}/{}], JVM[{}/{}/{}/{}]", Version.displayVersion(Version.CURRENT, Build.CURRENT.isSnapshot()), jvmInfo.pid(), + Build.CURRENT.flavor().displayName(), + Build.CURRENT.type().displayName(), Build.CURRENT.shortHash(), Build.CURRENT.date(), Constants.OS_NAME, @@ -777,8 +778,7 @@ public synchronized void close() throws IOException { logger.info("closing ..."); List toClose = new ArrayList<>(); StopWatch stopWatch = new StopWatch("node_close"); - toClose.add(() -> stopWatch.start("tribe")); - toClose.add(() -> stopWatch.stop().start("node_service")); + toClose.add(() -> stopWatch.start("node_service")); toClose.add(nodeService); toClose.add(() -> stopWatch.stop().start("http")); if (NetworkModule.HTTP_ENABLED.get(settings)) { diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginSecurity.java b/server/src/main/java/org/elasticsearch/plugins/PluginSecurity.java index 0b8ebde0b32c4..d2246259ab7c6 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginSecurity.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginSecurity.java @@ -19,11 +19,11 @@ package org.elasticsearch.plugins; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.Terminal.Verbosity; import org.elasticsearch.cli.UserException; +import org.elasticsearch.core.internal.io.IOUtils; import java.io.IOException; import java.nio.file.Files; @@ -37,10 +37,8 @@ import java.security.UnresolvedPermission; import java.util.ArrayList; import java.util.Collections; -import java.util.Comparator; import java.util.List; import java.util.Set; -import java.util.function.Supplier; import java.util.stream.Collectors; class PluginSecurity { @@ -48,8 +46,7 @@ class PluginSecurity { /** * prints/confirms policy exceptions with the user */ - static void confirmPolicyExceptions(Terminal terminal, Set permissions, - boolean needsNativeController, boolean batch) throws UserException { + static void confirmPolicyExceptions(Terminal terminal, Set permissions, boolean batch) throws UserException { List requested = new ArrayList<>(permissions); 
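The Node#close change above keeps the established pattern of queuing stop-watch phase markers and resources as one ordered list of Closeables that is then closed in registration order. A rough, self-contained sketch of that ordered-shutdown idea; SimpleStopWatch and the printed resource names are invented for illustration and are not Elasticsearch's StopWatch API:

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    // Ordered shutdown sketch: phase markers and resources are queued as
    // Closeables and closed strictly in registration order.
    final class ShutdownSequence {
        static final class SimpleStopWatch {
            private String phase;
            private long startNanos;

            SimpleStopWatch start(String name) {
                phase = name;
                startNanos = System.nanoTime();
                return this;
            }

            SimpleStopWatch stop() {
                long tookMillis = (System.nanoTime() - startNanos) / 1_000_000;
                System.out.println(phase + " closed in " + tookMillis + "ms");
                return this;
            }
        }

        public static void main(String[] args) throws IOException {
            SimpleStopWatch stopWatch = new SimpleStopWatch();
            List<Closeable> toClose = new ArrayList<>();
            toClose.add(() -> stopWatch.start("node_service"));
            toClose.add(() -> System.out.println("closing node service")); // placeholder resource
            toClose.add(() -> stopWatch.stop().start("http"));
            toClose.add(() -> System.out.println("closing http transport")); // placeholder resource
            toClose.add(() -> stopWatch.stop());

            for (Closeable closeable : toClose) {
                closeable.close(); // everything closes in the order it was registered
            }
        }
    }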
if (requested.isEmpty()) { terminal.println(Verbosity.VERBOSE, "plugin has a policy file with no additional permissions"); @@ -69,15 +66,6 @@ static void confirmPolicyExceptions(Terminal terminal, Set permissions, terminal.println(Verbosity.NORMAL, "for descriptions of what these permissions allow and the associated risks."); prompt(terminal, batch); } - - if (needsNativeController) { - terminal.println(Verbosity.NORMAL, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"); - terminal.println(Verbosity.NORMAL, "@ WARNING: plugin forks a native controller @"); - terminal.println(Verbosity.NORMAL, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"); - terminal.println(Verbosity.NORMAL, "This plugin launches a native controller that is not subject to the Java"); - terminal.println(Verbosity.NORMAL, "security manager nor to system call filters."); - prompt(terminal, batch); - } } private static void prompt(final Terminal terminal, final boolean batch) throws UserException { diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java index 102bc5a5f0524..7a8d8327d5e3a 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java @@ -230,13 +230,6 @@ public Set getSnapshots(final IndexId indexId) { return snapshotIds; } - /** - * Initializes the indices in the repository metadata; returns a new instance. - */ - public RepositoryData initIndices(final Map> indexSnapshots) { - return new RepositoryData(genId, snapshotIds, snapshotStates, indexSnapshots, incompatibleSnapshotIds); - } - @Override public boolean equals(Object obj) { if (this == obj) { @@ -352,9 +345,10 @@ public XContentBuilder snapshotsToXContent(final XContentBuilder builder, final * Reads an instance of {@link RepositoryData} from x-content, loading the snapshots and indices metadata. 
*/ public static RepositoryData snapshotsFromXContent(final XContentParser parser, long genId) throws IOException { - Map snapshots = new HashMap<>(); - Map snapshotStates = new HashMap<>(); - Map> indexSnapshots = new HashMap<>(); + final Map snapshots = new HashMap<>(); + final Map snapshotStates = new HashMap<>(); + final Map> indexSnapshots = new HashMap<>(); + if (parser.nextToken() == XContentParser.Token.START_OBJECT) { while (parser.nextToken() == XContentParser.Token.FIELD_NAME) { String field = parser.currentName(); @@ -397,17 +391,18 @@ public static RepositoryData snapshotsFromXContent(final XContentParser parser, throw new ElasticsearchParseException("start object expected [indices]"); } while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - String indexName = parser.currentName(); - String indexId = null; - Set snapshotIds = new LinkedHashSet<>(); + final String indexName = parser.currentName(); + final Set snapshotIds = new LinkedHashSet<>(); + + IndexId indexId = null; if (parser.nextToken() != XContentParser.Token.START_OBJECT) { throw new ElasticsearchParseException("start object expected index[" + indexName + "]"); } while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - String indexMetaFieldName = parser.currentName(); + final String indexMetaFieldName = parser.currentName(); parser.nextToken(); if (INDEX_ID.equals(indexMetaFieldName)) { - indexId = parser.text(); + indexId = new IndexId(indexName, parser.text()); } else if (SNAPSHOTS.equals(indexMetaFieldName)) { if (parser.currentToken() != XContentParser.Token.START_ARRAY) { throw new ElasticsearchParseException("start array expected [snapshots]"); @@ -428,12 +423,22 @@ public static RepositoryData snapshotsFromXContent(final XContentParser parser, // since we already have the name/uuid combo in the snapshots array uuid = parser.text(); } - snapshotIds.add(snapshots.get(uuid)); + + SnapshotId snapshotId = snapshots.get(uuid); + if (snapshotId != null) { + snapshotIds.add(snapshotId); + } else { + // A snapshotted index references a snapshot which does not exist in + // the list of snapshots. This can happen when multiple clusters in + // different versions create or delete snapshot in the same repository. 
+ throw new ElasticsearchParseException("Detected a corrupted repository, index " + indexId + + " references an unknown snapshot uuid [" + uuid + "]"); + } } } } assert indexId != null; - indexSnapshots.put(new IndexId(indexName, indexId), snapshotIds); + indexSnapshots.put(indexId, snapshotIds); } } else { throw new ElasticsearchParseException("unknown field name [" + field + "]"); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index f1adf9273ffde..d4e2323451a29 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -1113,6 +1113,11 @@ public void snapshot(final IndexCommit snapshotIndexCommit) { BlobStoreIndexShardSnapshots snapshots = tuple.v1(); int fileListGeneration = tuple.v2(); + if (snapshots.snapshots().stream().anyMatch(sf -> sf.snapshot().equals(snapshotId.getName()))) { + throw new IndexShardSnapshotFailedException(shardId, + "Duplicate snapshot name [" + snapshotId.getName() + "] detected, aborting"); + } + final List indexCommitPointFiles = new ArrayList<>(); store.incRef(); diff --git a/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java index 470f98a1e639a..4c477334265f6 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java @@ -57,11 +57,16 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - if (request.hasContentOrSourceParam() && request.hasParam("fields")) { - throw new IllegalArgumentException("can't specify a request body and [fields]" + - " request parameter, either specify a request body or the" + - " [fields] request parameter"); + if (request.hasContentOrSourceParam()) { + deprecationLogger.deprecated("Specifying a request body is deprecated -- the" + + " [fields] request parameter should be used instead."); + if (request.hasParam("fields")) { + throw new IllegalArgumentException("can't specify a request body and [fields]" + + " request parameter, either specify a request body or the" + + " [fields] request parameter"); + } } + final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final FieldCapabilitiesRequest fieldRequest; if (request.hasContentOrSourceParam()) { @@ -76,17 +81,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, fieldRequest.indicesOptions( IndicesOptions.fromRequest(request, fieldRequest.indicesOptions()) ); - return channel -> client.fieldCaps(fieldRequest, - new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(FieldCapabilitiesResponse response, - XContentBuilder builder) throws Exception { - RestStatus status = OK; - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(status, builder); - } - }); + return channel -> client.fieldCaps(fieldRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java 
b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java index 75baf8cecaaa5..c17be138df19a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java @@ -32,6 +32,7 @@ import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -50,16 +51,8 @@ public String getName() { } @Override - public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) - throws IOException { - return channel -> client.execute(RemoteInfoAction.INSTANCE, new RemoteInfoRequest(), - new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(RemoteInfoResponse response, XContentBuilder builder) throws Exception { - response.toXContent(builder, request); - return new BytesRestResponse(RestStatus.OK, builder); - } - }); + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + return channel -> client.execute(RemoteInfoAction.INSTANCE, new RemoteInfoRequest(), new RestToXContentListener<>(channel)); } @Override public boolean canTripCircuitBreaker() { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java index 266c1cb68f03f..38b9d987d04e5 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java @@ -31,7 +31,6 @@ import java.io.IOException; -import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestClearIndicesCacheAction extends BaseRestHandler { @@ -40,9 +39,6 @@ public RestClearIndicesCacheAction(Settings settings, RestController controller) super(settings); controller.registerHandler(POST, "/_cache/clear", this); controller.registerHandler(POST, "/{index}/_cache/clear", this); - - controller.registerHandler(GET, "/_cache/clear", this); - controller.registerHandler(GET, "/{index}/_cache/clear", this); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java index 1dbbd6f1696db..fd70b7461ec67 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java @@ -62,7 +62,6 @@ public String getName() { metrics.put("store", r -> r.store(true)); metrics.put("indexing", r -> r.indexing(true)); metrics.put("search", r -> r.search(true)); - metrics.put("suggest", r -> r.search(true)); metrics.put("get", r -> r.get(true)); metrics.put("merge", r -> r.merge(true)); metrics.put("refresh", r -> r.refresh(true)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index c232ec25322ff..b3c480a2e7ba5 100644 --- 
a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -123,6 +123,8 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("http_address", "default:false;alias:http;desc:bound http address"); table.addCell("version", "default:false;alias:v;desc:es version"); + table.addCell("flavor", "default:false;alias:f;desc:es distribution flavor"); + table.addCell("type", "default:false;alias:t;desc:es distribution type"); table.addCell("build", "default:false;alias:b;desc:es build hash"); table.addCell("jdk", "default:false;alias:j;desc:jdk version"); table.addCell("disk.total", "default:false;alias:dt,diskTotal;text-align:right;desc:total disk space"); @@ -271,6 +273,8 @@ Table buildTable(boolean fullId, RestRequest req, ClusterStateResponse state, No } table.addCell(node.getVersion().toString()); + table.addCell(info == null ? null : info.getBuild().flavor().displayName()); + table.addCell(info == null ? null : info.getBuild().type().displayName()); table.addCell(info == null ? null : info.getBuild().shortHash()); table.addCell(jvmInfo == null ? null : jvmInfo.version()); diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java index 8db9710af3139..0e242bb6d9f78 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java @@ -72,7 +72,15 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { BulkRequest bulkRequest = Requests.bulkRequest(); String defaultIndex = request.param("index"); - String defaultType = request.param("type", MapperService.SINGLE_MAPPING_NAME); + String defaultType = request.param("type"); + final boolean includeTypeName = request.paramAsBoolean("include_type_name", true); + if (includeTypeName == false && defaultType != null) { + throw new IllegalArgumentException("You may only use the [include_type_name=false] option with the bulk APIs with the " + + "[_bulk] and [{index}/_bulk] endpoints."); + } + if (defaultType == null) { + defaultType = MapperService.SINGLE_MAPPING_NAME; + } String defaultRouting = request.param("routing"); FetchSourceContext defaultFetchSourceContext = FetchSourceContext.parseFromRestRequest(request); String defaultPipeline = request.param("pipeline"); diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java index f6b0878c03802..05b60d3d7cbb2 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -47,7 +48,13 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - DeleteRequest deleteRequest = new
DeleteRequest(request.param("index"), request.param("type"), request.param("id")); + final boolean includeTypeName = request.paramAsBoolean("include_type_name", true); + final String type = request.param("type"); + if (includeTypeName == false && MapperService.SINGLE_MAPPING_NAME.equals(type) == false) { + throw new IllegalArgumentException("You may only use the [include_type_name=false] option with the delete API with the " + + "[{index}/_doc/{id}] endpoints."); + } + DeleteRequest deleteRequest = new DeleteRequest(request.param("index"), type, request.param("id")); deleteRequest.routing(request.param("routing")); deleteRequest.timeout(request.paramAsTime("timeout", DeleteRequest.DEFAULT_TIMEOUT)); deleteRequest.setRefreshPolicy(request.param("refresh")); diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java index e1d3f7557783c..8044600d6196e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -55,7 +56,13 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id")); + final boolean includeTypeName = request.paramAsBoolean("include_type_name", true); + final String type = request.param("type"); + if (includeTypeName == false && MapperService.SINGLE_MAPPING_NAME.equals(type) == false) { + throw new IllegalArgumentException("You may only use the [include_type_name=false] option with the get APIs with the " + + "[{index}/_doc/{id}] endpoint."); + } + final GetRequest getRequest = new GetRequest(request.param("index"), type, request.param("id")); getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh())); getRequest.routing(request.param("routing")); getRequest.preference(request.param("preference")); diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java index 5cc514f744098..619fd811e6a7c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -78,7 +79,13 @@ void validateOpType(String opType) { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - IndexRequest indexRequest = new IndexRequest(request.param("index"), request.param("type"), request.param("id")); + final boolean includeTypeName = 
request.paramAsBoolean("include_type_name", true); + final String type = request.param("type"); + if (includeTypeName == false && MapperService.SINGLE_MAPPING_NAME.equals(type) == false) { + throw new IllegalArgumentException("You may only use the [include_type_name=false] option with the index APIs with the " + + "[{index}/_doc/{id}] and [{index}/_doc] endpoints."); + } + IndexRequest indexRequest = new IndexRequest(request.param("index"), type, request.param("id")); indexRequest.routing(request.param("routing")); indexRequest.setPipeline(request.param("pipeline")); indexRequest.source(request.requiredContent(), request.getXContentType()); diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java index de7c1fad5b26a..29cc6e8e028a8 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -50,7 +51,13 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - UpdateRequest updateRequest = new UpdateRequest(request.param("index"), request.param("type"), request.param("id")); + final boolean includeTypeName = request.paramAsBoolean("include_type_name", true); + final String type = request.param("type"); + if (includeTypeName == false && MapperService.SINGLE_MAPPING_NAME.equals(type) == false) { + throw new IllegalArgumentException("You may only use the [include_type_name=false] option with the update API with the " + + "[{index}/_doc/{id}/_update] endpoint."); + } + UpdateRequest updateRequest = new UpdateRequest(request.param("index"), type, request.param("id")); updateRequest.routing(request.param("routing")); updateRequest.timeout(request.paramAsTime("timeout", updateRequest.timeout())); updateRequest.setRefreshPolicy(request.param("refresh")); diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index 6f0c033a0cf22..3098dc03a8c71 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; @@ -150,8 +151,13 @@ public static void parseSearchRequest(SearchRequest searchRequest, RestRequest r searchRequest.scroll(new Scroll(parseTimeValue(scroll, null, "scroll"))); } + final boolean includeTypeName = request.paramAsBoolean("include_type_name", true); String types = request.param("type"); if (types != null) { + if (includeTypeName == false) { + throw new IllegalArgumentException("You may only use the 
[include_type_name=false] option with the search API with the " + + "[{index}/_search] endpoint."); + } DEPRECATION_LOGGER.deprecated("The {index}/{type}/_search endpoint is deprecated, use {index}/_search instead"); } searchRequest.types(Strings.splitStringByCommaToArray(types)); diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index 1356a1458a2ed..d681a186892db 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -25,8 +25,10 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.Query; import org.apache.lucene.util.Counter; +import org.elasticsearch.Version; import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.search.Queries; @@ -91,6 +93,7 @@ final class DefaultSearchContext extends SearchContext { private final Engine.Searcher engineSearcher; private final BigArrays bigArrays; private final IndexShard indexShard; + private final ClusterService clusterService; private final IndexService indexService; private final ContextIndexSearcher searcher; private final DfsSearchResult dfsResult; @@ -120,6 +123,7 @@ final class DefaultSearchContext extends SearchContext { // filter for sliced scroll private SliceBuilder sliceBuilder; private SearchTask task; + private final Version minNodeVersion; /** @@ -152,9 +156,10 @@ final class DefaultSearchContext extends SearchContext { private final QueryShardContext queryShardContext; private FetchPhase fetchPhase; - DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, Engine.Searcher engineSearcher, - IndexService indexService, IndexShard indexShard, BigArrays bigArrays, Counter timeEstimateCounter, - TimeValue timeout, FetchPhase fetchPhase, String clusterAlias) { + DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, + Engine.Searcher engineSearcher, ClusterService clusterService, IndexService indexService, + IndexShard indexShard, BigArrays bigArrays, Counter timeEstimateCounter, TimeValue timeout, + FetchPhase fetchPhase, String clusterAlias, Version minNodeVersion) { this.id = id; this.request = request; this.fetchPhase = fetchPhase; @@ -168,9 +173,11 @@ final class DefaultSearchContext extends SearchContext { this.fetchResult = new FetchSearchResult(id, shardTarget); this.indexShard = indexShard; this.indexService = indexService; + this.clusterService = clusterService; this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy()); this.timeEstimateCounter = timeEstimateCounter; this.timeout = timeout; + this.minNodeVersion = minNodeVersion; queryShardContext = indexService.newQueryShardContext(request.shardId().id(), searcher.getIndexReader(), request::nowInMillis, clusterAlias); queryShardContext.setTypes(request.types()); @@ -278,8 +285,7 @@ && new NestedHelper(mapperService()).mightMatchNestedDocs(query) } if (sliceBuilder != null) { - filters.add(sliceBuilder.toFilter(queryShardContext, shardTarget().getShardId().getId(), - queryShardContext.getIndexSettings().getNumberOfShards())); + filters.add(sliceBuilder.toFilter(clusterService, 
request, queryShardContext, minNodeVersion)); } if (filters.isEmpty()) { diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index 96a5ebc25e2da..da7a42b22e3e6 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -426,7 +426,7 @@ public XContentBuilder toInnerXContent(XContentBuilder builder, Params params) t if (index != null) { builder.field(Fields._INDEX, RemoteClusterAware.buildRemoteIndexName(clusterAlias, index)); } - if (type != null) { + if (type != null && params.paramAsBoolean("include_type_name", true)) { builder.field(Fields._TYPE, type); } if (id != null) { diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index a742a3a06ae13..ed7f98c3b0b12 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -616,8 +616,8 @@ private DefaultSearchContext createSearchContext(ShardSearchRequest request, Tim Engine.Searcher engineSearcher = indexShard.acquireSearcher("search"); final DefaultSearchContext searchContext = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, - engineSearcher, indexService, indexShard, bigArrays, threadPool.estimatedTimeInMillisCounter(), timeout, fetchPhase, - request.getClusterAlias()); + engineSearcher, clusterService, indexService, indexShard, bigArrays, threadPool.estimatedTimeInMillisCounter(), timeout, + fetchPhase, request.getClusterAlias(), clusterService.state().nodes().getMinNodeVersion()); boolean success = false; try { // we clone the query shard context here just for rewriting otherwise we diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java index 66b8f8d5b15ed..25f83caa3eb92 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java @@ -20,10 +20,8 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.util.LongHash; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -103,11 +101,22 @@ public SignificantStringTerms buildAggregation(long owningBucketOrdinal) throws BucketSignificancePriorityQueue ordered = new BucketSignificancePriorityQueue<>(size); SignificantStringTerms.Bucket spare = null; - for (long globalTermOrd = 0; globalTermOrd < valueCount; ++globalTermOrd) { - if (includeExclude != null && !acceptedGlobalOrdinals.get(globalTermOrd)) { + final boolean needsFullScan = bucketOrds == null || bucketCountThresholds.getMinDocCount() == 0; + final long maxId = needsFullScan ? 
valueCount : bucketOrds.size(); + for (long ord = 0; ord < maxId; ord++) { + final long globalOrd; + final long bucketOrd; + if (needsFullScan) { + bucketOrd = bucketOrds == null ? ord : bucketOrds.find(ord); + globalOrd = ord; + } else { + assert bucketOrds != null; + bucketOrd = ord; + globalOrd = bucketOrds.get(ord); + } + if (includeExclude != null && !acceptedGlobalOrdinals.get(globalOrd)) { continue; } - final long bucketOrd = getBucketOrd(globalTermOrd); final int bucketDocCount = bucketOrd < 0 ? 0 : bucketDocCount(bucketOrd); if (bucketCountThresholds.getMinDocCount() > 0 && bucketDocCount == 0) { continue; @@ -120,7 +129,7 @@ public SignificantStringTerms buildAggregation(long owningBucketOrdinal) throws spare = new SignificantStringTerms.Bucket(new BytesRef(), 0, 0, 0, 0, null, format); } spare.bucketOrd = bucketOrd; - copy(lookupGlobalOrd.apply(globalTermOrd), spare.termBytes); + copy(lookupGlobalOrd.apply(globalOrd), spare.termBytes); spare.subsetDf = bucketDocCount; spare.subsetSize = subsetSize; spare.supersetDf = termsAggFactory.getBackgroundFrequency(spare.termBytes); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index 6ad14b8d0f93a..03eb00337e9c1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -71,7 +71,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr protected final long valueCount; protected final GlobalOrdLookupFunction lookupGlobalOrd; - private final LongHash bucketOrds; + protected final LongHash bucketOrds; public interface GlobalOrdLookupFunction { BytesRef apply(long ord) throws IOException; @@ -107,10 +107,6 @@ boolean remapGlobalOrds() { return bucketOrds != null; } - protected final long getBucketOrd(long globalOrd) { - return bucketOrds == null ? globalOrd : bucketOrds.find(globalOrd); - } - private void collectGlobalOrd(int doc, long globalOrd, LeafBucketCollector sub) throws IOException { if (bucketOrds == null) { collectExistingBucket(sub, doc, globalOrd); @@ -188,17 +184,28 @@ public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOE long otherDocCount = 0; BucketPriorityQueue ordered = new BucketPriorityQueue<>(size, order.comparator(this)); OrdBucket spare = new OrdBucket(-1, 0, null, showTermDocCountError, 0); - for (long globalTermOrd = 0; globalTermOrd < valueCount; ++globalTermOrd) { - if (includeExclude != null && !acceptedGlobalOrdinals.get(globalTermOrd)) { + final boolean needsFullScan = bucketOrds == null || bucketCountThresholds.getMinDocCount() == 0; + final long maxId = needsFullScan ? valueCount : bucketOrds.size(); + for (long ord = 0; ord < maxId; ord++) { + final long globalOrd; + final long bucketOrd; + if (needsFullScan) { + bucketOrd = bucketOrds == null ? ord : bucketOrds.find(ord); + globalOrd = ord; + } else { + assert bucketOrds != null; + bucketOrd = ord; + globalOrd = bucketOrds.get(ord); + } + if (includeExclude != null && !acceptedGlobalOrdinals.get(globalOrd)) { continue; } - final long bucketOrd = getBucketOrd(globalTermOrd); final int bucketDocCount = bucketOrd < 0 ? 
0 : bucketDocCount(bucketOrd); if (bucketCountThresholds.getMinDocCount() > 0 && bucketDocCount == 0) { continue; } otherDocCount += bucketDocCount; - spare.globalOrd = globalTermOrd; + spare.globalOrd = globalOrd; spare.bucketOrd = bucketOrd; spare.docCount = bucketDocCount; if (bucketCountThresholds.getShardMinDocCount() <= spare.docCount) { @@ -378,7 +385,7 @@ private void mapSegmentCountsToGlobalCounts(LongUnaryOperator mapping) throws IO } final long ord = i - 1; // remember we do +1 when counting final long globalOrd = mapping.applyAsLong(ord); - long bucketOrd = getBucketOrd(globalOrd); + long bucketOrd = bucketOrds == null ? globalOrd : bucketOrds.find(globalOrd); incrementBucketDocCount(bucketOrd, inc); } } diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index af52924a2de2c..cf656ed3b9cb2 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -28,13 +28,10 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; -import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.Scroll; -import org.elasticsearch.search.SearchService; import org.elasticsearch.search.builder.SearchSourceBuilder; import java.io.IOException; @@ -61,7 +58,6 @@ */ public class ShardSearchLocalRequest implements ShardSearchRequest { - private String clusterAlias; private ShardId shardId; private int numberOfShards; @@ -74,17 +70,18 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { private Boolean requestCache; private long nowInMillis; private boolean allowPartialSearchResults; - + private String[] indexRoutings = Strings.EMPTY_ARRAY; + private String preference; private boolean profile; ShardSearchLocalRequest() { } ShardSearchLocalRequest(SearchRequest searchRequest, ShardId shardId, int numberOfShards, - AliasFilter aliasFilter, float indexBoost, long nowInMillis, String clusterAlias) { + AliasFilter aliasFilter, float indexBoost, long nowInMillis, String clusterAlias, String[] indexRoutings) { this(shardId, numberOfShards, searchRequest.searchType(), - searchRequest.source(), searchRequest.types(), searchRequest.requestCache(), aliasFilter, indexBoost, - searchRequest.allowPartialSearchResults()); + searchRequest.source(), searchRequest.types(), searchRequest.requestCache(), aliasFilter, indexBoost, + searchRequest.allowPartialSearchResults(), indexRoutings, searchRequest.preference()); // If allowPartialSearchResults is unset (ie null), the cluster-level default should have been substituted // at this stage. Any NPEs in the above are therefore an error in request preparation logic. 
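ShardSearchLocalRequest now carries the coordinating node's resolved index routings and the original preference, and the innerReadFrom/innerWriteTo hunks that follow gate those fields on stream version 6.4.0, substituting defaults when talking to older nodes. A schematic of that backward-compatible wire pattern, assuming plain DataInput/DataOutput and an illustrative integer version constant instead of Elasticsearch's StreamInput/StreamOutput:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    // Schematic of version-gated serialization; the version constant and
    // stream types are stand-ins, not the real StreamInput/StreamOutput.
    final class RoutedSearchRequestState {
        static final int VERSION_6_4_0 = 6_04_00;

        String[] indexRoutings = new String[0];
        String preference; // may be null

        void writeTo(DataOutput out, int remoteVersion) throws IOException {
            if (remoteVersion >= VERSION_6_4_0) {
                out.writeInt(indexRoutings.length);
                for (String routing : indexRoutings) {
                    out.writeUTF(routing);
                }
                out.writeBoolean(preference != null);
                if (preference != null) {
                    out.writeUTF(preference);
                }
            }
            // nodes before 6.4.0 never see the new fields
        }

        void readFrom(DataInput in, int remoteVersion) throws IOException {
            if (remoteVersion >= VERSION_6_4_0) {
                String[] routings = new String[in.readInt()];
                for (int i = 0; i < routings.length; i++) {
                    routings[i] = in.readUTF();
                }
                indexRoutings = routings;
                preference = in.readBoolean() ? in.readUTF() : null;
            } else {
                // older senders cannot provide the fields, so fall back to defaults
                indexRoutings = new String[0];
                preference = null;
            }
        }
    }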
assert searchRequest.allowPartialSearchResults() != null; @@ -102,7 +99,8 @@ public ShardSearchLocalRequest(ShardId shardId, String[] types, long nowInMillis } public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, SearchSourceBuilder source, String[] types, - Boolean requestCache, AliasFilter aliasFilter, float indexBoost, boolean allowPartialSearchResults) { + Boolean requestCache, AliasFilter aliasFilter, float indexBoost, boolean allowPartialSearchResults, + String[] indexRoutings, String preference) { this.shardId = shardId; this.numberOfShards = numberOfShards; this.searchType = searchType; @@ -112,6 +110,8 @@ public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType s this.aliasFilter = aliasFilter; this.indexBoost = indexBoost; this.allowPartialSearchResults = allowPartialSearchResults; + this.indexRoutings = indexRoutings; + this.preference = preference; } @@ -169,18 +169,28 @@ public long nowInMillis() { public Boolean requestCache() { return requestCache; } - + @Override public Boolean allowPartialSearchResults() { return allowPartialSearchResults; } - + @Override public Scroll scroll() { return scroll; } + @Override + public String[] indexRoutings() { + return indexRoutings; + } + + @Override + public String preference() { + return preference; + } + @Override public void setProfile(boolean profile) { this.profile = profile; @@ -225,6 +235,13 @@ protected void innerReadFrom(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_6_3_0)) { allowPartialSearchResults = in.readOptionalBoolean(); } + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { + indexRoutings = in.readStringArray(); + preference = in.readOptionalString(); + } else { + indexRoutings = Strings.EMPTY_ARRAY; + preference = null; + } } protected void innerWriteTo(StreamOutput out, boolean asKey) throws IOException { @@ -240,7 +257,7 @@ protected void innerWriteTo(StreamOutput out, boolean asKey) throws IOException if (out.getVersion().onOrAfter(Version.V_5_2_0)) { out.writeFloat(indexBoost); } - if (!asKey) { + if (asKey == false) { out.writeVLong(nowInMillis); } out.writeOptionalBoolean(requestCache); @@ -250,7 +267,12 @@ protected void innerWriteTo(StreamOutput out, boolean asKey) throws IOException if (out.getVersion().onOrAfter(Version.V_6_3_0)) { out.writeOptionalBoolean(allowPartialSearchResults); } - + if (asKey == false) { + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { + out.writeStringArray(indexRoutings); + out.writeOptionalString(preference); + } + } } @Override diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 19eb0f17ccc84..0a1513e17d08e 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -19,7 +19,9 @@ package org.elasticsearch.search.internal; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.CheckedFunction; @@ -28,8 +30,6 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; -import 
org.elasticsearch.index.query.QueryRewriteContext; -import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.AliasFilterParsingException; @@ -68,11 +68,21 @@ public interface ShardSearchRequest { long nowInMillis(); Boolean requestCache(); - + Boolean allowPartialSearchResults(); Scroll scroll(); + /** + * Returns the routing values resolved by the coordinating node for the index pointed by {@link #shardId()}. + */ + String[] indexRoutings(); + + /** + * Returns the preference of the original {@link SearchRequest#preference()}. + */ + String preference(); + /** * Sets if this shard search needs to be profiled or not * @param profile True if the shard should be profiled diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java index ac86d24ed000d..08060a2b249b6 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java @@ -28,9 +28,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryRewriteContext; -import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.Scroll; @@ -57,9 +54,10 @@ public ShardSearchTransportRequest(){ } public ShardSearchTransportRequest(OriginalIndices originalIndices, SearchRequest searchRequest, ShardId shardId, int numberOfShards, - AliasFilter aliasFilter, float indexBoost, long nowInMillis, String clusterAlias) { + AliasFilter aliasFilter, float indexBoost, long nowInMillis, + String clusterAlias, String[] indexRoutings) { this.shardSearchLocalRequest = new ShardSearchLocalRequest(searchRequest, shardId, numberOfShards, aliasFilter, indexBoost, - nowInMillis, clusterAlias); + nowInMillis, clusterAlias, indexRoutings); this.originalIndices = originalIndices; } @@ -151,17 +149,27 @@ public long nowInMillis() { public Boolean requestCache() { return shardSearchLocalRequest.requestCache(); } - + @Override public Boolean allowPartialSearchResults() { return shardSearchLocalRequest.allowPartialSearchResults(); - } + } @Override public Scroll scroll() { return shardSearchLocalRequest.scroll(); } + @Override + public String[] indexRoutings() { + return shardSearchLocalRequest.indexRoutings(); + } + + @Override + public String preference() { + return shardSearchLocalRequest.preference(); + } + @Override public void readFrom(StreamInput in) throws IOException { throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); diff --git a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java index aabf0c3fd0c69..7e6945b9d4822 100644 --- a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java @@ -23,6 +23,10 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.Version; +import 
org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -30,6 +34,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -39,9 +44,13 @@ import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.search.internal.ShardSearchRequest; import java.io.IOException; +import java.util.Collections; +import java.util.Map; import java.util.Objects; +import java.util.Set; /** * A slice builder allowing to split a scroll in multiple partitions. @@ -203,12 +212,49 @@ public int hashCode() { return Objects.hash(this.field, this.id, this.max); } - public Query toFilter(QueryShardContext context, int shardId, int numShards) { + /** + * Converts this QueryBuilder to a lucene {@link Query}. + * + * @param context Additional information needed to build the query + */ + public Query toFilter(ClusterService clusterService, ShardSearchRequest request, QueryShardContext context, Version minNodeVersion) { final MappedFieldType type = context.fieldMapper(field); if (type == null) { throw new IllegalArgumentException("field " + field + " not found"); } + int shardId = request.shardId().id(); + int numShards = context.getIndexSettings().getNumberOfShards(); + if (minNodeVersion.onOrAfter(Version.V_6_4_0) && + (request.preference() != null || request.indexRoutings().length > 0)) { + GroupShardsIterator group = buildShardIterator(clusterService, request); + assert group.size() <= numShards : "index routing shards: " + group.size() + + " cannot be greater than total number of shards: " + numShards; + if (group.size() < numShards) { + /** + * The routing of this request targets a subset of the shards of this index so we need to retrieve + * the original {@link GroupShardsIterator} and compute the request shard id and number of + * shards from it. + * This behavior has been added in {@link Version#V_6_4_0} so if there is another node in the cluster + * with an older version we use the original shard id and number of shards in order to ensure that all + * slices use the same numbers. + */ + numShards = group.size(); + int ord = 0; + shardId = -1; + // remap the original shard id with its index (position) in the sorted shard iterator. + for (ShardIterator it : group) { + assert it.shardId().getIndex().equals(request.shardId().getIndex()); + if (request.shardId().equals(it.shardId())) { + shardId = ord; + break; + } + ++ord; + } + assert shardId != -1 : "shard id: " + request.shardId().getId() + " not found in index shard routing"; + } + } + String field = this.field; boolean useTermQuery = false; if ("_uid".equals(field)) { @@ -273,6 +319,17 @@ public Query toFilter(QueryShardContext context, int shardId, int numShards) { return new MatchAllDocsQuery(); } + /** + * Returns the {@link GroupShardsIterator} for the provided request.
+ */ + private GroupShardsIterator buildShardIterator(ClusterService clusterService, ShardSearchRequest request) { + final ClusterState state = clusterService.state(); + String[] indices = new String[] { request.shardId().getIndex().getName() }; + Map> routingMap = request.indexRoutings().length > 0 ? + Collections.singletonMap(indices[0], Sets.newHashSet(request.indexRoutings())) : null; + return clusterService.operationRouting().searchShards(state, indices, routingMap, request.preference()); + } + @Override public String toString() { return Strings.toString(this, true, true); diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index daf5c78b78cee..5665680fd9c57 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -592,10 +592,9 @@ public List currentSnapshots(final String repository, * @return map of shard id to snapshot status */ public Map snapshotShards(final String repositoryName, + final RepositoryData repositoryData, final SnapshotInfo snapshotInfo) throws IOException { final Repository repository = repositoriesService.repository(repositoryName); - final RepositoryData repositoryData = repository.getRepositoryData(); - final Map shardStatus = new HashMap<>(); for (String index : snapshotInfo.indices()) { IndexId indexId = repositoryData.resolveIndexId(index); diff --git a/server/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java b/server/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java index 314eb1df71a4b..3945042db509c 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java @@ -48,7 +48,7 @@ protected static String settingsKey(final String prefix, final String key) { } protected int applyHardSizeLimit(final Settings settings, final String name) { - if (name.equals(ThreadPool.Names.BULK) || name.equals(ThreadPool.Names.INDEX)) { + if (name.equals("bulk") || name.equals(ThreadPool.Names.WRITE)) { return 1 + EsExecutors.numberOfProcessors(settings); } else { return Integer.MAX_VALUE; diff --git a/server/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java b/server/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java index 2ce435071c5d1..43da1044c6bd0 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java +++ b/server/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java @@ -41,8 +41,7 @@ public final class FixedExecutorBuilder extends ExecutorBuilder queueSizeSetting; /** - * Construct a fixed executor builder; the settings will have the - * key prefix "thread_pool." followed by the executor name. + * Construct a fixed executor builder; the settings will have the key prefix "thread_pool." followed by the executor name. 
* * @param settings the node-level settings * @param name the name of the executor @@ -66,14 +65,13 @@ public FixedExecutorBuilder(final Settings settings, final String name, final in super(name); final String sizeKey = settingsKey(prefix, "size"); this.sizeSetting = - new Setting<>( - sizeKey, - s -> Integer.toString(size), - s -> Setting.parseInt(s, 1, applyHardSizeLimit(settings, name), sizeKey), - Setting.Property.NodeScope); + new Setting<>( + sizeKey, + s -> Integer.toString(size), + s -> Setting.parseInt(s, 1, applyHardSizeLimit(settings, name), sizeKey), + Setting.Property.NodeScope); final String queueSizeKey = settingsKey(prefix, "queue_size"); - this.queueSizeSetting = - Setting.intSetting(queueSizeKey, queueSize, Setting.Property.NodeScope); + this.queueSizeSetting = Setting.intSetting(queueSizeKey, queueSize, Setting.Property.NodeScope); } @Override diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index 80814960f0ea7..51a4adec8d16d 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -68,8 +68,8 @@ public static class Names { public static final String GENERIC = "generic"; public static final String LISTENER = "listener"; public static final String GET = "get"; - public static final String INDEX = "index"; - public static final String BULK = "bulk"; + public static final String ANALYZE = "analyze"; + public static final String WRITE = "write"; public static final String SEARCH = "search"; public static final String MANAGEMENT = "management"; public static final String FLUSH = "flush"; @@ -124,8 +124,8 @@ public static ThreadPoolType fromType(String type) { map.put(Names.GENERIC, ThreadPoolType.SCALING); map.put(Names.LISTENER, ThreadPoolType.FIXED); map.put(Names.GET, ThreadPoolType.FIXED); - map.put(Names.INDEX, ThreadPoolType.FIXED); - map.put(Names.BULK, ThreadPoolType.FIXED); + map.put(Names.ANALYZE, ThreadPoolType.FIXED); + map.put(Names.WRITE, ThreadPoolType.FIXED); map.put(Names.SEARCH, ThreadPoolType.FIXED_AUTO_QUEUE_SIZE); map.put(Names.MANAGEMENT, ThreadPoolType.SCALING); map.put(Names.FLUSH, ThreadPoolType.SCALING); @@ -170,9 +170,9 @@ public ThreadPool(final Settings settings, final ExecutorBuilder... 
customBui final int halfProcMaxAt10 = halfNumberOfProcessorsMaxTen(availableProcessors); final int genericThreadPoolMax = boundedBy(4 * availableProcessors, 128, 512); builders.put(Names.GENERIC, new ScalingExecutorBuilder(Names.GENERIC, 4, genericThreadPoolMax, TimeValue.timeValueSeconds(30))); - builders.put(Names.INDEX, new FixedExecutorBuilder(settings, Names.INDEX, availableProcessors, 200)); - builders.put(Names.BULK, new FixedExecutorBuilder(settings, Names.BULK, availableProcessors, 200)); // now that we reuse bulk for index/delete ops + builders.put(Names.WRITE, new FixedExecutorBuilder(settings, Names.WRITE, availableProcessors, 200)); builders.put(Names.GET, new FixedExecutorBuilder(settings, Names.GET, availableProcessors, 1000)); + builders.put(Names.ANALYZE, new FixedExecutorBuilder(settings, Names.ANALYZE, 1, 16)); builders.put(Names.SEARCH, new AutoQueueAdjustingExecutorBuilder(settings, Names.SEARCH, searchThreadPoolSize(availableProcessors), 1000, 1000, 1000, 2000)); builders.put(Names.MANAGEMENT, new ScalingExecutorBuilder(Names.MANAGEMENT, 1, 5, TimeValue.timeValueMinutes(5))); @@ -264,7 +264,7 @@ public Info info(String name) { public ThreadPoolStats stats() { List stats = new ArrayList<>(); for (ExecutorHolder holder : executors.values()) { - String name = holder.info.getName(); + final String name = holder.info.getName(); // no need to have info on "same" thread pool if ("same".equals(name)) { continue; diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index fb4586d201bd7..f24a1a928d50f 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CancellableThreads; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -602,66 +603,13 @@ void addConnectedNode(DiscoveryNode node) { } /** - * Fetches connection info for this connection + * Get the information about remote nodes to be rendered on {@code _remote/info} requests. 
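With the thread pool renames above, node settings that used to target `thread_pool.index.*` or `thread_pool.bulk.*` now go through `thread_pool.write.*`, and the new analyze pool gets its own `thread_pool.analyze.*` keys (defaults per the hunk: write sized to the processor count with a queue of 200, analyze sized 1 with a queue of 16). A small hedged example of building such settings with the org.elasticsearch.common.settings.Settings API already used throughout this diff; the concrete values are illustrative only.

```java
import org.elasticsearch.common.settings.Settings;

public class ThreadPoolSettingsExample {
    public static void main(String[] args) {
        Settings settings = Settings.builder()
            .put("thread_pool.write.size", 8)          // replaces thread_pool.index.size / thread_pool.bulk.size
            .put("thread_pool.write.queue_size", 200)  // default queue size stays 200
            .put("thread_pool.analyze.size", 1)        // new dedicated analyze pool
            .put("thread_pool.analyze.queue_size", 16)
            .build();
        System.out.println(settings.get("thread_pool.write.size"));
    }
}
```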
*/ - public void getConnectionInfo(ActionListener listener) { - final Optional anyNode = connectedNodes.getAny(); - if (anyNode.isPresent() == false) { - // not connected we return immediately - RemoteConnectionInfo remoteConnectionStats = new RemoteConnectionInfo(clusterAlias, - Collections.emptyList(), Collections.emptyList(), maxNumRemoteConnections, 0, - RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings), skipUnavailable); - listener.onResponse(remoteConnectionStats); - } else { - NodesInfoRequest request = new NodesInfoRequest(); - request.clear(); - request.http(true); - - transportService.sendRequest(anyNode.get(), NodesInfoAction.NAME, request, new TransportResponseHandler() { - @Override - public NodesInfoResponse newInstance() { - return new NodesInfoResponse(); - } - - @Override - public void handleResponse(NodesInfoResponse response) { - Collection httpAddresses = new HashSet<>(); - for (NodeInfo info : response.getNodes()) { - if (connectedNodes.contains(info.getNode()) && info.getHttp() != null) { - httpAddresses.add(info.getHttp().getAddress().publishAddress()); - } - } - - if (httpAddresses.size() < maxNumRemoteConnections) { - // just in case non of the connected nodes have http enabled we get other http enabled nodes instead. - for (NodeInfo info : response.getNodes()) { - if (nodePredicate.test(info.getNode()) && info.getHttp() != null) { - httpAddresses.add(info.getHttp().getAddress().publishAddress()); - } - if (httpAddresses.size() == maxNumRemoteConnections) { - break; // once we have enough return... - } - } - } - RemoteConnectionInfo remoteConnectionInfo = new RemoteConnectionInfo(clusterAlias, - seedNodes.stream().map(DiscoveryNode::getAddress).collect(Collectors.toList()), new ArrayList<>(httpAddresses), - maxNumRemoteConnections, connectedNodes.size(), - RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings), skipUnavailable); - listener.onResponse(remoteConnectionInfo); - } - - @Override - public void handleException(TransportException exp) { - listener.onFailure(exp); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - }); - } - + public RemoteConnectionInfo getConnectionInfo() { + List seedNodeAddresses = seedNodes.stream().map(DiscoveryNode::getAddress).collect(Collectors.toList()); + TimeValue initialConnectionTimeout = RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings); + return new RemoteConnectionInfo(clusterAlias, seedNodeAddresses, maxNumRemoteConnections, connectedNodes.size(), + initialConnectionTimeout, skipUnavailable); } int getNumNodesConnected() { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index f454571301777..5de0d5e62dd07 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -25,7 +25,6 @@ import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; -import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -42,7 +41,6 @@ import java.io.Closeable; import java.io.IOException; import 
java.net.InetSocketAddress; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -56,6 +54,7 @@ import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.common.settings.Setting.boolSetting; @@ -348,17 +347,8 @@ public void close() throws IOException { IOUtils.close(remoteClusters.values()); } - public void getRemoteConnectionInfos(ActionListener> listener) { - final Map remoteClusters = this.remoteClusters; - if (remoteClusters.isEmpty()) { - listener.onResponse(Collections.emptyList()); - } else { - final GroupedActionListener actionListener = new GroupedActionListener<>(listener, - remoteClusters.size(), Collections.emptyList()); - for (RemoteClusterConnection connection : remoteClusters.values()) { - connection.getConnectionInfo(actionListener); - } - } + public Stream getRemoteConnectionInfos() { + return remoteClusters.values().stream().map(RemoteClusterConnection::getConnectionInfo); } /** diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java index cb51f7edce570..60067e18573ad 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java @@ -27,17 +27,18 @@ import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; +import static java.util.Collections.emptyList; + import java.io.IOException; import java.util.List; import java.util.Objects; /** * This class encapsulates all remote cluster information to be rendered on - * _remote/info requests. + * {@code _remote/info} requests. */ public final class RemoteConnectionInfo implements ToXContentFragment, Writeable { final List seedNodes; - final List httpAddresses; final int connectionsPerCluster; final TimeValue initialConnectionTimeout; final int numNodesConnected; @@ -45,12 +46,10 @@ public final class RemoteConnectionInfo implements ToXContentFragment, Writeable final boolean skipUnavailable; RemoteConnectionInfo(String clusterAlias, List seedNodes, - List httpAddresses, int connectionsPerCluster, int numNodesConnected, TimeValue initialConnectionTimeout, boolean skipUnavailable) { this.clusterAlias = clusterAlias; this.seedNodes = seedNodes; - this.httpAddresses = httpAddresses; this.connectionsPerCluster = connectionsPerCluster; this.numNodesConnected = numNodesConnected; this.initialConnectionTimeout = initialConnectionTimeout; @@ -59,16 +58,45 @@ public final class RemoteConnectionInfo implements ToXContentFragment, Writeable public RemoteConnectionInfo(StreamInput input) throws IOException { seedNodes = input.readList(TransportAddress::new); - httpAddresses = input.readList(TransportAddress::new); + if (input.getVersion().before(Version.V_7_0_0_alpha1)) { + /* + * Versions before 7.0 sent the HTTP addresses of all nodes in the + * remote cluster here but it was expensive to fetch and we + * ultimately figured out how to do without it. So we removed it. + * + * We just throw any HTTP addresses received here on the floor + * because we don't need to do anything with them. 
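The RemoteConnectionInfo hunks above keep the wire format compatible by reading and discarding the HTTP-address list sent by pre-7.0 peers and by writing an empty list back to them. The following is a simplified, self-contained illustration of that read-and-discard / write-empty pattern, using plain java.io streams and an invented integer version constant rather than Elasticsearch's StreamInput/StreamOutput and Version classes.

```java
import java.io.*;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class VersionGatedFieldSketch {
    static final int V_7_0_0 = 7_00_00_00; // hypothetical wire-version constant

    static void writeInfo(DataOutput out, int peerVersion, List<String> seeds) throws IOException {
        writeStringList(out, seeds);
        if (peerVersion < V_7_0_0) {
            // older peers expect HTTP addresses here; pretend we found none
            writeStringList(out, Collections.emptyList());
        }
    }

    static List<String> readInfo(DataInput in, int peerVersion) throws IOException {
        List<String> seeds = readStringList(in);
        if (peerVersion < V_7_0_0) {
            readStringList(in); // discard the HTTP addresses an old peer still sends
        }
        return seeds;
    }

    private static void writeStringList(DataOutput out, List<String> list) throws IOException {
        out.writeInt(list.size());
        for (String s : list) {
            out.writeUTF(s);
        }
    }

    private static List<String> readStringList(DataInput in) throws IOException {
        int n = in.readInt();
        List<String> list = new ArrayList<>(n);
        for (int i = 0; i < n; i++) {
            list.add(in.readUTF());
        }
        return list;
    }

    public static void main(String[] args) throws IOException {
        int oldPeer = 6_08_00_00; // hypothetical pre-7.0 peer
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeInfo(new DataOutputStream(bytes), oldPeer, Collections.singletonList("10.0.0.1:9300"));
        List<String> seeds =
            readInfo(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())), oldPeer);
        System.out.println(seeds); // [10.0.0.1:9300]
    }
}
```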
+ */ + input.readList(TransportAddress::new); + } connectionsPerCluster = input.readVInt(); initialConnectionTimeout = input.readTimeValue(); numNodesConnected = input.readVInt(); clusterAlias = input.readString(); - if (input.getVersion().onOrAfter(Version.V_6_1_0)) { - skipUnavailable = input.readBoolean(); - } else { - skipUnavailable = false; + skipUnavailable = input.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeList(seedNodes); + if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + /* + * Versions before 7.0 sent the HTTP addresses of all nodes in the + * remote cluster here but it was expensive to fetch and we + * ultimately figured out how to do without it. So we removed it. + * + * When sending this request to a node that expects HTTP addresses + * here we pretend that we didn't find any. This *should* be fine + * because, after all, we haven't been using this information for + * a while. + */ + out.writeList(emptyList()); } + out.writeVInt(connectionsPerCluster); + out.writeTimeValue(initialConnectionTimeout); + out.writeVInt(numNodesConnected); + out.writeString(clusterAlias); + out.writeBoolean(skipUnavailable); } @Override @@ -80,11 +108,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.value(addr.toString()); } builder.endArray(); - builder.startArray("http_addresses"); - for (TransportAddress addr : httpAddresses) { - builder.value(addr.toString()); - } - builder.endArray(); builder.field("connected", numNodesConnected > 0); builder.field("num_nodes_connected", numNodesConnected); builder.field("max_connections_per_cluster", connectionsPerCluster); @@ -95,19 +118,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeList(seedNodes); - out.writeList(httpAddresses); - out.writeVInt(connectionsPerCluster); - out.writeTimeValue(initialConnectionTimeout); - out.writeVInt(numNodesConnected); - out.writeString(clusterAlias); - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeBoolean(skipUnavailable); - } - } - @Override public boolean equals(Object o) { if (this == o) return true; @@ -116,7 +126,6 @@ public boolean equals(Object o) { return connectionsPerCluster == that.connectionsPerCluster && numNodesConnected == that.numNodesConnected && Objects.equals(seedNodes, that.seedNodes) && - Objects.equals(httpAddresses, that.httpAddresses) && Objects.equals(initialConnectionTimeout, that.initialConnectionTimeout) && Objects.equals(clusterAlias, that.clusterAlias) && skipUnavailable == that.skipUnavailable; @@ -124,7 +133,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(seedNodes, httpAddresses, connectionsPerCluster, initialConnectionTimeout, + return Objects.hash(seedNodes, connectionsPerCluster, initialConnectionTimeout, numNodesConnected, clusterAlias, skipUnavailable); } } diff --git a/server/src/test/java/org/elasticsearch/BuildTests.java b/server/src/test/java/org/elasticsearch/BuildTests.java index a704bc7c3fcb5..c2b0f3acb941d 100644 --- a/server/src/test/java/org/elasticsearch/BuildTests.java +++ b/server/src/test/java/org/elasticsearch/BuildTests.java @@ -25,6 +25,9 @@ import java.io.IOException; import java.io.InputStream; import java.net.URL; +import java.util.Arrays; +import java.util.Set; +import java.util.stream.Collectors; public class BuildTests extends ESTestCase { @@ -40,17 
+43,30 @@ public void testJarMetadata() throws IOException { public void testEqualsAndHashCode() { Build build = Build.CURRENT; - Build another = new Build(build.shortHash(), build.date(), build.isSnapshot()); + + Build another = new Build(build.flavor(), build.type(), build.shortHash(), build.date(), build.isSnapshot()); assertEquals(build, another); assertEquals(build.hashCode(), another.hashCode()); - Build differentHash = new Build(randomAlphaOfLengthBetween(3, 10), build.date(), build.isSnapshot()); + final Set otherFlavors = + Arrays.stream(Build.Flavor.values()).filter(f -> !f.equals(build.flavor())).collect(Collectors.toSet()); + final Build.Flavor otherFlavor = randomFrom(otherFlavors); + Build differentFlavor = new Build(otherFlavor, build.type(), build.shortHash(), build.date(), build.isSnapshot()); + assertNotEquals(build, differentFlavor); + + final Set otherTypes = + Arrays.stream(Build.Type.values()).filter(f -> !f.equals(build.type())).collect(Collectors.toSet()); + final Build.Type otherType = randomFrom(otherTypes); + Build differentType = new Build(build.flavor(), otherType, build.shortHash(), build.date(), build.isSnapshot()); + assertNotEquals(build, differentType); + + Build differentHash = new Build(build.flavor(), build.type(), randomAlphaOfLengthBetween(3, 10), build.date(), build.isSnapshot()); assertNotEquals(build, differentHash); - Build differentDate = new Build(build.shortHash(), "1970-01-01", build.isSnapshot()); + Build differentDate = new Build(build.flavor(), build.type(), build.shortHash(), "1970-01-01", build.isSnapshot()); assertNotEquals(build, differentDate); - Build differentSnapshot = new Build(build.shortHash(), build.date(), !build.isSnapshot()); + Build differentSnapshot = new Build(build.flavor(), build.type(), build.shortHash(), build.date(), !build.isSnapshot()); assertNotEquals(build, differentSnapshot); } } diff --git a/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java b/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java index 0aa84ad806998..b45449425cb24 100644 --- a/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java +++ b/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java @@ -45,8 +45,8 @@ protected Settings nodeSettings(int nodeOrdinal) { .put(super.nodeSettings(nodeOrdinal)) .put("thread_pool.search.size", 1) .put("thread_pool.search.queue_size", 1) - .put("thread_pool.index.size", 1) - .put("thread_pool.index.queue_size", 1) + .put("thread_pool.write.size", 1) + .put("thread_pool.write.queue_size", 1) .put("thread_pool.get.size", 1) .put("thread_pool.get.queue_size", 1) .build(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java index 463049a8c3c1f..114af3c13e707 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java @@ -67,7 +67,6 @@ public static UpdateSettingsRequest createTestItem() { request.timeout(randomTimeValue()); request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); request.setPreserveExisting(randomBoolean()); - request.flatSettings(randomBoolean()); return request; } @@ -77,7 +76,6 @@ private static 
UpdateSettingsRequest copyRequest(UpdateSettingsRequest request) result.timeout(request.timeout()); result.indicesOptions(request.indicesOptions()); result.setPreserveExisting(request.isPreserveExisting()); - result.flatSettings(request.flatSettings()); return result; } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java index 1a07eac1adbd5..4b96f3d17543c 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java @@ -54,8 +54,8 @@ protected Settings nodeSettings(int nodeOrdinal) { // (see also ThreadedActionListener which is happily spawning threads even when we already got rejected) //.put("thread_pool.listener.queue_size", 1) .put("thread_pool.get.queue_size", 1) - // default is 50 - .put("thread_pool.bulk.queue_size", 30) + // default is 200 + .put("thread_pool.write.queue_size", 30) .build(); } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java index 32dfbe85d426e..bcd16386df3d4 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java @@ -124,7 +124,7 @@ class TestSingleItemBulkWriteAction extends TransportSingleItemBulkWriteAction { + + @Override + protected FieldCapabilitiesRequest createTestInstance() { FieldCapabilitiesRequest request = new FieldCapabilitiesRequest(); int size = randomIntBetween(1, 20); String[] randomFields = new String[size]; @@ -48,49 +53,39 @@ private FieldCapabilitiesRequest randomRequest() { return request; } - public void testEqualsAndHashcode() { - FieldCapabilitiesRequest request = new FieldCapabilitiesRequest(); - request.indices("foo"); - request.indicesOptions(IndicesOptions.lenientExpandOpen()); - request.fields("bar"); - - FieldCapabilitiesRequest other = new FieldCapabilitiesRequest(); - other.indices("foo"); - other.indicesOptions(IndicesOptions.lenientExpandOpen()); - other.fields("bar"); - assertEquals(request, request); - assertEquals(request, other); - assertEquals(request.hashCode(), other.hashCode()); - - // change indices - other.indices("foo", "bar"); - assertNotEquals(request, other); - other.indices("foo"); - assertEquals(request, other); - - // change fields - other.fields("foo", "bar"); - assertNotEquals(request, other); - other.fields("bar"); - assertEquals(request, request); + @Override + protected FieldCapabilitiesRequest createBlankInstance() { + return new FieldCapabilitiesRequest(); + } - // change indices options - other.indicesOptions(IndicesOptions.strictExpand()); - assertNotEquals(request, other); + @Override + protected FieldCapabilitiesRequest mutateInstance(FieldCapabilitiesRequest instance) throws IOException { + List> mutators = new ArrayList<>(); + mutators.add(request -> { + String[] fields = ArrayUtils.concat(request.fields(), new String[] {randomAlphaOfLength(10)}); + request.fields(fields); + }); + mutators.add(request -> { + String[] indices = ArrayUtils.concat(instance.indices(), generateRandomStringArray(5, 10, false, false)); + request.indices(indices); + }); + mutators.add(request -> { + IndicesOptions indicesOptions = randomValueOtherThan(request.indicesOptions(), + () -> IndicesOptions.fromOptions(randomBoolean(), 
randomBoolean(), randomBoolean(), randomBoolean())); + request.indicesOptions(indicesOptions); + }); + mutators.add(request -> request.setMergeResults(!request.isMergeResults())); + FieldCapabilitiesRequest mutatedInstance = copyInstance(instance); + Consumer mutator = randomFrom(mutators); + mutator.accept(mutatedInstance); + return mutatedInstance; } - public void testFieldCapsRequestSerialization() throws IOException { - for (int i = 0; i < 20; i++) { - FieldCapabilitiesRequest request = randomRequest(); - BytesStreamOutput output = new BytesStreamOutput(); - request.writeTo(output); - output.flush(); - StreamInput input = output.bytes().streamInput(); - FieldCapabilitiesRequest deserialized = new FieldCapabilitiesRequest(); - deserialized.readFrom(input); - assertEquals(deserialized, request); - assertEquals(deserialized.hashCode(), request.hashCode()); - } + public void testValidation() { + FieldCapabilitiesRequest request = new FieldCapabilitiesRequest() + .indices("index2"); + ActionRequestValidationException exception = request.validate(); + assertNotNull(exception); } } diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java index 2eaf1d4832f3f..61556fd9b28ed 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java @@ -19,42 +19,150 @@ package org.elasticsearch.action.fieldcaps; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.function.Predicate; -public class FieldCapabilitiesResponseTests extends ESTestCase { - private FieldCapabilitiesResponse randomResponse() { - Map > fieldMap = new HashMap<> (); - int numFields = randomInt(10); - for (int i = 0; i < numFields; i++) { - String fieldName = randomAlphaOfLengthBetween(5, 10); - int numIndices = randomIntBetween(1, 5); - Map indexFieldMap = new HashMap<> (); - for (int j = 0; j < numIndices; j++) { - String index = randomAlphaOfLengthBetween(10, 20); - indexFieldMap.put(index, FieldCapabilitiesTests.randomFieldCaps()); +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; + +public class FieldCapabilitiesResponseTests extends AbstractStreamableXContentTestCase { + + @Override + protected FieldCapabilitiesResponse doParseInstance(XContentParser parser) throws IOException { + return FieldCapabilitiesResponse.fromXContent(parser); + } + + @Override + protected FieldCapabilitiesResponse createBlankInstance() { + return new FieldCapabilitiesResponse(); + } + + @Override + protected FieldCapabilitiesResponse createTestInstance() { + Map> responses = new HashMap<>(); + + String[] fields = 
generateRandomStringArray(5, 10, false, true); + assertNotNull(fields); + + for (String field : fields) { + Map typesToCapabilities = new HashMap<>(); + String[] types = generateRandomStringArray(5, 10, false, false); + assertNotNull(types); + + for (String type : types) { + typesToCapabilities.put(type, FieldCapabilitiesTests.randomFieldCaps(field)); } - fieldMap.put(fieldName, indexFieldMap); + responses.put(field, typesToCapabilities); } - return new FieldCapabilitiesResponse(fieldMap); + return new FieldCapabilitiesResponse(responses); } - public void testSerialization() throws IOException { - for (int i = 0; i < 20; i++) { - FieldCapabilitiesResponse response = randomResponse(); - BytesStreamOutput output = new BytesStreamOutput(); - response.writeTo(output); - output.flush(); - StreamInput input = output.bytes().streamInput(); - FieldCapabilitiesResponse deserialized = new FieldCapabilitiesResponse(); - deserialized.readFrom(input); - assertEquals(deserialized, response); - assertEquals(deserialized.hashCode(), response.hashCode()); + @Override + protected FieldCapabilitiesResponse mutateInstance(FieldCapabilitiesResponse response) { + Map> mutatedResponses = new HashMap<>(response.get()); + + int mutation = response.get().isEmpty() ? 0 : randomIntBetween(0, 2); + + switch (mutation) { + case 0: + String toAdd = randomAlphaOfLength(10); + mutatedResponses.put(toAdd, Collections.singletonMap( + randomAlphaOfLength(10), + FieldCapabilitiesTests.randomFieldCaps(toAdd))); + break; + case 1: + String toRemove = randomFrom(mutatedResponses.keySet()); + mutatedResponses.remove(toRemove); + break; + case 2: + String toReplace = randomFrom(mutatedResponses.keySet()); + mutatedResponses.put(toReplace, Collections.singletonMap( + randomAlphaOfLength(10), + FieldCapabilitiesTests.randomFieldCaps(toReplace))); + break; } + return new FieldCapabilitiesResponse(mutatedResponses); + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + // Disallow random fields from being inserted under the 'fields' key, as this + // map only contains field names, and also under 'fields.FIELD_NAME', as these + // maps only contain type names. 
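The FieldCapabilitiesRequest and FieldCapabilitiesResponse tests above move to the shared mutateInstance style: build a list of single-aspect mutators, pick one at random, and apply it to a copy of the instance so the result must compare unequal to the original. Here is a stand-alone sketch of that pattern with an invented Request class; it is not the real AbstractWireSerializingTestCase plumbing.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.function.Consumer;

public class MutatorPatternSketch {
    static final class Request {
        List<String> fields = new ArrayList<>();
        List<String> indices = new ArrayList<>();
        boolean mergeResults = true;

        Request copy() {
            Request r = new Request();
            r.fields = new ArrayList<>(fields);
            r.indices = new ArrayList<>(indices);
            r.mergeResults = mergeResults;
            return r;
        }
    }

    static Request mutate(Request original, Random random) {
        List<Consumer<Request>> mutators = new ArrayList<>();
        mutators.add(r -> r.fields.add("extra_field"));      // change exactly one aspect each
        mutators.add(r -> r.indices.add("extra_index"));
        mutators.add(r -> r.mergeResults = !r.mergeResults);

        Request mutated = original.copy();
        mutators.get(random.nextInt(mutators.size())).accept(mutated);
        return mutated;
    }

    public static void main(String[] args) {
        Request original = new Request();
        original.fields.add("title");
        Request mutated = mutate(original, new Random());
        boolean differs = !(mutated.fields.equals(original.fields)
            && mutated.indices.equals(original.indices)
            && mutated.mergeResults == original.mergeResults);
        System.out.println("mutated differs from original: " + differs);
    }
}
```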
+ return field -> field.matches("fields(\\.\\w+)?"); + } + + public void testToXContent() throws IOException { + FieldCapabilitiesResponse response = createSimpleResponse(); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + response.toXContent(builder, ToXContent.EMPTY_PARAMS); + + String generatedResponse = BytesReference.bytes(builder).utf8ToString(); + assertEquals(( + "{" + + " \"fields\": {" + + " \"rating\": { " + + " \"keyword\": {" + + " \"type\": \"keyword\"," + + " \"searchable\": false," + + " \"aggregatable\": true," + + " \"indices\": [\"index3\", \"index4\"]," + + " \"non_searchable_indices\": [\"index4\"] " + + " }," + + " \"long\": {" + + " \"type\": \"long\"," + + " \"searchable\": true," + + " \"aggregatable\": false," + + " \"indices\": [\"index1\", \"index2\"]," + + " \"non_aggregatable_indices\": [\"index1\"] " + + " }" + + " }," + + " \"title\": { " + + " \"text\": {" + + " \"type\": \"text\"," + + " \"searchable\": true," + + " \"aggregatable\": false" + + " }" + + " }" + + " }" + + "}").replaceAll("\\s+", ""), generatedResponse); + } + + private static FieldCapabilitiesResponse createSimpleResponse() { + Map titleCapabilities = new HashMap<>(); + titleCapabilities.put("text", new FieldCapabilities("title", "text", true, false)); + + Map ratingCapabilities = new HashMap<>(); + ratingCapabilities.put("long", new FieldCapabilities("rating", "long", + true, false, + new String[]{"index1", "index2"}, + null, + new String[]{"index1"})); + ratingCapabilities.put("keyword", new FieldCapabilities("rating", "keyword", + false, true, + new String[]{"index3", "index4"}, + new String[]{"index4"}, + null)); + + Map> responses = new HashMap<>(); + responses.put("title", titleCapabilities); + responses.put("rating", ratingCapabilities); + return new FieldCapabilitiesResponse(responses); } } diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesTests.java index 53c27645bf298..0237ace962a80 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesTests.java @@ -20,16 +20,26 @@ package org.elasticsearch.action.fieldcaps; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import java.io.IOException; import java.util.Arrays; import static org.hamcrest.Matchers.equalTo; -public class FieldCapabilitiesTests extends AbstractWireSerializingTestCase { +public class FieldCapabilitiesTests extends AbstractSerializingTestCase { + private static final String FIELD_NAME = "field"; + + @Override + protected FieldCapabilities doParseInstance(XContentParser parser) throws IOException { + return FieldCapabilities.fromXContent(FIELD_NAME, parser); + } + @Override protected FieldCapabilities createTestInstance() { - return randomFieldCaps(); + return randomFieldCaps(FIELD_NAME); } @Override @@ -82,7 +92,7 @@ public void testBuilder() { } } - static FieldCapabilities randomFieldCaps() { + static FieldCapabilities randomFieldCaps(String fieldName) { String[] indices = null; if (randomBoolean()) { indices = new String[randomIntBetween(1, 5)]; @@ -104,7 +114,7 @@ static FieldCapabilities randomFieldCaps() { nonAggregatableIndices[i] = 
randomAlphaOfLengthBetween(5, 20); } } - return new FieldCapabilities(randomAlphaOfLengthBetween(5, 20), + return new FieldCapabilities(fieldName, randomAlphaOfLengthBetween(5, 20), randomBoolean(), randomBoolean(), indices, nonSearchableIndices, nonAggregatableIndices); } diff --git a/server/src/test/java/org/elasticsearch/action/main/MainResponseTests.java b/server/src/test/java/org/elasticsearch/action/main/MainResponseTests.java index bf04fe590805d..b833ed674c018 100644 --- a/server/src/test/java/org/elasticsearch/action/main/MainResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/main/MainResponseTests.java @@ -40,7 +40,8 @@ protected MainResponse createTestInstance() { String clusterUuid = randomAlphaOfLength(10); ClusterName clusterName = new ClusterName(randomAlphaOfLength(10)); String nodeName = randomAlphaOfLength(10); - Build build = new Build(randomAlphaOfLength(8), new Date(randomNonNegativeLong()).toString(), randomBoolean()); + final String date = new Date(randomNonNegativeLong()).toString(); + Build build = new Build(Build.Flavor.UNKNOWN, Build.Type.UNKNOWN, randomAlphaOfLength(8), date, randomBoolean()); Version version = VersionUtils.randomVersion(random()); return new MainResponse(nodeName, version, clusterName, clusterUuid , build); } @@ -57,7 +58,8 @@ protected MainResponse doParseInstance(XContentParser parser) { public void testToXContent() throws IOException { String clusterUUID = randomAlphaOfLengthBetween(10, 20); - Build build = new Build(Build.CURRENT.shortHash(), Build.CURRENT.date(), Build.CURRENT.isSnapshot()); + final Build current = Build.CURRENT; + Build build = new Build(current.flavor(), current.type(), current.shortHash(), current.date(), current.isSnapshot()); Version version = Version.CURRENT; MainResponse response = new MainResponse("nodeName", version, new ClusterName("clusterName"), clusterUUID, build); XContentBuilder builder = XContentFactory.jsonBuilder(); @@ -68,9 +70,11 @@ public void testToXContent() throws IOException { + "\"cluster_uuid\":\"" + clusterUUID + "\"," + "\"version\":{" + "\"number\":\"" + version.toString() + "\"," - + "\"build_hash\":\"" + Build.CURRENT.shortHash() + "\"," - + "\"build_date\":\"" + Build.CURRENT.date() + "\"," - + "\"build_snapshot\":" + Build.CURRENT.isSnapshot() + "," + + "\"build_flavor\":\"" + current.flavor().displayName() + "\"," + + "\"build_type\":\"" + current.type().displayName() + "\"," + + "\"build_hash\":\"" + current.shortHash() + "\"," + + "\"build_date\":\"" + current.date() + "\"," + + "\"build_snapshot\":" + current.isSnapshot() + "," + "\"lucene_version\":\"" + version.luceneVersion.toString() + "\"," + "\"minimum_wire_compatibility_version\":\"" + version.minimumCompatibilityVersion().toString() + "\"," + "\"minimum_index_compatibility_version\":\"" + version.minimumIndexCompatibilityVersion().toString() + "\"}," @@ -94,7 +98,7 @@ protected MainResponse mutateInstance(MainResponse mutateInstance) { break; case 2: // toggle the snapshot flag of the original Build parameter - build = new Build(build.shortHash(), build.date(), !build.isSnapshot()); + build = new Build(Build.Flavor.UNKNOWN, Build.Type.UNKNOWN, build.shortHash(), build.date(), !build.isSnapshot()); break; case 3: version = randomValueOtherThan(version, () -> VersionUtils.randomVersion(random())); diff --git a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java index 
6ade2b8781ecf..193878e2f5e04 100644 --- a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.Index; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.shard.ShardId; @@ -62,10 +63,15 @@ private AbstractSearchAsyncAction createAction( final SearchRequest request = new SearchRequest(); request.allowPartialSearchResults(true); + request.preference("_shards:1,3"); return new AbstractSearchAsyncAction("test", null, null, null, - Collections.singletonMap("foo", new AliasFilter(new MatchAllQueryBuilder())), Collections.singletonMap("foo", 2.0f), null, - request, null, new GroupShardsIterator<>(Collections.singletonList( - new SearchShardIterator(null, null, Collections.emptyList(), null))), timeProvider, 0, null, + Collections.singletonMap("foo", new AliasFilter(new MatchAllQueryBuilder())), Collections.singletonMap("foo", 2.0f), + Collections.singletonMap("name", Sets.newHashSet("bar", "baz")),null, request, null, + new GroupShardsIterator<>( + Collections.singletonList( + new SearchShardIterator(null, null, Collections.emptyList(), null) + ) + ), timeProvider, 0, null, new InitialSearchPhase.ArraySearchPhaseResults<>(10), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY) { @Override @@ -117,5 +123,8 @@ public void testBuildShardSearchTransportRequest() { assertArrayEquals(new String[] {"name", "name1"}, shardSearchTransportRequest.indices()); assertEquals(new MatchAllQueryBuilder(), shardSearchTransportRequest.getAliasFilter().getQueryBuilder()); assertEquals(2.0f, shardSearchTransportRequest.indexBoost(), 0.0f); + assertArrayEquals(new String[] {"name", "name1"}, shardSearchTransportRequest.indices()); + assertArrayEquals(new String[] {"bar", "baz"}, shardSearchTransportRequest.indexRoutings()); + assertEquals("_shards:1,3", shardSearchTransportRequest.preference()); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java index d60f29a5d5395..8b1741967734c 100644 --- a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java @@ -78,12 +78,12 @@ public void sendCanMatch(Transport.Connection connection, ShardSearchTransportRe 2, randomBoolean(), primaryNode, replicaNode); final SearchRequest searchRequest = new SearchRequest(); searchRequest.allowPartialSearchResults(true); - + CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase(logger, searchTransportService, (clusterAlias, node) -> lookup.get(node), Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)), - Collections.emptyMap(), EsExecutors.newDirectExecutorService(), + Collections.emptyMap(), Collections.emptyMap(), EsExecutors.newDirectExecutorService(), searchRequest, null, shardsIter, timeProvider, 0, null, (iter) -> new SearchPhase("test") { @Override @@ -159,12 +159,12 @@ public void sendCanMatch(Transport.Connection connection, ShardSearchTransportRe final 
SearchRequest searchRequest = new SearchRequest(); searchRequest.allowPartialSearchResults(true); - + CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase(logger, searchTransportService, (clusterAlias, node) -> lookup.get(node), Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)), - Collections.emptyMap(), EsExecutors.newDirectExecutorService(), + Collections.emptyMap(), Collections.emptyMap(), EsExecutors.newDirectExecutorService(), searchRequest, null, shardsIter, timeProvider, 0, null, (iter) -> new SearchPhase("test") { @Override @@ -222,6 +222,7 @@ public void sendCanMatch( (clusterAlias, node) -> lookup.get(node), Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)), Collections.emptyMap(), + Collections.emptyMap(), EsExecutors.newDirectExecutorService(), searchRequest, null, diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java index c731d1aaabed0..82e0fcaf5d667 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -106,6 +106,7 @@ public void onFailure(Exception e) { return lookup.get(node); }, aliasFilters, Collections.emptyMap(), + Collections.emptyMap(), null, request, responseListener, @@ -198,6 +199,7 @@ public void onFailure(Exception e) { return lookup.get(node); }, aliasFilters, Collections.emptyMap(), + Collections.emptyMap(), null, request, responseListener, @@ -303,6 +305,7 @@ public void sendFreeContext(Transport.Connection connection, long contextId, Ori return lookup.get(node); }, aliasFilters, Collections.emptyMap(), + Collections.emptyMap(), executor, request, responseListener, diff --git a/server/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java b/server/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java index bcc70773146c6..b3e9eda4e1303 100644 --- a/server/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java +++ b/server/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.monitor.jvm.JvmInfo; import java.nio.file.Path; +import java.util.Locale; import java.util.Map; import java.util.function.Consumer; @@ -65,7 +66,14 @@ private void runTestThatVersionIsMutuallyExclusiveToOtherOptions(String... args) private void runTestThatVersionIsReturned(String... 
args) throws Exception { runTestVersion(ExitCodes.OK, output -> { assertThat(output, containsString("Version: " + Version.displayVersion(Version.CURRENT, Build.CURRENT.isSnapshot()))); - assertThat(output, containsString("Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date())); + final String expectedBuildOutput = String.format( + Locale.ROOT, + "Build: %s/%s/%s/%s", + Build.CURRENT.flavor().displayName(), + Build.CURRENT.type().displayName(), + Build.CURRENT.shortHash(), + Build.CURRENT.date()); + assertThat(output, containsString(expectedBuildOutput)); assertThat(output, containsString("JVM: " + JvmInfo.jvmInfo().version())); }, args); } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java index 211ae48d04355..49df78565d315 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java @@ -433,7 +433,7 @@ private void setupIndicesService() throws Exception { when(docMapper.routingFieldMapper()).thenReturn(routingMapper); - when(mapper.docMappers(anyBoolean())).thenReturn(Collections.singletonList(docMapper)); + when(mapper.documentMapper()).thenReturn(docMapper); final Index index = new Index("target", "tgt1234"); final Supplier supplier = mock(Supplier.class); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java index 65526896864d6..8a9b00a8d4ff7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java @@ -83,7 +83,7 @@ public void setUp() throws Exception { } /** - * puts primary shard routings into initializing state + * puts primary shard indexRoutings into initializing state */ private void initPrimaries() { logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java index 055adbaebbce5..349997d7793eb 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java @@ -83,7 +83,7 @@ public void setUp() throws Exception { } /** - * puts primary shard routings into initializing state + * puts primary shard indexRoutings into initializing state */ private void initPrimaries() { logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1); diff --git a/server/src/test/java/org/elasticsearch/common/cache/CacheTests.java b/server/src/test/java/org/elasticsearch/common/cache/CacheTests.java index 5675a7b524bd3..773585cc3b494 100644 --- a/server/src/test/java/org/elasticsearch/common/cache/CacheTests.java +++ b/server/src/test/java/org/elasticsearch/common/cache/CacheTests.java @@ -27,6 +27,7 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Random; @@ -824,4 +825,34 @@ public void testTorture() throws BrokenBarrierException, InterruptedException { cache.refresh(); assertEquals(500, cache.count()); } + + public void testRemoveUsingValuesIterator() { + final 
List> removalNotifications = new ArrayList<>(); + Cache cache = + CacheBuilder.builder() + .setMaximumWeight(numberOfEntries) + .removalListener(removalNotifications::add) + .build(); + + for (int i = 0; i < numberOfEntries; i++) { + cache.put(i, Integer.toString(i)); + } + + assertThat(removalNotifications.size(), is(0)); + final List expectedRemovals = new ArrayList<>(); + Iterator valueIterator = cache.values().iterator(); + while (valueIterator.hasNext()) { + String value = valueIterator.next(); + if (randomBoolean()) { + valueIterator.remove(); + expectedRemovals.add(value); + } + } + + assertEquals(expectedRemovals.size(), removalNotifications.size()); + for (int i = 0; i < expectedRemovals.size(); i++) { + assertEquals(expectedRemovals.get(i), removalNotifications.get(i).getValue()); + assertEquals(RemovalNotification.RemovalReason.INVALIDATED, removalNotifications.get(i).getRemovalReason()); + } + } } diff --git a/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java b/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java index 70deb8a4ba88e..09dc8607bc484 100644 --- a/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java +++ b/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.ModuleTestCase; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.util.BigArrays; @@ -151,6 +152,7 @@ public Map> getTransports(Settings settings, ThreadP assertSame(custom, module.getTransportSupplier()); assertTrue(module.isTransportClient()); assertFalse(module.isHttpEnabled()); + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } public void testRegisterHttpTransport() { @@ -181,6 +183,7 @@ public Map> getHttpTransports(Settings set assertFalse(newModule.isTransportClient()); assertFalse(newModule.isHttpEnabled()); expectThrows(IllegalStateException.class, () -> newModule.getHttpServerTransportSupplier()); + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } public void testOverrideDefault() { @@ -306,7 +309,7 @@ public List getTransportInterceptors(NamedWriteableRegistr }); }); assertEquals("interceptor must not be null", nullPointerException.getMessage()); - + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } private NetworkModule newNetworkModule(Settings settings, boolean transportClient, NetworkPlugin... 
plugins) { diff --git a/server/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java b/server/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java index 27b2deb4656d8..e193ea34498cf 100644 --- a/server/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java +++ b/server/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java @@ -223,25 +223,36 @@ protected Reader instanceReader() { } @Override - protected ByteSizeValue mutateInstance(ByteSizeValue instance) throws IOException { - long size = instance.getSize(); - ByteSizeUnit unit = instance.getUnit(); + protected ByteSizeValue mutateInstance(final ByteSizeValue instance) { + final long instanceSize = instance.getSize(); + final ByteSizeUnit instanceUnit = instance.getUnit(); + final long mutateSize; + final ByteSizeUnit mutateUnit; switch (between(0, 1)) { case 0: - long unitBytes = unit.toBytes(1); - size = randomValueOtherThan(size, () -> randomNonNegativeLong() / unitBytes); + final long unitBytes = instanceUnit.toBytes(1); + mutateSize = randomValueOtherThan(instanceSize, () -> randomNonNegativeLong() / unitBytes); + mutateUnit = instanceUnit; break; case 1: - unit = randomValueOtherThan(unit, () -> randomFrom(ByteSizeUnit.values())); - long newUnitBytes = unit.toBytes(1); - if (size >= Long.MAX_VALUE / newUnitBytes) { - size = randomValueOtherThan(size, () -> randomNonNegativeLong() / newUnitBytes); + mutateUnit = randomValueOtherThan(instanceUnit, () -> randomFrom(ByteSizeUnit.values())); + final long newUnitBytes = mutateUnit.toBytes(1); + /* + * If size is zero we can not reuse zero because zero with any unit will be equal to zero with any other unit so in this case we + * need to randomize a new size. Additionally, if the size unit pair is such that the representation would be such that the + * number of represented bytes would exceed Long.Max_VALUE, we have to randomize a new size too. 
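The rewritten ByteSizeValueTests.mutateInstance above guards against two traps when switching the unit: a size of zero is equal under every unit, and a size at or above Long.MAX_VALUE divided by the new unit's byte count would overflow once expressed in bytes. A tiny standalone check capturing those two conditions, using an invented helper name:

```java
public class ByteSizeMutationGuard {
    // true when the existing size cannot be reused with the new unit
    static boolean needsNewSize(long size, long bytesPerNewUnit) {
        return size == 0 || size >= Long.MAX_VALUE / bytesPerNewUnit;
    }

    public static void main(String[] args) {
        long bytesPerPb = 1024L * 1024 * 1024 * 1024 * 1024; // bytes in one pebibyte
        System.out.println(needsNewSize(0, bytesPerPb));           // true: zero is unit-independent
        System.out.println(needsNewSize(10_000_000L, bytesPerPb)); // true: would overflow as bytes
        System.out.println(needsNewSize(5, bytesPerPb));           // false: safe to keep
    }
}
```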
+ */ + if (instanceSize == 0 || instanceSize >= Long.MAX_VALUE / newUnitBytes) { + mutateSize = randomValueOtherThanMany( + v -> v == instanceSize && v >= Long.MAX_VALUE / newUnitBytes, () -> randomNonNegativeLong() / newUnitBytes); + } else { + mutateSize = instanceSize; } break; default: throw new AssertionError("Invalid randomisation branch"); } - return new ByteSizeValue(size, unit); + return new ByteSizeValue(mutateSize, mutateUnit); } public void testParse() { diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutorTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutorTests.java index b6110a85eceb6..33917ceff685b 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutorTests.java @@ -35,8 +35,8 @@ public class EsThreadPoolExecutorTests extends ESSingleNodeTestCase { protected Settings nodeSettings() { return Settings.builder() .put("node.name", "es-thread-pool-executor-tests") - .put("thread_pool.bulk.size", 1) - .put("thread_pool.bulk.queue_size", 0) + .put("thread_pool.write.size", 1) + .put("thread_pool.write.queue_size", 0) .put("thread_pool.search.size", 1) .put("thread_pool.search.queue_size", 1) .build(); @@ -44,7 +44,7 @@ protected Settings nodeSettings() { public void testRejectedExecutionExceptionContainsNodeName() { // we test a fixed and an auto-queue executor but not scaling since it does not reject - runThreadPoolExecutorTest(1, ThreadPool.Names.BULK); + runThreadPoolExecutorTest(1, ThreadPool.Names.WRITE); runThreadPoolExecutorTest(2, ThreadPool.Names.SEARCH); } diff --git a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java index 5000af6688f83..28fa440d96ac2 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java @@ -21,7 +21,6 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TopDocs; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; @@ -131,16 +130,16 @@ public void testRefreshTaskIsUpdated() throws IOException { assertTrue(indexService.getRefreshTask().mustReschedule()); // now disable - IndexMetaData metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)).build(); - indexService.updateMetaData(metaData); + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)).get(); assertNotSame(refreshTask, indexService.getRefreshTask()); assertTrue(refreshTask.isClosed()); assertFalse(refreshTask.isScheduled()); assertFalse(indexService.getRefreshTask().mustReschedule()); // set it to 100ms - metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "100ms")).build(); - indexService.updateMetaData(metaData); + client().admin().indices().prepareUpdateSettings("test") + 
.setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "100ms")).get(); assertNotSame(refreshTask, indexService.getRefreshTask()); assertTrue(refreshTask.isClosed()); @@ -150,8 +149,8 @@ public void testRefreshTaskIsUpdated() throws IOException { assertEquals(100, refreshTask.getInterval().millis()); // set it to 200ms - metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "200ms")).build(); - indexService.updateMetaData(metaData); + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "200ms")).get(); assertNotSame(refreshTask, indexService.getRefreshTask()); assertTrue(refreshTask.isClosed()); @@ -161,8 +160,8 @@ public void testRefreshTaskIsUpdated() throws IOException { assertEquals(200, refreshTask.getInterval().millis()); // set it to 200ms again - metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "200ms")).build(); - indexService.updateMetaData(metaData); + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "200ms")).get(); assertSame(refreshTask, indexService.getRefreshTask()); assertTrue(indexService.getRefreshTask().mustReschedule()); assertTrue(refreshTask.isScheduled()); @@ -174,7 +173,9 @@ public void testRefreshTaskIsUpdated() throws IOException { } public void testFsyncTaskIsRunning() throws IOException { - IndexService indexService = createIndex("test", Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC).build()); + Settings settings = Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC).build(); + IndexService indexService = createIndex("test", settings); IndexService.AsyncTranslogFSync fsyncTask = indexService.getFsyncTask(); assertNotNull(fsyncTask); assertEquals(5000, fsyncTask.getInterval().millis()); @@ -198,12 +199,10 @@ public void testRefreshActuallyWorks() throws Exception { IndexShard shard = indexService.getShard(0); client().prepareIndex("test", "test", "0").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); // now disable the refresh - IndexMetaData metaData = IndexMetaData.builder(indexService.getMetaData()) - .settings(Settings.builder().put(indexService.getMetaData().getSettings()) - .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)).build(); + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)).get(); // when we update we reschedule the existing task AND fire off an async refresh to make sure we make everything visible // before that this is why we need to wait for the refresh task to be unscheduled and the first doc to be visible - indexService.updateMetaData(metaData); assertTrue(refreshTask.isClosed()); refreshTask = indexService.getRefreshTask(); assertBusy(() -> { @@ -217,10 +216,8 @@ public void testRefreshActuallyWorks() throws Exception { assertFalse(refreshTask.isClosed()); // refresh every millisecond client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); - 
metaData = IndexMetaData.builder(indexService.getMetaData()) - .settings(Settings.builder().put(indexService.getMetaData().getSettings()) - .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1ms")).build(); - indexService.updateMetaData(metaData); + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1ms")).get(); assertTrue(refreshTask.isClosed()); assertBusy(() -> { // this one becomes visible due to the force refresh we are running on updateMetaData if the interval changes @@ -250,7 +247,7 @@ public void testAsyncFsyncActuallyWorks() throws Exception { client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); IndexShard shard = indexService.getShard(0); assertBusy(() -> { - assertFalse(shard.getTranslog().syncNeeded()); + assertFalse(shard.isSyncNeeded()); }); } @@ -275,7 +272,7 @@ public void testRescheduleAsyncFsync() throws Exception { client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); assertNotNull(indexService.getFsyncTask()); final IndexShard shard = indexService.getShard(0); - assertBusy(() -> assertFalse(shard.getTranslog().syncNeeded())); + assertBusy(() -> assertFalse(shard.isSyncNeeded())); client() .admin() @@ -303,15 +300,13 @@ public void testAsyncTranslogTrimActuallyWorks() throws Exception { assertTrue(indexService.getRefreshTask().mustReschedule()); client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); client().admin().indices().prepareFlush("test").get(); - IndexMetaData metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder() - .put(indexService.getMetaData().getSettings()) - .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), -1) - .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), -1)) - .build(); - indexService.updateMetaData(metaData); - + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), -1) + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), -1)) + .get(); IndexShard shard = indexService.getShard(0); - assertBusy(() -> assertThat(shard.getTranslog().totalOperations(), equalTo(0))); + assertBusy(() -> assertThat(shard.estimateTranslogOperationsFromMinSeq(0L), equalTo(0))); } public void testIllegalFsyncInterval() { diff --git a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java index 6e8e679188c66..4ef9c36d9306c 100644 --- a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java @@ -122,6 +122,16 @@ public Scroll scroll() { return null; } + @Override + public String[] indexRoutings() { + return null; + } + + @Override + public String preference() { + return null; + } + @Override public void setProfile(boolean profile) { diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 60913c644eadb..e769485443a0a 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -1576,6 +1576,23 @@ public void testInternalVersioningOnPrimary() 
throws IOException { assertOpsOnPrimary(ops, Versions.NOT_FOUND, true, engine); } + public void testVersionOnPrimaryWithConcurrentRefresh() throws Exception { + List<Engine.Operation> ops = generateSingleDocHistory(false, VersionType.INTERNAL, false, 2, 10, 100); + CountDownLatch latch = new CountDownLatch(1); + AtomicBoolean running = new AtomicBoolean(true); + Thread refreshThread = new Thread(() -> { + latch.countDown(); + while (running.get()) { + engine.refresh("test"); + } + }); + refreshThread.start(); + latch.await(); + assertOpsOnPrimary(ops, Versions.NOT_FOUND, true, engine); + running.set(false); + refreshThread.join(); + } + private int assertOpsOnPrimary(List<Engine.Operation> ops, long currentOpVersion, boolean docDeleted, InternalEngine engine) throws IOException { String lastFieldValue = null; @@ -3707,15 +3724,13 @@ protected long doGenerateSeqNoForOperation(Operation operation) { noOpEngine.recoverFromTranslog(); final int gapsFilled = noOpEngine.fillSeqNoGaps(primaryTerm.get()); final String reason = randomAlphaOfLength(16); - noOpEngine.noOp( - new Engine.NoOp( - maxSeqNo + 1, - primaryTerm.get(), - randomFrom(PRIMARY, REPLICA, PEER_RECOVERY, LOCAL_TRANSLOG_RECOVERY), - System.nanoTime(), - reason)); + noOpEngine.noOp(new Engine.NoOp(maxSeqNo + 1, primaryTerm.get(), LOCAL_TRANSLOG_RECOVERY, System.nanoTime(), reason)); assertThat(noOpEngine.getLocalCheckpointTracker().getCheckpoint(), equalTo((long) (maxSeqNo + 1))); - assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(1 + gapsFilled)); + assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(gapsFilled)); + noOpEngine.noOp( + new Engine.NoOp(maxSeqNo + 2, primaryTerm.get(), randomFrom(PRIMARY, REPLICA, PEER_RECOVERY), System.nanoTime(), reason)); + assertThat(noOpEngine.getLocalCheckpointTracker().getCheckpoint(), equalTo((long) (maxSeqNo + 2))); + assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(gapsFilled + 1)); // skip to the op that we added to the translog Translog.Operation op; Translog.Operation last = null; @@ -3727,7 +3742,7 @@ protected long doGenerateSeqNoForOperation(Operation operation) { assertNotNull(last); assertThat(last, instanceOf(Translog.NoOp.class)); final Translog.NoOp noOp = (Translog.NoOp) last; - assertThat(noOp.seqNo(), equalTo((long) (maxSeqNo + 1))); + assertThat(noOp.seqNo(), equalTo((long) (maxSeqNo + 2))); assertThat(noOp.primaryTerm(), equalTo(primaryTerm.get())); assertThat(noOp.reason(), equalTo(reason)); } finally { diff --git a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java index ce3ddff00dade..286e85cef3fc6 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java @@ -25,6 +25,7 @@ import org.apache.lucene.util.RamUsageTester; import org.apache.lucene.util.TestUtil; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -39,6 +40,8 @@ import java.util.concurrent.atomic.AtomicLong; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; public class LiveVersionMapTests extends ESTestCase { @@ -47,9 +50,8 @@ public void testRamBytesUsed() throws Exception { for (int i = 0; i < 100000; ++i) { BytesRefBuilder uid = new
BytesRefBuilder(); uid.copyChars(TestUtil.randomSimpleString(random(), 10, 20)); - VersionValue version = new VersionValue(randomLong(), randomLong(), randomLong()); try (Releasable r = map.acquireLock(uid.toBytesRef())) { - map.putUnderLock(uid.toBytesRef(), version); + map.putIndexUnderLock(uid.toBytesRef(), randomIndexVersionValue()); } } long actualRamBytesUsed = RamUsageTester.sizeOf(map); @@ -64,9 +66,8 @@ public void testRamBytesUsed() throws Exception { for (int i = 0; i < 100000; ++i) { BytesRefBuilder uid = new BytesRefBuilder(); uid.copyChars(TestUtil.randomSimpleString(random(), 10, 20)); - VersionValue version = new VersionValue(randomLong(), randomLong(), randomLong()); try (Releasable r = map.acquireLock(uid.toBytesRef())) { - map.putUnderLock(uid.toBytesRef(), version); + map.putIndexUnderLock(uid.toBytesRef(), randomIndexVersionValue()); } } actualRamBytesUsed = RamUsageTester.sizeOf(map); @@ -100,14 +101,15 @@ private BytesRef uid(String string) { public void testBasics() throws IOException { LiveVersionMap map = new LiveVersionMap(); try (Releasable r = map.acquireLock(uid("test"))) { - map.putUnderLock(uid("test"), new VersionValue(1,1,1)); - assertEquals(new VersionValue(1,1,1), map.getUnderLock(uid("test"))); + Translog.Location tlogLoc = randomTranslogLocation(); + map.putIndexUnderLock(uid("test"), new IndexVersionValue(tlogLoc, 1, 1, 1)); + assertEquals(new IndexVersionValue(tlogLoc, 1, 1, 1), map.getUnderLock(uid("test"))); map.beforeRefresh(); - assertEquals(new VersionValue(1,1,1), map.getUnderLock(uid("test"))); + assertEquals(new IndexVersionValue(tlogLoc, 1, 1, 1), map.getUnderLock(uid("test"))); map.afterRefresh(randomBoolean()); assertNull(map.getUnderLock(uid("test"))); - map.putUnderLock(uid("test"), new DeleteVersionValue(1,1,1,1)); + map.putDeleteUnderLock(uid("test"), new DeleteVersionValue(1,1,1,1)); assertEquals(new DeleteVersionValue(1,1,1,1), map.getUnderLock(uid("test"))); map.beforeRefresh(); assertEquals(new DeleteVersionValue(1,1,1,1), map.getUnderLock(uid("test"))); @@ -154,21 +156,24 @@ public void testConcurrently() throws IOException, InterruptedException { BytesRef bytesRef = randomFrom(random(), keyList); try (Releasable r = map.acquireLock(bytesRef)) { VersionValue versionValue = values.computeIfAbsent(bytesRef, - v -> new VersionValue(randomLong(), maxSeqNo.incrementAndGet(), randomLong())); + v -> new IndexVersionValue( + randomTranslogLocation(), randomLong(), maxSeqNo.incrementAndGet(), randomLong())); boolean isDelete = versionValue instanceof DeleteVersionValue; if (isDelete) { map.removeTombstoneUnderLock(bytesRef); deletes.remove(bytesRef); } if (isDelete == false && rarely()) { - versionValue = new DeleteVersionValue(versionValue.version + 1, maxSeqNo.incrementAndGet(), - versionValue.term, clock.getAndIncrement()); + versionValue = new DeleteVersionValue(versionValue.version + 1, + maxSeqNo.incrementAndGet(), versionValue.term, clock.getAndIncrement()); deletes.put(bytesRef, (DeleteVersionValue) versionValue); + map.putDeleteUnderLock(bytesRef, (DeleteVersionValue) versionValue); } else { - versionValue = new VersionValue(versionValue.version + 1, maxSeqNo.incrementAndGet(), versionValue.term); + versionValue = new IndexVersionValue(randomTranslogLocation(), + versionValue.version + 1, maxSeqNo.incrementAndGet(), versionValue.term); + map.putIndexUnderLock(bytesRef, (IndexVersionValue) versionValue); } values.put(bytesRef, versionValue); - map.putUnderLock(bytesRef, versionValue); } if (rarely()) { final long pruneSeqNo = 
randomLongBetween(0, maxSeqNo.get()); @@ -268,7 +273,7 @@ public void testCarryOnSafeAccess() throws IOException { } try (Releasable r = map.acquireLock(uid(""))) { - map.maybePutUnderLock(new BytesRef(""), new VersionValue(randomLong(), randomLong(), randomLong())); + map.maybePutIndexUnderLock(new BytesRef(""), randomIndexVersionValue()); } assertFalse(map.isUnsafe()); assertEquals(1, map.getAllCurrent().size()); @@ -278,7 +283,7 @@ public void testCarryOnSafeAccess() throws IOException { assertFalse(map.isUnsafe()); assertFalse(map.isSafeAccessRequired()); try (Releasable r = map.acquireLock(uid(""))) { - map.maybePutUnderLock(new BytesRef(""), new VersionValue(randomLong(), randomLong(), randomLong())); + map.maybePutIndexUnderLock(new BytesRef(""), randomIndexVersionValue()); } assertTrue(map.isUnsafe()); assertFalse(map.isSafeAccessRequired()); @@ -288,7 +293,7 @@ public void testCarryOnSafeAccess() throws IOException { public void testRefreshTransition() throws IOException { LiveVersionMap map = new LiveVersionMap(); try (Releasable r = map.acquireLock(uid("1"))) { - map.maybePutUnderLock(uid("1"), new VersionValue(randomLong(), randomLong(), randomLong())); + map.maybePutIndexUnderLock(uid("1"), randomIndexVersionValue()); assertTrue(map.isUnsafe()); assertNull(map.getUnderLock(uid("1"))); map.beforeRefresh(); @@ -299,7 +304,7 @@ public void testRefreshTransition() throws IOException { assertFalse(map.isUnsafe()); map.enforceSafeAccess(); - map.maybePutUnderLock(uid("1"), new VersionValue(randomLong(), randomLong(), randomLong())); + map.maybePutIndexUnderLock(uid("1"), randomIndexVersionValue()); assertFalse(map.isUnsafe()); assertNotNull(map.getUnderLock(uid("1"))); map.beforeRefresh(); @@ -320,9 +325,10 @@ public void testAddAndDeleteRefreshConcurrently() throws IOException, Interrupte AtomicLong version = new AtomicLong(); CountDownLatch start = new CountDownLatch(2); BytesRef uid = uid("1"); - VersionValue initialVersion = new VersionValue(version.incrementAndGet(), 1, 1); + VersionValue initialVersion; try (Releasable ignore = map.acquireLock(uid)) { - map.putUnderLock(uid, initialVersion); + initialVersion = new IndexVersionValue(randomTranslogLocation(), version.incrementAndGet(), 1, 1); + map.putIndexUnderLock(uid, (IndexVersionValue) initialVersion); } Thread t = new Thread(() -> { start.countDown(); @@ -337,14 +343,13 @@ public void testAddAndDeleteRefreshConcurrently() throws IOException, Interrupte } else { underLock = nextVersionValue; } - if (underLock.isDelete()) { - nextVersionValue = new VersionValue(version.incrementAndGet(), 1, 1); - } else if (randomBoolean()) { - nextVersionValue = new VersionValue(version.incrementAndGet(), 1, 1); + if (underLock.isDelete() || randomBoolean()) { + nextVersionValue = new IndexVersionValue(randomTranslogLocation(), version.incrementAndGet(), 1, 1); + map.putIndexUnderLock(uid, (IndexVersionValue) nextVersionValue); } else { nextVersionValue = new DeleteVersionValue(version.incrementAndGet(), 1, 1, 0); + map.putDeleteUnderLock(uid, (DeleteVersionValue) nextVersionValue); } - map.putUnderLock(uid, nextVersionValue); } } } catch (Exception e) { @@ -375,7 +380,7 @@ public void testPruneTombstonesWhileLocked() throws InterruptedException, IOExce BytesRef uid = uid("1"); ; try (Releasable ignore = map.acquireLock(uid)) { - map.putUnderLock(uid, new DeleteVersionValue(0, 0, 0, 0)); + map.putDeleteUnderLock(uid, new DeleteVersionValue(0, 0, 0, 0)); map.beforeRefresh(); // refresh otherwise we won't prune since it's tracked by the 
current map map.afterRefresh(false); Thread thread = new Thread(() -> { @@ -392,4 +397,50 @@ public void testPruneTombstonesWhileLocked() throws InterruptedException, IOExce thread.join(); assertEquals(0, map.getAllTombstones().size()); } + + public void testRandomlyIndexDeleteAndRefresh() throws Exception { + final LiveVersionMap versionMap = new LiveVersionMap(); + final BytesRef uid = uid("1"); + final long versions = between(10, 1000); + VersionValue latestVersion = null; + for (long i = 0; i < versions; i++) { + if (randomBoolean()) { + versionMap.beforeRefresh(); + versionMap.afterRefresh(randomBoolean()); + } + if (randomBoolean()) { + versionMap.enforceSafeAccess(); + } + try (Releasable ignore = versionMap.acquireLock(uid)) { + if (randomBoolean()) { + latestVersion = new DeleteVersionValue(randomNonNegativeLong(), randomLong(), randomLong(), randomLong()); + versionMap.putDeleteUnderLock(uid, (DeleteVersionValue) latestVersion); + assertThat(versionMap.getUnderLock(uid), equalTo(latestVersion)); + } else if (randomBoolean()) { + latestVersion = new IndexVersionValue(randomTranslogLocation(), randomNonNegativeLong(), randomLong(), randomLong()); + versionMap.maybePutIndexUnderLock(uid, (IndexVersionValue) latestVersion); + if (versionMap.isSafeAccessRequired()) { + assertThat(versionMap.getUnderLock(uid), equalTo(latestVersion)); + } else { + assertThat(versionMap.getUnderLock(uid), nullValue()); + } + } + if (versionMap.getUnderLock(uid) != null) { + assertThat(versionMap.getUnderLock(uid), equalTo(latestVersion)); + } + } + } + } + + IndexVersionValue randomIndexVersionValue() { + return new IndexVersionValue(randomTranslogLocation(), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()); + } + + Translog.Location randomTranslogLocation() { + if (randomBoolean()) { + return null; + } else { + return new Translog.Location(randomNonNegativeLong(), randomNonNegativeLong(), randomInt()); + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/VersionValueTests.java b/server/src/test/java/org/elasticsearch/index/engine/VersionValueTests.java index 3b953edece1b4..242a568295dd6 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/VersionValueTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/VersionValueTests.java @@ -20,12 +20,17 @@ package org.elasticsearch.index.engine; import org.apache.lucene.util.RamUsageTester; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.test.ESTestCase; public class VersionValueTests extends ESTestCase { - public void testRamBytesUsed() { - VersionValue versionValue = new VersionValue(randomLong(), randomLong(), randomLong()); + public void testIndexRamBytesUsed() { + Translog.Location translogLoc = null; + if (randomBoolean()) { + translogLoc = new Translog.Location(randomNonNegativeLong(), randomNonNegativeLong(), randomInt()); + } + IndexVersionValue versionValue = new IndexVersionValue(translogLoc, randomLong(), randomLong(), randomLong()); assertEquals(RamUsageTester.sizeOf(versionValue), versionValue.ramBytesUsed()); } diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java index 7f407dd1c01d1..3b29d15bf3fb2 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java @@ -52,7 +52,6 @@ public void 
testDocValue() throws Exception { final DocumentMapper mapper = mapperService.documentMapperParser().parse("test", new CompressedXContent(mapping)); - List bytesList1 = new ArrayList<>(2); bytesList1.add(randomBytes()); bytesList1.add(randomBytes()); @@ -123,22 +122,26 @@ public void testDocValue() throws Exception { // Test whether ScriptDocValues.BytesRefs makes a deepcopy fieldData = indexFieldData.load(reader); ScriptDocValues scriptValues = fieldData.getScriptValues(); - scriptValues.setNextDocId(0); - assertEquals(2, scriptValues.size()); - assertEquals(bytesList1.get(0), scriptValues.get(0)); - assertEquals(bytesList1.get(1), scriptValues.get(1)); - - scriptValues.setNextDocId(1); - assertEquals(1, scriptValues.size()); - assertEquals(bytes1, scriptValues.get(0)); - - scriptValues.setNextDocId(2); - assertEquals(0, scriptValues.size()); - - scriptValues.setNextDocId(3); - assertEquals(2, scriptValues.size()); - assertEquals(bytesList2.get(0), scriptValues.get(0)); - assertEquals(bytesList2.get(1), scriptValues.get(1)); + Object[][] retValues = new BytesRef[4][0]; + for (int i = 0; i < 4; i++) { + scriptValues.setNextDocId(i); + retValues[i] = new BytesRef[scriptValues.size()]; + for (int j = 0; j < retValues[i].length; j++) { + retValues[i][j] = scriptValues.get(j); + } + } + assertEquals(2, retValues[0].length); + assertEquals(bytesList1.get(0), retValues[0][0]); + assertEquals(bytesList1.get(1), retValues[0][1]); + + assertEquals(1, retValues[1].length); + assertEquals(bytes1, retValues[1][0]); + + assertEquals(0, retValues[2].length); + + assertEquals(2, retValues[3].length); + assertEquals(bytesList2.get(0), retValues[3][0]); + assertEquals(bytesList2.get(1), retValues[3][1]); } private static BytesRef randomBytes() { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java index 03cc183b906d3..0de90631a14b3 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.index.mapper; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Priority; @@ -41,10 +42,12 @@ import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.mapper.GeoPointFieldMapper.Names.IGNORE_Z_VALUE; +import static org.elasticsearch.index.mapper.GeoPointFieldMapper.Names.NULL_VALUE; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { @@ -349,4 +352,50 @@ public void testEmptyName() throws Exception { ); assertThat(e.getMessage(), containsString("name cannot be empty string")); } + + public void testNullValue() throws Exception { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("location") + .field("type", "geo_point") + .field(NULL_VALUE, 
"1,2") + .endObject().endObject() + .endObject().endObject()); + + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type", new CompressedXContent(mapping)); + FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location"); + assertThat(fieldMapper, instanceOf(GeoPointFieldMapper.class)); + + Object nullValue = fieldMapper.fieldType().nullValue(); + assertThat(nullValue, equalTo(new GeoPoint(1, 2))); + + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .nullField("location") + .endObject()), + XContentType.JSON)); + + assertThat(doc.rootDoc().getField("location"), notNullValue()); + BytesRef defaultValue = doc.rootDoc().getField("location").binaryValue(); + + doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("location", "1, 2") + .endObject()), + XContentType.JSON)); + // Shouldn't matter if we specify the value explicitly or use null value + assertThat(defaultValue, equalTo(doc.rootDoc().getField("location").binaryValue())); + + doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("location", "3, 4") + .endObject()), + XContentType.JSON)); + // Shouldn't matter if we specify the value explicitly or use null value + assertThat(defaultValue, not(equalTo(doc.rootDoc().getField("location").binaryValue()))); + } + } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index 59ef784aea3a2..2365dec69ecb5 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -119,7 +119,7 @@ public void testIndexIntoDefaultMapping() throws Throwable { } else { throw e; } - assertFalse(indexService.mapperService().hasMapping(MapperService.DEFAULT_MAPPING)); + assertNull(indexService.mapperService().documentMapper(MapperService.DEFAULT_MAPPING)); } public void testTotalFieldsExceedsLimit() throws Throwable { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NullValueTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NullValueTests.java index d9502d8e8800c..f38f83b6e418f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NullValueTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NullValueTests.java @@ -33,7 +33,7 @@ public class NullValueTests extends ESSingleNodeTestCase { public void testNullNullValue() throws Exception { IndexService indexService = createIndex("test", Settings.builder().build()); - String[] typesToTest = {"integer", "long", "double", "float", "short", "date", "ip", "keyword", "boolean", "byte"}; + String[] typesToTest = {"integer", "long", "double", "float", "short", "date", "ip", "keyword", "boolean", "byte", "geo_point"}; for (String type : typesToTest) { String mapping = Strings.toString(XContentFactory.jsonBuilder() diff --git a/server/src/test/java/org/elasticsearch/index/query/MatchPhraseQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MatchPhraseQueryBuilderTests.java index d37d6eeb88fdf..1ac53992ebe8c 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MatchPhraseQueryBuilderTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/query/MatchPhraseQueryBuilderTests.java @@ -21,12 +21,14 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; +import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.index.search.MatchQuery.ZeroTermsQuery; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; @@ -37,6 +39,7 @@ import static org.hamcrest.CoreMatchers.either; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; public class MatchPhraseQueryBuilderTests extends AbstractQueryTestCase { @@ -68,6 +71,11 @@ protected MatchPhraseQueryBuilder doCreateTestQueryBuilder() { if (randomBoolean()) { matchQuery.slop(randomIntBetween(0, 10)); } + + if (randomBoolean()) { + matchQuery.zeroTermsQuery(randomFrom(ZeroTermsQuery.ALL, ZeroTermsQuery.NONE)); + } + return matchQuery; } @@ -88,6 +96,12 @@ protected Map getAlternateVersions() { @Override protected void doAssertLuceneQuery(MatchPhraseQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { assertThat(query, notNullValue()); + + if (query instanceof MatchAllDocsQuery) { + assertThat(queryBuilder.zeroTermsQuery(), equalTo(ZeroTermsQuery.ALL)); + return; + } + assertThat(query, either(instanceOf(BooleanQuery.class)).or(instanceOf(PhraseQuery.class)) .or(instanceOf(TermQuery.class)).or(instanceOf(PointRangeQuery.class)) .or(instanceOf(IndexOrDocValuesQuery.class)).or(instanceOf(MatchNoDocsQuery.class))); @@ -108,7 +122,7 @@ public void testBadAnalyzer() throws IOException { assertThat(e.getMessage(), containsString("analyzer [bogusAnalyzer] not found")); } - public void testPhraseMatchQuery() throws IOException { + public void testFromSimpleJson() throws IOException { String json1 = "{\n" + " \"match_phrase\" : {\n" + " \"message\" : \"this is a test\"\n" + @@ -120,6 +134,7 @@ public void testPhraseMatchQuery() throws IOException { " \"message\" : {\n" + " \"query\" : \"this is a test\",\n" + " \"slop\" : 0,\n" + + " \"zero_terms_query\" : \"NONE\",\n" + " \"boost\" : 1.0\n" + " }\n" + " }\n" + @@ -128,6 +143,26 @@ public void testPhraseMatchQuery() throws IOException { checkGeneratedJson(expected, qb); } + public void testFromJson() throws IOException { + String json = "{\n" + + " \"match_phrase\" : {\n" + + " \"message\" : {\n" + + " \"query\" : \"this is a test\",\n" + + " \"slop\" : 2,\n" + + " \"zero_terms_query\" : \"ALL\",\n" + + " \"boost\" : 1.0\n" + + " }\n" + + " }\n" + + "}"; + + MatchPhraseQueryBuilder parsed = (MatchPhraseQueryBuilder) parseQuery(json); + checkGeneratedJson(json, parsed); + + assertEquals(json, "this is a test", parsed.value()); + assertEquals(json, 2, parsed.slop()); + assertEquals(json, ZeroTermsQuery.ALL, parsed.zeroTermsQuery()); + } + public void testParseFailsWithMultipleFields() throws IOException { String json = "{\n" + " \"match_phrase\" : {\n" + diff --git a/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java index 
e445eb1411748..b3cd1a361ecde 100644 --- a/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java @@ -59,7 +59,9 @@ import java.util.List; import java.util.Map; import java.util.function.Function; +import java.util.function.Predicate; +import static java.util.Collections.emptyMap; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -85,17 +87,13 @@ protected TermsSetQueryBuilder doCreateTestQueryBuilder() { do { fieldName = randomFrom(MAPPED_FIELD_NAMES); } while (fieldName.equals(GEO_POINT_FIELD_NAME) || fieldName.equals(GEO_SHAPE_FIELD_NAME)); - int numValues = randomIntBetween(0, 10); - List randomTerms = new ArrayList<>(numValues); - for (int i = 0; i < numValues; i++) { - randomTerms.add(getRandomValueForFieldName(fieldName)); - } + List randomTerms = randomValues(fieldName); TermsSetQueryBuilder queryBuilder = new TermsSetQueryBuilder(STRING_FIELD_NAME, randomTerms); if (randomBoolean()) { queryBuilder.setMinimumShouldMatchField("m_s_m"); } else { queryBuilder.setMinimumShouldMatchScript( - new Script(ScriptType.INLINE, MockScriptEngine.NAME, "_script", Collections.emptyMap())); + new Script(ScriptType.INLINE, MockScriptEngine.NAME, "_script", emptyMap())); } return queryBuilder; } @@ -122,6 +120,41 @@ protected boolean builderGeneratesCacheableQueries() { return false; } + @Override + public TermsSetQueryBuilder mutateInstance(final TermsSetQueryBuilder instance) throws IOException { + String fieldName = instance.getFieldName(); + List values = instance.getValues(); + String minimumShouldMatchField = null; + Script minimumShouldMatchScript = null; + + switch (randomIntBetween(0, 3)) { + case 0: + Predicate predicate = s -> s.equals(instance.getFieldName()) == false && s.equals(GEO_POINT_FIELD_NAME) == false + && s.equals(GEO_SHAPE_FIELD_NAME) == false; + fieldName = randomValueOtherThanMany(predicate, () -> randomFrom(MAPPED_FIELD_NAMES)); + values = randomValues(fieldName); + break; + case 1: + values = randomValues(fieldName); + break; + case 2: + minimumShouldMatchField = randomAlphaOfLengthBetween(1, 10); + break; + case 3: + minimumShouldMatchScript = new Script(ScriptType.INLINE, MockScriptEngine.NAME, randomAlphaOfLength(10), emptyMap()); + break; + } + + TermsSetQueryBuilder newInstance = new TermsSetQueryBuilder(fieldName, values); + if (minimumShouldMatchField != null) { + newInstance.setMinimumShouldMatchField(minimumShouldMatchField); + } + if (minimumShouldMatchScript != null) { + newInstance.setMinimumShouldMatchScript(minimumShouldMatchScript); + } + return newInstance; + } + public void testBothFieldAndScriptSpecified() { TermsSetQueryBuilder queryBuilder = new TermsSetQueryBuilder("_field", Collections.emptyList()); queryBuilder.setMinimumShouldMatchScript(new Script("")); @@ -215,7 +248,7 @@ public void testDoToQuery_msmScriptField() throws Exception { try (IndexReader ir = DirectoryReader.open(directory)) { QueryShardContext context = createShardContext(); - Script script = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "_script", Collections.emptyMap()); + Script script = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "_script", emptyMap()); Query query = new TermsSetQueryBuilder("message", Arrays.asList("a", "b", "c", "d")) .setMinimumShouldMatchScript(script).doToQuery(context); IndexSearcher searcher = new IndexSearcher(ir); @@ -228,6 +261,16 
@@ public void testDoToQuery_msmScriptField() throws Exception { } } + private static List randomValues(final String fieldName) { + final int numValues = randomIntBetween(0, 10); + final List values = new ArrayList<>(numValues); + + for (int i = 0; i < numValues; i++) { + values.add(getRandomValueForFieldName(fieldName)); + } + return values; + } + public static class CustomScriptPlugin extends MockScriptPlugin { @Override diff --git a/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 97fc1b528acf3..16a73f0fa712d 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -542,7 +542,7 @@ public void onFailure(Exception e) { listener.onFailure(e); } }, - ThreadPool.Names.INDEX, request); + ThreadPool.Names.WRITE, request); } @Override @@ -681,7 +681,7 @@ class GlobalCheckpointSync extends ReplicationAction< @Override protected PrimaryResult performOnPrimary( final IndexShard primary, final GlobalCheckpointSyncAction.Request request) throws Exception { - primary.getTranslog().sync(); + primary.sync(); return new PrimaryResult(request, new ReplicationResponse()); } diff --git a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index baa56ee9585f6..2d2aaac7bbd26 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -330,7 +330,7 @@ public void testSeqNoCollision() throws Exception { final Translog.Operation op1; final List initOperations = new ArrayList<>(initDocs); - try (Translog.Snapshot snapshot = replica2.getTranslog().newSnapshot()) { + try (Translog.Snapshot snapshot = getTranslog(replica2).newSnapshot()) { assertThat(snapshot.totalOperations(), equalTo(initDocs + 1)); for (int i = 0; i < initDocs; i++) { Translog.Operation op = snapshot.next(); @@ -347,7 +347,7 @@ public void testSeqNoCollision() throws Exception { shards.promoteReplicaToPrimary(replica1).get(); // wait until resync completed. 
shards.index(new IndexRequest(index.getName(), "type", "d2").source("{}", XContentType.JSON)); final Translog.Operation op2; - try (Translog.Snapshot snapshot = replica2.getTranslog().newSnapshot()) { + try (Translog.Snapshot snapshot = getTranslog(replica2).newSnapshot()) { assertThat(snapshot.totalOperations(), equalTo(initDocs + 2)); op2 = snapshot.next(); assertThat(op2.seqNo(), equalTo(op1.seqNo())); @@ -362,7 +362,7 @@ public void testSeqNoCollision() throws Exception { shards.promoteReplicaToPrimary(replica2); logger.info("--> Recover replica3 from replica2"); recoverReplica(replica3, replica2); - try (Translog.Snapshot snapshot = replica3.getTranslog().newSnapshot()) { + try (Translog.Snapshot snapshot = getTranslog(replica3).newSnapshot()) { assertThat(snapshot.totalOperations(), equalTo(initDocs + 1)); assertThat(snapshot.next(), equalTo(op2)); assertThat("Remaining of snapshot should contain init operations", snapshot, containsOperationsInAnyOrder(initOperations)); @@ -468,7 +468,7 @@ private static void assertNoOpTranslogOperationForDocumentFailure( long expectedPrimaryTerm, String failureMessage) throws IOException { for (IndexShard indexShard : replicationGroup) { - try(Translog.Snapshot snapshot = indexShard.getTranslog().newSnapshot()) { + try(Translog.Snapshot snapshot = getTranslog(indexShard).newSnapshot()) { assertThat(snapshot.totalOperations(), equalTo(expectedOperation)); long expectedSeqNo = 0L; Translog.Operation op = snapshot.next(); diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index c7469f2432ad3..323b0364dfb93 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -128,7 +128,7 @@ public void testRecoveryOfDisconnectedReplica() throws Exception { shards.flush(); translogTrimmed = randomBoolean(); if (translogTrimmed) { - final Translog translog = shards.getPrimary().getTranslog(); + final Translog translog = getTranslog(shards.getPrimary()); translog.getDeletionPolicy().setRetentionAgeInMillis(0); translog.trimUnreferencedReaders(); } @@ -271,7 +271,7 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { // otherwise the deletion policy won't trim translog assertBusy(() -> { shards.syncGlobalCheckpoint(); - assertThat(newPrimary.getTranslog().getLastSyncedGlobalCheckpoint(), equalTo(newPrimary.seqNoStats().getMaxSeqNo())); + assertThat(newPrimary.getLastSyncedGlobalCheckpoint(), equalTo(newPrimary.seqNoStats().getMaxSeqNo())); }); newPrimary.flush(new FlushRequest()); uncommittedOpsOnPrimary = shards.indexDocs(randomIntBetween(0, 10)); @@ -340,7 +340,7 @@ public void testReplicaRollbackStaleDocumentsInPeerRecovery() throws Exception { // Index more docs - move the global checkpoint >= seqno of the stale operations. goodDocs += shards.indexDocs(scaledRandomIntBetween(staleDocs, staleDocs * 5)); shards.syncGlobalCheckpoint(); - assertThat(replica.getTranslog().getLastSyncedGlobalCheckpoint(), equalTo(replica.seqNoStats().getMaxSeqNo())); + assertThat(replica.getLastSyncedGlobalCheckpoint(), equalTo(replica.seqNoStats().getMaxSeqNo())); // Recover a replica again should also rollback the stale documents. 
shards.removeReplica(replica); replica.close("recover replica - second time", false); diff --git a/server/src/test/java/org/elasticsearch/index/search/MatchPhraseQueryIT.java b/server/src/test/java/org/elasticsearch/index/search/MatchPhraseQueryIT.java new file mode 100644 index 0000000000000..ebd0bf0460aeb --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/search/MatchPhraseQueryIT.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.search; + +import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.MatchPhraseQueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.search.MatchQuery.ZeroTermsQuery; +import org.elasticsearch.test.ESIntegTestCase; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; + +public class MatchPhraseQueryIT extends ESIntegTestCase { + private static final String INDEX = "test"; + + @Before + public void setUp() throws Exception { + super.setUp(); + CreateIndexRequestBuilder createIndexRequest = prepareCreate(INDEX).setSettings( + Settings.builder() + .put(indexSettings()) + .put("index.analysis.analyzer.standard_stopwords.type", "standard") + .putList("index.analysis.analyzer.standard_stopwords.stopwords", "of", "the", "who")); + assertAcked(createIndexRequest); + ensureGreen(); + } + + public void testZeroTermsQuery() throws ExecutionException, InterruptedException { + List<IndexRequestBuilder> indexRequests = getIndexRequests(); + indexRandom(true, false, indexRequests); + + MatchPhraseQueryBuilder baseQuery = QueryBuilders.matchPhraseQuery("name", "the who") + .analyzer("standard_stopwords"); + + MatchPhraseQueryBuilder matchNoneQuery = baseQuery.zeroTermsQuery(ZeroTermsQuery.NONE); + SearchResponse matchNoneResponse = client().prepareSearch(INDEX).setQuery(matchNoneQuery).get(); + assertHitCount(matchNoneResponse, 0L); + + MatchPhraseQueryBuilder matchAllQuery = baseQuery.zeroTermsQuery(ZeroTermsQuery.ALL); + SearchResponse matchAllResponse = client().prepareSearch(INDEX).setQuery(matchAllQuery).get(); + assertHitCount(matchAllResponse, 2L); + } + + + private List<IndexRequestBuilder> getIndexRequests() { + List<IndexRequestBuilder> requests = new ArrayList<>(); + requests.add(client().prepareIndex(INDEX, "band").setSource("name", "the beatles")); + requests.add(client().prepareIndex(INDEX,
"band").setSource("name", "led zeppelin")); + return requests; + } +} diff --git a/server/src/test/java/org/elasticsearch/index/search/geo/GeoPointParsingTests.java b/server/src/test/java/org/elasticsearch/index/search/geo/GeoPointParsingTests.java index 4f410dc6d2690..f3d109868ef14 100644 --- a/server/src/test/java/org/elasticsearch/index/search/geo/GeoPointParsingTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/geo/GeoPointParsingTests.java @@ -76,14 +76,26 @@ public void testGeoPointParsing() throws IOException { GeoPoint point = GeoUtils.parseGeoPoint(objectLatLon(randomPt.lat(), randomPt.lon())); assertPointsEqual(point, randomPt); + GeoUtils.parseGeoPoint(toObject(objectLatLon(randomPt.lat(), randomPt.lon())), randomBoolean()); + assertPointsEqual(point, randomPt); + GeoUtils.parseGeoPoint(arrayLatLon(randomPt.lat(), randomPt.lon()), point); assertPointsEqual(point, randomPt); + GeoUtils.parseGeoPoint(toObject(arrayLatLon(randomPt.lat(), randomPt.lon())), randomBoolean()); + assertPointsEqual(point, randomPt); + GeoUtils.parseGeoPoint(geohash(randomPt.lat(), randomPt.lon()), point); assertCloseTo(point, randomPt.lat(), randomPt.lon()); + GeoUtils.parseGeoPoint(toObject(geohash(randomPt.lat(), randomPt.lon())), randomBoolean()); + assertCloseTo(point, randomPt.lat(), randomPt.lon()); + GeoUtils.parseGeoPoint(stringLatLon(randomPt.lat(), randomPt.lon()), point); assertCloseTo(point, randomPt.lat(), randomPt.lon()); + + GeoUtils.parseGeoPoint(toObject(stringLatLon(randomPt.lat(), randomPt.lon())), randomBoolean()); + assertCloseTo(point, randomPt.lat(), randomPt.lon()); } // Based on #5390 @@ -99,6 +111,12 @@ public void testInvalidPointEmbeddedObject() throws IOException { parser.nextToken(); Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); + + XContentParser parser2 = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content)); + parser2.nextToken(); + e = expectThrows(ElasticsearchParseException.class, () -> + GeoUtils.parseGeoPoint(toObject(parser2), randomBoolean())); + assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); } public void testInvalidPointLatHashMix() throws IOException { @@ -109,9 +127,14 @@ public void testInvalidPointLatHashMix() throws IOException { XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content)); parser.nextToken(); - Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), is("field must be either lat/lon or geohash")); + + XContentParser parser2 = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content)); + parser2.nextToken(); + e = expectThrows(ElasticsearchParseException.class, () -> + GeoUtils.parseGeoPoint(toObject(parser2), randomBoolean())); + assertThat(e.getMessage(), is("field must be either lat/lon or geohash")); } public void testInvalidPointLonHashMix() throws IOException { @@ -125,6 +148,12 @@ public void testInvalidPointLonHashMix() throws IOException { Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), is("field must be either lat/lon or geohash")); + + XContentParser parser2 = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content)); + parser2.nextToken(); + e = expectThrows(ElasticsearchParseException.class, () -> + 
GeoUtils.parseGeoPoint(toObject(parser2), randomBoolean())); + assertThat(e.getMessage(), is("field must be either lat/lon or geohash")); } public void testInvalidField() throws IOException { @@ -135,9 +164,15 @@ public void testInvalidField() throws IOException { XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content)); parser.nextToken(); - Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); + + + XContentParser parser2 = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content)); + parser2.nextToken(); + e = expectThrows(ElasticsearchParseException.class, () -> + GeoUtils.parseGeoPoint(toObject(parser2), randomBoolean())); + assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); } private XContentParser objectLatLon(double lat, double lon) throws IOException { @@ -183,4 +218,22 @@ public static void assertCloseTo(final GeoPoint point, final double lat, final d assertEquals(point.lat(), lat, TOLERANCE); assertEquals(point.lon(), lon, TOLERANCE); } + + public static Object toObject(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + if (token == XContentParser.Token.VALUE_NULL) { + return null; + } else if (token == XContentParser.Token.VALUE_STRING) { + return parser.text(); + } else if (token == XContentParser.Token.VALUE_NUMBER) { + return parser.numberValue(); + } else if (token == XContentParser.Token.START_OBJECT) { + return parser.map(); + } else if (token == XContentParser.Token.START_ARRAY) { + return parser.list(); + } else { + fail("Unexpected token " + token); + } + return null; + } } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java index 70813531aeb0e..596575abc3025 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncActionTests.java @@ -90,9 +90,6 @@ public void testTranslogSyncAfterGlobalCheckpointSync() throws Exception { final Translog.Durability durability = randomFrom(Translog.Durability.ASYNC, Translog.Durability.REQUEST); when(indexShard.getTranslogDurability()).thenReturn(durability); - final Translog translog = mock(Translog.class); - when(indexShard.getTranslog()).thenReturn(translog); - final long globalCheckpoint = randomIntBetween(Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED), Integer.MAX_VALUE); final long lastSyncedGlobalCheckpoint; if (randomBoolean() && globalCheckpoint != SequenceNumbers.NO_OPS_PERFORMED) { @@ -104,7 +101,7 @@ public void testTranslogSyncAfterGlobalCheckpointSync() throws Exception { } when(indexShard.getGlobalCheckpoint()).thenReturn(globalCheckpoint); - when(translog.getLastSyncedGlobalCheckpoint()).thenReturn(lastSyncedGlobalCheckpoint); + when(indexShard.getLastSyncedGlobalCheckpoint()).thenReturn(lastSyncedGlobalCheckpoint); final GlobalCheckpointSyncAction action = new GlobalCheckpointSyncAction( Settings.EMPTY, diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index f7ee54b32ee84..bc34aa60c4925 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ 
b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -107,6 +107,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.elasticsearch.index.shard.IndexShardTestCase.getTranslog; public class IndexShardIT extends ESSingleNodeTestCase { @@ -167,7 +168,7 @@ public void testDurableFlagHasEffect() throws Exception { IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); - Translog translog = ShardUtilsTests.getShardEngine(shard).getTranslog(); + Translog translog = getTranslog(shard); Predicate needsSync = (tlog) -> { // we can't use tlog.needsSync() here since it also takes the global checkpoint into account // we explicitly want to check here if our durability checks are taken into account so we only @@ -343,7 +344,7 @@ public void testMaybeFlush() throws Exception { SourceToParse.source("test", "test", "1", new BytesArray("{}"), XContentType.JSON), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, update -> {}); assertTrue(shard.shouldPeriodicallyFlush()); - final Translog translog = shard.getEngine().getTranslog(); + final Translog translog = getTranslog(shard); assertEquals(2, translog.stats().getUncommittedOperations()); client().prepareIndex("test", "test", "2").setSource("{}", XContentType.JSON) .setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get(); @@ -384,7 +385,7 @@ public void testMaybeRollTranslogGeneration() throws Exception { final IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); int rolls = 0; - final Translog translog = shard.getEngine().getTranslog(); + final Translog translog = getTranslog(shard); final long generation = translog.currentFileGeneration(); final int numberOfDocuments = randomIntBetween(32, 128); for (int i = 0; i < numberOfDocuments; i++) { @@ -454,11 +455,11 @@ public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception { assertThat(shard.flushStats().getPeriodic(), equalTo(periodic + 1)); }; } else { - final long generation = shard.getEngine().getTranslog().currentFileGeneration(); + final long generation = getTranslog(shard).currentFileGeneration(); client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); check = () -> assertEquals( generation + 1, - shard.getEngine().getTranslog().currentFileGeneration()); + getTranslog(shard).currentFileGeneration()); } assertBusy(check); running.set(false); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java index 3d961d7f422c0..27d08b76c0310 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java @@ -69,18 +69,18 @@ public class IndexShardOperationPermitsTests extends ESTestCase { @BeforeClass public static void setupThreadPool() { - int bulkThreadPoolSize = randomIntBetween(1, 2); - int bulkThreadPoolQueueSize = randomIntBetween(1, 2); + int writeThreadPoolSize = randomIntBetween(1, 2); + int writeThreadPoolQueueSize = randomIntBetween(1, 2); threadPool = new TestThreadPool("IndexShardOperationsLockTests", Settings.builder() - .put("thread_pool." 
+ ThreadPool.Names.BULK + ".size", bulkThreadPoolSize) - .put("thread_pool." + ThreadPool.Names.BULK + ".queue_size", bulkThreadPoolQueueSize) + .put("thread_pool." + ThreadPool.Names.WRITE + ".size", writeThreadPoolSize) + .put("thread_pool." + ThreadPool.Names.WRITE + ".queue_size", writeThreadPoolQueueSize) .build()); - assertThat(threadPool.executor(ThreadPool.Names.BULK), instanceOf(EsThreadPoolExecutor.class)); - assertThat(((EsThreadPoolExecutor) threadPool.executor(ThreadPool.Names.BULK)).getCorePoolSize(), equalTo(bulkThreadPoolSize)); - assertThat(((EsThreadPoolExecutor) threadPool.executor(ThreadPool.Names.BULK)).getMaximumPoolSize(), equalTo(bulkThreadPoolSize)); - assertThat(((EsThreadPoolExecutor) threadPool.executor(ThreadPool.Names.BULK)).getQueue().remainingCapacity(), - equalTo(bulkThreadPoolQueueSize)); + assertThat(threadPool.executor(ThreadPool.Names.WRITE), instanceOf(EsThreadPoolExecutor.class)); + assertThat(((EsThreadPoolExecutor) threadPool.executor(ThreadPool.Names.WRITE)).getCorePoolSize(), equalTo(writeThreadPoolSize)); + assertThat(((EsThreadPoolExecutor) threadPool.executor(ThreadPool.Names.WRITE)).getMaximumPoolSize(), equalTo(writeThreadPoolSize)); + assertThat(((EsThreadPoolExecutor) threadPool.executor(ThreadPool.Names.WRITE)).getQueue().remainingCapacity(), + equalTo(writeThreadPoolQueueSize)); } @AfterClass @@ -110,8 +110,8 @@ class DummyException extends RuntimeException {} CountDownLatch latch = new CountDownLatch(numThreads / 4); boolean forceExecution = randomBoolean(); for (int i = 0; i < numThreads; i++) { - // the bulk thread pool uses a bounded size and can get rejections, see setupThreadPool - String threadPoolName = randomFrom(ThreadPool.Names.BULK, ThreadPool.Names.GENERIC); + // the write thread pool uses a bounded size and can get rejections, see setupThreadPool + String threadPoolName = randomFrom(ThreadPool.Names.WRITE, ThreadPool.Names.GENERIC); boolean failingListener = randomBoolean(); PlainActionFuture future = new PlainActionFuture() { @Override diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 5506bc515f24c..e945bc12705b4 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -72,7 +72,6 @@ import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineException; @@ -285,14 +284,14 @@ public void testClosesPreventsNewOperations() throws InterruptedException, Execu closeShards(indexShard); assertThat(indexShard.getActiveOperationsCount(), equalTo(0)); try { - indexShard.acquirePrimaryOperationPermit(null, ThreadPool.Names.INDEX, ""); + indexShard.acquirePrimaryOperationPermit(null, ThreadPool.Names.WRITE, ""); fail("we should not be able to increment anymore"); } catch (IndexShardClosedException e) { // expected } try { indexShard.acquireReplicaOperationPermit(indexShard.getPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, null, - ThreadPool.Names.INDEX, ""); + ThreadPool.Names.WRITE, ""); fail("we should not be able to increment anymore"); } catch (IndexShardClosedException e) { // expected @@ -303,7 +302,7 @@ public void 
testRejectOperationPermitWithHigherTermWhenNotStarted() throws IOExc IndexShard indexShard = newShard(false); expectThrows(IndexShardNotStartedException.class, () -> indexShard.acquireReplicaOperationPermit(indexShard.getPrimaryTerm() + randomIntBetween(1, 100), - SequenceNumbers.UNASSIGNED_SEQ_NO, null, ThreadPool.Names.INDEX, "")); + SequenceNumbers.UNASSIGNED_SEQ_NO, null, ThreadPool.Names.WRITE, "")); closeShards(indexShard); } @@ -343,7 +342,7 @@ public void onFailure(Exception e) { throw new RuntimeException(e); } }, - ThreadPool.Names.INDEX, id); + ThreadPool.Names.WRITE, id); }); thread.start(); threads.add(thread); @@ -394,7 +393,7 @@ public void onFailure(Exception e) { throw new RuntimeException(e); } }, - ThreadPool.Names.INDEX, id); + ThreadPool.Names.WRITE, id); }); thread.start(); delayedThreads.add(thread); @@ -518,7 +517,7 @@ public void onFailure(Exception e) { public void testPrimaryPromotionRollsGeneration() throws Exception { final IndexShard indexShard = newStartedShard(false); - final long currentTranslogGeneration = indexShard.getTranslog().getGeneration().translogFileGeneration; + final long currentTranslogGeneration = getTranslog(indexShard).getGeneration().translogFileGeneration; // promote the replica final ShardRouting replicaRouting = indexShard.routingEntry(); @@ -556,8 +555,8 @@ public void onFailure(Exception e) { ThreadPool.Names.GENERIC, ""); latch.await(); - assertThat(indexShard.getTranslog().getGeneration().translogFileGeneration, equalTo(currentTranslogGeneration + 1)); - assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm)); + assertThat(getTranslog(indexShard).getGeneration().translogFileGeneration, equalTo(currentTranslogGeneration + 1)); + assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm)); closeShards(indexShard); } @@ -578,7 +577,7 @@ public void testOperationPermitsOnPrimaryShards() throws InterruptedException, E true, ShardRoutingState.STARTED, replicaRouting.allocationId()); final long newPrimaryTerm = indexShard.getPrimaryTerm() + between(1, 1000); indexShard.updateShardState(primaryRouting, newPrimaryTerm, (shard, listener) -> { - assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm)); + assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm)); }, 0L, Collections.singleton(indexShard.routingEntry().allocationId().getId()), new IndexShardRoutingTable.Builder(indexShard.shardId()).addShard(primaryRouting).build(), @@ -590,7 +589,7 @@ public void testOperationPermitsOnPrimaryShards() throws InterruptedException, E assertEquals(0, indexShard.getActiveOperationsCount()); if (indexShard.routingEntry().isRelocationTarget() == false) { try { - indexShard.acquireReplicaOperationPermit(primaryTerm, indexShard.getGlobalCheckpoint(), null, ThreadPool.Names.INDEX, ""); + indexShard.acquireReplicaOperationPermit(primaryTerm, indexShard.getGlobalCheckpoint(), null, ThreadPool.Names.WRITE, ""); fail("shard shouldn't accept operations as replica"); } catch (IllegalStateException ignored) { @@ -609,14 +608,14 @@ public void testOperationPermitsOnPrimaryShards() throws InterruptedException, E private Releasable acquirePrimaryOperationPermitBlockingly(IndexShard indexShard) throws ExecutionException, InterruptedException { PlainActionFuture fut = new PlainActionFuture<>(); - indexShard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.INDEX, ""); + indexShard.acquirePrimaryOperationPermit(fut, 
ThreadPool.Names.WRITE, ""); return fut.get(); } private Releasable acquireReplicaOperationPermitBlockingly(IndexShard indexShard, long opPrimaryTerm) throws ExecutionException, InterruptedException { PlainActionFuture fut = new PlainActionFuture<>(); - indexShard.acquireReplicaOperationPermit(opPrimaryTerm, indexShard.getGlobalCheckpoint(), fut, ThreadPool.Names.INDEX, ""); + indexShard.acquireReplicaOperationPermit(opPrimaryTerm, indexShard.getGlobalCheckpoint(), fut, ThreadPool.Names.WRITE, ""); return fut.get(); } @@ -664,12 +663,12 @@ public void testOperationPermitOnReplicaShards() throws Exception { if (shardRouting.primary() == false) { final IllegalStateException e = expectThrows(IllegalStateException.class, - () -> indexShard.acquirePrimaryOperationPermit(null, ThreadPool.Names.INDEX, "")); + () -> indexShard.acquirePrimaryOperationPermit(null, ThreadPool.Names.WRITE, "")); assertThat(e, hasToString(containsString("shard " + shardRouting + " is not a primary"))); } final long primaryTerm = indexShard.getPrimaryTerm(); - final long translogGen = engineClosed ? -1 : indexShard.getTranslog().getGeneration().translogFileGeneration; + final long translogGen = engineClosed ? -1 : getTranslog(indexShard).getGeneration().translogFileGeneration; final Releasable operation1; final Releasable operation2; @@ -701,7 +700,7 @@ public void onFailure(Exception e) { }; indexShard.acquireReplicaOperationPermit(primaryTerm - 1, SequenceNumbers.UNASSIGNED_SEQ_NO, onLockAcquired, - ThreadPool.Names.INDEX, ""); + ThreadPool.Names.WRITE, ""); assertFalse(onResponse.get()); assertTrue(onFailure.get()); @@ -747,7 +746,7 @@ public void onFailure(Exception e) { @Override public void onResponse(Releasable releasable) { assertThat(indexShard.getPrimaryTerm(), equalTo(newPrimaryTerm)); - assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm)); + assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm)); assertThat(indexShard.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint)); assertThat(indexShard.getGlobalCheckpoint(), equalTo(newGlobalCheckPoint)); onResponse.set(true); @@ -793,25 +792,25 @@ private void finish() { assertFalse(onResponse.get()); assertNull(onFailure.get()); assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm)); - assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(primaryTerm)); + assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(primaryTerm)); Releasables.close(operation1); // our operation should still be blocked assertFalse(onResponse.get()); assertNull(onFailure.get()); assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm)); - assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(primaryTerm)); + assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(primaryTerm)); Releasables.close(operation2); barrier.await(); // now lock acquisition should have succeeded assertThat(indexShard.getPrimaryTerm(), equalTo(newPrimaryTerm)); - assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm)); + assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm)); if (engineClosed) { assertFalse(onResponse.get()); assertThat(onFailure.get(), instanceOf(AlreadyClosedException.class)); } else { assertTrue(onResponse.get()); assertNull(onFailure.get()); - assertThat(indexShard.getTranslog().getGeneration().translogFileGeneration, equalTo(translogGen + 1)); + 
assertThat(getTranslog(indexShard).getGeneration().translogFileGeneration, equalTo(translogGen + 1)); assertThat(indexShard.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint)); assertThat(indexShard.getGlobalCheckpoint(), equalTo(newGlobalCheckPoint)); } @@ -1021,7 +1020,7 @@ public void onFailure(Exception e) { latch.countDown(); } }, - ThreadPool.Names.INDEX, ""); + ThreadPool.Names.WRITE, ""); }; final long firstIncrement = 1 + (randomBoolean() ? 0 : 1); @@ -1382,7 +1381,7 @@ public void onResponse(Releasable releasable) { super.onResponse(releasable); } }; - shard.acquirePrimaryOperationPermit(onLockAcquired, ThreadPool.Names.INDEX, "i_" + i); + shard.acquirePrimaryOperationPermit(onLockAcquired, ThreadPool.Names.WRITE, "i_" + i); onLockAcquiredActions.add(onLockAcquired); } @@ -1647,7 +1646,7 @@ public void testRecoverFromStoreWithNoOps() throws IOException { assertEquals(1, newShard.recoveryState().getTranslog().totalOperations()); assertEquals(1, newShard.recoveryState().getTranslog().totalOperationsOnStart()); assertEquals(100.0f, newShard.recoveryState().getTranslog().recoveredPercent(), 0.01f); - try (Translog.Snapshot snapshot = newShard.getTranslog().newSnapshot()) { + try (Translog.Snapshot snapshot = getTranslog(newShard).newSnapshot()) { Translog.Operation operation; int numNoops = 0; while ((operation = snapshot.next()) != null) { @@ -1662,6 +1661,16 @@ public void testRecoverFromStoreWithNoOps() throws IOException { IndexShardTestCase.updateRoutingEntry(newShard, newShard.routingEntry().moveToStarted()); assertDocCount(newShard, 1); assertDocCount(shard, 2); + + for (int i = 0; i < 2; i++) { + newShard = reinitShard(newShard, ShardRoutingHelper.initWithSameId(primaryShardRouting, + RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE)); + newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); + assertTrue(newShard.recoverFromStore()); + try (Translog.Snapshot snapshot = getTranslog(newShard).newSnapshot()) { + assertThat(snapshot.totalOperations(), equalTo(2)); + } + } closeShards(newShard, shard); } @@ -2048,7 +2057,7 @@ public void testTranslogRecoverySyncsTranslog() throws IOException { @Override public long indexTranslogOperations(List operations, int totalTranslogOps) throws IOException { final long localCheckpoint = super.indexTranslogOperations(operations, totalTranslogOps); - assertFalse(replica.getTranslog().syncNeeded()); + assertFalse(replica.isSyncNeeded()); return localCheckpoint; } }, true); diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 5803bf263633d..2d1c1d4e15af8 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -133,7 +133,7 @@ indexSettings, null, store, newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilari (e, s) -> 0, new NoneCircuitBreakerService(), () -> SequenceNumbers.NO_OPS_PERFORMED, () -> primaryTerm); engine = new InternalEngine(config); engine.recoverFromTranslog(); - listeners.setTranslog(engine.getTranslog()); + listeners.setCurrentRefreshLocationSupplier(engine::getTranslogLastWriteLocation); } @After diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 3f7f0583593af..8af19aa9ac1e4 100644 --- 
a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -148,7 +148,7 @@ public ClusterStateChanges(NamedXContentRegistry xContentRegistry, ThreadPool th when(indexService.index()).thenReturn(indexMetaData.getIndex()); MapperService mapperService = mock(MapperService.class); when(indexService.mapperService()).thenReturn(mapperService); - when(mapperService.docMappers(anyBoolean())).thenReturn(Collections.emptyList()); + when(mapperService.documentMapper()).thenReturn(null); when(indexService.getIndexEventListener()).thenReturn(new IndexEventListener() {}); when(indexService.getIndexSortSupplier()).thenReturn(() -> null); return indexService; diff --git a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java index 6561001ad7d86..3bfcfdd3ab187 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java @@ -113,7 +113,7 @@ public void testSyncFailsIfOperationIsInFlight() throws InterruptedException, Ex SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); final ShardId shardId = shard.shardId(); PlainActionFuture fut = new PlainActionFuture<>(); - shard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.INDEX, ""); + shard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.WRITE, ""); try (Releasable operationLock = fut.get()) { SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener<>(); flushService.attemptSyncedFlush(shardId, listener); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java index d65d40e5bcdaa..91b35594772cf 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -59,7 +59,7 @@ public void testGetStartingSeqNo() throws Exception { } flushShard(replica); replica.updateGlobalCheckpointOnReplica(initDocs - 1, "test"); - replica.getTranslog().sync(); + replica.sync(); final RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null); assertThat(PeerRecoveryTargetService.getStartingSeqNo(logger, recoveryTarget), equalTo(initDocs)); recoveryTarget.decRef(); @@ -81,7 +81,7 @@ public void testGetStartingSeqNo() throws Exception { // Advances the global checkpoint, a safe commit also advances { replica.updateGlobalCheckpointOnReplica(initDocs + moreDocs - 1, "test"); - replica.getTranslog().sync(); + replica.sync(); final RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null); assertThat(PeerRecoveryTargetService.getStartingSeqNo(logger, recoveryTarget), equalTo(initDocs + moreDocs)); recoveryTarget.decRef(); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index f46ab7ebbd603..4e9d0ccb22e11 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -61,7 +61,7 @@ public void testTranslogHistoryTransferred() throws 
Exception { try (ReplicationGroup shards = createGroup(0)) { shards.startPrimary(); int docs = shards.indexDocs(10); - shards.getPrimary().getTranslog().rollGeneration(); + getTranslog(shards.getPrimary()).rollGeneration(); shards.flush(); if (randomBoolean()) { docs += shards.indexDocs(10); @@ -69,7 +69,7 @@ public void testTranslogHistoryTransferred() throws Exception { shards.addReplica(); shards.startAll(); final IndexShard replica = shards.getReplicas().get(0); - assertThat(replica.getTranslog().totalOperations(), equalTo(docs)); + assertThat(replica.estimateTranslogOperationsFromMinSeq(0), equalTo(docs)); } } @@ -77,7 +77,7 @@ public void testRetentionPolicyChangeDuringRecovery() throws Exception { try (ReplicationGroup shards = createGroup(0)) { shards.startPrimary(); shards.indexDocs(10); - shards.getPrimary().getTranslog().rollGeneration(); + getTranslog(shards.getPrimary()).rollGeneration(); shards.flush(); shards.indexDocs(10); final IndexShard replica = shards.addReplica(); @@ -99,7 +99,7 @@ public void testRetentionPolicyChangeDuringRecovery() throws Exception { releaseRecovery.countDown(); future.get(); // rolling/flushing is async - assertBusy(() -> assertThat(replica.getTranslog().totalOperations(), equalTo(0))); + assertBusy(() -> assertThat(replica.estimateTranslogOperationsFromMinSeq(0), equalTo(0))); } } @@ -123,7 +123,7 @@ public void testRecoveryWithOutOfOrderDelete() throws Exception { // delete #1 orgReplica.applyDeleteOperationOnReplica(1, 2, "type", "id", VersionType.EXTERNAL, u -> {}); - orgReplica.getTranslog().rollGeneration(); // isolate the delete in it's own generation + getTranslog(orgReplica).rollGeneration(); // isolate the delete in it's own generation // index #0 orgReplica.applyIndexOperationOnReplica(0, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, SourceToParse.source(indexName, "type", "id", new BytesArray("{}"), XContentType.JSON), u -> {}); @@ -167,7 +167,7 @@ public void testRecoveryWithOutOfOrderDelete() throws Exception { shards.recoverReplica(newReplica); shards.assertAllEqual(3); - assertThat(newReplica.getTranslog().totalOperations(), equalTo(translogOps)); + assertThat(newReplica.estimateTranslogOperationsFromMinSeq(0), equalTo(translogOps)); } } @@ -184,7 +184,7 @@ public void testDifferentHistoryUUIDDisablesOPsRecovery() throws Exception { IndexShard replica = shards.getReplicas().get(0); final String historyUUID = replica.getHistoryUUID(); - Translog.TranslogGeneration translogGeneration = replica.getTranslog().getGeneration(); + Translog.TranslogGeneration translogGeneration = getTranslog(replica).getGeneration(); shards.removeReplica(replica); replica.close("test", false); IndexWriterConfig iwc = new IndexWriterConfig(null) @@ -219,7 +219,7 @@ public void testDifferentHistoryUUIDDisablesOPsRecovery() throws Exception { shards.recoverReplica(newReplica); // file based recovery should be made assertThat(newReplica.recoveryState().getIndex().fileDetails(), not(empty())); - assertThat(newReplica.getTranslog().totalOperations(), equalTo(numDocs)); + assertThat(newReplica.estimateTranslogOperationsFromMinSeq(0), equalTo(numDocs)); // history uuid was restored assertThat(newReplica.getHistoryUUID(), equalTo(historyUUID)); @@ -238,7 +238,7 @@ public void testPeerRecoveryPersistGlobalCheckpoint() throws Exception { } final IndexShard replica = shards.addReplica(); shards.recoverReplica(replica); - assertThat(replica.getTranslog().getLastSyncedGlobalCheckpoint(), equalTo(numDocs - 1)); + 
assertThat(replica.getLastSyncedGlobalCheckpoint(), equalTo(numDocs - 1)); } } @@ -291,7 +291,7 @@ public void testSequenceBasedRecoveryKeepsTranslog() throws Exception { final IndexShard newReplica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId()); shards.recoverReplica(newReplica); - try (Translog.Snapshot snapshot = newReplica.getTranslog().newSnapshot()) { + try (Translog.Snapshot snapshot = getTranslog(newReplica).newSnapshot()) { assertThat("Sequence based recovery should keep existing translog", snapshot, SnapshotMatchers.size(initDocs + moreDocs)); } assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(uncommittedDocs + moreDocs)); @@ -321,7 +321,7 @@ public void testShouldFlushAfterPeerRecovery() throws Exception { shards.recoverReplica(replica); // Make sure the flushing will eventually be completed (eg. `shouldPeriodicallyFlush` is false) assertBusy(() -> assertThat(getEngine(replica).shouldPeriodicallyFlush(), equalTo(false))); - assertThat(replica.getTranslog().totalOperations(), equalTo(numDocs)); + assertThat(replica.estimateTranslogOperationsFromMinSeq(0), equalTo(numDocs)); shards.assertAllEqual(numDocs); } } diff --git a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index ad2095a6dd073..4d0d0d6ff3fd9 100644 --- a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -589,10 +589,6 @@ public void testAllFlags() throws Exception { IndicesStatsResponse stats = builder.execute().actionGet(); for (Flag flag : values) { - if (flag == Flag.Suggest) { - // suggest flag is unused - continue; - } assertThat(isSet(flag, stats.getPrimaries()), equalTo(false)); assertThat(isSet(flag, stats.getTotal()), equalTo(false)); } @@ -628,10 +624,6 @@ public void testAllFlags() throws Exception { } for (Flag flag : EnumSet.complementOf(flags)) { // check the complement - if (flag == Flag.Suggest) { - // suggest flag is unused - continue; - } assertThat(isSet(flag, stats.getPrimaries()), equalTo(false)); assertThat(isSet(flag, stats.getTotal()), equalTo(false)); } @@ -684,7 +676,7 @@ public void testEncodeDecodeCommonStats() throws IOException { public void testFlagOrdinalOrder() { Flag[] flags = new Flag[]{Flag.Store, Flag.Indexing, Flag.Get, Flag.Search, Flag.Merge, Flag.Flush, Flag.Refresh, Flag.QueryCache, Flag.FieldData, Flag.Docs, Flag.Warmer, Flag.Completion, Flag.Segments, - Flag.Translog, Flag.Suggest, Flag.RequestCache, Flag.Recovery}; + Flag.Translog, Flag.RequestCache, Flag.Recovery}; assertThat(flags.length, equalTo(Flag.values().length)); for (int i = 0; i < flags.length; i++) { @@ -935,8 +927,6 @@ private static void set(Flag flag, IndicesStatsRequestBuilder builder, boolean s case Translog: builder.setTranslog(set); break; - case Suggest: // unused - break; case RequestCache: builder.setRequestCache(set); break; @@ -979,8 +969,6 @@ private static boolean isSet(Flag flag, CommonStats response) { return response.getSegments() != null; case Translog: return response.getTranslog() != null; - case Suggest: // unused - return true; case RequestCache: return response.getRequestCache() != null; case Recovery: diff --git a/server/src/test/java/org/elasticsearch/node/NodeTests.java b/server/src/test/java/org/elasticsearch/node/NodeTests.java index f1c8177b5a61c..254823791d5cd 100644 --- 
a/server/src/test/java/org/elasticsearch/node/NodeTests.java +++ b/server/src/test/java/org/elasticsearch/node/NodeTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.bootstrap.BootstrapContext; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.env.Environment; @@ -62,6 +63,7 @@ public void testNodeName() throws IOException { assertThat(Node.NODE_NAME_SETTING.get(nodeSettings), equalTo(name)); } } + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } public static class CheckPlugin extends Plugin { @@ -93,6 +95,7 @@ protected void validateNodeBeforeAcceptingRequests(BootstrapContext context, Bou expectThrows(NodeValidationException.class, () -> node.start()); assertTrue(executed.get()); } + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } public void testWarnIfPreRelease() { @@ -144,6 +147,7 @@ public void testNodeAttributes() throws IOException { } catch (IllegalArgumentException e) { assertEquals("node.attr.test_attr cannot have leading or trailing whitespace [trailing ]", e.getMessage()); } + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } private static Settings.Builder baseSettings() { diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java index 8c1e242b3262f..db8aa615c1440 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java @@ -19,11 +19,14 @@ package org.elasticsearch.repositories; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotState; @@ -39,7 +42,11 @@ import java.util.Map; import java.util.Set; +import static java.util.Collections.emptySet; +import static java.util.Collections.singleton; import static org.elasticsearch.repositories.RepositoryData.EMPTY_REPO_GEN; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; /** @@ -101,15 +108,18 @@ public void testAddSnapshots() { public void testInitIndices() { final int numSnapshots = randomIntBetween(1, 30); final Map snapshotIds = new HashMap<>(numSnapshots); + final Map snapshotStates = new HashMap<>(numSnapshots); for (int i = 0; i < numSnapshots; i++) { final SnapshotId snapshotId = new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()); snapshotIds.put(snapshotId.getUUID(), snapshotId); + snapshotStates.put(snapshotId.getUUID(), randomFrom(SnapshotState.values())); } RepositoryData repositoryData = new RepositoryData(EMPTY_REPO_GEN, snapshotIds, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyList()); // test that initializing 
indices works Map> indices = randomIndices(snapshotIds); - RepositoryData newRepoData = repositoryData.initIndices(indices); + RepositoryData newRepoData = new RepositoryData(repositoryData.getGenId(), snapshotIds, snapshotStates, indices, + new ArrayList<>(repositoryData.getIncompatibleSnapshotIds())); List expected = new ArrayList<>(repositoryData.getSnapshotIds()); Collections.sort(expected); List actual = new ArrayList<>(newRepoData.getSnapshotIds()); @@ -153,6 +163,81 @@ public void testGetSnapshotState() { assertNull(repositoryData.getSnapshotState(new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()))); } + public void testIndexThatReferencesAnUnknownSnapshot() throws IOException { + final XContent xContent = randomFrom(XContentType.values()).xContent(); + final RepositoryData repositoryData = generateRandomRepoData(); + + XContentBuilder builder = XContentBuilder.builder(xContent); + repositoryData.snapshotsToXContent(builder, ToXContent.EMPTY_PARAMS); + RepositoryData parsedRepositoryData = RepositoryData.snapshotsFromXContent(createParser(builder), repositoryData.getGenId()); + assertEquals(repositoryData, parsedRepositoryData); + + Map snapshotIds = new HashMap<>(); + Map snapshotStates = new HashMap<>(); + for (SnapshotId snapshotId : parsedRepositoryData.getSnapshotIds()) { + snapshotIds.put(snapshotId.getUUID(), snapshotId); + snapshotStates.put(snapshotId.getUUID(), parsedRepositoryData.getSnapshotState(snapshotId)); + } + + final IndexId corruptedIndexId = randomFrom(parsedRepositoryData.getIndices().values()); + + Map> indexSnapshots = new HashMap<>(); + for (Map.Entry snapshottedIndex : parsedRepositoryData.getIndices().entrySet()) { + IndexId indexId = snapshottedIndex.getValue(); + Set snapshotsIds = new LinkedHashSet<>(parsedRepositoryData.getSnapshots(indexId)); + if (corruptedIndexId.equals(indexId)) { + snapshotsIds.add(new SnapshotId("_uuid", "_does_not_exist")); + } + indexSnapshots.put(indexId, snapshotsIds); + } + assertNotNull(corruptedIndexId); + + RepositoryData corruptedRepositoryData = new RepositoryData(parsedRepositoryData.getGenId(), snapshotIds, snapshotStates, + indexSnapshots, new ArrayList<>(parsedRepositoryData.getIncompatibleSnapshotIds())); + + final XContentBuilder corruptedBuilder = XContentBuilder.builder(xContent); + corruptedRepositoryData.snapshotsToXContent(corruptedBuilder, ToXContent.EMPTY_PARAMS); + + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> + RepositoryData.snapshotsFromXContent(createParser(corruptedBuilder), corruptedRepositoryData.getGenId())); + assertThat(e.getMessage(), equalTo("Detected a corrupted repository, index " + corruptedIndexId + " references an unknown " + + "snapshot uuid [_does_not_exist]")); + } + + public void testIndexThatReferenceANullSnapshot() throws IOException { + final XContentBuilder builder = XContentBuilder.builder(randomFrom(XContentType.JSON).xContent()); + builder.startObject(); + { + builder.startArray("snapshots"); + builder.value(new SnapshotId("_name", "_uuid")); + builder.endArray(); + + builder.startObject("indices"); + { + builder.startObject("docs"); + { + builder.field("id", "_id"); + builder.startArray("snapshots"); + { + builder.startObject(); + if (randomBoolean()) { + builder.field("name", "_name"); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> + 
RepositoryData.snapshotsFromXContent(createParser(builder), randomNonNegativeLong())); + assertThat(e.getMessage(), equalTo("Detected a corrupted repository, index [docs/_id] references an unknown snapshot uuid [null]")); + } + public static RepositoryData generateRandomRepoData() { final int numIndices = randomIntBetween(1, 30); final List indices = new ArrayList<>(numIndices); diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java index 4830d48df79f6..63236b1655454 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.repositories.IndexId; @@ -48,6 +49,7 @@ import java.util.List; import static org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE; +import static org.hamcrest.Matchers.containsString; /** * This class tests the behavior of {@link BlobStoreRepository} when it @@ -126,6 +128,43 @@ public void testRestoreSnapshotWithExistingFiles() throws IOException { } } + public void testSnapshotWithConflictingName() throws IOException { + final IndexId indexId = new IndexId(randomAlphaOfLength(10), UUIDs.randomBase64UUID()); + final ShardId shardId = new ShardId(indexId.getName(), indexId.getId(), 0); + + IndexShard shard = newShard(shardId, true); + try { + // index documents in the shards + final int numDocs = scaledRandomIntBetween(1, 500); + recoverShardFromStore(shard); + for (int i = 0; i < numDocs; i++) { + indexDoc(shard, "doc", Integer.toString(i)); + if (rarely()) { + flushShard(shard, false); + } + } + assertDocCount(shard, numDocs); + + // snapshot the shard + final Repository repository = createRepository(); + final Snapshot snapshot = new Snapshot(repository.getMetadata().name(), new SnapshotId(randomAlphaOfLength(10), "_uuid")); + snapshotShard(shard, snapshot, repository); + final Snapshot snapshotWithSameName = new Snapshot(repository.getMetadata().name(), new SnapshotId( + snapshot.getSnapshotId().getName(), "_uuid2")); + IndexShardSnapshotFailedException isfe = expectThrows(IndexShardSnapshotFailedException.class, + () -> snapshotShard(shard, snapshotWithSameName, repository)); + assertThat(isfe.getMessage(), containsString("Duplicate snapshot name")); + } finally { + if (shard != null && shard.state() != IndexShardState.CLOSED) { + try { + shard.close("test", false); + } finally { + IOUtils.close(shard.store()); + } + } + } + } + /** Create a {@link Repository} with a random name **/ private Repository createRepository() throws IOException { Settings settings = Settings.builder().put("location", randomAlphaOfLength(10)).build(); diff --git a/server/src/test/java/org/elasticsearch/rest/action/RestFieldCapabilitiesActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/RestFieldCapabilitiesActionTests.java new file mode 100644 index 0000000000000..b8dd007f56729 --- /dev/null +++ 
b/server/src/test/java/org/elasticsearch/rest/action/RestFieldCapabilitiesActionTests.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestFieldCapabilitiesAction; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.usage.UsageService; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collections; + +import static org.mockito.Mockito.mock; + +public class RestFieldCapabilitiesActionTests extends ESTestCase { + + private RestFieldCapabilitiesAction action; + + @Before + public void setUpAction() { + action = new RestFieldCapabilitiesAction(Settings.EMPTY, mock(RestController.class)); + } + + public void testRequestBodyIsDeprecated() throws IOException { + String content = "{ \"fields\": [\"title\"] }"; + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) + .withPath("/_field_caps") + .withContent(new BytesArray(content), XContentType.JSON) + .build(); + action.prepareRequest(request, mock(NodeClient.class)); + + assertWarnings("Specifying a request body is deprecated -- the" + + " [fields] request parameter should be used instead."); + } +} diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsActionTests.java index 640b97605af15..f1d7686838cea 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsActionTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.rest.action.admin.cluster; import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -31,7 +32,10 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.object.HasToString.hasToString; diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsActionTests.java 
b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsActionTests.java index 26c1e1fa17779..f9eb93b64bef2 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsActionTests.java @@ -30,6 +30,7 @@ import java.io.IOException; import java.util.Collections; import java.util.HashMap; +import java.util.Map; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.object.HasToString.hasToString; diff --git a/server/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java b/server/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java index 14ec800c3a65d..6b3b6a67a9783 100644 --- a/server/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java +++ b/server/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java @@ -170,7 +170,7 @@ public void testAliasSearchRouting() throws Exception { assertThat(client().prepareSearch("alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(1L)); } - logger.info("--> search with 0,1 routings , should find two"); + logger.info("--> search with 0,1 indexRoutings , should find two"); for (int i = 0; i < 5; i++) { assertThat(client().prepareSearch().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L)); assertThat(client().prepareSearch().setSize(0).setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L)); diff --git a/server/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java b/server/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java index 84caed948a2be..0a2a43f2f83d4 100644 --- a/server/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java +++ b/server/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java @@ -173,13 +173,13 @@ public void testSimpleSearchRouting() { assertThat(client().prepareSearch().setSize(0).setRouting(secondRoutingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(1L)); } - logger.info("--> search with {},{} routings , should find two", routingValue, "1"); + logger.info("--> search with {},{} indexRoutings , should find two", routingValue, "1"); for (int i = 0; i < 5; i++) { assertThat(client().prepareSearch().setRouting(routingValue, secondRoutingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L)); assertThat(client().prepareSearch().setSize(0).setRouting(routingValue, secondRoutingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L)); } - logger.info("--> search with {},{},{} routings , should find two", routingValue, secondRoutingValue, routingValue); + logger.info("--> search with {},{},{} indexRoutings , should find two", routingValue, secondRoutingValue, routingValue); for (int i = 0; i < 5; i++) { assertThat(client().prepareSearch().setRouting(routingValue, secondRoutingValue, routingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L)); assertThat(client().prepareSearch().setSize(0).setRouting(routingValue, secondRoutingValue,routingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L)); diff --git 
a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java index ec422435e4e07..f59cc85c09ccf 100644 --- a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java @@ -112,8 +112,8 @@ public void testPreProcess() throws Exception { IndexReader reader = w.getReader(); Engine.Searcher searcher = new Engine.Searcher("test", new IndexSearcher(reader))) { - DefaultSearchContext context1 = new DefaultSearchContext(1L, shardSearchRequest, null, searcher, indexService, - indexShard, bigArrays, null, timeout, null, null); + DefaultSearchContext context1 = new DefaultSearchContext(1L, shardSearchRequest, null, searcher, null, indexService, + indexShard, bigArrays, null, timeout, null, null, Version.CURRENT); context1.from(300); // resultWindow greater than maxResultWindow and scrollContext is null @@ -153,8 +153,8 @@ public void testPreProcess() throws Exception { + "] index level setting.")); // rescore is null but sliceBuilder is not null - DefaultSearchContext context2 = new DefaultSearchContext(2L, shardSearchRequest, null, searcher, indexService, - indexShard, bigArrays, null, timeout, null, null); + DefaultSearchContext context2 = new DefaultSearchContext(2L, shardSearchRequest, null, searcher, + null, indexService, indexShard, bigArrays, null, timeout, null, null, Version.CURRENT); SliceBuilder sliceBuilder = mock(SliceBuilder.class); int numSlices = maxSlicesPerScroll + randomIntBetween(1, 100); @@ -170,8 +170,8 @@ public void testPreProcess() throws Exception { when(shardSearchRequest.getAliasFilter()).thenReturn(AliasFilter.EMPTY); when(shardSearchRequest.indexBoost()).thenReturn(AbstractQueryBuilder.DEFAULT_BOOST); - DefaultSearchContext context3 = new DefaultSearchContext(3L, shardSearchRequest, null, searcher, indexService, - indexShard, bigArrays, null, timeout, null, null); + DefaultSearchContext context3 = new DefaultSearchContext(3L, shardSearchRequest, null, searcher, null, + indexService, indexShard, bigArrays, null, timeout, null, null, Version.CURRENT); ParsedQuery parsedQuery = ParsedQuery.parsedMatchAllQuery(); context3.sliceBuilder(null).parsedQuery(parsedQuery).preProcess(false); assertEquals(context3.query(), context3.buildFilteredQuery(parsedQuery.query())); diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index f5552ee0d2e46..c58a158fc677d 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -213,7 +213,7 @@ public void onFailure(Exception e) { SearchPhaseResult searchPhaseResult = service.executeQueryPhase( new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, new SearchSourceBuilder(), new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, - true), + true, null, null), new SearchTask(123L, "", "", "", null, Collections.emptyMap())); IntArrayList intCursors = new IntArrayList(1); intCursors.add(0); @@ -249,7 +249,7 @@ public void testTimeout() throws IOException { new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), - 1.0f, true) + 1.0f, true, null, null) ); try { // the search context should inherit the default timeout @@ -269,7 +269,7 @@ public void testTimeout() throws IOException { new String[0], false, new 
AliasFilter(null, Strings.EMPTY_ARRAY), - 1.0f, true) + 1.0f, true, null, null) ); try { // the search context should inherit the query timeout @@ -297,12 +297,13 @@ public void testMaxDocvalueFieldsSearch() throws IOException { searchSourceBuilder.docValueField("field" + i); } try (SearchContext context = service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, - searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true))) { + searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true, null, null))) { assertNotNull(context); searchSourceBuilder.docValueField("one_field_too_much"); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, - searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true))); + searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, + true, null, null))); assertEquals( "Trying to retrieve too many docvalue_fields. Must be less than or equal to: [100] but was [101]. " + "This limit can be set by changing the [index.max_docvalue_fields_search] index level setting.", @@ -328,13 +329,14 @@ public void testMaxScriptFieldsSearch() throws IOException { new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, Collections.emptyMap())); } try (SearchContext context = service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, - searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true))) { + searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true, null, null))) { assertNotNull(context); searchSourceBuilder.scriptField("anotherScriptField", new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, Collections.emptyMap())); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, - searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true))); + searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), + 1.0f, true, null, null))); assertEquals( "Trying to retrieve too many script_fields. 
Must be less than or equal to: [" + maxScriptFields + "] but was [" + (maxScriptFields + 1) @@ -406,28 +408,28 @@ public void testCanMatch() throws IOException { final IndexShard indexShard = indexService.getShard(0); final boolean allowPartialSearchResults = true; assertTrue(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH, null, - Strings.EMPTY_ARRAY, false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults))); + Strings.EMPTY_ARRAY, false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults, null, null))); assertTrue(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH, - new SearchSourceBuilder(), Strings.EMPTY_ARRAY, false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, - allowPartialSearchResults))); + new SearchSourceBuilder(), Strings.EMPTY_ARRAY, false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, + allowPartialSearchResults, null, null))); assertTrue(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH, new SearchSourceBuilder().query(new MatchAllQueryBuilder()), Strings.EMPTY_ARRAY, false, - new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults))); + new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults, null, null))); assertTrue(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH, new SearchSourceBuilder().query(new MatchNoneQueryBuilder()) .aggregation(new TermsAggregationBuilder("test", ValueType.STRING).minDocCount(0)), Strings.EMPTY_ARRAY, false, - new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults))); + new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults, null, null))); assertTrue(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH, new SearchSourceBuilder().query(new MatchNoneQueryBuilder()) .aggregation(new GlobalAggregationBuilder("test")), Strings.EMPTY_ARRAY, false, - new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults))); + new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults, null, null))); assertFalse(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH, new SearchSourceBuilder().query(new MatchNoneQueryBuilder()), Strings.EMPTY_ARRAY, false, - new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults))); + new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults, null, null))); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java index 159b7e28b1269..43c7010d4b023 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java @@ -68,8 +68,8 @@ import static org.hamcrest.core.IsNull.notNullValue; import static org.hamcrest.core.IsNull.nullValue; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29456") @ESIntegTestCase.SuiteScopeTestCase +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29456") public class MovAvgIT extends ESIntegTestCase { private static final String INTERVAL_FIELD = "l_value"; private static final 
String VALUE_FIELD = "v_value"; @@ -1296,7 +1296,7 @@ private void assertBucketContents(Histogram.Bucket actual, Double expectedCount, } else { assertThat("[_count] movavg is null", countMovAvg, notNullValue()); assertEquals("[_count] movavg does not match expected [" + countMovAvg.value() + " vs " + expectedCount + "]", - countMovAvg.value(), expectedCount, 0.1); + countMovAvg.value(), expectedCount, 0.1 * Math.abs(countMovAvg.value())); } // This is a gap bucket @@ -1308,7 +1308,7 @@ private void assertBucketContents(Histogram.Bucket actual, Double expectedCount, } else { assertThat("[value] movavg is null", valuesMovAvg, notNullValue()); assertEquals("[value] movavg does not match expected [" + valuesMovAvg.value() + " vs " + expectedValue + "]", - valuesMovAvg.value(), expectedValue, 0.1); + valuesMovAvg.value(), expectedValue, 0.1 * Math.abs(countMovAvg.value())); } } diff --git a/server/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java b/server/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java index c2016ceb02ce7..21a4f099f5a32 100644 --- a/server/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java +++ b/server/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java @@ -74,6 +74,8 @@ public void testSerialization() throws Exception { assertEquals(deserializedRequest.searchType(), shardSearchTransportRequest.searchType()); assertEquals(deserializedRequest.shardId(), shardSearchTransportRequest.shardId()); assertEquals(deserializedRequest.numberOfShards(), shardSearchTransportRequest.numberOfShards()); + assertEquals(deserializedRequest.indexRoutings(), shardSearchTransportRequest.indexRoutings()); + assertEquals(deserializedRequest.preference(), shardSearchTransportRequest.preference()); assertEquals(deserializedRequest.cacheKey(), shardSearchTransportRequest.cacheKey()); assertNotSame(deserializedRequest, shardSearchTransportRequest); assertEquals(deserializedRequest.getAliasFilter(), shardSearchTransportRequest.getAliasFilter()); @@ -92,8 +94,10 @@ private ShardSearchTransportRequest createShardSearchTransportRequest() throws I } else { filteringAliases = new AliasFilter(null, Strings.EMPTY_ARRAY); } + final String[] routings = generateRandomStringArray(5, 10, false, true); return new ShardSearchTransportRequest(new OriginalIndices(searchRequest), searchRequest, shardId, - randomIntBetween(1, 100), filteringAliases, randomBoolean() ? 1.0f : randomFloat(), Math.abs(randomLong()), null); + randomIntBetween(1, 100), filteringAliases, randomBoolean() ? 
1.0f : randomFloat(), + Math.abs(randomLong()), null, routings); } public void testFilteringAliases() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/search/slice/SearchSliceIT.java b/server/src/test/java/org/elasticsearch/search/slice/SearchSliceIT.java index 2227cbb806b3f..d609f84e4192e 100644 --- a/server/src/test/java/org/elasticsearch/search/slice/SearchSliceIT.java +++ b/server/src/test/java/org/elasticsearch/search/slice/SearchSliceIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.slice; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; @@ -48,9 +49,7 @@ import static org.hamcrest.Matchers.startsWith; public class SearchSliceIT extends ESIntegTestCase { - private static final int NUM_DOCS = 1000; - - private int setupIndex(boolean withDocs) throws IOException, ExecutionException, InterruptedException { + private void setupIndex(int numDocs, int numberOfShards) throws IOException, ExecutionException, InterruptedException { String mapping = Strings.toString(XContentFactory.jsonBuilder(). startObject() .startObject("type") @@ -70,74 +69,112 @@ private int setupIndex(boolean withDocs) throws IOException, ExecutionException, .endObject() .endObject() .endObject()); - int numberOfShards = randomIntBetween(1, 7); assertAcked(client().admin().indices().prepareCreate("test") .setSettings(Settings.builder().put("number_of_shards", numberOfShards).put("index.max_slices_per_scroll", 10000)) .addMapping("type", mapping, XContentType.JSON)); ensureGreen(); - if (withDocs == false) { - return numberOfShards; - } - List requests = new ArrayList<>(); - for (int i = 0; i < NUM_DOCS; i++) { - XContentBuilder builder = jsonBuilder(); - builder.startObject(); - builder.field("invalid_random_kw", randomAlphaOfLengthBetween(5, 20)); - builder.field("random_int", randomInt()); - builder.field("static_int", 0); - builder.field("invalid_random_int", randomInt()); - builder.endObject(); + for (int i = 0; i < numDocs; i++) { + XContentBuilder builder = jsonBuilder() + .startObject() + .field("invalid_random_kw", randomAlphaOfLengthBetween(5, 20)) + .field("random_int", randomInt()) + .field("static_int", 0) + .field("invalid_random_int", randomInt()) + .endObject(); requests.add(client().prepareIndex("test", "type").setSource(builder)); } indexRandom(true, requests); - return numberOfShards; } - public void testDocIdSort() throws Exception { - int numShards = setupIndex(true); - SearchResponse sr = client().prepareSearch("test") - .setQuery(matchAllQuery()) - .setSize(0) - .get(); - int numDocs = (int) sr.getHits().getTotalHits(); - assertThat(numDocs, equalTo(NUM_DOCS)); - int max = randomIntBetween(2, numShards*3); + public void testSearchSort() throws Exception { + int numShards = randomIntBetween(1, 7); + int numDocs = randomIntBetween(100, 1000); + setupIndex(numDocs, numShards); + int max = randomIntBetween(2, numShards * 3); for (String field : new String[]{"_id", "random_int", "static_int"}) { int fetchSize = randomIntBetween(10, 100); + // test _doc sort SearchRequestBuilder request = client().prepareSearch("test") .setQuery(matchAllQuery()) .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) .setSize(fetchSize) .addSort(SortBuilders.fieldSort("_doc")); - assertSearchSlicesWithScroll(request, field, max); + assertSearchSlicesWithScroll(request, field, 
max, numDocs); + + // test numeric sort + request = client().prepareSearch("test") + .setQuery(matchAllQuery()) + .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .addSort(SortBuilders.fieldSort("random_int")) + .setSize(fetchSize); + assertSearchSlicesWithScroll(request, field, max, numDocs); } } - public void testNumericSort() throws Exception { - int numShards = setupIndex(true); - SearchResponse sr = client().prepareSearch("test") - .setQuery(matchAllQuery()) - .setSize(0) - .get(); - int numDocs = (int) sr.getHits().getTotalHits(); - assertThat(numDocs, equalTo(NUM_DOCS)); - - int max = randomIntBetween(2, numShards*3); - for (String field : new String[]{"_id", "random_int", "static_int"}) { + public void testWithPreferenceAndRoutings() throws Exception { + int numShards = 10; + int totalDocs = randomIntBetween(100, 1000); + setupIndex(totalDocs, numShards); + { + SearchResponse sr = client().prepareSearch("test") + .setQuery(matchAllQuery()) + .setPreference("_shards:1,4") + .setSize(0) + .get(); + int numDocs = (int) sr.getHits().getTotalHits(); + int max = randomIntBetween(2, numShards * 3); int fetchSize = randomIntBetween(10, 100); SearchRequestBuilder request = client().prepareSearch("test") .setQuery(matchAllQuery()) .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) - .addSort(SortBuilders.fieldSort("random_int")) - .setSize(fetchSize); - assertSearchSlicesWithScroll(request, field, max); + .setSize(fetchSize) + .setPreference("_shards:1,4") + .addSort(SortBuilders.fieldSort("_doc")); + assertSearchSlicesWithScroll(request, "_id", max, numDocs); + } + { + SearchResponse sr = client().prepareSearch("test") + .setQuery(matchAllQuery()) + .setRouting("foo", "bar") + .setSize(0) + .get(); + int numDocs = (int) sr.getHits().getTotalHits(); + int max = randomIntBetween(2, numShards * 3); + int fetchSize = randomIntBetween(10, 100); + SearchRequestBuilder request = client().prepareSearch("test") + .setQuery(matchAllQuery()) + .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setSize(fetchSize) + .setRouting("foo", "bar") + .addSort(SortBuilders.fieldSort("_doc")); + assertSearchSlicesWithScroll(request, "_id", max, numDocs); + } + { + assertAcked(client().admin().indices().prepareAliases() + .addAliasAction(IndicesAliasesRequest.AliasActions.add().index("test").alias("alias1").routing("foo")) + .addAliasAction(IndicesAliasesRequest.AliasActions.add().index("test").alias("alias2").routing("bar")) + .addAliasAction(IndicesAliasesRequest.AliasActions.add().index("test").alias("alias3").routing("baz")) + .get()); + SearchResponse sr = client().prepareSearch("alias1", "alias3") + .setQuery(matchAllQuery()) + .setSize(0) + .get(); + int numDocs = (int) sr.getHits().getTotalHits(); + int max = randomIntBetween(2, numShards * 3); + int fetchSize = randomIntBetween(10, 100); + SearchRequestBuilder request = client().prepareSearch("alias1", "alias3") + .setQuery(matchAllQuery()) + .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setSize(fetchSize) + .addSort(SortBuilders.fieldSort("_doc")); + assertSearchSlicesWithScroll(request, "_id", max, numDocs); } } public void testInvalidFields() throws Exception { - setupIndex(false); + setupIndex(0, 1); SearchPhaseExecutionException exc = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch("test") .setQuery(matchAllQuery()) @@ -161,7 +198,7 @@ public void testInvalidFields() throws Exception { } public void testInvalidQuery() throws Exception { - setupIndex(false); + setupIndex(0, 1); 
SearchPhaseExecutionException exc = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch() .setQuery(matchAllQuery()) @@ -173,7 +210,7 @@ public void testInvalidQuery() throws Exception { equalTo("`slice` cannot be used outside of a scroll context")); } - private void assertSearchSlicesWithScroll(SearchRequestBuilder request, String field, int numSlice) { + private void assertSearchSlicesWithScroll(SearchRequestBuilder request, String field, int numSlice, int numDocs) { int totalResults = 0; List keys = new ArrayList<>(); for (int id = 0; id < numSlice; id++) { @@ -184,7 +221,7 @@ private void assertSearchSlicesWithScroll(SearchRequestBuilder request, String f int numSliceResults = searchResponse.getHits().getHits().length; String scrollId = searchResponse.getScrollId(); for (SearchHit hit : searchResponse.getHits().getHits()) { - keys.add(hit.getId()); + assertTrue(keys.add(hit.getId())); } while (searchResponse.getHits().getHits().length > 0) { searchResponse = client().prepareSearchScroll("test") @@ -195,15 +232,15 @@ private void assertSearchSlicesWithScroll(SearchRequestBuilder request, String f totalResults += searchResponse.getHits().getHits().length; numSliceResults += searchResponse.getHits().getHits().length; for (SearchHit hit : searchResponse.getHits().getHits()) { - keys.add(hit.getId()); + assertTrue(keys.add(hit.getId())); } } assertThat(numSliceResults, equalTo(expectedSliceResults)); clearScroll(scrollId); } - assertThat(totalResults, equalTo(NUM_DOCS)); - assertThat(keys.size(), equalTo(NUM_DOCS)); - assertThat(new HashSet(keys).size(), equalTo(NUM_DOCS)); + assertThat(totalResults, equalTo(numDocs)); + assertThat(keys.size(), equalTo(numDocs)); + assertThat(new HashSet(keys).size(), equalTo(numDocs)); } private Throwable findRootCause(Exception e) { diff --git a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java index 75802e92ee176..b93ebc1adde72 100644 --- a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java @@ -30,19 +30,38 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.elasticsearch.Version; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.search.SearchShardIterator; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.OperationRouting; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import 
org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.query.Rewriteable; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.Scroll; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -58,13 +77,138 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class SliceBuilderTests extends ESTestCase { private static final int MAX_SLICE = 20; - private static SliceBuilder randomSliceBuilder() throws IOException { + static class ShardSearchRequestTest implements IndicesRequest, ShardSearchRequest { + private final String[] indices; + private final int shardId; + private final String[] indexRoutings; + private final String preference; + + ShardSearchRequestTest(String index, int shardId, String[] indexRoutings, String preference) { + this.indices = new String[] { index }; + this.shardId = shardId; + this.indexRoutings = indexRoutings; + this.preference = preference; + } + + @Override + public String[] indices() { + return indices; + } + + @Override + public IndicesOptions indicesOptions() { + return null; + } + + @Override + public ShardId shardId() { + return new ShardId(new Index(indices[0], indices[0]), shardId); + } + + @Override + public String[] types() { + return new String[0]; + } + + @Override + public SearchSourceBuilder source() { + return null; + } + + @Override + public AliasFilter getAliasFilter() { + return null; + } + + @Override + public void setAliasFilter(AliasFilter filter) { + + } + + @Override + public void source(SearchSourceBuilder source) { + + } + + @Override + public int numberOfShards() { + return 0; + } + + @Override + public SearchType searchType() { + return null; + } + + @Override + public float indexBoost() { + return 0; + } + + @Override + public long nowInMillis() { + return 0; + } + + @Override + public Boolean requestCache() { + return null; + } + + @Override + public Boolean allowPartialSearchResults() { + return null; + } + + @Override + public Scroll scroll() { + return null; + } + + @Override + public String[] indexRoutings() { + return indexRoutings; + } + + @Override + public String preference() { + return preference; + } + + @Override + public void setProfile(boolean profile) { + + } + + @Override + public boolean isProfile() { + return false; + } + + @Override + public BytesReference cacheKey() throws IOException { + return null; + } + + @Override + public String getClusterAlias() { + return null; + } + + @Override + public Rewriteable getRewriteable() { + return null; + } + } + + private static SliceBuilder randomSliceBuilder() { int max = randomIntBetween(2, MAX_SLICE); int id = randomIntBetween(1, max - 1); String field = randomAlphaOfLengthBetween(5, 20); @@ -75,7 +219,7 @@ private static SliceBuilder serializedCopy(SliceBuilder original) throws IOExcep return copyWriteable(original, new NamedWriteableRegistry(Collections.emptyList()), SliceBuilder::new); } - private static SliceBuilder mutate(SliceBuilder 
original) throws IOException { + private static SliceBuilder mutate(SliceBuilder original) { switch (randomIntBetween(0, 2)) { case 0: return new SliceBuilder(original.getField() + "_xyz", original.getId(), original.getMax()); case 1: return new SliceBuilder(original.getField(), original.getId() - 1, original.getMax()); @@ -84,6 +228,63 @@ private static SliceBuilder mutate(SliceBuilder original) throws IOException { } } + private IndexSettings createIndexSettings(Version indexVersionCreated, int numShards) { + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, indexVersionCreated) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + IndexMetaData indexState = IndexMetaData.builder("index").settings(settings).build(); + return new IndexSettings(indexState, Settings.EMPTY); + } + + private ShardSearchRequest createRequest(int shardId) { + return createRequest(shardId, Strings.EMPTY_ARRAY, null); + } + + private ShardSearchRequest createRequest(int shardId, String[] routings, String preference) { + return new ShardSearchRequestTest("index", shardId, routings, preference); + } + + private QueryShardContext createShardContext(Version indexVersionCreated, IndexReader reader, + String fieldName, DocValuesType dvType, int numShards, int shardId) { + MappedFieldType fieldType = new MappedFieldType() { + @Override + public MappedFieldType clone() { + return null; + } + + @Override + public String typeName() { + return null; + } + + @Override + public Query termQuery(Object value, @Nullable QueryShardContext context) { + return null; + } + + public Query existsQuery(QueryShardContext context) { + return null; + } + }; + fieldType.setName(fieldName); + QueryShardContext context = mock(QueryShardContext.class); + when(context.fieldMapper(fieldName)).thenReturn(fieldType); + when(context.getIndexReader()).thenReturn(reader); + when(context.getShardId()).thenReturn(shardId); + IndexSettings indexSettings = createIndexSettings(indexVersionCreated, numShards); + when(context.getIndexSettings()).thenReturn(indexSettings); + if (dvType != null) { + fieldType.setHasDocValues(true); + fieldType.setDocValuesType(dvType); + IndexNumericFieldData fd = mock(IndexNumericFieldData.class); + when(context.getForField(fieldType)).thenReturn(fd); + } + return context; + + } + public void testSerialization() throws Exception { SliceBuilder original = randomSliceBuilder(); SliceBuilder deserialized = serializedCopy(original); @@ -131,92 +332,41 @@ public void testInvalidArguments() throws Exception { assertEquals("max must be greater than id", e.getMessage()); } - public void testToFilter() throws IOException { + public void testToFilterSimple() throws IOException { Directory dir = new RAMDirectory(); try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) { writer.commit(); } - QueryShardContext context = mock(QueryShardContext.class); try (IndexReader reader = DirectoryReader.open(dir)) { - MappedFieldType fieldType = new MappedFieldType() { - @Override - public MappedFieldType clone() { - return null; - } - - @Override - public String typeName() { - return null; - } - - @Override - public Query termQuery(Object value, @Nullable QueryShardContext context) { - return null; - } - - public Query existsQuery(QueryShardContext context) { - return null; - } - }; - fieldType.setName(IdFieldMapper.NAME); - fieldType.setHasDocValues(false); - 
when(context.fieldMapper(IdFieldMapper.NAME)).thenReturn(fieldType); - when(context.getIndexReader()).thenReturn(reader); - Settings settings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .build(); - IndexMetaData indexState = IndexMetaData.builder("index").settings(settings).build(); - IndexSettings indexSettings = new IndexSettings(indexState, Settings.EMPTY); - when(context.getIndexSettings()).thenReturn(indexSettings); + QueryShardContext context = + createShardContext(Version.CURRENT, reader, "_id", DocValuesType.SORTED_NUMERIC, 1,0); SliceBuilder builder = new SliceBuilder(5, 10); - Query query = builder.toFilter(context, 0, 1); + Query query = builder.toFilter(null, createRequest(0), context, Version.CURRENT); assertThat(query, instanceOf(TermsSliceQuery.class)); - assertThat(builder.toFilter(context, 0, 1), equalTo(query)); + assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query)); try (IndexReader newReader = DirectoryReader.open(dir)) { when(context.getIndexReader()).thenReturn(newReader); - assertThat(builder.toFilter(context, 0, 1), equalTo(query)); + assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query)); } } + } + public void testToFilterRandom() throws IOException { + Directory dir = new RAMDirectory(); + try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) { + writer.commit(); + } try (IndexReader reader = DirectoryReader.open(dir)) { - MappedFieldType fieldType = new MappedFieldType() { - @Override - public MappedFieldType clone() { - return null; - } - - @Override - public String typeName() { - return null; - } - - @Override - public Query termQuery(Object value, @Nullable QueryShardContext context) { - return null; - } - - public Query existsQuery(QueryShardContext context) { - return null; - } - }; - fieldType.setName("field_doc_values"); - fieldType.setHasDocValues(true); - fieldType.setDocValuesType(DocValuesType.SORTED_NUMERIC); - when(context.fieldMapper("field_doc_values")).thenReturn(fieldType); - when(context.getIndexReader()).thenReturn(reader); - IndexNumericFieldData fd = mock(IndexNumericFieldData.class); - when(context.getForField(fieldType)).thenReturn(fd); - SliceBuilder builder = new SliceBuilder("field_doc_values", 5, 10); - Query query = builder.toFilter(context, 0, 1); + QueryShardContext context = + createShardContext(Version.CURRENT, reader, "field", DocValuesType.SORTED_NUMERIC, 1,0); + SliceBuilder builder = new SliceBuilder("field", 5, 10); + Query query = builder.toFilter(null, createRequest(0), context, Version.CURRENT); assertThat(query, instanceOf(DocValuesSliceQuery.class)); - - assertThat(builder.toFilter(context, 0, 1), equalTo(query)); + assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query)); try (IndexReader newReader = DirectoryReader.open(dir)) { when(context.getIndexReader()).thenReturn(newReader); - assertThat(builder.toFilter(context, 0, 1), equalTo(query)); + assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query)); } // numSlices > numShards @@ -226,7 +376,8 @@ public Query existsQuery(QueryShardContext context) { for (int i = 0; i < numSlices; i++) { for (int j = 0; j < numShards; j++) { SliceBuilder slice = new SliceBuilder("_id", i, numSlices); - Query q = slice.toFilter(context, j, 
numShards); + context = createShardContext(Version.CURRENT, reader, "_id", DocValuesType.SORTED, numShards, j); + Query q = slice.toFilter(null, createRequest(j), context, Version.CURRENT); if (q instanceof TermsSliceQuery || q instanceof MatchAllDocsQuery) { AtomicInteger count = numSliceMap.get(j); if (count == null) { @@ -250,12 +401,13 @@ public Query existsQuery(QueryShardContext context) { // numShards > numSlices numShards = randomIntBetween(4, 100); - numSlices = randomIntBetween(2, numShards-1); + numSlices = randomIntBetween(2, numShards - 1); List targetShards = new ArrayList<>(); for (int i = 0; i < numSlices; i++) { for (int j = 0; j < numShards; j++) { SliceBuilder slice = new SliceBuilder("_id", i, numSlices); - Query q = slice.toFilter(context, j, numShards); + context = createShardContext(Version.CURRENT, reader, "_id", DocValuesType.SORTED, numShards, j); + Query q = slice.toFilter(null, createRequest(j), context, Version.CURRENT); if (q instanceof MatchNoDocsQuery == false) { assertThat(q, instanceOf(MatchAllDocsQuery.class)); targetShards.add(j); @@ -271,7 +423,8 @@ public Query existsQuery(QueryShardContext context) { for (int i = 0; i < numSlices; i++) { for (int j = 0; j < numShards; j++) { SliceBuilder slice = new SliceBuilder("_id", i, numSlices); - Query q = slice.toFilter(context, j, numShards); + context = createShardContext(Version.CURRENT, reader, "_id", DocValuesType.SORTED, numShards, j); + Query q = slice.toFilter(null, createRequest(j), context, Version.CURRENT); if (i == j) { assertThat(q, instanceOf(MatchAllDocsQuery.class)); } else { @@ -280,85 +433,35 @@ public Query existsQuery(QueryShardContext context) { } } } + } + public void testInvalidField() throws IOException { + Directory dir = new RAMDirectory(); + try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) { + writer.commit(); + } try (IndexReader reader = DirectoryReader.open(dir)) { - MappedFieldType fieldType = new MappedFieldType() { - @Override - public MappedFieldType clone() { - return null; - } - - @Override - public String typeName() { - return null; - } - - @Override - public Query termQuery(Object value, @Nullable QueryShardContext context) { - return null; - } - - public Query existsQuery(QueryShardContext context) { - return null; - } - }; - fieldType.setName("field_without_doc_values"); - when(context.fieldMapper("field_without_doc_values")).thenReturn(fieldType); - when(context.getIndexReader()).thenReturn(reader); - SliceBuilder builder = new SliceBuilder("field_without_doc_values", 5, 10); - IllegalArgumentException exc = - expectThrows(IllegalArgumentException.class, () -> builder.toFilter(context, 0, 1)); + QueryShardContext context = createShardContext(Version.CURRENT, reader, "field", null, 1,0); + SliceBuilder builder = new SliceBuilder("field", 5, 10); + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, + () -> builder.toFilter(null, createRequest(0), context, Version.CURRENT)); assertThat(exc.getMessage(), containsString("cannot load numeric doc values")); } } - public void testToFilterDeprecationMessage() throws IOException { Directory dir = new RAMDirectory(); try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) { writer.commit(); } - QueryShardContext context = mock(QueryShardContext.class); try (IndexReader reader = DirectoryReader.open(dir)) { - MappedFieldType fieldType = new MappedFieldType() { - @Override - public MappedFieldType clone() { - return 
null; - } - - @Override - public String typeName() { - return null; - } - - @Override - public Query termQuery(Object value, @Nullable QueryShardContext context) { - return null; - } - - public Query existsQuery(QueryShardContext context) { - return null; - } - }; - fieldType.setName("_uid"); - fieldType.setHasDocValues(false); - when(context.fieldMapper("_uid")).thenReturn(fieldType); - when(context.getIndexReader()).thenReturn(reader); - Settings settings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .build(); - IndexMetaData indexState = IndexMetaData.builder("index").settings(settings).build(); - IndexSettings indexSettings = new IndexSettings(indexState, Settings.EMPTY); - when(context.getIndexSettings()).thenReturn(indexSettings); + QueryShardContext context = createShardContext(Version.V_6_3_0, reader, "_uid", null, 1,0); SliceBuilder builder = new SliceBuilder("_uid", 5, 10); - Query query = builder.toFilter(context, 0, 1); + Query query = builder.toFilter(null, createRequest(0), context, Version.CURRENT); assertThat(query, instanceOf(TermsSliceQuery.class)); - assertThat(builder.toFilter(context, 0, 1), equalTo(query)); + assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query)); assertWarnings("Computing slices on the [_uid] field is deprecated for 6.x indices, use [_id] instead"); } - } public void testSerializationBackcompat() throws IOException { @@ -375,4 +478,35 @@ public void testSerializationBackcompat() throws IOException { SliceBuilder::new, Version.V_6_3_0); assertEquals(sliceBuilder, copy63); } + + public void testToFilterWithRouting() throws IOException { + Directory dir = new RAMDirectory(); + try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) { + writer.commit(); + } + ClusterService clusterService = mock(ClusterService.class); + ClusterState state = mock(ClusterState.class); + when(state.metaData()).thenReturn(MetaData.EMPTY_META_DATA); + when(clusterService.state()).thenReturn(state); + OperationRouting routing = mock(OperationRouting.class); + GroupShardsIterator it = new GroupShardsIterator<>( + Collections.singletonList( + new SearchShardIterator(null, new ShardId("index", "index", 1), null, null) + ) + ); + when(routing.searchShards(any(), any(), any(), any())).thenReturn(it); + when(clusterService.operationRouting()).thenReturn(routing); + when(clusterService.getSettings()).thenReturn(Settings.EMPTY); + try (IndexReader reader = DirectoryReader.open(dir)) { + QueryShardContext context = createShardContext(Version.CURRENT, reader, "field", DocValuesType.SORTED, 5, 0); + SliceBuilder builder = new SliceBuilder("field", 6, 10); + String[] routings = new String[] { "foo" }; + Query query = builder.toFilter(clusterService, createRequest(1, routings, null), context, Version.CURRENT); + assertEquals(new DocValuesSliceQuery("field", 6, 10), query); + query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, Version.CURRENT); + assertEquals(new DocValuesSliceQuery("field", 6, 10), query); + query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, Version.V_6_2_0); + assertEquals(new DocValuesSliceQuery("field", 1, 2), query); + } + } } diff --git a/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java 
b/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java index fcd80b191b842..546017f807ac0 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java @@ -87,9 +87,9 @@ public void testThatToXContentWritesOutUnboundedCorrectly() throws Exception { } public void testThatNegativeSettingAllowsToStart() throws InterruptedException { - Settings settings = Settings.builder().put("node.name", "index").put("thread_pool.index.queue_size", "-1").build(); + Settings settings = Settings.builder().put("node.name", "write").put("thread_pool.write.queue_size", "-1").build(); ThreadPool threadPool = new ThreadPool(settings); - assertThat(threadPool.info("index").getQueueSize(), is(nullValue())); + assertThat(threadPool.info("write").getQueueSize(), is(nullValue())); terminate(threadPool); } diff --git a/server/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java b/server/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java index 29053400931cc..ea281f7d9ae1e 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java @@ -60,8 +60,8 @@ public void testCorrectThreadPoolTypePermittedInSettings() throws InterruptedExc } } - public void testIndexingThreadPoolsMaxSize() throws InterruptedException { - final String name = randomFrom(Names.BULK, Names.INDEX); + public void testWriteThreadPoolsMaxSize() throws InterruptedException { + final String name = Names.WRITE; final int maxSize = 1 + EsExecutors.numberOfProcessors(Settings.EMPTY); final int tooBig = randomIntBetween(1 + maxSize, Integer.MAX_VALUE); @@ -74,7 +74,7 @@ public void testIndexingThreadPoolsMaxSize() throws InterruptedException { try { tp = new ThreadPool(Settings.builder() .put("node.name", "testIndexingThreadPoolsMaxSize") - .put("thread_pool." + name + ".size", tooBig) + .put("thread_pool." + Names.WRITE + ".size", tooBig) .build()); } finally { terminateThreadPoolIfNeeded(tp); @@ -84,11 +84,11 @@ public void testIndexingThreadPoolsMaxSize() throws InterruptedException { assertThat( initial, hasToString(containsString( - "Failed to parse value [" + tooBig + "] for setting [thread_pool." + name + ".size] must be "))); + "Failed to parse value [" + tooBig + "] for setting [thread_pool." 
+ Names.WRITE + ".size] must be "))); } private static int getExpectedThreadPoolSize(Settings settings, String name, int size) { - if (name.equals(ThreadPool.Names.BULK) || name.equals(ThreadPool.Names.INDEX)) { + if (name.equals(ThreadPool.Names.WRITE)) { return Math.min(size, EsExecutors.numberOfProcessors(settings)); } else { return size; diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 0d8a469981966..69096677664b3 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -42,6 +42,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; @@ -717,22 +718,6 @@ public void run() { } } - private static void installNodeStatsHandler(TransportService service, DiscoveryNode...nodes) { - service.registerRequestHandler(NodesInfoAction.NAME, NodesInfoRequest::new, ThreadPool.Names.SAME, false, false, - (request, channel) -> { - List nodeInfos = new ArrayList<>(); - int port = 80; - for (DiscoveryNode node : nodes) { - HttpInfo http = new HttpInfo(new BoundTransportAddress(new TransportAddress[]{node.getAddress()}, - new TransportAddress(node.getAddress().address().getAddress(), port++)), 100); - nodeInfos.add(new NodeInfo(node.getVersion(), Build.CURRENT, node, null, null, null, null, null, null, http, null, - null, null)); - } - channel.sendResponse(new NodesInfoResponse(ClusterName.DEFAULT, nodeInfos, Collections.emptyList())); - }); - - } - public void testGetConnectionInfo() throws Exception { List knownNodes = new CopyOnWriteArrayList<>(); try (MockTransportService transport1 = startTransport("seed_node", knownNodes, Version.CURRENT); @@ -753,34 +738,24 @@ public void testGetConnectionInfo() throws Exception { service.acceptIncomingRequests(); int maxNumConnections = randomIntBetween(1, 5); try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, maxNumConnections, n -> true)) { + seedNodes, service, maxNumConnections, n -> true)) { // test no nodes connected - RemoteConnectionInfo remoteConnectionInfo = assertSerialization(getRemoteConnectionInfo(connection)); + RemoteConnectionInfo remoteConnectionInfo = assertSerialization(connection.getConnectionInfo()); assertNotNull(remoteConnectionInfo); assertEquals(0, remoteConnectionInfo.numNodesConnected); - assertEquals(0, remoteConnectionInfo.seedNodes.size()); - assertEquals(0, remoteConnectionInfo.httpAddresses.size()); + assertEquals(3, remoteConnectionInfo.seedNodes.size()); assertEquals(maxNumConnections, remoteConnectionInfo.connectionsPerCluster); assertEquals("test-cluster", remoteConnectionInfo.clusterAlias); - updateSeedNodes(connection, seedNodes); - expectThrows(RemoteTransportException.class, () -> getRemoteConnectionInfo(connection)); - - for (MockTransportService s : Arrays.asList(transport1, transport2, transport3)) { - installNodeStatsHandler(s, node1, node2, node3); - } - remoteConnectionInfo = getRemoteConnectionInfo(connection); - remoteConnectionInfo = 
assertSerialization(remoteConnectionInfo); + // Connect some nodes + updateSeedNodes(connection, seedNodes); + remoteConnectionInfo = assertSerialization(connection.getConnectionInfo()); assertNotNull(remoteConnectionInfo); assertEquals(connection.getNumNodesConnected(), remoteConnectionInfo.numNodesConnected); assertEquals(Math.min(3, maxNumConnections), connection.getNumNodesConnected()); assertEquals(3, remoteConnectionInfo.seedNodes.size()); - assertEquals(remoteConnectionInfo.httpAddresses.size(), Math.min(3, maxNumConnections)); assertEquals(maxNumConnections, remoteConnectionInfo.connectionsPerCluster); assertEquals("test-cluster", remoteConnectionInfo.clusterAlias); - for (TransportAddress address : remoteConnectionInfo.httpAddresses) { - assertTrue("port range mismatch: " + address.getPort(), address.getPort() >= 80 && address.getPort() <= 90); - } } } } @@ -789,48 +764,41 @@ public void testGetConnectionInfo() throws Exception { public void testRemoteConnectionInfo() throws IOException { RemoteConnectionInfo stats = new RemoteConnectionInfo("test_cluster", Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), - Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), 4, 3, TimeValue.timeValueMinutes(30), false); assertSerialization(stats); RemoteConnectionInfo stats1 = new RemoteConnectionInfo("test_cluster", Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), - Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), 4, 4, TimeValue.timeValueMinutes(30), true); assertSerialization(stats1); assertNotEquals(stats, stats1); stats1 = new RemoteConnectionInfo("test_cluster_1", Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), - Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), 4, 3, TimeValue.timeValueMinutes(30), false); assertSerialization(stats1); assertNotEquals(stats, stats1); stats1 = new RemoteConnectionInfo("test_cluster", Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 15)), - Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), 4, 3, TimeValue.timeValueMinutes(30), false); assertSerialization(stats1); assertNotEquals(stats, stats1); stats1 = new RemoteConnectionInfo("test_cluster", Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), - Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 87)), 4, 3, TimeValue.timeValueMinutes(30), true); assertSerialization(stats1); assertNotEquals(stats, stats1); stats1 = new RemoteConnectionInfo("test_cluster", Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), - Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), 4, 3, TimeValue.timeValueMinutes(325), true); assertSerialization(stats1); assertNotEquals(stats, stats1); stats1 = new RemoteConnectionInfo("test_cluster", Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), - Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), 5, 3, TimeValue.timeValueMinutes(30), false); assertSerialization(stats1); assertNotEquals(stats, stats1); @@ -850,13 +818,14 @@ private static RemoteConnectionInfo assertSerialization(RemoteConnectionInfo inf } public void testRemoteConnectionInfoBwComp() throws IOException { - final Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_6_5, Version.V_6_0_0); + final Version version = VersionUtils.randomVersionBetween(random(), + Version.V_6_1_0, 
VersionUtils.getPreviousVersion(Version.V_7_0_0_alpha1)); RemoteConnectionInfo expected = new RemoteConnectionInfo("test_cluster", Collections.singletonList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), - Collections.singletonList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), 4, 4, new TimeValue(30, TimeUnit.MINUTES), false); - String encoded = "AQQAAAAABzAuMC4wLjAAAAABAQQAAAAABzAuMC4wLjAAAABQBDwEBAx0ZXN0X2NsdXN0ZXIAAAAAAAAAAAAAAA=="; + // This version was created using the serialization code in use from 6.1 but before 7.0 + String encoded = "AQQAAAAABzAuMC4wLjAAAAABAQQAAAAABzAuMC4wLjAAAABQBDwEBAx0ZXN0X2NsdXN0ZXIA"; final byte[] data = Base64.getDecoder().decode(encoded); try (StreamInput in = StreamInput.wrap(data)) { @@ -879,55 +848,29 @@ public void testRemoteConnectionInfoBwComp() throws IOException { public void testRenderConnectionInfoXContent() throws IOException { RemoteConnectionInfo stats = new RemoteConnectionInfo("test_cluster", Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS,1)), - Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS,80)), 4, 3, TimeValue.timeValueMinutes(30), true); stats = assertSerialization(stats); XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); stats.toXContent(builder, null); builder.endObject(); - assertEquals("{\"test_cluster\":{\"seeds\":[\"0.0.0.0:1\"],\"http_addresses\":[\"0.0.0.0:80\"],\"connected\":true," + + assertEquals("{\"test_cluster\":{\"seeds\":[\"0.0.0.0:1\"],\"connected\":true," + "\"num_nodes_connected\":3,\"max_connections_per_cluster\":4,\"initial_connect_timeout\":\"30m\"," + "\"skip_unavailable\":true}}", Strings.toString(builder)); stats = new RemoteConnectionInfo("some_other_cluster", Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS,1), new TransportAddress(TransportAddress.META_ADDRESS,2)), - Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS,80), new TransportAddress(TransportAddress.META_ADDRESS,81)), 2, 0, TimeValue.timeValueSeconds(30), false); stats = assertSerialization(stats); builder = XContentFactory.jsonBuilder(); builder.startObject(); stats.toXContent(builder, null); builder.endObject(); - assertEquals("{\"some_other_cluster\":{\"seeds\":[\"0.0.0.0:1\",\"0.0.0.0:2\"],\"http_addresses\":[\"0.0.0.0:80\",\"0.0.0.0:81\"]," + assertEquals("{\"some_other_cluster\":{\"seeds\":[\"0.0.0.0:1\",\"0.0.0.0:2\"]," + "\"connected\":false,\"num_nodes_connected\":0,\"max_connections_per_cluster\":2,\"initial_connect_timeout\":\"30s\"," + "\"skip_unavailable\":false}}", Strings.toString(builder)); } - private RemoteConnectionInfo getRemoteConnectionInfo(RemoteClusterConnection connection) throws Exception { - AtomicReference statsRef = new AtomicReference<>(); - AtomicReference exceptionRef = new AtomicReference<>(); - CountDownLatch latch = new CountDownLatch(1); - connection.getConnectionInfo(new ActionListener() { - @Override - public void onResponse(RemoteConnectionInfo remoteConnectionInfo) { - statsRef.set(remoteConnectionInfo); - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - exceptionRef.set(e); - latch.countDown(); - } - }); - latch.await(); - if (exceptionRef.get() != null) { - throw exceptionRef.get(); - } - return statsRef.get(); - } - public void testEnsureConnected() throws IOException, InterruptedException { List knownNodes = new CopyOnWriteArrayList<>(); try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); diff --git 
a/settings.gradle b/settings.gradle index 76b157d0e4a3b..ee88f9bd0ed37 --- a/settings.gradle +++ b/settings.gradle @@ -16,9 +16,13 @@ List projects = [ 'client:benchmark', 'benchmarks', 'distribution:archives:integ-test-zip', + 'distribution:archives:oss-zip', 'distribution:archives:zip', + 'distribution:archives:oss-tar', 'distribution:archives:tar', + 'distribution:packages:oss-deb', 'distribution:packages:deb', + 'distribution:packages:oss-rpm', 'distribution:packages:rpm', 'distribution:bwc:next-minor-snapshot', 'distribution:bwc:staged-minor-snapshot', @@ -72,6 +76,7 @@ addSubProjects('', new File(rootProject.projectDir, 'libs')) addSubProjects('', new File(rootProject.projectDir, 'modules')) addSubProjects('', new File(rootProject.projectDir, 'plugins')) addSubProjects('', new File(rootProject.projectDir, 'qa')) +addSubProjects('', new File(rootProject.projectDir, 'x-pack')) boolean isEclipse = System.getProperty("eclipse.launcher") != null || gradle.startParameter.taskNames.contains('eclipse') || gradle.startParameter.taskNames.contains('cleanEclipse') if (isEclipse) { diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index dea92c2927d86..8fff17900b072 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -493,4 +493,11 @@ protected Engine.Delete replicaDeleteForDoc(String id, long version, long seqNo, return new Engine.Delete("test", id, newUid(id), seqNo, 1, version, VersionType.EXTERNAL, Engine.Operation.Origin.REPLICA, startTime); } + + /** + * Exposes a translog associated with the given engine for testing purposes. 
+ */ + public static Translog getTranslog(Engine engine) { + return engine.getTranslog(); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 0d535d9af3851..a0e1cfc334110 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -55,6 +55,7 @@ import org.elasticsearch.index.cache.query.DisabledQueryCache; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; @@ -66,6 +67,7 @@ import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; @@ -643,6 +645,10 @@ public static Engine getEngine(IndexShard indexShard) { return indexShard.getEngine(); } + public static Translog getTranslog(IndexShard shard) { + return EngineTestCase.getTranslog(getEngine(shard)); + } + public static ReplicationTracker getReplicationTracker(IndexShard indexShard) { return indexShard.getReplicationTracker(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 32c660cd5d24b..dfd3713333543 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -335,11 +335,14 @@ private void ensureNoWarnings() throws IOException { * @param warnings other expected general deprecation warnings */ protected final void assertSettingDeprecationsAndWarnings(final Setting[] settings, final String... warnings) { + assertSettingDeprecationsAndWarnings(Arrays.stream(settings).map(Setting::getKey).toArray(String[]::new), warnings); + } + + protected final void assertSettingDeprecationsAndWarnings(final String[] settings, final String... warnings) { assertWarnings( Stream.concat( Arrays .stream(settings) - .map(Setting::getKey) .map(k -> "[" + k + "] setting was deprecated in Elasticsearch and will be removed in a future release! 
" + "See the breaking changes documentation for the next major version."), Arrays.stream(warnings)) diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index d82b5052dbf54..12acd21903ec4 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -76,6 +76,7 @@ import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.CircuitBreakerService; @@ -1158,7 +1159,7 @@ private void assertOpenTranslogReferences() throws Exception { for (IndexService indexService : indexServices) { for (IndexShard indexShard : indexService) { try { - indexShard.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); + IndexShardTestCase.getTranslog(indexShard).getDeletionPolicy().assertNoOpenTranslogRefs(); } catch (AlreadyClosedException ok) { // all good } diff --git a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java index 9fde8b66a1f96..766fc80ba5605 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java @@ -54,7 +54,7 @@ static Tuple, List> resolveReleasedVersions(Version curre Version last = versions.remove(versions.size() - 1); assert last.equals(current) : "The highest version must be the current one " - + "but was [" + versions.get(versions.size() - 1) + "] and current was [" + current + "]"; + + "but was [" + last + "] and current was [" + current + "]"; if (current.revision != 0) { /* If we are in a stable branch there should be no unreleased version constants diff --git a/test/framework/src/test/java/org/elasticsearch/node/MockNodeTests.java b/test/framework/src/test/java/org/elasticsearch/node/MockNodeTests.java index a217540b89fca..a0cdb8c3168f4 100644 --- a/test/framework/src/test/java/org/elasticsearch/node/MockNodeTests.java +++ b/test/framework/src/test/java/org/elasticsearch/node/MockNodeTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.node; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; @@ -67,5 +69,6 @@ public void testComponentsMockedByMarkerPlugins() throws IOException { assertSame(searchService.getClass(), SearchService.class); } } + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } } diff --git a/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java b/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java index 67a9a40f0fc1f..3c8b349792b75 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/VersionUtilsTests.java @@ -28,9 +28,9 @@ import java.util.LinkedHashSet; import java.util.List; -import static java.util.Collections.singletonList; import static java.util.stream.Collectors.toCollection; import static 
java.util.stream.Collectors.toList; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -305,6 +305,24 @@ public void testResolveReleasedVersionsAtNewMinorBranchIn6x() { TestNewMinorBranchIn6x.V_6_2_0))); } + public static class TestIncorrectCurrentVersion { + public static final Version V_5_3_0 = Version.fromString("5.3.0"); + public static final Version V_5_3_1 = Version.fromString("5.3.1"); + public static final Version V_5_4_0 = Version.fromString("5.4.0"); + public static final Version V_5_4_1 = Version.fromString("5.4.1"); + public static final Version CURRENT = V_5_4_1; + } + + public void testIncorrectCurrentVersion() { + Version previousVersion = TestIncorrectCurrentVersion.V_5_4_0; + AssertionError error = expectThrows(AssertionError.class, () -> + VersionUtils.resolveReleasedVersions(previousVersion, TestIncorrectCurrentVersion.class)); + + String message = error.getMessage(); + assertThat(message, containsString(TestIncorrectCurrentVersion.CURRENT.toString())); + assertThat(message, containsString(previousVersion.toString())); + } + /** * Tests that {@link Version#minimumCompatibilityVersion()} and {@link VersionUtils#allReleasedVersions()} * agree with the list of wire and index compatible versions we build in gradle. diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index 05fdfac541a2e..23c665af30a30 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -19,6 +19,7 @@ */ package org.elasticsearch.test.test; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Client; @@ -60,6 +61,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileNotExists; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.not; @@ -218,6 +220,7 @@ public Settings transportClientSettings() { assertClusters(cluster0, cluster1, false); long seed = randomLong(); + boolean shouldAssertSettingsDeprecationsAndWarnings = false; try { { Random random = new Random(seed); @@ -228,19 +231,25 @@ public Settings transportClientSettings() { cluster1.beforeTest(random, random.nextDouble()); } assertArrayEquals(cluster0.getNodeNames(), cluster1.getNodeNames()); + if (cluster0.getNodeNames().length > 0) { + shouldAssertSettingsDeprecationsAndWarnings = true; + assertSettingDeprecationsAndWarnings(new Setting[]{NetworkModule.HTTP_ENABLED}); + } Iterator iterator1 = cluster1.getClients().iterator(); for (Client client : cluster0.getClients()) { assertTrue(iterator1.hasNext()); Client other = iterator1.next(); assertSettings(client.settings(), other.settings(), false); } - assertArrayEquals(cluster0.getNodeNames(), cluster1.getNodeNames()); assertMMNinNodeSetting(cluster0, cluster0.numMasterNodes()); assertMMNinNodeSetting(cluster1, cluster0.numMasterNodes()); cluster0.afterTest(); cluster1.afterTest(); } finally { 
IOUtils.close(cluster0, cluster1); + if (shouldAssertSettingsDeprecationsAndWarnings) { + assertSettingDeprecationsAndWarnings(new Setting[]{NetworkModule.HTTP_ENABLED}); + } } } @@ -346,6 +355,7 @@ public Settings transportClientSettings() { } finally { cluster.close(); } + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } private Path[] getNodePaths(InternalTestCluster cluster, String name) { @@ -446,6 +456,7 @@ public Settings transportClientSettings() { } finally { cluster.close(); } + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } public void testTwoNodeCluster() throws Exception { @@ -505,5 +516,6 @@ public Settings onNodeStopped(String nodeName) throws Exception { } finally { cluster.close(); } + assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED }); } } diff --git a/x-pack/NOTICE.txt b/x-pack/NOTICE.txt new file mode 100644 index 0000000000000..3aa4dffcd74ee --- /dev/null +++ b/x-pack/NOTICE.txt @@ -0,0 +1,2 @@ +Elasticsearch X-Pack +Copyright 2009-2017 Elasticsearch diff --git a/x-pack/README.md b/x-pack/README.md new file mode 100644 index 0000000000000..351b14caa384a --- /dev/null +++ b/x-pack/README.md @@ -0,0 +1,5 @@ +# Elastic License Functionality +This directory tree contains files subject to the Elastic License. The files +subject to the Elastic License are grouped in this directory to clearly +separate them from files licensed under the Apache License 2.0. + diff --git a/x-pack/build.gradle b/x-pack/build.gradle new file mode 100644 index 0000000000000..88e09d82168f7 --- /dev/null +++ b/x-pack/build.gradle @@ -0,0 +1,67 @@ +import org.elasticsearch.gradle.BuildPlugin +import org.elasticsearch.gradle.plugin.PluginBuildPlugin +import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.precommit.LicenseHeadersTask + +Project xpackRootProject = project + +apply plugin: 'nebula.info-scm' +final String licenseCommit +if (version.endsWith('-SNAPSHOT')) { + licenseCommit = xpackRootProject.scminfo.change ?: "master" // leniency for non git builds +} else { + licenseCommit = "v${version}" +} + +subprojects { + group = 'org.elasticsearch.plugin' + ext.xpackRootProject = xpackRootProject + ext.xpackProject = { String projectName -> xpackRootProject.project(projectName) } + // helper method to find the path to a module + ext.xpackModule = { String moduleName -> xpackProject("plugin:${moduleName}").path } + + ext.licenseName = 'Elastic License' + ext.licenseUrl = "https://raw.githubusercontent.com/elastic/elasticsearch/${licenseCommit}/licenses/ELASTIC-LICENSE.txt" + + project.ext.licenseFile = rootProject.file('licenses/ELASTIC-LICENSE.txt') + project.ext.noticeFile = xpackRootProject.file('NOTICE.txt') + + plugins.withType(PluginBuildPlugin).whenPluginAdded { + project.esplugin.licenseFile = rootProject.file('licenses/ELASTIC-LICENSE.txt') + project.esplugin.noticeFile = xpackRootProject.file('NOTICE.txt') + } +} + +File checkstyleSuppressions = file('dev-tools/checkstyle_suppressions.xml') +subprojects { + tasks.withType(Checkstyle) { + inputs.file(checkstyleSuppressions) + // Use x-pack-elasticsearch specific suppressions file rather than the open source one. 
+ configProperties = [ + suppressions: checkstyleSuppressions + ] + } + + tasks.withType(LicenseHeadersTask.class) { + approvedLicenses = ['Elastic License', 'Generated'] + additionalLicense 'ELAST', 'Elastic License', 'Licensed under the Elastic License' + } + ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-core:${version}": xpackModule('core')] + ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-deprecation:${version}": xpackModule('deprecation')] + ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-graph:${version}": xpackModule('graph')] + ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-logstash:${version}": xpackModule('logstash')] + ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-ml:${version}": xpackModule('ml')] + ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-monitoring:${version}": xpackModule('monitoring')] + ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-security:${version}": xpackModule('security')] + ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-upgrade:${version}": xpackModule('upgrade')] + ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-watcher:${version}": xpackModule('watcher')] + + bwcVersions.snapshotProjectNames.each { snapshotName -> + Version snapshot = bwcVersions.getSnapshotForProject(snapshotName) + if (snapshot != null && snapshot.onOrAfter("6.3.0")) { + String snapshotProject = ":x-pack:plugin:bwc:${snapshotName}" + project(snapshotProject).ext.bwcVersion = snapshot + ext.projectSubstitutions["org.elasticsearch.plugin:x-pack:${snapshot}"] = snapshotProject + } + } +} diff --git a/x-pack/dev-tools/checkstyle_suppressions.xml b/x-pack/dev-tools/checkstyle_suppressions.xml new file mode 100644 index 0000000000000..4748436a84979 --- /dev/null +++ b/x-pack/dev-tools/checkstyle_suppressions.xml @@ -0,0 +1,29 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/x-pack/dev-tools/smoke_test_xpack_rc.py b/x-pack/dev-tools/smoke_test_xpack_rc.py new file mode 100644 index 0000000000000..eb9918d0312d1 --- /dev/null +++ b/x-pack/dev-tools/smoke_test_xpack_rc.py @@ -0,0 +1,200 @@ +# Smoke-tests an x-pack release candidate +# +# 1. Downloads the zip file from the staging URL +# 2. Installs the x-pack plugin +# 3. Starts one node for the zip package and checks: +# -- if the x-pack plugin is loaded +# -- the xpack info page, verifying that the response returns the correct version and feature set info +# +# USAGE: +# +# python3 -B ./dev-tools/smoke_test_xpack_rc.py --version 5.0.0-beta1 --hash bfa3e47 +# + +import argparse +import tempfile +import os +import signal +import shutil +import urllib +import urllib.request +import time +import json +import base64 +from http.client import HTTPConnection + +# in case of debug, uncomment +# HTTPConnection.debuglevel = 4 + +try: + JAVA_HOME = os.environ['JAVA_HOME'] +except KeyError: + raise RuntimeError(""" + Please set JAVA_HOME in the env before running the release tool + On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.8*'`""") + +def java_exe(): + path = JAVA_HOME + return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (path, path, path) + +def verify_java_version(version): + s = os.popen('%s; java -version 2>&1' % java_exe()).read() + if ' version "%s.' 
% version not in s: + raise RuntimeError('got wrong version for java %s:\n%s' % (version, s)) + +def read_fully(file): + with open(file, encoding='utf-8') as f: + return f.read() + +def wait_for_node_startup(es_dir, timeout=60, headers={}): + print(' Waiting until node becomes available for at most %s seconds' % timeout) + for _ in range(timeout): + conn = None + try: + time.sleep(1) + host = get_host_from_ports_file(es_dir) + conn = HTTPConnection(host, timeout=1) + conn.request('GET', '/', headers=headers) + res = conn.getresponse() + if res.status == 200: + return True + except IOError as e: + pass + # that is ok, it might not be there yet + finally: + if conn: + conn.close() + return False + +def download_release(version, release_hash, url): + print('Downloading release %s from %s' % (version, url)) + tmp_dir = tempfile.mkdtemp() + try: + downloaded_files = [] + print(' ' + '*' * 80) + print(' Downloading %s' % (url)) + file = ('elasticsearch-%s.zip' % version) + artifact_path = os.path.join(tmp_dir, file) + downloaded_files.append(artifact_path) + urllib.request.urlretrieve(url, os.path.join(tmp_dir, file)) + print(' ' + '*' * 80) + print() + + smoke_test_release(version, downloaded_files, release_hash) + print(' SUCCESS') + finally: + shutil.rmtree(tmp_dir) + +def get_host_from_ports_file(es_dir): + return read_fully(os.path.join(es_dir, 'logs/http.ports')).splitlines()[0] + +def smoke_test_release(release, files, release_hash): + for release_file in files: + if not os.path.isfile(release_file): + raise RuntimeError('Smoke test failed: missing file %s' % (release_file)) + tmp_dir = tempfile.mkdtemp() + run('unzip %s -d %s' % (release_file, tmp_dir)) + + es_dir = os.path.join(tmp_dir, 'elasticsearch-%s' % (release)) + es_run_path = os.path.join(es_dir, 'bin/elasticsearch') + + print(' Smoke testing package [%s]' % release_file) + es_plugin_path = os.path.join(es_dir, 'bin/elasticsearch-plugin') + + print(' Install xpack [%s]' % es_plugin_path) + run('%s; ES_JAVA_OPTS="-Des.plugins.staging=%s" %s install -b x-pack' % (java_exe(), release_hash, es_plugin_path)) + headers = { 'Authorization' : 'Basic %s' % base64.b64encode(b"es_admin:foobar").decode("UTF-8") } + es_shield_path = os.path.join(es_dir, 'bin/x-pack/users') + + print(" Install dummy shield user") + run('%s; %s useradd es_admin -r superuser -p foobar' % (java_exe(), es_shield_path)) + + print(' Starting elasticsearch daemon from [%s]' % es_dir) + try: + run('%s; %s -Enode.name=smoke_tester -Ecluster.name=prepare_release -Erepositories.url.allowed_urls=http://snapshot.test* %s -Epidfile=%s -Enode.portsfile=true' + % (java_exe(), es_run_path, '-d', os.path.join(es_dir, 'es-smoke.pid'))) + if not wait_for_node_startup(es_dir, headers=headers): + print("elasticsearch logs:") + print('*' * 80) + logs = read_fully(os.path.join(es_dir, 'logs/prepare_release.log')) + print(logs) + print('*' * 80) + raise RuntimeError('server didn\'t start up') + try: # we now get / and /_nodes to fetch basic info like hashes etc and the installed plugins + host = get_host_from_ports_file(es_dir) + conn = HTTPConnection(host, timeout=20) + + # check if plugin is loaded + conn.request('GET', '/_nodes/plugins?pretty=true', headers=headers) + res = conn.getresponse() + if res.status == 200: + nodes = json.loads(res.read().decode("utf-8"))['nodes'] + for _, node in nodes.items(): + node_plugins = node['plugins'] + for node_plugin in node_plugins: + if node_plugin['name'] != 'x-pack': + raise RuntimeError('Unexpected plugin %s, expected x-pack only' % node_plugin['name']) + 
else: + raise RuntimeError('Expected HTTP 200 but got %s' % res.status) + + # check if license is the default one + # also sleep for a few more seconds, as the initial license generation might take some time + time.sleep(5) + conn.request('GET', '/_xpack', headers=headers) + res = conn.getresponse() + if res.status == 200: + xpack = json.loads(res.read().decode("utf-8")) + if xpack['license']['type'] != 'trial': + raise RuntimeError('expected license type to be trial, was %s' % xpack['license']['type']) + if xpack['license']['mode'] != 'trial': + raise RuntimeError('expected license mode to be trial, was %s' % xpack['license']['mode']) + if xpack['license']['status'] != 'active': + raise RuntimeError('expected license status to be active, was %s' % xpack['license']['status']) + else: + raise RuntimeError('Expected HTTP 200 but got %s' % res.status) + + finally: + conn.close() + finally: + pid_path = os.path.join(es_dir, 'es-smoke.pid') + if os.path.exists(pid_path): # try reading the pid and kill the node + pid = int(read_fully(pid_path)) + os.kill(pid, signal.SIGKILL) + shutil.rmtree(tmp_dir) + print(' ' + '*' * 80) + print() + +# console colors +COLOR_OK = '\033[92m' +COLOR_END = '\033[0m' + +def run(command, env_vars=None): + if env_vars: + for key, value in env_vars.items(): + os.putenv(key, value) + print('*** Running: %s%s%s' % (COLOR_OK, command, COLOR_END)) + if os.system(command): + raise RuntimeError(' FAILED: %s' % (command)) + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Smoke-tests a release candidate from the S3 staging repo') + parser.add_argument('--version', '-v', dest='version', default=None, + help='The Elasticsearch version to smoke-test', required=True) + parser.add_argument('--hash', '-r', dest='hash', default=None, required=True, + help='The sha1 short hash of the release git commit to smoke-test') + parser.add_argument('--fetch_url', '-u', dest='url', default=None, + help='Fetch the release from the specified URL instead of the default staging URL') + parser.set_defaults(hash=None) + parser.set_defaults(version=None) + parser.set_defaults(url=None) + args = parser.parse_args() + version = args.version + hash = args.hash + url = args.url + verify_java_version('1.8') + if url: + download_url = url + else: + download_url = 'https://staging.elastic.co/%s-%s/downloads/elasticsearch/elasticsearch-%s.zip' % (version, hash, version) + download_release(version, hash, download_url) + diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle new file mode 100644 index 0000000000000..ab9bc99459968 --- /dev/null +++ b/x-pack/docs/build.gradle @@ -0,0 +1,686 @@ +import org.elasticsearch.gradle.test.NodeInfo + +import java.nio.charset.StandardCharsets + +apply plugin: 'elasticsearch.docs-test' + +/* List of files that have snippets that probably should be converted to + * `// CONSOLE` and `// TESTRESPONSE` but have yet to be converted. Try and + * only remove entries from this list. When it is empty we'll remove it + * entirely and have a party! There will be cake and everything.... 
*/ +buildRestTests.expectedUnconvertedCandidates = [ + 'en/ml/functions/count.asciidoc', + 'en/ml/functions/geo.asciidoc', + 'en/ml/functions/info.asciidoc', + 'en/ml/functions/metric.asciidoc', + 'en/ml/functions/rare.asciidoc', + 'en/ml/functions/sum.asciidoc', + 'en/ml/functions/time.asciidoc', + 'en/ml/aggregations.asciidoc', + 'en/ml/customurl.asciidoc', + 'en/monitoring/indices.asciidoc', + 'en/rest-api/security/ssl.asciidoc', + 'en/rest-api/security/users.asciidoc', + 'en/rest-api/security/tokens.asciidoc', + 'en/rest-api/watcher/put-watch.asciidoc', + 'en/security/authentication/user-cache.asciidoc', + 'en/security/authorization/field-and-document-access-control.asciidoc', + 'en/security/authorization/run-as-privilege.asciidoc', + 'en/security/ccs-clients-integrations/http.asciidoc', + 'en/security/authorization/custom-roles-provider.asciidoc', + 'en/watcher/actions/email.asciidoc', + 'en/watcher/actions/hipchat.asciidoc', + 'en/watcher/actions/index.asciidoc', + 'en/watcher/actions/logging.asciidoc', + 'en/watcher/actions/pagerduty.asciidoc', + 'en/watcher/actions/slack.asciidoc', + 'en/watcher/actions/jira.asciidoc', + 'en/watcher/actions/webhook.asciidoc', + 'en/watcher/condition/always.asciidoc', + 'en/watcher/condition/array-compare.asciidoc', + 'en/watcher/condition/compare.asciidoc', + 'en/watcher/condition/never.asciidoc', + 'en/watcher/condition/script.asciidoc', + 'en/watcher/customizing-watches.asciidoc', + 'en/watcher/example-watches/example-watch-meetupdata.asciidoc', + 'en/watcher/how-watcher-works.asciidoc', + 'en/watcher/input/chain.asciidoc', + 'en/watcher/input/http.asciidoc', + 'en/watcher/input/search.asciidoc', + 'en/watcher/input/simple.asciidoc', + 'en/watcher/transform.asciidoc', + 'en/watcher/transform/chain.asciidoc', + 'en/watcher/transform/script.asciidoc', + 'en/watcher/transform/search.asciidoc', + 'en/watcher/trigger/schedule/cron.asciidoc', + 'en/watcher/trigger/schedule/daily.asciidoc', + 'en/watcher/trigger/schedule/hourly.asciidoc', + 'en/watcher/trigger/schedule/interval.asciidoc', + 'en/watcher/trigger/schedule/monthly.asciidoc', + 'en/watcher/trigger/schedule/weekly.asciidoc', + 'en/watcher/trigger/schedule/yearly.asciidoc', + 'en/watcher/troubleshooting.asciidoc', + 'en/rest-api/license/delete-license.asciidoc', + 'en/rest-api/license/update-license.asciidoc', + 'en/ml/api-quickref.asciidoc', + 'en/rest-api/ml/delete-calendar-event.asciidoc', + 'en/rest-api/ml/delete-snapshot.asciidoc', + 'en/rest-api/ml/forecast.asciidoc', + 'en/rest-api/ml/get-bucket.asciidoc', + 'en/rest-api/ml/get-job-stats.asciidoc', + 'en/rest-api/ml/get-overall-buckets.asciidoc', + 'en/rest-api/ml/get-category.asciidoc', + 'en/rest-api/ml/get-record.asciidoc', + 'en/rest-api/ml/get-influencer.asciidoc', + 'en/rest-api/ml/get-snapshot.asciidoc', + 'en/rest-api/ml/post-data.asciidoc', + 'en/rest-api/ml/preview-datafeed.asciidoc', + 'en/rest-api/ml/revert-snapshot.asciidoc', + 'en/rest-api/ml/update-snapshot.asciidoc', + 'en/rest-api/ml/validate-detector.asciidoc', + 'en/rest-api/ml/validate-job.asciidoc', + 'en/rest-api/security/authenticate.asciidoc', + 'en/rest-api/watcher/stats.asciidoc', + 'en/security/authorization.asciidoc', + 'en/watcher/example-watches/watching-time-series-data.asciidoc', +] + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path: xpackProject('plugin').path, configuration: 'testArtifacts') +} + +Closure 
waitWithAuth = { NodeInfo node, AntBuilder ant -> + File tmpFile = new File(node.cwd, 'wait.success') + // wait up to twenty seconds + final long stopTime = System.currentTimeMillis() + 20000L; + Exception lastException = null; + while (System.currentTimeMillis() < stopTime) { + lastException = null; + // we use custom wait logic here as the elastic user is not available immediately and ant.get will fail when a 401 is returned + HttpURLConnection httpURLConnection = null; + try { + httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health").openConnection(); + httpURLConnection.setRequestProperty("Authorization", "Basic " + + Base64.getEncoder().encodeToString("test_admin:x-pack-test-password".getBytes(StandardCharsets.UTF_8))); + httpURLConnection.setRequestMethod("GET"); + httpURLConnection.setConnectTimeout(1000); + httpURLConnection.setReadTimeout(30000); + httpURLConnection.connect(); + if (httpURLConnection.getResponseCode() == 200) { + tmpFile.withWriter StandardCharsets.UTF_8.name(), { + it.write(httpURLConnection.getInputStream().getText(StandardCharsets.UTF_8.name())) + } + break; + } + } catch (Exception e) { + logger.debug("failed to call cluster health", e) + lastException = e + } finally { + if (httpURLConnection != null) { + httpURLConnection.disconnect(); + } + } + + // did not start, so wait a bit before trying again + Thread.sleep(500L); + } + if (tmpFile.exists() == false && lastException != null) { + logger.error("final attempt of calling cluster health failed", lastException) + } + return tmpFile.exists() +} + +// copy xpack rest api +File xpackResources = new File(xpackProject('plugin').projectDir, 'src/test/resources') +project.copyRestSpec.from(xpackResources) { + include 'rest-api-spec/api/**' +} +integTestCluster { + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.authc.token.enabled', 'true' + // Disable monitoring exporters for the docs tests + setting 'xpack.monitoring.exporters._local.type', 'local' + setting 'xpack.monitoring.exporters._local.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + setupCommand 'setupTestAdmin', + 'bin/elasticsearch-users', 'useradd', 'test_admin', '-p', 'x-pack-test-password', '-r', 'superuser' + waitCondition = waitWithAuth +} + + + +buildRestTests.docs = fileTree(projectDir) { + // No snippets in here! + exclude 'build.gradle' + // That is where the snippets go, not where they come from! + exclude 'build' + // These files simply don't pass yet. We should figure out how to fix them. 
+ exclude 'en/watcher/reference/actions.asciidoc' + exclude 'en/rest-api/graph/explore.asciidoc' +} + +Map setups = buildRestTests.setups +setups['my_inactive_watch'] = ''' + - do: + xpack.watcher.put_watch: + id: "my_watch" + active: false + body: > + { + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "payload": { + "send": "yes" + } + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "index": { + "index": "test", + "doc_type": "test2" + } + } + } + } + - match: { _id: "my_watch" } +''' +setups['my_active_watch'] = setups['my_inactive_watch'].replace( + 'active: false', 'active: true') + +// Used by SQL because it looks SQL-ish +setups['library'] = ''' + - do: + indices.create: + index: library + body: + settings: + number_of_shards: 1 + number_of_replicas: 1 + mappings: + book: + properties: + name: + type: text + fields: + keyword: + type: keyword + author: + type: text + fields: + keyword: + type: keyword + release_date: + type: date + page_count: + type: short + - do: + bulk: + index: library + type: book + refresh: true + body: | + {"index":{"_id": "Leviathan Wakes"}} + {"name": "Leviathan Wakes", "author": "James S.A. Corey", "release_date": "2011-06-02", "page_count": 561} + {"index":{"_id": "Hyperion"}} + {"name": "Hyperion", "author": "Dan Simmons", "release_date": "1989-05-26", "page_count": 482} + {"index":{"_id": "Dune"}} + {"name": "Dune", "author": "Frank Herbert", "release_date": "1965-06-01", "page_count": 604} + {"index":{"_id": "Dune Messiah"}} + {"name": "Dune Messiah", "author": "Frank Herbert", "release_date": "1969-10-15", "page_count": 331} + {"index":{"_id": "Children of Dune"}} + {"name": "Children of Dune", "author": "Frank Herbert", "release_date": "1976-04-21", "page_count": 408} + {"index":{"_id": "God Emperor of Dune"}} + {"name": "God Emperor of Dune", "author": "Frank Herbert", "release_date": "1981-05-28", "page_count": 454} + {"index":{"_id": "Consider Phlebas"}} + {"name": "Consider Phlebas", "author": "Iain M. Banks", "release_date": "1987-04-23", "page_count": 471} + {"index":{"_id": "Pandora's Star"}} + {"name": "Pandora's Star", "author": "Peter F. 
Hamilton", "release_date": "2004-03-02", "page_count": 768} + {"index":{"_id": "Revelation Space"}} + {"name": "Revelation Space", "author": "Alastair Reynolds", "release_date": "2000-03-15", "page_count": 585} + {"index":{"_id": "A Fire Upon the Deep"}} + {"name": "A Fire Upon the Deep", "author": "Vernor Vinge", "release_date": "1992-06-01", "page_count": 613} + {"index":{"_id": "Ender's Game"}} + {"name": "Ender's Game", "author": "Orson Scott Card", "release_date": "1985-06-01", "page_count": 324} + {"index":{"_id": "1984"}} + {"name": "1984", "author": "George Orwell", "release_date": "1985-06-01", "page_count": 328} + {"index":{"_id": "Fahrenheit 451"}} + {"name": "Fahrenheit 451", "author": "Ray Bradbury", "release_date": "1953-10-15", "page_count": 227} + {"index":{"_id": "Brave New World"}} + {"name": "Brave New World", "author": "Aldous Huxley", "release_date": "1932-06-01", "page_count": 268} + {"index":{"_id": "Foundation"}} + {"name": "Foundation", "author": "Isaac Asimov", "release_date": "1951-06-01", "page_count": 224} + {"index":{"_id": "The Giver"}} + {"name": "The Giver", "author": "Lois Lowry", "release_date": "1993-04-26", "page_count": 208} + {"index":{"_id": "Slaughterhouse-Five"}} + {"name": "Slaughterhouse-Five", "author": "Kurt Vonnegut", "release_date": "1969-06-01", "page_count": 275} + {"index":{"_id": "The Hitchhiker's Guide to the Galaxy"}} + {"name": "The Hitchhiker's Guide to the Galaxy", "author": "Douglas Adams", "release_date": "1979-10-12", "page_count": 180} + {"index":{"_id": "Snow Crash"}} + {"name": "Snow Crash", "author": "Neal Stephenson", "release_date": "1992-06-01", "page_count": 470} + {"index":{"_id": "Neuromancer"}} + {"name": "Neuromancer", "author": "William Gibson", "release_date": "1984-07-01", "page_count": 271} + {"index":{"_id": "The Handmaid's Tale"}} + {"name": "The Handmaid's Tale", "author": "Margaret Atwood", "release_date": "1985-06-01", "page_count": 311} + {"index":{"_id": "Starship Troopers"}} + {"name": "Starship Troopers", "author": "Robert A. Heinlein", "release_date": "1959-12-01", "page_count": 335} + {"index":{"_id": "The Left Hand of Darkness"}} + {"name": "The Left Hand of Darkness", "author": "Ursula K. Le Guin", "release_date": "1969-06-01", "page_count": 304} + {"index":{"_id": "The Moon is a Harsh Mistress"}} + {"name": "The Moon is a Harsh Mistress", "author": "Robert A. 
Heinlein", "release_date": "1966-04-01", "page_count": 288} + +''' +setups['server_metrics_index'] = ''' + - do: + indices.create: + index: server-metrics + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + metric: + properties: + timestamp: + type: date + total: + type: long +''' +setups['server_metrics_data'] = setups['server_metrics_index'] + ''' + - do: + bulk: + index: server-metrics + type: metric + refresh: true + body: | + {"index": {"_id":"1177"}} + {"timestamp":"2017-03-23T13:00:00","total":40476} + {"index": {"_id":"1178"}} + {"timestamp":"2017-03-23T13:00:00","total":15287} + {"index": {"_id":"1179"}} + {"timestamp":"2017-03-23T13:00:00","total":-776} + {"index": {"_id":"1180"}} + {"timestamp":"2017-03-23T13:00:00","total":11366} + {"index": {"_id":"1181"}} + {"timestamp":"2017-03-23T13:00:00","total":3606} + {"index": {"_id":"1182"}} + {"timestamp":"2017-03-23T13:00:00","total":19006} + {"index": {"_id":"1183"}} + {"timestamp":"2017-03-23T13:00:00","total":38613} + {"index": {"_id":"1184"}} + {"timestamp":"2017-03-23T13:00:00","total":19516} + {"index": {"_id":"1185"}} + {"timestamp":"2017-03-23T13:00:00","total":-258} + {"index": {"_id":"1186"}} + {"timestamp":"2017-03-23T13:00:00","total":9551} + {"index": {"_id":"1187"}} + {"timestamp":"2017-03-23T13:00:00","total":11217} + {"index": {"_id":"1188"}} + {"timestamp":"2017-03-23T13:00:00","total":22557} + {"index": {"_id":"1189"}} + {"timestamp":"2017-03-23T13:00:00","total":40508} + {"index": {"_id":"1190"}} + {"timestamp":"2017-03-23T13:00:00","total":11887} + {"index": {"_id":"1191"}} + {"timestamp":"2017-03-23T13:00:00","total":31659} +''' +setups['server_metrics_job'] = setups['server_metrics_data'] + ''' + - do: + xpack.ml.put_job: + job_id: "total-requests" + body: > + { + "description" : "Total sum of requests", + "analysis_config" : { + "bucket_span":"10m", + "detectors" :[ + { + "detector_description": "Sum of total", + "function": "sum", + "field_name": "total" + } + ]}, + "data_description" : { + "time_field":"timestamp", + "time_format": "epoch_ms" + } + } +''' +setups['server_metrics_datafeed'] = setups['server_metrics_job'] + ''' + - do: + xpack.ml.put_datafeed: + datafeed_id: "datafeed-total-requests" + body: > + { + "job_id":"total-requests", + "indexes":"server-metrics" + } +''' +setups['server_metrics_openjob'] = setups['server_metrics_datafeed'] + ''' + - do: + xpack.ml.open_job: + job_id: "total-requests" +''' +setups['server_metrics_startdf'] = setups['server_metrics_openjob'] + ''' + - do: + xpack.ml.start_datafeed: + datafeed_id: "datafeed-total-requests" +''' +setups['calendar_outages'] = ''' + - do: + xpack.ml.put_calendar: + calendar_id: "planned-outages" +''' +setups['calendar_outages_addevent'] = setups['calendar_outages'] + ''' + - do: + xpack.ml.post_calendar_events: + calendar_id: "planned-outages" + body: > + { "description": "event 1", "start_time": "2017-12-01T00:00:00Z", "end_time": "2017-12-02T00:00:00Z", "calendar_id": "planned-outages" } + + +''' +setups['calendar_outages_openjob'] = setups['server_metrics_openjob'] + ''' + - do: + xpack.ml.put_calendar: + calendar_id: "planned-outages" +''' +setups['calendar_outages_addjob'] = setups['server_metrics_openjob'] + ''' + - do: + xpack.ml.put_calendar: + calendar_id: "planned-outages" + body: > + { + "job_ids": ["total-requests"] + } +''' +setups['calendar_outages_addevent'] = setups['calendar_outages_addjob'] + ''' + - do: + xpack.ml.post_calendar_events: + calendar_id: "planned-outages" + body: > + { "events" : [ 
+ { "description": "event 1", "start_time": "1513641600000", "end_time": "1513728000000"}, + { "description": "event 2", "start_time": "1513814400000", "end_time": "1513900800000"}, + { "description": "event 3", "start_time": "1514160000000", "end_time": "1514246400000"} + ]} +''' +setups['role_mapping'] = ''' + - do: + xpack.security.put_role_mapping: + name: "mapping1" + body: > + { + "enabled": true, + "roles": [ "user" ], + "rules": { "field": { "username": "*" } } + } +''' +setups['sensor_rollup_job'] = ''' + - do: + indices.create: + index: sensor-1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + timestamp: + type: date + temperature: + type: long + voltage: + type: float + node: + type: keyword + - do: + xpack.rollup.put_job: + id: "sensor" + body: > + { + "index_pattern": "sensor-*", + "rollup_index": "sensor_rollup", + "cron": "*/30 * * * * ?", + "page_size" :1000, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h", + "delay": "7d" + }, + "terms": { + "fields": ["node"] + } + }, + "metrics": [ + { + "field": "temperature", + "metrics": ["min", "max", "sum"] + }, + { + "field": "voltage", + "metrics": ["avg"] + } + ] + } +''' +setups['sensor_started_rollup_job'] = ''' + - do: + indices.create: + index: sensor-1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + timestamp: + type: date + temperature: + type: long + voltage: + type: float + node: + type: keyword + + - do: + bulk: + index: sensor-1 + type: _doc + refresh: true + body: | + {"index":{}} + {"timestamp": 1516729294000, "temperature": 200, "voltage": 5.2, "node": "a"} + {"index":{}} + {"timestamp": 1516642894000, "temperature": 201, "voltage": 5.8, "node": "b"} + {"index":{}} + {"timestamp": 1516556494000, "temperature": 202, "voltage": 5.1, "node": "a"} + {"index":{}} + {"timestamp": 1516470094000, "temperature": 198, "voltage": 5.6, "node": "b"} + {"index":{}} + {"timestamp": 1516383694000, "temperature": 200, "voltage": 4.2, "node": "c"} + {"index":{}} + {"timestamp": 1516297294000, "temperature": 202, "voltage": 4.0, "node": "c"} + + - do: + xpack.rollup.put_job: + id: "sensor" + body: > + { + "index_pattern": "sensor-*", + "rollup_index": "sensor_rollup", + "cron": "* * * * * ?", + "page_size" :1000, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h", + "delay": "7d" + }, + "terms": { + "fields": ["node"] + } + }, + "metrics": [ + { + "field": "temperature", + "metrics": ["min", "max", "sum"] + }, + { + "field": "voltage", + "metrics": ["avg"] + } + ] + } + - do: + xpack.rollup.start_job: + id: "sensor" +''' + +setups['sensor_index'] = ''' + - do: + indices.create: + index: sensor-1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + timestamp: + type: date + temperature: + type: long + voltage: + type: float + node: + type: keyword + load: + type: double + net_in: + type: long + net_out: + type: long + hostname: + type: keyword + datacenter: + type: keyword +''' + +setups['sensor_prefab_data'] = ''' + - do: + indices.create: + index: sensor-1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + timestamp: + type: date + temperature: + type: long + voltage: + type: float + node: + type: keyword + - do: + indices.create: + index: sensor_rollup + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + node.terms.value: 
+ type: keyword + temperature.sum.value: + type: double + temperature.max.value: + type: double + temperature.min.value: + type: double + timestamp.date_histogram.time_zone: + type: keyword + timestamp.date_histogram.interval: + type: keyword + timestamp.date_histogram.timestamp: + type: date + timestamp.date_histogram._count: + type: long + voltage.avg.value: + type: double + voltage.avg._count: + type: long + _rollup.id: + type: keyword + _rollup.version: + type: long + _meta: + _rollup: + sensor: + cron: "* * * * * ?" + rollup_index: "sensor_rollup" + index_pattern: "sensor-*" + timeout: "20s" + page_size: 1000 + groups: + date_histogram: + delay: "7d" + field: "timestamp" + interval: "1h" + time_zone: "UTC" + terms: + fields: + - "node" + id: sensor + metrics: + - field: "temperature" + metrics: + - min + - max + - sum + - field: "voltage" + metrics: + - avg + + - do: + bulk: + index: sensor_rollup + type: _doc + refresh: true + body: | + {"index":{}} + {"node.terms.value":"b","temperature.sum.value":201.0,"temperature.max.value":201.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":201.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.800000190734863,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516640400000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + {"node.terms.value":"c","temperature.sum.value":200.0,"temperature.max.value":200.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":200.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":4.199999809265137,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516381200000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + {"node.terms.value":"a","temperature.sum.value":202.0,"temperature.max.value":202.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":202.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.099999904632568,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516554000000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + {"node.terms.value":"a","temperature.sum.value":200.0,"temperature.max.value":200.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":200.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.199999809265137,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516726800000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + 
{"node.terms.value":"b","temperature.sum.value":198.0,"temperature.max.value":198.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":198.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.599999904632568,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516467600000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + {"node.terms.value":"c","temperature.sum.value":202.0,"temperature.max.value":202.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":202.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":4.0,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516294800000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + +''' diff --git a/x-pack/docs/en/commands/certgen.asciidoc b/x-pack/docs/en/commands/certgen.asciidoc new file mode 100644 index 0000000000000..c2a00f11b6958 --- /dev/null +++ b/x-pack/docs/en/commands/certgen.asciidoc @@ -0,0 +1,157 @@ +[role="xpack"] +[[certgen]] +== certgen + +deprecated[6.1,Replaced by <>.] + +The `elasticsearch-certgen` command simplifies the creation of certificate +authorities (CA), certificate signing requests (CSR), and signed certificates +for use with the Elastic Stack. Though this command is deprecated, you do not +need to replace CAs, CSRs, or certificates that it created. + +[float] +=== Synopsis + +[source,shell] +-------------------------------------------------- +bin/elasticsearch-certgen +(([--cert ] [--days ] [--dn ] [--key ] +[--keysize ] [--pass ] [--p12 ]) +| [--csr]) +[-E ] [-h, --help] [--in ] [--out ] +([-s, --silent] | [-v, --verbose]) +-------------------------------------------------- + +[float] +=== Description + +By default, the command runs in interactive mode and you are prompted for +information about each instance. An instance is any piece of the Elastic Stack +that requires a Transport Layer Security (TLS) or SSL certificate. Depending on +your configuration, {es}, Logstash, {kib}, and Beats might all require a +certificate and private key. + +The minimum required value for each instance is a name. This can simply be the +hostname, which is used as the Common Name of the certificate. You can also use +a full distinguished name. IP addresses and DNS names are optional. Multiple +values can be specified as a comma separated string. If no IP addresses or DNS +names are provided, you might disable hostname verification in your TLS or SSL +configuration. + +Depending on the parameters that you specify, you are also prompted for +necessary information such as the path for the output file and the CA private +key password. + +The `elasticsearch-certgen` command also supports a silent mode of operation to +enable easier batch operations. For more information, see <>. + +The output file is a zip file that contains the signed certificates and private +keys for each instance. If you chose to generate a CA, which is the default +behavior, the certificate and private key are included in the output file. If +you chose to generate CSRs, you should provide them to your commercial or +organization-specific certificate authority to obtain signed certificates. 
The +signed certificates must be in PEM format to work with {security}. + +[float] +=== Parameters + +`--cert `:: Specifies to generate new instance certificates and keys +using an existing CA certificate, which is provided in the `` argument. +This parameter cannot be used with the `-csr` parameter. + +`--csr`:: Specifies to operate in certificate signing request mode. + +`--days `:: +Specifies an integer value that represents the number of days the generated keys +are valid. The default value is `1095`. This parameter cannot be used with the +`-csr` parameter. + +`--dn `:: +Defines the _Distinguished Name_ that is used for the generated CA certificate. +The default value is `CN=Elastic Certificate Tool Autogenerated CA`. +This parameter cannot be used with the `-csr` parameter. + +`-E `:: Configures a setting. + +`-h, --help`:: Returns all of the command parameters. + +`--in `:: Specifies the file that is used to run in silent mode. The +input file must be a YAML file, as described in <>. + +`--key `:: Specifies the _private-key_ file for the CA certificate. +This parameter is required whenever the `-cert` parameter is used. + +`--keysize `:: +Defines the number of bits that are used in generated RSA keys. The default +value is `2048`. + +`--out `:: Specifies a path for the output file. + +`--pass `:: Specifies the password for the CA private key. +If the `-key` parameter is provided, then this is the password for the existing +private key file. Otherwise, it is the password that should be applied to the +generated CA key. This parameter cannot be used with the `-csr` parameter. + +`--p12 `:: +Generate a PKCS#12 (`.p12` or `.pfx`) container file for each of the instance +certificates and keys. The generated file is protected by the supplied password, +which can be blank. This parameter cannot be used with the `-csr` parameter. + +`-s, --silent`:: Shows minimal output. + +`-v, --verbose`:: Shows verbose output. + +[float] +=== Examples + +[float] +[[certgen-silent]] +==== Using `elasticsearch-certgen` in Silent Mode + +To use the silent mode of operation, you must create a YAML file that contains +information about the instances. It must match the following format: + +[source, yaml] +-------------------------------------------------- +instances: + - name: "node1" <1> + ip: <2> + - "192.0.2.1" + dns: <3> + - "node1.mydomain.com" + - name: "node2" + ip: + - "192.0.2.2" + - "198.51.100.1" + - name: "node3" + - name: "node4" + dns: + - "node4.mydomain.com" + - "node4.internal" + - name: "CN=node5,OU=IT,DC=mydomain,DC=com" + filename: "node5" <4> +-------------------------------------------------- +<1> The name of the instance. This can be a simple string value or can be a +Distinguished Name (DN). This is the only required field. +<2> An optional array of strings that represent IP Addresses for this instance. +Both IPv4 and IPv6 values are allowed. The values are added as Subject +Alternative Names. +<3> An optional array of strings that represent DNS names for this instance. +The values are added as Subject Alternative Names. +<4> The filename to use for this instance. This name is used as the name of the +directory that contains the instance's files in the output. It is also used in +the names of the files within the directory. This filename should not have an +extension. Note: If the `name` provided for the instance does not represent a +valid filename, then the `filename` field must be present. 
+ +When your YAML file is ready, you can use the `elasticsearch-certgen` command to +generate certificates or certificate signing requests. Simply use the `-in` +parameter to specify the location of the file. For example: + +[source, sh] +-------------------------------------------------- +bin/elasticsearch-certgen -in instances.yml +-------------------------------------------------- + +This command generates a CA certificate and private key as well as certificates +and private keys for the instances that are listed in the YAML file. diff --git a/x-pack/docs/en/commands/certutil.asciidoc b/x-pack/docs/en/commands/certutil.asciidoc new file mode 100644 index 0000000000000..ad265c89f10e8 --- /dev/null +++ b/x-pack/docs/en/commands/certutil.asciidoc @@ -0,0 +1,289 @@ +[role="xpack"] +[[certutil]] +== elasticsearch-certutil + +The `elasticsearch-certutil` command simplifies the creation of certificates for +use with Transport Layer Security (TLS) in the Elastic Stack. + +[float] +=== Synopsis + +[source,shell] +-------------------------------------------------- +bin/elasticsearch-certutil +( +(ca [--ca-dn ] [--days ] [--pem]) + +| (cert ([--ca ] | [--ca-cert --ca-key ]) +[--ca-dn ] [--ca-pass ] [--days ] +[--dns ] [--in ] [--ip ] +[--keep-ca-key] [--multiple] [--name ] [--pem]) + +| (csr [--dns ] [--in ] [--ip ] +[--name ]) + +[-E ] [--keysize ] [--out ] +[--pass ] +) +[-h, --help] ([-s, --silent] | [-v, --verbose]) +-------------------------------------------------- + +[float] +=== Description + +You can specify one of the following modes: `ca`, `cert`, `csr`. The +`elasticsearch-certutil` command also supports a silent mode of operation to +enable easier batch operations. + +[float] +[[certutil-ca]] +==== CA mode + +The `ca` mode generates a new certificate authority (CA). By default, it +produces a single PKCS#12 output file, which holds the CA certificate and the +private key for the CA. If you specify the `--pem` parameter, the command +generates a zip file, which contains the certificate and private key in PEM +format. + +You can subsequently use these files as input for the `cert` mode of the command. + +[float] +[[certutil-cert]] +==== CERT mode + +The `cert` mode generates X.509 certificates and private keys. By default, it +produces a single certificate and key for use on a single instance. + +To generate certificates and keys for multiple instances, specify the +`--multiple` parameter, which prompts you for details about each instance. +Alternatively, you can use the `--in` parameter to specify a YAML file that +contains details about the instances. + +An instance is any piece of the Elastic Stack that requires a TLS or SSL +certificate. Depending on your configuration, {es}, Logstash, {kib}, and Beats +might all require a certificate and private key. The minimum required +information for an instance is its name, which is used as the common name for +the certificate. The instance name can be a hostname value or a full +distinguished name. If the instance name would result in an invalid file or +directory name, you must also specify a file name in the `--name` command +parameter or in the `filename` field in an input YAML file. + +You can optionally provide IP addresses or DNS names for each instance. If +neither IP addresses nor DNS names are specified, the Elastic stack products +cannot perform hostname verification and you might need to configure the +`verfication_mode` security setting to `certificate` only. For more information +about this setting, see <>. 
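+
+For example, if the certificates do not contain IP addresses or DNS names, a
+minimal transport TLS configuration in `elasticsearch.yml` might disable
+hostname verification as shown in the following sketch. The certificate path is
+illustrative and assumes the default PKCS#12 output of this command:
+
+[source,yaml]
+--------------------------------------------------
+# Enable TLS on the transport layer and validate certificates against the CA
+# without checking that hostnames or IP addresses match the certificate.
+xpack.security.transport.ssl.enabled: true
+xpack.security.transport.ssl.verification_mode: certificate
+xpack.security.transport.ssl.keystore.path: certs/elastic-certificates.p12
+xpack.security.transport.ssl.truststore.path: certs/elastic-certificates.p12
+--------------------------------------------------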
+ +All certificates that are generated by this command are signed by a CA. You can +provide your own CA with the `--ca` or `--ca-cert` parameters. Otherwise, the +command automatically generates a new CA for you. For more information about +generating a CA, see the <>. + +By default, the `cert` mode produces a single PKCS#12 output file which holds +the instance certificate, the instance private key, and the CA certificate. If +you specify the `--pem` parameter, the command generates PEM formatted +certificates and keys and packages them into a zip file. +If you specify the `--keep-ca-key`, `--multiple` or `--in` parameters, +the command produces a zip file containing the generated certificates and keys. + +[float] +[[certutil-csr]] +==== CSR mode + +The `csr` mode generates certificate signing requests (CSRs) that you can send +to a trusted certificate authority to obtain signed certificates. The signed +certificates must be in PEM or PKCS#12 format to work with {security}. + +By default, the command produces a single CSR for a single instance. + +To generate CSRs for multiple instances, specify the `--multiple` parameter, +which prompts you for details about each instance. Alternatively, you can use +the `--in` parameter to specify a YAML file that contains details about the +instances. + +The `cert` mode produces a single zip file which contains the CSRs and the +private keys for each instance. Each CSR is provided as a standard PEM +encoding of a PKCS#10 CSR. Each key is provided as a PEM encoding of an RSA +private key. + +[float] +=== Parameters + +`ca`:: Specifies to generate a new local certificate authority (CA). This +parameter cannot be used with the `csr` or `cert` parameters. + +`cert`:: Specifies to generate new X.509 certificates and keys. +This parameter cannot be used with the `csr` or `ca` parameters. + +`csr`:: Specifies to generate certificate signing requests. This parameter +cannot be used with the `ca` or `cert` parameters. + +`--ca `:: Specifies the path to an existing CA key pair +(in PKCS#12 format). This parameter cannot be used with the `ca` or `csr` parameters. + +`--ca-cert `:: Specifies the path to an existing CA certificate (in +PEM format). You must also specify the `--ca-key` parameter. The `--ca-cert` +parameter cannot be used with the `ca` or `csr` parameters. + +`--ca-dn `:: Defines the _Distinguished Name_ (DN) that is used for the +generated CA certificate. The default value is +`CN=Elastic Certificate Tool Autogenerated CA`. This parameter cannot be used +with the `csr` parameter. + +`--ca-key `:: Specifies the path to an existing CA private key (in +PEM format). You must also specify the `--ca-cert` parameter. The `--ca-key` +parameter cannot be used with the `ca` or `csr` parameters. + +`--ca-pass `:: Specifies the password for an existing CA private key +or the generated CA private key. This parameter cannot be used with the `ca` or +`csr` parameters. + +`--days `:: Specifies an integer value that represents the number of days the +generated certificates are valid. The default value is `1095`. This parameter +cannot be used with the `csr` parameter. + +`--dns `:: Specifies a comma-separated list of DNS names. This +parameter cannot be used with the `ca` parameter. + +`-E `:: Configures a setting. + +`-h, --help`:: Returns all of the command parameters. + +`--in `:: Specifies the file that is used to run in silent mode. The +input file must be a YAML file. This parameter cannot be used with the `ca` +parameter. 
+ +`--ip `:: Specifies a comma-separated list of IP addresses. This +parameter cannot be used with the `ca` parameter. + +`--keep-ca-key`:: When running in `cert` mode with an automatically-generated +CA, specifies to retain the CA private key for future use. + +`--keysize `:: +Defines the number of bits that are used in generated RSA keys. The default +value is `2048`. + +`--multiple`:: +Specifies to generate files for multiple instances. This parameter cannot be +used with the `ca` parameter. + +`--name `:: +Specifies the name of the generated certificate. This parameter cannot be used +with the `ca` parameter. + +`--out `:: Specifies a path for the output files. + +`--pass `:: Specifies the password for the generated private keys. ++ +Keys stored in PKCS#12 format are always password protected. ++ +Keys stored in PEM format are password protected only if the +`--pass` parameter is specified. If you do not supply an argument for the +`--pass` parameter, you are prompted for a password. ++ +If you want to specify a _blank_ password (without prompting), use +`--pass ""` (with no `=`). + +`--pem`:: Generates certificates and keys in PEM format instead of PKCS#12. This +parameter cannot be used with the `csr` parameter. + +`-s, --silent`:: Shows minimal output. + +`-v, --verbose`:: Shows verbose output. + +[float] +=== Examples + +The following command generates a CA certificate and private key in PKCS#12 +format: + +[source, sh] +-------------------------------------------------- +bin/elasticsearch-certutil ca +-------------------------------------------------- + +You are prompted for an output filename and a password. Alternatively, you can +specify the `--out` and `--pass` parameters. + +You can then generate X.509 certificates and private keys by using the new +CA. For example: + +[source, sh] +-------------------------------------------------- +bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12 +-------------------------------------------------- + +You are prompted for the CA password and for an output filename and password. +Alternatively, you can specify the `--ca-pass`, `--out`, and `--pass` parameters. + +By default, this command generates a file called `elastic-certificates.p12`, +which you can copy to the relevant configuration directory for each Elastic +product that you want to configure. For more information, see +{xpack-ref}/ssl-tls.html[Setting Up TLS on a Cluster]. + +[float] +[[certutil-silent]] +==== Using `elasticsearch-certutil` in Silent Mode + +To use the silent mode of operation, you must create a YAML file that contains +information about the instances. It must match the following format: + +[source, yaml] +-------------------------------------------------- +instances: + - name: "node1" <1> + ip: <2> + - "192.0.2.1" + dns: <3> + - "node1.mydomain.com" + - name: "node2" + ip: + - "192.0.2.2" + - "198.51.100.1" + - name: "node3" + - name: "node4" + dns: + - "node4.mydomain.com" + - "node4.internal" + - name: "CN=node5,OU=IT,DC=mydomain,DC=com" + filename: "node5" <4> +-------------------------------------------------- +<1> The name of the instance. This can be a simple string value or can be a +Distinguished Name (DN). This is the only required field. +<2> An optional array of strings that represent IP Addresses for this instance. +Both IPv4 and IPv6 values are allowed. The values are added as Subject +Alternative Names. +<3> An optional array of strings that represent DNS names for this instance. +The values are added as Subject Alternative Names. 
+<4> The filename to use for this instance. This name is used as the name of the +directory that contains the instance's files in the output. It is also used in +the names of the files within the directory. This filename should not have an +extension. Note: If the `name` provided for the instance does not represent a +valid filename, then the `filename` field must be present. + +When your YAML file is ready, you can use the `elasticsearch-certutil` command +to generate certificates or certificate signing requests. Simply use the `--in` +parameter to specify the location of the file. For example: + +[source, sh] +-------------------------------------------------- +bin/elasticsearch-certutil cert --silent --in instances.yml --out test1.zip --pass testpassword +-------------------------------------------------- + +This command generates a compressed `test1.zip` file. After you decompress the +output file, there is a directory for each instance that was listed in the +`instances.yml` file. Each instance directory contains a single PKCS#12 (`.p12`) +file, which contains the instance certificate, instance private key, and CA +certificate. + +You can also use the YAML file to generate certificate signing requests. For +example: + +[source, sh] +-------------------------------------------------- +bin/elasticsearch-certutil csr --silent --in instances.yml --out test2.zip --pass testpassword +-------------------------------------------------- + +This command generates a compressed file, which contains a directory for each +instance. Each instance directory contains a certificate signing request +(`*.csr` file) and private key (`*.key` file). diff --git a/x-pack/docs/en/commands/index.asciidoc b/x-pack/docs/en/commands/index.asciidoc new file mode 100644 index 0000000000000..164d2fc0e84f0 --- /dev/null +++ b/x-pack/docs/en/commands/index.asciidoc @@ -0,0 +1,26 @@ +[role="xpack"] +[[xpack-commands]] += {xpack} Commands + +[partintro] +-- + +{xpack} includes commands that help you configure security: + +* <<certgen>> +* <<certutil>> +* <<migrate-tool>> +* <<saml-metadata>> +* <<setup-passwords>> +* <<syskeygen>> +* <<users-command>> + +-- + +include::certgen.asciidoc[] +include::certutil.asciidoc[] +include::migrate-tool.asciidoc[] +include::saml-metadata.asciidoc[] +include::setup-passwords.asciidoc[] +include::syskeygen.asciidoc[] +include::users-command.asciidoc[] diff --git a/x-pack/docs/en/commands/migrate-tool.asciidoc b/x-pack/docs/en/commands/migrate-tool.asciidoc new file mode 100644 index 0000000000000..1d19452df8094 --- /dev/null +++ b/x-pack/docs/en/commands/migrate-tool.asciidoc @@ -0,0 +1,109 @@ +[role="xpack"] +[[migrate-tool]] +== elasticsearch-migrate + +The `elasticsearch-migrate` command migrates existing file-based users and roles +to the native realm. From 5.0 onward, you should use the `native` realm to +manage roles and local users. + + +[float] +=== Synopsis + +[source,shell] +-------------------------------------------------- +bin/elasticsearch-migrate +(native (-U, --url ) +[-h, --help] [-E ] +[-n, --users ] [-r, --roles ] +[-u, --username ] [-p, --password ] +[-s, --silent] [-v, --verbose]) +-------------------------------------------------- + +[float] +=== Description + +NOTE: When migrating from Shield 2.x, the `elasticsearch-migrate` tool should be +run prior to upgrading to ensure all roles can be migrated as some may be in a +deprecated format that {xpack} cannot read. The `migrate` tool is available in +Shield 2.4.0 and higher.
+ +The `elasticsearch-migrate` tool loads the existing file-based users and roles +and calls the user and roles APIs to add them to the native realm. You can +migrate all users and roles, or specify the ones you want to migrate. Users and +roles that already exist in the `native` realm are not replaced or +overridden. If the names you specify with the `--users` and `--roles` options +don't exist in the `file` realm, they are skipped. + +[float] +[[migrate-tool-options]] +=== Parameters +The `native` subcommand supports the following options: + +`-E `:: +Configures a setting. + +`-h, --help`:: +Returns all of the command parameters. + +`-n`, `--users`:: +Comma-separated list of the users that you want to migrate. If this parameter is +not specified, all users are migrated. + +`-p`, `--password`:: +Password to use for authentication with {es}. +//TBD: What is the default if this isn't specified? + +`-r`, `--roles`:: +Comma-separated list of the roles that you want to migrate. If this parameter is +not specified, all roles are migrated. + +`-s, --silent`:: Shows minimal output. + +`-U`, `--url`:: +Endpoint URL of the {es} cluster to which you want to migrate the +file-based users and roles. This parameter is required. + +`-u`, `--username`:: +Username to use for authentication with {es}. +//TBD: What is the default if this isn't specified? + +`-v, --verbose`:: Shows verbose output. + +[float] +=== Examples + +Run the `elasticsearch-migrate` tool when {xpack} is installed. For example: + +[source, sh] +---------------------------------------------------------------------- +$ bin/elasticsearch-migrate native -U http://localhost:9200 -u elastic +-p x-pack-test-password -n lee,foo -r role1,role2,role3,role4,foo +starting migration of users and roles... +importing users from [/home/es/config/shield/users]... +found existing users: [test_user, joe3, joe2] +migrating user [lee] +{"user":{"created":true}} +no user [foo] found, skipping +importing roles from [/home/es/config/shield/roles.yml]... +found existing roles: [marvel_user, role_query_fields, admin_role, role3, admin, +remote_marvel_agent, power_user, role_new_format_name_array, role_run_as, +logstash, role_fields, role_run_as1, role_new_format, kibana4_server, user, +transport_client, role1.ab, role_query] +migrating role [role1] +{"role":{"created":true}} +migrating role [role2] +{"role":{"created":true}} +role [role3] already exists, skipping +no role [foo] found, skipping +users and roles imported. +---------------------------------------------------------------------- + +Additionally, the `-E` flag can be used to specify additional settings. For example +to specify a different configuration directory, the command would look like: + +[source, sh] +---------------------------------------------------------------------- +$ bin/elasticsearch-migrate native -U http://localhost:9200 -u elastic +-p x-pack-test-password -E path.conf=/etc/elasticsearch +---------------------------------------------------------------------- diff --git a/x-pack/docs/en/commands/saml-metadata.asciidoc b/x-pack/docs/en/commands/saml-metadata.asciidoc new file mode 100644 index 0000000000000..1cd283fd77699 --- /dev/null +++ b/x-pack/docs/en/commands/saml-metadata.asciidoc @@ -0,0 +1,132 @@ +[role="xpack"] +[[saml-metadata]] +== saml-metadata + +The `elasticsearch-saml-metadata` command can be used to generate a SAML 2.0 Service +Provider Metadata file. 
+ +[float] +=== Synopsis + +[source,shell] +-------------------------------------------------- +bin/elasticsearch-saml-metadata +[--realm ] +[--out ] [--batch] +[--attribute ] [--service-name ] +[--locale ] [--contacts] +([--organisation-name ] [--organisation-display-name ] [--organisation-url ]) +([--signing-bundle ] | [--signing-cert ][--signing-key ]) +[--signing-key-password ] +[-E ] +[-h, --help] ([-s, --silent] | [-v, --verbose]) +-------------------------------------------------- + +[float] +=== Description + +The SAML 2.0 specification provides a mechanism for Service Providers to +describe their capabilities and configuration using a _metadata file_. + +The `elasticsearch-saml-metadata` command generates such a file, based on the +configuration of a SAML realm in {es}. + +Some SAML Identity Providers will allow you to automatically import a metadata +file when you configure the Elastic Stack as a Service Provider. + +You can optionally select to digitally sign the metadata file in order to +ensure its integrity and authenticity before sharing it with the Identity Provider. +The key used for signing the metadata file need not necessarily be the same as +the keys already used in the saml realm configuration for SAML message signing. + +[float] +=== Parameters + +`--attribute `:: Specifies a SAML attribute that should be +included as a `` element in the metadata. Any attribute +configured in the {es} realm is automatically included and does not need to be +specified as a commandline option. + +`--batch`:: Do not prompt for user input. + +`--contacts`:: Specifies that the metadata should include one or more +`` elements. The user will be prompted to enter the details for +each person. + +`-E `:: Configures an {es} setting. + +`-h, --help`:: Returns all of the command parameters. + +`--locale `:: Specifies the locale to use for metadata elements such as +``. Defaults to the JVM's default system locale. + +`--organisation-display-name ` element. +Only valid if `--organisation-name` is also specified. + +`--organisation-name `:: Specifies that an `` element should +be included in the metadata and provides the value for the ``. +If this is specified, then `--organisation-url` must also be specified. + +`--organisation-url `:: Specifies the value of the `` +element. This is required if `--organisation-name` is specified. + +`--out `:: Specifies a path for the output files. +Defaults to `saml-elasticsearch-metadata.xml` + +`--service-name `:: Specifies the value for the `` element in +the metadata. Defaults to `elasticsearch`. + +`--signing-bundle `:: Specifies the path to an existing key pair +(in PKCS#12 format). The private key of that key pair will be used to sign +the metadata file. + +`--signing-cert `:: Specifies the path to an existing certificate (in +PEM format) to be used for signing of the metadata file. You must also specify +the `--signing-key` parameter. This parameter cannot be used with the +`--signing-bundle` parameter. + +`--signing-key `:: Specifies the path to an existing key (in PEM format) +to be used for signing of the metadata file. You must also specify the +`--signing-cert` parameter. This parameter cannot be used with the +`--signing-bundle` parameter. + +`--signing-key-password `:: Specifies the password for the signing key. +It can be used with either the `--signing-key` or the `--signing-bundle` parameters. + +`--realm `:: Specifies the name of the realm for which the metadata +should be generated. 
This parameter is required if there is more than 1 `saml` +realm in your {es} configuration. + +`-s, --silent`:: Shows minimal output. + +`-v, --verbose`:: Shows verbose output. + +[float] +=== Examples + +The following command generates a default metadata file for the `saml1` realm: + +[source, sh] +-------------------------------------------------- +bin/elasticsearch-saml-metadata --realm saml1 +-------------------------------------------------- + +The file will be written to `saml-elasticsearch-metadata.xml`. +You may be prompted to provide the "friendlyName" value for any attributes that +are used by the realm. + +The following command generates a metadata file for the `saml2` realm, with a +`` of `kibana-finance`, a locale of `en-GB` and includes +`` elements and an `` element: + +[source, sh] +-------------------------------------------------- +bin/elasticsearch-saml-metadata --realm saml2 \ + --service-name kibana-finance \ + --locale en-GB \ + --contacts \ + --organisation-name "Mega Corp. Finance Team" \ + --organisation-url "http://mega.example.com/finance/" +-------------------------------------------------- + diff --git a/x-pack/docs/en/commands/setup-passwords.asciidoc b/x-pack/docs/en/commands/setup-passwords.asciidoc new file mode 100644 index 0000000000000..42f3c824496ce --- /dev/null +++ b/x-pack/docs/en/commands/setup-passwords.asciidoc @@ -0,0 +1,72 @@ +[role="xpack"] +[[setup-passwords]] +== elasticsearch-setup-passwords + +The `elasticsearch-setup-passwords` command sets the passwords for the built-in +`elastic`, `kibana`, `logstash_system`, and `beats_system` users. + +[float] +=== Synopsis + +[source,shell] +-------------------------------------------------- +bin/elasticsearch-setup-passwords auto|interactive +[-b, --batch] [-h, --help] [-E ] +[-s, --silent] [-u, --url ""] [-v, --verbose] +-------------------------------------------------- + +[float] +=== Description + +This command is intended for use only during the initial configuration of +{xpack}. It uses the +{xpack-ref}/setting-up-authentication.html#bootstrap-elastic-passwords[`elastic` bootstrap password] +to run user management API requests. After you set a password for the `elastic` +user, the bootstrap password is no longer active and you cannot use this command. +Instead, you can change passwords by using the *Management > Users* UI in {kib} +or the <>. + +This command uses an HTTP connection to connect to the cluster and run the user +management requests. If your cluster uses TLS/SSL on the HTTP layer, the command +automatically attempts to establish the connection by using the HTTPS protocol. +It configures the connection by using the `xpack.security.http.ssl` settings in +the `elasticsearch.yml` file. If you do not use the default config directory +location, ensure that the *ES_PATH_CONF* environment variable returns the +correct path before you run the `elasticsearch-setup-passwords` command. You can +override settings in your `elasticsearch.yml` file by using the `-E` command +option. For more information about debugging connection failures, see +{xpack-ref}/trb-security-setup.html[`elasticsearch-setup-passwords` command fails due to connection failure]. + +[float] +=== Parameters + +`auto`:: Outputs randomly-generated passwords to the console. + +`-b, --batch`:: If enabled, runs the change password process without prompting the +user. + +`-E `:: Configures a standard {es} or {xpack} setting. + +`-h, --help`:: Shows help information. + +`interactive`:: Prompts you to manually enter passwords. 
+ +`-s, --silent`:: Shows minimal output. + +`-u, --url ""`:: Specifies the URL that the tool uses to submit the user management API +requests. The default value is determined from the settings in your +`elasticsearch.yml` file. If `xpack.security.http.ssl.enabled` is set to `true`, +you must specify an HTTPS URL. + +`-v, --verbose`:: Shows verbose output. + +[float] +=== Examples + +The following example uses the `-u` parameter to tell the tool where to submit +its user management API requests: + +[source,shell] +-------------------------------------------------- +bin/elasticsearch-setup-passwords auto -u "http://localhost:9201" +-------------------------------------------------- diff --git a/x-pack/docs/en/commands/syskeygen.asciidoc b/x-pack/docs/en/commands/syskeygen.asciidoc new file mode 100644 index 0000000000000..8683d801d58f1 --- /dev/null +++ b/x-pack/docs/en/commands/syskeygen.asciidoc @@ -0,0 +1,51 @@ +[role="xpack"] +[[syskeygen]] +== elasticsearch-syskeygen + +The `elasticsearch-syskeygen` command creates a system key file in the +elasticsearch config directory. + +[float] +=== Synopsis + +[source,shell] +-------------------------------------------------- +bin/elasticsearch-syskeygen +[-E ] [-h, --help] +([-s, --silent] | [-v, --verbose]) +-------------------------------------------------- + +[float] +=== Description + +The command generates a `system_key` file, which you can use to symmetrically +encrypt sensitive data. For example, you can use this key to prevent {watcher} +from returning and storing information that contains clear text credentials. See {xpack-ref}/encrypting-data.html[Encrypting sensitive data in {watcher}]. + +IMPORTANT: The system key is a symmetric key, so the same key must be used on +every node in the cluster. + +[float] +=== Parameters + +`-E `:: Configures a setting. For example, if you have a custom +installation of {es}, you can use this parameter to specify the `ES_PATH_CONF` +environment variable. + +`-h, --help`:: Returns all of the command parameters. + +`-s, --silent`:: Shows minimal output. + +`-v, --verbose`:: Shows verbose output. + + +[float] +=== Examples + +The following command generates a `system_key` file in the +default `$ES_HOME/config/x-pack` directory: + +[source, sh] +-------------------------------------------------- +bin/elasticsearch-syskeygen +-------------------------------------------------- diff --git a/x-pack/docs/en/commands/users-command.asciidoc b/x-pack/docs/en/commands/users-command.asciidoc new file mode 100644 index 0000000000000..ab1b89b149b90 --- /dev/null +++ b/x-pack/docs/en/commands/users-command.asciidoc @@ -0,0 +1,138 @@ +[role="xpack"] +[[users-command]] +== Users Command +++++ +users +++++ + +If you use file-based user authentication, the `elasticsearch-users` command +enables you to add and remove users, assign user roles, and manage passwords. + +[float] +=== Synopsis + +[source,shell] +-------------------------------------------------- +bin/elasticsearch-users +([useradd ] [-p ] [-r ]) | +([list] ) | +([passwd ] [-p ]) | +([roles ] [-a ] [-r ]) | +([userdel ]) +-------------------------------------------------- + +[float] +=== Description + +If you use the built-in `file` internal realm, users are defined in local files +on each node in the cluster. + +Usernames and roles must be at least 1 and no more than 1024 characters. 
They +can contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces, punctuation, +and printable symbols in the +https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block]. +Leading or trailing whitespace is not allowed. + +Passwords must be at least 6 characters long. + +For more information, see {xpack-ref}/file-realm.html[File-based User Authentication]. + +TIP: To ensure that {es} can read the user and role information at startup, run +`elasticsearch-users useradd` as the same user you use to run {es}. Running the +command as root or some other user updates the permissions for the `users` and +`users_roles` files and prevents {es} from accessing them. + +[float] +=== Parameters + +`-a `:: If used with the `roles` parameter, adds a comma-separated list +of roles to a user. + +//`-h, --help`:: Returns all of the command parameters. + +`list`:: List the users that are registered with the `file` realm +on the local node. If you also specify a user name, the command provides +information for that user. + +`-p `:: Specifies the user's password. If you do not specify this +parameter, the command prompts you for the password. ++ +-- +TIP: Omit the `-p` option to keep +plaintext passwords out of the terminal session's command history. + +-- + +`passwd `:: Resets a user's password. You can specify the new +password directly with the `-p` parameter. + +`-r `:: +* If used with the `useradd` parameter, defines a user's roles. This option +accepts a comma-separated list of role names to assign to the user. +* If used with the `roles` parameter, removes a comma-separated list of roles +from a user. + +`roles`:: Manages the roles of a particular user. You can combine adding and +removing roles within the same command to change a user's roles. + +//`-s, --silent`:: Shows minimal output. + +`useradd `:: Adds a user to your local node. + +`userdel `:: Deletes a user from your local node. + +//`-v, --verbose`:: Shows verbose output. + +//[float] +//=== Authorization + +[float] +=== Examples + +The following example adds a new user named `jacknich` to the `file` realm. The +password for this user is `theshining`, and this user is associated with the +`network` and `monitoring` roles. + +[source,shell] +------------------------------------------------------------------- +bin/elasticsearch-users useradd jacknich -p theshining -r network,monitoring +------------------------------------------------------------------- + +The following example lists the users that are registered with the `file` realm +on the local node: + +[source, shell] +---------------------------------- +bin/elasticsearch-users list +rdeniro : admin +alpacino : power_user +jacknich : monitoring,network +---------------------------------- + +Users are in the left-hand column and their corresponding roles are listed in +the right-hand column. + +The following example resets the `jacknich` user's password: + +[source,shell] +-------------------------------------------------- +bin/elasticsearch-users passwd jacknich +-------------------------------------------------- + +Since the `-p` parameter was omitted, the command prompts you to enter and +confirm a password in interactive mode.
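+
+The `list` parameter also accepts a user name. As a sketch based on the listing
+above, the following command shows the roles for a single user (the output is
+illustrative):
+
+[source, shell]
+----------------------------------
+# Show the roles assigned to one user in the file realm
+bin/elasticsearch-users list jacknich
+jacknich : monitoring,network
+----------------------------------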
+ +The following example removes the `network` and `monitoring` roles from the +`jacknich` user and adds the `user` role: + +[source,shell] +------------------------------------------------------------ +bin/elasticsearch-users roles jacknich -r network,monitoring -a user +------------------------------------------------------------ + +The following example deletes the `jacknich` user: + +[source,shell] +-------------------------------------------------- +bin/elasticsearch-users userdel jacknich +-------------------------------------------------- diff --git a/x-pack/docs/en/index.asciidoc b/x-pack/docs/en/index.asciidoc new file mode 100644 index 0000000000000..595c78ddcf599 --- /dev/null +++ b/x-pack/docs/en/index.asciidoc @@ -0,0 +1,38 @@ + +include::{es-repo-dir}/index-shared1.asciidoc[] + +:edit_url!: +include::setup/setup-xes.asciidoc[] + +:edit_url: +include::{es-repo-dir}/index-shared2.asciidoc[] + +:edit_url!: +include::release-notes/xpack-breaking.asciidoc[] + +:edit_url: +include::{es-repo-dir}/index-shared3.asciidoc[] + +:edit_url!: +include::sql/index.asciidoc[] + +:edit_url!: +include::monitoring/index.asciidoc[] + +:edit_url!: +include::rollup/index.asciidoc[] + +:edit_url!: +include::rest-api/index.asciidoc[] + +:edit_url!: +include::commands/index.asciidoc[] + +:edit_url: +include::{es-repo-dir}/index-shared4.asciidoc[] + +:edit_url!: +include::release-notes/xpack-xes.asciidoc[] + +:edit_url: +include::{es-repo-dir}/index-shared5.asciidoc[] diff --git a/x-pack/docs/en/ml/aggregations.asciidoc b/x-pack/docs/en/ml/aggregations.asciidoc new file mode 100644 index 0000000000000..cc98a45d11e50 --- /dev/null +++ b/x-pack/docs/en/ml/aggregations.asciidoc @@ -0,0 +1,183 @@ +[[ml-configuring-aggregation]] +=== Aggregating Data For Faster Performance + +By default, {dfeeds} fetch data from {es} using search and scroll requests. +It can be significantly more efficient, however, to aggregate data in {es} +and to configure your jobs to analyze aggregated data. + +One of the benefits of aggregating data this way is that {es} automatically +distributes these calculations across your cluster. You can then feed this +aggregated data into {xpackml} instead of raw results, which +reduces the volume of data that must be considered while detecting anomalies. + +There are some limitations to using aggregations in {dfeeds}, however. +Your aggregation must include a buckets aggregation, which in turn must contain +a date histogram aggregation. This requirement ensures that the aggregated +data is a time series. If you use a terms aggregation and the cardinality of a +term is high, then the aggregation might not be effective and you might want +to just use the default search and scroll behavior. + +When you create or update a job, you can include the names of aggregations, for +example: + +[source,js] +---------------------------------- +PUT _xpack/ml/anomaly_detectors/farequote +{ + "analysis_config": { + "bucket_span": "60m", + "detectors": [{ + "function":"mean", + "field_name":"responsetime", + "by_field_name":"airline" + }], + "summary_count_field_name": "doc_count" + }, + "data_description": { + "time_field":"time" + } +} +---------------------------------- + +In this example, the `airline`, `responsetime`, and `time` fields are +aggregations. + +NOTE: When the `summary_count_field_name` property is set to a non-null value, +the job expects to receive aggregated input. The property must be set to the +name of the field that contains the count of raw data points that have been +aggregated. 
It applies to all detectors in the job. + +The aggregations are defined in the {dfeed} as follows: + +[source,js] +---------------------------------- +PUT _xpack/ml/datafeeds/datafeed-farequote +{ + "job_id":"farequote", + "indices": ["farequote"], + "types": ["response"], + "aggregations": { + "buckets": { + "date_histogram": { + "field": "time", + "interval": "360s", + "time_zone": "UTC" + }, + "aggregations": { + "time": { + "max": {"field": "time"} + }, + "airline": { + "terms": { + "field": "airline", + "size": 100 + }, + "aggregations": { + "responsetime": { + "avg": { + "field": "responsetime" + } + } + } + } + } + } + } +} +---------------------------------- + + +In this example, the aggregations have names that match the fields that they +operate on. That is to say, the `max` aggregation is named `time` and its +field is also `time`. The same is true for the aggregations with the names +`airline` and `responsetime`. Since you must create the job before you can +create the {dfeed}, synchronizing your aggregation and field names can simplify +these configuration steps. + +IMPORTANT: If you use a `max` aggregation on a time field, the aggregation name +in the {dfeed} must match the name of the time field, as in the previous example. +For all other aggregations, if the aggregation name doesn't match the field name, +there are limitations in the drill-down functionality within the {ml} page in +{kib}. + +When you define an aggregation in a {dfeed}, it must have the following form: + +[source,js] +---------------------------------- +"aggregations" : { + "buckets" : { + "date_histogram" : { + "time_zone": "UTC", ... + }, + "aggregations": { + "": { + "max": { + "field":"" + } + } + [,"": { + "terms":{... + } + [,"aggregations" : { + []+ + } ] + }] + } + } +} +---------------------------------- + +You must specify `buckets` as the aggregation name and `date_histogram` as the +aggregation type. For more information, see +{ref}/search-aggregations-bucket-datehistogram-aggregation.html[Date Histogram Aggregation]. + +NOTE: The `time_zone` parameter in the date histogram aggregation must be set to `UTC`, +which is the default value. + +Each histogram bucket has a key, which is the bucket start time. This key cannot +be used for aggregations in {dfeeds}, however, because they need to know the +time of the latest record within a bucket. Otherwise, when you restart a {dfeed}, +it continues from the start time of the histogram bucket and possibly fetches +the same data twice. The max aggregation for the time field is therefore +necessary to provide the time of the latest record within a bucket. + +You can optionally specify a terms aggregation, which creates buckets for +different values of a field. + +IMPORTANT: If you use a terms aggregation, by default it returns buckets for +the top ten terms. Thus if the cardinality of the term is greater than 10, not +all terms are analyzed. + +You can change this behavior by setting the `size` parameter. To +determine the cardinality of your data, you can run searches such as: + +[source,js] +-------------------------------------------------- +GET .../_search { + "aggs": { + "service_cardinality": { + "cardinality": { + "field": "service" + } + } + } +} +-------------------------------------------------- + +By default, {es} limits the maximum number of terms returned to 10000. For high +cardinality fields, the query might not run. It might return errors related to +circuit breaking exceptions that indicate that the data is too large. 
In such +cases, do not use aggregations in your {dfeed}. For more +information, see {ref}/search-aggregations-bucket-terms-aggregation.html[Terms Aggregation]. + +You can also optionally specify multiple sub-aggregations. +The sub-aggregations are aggregated for the buckets that were created by their +parent aggregation. For more information, see +{ref}/search-aggregations.html[Aggregations]. + +TIP: If your detectors use metric or sum analytical functions, set the +`interval` of the date histogram aggregation to a tenth of the `bucket_span` +that was defined in the job. This suggestion creates finer, more granular time +buckets, which are ideal for this type of analysis. If your detectors use count +or rare functions, set `interval` to the same value as `bucket_span`. For more +information about analytical functions, see <>. diff --git a/x-pack/docs/en/ml/analyzing.asciidoc b/x-pack/docs/en/ml/analyzing.asciidoc new file mode 100644 index 0000000000000..d8b6640f2c8f7 --- /dev/null +++ b/x-pack/docs/en/ml/analyzing.asciidoc @@ -0,0 +1,29 @@ +[float] +[[ml-analyzing]] +=== Analyzing the Past and Present + +The {xpackml} features automate the analysis of time-series data by creating +accurate baselines of normal behavior in the data and identifying anomalous +patterns in that data. You can submit your data for analysis in batches or +continuously in real-time {dfeeds}. + +Using proprietary {ml} algorithms, the following circumstances are detected, +scored, and linked with statistically significant influencers in the data: + +* Anomalies related to temporal deviations in values, counts, or frequencies +* Statistical rarity +* Unusual behaviors for a member of a population + +Automated periodicity detection and quick adaptation to changing data ensure +that you don’t need to specify algorithms, models, or other data science-related +configurations in order to get the benefits of {ml}. + +You can view the {ml} results in {kib} where, for example, charts illustrate the +actual data values, the bounds for the expected values, and the anomalies that +occur outside these bounds. + +[role="screenshot"] +image::images/ml-gs-job-analysis.jpg["Example screenshot from the Machine Learning Single Metric Viewer in Kibana"] + +For a more detailed walk-through of {xpackml} features, see +<>. 
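+
+If you prefer to inspect results outside {kib}, you can also retrieve them with
+the {ml} APIs. The following is only a minimal sketch that uses the get buckets
+API to fetch high-scoring buckets; the `my_job` job name, score threshold, and
+start time are hypothetical examples:
+
+[source,js]
+----------------------------------
+GET _xpack/ml/anomaly_detectors/my_job/results/buckets
+{
+  "anomaly_score": 75,
+  "start": "2018-05-01T00:00:00Z"
+}
+----------------------------------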
diff --git a/x-pack/docs/en/ml/api-quickref.asciidoc b/x-pack/docs/en/ml/api-quickref.asciidoc new file mode 100644 index 0000000000000..9602379c37416 --- /dev/null +++ b/x-pack/docs/en/ml/api-quickref.asciidoc @@ -0,0 +1,91 @@ +[[ml-api-quickref]] +== API Quick Reference + +All {ml} endpoints have the following base: + +[source,js] +---- +/_xpack/ml/ +---- + +The main {ml} resources can be accessed with a variety of endpoints: + +* <>: Create and manage {ml} jobs +* <>: Select data from {es} to be analyzed +* <>: Access the results of a {ml} job +* <>: Manage model snapshots +//* <>: Validate subsections of job configurations + +[float] +[[ml-api-jobs]] +=== /anomaly_detectors/ + +* {ref}/ml-put-job.html[PUT /anomaly_detectors/+++]: Create a job +* {ref}/ml-open-job.html[POST /anomaly_detectors//_open]: Open a job +* {ref}/ml-post-data.html[POST /anomaly_detectors//_data]: Send data to a job +* {ref}/ml-get-job.html[GET /anomaly_detectors]: List jobs +* {ref}/ml-get-job.html[GET /anomaly_detectors/+++]: Get job details +* {ref}/ml-get-job-stats.html[GET /anomaly_detectors//_stats]: Get job statistics +* {ref}/ml-update-job.html[POST /anomaly_detectors//_update]: Update certain properties of the job configuration +* {ref}/ml-flush-job.html[POST anomaly_detectors//_flush]: Force a job to analyze buffered data +* {ref}/ml-forecast.html[POST anomaly_detectors//_forecast]: Forecast future job behavior +* {ref}/ml-close-job.html[POST /anomaly_detectors//_close]: Close a job +* {ref}/ml-delete-job.html[DELETE /anomaly_detectors/+++]: Delete a job + +[float] +[[ml-api-calendars]] +=== /calendars/ + +* {ref}/ml-put-calendar.html[PUT /calendars/+++]: Create a calendar +* {ref}/ml-post-calendar-event.html[POST /calendars/+++/events]: Add a scheduled event to a calendar +* {ref}/ml-put-calendar-job.html[PUT /calendars/+++/jobs/+++]: Associate a job with a calendar +* {ref}/ml-get-calendar.html[GET /calendars/+++]: Get calendar details +* {ref}/ml-get-calendar-event.html[GET /calendars/+++/events]: Get scheduled event details +* {ref}/ml-delete-calendar-event.html[DELETE /calendars/+++/events/+++]: Remove a scheduled event from a calendar +* {ref}/ml-delete-calendar-job.html[DELETE /calendars/+++/jobs/+++]: Disassociate a job from a calendar +* {ref}/ml-delete-calendar.html[DELETE /calendars/+++]: Delete a calendar + +[float] +[[ml-api-datafeeds]] +=== /datafeeds/ + +* {ref}/ml-put-datafeed.html[PUT /datafeeds/+++]: Create a {dfeed} +* {ref}/ml-start-datafeed.html[POST /datafeeds//_start]: Start a {dfeed} +* {ref}/ml-get-datafeed.html[GET /datafeeds]: List {dfeeds} +* {ref}/ml-get-datafeed.html[GET /datafeeds/+++]: Get {dfeed} details +* {ref}/ml-get-datafeed-stats.html[GET /datafeeds//_stats]: Get statistical information for {dfeeds} +* {ref}/ml-preview-datafeed.html[GET /datafeeds//_preview]: Get a preview of a {dfeed} +* {ref}/ml-update-datafeed.html[POST /datafeeds//_update]: Update certain settings for a {dfeed} +* {ref}/ml-stop-datafeed.html[POST /datafeeds//_stop]: Stop a {dfeed} +* {ref}/ml-delete-datafeed.html[DELETE /datafeeds/+++]: Delete {dfeed} + +[float] +[[ml-api-results]] +=== /results/ + +* {ref}/ml-get-bucket.html[GET /results/buckets]: List the buckets in the results +* {ref}/ml-get-bucket.html[GET /results/buckets/+++]: Get bucket details +* {ref}/ml-get-overall-buckets.html[GET /results/overall_buckets]: Get overall bucket results for multiple jobs +* {ref}/ml-get-category.html[GET /results/categories]: List the categories in the results +* {ref}/ml-get-category.html[GET 
/results/categories/+++]: Get category details +* {ref}/ml-get-influencer.html[GET /results/influencers]: Get influencer details +* {ref}/ml-get-record.html[GET /results/records]: Get records from the results + +[float] +[[ml-api-snapshots]] +=== /model_snapshots/ + +* {ref}/ml-get-snapshot.html[GET /model_snapshots]: List model snapshots +* {ref}/ml-get-snapshot.html[GET /model_snapshots/+++]: Get model snapshot details +* {ref}/ml-revert-snapshot.html[POST /model_snapshots//_revert]: Revert a model snapshot +* {ref}/ml-update-snapshot.html[POST /model_snapshots//_update]: Update certain settings for a model snapshot +* {ref}/ml-delete-snapshot.html[DELETE /model_snapshots/+++]: Delete a model snapshot + +//// +[float] +[[ml-api-validate]] +=== /validate/ + +* {ref}/ml-valid-detector.html[POST /anomaly_detectors/_validate/detector]: Validate a detector +* {ref}/ml-valid-job.html[POST /anomaly_detectors/_validate]: Validate a job +//// diff --git a/x-pack/docs/en/ml/architecture.asciidoc b/x-pack/docs/en/ml/architecture.asciidoc new file mode 100644 index 0000000000000..d16a8301da107 --- /dev/null +++ b/x-pack/docs/en/ml/architecture.asciidoc @@ -0,0 +1,9 @@ +[float] +[[ml-nodes]] +=== Machine learning nodes + +A {ml} node is a node that has `xpack.ml.enabled` and `node.ml` set to `true`, +which is the default behavior. If you set `node.ml` to `false`, the node can +service API requests but it cannot run jobs. If you want to use {xpackml} +features, there must be at least one {ml} node in your cluster. For more +information about this setting, see <>. diff --git a/x-pack/docs/en/ml/buckets.asciidoc b/x-pack/docs/en/ml/buckets.asciidoc new file mode 100644 index 0000000000000..89d7ea8cdeaff --- /dev/null +++ b/x-pack/docs/en/ml/buckets.asciidoc @@ -0,0 +1,26 @@ +[[ml-buckets]] +=== Buckets +++++ +Buckets +++++ + +The {xpackml} features use the concept of a _bucket_ to divide the time series +into batches for processing. + +The _bucket span_ is part of the configuration information for a job. It defines +the time interval that is used to summarize and model the data. This is +typically between 5 minutes to 1 hour and it depends on your data characteristics. +When you set the bucket span, take into account the granularity at which you +want to analyze, the frequency of the input data, the typical duration of the +anomalies, and the frequency at which alerting is required. + +When you view your {ml} results, each bucket has an anomaly score. This score is +a statistically aggregated and normalized view of the combined anomalousness of +all the record results in the bucket. If you have more than one job, you can +also obtain overall bucket results, which combine and correlate anomalies from +multiple jobs into an overall score. When you view the results for jobs groups +in {kib}, it provides the overall bucket scores. + +For more information, see +{ref}/ml-results-resource.html[Results Resources] and +{ref}/ml-get-overall-buckets.html[Get Overall Buckets API]. diff --git a/x-pack/docs/en/ml/calendars.asciidoc b/x-pack/docs/en/ml/calendars.asciidoc new file mode 100644 index 0000000000000..117ed5cb42cd4 --- /dev/null +++ b/x-pack/docs/en/ml/calendars.asciidoc @@ -0,0 +1,40 @@ +[[ml-calendars]] +=== Calendars and Scheduled Events + +Sometimes there are periods when you expect unusual activity to take place, +such as bank holidays, "Black Friday", or planned system outages. If you +identify these events in advance, no anomalies are generated during that period. 
+The {ml} model is not ill-affected and you do not receive spurious results. + +You can create calendars and scheduled events in the **Settings** pane on the +**Machine Learning** page in {kib} or by using {ref}/ml-apis.html[{ml} APIs]. + +A scheduled event must have a start time, end time, and description. In general, +scheduled events are short in duration (typically lasting from a few hours to a +day) and occur infrequently. If you have regularly occurring events, such as +weekly maintenance periods, you do not need to create scheduled events for these +circumstances; they are already handled by the {ml} analytics. + +You can identify zero or more scheduled events in a calendar. Jobs can then +subscribe to calendars and the {ml} analytics handle all subsequent scheduled +events appropriately. + +If you want to add multiple scheduled events at once, you can import an +iCalendar (`.ics`) file in {kib} or a JSON file in the +{ref}/ml-post-calendar-event.html[add events to calendar API]. + +[NOTE] +-- + +* You must identify scheduled events before your job analyzes the data for that +time period. Machine learning results are not updated retroactively. +* If your iCalendar file contains recurring events, only the first occurrence is +imported. +* Bucket results are generated during scheduled events but they have an +anomaly score of zero. For more information about bucket results, see +{ref}/ml-results-resource.html[Results Resources]. +* If you use long or frequent scheduled events, it might take longer for the +{ml} analytics to learn to model your data and some anomalous behavior might be +missed. + +-- diff --git a/x-pack/docs/en/ml/categories.asciidoc b/x-pack/docs/en/ml/categories.asciidoc new file mode 100644 index 0000000000000..bb217e2e18654 --- /dev/null +++ b/x-pack/docs/en/ml/categories.asciidoc @@ -0,0 +1,228 @@ +[[ml-configuring-categories]] +=== Categorizing log messages + +Application log events are often unstructured and contain variable data. For +example: +//Obtained from it_ops_new_app_logs.json +[source,js] +---------------------------------- +{"time":1454516381000,"message":"org.jdbi.v2.exceptions.UnableToExecuteStatementException: com.mysql.jdbc.exceptions.MySQLTimeoutException: Statement cancelled due to timeout or client request [statement:\"SELECT id, customer_id, name, force_disabled, enabled FROM customers\"]","type":"logs"} +---------------------------------- +//NOTCONSOLE + +You can use {ml} to observe the static parts of the message, cluster similar +messages together, and classify them into message categories. + +The {ml} model learns what volume and pattern is normal for each category over +time. You can then detect anomalies and surface rare events or unusual types of +messages by using count or rare functions. 
For example: + +//Obtained from it_ops_new_app_logs.sh +[source,js] +---------------------------------- +PUT _xpack/ml/anomaly_detectors/it_ops_new_logs +{ + "description" : "IT Ops Application Logs", + "analysis_config" : { + "categorization_field_name": "message", <1> + "bucket_span":"30m", + "detectors" :[{ + "function":"count", + "by_field_name": "mlcategory", <2> + "detector_description": "Unusual message counts" + }], + "categorization_filters":[ "\\[statement:.*\\]"] + }, + "analysis_limits":{ + "categorization_examples_limit": 5 + }, + "data_description" : { + "time_field":"time", + "time_format": "epoch_ms" + } +} +---------------------------------- +//CONSOLE +<1> The `categorization_field_name` property indicates which field will be +categorized. +<2> The resulting categories are used in a detector by setting `by_field_name`, +`over_field_name`, or `partition_field_name` to the keyword `mlcategory`. If you +do not specify this keyword in one of those properties, the API request fails. + +The optional `categorization_examples_limit` property specifies the +maximum number of examples that are stored in memory and in the results data +store for each category. The default value is `4`. Note that this setting does +not affect the categorization; it just affects the list of visible examples. If +you increase this value, more examples are available, but you must have more +storage available. If you set this value to `0`, no examples are stored. + +The optional `categorization_filters` property can contain an array of regular +expressions. If a categorization field value matches the regular expression, the +portion of the field that is matched is not taken into consideration when +defining categories. The categorization filters are applied in the order they +are listed in the job configuration, which allows you to disregard multiple +sections of the categorization field value. In this example, we have decided that +we do not want the detailed SQL to be considered in the message categorization. +This particular categorization filter removes the SQL statement from the categorization +algorithm. + +If your data is stored in {es}, you can create an advanced job with these same +properties: + +[role="screenshot"] +image::images/ml-category-advanced.jpg["Advanced job configuration options related to categorization"] + +NOTE: To add the `categorization_examples_limit` property, you must use the +**Edit JSON** tab and copy the `analysis_limits` object from the API example. + +[float] +[[ml-configuring-analyzer]] +==== Customizing the Categorization Analyzer + +Categorization uses English dictionary words to identify log message categories. +By default, it also uses English tokenization rules. For this reason, if you use +the default categorization analyzer, only English language log messages are +supported, as described in the <>. + +You can, however, change the tokenization rules by customizing the way the +categorization field values are interpreted. 
For example: + +[source,js] +---------------------------------- +PUT _xpack/ml/anomaly_detectors/it_ops_new_logs2 +{ + "description" : "IT Ops Application Logs", + "analysis_config" : { + "categorization_field_name": "message", + "bucket_span":"30m", + "detectors" :[{ + "function":"count", + "by_field_name": "mlcategory", + "detector_description": "Unusual message counts" + }], + "categorization_analyzer":{ + "char_filter": [ + { "type": "pattern_replace", "pattern": "\\[statement:.*\\]" } <1> + ], + "tokenizer": "ml_classic", <2> + "filter": [ + { "type" : "stop", "stopwords": [ + "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday", + "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun", + "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December", + "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", + "GMT", "UTC" + ] } <3> + ] + } + }, + "analysis_limits":{ + "categorization_examples_limit": 5 + }, + "data_description" : { + "time_field":"time", + "time_format": "epoch_ms" + } +} +---------------------------------- +//CONSOLE +<1> The +{ref}/analysis-pattern-replace-charfilter.html[`pattern_replace` character filter] +here achieves exactly the same as the `categorization_filters` in the first +example. +<2> The `ml_classic` tokenizer works like the non-customizable tokenization +that was used for categorization in older versions of machine learning. If you +want the same categorization behavior as older versions, use this property value. +<3> By default, English day or month words are filtered from log messages before +categorization. If your logs are in a different language and contain +dates, you might get better results by filtering the day or month words in your +language. + +The optional `categorization_analyzer` property allows even greater customization +of how categorization interprets the categorization field value. It can refer to +a built-in {es} analyzer or a combination of zero or more character filters, +a tokenizer, and zero or more token filters. 
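+
+For instance, if a built-in analyzer already tokenizes your messages the way
+you want, you can simply name it. The following minimal sketch refers to the
+built-in `standard` analyzer; whether that analyzer is appropriate depends
+entirely on your log format:
+
+[source,js]
+----------------------------------
+"categorization_analyzer": "standard"
+----------------------------------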
+ +The `ml_classic` tokenizer and the day and month stopword filter are more or less +equivalent to the following analyzer, which is defined using only built-in {es} +{ref}/analysis-tokenizers.html[tokenizers] and +{ref}/analysis-tokenfilters.html[token filters]: + +[source,js] +---------------------------------- +PUT _xpack/ml/anomaly_detectors/it_ops_new_logs3 +{ + "description" : "IT Ops Application Logs", + "analysis_config" : { + "categorization_field_name": "message", + "bucket_span":"30m", + "detectors" :[{ + "function":"count", + "by_field_name": "mlcategory", + "detector_description": "Unusual message counts" + }], + "categorization_analyzer":{ + "tokenizer": { + "type" : "simple_pattern_split", + "pattern" : "[^-0-9A-Za-z_.]+" <1> + }, + "filter": [ + { "type" : "pattern_replace", "pattern": "^[0-9].*" }, <2> + { "type" : "pattern_replace", "pattern": "^[-0-9A-Fa-f.]+$" }, <3> + { "type" : "pattern_replace", "pattern": "^[^0-9A-Za-z]+" }, <4> + { "type" : "pattern_replace", "pattern": "[^0-9A-Za-z]+$" }, <5> + { "type" : "stop", "stopwords": [ + "", + "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday", + "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun", + "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December", + "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", + "GMT", "UTC" + ] } + ] + } + }, + "analysis_limits":{ + "categorization_examples_limit": 5 + }, + "data_description" : { + "time_field":"time", + "time_format": "epoch_ms" + } +} +---------------------------------- +//CONSOLE +<1> Tokens basically consist of hyphens, digits, letters, underscores and dots. +<2> By default, categorization ignores tokens that begin with a digit. +<3> By default, categorization also ignores tokens that are hexadecimal numbers. +<4> Underscores, hyphens, and dots are removed from the beginning of tokens. +<5> Underscores, hyphens, and dots are also removed from the end of tokens. + +The key difference between the default `categorization_analyzer` and this example +analyzer is that using the `ml_classic` tokenizer is several times faster. The +difference in behavior is that this custom analyzer does not include accented +letters in tokens whereas the `ml_classic` tokenizer does, although that could +be fixed by using more complex regular expressions. + +For more information about the `categorization_analyzer` property, see +{ref}/ml-job-resource.html#ml-categorizationanalyzer[Categorization Analyzer]. + +NOTE: To add the `categorization_analyzer` property in {kib}, you must use the +**Edit JSON** tab and copy the `categorization_analyzer` object from one of the +API examples above. + +[float] +[[ml-viewing-categories]] +==== Viewing Categorization Results + +After you open the job and start the {dfeed} or supply data to the job, you can +view the categorization results in {kib}. For example: + +[role="screenshot"] +image::images/ml-category-anomalies.jpg["Categorization example in the Anomaly Explorer"] + +For this type of job, the **Anomaly Explorer** contains extra information for +each anomaly: the name of the category (for example, `mlcategory 11`) and +examples of the messages in that category. In this case, you can use these +details to investigate occurrences of unusually high message counts for specific +message categories. 
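+
+You can also inspect the category definitions and example messages through the
+{ml} APIs. The following sketch uses the get categories API with the
+`it_ops_new_logs` job from the earlier examples; the page size shown is
+arbitrary:
+
+[source,js]
+----------------------------------
+GET _xpack/ml/anomaly_detectors/it_ops_new_logs/results/categories
+{
+  "page": { "size": 5 }
+}
+----------------------------------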
diff --git a/x-pack/docs/en/ml/configuring.asciidoc b/x-pack/docs/en/ml/configuring.asciidoc new file mode 100644 index 0000000000000..9e7b787dcea60 --- /dev/null +++ b/x-pack/docs/en/ml/configuring.asciidoc @@ -0,0 +1,41 @@ +[[ml-configuring]] +== Configuring Machine Learning + +If you want to use {xpackml} features, there must be at least one {ml} node in +your cluster and all master-eligible nodes must have {ml} enabled. By default, +all nodes are {ml} nodes. For more information about these settings, see +<>. + +To use the {xpackml} features to analyze your data, you must create a job and +send your data to that job. + +* If your data is stored in {es}: + +** You can create a {dfeed}, which retrieves data from {es} for analysis. +** You can use {kib} to expedite the creation of jobs and {dfeeds}. + +* If your data is not stored in {es}, you can +{ref}/ml-post-data.html[POST data] from any source directly to an API. + +The results of {ml} analysis are stored in {es} and you can use {kib} to help +you visualize and explore the results. + +For a tutorial that walks you through these configuration steps, +see <>. + +Though it is quite simple to analyze your data and provide quick {ml} results, +gaining deep insights might require some additional planning and configuration. +The scenarios in this section describe some best practices for generating useful +{ml} results and insights from your data. + +* <> +* <> +* <> +* <> +* <> + +include::customurl.asciidoc[] +include::aggregations.asciidoc[] +include::categories.asciidoc[] +include::populations.asciidoc[] +include::transforms.asciidoc[] diff --git a/x-pack/docs/en/ml/customurl.asciidoc b/x-pack/docs/en/ml/customurl.asciidoc new file mode 100644 index 0000000000000..d0b7a55763180 --- /dev/null +++ b/x-pack/docs/en/ml/customurl.asciidoc @@ -0,0 +1,104 @@ +[[ml-configuring-url]] +=== Adding Custom URLs To Machine Learning Results + +When you create an advanced job or edit any job in {kib}, you can optionally +attach one or more custom URLs. You can also specify these custom settings when +you create or update jobs by using the {ml} APIs. + +The custom URLs provide links from the anomalies table in the Anomaly Explorer +or Single Metric Viewer window in {kib} to custom dashboards or external +websites. For example, you can define a custom URL that provides a way for users +to drill down to the source data from the results set. + +For each custom URL, you must supply the URL and a label, which is the link text +that appears in the anomalies table. + +[role="screenshot"] +image::images/ml-customurl.jpg["Links in the Anomaly Explorer anoamilies table"] + +[float] +==== String Substitution in Custom URLs + +You can use dollar sign ($) delimited tokens in a custom URL. These tokens are +substituted for the values of the corresponding fields in the anomaly records. +For example, for a configured URL of +`http://my.datastore.com/dashboards?user=$user_name$`, the value of the +`user_name` field in the anomaly record is substituted into the `$user_name$` +token when you click the link in the anomalies table. + +NOTE: Not all fields in your source data exist in the anomaly results. If a +field is specified in the detector as the `field_name`, `by_field_name`, +`over_field_name`, or `partition_field_name`, for example, it can be used in a +custom URL. A field that is only used in the `categorization_field_name` +property, however, does not exist in the anomaly results. 
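+
+As an illustration, a custom URL such as the one above can be attached to an
+existing job with the update job API. This is only a sketch; the `web_traffic`
+job name is hypothetical and the URL is the example from the previous
+paragraphs:
+
+[source,js]
+----------------------------------
+POST _xpack/ml/anomaly_detectors/web_traffic/_update
+{
+  "custom_settings": {
+    "custom_urls": [
+      {
+        "url_name": "User dashboard",
+        "url_value": "http://my.datastore.com/dashboards?user=$user_name$"
+      }
+    ]
+  }
+}
+----------------------------------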
+ +The following keywords can also be used as tokens for string substitution in a +custom URL: `$earliest$`; `$latest$`; `$mlcategoryregex$`; `$mlcategoryterms$`. + +The `$earliest$` and `$latest$` tokens pass the beginning and end of the time +span of the selected anomaly to the target page. The tokens are substituted with +date-time strings in ISO-8601 format. If you selected an interval of 1 hour for +the anomalies table, these tokens use one hour on either side of the anomaly +time as the earliest and latest times. The same is also true if the interval is +set to `Auto` and a one hour interval was chosen. + +The `$mlcategoryregex$` and `$mlcategoryterms$` tokens pertain to jobs where you +are categorizing field values. For more information about this type of analysis, +see <>. + +The `$mlcategoryregex$` token passes the regular expression value of the +category of the selected anomaly, as identified by the value of the `mlcategory` +field of the anomaly record. + +The `$mlcategoryterms$` token likewise passes the terms value of the category of +the selected anomaly. Each categorization term is prefixed by a plus (+) +character, so that when the token is passed to a {kib} dashboard, the resulting +dashboard query seeks a match for all of the terms of the category. + +For example, the following API updates a `log_categories` job to add a custom +URL that uses `$earliest$`, `$latest$`, and `$mlcategoryterms$` tokens: + +[source,js] +---------------------------------- +POST _xpack/ml/anomaly_detectors/log_categories/_update +{ + "custom_settings": { + "custom_urls": [ + { + "url_name": "test-link1", + "url_value": "http://localhost:5601/app/kibana#/discover?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:'$earliest$',mode:quick,to:'$latest$'))&_a=(columns:!(_source),index:AV3OWB68ue3Ht69t29aw,interval:auto,query:(query_string:(analyze_wildcard:!t,query:'$mlcategoryterms$')),sort:!(time,desc))" + } + ] + } +} +---------------------------------- + +When you click this custom URL in the anomalies table in {kib}, it opens up the +Discover page and displays source data for the period when the anomaly occurred. +Since this job was categorizing log messages, some `$mlcategoryterms$` token +values that were passed to the target page for an example anomaly are as follows: + +[role="screenshot"] +image::images/ml-categoryterms.jpg["A query for category terms on the Discover page in {kib}"] + +[TIP] +=============================== +* The custom URL links in the anomaly tables use pop-ups. You must configure +your web browser so that it does not block pop-up windows or create an exception +for your {kib} URL. +* When creating a link to a {kib} dashboard, the URLs for dashboards can be very +long. Be careful of typos, end of line characters, and URL encoding. Also ensure +you use the appropriate index ID for the target {kib} index pattern. +* If you use an influencer name for string substitution, keep in mind that it +might not always be available in the analysis results and the URL is invalid in +those cases. There is not always a statistically significant influencer for each +anomaly. +* The dates substituted for `$earliest$` and `$latest$` tokens are in +ISO-8601 format and the target system must understand this format. +* If the job performs an analysis against nested JSON fields, the tokens for +string substitution can refer to these fields using dot notation. For example, +`$cpu.total$`. +* {es} source data mappings might make it difficult for the query string to work. 
+Test the custom URL before saving the job configuration to check that it works +as expected, particularly when using string substitution. +=============================== diff --git a/x-pack/docs/en/ml/datafeeds.asciidoc b/x-pack/docs/en/ml/datafeeds.asciidoc new file mode 100644 index 0000000000000..885cb2a83f6f9 --- /dev/null +++ b/x-pack/docs/en/ml/datafeeds.asciidoc @@ -0,0 +1,40 @@ +[[ml-dfeeds]] +=== {dfeeds-cap} + +Machine learning jobs can analyze data that is stored in {es} or data that is +sent from some other source via an API. _{dfeeds-cap}_ retrieve data from {es} +for analysis, which is the simpler and more common scenario. + +If you create jobs in {kib}, you must use {dfeeds}. When you create a job, you +select an index pattern and {kib} configures the {dfeed} for you under the +covers. If you use {ml} APIs instead, you can create a {dfeed} by using the +{ref}/ml-put-datafeed.html[create {dfeeds} API] after you create a job. You can +associate only one {dfeed} with each job. + +For a description of all the {dfeed} properties, see +{ref}/ml-datafeed-resource.html[Datafeed Resources]. + +To start retrieving data from {es}, you must start the {dfeed}. When you start +it, you can optionally specify start and end times. If you do not specify an +end time, the {dfeed} runs continuously. You can start and stop {dfeeds} in +{kib} or use the {ref}/ml-start-datafeed.html[start {dfeeds}] and +{ref}/ml-stop-datafeed.html[stop {dfeeds}] APIs. A {dfeed} can be started and +stopped multiple times throughout its lifecycle. + +[IMPORTANT] +-- +When {security} is enabled, a {dfeed} stores the roles of the user who created +or updated the {dfeed} at that time. This means that if those roles are updated, +the {dfeed} subsequently runs with the new permissions that are associated with +the roles. However, if the user’s roles are adjusted after creating or updating +the {dfeed}, the {dfeed} continues to run with the permissions that were +associated with the original roles. + +One way to update the roles that are stored within the {dfeed} without changing +any other settings is to submit an empty JSON document ({}) to the +{ref}/ml-update-datafeed.html[update {dfeed} API]. +-- + +If the data that you want to analyze is not stored in {es}, you cannot use +{dfeeds}. You can however send batches of data directly to the job by using the +{ref}/ml-post-data.html[post data to jobs API]. diff --git a/x-pack/docs/en/ml/forecasting.asciidoc b/x-pack/docs/en/ml/forecasting.asciidoc new file mode 100644 index 0000000000000..95693a1677f0a --- /dev/null +++ b/x-pack/docs/en/ml/forecasting.asciidoc @@ -0,0 +1,69 @@ +[float] +[[ml-forecasting]] +=== Forecasting the Future + +After the {xpackml} features create baselines of normal behavior for your data, +you can use that information to extrapolate future behavior. + +You can use a forecast to estimate a time series value at a specific future date. +For example, you might want to determine how many users you can expect to visit +your website next Sunday at 0900. + +You can also use it to estimate the probability of a time series value occurring +at a future date. For example, you might want to determine how likely it is that +your disk utilization will reach 100% before the end of next week. + +Each forecast has a unique ID, which you can use to distinguish between forecasts +that you created at different times. You can create a forecast by using the +{ref}/ml-forecast.html[Forecast Jobs API] or by using {kib}. 
For example: + + +[role="screenshot"] +image::images/ml-gs-job-forecast.jpg["Example screenshot from the Machine Learning Single Metric Viewer in Kibana"] + +//For a more detailed walk-through of {xpackml} features, see <>. + +The yellow line in the chart represents the predicted data values. The +shaded yellow area represents the bounds for the predicted values, which also +gives an indication of the confidence of the predictions. + +When you create a forecast, you specify its _duration_, which indicates how far +the forecast extends beyond the last record that was processed. By default, the +duration is 1 day. Typically the farther into the future that you forecast, the +lower the confidence levels become (that is to say, the bounds increase). +Eventually if the confidence levels are too low, the forecast stops. + +You can also optionally specify when the forecast expires. By default, it +expires in 14 days and is deleted automatically thereafter. You can specify a +different expiration period by using the `expires_in` parameter in the +{ref}/ml-forecast.html[Forecast Jobs API]. + +//Add examples of forecast_request_stats and forecast documents? + +There are some limitations that affect your ability to create a forecast: + +* You can generate only three forecasts concurrently. There is no limit to the +number of forecasts that you retain. Existing forecasts are not overwritten when +you create new forecasts. Rather, they are automatically deleted when they expire. +* If you use an `over_field_name` property in your job (that is to say, it's a +_population job_), you cannot create a forecast. +* If you use any of the following analytical functions in your job, you +cannot create a forecast: +** `lat_long` +** `rare` and `freq_rare` +** `time_of_day` and `time_of_week` ++ +-- +For more information about any of these functions, see <>. +-- +* Forecasts run concurrently with real-time {ml} analysis. That is to say, {ml} +analysis does not stop while forecasts are generated. Forecasts can have an +impact on {ml} jobs, however, especially in terms of memory usage. For this +reason, forecasts run only if the model memory status is acceptable and the +snapshot models for the forecast do not require more than 20 MB. If these memory +limits are reached, consider splitting the job into multiple smaller jobs and +creating forecasts for these. +* The job must be open when you create a forecast. Otherwise, an error occurs. +* If there is insufficient data to generate any meaningful predictions, an +error occurs. In general, forecasts that are created early in the learning phase +of the data analysis are less accurate. diff --git a/x-pack/docs/en/ml/functions.asciidoc b/x-pack/docs/en/ml/functions.asciidoc new file mode 100644 index 0000000000000..a59b289266760 --- /dev/null +++ b/x-pack/docs/en/ml/functions.asciidoc @@ -0,0 +1,79 @@ +[[ml-functions]] +== Function Reference + +The {xpackml} features include analysis functions that provide a wide variety of +flexible ways to analyze data for anomalies. + +When you create jobs, you specify one or more detectors, which define the type of +analysis that needs to be done. If you are creating your job by using {ml} APIs, +you specify the functions in +{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. +If you are creating your job in {kib}, you specify the functions differently +depending on whether you are creating single metric, multi-metric, or advanced +jobs. For a demonstration of creating jobs in {kib}, see <>. 
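+
+To make the API route concrete, the following sketch shows where a detector,
+and therefore its function, sits in a create job request. The job name,
+function, and field names here are only illustrative:
+
+[source,js]
+--------------------------------------------------
+PUT _xpack/ml/anomaly_detectors/web_requests
+{
+  "analysis_config": {
+    "bucket_span": "15m",
+    "detectors": [
+      { "function": "high_count", "by_field_name": "status_code" }
+    ]
+  },
+  "data_description": {
+    "time_field": "timestamp"
+  }
+}
+--------------------------------------------------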
+ +Most functions detect anomalies in both low and high values. In statistical +terminology, they apply a two-sided test. Some functions offer low and high +variations (for example, `count`, `low_count`, and `high_count`). These variations +apply one-sided tests, detecting anomalies only when the values are low or +high, depending one which alternative is used. + +//For some functions, you can optionally specify a field name in the +//`by_field_name` property. The analysis then considers whether there is an +//anomaly for one of more specific values of that field. In {kib}, use the +//**Key Fields** field in multi-metric jobs or the **by_field_name** field in +//advanced jobs. +//// +TODO: Per Sophie, "This is incorrect... Split Data refers to a partition_field_name. Over fields can only be added in Adv Config... + +Can you please remove the explanations for by/over/partition fields from the documentation for analytical functions. It's a complex topic and will be easier to review in a separate exercise." +//// + +//For some functions, you can also optionally specify a field name in the +//`over_field_name` property. This property shifts the analysis to be population- +//or peer-based and uses the field to split the data. In {kib}, use the +//**Split Data** field in multi-metric jobs or the **over_field_name** field in +//advanced jobs. + +//You can specify a `partition_field_name` with any function. The analysis is then +//segmented with completely independent baselines for each value of that field. +//In {kib}, use the **partition_field_name** field in advanced jobs. + +You can specify a `summary_count_field_name` with any function except `metric`. +When you use `summary_count_field_name`, the {ml} features expect the input +data to be pre-aggregated. The value of the `summary_count_field_name` field +must contain the count of raw events that were summarized. In {kib}, use the +**summary_count_field_name** in advanced jobs. Analyzing aggregated input data +provides a significant boost in performance. For more information, see +<>. + +If your data is sparse, there may be gaps in the data which means you might have +empty buckets. You might want to treat these as anomalies or you might want these +gaps to be ignored. Your decision depends on your use case and what is important +to you. It also depends on which functions you use. The `sum` and `count` +functions are strongly affected by empty buckets. For this reason, there are +`non_null_sum` and `non_zero_count` functions, which are tolerant to sparse data. +These functions effectively ignore empty buckets. + +//// +Some functions can benefit from overlapping buckets. This improves the overall +accuracy of the results but at the cost of a 2 bucket delay in seeing the results. + +The table below provides a high-level summary of the analytical functions provided by the API. Each of the functions is described in detail over the following pages. Note the examples given in these pages use single Detector Configuration objects. 
+//// + +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +include::functions/count.asciidoc[] +include::functions/geo.asciidoc[] +include::functions/info.asciidoc[] +include::functions/metric.asciidoc[] +include::functions/rare.asciidoc[] +include::functions/sum.asciidoc[] +include::functions/time.asciidoc[] diff --git a/x-pack/docs/en/ml/functions/count.asciidoc b/x-pack/docs/en/ml/functions/count.asciidoc new file mode 100644 index 0000000000000..4b70f80933dca --- /dev/null +++ b/x-pack/docs/en/ml/functions/count.asciidoc @@ -0,0 +1,214 @@ +[[ml-count-functions]] +=== Count Functions + +Count functions detect anomalies when the number of events in a bucket is +anomalous. + +Use `non_zero_count` functions if your data is sparse and you want to ignore +cases where the bucket count is zero. + +Use `distinct_count` functions to determine when the number of distinct values +in one field is unusual, as opposed to the total count. + +Use high-sided functions if you want to monitor unusually high event rates. +Use low-sided functions if you want to look at drops in event rate. + +The {xpackml} features include the following count functions: + +* xref:ml-count[`count`, `high_count`, `low_count`] +* xref:ml-nonzero-count[`non_zero_count`, `high_non_zero_count`, `low_non_zero_count`] +* xref:ml-distinct-count[`distinct_count`, `high_distinct_count`, `low_distinct_count`] + +[float] +[[ml-count]] +===== Count, High_count, Low_count + +The `count` function detects anomalies when the number of events in a bucket is +anomalous. + +The `high_count` function detects anomalies when the count of events in a +bucket are unusually high. + +The `low_count` function detects anomalies when the count of events in a +bucket are unusually low. + +These functions support the following properties: + +* `by_field_name` (optional) +* `over_field_name` (optional) +* `partition_field_name` (optional) + +For more information about those properties, +see {ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. + +.Example 1: Analyzing events with the count function +[source,js] +-------------------------------------------------- +{ "function" : "count" } +-------------------------------------------------- + +This example is probably the simplest possible analysis. It identifies +time buckets during which the overall count of events is higher or lower than +usual. + +When you use this function in a detector in your job, it models the event rate +and detects when the event rate is unusual compared to its past behavior. + +.Example 2: Analyzing errors with the high_count function +[source,js] +-------------------------------------------------- +{ + "function" : "high_count", + "by_field_name" : "error_code", + "over_field_name": "user" +} +-------------------------------------------------- + +If you use this `high_count` function in a detector in your job, it +models the event rate for each error code. It detects users that generate an +unusually high count of error codes compared to other users. + + +.Example 3: Analyzing status codes with the low_count function +[source,js] +-------------------------------------------------- +{ + "function" : "low_count", + "by_field_name" : "status_code" +} +-------------------------------------------------- + +In this example, the function detects when the count of events for a +status code is lower than usual. 
+ +When you use this function in a detector in your job, it models the event rate +for each status code and detects when a status code has an unusually low count +compared to its past behavior. + +.Example 4: Analyzing aggregated data with the count function +[source,js] +-------------------------------------------------- +{ + "summary_count_field_name" : "events_per_min", + "detectors" [ + { "function" : "count" } + ] +} +-------------------------------------------------- + +If you are analyzing an aggregated `events_per_min` field, do not use a sum +function (for example, `sum(events_per_min)`). Instead, use the count function +and the `summary_count_field_name` property. +//TO-DO: For more information, see <>. + +[float] +[[ml-nonzero-count]] +===== Non_zero_count, High_non_zero_count, Low_non_zero_count + +The `non_zero_count` function detects anomalies when the number of events in a +bucket is anomalous, but it ignores cases where the bucket count is zero. Use +this function if you know your data is sparse or has gaps and the gaps are not +important. + +The `high_non_zero_count` function detects anomalies when the number of events +in a bucket is unusually high and it ignores cases where the bucket count is +zero. + +The `low_non_zero_count` function detects anomalies when the number of events in +a bucket is unusually low and it ignores cases where the bucket count is zero. + +These functions support the following properties: + +* `by_field_name` (optional) +* `partition_field_name` (optional) + +For more information about those properties, +see {ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. + +For example, if you have the following number of events per bucket: + +======================================== + +1,22,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,43,31,0,0,0,0,0,0,0,0,0,0,0,0,2,1 + +======================================== + +The `non_zero_count` function models only the following data: + +======================================== + +1,22,2,43,31,2,1 + +======================================== + +.Example 5: Analyzing signatures with the high_non_zero_count function +[source,js] +-------------------------------------------------- +{ + "function" : "high_non_zero_count", + "by_field_name" : "signaturename" +} +-------------------------------------------------- + +If you use this `high_non_zero_count` function in a detector in your job, it +models the count of events for the `signaturename` field. It ignores any buckets +where the count is zero and detects when a `signaturename` value has an +unusually high count of events compared to its past behavior. + +NOTE: Population analysis (using an `over_field_name` property value) is not +supported for the `non_zero_count`, `high_non_zero_count`, and +`low_non_zero_count` functions. If you want to do population analysis and your +data is sparse, use the `count` functions, which are optimized for that scenario. + + +[float] +[[ml-distinct-count]] +===== Distinct_count, High_distinct_count, Low_distinct_count + +The `distinct_count` function detects anomalies where the number of distinct +values in one field is unusual. + +The `high_distinct_count` function detects unusually high numbers of distinct +values in one field. + +The `low_distinct_count` function detects unusually low numbers of distinct +values in one field. 
+ +These functions support the following properties: + +* `field_name` (required) +* `by_field_name` (optional) +* `over_field_name` (optional) +* `partition_field_name` (optional) + +For more information about those properties, +see {ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. + +.Example 6: Analyzing users with the distinct_count function +[source,js] +-------------------------------------------------- +{ + "function" : "distinct_count", + "field_name" : "user" +} +-------------------------------------------------- + +This `distinct_count` function detects when a system has an unusual number +of logged in users. When you use this function in a detector in your job, it +models the distinct count of users. It also detects when the distinct number of +users is unusual compared to the past. + +.Example 7: Analyzing ports with the high_distinct_count function +[source,js] +-------------------------------------------------- +{ + "function" : "high_distinct_count", + "field_name" : "dst_port", + "over_field_name": "src_ip" +} +-------------------------------------------------- + +This example detects instances of port scanning. When you use this function in a +detector in your job, it models the distinct count of ports. It also detects the +`src_ip` values that connect to an unusually high number of different +`dst_ports` values compared to other `src_ip` values. diff --git a/x-pack/docs/en/ml/functions/geo.asciidoc b/x-pack/docs/en/ml/functions/geo.asciidoc new file mode 100644 index 0000000000000..cc98e95bf2069 --- /dev/null +++ b/x-pack/docs/en/ml/functions/geo.asciidoc @@ -0,0 +1,79 @@ +[[ml-geo-functions]] +=== Geographic Functions + +The geographic functions detect anomalies in the geographic location of the +input data. + +The {xpackml} features include the following geographic function: `lat_long`. + +NOTE: You cannot create forecasts for jobs that contain geographic functions. + +[float] +[[ml-lat-long]] +==== Lat_long + +The `lat_long` function detects anomalies in the geographic location of the +input data. + +This function supports the following properties: + +* `field_name` (required) +* `by_field_name` (optional) +* `over_field_name` (optional) +* `partition_field_name` (optional) + +For more information about those properties, +see {ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. + +.Example 1: Analyzing transactions with the lat_long function +[source,js] +-------------------------------------------------- +{ + "function" : "lat_long", + "field_name" : "transactionCoordinates", + "by_field_name" : "creditCardNumber" +} +-------------------------------------------------- + +If you use this `lat_long` function in a detector in your job, it +detects anomalies where the geographic location of a credit card transaction is +unusual for a particular customer’s credit card. An anomaly might indicate fraud. + +IMPORTANT: The `field_name` that you supply must be a single string that contains +two comma-separated numbers of the form `latitude,longitude`. The `latitude` and +`longitude` must be in the range -180 to 180 and represent a point on the +surface of the Earth. 
+ +For example, JSON data might contain the following transaction coordinates: + +[source,js] +-------------------------------------------------- +{ + "time": 1460464275, + "transactionCoordinates": "40.7,-74.0", + "creditCardNumber": "1234123412341234" +} +-------------------------------------------------- + +In {es}, location data is likely to be stored in `geo_point` fields. For more +information, see {ref}/geo-point.html[Geo-point datatype]. This data type is not +supported natively in {xpackml} features. You can, however, use Painless scripts +in `script_fields` in your {dfeed} to transform the data into an appropriate +format. For example, the following Painless script transforms +`"coords": {"lat" : 41.44, "lon":90.5}` into `"lat-lon": "41.44,90.5"`: + +[source,js] +-------------------------------------------------- +{ + "script_fields": { + "lat-lon": { + "script": { + "source": "doc['coords'].lat + ',' + doc['coords'].lon", + "lang": "painless" + } + } + } +} +-------------------------------------------------- + +For more information, see <>. diff --git a/x-pack/docs/en/ml/functions/info.asciidoc b/x-pack/docs/en/ml/functions/info.asciidoc new file mode 100644 index 0000000000000..f964d4eb3ec67 --- /dev/null +++ b/x-pack/docs/en/ml/functions/info.asciidoc @@ -0,0 +1,87 @@ +[[ml-info-functions]] +=== Information Content Functions + +The information content functions detect anomalies in the amount of information +that is contained in strings within a bucket. These functions can be used as +a more sophisticated method to identify incidences of data exfiltration or +C2C activity, when analyzing the size in bytes of the data might not be sufficient. + +The {xpackml} features include the following information content functions: + +* `info_content`, `high_info_content`, `low_info_content` + +[float] +[[ml-info-content]] +==== Info_content, High_info_content, Low_info_content + +The `info_content` function detects anomalies in the amount of information that +is contained in strings in a bucket. + +If you want to monitor for unusually high amounts of information, +use `high_info_content`. +If want to look at drops in information content, use `low_info_content`. + +These functions support the following properties: + +* `field_name` (required) +* `by_field_name` (optional) +* `over_field_name` (optional) +* `partition_field_name` (optional) + +For more information about those properties, see +{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. + +.Example 1: Analyzing subdomain strings with the info_content function +[source,js] +-------------------------------------------------- +{ + "function" : "info_content", + "field_name" : "subdomain", + "over_field_name" : "highest_registered_domain" +} +-------------------------------------------------- + +If you use this `info_content` function in a detector in your job, it models +information that is present in the `subdomain` string. It detects anomalies +where the information content is unusual compared to the other +`highest_registered_domain` values. An anomaly could indicate an abuse of the +DNS protocol, such as malicious command and control activity. + +NOTE: In this example, both high and low values are considered anomalous. +In many use cases, the `high_info_content` function is often a more appropriate +choice. 
+ +.Example 2: Analyzing query strings with the high_info_content function +[source,js] +-------------------------------------------------- +{ + "function" : "high_info_content", + "field_name" : "query", + "over_field_name" : "src_ip" +} +-------------------------------------------------- + +If you use this `high_info_content` function in a detector in your job, it +models information content that is held in the DNS query string. It detects +`src_ip` values where the information content is unusually high compared to +other `src_ip` values. This example is similar to the example for the +`info_content` function, but it reports anomalies only where the amount of +information content is higher than expected. + +.Example 3: Analyzing message strings with the low_info_content function +[source,js] +-------------------------------------------------- +{ + "function" : "low_info_content", + "field_name" : "message", + "by_field_name" : "logfilename" +} +-------------------------------------------------- + +If you use this `low_info_content` function in a detector in your job, it models +information content that is present in the message string for each +`logfilename`. It detects anomalies where the information content is low +compared to its past behavior. For example, this function detects unusually low +amounts of information in a collection of rolling log files. Low information +might indicate that a process has entered an infinite loop or that logging +features have been disabled. diff --git a/x-pack/docs/en/ml/functions/metric.asciidoc b/x-pack/docs/en/ml/functions/metric.asciidoc new file mode 100644 index 0000000000000..495fc6f333575 --- /dev/null +++ b/x-pack/docs/en/ml/functions/metric.asciidoc @@ -0,0 +1,310 @@ +[[ml-metric-functions]] +=== Metric Functions + +The metric functions include functions such as mean, min and max. These values +are calculated for each bucket. Field values that cannot be converted to +double precision floating point numbers are ignored. + +The {xpackml} features include the following metric functions: + +* <> +* <> +* xref:ml-metric-median[`median`, `high_median`, `low_median`] +* xref:ml-metric-mean[`mean`, `high_mean`, `low_mean`] +* <> +* xref:ml-metric-varp[`varp`, `high_varp`, `low_varp`] + +[float] +[[ml-metric-min]] +==== Min + +The `min` function detects anomalies in the arithmetic minimum of a value. +The minimum value is calculated for each bucket. + +High- and low-sided functions are not applicable. + +This function supports the following properties: + +* `field_name` (required) +* `by_field_name` (optional) +* `over_field_name` (optional) +* `partition_field_name` (optional) + +For more information about those properties, see +{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. + +.Example 1: Analyzing minimum transactions with the min function +[source,js] +-------------------------------------------------- +{ + "function" : "min", + "field_name" : "amt", + "by_field_name" : "product" +} +-------------------------------------------------- + +If you use this `min` function in a detector in your job, it detects where the +smallest transaction is lower than previously observed. You can use this +function to detect items for sale at unintentionally low prices due to data +entry mistakes. It models the minimum amount for each product over time. + +[float] +[[ml-metric-max]] +==== Max + +The `max` function detects anomalies in the arithmetic maximum of a value. +The maximum value is calculated for each bucket. 
+ +High- and low-sided functions are not applicable. + +This function supports the following properties: + +* `field_name` (required) +* `by_field_name` (optional) +* `over_field_name` (optional) +* `partition_field_name` (optional) + +For more information about those properties, see +{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. + +.Example 2: Analyzing maximum response times with the max function +[source,js] +-------------------------------------------------- +{ + "function" : "max", + "field_name" : "responsetime", + "by_field_name" : "application" +} +-------------------------------------------------- + +If you use this `max` function in a detector in your job, it detects where the +longest `responsetime` is longer than previously observed. You can use this +function to detect applications that have `responsetime` values that are +unusually lengthy. It models the maximum `responsetime` for each application +over time and detects when the longest `responsetime` is unusually long compared +to previous applications. + +.Example 3: Two detectors with max and high_mean functions +[source,js] +-------------------------------------------------- +{ + "function" : "max", + "field_name" : "responsetime", + "by_field_name" : "application" +}, +{ + "function" : "high_mean", + "field_name" : "responsetime", + "by_field_name" : "application" +} +-------------------------------------------------- + +The analysis in the previous example can be performed alongside `high_mean` +functions by application. By combining detectors and using the same influencer +this job can detect both unusually long individual response times and average +response times for each bucket. + +[float] +[[ml-metric-median]] +==== Median, High_median, Low_median + +The `median` function detects anomalies in the statistical median of a value. +The median value is calculated for each bucket. + +If you want to monitor unusually high median values, use the `high_median` +function. + +If you are just interested in unusually low median values, use the `low_median` +function. + +These functions support the following properties: + +* `field_name` (required) +* `by_field_name` (optional) +* `over_field_name` (optional) +* `partition_field_name` (optional) + +For more information about those properties, see +{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. + +.Example 4: Analyzing response times with the median function +[source,js] +-------------------------------------------------- +{ + "function" : "median", + "field_name" : "responsetime", + "by_field_name" : "application" +} +-------------------------------------------------- + +If you use this `median` function in a detector in your job, it models the +median `responsetime` for each application over time. It detects when the median +`responsetime` is unusual compared to previous `responsetime` values. + +[float] +[[ml-metric-mean]] +==== Mean, High_mean, Low_mean + +The `mean` function detects anomalies in the arithmetic mean of a value. +The mean value is calculated for each bucket. + +If you want to monitor unusually high average values, use the `high_mean` +function. + +If you are just interested in unusually low average values, use the `low_mean` +function. 
+ +These functions support the following properties: + +* `field_name` (required) +* `by_field_name` (optional) +* `over_field_name` (optional) +* `partition_field_name` (optional) + +For more information about those properties, see +{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. + +.Example 5: Analyzing response times with the mean function +[source,js] +-------------------------------------------------- +{ + "function" : "mean", + "field_name" : "responsetime", + "by_field_name" : "application" +} +-------------------------------------------------- + +If you use this `mean` function in a detector in your job, it models the mean +`responsetime` for each application over time. It detects when the mean +`responsetime` is unusual compared to previous `responsetime` values. + +.Example 6: Analyzing response times with the high_mean function +[source,js] +-------------------------------------------------- +{ + "function" : "high_mean", + "field_name" : "responsetime", + "by_field_name" : "application" +} +-------------------------------------------------- + +If you use this `high_mean` function in a detector in your job, it models the +mean `responsetime` for each application over time. It detects when the mean +`responsetime` is unusually high compared to previous `responsetime` values. + +.Example 7: Analyzing response times with the low_mean function +[source,js] +-------------------------------------------------- +{ + "function" : "low_mean", + "field_name" : "responsetime", + "by_field_name" : "application" +} +-------------------------------------------------- + +If you use this `low_mean` function in a detector in your job, it models the +mean `responsetime` for each application over time. It detects when the mean +`responsetime` is unusually low compared to previous `responsetime` values. + +[float] +[[ml-metric-metric]] +==== Metric + +The `metric` function combines `min`, `max`, and `mean` functions. You can use +it as a shorthand for a combined analysis. If you do not specify a function in +a detector, this is the default function. +//TBD: Is that default behavior still true? + +High- and low-sided functions are not applicable. You cannot use this function +when a `summary_count_field_name` is specified. + +This function supports the following properties: + +* `field_name` (required) +* `by_field_name` (optional) +* `over_field_name` (optional) +* `partition_field_name` (optional) + +For more information about those properties, see +{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. + +.Example 8: Analyzing response times with the metric function +[source,js] +-------------------------------------------------- +{ + "function" : "metric", + "field_name" : "responsetime", + "by_field_name" : "application" +} +-------------------------------------------------- + +If you use this `metric` function in a detector in your job, it models the +mean, min, and max `responsetime` for each application over time. It detects +when the mean, min, or max `responsetime` is unusual compared to previous +`responsetime` values. + +[float] +[[ml-metric-varp]] +==== Varp, High_varp, Low_varp + +The `varp` function detects anomalies in the variance of a value which is a +measure of the variability and spread in the data. + +If you want to monitor unusually high variance, use the `high_varp` function. + +If you are just interested in unusually low variance, use the `low_varp` function. 
+ +These functions support the following properties: + +* `field_name` (required) +* `by_field_name` (optional) +* `over_field_name` (optional) +* `partition_field_name` (optional) + +For more information about those properties, see +{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. + +.Example 9: Analyzing response times with the varp function +[source,js] +-------------------------------------------------- +{ + "function" : "varp", + "field_name" : "responsetime", + "by_field_name" : "application" +} +-------------------------------------------------- + +If you use this `varp` function in a detector in your job, it models the +variance in values of `responsetime` for each application over time. It detects +when the variance in `responsetime` is unusual compared to past application +behavior. + +.Example 10: Analyzing response times with the high_varp function +[source,js] +-------------------------------------------------- +{ + "function" : "high_varp", + "field_name" : "responsetime", + "by_field_name" : "application" +} +-------------------------------------------------- + +If you use this `high_varp` function in a detector in your job, it models the +variance in values of `responsetime` for each application over time. It detects +when the variance in `responsetime` is unusual compared to past application +behavior. + +.Example 11: Analyzing response times with the low_varp function +[source,js] +-------------------------------------------------- +{ + "function" : "low_varp", + "field_name" : "responsetime", + "by_field_name" : "application" +} +-------------------------------------------------- + +If you use this `low_varp` function in a detector in your job, it models the +variance in values of `responsetime` for each application over time. It detects +when the variance in `responsetime` is unusual compared to past application +behavior. diff --git a/x-pack/docs/en/ml/functions/rare.asciidoc b/x-pack/docs/en/ml/functions/rare.asciidoc new file mode 100644 index 0000000000000..2485605557cfa --- /dev/null +++ b/x-pack/docs/en/ml/functions/rare.asciidoc @@ -0,0 +1,128 @@ +[[ml-rare-functions]] +=== Rare Functions + +The rare functions detect values that occur rarely in time or rarely for a +population. + +The `rare` analysis detects anomalies according to the number of distinct rare +values. This differs from `freq_rare`, which detects anomalies according to the +number of times (frequency) rare values occur. + +[NOTE] +==== +* The `rare` and `freq_rare` functions should not be used in conjunction with +`exclude_frequent`. +* You cannot create forecasts for jobs that contain `rare` or `freq_rare` +functions. +* Shorter bucket spans (less than 1 hour, for example) are recommended when +looking for rare events. The functions model whether something happens in a +bucket at least once. With longer bucket spans, it is more likely that +entities will be seen in a bucket and therefore they appear less rare. +Picking the ideal the bucket span depends on the characteristics of the data +with shorter bucket spans typically being measured in minutes, not hours. +* To model rare data, a learning period of at least 20 buckets is required +for typical data. +==== + +The {xpackml} features include the following rare functions: + +* <> +* <> + + +[float] +[[ml-rare]] +==== Rare + +The `rare` function detects values that occur rarely in time or rarely for a +population. It detects anomalies according to the number of distinct rare values. 
+ +This function supports the following properties: + +* `by_field_name` (required) +* `over_field_name` (optional) +* `partition_field_name` (optional) + +For more information about those properties, see +{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. + +.Example 1: Analyzing status codes with the rare function +[source,js] +-------------------------------------------------- +{ + "function" : "rare", + "by_field_name" : "status" +} +-------------------------------------------------- + +If you use this `rare` function in a detector in your job, it detects values +that are rare in time. It models status codes that occur over time and detects +when rare status codes occur compared to the past. For example, you can detect +status codes in a web access log that have never (or rarely) occurred before. + +.Example 2: Analyzing status codes in a population with the rare function +[source,js] +-------------------------------------------------- +{ + "function" : "rare", + "by_field_name" : "status", + "over_field_name" : "clientip" +} +-------------------------------------------------- + +If you use this `rare` function in a detector in your job, it detects values +that are rare in a population. It models status code and client IP interactions +that occur. It defines a rare status code as one that occurs for few client IP +values compared to the population. It detects client IP values that experience +one or more distinct rare status codes compared to the population. For example +in a web access log, a `clientip` that experiences the highest number of +different rare status codes compared to the population is regarded as highly +anomalous. This analysis is based on the number of different status code values, +not the count of occurrences. + +NOTE: To define a status code as rare the {xpackml} features look at the number +of distinct status codes that occur, not the number of times the status code +occurs. If a single client IP experiences a single unique status code, this +is rare, even if it occurs for that client IP in every bucket. + +[float] +[[ml-freq-rare]] +==== Freq_rare + +The `freq_rare` function detects values that occur rarely for a population. +It detects anomalies according to the number of times (frequency) that rare +values occur. + +This function supports the following properties: + +* `by_field_name` (required) +* `over_field_name` (required) +* `partition_field_name` (optional) + +For more information about those properties, see +{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. + +.Example 3: Analyzing URI values in a population with the freq_rare function +[source,js] +-------------------------------------------------- +{ + "function" : "freq_rare", + "by_field_name" : "uri", + "over_field_name" : "clientip" +} +-------------------------------------------------- + +If you use this `freq_rare` function in a detector in your job, it +detects values that are frequently rare in a population. It models URI paths and +client IP interactions that occur. It defines a rare URI path as one that is +visited by few client IP values compared to the population. It detects the +client IP values that experience many interactions with rare URI paths compared +to the population. For example in a web access log, a client IP that visits +one or more rare URI paths many times compared to the population is regarded as +highly anomalous. 
This analysis is based on the count of interactions with rare
+URI paths, not the number of different URI path values.
+
+NOTE: To define a URI path as rare, the analytics consider the number of
+distinct values that occur and not the number of times the URI path occurs.
+If a single client IP visits a single unique URI path, this is rare, even if it
+occurs for that client IP in every bucket.
diff --git a/x-pack/docs/en/ml/functions/sum.asciidoc b/x-pack/docs/en/ml/functions/sum.asciidoc
new file mode 100644
index 0000000000000..3a0f0b264e9ef
--- /dev/null
+++ b/x-pack/docs/en/ml/functions/sum.asciidoc
@@ -0,0 +1,119 @@
+
+[[ml-sum-functions]]
+=== Sum Functions
+
+The sum functions detect anomalies when the sum of a field in a bucket is
+unusually high or low compared to its past behavior.
+
+If you want to monitor unusually high totals, use high-sided functions.
+
+If you want to look at drops in totals, use low-sided functions.
+
+If your data is sparse, use `non_null_sum` functions. Buckets without values are
+ignored; buckets with a zero value are analyzed.
+
+The {xpackml} features include the following sum functions:
+
+* xref:ml-sum[`sum`, `high_sum`, `low_sum`]
+* xref:ml-nonnull-sum[`non_null_sum`, `high_non_null_sum`, `low_non_null_sum`]
+
+////
+TBD: Incorporate from prelert docs?:
+Input data may contain pre-calculated fields giving the total count of some value e.g. transactions per minute.
+Ensure you are familiar with our advice on Summarization of Input Data, as this is likely to provide
+a more appropriate method to using the sum function.
+////
+
+[float]
+[[ml-sum]]
+==== Sum, High_sum, Low_sum
+
+The `sum` function detects anomalies where the sum of a field in a bucket is
+anomalous.
+
+If you want to monitor unusually high sum values, use the `high_sum` function.
+
+If you want to monitor unusually low sum values, use the `low_sum` function.
+
+These functions support the following properties:
+
+* `field_name` (required)
+* `by_field_name` (optional)
+* `over_field_name` (optional)
+* `partition_field_name` (optional)
+
+For more information about those properties, see
+{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].
+
+.Example 1: Analyzing total expenses with the sum function
+[source,js]
+--------------------------------------------------
+{
+  "function" : "sum",
+  "field_name" : "expenses",
+  "by_field_name" : "costcenter",
+  "over_field_name" : "employee"
+}
+--------------------------------------------------
+
+If you use this `sum` function in a detector in your job, it
+models total expenses per employee for each cost center. For each time bucket,
+it detects when an employee’s expenses are unusual for a cost center compared
+to other employees.
+
+.Example 2: Analyzing total bytes with the high_sum function
+[source,js]
+--------------------------------------------------
+{
+  "function" : "high_sum",
+  "field_name" : "cs_bytes",
+  "over_field_name" : "cs_host"
+}
+--------------------------------------------------
+
+If you use this `high_sum` function in a detector in your job, it
+models total `cs_bytes`. It detects `cs_hosts` that transfer unusually high
+volumes compared to other `cs_hosts`. This example looks for volumes of data
+transferred from a client to a server on the internet that are unusual compared
+to other clients. This scenario could be useful to detect data exfiltration or
+to find users that are abusing internet privileges.
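+
+The detector snippets in these examples show only the detector object itself.
+If you create jobs directly with the {ml} APIs, each detector is embedded in
+the `detectors` array of the job's `analysis_config`. The following sketch
+shows one way the `high_sum` detector from Example 2 could be wrapped in a
+complete job; the job ID, bucket span, and time field used here are
+illustrative assumptions only:
+
+[source,js]
+--------------------------------------------------
+PUT _xpack/ml/anomaly_detectors/high_bytes_by_host
+{
+  "analysis_config" : {
+    "bucket_span" : "15m",
+    "detectors" : [
+      {
+        "function" : "high_sum",
+        "field_name" : "cs_bytes",
+        "over_field_name" : "cs_host"
+      }
+    ]
+  },
+  "data_description" : {
+    "time_field" : "timestamp"
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE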
+
+[float]
+[[ml-nonnull-sum]]
+==== Non_null_sum, High_non_null_sum, Low_non_null_sum
+
+The `non_null_sum` function is useful if your data is sparse. Buckets without
+values are ignored and buckets with a zero value are analyzed.
+
+If you want to monitor unusually high totals, use the `high_non_null_sum`
+function.
+
+If you want to look at drops in totals, use the `low_non_null_sum` function.
+
+These functions support the following properties:
+
+* `field_name` (required)
+* `by_field_name` (optional)
+* `partition_field_name` (optional)
+
+For more information about those properties, see
+{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects].
+
+NOTE: Population analysis (that is to say, use of the `over_field_name` property)
+is not applicable for this function.
+
+.Example 3: Analyzing employee approvals with the high_non_null_sum function
+[source,js]
+--------------------------------------------------
+{
+  "function" : "high_non_null_sum",
+  "field_name" : "amount_approved",
+  "by_field_name" : "employee"
+}
+--------------------------------------------------
+
+If you use this `high_non_null_sum` function in a detector in your job, it
+models the total `amount_approved` for each employee. It ignores any buckets
+where the amount is null. It detects employees who approve unusually high
+amounts compared to their past behavior.
+//For this credit control system analysis, using non_null_sum will ignore
+//periods where the employees are not active on the system.
diff --git a/x-pack/docs/en/ml/functions/time.asciidoc b/x-pack/docs/en/ml/functions/time.asciidoc
new file mode 100644
index 0000000000000..a8067e2ca1342
--- /dev/null
+++ b/x-pack/docs/en/ml/functions/time.asciidoc
@@ -0,0 +1,99 @@
+[[ml-time-functions]]
+=== Time Functions
+
+The time functions detect events that happen at unusual times, either of the day
+or of the week. These functions can be used to find unusual patterns of behavior,
+typically associated with suspicious user activity.
+
+The {xpackml} features include the following time functions:
+
+* <>
+* <>
+
+
+[NOTE]
+====
+* You cannot create forecasts for jobs that contain time functions.
+* The `time_of_day` function is not aware of the difference between days, for instance,
+work days and weekends. When modeling different days, use the `time_of_week` function.
+In general, the `time_of_week` function is more suited to modeling the behavior of people
+rather than machines, as people vary their behavior according to the day of the week.
+* Shorter bucket spans (for example, 10 minutes) are recommended when performing a
+`time_of_day` or `time_of_week` analysis. The times of the events being modeled are not
+affected by the bucket span, but a shorter bucket span enables quicker alerting on unusual
+events.
+* Unusual events are flagged based on the previous pattern of the data, not on what we
+might think of as unusual based on human experience. So, if events typically occur
+between 3 a.m. and 5 a.m., an event occurring at 3 p.m. is flagged as unusual.
+* When Daylight Saving Time starts or stops, regular events can be flagged as anomalous.
+This situation occurs because the actual time of the event (as measured against a UTC
+baseline) has changed. This situation is treated as a step change in behavior and the new
+times will be learned quickly.
+====
+
+[float]
+[[ml-time-of-day]]
+==== Time_of_day
+
+The `time_of_day` function detects when events occur that are outside normal
+usage patterns.
For example, it detects unusual activity in the middle of the +night. + +The function expects daily behavior to be similar. If you expect the behavior of +your data to differ on Saturdays compared to Wednesdays, the `time_of_week` +function is more appropriate. + +This function supports the following properties: + +* `by_field_name` (optional) +* `over_field_name` (optional) +* `partition_field_name` (optional) + +For more information about those properties, see +{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. + +.Example 1: Analyzing events with the time_of_day function +[source,js] +-------------------------------------------------- +{ + "function" : "time_of_day", + "by_field_name" : "process" +} +-------------------------------------------------- + +If you use this `time_of_day` function in a detector in your job, it +models when events occur throughout a day for each process. It detects when an +event occurs for a process that is at an unusual time in the day compared to +its past behavior. + +[float] +[[ml-time-of-week]] +==== Time_of_week + +The `time_of_week` function detects when events occur that are outside normal +usage patterns. For example, it detects login events on the weekend. + +This function supports the following properties: + +* `by_field_name` (optional) +* `over_field_name` (optional) +* `partition_field_name` (optional) + +For more information about those properties, see +{ref}/ml-job-resource.html#ml-detectorconfig[Detector Configuration Objects]. + +.Example 2: Analyzing events with the time_of_week function +[source,js] +-------------------------------------------------- +{ + "function" : "time_of_week", + "by_field_name" : "eventcode", + "over_field_name" : "workstation" +} +-------------------------------------------------- + +If you use this `time_of_week` function in a detector in your job, it +models when events occur throughout the week for each `eventcode`. It detects +when a workstation event occurs at an unusual time during the week for that +`eventcode` compared to other workstations. It detects events for a +particular workstation that are outside the normal usage pattern. diff --git a/x-pack/docs/en/ml/getting-started-data.asciidoc b/x-pack/docs/en/ml/getting-started-data.asciidoc new file mode 100644 index 0000000000000..6a0c6bbecc814 --- /dev/null +++ b/x-pack/docs/en/ml/getting-started-data.asciidoc @@ -0,0 +1,210 @@ +[[ml-gs-data]] +=== Identifying Data for Analysis + +For the purposes of this tutorial, we provide sample data that you can play with +and search in {es}. When you consider your own data, however, it's important to +take a moment and think about where the {xpackml} features will be most +impactful. + +The first consideration is that it must be time series data. The {ml} features +are designed to model and detect anomalies in time series data. + +The second consideration, especially when you are first learning to use {ml}, +is the importance of the data and how familiar you are with it. Ideally, it is +information that contains key performance indicators (KPIs) for the health, +security, or success of your business or system. It is information that you need +to monitor and act on when anomalous behavior occurs. You might even have {kib} +dashboards that you're already using to watch this data. The better you know the +data, the quicker you will be able to create {ml} jobs that generate useful +insights. + +The final consideration is where the data is located. 
This tutorial assumes that +your data is stored in {es}. It guides you through the steps required to create +a _{dfeed}_ that passes data to a job. If your own data is outside of {es}, +analysis is still possible by using a post data API. + +IMPORTANT: If you want to create {ml} jobs in {kib}, you must use {dfeeds}. +That is to say, you must store your input data in {es}. When you create +a job, you select an existing index pattern and {kib} configures the {dfeed} +for you under the covers. + + +[float] +[[ml-gs-sampledata]] +==== Obtaining a Sample Data Set + +In this step we will upload some sample data to {es}. This is standard +{es} functionality, and is needed to set the stage for using {ml}. + +The sample data for this tutorial contains information about the requests that +are received by various applications and services in a system. A system +administrator might use this type of information to track the total number of +requests across all of the infrastructure. If the number of requests increases +or decreases unexpectedly, for example, this might be an indication that there +is a problem or that resources need to be redistributed. By using the {xpack} +{ml} features to model the behavior of this data, it is easier to identify +anomalies and take appropriate action. + +Download this sample data by clicking here: +https://download.elastic.co/demos/machine_learning/gettingstarted/server_metrics.tar.gz[server_metrics.tar.gz] + +Use the following commands to extract the files: + +[source,sh] +---------------------------------- +tar -zxvf server_metrics.tar.gz +---------------------------------- + +Each document in the server-metrics data set has the following schema: + +[source,js] +---------------------------------- +{ + "index": + { + "_index":"server-metrics", + "_type":"metric", + "_id":"1177" + } +} +{ + "@timestamp":"2017-03-23T13:00:00", + "accept":36320, + "deny":4156, + "host":"server_2", + "response":2.4558210155, + "service":"app_3", + "total":40476 +} +---------------------------------- +// NOTCONSOLE + +TIP: The sample data sets include summarized data. For example, the `total` +value is a sum of the requests that were received by a specific service at a +particular time. If your data is stored in {es}, you can generate +this type of sum or average by using aggregations. One of the benefits of +summarizing data this way is that {es} automatically distributes +these calculations across your cluster. You can then feed this summarized data +into {xpackml} instead of raw results, which reduces the volume +of data that must be considered while detecting anomalies. For the purposes of +this tutorial, however, these summary values are stored in {es}. For more +information, see <>. + +Before you load the data set, you need to set up {ref}/mapping.html[_mappings_] +for the fields. Mappings divide the documents in the index into logical groups +and specify a field's characteristics, such as the field's searchability or +whether or not it's _tokenized_, or broken up into separate words. + +The sample data includes an `upload_server-metrics.sh` script, which you can use +to create the mappings and load the data set. You can download it by clicking +here: https://download.elastic.co/demos/machine_learning/gettingstarted/upload_server-metrics.sh[upload_server-metrics.sh] +Before you run it, however, you must edit the USERNAME and PASSWORD variables +with your actual user ID and password. 
+ +The script runs a command similar to the following example, which sets up a +mapping for the data set: + +[source,sh] +---------------------------------- +curl -u elastic:x-pack-test-password -X PUT -H 'Content-Type: application/json' +http://localhost:9200/server-metrics -d '{ + "settings":{ + "number_of_shards":1, + "number_of_replicas":0 + }, + "mappings":{ + "metric":{ + "properties":{ + "@timestamp":{ + "type":"date" + }, + "accept":{ + "type":"long" + }, + "deny":{ + "type":"long" + }, + "host":{ + "type":"keyword" + }, + "response":{ + "type":"float" + }, + "service":{ + "type":"keyword" + }, + "total":{ + "type":"long" + } + } + } + } +}' +---------------------------------- +// NOTCONSOLE + +NOTE: If you run this command, you must replace `x-pack-test-password` with your +actual password. + +You can then use the {es} `bulk` API to load the data set. The +`upload_server-metrics.sh` script runs commands similar to the following +example, which loads the four JSON files: + +[source,sh] +---------------------------------- +curl -u elastic:x-pack-test-password -X POST -H "Content-Type: application/json" +http://localhost:9200/server-metrics/_bulk --data-binary "@server-metrics_1.json" + +curl -u elastic:x-pack-test-password -X POST -H "Content-Type: application/json" +http://localhost:9200/server-metrics/_bulk --data-binary "@server-metrics_2.json" + +curl -u elastic:x-pack-test-password -X POST -H "Content-Type: application/json" +http://localhost:9200/server-metrics/_bulk --data-binary "@server-metrics_3.json" + +curl -u elastic:x-pack-test-password -X POST -H "Content-Type: application/json" +http://localhost:9200/server-metrics/_bulk --data-binary "@server-metrics_4.json" +---------------------------------- +// NOTCONSOLE + +TIP: This will upload 200MB of data. This is split into 4 files as there is a +maximum 100MB limit when using the `_bulk` API. + +These commands might take some time to run, depending on the computing resources +available. + +You can verify that the data was loaded successfully with the following command: + +[source,sh] +---------------------------------- +curl 'http://localhost:9200/_cat/indices?v' -u elastic:x-pack-test-password +---------------------------------- +// NOTCONSOLE + +You should see output similar to the following: + +[source,txt] +---------------------------------- +health status index ... pri rep docs.count ... +green open server-metrics ... 1 0 905940 ... +---------------------------------- +// NOTCONSOLE + +Next, you must define an index pattern for this data set: + +. Open {kib} in your web browser and log in. If you are running {kib} +locally, go to `http://localhost:5601/`. + +. Click the **Management** tab, then **{kib}** > **Index Patterns**. + +. If you already have index patterns, click **Create Index** to define a new +one. Otherwise, the **Create index pattern** wizard is already open. + +. For this tutorial, any pattern that matches the name of the index you've +loaded will work. For example, enter `server-metrics*` as the index pattern. + +. In the **Configure settings** step, select the `@timestamp` field in the +**Time Filter field name** list. + +. Click **Create index pattern**. + +This data set can now be analyzed in {ml} jobs in {kib}. 
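+
+Optionally, you can also retrieve one of the documents directly from {es} to
+confirm that the fields and mappings look the way you expect. The following is
+a minimal sketch that uses the standard search API to return the most recent
+document in the sample index:
+
+[source,js]
+----------------------------------
+GET server-metrics/_search
+{
+  "size": 1,
+  "sort": [ { "@timestamp": "desc" } ]
+}
+----------------------------------
+// NOTCONSOLE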
diff --git a/x-pack/docs/en/ml/getting-started-forecast.asciidoc b/x-pack/docs/en/ml/getting-started-forecast.asciidoc new file mode 100644 index 0000000000000..bc445195bd417 --- /dev/null +++ b/x-pack/docs/en/ml/getting-started-forecast.asciidoc @@ -0,0 +1,76 @@ +[[ml-gs-forecast]] +=== Creating Forecasts + +In addition to detecting anomalous behavior in your data, you can use +{ml} to predict future behavior. For more information, see <>. + +To create a forecast in {kib}: + +. Go to the **Single Metric Viewer** and select one of the jobs that you created +in this tutorial. For example, select the `total-requests` job. + +. Click **Forecast**. + ++ +-- +[role="screenshot"] +image::images/ml-gs-forecast.jpg["Create a forecast from the Single Metric Viewer"] +-- + +. Specify a duration for your forecast. This value indicates how far to +extrapolate beyond the last record that was processed. You must use time units, +such as `30d` for 30 days. For more information, see +{ref}/common-options.html#time-units[Time Units]. In this example, we use a +duration of 1 week: + ++ +-- +[role="screenshot"] +image::images/ml-gs-duration.jpg["Specify a duration of 1w"] +-- + +. View the forecast in the **Single Metric Viewer**: + ++ +-- +[role="screenshot"] +image::images/ml-gs-forecast-results.jpg["View a forecast from the Single Metric Viewer"] + +The yellow line in the chart represents the predicted data values. The shaded +yellow area represents the bounds for the predicted values, which also gives an +indication of the confidence of the predictions. Note that the bounds generally +increase with time (that is to say, the confidence levels decrease), since you +are forecasting further into the future. Eventually if the confidence levels are +too low, the forecast stops. +-- + +. Optional: Compare the forecast to actual data. + ++ +-- +You can try this with the sample data by choosing a subset of the data when you +create the job, as described in <>. Create the forecast then process +the remaining data, as described in <>. +-- + +.. After you restart the {dfeed}, re-open the forecast by selecting the job in +the **Single Metric Viewer**, clicking **Forecast**, and selecting your forecast +from the list. For example: + ++ +-- +[role="screenshot"] +image::images/ml-gs-forecast-open.jpg["Open a forecast in the Single Metric Viewer"] +-- + +.. View the forecast and actual data in the **Single Metric Viewer**: + ++ +-- +[role="screenshot"] +image::images/ml-gs-forecast-actual.jpg["View a forecast over actual data in the Single Metric Viewer"] + +The chart contains the actual data values, the bounds for the expected values, +the anomalies, the forecast data values, and the bounds for the forecast. This +combination of actual and forecast data gives you an indication of how well the +{xpack} {ml} features can extrapolate the future behavior of the data. +-- + +Now that you have seen how easy it is to create forecasts with the sample data, +consider what type of events you might want to predict in your own data. For +more information and ideas, as well as a list of limitations related to +forecasts, see <>. 
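+
+If you prefer to work with the {ml} APIs, you can also create forecasts
+programmatically. The following is a sketch only; it assumes the
+`total-requests` job from this tutorial and the forecast API that is described
+in {ref}/ml-apis.html[Machine Learning APIs], and it uses the same one-week
+(`7d`) duration as the {kib} example above:
+
+[source,js]
+--------------------------------------------------
+POST _xpack/ml/anomaly_detectors/total-requests/_forecast
+{
+  "duration": "7d"
+}
+--------------------------------------------------
+// NOTCONSOLE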
diff --git a/x-pack/docs/en/ml/getting-started-multi.asciidoc b/x-pack/docs/en/ml/getting-started-multi.asciidoc new file mode 100644 index 0000000000000..804abacc605e0 --- /dev/null +++ b/x-pack/docs/en/ml/getting-started-multi.asciidoc @@ -0,0 +1,211 @@ +[[ml-gs-multi-jobs]] +=== Creating Multi-metric Jobs + +The multi-metric job wizard in {kib} provides a simple way to create more +complex jobs with multiple detectors. For example, in the single metric job, you +were tracking total requests versus time. You might also want to track other +metrics like average response time or the maximum number of denied requests. +Instead of creating jobs for each of those metrics, you can combine them in a +multi-metric job. + +You can also use multi-metric jobs to split a single time series into multiple +time series based on a categorical field. For example, you can split the data +based on its hostnames, locations, or users. Each time series is modeled +independently. By looking at temporal patterns on a per entity basis, you might +spot things that might have otherwise been hidden in the lumped view. + +Conceptually, you can think of this as running many independent single metric +jobs. By bundling them together in a multi-metric job, however, you can see an +overall score and shared influencers for all the metrics and all the entities in +the job. Multi-metric jobs therefore scale better than having many independent +single metric jobs and provide better results when you have influencers that are +shared across the detectors. + +The sample data for this tutorial contains information about the requests that +are received by various applications and services in a system. Let's assume that +you want to monitor the requests received and the response time. In particular, +you might want to track those metrics on a per service basis to see if any +services have unusual patterns. + +To create a multi-metric job in {kib}: + +. Open {kib} in your web browser and log in. If you are running {kib} locally, +go to `http://localhost:5601/`. + +. Click **Machine Learning** in the side navigation, then click **Create new job**. + +. Select the index pattern that you created for the sample data. For example, +`server-metrics*`. + +. In the **Use a wizard** section, click **Multi metric**. + +. Configure the job by providing the following job settings: + ++ +-- +[role="screenshot"] +image::images/ml-gs-multi-job.jpg["Create a new job from the server-metrics index"] +-- + +.. For the **Fields**, select `high mean(response)` and `sum(total)`. This +creates two detectors and specifies the analysis function and field that each +detector uses. The first detector uses the high mean function to detect +unusually high average values for the `response` field in each bucket. The +second detector uses the sum function to detect when the sum of the `total` +field is anomalous in each bucket. For more information about any of the +analytical functions, see <>. + +.. For the **Bucket span**, enter `10m`. This value specifies the size of the +interval that the analysis is aggregated into. As was the case in the single +metric example, this value has a significant impact on the analysis. When you're +creating jobs for your own data, you might need to experiment with different +bucket spans depending on the frequency of the input data, the duration of +typical anomalies, and the frequency at which alerting is required. + +.. For the **Split Data**, select `service`. 
When you specify this
+option, the analysis is segmented such that you have completely independent
+baselines for each distinct value of this field.
+//TBD: What is the importance of having separate baselines?
+There are seven unique service keyword values in the sample data. Thus for each
+of the seven services, you will see the high mean response metrics and sum
+total metrics.
+
++
+--
+NOTE: If you are creating a job by using the {ml} APIs or the advanced job
+wizard in {kib}, you can accomplish this split by using the
+`partition_field_name` property.
+
+--
+
+.. For the **Key Fields (Influencers)**, select `host`. Note that the `service` field
+is also automatically selected because you used it to split the data. These key
+fields are also known as _influencers_.
+When you identify a field as an influencer, you are indicating that you think
+it contains information about someone or something that influences or
+contributes to anomalies.
++
+--
+[TIP]
+========================
+Picking an influencer is strongly recommended for the following reasons:
+
+* It allows you to more easily assign blame for the anomaly
+* It simplifies and aggregates the results
+
+The best influencer is the person or thing that you want to blame for the
+anomaly. In many cases, users or client IP addresses make excellent influencers.
+Influencers can be any field in your data; they do not need to be fields that
+are specified in your detectors, though they often are.
+
+As a best practice, do not pick too many influencers. For example, you generally
+do not need more than three. If you pick many influencers, the results can be
+overwhelming and there is a small overhead to the analysis.
+
+========================
+//TBD: Is this something you can determine later from looking at results and
+//update your job with if necessary? Is it all post-processing or does it affect
+//the ongoing modeling?
+--
+
+. Click **Use full server-metrics* data**. Two graphs are generated for each
+`service` value, which represent the high mean `response` values and
+sum `total` values over time. For example:
++
+--
+[role="screenshot"]
+image::images/ml-gs-job2-split.jpg["Kibana charts for data split by service"]
+--
+
+. Provide a name for the job, for example `response_requests_by_app`. The job
+name must be unique in your cluster. You can also optionally provide a
+description of the job.
+
+. Click **Create Job**.
+
+When the job is created, you can choose to view the results, continue the job in
+real time, and create a watch. In this tutorial, we will proceed to view the
+results.
+
+TIP: The `create_multi_metric.sh` script creates a similar job and {dfeed} by
+using the {ml} APIs. You can download that script by clicking
+here: https://download.elastic.co/demos/machine_learning/gettingstarted/create_multi_metric.sh[create_multi_metric.sh]
+For API reference information, see {ref}/ml-apis.html[Machine Learning APIs].
+
+[[ml-gs-job2-analyze]]
+=== Exploring Multi-metric Job Results
+
+The {xpackml} features analyze the input stream of data, model its behavior, and
+perform analysis based on the two detectors you defined in your job. When an
+event occurs outside of the model, that event is identified as an anomaly.
+ +You can use the **Anomaly Explorer** in {kib} to view the analysis results: + +[role="screenshot"] +image::images/ml-gs-job2-explorer.jpg["Job results in the Anomaly Explorer"] + +You can explore the overall anomaly time line, which shows the maximum anomaly +score for each section in the specified time period. You can change the time +period by using the time picker in the {kib} toolbar. Note that the sections in +this time line do not necessarily correspond to the bucket span. If you change +the time period, the sections change size too. The smallest possible size for +these sections is a bucket. If you specify a large time period, the sections can +span many buckets. + +On the left is a list of the top influencers for all of the detected anomalies +in that same time period. The list includes maximum anomaly scores, which in +this case are aggregated for each influencer, for each bucket, across all +detectors. There is also a total sum of the anomaly scores for each influencer. +You can use this list to help you narrow down the contributing factors and focus +on the most anomalous entities. + +If your job contains influencers, you can also explore swim lanes that +correspond to the values of an influencer. In this example, the swim lanes +correspond to the values for the `service` field that you used to split the data. +Each lane represents a unique application or service name. Since you specified +the `host` field as an influencer, you can also optionally view the results in +swim lanes for each host name: + +[role="screenshot"] +image::images/ml-gs-job2-explorer-host.jpg["Job results sorted by host"] + +By default, the swim lanes are ordered by their maximum anomaly score values. +You can click on the sections in the swim lane to see details about the +anomalies that occurred in that time interval. + +NOTE: The anomaly scores that you see in each section of the **Anomaly Explorer** +might differ slightly. This disparity occurs because for each job we generate +bucket results, influencer results, and record results. Anomaly scores are +generated for each type of result. The anomaly timeline uses the bucket-level +anomaly scores. The list of top influencers uses the influencer-level anomaly +scores. The list of anomalies uses the record-level anomaly scores. For more +information about these different result types, see +{ref}/ml-results-resource.html[Results Resources]. + +Click on a section in the swim lanes to obtain more information about the +anomalies in that time period. For example, click on the red section in the swim +lane for `server_2`: + +[role="screenshot"] +image::images/ml-gs-job2-explorer-anomaly.jpg["Job results for an anomaly"] + +You can see exact times when anomalies occurred and which detectors or metrics +caught the anomaly. Also note that because you split the data by the `service` +field, you see separate charts for each applicable service. In particular, you +see charts for each service for which there is data on the specified host in the +specified time interval. + +Below the charts, there is a table that provides more information, such as the +typical and actual values and the influencers that contributed to the anomaly. + +[role="screenshot"] +image::images/ml-gs-job2-explorer-table.jpg["Job results table"] + +Notice that there are anomalies for both detectors, that is to say for both the +`high_mean(response)` and the `sum(total)` metrics in this time interval. 
The +table aggregates the anomalies to show the highest severity anomaly per detector +and entity, which is the by, over, or partition field value that is displayed +in the **found for** column. To view all the anomalies without any aggregation, +set the **Interval** to `Show all`. + +By +investigating multiple metrics in a single job, you might see relationships +between events in your data that would otherwise be overlooked. diff --git a/x-pack/docs/en/ml/getting-started-next.asciidoc b/x-pack/docs/en/ml/getting-started-next.asciidoc new file mode 100644 index 0000000000000..8717474759236 --- /dev/null +++ b/x-pack/docs/en/ml/getting-started-next.asciidoc @@ -0,0 +1,55 @@ +[[ml-gs-next]] +=== Next Steps + +By completing this tutorial, you've learned how you can detect anomalous +behavior in a simple set of sample data. You created single and multi-metric +jobs in {kib}, which creates and opens jobs and creates and starts {dfeeds} for +you under the covers. You examined the results of the {ml} analysis in the +**Single Metric Viewer** and **Anomaly Explorer** in {kib}. You also +extrapolated the future behavior of a job by creating a forecast. + +If you want to learn about advanced job options, you might be interested in +the following video tutorial: +https://www.elastic.co/videos/machine-learning-lab-3-detect-outliers-in-a-population[Machine Learning Lab 3 - Detect Outliers in a Population]. + +If you intend to use {ml} APIs in your applications, a good next step might be +to learn about the APIs by retrieving information about these sample jobs. +For example, the following APIs retrieve information about the jobs and {dfeeds}. + +[source,js] +-------------------------------------------------- +GET _xpack/ml/anomaly_detectors + +GET _xpack/ml/datafeeds +-------------------------------------------------- +// CONSOLE + +For more information about the {ml} APIs, see <>. + +Ultimately, the next step is to start applying {ml} to your own data. +As mentioned in <>, there are three things to consider when you're +thinking about where {ml} will be most impactful: + +. It must be time series data. +. It should be information that contains key performance indicators for the +health, security, or success of your business or system. The better you know the +data, the quicker you will be able to create jobs that generate useful +insights. +. Ideally, the data is located in {es} and you can therefore create a {dfeed} +that retrieves data in real time. If your data is outside of {es}, you +cannot use {kib} to create your jobs and you cannot use {dfeeds}. Machine +learning analysis is still possible, however, by using APIs to create and manage +jobs and to post data to them. + +Once you have decided which data to analyze, you can start considering which +analysis functions you want to use. For more information, see <>. + +In general, it is a good idea to start with single metric jobs for your +key performance indicators. After you examine these simple analysis results, +you will have a better idea of what the influencers might be. You can create +multi-metric jobs and split the data or create more complex analysis functions +as necessary. For examples of more complicated configuration options, see +<>. + +If you encounter problems, we're here to help. See <> and +<>. 
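+
+Finally, if you want to look at the raw analysis results that back the {kib}
+views used in this tutorial, you can retrieve them through the results
+endpoints of the {ml} APIs. This sketch assumes the `total-requests` job and
+sorts the record results by their anomaly score:
+
+[source,js]
+--------------------------------------------------
+GET _xpack/ml/anomaly_detectors/total-requests/results/records
+{
+  "sort": "record_score",
+  "desc": true
+}
+--------------------------------------------------
+// NOTCONSOLE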
diff --git a/x-pack/docs/en/ml/getting-started-single.asciidoc b/x-pack/docs/en/ml/getting-started-single.asciidoc new file mode 100644 index 0000000000000..3befdbaf34dd6 --- /dev/null +++ b/x-pack/docs/en/ml/getting-started-single.asciidoc @@ -0,0 +1,331 @@ +[[ml-gs-jobs]] +=== Creating Single Metric Jobs + +At this point in the tutorial, the goal is to detect anomalies in the +total requests received by your applications and services. The sample data +contains a single key performance indicator(KPI) to track this, which is the total +requests over time. It is therefore logical to start by creating a single metric +job for this KPI. + +TIP: If you are using aggregated data, you can create an advanced job +and configure it to use a `summary_count_field_name`. The {ml} algorithms will +make the best possible use of summarized data in this case. For simplicity, in +this tutorial we will not make use of that advanced functionality. For more +information, see <>. + +A single metric job contains a single _detector_. A detector defines the type of +analysis that will occur (for example, `max`, `average`, or `rare` analytical +functions) and the fields that will be analyzed. + +To create a single metric job in {kib}: + +. Open {kib} in your web browser and log in. If you are running {kib} locally, +go to `http://localhost:5601/`. + +. Click **Machine Learning** in the side navigation. + +. Click **Create new job**. + +. Select the index pattern that you created for the sample data. For example, +`server-metrics*`. + +. In the **Use a wizard** section, click **Single metric**. + +. Configure the job by providing the following information: + ++ +-- +[role="screenshot"] +image::images/ml-gs-single-job.jpg["Create a new job from the server-metrics index"] +-- + +.. For the **Aggregation**, select `Sum`. This value specifies the analysis +function that is used. ++ +-- +Some of the analytical functions look for single anomalous data points. For +example, `max` identifies the maximum value that is seen within a bucket. +Others perform some aggregation over the length of the bucket. For example, +`mean` calculates the mean of all the data points seen within the bucket. +Similarly, `count` calculates the total number of data points within the bucket. +In this tutorial, you are using the `sum` function, which calculates the sum of +the specified field's values within the bucket. For descriptions of all the +functions, see <>. +-- + +.. For the **Field**, select `total`. This value specifies the field that +the detector uses in the function. ++ +-- +NOTE: Some functions such as `count` and `rare` do not require fields. +-- + +.. For the **Bucket span**, enter `10m`. This value specifies the size of the +interval that the analysis is aggregated into. ++ +-- +The {xpackml} features use the concept of a bucket to divide up the time series +into batches for processing. For example, if you are monitoring +the total number of requests in the system, +using a bucket span of 1 hour would mean that at the end of each hour, it +calculates the sum of the requests for the last hour and computes the +anomalousness of that value compared to previous hours. + +The bucket span has two purposes: it dictates over what time span to look for +anomalous features in data, and also determines how quickly anomalies can be +detected. Choosing a shorter bucket span enables anomalies to be detected more +quickly. However, there is a risk of being too sensitive to natural variations +or noise in the input data. 
Choosing too long a bucket span can mean that
+interesting anomalies are averaged away. There is also the possibility that the
+aggregation might smooth out some anomalies based on when the bucket starts
+in time.
+
+The bucket span has a significant impact on the analysis. When you're trying to
+determine what value to use, take into account the granularity at which you
+want to perform the analysis, the frequency of the input data, the duration of
+typical anomalies, and the frequency at which alerting is required.
+--
+
+. Determine whether you want to process all of the data or only part of it. If
+you want to analyze all of the existing data, click
+**Use full server-metrics* data**. If you want to see what happens when you
+stop and start {dfeeds} and process additional data over time, click the time
+picker in the {kib} toolbar. Since the sample data spans a period of time
+between March 23, 2017 and April 22, 2017, click **Absolute**. Set the start
+time to March 23, 2017 and the end time to April 1, 2017, for example. Once
+you've got the time range set up, click the **Go** button.
+
++
+--
+[role="screenshot"]
+image::images/ml-gs-job1-time.jpg["Setting the time range for the {dfeed}"]
+--
++
+--
+A graph is generated, which represents the total number of requests over time.
+
+Note that the **Estimate bucket span** option is no longer greyed out in the
+**Bucket span** field. This is an experimental feature that you can use to help
+determine an appropriate bucket span for your data. For the purposes of this
+tutorial, we will leave the bucket span at 10 minutes.
+--
+
+. Provide a name for the job, for example `total-requests`. The job name must
+be unique in your cluster. You can also optionally provide a description of the
+job and create a job group.
+
+. Click **Create Job**.
+
++
+--
+[role="screenshot"]
+image::images/ml-gs-job1.jpg["A graph of the total number of requests over time"]
+--
+
+As the job is created, the graph is updated to give a visual representation of
+the progress of {ml} as the data is processed. This view is only available whilst the
+job is running.
+
+When the job is created, you can choose to view the results, continue the job
+in real time, and create a watch. In this tutorial, we will look at how to
+manage jobs and {dfeeds} before we view the results.
+
+TIP: The `create_single_metric.sh` script creates a similar job and {dfeed} by
+using the {ml} APIs. You can download that script by clicking
+here: https://download.elastic.co/demos/machine_learning/gettingstarted/create_single_metric.sh[create_single_metric.sh]
+For API reference information, see {ref}/ml-apis.html[Machine Learning APIs].
+
+[[ml-gs-job1-manage]]
+=== Managing Jobs
+
+After you create a job, you can see its status in the **Job Management** tab:
+
+
+[role="screenshot"]
+image::images/ml-gs-job1-manage1.jpg["Status information for the total-requests job"]
+
+The following information is provided for each job:
+
+Job ID::
+The unique identifier for the job.
+
+Description::
+The optional description of the job.
+
+Processed records::
+The number of records that have been processed by the job.
+
+Memory status::
+The status of the mathematical models. When you create jobs by using the APIs or
+by using the advanced options in {kib}, you can specify a `model_memory_limit`.
+That value is the maximum amount of memory resources that the mathematical
+models can use. Once that limit is approached, data pruning becomes more
+aggressive.
Upon exceeding that limit, new entities are not modeled. For more +information about this setting, see +{ref}/ml-job-resource.html#ml-apilimits[Analysis Limits]. The memory status +field reflects whether you have reached or exceeded the model memory limit. It +can have one of the following values: + +`ok`::: The models stayed below the configured value. +`soft_limit`::: The models used more than 60% of the configured memory limit +and older unused models will be pruned to free up space. +`hard_limit`::: The models used more space than the configured memory limit. +As a result, not all incoming data was processed. + +Job state:: +The status of the job, which can be one of the following values: + +`opened`::: The job is available to receive and process data. +`closed`::: The job finished successfully with its model state persisted. +The job must be opened before it can accept further data. +`closing`::: The job close action is in progress and has not yet completed. +A closing job cannot accept further data. +`failed`::: The job did not finish successfully due to an error. +This situation can occur due to invalid input data. +If the job had irrevocably failed, it must be force closed and then deleted. +If the {dfeed} can be corrected, the job can be closed and then re-opened. + +{dfeed-cap} state:: +The status of the {dfeed}, which can be one of the following values: + +started::: The {dfeed} is actively receiving data. +stopped::: The {dfeed} is stopped and will not receive data until it is +re-started. + +Latest timestamp:: +The timestamp of the last processed record. + + +If you click the arrow beside the name of job, you can show or hide additional +information, such as the settings, configuration information, or messages for +the job. + +You can also click one of the **Actions** buttons to start the {dfeed}, edit +the job or {dfeed}, and clone or delete the job, for example. + +[float] +[[ml-gs-job1-datafeed]] +==== Managing {dfeeds-cap} + +A {dfeed} can be started and stopped multiple times throughout its lifecycle. +If you want to retrieve more data from {es} and the {dfeed} is stopped, you must +restart it. + +For example, if you did not use the full data when you created the job, you can +now process the remaining data by restarting the {dfeed}: + +. In the **Machine Learning** / **Job Management** tab, click the following +button to start the {dfeed}: image:images/ml-start-feed.jpg["Start {dfeed}"] + + +. Choose a start time and end time. For example, +click **Continue from 2017-04-01 23:59:00** and select **2017-04-30** as the +search end time. Then click **Start**. The date picker defaults to the latest +timestamp of processed data. Be careful not to leave any gaps in the analysis, +otherwise you might miss anomalies. + ++ +-- +[role="screenshot"] +image::images/ml-gs-job1-datafeed.jpg["Restarting a {dfeed}"] +-- + +The {dfeed} state changes to `started`, the job state changes to `opened`, +and the number of processed records increases as the new data is analyzed. The +latest timestamp information also increases. + +TIP: If your data is being loaded continuously, you can continue running the job +in real time. For this, start your {dfeed} and select **No end time**. + +If you want to stop the {dfeed} at this point, you can click the following +button: image:images/ml-stop-feed.jpg["Stop {dfeed}"] + +Now that you have processed all the data, let's start exploring the job results. 
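+
+NOTE: The same {dfeed} operations are available through the {ml} APIs. As a
+sketch, the following calls stop and then restart the {dfeed}, assuming that
+the {dfeed} {kib} created for the `total-requests` job is named
+`datafeed-total-requests`:
+
+[source,js]
+--------------------------------------------------
+POST _xpack/ml/datafeeds/datafeed-total-requests/_stop
+
+POST _xpack/ml/datafeeds/datafeed-total-requests/_start
+{
+  "start": "2017-04-01T23:59:00Z",
+  "end": "2017-04-30T00:00:00Z"
+}
+--------------------------------------------------
+// NOTCONSOLE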
+ +[[ml-gs-job1-analyze]] +=== Exploring Single Metric Job Results + +The {xpackml} features analyze the input stream of data, model its behavior, +and perform analysis based on the detectors you defined in your job. When an +event occurs outside of the model, that event is identified as an anomaly. + +Result records for each anomaly are stored in `.ml-anomalies-*` indices in {es}. +By default, the name of the index where {ml} results are stored is labelled +`shared`, which corresponds to the `.ml-anomalies-shared` index. + +You can use the **Anomaly Explorer** or the **Single Metric Viewer** in {kib} to +view the analysis results. + +Anomaly Explorer:: + This view contains swim lanes showing the maximum anomaly score over time. + There is an overall swim lane that shows the overall score for the job, and + also swim lanes for each influencer. By selecting a block in a swim lane, the + anomaly details are displayed alongside the original source data (where + applicable). + +Single Metric Viewer:: + This view contains a chart that represents the actual and expected values over + time. This is only available for jobs that analyze a single time series and + where `model_plot_config` is enabled. As in the **Anomaly Explorer**, anomalous + data points are shown in different colors depending on their score. + +By default when you view the results for a single metric job, the +**Single Metric Viewer** opens: +[role="screenshot"] +image::images/ml-gs-job1-analysis.jpg["Single Metric Viewer for total-requests job"] + + +The blue line in the chart represents the actual data values. The shaded blue +area represents the bounds for the expected values. The area between the upper +and lower bounds are the most likely values for the model. If a value is outside +of this area then it can be said to be anomalous. + +If you slide the time selector from the beginning of the data to the end of the +data, you can see how the model improves as it processes more data. At the +beginning, the expected range of values is pretty broad and the model is not +capturing the periodicity in the data. But it quickly learns and begins to +reflect the daily variation. + +Any data points outside the range that was predicted by the model are marked +as anomalies. When you have high volumes of real-life data, many anomalies +might be found. These vary in probability from very likely to highly unlikely, +that is to say, from not particularly anomalous to highly anomalous. There +can be none, one or two or tens, sometimes hundreds of anomalies found within +each bucket. There can be many thousands found per job. In order to provide +a sensible view of the results, an _anomaly score_ is calculated for each bucket +time interval. The anomaly score is a value from 0 to 100, which indicates +the significance of the observed anomaly compared to previously seen anomalies. +The highly anomalous values are shown in red and the low scored values are +indicated in blue. An interval with a high anomaly score is significant and +requires investigation. + +Slide the time selector to a section of the time series that contains a red +anomaly data point. If you hover over the point, you can see more information +about that data point. You can also see details in the **Anomalies** section +of the viewer. 
For example: +[role="screenshot"] +image::images/ml-gs-job1-anomalies.jpg["Single Metric Viewer Anomalies for total-requests job"] + +For each anomaly you can see key details such as the time, the actual and +expected ("typical") values, and their probability. + +By default, the table contains all anomalies that have a severity of "warning" +or higher in the selected section of the timeline. If you are only interested in +critical anomalies, for example, you can change the severity threshold for this +table. + +The anomalies table also automatically calculates an interval for the data in +the table. If the time difference between the earliest and latest records in the +table is less than two days, the data is aggregated by hour to show the details +of the highest severity anomaly for each detector. Otherwise, it is +aggregated by day. You can change the interval for the table, for example, to +show all anomalies. + +You can see the same information in a different format by using the +**Anomaly Explorer**: +[role="screenshot"] +image::images/ml-gs-job1-explorer.jpg["Anomaly Explorer for total-requests job"] + + +Click one of the red sections in the swim lane to see details about the anomalies +that occurred in that time interval. For example: +[role="screenshot"] +image::images/ml-gs-job1-explorer-anomaly.jpg["Anomaly Explorer details for total-requests job"] + +After you have identified anomalies, often the next step is to try to determine +the context of those situations. For example, are there other factors that are +contributing to the problem? Are the anomalies confined to particular +applications or servers? You can begin to troubleshoot these situations by +layering additional jobs or creating multi-metric jobs. diff --git a/x-pack/docs/en/ml/getting-started-wizards.asciidoc b/x-pack/docs/en/ml/getting-started-wizards.asciidoc new file mode 100644 index 0000000000000..2eb6b5c290425 --- /dev/null +++ b/x-pack/docs/en/ml/getting-started-wizards.asciidoc @@ -0,0 +1,99 @@ +[[ml-gs-wizards]] +=== Creating Jobs in {kib} +++++ +Creating Jobs +++++ + +Machine learning jobs contain the configuration information and metadata +necessary to perform an analytical task. They also contain the results of the +analytical task. + +[NOTE] +-- +This tutorial uses {kib} to create jobs and view results, but you can +alternatively use APIs to accomplish most tasks. +For API reference information, see {ref}/ml-apis.html[Machine Learning APIs]. + +The {xpackml} features in {kib} use pop-ups. You must configure your +web browser so that it does not block pop-up windows or create an +exception for your {kib} URL. +-- + +{kib} provides wizards that help you create typical {ml} jobs. For example, you +can use wizards to create single metric, multi-metric, population, and advanced +jobs. + +To see the job creation wizards: + +. Open {kib} in your web browser and log in. If you are running {kib} locally, +go to `http://localhost:5601/`. + +. Click **Machine Learning** in the side navigation. + +. Click **Create new job**. + +. Click the `server-metrics*` index pattern. + +You can then choose from a list of job wizards. For example: + +[role="screenshot"] +image::images/ml-create-job.jpg["Job creation wizards in {kib}"] + +If you are not certain which wizard to use, there is also a **Data Visualizer** +that can help you explore the fields in your data. + +To learn more about the sample data: + +. Click **Data Visualizer**. 
+ ++ +-- +[role="screenshot"] +image::images/ml-data-visualizer.jpg["Data Visualizer in {kib}"] +-- + +. Select a time period that you're interested in exploring by using the time +picker in the {kib} toolbar. Alternatively, click +**Use full server-metrics* data** to view data over the full time range. In this +sample data, the documents relate to March and April 2017. + +. Optional: Change the number of documents per shard that are used in the +visualizations. There is a relatively small number of documents in the sample +data, so you can choose a value of `all`. For larger data sets, keep in mind +that using a large sample size increases query run times and increases the load +on the cluster. + +[role="screenshot"] +image::images/ml-data-metrics.jpg["Data Visualizer output for metrics in {kib}"] + +The fields in the indices are listed in two sections. The first section contains +the numeric ("metric") fields. The second section contains non-metric fields +(such as `keyword`, `text`, `date`, `boolean`, `ip`, and `geo_point` data types). + +For metric fields, the **Data Visualizer** indicates how many documents contain +the field in the selected time period. It also provides information about the +minimum, median, and maximum values, the number of distinct values, and their +distribution. You can use the distribution chart to get a better idea of how +the values in the data are clustered. Alternatively, you can view the top values +for metric fields. For example: + +[role="screenshot"] +image::images/ml-data-topmetrics.jpg["Data Visualizer output for top values in {kib}"] + +For date fields, the **Data Visualizer** provides the earliest and latest field +values and the number and percentage of documents that contain the field +during the selected time period. For example: + +[role="screenshot"] +image::images/ml-data-dates.jpg["Data Visualizer output for date fields in {kib}"] + +For keyword fields, the **Data Visualizer** provides the number of distinct +values, a list of the top values, and the number and percentage of documents +that contain the field during the selected time period. For example: + +[role="screenshot"] +image::images/ml-data-keywords.jpg["Data Visualizer output for date fields in {kib}"] + +In this tutorial, you will create single and multi-metric jobs that use the +`total`, `response`, `service`, and `host` fields. Though there is an option to +create an advanced job directly from the **Data Visualizer**, we will use the +single and multi-metric job creation wizards instead. diff --git a/x-pack/docs/en/ml/getting-started.asciidoc b/x-pack/docs/en/ml/getting-started.asciidoc new file mode 100644 index 0000000000000..5b15de51f0bca --- /dev/null +++ b/x-pack/docs/en/ml/getting-started.asciidoc @@ -0,0 +1,80 @@ +[[ml-getting-started]] +== Getting Started with Machine Learning +++++ +Getting Started +++++ + +Ready to get some hands-on experience with the {xpackml} features? This +tutorial shows you how to: + +* Load a sample data set into {es} +* Create single and multi-metric {ml} jobs in {kib} +* Use the results to identify possible anomalies in the data + +At the end of this tutorial, you should have a good idea of what {ml} is and +will hopefully be inspired to use it to detect anomalies in your own data. 
+ +You might also be interested in these video tutorials, which use the same sample +data: + +* https://www.elastic.co/videos/machine-learning-tutorial-creating-a-single-metric-job[Machine Learning for the Elastic Stack: Creating a single metric job] +* https://www.elastic.co/videos/machine-learning-tutorial-creating-a-multi-metric-job[Machine Learning for the Elastic Stack: Creating a multi-metric job] + + +[float] +[[ml-gs-sysoverview]] +=== System Overview + +To follow the steps in this tutorial, you will need the following +components of the Elastic Stack: + +* {es} {version}, which stores the data and the analysis results +* {kib} {version}, which provides a helpful user interface for creating and +viewing jobs + +See the https://www.elastic.co/support/matrix[Elastic Support Matrix] for +information about supported operating systems. + +See {stack-ref}/installing-elastic-stack.html[Installing the Elastic Stack] for +information about installing each of the components. + +NOTE: To get started, you can install {es} and {kib} on a +single VM or even on your laptop (requires 64-bit OS). +As you add more data and your traffic grows, +you'll want to replace the single {es} instance with a cluster. + +By default, when you install {es} and {kib}, {xpack} is installed and the +{ml} features are enabled. You cannot use {ml} with the free basic license, but +you can try all of the {xpack} features with a <>. + +If you have multiple nodes in your cluster, you can optionally dedicate nodes to +specific purposes. If you want to control which nodes are +_machine learning nodes_ or limit which nodes run resource-intensive +activity related to jobs, see <>. + +[float] +[[ml-gs-users]] +==== Users, Roles, and Privileges + +The {xpackml} features implement cluster privileges and built-in roles to +make it easier to control which users have authority to view and manage the jobs, +{dfeeds}, and results. + +By default, you can perform all of the steps in this tutorial by using the +built-in `elastic` super user. However, the password must be set before the user +can do anything. For information about how to set that password, see +<>. + +If you are performing these steps in a production environment, take extra care +because `elastic` has the `superuser` role and you could inadvertently make +significant changes to the system. You can alternatively assign the +`machine_learning_admin` and `kibana_user` roles to a user ID of your choice. + +For more information, see <> and <>. 
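+
+If you decide to create a dedicated user rather than using `elastic`, the
+following sketch shows one way to do it with the {security} user API. The user
+name and password here are placeholders; substitute your own values:
+
+[source,js]
+--------------------------------------------------
+POST /_xpack/security/user/ml_tutorial_user <1>
+{
+  "password" : "changeme",
+  "roles" : [ "machine_learning_admin", "kibana_user" ],
+  "full_name" : "Machine learning tutorial user"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:todo]
+<1> The `ml_tutorial_user` name is only an example; any valid user name works.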
+ +include::getting-started-data.asciidoc[] +include::getting-started-wizards.asciidoc[] +include::getting-started-single.asciidoc[] +include::getting-started-multi.asciidoc[] +include::getting-started-forecast.asciidoc[] +include::getting-started-next.asciidoc[] diff --git a/x-pack/docs/en/ml/images/ml-category-advanced.jpg b/x-pack/docs/en/ml/images/ml-category-advanced.jpg new file mode 100644 index 0000000000000..0a862903c0bc5 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-category-advanced.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-category-anomalies.jpg b/x-pack/docs/en/ml/images/ml-category-anomalies.jpg new file mode 100644 index 0000000000000..2d8f805b96354 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-category-anomalies.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-categoryterms.jpg b/x-pack/docs/en/ml/images/ml-categoryterms.jpg new file mode 100644 index 0000000000000..331bce30d3388 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-categoryterms.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-create-job.jpg b/x-pack/docs/en/ml/images/ml-create-job.jpg new file mode 100644 index 0000000000000..506f3d8ea3c2e Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-create-job.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-create-jobs.jpg b/x-pack/docs/en/ml/images/ml-create-jobs.jpg new file mode 100644 index 0000000000000..0c37ea2c9ffc5 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-create-jobs.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-customurl.jpg b/x-pack/docs/en/ml/images/ml-customurl.jpg new file mode 100644 index 0000000000000..4dce63e24cb28 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-customurl.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-data-dates.jpg b/x-pack/docs/en/ml/images/ml-data-dates.jpg new file mode 100644 index 0000000000000..e00b765402aae Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-data-dates.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-data-keywords.jpg b/x-pack/docs/en/ml/images/ml-data-keywords.jpg new file mode 100644 index 0000000000000..8eb28cbedcf76 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-data-keywords.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-data-metrics.jpg b/x-pack/docs/en/ml/images/ml-data-metrics.jpg new file mode 100644 index 0000000000000..eeb83a76e1cb2 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-data-metrics.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-data-topmetrics.jpg b/x-pack/docs/en/ml/images/ml-data-topmetrics.jpg new file mode 100644 index 0000000000000..80eab8f203730 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-data-topmetrics.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-data-visualizer.jpg b/x-pack/docs/en/ml/images/ml-data-visualizer.jpg new file mode 100644 index 0000000000000..11758bab17b02 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-data-visualizer.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-edit-job.jpg b/x-pack/docs/en/ml/images/ml-edit-job.jpg new file mode 100644 index 0000000000000..e6a3e6b1106de Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-edit-job.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-aggregations.jpg b/x-pack/docs/en/ml/images/ml-gs-aggregations.jpg new file mode 100644 index 0000000000000..446dce7972765 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-aggregations.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-duration.jpg b/x-pack/docs/en/ml/images/ml-gs-duration.jpg new file 
mode 100644 index 0000000000000..0e93b3f4ccd07 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-duration.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-forecast-actual.jpg b/x-pack/docs/en/ml/images/ml-gs-forecast-actual.jpg new file mode 100644 index 0000000000000..6733b6e3477e8 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-forecast-actual.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-forecast-open.jpg b/x-pack/docs/en/ml/images/ml-gs-forecast-open.jpg new file mode 100644 index 0000000000000..e654c9e7804ac Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-forecast-open.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-forecast-results.jpg b/x-pack/docs/en/ml/images/ml-gs-forecast-results.jpg new file mode 100644 index 0000000000000..f6911b4193996 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-forecast-results.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-forecast.jpg b/x-pack/docs/en/ml/images/ml-gs-forecast.jpg new file mode 100644 index 0000000000000..eeb8923b412c4 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-forecast.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job-analysis.jpg b/x-pack/docs/en/ml/images/ml-gs-job-analysis.jpg new file mode 100644 index 0000000000000..7f80ff9726a1e Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-job-analysis.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job-forecast.jpg b/x-pack/docs/en/ml/images/ml-gs-job-forecast.jpg new file mode 100644 index 0000000000000..aa891194e6346 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-job-forecast.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job1-analysis.jpg b/x-pack/docs/en/ml/images/ml-gs-job1-analysis.jpg new file mode 100644 index 0000000000000..9b34c916c8079 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-job1-analysis.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job1-anomalies.jpg b/x-pack/docs/en/ml/images/ml-gs-job1-anomalies.jpg new file mode 100644 index 0000000000000..d0d77827c90c4 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-job1-anomalies.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job1-datafeed.jpg b/x-pack/docs/en/ml/images/ml-gs-job1-datafeed.jpg new file mode 100644 index 0000000000000..aa36b5f13ea4c Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-job1-datafeed.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job1-explorer-anomaly.jpg b/x-pack/docs/en/ml/images/ml-gs-job1-explorer-anomaly.jpg new file mode 100644 index 0000000000000..9e6c76a5518dd Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-job1-explorer-anomaly.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job1-explorer.jpg b/x-pack/docs/en/ml/images/ml-gs-job1-explorer.jpg new file mode 100644 index 0000000000000..bb436a72e5064 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-job1-explorer.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job1-manage1.jpg b/x-pack/docs/en/ml/images/ml-gs-job1-manage1.jpg new file mode 100644 index 0000000000000..a2cba454e9d93 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-job1-manage1.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job1-results.jpg b/x-pack/docs/en/ml/images/ml-gs-job1-results.jpg new file mode 100644 index 0000000000000..0b04fec0e2dfb Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-job1-results.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job1-time.jpg b/x-pack/docs/en/ml/images/ml-gs-job1-time.jpg new 
file mode 100644 index 0000000000000..9cecf7e8b5414 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-job1-time.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job1.jpg b/x-pack/docs/en/ml/images/ml-gs-job1.jpg new file mode 100644 index 0000000000000..7251bfc3f6bfa Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-job1.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job2-explorer-anomaly.jpg b/x-pack/docs/en/ml/images/ml-gs-job2-explorer-anomaly.jpg new file mode 100644 index 0000000000000..f7579dd338fe4 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-job2-explorer-anomaly.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job2-explorer-host.jpg b/x-pack/docs/en/ml/images/ml-gs-job2-explorer-host.jpg new file mode 100644 index 0000000000000..cfe3f4fba6da7 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-job2-explorer-host.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job2-explorer-table.jpg b/x-pack/docs/en/ml/images/ml-gs-job2-explorer-table.jpg new file mode 100644 index 0000000000000..cb3b8205bc87d Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-job2-explorer-table.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job2-explorer.jpg b/x-pack/docs/en/ml/images/ml-gs-job2-explorer.jpg new file mode 100644 index 0000000000000..20809aa3d1b32 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-job2-explorer.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job2-split.jpg b/x-pack/docs/en/ml/images/ml-gs-job2-split.jpg new file mode 100644 index 0000000000000..4e07b865532a6 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-job2-split.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-multi-job.jpg b/x-pack/docs/en/ml/images/ml-gs-multi-job.jpg new file mode 100644 index 0000000000000..03bb6ae11967a Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-multi-job.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-gs-single-job.jpg b/x-pack/docs/en/ml/images/ml-gs-single-job.jpg new file mode 100644 index 0000000000000..5d813444db9b9 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-gs-single-job.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-population-anomaly.jpg b/x-pack/docs/en/ml/images/ml-population-anomaly.jpg new file mode 100644 index 0000000000000..9fa726d050c74 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-population-anomaly.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-population-job.jpg b/x-pack/docs/en/ml/images/ml-population-job.jpg new file mode 100644 index 0000000000000..a51fa9c9c3791 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-population-job.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-population-results.jpg b/x-pack/docs/en/ml/images/ml-population-results.jpg new file mode 100644 index 0000000000000..ae4eb7609f5b0 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-population-results.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-scriptfields.jpg b/x-pack/docs/en/ml/images/ml-scriptfields.jpg new file mode 100644 index 0000000000000..0c9150734c0f4 Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-scriptfields.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-start-feed.jpg b/x-pack/docs/en/ml/images/ml-start-feed.jpg new file mode 100644 index 0000000000000..7ee09fd81bc6e Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-start-feed.jpg differ diff --git a/x-pack/docs/en/ml/images/ml-stop-feed.jpg b/x-pack/docs/en/ml/images/ml-stop-feed.jpg new file mode 100644 index 
0000000000000..3bf3c64402bef Binary files /dev/null and b/x-pack/docs/en/ml/images/ml-stop-feed.jpg differ diff --git a/x-pack/docs/en/ml/images/ml.jpg b/x-pack/docs/en/ml/images/ml.jpg new file mode 100644 index 0000000000000..12f427675a1ae Binary files /dev/null and b/x-pack/docs/en/ml/images/ml.jpg differ diff --git a/x-pack/docs/en/ml/index.asciidoc b/x-pack/docs/en/ml/index.asciidoc new file mode 100644 index 0000000000000..c36f77ca812aa --- /dev/null +++ b/x-pack/docs/en/ml/index.asciidoc @@ -0,0 +1,27 @@ +[[xpack-ml]] += Machine Learning in the Elastic Stack + +[partintro] +-- +Machine learning is tightly integrated with the Elastic Stack. Data is pulled +from {es} for analysis and anomaly results are displayed in {kib} dashboards. + +* <> +* <> +* <> +* <> +* <> +* <> +* <> + + +-- + +include::overview.asciidoc[] +include::getting-started.asciidoc[] +include::configuring.asciidoc[] +include::stopping-ml.asciidoc[] +// include::ml-scenarios.asciidoc[] +include::api-quickref.asciidoc[] +//include::troubleshooting.asciidoc[] Referenced from x-pack/docs/public/xpack-troubleshooting.asciidoc +include::functions.asciidoc[] diff --git a/x-pack/docs/en/ml/jobs.asciidoc b/x-pack/docs/en/ml/jobs.asciidoc new file mode 100644 index 0000000000000..52baef720bac6 --- /dev/null +++ b/x-pack/docs/en/ml/jobs.asciidoc @@ -0,0 +1,33 @@ +[[ml-jobs]] +=== Machine Learning Jobs +++++ +Jobs +++++ + +Machine learning jobs contain the configuration information and metadata +necessary to perform an analytics task. + +Each job has one or more _detectors_. A detector applies an analytical function +to specific fields in your data. For more information about the types of +analysis you can perform, see <>. + +A job can also contain properties that affect which types of entities or events +are considered anomalous. For example, you can specify whether entities are +analyzed relative to their own previous behavior or relative to other entities +in a population. There are also multiple options for splitting the data into +categories and partitions. Some of these more advanced job configurations +are described in the following section: <>. + +For a description of all the job properties, see +{ref}/ml-job-resource.html[Job Resources]. + +In {kib}, there are wizards that help you create specific types of jobs, such +as _single metric_, _multi-metric_, and _population_ jobs. A single metric job +is just a job with a single detector and limited job properties. To have access +to all of the job properties in {kib}, you must choose the _advanced_ job wizard. +If you want to try creating single and multi-metrics jobs in {kib} with sample +data, see <>. + +You can also optionally assign jobs to one or more _job groups_. You can use +job groups to view the results from multiple jobs more easily and to expedite +administrative tasks by opening or closing multiple jobs at once. diff --git a/x-pack/docs/en/ml/limitations.asciidoc b/x-pack/docs/en/ml/limitations.asciidoc new file mode 100644 index 0000000000000..1efe6b19027fe --- /dev/null +++ b/x-pack/docs/en/ml/limitations.asciidoc @@ -0,0 +1,198 @@ +[[ml-limitations]] +== Machine Learning Limitations + +The following limitations and known problems apply to the {version} release of +{xpack}: + +[float] +=== Categorization uses English dictionary words +//See x-pack-elasticsearch/#3021 +Categorization identifies static parts of unstructured logs and groups similar +messages together. The default categorization tokenizer assumes English language +log messages. 
For other languages you must define a different +`categorization_analyzer` for your job. For more information, see +<>. + +Additionally, a dictionary used to influence the categorization process contains +only English words. This means categorization might work better in English than +in other languages. The ability to customize the dictionary will be added in a +future release. + +[float] +=== Pop-ups must be enabled in browsers +//See x-pack-elasticsearch/#844 + +The {xpackml} features in {kib} use pop-ups. You must configure your +web browser so that it does not block pop-up windows or create an +exception for your {kib} URL. + +[float] +=== Anomaly Explorer omissions and limitations +//See x-pack-elasticsearch/#844 and x-pack-kibana/#1461 + +In {kib}, Anomaly Explorer charts are not displayed for anomalies +that were due to categorization, `time_of_day` functions, or `time_of_week` +functions. Those particular results do not display well as time series +charts. + +The charts are also not displayed for detectors that use script fields. In that +case, the original source data cannot be easily searched because it has been +somewhat transformed by the script. + +The Anomaly Explorer charts can also look odd in circumstances where there +is very little data to plot. For example, if there is only one data point, it is +represented as a single dot. If there are only two data points, they are joined +by a line. + +[float] +=== Jobs close on the {dfeed} end date +//See x-pack-elasticsearch/#1037 + +If you start a {dfeed} and specify an end date, it will close the job when +the {dfeed} stops. This behavior avoids having numerous open one-time jobs. + +If you do not specify an end date when you start a {dfeed}, the job +remains open when you stop the {dfeed}. This behavior avoids the overhead +of closing and re-opening large jobs when there are pauses in the {dfeed}. + +[float] +=== Jobs created in {kib} must use {dfeeds} + +If you create jobs in {kib}, you must use {dfeeds}. If the data that you want to +analyze is not stored in {es}, you cannot use {dfeeds} and therefore you cannot +create your jobs in {kib}. You can, however, use the {ml} APIs to create jobs +and to send batches of data directly to the jobs. For more information, see +<> and <>. + +[float] +=== Post data API requires JSON format + +The post data API enables you to send data to a job for analysis. The data that +you send to the job must use the JSON format. + +For more information about this API, see +{ref}/ml-post-data.html[Post Data to Jobs]. + + +[float] +=== Misleading high missing field counts +//See x-pack-elasticsearch/#684 + +One of the counts associated with a {ml} job is `missing_field_count`, +which indicates the number of records that are missing a configured field. +//This information is most useful when your job analyzes CSV data. In this case, +//missing fields indicate data is not being analyzed and you might receive poor results. + +Since jobs analyze JSON data, the `missing_field_count` might be misleading. +Missing fields might be expected due to the structure of the data and therefore +do not generate poor results. + +For more information about `missing_field_count`, +see {ref}/ml-jobstats.html#ml-datacounts[Data Counts Objects]. + + +[float] +=== Terms aggregation size affects data analysis +//See x-pack-elasticsearch/#601 + +By default, the `terms` aggregation returns the buckets for the top ten terms. +You can change this default behavior by setting the `size` parameter. 
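+
+For example, a {dfeed} that feeds pre-aggregated data to a job might set the
+`size` of its `terms` aggregation explicitly so that no entities are dropped.
+The following sketch is illustrative only; the job, index, and field names are
+placeholders and the corresponding job configuration is omitted:
+
+[source,js]
+--------------------------------------------------
+PUT _xpack/ml/datafeeds/datafeed-test-aggs <1>
+{
+  "job_id": "test-aggs",
+  "indices": ["my_index"],
+  "types": ["my_type"],
+  "aggregations": {
+    "buckets": {
+      "date_histogram": {
+        "field": "@timestamp",
+        "interval": "10m"
+      },
+      "aggregations": {
+        "@timestamp": {
+          "max": { "field": "@timestamp" }
+        },
+        "clientip": {
+          "terms": {
+            "field": "clientip",
+            "size": 10000 <2>
+          },
+          "aggregations": {
+            "error_count_sum": {
+              "sum": { "field": "error_count" }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:broken]
+<1> The job, index, and field names are hypothetical; a matching job must
+already exist before the {dfeed} can be created.
+<2> Without an explicit `size`, only the top ten `clientip` terms would be
+returned and the rest of the data would not be analyzed.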
+ +If you are send pre-aggregated data to a job for analysis, you must ensure +that the `size` is configured correctly. Otherwise, some data might not be +analyzed. + + +[float] +=== Time-based index patterns are not supported +//See x-pack-elasticsearch/#1910 + +It is not possible to create an {xpackml} analysis job that uses time-based +index patterns, for example `[logstash-]YYYY.MM.DD`. +This applies to the single metric or multi metric job creation wizards in {kib}. + + +[float] +=== Fields named "by", "count", or "over" cannot be used to split data +//See x-pack-elasticsearch/#858 + +You cannot use the following field names in the `by_field_name` or +`over_field_name` properties in a job: `by`; `count`; `over`. This limitation +also applies to those properties when you create advanced jobs in {kib}. + + +[float] +=== Jobs created in {kib} use model plot config and pre-aggregated data +//See x-pack-elasticsearch/#844 + +If you create single or multi-metric jobs in {kib}, it might enable some +options under the covers that you'd want to reconsider for large or +long-running jobs. + +For example, when you create a single metric job in {kib}, it generally +enables the `model_plot_config` advanced configuration option. That configuration +option causes model information to be stored along with the results and provides +a more detailed view into anomaly detection. It is specifically used by the +**Single Metric Viewer** in {kib}. When this option is enabled, however, it can +add considerable overhead to the performance of the system. If you have jobs +with many entities, for example data from tens of thousands of servers, storing +this additional model information for every bucket might be problematic. If you +are not certain that you need this option or if you experience performance +issues, edit your job configuration to disable this option. + +For more information, see +{ref}/ml-job-resource.html#ml-apimodelplotconfig[Model Plot Config]. + +Likewise, when you create a single or multi-metric job in {kib}, in some cases +it uses aggregations on the data that it retrieves from {es}. One of the +benefits of summarizing data this way is that {es} automatically distributes +these calculations across your cluster. This summarized data is then fed into +{xpackml} instead of raw results, which reduces the volume of data that must +be considered while detecting anomalies. However, if you have two jobs, one of +which uses pre-aggregated data and another that does not, their results might +differ. This difference is due to the difference in precision of the input data. +The {ml} analytics are designed to be aggregation-aware and the likely increase +in performance that is gained by pre-aggregating the data makes the potentially +poorer precision worthwhile. If you want to view or change the aggregations +that are used in your job, refer to the `aggregations` property in your {dfeed}. + +For more information, see {ref}/ml-datafeed-resource.html[Datafeed Resources]. + +[float] +=== Security Integration + +When {security} is enabled, a {dfeed} stores the roles of the user who created +or updated the {dfeed} **at that time**. This means that if those roles are +updated then the {dfeed} subsequently runs with the new permissions that are +associated with the roles. However, if the user's roles are adjusted after +creating or updating the {dfeed}, the {dfeed} continues to run with the +permissions that were associated with the original roles. For more information, +see <>. 
+ +[float] +=== Forecasts cannot be created for population jobs + +If you use an `over_field_name` property in your job (that is to say, it's a +_population job_), you cannot create a forecast. If you try to create a forecast +for this type of job, an error occurs. For more information about forecasts, +see <>. + +[float] +=== Forecasts cannot be created for jobs that use geographic, rare, or time functions + +If you use any of the following analytical functions in your job, you cannot +create a forecast: + +* `lat_long` +* `rare` and `freq_rare` +* `time_of_day` and `time_of_week` + +If you try to create a forecast for this type of job, an error occurs. For more +information about any of these functions, see <>. + +[float] +=== Jobs must be stopped before upgrades + +You must stop any {ml} jobs that are running before you start the upgrade +process. For more information, see <> and +{stack-ref}/upgrading-elastic-stack.html[Upgrading the Elastic Stack]. diff --git a/x-pack/docs/en/ml/overview.asciidoc b/x-pack/docs/en/ml/overview.asciidoc new file mode 100644 index 0000000000000..b82a281acb0d5 --- /dev/null +++ b/x-pack/docs/en/ml/overview.asciidoc @@ -0,0 +1,20 @@ +[[ml-overview]] +== Overview + +include::analyzing.asciidoc[] +include::forecasting.asciidoc[] +include::jobs.asciidoc[] +include::datafeeds.asciidoc[] +include::buckets.asciidoc[] +include::calendars.asciidoc[] + +[[ml-concepts]] +=== Basic Machine Learning Terms +++++ +Basic Terms +++++ + +There are a few concepts that are core to {ml} in {xpack}. Understanding these +concepts from the outset will tremendously help ease the learning process. + +include::architecture.asciidoc[] diff --git a/x-pack/docs/en/ml/populations.asciidoc b/x-pack/docs/en/ml/populations.asciidoc new file mode 100644 index 0000000000000..53e10ce8d41b6 --- /dev/null +++ b/x-pack/docs/en/ml/populations.asciidoc @@ -0,0 +1,88 @@ +[[ml-configuring-pop]] +=== Performing Population Analysis + +Entities or events in your data can be considered anomalous when: + +* Their behavior changes over time, relative to their own previous behavior, or +* Their behavior is different than other entities in a specified population. + +The latter method of detecting outliers is known as _population analysis_. The +{ml} analytics build a profile of what a "typical" user, machine, or other entity +does over a specified time period and then identify when one is behaving +abnormally compared to the population. + +This type of analysis is most useful when the behavior of the population as a +whole is mostly homogeneous and you want to identify outliers. In general, +population analysis is not useful when members of the population inherently +have vastly different behavior. You can, however, segment your data into groups +that behave similarly and run these as separate jobs. For example, you can use a +query filter in the {dfeed} to segment your data or you can use the +`partition_field_name` to split the analysis for the different groups. + +Population analysis scales well and has a lower resource footprint than +individual analysis of each series. For example, you can analyze populations +of hundreds of thousands or millions of entities. + +To specify the population, use the `over_field_name` property. 
For example: + +[source,js] +---------------------------------- +PUT _xpack/ml/anomaly_detectors/population +{ + "description" : "Population analysis", + "analysis_config" : { + "bucket_span":"10m", + "influencers": [ + "username" + ], + "detectors": [ + { + "function": "mean", + "field_name": "bytesSent", + "over_field_name": "username" <1> + } + ] + }, + "data_description" : { + "time_field":"@timestamp", + "time_format": "epoch_ms" + } +} +---------------------------------- +//CONSOLE +<1> This `over_field_name` property indicates that the metrics for each user ( + as identified by their `username` value) are analyzed relative to other users + in each bucket. + +//TO-DO: Per sophiec20 "Perhaps add the datafeed config and add a query filter to +//include only workstations as servers and printers would behave differently +//from the population + +If your data is stored in {es}, you can use the population job wizard in {kib} +to create a job with these same properties. For example, the population job +wizard provides the following job settings: + +[role="screenshot"] +image::images/ml-population-job.jpg["Job settings in the population job wizard] + +After you open the job and start the {dfeed} or supply data to the job, you can +view the results in {kib}. For example, you can view the results in the +**Anomaly Explorer**: + +[role="screenshot"] +image::images/ml-population-results.jpg["Population analysis results in the Anomaly Explorer"] + +As in this case, the results are often quite sparse. There might be just a few +data points for the selected time period. Population analysis is particularly +useful when you have many entities and the data for specific entitles is sporadic +or sparse. + +If you click on a section in the timeline or swimlanes, you can see more +details about the anomalies: + +[role="screenshot"] +image::images/ml-population-anomaly.jpg["Anomaly details for a specific user"] + +In this example, the user identified as `antonette` sent a high volume of bytes +on the date and time shown. This event is anomalous because the mean is two times +higher than the expected behavior of the population. diff --git a/x-pack/docs/en/ml/stopping-ml.asciidoc b/x-pack/docs/en/ml/stopping-ml.asciidoc new file mode 100644 index 0000000000000..862fe5cf05061 --- /dev/null +++ b/x-pack/docs/en/ml/stopping-ml.asciidoc @@ -0,0 +1,87 @@ +[[stopping-ml]] +== Stopping Machine Learning + +An orderly shutdown of {ml} ensures that: + +* {dfeeds-cap} are stopped +* Buffers are flushed +* Model history is pruned +* Final results are calculated +* Model snapshots are saved +* Jobs are closed + +This process ensures that jobs are in a consistent state in case you want to +subsequently re-open them. + +[float] +[[stopping-ml-datafeeds]] +=== Stopping {dfeeds-cap} + +When you stop a {dfeed}, it ceases to retrieve data from {es}. You can stop a +{dfeed} by using {kib} or the +{ref}/ml-stop-datafeed.html[stop {dfeeds} API]. For example, the following +request stops the `feed1` {dfeed}: + +[source,js] +-------------------------------------------------- +POST _xpack/ml/datafeeds/feed1/_stop +-------------------------------------------------- +// CONSOLE +// TEST[skip:todo] + +NOTE: You must have `manage_ml`, or `manage` cluster privileges to stop {dfeeds}. +For more information, see <>. + +A {dfeed} can be started and stopped multiple times throughout its lifecycle. + +For examples of stopping {dfeeds} in {kib}, see <>. 
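+
+If you want to confirm that a {dfeed} has stopped, one way is to check its
+state with the get {dfeed} statistics API. For example:
+
+[source,js]
+--------------------------------------------------
+GET _xpack/ml/datafeeds/feed1/_stats
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:todo]
+
+The response includes a `state` field, which reports `stopped` once the stop
+request has completed.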
+ +[float] +[[stopping-all-ml-datafeeds]] +==== Stopping All {dfeeds-cap} + +If you are upgrading your cluster, you can use the following request to stop all +{dfeeds}: + +[source,js] +---------------------------------- +POST _xpack/ml/datafeeds/_all/_stop +---------------------------------- +// CONSOLE + +[float] +[[closing-ml-jobs]] +=== Closing Jobs + +When you close a job, it cannot receive data or perform analysis operations. +If a job is associated with a {dfeed}, you must stop the {dfeed} before you can +close the jobs. If the {dfeed} has an end date, the job closes automatically on +that end date. + +You can close a job by using the {ref}/ml-close-job.html[close job API]. For +example, the following request closes the `job1` job: + +[source,js] +-------------------------------------------------- +POST _xpack/ml/anomaly_detectors/job1/_close +-------------------------------------------------- +// CONSOLE +// TEST[skip:todo] + +NOTE: You must have `manage_ml`, or `manage` cluster privileges to stop {dfeeds}. +For more information, see <>. + +A job can be opened and closed multiple times throughout its lifecycle. + +[float] +[[closing-all-ml-datafeeds]] +==== Closing All Jobs + +If you are upgrading your cluster, you can use the following request to close +all open jobs on the cluster: + +[source,js] +---------------------------------- +POST _xpack/ml/anomaly_detectors/_all/_close +---------------------------------- +// CONSOLE diff --git a/x-pack/docs/en/ml/transforms.asciidoc b/x-pack/docs/en/ml/transforms.asciidoc new file mode 100644 index 0000000000000..9789518081be2 --- /dev/null +++ b/x-pack/docs/en/ml/transforms.asciidoc @@ -0,0 +1,611 @@ +[[ml-configuring-transform]] +=== Transforming Data With Script Fields + +If you use {dfeeds}, you can add scripts to transform your data before +it is analyzed. {dfeeds-cap} contain an optional `script_fields` property, where +you can specify scripts that evaluate custom expressions and return script +fields. + +If your {dfeed} defines script fields, you can use those fields in your job. +For example, you can use the script fields in the analysis functions in one or +more detectors. + +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +The following indices APIs create and add content to an index that is used in +subsequent examples: + +[source,js] +---------------------------------- +PUT /my_index +{ + "mappings":{ + "my_type":{ + "properties": { + "@timestamp": { + "type": "date" + }, + "aborted_count": { + "type": "long" + }, + "another_field": { + "type": "keyword" <1> + }, + "clientip": { + "type": "keyword" + }, + "coords": { + "properties": { + "lat": { + "type": "keyword" + }, + "lon": { + "type": "keyword" + } + } + }, + "error_count": { + "type": "long" + }, + "query": { + "type": "keyword" + }, + "some_field": { + "type": "keyword" + }, + "tokenstring1":{ + "type":"keyword" + }, + "tokenstring2":{ + "type":"keyword" + }, + "tokenstring3":{ + "type":"keyword" + } + } + } + } +} + +PUT /my_index/my_type/1 +{ + "@timestamp":"2017-03-23T13:00:00", + "error_count":36320, + "aborted_count":4156, + "some_field":"JOE", + "another_field":"SMITH ", + "tokenstring1":"foo-bar-baz", + "tokenstring2":"foo bar baz", + "tokenstring3":"foo-bar-19", + "query":"www.ml.elastic.co", + "clientip":"123.456.78.900", + "coords": { + "lat" : 41.44, + "lon":90.5 + } +} +---------------------------------- +// CONSOLE +// TESTSETUP +<1> In this example, string fields are mapped as `keyword` fields to support +aggregation. 
If you want both a full text (`text`) and a keyword (`keyword`) +version of the same field, use multi-fields. For more information, see +{ref}/multi-fields.html[fields]. + +[[ml-configuring-transform1]] +.Example 1: Adding two numerical fields +[source,js] +---------------------------------- +PUT _xpack/ml/anomaly_detectors/test1 +{ + "analysis_config":{ + "bucket_span": "10m", + "detectors":[ + { + "function":"mean", + "field_name": "total_error_count", <1> + "detector_description": "Custom script field transformation" + } + ] + }, + "data_description": { + "time_field":"@timestamp", + "time_format":"epoch_ms" + } +} + +PUT _xpack/ml/datafeeds/datafeed-test1 +{ + "job_id": "test1", + "indices": ["my_index"], + "types": ["my_type"], + "query": { + "match_all": { + "boost": 1 + } + }, + "script_fields": { + "total_error_count": { <2> + "script": { + "lang": "expression", + "inline": "doc['error_count'].value + doc['aborted_count'].value" + } + } + } +} +---------------------------------- +// CONSOLE +// TEST[skip:broken] +<1> A script field named `total_error_count` is referenced in the detector +within the job. +<2> The script field is defined in the {dfeed}. + +This `test1` job contains a detector that uses a script field in a mean analysis +function. The `datafeed-test1` {dfeed} defines the script field. It contains a +script that adds two fields in the document to produce a "total" error count. + +The syntax for the `script_fields` property is identical to that used by {es}. +For more information, see {ref}/search-request-script-fields.html[Script Fields]. + +You can preview the contents of the {dfeed} by using the following API: + +[source,js] +---------------------------------- +GET _xpack/ml/datafeeds/datafeed-test1/_preview +---------------------------------- +// CONSOLE +// TEST[continued] + +In this example, the API returns the following results, which contain a sum of +the `error_count` and `aborted_count` values: + +[source,js] +---------------------------------- +[ + { + "@timestamp": 1490274000000, + "total_error_count": 40476 + } +] +---------------------------------- +// TESTRESPONSE + + +NOTE: This example demonstrates how to use script fields, but it contains +insufficient data to generate meaningful results. For a full demonstration of +how to create jobs with sample data, see <>. + +You can alternatively use {kib} to create an advanced job that uses script +fields. To add the `script_fields` property to your {dfeed}, you must use the +**Edit JSON** tab. For example: + +[role="screenshot"] +image::images/ml-scriptfields.jpg[Adding script fields to a {dfeed} in {kib}] + +[[ml-configuring-transform-examples]] +==== Common Script Field Examples + +While the possibilities are limitless, there are a number of common scenarios +where you might use script fields in your {dfeeds}. + +[NOTE] +=============================== +Some of these examples use regular expressions. By default, regular +expressions are disabled because they circumvent the protection that Painless +provides against long running and memory hungry scripts. For more information, +see {ref}/modules-scripting-painless.html[Painless Scripting Language]. + +Machine learning analysis is case sensitive. For example, "John" is considered +to be different than "john". This is one reason you might consider using scripts +that convert your strings to upper or lowercase letters. 
+=============================== + +[[ml-configuring-transform2]] +.Example 2: Concatenating strings +[source,js] +-------------------------------------------------- +PUT _xpack/ml/anomaly_detectors/test2 +{ + "analysis_config":{ + "bucket_span": "10m", + "detectors":[ + { + "function":"low_info_content", + "field_name":"my_script_field", <1> + "detector_description": "Custom script field transformation" + } + ] + }, + "data_description": { + "time_field":"@timestamp", + "time_format":"epoch_ms" + } +} + +PUT _xpack/ml/datafeeds/datafeed-test2 +{ + "job_id": "test2", + "indices": ["my_index"], + "types": ["my_type"], + "query": { + "match_all": { + "boost": 1 + } + }, + "script_fields": { + "my_script_field": { + "script": { + "lang": "painless", + "inline": "doc['some_field'].value + '_' + doc['another_field'].value" <2> + } + } + } +} + +GET _xpack/ml/datafeeds/datafeed-test2/_preview +-------------------------------------------------- +// CONSOLE +// TEST[skip:broken] +<1> The script field has a rather generic name in this case, since it will +be used for various tests in the subsequent examples. +<2> The script field uses the plus (+) operator to concatenate strings. + +The preview {dfeed} API returns the following results, which show that "JOE" +and "SMITH " have been concatenated and an underscore was added: + +[source,js] +---------------------------------- +[ + { + "@timestamp": 1490274000000, + "my_script_field": "JOE_SMITH " + } +] +---------------------------------- +// TESTRESPONSE + +[[ml-configuring-transform3]] +.Example 3: Trimming strings +[source,js] +-------------------------------------------------- +POST _xpack/ml/datafeeds/datafeed-test2/_update +{ + "script_fields": { + "my_script_field": { + "script": { + "lang": "painless", + "inline": "doc['another_field'].value.trim()" <1> + } + } + } +} + +GET _xpack/ml/datafeeds/datafeed-test2/_preview +-------------------------------------------------- +// CONSOLE +// TEST[continued] +<1> This script field uses the `trim()` function to trim extra white space from a +string. + +The preview {dfeed} API returns the following results, which show that "SMITH " +has been trimmed to "SMITH": + +[source,js] +---------------------------------- +[ + { + "@timestamp": 1490274000000, + "my_script_field": "SMITH" + } +] +---------------------------------- +// TESTRESPONSE + +[[ml-configuring-transform4]] +.Example 4: Converting strings to lowercase +[source,js] +-------------------------------------------------- +POST _xpack/ml/datafeeds/datafeed-test2/_update +{ + "script_fields": { + "my_script_field": { + "script": { + "lang": "painless", + "inline": "doc['some_field'].value.toLowerCase()" <1> + } + } + } +} + +GET _xpack/ml/datafeeds/datafeed-test2/_preview +-------------------------------------------------- +// CONSOLE +// TEST[continued] +<1> This script field uses the `toLowerCase` function to convert a string to all +lowercase letters. Likewise, you can use the `toUpperCase{}` function to convert +a string to uppercase letters. 
+ +The preview {dfeed} API returns the following results, which show that "JOE" +has been converted to "joe": + +[source,js] +---------------------------------- +[ + { + "@timestamp": 1490274000000, + "my_script_field": "joe" + } +] +---------------------------------- +// TESTRESPONSE + +[[ml-configuring-transform5]] +.Example 5: Converting strings to mixed case formats +[source,js] +-------------------------------------------------- +POST _xpack/ml/datafeeds/datafeed-test2/_update +{ + "script_fields": { + "my_script_field": { + "script": { + "lang": "painless", + "inline": "doc['some_field'].value.substring(0, 1).toUpperCase() + doc['some_field'].value.substring(1).toLowerCase()" <1> + } + } + } +} + +GET _xpack/ml/datafeeds/datafeed-test2/_preview +-------------------------------------------------- +// CONSOLE +// TEST[continued] +<1> This script field is a more complicated example of case manipulation. It uses +the `subString()` function to capitalize the first letter of a string and +converts the remaining characters to lowercase. + +The preview {dfeed} API returns the following results, which show that "JOE" +has been converted to "Joe": + +[source,js] +---------------------------------- +[ + { + "@timestamp": 1490274000000, + "my_script_field": "Joe" + } +] +---------------------------------- +// TESTRESPONSE + +[[ml-configuring-transform6]] +.Example 6: Replacing tokens +[source,js] +-------------------------------------------------- +POST _xpack/ml/datafeeds/datafeed-test2/_update +{ + "script_fields": { + "my_script_field": { + "script": { + "lang": "painless", + "inline": "/\\s/.matcher(doc['tokenstring2'].value).replaceAll('_')" <1> + } + } + } +} + +GET _xpack/ml/datafeeds/datafeed-test2/_preview +-------------------------------------------------- +// CONSOLE +// TEST[continued] +<1> This script field uses regular expressions to replace white +space with underscores. + +The preview {dfeed} API returns the following results, which show that +"foo bar baz" has been converted to "foo_bar_baz": + +[source,js] +---------------------------------- +[ + { + "@timestamp": 1490274000000, + "my_script_field": "foo_bar_baz" + } +] +---------------------------------- +// TESTRESPONSE + +[[ml-configuring-transform7]] +.Example 7: Regular expression matching and concatenation +[source,js] +-------------------------------------------------- +POST _xpack/ml/datafeeds/datafeed-test2/_update +{ + "script_fields": { + "my_script_field": { + "script": { + "lang": "painless", + "inline": "def m = /(.*)-bar-([0-9][0-9])/.matcher(doc['tokenstring3'].value); return m.find() ? m.group(1) + '_' + m.group(2) : '';" <1> + } + } + } +} + +GET _xpack/ml/datafeeds/datafeed-test2/_preview +-------------------------------------------------- +// CONSOLE +// TEST[continued] +<1> This script field looks for a specific regular expression pattern and emits the +matched groups as a concatenated string. If no match is found, it emits an empty +string. 
+ +The preview {dfeed} API returns the following results, which show that +"foo-bar-19" has been converted to "foo_19": + +[source,js] +---------------------------------- +[ + { + "@timestamp": 1490274000000, + "my_script_field": "foo_19" + } +] +---------------------------------- +// TESTRESPONSE + +[[ml-configuring-transform8]] +.Example 8: Splitting strings by domain name +[source,js] +-------------------------------------------------- +PUT _xpack/ml/anomaly_detectors/test3 +{ + "description":"DNS tunneling", + "analysis_config":{ + "bucket_span": "30m", + "influencers": ["clientip","hrd"], + "detectors":[ + { + "function":"high_info_content", + "field_name": "sub", + "over_field_name": "hrd", + "exclude_frequent":"all" + } + ] + }, + "data_description": { + "time_field":"@timestamp", + "time_format":"epoch_ms" + } +} + +PUT _xpack/ml/datafeeds/datafeed-test3 +{ + "job_id": "test3", + "indices": ["my_index"], + "types": ["my_type"], + "query": { + "match_all": { + "boost": 1 + } + }, + "script_fields":{ + "sub":{ + "script":"return domainSplit(doc['query'].value, params).get(0);" + }, + "hrd":{ + "script":"return domainSplit(doc['query'].value, params).get(1);" + } + } +} + +GET _xpack/ml/datafeeds/datafeed-test3/_preview +-------------------------------------------------- +// CONSOLE +// TEST[skip:broken] + +If you have a single field that contains a well-formed DNS domain name, you can +use the `domainSplit()` function to split the string into its highest registered +domain and the sub-domain, which is everything to the left of the highest +registered domain. For example, the highest registered domain of +`www.ml.elastic.co` is `elastic.co` and the sub-domain is `www.ml`. The +`domainSplit()` function returns an array of two values: the first value is the +subdomain; the second value is the highest registered domain. + +NOTE: The `domainSplit()` function takes two arguments. The first argument is +the string you want to split. The second argument is always `params`. This is a +technical implementation detail related to how Painless operates internally. + +The preview {dfeed} API returns the following results, which show that +"www.ml.elastic.co" has been split into "elastic.co" and "www.ml": + +[source,js] +---------------------------------- +[ + { + "@timestamp": 1490274000000, + "clientip.keyword": "123.456.78.900", + "hrd": "elastic.co", + "sub": "www.ml" + } +] +---------------------------------- +// TESTRESPONSE + +[[ml-configuring-transform9]] +.Example 9: Transforming geo_point data +[source,js] +-------------------------------------------------- +PUT _xpack/ml/anomaly_detectors/test4 +{ + "analysis_config":{ + "bucket_span": "10m", + "detectors":[ + { + "function":"lat_long", + "field_name": "my_coordinates" + } + ] + }, + "data_description": { + "time_field":"@timestamp", + "time_format":"epoch_ms" + } +} + +PUT _xpack/ml/datafeeds/datafeed-test4 +{ + "job_id": "test4", + "indices": ["my_index"], + "types": ["my_type"], + "query": { + "match_all": { + "boost": 1 + } + }, + "script_fields": { + "my_coordinates": { + "script": { + "inline": "doc['coords.lat'].value + ',' + doc['coords.lon'].value", + "lang": "painless" + } + } + } +} + +GET _xpack/ml/datafeeds/datafeed-test4/_preview +-------------------------------------------------- +// CONSOLE +// TEST[skip:broken] + +In {es}, location data can be stored in `geo_point` fields but this data type is +not supported natively in {xpackml} analytics. This example of a script field +transforms the data into an appropriate format. 
For more information, +see <>. + +The preview {dfeed} API returns the following results, which show that +`41.44` and `90.5` have been combined into "41.44,90.5": + +[source,js] +---------------------------------- +[ + { + "@timestamp": 1490274000000, + "my_coordinates": "41.44,90.5" + } +] +---------------------------------- +// TESTRESPONSE + +//// +==== Configuring Script Fields in {dfeeds-cap} + +//TO-DO: Add Kibana steps from +//https://github.com/elastic/prelert-legacy/wiki/Transforming-data-with-script_fields#transforming-geo_point-data-to-a-workable-string-format +//// diff --git a/x-pack/docs/en/ml/troubleshooting.asciidoc b/x-pack/docs/en/ml/troubleshooting.asciidoc new file mode 100644 index 0000000000000..3412845e98077 --- /dev/null +++ b/x-pack/docs/en/ml/troubleshooting.asciidoc @@ -0,0 +1,116 @@ +[[ml-troubleshooting]] +== {xpackml} Troubleshooting +++++ +{xpackml} +++++ + +Use the information in this section to troubleshoot common problems and find +answers for frequently asked questions. + +* <> +* <> + +To get help, see <>. + +[[ml-rollingupgrade]] +=== Machine learning features unavailable after rolling upgrade + +This problem occurs after you upgrade all of the nodes in your cluster to +{version} by using rolling upgrades. When you try to use {xpackml} features for +the first time, all attempts fail, though `GET _xpack` and `GET _xpack/usage` +indicate that {xpack} is enabled. + +*Symptoms:* + +* Errors when you click *Machine Learning* in {kib}. +For example: `Jobs list could not be created` and `An internal server error occurred`. +* Null pointer and remote transport exceptions when you run {ml} APIs such as +`GET _xpack/ml/anomaly_detectors` and `GET _xpack/ml/datafeeds`. +* Errors in the log files on the master nodes. +For example: `unable to install ml metadata upon startup` + +*Resolution:* + +After you upgrade all master-eligible nodes to {es} {version} and {xpack} +{version}, restart the current master node, which triggers the {xpackml} +features to re-initialize. + +For more information, see {ref}/rolling-upgrades.html[Rolling upgrades]. + +[[ml-mappingclash]] +=== Job creation failure due to mapping clash + +This problem occurs when you try to create a job. + +*Symptoms:* + +* Illegal argument exception occurs when you click *Create Job* in {kib} or run +the create job API. For example: +`Save failed: [status_exception] This job would cause a mapping clash +with existing field [field_name] - avoid the clash by assigning a dedicated +results index` or `Save failed: [illegal_argument_exception] Can't merge a non +object mapping [field_name] with an object mapping [field_name]`. + +*Resolution:* + +This issue typically occurs when two or more jobs store their results in the +same index and the results contain fields with the same name but different +data types or different `fields` settings. + +By default, {ml} results are stored in the `.ml-anomalies-shared` index in {es}. +To resolve this issue, click *Advanced > Use dedicated index* when you create +the job in {kib}. If you are using the create job API, specify an index name in +the `results_index_name` property. + +[[ml-jobnames]] +=== {kib} cannot display jobs with invalid characters in their name + +This problem occurs when you create a job by using the +{ref}/ml-put-job.html[Create Jobs API] then try to view that job in {kib}. In +particular, the problem occurs when you use a period(.) in the job identifier. 
+ +*Symptoms:* + +* When you try to open a job (named, for example, `job.test` in the +**Anomaly Explorer** or the **Single Metric Viewer**, the job name is split and +the text after the period is assumed to be the job name. If a job does not exist +with that abbreviated name, an error occurs. For example: +`Warning Requested job test does not exist`. If a job exists with that +abbreviated name, it is displayed. + +*Resolution:* + +Create jobs in {kib} or ensure that you create jobs with valid identifiers when +you use the {ml} APIs. For more information about valid identifiers, see +{ref}/ml-put-job.html[Create Jobs API] or +{ref}/ml-job-resource.html[Job Resources]. + +[[ml-upgradedf]] + +=== Upgraded nodes fail to start due to {dfeed} issues + +This problem occurs when you have a {dfeed} that contains search or query +domain specific language (DSL) that was discontinued. For example, if you +created a {dfeed} query in 5.x using search syntax that was deprecated in 5.x +and removed in 6.0, you must fix the {dfeed} before you upgrade to 6.0. + +*Symptoms:* + +* If {ref}/logging.html#deprecation-logging[deprecation logging] is enabled +before the upgrade, deprecation messages are generated when the {dfeeds} attempt +to retrieve data. +* After the upgrade, nodes fail to start and the error indicates that they +failed to read the local state. + +*Resolution:* + +Before you upgrade, identify the problematic search or query DSL. In 5.6.5 and +later, the Upgrade Assistant detects these scenarios. If you cannot fix the DSL +before the upgrade, you must delete the {dfeed} then re-create it with valid DSL +after the upgrade. + +If you do not fix or delete the {dfeed} before the upgrade, in order to successfully +start the failing nodes you must downgrade the nodes then fix the problem per +above. + +See also {stack-ref}/upgrading-elastic-stack.html[Upgrading the Elastic Stack]. diff --git a/x-pack/docs/en/monitoring/collectors.asciidoc b/x-pack/docs/en/monitoring/collectors.asciidoc new file mode 100644 index 0000000000000..336f204b5eefb --- /dev/null +++ b/x-pack/docs/en/monitoring/collectors.asciidoc @@ -0,0 +1,149 @@ +[role="xpack"] +[[es-monitoring-collectors]] +== Collectors + +Collectors, as their name implies, collect things. Each collector runs once for +each collection interval to obtain data from the public APIs in {es} and {xpack} +that it chooses to monitor. When the data collection is finished, the data is +handed in bulk to the <> to be sent to the +monitoring clusters. Regardless of the number of exporters, each collector only +runs once per collection interval. + +There is only one collector per data type gathered. In other words, for any +monitoring document that is created, it comes from a single collector rather +than being merged from multiple collectors. {monitoring} for {es} currently has +a few collectors because the goal is to minimize overlap between them for +optimal performance. + +Each collector can create zero or more monitoring documents. For example, +the `index_stats` collector collects all index statistics at the same time to +avoid many unnecessary calls. + +[options="header"] +|======================= +| Collector | Data Types | Description +| Cluster Stats | `cluster_stats` +| Gathers details about the cluster state, including parts of +the actual cluster state (for example `GET /_cluster/state`) and statistics +about it (for example, `GET /_cluster/stats`). This produces a single document +type. 
In versions prior to X-Pack 5.5, this was actually three separate collectors +that resulted in three separate types: `cluster_stats`, `cluster_state`, and +`cluster_info`. In 5.5 and later, all three are combined into `cluster_stats`. ++ +This only runs on the _elected_ master node and the data collected +(`cluster_stats`) largely controls the UI. When this data is not present, it +indicates either a misconfiguration on the elected master node, timeouts related +to the collection of the data, or issues with storing the data. Only a single +document is produced per collection. +| Index Stats | `indices_stats`, `index_stats` +| Gathers details about the indices in the cluster, both in summary and +individually. This creates many documents that represent parts of the index +statistics output (for example, `GET /_stats`). ++ +This information only needs to be collected once, so it is collected on the +_elected_ master node. The most common failure for this collector relates to an +extreme number of indices -- and therefore time to gather them -- resulting in +timeouts. One summary `indices_stats` document is produced per collection and one +`index_stats` document is produced per index, per collection. +| Index Recovery | `index_recovery` +| Gathers details about index recovery in the cluster. Index recovery represents +the assignment of _shards_ at the cluster level. If an index is not recovered, +it is not usable. This also corresponds to shard restoration via snapshots. ++ +This information only needs to be collected once, so it is collected on the +_elected_ master node. The most common failure for this collector relates to an +extreme number of shards -- and therefore time to gather them -- resulting in +timeouts. This creates a single document that contains all recoveries by default, +which can be quite large, but it gives the most accurate picture of recovery in +the production cluster. +| Shards | `shards` +| Gathers details about all _allocated_ shards for all indices, particularly +including what node the shard is allocated to. ++ +This information only needs to be collected once, so it is collected on the +_elected_ master node. The collector uses the local cluster state to get the +routing table without any network timeout issues unlike most other collectors. +Each shard is represented by a separate monitoring document. +| Jobs | `job_stats` +| Gathers details about all machine learning job statistics (for example, +`GET /_xpack/ml/anomaly_detectors/_stats`). ++ +This information only needs to be collected once, so it is collected on the +_elected_ master node. However, for the master node to be able to perform the +collection, the master node must have `xpack.ml.enabled` set to true (default) +and a license level that supports {ml}. +| Node Stats | `node_stats` +| Gathers details about the running node, such as memory utilization and CPU +usage (for example, `GET /_nodes/_local/stats`). ++ +This runs on _every_ node with {monitoring} enabled. One common failure +results in the timeout of the node stats request due to too many segment files. +As a result, the collector spends too much time waiting for the file system +stats to be calculated until it finally times out. A single `node_stats` +document is created per collection. This is collected per node to help to +discover issues with nodes communicating with each other, but not with the +monitoring cluster (for example, intermittent network issues or memory pressure). 
+|======================= + +{monitoring} uses a single threaded scheduler to run the collection of {es} +monitoring data by all of the appropriate collectors on each node. This +scheduler is managed locally by each node and its interval is controlled by +specifying the `xpack.monitoring.collection.interval`, which defaults to 10 +seconds (`10s`), at either the node or cluster level. + +Fundamentally, each collector works on the same principle. Per collection +interval, each collector is checked to see whether it should run and then the +appropriate collectors run. The failure of an individual collector does not +impact any other collector. + +Once collection has completed, all of the monitoring data is passed to the +exporters to route the monitoring data to the monitoring clusters. + +If gaps exist in the monitoring charts in {kib}, it is typically because either +a collector failed or the monitoring cluster did not receive the data (for +example, it was being restarted). In the event that a collector fails, a logged +error should exist on the node that attempted to perform the collection. + +NOTE: Collection is currently done serially, rather than in parallel, to avoid + extra overhead on the elected master node. The downside to this approach + is that collectors might observe a different version of the cluster state + within the same collection period. In practice, this does not make a + significant difference and running the collectors in parallel would not + prevent such a possibility. + +For more information about the configuration options for the collectors, see +<>. + +[float] +[[es-monitoring-stack]] +=== Collecting data from across the Elastic Stack + +{monitoring} in {es} also receives monitoring data from other parts of the +Elastic Stack. In this way, it serves as an unscheduled monitoring data +collector for the stack. + +By default, data collection is disabled. {es} monitoring data is not +collected and all monitoring data from other sources such as {kib}, Beats, and +Logstash is ignored. You must set `xpack.monitoring.collection.enabled` to `true` +to enable the collection of monitoring data. See <>. + +Once data is received, it is forwarded to the exporters +to be routed to the monitoring cluster like all monitoring data. + +WARNING: Because this stack-level "collector" lives outside of the collection +interval of {monitoring} for {es}, it is not impacted by the +`xpack.monitoring.collection.interval` setting. Therefore, data is passed to the +exporters whenever it is received. This behavior can result in indices for {kib}, +Logstash, or Beats being created somewhat unexpectedly. + +While the monitoring data is collected and processed, some production cluster +metadata is added to incoming documents. This metadata enables {kib} to link the +monitoring data to the appropriate cluster. If this linkage is unimportant to +the infrastructure that you're monitoring, it might be simpler to configure +Logstash and Beats to report monitoring data directly to the monitoring cluster. +This scenario also prevents the production cluster from adding extra overhead +related to monitoring data, which can be very useful when there are a large +number of Logstash nodes or Beats. + +For more information about typical monitoring architectures, see +{xpack-ref}/how-monitoring-works.html[How Monitoring Works]. 
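+
+As noted above, no {es} monitoring data is gathered until
+`xpack.monitoring.collection.enabled` is set to `true`. The following sketch
+shows one way to change that flag and the collection interval dynamically with
+the cluster update settings API; the `30s` interval is only an illustrative
+value, not a recommendation:
+
+[source,js]
+----------------------------------
+PUT _cluster/settings
+{
+  "persistent": {
+    "xpack.monitoring.collection.enabled": true,
+    "xpack.monitoring.collection.interval": "30s" <1>
+  }
+}
+----------------------------------
+<1> Example value only. The default interval is `10s`; omit this setting to
+keep the default.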
diff --git a/x-pack/docs/en/monitoring/configuring-monitoring.asciidoc b/x-pack/docs/en/monitoring/configuring-monitoring.asciidoc new file mode 100644 index 0000000000000..99c69eeea8aec --- /dev/null +++ b/x-pack/docs/en/monitoring/configuring-monitoring.asciidoc @@ -0,0 +1,145 @@ +[role="xpack"] +[[configuring-monitoring]] +== Configuring Monitoring in {es} +++++ +Configuring Monitoring +++++ + +By default, {monitoring} is enabled but data collection is disabled. Advanced +monitoring settings enable you to control how frequently data is collected, +configure timeouts, and set the retention period for locally-stored monitoring +indices. You can also adjust how monitoring data is displayed. + +. To collect monitoring data about your {es} cluster: + +.. Verify that the `xpack.monitoring.enabled` and +`xpack.monitoring.collection.enabled` settings are `true` on each node in the +cluster. By default, data collection is disabled. For more information, see +<>. + +.. Optional: Specify which indices you want to monitor. ++ +-- +By default, the monitoring agent collects data from all {es} indices. +To collect data from particular indices, configure the +`xpack.monitoring.collection.indices` setting. You can specify multiple indices +as a comma-separated list or use an index pattern to match multiple indices. For +example: + +[source,yaml] +---------------------------------- +xpack.monitoring.collection.indices: logstash-*, index1, test2 +---------------------------------- + +You can prepend `+` or `-` to explicitly include or exclude index names or +patterns. For example, to include all indices that start with `test` except +`test3`, you could specify `+test*,-test3`. +-- + +.. Optional: Specify how often to collect monitoring data. The default value for +the `xpack.monitoring.collection.interval` setting 10 seconds. See +<>. + +. Optional: Configure your cluster to route monitoring data from sources such +as {kib}, Beats, and Logstash to a monitoring cluster: + +.. Verify that `xpack.monitoring.collection.enabled` settings are `true` on each +node in the cluster. + +.. {xpack-ref}/xpack-monitoring.html[Configure {monitoring} across the Elastic Stack]. + +. Identify where to store monitoring data. ++ +-- +By default, {monitoring} uses a `local` exporter that indexes monitoring data +on the same cluster. +//See <> and <>. + +Alternatively, you can use an `http` exporter to send data to a separate +monitoring cluster. +//See <>. + +For more information about typical monitoring architectures, +see {xpack-ref}/how-monitoring-works.html[How Monitoring Works]. +-- + +. If {security} is enabled and you are using an `http` exporter to send data to + a dedicated monitoring cluster: + +.. Create a user on the monitoring cluster that has the +{xpack-ref}/built-in-roles.html#built-in-roles-remote-monitoring-agent[`remote_monitoring_agent` built-in role]. For example, the following request +creates a `remote_monitor` user that has the `remote_monitoring_agent` role: ++ +-- +[source, sh] +--------------------------------------------------------------- +POST /_xpack/security/user/remote_monitor +{ + "password" : "changeme", + "roles" : [ "remote_monitoring_agent"], + "full_name" : "Internal Agent For Remote Monitoring" +} +--------------------------------------------------------------- +// CONSOLE +-- + +.. On each node in the cluster that is being monitored, configure the `http` +exporter to use the appropriate credentials when data is shipped to the monitoring cluster. 
++ +-- +If SSL/TLS is enabled on the monitoring cluster, you must use the HTTPS protocol in the `host` setting. You must also include the CA certificate in each node's trusted certificates in order to verify the identities of the nodes in the monitoring cluster. + +The following example specifies the location of the PEM encoded certificate with the `certificate_authorities` setting: + +[source,yaml] +-------------------------------------------------- +xpack.monitoring.exporters: + id1: + type: http + host: ["https://es-mon1:9200", "https://es-mon2:9200"] + auth: + username: remote_monitor <1> + password: changeme + ssl: + certificate_authorities: [ "/path/to/ca.crt" ] + id2: + type: local +-------------------------------------------------- +<1> The `username` and `password` parameters provide the user credentials. + +Alternatively, you can configure trusted certificates using a truststore +(a Java Keystore file that contains the certificates): + +[source,yaml] +-------------------------------------------------- +xpack.monitoring.exporters: + id1: + type: http + host: ["https://es-mon1:9200", "https://es-mon2:9200"] + auth: + username: remote_monitor + password: changeme + ssl: + truststore.path: /path/to/file + truststore.password: password + id2: + type: local +-------------------------------------------------- +-- + +. If {security} is enabled and you want to visualize monitoring data in {kib}, +you must create users that have access to the {kib} indices and permission to +read from the monitoring indices. ++ +-- +You set up {monitoring} UI users on the cluster where the monitoring data is +stored, that is to say the monitoring cluster. To grant all of the necessary permissions, assign users the +`monitoring_user` and `kibana_user` roles. For more information, see +{xpack-ref}/mapping-roles.html[Mapping users and groups to roles]. +-- + +. Optional: +<>. + +include::indices.asciidoc[] +include::{xes-repo-dir}/settings/monitoring-settings.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/monitoring/exporters.asciidoc b/x-pack/docs/en/monitoring/exporters.asciidoc new file mode 100644 index 0000000000000..e7727f1e97af0 --- /dev/null +++ b/x-pack/docs/en/monitoring/exporters.asciidoc @@ -0,0 +1,171 @@ +[role="xpack"] +[[es-monitoring-exporters]] +== Exporters + +The purpose of exporters is to take data collected from any Elastic Stack +source and route it to the monitoring cluster. It is possible to configure +more than one exporter, but the general and default setup is to use a single +exporter. + +There are two types of exporters in {es}: + +`local`:: +The default exporter used by {monitoring} for {es}. This exporter routes data +back into the _same_ cluster. See <>. + +`http`:: +The preferred exporter, which you can use to route data into any supported +{es} cluster accessible via HTTP. Production environments should always use a +separate monitoring cluster. See <>. + +Both exporters serve the same purpose: to set up the monitoring cluster and route +monitoring data. However, they perform these tasks in very different ways. Even +though things happen differently, both exporters are capable of sending all of +the same data. + +Exporters are configurable at both the node and cluster level. Cluster-wide +settings, which are updated with the +<>, take precedence over +settings in the `elasticsearch.yml` file on each node. When you update an +exporter, it is completely replaced by the updated version of the exporter. 
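+
+For example, the following sketch defines (or redefines) a hypothetical
+exporter named `my_remote` at the cluster level. Because an update completely
+replaces the exporter, the request supplies every setting the exporter needs;
+the name and host are placeholders, not part of any default configuration:
+
+[source,js]
+----------------------------------
+PUT _cluster/settings
+{
+  "persistent": {
+    "xpack.monitoring.exporters.my_remote.type": "http",
+    "xpack.monitoring.exporters.my_remote.host": [ "monitoring-host:9200" ] <1>
+  }
+}
+----------------------------------
+<1> `my_remote` and `monitoring-host:9200` are placeholder values; substitute
+the exporter name and monitoring cluster address for your environment.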
+ +IMPORTANT: It is critical that all nodes share the same setup. Otherwise, +monitoring data might be routed in different ways or to different places. + +When the exporters route monitoring data into the monitoring cluster, they use +`_bulk` indexing for optimal performance. All monitoring data is forwarded in +bulk to all enabled exporters on the same node. From there, the exporters +serialize the monitoring data and send a bulk request to the monitoring cluster. +There is no queuing--in memory or persisted to disk--so any failure during the +export results in the loss of that batch of monitoring data. This design limits +the impact on {es} and the assumption is that the next pass will succeed. + +Routing monitoring data involves indexing it into the appropriate monitoring +indices. Once the data is indexed, it exists in a monitoring index that, by +default, is named with a daily index pattern. For {es} monitoring data, this is +an index that matches `.monitoring-es-6-*`. From there, the data lives inside +the monitoring cluster and must be curated or cleaned up as necessary. If you do +not curate the monitoring data, it eventually fills up the nodes and the cluster +might fail due to lack of disk space. + +TIP: You are strongly recommended to manage the curation of indices and +particularly the monitoring indices. To do so, you can take advantage of the +<> or +{curator-ref-current}/index.html[Elastic Curator]. + +//TO-DO: Add information about index lifecycle management https://github.com/elastic/x-pack-elasticsearch/issues/2814 + +When using cluster alerts, {watcher} creates daily `.watcher_history*` indices. +These are not managed by {monitoring} and they are not curated automatically. It +is therefore critical that you curate these indices to avoid an undesirable and +unexpected increase in the number of shards and indices and eventually the +amount of disk usage. If you are using a `local` exporter, you can set the +`xpack.watcher.history.cleaner_service.enabled` setting to `true` and curate the +`.watcher_history*` indices by using the +<>. See <>. + +There is also a disk watermark (known as the flood stage +watermark), which protects clusters from running out of disk space. When this +feature is triggered, it makes all indices (including monitoring indices) +read-only until the issue is fixed and a user manually makes the index writeable +again. While an active monitoring index is read-only, it will naturally fail to +write (index) new data and will continuously log errors that indicate the write +failure. For more information, see +{ref}/disk-allocator.html[Disk-based Shard Allocation]. + +[float] +[[es-monitoring-default-exporter]] +=== Default exporters + +If a node or cluster does not explicitly define an {monitoring} exporter, the +following default exporter is used: + +[source,yaml] +--------------------------------------------------- +xpack.monitoring.exporters.default_local: <1> + type: local +--------------------------------------------------- +<1> The exporter name uniquely defines the exporter, but it is otherwise unused. + When you specify your own exporters, you do not need to explicitly overwrite + or reference `default_local`. + +If another exporter is already defined, the default exporter is _not_ created. +When you define a new exporter, if the default exporter exists, it is +automatically removed. 
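+
+For example, a minimal `elasticsearch.yml` sketch like the following is enough
+to prevent `default_local` from being created; the exporter name and host are
+placeholders:
+
+[source,yaml]
+---------------------------------------------------
+# placeholder exporter name and host; adjust for your monitoring cluster
+xpack.monitoring.exporters.my_monitoring_cluster:
+  type: http
+  host: [ "monitoring-host:9200" ]
+---------------------------------------------------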
+ +[float] +[[es-monitoring-templates]] +=== Exporter templates and ingest pipelines + +Before exporters can route monitoring data, they must set up certain {es} +resources. These resources include templates and ingest pipelines. The +following table lists the templates that are required before an exporter can +route monitoring data: + +[options="header"] +|======================= +| Template | Purpose +| `.monitoring-alerts` | All cluster alerts for monitoring data. +| `.monitoring-beats` | All Beats monitoring data. +| `.monitoring-es` | All {es} monitoring data. +| `.monitoring-kibana` | All {kib} monitoring data. +| `.monitoring-logstash` | All Logstash monitoring data. +|======================= + +The templates are ordinary {es} templates that control the default settings and +mappings for the monitoring indices. + +By default, monitoring indices are created daily (for example, +`.monitoring-es-6-2017.08.26`). You can change the default date suffix for +monitoring indices with the `index.name.time_format` setting. You can use this +setting to control how frequently monitoring indices are created by a specific +`http` exporter. You cannot use this setting with `local` exporters. For more +information, see <>. + +WARNING: Some users create their own templates that match _all_ index patterns, +which therefore impact the monitoring indices that get created. It is critical +that you do not disable `_source` storage for the monitoring indices. If you do, +{monitoring} for {kib} does not work and you cannot visualize monitoring data +for your cluster. + +The following table lists the ingest pipelines that are required before an +exporter can route monitoring data: + +[options="header"] +|======================= +| Pipeline | Purpose +| `xpack_monitoring_2` | Upgrades X-Pack monitoring data coming from X-Pack +5.0 - 5.4 to be compatible with the format used in {monitoring} 5.5. +| `xpack_monitoring_6` | A placeholder pipeline that is empty. +|======================= + +Exporters handle the setup of these resources before ever sending data. If +resource setup fails (for example, due to security permissions), no data is sent +and warnings are logged. + +NOTE: Empty pipelines are evaluated on the coordinating node during indexing and +they are ignored without any extra effort. This inherently makes them a safe, +no-op operation. + +For monitoring clusters that have disabled `node.ingest` on all nodes, it is +possible to disable the use of the ingest pipeline feature. However, doing so +blocks its purpose, which is to upgrade older monitoring data as our mappings +improve over time. Beginning in 6.0, the ingest pipeline feature is a +requirement on the monitoring cluster; you must have `node.ingest` enabled on at +least one node. + +WARNING: Once any node running 5.5 or later has set up the templates and ingest +pipeline on a monitoring cluster, you must use {kib} 5.5 or later to view all +subsequent data on the monitoring cluster. The easiest way to determine +whether this update has occurred is by checking for the presence of indices +matching `.monitoring-es-6-*` (or more concretely the existence of the +new pipeline). Versions prior to 5.5 used `.monitoring-es-2-*`. + +Each resource that is created by an {monitoring} exporter has a `version` field, +which is used to determine whether the resource should be replaced. The `version` +field value represents the latest version of {monitoring} that changed the +resource. 
If a resource is edited by someone or something external to +{monitoring}, those changes are lost the next time an automatic update occurs. + +include::local-export.asciidoc[] +include::http-export.asciidoc[] diff --git a/x-pack/docs/en/monitoring/http-export.asciidoc b/x-pack/docs/en/monitoring/http-export.asciidoc new file mode 100644 index 0000000000000..db1dbe2a29c5b --- /dev/null +++ b/x-pack/docs/en/monitoring/http-export.asciidoc @@ -0,0 +1,116 @@ +[role="xpack"] +[[http-exporter]] +=== HTTP Exporters + +The `http` exporter is the preferred exporter in {monitoring} because it enables +the use of a separate monitoring cluster. As a secondary benefit, it avoids +using a production cluster node as a coordinating node for indexing monitoring +data because all requests are HTTP requests to the monitoring cluster. + +The `http` exporter uses the low-level {es} REST Client, which enables it to +send its data to any {es} cluster it can access through the network. Its requests +make use of the <> parameter to +reduce bandwidth whenever possible, which helps to ensure that communications +between the production and monitoring clusters are as lightweight as possible. + +The `http` exporter supports a number of settings that control how it +communicates over HTTP to remote clusters. In most cases, it is not +necessary to explicitly configure these settings. For detailed +descriptions, see <>. + +[source,yaml] +---------------------------------- +xpack.monitoring.exporters: + my_local: <1> + type: local + my_remote: <2> + type: http + host: [ "10.1.2.3:9200", ... ] <3> + auth: <4> + username: my_username + password: changeme + connection: + timeout: 6s + read_timeout: 60s + ssl: ... <5> + proxy: + base_path: /some/base/path <6> + headers: <7> + My-Proxy-Header: abc123 + My-Other-Thing: [ def456, ... ] + index.name.time_format: YYYY-MM <8> + +---------------------------------- +<1> A `local` exporter defined explicitly whose arbitrary name is `my_local`. +<2> An `http` exporter defined whose arbitrary name is `my_remote`. This name +uniquely defines the exporter but is otherwise unused. +<3> `host` is a required setting for `http` exporters. It must specify the HTTP +port rather than the transport port. The default port value is `9200`. +<4> User authentication for those using {security} or some other + form of user authentication protecting the cluster. +<5> See <> for all TLS/SSL settings. If not supplied, +the default node-level TLS/SSL settings are used. +<6> Optional base path to prefix any outgoing request with in order to + work with proxies. +<7> Arbitrary key/value pairs to define as headers to send with every request. + The array-based key/value format sends one header per value. +<8> A mechanism for changing the date suffix used by default. + +NOTE: The `http` exporter accepts an array of `hosts` and it will round robin +through the list. It is a good idea to take advantage of that feature when the +monitoring cluster contains more than one node. + +Unlike the `local` exporter, _every_ node that uses the `http` exporter attempts +to check and create the resources that it needs. The `http` exporter avoids +re-checking the resources unless something triggers it to perform the checks +again. These triggers include: + +* The production cluster's node restarts. +* A connection failure to the monitoring cluster. +* The license on the production cluster changes. +* The `http` exporter is dynamically updated (and it is therefore replaced). 
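+
+The last of these triggers can be exercised deliberately. For example, assuming
+a hypothetical exporter named `my_remote` that is managed through the cluster
+settings API, the following sketch disables it and then re-enables it, which
+counts as a dynamic update:
+
+[source,js]
+----------------------------------
+PUT _cluster/settings
+{
+  "transient": {
+    "xpack.monitoring.exporters.my_remote.enabled": false <1>
+  }
+}
+
+PUT _cluster/settings
+{
+  "transient": {
+    "xpack.monitoring.exporters.my_remote.enabled": true
+  }
+}
+----------------------------------
+<1> `my_remote` is a placeholder name for an exporter that was defined through
+the cluster settings API.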
+ +The easiest way to trigger a check is to disable, then re-enable the exporter. + +WARNING: This resource management behavior can create a hole for users that +delete monitoring resources. Since the `http` exporter does not re-check its +resources unless one of the triggers occurs, this can result in malformed index +mappings. + +Unlike the `local` exporter, the `http` exporter is inherently routing requests +outside of the cluster. This situation means that the exporter must provide a +username and password when the monitoring cluster requires one (or other +appropriate security configurations, such as TLS/SSL settings). + +IMPORTANT: When discussing security relative to the `http` exporter, it is +critical to remember that all users are managed on the monitoring cluster. This +is particularly important to remember when you move from development +environments to production environments, where you often have dedicated +monitoring clusters. + +For more information about the configuration options for the `http` exporter, +see <>. + +[float] +[[http-exporter-dns]] +==== Using DNS Hosts in HTTP Exporters + +{monitoring} runs inside of the the JVM security manager. When the JVM has the +security manager enabled, the JVM changes the duration so that it caches DNS +lookups indefinitely (for example, the mapping of a DNS hostname to an IP +address). For this reason, if you are in an environment where the DNS response +might change from time-to-time (for example, talking to any load balanced cloud +provider), you are strongly discouraged from using DNS hostnames. + +Alternatively, you can set the JVM security property `networkaddress.cache.ttl`, +which accepts values in seconds. This property must be set for the node's JVM that +uses {monitoring} for {es} when using DNS that can change IP addresses. If you +do not apply this setting, the connection consistently fails after the IP +address changes. + +IMPORTANT: JVM security properties are different than system properties. They +cannot be set at startup via `-D` system property settings and instead they must +be set in code before the security manager has been setup _or_, more +appropriately, in the `$JAVA_HOME/lib/security/java.security` file. + +Restarting the node (and therefore the JVM) results in its cache being flushed. diff --git a/x-pack/docs/en/monitoring/index.asciidoc b/x-pack/docs/en/monitoring/index.asciidoc new file mode 100644 index 0000000000000..6b8ecc5038ea0 --- /dev/null +++ b/x-pack/docs/en/monitoring/index.asciidoc @@ -0,0 +1,45 @@ +[role="xpack"] +[[es-monitoring]] += Monitoring {es} + +[partintro] +-- +{monitoring} enables you to easily monitor the health of your {es} cluster. The +monitoring metrics are collected from each node and stored in {es} indices. + +Each {es} node is considered unique based on its persistent UUID, which is +written on first start to its <> directory, which +defaults to `./data`. + +All settings associated with {monitoring} in {es} must be set in either the +`elasticsearch.yml` file for each node or, where possible, in the dynamic +cluster settings. For more information, see <>. + +[[es-monitoring-overview]] +{es} is also at the core of {monitoring} across the Elastic Stack. In all cases, +{monitoring} documents are just ordinary JSON documents built by monitoring each +Elastic Stack component at some collection interval, then indexing those +documents into the monitoring cluster. 
Each component in the stack is +responsible for monitoring itself and then forwarding those documents to {es} +for both routing and indexing (storage). + +The routing and indexing processes in {es} are handled by what are called +<> and +<>. In the past, collectors and exporters +were considered to be part of a monitoring "agent", but that term is generally +not used anymore. + +You can view monitoring data from {kib} where it’s easy to spot issues at a +glance or delve into the system behavior over time to diagnose operational +issues. In addition to the built-in status warnings, you can also set up custom +alerts based on the data in the monitoring indices. + +For an introduction to monitoring your Elastic stack, including Beats, Logstash, +and {kib}, see {xpack-ref}/xpack-monitoring.html[Monitoring the Elastic Stack]. + +-- + +include::collectors.asciidoc[] +include::exporters.asciidoc[] +include::pause-export.asciidoc[] + diff --git a/x-pack/docs/en/monitoring/indices.asciidoc b/x-pack/docs/en/monitoring/indices.asciidoc new file mode 100644 index 0000000000000..10d2c212de273 --- /dev/null +++ b/x-pack/docs/en/monitoring/indices.asciidoc @@ -0,0 +1,42 @@ +[role="xpack"] +[[config-monitoring-indices]] +=== Configuring Indices for Monitoring + +<> are used to configure the indices +that store the monitoring data collected from a cluster. + +You can retrieve the templates through the `_template` API: + +[source,sh] +---------------------------------- +GET /_template/.monitoring-* +---------------------------------- + +By default, the template configures one shard and one replica for the +monitoring indices. To override the default settings, add your own template: + +. Set the `template` pattern to `.monitoring-*`. +. Set the template `order` to `1`. This ensures your template is +applied after the default template, which has an order of 0. +. Specify the `number_of_shards` and/or `number_of_replicas` in the `settings` +section. + +For example, the following template increases the number of shards to five +and the number of replicas to two. + +[source,js] +---------------------------------- +PUT /_template/custom_monitoring +{ + "index_patterns": ".monitoring-*", + "order": 1, + "settings": { + "number_of_shards": 5, + "number_of_replicas": 2 + } +} +---------------------------------- + +IMPORTANT: Only set the `number_of_shards` and `number_of_replicas` in the +settings section. Overriding other monitoring template settings could cause +your monitoring dashboards to stop working correctly. diff --git a/x-pack/docs/en/monitoring/local-export.asciidoc b/x-pack/docs/en/monitoring/local-export.asciidoc new file mode 100644 index 0000000000000..12d0ab5ea9f81 --- /dev/null +++ b/x-pack/docs/en/monitoring/local-export.asciidoc @@ -0,0 +1,82 @@ +[role="xpack"] +[[local-exporter]] +=== Local Exporters + +The `local` exporter is the default exporter in {monitoring}. It routes data +back into the same (local) cluster. In other words, it uses the production +cluster as the monitoring cluster. For example: + +[source,yaml] +--------------------------------------------------- +xpack.monitoring.exporters.my_local_exporter: <1> + type: local +--------------------------------------------------- +<1> The exporter name uniquely defines the exporter, but it is otherwise unused. + +This exporter exists to provide a convenient option when hardware is simply not +available. 
It is also a way for developers to get an idea of what their actions +do for pre-production clusters when they do not have the time or resources to +provide a separate monitoring cluster. However, this exporter has disadvantages +that impact the local cluster: + +* All indexing impacts the local cluster and the nodes that hold the monitoring +indices' shards. +* Most collectors run on the elected master node. Therefore most indexing occurs +with the elected master node as the coordinating node, which is a bad practice. +* Any usage of {monitoring} for {kib} uses the local cluster's resources for +searches and aggregations, which means that they might not be available for +non-monitoring tasks. +* If the local cluster goes down, the monitoring cluster has inherently gone +down with it (and vice versa), which generally defeats the purpose of monitoring. + +For the `local` exporter, all setup occurs only on the elected master node. This +means that if you do not see any monitoring templates or ingest pipelines, the +elected master node is having issues or it is not configured in the same way. +Unlike the `http` exporter, the `local` exporter has the advantage of accessing +the monitoring cluster's up-to-date cluster state. It can therefore always check +that the templates and ingest pipelines exist without a performance penalty. If +the elected master node encounters errors while trying to create the monitoring +resources, it logs errors, ignores that collection, and tries again after the +next collection. + +The elected master node is the only node to set up resources for the `local` +exporter. Therefore all other nodes wait for the resources to be set up before +indexing any monitoring data from their own collectors. Each of these nodes logs +a message indicating that they are waiting for the resources to be set up. + +One benefit of the `local` exporter is that it lives within the cluster and +therefore no extra configuration is required when the cluster is secured with +{security}. All operations, including indexing operations, that occur from a +`local` exporter make use of the internal transport mechanisms within {es}. This +behavior enables the exporter to be used without providing any user credentials +when {security} is enabled. + +For more information about the configuration options for the `local` exporter, +see <>. + +[[local-exporter-cleaner]] +==== Cleaner Service + +One feature of the `local` exporter, which is not present in the `http` exporter, +is a cleaner service. The cleaner service runs once per day at 01:00 AM UTC on +the elected master node. + +The role of the cleaner service is to clean, or curate, the monitoring indices +that are older than a configurable amount of time (the default is `7d`). This +cleaner exists as part of the `local` exporter as a safety mechanism. The `http` +exporter does not make use of it because it could enable a single misconfigured +node to prematurely curate data from other production clusters that share the +same monitoring cluster. + +In a dedicated monitoring cluster, the cleaning service can be used without +having to also monitor the monitoring cluster. For example: + +[source,yaml] +--------------------------------------------------- +xpack.monitoring.collection.enabled: false <1> +xpack.monitoring.history.duration: 3d <2> +--------------------------------------------------- +<1> Disable the collection of data on the monitoring cluster. +<2> Lower the default history duration from `7d` to `3d`. The minimum value is +`1d`. 
This setting can be modified only when using a Gold or higher level +license. For the Basic license level, it uses the default of 7 days. diff --git a/x-pack/docs/en/monitoring/pause-export.asciidoc b/x-pack/docs/en/monitoring/pause-export.asciidoc new file mode 100644 index 0000000000000..d26799c6892c3 --- /dev/null +++ b/x-pack/docs/en/monitoring/pause-export.asciidoc @@ -0,0 +1,35 @@ +[role="xpack"] +[[pause-export]] +== Pausing Data Collection + +To stop generating {monitoring} data in {es}, disable data collection: + +[source,yaml] +--------------------------------------------------- +xpack.monitoring.collection.enabled: false +--------------------------------------------------- + +When this setting is `false`, {es} monitoring data is not collected and all +monitoring data from other sources such as {kib}, Beats, and Logstash is ignored. + +You can update this setting by using the +{ref}/cluster-update-settings.html[Cluster Update Settings API]. + +If you want to separately disable a specific exporter, you can specify the +`enabled` setting (which defaults to `true`) per exporter. For example: + +[source,yaml] +--------------------------------------------------- +xpack.monitoring.exporters.my_http_exporter: + type: http + host: ["10.1.2.3:9200", "10.1.2.4:9200"] + enabled: false <1> +--------------------------------------------------- +<1> Disable the named exporter. If the same name as an existing exporter is not + used, then this will create a completely new exporter that is completely + ignored. This value can be set dynamically by using cluster settings. + +NOTE: Defining a disabled exporter prevents the default exporter from being + created. + +To re-start data collection, re-enable these settings. \ No newline at end of file diff --git a/x-pack/docs/en/node.asciidoc b/x-pack/docs/en/node.asciidoc new file mode 100644 index 0000000000000..316df743bf9a1 --- /dev/null +++ b/x-pack/docs/en/node.asciidoc @@ -0,0 +1,117 @@ +[float] +[[modules-node-xpack]] +== [xpack]#X-Pack node settings# + +//This content is referenced from the elastic/elasticsearch/docs/reference/modules/node.asciidoc + +If {xpack} is installed, there is an additional node type: + +<>:: + +A node that has `xpack.ml.enabled` and `node.ml` set to `true`, which is the +default behavior when {xpack} is installed. If you want to use {xpackml} +features, there must be at least one {ml} node in your cluster. For more +information about {xpackml} features, +see {xpack-ref}/xpack-ml.html[Machine Learning in the Elastic Stack]. + +IMPORTANT: Do not set use the `node.ml` setting unless {xpack} is installed. +Otherwise, the node fails to start. + +If {xpack} is installed, nodes are master-eligible, data, ingest, and {ml} +nodes by default. As the cluster grows and in particular if you have large +{ml} jobs, consider separating dedicated master-eligible nodes from dedicated +data nodes and dedicated {ml} nodes. + +To create a dedicated master-eligible node when {xpack} is installed, set: + +[source,yaml] +------------------- +node.master: true <1> +node.data: false <2> +node.ingest: false <3> +node.ml: false <4> +xpack.ml.enabled: true <5> +------------------- +<1> The `node.master` role is enabled by default. +<2> Disable the `node.data` role (enabled by default). +<3> Disable the `node.ingest` role (enabled by default). +<4> Disable the `node.ml` role (enabled by default in {xpack}). +<5> The `xpack.ml.enabled` setting is enabled by default in {xpack}. 
+ +To create a dedicated data node when {xpack} is installed, set: + +[source,yaml] +------------------- +node.master: false <1> +node.data: true <2> +node.ingest: false <3> +node.ml: false <4> +------------------- +<1> Disable the `node.master` role (enabled by default). +<2> The `node.data` role is enabled by default. +<3> Disable the `node.ingest` role (enabled by default). +<4> Disable the `node.ml` role (enabled by default in {xpack}). + +To create a dedicated ingest node when {xpack} is installed, set: + +[source,yaml] +------------------- +node.master: false <1> +node.data: false <2> +node.ingest: true <3> +search.remote.connect: false <4> +node.ml: false <5> +------------------- +<1> Disable the `node.master` role (enabled by default). +<2> Disable the `node.data` role (enabled by default). +<3> The `node.ingest` role is enabled by default. +<4> Disable cross-cluster search (enabled by default). +<5> Disable the `node.ml` role (enabled by default in {xpack}). + +To create a dedicated coordinating node when {xpack} is installed, set: + +[source,yaml] +------------------- +node.master: false <1> +node.data: false <2> +node.ingest: false <3> +search.remote.connect: false <4> +node.ml: false <5> +------------------- +<1> Disable the `node.master` role (enabled by default). +<2> Disable the `node.data` role (enabled by default). +<3> Disable the `node.ingest` role (enabled by default). +<4> Disable cross-cluster search (enabled by default). +<5> Disable the `node.ml` role (enabled by default in {xpack}). + +[float] +[[ml-node]] +=== [xpack]#Machine learning node# + +The {xpackml} features provide {ml} nodes, which run jobs and handle {ml} API +requests. If `xpack.ml.enabled` is set to true and `node.ml` is set to `false`, +the node can service API requests but it cannot run jobs. + +If you want to use {xpackml} features in your cluster, you must enable {ml} +(set `xpack.ml.enabled` to `true`) on all master-eligible nodes. Do not use +these settings if you do not have {xpack} installed. + +For more information about these settings, see <>. + +To create a dedicated {ml} node, set: + +[source,yaml] +------------------- +node.master: false <1> +node.data: false <2> +node.ingest: false <3> +search.remote.connect: false <4> +node.ml: true <5> +xpack.ml.enabled: true <6> +------------------- +<1> Disable the `node.master` role (enabled by default). +<2> Disable the `node.data` role (enabled by default). +<3> Disable the `node.ingest` role (enabled by default). +<4> Disable cross-cluster search (enabled by default). +<5> The `node.ml` role is enabled by default in {xpack}. +<6> The `xpack.ml.enabled` setting is enabled by default in {xpack}. diff --git a/x-pack/docs/en/release-notes/7.0.0-alpha1.asciidoc b/x-pack/docs/en/release-notes/7.0.0-alpha1.asciidoc new file mode 100644 index 0000000000000..b68970c4c9d4b --- /dev/null +++ b/x-pack/docs/en/release-notes/7.0.0-alpha1.asciidoc @@ -0,0 +1,26 @@ +[[xes-7.0.0-alpha1]] +== {es} {xpack} 7.0.0-alpha1 Release Notes + +[float] +[[xes-breaking-7.0.0-alpha1]] +=== Breaking Changes + +Machine Learning:: +* The `max_running_jobs` node property is removed in this release. Use the +`xpack.ml.max_open_jobs` setting instead. For more information, see <>. + +Monitoring:: +* The `xpack.monitoring.collection.interval` setting can no longer be set to `-1` +to disable monitoring data collection. Use `xpack.monitoring.collection.enabled` +and set it to `false` (its default), which was added in 6.3.0. 
+ +Security:: +* The fields returned as part of the mappings section by get index, get +mappings, get field mappings and field capabilities API are now only the +ones that the user is authorized to access in case field level security is enabled. + +See also: + +* <> +* {kibana-ref}/xkb-7.0.0-alpha1.html[{kib} {xpack} 7.0.0-alpha1 Release Notes] +* {logstash-ref}/xls-7.0.0-alpha1.html[Logstash {xpack} 7.0.0-alpha1 Release Notes] diff --git a/x-pack/docs/en/release-notes/xpack-breaking.asciidoc b/x-pack/docs/en/release-notes/xpack-breaking.asciidoc new file mode 100644 index 0000000000000..647b479c1da38 --- /dev/null +++ b/x-pack/docs/en/release-notes/xpack-breaking.asciidoc @@ -0,0 +1,36 @@ +[role="xpack"] +[[breaking-changes-xes]] += {xpack} Breaking Changes + +[partintro] +-- +This section summarizes the changes that you need to be aware of when migrating +your application from one version of {xpack} to another. + +* <> + +See also: + +* <> +* {kibana-ref}/breaking-changes-xkb.html[{kib} {xpack} Breaking Changes] +* {logstash-ref}/breaking-changes-xls.html[Logstash {xpack} Breaking Changes] + +-- + +[role="xpack"] +[[breaking-7.0.0-xes]] +== {xpack} Breaking changes in 7.0.0 + + +Machine Learning:: +* The `max_running_jobs` node property is removed in this release. Use the +`xpack.ml.max_open_jobs` setting instead. For more information, <>. + +Security:: +* The fields returned as part of the mappings section by get index, get +mappings, get field mappings and field capabilities API are now only the ones +that the user is authorized to access in case field level security is enabled. + +See also: + +* <> diff --git a/x-pack/docs/en/release-notes/xpack-xes.asciidoc b/x-pack/docs/en/release-notes/xpack-xes.asciidoc new file mode 100644 index 0000000000000..e58ef209ecd47 --- /dev/null +++ b/x-pack/docs/en/release-notes/xpack-xes.asciidoc @@ -0,0 +1,20 @@ +[role="xpack"] +[[release-notes-xes]] += {xpack} Release Notes + +[partintro] +-- +This section summarizes the changes in each release for all of the {xpack} +components in {es}. + +* <> + +See also: + +* <> +* {kibana-ref}/release-notes-xkb.html[{kib} {xpack} Release Notes] +* {logstash-ref}/release-notes-xls.html[Logstash {xpack} Release Notes] + +-- + +include::7.0.0-alpha1.asciidoc[] diff --git a/x-pack/docs/en/rest-api/defs.asciidoc b/x-pack/docs/en/rest-api/defs.asciidoc new file mode 100644 index 0000000000000..99600472a0930 --- /dev/null +++ b/x-pack/docs/en/rest-api/defs.asciidoc @@ -0,0 +1,30 @@ +[role="xpack"] +[[ml-api-definitions]] +== Definitions + +These resource definitions are used in {ml} APIs and in {kib} advanced +job configuration options. + +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +[role="xpack"] +include::ml/calendarresource.asciidoc[] +[role="xpack"] +include::ml/datafeedresource.asciidoc[] +[role="xpack"] +include::ml/jobresource.asciidoc[] +[role="xpack"] +include::ml/jobcounts.asciidoc[] +[role="xpack"] +include::ml/snapshotresource.asciidoc[] +[role="xpack"] +include::ml/resultsresource.asciidoc[] +[role="xpack"] +include::ml/eventresource.asciidoc[] diff --git a/x-pack/docs/en/rest-api/graph/explore.asciidoc b/x-pack/docs/en/rest-api/graph/explore.asciidoc new file mode 100644 index 0000000000000..f9902fcbe48a8 --- /dev/null +++ b/x-pack/docs/en/rest-api/graph/explore.asciidoc @@ -0,0 +1,403 @@ +[role="xpack"] +[[graph-explore-api]] +== Explore API + +The Graph explore API enables you to extract and summarize information about +the documents and terms in your Elasticsearch index. 
+
+The easiest way to understand the behaviour of this API is to use the
+Graph UI to explore connections. You can view the most recent request submitted
+to the `_explore` endpoint from the *Last request* panel. For more information,
+see {kibana-ref}/graph-getting-started.html[Getting Started with Graph].
+
+For additional information about working with the explore API, see the Graph
+{kibana-ref}/graph-troubleshooting.html[Troubleshooting] and
+{kibana-ref}/graph-limitations.html[Limitations] topics.
+
+[float]
+=== Request
+
+`POST /_xpack/graph/_explore`
+
+[float]
+=== Description
+
+An initial request to the `_explore` API contains a seed query that identifies
+the documents of interest and specifies the fields that define the vertices
+and connections you want to include in the graph. Subsequent `_explore` requests
+enable you to _spider out_ from one or more vertices of interest. You can
+exclude vertices that have already been returned.
+
+[float]
+=== Request Body
+
+query::
+A seed query that identifies the documents of interest. It can be any valid
+Elasticsearch query. For example:
++
+[source,js]
+--------------------------------------------------
+"query": {
+  "bool": {
+    "must": {
+      "match": {
+        "query.raw": "midi"
+      }
+    },
+    "filter": [
+      {
+        "range": {
+          "query_time": {
+            "gte": "2015-10-01 00:00:00"
+          }
+        }
+      }
+    ]
+  }
+}
+--------------------------------------------------
+
+vertices::
+Specifies one or more fields that contain the terms you want to include in the
+graph as vertices. For example:
++
+[source,js]
+--------------------------------------------------
+"vertices": [
+  {
+    "field": "product"
+  }
+]
+--------------------------------------------------
+
+field::: Identifies a field in the documents of interest.
+include::: Identifies the terms of interest that form the starting points
+from which you want to spider out. You do not have to specify a seed query
+if you specify an include clause. The include clause implicitly queries for
+documents that contain any of the listed terms.
+In addition to specifying a simple array of strings, you can also pass
+objects with `term` and `boost` values to boost matches on particular terms.
+exclude:::
+The `exclude` clause prevents the specified terms from being included in
+the results.
+size:::
+Specifies the maximum number of vertex terms returned for each
+field. Defaults to 5.
+min_doc_count:::
+Specifies how many documents must contain a pair of terms before it is
+considered to be a useful connection. This setting acts as a certainty
+threshold. Defaults to 3.
+shard_min_doc_count:::
+This advanced setting controls how many documents on a particular shard have
+to contain a pair of terms before the connection is returned for global
+consideration. Defaults to 2.
+
+connections::
+Specifies one or more fields from which you want to extract terms that are
+associated with the specified vertices. For example:
++
+[source,js]
+--------------------------------------------------
+"connections": {
+  "vertices": [
+    {
+      "field": "query.raw"
+    }
+  ]
+}
+--------------------------------------------------
++
+NOTE: Connections can be nested inside the `connections` object to
+explore additional relationships in the data. Each level of nesting is
+considered a _hop_, and proximity within the graph is often described in
+terms of _hop depth_.
+
+query:::
+An optional _guiding query_ that constrains the Graph API as it
+explores connected terms.
For example, you might want to direct the Graph +API to ignore older data by specifying a query that identifies recent +documents. +vertices::: +Contains the fields you are interested in. For example: ++ +[source,js] +-------------------------------------------------- +"vertices": [ + { + "field": "query.raw", + "size": 5, + "min_doc_count": 10, + "shard_min_doc_count": 3 + } +] +-------------------------------------------------- + +controls:: Direct the Graph API how to build the graph. + +use_significance::: +The `use_significance` flag filters associated terms so only those that are +significantly associated with your query are included. For information about +the algorithm used to calculate significance, see the +{ref}/search-aggregations-bucket-significantterms-aggregation.html[significant_terms +aggregation]. Defaults to `true`. +sample_size::: +Each _hop_ considers a sample of the best-matching documents on each +shard. Using samples improves the speed of execution and keeps +exploration focused on meaningfully-connected terms. Very small values +(less than 50) might not provide sufficient weight-of-evidence to identify +significant connections between terms. Very large sample sizes can dilute +the quality of the results and increase execution times. +Defaults to 100 documents. +timeout::: +The length of time in milliseconds after which exploration will be halted +and the results gathered so far are returned. This timeout is honored on +a best-effort basis. Execution might overrun this timeout if, for example, +a long pause is encountered while FieldData is loaded for a field. +sample_diversity::: +To avoid the top-matching documents sample being dominated by a single +source of results, it is sometimes necessary to request diversity in +the sample. You can do this by selecting a single-value field and setting +a maximum number of documents per value for that field. For example: ++ +[source,js] +-------------------------------------------------- +"sample_diversity": { + "field": "category.raw", + "max_docs_per_value": 500 +} +-------------------------------------------------- + +// [float] +// === Authorization + +[float] +=== Examples + +[float] +[[basic-search]] +==== Basic exploration + +An initial search typically begins with a query to identify strongly related terms. + +[source,js] +-------------------------------------------------- +POST clicklogs/_xpack/graph/_explore +{ + "query": { <1> + "match": { + "query.raw": "midi" + } + }, + "vertices": [ <2> + { + "field": "product" + } + ], + "connections": { <3> + "vertices": [ + { + "field": "query.raw" + } + ] + } +} +-------------------------------------------------- +// CONSOLE +<1> Seed the exploration with a query. This example is searching +clicklogs for people who searched for the term "midi". +<2> Identify the vertices to include in the graph. This example is looking for +product codes that are significantly associated with searches for "midi". +<3> Find the connections. This example is looking for other search +terms that led people to click on the products that are associated with +searches for "midi". 
+ +The response from the explore API looks like this: + +[source,js] +-------------------------------------------------- +{ + "took": 0, + "timed_out": false, + "failures": [], + "vertices": [ <1> + { + "field": "query.raw", + "term": "midi cable", + "weight": 0.08745858139552132, + "depth": 1 + }, + { + "field": "product", + "term": "8567446", + "weight": 0.13247784285434397, + "depth": 0 + }, + { + "field": "product", + "term": "1112375", + "weight": 0.018600718471158982, + "depth": 0 + }, + { + "field": "query.raw", + "term": "midi keyboard", + "weight": 0.04802242866755111, + "depth": 1 + } + ], + "connections": [ <2> + { + "source": 0, + "target": 1, + "weight": 0.04802242866755111, + "doc_count": 13 + }, + { + "source": 2, + "target": 3, + "weight": 0.08120623870976627, + "doc_count": 23 + } + ] +} +-------------------------------------------------- +<1> An array of all of the vertices that were discovered. A vertex is an indexed +term, so the field and term value are provided. The `weight` attribute specifies +a significance score. The `depth` attribute specifies the hop-level at which +the term was first encountered. +<2> The connections between the vertices in the array. The `source` and `target` +properties are indexed into the vertices array and indicate which vertex term led +to the other as part of exploration. The `doc_count` value indicates how many +documents in the sample set contain this pairing of terms (this is +not a global count for all documents in the index). + +[float] +[[optional-controls]] +==== Optional controls + +The default settings are configured to remove noisy data and +get the "big picture" from your data. This example shows how to specify +additional parameters to influence how the graph is built. + +For tips on tuning the settings for more detailed forensic evaluation where +every document could be of interest, see the +{kibana-ref}/graph-troubleshooting.html[Troubleshooting] guide. + + +[source,js] +-------------------------------------------------- +POST clicklogs/_xpack/graph/_explore +{ + "query": { + "match": { + "query.raw": "midi" + } + }, + "controls": { + "use_significance": false,<1> + "sample_size": 2000,<2> + "timeout": 2000,<3> + "sample_diversity": {<4> + "field": "category.raw", + "max_docs_per_value": 500 + } + }, + "vertices": [ + { + "field": "product", + "size": 5,<5> + "min_doc_count": 10,<6> + "shard_min_doc_count": 3<7> + } + ], + "connections": { + "query": {<8> + "bool": { + "filter": [ + { + "range": { + "query_time": { + "gte": "2015-10-01 00:00:00" + } + } + } + ] + } + }, + "vertices": [ + { + "field": "query.raw", + "size": 5, + "min_doc_count": 10, + "shard_min_doc_count": 3 + } + ] + } +} +-------------------------------------------------- +// CONSOLE +<1> Disable `use_significance` to include all associated terms, not just the +ones that are significantly associated with the query. +<2> Increase the sample size to consider a larger set of documents on +each shard. +<3> Limit how long a graph request runs before returning results. +<4> Ensure diversity in the sample by setting a limit on the number of documents +per value in a particular single-value field, such as a category field. +<5> Control the maximum number of vertex terms returned for each field. +<6> Set a certainty threshold that specifies how many documents have to contain +a pair of terms before we consider it to be a useful connection. 
+<7> Specify how many documents on a shard have to contain a pair of terms before +the connection is returned for global consideration. +<8> Restrict which documents are considered as you explore connected terms. + + +[float] +[[spider-search]] +==== Spidering operations + +After an initial search, you typically want to select vertices of interest and +see what additional vertices are connected. In graph-speak, this operation is +referred to as "spidering". By submitting a series of requests, you can +progressively build a graph of related information. + +To spider out, you need to specify two things: + + * The set of vertices for which you want to find additional connections + * The set of vertices you already know about that you want to exclude from the + results of the spidering operation. + +You specify this information using `include` and `exclude` clauses. For example, +the following request starts with the product `1854873` and spiders +out to find additional search terms associated with that product. The terms +"midi", "midi keyboard", and "synth" are excluded from the results. + +[source,js] +-------------------------------------------------- +POST clicklogs/_xpack/graph/_explore +{ + "vertices": [ + { + "field": "product", + "include": [ "1854873" ] <1> + } + ], + "connections": { + "vertices": [ + { + "field": "query.raw", + "exclude": [ <2> + "midi keyboard", + "midi", + "synth" + ] + } + ] + } +} +-------------------------------------------------- +// CONSOLE +<1> The vertices you want to start from are specified +as an array of terms in an `include` clause. +<2> The `exclude` clause prevents terms you already know about from being +included in the results. \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/index.asciidoc b/x-pack/docs/en/rest-api/index.asciidoc new file mode 100644 index 0000000000000..85c72a78d99fd --- /dev/null +++ b/x-pack/docs/en/rest-api/index.asciidoc @@ -0,0 +1,29 @@ +[role="xpack"] +[[xpack-api]] += {xpack} APIs + +[partintro] +-- +{xpack} exposes REST APIs that are used by the UI components and can be called +directly to configure and access {xpack} features. + +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +-- + + +include::info.asciidoc[] +include::graph/explore.asciidoc[] +include::licensing.asciidoc[] +include::migration.asciidoc[] +include::ml-api.asciidoc[] +include::rollup-api.asciidoc[] +include::security.asciidoc[] +include::watcher.asciidoc[] +include::defs.asciidoc[] diff --git a/x-pack/docs/en/rest-api/info.asciidoc b/x-pack/docs/en/rest-api/info.asciidoc new file mode 100644 index 0000000000000..ccb979124f2da --- /dev/null +++ b/x-pack/docs/en/rest-api/info.asciidoc @@ -0,0 +1,130 @@ +[role="xpack"] +[[info-api]] +== Info API + +The info API provides general information about the installed {xpack}. + +[float] +=== Request + +`GET /_xpack` + +[float] +=== Description + +The information provided by this API includes: + +* Build Information - including the build number and timestamp. +* License Information - basic information about the currently installed license. +* Features Information - The features that are currently enabled and available + under the current license. + +[float] +=== Path Parameters + +`categories`:: + (list) A comma-separated list of the information categories to include in the + response. For example, `build,license,features`. + +`human`:: + (boolean) Defines whether additional human-readable information is included in + the response. In particular, it adds descriptions and a tag line.
The + default value is `true`. + +//=== Query Parameters + +//=== Authorization + +[float] +=== Examples + +The following example queries the info API: + +[source,js] +------------------------------------------------------------ +GET /_xpack +------------------------------------------------------------ +// CONSOLE + +Example response: +[source,js] +------------------------------------------------------------ +{ + "build" : { + "hash" : "2798b1a3ce779b3611bb53a0082d4d741e4d3168", + "date" : "2015-04-07T13:34:42Z" + }, + "license" : { + "uid" : "893361dc-9749-4997-93cb-802e3dofh7aa", + "type" : "trial", + "mode" : "trial", + "status" : "active", + "expiry_date_in_millis" : 1914278399999 + }, + "features" : { + "graph" : { + "description" : "Graph Data Exploration for the Elastic Stack", + "available" : true, + "enabled" : true + }, + "logstash" : { + "description" : "Logstash management component for X-Pack", + "available" : true, + "enabled" : true + }, + "ml" : { + "description" : "Machine Learning for the Elastic Stack", + "available" : true, + "enabled" : true, + "native_code_info" : { + "version" : "6.0.0-alpha1-SNAPSHOT", + "build_hash" : "d081461967d61a" + } + }, + "monitoring" : { + "description" : "Monitoring for the Elastic Stack", + "available" : true, + "enabled" : true + }, + "rollup": { + "description": "Time series pre-aggregation and rollup", + "available": true, + "enabled": true + }, + "security" : { + "description" : "Security for the Elastic Stack", + "available" : true, + "enabled" : true + }, + "watcher" : { + "description" : "Alerting, Notification and Automation for the Elastic Stack", + "available" : true, + "enabled" : true + } + }, + "tagline" : "You know, for X" +} +------------------------------------------------------------ +// TESTRESPONSE[s/"hash" : "2798b1a3ce779b3611bb53a0082d4d741e4d3168",/"hash" : "$body.build.hash",/] +// TESTRESPONSE[s/"date" : "2015-04-07T13:34:42Z"/"date" : "$body.build.date"/] +// TESTRESPONSE[s/"uid" : "893361dc-9749-4997-93cb-802e3dofh7aa",/"uid": "$body.license.uid",/] +// TESTRESPONSE[s/"expiry_date_in_millis" : 1914278399999/"expiry_date_in_millis" : "$body.license.expiry_date_in_millis"/] +// TESTRESPONSE[s/"version" : "6.0.0-alpha1-SNAPSHOT",/"version": "$body.features.ml.native_code_info.version",/] +// TESTRESPONSE[s/"build_hash" : "d081461967d61a"/"build_hash": "$body.features.ml.native_code_info.build_hash"/] +// So much s/// but at least we test that the layout is close to matching.... + +The following example only returns the build and features information: + +[source,js] +------------------------------------------------------------ +GET /_xpack?categories=build,features +------------------------------------------------------------ +// CONSOLE + +The following example removes the descriptions from the response: + +[source,js] +------------------------------------------------------------ +GET /_xpack?human=false +------------------------------------------------------------ +// CONSOLE diff --git a/x-pack/docs/en/rest-api/license/delete-license.asciidoc b/x-pack/docs/en/rest-api/license/delete-license.asciidoc new file mode 100644 index 0000000000000..24662664daa40 --- /dev/null +++ b/x-pack/docs/en/rest-api/license/delete-license.asciidoc @@ -0,0 +1,43 @@ +[role="xpack"] +[[delete-license]] +=== Delete License API + +This API enables you to delete licensing information. + +[float] +==== Request + +`DELETE /_xpack/license` + +[float] +==== Description + +When your license expires, {xpack} operates in a degraded mode. 
For more +information, see {xpack-ref}/license-expiration.html[License Expiration]. + +[float] +==== Authorization + +You must have `manage` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. + +[float] +==== Examples + +The following example deletes the current license: + +[source,js] +------------------------------------------------------------ +DELETE _xpack/license +------------------------------------------------------------ +// CONSOLE +// TEST[skip:license testing issues] + +When the license is successfully deleted, the API returns the following response: +[source,js] +------------------------------------------------------------ +{ + "acknowledged": true +} +------------------------------------------------------------ diff --git a/x-pack/docs/en/rest-api/license/get-basic-status.asciidoc b/x-pack/docs/en/rest-api/license/get-basic-status.asciidoc new file mode 100644 index 0000000000000..c6c6385447ab3 --- /dev/null +++ b/x-pack/docs/en/rest-api/license/get-basic-status.asciidoc @@ -0,0 +1,45 @@ +[role="xpack"] +[[get-basic-status]] +=== Get Basic Status API + +This API enables you to check the status of your basic license. + +[float] +==== Request + +`GET _xpack/license/basic_status` + +[float] +==== Description + +You can initiate a basic license only if you do not currently have a basic +license. + +For more information about the different types of licenses, see +https://www.elastic.co/subscriptions. + +==== Authorization + +You must have `monitor` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. + +[float] +==== Examples + +The following example checks whether you are eligible to start a basic license: + +[source,js] +------------------------------------------------------------ +GET _xpack/license/basic_status +------------------------------------------------------------ +// CONSOLE + +Example response: +[source,js] +------------------------------------------------------------ +{ + "eligible_to_start_basic": true +} +------------------------------------------------------------ +// TESTRESPONSE[s/"eligible_to_start_basic": true/"eligible_to_start_basic": $body.eligible_to_start_basic/] diff --git a/x-pack/docs/en/rest-api/license/get-license.asciidoc b/x-pack/docs/en/rest-api/license/get-license.asciidoc new file mode 100644 index 0000000000000..cba6e71057661 --- /dev/null +++ b/x-pack/docs/en/rest-api/license/get-license.asciidoc @@ -0,0 +1,75 @@ +[role="xpack"] +[[get-license]] +=== Get License API + +This API enables you to retrieve licensing information. + +[float] +==== Request + +`GET /_xpack/license` + +[float] +==== Description + +This API returns information about the type of license, when it was issued, and +when it expires, for example. + +For more information about the different types of licenses, see +https://www.elastic.co/subscriptions. + + +[float] +==== Query Parameters + +`local`:: + (boolean) Specifies whether to retrieve local information. The default value + is `false`, which means the information is retrieved from the master node. + + +[float] +==== Authorization + +You must have `monitor` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges].
+ + +[float] +==== Examples + +The following example provides information about a basic license: + +[source,js] +-------------------------------------------------- +GET _xpack/license +-------------------------------------------------- +// CONSOLE + +[source,js] +-------------------------------------------------- +{ + "license" : { + "status" : "active", + "uid" : "cbff45e7-c553-41f7-ae4f-9205eabd80xx", + "type" : "trial", + "issue_date" : "2018-02-22T23:12:05.550Z", + "issue_date_in_millis" : 1519341125550, + "expiry_date" : "2018-03-24T23:12:05.550Z", + "expiry_date_in_millis" : 1521933125550, + "max_nodes" : 1000, + "issued_to" : "test", + "issuer" : "elasticsearch", + "start_date_in_millis" : -1 + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"cbff45e7-c553-41f7-ae4f-9205eabd80xx"/$body.license.uid/] +// TESTRESPONSE[s/"trial"/$body.license.type/] +// TESTRESPONSE[s/"2018-02-22T23:12:05.550Z"/$body.license.issue_date/] +// TESTRESPONSE[s/1519341125550/$body.license.issue_date_in_millis/] +// TESTRESPONSE[s/"2018-03-24T23:12:05.550Z"/$body.license.expiry_date/] +// TESTRESPONSE[s/1521933125550/$body.license.expiry_date_in_millis/] +// TESTRESPONSE[s/1000/$body.license.max_nodes/] +// TESTRESPONSE[s/"test"/$body.license.issued_to/] +// TESTRESPONSE[s/"elasticsearch"/$body.license.issuer/] diff --git a/x-pack/docs/en/rest-api/license/get-trial-status.asciidoc b/x-pack/docs/en/rest-api/license/get-trial-status.asciidoc new file mode 100644 index 0000000000000..b2cc1ce1b6c88 --- /dev/null +++ b/x-pack/docs/en/rest-api/license/get-trial-status.asciidoc @@ -0,0 +1,52 @@ +[role="xpack"] +[[get-trial-status]] +=== Get Trial Status API + +This API enables you to check the status of your trial license. + +[float] +==== Request + +`GET _xpack/license/trial_status` + +[float] +==== Description + +If a license is not already registered for the cluster, one is generated when +the nodes start. By default, this is a 30-day trial license that gives access +to all {xpack} features. + +NOTE: You are allowed to initiate a trial license only if your cluster has not +already activated a trial license for the current major X-Pack version. For +example, if you have already activated a trial for v6.0, you cannot start a new +trial until v7.0. You can, however, contact `info@elastic.co` to request an +extended trial license. + +For more information about the different types of licenses, see +https://www.elastic.co/subscriptions. + +==== Authorization + +You must have `monitor` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. 
+ +[float] +==== Examples + +The following example checks whether you are eligible to start a trial: + +[source,js] +------------------------------------------------------------ +GET _xpack/license/trial_status +------------------------------------------------------------ +// CONSOLE + +Example response: +[source,js] +------------------------------------------------------------ +{ + "eligible_to_start_trial": true +} +------------------------------------------------------------ +// TESTRESPONSE[s/"eligible_to_start_trial": true/"eligible_to_start_trial": $body.eligible_to_start_trial/] diff --git a/x-pack/docs/en/rest-api/license/start-basic.asciidoc b/x-pack/docs/en/rest-api/license/start-basic.asciidoc new file mode 100644 index 0000000000000..820b2b5eab64a --- /dev/null +++ b/x-pack/docs/en/rest-api/license/start-basic.asciidoc @@ -0,0 +1,74 @@ +[role="xpack"] +[[start-basic]] +=== Start Basic API + +This API starts an indefinite basic license. + +[float] +==== Request + +`POST _xpack/license/start_basic` + +[float] +==== Description + +The `start basic` API enables you to initiate an indefinite basic license, which +gives access to all {xpack} basic features. If the basic license does not support +all of the features that are available with your current license, however, you are +notified in the response. You must then re-submit the API request with the +`acknowledge` parameter set to `true`. + +To check the status of your basic license, use the following API: +<>. + +For more information about the different types of licenses, see +https://www.elastic.co/subscriptions. + +==== Authorization + +You must have `manage` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. + +[float] +==== Examples + +The following example starts a basic license if you do not currently have a license: + +[source,js] +------------------------------------------------------------ +POST _xpack/license/start_basic +------------------------------------------------------------ +// CONSOLE +// TEST[skip:license testing issues] + +Example response: +[source,js] +------------------------------------------------------------ +{ + "basic_was_started": true, + "acknowledged": true +} +------------------------------------------------------------ +// NOTCONSOLE + +The following example starts a basic license if you currently have a license with more +features than a basic license. As you are losing features, you must pass the acknowledge +parameter: + +[source,js] +------------------------------------------------------------ +POST _xpack/license/start_basic?acknowledge=true +------------------------------------------------------------ +// CONSOLE +// TEST[skip:license testing issues] + +Example response: +[source,js] +------------------------------------------------------------ +{ + "basic_was_started": true, + "acknowledged": true +} +------------------------------------------------------------ +// NOTCONSOLE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/license/start-trial.asciidoc b/x-pack/docs/en/rest-api/license/start-trial.asciidoc new file mode 100644 index 0000000000000..7754f6feef79c --- /dev/null +++ b/x-pack/docs/en/rest-api/license/start-trial.asciidoc @@ -0,0 +1,75 @@ +[role="xpack"] +[[start-trial]] +=== Start Trial API + +This API starts a 30-day trial license. 
+ +[float] +==== Request + +`POST _xpack/license/start_trial` + +[float] +==== Description + +The `start trial` API enables you to upgrade from a basic license to a 30-day +trial license, which gives access to all {xpack} features. + +NOTE: You are allowed to initiate a trial license only if your cluster has not +already activated a trial license for the current major X-Pack version. For +example, if you have already activated a trial for v6.0, you cannot start a new +trial until v7.0. You can, however, contact `info@elastic.co` to request an +extended trial license. + +To check the status of your trial license, use the following API: +<>. + +For more information about the different types of licenses, see +https://www.elastic.co/subscriptions. + +==== Authorization + +You must have `manage` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. + +[float] +==== Examples + +The following example checks whether you are eligible to start a trial: + +[source,js] +------------------------------------------------------------ +GET _xpack/license/start_trial +------------------------------------------------------------ +// CONSOLE +// TEST[skip:license testing issues] + +Example response: +[source,js] +------------------------------------------------------------ +{ + "eligible_to_start_trial": true +} +------------------------------------------------------------ +// NOTCONSOLE + +The following example starts a 30-day trial license. The acknowledge +parameter is required as you are initiating a license that will expire. + +[source,js] +------------------------------------------------------------ +POST _xpack/license/start_trial?acknowledge=true +------------------------------------------------------------ +// CONSOLE +// TEST[skip:license testing issues] + +Example response: +[source,js] +------------------------------------------------------------ +{ + "trial_was_started": true, + "acknowledged": true +} +------------------------------------------------------------ +// NOTCONSOLE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/license/update-license.asciidoc b/x-pack/docs/en/rest-api/license/update-license.asciidoc new file mode 100644 index 0000000000000..54c5539840772 --- /dev/null +++ b/x-pack/docs/en/rest-api/license/update-license.asciidoc @@ -0,0 +1,160 @@ +[role="xpack"] +[[update-license]] +=== Update License API + +This API enables you to update your license. + +[float] +==== Request + +`PUT _xpack/license` + +[float] +==== Description + +You can update your license at runtime without shutting down your nodes. +License updates take effect immediately. If the license you are installing does +not support all of the features that were available with your previous license, +however, you are notified in the response. You must then re-submit the API +request with the `acknowledge` parameter set to `true`. + +For more information about the different types of licenses, see +https://www.elastic.co/subscriptions. + +[float] +==== Query Parameters + +`acknowledge`:: + (boolean) Specifies whether you acknowledge the license changes. The default + value is `false`. + +[float] +==== Request Body + +`licenses`:: + (array) A sequence of one or more JSON documents containing the license + information. + + +[float] +==== Authorization + +If {security} is enabled, you need `manage` cluster privileges to install the +license. 
+ +If {security} is enabled and you are installing a gold or platinum license, you +must enable TLS on the transport networking layer before you install the license. +See <>. + +[float] +==== Examples + +The following example updates to a basic license: + +[source,js] +------------------------------------------------------------ +POST _xpack/license +{ + "licenses": [ + { + "uid":"893361dc-9749-4997-93cb-802e3d7fa4xx", + "type":"basic", + "issue_date_in_millis":1411948800000, + "expiry_date_in_millis":1914278399999, + "max_nodes":1, + "issued_to":"issuedTo", + "issuer":"issuer", + "signature":"xx" + } + ] +} +------------------------------------------------------------ +// CONSOLE +// TEST[skip:license testing issues] + +NOTE: These values are invalid; you must substitute the appropriate content +from your license file. + +You can alternatively use a `curl` command, for example: + +[source,shell] +------------------------------------------------------------ +curl -XPUT -u 'http://:/_xpack/license' -H "Content-Type: application/json" -d @license.json +------------------------------------------------------------ +// NOTCONSOLE + +On a Windows machine, use the following command: + +[source,shell] +------------------------------------------------------------ +gc .\license.json | Invoke-WebRequest -uri http://:/_xpack/license -Credential elastic -Method Put -ContentType "application/json" +------------------------------------------------------------ + +In these examples, + +* `` is a user ID with the appropriate authority. +* `` is the hostname of the {es} node (`localhost` if executing + locally) +* `` is the http port (defaults to `9200`) +* `license.json` is the license JSON file + +NOTE: If your {es} node has SSL enabled on the HTTP interface, you must + start your URL with `https://` + +If you previously had a license with more features than the basic license, you +receive the following response: + +[source,js] +------------------------------------------------------------ + { + "acknowledged": false, + "license_status": "valid", + "acknowledge": { + "message": """This license update requires acknowledgement. To acknowledge the license, please read the following messages and update the license again, this time with the "acknowledge=true" parameter:""", + "watcher": [ + "Watcher will be disabled" + ], + "logstash": [ + "Logstash will no longer poll for centrally-managed pipelines" + ], + "security": [ + "The following X-Pack security functionality will be disabled: ..." ] + } +} +------------------------------------------------------------ + +To complete the update, you must re-submit the API request and set the +`acknowledge` parameter to `true`.
For example: + +[source,js] +------------------------------------------------------------ +POST _xpack/license?acknowledge=true +{ + "licenses": [ + { + "uid":"893361dc-9749-4997-93cb-802e3d7fa4xx", + "type":"basic", + "issue_date_in_millis":1411948800000, + "expiry_date_in_millis":1914278399999, + "max_nodes":1, + "issued_to":"issuedTo", + "issuer":"issuer", + "signature":"xx" + } + ] +} +------------------------------------------------------------ +// CONSOLE +// TEST[skip:license testing issues] + +Alternatively: + +[source,sh] +------------------------------------------------------------ +curl -XPUT -u elastic 'http://:/_xpack/license?acknowledge=true' -H "Content-Type: application/json" -d @license.json +------------------------------------------------------------ +// NOTCONSOLE + +For more information about the features that are disabled when you downgrade +your license, see {xpack-ref}/license-expiration.html[License Expiration]. diff --git a/x-pack/docs/en/rest-api/licensing.asciidoc b/x-pack/docs/en/rest-api/licensing.asciidoc new file mode 100644 index 0000000000000..b30590630f7f9 --- /dev/null +++ b/x-pack/docs/en/rest-api/licensing.asciidoc @@ -0,0 +1,22 @@ +[role="xpack"] +[[licensing-apis]] +== Licensing APIs + +You can use the following APIs to manage your licenses: + +* <> +* <> +* <> +* <> +* <> +* <> +* <> + + +include::license/delete-license.asciidoc[] +include::license/get-license.asciidoc[] +include::license/get-trial-status.asciidoc[] +include::license/start-trial.asciidoc[] +include::license/get-basic-status.asciidoc[] +include::license/start-basic.asciidoc[] +include::license/update-license.asciidoc[] diff --git a/x-pack/docs/en/rest-api/migration.asciidoc b/x-pack/docs/en/rest-api/migration.asciidoc new file mode 100644 index 0000000000000..51f1e5fae0f65 --- /dev/null +++ b/x-pack/docs/en/rest-api/migration.asciidoc @@ -0,0 +1,13 @@ +[role="xpack"] +[[migration-api]] +== Migration APIs + +The migration APIs simplify upgrading {xpack} indices from one version to another. + +* <> +* <> +* <> + +include::migration/assistance.asciidoc[] +include::migration/upgrade.asciidoc[] +include::migration/deprecation.asciidoc[] diff --git a/x-pack/docs/en/rest-api/migration/assistance.asciidoc b/x-pack/docs/en/rest-api/migration/assistance.asciidoc new file mode 100644 index 0000000000000..1af625a97ecff --- /dev/null +++ b/x-pack/docs/en/rest-api/migration/assistance.asciidoc @@ -0,0 +1,91 @@ +[role="xpack"] +[[migration-api-assistance]] +=== Migration Assistance API + +The Migration Assistance API analyzes existing indices in the cluster and +returns the information about indices that require some changes before the +cluster can be upgraded to the next major version. + +[float] +==== Request + +`GET /_xpack/migration/assistance` + + +`GET /_xpack/migration/assistance/` + +//==== Description + +[float] +==== Path Parameters + +`index_name`:: + (string) Identifier for the index. It can be an index name or a wildcard + expression. 
+ +//==== Query Parameters + +//==== Authorization + +[float] +==== Examples + +To see a list of indices that need to be upgraded or reindexed, submit a GET +request to the `/_xpack/migration/assistance` endpoint: + +[source,js] +-------------------------------------------------- +GET /_xpack/migration/assistance +-------------------------------------------------- +// CONSOLE +// TEST[skip:cannot create an old index in docs test] + +A successful call returns a list of indices that need to be updated or reindexed: + +[source,js] +-------------------------------------------------- +{ + "indices" : { + ".watches" : { + "action_required" : "upgrade" + }, + ".security" : { + "action_required" : "upgrade" + }, + "my_old_index": { + "action_required" : "reindex" + }, + "my_other_old_index": { + "action_required" : "reindex" + } + } +} +-------------------------------------------------- +// NOTCONSOLE + +To check a particular index or set of indices, specify the index name or mask +as the last part of the `/_xpack/migration/assistance/index_name` endpoint: + +[source,js] +-------------------------------------------------- +GET /_xpack/migration/assistance/my_* +-------------------------------------------------- +// CONSOLE +// TEST[skip:cannot create an old index in docs test] + +A successful call returns a list of indices that need to be updated or reindexed +and that match the indices specified on the endpoint: + +[source,js] +-------------------------------------------------- +{ + "indices" : { + "my_old_index": { + "action_required" : "reindex" + }, + "my_other_old_index": { + "action_required" : "reindex" + } + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/x-pack/docs/en/rest-api/migration/deprecation.asciidoc b/x-pack/docs/en/rest-api/migration/deprecation.asciidoc new file mode 100644 index 0000000000000..54feee7903af8 --- /dev/null +++ b/x-pack/docs/en/rest-api/migration/deprecation.asciidoc @@ -0,0 +1,120 @@ +[role="xpack"] +[[migration-api-deprecation]] +=== Deprecation Info APIs + +The deprecation API retrieves information about cluster, node, and index-level +settings that use deprecated features that will be removed or changed in the +next major version. + +[float] +==== Request + +`GET /_xpack/migration/deprecations` + + +`GET //_xpack/migration/deprecations` + +//=== Description + +[float] +==== Path Parameters + +`index_name`:: + (string) Identifier for the index. It can be an index name or a wildcard + expression. When you specify this parameter, only index-level deprecations for + the specified indices are returned.
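+
+For example, the following request (a sketch only; the index name is
+illustrative and is the same one that appears in the sample response below)
+would return only the deprecations that affect that single index:
+
+[source,js]
+--------------------------------------------------
+GET /.monitoring-es-6-2017.07.21/_xpack/migration/deprecations
+--------------------------------------------------
+// NOTCONSOLE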
+ +//=== Query Parameters + +//=== Authorization + +[float] +==== Examples + +To see the list of offenders in your cluster, submit a GET request to the +`_xpack/migration/deprecations` endpoint: + +[source,js] +-------------------------------------------------- +GET /_xpack/migration/deprecations +-------------------------------------------------- +// CONSOLE +// TEST[skip:cannot assert tests have certain deprecations] + +Example response: + + +["source","js",subs="attributes,callouts,macros"] +-------------------------------------------------- +{ + "cluster_settings" : [ + { + "level" : "info", + "message" : "Network settings changes", + "url" : "{ref-60}/breaking_60_indices_changes.html#_index_templates_use_literal_index_patterns_literal_instead_of_literal_template_literal", + "details" : "templates using `template` field: watches,.monitoring-alerts,.watch-history-6,.ml-notifications,security-index-template,triggered_watches,.monitoring-es,.ml-meta,.ml-state,.monitoring-logstash,.ml-anomalies-,.monitoring-kibana" + } + ], + "node_settings" : [ ], + "index_settings" : { + ".monitoring-es-6-2017.07.21" : [ + { + "level" : "info", + "message" : "Coercion of boolean fields", + "url" : "{ref-60}/breaking_60_mappings_changes.html#_coercion_of_boolean_fields", + "details" : "[[type: doc, field: spins], [type: doc, field: mlockall], [type: doc, field: node_master], [type: doc, field: primary]]" + } + ] + } +} +-------------------------------------------------- +// NOTCONSOLE + +The response breaks down all the specific forward-incompatible settings that you +should resolve before upgrading your cluster. Any offending settings are +represented as a deprecation warning. + +The following is an example deprecation warning: + +["source","js",subs="attributes,callouts,macros"] +-------------------------------------------------- +{ + "level" : "info", + "message" : "This is the generic descriptive message of the breaking change", + "url" : "{ref-60}/breaking_60_indices_changes.html", + "details" : "more information, like which nodes, indices, or settings are to blame" +} +-------------------------------------------------- +// NOTCONSOLE + +As is shown, there is a `level` property that describes the significance of the +issue. + +|======= +|none | Everything is good. +|info | An advisory note that something has changed. No action needed. +|warning | You can upgrade directly, but you are using deprecated functionality +which will not be available in the next major version. +|critical | You cannot upgrade without fixing this problem. +|======= + +The `message` property and the optional `details` property provide descriptive +information about the deprecation warning. The `url` property provides a link to +the Breaking Changes Documentation, where you can find more information about +this change. + +Any cluster-level deprecation warnings can be found under the `cluster_settings` +key. Similarly, any node-level warnings are found under `node_settings`. Since +only a select subset of your nodes might incorporate these settings, it is +important to read the `details` section for more information about which nodes +are affected. Index warnings are sectioned off per index and can be filtered +using an index-pattern in the query. 
+ +The following example request shows only index-level deprecations of all +`logstash-*` indices: + +[source,js] +-------------------------------------------------- +GET /logstash-*/_xpack/migration/deprecations +-------------------------------------------------- +// CONSOLE +// TEST[skip:cannot assert tests have certain deprecations] diff --git a/x-pack/docs/en/rest-api/migration/upgrade.asciidoc b/x-pack/docs/en/rest-api/migration/upgrade.asciidoc new file mode 100644 index 0000000000000..839a0057e82fe --- /dev/null +++ b/x-pack/docs/en/rest-api/migration/upgrade.asciidoc @@ -0,0 +1,138 @@ +[role="xpack"] +[[migration-api-upgrade]] +=== Migration Upgrade API + +The Migration Upgrade API performs the upgrade of internal indices to make them +compatible with the next major version. + +[float] +==== Request + +`POST /_xpack/migration/upgrade/` + +[float] +==== Description + +Indices must be upgraded one at a time. + +[float] +==== Path Parameters + +`index_name`:: + (string) Identifier for the index. + +`wait_for_completion`:: + (boolean) Defines whether the upgrade call blocks until the upgrade process is + finished. The default value is `true`. If set to `false`, the upgrade can be + performed asynchronously. + +//==== Query Parameters + +//==== Authorization + +[float] +==== Examples + +The following example submits a POST request to the +`/_xpack/migration/upgrade/` endpoint: + +[source,js] +-------------------------------------------------- +POST /_xpack/migration/upgrade/.watches +-------------------------------------------------- +// CONSOLE +// TEST[skip:cannot create an old index in docs test] + +A successful call returns the statistics about the upgrade process: + +[source,js] +-------------------------------------------------- +{ + "took" : 127, + "timed_out" : false, + "total" : 4, + "updated" : 0, + "created" : 4, + "deleted" : 0, + "batches" : 1, + "version_conflicts" : 0, + "noops" : 0, + "retries" : { + "bulk" : 0, + "search" : 0 + }, + "throttled_millis" : 0, + "failures" : [ ] +} +-------------------------------------------------- +// NOTCONSOLE + +The following example upgrades a large index asynchronously by specifying the +`wait_for_completion` parameter: + +[source,js] +-------------------------------------------------- +POST /_xpack/migration/upgrade/.watches?wait_for_completion=false +-------------------------------------------------- +// CONSOLE +// TEST[skip:cannot create an old index in docs test] + +This call should return the id of the upgrade process task: + +[source,js] +-------------------------------------------------- +{ + "task" : "PFvgv7T6TGumRyFF3vqTFg:1137" +} +-------------------------------------------------- +// NOTCONSOLE + +The status of the running or finished upgrade requests can be obtained by using +the <>: + +[source,js] +-------------------------------------------------- +GET _tasks/PFvgv7T6TGumRyFF3vqTFg:1137?detailed=true +-------------------------------------------------- +// CONSOLE +// TEST[skip:cannot create an old index in docs test] + +[source,js] +-------------------------------------------------- +{ + "completed" : true, <1> + "task" : { + "node" : "PFvgv7T6TGumRyFF3vqTFg", + "id" : 1137, + "type" : "transport", + "action" : "cluster:admin/xpack/upgrade", + "description" : "", + "start_time_in_millis" : 1500650625413, + "running_time_in_nanos" : 947456819, + "cancellable" : true + }, + "response" : { <2> + "took" : 212, + "timed_out" : false, + "total" : 4, + "updated" : 0, + "created" : 4, + "deleted" : 0, + "batches" : 1, + 
"version_conflicts" : 0, + "noops" : 0, + "retries" : { + "bulk" : 0, + "search" : 0 + }, + "throttled_millis" : 0, + "failures" : [ ] + } +} +-------------------------------------------------- +// NOTCONSOLE + +<1> If the `completed` field value is `true`, the upgrade request has finished. +If it is `false`, the request is still running. + +<2> The `response` field contains the status of the upgrade request. diff --git a/x-pack/docs/en/rest-api/ml-api.asciidoc b/x-pack/docs/en/rest-api/ml-api.asciidoc new file mode 100644 index 0000000000000..e9a987cc4a709 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml-api.asciidoc @@ -0,0 +1,114 @@ +[role="xpack"] +[[ml-apis]] +== Machine Learning APIs + +You can use the following APIs to perform {ml} activities. +See <> for the resource definitions used by the +machine learning APIs and in advanced job configuration options in Kibana. + +[float] +[[ml-api-calendar-endpoint]] +=== Calendars + +* <>, <> +* <>, <> +* <>, <> +* <>, <> + +[float] +[[ml-api-datafeed-endpoint]] +=== {dfeeds-cap} + +* <>, <> +* <>, <> +* <>, <> +* <> +* <> + + + +[float] +[[ml-api-job-endpoint]] +=== Jobs + +//* <>, <> +* <>, <> +* <>, <> +* <>, <> +* <>, <> +* <> +* <> +* <> +* <> + +[float] +[[ml-api-snapshot-endpoint]] +=== Model Snapshots + +* <> +* <> +* <> +* <> + + +[float] +[[ml-api-result-endpoint]] +=== Results + +* <> +* <> +* <> +* <> +* <> + +//ADD +include::ml/post-calendar-event.asciidoc[] +include::ml/put-calendar-job.asciidoc[] +//CLOSE +include::ml/close-job.asciidoc[] +//CREATE +include::ml/put-calendar.asciidoc[] +include::ml/put-datafeed.asciidoc[] +include::ml/put-job.asciidoc[] +//DELETE +include::ml/delete-calendar.asciidoc[] +include::ml/delete-datafeed.asciidoc[] +include::ml/delete-calendar-event.asciidoc[] +include::ml/delete-job.asciidoc[] +include::ml/delete-calendar-job.asciidoc[] +include::ml/delete-snapshot.asciidoc[] +//FLUSH +include::ml/flush-job.asciidoc[] +//FORECAST +include::ml/forecast.asciidoc[] +//GET +include::ml/get-calendar.asciidoc[] +include::ml/get-bucket.asciidoc[] +include::ml/get-overall-buckets.asciidoc[] +include::ml/get-category.asciidoc[] +include::ml/get-datafeed.asciidoc[] +include::ml/get-datafeed-stats.asciidoc[] +include::ml/get-influencer.asciidoc[] +include::ml/get-job.asciidoc[] +include::ml/get-job-stats.asciidoc[] +include::ml/get-snapshot.asciidoc[] +include::ml/get-calendar-event.asciidoc[] +include::ml/get-record.asciidoc[] +//OPEN +include::ml/open-job.asciidoc[] +//POST +include::ml/post-data.asciidoc[] +//PREVIEW +include::ml/preview-datafeed.asciidoc[] +//REVERT +include::ml/revert-snapshot.asciidoc[] +//START/STOP +include::ml/start-datafeed.asciidoc[] +include::ml/stop-datafeed.asciidoc[] +//UPDATE +include::ml/update-datafeed.asciidoc[] +include::ml/update-job.asciidoc[] +include::ml/update-snapshot.asciidoc[] +//VALIDATE +//include::ml/validate-detector.asciidoc[] +//include::ml/validate-job.asciidoc[] diff --git a/x-pack/docs/en/rest-api/ml/calendarresource.asciidoc b/x-pack/docs/en/rest-api/ml/calendarresource.asciidoc new file mode 100644 index 0000000000000..8edb43ed7a393 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/calendarresource.asciidoc @@ -0,0 +1,14 @@ +[role="xpack"] +[[ml-calendar-resource]] +=== Calendar Resources + +A calendar resource has the following properties: + +`calendar_id`:: + (string) A numerical character string that uniquely identifies the calendar. + +`job_ids`:: + (array) An array of job identifiers. For example: `["total-requests"]`. 
+ +For more information, see +{xpack-ref}/ml-calendars.html[Calendars and Scheduled Events]. diff --git a/x-pack/docs/en/rest-api/ml/close-job.asciidoc b/x-pack/docs/en/rest-api/ml/close-job.asciidoc new file mode 100644 index 0000000000000..3e612f5171da1 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/close-job.asciidoc @@ -0,0 +1,92 @@ +[role="xpack"] +[[ml-close-job]] +=== Close Jobs API +++++ +Close Jobs +++++ + +This API enables you to close one or more jobs. +A job can be opened and closed multiple times throughout its lifecycle. + +A closed job cannot receive data or perform analysis +operations, but you can still explore and navigate results. + + +==== Request + +`POST _xpack/ml/anomaly_detectors//_close` + + +`POST _xpack/ml/anomaly_detectors/,/_close` + + +`POST _xpack/ml/anomaly_detectors/_all/_close` + + + +==== Description + +You can close multiple jobs in a single API request by using a group name, a +comma-separated list of jobs, or a wildcard expression. You can close all jobs +by using `_all` or by specifying `*` as the ``. + +When you close a job, it runs housekeeping tasks such as pruning the model history, +flushing buffers, calculating final results and persisting the model snapshots. +Depending upon the size of the job, it could take several minutes to close and +the equivalent time to re-open. + +After it is closed, the job has a minimal overhead on the cluster except for +maintaining its meta data. Therefore it is a best practice to close jobs that +are no longer required to process data. + +When a {dfeed} that has a specified end date stops, it automatically closes +the job. + +NOTE: If you use the `force` query parameter, the request returns without performing +the associated actions such as flushing buffers and persisting the model snapshots. +Therefore, do not use this parameter if you want the job to be in a consistent state +after the close job API returns. The `force` query parameter should only be used in +situations where the job has already failed, or where you are not interested in +results the job might have recently produced or might produce in the future. + + +==== Path Parameters + +`job_id`:: + (string) Identifier for the job. It can be a job identifier, a group name, or + a wildcard expression. + + +==== Query Parameters + +`force`:: + (boolean) Use to close a failed job, or to forcefully close a job which has not + responded to its initial close request. + +`timeout`:: + (time units) Controls the time to wait until a job has closed. + The default value is 30 minutes. + + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. 
+ + +==== Examples + +The following example closes the `total-requests` job: + +[source,js] +-------------------------------------------------- +POST _xpack/ml/anomaly_detectors/total-requests/_close +-------------------------------------------------- +// CONSOLE +// TEST[setup:server_metrics_openjob] + +When the job is closed, you receive the following results: +[source,js] +---- +{ + "closed": true +} +---- +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/datafeedresource.asciidoc b/x-pack/docs/en/rest-api/ml/datafeedresource.asciidoc new file mode 100644 index 0000000000000..0ffeb6bc89d72 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/datafeedresource.asciidoc @@ -0,0 +1,118 @@ +[role="xpack"] +[[ml-datafeed-resource]] +=== {dfeed-cap} Resources + +A {dfeed} resource has the following properties: + +`aggregations`:: + (object) If set, the {dfeed} performs aggregation searches. + Support for aggregations is limited and should only be used with + low cardinality data. For more information, see + {xpack-ref}/ml-configuring-aggregation.html[Aggregating Data for Faster Performance]. + +`chunking_config`:: + (object) Specifies how data searches are split into time chunks. + See <>. + For example: `{"mode": "manual", "time_span": "3h"}` + +`datafeed_id`:: + (string) A numerical character string that uniquely identifies the {dfeed}. + This property is informational; you cannot change the identifier for existing + {dfeeds}. + +`frequency`:: + (time units) The interval at which scheduled queries are made while the + {dfeed} runs in real time. The default value is either the bucket span for short + bucket spans, or, for longer bucket spans, a sensible fraction of the bucket + span. For example: `150s`. + +`indices`:: + (array) An array of index names. For example: `["it_ops_metrics"]` + +`job_id`:: + (string) The unique identifier for the job to which the {dfeed} sends data. + +`query`:: + (object) The {es} query domain-specific language (DSL). This value + corresponds to the query object in an {es} search POST body. All the + options that are supported by {es} can be used, as this object is + passed verbatim to {es}. By default, this property has the following + value: `{"match_all": {"boost": 1}}`. + +`query_delay`:: + (time units) The number of seconds behind real time that data is queried. For + example, if data from 10:04 a.m. might not be searchable in {es} until + 10:06 a.m., set this property to 120 seconds. The default value is randomly + selected between `60s` and `120s`. This randomness improves the query + performance when there are multiple jobs running on the same node. + +`script_fields`:: + (object) Specifies scripts that evaluate custom expressions and returns + script fields to the {dfeed}. + The <> in a job can contain + functions that use these script fields. + For more information, see + {xpack-ref}/ml-configuring-transform.html[Transforming Data With Script Fields]. + +`scroll_size`:: + (unsigned integer) The `size` parameter that is used in {es} searches. + The default value is `1000`. + +`types`:: + (array) A list of types to search for within the specified indices. For + example: `[]`. This property is provided for backwards compatibility with + releases earlier than 6.0.0. For more information, see <>. + +[[ml-datafeed-chunking-config]] +==== Chunking Configuration Objects + +{dfeeds-cap} might be required to search over long time periods, for several months +or years. This search is split into time chunks in order to ensure the load +on {es} is managed. 
Chunking configuration controls how the size of these time +chunks are calculated and is an advanced configuration option. + +A chunking configuration object has the following properties: + +`mode`:: + There are three available modes: + + `auto`::: The chunk size will be dynamically calculated. This is the default + and recommended value. + `manual`::: Chunking will be applied according to the specified `time_span`. + `off`::: No chunking will be applied. + +`time_span`:: + (time units) The time span that each search will be querying. + This setting is only applicable when the mode is set to `manual`. + For example: `3h`. + +[float] +[[ml-datafeed-counts]] +==== {dfeed-cap} Counts + +The get {dfeed} statistics API provides information about the operational +progress of a {dfeed}. All of these properties are informational; you cannot +update their values: + +`assignment_explanation`:: + (string) For started {dfeeds} only, contains messages relating to the + selection of a node. + +`datafeed_id`:: + (string) A numerical character string that uniquely identifies the {dfeed}. + +`node`:: + (object) The node upon which the {dfeed} is started. The {dfeed} and job will + be on the same node. + `id`::: The unique identifier of the node. For example, + "0-o0tOoRTwKFZifatTWKNw". + `name`::: The node name. For example, `0-o0tOo`. + `ephemeral_id`::: The node ephemeral ID. + `transport_address`::: The host and port where transport HTTP connections are + accepted. For example, `127.0.0.1:9300`. + `attributes`::: For example, `{"ml.max_open_jobs": "10"}`. + +`state`:: + (string) The status of the {dfeed}, which can be one of the following values: + + `started`::: The {dfeed} is actively receiving data. + `stopped`::: The {dfeed} is stopped and will not receive data until it is + re-started. diff --git a/x-pack/docs/en/rest-api/ml/delete-calendar-event.asciidoc b/x-pack/docs/en/rest-api/ml/delete-calendar-event.asciidoc new file mode 100644 index 0000000000000..b6f3c644acfea --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/delete-calendar-event.asciidoc @@ -0,0 +1,55 @@ +[role="xpack"] +[[ml-delete-calendar-event]] +=== Delete Events from Calendar API +++++ +Delete Events from Calendar +++++ + +This API enables you to delete scheduled events from a calendar. + + +==== Request + +`DELETE _xpack/ml/calendars//events/` + + +==== Description + +This API removes individual events from a calendar. To remove all scheduled +events and delete the calendar, see the +<>. + +==== Path Parameters + +`calendar_id`(required):: + (string) Identifier for the calendar. + +`event_id` (required):: + (string) Identifier for the scheduled event. You can obtain this identifier + by using the <>. + + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. 
+ +==== Examples + +The following example deletes a scheduled event from the `planned-outages` +calendar: + +[source,js] +-------------------------------------------------- +DELETE _xpack/ml/calendars/planned-outages/events/LS8LJGEBMTCMA-qz49st +-------------------------------------------------- +// CONSOLE +// TEST[skip:automatically-generated ID] + +When the event is removed, you receive the following results: +[source,js] +---- +{ + "acknowledged": true +} +---- diff --git a/x-pack/docs/en/rest-api/ml/delete-calendar-job.asciidoc b/x-pack/docs/en/rest-api/ml/delete-calendar-job.asciidoc new file mode 100644 index 0000000000000..54fe9ebdaba9b --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/delete-calendar-job.asciidoc @@ -0,0 +1,53 @@ +[role="xpack"] +[[ml-delete-calendar-job]] +=== Delete Jobs from Calendar API +++++ +Delete Jobs from Calendar +++++ + +This API enables you to delete jobs from a calendar. + + +==== Request + +`DELETE _xpack/ml/calendars//jobs/` + + +==== Path Parameters + +`calendar_id`(required):: + (string) Identifier for the calendar. + +`job_id` (required):: + (string) An identifier for the job. It can be a job identifier, a group name, or a + comma-separated list of jobs or groups. + + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. + +==== Examples + +The following example removes the association between the `planned-outages` +calendar and `total-requests` job: + +[source,js] +-------------------------------------------------- +DELETE _xpack/ml/calendars/planned-outages/jobs/total-requests +-------------------------------------------------- +// CONSOLE +// TEST[setup:calendar_outages_addjob] + +When the job is removed from the calendar, you receive the following +results: + +[source,js] +---- +{ + "calendar_id": "planned-outages", + "job_ids": [] +} +---- +//TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/delete-calendar.asciidoc b/x-pack/docs/en/rest-api/ml/delete-calendar.asciidoc new file mode 100644 index 0000000000000..37b3ae3c87b36 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/delete-calendar.asciidoc @@ -0,0 +1,52 @@ +[role="xpack"] +[[ml-delete-calendar]] +=== Delete Calendar API +++++ +Delete Calendar +++++ + +This API enables you to delete a calendar. + + +==== Request + +`DELETE _xpack/ml/calendars/` + + +==== Description + +This API removes all scheduled events from the calendar then deletes the +calendar. + + +==== Path Parameters + +`calendar_id` (required):: + (string) Identifier for the calendar. + + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. 
+ + +==== Examples + +The following example deletes the `planned-outages` calendar: + +[source,js] +-------------------------------------------------- +DELETE _xpack/ml/calendars/planned-outages +-------------------------------------------------- +// CONSOLE +// TEST[setup:calendar_outages] + +When the calendar is deleted, you receive the following results: +[source,js] +---- +{ + "acknowledged": true +} +---- +//TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/delete-datafeed.asciidoc b/x-pack/docs/en/rest-api/ml/delete-datafeed.asciidoc new file mode 100644 index 0000000000000..de529267f4f7c --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/delete-datafeed.asciidoc @@ -0,0 +1,59 @@ +[role="xpack"] +[[ml-delete-datafeed]] +=== Delete {dfeeds-cap} API +++++ +Delete {dfeeds-cap} +++++ + +This API enables you to delete an existing {dfeed}. + + +==== Request + +`DELETE _xpack/ml/datafeeds/` + + +==== Description + +NOTE: Unless the `force` parameter is used, the {dfeed} must be stopped before it can be deleted. + + +==== Path Parameters + +`feed_id` (required):: + (string) Identifier for the {dfeed} + + +===== Query Parameters + +`force`:: + (boolean) Use to forcefully delete a started {dfeed}; this method is quicker than + stopping and deleting the {dfeed}. + + +===== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. +//<>. + + +==== Examples + +The following example deletes the `datafeed-total-requests` {dfeed}: + +[source,js] +-------------------------------------------------- +DELETE _xpack/ml/datafeeds/datafeed-total-requests +-------------------------------------------------- +// CONSOLE +// TEST[setup:server_metrics_datafeed] + +When the {dfeed} is deleted, you receive the following results: +[source,js] +---- +{ + "acknowledged": true +} +---- +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/delete-job.asciidoc b/x-pack/docs/en/rest-api/ml/delete-job.asciidoc new file mode 100644 index 0000000000000..7aaba59e122eb --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/delete-job.asciidoc @@ -0,0 +1,68 @@ +[role="xpack"] +[[ml-delete-job]] +=== Delete Jobs API +++++ +Delete Jobs +++++ + +This API enables you to delete an existing anomaly detection job. + + +==== Request + +`DELETE _xpack/ml/anomaly_detectors/` + + +==== Description + +All job configuration, model state and results are deleted. + +IMPORTANT: Deleting a job must be done via this API only. Do not delete the + job directly from the `.ml-*` indices using the Elasticsearch + DELETE Document API. When {security} is enabled, make sure no `write` + privileges are granted to anyone over the `.ml-*` indices. + +Before you can delete a job, you must delete the {dfeeds} that are associated +with it. See <>. Unless the `force` parameter +is used the job must be closed before it can be deleted. + +It is not currently possible to delete multiple jobs using wildcards or a comma +separated list. + +==== Path Parameters + +`job_id` (required):: + (string) Identifier for the job + +===== Query Parameters + +`force`:: + (boolean) Use to forcefully delete an opened job; this method is quicker than + closing and deleting the job. + + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. 
+ + +==== Examples + +The following example deletes the `total-requests` job: + +[source,js] +-------------------------------------------------- +DELETE _xpack/ml/anomaly_detectors/total-requests +-------------------------------------------------- +// CONSOLE +// TEST[setup:server_metrics_job] + +When the job is deleted, you receive the following results: +[source,js] +---- +{ + "acknowledged": true +} +---- +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/delete-snapshot.asciidoc b/x-pack/docs/en/rest-api/ml/delete-snapshot.asciidoc new file mode 100644 index 0000000000000..b63e37a1b454b --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/delete-snapshot.asciidoc @@ -0,0 +1,55 @@ +[role="xpack"] +[[ml-delete-snapshot]] +=== Delete Model Snapshots API +++++ +Delete Model Snapshots +++++ + +This API enables you to delete an existing model snapshot. + + +==== Request + +`DELETE _xpack/ml/anomaly_detectors//model_snapshots/` + + +==== Description + +IMPORTANT: You cannot delete the active model snapshot. To delete that snapshot, +first revert to a different one. To identify the active model snapshot, refer to +the `model_snapshot_id` in the results from the get jobs API. + +==== Path Parameters + +`job_id` (required):: + (string) Identifier for the job + +`snapshot_id` (required):: + (string) Identifier for the model snapshot + + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. +//<>. + + +==== Examples + +The following example deletes the `1491948163` snapshot: + +[source,js] +-------------------------------------------------- +DELETE _xpack/ml/anomaly_detectors/farequote/model_snapshots/1491948163 +-------------------------------------------------- +// CONSOLE +// TEST[skip:todo] + +When the snapshot is deleted, you receive the following results: +[source,js] +---- +{ + "acknowledged": true +} +---- diff --git a/x-pack/docs/en/rest-api/ml/eventresource.asciidoc b/x-pack/docs/en/rest-api/ml/eventresource.asciidoc new file mode 100644 index 0000000000000..c9ab78964213e --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/eventresource.asciidoc @@ -0,0 +1,26 @@ +[role="xpack"] +[[ml-event-resource]] +=== Scheduled Event Resources + +An events resource has the following properties: + +`calendar_id`:: + (string) An identifier for the calendar that contains the scheduled + event. This property is optional in the <>. + +`description`:: + (string) A description of the scheduled event. + +`end_time`:: + (string) The timestamp for the end of the scheduled event. The datetime string + is in ISO 8601 format. + +`event_id`:: + (string) An automatically-generated identifier for the scheduled event. + +`start_time`:: + (string) The timestamp for the beginning of the scheduled event. The datetime + string is in ISO 8601 format. + +For more information, see +{xpack-ref}/ml-calendars.html[Calendars and Scheduled Events]. diff --git a/x-pack/docs/en/rest-api/ml/flush-job.asciidoc b/x-pack/docs/en/rest-api/ml/flush-job.asciidoc new file mode 100644 index 0000000000000..2a65c5284fcf4 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/flush-job.asciidoc @@ -0,0 +1,114 @@ +[role="xpack"] +[[ml-flush-job]] +=== Flush Jobs API +++++ +Flush Jobs +++++ + +This API forces any buffered data to be processed by the job. + + +==== Request + +`POST _xpack/ml/anomaly_detectors//_flush` + + +==== Description + +The flush jobs API is only applicable when sending data for analysis using the +<>. 
Depending on the content of the buffer, then it +might additionally calculate new results. + +Both flush and close operations are similar, however the flush is more efficient +if you are expecting to send more data for analysis. When flushing, the job +remains open and is available to continue analyzing data. A close operation +additionally prunes and persists the model state to disk and the job must be +opened again before analyzing further data. + + +==== Path Parameters + +`job_id` (required):: +(string) Identifier for the job + + +==== Query Parameters + +`advance_time`:: + (string) Specifies to advance to a particular time value. Results are + generated and the model is updated for data from the specified time interval. + +`calc_interim`:: + (boolean) If true, calculates the interim results for the most recent bucket + or all buckets within the latency period. + +`end`:: + (string) When used in conjunction with `calc_interim`, specifies the range + of buckets on which to calculate interim results. + +`skip_time`:: + (string) Specifies to skip to a particular time value. Results are not + generated and the model is not updated for data from the specified time + interval. + +`start`:: + (string) When used in conjunction with `calc_interim`, specifies the range of + buckets on which to calculate interim results. + + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. + + +==== Examples + +The following example flushes the `total-requests` job: + +[source,js] +-------------------------------------------------- +POST _xpack/ml/anomaly_detectors/total-requests/_flush +{ + "calc_interim": true +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:server_metrics_openjob] + +When the operation succeeds, you receive the following results: +[source,js] +---- +{ + "flushed": true, + "last_finalized_bucket_end": 1455234900000 +} +---- +// TESTRESPONSE[s/"last_finalized_bucket_end": 1455234900000/"last_finalized_bucket_end": $body.last_finalized_bucket_end/] + +The `last_finalized_bucket_end` provides the timestamp (in +milliseconds-since-the-epoch) of the end of the last bucket that was processed. + +If you want to flush the job to a specific timestamp, you can use the +`advance_time` or `skip_time` parameters. For example, to advance to 11 AM GMT +on January 1, 2018: + +[source,js] +-------------------------------------------------- +POST _xpack/ml/anomaly_detectors/total-requests/_flush +{ + "advance_time": "1514804400" +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:server_metrics_openjob] + +When the operation succeeds, you receive the following results: +[source,js] +---- +{ + "flushed": true, + "last_finalized_bucket_end": 1514804400000 +} +---- +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/forecast.asciidoc b/x-pack/docs/en/rest-api/ml/forecast.asciidoc new file mode 100644 index 0000000000000..9e3e48a2e7b38 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/forecast.asciidoc @@ -0,0 +1,79 @@ +[role="xpack"] +[[ml-forecast]] +=== Forecast Jobs API +++++ +Forecast Jobs +++++ + +This API uses historical behavior to predict the future behavior of a time +series. + +==== Request + +`POST _xpack/ml/anomaly_detectors//_forecast` + + +==== Description + +See {xpack-ref}/ml-overview.html#ml-forecasting[Forecasting the Future]. 
+ +[NOTE] +=============================== + +* If you use an `over_field_name` property in your job, you cannot create a +forecast. For more information about this property, see <>. +* The job must be open when you create a forecast. Otherwise, an error occurs. +=============================== + +==== Path Parameters + +`job_id`:: + (string) Identifier for the job. + + +==== Request Parameters + +`duration`:: + (time units) A period of time that indicates how far into the future to + forecast. For example, `30d` corresponds to 30 days. The default value is 1 + day. The forecast starts at the last record that was processed. For more + information about time units, see <>. + +`expires_in`:: + (time units) The period of time that forecast results are retained. + After a forecast expires, the results are deleted. The default value is 14 days. + If set to a value of `0`, the forecast is never automatically deleted. + For more information about time units, see <>. + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. + + +==== Examples + +The following example requests a 10 day forecast for the `total-requests` job: + +[source,js] +-------------------------------------------------- +POST _xpack/ml/anomaly_detectors/total-requests/_forecast +{ + "duration": "10d" +} +-------------------------------------------------- +// CONSOLE +// TEST[skip:todo] + +When the forecast is created, you receive the following results: +[source,js] +---- +{ + "acknowledged": true, + "forecast_id": "wkCWa2IB2lF8nSE_TzZo" +} +---- + +You can subsequently see the forecast in the *Single Metric Viewer* in {kib}. +//and in the results that you retrieve by using {ml} APIs such as the +//<> and <>. diff --git a/x-pack/docs/en/rest-api/ml/get-bucket.asciidoc b/x-pack/docs/en/rest-api/ml/get-bucket.asciidoc new file mode 100644 index 0000000000000..9a20d4fc15e52 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/get-bucket.asciidoc @@ -0,0 +1,137 @@ +[role="xpack"] +[[ml-get-bucket]] +=== Get Buckets API +++++ +Get Buckets +++++ + +This API enables you to retrieve job results for one or more buckets. + + +==== Request + +`GET _xpack/ml/anomaly_detectors//results/buckets` + + +`GET _xpack/ml/anomaly_detectors//results/buckets/` + + +==== Description + +The get buckets API presents a chronological view of the records, grouped by +bucket. + + +==== Path Parameters + +`job_id`:: + (string) Identifier for the job + +`timestamp`:: + (string) The timestamp of a single bucket result. + If you do not specify this optional parameter, the API returns information + about all buckets. + + +==== Request Body + +`anomaly_score`:: + (double) Returns buckets with anomaly scores greater or equal than this value. + +`desc`:: + (boolean) If true, the buckets are sorted in descending order. + +`end`:: + (string) Returns buckets with timestamps earlier than this time. + +`exclude_interim`:: + (boolean) If true, the output excludes interim results. + By default, interim results are included. + +`expand`:: + (boolean) If true, the output includes anomaly records. + +`page`:: +`from`::: + (integer) Skips the specified number of buckets. +`size`::: + (integer) Specifies the maximum number of buckets to obtain. + +`sort`:: + (string) Specifies the sort field for the requested buckets. + By default, the buckets are sorted by the `timestamp` field. + +`start`:: + (string) Returns buckets with timestamps after this time. 
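+
+The paging and sort options can be combined, for example to step through the
+buckets in order of decreasing anomaly score. The following request is an
+illustrative sketch that reuses the `it-ops-kpi` job from the example below:
+
+[source,js]
+--------------------------------------------------
+GET _xpack/ml/anomaly_detectors/it-ops-kpi/results/buckets
+{
+  "sort": "anomaly_score",
+  "desc": true,
+  "page": {
+    "from": 0,
+    "size": 25
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:illustrative example]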
+ + +===== Results + +The API returns the following information: + +`buckets`:: + (array) An array of bucket objects. For more information, see + <>. + + +==== Authorization + +You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster +privileges to use this API. You also need `read` index privilege on the index +that stores the results. The `machine_learning_admin` and `machine_learning_user` +roles provide these privileges. For more information, see +{xpack-ref}/security-privileges.html[Security Privileges] and +{xpack-ref}/built-in-roles.html[Built-in Roles]. +//<> and <>. + + +==== Examples + +The following example gets bucket information for the `it-ops-kpi` job: + +[source,js] +-------------------------------------------------- +GET _xpack/ml/anomaly_detectors/it-ops-kpi/results/buckets +{ + "anomaly_score": 80, + "start": "1454530200001" +} +-------------------------------------------------- +// CONSOLE +// TEST[skip:todo] + +In this example, the API returns a single result that matches the specified +score and time constraints: +[source,js] +---- +{ + "count": 1, + "buckets": [ + { + "job_id": "it-ops-kpi", + "timestamp": 1454943900000, + "anomaly_score": 94.1706, + "bucket_span": 300, + "initial_anomaly_score": 94.1706, + "event_count": 153, + "is_interim": false, + "bucket_influencers": [ + { + "job_id": "it-ops-kpi", + "result_type": "bucket_influencer", + "influencer_field_name": "bucket_time", + "initial_anomaly_score": 94.1706, + "anomaly_score": 94.1706, + "raw_anomaly_score": 2.32119, + "probability": 0.00000575042, + "timestamp": 1454943900000, + "bucket_span": 300, + "is_interim": false + } + ], + "processing_time_ms": 2, + "partition_scores": [], + "result_type": "bucket" + } + ] +} +---- diff --git a/x-pack/docs/en/rest-api/ml/get-calendar-event.asciidoc b/x-pack/docs/en/rest-api/ml/get-calendar-event.asciidoc new file mode 100644 index 0000000000000..1a10ad68d7f22 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/get-calendar-event.asciidoc @@ -0,0 +1,106 @@ +[role="xpack"] +[[ml-get-calendar-event]] +=== Get Scheduled Events API +++++ +Get Scheduled Events +++++ + +This API enables you to retrieve information about the scheduled events in +calendars. + + +==== Request + +`GET _xpack/ml/calendars//events` + + +`GET _xpack/ml/calendars/_all/events` + + +===== Description + +You can get scheduled event information for a single calendar or for all +calendars by using `_all`. + +==== Path Parameters + +`calendar_id` (required):: + (string) Identifier for the calendar. + +==== Request Body + +`end`:: + (string) Specifies to get events with timestamps earlier than this time. + +`from`:: + (integer) Skips the specified number of events. + +`size`:: + (integer) Specifies the maximum number of events to obtain. + +`start`:: + (string) Specifies to get events with timestamps after this time. + +==== Results + +The API returns the following information: + +`events`:: + (array) An array of scheduled event resources. + For more information, see <>. + + +==== Authorization + +You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster +privileges to use this API. For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. 
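+
+Because `_all` is accepted in place of a calendar identifier, a single request
+can also return the events from every calendar. The following sketch
+additionally restricts the results to events that start after an illustrative
+timestamp:
+
+[source,js]
+--------------------------------------------------
+GET _xpack/ml/calendars/_all/events
+{
+  "start": "1513641600000"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:illustrative example]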
+ + +==== Examples + +The following example gets information about the scheduled events in the +`planned-outages` calendar: + +[source,js] +-------------------------------------------------- +GET _xpack/ml/calendars/planned-outages/events +-------------------------------------------------- +// CONSOLE +// TEST[setup:calendar_outages_addevent] + +The API returns the following results: + +[source,js] +---- +{ + "count": 3, + "events": [ + { + "description": "event 1", + "start_time": 1513641600000, + "end_time": 1513728000000, + "calendar_id": "planned-outages", + "event_id": "LS8LJGEBMTCMA-qz49st" + }, + { + "description": "event 2", + "start_time": 1513814400000, + "end_time": 1513900800000, + "calendar_id": "planned-outages", + "event_id": "Li8LJGEBMTCMA-qz49st" + }, + { + "description": "event 3", + "start_time": 1514160000000, + "end_time": 1514246400000, + "calendar_id": "planned-outages", + "event_id": "Ly8LJGEBMTCMA-qz49st" + } + ] +} +---- +// TESTRESPONSE[s/LS8LJGEBMTCMA-qz49st/$body.$_path/] +// TESTRESPONSE[s/Li8LJGEBMTCMA-qz49st/$body.$_path/] +// TESTRESPONSE[s/Ly8LJGEBMTCMA-qz49st/$body.$_path/] + +For more information about these properties, see <>. diff --git a/x-pack/docs/en/rest-api/ml/get-calendar.asciidoc b/x-pack/docs/en/rest-api/ml/get-calendar.asciidoc new file mode 100644 index 0000000000000..245d570947276 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/get-calendar.asciidoc @@ -0,0 +1,82 @@ +[role="xpack"] +[[ml-get-calendar]] +=== Get Calendars API +++++ +Get Calendars +++++ + +This API enables you to retrieve configuration information for calendars. + + +==== Request + +`GET _xpack/ml/calendars/` + + +`GET _xpack/ml/calendars/_all` + + +===== Description + +You can get information for a single calendar or for all calendars by using +`_all`. + + +==== Path Parameters + +`calendar_id`:: + (string) Identifier for the calendar. + + +==== Request Body + +`from`::: + (integer) Skips the specified number of calendars. + +`size`::: + (integer) Specifies the maximum number of calendars to obtain. + + +==== Results + +The API returns the following information: + +`calendars`:: + (array) An array of calendar resources. + For more information, see <>. + + +==== Authorization + +You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster +privileges to use this API. For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. + + +==== Examples + +The following example gets configuration information for the `planned-outages` +calendar: + +[source,js] +-------------------------------------------------- +GET _xpack/ml/calendars/planned-outages +-------------------------------------------------- +// CONSOLE +// TEST[setup:calendar_outages_addjob] + +The API returns the following results: +[source,js] +---- +{ + "count": 1, + "calendars": [ + { + "calendar_id": "planned-outages", + "job_ids": [ + "total-requests" + ] + } + ] +} +---- +//TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/get-category.asciidoc b/x-pack/docs/en/rest-api/ml/get-category.asciidoc new file mode 100644 index 0000000000000..37d0a95c14c71 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/get-category.asciidoc @@ -0,0 +1,97 @@ +[role="xpack"] +[[ml-get-category]] +=== Get Categories API +++++ +Get Categories +++++ + +This API enables you to retrieve job results for one or more categories. 
+ + +==== Request + +`GET _xpack/ml/anomaly_detectors//results/categories` + + +`GET _xpack/ml/anomaly_detectors//results/categories/` + +==== Description + +For more information about categories, see +{xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages]. +//<>. + +==== Path Parameters + +`job_id`:: + (string) Identifier for the job. + +`category_id`:: + (long) Identifier for the category. If you do not specify this optional parameter, + the API returns information about all categories in the job. + + +==== Request Body + +`page`:: +`from`::: + (integer) Skips the specified number of categories. +`size`::: + (integer) Specifies the maximum number of categories to obtain. + + +==== Results + +The API returns the following information: + +`categories`:: + (array) An array of category objects. For more information, see + <>. + + +==== Authorization + +You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster +privileges to use this API. You also need `read` index privilege on the index +that stores the results. The `machine_learning_admin` and `machine_learning_user` +roles provide these privileges. For more information, see +{xpack-ref}/security-privileges.html[Security Privileges] and +{xpack-ref}/built-in-roles.html[Built-in Roles]. +//<> and <>. + + +==== Examples + +The following example gets information about one category for the +`it_ops_new_logs` job: + +[source,js] +-------------------------------------------------- +GET _xpack/ml/anomaly_detectors/it_ops_new_logs/results/categories +{ + "page":{ + "size": 1 + } +} +-------------------------------------------------- +// CONSOLE +// TEST[skip:todo] + +In this example, the API returns the following information: +[source,js] +---- +{ + "count": 11, + "categories": [ + { + "job_id": "it_ops_new_logs", + "category_id": 1, + "terms": "Actual Transaction Already Voided Reversed hostname dbserver.acme.com physicalhost esxserver1.acme.com vmhost app1.acme.com", + "regex": ".*?Actual.+?Transaction.+?Already.+?Voided.+?Reversed.+?hostname.+?dbserver.acme.com.+?physicalhost.+?esxserver1.acme.com.+?vmhost.+?app1.acme.com.*", + "max_matching_length": 137, + "examples": [ + "Actual Transaction Already Voided / Reversed;hostname=dbserver.acme.com;physicalhost=esxserver1.acme.com;vmhost=app1.acme.com" + ] + } + ] +} +---- diff --git a/x-pack/docs/en/rest-api/ml/get-datafeed-stats.asciidoc b/x-pack/docs/en/rest-api/ml/get-datafeed-stats.asciidoc new file mode 100644 index 0000000000000..6c5b3af650b9a --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/get-datafeed-stats.asciidoc @@ -0,0 +1,100 @@ +[role="xpack"] +[[ml-get-datafeed-stats]] +=== Get {dfeed-cap} Statistics API +++++ +Get {dfeed-cap} Statistics +++++ + +This API enables you to retrieve usage information for {dfeeds}. + + +==== Request + + +`GET _xpack/ml/datafeeds//_stats` + + +`GET _xpack/ml/datafeeds/,/_stats` + + +`GET _xpack/ml/datafeeds/_stats` + + +`GET _xpack/ml/datafeeds/_all/_stats` + + + + +==== Description + +You can get statistics for multiple {dfeeds} in a single API request by using a +comma-separated list of {dfeeds} or a wildcard expression. You can get +statistics for all {dfeeds} by using `_all`, by specifying `*` as the +``, or by omitting the ``. + +If the {dfeed} is stopped, the only information you receive is the +`datafeed_id` and the `state`. + + +==== Path Parameters + +`feed_id`:: + (string) Identifier for the {dfeed}. It can be a {dfeed} identifier or a + wildcard expression. 
If you do not specify one of these options, the API + returns statistics for all {dfeeds}. + + +==== Results + +The API returns the following information: + +`datafeeds`:: + (array) An array of {dfeed} count objects. + For more information, see <>. + + +==== Authorization + +You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster +privileges to use this API. For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. + + +==== Examples + +The following example gets usage information for the +`datafeed-total-requests` {dfeed}: + +[source,js] +-------------------------------------------------- +GET _xpack/ml/datafeeds/datafeed-total-requests/_stats +-------------------------------------------------- +// CONSOLE +// TEST[setup:server_metrics_startdf] + +The API returns the following results: +[source,js] +---- +{ + "count": 1, + "datafeeds": [ + { + "datafeed_id": "datafeed-total-requests", + "state": "started", + "node": { + "id": "2spCyo1pRi2Ajo-j-_dnPX", + "name": "node-0", + "ephemeral_id": "hoXMLZB0RWKfR9UPPUCxXX", + "transport_address": "127.0.0.1:9300", + "attributes": { + "ml.machine_memory": "17179869184", + "ml.max_open_jobs": "20", + "ml.enabled": "true" + } + }, + "assignment_explanation": "" + } + ] +} +---- +// TESTRESPONSE[s/"2spCyo1pRi2Ajo-j-_dnPX"/$body.$_path/] +// TESTRESPONSE[s/"node-0"/$body.$_path/] +// TESTRESPONSE[s/"hoXMLZB0RWKfR9UPPUCxXX"/$body.$_path/] +// TESTRESPONSE[s/"127.0.0.1:9300"/$body.$_path/] +// TESTRESPONSE[s/"17179869184"/$body.datafeeds.0.node.attributes.ml\\.machine_memory/] diff --git a/x-pack/docs/en/rest-api/ml/get-datafeed.asciidoc b/x-pack/docs/en/rest-api/ml/get-datafeed.asciidoc new file mode 100644 index 0000000000000..8d582ed672aff --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/get-datafeed.asciidoc @@ -0,0 +1,92 @@ +[role="xpack"] +[[ml-get-datafeed]] +=== Get {dfeeds-cap} API +++++ +Get {dfeeds-cap} +++++ + +This API enables you to retrieve configuration information for {dfeeds}. + +==== Request + + +`GET _xpack/ml/datafeeds/` + + +`GET _xpack/ml/datafeeds/,` + + +`GET _xpack/ml/datafeeds/` + + +`GET _xpack/ml/datafeeds/_all` + + + +===== Description + +You can get information for multiple {dfeeds} in a single API request by using a +comma-separated list of {dfeeds} or a wildcard expression. You can get +information for all {dfeeds} by using `_all`, by specifying `*` as the +``, or by omitting the ``. + +==== Path Parameters + +`feed_id`:: + (string) Identifier for the {dfeed}. It can be a {dfeed} identifier or a + wildcard expression. If you do not specify one of these options, the API + returns information about all {dfeeds}. + + +==== Results + +The API returns the following information: + +`datafeeds`:: + (array) An array of {dfeed} objects. + For more information, see <>. + + +==== Authorization + +You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster +privileges to use this API. For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. 
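+
+Because the {dfeed} identifier accepts wildcard expressions, one request can
+return the configuration of several {dfeeds} at once. The following sketch
+assumes that your {dfeed} identifiers share the `datafeed-` prefix used
+elsewhere in these examples:
+
+[source,js]
+--------------------------------------------------
+GET _xpack/ml/datafeeds/datafeed-*
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:illustrative example]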
+ + +==== Examples + +The following example gets configuration information for the +`datafeed-total-requests` {dfeed}: + +[source,js] +-------------------------------------------------- +GET _xpack/ml/datafeeds/datafeed-total-requests +-------------------------------------------------- +// CONSOLE +// TEST[setup:server_metrics_datafeed] + +The API returns the following results: +[source,js] +---- +{ + "count": 1, + "datafeeds": [ + { + "datafeed_id": "datafeed-total-requests", + "job_id": "total-requests", + "query_delay": "83474ms", + "indices": [ + "server-metrics" + ], + "types": [], + "query": { + "match_all": { + "boost": 1.0 + } + }, + "scroll_size": 1000, + "chunking_config": { + "mode": "auto" + } + } + ] +} +---- +// TESTRESPONSE[s/"query.boost": "1.0"/"query.boost": $body.query.boost/] diff --git a/x-pack/docs/en/rest-api/ml/get-influencer.asciidoc b/x-pack/docs/en/rest-api/ml/get-influencer.asciidoc new file mode 100644 index 0000000000000..6c49e66e944ac --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/get-influencer.asciidoc @@ -0,0 +1,109 @@ +[role="xpack"] +[[ml-get-influencer]] +=== Get Influencers API +++++ +Get Influencers +++++ + +This API enables you to retrieve job results for one or more influencers. + + +==== Request + +`GET _xpack/ml/anomaly_detectors//results/influencers` + +//===== Description + +==== Path Parameters + +`job_id`:: + (string) Identifier for the job. + +==== Request Body + +`desc`:: + (boolean) If true, the results are sorted in descending order. + +`end`:: + (string) Returns influencers with timestamps earlier than this time. + +`exclude_interim`:: + (boolean) If true, the output excludes interim results. + By default, interim results are included. + +`influencer_score`:: + (double) Returns influencers with anomaly scores greater or equal than this value. + +`page`:: +`from`::: + (integer) Skips the specified number of influencers. +`size`::: + (integer) Specifies the maximum number of influencers to obtain. + +`sort`:: + (string) Specifies the sort field for the requested influencers. + By default the influencers are sorted by the `influencer_score` value. + +`start`:: + (string) Returns influencers with timestamps after this time. + + +==== Results + +The API returns the following information: + +`influencers`:: + (array) An array of influencer objects. + For more information, see <>. + + +==== Authorization + +You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster +privileges to use this API. You also need `read` index privilege on the index +that stores the results. The `machine_learning_admin` and `machine_learning_user` +roles provide these privileges. For more information, see +{xpack-ref}/security-privileges.html[Security Privileges] and +{xpack-ref}/built-in-roles.html[Built-in Roles]. +//<> and <>. 
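+
+In addition to sorting, the request body parameters can filter the results.
+The following sketch returns only influencers from the `it_ops_new_kpi` job
+with an influencer score of at least 80, starting from an illustrative
+timestamp:
+
+[source,js]
+--------------------------------------------------
+GET _xpack/ml/anomaly_detectors/it_ops_new_kpi/results/influencers
+{
+  "influencer_score": 80,
+  "start": "1454943600000"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:illustrative example]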
+ + +==== Examples + +The following example gets influencer information for the `it_ops_new_kpi` job: + +[source,js] +-------------------------------------------------- +GET _xpack/ml/anomaly_detectors/it_ops_new_kpi/results/influencers +{ + "sort": "influencer_score", + "desc": true +} +-------------------------------------------------- +// CONSOLE +// TEST[skip:todo] + +In this example, the API returns the following information, sorted based on the +influencer score in descending order: +[source,js] +---- +{ + "count": 28, + "influencers": [ + { + "job_id": "it_ops_new_kpi", + "result_type": "influencer", + "influencer_field_name": "kpi_indicator", + "influencer_field_value": "online_purchases", + "kpi_indicator": "online_purchases", + "influencer_score": 94.1386, + "initial_influencer_score": 94.1386, + "probability": 0.000111612, + "bucket_span": 600, + "is_interim": false, + "timestamp": 1454943600000 + }, + ... + ] +} +---- diff --git a/x-pack/docs/en/rest-api/ml/get-job-stats.asciidoc b/x-pack/docs/en/rest-api/ml/get-job-stats.asciidoc new file mode 100644 index 0000000000000..48ebac280aae3 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/get-job-stats.asciidoc @@ -0,0 +1,109 @@ +[role="xpack"] +[[ml-get-job-stats]] +=== Get Job Statistics API +++++ +Get Job Statistics +++++ + +This API enables you to retrieve usage information for jobs. + + +==== Request + + + +`GET _xpack/ml/anomaly_detectors//_stats` + +`GET _xpack/ml/anomaly_detectors/,/_stats` + + +`GET _xpack/ml/anomaly_detectors/_stats` + + +`GET _xpack/ml/anomaly_detectors/_all/_stats` + + + +===== Description + +You can get statistics for multiple jobs in a single API request by using a +group name, a comma-separated list of jobs, or a wildcard expression. You can +get statistics for all jobs by using `_all`, by specifying `*` as the +``, or by omitting the ``. + + +==== Path Parameters + +`job_id`:: + (string) An identifier for the job. It can be a job identifier, a group name, + or a wildcard expression. If you do not specify one of these options, the API + returns statistics for all jobs. + + +==== Results + +The API returns the following information: + +`jobs`:: + (array) An array of job statistics objects. + For more information, see <>. + + +==== Authorization + +You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster +privileges to use this API. For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. 
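+
+You can also request statistics for an explicit list of jobs. The following
+sketch uses a comma-separated list; the job names are reused from other
+examples in these pages and are illustrative:
+
+[source,js]
+--------------------------------------------------
+GET _xpack/ml/anomaly_detectors/farequote,total-requests/_stats
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:illustrative example]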
+ + +==== Examples + +The following example gets usage information for the `farequote` job: + +[source,js] +-------------------------------------------------- +GET _xpack/ml/anomaly_detectors/farequote/_stats +-------------------------------------------------- +// CONSOLE +// TEST[skip:todo] + +The API returns the following results: +[source,js] +---- +{ + "count": 1, + "jobs": [ + { + "job_id": "farequote", + "data_counts": { + "job_id": "farequote", + "processed_record_count": 86275, + "processed_field_count": 172550, + "input_bytes": 6744714, + "input_field_count": 172550, + "invalid_date_count": 0, + "missing_field_count": 0, + "out_of_order_timestamp_count": 0, + "empty_bucket_count": 0, + "sparse_bucket_count": 15, + "bucket_count": 1528, + "earliest_record_timestamp": 1454803200000, + "latest_record_timestamp": 1455235196000, + "last_data_time": 1491948163685, + "latest_sparse_bucket_timestamp": 1455174900000, + "input_record_count": 86275 + }, + "model_size_stats": { + "job_id": "farequote", + "result_type": "model_size_stats", + "model_bytes": 387594, + "total_by_field_count": 21, + "total_over_field_count": 0, + "total_partition_field_count": 20, + "bucket_allocation_failures_count": 0, + "memory_status": "ok", + "log_time": 1491948163000, + "timestamp": 1455234600000 + }, + "state": "closed" + } + ] +} +---- diff --git a/x-pack/docs/en/rest-api/ml/get-job.asciidoc b/x-pack/docs/en/rest-api/ml/get-job.asciidoc new file mode 100644 index 0000000000000..c606cc5ad40e2 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/get-job.asciidoc @@ -0,0 +1,103 @@ +[role="xpack"] +[[ml-get-job]] +=== Get Jobs API +++++ +Get Jobs +++++ + +This API enables you to retrieve configuration information for jobs. + + +==== Request + +`GET _xpack/ml/anomaly_detectors/` + + +`GET _xpack/ml/anomaly_detectors/,` + + +`GET _xpack/ml/anomaly_detectors/` + + +`GET _xpack/ml/anomaly_detectors/_all` + + +===== Description + +You can get information for multiple jobs in a single API request by using a +group name, a comma-separated list of jobs, or a wildcard expression. You can +get information for all jobs by using `_all`, by specifying `*` as the +``, or by omitting the ``. + + +==== Path Parameters + +`job_id`:: + (string) Identifier for the job. It can be a job identifier, a group name, + or a wildcard expression. If you do not specify one of these options, the API + returns information for all jobs. + +==== Results + +The API returns the following information: + +`jobs`:: + (array) An array of job resources. + For more information, see <>. + + +==== Authorization + +You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster +privileges to use this API. For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. 
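+
+If your jobs are organized into groups, a group name can be used in place of a
+job identifier. The group name `production` in the following sketch is purely
+hypothetical:
+
+[source,js]
+--------------------------------------------------
+GET _xpack/ml/anomaly_detectors/production
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:illustrative example]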
+ + +==== Examples + +The following example gets configuration information for the `total-requests` job: + +[source,js] +-------------------------------------------------- +GET _xpack/ml/anomaly_detectors/total-requests +-------------------------------------------------- +// CONSOLE +// TEST[setup:server_metrics_job] + +The API returns the following results: +[source,js] +---- +{ + "count": 1, + "jobs": [ + { + "job_id": "total-requests", + "job_type": "anomaly_detector", + "job_version": "7.0.0-alpha1", + "description": "Total sum of requests", + "create_time": 1517011406091, + "analysis_config": { + "bucket_span": "10m", + "detectors": [ + { + "detector_description": "Sum of total", + "function": "sum", + "field_name": "total", + "detector_index": 0 + } + ], + "influencers": [ ] + }, + "analysis_limits": { + "model_memory_limit": "1024mb", + "categorization_examples_limit": 4 + }, + "data_description": { + "time_field": "timestamp", + "time_format": "epoch_ms" + }, + "model_snapshot_retention_days": 1, + "results_index_name": "shared" + } + ] +} +---- +// TESTRESPONSE[s/"7.0.0-alpha1"/$body.$_path/] +// TESTRESPONSE[s/1517011406091/$body.$_path/] diff --git a/x-pack/docs/en/rest-api/ml/get-overall-buckets.asciidoc b/x-pack/docs/en/rest-api/ml/get-overall-buckets.asciidoc new file mode 100644 index 0000000000000..d0e8c1f214bd4 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/get-overall-buckets.asciidoc @@ -0,0 +1,190 @@ +[role="xpack"] +[[ml-get-overall-buckets]] +=== Get Overall Buckets API +++++ +Get Overall Buckets +++++ + +This API enables you to retrieve overall bucket results that summarize the +bucket results of multiple jobs. + +==== Request + +`GET _xpack/ml/anomaly_detectors//results/overall_buckets` + + +`GET _xpack/ml/anomaly_detectors/,/results/overall_buckets` + + +`GET _xpack/ml/anomaly_detectors/_all/results/overall_buckets` + +==== Description + +You can summarize the bucket results for all jobs by using `_all` or by +specifying `*` as the ``. + +An overall bucket has a span equal to the largest `bucket_span` value for the +specified jobs. + +The `overall_score` is calculated by combining the scores of all +the buckets within the overall bucket span. First, the maximum `anomaly_score` per +job in the overall bucket is calculated. Then the `top_n` of those scores are +averaged to result in the `overall_score`. This means that you can fine-tune +the `overall_score` so that it is more or less sensitive to the number +of jobs that detect an anomaly at the same time. For example, if you set `top_n` +to `1`, the `overall_score` is the maximum bucket +score in the overall bucket. Alternatively, if you set `top_n` to the number of +jobs, the `overall_score` is high only when all jobs detect anomalies in that +overall bucket. + +In addition, the optional parameter `bucket_span` may be used in order +to request overall buckets that span longer than the largest job's `bucket_span`. +When set, the `overall_score` will be the max `overall_score` of the corresponding +overall buckets with a span equal to the largest job's `bucket_span`. + +==== Path Parameters + +`job_id`:: + (string) Identifier for the job. It can be a job identifier, a group name, a + comma-separated list of jobs or groups, or a wildcard expression. + +==== Request Body + +`allow_no_jobs`:: + (boolean) If `false` and the `job_id` does not match any job an error will + be returned. The default value is `true`. + +`bucket_span`:: + (string) The span of the overall buckets. 
Must be greater or equal + to the largest job's `bucket_span`. Defaults to the largest job's `bucket_span`. + +`end`:: + (string) Returns overall buckets with timestamps earlier than this time. + +`exclude_interim`:: + (boolean) If `true`, the output excludes interim overall buckets. + Overall buckets are interim if any of the job buckets within + the overall bucket interval are interim. + By default, interim results are included. + +`overall_score`:: + (double) Returns overall buckets with overall scores greater or equal than this value. + +`start`:: + (string) Returns overall buckets with timestamps after this time. + +`top_n`:: + (integer) The number of top job bucket scores to be used in the + `overall_score` calculation. The default value is `1`. + + +===== Results + +The API returns the following information: + +`overall_buckets`:: + (array) An array of overall bucket objects. For more information, see + <>. + + +==== Authorization + +You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster +privileges to use this API. You also need `read` index privilege on the index +that stores the results. The `machine_learning_admin` and `machine_learning_user` +roles provide these privileges. For more information, see +{xpack-ref}/security-privileges.html[Security Privileges] and +{xpack-ref}/built-in-roles.html[Built-in Roles]. +//<> and <>. + + +==== Examples + +The following example gets overall buckets for jobs with IDs matching `job-*`: + +[source,js] +-------------------------------------------------- +GET _xpack/ml/anomaly_detectors/job-*/results/overall_buckets +{ + "overall_score": 80, + "start": "1403532000000" +} +-------------------------------------------------- +// CONSOLE +// TEST[skip:todo] + +In this example, the API returns a single result that matches the specified +score and time constraints. 
The `overall_score` is the max job score as +`top_n` defaults to 1 when not specified: +[source,js] +---- +{ + "count": 1, + "overall_buckets": [ + { + "timestamp" : 1403532000000, + "bucket_span" : 3600, + "overall_score" : 80.0, + "jobs" : [ + { + "job_id" : "job-1", + "max_anomaly_score" : 30.0 + }, + { + "job_id" : "job-2", + "max_anomaly_score" : 10.0 + }, + { + "job_id" : "job-3", + "max_anomaly_score" : 80.0 + } + ], + "is_interim" : false, + "result_type" : "overall_bucket" + } + ] +} +---- + +The next example is similar but this time `top_n` is set to `2`: + +[source,js] +-------------------------------------------------- +GET _xpack/ml/anomaly_detectors/job-*/results/overall_buckets +{ + "top_n": 2, + "overall_score": 50.0, + "start": "1403532000000" +} +-------------------------------------------------- +// CONSOLE +// TEST[skip:todo] + +Note how the `overall_score` is now the average of the top 2 job scores: +[source,js] +---- +{ + "count": 1, + "overall_buckets": [ + { + "timestamp" : 1403532000000, + "bucket_span" : 3600, + "overall_score" : 55.0, + "jobs" : [ + { + "job_id" : "job-1", + "max_anomaly_score" : 30.0 + }, + { + "job_id" : "job-2", + "max_anomaly_score" : 10.0 + }, + { + "job_id" : "job-3", + "max_anomaly_score" : 80.0 + } + ], + "is_interim" : false, + "result_type" : "overall_bucket" + } + ] +} +---- diff --git a/x-pack/docs/en/rest-api/ml/get-record.asciidoc b/x-pack/docs/en/rest-api/ml/get-record.asciidoc new file mode 100644 index 0000000000000..6cd222027e66b --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/get-record.asciidoc @@ -0,0 +1,118 @@ +[role="xpack"] +[[ml-get-record]] +=== Get Records API +++++ +Get Records +++++ + +This API enables you to retrieve anomaly records for a job. + + +==== Request + +`GET _xpack/ml/anomaly_detectors//results/records` + +//===== Description + +==== Path Parameters + +`job_id`:: + (string) Identifier for the job. + + +==== Request Body + +`desc`:: + (boolean) If true, the results are sorted in descending order. + +`end`:: + (string) Returns records with timestamps earlier than this time. + +`exclude_interim`:: + (boolean) If true, the output excludes interim results. + By default, interim results are included. + +`page`:: +`from`::: + (integer) Skips the specified number of records. +`size`::: + (integer) Specifies the maximum number of records to obtain. + +`record_score`:: + (double) Returns records with anomaly scores greater or equal than this value. + +`sort`:: + (string) Specifies the sort field for the requested records. + By default, the records are sorted by the `anomaly_score` value. + +`start`:: + (string) Returns records with timestamps after this time. + + +==== Results + +The API returns the following information: + +`records`:: + (array) An array of record objects. For more information, see + <>. + + +==== Authorization + +You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster +privileges to use this API. You also need `read` index privilege on the index +that stores the results. The `machine_learning_admin` and `machine_learning_user` +roles provide these privileges. For more information, see +{xpack-ref}/security-privileges.html[Security Privileges] and +{xpack-ref}/built-in-roles.html[Built-in Roles]. +//<> and <>. 
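+
+The score and time filters can also be used on their own. The following sketch
+returns only records from the `it-ops-kpi` job with a record score of at least
+70 and excludes interim results:
+
+[source,js]
+--------------------------------------------------
+GET _xpack/ml/anomaly_detectors/it-ops-kpi/results/records
+{
+  "record_score": 70,
+  "exclude_interim": true
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:illustrative example]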
+ + +==== Examples + +The following example gets record information for the `it-ops-kpi` job: + +[source,js] +-------------------------------------------------- +GET _xpack/ml/anomaly_detectors/it-ops-kpi/results/records +{ + "sort": "record_score", + "desc": true, + "start": "1454944100000" +} +-------------------------------------------------- +// CONSOLE +// TEST[skip:todo] + +In this example, the API returns twelve results for the specified +time constraints: +[source,js] +---- +{ + "count": 12, + "records": [ + { + "job_id": "it-ops-kpi", + "result_type": "record", + "probability": 0.00000332668, + "record_score": 72.9929, + "initial_record_score": 65.7923, + "bucket_span": 300, + "detector_index": 0, + "is_interim": false, + "timestamp": 1454944200000, + "function": "low_sum", + "function_description": "sum", + "typical": [ + 1806.48 + ], + "actual": [ + 288 + ], + "field_name": "events_per_min" + }, + ... + ] +} +---- diff --git a/x-pack/docs/en/rest-api/ml/get-snapshot.asciidoc b/x-pack/docs/en/rest-api/ml/get-snapshot.asciidoc new file mode 100644 index 0000000000000..b992f5be7df31 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/get-snapshot.asciidoc @@ -0,0 +1,113 @@ +[role="xpack"] +[[ml-get-snapshot]] +=== Get Model Snapshots API +++++ +Get Model Snapshots +++++ + +This API enables you to retrieve information about model snapshots. + + +==== Request + +`GET _xpack/ml/anomaly_detectors//model_snapshots` + + +`GET _xpack/ml/anomaly_detectors//model_snapshots/` + +//===== Description + +==== Path Parameters + +`job_id`:: + (string) Identifier for the job. + +`snapshot_id`:: + (string) Identifier for the model snapshot. If you do not specify this + optional parameter, the API returns information about all model snapshots. + +==== Request Body + +`desc`:: + (boolean) If true, the results are sorted in descending order. + +`end`:: + (date) Returns snapshots with timestamps earlier than this time. + +`from`:: + (integer) Skips the specified number of snapshots. + +`size`:: + (integer) Specifies the maximum number of snapshots to obtain. + +`sort`:: + (string) Specifies the sort field for the requested snapshots. + By default, the snapshots are sorted by their timestamp. + +`start`:: + (string) Returns snapshots with timestamps after this time. + + +==== Results + +The API returns the following information: + +`model_snapshots`:: + (array) An array of model snapshot objects. For more information, see + <>. + + +==== Authorization + +You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster +privileges to use this API. For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. +//<>. 
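+
+To look at the most recent snapshots first, you can sort on the snapshot
+timestamp in descending order and limit the number of results. The following
+request is an illustrative sketch for the `farequote` job used in the example
+below:
+
+[source,js]
+--------------------------------------------------
+GET _xpack/ml/anomaly_detectors/farequote/model_snapshots
+{
+  "sort": "timestamp",
+  "desc": true,
+  "size": 5
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[skip:illustrative example]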
+ + +==== Examples + +The following example gets model snapshot information for the +`it_ops_new_logs` job: + +[source,js] +-------------------------------------------------- +GET _xpack/ml/anomaly_detectors/farequote/model_snapshots +{ + "start": "1491852977000" +} +-------------------------------------------------- +// CONSOLE +// TEST[skip:todo] + +In this example, the API provides a single result: +[source,js] +---- +{ + "count": 1, + "model_snapshots": [ + { + "job_id": "farequote", + "min_version": "6.3.0", + "timestamp": 1491948163000, + "description": "State persisted due to job close at 2017-04-11T15:02:43-0700", + "snapshot_id": "1491948163", + "snapshot_doc_count": 1, + "model_size_stats": { + "job_id": "farequote", + "result_type": "model_size_stats", + "model_bytes": 387594, + "total_by_field_count": 21, + "total_over_field_count": 0, + "total_partition_field_count": 20, + "bucket_allocation_failures_count": 0, + "memory_status": "ok", + "log_time": 1491948163000, + "timestamp": 1455234600000 + }, + "latest_record_time_stamp": 1455235196000, + "latest_result_time_stamp": 1455234900000, + "retain": false + } + ] +} +---- diff --git a/x-pack/docs/en/rest-api/ml/jobcounts.asciidoc b/x-pack/docs/en/rest-api/ml/jobcounts.asciidoc new file mode 100644 index 0000000000000..b2e24a298cbd0 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/jobcounts.asciidoc @@ -0,0 +1,200 @@ +[role="xpack"] +[[ml-jobstats]] +=== Job Statistics + +The get job statistics API provides information about the operational +progress of a job. + +`assignment_explanation`:: + (string) For open jobs only, contains messages relating to the selection + of a node to run the job. + +`data_counts`:: + (object) An object that describes the number of records processed and + any related error counts. See <>. + +`job_id`:: + (string) A unique identifier for the job. + +`model_size_stats`:: + (object) An object that provides information about the size and contents of the model. + See <> + +`node`:: + (object) For open jobs only, contains information about the node where the + job runs. See <>. + +`open_time`:: + (string) For open jobs only, the elapsed time for which the job has been open. + For example, `28746386s`. + +`state`:: + (string) The status of the job, which can be one of the following values: + + `opened`::: The job is available to receive and process data. + `closed`::: The job finished successfully with its model state persisted. + The job must be opened before it can accept further data. + `closing`::: The job close action is in progress and has not yet completed. + A closing job cannot accept further data. + `failed`::: The job did not finish successfully due to an error. + This situation can occur due to invalid input data. + If the job had irrevocably failed, it must be force closed and then deleted. + If the {dfeed} can be corrected, the job can be closed and then re-opened. + `opening`::: The job open action is in progress and has not yet completed. + +[float] +[[ml-datacounts]] +==== Data Counts Objects + +The `data_counts` object describes the number of records processed +and any related error counts. + +The `data_count` values are cumulative for the lifetime of a job. If a model snapshot is reverted +or old results are deleted, the job counts are not reset. + +`bucket_count`:: + (long) The number of bucket results produced by the job. + +`earliest_record_timestamp`:: + (string) The timestamp of the earliest chronologically ordered record. + The datetime string is in ISO 8601 format. 
+ +`empty_bucket_count`:: + (long) The number of buckets which did not contain any data. If your data contains many + empty buckets, consider increasing your `bucket_span` or using functions that are tolerant + to gaps in data such as `mean`, `non_null_sum` or `non_zero_count`. + +`input_bytes`:: + (long) The number of raw bytes read by the job. + +`input_field_count`:: + (long) The total number of record fields read by the job. This count includes + fields that are not used in the analysis. + +`input_record_count`:: + (long) The number of data records read by the job. + +`invalid_date_count`:: + (long) The number of records with either a missing date field or a date that could not be parsed. + +`job_id`:: + (string) A unique identifier for the job. + +`last_data_time`:: + (datetime) The timestamp at which data was last analyzed, according to server time. + +`latest_empty_bucket_timestamp`:: + (date) The timestamp of the last bucket that did not contain any data. + +`latest_record_timestamp`:: + (date) The timestamp of the last processed record. + +`latest_sparse_bucket_timestamp`:: + (date) The timestamp of the last bucket that was considered sparse. + +`missing_field_count`:: + (long) The number of records that are missing a field that the job is + configured to analyze. Records with missing fields are still processed because + it is possible that not all fields are missing. The value of + `processed_record_count` includes this count. + + +NOTE: If you are using {dfeeds} or posting data to the job in JSON format, a +high `missing_field_count` is often not an indication of data issues. It is not +necessarily a cause for concern. + +`out_of_order_timestamp_count`:: + (long) The number of records that are out of time sequence and + outside of the latency window. This information is applicable only when + you provide data to the job by using the <>. + These out of order records are discarded, since jobs require time series data + to be in ascending chronological order. + +`processed_field_count`:: + (long) The total number of fields in all the records that have been processed + by the job. Only fields that are specified in the detector configuration + object contribute to this count. The time stamp is not included in this count. + +`processed_record_count`:: + (long) The number of records that have been processed by the job. + This value includes records with missing fields, since they are nonetheless + analyzed. + + If you use {dfeeds} and have aggregations in your search query, + the `processed_record_count` will be the number of aggregated records + processed, not the number of {es} documents. + +`sparse_bucket_count`:: + (long) The number of buckets that contained few data points compared to the + expected number of data points. If your data contains many sparse buckets, + consider using a longer `bucket_span`. + +[float] +[[ml-modelsizestats]] +==== Model Size Stats Objects + +The `model_size_stats` object has the following properties: + +`bucket_allocation_failures_count`:: + (long) The number of buckets for which new entities in incoming data were not + processed due to insufficient model memory. This situation is also signified + by a `hard_limit: memory_status` property value. + +`job_id`:: + (string) A numerical character string that uniquely identifies the job. + +`log_time`:: + (date) The timestamp of the `model_size_stats` according to server time. + +`memory_status`:: + (string) The status of the mathematical models. 
+ This property can have one of the following values: + `ok`::: The models stayed below the configured value. + `soft_limit`::: The models used more than 60% of the configured memory limit + and older unused models will be pruned to free up space. + `hard_limit`::: The models used more space than the configured memory limit. + As a result, not all incoming data was processed. + +`model_bytes`:: + (long) The number of bytes of memory used by the models. This is the maximum + value since the last time the model was persisted. If the job is closed, + this value indicates the latest size. + +`result_type`:: + (string) For internal use. The type of result. + +`total_by_field_count`:: + (long) The number of `by` field values that were analyzed by the models.+ + +NOTE: The `by` field values are counted separately for each detector and partition. + +`total_over_field_count`:: + (long) The number of `over` field values that were analyzed by the models.+ + +NOTE: The `over` field values are counted separately for each detector and partition. + +`total_partition_field_count`:: + (long) The number of `partition` field values that were analyzed by the models. + +`timestamp`:: + (date) The timestamp of the `model_size_stats` according to the timestamp of the data. + +[float] +[[ml-stats-node]] +==== Node Objects + +The `node` objects contains properties for the node that runs the job. +This information is available only for open jobs. + +`id`:: + (string) The unique identifier of the node. + +`name`:: + (string) The node name. + +`ephemeral_id`:: + (string) The ephemeral id of the node. + +`transport_address`:: + (string) The host and port where transport HTTP connections are accepted. + +`attributes`:: + (object) For example, {"ml.max_open_jobs": "10"}. diff --git a/x-pack/docs/en/rest-api/ml/jobresource.asciidoc b/x-pack/docs/en/rest-api/ml/jobresource.asciidoc new file mode 100644 index 0000000000000..bb959fd728cb6 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/jobresource.asciidoc @@ -0,0 +1,488 @@ +[role="xpack"] +[[ml-job-resource]] +=== Job Resources + +A job resource has the following properties: + +`analysis_config`:: + (object) The analysis configuration, which specifies how to analyze the data. + See <>. + +`analysis_limits`:: + (object) Defines approximate limits on the memory resource requirements for the job. + See <>. + +`background_persist_interval`:: + (time units) Advanced configuration option. + The time between each periodic persistence of the model. + The default value is a randomized value between 3 to 4 hours, which avoids + all jobs persisting at exactly the same time. The smallest allowed value is + 1 hour. ++ +-- +TIP: For very large models (several GB), persistence could take 10-20 minutes, +so do not set the `background_persist_interval` value too low. + +-- + +`create_time`:: + (string) The time the job was created. For example, `1491007356077`. This + property is informational; you cannot change its value. + +`custom_settings`:: + (object) Advanced configuration option. Contains custom meta data about the + job. For example, it can contain custom URL information as shown in + {xpack-ref}/ml-configuring-url.html[Adding Custom URLs to Machine Learning Results]. + +`data_description`:: + (object) Describes the data format and how APIs parse timestamp fields. + See <>. + +`description`:: + (string) An optional description of the job. + +`established_model_memory`:: + (long) The approximate amount of memory resources that have been used for + analytical processing. 
This field is present only when the analytics have used + a stable amount of memory for several consecutive buckets. + +`finished_time`:: + (string) If the job closed or failed, this is the time the job finished, + otherwise it is `null`. This property is informational; you cannot change its + value. + +`groups`:: + (array of strings) A list of job groups. A job can belong to no groups or + many. For example, `["group1", "group2"]`. + +`job_id`:: + (string) The unique identifier for the job. This identifier can contain + lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It + must start and end with alphanumeric characters. This property is + informational; you cannot change the identifier for existing jobs. + +`job_type`:: + (string) Reserved for future use, currently set to `anomaly_detector`. + +`job_version`:: + (string) The version of {es} that existed on the node when the job was created. + +`model_plot_config`:: + (object) Configuration properties for storing additional model information. + See <>. + +`model_snapshot_id`:: + (string) A numerical character string that uniquely identifies the model + snapshot. For example, `1491007364`. This property is informational; you + cannot change its value. For more information about model snapshots, see + <>. + +`model_snapshot_retention_days`:: + (long) The time in days that model snapshots are retained for the job. + Older snapshots are deleted. The default value is `1`, which means snapshots + are retained for one day (twenty-four hours). + +`renormalization_window_days`:: + (long) Advanced configuration option. + The period over which adjustments to the score are applied, as new data is seen. + The default value is the longer of 30 days or 100 `bucket_spans`. + +`results_index_name`:: + (string) The name of the index in which to store the {ml} results. + The default value is `shared`, + which corresponds to the index name `.ml-anomalies-shared` + +`results_retention_days`:: + (long) Advanced configuration option. + The number of days for which job results are retained. + Once per day at 00:30 (server time), results older than this period are + deleted from Elasticsearch. The default value is null, which means results + are retained. + +[[ml-analysisconfig]] +==== Analysis Configuration Objects + +An analysis configuration object has the following properties: + +`bucket_span`:: + (time units) The size of the interval that the analysis is aggregated into, + typically between `5m` and `1h`. The default value is `5m`. + +`categorization_field_name`:: + (string) If this property is specified, the values of the specified field will + be categorized. The resulting categories must be used in a detector by setting + `by_field_name`, `over_field_name`, or `partition_field_name` to the keyword + `mlcategory`. For more information, see + {xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages]. + +`categorization_filters`:: + (array of strings) If `categorization_field_name` is specified, + you can also define optional filters. This property expects an array of + regular expressions. The expressions are used to filter out matching sequences + from the categorization field values. You can use this functionality to fine + tune the categorization by excluding sequences from consideration when + categories are defined. For example, you can exclude SQL statements that + appear in your log files. For more information, see + {xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages]. 
+ This property cannot be used at the same time as `categorization_analyzer`. + If you only want to define simple regular expression filters that are applied + prior to tokenization, setting this property is the easiest method. + If you also want to customize the tokenizer or post-tokenization filtering, + use the `categorization_analyzer` property instead and include the filters as + `pattern_replace` character filters. The effect is exactly the same. + +`categorization_analyzer`:: + (object or string) If `categorization_field_name` is specified, you can also + define the analyzer that is used to interpret the categorization field. This + property cannot be used at the same time as `categorization_filters`. See + <>. + +`detectors`:: + (array) An array of detector configuration objects, + which describe the anomaly detectors that are used in the job. + See <>. + ++ +-- +NOTE: If the `detectors` array does not contain at least one detector, +no analysis can occur and an error is returned. + +-- + +`influencers`:: + (array of strings) A comma separated list of influencer field names. + Typically these can be the by, over, or partition fields that are used in the + detector configuration. You might also want to use a field name that is not + specifically named in a detector, but is available as part of the input data. + When you use multiple detectors, the use of influencers is recommended as it + aggregates results for each influencer entity. + +`latency`:: + (time units) The size of the window in which to expect data that is out of + time order. The default value is 0 (no latency). If you specify a non-zero + value, it must be greater than or equal to one second. For more information + about time units, see + {ref}/common-options.html#time-units[Time Units]. ++ +-- +NOTE: Latency is only applicable when you send data by using +the <> API. + +-- + +`multivariate_by_fields`:: + (boolean) This functionality is reserved for internal use. It is not supported + for use in customer environments and is not subject to the support SLA of + official GA features. ++ +-- +If set to `true`, the analysis will automatically find correlations +between metrics for a given `by` field value and report anomalies when those +correlations cease to hold. For example, suppose CPU and memory usage on host A +is usually highly correlated with the same metrics on host B. Perhaps this +correlation occurs because they are running a load-balanced application. +If you enable this property, then anomalies will be reported when, for example, +CPU usage on host A is high and the value of CPU usage on host B is low. +That is to say, you'll see an anomaly when the CPU of host A is unusual given +the CPU of host B. + +NOTE: To use the `multivariate_by_fields` property, you must also specify +`by_field_name` in your detector. + +-- + +`summary_count_field_name`:: + (string) If this property is specified, the data that is fed to the job is + expected to be pre-summarized. This property value is the name of the field + that contains the count of raw data points that have been summarized. The same + `summary_count_field_name` applies to all detectors in the job. ++ +-- + +NOTE: The `summary_count_field_name` property cannot be used with the `metric` +function. + +-- + +After you create a job, you cannot change the analysis configuration object; all +the properties are informational. + +[float] +[[ml-detectorconfig]] +==== Detector Configuration Objects + +Detector configuration objects specify which data fields a job analyzes. 
+They also specify which analytical functions are used. +You can specify multiple detectors for a job. +Each detector has the following properties: + +`by_field_name`:: + (string) The field used to split the data. + In particular, this property is used for analyzing the splits with respect to their own history. + It is used for finding unusual values in the context of the split. + +`detector_description`:: + (string) A description of the detector. For example, `Low event rate`. + +`detector_index`:: + (integer) A unique identifier for the detector. This identifier is based on + the order of the detectors in the `analysis_config`, starting at zero. You can + use this identifier when you want to update a specific detector. + +`exclude_frequent`:: + (string) Contains one of the following values: `all`, `none`, `by`, or `over`. + If set, frequent entities are excluded from influencing the anomaly results. + Entities can be considered frequent over time or frequent in a population. + If you are working with both over and by fields, then you can set `exclude_frequent` + to `all` for both fields, or to `by` or `over` for those specific fields. + +`field_name`:: + (string) The field that the detector uses in the function. If you use an event rate + function such as `count` or `rare`, do not specify this field. + ++ +-- +NOTE: The `field_name` cannot contain double quotes or backslashes. + +-- + +`function`:: + (string) The analysis function that is used. + For example, `count`, `rare`, `mean`, `min`, `max`, and `sum`. For more + information, see {xpack-ref}/ml-functions.html[Function Reference]. + +`over_field_name`:: + (string) The field used to split the data. + In particular, this property is used for analyzing the splits with respect to + the history of all splits. It is used for finding unusual values in the + population of all splits. For more information, see + {xpack-ref}/ml-configuring-pop.html[Performing Population Analysis]. + +`partition_field_name`:: + (string) The field used to segment the analysis. + When you use this property, you have completely independent baselines for each value of this field. + +`use_null`:: + (boolean) Defines whether a new series is used as the null series + when there is no value for the by or partition fields. The default value is `false`. + ++ +-- +IMPORTANT: Field names are case sensitive. For example, a field named 'Bytes' +is different from one named 'bytes'. + +-- + +After you create a job, the only property you can change in the detector +configuration object is the `detector_description`; all other properties are +informational. + +[float] +[[ml-datadescription]] +==== Data Description Objects + +The data description defines the format of the input data when you send data to +the job by using the <> API. Note that when you configure +a {dfeed}, these properties are automatically set. + +When data is received via the <> API, it is not stored +in {es}. Only the results for anomaly detection are retained. + +A data description object has the following properties: + +`format`:: + (string) Only `JSON` format is supported at this time. + +`time_field`:: + (string) The name of the field that contains the timestamp. + The default value is `time`. + +`time_format`:: + (string) The time format, which can be `epoch`, `epoch_ms`, or a custom pattern. + The default value is `epoch`, which refers to UNIX or Epoch time (the number of seconds + since 1 Jan 1970). + The value `epoch_ms` indicates that time is measured in milliseconds since the epoch.
+ The `epoch` and `epoch_ms` time formats accept either integer or real values. + ++ +-- +NOTE: Custom patterns must conform to the Java `DateTimeFormatter` class. +When you use date-time formatting patterns, it is recommended that you provide +the full date, time and time zone. For example: `yyyy-MM-dd'T'HH:mm:ssX`. +If the pattern that you specify is not sufficient to produce a complete timestamp, +job creation fails. + +-- + +[float] +[[ml-categorizationanalyzer]] +==== Categorization Analyzer + +The categorization analyzer specifies how the `categorization_field` is +interpreted by the categorization process. The syntax is very similar to that +used to define the `analyzer` in the <>. + +The `categorization_analyzer` field can be specified either as a string or as +an object. + +If it is a string it must refer to a <> or +one added by another plugin. + +If it is an object it has the following properties: + +`char_filter`:: + (array of strings or objects) One or more + <>. In addition to the built-in + character filters, other plugins can provide more character filters. This + property is optional. If it is not specified, no character filters are applied + prior to categorization. If you are customizing some other aspect of the + analyzer and you need to achieve the equivalent of `categorization_filters` + (which are not permitted when some other aspect of the analyzer is customized), + add them here as + <>. + +`tokenizer`:: + (string or object) The name or definition of the + <> to use after character filters are applied. + This property is compulsory if `categorization_analyzer` is specified as an + object. Machine learning provides a tokenizer called `ml_classic` that + tokenizes in the same way as the non-customizable tokenizer in older versions + of the product. If you want to use that tokenizer but change the character or + token filters, specify `"tokenizer": "ml_classic"` in your + `categorization_analyzer`. + +`filter`:: + (array of strings or objects) One or more + <>. In addition to the built-in token + filters, other plugins can provide more token filters. This property is + optional. If it is not specified, no token filters are applied prior to + categorization. + +If you omit the `categorization_analyzer`, the following default values are used: + +[source,js] +-------------------------------------------------- +POST _xpack/ml/anomaly_detectors/_validate +{ + "analysis_config" : { + "categorization_analyzer" : { + "tokenizer" : "ml_classic", + "filter" : [ + { "type" : "stop", "stopwords": [ + "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday", + "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun", + "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December", + "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", + "GMT", "UTC" + ] } + ] + }, + "categorization_field_name": "message", + "detectors" :[{ + "function":"count", + "by_field_name": "mlcategory" + }] + }, + "data_description" : { + } +} +-------------------------------------------------- +// CONSOLE + +If you specify any part of the `categorization_analyzer`, however, any omitted +sub-properties are _not_ set to default values. + +If you are categorizing non-English messages in a language where words are +separated by spaces, you might get better results if you change the day or month +words in the stop token filter to the appropriate words in your language. 
If you +are categorizing messages in a language where words are not separated by spaces, +you must use a different tokenizer as well in order to get sensible +categorization results. + +It is important to be aware that analyzing for categorization of machine +generated log messages is a little different from tokenizing for search. +Features that work well for search, such as stemming, synonym substitution, and +lowercasing are likely to make the results of categorization worse. However, in +order for drill down from {ml} results to work correctly, the tokens that the +categorization analyzer produces must be similar to those produced by the search +analyzer. If they are sufficiently similar, when you search for the tokens that +the categorization analyzer produces then you find the original document that +the categorization field value came from. + +For more information, see +{xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages]. + +[float] +[[ml-apilimits]] +==== Analysis Limits + +Limits can be applied for the resources required to hold the mathematical models in memory. +These limits are approximate and can be set per job. They do not control the +memory used by other processes, for example the Elasticsearch Java processes. +If necessary, you can increase the limits after the job is created. + +The `analysis_limits` object has the following properties: + +`categorization_examples_limit`:: + (long) The maximum number of examples stored per category in memory and + in the results data store. The default value is 4. If you increase this value, + more examples are available, however it requires that you have more storage available. + If you set this value to `0`, no examples are stored. + ++ +-- +NOTE: The `categorization_examples_limit` only applies to analysis that uses categorization. +For more information, see +{xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages]. + +-- + +`model_memory_limit`:: + (long or string) The approximate maximum amount of memory resources that are + required for analytical processing. Once this limit is approached, data pruning + becomes more aggressive. Upon exceeding this limit, new entities are not + modeled. The default value for jobs created in version 6.1 and later is `1024mb`. + This value will need to be increased for jobs that are expected to analyze high + cardinality fields, but the default is set to a relatively small size to ensure + that high resource usage is a conscious decision. The default value for jobs + created in versions earlier than 6.1 is `4096mb`. ++ +-- +If you specify a number instead of a string, the units are assumed to be MiB. +Specifying a string is recommended for clarity. If you specify a byte size unit +of `b` or `kb` and the number does not equate to a discrete number of megabytes, +it is rounded down to the closest MiB. The minimum valid value is 1 MiB. If you +specify a value less than 1 MiB, an error occurs. For more information about +supported byte size units, see +{ref}/common-options.html#byte-units[Byte size units]. + +If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` +setting, an error occurs when you try to create jobs that have +`model_memory_limit` values greater than that setting. For more information, +see <>. +-- + +[float] +[[ml-apimodelplotconfig]] +==== Model Plot Config + +This advanced configuration option stores model information along with the +results. It provides a more detailed view into anomaly detection. 
+ +WARNING: If you enable model plot it can add considerable overhead to the performance +of the system; it is not feasible for jobs with many entities. + +Model plot provides a simplified and indicative view of the model and its bounds. +It does not display complex features such as multivariate correlations or multimodal data. +As such, anomalies may occasionally be reported which cannot be seen in the model plot. + +Model plot config can be configured when the job is created or updated later. It must be +disabled if performance issues are experienced. + +The `model_plot_config` object has the following properties: + +`enabled`:: + (boolean) If true, enables calculation and storage of the model bounds for + each entity that is being analyzed. By default, this is not enabled. + +`terms`:: + experimental[] (string) Limits data collection to this comma separated list of + partition or by field values. If terms are not specified or it is an empty + string, no filtering is applied. For example, "CPU,NetworkIn,DiskWrites". + Wildcards are not supported. Only the specified `terms` can be viewed when + using the Single Metric Viewer. diff --git a/x-pack/docs/en/rest-api/ml/open-job.asciidoc b/x-pack/docs/en/rest-api/ml/open-job.asciidoc new file mode 100644 index 0000000000000..37d201ed2264e --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/open-job.asciidoc @@ -0,0 +1,69 @@ +[role="xpack"] +[[ml-open-job]] +=== Open Jobs API +++++ +Open Jobs +++++ + +This API enables you to open one or more jobs. +A job must be opened in order for it to be ready to receive and analyze data. +A job can be opened and closed multiple times throughout its lifecycle. + + +==== Request + +`POST _xpack/ml/anomaly_detectors/{job_id}/_open` + + +==== Description + +When you open a new job, it starts with an empty model. + +When you open an existing job, the most recent model state is automatically loaded. +The job is ready to resume its analysis from where it left off, once new data is received. + + +==== Path Parameters + +`job_id` (required):: +(string) Identifier for the job + + +==== Request Body + +`timeout`:: + (time) Controls the time to wait until a job has opened. + The default value is 30 minutes. + + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. + + +==== Examples + +The following example opens the `total-requests` job and sets an optional +property: + +[source,js] +-------------------------------------------------- +POST _xpack/ml/anomaly_detectors/total-requests/_open +{ + "timeout": "35m" +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:server_metrics_job] + +When the job opens, you receive the following results: +[source,js] +---- +{ + "opened": true +} +---- +//CONSOLE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/post-calendar-event.asciidoc b/x-pack/docs/en/rest-api/ml/post-calendar-event.asciidoc new file mode 100644 index 0000000000000..ab0c1ebef64ab --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/post-calendar-event.asciidoc @@ -0,0 +1,87 @@ +[role="xpack"] +[[ml-post-calendar-event]] +=== Add Events to Calendar API +++++ +Add Events to Calendar +++++ + +This API enables you to post scheduled events in a calendar. 
+ +==== Request + +`POST _xpack/ml/calendars//events` + + +==== Description + +This API accepts a list of {xpack-ref}/ml-calendars.html[scheduled events], each +of which must have a start time, end time, and description. + +==== Path Parameters + +`calendar_id` (required):: + (string) Identifier for the calendar. + + +==== Request Body + +`events`:: + (array) A list of one or more scheduled events. See <>. + + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. + + +==== Examples + +You can add scheduled events to the `planned-outages` calendar as follows: + +[source,js] +-------------------------------------------------- +POST _xpack/ml/calendars/planned-outages/events +{ + "events" : [ + {"description": "event 1", "start_time": 1513641600000, "end_time": 1513728000000}, + {"description": "event 2", "start_time": 1513814400000, "end_time": 1513900800000}, + {"description": "event 3", "start_time": 1514160000000, "end_time": 1514246400000} + ] +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:calendar_outages_addjob] + +The API returns the following results: + +[source,js] +---- +{ + "events": [ + { + "description": "event 1", + "start_time": 1513641600000, + "end_time": 1513728000000, + "calendar_id": "planned-outages" + }, + { + "description": "event 2", + "start_time": 1513814400000, + "end_time": 1513900800000, + "calendar_id": "planned-outages" + }, + { + "description": "event 3", + "start_time": 1514160000000, + "end_time": 1514246400000, + "calendar_id": "planned-outages" + } + ] +} +---- +//TESTRESPONSE + +For more information about these properties, see +<>. diff --git a/x-pack/docs/en/rest-api/ml/post-data.asciidoc b/x-pack/docs/en/rest-api/ml/post-data.asciidoc new file mode 100644 index 0000000000000..ec20be5dadb12 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/post-data.asciidoc @@ -0,0 +1,110 @@ +[role="xpack"] +[[ml-post-data]] +=== Post Data to Jobs API +++++ +Post Data to Jobs +++++ + +This API enables you to send data to an anomaly detection job for analysis. + + +==== Request + +`POST _xpack/ml/anomaly_detectors//_data` + + +==== Description + +The job must have a state of `open` to receive and process the data. + +The data that you send to the job must use the JSON format. Multiple JSON +documents can be sent, either adjacent with no separator in between them or +whitespace separated. Newline delimited JSON (NDJSON) is a possible whitespace +separated format, and for this the `Content-Type` header should be set to +`application/x-ndjson`. + +Upload sizes are limited to the Elasticsearch HTTP receive buffer size +(default 100 MB). If your data is larger, split it into multiple chunks +and upload each one separately in sequential time order. When running in +real time, it is generally recommended that you perform many small uploads, +rather than queueing data to upload larger files. + +When uploading data, check the <> for progress. +The following records will not be processed: + +* Records not in chronological order and outside the latency window +* Records with an invalid timestamp + +//TBD link to Working with Out of Order timeseries concept doc + +IMPORTANT: For each job, data can only be accepted from a single connection at +a time. It is not currently possible to post data to multiple jobs using wildcards +or a comma-separated list.
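+
+As described above, documents can be streamed to the job as newline-delimited
+JSON. The following is a minimal sketch of such an upload with `curl`; the job
+name `my_job`, its `time` and `total` fields, and the timestamps are
+hypothetical, and the job is assumed to use the default `epoch` time format:
+
+[source,js]
+--------------------------------------------------
+$ curl -s -H "Content-Type: application/x-ndjson" -X POST "http://localhost:9200/_xpack/ml/anomaly_detectors/my_job/_data" --data-binary $'{"time":1454020569,"total":40}\n{"time":1454020629,"total":42}\n'
+--------------------------------------------------
+// NOTCONSOLE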
+ + +==== Path Parameters + +`job_id` (required):: + (string) Identifier for the job + + +==== Query Parameters + +`reset_start`:: + (string) Specifies the start of the bucket resetting range + +`reset_end`:: + (string) Specifies the end of the bucket resetting range + + +==== Request Body + +A sequence of one or more JSON documents containing the data to be analyzed. +Only whitespace characters are permitted in between the documents. + + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. +//<>. + + +==== Examples + +The following example posts data from the it_ops_new_kpi.json file to the `it_ops_new_kpi` job: + +[source,js] +-------------------------------------------------- +$ curl -s -H "Content-type: application/json" +-X POST http:\/\/localhost:9200/_xpack/ml/anomaly_detectors/it_ops_new_kpi/_data +--data-binary @it_ops_new_kpi.json +-------------------------------------------------- + +When the data is sent, you receive information about the operational progress of the job. +For example: + +[source,js] +---- +{ + "job_id":"it_ops_new_kpi", + "processed_record_count":21435, + "processed_field_count":64305, + "input_bytes":2589063, + "input_field_count":85740, + "invalid_date_count":0, + "missing_field_count":0, + "out_of_order_timestamp_count":0, + "empty_bucket_count":16, + "sparse_bucket_count":0, + "bucket_count":2165, + "earliest_record_timestamp":1454020569000, + "latest_record_timestamp":1455318669000, + "last_data_time":1491952300658, + "latest_empty_bucket_timestamp":1454541600000, + "input_record_count":21435 +} +---- + +For more information about these properties, see <>. diff --git a/x-pack/docs/en/rest-api/ml/preview-datafeed.asciidoc b/x-pack/docs/en/rest-api/ml/preview-datafeed.asciidoc new file mode 100644 index 0000000000000..5f3bc5054e394 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/preview-datafeed.asciidoc @@ -0,0 +1,80 @@ +[role="xpack"] +[[ml-preview-datafeed]] +=== Preview {dfeeds-cap} API +++++ +Preview {dfeeds-cap} +++++ + +This API enables you to preview a {dfeed}. + + +==== Request + +`GET _xpack/ml/datafeeds//_preview` + + +==== Description + +The preview {dfeeds} API returns the first "page" of results from the `search` +that is created by using the current {dfeed} settings. This preview shows the +structure of the data that will be passed to the anomaly detection engine. + + +==== Path Parameters + +`datafeed_id` (required):: + (string) Identifier for the {dfeed} + + +==== Authorization + +You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster +privileges to use this API. For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. +//<>. + + +==== Security Integration + +When {security} is enabled, the {dfeed} query will be previewed using the +credentials of the user calling the preview {dfeed} API. When the {dfeed} +is started it will run the query using the roles of the last user to +create or update it. If the two sets of roles differ then the preview may +not accurately reflect what the {dfeed} will return when started. To avoid +such problems, the same user that creates/updates the {dfeed} should preview +it to ensure it is returning the expected data. 
+ + +==== Examples + +The following example obtains a preview of the `datafeed-farequote` {dfeed}: + +[source,js] +-------------------------------------------------- +GET _xpack/ml/datafeeds/datafeed-farequote/_preview +-------------------------------------------------- +// CONSOLE +// TEST[skip:todo] + +The data that is returned for this example is as follows: +[source,js] +---- +[ + { + "@timestamp": 1454803200000, + "airline": "AAL", + "responsetime": 132.20460510253906 + }, + { + "@timestamp": 1454803200000, + "airline": "JZA", + "responsetime": 990.4628295898438 + }, + { + "@timestamp": 1454803200000, + "airline": "JBU", + "responsetime": 877.5927124023438 + }, + ... +] +---- diff --git a/x-pack/docs/en/rest-api/ml/put-calendar-job.asciidoc b/x-pack/docs/en/rest-api/ml/put-calendar-job.asciidoc new file mode 100644 index 0000000000000..5d2c012a919d7 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/put-calendar-job.asciidoc @@ -0,0 +1,54 @@ +[role="xpack"] +[[ml-put-calendar-job]] +=== Add Jobs to Calendar API +++++ +Add Jobs to Calendar +++++ + +This API enables you to add a job to a calendar. + +==== Request + +`PUT _xpack/ml/calendars//jobs/` + + +==== Path Parameters + +`calendar_id` (required):: + (string) Identifier for the calendar. + +`job_id` (required):: + (string) An identifier for the job. It can be a job identifier, a group name, or a + comma-separated list of jobs or groups. + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. + + +==== Examples + +The following example associates the `planned-outages` calendar with the +`total-requests` job: + +[source,js] +-------------------------------------------------- +PUT _xpack/ml/calendars/planned-outages/jobs/total-requests +-------------------------------------------------- +// CONSOLE +// TEST[setup:calendar_outages_openjob] + +The API returns the following results: + +[source,js] +---- +{ + "calendar_id": "planned-outages", + "job_ids": [ + "total-requests" + ] +} +---- +//TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/put-calendar.asciidoc b/x-pack/docs/en/rest-api/ml/put-calendar.asciidoc new file mode 100644 index 0000000000000..23997906cb7f0 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/put-calendar.asciidoc @@ -0,0 +1,56 @@ +[role="xpack"] +[[ml-put-calendar]] +=== Create Calendar API +++++ +Create Calendar +++++ + +This API enables you to instantiate a calendar. + +==== Request + +`PUT _xpack/ml/calendars/` + +===== Description + +For more information, see +{xpack-ref}/ml-calendars.html[Calendars and Scheduled Events]. + +==== Path Parameters + +`calendar_id` (required):: + (string) Identifier for the calendar. + + +==== Request Body + +`description`:: + (string) A description of the calendar. + + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. 
+ + +==== Examples + +The following example creates the `planned-outages` calendar: + +[source,js] +-------------------------------------------------- +PUT _xpack/ml/calendars/planned-outages +-------------------------------------------------- +// CONSOLE + +When the calendar is created, you receive the following results: +[source,js] +---- +{ + "calendar_id": "planned-outages", + "job_ids": [] +} +---- +//TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/put-datafeed.asciidoc b/x-pack/docs/en/rest-api/ml/put-datafeed.asciidoc new file mode 100644 index 0000000000000..f1e41cad8b343 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/put-datafeed.asciidoc @@ -0,0 +1,135 @@ +[role="xpack"] +[[ml-put-datafeed]] +=== Create {dfeeds-cap} API +++++ +Create {dfeeds-cap} +++++ + +This API enables you to instantiate a {dfeed}. + + +==== Request + +`PUT _xpack/ml/datafeeds/` + + +==== Description + +You must create a job before you create a {dfeed}. You can associate only one +{dfeed} to each job. + + +==== Path Parameters + +`feed_id` (required):: + (string) A numerical character string that uniquely identifies the {dfeed}. + + +==== Request Body + +`aggregations`:: + (object) If set, the {dfeed} performs aggregation searches. + For more information, see <>. + +`chunking_config`:: + (object) Specifies how data searches are split into time chunks. + See <>. + +`frequency`:: + (time units) The interval at which scheduled queries are made while the {dfeed} + runs in real time. The default value is either the bucket span for short + bucket spans, or, for longer bucket spans, a sensible fraction of the bucket + span. For example: `150s`. + +`indices` (required):: + (array) An array of index names. Wildcards are supported. For example: + `["it_ops_metrics", "server*"]`. + +`job_id` (required):: + (string) A numerical character string that uniquely identifies the job. + +`query`:: + (object) The {es} query domain-specific language (DSL). This value + corresponds to the query object in an {es} search POST body. All the + options that are supported by {Es} can be used, as this object is + passed verbatim to {es}. By default, this property has the following + value: `{"match_all": {"boost": 1}}`. + +`query_delay`:: + (time units) The number of seconds behind real time that data is queried. For + example, if data from 10:04 a.m. might not be searchable in {es} until + 10:06 a.m., set this property to 120 seconds. The default value is `60s`. + +`script_fields`:: + (object) Specifies scripts that evaluate custom expressions and returns + script fields to the {dfeed}. + The <> in a job can contain + functions that use these script fields. + For more information, + see {ref}/search-request-script-fields.html[Script Fields]. + +`scroll_size`:: + (unsigned integer) The `size` parameter that is used in {es} searches. + The default value is `1000`. + +`types`:: + (array) A list of types to search for within the specified indices. + For example: `[]`. This property is provided for backwards compatibility with + releases earlier than 6.0.0. For more information, see <>. + +For more information about these properties, +see <>. + + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. + + +==== Security Integration + +When {security} is enabled, your {dfeed} will remember which roles the user who +created it had at the time of creation, and run the query using those same roles. 
+ + +==== Examples + +The following example creates the `datafeed-total-requests` {dfeed}: + +[source,js] +-------------------------------------------------- +PUT _xpack/ml/datafeeds/datafeed-total-requests +{ + "job_id": "total-requests", + "indices": ["server-metrics"] +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:server_metrics_job] + +When the {dfeed} is created, you receive the following results: +[source,js] +---- +{ + "datafeed_id": "datafeed-total-requests", + "job_id": "total-requests", + "query_delay": "83474ms", + "indices": [ + "server-metrics" + ], + "types": [], + "query": { + "match_all": { + "boost": 1.0 + } + }, + "scroll_size": 1000, + "chunking_config": { + "mode": "auto" + } +} +---- +// TESTRESPONSE[s/"query_delay": "83474ms"/"query_delay": $body.query_delay/] +// TESTRESPONSE[s/"query.boost": "1.0"/"query.boost": $body.query.boost/] diff --git a/x-pack/docs/en/rest-api/ml/put-job.asciidoc b/x-pack/docs/en/rest-api/ml/put-job.asciidoc new file mode 100644 index 0000000000000..62d15acf05383 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/put-job.asciidoc @@ -0,0 +1,142 @@ +[role="xpack"] +[[ml-put-job]] +=== Create Jobs API +++++ +Create Jobs +++++ + +This API enables you to instantiate a job. + +==== Request + +`PUT _xpack/ml/anomaly_detectors/` + +//===== Description + +==== Path Parameters + +`job_id` (required):: + (string) Identifier for the job. This identifier can contain lowercase + alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must + start and end with alphanumeric characters. + + +==== Request Body + +`analysis_config`:: + (object) The analysis configuration, which specifies how to analyze the data. + See <>. + +`analysis_limits`:: + (object) Specifies runtime limits for the job. See + <>. + +`background_persist_interval`:: + (time units) Advanced configuration option. The time between each periodic + persistence of the model. See <>. + +`custom_settings`:: + (object) Advanced configuration option. Contains custom meta data about the + job. See <>. + +`data_description` (required):: + (object) Describes the format of the input data. This object is required, but + it can be empty (`{}`). See <>. + +`description`:: + (string) A description of the job. + +`groups`:: + (array of strings) A list of job groups. See <>. + +`model_plot_config`:: + (object) Advanced configuration option. Specifies to store model information + along with the results. This adds overhead to the performance of the system + and is not feasible for jobs with many entities, see <>. + +`model_snapshot_retention_days`:: + (long) The time in days that model snapshots are retained for the job. + Older snapshots are deleted. The default value is `1`, which means snapshots + are retained for one day (twenty-four hours). + +`renormalization_window_days`:: + (long) Advanced configuration option. The period over which adjustments to the + score are applied, as new data is seen. See <>. + +`results_index_name`:: + (string) The name of the index in which to store the {ml} results. The default + value is `shared`, which corresponds to the index name `.ml-anomalies-shared`. + +`results_retention_days`:: + (long) Advanced configuration option. The number of days for which job results + are retained. See <>. + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. 
+ + +==== Examples + +The following example creates the `total-requests` job: + +[source,js] +-------------------------------------------------- +PUT _xpack/ml/anomaly_detectors/total-requests +{ + "description" : "Total sum of requests", + "analysis_config" : { + "bucket_span":"10m", + "detectors": [ + { + "detector_description": "Sum of total", + "function": "sum", + "field_name": "total" + } + ] + }, + "data_description" : { + "time_field":"timestamp", + "time_format": "epoch_ms" + } +} +-------------------------------------------------- +// CONSOLE + +When the job is created, you receive the following results: +[source,js] +---- +{ + "job_id": "total-requests", + "job_type": "anomaly_detector", + "job_version": "7.0.0-alpha1", + "description": "Total sum of requests", + "create_time": 1517011406091, + "analysis_config": { + "bucket_span": "10m", + "detectors": [ + { + "detector_description": "Sum of total", + "function": "sum", + "field_name": "total", + "detector_index": 0 + } + ], + "influencers": [] + }, + "analysis_limits": { + "model_memory_limit": "1024mb", + "categorization_examples_limit": 4 + }, + "data_description": { + "time_field": "timestamp", + "time_format": "epoch_ms" + }, + "model_snapshot_retention_days": 1, + "results_index_name": "shared" +} +---- +// TESTRESPONSE[s/"job_version": "7.0.0-alpha1"/"job_version": $body.job_version/] +// TESTRESPONSE[s/"create_time": 1517011406091/"create_time": $body.create_time/] diff --git a/x-pack/docs/en/rest-api/ml/resultsresource.asciidoc b/x-pack/docs/en/rest-api/ml/resultsresource.asciidoc new file mode 100644 index 0000000000000..fba6522141bf7 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/resultsresource.asciidoc @@ -0,0 +1,455 @@ +[role="xpack"] +[[ml-results-resource]] +=== Results Resources + +Several different result types are created for each job. You can query anomaly +results for _buckets_, _influencers_, and _records_ by using the results API. +Summarized bucket results over multiple jobs can be queried as well; those +results are called _overall buckets_. + +Results are written for each `bucket_span`. The timestamp for the results is the +start of the bucket time interval. + +The results include scores, which are calculated for each anomaly result type and +each bucket interval. These scores are aggregated in order to reduce noise, and +normalized in order to identify and rank the most mathematically significant +anomalies. + +Bucket results provide the top level, overall view of the job and are ideal for +alerts. For example, the bucket results might indicate that at 16:05 the system +was unusual. This information is a summary of all the anomalies, pinpointing +when they occurred. + +Influencer results show which entities were anomalous and when. For example, +the influencer results might indicate that at 16:05 `user_name: Bob` was unusual. +This information is a summary of all the anomalies for each entity, so there +can be a lot of these results. Once you have identified a notable bucket time, +you can look to see which entities were significant. + +Record results provide details about what the individual anomaly was, when it +occurred and which entity was involved. For example, the record results might +indicate that at 16:05 Bob sent 837262434 bytes, when the typical value was +1067 bytes. Once you have identified a bucket time and perhaps a significant +entity too, you can drill through to the record results in order to investigate +the anomalous behavior. 
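+
+For example, a minimal sketch of that drill-down uses the get records API; the
+job name `it-ops-kpi`, the time range, and the score threshold shown here are
+illustrative:
+
+[source,js]
+--------------------------------------------------
+GET _xpack/ml/anomaly_detectors/it-ops-kpi/results/records
+{
+  "start": "2017-04-05T16:00:00Z",
+  "end": "2017-04-05T16:10:00Z",
+  "record_score": 75
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+Only records with a `record_score` at or above the specified threshold are
+returned, which keeps the drill-down focused on the most anomalous records.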
+ +Categorization results contain the definitions of _categories_ that have been +identified. These are only applicable for jobs that are configured to analyze +unstructured log data using categorization. These results do not contain a +timestamp or any calculated scores. For more information, see +{xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages]. + +* <> +* <> +* <> +* <> +* <> + +NOTE: All of these resources and properties are informational; you cannot +change their values. + +[float] +[[ml-results-buckets]] +==== Buckets + +Bucket results provide the top level, overall view of the job and are best for +alerting. + +Each bucket has an `anomaly_score`, which is a statistically aggregated and +normalized view of the combined anomalousness of all the record results within +each bucket. + +One bucket result is written for each `bucket_span` for each job, even if it is +not considered to be anomalous. If the bucket is not anomalous, it has an +`anomaly_score` of zero. + +When you identify an anomalous bucket, you can investigate further by expanding +the bucket resource to show the records as nested objects. Alternatively, you +can access the records resource directly and filter by the date range. + +A bucket resource has the following properties: + +`anomaly_score`:: + (number) The maximum anomaly score, between 0-100, for any of the bucket + influencers. This is an overall, rate-limited score for the job. All the + anomaly records in the bucket contribute to this score. This value might be + updated as new data is analyzed. + +`bucket_influencers`:: + (array) An array of bucket influencer objects. + For more information, see <>. + +`bucket_span`:: + (number) The length of the bucket in seconds. + This value matches the `bucket_span` that is specified in the job. + +`event_count`:: + (number) The number of input data records processed in this bucket. + +`initial_anomaly_score`:: + (number) The maximum `anomaly_score` for any of the bucket influencers. + This is the initial value that was calculated at the time the bucket was + processed. + +`is_interim`:: + (boolean) If true, this is an interim result. In other words, the bucket + results are calculated based on partial input data. + +`job_id`:: + (string) The unique identifier for the job that these results belong to. + +`processing_time_ms`:: + (number) The amount of time, in milliseconds, that it took to analyze the + bucket contents and calculate results. + +`result_type`:: + (string) Internal. This value is always set to `bucket`. + +`timestamp`:: + (date) The start time of the bucket. This timestamp uniquely identifies the + bucket. + + +NOTE: Events that occur exactly at the timestamp of the bucket are included in +the results for the bucket. + + +[float] +[[ml-results-bucket-influencers]] +==== Bucket Influencers + +Bucket influencer results are available as nested objects contained within +bucket results. These results are an aggregation for each type of influencer. +For example, if both `client_ip` and `user_name` were specified as influencers, +then you would be able to determine when the `client_ip` or `user_name` values +were collectively anomalous. + +There is a built-in bucket influencer called `bucket_time` which is always +available. This bucket influencer is the aggregation of all records in the +bucket; it is not just limited to a type of influencer. + +NOTE: A bucket influencer is a type of influencer. 
For example, `client_ip` or +`user_name` can be bucket influencers, whereas `192.168.88.2` and `Bob` are +influencers. + +A bucket influencer object has the following properties: + +`anomaly_score`:: + (number) A normalized score between 0-100, which is calculated for each bucket + influencer. This score might be updated as newer data is analyzed. + +`bucket_span`:: + (number) The length of the bucket in seconds. This value matches the `bucket_span` + that is specified in the job. + +`initial_anomaly_score`:: + (number) The score between 0-100 for each bucket influencer. This score is + the initial value that was calculated at the time the bucket was processed. + +`influencer_field_name`:: + (string) The field name of the influencer. For example, `client_ip` or + `user_name`. + +`influencer_field_value`:: + (string) The field value of the influencer. For example, `192.168.88.2` or + `Bob`. + +`is_interim`:: + (boolean) If true, this is an interim result. In other words, the bucket + influencer results are calculated based on partial input data. + +`job_id`:: + (string) The unique identifier for the job that these results belong to. + +`probability`:: + (number) The probability that the bucket has this behavior, in the range 0 + to 1. For example, 0.0000109783. This value can be held to a high precision + of over 300 decimal places, so the `anomaly_score` is provided as a + human-readable and friendly interpretation of this. + +`raw_anomaly_score`:: + (number) Internal. + +`result_type`:: + (string) Internal. This value is always set to `bucket_influencer`. + +`timestamp`:: + (date) The start time of the bucket for which these results were calculated. + +[float] +[[ml-results-influencers]] +==== Influencers + +Influencers are the entities that have contributed to, or are to blame for, +the anomalies. Influencer results are available only if an +`influencer_field_name` is specified in the job configuration. + +Influencers are given an `influencer_score`, which is calculated based on the +anomalies that have occurred in each bucket interval. For jobs with more than +one detector, this gives a powerful view of the most anomalous entities. + +For example, if you are analyzing unusual bytes sent and unusual domains +visited and you specified `user_name` as the influencer, then an +`influencer_score` for each anomalous user name is written per bucket. For +example, if `user_name: Bob` had an `influencer_score` greater than 75, then +`Bob` would be considered very anomalous during this time interval in one or +both of those areas (unusual bytes sent or unusual domains visited). + +One influencer result is written per bucket for each influencer that is +considered anomalous. + +When you identify an influencer with a high score, you can investigate further +by accessing the records resource for that bucket and enumerating the anomaly +records that contain the influencer. + +An influencer object has the following properties: + +`bucket_span`:: + (number) The length of the bucket in seconds. This value matches the `bucket_span` + that is specified in the job. + +`influencer_score`:: + (number) A normalized score between 0-100, which is based on the probability + of the influencer in this bucket aggregated across detectors. Unlike + `initial_influencer_score`, this value will be updated by a re-normalization + process as new data is analyzed. + +`initial_influencer_score`:: + (number) A normalized score between 0-100, which is based on the probability + of the influencer aggregated across detectors.
This is the initial value that + was calculated at the time the bucket was processed. + +`influencer_field_name`:: + (string) The field name of the influencer. + +`influencer_field_value`:: + (string) The entity that influenced, contributed to, or was to blame for the + anomaly. + +`is_interim`:: + (boolean) If true, this is an interim result. In other words, the influencer + results are calculated based on partial input data. + +`job_id`:: + (string) The unique identifier for the job that these results belong to. + +`probability`:: + (number) The probability that the influencer has this behavior, in the range + 0 to 1. For example, 0.0000109783. This value can be held to a high precision + of over 300 decimal places, so the `influencer_score` is provided as a + human-readable and friendly interpretation of this. +// For example, 0.03 means 3%. This value is held to a high precision of over +//300 decimal places. In scientific notation, a value of 3.24E-300 is highly +//unlikely and therefore highly anomalous. + +`result_type`:: + (string) Internal. This value is always set to `influencer`. + +`timestamp`:: + (date) The start time of the bucket for which these results were calculated. + +NOTE: Additional influencer properties are added, depending on the fields being +analyzed. For example, if it's analyzing `user_name` as an influencer, then a +field `user_name` is added to the result document. This information enables you to +filter the anomaly results more easily. + + +[float] +[[ml-results-records]] +==== Records + +Records contain the detailed analytical results. They describe the anomalous +activity that has been identified in the input data based on the detector +configuration. + +For example, if you are looking for unusually large data transfers, an anomaly +record can identify the source IP address, the destination, the time window +during which it occurred, the expected and actual size of the transfer, and the +probability of this occurrence. + +There can be many anomaly records depending on the characteristics and size of +the input data. In practice, there are often too many to be able to manually +process them. The {xpackml} features therefore perform a sophisticated +aggregation of the anomaly records into buckets. + +The number of record results depends on the number of anomalies found in each +bucket, which relates to the number of time series being modeled and the number of +detectors. + +A record object has the following properties: + +`actual`:: + (array) The actual value for the bucket. + +`bucket_span`:: + (number) The length of the bucket in seconds. + This value matches the `bucket_span` that is specified in the job. + +`by_field_name`:: + (string) The name of the analyzed field. This value is present only if + it is specified in the detector. For example, `client_ip`. + +`by_field_value`:: + (string) The value of `by_field_name`. This value is present only if + it is specified in the detector. For example, `192.168.66.2`. + +`causes`:: + (array) For population analysis, an over field must be specified in the + detector. This property contains an array of anomaly records that are the + causes for the anomaly that has been identified for the over field. If no + over fields exist, this field is not present. This sub-resource contains + the most anomalous records for the `over_field_name`. For scalability reasons, + a maximum of the 10 most significant causes of the anomaly are returned. 
As + part of the core analytical modeling, these low-level anomaly records are + aggregated for their parent over field record. The causes resource contains + similar elements to the record resource, namely `actual`, `typical`, + `*_field_name` and `*_field_value`. Probability and scores are not applicable + to causes. + +`detector_index`:: + (number) A unique identifier for the detector. + +`field_name`:: + (string) Certain functions require a field to operate on, for example, `sum()`. + For those functions, this value is the name of the field to be analyzed. + +`function`:: + (string) The function in which the anomaly occurs, as specified in the + detector configuration. For example, `max`. + +`function_description`:: + (string) The description of the function in which the anomaly occurs, as + specified in the detector configuration. + +`influencers`:: + (array) If `influencers` was specified in the detector configuration, then + this array contains influencers that contributed to or were to blame for an + anomaly. + +`initial_record_score`:: + (number) A normalized score between 0-100, which is based on the + probability of the anomalousness of this record. This is the initial value + that was calculated at the time the bucket was processed. + +`is_interim`:: + (boolean) If true, this is an interim result. In other words, the anomaly + record is calculated based on partial input data. + +`job_id`:: + (string) The unique identifier for the job that these results belong to. + +`over_field_name`:: + (string) The name of the over field that was used in the analysis. This value + is present only if it was specified in the detector. Over fields are used + in population analysis. For example, `user`. + +`over_field_value`:: + (string) The value of `over_field_name`. This value is present only if it + was specified in the detector. For example, `Bob`. + +`partition_field_name`:: + (string) The name of the partition field that was used in the analysis. This + value is present only if it was specified in the detector. For example, + `region`. + +`partition_field_value`:: + (string) The value of `partition_field_name`. This value is present only if + it was specified in the detector. For example, `us-east-1`. + +`probability`:: + (number) The probability of the individual anomaly occurring, in the range + 0 to 1. For example, 0.0000772031. This value can be held to a high precision + of over 300 decimal places, so the `record_score` is provided as a + human-readable and friendly interpretation of this. +//In scientific notation, a value of 3.24E-300 is highly unlikely and therefore +//highly anomalous. + +`record_score`:: + (number) A normalized score between 0-100, which is based on the probability + of the anomalousness of this record. Unlike `initial_record_score`, this + value will be updated by a re-normalization process as new data is analyzed. + +`result_type`:: + (string) Internal. This is always set to `record`. + +`timestamp`:: + (date) The start time of the bucket for which these results were calculated. + +`typical`:: + (array) The typical value for the bucket, according to analytical modeling. + +NOTE: Additional record properties are added, depending on the fields being +analyzed. For example, if it's analyzing `hostname` as a _by field_, then a field +`hostname` is added to the result document. This information enables you to +filter the anomaly results more easily. 
+ + +[float] +[[ml-results-categories]] +==== Categories + +When `categorization_field_name` is specified in the job configuration, it is +possible to view the definitions of the resulting categories. A category +definition describes the common terms matched and contains examples of matched +values. + +The anomaly results from a categorization analysis are available as bucket, +influencer, and record results. For example, the results might indicate that +at 16:45 there was an unusual count of log message category 11. You can then +examine the description and examples of that category. + +A category resource has the following properties: + +`category_id`:: + (unsigned integer) A unique identifier for the category. + +`examples`:: + (array) A list of examples of actual values that matched the category. + +`job_id`:: + (string) The unique identifier for the job that these results belong to. + +`max_matching_length`:: + (unsigned integer) The maximum length of the fields that matched the category. + The value is increased by 10% to enable matching for similar fields that have + not been analyzed. + +`regex`:: + (string) A regular expression that is used to search for values that match the + category. + +`terms`:: + (string) A space separated list of the common tokens that are matched in + values of the category. + +[float] +[[ml-results-overall-buckets]] +==== Overall Buckets + +Overall buckets provide a summary of bucket results over multiple jobs. +Their `bucket_span` equals the longest `bucket_span` of the jobs in question. +The `overall_score` is the `top_n` average of the max `anomaly_score` per job +within the overall bucket time interval. +This means that you can fine-tune the `overall_score` so that it is more +or less sensitive to the number of jobs that detect an anomaly at the same time. + +An overall bucket resource has the following properties: + +`timestamp`:: + (date) The start time of the overall bucket. + +`bucket_span`:: + (number) The length of the bucket in seconds. Matches the `bucket_span` + of the job with the longest one. + +`overall_score`:: + (number) The `top_n` average of the max bucket `anomaly_score` per job. + +`jobs`:: + (array) An array of objects that contain the `max_anomaly_score` per `job_id`. + +`is_interim`:: + (boolean) If true, this is an interim result. In other words, the anomaly + record is calculated based on partial input data. + +`result_type`:: + (string) Internal. This is always set to `overall_bucket`. diff --git a/x-pack/docs/en/rest-api/ml/revert-snapshot.asciidoc b/x-pack/docs/en/rest-api/ml/revert-snapshot.asciidoc new file mode 100644 index 0000000000000..72b934a56b79f --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/revert-snapshot.asciidoc @@ -0,0 +1,127 @@ +[role="xpack"] +[[ml-revert-snapshot]] +=== Revert Model Snapshots API +++++ +Revert Model Snapshots +++++ + +This API enables you to revert to a specific snapshot. + +==== Request + +`POST _xpack/ml/anomaly_detectors//model_snapshots//_revert` + + +==== Description + +The {ml} feature in {xpack} reacts quickly to anomalous input, learning new +behaviors in data. Highly anomalous input increases the variance in the models +whilst the system learns whether this is a new step-change in behavior or a +one-off event. In the case where this anomalous input is known to be a one-off, +then it might be appropriate to reset the model state to a time before this +event. For example, you might consider reverting to a saved snapshot after Black +Friday or a critical system failure. 
+ +//// +To revert to a saved snapshot, you must follow this sequence: +. Close the job +. Revert to a snapshot +. Open the job +. Send new data to the job + +When reverting to a snapshot, there is a choice to make about whether or not +you want to keep the results that were created between the time of the snapshot +and the current time. In the case of Black Friday for instance, you might want +to keep the results and carry on processing data from the current time, +though without the models learning the one-off behavior and compensating for it. +However, say in the event of a critical system failure and you decide to reset +and models to a previous known good state and process data from that time, +it makes sense to delete the intervening results for the known bad period and +resend data from that earlier time. + +Any gaps in data since the snapshot time will be treated as nulls and not modeled. +If there is a partial bucket at the end of the snapshot and/or at the beginning +of the new input data, then this will be ignored and treated as a gap. + +For jobs with many entities, the model state may be very large. +If a model state is several GB, this could take 10-20 mins to revert depending +upon machine spec and resources. If this is the case, please ensure this time +is planned for. +Model size (in bytes) is available as part of the Job Resource Model Size Stats. +//// +IMPORTANT: Before you revert to a saved snapshot, you must close the job. + + +==== Path Parameters + +`job_id` (required):: + (string) Identifier for the job + +`snapshot_id` (required):: + (string) Identifier for the model snapshot + +==== Request Body + +`delete_intervening_results`:: + (boolean) If true, deletes the results in the time period between the + latest results and the time of the reverted snapshot. It also resets the + model to accept records for this time period. The default value is false. + +NOTE: If you choose not to delete intervening results when reverting a snapshot, +the job will not accept input data that is older than the current time. +If you want to resend data, then delete the intervening results. + + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. +//<>. 
+ + +==== Examples + +The following example reverts to the `1491856080` snapshot for the +`it_ops_new_kpi` job: + +[source,js] +-------------------------------------------------- +POST +_xpack/ml/anomaly_detectors/it_ops_new_kpi/model_snapshots/1491856080/_revert +{ + "delete_intervening_results": true +} +-------------------------------------------------- +// CONSOLE +// TEST[skip:todo] + +When the operation is complete, you receive the following results: +[source,js] +---- +{ + "model": { + "job_id": "it_ops_new_kpi", + "min_version": "6.3.0", + "timestamp": 1491856080000, + "description": "State persisted due to job close at 2017-04-10T13:28:00-0700", + "snapshot_id": "1491856080", + "snapshot_doc_count": 1, + "model_size_stats": { + "job_id": "it_ops_new_kpi", + "result_type": "model_size_stats", + "model_bytes": 29518, + "total_by_field_count": 3, + "total_over_field_count": 0, + "total_partition_field_count": 2, + "bucket_allocation_failures_count": 0, + "memory_status": "ok", + "log_time": 1491856080000, + "timestamp": 1455318000000 + }, + "latest_record_time_stamp": 1455318669000, + "latest_result_time_stamp": 1455318000000, + "retain": false + } +} +---- diff --git a/x-pack/docs/en/rest-api/ml/snapshotresource.asciidoc b/x-pack/docs/en/rest-api/ml/snapshotresource.asciidoc new file mode 100644 index 0000000000000..fb2e3d83de6d1 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/snapshotresource.asciidoc @@ -0,0 +1,103 @@ +[role="xpack"] +[[ml-snapshot-resource]] +=== Model Snapshot Resources + +Model snapshots are saved to disk periodically. +By default, this occurs approximately every 3 to 4 hours and is +configurable with the `background_persist_interval` property. + +By default, model snapshots are retained for one day (twenty-four hours). You +can change this behavior by updating the `model_snapshot_retention_days` for the +job. When choosing a new value, consider the following: + +* Persistence enables resilience in the event of a system failure. +* Persistence enables snapshots to be reverted. +* The time taken to persist a job is proportional to the size of the model in memory. + +A model snapshot resource has the following properties: + +`description`:: + (string) An optional description of the snapshot. + +`job_id`:: + (string) A numerical character string that uniquely identifies the job that + the snapshot was created for. + +`min_version`:: + (string) The minimum version required to be able to restore the model snapshot. + +`latest_record_time_stamp`:: + (date) The timestamp of the latest processed record. + +`latest_result_time_stamp`:: + (date) The timestamp of the latest bucket result. + +`model_size_stats`:: + (object) Summary information describing the model. + See <>. + +`retain`:: + (boolean) If true, this snapshot will not be deleted during automatic cleanup + of snapshots older than `model_snapshot_retention_days`. + However, this snapshot will be deleted when the job is deleted. + The default value is false. + +`snapshot_id`:: + (string) A numerical character string that uniquely identifies the model + snapshot. For example: "1491852978". + +`snapshot_doc_count`:: + (long) For internal use only. + +`timestamp`:: + (date) The creation timestamp for the snapshot. + +NOTE: All of these properties are informational with the exception of +`description` and `retain`.
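+
+For example, a minimal sketch of changing those two properties with the update
+model snapshots API; the job and snapshot identifiers are illustrative:
+
+[source,js]
+--------------------------------------------------
+POST _xpack/ml/anomaly_detectors/it_ops_new_kpi/model_snapshots/1491852978/_update
+{
+  "description": "Known good state before the anomalous event",
+  "retain": true
+}
+--------------------------------------------------
+// NOTCONSOLE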
+ +[float] +[[ml-snapshot-stats]] +==== Model Size Statistics + +The `model_size_stats` object has the following properties: + +`bucket_allocation_failures_count`:: + (long) The number of buckets for which entities were not processed due to + memory limit constraints. + +`job_id`:: + (string) A numerical character string that uniquely identifies the job. + +`log_time`:: + (date) The timestamp that the `model_size_stats` were recorded, according to + server-time. + +`memory_status`:: + (string) The status of the memory in relation to its `model_memory_limit`. + Contains one of the following values. + `ok`::: The internal models stayed below the configured value. + `soft_limit`::: The internal models require more than 60% of the configured + memory limit and more aggressive pruning will + be performed in order to try to reclaim space. + `hard_limit`::: The internal models require more space that the configured + memory limit. Some incoming data could not be processed. + +`model_bytes`:: + (long) An approximation of the memory resources required for this analysis. + +`result_type`:: + (string) Internal. This value is always set to "model_size_stats". + +`timestamp`:: + (date) The timestamp that the `model_size_stats` were recorded, according to the bucket timestamp of the data. + +`total_by_field_count`:: + (long) The number of _by_ field values analyzed. Note that these are counted separately for each detector and partition. + +`total_over_field_count`:: + (long) The number of _over_ field values analyzed. Note that these are counted separately for each detector and partition. + +`total_partition_field_count`:: + (long) The number of _partition_ field values analyzed. + +NOTE: All of these properties are informational; you cannot change their values. diff --git a/x-pack/docs/en/rest-api/ml/start-datafeed.asciidoc b/x-pack/docs/en/rest-api/ml/start-datafeed.asciidoc new file mode 100644 index 0000000000000..865ca4ae99722 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/start-datafeed.asciidoc @@ -0,0 +1,114 @@ +[role="xpack"] +[[ml-start-datafeed]] +=== Start {dfeeds-cap} API +++++ +Start {dfeeds-cap} +++++ + +This API enables you to start one or more {dfeeds}. +A {dfeed} must be started in order to retrieve data from {es}. +A {dfeed} can be started and stopped multiple times throughout its lifecycle. + +==== Request + +`POST _xpack/ml/datafeeds//_start` + +==== Description + +NOTE: Before you can start a {dfeed}, the job must be open. Otherwise, an error +occurs. + +When you start a {dfeed}, you can specify a start time. This enables you to +include a training period, providing you have this data available in {es}. +If you want to analyze from the beginning of a dataset, you can specify any date +earlier than that beginning date. + +If you do not specify a start time and the {dfeed} is associated with a new +job, the analysis starts from the earliest time for which data is available. + +When you start a {dfeed}, you can also specify an end time. If you do so, the +job analyzes data from the start time until the end time, at which point the +analysis stops. This scenario is useful for a one-off batch analysis. If you +do not specify an end time, the {dfeed} runs continuously. 
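+
+For example, a one-off batch analysis over a fixed window might look like the
+following sketch, which supplies both a start and an end time (the {dfeed} name
+and the timestamps are illustrative):
+
+[source,js]
+--------------------------------------------------
+POST _xpack/ml/datafeeds/datafeed-total-requests/_start
+{
+  "start": "2017-04-07T18:22:16Z",
+  "end": "2017-04-14T18:22:16Z"
+}
+--------------------------------------------------
+// NOTCONSOLE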
+
+The `start` and `end` times can be specified by using one of the
+following formats: +
+
+- ISO 8601 format with milliseconds, for example `2017-01-22T06:00:00.000Z`
+- ISO 8601 format without milliseconds, for example `2017-01-22T06:00:00+00:00`
+- Seconds from the Epoch, for example `1390370400`
+
+Date-time arguments using either of the ISO 8601 formats must have a time zone
+designator, where Z is accepted as an abbreviation for UTC time.
+
+NOTE: When a URL is expected (for example, in browsers), the `+` used in time
+zone designators must be encoded as `%2B`.
+
+If the system restarts, any jobs that had {dfeeds} running are also restarted.
+
+When a stopped {dfeed} is restarted, it continues processing input data from
+the next millisecond after it was stopped. If new data was indexed for that
+exact millisecond between stopping and starting, it will be ignored.
+If you specify a `start` value that is earlier than the timestamp of the latest
+processed record, the {dfeed} continues from 1 millisecond after the timestamp
+of the latest processed record.
+
+
+==== Path Parameters
+
+`feed_id` (required)::
+(string) Identifier for the {dfeed}
+
+==== Request Body
+
+`end`::
+  (string) The time that the {dfeed} should end. This value is exclusive.
+  The default value is an empty string.
+
+`start`::
+  (string) The time that the {dfeed} should begin. This value is inclusive.
+  The default value is an empty string.
+
+`timeout`::
+  (time) Controls the amount of time to wait until a {dfeed} starts.
+  The default value is 20 seconds.
+
+
+==== Authorization
+
+You must have `manage_ml`, or `manage` cluster privileges to use this API.
+For more information, see
+{xpack-ref}/security-privileges.html[Security Privileges].
+//<>.
+
+
+==== Security Integration
+
+When {security} is enabled, your {dfeed} remembers which roles the last user to
+create or update it had at the time of creation or update, and runs the query
+using those same roles.
+
+
+==== Examples
+
+The following example starts the `datafeed-total-requests` {dfeed}:
+
+[source,js]
+--------------------------------------------------
+POST _xpack/ml/datafeeds/datafeed-total-requests/_start
+{
+  "start": "2017-04-07T18:22:16Z"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:server_metrics_openjob]
+
+When the {dfeed} starts, you receive the following results:
+[source,js]
+----
+{
+  "started": true
+}
+----
+// CONSOLE
+// TESTRESPONSE
diff --git a/x-pack/docs/en/rest-api/ml/stop-datafeed.asciidoc b/x-pack/docs/en/rest-api/ml/stop-datafeed.asciidoc
new file mode 100644
index 0000000000000..3511c9362c3fa
--- /dev/null
+++ b/x-pack/docs/en/rest-api/ml/stop-datafeed.asciidoc
@@ -0,0 +1,76 @@
+[role="xpack"]
+[[ml-stop-datafeed]]
+=== Stop {dfeeds-cap} API
+++++
+Stop {dfeeds-cap}
+++++
+
+This API enables you to stop one or more {dfeeds}.
+
+A {dfeed} that is stopped ceases to retrieve data from {es}.
+A {dfeed} can be started and stopped multiple times throughout its lifecycle.
+
+==== Request
+
+`POST _xpack/ml/datafeeds/<feed_id>/_stop` +
+
+`POST _xpack/ml/datafeeds/<feed_id>,<feed_id>/_stop` +
+
+`POST _xpack/ml/datafeeds/_all/_stop`
+
+//TBD: Can there be spaces between the items in the list?
+
+===== Description
+
+You can stop multiple {dfeeds} in a single API request by using a
+comma-separated list of {dfeeds} or a wildcard expression. You can stop all
+{dfeeds} by using `_all` or by specifying `*` as the `<feed_id>`.
+
+
+==== Path Parameters
+
+`feed_id`::
+  (string) Identifier for the {dfeed}.
It can be a {dfeed} identifier or a + wildcard expression. + + +==== Request Body + +`force`:: + (boolean) If true, the {dfeed} is stopped forcefully. + +`timeout`:: + (time) Controls the amount of time to wait until a {dfeed} stops. + The default value is 20 seconds. + + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. + + +==== Examples + +The following example stops the `datafeed-total-requests` {dfeed}: + +[source,js] +-------------------------------------------------- +POST _xpack/ml/datafeeds/datafeed-total-requests/_stop +{ + "timeout": "30s" +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:server_metrics_startdf] + +When the {dfeed} stops, you receive the following results: +[source,js] +---- +{ + "stopped": true +} +---- +// CONSOLE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/update-datafeed.asciidoc b/x-pack/docs/en/rest-api/ml/update-datafeed.asciidoc new file mode 100644 index 0000000000000..277a9ce31773f --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/update-datafeed.asciidoc @@ -0,0 +1,136 @@ +[role="xpack"] +[[ml-update-datafeed]] +=== Update {dfeeds-cap} API +++++ +Update {dfeeds-cap} +++++ + +This API enables you to update certain properties of a {dfeed}. + +==== Request + +`POST _xpack/ml/datafeeds//_update` + +//===== Description + +==== Path Parameters + +`feed_id` (required):: + (string) Identifier for the {dfeed} + +==== Request Body + +The following properties can be updated after the {dfeed} is created: + +`aggregations`:: + (object) If set, the {dfeed} performs aggregation searches. + For more information, see <>. + +`chunking_config`:: + (object) Specifies how data searches are split into time chunks. + See <>. + +`frequency`:: + (time units) The interval at which scheduled queries are made while the + {dfeed} runs in real time. The default value is either the bucket span for short + bucket spans, or, for longer bucket spans, a sensible fraction of the bucket + span. For example: `150s`. + +`indices`:: + (array) An array of index names. Wildcards are supported. For example: + `["it_ops_metrics", "server*"]`. + +`job_id`:: + (string) A numerical character string that uniquely identifies the job. + +`query`:: + (object) The {es} query domain-specific language (DSL). This value + corresponds to the query object in an {es} search POST body. All the + options that are supported by {es} can be used, as this object is + passed verbatim to {es}. By default, this property has the following + value: `{"match_all": {"boost": 1}}`. + +`query_delay`:: + (time units) The number of seconds behind real-time that data is queried. For + example, if data from 10:04 a.m. might not be searchable in {es} until + 10:06 a.m., set this property to 120 seconds. The default value is `60s`. + +`script_fields`:: + (object) Specifies scripts that evaluate custom expressions and returns + script fields to the {dfeed}. + The <> in a job can contain + functions that use these script fields. + For more information, + see {ref}/search-request-script-fields.html[Script Fields]. + +`scroll_size`:: + (unsigned integer) The `size` parameter that is used in {es} searches. + The default value is `1000`. + +`types`:: + (array) A list of types to search for within the specified indices. + For example: `[]`. This property is provided for backwards compatibility with + releases earlier than 6.0.0. For more information, see <>. 
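+
+As a quick illustration before the property reference continues, a minimal update
+that only adjusts the polling cadence and scroll size might look like the following
+sketch (the {dfeed} name is the one used in the example at the end of this page):
+
+[source,js]
+--------------------------------------------------
+POST _xpack/ml/datafeeds/datafeed-total-requests/_update
+{
+  "frequency": "150s",
+  "scroll_size": 500
+}
+--------------------------------------------------
+// NOTCONSOLE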
+ +For more information about these properties, +see <>. + + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. + + +==== Security Integration + +When {security} is enabled, your {dfeed} will remember which roles the user who +updated it had at the time of update, and run the query using those same roles. + + +==== Examples + +The following example updates the query for the `datafeed-total-requests` +{dfeed} so that only log entries of error level are analyzed: + +[source,js] +-------------------------------------------------- +POST _xpack/ml/datafeeds/datafeed-total-requests/_update +{ + "query": { + "term": { + "level": "error" + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:server_metrics_datafeed] + +When the {dfeed} is updated, you receive the full {dfeed} configuration with +with the updated values: + +[source,js] +---- +{ + "datafeed_id": "datafeed-total-requests", + "job_id": "total-requests", + "query_delay": "83474ms", + "indices": ["server-metrics"], + "types": [], + "query": { + "term": { + "level": { + "value": "error", + "boost": 1.0 + } + } + }, + "scroll_size": 1000, + "chunking_config": { + "mode": "auto" + } +} +---- +// TESTRESPONSE[s/"query.boost": "1.0"/"query.boost": $body.query.boost/] diff --git a/x-pack/docs/en/rest-api/ml/update-job.asciidoc b/x-pack/docs/en/rest-api/ml/update-job.asciidoc new file mode 100644 index 0000000000000..ebb41523de01a --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/update-job.asciidoc @@ -0,0 +1,169 @@ +[role="xpack"] +[[ml-update-job]] +=== Update Jobs API +++++ +Update Jobs +++++ + +This API enables you to update certain properties of a job. + +==== Request + +`POST _xpack/ml/anomaly_detectors//_update` + + +==== Path Parameters + +`job_id` (required):: + (string) Identifier for the job + +==== Request Body + +The following properties can be updated after the job is created: + +[cols="<,<,<",options="header",] +|======================================================================= +|Name |Description |Requires Restart + +|`analysis_limits`: `model_memory_limit` |The approximate maximum amount of +memory resources required for analytical processing. See <>. | Yes + +|`background_persist_interval` |Advanced configuration option. The time between +each periodic persistence of the model. See <>. | Yes + +|`custom_settings` |Contains custom meta data about the job. | No + +|`description` |A description of the job. See <>. | No + +|`groups` |A list of job groups. See <>. | No + +|`model_plot_config`: `enabled` |If true, enables calculation and storage of the +model bounds for each entity that is being analyzed. +See <>. | No + +|`model_snapshot_retention_days` |The time in days that model snapshots are +retained for the job. See <>. | Yes + +|`renormalization_window_days` |Advanced configuration option. The period over +which adjustments to the score are applied, as new data is seen. +See <>. | Yes + +|`results_retention_days` |Advanced configuration option. The number of days +for which job results are retained. See <>. | Yes + +|======================================================================= + +For those properties that have `Requires Restart` set to `Yes` in this table, +if the job is open when you make the update, you must stop the data feed, close +the job, then restart the data feed and open the job for the changes to take +effect. 
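+
+For example, changing `model_memory_limit` on a job that is currently running might
+look like the following sketch. The job and {dfeed} names are the ones used in the
+example below, and the stop, close, open, and start calls are the standard lifecycle
+APIs.
+
+[source,js]
+--------------------------------------------------
+POST _xpack/ml/datafeeds/datafeed-total-requests/_stop
+
+POST _xpack/ml/anomaly_detectors/total-requests/_close
+
+POST _xpack/ml/anomaly_detectors/total-requests/_update
+{
+  "analysis_limits": {
+    "model_memory_limit": "1024mb"
+  }
+}
+
+POST _xpack/ml/anomaly_detectors/total-requests/_open
+
+POST _xpack/ml/datafeeds/datafeed-total-requests/_start
+--------------------------------------------------
+// NOTCONSOLE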
+ +//|`analysis_config`: `detectors`: `detector_index` | A unique identifier of the +//detector. Matches the order of detectors returned by +//<>, starting from 0. | No +//|`analysis_config`: `detectors`: `detector_description` |A description of the +//detector. See <>. | No + +[NOTE] +-- +* You can update the `analysis_limits` only while the job is closed. +* The `model_memory_limit` property value cannot be decreased below the current usage. +* If the `memory_status` property in the `model_size_stats` object has a value +of `hard_limit`, this means that it was unable to process some data. You might +want to re-run this job with an increased `model_memory_limit`. +-- + + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. + + +==== Examples + +The following example updates the `total-requests` job: + +[source,js] +-------------------------------------------------- +POST _xpack/ml/anomaly_detectors/total-requests/_update +{ + "description":"An updated job", + "groups": ["group1","group2"], + "model_plot_config": { + "enabled": true + }, + "analysis_limits": { + "model_memory_limit": "1024mb" + }, + "renormalization_window_days": 30, + "background_persist_interval": "2h", + "model_snapshot_retention_days": 7, + "results_retention_days": 60, + "custom_settings": { + "custom_urls" : [{ + "url_name" : "Lookup IP", + "url_value" : "http://geoiplookup.net/ip/$clientip$" + }] + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:server_metrics_job] + +When the job is updated, you receive a summary of the job configuration +information, including the updated property values. For example: + +[source,js] +---- +{ + "job_id": "total-requests", + "job_type": "anomaly_detector", + "job_version": "7.0.0-alpha1", + "groups": [ + "group1", + "group2" + ], + "description": "An updated job", + "create_time": 1518808660505, + "analysis_config": { + "bucket_span": "10m", + "detectors": [ + { + "detector_description": "Sum of total", + "function": "sum", + "field_name": "total", + "detector_index": 0 + } + ], + "influencers": [] + }, + "analysis_limits": { + "model_memory_limit": "1024mb", + "categorization_examples_limit": 4 + }, + "data_description": { + "time_field": "timestamp", + "time_format": "epoch_ms" + }, + "model_plot_config": { + "enabled": true + }, + "renormalization_window_days": 30, + "background_persist_interval": "2h", + "model_snapshot_retention_days": 7, + "results_retention_days": 60, + "custom_settings": { + "custom_urls": [ + { + "url_name": "Lookup IP", + "url_value": "http://geoiplookup.net/ip/$clientip$" + } + ] + }, + "results_index_name": "shared" +} +---- +// TESTRESPONSE[s/"job_version": "7.0.0-alpha1"/"job_version": $body.job_version/] +// TESTRESPONSE[s/"create_time": 1518808660505/"create_time": $body.create_time/] diff --git a/x-pack/docs/en/rest-api/ml/update-snapshot.asciidoc b/x-pack/docs/en/rest-api/ml/update-snapshot.asciidoc new file mode 100644 index 0000000000000..74a684619c411 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/update-snapshot.asciidoc @@ -0,0 +1,77 @@ +[role="xpack"] +[[ml-update-snapshot]] +=== Update Model Snapshots API +++++ +Update Model Snapshots +++++ + +This API enables you to update certain properties of a snapshot. 
+ +==== Request + +`POST _xpack/ml/anomaly_detectors//model_snapshots//_update` + + +//==== Description + +==== Path Parameters + +`job_id` (required):: + (string) Identifier for the job + +`snapshot_id` (required):: + (string) Identifier for the model snapshot + +==== Request Body + +The following properties can be updated after the model snapshot is created: + +`description`:: + (string) An optional description of the model snapshot. For example, + "Before black friday". + +`retain`:: + (boolean) If true, this snapshot will not be deleted during automatic cleanup + of snapshots older than `model_snapshot_retention_days`. + Note that this snapshot will still be deleted when the job is deleted. + The default value is false. + + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. +//<>. + + +==== Examples + +The following example updates the snapshot identified as `1491852978`: + +[source,js] +-------------------------------------------------- +POST +_xpack/ml/anomaly_detectors/it_ops_new_logs/model_snapshots/1491852978/_update +{ + "description": "Snapshot 1", + "retain": true +} +-------------------------------------------------- +// CONSOLE +// TEST[skip:todo] + +When the snapshot is updated, you receive the following results: +[source,js] +---- +{ + "acknowledged": true, + "model": { + "job_id": "it_ops_new_logs", + "timestamp": 1491852978000, + "description": "Snapshot 1", +... + "retain": true + } +} +---- diff --git a/x-pack/docs/en/rest-api/ml/validate-detector.asciidoc b/x-pack/docs/en/rest-api/ml/validate-detector.asciidoc new file mode 100644 index 0000000000000..6fc5fea6fbb94 --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/validate-detector.asciidoc @@ -0,0 +1,56 @@ +[role="xpack"] +[[ml-valid-detector]] +=== Validate Detectors API +++++ +Validate Detectors +++++ + +This API validates detector configuration information. + +==== Request + +`POST _xpack/ml/anomaly_detectors/_validate/detector` + +==== Description + +The validate detectors API enables you validate the detector configuration +before you create a job. + + +==== Request Body + +For a list of the properties that you can specify in the body of this API, +see <>. + + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. +//<>. + + +==== Examples + +The following example validates detector configuration information: + +[source,js] +-------------------------------------------------- +POST _xpack/ml/anomaly_detectors/_validate/detector +{ + "function": "metric", + "field_name": "responsetime", + "by_field_name": "airline" +} +-------------------------------------------------- +// CONSOLE +// TEST[skip:todo] + +When the validation completes, you receive the following results: +[source,js] +---- +{ + "acknowledged": true +} +---- diff --git a/x-pack/docs/en/rest-api/ml/validate-job.asciidoc b/x-pack/docs/en/rest-api/ml/validate-job.asciidoc new file mode 100644 index 0000000000000..b206734bc033f --- /dev/null +++ b/x-pack/docs/en/rest-api/ml/validate-job.asciidoc @@ -0,0 +1,67 @@ +[role="xpack"] +[[ml-valid-job]] +=== Validate Jobs API +++++ +Validate Jobs +++++ + +This API validates job configuration information. 
+ +==== Request + +`POST _xpack/ml/anomaly_detectors/_validate` + +==== Description + +The validate jobs API enables you validate the job configuration before you +create the job. + + +==== Request Body + +For a list of the properties that you can specify in the body of this API, +see <>. + + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. +//<>. + + +==== Examples + +The following example validates job configuration information: + +[source,js] +-------------------------------------------------- +POST _xpack/ml/anomaly_detectors/_validate +{ + "description" : "Unusual response times by airlines", + "analysis_config" : { + "bucket_span": "300S", + "detectors" :[ + { + "function": "metric", + "field_name": "responsetime", + "by_field_name": "airline"}], + "influencers": [ "airline" ] + }, + "data_description" : { + "time_field": "time", + "time_format": "yyyy-MM-dd'T'HH:mm:ssX" + } +} +-------------------------------------------------- +// CONSOLE +// TEST[skip:todo] + +When the validation is complete, you receive the following results: +[source,js] +---- +{ + "acknowledged": true +} +---- diff --git a/x-pack/docs/en/rest-api/rollup-api.asciidoc b/x-pack/docs/en/rest-api/rollup-api.asciidoc new file mode 100644 index 0000000000000..f1cd7c285a733 --- /dev/null +++ b/x-pack/docs/en/rest-api/rollup-api.asciidoc @@ -0,0 +1,35 @@ +[role="xpack"] +[[rollup-apis]] +== Rollup APIs + +[float] +[[rollup-jobs-endpoint]] +=== Jobs + +* <>, <>, +* <>, <>, +* <> +* <> + +[float] +[[rollup-data-endpoint]] +=== Data + +* <> + +[float] +[[rollup-search-endpoint]] +=== Search + +* <> + + + +include::rollup/delete-job.asciidoc[] +include::rollup/get-job.asciidoc[] +include::rollup/put-job.asciidoc[] +include::rollup/start-job.asciidoc[] +include::rollup/stop-job.asciidoc[] +include::rollup/rollup-caps.asciidoc[] +include::rollup/rollup-search.asciidoc[] +include::rollup/rollup-job-config.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/rollup/delete-job.asciidoc b/x-pack/docs/en/rest-api/rollup/delete-job.asciidoc new file mode 100644 index 0000000000000..056a4470480a0 --- /dev/null +++ b/x-pack/docs/en/rest-api/rollup/delete-job.asciidoc @@ -0,0 +1,84 @@ +[role="xpack"] +[[rollup-delete-job]] +=== Delete Job API +++++ +Delete Job +++++ + +This API deletes an existing rollup job. The job can be started or stopped, in both cases it will be deleted. Attempting +to delete a non-existing job will throw an exception + +==== Request + +`DELETE _xpack/rollup/job/` + +//===== Description + +==== Path Parameters + +`job_id` (required):: + (string) Identifier for the job + + +==== Request Body + +There is no request body for the Delete Job API. + +==== Authorization + +You must have `manage` or `manage_rollup` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. 
+ + +==== Examples + +If we have a rollup job named `sensor`, it can be deleted with: + +[source,js] +-------------------------------------------------- +DELETE _xpack/rollup/job/sensor +-------------------------------------------------- +// CONSOLE +// TEST[setup:sensor_rollup_job] + +Which will return the response: + +[source,js] +---- +{ + "acknowledged": true +} +---- +// TESTRESPONSE + +If however we try to delete a job which doesn't exist: + +[source,js] +-------------------------------------------------- +DELETE _xpack/rollup/job/does_not_exist +-------------------------------------------------- +// CONSOLE +// TEST[catch:missing] + +A 404 `resource_not_found` exception will be thrown: + +[source,js] +---- +{ + "error" : { + "root_cause" : [ + { + "type" : "resource_not_found_exception", + "reason" : "the task with id does_not_exist doesn't exist", + "stack_trace": ... + } + ], + "type" : "resource_not_found_exception", + "reason" : "the task with id does_not_exist doesn't exist", + "stack_trace": ... + }, + "status": 404 +} +---- +// TESTRESPONSE[s/"stack_trace": .../"stack_trace": $body.$_path/] diff --git a/x-pack/docs/en/rest-api/rollup/get-job.asciidoc b/x-pack/docs/en/rest-api/rollup/get-job.asciidoc new file mode 100644 index 0000000000000..4482a87527930 --- /dev/null +++ b/x-pack/docs/en/rest-api/rollup/get-job.asciidoc @@ -0,0 +1,273 @@ +[role="xpack"] +[[rollup-get-job]] +=== Get Rollup Jobs API +++++ +Get Job +++++ + +This API returns the configuration, stats and status of rollup jobs. The API can return the details for a single job, +or for all jobs. + +Note: This API only returns active (both `STARTED` and `STOPPED`) jobs. If a job was created, ran for a while then deleted, +this API will not return any details about that job. + +For details about a historical job, the <> may be more useful + +==== Request + +`GET _xpack/rollup/job/` + +//===== Description + +==== Path Parameters + +`job_id`:: + (string) Identifier for the job to retrieve. If omitted (or `_all` is used) all jobs will be returned + + +==== Request Body + +There is no request body for the Get Jobs API. + +==== Authorization + +You must have `monitor`, `monitor_rollup`, `manage` or `manage_rollup` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. 
+ +==== Examples + +If we have already created a rollup job named `sensor`, the details about the job can be retrieved with: + +[source,js] +-------------------------------------------------- +GET _xpack/rollup/job/sensor +-------------------------------------------------- +// CONSOLE +// TEST[setup:sensor_rollup_job] + +Which will yield the following response: + +[source,js] +---- +{ + "jobs" : [ + { + "config" : { + "id" : "sensor", + "index_pattern" : "sensor-*", + "rollup_index" : "sensor_rollup", + "cron" : "*/30 * * * * ?", + "groups" : { + "date_histogram" : { + "interval" : "1h", + "delay": "7d", + "field": "timestamp", + "time_zone": "UTC" + }, + "terms" : { + "fields" : [ + "node" + ] + } + }, + "metrics" : [ + { + "field" : "temperature", + "metrics" : [ + "min", + "max", + "sum" + ] + }, + { + "field" : "voltage", + "metrics" : [ + "avg" + ] + } + ], + "timeout" : "20s", + "page_size" : 1000 + }, + "status" : { + "job_state" : "stopped" + }, + "stats" : { + "pages_processed" : 0, + "documents_processed" : 0, + "rollups_indexed" : 0, + "trigger_count" : 0 + } + } + ] +} +---- +// TESTRESPONSE + +The `jobs` array contains a single job (`id: sensor`) since we requested a single job in the endpoint's URL. The +details for this job contain three top-level parameters: `config`, `status` and `stats` + +`config` holds the rollup job's configuration, which is identical to the configuration that was supplied when creating +the job via the <>. + +The `status` object holds the current status of the rollup job's indexer. The possible values and their meanings are: + +- `stopped` means the indexer is paused and will not process data, even if it's cron interval triggers +- `started` means the indexer is running, but not actively indexing data. When the cron interval triggers, the job's +indexer will begin to process data +- `indexing` means the indexer is actively processing data and creating new rollup documents. When in this state, any +subsequent cron interval triggers will be ignored because the job is already active with the prior trigger +- `abort` a transient state, which is usually not witnessed by the user. The `abort` state is used if the task needs to +be shut down for some reason (job has been deleted, an unrecoverable error has been encountered, etc). Shortly after +the `abort` state is set, the job will remove itself from the cluster + +Finally, the `stats` object provides transient statistics about the rollup job, such as how many documents have been +processed and how many rollup summary docs have been indexed. These stats are not persisted, so if a node is restarted +these stats will be reset. 
+ +If we add another job, we can see how multi-job responses are handled: + +[source,js] +-------------------------------------------------- +PUT _xpack/rollup/job/sensor2 <1> +{ + "index_pattern": "sensor-*", + "rollup_index": "sensor_rollup", + "cron": "*/30 * * * * ?", + "page_size" :1000, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h", + "delay": "7d" + }, + "terms": { + "fields": ["node"] + } + }, + "metrics": [ + { + "field": "temperature", + "metrics": ["min", "max", "sum"] + }, + { + "field": "voltage", + "metrics": ["avg"] + } + ] +} + +GET _xpack/rollup/job/_all <2> +-------------------------------------------------- +// CONSOLE +// TEST[setup:sensor_rollup_job] +<1> We create a second job with name `sensor2` +<2> Then request all jobs by using `_all` in the GetJobs API + +Which will yield the following response: + +[source,js] +---- +{ + "jobs" : [ + { + "config" : { + "id" : "sensor2", + "index_pattern" : "sensor-*", + "rollup_index" : "sensor_rollup", + "cron" : "*/30 * * * * ?", + "groups" : { + "date_histogram" : { + "interval" : "1h", + "delay": "7d", + "field": "timestamp", + "time_zone": "UTC" + }, + "terms" : { + "fields" : [ + "node" + ] + } + }, + "metrics" : [ + { + "field" : "temperature", + "metrics" : [ + "min", + "max", + "sum" + ] + }, + { + "field" : "voltage", + "metrics" : [ + "avg" + ] + } + ], + "timeout" : "20s", + "page_size" : 1000 + }, + "status" : { + "job_state" : "stopped" + }, + "stats" : { + "pages_processed" : 0, + "documents_processed" : 0, + "rollups_indexed" : 0, + "trigger_count" : 0 + } + }, + { + "config" : { + "id" : "sensor", + "index_pattern" : "sensor-*", + "rollup_index" : "sensor_rollup", + "cron" : "*/30 * * * * ?", + "groups" : { + "date_histogram" : { + "interval" : "1h", + "delay": "7d", + "field": "timestamp", + "time_zone": "UTC" + }, + "terms" : { + "fields" : [ + "node" + ] + } + }, + "metrics" : [ + { + "field" : "temperature", + "metrics" : [ + "min", + "max", + "sum" + ] + }, + { + "field" : "voltage", + "metrics" : [ + "avg" + ] + } + ], + "timeout" : "20s", + "page_size" : 1000 + }, + "status" : { + "job_state" : "stopped" + }, + "stats" : { + "pages_processed" : 0, + "documents_processed" : 0, + "rollups_indexed" : 0, + "trigger_count" : 0 + } + } + ] +} +---- +// NOTCONSOLE diff --git a/x-pack/docs/en/rest-api/rollup/put-job.asciidoc b/x-pack/docs/en/rest-api/rollup/put-job.asciidoc new file mode 100644 index 0000000000000..2cc869e1e3467 --- /dev/null +++ b/x-pack/docs/en/rest-api/rollup/put-job.asciidoc @@ -0,0 +1,96 @@ +[role="xpack"] +[[rollup-put-job]] +=== Create Job API +++++ +Create Job +++++ + +This API enables you to create a rollup job. The job will be created in a `STOPPED` state, and must be +started with the <>. + +==== Request + +`PUT _xpack/rollup/job/` + +//===== Description + +==== Path Parameters + +`job_id` (required):: + (string) Identifier for the job + + +==== Request Body + +`index_pattern` (required):: + (string) The index, or index pattern, that you wish to rollup. Supports wildcard-style patterns (`logstash-*`). + +`rollup_index` (required):: + (string) The index that you wish to store rollup results into. Can be shared with other rollup jobs. + +`cron` (required):: + (string) A cron string which defines when the rollup job should be executed. + +`page_size` (required):: + (int) The number of bucket results that should be processed on each iteration of the rollup indexer. A larger value + will tend to execute faster, but will require more memory during processing. 
+ +`groups` (required):: + (object) Defines the grouping fields that are defined for this rollup job. See <>. + +`metrics`:: + (object) Defines the metrics that should be collected for each grouping tuple. See <>. + +==== Authorization + +You must have `manage` or `manage_rollup` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. + + +==== Examples + +The following example creates a rollup job named "sensor", targeting the "sensor-*" index pattern: + +[source,js] +-------------------------------------------------- +PUT _xpack/rollup/job/sensor +{ + "index_pattern": "sensor-*", + "rollup_index": "sensor_rollup", + "cron": "*/30 * * * * ?", + "page_size" :1000, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h", + "delay": "7d" + }, + "terms": { + "fields": ["node"] + } + }, + "metrics": [ + { + "field": "temperature", + "metrics": ["min", "max", "sum"] + }, + { + "field": "voltage", + "metrics": ["avg"] + } + ] +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sensor_index] + +When the job is created, you receive the following results: + +[source,js] +---- +{ + "acknowledged": true +} +---- +// TESTRESPONSE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/rollup/rollup-caps.asciidoc b/x-pack/docs/en/rest-api/rollup/rollup-caps.asciidoc new file mode 100644 index 0000000000000..5a4dab69d937f --- /dev/null +++ b/x-pack/docs/en/rest-api/rollup/rollup-caps.asciidoc @@ -0,0 +1,180 @@ +[role="xpack"] +[[rollup-get-rollup-caps]] +=== Get Rollup Job Capabilities +++++ +Get Rollup Caps +++++ + +This API returns the rollup capabilities that have been configured for an index or index pattern. This API is useful +because a rollup job is often configured to rollup only a subset of fields from the source index. Furthermore, only +certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on +that configuration. + +This API will allow you to inspect an index and determine: + +1. Does this index have associated rollup data somewhere in the cluster? +2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data +live? + +==== Request + +`GET _xpack/rollup/data/{index}` + +//===== Description + +==== Path Parameters + +`index`:: + (string) Index, indices or index-pattern to return rollup capabilities for. If omitted (or `_all` is used) all available + rollup job capabilities will be returned + + +==== Request Body + +There is no request body for the Get Jobs API. + +==== Authorization + +You must have `monitor`, `monitor_rollup`, `manage` or `manage_rollup` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. + +==== Examples + +Imagine we have an index named `sensor-1` full of raw data. We know that the data will grow over time, so there +will be a `sensor-2`, `sensor-3`, etc. 
Let's create a Rollup job that targets the index pattern `sensor-*` to accomodate +this future scaling: + +[source,js] +-------------------------------------------------- +PUT _xpack/rollup/job/sensor +{ + "index_pattern": "sensor-*", + "rollup_index": "sensor_rollup", + "cron": "*/30 * * * * ?", + "page_size" :1000, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h", + "delay": "7d" + }, + "terms": { + "fields": ["node"] + } + }, + "metrics": [ + { + "field": "temperature", + "metrics": ["min", "max", "sum"] + }, + { + "field": "voltage", + "metrics": ["avg"] + } + ] +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sensor_index] + +We can then retrieve the rollup capabilities of that index pattern (`sensor-*`) via the following command: + +[source,js] +-------------------------------------------------- +GET _xpack/rollup/data/sensor-* +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +Which will yield the following response: + +[source,js] +---- +{ + "sensor-*" : { + "rollup_jobs" : [ + { + "job_id" : "sensor", + "rollup_index" : "sensor_rollup", + "index_pattern" : "sensor-*", + "fields" : { + "node" : [ + { + "agg" : "terms" + } + ], + "temperature" : [ + { + "agg" : "min" + }, + { + "agg" : "max" + }, + { + "agg" : "sum" + } + ], + "timestamp" : [ + { + "agg" : "date_histogram", + "time_zone" : "UTC", + "interval" : "1h", + "delay": "7d" + } + ], + "voltage" : [ + { + "agg" : "avg" + } + ] + } + } + ] + } +} +---- +// TESTRESPONSE + +The response that is returned contains information that is similar to the original Rollup configuration, but formatted +differently. First, there are some house-keeping details: the Rollup job's ID, the index that holds the rolled data, +the index pattern that the job was targeting. + +Next it shows a list of fields that contain data eligible for rollup searches. Here we see four fields: `node`, `temperature`, +`timestamp` and `voltage`. Each of these fields list the aggregations that are possible. For example, you can use a min, max +or sum aggregation on the `temperature` field, but only a `date_histogram` on `timestamp`. + +Note that the `rollup_jobs` element is an array; there can be multiple, independent jobs configured for a single index +or index pattern. Each of these jobs may have different configurations, so the API returns a list of all the various +configurations available. + +We could also retrieve the same information with a request to `_all`: + +[source,js] +-------------------------------------------------- +GET _xpack/rollup/data/_all +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +But note that if we use the concrete index name (`sensor-1`), we'll retrieve no rollup capabilities: + +[source,js] +-------------------------------------------------- +GET _xpack/rollup/data/sensor-1 +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[source,js] +---- +{ + +} +---- +// TESTRESPONSE + +Why is this? The original rollup job was configured against a specific index pattern (`sensor-*`) not a concrete index +(`sensor-1`). So while the index belongs to the pattern, the rollup job is only valid across the entirety of the pattern +not just one of it's containing indices. So for that reason, the Rollup Capabilities API only returns information based +on the originally configured index name or pattern. 
\ No newline at end of file
diff --git a/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc b/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc
new file mode 100644
index 0000000000000..85f1a57caa763
--- /dev/null
+++ b/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc
@@ -0,0 +1,231 @@
+[role="xpack"]
+[[rollup-job-config]]
+=== Rollup Job Configuration
+
+The Rollup Job Configuration contains all the details about how the rollup job should run, when it indexes documents,
+and what future queries will be able to execute against the rollup index.
+
+There are three main sections to the Job Configuration: the logistical details about the job (cron schedule, etc.), what fields
+should be grouped on, and what metrics to collect for each group.
+
+A full job configuration might look like this:
+
+[source,js]
+--------------------------------------------------
+PUT _xpack/rollup/job/sensor
+{
+  "index_pattern": "sensor-*",
+  "rollup_index": "sensor_rollup",
+  "cron": "*/30 * * * * ?",
+  "page_size" :1000,
+  "groups" : {
+    "date_histogram": {
+      "field": "timestamp",
+      "interval": "1h",
+      "delay": "7d"
+    },
+    "terms": {
+      "fields": ["hostname", "datacenter"]
+    },
+    "histogram": {
+      "fields": ["load", "net_in", "net_out"],
+      "interval": 5
+    }
+  },
+  "metrics": [
+    {
+      "field": "temperature",
+      "metrics": ["min", "max", "sum"]
+    },
+    {
+      "field": "voltage",
+      "metrics": ["avg"]
+    }
+  ]
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:sensor_index]
+
+==== Logistical Details
+
+In the above example, there are several pieces of logistical configuration for the job itself.
+
+`{job_id}` (required)::
+  (string) In the endpoint URL, you specify the name of the job (`sensor` in the above example). This can be any alphanumeric string,
+  and uniquely identifies the data that is associated with the rollup job. The ID is persistent, in that it is stored with the rolled
+  up data. So if you create a job, let it run for a while, then delete the job... the data that the job rolled up will still be
+  associated with this job ID. You will be unable to create a new job with the same ID, as that could lead to problems with mismatched
+  job configurations.
+
+`index_pattern` (required)::
+  (string) The index, or index pattern, that you wish to rollup. Supports wildcard-style patterns (`logstash-*`). The job will
+  attempt to rollup the entire index or index-pattern. Once the "backfill" is finished, it will periodically (as defined by the cron)
+  look for new data and roll that up too.
+
+`rollup_index` (required)::
+  (string) The index that you wish to store rollup results into. All the rollup data that is generated by the job will be
+  stored in this index. When searching the rollup data, this index will be used in the <> endpoint's URL.
+  The rollup index can be shared with other rollup jobs. The data is stored so that it doesn't interfere with unrelated jobs.
+
+`cron` (required)::
+  (string) A cron string which defines when the rollup job should be executed. The cron string defines an interval of when to run
+  the job's indexer. When the interval triggers, the indexer will attempt to rollup the data in the index pattern. The cron pattern
+  is unrelated to the time interval of the data being rolled up. For example, you may wish to create hourly rollups of your documents (as
+  defined in the <>) but to only run the indexer on a daily basis at midnight, as defined by the cron.
+  The cron pattern is defined just like Watcher's Cron Schedule.
+ +`page_size` (required):: + (int) The number of bucket results that should be processed on each iteration of the rollup indexer. A larger value + will tend to execute faster, but will require more memory during processing. This has no effect on how the data is rolled up, it is + merely used for tweaking the speed/memory cost of the indexer. + +[[rollup-groups-config]] +==== Grouping Config + +The `groups` section of the configuration is where you decide which fields should be grouped on, and with what aggregations. These +fields will then be available later for aggregating into buckets. For example, this configuration: + +[source,js] +-------------------------------------------------- +"groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h", + "delay": "7d" + }, + "terms": { + "fields": ["hostname", "datacenter"] + }, + "histogram": { + "fields": ["load", "net_in", "net_out"], + "interval": 5 + } +} +-------------------------------------------------- +// NOTCONSOLE + +Allows `date_histogram`'s to be used on the `"timestamp"` field, `terms` aggregations to be used on the `"hostname"` and `"datacenter"` +fields, and `histograms` to be used on any of `"load"`, `"net_in"`, `"net_out"` fields. + +Importantly, these aggs/fields can be used in any combination. Think of the `groups` configuration as defining a set of tools that can +later be used in aggregations to partition the data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. +But Rollups provide enough flexibility that you simply need to determine _which_ fields are needed, not _in what order_ they are needed. + +There are three types of groupings currently available: + +===== Date Histogram + +A `date_histogram` group aggregates a `date` field into time-based buckets. The `date_histogram` group is *mandatory* -- you currently +cannot rollup documents without a timestamp and a `date_histogram` group. + +The `date_histogram` group has several parameters: + +`field` (required):: + The date field that is to be rolled up. + +`interval` (required):: + The interval of time buckets to be generated when rolling up. E.g. `"1h"` will produce hourly rollups. This follows standard time formatting + syntax as used elsewhere in Elasticsearch. The `interval` defines the _minimum_ interval that can be aggregated only. If hourly (`"1h"`) + intervals are configured, <> can execute aggregations with 1hr or greater (weekly, monthly, etc) intervals. + So define the interval as the smallest unit that you wish to later query. + + Note: smaller, more granular intervals take up proportionally more space. + +`delay`:: + How long to wait before rolling up new documents. By default, the indexer attempts to roll up all data that is available. However, it + is not uncommon for data to arrive out of order, sometimes even a few days late. The indexer is unable to deal with data that arrives + after a time-span has been rolled up (e.g. there is no provision to update already-existing rollups). + + Instead, you should specify a `delay` that matches the longest period of time you expect out-of-order data to arrive. E.g. a `delay` of + `"1d"` will instruct the indexer to roll up documents up to `"now - 1d"`, which provides a day of buffer time for out-of-order documents + to arrive. + +`time_zone`:: + Defines what time_zone the rollup documents are stored as. Unlike raw data, which can shift timezones on the fly, rolled documents have + to be stored with a specific timezone. 
By default, rollup documents are stored in `UTC`, but this can be changed with the `time_zone`
+  parameter.
+
+===== Terms
+
+The `terms` group can be used on `keyword` or numeric fields, to allow bucketing via the `terms` aggregation at a later point. The `terms`
+group is optional. If defined, the indexer will enumerate and store _all_ values of a field for each time-period. This can be potentially
+costly for high-cardinality groups such as IP addresses, especially if the time-bucket is particularly sparse.
+
+While it is unlikely that a rollup will ever be larger in size than the raw data, defining `terms` groups on multiple high-cardinality
+fields can effectively reduce the compression of a rollup to a large extent. You should be judicious about which high-cardinality fields
+are included for that reason.
+
+The `terms` group has a single parameter:
+
+`fields` (required)::
+  The set of fields that you wish to collect terms for. This array can contain a mix of `keyword` and numeric fields. Order
+  does not matter.
+
+
+===== Histogram
+
+The `histogram` group aggregates one or more numeric fields into numeric histogram intervals. This group is optional.
+
+The `histogram` group has two parameters:
+
+`fields` (required)::
+  The set of fields that you wish to build histograms for. All fields specified must be some kind of numeric. Order does not matter.
+
+`interval` (required)::
+  The interval of histogram buckets to be generated when rolling up. E.g. `5` will create buckets that are five units wide
+  (`0-5`, `5-10`, etc). Note that only one interval can be specified in the `histogram` group, meaning that all fields being grouped via
+  the histogram must share the same interval.
+
+[[rollup-metrics-config]]
+==== Metrics Config
+
+After defining which groups should be generated for the data, you next configure which metrics should be collected. By default, only
+the doc_counts are collected for each group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc.
+
+Metrics are defined on a per-field basis, and for each field you configure which metric should be collected. For example:
+
+[source,js]
+--------------------------------------------------
+"metrics": [
+    {
+        "field": "temperature",
+        "metrics": ["min", "max", "sum"]
+    },
+    {
+        "field": "voltage",
+        "metrics": ["avg"]
+    }
+]
+--------------------------------------------------
+// NOTCONSOLE
+
+This configuration defines metrics over two fields, `"temperature"` and `"voltage"`. For the `"temperature"` field, we are collecting
+the min, max and sum of the temperature. For `"voltage"`, we are collecting the average. These metrics are collected in a way that makes
+them compatible with any combination of defined groups.
+
+The `metrics` configuration accepts an array of objects, where each object has two parameters:
+
+`field` (required)::
+  The field to collect metrics for. This must be a numeric field of some kind.
+
+`metrics` (required)::
+  An array of metrics to collect for the field. At least one metric must be configured. Acceptable metrics are min/max/sum/avg/value_count.
+
+
+
+.Averages aren't composable?!
+**********************************
+If you've worked with rollups before, you may be cautious around averages. If an average is saved for a 10 minute
+interval, it usually isn't useful for larger intervals. You cannot average six 10-minute averages to find an
+hourly average (average of averages is not equal to the total average).
+ +For this reason, other systems tend to either omit the ability to average, or store the average at multiple intervals +to support more flexible querying. + +Instead, the Rollup feature saves the `count` and `sum` for the defined time interval. This allows us to reconstruct +the average at any interval greater-than or equal to the defined interval. This gives maximum flexibility for +minimal storage costs... and you don't have to worry about average accuracies (no average of averages here!) +********************************** + diff --git a/x-pack/docs/en/rest-api/rollup/rollup-search.asciidoc b/x-pack/docs/en/rest-api/rollup/rollup-search.asciidoc new file mode 100644 index 0000000000000..557953fefb231 --- /dev/null +++ b/x-pack/docs/en/rest-api/rollup/rollup-search.asciidoc @@ -0,0 +1,228 @@ +[role="xpack"] +[[rollup-search]] +=== Rollup Search +++++ +Rollup Search +++++ + +The Rollup Search endpoint allows searching rolled-up data using the standard query DSL. The Rollup Search endpoint +is needed because, internally, rolled-up documents utilize a different document structure than the original data. The +Rollup Search endpoint rewrites standard query DSL into a format that matches the rollup documents, then takes the response +and rewrites it back to what a client would expect given the original query. + +==== Request + +`GET {index}/_rollup_search` + +//===== Description + +==== Path Parameters + +`index`:: + (string) Index, indices or index-pattern to execute a rollup search against. This can include both rollup and non-rollup + indices. + +Rules for the `index` parameter: +- At least one index/index-pattern must be specified. This can be either a rollup or non-rollup index. Omitting the index parameter, +or using `_all`, is not permitted +- Multiple non-rollup indices may be specified +- Only one rollup index may be specified. If more than one are supplied an exception will be thrown + +==== Request Body + +The request body supports a subset of features from the regular Search API. It supports: + +- `query` param for specifying an DSL query, subject to some limitations +- `aggregations` param for specifying aggregations + +Functionality that is not available: + +- `size`: because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or +omitted entirely. +- `highlighter`, `suggestors`, `post_filter`, `profile`, `explain` are similarly disallowed + + +==== Historical-only search example + +Imagine we have an index named `sensor-1` full of raw data, and we have created a rollup job with the following configuration: + +[source,js] +-------------------------------------------------- +PUT _xpack/rollup/job/sensor +{ + "index_pattern": "sensor-*", + "rollup_index": "sensor_rollup", + "cron": "*/30 * * * * ?", + "page_size" :1000, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h", + "delay": "7d" + }, + "terms": { + "fields": ["node"] + } + }, + "metrics": [ + { + "field": "temperature", + "metrics": ["min", "max", "sum"] + }, + { + "field": "voltage", + "metrics": ["avg"] + } + ] +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sensor_index] + +This rolls up the `sensor-*` pattern and stores the results in `sensor_rollup`. To search this rolled up data, we +need to use the `_rollup_search` endpoint. 
However, you'll notice that we can use regular query DSL to search the +rolled-up data: + +[source,js] +-------------------------------------------------- +GET /sensor_rollup/_rollup_search +{ + "size": 0, + "aggregations": { + "max_temperature": { + "max": { + "field": "temperature" + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sensor_prefab_data] + +The query is targeting the `sensor_rollup` data, since this contains the rollup data as configured in the job. A `max` +aggregation has been used on the `temperature` field, yielding the following response: + +[source,js] +---- +{ + "took" : 102, + "timed_out" : false, + "terminated_early" : false, + "_shards" : ... , + "hits" : { + "total" : 0, + "max_score" : 0.0, + "hits" : [ ] + }, + "aggregations" : { + "max_temperature" : { + "value" : 202.0 + } + } +} +---- +// TESTRESPONSE[s/"took" : 102/"took" : $body.$_path/] +// TESTRESPONSE[s/"_shards" : \.\.\. /"_shards" : $body.$_path/] + +The response is exactly as you'd expect from a regular query + aggregation; it provides some metadata about the request +(`took`, `_shards`, etc), the search hits (which is always empty for rollup searches), and the aggregation response. + +Rollup searches are limited to functionality that was configured in the rollup job. For example, we are not able to calculate +the average temperature because `avg` was not one of the configured metrics for the `temperature` field. If we try +to execute that search: + +[source,js] +-------------------------------------------------- +GET sensor_rollup/_rollup_search +{ + "size": 0, + "aggregations": { + "avg_temperature": { + "avg": { + "field": "temperature" + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] +// TEST[catch:/illegal_argument_exception/] + +[source,js] +---- +{ + "error" : { + "root_cause" : [ + { + "type" : "illegal_argument_exception", + "reason" : "There is not a rollup job that has a [avg] agg with name [avg_temperature] which also satisfies all requirements of query.", + "stack_trace": ... + } + ], + "type" : "illegal_argument_exception", + "reason" : "There is not a rollup job that has a [avg] agg with name [avg_temperature] which also satisfies all requirements of query.", + "stack_trace": ... + }, + "status": 400 +} +---- +// TESTRESPONSE[s/"stack_trace": \.\.\./"stack_trace": $body.$_path/] + +==== Searching both historical rollup and non-rollup data + +The Rollup Search API has the capability to search across both "live", non-rollup data as well as the aggregated rollup +data. This is done by simply adding the live indices to the URI: + + +[source,js] +-------------------------------------------------- +GET sensor-1,sensor_rollup/_rollup_search <1> +{ + "size": 0, + "aggregations": { + "max_temperature": { + "max": { + "field": "temperature" + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] +<1> Note the URI now searches `sensor-1` and `sensor_rollup` at the same time + +When the search is executed, the Rollup Search endpoint will do two things: + +1. The original request will be sent to the non-rollup index unaltered +2. A rewritten version of the original request will be sent to the rollup index. + +When the two responses are received, the endpoint will then rewrite the rollup response and merge the two together. +During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup +index will be used. 
+ +The response to the above query will look as expected, despite spanning rollup and non-rollup indices: + +[source,js] +---- +{ + "took" : 102, + "timed_out" : false, + "terminated_early" : false, + "_shards" : ... , + "hits" : { + "total" : 0, + "max_score" : 0.0, + "hits" : [ ] + }, + "aggregations" : { + "max_temperature" : { + "value" : 202.0 + } + } +} +---- +// TESTRESPONSE[s/"took" : 102/"took" : $body.$_path/] +// TESTRESPONSE[s/"_shards" : \.\.\. /"_shards" : $body.$_path/] \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/rollup/start-job.asciidoc b/x-pack/docs/en/rest-api/rollup/start-job.asciidoc new file mode 100644 index 0000000000000..b8eccd5fbce82 --- /dev/null +++ b/x-pack/docs/en/rest-api/rollup/start-job.asciidoc @@ -0,0 +1,83 @@ +[role="xpack"] +[[rollup-start-job]] +=== Start Job API +++++ +Start Job +++++ + +This API starts an existing, stopped rollup job. If the job does not exist an exception will be thrown. +Starting an already started job has no action. + +==== Request + +`POST _xpack/rollup/job//_start` + +//===== Description + +==== Path Parameters + +`job_id` (required):: + (string) Identifier for the job + + +==== Request Body + +There is no request body for the Start Job API. + +==== Authorization + +You must have `manage` or `manage_rollup` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. + +==== Examples + +If we have already created a rollup job named `sensor`, it can be started with: + +[source,js] +-------------------------------------------------- +POST _xpack/rollup/job/sensor/_start +-------------------------------------------------- +// CONSOLE +// TEST[setup:sensor_rollup_job] + +Which will return the response: + +[source,js] +---- +{ + "started": true +} +---- +// TESTRESPONSE + +If however we try to start a job which doesn't exist: + +[source,js] +-------------------------------------------------- +POST _xpack/rollup/job/does_not_exist/_start +-------------------------------------------------- +// CONSOLE +// TEST[catch:missing] + +A 404 `resource_not_found` exception will be thrown: + +[source,js] +---- +{ + "error" : { + "root_cause" : [ + { + "type" : "resource_not_found_exception", + "reason" : "Task for Rollup Job [does_not_exist] not found", + "stack_trace": ... + } + ], + "type" : "resource_not_found_exception", + "reason" : "Task for Rollup Job [does_not_exist] not found", + "stack_trace": ... + }, + "status": 404 +} +---- +// TESTRESPONSE[s/"stack_trace": \.\.\./"stack_trace": $body.$_path/] diff --git a/x-pack/docs/en/rest-api/rollup/stop-job.asciidoc b/x-pack/docs/en/rest-api/rollup/stop-job.asciidoc new file mode 100644 index 0000000000000..9da3872a10b00 --- /dev/null +++ b/x-pack/docs/en/rest-api/rollup/stop-job.asciidoc @@ -0,0 +1,84 @@ +[role="xpack"] +[[rollup-stop-job]] +=== Stop Job API +++++ +Stop Job +++++ + +This API stops an existing, started rollup job. If the job does not exist an exception will be thrown. +Stopping an already stopped job has no action. + +==== Request + +`POST _xpack/rollup/job//_stop` + +//===== Description + +==== Path Parameters + +`job_id` (required):: + (string) Identifier for the job + + +==== Request Body + +There is no request body for the Stop Job API. + +==== Authorization + +You must have `manage` or `manage_rollup` cluster privileges to use this API. +For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. 
+ + +==== Examples + +If we have an already-started rollup job named `sensor`, it can be stopped with: + +[source,js] +-------------------------------------------------- +POST _xpack/rollup/job/sensor/_stop +-------------------------------------------------- +// CONSOLE +// TEST[setup:sensor_started_rollup_job] + +Which will return the response: + +[source,js] +---- +{ + "stopped": true +} +---- +// TESTRESPONSE + +If however we try to stop a job which doesn't exist: + +[source,js] +-------------------------------------------------- +POST _xpack/rollup/job/does_not_exist/_stop +-------------------------------------------------- +// CONSOLE +// TEST[catch:missing] + +A 404 `resource_not_found` exception will be thrown: + +[source,js] +---- +{ + "error" : { + "root_cause" : [ + { + "type" : "resource_not_found_exception", + "reason" : "Task for Rollup Job [does_not_exist] not found", + "stack_trace": ... + } + ], + "type" : "resource_not_found_exception", + "reason" : "Task for Rollup Job [does_not_exist] not found", + "stack_trace": ... + }, + "status": 404 +} +---- +// TESTRESPONSE[s/"stack_trace": .../"stack_trace": $body.$_path/] diff --git a/x-pack/docs/en/rest-api/security.asciidoc b/x-pack/docs/en/rest-api/security.asciidoc new file mode 100644 index 0000000000000..227e343192a50 --- /dev/null +++ b/x-pack/docs/en/rest-api/security.asciidoc @@ -0,0 +1,22 @@ +[role="xpack"] +[[security-api]] +== Security APIs + +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +include::security/authenticate.asciidoc[] +include::security/change-password.asciidoc[] +include::security/clear-cache.asciidoc[] +include::security/privileges.asciidoc[] +include::security/roles.asciidoc[] +include::security/role-mapping.asciidoc[] +include::security/ssl.asciidoc[] +include::security/tokens.asciidoc[] +include::security/users.asciidoc[] diff --git a/x-pack/docs/en/rest-api/security/authenticate.asciidoc b/x-pack/docs/en/rest-api/security/authenticate.asciidoc new file mode 100644 index 0000000000000..ba837ddfd2c20 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/authenticate.asciidoc @@ -0,0 +1,46 @@ +[role="xpack"] +[[security-api-authenticate]] +=== Authenticate API + +The Authenticate API enables you to submit a request with a basic auth header to +authenticate a user and retrieve information about the authenticated user. + + +==== Request + +`GET _xpack/security/_authenticate` + + +==== Description + +A successful call returns a JSON structure that shows what roles are assigned +to the user as well as any assigned metadata. + +If the user cannot be authenticated, this API returns a 401 status code. 
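+
+Since the request itself carries the credentials to be verified, it is
+typically sent with a basic authentication header. A minimal `curl` sketch
+(the username, password, and host shown are placeholders, not values defined
+by this API):
+
+[source,shell]
+--------------------------------------------------
+curl -u rdeniro:password "http://localhost:9200/_xpack/security/_authenticate"
+--------------------------------------------------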
+ +==== Examples + +To authenticate a user, submit a GET request to the +`_xpack/security/_authenticate` endpoint: + +[source,js] +-------------------------------------------------- +GET _xpack/security/_authenticate +-------------------------------------------------- +// CONSOLE + +The following example output provides information about the "rdeniro" user: + +[source,js] +-------------------------------------------------- +{ + "username": "rdeniro", + "roles": [ + "admin", + "kibana4" + ], + "metadata" : { + "employee_id": "8675309" + } +} +-------------------------------------------------- diff --git a/x-pack/docs/en/rest-api/security/change-password.asciidoc b/x-pack/docs/en/rest-api/security/change-password.asciidoc new file mode 100644 index 0000000000000..7dee98480e72c --- /dev/null +++ b/x-pack/docs/en/rest-api/security/change-password.asciidoc @@ -0,0 +1,53 @@ +[role="xpack"] +[[security-api-change-password]] +=== Change Password API + +The Change Password API enables you to submit a request to change the password +of a user. + +==== Request + +`POST _xpack/security/user/_password` + + +`POST _xpack/security/user//_password` + + +==== Path Parameters + +`username`:: + (string) The user whose password you want to change. If you do not specify + this parameter, the password is changed for the current user. + + +==== Request Body + +`password` (required):: + (string) The new password value. + + +==== Authorization + +Every user can change their own password. Users with the `manage_security` +privilege can change passwords of other users. + + +==== Examples + +The following example updates the password for the `elastic` user: + +[source,js] +-------------------------------------------------- +POST _xpack/security/user/elastic/_password +{ + "password": "x-pack-test-password" +} +-------------------------------------------------- +// CONSOLE + +A successful call returns an empty JSON structure. + +[source,js] +-------------------------------------------------- +{} +-------------------------------------------------- +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/security/clear-cache.asciidoc b/x-pack/docs/en/rest-api/security/clear-cache.asciidoc new file mode 100644 index 0000000000000..03fac3ba2c44a --- /dev/null +++ b/x-pack/docs/en/rest-api/security/clear-cache.asciidoc @@ -0,0 +1,59 @@ +[role="xpack"] +[[security-api-clear-cache]] +=== Clear Cache API + +The Clear Cache API evicts users from the user cache. You can completely clear +the cache or evict specific users. + +==== Request + +`POST _xpack/security/realm//_clear_cache` + + +`POST _xpack/security/realm//_clear_cache?usernames=` + + +==== Description + +User credentials are cached in memory on each node to avoid connecting to a +remote authentication service or hitting the disk for every incoming request. +There are realm settings that you can use to configure the user cache. For more +information, see {xpack-ref}/controlling-user-cache.html[Controlling the User Cache]. + +To evict roles from the role cache, see the +<>. + +==== Path Parameters + +`realms` (required):: + (list) A comma-separated list of the realms to clear. + +`usernames`:: + (list) A comma-separated list of the users to clear from the cache. If you + do not specify this parameter, the API evicts all users from the user cache. 
+ +==== Examples + +For example, to evict all users cached by the `file` realm: + +[source,js] +-------------------------------------------------- +POST _xpack/security/realm/default_file/_clear_cache +-------------------------------------------------- +// CONSOLE + +To evict selected users, specify the `usernames` parameter: + +[source,js] +-------------------------------------------------- +POST _xpack/security/realm/default_file/_clear_cache?usernames=rdeniro,alpacino +-------------------------------------------------- +// CONSOLE + +To clear the caches for multiple realms, specify the realms as a comma-delimited +list: + +[source, js] +------------------------------------------------------------ +POST _xpack/security/realm/default_file,ldap1/_clear_cache +------------------------------------------------------------ +// CONSOLE diff --git a/x-pack/docs/en/rest-api/security/privileges.asciidoc b/x-pack/docs/en/rest-api/security/privileges.asciidoc new file mode 100644 index 0000000000000..4ec192d633b12 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/privileges.asciidoc @@ -0,0 +1,91 @@ +[role="xpack"] +[[security-api-privileges]] +=== Privilege APIs + +[[security-api-has-privilege]] + +The `has_privileges` API allows you to determine whether the logged in user has +a specified list of privileges. + +==== Request + +`GET _xpack/security/user/_has_privileges` + + +==== Description + +For a list of the privileges that you can specify in this API, +see {xpack-ref}/security-privileges.html[Security Privileges]. + +A successful call returns a JSON structure that shows whether each specified +privilege is assigned to the user. + + +==== Request Body + +`cluster`:: (list) A list of the cluster privileges that you want to check. + +`index`:: +`names`::: (list) A list of indices. +`privileges`::: (list) A list of the privileges that you want to check for the +specified indices. + +==== Authorization + +All users can use this API, but only to determine their own privileges. +To check the privileges of other users, you must use the run as feature. For +more information, see +{xpack-ref}/run-as-privilege.html[Submitting Requests on Behalf of Other Users]. 
+ + +==== Examples + +The following example checks whether the current user has a specific set of +cluster and indices privileges: + +[source,js] +-------------------------------------------------- +GET _xpack/security/user/_has_privileges +{ + "cluster": [ "monitor", "manage" ], + "index" : [ + { + "names": [ "suppliers", "products" ], + "privileges": [ "read" ] + }, + { + "names": [ "inventory" ], + "privileges" : [ "read", "write" ] + } + ] +} +-------------------------------------------------- +// CONSOLE + +The following example output indicates which privileges the "rdeniro" user has: + +[source,js] +-------------------------------------------------- +{ + "username": "rdeniro", + "has_all_requested" : false, + "cluster" : { + "monitor" : true, + "manage" : false + }, + "index" : { + "suppliers" : { + "read" : true + }, + "products" : { + "read" : true + }, + "inventory" : { + "read" : true, + "write" : false + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"rdeniro"/"$body.username"/] +// TESTRESPONSE[s/: false/: true/] diff --git a/x-pack/docs/en/rest-api/security/role-mapping.asciidoc b/x-pack/docs/en/rest-api/security/role-mapping.asciidoc new file mode 100644 index 0000000000000..3844e30c62dc0 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/role-mapping.asciidoc @@ -0,0 +1,404 @@ +[role="xpack"] +[[security-api-role-mapping]] +=== Role Mapping APIs + +The Role Mapping API enables you to add, remove, and retrieve role mappings. + +==== Request + +`GET /_xpack/security/role_mapping` + + +`GET /_xpack/security/role_mapping/` + + +`DELETE /_xpack/security/role_mapping/` + + +`POST /_xpack/security/role_mapping/` + + +`PUT /_xpack/security/role_mapping/` + +==== Description + +Role mappings have _rules_ that identify users and a list of _roles_ that are +granted to those users. + +NOTE: This API does not create roles. Rather, it maps users to existing roles. +Roles can be created by using <> or +{xpack-ref}/defining-roles.html#roles-management-file[roles files]. + +The role mapping rule is a logical condition that is expressed using a JSON DSL. +The DSL supports the following rule types: + +|======================= +| Type | Value Type (child) | Description + +| `any` | An array of rules | If *any* of its children are true, it + evaluates to `true`. +| `all` | An array of rules | If *all* of its children are true, it + evaluates to `true`. +| `field` | An object | See <> +| `except` | A single rule as an object | Only valid as a child of an `all` + rule. If its child is `false`, the + `except` is `true`. +|======================= + +[float] +[[mapping-roles-rule-field]] +===== The Field Rule + +The `field` rule is the primary building block for a role-mapping expression. +It takes a single object as its value and that object must contain a single +member with key _F_ and value _V_. The field rule looks up the value of _F_ +within the user object and then tests whether the user value _matches_ the +provided value _V_. + +The value specified in the field rule can be one of the following types: +[cols="2,5,3m"] +|======================= +| Type | Description | Example + +| Simple String | Exactly matches the provided value. | "esadmin" +| Wildcard String | Matches the provided value using a wildcard. | "*,dc=example,dc=com" +| Regular Expression | Matches the provided value using a + {ref}/query-dsl-regexp-query.html#regexp-syntax[Lucene regexp]. | "/.\*-admin[0-9]*/" +| Number | Matches an equivalent numerical value. 
| 7 +| Null | Matches a null or missing value. | null +| Array | Tests each element in the array in + accordance with the above definitions. + If _any_ of elements match, the match is successful. | ["admin", "operator"] +|======================= + +===== User Fields + +The _user object_ against which rules are evaluated has the following fields: +[cols="1s,,,m"] +|======================= +| Name | Type | Description | Example + +| username | string | The username by which {security} knows this user. | `"username": "jsmith"` +| dn | string | The _Distinguished Name_ of the user. | `"dn": "cn=jsmith,ou=users,dc=example,dc=com",` +| groups | array-of-string | The groups to which the user belongs. | `"groups" : [ "cn=admin,ou=groups,dc=example,dc=com", +"cn=esusers,ou=groups,dc=example,dc=com ]` +| metadata | object | Additional metadata for the user. | `"metadata": { "cn": "John Smith" }` +| realm | object | The realm that authenticated the user. The only field in this object is the realm name. | `"realm": { "name": "ldap1" }` +|======================= + +The `groups` field is multi-valued; a user can belong to many groups. When a +`field` rule is applied against a multi-valued field, it is considered to match +if _at least one_ of the member values matches. For example, the following rule +matches any user who is a member of the `admin` group, regardless of any +other groups they belong to: + +[source, js] +------------------------------------------------------------ +{ "field" : { "groups" : "admin" } } +------------------------------------------------------------ +// NOTCONSOLE + +For additional realm-specific details, see +{xpack-ref}/mapping-roles.html#ldap-role-mapping[Mapping Users and Groups to Roles]. + + +==== Path Parameters + +`name`:: + (string) The distinct name that identifies the role mapping. The name is + used solely as an identifier to facilitate interaction via the API; it does + not affect the behavior of the mapping in any way. If you do not specify this + parameter for the Get Role Mappings API, it returns information about all + role mappings. + + +==== Request Body + +The following parameters can be specified in the body of a PUT or POST request +and pertain to adding a role mapping: + +`enabled` (required):: +(boolean) Mappings that have `enabled` set to `false` are ignored when role +mapping is performed. + +`metadata`:: +(object) Additional metadata that helps define which roles are assigned to each +user. Within the `metadata` object, keys beginning with `_` are reserved for +system usage. + +`roles` (required):: +(list) A list of roles that are granted to the users that match the role-mapping +rules. + +`rules` (required):: +(object) The rules that determine which users should be matched by the mapping. +A rule is a logical condition that is expressed by using a JSON DSL. + + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster privilege. + + +==== Examples + +[[security-api-put-role-mapping]] +To add a role mapping, submit a PUT or POST request to the `/_xpack/security/role_mapping/` endpoint. 
The following example assigns +the "user" role to all users: + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping1 +{ + "roles": [ "user"], + "enabled": true, <1> + "rules": { + "field" : { "username" : "*" } + }, + "metadata" : { <2> + "version" : 1 + } +} +------------------------------------------------------------ +// CONSOLE +<1> Mappings that have `enabled` set to `false` are ignored when role mapping + is performed. +<2> Metadata is optional. + +A successful call returns a JSON structure that shows whether the mapping has +been created or updated. + +[source,js] +-------------------------------------------------- +{ + "role_mapping" : { + "created" : true <1> + } +} +-------------------------------------------------- +// TESTRESPONSE +<1> When an existing mapping is updated, `created` is set to false. + +The following example assigns the "user" and "admin" roles to specific users: + +[source,js] +-------------------------------------------------- +POST /_xpack/security/role_mapping/mapping2 +{ + "roles": [ "user", "admin" ], + "enabled": true, + "rules": { + "field" : { "username" : [ "esadmin01", "esadmin02" ] } + } +} +-------------------------------------------------- +// CONSOLE + +The following example matches any user where either the username is `esadmin` +or the user is in the `cn=admin,dc=example,dc=com` group: + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping3 +{ + "roles": [ "superuser" ], + "enabled": true, + "rules": { + "any": [ + { + "field": { + "username": "esadmin" + } + }, + { + "field": { + "groups": "cn=admins,dc=example,dc=com" + } + } + ] + } +} +------------------------------------------------------------ +// CONSOLE + +The following example matches users who authenticated against a specific realm: +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping4 +{ + "roles": [ "ldap-user" ], + "enabled": true, + "rules": { + "field" : { "realm.name" : "ldap1" } + } +} +------------------------------------------------------------ +// CONSOLE + +The following example matches users within a specific LDAP sub-tree: + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping5 +{ + "roles": [ "example-user" ], + "enabled": true, + "rules": { + "field" : { "dn" : "*,ou=subtree,dc=example,dc=com" } + } +} +------------------------------------------------------------ +// CONSOLE + +The following example matches users within a particular LDAP sub-tree in a +specific realm: + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping6 +{ + "roles": [ "ldap-example-user" ], + "enabled": true, + "rules": { + "all": [ + { "field" : { "dn" : "*,ou=subtree,dc=example,dc=com" } }, + { "field" : { "realm.name" : "ldap1" } } + ] + } +} +------------------------------------------------------------ +// CONSOLE + +The rules can be more complex and include wildcard matching. 
For example, the +following mapping matches any user where *all* of these conditions are met: + +- the _Distinguished Name_ matches the pattern `*,ou=admin,dc=example,dc=com`, + or the username is `es-admin`, or the username is `es-system` +- the user in in the `cn=people,dc=example,dc=com` group +- the user does not have a `terminated_date` + + +[source, js] +------------------------------------------------------------ +POST /_xpack/security/role_mapping/mapping7 +{ + "roles": [ "superuser" ], + "enabled": true, + "rules": { + "all": [ + { + "any": [ + { + "field": { + "dn": "*,ou=admin,dc=example,dc=com" + } + }, + { + "field": { + "username": [ "es-admin", "es-system" ] + } + } + ] + }, + { + "field": { + "groups": "cn=people,dc=example,dc=com" + } + }, + { + "except": { + "field": { + "metadata.terminated_date": null + } + } + } + ] + } +} +------------------------------------------------------------ +// CONSOLE + +[[security-api-get-role-mapping]] +To retrieve a role mapping, issue a GET request to the +`/_xpack/security/role_mapping/` endpoint: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/role_mapping/mapping7 +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +A successful call retrieves an object, where the keys are the +names of the request mappings, and the values are +the JSON representation of those mappings. +If there is no mapping with the requested name, the +response will have status code `404`. + +[source,js] +-------------------------------------------------- +{ + "mapping7": { + "enabled": true, + "roles": [ + "superuser" + ], + "rules": { + "all": [ + { + "any": [ + { + "field": { + "dn": "*,ou=admin,dc=example,dc=com" + } + }, + { + "field": { + "username": [ + "es-admin", + "es-system" + ] + } + } + ] + }, + { + "field": { + "groups": "cn=people,dc=example,dc=com" + } + }, + { + "except": { + "field": { + "metadata.terminated_date": null + } + } + } + ] + }, + "metadata": {} + } +} +-------------------------------------------------- +// TESTRESPONSE + +You can specify multiple mapping names as a comma-separated list. +To retrieve all mappings, omit the name entirely. + +[[security-api-delete-role-mapping]] +To delete a role mapping, submit a DELETE request to the +`/_xpack/security/role_mapping/` endpoint: + +[source,js] +-------------------------------------------------- +DELETE /_xpack/security/role_mapping/mapping1 +-------------------------------------------------- +// CONSOLE +// TEST[setup:role_mapping] + +If the mapping is successfully deleted, the request returns `{"found": true}`. +Otherwise, `found` is set to false. + +[source,js] +-------------------------------------------------- +{ + "found" : true +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/security/roles.asciidoc b/x-pack/docs/en/rest-api/security/roles.asciidoc new file mode 100644 index 0000000000000..d82c260006237 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/roles.asciidoc @@ -0,0 +1,202 @@ +[role="xpack"] +[[security-api-roles]] +=== Role Management APIs + +The Roles API enables you to add, remove, and retrieve roles in the `native` +realm. 
+ +==== Request + +`GET /_xpack/security/role` + + +`GET /_xpack/security/role/` + + +`POST /_xpack/security/role//_clear_cache` + + +`POST /_xpack/security/role/` + + +`PUT /_xpack/security/role/` + + +==== Description + +The Roles API is generally the preferred way to manage roles, rather than using +file-based role management. For more information, see +{xpack-ref}/authorization.html[Configuring Role-based Access Control]. + + +==== Path Parameters + +`name`:: + (string) The name of the role. If you do not specify this parameter, the + Get Roles API returns information about all roles. + + +==== Request Body + +The following parameters can be specified in the body of a PUT or POST request +and pertain to adding a role: + +`cluster`:: (list) A list of cluster privileges. These privileges define the +cluster level actions that users with this role are able to execute. + +`indices`:: (list) A list of indices permissions entries. +`field_security`::: (list) The document fields that the owners of the role have +read access to. For more information, see +{xpack-ref}/field-and-document-access-control.html[Setting Up Field and Document Level Security]. +`names` (required)::: (list) A list of indices (or index name patterns) to which the +permissions in this entry apply. +`privileges`(required)::: (list) The index level privileges that the owners of the role +have on the specified indices. +`query`::: A search query that defines the documents the owners of the role have +read access to. A document within the specified indices must match this query in +order for it to be accessible by the owners of the role. + +`metadata`:: (object) Optional meta-data. Within the `metadata` object, keys +that begin with `_` are reserved for system usage. + +`run_as`:: (list) A list of users that the owners of this role can impersonate. +For more information, see +{xpack-ref}/run-as-privilege.html[Submitting Requests on Behalf of Other Users]. + +For more information, see {xpack-ref}/defining-roles.html[Defining Roles]. + + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster +privilege. + + +==== Examples + +[[security-api-put-role]] +To add a role, submit a PUT or POST request to the `/_xpack/security/role/` +endpoint: + +[source,js] +-------------------------------------------------- +POST /_xpack/security/role/my_admin_role +{ + "cluster": ["all"], + "indices": [ + { + "names": [ "index1", "index2" ], + "privileges": ["all"], + "field_security" : { // optional + "grant" : [ "title", "body" ] + }, + "query": "{\"match\": {\"title\": \"foo\"}}" // optional + } + ], + "run_as": [ "other_user" ], // optional + "metadata" : { // optional + "version" : 1 + } +} +-------------------------------------------------- +// CONSOLE + +A successful call returns a JSON structure that shows whether the role has been +created or updated. + +[source,js] +-------------------------------------------------- +{ + "role": { + "created": true <1> + } +} +-------------------------------------------------- +// TESTRESPONSE +<1> When an existing role is updated, `created` is set to false. 
+ +[[security-api-get-role]] +To retrieve a role from the `native` Security realm, issue a GET request to the +`/_xpack/security/role/` endpoint: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/role/my_admin_role +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +A successful call returns an array of roles with the JSON representation of the +role. If the role is not defined in the `native` realm, the request 404s. + +[source,js] +-------------------------------------------------- +{ + "my_admin_role": { + "cluster" : [ "all" ], + "indices" : [ { + "names" : [ "index1", "index2" ], + "privileges" : [ "all" ], + "field_security" : { + "grant" : [ "title", "body" ] + }, + "query" : "{\"match\": {\"title\": \"foo\"}}" + } ], + "run_as" : [ "other_user" ], + "metadata" : { + "version" : 1 + }, + "transient_metadata": { + "enabled": true + } + } +} +-------------------------------------------------- +// TESTRESPONSE + +You can specify multiple roles as a comma-separated list. To retrieve all roles, +omit the role name. + +[source,js] +-------------------------------------------------- +# Retrieve roles "r1", "r2", and "my_admin_role" +GET /_xpack/security/role/r1,r2,my_admin_role + +# Retrieve all roles +GET /_xpack/security/role +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +NOTE: If single role is requested, that role is returned as the response. When +requesting multiple roles, an object is returned holding the found roles, each +keyed by the relevant role name. + +[[security-api-delete-role]] +To delete a role, submit a DELETE request to the `/_xpack/security/role/` +endpoint: + +[source,js] +-------------------------------------------------- +DELETE /_xpack/security/role/my_admin_role +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +If the role is successfully deleted, the request returns `{"found": true}`. +Otherwise, `found` is set to false. + +[source,js] +-------------------------------------------------- +{ + "found" : true +} +-------------------------------------------------- +// TESTRESPONSE + +[[security-api-clear-role-cache]] +The Clear Roles Cache API evicts roles from the native role cache. To clear the +cache for a role, submit a POST request `/_xpack/security/role//_clear_cache` +endpoint: + +[source,js] +-------------------------------------------------- +POST /_xpack/security/role/my_admin_role/_clear_cache +-------------------------------------------------- +// CONSOLE diff --git a/x-pack/docs/en/rest-api/security/ssl.asciidoc b/x-pack/docs/en/rest-api/security/ssl.asciidoc new file mode 100644 index 0000000000000..f7a40c6d87607 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/ssl.asciidoc @@ -0,0 +1,111 @@ +[role="xpack"] +[[security-api-ssl]] +=== SSL Certificate API + +The `certificates` API enables you to retrieve information about the X.509 +certificates that are used to encrypt communications in your {es} cluster. + +==== Request + +`GET /_xpack/ssl/certificates` + + +==== Description + +For more information about how certificates are configured in conjunction with +Transport Layer Security (TLS), see +{xpack-ref}/ssl-tls.html[Setting up SSL/TLS on a cluster]. 
+
+The API returns a list that includes certificates from all TLS contexts
+including:
+
+* {xpack} default TLS settings
+* Settings for transport and HTTP interfaces
+* TLS settings that are used within authentication realms
+* TLS settings for remote monitoring exporters
+
+The list includes certificates that are used for configuring trust, such as
+those configured in the `xpack.ssl.truststore` and
+`xpack.ssl.certificate_authorities` settings. It also includes certificates
+that are used for configuring server identity, such as the `xpack.ssl.keystore`
+and `xpack.ssl.certificate` settings.
+
+The list does not include certificates that are sourced from the default SSL
+context of the Java Runtime Environment (JRE), even if those certificates are in
+use within {xpack}.
+
+If {xpack} is configured to use a keystore or truststore, the API output
+includes all certificates in that store, even though some of the certificates
+might not be in active use within the cluster.
+
+==== Results
+
+The response is an array of objects, with each object representing a
+single certificate. The fields in each object are:
+
+`path`:: (string) The path to the certificate, as configured in the
+`elasticsearch.yml` file.
+`format`:: (string) The format of the file. One of: `jks`, `PKCS12`, `PEM`.
+`alias`:: (string) If the path refers to a container file (a jks keystore or a
+PKCS#12 file), the alias of the certificate. Otherwise, null.
+`subject_dn`:: (string) The Distinguished Name of the certificate's subject.
+`serial_number`:: (string) The hexadecimal representation of the certificate's
+serial number.
+`has_private_key`:: (boolean) If {xpack} has access to the private key for this
+certificate, this field has a value of `true`.
+`expiry`:: (string) The ISO formatted date of the certificate's expiry
+(not-after) date.
+
+==== Authorization
+
+If {security} is enabled, you must have `monitor` cluster privileges to use this
+API. For more information, see
+{xpack-ref}/security-privileges.html[Security Privileges].
+ + +==== Examples + +The following example provides information about the certificates on a single +node of {es}: + +[source,js] +-------------------------------------------------- +GET /_xpack/ssl/certificates +-------------------------------------------------- +// CONSOLE +// TEST[skip:todo] + +The API returns the following results: +[source,js] +---- +[ + { + "path": "certs/elastic-certificates.p12", + "format": "PKCS12", + "alias": "instance", + "subject_dn": "CN=Elastic Certificate Tool Autogenerated CA", + "serial_number": "a20f0ee901e8f69dc633ff633e5cd5437cdb4137", + "has_private_key": false, + "expiry": "2021-01-15T20:42:49.000Z" + }, + { + "path": "certs/elastic-certificates.p12", + "format": "PKCS12", + "alias": "ca", + "subject_dn": "CN=Elastic Certificate Tool Autogenerated CA", + "serial_number": "a20f0ee901e8f69dc633ff633e5cd5437cdb4137", + "has_private_key": false, + "expiry": "2021-01-15T20:42:49.000Z" + }, + { + "path": "certs/elastic-certificates.p12", + "format": "PKCS12", + "alias": "instance", + "subject_dn": "CN=instance", + "serial_number": "fc1905e1494dc5230218d079c47a617088f84ce0", + "has_private_key": true, + "expiry": "2021-01-15T20:44:32.000Z" + } +] +---- diff --git a/x-pack/docs/en/rest-api/security/tokens.asciidoc b/x-pack/docs/en/rest-api/security/tokens.asciidoc new file mode 100644 index 0000000000000..70f255ead37c0 --- /dev/null +++ b/x-pack/docs/en/rest-api/security/tokens.asciidoc @@ -0,0 +1,158 @@ +[role="xpack"] +[[security-api-tokens]] +=== Token Management APIs + +The `token` API enables you to create and invalidate bearer tokens for access +without requiring basic authentication. + +==== Request + +`POST /_xpack/security/oauth2/token` + + +`DELETE /_xpack/security/oauth2/token` + +==== Description + +The tokens are created by the {es} Token Service, which is automatically enabled +when you configure TLS on the HTTP interface. See <>. Alternatively, +you can explicitly enable the `xpack.security.authc.token.enabled` setting. When +you are running in production mode, a bootstrap check prevents you from enabling +the token service unless you also enable TLS on the HTTP interface. + +The Get Token API takes the same parameters as a typical OAuth 2.0 token API +except for the use of a JSON request body. + +A successful Get Token API call returns a JSON structure that contains the access +token, the amount of time (seconds) that the token expires in, the type, and the +scope if available. + +The tokens returned by the Get Token API have a finite period of time for which +they are valid and after that time period, they can no longer be used. That time +period is defined by the `xpack.security.authc.token.timeout` setting. For more +information, see <>. + +If you want to invalidate a token immediately, you can do so by using the Delete +Token API. + + +==== Request Body + +The following parameters can be specified in the body of a POST request and +pertain to creating a token: + +`grant_type`:: +(string) The type of grant. Currently only the `password` grant type is supported. + +`password` (required):: +(string) The user's password. + +`scope`:: +(string) The scope of the token. Currently tokens are only issued for a scope of +`FULL` regardless of the value sent with the request. + +`username` (required):: +(string) The username that identifies the user. + +The following parameters can be specified in the body of a DELETE request and +pertain to deleting a token: + +`token`:: +(string) An access token. 
+ +==== Examples +[[security-api-get-token]] +To obtain a token, submit a POST request to the `/_xpack/security/oauth2/token` +endpoint. + +[source,js] +-------------------------------------------------- +POST /_xpack/security/oauth2/token +{ + "grant_type" : "password", + "username" : "test_admin", + "password" : "x-pack-test-password" +} +-------------------------------------------------- +// CONSOLE + +The following example output contains the access token, the amount of time (in +seconds) that the token expires in, and the type: + +[source,js] +-------------------------------------------------- +{ + "access_token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", + "type" : "Bearer", + "expires_in" : 1200, + "refresh_token": "vLBPvmAB6KvwvJZr27cS" +} +-------------------------------------------------- +// TESTRESPONSE[s/dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==/$body.access_token/] +// TESTRESPONSE[s/vLBPvmAB6KvwvJZr27cS/$body.refresh_token/] + +The token returned by this API can be used by sending a request with a +`Authorization` header with a value having the prefix `Bearer ` followed +by the value of the `access_token`. + +[source,shell] +-------------------------------------------------- +curl -H "Authorization: Bearer dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==" http://localhost:9200/_cluster/health +-------------------------------------------------- + +[[security-api-refresh-token]] +To extend the life of an existing token, the token api may be called again with the refresh +token within 24 hours of the token's creation. + +[source,js] +-------------------------------------------------- +POST /_xpack/security/oauth2/token +{ + "grant_type": "refresh_token", + "refresh_token": "vLBPvmAB6KvwvJZr27cS" +} +-------------------------------------------------- +// CONSOLE +// TEST[s/vLBPvmAB6KvwvJZr27cS/$body.refresh_token/] +// TEST[continued] + +The API will return a new token and refresh token. Each refresh token may only be used one time. + +[source,js] +-------------------------------------------------- +{ + "access_token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", + "type" : "Bearer", + "expires_in" : 1200, + "refresh_token": "vLBPvmAB6KvwvJZr27cS" +} +-------------------------------------------------- +// TESTRESPONSE[s/dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==/$body.access_token/] +// TESTRESPONSE[s/vLBPvmAB6KvwvJZr27cS/$body.refresh_token/] + +[[security-api-invalidate-token]] +If a token must be invalidated immediately, you can do so by submitting a DELETE +request to `/_xpack/security/oauth2/token`. For example: + +[source,js] +-------------------------------------------------- +DELETE /_xpack/security/oauth2/token +{ + "token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==" +} +-------------------------------------------------- +// CONSOLE +// TEST[s/dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==/$body.access_token/] +// TEST[continued] + +A successful call returns a JSON structure that indicates whether the token +has already been invalidated. 
+ +[source,js] +-------------------------------------------------- +{ + "created" : true <1> +} +-------------------------------------------------- +// TESTRESPONSE + +<1> When a token has already been invalidated, `created` is set to false. diff --git a/x-pack/docs/en/rest-api/security/users.asciidoc b/x-pack/docs/en/rest-api/security/users.asciidoc new file mode 100644 index 0000000000000..926193481afbc --- /dev/null +++ b/x-pack/docs/en/rest-api/security/users.asciidoc @@ -0,0 +1,225 @@ +[role="xpack"] +[[security-api-users]] +=== User Management APIs + +The `user` API enables you to create, read, update, and delete users from the +`native` realm. These users are commonly referred to as *native users*. + + +==== Request + +`GET /_xpack/security/user` + + +`GET /_xpack/security/user/` + + +`DELETE /_xpack/security/user/` + + +`POST /_xpack/security/user/` + + +`PUT /_xpack/security/user/` + + +`PUT /_xpack/security/user//_disable` + + +`PUT /_xpack/security/user//_enable` + + +`PUT /_xpack/security/user//_password` + + +==== Description + +You can use the PUT user API to create or update users. When updating a user, +you can update everything but its `username` and `password`. To change a user's +password, use the <>. + +[[username-validation]] +NOTE: Usernames must be at least 1 and no more than 1024 characters. They can +contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces, punctuation, and +printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block]. +Leading or trailing whitespace is not allowed. + +==== Path Parameters + +`username`:: + (string) An identifier for the user. If you omit this parameter from a Get + User API request, it retrieves information about all users. + + +==== Request Body + +The following parameters can be specified in the body of a POST or PUT request +and pertain to creating a user: + +`enabled`:: +(boolean) Specifies whether the user is enabled. The default value is `true`. + +`email`:: +(string) The email of the user. + +`full_name`:: +(string) The full name of the user. + +`metadata`:: +(object) Arbitrary metadata that you want to associate with the user. + +`password` (required):: +(string) The user's password. Passwords must be at least 6 characters long. + +`roles` (required):: +(list) A set of roles the user has. The roles determine the user's access +permissions. To create a user without any roles, specify an empty list: `[]`. + +==== Authorization + +To use this API, you must have at least the `manage_security` cluster privilege. + + +==== Examples + +[[security-api-put-user]] +To add a user, submit a PUT or POST request to the `/_xpack/security/user/` +endpoint. + +[source,js] +-------------------------------------------------- +POST /_xpack/security/user/jacknich +{ + "password" : "j@rV1s", + "roles" : [ "admin", "other_role1" ], + "full_name" : "Jack Nicholson", + "email" : "jacknich@example.com", + "metadata" : { + "intelligence" : 7 + } +} +-------------------------------------------------- +// CONSOLE + +A successful call returns a JSON structure that shows whether the user has been +created or updated. + +[source,js] +-------------------------------------------------- +{ + "user": { + "created" : true <1> + } +} +-------------------------------------------------- +// TESTRESPONSE +<1> When an existing user is updated, `created` is set to false. + +After you add a user through the Users API, requests from that user can be +authenticated. 
For example: + +[source,shell] +-------------------------------------------------- +curl -u jacknich:j@rV1s http://localhost:9200/_cluster/health +-------------------------------------------------- + +[[security-api-get-user]] +To retrieve a native user, submit a GET request to the `/_xpack/security/user/` +endpoint: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/user/jacknich +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +A successful call returns an array of users with the JSON representation of the +user. Note that user passwords are not included. + +[source,js] +-------------------------------------------------- +{ + "jacknich": { <1> + "username" : "jacknich", + "roles" : [ "admin", "other_role1" ], + "full_name" : "Jack Nicholson", + "email" : "jacknich@example.com", + "enabled": true, + "metadata" : { + "intelligence" : 7 + } + } +} +-------------------------------------------------- +// TESTRESPONSE +<1> If the user is not defined in the `native` realm, the request 404s. + +You can specify multiple usernames as a comma-separated list: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/user/jacknich,rdinero +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +Omit the username to retrieve all users: + +[source,js] +-------------------------------------------------- +GET /_xpack/security/user +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[[security-api-reset-user-password]] +To reset the password for a user, submit a PUT request to the +`/_xpack/security/user//_password` endpoint: + +[source,js] +-------------------------------------------------- +PUT /_xpack/security/user/jacknich/_password +{ + "password" : "s3cr3t" +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[[security-api-disable-user]] +To disable a user, submit a PUT request to the +`/_xpack/security/user//_disable` endpoint: + +[source,js] +-------------------------------------------------- +PUT /_xpack/security/user/jacknich/_disable +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[[security-api-enable-user]] +To enable a user, submit a PUT request to the +`/_xpack/security/user//_enable` endpoint: + +[source,js] +-------------------------------------------------- +PUT /_xpack/security/user/jacknich/_enable +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[[security-api-delete-user]] +To delete a user, submit a DELETE request to the `/_xpack/security/user/` +endpoint: + +[source,js] +-------------------------------------------------- +DELETE /_xpack/security/user/jacknich +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +If the user is successfully deleted, the request returns `{"found": true}`. +Otherwise, `found` is set to false. 
+ +[source,js] +-------------------------------------------------- +{ + "found" : true +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/watcher.asciidoc b/x-pack/docs/en/rest-api/watcher.asciidoc new file mode 100644 index 0000000000000..2fcb9b9ca190b --- /dev/null +++ b/x-pack/docs/en/rest-api/watcher.asciidoc @@ -0,0 +1,25 @@ +[role="xpack"] +[[watcher-api]] +== Watcher APIs + +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +include::watcher/put-watch.asciidoc[] +include::watcher/get-watch.asciidoc[] +include::watcher/delete-watch.asciidoc[] +include::watcher/execute-watch.asciidoc[] +include::watcher/ack-watch.asciidoc[] +include::watcher/activate-watch.asciidoc[] +include::watcher/deactivate-watch.asciidoc[] +include::watcher/stats.asciidoc[] +include::watcher/stop.asciidoc[] +include::watcher/start.asciidoc[] diff --git a/x-pack/docs/en/rest-api/watcher/ack-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/ack-watch.asciidoc new file mode 100644 index 0000000000000..f599f9f14459d --- /dev/null +++ b/x-pack/docs/en/rest-api/watcher/ack-watch.asciidoc @@ -0,0 +1,283 @@ +[role="xpack"] +[[watcher-api-ack-watch]] +=== Ack Watch API + +{xpack-ref}/actions.html#actions-ack-throttle[Acknowledging a watch] enables you +to manually throttle execution of the watch's actions. An action's +_acknowledgement state_ is stored in the `status.actions..ack.state` +structure. + +IMPORTANT: If the specified watch is currently being executed, this API will return +an error. The reason for this is to prevent overwriting of the watch status from a watch +execution. + +[float] +==== Request + +`PUT _xpack/watcher/watch//_ack` + + +`PUT _xpack/watcher/watch//_ack/` + +[float] +==== Path Parameters + +`action_id`:: + (list) A comma-separated list of the action IDs to acknowledge. If you omit + this parameter, all of the actions of the watch are acknowledged. + +`watch_id` (required):: + (string) Identifier for the watch. + +[float] +==== Authorization + +You must have `manage_watcher` cluster privileges to use this API. For more +information, see {xpack-ref}/security-privileges.html[Security Privileges]. + + +[float] +==== Examples + +To demonstrate let's create a new watch: + +[source,js] +-------------------------------------------------- +PUT _xpack/watcher/watch/my_watch +{ + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "payload": { + "send": "yes" + } + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "throttle_period": "15m", + "index": { + "index": "test", + "doc_type": "test2" + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TESTSETUP + +The current status of a watch and the state of its actions is returned with the +watch definition when you call the <>: + +[source,js] +-------------------------------------------------- +GET _xpack/watcher/watch/my_watch +-------------------------------------------------- +// CONSOLE + +The action state of a newly-created watch is `awaits_successful_execution`: + +[source,js] +-------------------------------------------------- +{ + "found": true, + "_version": 1, + "_id": "my_watch", + "status": { + "version": 1, + "actions": { + "test_index": { + "ack": { + "timestamp": "2015-05-26T18:04:27.723Z", + "state": "awaits_successful_execution" + } + } + }, + "state": ... + }, + "watch": ... 
+} +-------------------------------------------------- +// TESTRESPONSE[s/"state": \.\.\./"state": "$body.status.state"/] +// TESTRESPONSE[s/"watch": \.\.\./"watch": "$body.watch"/] +// TESTRESPONSE[s/"timestamp": "2015-05-26T18:04:27.723Z"/"timestamp": "$body.status.actions.test_index.ack.timestamp"/] + +When the watch executes and the condition matches, the value of the `ack.state` +changes to `ackable`. Let's force execution of the watch and fetch it again to +check the status: + +[source,js] +-------------------------------------------------- +POST _xpack/watcher/watch/my_watch/_execute +{ + "record_execution" : true +} + +GET _xpack/watcher/watch/my_watch +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +and the action is now in `ackable` state: + +[source,js] +-------------------------------------------------- +{ + "found": true, + "_id": "my_watch", + "_version": 2, + "status": { + "version": 2, + "actions": { + "test_index": { + "ack": { + "timestamp": "2015-05-26T18:04:27.723Z", + "state": "ackable" + }, + "last_execution" : { + "timestamp": "2015-05-25T18:04:27.723Z", + "successful": true + }, + "last_successful_execution" : { + "timestamp": "2015-05-25T18:04:27.723Z", + "successful": true + } + } + }, + "state": ..., + "execution_state": "executed", + "last_checked": ..., + "last_met_condition": ... + }, + "watch": ... +} +-------------------------------------------------- +// TESTRESPONSE[s/"state": \.\.\./"state": "$body.status.state"/] +// TESTRESPONSE[s/"watch": \.\.\./"watch": "$body.watch"/] +// TESTRESPONSE[s/"last_checked": \.\.\./"last_checked": "$body.status.last_checked"/] +// TESTRESPONSE[s/"last_met_condition": \.\.\./"last_met_condition": "$body.status.last_met_condition"/] +// TESTRESPONSE[s/"timestamp": "2015-05-26T18:04:27.723Z"/"timestamp": "$body.status.actions.test_index.ack.timestamp"/] +// TESTRESPONSE[s/"timestamp": "2015-05-25T18:04:27.723Z"/"timestamp": "$body.status.actions.test_index.last_execution.timestamp"/] + +Now we can acknowledge it: + +[source,js] +-------------------------------------------------- +PUT _xpack/watcher/watch/my_watch/_ack/test_index +GET _xpack/watcher/watch/my_watch +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[source,js] +-------------------------------------------------- +{ + "found": true, + "_id": "my_watch", + "_version": 3, + "status": { + "version": 3, + "actions": { + "test_index": { + "ack": { + "timestamp": "2015-05-26T18:04:27.723Z", + "state": "acked" + }, + "last_execution" : { + "timestamp": "2015-05-25T18:04:27.723Z", + "successful": true + }, + "last_successful_execution" : { + "timestamp": "2015-05-25T18:04:27.723Z", + "successful": true + } + } + }, + "state": ..., + "execution_state": "executed", + "last_checked": ..., + "last_met_condition": ... + }, + "watch": ... 
+} +-------------------------------------------------- +// TESTRESPONSE[s/"state": \.\.\./"state": "$body.status.state"/] +// TESTRESPONSE[s/"watch": \.\.\./"watch": "$body.watch"/] +// TESTRESPONSE[s/"last_checked": \.\.\./"last_checked": "$body.status.last_checked"/] +// TESTRESPONSE[s/"last_met_condition": \.\.\./"last_met_condition": "$body.status.last_met_condition"/] +// TESTRESPONSE[s/"timestamp": "2015-05-26T18:04:27.723Z"/"timestamp": "$body.status.actions.test_index.ack.timestamp"/] +// TESTRESPONSE[s/"timestamp": "2015-05-25T18:04:27.723Z"/"timestamp": "$body.status.actions.test_index.last_execution.timestamp"/] + +Acknowledging an action throttles further executions of that action until its +`ack.state` is reset to `awaits_successful_execution`. This happens when the +condition of the watch is not met (the condition evaluates to `false`). + +You can acknowledge multiple actions by assigning the `actions` parameter a +comma-separated list of action ids: + +[source,js] +-------------------------------------------------- +POST _xpack/watcher/watch/my_watch/_ack/action1,action2 +-------------------------------------------------- +// CONSOLE + +To acknowledge all of the actions of a watch, simply omit the `actions` +parameter: + +[source,js] +-------------------------------------------------- +POST _xpack/watcher/watch/my_watch/_ack +-------------------------------------------------- +// TEST[s/^/POST _xpack\/watcher\/watch\/my_watch\/_execute\n{ "record_execution" : true }\n/] +// CONSOLE + + +The response looks like a get watch response, but only contains the status: + +[source,js] +-------------------------------------------------- +{ + "status": { + "state": { + "active": true, + "timestamp": "2015-05-26T18:04:27.723Z" + }, + "last_checked": "2015-05-26T18:04:27.753Z", + "last_met_condition": "2015-05-26T18:04:27.763Z", + "actions": { + "test_index": { + "ack" : { + "timestamp": "2015-05-26T18:04:27.713Z", + "state": "acked" + }, + "last_execution" : { + "timestamp": "2015-05-25T18:04:27.733Z", + "successful": true + }, + "last_successful_execution" : { + "timestamp": "2015-05-25T18:04:27.773Z", + "successful": true + } + } + }, + "execution_state": "executed", + "version": 2 + } +} + +-------------------------------------------------- +// TESTRESPONSE[s/"last_checked": "2015-05-26T18:04:27.753Z"/"last_checked": "$body.status.last_checked"/] +// TESTRESPONSE[s/"last_met_condition": "2015-05-26T18:04:27.763Z"/"last_met_condition": "$body.status.last_met_condition"/] +// TESTRESPONSE[s/"timestamp": "2015-05-26T18:04:27.723Z"/"timestamp": "$body.status.state.timestamp"/] +// TESTRESPONSE[s/"timestamp": "2015-05-26T18:04:27.713Z"/"timestamp": "$body.status.actions.test_index.ack.timestamp"/] +// TESTRESPONSE[s/"timestamp": "2015-05-25T18:04:27.733Z"/"timestamp": "$body.status.actions.test_index.last_execution.timestamp"/] +// TESTRESPONSE[s/"timestamp": "2015-05-25T18:04:27.773Z"/"timestamp": "$body.status.actions.test_index.last_successful_execution.timestamp"/] diff --git a/x-pack/docs/en/rest-api/watcher/activate-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/activate-watch.asciidoc new file mode 100644 index 0000000000000..e853998415e96 --- /dev/null +++ b/x-pack/docs/en/rest-api/watcher/activate-watch.asciidoc @@ -0,0 +1,87 @@ +[role="xpack"] +[[watcher-api-activate-watch]] +=== Activate Watch API + +A watch can be either +{xpack-ref}/how-watcher-works.html#watch-active-state[active or inactive]. This +API enables you to activate a currently inactive watch. 
+ +[float] +==== Request + +`PUT _xpack/watcher/watch//_activate` + +[float] +==== Path Parameters + +`watch_id` (required):: + (string) Identifier for the watch. + +[float] +==== Authorization + +You must have `manage_watcher` cluster privileges to use this API. For more +information, see {xpack-ref}/security-privileges.html[Security Privileges]. + +[float] +==== Examples + +The status of an inactive watch is returned with the watch definition when you +call the <>: + +[source,js] +-------------------------------------------------- +GET _xpack/watcher/watch/my_watch +-------------------------------------------------- +// CONSOLE +// TEST[setup:my_inactive_watch] + +[source,js] +-------------------------------------------------- +{ + "found": true, + "_id": "my_watch", + "_version": 1, + "status": { + "state" : { + "active" : false, + "timestamp" : "2015-08-20T12:21:32.734Z" + }, + "actions": ..., + "version": 1 + }, + "watch": ... +} +-------------------------------------------------- +// TESTRESPONSE[s/2015-08-20T12:21:32.734Z/$body.status.state.timestamp/] +// TESTRESPONSE[s/"actions": \.\.\./"actions": "$body.status.actions"/] +// TESTRESPONSE[s/"watch": \.\.\./"watch": "$body.watch"/] +// TESTRESPONSE[s/"version": 1/"version": $body.status.version/] + +You can activate the watch by executing the following API call: + +[source,js] +-------------------------------------------------- +PUT _xpack/watcher/watch/my_watch/_activate +-------------------------------------------------- +// CONSOLE +// TEST[setup:my_inactive_watch] + +The new state of the watch is returned as part of its overall status: + +[source,js] +-------------------------------------------------- +{ + "status": { + "state" : { + "active" : true, + "timestamp" : "2015-09-04T08:39:46.816Z" + }, + "actions": ..., + "version": 1 + } +} +-------------------------------------------------- +// TESTRESPONSE[s/2015-09-04T08:39:46.816Z/$body.status.state.timestamp/] +// TESTRESPONSE[s/"actions": \.\.\./"actions": "$body.status.actions"/] +// TESTRESPONSE[s/"version": 1/"version": $body.status.version/] diff --git a/x-pack/docs/en/rest-api/watcher/deactivate-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/deactivate-watch.asciidoc new file mode 100644 index 0000000000000..ab5a7200907e1 --- /dev/null +++ b/x-pack/docs/en/rest-api/watcher/deactivate-watch.asciidoc @@ -0,0 +1,86 @@ +[role="xpack"] +[[watcher-api-deactivate-watch]] +=== Deactivate Watch API + +A watch can be either +{xpack-ref}/how-watcher-works.html#watch-active-state[active or inactive]. This +API enables you to deactivate a currently active watch. + +[float] +==== Request + +`PUT _xpack/watcher/watch//_deactivate` + +[float] +==== Path Parameters + +`watch_id` (required):: + (string) Identifier for the watch. + +[float] +==== Authorization +You must have `manage_watcher` cluster privileges to use this API. For more +information, see {xpack-ref}/security-privileges.html[Security Privileges]. 
+ +[float] +==== Examples + +The status of an active watch is returned with the watch definition when you +call the <>: + +[source,js] +-------------------------------------------------- +GET _xpack/watcher/watch/my_watch +-------------------------------------------------- +// CONSOLE +// TEST[setup:my_active_watch] + +[source,js] +-------------------------------------------------- +{ + "found": true, + "_id": "my_watch", + "_version": 1, + "status": { + "state" : { + "active" : true, + "timestamp" : "2015-08-20T12:21:32.734Z" + }, + "actions": ..., + "version": 1 + }, + "watch": ... +} +-------------------------------------------------- +// TESTRESPONSE[s/2015-08-20T12:21:32.734Z/$body.status.state.timestamp/] +// TESTRESPONSE[s/"actions": \.\.\./"actions": "$body.status.actions"/] +// TESTRESPONSE[s/"watch": \.\.\./"watch": "$body.watch"/] +// TESTRESPONSE[s/"version": 1/"version": $body.status.version/] + +You can deactivate the watch by executing the following API call: + +[source,js] +-------------------------------------------------- +PUT _xpack/watcher/watch/my_watch/_deactivate +-------------------------------------------------- +// CONSOLE +// TEST[setup:my_active_watch] + +The new state of the watch is returned as part of its overall status: + +[source,js] +-------------------------------------------------- +{ + "status": { + "state" : { + "active" : false, + "timestamp" : "2015-09-04T08:39:46.816Z" + }, + "actions": ..., + "version": 1 + } +} +-------------------------------------------------- +// TESTRESPONSE[s/2015-09-04T08:39:46.816Z/$body.status.state.timestamp/] +// TESTRESPONSE[s/"actions": \.\.\./"actions": "$body.status.actions"/] +// TESTRESPONSE[s/"version": 1/"version": $body.status.version/] diff --git a/x-pack/docs/en/rest-api/watcher/delete-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/delete-watch.asciidoc new file mode 100644 index 0000000000000..bedc60897e723 --- /dev/null +++ b/x-pack/docs/en/rest-api/watcher/delete-watch.asciidoc @@ -0,0 +1,60 @@ +[role="xpack"] +[[watcher-api-delete-watch]] +=== Delete Watch API + +The DELETE watch API removes a watch from {watcher}. + +[float] +==== Request + +`DELETE _xpack/watcher/watch/` + +[float] +==== Description + +When the watch is removed, the document representing the watch in the `.watches` +index is gone and it will never be run again. + +Please note that deleting a watch **does not** delete any watch execution records +related to this watch from the watch history. + +IMPORTANT: Deleting a watch must be done via this API only. Do not delete the + watch directly from the `.watches` index using the Elasticsearch + DELETE Document API. When {security} is enabled, make sure no `write` + privileges are granted to anyone over the `.watches` index. + +[float] +==== Path Parameters + +`watch_id` (required):: + (string) Identifier for the watch. + +[float] +==== Authorization + +You must have `manage_watcher` cluster privileges to use this API. For more +information, see {xpack-ref}/security-privileges.html[Security Privileges]. 
+ +[float] +==== Examples + +The following example deletes a watch with the `my-watch` id: + +[source,js] +-------------------------------------------------- +DELETE _xpack/watcher/watch/my_watch +-------------------------------------------------- +// CONSOLE +// TEST[setup:my_active_watch] + +Response: + +[source,js] +-------------------------------------------------- +{ + "found": true, + "_id": "my_watch", + "_version": 2 +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/watcher/execute-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/execute-watch.asciidoc new file mode 100644 index 0000000000000..91cd89bca6d41 --- /dev/null +++ b/x-pack/docs/en/rest-api/watcher/execute-watch.asciidoc @@ -0,0 +1,387 @@ +[role="xpack"] +[[watcher-api-execute-watch]] +=== Execute Watch API + +The execute watch API forces the execution of a stored watch. It can be used to +force execution of the watch outside of its triggering logic, or to simulate the +watch execution for debugging purposes. + +[float] +==== Request + +`POST _xpack/watcher/watch//_execute` + + +`POST _xpack/watcher/watch/_execute` + +[float] +==== Description + +For testing and debugging purposes, you also have fine-grained control on how +the watch runs. You can execute the watch without executing all of its actions +or alternatively by simulating them. You can also force execution by ignoring +the watch condition and control whether a watch record would be written to the +watch history after execution. + +[float] +[[watcher-api-execute-inline-watch]] +===== Inline Watch Execution + +You can use the Execute API to execute watches that are not yet registered by +specifying the watch definition inline. This serves as great tool for testing +and debugging your watches prior to adding them to {watcher}. + +[float] +==== Path Parameters + +`watch_id`:: + (string) Identifier for the watch. + +[float] +==== Query Parameters + +`debug`:: + (boolean) Defines whether the watch runs in debug mode. The default value is + `false`. + +[float] +==== Request Body + +This API supports the following fields: + +[cols=",^,^,", options="header"] +|====== +| Name | Required | Default | Description + +| `trigger_data` | no | | This structure is parsed as the data of the trigger event + that will be used during the watch execution + +| `ignore_condition` | no | false | When set to `true`, the watch execution uses the + {xpack-ref}/condition-always.html[Always Condition]. + This can also be specified as a HTTP parameter. + +| `alternative_input` | no | null | When present, the watch uses this object as a payload + instead of executing its own input. + +| `action_modes` | no | null | Determines how to handle the watch actions as part of the + watch execution. See <> + for more information. + +| `record_execution` | no | false | When set to `true`, the watch record representing the watch + execution result is persisted to the `.watcher-history` + index for the current time. In addition, the status of the + watch is updated, possibly throttling subsequent executions. + This can also be specified as a HTTP parameter. + +| `watch` | no | null | When present, this + {xpack-ref}/how-watcher-works.html#watch-definition[watch] is used + instead of the one specified in the request. This watch is + not persisted to the index and record_execution cannot be set. 
+|======
+
+[float]
+[[watcher-api-execute-watch-action-mode]]
+===== Action Execution Modes
+
+Action modes define how actions are handled during the watch execution. There
+are five possible modes an action can be associated with:
+
+[options="header"]
+|======
+| Name              | Description
+
+| `simulate`        | The action execution is simulated. Each action type
+                      defines its own simulation operation mode. For example, the
+                      {xpack-ref}/actions-email.html[email] action creates
+                      the email that would have been sent but does not actually
+                      send it. In this mode, the action might be throttled if the
+                      current state of the watch indicates it should be.
+
+| `force_simulate`  | Similar to the `simulate` mode, except the action is
+                      not throttled even if the current state of the watch
+                      indicates it should be.
+
+| `execute`         | Executes the action as it would have been executed if the
+                      watch had been triggered by its own trigger. The
+                      execution might be throttled if the current state of the
+                      watch indicates it should be.
+
+| `force_execute`   | Similar to the `execute` mode, except the action is not
+                      throttled even if the current state of the watch indicates
+                      it should be.
+
+| `skip`            | The action is skipped and is not executed or simulated.
+                      Effectively forces the action to be throttled.
+|======
+
+[float]
+==== Authorization
+You must have `manage_watcher` cluster privileges to use this API. For more
+information, see {xpack-ref}/security-privileges.html[Security Privileges].
+
+[float]
+==== Security Integration
+
+When {security} is enabled on your Elasticsearch cluster, watches are executed
+with the privileges of the user that stored the watches. If your user is
+allowed to read index `a`, but not index `b`, the exact same set of rules
+applies during execution of a watch.
+
+When using the execute watch API, the authorization data of the user that
+called the API is used as a base, instead of the authorization data of the
+user who stored the watch.
+
+[float]
+==== Examples
+
+The following example executes the `my_watch` watch:
+
+[source,js]
+--------------------------------------------------
+POST _xpack/watcher/watch/my_watch/_execute
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:my_active_watch]
+
+The following example shows a more comprehensive execution of the `my_watch` watch:
+
+[source,js]
+--------------------------------------------------
+POST _xpack/watcher/watch/my_watch/_execute
+{
+  "trigger_data" : { <1>
+     "triggered_time" : "now",
+     "scheduled_time" : "now"
+  },
+  "alternative_input" : { <2>
+    "foo" : "bar"
+  },
+  "ignore_condition" : true, <3>
+  "action_modes" : {
+    "my-action" : "force_simulate" <4>
+  },
+  "record_execution" : true <5>
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:my_active_watch]
+<1> The triggered and scheduled times are provided.
+<2> The input as defined by the watch is ignored and instead the provided input
+    is used as the execution payload.
+<3> The condition as defined by the watch is ignored and is assumed to
+    evaluate to `true`.
+<4> Forces the simulation of `my-action`. Forcing the simulation means that
+    throttling is ignored and the watch is simulated by {watcher} instead of
+    being executed normally.
+<5> The execution of the watch creates a watch record in the watch history,
+    and the throttling state of the watch is potentially updated accordingly.
+ +This is an example of the output: + +[source,js] +-------------------------------------------------- +{ + "_id": "my_watch_0-2015-06-02T23:17:55.124Z", <1> + "watch_record": { <2> + "watch_id": "my_watch", + "node": "my_node", + "messages": [], + "trigger_event": { + "type": "manual", + "triggered_time": "2015-06-02T23:17:55.124Z", + "manual": { + "schedule": { + "scheduled_time": "2015-06-02T23:17:55.124Z" + } + } + }, + "state": "executed", + "status": { + "version": 1, + "execution_state": "executed", + "state": { + "active": true, + "timestamp": "2015-06-02T23:17:55.111Z" + }, + "last_checked": "2015-06-02T23:17:55.124Z", + "last_met_condition": "2015-06-02T23:17:55.124Z", + "actions": { + "test_index": { + "ack": { + "timestamp": "2015-06-02T23:17:55.124Z", + "state": "ackable" + }, + "last_execution": { + "timestamp": "2015-06-02T23:17:55.124Z", + "successful": true + }, + "last_successful_execution": { + "timestamp": "2015-06-02T23:17:55.124Z", + "successful": true + } + } + } + }, + "input": { + "simple": { + "payload": { + "send": "yes" + } + } + }, + "condition": { + "always": {} + }, + "result": { <3> + "execution_time": "2015-06-02T23:17:55.124Z", + "execution_duration": 12608, + "input": { + "type": "simple", + "payload": { + "foo": "bar" + }, + "status": "success" + }, + "condition": { + "type": "always", + "met": true, + "status": "success" + }, + "actions": [ + { + "id": "test_index", + "index": { + "response": { + "index": "test", + "type": "test2", + "version": 1, + "created": true, + "result": "created", + "id": "AVSHKzPa9zx62AzUzFXY" + } + }, + "status": "success", + "type": "index" + } + ] + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/my_watch_0-2015-06-02T23:17:55.124Z/$body._id/] +// TESTRESPONSE[s/"triggered_time": "2015-06-02T23:17:55.124Z"/"triggered_time": "$body.watch_record.trigger_event.triggered_time"/] +// TESTRESPONSE[s/"scheduled_time": "2015-06-02T23:17:55.124Z"/"scheduled_time": "$body.watch_record.trigger_event.manual.schedule.scheduled_time"/] +// TESTRESPONSE[s/"execution_time": "2015-06-02T23:17:55.124Z"/"execution_time": "$body.watch_record.result.execution_time"/] +// TESTRESPONSE[s/"timestamp": "2015-06-02T23:17:55.111Z"/"timestamp": "$body.watch_record.status.state.timestamp"/] +// TESTRESPONSE[s/"timestamp": "2015-06-02T23:17:55.124Z"/"timestamp": "$body.watch_record.status.actions.test_index.ack.timestamp"/] +// TESTRESPONSE[s/"last_checked": "2015-06-02T23:17:55.124Z"/"last_checked": "$body.watch_record.status.last_checked"/] +// TESTRESPONSE[s/"last_met_condition": "2015-06-02T23:17:55.124Z"/"last_met_condition": "$body.watch_record.status.last_met_condition"/] +// TESTRESPONSE[s/"execution_duration": 12608/"execution_duration": "$body.watch_record.result.execution_duration"/] +// TESTRESPONSE[s/"id": "AVSHKzPa9zx62AzUzFXY"/"id": "$body.watch_record.result.actions.0.index.response.id"/] +// TESTRESPONSE[s/"node": "my_node"/"node": "$body.watch_record.node"/] +<1> The id of the watch record as it would be stored in the `.watcher-history` index. +<2> The watch record document as it would be stored in the `.watcher-history` index. +<3> The watch execution results. 
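+
+As noted in the request body table above, `ignore_condition` and
+`record_execution` can also be specified as HTTP parameters rather than in the
+request body. A minimal sketch, reusing the `my_watch` watch from the previous
+examples and assuming both parameters are supplied on the URL:
+
+[source,js]
+--------------------------------------------------
+POST _xpack/watcher/watch/my_watch/_execute?ignore_condition=true&record_execution=true
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:my_active_watch]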
+ +You can set a different execution mode for every action by associating the mode +name with the action id: + +[source,js] +-------------------------------------------------- +POST _xpack/watcher/watch/my_watch/_execute +{ + "action_modes" : { + "action1" : "force_simulate", + "action2" : "skip" + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:my_active_watch] + +You can also associate a single execution mode with all the actions in the watch +using `_all` as the action id: + +[source,js] +-------------------------------------------------- +POST _xpack/watcher/watch/my_watch/_execute +{ + "action_modes" : { + "_all" : "force_execute" + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:my_active_watch] + +The following example shows how to execute a watch inline: + +[source,js] +-------------------------------------------------- +POST _xpack/watcher/watch/_execute +{ + "watch" : { + "trigger" : { "schedule" : { "interval" : "10s" } }, + "input" : { + "search" : { + "request" : { + "indices" : [ "logs" ], + "body" : { + "query" : { + "match" : { "message": "error" } + } + } + } + } + }, + "condition" : { + "compare" : { "ctx.payload.hits.total" : { "gt" : 0 }} + }, + "actions" : { + "log_error" : { + "logging" : { + "text" : "Found {{ctx.payload.hits.total}} errors in the logs" + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +All other settings for this API still apply when inlining a watch. In the +following snippet, while the inline watch defines a `compare` condition, +during the execution this condition will be ignored: + +[source,js] +-------------------------------------------------- +POST _xpack/watcher/watch/_execute +{ + "ignore_condition" : true, + "watch" : { + "trigger" : { "schedule" : { "interval" : "10s" } }, + "input" : { + "search" : { + "request" : { + "indices" : [ "logs" ], + "body" : { + "query" : { + "match" : { "message": "error" } + } + } + } + } + }, + "condition" : { + "compare" : { "ctx.payload.hits.total" : { "gt" : 0 }} + }, + "actions" : { + "log_error" : { + "logging" : { + "text" : "Found {{ctx.payload.hits.total}} errors in the logs" + } + } + } + } +} +-------------------------------------------------- +// CONSOLE diff --git a/x-pack/docs/en/rest-api/watcher/get-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/get-watch.asciidoc new file mode 100644 index 0000000000000..ee224f648634d --- /dev/null +++ b/x-pack/docs/en/rest-api/watcher/get-watch.asciidoc @@ -0,0 +1,90 @@ +[role="xpack"] +[[watcher-api-get-watch]] +=== Get Watch API + +This API retrieves a watch by its ID. + +[float] +==== Request + +`GET _xpack/watcher/watch/` + +[float] +==== Path Parameters + +`watch_id` (required):: + (string) Identifier for the watch. + +[float] +==== Authorization + +You must have `manage_watcher` or `monitor_watcher` cluster privileges to use +this API. For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. 
+ +[float] +==== Examples + +The following example gets a watch with `my-watch` id: + +[source,js] +-------------------------------------------------- +GET _xpack/watcher/watch/my_watch +-------------------------------------------------- +// CONSOLE +// TEST[setup:my_active_watch] + +Response: + +[source,js] +-------------------------------------------------- +{ + "found": true, + "_id": "my_watch", + "_version": 1, + "status": { <1> + "version": 1, + "state": { + "active": true, + "timestamp": "2015-05-26T18:21:08.630Z" + }, + "actions": { + "test_index": { + "ack": { + "timestamp": "2015-05-26T18:21:08.630Z", + "state": "awaits_successful_execution" + } + } + } + }, + "watch": { + "input": { + "simple": { + "payload": { + "send": "yes" + } + } + }, + "condition": { + "always": {} + }, + "trigger": { + "schedule": { + "hourly": { + "minute": [0, 5] + } + } + }, + "actions": { + "test_index": { + "index": { + "index": "test", + "doc_type": "test2" + } + } + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"timestamp": "2015-05-26T18:21:08.630Z"/"timestamp": "$body.status.state.timestamp"/] +<1> The current status of the watch diff --git a/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc new file mode 100644 index 0000000000000..41c078a9c9e1a --- /dev/null +++ b/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc @@ -0,0 +1,157 @@ +[role="xpack"] +[[watcher-api-put-watch]] +=== Put Watch API + +The PUT watch API either registers a new watch in {watcher} or update an +existing one. + +[float] +==== Request + +`PUT _xpack/watcher/watch/` + +[float] +==== Description + +When a watch is registered, a new document that represents the watch is added to +the `.watches` index and its trigger is immediately registered with the relevant +trigger engine. Typically for the `schedule` trigger, the scheduler is the +trigger engine. + +IMPORTANT: Putting a watch must be done via this API only. Do not put a watch + directly to the `.watches` index using the Elasticsearch Index API. + If {security} is enabled, make sure no `write` privileges are + granted to anyone over the `.watches` index. + +When adding a watch you can also define its initial +{xpack-ref}/how-watcher-works.html#watch-active-state[active state]. You do that +by setting the `active` parameter. + +[float] +==== Path Parameters + +`watch_id` (required):: + (string) Identifier for the watch. + +[float] +==== Query Parameters + +`active`:: + (boolean) Defines whether the watch is active or inactive by default. The + default value is `true`, which means the watch is active by default. + +[float] +==== Request Body + +A watch has the following fields: + +[options="header"] +|====== +| Name | Description + +| `trigger` | The {xpack-ref}/trigger.html[trigger] that defines when + the watch should run. + +| `input` | The {xpack-ref}/input.html[input] that defines the input + that loads the data for the watch. + +| `condition` | The {xpack-ref}/condition.html[condition] that defines if + the actions should be run. + +| `actions` | The list of {xpack-ref}/actions.html[actions] that will be + run if the condition matches + +| `metadata` | Metadata json that will be copied into the history entries. + +| `throttle_period` | The minimum time between actions being run, the default + for this is 5 seconds. This default can be changed in the + config file with the setting `xpack.watcher.throttle.period.default_period`. 
+|====== + +[float] +==== Authorization + +You must have `manage_watcher` cluster privileges to use this API. For more +information, see {xpack-ref}/security-privileges.html[Security Privileges]. + +[float] +==== Security Integration + +When {security} is enabled, your watch will only be able to index or search on +indices for which the user that stored the watch, has privileges. If the user is +able to read index `a`, but not index `b`, the same will apply, when the watch +is executed. + +[float] +==== Examples + +The following example adds a watch with the `my-watch` id that has the following +characteristics: + +* The watch schedule triggers every minute. +* The watch search input looks for any 404 HTTP responses that occurred in the + last five minutes. +* The watch condition checks if any search hits where found. +* When found, the watch action sends an email to an administrator. + +[source,js] +-------------------------------------------------- +PUT _xpack/watcher/watch/my-watch +{ + "trigger" : { + "schedule" : { "cron" : "0 0/1 * * * ?" } + }, + "input" : { + "search" : { + "request" : { + "indices" : [ + "logstash*" + ], + "body" : { + "query" : { + "bool" : { + "must" : { + "match": { + "response": 404 + } + }, + "filter" : { + "range": { + "@timestamp": { + "from": "{{ctx.trigger.scheduled_time}}||-5m", + "to": "{{ctx.trigger.triggered_time}}" + } + } + } + } + } + } + } + } + }, + "condition" : { + "compare" : { "ctx.payload.hits.total" : { "gt" : 0 }} + }, + "actions" : { + "email_admin" : { + "email" : { + "to" : "admin@domain.host.com", + "subject" : "404 recently encountered" + } + } + } +} +-------------------------------------------------- +// CONSOLE + +When you add a watch you can also define its initial +{xpack-ref}/how-watcher-works.html#watch-active-state[active state]. You do that +by setting the `active` parameter. The following command adds a watch and sets +it to be inactive by default: + +[source,js] +-------------------------------------------------- +PUT _xpack/watcher/watch/my-watch?active=false +-------------------------------------------------- + +NOTE: If you omit the `active` parameter, the watch is active by default. diff --git a/x-pack/docs/en/rest-api/watcher/start.asciidoc b/x-pack/docs/en/rest-api/watcher/start.asciidoc new file mode 100644 index 0000000000000..ffdec3326d5e0 --- /dev/null +++ b/x-pack/docs/en/rest-api/watcher/start.asciidoc @@ -0,0 +1,35 @@ +[role="xpack"] +[[watcher-api-start]] +=== Start API + +The `start` API starts the {watcher} service if the service is not already +running. + +[float] +==== Request + +`POST _xpack/watcher/_start` + +==== Authorization + +You must have `manage_watcher` cluster privileges to use this API. For more +information, see {xpack-ref}/security-privileges.html[Security Privileges]. 
+ +[float] +==== Examples + +[source,js] +-------------------------------------------------- +POST _xpack/watcher/_start +-------------------------------------------------- +// CONSOLE + +{watcher} returns the following response if the request is successful: + +[source,js] +-------------------------------------------------- +{ + "acknowledged": true +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/watcher/stats.asciidoc b/x-pack/docs/en/rest-api/watcher/stats.asciidoc new file mode 100644 index 0000000000000..38f8ede925e4b --- /dev/null +++ b/x-pack/docs/en/rest-api/watcher/stats.asciidoc @@ -0,0 +1,185 @@ +[role="xpack"] +[[watcher-api-stats]] +=== Stats API + +The `stats` API returns the current {watcher} metrics. + +[float] +==== Request + +`GET _xpack/watcher/stats` + + +`GET _xpack/watcher/stats/` + +[float] +==== Description + +This API always returns basic metrics. You retrieve more metrics by using +the `metric` parameter. + +[float] +===== Current executing watches metric + +The current executing watches metric gives insight into the watches that are +currently being executed by {watcher}. Additional information is shared per +watch that is currently executing. This information includes the `watch_id`, +the time its execution started and its current execution phase. + +To include this metric, the `metric` option should be set to `executing_watches` +or `_all`. In addition you can also specify the `emit_stacktraces=true` +parameter, which adds stack traces for each watch that is being executed. These +stack traces can give you more insight into an execution of a watch. + +[float] +===== Queued watches metric + +{watcher} moderates the execution of watches such that their execution won't put +too much pressure on the node and its resources. If too many watches trigger +concurrently and there isn't enough capacity to execute them all, some of the +watches are queued, waiting for the current executing watches to finish their +execution. The queued watches metric gives insight on these queued watches. + +To include this metric, the `metric` option should include `queued_watches` or +`_all`. + +[float] +==== Path Parameters + +`emit_stacktraces`:: + (boolean) Defines whether stack traces are generated for each watch that is + running. The default value is `false`. + +`metric`:: + (enum) Defines which additional metrics are included in the response. + `executing_watches`::: Includes the current executing watches in the response. + `queued_watches`::: Includes the watches queued for execution in the response. + `_all`::: Includes all metrics in the response. + +[float] +==== Authorization + +You must have `manage_watcher` or `monitor_watcher` cluster privileges to use +this API. For more information, see +{xpack-ref}/security-privileges.html[Security Privileges]. + +[float] +==== Examples + +The following example calls the `stats` API to retrieve basic metrics: + +[source,js] +-------------------------------------------------- +GET _xpack/watcher/stats +-------------------------------------------------- +// CONSOLE + +A successful call returns a JSON structure similar to the following example: + +[source,js] +-------------------------------------------------- +{ + "watcher_state": "started", <1> + "watch_count": 1, <2> + "execution_thread_pool": { + "size": 1000, <3> + "max_size": 1 <4> + } +} +-------------------------------------------------- + +<1> The current state of watcher, which can be `started`, `starting`, or `stopped`. 
+<2> The number of watches currently registered.
+<3> The number of watches that were triggered and are currently queued for execution.
+<4> The largest size of the execution thread pool, which indicates the largest
+number of concurrently executing watches.
+
+The following example specifies the `metric` option as a query string argument
+and includes the basic metrics as well as metrics about the currently executing watches:
+
+[source,js]
+--------------------------------------------------
+GET _xpack/watcher/stats?metric=executing_watches
+--------------------------------------------------
+// CONSOLE
+
+The following example specifies the `metric` option as part of the URL path:
+
+[source,js]
+--------------------------------------------------
+GET _xpack/watcher/stats/current_watches
+--------------------------------------------------
+// CONSOLE
+
+The following snippet shows an example of a successful JSON response that
+captures a watch in execution:
+
+[source,js]
+--------------------------------------------------
+{
+   "watcher_state": "started",
+   "watch_count": 2,
+   "execution_thread_pool": {
+      "queue_size": 1000,
+      "max_size": 20
+   },
+   "current_watches": [ <1>
+      {
+         "watch_id": "slow_condition", <2>
+         "watch_record_id": "slow_condition_3-2015-05-13T07:42:32.179Z", <3>
+         "triggered_time": "2015-05-12T11:53:51.800Z", <4>
+         "execution_time": "2015-05-13T07:42:32.179Z", <5>
+         "execution_phase": "condition" <6>
+      }
+   ]
+}
+--------------------------------------------------
+
+<1> A list of all the watches that are currently being executed by {watcher}.
+    When no watches are currently executing, an empty array is returned. The
+    captured watches are sorted by execution time in descending order. Thus the
+    longest running watch is always at the top.
+<2> The id of the watch being executed.
+<3> The id of the watch record.
+<4> The time the watch was triggered by the trigger engine.
+<5> The time the watch was executed. This is just before the input is being
+    executed.
+<6> The current watch execution phase. Can be `input`, `condition`, `actions`,
+    `awaits_execution`, `started`, `watch_transform`, `aborted`, `finished`.
+
+The following example specifies the `queued_watches` metric option and includes
+both the basic metrics and the queued watches:
+
+[source,js]
+--------------------------------------------------
+GET _xpack/watcher/stats/queued_watches
+--------------------------------------------------
+// CONSOLE
+
+An example of a successful JSON response that captures queued watches:
+
+[source,js]
+--------------------------------------------------
+{
+   "watcher_state": "started",
+   "watch_count": 10,
+   "execution_thread_pool": {
+      "queue_size": 1000,
+      "max_size": 20
+   },
+   "queued_watches": [ <1>
+      {
+         "watch_id": "slow_condition4", <2>
+         "watch_record_id": "slow_condition4_223-2015-05-21T11:59:59.811Z", <3>
+         "triggered_time": "2015-05-21T11:59:59.811Z", <4>
+         "execution_time": "2015-05-21T11:59:59.811Z" <5>
+      },
+      ...
+   ]
+}
+--------------------------------------------------
+<1> A list of all watches that are currently queued for execution. When no
+    watches are queued, an empty array is returned.
+<2> The id of the watch queued for execution.
+<3> The id of the watch record.
+<4> The time the watch was triggered by the trigger engine.
+<5> The time the watch went into a queued state.
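+
+The `metric` variants and the `emit_stacktraces` parameter described above can
+be combined in a single call. A brief sketch that asks for all additional
+metrics along with stack traces (the exact output depends on what is currently
+executing or queued):
+
+[source,js]
+--------------------------------------------------
+GET _xpack/watcher/stats/_all?emit_stacktraces=true
+--------------------------------------------------
+// CONSOLE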
diff --git a/x-pack/docs/en/rest-api/watcher/stop.asciidoc b/x-pack/docs/en/rest-api/watcher/stop.asciidoc new file mode 100644 index 0000000000000..11345c89cefbb --- /dev/null +++ b/x-pack/docs/en/rest-api/watcher/stop.asciidoc @@ -0,0 +1,35 @@ +[role="xpack"] +[[watcher-api-stop]] +=== Stop API + +The `stop` API stops the {watcher} service if the service is running. + +[float] +==== Request + +`POST _xpack/watcher/_stop` + +[float] +==== Authorization + +You must have `manage_watcher` cluster privileges to use this API. For more +information, see {xpack-ref}/security-privileges.html[Security Privileges]. + +[float] +==== Examples + +[source,js] +-------------------------------------------------- +POST _xpack/watcher/_stop +-------------------------------------------------- +// CONSOLE + +{watcher} returns the following response if the request is successful: + +[source,js] +-------------------------------------------------- +{ + "acknowledged": true +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/x-pack/docs/en/rollup/api-quickref.asciidoc b/x-pack/docs/en/rollup/api-quickref.asciidoc new file mode 100644 index 0000000000000..1ae6de4ee011c --- /dev/null +++ b/x-pack/docs/en/rollup/api-quickref.asciidoc @@ -0,0 +1,33 @@ +[[rollup-api-quickref]] +== API Quick Reference + +Most {rollup} endpoints have the following base: + +[source,js] +---- +/_xpack/rollup/ +---- +// NOTCONSOLE + +[float] +[[rollup-api-jobs]] +=== /job/ + +* {ref}/rollup-put-job.html[PUT /job/+++]: Create a job +* {ref}/rollup-get-job.html[GET /job]: List jobs +* {ref}/rollup-get-job.html[GET /job/+++]: Get job details +* {ref}/rollup-start-job.html[POST /job//_start]: Start a job +* {ref}/rollup-stop-job.html[POST /job/+++]: Stop a job +* {ref}/rollup-delete-job.html[DELETE /job/+++]: Delete a job + +[float] +[[rollup-api-data]] +=== /data/ + +* {ref}/rollup-get-rollup-caps.html[GET /data//_rollup_caps+++]: Get Rollup Capabilities + +[float] +[[rollup-api-index]] +=== // + +* {ref}/rollup-search.html[GET //_rollup_search]: Search rollup data diff --git a/x-pack/docs/en/rollup/index.asciidoc b/x-pack/docs/en/rollup/index.asciidoc new file mode 100644 index 0000000000000..69cd872e59669 --- /dev/null +++ b/x-pack/docs/en/rollup/index.asciidoc @@ -0,0 +1,30 @@ +[[xpack-rollup]] += Rolling up historical data + +[partintro] +-- + +experimental[] + +Keeping historical data around for analysis is extremely useful but often avoided due to the financial cost of +archiving massive amounts of data. Retention periods are thus driven by financial realities rather than by the +usefulness of extensive historical data. + +The Rollup feature in {xpack} provides a means to summarize and store historical data so that it can still be used +for analysis, but at a fraction of the storage cost of raw data. + + +* <> +* <> +* <> +* <> +* <> + + +-- + +include::overview.asciidoc[] +include::api-quickref.asciidoc[] +include::rollup-getting-started.asciidoc[] +include::understanding-groups.asciidoc[] +include::rollup-search-limitations.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/rollup/overview.asciidoc b/x-pack/docs/en/rollup/overview.asciidoc new file mode 100644 index 0000000000000..cee244a2ec241 --- /dev/null +++ b/x-pack/docs/en/rollup/overview.asciidoc @@ -0,0 +1,73 @@ +[[rollup-overview]] +== Overview + +Time-based data (documents that are predominantly identified by their timestamp) often have associated retention policies +to manage data growth. 
For example, your system may be generating 500,000 documents every second. That will generate
+43 million documents per day, and nearly 16 billion documents a year.
+
+While your analysts and data scientists may wish you stored that data indefinitely for analysis, time is never-ending and
+so your storage requirements will continue to grow without bound. Retention policies are therefore often dictated
+by the simple calculation of storage costs over time, and what the organization is willing to pay to retain historical data.
+Often these policies start deleting data after a few months or years.
+
+Storage cost is a fixed quantity. It takes X money to store Y data. But the utility of a piece of data often changes
+with time. Sensor data gathered at millisecond granularity is extremely useful right now, reasonably useful if from a
+few weeks ago, and only marginally useful if older than a few months.
+
+So while the cost of storing a millisecond of sensor data from ten years ago is fixed, the value of that individual sensor
+reading often diminishes with time. It's not useless -- it could easily contribute to a useful analysis -- but its reduced
+value often leads to deletion rather than paying the fixed storage cost.
+
+=== Rollup stores historical data at reduced granularity
+
+That's where Rollup comes into play. The Rollup functionality summarizes old, high-granularity data into a reduced
+granularity format for long-term storage. By "rolling" the data up into a single summary document, historical data
+can be compressed greatly compared to the raw data.
+
+For example, consider the system that's generating 43 million documents every day. The second-by-second data is useful
+for real-time analysis, but historical analysis looking over ten years of data is likely to work at larger intervals,
+such as hourly or daily trends.
+
+If we compress the 43 million documents into hourly summaries, we can save vast amounts of space. The Rollup feature
+automates this process of summarizing historical data.
+
+Details about setting up and configuring Rollup are covered in <>.
+
+=== Rollup uses standard query DSL
+
+The Rollup feature exposes a new search endpoint (`/_rollup_search` vs the standard `/_search`), which knows how to search
+over rolled-up data. Importantly, this endpoint accepts 100% normal {es} Query DSL. Your application does not need to learn
+a new DSL to inspect historical data; it can simply reuse existing queries and dashboards.
+
+There are some limitations to the functionality available; not all queries and aggregations are supported, certain search
+features (highlighting, etc.) are disabled, and available fields depend on how the rollup was configured. These limitations
+are covered more in <>.
+
+But if your queries, aggregations and dashboards only use the available functionality, redirecting them to historical
+data is trivial.
+
+=== Rollup merges "live" and "rolled" data
+
+A useful feature of Rollup is the ability to query both "live" realtime data and historical "rolled" data
+in a single query.
+
+For example, your system may keep a month of raw data. After a month, it is rolled up into historical summaries using
+Rollup and the raw data is deleted.
+
+If you were to query the raw data, you'd only see the most recent month. And if you were to query the rolled up data, you
+would only see data older than a month. The RollupSearch endpoint, however, supports querying both at the same time.
+It will take the results from both data sources and merge them together. If there is overlap between the "live" and +"rolled" data, live data is preferred to increase accuracy. + +=== Rollup is multi-interval aware + +Finally, Rollup is capable of intelligently utilizing the best interval available. If you've worked with summarizing +features of other products, you'll find that they can be limiting. If you configure rollups at daily intervals... your +queries and charts can only work with daily intervals. If you need a monthly interval, you have to create another rollup +that explicitly stores monthly averages, etc. + +The Rollup feature stores data in such a way that queries can identify the smallest available interval and use that +for their processing. If you store rollups at a daily interval, queries can be executed on daily or longer intervals +(weekly, monthly, etc) without the need to explicitly configure a new rollup job. This helps alleviate one of the major +disadvantages of a rollup system; reduced flexibility relative to raw data. + diff --git a/x-pack/docs/en/rollup/rollup-getting-started.asciidoc b/x-pack/docs/en/rollup/rollup-getting-started.asciidoc new file mode 100644 index 0000000000000..cf96d67454083 --- /dev/null +++ b/x-pack/docs/en/rollup/rollup-getting-started.asciidoc @@ -0,0 +1,294 @@ +[[rollup-getting-started]] +== Getting Started + +To use the Rollup feature, you need to create one or more "Rollup Jobs". These jobs run continuously in the background +and rollup the index or indices that you specify, placing the rolled documents in a secondary index (also of your choosing). + +Imagine you have a series of daily indices that hold sensor data (`sensor-2017-01-01`, `sensor-2017-01-02`, etc). A sample document might +look like this: + +[source,js] +-------------------------------------------------- +{ + "timestamp": 1516729294000, + "temperature": 200, + "voltage": 5.2, + "node": "a" +} +-------------------------------------------------- +// NOTCONSOLE + +[float] +=== Creating a Rollup Job + +We'd like to rollup these documents into hourly summaries, which will allow us to generate reports and dashboards with any time interval +one hour or greater. A rollup job might look like this: + +[source,js] +-------------------------------------------------- +PUT _xpack/rollup/job/sensor +{ + "index_pattern": "sensor-*", + "rollup_index": "sensor_rollup", + "cron": "*/30 * * * * ?", + "page_size" :1000, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h", + "delay": "7d" + }, + "terms": { + "fields": ["node"] + } + }, + "metrics": [ + { + "field": "temperature", + "metrics": ["min", "max", "sum"] + }, + { + "field": "voltage", + "metrics": ["avg"] + } + ] +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sensor_index] + +We give the job the ID of "sensor" (in the url: `PUT _xpack/rollup/job/sensor`), and tell it to rollup the index pattern `"sensor-*"`. +This job will find and rollup any index that matches that pattern. Rollup summaries are then stored in the `"sensor_rollup"` index. + +The `cron` parameter controls when and how often the job activates. When a rollup job's cron schedule triggers, it will begin rolling up +from where it left off after the last activation. So if you configure the cron to run every 30 seconds, the job will process the last 30 +seconds worth of data that was indexed into the `sensor-*` indices. 
+
+If instead the cron was configured to run once a day at midnight, the job would process the last 24 hours' worth of data. The choice is largely
+a matter of preference, based on how "realtime" you want the rollups to be and whether you wish to process continuously or move the work to off-peak hours.
+
+Next, we define a set of `groups` and `metrics`. The metrics are fairly straightforward: we want to save the min/max/sum of the `temperature`
+field, and the average of the `voltage` field.
+
+The groups are a little more interesting. Essentially, we are defining the dimensions that we wish to pivot on at a later date when
+querying the data. The grouping in this job allows us to use `date_histogram` aggregations on the `timestamp` field, rolled up at hourly intervals.
+It also allows us to run terms aggregations on the `node` field.
+
+.Date histogram interval vs cron schedule
+**********************************
+You'll note that the job's cron is configured to run every 30 seconds, but the date_histogram is configured to
+roll up at hourly intervals. How do these relate?
+
+The date_histogram controls the granularity of the saved data. Data will be rolled up into hourly intervals, and you will be unable
+to query with finer granularity. The cron simply controls when the process looks for new data to roll up. Every 30 seconds it will see
+if there is a new hour's worth of data and roll it up. If not, the job goes back to sleep.
+
+Often, it doesn't make sense to define such a small cron (30s) on a large interval (1h), because the majority of the activations will
+simply go back to sleep. But there's nothing wrong with it either; the job will do the right thing.
+
+**********************************
+
+For more details about the job syntax, see <>.
+
+
+After you execute the above command and create the job, you'll receive the following response:
+
+[source,js]
+----
+{
+  "acknowledged": true
+}
+----
+// TESTRESPONSE
+
+[float]
+=== Starting the job
+
+After the job is created, it will be sitting in an inactive state. Jobs need to be started before they begin processing data (this allows
+you to stop them later as a way to temporarily pause, without deleting the configuration).
+
+To start the job, execute this command:
+
+[source,js]
+--------------------------------------------------
+POST _xpack/rollup/job/sensor/_start
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:sensor_rollup_job]
+
+[float]
+=== Searching the Rolled results
+
+After the job has run and processed some data, we can use the <> endpoint to do some searching. The Rollup feature is designed
+so that you can use the same Query DSL syntax that you are accustomed to... it just happens to run on the rolled up data instead.
+
+For example, take this query:
+
+[source,js]
+--------------------------------------------------
+GET /sensor_rollup/_rollup_search
+{
+    "size": 0,
+    "aggregations": {
+        "max_temperature": {
+            "max": {
+                "field": "temperature"
+            }
+        }
+    }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:sensor_prefab_data]
+
+It's a simple aggregation that calculates the maximum of the `temperature` field. But you'll notice that it is being sent to the `sensor_rollup`
+index instead of the raw `sensor-*` indices. And you'll also notice that it is using the `_rollup_search` endpoint. Otherwise the syntax
+is exactly as you'd expect.
+ +If you were to execute that query, you'd receive a result that looks like a normal aggregation response: + +[source,js] +---- +{ + "took" : 102, + "timed_out" : false, + "terminated_early" : false, + "_shards" : ... , + "hits" : { + "total" : 0, + "max_score" : 0.0, + "hits" : [ ] + }, + "aggregations" : { + "max_temperature" : { + "value" : 202.0 + } + } +} +---- +// TESTRESPONSE[s/"took" : 102/"took" : $body.$_path/] +// TESTRESPONSE[s/"_shards" : \.\.\. /"_shards" : $body.$_path/] + +The only notable difference is that Rollup search results have zero `hits`, because we aren't really searching the original, live data any +more. Otherwise it's identical syntax. + +There are a few interesting takeaways here. Firstly, even though the data was rolled up with hourly intervals and partitioned by +node name, the query we ran is just calculating the max temperature across all documents. The `groups` that were configured in the job +are not mandatory elements of a query, they are just extra dimensions you can partition on. Second, the request and response syntax +is nearly identical to normal DSL, making it easy to integrate into dashboards and applications. + +Finally, we can use those grouping fields we defined to construct a more complicated query: + +[source,js] +-------------------------------------------------- +GET /sensor_rollup/_rollup_search +{ + "size": 0, + "aggregations": { + "timeline": { + "date_histogram": { + "field": "timestamp", + "interval": "7d" + }, + "aggs": { + "nodes": { + "terms": { + "field": "node" + }, + "aggs": { + "max_temperature": { + "max": { + "field": "temperature" + } + }, + "avg_voltage": { + "avg": { + "field": "voltage" + } + } + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sensor_prefab_data] + +Which returns a corresponding response: + +[source,js] +---- +{ + "took" : 93, + "timed_out" : false, + "terminated_early" : false, + "_shards" : ... , + "hits" : { + "total" : 0, + "max_score" : 0.0, + "hits" : [ ] + }, + "aggregations" : { + "timeline" : { + "meta" : { }, + "buckets" : [ + { + "key_as_string" : "2018-01-18T00:00:00.000Z", + "key" : 1516233600000, + "doc_count" : 6, + "nodes" : { + "doc_count_error_upper_bound" : 0, + "sum_other_doc_count" : 0, + "buckets" : [ + { + "key" : "a", + "doc_count" : 2, + "max_temperature" : { + "value" : 202.0 + }, + "avg_voltage" : { + "value" : 5.1499998569488525 + } + }, + { + "key" : "b", + "doc_count" : 2, + "max_temperature" : { + "value" : 201.0 + }, + "avg_voltage" : { + "value" : 5.700000047683716 + } + }, + { + "key" : "c", + "doc_count" : 2, + "max_temperature" : { + "value" : 202.0 + }, + "avg_voltage" : { + "value" : 4.099999904632568 + } + } + ] + } + } + ] + } + } +} +---- +// TESTRESPONSE[s/"took" : 93/"took" : $body.$_path/] +// TESTRESPONSE[s/"_shards" : \.\.\. /"_shards" : $body.$_path/] + +In addition to being more complicated (date histogram and a terms aggregation, plus an additional average metric), you'll notice +the date_histogram uses a `7d` interval instead of `1h`. + +[float] +=== Conclusion + +This quickstart should have provided a concise overview of the core functionality that Rollup exposes. There are more tips and things +to consider when setting up Rollups, which you can find throughout the rest of this section. You may also explore the <> +for an overview of what is available. 
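+
+One final practical note: the `sensor` job that was started earlier can be paused at any
+time without deleting its configuration. A minimal sketch, assuming the stop endpoint
+mirrors the `_start` endpoint used above (it is listed alongside the other job endpoints
+in the API quick reference):
+
+[source,js]
+--------------------------------------------------
+POST _xpack/rollup/job/sensor/_stop
+--------------------------------------------------
+// CONSOLE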
diff --git a/x-pack/docs/en/rollup/rollup-search-limitations.asciidoc b/x-pack/docs/en/rollup/rollup-search-limitations.asciidoc new file mode 100644 index 0000000000000..de47404a29da3 --- /dev/null +++ b/x-pack/docs/en/rollup/rollup-search-limitations.asciidoc @@ -0,0 +1,114 @@ +[[rollup-search-limitations]] +== Rollup Search Limitations + +While we feel the Rollup function is extremely flexible, the nature of summarizing data means there will be some limitations. Once +live data is thrown away, you will always lose some flexibility. + +This page highlights the major limitations so that you are aware of them. + +[float] +=== Only one Rollup index per search + +When using the <> endpoint, the `index` parameter accepts one or more indices. These can be a mix of regular, non-rollup +indices and rollup indices. However, only one rollup index can be specified. The exact list of rules for the `index` parameter are as +follows: + +- At least one index/index-pattern must be specified. This can be either a rollup or non-rollup index. Omitting the index parameter, +or using `_all`, is not permitted +- Multiple non-rollup indices may be specified +- Only one rollup index may be specified. If more than one are supplied an exception will be thrown + +This limitation is driven by the logic that decides which jobs are the "best" for any given query. If you have ten jobs stored in a single +index, which cover the source data with varying degrees of completeness and different intervals, the query needs to determine which set +of jobs to actually search. Incorrect decisions can lead to inaccurate aggregation results (e.g. over-counting doc counts, or bad metrics). +Needless to say, this is a technically challenging piece of code. + +To help simplify the problem, we have limited search to just one rollup index at a time (which may contain multiple jobs). In the future we +may be able to open this up to multiple rollup jobs. + +[float] +=== Can only aggregate what's been stored + +A perhaps obvious limitation, but rollups can only aggregate on data that has been stored in the rollups. If you don't configure the +rollup job to store metrics about the `price` field, you won't be able to use the `price` field in any query or aggregation. + +For example, the `temperature` field in the following query has been stored in a rollup job... but not with an `avg` metric. Which means +the usage of `avg` here is not allowed: + +[source,js] +-------------------------------------------------- +GET sensor_rollup/_rollup_search +{ + "size": 0, + "aggregations": { + "avg_temperature": { + "avg": { + "field": "temperature" + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sensor_prefab_data] +// TEST[catch:/illegal_argument_exception/] + +The response will tell you that the field and aggregation were not possible, because no rollup jobs were found which contained them: + +[source,js] +---- +{ + "error" : { + "root_cause" : [ + { + "type" : "illegal_argument_exception", + "reason" : "There is not a rollup job that has a [avg] agg with name [avg_temperature] which also satisfies all requirements of query.", + "stack_trace": ... + } + ], + "type" : "illegal_argument_exception", + "reason" : "There is not a rollup job that has a [avg] agg with name [avg_temperature] which also satisfies all requirements of query.", + "stack_trace": ... 
+  },
+  "status": 400
+}
+----
+// TESTRESPONSE[s/"stack_trace": \.\.\./"stack_trace": $body.$_path/]
+
+[float]
+=== Interval Granularity
+
+Rollups are stored at a certain granularity, as defined by the `date_histogram` group in the configuration. If data is rolled up at hourly
+intervals, the <> API can aggregate on any time interval hourly or greater. Intervals that are less than an hour will throw
+an exception, since the data simply doesn't exist for finer granularities.
+
+Because the RollupSearch endpoint can "upsample" intervals, there is no need to configure jobs with multiple intervals (hourly, daily, etc).
+It's recommended to just configure a single job with the smallest granularity that is needed, and allow the search endpoint to upsample
+as needed.
+
+That said, if multiple jobs are present in a single rollup index with varying intervals, the search endpoint will identify and use the job(s)
+with the largest interval to satisfy the search request.
+
+[float]
+=== Limited querying components
+
+The Rollup functionality allows queries in the search request, but only a limited subset of query components is supported. The queries currently allowed are:
+
+- Term Query
+- Terms Query
+- Range Query
+- MatchAll Query
+- Any compound query (Boolean, Boosting, ConstantScore, etc)
+
+Furthermore, these queries can only use fields that were also saved in the rollup job. If you wish to filter on a keyword `hostname` field,
+that field must have been configured in the rollup job under a `terms` grouping.
+
+If you attempt to use an unsupported query, or the query references a field that wasn't configured in the rollup job, an exception will be
+thrown. We expect the list of supported queries to grow over time as more are implemented.
+
+[float]
+=== Timezones
+
+Rollup documents are stored in the timezone of the `date_histogram` group configuration in the job. If no timezone is specified, the default
+is to roll up timestamps in `UTC`.
+
diff --git a/x-pack/docs/en/rollup/understanding-groups.asciidoc b/x-pack/docs/en/rollup/understanding-groups.asciidoc
new file mode 100644
index 0000000000000..d6eef54fab87e
--- /dev/null
+++ b/x-pack/docs/en/rollup/understanding-groups.asciidoc
@@ -0,0 +1,410 @@
+[[rollup-understanding-groups]]
+== Understanding Groups
+
+To preserve flexibility, Rollup Jobs are defined based on how future queries may need to use the data. Traditionally, systems force
+the admin to make decisions about what metrics to roll up and on what interval. For example, the average of `cpu_time` on an hourly basis. This
+is limiting; if, at a future date, the admin wishes to see the average of `cpu_time` on an hourly basis _and partitioned by `host_name`_,
+they are out of luck.
+
+Of course, the admin can decide to roll up the `[hour, host]` tuple on an hourly basis, but as the number of grouping keys grows, so does the
+number of tuples the admin needs to configure. Furthermore, these `[hour, host]` tuples are only useful for hourly rollups... daily, weekly,
+or monthly rollups all require new configurations.
+
+Rather than force the admin to decide ahead of time which individual tuples should be rolled up, Elasticsearch's Rollup jobs are configured
+based on which groups are potentially useful to future queries.
For example, this configuration: + +[source,js] +-------------------------------------------------- +"groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h", + "delay": "7d" + }, + "terms": { + "fields": ["hostname", "datacenter"] + }, + "histogram": { + "fields": ["load", "net_in", "net_out"], + "interval": 5 + } +} +-------------------------------------------------- +// NOTCONSOLE + +Allows `date_histogram`'s to be used on the `"timestamp"` field, `terms` aggregations to be used on the `"hostname"` and `"datacenter"` +fields, and `histograms` to be used on any of `"load"`, `"net_in"`, `"net_out"` fields. + +Importantly, these aggs/fields can be used in any combination. This aggregation: + +[source,js] +-------------------------------------------------- +"aggs" : { + "hourly": { + "date_histogram": { + "field": "timestamp", + "interval": "1h" + }, + "aggs": { + "host_names": { + "terms": { + "field": "hostname" + } + } + } + } +} +-------------------------------------------------- +// NOTCONSOLE + +is just as valid as this aggregation: + +[source,js] +-------------------------------------------------- +"aggs" : { + "hourly": { + "date_histogram": { + "field": "timestamp", + "interval": "1h" + }, + "aggs": { + "data_center": { + "terms": { + "field": "datacenter" + } + }, + "aggs": { + "host_names": { + "terms": { + "field": "hostname" + } + }, + "aggs": { + "load_values": { + "histogram": { + "field": "load", + "interval": 5 + } + } + } + } + } + } +} +-------------------------------------------------- +// NOTCONSOLE + + +You'll notice that the second aggregation is not only substantially larger, it also swapped the position of the terms aggregation on +`"hostname"`, illustrating how the order of aggregations does not matter to rollups. Similarly, while the `date_histogram` is required +for rolling up data, it isn't required while querying (although often used). For example, this is a valid aggregation for +Rollup Search to execute: + + +[source,js] +-------------------------------------------------- +"aggs" : { + "host_names": { + "terms": { + "field": "hostname" + } + } +} +-------------------------------------------------- +// NOTCONSOLE + +Ultimately, when configuring `groups` for a job, think in terms of how you might wish to partition data in a query at a future date... +then include those in the config. Because Rollup Search allows any order or combination of the grouped fields, you just need to decide +if a field is useful for aggregating later, and how you might wish to use it (terms, histogram, etc) + +=== Grouping Limitations with heterogeneous indices + +There is a known limitation to Rollup groups, due to some internal implementation details at this time. The Rollup feature leverages +the `composite` aggregation from Elasticsearch. At the moment, the composite agg only returns buckets when all keys in the tuple are non-null. +Put another way, if the you request keys `[A,B,C]` in the composite aggregation, the only documents that are aggregated are those that have +_all_ of the keys `A, B` and `C`. + +Because Rollup uses the composite agg during the indexing process, it inherits this behavior. Practically speaking, if all of the documents +in your index are homogeneous (they have the same mapping), you can ignore this limitation and stop reading now. + +However, if you have a heterogeneous collection of documents that you wish to roll up, you may need to configure two or more jobs to +accurately cover the original data. 
+ +As an example, if your index has two types of documents: + +[source,js] +-------------------------------------------------- +{ + "timestamp": 1516729294000, + "temperature": 200, + "voltage": 5.2, + "node": "a" +} +-------------------------------------------------- +// NOTCONSOLE + +and + +[source,js] +-------------------------------------------------- +{ + "timestamp": 1516729294000, + "price": 123, + "title": "Foo" +} +-------------------------------------------------- +// NOTCONSOLE + +it may be tempting to create a single, combined rollup job which covers both of these document types, something like this: + +[source,js] +-------------------------------------------------- +PUT _xpack/rollup/job/combined +{ + "index_pattern": "data-*", + "rollup_index": "data_rollup", + "cron": "*/30 * * * * ?", + "page_size" :1000, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h", + "delay": "7d" + }, + "terms": { + "fields": ["node", "title"] + } + }, + "metrics": [ + { + "field": "temperature", + "metrics": ["min", "max", "sum"] + }, + { + "field": "price", + "metrics": ["avg"] + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + +You can see that it includes a `terms` grouping on both "node" and "title", fields that are mutually exclusive in the document types. +*This will not work.* Because the `composite` aggregation (and by extension, Rollup) only returns buckets when all keys are non-null, +and there are no documents that have both a "node" field and a "title" field, this rollup job will not produce any rollups. + +Instead, you should configure two independent jobs (sharing the same index, or going to separate indices): + +[source,js] +-------------------------------------------------- +PUT _xpack/rollup/job/sensor +{ + "index_pattern": "data-*", + "rollup_index": "data_rollup", + "cron": "*/30 * * * * ?", + "page_size" :1000, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h", + "delay": "7d" + }, + "terms": { + "fields": ["node"] + } + }, + "metrics": [ + { + "field": "temperature", + "metrics": ["min", "max", "sum"] + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + +[source,js] +-------------------------------------------------- +PUT _xpack/rollup/job/purchases +{ + "index_pattern": "data-*", + "rollup_index": "data_rollup", + "cron": "*/30 * * * * ?", + "page_size" :1000, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h", + "delay": "7d" + }, + "terms": { + "fields": ["title"] + } + }, + "metrics": [ + { + "field": "price", + "metrics": ["avg"] + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + +Notice that each job now deals with a single "document type", and will not run into the limitations described above. We are working on changes +in core Elasticsearch to remove this limitation from the `composite` aggregation, and the documentation will be updated accordingly +when this particular scenario is fixed. + +=== Doc counts and overlapping jobs + +There is an issue with doc counts, related to the above grouping limitation. Imagine you have two Rollup jobs saving to the same index, where +one job is a "subset" of another job. 
+
+For example, you might have jobs with these two groupings:
+
+[source,js]
+--------------------------------------------------
+PUT _xpack/rollup/job/sensor-all
+{
+    "groups" : {
+        "date_histogram": {
+            "field": "timestamp",
+            "interval": "1h",
+            "delay": "7d"
+        },
+        "terms": {
+            "fields": ["node"]
+        }
+    },
+    "metrics": [
+        {
+            "field": "price",
+            "metrics": ["avg"]
+        }
+    ]
+    ...
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+and
+
+[source,js]
+--------------------------------------------------
+PUT _xpack/rollup/job/sensor-building
+{
+    "groups" : {
+        "date_histogram": {
+            "field": "timestamp",
+            "interval": "1h",
+            "delay": "7d"
+        },
+        "terms": {
+            "fields": ["node", "building"]
+        }
+    }
+    ...
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+The first job `sensor-all` contains the groupings and metrics that apply to all data in the index. The second job rolls up a subset
+of data (in different buildings) which also includes a building identifier. You did this because combining them would run into the limitation
+described in the previous section.
+
+This _mostly_ works, but can sometimes return incorrect `doc_counts` when you search. All metrics will be valid, however.
+
+The issue arises from the composite agg limitation described before, combined with a search-time optimization. Imagine you try to run the
+following aggregation:
+
+[source,js]
+--------------------------------------------------
+"aggs" : {
+    "nodes": {
+        "terms": {
+            "field": "node"
+        }
+    }
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+This aggregation could be serviced by either the `sensor-all` or the `sensor-building` job, since they both group on the `node` field. So the RollupSearch
+API will search both of them and merge results. This will result in *correct* doc_counts and *correct* metrics. No problem here.
+
+The issue arises from an aggregation that can _only_ be serviced by `sensor-building`, like this one:
+
+[source,js]
+--------------------------------------------------
+"aggs" : {
+    "nodes": {
+        "terms": {
+            "field": "node"
+        },
+        "aggs": {
+            "building": {
+                "terms": {
+                    "field": "building"
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+Now we run into a problem. The RollupSearch API will correctly identify that only the `sensor-building` job has all the required components
+to answer the aggregation, and will search it exclusively. Unfortunately, due to the composite aggregation limitation, that job only
+rolled up documents that have both a "node" and a "building" field. This means that the doc_counts for the `"nodes"` aggregation will not
+include counts for any document that doesn't have both `[node, building]` fields.
+
+- The `doc_count` for the `"nodes"` aggregation will be incorrect because it only contains counts for `nodes` that also have buildings
+- The `doc_count` for the `"building"` aggregation will be correct
+- Any metrics, on any level, will be correct
+
+==== Workarounds
+
+There are two main workarounds if you find yourself with a schema like the above.
+
+The easiest and most robust method: use separate indices to store your rollups. The limitations arise because you have several document
+schemas cohabiting in a single index, which makes it difficult for rollups to correctly summarize. If you make several rollup
+jobs and store them in separate indices, these sorts of difficulties do not arise.
It does, however, keep you from searching across several
+different rollup indices at the same time.
+
+The other workaround is to include an "off-target" aggregation in the query, which pulls in the "superset" job and corrects the doc counts.
+The RollupSearch API determines the best job to search for each "leaf node" in the aggregation tree. So if we include a metric agg on `price`,
+which was only defined in the `sensor-all` job, that will "pull in" the other job:
+
+[source,js]
+--------------------------------------------------
+"aggs" : {
+    "nodes": {
+        "terms": {
+            "field": "node"
+        },
+        "aggs": {
+            "building": {
+                "terms": {
+                    "field": "building"
+                }
+            },
+            "avg_price": {
+                "avg": { "field": "price" } <1>
+            }
+        }
+    }
+}
+--------------------------------------------------
+// NOTCONSOLE
+<1> Adding an avg aggregation here will fix the doc counts.
+
+Because only the `sensor-all` job had an `avg` on the `price` field, the RollupSearch API is forced to pull in that additional job for searching,
+and will merge/correct the doc_counts as appropriate. This sort of workaround applies to any additional aggregation -- metric or bucketing --
+although it can be tedious to look through the jobs and determine the right one to add.
+
+==== Status
+
+We realize this is an onerous limitation, and somewhat breaks the rollup contract of "pick the fields to rollup, we do the rest". We are
+actively working to get the limitation in the `composite` agg fixed, along with the related issues in Rollup. The documentation will be updated when
+the fix is implemented.
\ No newline at end of file
diff --git a/x-pack/docs/en/security/auditing.asciidoc b/x-pack/docs/en/security/auditing.asciidoc
new file mode 100644
index 0000000000000..8bff8727f8358
--- /dev/null
+++ b/x-pack/docs/en/security/auditing.asciidoc
@@ -0,0 +1,474 @@
+[[auditing]]
+== Auditing Security Events
+
+You can enable auditing to keep track of security-related events such as
+authentication failures and refused connections. Logging these events enables you
+to monitor your cluster for suspicious activity and provides evidence in the
+event of an attack.
+
+[IMPORTANT]
+============================================================================
+Audit logs are **disabled** by default. To enable this functionality, you
+must set `xpack.security.audit.enabled` to `true` in `elasticsearch.yml`.
+============================================================================
+
+{Security} provides two ways to persist audit logs:
+
+* The <> output, which persists events to
+  a dedicated `_access.log` file on the host's file system.
+* The <> output, which persists events to an Elasticsearch index.
+The audit index can reside on the same cluster, or a separate cluster.
+
+By default, only the `logfile` output is used when enabling auditing.
+To facilitate browsing and analyzing the events, you can also enable
+indexing by setting `xpack.security.audit.outputs` in `elasticsearch.yml`:
+
+[source,yaml]
+----------------------------
+xpack.security.audit.outputs: [ index, logfile ]
+----------------------------
+
+The `index` output type should be used in conjunction with the `logfile`
+output type. Because it is possible for the `index` output type to lose
+messages if the target index is unavailable, the `_access.log` file should be
+used as the official record of events.
+
+NOTE: Audit events are batched for indexing so there is a lag before
+events appear in the index.
You can control how frequently batches of +events are pushed to the index by setting +`xpack.security.audit.index.flush_interval` in `elasticsearch.yml`. + +[float] +[[audit-event-types]] +=== Audit Event Types + +Each request may generate multiple audit events. +The following is a list of the events that can be generated: + +|====== +| `anonymous_access_denied` | | | Logged when a request is denied due to a missing + authentication token. +| `authentication_success` | | | Logged when a user successfully authenticates. +| `authentication_failed` | | | Logged when the authentication token cannot be + matched to a known user. +| `realm_authentication_failed` | | | Logged for every realm that fails to present a valid + authentication token. `` represents the + realm type. +| `access_denied` | | | Logged when an authenticated user attempts to execute + an action they do not have the necessary + <> to perform. +| `access_granted` | | | Logged when an authenticated user attempts to execute + an action they have the necessary privilege to perform. + When the `system_access_granted` event is included, all system + (internal) actions are also logged. The default setting does + not log system actions to avoid cluttering the logs. +| `run_as_granted` | | | Logged when an authenticated user attempts to <> + another user that they have the necessary privileges to do. +| `run_as_denied` | | | Logged when an authenticated user attempts to <> + another user action they do not have the necessary + <> to do so. +| `tampered_request` | | | Logged when {security} detects that the request has + been tampered with. Typically relates to `search/scroll` + requests when the scroll ID is believed to have been + tampered with. +| `connection_granted` | | | Logged when an incoming TCP connection passes the + <> for a specific + profile. +| `connection_denied` | | | Logged when an incoming TCP connection does not pass the + <> for a specific + profile. +|====== + +[float] +[[audit-event-attributes]] +=== Audit Event Attributes + +The following table shows the common attributes that can be associated with every event. + +.Common Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `timestamp` | When the event occurred. +| `node_name` | The name of the node. +| `node_host_name` | The hostname of the node. +| `node_host_address` | The IP address of the node. +| `layer` | The layer from which this event originated: `rest`, `transport` or `ip_filter` +| `event_type` | The type of event that occurred: `anonymous_access_denied`, + `authentication_failed`, `access_denied`, `access_granted`, + `connection_granted`, `connection_denied`, `tampered_request`, + `run_as_granted`, `run_as_denied`. +|====== + +The following tables show the attributes that can be associated with each type of event. +The log level determines which attributes are included in a log entry. + +.REST anonymous_access_denied Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_address` | The IP address from which the request originated. +| `uri` | The REST endpoint URI. +| `request_body` | The body of the request, if enabled. +|====== + +.REST authentication_success Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `user` | The authenticated user. +| `realm` | The realm that authenticated the user. +| `uri` | The REST endpoint URI. +| `params` | The REST URI query parameters. +| `request_body` | The body of the request, if enabled. 
+|====== + +.REST authentication_failed Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_address` | The IP address from which the request originated. +| `principal` | The principal (username) that failed authentication. +| `uri` | The REST endpoint URI. +| `request_body` | The body of the request, if enabled. +|====== + +.REST realm_authentication_failed Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_address` | The IP address from which the request originated. +| `principal` | The principal (username) that failed authentication. +| `uri` | The REST endpoint URI. +| `request_body` | The body of the request, if enabled. +| `realm` | The realm that failed to authenticate the user. + NOTE: A separate entry is logged for each + consulted realm. +|====== + +.Transport anonymous_access_denied Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_type` | Where the request originated: `rest` (request + originated from a REST API request), `transport` + (request was received on the transport channel), + `local_node` (the local node issued the request). +| `origin_address` | The IP address from which the request originated. +| `action` | The name of the action that was executed. +| `request` | The type of request that was executed. +| `indices` | A comma-separated list of indices this request + pertains to (when applicable). +|====== + +.Transport authentication_success Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_type` | Where the request originated: `rest` (request + originated from a REST API request), `transport` + (request was received on the transport channel), + `local_node` (the local node issued the request). +| `origin_address` | The IP address from which the request originated. +| `user` | The authenticated user. +| `realm` | The realm that authenticated the user. +| `action` | The name of the action that was executed. +| `request` | The type of request that was executed. +|====== + +.Transport authentication_failed Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_type` | Where the request originated: `rest` (request + originated from a REST API request), `transport` + (request was received on the transport channel), + `local_node` (the local node issued the request). +| `origin_address` | The IP address from which the request originated. +| `principal` | The principal (username) that failed authentication. +| `action` | The name of the action that was executed. +| `request` | The type of request that was executed. +| `indices` | A comma-separated list of indices this request + pertains to (when applicable). +|====== + +.Transport realm_authentication_failed Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_type` | Where the request originated: `rest` (request + originated from a REST API request), `transport` + (request was received on the transport channel), + `local_node` (the local node issued the request). +| `origin_address` | The IP address from which the request originated. +| `principal` | The principal (username) that failed authentication. +| `action` | The name of the action that was executed. +| `request` | The type of request that was executed. +| `indices` | A comma-separated list of indices this request + pertains to (when applicable). +| `realm` | The realm that failed to authenticate the user. 
+ NOTE: A separate entry is logged for each + consulted realm. +|====== + +.Transport access_granted Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_type` | Where the request originated: `rest` (request + originated from a REST API request), `transport` + (request was received on the transport channel), + `local_node` (the local node issued the request). +| `origin_address` | The IP address from which the request originated. +| `principal` | The principal (username) that passed authentication. +| `roles` | The set of roles granting permissions. +| `action` | The name of the action that was executed. +| `request` | The type of request that was executed. +| `indices` | A comma-separated list of indices this request + pertains to (when applicable). +|====== + +.Transport access_denied Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_type` | Where the request originated: `rest` (request + originated from a REST API request), `transport` + (request was received on the transport channel), + `local_node` (the local node issued the request). +| `origin_address` | The IP address from which the request originated. +| `principal` | The principal (username) that failed authentication. +| `roles` | The set of roles granting permissions. +| `action` | The name of the action that was executed. +| `request` | The type of request that was executed. +| `indices` | A comma-separated list of indices this request + relates to (when applicable). +|====== + +.Transport tampered_request Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_type` | Where the request originated: `rest` (request + originated from a REST API request), `transport` + (request was received on the transport channel), + `local_node` (the local node issued the request). +| `origin_address` | The IP address from which the request originated. +| `principal` | The principal (username) that failed to authenticate. +| `action` | The name of the action that was executed. +| `request` | The type of request that was executed. +| `indices` | A comma-separated list of indices this request + pertains to (when applicable). +|====== + +.IP Filter connection_granted Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_address` | The IP address from which the request originated. +| `transport_profile` | The transport profile the request targeted. +| `rule` | The <> rule that granted + the request. +|====== + +.IP Filter connection_denied Attributes +[cols="2,7",options="header"] +|====== +| Attribute | Description +| `origin_address` | The IP address from which the request originated. +| `transport_profile` | The transport profile the request targeted. +| `rule` | The <> rule that denied + the request. +|====== + +[float] +[[audit-log-output]] +=== Logfile Audit Output + +The `logfile` audit output is the default output for auditing. It writes data to +the `_access.log` file in the logs directory. + +[float] +[[audit-log-entry-format]] +=== Log Entry Format + +The format of a log entry is: + +[source,txt] +---------------------------------------------------------------------------- +[] [] [] [] +---------------------------------------------------------------------------- + +`` :: When the event occurred. You can configure the + timestamp format in `log4j2.properties`. +`` :: Information about the local node that generated + the log entry. 
You can control what node information + is included by configuring the + {ref}/auditing-settings.html#node-audit-settings[local node info settings]. +`` :: The layer from which this event originated: + `rest`, `transport` or `ip_filter`. +`` :: The type of event that occurred: `anonymous_access_denied`, + `authentication_failed`, `access_denied`, `access_granted`, + `connection_granted`, `connection_denied`. +`` :: A comma-separated list of key-value pairs that contain + data pertaining to the event. Formatted as + `attr1=[val1], attr2=[val2]`. See <> for the attributes that can be included + for each type of event. + +[float] +[[audit-log-settings]] +=== Logfile Output Settings + +The events and some other information about what gets logged can be +controlled using settings in the `elasticsearch.yml` file. See +{ref}/auditing-settings.html#event-audit-settings[Audited Event Settings] and +{ref}/auditing-settings.html#node-audit-settings[Local Node Info Settings]. + +IMPORTANT: No filtering is performed when auditing, so sensitive data may be +audited in plain text when including the request body in audit events. + +[[logging-file]] +You can also configure how the logfile is written in the `log4j2.properties` +file located in `CONFIG_DIR/x-pack`. By default, audit information is appended to the +`_access.log` file located in the standard Elasticsearch `logs` directory +(typically located at `$ES_HOME/logs`). The file rolls over on a daily basis. + +[float] +[[audit-log-ignore-policy]] +=== Logfile Audit Events Ignore Policies + +The comprehensive audit trail is necessary to ensure accountability. It offers tremendous +value during incident response and can even be required for demonstrating compliance. + +The drawback of an audited system is represented by the inevitable performance penalty incurred. +In all truth, the audit trail spends _I/O ops_ that are not available anymore for the user's queries. +Sometimes the verbosity of the audit trail may become a problem that the event type restrictions, +<>, will not alleviate. + +*Audit events ignore policies* are a finer way to tune the verbosity of the audit trail. +These policies define rules that match audit events which will be _ignored_ (read as: not printed). +Rules match on the values of attributes of audit events and complement the <> method. +Imagine the corpus of audit events and the policies chopping off unwanted events. + +IMPORTANT: When utilizing audit events ignore policies you are acknowledging potential +accountability gaps that could render illegitimate actions undetectable. +Please take time to review these policies whenever your system architecture changes. + +A policy is a named set of filter rules. Each filter rule applies to a single event attribute, +one of the `users`, `realms`, `roles` or `indices` attributes. The filter rule defines +a list of {ref}/query-dsl-regexp-query.html#regexp-syntax[Lucene regexp], *any* of which has to match the value of the audit +event attribute for the rule to match. +A policy matches an event if *all* the rules comprising it match the event. +An audit event is ignored, therefore not printed, if it matches *any* policy. All other +non-matching events are printed as usual. + +All policies are defined under the `xpack.security.audit.logfile.events.ignore_filters` +settings namespace. 
For example, the following policy named _example1_ matches +events from the _kibana_ or _admin_user_ principals **and** operating over indices of the +wildcard form _app-logs*_: + +[source,yaml] +---------------------------- +xpack.security.audit.logfile.events.ignore_filters: + example1: + users: ["kibana", "admin_user"] + indices: ["app-logs*"] +---------------------------- + +An audit event generated by the _kibana_ user and operating over multiple indices +, some of which do not match the indices wildcard, will not match. +As expected, operations generated by all other users (even operating only on indices that +match the _indices_ filter) will not match this policy either. + +Audit events of different types may have <>. +If an event does not contain an attribute for which some policy defines filters, the +event will not match the policy. +For example, the following policy named _example2_, will never match `authentication_success` or +`authentication_failed` events, irrespective of the user's roles, because these +event schemas do not contain the `role` attribute: + +[source,yaml] +---------------------------- +xpack.security.audit.logfile.events.ignore_filters: + example2: + roles: ["admin", "ops_admin_*"] +---------------------------- + +Likewise, any events of users with multiple roles, some of which do not match the +regexps will not match this policy. + +For completeness, although practical use cases should be sparse, a filter can match +a missing attribute of an event, using the empty string ("") or the empty list ([]). +For example, the following policy will match events that do not have the `indices` +attribute (`anonymous_access_denied`, `authentication_success` and other types) as well +as events over the _next_ index. + +[source,yaml] +---------------------------- +xpack.security.audit.logfile.events.ignore_filters: + example3: + indices: ["next", ""] +---------------------------- + + +[float] +[[audit-index]] +=== Index Audit Output + +In addition to logging to a file, you can store audit logs in Elasticsearch +rolling indices. These indices can be either on the same cluster, or on a +remote cluster. You configure the following settings in +`elasticsearch.yml` to control how audit entries are indexed. To enable +this output, you need to configure the setting `xpack.security.audit.outputs` +in the `elasticsearch.yml` file: + +[source,yaml] +---------------------------- +xpack.security.audit.outputs: [ index, logfile ] +---------------------------- + +For more configuration options, see +{ref}/auditing-settings.html#index-audit-settings[Audit Log Indexing Configuration Settings]. + +IMPORTANT: No filtering is performed when auditing, so sensitive data may be +audited in plain text when including the request body in audit events. + +[float] +==== Audit Index Settings + +You can also configure settings for the indices that the events are stored in. +These settings are configured in the `xpack.security.audit.index.settings` namespace +in `elasticsearch.yml`. 
For example, the following configuration sets the +number of shards and replicas to 1 for the audit indices: + +[source,yaml] +---------------------------- +xpack.security.audit.index.settings: + index: + number_of_shards: 1 + number_of_replicas: 1 +---------------------------- + +[float] +==== Forwarding Audit Logs to a Remote Cluster + +To index audit events to a remote Elasticsearch cluster, you configure +the following `xpack.security.audit.index.client` settings: + +* `xpack.security.audit.index.client.hosts` +* `xpack.security.audit.index.client.cluster.name` +* `xpack.security.audit.index.client.xpack.security.user` + +For more information about these settings, see +{ref}/auditing-settings.html#remote-audit-settings[Remote Audit Log Indexing Configuration Settings]. + +You can pass additional settings to the remote client by specifying them in the +`xpack.security.audit.index.client` namespace. For example, to allow the remote +client to discover all of the nodes in the remote cluster you can specify the +`client.transport.sniff` setting: + +[source,yaml] +---------------------------- +xpack.security.audit.index.client.transport.sniff: true +---------------------------- diff --git a/x-pack/docs/en/security/authentication.asciidoc b/x-pack/docs/en/security/authentication.asciidoc new file mode 100644 index 0000000000000..8838a0fefbf25 --- /dev/null +++ b/x-pack/docs/en/security/authentication.asciidoc @@ -0,0 +1,350 @@ +[[setting-up-authentication]] +== Setting Up User Authentication + +Authentication identifies an individual. To gain access to restricted resources, +a user must prove their identity, via passwords, credentials, or some other +means (typically referred to as authentication tokens). + +You can use the native support for managing and authenticating users, or +integrate with external user management systems such as LDAP and Active +Directory. For information about managing native users, +see <>. + +[float] +[[built-in-users]] +=== Built-in Users + +{security} provides built-in user credentials to help you get up and running. +These users have a fixed set of privileges and cannot be authenticated until their +passwords have been set. The `elastic` user can be used to +<>. + +`elastic`:: A built-in _superuser_. See <>. +`kibana`:: The user Kibana uses to connect and communicate with Elasticsearch. +`logstash_system`:: The user Logstash uses when storing monitoring information in Elasticsearch. +`beats_system`:: The user the Beats use when storing monitoring information in Elasticsearch. + + +[float] +[[built-in-user-explanation]] +==== How the Built-in Users Work +These built-in users are stored within a special `.security` index managed by +{security}. +This means that, if the password is changed, or a user is disabled, then that +change is automatically reflected on each node in the cluster. It also means +that if your `.security` index is deleted, or restored from a snapshot, then +any changes you have applied will be lost. + +Although they share the same API, the built-in users are separate and distinct +from users managed by the <>. Disabling the native +realm will not have any effect on the built-in users. The built-in users can +be disabled individually, using the +{ref}/security-api-users.html[user management API]. + +[float] +[[bootstrap-elastic-passwords]] +==== The Elastic Bootstrap Password + +When you install {es}, if the `elastic` user does not already have a password, +it uses a default bootstrap password. 
The bootstrap password is a transient +password that enables you to run the tools that set all the built-in user passwords. + +By default, the bootstrap password is derived from a randomized `keystore.seed` +setting, which is added to the keystore during installation. You do not need +to know or change this bootstrap password. If you have defined a +`bootstrap.password` setting in the keystore, however, that value is used instead. +For more information about interacting with the keystore, see +{ref}/secure-settings.html[Secure Settings]. + +NOTE: After you <>, +in particular for the `elastic` user, there is no further use for the bootstrap +password. + +[float] +[[set-built-in-user-passwords]] +==== Setting Built-in User Passwords + +You must set the passwords for all built-in users. + +The +elasticsearch-setup-passwords+ tool is the simplest method to set the +built-in users' passwords for the first time. It uses the `elastic` user's +bootstrap password to run user management API requests. For example, you can run +the command in an "interactive" mode, which prompts you to enter new passwords +for the `elastic`, `kibana`, `logstash_system`, and `beats_system` users: + +[source,shell] +-------------------------------------------------- +bin/elasticsearch-setup-passwords interactive +-------------------------------------------------- + +For more information about the command options, see +{ref}/setup-passwords.html[elasticsearch-setup-passwords]. + +IMPORTANT: After you set a password for the `elastic` user, the bootstrap +password is no longer valid; you cannot run the `elasticsearch-setup-passwords` +command a second time. + +Alternatively, you can set the initial passwords for the built-in users by using +the *Management > Users* page in {kib} or the +{ref}/security-api-change-password.html[Change Password API]. These methods are +more complex. You must supply the `elastic` user and its bootstrap password to +log into {kib} or run the API. This requirement means that you cannot use the +default bootstrap password that is derived from the `keystore.seed` setting. +Instead, you must explicitly set a `bootstrap.password` setting in the keystore +before you start {es}. For example, the following command prompts you to enter a +new bootstrap password: + +[source,shell] +---------------------------------------------------- +bin/elasticsearch-keystore add "bootstrap.password" +---------------------------------------------------- + +You can then start {es} and {kib} and use the `elastic` user and bootstrap +password to log into {kib} and change the passwords. Alternatively, you can +submit Change Password API requests for each built-in user. These methods are +better suited for changing your passwords after the initial setup is complete, +since at that point the bootstrap password is no longer required. + +[float] +[[add-built-in-user-passwords]] +==== Adding Built-in User Passwords To {kib}, Logstash, and Beats + +After the `kibana` user password is set, you need to update the {kib} server +with the new password by setting `elasticsearch.password` in the `kibana.yml` +configuration file: + +[source,yaml] +----------------------------------------------- +elasticsearch.password: kibanapassword +----------------------------------------------- + +The `logstash_system` user is used internally within Logstash when +monitoring is enabled for Logstash. 
+ +To enable this feature in Logstash, you need to update the Logstash +configuration with the new password by setting `xpack.monitoring.elasticsearch.password` in +the `logstash.yml` configuration file: + +[source,yaml] +---------------------------------------------------------- +xpack.monitoring.elasticsearch.password: logstashpassword +---------------------------------------------------------- + +If you have upgraded from an older version of elasticsearch/x-pack, +the `logstash_system` user may have defaulted to _disabled_ for security reasons. +Once the password has been changed, you can enable the user via the following API call: + +[source,js] +--------------------------------------------------------------------- +PUT _xpack/security/user/logstash_system/_enable +--------------------------------------------------------------------- +// CONSOLE + +The `beats_system` user is used internally within Beats when monitoring is +enabled for Beats. + +To enable this feature in Beats, you need to update the configuration for each +of your beats to reference the correct username and password. For example: + +[source,yaml] +---------------------------------------------------------- +xpack.monitoring.elasticsearch.username: beats_system +xpack.monitoring.elasticsearch.password: beatspassword +---------------------------------------------------------- + +If you have upgraded from an older version of {es}, then you may not have set a +password for the `beats_system` user. If this is the case, then you should use +the *Management > Users* page in {kib} or the +{ref}/security-api-change-password.html[Change Password API] to set a password +for this user. + +[float] +[[disabling-default-password]] +==== Disabling Default Password Functionality +[IMPORTANT] +============================================================================= +This setting is deprecated. The elastic user no longer has a default password. +The password must be set before the user can be used. +See <>. +============================================================================= + +[float] +[[internal-users]] +=== Internal Users + +{security} has three _internal_ users (`_system`, `_xpack`, and `_xpack_security`) +that are responsible for the operations that take place inside an {es} cluster. + +These users are only used by requests that originate from within the cluster. +For this reason, they cannot be used to authenticate against the API and there +is no password to manage or reset. + +From time-to-time you may find a reference to one of these users inside your +logs, including <>. + +[[how-authc-works]] +=== How Authentication Works + +Authentication in {security} is handled by one or more authentication services +called _realms_. A _realm_ is used to resolve and authenticate users based on +authentication tokens. {security} provides the following built-in realms: + +_native_:: +An internal realm where users are stored in a dedicated Elasticsearch index. +This realm supports an authentication token in the form of username and password, +and is available by default when no realms are explicitly configured. See +<>. + +_ldap_:: +A realm that uses an external LDAP server to authenticate the +users. This realm supports an authentication token in the form of username and +password, and requires explicit configuration in order to be used. See +<>. + +_active_directory_:: +A realm that uses an external Active Directory Server to authenticate the +users. With this realm, users are authenticated by usernames and passwords. +See <>. 
+
+_pki_::
+A realm that authenticates users using Public Key Infrastructure (PKI). This
+realm works in conjunction with SSL/TLS and identifies the users through the
+Distinguished Name (DN) of the client's X.509 certificates. See <>.
+
+_file_::
+An internal realm where users are defined in files stored on each node in the
+Elasticsearch cluster. This realm supports an authentication token in the form
+of username and password, and is always available. See <>.
+
+_saml_::
+A realm that facilitates authentication using the SAML 2.0 Web SSO protocol.
+This realm is designed to support authentication through {kib}, and is not
+intended for use in the REST API. See <>.
+
+{security} also supports custom realms. If you need to integrate with another
+authentication system, you can build a custom realm plugin. For more information,
+see <>.
+
+Realms live within a _realm chain_. It is essentially a prioritized list of
+configured realms (typically of various types). The order of the list determines
+the order in which the realms will be consulted. You should make sure each
+configured realm has a distinct `order` setting. In the event that two or more
+realms have the same `order`, they will be processed in `name` order.
+During the authentication process, {security} will consult and try to
+authenticate the request one realm at a time.
+Once one of the realms successfully authenticates the request, the authentication
+is considered to be successful and the authenticated user will be associated
+with the request (which will then proceed to the authorization phase). If a realm
+cannot authenticate the request, the next realm in the chain will be
+consulted. If none of the realms in the chain can authenticate the request, the
+authentication is then considered to be unsuccessful and an authentication error
+will be returned (as HTTP status code `401`).
+
+NOTE: Some systems (e.g. Active Directory) have a temporary lock-out period after
+      several successive failed login attempts. If the same username exists in
+      multiple realms, unintentional account lockouts are possible. For more
+      information, please see <>.
+
+The default realm chain contains the `native` and `file` realms. To explicitly
+configure a realm chain, you specify the chain in `elasticsearch.yml`. When you
+configure a realm chain, only the realms you specify are used for authentication.
+To use the `native` and `file` realms, you must include them in the chain.
+
+The following snippet configures a realm chain that includes the `file` and
+`native` realms, as well as two LDAP realms and an Active Directory realm.
+
+[source,yaml]
+----------------------------------------
+xpack.security.authc:
+  realms:
+
+    file:
+      type: file
+      order: 0
+
+    native:
+      type: native
+      order: 1
+
+    ldap1:
+      type: ldap
+      order: 2
+      enabled: false
+      url: 'url_to_ldap1'
+      ...
+
+    ldap2:
+      type: ldap
+      order: 3
+      url: 'url_to_ldap2'
+      ...
+
+    ad1:
+      type: active_directory
+      order: 4
+      url: 'url_to_ad'
+----------------------------------------
+
+As can be seen above, each realm has a unique name that identifies it and each
+realm type dictates its own set of required and optional settings. That said,
+there are three settings that are common to all realms:
+
+[cols=",^,",options="header"]
+|=========
+| Setting   | Required  | Description
+
+| `type`    | true      | Identifies the type of the realm. The realm type
+                          determines what other settings the realms should be
+                          configured with.
The type can be one of: `native`, + `ldap`, `active_directory`, `pki`, `file`, or in case + of a custom realm, the type name that identifies it. + +| `order` | false | A numeric value representing the priority/index of + the realm within the realm chain. This will determine + the order by which the realms will be consulted + during authentication, with lower order being consulted + first. + +| `enabled` | false | When set to `false` the realm will be disabled and + will not be added to the realm chain. This is useful + for debugging purposes as it enables you to remove + a realm from the chain without deleting and losing + its configuration. +|========= + +Realm types can roughly be classified in two categories: + +Internal:: Realms that are internal to Elasticsearch and don't require any + communication with external parties. They are fully managed by + {security}. There can only be a maximum of one configured realm + per internal realm type. {security} provides two internal realm + types: `native` and `file`. + +External:: Realms that require interaction with parties/components external to + Elasticsearch, typically, with enterprise grade identity management + systems. Unlike internal realms, there can be as many external realms + as one would like - each with its own unique name and configuration. + {security} provides three external realm types: `ldap`, + `active_directory` and `pki`. + +include::authentication/anonymous-access.asciidoc[] + +include::authentication/native-realm.asciidoc[] + +include::authentication/ldap-realm.asciidoc[] + +include::authentication/active-directory-realm.asciidoc[] + +include::authentication/pki-realm.asciidoc[] + +include::authentication/file-realm.asciidoc[] + +include::authentication/saml-realm.asciidoc[] + +include::authentication/custom-realm.asciidoc[] + +include::authentication/user-cache.asciidoc[] + +include::authentication/saml-guide.asciidoc[] diff --git a/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc b/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc new file mode 100644 index 0000000000000..2aaca6def915a --- /dev/null +++ b/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc @@ -0,0 +1,492 @@ +[[active-directory-realm]] +=== Active Directory User Authentication + +You can configure {security} to communicate with Active Directory to authenticate +users. To integrate with Active Directory, you configure an `active_directory` +realm and map Active Directory users and groups to {security} roles in the +<>. + +To protect passwords, communications between Elasticsearch and the Active Directory +server should be encrypted using SSL/TLS. Clients and nodes that connect via +SSL/TLS to the Active Directory server need to have the Active Directory server's +certificate or the server's root CA certificate installed in their keystore or +truststore. For more information about installing certificates, see +<>. + +==== Configuring an Active Directory Realm + +{security} uses LDAP to communicate with Active Directory, so `active_directory` +realms are similar to <>. Like LDAP directories, +Active Directory stores users and groups hierarchically. The directory's +hierarchy is built from containers such as the _organizational unit_ (`ou`), +_organization_ (`o`), and _domain controller_ (`dc`). + +The path to an entry is a _Distinguished Name_ (DN) that uniquely identifies a +user or group. User and group names typically have attributes such as a +_common name_ (`cn`) or _unique ID_ (`uid`). 
A DN is specified as a string, for +example `"cn=admin,dc=example,dc=com"` (white spaces are ignored). + +{security} only supports Active Directory security groups. You cannot map +distribution groups to roles. + +NOTE: When you use Active Directory for authentication, the username entered by + the user is expected to match the `sAMAccountName` or `userPrincipalName`, + not the common name. + +The Active Directory realm authenticates users using an LDAP bind request. After +authenticating the user, the realm then searches to find the user's entry in +Active Directory. Once the user has been found, the Active Directory realm then +retrieves the user's group memberships from the `tokenGroups` attribute on the +user's entry in Active Directory. + +To configure an `active_directory` realm: + +. Add a realm configuration of type `active_directory` to `elasticsearch.yml` +under the `xpack.security.authc.realms` namespace. At a minimum, you must set the realm +`type` to `active_directory` and specify the Active Directory `domain_name`. To +use SSL/TLS for secured communication with the Active Directory server, you must +also set the `url` attribute and specify the `ldaps` protocol and secure port +number. If you are configuring multiple realms, you should also explicitly set +the `order` attribute to control the order in which the realms are consulted +during authentication. See <> +for all of the options you can set for an `active_directory` realm. ++ +NOTE: Binding to Active Directory fails if the domain name is not mapped in DNS. + If DNS is not being provided by a Windows DNS server, add a mapping for + the domain in the local `/etc/hosts` file. ++ +For example, the following realm configuration configures {security} to connect +to `ldaps://example.com:636` to authenticate users through Active Directory. ++ +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + active_directory: + type: active_directory + order: 0 <1> + domain_name: ad.example.com + url: ldaps://ad.example.com:636 <2> +------------------------------------------------------------ +<1> The realm order controls the order in which the configured realms are checked + when authenticating a user. +<2> If you don't specify the URL, it defaults to `ldap::389`. ++ +IMPORTANT: When you configure realms in `elasticsearch.yml`, only the +realms you specify are used for authentication. If you also want to use the +`native` or `file` realms, you must include them in the realm chain. + +. Restart Elasticsearch. + +===== Configuring a Bind User +By default, all of the LDAP operations are run by the user that {security} is +authenticating. In some cases, regular users may not be able to access all of the +necessary items within Active Directory and a _bind user_ is needed. A bind user +can be configured and will be used to perform all operations other than the LDAP +bind request, which is required to authenticate the credentials provided by the user. + +The use of a bind user enables the <> to be +used with the Active Directory realm and the ability to maintain a set of pooled +connections to Active Directory. These pooled connection reduce the number of +resources that must be created and destroyed with every user authentication. + +The following example shows the configuration of a bind user through the user of the +`bind_dn` and `secure_bind_password` settings. 
+ +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + active_directory: + type: active_directory + order: 0 + domain_name: ad.example.com + url: ldaps://ad.example.com:636 + bind_dn: es_svc_user@ad.example.com <1> +------------------------------------------------------------ +<1> This is the user that all Active Directory search requests are executed as. + Without a bind user configured, all requests run as the user that is authenticating + with Elasticsearch. + +The password for the `bind_dn` user should be configured by adding the appropriate +`secure_bind_password` setting to the {es} keystore. +For example, the following command adds the password for the example realm above: + +[source, shell] +------------------------------------------------------------ +bin/elasticsearch-keystore add xpack.security.authc.realms.active_directory.secure_bind_password +------------------------------------------------------------ + +When a bind user is configured, connection pooling is enabled by default. +Connection pooling can be disabled using the `user_search.pool.enabled` setting. + +===== Multiple Domain Support +When authenticating users across multiple domains in a forest, there are a few minor +differences in the configuration and the way that users will authenticate. The `domain_name` +setting should be set to the forest root domain name. The `url` setting also needs to +be set as you will need to authenticate against the Global Catalog, which uses a different +port and may not be running on every Domain Controller. + +For example, the following realm configuration configures {security} to connect to specific +Domain Controllers on the Global Catalog port with the domain name set to the forest root. + +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + active_directory: + type: active_directory + order: 0 + domain_name: example.com <1> + url: ldaps://dc1.ad.example.com:3269, ldaps://dc2.ad.example.com:3269 <2> + load_balance: + type: "round_robin" <3> +------------------------------------------------------------ +<1> The `domain_name` is set to the name of the root domain in the forest. +<2> The `url` value used in this example has URLs for two different Domain Controllers, +which are also Global Catalog servers. Port 3268 is the default port for unencrypted +communication with the Global Catalog; port 3269 is the default port for SSL connections. +The servers that are being connected to can be in any domain of the forest as long as +they are also Global Catalog servers. +<3> A load balancing setting is provided to indicate the desired behavior when choosing +the server to connect to. + +In this configuration, users will need to use either their full User Principal +Name (UPN) or their Down-Level Logon Name. A UPN is typically a concatenation of +the username with `@:`. + {security} attempts to authenticate against this URL. If the + URL is not specified, it is derived from the `domain_name`, + assuming an unencrypted connection to port 389. For example, + `ldap://:389`. This settings is required when + connecting using SSL/TLS or via a custom port. +| `bind_dn` | no | The DN of the user that is used to bind to Active Directory + and perform searches. Due to its potential security + impact, `bind_dn` is not exposed via the + {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API]. 
+| `bind_password` | no | The password for the user that is used to bind to + Active Directory. Due to its potential security impact, + `bind_password` is not exposed via the + {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API]. + *Deprecated.* Use `secure_bind_password` instead. +| `secure_bind_password` | no | ({ref}/secure-settings.html[Secure]) + The password for the user that is used to bind to Active Directory. +| `load_balance.type` | no | The behavior to use when there are multiple LDAP URLs defined. + For supported values see <>. +| `load_balance.cache_ttl` | no | When using `dns_failover` or `dns_round_robin` as the load + balancing type, this setting controls the amount of time to + cache DNS lookups. Defaults to `1h`. +| `user_search.base_dn` | no | Specifies the context to search for the user. Defaults to the + root of the Active Directory domain. +| `user_search.scope` | no | Specifies whether the user search should be `sub_tree` (default), + `one_level`, or `base`. `sub_tree` searches all objects contained + under `base_dn`. `one_level` only searches users directly + contained within the `base_dn`. `base` specifies that the + `base_dn` is a user object and that it is the only user considered. +| `user_search.filter` | no | Specifies a filter to use to lookup a user given a username. + The default filter looks up `user` objects with either + `sAMAccountName` or `userPrincipalName`. If specified, this + must be a valid LDAP user search filter, for example + `(&(objectClass=user)(sAMAccountName={0}))`. For more + information, see https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[Search Filter Syntax]. +| `user_search.upn_filter` | no | Specifies a filter to use to lookup a user given a user principal name. + The default filter looks up `user` objects with + a matching `userPrincipalName`. If specified, this + must be a valid LDAP user search filter, for example + `(&(objectClass=user)(userPrincipalName={1}))`. `{1}` is + the full user principal name provided by the user. For more + information, see https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[Search Filter Syntax]. +| `user_search.down_level_filter` | no | Specifies a filter to use to lookup a user given a down level logon name (DOMAIN\user). + The default filter looks up `user` objects with a matching + `sAMAccountName` in the domain provided. If specified, this + must be a valid LDAP user search filter, for example + `(&(objectClass=user)(sAMAccountName={0}))`. For more + information, see https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx[Search Filter Syntax]. +| `user_search.pool.enabled` | no | Enables or disables connection pooling for user search. When + disabled a new connection is created for every search. The + default is `true` when `bind_dn` is provided. +| `user_search.pool.size` | no | Specifies the maximum number of connections to Active Directory + server to allow in the connection pool. Defaults to `20`. +| `user_search.pool.initial_size` | no | The initial number of connections to create to Active Directory + server on startup. Defaults to `0`. Values greater than `0` + could cause startup failures if the LDAP server is down. +| `user_search.pool.health_check.enabled` | no | Enables or disables a health check on Active Directory connections in + the connection pool. Connections are checked in the + background at the specified interval. Defaults to `true`. 
+| `user_search.pool.health_check.dn` | no | Specifies the distinguished name to retrieve as part of + the health check. Defaults to the value of `bind_dn` if present, and if + not falls back to `user_search.base_dn`. +| `user_search.pool.health_check.interval` | no | How often to perform background checks of connections in + the pool. Defaults to `60s`. +| `group_search.base_dn` | no | Specifies the context to search for groups in which the user + has membership. Defaults to the root of the Active Directory + domain. +| `group_search.scope` | no | Specifies whether the group search should be `sub_tree` (default), + `one_level` or `base`. `sub_tree` searches all objects contained + under `base_dn`. `one_level` searches for groups directly + contained within the `base_dn`. `base` specifies that the + `base_dn` is a group object and that it is the only group considered. +| `unmapped_groups_as_roles` | no | Specifies whether the names of any unmapped Active Directory + groups should be used as role names and assigned to the user. + A group is considered to be _unmapped_ if it is not referenced + in any <> (API based + role-mappings are not considered). + Defaults to `false`. +| `files.role_mapping` | no | Specifies the path and file name of the + <>. + Defaults to `ES_PATH_CONF/x-pack/role_mapping.yml`, + where `ES_PATH_CONF` is `ES_HOME/config` (zip/tar installations) + or `/etc/elasticsearch` (package installations). +| `follow_referrals` | no | Specifies whether {security} should follow referrals returned + by the Active Directory server. Referrals are URLs returned by + the server that are to be used to continue the LDAP operation + (such as `search`). Defaults to `true`. +| `metadata` | no | Specifies the list of additional LDAP attributes that should + be stored in the `metadata` of an authenticated user. +| `ssl.key` | no | Specifies the path to the PEM encoded private key to use if the Active Directory + server requires client authentication. `ssl.key` and `ssl.keystore.path` may not be used at the + same time. +| `ssl.key_passphrase` | no | Specifies the passphrase to decrypt the PEM encoded private key if it is encrypted. +| `ssl.certificate` | no | Specifies the path to the PEM encoded certificate (or certificate chain) that goes with the key + if the Active Directory server requires client authentication. +| `ssl.certificate_authorities`| no | Specifies the paths to the PEM encoded certificate authority certificates that + should be trusted. `ssl.certificate_authorities` and `ssl.truststore.path` may not be used at + the same time. +| `ssl.keystore.path` | no | The path to the Java Keystore file that contains a private key and certificate. `ssl.key` and + `ssl.keystore.path` may not be used at the same time. +| `ssl.keystore.password` | no | The password to the keystore. +| `ssl.keystore.key_password`| no | The password for the key in the keystore. Defaults to the keystore password. +| `ssl.truststore.path` | no | The path to the Java Keystore file that contains the certificates to trust. + `ssl.certificate_authorities` and `ssl.truststore.path` may not be used at the same time. +| `ssl.truststore.password` | no | The password to the truststore. +| `ssl.verification_mode` | no | Specifies the type of verification to be performed when + connecting to an Active Directory server using `ldaps`. When + set to `full`, the hostname or IP address used in the `url` + must match one of the names in the certificate or the + connection will not be allowed. 
Due to their potential security impact, + `ssl` settings are not exposed via the + {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API]. ++ + Values are `none`, `certificate`, and `full`. Defaults to `full`. ++ + See {ref}/security-settings.html#ssl-tls-settings[`xpack.ssl.verification_mode`] + for an explanation of these values. +| `ssl.supported_protocols` | no | Specifies the supported protocols for TLS/SSL. +| `ssl.cipher_suites` | no | Specifies the cipher suites that should be supported when communicating + with the Active Directory server. +| `cache.ttl` | no | Specifies the time-to-live for cached user entries. A user's + credentials are cached for this period of time. Specify the + time period using the standard Elasticsearch + {ref}/common-options.html#time-units[time units]. + Defaults to `20m`. +| `cache.max_users` | no | Specifies the maximum number of user entries that can be + stored in the cache at one time. Defaults to 100,000. +| `cache.hash_algo` | no | Specifies the hashing algorithm that is used for the + cached user credentials. + See <> for the + possible values. (Expert Setting). +|======================= + +[[mapping-roles-ad]] +==== Mapping Active Directory Users and Groups to Roles + +An integral part of a realm authentication process is to resolve the roles +associated with the authenticated user. Roles define the privileges a user has +in the cluster. + +Since with the `active_directory` realm the users are managed externally in the +Active Directory server, the expectation is that their roles are managed there +as well. In fact, Active Directory supports the notion of groups, which often +represent user roles for different systems in the organization. + +The `active_directory` realm enables you to map Active Directory users to roles +via their Active Directory groups, or other metadata. This role mapping can be +configured via the {ref}/security-api-role-mapping.html[role-mapping API], or by using +a file stored on each node. When a user authenticates against an Active +Directory realm, the privileges for that user are the union of all privileges +defined by the roles to which the user is mapped. + +Within a mapping definition, you specify groups using their distinguished +names. For example, the following mapping configuration maps the Active +Directory `admins` group to both the `monitoring` and `user` roles, maps the +`users` group to the `user` role and maps the `John Doe` user to the `user` +role. + +Configured via the role-mapping API: +[source,js] +-------------------------------------------------- +PUT _xpack/security/role_mapping/admins +{ + "roles" : [ "monitoring" , "user" ], + "rules" : { "field" : { + "groups" : "cn=admins,dc=example,dc=com" <1> + } }, + "enabled": true +} +-------------------------------------------------- +// CONSOLE +<1> The Active Directory distinguished name (DN) of the `admins` group. + +[source,js] +-------------------------------------------------- +PUT _xpack/security/role_mapping/basic_users +{ + "roles" : [ "user" ], + "rules" : { "any": [ + { "field" : { + "groups" : "cn=users,dc=example,dc=com" <1> + } }, + { "field" : { + "dn" : "cn=John Doe,cn=contractors,dc=example,dc=com" <2> + } } + ] }, + "enabled": true +} +-------------------------------------------------- +// CONSOLE +<1> The Active Directory distinguished name (DN) of the `users` group. +<2> The Active Directory distinguished name (DN) of the user `John Doe`. 
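+
+If you want to confirm what has been stored, the mappings created through the
+{ref}/security-api-role-mapping.html[role-mapping API] can also be read back. The following
+request is only an illustrative check against the `basic_users` mapping defined above:
+
+[source,js]
+--------------------------------------------------
+GET _xpack/security/role_mapping/basic_users
+--------------------------------------------------
+// CONSOLE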
+ +Or, alternatively, configured via the role-mapping file: +[source, yaml] +------------------------------------------------------------ +monitoring: <1> + - "cn=admins,dc=example,dc=com" <2> +user: + - "cn=users,dc=example,dc=com" <3> + - "cn=admins,dc=example,dc=com" + - "cn=John Doe,cn=contractors,dc=example,dc=com" <4> +------------------------------------------------------------ +<1> The name of the role. +<2> The Active Directory distinguished name (DN) of the `admins` group. +<3> The Active Directory distinguished name (DN) of the `users` group. +<4> The Active Directory distinguished name (DN) of the user `John Doe`. + +For more information, see <>. + +[[ad-user-metadata]] +==== User Metadata in Active Directory Realms +When a user is authenticated via an Active Directory realm, the following +properties are populated in the user's _metadata_. This metadata is returned in the +{ref}/security-api-authenticate.html[authenticate API], and can be used with +<> in roles. + +|======================= +| Field | Description +| `ldap_dn` | The distinguished name of the user. +| `ldap_groups` | The distinguished name of each of the groups that were + resolved for the user (regardless of whether those + groups were mapped to a role). +|======================= + +Additional metadata can be extracted from the Active Directory server by configuring +the `metadata` setting on the Active Directory realm. + +[[active-directory-ssl]] +==== Setting up SSL Between Elasticsearch and Active Directory + +To protect the user credentials that are sent for authentication, it's highly +recommended to encrypt communications between Elasticsearch and your Active +Directory server. Connecting via SSL/TLS ensures that the identity of the Active +Directory server is authenticated before {security} transmits the user +credentials, and the usernames and passwords are encrypted in transit. + +To encrypt communications between Elasticsearch and Active Directory: + +. Configure each node to trust certificates signed by the CA that signed your +Active Directory server certificates. The following example demonstrates how to trust a CA certificate, +`cacert.pem`, located within the {xpack} configuration directory: ++ +[source,shell] +-------------------------------------------------- +xpack: + security: + authc: + realms: + active_directory: + type: active_directory + order: 0 + domain_name: ad.example.com + url: ldaps://ad.example.com:636 + ssl: + certificate_authorities: [ "CONFIG_DIR/x-pack/cacert.pem" ] +-------------------------------------------------- ++ +The CA cert must be a PEM encoded certificate. + +. Set the `url` attribute in the realm configuration to specify the LDAPS protocol +and the secure port number. For example, `url: ldaps://ad.example.com:636`. + +. Restart Elasticsearch. + +NOTE: By default, when you configure {security} to connect to Active Directory + using SSL/TLS, {security} attempts to verify the hostname or IP address + specified with the `url` attribute in the realm configuration with the + values in the certificate. If the values in the certificate and realm + configuration do not match, {security} does not allow a connection to the + Active Directory server. This is done to protect against man-in-the-middle + attacks. If necessary, you can disable this behavior by setting the + {ref}/security-settings.html#ssl-tls-settings[`ssl.verification_mode`] property to `certificate`. 
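+
+If the names in your Active Directory certificate do not match the `url` you
+connect to, one option mentioned in the note above is to relax hostname
+verification. A minimal sketch, reusing the realm name, URL, and CA path from
+the example above (adjust these to your environment):
+
+[source,yaml]
+--------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        active_directory:
+          type: active_directory
+          order: 0
+          domain_name: ad.example.com
+          url: ldaps://ad.example.com:636
+          ssl:
+            certificate_authorities: [ "CONFIG_DIR/x-pack/cacert.pem" ]
+            verification_mode: certificate
+--------------------------------------------------
+
+With `certificate`, the certificate chain is still validated but the hostname
+check is skipped; prefer the default `full` whenever your certificates contain
+the correct names.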
diff --git a/x-pack/docs/en/security/authentication/anonymous-access.asciidoc b/x-pack/docs/en/security/authentication/anonymous-access.asciidoc new file mode 100644 index 0000000000000..c95328e99a3eb --- /dev/null +++ b/x-pack/docs/en/security/authentication/anonymous-access.asciidoc @@ -0,0 +1,30 @@ +[[anonymous-access]] +=== Enabling Anonymous Access + +Incoming requests are considered to be _anonymous_ if no authentication token +can be extracted from the incoming request. By default, anonymous requests are rejected and an authentication error is returned (status code `401`). + +To enable anonymous access, you assign one or more roles to anonymous +users in the `elasticsearch.yml` configuration file. For example, the following +configuration assigns anonymous users `role1` and `role2`: + +[source,yaml] +---------------------------------------- +xpack.security.authc: + anonymous: + username: anonymous_user <1> + roles: role1, role2 <2> + authz_exception: true <3> +---------------------------------------- +<1> The username/principal of the anonymous user. Defaults to +`_es_anonymous_user` if not specified. +<2> The roles to associate with the anonymous user. If no roles are specified, anonymous access is disabled--anonymous requests will be rejected and return an authentication error. +<3> When `true`, a 403 HTTP status code is returned if the anonymous user +does not have the permissions needed to perform the requested action and the +user will NOT be prompted to provide credentials to access the requested +resource. When `false`, a 401 HTTP status code is returned if the anonymous user +does not have the necessary permissions and the user is prompted for +credentials to access the requested resource. If you are using anonymous access +in combination with HTTP, you might need to set `authz_exception` to `false` +if your client does not support preemptive basic authentication. Defaults to +`true`. \ No newline at end of file diff --git a/x-pack/docs/en/security/authentication/custom-realm.asciidoc b/x-pack/docs/en/security/authentication/custom-realm.asciidoc new file mode 100644 index 0000000000000..a7df6f5ff865b --- /dev/null +++ b/x-pack/docs/en/security/authentication/custom-realm.asciidoc @@ -0,0 +1,99 @@ +[[custom-realms]] +=== Integrating with Other Authentication Systems + +If you are using an authentication system that is not supported out-of-the-box +by {security}, you can create a custom realm to interact with it to authenticate +users. You implement a custom realm as an SPI loaded security extension +as part of an ordinary elasticsearch plugin. + +[[implementing-custom-realm]] +==== Implementing a Custom Realm + +Sample code that illustrates the structure and implementation of a custom realm +is provided in the https://github.com/elastic/shield-custom-realm-example[custom-realm-example] +repository on GitHub. You can use this code as a starting point for creating your +own realm. + +To create a custom realm, you need to: + +. Extend `org.elasticsearch.xpack.security.authc.Realm` to communicate with your + authentication system to authenticate users. +. Implement the `org.elasticsearch.xpack.security.authc.Realm.Factory` interface in + a class that will be used to create the custom realm. +. Extend `org.elasticsearch.xpack.security.authc.DefaultAuthenticationFailureHandler` to + handle authentication failures when using your custom realm. + +To package your custom realm as a plugin: + +. 
Implement an extension class for your realm that extends
+  `org.elasticsearch.xpack.core.security.SecurityExtension`. There you need to
+  override one or more of the following methods:
++
+[source,java]
+----------------------------------------------------
+@Override
+public Map<String, Realm.Factory> getRealms() {
+    ...
+}
+----------------------------------------------------
++
+The `getRealms` method is used to provide a map of type names to the `Factory` that
+will be used to create the realm.
++
+[source,java]
+----------------------------------------------------
+@Override
+public AuthenticationFailureHandler getAuthenticationFailureHandler() {
+    ...
+}
+----------------------------------------------------
++
+The `getAuthenticationFailureHandler` method is used to optionally provide a
+custom `AuthenticationFailureHandler`, which will control how X-Pack responds
+in certain authentication failure events.
++
+[source,java]
+----------------------------------------------------
+@Override
+public List<String> getSettingsFilter() {
+    ...
+}
+----------------------------------------------------
++
+The `Plugin#getSettingsFilter` method returns a list of setting names that should be
+filtered from the settings APIs as they may contain sensitive credentials. Note that this method is not
+part of the `SecurityExtension` interface; it is available as part of the elasticsearch plugin main class.
+
+. Create a build configuration file for the plugin; Gradle is our recommendation.
+. Create a `META-INF/services/org.elasticsearch.xpack.core.security.SecurityExtension` descriptor file for the
+  extension that contains the fully qualified class name of your `org.elasticsearch.xpack.core.security.SecurityExtension` implementation.
+. Bundle all in a single zip file.
+
+[[using-custom-realm]]
+==== Using a Custom Realm to Authenticate Users
+
+To use a custom realm:
+
+. Install the realm extension on each node in the cluster. You run
+  `bin/elasticsearch-plugin` with the `install` sub-command and specify the URL
+  pointing to the zip file that contains the extension. For example:
++
+[source,shell]
+----------------------------------------
+bin/elasticsearch-plugin install file:////my-realm-1.0.zip
+----------------------------------------
+
+. Add a realm configuration of the appropriate realm type to `elasticsearch.yml`
+under the `xpack.security.authc.realms` namespace. The options you can set depend
+on the settings exposed by the custom realm. At a minimum, you must set the realm
+`type` to the type defined by the extension. If you are configuring multiple
+realms, you should also explicitly set the `order` attribute to control the
+order in which the realms are consulted during authentication. You should make
+sure each configured realm has a distinct `order` setting. In the event that
+two or more realms have the same `order`, they will be processed in realm `name` order.
++
+IMPORTANT: When you configure realms in `elasticsearch.yml`, only the
+realms you specify are used for authentication. If you also want to use the
+`native` or `file` realms, you must include them in the realm chain.
+
+. Restart Elasticsearch.
diff --git a/x-pack/docs/en/security/authentication/file-realm.asciidoc b/x-pack/docs/en/security/authentication/file-realm.asciidoc
new file mode 100644
index 0000000000000..507baaf1f1f28
--- /dev/null
+++ b/x-pack/docs/en/security/authentication/file-realm.asciidoc
@@ -0,0 +1,146 @@
+[[file-realm]]
+=== File-based User Authentication
+
+You can manage and authenticate users with the built-in `file` internal realm.
+With the `file` realm users are defined in local files on each node in the cluster. + +IMPORTANT: As the administrator of the cluster, it is your responsibility to + ensure the same users are defined on every node in the cluster. + {security} does not deliver any mechanism to guarantee this. + +The `file` realm is primarily supported to serve as a fallback/recovery realm. It +is mostly useful in situations where all users locked themselves out of the system +(no one remembers their username/password). In this type of scenarios, the `file` +realm is your only way out - you can define a new `admin` user in the `file` realm +and use it to log in and reset the credentials of all other users. + +IMPORTANT: When you configure realms in `elasticsearch.yml`, only the +realms you specify are used for authentication. To use the +`file` realm as a fallback, you must include it in the realm chain. + +To define users, {security} provides the {ref}/users-command.html[users] +command-line tool. This tool enables you to add and remove users, assign user +roles and manage user passwords. + +==== Configuring a File Realm + +The `file` realm is added to the realm chain by default. You don't need to +explicitly configure a `file` realm to manage users with the `users` tool. + +Like other realms, you can configure options for a `file` realm in the +`xpack.security.authc.realms` namespace in `elasticsearch.yml`. + +To configure an `file` realm: + +. Add a realm configuration of type `file` to `elasticsearch.yml` under the +`xpack.security.authc.realms` namespace. At a minimum, you must set the realm `type` to +`file`. If you are configuring multiple realms, you should also explicitly set +the `order` attribute. See <> for all of the options you can set +for a `file` realm. ++ +For example, the following snippet shows a `file` realm configuration that sets +the `order` to zero so the realm is checked first: ++ +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + file1: + type: file + order: 0 +------------------------------------------------------------ + +. Restart Elasticsearch. + +[[file-realm-settings]] +===== File Realm Settings + +[cols="4,^3,10"] +|======================= +| Setting | Required | Description +| `type` | yes | Indicates the realm type. Must be set to `file`. +| `order` | no | Indicates the priority of this realm within the + realm chain. Realms with a lower order are + consulted first. Although not required, we + recommend explicitly setting this value when you + configure multiple realms. Defaults to + `Integer.MAX_VALUE`. +| `enabled` | no | Indicates whether this realm is enabled or + disabled. Enables you to disable a realm without + removing its configuration. Defaults to `true`. +| `cache.ttl` | no | Specifies the time-to-live for cached user entries. + A user's credentials are cached for this period of + time. Specify the time period using the standard + Elasticsearch {ref}/common-options.html#time-units[time units]. + Defaults to `20m`. +| `cache.max_users` | no | Specifies the maximum number of user entries that + can be stored in the cache at one time. Defaults + to 100,000. +| `cache.hash_algo` | no | Specifies the hashing algorithm that is used for + the cached user credentials. See <> for the possible values. + (Expert Setting). +|======================= + +==== A Look Under the Hood + +All the data about the users for the `file` realm is stored in two files, `users` +and `users_roles`. 
Both files are located in `CONFIG_DIR/x-pack/` and are read +on startup. + +By default, {security} checks these files for changes every 5 seconds. You can +change this default behavior by changing the `resource.reload.interval.high` setting in +the `elasticsearch.yml` file (as this is a common setting in Elasticsearch, +changing its value may effect other schedules in the system). + +[IMPORTANT] +============================== +These files are managed locally by the node and are **not** managed +globally by the cluster. This means that with a typical multi-node cluster, +the exact same changes need to be applied on each and every node in the +cluster. + +A safer approach would be to apply the change on one of the nodes and have the +`users` and `users_roles` files distributed/copied to all other nodes in the +cluster (either manually or using a configuration management system such as +Puppet or Chef). +============================== + +While it is possible to modify these files directly using any standard text +editor, we strongly recommend using the {ref}/users-command.html[`bin/elasticsearch-users`] +command-line tool to apply the required changes. + +[float] +[[users-file]] +===== The `users` File +The `users` file stores all the users and their passwords. Each line in the +`users` file represents a single user entry consisting of the username and +**hashed** password. + +[source,bash] +---------------------------------------------------------------------- +rdeniro:$2a$10$BBJ/ILiyJ1eBTYoRKxkqbuDEdYECplvxnqQ47uiowE7yGqvCEgj9W +alpacino:$2a$10$cNwHnElYiMYZ/T3K4PvzGeJ1KbpXZp2PfoQD.gfaVdImnHOwIuBKS +jacknich:$2a$10$GYUNWyABV/Ols/.bcwxuBuuaQzV6WIauW6RdboojxcixBq3LtI3ni +---------------------------------------------------------------------- + +NOTE: {security} uses `bcrypt` to hash the user passwords. + +[float] +[[users_defining-roles]] +==== The `users_roles` File + +The `users_roles` file stores the roles associated with the users, as in the +following example: + +[source,shell] +-------------------------------------------------- +admin:rdeniro +power_user:alpacino,jacknich +user:jacknich +-------------------------------------------------- + +Each row maps a role to a comma-separated list of all the users that are +associated with that role. diff --git a/x-pack/docs/en/security/authentication/ldap-realm.asciidoc b/x-pack/docs/en/security/authentication/ldap-realm.asciidoc new file mode 100644 index 0000000000000..bd32c49622877 --- /dev/null +++ b/x-pack/docs/en/security/authentication/ldap-realm.asciidoc @@ -0,0 +1,497 @@ +[[ldap-realm]] +=== LDAP User Authentication + +You can configure {security} to communicate with a Lightweight Directory Access +Protocol (LDAP) server to authenticate users. To integrate with LDAP, you +configure an `ldap` realm and map LDAP groups to user roles in the +<>. + +To protect passwords, communications between Elasticsearch and the LDAP server +should be encrypted using SSL/TLS. Clients and nodes that connect via SSL/TLS to +the LDAP server need to have the LDAP server's certificate or the server's root +CA certificate installed in their _keystore_ or _truststore_. For more information +about installing certificates, see <>. + +==== Configuring an LDAP Realm + +LDAP stores users and groups hierarchically, similar to the way folders are +grouped in a file system. An LDAP directory's hierarchy is built from containers +such as the _organizational unit_ (`ou`), _organization_ (`o`), and +_domain controller_ (`dc`). 
+ +The path to an entry is a _Distinguished Name_ (DN) that uniquely identifies a +user or group. User and group names typically have attributes such as a +_common name_ (`cn`) or _unique ID_ (`uid`). A DN is specified as a string, +for example `"cn=admin,dc=example,dc=com"` (white spaces are ignored). + +The `ldap` realm supports two modes of operation, a user search mode +and a mode with specific templates for user DNs. See +<> for all of the options you can set for an +`ldap` realm. + +[[ldap-user-search]] +===== User Search Mode +LDAP user search is the most common mode of operation. In this mode, a specific +user with permission to search the LDAP directory is used to search for the +authenticating user DN based on its username and an LDAP attribute. Once found, +the user will be authenticated by attempting to bind to the LDAP server using the +found DN and the provided password. + +To configure an `ldap` Realm with User Search: + +. Add a realm configuration of type `ldap` to `elasticsearch.yml` under the +`xpack.security.authc.realms` namespace. At a minimum, you must set the realm `type` +to `ldap`, specify the `url` of the LDAP server, and set `user_search.base_dn` +to the container DN where the users are searched for. If you are configuring +multiple realms, you should also explicitly set the `order` attribute to control +the order in which the realms are consulted during authentication. See +<> for all of the options you can set for an +`ldap` realm. ++ +For example, the following snippet shows an LDAP realm configured with a user search: ++ +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + ldap1: + type: ldap + order: 0 + url: "ldaps://ldap.example.com:636" + bind_dn: "cn=ldapuser, ou=users, o=services, dc=example, dc=com" + user_search: + base_dn: "dc=example,dc=com" + attribute: cn + group_search: + base_dn: "dc=example,dc=com" + files: + role_mapping: "CONFIG_DIR/x-pack/role_mapping.yml" + unmapped_groups_as_roles: false +------------------------------------------------------------ ++ +The password for the `bind_dn` user should be configured by adding the appropriate +`secure_bind_password` setting to the {es} keystore. +For example, the following command adds the password for the example realm above: ++ +[source, shell] +------------------------------------------------------------ +bin/elasticsearch-keystore add xpack.security.authc.realms.ldap1.secure_bind_password +------------------------------------------------------------ ++ +IMPORTANT: When you configure realms in `elasticsearch.yml`, only the +realms you specify are used for authentication. If you also want to use the +`native` or `file` realms, you must include them in the realm chain. + +. Restart Elasticsearch + + +===== User DN Templates Mode +If your LDAP environment uses a few specific standard naming conditions for +users, you can use User DN templates to configure the realm. The advantage of +this method is that a search does not have to be performed to find the user DN. +However, multiple bind operations might be needed to find the correct user DN. + +To configure an `ldap` Realm with User DN templates: + +. Add a realm configuration of type `ldap` to `elasticsearch.yml` in the +`xpack.security.authc.realms` namespace. At a minimum, you must set the realm `type` to +`ldap`, specify the `url` of the LDAP server, and specify at least one template +with the `user_dn_templates` option. 
If you are configuring multiple realms, you +should also explicitly set the `order` attribute to control the order in which +the realms are consulted during authentication. See <> +for all of the options you can set for an `ldap` realm. ++ +For example, the following snippet shows an LDAP realm configured with User DN templates: ++ +[source, yaml] +------------------------------------------------------------ +xpack: + security: + authc: + realms: + ldap1: + type: ldap + order: 0 + url: "ldaps://ldap.example.com:636" + user_dn_templates: + - "cn={0}, ou=users, o=marketing, dc=example, dc=com" + - "cn={0}, ou=users, o=engineering, dc=example, dc=com" + group_search: + base_dn: "dc=example,dc=com" + files: + role_mapping: "/mnt/elasticsearch/group_to_role_mapping.yml" + unmapped_groups_as_roles: false +------------------------------------------------------------ + +. Restart Elasticsearch + +IMPORTANT: The `bind_dn` setting is not used in template mode. +All LDAP operations will execute as the authenticating user. + + +[[ldap-load-balancing]] +===== Load Balancing and Failover +The `load_balance.type` setting can be used at the realm level to configure how +{security} should interact with multiple LDAP servers. {security} supports both +failover and load balancing modes of operation. + +.Load Balancing and Failover Types +|======================= +| Type | | | Description +| `failover` | | | The URLs specified are used in the order that they are specified. + The first server that can be connected to will be used for all + subsequent connections. If a connection to that server fails then + the next server that a connection can be established to will be + used for subsequent connections. +| `dns_failover` | | | In this mode of operation, only a single URL may be specified. + This URL must contain a DNS name. The system will be queried for + all IP addresses that correspond to this DNS name. Connections to + the LDAP server will always be tried in the order in which they + were retrieved. This differs from `failover` in that there is no + reordering of the list and if a server has failed at the beginning + of the list, it will still be tried for each subsequent connection. +| `round_robin` | | | Connections will continuously iterate through the list of provided + URLs. If a server is unavailable, iterating through the list of + URLs will continue until a successful connection is made. +| `dns_round_robin` | | | In this mode of operation, only a single URL may be specified. This + URL must contain a DNS name. The system will be queried for all IP + addresses that correspond to this DNS name. Connections will + continuously iterate through the list of addresses. If a server is + unavailable, iterating through the list of URLs will continue until + a successful connection is made. +|======================= + + +[[ldap-settings]] +===== LDAP Realm Settings + +.Common LDAP Realm Settings +[cols="4,^3,10"] +|======================= +| Setting | Required | Description +| `type` | yes | Indicates the realm type. Must be set to `ldap`. +| `order` | no | Indicates the priority of this realm within the realm + chain. Realms with a lower order are consulted first. + Although not required, we recommend explicitly + setting this value when you configure multiple realms. + Defaults to `Integer.MAX_VALUE`. +| `enabled` | no | Indicates whether this realm is enabled or disabled. + Enables you to disable a realm without removing its + configuration. Defaults to `true`. 
+| `url` | yes | Specifies one or more LDAP URLs of the form of + `ldap[s]://:`. Multiple URLs can be + defined using a comma separated value or array syntax: + `[ "ldaps://server1:636", "ldaps://server2:636" ]`. + `ldaps` and `ldap` URL protocols cannot be mixed in + the same realm. +| `load_balance.type` | no | The behavior to use when there are multiple LDAP URLs + defined. For supported values see + <>. +| `load_balance.cache_ttl` | no | When using `dns_failover` or `dns_round_robin` as the + load balancing type, this setting controls the amount of time + to cache DNS lookups. Defaults to `1h`. +| `user_group_attribute` | no | Specifies the attribute to examine on the user for group + membership. The default is `memberOf`. This setting will + be ignored if any `group_search` settings are specified. +| `group_search.base_dn` | no | Specifies a container DN to search for groups in which + the user has membership. When this element is absent, + Security searches for the attribute specified by + `user_group_attribute` set on the user to determine + group membership. +| `group_search.scope` | no | Specifies whether the group search should be + `sub_tree`, `one_level` or `base`. `one_level` only + searches objects directly contained within the + `base_dn`. The default `sub_tree` searches all objects + contained under `base_dn`. `base` specifies that the + `base_dn` is a group object, and that it is the only + group considered. +| `group_search.filter` | no | Specifies a filter to use to lookup a group. If not + set, the realm searches for `group`, + `groupOfNames`, `groupOfUniqueNames`, or `posixGroup` with the + attributes `member`, `memberOf`, or `memberUid`. Any instance of + `{0}` in the filter is replaced by the user + attribute defined in `group_search.user_attribute` +| `group_search.user_attribute` | no | Specifies the user attribute that is fetched and + provided as a parameter to the filter. If not set, + the user DN is passed to the filter. +| `unmapped_groups_as_roles` | no | Specifies whether the names of any unmapped LDAP groups + should be used as role names and assigned to the user. + A group is considered to be _unmapped_ if it is not referenced + in any <> (API based + role-mappings are not considered). + Defaults to `false`. +| `timeout.tcp_connect` | no | Specifies the TCP connect timeout period for establishing an + LDAP connection. An `s` at the end indicates seconds, or `ms` + indicates milliseconds. Defaults to `5s` (5 seconds). +| `timeout.tcp_read` | no | Specifies the TCP read timeout period after establishing an LDAP connection. + An `s` at the end indicates seconds, or `ms` indicates milliseconds. + Defaults to `5s` (5 seconds). +| `timeout.ldap_search` | no | Specifies the LDAP Server enforced timeout period for an LDAP search. + An `s` at the end indicates seconds, or `ms` indicates milliseconds. + Defaults to `5s` (5 seconds). +| `files.role_mapping` | no | Specifies the path and file name for the + <>. + Defaults to `ES_HOME/config/x-pack/role_mapping.yml`. +| `follow_referrals` | no | Specifies whether {security} should follow referrals + returned by the LDAP server. Referrals are URLs returned by + the server that are to be used to continue the LDAP operation + (e.g. search). Defaults to `true`. +| `metadata` | no | Specifies the list of additional LDAP attributes that should + be stored in the `metadata` of an authenticated user. 
+| `ssl.key` | no | Specifies the path to the PEM encoded private key to use if the LDAP + server requires client authentication. `ssl.key` and `ssl.keystore.path` + may not be used at the same time. +| `ssl.key_passphrase` | no | Specifies the passphrase to decrypt the PEM encoded private key if it is encrypted. +| `ssl.certificate` | no | Specifies the path to the PEM encoded certificate (or certificate chain) that goes with the + key if the LDAP server requires client authentication. +| `ssl.certificate_authorities` | no | Specifies the paths to the PEM encoded certificate authority certificates that + should be trusted. `ssl.certificate_authorities` and `ssl.truststore.path` may not be used + at the same time. +| `ssl.keystore.path` | no | The path to the Java Keystore file that contains a private key and certificate. `ssl.key` and + `ssl.keystore.path` may not be used at the same time. +| `ssl.keystore.password` | no | The password to the keystore. +| `ssl.keystore.key_password` | no | The password for the key in the keystore. Defaults to the keystore password. +| `ssl.truststore.path` | no | The path to the Java Keystore file that contains the certificates to trust. + `ssl.certificate_authorities` and `ssl.truststore.path` may not be used at the same time. +| `ssl.truststore.password` | no | The password to the truststore. +| `ssl.verification_mode` | no | Specifies the type of verification to be performed when + connecting to a LDAP server using `ldaps`. When + set to `full`, the hostname or IP address used in the `url` + must match one of the names in the certificate or the + connection will not be allowed. Due to their potential security impact, + `ssl` settings are not exposed via the + {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API]. + Values are `none`, `certificate`, and `full`. Defaults to `full`. + See {ref}/security-settings.html#ssl-tls-settings[`xpack.ssl.verification_mode`] + for an explanation of these values. +| `ssl.supported_protocols` | no | Specifies the supported protocols for SSL/TLS. +| `ssl.cipher_suites` | no | Specifies the cipher suites that should be supported when communicating + with the LDAP server. +| `cache.ttl` | no | Specifies the time-to-live for cached user entries. A + user's credentials are cached for this period of time. + Specify the time period using the standard Elasticsearch + {ref}/common-options.html#time-units[time units]. + Defaults to `20m`. +| `cache.max_users` | no | Specifies the maximum number of user entries that can be + stored in the cache at one time. Defaults to 100,000. +| `cache.hash_algo` | no | Specifies the hashing algorithm that is used for the + cached user credentials. See + <> for the possible + values. (Expert Setting). +|======================= + +.User Search Mode Settings +|======================= +| Setting | Required | Description +| `bind_dn` | no | The DN of the user that is used to bind to the LDAP + and perform searches. If not specified, an anonymous + bind is attempted. Due to its potential security + impact, `bind_dn` is not exposed via the + {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API]. +| `bind_password` | no | The password for the user that is used to bind to the + LDAP directory. Due to its potential security impact, + `bind_password` is not exposed via the + {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API]. + *Deprecated.* Use `secure_bind_password` instead. 
+| `secure_bind_password`                   | no       | ({ref}/secure-settings.html[Secure])
+                                                        The password for the user that is used to bind to the LDAP directory.
+| `user_search.base_dn`                    | yes      | Specifies a container DN to search for users.
+| `user_search.scope`                      | no       | The scope of the user search. Valid values are `sub_tree`,
+                                                        `one_level` or `base`. `one_level` only searches objects
+                                                        directly contained within the `base_dn`. `sub_tree` searches
+                                                        all objects contained under `base_dn`. `base` specifies
+                                                        that the `base_dn` is the user object, and that it is the
+                                                        only user considered. Defaults to `sub_tree`.
+| `user_search.filter`                     | no       | Specifies the filter used to search the directory in an attempt to match
+                                                        an entry with the username provided by the user. Defaults to `(uid={0})`.
+                                                        `{0}` is substituted with the username provided when searching.
+| `user_search.attribute`                  | no       | This setting is deprecated; use `user_search.filter` instead.
+                                                        Specifies the attribute to match with the username presented
+                                                        by the user. Defaults to `uid`.
+| `user_search.pool.enabled`               | no       | Enables or disables connection pooling for user search. When
+                                                        disabled a new connection is created for every search. The
+                                                        default is `true`.
+| `user_search.pool.size`                  | no       | Specifies the maximum number of connections to the LDAP
+                                                        server to allow in the connection pool. Defaults to `20`.
+| `user_search.pool.initial_size`          | no       | The initial number of connections to create to the LDAP
+                                                        server on startup. Defaults to `0`. Values greater than `0`
+                                                        could cause startup failures if the LDAP server is down.
+| `user_search.pool.health_check.enabled`  | no       | Enables or disables a health check on LDAP connections in
+                                                        the connection pool. Connections are checked in the
+                                                        background at the specified interval. Defaults to `true`.
+| `user_search.pool.health_check.dn`       | no/yes   | Specifies the distinguished name to retrieve as part of
+                                                        the health check. Defaults to the value of `bind_dn`.
+                                                        This setting is required when `bind_dn` is not configured.
+| `user_search.pool.health_check.interval` | no       | How often to perform background checks of connections in
+                                                        the pool. Defaults to `60s`.
+|=======================
+
+.User Templates Mode Settings
+[cols="4,^3,10"]
+|=======================
+| Setting                | Required  | Description
+| `user_dn_templates`    | yes       | Specifies the DN template that replaces the
+                                       user name with the string `{0}`. This element
+                                       is multivalued, allowing for multiple user
+                                       contexts.
+|=======================
+
+
+NOTE: If any settings starting with `user_search` are specified, the
+      `user_dn_templates` settings are ignored.
+
+
+[[mapping-roles-ldap]]
+==== Mapping LDAP Groups to Roles
+
+An integral part of a realm authentication process is to resolve the roles
+associated with the authenticated user. Roles define the privileges a user has
+in the cluster.
+
+Since with the `ldap` realm the users are managed externally in the LDAP server,
+the expectation is that their roles are managed there as well. In fact, LDAP
+supports the notion of groups, which often represent user roles for different
+systems in the organization.
+
+The `ldap` realm enables you to map LDAP users to roles via their LDAP
+groups, or other metadata. This role mapping can be configured via the
+{ref}/security-api-role-mapping.html[role-mapping API], or by using a file stored
+on each node. When a user authenticates with LDAP, the privileges
+for that user are the union of all privileges defined by the roles
+to which the user is mapped.
+ +Within a mapping definition, you specify groups using their distinguished +names. For example, the following mapping configuration maps the LDAP +`admins` group to both the `monitoring` and `user` roles, and maps the +`users` group to the `user` role. + +Configured via the role-mapping API: +[source,js] +-------------------------------------------------- +PUT _xpack/security/role_mapping/admins +{ + "roles" : [ "monitoring" , "user" ], + "rules" : { "field" : { + "groups" : "cn=admins,dc=example,dc=com" <1> + } }, + "enabled": true +} +-------------------------------------------------- +// CONSOLE +<1> The LDAP distinguished name (DN) of the `admins` group. + +[source,js] +-------------------------------------------------- +PUT _xpack/security/role_mapping/basic_users +{ + "roles" : [ "user" ], + "rules" : { "field" : { + "groups" : "cn=users,dc=example,dc=com" <1> + } }, + "enabled": true +} +-------------------------------------------------- +// CONSOLE +<1> The LDAP distinguished name (DN) of the `users` group. + +Or, alternatively, configured via the role-mapping file: +[source, yaml] +------------------------------------------------------------ +monitoring: <1> + - "cn=admins,dc=example,dc=com" <2> +user: + - "cn=users,dc=example,dc=com" <3> + - "cn=admins,dc=example,dc=com" +------------------------------------------------------------ +<1> The name of the mapped role. +<2> The LDAP distinguished name (DN) of the `admins` group. +<3> The LDAP distinguished name (DN) of the `users` group. + +For more information, see <>. + +[[ldap-user-metadata]] +==== User Metadata in LDAP Realms +When a user is authenticated via an LDAP realm, the following properties are +populated in user's _metadata_. This metadata is returned in the +{ref}/security-api-authenticate.html[authenticate API], and can be used with +<> in roles. + +|======================= +| Field | Description +| `ldap_dn` | The distinguished name of the user. +| `ldap_groups` | The distinguished name of each of the groups that were + resolved for the user (regardless of whether those + groups were mapped to a role). +|======================= + +Additional fields can be included in the user's metadata by configuring +the `metadata` setting on the LDAP realm. This metadata is available for use +with the <> or in +<>. + +The example below includes the user's common name (`cn`) as an additional +field in their metadata. +[source,yaml] +-------------------------------------------------- +xpack: + security: + authc: + realms: + ldap1: + type: ldap + metadata: cn +-------------------------------------------------- + +[[ldap-ssl]] +==== Setting up SSL Between Elasticsearch and LDAP + +To protect the user credentials that are sent for authentication, it's highly +recommended to encrypt communications between Elasticsearch and your LDAP server. +Connecting via SSL/TLS ensures that the identity of the LDAP server is +authenticated before {security} transmits the user credentials and the contents +of the connection are encrypted. + +To encrypt communications between Elasticsearch and your LDAP server: + +. Configure the realm's SSL settings on each node to trust certificates signed by the CA that signed your +LDAP server certificates. 
The following example demonstrates how to trust a CA certificate,
+`cacert.pem`, located within the {xpack} configuration directory:
++
+[source,shell]
+--------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        ldap1:
+          type: ldap
+          order: 0
+          url: "ldaps://ldap.example.com:636"
+          ssl:
+            certificate_authorities: [ "CONFIG_DIR/x-pack/cacert.pem" ]
+--------------------------------------------------
++
+The CA cert must be a PEM encoded certificate.
++
+[NOTE]
+===============================
+You can also specify the individual server certificates rather than the CA
+certificate, but this is only recommended if you have a single LDAP server
+or the certificates are self-signed.
+===============================
+
+. Set the `url` attribute in the realm configuration to specify the LDAPS
+protocol and the secure port number. For example, `url: ldaps://ldap.example.com:636`.
+
+. Restart Elasticsearch.
+
+NOTE: By default, when you configure {security} to connect to an LDAP server
+      using SSL/TLS, {security} attempts to verify the hostname or IP address
+      specified with the `url` attribute in the realm configuration with the
+      values in the certificate. If the values in the certificate and realm
+      configuration do not match, {security} does not allow a connection to the
+      LDAP server. This is done to protect against man-in-the-middle attacks. If
+      necessary, you can disable this behavior by setting the
+      `ssl.verification_mode` property to `certificate`.
diff --git a/x-pack/docs/en/security/authentication/native-realm.asciidoc b/x-pack/docs/en/security/authentication/native-realm.asciidoc
new file mode 100644
index 0000000000000..997920013cda4
--- /dev/null
+++ b/x-pack/docs/en/security/authentication/native-realm.asciidoc
@@ -0,0 +1,65 @@
+[[native-realm]]
+=== Native user authentication
+
+The easiest way to manage and authenticate users is with the internal `native`
+realm. You can use the REST APIs or Kibana to add and remove users, assign user roles, and
+manage user passwords.
+
+[[native-realm-configuration]]
+[float]
+==== Configuring a native realm
+
+The native realm is added to the realm chain by default. You don't need to
+explicitly configure a native realm to manage users through the REST APIs.
+
+
+IMPORTANT: When you configure realms in `elasticsearch.yml`, only the
+realms you specify are used for authentication. To use the
+`native` realm as a fallback, you must include it in the realm chain.
+
+You can, however, configure options for the `native` realm in the
+`xpack.security.authc.realms` namespace in `elasticsearch.yml`. Explicitly
+configuring a native realm enables you to set the order in which it appears in
+the realm chain, temporarily disable the realm, and control its cache options
+(see the sketch after the steps below).
+
+To configure a native realm:
+
+. Add a realm configuration of type `native` to `elasticsearch.yml` under the
+`xpack.security.authc.realms` namespace. At a minimum, you must set the realm
+`type` to `native`. If you are configuring multiple realms, you should also
+explicitly set the `order` attribute. See <>
+for all of the options you can set for the `native` realm.
++
+For example, the following snippet shows a `native` realm configuration that
+sets the `order` to zero so the realm is checked first:
++
+[source, yaml]
+------------------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        native1:
+          type: native
+          order: 0
+------------------------------------------------------------
+
+. Restart Elasticsearch.
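+
+For example, a sketch of an explicitly configured `native` realm that also
+sets the standard `enabled` flag and cache options (these settings follow the
+same pattern as the other realms in this guide; the values shown are the
+defaults quoted elsewhere in these docs, not required values):
+
+[source, yaml]
+------------------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        native1:
+          type: native
+          order: 0
+          enabled: true
+          cache.ttl: 20m
+          cache.max_users: 100000
+------------------------------------------------------------
+
+Setting `enabled: false` temporarily removes the realm from the authentication
+chain without deleting its configuration.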
+
+[[native-settings]]
+==== Native realm settings
+
+See {ref}/ref-native-settings.html[Native Realm Settings].
+
+[[managing-native-users]]
+==== Managing native users
+
+{security} enables you to easily manage users in {kib} on the
+*Management / Security / Users* page.
+
+Alternatively, you can manage users through the `user` API. For more
+information and examples, see {ref}/security-api-users.html[User Management APIs].
+
+[[migrating-from-file]]
+NOTE: To migrate file-based users to the `native` realm, use the
+{ref}/migrate-tool.html[migrate tool].
diff --git a/x-pack/docs/en/security/authentication/pki-realm.asciidoc b/x-pack/docs/en/security/authentication/pki-realm.asciidoc
new file mode 100644
index 0000000000000..57cf4dbbce090
--- /dev/null
+++ b/x-pack/docs/en/security/authentication/pki-realm.asciidoc
@@ -0,0 +1,185 @@
+[[pki-realm]]
+=== PKI User Authentication
+
+You can configure {security} to use Public Key Infrastructure (PKI) certificates
+to authenticate users in {es}. This requires clients to present X.509
+certificates.
+
+NOTE: You cannot use PKI certificates to authenticate users in {kib}.
+
+To use PKI in {es}, you configure a PKI realm, enable client authentication on
+the desired network layers (transport or http), and map the Distinguished Names
+(DNs) from the user certificates to {security} roles in the
+<>.
+
+You can also use a combination of PKI and username/password authentication. For
+example, you can enable SSL/TLS on the transport layer and define a PKI realm to
+require transport clients to authenticate with X.509 certificates, while still
+authenticating HTTP traffic using username and password credentials. You can also set
+`xpack.security.transport.ssl.client_authentication` to `optional` to allow clients without
+certificates to authenticate with other credentials.
+
+IMPORTANT: You must enable SSL/TLS and enable client authentication to use PKI.
+           For more information, see <>.
+
+==== PKI Realm Configuration
+
+Like other realms, you configure options for a `pki` realm under the
+`xpack.security.authc.realms` namespace in `elasticsearch.yml`.
+
+To configure a `pki` realm:
+
+. Add a realm configuration of type `pki` to `elasticsearch.yml` under the
+`xpack.security.authc.realms` namespace. At a minimum, you must set the realm `type` to
+`pki`. If you are configuring multiple realms, you should also explicitly set
+the `order` attribute. See <> for all of the options you can set
+for a `pki` realm.
++
+For example, the following snippet shows the most basic `pki` realm configuration:
++
+[source, yaml]
+------------------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        pki1:
+          type: pki
+------------------------------------------------------------
++
+With this configuration, any certificate trusted by the SSL/TLS layer is accepted
+for authentication. The username is the common name (CN) extracted from the DN
+of the certificate.
++
+IMPORTANT: When you configure realms in `elasticsearch.yml`, only the
+realms you specify are used for authentication. If you also want to use the
+`native` or `file` realms, you must include them in the realm chain.
++
+If you want to use something other than the CN of the DN as the username, you
+can specify a regex to extract the desired username.
For example, the regex in
+the following configuration extracts the email address from the DN:
++
+[source, yaml]
+------------------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        pki1:
+          type: pki
+          username_pattern: "EMAILADDRESS=(.*?)(?:,|$)"
+------------------------------------------------------------
++
+. Restart Elasticsearch.
+
+[[pki-ssl-config]]
+==== PKI and SSL Settings
+
+The PKI realm relies on the SSL settings of the node's network interface
+(transport or http). The realm can be configured to be more restrictive than
+the underlying network connection - that is, it is possible to configure the
+node such that some connections are accepted by the network interface but then
+fail to be authenticated by the PKI realm. However, the reverse is not possible
+- the PKI realm cannot authenticate a connection that has been refused by the
+network interface.
+
+In particular, this means:
+
+* The transport or http interface must request client certificates by setting
+  `client_authentication` to `optional` or `required`.
+* The interface must _trust_ the certificate that is presented by the client
+  by configuring either the `truststore` or `certificate_authorities` paths,
+  or by setting `verification_mode` to `none`.
++
+See {ref}/security-settings.html#ssl-tls-settings[`xpack.ssl.verification_mode`]
+for an explanation of this setting.
+
+* The _protocols_ supported by the interface must be compatible with those
+  used by the client.
+
+
+The relevant network interface (transport or http) must be configured to trust
+any certificate that is to be used within the PKI realm. However, it is possible to
+configure the PKI realm to trust only a _subset_ of the certificates accepted
+by the network interface.
+This is useful when the SSL/TLS layer trusts clients with certificates that are
+signed by a different CA than the one that signs your users' certificates.
+
+To configure the PKI realm with its own truststore, specify the
+`truststore.path` option as below:
+
+[source, yaml]
+------------------------------------------------------------
+xpack:
+  security:
+    authc:
+      realms:
+        pki1:
+          type: pki
+          truststore:
+            path: "/path/to/pki_truststore.jks"
+            password: "x-pack-test-password"
+------------------------------------------------------------
+
+The `certificate_authorities` option may be used as an alternative to the
+`truststore.path` setting.
+
+
+[[pki-settings]]
+===== PKI Realm Settings
+
+See
+{ref}/security-settings.html#_settings_valid_for_all_realms[Security Settings for All Realms]
+and
+{ref}/security-settings.html#ref-pki-settings[PKI Realm Settings].
+
+[[assigning-roles-pki]]
+==== Mapping Roles for PKI Users
+
+You map roles for PKI users through the
+{ref}/security-api-role-mapping.html[role-mapping API], or by using a file stored on
+each node. When a user authenticates against a PKI realm, the privileges for
+that user are the union of all privileges defined by the roles to which the
+user is mapped.
+
+You identify a user by the distinguished name in their certificate.
+For example, the following mapping configuration maps `John Doe` to the
+`user` role:
+
+Using the role-mapping API:
+[source,js]
+--------------------------------------------------
+PUT _xpack/security/role_mapping/users
+{
+  "roles" : [ "user" ],
+  "rules" : { "field" : {
+    "dn" : "cn=John Doe,ou=example,o=com" <1>
+  } },
+  "enabled": true
+}
+--------------------------------------------------
+// CONSOLE
+<1> The distinguished name (DN) of a PKI user.
+
+Or, alternatively, configured in a role-mapping file:
+[source, yaml]
+------------------------------------------------------------
+user: <1>
+  - "cn=John Doe,ou=example,o=com" <2>
+------------------------------------------------------------
+<1> The name of a role.
+<2> The distinguished name (DN) of a PKI user.
+
+The distinguished name for a PKI user follows X.500 naming conventions which
+place the most specific fields (like `cn` or `uid`) at the beginning of the
+name, and the most general fields (like `o` or `dc`) at the end of the name.
+Some tools, such as _openssl_, may print out the subject name in a different
+format.
+
+One way that you can determine the correct DN for a certificate is to use the
+{ref}/security-api-authenticate.html[authenticate API] (use the relevant PKI
+certificate as the means of authentication) and inspect the metadata field in
+the result. The user's distinguished name will be populated under the `pki_dn`
+key. You can also use the authenticate API to validate your role mapping.
+
+For more information, see <>.
diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc
new file mode 100644
index 0000000000000..70d8180cedb4e
--- /dev/null
+++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc
@@ -0,0 +1,818 @@
+[[saml-guide]]
+
+== Configuring SAML Single-Sign-On on the Elastic Stack
+
+The Elastic Stack supports SAML single-sign-on (SSO) into {kib}, using {es} as
+a backend service. In SAML terminology, the Elastic Stack is operating as a
+_Service Provider_.
+
+The other component that is needed to enable SAML single-sign-on is the
+_Identity Provider_, which is a service that handles your credentials and
+performs the actual authentication of users.
+
+If you are interested in configuring SSO into {kib}, then you will need to
+provide {es} with information about your _Identity Provider_, and you will need
+to register the Elastic Stack as a known _Service Provider_ within that
+Identity Provider. There are also a few configuration changes that are
+required in {kib} to activate the SAML authentication provider.
+
+NOTE: The SAML support in {kib} is designed on the expectation that it will be
+the primary (or sole) authentication method for users of that {kib} instance.
+Once you enable SAML authentication in {kib}, it will affect all users who try
+to log in. The <> section provides more detail about how this works.
+
+=== The Identity Provider
+
+The Elastic Stack supports the SAML 2.0 _Web Browser SSO_ and the SAML
+2.0 _Single Logout_ profiles and can integrate with any Identity Provider (IdP)
+that supports at least the SAML 2.0 _Web Browser SSO Profile_.
+It has been tested with a number of popular IdP implementations.
+
+This guide assumes that you have an existing IdP and wish to add {kib} as a
+Service Provider.
+
+The Elastic Stack uses a standard SAML _metadata_ document, in XML format, that
+defines the capabilities and features of your IdP. You should be able to
+download or generate such a document within your IdP administration interface.
+
+Download the IdP metadata document and store it within the `config` directory on
+each {es} node. For the purposes of this guide, we will assume that you are
+storing it as `config/saml/idp-metadata.xml`.
+
+The IdP will have been assigned an identifier (_EntityID_ in SAML terminology)
+which is most commonly expressed in _Uniform Resource Identifier_ (URI) form.
+Your admin interface may tell you what this is, or you might need to +read the metadata document to find it - look for the `entityID` attribute on the +`EntityDescriptor` element. + +Most IdPs will provide an appropriate metadata file with all the features that +the Elastic Stack requires, and should only require the configuration steps +described below. For completeness sake, the minimum requirements that the Elastic +Stack has for the IdP's metadata are: + +- An `` with an `entityID` that matches the {es} + <> +- An `` that supports the SAML 2.0 protocol + (`urn:oasis:names:tc:SAML:2.0:protocol`). +- At least one `` that is configured for _signing_ (that is, it + has `use="signing"` or leaves the `use` unspecified) +- A `` with binding of HTTP-Redirect + (`urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect`) +- If you wish to support <>, a `` + with binding of HTTP-Redirect + (`urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect`) + +The Elastic Stack requires that all messages from the IdP are signed. +For authentication `` messages, the signature may be applied to either +the response itself, or to the individual assertions. +For `` messages, the message itself must be signed, and the +signature should be provided as a URL parameter, as required by the HTTP-Redirect +binding. + +=== Configure {es} for SAML Authentication + +There are five configuration steps to enable SAML authentication in {es}: + +. Enable SSL/TLS for HTTP +. Enable the Token Service +. Create one or more SAML realms +. Configure role mappings +. Generate a SAML Metadata file for use by your Identity Provider _(optional)_ + +==== Enable TLS for HTTP + +If your {es} cluster is operating in production mode, then you must +configure the HTTP interface to use SSL/TLS before you can enable SAML +authentication. + +For more information, see +{ref}/configuring-tls.html#tls-http[Encrypting HTTP Client Communications]. + +==== Enable the Token Service + +The {es} SAML implementation makes use of the {es} Token Service. This service +is automatically enabled if you configure TLS on the HTTP interface, and can be +explicitly configured by including the following in your `elasticsearch.yml` file: + +[source, yaml] +------------------------------------------------------------ +xpack.security.authc.token.enabled: true +------------------------------------------------------------ + +[[saml-create-realm]] +==== Create a SAML Realm + +SAML authentication is enabled by configuring a SAML realm within the +authentication chain for {es}. + +This realm has a few mandatory settings, and a number of optional settings. +The available settings are described in detail in the +<>, this guide will walk you through +the most common settings. + +Create a realm by adding the following to your `elasticsearch.yml` +configuration file. Each configuration value is explained below. + +[source, yaml] +------------------------------------------------------------ +xpack.security.authc.realms.saml1: + type: saml + order: 2 + idp.metadata.path: saml/idp-metadata.xml + idp.entity_id: "https://sso.example.com/" + sp.entity_id: "https://kibana.example.com/" + sp.acs: "https://kibana.example.com/api/security/v1/saml" + sp.logout: "https://kibana.example.com/logout" + attributes.principal: "urn:oid:0.9.2342.19200300.100.1.1" + attributes.groups: "urn:oid:1.3.6.1.4.1.5923.1.5.1." 
+------------------------------------------------------------
+
+IMPORTANT: SAML is used when authenticating via {kib}, but it is not an
+effective means of authenticating directly to the {es} REST API. For this reason
+we recommend that you include at least one additional realm such as the
+<> in your authentication chain for use by API
+clients.
+
+The configuration values used in the example above are:
+
+xpack.security.authc.realms.saml1::
+  This defines a new authentication realm named "saml1".
+  See <> for more explanation of realms.
+
+type:: The `type` must be `saml`.
+order::
+  You should define a unique order on each realm in your authentication chain.
+  It is recommended that the SAML realm be at the bottom of your authentication
+  chain (that is, that it has the _highest_ order).
+
+idp.metadata.path::
+  This is the path to the metadata file that you saved for your Identity Provider.
+  The path that you enter here is relative to your `config/` directory.
+  {security} will automatically monitor this file for changes and will
+  reload the configuration whenever it is updated.
+
+idp.entity_id::
+  This is the identifier (SAML EntityID) that your IdP uses.
+  It should match the `entityID` attribute within the metadata file.
+
+sp.entity_id::
+  This is a unique identifier for your {kib} instance, expressed as a URI.
+  You will use this value when you add {kib} as a service provider within your IdP.
+  We recommend that you use the base URL for your {kib} instance as the entity ID.
+
+sp.acs::
+  The _Assertion Consumer Service_ (ACS) endpoint is the URL within {kib} that accepts
+  authentication messages from the IdP.
+  This ACS endpoint supports the SAML HTTP-POST binding only.
+  It must be a URL that is accessible from the web browser of the user who is
+  attempting to log in to {kib}; it does not need to be directly accessible by {es}
+  or the IdP.
+  The correct value may vary depending on how you have installed {kib} and
+  whether there are any proxies involved, but it will typically be
+  +$\{kibana-url}/api/security/v1/saml+ where _$\{kibana-url}_ is the base URL for
+  your {kib} instance.
+
+sp.logout::
+  This is the URL within {kib} that accepts logout messages from the IdP.
+  Like the `sp.acs` URL, it must be accessible from the web browser, but does
+  not need to be directly accessible by {es} or the IdP. The correct value may
+  vary depending on how you have installed {kib} and whether there are any
+  proxies involved, but it will typically be +$\{kibana-url}/logout+ where
+  _$\{kibana-url}_ is the base URL for your {kib} instance.
+
+attributes.principal:: See <>.
+attributes.groups:: See <>.
+
+[[saml-attribute-mapping]]
+==== Attribute Mapping
+
+When a user connects to {kib} through your Identity Provider, the Identity
+Provider will supply a SAML Assertion about the user. The assertion will contain
+an _Authentication Statement_ indicating that the user has successfully
+authenticated to the IdP and one or more _Attribute Statements_ that will
+include _Attributes_ for the user.
+
+These attributes may include such things as:
+
+- the user's username
+- the user's email address
+- the user's groups or roles
+
+Attributes in SAML are named using a URI such as
+`urn:oid:0.9.2342.19200300.100.1.1` or
+`http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn`, and have one or
+more values associated with them.
+
+These attribute identifiers vary between IdPs, and most IdPs offer ways to
+customise the URIs and their associated values.
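+
+For example, one IdP might release the username under the OID-style name shown
+earlier, while another uses a claim-style name; the realm setting simply needs
+to echo whichever URI your IdP actually releases. A minimal sketch, using the
+two URIs quoted above purely as illustrations:
+
+[source, yaml]
+------------------------------------------------------------
+# OID-style attribute name released by one IdP
+xpack.security.authc.realms.saml1.attributes.principal: "urn:oid:0.9.2342.19200300.100.1.1"
+
+# The equivalent claim-style name released by a different IdP
+# xpack.security.authc.realms.saml1.attributes.principal: "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn"
+------------------------------------------------------------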
+
+{es} uses these attributes to infer information about the user who has
+logged in, and they can be used for role mapping (below).
+
+In order for these attributes to be useful, {es} and the IdP need to have a
+common value for the names of the attributes. This is done manually, by
+configuring the IdP and the {security} SAML realm to use the same URI name for
+each logical user attribute.
+
+The recommended steps for configuring these SAML attributes are as follows:
+
+. Consult your IdP to see what user attributes it can provide.
+  This varies greatly between providers, but you should be able to obtain a list
+  from the documentation, or from your local admin.
+
+. Read through the list of <> that {es}
+  supports, and decide which of them are useful to you, and can be provided by
+  your IdP. At a _minimum_, the `principal` attribute is required.
+
+. Configure your IdP to "release" those attributes to your {kib} SAML service
+  provider. This process varies by provider - some will provide a user interface
+  for this, while others may require that you edit configuration files.
+  Usually the IdP (or your local administrator) will have suggestions about what
+  URI to use for each attribute. You can simply accept those suggestions, as the
+  {es} service is entirely configurable and does not require that any specific
+  URIs are used.
+
+. Configure the SAML realm in {es} to associate the {es} user properties (see
+  <> below) with the URIs that you configured
+  in your IdP. In the example above, we have configured the `principal` and
+  `groups` attributes.
+
+===== Special Attribute Names
+
+In general, {es} expects that the configured value for an attribute will be a
+URI such as `urn:oid:0.9.2342.19200300.100.1.1`; however, there are some
+additional names that can be used:
+
+`nameid`::
+  This uses the SAML `NameID` value instead of a SAML attribute. SAML
+  `NameID` elements are an optional, but frequently provided, field within a
+  SAML Assertion that the IdP may use to identify the Subject of that
+  Assertion. In some cases the `NameID` will relate to the user's login
+  identifier (username) within the IdP, but in many cases they will be
+  internally generated identifiers that have no obvious meaning outside
+  of the IdP.
+
+`nameid:persistent`::
+  This uses the SAML `NameID` value, but only if the NameID format is
+  `urn:oasis:names:tc:SAML:2.0:nameid-format:persistent`.
+  A SAML `NameID` element has an optional `Format` attribute that indicates
+  the semantics of the provided name. It is common for IdPs to be configured
+  with "transient" NameIDs that present a new identifier for each session.
+  Since it is rarely useful to use a transient NameID as part of an attribute
+  mapping, the "nameid:persistent" attribute name can be used as a safety
+  mechanism that will cause an error if you attempt to map from a `NameID`
+  that does not have a persistent value.
+
+_friendlyName_::
+  A SAML attribute may have a _friendlyName_ in addition to its URI-based name.
+  For example the attribute with a name of `urn:oid:0.9.2342.19200300.100.1.1`
+  might also have a friendlyName of `uid`.
+  You may use these friendly names within an attribute mapping, but it is
+  recommended that you use the URI-based names, as friendlyNames are neither
+  standardized nor mandatory.
+
+The example below configures a realm to use a persistent nameid for the principal,
+and the attribute with the friendlyName "roles" for the user's groups.
+ +[source, yaml] +------------------------------------------------------------ +xpack.security.authc.realms.saml1: + type: saml + order: 2 + idp.metadata.path: saml/idp-metadata.xml + idp.entity_id: "https://sso.example.com/" + sp.entity_id: "https://kibana.example.com/" + sp.acs: "https://kibana.example.com/api/security/v1/saml" + attributes.principal: "nameid:persistent" + attributes.groups: "roles" +------------------------------------------------------------ + +[[saml-user-properties]] +===== {es} User Properties + +The {es} SAML realm can be configured to map SAML `attributes` to the +following properties on the authenticated user: + +principal:: _(Required)_ + This is the _username_ that will be applied to a user that authenticates + against this realm. + The `principal` appears in places such as the {es} audit logs. + +groups:: _(Recommended)_ + If you wish to use your IdP's concept of groups or roles as the basis for a + user's {es} privileges, you should map them with this attribute. + The `groups` are passed directly to your + <> + +name:: _(Optional)_ The user's full name. +mail:: _(Optional)_ The user's email address. +dn:: _(Optional)_ The user's X.500 _Distinguished Name_. + +===== Extracting partial values from SAML Attributes + +There are some occasions where the IdP's attribute may contain more information +than you wish to use within {es}. A common example of this is one where the +IdP works exclusively with email addresses, but you would like the user's +`principal` to use the _local-name_ part of the email address. +For example if their email address was `james.wong@staff.example.com`, then you +would like their principal to simply be `james.wong`. + +This can be achieved using the `attribute_patterns` setting in the {es} +realm, as demonstrated in the realm configuration below: + +[source, yaml] +------------------------------------------------------------ +xpack.security.authc.realms.saml1: + type: saml + order: 2 + idp.metadata.path: saml/idp-metadata.xml + idp.entity_id: "https://sso.example.com/" + sp.entity_id: "https://kibana.example.com/" + sp.acs: "https://kibana.example.com/api/security/v1/saml" + attributes.principal: "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress" + attribute_patterns.principal: "^([^@]+)@staff\\.example\\.com$" +------------------------------------------------------------ + +In this case, the user's `principal` is mapped from an email attribute, but a +regular expression is applied to the value before it is assigned to the user. +If the regular expression matches, then the result of the first group is used as +effective value. If the regular expression does not match then the attribute +mapping fails. + +In this example, the email address must belong to the `staff.example.com` domain, +and then the local-part (anything before the `@`) is used as the principal. +Any users who try to login using a different email domain will fail because the +regular expression will not match against their email address, and thus their +principal attribute - which is mandatory - will not be populated. + +IMPORTANT: Small mistakes in these regular expressions can have significant +security consequences. For example, if we accidentally left off the trailing +`$` from the example above, then we would match any email address where the +domain starts with `staff.example.com`, and this would accept an email +address such as `admin@staff.example.com.attacker.net`. 
It is important that
+you make sure your regular expressions are as precise as possible so that
+you do not inadvertently open an avenue for user impersonation attacks.
+
+[[saml-logout]]
+==== SAML Logout
+
+The SAML protocol supports the concept of Single Logout (SLO).
+The level of support for SLO varies between Identity Providers.
+You should consult the documentation for your IdP to determine what Logout
+services it offers.
+
+By default, the Elastic Stack will support SAML SLO if the following are true:
+
+- Your IdP metadata specifies that the IdP offers an SLO service
+- You configure `sp.logout`
+- The setting `idp.use_single_logout` is not `false`
+
+===== IdP SLO Service
+
+One of the values that {es} reads from the IdP's SAML metadata is the
+`<SingleLogoutService>`. In order for Single Logout to work with the Elastic
+Stack, {es} requires that this service exists and supports a binding of
+`urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect`.
+
+The Elastic Stack will send both `<LogoutRequest>` and `<LogoutResponse>`
+messages to this service as appropriate.
+
+===== The sp.logout setting
+
+The {es} realm setting `sp.logout` specifies a URL in {kib} to which the IdP can
+send both `<LogoutRequest>` and `<LogoutResponse>` messages. This service uses
+the SAML HTTP-Redirect binding.
+
+{es} will process `<LogoutRequest>` messages, and perform a global signout that
+invalidates any existing {es} security tokens that are associated with the
+provided SAML session.
+
+If you do not configure a value for `sp.logout`, {es} will refuse all
+`<LogoutRequest>` messages.
+
+NOTE: It is common for IdPs to require that `LogoutRequest` messages be signed,
+so you may need to configure <>.
+
+===== The idp.use_single_logout setting
+
+If your IdP provides a `<SingleLogoutService>` but you do not wish to use it,
+you can configure `idp.use_single_logout: false` in your SAML realm, and {es}
+will ignore the SLO service that your IdP provides. In this case, when a user
+logs out of {kib} it will invalidate their {es} session (security token), but
+will not perform any logout at the IdP.
+
+===== Using {kib} without Single Logout
+
+If your IdP does not support Single Logout, or you choose not to use it, then
+{kib} will perform a "local logout" only.
+
+This means that {kib} will invalidate the session token it is using to
+communicate with {es}, but will not be able to perform any sort of invalidation
+of the Identity Provider session. In most cases this will mean that {kib} users
+are still considered to be logged in to the IdP. Consequently, if the user
+navigates to the {kib} landing page, they will be automatically reauthenticated,
+and will commence a new {kib} session without needing to enter any credentials.
+
+The possible solutions to this problem are:
+
+- Ask your IdP administrator or vendor to provide a Single Logout service
+- If your IdP does provide a Single Logout Service, make sure it is included in
+  the IdP metadata file, and do _not_ set `idp.use_single_logout` to `false`.
+- Advise your users to close their browser after logging out of {kib}
+- Enable the `force_authn` setting on your SAML realm, as shown in the example
+  after this list. This setting causes the Elastic Stack to request fresh
+  authentication from the IdP every time a user attempts to log in to {kib}.
+  This setting defaults to `false` because it can be a more cumbersome user
+  experience, but it can also be an effective protection to stop users
+  piggy-backing on existing IdP sessions.
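+
+The following is a minimal sketch (based on the `saml1` realm configured earlier
+in this guide, with unrelated settings elided) of enabling `force_authn`:
+
+[source, yaml]
+------------------------------------------------------------
+xpack.security.authc.realms.saml1:
+  type: saml
+  order: 2
+  # ... idp.*, sp.* and attributes.* settings as shown earlier ...
+  force_authn: true   # require the IdP to freshly authenticate the user on every login
+------------------------------------------------------------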
+
+
+[[saml-enc-sign]]
+==== Encryption and Signing
+
+The Elastic Stack supports generating signed SAML messages (for authentication
+and/or logout), verifying signed SAML messages from the IdP (for both
+authentication and logout) and can process encrypted content.
+
+You can configure {es} for signing, encryption or both, with the same
+or separate keys used for each of those.
+
+The Elastic Stack uses X.509 certificates with RSA private keys for SAML
+cryptography. These keys can be generated using any standard SSL tool, including
+the `elasticsearch-certutil` tool that ships with X-Pack.
+
+Your IdP may require that the Elastic Stack have a cryptographic key for signing
+SAML messages, and that you provide the corresponding signing certificate within
+the Service Provider configuration (either within the Elastic Stack SAML
+metadata file or manually configured within the IdP administration interface).
+While most IdPs do not expect authentication requests to be signed, it is
+commonly the case that signatures are required for logout requests. Your IdP
+will validate these signatures against the signing certificate that has been
+configured for the Elastic Stack Service Provider.
+
+Encryption certificates are rarely needed, but the Elastic Stack supports them
+for cases where IdPs or local policies mandate their use.
+
+===== Generating certificates and keys
+
+{es} supports certificates and keys in either PEM, PKCS#12 or JKS format.
+Some Identity Providers are more restrictive in the formats they support, and
+will require you to provide the certificates as a file in a particular format.
+You should consult the documentation for your IdP to determine what formats they
+support. Since PEM format is the most commonly supported format, the examples
+below will generate certificates in that format.
+
+Using the {ref}/certutil.html[`elasticsearch-certutil`] tool, you can generate a
+signing certificate with the following command:
+
+[source, sh]
+--------------------------------------------------
+bin/elasticsearch-certutil cert -pem -days 1100 -name saml-sign -out saml-sign.zip
+--------------------------------------------------
+
+This will:
+
+- generate a certificate and key pair (the `cert` subcommand)
+- create the files in PEM format (`-pem` option)
+- generate a certificate that is valid for 3 years (`-days 1100`)
+- name the certificate `saml-sign` (`-name` option)
+- save the certificate and key in the `saml-sign.zip` file (`-out` option)
+
+The generated zip archive will contain 3 files:
+
+- `saml-sign.crt`, the public certificate to be used for signing
+- `saml-sign.key`, the private key for the certificate
+- `ca.crt`, a CA certificate that is not needed and can be ignored
+
+Encryption certificates can be generated with the same process.
+
+===== Configuring {es} for Signing
+
+By default, {security} will sign _all_ outgoing SAML messages if a signing
+key has been configured.
+
+If you wish to use *PEM formatted* keys and certificates for signing, then
+you should configure the following settings on the SAML realm:
+
+`signing.certificate`::
+The path to the PEM formatted certificate file. e.g. `saml/saml-sign.crt`
+
+`signing.key`::
+The path to the PEM formatted key file. e.g. `saml/saml-sign.key`
+
+`signing.secure_key_passphrase`::
+The passphrase for the key, if the file is encrypted. This is a
+{ref}/secure-settings.html[secure setting] that must be set with the
+`elasticsearch-keystore` tool.
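+
+For example, a sketch of the `saml1` realm from earlier in this guide, with the
+files from `saml-sign.zip` copied into the {es} `config/saml/` directory (any key
+passphrase is added to the secure keystore rather than to `elasticsearch.yml`):
+
+[source, yaml]
+------------------------------------------------------------
+xpack.security.authc.realms.saml1:
+  type: saml
+  order: 2
+  # ... idp.*, sp.* and attributes.* settings as shown earlier ...
+  signing.certificate: saml/saml-sign.crt   # PEM certificate from saml-sign.zip
+  signing.key: saml/saml-sign.key           # PEM private key from saml-sign.zip
+------------------------------------------------------------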
+ +If you wish to use *PKCS#12 formatted* files or a *Java Keystore* for +signing, then you should configure the following settings on the SAML realm: + +`signing.keystore.path`:: +The path to the PKCS#12 or JKS keystore. e.g. `saml/saml-sign.p12` + +`signing.keystore.alias`:: +The alias of the key within the keystore. e.g. `signing-key` + +`signing.keystore.secure_password`:: +The passphrase for the keystore, if the file is encypted. This is a +{ref}/secure-settings.html[secure setting] that must be set with the +`elasticsearch-keystore` tool. + +If you wish to sign some, but not all outgoing *SAML messages*, then you +should configure the following setting on the SAML realm: + +`signing.saml_messages`:: +A list of message types to sign. A message type is identified by the +_local name_ of the XML element used for the message. Supported values +are: `AuthnRequest`, `LogoutRequest` and `LogoutResponse`. + +===== Configuring {es} for Encrypted Messages + +{security} supports a single key for message decryption. If a key is +configured, then {security} will attempt to use it to decrypt +`EncryptedAssertion` and `EncryptedAttribute` elements in Authentication +responses, and `EncryptedID` elements in Logout requests. + +{security} will reject any SAML message that contains an `EncryptedAssertion` +that cannot be decrypted. + +If an `Assertion` contains both encrypted and plain-text attributes, then +failure to decrypt the encrypted attributes will not cause an automatic +rejection. Rather, {security} will process the available plain-text attributes +(and any `EncryptedAttributes` that could be decrypted). + +If you wish to use *PEM formatted* keys and certificates for SAML encryption, +then you should configure the following settings on the SAML realm: + +`encryption.certificate`:: +The path to the PEM formatted certificate file. e.g. `saml/saml-crypt.crt` + +`encryption.key`:: +The path to the PEM formatted key file. e.g. `saml/saml-crypt.key` + +`encryption.secure_key_passphrase`:: +The passphrase for the key, if the file is encypted. This is a +{ref}/secure-settings.html[secure setting] that must be set with the +`elasticsearch-keystore` tool. + +If you wish to use *PKCS#12 formatted* files or a *Java Keystore* for SAML +encryption, then you should configure the following settings on the SAML realm: + +`encryption.keystore.path`:: +The path to the PKCS#12 or JKS keystore. e.g. `saml/saml-crypt.p12` + +`encryption.keystore.alias`:: +The alias of the key within the keystore. e.g. `encryption-key` + +`encryption.keystore.secure_password`:: +The passphrase for the keystore, if the file is encypted. This is a +{ref}/secure-settings.html[secure setting] that must be set with the +`elasticsearch-keystore` tool. + +=== Generating SP metadata + +Some Identity Providers support importing a metadata file from the Service +Provider. This will automatically configure many of the integration options +between the IdP and the SP. + +The Elastic Stack supports generating such a metadata file using the +`bin/elasticsearch-saml-metadata` command in your {es} directory. + +The {ref}/saml-metadata.html[documentation for the elasticsearch-saml-metadata utility] +describes how to run it, and the available command line options. + +[[saml-role-mapping]] +=== Configuring Role Mappings + +When a user authenticates using SAML, they are identified to the Elastic Stack, +but this does not automatically grant them access to perform any actions or +access any data. 
+ +Your SAML users cannot do anything until they are mapped to X-Pack Security +roles. This mapping is performed through the +{ref}/security-api-role-mapping.html[role-mapping API] + +This is an example of a simple role mapping that grants the `kibana_user` role +to any user who authenticates against the `saml1` realm: + +[source,js] +-------------------------------------------------- +PUT /_xpack/security/role_mapping/saml-kibana +{ + "roles": [ "kibana_user" ], + "enabled": true, + "rules": { + "field": { "realm.name": "saml1" } + } +} +-------------------------------------------------- +// CONSOLE +// TEST + + +The attributes that are mapped via the realm configuration are used to process +role mapping rules, and these rules determine which roles a user is granted. + +The user fields that are provided to the role +mapping are derived from the SAML attributes as follows: + +- `username`: The `principal` attribute +- `dn`: The `dn` attribute +- `groups`: The `groups` attribute +- `metadata`: See <> + +For more information, see <> and +{ref}/security-api-role-mapping.html[Role Mapping APIs]. + +If your IdP has the ability to provide groups or roles to Service Providers, +then you should map this SAML attribute to the `attributes.groups` setting in +the {es} realm, and then make use of it in a role mapping as per the example +below. + +This mapping grants the {es} `finance_data` role, to any users who authenticate +via the `saml1` realm with the `finance-team` group. + +[source,js] +-------------------------------------------------- +PUT /_xpack/security/role_mapping/saml-finance +{ + "roles": [ "finance_data" ], + "enabled": true, + "rules": { "all": [ + { "field": { "realm.name": "saml1" } }, + { "field": { "groups": "finance-team" } } + ] } +} +-------------------------------------------------- +// CONSOLE +// TEST + +[[saml-user-metadata]] +=== User Metadata + +By default users who authenticate via SAML will have some additional metadata +fields. + +- `saml_nameid` will be set to the value of the `NameID` element in the SAML + authentication response +- `saml_nameid_format` will be set to the full URI of the NameID's `format` + attribute +- Every SAML Attribute that is provided in the authentication response + (regardless of whether it is mapped to an {es} user property), will be added + as the metadata field `saml(name)` where "name" is the full URI name of the + attribute. For example `saml(urn:oid:0.9.2342.19200300.100.1.3)`. +- For every SAML Attribute that has a _friendlyName_, will also be added as the + metadata field `saml_friendlyName` where "name" is the full URI name of the + attribute. For example `saml_mail`. + +This behaviour can be disabled by adding `populate_user_metadata: false` to as +a setting in the saml realm. + +[[saml-kibana]] +=== Configuring {kib} + +SAML authentication in {kib} requires a small number of additional settings +in addition to the standard {kib} security configuration. The +{kibana-ref}/using-kibana-with-security.html[{kib} security documentation] +provides details on the available configuration options that you can apply. + +In particular, since your {es} nodes have been configured to use TLS on the HTTP +interface, you must configure {kib} to use a `https` URL to connect to {es}, and +you may need to configure `elasticsearch.ssl.certificateAuthorities` to trust +the certificates that {es} has been configured to use. 
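+
+For example, a minimal sketch of the relevant `kibana.yml` settings (the URL and
+certificate path are placeholders for your own environment, and assume the
+standard `elasticsearch.url` setting is used to point {kib} at the cluster):
+
+[source, yaml]
+------------------------------------------------------------
+elasticsearch.url: "https://elasticsearch.example.com:9200"
+elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/ca.crt" ]
+------------------------------------------------------------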
+ +SAML authentication in {kib} is also subject to the +`xpack.security.sessionTimeout` setting that is described in the {kib} security +documentation, and you may wish to adjst this timeout to meet your local needs. + +The two additional settings that are required for SAML support are shown below: + +[source, yaml] +------------------------------------------------------------ +xpack.security.authProviders: [saml] +server.xsrf.whitelist: [/api/security/v1/saml] +------------------------------------------------------------ + +The configuration values used in the example above are: + +`xpack.security.authProviders`:: +Set this to `[ saml ]` to instruct {kib} to use SAML SSO as the authentication +method. + +`server.xsrf.whitelist`:: +{kib} has in-built protection against _Cross Site Request Forgery_ attacks which +are designed to prevent the {kib} server from processing requests that +originated from outside the {kib} application. +In order to support SAML authentication messages that originate from your +Identity Provider, we need to explicitly _whitelist_ the SAML authentication URL +within {kib}, so that the {kib} server will not reject these external messages. + +If your {kib} instance is behind a proxy, you may also need to add configuration +to tell {kib} how to form its public URL. This is needed because all SAML +messages are exchanged via the user's web browser, so {kib} needs to know what +URLs are used within the browser. In this case, the following settings should be +added to your `kibana.yml` configuration file: + +[source, yaml] +------------------------------------------------------------ +xpack.security.public: + protocol: https + hostname: kibana.proxy.com + port: 443 +------------------------------------------------------------ + +`xpack.security.public.protocol`:: +This is the protocol that the user's web browser uses to connect to the proxy. +Must be one of `http` or `https`. It is strongly recommended that you use the +`https` protocol for all access to {kib}. + +`xpack.security.public.hostname`:: +The fully qualified hostname that your users use to connect to the proxy server. + +`xpack.security.public.port`:: +The port number that your users use to connect to the proxy server (e.g. `80` +for `http` or `443` for `https`). + +These values must be aligned with the URLs used in the {es} configuration for +`sp.acs` and `sp.logout`. + +==== Supporting SAML and Basic authentication in {kib} + +The SAML support in {kib} is designed on the expectation that it will be the +primary (or sole) authentication method for users of that {kib} instance. +However, it is possible to support both SAML and Basic authentication within a +single {kib} instance by setting `xpack.security.authProviders` as per the +example below: + +[source, yaml] +------------------------------------------------------------ +xpack.security.authProviders: [saml, basic] +------------------------------------------------------------ + +The order is important - this will _initiate_ SAML authentication for +unauthenticated users, but will _accept_ basic authentication. + +If {kib} is configured in this way, then users who wish to login with a +username and password, can do so by directly accessing the `/login` page in +{kib}. This login will not use SAML credentials, and will rely on one of the +other security realms within {es}. Only users who have a username and password +for a configured {es} authentication realm will be able to login via this page. 
+ +Alternatively, when the `basic` authentication provider is enabled, you can +place a reverse proxy in front of {kib}, and configure it to send a basic +authentication header (`Authorization: Basic ....`) for each request. +If this header is present and valid, {kib} will not initiate the SAML +authentication process. + +==== Operating multiple {kib} instances + +If you wish to have multiple {kib} instances that authenticate against the same +{es} cluster, then each {kib} instance that is configured for SAML authentication, +requires its own SAML realm. + +Each SAML realm must have its own unique Entity ID (`sp.entity_id`), and its own +_Assertion Consumer Service_ (`sp.acs`). Each {kib} instance will be mapped to +the correct realm by looking up the matching `sp.acs` value. + +These realms may use the same Identity Provider, but are not required to. + +The following is example of having 3 difference {kib} instances, 2 of which +use the same internal IdP, and another which uses a different IdP. + +[source, yaml] +------------------------------------------------------------ +xpack.security.authc.realms.saml_finance: + type: saml + order: 2 + idp.metadata.path: saml/idp-metadata.xml + idp.entity_id: "https://sso.example.com/" + sp.entity_id: "https://kibana.finance.example.com/" + sp.acs: "https://kibana.finance.example.com/api/security/v1/saml" + sp.logout: "https://kibana.finance.example.com/logout" + attributes.principal: "urn:oid:0.9.2342.19200300.100.1.1" + attributes.groups: "urn:oid:1.3.6.1.4.1.5923.1.5.1." +xpack.security.authc.realms.saml_sales: + type: saml + order: 3 + idp.metadata.path: saml/idp-metadata.xml + idp.entity_id: "https://sso.example.com/" + sp.entity_id: "https://kibana.sales.example.com/" + sp.acs: "https://kibana.sales.example.com/api/security/v1/saml" + sp.logout: "https://kibana.sales.example.com/logout" + attributes.principal: "urn:oid:0.9.2342.19200300.100.1.1" + attributes.groups: "urn:oid:1.3.6.1.4.1.5923.1.5.1." +xpack.security.authc.realms.saml_eng: + type: saml + order: 4 + idp.metadata.path: saml/idp-external.xml + idp.entity_id: "https://engineering.sso.example.net/" + sp.entity_id: "https://kibana.engineering.example.com/" + sp.acs: "https://kibana.engineering.example.com/api/security/v1/saml" + sp.logout: "https://kibana.engineering.example.com/logout" + attributes.principal: "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn" +------------------------------------------------------------ + +It is possible to have one or more {kib} instances that use SAML, while other +instances use basic authentication against another realm type (e.g. +<> or <>). diff --git a/x-pack/docs/en/security/authentication/saml-realm.asciidoc b/x-pack/docs/en/security/authentication/saml-realm.asciidoc new file mode 100644 index 0000000000000..4de8d5a28ce3e --- /dev/null +++ b/x-pack/docs/en/security/authentication/saml-realm.asciidoc @@ -0,0 +1,262 @@ +[[saml-realm]] +=== SAML Authentication +{security} supports user authentication using SAML Single Sign On. +{security} provides this support using the Web Browser SSO profile of the SAML +2.0 protocol. + +This protocol is specifically designed to support authentication via an +interactive web browser, so it does not operate as a standard authentication +realm. Instead, {security} provides features in {kib} and {es} that work +together to enable interactive SAML sessions. + +This means that the SAML realm is not suitable for use by standard REST clients. 
+If you configure a SAML realm for use in {kib}, you should also configure +another realm, such as the <> in your authentication +chain. + +In order to simplify the process of configuring SAML authentication within the +Elastic Stack, there is a step-by-step guide to +<>. + +The remainder of this document will describe {es} specific configuration options +for SAML realms. + + +[[saml-settings]] +==== SAML Realm Settings + +[cols="4,^3,10"] +|======================= +| Setting | Required | Description +| `type` | yes | Indicates the realm type. Must be set to `saml`. +| `order` | no | Indicates the priority of this realm within the realm chain. + Realms with a lower order are consulted first. Although not + required, we recommend explicitly setting this value when + you configure multiple realms. Defaults to `Integer.MAX_VALUE`. +| `enabled` | no | Indicates whether this realm is enabled or disabled. Enables + you to disable a realm without removing its configuration. + Defaults to `true`. +| `idp.entity_id` | yes | The Entity ID of the SAML Identity Provider. An Entity ID is + a URI with a maximum length of 1024 characters. It can be a + URL (`https://idp.example.com/`) or a URN (`urn:example.com:idp`) + and can be found in the configuration or the SAML metadata + of the Identity Provider. +| `idp.metadata.path` | yes | The path (_recommended_) or URL to a SAML 2.0 metadata file + describing the capabilities and configuration of the Identity + Provider. + If a path is provided, then it is resolved relative to the + {es} config directory. + If a URL is provided, then it must be either a `file` URL or + a `https` URL. + {security} will automatically poll this metadata resource and + will reload the IdP configuration when changes are detected. + File based resources are polled at a frequency determined by + the global {es} `resource.reload.interval.high` setting, which + defaults to 5 seconds. + HTTPS resources are polled at a frequency determined by + the realm's `idp.metadata.http.refresh` setting. +| `idp.metadata.http.refresh` | no | Controls the frequency with which `https` metadata is checked + for changes. Defaults to 1 hour. +| `idp.use_single_logout` | no | Indicates whether to utilise the Identity Provider's Single + Logout service (if one exists in the IdP metadata file). + Defaults to `true`. +| `sp.entity_id` | yes | The Entity ID to use for this SAML Service Provider. + This should be entered as a URI. We recommend that you use the + base URL of your {kib} instance, + e.g. `https://kibana.example.com/` +| `sp.acs` | yes | The URL of the Assertion Consumer Service within {kib}. + Typically this will be the "api/security/v1/saml" endpoint of + your {kib} server, + e.g. `https://kibana.example.com/api/security/v1/saml` +| `sp.logout` | no | The URL of the Single Logout service within {kib}. + Typically this will be the "logout" endpoint of + your {kib} server, + e.g. 
`https://kibana.example.com/logout` +| `attributes.principal` | yes | The Name of the SAML attribute that should be used as the + {security} user's principal (username) +| `attributes.groups` | no | The Name of the SAML attribute that should be used to populate + {security} user's groups +| `attributes.name` | no | The Name of the SAML attribute that should be used to populate + {security} user's full name +| `attributes.mail` | no | The Name of the SAML attribute that should be used to populate + {security} user's email address +| `attributes.dn` | no | The Name of the SAML attribute that should be used to populate + {security} user's X.500 _Distinguished Name_ +| `attribute_patterns.principal` | no | A java regular expression that is matched against the SAML attribute + specified by `attributes.pattern` before it is applied to the user's + _principal_ property. + The attribute value must match the pattern, and the value of the + first _capturing group_ is used as the principal. + e.g. `^([^@]+)@example\\.com$` matches email addresses from the + "example.com" domain and uses the local-part as the principal. +| `attribute_patterns.groups` | no | As per `attribute_patterns.principal`, but for the _group_ property. +| `attribute_patterns.name` | no | As per `attribute_patterns.principal`, but for the _name_ property. +| `attribute_patterns.mail` | no | As per `attribute_patterns.principal`, but for the _mail_ property. +| `attribute_patterns.dn` | no | As per `attribute_patterns.principal`, but for the _dn_ property. +| `nameid_format` | no | The NameID format that should be requested when asking the IdP + to authenticate the current user. + Defaults to requesting _transient_ names + (`urn:oasis:names:tc:SAML:2.0:nameid-format:transient`) +| `nameid.allow_create` | no | The value of the `AllowCreate` attribute of the `NameIdPolicy` + element in an authentication request. + Defaults to `false` +| `nameid.sp_qualifier` | no | The value of the `SPNameQualifier` attribute of the `NameIdPolicy` + element in an authentication request. + The default is to not include the `SPNameQualifier` attribute. +| `force_authn` | no | Whether to set the `ForceAuthn` attribute when requesting that the + IdP authenticate the current user. If this is set to `true`, the + IdP will be required to freshly establish the user's identity, + irrespective of any exiting sessions they may have. + Defaults to `false`. +| `populate_user_metadata` | no | Whether to populate the {es} user's metadata with the values that + are provided by the SAML attributes. Defaults to `true`. +| `allowed_clock_skew` | no | The maximum amount of skew that can be tolerated between the + IdP's clock and the {es} node's clock. Defaults to 3 minutes. +|======================= + +===== SAML Realm Signing Settings + +If a signing key is configured (i.e. is one of `signing.key` or `signing.keystore.path` has been set), then +{security} will sign outgoing SAML messages. Signing can be configured using the following settings. + +|======================= +| Setting | Required | Description +| `signing.saml_messages` | no | A list of SAML message types that should be signed, or `*` to + sign all messages. Each element in the list should be the + local name of a SAML XML Element. Supported element types are + `AuthnRequest`, `LogoutRequest` and `LogoutResponse`. + Defaults to `*`. +| `signing.key` | no | Specifies the path to the PEM encoded private key to use for + SAML message signing. 
+ `signing.key` and `signing.keystore.path` may not be used at + the same time. +| `signing.secure_key_passphrase` | no | ({ref}/secure-settings.html[Secure]) + Specifies the passphrase to decrypt the PEM encoded private key if + it is encrypted. +| `signing.certificate` | no | Specifies the path to the PEM encoded certificate (or certificate + chain) that corresponds to the `signing.key`. This certificate + must also be included in the Service Provider metadata, or + manually configured within the IdP to allow for signature + validation. + May only be used if `signing.key` is set. +| `signing.keystore.path` | no | The path to the keystore that contains a private key and + certificate. + Must be either a Java Keystore (jks) or a PKCS#12 file. + `signing.key` and `signing.keystore.path` may not be used at the + same time. +| `signing.keystore.type` | no | The type of the keystore. Must be one of "jks" or "PKCS12". + Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or + "pkcs12", otherwise uses "jks" +| `signing.keystore.alias` | no | Specifies the alias of the key within the keystore that should be + used for SAML message signing. Must be specified if the keystore + contains more than one private key. +| `signing.keystore.secure_password` | no | ({ref}/secure-settings.html[Secure]) The password to the keystore. +| `signing.keystore.secure_key_password` | no | ({ref}/secure-settings.html[Secure]) + The password for the key in the keystore. + Defaults to the keystore password. +|======================= + +===== SAML Realm Encryption Settings + +If an encryption key is configured (i.e. is one of `encryption.key` or +`encryption.keystore.path` has been set), then {security} will publish +an encryption certificate when generating metadata, and will attempt to +decrypt incoming SAML content. +Encryption can be configured using the following settings. + +|======================= +| Setting | Required | Description +| `encryption.key` | no | Specifies the path to the PEM encoded private key to use for + SAML message descryption. + `encryption.key` and `encryption.keystore.path` may not be used at + the same time. +| `encryption.secure_key_passphrase` | no | ({ref}/secure-settings.html[Secure]) + Specifies the passphrase to decrypt the PEM encoded private key if + it is encrypted. +| `encryption.certificate` | no | Specifies the path to the PEM encoded certificate (or certificate + chain) that is associated with the `encryption.key`. This + certificate must also be included in the Service Provider metadata, + or manually configured within the IdP to enable message encryption. + May only be used if `encryption.key` is set. +| `encryption.keystore.path` | no | The path to the keystore that contains a private key and + certificate. + Must be either a Java Keystore (jks) or a PKCS#12 file. + `encryption.key` and `encryption.keystore.path` may not be used at + the same time. +| `encryption.keystore.type` | no | The type of the keystore. Must be one of "jks" or "PKCS12". + Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or + "pkcs12", otherwise uses "jks" +| `encryption.keystore.alias` | no | Specifies the alias of the key within the keystore that should be + used for SAML message decryption. If not specified, all compatible + key pairs from the keystore will be considered as candidate keys + for decryption. +| `encryption.keystore.secure_password` | no | ({ref}/secure-settings.html[Secure]) The password to the keystore. 
+| `encryption.keystore.secure_key_password` | no | ({ref}/secure-settings.html[Secure]) + The password for the key in the keystore. Only a single password is + supported. If you are using multiple decryption keys, then they + cannot have individual passwords. +|======================= + +===== SAML Realm SSL Settings + +If you are loading the IdP metadata over SSL/TLS (that is, `idp.metadata.path` is a URL using the `https` protocol) +Then the following settings may be used to configure SSL. If these are not specified, then the {xpack} +{ref}/security-settings.html#ssl-tls-settings[default SSL settings] are used. + +These settings are not used for any purpose other than loading metadata over https. + +|======================= +| Setting | Required | Description +| `ssl.key` | no | Specifies the path to the PEM encoded private key to use for http + client authentication. + `ssl.key` and `ssl.keystore.path` may not be used at the same time. +| `ssl.key_passphrase` | no | Specifies the passphrase to decrypt the PEM encoded private key if + it is encrypted. May not be used with `ssl.secure_key_passphrase` +| `ssl.secure_key_passphrase` | no | ({ref}/secure-settings.html[Secure]) + Specifies the passphrase to decrypt the PEM encoded private key if + it is encrypted. May not be used with `ssl.key_passphrase` +| `ssl.certificate` | no | Specifies the path to the PEM encoded certificate (or certificate + chain) that goes with the key. May only be used if `ssl.key` is set. +| `ssl.certificate_authorities` | no | Specifies the paths to the PEM encoded certificate authority + certificates that should be trusted. + `ssl.certificate_authorities` and `ssl.truststore.path` may not be + used at the same time. +| `ssl.keystore.path` | no | The path to the keystore that contains a private key and + certificate. + Must be either a Java Keystore (jks) or a PKCS#12 file. + `ssl.key` and `ssl.keystore.path` may not be used at the same time. +| `ssl.keystore.type` | no | The type of the keystore. Must be one of "jks" or "PKCS12". + Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or + "pkcs12", otherwise uses "jks" +| `ssl.keystore.password` | no | The password to the keystore. + May not be used with `ssl.keystore.secure_password`. +| `ssl.keystore.secure_password` | no | ({ref}/secure-settings.html[Secure]) The password to the keystore. + May not be used with `ssl.keystore.password`. +| `ssl.keystore.key_password` | no | The password for the key in the keystore. + Defaults to the keystore password. + May not be used with `ssl.keystore.secure_key_password`. +| `ssl.keystore.secure_key_password` | no | ({ref}/secure-settings.html[Secure]) + The password for the key in the keystore. + Defaults to the keystore password. + May not be used with `ssl.keystore.key_password`. +| `ssl.truststore.path` | no | The path to the keystore that contains the certificates to trust. + Must be either a Java Keystore (jks) or a PKCS#12 file. + `ssl.certificate_authorities` and `ssl.truststore.path` may not be + used at the same time. +| `ssl.truststore.type` | no | The type of the truststore. Must be one of "jks" or "PKCS12". + Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or + "pkcs12", otherwise uses "jks" +| `ssl.truststore.password` | no | The password to the truststore. + May not be used with `ssl.truststore.secure_password`. +| `ssl.truststore.secure_password` | no | ({ref}/secure-settings.html[Secure]) The password to the truststore. + May not be used with `ssl.truststore.password`. 
+| `ssl.verification_mode` | no | One of `full` (verify the hostname and the certicate path), + `certificate` (verify the certificate path, but not the hostname) + or `none` (perform no verification). Defaults to `full`. ++ + See {ref}/security-settings.html#ssl-tls-settings[`xpack.ssl.verification_mode`] + for a more detailed explanation of these values. +| `ssl.supported_protocols` | no | Specifies the supported protocols for TLS/SSL. +| `ssl.cipher_suites` | no | Specifies the cipher suites that should be supported. +|======================= + diff --git a/x-pack/docs/en/security/authentication/user-cache.asciidoc b/x-pack/docs/en/security/authentication/user-cache.asciidoc new file mode 100644 index 0000000000000..ba2b363a843ed --- /dev/null +++ b/x-pack/docs/en/security/authentication/user-cache.asciidoc @@ -0,0 +1,62 @@ +[[controlling-user-cache]] +=== Controlling the User Cache + +User credentials are cached in memory on each node to avoid connecting to a +remote authentication service or hitting the disk for every incoming request. +You can configure characteristics of the user cache with the `cache.ttl`, +`cache.max_users`, and `cache.hash_algo` realm settings. + +NOTE: PKI realms do not cache user credentials but do cache the resolved user +object to avoid unnecessarily needing to perform role mapping on each request. + +The cached user credentials are hashed in memory. By default, {security} uses a +salted `sha-256` hash algorithm. You can use a different hashing algorithm by +setting the `cache_hash_algo` setting to any of the following: + +[[cache-hash-algo]] +.Cache hash algorithms +|======================= +| Algorithm | | | Description +| `ssha256` | | | Uses a salted `sha-256` algorithm (default). +| `md5` | | | Uses `MD5` algorithm. +| `sha1` | | | Uses `SHA1` algorithm. +| `bcrypt` | | | Uses `bcrypt` algorithm with salt generated in 1024 rounds. +| `bcrypt4` | | | Uses `bcrypt` algorithm with salt generated in 16 rounds. +| `bcrypt5` | | | Uses `bcrypt` algorithm with salt generated in 32 rounds. +| `bcrypt6` | | | Uses `bcrypt` algorithm with salt generated in 64 rounds. +| `bcrypt7` | | | Uses `bcrypt` algorithm with salt generated in 128 rounds. +| `bcrypt8` | | | Uses `bcrypt` algorithm with salt generated in 256 rounds. +| `bcrypt9` | | | Uses `bcrypt` algorithm with salt generated in 512 rounds. +| `noop`,`clear_text` | | | Doesn't hash the credentials and keeps it in clear text in + memory. CAUTION: keeping clear text is considered insecure + and can be compromised at the OS level (for example through + memory dumps and using `ptrace`). +|======================= + +[[cache-eviction-api]] +==== Evicting Users from the Cache + +{security} exposes a +{ref}/security-api-clear-cache.html[Clear Cache API] you can use +to force the eviction of cached users. 
For example, the following request evicts +all users from the `ad1` realm: + +[source, js] +------------------------------------------------------------ +$ curl -XPOST 'http://localhost:9200/_xpack/security/realm/ad1/_clear_cache' +------------------------------------------------------------ + +To clear the cache for multiple realms, specify the realms as a comma-separated +list: + +[source, js] +------------------------------------------------------------ +$ curl -XPOST 'http://localhost:9200/_xpack/security/realm/ad1,ad2/_clear_cache' +------------------------------------------------------------ + +You can also evict specific users: + +[source, java] +------------------------------------------------------------ +$ curl -XPOST 'http://localhost:9200/_xpack/security/realm/ad1/_clear_cache?usernames=rdeniro,alpacino' +------------------------------------------------------------ diff --git a/x-pack/docs/en/security/authorization.asciidoc b/x-pack/docs/en/security/authorization.asciidoc new file mode 100644 index 0000000000000..4a3ffe399de1b --- /dev/null +++ b/x-pack/docs/en/security/authorization.asciidoc @@ -0,0 +1,349 @@ +[[authorization]] +== Configuring Role-based Access Control + +{security} introduces the concept of _authorization_ to {es}. +Authorization is the process of determining whether the user behind an incoming +request is allowed to execute it. This process takes place once a request is +successfully authenticated and the user behind the request is identified. + +[[roles]] +[float] +=== Roles, Permissions and Privileges + +The authorization process revolves around the following 5 constructs: + +_Secured Resource_:: +A resource to which access is restricted. Indices/aliases, documents, fields, +users and the {es} cluster itself are all examples of secured objects. + +_Privilege_:: +A named group representing one or more actions that a user may execute against a +secured resource. Each secured resource has its own sets of available privileges. +For example, `read` is an index privilege that represents all actions that enable +reading the indexed/stored data. For a complete list of available privileges +see <>. + +_Permissions_:: +A set of one or more privileges against a secured resource. Permissions can +easily be described in words, here are few examples: + * `read` privilege on the `products` index + * `manage` privilege on the cluster + * `run_as` privilege on `john` user + * `read` privilege on documents that match query X + * `read` privilege on `credit_card` field + +_Role_:: +A named sets of permissions + +_User_:: +The authenticated user. + +A secure {es} cluster manages the privileges of users through _roles_. +A role has a unique name and identifies a set of permissions that translate to +privileges on resources. A user can be associated with an arbitrary number of +roles. The total set of permissions that a user has is therefore defined by +union of the permissions in all its roles. + +As an administrator, you will need to define the roles that you want to use, +then assign users to the roles. These can be assigned to users in a number of +ways depending on the realms by which the users are authenticated. + +[[built-in-roles]] +=== Built-in Roles + +{security} applies a default role to all users, including +<>. The default role enables users to access +the authenticate endpoint, change their own passwords, and get information about +themselves. + +{security} also provides a set of built-in roles you can explicitly assign +to users. 
These roles have a fixed set of privileges and cannot be updated. + +[[built-in-roles-ingest-user]] `ingest_admin` :: +Grants access to manage *all* index templates and *all* ingest pipeline configurations. ++ +NOTE: This role does *not* provide the ability to create indices; those privileges +must be defined in a separate role. + +[[built-in-roles-kibana-dashboard]] `kibana_dashboard_only_user` :: +Grants access to the {kib} Dashboard and read-only permissions on the `.kibana` +index. This role does not have access to editing tools in {kib}. For more +information, see +{kibana-ref}/xpack-dashboard-only-mode.html[{kib} Dashboard Only Mode]. + +[[built-in-roles-kibana-system]] `kibana_system` :: +Grants access necessary for the {kib} system user to read from and write to the +{kib} indices, manage index templates, and check the availability of the {es} cluster. +This role grants read access to the `.monitoring-*` indices and read and write access +to the `.reporting-*` indices. For more information, see +{kibana-ref}/using-kibana-with-security.html[Configuring Security in {kib}]. ++ +NOTE: This role should not be assigned to users as the granted permissions may +change between releases. + +[[built-in-roles-kibana-user]] `kibana_user`:: +Grants the minimum privileges required for any user of {kib}. This role grants +access to the {kib} indices and grants monitoring privileges for the cluster. + +[[built-in-roles-logstash-admin]] `logstash_admin` :: +Grants access to the `.logstash*` indices for managing configurations. + +[[built-in-roles-logstash-system]] `logstash_system` :: +Grants access necessary for the Logstash system user to send system-level data +(such as monitoring) to {es}. For more information, see +{logstash-ref}/ls-security.html[Configuring Security in Logstash]. ++ +NOTE: This role should not be assigned to users as the granted permissions may +change between releases. ++ +NOTE: This role does not provide access to the logstash indices and is not +suitable for use within a Logstash pipeline. + +[[built-in-roles-beats-system]] `beats_system` :: +Grants access necessary for the Beats system user to send system-level data +(such as monitoring) to {es}. ++ +NOTE: This role should not be assigned to users as the granted permissions may +change between releases. ++ +NOTE: This role does not provide access to the beats indices and is not +suitable for writing beats output to {es}. + +[[built-in-roles-ml-admin]] `machine_learning_admin`:: +Grants `manage_ml` cluster privileges and read access to the `.ml-*` indices. + +[[built-in-roles-ml-user]] `machine_learning_user`:: +Grants the minimum privileges required to view {xpackml} configuration, +status, and results. This role grants `monitor_ml` cluster privileges and +read access to the `.ml-notifications` and `.ml-anomalies*` indices, +which store {ml} results. + +[[built-in-roles-monitoring-user]] `monitoring_user`:: +Grants the minimum privileges required for any user of {monitoring} other than those +required to use {kib}. This role grants access to the monitoring indices and grants +privileges necessary for reading basic cluster information. Monitoring users should +also be assigned the `kibana_user` role. + +[[built-in-roles-remote-monitoring-agent]] `remote_monitoring_agent`:: +Grants the minimum privileges required for a remote monitoring agent to write data +into this cluster. 
+ +[[built-in-roles-reporting-user]] `reporting_user`:: +Grants the specific privileges required for users of {reporting} other than those +required to use {kib}. This role grants access to the reporting indices. Reporting +users should also be assigned the `kibana_user` role and a role that grants them +access to the data that will be used to generate reports with. + +[[built-in-roles-superuser]] `superuser`:: +Grants full access to the cluster, including all indices and data. A user with +the `superuser` role can also manage users and roles and +<> any other user in the system. Due to the +permissive nature of this role, take extra care when assigning it to a user. + +[[built-in-roles-transport-client]] `transport_client`:: +Grants the privileges required to access the cluster through the Java Transport +Client. The Java Transport Client fetches information about the nodes in the +cluster using the _Node Liveness API_ and the _Cluster State API_ (when +sniffing is enabled). Assign your users this role if they use the +Transport Client. ++ +NOTE: Using the Transport Client effectively means the users are granted access +to the cluster state. This means users can view the metadata over all indices, +index templates, mappings, node and basically everything about the cluster. +However, this role does not grant permission to view the data in all indices. + +[[built-in-roles-watcher-admin]] `watcher_admin`:: ++ +Grants write access to the `.watches` index, read access to the watch history and +the triggered watches index and allows to execute all watcher actions. + +[[built-in-roles-watcher-user]] `watcher_user`:: ++ +Grants read access to the `.watches` index, the get watch action and the watcher +stats. + + +[[defining-roles]] +=== Defining Roles + +A role is defined by the following JSON structure: + +[source,js] +----- +{ + "run_as": [ ... ], <1> + "cluster": [ ... ], <2> + "indices": [ ... ] <3> +} +----- +<1> A list of usernames the owners of this role can <>. +<2> A list of cluster privileges. These privileges define the + cluster level actions users with this role are able to execute. This field + is optional (missing `cluster` privileges effectively mean no cluster level + permissions). +<3> A list of indices permissions entries. This field is optional (missing `indices` + privileges effectively mean no index level permissions). + +[[valid-role-name]] +NOTE: Role names must be at least 1 and no more than 1024 characters. They can + contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces, + punctuation, and printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block]. + Leading or trailing whitespace is not allowed. + +The following describes the structure of an indices permissions entry: + +[source,js] +------- +{ + "names": [ ... ], <1> + "privileges": [ ... ], <2> + "field_security" : { ... }, <3> + "query": "..." <4> +} +------- +<1> A list of indices (or index name patterns) to which the permissions in this + entry apply. +<2> The index level privileges the owners of the role have on the associated + indices (those indices that are specified in the `name` field) +<3> Specification for document fields the owners of the role have read access to. + See <> for details. +<4> A search query that defines the documents the owners of the role have read + access to. A document within the associated indices must match this query + in order for it to be accessible by the owners of the role. 
+ +[TIP] +============================================================================== +When specifying index names, you can use indices and aliases with their full +names or regular expressions that refer to multiple indices. + +* Wildcard (default) - simple wildcard matching where `*` is a placeholder + for zero or more characters, `?` is a placeholder for a single character + and `\` may be used as an escape character. + +* Regular Expressions - A more powerful syntax for matching more complex + patterns. This regular expression is based on Lucene's regexp automaton + syntax. To enable this syntax, it must be wrapped within a pair of + forward slashes (`/`). Any pattern starting with `/` and not ending with + `/` is considered to be malformed. + +.Example Regular Expressions +[source,yaml] +------------------------------------------------------------------------------ +"foo-bar": # match the literal `foo-bar` +"foo-*": # match anything beginning with "foo-" +"logstash-201?-*": # ? matches any one character +"/.*-201[0-9]-.*/": # use a regex to match anything containing 2010-2019 +"/foo": # syntax error - missing final / +------------------------------------------------------------------------------ +============================================================================== + +The following snippet shows an example definition of a `clicks_admin` role: + +[source,js] +----------- +{ + "run_as": [ "clicks_watcher_1" ] + "cluster": [ "monitor" ], + "indices": [ + { + "names": [ "events-*" ], + "privileges": [ "read" ], + "field_security" : { + "grant" : [ "category", "@timestamp", "message" ] + }, + "query": "{\"match\": {\"category\": \"click\"}}" + } + ] +} +----------- + +Based on the above definition, users owning the `clicks_admin` role can: + + * Impersonate the `clicks_watcher_1` user and execute requests on its behalf. + * Monitor the {es} cluster + * Read data from all indices prefixed with `events-` + * Within these indices, only read the events of the `click` category + * Within these document, only read the `category`, `@timestamp` and `message` + fields. + +TIP: For a complete list of available <> + +There are two available mechanisms to define roles: using the _Role Management APIs_ +or in local files on the {es} nodes. {security} also supports implementing +custom roles providers. If you need to integrate with another system to retrieve +user roles, you can build a custom roles provider plugin. For more information, +see <>. + +[float] +[[roles-management-ui]] +=== Role Management UI + +{security} enables you to easily manage users and roles from within {kib}. To +manage roles, log in to {kib} and go to *Management / Elasticsearch / Roles*. + +[float] +[[roles-management-api]] +=== Role Management API + +The _Role Management APIs_ enable you to add, update, remove and retrieve roles +dynamically. When you use the APIs to manage roles in the `native` realm, the +roles are stored in an internal {es} index. For more information and examples, +see {ref}/security-api-roles.html[Role Management APIs]. + +[float] +[[roles-management-file]] +=== File-based Role Management + +Apart from the _Role Management APIs_, roles can also be defined in local +`roles.yml` file located in `CONFIG_DIR/x-pack`. This is a YAML file where each +role definition is keyed by its name. + +[IMPORTANT] +============================== +If the same role name is used in the `roles.yml` file and through the +_Role Management APIs_, the role found in the file will be used. 
+==============================
+
+While the _Role Management APIs_ are the preferred mechanism to define roles,
+using the `roles.yml` file becomes useful if you want to define fixed roles that
+no one (besides an administrator with physical access to the {es} nodes)
+would be able to change.
+
+[IMPORTANT]
+==============================
+The `roles.yml` file is managed locally by the node and is not managed globally
+by the cluster. This means that with a typical multi-node cluster, the exact same
+changes need to be applied on each and every node in the cluster.
+
+A safer approach would be to apply the change on one of the nodes and have the
+`roles.yml` distributed/copied to all other nodes in the cluster (either
+manually or using a configuration management system such as Puppet or Chef).
+==============================
+
+The following snippet shows an example of the `roles.yml` file configuration:
+
+[source,yaml]
+-----------------------------------
+click_admins:
+  run_as: [ 'clicks_watcher_1' ]
+  cluster: [ 'monitor' ]
+  indices:
+    - names: [ 'events-*' ]
+      privileges: [ 'read' ]
+      field_security:
+        grant: [ 'category', '@timestamp', 'message' ]
+      query: '{"match": {"category": "click"}}'
+-----------------------------------
+
+{security} continuously monitors the `roles.yml` file and automatically picks
+up and applies any changes to it.
+
+include::authorization/alias-privileges.asciidoc[]
+
+include::authorization/mapping-roles.asciidoc[]
+
+include::authorization/field-and-document-access-control.asciidoc[]
+
+include::authorization/run-as-privilege.asciidoc[]
+
+include::authorization/custom-roles-provider.asciidoc[]
diff --git a/x-pack/docs/en/security/authorization/alias-privileges.asciidoc b/x-pack/docs/en/security/authorization/alias-privileges.asciidoc
new file mode 100644
index 0000000000000..6916e2ab2ca30
--- /dev/null
+++ b/x-pack/docs/en/security/authorization/alias-privileges.asciidoc
@@ -0,0 +1,101 @@
+[[securing-aliases]]
+=== Granting Privileges for Indices & Aliases
+
+Elasticsearch allows you to execute operations against {ref}/indices-aliases.html[index aliases],
+which are effectively virtual indices. An alias points to one or more indices,
+holds metadata and potentially a filter. {security} treats aliases and indices
+the same. Privileges for indices actions are granted on specific indices or
+aliases. In order for an indices action to be authorized, the user that executes
+it needs to have permissions for that action on all the specific indices or
+aliases that the request relates to.
+
+Let's look at an example. Assume we have an index called `2015`, an alias that
+points to it called `current_year`, and a user with the following role:
+
+[source,js]
+--------------------------------------------------
+{
+  "names" : [ "2015" ],
+  "privileges" : [ "read" ]
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+The user attempts to retrieve a document from `current_year`:
+
+[source,shell]
+-------------------------------------------------------------------------------
+GET /current_year/event/1
+-------------------------------------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT 2015\n{"aliases": {"current_year": {}}}\nPUT 2015\/event\/1\n{}\n/]
+
+The above request gets rejected, although the user has the `read` privilege on the
+concrete index that the `current_year` alias points to. The correct permission
+would be as follows:
+
+[source,js]
+--------------------------------------------------
+{
+  "names" : [ "current_year" ],
+  "privileges" : [ "read" ]
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+[float]
+==== Managing aliases
+
+Unlike creating indices, which requires the `create_index` privilege, adding,
+removing and retrieving aliases requires the `manage` permission. Aliases can be
+added to an index directly as part of the index creation:
+
+[source,shell]
+-------------------------------------------------------------------------------
+PUT /2015
+{
+  "aliases" : {
+    "current_year" : {}
+  }
+}
+-------------------------------------------------------------------------------
+// CONSOLE
+
+or via the dedicated aliases API if the index already exists:
+
+[source,shell]
+-------------------------------------------------------------------------------
+POST /_aliases
+{
+  "actions" : [
+    { "add" : { "index" : "2015", "alias" : "current_year" } }
+  ]
+}
+-------------------------------------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT 2015\n/]
+
+The above requests both require the `manage` privilege on the alias name as well
+as the targeted index, as follows:
+
+[source,js]
+--------------------------------------------------
+{
+  "names" : [ "20*", "current_year" ],
+  "privileges" : [ "manage" ]
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+The index aliases API also allows you to delete aliases from existing indices.
+The privileges required for such a request are the same as above. Both the index
+and the alias need the `manage` permission.
+
+
+[float]
+==== Filtered aliases
+
+Aliases can hold a filter, which allows you to select a subset of the documents
+in the physical index that can be accessed. These filters are not always applied
+and should not be used in place of
+<>.
diff --git a/x-pack/docs/en/security/authorization/custom-roles-provider.asciidoc b/x-pack/docs/en/security/authorization/custom-roles-provider.asciidoc
new file mode 100644
index 0000000000000..9056467ced9f9
--- /dev/null
+++ b/x-pack/docs/en/security/authorization/custom-roles-provider.asciidoc
@@ -0,0 +1,99 @@
+[[custom-roles-provider]]
+=== Custom Roles Provider Extension
+
+If you need to retrieve user roles from a system not supported out-of-the-box
+by {security}, you can create a custom roles provider to retrieve and resolve
+roles. You implement a custom roles provider as an SPI loaded security extension
+as part of an ordinary Elasticsearch plugin.
+
+[[implementing-custom-roles-provider]]
+==== Implementing a Custom Roles Provider
+
+To create a custom roles provider:
+
+. Implement the interface `BiConsumer<Set<String>, ActionListener<Set<RoleDescriptor>>>`.
+  That is to say, the implementation consists of one method that takes a set of strings,
+  which are the role names to resolve, and an `ActionListener` on which the set of resolved
+  role descriptors is passed back as the response.
+. The custom roles provider implementation must take special care not to block on any I/O
+  operations. It is the responsibility of the implementation to ensure asynchronous behavior
+  and non-blocking calls; this is made easier by the provided `ActionListener`, on which the
+  response can be sent once the roles have been resolved.
+
+To package your custom roles provider as a plugin:
+
+. Implement an extension class for your roles provider that implements
+  `org.elasticsearch.xpack.core.security.SecurityExtension`. There you need to
+  override one or more of the following methods:
++
+[source,java]
+----------------------------------------------------
+@Override
+public List<BiConsumer<Set<String>, ActionListener<Set<RoleDescriptor>>>>
+getRolesProviders(Settings settings, ResourceWatcherService resourceWatcherService) {
+    ...
+}
+----------------------------------------------------
++
+The `getRolesProviders` method is used to provide a list of custom roles providers that
+will be used to resolve role names if the role names could not be resolved by the reserved
+roles or native roles stores. The list should be returned in the order that the custom role
+providers should be invoked to resolve roles. For example, if `getRolesProviders` returns two
+instances of roles providers, and both of them are able to resolve role `A`, then the resolved
+role descriptor that will be used for role `A` will be the one resolved by the first roles
+provider in the list.
++
+[source,java]
+----------------------------------------------------
+@Override
+public List<String> getSettingsFilter() {
+    ...
+}
+----------------------------------------------------
++
+The `Plugin#getSettingsFilter` method returns a list of setting names that should be
+filtered from the settings APIs because they may contain sensitive credentials. Note that
+this method is not part of the `SecurityExtension` interface; it is available as part of
+the Elasticsearch plugin main class.
+
+. Create a build configuration file for the plugin; Gradle is our recommendation.
+. Create a `META-INF/services/org.elasticsearch.xpack.core.security.SecurityExtension` descriptor file for the
+  extension that contains the fully qualified class name of your
+  `org.elasticsearch.xpack.core.security.SecurityExtension` implementation.
+. Bundle everything into a single zip file.
+
+[[using-custom-roles-provider]]
+==== Using a Custom Roles Provider to Resolve Roles
+
+To use a custom roles provider:
+
+. Install the roles provider extension on each node in the cluster. You run
+  `bin/elasticsearch-plugin` with the `install` sub-command and specify the URL
+  pointing to the zip file that contains the extension. For example:
++
+[source,shell]
+----------------------------------------
+bin/elasticsearch-plugin install file:////my-roles-provider-1.0.zip
+----------------------------------------
+
+. Add any configuration parameters for any of the custom roles provider implementations
+to `elasticsearch.yml`. The settings are not namespaced and you have access to any
+settings when constructing the custom roles providers, although it is recommended to
+have a namespacing convention for custom roles providers to keep your `elasticsearch.yml`
+configuration easy to understand.
++
+For example, if you have a custom roles provider that
+resolves roles from reading a blob in an S3 bucket on AWS, then you would specify settings
+in `elasticsearch.yml` such as:
++
+[source,js]
+----------------------------------------
+custom_roles_provider.s3_roles_provider.bucket: roles
+custom_roles_provider.s3_roles_provider.region: us-east-1
+custom_roles_provider.s3_roles_provider.secret_key: xxx
+custom_roles_provider.s3_roles_provider.access_key: xxx
+----------------------------------------
++
+These settings will be available as the first parameter in the `getRolesProviders` method, from
+where you will create and return the custom roles provider instances.
+
+. Restart Elasticsearch.
diff --git a/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc b/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc new file mode 100644 index 0000000000000..88d0e157ca052 --- /dev/null +++ b/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc @@ -0,0 +1,448 @@ +[[field-and-document-access-control]] +=== Setting Up Field and Document Level Security + +You can control access to data within an index by adding field and document level +security permissions to a role. Field level security permissions restrict access +to particular fields within a document. Document level security permissions +restrict access to particular documents within an index. + +NOTE: Document and field level security is currently meant to operate with +read-only privileged accounts. Users with document and field level +security enabled for an index should not perform write operations. + +A role can define both field and document level permissions on a per-index basis. +A role that doesn’t specify field level permissions grants access to ALL fields. +Similarly, a role that doesn't specify document level permissions grants access +to ALL documents in the index. + +[IMPORTANT] +===================================================================== +When assigning users multiple roles, be careful that you don't inadvertently +grant wider access than intended. Each user has a single set of field level and +document level permissions per index. See <>. +===================================================================== + +[[field-level-security]] +==== Field Level Security + +To enable field level security, specify the fields that each role can access +as part of the indices permissions in a role definition. Field level security is +thus bound to a well-defined set of indices (and potentially a set of +<>). + +The following role definition grants read access only to the `category`, +`@timestamp`, and `message` fields in all the `events-*` indices. + +[source,js] +-------------------------------------------------- +{ + "indices": [ + { + "names": [ "events-*" ], + "privileges": [ "read" ], + "field_security" : { + "grant" : [ "category", "@timestamp", "message" ] + } + } + ] +} +-------------------------------------------------- + +Access to the following meta fields is always allowed: `_id`, +`_type`, `_parent`, `_routing`, `_timestamp`, `_ttl`, `_size` and `_index`. If +you specify an empty list of fields, only these meta fields are accessible. + +NOTE: Omitting the fields entry entirely disables field-level security. + +You can also specify field expressions. For example, the following +example grants read access to all fields that start with an `event_` prefix: + +[source,js] +-------------------------------------------------- +{ + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant" : [ "event_*" ] + } + } + ] +} +-------------------------------------------------- + +Use the dot notations to refer to nested fields in more complex documents. 
For +example, assuming the following document: + +[source,js] +-------------------------------------------------- +{ + "customer": { + "handle": "Jim", + "email": "jim@mycompany.com", + "phone": "555-555-5555" + } +} +-------------------------------------------------- + +The following role definition enables only read access to the customer `handle` +field: + +[source,js] +-------------------------------------------------- +{ + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant" : [ "customer.handle" ] + } + } + ] +} +-------------------------------------------------- + +This is where wildcard support shines. For example, use `customer.*` to enable +only read access to the `customer` data: + +[source,js] +-------------------------------------------------- +{ + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant" : [ "customer.*" ] + } + } + ] +} +-------------------------------------------------- + +You can deny permission to access fields with the following syntax: + +[source,js] +-------------------------------------------------- +{ + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant" : [ "*"], + "except": [ "customer.handle" ] + } + } + ] +} +-------------------------------------------------- + + +The following rules apply: + +* The absence of `field_security` in a role is equivalent to * access. +* If permission has been granted explicitly to some fields, you can specify +denied fields. The denied fields must be a subset of the fields to which +permissions were granted. +* Defining denied and granted fields implies access to all granted fields except +those which match the pattern in the denied fields. + +For example: + +[source,js] +-------------------------------------------------- +{ + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "except": [ "customer.handle" ], + "grant" : [ "customer.*" ] + } + } + ] +} +-------------------------------------------------- + +In the above example, users can read all fields with the prefix "customer." +except for "customer.handle". + +An empty array for `grant` (for example, `"grant" : []`) means that access has +not been granted to any fields. + +===== Field Level Security and Roles + +When a user has several roles that specify field level permissions, the +resulting field level permissions per index are the union of the individual role +permissions. For example, if these two roles are merged: + +[source,js] +-------------------------------------------------- +{ + // role 1 + ... + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant": [ "a.*" ], + "except" : [ "a.b*" ] + } + } + ] +} + +{ + // role 2 + ... + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant": [ "a.b*" ], + "except" : [ "a.b.c*" ] + } + } + ] +} +-------------------------------------------------- + +The resulting permission is equal to: + +[source,js] +-------------------------------------------------- +{ + // role 1 + role 2 + ... + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "field_security" : { + "grant": [ "a.*" ], + "except" : [ "a.b.c*" ] + } + } + ] +} +-------------------------------------------------- + + +[[document-level-security]] +==== Document Level Security + +Document level security restricts the documents that users have read access to. 
+To enable document level security, specify a query that matches all the +accessible documents as part of the indices permissions within a role definition. +Document level security is thus bound to a well defined set of indices. + +Enabling document level security restricts which documents can be accessed from +any document-based read API. To enable document level security, you use a query +to specify the documents that each role can access in the `roles.yml` file. +You specify the document query with the `query` option. The document query is +associated with a particular index or index pattern and operates in conjunction +with the privileges specified for the indices. + +The following role definition grants read access only to documents that +belong to the `click` category within all the `events-*` indices: + +[source,js] +-------------------------------------------------- +{ + "indices": [ + { + "names": [ "events-*" ], + "privileges": [ "read" ], + "query": "{\"match\": {\"category\": \"click\"}}" + } + ] +} +-------------------------------------------------- + +NOTE: Omitting the `query` entry entirely disables document level security for + the respective indices permission entry. + +The specified `query` expects the same format as if it was defined in the +search request and supports the full {es} {ref}/query-dsl.html[Query DSL]. + +For example, the following role grants read access only to the documents whose +`department_id` equals `12`: + +[source,js] +-------------------------------------------------- +{ + "indices" : [ + { + "names" : [ "*" ], + "privileges" : [ "read" ], + "query" : { + "term" : { "department_id" : 12 } + } + } + ] +} +-------------------------------------------------- + +NOTE: `query` also accepts queries written as string values. + +[[templating-role-query]] +===== Templating a Role Query + +You can use Mustache templates in a role query to insert the username of the +current authenticated user into the role. Like other places in {es} that support +templating or scripting, you can specify inline, stored, or file-based templates +and define custom parameters. You access the details for the current +authenticated user through the `_user` parameter. + +For example, the following role query uses a template to insert the username +of the current authenticated user: + +[source,js] +-------------------------------------------------- +{ + "indices" : [ + { + "names" : [ "my_index" ], + "privileges" : [ "read" ], + "query" : { + "template" : { + "source" : { + "term" : { "acl.username" : "{{_user.username}}" } + } + } + } + } + ] +} +-------------------------------------------------- + +You can access the following information through the `_user` variable: + +[options="header"] +|====== +| Property | Description +| `_user.username` | The username of the current authenticated user. +| `_user.full_name` | If specified, the full name of the current authenticated user. +| `_user.email` | If specified, the email of the current authenticated user. +| `_user.roles` | If associated, a list of the role names of the current authenticated user. +| `_user.metadata` | If specified, a hash holding custom metadata of the current authenticated user. +|====== + +You can also access custom user metadata. 
For example, if you maintain a +`group_id` in your user metadata, you can apply document level security +based on the `group.id` field in your documents: + +[source,js] +-------------------------------------------------- +{ + "indices" : [ + { + "names" : [ "my_index" ], + "privileges" : [ "read" ], + "query" : { + "template" : { + "source" : { + "term" : { "group.id" : "{{_user.metadata.group_id}}" } + } + } + } + } + ] +} +-------------------------------------------------- + +[[set-security-user-processor]] +===== Set Security User Ingest Processor + +If an index is shared by many small users it makes sense to put all these users +into the same index. Having a dedicated index or shard per user is wasteful. +To guarantee that a user reads only their own documents, it makes sense to set up +document level security. In this scenario, each document must have the username +or role name associated with it, so that this information can be used by the +role query for document level security. This is a situation where the +`set_security_user` ingest processor can help. + +NOTE: Document level security doesn't apply to write APIs. You must use unique +ids for each user that uses the same index, otherwise they might overwrite other +users' documents. The ingest processor just adds properties for the current +authenticated user to the documents that are being indexed. + +The `set_security_user` processor attaches user-related details (such as +`username`, `roles`, `email`, `full_name` and `metadata` ) from the current +authenticated user to the current document by pre-processing the ingest. When +you index data with an ingest pipeline, user details are automatically attached +to the document. For example: + +[source,js] +-------------------------------------------------- +PUT shared-logs/log/1?pipeline=my_pipeline_id +{ + ... +} +-------------------------------------------------- + +Read the {ref}/ingest.html[ingest docs] for more information +about setting up a pipeline and other processors. + +[[set-security-user-options]] +.Set Security User Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to store the user information into. +| `properties` | no | [`username`, `roles`, `email`, `full_name`, `metadata`] | Controls what user related properties are added to the `field`. +|====== + +The following example adds all user details for the current authenticated user +to the `user` field for all documents that are processed by this pipeline: + +[source,js] +-------------------------------------------------- +{ + "processors" : [ + { + "set_security_user": { + "field": "user" + } + } + ] +} +-------------------------------------------------- + +[[multiple-roles-dls-fls]] +==== Multiple Roles with Document and Field Level Security + +A user can have many roles and each role can define different permissions on the +same index. It is important to understand the behavior of document and field +level security in this scenario. + +Document level security takes into account each role held by the user and +combines each document level security query for a given index with an "OR". This +means that only one of the role queries must match for a document to be returned. +For example, if a role grants access to an index without document level security +and another grants access with document level security, document level security +is not applied; the user with both roles has access to all of the documents in +the index. 
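+
+For illustration, the following sketch shows two hypothetical roles that both
+grant `read` access to the same indices but with different document level
+security queries (the role bodies and queries are examples, not built-in roles):
+
+[source,js]
+--------------------------------------------------
+{
+  // hypothetical role: clicks_reader
+  "indices": [
+    {
+      "names": [ "events-*" ],
+      "privileges": [ "read" ],
+      "query": "{\"match\": {\"category\": \"click\"}}"
+    }
+  ]
+}
+
+{
+  // hypothetical role: purchases_reader
+  "indices": [
+    {
+      "names": [ "events-*" ],
+      "privileges": [ "read" ],
+      "query": "{\"match\": {\"category\": \"purchase\"}}"
+    }
+  ]
+}
+--------------------------------------------------
+
+A user who is assigned both of these roles can read any document in the
+`events-*` indices whose category is either `click` or `purchase`, because the
+two role queries are combined with an "OR".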
+ +Field level security takes into account each role the user has and combines +all of the fields listed into a single set for each index. For example, if a +role grants access to an index without field level security and another grants +access with field level security, field level security is not be applied for +that index; the user with both roles has access to all of the fields in the +index. + +For example, let's say `role_a` grants access to only the `address` field of the +documents in `index1`; it doesn't specify any document restrictions. Conversely, +`role_b` limits access to a subset of the documents in `index1`; it doesn't +specify any field restrictions. If you assign a user both roles, `role_a` gives +the user access to all documents and `role_b` gives the user access to all +fields. + +If you need to restrict access to both documents and fields, consider splitting +documents by index instead. diff --git a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc new file mode 100644 index 0000000000000..590546e217c86 --- /dev/null +++ b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc @@ -0,0 +1,156 @@ +[[mapping-roles]] +=== Mapping Users and Groups to Roles + +If you authenticate users with the `native` or `file` realms, you can manage +role assignment by using the <> or +the {ref}/users-command.html[users] command-line tool respectively. + +For other types of realms, you must create _role-mappings_ that define which +roles should be assigned to each user based on their username, groups, or +other metadata. + +{security} allows role-mappings to be defined via an +<>, or managed through <>. +These two sources of role-mapping are combined inside of {security}, so it is +possible for a single user to have some roles that have been mapped through +the API, and other roles that are mapped through files. + +When you use role-mappings, you assign existing roles to users. +The available roles should either be added using the +{ref}/security-api-roles.html[Role Management APIs] or defined in the +<>. Either role-mapping method can use +either role management method. For example, when you use the role mapping API, +you are able to map users to both API-managed roles and file-managed roles +(and likewise for file-based role-mappings). + +[[mapping-roles-api]] +==== Using the Role Mapping API + +You can define role-mappings through the +{ref}/security-api-role-mapping.html[role mapping API]. + +[[mapping-roles-file]] +==== Using Role Mapping Files + +To use file based role-mappings, you must configure the mappings in a YAML file +and copy it to each node in the cluster. Tools like Puppet or Chef can help with +this. + +By default, role mappings are stored in `ES_PATH_CONF/x-pack/role_mapping.yml`, +where `ES_PATH_CONF` is `ES_HOME/config` (zip/tar installations) or +`/etc/elasticsearch` (package installations). To specify a different location, +you configure the `files.role_mapping` realm settings in `elasticsearch.yml`. +This setting enables you to use a different set of mappings for each realm type: + +|===== +| `xpack.security.authc.ldap.files.role_mapping` | | | The location of the role mappings for LDAP realms. +| `xpack.security.authc.active_directory.files.role_mapping` | | | The location of the role mappings for Active Directory realms. +| `xpack.security.authc.pki.files.role_mapping` | | | The location of the role mappings for PKI realms. 
+|===== + +Within the role mapping file, the security roles are keys and groups and users +are values. The mappings can have a many-to-many relationship. When you map roles +to groups, the roles of a user in that group are the combination of the roles +assigned to that group and the roles assigned to that user. + +By default, {security} checks role mapping files for changes every 5 seconds. +You can change this default behavior by changing the +`resource.reload.interval.high` setting in the `elasticsearch.yml` file. Since +this is a common setting in Elasticsearch, changing its value might effect other +schedules in the system. + +==== Realm Specific Details +[float] +[[ldap-role-mapping]] +===== Active Directory and LDAP Realms + +To specify users and groups in the role mappings, you use their +_Distinguished Names_ (DNs). A DN is a string that uniquely identifies the user +or group, for example `"cn=John Doe,cn=contractors,dc=example,dc=com"`. + +NOTE: {security} only supports Active Directory security groups. You cannot map + distribution groups to roles. + +For example, the following snippet uses the file-based method to map the +`admins` group to the `monitoring` role and map the `John Doe` user, the +`users` group, and the `admins` group to the `user` role. + +[source, yaml] +------------------------------------------------------------ +monitoring: <1> + - "cn=admins,dc=example,dc=com" <2> +user: + - "cn=John Doe,cn=contractors,dc=example,dc=com" <3> + - "cn=users,dc=example,dc=com" + - "cn=admins,dc=example,dc=com" +------------------------------------------------------------ +<1> The name of a {security} role. +<2> The distinguished name of an LDAP group or an Active Directory security group. +<3> The distinguished name of an LDAP or Active Directory user. + +You can use the role-mapping API to define equivalent mappings as follows: +[source,js] +-------------------------------------------------- +PUT _xpack/security/role_mapping/admins +{ + "roles" : [ "monitoring", "user" ], + "rules" : { "field" : { "groups" : "cn=admins,dc=example,dc=com" } }, + "enabled": true +} +-------------------------------------------------- +// CONSOLE + +[source,js] +-------------------------------------------------- +PUT _xpack/security/role_mapping/basic_users +{ + "roles" : [ "user" ], + "rules" : { "any" : [ + { "field" : { "dn" : "cn=John Doe,cn=contractors,dc=example,dc=com" } }, + { "field" : { "groups" : "cn=users,dc=example,dc=com" } } + ] }, + "enabled": true +} +-------------------------------------------------- +// CONSOLE + +[float] +[[pki-role-mapping]] +===== PKI Realms + +PKI realms support mapping users to roles, but you cannot map groups as +the PKI realm has no notion of a group. 
+
+This is an example using a file-based mapping:
+
+[source, yaml]
+------------------------------------------------------------
+monitoring:
+  - "cn=Admin,ou=example,o=com"
+user:
+  - "cn=John Doe,ou=example,o=com"
+------------------------------------------------------------
+
+The following example creates equivalent mappings using the API:
+
+[source,js]
+--------------------------------------------------
+PUT _xpack/security/role_mapping/admin_user
+{
+  "roles" : [ "monitoring" ],
+  "rules" : { "field" : { "dn" : "cn=Admin,ou=example,o=com" } },
+  "enabled": true
+}
+--------------------------------------------------
+// CONSOLE
+
+[source,js]
+--------------------------------------------------
+PUT _xpack/security/role_mapping/basic_user
+{
+  "roles" : [ "user" ],
+  "rules" : { "field" : { "dn" : "cn=John Doe,ou=example,o=com" } },
+  "enabled": true
+}
+--------------------------------------------------
+// CONSOLE
diff --git a/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc b/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc
new file mode 100644
index 0000000000000..e246f2b194281
--- /dev/null
+++ b/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc
@@ -0,0 +1,33 @@
+[[run-as-privilege]]
+=== Submitting Requests on Behalf of Other Users
+
+{security} supports a permission that enables an authenticated user to submit
+requests on behalf of other users. If your application already authenticates
+users, you can use the _run as_ mechanism to restrict data access according to
+{security} permissions without having to re-authenticate each user.
+
+To "run as" (impersonate) another user, you must be able to retrieve the user from
+the realm you use to authenticate. Both the internal `native` and `file` realms
+support this out of the box. The LDAP realm must be configured to run in
+<>. The Active Directory realm must be
+<> to support
+_run as_. The PKI realm does not support _run as_.
+
+To submit requests on behalf of other users, you need to have the `run_as`
+permission. For example, the following role grants permission to submit requests
+on behalf of `jacknich` or `rdeniro`:
+
+[source,js]
+---------------------------------------------------
+{
+  "run_as" : [ "jacknich", "rdeniro" ]
+}
+---------------------------------------------------
+
+To submit a request as another user, you specify the user in the
+`es-security-runas-user` request header. For example:
+
+[source,shell]
+---------------------------------------------------
+curl -H "es-security-runas-user: jacknich" -u es_admin -XGET 'http://localhost:9200/'
+---------------------------------------------------
diff --git a/x-pack/docs/en/security/ccs-clients-integrations.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations.asciidoc
new file mode 100644
index 0000000000000..e25586dfb371c
--- /dev/null
+++ b/x-pack/docs/en/security/ccs-clients-integrations.asciidoc
@@ -0,0 +1,44 @@
+[[ccs-clients-integrations]]
+== Cross Cluster Search, Clients and Integrations
+
+When using {ref}/modules-cross-cluster-search.html[Cross Cluster Search]
+you need to take extra steps to secure communications with the connected
+clusters.
+
+* <>
+
+You will need to update the configuration for several clients to work with a
+secured cluster:
+
+* <>
+* <>
+
+
+{security} enables you to secure your {es} cluster. But {es} itself is only one
+product within the Elastic Stack.
It is often the case that other products in +the stack are connected to the cluster and therefore need to be secured as well, +or at least communicate with the cluster in a secured way: + +* <> +* {auditbeat-ref}/securing-beats.html[Auditbeat] +* {filebeat-ref}/securing-beats.html[Filebeat] +* {heartbeat-ref}/securing-beats.html[Heartbeat] +* {kibana-ref}/using-kibana-with-security.html[{kib}] +* {logstash-ref}/ls-security.html[Logstash] +* {metricbeat-ref}/securing-beats.html[Metricbeat] +* <> +* {packetbeat-ref}/securing-beats.html[Packetbeat] +* {kibana-ref}/secure-reporting.html[Reporting] +* {winlogbeat-ref}/securing-beats.html[Winlogbeat] + +include::ccs-clients-integrations/cross-cluster.asciidoc[] + +include::ccs-clients-integrations/java.asciidoc[] + +include::ccs-clients-integrations/http.asciidoc[] + +include::ccs-clients-integrations/hadoop.asciidoc[] + +include::ccs-clients-integrations/beats.asciidoc[] + +include::ccs-clients-integrations/monitoring.asciidoc[] diff --git a/x-pack/docs/en/security/ccs-clients-integrations/beats.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/beats.asciidoc new file mode 100644 index 0000000000000..43c8be5409c28 --- /dev/null +++ b/x-pack/docs/en/security/ccs-clients-integrations/beats.asciidoc @@ -0,0 +1,11 @@ +[[beats]] +=== Beats and Security + +See: + +* {auditbeat-ref}/securing-beats.html[Auditbeat and {security}] +* {filebeat-ref}/securing-beats.html[Filebeat and {security}] +* {heartbeat-ref}/securing-beats.html[Heartbeat and {security}] +* {metricbeat-ref}/securing-beats.html[Metricbeat and {security}] +* {packetbeat-ref}/securing-beats.html[Packetbeat and {security}] +* {winlogbeat-ref}/securing-beats.html[Winlogbeat and {security}] diff --git a/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc new file mode 100644 index 0000000000000..1cbcf623a5fc3 --- /dev/null +++ b/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc @@ -0,0 +1,160 @@ +[[cross-cluster-configuring]] +=== Cross Cluster Search and Security + +{ref}/modules-cross-cluster-search.html[Cross Cluster Search] enables +federated search across multiple clusters. When using cross cluster search +with secured clusters, all clusters must have {security} enabled. + +The local cluster (the cluster used to initiate cross cluster search) must be +allowed to connect to the remote clusters, which means that the CA used to +sign the SSL/TLS key of the local cluster must be trusted by the remote +clusters. + +User authentication is performed on the local cluster and the user and user's +roles are passed to the remote clusters. A remote cluster checks the user's +roles against its local role definitions to determine which indices the user +is allowed to access. + + +[WARNING] +This feature was added as Beta in {es} `v5.3` with further improvements made in +5.4 and 5.5. It requires gateway eligible nodes to be on `v5.5` onwards. + +To use cross cluster search with secured clusters: + +* Enable {security} on every node in each connected cluster. For more +information about the `xpack.security.enabled` setting, see +{ref}/security-settings.html[Security Settings in {es}]. + +* Enable encryption globally. To encrypt communications, you must enable + <> on every node. + +* Enable a trust relationship between the cluster used for performing cross + cluster search (the local cluster) and all remote clusters. 
This can be done + either by: ++ + ** Using the same certificate authority to generate certificates for all + connected clusters, or + ** Adding the CA certificate from the local cluster as a trusted CA in + each remote cluster (see {ref}/security-settings.html#transport-tls-ssl-settings[Transport TLS settings]). + +* Configure the local cluster to connect to remote clusters as described + in {ref}/modules-cross-cluster-search.html#_configuring_cross_cluster_search[Configuring Cross Cluster Search]. + For example, the following configuration adds two remote clusters + to the local cluster: ++ +-- +[source,js] +----------------------------------------------------------- +PUT _cluster/settings +{ + "persistent": { + "search": { + "remote": { + "cluster_one": { + "seeds": [ "10.0.1.1:9300" ] + }, + "cluster_two": { + "seeds": [ "10.0.2.1:9300" ] + } + } + } + } +} +----------------------------------------------------------- +// CONSOLE +-- + +* On the local cluster, ensure that users are assigned to (at least) one role + that exists on the remote clusters. On the remote clusters, use that role + to define which indices the user may access. (See <>). + +==== Example Configuration of Cross Cluster Search + +In the following example, we will configure the user `alice` to have permissions +to search any index starting with `logs-` in cluster `two` from cluster `one`. + +First, enable cluster `one` to perform cross cluster search on remote cluster +`two` by running the following request as the superuser on cluster `one`: + +[source,js] +----------------------------------------------------------- +PUT _cluster/settings +{ + "persistent": { + "search.remote.cluster_two.seeds": [ "10.0.2.1:9300" ] + } +} +----------------------------------------------------------- +// CONSOLE + +Next, set up a role called `cluster_two_logs` on both cluster `one` and +cluster `two`. + +On cluster `one`, this role does not need any special privileges: + +[source,js] +----------------------------------------------------------- +POST /_xpack/security/role/cluster_two_logs +{ +} +----------------------------------------------------------- +// CONSOLE + +On cluster `two`, this role allows the user to query local indices called +`logs-` from a remote cluster: + +[source,js] +----------------------------------------------------------- +POST /_xpack/security/role/cluster_two_logs +{ + "cluster": [ + "transport_client" + ], + "indices": [ + { + "names": [ + "logs-*" + ], + "privileges": [ + "read", + "read_cross_cluster" + ] + } + ] +} +----------------------------------------------------------- +// CONSOLE + +Finally, create a user on cluster `one` and apply the `cluster_two_logs` role: + +[source,js] +----------------------------------------------------------- +POST /_xpack/security/user/alice +{ + "password" : "somepassword", + "roles" : [ "cluster_two_logs" ], + "full_name" : "Alice", + "email" : "alice@example.com", + "enabled": true +} +----------------------------------------------------------- +// CONSOLE + +With all of the above setup, the user `alice` is able to search indices in +cluster `two` as follows: + +[source,js] +----------------------------------------------------------- +GET two:logs-2017.04/_search <1> +{ + "query": { + "match_all": {} + } +} +----------------------------------------------------------- +// CONSOLE +// TEST[skip:todo] +//TBD: Is there a missing description of the <1> callout above? 
+ +include::{xkb-repo-dir}/security/cross-cluster-kibana.asciidoc[] diff --git a/x-pack/docs/en/security/ccs-clients-integrations/hadoop.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/hadoop.asciidoc new file mode 100644 index 0000000000000..0613f1ef77131 --- /dev/null +++ b/x-pack/docs/en/security/ccs-clients-integrations/hadoop.asciidoc @@ -0,0 +1,23 @@ +[[hadoop]] +=== ES-Hadoop and Security + +Elasticsearch for Apache Hadoop ("ES-Hadoop") is capable of using HTTP basic and +PKI authentication and/or TLS/SSL when accessing an Elasticsearch cluster. For +full details please refer to the ES-Hadoop documentation, in particular the +`Security` section. + +For authentication purposes, select the user for your ES-Hadoop client (for +maintenance purposes it is best to create a dedicated user). Then, assign that +user to a role with the privileges required by your Hadoop/Spark/Storm job. +Configure ES-Hadoop to use the user name and password through the +`es.net.http.auth.user` and `es.net.http.auth.pass` properties. + +If PKI authentication is enabled, setup the appropriate `keystore` and `truststore` +instead through `es.net.ssl.keystore.location` and `es.net.truststore.location` +(and their respective `.pass` properties to specify the password). + +For secured transport, enable SSL/TLS through the `es.net.ssl` property by +setting it to `true`. Depending on your SSL configuration (keystore, truststore, etc...) +you might need to set other parameters as well - please refer to the +http://www.elastic.co/guide/en/elasticsearch/hadoop/current/configuration.html[ES-Hadoop] documentation, +specifically the `Configuration` and `Security` chapters. diff --git a/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc new file mode 100644 index 0000000000000..d78c32bc361ff --- /dev/null +++ b/x-pack/docs/en/security/ccs-clients-integrations/http.asciidoc @@ -0,0 +1,62 @@ +[[http-clients]] +=== HTTP/REST Clients and Security + +{security} works with standard HTTP {wikipedia}/Basic_access_authentication[basic authentication] +headers to authenticate users. Since Elasticsearch is stateless, this header must +be sent with every request: + +[source,shell] +-------------------------------------------------- +Authorization: Basic <1> +-------------------------------------------------- +<1> The `` is computed as `base64(USERNAME:PASSWORD)` + +[float] +==== Client examples + +This example uses `curl` without basic auth to create an index: + +[source,shell] +------------------------------------------------------------------------------- +curl -XPUT 'localhost:9200/idx' +------------------------------------------------------------------------------- + +[source,js] +------------------------------------------------------------------------------- +{ + "error": "AuthenticationException[Missing authentication token]", + "status": 401 +} +------------------------------------------------------------------------------- + +Since no user is associated with the request above, an authentication error is +returned. 
Now we'll use `curl` with basic auth to create an index as the +`rdeniro` user: + +[source,shell] +--------------------------------------------------------- +curl --user rdeniro:taxidriver -XPUT 'localhost:9200/idx' +--------------------------------------------------------- + +[source,js] +--------------------------------------------------------- +{ + "acknowledged": true +} +--------------------------------------------------------- + +[float] +==== Client Libraries over HTTP + +For more information about how to use {security} with the language specific clients +please refer to +https://github.com/elasticsearch/elasticsearch-ruby/tree/master/elasticsearch-transport#authentication[Ruby], +http://elasticsearch-py.readthedocs.org/en/master/#ssl-and-authentication[Python], +https://metacpan.org/pod/Search::Elasticsearch::Cxn::HTTPTiny#CONFIGURATION[Perl], +http://www.elastic.co/guide/en/elasticsearch/client/php-api/current/_security.html[PHP], +http://nest.azurewebsites.net/elasticsearch-net/security.html[.NET], +http://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/auth-reference.html[Javascript] + +//// +Groovy - TODO link +//// diff --git a/x-pack/docs/en/security/ccs-clients-integrations/java.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/java.asciidoc new file mode 100644 index 0000000000000..3c537ef5ee2eb --- /dev/null +++ b/x-pack/docs/en/security/ccs-clients-integrations/java.asciidoc @@ -0,0 +1,200 @@ +[[java-clients]] +=== Java Client and Security + +deprecated[7.0.0, The `TransportClient` is deprecated in favour of the {java-rest}/java-rest-high.html[Java High Level REST Client] and will be removed in Elasticsearch 8.0. The {java-rest}/java-rest-high-level-migration.html[migration guide] describes all the steps needed to migrate.] + +{security} supports the Java http://www.elastic.co/guide/en/elasticsearch/client/java-api/current/transport-client.html[transport client] for Elasticsearch. +The transport client uses the same transport protocol that the cluster nodes use +for inter-node communication. It is very efficient as it does not have to marshall +and unmarshall JSON requests like a typical REST client. + +NOTE: Using the Java Node Client with secured clusters is not recommended or + supported. + +[float] +[[transport-client]] +==== Configuring the Transport Client to work with a Secured Cluster + +To use the transport client with a secured cluster, you need to: + +[[java-transport-client-role]] +. {ref}/setup-xpack-client.html[Configure the {xpack} transport client]. + +. Configure a user with the privileges required to start the transport client. +A default `transport_client` role is built-in to {xpack} that grants the +appropriate cluster permissions for the transport client to work with the secured +cluster. The transport client uses the _Nodes Info API_ to fetch information about +the nodes in the cluster. + +. Set up the transport client. At a minimum, you must configure `xpack.security.user` to +include the name and password of your transport client user in your requests. The +following snippet configures the user credentials globally--every request +submitted with this client includes the `transport_client_user` credentials in +its headers. ++ +-- +[source,java] +------------------------------------------------------------------------------------------------- +import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; +... 
+ +TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() + .put("cluster.name", "myClusterName") + .put("xpack.security.user", "transport_client_user:x-pack-test-password") + ... + .build()) + .addTransportAddress(new TransportAddress("localhost", 9300)) + .addTransportAddress(new TransportAddress("localhost", 9301)); +------------------------------------------------------------------------------------------------- + +WARNING: If you configure a transport client without SSL, passwords are sent in + clear text. + +You can also add an `Authorization` header to each request. If you've configured +global authorization credentials, the `Authorization` header overrides the global +authentication credentials. This is useful when an application has multiple users +who access Elasticsearch using the same client. You can set the global token to +a user that only has the `transport_client` role, and add the `transport_client` +role to the individual users. + +For example, the following snippet adds the `Authorization` header to a search +request: + +[source,java] +-------------------------------------------------------------------------------------------------- +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; + +import static UsernamePasswordToken.basicAuthHeaderValue; +... + +TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() + .put("cluster.name", "myClusterName") + .put("xpack.security.user", "transport_client_user:x-pack-test-password") + ... + .build()) + .build() + .addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9300)) + .addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9301)) + +String token = basicAuthHeaderValue("test_user", new SecureString("x-pack-test-password".toCharArray())); + +client.filterWithHeader(Collections.singletonMap("Authorization", token)) + .prepareSearch().get(); +-------------------------------------------------------------------------------------------------- +-- + +. Enable SSL to authenticate clients and encrypt communications. To enable SSL, +you need to: + +.. Configure the paths to the client's key and certificate in addition to the certificate authorities. +Client authentication requires every client to have a certification signed by a trusted CA. ++ +-- +NOTE: Client authentication is enabled by default. For information about + disabling client authentication, see <>. + +[source,java] +-------------------------------------------------------------------------------------------------- +import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; +... + +TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() + .put("cluster.name", "myClusterName") + .put("xpack.security.user", "transport_client_user:x-pack-test-password") + .put("xpack.ssl.key", "/path/to/client.key") + .put("xpack.ssl.certificate", "/path/to/client.crt") + .put("xpack.ssl.certificate_authorities", "/path/to/ca.crt") + ... + .build()); +-------------------------------------------------------------------------------------------------- +-- + +.. Enable the SSL transport by setting `xpack.security.transport.ssl.enabled` to `true` in the +client configuration. 
++ +-- +[source,java] +-------------------------------------------------------------------------------------------------- +import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; +... + +TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() + .put("cluster.name", "myClusterName") + .put("xpack.security.user", "transport_client_user:x-pack-test-password") + .put("xpack.ssl.key", "/path/to/client.key") + .put("xpack.ssl.certificate", "/path/to/client.crt") + .put("xpack.ssl.certificate_authorities", "/path/to/ca.crt") + .put("xpack.security.transport.ssl.enabled", "true") + ... + .build()) + .addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9300)) + .addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9301)) +-------------------------------------------------------------------------------------------------- +-- + +[float] +[[disabling-client-auth]] +===== Disabling Client Authentication + +If you want to disable client authentication, you can use a client-specific +transport protocol. For more information see <>. + +If you are not using client authentication and sign the Elasticsearch node +certificates with your own CA, you need to provide the path to the CA +certificate in your client configuration. + +[source,java] +------------------------------------------------------------------------------------------------------ +import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; +... + +TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() + .put("cluster.name", "myClusterName") + .put("xpack.security.user", "test_user:x-pack-test-password") + .put("xpack.ssl.certificate_authorities", "/path/to/ca.crt") + .put("xpack.security.transport.ssl.enabled", "true") + ... + .build()) + .addTransportAddress(new TransportAddress("localhost", 9300)) + .addTransportAddress(new TransportAddress("localhost", 9301)); +------------------------------------------------------------------------------------------------------ + +NOTE: If you are using a public CA that is already trusted by the Java runtime, + you do not need to set the `xpack.ssl.certificate_authorities`. + +[float] +[[connecting-anonymously]] +===== Connecting Anonymously + +To enable the transport client to connect anonymously, you must assign the +anonymous user the privileges defined in the <> +role. Anonymous access must also be enabled, of course. For more information, +see <>. + +[float] +[[security-client]] +==== Security Client + +{security} exposes its own API through the `SecurityClient` class. To get a hold +of a `SecurityClient` you'll first need to create the `XPackClient`, which is a +wrapper around the existing Elasticsearch clients (any client class implementing +`org.elasticsearch.client.Client`). + +The following example shows how you can clear {security}'s realm caches using +the `SecurityClient`: + +[source,java] +------------------------------------------------------------------------------------------------------ +Client client = ... // create the transport client + +XPackClient xpackClient = new XPackClient(client); +SecurityClient securityClient = xpackClient.security(); +ClearRealmCacheResponse response = securityClient.authc().prepareClearRealmCache() + .realms("ldap1", "ad1") <1> + .usernames("rdeniro") + .get(); +------------------------------------------------------------------------------------------------------ +<1> Clears the `ldap1` and `ad1` realm caches for the `rdeniro` user. 
diff --git a/x-pack/docs/en/security/ccs-clients-integrations/monitoring.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/monitoring.asciidoc new file mode 100644 index 0000000000000..67bffadfb296b --- /dev/null +++ b/x-pack/docs/en/security/ccs-clients-integrations/monitoring.asciidoc @@ -0,0 +1,24 @@ +[[secure-monitoring]] +=== Monitoring and Security + +<> consists of two components: an agent +that you install on on each {es} and Logstash node, and a Monitoring UI +in {kib}. The monitoring agent collects and indexes metrics from the nodes +and you visualize the data through the Monitoring dashboards in {kib}. The agent +can index data on the same {es} cluster, or send it to an external +monitoring cluster. + +To use {monitoring} with {security} enabled, you need to +{kibana-ref}/using-kibana-with-security.html[set up {kib} to work with {security}] +and create at least one user for the Monitoring UI. If you are using an external +monitoring cluster, you also need to configure a user for the monitoring agent +and configure the agent to use the appropriate credentials when communicating +with the monitoring cluster. + +For more information, see: + +* {ref}/configuring-monitoring.html[Configuring monitoring in {es}] +* {kibana-ref}/monitoring-xpack-kibana.html[Configuring monitoring in {kib}] +* {logstash-ref}/configuring-logstash.html[Configuring monitoring for Logstash nodes] + + diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc new file mode 100644 index 0000000000000..aab00fb225fd6 --- /dev/null +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -0,0 +1,132 @@ +[role="xpack"] +[[configuring-security]] +== Configuring Security in {es} +++++ +Configuring Security +++++ + +{security} enables you to easily secure a cluster. With {security}, you can +password-protect your data as well as implement more advanced security measures +such as encrypting communications, role-based access control, IP filtering, and +auditing. For more information, see +{xpack-ref}/xpack-security.html[Securing the Elastic Stack]. + +To use {security} in {es}: + +. Verify that you are using a license that includes the {security} feature. ++ +-- +If you want to try all of the {xpack} features, you can start a 30-day trial. At +the end of the trial period, you can purchase a subscription to keep using the +full functionality of the {xpack} components. For more information, see +https://www.elastic.co/subscriptions and +{xpack-ref}/license-management.html[License Management]. +-- + +. Verify that the `xpack.security.enabled` setting is `true` on each node in +your cluster. If you are using a trial license, the default value is `false`. +For more information, see {ref}/security-settings.html[Security Settings in {es}]. + +. Configure Transport Layer Security (TLS/SSL) for internode-communication. ++ +-- +NOTE: This requirement applies to clusters with more than one node and to +clusters with a single node that listens on an external interface. Single-node +clusters that use a loopback interface do not have this requirement. For more +information, see +{xpack-ref}/encrypting-communications.html[Encrypting Communications]. + +-- +.. <>. + +.. <>. + +. If it is not already running, start {es}. + +. Set the passwords for all built-in users. ++ +-- +{security} provides +{xpack-ref}/setting-up-authentication.html#built-in-users[built-in users] to +help you get up and running. 
The +elasticsearch-setup-passwords+ command is the +simplest method to set the built-in users' passwords for the first time. + +For example, you can run the command in an "interactive" mode, which prompts you +to enter new passwords for the `elastic`, `kibana`, `beats_system`, and +`logstash_system` users: + +[source,shell] +-------------------------------------------------- +bin/elasticsearch-setup-passwords interactive +-------------------------------------------------- + +For more information about the command options, see <>. + +IMPORTANT: The `elasticsearch-setup-passwords` command uses a transient bootstrap +password that is no longer valid after the command runs successfully. You cannot +run the `elasticsearch-setup-passwords` command a second time. Instead, you can +update passwords from the **Management > Users** UI in {kib} or use the security +user API. + +-- + +. Set up roles and users to control access to {es}. +For example, to grant _John Doe_ full access to all indices that match +the pattern `events*` and enable him to create visualizations and dashboards +for those indices in {kib}, you could create an `events_admin` role and +assign the role to a new `johndoe` user. ++ +-- +[source,shell] +---------------------------------------------------------- +curl -XPOST -u elastic 'localhost:9200/_xpack/security/role/events_admin' -H "Content-Type: application/json" -d '{ + "indices" : [ + { + "names" : [ "events*" ], + "privileges" : [ "all" ] + }, + { + "names" : [ ".kibana*" ], + "privileges" : [ "manage", "read", "index" ] + } + ] +}' + +curl -XPOST -u elastic 'localhost:9200/_xpack/security/user/johndoe' -H "Content-Type: application/json" -d '{ + "password" : "userpassword", + "full_name" : "John Doe", + "email" : "john.doe@anony.mous", + "roles" : [ "events_admin" ] +}' +---------------------------------------------------------- +// NOTCONSOLE +-- + +[[enable-auditing]] +. Enable auditing to keep track of attempted and successful interactions with + your {es} cluster: ++ +-- +.. Add the following setting to `elasticsearch.yml` on all nodes in your cluster: ++ +[source,yaml] +---------------------------- +xpack.security.audit.enabled: true +---------------------------- ++ +For more information, see {xpack-ref}/auditing.html[Auditing Security Events] +and <>. + +.. Restart {es}. + +By default, events are logged to a dedicated `elasticsearch-access.log` file in +`ES_HOME/logs`. You can also store the events in an {es} index for +easier analysis and control what events are logged. +-- + +include::securing-communications/securing-elasticsearch.asciidoc[] +include::securing-communications/configuring-tls-docker.asciidoc[] +include::securing-communications/enabling-cipher-suites.asciidoc[] +include::securing-communications/separating-node-client-traffic.asciidoc[] +include::{xes-repo-dir}/settings/security-settings.asciidoc[] +include::{xes-repo-dir}/settings/audit-settings.asciidoc[] diff --git a/x-pack/docs/en/security/getting-started.asciidoc b/x-pack/docs/en/security/getting-started.asciidoc new file mode 100644 index 0000000000000..8aa35a9428160 --- /dev/null +++ b/x-pack/docs/en/security/getting-started.asciidoc @@ -0,0 +1,38 @@ +[[security-getting-started]] +== Getting Started with Security + +To secure a cluster, you must enable {security} on every node in the +cluster. Basic authentication is enabled by default--to communicate +with the cluster, you must specify a username and password.
+Unless you {xpack-ref}/anonymous-access.html[enable anonymous access], all +requests that don't include a user name and password are rejected. + +To get started with {security}: + +. {ref}/configuring-security.html[Configure security in {es}]. Encrypt +inter-node communications, set passwords for the +<>, and manage your users and roles. + +. {kibana-ref}/using-kibana-with-security.html[Configure security in {kib}]. +Set the authentication credentials in {kib} and encrypt communications between +the browser and the {kib} server. + +. {logstash-ref}/ls-security.html[Configure security in Logstash]. Set the +authentication credentials for Logstash and encrypt communications between +Logstash and {es}. + +. <>. Configure authentication +credentials and encrypt connections to {es}. + +. Configure the Java transport client to use encrypted communications. +See <>. + +. Configure {es} for Apache Hadoop to use secured transport. See +{hadoop-ref}/security.html[{es} for Apache Hadoop Security]. + +Depending on your security requirements, you might also want to: + +* Integrate with {xpack-ref}/ldap-realm.html[LDAP] or {xpack-ref}/active-directory-realm.html[Active Directory], +or {xpack-ref}/pki-realm.html[require certificates] for authentication. +* Use {xpack-ref}/ip-filtering.html[IP Filtering] to allow or deny requests from particular +IP addresses or address ranges. diff --git a/x-pack/docs/en/security/gs-index.asciidoc b/x-pack/docs/en/security/gs-index.asciidoc new file mode 100644 index 0000000000000..b320b2a6d82e5 --- /dev/null +++ b/x-pack/docs/en/security/gs-index.asciidoc @@ -0,0 +1,100 @@ +[[xpack-security]] += Securing Elasticsearch and Kibana + +[partintro] +-- +{security} enables you to easily secure a cluster. With Security, +you can password-protect your data as well as implement more advanced security +measures such as encrypting communications, role-based access control, +IP filtering, and auditing. This guide describes how to configure the security +features you need, and interact with your secured cluster. + +Security protects Elasticsearch clusters by: + +* <> + with password protection, role-based access control, and IP filtering. +* <> + with message authentication and SSL/TLS encryption. +* <> + so you know who's doing what to your cluster and the data it stores. + +[float] +[[preventing-unauthorized-access]] +=== Preventing Unauthorized Access + +To prevent unauthorized access to your Elasticsearch cluster, you must have a +way to _authenticate_ users. This simply means that you need a way to validate +that a user is who they claim to be. For example, you have to make sure only +the person named _Kelsey Andorra_ can sign in as the user `kandorra`. X-Pack +Security provides a standalone authentication mechanism that enables you to +quickly password-protect your cluster. If you're already using {xpack-ref}/ldap-realm.html[LDAP], +{xpack-ref}/active-directory-realm.html[ Active Directory], or {xpack-ref}/pki-realm.html[ PKI] to manage +users in your organization, {security} is able to integrate with those +systems to perform user authentication. + +In many cases, simply authenticating users isn't enough. You also need a way to +control what data users have access to and what tasks they can perform. {security} +enables you to _authorize_ users by assigning access _privileges_ to _roles_, +and assigning those roles to users. 
For example, this +{xpack-ref}/authorization.html[role-based access control] mechanism (a.k.a. RBAC) enables +you to specify that the user `kandorra` can only perform read operations on the +`events` index and can't do anything at all with other indices. + +{security} also supports {xpack-ref}/ip-filtering.html[IP-based authorization]. You can +whitelist and blacklist specific IP addresses or subnets to control network-level +access to a server. + +[float] +[[preserving-data-integrity]] +=== Preserving Data Integrity + +A critical part of security is keeping confidential data confidential. +Elasticsearch has built-in protections against accidental data loss and +corruption. However, there's nothing to stop deliberate tampering or data +interception. {security} preserves the integrity of your data by +{xpack-ref}/ssl-tls.html[encrypting communications] to and from nodes and +{xpack-ref}/enable-message-authentication.html[authenticating messages] to verify that they +have not been tampered with or corrupted in transit during node-to-node +communication. For even greater protection, you can increase the +{xpack-ref}/ciphers.html[encryption strength] and +{xpack-ref}/separating-node-client-traffic.html[separate client traffic from node-to-node communications]. + + +[float] +[[maintaining-audit-trail]] +=== Maintaining an Audit Trail + +Keeping a system secure takes vigilance. By using {security} to maintain +an audit trail, you can easily see who is accessing your cluster and what they're +doing. By analyzing access patterns and failed attempts to access your cluster, +you can gain insights into attempted attacks and data breaches. Keeping an +auditable log of the activity in your cluster can also help diagnose operational +issues. + +[float] +=== Where to Go Next + +* <> + steps through how to install and start using Security for basic authentication. + +* {xpack-ref}/how-security-works.html[How Security Works] + provides more information about how Security supports user authentication, + authorization, and encryption. + +* {xpack-ref}/ccs-clients-integrations.html[Integrations] + shows you how to interact with an Elasticsearch cluster protected by + X-Pack Security. + +* {xpack-ref}/security-reference.html[Reference] + provides detailed information about the access privileges you can grant to + users, the settings you can configure for Security in `elasticsearch.yml`, + and the files where Security configuration information is stored. + +[float] +=== Have Comments, Questions, or Feedback? + +Head over to our {security-forum}[Security Discussion Forum] +to share your experience, questions, and suggestions. +-- + +include::getting-started.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/security/how-security-works.asciidoc b/x-pack/docs/en/security/how-security-works.asciidoc new file mode 100644 index 0000000000000..8cd7befc64226 --- /dev/null +++ b/x-pack/docs/en/security/how-security-works.asciidoc @@ -0,0 +1,148 @@ +[[how-security-works]] +== How Security Works + +An Elasticsearch cluster is typically made up of many moving parts. There are +the Elasticsearch nodes that form the cluster, and often Logstash instances, +Kibana instances, Beats agents, and clients, all communicating with it. +It should not come as a surprise that securing such clusters has many facets and +layers.
+ +{security} provides the means to secure the Elastic cluster on several levels: + + * User authentication + * Authorization and Role Based Access Control (a.k.a. RBAC) + * Node/Client Authentication and Channel Encryption + * Auditing + + +[float] +=== User Authentication + +User authentication is the process of identifying the users behind the requests +that hit the cluster and verifying that they are indeed who they claim to be. The +authentication process is handled by one or more authentication services called +_realms_. {security} provides the following built-in realms: + +|====== +| `native` | | | An internal realm where users are stored in a dedicated + Elasticsearch index. With this realm, users are + authenticated by usernames and passwords. The users + are managed via the + {ref}/security-api-users.html[User Management API]. + +| `ldap` | | | A realm that uses an external LDAP server to authenticate + the users. With this realm, users are authenticated by + usernames and passwords. + +| `active_directory` | | | A realm that uses an external Active Directory Server + to authenticate the users. With this realm, users + are authenticated by usernames and passwords. + +| `pki` | | | A realm that authenticates users using Public Key + Infrastructure (PKI). This realm works in conjunction + with SSL/TLS and identifies the users through the + Distinguished Name (DN) of the client's X.509 + certificates. + +| `file` | | | An internal realm where users are defined in files + stored on each node in the Elasticsearch cluster. + With this realm, users are authenticated by usernames + and passwords. The users are managed via dedicated + tools that are provided by {xpack} on installation. + +| `saml` | | | A realm that uses SAML 2.0 Web SSO. This realm is + designed to be used with {kib}. +|====== + +If none of the built-in realms meets your needs, you can also build your own +custom realm and plug it into {xpack}. + +When {security} is enabled, depending on the realms you've configured, you will +need to attach your user credentials to the requests sent to Elasticsearch. For +example, when using realms that support usernames and passwords you can simply +attach a {wikipedia}/Basic_access_authentication[basic auth] header to the requests. + +For more information on user authentication, see <>. + + +[float] +=== Authorization + +The authorization process takes place once a request is authenticated and the +user behind the request is identified. Authorization is the process of determining +whether the user behind an incoming request is allowed to execute it. Naturally, +this process takes place right after a successful authentication, when the +user identity is known. + +The authorization process revolves around the following five constructs: + +_Secured Resource_:: +A resource to which access is restricted. Indices/aliases, documents, fields, +users, and the Elasticsearch cluster itself are all examples of secured objects. + +_Privilege_:: +A named group representing one or more actions that a user may execute against a +secured resource. Each secured resource has its own set of available privileges. +For example, `read` is an index privilege that represents all actions that enable +reading the indexed/stored data. For a complete list of available privileges +see <>. + +_Permissions_:: +A set of one or more privileges against a secured resource.
Permissions can +easily be described in words; here are a few examples: + * `read` privilege on the `products` index + * `manage` privilege on the cluster + * `run_as` privilege on the `john` user + * `read` privilege on documents that match query X + * `read` privilege on `credit_card` field + +_Role_:: +A named set of permissions. + +_User_:: +The authenticated user. + +A secure Elasticsearch cluster manages the privileges of users through _roles_. +A role has a unique name and identifies a set of permissions that translate to +privileges on resources. A user can be associated with an arbitrary number of +roles. The total set of permissions that a user has is therefore defined by the +union of the permissions in all its roles. + +Roles can be assigned to users in a number of ways depending on the realms by +which the users are authenticated. + +For more information on user authentication, see <>. + + +[float] +=== Node/Client Authentication and Channel Encryption + +{security} supports configuring SSL/TLS for securing the communication channels +to, from and within the cluster. This support accounts for: + + * Encryption of data transmitted over the wire + * Certificate-based node authentication, preventing unauthorized nodes/clients + from establishing a connection with the cluster. + +For more information, see <>. + +{security} also enables you to <>, which can +be seen as a lightweight mechanism for node/client authentication. With IP Filtering +you can restrict the nodes and clients that can connect to the cluster based +on their IP addresses. The IP filters configuration provides whitelisting +and blacklisting of IPs, subnets and DNS domains. + + +[float] +=== Auditing +When dealing with any secure system, it is critical to have an audit trail +mechanism in place. Audit trails log various activities/events that occur in +the system, enabling you to analyze and backtrack past events when things go +wrong (e.g. a security breach). + +{security} provides such audit trail functionality for all nodes in the cluster. +You can configure the audit level, which determines the types of events that are +logged. These events include failed authentication attempts, user access denied, +node connection denied, and more. + +For more information on auditing, see <>. diff --git a/x-pack/docs/en/security/images/kibana-login.jpg b/x-pack/docs/en/security/images/kibana-login.jpg new file mode 100644 index 0000000000000..9a1916d5217dc Binary files /dev/null and b/x-pack/docs/en/security/images/kibana-login.jpg differ diff --git a/x-pack/docs/en/security/images/nexus.png b/x-pack/docs/en/security/images/nexus.png new file mode 100644 index 0000000000000..7be7a6536f699 Binary files /dev/null and b/x-pack/docs/en/security/images/nexus.png differ diff --git a/x-pack/docs/en/security/index.asciidoc b/x-pack/docs/en/security/index.asciidoc new file mode 100644 index 0000000000000..6804a5d3a98d7 --- /dev/null +++ b/x-pack/docs/en/security/index.asciidoc @@ -0,0 +1,113 @@ +[[xpack-security]] += Securing the Elastic Stack + +[partintro] +-- +{security} enables you to easily secure a cluster. With {security}, +you can password-protect your data as well as implement more advanced security +measures such as encrypting communications, role-based access control, +IP filtering, and auditing. This guide describes how to configure the security +features you need, and interact with your secured cluster. + +Security protects Elasticsearch clusters by: + +* <> + with password protection, role-based access control, and IP filtering.
+* <> + with message authentication and SSL/TLS encryption. +* <> + so you know who's doing what to your cluster and the data it stores. + +[float] +[[preventing-unauthorized-access]] +=== Preventing Unauthorized Access + +To prevent unauthorized access to your Elasticsearch cluster, you must have a +way to _authenticate_ users. This simply means that you need a way to validate +that a user is who they claim to be. For example, you have to make sure only +the person named _Kelsey Andorra_ can sign in as the user `kandorra`. X-Pack +Security provides a standalone authentication mechanism that enables you to +quickly password-protect your cluster. If you're already using <>, +<>, or <> to manage +users in your organization, {security} is able to integrate with those +systems to perform user authentication. + +In many cases, simply authenticating users isn't enough. You also need a way to +control what data users have access to and what tasks they can perform. {security} +enables you to _authorize_ users by assigning access _privileges_ to _roles_, +and assigning those roles to users. For example, this +<> mechanism (a.k.a RBAC) enables +you to specify that the user `kandorra` can only perform read operations on the +`events` index and can't do anything at all with other indices. + +{security} also supports <>. You can +whitelist and blacklist specific IP addresses or subnets to control network-level +access to a server. + +[float] +[[preserving-data-integrity]] +=== Preserving Data Integrity + +A critical part of security is keeping confidential data confidential. +Elasticsearch has built-in protections against accidental data loss and +corruption. However, there's nothing to stop deliberate tampering or data +interception. {security} preserves the integrity of your data by +<> to and from nodes. +For even greater protection, you can increase the <> and +<>. + + +[float] +[[maintaining-audit-trail]] +=== Maintaining an Audit Trail + +Keeping a system secure takes vigilance. By using {security} to maintain +an audit trail, you can easily see who is accessing your cluster and what they're +doing. By analyzing access patterns and failed attempts to access your cluster, +you can gain insights into attempted attacks and data breaches. Keeping an +auditable log of the activity in your cluster can also help diagnose operational +issues. + +[float] +=== Where to Go Next + +* <> + steps through how to install and start using Security for basic authentication. + +* <> + provides more information about how Security supports user authentication, + authorization, and encryption. + +* <> + shows you how to interact with an Elasticsearch cluster protected by + X-Pack Security. + +* <> + provides detailed information about the access privileges you can grant to + users, the settings you can configure for Security in `elasticsearch.yml`, + and the files where Security configuration information is stored. + +[float] +=== Have Comments, Questions, or Feedback? + +Head over to our {security-forum}[Security Discussion Forum] +to share your experience, questions, and suggestions. 
+-- + +include::getting-started.asciidoc[] + +include::how-security-works.asciidoc[] + +include::authentication.asciidoc[] + +include::authorization.asciidoc[] + +include::auditing.asciidoc[] + +include::securing-communications.asciidoc[] + +include::using-ip-filtering.asciidoc[] + +include::ccs-clients-integrations.asciidoc[] + +include::reference.asciidoc[] diff --git a/x-pack/docs/en/security/limitations.asciidoc b/x-pack/docs/en/security/limitations.asciidoc new file mode 100644 index 0000000000000..c2616ac6565bd --- /dev/null +++ b/x-pack/docs/en/security/limitations.asciidoc @@ -0,0 +1,88 @@ +[[security-limitations]] +== Security Limitations + +[float] +=== Plugins + +Elasticsearch's plugin infrastructure is extremely flexible in terms of what can +be extended. While it opens up Elasticsearch to a wide variety of (often custom) +additional functionality, when it comes to security, this high extensibility level +comes at a cost. We have no control over the third-party plugins' code (open +source or not) and therefore we cannot guarantee their compliance with {security}. +For this reason, third-party plugins are not officially supported on clusters +with {security} enabled. + +[float] +=== Changes in Index Wildcard Behavior + +Elasticsearch clusters with {security} enabled apply the `/_all` wildcard, and +all other wildcards, to the indices that the current user has privileges for, not +the set of all indices on the cluster. +While creating or retrieving aliases by providing wildcard expressions for alias names, if there are no existing authorized aliases +that match the wildcard expression provided an IndexNotFoundException is returned. + +[float] +=== Multi Document APIs + +Multi get and multi term vectors API throw IndexNotFoundException when trying to access non existing indices that the user is +not authorized for. By doing that they leak information regarding the fact that the index doesn't exist, while the user is not +authorized to know anything about those indices. + +[float] +=== Filtered Index Aliases + +Aliases containing filters are not a secure way to restrict access to individual +documents, due to the limitations described in <>. +{security} provides a secure way to restrict access to documents through the +<> feature. + +[float] +=== Field and Document Level Security Limitations + +When a user's role enables document or field level security for an index: + +* The user cannot perform write operations: +** The update API isn't supported. +** Update requests included in bulk requests aren't supported. +* The request cache is disabled for search requests. + +When a user's role enables document level security for an index: + +* Document level security isn't applied for APIs that aren't document based. + An example is the field stats API. +* Document level security doesn't affect global index statistics that relevancy + scoring uses. So this means that scores are computed without taking the role + query into account. Note that documents not matching with the role query are + never returned. +* The `has_child` and `has_parent` queries aren't supported as query in the + role definition. The `has_child` and `has_parent` queries can be used in the + search API with document level security enabled. +* Any query that makes remote calls to fetch data to query by isn't supported. + The following queries aren't supported: +** The `terms` query with terms lookup isn't supported. +** The `geo_shape` query with indexed shapes isn't supported. +** The `percolate` query isn't supported. 
+* If suggesters are specified and document level security is enabled, then + the specified suggesters are ignored. +* A search request cannot be profiled if document level security is enabled. + +[float] +[[alias-limitations]] +=== Index and Field Names Can Be Leaked When Using Aliases + +Calling certain Elasticsearch APIs on an alias can potentially leak information +about indices that the user isn't authorized to access. For example, when you get +the mappings for an alias with the `_mapping` API, the response includes the +index name and mappings for each index that the alias applies to. + +Until this limitation is addressed, avoid index and field names that contain +confidential or sensitive information. + +[float] +=== LDAP Realm + +The <> does not currently support the discovery of nested +LDAP groups. For example, if a user is a member of `group_1` and `group_1` is a +member of `group_2`, only `group_1` will be discovered. However, the +<> *does* support transitive +group membership. diff --git a/x-pack/docs/en/security/reference.asciidoc b/x-pack/docs/en/security/reference.asciidoc new file mode 100644 index 0000000000000..90668651b5d50 --- /dev/null +++ b/x-pack/docs/en/security/reference.asciidoc @@ -0,0 +1,11 @@ +[[security-reference]] +== Reference +* <> +* {ref}/security-settings.html[Security Settings] +* <> +* {ref}/security-api.html[Security API] +* {ref}/xpack-commands.html[Security Commands] + +include::reference/privileges.asciidoc[] + +include::reference/files.asciidoc[] diff --git a/x-pack/docs/en/security/reference/files.asciidoc b/x-pack/docs/en/security/reference/files.asciidoc new file mode 100644 index 0000000000000..cec8f9d1a3bcc --- /dev/null +++ b/x-pack/docs/en/security/reference/files.asciidoc @@ -0,0 +1,37 @@ +[[security-files]] +=== Security Files + +{security} uses the following files: + +* `CONFIG_DIR/x-pack/roles.yml` defines the roles in use on the cluster + (read more <>). + +* `CONFIG_DIR/elasticsearch-users` defines the users and their hashed passwords for + the <>. + +* `CONFIG_DIR/elasticsearch-users_roles` defines the assignment of roles to users for + the <>. + +* `CONFIG_DIR/x-pack/role_mapping.yml` defines the role assignments for a + Distinguished Name (DN) to a role. This allows LDAP and Active Directory + groups and users, as well as PKI users, to be mapped to roles (read more + <>). + +* `CONFIG_DIR/x-pack/log4j2.properties` contains audit information (read more + <>). + +[[security-files-location]] + +IMPORTANT: Any files that {security} uses must be stored in the Elasticsearch + configuration directory. Elasticsearch runs with restricted permissions + and is only permitted to read from the locations configured in the + directory layout for enhanced security. + +Several of these files are in the YAML format. When you edit these files, be +aware that YAML is indentation-level sensitive and indentation errors can lead +to configuration errors. Avoid the tab character to set indentation levels, or +use an editor that automatically expands tabs to spaces. + +Be careful to properly escape YAML constructs such as `:` or leading exclamation +points within quoted strings. Using the `|` or `>` characters to define block +literals instead of escaping the problematic characters can help avoid problems.
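For example, a file-based role that uses document level security embeds a JSON query inside `roles.yml`; writing that query as a folded block scalar avoids having to quote or escape the `:` characters it contains. The role name, index pattern, and query below are purely illustrative, a sketch of the format rather than a recommended role:

[source,yaml]
-----------------------------------------------------------
# Hypothetical role in CONFIG_DIR/x-pack/roles.yml.
# The folded block scalar (>) keeps the JSON query readable and
# avoids escaping the ':' characters inside it.
click_readers:
  indices:
    - names: [ 'events*' ]
      privileges: [ 'read' ]
      query: >
        { "match": { "category": "click" } }
-----------------------------------------------------------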
diff --git a/x-pack/docs/en/security/reference/privileges.asciidoc b/x-pack/docs/en/security/reference/privileges.asciidoc new file mode 100644 index 0000000000000..b467b58283d97 --- /dev/null +++ b/x-pack/docs/en/security/reference/privileges.asciidoc @@ -0,0 +1,134 @@ +[[security-privileges]] +=== Security Privileges + +This section lists the privileges that you can assign to a role. + +[[privileges-list-cluster]] +==== Cluster Privileges + +[horizontal] +`all`:: +All cluster administration operations, like snapshotting, node shutdown/restart, +settings update, rerouting, or managing users and roles. + +`monitor`:: +All cluster read-only operations, like cluster health and state, hot threads, +node info, node and cluster stats, and pending cluster tasks. + +`monitor_ml`:: +All read only {ml} operations, such as getting information about {dfeeds}, jobs, +model snapshots, or results. + +`monitor_watcher`:: +All read only watcher operations, such as getting a watch and watcher stats. + +`manage`:: +Builds on `monitor` and adds cluster operations that change values in the cluster. +This includes snapshotting, updating settings, and rerouting. It also includes +obtaining snapshot and restore status. This privilege does not include the +ability to manage security. + +`manage_index_templates`:: +All operations on index templates. + +`manage_ml`:: +All {ml} operations, such as creating and deleting {dfeeds}, jobs, and model +snapshots. ++ +-- +NOTE: {dfeeds-cap} that were created prior to version 6.2 or created when {security} +was disabled run as a system user with elevated privileges, including permission +to read all indices. Newer {dfeeds} run with the security roles of the user who created +or updated them. + +-- + +`manage_pipeline`:: +All operations on ingest pipelines. + +`manage_security`:: +All security related operations such as CRUD operations on users and roles and +cache clearing. + +`manage_watcher`:: +All watcher operations, such as putting watches, executing, activate or acknowledging. ++ +-- +NOTE: Watches that were created prior to version 6.1 or created when {security} +was disabled run as a system user with elevated privileges, including permission +to read and write all indices. Newer watches run with the security roles of the user +who created or updated them. + +-- + +`transport_client`:: +All privileges necessary for a transport client to connect. Required by the remote +cluster to enable <>. + +[[privileges-list-indices]] +==== Indices Privileges + +[horizontal] +`all`:: +Any action on an index + +`monitor`:: +All actions that are required for monitoring (recovery, segments info, index +stats and status). + +`manage`:: +All `monitor` privileges plus index administration (aliases, analyze, cache clear, +close, delete, exists, flush, mapping, open, force merge, refresh, settings, +search shards, templates, validate). + +`view_index_metadata`:: +Read-only access to index metadata (aliases, aliases exists, get index, exists, field mappings, +mappings, search shards, type exists, validate, warmers, settings). This +privilege is primarily available for use by {kib} users. + +`read`:: +Read only access to actions (count, explain, get, mget, get indexed scripts, +more like this, multi percolate/search/termvector, percolate, scroll, +clear_scroll, search, suggest, tv). + +`read_cross_cluster`:: +Read only access to the search action from a <>. + +`index`:: +Privilege to index and update documents. Also grants access to the update +mapping action. 
+ +`create`:: +Privilege to index documents. Also grants access to the update mapping +action. ++ +-- +NOTE: This privilege does not restrict the index operation to the creation +of documents but instead restricts API use to the index API. The index API allows a user +to overwrite a previously indexed document. + +-- + +`delete`:: +Privilege to delete documents. + +`write`:: +Privilege to perform all write operations to documents, which includes the +permission to index, update, and delete documents as well as performing bulk +operations. Also grants access to the update mapping action. + +`delete_index`:: +Privilege to delete an index. + +`create_index`:: +Privilege to create an index. A create index request may contain aliases to be +added to the index once created. In that case the request requires the `manage` +privilege as well, on both the index and the aliases names. + +==== Run As Privilege + +The `run_as` permission enables an authenticated user to submit requests on +behalf of another user. The value can be a user name or a comma-separated list +of user names. (You can also specify users as an array of strings or a YAML +sequence.) For more information, see +<>. diff --git a/x-pack/docs/en/security/securing-communications.asciidoc b/x-pack/docs/en/security/securing-communications.asciidoc new file mode 100644 index 0000000000000..e876ce9160b86 --- /dev/null +++ b/x-pack/docs/en/security/securing-communications.asciidoc @@ -0,0 +1,31 @@ +[[encrypting-communications]] +== Encrypting Communications + +Elasticsearch nodes store data that may be confidential. Attacks on the data may +come from the network. These attacks could include sniffing of the data, +manipulation of the data, and attempts to gain access to the server and thus the +files storing the data. Securing your nodes is required in order to use a production +license that enables {security} and helps reduce the risk from network-based attacks. + +This section shows how to: + +* Encrypt traffic to, from and within an Elasticsearch cluster using SSL/TLS, +* Require nodes to authenticate as they join the cluster using SSL certificates, and +* Make it more difficult for remote attackers to issue any commands to Elasticsearch. + +The authentication of new nodes helps prevent a rogue node from joining the +cluster and receiving data through replication. + +include::securing-communications/setting-up-ssl.asciidoc[] + +//TO-DO: These sections can be removed when all links to them are removed. + +[[ciphers]] +=== Enabling Cipher Suites for Stronger Encryption + +See {ref}/ciphers.html[Enabling Cipher Suites for Stronger Encryption]. + +[[separating-node-client-traffic]] +=== Separating node-to-node and client traffic + +See {ref}/separating-node-client-traffic.html[Separating node-to-node and client traffic]. diff --git a/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc b/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc new file mode 100644 index 0000000000000..05d6574b56f41 --- /dev/null +++ b/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc @@ -0,0 +1,191 @@ +[role="xpack"] +[[configuring-tls-docker]] +=== Encrypting Communications in an {es} Docker Image + +Starting with version 6.0.0, {security} (Gold, Platinum or Enterprise subscriptions) https://www.elastic.co/guide/en/elasticsearch/reference/6.0/breaking-6.0.0-xes.html[requires SSL/TLS] +encryption for the transport networking layer. 
+ +This section demonstrates an easy path to get started with SSL/TLS for both +HTTPS and transport using the `elasticsearch-platinum` docker image. + +For further details, please refer to +{xpack-ref}/encrypting-communications.html[Encrypting Communications] and +https://www.elastic.co/subscriptions[available subscriptions]. + +[float] +==== Prepare the environment + +<>. + +Inside a new, empty, directory create the following **four files**: + +`instances.yml`: +["source","yaml"] +---- +instances: + - name: es01 + dns: + - es01 <1> + - localhost + ip: + - 127.0.0.1 + - name: es02 + dns: + - es02 + - localhost + ip: + - 127.0.0.1 +---- +<1> Allow use of embedded Docker DNS server names. + +`.env`: +[source,yaml] +---- +CERTS_DIR=/usr/share/elasticsearch/config/x-pack/certificates <1> +ELASTIC_PASSWORD=PleaseChangeMe <2> +---- +<1> The path, inside the Docker image, where certificates are expected to be found. +<2> Initial password for the `elastic` user. + +[[getting-starter-tls-create-certs-composefile]] +`create-certs.yml`: +ifeval::["{release-state}"=="unreleased"] + +WARNING: Version {version} of {es} has not yet been released, so a +`create-certs.yml` is not available for this version. + +endif::[] + +ifeval::["{release-state}"!="unreleased"] +["source","yaml",subs="attributes"] +---- +version: '2.2' +services: + create_certs: + container_name: create_certs + image: docker.elastic.co/elasticsearch/elasticsearch-platinum:{version} + command: > + bash -c ' + if [[ ! -d config/x-pack/certificates/certs ]]; then + mkdir config/x-pack/certificates/certs; + fi; + if [[ ! -f /local/certs/bundle.zip ]]; then + bin/elasticsearch-certgen --silent --in config/x-pack/certificates/instances.yml --out config/x-pack/certificates/certs/bundle.zip; + unzip config/x-pack/certificates/certs/bundle.zip -d config/x-pack/certificates/certs; <1> + fi; + chgrp -R 0 config/x-pack/certificates/certs + ' + user: $\{UID:-1000\} + working_dir: /usr/share/elasticsearch + volumes: ['.:/usr/share/elasticsearch/config/x-pack/certificates'] +---- + +<1> The new node certificates and CA certificate+key are placed under the local directory `certs`. +endif::[] + +[[getting-starter-tls-create-docker-compose]] +`docker-compose.yml`: +ifeval::["{release-state}"=="unreleased"] + +WARNING: Version {version} of {es} has not yet been released, so a +`docker-compose.yml` is not available for this version. + +endif::[] + +ifeval::["{release-state}"!="unreleased"] +["source","yaml",subs="attributes"] +---- +version: '2.2' +services: + es01: + container_name: es01 + image: docker.elastic.co/elasticsearch/elasticsearch-platinum:{version} + environment: + - node.name=es01 + - discovery.zen.minimum_master_nodes=2 + - ELASTIC_PASSWORD=$ELASTIC_PASSWORD <1> + - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + - xpack.security.http.ssl.enabled=true + - xpack.security.transport.ssl.enabled=true + - xpack.security.transport.ssl.verification_mode=certificate <2> + - xpack.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt + - xpack.ssl.certificate=$CERTS_DIR/es01/es01.crt + - xpack.ssl.key=$CERTS_DIR/es01/es01.key + volumes: ['esdata_01:/usr/share/elasticsearch/data', './certs:$CERTS_DIR'] + ports: + - 9200:9200 + healthcheck: + test: curl --cacert $CERTS_DIR/ca/ca.crt -s https://localhost:9200 >/dev/null; if [[ $$? 
== 52 ]]; then echo 0; else echo 1; fi + interval: 30s + timeout: 10s + retries: 5 + es02: + container_name: es02 + image: docker.elastic.co/elasticsearch/elasticsearch-platinum:{version} + environment: + - node.name=es02 + - discovery.zen.minimum_master_nodes=2 + - ELASTIC_PASSWORD=$ELASTIC_PASSWORD + - discovery.zen.ping.unicast.hosts=es01 + - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + - xpack.security.http.ssl.enabled=true + - xpack.security.transport.ssl.enabled=true + - xpack.security.transport.ssl.verification_mode=certificate + - xpack.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt + - xpack.ssl.certificate=$CERTS_DIR/es02/es02.crt + - xpack.ssl.key=$CERTS_DIR/es02/es02.key + volumes: ['esdata_02:/usr/share/elasticsearch/data', './certs:$CERTS_DIR'] + wait_until_ready: + image: docker.elastic.co/elasticsearch/elasticsearch-platinum:{version} + command: /usr/bin/true + depends_on: {"es01": {"condition": "service_healthy"}} +volumes: {"esdata_01": {"driver": "local"}, "esdata_02": {"driver": "local"}} +---- + +<1> Bootstrap `elastic` with the password defined in `.env`. See {xpack-ref}/setting-up-authentication.html#bootstrap-elastic-passwords[the Elastic Bootstrap Password]. +<2> Disable verification of authenticity for inter-node communication. Allows +creating self-signed certificates without having to pin specific internal IP addresses. +endif::[] + +[float] +==== Run the example +. Generate the certificates (only needed once): ++ +-- +["source","sh"] +---- +docker-compose -f create-certs.yml up +---- +-- +. Start two {es} nodes configured for SSL/TLS: ++ +-- +["source","sh"] +---- +docker-compose up -d +---- +-- +. Access the {es} API over SSL/TLS using the bootstrapped password: ++ +-- +["source","sh"] +---- +curl --cacert certs/ca/ca.crt -u elastic:PleaseChangeMe https://localhost:9200 +---- +// NOTCONSOLE +-- +. The `elasticsearch-setup-passwords` tool can also be used to generate random +passwords for all users: ++ +-- +WARNING: Windows users not running PowerShell will need to remove `\` and join lines in the snippet below. +["source","sh"] +---- +docker exec es01 /bin/bash -c "bin/elasticsearch-setup-passwords \ +auto --batch \ +-Expack.ssl.certificate=x-pack/certificates/es01/es01.crt \ +-Expack.ssl.certificate_authorities=x-pack/certificates/ca/ca.crt \ +-Expack.ssl.key=x-pack/certificates/es01/es01.key \ +--url https://localhost:9200" +---- +-- diff --git a/x-pack/docs/en/security/securing-communications/enabling-cipher-suites.asciidoc b/x-pack/docs/en/security/securing-communications/enabling-cipher-suites.asciidoc new file mode 100644 index 0000000000000..38db876542747 --- /dev/null +++ b/x-pack/docs/en/security/securing-communications/enabling-cipher-suites.asciidoc @@ -0,0 +1,26 @@ +[role="xpack"] +[[ciphers]] +=== Enabling Cipher Suites for Stronger Encryption + +The TLS and SSL protocols use a cipher suite that determines the strength of +encryption used to protect the data. You may want to increase the strength of +encryption used when using a Oracle JVM; the IcedTea OpenJDK ships without these +restrictions in place. This step is not required to successfully use encrypted +communication. + +The _Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy +Files_ enable the use of additional cipher suites for Java in a separate JAR file +that you need to add to your Java installation. You can download this JAR file +from Oracle's http://www.oracle.com/technetwork/java/javase/downloads/index.html[download page]. 
+The _JCE Unlimited Strength Jurisdiction Policy Files_ are required for +encryption with key lengths greater than 128 bits, such as 256-bit AES encryption. + +After installation, all cipher suites in the JCE are available for use, but they require +configuration before they can be used. To enable the use of stronger cipher suites with +{security}, configure the `cipher_suites` parameter. See the +{ref}/security-settings.html#ssl-tls-settings[Configuration Parameters for TLS/SSL] +section of this document for specific parameter information. + +NOTE: The _JCE Unlimited Strength Jurisdiction Policy Files_ must be installed + on all nodes in the cluster to establish an improved level of encryption + strength. diff --git a/x-pack/docs/en/security/securing-communications/node-certificates.asciidoc b/x-pack/docs/en/security/securing-communications/node-certificates.asciidoc new file mode 100644 index 0000000000000..604355e21bf17 --- /dev/null +++ b/x-pack/docs/en/security/securing-communications/node-certificates.asciidoc @@ -0,0 +1,96 @@ +[[node-certificates]] +==== Generating Node Certificates + +TLS requires X.509 certificates to perform encryption and authentication of the +application that is being communicated with. In order for the communication +between nodes to be truly secure, the certificates must be validated. The +recommended approach for validating certificate authenticity in a {es} cluster +is to trust the certificate authority (CA) that signed the certificate. By doing +this, as nodes are added to your cluster they just need to use a certificate +signed by the same CA and the node is automatically allowed to join the cluster. +Additionally, it is recommended that the certificates contain subject alternative +names (SAN) that correspond to the node's IP address and DNS name so that +hostname verification can be performed. + +In order to simplify the process of generating certificates for the Elastic +Stack, a command line tool, {ref}/certutil.html[`elasticsearch-certutil`], has been +included with {xpack}. This tool takes care of generating a CA and signing +certificates with the CA. `elasticsearch-certutil` can be used interactively or +in a silent mode through the use of an input file. The `elasticsearch-certutil` +tool also supports generation of certificate signing requests (CSR), so that a +commercial- or organization-specific CA can be used to sign the certificates. +For example: + +. Optional: Create a certificate authority for your {es} cluster. ++ +-- +For example, use the `elasticsearch-certutil ca` command: + +[source,shell] +---------------------------------------------------------- +bin/elasticsearch-certutil ca +---------------------------------------------------------- + +You can configure the cluster to trust all nodes that have a certificate that +has been signed by this CA. + +The command outputs a single file, with a default name of `elastic-stack-ca.p12`. +This file is a PKCS#12 keystore that contains the public certificate for your CA +and the private key that is used to sign the certificates for each node. + +The `elasticsearch-certutil` command also prompts you for a password to protect +the file and key. If you plan to add more nodes to your cluster in the future, +retain a copy of the file and remember its password. +-- + +. Generate a certificate and private key for each node in your cluster.
++ +-- +For example, use the `elasticsearch-certutil cert` command: + +[source,shell] +---------------------------------------------------------- +bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12 +---------------------------------------------------------- +The output is a single PKCS#12 keystore that includes the node certificate, node +key, and CA certificate. + +You are also prompted for a password. You can enter a password for your +certificate and key, or you can leave the password blank by pressing Enter. + +By default `elasticsearch-certutil` generates certificates that have no hostname +information in them (that is, they do not have any Subject Alternative Name +fields). This means that you can use the certificate for every node in your +cluster, but you must turn off hostname verification as shown in the +configuration below. + +If you want to use hostname verification within your cluster, run the +`elasticsearch-certutil cert` command once for each of your nodes and provide +the `--name`, `--dns` and `--ip` options. + +NOTE: You should secure the output files, since they contain the private keys +for your instance. + +Alternatively, if you want to use a commercial or organization-specific CA, +you can use the `elasticsearch-certutil csr` command to generate certificate +signing requests (CSR) for the nodes in your cluster. For more information, see +<>. +-- + +. Copy the node certificate to the appropriate locations. ++ +-- +Copy the applicable `.p12` file into a directory within the {es} configuration +directory on each node. For example, `/home/es/config/certs`. There is no need +to copy the CA file to this directory. + +For each additional Elastic product that you want to configure, copy the +certificates to the relevant configuration directory. +-- + +NOTE: If you choose not to use `elasticsearch-certutil`, the certificates that +you obtain must allow for both `clientAuth` and `serverAuth` if the extended key +usage extension is present. The certificates need to be in PEM or PKCS#12 +format. Although not required, it is highly recommended that the certificate +contain the DNS names and/or IP addresses of the node so that hostname +verification can be used. diff --git a/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc b/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc new file mode 100644 index 0000000000000..b100567edf8b9 --- /dev/null +++ b/x-pack/docs/en/security/securing-communications/securing-elasticsearch.asciidoc @@ -0,0 +1,28 @@ +[role="xpack"] +[[configuring-tls]] +=== Encrypting Communications in {es} + +{security} enables you to encrypt traffic to, from, and within your {es} cluster. +Connections are secured using Transport Layer Security (TLS/SSL). + +WARNING: Clusters that do not have encryption enabled send all data in plain text +including passwords and will not be able to install a license that enables {security}. + +To enable encryption, you need to perform the following steps on each node in +the cluster: + +. Verify that the `xpack.security.enabled` setting is `true`. For more +information, see <>. + +. <>. + +. Configure each node to: +.. Required: <>. +.. Recommended: <>. + +For more information about encrypting communications across the Elastic Stack, +see {xpack-ref}/encrypting-communications.html[Encrypting Communications]. 
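As a quick illustration of the certificate-generation step, if you intend to use hostname verification you can generate a per-node certificate whose subject alternative names match that node. This is only a sketch: the instance name, DNS name, and IP address below are placeholders for your own values, and the CA file is the one produced by `elasticsearch-certutil ca`.

[source,shell]
--------------------------------------------------
bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12 \
  --name node01 \
  --dns node01.example.com \
  --ip 10.0.0.11
--------------------------------------------------

Repeat the command once per node, then copy each resulting keystore to its node as described in the following sections.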
+ +include::node-certificates.asciidoc[] +include::tls-transport.asciidoc[] +include::tls-http.asciidoc[] diff --git a/x-pack/docs/en/security/securing-communications/separating-node-client-traffic.asciidoc b/x-pack/docs/en/security/securing-communications/separating-node-client-traffic.asciidoc new file mode 100644 index 0000000000000..887d4701d78e8 --- /dev/null +++ b/x-pack/docs/en/security/securing-communications/separating-node-client-traffic.asciidoc @@ -0,0 +1,68 @@ +[role="xpack"] +[[separating-node-client-traffic]] +=== Separating node-to-node and client traffic + +Elasticsearch has a feature called {ref}/modules-transport.html#_tcp_transport_profiles[TCP transport profiles] +that allows it to bind to several ports and addresses. {security} builds on this +functionality to enhance the security of the cluster by enabling the separation +of node-to-node transport traffic from client transport traffic. This is important +if the client transport traffic is not trusted and could potentially be malicious. +To separate the node-to-node traffic from the client traffic, add the following +to `elasticsearch.yml`: + +[source, yaml] +-------------------------------------------------- +transport.profiles.client: <1> + port: 9500-9600 <2> + xpack.security: + type: client <3> +-------------------------------------------------- +<1> `client` is the name of this example profile +<2> The port range that will be used by transport clients to communicate with + this cluster +<3> Categorizes the profile as a `client`. This adds additional security + filters that deny requests for internal cluster operations + (e.g. shard-level actions and ping requests) on this profile. + +If supported by your environment, an internal network can be used for node-to-node +traffic and a public network can be used for client traffic by adding the following +to `elasticsearch.yml`: + +[source, yaml] +-------------------------------------------------- +transport.profiles.default.bind_host: 10.0.0.1 <1> +transport.profiles.client.bind_host: 1.1.1.1 <2> +-------------------------------------------------- +<1> The bind address for the network that will be used for node-to-node communication +<2> The bind address for the network used for client communication + +If separate networks are not available, then +{xpack-ref}/ip-filtering.html[IP Filtering] can +be enabled to limit access to the profiles. + +When using SSL for transport, a different set of certificates can also be used +for the client traffic by adding the following to `elasticsearch.yml`: + +[source, yaml] +-------------------------------------------------- +transport.profiles.client.xpack.security.ssl.truststore: + path: /path/to/another/truststore + password: x-pack-test-password + +transport.profiles.client.xpack.security.ssl.keystore: + path: /path/to/another/keystore + password: x-pack-test-password +-------------------------------------------------- + +To change the default behavior that requires certificates for transport clients, +set the following value in the `elasticsearch.yml` file: + +[source, yaml] +-------------------------------------------------- +transport.profiles.client.xpack.security.ssl.client_authentication: none +-------------------------------------------------- + +This setting keeps certificate authentication active for node-to-node traffic, +but removes the requirement to distribute a signed certificate to transport +clients.
For more information, see +{xpack-ref}/java-clients.html#transport-client[Configuring the Transport Client to work with a Secured Cluster]. diff --git a/x-pack/docs/en/security/securing-communications/setting-up-ssl.asciidoc b/x-pack/docs/en/security/securing-communications/setting-up-ssl.asciidoc new file mode 100644 index 0000000000000..ad2548f73f441 --- /dev/null +++ b/x-pack/docs/en/security/securing-communications/setting-up-ssl.asciidoc @@ -0,0 +1,39 @@ +[[ssl-tls]] +=== Setting Up TLS on a Cluster + +{security} enables you to encrypt traffic to, from, and within your {es} +cluster. Connections are secured using Transport Layer Security (TLS), which is +commonly referred to as "SSL". + +WARNING: Clusters that do not have encryption enabled send all data in plain text +including passwords and will not be able to install a license that enables {security}. + +The following steps describe how to enable encryption across the various +components of the Elastic Stack. You must perform each of the steps that are +applicable to your cluster. + +. Generate a private key and X.509 certificate for each of your {es} nodes. See +{ref}/configuring-tls.html#node-certificates[Generating Node Certificates]. + +. Configure each node in the cluster to identify itself using its signed +certificate and enable TLS on the transport layer. You can also optionally +enable TLS on the HTTP layer. See +{ref}/configuring-tls.html#tls-transport[Encrypting Communications Between Nodes in a Cluster] and +{ref}/configuring-tls.html#tls-http[Encrypting HTTP Client Communications]. + +. Configure {monitoring} to use encrypted connections. See <>. + +. Configure {kib} to encrypt communications between the browser and +the {kib} server and to connect to {es} via HTTPS. See +{kibana-ref}/using-kibana-with-security.html[Configuring Security in {kib}]. + +. Configure Logstash to use TLS encryption. See +{logstash-ref}/ls-security.html[Configuring Security in Logstash]. + +. Configure Beats to use encrypted connections. See <>. + +. Configure the Java transport client to use encrypted communications. +See <>. + +. Configure {es} for Apache Hadoop to use secured transport. See +{hadoop-ref}/security.html[{es} for Apache Hadoop Security]. diff --git a/x-pack/docs/en/security/securing-communications/tls-http.asciidoc b/x-pack/docs/en/security/securing-communications/tls-http.asciidoc new file mode 100644 index 0000000000000..dae088667c6fc --- /dev/null +++ b/x-pack/docs/en/security/securing-communications/tls-http.asciidoc @@ -0,0 +1,83 @@ +[role="xpack"] +[[tls-http]] +==== Encrypting HTTP Client Communications + +When {security} is enabled, you can optionally use TLS to ensure that +communication between HTTP clients and the cluster is encrypted. + +NOTE: Enabling TLS on the HTTP layer is strongly recommended but is not required. +If you enable TLS on the HTTP layer in {es}, then you might need to make +configuration changes in other parts of the Elastic Stack and in any {es} +clients that you use. + +. If you have not done so already, <>. + +. Enable TLS and specify the information required to access the node’s +certificate. 
+ +** If the certificate is in PKCS#12 format, add the following information to the +`elasticsearch.yml` file on each node: ++ +-- +[source, yaml] +-------------------------------------------------- +xpack.security.http.ssl.enabled: true +xpack.security.http.ssl.keystore.path: certs/elastic-certificates.p12 <1> +xpack.security.http.ssl.truststore.path: certs/elastic-certificates.p12 <2> +-------------------------------------------------- +<1> If you created a separate certificate for each node, then you might need to +customize this path on each node. If the filename matches the node name, you can +use the `certs/${node.name}.p12` format, for example. +<2> The `elasticsearch-certutil` output includes the CA certificate inside the +PKCS#12 keystore, therefore the keystore can also be used as the truststore. +This name should match the `keystore.path` value. +-- + +** If the certificate is in PEM format, add the following information to the +`elasticsearch.yml` file on each node: ++ +-- +[source, yaml] +-------------------------------------------------- +xpack.security.http.ssl.enabled: true +xpack.security.http.ssl.key: /home/es/config/x-pack/node01.key <1> +xpack.security.http.ssl.certificate: /home/es/config/x-pack/node01.crt <2> +xpack.security.http.ssl.certificate_authorities: [ "/home/es/config/x-pack/ca.crt" ] <3> +-------------------------------------------------- +<1> The full path to the node key file. This must be a location within the + {es} configuration directory. +<2> The full path to the node certificate. This must be a location within the + {es} configuration directory. +<3> An array of paths to the CA certificates that should be trusted. These paths + must be a location within the {es} configuration directory. +-- + +. If you secured the node's certificate with a password, add the password to +your {es} keystore: + +** If the signed certificate is in PKCS#12 format, use the following commands: ++ +-- +[source,shell] +----------------------------------------------------------- +bin/elasticsearch-keystore add xpack.security.http.ssl.keystore.secure_password + +bin/elasticsearch-keystore add xpack.security.http.ssl.truststore.secure_password +----------------------------------------------------------- +-- + +** If the certificate is in PEM format, use the following commands: ++ +-- +[source,shell] +----------------------------------------------------------- +bin/elasticsearch-keystore add xpack.security.http.ssl.secure_key_passphrase +----------------------------------------------------------- +-- + +. Restart {es}. + +NOTE: All TLS-related node settings are considered to be highly sensitive and +therefore are not exposed via the +{ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API] For more +information about any of these settings, see <>. diff --git a/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc b/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc new file mode 100644 index 0000000000000..9bce211a1e278 --- /dev/null +++ b/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc @@ -0,0 +1,97 @@ +[role="xpack"] +[[tls-transport]] +==== Encrypting Communications Between Nodes in a Cluster + +The transport networking layer is used for internal communication between nodes +in a cluster. When {security} is enabled, you must use TLS to ensure that +communication between the nodes is encrypted. + +. <>. + +. Enable TLS and specify the information required to access the node’s +certificate. 
+ +** If the signed certificate is in PKCS#12 format, add the following information to the +`elasticsearch.yml` file on each node: ++ +-- +[source,yaml] +----------------------------------------------------------- +xpack.security.transport.ssl.enabled: true +xpack.security.transport.ssl.verification_mode: certificate <1> +xpack.security.transport.ssl.keystore.path: certs/elastic-certificates.p12 <2> +xpack.security.transport.ssl.truststore.path: certs/elastic-certificates.p12 <3> +----------------------------------------------------------- +<1> If you used the `--dns` or `--ip` options with the `elasticsearch-certutil cert` command +and you want to enable strict hostname checking, set the verification mode to +`full`. +See <> for a description of these values. + +<2> If you created a separate certificate for each node, then you might need to +customize this path on each node. If the filename matches the node name, you can +use the `certs/${node.name}.p12` format, for example. +<3> The `elasticsearch-certutil` output includes the CA certificate inside the +PKCS#12 keystore, therefore the keystore can also be used as the truststore. +This name should match the `keystore.path` value. +-- + +** If the certificate is in PEM format, add the following information to the +`elasticsearch.yml` file on each node: ++ +-- +[source, yaml] +-------------------------------------------------- +xpack.security.transport.ssl.enabled: true +xpack.security.transport.ssl.verification_mode: certificate <1> +xpack.security.transport.ssl.key: /home/es/config/x-pack/node01.key <2> +xpack.security.transport.ssl.certificate: /home/es/config/x-pack/node01.crt <3> +xpack.security.transport.ssl.certificate_authorities: [ "/home/es/config/x-pack/ca.crt" ] <4> +-------------------------------------------------- +<1> If you used the `--dns` or `--ip` options with the `elasticsearch-certutil cert` command +and you want to enable strict hostname checking, set the verification mode to +`full`. +See <> for a description of these values. +<2> The full path to the node key file. This must be a location within the + {es} configuration directory. +<3> The full path to the node certificate. This must be a location within the + {es} configuration directory. +<4> An array of paths to the CA certificates that should be trusted. These paths + must be a location within the {es} configuration directory. +-- + +. If you secured the node's certificate with a password, add the password to +your {es} keystore: + +** If the signed certificate is in PKCS#12 format, use the following commands: ++ +-- +[source,shell] +----------------------------------------------------------- +bin/elasticsearch-keystore add xpack.security.transport.ssl.keystore.secure_password + +bin/elasticsearch-keystore add xpack.security.transport.ssl.truststore.secure_password +----------------------------------------------------------- +-- + +** If the certificate is in PEM format, use the following commands: ++ +-- +[source,shell] +----------------------------------------------------------- +bin/elasticsearch-keystore add xpack.security.transport.ssl.secure_key_passphrase +----------------------------------------------------------- +-- + +. Restart {es}. ++ +-- +You must perform a full cluster restart. Nodes which are configured to use TLS +cannot communicate with nodes that are using unencrypted networking (and +vice-versa). After enabling TLS you must restart all nodes in order to maintain +communication across the cluster. 
+-- + +NOTE: All TLS-related node settings are considered to be highly sensitive and +therefore are not exposed via the +{ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API] For more +information about any of these settings, see <>. diff --git a/x-pack/docs/en/security/troubleshooting.asciidoc b/x-pack/docs/en/security/troubleshooting.asciidoc new file mode 100644 index 0000000000000..6b8e884149616 --- /dev/null +++ b/x-pack/docs/en/security/troubleshooting.asciidoc @@ -0,0 +1,417 @@ +[[security-troubleshooting]] +== {security} Troubleshooting +++++ +{security} +++++ + +Use the information in this section to troubleshoot common problems and find +answers for frequently asked questions. + +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> + + +To get help, see <>. + +[[security-trb-settings]] +=== Some settings are not returned via the nodes settings API + +*Symptoms:* + +* When you use the {ref}/cluster-nodes-info.html[nodes info API] to retrieve +settings for a node, some information is missing. + +*Resolution:* + +This is intentional. Some of the settings are considered to be highly +sensitive: all `ssl` settings, ldap `bind_dn`, and `bind_password`. +For this reason, we filter these settings and do not expose them via +the nodes info API rest endpoint. You can also define additional +sensitive settings that should be hidden using the +`xpack.security.hide_settings` setting. For example, this snippet +hides the `url` settings of the `ldap1` realm and all settings of the +`ad1` realm. + +[source, yaml] +------------------------------------------ +xpack.security.hide_settings: xpack.security.authc.realms.ldap1.url, +xpack.security.authc.realms.ad1.* +------------------------------------------ + +[[security-trb-roles]] +=== Authorization exceptions + +*Symptoms:* + +* I configured the appropriate roles and the users, but I still get an +authorization exception. +* I can authenticate to LDAP, but I still get an authorization exception. + + +*Resolution:* + +. Verify that the role names associated with the users match the roles defined +in the `roles.yml` file. You can use the `elasticsearch-users` tool to list all +the users. Any unknown roles are marked with `*`. ++ +-- +[source, shell] +------------------------------------------ +bin/xpack/users list +rdeniro : admin +alpacino : power_user +jacknich : monitoring,unknown_role* <1> +------------------------------------------ +<1> `unknown_role` was not found in `roles.yml` + +For more information about this command, see +{ref}/users-command.html[Users Command]. +-- + +. If you are authenticating to LDAP, a number of configuration options can cause +this error. ++ +-- +|====================== +|_group identification_ | + +Groups are located by either an LDAP search or by the "memberOf" attribute on +the user. Also, If subtree search is turned off, it will search only one +level deep. See the <> for all the options. +There are many options here and sticking to the defaults will not work for all +scenarios. + +| _group to role mapping_| + +Either the `role_mapping.yml` file or the location for this file could be +misconfigured. See <> for more. + +|_role definition_| + +The role definition might be missing or invalid. 
+ +|====================== + +To help track down these possibilities, add the following lines to the end of +the `log4j2.properties` configuration file in the `CONFIG_DIR`: + +[source,properties] +---------------- +logger.authc.name = org.elasticsearch.xpack.security.authc +logger.authc.level = DEBUG +---------------- + +A successful authentication should produce debug statements that list groups and +role mappings. +-- + +[[security-trb-extraargs]] +=== Users command fails due to extra arguments + +*Symptoms:* + +* The `elasticsearch-users` command fails with the following message: +`ERROR: extra arguments [...] were provided`. + +*Resolution:* + +This error occurs when the `elasticsearch-users` tool is parsing the input and +finds unexpected arguments. This can happen when there are special characters +used in some of the arguments. For example, on Windows systems the `,` character +is considered a parameter separator; in other words `-r role1,role2` is +translated to `-r role1 role2` and the `elasticsearch-users` tool only +recognizes `role1` as an expected parameter. The solution here is to quote the +parameter: `-r "role1,role2"`. + +For more information about this command, see +{ref}/users-command.html[`elasticsearch-users` command]. + +[[trouble-shoot-active-directory]] +=== Users are frequently locked out of Active Directory + +*Symptoms:* + +* Certain users are being frequently locked out of Active Directory. + +*Resolution:* + +Check your realm configuration; realms are checked serially, one after another. +If your Active Directory realm is being checked before other realms and there +are usernames that appear in both Active Directory and another realm, a valid +login for one realm might be causing failed login attempts in another realm. + +For example, if `UserA` exists in both Active Directory and a file realm, and +the Active Directory realm is checked first and file is checked second, an +attempt to authenticate as `UserA` in the file realm would first attempt to +authenticate against Active Directory and fail, before successfully +authenticating against the `file` realm. Because authentication is verified on +each request, the Active Directory realm would be checked - and fail - on each +request for `UserA` in the `file` realm. In this case, while the authentication +request completed successfully, the account on Active Directory would have +received several failed login attempts, and that account might become +temporarily locked out. Plan the order of your realms accordingly. + +Also note that it is not typically necessary to define multiple Active Directory +realms to handle domain controller failures. When using Microsoft DNS, the DNS +entry for the domain should always point to an available domain controller. + + +[[trb-security-maccurl]] +=== Certificate verification fails for curl on Mac + +*Symptoms:* + +* `curl` on the Mac returns a certificate verification error even when the +`--cacert` option is used. + + +*Resolution:* + +Apple's integration of `curl` with their keychain technology disables the +`--cacert` option. +See http://curl.haxx.se/mail/archive-2013-10/0036.html for more information. + +You can use another tool, such as `wget`, to test certificates. Alternately, you +can add the certificate for the signing certificate authority MacOS system +keychain, using a procedure similar to the one detailed at the +http://support.apple.com/kb/PH14003[Apple knowledge base]. Be sure to add the +signing CA's certificate and not the server's certificate. 
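+
+For example, a quick check with `wget` might look like the following sketch,
+assuming the signing CA's certificate is available as `ca.crt` and the node
+listens on `localhost:9200` (both values are placeholders):
+
+[source,shell]
+------------------------------------------
+wget --ca-certificate=ca.crt https://localhost:9200
+------------------------------------------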
+ + +[[trb-security-sslhandshake]] +=== SSLHandshakeException causes connections to fail + +*Symptoms:* + +* A `SSLHandshakeException` causes a connection to a node to fail and indicates +that there is a configuration issue. Some of the common exceptions are shown +below with tips on how to resolve these issues. + + +*Resolution:* + +`java.security.cert.CertificateException: No name matching node01.example.com found`:: ++ +-- +Indicates that a client connection was made to `node01.example.com` but the +certificate returned did not contain the name `node01.example.com`. In most +cases, the issue can be resolved by ensuring the name is specified during +certificate creation. For more information, see <>. Another scenario is +when the environment does not wish to use DNS names in certificates at all. In +this scenario, all settings in `elasticsearch.yml` should only use IP addresses +including the `network.publish_host` setting. +-- + +`java.security.cert.CertificateException: No subject alternative names present`:: ++ +-- +Indicates that a client connection was made to an IP address but the returned +certificate did not contain any `SubjectAlternativeName` entries. IP addresses +are only used for hostname verification if they are specified as a +`SubjectAlternativeName` during certificate creation. If the intent was to use +IP addresses for hostname verification, then the certificate will need to be +regenerated with the appropriate IP address. See <>. +-- + +`javax.net.ssl.SSLHandshakeException: null cert chain` and `javax.net.ssl.SSLException: Received fatal alert: bad_certificate`:: ++ +-- +The `SSLHandshakeException` indicates that a self-signed certificate was +returned by the client that is not trusted as it cannot be found in the +`truststore` or `keystore`. This `SSLException` is seen on the client side of +the connection. +-- + +`sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target` and `javax.net.ssl.SSLException: Received fatal alert: certificate_unknown`:: ++ +-- +This `SunCertPathBuilderException` indicates that a certificate was returned +during the handshake that is not trusted. This message is seen on the client +side of the connection. The `SSLException` is seen on the server side of the +connection. The CA certificate that signed the returned certificate was not +found in the `keystore` or `truststore` and needs to be added to trust this +certificate. +-- + +[[trb-security-ssl]] +=== Common SSL/TLS exceptions + +*Symptoms:* + +* You might see some exceptions related to SSL/TLS in your logs. Some of the +common exceptions are shown below with tips on how to resolve these issues. + + + + +*Resolution:* + +`WARN: received plaintext http traffic on a https channel, closing connection`:: ++ +-- +Indicates that there was an incoming plaintext http request. This typically +occurs when an external applications attempts to make an unencrypted call to the +REST interface. Please ensure that all applications are using `https` when +calling the REST interface with SSL enabled. +-- + +`org.elasticsearch.common.netty.handler.ssl.NotSslRecordException: not an SSL/TLS record:`:: ++ +-- +Indicates that there was incoming plaintext traffic on an SSL connection. This +typically occurs when a node is not configured to use encrypted communication +and tries to connect to nodes that are using encrypted communication. Please +verify that all nodes are using the same setting for +`xpack.security.transport.ssl.enabled`. 
+
+For more information about this setting, see
+{ref}/security-settings.html[Security Settings in {es}].
+--
+
+`java.io.StreamCorruptedException: invalid internal transport message format, got`::
++
+--
+Indicates an issue with data received on the transport interface in an unknown
+format. This can happen when a node with encrypted communication enabled
+connects to a node that has encrypted communication disabled. Please verify that
+all nodes are using the same setting for `xpack.security.transport.ssl.enabled`.
+
+For more information about this setting, see
+{ref}/security-settings.html[Security Settings in {es}].
+--
+
+`java.lang.IllegalArgumentException: empty text`::
++
+--
+This exception is typically seen when an `https` request is made to a node that
+is not using `https`. If `https` is desired, please ensure the following setting
+is in `elasticsearch.yml`:
+
+[source,yaml]
+----------------
+xpack.security.http.ssl.enabled: true
+----------------
+
+For more information about this setting, see
+{ref}/security-settings.html[Security Settings in {es}].
+--
+
+`ERROR: unsupported ciphers [...] were requested but cannot be used in this JVM`::
++
+--
+This error occurs when an SSL/TLS cipher suite is specified that is not supported
+by the JVM that {es} is running in. Security tries to use the specified cipher
+suites that are supported by this JVM. This error can occur when using the
+Security defaults as some distributions of OpenJDK do not enable the PKCS11
+provider by default. In this case, we recommend consulting your JVM
+documentation for details on how to enable the PKCS11 provider.
+
+Another common source of this error is requesting cipher suites that use
+encryption with a key length greater than 128 bits when running on an Oracle JDK.
+In this case, you must install the
+<>.
+--
+
+[[trb-security-internalserver]]
+=== Internal Server Error in Kibana
+
+*Symptoms:*
+
+* In 5.1.1, an `UnhandledPromiseRejectionWarning` occurs and {kib} displays an
+Internal Server Error.
+//TBD: Is the same true for later releases?
+
+*Resolution:*
+
+If the Security plugin is enabled in {es} but disabled in {kib}, you must
+still set `elasticsearch.username` and `elasticsearch.password` in `kibana.yml`.
+Otherwise, {kib} cannot connect to {es}.
+
+
+[[trb-security-setup]]
+=== Setup-passwords command fails due to connection failure
+
+The {ref}/setup-passwords.html[elasticsearch-setup-passwords command] sets
+passwords for the built-in users by sending user management API requests. If
+your cluster uses SSL/TLS for the HTTP (REST) interface, the command attempts to
+establish a connection with the HTTPS protocol. If the connection attempt fails,
+the command fails.
+
+*Symptoms:*
+
+. {es} is running HTTPS, but the command fails to detect it and returns the
+following errors:
++
+--
+[source, shell]
+------------------------------------------
+Cannot connect to elasticsearch node.
+java.net.SocketException: Unexpected end of file from server
+...
+ERROR: Failed to connect to elasticsearch at
+http://127.0.0.1:9200/_xpack/security/_authenticate?pretty.
+Is the URL correct and elasticsearch running?
+------------------------------------------
+--
+
+. SSL/TLS is configured, but trust cannot be established. The command returns
+the following errors:
++
+--
+[source, shell]
+------------------------------------------
+SSL connection to
+https://127.0.0.1:9200/_xpack/security/_authenticate?pretty
+failed: sun.security.validator.ValidatorException:
+PKIX path building failed:
+sun.security.provider.certpath.SunCertPathBuilderException:
+unable to find valid certification path to requested target
+Please check the elasticsearch SSL settings under
+xpack.security.http.ssl.
+...
+ERROR: Failed to establish SSL connection to elasticsearch at
+https://127.0.0.1:9200/_xpack/security/_authenticate?pretty.
+------------------------------------------
+--
+
+. The command fails because hostname verification fails, which results in the
+following errors:
++
+--
+[source, shell]
+------------------------------------------
+SSL connection to
+https://idp.localhost.test:9200/_xpack/security/_authenticate?pretty
+failed: java.security.cert.CertificateException:
+No subject alternative DNS name matching
+elasticsearch.example.com found.
+Please check the elasticsearch SSL settings under
+xpack.security.http.ssl.
+...
+ERROR: Failed to establish SSL connection to elasticsearch at
+https://elasticsearch.example.com:9200/_xpack/security/_authenticate?pretty.
+------------------------------------------
+--
+
+*Resolution:*
+
+. If your cluster uses TLS/SSL for the HTTP interface but the
+`elasticsearch-setup-passwords` command attempts to establish a non-secure
+connection, use the `--url` command option to explicitly specify an HTTPS URL.
+Alternatively, set the `xpack.security.http.ssl.enabled` setting to `true`.
+
+. If the command does not trust the {es} server, verify that you configured the
+`xpack.security.http.ssl.certificate_authorities` setting or the
+`xpack.security.http.ssl.truststore.path` setting.
+
+. If hostname verification fails, you can disable this verification by setting
+`xpack.security.http.ssl.verification_mode` to `certificate`.
+
+For more information about these settings, see
+{ref}/security-settings.html[Security Settings in {es}].
diff --git a/x-pack/docs/en/security/using-ip-filtering.asciidoc b/x-pack/docs/en/security/using-ip-filtering.asciidoc
new file mode 100644
index 0000000000000..37beced5a9455
--- /dev/null
+++ b/x-pack/docs/en/security/using-ip-filtering.asciidoc
@@ -0,0 +1,143 @@
+[[ip-filtering]]
+== Restricting Connections with IP Filtering
+
+You can apply IP filtering to application clients, node clients, or transport
+clients, in addition to other nodes that are attempting to join the cluster.
+
+If a node's IP address is on the blacklist, {security} still allows the
+connection to Elasticsearch, but it is dropped immediately and no requests
+are processed.
+
+NOTE: Elasticsearch installations are not designed to be publicly accessible
+      over the Internet. IP Filtering and the other security capabilities of
+      {security} do not change this condition.
+
+[float]
+=== Enabling IP filtering
+
+{security} provides an access control feature that allows or rejects connections
+from specific hosts, domains, or subnets.
+
+You configure IP filtering by specifying the `xpack.security.transport.filter.allow` and
+`xpack.security.transport.filter.deny` settings in `elasticsearch.yml`. Allow rules
+take precedence over the deny rules.
+
+[source,yaml]
+--------------------------------------------------
+xpack.security.transport.filter.allow: "192.168.0.1"
+xpack.security.transport.filter.deny: "192.168.0.0/24"
+--------------------------------------------------
+
+The `_all` keyword can be used to deny all connections that are not explicitly
+allowed.
+
+[source,yaml]
+--------------------------------------------------
+xpack.security.transport.filter.allow: [ "192.168.0.1", "192.168.0.2", "192.168.0.3", "192.168.0.4" ]
+xpack.security.transport.filter.deny: _all
+--------------------------------------------------
+
+IP filtering configuration also supports IPv6 addresses.
+
+[source,yaml]
+--------------------------------------------------
+xpack.security.transport.filter.allow: "2001:0db8:1234::/48"
+xpack.security.transport.filter.deny: "1234:0db8:85a3:0000:0000:8a2e:0370:7334"
+--------------------------------------------------
+
+You can also filter by hostnames when DNS lookups are available.
+
+[source,yaml]
+--------------------------------------------------
+xpack.security.transport.filter.allow: localhost
+xpack.security.transport.filter.deny: '*.google.com'
+--------------------------------------------------
+
+[float]
+=== Disabling IP Filtering
+
+Disabling IP filtering can slightly improve performance under some conditions.
+To disable IP filtering entirely, set the value of the `xpack.security.transport.filter.enabled`
+setting in the `elasticsearch.yml` configuration file to `false`.
+
+[source,yaml]
+--------------------------------------------------
+xpack.security.transport.filter.enabled: false
+--------------------------------------------------
+
+You can also disable IP filtering for the transport protocol but enable it for
+HTTP only.
+
+[source,yaml]
+--------------------------------------------------
+xpack.security.transport.filter.enabled: false
+xpack.security.http.filter.enabled: true
+--------------------------------------------------
+
+[float]
+=== Specifying TCP transport profiles
+
+{ref}/modules-transport.html#_tcp_transport_profiles[TCP transport profiles]
+enable Elasticsearch to bind on multiple hosts. {security} enables you to apply
+different IP filtering on different profiles.
+
+[source,yaml]
+--------------------------------------------------
+xpack.security.transport.filter.allow: 172.16.0.0/24
+xpack.security.transport.filter.deny: _all
+transport.profiles.client.xpack.security.filter.allow: 192.168.0.0/24
+transport.profiles.client.xpack.security.filter.deny: _all
+--------------------------------------------------
+
+NOTE: When you do not specify a profile, `default` is used automatically.
+
+[float]
+=== HTTP Filtering
+
+You may want to have different IP filtering for the transport and HTTP protocols.
+
+[source,yaml]
+--------------------------------------------------
+xpack.security.transport.filter.allow: localhost
+xpack.security.transport.filter.deny: '*.google.com'
+xpack.security.http.filter.allow: 172.16.0.0/16
+xpack.security.http.filter.deny: _all
+--------------------------------------------------
+
+[float]
+[[dynamic-ip-filtering]]
+==== Dynamically updating IP filter settings
+
+In environments with highly dynamic IP addresses, such as cloud-based hosting,
+it is very hard to know the IP addresses up front when a machine is provisioned.
+Instead of changing the configuration file and restarting the node,
+you can use the _Cluster Update Settings API_.
For example: + +[source,js] +-------------------------------------------------- +PUT /_cluster/settings +{ + "persistent" : { + "xpack.security.transport.filter.allow" : "172.16.0.0/24" + } +} +-------------------------------------------------- +// CONSOLE + +You can also dynamically disable filtering completely: + +[source,js] +-------------------------------------------------- +PUT /_cluster/settings +{ + "persistent" : { + "xpack.security.transport.filter.enabled" : false + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +NOTE: In order to avoid locking yourself out of the cluster, the default bound + transport address will never be denied. This means you can always SSH into + a system and use curl to apply changes. diff --git a/x-pack/docs/en/settings/audit-settings.asciidoc b/x-pack/docs/en/settings/audit-settings.asciidoc new file mode 100644 index 0000000000000..14e5d6fa28f46 --- /dev/null +++ b/x-pack/docs/en/settings/audit-settings.asciidoc @@ -0,0 +1,141 @@ +[role="xpack"] +[[auditing-settings]] +=== Auditing Security Settings +++++ +Auditing Settings +++++ + +All of these settings can be added to the `elasticsearch.yml` configuration +file. For more information, see +{xpack-ref}/auditing.html[Auditing Security Events]. + +[[general-audit-settings]] +==== General Auditing Settings + +`xpack.security.audit.enabled`:: +Set to `true` to enable auditing on the node. The default value is `false`. + +`xpack.security.audit.outputs`:: +Specifies where audit logs are output. For example: `[ index, logfile ]`. The +default value is `logfile`, which puts the auditing events in a dedicated +`_access.log` file on the node. You can also specify `index`, which +puts the auditing events in an {es} index that is prefixed with +`.security_audit_log`. The index can reside on the same cluster or a separate +cluster. ++ +-- +TIP: If the index is unavailable, it is possible for auditing events to +be lost. The `index` output type should therefore be used in conjunction with +the `logfile` output type and the latter should be the official record of events. + +-- + +[[event-audit-settings]] +==== Audited Event Settings + +The events and some other information about what gets logged can be +controlled by using the following settings: + +`xpack.security.audit.logfile.events.include`:: +Specifies which events to include in the auditing output. The default value is: +`access_denied, access_granted, anonymous_access_denied, authentication_failed, connection_denied, tampered_request, run_as_denied, run_as_granted`. + +`xpack.security.audit.logfile.events.exclude`:: +Excludes the specified events from the output. By default, no events are +excluded. + +`xpack.security.audit.logfile.events.emit_request_body`:: +Specifies whether to include the request body from REST requests on certain +event types such as `authentication_failed`. The default value is `false`. ++ +-- +IMPORTANT: No filtering is performed when auditing, so sensitive data may be +audited in plain text when including the request body in audit events. + +-- + +[[node-audit-settings]] +==== Local Node Info Settings + +`xpack.security.audit.logfile.prefix.emit_node_name`:: +Specifies whether to include the node's name in the local node info. The +default value is `true`. + +`xpack.security.audit.logfile.prefix.emit_node_host_address`:: +Specifies whether to include the node's IP address in the local node info. The +default value is `false`. 
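++
+--
+As a sketch, the following `elasticsearch.yml` snippet enables auditing to the
+log file and adds both the node name and its IP address to each audit entry
+(the values shown are examples only):
+
+[source,yaml]
+----------------------------
+xpack.security.audit.enabled: true
+xpack.security.audit.outputs: [ logfile ]
+xpack.security.audit.logfile.prefix.emit_node_name: true
+xpack.security.audit.logfile.prefix.emit_node_host_address: true
+----------------------------
+--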
+ +`xpack.security.audit.logfile.prefix.emit_node_host_name`:: +Specifies whether to include the node's host name in the local node info. The +default value is `false`. + +[[index-audit-settings]] +==== Audit Log Indexing Configuration Settings + +`xpack.security.audit.index.bulk_size`:: +Controls how many audit events are batched into a single write. The default +value is `1000`. + +`xpack.security.audit.index.flush_interval`:: +Controls how often buffered events are flushed to the index. The default value +is `1s`. + +`xpack.security.audit.index.rollover`:: +Controls how often to roll over to a new index: `hourly`, `daily`, `weekly`, or +`monthly`. The default value is `daily`. + +`xpack.security.audit.index.events.include`:: +Specifies the audit events to be indexed. The default value is +`anonymous_access_denied, authentication_failed, realm_authentication_failed, access_granted, access_denied, tampered_request, connection_granted, connection_denied, run_as_granted, run_as_denied`. +See {xpack-ref}/auditing.html#audit-event-types[Audit Entry Types] for the +complete list. + +`xpack.security.audit.index.events.exclude`:: +Excludes the specified auditing events from indexing. By default, no events are +excluded. + +`xpack.security.audit.index.events.emit_request_body`:: +Specifies whether to include the request body from REST requests on certain +event types such as `authentication_failed`. The default value is `false`. + +`xpack.security.audit.index.settings`:: +Specifies settings for the indices that the events are stored in. For example, +the following configuration sets the number of shards and replicas to 1 for the +audit indices: ++ +-- +[source,yaml] +---------------------------- +xpack.security.audit.index.settings: + index: + number_of_shards: 1 + number_of_replicas: 1 +---------------------------- +-- + +[[remote-audit-settings]] +==== Remote Audit Log Indexing Configuration Settings + +To index audit events to a remote {es} cluster, you configure the following +`xpack.security.audit.index.client` settings: + +`xpack.security.audit.index.client.hosts`:: +Specifies a comma-separated list of `host:port` pairs. These hosts should be +nodes in the remote cluster. + +`xpack.security.audit.index.client.cluster.name`:: +Specifies the name of the remote cluster. + +`xpack.security.audit.index.client.xpack.security.user`:: +Specifies the `username:password` pair that is used to authenticate with the +remote cluster. + +You can pass additional settings to the remote client by specifying them in the +`xpack.security.audit.index.client` namespace. 
For example, to allow the remote +client to discover all of the nodes in the remote cluster you can specify the +`client.transport.sniff` setting: + +[source,yaml] +---------------------------- +xpack.security.audit.index.client.transport.sniff: true +---------------------------- diff --git a/x-pack/docs/en/settings/configuring-xes.asciidoc b/x-pack/docs/en/settings/configuring-xes.asciidoc new file mode 100644 index 0000000000000..29c6b95dddf0f --- /dev/null +++ b/x-pack/docs/en/settings/configuring-xes.asciidoc @@ -0,0 +1,12 @@ +[role="xpack"] +[[settings-xpack]] +== {xpack} Settings in {es} +++++ +{xpack} Settings +++++ + +include::{asciidoc-dir}/../../shared/settings.asciidoc[] +include::license-settings.asciidoc[] +include::ml-settings.asciidoc[] +include::notification-settings.asciidoc[] +include::sql-settings.asciidoc[] diff --git a/x-pack/docs/en/settings/images/monitoring-es-cgroup-true.png b/x-pack/docs/en/settings/images/monitoring-es-cgroup-true.png new file mode 100644 index 0000000000000..c8412642db555 Binary files /dev/null and b/x-pack/docs/en/settings/images/monitoring-es-cgroup-true.png differ diff --git a/x-pack/docs/en/settings/license-settings.asciidoc b/x-pack/docs/en/settings/license-settings.asciidoc new file mode 100644 index 0000000000000..791d3f61d4598 --- /dev/null +++ b/x-pack/docs/en/settings/license-settings.asciidoc @@ -0,0 +1,19 @@ +[role="xpack"] +[[license-settings]] +=== {xpack} License Settings +++++ +License Settings +++++ + +You can configure this licensing setting in the `elasticsearch.yml` file. +For more information, see +{xpack-ref}/license-management.html[{xpack} License Management]. + +`xpack.license.self_generated.type`:: +Set to `basic` (default) to enable basic {xpack} features. + ++ +-- +If set to `trial`, the self-generated license gives access only to all the features +of a x-pack for 30 days. You can later downgrade the cluster to a basic license if +needed. +-- diff --git a/x-pack/docs/en/settings/ml-settings.asciidoc b/x-pack/docs/en/settings/ml-settings.asciidoc new file mode 100644 index 0000000000000..ff5ec6f205e83 --- /dev/null +++ b/x-pack/docs/en/settings/ml-settings.asciidoc @@ -0,0 +1,64 @@ +[role="xpack"] +[[ml-settings]] +=== Machine Learning Settings in Elasticsearch +++++ +Machine Learning Settings +++++ + +You do not need to configure any settings to use {ml}. It is enabled by default. + +[float] +[[general-ml-settings]] +==== General Machine Learning Settings + +`node.ml`:: +Set to `true` (default) to identify the node as a _machine learning node_. + ++ +If set to `false` in `elasticsearch.yml`, the node cannot run jobs. If set to +`true` but `xpack.ml.enabled` is set to `false`, the `node.ml` setting is +ignored and the node cannot run jobs. If you want to run jobs, there must be at +least one machine learning node in your cluster. + ++ +IMPORTANT: On dedicated coordinating nodes or dedicated master nodes, disable +the `node.ml` role. + +`xpack.ml.enabled`:: +Set to `true` (default) to enable {ml} on the node. + ++ +If set to `false` in `elasticsearch.yml`, the {ml} APIs are disabled on the node. +Therefore the node cannot open jobs, start {dfeeds}, or receive transport (internal) +communication requests related to {ml} APIs. It also affects all {kib} instances +that connect to this {es} instance; you do not need to disable {ml} in those +`kibana.yml` files. For more information about disabling {ml} in specific {kib} +instances, see +{kibana-ref}/ml-settings-kb.html[{kib} Machine Learning Settings]. 
++ +IMPORTANT: If you want to use {ml} features in your cluster, you must have +`xpack.ml.enabled` set to `true` on all master-eligible nodes. This is the +default behavior. + +`xpack.ml.max_open_jobs`:: +The maximum number of jobs that can run on a node. Defaults to `20`. +The maximum number of jobs is also constrained by memory usage, so fewer +jobs than specified by this setting will run on a node if the estimated +memory use of the jobs would be higher than allowed. + +`xpack.ml.max_machine_memory_percent`:: +The maximum percentage of the machine's memory that {ml} may use for running +analytics processes. (These processes are separate to the {es} JVM.) Defaults to +`30` percent. The limit is based on the total memory of the machine, not current +free memory. Jobs will not be allocated to a node if doing so would cause the +estimated memory use of {ml} jobs to exceed the limit. + +`xpack.ml.max_model_memory_limit`:: +The maximum `model_memory_limit` property value that can be set for any job on +this node. If you try to create a job with a `model_memory_limit` property value +that is greater than this setting value, an error occurs. Existing jobs are not +affected when you update this setting. For more information about the +`model_memory_limit` property, see <>. + +`xpack.ml.node_concurrent_job_allocations`:: +The maximum number of jobs that can concurrently be in the `opening` state on +each node. Typically, jobs spend a small amount of time in this state before +they move to `open` state. Jobs that must restore large models when they are +opening spend more time in the `opening` state. Defaults to `2`. diff --git a/x-pack/docs/en/settings/monitoring-settings.asciidoc b/x-pack/docs/en/settings/monitoring-settings.asciidoc new file mode 100644 index 0000000000000..5c812b6f53c66 --- /dev/null +++ b/x-pack/docs/en/settings/monitoring-settings.asciidoc @@ -0,0 +1,291 @@ +[role="xpack"] +[[monitoring-settings]] +=== Monitoring Settings in Elasticsearch +++++ +Monitoring Settings +++++ + +By default, monitoring is enabled but data collection is disabled. To enable +data collection, use the `xpack.monitoring.collection.enabled` setting. + +You can configure these monitoring settings in the `elasticsearch.yml` file. +Some of them can also be set across the cluster by using the +<>. + +TIP: Cluster settings take precedence over settings in the `elasticsearch.yml` +file. + +To adjust how monitoring data is displayed in the monitoring UI, configure +{kibana-ref}/monitoring-settings-kb.html[`xpack.monitoring` settings] in +`kibana.yml`. To control how monitoring data is collected from +Logstash, configure +{logstash-ref}/configuring-logstash.html#monitoring-settings[`xpack.monitoring` settings] +in `logstash.yml`. + +For more information, see +{xpack-ref}/xpack-monitoring.html[Monitoring the Elastic Stack]. + +[float] +[[general-monitoring-settings]] +==== General Monitoring Settings +`xpack.monitoring.enabled`:: +Set to `true` (default) to enable {es} {monitoring} for {es} on the node. ++ +-- +NOTE: To enable data collection, you must also set `xpack.monitoring.collection.enabled` +to `true`. Its default value is `false`. +-- + +[float] +[[monitoring-collection-settings]] +==== Monitoring Collection Settings + +The `xpack.monitoring.collection` settings control how data is collected from +your Elasticsearch nodes. + +`xpack.monitoring.collection.enabled`:: + +added[6.3.0] Set to `true` to enable the collection of monitoring data. 
When +this setting is `false` (default), {es} monitoring data is not collected and +all monitoring data from other sources such as {kib}, Beats, and Logstash is +ignored. ++ +You can update this setting through the +<>. + +`xpack.monitoring.collection.interval`:: + +Setting to `-1` to disable data collection is no longer supported beginning with +7.0.0. deprecated[6.3.0, Use `xpack.monitoring.collection.enabled` set to +`false` instead.] ++ +Controls how often data samples are collected. Defaults to `10s`. If you +modify the collection interval, set the `xpack.monitoring.min_interval_seconds` +option in `kibana.yml` to the same value. ++ +You can update this setting through the +<>. + +`xpack.monitoring.collection.cluster.stats.timeout`:: + +Sets the timeout for collecting the cluster statistics. Defaults to `10s`. + +`xpack.monitoring.collection.indices`:: + +Controls which indices Monitoring collects data from. Defaults to all indices. Specify the index names +as a comma-separated list, for example `test1,test2,test3`. Names can include wildcards, for +example `test*`. You can explicitly include or exclude indices by prepending +`+` to include the index, or `-` to exclude the index. For example, to include all indices that +start with `test` except `test3`, you could specify `+test*,-test3`. ++ +You can update this setting through the +<>. + +`xpack.monitoring.collection.index.stats.timeout`:: + +Sets the timeout for collecting index statistics. Defaults to `10s`. + +`xpack.monitoring.collection.indices.stats.timeout`:: + +Sets the timeout for collecting total indices statistics. Defaults to `10s`. + +`xpack.monitoring.collection.index.recovery.active_only`:: + +Controls whether or not all recoveries are collected. Set to `true` to +collect only active recoveries. Defaults to `false`. + +`xpack.monitoring.collection.index.recovery.timeout`:: + +Sets the timeout for collecting the recovery information. Defaults to `10s`. + +`xpack.monitoring.history.duration`:: + +Sets the retention duration beyond which the indices created by a Monitoring +exporter are automatically deleted. Defaults to `7d` (7 days). ++ +-- +This setting has a minimum value of `1d` (1 day) to ensure that something is +being monitored, and it cannot be disabled. + +IMPORTANT: This setting currently only impacts `local`-type exporters. Indices created using +the `http` exporter will not be deleted automatically. + +If both {monitoring} and {watcher} are enabled, you can use this setting to +affect the {watcher} cleaner service too. For more information, see the +`xpack.watcher.history.cleaner_service.enabled` setting in the +<>. +-- + +`xpack.monitoring.exporters`:: + +Configures where the agent stores monitoring data. By default, the agent uses a +local exporter that indexes monitoring data on the cluster where it is installed. +Use an HTTP exporter to send data to a separate monitoring cluster. For more +information, see <>, +<>, and +{xpack-ref}/how-monitoring-works.html[How Monitoring Works]. + +[float] +[[local-exporter-settings]] +==== Local Exporter Settings + +The `local` exporter is the default exporter used by Monitoring. As the name is +meant to imply, it exports data to the _local_ cluster, which means that there +is not much needed to be configured. + +If you do not supply _any_ exporters, then Monitoring will automatically create +one for you. If any exporter is provided, then no default is added. 
+ +[source,yaml] +---------------------------------- +xpack.monitoring.exporters.my_local: + type: local +---------------------------------- + +`type`:: + +The value for a Local exporter must always be `local` and it is required. + +`use_ingest`:: + +Whether to supply a placeholder pipeline to the cluster and a pipeline processor with +every bulk request. The default value is `true`. If disabled, then it means that it will not +use pipelines, which means that a future release cannot automatically upgrade bulk requests +to future-proof them. + +`cluster_alerts.management.enabled`:: + +Whether to create cluster alerts for this cluster. The default value is `true`. +To use this feature, {watcher} must be enabled. If you have a basic license, +cluster alerts are not displayed. + +[float] +[[http-exporter-settings]] +==== HTTP Exporter Settings + +The following lists settings that can be supplied with the `http` exporter. +All settings are shown as what follows the name you select for your exporter: + +[source,yaml] +---------------------------------- +xpack.monitoring.exporters.my_remote: + type: http + host: ["host:port", ...] +---------------------------------- + +`type`:: + +The value for an HTTP exporter must always be `http` and it is required. + +`host`:: + +Host supports multiple formats, both as an array or as a single value. Supported formats include +`hostname`, `hostname:port`, `http://hostname` `http://hostname:port`, `https://hostname`, and +`https://hostname:port`. Hosts cannot be assumed. The default scheme is always `http` and the default +port is always `9200` if not supplied as part of the `host` string. ++ +[source,yaml] +---------------------------------- +xpack.monitoring.exporters: + example1: + type: http + host: "10.1.2.3" + example2: + type: http + host: ["http://10.1.2.4"] + example3: + type: http + host: ["10.1.2.5", "10.1.2.6"] + example4: + type: http + host: ["https://10.1.2.3:9200"] +---------------------------------- + +`auth.username`:: + +The username is required if a `auth.password` is supplied. + +`auth.password`:: + +The password for the `auth.username`. + +`connection.timeout`:: + +The amount of time that the HTTP connection is supposed to wait for a socket to open for the +request. The default value is `6s`. + +`connection.read_timeout`:: + +The amount of time that the HTTP connection is supposed to wait for a socket to +send back a response. The default value is `10 * connection.timeout` (`60s` if neither are set). + +`ssl`:: + +Each HTTP exporter can define its own TLS / SSL settings or inherit them. See the +<>. + +`proxy.base_path`:: + +The base path to prefix any outgoing request, such as `/base/path` (e.g., bulk requests would +then be sent as `/base/path/_bulk`). There is no default value. + +`headers`:: + +Optional headers that are added to every request, which can assist with routing requests through +proxies. ++ +[source,yaml] +---------------------------------- +xpack.monitoring.exporters.my_remote: + headers: + X-My-Array: [abc, def, xyz] + X-My-Header: abc123 +---------------------------------- ++ +Array-based headers are sent `n` times where `n` is the size of the array. `Content-Type` +and `Content-Length` cannot be set. Any headers created by the Monitoring agent will override +anything defined here. + +`index.name.time_format`:: + +A mechanism for changing the default date suffix for the, by default, daily Monitoring indices. +The default value is `YYYY.MM.DD`, which is why the indices are created daily. 
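++
+--
+For example, a hypothetical exporter that rolls its monitoring indices over
+monthly rather than daily might look like this sketch (the exporter name and
+host are placeholders):
+
+[source,yaml]
+----------------------------------
+xpack.monitoring.exporters.my_remote:
+  type: http
+  host: ["10.1.2.3:9200"]
+  index.name.time_format: YYYY.MM
+----------------------------------
+--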
+ +`use_ingest`:: + +Whether to supply a placeholder pipeline to the monitoring cluster and a pipeline processor with +every bulk request. The default value is `true`. If disabled, then it means that it will not +use pipelines, which means that a future release cannot automatically upgrade bulk requests +to future-proof them. + +`cluster_alerts.management.enabled`:: + +Whether to create cluster alerts for this cluster. The default value is `true`. +To use this feature, {watcher} must be enabled. If you have a basic license, +cluster alerts are not displayed. + +`cluster_alerts.management.blacklist`:: + +Prevents the creation of specific cluster alerts. It also removes any applicable +watches that already exist in the current cluster. + ++ +-- +You can add any of the following watch identifiers to the blacklist: + +* `elasticsearch_cluster_status` +* `elasticsearch_version_mismatch` +* `elasticsearch_nodes` +* `kibana_version_mismatch` +* `logstash_version_mismatch` +* `xpack_license_expiration` + +For example: `["elasticsearch_version_mismatch","xpack_license_expiration"]`. +-- + +[[ssl-monitoring-settings]] +:ssl-prefix: xpack.monitoring.exporters.$NAME +:component: {monitoring} +:verifies: +:server!: + +include::ssl-settings.asciidoc[] diff --git a/x-pack/docs/en/settings/notification-settings.asciidoc b/x-pack/docs/en/settings/notification-settings.asciidoc new file mode 100644 index 0000000000000..7a3d832ed3451 --- /dev/null +++ b/x-pack/docs/en/settings/notification-settings.asciidoc @@ -0,0 +1,357 @@ +[role="xpack"] +[[notification-settings]] +=== {watcher} Settings in Elasticsearch +++++ +{watcher} Settings +++++ + +You configure {watcher} settings to set up {watcher} and send notifications via +<>, +<>, +<>, and +<>. + +All of these settings can be added to the `elasticsearch.yml` configuration file, +with the exception of the secure settings, which you add to the {es} keystore. +For more information about creating and updating the {es} keystore, see +<>. + +[float] +[[general-notification-settings]] +==== General Watcher Settings +`xpack.watcher.enabled`:: +Set to `false` to disable {watcher} on the node. + +`xpack.watcher.encrypt_sensitive_data`:: +Set to `true` to encrypt sensitive data. If this setting is enabled, you +must also specify the `xpack.watcher.encryption_key` setting. For more +information, see +{xpack-ref}/encrypting-data.html[Encrypting sensitive data in {watcher}]. + +`xpack.watcher.encryption_key` (<>):: +Specifies the path to a file that contains a key for encrypting sensitive data. +If `xpack.watcher.encrypt_sensitive_data` is set to `true`, this setting is +required. For more information, see +{xpack-ref}/encrypting-data.html[Encrypting sensitive data in {watcher}]. + +`xpack.watcher.history.cleaner_service.enabled`:: +added[6.3.0,Default changed to `true`.] ++ +Set to `true` (default) to enable the cleaner service. If this setting is +`true`, the `xpack.monitoring.enabled` setting must also be set to `true` with +a local exporter enabled. The cleaner service removes previous versions of +{watcher} indices (for example, `.watcher-history*`) when it determines that +they are old. The duration of {watcher} indices is determined by the +`xpack.monitoring.history.duration` setting, which defaults to 7 days. For +more information about that setting, see <>. + +`xpack.http.proxy.host`:: +Specifies the address of the proxy server to use to connect to HTTP services. + +`xpack.http.proxy.port`:: +Specifies the port number to use to connect to the proxy server. 
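++
+--
+For example, to route {watcher} HTTP requests through a proxy (the host and
+port below are placeholders), you might add the following to
+`elasticsearch.yml`:
+
+[source,yaml]
+--------------------------------------------------
+xpack.http.proxy.host: proxy.example.com
+xpack.http.proxy.port: 8080
+--------------------------------------------------
+--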
+ +`xpack.http.default_connection_timeout`:: +The maximum period to wait until abortion of the request, when a +connection is being initiated. + +`xpack.http.default_read_timeout`:: +The maximum period of inactivity between two data packets, before the +request is aborted. + +`xpack.http.max_response_size`:: +Specifies the maximum size a HTTP response is allowed to have, defaults to +`10mb`, the maximum configurable value is `50mb`. + +[[ssl-notification-settings]] +:ssl-prefix: xpack.http +:component: {watcher} +:verifies: +:server!: + +include::ssl-settings.asciidoc[] + +[float] +[[email-notification-settings]] +==== Email Notification Settings +You can configure the following email notification settings in +`elasticsearch.yml`. For more information about sending notifications +via email, see {xpack-ref}/actions-email.html#configuring-email-actions[Configuring Email]. + +`xpack.notification.email.account`:: +Specifies account information for sending notifications via email. You +can specify the following email account attributes: + +[[email-account-attributes]] + `profile`;; + The {xpack-ref}/actions-email.html#configuring-email[email profile] to use to build the MIME + messages that are sent from the account. Valid values: `standard`, `gmail` and + `outlook`. Defaults to `standard`. + + `email_defaults.*`;; + An optional set of email attributes to use as defaults + for the emails sent from the account. See {xpack-ref}/actions-email.html#email-action-attributes[ + Email Action Attributes] for the supported attributes. + + `smtp.auth`;; + Set to `true` to attempt to authenticate the user using the + AUTH command. Defaults to `false`. + + `smtp.host`;; + The SMTP server to connect to. Required. + + `smtp.port`;; + The SMTP server port to connect to. Defaults to 25. + + `smtp.user`;; + The user name for SMTP. Required. + + `smtp.password`;; + The password for the specified SMTP user. + + `smtp.starttls.enable`;; + Set to `true` to enable the use of the `STARTTLS` + command (if supported by the server) to switch the connection to a + TLS-protected connection before issuing any login commands. Note that + an appropriate trust store must configured so that the client will + trust the server's certificate. Defaults to `false`. + + `smtp.starttls.required`;; + If `true`, then `STARTTLS` will be required. If that command fails, the + connection will fail. Defaults to `false`. + + `smtp.timeout`;; + The socket read timeout. Default is two minutes. + + `smtp.connection_timeout`;; + The socket connection timeout. Default is two minutes. + + `smtp.write_timeout`;; + The socket write timeout. Default is two minutes. + + `smtp.local_address`;; + A configurable local address when sending emails. Not configured by default. + + `smtp.local_port`;; + A configurable local port when sending emails. Not configured by default. + + `smtp.send_partial`;; + Send an email, despite one of the receiver addresses being invalid. + + `smtp.wait_on_quit`;; + If set to false the QUIT command is sent and the connection closed. If set to + true, the QUIT command is sent and a reply is waited for. True by default. + +`xpack.notification.email.html.sanitization.allow`:: +Specifies the HTML elements that are allowed in email notifications. For +more information, see {xpack-ref}/actions-email.html#email-html-sanitization[Configuring HTML +Sanitization Options]. 
You can specify individual HTML elements +and the following HTML feature groups: + +[[html-feature-groups]] + `_tables`;; + All table related elements: ``, `` + and ` elements. +* Fixed the Watcher/Marvel examples in the documentation. + +[float] +==== 2.3.3 +May 18, 2016 + +.Enhancements +* Adds support for Elasticsearch 2.3.3 + +[float] +==== 2.3.2 +April 26, 2016 + +.Bug Fixes +* All SMTP connection timeouts are now set to two minutes by default to prevent +a watch from getting stuck. +* HTTP headers from responses that contained dots led to exceptions when the +HTTP response was stored in the watch history. All dots in any header names +are now replaced with underscores. For example, a header called `foo.bar` +becomes `foo_bar` +* Hipchat action: Fall back to the default Hipchat color and format if they +are not specified at the account level or within the action itself, instead +of failing. + +[float] +==== 2.3.1 +April 4, 2016 + +.Enhancements +* Adds support for Elasticsearch 2.3.1 + +[float] +==== 2.3.0 +March 30, 2016 + +.Bug fixes +* The http client does not do any URL escaping by itself anymore, preventing + potential wrong double escapes. + +.Enhancement +* Support `url` in http requests as a shortcut for `path`, `scheme`, `port`, `params` +* Support `ignore_condition` and `record_execution` as parameters in the + {ref}/watcher-api-execute-watch.html[Execute Watch API] + +.New Features +* Added <> +* Added support for adding <> + via HTTP requests and superceding and deprecating the usage of `attach_data` + in order to use this feature + +[float] +==== 2.2.1 +March 10, 2016 + +.Bug Fixes +* The `croneval` CLI tool sets the correct environment to run + +[float] +==== 2.2.0 +February 2, 2016 + +.Enhancements +* Adds support for Elasticsearch 2.2.0. + +[float] +==== 2.1.2 +February 2, 2016 + +.Enhancements +* Adds support for Elasticssearch 2.1.2 + +[float] +==== 2.1.1 +December 17, 2015 + +.Bug Fixes +* Fixed an issue that prevented sending of emails + +[float] +==== 2.1.0 +November 24, 2015 + +.New Features +* Adds support for <> + +.Enhancements +* Adds support for Elasticsearch 2.1.0. +* Adds support for configuring a proxy in the webhook action, http input and + configuring a default proxy (which is also used by the slack action), using the + `watcher.http.proxy.host` and `watcher.http.proxy.port` settings. + +.Bug Fixes +* Fixed an issue where the scheduler may get stuck during Watcher startup. This + caused no watches to ever fire. +* Fixed an issue where under specific conditions Watcher would not start if there + are not finished watch executions from the previous time that watcher was + running and those watch execution are unable the execute during the current + start process. + +[float] +==== 2.0.1 +November 24, 2015 + +.Enhancement +* Adds support for Elasticsearch 2.0.1. + +.Bug fixes +* Fixed an issue where under specific conditions Watcher would not start if + there are not finished watch executions from the previous time that watcher + was running and those watch execution are unable the execute during the current + start process. + +[float] +==== 2.0.0 +October 28, 2015 + +.Breaking Changes +* The dynamic index names support has been removed and Elasticsearch's date math + index names support should be used instead. The only difference between Watcher's + dynamic index names support and Elasticsearch's date math index names support is + how timezones are expressed. 
In Watcher this is done via node settings, in + Elasticsearch the timezone is part of the date math index names support. Only + if you're using dynamic index names with timezones in Watcher then you need to + upgrade your watches after the upgrade, otherwise your watches will work as + they did before the upgrade. For example if `watcher.dynamic_indices.time_zone` + setting was set to `+01:00` and a watch has the following index name + `` then after the upgrade you need to update this watch to + use the following index name ``. + +.New Features +* Added new <> +* Added new <> +* Watches now have an <>. In addition, a new + API was added to {ref}/watcher-api-activate-watch.html[activate] + /{ref}watcher-api-deactivate-watch.html[deactivate] registered watches. +* Added new <>, that can compare an array + of values in the <> + to a given value. + +.Enhancements +* Watcher continuously checks if the index templates for `.watches`, + `.triggered_watches` and `.watch_history-*` exist. Whereas before the existence + of these index templates was only checked at Watcher startup time. The absence + of these index templates leads to watcher data being indexed incorrectly, which + then can cause Watcher to behave incorrectly. +* If Watcher was stopped via the stop Watcher api and after that a master + election took place then Watcher would then unexpectedly start. +* During Watcher start up only wait for the shards of the `.watches` and + `.triggered_watches` indices to be available. Before Watcher also waited for + the shards of the `.watch_history-*` indices, which wasn't needed. This + improved time it takes for Watcher to startup. +* If `action.auto_create_index` setting has been configured then Watcher will + check if the setting is too restrictive. If the `action.auto_create_index` is + too restrictive then Watcher will fail during startup with a descriptive error + message. + +.Bug Fixes +* If Watcher was installed with Security then the Watcher index templates couldn't + be stored and could lead to Watcher behaving incorrectly. This was caused by + Watcher not detecting correctly if Security was installed. +* Update `croneval` command line utility to properly handle whitespaces in the + elasticsearch home path. +* Fixed an issue where the scheduler may get stuck during Watcher startup. This + caused no watches to ever fire. +* Fixed url encoding issue in http input and webhook output. The url params were + url encoded twice. + +[float] +==== 1.0.1 +July 29, 2015 + +.Enhancements +* Dynamic index names now support specifying a time zone to be used when + computing the names of the indices. The default is UTC. Previously, the + computation was fixed to always use UTC when computing the names of the + indices. + +.Bug Fixes +* Fixed a compatibility issue with Elasticsearch 1.6.1 and 1.7.2, which were + released earlier today. + +[float] +==== 1.0.0 +June 25, 2015 + +.Enhancements +* Added execution time aware dynamic index names support to `index` + action, `search` input, and `search` transform. +* You must now explicitly specify the unit when configuring any time value. + (Numeric-only values are no longer supported.) +* Cleaned up the {ref}/watcher-api-get-watch.html[Get Watch API] response. +* Cleaned up the <> response. + + +[float] +==== 1.0.0-rc1 +June 19, 2015 + +.New Features +* Added <> support to the Execute API + +.Enhancements +* Added execution context <> support. +* Email html body sanitization is now <>. 
+* It is now possible to configure timeouts for http requests in + <> and <>. + +[float] +==== 1.0.0-Beta2 +June 10, 2015 + +.New Features +* <> are now applied at the action + level rather than the watch level. +* Added support for <> + indexing to the index action. +* Added a queued watches metric that's accessible via the <>. +* Added a currently-executing watches metric that's accessible via the + <>. + +.Enhancements +* The <> result now includes the value of + each field that was referenced in the comparison. +* The <> now supports a default trigger + event (**breaking change**). +* The `watch_record` document structure in the `.watch_history-*` indices has + changed significantly (**breaking change**). +* A new internal index was introduced - `.triggered_watches` +* Added support for headers in the <> result + and the <> result. +* Add plain text response body support for the <>. + +.Bug Fixes +* Disallow negative time value settings for <> +* Added support for separate keystore and truststore in <> + and <>. diff --git a/x-pack/docs/en/watcher/transform.asciidoc b/x-pack/docs/en/watcher/transform.asciidoc new file mode 100644 index 0000000000000..1b99d595b9c8f --- /dev/null +++ b/x-pack/docs/en/watcher/transform.asciidoc @@ -0,0 +1,62 @@ +[[transform]] +== Transforms + +A _Transform_ processes and changes the payload in the watch execution context +to prepare it for the watch actions. {watcher} supports three types of +transforms: <>, +<> and <>. + + +NOTE: Transforms are optional. When none are defined, the actions have access to + the payload as loaded by the watch input. + +You can define transforms in two places: + +* As a top level construct in the watch definition. In this case, the payload is + transformed before any of the watch actions are executed. + +* As part of the definition of an action. In this case, the payload is + transformed before that action is executed. The transformation is only applied + to the payload for that specific action. + +If all actions require the same view of the payload, define a transform as part +of the watch definition. If each action requires a different view of the payload, +define different transforms as part of the action definitions so each action has +the payload prepared by its own dedicated transform. + +The following example defines two transforms, one at the watch level and one as +part of the definition of the `my_webhook` action. + +[source,js] +-------------------------------------------------- +{ + "trigger" : { ...} + "input" : { ... }, + "condition" : { ... 
},
+  "transform" : { <1>
+    "search" : {
+      "body" : { "query" : { "match_all" : {} } }
+    }
+  },
+  "actions" : {
+    "my_webhook": {
+      "transform" : { <2>
+        "script" : "return ctx.payload.hits"
+      },
+      "webhook" : {
+        "host" : "host.domain",
+        "port" : 8089,
+        "path" : "/notify/{{ctx.watch_id}}"
+      }
+    }
+  }
+}
+--------------------------------------------------
+<1> A watch level `transform`
+<2> An action level `transform`
+
+include::transform/search.asciidoc[]
+
+include::transform/script.asciidoc[]
+
+include::transform/chain.asciidoc[]
\ No newline at end of file
diff --git a/x-pack/docs/en/watcher/transform/chain.asciidoc b/x-pack/docs/en/watcher/transform/chain.asciidoc
new file mode 100644
index 0000000000000..f17b05c71b4cc
--- /dev/null
+++ b/x-pack/docs/en/watcher/transform/chain.asciidoc
@@ -0,0 +1,45 @@
+[[transform-chain]]
+=== Chain Transform
+
+A <> that executes an ordered list of configured transforms
+in a chain, where the output of one transform serves as the input of the next
+transform in the chain. The payload that is accepted by this transform serves as
+the input of the first transform in the chain and the output of the last transform
+in the chain is the output of the `chain` transform as a whole.
+
+You can use chain transforms to build more complex transforms out of the other
+available transforms. For example, you can combine a <>
+transform and a <> transform, as shown in the
+following snippet:
+
+[source,js]
+--------------------------------------------------
+"transform" : {
+  "chain" : [ <1>
+    {
+      "search" : { <2>
+        "indices" : [ "logstash-*" ],
+        "body" : {
+          "size" : 0,
+          "query" : {
+            "match" : { "priority" : "error" }
+          }
+        }
+      }
+    },
+    {
+      "script" : "return [ error_count : ctx.payload.hits.total ]" <3>
+    }
+  ]
+}
+--------------------------------------------------
+<1> The `chain` transform definition
+<2> The first transform in the chain (in this case, a `search` transform)
+<3> The second and final transform in the chain (in this case, a `script`
+    transform)
+
+This example executes a `count` search on the cluster to look for `error` events.
+The search results are then passed to the second `script` transform. The `script`
+transform extracts the total hit count and assigns it to the `error_count` field
+in a newly-generated payload. This new payload is the output of the `chain`
+transform and replaces the payload in the watch execution context.
diff --git a/x-pack/docs/en/watcher/transform/script.asciidoc b/x-pack/docs/en/watcher/transform/script.asciidoc
new file mode 100644
index 0000000000000..0a3bd401dc744
--- /dev/null
+++ b/x-pack/docs/en/watcher/transform/script.asciidoc
@@ -0,0 +1,63 @@
+[[transform-script]]
+=== Script Transform
+
+A <> that executes a script on the current payload in the
+watch execution context and replaces it with a newly generated one. The following
+snippet shows how a simple script transform can be defined on the watch level:
+
+TIP: The `script` transform is often useful when used in combination with the
+     <> transform, where the script can extract only
+     the significant data from a search result and thereby keep the payload
+     minimal. This can be achieved with the <>
+     transform.
+
+
+[source,js]
+--------------------------------------------------
+{
+  "transform" : {
+    "script" : "return [ 'time' : ctx.trigger.scheduled_time ]" <1>
+  }
+}
+--------------------------------------------------
+<1> A simple `painless` script that creates a new payload with a single `time`
+    field holding the scheduled time.
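+
+For example, a sketch of an action level `script` transform (assuming the watch
+payload was produced by a search input or transform, and using a hypothetical
+`log_error_count` logging action) that keeps only the total hit count before the
+action runs:
+
+[source,js]
+--------------------------------------------------
+{
+  "actions" : {
+    "log_error_count" : {
+      "transform" : {
+        "script" : "return [ 'error_count' : ctx.payload.hits.total ]"
+      },
+      "logging" : {
+        "text" : "Found {{ctx.payload.error_count}} errors"
+      }
+    }
+  }
+}
+--------------------------------------------------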
+ +NOTE: The executed script may either return a valid model that is the equivalent + of a Java(TM) Map or a JSON object (you will need to consult the + documentation of the specific scripting language to find out what this + construct is). Any other value that is returned will be assigned and + accessible to/via the `_value` variable. + +The `script` attribute may hold a string value in which case it will be treated +as an inline script and the default elasticsearch script languages will be assumed +(as described in {ref}/modules-scripting.html#modules-scripting[here]). You can +use the other scripting languages supported by Elasticsearch. For this, you need +to set the `script` field to an object describing the script and its language. +The following table lists the possible settings that can be configured: + +[[transform-script-settings]] +.Script Transform Settings +[options="header,footer"] +|====== +| Name |Required | Default | Description + +| `inline` | yes* | - | When using an inline script, this field holds + the script itself. + +| `id` | yes* | - | When referring to a stored script, this + field holds the id of the script. + +| `lang` | no | `painless` | The script language + +| `params` | no | - | Additional parameters/variables that are + accessible by the script + +|====== + +When using the object notation of the script, one (and only one) of `inline`, +or `id` fields must be defined + +NOTE: In addition to the provided `params`, the scripts also have access to the + <>. + diff --git a/x-pack/docs/en/watcher/transform/search.asciidoc b/x-pack/docs/en/watcher/transform/search.asciidoc new file mode 100644 index 0000000000000..eaf7c80c6cbb3 --- /dev/null +++ b/x-pack/docs/en/watcher/transform/search.asciidoc @@ -0,0 +1,175 @@ +[[transform-search]] +=== Search Transform + +A <> that executes a search on the cluster and replaces +the current payload in the watch execution context with the returned search +response. The following snippet shows how a simple search transform can be +defined on the watch level: + +[source,js] +-------------------------------------------------- +{ + "transform" : { + "search" : { + "request" : { + "body" : { "query" : { "match_all" : {} }} + } + } + } +} +-------------------------------------------------- + +Like every other search based construct, one can make use of the full search +API supported by Elasticsearch. For example, the following search transform +execute a search over all events indices, matching events with `error` priority: + +[source,js] +-------------------------------------------------- +{ + "transform" : { + "search" : { + "request" : { + "indices" : [ "events-*" ], + "body" : { + "size" : 0, + "query" : { + "match" : { "priority" : "error"} + } + } + } + } + } +} +-------------------------------------------------- + +The following table lists all available settings for the search transform: + +[[transform-search-settings]] +.Search Transform Settings +[cols=",^,,", options="header"] +|====== +| Name |Required | Default | Description + +| `request.search_type` | no | query_then_fetch | The search {ref}/search-request-search-type.html[type]. + +| `request.indices` | no | all indices | One or more indices to search on. + +| `request.types` | no | all types | One or more document types to search on (may be a + comma-delimited string or an array of document types + names) + +| `request.body` | no | `match_all` query | The body of the request. 
The + {ref}/search-request-body.html[request body] follows + the same structure you normally send in the body of + a REST `_search` request. The body can be static text + or include `mustache` <>. + +| `request.indices_options.expand_wildcards` | no | `open` | Determines how to expand indices wildcards. Can be one + of `open`, `closed`, `none` or `all` + (see {ref}/multi-index.html[multi-index support]) + +| `request.indices_options.ignore_unavailable` | no | `true` | A boolean value that determines whether the search + should leniently ignore unavailable indices + (see {ref}/multi-index.html[multi-index support]) + +| `request.indices_options.allow_no_indices` | no | `true` | A boolean value that determines whether the search + should leniently return no results when no indices + are resolved (see {ref}/multi-index.html[multi-index support]) + +| `request.template` | no | - | The body of the search template. See + <> for more information. + +| `timeout` | no | 30s | The timeout for waiting for the search api call to + return. If no response is returned within this time, + the search transform times out and fails. This setting + overrides the default timeouts. +|====== + +[[transform-search-template]] +==== Template Support + +The search transform support mustache <>. This can either +be as part of the body definition, or alternatively, point to an existing +template (either defined in a file or {ref}/search-template.html#pre-registered-templates[registered] +as a script in Elasticsearch). + +For example, the following snippet shows a search that refers to the scheduled +time of the watch: + +[source,js] +-------------------------------------------------- +{ + "transform" : { + "search" : { + "request" : { + "indices" : [ "logstash-*" ], + "types" : [ "event" ], + "body" : { + "size" : 0, + "query" : { + "bool" : { + "must" : { + "match" : { "priority" : "error"} + }, + "filter" : [ + { + "range" : { + "@timestamp" : { + "from" : "{{ctx.trigger.scheduled_time}}||-30s", + "to" : "{{ctx.trigger.triggered_time}}" + } + } + } + ] + } + } + } + } + } + } +} +-------------------------------------------------- + +The model of the template is a union between the provided `template.params` +settings and the <>. + +The following is an example of using templates that refer to provided parameters: + +[source,js] +-------------------------------------------------- +{ + "transform" : { + "search" : { + "request" : { + "indices" : [ "logstash-*" ], + "types" : [ "event" ], + "template" : { + "source" : { + "size" : 0, + "query" : { + "bool" : { + "must" : { + "match" : { "priority" : "{{priority}}"} + }, + "filter" : [ + { + "range" : { + "@timestamp" : { + "from" : "{{ctx.trigger.scheduled_time}}||-30s", + "to" : "{{ctx.trigger.triggered_time}}" + } + } + } + ] + } + }, + "params" : { + "priority" : "error" + } + } + } + } + } + } +} +-------------------------------------------------- diff --git a/x-pack/docs/en/watcher/trigger.asciidoc b/x-pack/docs/en/watcher/trigger.asciidoc new file mode 100644 index 0000000000000..ee52dbba3bd7a --- /dev/null +++ b/x-pack/docs/en/watcher/trigger.asciidoc @@ -0,0 +1,12 @@ +[[trigger]] +== Triggers + +Every watch must have a `trigger` that defines when the watch execution process +should start. When you create a watch, its trigger is registered with the +appropriate _Trigger Engine_. The trigger engine is responsible for evaluating +the trigger and triggering the watch when needed. 
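+
+For example, a minimal sketch of a watch `trigger` definition (here assuming an
+interval-based schedule that fires every 10 minutes) looks like this:
+
+[source,js]
+--------------------------------------------------
+{
+  "trigger" : {
+    "schedule" : {
+      "interval" : "10m"
+    }
+  }
+}
+--------------------------------------------------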
+ +{watcher} is designed to support different types of triggers, but only time-based +<> triggers are currently available. + +include::trigger/schedule.asciidoc[] diff --git a/x-pack/docs/en/watcher/trigger/schedule.asciidoc b/x-pack/docs/en/watcher/trigger/schedule.asciidoc new file mode 100644 index 0000000000000..7cd38c5fc9ba0 --- /dev/null +++ b/x-pack/docs/en/watcher/trigger/schedule.asciidoc @@ -0,0 +1,41 @@ +[[trigger-schedule]] +=== Schedule Trigger + +Schedule <> define when the watch execution should start based +on date and time. All times are specified in UTC time. + +{watcher} uses the system clock to determine the current time. To ensure schedules +are triggered when expected, you should synchronize the clocks of all nodes in the +cluster using a time service such as http://www.ntp.org/[NTP]. + +Keep in mind that the throttle period can affect when a watch is actually executed. +The default throttle period is five seconds (5000 ms). If you configure a schedule +that's more frequent than the throttle period, the throttle period overrides the +schedule. For example, if you set the throttle period to one minute (60000 ms) +and set the schedule to every 10 seconds, the watch is executed no more than +once per minute. For more information about throttling, see +<>. + +{watcher} provides several types of schedule triggers: + +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +include::schedule/hourly.asciidoc[] + +include::schedule/daily.asciidoc[] + +include::schedule/weekly.asciidoc[] + +include::schedule/monthly.asciidoc[] + +include::schedule/yearly.asciidoc[] + +include::schedule/cron.asciidoc[] + +include::schedule/interval.asciidoc[] diff --git a/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc new file mode 100644 index 0000000000000..c24668a688dca --- /dev/null +++ b/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc @@ -0,0 +1,234 @@ +[[schedule-cron]] +==== `cron` Schedule + +A <> trigger that enables you to use a +https://en.wikipedia.org/wiki/Cron[cron] style expression to specify when you +want the scheduler to start the watch execution. {watcher} uses the cron parser +from the http://www.quartz-scheduler.org[Quartz Job Scheduler]. For more +information about writing Quartz cron expressions, see the +http://www.quartz-scheduler.org/documentation/quartz-2.2.x/tutorials/tutorial-lesson-06.html[Quartz CronTrigger Tutorial]. + +WARNING: While `cron` triggers are super powerful, we recommend using one of + the other schedule types if you can, as they are much more + straightforward to configure. If you use `cron`, construct your `cron` + expressions with care to be sure you are actually setting the schedule + you want. You can use the <> tool to validate + your cron expressions and see what the resulting trigger times will be. + +===== Cron Expressions + +A cron expression is a string of the following form: + +[source,txt] +------------------------------ + [year] +------------------------------ + +All elements are required except for `year`. <> shows +the valid values for each element in a cron expression. 
+ +[[schedule-cron-elements]] +.Cron Expression Elements +[cols=",^,,", options="header"] +|====== +| Name | Required | Valid Values | Valid Special Characters +| `seconds` | yes | `0`-`59` | `,` `-` `*` `/` +| `minutes` | yes | `0`-`59` | `,` `-` `*` `/` +| `hours` | yes | `0`-`23` | `,` `-` `*` `/` +| `day_of_month` | yes | `1`-`31` | `,` `-` `*` `/` `?` `L` `W` +| `month` | yes | `1`-`12` or `JAN`-`DEC` | `,` `-` `*` `/` +| `day_of_week` | yes | `1`-`7` or `SUN`-`SAT` | `,` `-` `*` `/` `?` `L` `#` +| `year` | no | empty or `1970`-`2099 | `,` `-` `*` `/` +|====== + +The special characters you can use in a cron expression are described in +<>. The names of months and days of the week +are not case sensitive. For example, `MON` and `mon` are equivalent. + +NOTE: Currently, you must specify `?` for either the `day_of_week` or + `day_of_month`. Explicitly specifying both values is not supported. + +[[schedule-cron-special-characters]] +.Cron Special Characters +[options="header"] +|====== +| Special Character | Description + +| * | All values. Selects every possible value for a field. For + example, `*` in the `hours` field means "every hour". + +| ? | No specific value. Use when you don't care what the value + is. For example, if you want the schedule to trigger on a + particular day of the month, but don't care what day of + the week that happens to be, you can specify `?` in the + `day_of_week` field. + +| - | A range of values (inclusive). Use to separate a minimum + and maximum value. For example, if you want the schedule + to trigger every hour between 9:00 AM and 5:00 PM, you + could specify `9-17` in the `hours` field. + +| , | Multiple values. Use to separate multiple values for a + field. For example, if you want the schedule to trigger + every Tuesday and Thursday, you could specify `TUE,THU` + in the `day_of_week` field. + +| / | Increment. Use to separate values when specifying a time + increment. The first value represents the starting point, + and the second value represents the interval. For example, + if you want the schedule to trigger every 20 minutes + starting at the top of the hour, you could specify `0/20` + in the `minutes` field. Similarly, specifying `1/5` in + `day_of_month` field will trigger every 5 days starting on + the first day of the month. + +| L | Last. Use in the `day_of_month` field to mean the last day + of the month--day 31 for January, day 28 for February in + non-leap years, day 30 for April, and so on. Use alone in + the `day_of_week` field in place of `7` or `SAT`, or after + a particular day of the week to select the last day of that + type in the month. For example `6L` means the last Friday + of the month. You can specify `LW` in the `day_of_month` + field to specify the last weekday of the month. Avoid using + the `L` option when specifying lists or ranges of values, + as the results likely won't be what you expect. + +| W | Weekday. Use to specify the weekday (Monday-Friday) nearest + the given day. As an example, if you specify `15W` in the + `day_of_month` field and the 15th is a Saturday, the + schedule will trigger on the 14th. If the 15th is a Sunday, + the schedule will trigger on Monday the 16th. If the 15th + is a Tuesday, the schedule will trigger on Tuesday the 15th. + However if you specify `1W` as the value for `day_of_month`, + and the 1st is a Saturday, the schedule will trigger on + Monday the 3rd--it won't jump over the month boundary. 
You
+       can specify `LW` in the `day_of_month` field to specify the
+       last weekday of the month. You can only use the `W` option
+       when the `day_of_month` is a single day--it is not valid
+       when specifying a range or list of days.
+
+| #                 | Nth XXX day in a month. Use in the `day_of_week` field to
+                      specify the nth XXX day of the month. For example, if you
+                      specify `6#1`, the schedule will trigger on the first
+                      Friday of the month. Note that if you specify `3#5` and
+                      there are not 5 Tuesdays in a particular month, the
+                      schedule won't trigger that month.
+
+|======
+
+.Setting Daily Triggers
+[options="header"]
+|======
+| Cron Expression    | Description
+| `0 5 9 * * ?`      | Trigger at 9:05 AM every day.
+| `0 5 9 * * ? 2015` | Trigger at 9:05 AM every day during the year 2015.
+|======
+
+.Restricting Triggers to a Range of Days or Times
+[options="header"]
+|======
+| Cron Expression     | Description
+| `0 5 9 ? * MON-FRI` | Trigger at 9:05 AM Monday through Friday.
+| `0 0-5 9 * * ?`     | Trigger every minute starting at 9:00 AM and ending
+                        at 9:05 AM every day.
+|======
+
+.Setting Interval Triggers
+[options="header"]
+|======
+| Cron Expression   | Description
+| `0 0/15 9 * * ?`  | Trigger every 15 minutes starting at 9:00 AM and ending
+                      at 9:45 AM every day.
+| `0 5 9 1/3 * ?`   | Trigger at 9:05 AM every 3 days every month, starting
+                      on the first day of the month.
+|======
+
+.Setting Schedules that Trigger on a Particular Day
+[options="header"]
+|======
+| Cron Expression     | Description
+| `0 1 4 1 4 ?`       | Trigger every April 1st at 4:01 AM.
+| `0 0,30 9 ? 4 WED`  | Trigger at 9:00 AM and at 9:30 AM every Wednesday in
+                        the month of April.
+| `0 5 9 15 * ?`      | Trigger at 9:05 AM on the 15th day of every month.
+| `0 5 9 15W * ?`     | Trigger at 9:05 AM on the nearest weekday to the 15th
+                        of every month.
+| `0 5 9 ? * 6#1`     | Trigger at 9:05 AM on the first Friday of every month.
+|======
+
+.Setting Triggers Using Last
+[options="header"]
+|======
+| Cron Expression  | Description
+| `0 5 9 L * ?`    | Trigger at 9:05 AM on the last day of every month.
+| `0 5 9 ? * 2L`   | Trigger at 9:05 AM on the last Monday of every month.
+| `0 5 9 LW * ?`   | Trigger at 9:05 AM on the last weekday of every month.
+|======
+
+
+===== Configuring a Cron Schedule
+
+To configure a `cron` schedule, you simply specify the cron expression as a
+string value. For example, the following snippet configures a `cron` schedule
+that triggers every day at noon:
+
+[source,js]
+--------------------------------------------------
+{
+  ...
+  "trigger" : {
+    "schedule" : {
+      "cron" : "0 0 12 * * ?"
+    }
+  }
+  ...
+}
+--------------------------------------------------
+
+===== Configuring a Multiple Times Cron Schedule
+
+To configure a `cron` schedule that triggers multiple times, you can
+specify an array of cron expressions. For example, the following `cron`
+schedule triggers every even minute during weekdays and every odd
+minute during the weekend:
+
+[source,js]
+--------------------------------------------------
+{
+  ...
+  "trigger" : {
+    "schedule" : {
+      "cron" : [
+        "0 0/2 * ? * MON-FRI",
+        "0 1-59/2 * ? * SAT-SUN"
+      ]
+    }
+  }
+  ...
+}
+--------------------------------------------------
+
+[[croneval]]
+===== Verifying Cron Expressions
+
+{xpack} ships with an `elasticsearch-croneval` command line tool that you can use to verify that
+your cron expressions are valid and produce the expected results. This tool is
+provided in the `$ES_HOME/bin/x-pack` directory.
+
+To verify a cron expression, simply pass it in as a parameter to `elasticsearch-croneval`:
+
+[source,bash]
+--------------------------------------------------
+bin/elasticsearch-croneval "0 0/1 * * * ?"
+--------------------------------------------------
+
+If the cron expression is valid, `elasticsearch-croneval` displays the next 10 times that the
+schedule will be triggered.
+
+You can specify the `-c` option to control how many future trigger times are
+displayed. For example, the following command displays the next 20 trigger times:
+
+[source,bash]
+--------------------------------------------------
+bin/elasticsearch-croneval "0 0/1 * * * ?" -c 20
+--------------------------------------------------
diff --git a/x-pack/docs/en/watcher/trigger/schedule/daily.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/daily.asciidoc
new file mode 100644
index 0000000000000..e3165695e6aa8
--- /dev/null
+++ b/x-pack/docs/en/watcher/trigger/schedule/daily.asciidoc
@@ -0,0 +1,91 @@
+[[schedule-daily]]
+==== Daily Schedule
+
+A <> that triggers at a particular time
+every day. To use the `daily` schedule, you specify the time of day (or times)
+when you want the scheduler to start the watch execution with the `at` attribute.
+
+Times are specified in the form `HH:mm` on a 24-hour clock. You can also use the
+reserved values `midnight` and `noon` for `00:00` and `12:00`, and
+<>.
+
+NOTE: If you don't specify the `at` attribute for a `daily` schedule, it defaults
+      to firing once daily at midnight, `00:00`.
+
+===== Configuring a Daily Schedule
+
+To configure a once a day schedule, you specify a single time with the `at`
+attribute. For example, the following `daily` schedule triggers once every
+day at 5:00 PM:
+
+[source,js]
+--------------------------------------------------
+{
+  "trigger" : {
+    "schedule" : {
+      "daily" : { "at" : "17:00" }
+    }
+  }
+}
+--------------------------------------------------
+
+===== Configuring a Multiple Times Daily Schedule
+
+To configure a `daily` schedule that triggers at multiple times during the day,
+you specify an array of times. For example, the following `daily` schedule
+triggers at `00:00`, `12:00`, and `17:00` every day.
+
+[source,js]
+--------------------------------------------------
+{
+  "trigger" : {
+    "schedule" : {
+      "daily" : { "at" : [ "midnight", "noon", "17:00" ] }
+    }
+  }
+}
+--------------------------------------------------
+
+[[specifying-times-using-objects]]
+===== Specifying Times Using Objects
+
+In addition to using the `HH:mm` string syntax to specify times, you can specify
+a time as an object that has `hour` and `minute` attributes.
+
+For example, the following `daily` schedule triggers once every day at 5:00 PM:
+
+[source,js]
+--------------------------------------------------
+{
+  "trigger" : {
+    "schedule" : {
+      "daily" : {
+        "at" : {
+          "hour" : 17,
+          "minute" : 0
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+
+To specify multiple times using the object notation, you specify multiple hours
+or minutes as an array.
For example, following `daily` schedule triggers at +`00:00`, `00:30`, `12:00`, `12:30`, `17:00` and `17:30` every day: + +[source,js] +-------------------------------------------------- +{ + "trigger" : { + "schedule" : { + "daily" : { + "at" { + "hour" : [ 0, 12, 17 ], + "minute" : [0, 30] + } + } + } + } +} +-------------------------------------------------- diff --git a/x-pack/docs/en/watcher/trigger/schedule/hourly.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/hourly.asciidoc new file mode 100644 index 0000000000000..48cc9dc2aa4a8 --- /dev/null +++ b/x-pack/docs/en/watcher/trigger/schedule/hourly.asciidoc @@ -0,0 +1,48 @@ +[[schedule-hourly]] +==== Hourly Schedule + +A <> that triggers at a particular minute every +hour of the day. To use the `hourly` schedule, you specify the minute (or minutes) +when you want the scheduler to start the watch execution with the `minute` +attribute. + +NOTE: If you don't specify the `minute` attribute for an `hourly` schedule, it + defaults to `0` and the schedule triggers on the hour every hour--`12:00`, + `13:00`, `14:00`, and so on. + +===== Configuring a Once an Hour Schedule + +To configure a once an hour schedule, you specify a single time with the `minute` +attribute. + +For example, the following `hourly` schedule triggers at minute 30 every hour-- +`12:30`, `13:30`, `14:30`, ...: + +[source,js] +-------------------------------------------------- +{ + "trigger" : { + "schedule" : { + "hourly" : { "minute" : 30 } + } + } +} +-------------------------------------------------- + +===== Configuring a Multiple Times Hourly Schedule + +To configure an `hourly` schedule that triggers at multiple times during the +hour, you specify an array of minutes. For example, the following schedule +triggers every 15 minutes every hour--`12:00`, `12:15`, `12:30`, `12:45`, +`1:00`, `1:15`, ...: + +[source,js] +-------------------------------------------------- +{ + "trigger" : { + "schedule" : { + "hourly" : { "minute" : [ 0, 15, 30, 45 ] } + } + } +} +-------------------------------------------------- diff --git a/x-pack/docs/en/watcher/trigger/schedule/interval.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/interval.asciidoc new file mode 100644 index 0000000000000..b65c16646e176 --- /dev/null +++ b/x-pack/docs/en/watcher/trigger/schedule/interval.asciidoc @@ -0,0 +1,36 @@ +[[schedule-interval]] +==== Interval Schedule + +A <> that triggers at a fixed time interval. The +interval can be set in seconds, minutes, hours, days, or weeks: + +* `"Xs"` - trigger every `X` seconds. For example, `"30s"` means every 30 seconds. +* `"Xm"` - trigger every `X` minutes. For example, `"5m"` means every 5 minutes. +* `"Xh"` - trigger every `X` hours. For example, `"12h"` means every 12 hours. +* `"Xd"` - trigger every `X` days. For example, `"3d"` means every 3 days. +* `"Xw"` - trigger every `X` weeks. For example, `"2w"` means every 2 weeks. + +If you don't specify a time unit, it defaults to seconds. + +NOTE: The interval value differs from the standard _time value_ used in + Elasticsearch. You cannot configure intervals in milliseconds or + nanoseconds. + +===== Configuring an Interval Schedule + +To configure an `interval` schedule, you specify a string value that represents +the interval. If you omit the unit of time (`s`,`m`, `h`, `d`, or `w`), it +defaults to seconds. 
+ +For example, the following `interval` schedule triggers every five minutes: + +[source,js] +-------------------------------------------------- +{ + "trigger" : { + "schedule" : { + "interval" : "5m" + } + } +} +-------------------------------------------------- diff --git a/x-pack/docs/en/watcher/trigger/schedule/monthly.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/monthly.asciidoc new file mode 100644 index 0000000000000..e6bf292d91811 --- /dev/null +++ b/x-pack/docs/en/watcher/trigger/schedule/monthly.asciidoc @@ -0,0 +1,70 @@ +[[schedule-monthly]] +==== Monthly Schedule + +A <> that triggers at a specific day and time +every month. To use the `monthly` schedule, you specify the day of the month and +time (or days and times) when you want the scheduler to start the watch execution +with the `on` and `at` attributes. + +You specify the day of month as a numeric value between `1` and `31` (inclusive). +Times are specified in the form `HH:mm` on a 24-hour clock. You can also use the +reserved values `midnight` and `noon` for `00:00` and `12:00`. + +===== Configuring a Monthly Schedule + +To configure a once a month schedule, you specify a single day and time with the +`on` and `at` attributes. For example, the following `monthly` schedule triggers +on the 10th of each month at noon: + +[source,js] +-------------------------------------------------- +{ + "trigger" : { + "schedule" : { + "monthly" : { "on" : 10, "at" : "noon" } + } + } +} +-------------------------------------------------- + +NOTE: You can also specify the day and time with the `day` and `time` attributes, + they are interchangeable with `on` and `at`. + +===== Configuring a Multiple Times Monthly Schedule + +To configure a `monthly` schedule that triggers multiple times a month, you can +specify an array of day and time values. For example, the following `monthly` +schedule triggers at 12:00 PM on the 10th of each month and at 5:00 PM on the +20th of each month: + +[source,js] +-------------------------------------------------- +{ + "trigger" : { + "schedule" : { + "monthly" : [ + { "on" : 10, "at" : "noon" }, + { "on" : 20, "at" : "17:00" } + ] + } + } +} +-------------------------------------------------- + +Alternatively, you can specify days and times in an object that has `on` and `at` +attributes that contain an array of values. For example, the following `monthly` +schedule triggers at 12:00 AM and 12:00 PM on the 10th and 20th of each month. + +[source,js] +-------------------------------------------------- +{ + "trigger" : { + "schedule" : { + "monthly" : { + "on" : [ 10, 20 ], + "at" : [ "midnight", "noon" ] + } + } + } +} +-------------------------------------------------- diff --git a/x-pack/docs/en/watcher/trigger/schedule/weekly.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/weekly.asciidoc new file mode 100644 index 0000000000000..a5ac52d0e0d01 --- /dev/null +++ b/x-pack/docs/en/watcher/trigger/schedule/weekly.asciidoc @@ -0,0 +1,75 @@ +[[schedule-weekly]] +==== Weekly Schedule + +A <> that triggers at a specific day and time +every week. To use the `weekly` schedule, you specify the day and time (or days +and times) when you want the scheduler to start the watch execution with the `on` +and `at` attributes. 
+
+You can specify the day of the week by name, abbreviation, or number (with Sunday
+being the first day of the week):
+
+* `sunday`, `monday`, `tuesday`, `wednesday`, `thursday`, `friday` and `saturday`
+* `sun`, `mon`, `tue`, `wed`, `thu`, `fri` and `sat`
+* `1`, `2`, `3`, `4`, `5`, `6` and `7`
+
+Times are specified in the form `HH:mm` on a 24-hour clock. You can also use the
+reserved values `midnight` and `noon` for `00:00` and `12:00`.
+
+===== Configuring a Weekly Schedule
+
+To configure a once a week schedule, you specify the day with the `on` attribute
+and the time with the `at` attribute. For example, the following `weekly` schedule
+triggers once a week on Friday at 5:00 PM:
+
+[source,js]
+--------------------------------------------------
+{
+  "trigger" : {
+    "schedule" : {
+      "weekly" : { "on" : "friday", "at" : "17:00" }
+    }
+  }
+}
+--------------------------------------------------
+
+NOTE: You can also specify the day and time with the `day` and `time` attributes;
+      they are interchangeable with `on` and `at`.
+
+===== Configuring a Multiple Times Weekly Schedule
+
+To configure a `weekly` schedule that triggers multiple times a week, you can
+specify an array of day and time values. For example, the following `weekly`
+schedule triggers every Tuesday at 12:00 PM and every Friday at 5:00 PM:
+
+[source,js]
+--------------------------------------------------
+{
+  "trigger" : {
+    "schedule" : {
+      "weekly" : [
+        { "on" : "tuesday", "at" : "noon" },
+        { "on" : "friday", "at" : "17:00" }
+      ]
+    }
+  }
+}
+--------------------------------------------------
+
+Alternatively, you can specify days and times in an object that has `on` and
+`at` attributes that contain an array of values. For example, the following
+`weekly` schedule triggers every Tuesday and Friday at 12:00 PM and 5:00 PM:
+
+[source,js]
+--------------------------------------------------
+{
+  "trigger" : {
+    "schedule" : {
+      "weekly" : {
+        "on" : [ "tuesday", "friday" ],
+        "at" : [ "noon", "17:00" ]
+      }
+    }
+  }
+}
+--------------------------------------------------
diff --git a/x-pack/docs/en/watcher/trigger/schedule/yearly.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/yearly.asciidoc
new file mode 100644
index 0000000000000..9ea9e1d1b47bc
--- /dev/null
+++ b/x-pack/docs/en/watcher/trigger/schedule/yearly.asciidoc
@@ -0,0 +1,83 @@
+[[schedule-yearly]]
+==== Yearly Schedule
+
+A <> that triggers at a specific day and time
+every year. To use the `yearly` schedule, you specify the month, day, and time
+(or months, days, and times) when you want the scheduler to start the watch
+execution with the `in`, `on`, and `at` attributes.
+
+You can specify the month by name, abbreviation, or number:
+
+* `january`, `february`, `march`, `april`, `may`, `june`, `july`,
+  `august`, `september`, `october`, `november` and `december`
+
+* `jan`, `feb`, `mar`, `apr`, `may`, `jun`, `jul`, `aug`,
+  `sep`, `oct`, `nov` and `dec`
+
+* `1`, `2`, `3`, `4`, `5`, `6`, `7`, `8`, `9`, `10`, `11` and `12`
+
+You specify the day of month as a numeric value between `1` and `31` (inclusive).
+Times are specified in the form `HH:mm` on a 24-hour clock. You can also use
+the reserved values `midnight` and `noon` for `00:00` and `12:00`.
+
+===== Configuring a Yearly Schedule
+
+To configure a once a year schedule, you specify the month with the `in` attribute,
+the day with the `on` attribute, and the time with the `at` attribute.
For
+example, the following `yearly` schedule triggers once a year at noon on January
+10th:
+
+[source,js]
+--------------------------------------------------
+{
+  "trigger" : {
+    "schedule" : {
+      "yearly" : { "in" : "january", "on" : 10, "at" : "noon" }
+    }
+  }
+}
+--------------------------------------------------
+
+NOTE: You can also specify the month, day, and time with the `month`, `day`, and
+      `time` attributes; they are interchangeable with `in`, `on`, and `at`.
+
+===== Configuring a Multiple Times Yearly Schedule
+
+To configure a `yearly` schedule that triggers multiple times a year, you can
+specify an array of month, day, and time values. For example, the following
+`yearly` schedule triggers twice a year: at noon on January 10th, and at 5:00 PM
+on July 20th.
+
+[source,js]
+--------------------------------------------------
+{
+  "trigger" : {
+    "schedule" : {
+      "yearly" : [
+        { "in" : "january", "on" : 10, "at" : "noon" },
+        { "in" : "july", "on" : 20, "at" : "17:00" }
+      ]
+    }
+  }
+}
+--------------------------------------------------
+
+Alternatively, you can specify the months, days, and times in an object that has
+`in`, `on`, and `at` attributes that contain an array of values. For example,
+the following `yearly` schedule triggers at 12:00 AM and 12:00 PM on January 10th,
+January 20th, December 10th, and December 20th.
+
+[source,js]
+--------------------------------------------------
+{
+  "trigger" : {
+    "schedule" : {
+      "yearly" : {
+        "in" : [ "jan", "dec" ],
+        "on" : [ 10, 20 ],
+        "at" : [ "midnight", "noon" ]
+      }
+    }
+  }
+}
+--------------------------------------------------
diff --git a/x-pack/docs/en/watcher/troubleshooting.asciidoc b/x-pack/docs/en/watcher/troubleshooting.asciidoc
new file mode 100644
index 0000000000000..8b793142ecc2b
--- /dev/null
+++ b/x-pack/docs/en/watcher/troubleshooting.asciidoc
@@ -0,0 +1,61 @@
+[[watcher-troubleshooting]]
+== {xpack} {watcher} Troubleshooting
+++++
+{xpack} {watcher}
+++++
+
+[float]
+=== Dynamic Mapping Error When Trying to Add a Watch
+
+If you get the _Dynamic Mapping is Disabled_ error when you try to add a watch,
+verify that the index mappings for the `.watches` index are available. You can
+do that by submitting the following request:
+
+[source,js]
+--------------------------------------------------
+GET .watches/_mapping
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:my_active_watch]
+
+If the index mappings are missing, follow these steps to restore the correct
+mappings:
+
+. Stop the Elasticsearch node.
+. Add `xpack.watcher.index.rest.direct_access : true` to `elasticsearch.yml`.
+. Restart the Elasticsearch node.
+. Delete the `.watches` index:
++
+[source,js]
+--------------------------------------------------
+DELETE .watches
+--------------------------------------------------
++
+. Disable direct access to the `.watches` index:
+.. Stop the Elasticsearch node.
+.. Remove `xpack.watcher.index.rest.direct_access : true` from `elasticsearch.yml`.
+.. Restart the Elasticsearch node.
+
+[float]
+=== Unable to Send Email
+
+If you get an authentication error indicating that you need to continue the
+sign-in process from a web browser when Watcher attempts to send email, you need
+to configure Gmail to
+https://support.google.com/accounts/answer/6010255?hl=en[Allow Less Secure Apps to access your account].
+
+If you have two-step verification enabled for your email account, you must
+generate and use an App Specific password to send email from {watcher}.
For more +information, see: + +- Gmail: https://support.google.com/accounts/answer/185833?hl=en[Sign in using App Passwords] +- Outlook.com: http://windows.microsoft.com/en-us/windows/app-passwords-two-step-verification[App passwords and two-step verification] + +[float] +=== {watcher} Not Responsive + +Keep in mind that there's no built-in validation of scripts that you add to a +watch. Buggy or deliberately malicious scripts can negatively impact {watcher} +performance. For example, if you add multiple watches with buggy script +conditions in a short period of time, {watcher} might be temporarily unable to +process watches until the bad watches time out. diff --git a/x-pack/docs/src/test/java/org/elasticsearch/smoketest/XDocsClientYamlTestSuiteIT.java b/x-pack/docs/src/test/java/org/elasticsearch/smoketest/XDocsClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..269c12030cfab --- /dev/null +++ b/x-pack/docs/src/test/java/org/elasticsearch/smoketest/XDocsClientYamlTestSuiteIT.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.smoketest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import org.apache.http.HttpHost; +import org.elasticsearch.Version; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.yaml.ClientYamlDocsTestClient; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ClientYamlTestClient; +import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; +import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; +import org.elasticsearch.xpack.test.rest.XPackRestIT; +import org.junit.After; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.is; + +public class XDocsClientYamlTestSuiteIT extends XPackRestIT { + private static final String USER_TOKEN = basicAuthHeaderValue("test_admin", new SecureString("x-pack-test-password".toCharArray())); + + public XDocsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @Override + protected void afterIfFailed(List errors) { + super.afterIfFailed(errors); + String name = getTestName().split("=")[1]; + name = name.substring(0, name.length() - 1); + name = name.replaceAll("/([^/]+)$", ".asciidoc:$1"); + logger.error("This failing test was generated by documentation starting at {}. It may include many snippets. 
" + + "See Elasticsearch's docs/README.asciidoc for an explanation of test generation.", name); + } + + @Override + protected boolean preserveTemplatesUponCompletion() { + return true; + } + + @Override + protected ClientYamlTestClient initClientYamlTestClient(ClientYamlSuiteRestSpec restSpec, RestClient restClient, + List hosts, Version esVersion) throws IOException { + return new ClientYamlDocsTestClient(restSpec, restClient, hosts, esVersion); + } + + /** + * All tests run as a an administrative user but use es-shield-runas-user to become a less privileged user. + */ + @Override + protected Settings restClientSettings() { + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", USER_TOKEN) + .build(); + } + + /** + * Re-enables watcher after every test just in case any test disables it. One does. + */ + @After + public void reenableWatcher() throws Exception { + if (isWatcherTest()) { + assertBusy(() -> { + ClientYamlTestResponse response = + getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + String state = (String) response.evaluate("stats.0.watcher_state"); + + switch (state) { + case "stopped": + ClientYamlTestResponse startResponse = + getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + boolean isAcknowledged = (boolean) startResponse.evaluate("acknowledged"); + assertThat(isAcknowledged, is(true)); + break; + case "stopping": + throw new AssertionError("waiting until stopping state reached stopped state to start again"); + case "starting": + throw new AssertionError("waiting until starting state reached started state"); + case "started": + // all good here, we are done + break; + default: + throw new AssertionError("unknown state[" + state + "]"); + } + }); + } + } + + @Override + protected boolean isWatcherTest() { + String testName = getTestName(); + return testName != null && testName.contains("watcher/"); + } + + @Override + protected boolean isMonitoringTest() { + return false; + } + + @Override + protected boolean isMachineLearningTest() { + String testName = getTestName(); + return testName != null && testName.contains("ml/"); + } + + @Override + protected boolean isRollupTest() { + String testName = getTestName(); + return testName != null && testName.contains("rollup/"); + } + + /** + * Deletes users after every test just in case any test adds any. + */ + @After + public void deleteUsers() throws Exception { + ClientYamlTestResponse response = getAdminExecutionContext().callApi("xpack.security.get_user", emptyMap(), emptyList(), + emptyMap()); + @SuppressWarnings("unchecked") + Map users = (Map) response.getBody(); + for (String user: users.keySet()) { + Map metaDataMap = (Map) ((Map) users.get(user)).get("metadata"); + Boolean reserved = metaDataMap == null ? null : (Boolean) metaDataMap.get("_reserved"); + if (reserved == null || reserved == false) { + logger.warn("Deleting leftover user {}", user); + getAdminExecutionContext().callApi("xpack.security.delete_user", singletonMap("username", user), emptyList(), emptyMap()); + } + } + } + + @Override + protected boolean randomizeContentType() { + return false; + } +} diff --git a/x-pack/license-tools/bin/key-pair-generator b/x-pack/license-tools/bin/key-pair-generator new file mode 100755 index 0000000000000..721b8617c76b1 --- /dev/null +++ b/x-pack/license-tools/bin/key-pair-generator @@ -0,0 +1,38 @@ +#!/bin/bash + +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +# or more contributor license agreements. Licensed under the Elastic License; +# you may not use this file except in compliance with the Elastic License. + +CDPATH="" +SCRIPT="$0" + +# SCRIPT may be an arbitrarily deep series of symlinks. Loop until we have the concrete path. +while [ -h "$SCRIPT" ] ; do + ls=`ls -ld "$SCRIPT"` + # Drop everything prior to -> + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + SCRIPT="$link" + else + SCRIPT=`dirname "$SCRIPT"`/"$link" + fi +done + +# determine license home +LICENSE_HOME=`dirname "$SCRIPT"`/.. + +# make LICENSE_HOME absolute +LICENSE_HOME=`cd "$LICENSE_HOME"; pwd` + +# setup classpath +LICENSE_CLASSPATH=$LICENSE_CLASSPATH:$LICENSE_HOME/lib/* + +if [ -x "$JAVA_HOME/bin/java" ]; then + JAVA=$JAVA_HOME/bin/java +else + JAVA=`which java` +fi + +exec "$JAVA" $JAVA_OPTS -Xmx64m -Xms16m -cp "$LICENSE_CLASSPATH" -Des.path.home="`pwd`" org.elasticsearch.license.licensor.tools.KeyPairGeneratorTool "$@" + diff --git a/x-pack/license-tools/bin/license-generator b/x-pack/license-tools/bin/license-generator new file mode 100755 index 0000000000000..1c280810f46ae --- /dev/null +++ b/x-pack/license-tools/bin/license-generator @@ -0,0 +1,37 @@ +#!/bin/bash + +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License; +# you may not use this file except in compliance with the Elastic License. + +CDPATH="" +SCRIPT="$0" + +# SCRIPT may be an arbitrarily deep series of symlinks. Loop until we have the concrete path. +while [ -h "$SCRIPT" ] ; do + ls=`ls -ld "$SCRIPT"` + # Drop everything prior to -> + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + SCRIPT="$link" + else + SCRIPT=`dirname "$SCRIPT"`/"$link" + fi +done + +# determine license home +LICENSE_HOME=`dirname "$SCRIPT"`/.. + +# make LICENSE_HOME absolute +LICENSE_HOME=`cd "$LICENSE_HOME"; pwd` + +# setup classpath +LICENSE_CLASSPATH=$LICENSE_CLASSPATH:$LICENSE_HOME/lib/* + +if [ -x "$JAVA_HOME/bin/java" ]; then + JAVA=$JAVA_HOME/bin/java +else + JAVA=`which java` +fi + +exec "$JAVA" $JAVA_OPTS -Xmx64m -Xms16m -cp "$LICENSE_CLASSPATH" -Des.path.home="`pwd`" org.elasticsearch.license.licensor.tools.LicenseGeneratorTool "$@" diff --git a/x-pack/license-tools/bin/verify-license b/x-pack/license-tools/bin/verify-license new file mode 100755 index 0000000000000..629fd44732e86 --- /dev/null +++ b/x-pack/license-tools/bin/verify-license @@ -0,0 +1,38 @@ +#!/bin/bash + +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License; +# you may not use this file except in compliance with the Elastic License. + +CDPATH="" +SCRIPT="$0" + +# SCRIPT may be an arbitrarily deep series of symlinks. Loop until we have the concrete path. +while [ -h "$SCRIPT" ] ; do + ls=`ls -ld "$SCRIPT"` + # Drop everything prior to -> + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + SCRIPT="$link" + else + SCRIPT=`dirname "$SCRIPT"`/"$link" + fi +done + +# determine license home +LICENSE_HOME=`dirname "$SCRIPT"`/.. 
+ +# make LICENSE_HOME absolute +LICENSE_HOME=`cd "$LICENSE_HOME"; pwd` + +# setup classpath +LICENSE_CLASSPATH=$LICENSE_CLASSPATH:$LICENSE_HOME/lib/* + +if [ -x "$JAVA_HOME/bin/java" ]; then + JAVA=$JAVA_HOME/bin/java +else + JAVA=`which java` +fi + +exec "$JAVA" $JAVA_OPTS -Xmx64m -Xms16m -cp "$LICENSE_CLASSPATH" -Des.path.home="`pwd`" org.elasticsearch.license.licensor.tools.LicenseVerificationTool "$@" + diff --git a/x-pack/license-tools/build.gradle b/x-pack/license-tools/build.gradle new file mode 100644 index 0000000000000..3ef08073bbf84 --- /dev/null +++ b/x-pack/license-tools/build.gradle @@ -0,0 +1,26 @@ +apply plugin: 'elasticsearch.build' + +dependencies { + compile project(xpackModule('core')) + compile "org.elasticsearch:elasticsearch:${version}" + testCompile "org.elasticsearch.test:framework:${version}" +} + +project.forbiddenPatterns { + exclude '**/*.key' +} + +dependencyLicenses.enabled = false + +task buildZip(type: Zip, dependsOn: jar) { + String parentDir = "license-tools-${version}" + into(parentDir + '/lib') { + from jar + from configurations.runtime + } + into(parentDir + '/bin') { + from 'bin' + } +} + +assemble.dependsOn buildZip diff --git a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/LicenseSigner.java b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/LicenseSigner.java new file mode 100644 index 0000000000000..1b28878e88875 --- /dev/null +++ b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/LicenseSigner.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license.licensor; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.license.CryptUtils; +import org.elasticsearch.license.License; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.security.Signature; +import java.security.SignatureException; +import java.util.Base64; +import java.util.Collections; +import java.util.Map; + +/** + * Responsible for generating a license signature according to the signature spec and sign it with + * the provided encrypted private key + */ +public class LicenseSigner { + + private static final int MAGIC_LENGTH = 13; + + private final Path publicKeyPath; + + private final Path privateKeyPath; + + public LicenseSigner(final Path privateKeyPath, final Path publicKeyPath) { + this.publicKeyPath = publicKeyPath; + this.privateKeyPath = privateKeyPath; + } + + /** + * Generates a signature for the {@code licenseSpec}. 
Signature structure: + * + * | VERSION | MAGIC | PUB_KEY_DIGEST | SIGNED_LICENSE_CONTENT | + * + * + * @return a signed License + */ + public License sign(License licenseSpec) throws IOException { + XContentBuilder contentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + final Map licenseSpecViewMode = + Collections.singletonMap(License.LICENSE_SPEC_VIEW_MODE, "true"); + licenseSpec.toXContent(contentBuilder, new ToXContent.MapParams(licenseSpecViewMode)); + final byte[] signedContent; + try { + final Signature rsa = Signature.getInstance("SHA512withRSA"); + rsa.initSign(CryptUtils.readEncryptedPrivateKey(Files.readAllBytes(privateKeyPath))); + final BytesRefIterator iterator = BytesReference.bytes(contentBuilder).iterator(); + BytesRef ref; + while((ref = iterator.next()) != null) { + rsa.update(ref.bytes, ref.offset, ref.length); + } + signedContent = rsa.sign(); + } catch (InvalidKeyException + | IOException + | NoSuchAlgorithmException + | SignatureException e) { + throw new IllegalStateException(e); + } + final byte[] magic = new byte[MAGIC_LENGTH]; + SecureRandom random = new SecureRandom(); + random.nextBytes(magic); + final byte[] hash = Base64.getEncoder().encode(Files.readAllBytes(publicKeyPath)); + assert hash != null; + byte[] bytes = new byte[4 + 4 + MAGIC_LENGTH + 4 + hash.length + 4 + signedContent.length]; + ByteBuffer byteBuffer = ByteBuffer.wrap(bytes); + byteBuffer.putInt(licenseSpec.version()) + .putInt(magic.length) + .put(magic) + .putInt(hash.length) + .put(hash) + .putInt(signedContent.length) + .put(signedContent); + + return License.builder() + .fromLicenseSpec(licenseSpec, Base64.getEncoder().encodeToString(bytes)) + .build(); + } +} diff --git a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/KeyPairGeneratorTool.java b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/KeyPairGeneratorTool.java new file mode 100644 index 0000000000000..5a8ea91d3627b --- /dev/null +++ b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/KeyPairGeneratorTool.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license.licensor.tools; + +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.LoggingAwareCommand; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.PathUtils; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.SecureRandom; + +import static org.elasticsearch.license.CryptUtils.writeEncryptedPrivateKey; +import static org.elasticsearch.license.CryptUtils.writeEncryptedPublicKey; + +public class KeyPairGeneratorTool extends LoggingAwareCommand { + + private final OptionSpec publicKeyPathOption; + private final OptionSpec privateKeyPathOption; + + public KeyPairGeneratorTool() { + super("Generates a key pair with RSA 2048-bit security"); + // TODO: in jopt-simple 5.0 we can use a PathConverter to take Path instead of File + this.publicKeyPathOption = parser.accepts("publicKeyPath", "public key path") + .withRequiredArg().required(); + this.privateKeyPathOption = parser.accepts("privateKeyPath", "private key path") + .withRequiredArg().required(); + } + + public static void main(String[] args) throws Exception { + exit(new KeyPairGeneratorTool().main(args, Terminal.DEFAULT)); + } + + @Override + protected void printAdditionalHelp(Terminal terminal) { + terminal.println("This tool generates and saves a key pair to the provided publicKeyPath"); + terminal.println("and privateKeyPath. The tool checks the existence of the provided key"); + terminal.println("paths and will not override if any existing keys are found."); + terminal.println(""); + } + + @Override + protected void execute(Terminal terminal, OptionSet options) throws Exception { + Path publicKeyPath = parsePath(publicKeyPathOption.value(options)); + Path privateKeyPath = parsePath(privateKeyPathOption.value(options)); + if (Files.exists(privateKeyPath)) { + throw new UserException(ExitCodes.USAGE, privateKeyPath + " already exists"); + } else if (Files.exists(publicKeyPath)) { + throw new UserException(ExitCodes.USAGE, publicKeyPath + " already exists"); + } + + SecureRandom random = new SecureRandom(); + KeyPairGenerator keyGen = KeyPairGenerator.getInstance("RSA"); + keyGen.initialize(2048, random); + KeyPair keyPair = keyGen.generateKeyPair(); + + Files.write(privateKeyPath, writeEncryptedPrivateKey(keyPair.getPrivate())); + Files.write(publicKeyPath, writeEncryptedPublicKey(keyPair.getPublic())); + + terminal.println( + Terminal.Verbosity.VERBOSE, + "generating key pair [public key: " + + publicKeyPath + + ", private key: " + + privateKeyPath + "]"); + } + + @SuppressForbidden(reason = "Parsing command line path") + private static Path parsePath(String path) { + return PathUtils.get(path); + } + +} diff --git a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseGeneratorTool.java b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseGeneratorTool.java new file mode 100644 index 0000000000000..14720e2d4b6ef --- /dev/null +++ b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseGeneratorTool.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license.licensor.tools; + +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.LoggingAwareCommand; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.license.License; +import org.elasticsearch.license.licensor.LicenseSigner; + +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; + +public class LicenseGeneratorTool extends LoggingAwareCommand { + + private final OptionSpec publicKeyPathOption; + private final OptionSpec privateKeyPathOption; + private final OptionSpec licenseOption; + private final OptionSpec licenseFileOption; + + public LicenseGeneratorTool() { + super("Generates signed elasticsearch license(s) for a given license spec(s)"); + publicKeyPathOption = parser.accepts("publicKeyPath", "path to public key file") + .withRequiredArg().required(); + privateKeyPathOption = parser.accepts("privateKeyPath", "path to private key file") + .withRequiredArg().required(); + // TODO: with jopt-simple 5.0, we can make these requiredUnless each other + // which is effectively "one must be present" + licenseOption = parser.accepts("license", "license json spec") + .withRequiredArg(); + licenseFileOption = parser.accepts("licenseFile", "license json spec file") + .withRequiredArg(); + } + + public static void main(String[] args) throws Exception { + exit(new LicenseGeneratorTool().main(args, Terminal.DEFAULT)); + } + + @Override + protected void printAdditionalHelp(Terminal terminal) { + terminal.println("This tool generate elasticsearch license(s) for the provided"); + terminal.println("license spec(s). 
The tool can take arbitrary number of"); + terminal.println("`--license` and/or `--licenseFile` to generate corresponding"); + terminal.println("signed license(s)."); + terminal.println(""); + } + + @Override + protected void execute(Terminal terminal, OptionSet options) throws Exception { + Path publicKeyPath = parsePath(publicKeyPathOption.value(options)); + Path privateKeyPath = parsePath(privateKeyPathOption.value(options)); + if (Files.exists(privateKeyPath) == false) { + throw new UserException(ExitCodes.USAGE, privateKeyPath + " does not exist"); + } else if (Files.exists(publicKeyPath) == false) { + throw new UserException(ExitCodes.USAGE, publicKeyPath + " does not exist"); + } + + final License licenseSpec; + if (options.has(licenseOption)) { + final BytesArray bytes = + new BytesArray(licenseOption.value(options).getBytes(StandardCharsets.UTF_8)); + licenseSpec = + License.fromSource(bytes, XContentType.JSON); + } else if (options.has(licenseFileOption)) { + Path licenseSpecPath = parsePath(licenseFileOption.value(options)); + if (Files.exists(licenseSpecPath) == false) { + throw new UserException(ExitCodes.USAGE, licenseSpecPath + " does not exist"); + } + final BytesArray bytes = new BytesArray(Files.readAllBytes(licenseSpecPath)); + licenseSpec = License.fromSource(bytes, XContentType.JSON); + } else { + throw new UserException( + ExitCodes.USAGE, + "Must specify either --license or --licenseFile"); + } + + // sign + License license = new LicenseSigner(privateKeyPath, publicKeyPath).sign(licenseSpec); + + // dump + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + builder.startObject(); + builder.startObject("license"); + license.toInnerXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + builder.endObject(); + builder.flush(); + terminal.println(Strings.toString(builder)); + } + + @SuppressForbidden(reason = "Parsing command line path") + private static Path parsePath(String path) { + return PathUtils.get(path); + } + +} diff --git a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseVerificationTool.java b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseVerificationTool.java new file mode 100644 index 0000000000000..0c04e39bd91ea --- /dev/null +++ b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseVerificationTool.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license.licensor.tools; + +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.LoggingAwareCommand; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseVerifier; + +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; + +public class LicenseVerificationTool extends LoggingAwareCommand { + + private final OptionSpec publicKeyPathOption; + private final OptionSpec licenseOption; + private final OptionSpec licenseFileOption; + + public LicenseVerificationTool() { + super("Generates signed elasticsearch license(s) for a given license spec(s)"); + publicKeyPathOption = parser.accepts("publicKeyPath", "path to public key file") + .withRequiredArg().required(); + // TODO: with jopt-simple 5.0, we can make these requiredUnless each other + // which is effectively "one must be present" + licenseOption = parser.accepts("license", "license json spec") + .withRequiredArg(); + licenseFileOption = parser.accepts("licenseFile", "license json spec file") + .withRequiredArg(); + } + + public static void main(String[] args) throws Exception { + exit(new LicenseVerificationTool().main(args, Terminal.DEFAULT)); + } + + @Override + protected void execute(Terminal terminal, OptionSet options) throws Exception { + Path publicKeyPath = parsePath(publicKeyPathOption.value(options)); + if (Files.exists(publicKeyPath) == false) { + throw new UserException(ExitCodes.USAGE, publicKeyPath + " does not exist"); + } + + final License licenseSpec; + if (options.has(licenseOption)) { + final BytesArray bytes = + new BytesArray(licenseOption.value(options).getBytes(StandardCharsets.UTF_8)); + licenseSpec = + License.fromSource(bytes, XContentType.JSON); + } else if (options.has(licenseFileOption)) { + Path licenseSpecPath = parsePath(licenseFileOption.value(options)); + if (Files.exists(licenseSpecPath) == false) { + throw new UserException(ExitCodes.USAGE, licenseSpecPath + " does not exist"); + } + final BytesArray bytes = new BytesArray(Files.readAllBytes(licenseSpecPath)); + licenseSpec = License.fromSource(bytes, XContentType.JSON); + } else { + throw new UserException( + ExitCodes.USAGE, + "Must specify either --license or --licenseFile"); + } + + // verify + if (!LicenseVerifier.verifyLicense(licenseSpec, Files.readAllBytes(publicKeyPath))) { + throw new UserException(ExitCodes.DATA_ERROR, "Invalid License!"); + } + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + builder.startObject(); + builder.startObject("license"); + licenseSpec.toInnerXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + builder.endObject(); + builder.flush(); + terminal.println(Strings.toString(builder)); + } + + @SuppressForbidden(reason = "Parsing command line path") + private static Path parsePath(String path) { + return PathUtils.get(path); + } +} diff --git 
a/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/LicenseVerificationTests.java b/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/LicenseVerificationTests.java new file mode 100644 index 0000000000000..34383739b5057 --- /dev/null +++ b/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/LicenseVerificationTests.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license.licensor; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.license.DateUtils; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseVerifier; +import org.elasticsearch.test.ESTestCase; +import org.junit.After; +import org.junit.Before; + +import java.nio.file.Files; +import java.nio.file.Path; + +public class LicenseVerificationTests extends ESTestCase { + + protected Path pubKeyPath = null; + protected Path priKeyPath = null; + + @Before + public void setup() throws Exception { + pubKeyPath = getDataPath("/public.key"); + priKeyPath = getDataPath("/private.key"); + } + + @After + public void cleanUp() { + pubKeyPath = null; + priKeyPath = null; + } + + public void testGeneratedLicenses() throws Exception { + final TimeValue fortyEightHours = TimeValue.timeValueHours(2 * 24); + final License license = + TestUtils.generateSignedLicense(fortyEightHours, pubKeyPath, priKeyPath); + assertTrue(LicenseVerifier.verifyLicense(license, Files.readAllBytes(pubKeyPath))); + } + + public void testLicenseTampering() throws Exception { + final TimeValue twoHours = TimeValue.timeValueHours(2); + License license = TestUtils.generateSignedLicense(twoHours, pubKeyPath, priKeyPath); + + final License tamperedLicense = License.builder() + .fromLicenseSpec(license, license.signature()) + .expiryDate(license.expiryDate() + 10 * 24 * 60 * 60 * 1000L) + .validate() + .build(); + + assertFalse(LicenseVerifier.verifyLicense(tamperedLicense, Files.readAllBytes(pubKeyPath))); + } + + public void testRandomLicenseVerification() throws Exception { + TestUtils.LicenseSpec licenseSpec = TestUtils.generateRandomLicenseSpec( + randomIntBetween(License.VERSION_START, License.VERSION_CURRENT)); + License generatedLicense = generateSignedLicense(licenseSpec, pubKeyPath, priKeyPath); + assertTrue(LicenseVerifier.verifyLicense(generatedLicense, Files.readAllBytes(pubKeyPath))); + } + + private static License generateSignedLicense( + TestUtils.LicenseSpec spec, Path pubKeyPath, Path priKeyPath) throws Exception { + LicenseSigner signer = new LicenseSigner(priKeyPath, pubKeyPath); + License.Builder builder = License.builder() + .uid(spec.uid) + .feature(spec.feature) + .type(spec.type) + .subscriptionType(spec.subscriptionType) + .issuedTo(spec.issuedTo) + .issuer(spec.issuer) + .maxNodes(spec.maxNodes); + + if (spec.expiryDate != null) { + builder.expiryDate(DateUtils.endOfTheDay(spec.expiryDate)); + } else { + builder.expiryDate(spec.expiryDateInMillis); + } + if (spec.issueDate != null) { + builder.issueDate(DateUtils.beginningOfTheDay(spec.issueDate)); + } else { + builder.issueDate(spec.issueDateInMillis); + } + builder.version(spec.version); + return signer.sign(builder.build()); + } + +} diff --git a/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/TestUtils.java 
b/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/TestUtils.java new file mode 100644 index 0000000000000..31b458489d4d5 --- /dev/null +++ b/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/TestUtils.java @@ -0,0 +1,251 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license.licensor; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.joda.DateMathParser; +import org.elasticsearch.common.joda.FormatDateTimeFormatter; +import org.elasticsearch.common.joda.Joda; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.license.DateUtils; +import org.elasticsearch.license.License; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.MatcherAssert; +import org.joda.time.format.DateTimeFormatter; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.UUID; + +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomBoolean; +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomInt; +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.ESTestCase.randomFrom; +import static org.hamcrest.core.IsEqual.equalTo; + +public class TestUtils { + + public static final String PUBLIC_KEY_RESOURCE = "/public.key"; + public static final String PRIVATE_KEY_RESOURCE = "/private.key"; + + private static final FormatDateTimeFormatter formatDateTimeFormatter = + Joda.forPattern("yyyy-MM-dd"); + private static final DateMathParser dateMathParser = + new DateMathParser(formatDateTimeFormatter); + private static final DateTimeFormatter dateTimeFormatter = formatDateTimeFormatter.printer(); + + public static String dumpLicense(License license) throws Exception { + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + builder.startObject(); + builder.startObject("license"); + license.toInnerXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + builder.endObject(); + return Strings.toString(builder); + } + + public static String dateMathString(String time, final long now) { + return dateTimeFormatter.print(dateMathParser.parse(time, () -> now)); + } + + public static long dateMath(String time, final long now) { + return dateMathParser.parse(time, () -> now); + } + + public static LicenseSpec generateRandomLicenseSpec(int version) { + boolean datesInMillis = randomBoolean(); + long now = System.currentTimeMillis(); + String uid = UUID.randomUUID().toString(); + String issuer = "issuer__" + randomInt(); + String issuedTo = "issuedTo__" + randomInt(); + String type = version < License.VERSION_NO_FEATURE_TYPE ? 
+ randomFrom("subscription", "internal", "development") : + randomFrom("basic", "silver", "dev", "gold", "platinum"); + final String subscriptionType; + final String feature; + if (version < License.VERSION_NO_FEATURE_TYPE) { + subscriptionType = randomFrom("gold", "silver", "platinum"); + feature = "feature__" + randomInt(); + } else { + subscriptionType = null; + feature = null; + } + int maxNodes = randomIntBetween(5, 100); + if (datesInMillis) { + long issueDateInMillis = dateMath("now", now); + long expiryDateInMillis = dateMath("now+10d/d", now); + return new LicenseSpec( + version, + uid, + feature, + issueDateInMillis, + expiryDateInMillis, + type, + subscriptionType, + issuedTo, + issuer, + maxNodes); + } else { + String issueDate = dateMathString("now", now); + String expiryDate = dateMathString("now+10d/d", now); + return new LicenseSpec( + version, + uid, + feature, + issueDate, + expiryDate, type, + subscriptionType, + issuedTo, + issuer, + maxNodes); + } + } + + public static String generateLicenseSpecString(LicenseSpec licenseSpec) throws IOException { + XContentBuilder licenses = jsonBuilder(); + licenses.startObject(); + licenses.startObject("license") + .field("uid", licenseSpec.uid) + .field("type", licenseSpec.type) + .field("subscription_type", licenseSpec.subscriptionType) + .field("issued_to", licenseSpec.issuedTo) + .field("issuer", licenseSpec.issuer) + .field("feature", licenseSpec.feature) + .field("max_nodes", licenseSpec.maxNodes); + + if (licenseSpec.issueDate != null) { + licenses.field("issue_date", licenseSpec.issueDate); + } else { + licenses.field("issue_date_in_millis", licenseSpec.issueDateInMillis); + } + if (licenseSpec.expiryDate != null) { + licenses.field("expiry_date", licenseSpec.expiryDate); + } else { + licenses.field("expiry_date_in_millis", licenseSpec.expiryDateInMillis); + } + licenses.field("version", licenseSpec.version); + licenses.endObject(); + licenses.endObject(); + return Strings.toString(licenses); + } + + public static void assertLicenseSpec(LicenseSpec spec, License license) { + MatcherAssert.assertThat(license.uid(), equalTo(spec.uid)); + MatcherAssert.assertThat(license.issuedTo(), equalTo(spec.issuedTo)); + MatcherAssert.assertThat(license.issuer(), equalTo(spec.issuer)); + MatcherAssert.assertThat(license.type(), equalTo(spec.type)); + MatcherAssert.assertThat(license.maxNodes(), equalTo(spec.maxNodes)); + if (spec.issueDate != null) { + MatcherAssert.assertThat( + license.issueDate(), + equalTo(DateUtils.beginningOfTheDay(spec.issueDate))); + } else { + MatcherAssert.assertThat(license.issueDate(), equalTo(spec.issueDateInMillis)); + } + if (spec.expiryDate != null) { + MatcherAssert.assertThat( + license.expiryDate(), + equalTo(DateUtils.endOfTheDay(spec.expiryDate))); + } else { + MatcherAssert.assertThat(license.expiryDate(), equalTo(spec.expiryDateInMillis)); + } + } + + public static License generateSignedLicense( + TimeValue expiryDuration, Path pubKeyPath, Path priKeyPath) throws Exception { + long issue = System.currentTimeMillis(); + int version = ESTestCase.randomIntBetween(License.VERSION_START, License.VERSION_CURRENT); + String type = version < License.VERSION_NO_FEATURE_TYPE ? 
+ randomFrom("subscription", "internal", "development") : + randomFrom("trial", "basic", "silver", "dev", "gold", "platinum"); + final License.Builder builder = License.builder() + .uid(UUID.randomUUID().toString()) + .expiryDate(issue + expiryDuration.getMillis()) + .issueDate(issue) + .version(version) + .type(type) + .issuedTo("customer") + .issuer("elasticsearch") + .maxNodes(5); + if (version == License.VERSION_START) { + builder.subscriptionType(randomFrom("dev", "gold", "platinum", "silver")); + builder.feature(ESTestCase.randomAlphaOfLength(10)); + } + LicenseSigner signer = new LicenseSigner(priKeyPath, pubKeyPath); + return signer.sign(builder.build()); + } + + public static class LicenseSpec { + public final int version; + public final String feature; + public final String issueDate; + public final long issueDateInMillis; + public final String expiryDate; + public final long expiryDateInMillis; + public final String uid; + public final String type; + public final String subscriptionType; + public final String issuedTo; + public final String issuer; + public final int maxNodes; + + public LicenseSpec( + int version, + String uid, + String feature, + long issueDateInMillis, + long expiryDateInMillis, + String type, + String subscriptionType, + String issuedTo, + String issuer, + int maxNodes) { + this.version = version; + this.feature = feature; + this.issueDateInMillis = issueDateInMillis; + this.issueDate = null; + this.expiryDateInMillis = expiryDateInMillis; + this.expiryDate = null; + this.uid = uid; + this.type = type; + this.subscriptionType = subscriptionType; + this.issuedTo = issuedTo; + this.issuer = issuer; + this.maxNodes = maxNodes; + } + + public LicenseSpec( + int version, + String uid, + String feature, + String issueDate, + String expiryDate, + String type, + String subscriptionType, + String issuedTo, + String issuer, + int maxNodes) { + this.version = version; + this.feature = feature; + this.issueDate = issueDate; + this.issueDateInMillis = -1; + this.expiryDate = expiryDate; + this.expiryDateInMillis = -1; + this.uid = uid; + this.type = type; + this.subscriptionType = subscriptionType; + this.issuedTo = issuedTo; + this.issuer = issuer; + this.maxNodes = maxNodes; + } + } + +} diff --git a/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/tools/KeyPairGenerationToolTests.java b/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/tools/KeyPairGenerationToolTests.java new file mode 100644 index 0000000000000..a55037e3f7c18 --- /dev/null +++ b/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/tools/KeyPairGenerationToolTests.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license.licensor.tools; + +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.CommandTestCase; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.UserException; + +import java.nio.file.Files; +import java.nio.file.Path; + +import static org.hamcrest.CoreMatchers.containsString; + +public class KeyPairGenerationToolTests extends CommandTestCase { + + @Override + protected Command newCommand() { + return new KeyPairGeneratorTool(); + } + + public void testMissingKeyPaths() throws Exception { + Path exists = createTempFile("", "existing"); + Path dne = createTempDir().resolve("dne"); + UserException e = expectThrows( + UserException.class, + () -> execute( + "--publicKeyPath", + exists.toString(), + "--privateKeyPath", + dne.toString())); + assertThat(e.getMessage(), containsString("existing")); + assertEquals(ExitCodes.USAGE, e.exitCode); + e = expectThrows( + UserException.class, + () -> execute( + "--publicKeyPath", + dne.toString(), + "--privateKeyPath", + exists.toString())); + assertThat(e.getMessage(), containsString("existing")); + assertEquals(ExitCodes.USAGE, e.exitCode); + } + + public void testTool() throws Exception { + Path keysDir = createTempDir(); + Path publicKeyFilePath = keysDir.resolve("public"); + Path privateKeyFilePath = keysDir.resolve("private"); + + execute( + "--publicKeyPath", + publicKeyFilePath.toString(), + "--privateKeyPath", + privateKeyFilePath.toString()); + assertTrue(publicKeyFilePath.toString(), Files.exists(publicKeyFilePath)); + assertTrue(privateKeyFilePath.toString(), Files.exists(privateKeyFilePath)); + } + +} diff --git a/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/tools/LicenseGenerationToolTests.java b/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/tools/LicenseGenerationToolTests.java new file mode 100644 index 0000000000000..28971e9b5a890 --- /dev/null +++ b/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/tools/LicenseGenerationToolTests.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license.licensor.tools; + +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.CommandTestCase; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.license.License; +import org.elasticsearch.license.licensor.TestUtils; +import org.junit.Before; + +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; + +public class LicenseGenerationToolTests extends CommandTestCase { + + protected Path pubKeyPath = null; + protected Path priKeyPath = null; + + @Before + public void setup() throws Exception { + pubKeyPath = getDataPath(TestUtils.PUBLIC_KEY_RESOURCE); + priKeyPath = getDataPath(TestUtils.PRIVATE_KEY_RESOURCE); + } + + @Override + protected Command newCommand() { + return new LicenseGeneratorTool(); + } + + public void testMissingKeyPaths() throws Exception { + Path pub = createTempDir().resolve("pub"); + Path pri = createTempDir().resolve("pri"); + UserException e = expectThrows( + UserException.class, + () -> execute( + "--publicKeyPath", + pub.toString(), + "--privateKeyPath", + pri.toString())); + assertTrue(e.getMessage(), e.getMessage().contains("pri does not exist")); + assertEquals(ExitCodes.USAGE, e.exitCode); + + Files.createFile(pri); + e = expectThrows( + UserException.class, + () -> execute( + "--publicKeyPath", + pub.toString(), + "--privateKeyPath", + pri.toString())); + assertTrue(e.getMessage(), e.getMessage().contains("pub does not exist")); + assertEquals(ExitCodes.USAGE, e.exitCode); + } + + public void testMissingLicenseSpec() throws Exception { + UserException e = expectThrows( + UserException.class, + () -> execute( + "--publicKeyPath", + pubKeyPath.toString(), + "--privateKeyPath", + priKeyPath.toString())); + assertTrue( + e.getMessage(), + e.getMessage().contains("Must specify either --license or --licenseFile")); + assertEquals(ExitCodes.USAGE, e.exitCode); + } + + public void testLicenseSpecString() throws Exception { + TestUtils.LicenseSpec inputLicenseSpec = + TestUtils.generateRandomLicenseSpec(License.VERSION_CURRENT); + String licenseSpecString = TestUtils.generateLicenseSpecString(inputLicenseSpec); + String output = execute( + "--publicKeyPath", + pubKeyPath.toString(), + "--privateKeyPath", + priKeyPath.toString(), + "--license", + licenseSpecString); + final BytesArray bytes = new BytesArray(output.getBytes(StandardCharsets.UTF_8)); + License outputLicense = License.fromSource(bytes, XContentType.JSON); + TestUtils.assertLicenseSpec(inputLicenseSpec, outputLicense); + } + + public void testLicenseSpecFile() throws Exception { + TestUtils.LicenseSpec inputLicenseSpec = + TestUtils.generateRandomLicenseSpec(License.VERSION_CURRENT); + String licenseSpecString = TestUtils.generateLicenseSpecString(inputLicenseSpec); + Path licenseSpecFile = createTempFile(); + Files.write(licenseSpecFile, licenseSpecString.getBytes(StandardCharsets.UTF_8)); + String output = execute( + "--publicKeyPath", + pubKeyPath.toString(), + "--privateKeyPath", + priKeyPath.toString(), + "--licenseFile", + licenseSpecFile.toString()); + final BytesArray bytes = new BytesArray(output.getBytes(StandardCharsets.UTF_8)); + License outputLicense = License.fromSource(bytes, XContentType.JSON); + TestUtils.assertLicenseSpec(inputLicenseSpec, outputLicense); + } + +} diff --git 
a/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/tools/LicenseVerificationToolTests.java b/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/tools/LicenseVerificationToolTests.java new file mode 100644 index 0000000000000..b6dea94ba0df0 --- /dev/null +++ b/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/tools/LicenseVerificationToolTests.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license.licensor.tools; + +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.CommandTestCase; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.license.License; +import org.elasticsearch.license.licensor.TestUtils; +import org.junit.Before; + +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; + +public class LicenseVerificationToolTests extends CommandTestCase { + protected Path pubKeyPath = null; + protected Path priKeyPath = null; + + @Before + public void setup() throws Exception { + logger.error("project.basedir [{}]", System.getProperty("project.basedir")); + pubKeyPath = getDataPath(TestUtils.PUBLIC_KEY_RESOURCE); + priKeyPath = getDataPath(TestUtils.PRIVATE_KEY_RESOURCE); + } + + @Override + protected Command newCommand() { + return new LicenseVerificationTool(); + } + + public void testMissingKeyPath() throws Exception { + Path pub = createTempDir().resolve("pub"); + UserException e = expectThrows( + UserException.class, + () -> execute("--publicKeyPath", pub.toString())); + assertTrue(e.getMessage(), e.getMessage().contains("pub does not exist")); + assertEquals(ExitCodes.USAGE, e.exitCode); + } + + public void testMissingLicenseSpec() throws Exception { + UserException e = expectThrows(UserException.class, () -> { + execute("--publicKeyPath", pubKeyPath.toString()); + }); + assertTrue( + e.getMessage(), + e.getMessage().contains("Must specify either --license or --licenseFile")); + assertEquals(ExitCodes.USAGE, e.exitCode); + } + + public void testBrokenLicense() throws Exception { + final TimeValue oneHour = TimeValue.timeValueHours(1); + License signedLicense = TestUtils.generateSignedLicense(oneHour, pubKeyPath, priKeyPath); + License tamperedLicense = License.builder() + .fromLicenseSpec(signedLicense, signedLicense.signature()) + .expiryDate(signedLicense.expiryDate() + randomIntBetween(1, 1000)).build(); + UserException e = expectThrows( + UserException.class, + () -> execute( + "--publicKeyPath", + pubKeyPath.toString(), + "--license", + TestUtils.dumpLicense(tamperedLicense))); + assertEquals("Invalid License!", e.getMessage()); + assertEquals(ExitCodes.DATA_ERROR, e.exitCode); + } + + public void testLicenseSpecString() throws Exception { + final TimeValue oneHour = TimeValue.timeValueHours(1); + License signedLicense = TestUtils.generateSignedLicense(oneHour, pubKeyPath, priKeyPath); + String output = execute( + "--publicKeyPath", + pubKeyPath.toString(), + "--license", + TestUtils.dumpLicense(signedLicense)); + assertFalse(output, output.isEmpty()); + } + + public void testLicenseSpecFile() throws Exception { + final TimeValue oneHour = TimeValue.timeValueHours(1); + License signedLicense = 
TestUtils.generateSignedLicense(oneHour, pubKeyPath, priKeyPath); + Path licenseSpecFile = createTempFile(); + Files.write( + licenseSpecFile, + TestUtils.dumpLicense(signedLicense).getBytes(StandardCharsets.UTF_8)); + String output = execute( + "--publicKeyPath", + pubKeyPath.toString(), + "--licenseFile", + licenseSpecFile.toString()); + assertFalse(output, output.isEmpty()); + } + +} diff --git a/x-pack/license-tools/src/test/resources/log4j.properties b/x-pack/license-tools/src/test/resources/log4j.properties new file mode 100644 index 0000000000000..76defc8660c81 --- /dev/null +++ b/x-pack/license-tools/src/test/resources/log4j.properties @@ -0,0 +1,11 @@ +es.logger.level=INFO +log4j.rootLogger=${es.logger.level}, out + +log4j.logger.org.apache.http=INFO, out +log4j.additivity.org.apache.http=false + +log4j.logger.org.elasticsearch.license=TRACE + +log4j.appender.out=org.apache.log4j.ConsoleAppender +log4j.appender.out.layout=org.apache.log4j.PatternLayout +log4j.appender.out.layout.conversionPattern=[%d{ISO8601}][%-5p][%-25c] %m%n diff --git a/x-pack/license-tools/src/test/resources/private.key b/x-pack/license-tools/src/test/resources/private.key new file mode 100644 index 0000000000000..1f545803d8755 Binary files /dev/null and b/x-pack/license-tools/src/test/resources/private.key differ diff --git a/x-pack/license-tools/src/test/resources/public.key b/x-pack/license-tools/src/test/resources/public.key new file mode 100644 index 0000000000000..2a9f272e0b36e --- /dev/null +++ b/x-pack/license-tools/src/test/resources/public.key @@ -0,0 +1,3 @@ +���q�n���g��wM}���UiK��0�b�2غq�]�쇴����c�+I��� &IJ�f�~��� ��]d�}o�O�Id�� +5A(쵴^��W�D��J��}�-O��?u�N5��vp�{��������t���7���� #�Vq��ktwm��]�L��z"| Q�l��Q�s�>�<}�[�2���Z�|5�����7%��D +Y�xn:�l�L��H��2��HvEEW�\�H:�6�h9 [!������+;�.w7C�_|� Ӫ��*��D`��?��xU/3>x�Uӓ+ � \ No newline at end of file diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle new file mode 100644 index 0000000000000..110b26182ad8e --- /dev/null +++ b/x-pack/plugin/build.gradle @@ -0,0 +1,178 @@ +import org.elasticsearch.gradle.LoggedExec +import org.elasticsearch.gradle.MavenFilteringHack +import org.elasticsearch.gradle.test.NodeInfo + +import java.nio.charset.StandardCharsets +import java.nio.file.Files +import java.nio.file.Path +import java.nio.file.StandardCopyOption +import org.elasticsearch.gradle.test.RunTask; + +apply plugin: 'elasticsearch.es-meta-plugin' + +archivesBaseName = 'x-pack' + +es_meta_plugin { + name = 'x-pack' + description = 'Elasticsearch Expanded Pack Plugin' + plugins = ['core', 'deprecation', 'graph', 'logstash', + 'ml', 'monitoring', 'security', 'upgrade', 'watcher', 'sql', 'rollup'] +} + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') +} + +// https://github.com/elastic/x-plugins/issues/724 +configurations { + testArtifacts.extendsFrom testRuntime +} + +task testJar(type: Jar) { + appendix 'test' + from sourceSets.test.output + /* + * Stick the license and notice file in the jar. This isn't strictly + * needed because we don't publish it but it makes our super-paranoid + * tests happy. 
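
For reference alongside the verification tests above: checking a signed license is a single call against the public half of the key pair, and it returns `false` for tampered licenses. A minimal sketch with an invented class name, assuming a signed `License` and the matching public key file:

```java
import java.nio.file.Files;
import java.nio.file.Path;

import org.elasticsearch.license.License;
import org.elasticsearch.license.LicenseVerifier;

final class VerifyLicenseSketch {
    // Same call LicenseVerificationTool and LicenseVerificationTests make.
    static boolean verify(License signedLicense, Path publicKeyPath) throws Exception {
        return LicenseVerifier.verifyLicense(signedLicense, Files.readAllBytes(publicKeyPath));
    }
}
```
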
+ */ + metaInf { + from(project.licenseFile.parent) { + include project.licenseFile.name + rename { 'LICENSE.txt' } + } + from(project.noticeFile.parent) { + include project.noticeFile.name + } + } +} +artifacts { + testArtifacts testJar +} + +integTestRunner { + /* + * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each + * other if we allow them to set the number of available processors as it's set-once in Netty. + */ + systemProperty 'es.set.netty.runtime.available.processors', 'false' + + + // TODO: fix this rest test to not depend on a hardcoded port! + def blacklist = ['getting_started/10_monitor_cluster_health/*'] + boolean snapshot = "true".equals(System.getProperty("build.snapshot", "true")) + if (!snapshot) { + // these tests attempt to install basic/internal licenses signed against the dev/public.key + // Since there is no infrastructure in place (anytime soon) to generate licenses using the production + // private key, these tests are whitelisted in non-snapshot test runs + blacklist.addAll(['xpack/15_basic/*', 'license/20_put_license/*']) + } + systemProperty 'tests.rest.blacklist', blacklist.join(',') +} + +// location of generated keystores and certificates +File keystoreDir = new File(project.buildDir, 'keystore') + +// Generate the node's keystore +File nodeKeystore = new File(keystoreDir, 'test-node.jks') +task createNodeKeyStore(type: LoggedExec) { + doFirst { + if (nodeKeystore.parentFile.exists() == false) { + nodeKeystore.parentFile.mkdirs() + } + if (nodeKeystore.exists()) { + delete nodeKeystore + } + } + executable = new File(project.runtimeJavaHome, 'bin/keytool') + standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8')) + args '-genkey', + '-alias', 'test-node', + '-keystore', nodeKeystore, + '-keyalg', 'RSA', + '-keysize', '2048', + '-validity', '712', + '-dname', 'CN=smoke-test-plugins-ssl', + '-keypass', 'keypass', + '-storepass', 'keypass' +} + +// Add keystores to test classpath: it expects it there +sourceSets.test.resources.srcDir(keystoreDir) +processTestResources.dependsOn(createNodeKeyStore) + +integTestCluster { + dependsOn createNodeKeyStore + setting 'xpack.ml.enabled', 'true' + setting 'xpack.security.enabled', 'true' + setting 'logger.org.elasticsearch.xpack.ml.datafeed', 'TRACE' + // Integration tests are supposed to enable/disable exporters before/after each test + setting 'xpack.monitoring.exporters._local.type', 'local' + setting 'xpack.monitoring.exporters._local.enabled', 'false' + setting 'xpack.security.authc.token.enabled', 'true' + setting 'xpack.security.transport.ssl.enabled', 'true' + setting 'xpack.security.transport.ssl.keystore.path', nodeKeystore.name + setting 'xpack.security.transport.ssl.verification_mode', 'certificate' + setting 'xpack.security.audit.enabled', 'true' + setting 'xpack.license.self_generated.type', 'trial' + keystoreSetting 'bootstrap.password', 'x-pack-test-password' + keystoreSetting 'xpack.security.transport.ssl.keystore.secure_password', 'keypass' + distribution = 'zip' // this is important since we use the reindex module in ML + + setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'x_pack_rest_user', '-p', 'x-pack-test-password', '-r', 'superuser' + + extraConfigFile nodeKeystore.name, nodeKeystore + + waitCondition = { NodeInfo node, AntBuilder ant -> + File tmpFile = new File(node.cwd, 'wait.success') + + for (int i = 0; i < 10; i++) { + // 
we use custom wait logic here as the elastic user is not available immediately and ant.get will fail when a 401 is returned + HttpURLConnection httpURLConnection = null; + try { + httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}&wait_for_status=yellow").openConnection(); + httpURLConnection.setRequestProperty("Authorization", "Basic " + + Base64.getEncoder().encodeToString("x_pack_rest_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8))); + httpURLConnection.setRequestMethod("GET"); + httpURLConnection.connect(); + if (httpURLConnection.getResponseCode() == 200) { + tmpFile.withWriter StandardCharsets.UTF_8.name(), { + it.write(httpURLConnection.getInputStream().getText(StandardCharsets.UTF_8.name())) + } + } + } catch (Exception e) { + if (i == 9) { + logger.error("final attempt of calling cluster health failed", e) + } else { + logger.debug("failed to call cluster health", e) + } + } finally { + if (httpURLConnection != null) { + httpURLConnection.disconnect(); + } + } + + // did not start, so wait a bit before trying again + Thread.sleep(500L); + } + return tmpFile.exists() + } +} + +run { + def licenseType = System.getProperty("license_type", "basic") + if (licenseType == 'trial') { + setting 'xpack.ml.enabled', 'true' + setting 'xpack.graph.enabled', 'true' + setting 'xpack.watcher.enabled', 'true' + setting 'xpack.license.self_generated.type', 'trial' + } else if (licenseType != 'basic') { + throw new IllegalArgumentException("Unsupported self-generated license type: [" + licenseType + "]. Must be " + + "[basic] or [trial].") + } + setting 'xpack.security.enabled', 'true' + setting 'xpack.monitoring.enabled', 'true' + setting 'xpack.sql.enabled', 'true' + setting 'xpack.rollup.enabled', 'true' + keystoreSetting 'bootstrap.password', 'password' +} diff --git a/x-pack/plugin/bwc/build.gradle b/x-pack/plugin/bwc/build.gradle new file mode 100644 index 0000000000000..757448e35cd12 --- /dev/null +++ b/x-pack/plugin/bwc/build.gradle @@ -0,0 +1,226 @@ +import org.apache.tools.ant.taskdefs.condition.Os +import org.elasticsearch.gradle.LoggedExec +import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.test.NodeInfo + +import static org.elasticsearch.gradle.BuildPlugin.getJavaHome + +/** + * Subdirectories of this project are dummy projects which does a local + * checkout of the appropriate version's branch, and builds a snapshot. This + * allows backcompat tests to test against the next unreleased versions + * without relying on snapshots. + */ + +subprojects { + + Version bwcVersion = bwcVersions.getSnapshotForProject(project.name) + if (bwcVersion == null) { + // this project wont do anything + return + } + + String bwcBranch + if (project.name == 'next-minor-snapshot') { + // this is always a .x series + bwcBranch = "${bwcVersion.major}.x" + } else { + bwcBranch = "${bwcVersion.major}.${bwcVersion.minor}" + } + + apply plugin: 'distribution' + // Not published so no need to assemble + tasks.remove(assemble) + build.dependsOn.remove('assemble') + + File esCheckoutDir = file("${buildDir}/bwc/checkout-es-${bwcBranch}") + /* Delay building the path as the path will not exist during configuration which will + * fail on Windows due to getting the short name requiring the path to already exist. 
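
The `waitCondition` in `x-pack/plugin/build.gradle` above polls cluster health with Basic auth rather than using `ant.get`, because the elastic user is not available immediately and a 401 would otherwise fail the wait. A plain-Java sketch of that polling pattern (endpoint parameters and credentials copied from the build script; the class and method names are invented, and the real closure also writes the response to a `wait.success` file):

```java
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

final class WaitForClusterSketch {
    // Poll the cluster-health endpoint with Basic auth until it answers 200 or we give up.
    static boolean waitForYellow(String httpUri) throws Exception {
        String auth = Base64.getEncoder().encodeToString(
                "x_pack_rest_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8));
        for (int i = 0; i < 10; i++) {
            HttpURLConnection conn = (HttpURLConnection) new URL(
                    "http://" + httpUri + "/_cluster/health?wait_for_status=yellow").openConnection();
            try {
                conn.setRequestProperty("Authorization", "Basic " + auth);
                conn.setRequestMethod("GET");
                if (conn.getResponseCode() == 200) {
                    return true;
                }
            } catch (Exception e) {
                // node not reachable yet; fall through and retry
            } finally {
                conn.disconnect();
            }
            Thread.sleep(500L);
        }
        return false;
    }
}
```
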
+ */ + Object esCheckoutPath = """${-> + if (Os.isFamily(Os.FAMILY_WINDOWS)) { + esCheckoutDir.mkdirs() + NodeInfo.getShortPathName(esCheckoutDir.toString()) + } else { + esCheckoutDir.toString() + } + }""" + File xpackCheckoutDir = file("${esCheckoutDir}-extra/x-pack-elasticsearch") + Object xpackCheckoutPath = """${-> + if (Os.isFamily(Os.FAMILY_WINDOWS)) { + xpackCheckoutDir.mkdirs() + NodeInfo.getShortPathName(xpackCheckoutDir.toString()) + } else { + xpackCheckoutDir.toString() + } + }""" + + final String remote = System.getProperty("tests.bwc.remote", "elastic") + + task createElasticsearchClone(type: LoggedExec) { + onlyIf { esCheckoutDir.exists() == false } + commandLine = ['git', 'clone', rootDir, esCheckoutPath] + } + + task createXPackClone(type: LoggedExec) { + onlyIf { xpackCheckoutDir.exists() == false } + commandLine = ['git', 'clone', xpackRootProject.projectDir, xpackCheckoutPath] + } + + // we use regular Exec here to ensure we always get output, regardless of logging level + task findElasticsearchRemote(type: Exec) { + dependsOn createElasticsearchClone + workingDir = esCheckoutDir + commandLine = ['git', 'remote', '-v'] + ignoreExitValue = true + ByteArrayOutputStream output = new ByteArrayOutputStream() + standardOutput = output + doLast { + if (execResult.exitValue != 0) { + output.toString('UTF-8').eachLine { line -> logger.error(line) } + execResult.assertNormalExitValue() + } + project.ext.esRemoteExists = false + output.toString('UTF-8').eachLine { + if (it.contains("${remote}\t")) { + project.ext.esRemoteExists = true + } + } + } + } + + task findXPackRemote(type: Exec) { + dependsOn createXPackClone + workingDir = xpackCheckoutDir + commandLine = ['git', 'remote', '-v'] + ignoreExitValue = true + ByteArrayOutputStream output = new ByteArrayOutputStream() + standardOutput = output + doLast { + if (execResult.exitValue != 0) { + output.toString('UTF-8').eachLine { line -> logger.error(line) } + execResult.assertNormalExitValue() + } + project.ext.xpackRemoteExists = false + output.toString('UTF-8').eachLine { + if (it.contains("${remote}\t")) { + project.ext.xpackRemoteExists = true + } + } + } + } + + task addElasticsearchRemote(type: LoggedExec) { + dependsOn findElasticsearchRemote + onlyIf { project.ext.esRemoteExists == false } + workingDir = esCheckoutDir + commandLine = ['git', 'remote', 'add', "${remote}", "git@github.com:${remote}/elasticsearch.git"] + } + + task addXPackRemote(type: LoggedExec) { + dependsOn findXPackRemote + onlyIf { project.ext.xpackRemoteExists == false } + workingDir = xpackCheckoutDir + commandLine = ['git', 'remote', 'add', "${remote}", "git@github.com:${remote}/x-pack-elasticsearch.git"] + } + + task fetchElasticsearchLatest(type: LoggedExec) { + dependsOn addElasticsearchRemote + workingDir = esCheckoutDir + commandLine = ['git', 'fetch', '--all'] + } + + task fetchXPackLatest(type: LoggedExec) { + dependsOn addXPackRemote + workingDir = xpackCheckoutDir + commandLine = ['git', 'fetch', '--all'] + } + + String esBuildMetadataKey = "bwc_refspec_${project.path.substring(1)}_elasticsearch" + task checkoutElasticsearchBwcBranch(type: LoggedExec) { + dependsOn fetchElasticsearchLatest + def String refspec = System.getProperty("tests.bwc.refspec", buildMetadata.get(esBuildMetadataKey, "${remote}/${bwcBranch}")) + workingDir = esCheckoutDir + commandLine = ['git', 'checkout', refspec] + } + + String xpackBuildMetadataKey = "bwc_refspec_${project.path.substring(1)}_xpack" + task checkoutXPackBwcBranch(type: LoggedExec) { 
+ dependsOn fetchXPackLatest + def String refspec = System.getProperty("tests.bwc.refspec", buildMetadata.get(xpackBuildMetadataKey, "${remote}/${bwcBranch}")) + workingDir = xpackCheckoutDir + commandLine = ['git', 'checkout', refspec] + } + + File esBuildMetadataFile = project.file("build/${project.name}_elasticsearch/build_metadata") + task writeElasticsearchBuildMetadata(type: LoggedExec) { + dependsOn checkoutElasticsearchBwcBranch + workingDir = esCheckoutDir + commandLine = ['git', 'rev-parse', 'HEAD'] + ignoreExitValue = true + ByteArrayOutputStream output = new ByteArrayOutputStream() + standardOutput = output + doLast { + if (execResult.exitValue != 0) { + output.toString('UTF-8').eachLine { line -> logger.error(line) } + execResult.assertNormalExitValue() + } + project.mkdir(esBuildMetadataFile.parent) + esBuildMetadataFile.setText("${esBuildMetadataKey}=${output.toString('UTF-8')}", 'UTF-8') + } + } + + File xpackBuildMetadataFile = project.file("build/${project.name}_xpack/build_metadata") + task writeXPackBuildMetadata(type: LoggedExec) { + dependsOn checkoutXPackBwcBranch + workingDir = xpackCheckoutDir + commandLine = ['git', 'rev-parse', 'HEAD'] + ignoreExitValue = true + ByteArrayOutputStream output = new ByteArrayOutputStream() + standardOutput = output + doLast { + if (execResult.exitValue != 0) { + output.toString('UTF-8').eachLine { line -> logger.error(line) } + execResult.assertNormalExitValue() + } + project.mkdir(xpackBuildMetadataFile.parent) + xpackBuildMetadataFile.setText("${xpackBuildMetadataKey}=${output.toString('UTF-8')}", 'UTF-8') + } + } + + File bwcZip = file("${xpackCheckoutDir}/plugin/build/distributions/x-pack-${bwcVersion}.zip") + task buildBwcVersion(type: Exec) { + dependsOn checkoutXPackBwcBranch, checkoutElasticsearchBwcBranch, writeElasticsearchBuildMetadata, writeXPackBuildMetadata + workingDir = xpackCheckoutDir + if (["5.6", "6.0", "6.1"].contains(bwcBranch)) { + // we are building branches that are officially built with JDK 8, push JAVA8_HOME to JAVA_HOME for these builds + environment('JAVA_HOME', getJavaHome(it, 8)) + } else if ("6.2".equals(bwcBranch)) { + environment('JAVA_HOME', getJavaHome(it, 9)) + } else { + environment('JAVA_HOME', project.compilerJavaHome) + } + if (Os.isFamily(Os.FAMILY_WINDOWS)) { + executable 'cmd' + args '/C', 'call', new File(xpackCheckoutDir, 'gradlew').toString() + } else { + executable new File(xpackCheckoutDir, 'gradlew').toString() + } + args ":x-pack-elasticsearch:plugin:assemble", "-Dbuild.snapshot=true" + final LogLevel logLevel = gradle.startParameter.logLevel + if ([LogLevel.QUIET, LogLevel.WARN, LogLevel.INFO, LogLevel.DEBUG].contains(logLevel)) { + args "--${logLevel.name().toLowerCase(Locale.ENGLISH)}" + } + final String showStacktraceName = gradle.startParameter.showStacktrace.name() + assert ["INTERNAL_EXCEPTIONS", "ALWAYS", "ALWAYS_FULL"].contains(showStacktraceName) + if (showStacktraceName.equals("ALWAYS")) { + args "--stacktrace" + } else if (showStacktraceName.equals("ALWAYS_FULL")) { + args "--full-stacktrace" + } + } + + artifacts { + 'default' file: bwcZip, name: 'x-pack', type: 'zip', builtBy: buildBwcVersion + } +} diff --git a/x-pack/plugin/bwc/maintenance-bugfix-snapshot/build.gradle b/x-pack/plugin/bwc/maintenance-bugfix-snapshot/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/bwc/next-bugfix-snapshot/build.gradle b/x-pack/plugin/bwc/next-bugfix-snapshot/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git 
a/x-pack/plugin/bwc/next-minor-snapshot/build.gradle b/x-pack/plugin/bwc/next-minor-snapshot/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/bwc/staged-minor-snapshot/build.gradle b/x-pack/plugin/bwc/staged-minor-snapshot/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle new file mode 100644 index 0000000000000..ca38aee302e3b --- /dev/null +++ b/x-pack/plugin/core/build.gradle @@ -0,0 +1,134 @@ +import org.elasticsearch.gradle.MavenFilteringHack + +import java.nio.file.Files +import java.nio.file.Path +import java.nio.file.Paths +import java.nio.file.StandardCopyOption + +apply plugin: 'elasticsearch.esplugin' + +archivesBaseName = 'x-pack-core' + +esplugin { + name 'x-pack-core' + description 'Elasticsearch Expanded Pack Plugin - Core' + classname 'org.elasticsearch.xpack.core.XPackPlugin' + hasNativeController false + requiresKeystore false +} + +dependencyLicenses { + mapping from: /bc.*/, to: 'bouncycastle' + mapping from: /http.*/, to: 'httpclient' // pulled in by rest client + mapping from: /commons-.*/, to: 'commons' // pulled in by rest client +} + +dependencies { + compileOnly "org.elasticsearch:elasticsearch:${version}" + compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" + compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" + compile "org.apache.httpcomponents:httpcore-nio:${versions.httpcore}" + compile "org.apache.httpcomponents:httpasyncclient:${versions.httpasyncclient}" + + compile "commons-logging:commons-logging:${versions.commonslogging}" + compile "commons-codec:commons-codec:${versions.commonscodec}" + + // security deps + compile 'com.unboundid:unboundid-ldapsdk:3.2.0' + compile 'org.bouncycastle:bcprov-jdk15on:1.58' + compile 'org.bouncycastle:bcpkix-jdk15on:1.58' + compile project(path: ':modules:transport-netty4', configuration: 'runtime') + + testCompile 'org.elasticsearch:securemock:1.2' + testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}" + testCompile "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" + testCompile "org.slf4j:slf4j-api:${versions.slf4j}" + testCompile project(path: ':modules:reindex', configuration: 'runtime') + testCompile project(path: ':modules:parent-join', configuration: 'runtime') + testCompile project(path: ':modules:analysis-common', configuration: 'runtime') +} + +ext.expansions = [ + 'project.version': version +] + +processResources { + from(sourceSets.main.resources.srcDirs) { + exclude '**/public.key' + inputs.properties(expansions) + MavenFilteringHack.filter(it, expansions) + } + boolean snapshot = "true".equals(System.getProperty("build.snapshot", "true")) + String licenseKey = System.getProperty("license.key") + if (licenseKey != null) { + println "Using provided license key from ${licenseKey}" + } else if (snapshot) { + licenseKey = Paths.get(project.projectDir.path, 'snapshot.key') + } else { + throw new IllegalArgumentException('Property license.key must be set for release build') + } + if (Files.exists(Paths.get(licenseKey)) == false) { + throw new IllegalArgumentException('license.key at specified path [' + licenseKey + '] does not exist') + } + from(licenseKey) { + rename { String filename -> 'public.key' } + } +} + +forbiddenPatterns { + exclude '**/*.key' + exclude '**/*.p12' + exclude '**/*.der' + exclude '**/*.zip' +} + +compileJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try,-unchecked" 
+compileTestJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try,-unchecked" + +licenseHeaders { + approvedLicenses << 'BCrypt (BSD-like)' + additionalLicense 'BCRYP', 'BCrypt (BSD-like)', 'Copyright (c) 2006 Damien Miller ' +} + +// make LicenseSigner available for testing signed licenses +sourceSets.test.java { + srcDir '../../license-tools/src/main/java' +} + +test { + /* + * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each + * other if we allow them to set the number of available processors as it's set-once in Netty. + */ + systemProperty 'es.set.netty.runtime.available.processors', 'false' +} + +// TODO: don't publish test artifacts just to run messy tests, fix the tests! +// https://github.com/elastic/x-plugins/issues/724 +configurations { + testArtifacts.extendsFrom testRuntime +} +task testJar(type: Jar) { + appendix 'test' + from sourceSets.test.output +} +artifacts { + // normal es plugins do not publish the jar but we need to since users need it for Transport Clients and extensions + archives jar + testArtifacts testJar +} + +thirdPartyAudit.excludes = [ + //commons-logging optional dependencies + 'org.apache.avalon.framework.logger.Logger', + 'org.apache.log.Hierarchy', + 'org.apache.log.Logger', + //commons-logging provided dependencies + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener' +] + +// xpack modules are installed in real clusters as the meta plugin, so +// installing them as individual plugins for integ tests doesn't make sense, +// so we disable integ tests and there are no integ tests in xpack core module +integTest.enabled = false diff --git a/x-pack/plugin/core/licenses/bcpkix-jdk15on-1.58.jar.sha1 b/x-pack/plugin/core/licenses/bcpkix-jdk15on-1.58.jar.sha1 new file mode 100644 index 0000000000000..1fbdc7fcc1fa8 --- /dev/null +++ b/x-pack/plugin/core/licenses/bcpkix-jdk15on-1.58.jar.sha1 @@ -0,0 +1 @@ +15a760a039b040e767a75c77ffcc4ff62558f903 \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/bcprov-jdk15on-1.58.jar.sha1 b/x-pack/plugin/core/licenses/bcprov-jdk15on-1.58.jar.sha1 new file mode 100644 index 0000000000000..95bc28eb146ef --- /dev/null +++ b/x-pack/plugin/core/licenses/bcprov-jdk15on-1.58.jar.sha1 @@ -0,0 +1 @@ +2c9aa1c4e3372b447ba5daabade4adf2a2264b12 \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/bouncycastle-LICENSE.txt b/x-pack/plugin/core/licenses/bouncycastle-LICENSE.txt new file mode 100644 index 0000000000000..1bd35a7a35c21 --- /dev/null +++ b/x-pack/plugin/core/licenses/bouncycastle-LICENSE.txt @@ -0,0 +1,17 @@ +Copyright (c) 2000-2015 The Legion of the Bouncy Castle Inc. (http://www.bouncycastle.org) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software +and associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/x-pack/plugin/core/licenses/bouncycastle-NOTICE.txt b/x-pack/plugin/core/licenses/bouncycastle-NOTICE.txt new file mode 100644 index 0000000000000..8b137891791fe --- /dev/null +++ b/x-pack/plugin/core/licenses/bouncycastle-NOTICE.txt @@ -0,0 +1 @@ + diff --git a/x-pack/plugin/core/licenses/commons-LICENSE.txt b/x-pack/plugin/core/licenses/commons-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/x-pack/plugin/core/licenses/commons-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/core/licenses/commons-NOTICE.txt b/x-pack/plugin/core/licenses/commons-NOTICE.txt new file mode 100644 index 0000000000000..1da9af50f6008 --- /dev/null +++ b/x-pack/plugin/core/licenses/commons-NOTICE.txt @@ -0,0 +1,17 @@ +Apache Commons Codec +Copyright 2002-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java +contains test data from http://aspell.net/test/orig/batch0.tab. +Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org) + +=============================================================================== + +The content of package org.apache.commons.codec.language.bm has been translated +from the original php source code available at http://stevemorse.org/phoneticinfo.htm +with permission from the original authors. +Original source copyright: +Copyright (c) 2008 Alexander Beider & Stephen P. Morse. 
diff --git a/x-pack/plugin/core/licenses/commons-codec-1.10.jar.sha1 b/x-pack/plugin/core/licenses/commons-codec-1.10.jar.sha1 new file mode 100644 index 0000000000000..3fe8682a1b0f9 --- /dev/null +++ b/x-pack/plugin/core/licenses/commons-codec-1.10.jar.sha1 @@ -0,0 +1 @@ +4b95f4897fa13f2cd904aee711aeafc0c5295cd8 \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/commons-logging-1.1.3.jar.sha1 b/x-pack/plugin/core/licenses/commons-logging-1.1.3.jar.sha1 new file mode 100644 index 0000000000000..5b8f029e58293 --- /dev/null +++ b/x-pack/plugin/core/licenses/commons-logging-1.1.3.jar.sha1 @@ -0,0 +1 @@ +f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/httpasyncclient-4.1.2.jar.sha1 b/x-pack/plugin/core/licenses/httpasyncclient-4.1.2.jar.sha1 new file mode 100644 index 0000000000000..065ed920a1773 --- /dev/null +++ b/x-pack/plugin/core/licenses/httpasyncclient-4.1.2.jar.sha1 @@ -0,0 +1 @@ +95aa3e6fb520191a0970a73cf09f62948ee614be \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/httpclient-4.5.2.jar.sha1 b/x-pack/plugin/core/licenses/httpclient-4.5.2.jar.sha1 new file mode 100644 index 0000000000000..6937112a09fb6 --- /dev/null +++ b/x-pack/plugin/core/licenses/httpclient-4.5.2.jar.sha1 @@ -0,0 +1 @@ +733db77aa8d9b2d68015189df76ab06304406e50 \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/httpclient-LICENSE.txt b/x-pack/plugin/core/licenses/httpclient-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/x-pack/plugin/core/licenses/httpclient-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. 
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. 
+ +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. 
Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/x-pack/plugin/core/licenses/httpclient-NOTICE.txt b/x-pack/plugin/core/licenses/httpclient-NOTICE.txt new file mode 100644 index 0000000000000..91e5c40c4c6d3 --- /dev/null +++ b/x-pack/plugin/core/licenses/httpclient-NOTICE.txt @@ -0,0 +1,6 @@ +Apache HttpComponents Client +Copyright 1999-2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + diff --git a/x-pack/plugin/core/licenses/httpcore-4.4.5.jar.sha1 b/x-pack/plugin/core/licenses/httpcore-4.4.5.jar.sha1 new file mode 100644 index 0000000000000..581726601745b --- /dev/null +++ b/x-pack/plugin/core/licenses/httpcore-4.4.5.jar.sha1 @@ -0,0 +1 @@ +e7501a1b34325abb00d17dde96150604a0658b54 \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/httpcore-nio-4.4.5.jar.sha1 b/x-pack/plugin/core/licenses/httpcore-nio-4.4.5.jar.sha1 new file mode 100644 index 0000000000000..d6a80bf100de3 --- /dev/null +++ b/x-pack/plugin/core/licenses/httpcore-nio-4.4.5.jar.sha1 @@ -0,0 +1 @@ +f4be009e7505f6ceddf21e7960c759f413f15056 \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/unboundid-ldapsdk-3.2.0.jar.sha1 b/x-pack/plugin/core/licenses/unboundid-ldapsdk-3.2.0.jar.sha1 new file mode 100644 index 0000000000000..23697f364e9a7 --- /dev/null +++ b/x-pack/plugin/core/licenses/unboundid-ldapsdk-3.2.0.jar.sha1 @@ -0,0 +1 @@ +f76725e5a215ea468ecda06a8d66a809281e685f \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/unboundid-ldapsdk-LICENSE.txt b/x-pack/plugin/core/licenses/unboundid-ldapsdk-LICENSE.txt new file mode 100644 index 0000000000000..e57554e569219 --- /dev/null +++ b/x-pack/plugin/core/licenses/unboundid-ldapsdk-LICENSE.txt @@ -0,0 +1,91 @@ + UnboundID LDAP SDK Free Use License + +THIS IS AN AGREEMENT BETWEEN YOU ("YOU") AND UNBOUNDID CORP. ("UNBOUNDID") +REGARDING YOUR USE OF UNBOUNDID LDAP SDK FOR JAVA AND ANY ASSOCIATED +DOCUMENTATION, OBJECT CODE, COMPILED LIBRARIES, SOURCE CODE AND SOURCE FILES OR +OTHER MATERIALS MADE AVAILABLE BY UNBOUNDID (COLLECTIVELY REFERRED TO IN THIS +AGREEMENT AS THE ("SDK"). + +BY INSTALLING, ACCESSING OR OTHERWISE USING THE SDK, YOU ACCEPT THE TERMS OF +THIS AGREEMENT. IF YOU DO NOT AGREE TO THE TERMS OF THIS AGREEMENT, DO NOT +INSTALL, ACCESS OR USE THE SDK. + +USE OF THE SDK. Subject to your compliance with this Agreement, UnboundID +grants to You a non-exclusive, royalty-free license, under UnboundID's +intellectual property rights in the SDK, to use, reproduce, modify and +distribute this release of the SDK; provided that no license is granted herein +under any patents that may be infringed by your modifications, derivative works +or by other works in which the SDK may be incorporated (collectively, your +"Applications"). 
You may reproduce and redistribute the SDK with your
+Applications provided that you (i) include this license file and an
+unmodified copy of the unboundid-ldapsdk-se.jar file; and (ii) such
+redistribution is subject to a license whose terms do not conflict with or
+contradict the terms of this Agreement. You may also reproduce and redistribute
+the SDK without your Applications provided that you redistribute the SDK
+complete and unmodified (i.e., with all "read me" files, copyright notices, and
+other legal notices and terms that UnboundID has included in the SDK).
+
+SCOPE OF LICENSES. This Agreement does not grant You the right to use any
+UnboundID intellectual property which is not included as part of the SDK. The
+SDK is licensed, not sold. This Agreement only gives You some rights to use
+the SDK. UnboundID reserves all other rights. Unless applicable law gives You
+more rights despite this limitation, You may use the SDK only as expressly
+permitted in this Agreement.
+
+SUPPORT. UnboundID is not obligated to provide any technical or other support
+("Support Services") for the SDK to You under this Agreement. However, if
+UnboundID chooses to provide any Support Services to You, Your use of such
+Support Services will be governed by then-current UnboundID support policies.
+
+TERMINATION. UnboundID reserves the right to discontinue offering the SDK and
+to modify the SDK at any time in its sole discretion. Notwithstanding anything
+contained in this Agreement to the contrary, UnboundID may also, in its sole
+discretion, terminate or suspend access to the SDK to You or any end user at
+any time. In addition, if you fail to comply with the terms of this Agreement,
+then any rights granted herein will be automatically terminated if such failure
+is not corrected within 30 days of the initial notification of such failure.
+You acknowledge that termination and/or monetary damages may not be a
+sufficient remedy if You breach this Agreement and that UnboundID will be
+entitled, without waiving any other rights or remedies, to injunctive or
+equitable relief as may be deemed proper by a court of competent jurisdiction
+in the event of a breach. UnboundID may also terminate this Agreement if the
+SDK becomes, or in UnboundID's reasonable opinion is likely to become, the
+subject of a claim of intellectual property infringement or trade secret
+misappropriation. All rights and licenses granted herein will simultaneously
+and automatically terminate upon termination of this Agreement for any reason.
+
+DISCLAIMER OF WARRANTY. THE SDK IS PROVIDED "AS IS" AND UNBOUNDID DOES NOT
+WARRANT THAT THE SDK WILL BE ERROR-FREE, VIRUS-FREE, WILL PERFORM IN AN
+UNINTERRUPTED, SECURE OR TIMELY MANNER, OR WILL INTEROPERATE WITH OTHER
+HARDWARE, SOFTWARE, SYSTEMS OR DATA. TO THE MAXIMUM EXTENT ALLOWED BY LAW, ALL
+CONDITIONS, REPRESENTATIONS AND WARRANTIES, WHETHER EXPRESS, IMPLIED, STATUTORY
+OR OTHERWISE INCLUDING, WITHOUT LIMITATION, ANY IMPLIED WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE (EVEN IF UNBOUNDID HAD BEEN
+INFORMED OF SUCH PURPOSE), OR NON-INFRINGEMENT OF THIRD PARTY RIGHTS ARE HEREBY
+DISCLAIMED.
+
+LIMITATION OF LIABILITY.
IN NO EVENT WILL UNBOUNDID OR ITS SUPPLIERS BE LIABLE +FOR ANY DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, LOST PROFITS, +REVENUE, DATA OR DATA USE, BUSINESS INTERRUPTION, COST OF COVER, DIRECT, +INDIRECT, SPECIAL, PUNITIVE, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND) +ARISING OUT OF THE USE OF OR INABILITY TO USE THE SDK OR IN ANY WAY RELATED TO +THIS AGREEMENT, EVEN IF UNBOUNDID HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + +ADDITIONAL RIGHTS. Certain states do not allow the exclusion of implied +warranties or limitation of liability for certain kinds of damages, so the +exclusion of limited warranties and limitation of liability set forth above may +not apply to You. + +EXPORT RESTRICTIONS. The SDK is subject to United States export control laws. +You acknowledge and agree that You are responsible for compliance with all +domestic and international export laws and regulations that apply to the SDK. + +MISCELLANEOUS. This Agreement constitutes the entire agreement with respect to +the SDK. If any provision of this Agreement shall be held to be invalid, +illegal or unenforceable, the validity, legality and enforceability of the +remaining provisions shall in no way be affected or impaired thereby. This +Agreement and performance hereunder shall be governed by and construed in +accordance with the laws of the State of Texas without regard to its conflict +of laws rules. Any disputes related to this Agreement shall be exclusively +litigated in the state or federal courts located in Travis County, Texas. diff --git a/x-pack/plugin/core/licenses/unboundid-ldapsdk-NOTICE.txt b/x-pack/plugin/core/licenses/unboundid-ldapsdk-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/core/snapshot.key b/x-pack/plugin/core/snapshot.key new file mode 100644 index 0000000000000..2a9f272e0b36e --- /dev/null +++ b/x-pack/plugin/core/snapshot.key @@ -0,0 +1,3 @@ +���q�n���g��wM}���UiK��0�b�2غq�]�쇴����c�+I��� &IJ�f�~��� ��]d�}o�O�Id�� +5A(쵴^��W�D��J��}�-O��?u�N5��vp�{��������t���7���� #�Vq��ktwm��]�L��z"| Q�l��Q�s�>�<}�[�2���Z�|5�����7%��D +Y�xn:�l�L��H��2��HvEEW�\�H:�6�h9 [!������+;�.w7C�_|� Ӫ��*��D`��?��xU/3>x�Uӓ+ � \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/bin/x-pack-env b/x-pack/plugin/core/src/main/bin/x-pack-env new file mode 100644 index 0000000000000..fb5489cfebc43 --- /dev/null +++ b/x-pack/plugin/core/src/main/bin/x-pack-env @@ -0,0 +1,8 @@ +#!/bin/bash + +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License; +# you may not use this file except in compliance with the Elastic License. + +# include x-pack-core jars in classpath +ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack/x-pack-core/*" diff --git a/x-pack/plugin/core/src/main/bin/x-pack-env.bat b/x-pack/plugin/core/src/main/bin/x-pack-env.bat new file mode 100644 index 0000000000000..de45a53c9269c --- /dev/null +++ b/x-pack/plugin/core/src/main/bin/x-pack-env.bat @@ -0,0 +1,5 @@ +rem Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +rem or more contributor license agreements. Licensed under the Elastic License; +rem you may not use this file except in compliance with the Elastic License. 
+ +set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack/x-pack-core/* diff --git a/x-pack/plugin/core/src/main/config/log4j2.properties b/x-pack/plugin/core/src/main/config/log4j2.properties new file mode 100644 index 0000000000000..c4cdbc0640c85 --- /dev/null +++ b/x-pack/plugin/core/src/main/config/log4j2.properties @@ -0,0 +1,22 @@ +appender.audit_rolling.type = RollingFile +appender.audit_rolling.name = audit_rolling +appender.audit_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_access.log +appender.audit_rolling.layout.type = PatternLayout +appender.audit_rolling.layout.pattern = [%d{ISO8601}] %m%n +appender.audit_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_access-%d{yyyy-MM-dd}.log +appender.audit_rolling.policies.type = Policies +appender.audit_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.audit_rolling.policies.time.interval = 1 +appender.audit_rolling.policies.time.modulate = true + +logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail +logger.xpack_security_audit_logfile.level = info +logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling +logger.xpack_security_audit_logfile.additivity = false + +logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature +logger.xmlsig.level = error +logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter +logger.samlxml_decrypt.level = fatal +logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter +logger.saml2_decrypt.level = fatal diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/common/network/InetAddressHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/common/network/InetAddressHelper.java new file mode 100644 index 0000000000000..4c52cfb5c4cd5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/common/network/InetAddressHelper.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.common.network; + +import java.net.InetAddress; +import java.net.SocketException; + +/** + * We use this class to access the package private method in NetworkUtils to resolve anyLocalAddress InetAddresses for certificate + * generation + */ +public class InetAddressHelper { + + private InetAddressHelper() {} + + public static InetAddress[] getAllAddresses() throws SocketException { + return NetworkUtils.getAllAddresses(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/CryptUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/CryptUtils.java new file mode 100644 index 0000000000000..c3c69d5bfcd68 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/CryptUtils.java @@ -0,0 +1,255 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + + +import javax.crypto.BadPaddingException; +import javax.crypto.Cipher; +import javax.crypto.IllegalBlockSizeException; +import javax.crypto.NoSuchPaddingException; +import javax.crypto.SecretKey; +import javax.crypto.SecretKeyFactory; +import javax.crypto.spec.PBEKeySpec; +import javax.crypto.spec.SecretKeySpec; +import java.nio.charset.StandardCharsets; +import java.security.InvalidKeyException; +import java.security.KeyFactory; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.security.PrivateKey; +import java.security.PublicKey; +import java.security.SecureRandom; +import java.security.spec.InvalidKeySpecException; +import java.security.spec.PKCS8EncodedKeySpec; +import java.security.spec.X509EncodedKeySpec; +import java.util.Base64; + +public class CryptUtils { + private static final int minimumPadding = 20; + private static final byte[] salt = { + (byte) 0xA9, (byte) 0xA2, (byte) 0xB5, (byte) 0xDE, + (byte) 0x2A, (byte) 0x8A, (byte) 0x9A, (byte) 0xE6 + }; + private static final int iterationCount = 1024; + private static final int aesKeyLength = 128; + private static final String keyAlgorithm = "RSA"; + private static final String passHashAlgorithm = "SHA-512"; + private static final String DEFAULT_PASS_PHRASE = "elasticsearch-license"; + + private static final SecureRandom random = new SecureRandom(); + + /** + * Read encrypted private key file content with default pass phrase + */ + public static PrivateKey readEncryptedPrivateKey(byte[] fileContents) { + try { + return readEncryptedPrivateKey(fileContents, hashPassPhrase(DEFAULT_PASS_PHRASE)); + } catch (NoSuchAlgorithmException e) { + throw new IllegalStateException(e); + } + } + + /** + * Read encrypted public key file content with default pass phrase + */ + public static PublicKey readEncryptedPublicKey(byte[] fileContents) { + try { + return readEncryptedPublicKey(fileContents, hashPassPhrase(DEFAULT_PASS_PHRASE)); + } catch (NoSuchAlgorithmException e) { + throw new IllegalStateException(e); + } + } + + /** + * Returns encrypted public key file content with default pass phrase + */ + public static byte[] writeEncryptedPublicKey(PublicKey publicKey) { + try { + return writeEncryptedPublicKey(publicKey, hashPassPhrase(DEFAULT_PASS_PHRASE)); + } catch (NoSuchAlgorithmException e) { + throw new IllegalStateException(e); + } + } + + /** + * Returns encrypted private key file content with default pass phrase + */ + public static byte[] writeEncryptedPrivateKey(PrivateKey privateKey) { + try { + return writeEncryptedPrivateKey(privateKey, hashPassPhrase(DEFAULT_PASS_PHRASE)); + } catch (NoSuchAlgorithmException e) { + throw new IllegalStateException(e); + } + } + + /** + * Read encrypted private key file content with provided passPhrase + */ + public static PrivateKey readEncryptedPrivateKey(byte[] fileContents, char[] passPhrase) { + PKCS8EncodedKeySpec privateKeySpec = new PKCS8EncodedKeySpec(decrypt(fileContents, passPhrase)); + try { + return KeyFactory.getInstance(keyAlgorithm).generatePrivate(privateKeySpec); + } catch (NoSuchAlgorithmException | InvalidKeySpecException e) { + throw new IllegalStateException(e); + } + } + + /** + * Read encrypted public key file content with provided passPhrase + */ + public static PublicKey readEncryptedPublicKey(byte[] fileContents, char[] passPhrase) { + X509EncodedKeySpec publicKeySpec = new X509EncodedKeySpec(decrypt(fileContents, passPhrase)); + try { + return 
KeyFactory.getInstance(CryptUtils.keyAlgorithm).generatePublic(publicKeySpec); + } catch (NoSuchAlgorithmException | InvalidKeySpecException e) { + throw new IllegalStateException(e); + } + } + + /** + * Returns encrypted public key file content with provided passPhrase + */ + public static byte[] writeEncryptedPublicKey(PublicKey publicKey, char[] passPhrase) { + X509EncodedKeySpec encodedKeySpec = new X509EncodedKeySpec(publicKey.getEncoded()); + return encrypt(encodedKeySpec.getEncoded(), passPhrase); + } + + /** + * Returns encrypted private key file content with provided passPhrase + */ + public static byte[] writeEncryptedPrivateKey(PrivateKey privateKey, char[] passPhrase) { + PKCS8EncodedKeySpec encodedKeySpec = new PKCS8EncodedKeySpec(privateKey.getEncoded()); + return encrypt(encodedKeySpec.getEncoded(), passPhrase); + } + + /** + * Encrypts provided data with DEFAULT_PASS_PHRASE + */ + public static byte[] encrypt(byte[] data) { + try { + return encrypt(data, hashPassPhrase(DEFAULT_PASS_PHRASE)); + } catch (NoSuchAlgorithmException e) { + throw new IllegalStateException(e); + } + } + + /** + * Decrypts provided encryptedData with DEFAULT_PASS_PHRASE + */ + public static byte[] decrypt(byte[] encryptedData) { + try { + return decrypt(encryptedData, hashPassPhrase(DEFAULT_PASS_PHRASE)); + } catch (NoSuchAlgorithmException e) { + throw new IllegalStateException(e); + } + } + + /** + * Encrypts provided data with passPhrase + */ + public static byte[] encrypt(byte[] data, char[] passPhrase) { + try { + final Cipher encryptionCipher = getEncryptionCipher(getSecretKey(passPhrase)); + return encryptionCipher.doFinal(pad(data, minimumPadding)); + } catch (InvalidKeySpecException | IllegalBlockSizeException | BadPaddingException e) { + throw new IllegalStateException(e); + } + } + + /** + * Decrypts provided encryptedData with passPhrase + */ + private static byte[] decrypt(byte[] encryptedData, char[] passPhrase) { + try { + final Cipher cipher = getDecryptionCipher(getSecretKey(passPhrase)); + return unPad(cipher.doFinal(encryptedData)); + } catch (IllegalBlockSizeException | BadPaddingException | InvalidKeySpecException e) { + throw new IllegalStateException(e); + } + + } + + private static SecretKey getSecretKey(char[] passPhrase) throws InvalidKeySpecException { + try { + PBEKeySpec keySpec = new PBEKeySpec(passPhrase, salt, iterationCount, aesKeyLength); + + byte[] shortKey = SecretKeyFactory.getInstance("PBEWithSHA1AndDESede"). 
+ generateSecret(keySpec).getEncoded(); + + byte[] intermediaryKey = new byte[aesKeyLength / 8]; + for (int i = 0, j = 0; i < aesKeyLength / 8; i++) { + intermediaryKey[i] = shortKey[j]; + if (++j == shortKey.length) + j = 0; + } + + return new SecretKeySpec(intermediaryKey, "AES"); + } catch (NoSuchAlgorithmException | InvalidKeySpecException e) { + throw new IllegalStateException(e); + } + } + + private static Cipher getEncryptionCipher(SecretKey secretKey) { + return getCipher(Cipher.ENCRYPT_MODE, secretKey); + } + + private static Cipher getDecryptionCipher(SecretKey secretKey) { + return getCipher(Cipher.DECRYPT_MODE, secretKey); + } + + private static Cipher getCipher(int mode, SecretKey secretKey) { + try { + Cipher cipher = Cipher.getInstance(secretKey.getAlgorithm()); + cipher.init(mode, secretKey, random); + return cipher; + } catch (NoSuchAlgorithmException | InvalidKeyException | NoSuchPaddingException e) { + throw new IllegalStateException(e); + } + } + + private static byte[] pad(byte[] bytes, int length) { + if (bytes.length >= length) { + byte[] out = new byte[bytes.length + 1]; + System.arraycopy(bytes, 0, out, 0, bytes.length); + out[bytes.length] = (byte) 1; + return out; + } + + byte[] out = new byte[length + 1]; + + int i = 0; + for (; i < bytes.length; i++) + out[i] = bytes[i]; + + int padded = length - i; + + // fill the rest with random bytes + byte[] fill = new byte[padded - 1]; + random.nextBytes(fill); + System.arraycopy(fill, 0, out, i, padded - 1); + + out[length] = (byte) (padded + 1); + + return out; + } + + private static byte[] unPad(byte[] bytes) { + int padded = (int) bytes[bytes.length - 1]; + int targetLength = bytes.length - padded; + + byte[] out = new byte[targetLength]; + + System.arraycopy(bytes, 0, out, 0, targetLength); + + return out; + } + + private static char[] hashPassPhrase(String passPhrase) throws NoSuchAlgorithmException { + final byte[] passBytes = passPhrase.getBytes(StandardCharsets.UTF_8); + final byte[] digest = MessageDigest.getInstance(passHashAlgorithm).digest(passBytes); + return Base64.getEncoder().encodeToString(digest).toCharArray(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DateUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DateUtils.java new file mode 100644 index 0000000000000..74183ab5e0525 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DateUtils.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.common.joda.FormatDateTimeFormatter; +import org.elasticsearch.common.joda.Joda; +import org.joda.time.MutableDateTime; +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; + +public class DateUtils { + + private static final FormatDateTimeFormatter formatDateOnlyFormatter = Joda.forPattern("yyyy-MM-dd"); + + private static final DateTimeFormatter dateOnlyFormatter = formatDateOnlyFormatter.parser().withZoneUTC(); + + private static final DateTimeFormatter dateTimeFormatter = ISODateTimeFormat.dateTime().withZoneUTC(); + + public static long endOfTheDay(String date) { + try { + // Try parsing using complete date/time format + return dateTimeFormatter.parseDateTime(date).getMillis(); + } catch (IllegalArgumentException ex) { + // Fall back to the date only format + MutableDateTime dateTime = dateOnlyFormatter.parseMutableDateTime(date); + dateTime.millisOfDay().set(dateTime.millisOfDay().getMaximumValue()); + return dateTime.getMillis(); + } + } + + public static long beginningOfTheDay(String date) { + try { + // Try parsing using complete date/time format + return dateTimeFormatter.parseDateTime(date).getMillis(); + } catch (IllegalArgumentException ex) { + // Fall back to the date only format + return dateOnlyFormatter.parseDateTime(date).getMillis(); + } + + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseAction.java new file mode 100644 index 0000000000000..de356870fbbe8 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseAction.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public class DeleteLicenseAction extends Action { + + public static final DeleteLicenseAction INSTANCE = new DeleteLicenseAction(); + public static final String NAME = "cluster:admin/xpack/license/delete"; + + private DeleteLicenseAction() { + super(NAME); + } + + @Override + public DeleteLicenseResponse newResponse() { + return new DeleteLicenseResponse(); + } + + @Override + public DeleteLicenseRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new DeleteLicenseRequestBuilder(client, this); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseRequest.java new file mode 100644 index 0000000000000..29558cf9e42bb --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseRequest.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + + +public class DeleteLicenseRequest extends AcknowledgedRequest { + + public DeleteLicenseRequest() { + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseRequestBuilder.java new file mode 100644 index 0000000000000..b554b0055376b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseRequestBuilder.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +public class DeleteLicenseRequestBuilder extends AcknowledgedRequestBuilder { + + public DeleteLicenseRequestBuilder(ElasticsearchClient client) { + this(client, DeleteLicenseAction.INSTANCE); + } + + /** + * Creates a new delete license request builder + * + * @param client elasticsearch client + */ + public DeleteLicenseRequestBuilder(ElasticsearchClient client, DeleteLicenseAction action) { + super(client, action, new DeleteLicenseRequest()); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseResponse.java new file mode 100644 index 0000000000000..c30890a0ff6bf --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DeleteLicenseResponse.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public class DeleteLicenseResponse extends AcknowledgedResponse { + + DeleteLicenseResponse() { + } + + DeleteLicenseResponse(boolean acknowledged) { + super(acknowledged); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + readAcknowledged(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + writeAcknowledged(out); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ExpirationCallback.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ExpirationCallback.java new file mode 100644 index 0000000000000..ba74ddf300306 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ExpirationCallback.java @@ -0,0 +1,173 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.common.logging.LoggerMessageFormat; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; + +import java.util.UUID; + +abstract class ExpirationCallback { + + static final String EXPIRATION_JOB_PREFIX = ".license_expiration_job_"; + + public enum Orientation {PRE, POST} + + /** + * Callback that is triggered every frequency when + * current time is between max and min + * before license expiry. + */ + public abstract static class Pre extends ExpirationCallback { + + /** + * Callback schedule prior to license expiry + * + * @param min latest relative time to execute before license expiry + * @param max earliest relative time to execute before license expiry + * @param frequency interval between execution + */ + Pre(TimeValue min, TimeValue max, TimeValue frequency) { + super(Orientation.PRE, min, max, frequency); + } + } + + /** + * Callback that is triggered every frequency when + * current time is between min and max + * after license expiry. + */ + public abstract static class Post extends ExpirationCallback { + + /** + * Callback schedule after license expiry + * + * @param min earliest relative time to execute after license expiry + * @param max latest relative time to execute after license expiry + * @param frequency interval between execution + */ + Post(TimeValue min, TimeValue max, TimeValue frequency) { + super(Orientation.POST, min, max, frequency); + } + } + + private final String id; + private final Orientation orientation; + private final long min; + private final long max; + private final long frequency; + + private ExpirationCallback(Orientation orientation, TimeValue min, TimeValue max, TimeValue frequency) { + this.orientation = orientation; + this.min = (min == null) ? 0 : min.getMillis(); + this.max = (max == null) ? Long.MAX_VALUE : max.getMillis(); + this.frequency = frequency.getMillis(); + this.id = String.join("", EXPIRATION_JOB_PREFIX, UUID.randomUUID().toString()); + } + + public final String getId() { + return id; + } + + public final long getFrequency() { + return frequency; + } + + /** + * Calculates the delay for the next trigger time. 
When now is within the + * valid time bracket with respect to expirationDate, the delay is 0. + * When now is before the time bracket, the delay is the time remaining until the start of the + * bracket, and when now is past the valid time bracket, the delay + * is null. + * @param expirationDate license expiry date in milliseconds + * @param now current time in milliseconds + * @return time delay + */ + final TimeValue delay(long expirationDate, long now) { + final TimeValue delay; + switch (orientation) { + case PRE: + if (expirationDate >= now) { + // license not yet expired + long preExpiryDuration = expirationDate - now; + if (preExpiryDuration > max) { + // license duration is longer than maximum duration, delay it to the first match time + delay = TimeValue.timeValueMillis(preExpiryDuration - max); + } else if (preExpiryDuration <= max && preExpiryDuration >= min) { + // no delay in valid time bracket + delay = TimeValue.timeValueMillis(0); + } else { + // passed last match time + delay = null; + } + } else { + // invalid after license expiry + delay = null; + } + break; + case POST: + if (expirationDate >= now) { + // license not yet expired, delay it to the first match time + delay = TimeValue.timeValueMillis(expirationDate - now + min); + } else { + // license has expired + long expiredDuration = now - expirationDate; + if (expiredDuration < min) { + // license expiry duration is shorter than minimum duration, delay it to the first match time + delay = TimeValue.timeValueMillis(min - expiredDuration); + } else if (expiredDuration >= min && expiredDuration <= max) { + // no delay in valid time bracket + delay = TimeValue.timeValueMillis(0); + } else { + // passed last match time + delay = null; + } + } + break; + default: + throw new IllegalStateException("orientation [" + orientation + "] unknown"); + } + return delay; + } + + /** + * {@link SchedulerEngine.Schedule#nextScheduledTimeAfter(long, long)} with respect to + * license expiry date + */ + public final long nextScheduledTimeForExpiry(long expiryDate, long startTime, long time) { + TimeValue delay = delay(expiryDate, time); + if (delay != null) { + long delayInMillis = delay.getMillis(); + if (delayInMillis == 0L) { + if (startTime == time) { + // initial trigger and in time bracket, schedule immediately + return time; + } else { + // in time bracket, add frequency + return time + frequency; + } + } else { + // not in time bracket + return time + delayInMillis; + } + } + return -1; + } + + /** + * Code to execute when the expiry callback is triggered in a valid + * time bracket + * @param license license to operate on + */ + public abstract void on(License license); + + public final String toString() { + return LoggerMessageFormat.format(null, "ExpirationCallback:(orientation [{}], min [{}], max [{}], freq [{}])", + orientation.name(), TimeValue.timeValueMillis(min), TimeValue.timeValueMillis(max), + TimeValue.timeValueMillis(frequency)); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusAction.java new file mode 100644 index 0000000000000..5011ddf5782d0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusAction.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements.
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public class GetBasicStatusAction extends Action { + + public static final GetBasicStatusAction INSTANCE = new GetBasicStatusAction(); + public static final String NAME = "cluster:admin/xpack/license/basic_status"; + + private GetBasicStatusAction() { + super(NAME); + } + + @Override + public GetBasicStatusRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new GetBasicStatusRequestBuilder(client, this); + } + + @Override + public GetBasicStatusResponse newResponse() { + return new GetBasicStatusResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusRequest.java new file mode 100644 index 0000000000000..286c0d185a5ba --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusRequest.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + +public class GetBasicStatusRequest extends MasterNodeReadRequest { + + public GetBasicStatusRequest() { + } + + public GetBasicStatusRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusRequestBuilder.java new file mode 100644 index 0000000000000..ba973a95f3d6f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusRequestBuilder.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +class GetBasicStatusRequestBuilder extends ActionRequestBuilder { + + GetBasicStatusRequestBuilder(ElasticsearchClient client, GetBasicStatusAction action) { + super(client, action, new GetBasicStatusRequest()); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusResponse.java new file mode 100644 index 0000000000000..2f1cb7be37827 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusResponse.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +class GetBasicStatusResponse extends ActionResponse { + + private boolean eligibleToStartBasic; + + GetBasicStatusResponse() { + } + + GetBasicStatusResponse(boolean eligibleToStartBasic) { + this.eligibleToStartBasic = eligibleToStartBasic; + } + + boolean isEligibleToStartBasic() { + return eligibleToStartBasic; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + eligibleToStartBasic = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(eligibleToStartBasic); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseAction.java new file mode 100644 index 0000000000000..472634107969a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseAction.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public class GetLicenseAction extends Action { + + public static final GetLicenseAction INSTANCE = new GetLicenseAction(); + public static final String NAME = "cluster:monitor/xpack/license/get"; + + private GetLicenseAction() { + super(NAME); + } + + @Override + public GetLicenseResponse newResponse() { + return new GetLicenseResponse(); + } + + @Override + public GetLicenseRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new GetLicenseRequestBuilder(client, this); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseRequest.java new file mode 100644 index 0000000000000..914e18772af03 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseRequest.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + + +public class GetLicenseRequest extends MasterNodeReadRequest { + + public GetLicenseRequest() { + } + + public GetLicenseRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseRequestBuilder.java new file mode 100644 index 0000000000000..7e92a54bce2d6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseRequestBuilder.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +public class GetLicenseRequestBuilder extends MasterNodeReadOperationRequestBuilder { + + public GetLicenseRequestBuilder(ElasticsearchClient client) { + this(client, GetLicenseAction.INSTANCE); + } + + /** + * Creates new get licenses request builder + * + * @param client elasticsearch client + */ + public GetLicenseRequestBuilder(ElasticsearchClient client, GetLicenseAction action) { + super(client, action, new GetLicenseRequest()); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseResponse.java new file mode 100644 index 0000000000000..53339269560f3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetLicenseResponse.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public class GetLicenseResponse extends ActionResponse { + + private License license; + + GetLicenseResponse() { + } + + GetLicenseResponse(License license) { + this.license = license; + } + + public License license() { + return license; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + if (in.readBoolean()) { + license = License.readLicense(in); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + if (license == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + license.writeTo(out); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusAction.java new file mode 100644 index 0000000000000..2f690a35fc58e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusAction.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public class GetTrialStatusAction extends Action { + + public static final GetTrialStatusAction INSTANCE = new GetTrialStatusAction(); + public static final String NAME = "cluster:admin/xpack/license/trial_status"; + + private GetTrialStatusAction() { + super(NAME); + } + + @Override + public GetTrialStatusRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new GetTrialStatusRequestBuilder(client, this); + } + + @Override + public GetTrialStatusResponse newResponse() { + return new GetTrialStatusResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusRequest.java new file mode 100644 index 0000000000000..a94ae697d7880 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusRequest.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + +public class GetTrialStatusRequest extends MasterNodeReadRequest { + + public GetTrialStatusRequest() { + } + + public GetTrialStatusRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusRequestBuilder.java new file mode 100644 index 0000000000000..c9786418b639d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusRequestBuilder.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +class GetTrialStatusRequestBuilder extends ActionRequestBuilder { + + GetTrialStatusRequestBuilder(ElasticsearchClient client, GetTrialStatusAction action) { + super(client, action, new GetTrialStatusRequest()); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusResponse.java new file mode 100644 index 0000000000000..6712e68efae84 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusResponse.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +class GetTrialStatusResponse extends ActionResponse { + + private boolean eligibleToStartTrial; + + GetTrialStatusResponse() { + } + + GetTrialStatusResponse(boolean eligibleToStartTrial) { + this.eligibleToStartTrial = eligibleToStartTrial; + } + + boolean isEligibleToStartTrial() { + return eligibleToStartTrial; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + eligibleToStartTrial = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(eligibleToStartTrial); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java new file mode 100644 index 0000000000000..df94a9132a059 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java @@ -0,0 +1,822 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Comparator; +import java.util.List; +import java.util.Locale; + +import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; + +/** + * Data structure for license. Use {@link Builder} to build a license. + * Provides serialization/deserialization & validation methods for license object + */ +public class License implements ToXContentObject { + public static final int VERSION_START = 1; + public static final int VERSION_NO_FEATURE_TYPE = 2; + public static final int VERSION_START_DATE = 3; + public static final int VERSION_CURRENT = VERSION_START_DATE; + + /** + * XContent param name to deserialize license(s) with + * an additional status field, indicating whether a + * particular license is 'active' or 'expired' and no signature + * and in a human readable format + */ + public static final String REST_VIEW_MODE = "rest_view"; + /** + * XContent param name to deserialize license(s) with + * no signature + */ + public static final String LICENSE_SPEC_VIEW_MODE = "license_spec_view"; + /** + * XContent param name to deserialize licenses according + * to a specific license version + */ + public static final String LICENSE_VERSION_MODE = "license_version"; + + public static final Comparator LATEST_ISSUE_DATE_FIRST = Comparator.comparing(License::issueDate).reversed(); + + private final int version; + private final String uid; + private final String issuer; + private final String issuedTo; + private final long issueDate; + private final String type; + private final String subscriptionType; + private final String feature; + private final String signature; + private final long expiryDate; + private final long startDate; + private final int maxNodes; + private final OperationMode operationMode; + + /** + * Decouples operation mode of a license from the license type value. + *

+ * Note: The mode indicates features that should be made available, but it does not indicate whether the license is active! + * + * The id byte is used for ordering operation modes + */ + public enum OperationMode { + MISSING((byte) 0), + TRIAL((byte) 1), + BASIC((byte) 2), + STANDARD((byte) 3), + GOLD((byte) 4), + PLATINUM((byte) 5); + + private final byte id; + + OperationMode(byte id) { + this.id = id; + } + + /** Returns non-zero positive number when opMode1 is greater than opMode2 */ + public static int compare(OperationMode opMode1, OperationMode opMode2) { + return Integer.compare(opMode1.id, opMode2.id); + } + + public static OperationMode resolve(String type) { + switch (type.toLowerCase(Locale.ROOT)) { + case "missing": + return MISSING; + case "trial": + case "none": // bwc for 1.x subscription_type field + case "dev": // bwc for 1.x subscription_type field + case "development": // bwc for 1.x subscription_type field + return TRIAL; + case "basic": + return BASIC; + case "standard": + return STANDARD; + case "silver": + case "gold": + return GOLD; + case "platinum": + case "cloud_internal": + case "internal": // bwc for 1.x subscription_type field + return PLATINUM; + default: + throw new IllegalArgumentException("unknown type [" + type + "]"); + } + } + } + + private License(int version, String uid, String issuer, String issuedTo, long issueDate, String type, + String subscriptionType, String feature, String signature, long expiryDate, int maxNodes, long startDate) { + this.version = version; + this.uid = uid; + this.issuer = issuer; + this.issuedTo = issuedTo; + this.issueDate = issueDate; + this.type = type; + this.subscriptionType = subscriptionType; + this.feature = feature; + this.signature = signature; + // We will validate that only a basic license can have the BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS + // in the validate() method. 
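+        // Descriptive note on the branch below: an expiryDate of -1 (unset) is mapped to
+        // BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS, which is effectively "never expires"
+        // (Long.MAX_VALUE minus 365 days, see LicenseService), so status() will not report such a license as EXPIRED.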
+ if (expiryDate == -1) { + this.expiryDate = LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS; + } else { + this.expiryDate = expiryDate; + } + this.maxNodes = maxNodes; + this.startDate = startDate; + if (version == VERSION_START) { + // in 1.x: the acceptable values for 'subscription_type': none | dev | silver | gold | platinum + this.operationMode = OperationMode.resolve(subscriptionType); + } else { + // in 2.x: the acceptable values for 'type': trial | basic | silver | dev | gold | platinum + this.operationMode = OperationMode.resolve(type); + } + validate(); + } + + /** + * @return version of the license + */ + public int version() { + return version; + } + + /** + * @return a unique identifier for a license + */ + public String uid() { + return uid; + } + + /** + * @return type of the license [trial, subscription, internal] + */ + public String type() { + return type; + } + + /** + * @return the issueDate in milliseconds + */ + public long issueDate() { + return issueDate; + } + + /** + * @return the startDate in milliseconds + */ + public long startDate() { + return startDate; + } + + /** + * @return the expiry date in milliseconds + */ + public long expiryDate() { + return expiryDate; + } + + /** + * @return the maximum number of nodes this license has been issued for + */ + public int maxNodes() { + return maxNodes; + } + + /** + * @return a string representing the entity this license has been issued to + */ + public String issuedTo() { + return issuedTo; + } + + /** + * @return a string representing the entity responsible for issuing this license (internal) + */ + public String issuer() { + return issuer; + } + + /** + * @return a string representing the signature of the license used for license verification + */ + public String signature() { + return signature; + } + + /** + * @return the operation mode of the license as computed from the license type or from + * the license mode file + */ + public OperationMode operationMode() { + synchronized (this) { + if (canReadOperationModeFromFile() && operationModeFileWatcher != null) { + return operationModeFileWatcher.getCurrentOperationMode(); + } + } + return operationMode; + } + + private boolean canReadOperationModeFromFile() { + return type.equals("cloud_internal"); + } + + private volatile OperationModeFileWatcher operationModeFileWatcher; + + /** + * Sets the operation mode file watcher for the license and initializes the + * file watcher when the license type allows overriding the operation mode from a file + */ + public synchronized void setOperationModeFileWatcher(final OperationModeFileWatcher operationModeFileWatcher) { + this.operationModeFileWatcher = operationModeFileWatcher; + if (canReadOperationModeFromFile()) { + this.operationModeFileWatcher.init(); + } + } + + /** + * Removes operation mode file watcher, so unused license objects can be gc'ed + */ + public synchronized void removeOperationModeFileWatcher() { + this.operationModeFileWatcher = null; + } + + /** + * @return the current license's status + */ + public Status status() { + long now = System.currentTimeMillis(); + if (issueDate > now) { + return Status.INVALID; + } else if (expiryDate < now) { + return Status.EXPIRED; + } + return Status.ACTIVE; + } + + private void validate() { + if (issuer == null) { + throw new IllegalStateException("issuer can not be null"); + } else if (issuedTo == null) { + throw new IllegalStateException("issuedTo can not be null"); + } else if (issueDate == -1) { + throw new IllegalStateException("issueDate has to be
set"); + } else if (type == null) { + throw new IllegalStateException("type can not be null"); + } else if (subscriptionType == null && version == VERSION_START) { + throw new IllegalStateException("subscriptionType can not be null"); + } else if (uid == null) { + throw new IllegalStateException("uid can not be null"); + } else if (feature == null && version == VERSION_START) { + throw new IllegalStateException("feature can not be null"); + } else if (maxNodes == -1) { + throw new IllegalStateException("maxNodes has to be set"); + } else if (expiryDate == -1) { + throw new IllegalStateException("expiryDate has to be set"); + } else if (expiryDate == LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS && "basic".equals(type) == false) { + throw new IllegalStateException("only basic licenses are allowed to have no expiration"); + } + } + + public static License readLicense(StreamInput in) throws IOException { + int version = in.readVInt(); // Version for future extensibility + if (version > VERSION_CURRENT) { + throw new ElasticsearchException("Unknown license version found, please upgrade all nodes to the latest elasticsearch-license" + + " plugin"); + } + Builder builder = builder(); + builder.version(version); + builder.uid(in.readString()); + builder.type(in.readString()); + if (version == VERSION_START) { + builder.subscriptionType(in.readString()); + } + builder.issueDate(in.readLong()); + if (version == VERSION_START) { + builder.feature(in.readString()); + } + builder.expiryDate(in.readLong()); + builder.maxNodes(in.readInt()); + builder.issuedTo(in.readString()); + builder.issuer(in.readString()); + builder.signature(in.readOptionalString()); + if (version >= VERSION_START_DATE) { + builder.startDate(in.readLong()); + } + return builder.build(); + } + + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(version); + out.writeString(uid); + out.writeString(type); + if (version == VERSION_START) { + out.writeString(subscriptionType); + } + out.writeLong(issueDate); + if (version == VERSION_START) { + out.writeString(feature); + } + out.writeLong(expiryDate); + out.writeInt(maxNodes); + out.writeString(issuedTo); + out.writeString(issuer); + out.writeOptionalString(signature); + if (version >= VERSION_START_DATE) { + out.writeLong(startDate); + } + } + + @Override + public String toString() { + try { + final XContentBuilder builder = XContentFactory.jsonBuilder(); + toXContent(builder, ToXContent.EMPTY_PARAMS); + return Strings.toString(builder); + } catch (IOException e) { + return ""; + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + toInnerXContent(builder, params); + builder.endObject(); + return builder; + } + + public XContentBuilder toInnerXContent(XContentBuilder builder, Params params) throws IOException { + boolean licenseSpecMode = params.paramAsBoolean(LICENSE_SPEC_VIEW_MODE, false); + boolean restViewMode = params.paramAsBoolean(REST_VIEW_MODE, false); + boolean previouslyHumanReadable = builder.humanReadable(); + if (licenseSpecMode && restViewMode) { + throw new IllegalArgumentException("can have either " + REST_VIEW_MODE + " or " + LICENSE_SPEC_VIEW_MODE); + } else if (restViewMode) { + if (!previouslyHumanReadable) { + builder.humanReadable(true); + } + } + final int version; + if (params.param(LICENSE_VERSION_MODE) != null && restViewMode) { + version = Integer.parseInt(params.param(LICENSE_VERSION_MODE)); + } else { + version = 
this.version; + } + if (restViewMode) { + builder.field(Fields.STATUS, status().label()); + } + builder.field(Fields.UID, uid); + builder.field(Fields.TYPE, type); + if (version == VERSION_START) { + builder.field(Fields.SUBSCRIPTION_TYPE, subscriptionType); + } + builder.timeField(Fields.ISSUE_DATE_IN_MILLIS, Fields.ISSUE_DATE, issueDate); + if (version == VERSION_START) { + builder.field(Fields.FEATURE, feature); + } + + if (expiryDate != LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS) { + builder.timeField(Fields.EXPIRY_DATE_IN_MILLIS, Fields.EXPIRY_DATE, expiryDate); + } + builder.field(Fields.MAX_NODES, maxNodes); + builder.field(Fields.ISSUED_TO, issuedTo); + builder.field(Fields.ISSUER, issuer); + if (!licenseSpecMode && !restViewMode && signature != null) { + builder.field(Fields.SIGNATURE, signature); + } + if (restViewMode) { + builder.humanReadable(previouslyHumanReadable); + } + if (version >= VERSION_START_DATE) { + builder.timeField(Fields.START_DATE_IN_MILLIS, Fields.START_DATE, startDate); + } + return builder; + } + + public static License fromXContent(XContentParser parser) throws IOException { + Builder builder = new Builder(); + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String currentFieldName = parser.currentName(); + token = parser.nextToken(); + if (token.isValue()) { + if (Fields.UID.equals(currentFieldName)) { + builder.uid(parser.text()); + } else if (Fields.TYPE.equals(currentFieldName)) { + builder.type(parser.text()); + } else if (Fields.SUBSCRIPTION_TYPE.equals(currentFieldName)) { + builder.subscriptionType(parser.text()); + } else if (Fields.ISSUE_DATE.equals(currentFieldName)) { + builder.issueDate(parseDate(parser, "issue", false)); + } else if (Fields.ISSUE_DATE_IN_MILLIS.equals(currentFieldName)) { + builder.issueDate(parser.longValue()); + } else if (Fields.FEATURE.equals(currentFieldName)) { + builder.feature(parser.text()); + } else if (Fields.EXPIRY_DATE.equals(currentFieldName)) { + builder.expiryDate(parseDate(parser, "expiration", true)); + } else if (Fields.EXPIRY_DATE_IN_MILLIS.equals(currentFieldName)) { + builder.expiryDate(parser.longValue()); + } else if (Fields.START_DATE.equals(currentFieldName)) { + builder.startDate(parseDate(parser, "start", false)); + } else if (Fields.START_DATE_IN_MILLIS.equals(currentFieldName)) { + builder.startDate(parser.longValue()); + } else if (Fields.MAX_NODES.equals(currentFieldName)) { + builder.maxNodes(parser.intValue()); + } else if (Fields.ISSUED_TO.equals(currentFieldName)) { + builder.issuedTo(parser.text()); + } else if (Fields.ISSUER.equals(currentFieldName)) { + builder.issuer(parser.text()); + } else if (Fields.SIGNATURE.equals(currentFieldName)) { + builder.signature(parser.text()); + } else if (Fields.VERSION.equals(currentFieldName)) { + builder.version(parser.intValue()); + } + // Ignore unknown elements - might be new version of license + } else if (token == XContentParser.Token.START_ARRAY) { + // It was probably created by newer version - ignoring + parser.skipChildren(); + } else if (token == XContentParser.Token.START_OBJECT) { + // It was probably created by newer version - ignoring + parser.skipChildren(); + } + } + } + // not a license spec + if (builder.signature != null) { + byte[] signatureBytes = Base64.getDecoder().decode(builder.signature); + ByteBuffer byteBuffer = ByteBuffer.wrap(signatureBytes); + int version = byteBuffer.getInt(); + // we take the 
absolute version, because negative versions + // mean that the license was generated by the cluster (see TrialLicense) + // and positive version means that the license was signed + if (version < 0) { + version *= -1; + } + if (version == 0) { + throw new ElasticsearchException("malformed signature for license [" + builder.uid + "]"); + } else if (version > VERSION_CURRENT) { + throw new ElasticsearchException("Unknown license version found, please upgrade all nodes to the latest " + + "elasticsearch-license plugin"); + } + // signature version is the source of truth + builder.version(version); + } + return builder.build(); + } + + /** + * Returns true if the license was auto-generated (by license plugin), + * false otherwise + */ + public static boolean isAutoGeneratedLicense(String signature) { + try { + byte[] signatureBytes = Base64.getDecoder().decode(signature); + ByteBuffer byteBuffer = ByteBuffer.wrap(signatureBytes); + return byteBuffer.getInt() < 0; + } catch (IllegalArgumentException e) { + throw new IllegalStateException(e); + } + } + + public static License fromSource(BytesReference bytes, XContentType xContentType) throws IOException { + if (bytes == null || bytes.length() == 0) { + throw new ElasticsearchParseException("failed to parse license - no content provided"); + } + if (xContentType == null) { + throw new ElasticsearchParseException("failed to parse license - no content-type provided"); + } + // EMPTY is safe here because we don't call namedObject + try (InputStream byteStream = bytes.streamInput(); + XContentParser parser = xContentType.xContent() + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, byteStream)) + { + License license = null; + if (parser.nextToken() == XContentParser.Token.START_OBJECT) { + if (parser.nextToken() == XContentParser.Token.FIELD_NAME) { + String currentFieldName = parser.currentName(); + if (Fields.LICENSES.equals(currentFieldName)) { + final List pre20Licenses = new ArrayList<>(); + if (parser.nextToken() == XContentParser.Token.START_ARRAY) { + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + pre20Licenses.add(License.fromXContent(parser)); + } + // take the latest issued unexpired license + CollectionUtil.timSort(pre20Licenses, LATEST_ISSUE_DATE_FIRST); + long now = System.currentTimeMillis(); + for (License oldLicense : pre20Licenses) { + if (oldLicense.expiryDate() > now) { + license = oldLicense; + break; + } + } + if (license == null && !pre20Licenses.isEmpty()) { + license = pre20Licenses.get(0); + } + } else { + throw new ElasticsearchParseException("failed to parse licenses expected an array of licenses"); + } + } else if (Fields.LICENSE.equals(currentFieldName)) { + license = License.fromXContent(parser); + } + // Ignore all other fields - might be created with new version + } else { + throw new ElasticsearchParseException("failed to parse licenses expected field"); + } + } else { + throw new ElasticsearchParseException("failed to parse licenses expected start object"); + } + return license; + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + License license = (License) o; + + if (issueDate != license.issueDate) return false; + if (expiryDate != license.expiryDate) return false; + if (startDate!= license.startDate) return false; + if (maxNodes != license.maxNodes) return false; + if (version != license.version) return false; + if (uid != null ? 
!uid.equals(license.uid) : license.uid != null) return false; + if (issuer != null ? !issuer.equals(license.issuer) : license.issuer != null) return false; + if (issuedTo != null ? !issuedTo.equals(license.issuedTo) : license.issuedTo != null) return false; + if (type != null ? !type.equals(license.type) : license.type != null) return false; + if (subscriptionType != null ? !subscriptionType.equals(license.subscriptionType) : license.subscriptionType != null) + return false; + if (feature != null ? !feature.equals(license.feature) : license.feature != null) return false; + return !(signature != null ? !signature.equals(license.signature) : license.signature != null); + + } + + @Override + public int hashCode() { + int result = uid != null ? uid.hashCode() : 0; + result = 31 * result + (issuer != null ? issuer.hashCode() : 0); + result = 31 * result + (issuedTo != null ? issuedTo.hashCode() : 0); + result = 31 * result + (int) (issueDate ^ (issueDate >>> 32)); + result = 31 * result + (type != null ? type.hashCode() : 0); + result = 31 * result + (subscriptionType != null ? subscriptionType.hashCode() : 0); + result = 31 * result + (feature != null ? feature.hashCode() : 0); + result = 31 * result + (signature != null ? signature.hashCode() : 0); + result = 31 * result + (int) (expiryDate ^ (expiryDate >>> 32)); + result = 31 * result + (int) (startDate ^ (startDate>>> 32)); + result = 31 * result + maxNodes; + result = 31 * result + version; + return result; + } + + public static final class Fields { + public static final String STATUS = "status"; + public static final String UID = "uid"; + public static final String TYPE = "type"; + public static final String SUBSCRIPTION_TYPE = "subscription_type"; + public static final String ISSUE_DATE_IN_MILLIS = "issue_date_in_millis"; + public static final String ISSUE_DATE = "issue_date"; + public static final String FEATURE = "feature"; + public static final String EXPIRY_DATE_IN_MILLIS = "expiry_date_in_millis"; + public static final String EXPIRY_DATE = "expiry_date"; + public static final String START_DATE_IN_MILLIS = "start_date_in_millis"; + public static final String START_DATE = "start_date"; + public static final String MAX_NODES = "max_nodes"; + public static final String ISSUED_TO = "issued_to"; + public static final String ISSUER = "issuer"; + public static final String VERSION = "version"; + public static final String SIGNATURE = "signature"; + + public static final String LICENSES = "licenses"; + public static final String LICENSE = "license"; + + } + + private static long parseDate(XContentParser parser, String description, boolean endOfTheDay) throws IOException { + if (parser.currentToken() == XContentParser.Token.VALUE_NUMBER) { + return parser.longValue(); + } else { + try { + if (endOfTheDay) { + return DateUtils.endOfTheDay(parser.text()); + } else { + return DateUtils.beginningOfTheDay(parser.text()); + } + } catch (IllegalArgumentException ex) { + throw new ElasticsearchParseException("invalid " + description + " date format " + parser.text()); + } + } + } + + public static Builder builder() { + return new Builder(); + } + + public static class Builder { + private int version = License.VERSION_CURRENT; + private String uid; + private String issuer; + private String issuedTo; + private long issueDate = -1; + private String type; + private String subscriptionType; + private String feature; + private String signature; + private long expiryDate = -1; + private long startDate = -1; + private int maxNodes = -1; + + public Builder 
uid(String uid) { + this.uid = uid; + return this; + } + + public Builder version(int version) { + this.version = version; + return this; + } + + public Builder issuer(String issuer) { + this.issuer = issuer; + return this; + } + + public Builder issuedTo(String issuedTo) { + this.issuedTo = issuedTo; + return this; + } + + public Builder issueDate(long issueDate) { + this.issueDate = issueDate; + return this; + } + + public Builder type(String type) { + this.type = type; + return this; + } + + public Builder subscriptionType(String subscriptionType) { + this.subscriptionType = subscriptionType; + return this; + } + + public Builder feature(String feature) { + this.feature = feature; + return this; + } + + public Builder expiryDate(long expiryDate) { + this.expiryDate = expiryDate; + return this; + } + + public Builder maxNodes(int maxNodes) { + this.maxNodes = maxNodes; + return this; + } + + public Builder signature(String signature) { + if (signature != null) { + this.signature = signature; + } + return this; + } + + public Builder startDate(long startDate) { + this.startDate = startDate; + return this; + } + + public Builder fromLicenseSpec(License license, String signature) { + return uid(license.uid()) + .version(license.version()) + .issuedTo(license.issuedTo()) + .issueDate(license.issueDate()) + .startDate(license.startDate()) + .type(license.type()) + .subscriptionType(license.subscriptionType) + .feature(license.feature) + .maxNodes(license.maxNodes()) + .expiryDate(license.expiryDate()) + .issuer(license.issuer()) + .signature(signature); + } + + /** + * Returns a builder that converts pre 2.0 licenses + * to the new license format + */ + public Builder fromPre20LicenseSpec(License pre20License) { + return uid(pre20License.uid()) + .issuedTo(pre20License.issuedTo()) + .issueDate(pre20License.issueDate()) + .maxNodes(pre20License.maxNodes()) + .expiryDate(pre20License.expiryDate()); + } + + public License build() { + return new License(version, uid, issuer, issuedTo, issueDate, type, + subscriptionType, feature, signature, expiryDate, maxNodes, startDate); + } + + public Builder validate() { + if (issuer == null) { + throw new IllegalStateException("issuer can not be null"); + } else if (issuedTo == null) { + throw new IllegalStateException("issuedTo can not be null"); + } else if (issueDate == -1) { + throw new IllegalStateException("issueDate has to be set"); + } else if (type == null) { + throw new IllegalStateException("type can not be null"); + } else if (uid == null) { + throw new IllegalStateException("uid can not be null"); + } else if (signature == null) { + throw new IllegalStateException("signature can not be null"); + } else if (maxNodes == -1) { + throw new IllegalStateException("maxNodes has to be set"); + } else if (expiryDate == -1) { + throw new IllegalStateException("expiryDate has to be set"); + } + return this; + } + } + + public enum Status { + + ACTIVE("active"), + INVALID("invalid"), + EXPIRED("expired"); + + private final String label; + + Status(String label) { + this.label = label; + } + + public String label() { + return label; + } + + public void writeTo(StreamOutput out) throws IOException { + out.writeString(label); + } + + public static Status readFrom(StreamInput in) throws IOException { + String value = in.readString(); + switch (value) { + case "active": + return ACTIVE; + case "invalid": + return INVALID; + case "expired": + return EXPIRED; + default: + throw new IllegalArgumentException("unknown license status [" + value + "]"); + } + } + } + 
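+    // Illustrative sketch only -- the values below are hypothetical and not taken from this patch:
+    //   License license = License.builder().uid("my-uid").type("gold").issueDate(issueMillis).expiryDate(expiryMillis)
+    //       .maxNodes(5).issuedTo("customer").issuer("issuer").signature(signatureString).build();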
+ /** + * Returns true iff the license is a production license + */ + public boolean isProductionLicense() { + switch (operationMode()) { + case MISSING: + case TRIAL: + case BASIC: + return false; + case STANDARD: + case GOLD: + case PLATINUM: + return true; + default: + throw new AssertionError("unknown operation mode: " + operationMode()); + + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java new file mode 100644 index 0000000000000..14e72142715b1 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java @@ -0,0 +1,525 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.component.Lifecycle; +import org.elasticsearch.common.joda.FormatDateTimeFormatter; +import org.elasticsearch.common.joda.Joda; +import org.elasticsearch.common.logging.LoggerMessageFormat; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.env.Environment; +import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; + +import java.time.Clock; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; + +/** + * Service responsible for managing {@link LicensesMetaData}. + *

+ * On the master node, the service handles updating the cluster state when a new license is registered. + * It also listens on all nodes for cluster state updates, and updates {@link XPackLicenseState} when + * the license changes are detected in the cluster state. + */ +public class LicenseService extends AbstractLifecycleComponent implements ClusterStateListener, SchedulerEngine.Listener { + + public static final Setting SELF_GENERATED_LICENSE_TYPE = new Setting<>("xpack.license.self_generated.type", + (s) -> "basic", (s) -> { + if (SelfGeneratedLicense.validSelfGeneratedType(s)) { + return s; + } else { + throw new IllegalArgumentException("Illegal self generated license type [" + s + "]. Must be trial or basic."); + } + }, Setting.Property.NodeScope); + + // pkg private for tests + static final TimeValue NON_BASIC_SELF_GENERATED_LICENSE_DURATION = TimeValue.timeValueHours(30 * 24); + + static final Set VALID_TRIAL_TYPES = new HashSet<>(Arrays.asList("trial", "platinum", "gold")); + + /** + * Duration of grace period after a license has expired + */ + static final TimeValue GRACE_PERIOD_DURATION = days(7); + + public static final long BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS = Long.MAX_VALUE - days(365).millis(); + + private final ClusterService clusterService; + + /** + * The xpack feature state to update when license changes are made. + */ + private final XPackLicenseState licenseState; + + /** + * Currently active license + */ + private final AtomicReference currentLicense = new AtomicReference<>(); + private SchedulerEngine scheduler; + private final Clock clock; + + /** + * File watcher for operation mode changes + */ + private final OperationModeFileWatcher operationModeFileWatcher; + + /** + * Callbacks to notify relative to license expiry + */ + private List expirationCallbacks = new ArrayList<>(); + + /** + * Max number of nodes licensed by generated trial license + */ + static final int SELF_GENERATED_LICENSE_MAX_NODES = 1000; + + public static final String LICENSE_JOB = "licenseJob"; + + private static final FormatDateTimeFormatter DATE_FORMATTER = Joda.forPattern("EEEE, MMMMM dd, yyyy", Locale.ROOT); + + private static final String ACKNOWLEDGEMENT_HEADER = "This license update requires acknowledgement. To acknowledge the license, " + + "please read the following messages and update the license again, this time with the \"acknowledge=true\" parameter:"; + + public LicenseService(Settings settings, ClusterService clusterService, Clock clock, Environment env, + ResourceWatcherService resourceWatcherService, XPackLicenseState licenseState) { + super(settings); + this.clusterService = clusterService; + this.clock = clock; + this.scheduler = new SchedulerEngine(clock); + this.licenseState = licenseState; + this.operationModeFileWatcher = new OperationModeFileWatcher(resourceWatcherService, + XPackPlugin.resolveConfigFile(env, "license_mode"), logger, () -> updateLicenseState(getLicense())); + this.scheduler.register(this); + populateExpirationCallbacks(); + } + + private void logExpirationWarning(long expirationMillis, boolean expired) { + String expiredMsg = expired ? "expired" : "will expire"; + String general = LoggerMessageFormat.format(null, "License [{}] on [{}].\n" + + "# If you have a new license, please update it. 
Otherwise, please reach out to\n" + + "# your support contact.\n" + + "# ", expiredMsg, DATE_FORMATTER.printer().print(expirationMillis)); + if (expired) { + general = general.toUpperCase(Locale.ROOT); + } + StringBuilder builder = new StringBuilder(general); + builder.append(System.lineSeparator()); + if (expired) { + builder.append("# COMMERCIAL PLUGINS OPERATING WITH REDUCED FUNCTIONALITY"); + } else { + builder.append("# Commercial plugins operate with reduced functionality on license expiration:"); + } + XPackLicenseState.EXPIRATION_MESSAGES.forEach((feature, messages) -> { + if (messages.length > 0) { + builder.append(System.lineSeparator()); + builder.append("# - "); + builder.append(feature); + for (String message : messages) { + builder.append(System.lineSeparator()); + builder.append("# - "); + builder.append(message); + } + } + }); + logger.warn("{}", builder); + } + + private void populateExpirationCallbacks() { + expirationCallbacks.add(new ExpirationCallback.Pre(days(7), days(25), days(1)) { + @Override + public void on(License license) { + logExpirationWarning(license.expiryDate(), false); + } + }); + expirationCallbacks.add(new ExpirationCallback.Pre(days(0), days(7), TimeValue.timeValueMinutes(10)) { + @Override + public void on(License license) { + logExpirationWarning(license.expiryDate(), false); + } + }); + expirationCallbacks.add(new ExpirationCallback.Post(days(0), null, TimeValue.timeValueMinutes(10)) { + @Override + public void on(License license) { + // logged when grace period begins + logExpirationWarning(license.expiryDate(), true); + } + }); + } + + /** + * Registers new license in the cluster + * Master only operation. Installs a new license on the master provided it is VALID + */ + public void registerLicense(final PutLicenseRequest request, final ActionListener listener) { + final License newLicense = request.license(); + final long now = clock.millis(); + if (!LicenseVerifier.verifyLicense(newLicense) || newLicense.issueDate() > now || newLicense.startDate() > now) { + listener.onResponse(new PutLicenseResponse(true, LicensesStatus.INVALID)); + } else if (newLicense.type().equals("basic")) { + listener.onFailure(new IllegalArgumentException("Registering basic licenses is not allowed.")); + } else if (newLicense.expiryDate() < now) { + listener.onResponse(new PutLicenseResponse(true, LicensesStatus.EXPIRED)); + } else { + if (!request.acknowledged()) { + // TODO: ack messages should be generated on the master, since another node's cluster state may be behind... 
+ final License currentLicense = getLicense(); + if (currentLicense != null) { + Map acknowledgeMessages = getAckMessages(newLicense, currentLicense); + if (acknowledgeMessages.isEmpty() == false) { + // needs acknowledgement + listener.onResponse(new PutLicenseResponse(false, LicensesStatus.VALID, ACKNOWLEDGEMENT_HEADER, + acknowledgeMessages)); + return; + } + } + } + + if (newLicense.isProductionLicense() + && XPackSettings.SECURITY_ENABLED.get(settings) + && XPackSettings.TRANSPORT_SSL_ENABLED.get(settings) == false + && isProductionMode(settings, clusterService.localNode())) { + // security is enabled but TLS is not configured, so we fail the entire request and throw an exception + throw new IllegalStateException("Cannot install a [" + newLicense.operationMode() + + "] license unless TLS is configured or security is disabled"); + // TODO we should really validate that all nodes have xpack installed and are consistently configured but this + // should happen on a different level and not in this code + } else { + clusterService.submitStateUpdateTask("register license [" + newLicense.uid() + "]", new + AckedClusterStateUpdateTask(request, listener) { + @Override + protected PutLicenseResponse newResponse(boolean acknowledged) { + return new PutLicenseResponse(acknowledged, LicensesStatus.VALID); + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + MetaData currentMetadata = currentState.metaData(); + LicensesMetaData licensesMetaData = currentMetadata.custom(LicensesMetaData.TYPE); + Version trialVersion = null; + if (licensesMetaData != null) { + trialVersion = licensesMetaData.getMostRecentTrialVersion(); + } + MetaData.Builder mdBuilder = MetaData.builder(currentMetadata); + mdBuilder.putCustom(LicensesMetaData.TYPE, new LicensesMetaData(newLicense, trialVersion)); + return ClusterState.builder(currentState).metaData(mdBuilder).build(); + } + }); + } + } + } + + public static Map getAckMessages(License newLicense, License currentLicense) { + Map acknowledgeMessages = new HashMap<>(); + if (!License.isAutoGeneratedLicense(currentLicense.signature()) // current license is not auto-generated + && currentLicense.issueDate() > newLicense.issueDate()) { // and has a later issue date + acknowledgeMessages.put("license", new String[]{ + "The new license is older than the currently installed license. 
" + + "Are you sure you want to override the current license?"}); + } + XPackLicenseState.ACKNOWLEDGMENT_MESSAGES.forEach((feature, ackMessages) -> { + String[] messages = ackMessages.apply(currentLicense.operationMode(), newLicense.operationMode()); + if (messages.length > 0) { + acknowledgeMessages.put(feature, messages); + } + }); + return acknowledgeMessages; + } + + + private static TimeValue days(int days) { + return TimeValue.timeValueHours(days * 24); + } + + @Override + public void triggered(SchedulerEngine.Event event) { + final LicensesMetaData licensesMetaData = clusterService.state().metaData().custom(LicensesMetaData.TYPE); + if (licensesMetaData != null) { + final License license = licensesMetaData.getLicense(); + if (event.getJobName().equals(LICENSE_JOB)) { + updateLicenseState(license); + } else if (event.getJobName().startsWith(ExpirationCallback.EXPIRATION_JOB_PREFIX)) { + expirationCallbacks.stream() + .filter(expirationCallback -> expirationCallback.getId().equals(event.getJobName())) + .forEach(expirationCallback -> expirationCallback.on(license)); + } + } + } + + /** + * Remove license from the cluster state metadata + */ + public void removeLicense(final DeleteLicenseRequest request, final ActionListener listener) { + clusterService.submitStateUpdateTask("delete license", + new AckedClusterStateUpdateTask(request, listener) { + @Override + protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { + return new ClusterStateUpdateResponse(acknowledged); + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + MetaData metaData = currentState.metaData(); + final LicensesMetaData currentLicenses = metaData.custom(LicensesMetaData.TYPE); + if (currentLicenses.getLicense() != LicensesMetaData.LICENSE_TOMBSTONE) { + MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); + LicensesMetaData newMetadata = new LicensesMetaData(LicensesMetaData.LICENSE_TOMBSTONE, + currentLicenses.getMostRecentTrialVersion()); + mdBuilder.putCustom(LicensesMetaData.TYPE, newMetadata); + return ClusterState.builder(currentState).metaData(mdBuilder).build(); + } else { + return currentState; + } + } + }); + } + + public License getLicense() { + final License license = getLicense(clusterService.state().metaData()); + return license == LicensesMetaData.LICENSE_TOMBSTONE ? null : license; + } + + void startTrialLicense(PostStartTrialRequest request, final ActionListener listener) { + if (VALID_TRIAL_TYPES.contains(request.getType()) == false) { + throw new IllegalArgumentException("Cannot start trial of type [" + request.getType() + "]. Valid trial types are " + + VALID_TRIAL_TYPES + "."); + } + StartTrialClusterTask task = new StartTrialClusterTask(logger, clusterService.getClusterName().value(), clock, request, listener); + clusterService.submitStateUpdateTask("started trial license", task); + } + + void startBasicLicense(PostStartBasicRequest request, final ActionListener listener) { + StartBasicClusterTask task = new StartBasicClusterTask(logger, clusterService.getClusterName().value(), clock, request, listener); + clusterService.submitStateUpdateTask("start basic license", task); + } + + /** + * Master-only operation to generate a one-time global self generated license. + * The self generated license is only generated and stored if the current cluster state metadata + * has no existing license. 
If the cluster currently has a basic license that has an expiration date, + * a new basic license with no expiration date is generated. + */ + private void registerOrUpdateSelfGeneratedLicense() { + clusterService.submitStateUpdateTask("maybe generate license for cluster", + new StartupSelfGeneratedLicenseTask(settings, clock, clusterService)); + } + + @Override + protected void doStart() throws ElasticsearchException { + clusterService.addListener(this); + scheduler.start(Collections.emptyList()); + logger.debug("initializing license state"); + if (clusterService.lifecycleState() == Lifecycle.State.STARTED) { + final ClusterState clusterState = clusterService.state(); + if (clusterState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK) == false && + clusterState.nodes().getMasterNode() != null) { + final LicensesMetaData currentMetaData = clusterState.metaData().custom(LicensesMetaData.TYPE); + boolean noLicense = currentMetaData == null || currentMetaData.getLicense() == null; + if (clusterState.getNodes().isLocalNodeElectedMaster() && + (noLicense || LicenseUtils.licenseNeedsExtended(currentMetaData.getLicense()))) { + // triggers a cluster changed event eventually notifying the current licensee + registerOrUpdateSelfGeneratedLicense(); + } + } + } + } + + @Override + protected void doStop() throws ElasticsearchException { + clusterService.removeListener(this); + scheduler.stop(); + // clear current license + currentLicense.set(null); + } + + @Override + protected void doClose() throws ElasticsearchException { + } + + /** + * When there is no global block on {@link org.elasticsearch.gateway.GatewayService#STATE_NOT_RECOVERED_BLOCK} + * notify licensees and issue auto-generated license if no license has been installed/issued yet. + */ + @Override + public void clusterChanged(ClusterChangedEvent event) { + final ClusterState previousClusterState = event.previousState(); + final ClusterState currentClusterState = event.state(); + if (!currentClusterState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { + final LicensesMetaData prevLicensesMetaData = previousClusterState.getMetaData().custom(LicensesMetaData.TYPE); + final LicensesMetaData currentLicensesMetaData = currentClusterState.getMetaData().custom(LicensesMetaData.TYPE); + if (logger.isDebugEnabled()) { + logger.debug("previous [{}]", prevLicensesMetaData); + logger.debug("current [{}]", currentLicensesMetaData); + } + // notify all interested plugins + if (previousClusterState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK) + || prevLicensesMetaData == null) { + if (currentLicensesMetaData != null) { + onUpdate(currentLicensesMetaData); + } + } else if (!prevLicensesMetaData.equals(currentLicensesMetaData)) { + onUpdate(currentLicensesMetaData); + } + + License currentLicense = null; + boolean noLicenseInPrevMetadata = prevLicensesMetaData == null || prevLicensesMetaData.getLicense() == null; + if (noLicenseInPrevMetadata == false) { + currentLicense = prevLicensesMetaData.getLicense(); + } + boolean noLicenseInCurrentMetadata = (currentLicensesMetaData == null || currentLicensesMetaData.getLicense() == null); + if (noLicenseInCurrentMetadata == false) { + currentLicense = currentLicensesMetaData.getLicense(); + } + + boolean noLicense = noLicenseInPrevMetadata && noLicenseInCurrentMetadata; + // auto-generate license if no licenses ever existed or if the current license is basic and + // needs extended. 
this will trigger a subsequent cluster changed event + if (currentClusterState.getNodes().isLocalNodeElectedMaster() + && (noLicense || LicenseUtils.licenseNeedsExtended(currentLicense))) { + registerOrUpdateSelfGeneratedLicense(); + } + } else if (logger.isDebugEnabled()) { + logger.debug("skipped license notifications reason: [{}]", GatewayService.STATE_NOT_RECOVERED_BLOCK); + } + } + + protected void updateLicenseState(final License license) { + if (license == LicensesMetaData.LICENSE_TOMBSTONE) { + // implies license has been explicitly deleted + licenseState.update(License.OperationMode.MISSING, false); + return; + } + if (license != null) { + long time = clock.millis(); + boolean active; + if (license.expiryDate() == BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS) { + active = true; + } else { + // We subtract the grace period from the current time to avoid overflowing on an expiration + // date that is near Long.MAX_VALUE + active = time >= license.issueDate() && time - GRACE_PERIOD_DURATION.getMillis() < license.expiryDate(); + } + licenseState.update(license.operationMode(), active); + + if (active) { + if (time < license.expiryDate()) { + logger.debug("license [{}] - valid", license.uid()); + } else { + logger.warn("license [{}] - grace", license.uid()); + } + } else { + logger.warn("license [{}] - expired", license.uid()); + } + } + } + + /** + * Notifies registered licensees of license state change and/or new active license + * based on the license in currentLicensesMetaData. + * Additionally schedules license expiry notifications and event callbacks + * relative to the current license's expiry + */ + private void onUpdate(final LicensesMetaData currentLicensesMetaData) { + final License license = getLicense(currentLicensesMetaData); + // license can be null if the trial license is yet to be auto-generated + // in this case, it is a no-op + if (license != null) { + final License previousLicense = currentLicense.get(); + if (license.equals(previousLicense) == false) { + currentLicense.set(license); + license.setOperationModeFileWatcher(operationModeFileWatcher); + scheduler.add(new SchedulerEngine.Job(LICENSE_JOB, nextLicenseCheck(license))); + for (ExpirationCallback expirationCallback : expirationCallbacks) { + scheduler.add(new SchedulerEngine.Job(expirationCallback.getId(), + (startTime, now) -> + expirationCallback.nextScheduledTimeForExpiry(license.expiryDate(), startTime, now))); + } + if (previousLicense != null) { + // remove operationModeFileWatcher to gc the old license object + previousLicense.removeOperationModeFileWatcher(); + } + logger.info("license [{}] mode [{}] - valid", license.uid(), + license.operationMode().name().toLowerCase(Locale.ROOT)); + } + updateLicenseState(license); + } + } + + // pkg private for tests + static SchedulerEngine.Schedule nextLicenseCheck(License license) { + return (startTime, time) -> { + if (time < license.issueDate()) { + // when we encounter a license with a future issue date + // which can happen with an autogenerated license, + // we want to schedule a notification on the license issue date + // so the license is notified once it is valid + // see https://github.com/elastic/x-plugins/issues/983 + return license.issueDate(); + } else if (time < license.expiryDate()) { + return license.expiryDate(); + } else if (time < license.expiryDate() + GRACE_PERIOD_DURATION.getMillis()) { + return license.expiryDate() + GRACE_PERIOD_DURATION.getMillis(); + } + return -1; // license is expired, no need to check again + }; + } + + 
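Taken together, `nextLicenseCheck` and `updateLicenseState` move a license through three phases: valid until the expiry date, a grace period after expiry (`GRACE_PERIOD_DURATION`, seven days), and fully expired after that. The following is a minimal, standalone sketch of that phase calculation only; it is not part of this diff, and the `LicensePhaseSketch` class and plain `long` timestamps are illustrative stand-ins for the real `License` and `SchedulerEngine` types:

```java
// Illustrative sketch only: mirrors the ordering of checks in nextLicenseCheck
// and the valid/grace/expired distinction logged by updateLicenseState.
public final class LicensePhaseSketch {

    // Assumed to match GRACE_PERIOD_DURATION = days(7) above.
    static final long GRACE_PERIOD_MILLIS = 7L * 24 * 60 * 60 * 1000;

    enum Phase { NOT_YET_VALID, VALID, GRACE, EXPIRED }

    static Phase phaseAt(long now, long issueDate, long expiryDate) {
        if (now < issueDate) {
            return Phase.NOT_YET_VALID;                       // issue date is still in the future
        } else if (now < expiryDate) {
            return Phase.VALID;                               // active license
        } else if (now < expiryDate + GRACE_PERIOD_MILLIS) {
            return Phase.GRACE;                               // expired, but inside the grace period
        }
        return Phase.EXPIRED;                                 // fully expired, no further checks scheduled
    }

    public static void main(String[] args) {
        long issue = 0L;
        long expiry = 30L * 24 * 60 * 60 * 1000;              // hypothetical 30-day license
        System.out.println(phaseAt(expiry - 1, issue, expiry));                        // VALID
        System.out.println(phaseAt(expiry + 1, issue, expiry));                        // GRACE
        System.out.println(phaseAt(expiry + GRACE_PERIOD_MILLIS + 1, issue, expiry));  // EXPIRED
    }
}
```

Note that the real `updateLicenseState` guards against overflow for expiry dates near `Long.MAX_VALUE` by subtracting the grace period from the current time rather than adding it to the expiry date; the sketch omits that detail for readability.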
public static License getLicense(final MetaData metaData) { + final LicensesMetaData licensesMetaData = metaData.custom(LicensesMetaData.TYPE); + return getLicense(licensesMetaData); + } + + static License getLicense(final LicensesMetaData metaData) { + if (metaData != null) { + License license = metaData.getLicense(); + if (license == LicensesMetaData.LICENSE_TOMBSTONE) { + return license; + } else if (license != null) { + boolean autoGeneratedLicense = License.isAutoGeneratedLicense(license.signature()); + if ((autoGeneratedLicense && SelfGeneratedLicense.verify(license)) + || (!autoGeneratedLicense && LicenseVerifier.verifyLicense(license))) { + return license; + } + } + } + return null; + } + + private static boolean isProductionMode(Settings settings, DiscoveryNode localNode) { + final boolean singleNodeDisco = "single-node".equals(DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings)); + return singleNodeDisco == false && isBoundToLoopback(localNode) == false; + } + + private static boolean isBoundToLoopback(DiscoveryNode localNode) { + return localNode.getAddress().address().getAddress().isLoopbackAddress(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java new file mode 100644 index 0000000000000..c5ab35f862ccb --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.rest.RestStatus; + +public class LicenseUtils { + + public static final String EXPIRED_FEATURE_METADATA = "es.license.expired.feature"; + + /** + * Exception to be thrown when a feature action requires a valid license, but license + * has expired + * + * feature accessible through {@link #EXPIRED_FEATURE_METADATA} in the + * exception's rest header + */ + public static ElasticsearchSecurityException newComplianceException(String feature) { + ElasticsearchSecurityException e = new ElasticsearchSecurityException("current license is non-compliant for [{}]", + RestStatus.FORBIDDEN, feature); + e.addMetadata(EXPIRED_FEATURE_METADATA, feature); + return e; + } + + /** + * Checks if a given {@link ElasticsearchSecurityException} refers to a feature that + * requires a valid license, but the license has expired. + */ + public static boolean isLicenseExpiredException(ElasticsearchSecurityException exception) { + return (exception != null) && (exception.getMetadata(EXPIRED_FEATURE_METADATA) != null); + } + + public static boolean licenseNeedsExtended(License license) { + return "basic".equals(license.type()) && license.expiryDate() != LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseVerifier.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseVerifier.java new file mode 100644 index 0000000000000..c670f070ad7c5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseVerifier.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.Streams; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; +import java.security.Signature; +import java.security.SignatureException; +import java.util.Arrays; +import java.util.Base64; +import java.util.Collections; + +/** + * Responsible for verifying signed licenses + */ +public class LicenseVerifier { + + /** + * verifies the license content with the signature using the packaged + * public key + * @param license to verify + * @return true if valid, false otherwise + */ + public static boolean verifyLicense(final License license, byte[] encryptedPublicKeyData) { + byte[] signedContent = null; + byte[] signatureHash = null; + try { + byte[] signatureBytes = Base64.getDecoder().decode(license.signature()); + ByteBuffer byteBuffer = ByteBuffer.wrap(signatureBytes); + int version = byteBuffer.getInt(); + int magicLen = byteBuffer.getInt(); + byte[] magic = new byte[magicLen]; + byteBuffer.get(magic); + int hashLen = byteBuffer.getInt(); + signatureHash = new byte[hashLen]; + byteBuffer.get(signatureHash); + int signedContentLen = byteBuffer.getInt(); + signedContent = new byte[signedContentLen]; + byteBuffer.get(signedContent); + XContentBuilder contentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + license.toXContent(contentBuilder, new ToXContent.MapParams(Collections.singletonMap(License.LICENSE_SPEC_VIEW_MODE, "true"))); + Signature rsa = Signature.getInstance("SHA512withRSA"); + rsa.initVerify(CryptUtils.readEncryptedPublicKey(encryptedPublicKeyData)); + BytesRefIterator iterator = BytesReference.bytes(contentBuilder).iterator(); + BytesRef ref; + while((ref = iterator.next()) != null) { + rsa.update(ref.bytes, ref.offset, ref.length); + } + return rsa.verify(signedContent) + && Arrays.equals(Base64.getEncoder().encode(encryptedPublicKeyData), signatureHash); + } catch (IOException | NoSuchAlgorithmException | SignatureException | InvalidKeyException e) { + throw new IllegalStateException(e); + } finally { + Arrays.fill(encryptedPublicKeyData, (byte) 0); + if (signedContent != null) { + Arrays.fill(signedContent, (byte) 0); + } + if (signatureHash != null) { + Arrays.fill(signatureHash, (byte) 0); + } + } + } + + public static boolean verifyLicense(final License license) { + final byte[] publicKeyBytes; + try (InputStream is = LicenseVerifier.class.getResourceAsStream("/public.key")) { + ByteArrayOutputStream out = new ByteArrayOutputStream(); + Streams.copy(is, out); + publicKeyBytes = out.toByteArray(); + } catch (IOException ex) { + throw new IllegalStateException(ex); + } + return verifyLicense(license, publicKeyBytes); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetaData.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetaData.java new file mode 100644 index 
0000000000000..56475de123f3c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetaData.java @@ -0,0 +1,218 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractNamedDiffable; +import org.elasticsearch.cluster.MergableCustomMetaData; +import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.license.License.OperationMode; + +import java.io.IOException; +import java.util.EnumSet; + +/** + * Contains metadata about registered licenses + */ +public class LicensesMetaData extends AbstractNamedDiffable implements MetaData.Custom, + MergableCustomMetaData { + + public static final String TYPE = "licenses"; + + /** + * When license is explicitly removed by a user, LICENSE_TOMBSTONE + * is used as a placeholder in the license metadata. This enables + * us to distinguish between the scenario when a cluster never + * had a license (null) and when a license was removed explicitly + * (LICENSE_TOMBSTONE). + * We rely on this to decide whether to generate an unsigned trial + * license or not. We should only generate a license if no license + * ever existed in the cluster state + */ + public static final License LICENSE_TOMBSTONE = License.builder() + .type("trial") + .issuer("elasticsearch") + .uid("TOMBSTONE") + .issuedTo("") + .maxNodes(0) + .issueDate(0) + .expiryDate(0) + .build(); + + private License license; + + // This field describes the version of x-pack for which this cluster has exercised a trial. If the field + // is null, then no trial has been exercised. We keep the version to leave open the possibility that we + // may eventually allow a cluster to exercise a trial every time they upgrade to a new major version. + @Nullable + private Version trialVersion; + + LicensesMetaData(License license, Version trialVersion) { + this.license = license; + this.trialVersion = trialVersion; + } + + public License getLicense() { + return license; + } + + boolean isEligibleForTrial() { + if (trialVersion == null) { + return true; + } + return Version.CURRENT.major > trialVersion.major; + } + + Version getMostRecentTrialVersion() { + return trialVersion; + } + + @Override + public String toString() { + return "LicensesMetaData{" + + "license=" + license + + ", trialVersion=" + trialVersion + + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + LicensesMetaData that = (LicensesMetaData) o; + + if (license != null ? !license.equals(that.license) : that.license != null) return false; + return trialVersion != null ? trialVersion.equals(that.trialVersion) : that.trialVersion == null; + } + + @Override + public int hashCode() { + int result = license != null ? license.hashCode() : 0; + result = 31 * result + (trialVersion != null ? 
trialVersion.hashCode() : 0); + return result; + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public EnumSet context() { + return EnumSet.of(MetaData.XContentContext.GATEWAY); + } + + public static LicensesMetaData fromXContent(XContentParser parser) throws IOException { + License license = LICENSE_TOMBSTONE; + Version trialLicense = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String fieldName = parser.currentName(); + if (fieldName != null) { + if (fieldName.equals(Fields.LICENSE)) { + token = parser.nextToken(); + if (token == XContentParser.Token.START_OBJECT) { + license = License.fromXContent(parser); + } else if (token == XContentParser.Token.VALUE_NULL) { + license = LICENSE_TOMBSTONE; + } + } else if (fieldName.equals(Fields.TRIAL_LICENSE)) { + parser.nextToken(); + trialLicense = Version.fromString(parser.text()); + } + } + } + } + return new LicensesMetaData(license, trialLicense); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (license == LICENSE_TOMBSTONE) { + builder.nullField(Fields.LICENSE); + } else { + builder.startObject(Fields.LICENSE); + license.toInnerXContent(builder, params); + builder.endObject(); + } + if (trialVersion != null) { + builder.field(Fields.TRIAL_LICENSE, trialVersion.toString()); + } + return builder; + } + + @Override + public void writeTo(StreamOutput streamOutput) throws IOException { + if (license == LICENSE_TOMBSTONE) { + streamOutput.writeBoolean(false); // no license + } else { + streamOutput.writeBoolean(true); // has a license + license.writeTo(streamOutput); + } + if (streamOutput.getVersion().onOrAfter(Version.V_6_1_0)) { + if (trialVersion == null) { + streamOutput.writeBoolean(false); + } else { + streamOutput.writeBoolean(true); + Version.writeVersion(trialVersion, streamOutput); + } + } + } + + public LicensesMetaData(StreamInput streamInput) throws IOException { + if (streamInput.readBoolean()) { + license = License.readLicense(streamInput); + } else { + license = LICENSE_TOMBSTONE; + } + if (streamInput.getVersion().onOrAfter(Version.V_6_1_0)) { + boolean hasExercisedTrial = streamInput.readBoolean(); + if (hasExercisedTrial) { + this.trialVersion = Version.readVersion(streamInput); + } + } + } + + public static NamedDiff readDiffFrom(StreamInput streamInput) throws IOException { + return readDiffFrom(MetaData.Custom.class, TYPE, streamInput); + } + + public static License extractLicense(LicensesMetaData licensesMetaData) { + if (licensesMetaData != null) { + License license = licensesMetaData.getLicense(); + if (license == LicensesMetaData.LICENSE_TOMBSTONE) { + return null; + } else { + return license; + } + } + return null; + } + + @Override + public LicensesMetaData merge(LicensesMetaData other) { + if (other.license == null) { + return this; + } else if (license == null + || OperationMode.compare(other.license.operationMode(), license.operationMode()) > 0) { + return other; + } + return this; + } + + private static final class Fields { + private static final String LICENSE = "license"; + private static final String TRIAL_LICENSE = "trial_license"; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesStatus.java new file mode 100644 index 0000000000000..91e0d7239cfa1 --- /dev/null +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesStatus.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +public enum LicensesStatus { + VALID((byte) 0), + INVALID((byte) 1), + EXPIRED((byte) 2); + + private final byte id; + + LicensesStatus(byte id) { + this.id = id; + } + + public int id() { + return id; + } + + public static LicensesStatus fromId(int id) { + if (id == 0) { + return VALID; + } else if (id == 1) { + return INVALID; + } else if (id == 2) { + return EXPIRED; + } else { + throw new IllegalStateException("no valid LicensesStatus for id=" + id); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/Licensing.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/Licensing.java new file mode 100644 index 0000000000000..aedd1410d4c95 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/Licensing.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.core.XPackPlugin.transportClientMode; + +public class Licensing implements ActionPlugin { + + public static final String NAME = "license"; + protected final Settings settings; + + // Until this is moved out to its own plugin (its currently in XPackPlugin.java, we need to make sure that any edits to this file + // are also carried out in XPackClientPlugin.java + public List getNamedWriteables() { + List entries = new ArrayList<>(); + entries.add(new NamedWriteableRegistry.Entry(MetaData.Custom.class, LicensesMetaData.TYPE, LicensesMetaData::new)); + entries.add(new NamedWriteableRegistry.Entry(NamedDiff.class, LicensesMetaData.TYPE, LicensesMetaData::readDiffFrom)); + return entries; + } + + // Until this is moved out to its own plugin (its currently in XPackPlugin.java, we need to make sure that any edits to this file + // are also carried out in XPackClientPlugin.java + public List getNamedXContent() { + List entries = new ArrayList<>(); + // Metadata + 
entries.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(LicensesMetaData.TYPE), + LicensesMetaData::fromXContent)); + return entries; + } + + public Licensing(Settings settings) { + this.settings = settings; + } + + @Override + public List> getActions() { + return Arrays.asList(new ActionHandler<>(PutLicenseAction.INSTANCE, TransportPutLicenseAction.class), + new ActionHandler<>(GetLicenseAction.INSTANCE, TransportGetLicenseAction.class), + new ActionHandler<>(DeleteLicenseAction.INSTANCE, TransportDeleteLicenseAction.class), + new ActionHandler<>(PostStartTrialAction.INSTANCE, TransportPostStartTrialAction.class), + new ActionHandler<>(GetTrialStatusAction.INSTANCE, TransportGetTrialStatusAction.class), + new ActionHandler<>(PostStartBasicAction.INSTANCE, TransportPostStartBasicAction.class), + new ActionHandler<>(GetBasicStatusAction.INSTANCE, TransportGetBasicStatusAction.class)); + } + + @Override + public List getRestHandlers(Settings settings, RestController restController, ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster) { + List handlers = new ArrayList<>(); + handlers.add(new RestGetLicenseAction(settings, restController)); + handlers.add(new RestPutLicenseAction(settings, restController)); + handlers.add(new RestDeleteLicenseAction(settings, restController)); + handlers.add(new RestGetTrialStatus(settings, restController)); + handlers.add(new RestGetBasicStatus(settings, restController)); + handlers.add(new RestPostStartTrialLicense(settings, restController)); + handlers.add(new RestPostStartBasicLicense(settings, restController)); + return handlers; + } + + // Until this is moved out to its own plugin (its currently in XPackPlugin.java, we need to make sure that any edits to this file + // are also carried out in XPackClientPlugin.java + public List> getSettings() { + // TODO convert this wildcard to a real setting + return Collections.singletonList(Setting.groupSetting("license.", Setting.Property.NodeScope)); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java new file mode 100644 index 0000000000000..21381b376925d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.rest.action.RestBuilderListener; + +public class LicensingClient { + + private final ElasticsearchClient client; + + public LicensingClient(ElasticsearchClient client) { + this.client = client; + } + + public PutLicenseRequestBuilder preparePutLicense(License license) { + return new PutLicenseRequestBuilder(client).setLicense(license); + } + + public void putLicense(PutLicenseRequest request, ActionListener listener) { + client.execute(PutLicenseAction.INSTANCE, request, listener); + } + + public GetLicenseRequestBuilder prepareGetLicense() { + return new GetLicenseRequestBuilder(client); + } + + public void getLicense(GetLicenseRequest request, ActionListener listener) { + client.execute(GetLicenseAction.INSTANCE, request, listener); + } + + public DeleteLicenseRequestBuilder prepareDeleteLicense() { + return new DeleteLicenseRequestBuilder(client); + } + + public void deleteLicense(DeleteLicenseRequest request, ActionListener listener) { + client.execute(DeleteLicenseAction.INSTANCE, request, listener); + } + + public PostStartTrialRequestBuilder preparePostStartTrial() { + return new PostStartTrialRequestBuilder(client, PostStartTrialAction.INSTANCE); + } + + public GetTrialStatusRequestBuilder prepareGetStartTrial() { + return new GetTrialStatusRequestBuilder(client, GetTrialStatusAction.INSTANCE); + } + + public void postStartTrial(PostStartTrialRequest request, ActionListener listener) { + client.execute(PostStartTrialAction.INSTANCE, request, listener); + } + + public void postStartBasic(PostStartBasicRequest request, ActionListener listener) { + client.execute(PostStartBasicAction.INSTANCE, request, listener); + } + + public PostStartBasicRequestBuilder preparePostStartBasic() { + return new PostStartBasicRequestBuilder(client, PostStartBasicAction.INSTANCE); + } + + public GetBasicStatusRequestBuilder prepareGetStartBasic() { + return new GetBasicStatusRequestBuilder(client, GetBasicStatusAction.INSTANCE); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/OperationModeFileWatcher.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/OperationModeFileWatcher.java new file mode 100644 index 0000000000000..6ad3a28c8cea9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/OperationModeFileWatcher.java @@ -0,0 +1,128 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.license.License.OperationMode; +import org.elasticsearch.watcher.FileChangesListener; +import org.elasticsearch.watcher.FileWatcher; +import org.elasticsearch.watcher.ResourceWatcherService; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * File based watcher for license {@link OperationMode} + * Watches for changes in licenseModePath, use + * {@link #getCurrentOperationMode()} to access the latest mode + * + * In case of failure to read a valid operation mode from licenseModePath, + * the operation mode will default to PLATINUM + */ +public final class OperationModeFileWatcher implements FileChangesListener { + private final ResourceWatcherService resourceWatcherService; + private final Path licenseModePath; + private final AtomicBoolean initialized = new AtomicBoolean(); + private final OperationMode defaultOperationMode = OperationMode.PLATINUM; + private volatile OperationMode currentOperationMode = defaultOperationMode; + private final Logger logger; + private final Runnable onChange; + + public OperationModeFileWatcher(ResourceWatcherService resourceWatcherService, Path licenseModePath, + Logger logger, Runnable onChange) { + this.resourceWatcherService = resourceWatcherService; + this.licenseModePath = licenseModePath; + this.logger = logger; + this.onChange = onChange; + } + + public void init() { + if (initialized.compareAndSet(false, true)) { + final FileWatcher watcher = new FileWatcher(licenseModePath); + watcher.addListener(this); + try { + resourceWatcherService.add(watcher, ResourceWatcherService.Frequency.HIGH); + if (Files.exists(licenseModePath)) { + onChange(licenseModePath); + } + } catch (IOException e) { + logger.error("couldn't initialize watching license mode file", e); + } + } + } + + /** + * Returns the current operation mode based on license mode file. 
+ * Defaults to {@link OperationMode#PLATINUM} + */ + public OperationMode getCurrentOperationMode() { + return currentOperationMode; + } + + @Override + public void onFileInit(Path file) { + onChange(file); + } + + @Override + public void onFileCreated(Path file) { + onChange(file); + } + + @Override + public void onFileDeleted(Path file) { + onChange(file); + } + + @Override + public void onFileChanged(Path file) { + onChange(file); + } + + private synchronized void onChange(Path file) { + if (file.equals(licenseModePath)) { + OperationMode newOperationMode = defaultOperationMode; + try { + if (Files.exists(licenseModePath) + && Files.isReadable(licenseModePath)) { + final byte[] content; + try { + content = Files.readAllBytes(licenseModePath); + } catch (IOException e) { + logger.error( + (Supplier) () -> new ParameterizedMessage( + "couldn't read operation mode from [{}]", licenseModePath.toAbsolutePath()), e); + return; + } + // this UTF-8 conversion is much pickier than java String + final String operationMode = new BytesRef(content).utf8ToString(); + try { + newOperationMode = OperationMode.resolve(operationMode); + } catch (IllegalArgumentException e) { + logger.error( + (Supplier) () -> new ParameterizedMessage( + "invalid operation mode in [{}]", licenseModePath.toAbsolutePath()), e); + return; + } + } + } finally { + // set this after the fact to prevent jumping back and forth: first setting to the default and then + // resetting it after reading the actual op mode. + this.currentOperationMode = newOperationMode; + } + onChange.run(); + } + } +} + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicAction.java new file mode 100644 index 0000000000000..eb55b1be00655 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicAction.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public class PostStartBasicAction extends Action { + + public static final PostStartBasicAction INSTANCE = new PostStartBasicAction(); + public static final String NAME = "cluster:admin/xpack/license/start_basic"; + + private PostStartBasicAction() { + super(NAME); + } + + @Override + public PostStartBasicRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new PostStartBasicRequestBuilder(client, this); + } + + @Override + public PostStartBasicResponse newResponse() { + return new PostStartBasicResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicRequest.java new file mode 100644 index 0000000000000..35867c1413e1b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicRequest.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public class PostStartBasicRequest extends AcknowledgedRequest { + + private boolean acknowledge = false; + + @Override + public ActionRequestValidationException validate() { + return null; + } + + public PostStartBasicRequest acknowledge(boolean acknowledge) { + this.acknowledge = acknowledge; + return this; + } + + public boolean isAcknowledged() { + return acknowledge; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + acknowledge = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(acknowledge); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicRequestBuilder.java new file mode 100644 index 0000000000000..eb1ebb7b56c85 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicRequestBuilder.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +class PostStartBasicRequestBuilder extends ActionRequestBuilder { + + PostStartBasicRequestBuilder(ElasticsearchClient client, PostStartBasicAction action) { + super(client, action, new PostStartBasicRequest()); + } + + public PostStartBasicRequestBuilder setAcknowledge(boolean acknowledge) { + request.acknowledge(acknowledge); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicResponse.java new file mode 100644 index 0000000000000..985c3689e6d7c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicResponse.java @@ -0,0 +1,131 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.RestStatus; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.license.PostStartBasicResponse.Status.NEED_ACKNOWLEDGEMENT; + +class PostStartBasicResponse extends AcknowledgedResponse { + + private Map acknowledgeMessages; + private String acknowledgeMessage; + + enum Status { + GENERATED_BASIC(true, null, RestStatus.OK), + ALREADY_USING_BASIC(false, "Operation failed: Current license is basic.", RestStatus.FORBIDDEN), + NEED_ACKNOWLEDGEMENT(false, "Operation failed: Needs acknowledgement.", RestStatus.OK); + + private final boolean isBasicStarted; + private final String errorMessage; + private final RestStatus restStatus; + + Status(boolean isBasicStarted, String errorMessage, RestStatus restStatus) { + this.isBasicStarted = isBasicStarted; + this.errorMessage = errorMessage; + this.restStatus = restStatus; + } + + boolean isBasicStarted() { + return isBasicStarted; + } + + String getErrorMessage() { + return errorMessage; + } + + RestStatus getRestStatus() { + return restStatus; + } + } + + private Status status; + + PostStartBasicResponse() { + } + + PostStartBasicResponse(Status status) { + this(status, Collections.emptyMap(), null); + } + + PostStartBasicResponse(Status status, Map acknowledgeMessages, String acknowledgeMessage) { + super(status != NEED_ACKNOWLEDGEMENT); + this.status = status; + this.acknowledgeMessages = acknowledgeMessages; + this.acknowledgeMessage = acknowledgeMessage; + } + + public Status getStatus() { + return status; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + readAcknowledged(in); + status = in.readEnum(Status.class); + acknowledgeMessage = in.readOptionalString(); + int size = in.readVInt(); + Map acknowledgeMessages = new HashMap<>(size); + for (int i = 0; i < size; i++) { + String feature = in.readString(); + int nMessages = in.readVInt(); + String[] messages = new String[nMessages]; + for (int j = 0; j < nMessages; j++) { + messages[j] = in.readString(); + } + acknowledgeMessages.put(feature, messages); + } + this.acknowledgeMessages = acknowledgeMessages; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + writeAcknowledged(out); + out.writeEnum(status); + out.writeOptionalString(acknowledgeMessage); + out.writeVInt(acknowledgeMessages.size()); + for (Map.Entry entry : acknowledgeMessages.entrySet()) { + out.writeString(entry.getKey()); + out.writeVInt(entry.getValue().length); + for (String message : entry.getValue()) { + out.writeString(message); + } + } + } + + @Override + protected void addCustomFields(XContentBuilder builder, Params params) throws IOException { + if (status.isBasicStarted()) { + builder.field("basic_was_started", true); + } else { + builder.field("basic_was_started", false); + builder.field("error_message", status.getErrorMessage()); + } + if (acknowledgeMessages.isEmpty() == false) { + builder.startObject("acknowledge"); + builder.field("message", acknowledgeMessage); + for (Map.Entry entry : acknowledgeMessages.entrySet()) { + builder.startArray(entry.getKey()); + for (String message : entry.getValue()) { + 
builder.value(message); + } + builder.endArray(); + } + builder.endObject(); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialAction.java new file mode 100644 index 0000000000000..b0634ef22a9e1 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialAction.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public class PostStartTrialAction extends Action { + + public static final PostStartTrialAction INSTANCE = new PostStartTrialAction(); + public static final String NAME = "cluster:admin/xpack/license/start_trial"; + + private PostStartTrialAction() { + super(NAME); + } + + @Override + public PostStartTrialRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new PostStartTrialRequestBuilder(client, this); + } + + @Override + public PostStartTrialResponse newResponse() { + return new PostStartTrialResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java new file mode 100644 index 0000000000000..c6293646c09f7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequest.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public class PostStartTrialRequest extends MasterNodeRequest { + + private boolean acknowledge = false; + private String type; + + @Override + public ActionRequestValidationException validate() { + return null; + } + + public PostStartTrialRequest setType(String type) { + this.type = type; + return this; + } + + public String getType() { + return type; + } + + public PostStartTrialRequest acknowledge(boolean acknowledge) { + this.acknowledge = acknowledge; + return this; + } + + public boolean isAcknowledged() { + return acknowledge; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + if (in.getVersion().onOrAfter(Version.V_6_3_0)) { + type = in.readString(); + acknowledge = in.readBoolean(); + } else { + type = "trial"; + acknowledge = true; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + // TODO: Change to 6.3 after backport + Version version = Version.V_7_0_0_alpha1; + if (out.getVersion().onOrAfter(version)) { + super.writeTo(out); + out.writeString(type); + out.writeBoolean(acknowledge); + } else { + if ("trial".equals(type) == false) { + throw new IllegalArgumentException("All nodes in cluster must be version [" + version + + "] or newer to start trial with a different type than 'trial'. Attempting to write to " + + "a node with version [" + out.getVersion() + "] with trial type [" + type + "]."); + } else if (acknowledge == false) { + throw new IllegalArgumentException("Request must be acknowledged to send to a node with a version " + + "prior to [" + version + "]. Attempting to send request to node with version [" + out.getVersion() + "] " + + "without acknowledgement."); + } else { + super.writeTo(out); + } + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequestBuilder.java new file mode 100644 index 0000000000000..6b0beba171bdd --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequestBuilder.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +class PostStartTrialRequestBuilder extends ActionRequestBuilder { + + PostStartTrialRequestBuilder(ElasticsearchClient client, PostStartTrialAction action) { + super(client, action, new PostStartTrialRequest()); + } + + public PostStartTrialRequestBuilder setAcknowledge(boolean acknowledge) { + request.acknowledge(acknowledge); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialResponse.java new file mode 100644 index 0000000000000..25f354d7765a1 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialResponse.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.rest.RestStatus; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +class PostStartTrialResponse extends ActionResponse { + + // Nodes Prior to 6.3 did not have NEED_ACKNOWLEDGEMENT as part of status + enum Pre63Status { + UPGRADED_TO_TRIAL, + TRIAL_ALREADY_ACTIVATED; + } + enum Status { + UPGRADED_TO_TRIAL(true, null, RestStatus.OK), + TRIAL_ALREADY_ACTIVATED(false, "Operation failed: Trial was already activated.", RestStatus.FORBIDDEN), + NEED_ACKNOWLEDGEMENT(false,"Operation failed: Needs acknowledgement.", RestStatus.OK); + + private final boolean isTrialStarted; + + private final String errorMessage; + private final RestStatus restStatus; + Status(boolean isTrialStarted, String errorMessage, RestStatus restStatus) { + this.isTrialStarted = isTrialStarted; + this.errorMessage = errorMessage; + this.restStatus = restStatus; + } + + boolean isTrialStarted() { + return isTrialStarted; + } + + String getErrorMessage() { + return errorMessage; + } + + RestStatus getRestStatus() { + return restStatus; + } + + } + + private Status status; + private Map acknowledgeMessages; + private String acknowledgeMessage; + + PostStartTrialResponse() { + } + + PostStartTrialResponse(Status status) { + this(status, Collections.emptyMap(), null); + } + + PostStartTrialResponse(Status status, Map acknowledgeMessages, String acknowledgeMessage) { + this.status = status; + this.acknowledgeMessages = acknowledgeMessages; + this.acknowledgeMessage = acknowledgeMessage; + } + + public Status getStatus() { + return status; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + status = in.readEnum(Status.class); + // TODO: Change to 6.3 after backport + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + acknowledgeMessage = in.readOptionalString(); + int size = in.readVInt(); + Map acknowledgeMessages = new HashMap<>(size); + for (int i = 0; i < size; i++) { + String feature = in.readString(); + int nMessages = in.readVInt(); + String[] messages = new String[nMessages]; + for (int j = 0; j < nMessages; j++) { + messages[j] = in.readString(); + } + 
acknowledgeMessages.put(feature, messages); + } + this.acknowledgeMessages = acknowledgeMessages; + } else { + this.acknowledgeMessages = Collections.emptyMap(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + // TODO: Change to 6.3 after backport + Version version = Version.V_7_0_0_alpha1; + if (out.getVersion().onOrAfter(version)) { + out.writeEnum(status); + out.writeOptionalString(acknowledgeMessage); + out.writeVInt(acknowledgeMessages.size()); + for (Map.Entry entry : acknowledgeMessages.entrySet()) { + out.writeString(entry.getKey()); + out.writeVInt(entry.getValue().length); + for (String message : entry.getValue()) { + out.writeString(message); + } + } + } else { + if (status == Status.UPGRADED_TO_TRIAL) { + out.writeEnum(Pre63Status.UPGRADED_TO_TRIAL); + } else if (status == Status.TRIAL_ALREADY_ACTIVATED) { + out.writeEnum(Pre63Status.TRIAL_ALREADY_ACTIVATED); + } else { + throw new IllegalArgumentException("Starting trial on node with version [" + Version.CURRENT + "] requires " + + "acknowledgement parameter."); + } + } + } + + Map getAcknowledgementMessages() { + return acknowledgeMessages; + } + + String getAcknowledgementMessage() { + return acknowledgeMessage; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseAction.java new file mode 100644 index 0000000000000..4aee591b9c547 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseAction.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public class PutLicenseAction extends Action { + + public static final PutLicenseAction INSTANCE = new PutLicenseAction(); + public static final String NAME = "cluster:admin/xpack/license/put"; + + private PutLicenseAction() { + super(NAME); + } + + @Override + public PutLicenseResponse newResponse() { + return new PutLicenseResponse(); + } + + @Override + public PutLicenseRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new PutLicenseRequestBuilder(client, this); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseRequest.java new file mode 100644 index 0000000000000..6657adee41d36 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseRequest.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
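Editor's note: both `PostStartTrialResponse` above and `PutLicenseResponse` later in this patch serialize the acknowledgement messages as a length-prefixed map of feature name to message array. The sketch below reproduces that encoding with plain `java.io` streams standing in for `StreamInput`/`StreamOutput` (which use variable-length ints), so it is a model of the framing rather than the actual wire format.

```java
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

// Length-prefixed map encoding: entry count, then for each entry the feature name,
// the number of messages, and the messages themselves.
final class AckMessagesCodec {
    static void write(DataOutputStream out, Map<String, String[]> messages) throws IOException {
        out.writeInt(messages.size());                    // the real code uses writeVInt
        for (Map.Entry<String, String[]> entry : messages.entrySet()) {
            out.writeUTF(entry.getKey());
            out.writeInt(entry.getValue().length);
            for (String message : entry.getValue()) {
                out.writeUTF(message);
            }
        }
    }

    static Map<String, String[]> read(DataInputStream in) throws IOException {
        int size = in.readInt();
        Map<String, String[]> messages = new HashMap<>(size);
        for (int i = 0; i < size; i++) {
            String feature = in.readUTF();
            String[] values = new String[in.readInt()];
            for (int j = 0; j < values.length; j++) {
                values[j] = in.readUTF();
            }
            messages.put(feature, values);
        }
        return messages;
    }
}
```

Note also the downgrade path in `PostStartTrialResponse.writeTo`: when talking to an older node, the status is translated to the two-value `Pre63Status` enum, and a `NEED_ACKNOWLEDGEMENT` status cannot be expressed at all, hence the exception.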
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentType; + +import java.io.IOException; + + +public class PutLicenseRequest extends AcknowledgedRequest { + + private License license; + private boolean acknowledge = false; + + public PutLicenseRequest() { + } + + @Override + public ActionRequestValidationException validate() { + return (license == null) ? ValidateActions.addValidationError("license is missing", null) : null; + } + + /** + * Parses license from json format to an instance of {@link License} + * + * @param licenseDefinition licenses definition + * @param xContentType the content type of the license + */ + public PutLicenseRequest license(BytesReference licenseDefinition, XContentType xContentType) { + try { + return license(License.fromSource(licenseDefinition, xContentType)); + } catch (IOException e) { + throw new IllegalArgumentException("failed to parse license source", e); + } + } + + public PutLicenseRequest license(License license) { + this.license = license; + return this; + } + + public License license() { + return license; + } + + public PutLicenseRequest acknowledge(boolean acknowledge) { + this.acknowledge = acknowledge; + return this; + } + + public boolean acknowledged() { + return acknowledge; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + license = License.readLicense(in); + acknowledge = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + license.writeTo(out); + out.writeBoolean(acknowledge); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseRequestBuilder.java new file mode 100644 index 0000000000000..b7c93d03cd5ff --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseRequestBuilder.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentType; + +/** + * Register license request builder + */ +public class PutLicenseRequestBuilder extends AcknowledgedRequestBuilder { + + public PutLicenseRequestBuilder(ElasticsearchClient client) { + this(client, PutLicenseAction.INSTANCE); + } + + /** + * Constructs register license request + * + * @param client elasticsearch client + */ + public PutLicenseRequestBuilder(ElasticsearchClient client, PutLicenseAction action) { + super(client, action, new PutLicenseRequest()); + } + + /** + * Sets the license + * + * @param license license + * @return this builder + */ + public PutLicenseRequestBuilder setLicense(License license) { + request.license(license); + return this; + } + + public PutLicenseRequestBuilder setLicense(BytesReference licenseSource, XContentType xContentType) { + request.license(licenseSource, xContentType); + return this; + } + + public PutLicenseRequestBuilder setAcknowledge(boolean acknowledge) { + request.acknowledge(acknowledge); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseResponse.java new file mode 100644 index 0000000000000..a17836a836b66 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PutLicenseResponse.java @@ -0,0 +1,121 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
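Editor's note: a hedged usage sketch of the builder defined above. It assumes a connected `org.elasticsearch.client.Client` and a signed license JSON document, neither of which is part of this patch; it only chains methods that `PutLicenseRequestBuilder` and its `AcknowledgedRequestBuilder` base expose, but it cannot run without a cluster.

```java
import org.elasticsearch.client.Client;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.license.LicensesStatus;
import org.elasticsearch.license.PutLicenseRequestBuilder;
import org.elasticsearch.license.PutLicenseResponse;

// Hypothetical helper showing how the builder is meant to be chained; "client" and
// "licenseJson" are assumptions supplied by the caller.
final class InstallLicenseExample {
    static boolean installLicense(Client client, String licenseJson) {
        PutLicenseResponse response = new PutLicenseRequestBuilder(client)
                .setLicense(new BytesArray(licenseJson), XContentType.JSON) // parsed via License.fromSource(...)
                .setAcknowledge(true)                                       // required if the new license downgrades features
                .get();
        return response.status() == LicensesStatus.VALID;
    }
}
```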
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +public class PutLicenseResponse extends AcknowledgedResponse { + + private LicensesStatus status; + private Map acknowledgeMessages; + private String acknowledgeHeader; + + PutLicenseResponse() { + } + + public PutLicenseResponse(boolean acknowledged, LicensesStatus status) { + this(acknowledged, status, null, Collections.emptyMap()); + } + + public PutLicenseResponse(boolean acknowledged, LicensesStatus status, String acknowledgeHeader, + Map acknowledgeMessages) { + super(acknowledged); + this.status = status; + this.acknowledgeHeader = acknowledgeHeader; + this.acknowledgeMessages = acknowledgeMessages; + } + + public LicensesStatus status() { + return status; + } + + public Map acknowledgeMessages() { + return acknowledgeMessages; + } + + public String acknowledgeHeader() { + return acknowledgeHeader; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + readAcknowledged(in); + status = LicensesStatus.fromId(in.readVInt()); + acknowledgeHeader = in.readOptionalString(); + int size = in.readVInt(); + Map acknowledgeMessages = new HashMap<>(size); + for (int i = 0; i < size; i++) { + String feature = in.readString(); + int nMessages = in.readVInt(); + String[] messages = new String[nMessages]; + for (int j = 0; j < nMessages; j++) { + messages[j] = in.readString(); + } + acknowledgeMessages.put(feature, messages); + } + this.acknowledgeMessages = acknowledgeMessages; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + writeAcknowledged(out); + out.writeVInt(status.id()); + out.writeOptionalString(acknowledgeHeader); + out.writeVInt(acknowledgeMessages.size()); + for (Map.Entry entry : acknowledgeMessages.entrySet()) { + out.writeString(entry.getKey()); + out.writeVInt(entry.getValue().length); + for (String message : entry.getValue()) { + out.writeString(message); + } + } + } + + @Override + protected void addCustomFields(XContentBuilder builder, Params params) throws IOException { + switch (status) { + case VALID: + builder.field("license_status", "valid"); + break; + case INVALID: + builder.field("license_status", "invalid"); + break; + case EXPIRED: + builder.field("license_status", "expired"); + break; + default: + throw new IllegalArgumentException("unknown status [" + status + "] found"); + } + if (!acknowledgeMessages.isEmpty()) { + builder.startObject("acknowledge"); + builder.field("message", acknowledgeHeader); + for (Map.Entry entry : acknowledgeMessages.entrySet()) { + builder.startArray(entry.getKey()); + for (String message : entry.getValue()) { + builder.value(message); + } + builder.endArray(); + } + builder.endObject(); + } + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java new file mode 100644 index 0000000000000..7395f00649608 --- /dev/null +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.XPackClient; +import org.elasticsearch.xpack.core.rest.XPackRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.DELETE; + +public class RestDeleteLicenseAction extends XPackRestHandler { + public RestDeleteLicenseAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(DELETE, URI_BASE + "/license", this); + } + + @Override + public String getName() { + return "xpack_delete_license_action"; + } + + @Override + public RestChannelConsumer doPrepareRequest(final RestRequest request, final XPackClient client) throws IOException { + DeleteLicenseRequest deleteLicenseRequest = new DeleteLicenseRequest(); + deleteLicenseRequest.timeout(request.paramAsTime("timeout", deleteLicenseRequest.timeout())); + deleteLicenseRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteLicenseRequest.masterNodeTimeout())); + + return channel -> client.es().admin().cluster().execute(DeleteLicenseAction.INSTANCE, deleteLicenseRequest, + new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetBasicStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetBasicStatus.java new file mode 100644 index 0000000000000..7490ae74f1eb5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetBasicStatus.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.XPackClient; +import org.elasticsearch.xpack.core.rest.XPackRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +public class RestGetBasicStatus extends XPackRestHandler { + + RestGetBasicStatus(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(GET, URI_BASE + "/license/basic_status", this); + } + + @Override + protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException { + return channel -> client.licensing().prepareGetStartBasic().execute( + new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(GetBasicStatusResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + builder.field("eligible_to_start_basic", response.isEligibleToStartBasic()); + builder.endObject(); + return new BytesRestResponse(RestStatus.OK, builder); + } + }); + } + + @Override + public String getName() { + return "xpack_basic_status_action"; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetLicenseAction.java new file mode 100644 index 0000000000000..31004823e4920 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetLicenseAction.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.XPackClient; +import org.elasticsearch.xpack.core.rest.XPackRestHandler; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestStatus.NOT_FOUND; +import static org.elasticsearch.rest.RestStatus.OK; + +public class RestGetLicenseAction extends XPackRestHandler { + + @Inject + public RestGetLicenseAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(GET, URI_BASE + "/license", this); + } + + @Override + public String getName() { + return "xpack_get_license_action"; + } + + /** + * There will be only one license displayed per feature, the selected license will have the latest expiry_date + * out of all other licenses for the feature. + *
+ * The licenses are sorted by latest issue_date + */ + @Override + public RestChannelConsumer doPrepareRequest(final RestRequest request, final XPackClient client) throws IOException { + final Map overrideParams = new HashMap<>(2); + overrideParams.put(License.REST_VIEW_MODE, "true"); + overrideParams.put(License.LICENSE_VERSION_MODE, String.valueOf(License.VERSION_CURRENT)); + final ToXContent.Params params = new ToXContent.DelegatingMapParams(overrideParams, request); + GetLicenseRequest getLicenseRequest = new GetLicenseRequest(); + getLicenseRequest.local(request.paramAsBoolean("local", getLicenseRequest.local())); + return channel -> client.es().admin().cluster().execute(GetLicenseAction.INSTANCE, getLicenseRequest, + new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(GetLicenseResponse response, XContentBuilder builder) throws Exception { + // Default to pretty printing, but allow ?pretty=false to disable + if (!request.hasParam("pretty")) { + builder.prettyPrint().lfAtEnd(); + } + boolean hasLicense = response.license() != null; + builder.startObject(); + if (hasLicense) { + builder.startObject("license"); + response.license().toInnerXContent(builder, params); + builder.endObject(); + } + builder.endObject(); + return new BytesRestResponse(hasLicense ? OK : NOT_FOUND, builder); + } + }); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetTrialStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetTrialStatus.java new file mode 100644 index 0000000000000..a136f2a88a65d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetTrialStatus.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.XPackClient; +import org.elasticsearch.xpack.core.rest.XPackRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +public class RestGetTrialStatus extends XPackRestHandler { + + RestGetTrialStatus(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(GET, URI_BASE + "/license/trial_status", this); + } + + @Override + protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException { + return channel -> client.licensing().prepareGetStartTrial().execute( + new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(GetTrialStatusResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + builder.field("eligible_to_start_trial", response.isEligibleToStartTrial()); + builder.endObject(); + return new BytesRestResponse(RestStatus.OK, builder); + } + }); + } + + @Override + public String getName() { + return "xpack_trial_status_action"; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartBasicLicense.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartBasicLicense.java new file mode 100644 index 0000000000000..1c07faafd992f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartBasicLicense.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.XPackClient; +import org.elasticsearch.xpack.core.rest.XPackRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +public class RestPostStartBasicLicense extends XPackRestHandler { + + RestPostStartBasicLicense(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(POST, URI_BASE + "/license/start_basic", this); + } + + @Override + protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException { + PostStartBasicRequest startBasicRequest = new PostStartBasicRequest(); + startBasicRequest.acknowledge(request.paramAsBoolean("acknowledge", false)); + startBasicRequest.timeout(request.paramAsTime("timeout", startBasicRequest.timeout())); + startBasicRequest.masterNodeTimeout(request.paramAsTime("master_timeout", startBasicRequest.masterNodeTimeout())); + return channel -> client.licensing().postStartBasic(startBasicRequest, new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(PostStartBasicResponse response, XContentBuilder builder) throws Exception { + PostStartBasicResponse.Status status = response.getStatus(); + response.toXContent(builder, ToXContent.EMPTY_PARAMS); + return new BytesRestResponse(status.getRestStatus(), builder); + } + }); + } + + @Override + public String getName() { + return "xpack_start_basic_action"; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java new file mode 100644 index 0000000000000..af738b9aadf7f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.XPackClient; +import org.elasticsearch.xpack.core.rest.XPackRestHandler; + +import java.io.IOException; +import java.util.Map; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +public class RestPostStartTrialLicense extends XPackRestHandler { + + RestPostStartTrialLicense(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(POST, URI_BASE + "/license/start_trial", this); + } + + @Override + protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException { + PostStartTrialRequest startTrialRequest = new PostStartTrialRequest(); + startTrialRequest.setType(request.param("type", "trial")); + startTrialRequest.acknowledge(request.paramAsBoolean("acknowledge", false)); + return channel -> client.licensing().postStartTrial(startTrialRequest, + new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(PostStartTrialResponse response, XContentBuilder builder) throws Exception { + PostStartTrialResponse.Status status = response.getStatus(); + builder.startObject(); + builder.field("acknowledged", startTrialRequest.isAcknowledged()); + if (status.isTrialStarted()) { + builder.field("trial_was_started", true); + builder.field("type", startTrialRequest.getType()); + } else { + builder.field("trial_was_started", false); + builder.field("error_message", status.getErrorMessage()); + } + + Map acknowledgementMessages = response.getAcknowledgementMessages(); + if (acknowledgementMessages.isEmpty() == false) { + builder.startObject("acknowledge"); + builder.field("message", response.getAcknowledgementMessage()); + for (Map.Entry entry : acknowledgementMessages.entrySet()) { + builder.startArray(entry.getKey()); + for (String message : entry.getValue()) { + builder.value(message); + } + builder.endArray(); + } + builder.endObject(); + } + builder.endObject(); + return new BytesRestResponse(status.getRestStatus(), builder); + } + }); + } + + @Override + public String getName() { + return "xpack_upgrade_to_trial_action"; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPutLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPutLicenseAction.java new file mode 100644 index 0000000000000..0a3a6ea2394cf --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPutLicenseAction.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.XPackClient; +import org.elasticsearch.xpack.core.rest.XPackRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestRequest.Method.PUT; + +public class RestPutLicenseAction extends XPackRestHandler { + + public RestPutLicenseAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(POST, URI_BASE + "/license", this); + controller.registerHandler(PUT, URI_BASE + "/license", this); + } + + @Override + public String getName() { + return "xpack_put_license_action"; + } + + @Override + public RestChannelConsumer doPrepareRequest(final RestRequest request, final XPackClient client) throws IOException { + if (request.hasContent() == false) { + throw new IllegalArgumentException("The license must be provided in the request body"); + } + PutLicenseRequest putLicenseRequest = new PutLicenseRequest(); + putLicenseRequest.license(request.content(), request.getXContentType()); + putLicenseRequest.acknowledge(request.paramAsBoolean("acknowledge", false)); + putLicenseRequest.timeout(request.paramAsTime("timeout", putLicenseRequest.timeout())); + putLicenseRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putLicenseRequest.masterNodeTimeout())); + + if ("basic".equals(putLicenseRequest.license().type())) { + throw new IllegalArgumentException("Installing basic licenses is no longer allowed. Use the POST " + + "/_xpack/license/start_basic API to install a basic license that does not expire."); + } + + return channel -> client.es().admin().cluster().execute(PutLicenseAction.INSTANCE, putLicenseRequest, + new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/SelfGeneratedLicense.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/SelfGeneratedLicense.java new file mode 100644 index 0000000000000..7ec6b0b95eb74 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/SelfGeneratedLicense.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Base64; +import java.util.Collections; + +import static org.elasticsearch.license.CryptUtils.decrypt; +import static org.elasticsearch.license.CryptUtils.encrypt; + +class SelfGeneratedLicense { + + public static License create(License.Builder specBuilder) { + License spec = specBuilder + .issuer("elasticsearch") + .version(License.VERSION_CURRENT) + .build(); + final String signature; + try { + XContentBuilder contentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + spec.toXContent(contentBuilder, new ToXContent.MapParams(Collections.singletonMap(License.LICENSE_SPEC_VIEW_MODE, "true"))); + byte[] encrypt = encrypt(BytesReference.toBytes(BytesReference.bytes(contentBuilder))); + byte[] bytes = new byte[4 + 4 + encrypt.length]; + ByteBuffer byteBuffer = ByteBuffer.wrap(bytes); + // always generate license version -VERSION_CURRENT + byteBuffer.putInt(-License.VERSION_CURRENT) + .putInt(encrypt.length) + .put(encrypt); + signature = Base64.getEncoder().encodeToString(bytes); + } catch (IOException e) { + throw new IllegalStateException(e); + } + return License.builder().fromLicenseSpec(spec, signature).build(); + } + + public static boolean verify(final License license) { + try { + byte[] signatureBytes = Base64.getDecoder().decode(license.signature()); + ByteBuffer byteBuffer = ByteBuffer.wrap(signatureBytes); + int version = byteBuffer.getInt(); + int contentLen = byteBuffer.getInt(); + byte[] content = new byte[contentLen]; + byteBuffer.get(content); + final License expectedLicense; + // EMPTY is safe here because we don't call namedObject + try (XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, decrypt(content))) { + parser.nextToken(); + expectedLicense = License.builder().fromLicenseSpec(License.fromXContent(parser), + license.signature()).version(-version).build(); + } + return license.equals(expectedLicense); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + public static boolean validSelfGeneratedType(String type) { + return "basic".equals(type) || "trial".equals(type); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java new file mode 100644 index 0000000000000..355482872d629 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
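Editor's note: `SelfGeneratedLicense.create` above packs the self-generated "signature" as Base64 over a small binary envelope: the negated license version, the encrypted spec length, then the encrypted spec. The self-contained sketch below reproduces only that framing, with plain UTF-8 bytes in place of `CryptUtils.encrypt`/`decrypt`, so it models the envelope, not the real signature.

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

// Framing used by SelfGeneratedLicense: [ -version (int) | length (int) | payload ], Base64-encoded.
// The payload here is plain text; the real code encrypts the license spec first.
final class SelfGeneratedSignatureSketch {
    static String pack(int licenseVersion, byte[] payload) {
        ByteBuffer buffer = ByteBuffer.allocate(4 + 4 + payload.length);
        buffer.putInt(-licenseVersion)   // the negative value marks a self-generated license
              .putInt(payload.length)
              .put(payload);
        return Base64.getEncoder().encodeToString(buffer.array());
    }

    static byte[] unpack(String signature) {
        ByteBuffer buffer = ByteBuffer.wrap(Base64.getDecoder().decode(signature));
        int version = -buffer.getInt();          // recover the (positive) license version
        byte[] payload = new byte[buffer.getInt()];
        buffer.get(payload);
        System.out.println("self-generated license version " + version);
        return payload;
    }

    public static void main(String[] args) {
        String spec = "{\"type\":\"trial\",\"issued_to\":\"my-cluster\"}";
        byte[] roundTripped = unpack(pack(4, spec.getBytes(StandardCharsets.UTF_8)));
        System.out.println(new String(roundTripped, StandardCharsets.UTF_8));
    }
}
```

The real `verify()` goes one step further: it decrypts the payload, re-parses it into a `License`, and compares that reconstruction against the installed license.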
+ */ +package org.elasticsearch.license; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Nullable; + +import java.time.Clock; +import java.util.Collections; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicReference; + +public class StartBasicClusterTask extends ClusterStateUpdateTask { + + private static final String ACKNOWLEDGEMENT_HEADER = "This license update requires acknowledgement. To acknowledge the license, " + + "please read the following messages and call /start_basic again, this time with the \"acknowledge=true\" parameter:"; + + private final Logger logger; + private final String clusterName; + private final PostStartBasicRequest request; + private final ActionListener listener; + private final Clock clock; + private AtomicReference> ackMessages = new AtomicReference<>(Collections.emptyMap()); + + StartBasicClusterTask(Logger logger, String clusterName, Clock clock, PostStartBasicRequest request, + ActionListener listener) { + this.logger = logger; + this.clusterName = clusterName; + this.request = request; + this.listener = listener; + this.clock = clock; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + LicensesMetaData oldLicensesMetaData = oldState.metaData().custom(LicensesMetaData.TYPE); + logger.debug("license prior to starting basic license: {}", oldLicensesMetaData); + License oldLicense = LicensesMetaData.extractLicense(oldLicensesMetaData); + Map acknowledgeMessages = ackMessages.get(); + if (acknowledgeMessages.isEmpty() == false) { + listener.onResponse(new PostStartBasicResponse(PostStartBasicResponse.Status.NEED_ACKNOWLEDGEMENT, acknowledgeMessages, + ACKNOWLEDGEMENT_HEADER)); + } else if (oldLicense != null && oldLicense.type().equals("basic")) { + listener.onResponse(new PostStartBasicResponse(PostStartBasicResponse.Status.ALREADY_USING_BASIC)); + } else { + listener.onResponse(new PostStartBasicResponse(PostStartBasicResponse.Status.GENERATED_BASIC)); + } + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + LicensesMetaData licensesMetaData = currentState.metaData().custom(LicensesMetaData.TYPE); + License currentLicense = LicensesMetaData.extractLicense(licensesMetaData); + if (currentLicense == null || currentLicense.type().equals("basic") == false) { + long issueDate = clock.millis(); + MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); + License.Builder specBuilder = License.builder() + .uid(UUID.randomUUID().toString()) + .issuedTo(clusterName) + .maxNodes(LicenseService.SELF_GENERATED_LICENSE_MAX_NODES) + .issueDate(issueDate) + .type("basic") + .expiryDate(LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS); + License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder); + if (request.isAcknowledged() == false && currentLicense != null) { + Map ackMessages = LicenseService.getAckMessages(selfGeneratedLicense, currentLicense); + if (ackMessages.isEmpty() == false) { + this.ackMessages.set(ackMessages); + return currentState; + } + } + Version trialVersion = null; + if (licensesMetaData != null) { + trialVersion = 
licensesMetaData.getMostRecentTrialVersion(); + } + LicensesMetaData newLicensesMetaData = new LicensesMetaData(selfGeneratedLicense, trialVersion); + mdBuilder.putCustom(LicensesMetaData.TYPE, newLicensesMetaData); + return ClusterState.builder(currentState).metaData(mdBuilder).build(); + } else { + return currentState; + } + } + + @Override + public void onFailure(String source, @Nullable Exception e) { + logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e); + listener.onFailure(e); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java new file mode 100644 index 0000000000000..355672dedf717 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Nullable; + +import java.time.Clock; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +public class StartTrialClusterTask extends ClusterStateUpdateTask { + + private static final String ACKNOWLEDGEMENT_HEADER = "This API initiates a free 30-day trial for all platinum features. " + + "By starting this trial, you agree that it is subject to the terms and conditions at" + + " https://www.elastic.co/legal/trial_license/. 
To begin your free trial, call /start_trial again and specify " + + "the \"acknowledge=true\" parameter."; + + private static final Map ACK_MESSAGES = Collections.singletonMap("security", + new String[] {"With a trial license, X-Pack security features are available, but are not enabled by default."}); + + private final Logger logger; + private final String clusterName; + private final PostStartTrialRequest request; + private final ActionListener listener; + private final Clock clock; + + StartTrialClusterTask(Logger logger, String clusterName, Clock clock, PostStartTrialRequest request, + ActionListener listener) { + this.logger = logger; + this.clusterName = clusterName; + this.request = request; + this.listener = listener; + this.clock = clock; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + LicensesMetaData oldLicensesMetaData = oldState.metaData().custom(LicensesMetaData.TYPE); + logger.debug("started self generated trial license: {}", oldLicensesMetaData); + + if (request.isAcknowledged() == false) { + listener.onResponse(new PostStartTrialResponse(PostStartTrialResponse.Status.NEED_ACKNOWLEDGEMENT, + ACK_MESSAGES, ACKNOWLEDGEMENT_HEADER)); + } else if (oldLicensesMetaData == null || oldLicensesMetaData.isEligibleForTrial()) { + listener.onResponse(new PostStartTrialResponse(PostStartTrialResponse.Status.UPGRADED_TO_TRIAL)); + } else { + listener.onResponse(new PostStartTrialResponse(PostStartTrialResponse.Status.TRIAL_ALREADY_ACTIVATED)); + } + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + LicensesMetaData currentLicensesMetaData = currentState.metaData().custom(LicensesMetaData.TYPE); + + if (request.isAcknowledged() == false) { + return currentState; + } else if (currentLicensesMetaData == null || currentLicensesMetaData.isEligibleForTrial()) { + long issueDate = clock.millis(); + MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); + long expiryDate = issueDate + LicenseService.NON_BASIC_SELF_GENERATED_LICENSE_DURATION.getMillis(); + + License.Builder specBuilder = License.builder() + .uid(UUID.randomUUID().toString()) + .issuedTo(clusterName) + .maxNodes(LicenseService.SELF_GENERATED_LICENSE_MAX_NODES) + .issueDate(issueDate) + .type(request.getType()) + .expiryDate(expiryDate); + License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder); + LicensesMetaData newLicensesMetaData = new LicensesMetaData(selfGeneratedLicense, Version.CURRENT); + mdBuilder.putCustom(LicensesMetaData.TYPE, newLicensesMetaData); + return ClusterState.builder(currentState).metaData(mdBuilder).build(); + } else { + return currentState; + } + } + + @Override + public void onFailure(String source, @Nullable Exception e) { + logger.error(new ParameterizedMessage("unexpected failure during [{}]", source), e); + listener.onFailure(e); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java new file mode 100644 index 0000000000000..ef654513c80ca --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
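Editor's note: the two cluster-state tasks above share the same three-way outcome: an unacknowledged request is answered with `NEED_ACKNOWLEDGEMENT` and leaves the cluster state untouched, an eligible cluster gets a newly generated license, and anything else is reported as already activated (or already basic). A small, self-contained model of that decision for the trial case, with the cluster-state and listener plumbing stripped away:

```java
// Simplified, stand-alone model of StartTrialClusterTask's decision; the real task inspects
// LicensesMetaData from the cluster state and responds through an ActionListener.
final class StartTrialDecision {
    enum Outcome { NEED_ACKNOWLEDGEMENT, UPGRADED_TO_TRIAL, TRIAL_ALREADY_ACTIVATED }

    static Outcome decide(boolean acknowledged, boolean eligibleForTrial) {
        if (!acknowledged) {
            return Outcome.NEED_ACKNOWLEDGEMENT;            // execute() returns currentState unchanged
        }
        return eligibleForTrial ? Outcome.UPGRADED_TO_TRIAL // execute() installs the self-generated license
                                : Outcome.TRIAL_ALREADY_ACTIVATED;
    }

    public static void main(String[] args) {
        System.out.println(decide(false, true));  // NEED_ACKNOWLEDGEMENT
        System.out.println(decide(true, true));   // UPGRADED_TO_TRIAL
        System.out.println(decide(true, false));  // TRIAL_ALREADY_ACTIVATED
    }
}
```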
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; + +import java.time.Clock; +import java.util.UUID; + +public class StartupSelfGeneratedLicenseTask extends ClusterStateUpdateTask { + + /** + * Max number of nodes licensed by generated trial license + */ + private int selfGeneratedLicenseMaxNodes = 1000; + + private final Settings settings; + private final Clock clock; + private final ClusterService clusterService; + private final Logger logger; + + public StartupSelfGeneratedLicenseTask(Settings settings, Clock clock, ClusterService clusterService) { + this.settings = settings; + this.clock = clock; + this.clusterService = clusterService; + this.logger = Loggers.getLogger(getClass(), settings); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + LicensesMetaData licensesMetaData = newState.metaData().custom(LicensesMetaData.TYPE); + if (logger.isDebugEnabled()) { + logger.debug("registered self generated license: {}", licensesMetaData); + } + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + final MetaData metaData = currentState.metaData(); + final LicensesMetaData currentLicensesMetaData = metaData.custom(LicensesMetaData.TYPE); + // do not generate a license if any license is present + if (currentLicensesMetaData == null) { + String type = LicenseService.SELF_GENERATED_LICENSE_TYPE.get(settings); + if (SelfGeneratedLicense.validSelfGeneratedType(type) == false) { + throw new IllegalArgumentException("Illegal self generated license type [" + type + + "]. Must be trial or basic."); + } + + return updateWithLicense(currentState, type); + } else if (LicenseUtils.licenseNeedsExtended(currentLicensesMetaData.getLicense())) { + return extendBasic(currentState, currentLicensesMetaData); + } else { + return currentState; + } + } + + @Override + public void onFailure(String source, @Nullable Exception e) { + logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + } + + private ClusterState extendBasic(ClusterState currentState, LicensesMetaData currentLicenseMetadata) { + License license = currentLicenseMetadata.getLicense(); + MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); + LicensesMetaData newLicenseMetadata = createBasicLicenseFromExistingLicense(currentLicenseMetadata); + mdBuilder.putCustom(LicensesMetaData.TYPE, newLicenseMetadata); + logger.info("Existing basic license has an expiration. Basic licenses no longer expire." 
+ + "Regenerating license.\n\nOld license:\n {}\n\n New license:\n{}", license, newLicenseMetadata.getLicense()); + return ClusterState.builder(currentState).metaData(mdBuilder).build(); + } + + private LicensesMetaData createBasicLicenseFromExistingLicense(LicensesMetaData currentLicenseMetadata) { + License currentLicense = currentLicenseMetadata.getLicense(); + License.Builder specBuilder = License.builder() + .uid(currentLicense.uid()) + .issuedTo(currentLicense.issuedTo()) + .maxNodes(selfGeneratedLicenseMaxNodes) + .issueDate(currentLicense.issueDate()) + .type("basic") + .expiryDate(LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS); + License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder); + Version trialVersion = currentLicenseMetadata.getMostRecentTrialVersion(); + return new LicensesMetaData(selfGeneratedLicense, trialVersion); + } + + private ClusterState updateWithLicense(ClusterState currentState, String type) { + long issueDate = clock.millis(); + MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); + long expiryDate; + if ("basic".equals(type)) { + expiryDate = LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS; + } else { + expiryDate = issueDate + LicenseService.NON_BASIC_SELF_GENERATED_LICENSE_DURATION.getMillis(); + } + License.Builder specBuilder = License.builder() + .uid(UUID.randomUUID().toString()) + .issuedTo(clusterService.getClusterName().value()) + .maxNodes(selfGeneratedLicenseMaxNodes) + .issueDate(issueDate) + .type(type) + .expiryDate(expiryDate); + License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder); + LicensesMetaData licensesMetaData; + if ("trial".equals(type)) { + licensesMetaData = new LicensesMetaData(selfGeneratedLicense, Version.CURRENT); + } else { + licensesMetaData = new LicensesMetaData(selfGeneratedLicense, null); + } + mdBuilder.putCustom(LicensesMetaData.TYPE, licensesMetaData); + return ClusterState.builder(currentState).metaData(mdBuilder).build(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java new file mode 100644 index 0000000000000..a1d57684a1737 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +public class TransportDeleteLicenseAction extends TransportMasterNodeAction { + + private final LicenseService licenseService; + + @Inject + public TransportDeleteLicenseAction(Settings settings, TransportService transportService, ClusterService clusterService, + LicenseService licenseService, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, DeleteLicenseAction.NAME, transportService, clusterService, threadPool, actionFilters, + indexNameExpressionResolver, DeleteLicenseRequest::new); + this.licenseService = licenseService; + } + + @Override + protected String executor() { + return ThreadPool.Names.MANAGEMENT; + } + + @Override + protected DeleteLicenseResponse newResponse() { + return new DeleteLicenseResponse(); + } + + @Override + protected ClusterBlockException checkBlock(DeleteLicenseRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected void masterOperation(final DeleteLicenseRequest request, ClusterState state, final ActionListener + listener) throws ElasticsearchException { + licenseService.removeLicense(request, new ActionListener() { + @Override + public void onResponse(ClusterStateUpdateResponse clusterStateUpdateResponse) { + listener.onResponse(new DeleteLicenseResponse(clusterStateUpdateResponse.isAcknowledged())); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetBasicStatusAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetBasicStatusAction.java new file mode 100644 index 0000000000000..2b9dadddabeac --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetBasicStatusAction.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +public class TransportGetBasicStatusAction extends TransportMasterNodeReadAction { + + @Inject + public TransportGetBasicStatusAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, GetBasicStatusAction.NAME, transportService, clusterService, threadPool, actionFilters, + GetBasicStatusRequest::new, indexNameExpressionResolver); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected GetBasicStatusResponse newResponse() { + return new GetBasicStatusResponse(); + } + + @Override + protected void masterOperation(GetBasicStatusRequest request, ClusterState state, + ActionListener listener) throws Exception { + LicensesMetaData licensesMetaData = state.metaData().custom(LicensesMetaData.TYPE); + if (licensesMetaData == null) { + listener.onResponse(new GetBasicStatusResponse(true)); + } else { + License license = licensesMetaData.getLicense(); + listener.onResponse(new GetBasicStatusResponse(license == null || license.type().equals("basic") == false)); + } + + } + + @Override + protected ClusterBlockException checkBlock(GetBasicStatusRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java new file mode 100644 index 0000000000000..ba6da84f19bb5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +public class TransportGetLicenseAction extends TransportMasterNodeReadAction { + + private final LicenseService licenseService; + + @Inject + public TransportGetLicenseAction(Settings settings, TransportService transportService, ClusterService clusterService, + LicenseService licenseService, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, GetLicenseAction.NAME, transportService, clusterService, threadPool, actionFilters, + GetLicenseRequest::new, indexNameExpressionResolver); + this.licenseService = licenseService; + } + + @Override + protected String executor() { + return ThreadPool.Names.MANAGEMENT; + } + + @Override + protected GetLicenseResponse newResponse() { + return new GetLicenseResponse(); + } + + @Override + protected ClusterBlockException checkBlock(GetLicenseRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } + + @Override + protected void masterOperation(final GetLicenseRequest request, ClusterState state, + final ActionListener listener) throws ElasticsearchException { + listener.onResponse(new GetLicenseResponse(licenseService.getLicense())); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetTrialStatusAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetTrialStatusAction.java new file mode 100644 index 0000000000000..028bf45d873a9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetTrialStatusAction.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +public class TransportGetTrialStatusAction extends TransportMasterNodeReadAction { + + @Inject + public TransportGetTrialStatusAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, GetTrialStatusAction.NAME, transportService, clusterService, threadPool, actionFilters, + GetTrialStatusRequest::new, indexNameExpressionResolver); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected GetTrialStatusResponse newResponse() { + return new GetTrialStatusResponse(); + } + + @Override + protected void masterOperation(GetTrialStatusRequest request, ClusterState state, + ActionListener listener) throws Exception { + LicensesMetaData licensesMetaData = state.metaData().custom(LicensesMetaData.TYPE); + listener.onResponse(new GetTrialStatusResponse(licensesMetaData == null || licensesMetaData.isEligibleForTrial())); + + } + + @Override + protected ClusterBlockException checkBlock(GetTrialStatusRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPostStartBasicAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPostStartBasicAction.java new file mode 100644 index 0000000000000..0e9316ab2b33a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPostStartBasicAction.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +public class TransportPostStartBasicAction extends TransportMasterNodeAction { + + private final LicenseService licenseService; + + @Inject + public TransportPostStartBasicAction(Settings settings, TransportService transportService, ClusterService clusterService, + LicenseService licenseService, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, PostStartBasicAction.NAME, transportService, clusterService, threadPool, actionFilters, + indexNameExpressionResolver, PostStartBasicRequest::new); + this.licenseService = licenseService; + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected PostStartBasicResponse newResponse() { + return new PostStartBasicResponse(); + } + + @Override + protected void masterOperation(PostStartBasicRequest request, ClusterState state, + ActionListener listener) throws Exception { + licenseService.startBasicLicense(request, listener); + } + + @Override + protected ClusterBlockException checkBlock(PostStartBasicRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPostStartTrialAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPostStartTrialAction.java new file mode 100644 index 0000000000000..e70662cdc3131 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPostStartTrialAction.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +public class TransportPostStartTrialAction extends TransportMasterNodeAction { + + private final LicenseService licenseService; + + @Inject + public TransportPostStartTrialAction(Settings settings, TransportService transportService, ClusterService clusterService, + LicenseService licenseService, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, PostStartTrialAction.NAME, transportService, clusterService, threadPool, actionFilters, + indexNameExpressionResolver, PostStartTrialRequest::new); + this.licenseService = licenseService; + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected PostStartTrialResponse newResponse() { + return new PostStartTrialResponse(); + } + + @Override + protected void masterOperation(PostStartTrialRequest request, ClusterState state, + ActionListener listener) throws Exception { + licenseService.startTrialLicense(request, listener); + } + + @Override + protected ClusterBlockException checkBlock(PostStartTrialRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPutLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPutLicenseAction.java new file mode 100644 index 0000000000000..032d1eb6e86d1 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportPutLicenseAction.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +public class TransportPutLicenseAction extends TransportMasterNodeAction { + + private final LicenseService licenseService; + + @Inject + public TransportPutLicenseAction(Settings settings, TransportService transportService, ClusterService clusterService, + LicenseService licenseService, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, PutLicenseAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + PutLicenseRequest::new); + this.licenseService = licenseService; + } + + @Override + protected String executor() { + return ThreadPool.Names.MANAGEMENT; + } + + @Override + protected PutLicenseResponse newResponse() { + return new PutLicenseResponse(); + } + + @Override + protected ClusterBlockException checkBlock(PutLicenseRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected void masterOperation(final PutLicenseRequest request, ClusterState state, final ActionListener + listener) throws ElasticsearchException { + licenseService.registerLicense(request, listener); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackInfoResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackInfoResponse.java new file mode 100644 index 0000000000000..7c2886345470c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackInfoResponse.java @@ -0,0 +1,296 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.XPackBuild; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +public class XPackInfoResponse extends ActionResponse { + + @Nullable private BuildInfo buildInfo; + @Nullable private LicenseInfo licenseInfo; + @Nullable private FeatureSetsInfo featureSetsInfo; + + public XPackInfoResponse() {} + + public XPackInfoResponse(@Nullable BuildInfo buildInfo, @Nullable LicenseInfo licenseInfo, @Nullable FeatureSetsInfo featureSetsInfo) { + this.buildInfo = buildInfo; + this.licenseInfo = licenseInfo; + this.featureSetsInfo = featureSetsInfo; + } + + /** + * @return The build info (incl. build hash and timestamp) + */ + public BuildInfo getBuildInfo() { + return buildInfo; + } + + /** + * @return The current license info (incl. UID, type/mode. status and expiry date). May return {@code null} when no + * license is currently installed. + */ + public LicenseInfo getLicenseInfo() { + return licenseInfo; + } + + /** + * @return The current status of the feature sets in X-Pack. Feature sets describe the features available/enabled in X-Pack. + */ + public FeatureSetsInfo getFeatureSetsInfo() { + return featureSetsInfo; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalWriteable(buildInfo); + out.writeOptionalWriteable(licenseInfo); + out.writeOptionalWriteable(featureSetsInfo); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + this.buildInfo = in.readOptionalWriteable(BuildInfo::new); + this.licenseInfo = in.readOptionalWriteable(LicenseInfo::new); + this.featureSetsInfo = in.readOptionalWriteable(FeatureSetsInfo::new); + } + + public static class LicenseInfo implements ToXContentObject, Writeable { + + private final String uid; + private final String type; + private final String mode; + private final long expiryDate; + private final License.Status status; + + public LicenseInfo(License license) { + this(license.uid(), license.type(), license.operationMode().name().toLowerCase(Locale.ROOT), + license.status(), license.expiryDate()); + } + + public LicenseInfo(StreamInput in) throws IOException { + this(in.readString(), in.readString(), in.readString(), License.Status.readFrom(in), in.readLong()); + } + + public LicenseInfo(String uid, String type, String mode, License.Status status, long expiryDate) { + this.uid = uid; + this.type = type; + this.mode = mode; + this.status = status; + this.expiryDate = expiryDate; + } + + public String getUid() { + return uid; + } + + public String getType() { + return type; + } + + public String getMode() { + return mode; + } + + public long getExpiryDate() { + return expiryDate; + } + + public License.Status getStatus() { + return status; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + 
.field("uid", uid) + .field("type", type) + .field("mode", mode) + .field("status", status.label()) + .timeField("expiry_date_in_millis", "expiry_date", expiryDate) + .endObject(); + } + + public void writeTo(StreamOutput out) throws IOException { + out.writeString(uid); + out.writeString(type); + out.writeString(mode); + status.writeTo(out); + out.writeLong(expiryDate); + } + } + + public static class BuildInfo implements ToXContentObject, Writeable { + + private final String hash; + private final String timestamp; + + public BuildInfo(XPackBuild build) { + this(build.shortHash(), build.date()); + } + + public BuildInfo(StreamInput input) throws IOException { + this(input.readString(), input.readString()); + } + + public BuildInfo(String hash, String timestamp) { + this.hash = hash; + this.timestamp = timestamp; + } + + public String getHash() { + return hash; + } + + public String getTimestamp() { + return timestamp; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field("hash", hash) + .field("date", timestamp) + .endObject(); + } + + public void writeTo(StreamOutput output) throws IOException { + output.writeString(hash); + output.writeString(timestamp); + } + } + + public static class FeatureSetsInfo implements ToXContentObject, Writeable { + + private final Map featureSets; + + public FeatureSetsInfo(StreamInput in) throws IOException { + int size = in.readVInt(); + Map featureSets = new HashMap<>(size); + for (int i = 0; i < size; i++) { + FeatureSet featureSet = new FeatureSet(in); + featureSets.put(featureSet.name, featureSet); + } + this.featureSets = Collections.unmodifiableMap(featureSets); + } + + public FeatureSetsInfo(Set featureSets) { + Map map = new HashMap<>(featureSets.size()); + for (FeatureSet featureSet : featureSets) { + map.put(featureSet.name, featureSet); + } + this.featureSets = Collections.unmodifiableMap(map); + } + + public Map getFeatureSets() { + return featureSets; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + List names = new ArrayList<>(this.featureSets.keySet()).stream().sorted().collect(Collectors.toList()); + for (String name : names) { + builder.field(name, featureSets.get(name), params); + } + return builder.endObject(); + } + + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(featureSets.size()); + for (FeatureSet featureSet : featureSets.values()) { + featureSet.writeTo(out); + } + } + + public static class FeatureSet implements ToXContentObject, Writeable { + + private final String name; + @Nullable private final String description; + private final boolean available; + private final boolean enabled; + @Nullable private final Map nativeCodeInfo; + + public FeatureSet(StreamInput in) throws IOException { + this(in.readString(), in.readOptionalString(), in.readBoolean(), in.readBoolean(), + in.getVersion().onOrAfter(Version.V_5_4_0) ? 
in.readMap() : null); + } + + public FeatureSet(String name, @Nullable String description, boolean available, boolean enabled, + @Nullable Map nativeCodeInfo) { + this.name = name; + this.description = description; + this.available = available; + this.enabled = enabled; + this.nativeCodeInfo = nativeCodeInfo; + } + + public String name() { + return name; + } + + @Nullable + public String description() { + return description; + } + + public boolean available() { + return available; + } + + public boolean enabled() { + return enabled; + } + + @Nullable + public Map nativeCodeInfo() { + return nativeCodeInfo; + } + + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (description != null) { + builder.field("description", description); + } + builder.field("available", available); + builder.field("enabled", enabled); + if (nativeCodeInfo != null) { + builder.field("native_code_info", nativeCodeInfo); + } + return builder.endObject(); + } + + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeOptionalString(description); + out.writeBoolean(available); + out.writeBoolean(enabled); + if (out.getVersion().onOrAfter(Version.V_5_4_0)) { + out.writeMap(nativeCodeInfo); + } + } + } + + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java new file mode 100644 index 0000000000000..2e4caff1a725d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -0,0 +1,576 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.LoggerMessageFormat; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.License.OperationMode; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.monitoring.MonitoringField; + +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.BiFunction; + +/** + * A holder for the current state of the license for all xpack features. + */ +public class XPackLicenseState { + + /** Messages for each feature which are printed when the license expires. 
*/ + static final Map EXPIRATION_MESSAGES; + static { + Map messages = new LinkedHashMap<>(); + messages.put(XPackField.SECURITY, new String[] { + "Cluster health, cluster stats and indices stats operations are blocked", + "All data operations (read and write) continue to work" + }); + messages.put(XPackField.WATCHER, new String[] { + "PUT / GET watch APIs are disabled, DELETE watch API continues to work", + "Watches execute and write to the history", + "The actions of the watches don't execute" + }); + messages.put(XPackField.MONITORING, new String[] { + "The agent will stop collecting cluster and indices metrics", + "The agent will stop automatically cleaning indices older than [xpack.monitoring.history.duration]" + }); + messages.put(XPackField.GRAPH, new String[] { + "Graph explore APIs are disabled" + }); + messages.put(XPackField.MACHINE_LEARNING, new String[] { + "Machine learning APIs are disabled" + }); + messages.put(XPackField.LOGSTASH, new String[] { + "Logstash will continue to poll centrally-managed pipelines" + }); + messages.put(XPackField.DEPRECATION, new String[] { + "Deprecation APIs are disabled" + }); + messages.put(XPackField.UPGRADE, new String[] { + "Upgrade API is disabled" + }); + messages.put(XPackField.SQL, new String[] { + "SQL support is disabled" + }); + messages.put(XPackField.ROLLUP, new String[] { + "Creating and Starting rollup jobs will no longer be allowed.", + "Stopping/Deleting existing jobs, RollupCaps API and RollupSearch continue to function." + }); + EXPIRATION_MESSAGES = Collections.unmodifiableMap(messages); + } + + /** + * Messages for each feature which are printed when the license type changes. + * The value is a function taking the old and new license type, and returns the messages for that feature. + */ + static final Map> ACKNOWLEDGMENT_MESSAGES; + static { + Map> messages = new LinkedHashMap<>(); + messages.put(XPackField.SECURITY, XPackLicenseState::securityAcknowledgementMessages); + messages.put(XPackField.WATCHER, XPackLicenseState::watcherAcknowledgementMessages); + messages.put(XPackField.MONITORING, XPackLicenseState::monitoringAcknowledgementMessages); + messages.put(XPackField.GRAPH, XPackLicenseState::graphAcknowledgementMessages); + messages.put(XPackField.MACHINE_LEARNING, XPackLicenseState::machineLearningAcknowledgementMessages); + messages.put(XPackField.LOGSTASH, XPackLicenseState::logstashAcknowledgementMessages); + messages.put(XPackField.SQL, XPackLicenseState::sqlAcknowledgementMessages); + ACKNOWLEDGMENT_MESSAGES = Collections.unmodifiableMap(messages); + } + + private static String[] securityAcknowledgementMessages(OperationMode currentMode, OperationMode newMode) { + switch (newMode) { + case BASIC: + switch (currentMode) { + case TRIAL: + case STANDARD: + case GOLD: + case PLATINUM: + return new String[] { + "The following X-Pack security functionality will be disabled: authentication, authorization, " + + "ip filtering, and auditing. Please restart your node after applying the license.", + "Field and document level access control will be disabled.", + "Custom realms will be ignored." + }; + } + break; + case GOLD: + switch (currentMode) { + case BASIC: + case STANDARD: + // ^^ though technically it was already disabled, it's not bad to remind them + case TRIAL: + case PLATINUM: + return new String[] { + "Field and document level access control will be disabled.", + "Custom realms will be ignored." 
+ }; + } + break; + case STANDARD: + switch (currentMode) { + case BASIC: + // ^^ though technically it was already disabled, it's not bad to remind them + case GOLD: + case PLATINUM: + case TRIAL: + return new String[] { + "Authentication will be limited to the native realms.", + "IP filtering and auditing will be disabled.", + "Field and document level access control will be disabled.", + "Custom realms will be ignored." + }; + } + } + return Strings.EMPTY_ARRAY; + } + + private static String[] watcherAcknowledgementMessages(OperationMode currentMode, OperationMode newMode) { + switch (newMode) { + case BASIC: + switch (currentMode) { + case TRIAL: + case STANDARD: + case GOLD: + case PLATINUM: + return new String[] { "Watcher will be disabled" }; + } + break; + } + return Strings.EMPTY_ARRAY; + } + + private static String[] monitoringAcknowledgementMessages(OperationMode currentMode, OperationMode newMode) { + switch (newMode) { + case BASIC: + switch (currentMode) { + case TRIAL: + case STANDARD: + case GOLD: + case PLATINUM: + return new String[] { + LoggerMessageFormat.format( + "Multi-cluster support is disabled for clusters with [{}] license. If you are\n" + + "running multiple clusters, users won't be able to access the clusters with\n" + + "[{}] licenses from within a single X-Pack Kibana instance. You will have to deploy a\n" + + "separate and dedicated X-pack Kibana instance for each [{}] cluster you wish to monitor.", + newMode, newMode, newMode), + LoggerMessageFormat.format( + "Automatic index cleanup is locked to {} days for clusters with [{}] license.", + MonitoringField.HISTORY_DURATION.getDefault(Settings.EMPTY).days(), newMode) + }; + } + break; + } + return Strings.EMPTY_ARRAY; + } + + private static String[] graphAcknowledgementMessages(OperationMode currentMode, OperationMode newMode) { + switch (newMode) { + case BASIC: + case STANDARD: + case GOLD: + switch (currentMode) { + case TRIAL: + case PLATINUM: + return new String[] { "Graph will be disabled" }; + } + break; + } + return Strings.EMPTY_ARRAY; + } + + private static String[] machineLearningAcknowledgementMessages(OperationMode currentMode, OperationMode newMode) { + switch (newMode) { + case BASIC: + case STANDARD: + case GOLD: + switch (currentMode) { + case TRIAL: + case PLATINUM: + return new String[] { "Machine learning will be disabled" }; + } + break; + } + return Strings.EMPTY_ARRAY; + } + + private static String[] logstashAcknowledgementMessages(OperationMode currentMode, OperationMode newMode) { + switch (newMode) { + case BASIC: + switch (currentMode) { + case TRIAL: + case STANDARD: + case GOLD: + case PLATINUM: + return new String[] { "Logstash will no longer poll for centrally-managed pipelines" }; + } + break; + } + return Strings.EMPTY_ARRAY; + } + + private static String[] sqlAcknowledgementMessages(OperationMode currentMode, OperationMode newMode) { + switch (newMode) { + case BASIC: + case STANDARD: + case GOLD: + switch (currentMode) { + case TRIAL: + case PLATINUM: + return new String[] { "JDBC support will be disabled, but you can continue to use SQL CLI and REST endpoint" }; + } + break; + } + return Strings.EMPTY_ARRAY; + } + + /** A wrapper for the license mode and state, to allow atomically swapping. */ + private static class Status { + + /** The current "mode" of the license (ie license type). */ + final OperationMode mode; + + /** True if the license is active, or false if it is expired. 
*/ + final boolean active; + + Status(OperationMode mode, boolean active) { + this.mode = mode; + this.active = active; + } + } + + private volatile Status status = new Status(OperationMode.TRIAL, true); + private final List listeners = new CopyOnWriteArrayList<>(); + private final boolean isSecurityEnabled; + private final boolean isSecurityExplicitlyEnabled; + + public XPackLicenseState(Settings settings) { + this.isSecurityEnabled = XPackSettings.SECURITY_ENABLED.get(settings); + this.isSecurityExplicitlyEnabled = settings.hasValue(XPackSettings.SECURITY_ENABLED.getKey()) && isSecurityEnabled; + } + + /** Updates the current state of the license, which will change what features are available. */ + void update(OperationMode mode, boolean active) { + status = new Status(mode, active); + listeners.forEach(Runnable::run); + } + + /** Add a listener to be notified on license change */ + public void addListener(Runnable runnable) { + listeners.add(Objects.requireNonNull(runnable)); + } + + /** Remove a listener */ + public void removeListener(Runnable runnable) { + listeners.remove(runnable); + } + + /** Return the current license type. */ + public OperationMode getOperationMode() { + return status.mode; + } + + /** Return true if the license is currently within its time boundaries, false otherwise. */ + public boolean isActive() { + return status.active; + } + + /** + * @return true if authentication and authorization should be enabled. this does not indicate what realms are available + * @see #allowedRealmType() for the enabled realms + */ + public boolean isAuthAllowed() { + OperationMode mode = status.mode; + return mode == OperationMode.STANDARD || mode == OperationMode.GOLD || mode == OperationMode.PLATINUM + || mode == OperationMode.TRIAL; + } + + /** + * @return true if IP filtering should be enabled + */ + public boolean isIpFilteringAllowed() { + OperationMode mode = status.mode; + return mode == OperationMode.GOLD || mode == OperationMode.PLATINUM + || mode == OperationMode.TRIAL; + } + + /** + * @return true if auditing should be enabled + */ + public boolean isAuditingAllowed() { + OperationMode mode = status.mode; + return mode == OperationMode.GOLD || mode == OperationMode.PLATINUM + || mode == OperationMode.TRIAL; + } + + /** + * Indicates whether the stats and health API calls should be allowed. If a license is expired and past the grace + * period then we deny these calls. + * + * @return true if the license allows for the stats and health APIs to be used. + */ + public boolean isStatsAndHealthAllowed() { + return status.active; + } + + /** + * Determine if Document Level Security (DLS) and Field Level Security (FLS) should be enabled. + *

+ * DLS and FLS are only disabled when the mode is not:
+ * <ul>
+ *   <li>{@link OperationMode#PLATINUM}</li>
+ *   <li>{@link OperationMode#TRIAL}</li>
+ * </ul>
+ * Note: This does not consider the state of the license so that Security does not suddenly leak information! + * + * @return {@code true} to enable DLS and FLS. Otherwise {@code false}. + */ + public boolean isDocumentAndFieldLevelSecurityAllowed() { + OperationMode mode = status.mode; + return mode == OperationMode.TRIAL || mode == OperationMode.PLATINUM; + } + + /** Classes of realms that may be available based on the license type. */ + public enum AllowedRealmType { + NONE, + NATIVE, + DEFAULT, + ALL + } + + /** + * @return the type of realms that are enabled based on the license {@link OperationMode} + */ + public AllowedRealmType allowedRealmType() { + switch (status.mode) { + case PLATINUM: + case TRIAL: + return AllowedRealmType.ALL; + case GOLD: + return AllowedRealmType.DEFAULT; + case STANDARD: + return AllowedRealmType.NATIVE; + default: + return AllowedRealmType.NONE; + } + } + + /** + * @return whether custom role providers are allowed based on the license {@link OperationMode} + */ + public boolean isCustomRoleProvidersAllowed() { + final Status localStatus = status; + return (localStatus.mode == OperationMode.PLATINUM || localStatus.mode == OperationMode.TRIAL ) + && localStatus.active; + } + + /** + * Determine if Watcher is available based on the current license. + *

+ * Watcher is available if the license is active (hasn't expired) and of one of the following types:
+ * <ul>
+ *   <li>{@link OperationMode#STANDARD}</li>
+ *   <li>{@link OperationMode#PLATINUM}</li>
+ *   <li>{@link OperationMode#GOLD}</li>
+ *   <li>{@link OperationMode#TRIAL}</li>
+ * </ul>
+ * + * @return {@code true} as long as the license is valid. Otherwise {@code false}. + */ + public boolean isWatcherAllowed() { + // status is volatile, so a local variable is used for a consistent view + Status localStatus = status; + + if (localStatus.active == false) { + return false; + } + + switch (localStatus.mode) { + case TRIAL: + case GOLD: + case PLATINUM: + case STANDARD: + return true; + default: + return false; + } + } + + /** + * Monitoring is always available as long as there is a valid license + * + * @return true if the license is active + */ + public boolean isMonitoringAllowed() { + return status.active; + } + + /** + * Monitoring Cluster Alerts requires the equivalent license to use Watcher. + * + * @return {@link #isWatcherAllowed()} + * @see #isWatcherAllowed() + */ + public boolean isMonitoringClusterAlertsAllowed() { + return isWatcherAllowed(); + } + + /** + * Determine if the current license allows the retention of indices to be modified. + *

+ * Only users with a non-{@link OperationMode#BASIC} license can update the retention period.
+ * <p>
+ * Note: This does not consider the state of the license so that any change is remembered for when they fix their license. + * + * @return {@code true} if the user is allowed to modify the retention. Otherwise {@code false}. + */ + public boolean isUpdateRetentionAllowed() { + final OperationMode mode = status.mode; + return mode != OperationMode.BASIC && mode != OperationMode.MISSING; + } + + /** + * Determine if Graph Exploration should be enabled. + *

+ * Exploration is only disabled when the license has expired or if the mode is not:
+ * <ul>
+ *   <li>{@link OperationMode#PLATINUM}</li>
+ *   <li>{@link OperationMode#TRIAL}</li>
+ * </ul>
+ * + * @return {@code true} as long as the license is valid. Otherwise {@code false}. + */ + public boolean isGraphAllowed() { + // status is volatile + Status localStatus = status; + OperationMode operationMode = localStatus.mode; + + boolean licensed = operationMode == OperationMode.TRIAL || operationMode == OperationMode.PLATINUM; + + return licensed && localStatus.active; + } + + /** + * Determine if Machine Learning should be enabled. + *

+ * Machine Learning is only disabled when the license has expired or if the mode is not:
+ * <ul>
+ *   <li>{@link OperationMode#PLATINUM}</li>
+ *   <li>{@link OperationMode#TRIAL}</li>
+ * </ul>
+ * + * @return {@code true} as long as the license is valid. Otherwise + * {@code false}. + */ + public boolean isMachineLearningAllowed() { + // status is volatile + Status localStatus = status; + OperationMode operationMode = localStatus.mode; + + boolean licensed = operationMode == OperationMode.TRIAL || operationMode == OperationMode.PLATINUM; + + return licensed && localStatus.active; + } + + /** + * Rollup is always available as long as there is a valid license + * + * @return true if the license is active + */ + public boolean isRollupAllowed() { + return status.active; + } + + /** + * Logstash is allowed as long as there is an active license of type TRIAL, STANDARD, GOLD or PLATINUM + * @return {@code true} as long as there is a valid license + */ + public boolean isLogstashAllowed() { + Status localStatus = status; + + if (localStatus.active == false) { + return false; + } + + switch (localStatus.mode) { + case TRIAL: + case GOLD: + case PLATINUM: + case STANDARD: + return true; + default: + return false; + } + } + + /** + * Deprecation APIs are always allowed as long as there is an active license + * @return {@code true} as long as there is a valid license + */ + public boolean isDeprecationAllowed() { + return status.active; + } + + /** + * Determine if Upgrade API should be enabled. + *

+ * Upgrade API is available for all license types except {@link OperationMode#MISSING} + * + * @return {@code true} as long as the license is valid. Otherwise + * {@code false}. + */ + public boolean isUpgradeAllowed() { + // status is volatile + Status localStatus = status; + // Should work on all active licenses + return localStatus.active; + } + + /** + * Determine if SQL support should be enabled. + * <p>
+ * SQL is available for all license types except {@link OperationMode#MISSING} + */ + public boolean isSqlAllowed() { + return status.active; + } + + /** + * Determine if JDBC support should be enabled. + *

+ * JDBC is available only in for {@link OperationMode#PLATINUM} and {@link OperationMode#TRIAL} licences + */ + public boolean isJdbcAllowed() { + // status is volatile + Status localStatus = status; + OperationMode operationMode = localStatus.mode; + + boolean licensed = operationMode == OperationMode.TRIAL || operationMode == OperationMode.PLATINUM; + + return licensed && localStatus.active; + } + + public boolean isTrialLicense() { + return status.mode == OperationMode.TRIAL; + } + + public boolean isSecurityAvailable() { + OperationMode mode = status.mode; + return mode == OperationMode.GOLD || mode == OperationMode.PLATINUM || mode == OperationMode.STANDARD || + mode == OperationMode.TRIAL; + } + + public boolean isSecurityEnabled() { + final OperationMode mode = status.mode; + return mode == OperationMode.TRIAL ? isSecurityExplicitlyEnabled : isSecurityEnabled; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/package-info.java new file mode 100644 index 0000000000000..25373122ea6a3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/package-info.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Licensing for xpack. + * + * A {@link org.elasticsearch.license.License} is a signed set of json properties that determine what features + * are available in a running cluster. Licenses are registered through a + * {@link org.elasticsearch.license.PutLicenseRequest}. This action is handled by the master node, which places + * the signed license into the cluster state. Each node listens for cluster state updates via the + * {@link org.elasticsearch.license.LicenseService}, and updates its local copy of the license when it detects + * changes in the cluster state. + * + * The logic for which features are available given the current license is handled by + * {@link org.elasticsearch.license.XPackLicenseState}, which is updated by the + * {@link org.elasticsearch.license.LicenseService} when the license changes. + */ +package org.elasticsearch.license; \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java new file mode 100644 index 0000000000000..d657d4df809c4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.FilterClient; +import org.elasticsearch.common.util.concurrent.ThreadContext; + +import java.util.function.BiConsumer; +import java.util.function.Supplier; + +/** + * Utility class to help with the execution of requests made using a {@link Client} such that they + * have the origin as a transient and listeners have the appropriate context upon invocation + */ +public final class ClientHelper { + + public static final String ACTION_ORIGIN_TRANSIENT_NAME = "action.origin"; + public static final String SECURITY_ORIGIN = "security"; + public static final String WATCHER_ORIGIN = "watcher"; + public static final String ML_ORIGIN = "ml"; + public static final String MONITORING_ORIGIN = "monitoring"; + public static final String DEPRECATION_ORIGIN = "deprecation"; + public static final String PERSISTENT_TASK_ORIGIN = "persistent_tasks"; + public static final String ROLLUP_ORIGIN = "rollup"; + + private ClientHelper() {} + + /** + * Stashes the current context and sets the origin in the current context. The original context is returned as a stored context + */ + public static ThreadContext.StoredContext stashWithOrigin(ThreadContext threadContext, String origin) { + final ThreadContext.StoredContext storedContext = threadContext.stashContext(); + threadContext.putTransient(ACTION_ORIGIN_TRANSIENT_NAME, origin); + return storedContext; + } + + /** + * Returns a client that will always set the appropriate origin and ensure the proper context is restored by listeners + */ + public static Client clientWithOrigin(Client client, String origin) { + return new ClientWithOrigin(client, origin); + } + + /** + * Executes a consumer after setting the origin and wrapping the listener so that the proper context is restored + */ + public static void executeAsyncWithOrigin( + ThreadContext threadContext, String origin, Request request, ActionListener listener, + BiConsumer> consumer) { + final Supplier supplier = threadContext.newRestorableContext(false); + try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, origin)) { + consumer.accept(request, new ContextPreservingActionListener<>(supplier, listener)); + } + } + + /** + * Executes an asynchronous action using the provided client. 
The origin is set in the context and the listener + * is wrapped to ensure the proper context is restored + */ + public static > void executeAsyncWithOrigin( + Client client, String origin, Action action, Request request, + ActionListener listener) { + final ThreadContext threadContext = client.threadPool().getThreadContext(); + final Supplier supplier = threadContext.newRestorableContext(false); + try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, origin)) { + client.execute(action, request, new ContextPreservingActionListener<>(supplier, listener)); + } + } + + private static final class ClientWithOrigin extends FilterClient { + + private final String origin; + + private ClientWithOrigin(Client in, String origin) { + super(in); + this.origin = origin; + } + + @Override + protected > void doExecute( + Action action, Request request, ActionListener listener) { + final Supplier supplier = in().threadPool().getThreadContext().newRestorableContext(false); + try (ThreadContext.StoredContext ignore = in().threadPool().getThreadContext().stashContext()) { + in().threadPool().getThreadContext().putTransient(ACTION_ORIGIN_TRANSIENT_NAME, origin); + super.doExecute(action, request, new ContextPreservingActionListener<>(supplier, listener)); + } + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/EmptyXPackFeatureSet.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/EmptyXPackFeatureSet.java new file mode 100644 index 0000000000000..3a6cc899d6274 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/EmptyXPackFeatureSet.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core; + +import org.elasticsearch.action.ActionListener; + +import java.util.Collections; +import java.util.Map; + +public class EmptyXPackFeatureSet implements XPackFeatureSet { + @Override + public String name() { + return "Empty XPackFeatureSet"; + } + + @Override + public String description() { + return "Core will not function without this empty featureset compliments of the way the TransportXPackInfoAction Guice works"; + } + + @Override + public boolean available() { + return false; + } + + @Override + public boolean enabled() { + return false; + } + + @Override + public Map nativeCodeInfo() { + return Collections.emptyMap(); + } + + @Override + public void usage(ActionListener listener) { + + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackBuild.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackBuild.java new file mode 100644 index 0000000000000..2c1d455e6aa9d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackBuild.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core; + +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.PathUtils; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.jar.JarInputStream; +import java.util.jar.Manifest; + +/** + * Information about the built version of x-pack that is running. + */ +public class XPackBuild { + + public static final XPackBuild CURRENT; + + static { + final String shortHash; + final String date; + + Path path = getElasticsearchCodebase(); + if (path.toString().endsWith(".jar")) { + try (JarInputStream jar = new JarInputStream(Files.newInputStream(path))) { + Manifest manifest = jar.getManifest(); + shortHash = manifest.getMainAttributes().getValue("Change"); + date = manifest.getMainAttributes().getValue("Build-Date"); + } catch (IOException e) { + throw new RuntimeException(e); + } + } else { + // not running from a jar (unit tests, IDE) + shortHash = "Unknown"; + date = "Unknown"; + } + + CURRENT = new XPackBuild(shortHash, date); + } + + /** + * Returns path to xpack codebase path + */ + @SuppressForbidden(reason = "looks up path of xpack.jar directly") + static Path getElasticsearchCodebase() { + URL url = XPackBuild.class.getProtectionDomain().getCodeSource().getLocation(); + try { + return PathUtils.get(url.toURI()); + } catch (URISyntaxException bogus) { + throw new RuntimeException(bogus); + } + } + + private String shortHash; + private String date; + + XPackBuild(String shortHash, String date) { + this.shortHash = shortHash; + this.date = date; + } + + public String shortHash() { + return shortHash; + } + + public String date() { + return date; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java new file mode 100644 index 0000000000000..588a9c0543379 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.license.LicensingClient; +import org.elasticsearch.license.XPackInfoResponse; +import org.elasticsearch.xpack.core.action.XPackInfoAction; +import org.elasticsearch.xpack.core.action.XPackInfoRequest; +import org.elasticsearch.xpack.core.action.XPackInfoRequestBuilder; +import org.elasticsearch.xpack.core.ml.client.MachineLearningClient; +import org.elasticsearch.xpack.core.monitoring.client.MonitoringClient; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; + +import java.util.Collections; +import java.util.Map; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; + +public class XPackClient { + + private final Client client; + + private final LicensingClient licensingClient; + private final MonitoringClient monitoringClient; + private final SecurityClient securityClient; + private final WatcherClient watcherClient; + private final MachineLearningClient machineLearning; + + public XPackClient(Client client) { + this.client = client; + this.licensingClient = new LicensingClient(client); + this.monitoringClient = new MonitoringClient(client); + this.securityClient = new SecurityClient(client); + this.watcherClient = new WatcherClient(client); + this.machineLearning = new MachineLearningClient(client); + } + + public Client es() { + return client; + } + + public LicensingClient licensing() { + return licensingClient; + } + + public MonitoringClient monitoring() { + return monitoringClient; + } + + public SecurityClient security() { + return securityClient; + } + + public WatcherClient watcher() { + return watcherClient; + } + + public MachineLearningClient machineLearning() { + return machineLearning; + } + + public XPackClient withHeaders(Map headers) { + return new XPackClient(client.filterWithHeader(headers)); + } + + /** + * Returns a client that will call xpack APIs on behalf of the given user. + * + * @param username The username of the user + * @param passwd The password of the user. This char array can be cleared after calling this method. + */ + public XPackClient withAuth(String username, char[] passwd) { + return withHeaders(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue(username, new SecureString(passwd)))); + } + + public XPackInfoRequestBuilder prepareInfo() { + return new XPackInfoRequestBuilder(client); + } + + public void info(XPackInfoRequest request, ActionListener listener) { + client.execute(XPackInfoAction.INSTANCE, request, listener); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java new file mode 100644 index 0000000000000..4853588bd3ead --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -0,0 +1,417 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core; + +import org.elasticsearch.action.GenericAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.license.DeleteLicenseAction; +import org.elasticsearch.license.GetBasicStatusAction; +import org.elasticsearch.license.GetLicenseAction; +import org.elasticsearch.license.GetTrialStatusAction; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.LicensesMetaData; +import org.elasticsearch.license.PostStartBasicAction; +import org.elasticsearch.license.PostStartTrialAction; +import org.elasticsearch.license.PutLicenseAction; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.NetworkPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.xpack.core.action.XPackInfoAction; +import org.elasticsearch.xpack.core.action.XPackUsageAction; +import org.elasticsearch.xpack.core.deprecation.DeprecationInfoAction; +import org.elasticsearch.xpack.core.graph.GraphFeatureSetUsage; +import org.elasticsearch.xpack.core.graph.action.GraphExploreAction; +import org.elasticsearch.xpack.core.logstash.LogstashFeatureSetUsage; +import org.elasticsearch.xpack.core.ml.MachineLearningFeatureSetUsage; +import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.action.CloseJobAction; +import org.elasticsearch.xpack.core.ml.action.DeleteCalendarAction; +import org.elasticsearch.xpack.core.ml.action.DeleteCalendarEventAction; +import org.elasticsearch.xpack.core.ml.action.DeleteDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction; +import org.elasticsearch.xpack.core.ml.action.DeleteFilterAction; +import org.elasticsearch.xpack.core.ml.action.DeleteJobAction; +import org.elasticsearch.xpack.core.ml.action.DeleteModelSnapshotAction; +import org.elasticsearch.xpack.core.ml.action.FinalizeJobExecutionAction; +import org.elasticsearch.xpack.core.ml.action.FlushJobAction; +import org.elasticsearch.xpack.core.ml.action.ForecastJobAction; +import org.elasticsearch.xpack.core.ml.action.GetBucketsAction; +import org.elasticsearch.xpack.core.ml.action.GetCalendarEventsAction; +import org.elasticsearch.xpack.core.ml.action.GetCalendarsAction; +import org.elasticsearch.xpack.core.ml.action.GetCategoriesAction; +import org.elasticsearch.xpack.core.ml.action.GetDatafeedsAction; +import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; +import org.elasticsearch.xpack.core.ml.action.GetFiltersAction; +import org.elasticsearch.xpack.core.ml.action.GetInfluencersAction; +import org.elasticsearch.xpack.core.ml.action.MlInfoAction; +import org.elasticsearch.xpack.core.ml.action.GetJobsAction; +import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; +import 
org.elasticsearch.xpack.core.ml.action.GetModelSnapshotsAction; +import org.elasticsearch.xpack.core.ml.action.GetOverallBucketsAction; +import org.elasticsearch.xpack.core.ml.action.GetRecordsAction; +import org.elasticsearch.xpack.core.ml.action.IsolateDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.KillProcessAction; +import org.elasticsearch.xpack.core.ml.action.OpenJobAction; +import org.elasticsearch.xpack.core.ml.action.PersistJobAction; +import org.elasticsearch.xpack.core.ml.action.PostCalendarEventsAction; +import org.elasticsearch.xpack.core.ml.action.PostDataAction; +import org.elasticsearch.xpack.core.ml.action.PreviewDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.PutCalendarAction; +import org.elasticsearch.xpack.core.ml.action.PutDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.PutFilterAction; +import org.elasticsearch.xpack.core.ml.action.PutJobAction; +import org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction; +import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.UpdateCalendarJobAction; +import org.elasticsearch.xpack.core.ml.action.UpdateDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.UpdateJobAction; +import org.elasticsearch.xpack.core.ml.action.UpdateModelSnapshotAction; +import org.elasticsearch.xpack.core.ml.action.UpdateProcessAction; +import org.elasticsearch.xpack.core.ml.action.ValidateDetectorAction; +import org.elasticsearch.xpack.core.ml.action.ValidateJobConfigAction; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.monitoring.MonitoringFeatureSetUsage; +import org.elasticsearch.persistent.CompletionPersistentTaskAction; +import org.elasticsearch.persistent.PersistentTaskParams; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksNodeService; +import org.elasticsearch.persistent.RemovePersistentTaskAction; +import org.elasticsearch.persistent.StartPersistentTaskAction; +import org.elasticsearch.persistent.UpdatePersistentTaskStatusAction; +import org.elasticsearch.xpack.core.rollup.RollupFeatureSetUsage; +import org.elasticsearch.xpack.core.rollup.RollupField; +import org.elasticsearch.xpack.core.rollup.action.DeleteRollupJobAction; +import org.elasticsearch.xpack.core.rollup.action.GetRollupCapsAction; +import org.elasticsearch.xpack.core.rollup.action.GetRollupJobsAction; +import org.elasticsearch.xpack.core.rollup.action.PutRollupJobAction; +import org.elasticsearch.xpack.core.rollup.action.RollupSearchAction; +import org.elasticsearch.xpack.core.rollup.action.StartRollupJobAction; +import org.elasticsearch.xpack.core.rollup.action.StopRollupJobAction; +import org.elasticsearch.xpack.core.rollup.job.RollupJob; +import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus; +import org.elasticsearch.xpack.core.security.SecurityFeatureSetUsage; +import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.core.security.SecuritySettings; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction; +import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheAction; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleAction; +import org.elasticsearch.xpack.core.security.action.role.GetRolesAction; 
+import org.elasticsearch.xpack.core.security.action.role.PutRoleAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingAction; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenAction; +import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenAction; +import org.elasticsearch.xpack.core.security.action.token.RefreshTokenAction; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordAction; +import org.elasticsearch.xpack.core.security.action.user.DeleteUserAction; +import org.elasticsearch.xpack.core.security.action.user.GetUsersAction; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesAction; +import org.elasticsearch.xpack.core.security.action.user.PutUserAction; +import org.elasticsearch.xpack.core.security.action.user.SetEnabledAction; +import org.elasticsearch.xpack.core.security.authc.TokenMetaData; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.AllExpression; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.AnyExpression; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExceptExpression; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; +import org.elasticsearch.xpack.core.security.transport.netty4.SecurityNetty4Transport; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.core.ssl.action.GetCertificateInfoAction; +import org.elasticsearch.xpack.core.watcher.WatcherFeatureSetUsage; +import org.elasticsearch.xpack.core.watcher.WatcherMetaData; +import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.activate.ActivateWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.delete.DeleteWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsAction; +import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction; +import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeInfoAction; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPlugin { + + private final Settings settings; + + public XPackClientPlugin(final Settings settings) { + this.settings = settings; + } + + @Override + public List> getSettings() { + ArrayList> settings = new ArrayList<>(); + // the only licensing one + settings.add(Setting.groupSetting("license.", Setting.Property.NodeScope)); + + //TODO split these settings up + settings.addAll(XPackSettings.getAllSettings()); + + 
settings.add(LicenseService.SELF_GENERATED_LICENSE_TYPE); + + // we add the `xpack.version` setting to all internal indices + settings.add(Setting.simpleString("index.xpack.version", Setting.Property.IndexScope)); + + return settings; + } + + @Override + public Settings additionalSettings() { + return additionalSettings(settings, XPackSettings.SECURITY_ENABLED.get(settings), XPackPlugin.transportClientMode(settings)); + } + + static Settings additionalSettings(final Settings settings, final boolean enabled, final boolean transportClientMode) { + if (enabled && transportClientMode) { + final Settings.Builder builder = Settings.builder(); + builder.put(SecuritySettings.addTransportSettings(settings)); + builder.put(SecuritySettings.addUserSettings(settings)); + return builder.build(); + } else { + return Settings.EMPTY; + } + } + + @Override + public List getClientActions() { + return Arrays.asList( + // deprecation + DeprecationInfoAction.INSTANCE, + // graph + GraphExploreAction.INSTANCE, + // ML + GetJobsAction.INSTANCE, + GetJobsStatsAction.INSTANCE, + MlInfoAction.INSTANCE, + PutJobAction.INSTANCE, + UpdateJobAction.INSTANCE, + DeleteJobAction.INSTANCE, + OpenJobAction.INSTANCE, + GetFiltersAction.INSTANCE, + PutFilterAction.INSTANCE, + DeleteFilterAction.INSTANCE, + KillProcessAction.INSTANCE, + GetBucketsAction.INSTANCE, + GetInfluencersAction.INSTANCE, + GetOverallBucketsAction.INSTANCE, + GetRecordsAction.INSTANCE, + PostDataAction.INSTANCE, + CloseJobAction.INSTANCE, + FinalizeJobExecutionAction.INSTANCE, + FlushJobAction.INSTANCE, + ValidateDetectorAction.INSTANCE, + ValidateJobConfigAction.INSTANCE, + GetCategoriesAction.INSTANCE, + GetModelSnapshotsAction.INSTANCE, + RevertModelSnapshotAction.INSTANCE, + UpdateModelSnapshotAction.INSTANCE, + GetDatafeedsAction.INSTANCE, + GetDatafeedsStatsAction.INSTANCE, + PutDatafeedAction.INSTANCE, + UpdateDatafeedAction.INSTANCE, + DeleteDatafeedAction.INSTANCE, + PreviewDatafeedAction.INSTANCE, + StartDatafeedAction.INSTANCE, + StopDatafeedAction.INSTANCE, + IsolateDatafeedAction.INSTANCE, + DeleteModelSnapshotAction.INSTANCE, + UpdateProcessAction.INSTANCE, + DeleteExpiredDataAction.INSTANCE, + ForecastJobAction.INSTANCE, + GetCalendarsAction.INSTANCE, + PutCalendarAction.INSTANCE, + DeleteCalendarAction.INSTANCE, + DeleteCalendarEventAction.INSTANCE, + UpdateCalendarJobAction.INSTANCE, + GetCalendarEventsAction.INSTANCE, + PostCalendarEventsAction.INSTANCE, + PersistJobAction.INSTANCE, + // licensing + StartPersistentTaskAction.INSTANCE, + UpdatePersistentTaskStatusAction.INSTANCE, + RemovePersistentTaskAction.INSTANCE, + CompletionPersistentTaskAction.INSTANCE, + // security + ClearRealmCacheAction.INSTANCE, + ClearRolesCacheAction.INSTANCE, + GetUsersAction.INSTANCE, + PutUserAction.INSTANCE, + DeleteUserAction.INSTANCE, + GetRolesAction.INSTANCE, + PutRoleAction.INSTANCE, + DeleteRoleAction.INSTANCE, + ChangePasswordAction.INSTANCE, + AuthenticateAction.INSTANCE, + SetEnabledAction.INSTANCE, + HasPrivilegesAction.INSTANCE, + GetRoleMappingsAction.INSTANCE, + PutRoleMappingAction.INSTANCE, + DeleteRoleMappingAction.INSTANCE, + CreateTokenAction.INSTANCE, + InvalidateTokenAction.INSTANCE, + GetCertificateInfoAction.INSTANCE, + RefreshTokenAction.INSTANCE, + // upgrade + IndexUpgradeInfoAction.INSTANCE, + IndexUpgradeAction.INSTANCE, + // watcher + PutWatchAction.INSTANCE, + DeleteWatchAction.INSTANCE, + GetWatchAction.INSTANCE, + WatcherStatsAction.INSTANCE, + AckWatchAction.INSTANCE, + ActivateWatchAction.INSTANCE, + 
WatcherServiceAction.INSTANCE, + ExecuteWatchAction.INSTANCE, + // license + PutLicenseAction.INSTANCE, + GetLicenseAction.INSTANCE, + DeleteLicenseAction.INSTANCE, + PostStartTrialAction.INSTANCE, + GetTrialStatusAction.INSTANCE, + PostStartBasicAction.INSTANCE, + GetBasicStatusAction.INSTANCE, + // x-pack + XPackInfoAction.INSTANCE, + XPackUsageAction.INSTANCE, + // rollup + RollupSearchAction.INSTANCE, + PutRollupJobAction.INSTANCE, + StartRollupJobAction.INSTANCE, + StopRollupJobAction.INSTANCE, + DeleteRollupJobAction.INSTANCE, + GetRollupJobsAction.INSTANCE, + GetRollupCapsAction.INSTANCE + ); + } + + @Override + public List getNamedWriteables() { + return Arrays.asList( + // graph + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.GRAPH, GraphFeatureSetUsage::new), + // logstash + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.LOGSTASH, LogstashFeatureSetUsage::new), + // ML - Custom metadata + new NamedWriteableRegistry.Entry(MetaData.Custom.class, "ml", MlMetadata::new), + new NamedWriteableRegistry.Entry(NamedDiff.class, "ml", MlMetadata.MlMetadataDiff::new), + new NamedWriteableRegistry.Entry(MetaData.Custom.class, PersistentTasksCustomMetaData.TYPE, + PersistentTasksCustomMetaData::new), + new NamedWriteableRegistry.Entry(NamedDiff.class, PersistentTasksCustomMetaData.TYPE, + PersistentTasksCustomMetaData::readDiffFrom), + // ML - Persistent action requests + new NamedWriteableRegistry.Entry(PersistentTaskParams.class, StartDatafeedAction.TASK_NAME, + StartDatafeedAction.DatafeedParams::new), + new NamedWriteableRegistry.Entry(PersistentTaskParams.class, OpenJobAction.TASK_NAME, + OpenJobAction.JobParams::new), + // ML - Task statuses + new NamedWriteableRegistry.Entry(Task.Status.class, PersistentTasksNodeService.Status.NAME, + PersistentTasksNodeService.Status::new), + new NamedWriteableRegistry.Entry(Task.Status.class, JobTaskStatus.NAME, JobTaskStatus::new), + new NamedWriteableRegistry.Entry(Task.Status.class, DatafeedState.NAME, DatafeedState::fromStream), + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.MACHINE_LEARNING, + MachineLearningFeatureSetUsage::new), + // monitoring + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.MONITORING, MonitoringFeatureSetUsage::new), + // security + new NamedWriteableRegistry.Entry(ClusterState.Custom.class, TokenMetaData.TYPE, TokenMetaData::new), + new NamedWriteableRegistry.Entry(NamedDiff.class, TokenMetaData.TYPE, TokenMetaData::readDiffFrom), + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SECURITY, SecurityFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(RoleMapperExpression.class, AllExpression.NAME, AllExpression::new), + new NamedWriteableRegistry.Entry(RoleMapperExpression.class, AnyExpression.NAME, AnyExpression::new), + new NamedWriteableRegistry.Entry(RoleMapperExpression.class, FieldExpression.NAME, FieldExpression::new), + new NamedWriteableRegistry.Entry(RoleMapperExpression.class, ExceptExpression.NAME, ExceptExpression::new), + // watcher + new NamedWriteableRegistry.Entry(MetaData.Custom.class, WatcherMetaData.TYPE, WatcherMetaData::new), + new NamedWriteableRegistry.Entry(NamedDiff.class, WatcherMetaData.TYPE, WatcherMetaData::readDiffFrom), + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.WATCHER, WatcherFeatureSetUsage::new), + // licensing + new NamedWriteableRegistry.Entry(MetaData.Custom.class, LicensesMetaData.TYPE, LicensesMetaData::new), + new 
NamedWriteableRegistry.Entry(NamedDiff.class, LicensesMetaData.TYPE, LicensesMetaData::readDiffFrom), + // rollup + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ROLLUP, RollupFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(PersistentTaskParams.class, RollupJob.NAME, RollupJob::new), + new NamedWriteableRegistry.Entry(Task.Status.class, RollupJobStatus.NAME, RollupJobStatus::new) + ); + } + + @Override + public List getNamedXContent() { + return Arrays.asList( + // ML - Custom metadata + new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField("ml"), + parser -> MlMetadata.METADATA_PARSER.parse(parser, null).build()), + new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(PersistentTasksCustomMetaData.TYPE), + PersistentTasksCustomMetaData::fromXContent), + // ML - Persistent action requests + new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(StartDatafeedAction.TASK_NAME), + StartDatafeedAction.DatafeedParams::fromXContent), + new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(OpenJobAction.TASK_NAME), + OpenJobAction.JobParams::fromXContent), + // ML - Task statuses + new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(DatafeedState.NAME), DatafeedState::fromXContent), + new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(JobTaskStatus.NAME), JobTaskStatus::fromXContent), + // watcher + new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(WatcherMetaData.TYPE), + WatcherMetaData::fromXContent), + // licensing + new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(LicensesMetaData.TYPE), + LicensesMetaData::fromXContent), + //rollup + new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(RollupField.TASK_NAME), + parser -> RollupJob.fromXContent(parser)), + new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(RollupJobStatus.NAME), RollupJobStatus::fromXContent) + ); + } + + @Override + public Map> getTransports( + final Settings settings, + final ThreadPool threadPool, + final BigArrays bigArrays, + final PageCacheRecycler pageCacheRecycler, + final CircuitBreakerService circuitBreakerService, + final NamedWriteableRegistry namedWriteableRegistry, + final NetworkService networkService) { + // this should only be used in the transport layer, so do not add it if it is not in transport mode or we are disabled + if (XPackPlugin.transportClientMode(settings) == false || XPackSettings.SECURITY_ENABLED.get(settings) == false) { + return Collections.emptyMap(); + } + final SSLService sslService; + try { + sslService = new SSLService(settings, null); + } catch (Exception e) { + throw new RuntimeException(e); + } + return Collections.singletonMap(SecurityField.NAME4, () -> new SecurityNetty4Transport(settings, threadPool, + networkService, bigArrays, namedWriteableRegistry, circuitBreakerService, sslService)); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatureSet.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatureSet.java new file mode 100644 index 0000000000000..075625704d14c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackFeatureSet.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; + +public interface XPackFeatureSet { + + String name(); + + String description(); + + boolean available(); + + boolean enabled(); + + Map nativeCodeInfo(); + + void usage(ActionListener listener); + + abstract class Usage implements ToXContentObject, NamedWriteable { + + private static final String AVAILABLE_XFIELD = "available"; + private static final String ENABLED_XFIELD = "enabled"; + + protected final String name; + protected final boolean available; + protected final boolean enabled; + + public Usage(StreamInput input) throws IOException { + this(input.readString(), input.readBoolean(), input.readBoolean()); + } + + public Usage(String name, boolean available, boolean enabled) { + this.name = name; + this.available = available; + this.enabled = enabled; + } + + public String name() { + return name; + } + + public boolean available() { + return available; + } + + public boolean enabled() { + return enabled; + } + + @Override + public String getWriteableName() { + return name; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeBoolean(available); + out.writeBoolean(enabled); + } + + @Override + public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + innerXContent(builder, params); + return builder.endObject(); + } + + protected void innerXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(AVAILABLE_XFIELD, available); + builder.field(ENABLED_XFIELD, enabled); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java new file mode 100644 index 0000000000000..dd482c4e22d78 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core; + +public final class XPackField { + // These should be moved back to XPackPlugin once its moved to common + /** Name constant for the security feature. */ + public static final String SECURITY = "security"; + /** Name constant for the monitoring feature. */ + public static final String MONITORING = "monitoring"; + /** Name constant for the watcher feature. */ + public static final String WATCHER = "watcher"; + /** Name constant for the graph feature. */ + public static final String GRAPH = "graph"; + /** Name constant for the machine learning feature. */ + public static final String MACHINE_LEARNING = "ml"; + /** Name constant for the Logstash feature. */ + public static final String LOGSTASH = "logstash"; + /** Name constant for the Deprecation API feature. 
*/ + public static final String DEPRECATION = "deprecation"; + /** Name constant for the upgrade feature. */ + public static final String UPGRADE = "upgrade"; + // inside of YAML settings we still use xpack do not having handle issues with dashes + public static final String SETTINGS_NAME = "xpack"; + /** Name constant for the sql feature. */ + public static final String SQL = "sql"; + /** Name constant for the rollup feature. */ + public static final String ROLLUP = "rollup"; + + private XPackField() {} + + public static String featureSettingPrefix(String featureName) { + return XPackField.SETTINGS_NAME + "." + featureName; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java new file mode 100644 index 0000000000000..5ee46f3b3c97a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -0,0 +1,237 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.SetOnce; +import org.bouncycastle.operator.OperatorCreationException; +import org.elasticsearch.SpecialPermission; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.GenericAction; +import org.elasticsearch.action.support.ActionFilter; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Binder; +import org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.inject.multibindings.Multibinder; +import org.elasticsearch.common.inject.util.Providers; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.Licensing; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.plugins.ExtensiblePlugin; +import org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.action.TransportXPackInfoAction; +import org.elasticsearch.xpack.core.action.TransportXPackUsageAction; +import org.elasticsearch.xpack.core.action.XPackInfoAction; +import org.elasticsearch.xpack.core.action.XPackUsageAction; +import 
org.elasticsearch.xpack.core.rest.action.RestXPackInfoAction; +import org.elasticsearch.xpack.core.rest.action.RestXPackUsageAction; +import org.elasticsearch.xpack.core.ssl.SSLConfigurationReloader; +import org.elasticsearch.xpack.core.ssl.SSLService; + +import javax.security.auth.DestroyFailedException; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.AccessController; +import java.security.GeneralSecurityException; +import java.security.PrivilegedAction; +import java.time.Clock; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.function.Supplier; + +public class XPackPlugin extends XPackClientPlugin implements ScriptPlugin, ExtensiblePlugin { + + private static Logger logger = ESLoggerFactory.getLogger(XPackPlugin.class); + private static DeprecationLogger deprecationLogger = new DeprecationLogger(logger); + + // TODO: clean up this library to not ask for write access to all system properties! + static { + // invoke this clinit in unbound with permissions to access all system properties + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(new SpecialPermission()); + } + try { + AccessController.doPrivileged(new PrivilegedAction() { + @Override + public Void run() { + try { + Class.forName("com.unboundid.util.Debug"); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } + return null; + } + }); + // TODO: fix gradle to add all security resources (plugin metadata) to test classpath + // of watcher plugin, which depends on it directly. This prevents these plugins + // from being initialized correctly by the test framework, and means we have to + // have this leniency. + } catch (ExceptionInInitializerError bogus) { + if (bogus.getCause() instanceof SecurityException == false) { + throw bogus; // some other bug + } + } + } + + protected final Settings settings; + //private final Environment env; + protected boolean transportClientMode; + protected final Licensing licensing; + // These should not be directly accessed as they cannot be overriden in tests. Please use the getters so they can be overridden. + private static final SetOnce licenseState = new SetOnce<>(); + private static final SetOnce sslService = new SetOnce<>(); + private static final SetOnce licenseService = new SetOnce<>(); + + public XPackPlugin( + final Settings settings, + final Path configPath) throws IOException, DestroyFailedException, OperatorCreationException, GeneralSecurityException { + super(settings); + this.settings = settings; + this.transportClientMode = transportClientMode(settings); + Environment env = transportClientMode ? 
null : new Environment(settings, configPath); + + setSslService(new SSLService(settings, env)); + setLicenseState(new XPackLicenseState(settings)); + + this.licensing = new Licensing(settings); + } + + // overridable by tests + protected Clock getClock() { + return Clock.systemUTC(); + } + + protected SSLService getSslService() { return getSharedSslService(); } + protected LicenseService getLicenseService() { return getSharedLicenseService(); } + protected XPackLicenseState getLicenseState() { return getSharedLicenseState(); } + protected void setSslService(SSLService sslService) { XPackPlugin.sslService.set(sslService); } + protected void setLicenseService(LicenseService licenseService) { XPackPlugin.licenseService.set(licenseService); } + protected void setLicenseState(XPackLicenseState licenseState) { XPackPlugin.licenseState.set(licenseState); } + public static SSLService getSharedSslService() { return sslService.get(); } + public static LicenseService getSharedLicenseService() { return licenseService.get(); } + public static XPackLicenseState getSharedLicenseState() { return licenseState.get(); } + + @Override + public Collection createGuiceModules() { + ArrayList modules = new ArrayList<>(); + //modules.add(b -> b.bind(Clock.class).toInstance(getClock())); + // used to get core up and running, we do not bind the actual feature set here + modules.add(b -> XPackPlugin.createFeatureSetMultiBinder(b, EmptyXPackFeatureSet.class)); + + if (transportClientMode) { + modules.add(b -> b.bind(XPackLicenseState.class).toProvider(Providers.of(null))); + } + return modules; + } + + @Override + public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, ScriptService scriptService, + NamedXContentRegistry xContentRegistry, Environment environment, + NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { + List components = new ArrayList<>(); + + // just create the reloader as it will pull all of the loaded ssl configurations and start watching them + new SSLConfigurationReloader(settings, environment, getSslService(), resourceWatcherService); + + setLicenseService(new LicenseService(settings, clusterService, getClock(), + environment, resourceWatcherService, getLicenseState())); + + // It is useful to override these as they are what guice is injecting into actions + components.add(getSslService()); + components.add(getLicenseService()); + components.add(getLicenseState()); + + return components; + } + + @Override + public List> getActions() { + List> actions = new ArrayList<>(); + actions.add(new ActionHandler<>(XPackInfoAction.INSTANCE, TransportXPackInfoAction.class)); + actions.add(new ActionHandler<>(XPackUsageAction.INSTANCE, TransportXPackUsageAction.class)); + actions.addAll(licensing.getActions()); + return actions; + } + + @Override + public List getClientActions() { + List actions = new ArrayList<>(); + actions.addAll(licensing.getClientActions()); + actions.addAll(super.getClientActions()); + return actions; + } + + @Override + public List getActionFilters() { + List filters = new ArrayList<>(); + filters.addAll(licensing.getActionFilters()); + return filters; + } + + @Override + public List getRestHandlers(Settings settings, RestController restController, ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster) { + List handlers = new 
ArrayList<>(); + handlers.add(new RestXPackInfoAction(settings, restController)); + handlers.add(new RestXPackUsageAction(settings, restController)); + handlers.addAll(licensing.getRestHandlers(settings, restController, clusterSettings, indexScopedSettings, settingsFilter, + indexNameExpressionResolver, nodesInCluster)); + return handlers; + } + + public static void bindFeatureSet(Binder binder, Class featureSet) { + Multibinder featureSetBinder = createFeatureSetMultiBinder(binder, featureSet); + featureSetBinder.addBinding().to(featureSet); + } + + public static Multibinder createFeatureSetMultiBinder(Binder binder, Class featureSet) { + binder.bind(featureSet).asEagerSingleton(); + return Multibinder.newSetBinder(binder, XPackFeatureSet.class); + } + + public static boolean transportClientMode(Settings settings) { + return TransportClient.CLIENT_TYPE.equals(settings.get(Client.CLIENT_TYPE_SETTING_S.getKey())); + } + + public static Path resolveConfigFile(Environment env, String name) { + Path config = env.configFile().resolve(name); + if (Files.exists(config) == false) { + Path legacyConfig = env.configFile().resolve("x-pack").resolve(name); + if (Files.exists(legacyConfig)) { + deprecationLogger.deprecated("Config file [" + name + "] is in a deprecated location. Move from " + + legacyConfig.toString() + " to " + config.toString()); + return legacyConfig; + } + } + return config; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java new file mode 100644 index 0000000000000..a88d423be95b2 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java @@ -0,0 +1,155 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core; + +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.core.ssl.SSLClientAuth; +import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; +import org.elasticsearch.xpack.core.ssl.VerificationMode; + +import javax.crypto.Cipher; + +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.xpack.core.security.SecurityField.USER_SETTING; + +/** + * A container for xpack setting constants. + */ +public class XPackSettings { + /** Setting for enabling or disabling security. Defaults to true. */ + public static final Setting SECURITY_ENABLED = Setting.boolSetting("xpack.security.enabled", true, Setting.Property.NodeScope); + + /** Setting for enabling or disabling monitoring. */ + public static final Setting MONITORING_ENABLED = Setting.boolSetting("xpack.monitoring.enabled", true, + Setting.Property.NodeScope); + + /** Setting for enabling or disabling watcher. Defaults to true. */ + public static final Setting WATCHER_ENABLED = Setting.boolSetting("xpack.watcher.enabled", true, Setting.Property.NodeScope); + + /** Setting for enabling or disabling graph. Defaults to true. 
*/
+    public static final Setting<Boolean> GRAPH_ENABLED = Setting.boolSetting("xpack.graph.enabled", true, Setting.Property.NodeScope);
+
+    /** Setting for enabling or disabling machine learning. Defaults to true. */
+    public static final Setting<Boolean> MACHINE_LEARNING_ENABLED = Setting.boolSetting("xpack.ml.enabled", true,
+            Setting.Property.NodeScope);
+
+    /** Setting for enabling or disabling rollup. Defaults to true. */
+    public static final Setting<Boolean> ROLLUP_ENABLED = Setting.boolSetting("xpack.rollup.enabled", true,
+            Setting.Property.NodeScope);
+
+    /** Setting for enabling or disabling auditing. Defaults to false. */
+    public static final Setting<Boolean> AUDIT_ENABLED = Setting.boolSetting("xpack.security.audit.enabled", false,
+            Setting.Property.NodeScope);
+
+    /** Setting for enabling or disabling document/field level security. Defaults to true. */
+    public static final Setting<Boolean> DLS_FLS_ENABLED = Setting.boolSetting("xpack.security.dls_fls.enabled", true,
+            Setting.Property.NodeScope);
+
+    /** Setting for enabling or disabling Logstash extensions. Defaults to true. */
+    public static final Setting<Boolean> LOGSTASH_ENABLED = Setting.boolSetting("xpack.logstash.enabled", true,
+            Setting.Property.NodeScope);
+
+    /** Setting for enabling or disabling TLS. Defaults to false. */
+    public static final Setting<Boolean> TRANSPORT_SSL_ENABLED = Setting.boolSetting("xpack.security.transport.ssl.enabled", false,
+            Property.NodeScope);
+
+    /** Setting for enabling or disabling http ssl. Defaults to false. */
+    public static final Setting<Boolean> HTTP_SSL_ENABLED = Setting.boolSetting("xpack.security.http.ssl.enabled", false,
+            Setting.Property.NodeScope);
+
+    /** Setting for enabling or disabling the reserved realm. Defaults to true. */
+    public static final Setting<Boolean> RESERVED_REALM_ENABLED_SETTING = Setting.boolSetting("xpack.security.authc.reserved_realm.enabled",
+            true, Setting.Property.NodeScope);
+
+    /** Setting for enabling or disabling the token service. Defaults to true. */
+    public static final Setting<Boolean> TOKEN_SERVICE_ENABLED_SETTING = Setting.boolSetting("xpack.security.authc.token.enabled", (s) -> {
+        if (NetworkModule.HTTP_ENABLED.get(s)) {
+            return XPackSettings.HTTP_SSL_ENABLED.getRaw(s);
+        } else {
+            return Boolean.TRUE.toString();
+        }
+    }, Setting.Property.NodeScope);
+
+    /** Setting for enabling or disabling sql. Defaults to true. */
+    public static final Setting<Boolean> SQL_ENABLED = Setting.boolSetting("xpack.sql.enabled", true, Setting.Property.NodeScope);
+
+    /*
+     * SSL settings. These are the settings that are specifically registered for SSL.
Many are private as we do not explicitly use them + * but instead parse based on a prefix (eg *.ssl.*) + */ + public static final List DEFAULT_CIPHERS; + + static { + List ciphers = Arrays.asList("TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_128_CBC_SHA256", + "TLS_RSA_WITH_AES_128_CBC_SHA"); + try { + final boolean use256Bit = Cipher.getMaxAllowedKeyLength("AES") > 128; + if (use256Bit) { + List strongerCiphers = new ArrayList<>(ciphers.size() * 2); + strongerCiphers.addAll(Arrays.asList("TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA256", "TLS_RSA_WITH_AES_256_CBC_SHA")); + strongerCiphers.addAll(ciphers); + ciphers = strongerCiphers; + } + } catch (NoSuchAlgorithmException e) { + // ignore it here - there will be issues elsewhere and its not nice to throw in a static initializer + } + + DEFAULT_CIPHERS = ciphers; + } + + public static final List DEFAULT_SUPPORTED_PROTOCOLS = Arrays.asList("TLSv1.2", "TLSv1.1", "TLSv1"); + public static final SSLClientAuth CLIENT_AUTH_DEFAULT = SSLClientAuth.REQUIRED; + public static final SSLClientAuth HTTP_CLIENT_AUTH_DEFAULT = SSLClientAuth.NONE; + public static final VerificationMode VERIFICATION_MODE_DEFAULT = VerificationMode.FULL; + + // global settings that apply to everything! + public static final String GLOBAL_SSL_PREFIX = "xpack.ssl."; + private static final SSLConfigurationSettings GLOBAL_SSL = SSLConfigurationSettings.withPrefix(GLOBAL_SSL_PREFIX); + + // http specific settings + public static final String HTTP_SSL_PREFIX = SecurityField.setting("http.ssl."); + private static final SSLConfigurationSettings HTTP_SSL = SSLConfigurationSettings.withPrefix(HTTP_SSL_PREFIX); + + // transport specific settings + public static final String TRANSPORT_SSL_PREFIX = SecurityField.setting("transport.ssl."); + private static final SSLConfigurationSettings TRANSPORT_SSL = SSLConfigurationSettings.withPrefix(TRANSPORT_SSL_PREFIX); + + /** Returns all settings created in {@link XPackSettings}. */ + public static List> getAllSettings() { + ArrayList> settings = new ArrayList<>(); + settings.addAll(GLOBAL_SSL.getAllSettings()); + settings.addAll(HTTP_SSL.getAllSettings()); + settings.addAll(TRANSPORT_SSL.getAllSettings()); + settings.add(SECURITY_ENABLED); + settings.add(MONITORING_ENABLED); + settings.add(GRAPH_ENABLED); + settings.add(MACHINE_LEARNING_ENABLED); + settings.add(AUDIT_ENABLED); + settings.add(WATCHER_ENABLED); + settings.add(DLS_FLS_ENABLED); + settings.add(LOGSTASH_ENABLED); + settings.add(TRANSPORT_SSL_ENABLED); + settings.add(HTTP_SSL_ENABLED); + settings.add(RESERVED_REALM_ENABLED_SETTING); + settings.add(TOKEN_SERVICE_ENABLED_SETTING); + settings.add(SQL_ENABLED); + settings.add(USER_SETTING); + settings.add(ROLLUP_ENABLED); + return Collections.unmodifiableList(settings); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackInfoAction.java new file mode 100644 index 0000000000000..415e601a40aa9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackInfoAction.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackInfoResponse; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackBuild; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.license.XPackInfoResponse.FeatureSetsInfo.FeatureSet; +import org.elasticsearch.license.XPackInfoResponse.LicenseInfo; + +import java.util.Set; +import java.util.stream.Collectors; + +public class TransportXPackInfoAction extends HandledTransportAction { + + private final LicenseService licenseService; + private final Set featureSets; + + @Inject + public TransportXPackInfoAction(Settings settings, ThreadPool threadPool, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + LicenseService licenseService, Set featureSets) { + super(settings, XPackInfoAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, + XPackInfoRequest::new); + this.licenseService = licenseService; + this.featureSets = featureSets; + } + + @Override + protected void doExecute(XPackInfoRequest request, ActionListener listener) { + + + XPackInfoResponse.BuildInfo buildInfo = null; + if (request.getCategories().contains(XPackInfoRequest.Category.BUILD)) { + buildInfo = new XPackInfoResponse.BuildInfo(XPackBuild.CURRENT); + } + + LicenseInfo licenseInfo = null; + if (request.getCategories().contains(XPackInfoRequest.Category.LICENSE)) { + License license = licenseService.getLicense(); + if (license != null) { + licenseInfo = new LicenseInfo(license); + } + } + + XPackInfoResponse.FeatureSetsInfo featureSetsInfo = null; + if (request.getCategories().contains(XPackInfoRequest.Category.FEATURES)) { + Set featureSets = this.featureSets.stream().map(fs -> + new FeatureSet(fs.name(), request.isVerbose() ? fs.description() : null, fs.available(), fs.enabled(), + request.isVerbose() ? fs.nativeCodeInfo() : null)) + .collect(Collectors.toSet()); + featureSetsInfo = new XPackInfoResponse.FeatureSetsInfo(featureSets); + } + + listener.onResponse(new XPackInfoResponse(buildInfo, licenseInfo, featureSetsInfo)); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackUsageAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackUsageAction.java new file mode 100644 index 0000000000000..f3abad5e68bb3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackUsageAction.java @@ -0,0 +1,100 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackFeatureSet.Usage; +import org.elasticsearch.xpack.core.common.IteratingActionListener; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReferenceArray; +import java.util.function.BiConsumer; + +public class TransportXPackUsageAction extends TransportMasterNodeAction { + + private final List featureSets; + + @Inject + public TransportXPackUsageAction(Settings settings, ThreadPool threadPool, TransportService transportService, + ClusterService clusterService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, Set featureSets) { + super(settings, XPackUsageAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + XPackUsageRequest::new); + this.featureSets = Collections.unmodifiableList(new ArrayList<>(featureSets)); + } + + @Override + protected String executor() { + return ThreadPool.Names.MANAGEMENT; + } + + @Override + protected XPackUsageResponse newResponse() { + return new XPackUsageResponse(); + } + + @Override + protected void masterOperation(XPackUsageRequest request, ClusterState state, ActionListener listener) + throws Exception { + final ActionListener> usageActionListener = new ActionListener>() { + @Override + public void onResponse(List usages) { + listener.onResponse(new XPackUsageResponse(usages)); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }; + final AtomicReferenceArray featureSetUsages = new AtomicReferenceArray<>(featureSets.size()); + final AtomicInteger position = new AtomicInteger(0); + final BiConsumer>> consumer = (featureSet, iteratingListener) -> { + featureSet.usage(new ActionListener() { + @Override + public void onResponse(Usage usage) { + featureSetUsages.set(position.getAndIncrement(), usage); + iteratingListener.onResponse(null); // just send null back and keep iterating + } + + @Override + public void onFailure(Exception e) { + iteratingListener.onFailure(e); + } + }); + }; + IteratingActionListener, XPackFeatureSet> iteratingActionListener = + new IteratingActionListener<>(usageActionListener, consumer, featureSets, + threadPool.getThreadContext(), () -> { + final List usageList = new ArrayList<>(featureSetUsages.length()); + for (int i = 0; i < featureSetUsages.length(); i++) { + usageList.add(featureSetUsages.get(i)); + } + return usageList; + }); + iteratingActionListener.run(); + } + + @Override + protected ClusterBlockException checkBlock(XPackUsageRequest request, ClusterState state) { + return null; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoAction.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoAction.java new file mode 100644 index 0000000000000..bdca705baff8f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoAction.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.license.XPackInfoResponse; + +public class XPackInfoAction extends Action { + + public static final String NAME = "cluster:monitor/xpack/info"; + public static final XPackInfoAction INSTANCE = new XPackInfoAction(); + + public XPackInfoAction() { + super(NAME); + } + + @Override + public XPackInfoRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new XPackInfoRequestBuilder(client); + } + + @Override + public XPackInfoResponse newResponse() { + return new XPackInfoResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoRequest.java new file mode 100644 index 0000000000000..b3c88be93aaca --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoRequest.java @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.action; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.Locale; + +public class XPackInfoRequest extends ActionRequest { + + public enum Category { + BUILD, LICENSE, FEATURES; + + public static EnumSet toSet(String... 
categories) { + EnumSet set = EnumSet.noneOf(Category.class); + for (String category : categories) { + switch (category) { + case "_all": + return EnumSet.allOf(Category.class); + case "_none": + return EnumSet.noneOf(Category.class); + default: + set.add(Category.valueOf(category.toUpperCase(Locale.ROOT))); + } + } + return set; + } + } + + private boolean verbose; + private EnumSet categories = EnumSet.noneOf(Category.class); + + public XPackInfoRequest() {} + + public void setVerbose(boolean verbose) { + this.verbose = verbose; + } + + public boolean isVerbose() { + return verbose; + } + + public void setCategories(EnumSet categories) { + this.categories = categories; + } + + public EnumSet getCategories() { + return categories; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + this.verbose = in.readBoolean(); + EnumSet categories = EnumSet.noneOf(Category.class); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + categories.add(Category.valueOf(in.readString())); + } + this.categories = categories; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(verbose); + out.writeVInt(categories.size()); + for (Category category : categories) { + out.writeString(category.name()); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoRequestBuilder.java new file mode 100644 index 0000000000000..6ee4fb925bde2 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoRequestBuilder.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.action; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.license.XPackInfoResponse; + +import java.util.EnumSet; + +public class XPackInfoRequestBuilder extends ActionRequestBuilder { + + public XPackInfoRequestBuilder(ElasticsearchClient client) { + this(client, XPackInfoAction.INSTANCE); + } + + public XPackInfoRequestBuilder(ElasticsearchClient client, XPackInfoAction action) { + super(client, action, new XPackInfoRequest()); + } + + public XPackInfoRequestBuilder setVerbose(boolean verbose) { + request.setVerbose(verbose); + return this; + } + + + public XPackInfoRequestBuilder setCategories(EnumSet categories) { + request.setCategories(categories); + return this; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageAction.java new file mode 100644 index 0000000000000..252283a1dfc7d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageAction.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
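A brief illustrative sketch (not part of the change set) of how the category parsing above behaves; the wrapper class name and printed values are invented for the example:

```java
import org.elasticsearch.xpack.core.action.XPackInfoRequest;

import java.util.EnumSet;

public class XPackInfoRequestExample {
    public static void main(String[] args) {
        // "_all" expands to every category; individual names are matched case-insensitively.
        EnumSet<XPackInfoRequest.Category> categories = XPackInfoRequest.Category.toSet("build", "license");

        XPackInfoRequest request = new XPackInfoRequest();
        request.setVerbose(true);
        request.setCategories(categories);

        // FEATURES was not requested, so a handler would skip building feature set info.
        System.out.println(request.getCategories()); // [BUILD, LICENSE]
    }
}
```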
+ */ +package org.elasticsearch.xpack.core.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public class XPackUsageAction extends Action { + + public static final String NAME = "cluster:monitor/xpack/usage"; + public static final XPackUsageAction INSTANCE = new XPackUsageAction(); + + public XPackUsageAction() { + super(NAME); + } + + @Override + public XPackUsageRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new XPackUsageRequestBuilder(client); + } + + @Override + public XPackUsageResponse newResponse() { + return new XPackUsageResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageRequest.java new file mode 100644 index 0000000000000..d578249c147c3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageRequest.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.MasterNodeRequest; + +public class XPackUsageRequest extends MasterNodeRequest { + + @Override + public ActionRequestValidationException validate() { + return null; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageRequestBuilder.java new file mode 100644 index 0000000000000..789460f133969 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageRequestBuilder.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.action; + +import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +public class XPackUsageRequestBuilder + extends MasterNodeOperationRequestBuilder { + + public XPackUsageRequestBuilder(ElasticsearchClient client) { + this(client, XPackUsageAction.INSTANCE); + } + + public XPackUsageRequestBuilder(ElasticsearchClient client, XPackUsageAction action) { + super(client, action, new XPackUsageRequest()); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageResponse.java new file mode 100644 index 0000000000000..afca0e000fcc8 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageResponse.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
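As a rough, hypothetical usage sketch of the builder defined above: assuming a caller already holds an ElasticsearchClient, it could fetch and print feature usage like this (the helper class and method names are invented):

```java
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.xpack.core.XPackFeatureSet;
import org.elasticsearch.xpack.core.action.XPackUsageRequestBuilder;
import org.elasticsearch.xpack.core.action.XPackUsageResponse;

public class XPackUsageExample {
    static void printUsage(ElasticsearchClient client) {
        // Executes cluster:monitor/xpack/usage and blocks until the response arrives.
        XPackUsageResponse response = new XPackUsageRequestBuilder(client).get();
        for (XPackFeatureSet.Usage usage : response.getUsages()) {
            System.out.println(usage.name() + " enabled=" + usage.enabled() + " available=" + usage.available());
        }
    }
}
```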
+ */ +package org.elasticsearch.xpack.core.action; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.XPackFeatureSet; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class XPackUsageResponse extends ActionResponse { + + private List usages; + + public XPackUsageResponse() {} + + public XPackUsageResponse(List usages) { + this.usages = usages; + } + + public List getUsages() { + return usages; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(usages.size()); + for (XPackFeatureSet.Usage usage : usages) { + out.writeNamedWriteable(usage); + } + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + int size = in.readVInt(); + usages = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + usages.add(in.readNamedWriteable(XPackFeatureSet.Usage.class)); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/IteratingActionListener.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/IteratingActionListener.java new file mode 100644 index 0000000000000..46ebd89b8ea76 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/IteratingActionListener.java @@ -0,0 +1,115 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.common; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.util.concurrent.ThreadContext; + +import java.util.Collections; +import java.util.List; +import java.util.function.BiConsumer; +import java.util.function.Supplier; + +/** + * This action listener wraps another listener and provides a framework for iteration over a List while calling an asynchronous function + * for each. The listener calls the {@link BiConsumer} with the current element in the list and a {@link ActionListener}. This function + * is expected to call the listener in case of success or failure due to an exception. If there is a failure due to an exception the wrapped + * listener's {@link ActionListener#onFailure(Exception)} method is called. If the consumer calls {@link #onResponse(Object)} with a + * non-null object, iteration will cease and the wrapped listener will be called with the response. In the case of a null value being passed + * to {@link #onResponse(Object)} then iteration will continue by applying the {@link BiConsumer} to the next item in the list; if the list + * has no more elements then the wrapped listener will be called with a null value, unless an optional {@link Supplier} is provided + * that supplies the response to send for {@link ActionListener#onResponse(Object)}. 
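To make the iteration contract described above concrete, here is a minimal hypothetical sketch. It assumes the two erased type parameters are the response type and the item type, as the constructor parameters suggest; the realm names and lookup logic are invented:

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.xpack.core.common.IteratingActionListener;

import java.util.Arrays;
import java.util.List;

public class IteratingListenerExample {
    public static void main(String[] args) {
        ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
        List<String> realms = Arrays.asList("file", "native", "ldap");

        ActionListener<String> finalListener = ActionListener.wrap(
                user -> System.out.println("resolved: " + user),
                e -> System.err.println("failed: " + e));

        // The consumer answers null to keep iterating and a non-null value to stop early.
        IteratingActionListener<String, String> iterator = new IteratingActionListener<>(
                finalListener,
                (realm, listener) -> listener.onResponse("native".equals(realm) ? "joe@" + realm : null),
                realms,
                threadContext);

        iterator.run(); // prints "resolved: joe@native" after the "file" realm answers null
    }
}
```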
+ * + * After creation, iteration is started by calling {@link #run()} + */ +public final class IteratingActionListener implements ActionListener, Runnable { + + private final List consumables; + private final ActionListener delegate; + private final BiConsumer> consumer; + private final ThreadContext threadContext; + private final Supplier consumablesFinishedResponse; + + private int position = 0; + + /** + * Constructs an {@link IteratingActionListener}. + * + * @param delegate the delegate listener to call when all consumables have finished executing + * @param consumer the consumer that is executed for each consumable instance + * @param consumables the instances that can be consumed to produce a response which is ultimately sent on the delegate listener + * @param threadContext the thread context for the thread pool that created the listener + */ + public IteratingActionListener(ActionListener delegate, BiConsumer> consumer, List consumables, + ThreadContext threadContext) { + this(delegate, consumer, consumables, threadContext, null); + } + + /** + * Constructs an {@link IteratingActionListener}. + * + * @param delegate the delegate listener to call when all consumables have finished executing + * @param consumer the consumer that is executed for each consumable instance + * @param consumables the instances that can be consumed to produce a response which is ultimately sent on the delegate listener + * @param threadContext the thread context for the thread pool that created the listener + * @param consumablesFinishedResponse a supplier that maps the last consumable's response to a response + * to be sent on the delegate listener, in case the last consumable returns a + * {@code null} value, but the delegate listener should respond with some other value + * (perhaps a concatenation of the results of all the consumables). + */ + public IteratingActionListener(ActionListener delegate, BiConsumer> consumer, List consumables, + ThreadContext threadContext, @Nullable Supplier consumablesFinishedResponse) { + this.delegate = delegate; + this.consumer = consumer; + this.consumables = Collections.unmodifiableList(consumables); + this.threadContext = threadContext; + this.consumablesFinishedResponse = consumablesFinishedResponse; + } + + @Override + public void run() { + if (consumables.isEmpty()) { + onResponse(null); + } else if (position < 0 || position >= consumables.size()) { + onFailure(new IllegalStateException("invalid position [" + position + "]. 
List size [" + consumables.size() + "]")); + } else { + try (ThreadContext.StoredContext ignore = threadContext.newStoredContext(false)) { + consumer.accept(consumables.get(position++), this); + } + } + } + + @Override + public void onResponse(T response) { + // we need to store the context here as there is a chance that this method is called from a thread outside of the ThreadPool + // like a LDAP connection reader thread and we can pollute the context in certain cases + try (ThreadContext.StoredContext ignore = threadContext.newStoredContext(false)) { + if (response == null) { + if (position == consumables.size()) { + if (consumablesFinishedResponse != null) { + delegate.onResponse(consumablesFinishedResponse.get()); + } else { + delegate.onResponse(null); + } + } else { + consumer.accept(consumables.get(position++), this); + } + } else { + delegate.onResponse(response); + } + } + } + + @Override + public void onFailure(Exception e) { + // we need to store the context here as there is a chance that this method is called from a thread outside of the ThreadPool + // like a LDAP connection reader thread and we can pollute the context in certain cases + try (ThreadContext.StoredContext ignore = threadContext.newStoredContext(false)) { + delegate.onFailure(e); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/socket/SocketAccess.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/socket/SocketAccess.java new file mode 100644 index 0000000000000..660b0421b3d53 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/socket/SocketAccess.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.common.socket; + +import org.elasticsearch.SpecialPermission; +import org.elasticsearch.common.CheckedRunnable; +import org.elasticsearch.common.CheckedSupplier; + +import java.io.IOException; +import java.net.SocketPermission; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; + +/** + * X-pack uses various libraries that establish socket connections. For these remote calls the plugin requires + * {@link SocketPermission} 'connect' to establish connections. This class wraps the operations requiring access in + * {@link AccessController#doPrivileged(PrivilegedAction)} blocks. 
+ */ +public final class SocketAccess { + + private SocketAccess() { + } + + public static R doPrivileged(CheckedSupplier supplier) throws IOException { + SpecialPermission.check(); + try { + return AccessController.doPrivileged((PrivilegedExceptionAction) supplier::get); + } catch (PrivilegedActionException e) { + throw (IOException) e.getCause(); + } + } + + public static void doPrivileged(CheckedRunnable action) throws IOException { + SpecialPermission.check(); + try { + AccessController.doPrivileged((PrivilegedExceptionAction) () -> { + action.run(); + return null; + }); + } catch (PrivilegedActionException e) { + throw (IOException) e.getCause(); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java new file mode 100644 index 0000000000000..a5eddbd37f8e2 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoAction.java @@ -0,0 +1,268 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.deprecation; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class DeprecationInfoAction extends Action { + + public static final DeprecationInfoAction INSTANCE = new DeprecationInfoAction(); + public static final String NAME = "cluster:admin/xpack/deprecation/info"; + + private DeprecationInfoAction() { + super(NAME); + } + + /** + * helper utility function to reduce repeat of running a specific {@link Set} of checks. 
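As a quick illustration of the SocketAccess helper above, a blocking socket read might be wrapped as follows; the host, port and reading logic are invented for the example (java.io/java.net imports omitted):

```java
// Perform a privileged socket read; doPrivileged unwraps and rethrows the
// IOException raised inside the privileged block.
String readGreeting(String host, int port) throws IOException {
    return SocketAccess.doPrivileged(() -> {
        try (Socket socket = new Socket(host, port);
             BufferedReader reader = new BufferedReader(
                     new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8))) {
            return reader.readLine();
        }
    });
}
```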
+ * + * @param checks The functional checks to execute using the mapper function + * @param mapper The function that executes the lambda check with the appropriate arguments + * @param The signature of the check (BiFunction, Function, including the appropriate arguments) + * @return The list of {@link DeprecationIssue} that were found in the cluster + */ + public static List filterChecks(List checks, Function mapper) { + return checks.stream().map(mapper).filter(Objects::nonNull).collect(Collectors.toList()); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Response extends ActionResponse implements ToXContentObject { + private List clusterSettingsIssues; + private List nodeSettingsIssues; + private Map> indexSettingsIssues; + + public Response() { + } + + public Response(List clusterSettingsIssues, + List nodeSettingsIssues, + Map> indexSettingsIssues) { + this.clusterSettingsIssues = clusterSettingsIssues; + this.nodeSettingsIssues = nodeSettingsIssues; + this.indexSettingsIssues = indexSettingsIssues; + } + + public List getClusterSettingsIssues() { + return clusterSettingsIssues; + } + + public List getNodeSettingsIssues() { + return nodeSettingsIssues; + } + + public Map> getIndexSettingsIssues() { + return indexSettingsIssues; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + clusterSettingsIssues = in.readList(DeprecationIssue::new); + nodeSettingsIssues = in.readList(DeprecationIssue::new); + indexSettingsIssues = in.readMapOfLists(StreamInput::readString, DeprecationIssue::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeList(clusterSettingsIssues); + out.writeList(nodeSettingsIssues); + out.writeMapOfLists(indexSettingsIssues, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .array("cluster_settings", clusterSettingsIssues.toArray()) + .array("node_settings", nodeSettingsIssues.toArray()) + .field("index_settings") + .map(indexSettingsIssues) + .endObject(); + } + + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return Objects.equals(clusterSettingsIssues, response.clusterSettingsIssues) && + Objects.equals(nodeSettingsIssues, response.nodeSettingsIssues) && + Objects.equals(indexSettingsIssues, response.indexSettingsIssues); + } + + @Override + public int hashCode() { + return Objects.hash(clusterSettingsIssues, nodeSettingsIssues, indexSettingsIssues); + } + + /** + * This is the function that does the bulk of the logic of taking the appropriate ES dependencies + * like {@link NodeInfo}, {@link ClusterState}. Alongside these objects and the list of deprecation checks, + * this function will run through all the checks and build out the final list of issues that exist in the + * cluster. 
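A sketch of how filterChecks is intended to be used; the check, its threshold, message and URL are illustrative only:

```java
// A cluster-level check is a function returning a DeprecationIssue, or null when the
// cluster is unaffected; filterChecks runs every check and drops the null results.
List<DeprecationIssue> exampleIssues(ClusterState clusterState) {
    Function<ClusterState, DeprecationIssue> exampleCheck = state ->
            state.getNodes().getSize() > 100
                    ? new DeprecationIssue(DeprecationIssue.Level.WARNING,
                            "example: very large cluster", "https://example.invalid/docs", null)
                    : null;
    return DeprecationInfoAction.filterChecks(
            Collections.singletonList(exampleCheck), check -> check.apply(clusterState));
}
```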
+ * + * @param nodesInfo The list of {@link NodeInfo} metadata objects for retrieving node-level information + * @param nodesStats The list of {@link NodeStats} metadata objects for retrieving node-level information + * @param state The cluster state + * @param indexNameExpressionResolver Used to resolve indices into their concrete names + * @param indices The list of index expressions to evaluate using `indexNameExpressionResolver` + * @param indicesOptions The options to use when resolving and filtering which indices to check + * @param clusterSettingsChecks The list of cluster-level checks + * @param nodeSettingsChecks The list of node-level checks + * @param indexSettingsChecks The list of index-level checks that will be run across all specified + * concrete indices + * @return The list of deprecation issues found in the cluster + */ + public static DeprecationInfoAction.Response from(List nodesInfo, List nodesStats, ClusterState state, + IndexNameExpressionResolver indexNameExpressionResolver, + String[] indices, IndicesOptions indicesOptions, + List>clusterSettingsChecks, + List, List, DeprecationIssue>> nodeSettingsChecks, + List> indexSettingsChecks) { + List clusterSettingsIssues = filterChecks(clusterSettingsChecks, + (c) -> c.apply(state)); + List nodeSettingsIssues = filterChecks(nodeSettingsChecks, + (c) -> c.apply(nodesInfo, nodesStats)); + + String[] concreteIndexNames = indexNameExpressionResolver.concreteIndexNames(state, indicesOptions, indices); + + Map> indexSettingsIssues = new HashMap<>(); + for (String concreteIndex : concreteIndexNames) { + IndexMetaData indexMetaData = state.getMetaData().index(concreteIndex); + List singleIndexIssues = filterChecks(indexSettingsChecks, + c -> c.apply(indexMetaData)); + if (singleIndexIssues.size() > 0) { + indexSettingsIssues.put(concreteIndex, singleIndexIssues); + } + } + + return new DeprecationInfoAction.Response(clusterSettingsIssues, nodeSettingsIssues, indexSettingsIssues); + } + } + + public static class Request extends MasterNodeReadRequest implements IndicesRequest.Replaceable { + + private String[] indices = Strings.EMPTY_ARRAY; + private static final IndicesOptions INDICES_OPTIONS = IndicesOptions.fromOptions(false, true, + true, true); + + public Request() { + } + + public Request(String... indices) { + this.indices = indices; + } + + public Request(StreamInput in) throws IOException { + super(in); + indices = in.readStringArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + } + + @Override + public String[] indices() { + return indices; + } + + @Override + public Request indices(String... 
indices) { + this.indices = indices; + return this; + } + + @Override + public IndicesOptions indicesOptions() { + return INDICES_OPTIONS; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (indices == null) { + validationException = addValidationError("index/indices is missing", validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Arrays.equals(indices, request.indices); + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(indices)); + } + + } + + public static class RequestBuilder extends MasterNodeReadOperationRequestBuilder { + + protected RequestBuilder(ElasticsearchClient client, DeprecationInfoAction action) { + super(client, action, new Request()); + } + + public RequestBuilder setIndices(String... indices) { + request.indices(indices); + return this; + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationIssue.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationIssue.java new file mode 100644 index 0000000000000..ff1b0d303d022 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/deprecation/DeprecationIssue.java @@ -0,0 +1,136 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.deprecation; + + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Locale; +import java.util.Objects; + +/** + * Information about deprecated items + */ +public class DeprecationIssue implements Writeable, ToXContentObject { + + public enum Level implements Writeable { + NONE, + INFO, + WARNING, + CRITICAL + ; + + public static Level fromString(String value) { + return Level.valueOf(value.toUpperCase(Locale.ROOT)); + } + + public static Level readFromStream(StreamInput in) throws IOException { + int ordinal = in.readVInt(); + if (ordinal < 0 || ordinal >= values().length) { + throw new IOException("Unknown Level ordinal [" + ordinal + "]"); + } + return values()[ordinal]; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(ordinal()); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } + + private Level level; + private String message; + private String url; + private String details; + + // pkg-private for tests + DeprecationIssue() { + + } + + public DeprecationIssue(Level level, String message, String url, @Nullable String details) { + this.level = level; + this.message = message; + this.url = url; + this.details = details; + } + + public DeprecationIssue(StreamInput in) throws IOException { + level = Level.readFromStream(in); + message = in.readString(); + url = in.readString(); + details = in.readOptionalString(); + } + + + public Level getLevel() { + return level; + } + + public String getMessage() { + return message; + } + + public String getUrl() { + return url; + } + + public String getDetails() { + return details; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + level.writeTo(out); + out.writeString(message); + out.writeString(url); + out.writeOptionalString(details); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject() + .field("level", level) + .field("message", message) + .field("url", url); + if (details != null) { + builder.field("details", details); + } + return builder.endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + DeprecationIssue that = (DeprecationIssue) o; + return Objects.equals(level, that.level) && + Objects.equals(message, that.message) && + Objects.equals(url, that.url) && + Objects.equals(details, that.details); + } + + @Override + public int hashCode() { + return Objects.hash(level, message, url, details); + } +} + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/GraphFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/GraphFeatureSetUsage.java new file mode 100644 index 0000000000000..1fd35f20f04ed --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/GraphFeatureSetUsage.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.graph; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; + +import java.io.IOException; + +public class GraphFeatureSetUsage extends XPackFeatureSet.Usage { + + public GraphFeatureSetUsage(StreamInput input) throws IOException { + super(input); + } + + public GraphFeatureSetUsage(boolean available, boolean enabled) { + super(XPackField.GRAPH, available, enabled); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Connection.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Connection.java new file mode 100644 index 0000000000000..f3d9289644918 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Connection.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.graph.action; + +import com.carrotsearch.hppc.ObjectIntHashMap; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.ToXContent.Params; +import org.elasticsearch.xpack.core.graph.action.Vertex.VertexId; + +import java.io.IOException; +import java.util.Map; + +/** + * A Connection links exactly two {@link Vertex} objects. The basis of a + * connection is one or more documents have been found that contain + * this pair of terms and the strength of the connection is recorded + * as a weight. + */ +public class Connection { + Vertex from; + Vertex to; + double weight; + long docCount; + + public Connection(Vertex from, Vertex to, double weight, long docCount) { + this.from = from; + this.to = to; + this.weight = weight; + this.docCount = docCount; + } + + void readFrom(StreamInput in, Map vertices) throws IOException { + from = vertices.get(new VertexId(in.readString(), in.readString())); + to = vertices.get(new VertexId(in.readString(), in.readString())); + weight = in.readDouble(); + docCount = in.readVLong(); + } + + Connection() { + } + + void writeTo(StreamOutput out) throws IOException { + out.writeString(from.getField()); + out.writeString(from.getTerm()); + out.writeString(to.getField()); + out.writeString(to.getTerm()); + out.writeDouble(weight); + out.writeVLong(docCount); + } + + public ConnectionId getId() { + return new ConnectionId(from.getId(), to.getId()); + } + + public Vertex getFrom() { + return from; + } + + public Vertex getTo() { + return to; + } + + /** + * @return a measure of the relative connectedness between a pair of {@link Vertex} objects + */ + public double getWeight() { + return weight; + } + + /** + * @return the number of documents in the sampled set that contained this + * pair of {@link Vertex} objects. 
+ */ + public long getDocCount() { + return docCount; + } + + void toXContent(XContentBuilder builder, Params params, ObjectIntHashMap vertexNumbers) throws IOException { + builder.field("source", vertexNumbers.get(from)); + builder.field("target", vertexNumbers.get(to)); + builder.field("weight", weight); + builder.field("doc_count", docCount); + } + + /** + * An identifier (implements hashcode and equals) that represents a + * unique key for a {@link Connection} + */ + public static class ConnectionId { + private final VertexId source; + private final VertexId target; + + public ConnectionId(VertexId source, VertexId target) { + this.source = source; + this.target = target; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + + ConnectionId vertexId = (ConnectionId) o; + + if (source != null ? !source.equals(vertexId.source) : vertexId.source != null) + return false; + if (target != null ? !target.equals(vertexId.target) : vertexId.target != null) + return false; + + return true; + } + + @Override + public int hashCode() { + int result = source != null ? source.hashCode() : 0; + result = 31 * result + (target != null ? target.hashCode() : 0); + return result; + } + + public VertexId getSource() { + return source; + } + + public VertexId getTarget() { + return target; + } + + @Override + public String toString() { + return getSource() + "->" + getTarget(); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java new file mode 100644 index 0000000000000..2442beb2e250b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreAction.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.graph.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public class GraphExploreAction extends Action { + + public static final GraphExploreAction INSTANCE = new GraphExploreAction(); + public static final String NAME = "indices:data/read/xpack/graph/explore"; + + private GraphExploreAction() { + super(NAME); + } + + @Override + public GraphExploreResponse newResponse() { + return new GraphExploreResponse(); + } + + @Override + public GraphExploreRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new GraphExploreRequestBuilder(client, this); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequest.java new file mode 100644 index 0000000000000..e44f9f7603752 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequest.java @@ -0,0 +1,335 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.graph.action; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; + +/** + * Holds the criteria required to guide the exploration of connected terms which + * can be returned as a graph. + */ +public class GraphExploreRequest extends ActionRequest implements IndicesRequest.Replaceable { + + public static final String NO_HOPS_ERROR_MESSAGE = "Graph explore request must have at least one hop"; + public static final String NO_VERTICES_ERROR_MESSAGE = "Graph explore hop must have at least one VertexRequest"; + private String[] indices = Strings.EMPTY_ARRAY; + private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false); + private String[] types = Strings.EMPTY_ARRAY; + private String routing; + private TimeValue timeout; + + private int sampleSize = SamplerAggregationBuilder.DEFAULT_SHARD_SAMPLE_SIZE; + private String sampleDiversityField; + private int maxDocsPerDiversityValue; + private boolean useSignificance = true; + private boolean returnDetailedInfo; + + private List hops = new ArrayList<>(); + + public GraphExploreRequest() { + } + + /** + * Constructs a new graph request to run against the provided + * indices. No indices means it will run against all indices. + */ + public GraphExploreRequest(String... indices) { + this.indices = indices; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (hops.size() == 0) { + validationException = ValidateActions.addValidationError(NO_HOPS_ERROR_MESSAGE, validationException); + } + for (Hop hop : hops) { + validationException = hop.validate(validationException); + } + return validationException; + } + + @Override + public String[] indices() { + return this.indices; + } + + + @Override + public GraphExploreRequest indices(String... indices) { + this.indices = indices; + return this; + } + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + public GraphExploreRequest indicesOptions(IndicesOptions indicesOptions) { + if (indicesOptions == null) { + throw new IllegalArgumentException("IndicesOptions must not be null"); + } + this.indicesOptions = indicesOptions; + return this; + } + + public String[] types() { + return this.types; + } + + public GraphExploreRequest types(String... types) { + this.types = types; + return this; + } + + public String routing() { + return this.routing; + } + + public GraphExploreRequest routing(String routing) { + this.routing = routing; + return this; + } + + public GraphExploreRequest routing(String... 
routings) { + this.routing = Strings.arrayToCommaDelimitedString(routings); + return this; + } + + public TimeValue timeout() { + return timeout; + } + + /** + * Graph exploration can be set to timeout after the given period. Search operations involved in + * each hop are limited to the remaining time available but can still overrun due to the nature + * of their "best efforts" timeout support. When a timeout occurs partial results are returned. + * @param timeout a {@link TimeValue} object which determines the maximum length of time to spend exploring + */ + public GraphExploreRequest timeout(TimeValue timeout) { + if (timeout == null) { + throw new IllegalArgumentException("timeout must not be null"); + } + this.timeout = timeout; + return this; + } + + public GraphExploreRequest timeout(String timeout) { + timeout(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout")); + return this; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + types = in.readStringArray(); + routing = in.readOptionalString(); + timeout = in.readOptionalTimeValue(); + sampleSize = in.readInt(); + sampleDiversityField = in.readOptionalString(); + maxDocsPerDiversityValue = in.readInt(); + + useSignificance = in.readBoolean(); + returnDetailedInfo = in.readBoolean(); + + int numHops = in.readInt(); + Hop parentHop = null; + for (int i = 0; i < numHops; i++) { + Hop hop = new Hop(parentHop); + hop.readFrom(in); + hops.add(hop); + parentHop = hop; + } + + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + out.writeStringArray(types); + out.writeOptionalString(routing); + out.writeOptionalTimeValue(timeout); + + out.writeInt(sampleSize); + out.writeOptionalString(sampleDiversityField); + out.writeInt(maxDocsPerDiversityValue); + + out.writeBoolean(useSignificance); + out.writeBoolean(returnDetailedInfo); + out.writeInt(hops.size()); + for (Iterator iterator = hops.iterator(); iterator.hasNext();) { + Hop hop = iterator.next(); + hop.writeTo(out); + } + } + + @Override + public String toString() { + return "graph explore [" + Arrays.toString(indices) + "][" + Arrays.toString(types) + "]"; + } + + /** + * The number of top-matching documents that are considered during each hop (default is + * {@link SamplerAggregationBuilder#DEFAULT_SHARD_SAMPLE_SIZE} + * Very small values (less than 50) may not provide sufficient weight-of-evidence to identify + * significant connections between terms. + *

Very large values (many thousands) are not recommended with loosely defined queries (fuzzy queries or those + * with many OR clauses). + * This is because any useful signals in the best documents are diluted with irrelevant noise from low-quality matches. + * Performance is also typically better with smaller samples as there are fewer look-ups required for background frequencies + * of terms found in the documents. + *

+ * + * @param maxNumberOfDocsPerHop shard-level sample size in documents + */ + public void sampleSize(int maxNumberOfDocsPerHop) { + sampleSize = maxNumberOfDocsPerHop; + } + + public int sampleSize() { + return sampleSize; + } + + /** + * Optional choice of single-value field on which to diversify sampled + * search results + */ + public void sampleDiversityField(String name) { + sampleDiversityField = name; + } + + public String sampleDiversityField() { + return sampleDiversityField; + } + + /** + * Optional number of permitted docs with same value in sampled search + * results. Must also declare which field using sampleDiversityField + */ + public void maxDocsPerDiversityValue(int maxDocs) { + this.maxDocsPerDiversityValue = maxDocs; + } + + public int maxDocsPerDiversityValue() { + return maxDocsPerDiversityValue; + } + + /** + * Controls the choice of algorithm used to select interesting terms. The default + * value is true which means terms are selected based on significance (see the {@link SignificantTerms} + * aggregation) rather than popularity (using the {@link TermsAggregator}). + * @param value true if the significant_terms algorithm should be used. + */ + public void useSignificance(boolean value) { + this.useSignificance = value; + } + + public boolean useSignificance() { + return useSignificance; + } + + /** + * Return detailed information about vertex frequencies as part of JSON results - defaults to false + * @param value true if detailed information is required in JSON responses + */ + public void returnDetailedInfo(boolean value) { + this.returnDetailedInfo = value; + } + + public boolean returnDetailedInfo() { + return returnDetailedInfo; + } + + + /** + * Add a stage in the graph exploration. Each hop represents a stage of + * querying elasticsearch to identify terms which can then be connnected + * to other terms in a subsequent hop. 
+ * @param guidingQuery optional choice of query which influences which documents + * are considered in this stage + * @return a {@link Hop} object that holds settings for a stage in the graph exploration + */ + public Hop createNextHop(QueryBuilder guidingQuery) { + Hop parent = null; + if (hops.size() > 0) { + parent = hops.get(hops.size() - 1); + } + Hop newHop = new Hop(parent); + newHop.guidingQuery = guidingQuery; + hops.add(newHop); + return newHop; + } + + public int getHopNumbers() { + return hops.size(); + } + + public Hop getHop(int hopNumber) { + return hops.get(hopNumber); + } + + public static class TermBoost { + String term; + float boost; + + public TermBoost(String term, float boost) { + super(); + this.term = term; + if (boost <= 0) { + throw new IllegalArgumentException("Boosts must be a positive non-zero number"); + } + this.boost = boost; + } + + TermBoost() { + } + + public String getTerm() { + return term; + } + + public float getBoost() { + return boost; + } + + void readFrom(StreamInput in) throws IOException { + this.term = in.readString(); + this.boost = in.readFloat(); + } + + void writeTo(StreamOutput out) throws IOException { + out.writeString(term); + out.writeFloat(boost); + } + + } + + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequestBuilder.java new file mode 100644 index 0000000000000..c6044d1aabf12 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreRequestBuilder.java @@ -0,0 +1,158 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.graph.action; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator; + +/** + * Creates a new {@link GraphExploreRequestBuilder} + * + * @see GraphExploreRequest + */ +public class GraphExploreRequestBuilder extends ActionRequestBuilder { + + public GraphExploreRequestBuilder(ElasticsearchClient client, GraphExploreAction action) { + super(client, action, new GraphExploreRequest()); + } + + public GraphExploreRequestBuilder setIndices(String... indices) { + request.indices(indices); + return this; + } + + /** + * Specifies what type of requested indices to ignore and wildcard indices expressions. + *
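To make the hop-based request API concrete, a sketch of building a two-hop exploration follows; the index name, field names and guiding query are illustrative:

```java
// Mirrors the weblog example in Hop's javadoc: significant IPs hitting "admin" URLs,
// then the other URLs those IPs accessed.
GraphExploreRequest request = new GraphExploreRequest("weblogs");
request.sampleSize(500);        // shard-level sample of top-matching docs per hop
request.useSignificance(true);  // rank terms by significance rather than raw popularity

Hop hop1 = request.createNextHop(QueryBuilders.matchQuery("url", "admin"));
hop1.addVertexRequest("ip").size(10);

Hop hop2 = request.createNextHop(null); // no guiding query for the second stage
hop2.addVertexRequest("url").minDocCount(5);
```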

+ * For example indices that don't exist. + */ + public GraphExploreRequestBuilder setIndicesOptions(IndicesOptions options) { + request.indicesOptions(options); + return this; + } + + /** + * A comma separated list of routing values to control the shards the action will be executed on. + */ + public GraphExploreRequestBuilder setRouting(String routing) { + request.routing(routing); + return this; + } + + /** + * The routing values to control the shards that the action will be executed on. + */ + public GraphExploreRequestBuilder setRouting(String... routing) { + request.routing(routing); + return this; + } + + /** + * Optional choice of single-value field on which to diversify sampled + * search results + */ + public GraphExploreRequestBuilder sampleDiversityField(String fieldName) { + request.sampleDiversityField(fieldName); + return this; + } + + public String sampleDiversityField() { + return request.sampleDiversityField(); + } + + /** + * Optional number of permitted docs with same value in sampled search + * results. Must also declare which field using sampleDiversityField + */ + public GraphExploreRequestBuilder maxDocsPerDiversityValue(int max) { + request.maxDocsPerDiversityValue(max); + return this; + } + + public int maxDocsPerDiversityValue() { + return request.maxDocsPerDiversityValue(); + } + + + /** + * An optional timeout to control how long the graph exploration is allowed + * to take. + */ + public GraphExploreRequestBuilder setTimeout(TimeValue timeout) { + request.timeout(timeout); + return this; + } + + /** + * An optional timeout to control how long the graph exploration is allowed + * to take. + */ + public GraphExploreRequestBuilder setTimeout(String timeout) { + request.timeout(timeout); + return this; + } + + /** + * The types of documents the graph exploration will run against. Defaults + * to all types. + */ + public GraphExploreRequestBuilder setTypes(String... types) { + request.types(types); + return this; + } + + /** + * Add a stage in the graph exploration. Each hop represents a stage of + * querying elasticsearch to identify terms which can then be connnected + * to other terms in a subsequent hop. + * @param guidingQuery optional choice of query which influences which documents + * are considered in this stage + * @return a {@link Hop} object that holds settings for a stage in the graph exploration + */ + public Hop createNextHop(@Nullable QueryBuilder guidingQuery) { + return request.createNextHop(guidingQuery); + } + + /** + * Controls the choice of algorithm used to select interesting terms. The default + * value is true which means terms are selected based on significance (see the {@link SignificantTerms} + * aggregation) rather than popularity (using the {@link TermsAggregator}). + * @param value true if the significant_terms algorithm should be used. + */ + public GraphExploreRequestBuilder useSignificance(boolean value) { + request.useSignificance(value); + return this; + } + + + /** + * The number of top-matching documents that are considered during each hop (default is + * {@link SamplerAggregationBuilder#DEFAULT_SHARD_SAMPLE_SIZE} + * Very small values (less than 50) may not provide sufficient weight-of-evidence to identify + * significant connections between terms. + *

Very large values (many thousands) are not recommended with loosely defined queries (fuzzy queries or + * those with many OR clauses). + * This is because any useful signals in the best documents are diluted with irrelevant noise from low-quality matches. + * Performance is also typically better with smaller samples as there are fewer look-ups required for background frequencies + * of terms found in the documents. + *

+ * + * @param maxNumberOfDocsPerHop the shard-level sample size in documents + */ + public GraphExploreRequestBuilder sampleSize(int maxNumberOfDocsPerHop) { + request.sampleSize(maxNumberOfDocsPerHop); + return this; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreResponse.java new file mode 100644 index 0000000000000..3d6c5f5aaca5e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/GraphExploreResponse.java @@ -0,0 +1,202 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.graph.action; + +import com.carrotsearch.hppc.ObjectIntHashMap; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.graph.action.Connection.ConnectionId; +import org.elasticsearch.xpack.core.graph.action.Vertex.VertexId; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure; + +/** + * Graph explore response holds a graph of {@link Vertex} and {@link Connection} objects + * (nodes and edges in common graph parlance). 
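A corresponding sketch of walking the resulting graph; the printing is purely illustrative:

```java
// Vertices are field/term pairs; connections link two vertices with a weight and
// the number of documents supporting the link.
void printGraph(GraphExploreResponse response) {
    for (Vertex vertex : response.getVertices()) {
        System.out.println(vertex.getField() + ":" + vertex.getTerm() + " weight=" + vertex.getWeight());
    }
    for (Connection connection : response.getConnections()) {
        System.out.println(connection.getFrom().getId() + " -> " + connection.getTo().getId()
                + " docs=" + connection.getDocCount());
    }
}
```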
+ * + * @see GraphExploreRequest + */ +public class GraphExploreResponse extends ActionResponse implements ToXContentObject { + + private long tookInMillis; + private boolean timedOut = false; + private ShardOperationFailedException[] shardFailures = ShardSearchFailure.EMPTY_ARRAY; + private Map vertices; + private Map connections; + private boolean returnDetailedInfo; + static final String RETURN_DETAILED_INFO_PARAM = "returnDetailedInfo"; + + public GraphExploreResponse() { + } + + public GraphExploreResponse(long tookInMillis, boolean timedOut, ShardOperationFailedException[] shardFailures, + Map vertices, Map connections, boolean returnDetailedInfo) { + this.tookInMillis = tookInMillis; + this.timedOut = timedOut; + this.shardFailures = shardFailures; + this.vertices = vertices; + this.connections = connections; + this.returnDetailedInfo = returnDetailedInfo; + } + + + public TimeValue getTook() { + return new TimeValue(tookInMillis); + } + + public long getTookInMillis() { + return tookInMillis; + } + + /** + * @return true if the time stated in {@link GraphExploreRequest#timeout(TimeValue)} was exceeded + * (not all hops may have been completed in this case) + */ + public boolean isTimedOut() { + return this.timedOut; + } + public ShardOperationFailedException[] getShardFailures() { + return shardFailures; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + tookInMillis = in.readVLong(); + timedOut = in.readBoolean(); + + int size = in.readVInt(); + if (size == 0) { + shardFailures = ShardSearchFailure.EMPTY_ARRAY; + } else { + shardFailures = new ShardSearchFailure[size]; + for (int i = 0; i < shardFailures.length; i++) { + shardFailures[i] = readShardSearchFailure(in); + } + } + // read vertices + size = in.readVInt(); + vertices = new HashMap<>(); + for (int i = 0; i < size; i++) { + Vertex n = Vertex.readFrom(in); + vertices.put(n.getId(), n); + } + + size = in.readVInt(); + + connections = new HashMap<>(); + for (int i = 0; i < size; i++) { + Connection e = new Connection(); + e.readFrom(in, vertices); + connections.put(e.getId(), e); + } + + returnDetailedInfo = in.readBoolean(); + + } + + public Collection getConnections() { + return connections.values(); + } + + public Collection getVertices() { + return vertices.values(); + } + + public Vertex getVertex(VertexId id) { + return vertices.get(id); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVLong(tookInMillis); + out.writeBoolean(timedOut); + + out.writeVInt(shardFailures.length); + for (ShardOperationFailedException shardSearchFailure : shardFailures) { + shardSearchFailure.writeTo(out); + } + + out.writeVInt(vertices.size()); + for (Vertex vertex : vertices.values()) { + vertex.writeTo(out); + } + + out.writeVInt(connections.size()); + for (Connection connection : connections.values()) { + connection.writeTo(out); + } + + out.writeBoolean(returnDetailedInfo); + + } + + static final class Fields { + static final String TOOK = "took"; + static final String TIMED_OUT = "timed_out"; + static final String INDICES = "_indices"; + static final String FAILURES = "failures"; + static final String VERTICES = "vertices"; + static final String CONNECTIONS = "connections"; + + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Fields.TOOK, tookInMillis); + builder.field(Fields.TIMED_OUT, timedOut); + + 
builder.startArray(Fields.FAILURES); + if (shardFailures != null) { + for (ShardOperationFailedException shardFailure : shardFailures) { + builder.startObject(); + shardFailure.toXContent(builder, params); + builder.endObject(); + } + } + builder.endArray(); + + ObjectIntHashMap vertexNumbers = new ObjectIntHashMap<>(vertices.size()); + + Map extraParams = new HashMap<>(); + extraParams.put(RETURN_DETAILED_INFO_PARAM, Boolean.toString(returnDetailedInfo)); + Params extendedParams = new DelegatingMapParams(extraParams, params); + + builder.startArray(Fields.VERTICES); + for (Vertex vertex : vertices.values()) { + builder.startObject(); + vertexNumbers.put(vertex, vertexNumbers.size()); + vertex.toXContent(builder, extendedParams); + builder.endObject(); + } + builder.endArray(); + + builder.startArray(Fields.CONNECTIONS); + for (Connection connection : connections.values()) { + builder.startObject(); + connection.toXContent(builder, extendedParams, vertexNumbers); + builder.endObject(); + } + builder.endArray(); + builder.endObject(); + return builder; + } + + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Hop.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Hop.java new file mode 100644 index 0000000000000..8ba7005f15fcf --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Hop.java @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.graph.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * A Hop represents one of potentially many stages in a graph exploration. + * Each Hop identifies one or more fields in which it will attempt to find + * terms that are significantly connected to the previous Hop. Each field is identified + * using a {@link VertexRequest} + * + *

An example series of Hops on webserver logs would be:
 + * 1. an initial Hop to find the top ten IPAddresses trying to access urls containing the word "admin"
 + * 2. a secondary Hop to see which other URLs those IPAddresses were trying to access
 + *
 + * Optionally, each hop can contain a "guiding query" that further limits the set of documents considered.
 + * In our weblog example above we might choose to constrain the second hop to only look at log records that
 + * had a response code of 404.
 + *
 + * If absent, the list of {@link VertexRequest}s is inherited from the prior Hop's list to avoid repeating
 + * the fields that will be examined at each stage.
+ * + */ +public class Hop { + final Hop parentHop; + List vertices = null; + QueryBuilder guidingQuery = null; + + public Hop(Hop parent) { + this.parentHop = parent; + } + + public ActionRequestValidationException validate(ActionRequestValidationException validationException) { + + if (getEffectiveVertexRequests().size() == 0) { + validationException = ValidateActions.addValidationError(GraphExploreRequest.NO_VERTICES_ERROR_MESSAGE, validationException); + } + return validationException; + + } + + public Hop getParentHop() { + return parentHop; + } + + void writeTo(StreamOutput out) throws IOException { + out.writeOptionalNamedWriteable(guidingQuery); + if (vertices == null) { + out.writeVInt(0); + } else { + out.writeVInt(vertices.size()); + for (VertexRequest vr : vertices) { + vr.writeTo(out); + } + } + } + + void readFrom(StreamInput in) throws IOException { + guidingQuery = in.readOptionalNamedWriteable(QueryBuilder.class); + int size = in.readVInt(); + if (size > 0) { + vertices = new ArrayList<>(); + for (int i = 0; i < size; i++) { + VertexRequest vr = new VertexRequest(); + vr.readFrom(in); + vertices.add(vr); + } + } + } + + public QueryBuilder guidingQuery() { + if (guidingQuery != null) { + return guidingQuery; + } + return QueryBuilders.matchAllQuery(); + } + + /** + * Add a field in which this {@link Hop} will look for terms that are highly linked to + * previous hops and optionally the guiding query. + * + * @param fieldName a field in the chosen index + */ + public VertexRequest addVertexRequest(String fieldName) { + if (vertices == null) { + vertices = new ArrayList<>(); + } + VertexRequest vr = new VertexRequest(); + vr.fieldName(fieldName); + vertices.add(vr); + return vr; + } + + /** + * An optional parameter that focuses the exploration on documents that + * match the given query. + * + * @param queryBuilder any query + */ + public void guidingQuery(QueryBuilder queryBuilder) { + guidingQuery = queryBuilder; + } + + protected List getEffectiveVertexRequests() { + if (vertices != null) { + return vertices; + } + if (parentHop == null) { + return Collections.emptyList(); + } + // otherwise inherit settings from parent + return parentHop.getEffectiveVertexRequests(); + } + + public int getNumberVertexRequests() { + return getEffectiveVertexRequests().size(); + } + + public VertexRequest getVertexRequest(int requestNumber) { + return getEffectiveVertexRequests().get(requestNumber); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Vertex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Vertex.java new file mode 100644 index 0000000000000..c85d6d7dfd6e1 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/Vertex.java @@ -0,0 +1,189 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.graph.action; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * A vertex in a graph response represents a single term (a field and value pair) + * which appears in one or more documents found as part of the graph exploration. + * + * A vertex term could be a bank account number, an email address, a hashtag or any + * other term that appears in documents and is interesting to represent in a network. + */ +public class Vertex implements ToXContentFragment { + + private final String field; + private final String term; + private double weight; + private final int depth; + private final long bg; + private long fg; + + public Vertex(String field, String term, double weight, int depth, long bg, long fg) { + super(); + this.field = field; + this.term = term; + this.weight = weight; + this.depth = depth; + this.bg = bg; + this.fg = fg; + } + + static Vertex readFrom(StreamInput in) throws IOException { + return new Vertex(in.readString(), in.readString(), in.readDouble(), in.readVInt(), in.readVLong(), in.readVLong()); + } + + void writeTo(StreamOutput out) throws IOException { + out.writeString(field); + out.writeString(term); + out.writeDouble(weight); + out.writeVInt(depth); + out.writeVLong(bg); + out.writeVLong(fg); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + boolean returnDetailedInfo = params.paramAsBoolean(GraphExploreResponse.RETURN_DETAILED_INFO_PARAM, false); + builder.field("field", field); + builder.field("term", term); + builder.field("weight", weight); + builder.field("depth", depth); + if (returnDetailedInfo) { + builder.field("fg", fg); + builder.field("bg", bg); + } + return builder; + } + + /** + * @return a {@link VertexId} object that uniquely identifies this Vertex + */ + public VertexId getId() { + return createId(field, term); + } + + /** + * A convenience method for creating a {@link VertexId} + * @param field the field + * @param term the term + * @return a {@link VertexId} that can be used for looking up vertices + */ + public static VertexId createId(String field, String term) { + return new VertexId(field,term); + } + + @Override + public String toString() { + return getId().toString(); + } + + public String getField() { + return field; + } + + public String getTerm() { + return term; + } + + /** + * The weight of a vertex is an accumulation of all of the {@link Connection}s + * that are linked to this {@link Vertex} as part of a graph exploration. + * It is used internally to identify the most interesting vertices to be returned. + * @return a measure of the {@link Vertex}'s relative importance. + */ + public double getWeight() { + return weight; + } + + public void setWeight(final double weight) { + this.weight = weight; + } + + /** + * If the {@link GraphExploreRequest#useSignificance(boolean)} is true (the default) + * this statistic is available. + * @return the number of documents in the index that contain this term (see bg_count in + * + * the significant_terms aggregation) + */ + public long getBg() { + return bg; + } + + /** + * If the {@link GraphExploreRequest#useSignificance(boolean)} is true (the default) + * this statistic is available. 
+ * Together with {@link #getBg()} these numbers are used to derive the significance of a term. + * @return the number of documents in the sample of best matching documents that contain this term (see fg_count in + * + * the significant_terms aggregation) + */ + public long getFg() { + return fg; + } + + public void setFg(final long fg) { + this.fg = fg; + } + + /** + * @return the sequence number in the series of hops where this Vertex term was first encountered + */ + public int getHopDepth() { + return depth; + } + + /** + * An identifier (implements hashcode and equals) that represents a + * unique key for a {@link Vertex} + */ + public static class VertexId { + private final String field; + private final String term; + + public VertexId(String field, String term) { + this.field = field; + this.term = term; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + + VertexId vertexId = (VertexId) o; + + if (field != null ? !field.equals(vertexId.field) : vertexId.field != null) + return false; + if (term != null ? !term.equals(vertexId.term) : vertexId.term != null) + return false; + + return true; + } + + @Override + public int hashCode() { + int result = field != null ? field.hashCode() : 0; + result = 31 * result + (term != null ? term.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return field + ":" + term; + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/VertexRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/VertexRequest.java new file mode 100644 index 0000000000000..f7f7dec4b1722 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/action/VertexRequest.java @@ -0,0 +1,198 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.graph.action; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest.TermBoost; + +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +/** + * A request to identify terms from a choice of field as part of a {@link Hop}. + * Optionally, a set of terms can be provided that are used as an exclusion or + * inclusion list to filter which terms are considered. 
+ * + */ +public class VertexRequest { + private String fieldName; + private int size = 5; + private Map includes; + private Set excludes; + public static final int DEFAULT_MIN_DOC_COUNT = 3; + private int minDocCount = DEFAULT_MIN_DOC_COUNT; + public static final int DEFAULT_SHARD_MIN_DOC_COUNT = 2; + private int shardMinDocCount = DEFAULT_SHARD_MIN_DOC_COUNT; + + + public VertexRequest() { + + } + + void readFrom(StreamInput in) throws IOException { + fieldName = in.readString(); + size = in.readVInt(); + minDocCount = in.readVInt(); + shardMinDocCount = in.readVInt(); + + int numIncludes = in.readVInt(); + if (numIncludes > 0) { + includes = new HashMap<>(); + for (int i = 0; i < numIncludes; i++) { + TermBoost tb = new TermBoost(); + tb.readFrom(in); + includes.put(tb.term, tb); + } + } + + int numExcludes = in.readVInt(); + if (numExcludes > 0) { + excludes = new HashSet<>(); + for (int i = 0; i < numExcludes; i++) { + excludes.add(in.readString()); + } + } + + } + + void writeTo(StreamOutput out) throws IOException { + out.writeString(fieldName); + out.writeVInt(size); + out.writeVInt(minDocCount); + out.writeVInt(shardMinDocCount); + + if (includes != null) { + out.writeVInt(includes.size()); + for (TermBoost tb : includes.values()) { + tb.writeTo(out); + } + } else { + out.writeVInt(0); + } + + if (excludes != null) { + out.writeVInt(excludes.size()); + for (String term : excludes) { + out.writeString(term); + } + } else { + out.writeVInt(0); + } + } + + public String fieldName() { + return fieldName; + } + + public VertexRequest fieldName(String fieldName) { + this.fieldName = fieldName; + return this; + } + + public int size() { + return size; + } + + /** + * @param size The maximum number of terms that should be returned from this field as part of this {@link Hop} + */ + public VertexRequest size(int size) { + this.size = size; + return this; + } + + public boolean hasIncludeClauses() { + return includes != null && includes.size() > 0; + } + + public boolean hasExcludeClauses() { + return excludes != null && excludes.size() > 0; + } + + /** + * Adds a term that should be excluded from results + * @param term A term to be excluded + */ + public void addExclude(String term) { + if (includes != null) { + throw new IllegalArgumentException("Cannot have both include and exclude clauses"); + } + if (excludes == null) { + excludes = new HashSet<>(); + } + excludes.add(term); + } + + /** + * Adds a term to the set of allowed values - the boost defines the relative + * importance when pursuing connections in subsequent {@link Hop}s. The boost value + * appears as part of the query. 
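To illustrate the include/exclude behaviour described above: hop1 and hop2 are assumed to come from GraphExploreRequest#createNextHop as in the earlier sketch, and the field and term values are made up:

```java
// Includes and excludes are mutually exclusive on a single VertexRequest: either pin
// the exploration to an allow-list of boosted terms, or drop noisy ones.
VertexRequest ips = hop1.addVertexRequest("ip").size(10);
ips.addInclude("10.0.0.1", 2.0f); // boosted when pursuing connections in later hops
ips.addInclude("10.0.0.2", 1.0f);

VertexRequest urls = hop2.addVertexRequest("url");
urls.addExclude("/favicon.ico");  // never surface this term as a vertex
```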
+ * @param term a required term + * @param boost an optional boost + */ + public void addInclude(String term, float boost) { + if (excludes != null) { + throw new IllegalArgumentException("Cannot have both include and exclude clauses"); + } + if (includes == null) { + includes = new HashMap<>(); + } + includes.put(term, new TermBoost(term, boost)); + } + + public TermBoost[] includeValues() { + return includes.values().toArray(new TermBoost[includes.size()]); + } + + public String[] includeValuesAsStringArray() { + String[] result = new String[includes.size()]; + int i = 0; + for (TermBoost tb : includes.values()) { + result[i++] = tb.term; + } + return result; + } + + public String[] excludesAsArray() { + return excludes.toArray(new String[excludes.size()]); + } + + public int minDocCount() { + return minDocCount; + } + + /** + * A "certainty" threshold which defines the weight-of-evidence required before + * a term found in this field is identified as a useful connection + * + * @param value The minimum number of documents that contain this term found in the samples used across all shards + */ + public VertexRequest minDocCount(int value) { + minDocCount = value; + return this; + } + + + public int shardMinDocCount() { + return Math.min(shardMinDocCount, minDocCount); + } + + /** + * A "certainty" threshold which defines the weight-of-evidence required before + * a term found in this field is identified as a useful connection + * + * @param value The minimum number of documents that contain this term found in the samples used across all shards + */ + public VertexRequest shardMinDocCount(int value) { + shardMinDocCount = value; + return this; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/logstash/LogstashFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/logstash/LogstashFeatureSetUsage.java new file mode 100644 index 0000000000000..97879106465bb --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/logstash/LogstashFeatureSetUsage.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.logstash; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; + +import java.io.IOException; + +public class LogstashFeatureSetUsage extends XPackFeatureSet.Usage { + + public LogstashFeatureSetUsage(StreamInput in) throws IOException { + super(in); + } + + public LogstashFeatureSetUsage(boolean available, boolean enabled) { + super(XPackField.LOGSTASH, available, enabled); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MLMetadataField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MLMetadataField.java new file mode 100644 index 0000000000000..bef1b57902db1 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MLMetadataField.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml; + +public final class MLMetadataField { + + public static final String TYPE = "ml"; + + private MLMetadataField() {} + + /** + * Namespaces the task ids for datafeeds. + * A job id can be used as a datafeed id, because they are stored separately in cluster state. + */ + public static String datafeedTaskId(String datafeedId) { + return "datafeed-" + datafeedId; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java new file mode 100644 index 0000000000000..1779ca703a5d7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +public class MachineLearningFeatureSetUsage extends XPackFeatureSet.Usage { + + public static final String ALL = "_all"; + public static final String JOBS_FIELD = "jobs"; + public static final String DATAFEEDS_FIELD = "datafeeds"; + public static final String COUNT = "count"; + public static final String DETECTORS = "detectors"; + public static final String MODEL_SIZE = "model_size"; + + private final Map jobsUsage; + private final Map datafeedsUsage; + + public MachineLearningFeatureSetUsage(boolean available, boolean enabled, Map jobsUsage, + Map datafeedsUsage) { + super(XPackField.MACHINE_LEARNING, available, enabled); + this.jobsUsage = Objects.requireNonNull(jobsUsage); + this.datafeedsUsage = Objects.requireNonNull(datafeedsUsage); + } + + public MachineLearningFeatureSetUsage(StreamInput in) throws IOException { + super(in); + this.jobsUsage = in.readMap(); + this.datafeedsUsage = in.readMap(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(jobsUsage); + out.writeMap(datafeedsUsage); + } + + @Override + protected void innerXContent(XContentBuilder builder, Params params) throws IOException { + super.innerXContent(builder, params); + if (jobsUsage != null) { + builder.field(JOBS_FIELD, jobsUsage); + } + if (datafeedsUsage != null) { + builder.field(DATAFEEDS_FIELD, datafeedsUsage); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java new file mode 100644 index 0000000000000..6b5ba086c6fe0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; + +public final class MachineLearningField { + public static final Setting AUTODETECT_PROCESS = + Setting.boolSetting("xpack.ml.autodetect_process", true, Setting.Property.NodeScope); + public static final Setting MAX_MODEL_MEMORY_LIMIT = + Setting.memorySizeSetting("xpack.ml.max_model_memory_limit", new ByteSizeValue(0), + Setting.Property.Dynamic, Setting.Property.NodeScope); + public static final TimeValue STATE_PERSIST_RESTORE_TIMEOUT = TimeValue.timeValueMinutes(30); + + private MachineLearningField() {} + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlClientHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlClientHelper.java new file mode 100644 index 0000000000000..a76c5c51e8d7f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlClientHelper.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.security.authc.AuthenticationField; +import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; + +import java.util.Map; +import java.util.Set; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +/** + * A helper class for actions which decides if we should run via the _xpack user and set ML as origin + * or if we should use the run_as functionality by setting the correct headers + */ +public class MlClientHelper { + + /** + * List of headers that are related to security + */ + public static final Set SECURITY_HEADER_FILTERS = Sets.newHashSet(AuthenticationServiceField.RUN_AS_USER_HEADER, + AuthenticationField.AUTHENTICATION_KEY); + + /** + * Execute a client operation and return the response, try to run a datafeed search with least privileges, when headers exist + * + * @param datafeedConfig The config for a datafeed + * @param client The client used to query + * @param supplier The action to run + * @return An instance of the response class + */ + public static T execute(DatafeedConfig datafeedConfig, Client client, Supplier supplier) { + return execute(datafeedConfig.getHeaders(), client, supplier); + } + + /** + * Execute a client operation and return the response, try to run an action with least privileges, when headers exist + * + * @param headers Request headers, ideally including security headers + * @param client The client used to query + * @param supplier The action to run + * @return An instance of the response class + */ + public static T execute(Map headers, Client client, Supplier supplier) { + // no headers, we will have to use the xpack internal user for our execution by specifying the ml origin + if (headers == null || headers.isEmpty()) { + try (ThreadContext.StoredContext ignore = 
ClientHelper.stashWithOrigin(client.threadPool().getThreadContext(), + ClientHelper.ML_ORIGIN)) { + return supplier.get(); + } + } else { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashContext()) { + Map filteredHeaders = headers.entrySet().stream() + .filter(e -> SECURITY_HEADER_FILTERS.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + client.threadPool().getThreadContext().copyHeaders(filteredHeaders.entrySet()); + return supplier.get(); + } + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetaIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetaIndex.java new file mode 100644 index 0000000000000..d625e6e311aaf --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetaIndex.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.calendars.Calendar; +import org.elasticsearch.xpack.core.ml.calendars.ScheduledEvent; +import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; + +import java.io.IOException; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; + +public final class MlMetaIndex { + /** + * Where to store the ml info in Elasticsearch - must match what's + * expected by kibana/engineAPI/app/directives/mlLogUsage.js + */ + public static final String INDEX_NAME = ".ml-meta"; + + public static final String INCLUDE_TYPE_KEY = "include_type"; + + public static final String TYPE = "doc"; + + private MlMetaIndex() {} + + public static XContentBuilder docMapping() throws IOException { + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + builder.startObject(TYPE); + ElasticsearchMappings.addDefaultMapping(builder); + builder.startObject(ElasticsearchMappings.PROPERTIES) + .startObject(Calendar.ID.getPreferredName()) + .field(ElasticsearchMappings.TYPE, ElasticsearchMappings.KEYWORD) + .endObject() + .startObject(Calendar.JOB_IDS.getPreferredName()) + .field(ElasticsearchMappings.TYPE, ElasticsearchMappings.KEYWORD) + .endObject() + .startObject(Calendar.DESCRIPTION.getPreferredName()) + .field(ElasticsearchMappings.TYPE, ElasticsearchMappings.KEYWORD) + .endObject() + .startObject(ScheduledEvent.START_TIME.getPreferredName()) + .field(ElasticsearchMappings.TYPE, ElasticsearchMappings.DATE) + .endObject() + .startObject(ScheduledEvent.END_TIME.getPreferredName()) + .field(ElasticsearchMappings.TYPE, ElasticsearchMappings.DATE) + .endObject() + .endObject() + .endObject() + .endObject(); + return builder; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java new file mode 100644 index 0000000000000..b09a7463ffdb1 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java @@ -0,0 +1,471 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml; + +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.DiffableUtils; +import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedJobValidator; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedUpdate; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.config.JobState; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.groups.GroupOrJobLookup; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.NameResolver; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.EnumSet; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +public class MlMetadata implements MetaData.Custom { + + private static final ParseField JOBS_FIELD = new ParseField("jobs"); + private static final ParseField DATAFEEDS_FIELD = new ParseField("datafeeds"); + + public static final MlMetadata EMPTY_METADATA = new MlMetadata(Collections.emptySortedMap(), Collections.emptySortedMap()); + // This parser follows the pattern that metadata is parsed leniently (to allow for enhancements) + public static final ObjectParser METADATA_PARSER = new ObjectParser<>("ml_metadata", true, Builder::new); + + static { + METADATA_PARSER.declareObjectArray(Builder::putJobs, (p, c) -> Job.METADATA_PARSER.apply(p, c).build(), JOBS_FIELD); + METADATA_PARSER.declareObjectArray(Builder::putDatafeeds, + (p, c) -> DatafeedConfig.METADATA_PARSER.apply(p, c).build(), DATAFEEDS_FIELD); + } + + private final SortedMap jobs; + private final SortedMap datafeeds; + private final GroupOrJobLookup groupOrJobLookup; + + private MlMetadata(SortedMap jobs, SortedMap datafeeds) { + this.jobs = Collections.unmodifiableSortedMap(jobs); + this.datafeeds = Collections.unmodifiableSortedMap(datafeeds); + this.groupOrJobLookup = new GroupOrJobLookup(jobs.values()); + } + + public Map 
getJobs() { + return jobs; + } + + public boolean isGroupOrJob(String id) { + return groupOrJobLookup.isGroupOrJob(id); + } + + public Set expandJobIds(String expression, boolean allowNoJobs) { + return groupOrJobLookup.expandJobIds(expression, allowNoJobs); + } + + public boolean isJobDeleted(String jobId) { + Job job = jobs.get(jobId); + return job == null || job.isDeleted(); + } + + public SortedMap getDatafeeds() { + return datafeeds; + } + + public DatafeedConfig getDatafeed(String datafeedId) { + return datafeeds.get(datafeedId); + } + + public Optional getDatafeedByJobId(String jobId) { + return datafeeds.values().stream().filter(s -> s.getJobId().equals(jobId)).findFirst(); + } + + public Set expandDatafeedIds(String expression, boolean allowNoDatafeeds) { + return NameResolver.newUnaliased(datafeeds.keySet(), ExceptionsHelper::missingDatafeedException) + .expand(expression, allowNoDatafeeds); + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_5_4_0; + } + + @Override + public String getWriteableName() { + return MLMetadataField.TYPE; + } + + @Override + public EnumSet context() { + return MetaData.ALL_CONTEXTS; + } + + @Override + public Diff diff(MetaData.Custom previousState) { + return new MlMetadataDiff((MlMetadata) previousState, this); + } + + public MlMetadata(StreamInput in) throws IOException { + int size = in.readVInt(); + TreeMap jobs = new TreeMap<>(); + for (int i = 0; i < size; i++) { + jobs.put(in.readString(), new Job(in)); + } + this.jobs = jobs; + size = in.readVInt(); + TreeMap datafeeds = new TreeMap<>(); + for (int i = 0; i < size; i++) { + datafeeds.put(in.readString(), new DatafeedConfig(in)); + } + this.datafeeds = datafeeds; + + this.groupOrJobLookup = new GroupOrJobLookup(jobs.values()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + writeMap(jobs, out); + writeMap(datafeeds, out); + } + + private static void writeMap(Map map, StreamOutput out) throws IOException { + out.writeVInt(map.size()); + for (Map.Entry entry : map.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + DelegatingMapParams extendedParams = + new DelegatingMapParams(Collections.singletonMap(ToXContentParams.FOR_CLUSTER_STATE, "true"), params); + mapValuesToXContent(JOBS_FIELD, jobs, builder, extendedParams); + mapValuesToXContent(DATAFEEDS_FIELD, datafeeds, builder, extendedParams); + return builder; + } + + private static void mapValuesToXContent(ParseField field, Map map, XContentBuilder builder, + Params params) throws IOException { + builder.startArray(field.getPreferredName()); + for (Map.Entry entry : map.entrySet()) { + entry.getValue().toXContent(builder, params); + } + builder.endArray(); + } + + public static class MlMetadataDiff implements NamedDiff { + + final Diff> jobs; + final Diff> datafeeds; + + MlMetadataDiff(MlMetadata before, MlMetadata after) { + this.jobs = DiffableUtils.diff(before.jobs, after.jobs, DiffableUtils.getStringKeySerializer()); + this.datafeeds = DiffableUtils.diff(before.datafeeds, after.datafeeds, DiffableUtils.getStringKeySerializer()); + } + + public MlMetadataDiff(StreamInput in) throws IOException { + this.jobs = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), Job::new, + MlMetadataDiff::readJobDiffFrom); + this.datafeeds = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), 
DatafeedConfig::new, + MlMetadataDiff::readSchedulerDiffFrom); + } + + @Override + public MetaData.Custom apply(MetaData.Custom part) { + TreeMap newJobs = new TreeMap<>(jobs.apply(((MlMetadata) part).jobs)); + TreeMap newDatafeeds = new TreeMap<>(datafeeds.apply(((MlMetadata) part).datafeeds)); + return new MlMetadata(newJobs, newDatafeeds); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + jobs.writeTo(out); + datafeeds.writeTo(out); + } + + @Override + public String getWriteableName() { + return MLMetadataField.TYPE; + } + + static Diff readJobDiffFrom(StreamInput in) throws IOException { + return AbstractDiffable.readDiffFrom(Job::new, in); + } + + static Diff readSchedulerDiffFrom(StreamInput in) throws IOException { + return AbstractDiffable.readDiffFrom(DatafeedConfig::new, in); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + MlMetadata that = (MlMetadata) o; + return Objects.equals(jobs, that.jobs) && + Objects.equals(datafeeds, that.datafeeds); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + + @Override + public int hashCode() { + return Objects.hash(jobs, datafeeds); + } + + public static class Builder { + + private TreeMap jobs; + private TreeMap datafeeds; + + public Builder() { + jobs = new TreeMap<>(); + datafeeds = new TreeMap<>(); + } + + public Builder(@Nullable MlMetadata previous) { + if (previous == null) { + jobs = new TreeMap<>(); + datafeeds = new TreeMap<>(); + } else { + jobs = new TreeMap<>(previous.jobs); + datafeeds = new TreeMap<>(previous.datafeeds); + } + } + + public Builder putJob(Job job, boolean overwrite) { + if (jobs.containsKey(job.getId()) && overwrite == false) { + throw ExceptionsHelper.jobAlreadyExists(job.getId()); + } + this.jobs.put(job.getId(), job); + return this; + } + + public Builder deleteJob(String jobId, PersistentTasksCustomMetaData tasks) { + checkJobHasNoDatafeed(jobId); + + JobState jobState = MlMetadata.getJobState(jobId, tasks); + if (jobState.isAnyOf(JobState.CLOSED, JobState.FAILED) == false) { + throw ExceptionsHelper.conflictStatusException("Unexpected job state [" + jobState + "], expected [" + + JobState.CLOSED + " or " + JobState.FAILED + "]"); + } + Job job = jobs.remove(jobId); + if (job == null) { + throw new ResourceNotFoundException("job [" + jobId + "] does not exist"); + } + if (job.isDeleted() == false) { + throw ExceptionsHelper.conflictStatusException("Cannot delete job [" + jobId + "] because it hasn't marked as deleted"); + } + return this; + } + + public Builder putDatafeed(DatafeedConfig datafeedConfig, ThreadContext threadContext) { + if (datafeeds.containsKey(datafeedConfig.getId())) { + throw new ResourceAlreadyExistsException("A datafeed with id [" + datafeedConfig.getId() + "] already exists"); + } + String jobId = datafeedConfig.getJobId(); + checkJobIsAvailableForDatafeed(jobId); + Job job = jobs.get(jobId); + DatafeedJobValidator.validate(datafeedConfig, job); + + if (threadContext != null) { + // Adjust the request, adding security headers from the current thread context + DatafeedConfig.Builder builder = new DatafeedConfig.Builder(datafeedConfig); + Map headers = threadContext.getHeaders().entrySet().stream() + .filter(e -> MlClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + builder.setHeaders(headers); + datafeedConfig = builder.build(); + } 
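/*
 * A hedged aside with a small sketch (illustrative only, not code from this change; header values
 * are made up): the filtering above keeps only the security-related headers so that datafeed
 * searches can later be run with the privileges of the user who created the datafeed. In
 * isolation the idiom looks like this:
 *
 *   Map<String, String> incoming = new HashMap<>();
 *   incoming.put(AuthenticationField.AUTHENTICATION_KEY, "<auth header>");   // kept
 *   incoming.put(AuthenticationServiceField.RUN_AS_USER_HEADER, "jane");     // kept
 *   incoming.put("X-Opaque-Id", "trace-42");                                 // dropped, not security related
 *   Map<String, String> filtered = incoming.entrySet().stream()
 *           .filter(e -> MlClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey()))
 *           .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
 *   // filtered now holds just the two security-related headers
 */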
+ + datafeeds.put(datafeedConfig.getId(), datafeedConfig); + return this; + } + + private void checkJobIsAvailableForDatafeed(String jobId) { + Job job = jobs.get(jobId); + if (job == null || job.isDeleted()) { + throw ExceptionsHelper.missingJobException(jobId); + } + Optional existingDatafeed = getDatafeedByJobId(jobId); + if (existingDatafeed.isPresent()) { + throw ExceptionsHelper.conflictStatusException("A datafeed [" + existingDatafeed.get().getId() + + "] already exists for job [" + jobId + "]"); + } + } + + public Builder updateDatafeed(DatafeedUpdate update, PersistentTasksCustomMetaData persistentTasks, ThreadContext threadContext) { + String datafeedId = update.getId(); + DatafeedConfig oldDatafeedConfig = datafeeds.get(datafeedId); + if (oldDatafeedConfig == null) { + throw ExceptionsHelper.missingDatafeedException(datafeedId); + } + checkDatafeedIsStopped(() -> Messages.getMessage(Messages.DATAFEED_CANNOT_UPDATE_IN_CURRENT_STATE, datafeedId, + DatafeedState.STARTED), datafeedId, persistentTasks); + DatafeedConfig newDatafeedConfig = update.apply(oldDatafeedConfig, threadContext); + if (newDatafeedConfig.getJobId().equals(oldDatafeedConfig.getJobId()) == false) { + checkJobIsAvailableForDatafeed(newDatafeedConfig.getJobId()); + } + Job job = jobs.get(newDatafeedConfig.getJobId()); + DatafeedJobValidator.validate(newDatafeedConfig, job); + datafeeds.put(datafeedId, newDatafeedConfig); + return this; + } + + public Builder removeDatafeed(String datafeedId, PersistentTasksCustomMetaData persistentTasks) { + DatafeedConfig datafeed = datafeeds.get(datafeedId); + if (datafeed == null) { + throw ExceptionsHelper.missingDatafeedException(datafeedId); + } + checkDatafeedIsStopped(() -> Messages.getMessage(Messages.DATAFEED_CANNOT_DELETE_IN_CURRENT_STATE, datafeedId, + DatafeedState.STARTED), datafeedId, persistentTasks); + datafeeds.remove(datafeedId); + return this; + } + + private Optional getDatafeedByJobId(String jobId) { + return datafeeds.values().stream().filter(s -> s.getJobId().equals(jobId)).findFirst(); + } + + private void checkDatafeedIsStopped(Supplier msg, String datafeedId, PersistentTasksCustomMetaData persistentTasks) { + if (persistentTasks != null) { + if (persistentTasks.getTask(MLMetadataField.datafeedTaskId(datafeedId)) != null) { + throw ExceptionsHelper.conflictStatusException(msg.get()); + } + } + } + + private Builder putJobs(Collection jobs) { + for (Job job : jobs) { + putJob(job, true); + } + return this; + } + + private Builder putDatafeeds(Collection datafeeds) { + for (DatafeedConfig datafeed : datafeeds) { + this.datafeeds.put(datafeed.getId(), datafeed); + } + return this; + } + + public MlMetadata build() { + return new MlMetadata(jobs, datafeeds); + } + + public void markJobAsDeleted(String jobId, PersistentTasksCustomMetaData tasks, boolean allowDeleteOpenJob) { + Job job = jobs.get(jobId); + if (job == null) { + throw ExceptionsHelper.missingJobException(jobId); + } + if (job.isDeleted()) { + // Job still exists but is already being deleted + throw new JobAlreadyMarkedAsDeletedException(); + } + + checkJobHasNoDatafeed(jobId); + + if (allowDeleteOpenJob == false) { + PersistentTask jobTask = getJobTask(jobId, tasks); + if (jobTask != null) { + JobTaskStatus jobTaskStatus = (JobTaskStatus) jobTask.getStatus(); + throw ExceptionsHelper.conflictStatusException("Cannot delete job [" + jobId + "] because the job is " + + ((jobTaskStatus == null) ? 
JobState.OPENING : jobTaskStatus.getState())); + } + } + Job.Builder jobBuilder = new Job.Builder(job); + jobBuilder.setDeleted(true); + putJob(jobBuilder.build(), true); + } + + void checkJobHasNoDatafeed(String jobId) { + Optional datafeed = getDatafeedByJobId(jobId); + if (datafeed.isPresent()) { + throw ExceptionsHelper.conflictStatusException("Cannot delete job [" + jobId + "] because datafeed [" + + datafeed.get().getId() + "] refers to it"); + } + } + } + + /** + * Namespaces the task ids for jobs. + * A datafeed id can be used as a job id, because they are stored separately in cluster state. + */ + public static String jobTaskId(String jobId) { + return "job-" + jobId; + } + + @Nullable + public static PersistentTask getJobTask(String jobId, @Nullable PersistentTasksCustomMetaData tasks) { + if (tasks == null) { + return null; + } + return tasks.getTask(jobTaskId(jobId)); + } + + @Nullable + public static PersistentTask getDatafeedTask(String datafeedId, @Nullable PersistentTasksCustomMetaData tasks) { + if (tasks == null) { + return null; + } + return tasks.getTask(MLMetadataField.datafeedTaskId(datafeedId)); + } + + public static JobState getJobState(String jobId, @Nullable PersistentTasksCustomMetaData tasks) { + PersistentTask task = getJobTask(jobId, tasks); + if (task != null) { + JobTaskStatus jobTaskState = (JobTaskStatus) task.getStatus(); + if (jobTaskState == null) { + return JobState.OPENING; + } + return jobTaskState.getState(); + } + // If we haven't opened a job than there will be no persistent task, which is the same as if the job was closed + return JobState.CLOSED; + } + + public static DatafeedState getDatafeedState(String datafeedId, @Nullable PersistentTasksCustomMetaData tasks) { + PersistentTask task = getDatafeedTask(datafeedId, tasks); + if (task != null && task.getStatus() != null) { + return (DatafeedState) task.getStatus(); + } else { + // If we haven't started a datafeed then there will be no persistent task, + // which is the same as if the datafeed was't started + return DatafeedState.STOPPED; + } + } + + public static class JobAlreadyMarkedAsDeletedException extends RuntimeException { + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlParserType.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlParserType.java new file mode 100644 index 0000000000000..64f52ab2d2c60 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlParserType.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml; + +/** + * In order to allow enhancements that require additions to the ML custom cluster state to be made in minor versions, + * when we parse our metadata from persisted cluster state we ignore unknown fields. However, we don't want to be + * lenient when parsing config as this would mean user mistakes could go undetected. Therefore, for all JSON objects + * that are used in both custom cluster state and config we have two parsers, one tolerant of unknown fields (for + * parsing cluster state) and one strict (for parsing config). This class enumerates the two options. 
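 * <p>
 * A hedged illustration of the resulting pattern (the config class and its {@code Builder} below
 * are hypothetical, not taken from this change): a class parsed in both places keeps one parser
 * per {@code MlParserType}, identical apart from leniency, typically held in an {@code EnumMap}.
 * <pre>{@code
 * EnumMap<MlParserType, ObjectParser<MyConfig.Builder, Void>> parsers = new EnumMap<>(MlParserType.class);
 * parsers.put(MlParserType.METADATA, new ObjectParser<>("my_config", true, MyConfig.Builder::new));  // lenient
 * parsers.put(MlParserType.CONFIG, new ObjectParser<>("my_config", false, MyConfig.Builder::new));   // strict
 * for (ObjectParser<MyConfig.Builder, Void> parser : parsers.values()) {
 *     parser.declareString(MyConfig.Builder::setId, new ParseField("id"));
 * }
 * }</pre>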
+ */ +public enum MlParserType { + + METADATA, CONFIG; + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CloseJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CloseJobAction.java new file mode 100644 index 0000000000000..c66686136e7e5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CloseJobAction.java @@ -0,0 +1,272 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.tasks.BaseTasksRequest; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.ml.MachineLearningField; +import org.elasticsearch.xpack.core.ml.job.config.Job; + +import java.io.IOException; +import java.util.Objects; + +public class CloseJobAction extends Action { + + public static final CloseJobAction INSTANCE = new CloseJobAction(); + public static final String NAME = "cluster:admin/xpack/ml/job/close"; + + private CloseJobAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends BaseTasksRequest implements ToXContentObject { + + public static final ParseField TIMEOUT = new ParseField("timeout"); + public static final ParseField FORCE = new ParseField("force"); + public static final ParseField ALLOW_NO_JOBS = new ParseField("allow_no_jobs"); + public static ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); + + static { + PARSER.declareString(Request::setJobId, Job.ID); + PARSER.declareString((request, val) -> + request.setCloseTimeout(TimeValue.parseTimeValue(val, TIMEOUT.getPreferredName())), TIMEOUT); + PARSER.declareBoolean(Request::setForce, FORCE); + PARSER.declareBoolean(Request::setAllowNoJobs, ALLOW_NO_JOBS); + } + + public static Request parseRequest(String jobId, XContentParser parser) { + Request request = PARSER.apply(parser, null); + if (jobId != null) { + request.setJobId(jobId); + } + return request; + } + + private String jobId; + private boolean force = false; + private boolean allowNoJobs = true; + // A big state can take a while to persist. For symmetry with the _open endpoint any + // changes here should be reflected there too. 
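/*
 * A hedged usage sketch (illustrative only, not part of this change; the job id is made up)
 * showing how this request is typically built and what toXContent below renders it as:
 *
 *   CloseJobAction.Request request = new CloseJobAction.Request("my-job");
 *   request.setCloseTimeout(TimeValue.timeValueMinutes(20));
 *   request.setForce(true);
 *   // serialises to {"job_id":"my-job","timeout":"20m","force":true,"allow_no_jobs":true}
 *   // (openJobIds and local are deliberately excluded from the XContent form)
 */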
+ private TimeValue timeout = MachineLearningField.STATE_PERSIST_RESTORE_TIMEOUT; + + private String[] openJobIds; + + private boolean local; + + public Request() { + openJobIds = new String[] {}; + } + + public Request(String jobId) { + this(); + this.jobId = jobId; + } + + public String getJobId() { + return jobId; + } + + public void setJobId(String jobId) { + this.jobId = jobId; + } + + public TimeValue getCloseTimeout() { + return timeout; + } + + public void setCloseTimeout(TimeValue timeout) { + this.timeout = timeout; + } + + public boolean isForce() { + return force; + } + + public void setForce(boolean force) { + this.force = force; + } + + public boolean allowNoJobs() { + return allowNoJobs; + } + + public void setAllowNoJobs(boolean allowNoJobs) { + this.allowNoJobs = allowNoJobs; + } + + public boolean isLocal() { return local; } + + public void setLocal(boolean local) { + this.local = local; + } + + public String[] getOpenJobIds() { return openJobIds; } + + public void setOpenJobIds(String [] openJobIds) { + this.openJobIds = openJobIds; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + jobId = in.readString(); + timeout = in.readTimeValue(); + force = in.readBoolean(); + openJobIds = in.readStringArray(); + local = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_6_1_0)) { + allowNoJobs = in.readBoolean(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(jobId); + out.writeTimeValue(timeout); + out.writeBoolean(force); + out.writeStringArray(openJobIds); + out.writeBoolean(local); + if (out.getVersion().onOrAfter(Version.V_6_1_0)) { + out.writeBoolean(allowNoJobs); + } + } + + @Override + public boolean match(Task task) { + for (String id : openJobIds) { + if (OpenJobAction.JobTaskMatcher.match(task, id)) { + return true; + } + } + return false; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + // openJobIds are excluded + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(TIMEOUT.getPreferredName(), timeout.getStringRep()); + builder.field(FORCE.getPreferredName(), force); + builder.field(ALLOW_NO_JOBS.getPreferredName(), allowNoJobs); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + // openJobIds are excluded + return Objects.hash(jobId, timeout, force, allowNoJobs); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Request other = (Request) obj; + // openJobIds are excluded + return Objects.equals(jobId, other.jobId) && + Objects.equals(timeout, other.timeout) && + Objects.equals(force, other.force) && + Objects.equals(allowNoJobs, other.allowNoJobs); + } + } + + static class RequestBuilder extends ActionRequestBuilder { + + RequestBuilder(ElasticsearchClient client, CloseJobAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { + + private boolean closed; + + public Response() { + super(null, null); + + } + + public Response(StreamInput in) throws IOException { + super(null, null); + readFrom(in); + } + + public Response(boolean closed) { + super(null, null); + this.closed = closed; + } + + public boolean isClosed() { + return closed; + } + + @Override + public void 
readFrom(StreamInput in) throws IOException { + super.readFrom(in); + closed = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(closed); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("closed", closed); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return closed == response.closed; + } + + @Override + public int hashCode() { + return Objects.hash(closed); + } + } + +} + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarAction.java new file mode 100644 index 0000000000000..2a01282d41115 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarAction.java @@ -0,0 +1,119 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.ml.calendars.Calendar; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; + +public class DeleteCalendarAction extends Action { + + public static final DeleteCalendarAction INSTANCE = new DeleteCalendarAction(); + public static final String NAME = "cluster:admin/xpack/ml/calendars/delete"; + + private DeleteCalendarAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends AcknowledgedRequest { + + + private String calendarId; + + public Request() { + } + + public Request(String calendarId) { + this.calendarId = ExceptionsHelper.requireNonNull(calendarId, Calendar.ID.getPreferredName()); + } + + public String getCalendarId() { + return calendarId; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + calendarId = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(calendarId); + } + + @Override + public int hashCode() { + return Objects.hash(calendarId); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Request other = (Request) obj; + return Objects.equals(calendarId, other.calendarId); 
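/*
 * A hedged sketch (illustrative only, not part of this change; the calendar id and logger are
 * made up) of how this action, like the other ML actions added in this PR, is driven through a client:
 *
 *   client.execute(DeleteCalendarAction.INSTANCE, new DeleteCalendarAction.Request("my-calendar"),
 *           ActionListener.wrap(
 *                   response -> logger.info("calendar deleted, acknowledged={}", response.isAcknowledged()),
 *                   e -> logger.error("failed to delete calendar", e)));
 */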
+ } + } + + public static class RequestBuilder extends ActionRequestBuilder { + + public RequestBuilder(ElasticsearchClient client, DeleteCalendarAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends AcknowledgedResponse { + + public Response(boolean acknowledged) { + super(acknowledged); + } + + public Response() {} + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + readAcknowledged(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + writeAcknowledged(out); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventAction.java new file mode 100644 index 0000000000000..01d5e3e37215b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventAction.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.ml.calendars.Calendar; +import org.elasticsearch.xpack.core.ml.calendars.ScheduledEvent; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; + +public class DeleteCalendarEventAction extends Action { + + public static final DeleteCalendarEventAction INSTANCE = new DeleteCalendarEventAction(); + public static final String NAME = "cluster:admin/xpack/ml/calendars/events/delete"; + + private DeleteCalendarEventAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends AcknowledgedRequest { + private String calendarId; + private String eventId; + + public Request() { + } + + public Request(String calendarId, String eventId) { + this.calendarId = ExceptionsHelper.requireNonNull(calendarId, Calendar.ID.getPreferredName()); + this.eventId = ExceptionsHelper.requireNonNull(eventId, ScheduledEvent.EVENT_ID.getPreferredName()); + } + + public String getCalendarId() { + return calendarId; + } + + public String getEventId() { + return eventId; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + calendarId = in.readString(); + eventId = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(calendarId); + out.writeString(eventId); + } + + @Override + public int hashCode() { + return 
Objects.hash(eventId, calendarId); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Request other = (Request) obj; + return Objects.equals(eventId, other.eventId) && Objects.equals(calendarId, other.calendarId); + } + } + + public static class RequestBuilder extends ActionRequestBuilder { + + public RequestBuilder(ElasticsearchClient client, DeleteCalendarEventAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends AcknowledgedResponse { + + public Response(boolean acknowledged) { + super(acknowledged); + } + + private Response() {} + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + readAcknowledged(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + writeAcknowledged(out); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java new file mode 100644 index 0000000000000..6876294bdf301 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java @@ -0,0 +1,144 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; + +public class DeleteDatafeedAction extends Action { + + public static final DeleteDatafeedAction INSTANCE = new DeleteDatafeedAction(); + public static final String NAME = "cluster:admin/xpack/ml/datafeeds/delete"; + + private DeleteDatafeedAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends AcknowledgedRequest implements ToXContentFragment { + + public static final ParseField FORCE = new ParseField("force"); + + private String datafeedId; + private boolean force; + + public Request(String datafeedId) { + this.datafeedId = ExceptionsHelper.requireNonNull(datafeedId, DatafeedConfig.ID.getPreferredName()); + } + + public Request() { + } + + public String getDatafeedId() { + return datafeedId; + } + + public boolean isForce() { + return force; + } + + public void setForce(boolean force) { + 
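/*
 * A hedged note (not part of this change): the flag set here is version-gated on the wire. The
 * readFrom/writeTo methods below only serialise it to nodes that are new enough to understand it,
 * for example:
 *
 *   if (out.getVersion().onOrAfter(Version.V_5_5_0)) {
 *       out.writeBoolean(force);   // nodes before 5.5.0 are never sent this flag; when reading
 *                                  // from such nodes it keeps its default (false)
 *   }
 */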
this.force = force; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + datafeedId = in.readString(); + if (in.getVersion().onOrAfter(Version.V_5_5_0)) { + force = in.readBoolean(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(datafeedId); + if (out.getVersion().onOrAfter(Version.V_5_5_0)) { + out.writeBoolean(force); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(DatafeedConfig.ID.getPreferredName(), datafeedId); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request other = (Request) o; + return Objects.equals(datafeedId, other.datafeedId) && Objects.equals(force, other.force); + } + + @Override + public int hashCode() { + return Objects.hash(datafeedId, force); + } + } + + public static class RequestBuilder extends MasterNodeOperationRequestBuilder { + + public RequestBuilder(ElasticsearchClient client, DeleteDatafeedAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends AcknowledgedResponse { + + public Response() { + } + + public Response(boolean acknowledged) { + super(acknowledged); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + readAcknowledged(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + writeAcknowledged(out); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteExpiredDataAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteExpiredDataAction.java new file mode 100644 index 0000000000000..aeacf68fe18c7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteExpiredDataAction.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +public class DeleteExpiredDataAction extends Action { + + public static final DeleteExpiredDataAction INSTANCE = new DeleteExpiredDataAction(); + public static final String NAME = "cluster:admin/xpack/ml/delete_expired_data"; + + private DeleteExpiredDataAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest { + + public Request() {} + + @Override + public ActionRequestValidationException validate() { + return null; + } + } + + static class RequestBuilder extends ActionRequestBuilder { + + RequestBuilder(ElasticsearchClient client, DeleteExpiredDataAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private static final ParseField DELETED = new ParseField("deleted"); + + private boolean deleted; + + public Response(boolean deleted) { + this.deleted = deleted; + } + + public Response() {} + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + deleted = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(deleted); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(DELETED.getPreferredName(), deleted); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return Objects.equals(deleted, response.deleted); + } + + @Override + public int hashCode() { + return Objects.hash(deleted); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteFilterAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteFilterAction.java new file mode 100644 index 0000000000000..86e343fda22cc --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteFilterAction.java @@ -0,0 +1,121 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; + + +public class DeleteFilterAction extends Action { + + public static final DeleteFilterAction INSTANCE = new DeleteFilterAction(); + public static final String NAME = "cluster:admin/xpack/ml/filters/delete"; + + private DeleteFilterAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends AcknowledgedRequest { + + public static final ParseField FILTER_ID = new ParseField("filter_id"); + + private String filterId; + + public Request() { + + } + + public Request(String filterId) { + this.filterId = ExceptionsHelper.requireNonNull(filterId, FILTER_ID.getPreferredName()); + } + + public String getFilterId() { + return filterId; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + filterId = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(filterId); + } + + @Override + public int hashCode() { + return Objects.hash(filterId); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(filterId, other.filterId); + } + } + + public static class RequestBuilder extends MasterNodeOperationRequestBuilder { + + public RequestBuilder(ElasticsearchClient client, DeleteFilterAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends AcknowledgedResponse { + + public Response(boolean acknowledged) { + super(acknowledged); + } + + public Response() {} + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + readAcknowledged(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + writeAcknowledged(out); + } + } + +} + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java new file mode 100644 index 0000000000000..4fa264862dbaf --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java @@ -0,0 +1,147 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.persistence.JobStorageDeletionTask; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +public class DeleteJobAction extends Action { + + public static final DeleteJobAction INSTANCE = new DeleteJobAction(); + public static final String NAME = "cluster:admin/xpack/ml/job/delete"; + + private DeleteJobAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends AcknowledgedRequest { + + private String jobId; + private boolean force; + + public Request(String jobId) { + this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); + } + + public Request() {} + + public String getJobId() { + return jobId; + } + + public void setJobId(String jobId) { + this.jobId = jobId; + } + + public boolean isForce() { + return force; + } + + public void setForce(boolean force) { + this.force = force; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new JobStorageDeletionTask(id, type, action, "delete-job-" + jobId, parentTaskId, headers); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + jobId = in.readString(); + if (in.getVersion().onOrAfter(Version.V_5_5_0)) { + force = in.readBoolean(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(jobId); + if (out.getVersion().onOrAfter(Version.V_5_5_0)) { + out.writeBoolean(force); + } + } + + @Override + public int hashCode() { + return Objects.hash(jobId, force); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || obj.getClass() != getClass()) { + return false; + } + DeleteJobAction.Request other = (DeleteJobAction.Request) obj; + return Objects.equals(jobId, other.jobId) && Objects.equals(force, other.force); + } + } + + static class RequestBuilder extends MasterNodeOperationRequestBuilder { + + RequestBuilder(ElasticsearchClient client, DeleteJobAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends AcknowledgedResponse { + + public Response(boolean acknowledged) { + super(acknowledged); + } + + public Response() {} + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + readAcknowledged(in); + } + + @Override + public void 
writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + writeAcknowledged(out); + } + } + +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteModelSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteModelSnapshotAction.java new file mode 100644 index 0000000000000..39c497ab51ea6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteModelSnapshotAction.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshotField; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; + +public class DeleteModelSnapshotAction extends Action { + + public static final DeleteModelSnapshotAction INSTANCE = new DeleteModelSnapshotAction(); + public static final String NAME = "cluster:admin/xpack/ml/job/model_snapshots/delete"; + + private DeleteModelSnapshotAction() { + super(NAME); + } + + @Override + public DeleteModelSnapshotAction.RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public DeleteModelSnapshotAction.Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest { + + private String jobId; + private String snapshotId; + + public Request() { + } + + public Request(String jobId, String snapshotId) { + this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); + this.snapshotId = ExceptionsHelper.requireNonNull(snapshotId, ModelSnapshotField.SNAPSHOT_ID.getPreferredName()); + } + + public String getJobId() { + return jobId; + } + + public String getSnapshotId() { + return snapshotId; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + jobId = in.readString(); + snapshotId = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(jobId); + out.writeString(snapshotId); + } + } + + public static class Response extends AcknowledgedResponse { + + public Response(boolean acknowledged) { + super(acknowledged); + } + + public Response() {} + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + readAcknowledged(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + writeAcknowledged(out); + } + + } + + public static class RequestBuilder extends ActionRequestBuilder { + + public RequestBuilder(ElasticsearchClient 
client, DeleteModelSnapshotAction action) { + super(client, action, new Request()); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FinalizeJobExecutionAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FinalizeJobExecutionAction.java new file mode 100644 index 0000000000000..9259aa0c60473 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FinalizeJobExecutionAction.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public class FinalizeJobExecutionAction extends Action { + + public static final FinalizeJobExecutionAction INSTANCE = new FinalizeJobExecutionAction(); + public static final String NAME = "cluster:internal/xpack/ml/job/finalize_job_execution"; + + private FinalizeJobExecutionAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, INSTANCE); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends MasterNodeRequest { + + private String[] jobIds; + + public Request(String[] jobIds) { + this.jobIds = jobIds; + } + + public Request() { + } + + public String[] getJobIds() { + return jobIds; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + jobIds = in.readStringArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(jobIds); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + } + + public static class RequestBuilder + extends MasterNodeOperationRequestBuilder { + + public RequestBuilder(ElasticsearchClient client, FinalizeJobExecutionAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends AcknowledgedResponse { + + public Response(boolean acknowledged) { + super(acknowledged); + } + + public Response() { + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + readAcknowledged(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + writeAcknowledged(out); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java new file mode 100644 index 0000000000000..2da2505a77170 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java @@ -0,0 +1,273 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.output.FlushAcknowledgement; + +import java.io.IOException; +import java.util.Date; +import java.util.Objects; + +public class FlushJobAction extends Action { + + public static final FlushJobAction INSTANCE = new FlushJobAction(); + public static final String NAME = "cluster:admin/xpack/ml/job/flush"; + + private FlushJobAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends JobTaskRequest implements ToXContentObject { + + public static final ParseField CALC_INTERIM = new ParseField("calc_interim"); + public static final ParseField START = new ParseField("start"); + public static final ParseField END = new ParseField("end"); + public static final ParseField ADVANCE_TIME = new ParseField("advance_time"); + public static final ParseField SKIP_TIME = new ParseField("skip_time"); + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); + + static { + PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID); + PARSER.declareBoolean(Request::setCalcInterim, CALC_INTERIM); + PARSER.declareString(Request::setStart, START); + PARSER.declareString(Request::setEnd, END); + PARSER.declareString(Request::setAdvanceTime, ADVANCE_TIME); + PARSER.declareString(Request::setSkipTime, SKIP_TIME); + } + + public static Request parseRequest(String jobId, XContentParser parser) { + Request request = PARSER.apply(parser, null); + if (jobId != null) { + request.jobId = jobId; + } + return request; + } + + private boolean calcInterim = false; + private String start; + private String end; + private String advanceTime; + private String skipTime; + + public Request() { + } + + public Request(String jobId) { + super(jobId); + } + + public boolean getCalcInterim() { + return calcInterim; + } + + public void setCalcInterim(boolean calcInterim) { + this.calcInterim = calcInterim; + } + + public String getStart() { + return start; + } + + public void setStart(String start) { + this.start = start; + } + + public String getEnd() { + return end; + } + + public void setEnd(String end) { + this.end = end; + } + + public String getAdvanceTime() { + return advanceTime; + } + + public void setAdvanceTime(String advanceTime) { + this.advanceTime = advanceTime; + } + + 
public String getSkipTime() { + return skipTime; + } + + public void setSkipTime(String skipTime) { + this.skipTime = skipTime; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + calcInterim = in.readBoolean(); + start = in.readOptionalString(); + end = in.readOptionalString(); + advanceTime = in.readOptionalString(); + if (in.getVersion().after(Version.V_5_5_0)) { + skipTime = in.readOptionalString(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(calcInterim); + out.writeOptionalString(start); + out.writeOptionalString(end); + out.writeOptionalString(advanceTime); + if (out.getVersion().after(Version.V_5_5_0)) { + out.writeOptionalString(skipTime); + } + } + + @Override + public int hashCode() { + return Objects.hash(jobId, calcInterim, start, end, advanceTime, skipTime); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(jobId, other.jobId) && + calcInterim == other.calcInterim && + Objects.equals(start, other.start) && + Objects.equals(end, other.end) && + Objects.equals(advanceTime, other.advanceTime) && + Objects.equals(skipTime, other.skipTime); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(CALC_INTERIM.getPreferredName(), calcInterim); + if (start != null) { + builder.field(START.getPreferredName(), start); + } + if (end != null) { + builder.field(END.getPreferredName(), end); + } + if (advanceTime != null) { + builder.field(ADVANCE_TIME.getPreferredName(), advanceTime); + } + if (skipTime != null) { + builder.field(SKIP_TIME.getPreferredName(), skipTime); + } + builder.endObject(); + return builder; + } + } + + static class RequestBuilder extends ActionRequestBuilder { + + RequestBuilder(ElasticsearchClient client, FlushJobAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { + + private boolean flushed; + private Date lastFinalizedBucketEnd; + + public Response() { + super(null, null); + } + + public Response(boolean flushed, @Nullable Date lastFinalizedBucketEnd) { + super(null, null); + this.flushed = flushed; + this.lastFinalizedBucketEnd = lastFinalizedBucketEnd; + } + + public boolean isFlushed() { + return flushed; + } + + public Date getLastFinalizedBucketEnd() { + return lastFinalizedBucketEnd; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + flushed = in.readBoolean(); + if (in.getVersion().after(Version.V_5_5_0)) { + lastFinalizedBucketEnd = new Date(in.readVLong()); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(flushed); + if (out.getVersion().after(Version.V_5_5_0)) { + out.writeVLong(lastFinalizedBucketEnd.getTime()); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("flushed", flushed); + if (lastFinalizedBucketEnd != null) { + builder.timeField(FlushAcknowledgement.LAST_FINALIZED_BUCKET_END.getPreferredName(), + 
FlushAcknowledgement.LAST_FINALIZED_BUCKET_END.getPreferredName() + "_string", lastFinalizedBucketEnd.getTime()); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return flushed == response.flushed && + Objects.equals(lastFinalizedBucketEnd, response.lastFinalizedBucketEnd); + } + + @Override + public int hashCode() { + return Objects.hash(flushed, lastFinalizedBucketEnd); + } + } + +} + + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ForecastJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ForecastJobAction.java new file mode 100644 index 0000000000000..4868a1e73da37 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ForecastJobAction.java @@ -0,0 +1,236 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.results.Forecast; + +import java.io.IOException; +import java.util.Objects; + +public class ForecastJobAction extends Action { + + public static final ForecastJobAction INSTANCE = new ForecastJobAction(); + public static final String NAME = "cluster:admin/xpack/ml/job/forecast"; + + private ForecastJobAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends JobTaskRequest implements ToXContentObject { + + public static final ParseField DURATION = new ParseField("duration"); + public static final ParseField EXPIRES_IN = new ParseField("expires_in"); + + // Max allowed duration: 8 weeks + private static final TimeValue MAX_DURATION = TimeValue.parseTimeValue("56d", ""); + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); + + static { + PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID); + PARSER.declareString(Request::setDuration, DURATION); + PARSER.declareString(Request::setExpiresIn, EXPIRES_IN); + } + + public static Request parseRequest(String jobId, XContentParser parser) { + Request request = PARSER.apply(parser, null); + if (jobId != null) { + request.jobId = jobId; + } + return request; + } + + private TimeValue duration; + private TimeValue expiresIn; + + public Request() { 
+ } + + public Request(String jobId) { + super(jobId); + } + + public TimeValue getDuration() { + return duration; + } + + public void setDuration(String duration) { + setDuration(TimeValue.parseTimeValue(duration, DURATION.getPreferredName())); + } + + public void setDuration(TimeValue duration) { + this.duration = duration; + if (this.duration.compareTo(TimeValue.ZERO) <= 0) { + throw new IllegalArgumentException("[" + DURATION.getPreferredName() + "] must be positive: [" + + duration.getStringRep() + "]"); + } + if (this.duration.compareTo(MAX_DURATION) > 0) { + throw new IllegalArgumentException("[" + DURATION.getPreferredName() + "] must be " + + MAX_DURATION.getStringRep() + " or less: [" + duration.getStringRep() + "]"); + } + } + + public TimeValue getExpiresIn() { + return expiresIn; + } + + public void setExpiresIn(String expiration) { + setExpiresIn(TimeValue.parseTimeValue(expiration, EXPIRES_IN.getPreferredName())); + } + + public void setExpiresIn(TimeValue expiresIn) { + this.expiresIn = expiresIn; + if (this.expiresIn.compareTo(TimeValue.ZERO) < 0) { + throw new IllegalArgumentException("[" + EXPIRES_IN.getPreferredName() + "] must be non-negative: [" + + expiresIn.getStringRep() + "]"); + } + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + this.duration = in.readOptionalTimeValue(); + this.expiresIn = in.readOptionalTimeValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalTimeValue(duration); + out.writeOptionalTimeValue(expiresIn); + } + + @Override + public int hashCode() { + return Objects.hash(jobId, duration, expiresIn); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(jobId, other.jobId) + && Objects.equals(duration, other.duration) + && Objects.equals(expiresIn, other.expiresIn); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + if (duration != null) { + builder.field(DURATION.getPreferredName(), duration.getStringRep()); + } + if (expiresIn != null) { + builder.field(EXPIRES_IN.getPreferredName(), expiresIn.getStringRep()); + } + builder.endObject(); + return builder; + } + } + + static class RequestBuilder extends ActionRequestBuilder { + + RequestBuilder(ElasticsearchClient client, ForecastJobAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { + + private boolean acknowledged; + private String forecastId; + + public Response() { + super(null, null); + } + + public Response(boolean acknowledged, String forecastId) { + super(null, null); + this.acknowledged = acknowledged; + this.forecastId = forecastId; + } + + public boolean isAcknowledged() { + return acknowledged; + } + + public String getForecastId() { + return forecastId; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + acknowledged = in.readBoolean(); + forecastId = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(acknowledged); + out.writeString(forecastId); + } + + @Override + public XContentBuilder 
toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("acknowledged", acknowledged); + builder.field(Forecast.FORECAST_ID.getPreferredName(), forecastId); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return this.acknowledged == other.acknowledged && Objects.equals(this.forecastId, other.forecastId); + } + + @Override + public int hashCode() { + return Objects.hash(acknowledged, forecastId); + } + } +} + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java new file mode 100644 index 0000000000000..dec819613cb37 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetBucketsAction.java @@ -0,0 +1,362 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.action.util.PageParams; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.results.Bucket; +import org.elasticsearch.xpack.core.ml.job.results.Result; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; + +public class GetBucketsAction extends Action { + + public static final GetBucketsAction INSTANCE = new GetBucketsAction(); + public static final String NAME = "cluster:monitor/xpack/ml/job/results/buckets/get"; + + private GetBucketsAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + public static final ParseField EXPAND = new ParseField("expand"); + public static final ParseField EXCLUDE_INTERIM = new ParseField("exclude_interim"); + public static final ParseField START = new ParseField("start"); + public static final ParseField END = new ParseField("end"); + public static final ParseField ANOMALY_SCORE = new ParseField("anomaly_score"); + public static final ParseField TIMESTAMP = new ParseField("timestamp"); + public 
static final ParseField SORT = new ParseField("sort"); + public static final ParseField DESCENDING = new ParseField("desc"); + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); + + static { + PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID); + PARSER.declareString(Request::setTimestamp, Result.TIMESTAMP); + PARSER.declareBoolean(Request::setExpand, EXPAND); + PARSER.declareBoolean(Request::setExcludeInterim, EXCLUDE_INTERIM); + PARSER.declareStringOrNull(Request::setStart, START); + PARSER.declareStringOrNull(Request::setEnd, END); + PARSER.declareObject(Request::setPageParams, PageParams.PARSER, PageParams.PAGE); + PARSER.declareDouble(Request::setAnomalyScore, ANOMALY_SCORE); + PARSER.declareString(Request::setSort, SORT); + PARSER.declareBoolean(Request::setDescending, DESCENDING); + } + + public static Request parseRequest(String jobId, XContentParser parser) { + Request request = PARSER.apply(parser, null); + if (jobId != null) { + request.jobId = jobId; + } + return request; + } + + private String jobId; + private String timestamp; + private boolean expand = false; + private boolean excludeInterim = false; + private String start; + private String end; + private PageParams pageParams; + private Double anomalyScore; + private String sort = Result.TIMESTAMP.getPreferredName(); + private boolean descending = false; + + public Request() { + } + + public Request(String jobId) { + this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); + } + + public String getJobId() { + return jobId; + } + + public void setTimestamp(String timestamp) { + if (pageParams != null || start != null || end != null || anomalyScore != null) { + throw new IllegalArgumentException("Param [" + TIMESTAMP.getPreferredName() + "] is incompatible with [" + + PageParams.FROM.getPreferredName() + "," + + PageParams.SIZE.getPreferredName() + "," + + START.getPreferredName() + "," + + END.getPreferredName() + "," + + ANOMALY_SCORE.getPreferredName() + "]"); + } + this.timestamp = ExceptionsHelper.requireNonNull(timestamp, Result.TIMESTAMP.getPreferredName()); + } + + public String getTimestamp() { + return timestamp; + } + + public boolean isExpand() { + return expand; + } + + public void setExpand(boolean expand) { + this.expand = expand; + } + + public boolean isExcludeInterim() { + return excludeInterim; + } + + public void setExcludeInterim(boolean excludeInterim) { + this.excludeInterim = excludeInterim; + } + + public String getStart() { + return start; + } + + public void setStart(String start) { + if (timestamp != null) { + throw new IllegalArgumentException("Param [" + START.getPreferredName() + "] is incompatible with [" + + TIMESTAMP.getPreferredName() + "]."); + } + this.start = start; + } + + public String getEnd() { + return end; + } + + public void setEnd(String end) { + if (timestamp != null) { + throw new IllegalArgumentException("Param [" + END.getPreferredName() + "] is incompatible with [" + + TIMESTAMP.getPreferredName() + "]."); + } + this.end = end; + } + + public PageParams getPageParams() { + return pageParams; + } + + public void setPageParams(PageParams pageParams) { + if (timestamp != null) { + throw new IllegalArgumentException("Param [" + PageParams.FROM.getPreferredName() + + ", " + PageParams.SIZE.getPreferredName() + "] is incompatible with [" + TIMESTAMP.getPreferredName() + "]."); + } + this.pageParams = ExceptionsHelper.requireNonNull(pageParams, PageParams.PAGE.getPreferredName()); + } + + public Double 
getAnomalyScore() { + return anomalyScore; + } + + public void setAnomalyScore(double anomalyScore) { + if (timestamp != null) { + throw new IllegalArgumentException("Param [" + ANOMALY_SCORE.getPreferredName() + "] is incompatible with [" + + TIMESTAMP.getPreferredName() + "]."); + } + this.anomalyScore = anomalyScore; + } + + public String getSort() { + return sort; + } + + public void setSort(String sort) { + this.sort = sort; + } + + public boolean isDescending() { + return descending; + } + + public void setDescending(boolean descending) { + this.descending = descending; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + jobId = in.readString(); + timestamp = in.readOptionalString(); + expand = in.readBoolean(); + excludeInterim = in.readBoolean(); + start = in.readOptionalString(); + end = in.readOptionalString(); + anomalyScore = in.readOptionalDouble(); + pageParams = in.readOptionalWriteable(PageParams::new); + if (in.getVersion().onOrAfter(Version.V_5_5_0)) { + sort = in.readString(); + descending = in.readBoolean(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(jobId); + out.writeOptionalString(timestamp); + out.writeBoolean(expand); + out.writeBoolean(excludeInterim); + out.writeOptionalString(start); + out.writeOptionalString(end); + out.writeOptionalDouble(anomalyScore); + out.writeOptionalWriteable(pageParams); + if (out.getVersion().onOrAfter(Version.V_5_5_0)) { + out.writeString(sort); + out.writeBoolean(descending); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + if (timestamp != null) { + builder.field(Result.TIMESTAMP.getPreferredName(), timestamp); + } + builder.field(EXPAND.getPreferredName(), expand); + builder.field(EXCLUDE_INTERIM.getPreferredName(), excludeInterim); + if (start != null) { + builder.field(START.getPreferredName(), start); + } + if (end != null) { + builder.field(END.getPreferredName(), end); + } + if (pageParams != null) { + builder.field(PageParams.PAGE.getPreferredName(), pageParams); + } + if (anomalyScore != null) { + builder.field(ANOMALY_SCORE.getPreferredName(), anomalyScore); + } + builder.field(SORT.getPreferredName(), sort); + builder.field(DESCENDING.getPreferredName(), descending); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, timestamp, expand, excludeInterim, anomalyScore, pageParams, start, end, sort, descending); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(jobId, other.jobId) && + Objects.equals(timestamp, other.timestamp) && + Objects.equals(expand, other.expand) && + Objects.equals(excludeInterim, other.excludeInterim) && + Objects.equals(anomalyScore, other.anomalyScore) && + Objects.equals(pageParams, other.pageParams) && + Objects.equals(start, other.start) && + Objects.equals(end, other.end) && + Objects.equals(sort, other.sort) && + Objects.equals(descending, other.descending); + } + } + + static class RequestBuilder extends ActionRequestBuilder { + + RequestBuilder(ElasticsearchClient client) { + super(client, INSTANCE, new Request()); 
+ } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private QueryPage buckets; + + public Response() { + } + + public Response(QueryPage buckets) { + this.buckets = buckets; + } + + public QueryPage getBuckets() { + return buckets; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + buckets = new QueryPage<>(in, Bucket::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + buckets.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + buckets.doXContentBody(builder, params); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(buckets); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(buckets, other.buckets); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarEventsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarEventsAction.java new file mode 100644 index 0000000000000..2fe993b513804 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarEventsAction.java @@ -0,0 +1,250 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.action.util.PageParams; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.calendars.Calendar; +import org.elasticsearch.xpack.core.ml.calendars.ScheduledEvent; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; + +public class GetCalendarEventsAction extends Action { + public static final GetCalendarEventsAction INSTANCE = new GetCalendarEventsAction(); + public static final String NAME = "cluster:monitor/xpack/ml/calendars/events/get"; + + private GetCalendarEventsAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + public static final ParseField START = new ParseField("start"); + public static final ParseField END = new ParseField("end"); + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); + + static { + PARSER.declareString(Request::setCalendarId, Calendar.ID); + PARSER.declareString(Request::setStart, START); + PARSER.declareString(Request::setEnd, END); + PARSER.declareString(Request::setJobId, Job.ID); + PARSER.declareObject(Request::setPageParams, PageParams.PARSER, PageParams.PAGE); + } + + public static Request parseRequest(String calendarId, XContentParser parser) { + Request request = PARSER.apply(parser, null); + if (calendarId != null) { + request.setCalendarId(calendarId); + } + return request; + } + + private String calendarId; + private String start; + private String end; + private String jobId; + private PageParams pageParams = PageParams.defaultParams(); + + public Request() { + } + + public Request(String calendarId) { + setCalendarId(calendarId); + } + + public String getCalendarId() { + return calendarId; + } + + private void setCalendarId(String calendarId) { + this.calendarId = ExceptionsHelper.requireNonNull(calendarId, Calendar.ID.getPreferredName()); + } + + public String getStart() { + return start; + } + public void setStart(String start) { + this.start = start; + } + + public String getEnd() { + return end; + } + + public void setEnd(String end) { + this.end = end; + } + + public String getJobId() { + return jobId; + } + + public void setJobId(String jobId) { + this.jobId = jobId; + } + + public PageParams getPageParams() { + return pageParams; + } + + public void setPageParams(PageParams pageParams) { + this.pageParams = 
Objects.requireNonNull(pageParams); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException e = null; + + boolean calendarIdIsAll = GetCalendarsAction.Request.ALL.equals(calendarId); + if (jobId != null && calendarIdIsAll == false) { + e = ValidateActions.addValidationError("If " + Job.ID.getPreferredName() + " is used " + + Calendar.ID.getPreferredName() + " must be '" + GetCalendarsAction.Request.ALL + "'", e); + } + return e; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + calendarId = in.readString(); + start = in.readOptionalString(); + end = in.readOptionalString(); + jobId = in.readOptionalString(); + pageParams = new PageParams(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(calendarId); + out.writeOptionalString(start); + out.writeOptionalString(end); + out.writeOptionalString(jobId); + pageParams.writeTo(out); + } + + @Override + public int hashCode() { + return Objects.hash(calendarId, start, end, pageParams, jobId); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(calendarId, other.calendarId) && Objects.equals(start, other.start) + && Objects.equals(end, other.end) && Objects.equals(pageParams, other.pageParams) + && Objects.equals(jobId, other.jobId); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Calendar.ID.getPreferredName(), calendarId); + if (start != null) { + builder.field(START.getPreferredName(), start); + } + if (end != null) { + builder.field(END.getPreferredName(), end); + } + if (jobId != null) { + builder.field(Job.ID.getPreferredName(), jobId); + } + builder.field(PageParams.PAGE.getPreferredName(), pageParams); + builder.endObject(); + return builder; + } + } + + public static class RequestBuilder extends ActionRequestBuilder { + + public RequestBuilder(ElasticsearchClient client) { + super(client, INSTANCE, new Request()); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private QueryPage scheduledEvents; + + public Response() { + } + + public Response(QueryPage scheduledEvents) { + this.scheduledEvents = scheduledEvents; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + scheduledEvents = new QueryPage<>(in, ScheduledEvent::new); + + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + scheduledEvents.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return scheduledEvents.toXContent(builder, params); + } + + @Override + public int hashCode() { + return Objects.hash(scheduledEvents); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(scheduledEvents, other.scheduledEvents); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarsAction.java new file mode 100644 index 
0000000000000..f11d9ccbf869a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCalendarsAction.java @@ -0,0 +1,219 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.StatusToXContentObject; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.ml.action.util.PageParams; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.calendars.Calendar; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class GetCalendarsAction extends Action { + + public static final GetCalendarsAction INSTANCE = new GetCalendarsAction(); + public static final String NAME = "cluster:monitor/xpack/ml/calendars/get"; + + private GetCalendarsAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + public static final String ALL = "_all"; + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); + + static { + PARSER.declareString(Request::setCalendarId, Calendar.ID); + PARSER.declareObject(Request::setPageParams, PageParams.PARSER, PageParams.PAGE); + } + + public static Request parseRequest(String calendarId, XContentParser parser) { + Request request = PARSER.apply(parser, null); + if (calendarId != null) { + request.setCalendarId(calendarId); + } + return request; + } + + private String calendarId; + private PageParams pageParams; + + public Request() { + } + + public void setCalendarId(String calendarId) { + this.calendarId = calendarId; + } + + public String getCalendarId() { + return calendarId; + } + + public PageParams getPageParams() { + return pageParams; + } + + public void setPageParams(PageParams pageParams) { + this.pageParams = pageParams; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + + if (calendarId != null && pageParams != null) { + validationException = addValidationError("Params [" + PageParams.FROM.getPreferredName() + + ", " + PageParams.SIZE.getPreferredName() + "] are incompatible with [" + + Calendar.ID.getPreferredName() + "].", + validationException); + } + return validationException; + } + + @Override + public void 
readFrom(StreamInput in) throws IOException { + super.readFrom(in); + calendarId = in.readOptionalString(); + pageParams = in.readOptionalWriteable(PageParams::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalString(calendarId); + out.writeOptionalWriteable(pageParams); + } + + @Override + public int hashCode() { + return Objects.hash(calendarId, pageParams); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(calendarId, other.calendarId) && Objects.equals(pageParams, other.pageParams); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (calendarId != null) { + builder.field(Calendar.ID.getPreferredName(), calendarId); + } + if (pageParams != null) { + builder.field(PageParams.PAGE.getPreferredName(), pageParams); + } + builder.endObject(); + return builder; + } + } + + public static class RequestBuilder extends ActionRequestBuilder { + + public RequestBuilder(ElasticsearchClient client) { + super(client, INSTANCE, new Request()); + } + } + + public static class Response extends ActionResponse implements StatusToXContentObject { + + private QueryPage calendars; + + public Response(QueryPage calendars) { + this.calendars = calendars; + } + + public Response() { + } + + public QueryPage getCalendars() { + return calendars; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + calendars = new QueryPage<>(in, Calendar::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + calendars.writeTo(out); + } + + @Override + public RestStatus status() { + return RestStatus.OK; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + calendars.doXContentBody(builder, params); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(calendars); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(calendars, other.calendars); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesAction.java new file mode 100644 index 0000000000000..84976c61e4517 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesAction.java @@ -0,0 +1,224 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.action.util.PageParams; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.results.CategoryDefinition; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class GetCategoriesAction extends +Action<GetCategoriesAction.Request, GetCategoriesAction.Response, GetCategoriesAction.RequestBuilder> { + + public static final GetCategoriesAction INSTANCE = new GetCategoriesAction(); + public static final String NAME = "cluster:monitor/xpack/ml/job/results/categories/get"; + + private GetCategoriesAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + public static final ParseField CATEGORY_ID = new ParseField("category_id"); + public static final ParseField FROM = new ParseField("from"); + public static final ParseField SIZE = new ParseField("size"); + + private static final ObjectParser<Request, Void> PARSER = new ObjectParser<>(NAME, Request::new); + + static { + PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID); + PARSER.declareLong(Request::setCategoryId, CATEGORY_ID); + PARSER.declareObject(Request::setPageParams, PageParams.PARSER, PageParams.PAGE); + } + + public static Request parseRequest(String jobId, XContentParser parser) { + Request request = PARSER.apply(parser, null); + if (jobId != null) { + request.jobId = jobId; + } + return request; + } + + private String jobId; + private Long categoryId; + private PageParams pageParams; + + public Request(String jobId) { + this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); + } + + public Request() { + } + + public String getJobId() { return jobId; } + + public PageParams getPageParams() { return pageParams; } + + public Long getCategoryId() { return categoryId; } + + public void setCategoryId(Long categoryId) { + if (pageParams != null) { + throw new IllegalArgumentException("Param [" + CATEGORY_ID.getPreferredName() + "] is incompatible with [" + + PageParams.FROM.getPreferredName() + ", " + PageParams.SIZE.getPreferredName() + "]."); + } + this.categoryId = ExceptionsHelper.requireNonNull(categoryId, CATEGORY_ID.getPreferredName()); + } + + public void setPageParams(PageParams pageParams) { + if (categoryId != null) { + throw new IllegalArgumentException("Param [" + PageParams.FROM.getPreferredName() + ", " + + PageParams.SIZE.getPreferredName() + "] is incompatible with [" +
CATEGORY_ID.getPreferredName() + "]."); + } + this.pageParams = pageParams; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (pageParams == null && categoryId == null) { + validationException = addValidationError("Both [" + CATEGORY_ID.getPreferredName() + "] and [" + + PageParams.FROM.getPreferredName() + ", " + PageParams.SIZE.getPreferredName() + "] " + + "cannot be null" , validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + jobId = in.readString(); + categoryId = in.readOptionalLong(); + pageParams = in.readOptionalWriteable(PageParams::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(jobId); + out.writeOptionalLong(categoryId); + out.writeOptionalWriteable(pageParams); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + if (categoryId != null) { + builder.field(CATEGORY_ID.getPreferredName(), categoryId); + } + if (pageParams != null) { + builder.field(PageParams.PAGE.getPreferredName(), pageParams); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + Request request = (Request) o; + return Objects.equals(jobId, request.jobId) + && Objects.equals(categoryId, request.categoryId) + && Objects.equals(pageParams, request.pageParams); + } + + @Override + public int hashCode() { + return Objects.hash(jobId, categoryId, pageParams); + } + } + + public static class RequestBuilder extends ActionRequestBuilder { + + public RequestBuilder(ElasticsearchClient client, GetCategoriesAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private QueryPage result; + + public Response(QueryPage result) { + this.result = result; + } + + public Response() { + } + + public QueryPage getResult() { + return result; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + result = new QueryPage<>(in, CategoryDefinition::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + result.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + result.doXContentBody(builder, params); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + Response response = (Response) o; + return Objects.equals(result, response.result); + } + + @Override + public int hashCode() { + return Objects.hash(result); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java new file mode 100644 index 0000000000000..3f71ac2dc027d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java @@ -0,0 +1,187 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; + +public class GetDatafeedsAction extends Action { + + public static final GetDatafeedsAction INSTANCE = new GetDatafeedsAction(); + public static final String NAME = "cluster:monitor/xpack/ml/datafeeds/get"; + + public static final String ALL = "_all"; + + private GetDatafeedsAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends MasterNodeReadRequest { + + public static final ParseField ALLOW_NO_DATAFEEDS = new ParseField("allow_no_datafeeds"); + + private String datafeedId; + private boolean allowNoDatafeeds = true; + + public Request(String datafeedId) { + this(); + this.datafeedId = ExceptionsHelper.requireNonNull(datafeedId, DatafeedConfig.ID.getPreferredName()); + } + + public Request() { + local(true); + } + + public Request(StreamInput in) throws IOException { + super(in); + datafeedId = in.readString(); + if (in.getVersion().onOrAfter(Version.V_6_1_0)) { + allowNoDatafeeds = in.readBoolean(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(datafeedId); + if (out.getVersion().onOrAfter(Version.V_6_1_0)) { + out.writeBoolean(allowNoDatafeeds); + } + } + + public String getDatafeedId() { + return datafeedId; + } + + public boolean allowNoDatafeeds() { + return allowNoDatafeeds; + } + + public void setAllowNoDatafeeds(boolean allowNoDatafeeds) { + this.allowNoDatafeeds = allowNoDatafeeds; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public int hashCode() { + return Objects.hash(datafeedId, allowNoDatafeeds); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(datafeedId, other.datafeedId) && Objects.equals(allowNoDatafeeds, other.allowNoDatafeeds); + } + } + + public static class 
RequestBuilder extends MasterNodeReadOperationRequestBuilder { + + public RequestBuilder(ElasticsearchClient client, GetDatafeedsAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private QueryPage datafeeds; + + public Response(QueryPage datafeeds) { + this.datafeeds = datafeeds; + } + + public Response() {} + + public QueryPage getResponse() { + return datafeeds; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + datafeeds = new QueryPage<>(in, DatafeedConfig::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + datafeeds.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + datafeeds.doXContentBody(builder, params); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(datafeeds); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(datafeeds, other.datafeeds); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java new file mode 100644 index 0000000000000..4219cb2a3ca8a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java @@ -0,0 +1,287 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +public class GetDatafeedsStatsAction extends Action { + + public static final GetDatafeedsStatsAction INSTANCE = new GetDatafeedsStatsAction(); + public static final String NAME = "cluster:monitor/xpack/ml/datafeeds/stats/get"; + + public static final String ALL = "_all"; + private static final String STATE = "state"; + + private GetDatafeedsStatsAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends MasterNodeReadRequest { + + public static final ParseField ALLOW_NO_DATAFEEDS = new ParseField("allow_no_datafeeds"); + + private String datafeedId; + private boolean allowNoDatafeeds = true; + + public Request(String datafeedId) { + this.datafeedId = ExceptionsHelper.requireNonNull(datafeedId, DatafeedConfig.ID.getPreferredName()); + } + + public Request() {} + + public Request(StreamInput in) throws IOException { + super(in); + datafeedId = in.readString(); + if (in.getVersion().onOrAfter(Version.V_6_1_0)) { + allowNoDatafeeds = in.readBoolean(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(datafeedId); + if (out.getVersion().onOrAfter(Version.V_6_1_0)) { + out.writeBoolean(allowNoDatafeeds); + } + } + + public String getDatafeedId() { + return datafeedId; + } + + public boolean allowNoDatafeeds() { + return allowNoDatafeeds; + } + + public void setAllowNoDatafeeds(boolean allowNoDatafeeds) { + this.allowNoDatafeeds = allowNoDatafeeds; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public int hashCode() { + return Objects.hash(datafeedId, allowNoDatafeeds); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(datafeedId, other.datafeedId) && 
Objects.equals(allowNoDatafeeds, other.allowNoDatafeeds); + } + } + + public static class RequestBuilder extends MasterNodeReadOperationRequestBuilder { + + public RequestBuilder(ElasticsearchClient client, GetDatafeedsStatsAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + public static class DatafeedStats implements ToXContentObject, Writeable { + + private final String datafeedId; + private final DatafeedState datafeedState; + @Nullable + private DiscoveryNode node; + @Nullable + private String assignmentExplanation; + + public DatafeedStats(String datafeedId, DatafeedState datafeedState, @Nullable DiscoveryNode node, + @Nullable String assignmentExplanation) { + this.datafeedId = Objects.requireNonNull(datafeedId); + this.datafeedState = Objects.requireNonNull(datafeedState); + this.node = node; + this.assignmentExplanation = assignmentExplanation; + } + + DatafeedStats(StreamInput in) throws IOException { + datafeedId = in.readString(); + datafeedState = DatafeedState.fromStream(in); + node = in.readOptionalWriteable(DiscoveryNode::new); + assignmentExplanation = in.readOptionalString(); + } + + public String getDatafeedId() { + return datafeedId; + } + + public DatafeedState getDatafeedState() { + return datafeedState; + } + + public DiscoveryNode getNode() { + return node; + } + + public String getAssignmentExplanation() { + return assignmentExplanation; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(DatafeedConfig.ID.getPreferredName(), datafeedId); + builder.field(STATE, datafeedState.toString()); + if (node != null) { + builder.startObject("node"); + builder.field("id", node.getId()); + builder.field("name", node.getName()); + builder.field("ephemeral_id", node.getEphemeralId()); + builder.field("transport_address", node.getAddress().toString()); + + builder.startObject("attributes"); + for (Map.Entry entry : node.getAttributes().entrySet()) { + if (entry.getKey().startsWith("ml.")) { + builder.field(entry.getKey(), entry.getValue()); + } + } + builder.endObject(); + builder.endObject(); + } + if (assignmentExplanation != null) { + builder.field("assignment_explanation", assignmentExplanation); + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(datafeedId); + datafeedState.writeTo(out); + out.writeOptionalWriteable(node); + out.writeOptionalString(assignmentExplanation); + } + + @Override + public int hashCode() { + return Objects.hash(datafeedId, datafeedState, node, assignmentExplanation); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + DatafeedStats other = (DatafeedStats) obj; + return Objects.equals(datafeedId, other.datafeedId) && + Objects.equals(this.datafeedState, other.datafeedState) && + Objects.equals(this.node, other.node) && + Objects.equals(this.assignmentExplanation, other.assignmentExplanation); + } + } + + private QueryPage datafeedsStats; + + public Response(QueryPage datafeedsStats) { + this.datafeedsStats = datafeedsStats; + } + + public Response() {} + + public QueryPage getResponse() { + return datafeedsStats; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + datafeedsStats = new QueryPage<>(in, 
DatafeedStats::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + datafeedsStats.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + datafeedsStats.doXContentBody(builder, params); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(datafeedsStats); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(datafeedsStats, other.datafeedsStats); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetFiltersAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetFiltersAction.java new file mode 100644 index 0000000000000..9d375ec0dfc7b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetFiltersAction.java @@ -0,0 +1,185 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.StatusToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.ml.action.util.PageParams; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.job.config.MlFilter; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + + +public class GetFiltersAction extends Action { + + public static final GetFiltersAction INSTANCE = new GetFiltersAction(); + public static final String NAME = "cluster:admin/xpack/ml/filters/get"; + + private GetFiltersAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest { + + private String filterId; + private PageParams pageParams; + + public Request() { + } + + public void setFilterId(String filterId) { + this.filterId = filterId; + } + + public String getFilterId() { + return filterId; + } + + public PageParams getPageParams() { + return pageParams; + } + + public void setPageParams(PageParams pageParams) { + this.pageParams = pageParams; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (pageParams != null && filterId != null) { + 
validationException = addValidationError("Params [" + PageParams.FROM.getPreferredName() + + ", " + PageParams.SIZE.getPreferredName() + "] are incompatible with [" + + MlFilter.ID.getPreferredName() + "]", validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + filterId = in.readOptionalString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalString(filterId); + } + + @Override + public int hashCode() { + return Objects.hash(filterId); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(filterId, other.filterId); + } + } + + public static class RequestBuilder extends ActionRequestBuilder { + + public RequestBuilder(ElasticsearchClient client) { + super(client, INSTANCE, new Request()); + } + } + + public static class Response extends ActionResponse implements StatusToXContentObject { + + private QueryPage filters; + + public Response(QueryPage filters) { + this.filters = filters; + } + + public Response() { + } + + public QueryPage getFilters() { + return filters; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + filters = new QueryPage<>(in, MlFilter::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + filters.writeTo(out); + } + + @Override + public RestStatus status() { + return RestStatus.OK; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + filters.doXContentBody(builder, params); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(filters); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(filters, other.filters); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } + +} + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersAction.java new file mode 100644 index 0000000000000..39721b0c728f7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersAction.java @@ -0,0 +1,292 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.action.util.PageParams; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.results.Influencer; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; + +public class GetInfluencersAction +extends Action { + + public static final GetInfluencersAction INSTANCE = new GetInfluencersAction(); + public static final String NAME = "cluster:monitor/xpack/ml/job/results/influencers/get"; + + private GetInfluencersAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + public static final ParseField START = new ParseField("start"); + public static final ParseField END = new ParseField("end"); + public static final ParseField EXCLUDE_INTERIM = new ParseField("exclude_interim"); + public static final ParseField INFLUENCER_SCORE = new ParseField("influencer_score"); + public static final ParseField SORT_FIELD = new ParseField("sort"); + public static final ParseField DESCENDING_SORT = new ParseField("desc"); + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); + + static { + PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID); + PARSER.declareStringOrNull(Request::setStart, START); + PARSER.declareStringOrNull(Request::setEnd, END); + PARSER.declareBoolean(Request::setExcludeInterim, EXCLUDE_INTERIM); + PARSER.declareObject(Request::setPageParams, PageParams.PARSER, PageParams.PAGE); + PARSER.declareDouble(Request::setInfluencerScore, INFLUENCER_SCORE); + PARSER.declareString(Request::setSort, SORT_FIELD); + PARSER.declareBoolean(Request::setDescending, DESCENDING_SORT); + } + + public static Request parseRequest(String jobId, XContentParser parser) { + Request request = PARSER.apply(parser, null); + if (jobId != null) { + request.jobId = jobId; + } + return request; + } + + private String jobId; + private String start; + private String end; + private boolean excludeInterim = false; + private PageParams pageParams = new PageParams(); + private double influencerScore = 0.0; + private String sort = Influencer.INFLUENCER_SCORE.getPreferredName(); + private boolean descending = true; + + public Request() { + } + + public Request(String jobId) { + this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); + } + + public String getJobId() { + return jobId; + } + + public 
String getStart() { + return start; + } + + public void setStart(String start) { + this.start = start; + } + + public String getEnd() { + return end; + } + + public void setEnd(String end) { + this.end = end; + } + + public boolean isDescending() { + return descending; + } + + public void setDescending(boolean descending) { + this.descending = descending; + } + + public boolean isExcludeInterim() { + return excludeInterim; + } + + public void setExcludeInterim(boolean excludeInterim) { + this.excludeInterim = excludeInterim; + } + + public void setPageParams(PageParams pageParams) { + this.pageParams = pageParams; + } + + public PageParams getPageParams() { + return pageParams; + } + + public double getInfluencerScore() { + return influencerScore; + } + + public void setInfluencerScore(double anomalyScoreFilter) { + this.influencerScore = anomalyScoreFilter; + } + + public String getSort() { + return sort; + } + + public void setSort(String sort) { + this.sort = ExceptionsHelper.requireNonNull(sort, SORT_FIELD.getPreferredName()); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + jobId = in.readString(); + excludeInterim = in.readBoolean(); + pageParams = new PageParams(in); + start = in.readOptionalString(); + end = in.readOptionalString(); + sort = in.readOptionalString(); + descending = in.readBoolean(); + influencerScore = in.readDouble(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(jobId); + out.writeBoolean(excludeInterim); + pageParams.writeTo(out); + out.writeOptionalString(start); + out.writeOptionalString(end); + out.writeOptionalString(sort); + out.writeBoolean(descending); + out.writeDouble(influencerScore); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(EXCLUDE_INTERIM.getPreferredName(), excludeInterim); + builder.field(PageParams.PAGE.getPreferredName(), pageParams); + builder.field(START.getPreferredName(), start); + builder.field(END.getPreferredName(), end); + builder.field(SORT_FIELD.getPreferredName(), sort); + builder.field(DESCENDING_SORT.getPreferredName(), descending); + builder.field(INFLUENCER_SCORE.getPreferredName(), influencerScore); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, excludeInterim, pageParams, start, end, sort, descending, influencerScore); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(jobId, other.jobId) && Objects.equals(start, other.start) + && Objects.equals(end, other.end) + && Objects.equals(excludeInterim, other.excludeInterim) + && Objects.equals(pageParams, other.pageParams) + && Objects.equals(influencerScore, other.influencerScore) + && Objects.equals(descending, other.descending) + && Objects.equals(sort, other.sort); + } + } + + static class RequestBuilder extends ActionRequestBuilder { + + RequestBuilder(ElasticsearchClient client) { + super(client, INSTANCE, new Request()); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private QueryPage influencers; + + public Response() { + } + + 
public Response(QueryPage influencers) { + this.influencers = influencers; + } + + public QueryPage getInfluencers() { + return influencers; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + influencers = new QueryPage<>(in, Influencer::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + influencers.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + influencers.doXContentBody(builder, params); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(influencers); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(influencers, other.influencers); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsAction.java new file mode 100644 index 0000000000000..f315783330e7d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsAction.java @@ -0,0 +1,185 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; + +public class GetJobsAction extends Action { + + public static final GetJobsAction INSTANCE = new GetJobsAction(); + public static final String NAME = "cluster:monitor/xpack/ml/job/get"; + + private GetJobsAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends MasterNodeReadRequest { + + public static final ParseField ALLOW_NO_JOBS = new ParseField("allow_no_jobs"); + + private String jobId; + private boolean allowNoJobs = true; + + public Request(String jobId) { + this(); + this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); + } + + public Request() { + local(true); + } 
+ + public Request(StreamInput in) throws IOException { + super(in); + jobId = in.readString(); + if (in.getVersion().onOrAfter(Version.V_6_1_0)) { + allowNoJobs = in.readBoolean(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(jobId); + if (out.getVersion().onOrAfter(Version.V_6_1_0)) { + out.writeBoolean(allowNoJobs); + } + } + + public void setAllowNoJobs(boolean allowNoJobs) { + this.allowNoJobs = allowNoJobs; + } + + public String getJobId() { + return jobId; + } + + public boolean allowNoJobs() { + return allowNoJobs; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public int hashCode() { + return Objects.hash(jobId, allowNoJobs); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(jobId, other.jobId) && Objects.equals(allowNoJobs, other.allowNoJobs); + } + } + + public static class RequestBuilder extends MasterNodeReadOperationRequestBuilder { + + public RequestBuilder(ElasticsearchClient client, GetJobsAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private QueryPage jobs; + + public Response(QueryPage jobs) { + this.jobs = jobs; + } + + public Response() {} + + public QueryPage getResponse() { + return jobs; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + jobs = new QueryPage<>(in, Job::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + jobs.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + jobs.doXContentBody(builder, params); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobs); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(jobs, other.jobs); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } + + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java new file mode 100644 index 0000000000000..b53f61e35fcf3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java @@ -0,0 +1,357 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.support.tasks.BaseTasksRequest; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.config.JobState; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class GetJobsStatsAction extends Action { + + public static final GetJobsStatsAction INSTANCE = new GetJobsStatsAction(); + public static final String NAME = "cluster:monitor/xpack/ml/job/stats/get"; + + private static final String DATA_COUNTS = "data_counts"; + private static final String MODEL_SIZE_STATS = "model_size_stats"; + private static final String STATE = "state"; + private static final String NODE = "node"; + + private GetJobsStatsAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends BaseTasksRequest { + + public static final ParseField ALLOW_NO_JOBS = new ParseField("allow_no_jobs"); + + private String jobId; + private boolean allowNoJobs = true; + + // used internally to expand _all jobid to encapsulate all jobs in cluster: + private List expandedJobsIds; + + public Request(String jobId) { + this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); + this.expandedJobsIds = Collections.singletonList(jobId); + } + + public Request() {} + + public List getExpandedJobsIds() { return expandedJobsIds; } + + public void setExpandedJobsIds(List expandedJobsIds) { this.expandedJobsIds = expandedJobsIds; } + + public void setAllowNoJobs(boolean allowNoJobs) { + this.allowNoJobs = allowNoJobs; + } + + public String getJobId() { + return jobId; + } + + public boolean allowNoJobs() { + return allowNoJobs; + } + + @Override + public boolean match(Task task) { + return jobId.equals(MetaData.ALL) || OpenJobAction.JobTaskMatcher.match(task, jobId); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void 
readFrom(StreamInput in) throws IOException { + super.readFrom(in); + jobId = in.readString(); + expandedJobsIds = in.readList(StreamInput::readString); + if (in.getVersion().onOrAfter(Version.V_6_1_0)) { + allowNoJobs = in.readBoolean(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(jobId); + out.writeStringList(expandedJobsIds); + if (out.getVersion().onOrAfter(Version.V_6_1_0)) { + out.writeBoolean(allowNoJobs); + } + } + + @Override + public int hashCode() { + return Objects.hash(jobId, allowNoJobs); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(jobId, other.jobId) && Objects.equals(allowNoJobs, other.allowNoJobs); + } + } + + public static class RequestBuilder extends ActionRequestBuilder { + + public RequestBuilder(ElasticsearchClient client, GetJobsStatsAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends BaseTasksResponse implements ToXContentObject { + + public static class JobStats implements ToXContentObject, Writeable { + private final String jobId; + private DataCounts dataCounts; + @Nullable + private ModelSizeStats modelSizeStats; + @Nullable + private TimeValue openTime; + private JobState state; + @Nullable + private DiscoveryNode node; + @Nullable + private String assignmentExplanation; + + public JobStats(String jobId, DataCounts dataCounts, @Nullable ModelSizeStats modelSizeStats, JobState state, + @Nullable DiscoveryNode node, @Nullable String assignmentExplanation, @Nullable TimeValue opentime) { + this.jobId = Objects.requireNonNull(jobId); + this.dataCounts = Objects.requireNonNull(dataCounts); + this.modelSizeStats = modelSizeStats; + this.state = Objects.requireNonNull(state); + this.node = node; + this.assignmentExplanation = assignmentExplanation; + this.openTime = opentime; + } + + public JobStats(StreamInput in) throws IOException { + jobId = in.readString(); + dataCounts = new DataCounts(in); + modelSizeStats = in.readOptionalWriteable(ModelSizeStats::new); + state = JobState.fromStream(in); + node = in.readOptionalWriteable(DiscoveryNode::new); + assignmentExplanation = in.readOptionalString(); + openTime = in.readOptionalTimeValue(); + } + + public String getJobId() { + return jobId; + } + + public DataCounts getDataCounts() { + return dataCounts; + } + + public ModelSizeStats getModelSizeStats() { + return modelSizeStats; + } + + public JobState getState() { + return state; + } + + public DiscoveryNode getNode() { + return node; + } + + public String getAssignmentExplanation() { + return assignmentExplanation; + } + + public TimeValue getOpenTime() { + return openTime; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + // TODO: Have callers wrap the content with an object as they choose rather than forcing it upon them + builder.startObject(); + { + toUnwrappedXContent(builder); + } + return builder.endObject(); + } + + public XContentBuilder toUnwrappedXContent(XContentBuilder builder) throws IOException { + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(DATA_COUNTS, dataCounts); + if (modelSizeStats != null) { + builder.field(MODEL_SIZE_STATS, modelSizeStats); + } + builder.field(STATE, state.toString()); + if (node != null) { + builder.startObject(NODE); + builder.field("id", 
node.getId()); + builder.field("name", node.getName()); + builder.field("ephemeral_id", node.getEphemeralId()); + builder.field("transport_address", node.getAddress().toString()); + + builder.startObject("attributes"); + for (Map.Entry entry : node.getAttributes().entrySet()) { + builder.field(entry.getKey(), entry.getValue()); + } + builder.endObject(); + builder.endObject(); + } + if (assignmentExplanation != null) { + builder.field("assignment_explanation", assignmentExplanation); + } + if (openTime != null) { + builder.field("open_time", openTime.getStringRep()); + } + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(jobId); + dataCounts.writeTo(out); + out.writeOptionalWriteable(modelSizeStats); + state.writeTo(out); + out.writeOptionalWriteable(node); + out.writeOptionalString(assignmentExplanation); + out.writeOptionalTimeValue(openTime); + } + + @Override + public int hashCode() { + return Objects.hash(jobId, dataCounts, modelSizeStats, state, node, assignmentExplanation, openTime); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + JobStats other = (JobStats) obj; + return Objects.equals(jobId, other.jobId) + && Objects.equals(this.dataCounts, other.dataCounts) + && Objects.equals(this.modelSizeStats, other.modelSizeStats) + && Objects.equals(this.state, other.state) + && Objects.equals(this.node, other.node) + && Objects.equals(this.assignmentExplanation, other.assignmentExplanation) + && Objects.equals(this.openTime, other.openTime); + } + } + + private QueryPage jobsStats; + + public Response(QueryPage jobsStats) { + super(Collections.emptyList(), Collections.emptyList()); + this.jobsStats = jobsStats; + } + + public Response(List taskFailures, List nodeFailures, + QueryPage jobsStats) { + super(taskFailures, nodeFailures); + this.jobsStats = jobsStats; + } + + public Response() { + super(Collections.emptyList(), Collections.emptyList()); + } + + public QueryPage getResponse() { + return jobsStats; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + jobsStats = new QueryPage<>(in, JobStats::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + jobsStats.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject();; + jobsStats.doXContentBody(builder, params); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobsStats); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(jobsStats, other.jobsStats); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsAction.java new file mode 100644 index 0000000000000..382b60a07d58d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsAction.java @@ -0,0 +1,290 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.action.util.PageParams; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; + +public class GetModelSnapshotsAction +extends Action { + + public static final GetModelSnapshotsAction INSTANCE = new GetModelSnapshotsAction(); + public static final String NAME = "cluster:monitor/xpack/ml/job/model_snapshots/get"; + + private GetModelSnapshotsAction() { + super(NAME); + } + + @Override + public GetModelSnapshotsAction.RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public GetModelSnapshotsAction.Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + public static final ParseField SNAPSHOT_ID = new ParseField("snapshot_id"); + public static final ParseField SORT = new ParseField("sort"); + public static final ParseField START = new ParseField("start"); + public static final ParseField END = new ParseField("end"); + public static final ParseField DESC = new ParseField("desc"); + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); + + static { + PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID); + PARSER.declareString((request, snapshotId) -> request.snapshotId = snapshotId, SNAPSHOT_ID); + PARSER.declareString(Request::setStart, START); + PARSER.declareString(Request::setEnd, END); + PARSER.declareString(Request::setSort, SORT); + PARSER.declareBoolean(Request::setDescOrder, DESC); + PARSER.declareObject(Request::setPageParams, PageParams.PARSER, PageParams.PAGE); + } + + public static Request parseRequest(String jobId, String snapshotId, XContentParser parser) { + Request request = PARSER.apply(parser, null); + if (jobId != null) { + request.jobId = jobId; + } + if (snapshotId != null) { + request.snapshotId = snapshotId; + } + return request; + } + + private String jobId; + private String snapshotId; + private String sort; + private String start; + private String end; + private boolean desc = true; + private PageParams pageParams = new PageParams(); + + public Request() { + } + + public Request(String jobId, String snapshotId) { + this.jobId = 
ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); + this.snapshotId = snapshotId; + } + + public String getJobId() { + return jobId; + } + + @Nullable + public String getSnapshotId() { + return snapshotId; + } + + @Nullable + public String getSort() { + return sort; + } + + public void setSort(String sort) { + this.sort = sort; + } + + public boolean getDescOrder() { + return desc; + } + + public void setDescOrder(boolean desc) { + this.desc = desc; + } + + public PageParams getPageParams() { + return pageParams; + } + + public void setPageParams(PageParams pageParams) { + this.pageParams = ExceptionsHelper.requireNonNull(pageParams, PageParams.PAGE.getPreferredName()); + } + + @Nullable + public String getStart() { + return start; + } + + public void setStart(String start) { + this.start = ExceptionsHelper.requireNonNull(start, START.getPreferredName()); + } + + @Nullable + public String getEnd() { + return end; + } + + public void setEnd(String end) { + this.end = ExceptionsHelper.requireNonNull(end, END.getPreferredName()); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + jobId = in.readString(); + snapshotId = in.readOptionalString(); + sort = in.readOptionalString(); + start = in.readOptionalString(); + end = in.readOptionalString(); + desc = in.readBoolean(); + pageParams = new PageParams(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(jobId); + out.writeOptionalString(snapshotId); + out.writeOptionalString(sort); + out.writeOptionalString(start); + out.writeOptionalString(end); + out.writeBoolean(desc); + pageParams.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + if (snapshotId != null) { + builder.field(SNAPSHOT_ID.getPreferredName(), snapshotId); + } + if (start != null) { + builder.field(START.getPreferredName(), start); + } + if (end != null) { + builder.field(END.getPreferredName(), end); + } + if (sort != null) { + builder.field(SORT.getPreferredName(), sort); + } + builder.field(DESC.getPreferredName(), desc); + builder.field(PageParams.PAGE.getPreferredName(), pageParams); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, snapshotId, start, end, sort, desc); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(jobId, other.jobId) + && Objects.equals(snapshotId, other.snapshotId) + && Objects.equals(start, other.start) + && Objects.equals(end, other.end) + && Objects.equals(sort, other.sort) + && Objects.equals(desc, other.desc); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private QueryPage page; + + public Response(QueryPage page) { + this.page = page; + } + + public Response() { + } + + public QueryPage getPage() { + return page; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + page = new QueryPage<>(in, ModelSnapshot::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + page.writeTo(out); + } + + @Override + 
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + page.doXContentBody(builder, params); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(page); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(page, other.page); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } + + public static class RequestBuilder extends ActionRequestBuilder { + + public RequestBuilder(ElasticsearchClient client, GetModelSnapshotsAction action) { + super(client, action, new Request()); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsAction.java new file mode 100644 index 0000000000000..ec1d5484255ed --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsAction.java @@ -0,0 +1,349 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.joda.DateMathParser; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.job.results.OverallBucket; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Collections; +import java.util.Objects; +import java.util.function.LongSupplier; + +/** + *
<p>
+ * This action returns summarized bucket results over multiple jobs.
+ * Overall buckets have the span of the largest job's bucket_span.
+ * Their score is calculated by finding the max anomaly score per job
+ * and then averaging the top N.
+ * </p>
+ * <p>
+ * Overall buckets can be optionally aggregated into larger intervals
+ * by setting the bucket_span parameter. When that is the case, the
+ * overall_score is the max of the overall buckets that are within
+ * the interval.
+ * </p>
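+ * <p>
+ * For example, with top_n set to 2 and per-job maximum anomaly scores of
+ * 30, 50 and 90 in a bucket, that bucket's overall_score is the average of
+ * the two highest scores: (90 + 50) / 2 = 70.
+ * </p>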
+ */ +public class GetOverallBucketsAction + extends Action { + + public static final GetOverallBucketsAction INSTANCE = new GetOverallBucketsAction(); + public static final String NAME = "cluster:monitor/xpack/ml/job/results/overall_buckets/get"; + + private GetOverallBucketsAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + public static final ParseField TOP_N = new ParseField("top_n"); + public static final ParseField BUCKET_SPAN = new ParseField("bucket_span"); + public static final ParseField OVERALL_SCORE = new ParseField("overall_score"); + public static final ParseField EXCLUDE_INTERIM = new ParseField("exclude_interim"); + public static final ParseField START = new ParseField("start"); + public static final ParseField END = new ParseField("end"); + public static final ParseField ALLOW_NO_JOBS = new ParseField("allow_no_jobs"); + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); + + static { + PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID); + PARSER.declareInt(Request::setTopN, TOP_N); + PARSER.declareString(Request::setBucketSpan, BUCKET_SPAN); + PARSER.declareDouble(Request::setOverallScore, OVERALL_SCORE); + PARSER.declareBoolean(Request::setExcludeInterim, EXCLUDE_INTERIM); + PARSER.declareString((request, startTime) -> request.setStart(parseDateOrThrow( + startTime, START, System::currentTimeMillis)), START); + PARSER.declareString((request, endTime) -> request.setEnd(parseDateOrThrow( + endTime, END, System::currentTimeMillis)), END); + PARSER.declareBoolean(Request::setAllowNoJobs, ALLOW_NO_JOBS); + } + + static long parseDateOrThrow(String date, ParseField paramName, LongSupplier now) { + DateMathParser dateMathParser = new DateMathParser(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER); + + try { + return dateMathParser.parse(date, now); + } catch (Exception e) { + String msg = Messages.getMessage(Messages.REST_INVALID_DATETIME_PARAMS, paramName.getPreferredName(), date); + throw new ElasticsearchParseException(msg, e); + } + } + + public static Request parseRequest(String jobId, XContentParser parser) { + Request request = PARSER.apply(parser, null); + if (jobId != null) { + request.jobId = jobId; + } + return request; + } + + private String jobId; + private int topN = 1; + private TimeValue bucketSpan; + private double overallScore = 0.0; + private boolean excludeInterim = false; + private Long start; + private Long end; + private boolean allowNoJobs = true; + + public Request() { + } + + public Request(String jobId) { + this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); + } + + public String getJobId() { + return jobId; + } + + public int getTopN() { + return topN; + } + + public void setTopN(int topN) { + if (topN <= 0) { + throw new IllegalArgumentException("[topN] parameter must be positive, found [" + topN + "]"); + } + this.topN = topN; + } + + public TimeValue getBucketSpan() { + return bucketSpan; + } + + public void setBucketSpan(TimeValue bucketSpan) { + this.bucketSpan = bucketSpan; + } + + public void setBucketSpan(String bucketSpan) { + this.bucketSpan = TimeValue.parseTimeValue(bucketSpan, BUCKET_SPAN.getPreferredName()); + } + + public double getOverallScore() { + return overallScore; + } + + public void 
setOverallScore(double overallScore) { + this.overallScore = overallScore; + } + + public boolean isExcludeInterim() { + return excludeInterim; + } + + public void setExcludeInterim(boolean excludeInterim) { + this.excludeInterim = excludeInterim; + } + + public Long getStart() { + return start; + } + + public void setStart(Long start) { + this.start = start; + } + + public void setStart(String start) { + setStart(parseDateOrThrow(start, START, System::currentTimeMillis)); + } + + public Long getEnd() { + return end; + } + + public void setEnd(Long end) { + this.end = end; + } + + public void setEnd(String end) { + setEnd(parseDateOrThrow(end, END, System::currentTimeMillis)); + } + + public boolean allowNoJobs() { + return allowNoJobs; + } + + public void setAllowNoJobs(boolean allowNoJobs) { + this.allowNoJobs = allowNoJobs; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + jobId = in.readString(); + topN = in.readVInt(); + bucketSpan = in.readOptionalTimeValue(); + overallScore = in.readDouble(); + excludeInterim = in.readBoolean(); + start = in.readOptionalLong(); + end = in.readOptionalLong(); + allowNoJobs = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(jobId); + out.writeVInt(topN); + out.writeOptionalTimeValue(bucketSpan); + out.writeDouble(overallScore); + out.writeBoolean(excludeInterim); + out.writeOptionalLong(start); + out.writeOptionalLong(end); + out.writeBoolean(allowNoJobs); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(TOP_N.getPreferredName(), topN); + if (bucketSpan != null) { + builder.field(BUCKET_SPAN.getPreferredName(), bucketSpan.getStringRep()); + } + builder.field(OVERALL_SCORE.getPreferredName(), overallScore); + builder.field(EXCLUDE_INTERIM.getPreferredName(), excludeInterim); + if (start != null) { + builder.field(START.getPreferredName(), String.valueOf(start)); + } + if (end != null) { + builder.field(END.getPreferredName(), String.valueOf(end)); + } + builder.field(ALLOW_NO_JOBS.getPreferredName(), allowNoJobs); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, topN, bucketSpan, overallScore, excludeInterim, start, end, allowNoJobs); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (getClass() != other.getClass()) { + return false; + } + Request that = (Request) other; + return Objects.equals(jobId, that.jobId) && + this.topN == that.topN && + Objects.equals(bucketSpan, that.bucketSpan) && + this.excludeInterim == that.excludeInterim && + this.overallScore == that.overallScore && + Objects.equals(start, that.start) && + Objects.equals(end, that.end) && + this.allowNoJobs == that.allowNoJobs; + } + } + + static class RequestBuilder extends ActionRequestBuilder { + + RequestBuilder(ElasticsearchClient client) { + super(client, INSTANCE, new Request()); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private QueryPage overallBuckets; + + public Response() { + overallBuckets = new QueryPage<>(Collections.emptyList(), 0, OverallBucket.RESULTS_FIELD); + } + + public Response(QueryPage overallBuckets) { + 
this.overallBuckets = overallBuckets; + } + + public QueryPage getOverallBuckets() { + return overallBuckets; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + overallBuckets = new QueryPage<>(in, OverallBucket::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + overallBuckets.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + overallBuckets.doXContentBody(builder, params); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(overallBuckets); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(overallBuckets, other.overallBuckets); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetRecordsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetRecordsAction.java new file mode 100644 index 0000000000000..586a40af81a29 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetRecordsAction.java @@ -0,0 +1,292 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.action.util.PageParams; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; +import org.elasticsearch.xpack.core.ml.job.results.Influencer; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; + +public class GetRecordsAction extends Action { + + public static final GetRecordsAction INSTANCE = new GetRecordsAction(); + public static final String NAME = "cluster:monitor/xpack/ml/job/results/records/get"; + + private GetRecordsAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + public static final ParseField START = new 
ParseField("start"); + public static final ParseField END = new ParseField("end"); + public static final ParseField EXCLUDE_INTERIM = new ParseField("exclude_interim"); + public static final ParseField RECORD_SCORE_FILTER = new ParseField("record_score"); + public static final ParseField SORT = new ParseField("sort"); + public static final ParseField DESCENDING = new ParseField("desc"); + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); + + static { + PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID); + PARSER.declareStringOrNull(Request::setStart, START); + PARSER.declareStringOrNull(Request::setEnd, END); + PARSER.declareString(Request::setSort, SORT); + PARSER.declareBoolean(Request::setDescending, DESCENDING); + PARSER.declareBoolean(Request::setExcludeInterim, EXCLUDE_INTERIM); + PARSER.declareObject(Request::setPageParams, PageParams.PARSER, PageParams.PAGE); + PARSER.declareDouble(Request::setRecordScore, RECORD_SCORE_FILTER); + } + + public static Request parseRequest(String jobId, XContentParser parser) { + Request request = PARSER.apply(parser, null); + if (jobId != null) { + request.jobId = jobId; + } + return request; + } + + private String jobId; + private String start; + private String end; + private boolean excludeInterim = false; + private PageParams pageParams = new PageParams(); + private double recordScoreFilter = 0.0; + private String sort = Influencer.INFLUENCER_SCORE.getPreferredName(); + private boolean descending = true; + + public Request() { + } + + public Request(String jobId) { + this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); + } + + public String getJobId() { + return jobId; + } + + public String getStart() { + return start; + } + + public void setStart(String start) { + this.start = start; + } + + public String getEnd() { + return end; + } + + public void setEnd(String end) { + this.end = end; + } + + public boolean isDescending() { + return descending; + } + + public void setDescending(boolean descending) { + this.descending = descending; + } + + public boolean isExcludeInterim() { + return excludeInterim; + } + + public void setExcludeInterim(boolean excludeInterim) { + this.excludeInterim = excludeInterim; + } + + public void setPageParams(PageParams pageParams) { + this.pageParams = pageParams; + } + public PageParams getPageParams() { + return pageParams; + } + + public double getRecordScoreFilter() { + return recordScoreFilter; + } + + public void setRecordScore(double recordScoreFilter) { + this.recordScoreFilter = recordScoreFilter; + } + + public String getSort() { + return sort; + } + + public void setSort(String sort) { + this.sort = ExceptionsHelper.requireNonNull(sort, SORT.getPreferredName()); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + jobId = in.readString(); + excludeInterim = in.readBoolean(); + pageParams = new PageParams(in); + start = in.readOptionalString(); + end = in.readOptionalString(); + sort = in.readOptionalString(); + descending = in.readBoolean(); + recordScoreFilter = in.readDouble(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(jobId); + out.writeBoolean(excludeInterim); + pageParams.writeTo(out); + out.writeOptionalString(start); + out.writeOptionalString(end); + out.writeOptionalString(sort); + 
out.writeBoolean(descending); + out.writeDouble(recordScoreFilter); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(START.getPreferredName(), start); + builder.field(END.getPreferredName(), end); + builder.field(SORT.getPreferredName(), sort); + builder.field(DESCENDING.getPreferredName(), descending); + builder.field(RECORD_SCORE_FILTER.getPreferredName(), recordScoreFilter); + builder.field(EXCLUDE_INTERIM.getPreferredName(), excludeInterim); + builder.field(PageParams.PAGE.getPreferredName(), pageParams); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, start, end, sort, descending, recordScoreFilter, excludeInterim, pageParams); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(jobId, other.jobId) && + Objects.equals(start, other.start) && + Objects.equals(end, other.end) && + Objects.equals(sort, other.sort) && + Objects.equals(descending, other.descending) && + Objects.equals(recordScoreFilter, other.recordScoreFilter) && + Objects.equals(excludeInterim, other.excludeInterim) && + Objects.equals(pageParams, other.pageParams); + } + } + + static class RequestBuilder extends ActionRequestBuilder { + + RequestBuilder(ElasticsearchClient client) { + super(client, INSTANCE, new Request()); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private QueryPage records; + + public Response() { + } + + public Response(QueryPage records) { + this.records = records; + } + + public QueryPage getRecords() { + return records; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + records = new QueryPage<>(in, AnomalyRecord::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + records.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + records.doXContentBody(builder, params); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(records); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(records, other.records); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/IsolateDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/IsolateDatafeedAction.java new file mode 100644 index 0000000000000..98ca6c29f4b0b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/IsolateDatafeedAction.java @@ -0,0 +1,181 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.tasks.BaseTasksRequest; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.ml.MLMetadataField; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; + +/** + * An internal action that isolates a datafeed. + * Datafeed isolation is effectively disconnecting a running datafeed task + * from its job, i.e. even though the datafeed performs a search, the retrieved + * data is not sent to the job, etc. As stopping a datafeed cannot always happen + * instantaneously (e.g. cannot cancel an ongoing search), isolating a datafeed + * task ensures the current datafeed task can complete inconsequentially while + * the datafeed persistent task may be stopped or reassigned on another node. + */ +public class IsolateDatafeedAction + extends Action { + + public static final IsolateDatafeedAction INSTANCE = new IsolateDatafeedAction(); + public static final String NAME = "cluster:internal/xpack/ml/datafeed/isolate"; + + private IsolateDatafeedAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends BaseTasksRequest implements ToXContentObject { + + public static ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); + + static { + PARSER.declareString((request, datafeedId) -> request.datafeedId = datafeedId, DatafeedConfig.ID); + } + + public static Request fromXContent(XContentParser parser) { + return parseRequest(null, parser); + } + + public static Request parseRequest(String datafeedId, XContentParser parser) { + Request request = PARSER.apply(parser, null); + if (datafeedId != null) { + request.datafeedId = datafeedId; + } + return request; + } + + private String datafeedId; + + public Request(String datafeedId) { + this.datafeedId = ExceptionsHelper.requireNonNull(datafeedId, DatafeedConfig.ID.getPreferredName()); + } + + public Request() { + } + + public String getDatafeedId() { + return datafeedId; + } + + @Override + public boolean match(Task task) { + String expectedDescription = MLMetadataField.datafeedTaskId(datafeedId); + if (task instanceof StartDatafeedAction.DatafeedTaskMatcher && expectedDescription.equals(task.getDescription())){ + return true; + } + return false; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + datafeedId = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws 
IOException { + super.writeTo(out); + out.writeString(datafeedId); + } + + @Override + public int hashCode() { + return Objects.hash(datafeedId); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(DatafeedConfig.ID.getPreferredName(), datafeedId); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(datafeedId, other.datafeedId); + } + } + + public static class Response extends BaseTasksResponse implements Writeable { + + private boolean isolated; + + public Response(boolean isolated) { + super(null, null); + this.isolated = isolated; + } + + public Response(StreamInput in) throws IOException { + super(null, null); + readFrom(in); + } + + public Response() { + super(null, null); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + isolated = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(isolated); + } + } + + static class RequestBuilder extends ActionRequestBuilder { + + RequestBuilder(ElasticsearchClient client, IsolateDatafeedAction action) { + super(client, action, new Request()); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/JobTaskRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/JobTaskRequest.java new file mode 100644 index 0000000000000..adc84b2cf46d8 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/JobTaskRequest.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
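The isolation request above is an internal, task-scoped request; a minimal sketch of how it targets a single datafeed task (the datafeed id is illustrative):

```java
// Sketch only: Request.match(Task) accepts a task when it is a
// StartDatafeedAction.DatafeedTaskMatcher and its description equals
// MLMetadataField.datafeedTaskId("my-datafeed"), so only that datafeed's task is isolated.
IsolateDatafeedAction.Request request = new IsolateDatafeedAction.Request("my-datafeed");
```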
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.support.tasks.BaseTasksRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; + +public class JobTaskRequest> extends BaseTasksRequest { + + String jobId; + + JobTaskRequest() { + } + + JobTaskRequest(String jobId) { + this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); + } + + public String getJobId() { + return jobId; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + jobId = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(jobId); + } + + @Override + public boolean match(Task task) { + return OpenJobAction.JobTaskMatcher.match(task, jobId); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/KillProcessAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/KillProcessAction.java new file mode 100644 index 0000000000000..48ee4432ff015 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/KillProcessAction.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Objects; + +public class KillProcessAction extends Action { + + public static final KillProcessAction INSTANCE = new KillProcessAction(); + public static final String NAME = "cluster:internal/xpack/ml/job/kill/process"; + + private KillProcessAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + static class RequestBuilder extends ActionRequestBuilder { + + RequestBuilder(ElasticsearchClient client, KillProcessAction action) { + super(client, action, new Request()); + } + } + + public static class Request extends JobTaskRequest { + + public Request(String jobId) { + super(jobId); + } + + public Request() { + super(); + } + } + + public static class Response extends BaseTasksResponse implements Writeable { + + private boolean killed; + + public Response() { + super(null, null); + } + + public Response(StreamInput in) throws IOException { + super(null, null); + readFrom(in); + } + + public Response(boolean killed) { + super(null, null); + this.killed = killed; + } + + public boolean isKilled() { + return killed; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + killed = in.readBoolean(); + } + + @Override 
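JobTaskRequest above is the shared base for job-scoped task requests such as KillProcessAction.Request; a sketch of the matching behaviour (the job id is illustrative):

```java
// Sketch only: subclasses inherit match(Task), which delegates to
// OpenJobAction.JobTaskMatcher.match(task, jobId) and compares the task description
// against "job-" + jobId.
KillProcessAction.Request request = new KillProcessAction.Request("my-job");
```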
+ public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(killed); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return killed == response.killed; + } + + @Override + public int hashCode() { + return Objects.hash(killed); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlInfoAction.java new file mode 100644 index 0000000000000..53a0758bcf807 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlInfoAction.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +public class MlInfoAction extends Action { + + public static final MlInfoAction INSTANCE = new MlInfoAction(); + public static final String NAME = "cluster:monitor/xpack/ml/info/get"; + + private MlInfoAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest { + + public Request() { + super(); + } + + public Request(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + } + + public static class RequestBuilder extends ActionRequestBuilder { + + public RequestBuilder(ElasticsearchClient client, MlInfoAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private Map info; + + public Response(Map info) { + this.info = info; + } + + public Response() { + this.info = Collections.emptyMap(); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + info = in.readMap(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(info); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.map(info); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(info); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return 
Objects.equals(info, other.info); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java new file mode 100644 index 0000000000000..ffe82f2abe58d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java @@ -0,0 +1,296 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.ml.MachineLearningField; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.persistent.PersistentTaskParams; + +import java.io.IOException; +import java.util.Objects; + +public class OpenJobAction extends Action { + + public static final OpenJobAction INSTANCE = new OpenJobAction(); + public static final String NAME = "cluster:admin/xpack/ml/job/open"; + public static final String TASK_NAME = "xpack/ml/job"; + + private OpenJobAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends MasterNodeRequest implements ToXContentObject { + + public static Request fromXContent(XContentParser parser) { + return parseRequest(null, parser); + } + + public static Request parseRequest(String jobId, XContentParser parser) { + JobParams jobParams = JobParams.PARSER.apply(parser, null); + if (jobId != null) { + jobParams.jobId = jobId; + } + return new Request(jobParams); + } + + private JobParams jobParams; + + public Request(JobParams jobParams) { + this.jobParams = jobParams; + } + + public Request(String jobId) { + this.jobParams = new JobParams(jobId); + } + + public Request(StreamInput in) throws IOException { + readFrom(in); + } + + public Request() { + } + + public JobParams getJobParams() { + return jobParams; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + jobParams = new JobParams(in); + } + + @Override + public void writeTo(StreamOutput out) throws 
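A small sketch of the info response above; the map contents are entirely illustrative, since the real payload is assembled by the transport action rather than in this file, and imports are omitted:

```java
// Sketch only: MlInfoAction.Response just carries and serialises an arbitrary map.
Map<String, Object> info = new HashMap<>();
info.put("defaults", Collections.emptyMap());   // made-up key/value purely for illustration
MlInfoAction.Response response = new MlInfoAction.Response(info);
```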
IOException { + super.writeTo(out); + jobParams.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + jobParams.toXContent(builder, params); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobParams); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || obj.getClass() != getClass()) { + return false; + } + OpenJobAction.Request other = (OpenJobAction.Request) obj; + return Objects.equals(jobParams, other.jobParams); + } + + @Override + public String toString() { + return Strings.toString(this); + } + } + + public static class JobParams implements PersistentTaskParams { + + /** TODO Remove in 7.0.0 */ + public static final ParseField IGNORE_DOWNTIME = new ParseField("ignore_downtime"); + + public static final ParseField TIMEOUT = new ParseField("timeout"); + public static ObjectParser PARSER = new ObjectParser<>(TASK_NAME, JobParams::new); + + static { + PARSER.declareString(JobParams::setJobId, Job.ID); + PARSER.declareBoolean((p, v) -> {}, IGNORE_DOWNTIME); + PARSER.declareString((params, val) -> + params.setTimeout(TimeValue.parseTimeValue(val, TIMEOUT.getPreferredName())), TIMEOUT); + } + + public static JobParams fromXContent(XContentParser parser) { + return parseRequest(null, parser); + } + + public static JobParams parseRequest(String jobId, XContentParser parser) { + JobParams params = PARSER.apply(parser, null); + if (jobId != null) { + params.jobId = jobId; + } + return params; + } + + private String jobId; + // A big state can take a while to restore. For symmetry with the _close endpoint any + // changes here should be reflected there too. + private TimeValue timeout = MachineLearningField.STATE_PERSIST_RESTORE_TIMEOUT; + + JobParams() { + } + + public JobParams(String jobId) { + this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); + } + + public JobParams(StreamInput in) throws IOException { + jobId = in.readString(); + if (in.getVersion().onOrBefore(Version.V_5_5_0)) { + // Read `ignoreDowntime` + in.readBoolean(); + } + timeout = TimeValue.timeValueMillis(in.readVLong()); + } + + public String getJobId() { + return jobId; + } + + public void setJobId(String jobId) { + this.jobId = jobId; + } + + public TimeValue getTimeout() { + return timeout; + } + + public void setTimeout(TimeValue timeout) { + this.timeout = timeout; + } + + @Override + public String getWriteableName() { + return TASK_NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(jobId); + if (out.getVersion().onOrBefore(Version.V_5_5_0)) { + // Write `ignoreDowntime` - true by default + out.writeBoolean(true); + } + out.writeVLong(timeout.millis()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(TIMEOUT.getPreferredName(), timeout.getStringRep()); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, timeout); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || obj.getClass() != getClass()) { + return false; + } + OpenJobAction.JobParams other = (OpenJobAction.JobParams) obj; + return Objects.equals(jobId, other.jobId) && + Objects.equals(timeout, other.timeout); + } + + 
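JobParams above is the persistent-task parameter block for opening a job; a sketch of constructing it with a non-default timeout (job id and timeout value are illustrative):

```java
// Sketch only: the timeout bounds how long state restoration may take when the job opens;
// it defaults to MachineLearningField.STATE_PERSIST_RESTORE_TIMEOUT.
OpenJobAction.JobParams params = new OpenJobAction.JobParams("my-job");
params.setTimeout(TimeValue.timeValueMinutes(30));
OpenJobAction.Request request = new OpenJobAction.Request(params);
```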
@Override + public String toString() { + return Strings.toString(this); + } + } + + public static class Response extends AcknowledgedResponse { + public Response() { + super(); + } + + public Response(boolean acknowledged) { + super(acknowledged); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + readAcknowledged(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + writeAcknowledged(out); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AcknowledgedResponse that = (AcknowledgedResponse) o; + return isAcknowledged() == that.isAcknowledged(); + } + + @Override + public int hashCode() { + return Objects.hash(isAcknowledged()); + } + + } + + public interface JobTaskMatcher { + + static boolean match(Task task, String expectedJobId) { + String expectedDescription = "job-" + expectedJobId; + return task instanceof JobTaskMatcher && expectedDescription.equals(task.getDescription()); + } + } + + static class RequestBuilder extends ActionRequestBuilder { + + RequestBuilder(ElasticsearchClient client, OpenJobAction action) { + super(client, action, new Request()); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PersistJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PersistJobAction.java new file mode 100644 index 0000000000000..71f65051464b2 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PersistJobAction.java @@ -0,0 +1,135 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Objects; + +public class PersistJobAction extends Action { + + public static final PersistJobAction INSTANCE = new PersistJobAction(); + public static final String NAME = "cluster:admin/xpack/ml/job/persist"; + + private PersistJobAction() { + super(NAME); + } + + @Override + public PersistJobAction.RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends JobTaskRequest { + + public Request() { + } + + public Request(String jobId) { + super(jobId); + } + + public boolean isBackGround() { + return true; + } + + public boolean isForeground() { + return !isBackGround(); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + // isBackground for fwc + in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + // isBackground for fwc + out.writeBoolean(true); + } + + @Override + public int hashCode() { + return Objects.hash(jobId, isBackGround()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + PersistJobAction.Request other = (PersistJobAction.Request) obj; + return Objects.equals(jobId, other.jobId) && this.isBackGround() == other.isBackGround(); + } + } + + public static class Response extends BaseTasksResponse implements Writeable { + + boolean persisted; + + public Response() { + super(null, null); + } + + public Response(boolean persisted) { + super(null, null); + this.persisted = persisted; + } + + public boolean isPersisted() { + return persisted; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + persisted = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(persisted); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response that = (Response) o; + return this.persisted == that.persisted; + } + + @Override + public int hashCode() { + return Objects.hash(persisted); + } + } + + static class RequestBuilder extends ActionRequestBuilder { + RequestBuilder(ElasticsearchClient client, PersistJobAction action) { + super(client, action, new PersistJobAction.Request()); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java new file mode 100644 index 0000000000000..82e9072751ee6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java @@ -0,0 +1,200 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.calendars.Calendar; +import org.elasticsearch.xpack.core.ml.calendars.ScheduledEvent; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +public class PostCalendarEventsAction extends Action { + public static final PostCalendarEventsAction INSTANCE = new PostCalendarEventsAction(); + public static final String NAME = "cluster:admin/xpack/ml/calendars/events/post"; + + public static final ParseField EVENTS = new ParseField("events"); + + private PostCalendarEventsAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest { + + private static final ObjectParser, Void> PARSER = new ObjectParser<>(NAME, ArrayList::new); + + static { + PARSER.declareObjectArray(List::addAll, (p, c) -> ScheduledEvent.STRICT_PARSER.apply(p, null), ScheduledEvent.RESULTS_FIELD); + } + + public static Request parseRequest(String calendarId, XContentParser parser) throws IOException { + List events = PARSER.apply(parser, null); + + for (ScheduledEvent.Builder event : events) { + if (event.getCalendarId() != null && event.getCalendarId().equals(calendarId) == false) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.INCONSISTENT_ID, + Calendar.ID.getPreferredName(), event.getCalendarId(), calendarId)); + } + // Set the calendar Id in case it is null + event.calendarId(calendarId); + } + + return new Request(calendarId, events.stream().map(ScheduledEvent.Builder::build).collect(Collectors.toList())); + } + + private String calendarId; + private List scheduledEvents; + + public Request() { + } + + public Request(String calendarId, List scheduledEvents) { + this.calendarId = ExceptionsHelper.requireNonNull(calendarId, Calendar.ID.getPreferredName()); + this.scheduledEvents = ExceptionsHelper.requireNonNull(scheduledEvents, EVENTS.getPreferredName()); + + if (scheduledEvents.isEmpty()) { + throw ExceptionsHelper.badRequestException("At least 1 event is required"); + } + } + + public String getCalendarId() { + return calendarId; + } + + public List getScheduledEvents() { + return scheduledEvents; + } + + @Override + public ActionRequestValidationException validate() { 
+ return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + calendarId = in.readString(); + scheduledEvents = in.readList(ScheduledEvent::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(calendarId); + out.writeList(scheduledEvents); + } + + @Override + public int hashCode() { + return Objects.hash(calendarId, scheduledEvents); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(calendarId, other.calendarId) && Objects.equals(scheduledEvents, other.scheduledEvents); + } + } + + public static class RequestBuilder extends ActionRequestBuilder { + + public RequestBuilder(ElasticsearchClient client) { + super(client, INSTANCE, new Request()); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private List scheduledEvents; + + public Response() { + } + + public Response(List scheduledEvents) { + this.scheduledEvents = scheduledEvents; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + if (in.getVersion().before(Version.V_6_3_0)) { + //the acknowledged flag was removed + in.readBoolean(); + } + in.readList(ScheduledEvent::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + if (out.getVersion().before(Version.V_6_3_0)) { + //the acknowledged flag is no longer supported + out.writeBoolean(true); + } + out.writeList(scheduledEvents); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(EVENTS.getPreferredName(), scheduledEvents); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(scheduledEvents); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(scheduledEvents, other.scheduledEvents); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostDataAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostDataAction.java new file mode 100644 index 0000000000000..9ba1bf574db8b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostDataAction.java @@ -0,0 +1,225 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
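The request above enforces at construction time that at least one event is supplied; a sketch of that behaviour (the calendar id is illustrative and imports are omitted):

```java
// Sketch only: an empty events list is rejected up front with a bad-request error
// ("At least 1 event is required") raised via ExceptionsHelper.badRequestException.
PostCalendarEventsAction.Request request =
        new PostCalendarEventsAction.Request("my-calendar", Collections.emptyList());
```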
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.StatusToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; + +import java.io.IOException; +import java.util.Objects; + +public class PostDataAction extends Action { + + public static final PostDataAction INSTANCE = new PostDataAction(); + public static final String NAME = "cluster:admin/xpack/ml/job/data/post"; + + private PostDataAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + static class RequestBuilder extends ActionRequestBuilder { + + RequestBuilder(ElasticsearchClient client, PostDataAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends BaseTasksResponse implements StatusToXContentObject, Writeable { + + private DataCounts dataCounts; + + public Response(String jobId) { + super(null, null); + dataCounts = new DataCounts(jobId); + } + + public Response() { + super(null, null); + } + + public Response(DataCounts counts) { + super(null, null); + this.dataCounts = counts; + } + + public DataCounts getDataCounts() { + return dataCounts; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + dataCounts = new DataCounts(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + dataCounts.writeTo(out); + } + + @Override + public RestStatus status() { + return RestStatus.ACCEPTED; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + dataCounts.doXContentBody(builder, params); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hashCode(dataCounts); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + + return Objects.equals(dataCounts, other.dataCounts); + + } + } + + public static class Request extends JobTaskRequest { + + public static final ParseField RESET_START = new ParseField("reset_start"); + public static final ParseField RESET_END = new ParseField("reset_end"); + + private String resetStart = ""; + private String resetEnd = ""; + private DataDescription dataDescription; + private XContentType xContentType; + private BytesReference content; + + public Request() { + } + + public Request(String jobId) { + super(jobId); + } + + public String getResetStart() { + return resetStart; + } + + public void setResetStart(String resetStart) { + 
this.resetStart = resetStart; + } + + public String getResetEnd() { + return resetEnd; + } + + public void setResetEnd(String resetEnd) { + this.resetEnd = resetEnd; + } + + public DataDescription getDataDescription() { + return dataDescription; + } + + public void setDataDescription(DataDescription dataDescription) { + this.dataDescription = dataDescription; + } + + public BytesReference getContent() { return content; } + + public XContentType getXContentType() { + return xContentType; + } + + public void setContent(BytesReference content, XContentType xContentType) { + this.content = content; + this.xContentType = xContentType; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + resetStart = in.readOptionalString(); + resetEnd = in.readOptionalString(); + dataDescription = in.readOptionalWriteable(DataDescription::new); + content = in.readBytesReference(); + if (in.readBoolean()) { + xContentType = in.readEnum(XContentType.class); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalString(resetStart); + out.writeOptionalString(resetEnd); + out.writeOptionalWriteable(dataDescription); + out.writeBytesReference(content); + boolean hasXContentType = xContentType != null; + out.writeBoolean(hasXContentType); + if (hasXContentType) { + out.writeEnum(xContentType); + } + } + + @Override + public int hashCode() { + // content stream not included + return Objects.hash(jobId, resetStart, resetEnd, dataDescription, xContentType); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + + // content stream not included + return Objects.equals(jobId, other.jobId) && + Objects.equals(resetStart, other.resetStart) && + Objects.equals(resetEnd, other.resetEnd) && + Objects.equals(dataDescription, other.dataDescription) && + Objects.equals(xContentType, other.xContentType); + } + } + + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java new file mode 100644 index 0000000000000..15fbe43754831 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java @@ -0,0 +1,171 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
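A sketch of feeding data through the request above; the job id, timestamps, and JSON body are illustrative, and the surrounding client call plus imports are omitted:

```java
// Sketch only: attach a JSON payload and an optional reset window to the request.
PostDataAction.Request request = new PostDataAction.Request("my-job");
request.setResetStart("1525132800");   // optional; empty string by default
request.setResetEnd("1525219200");
request.setContent(new BytesArray("{\"time\":\"1525132800\",\"value\":42}"), XContentType.JSON);
```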
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Objects; + +public class PreviewDatafeedAction extends Action { + + public static final PreviewDatafeedAction INSTANCE = new PreviewDatafeedAction(); + public static final String NAME = "cluster:admin/xpack/ml/datafeeds/preview"; + + private PreviewDatafeedAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + private String datafeedId; + + public Request() { + } + + public Request(String datafeedId) { + setDatafeedId(datafeedId); + } + + public String getDatafeedId() { + return datafeedId; + } + + public final void setDatafeedId(String datafeedId) { + this.datafeedId = ExceptionsHelper.requireNonNull(datafeedId, DatafeedConfig.ID.getPreferredName()); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + datafeedId = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(datafeedId); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(DatafeedConfig.ID.getPreferredName(), datafeedId); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(datafeedId); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(datafeedId, other.datafeedId); + } + } + + static class RequestBuilder extends ActionRequestBuilder { + + RequestBuilder(ElasticsearchClient client) { + super(client, INSTANCE, new Request()); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private BytesReference preview; + + public Response() { + } + + public Response(BytesReference preview) { + this.preview = preview; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + preview = in.readBytesReference(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBytesReference(preview); + } + + @Override + public XContentBuilder toXContent(XContentBuilder 
builder, Params params) throws IOException { + try (InputStream stream = preview.streamInput()) { + builder.rawValue(stream, XContentType.JSON); + } + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(preview); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(preview, other.preview); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutCalendarAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutCalendarAction.java new file mode 100644 index 0000000000000..5644d05af049d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutCalendarAction.java @@ -0,0 +1,195 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.calendars.Calendar; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.MlStrings; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class PutCalendarAction extends Action { + public static final PutCalendarAction INSTANCE = new PutCalendarAction(); + public static final String NAME = "cluster:admin/xpack/ml/calendars/put"; + + private PutCalendarAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + public static Request parseRequest(String calendarId, XContentParser parser) { + Calendar.Builder builder = Calendar.STRICT_PARSER.apply(parser, null); + if (builder.getId() == null) { + builder.setId(calendarId); + } else if (!Strings.isNullOrEmpty(calendarId) && !calendarId.equals(builder.getId())) { + // If we have both URI and body filter ID, they must be identical + throw new IllegalArgumentException(Messages.getMessage(Messages.INCONSISTENT_ID, Calendar.ID.getPreferredName(), + builder.getId(), calendarId)); + } + return new Request(builder.build()); + } + + private Calendar calendar; + + public Request() { + + } + + public Request(Calendar calendar) { + 
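The preview response above is a thin wrapper around raw JSON bytes; a sketch with an illustrative payload:

```java
// Sketch only: toXContent streams the stored bytes back out verbatim as JSON
// via builder.rawValue(stream, XContentType.JSON).
PreviewDatafeedAction.Response response =
        new PreviewDatafeedAction.Response(new BytesArray("[{\"time\":1525132800,\"value\":42}]"));
```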
this.calendar = ExceptionsHelper.requireNonNull(calendar, "calendar"); + } + + public Calendar getCalendar() { + return calendar; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if ("_all".equals(calendar.getId())) { + validationException = + addValidationError("Cannot create a Calendar with the reserved name [_all]", + validationException); + } + if (!MlStrings.isValidId(calendar.getId())) { + validationException = addValidationError(Messages.getMessage( + Messages.INVALID_ID, Calendar.ID.getPreferredName(), calendar.getId()), + validationException); + } + if (!MlStrings.hasValidLengthForId(calendar.getId())) { + validationException = addValidationError(Messages.getMessage( + Messages.JOB_CONFIG_ID_TOO_LONG, MlStrings.ID_LENGTH_LIMIT), + validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + calendar = new Calendar(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + calendar.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + calendar.toXContent(builder, params); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(calendar); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(calendar, other.calendar); + } + } + + public static class RequestBuilder extends ActionRequestBuilder { + + public RequestBuilder(ElasticsearchClient client) { + super(client, INSTANCE, new Request()); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private Calendar calendar; + + public Response() { + } + + public Response(Calendar calendar) { + this.calendar = calendar; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + if (in.getVersion().before(Version.V_6_3_0)) { + //the acknowledged flag was removed + in.readBoolean(); + } + calendar = new Calendar(in); + + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + if (out.getVersion().before(Version.V_6_3_0)) { + //the acknowledged flag is no longer supported + out.writeBoolean(true); + } + calendar.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return calendar.toXContent(builder, params); + } + + @Override + public int hashCode() { + return Objects.hash(calendar); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(calendar, other.calendar); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java new file mode 100644 index 0000000000000..bb9b9be16e79a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java @@ -0,0 +1,166 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; + +import java.io.IOException; +import java.util.Objects; + +public class PutDatafeedAction extends Action { + + public static final PutDatafeedAction INSTANCE = new PutDatafeedAction(); + public static final String NAME = "cluster:admin/xpack/ml/datafeeds/put"; + + private PutDatafeedAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends AcknowledgedRequest implements ToXContentObject { + + public static Request parseRequest(String datafeedId, XContentParser parser) { + DatafeedConfig.Builder datafeed = DatafeedConfig.CONFIG_PARSER.apply(parser, null); + datafeed.setId(datafeedId); + return new Request(datafeed.build()); + } + + private DatafeedConfig datafeed; + + public Request(DatafeedConfig datafeed) { + this.datafeed = datafeed; + } + + public Request() { + } + + public DatafeedConfig getDatafeed() { + return datafeed; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + datafeed = new DatafeedConfig(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + datafeed.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + datafeed.toXContent(builder, params); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(datafeed, request.datafeed); + } + + @Override + public int hashCode() { + return Objects.hash(datafeed); + } + } + + public static class RequestBuilder extends MasterNodeOperationRequestBuilder { + + public RequestBuilder(ElasticsearchClient client, PutDatafeedAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private DatafeedConfig datafeed; + + public Response(DatafeedConfig datafeed) { + this.datafeed = datafeed; + } + + public Response() { + } + + public DatafeedConfig getResponse() { + return datafeed; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + if (in.getVersion().before(Version.V_6_3_0)) { + //the acknowledged flag was removed + in.readBoolean(); + } + datafeed = 
new DatafeedConfig(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + if (out.getVersion().before(Version.V_6_3_0)) { + //the acknowledged flag is no longer supported + out.writeBoolean(true); + } + datafeed.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + datafeed.doXContentBody(builder, params); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return Objects.equals(datafeed, response.datafeed); + } + + @Override + public int hashCode() { + return Objects.hash(datafeed); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutFilterAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutFilterAction.java new file mode 100644 index 0000000000000..a5d58d8576c8b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutFilterAction.java @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.job.config.MlFilter; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; + + +public class PutFilterAction extends Action { + + public static final PutFilterAction INSTANCE = new PutFilterAction(); + public static final String NAME = "cluster:admin/xpack/ml/filters/put"; + + private PutFilterAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + public static Request parseRequest(String filterId, XContentParser parser) { + MlFilter.Builder filter = MlFilter.STRICT_PARSER.apply(parser, null); + if (filter.getId() == null) { + filter.setId(filterId); + } else if (!Strings.isNullOrEmpty(filterId) && !filterId.equals(filter.getId())) { + // If we have both URI and body filter ID, they must be identical + throw new IllegalArgumentException(Messages.getMessage(Messages.INCONSISTENT_ID, MlFilter.ID.getPreferredName(), + filter.getId(), filterId)); + } + return new Request(filter.build()); + } + + private MlFilter filter; + + 
public Request() { + + } + + public Request(MlFilter filter) { + this.filter = ExceptionsHelper.requireNonNull(filter, "filter"); + } + + public MlFilter getFilter() { + return this.filter; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + filter = new MlFilter(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + filter.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + filter.toXContent(builder, params); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(filter); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(filter, other.filter); + } + } + + public static class RequestBuilder extends ActionRequestBuilder { + + public RequestBuilder(ElasticsearchClient client) { + super(client, INSTANCE, new Request()); + } + } + + public static class Response extends AcknowledgedResponse { + + public Response() { + super(true); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + readAcknowledged(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + writeAcknowledged(out); + } + + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java new file mode 100644 index 0000000000000..57f4b040010b2 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java @@ -0,0 +1,190 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +public class PutJobAction extends Action { + + public static final PutJobAction INSTANCE = new PutJobAction(); + public static final String NAME = "cluster:admin/xpack/ml/job/put"; + + private PutJobAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends AcknowledgedRequest implements ToXContentObject { + + public static Request parseRequest(String jobId, XContentParser parser) { + Job.Builder jobBuilder = Job.CONFIG_PARSER.apply(parser, null); + if (jobBuilder.getId() == null) { + jobBuilder.setId(jobId); + } else if (!Strings.isNullOrEmpty(jobId) && !jobId.equals(jobBuilder.getId())) { + // If we have both URI and body jobBuilder ID, they must be identical + throw new IllegalArgumentException(Messages.getMessage(Messages.INCONSISTENT_ID, Job.ID.getPreferredName(), + jobBuilder.getId(), jobId)); + } + + return new Request(jobBuilder); + } + + private Job.Builder jobBuilder; + + public Request(Job.Builder jobBuilder) { + // Validate the jobBuilder immediately so that errors can be detected prior to transportation. 
+ jobBuilder.validateInputFields(); + + // Some fields cannot be set at create time + List invalidJobCreationSettings = jobBuilder.invalidCreateTimeSettings(); + if (invalidJobCreationSettings.isEmpty() == false) { + throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_INVALID_CREATE_SETTINGS, + String.join(",", invalidJobCreationSettings))); + } + + this.jobBuilder = jobBuilder; + } + + public Request() { + } + + public Job.Builder getJobBuilder() { + return jobBuilder; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + jobBuilder = new Job.Builder(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + jobBuilder.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + jobBuilder.toXContent(builder, params); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(jobBuilder, request.jobBuilder); + } + + @Override + public int hashCode() { + return Objects.hash(jobBuilder); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } + + public static class RequestBuilder extends MasterNodeOperationRequestBuilder { + + public RequestBuilder(ElasticsearchClient client, PutJobAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private Job job; + + public Response(Job job) { + this.job = job; + } + + public Response() { + } + + public Job getResponse() { + return job; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + if (in.getVersion().before(Version.V_6_3_0)) { + //the acknowledged flag was removed + in.readBoolean(); + } + job = new Job(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + if (out.getVersion().before(Version.V_6_3_0)) { + //the acknowledged flag is no longer supported + out.writeBoolean(true); + } + job.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + job.doXContentBody(builder, params); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return Objects.equals(job, response.job); + } + + @Override + public int hashCode() { + return Objects.hash(job); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java new file mode 100644 index 0000000000000..67b362a3359a5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java @@ -0,0 +1,235 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.StatusToXContentObject; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; + +public class RevertModelSnapshotAction +extends Action { + + public static final RevertModelSnapshotAction INSTANCE = new RevertModelSnapshotAction(); + public static final String NAME = "cluster:admin/xpack/ml/job/model_snapshots/revert"; + + private RevertModelSnapshotAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends AcknowledgedRequest implements ToXContentObject { + + public static final ParseField SNAPSHOT_ID = new ParseField("snapshot_id"); + public static final ParseField DELETE_INTERVENING = new ParseField("delete_intervening_results"); + + private static ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); + + static { + PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID); + PARSER.declareString((request, snapshotId) -> request.snapshotId = snapshotId, SNAPSHOT_ID); + PARSER.declareBoolean(Request::setDeleteInterveningResults, DELETE_INTERVENING); + } + + public static Request parseRequest(String jobId, String snapshotId, XContentParser parser) { + Request request = PARSER.apply(parser, null); + if (jobId != null) { + request.jobId = jobId; + } + if (snapshotId != null) { + request.snapshotId = snapshotId; + } + return request; + } + + private String jobId; + private String snapshotId; + private boolean deleteInterveningResults; + + public Request() { + } + + public Request(String jobId, String snapshotId) { + this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); + this.snapshotId = ExceptionsHelper.requireNonNull(snapshotId, SNAPSHOT_ID.getPreferredName()); + } + + public String getJobId() { + return jobId; + } + + public String getSnapshotId() { + return snapshotId; + } + + public boolean getDeleteInterveningResults() { + return deleteInterveningResults; + } + + public void setDeleteInterveningResults(boolean deleteInterveningResults) { + this.deleteInterveningResults = deleteInterveningResults; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + 
super.readFrom(in); + jobId = in.readString(); + snapshotId = in.readString(); + deleteInterveningResults = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(jobId); + out.writeString(snapshotId); + out.writeBoolean(deleteInterveningResults); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(SNAPSHOT_ID.getPreferredName(), snapshotId); + builder.field(DELETE_INTERVENING.getPreferredName(), deleteInterveningResults); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, snapshotId, deleteInterveningResults); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(jobId, other.jobId) && Objects.equals(snapshotId, other.snapshotId) + && Objects.equals(deleteInterveningResults, other.deleteInterveningResults); + } + } + + static class RequestBuilder extends MasterNodeOperationRequestBuilder { + + RequestBuilder(ElasticsearchClient client) { + super(client, INSTANCE, new Request()); + } + } + + public static class Response extends ActionResponse implements StatusToXContentObject { + + private static final ParseField MODEL = new ParseField("model"); + private ModelSnapshot model; + + public Response() { + + } + + public Response(ModelSnapshot modelSnapshot) { + model = modelSnapshot; + } + + public ModelSnapshot getModel() { + return model; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + if (in.getVersion().before(Version.V_6_3_0)) { + //the acknowledged flag was removed + in.readBoolean(); + } + model = new ModelSnapshot(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + if (out.getVersion().before(Version.V_6_3_0)) { + //the acknowledged flag is no longer supported + out.writeBoolean(true); + } + model.writeTo(out); + } + + @Override + public RestStatus status() { + return RestStatus.OK; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MODEL.getPreferredName()); + builder = model.toXContent(builder, params); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(model); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(model, other.model); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java new file mode 100644 index 0000000000000..cd37354f42e4d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java @@ -0,0 +1,327 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.joda.DateMathParser; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.persistent.PersistentTaskParams; + +import java.io.IOException; +import java.util.Objects; +import java.util.function.LongSupplier; + +public class StartDatafeedAction + extends Action { + + public static final ParseField START_TIME = new ParseField("start"); + public static final ParseField END_TIME = new ParseField("end"); + public static final ParseField TIMEOUT = new ParseField("timeout"); + + public static final StartDatafeedAction INSTANCE = new StartDatafeedAction(); + public static final String NAME = "cluster:admin/xpack/ml/datafeed/start"; + public static final String TASK_NAME = "xpack/ml/datafeed"; + + private StartDatafeedAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends MasterNodeRequest implements ToXContentObject { + + public static Request fromXContent(XContentParser parser) { + return parseRequest(null, parser); + } + + public static Request parseRequest(String datafeedId, XContentParser parser) { + DatafeedParams params = DatafeedParams.PARSER.apply(parser, null); + if (datafeedId != null) { + params.datafeedId = datafeedId; + } + return new Request(params); + } + + private DatafeedParams params; + + public Request(String datafeedId, long startTime) { + this.params = new DatafeedParams(datafeedId, startTime); + } + + public Request(String datafeedId, String startTime) { + this.params = new DatafeedParams(datafeedId, startTime); + } + + public Request(DatafeedParams params) { + this.params = params; + } + + public Request(StreamInput in) throws IOException { + readFrom(in); + } + + public Request() { + } + + public DatafeedParams getParams() { + return params; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException e = null; + if (params.endTime != null && params.endTime <= params.startTime) { + e = ValidateActions.addValidationError(START_TIME.getPreferredName() + " [" + + 
params.startTime + "] must be earlier than " + END_TIME.getPreferredName() + + " [" + params.endTime + "]", e); + } + return e; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + params = new DatafeedParams(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + params.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + this.params.toXContent(builder, params); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(params); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(params, other.params); + } + } + + public static class DatafeedParams implements PersistentTaskParams { + + public static ObjectParser PARSER = new ObjectParser<>(TASK_NAME, DatafeedParams::new); + + static { + PARSER.declareString((params, datafeedId) -> params.datafeedId = datafeedId, DatafeedConfig.ID); + PARSER.declareString((params, startTime) -> params.startTime = parseDateOrThrow( + startTime, START_TIME, System::currentTimeMillis), START_TIME); + PARSER.declareString(DatafeedParams::setEndTime, END_TIME); + PARSER.declareString((params, val) -> + params.setTimeout(TimeValue.parseTimeValue(val, TIMEOUT.getPreferredName())), TIMEOUT); + } + + static long parseDateOrThrow(String date, ParseField paramName, LongSupplier now) { + DateMathParser dateMathParser = new DateMathParser(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER); + + try { + return dateMathParser.parse(date, now); + } catch (Exception e) { + String msg = Messages.getMessage(Messages.REST_INVALID_DATETIME_PARAMS, paramName.getPreferredName(), date); + throw new ElasticsearchParseException(msg, e); + } + } + + public static DatafeedParams fromXContent(XContentParser parser) { + return parseRequest(null, parser); + } + + public static DatafeedParams parseRequest(String datafeedId, XContentParser parser) { + DatafeedParams params = PARSER.apply(parser, null); + if (datafeedId != null) { + params.datafeedId = datafeedId; + } + return params; + } + + public DatafeedParams(String datafeedId, long startTime) { + this.datafeedId = ExceptionsHelper.requireNonNull(datafeedId, DatafeedConfig.ID.getPreferredName()); + this.startTime = startTime; + } + + public DatafeedParams(String datafeedId, String startTime) { + this(datafeedId, parseDateOrThrow(startTime, START_TIME, System::currentTimeMillis)); + } + + public DatafeedParams(StreamInput in) throws IOException { + datafeedId = in.readString(); + startTime = in.readVLong(); + endTime = in.readOptionalLong(); + timeout = TimeValue.timeValueMillis(in.readVLong()); + } + + DatafeedParams() { + } + + private String datafeedId; + private long startTime; + private Long endTime; + private TimeValue timeout = TimeValue.timeValueSeconds(20); + + public String getDatafeedId() { + return datafeedId; + } + + public long getStartTime() { + return startTime; + } + + public Long getEndTime() { + return endTime; + } + + public void setEndTime(String endTime) { + setEndTime(parseDateOrThrow(endTime, END_TIME, System::currentTimeMillis)); + } + + public void setEndTime(Long endTime) { + this.endTime = endTime; + } + + public TimeValue getTimeout() { + return timeout; + } + + public void setTimeout(TimeValue timeout) { + this.timeout = timeout; + } + + @Override + public 
String getWriteableName() { + return TASK_NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(datafeedId); + out.writeVLong(startTime); + out.writeOptionalLong(endTime); + out.writeVLong(timeout.millis()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field(DatafeedConfig.ID.getPreferredName(), datafeedId); + builder.field(START_TIME.getPreferredName(), String.valueOf(startTime)); + if (endTime != null) { + builder.field(END_TIME.getPreferredName(), String.valueOf(endTime)); + } + builder.field(TIMEOUT.getPreferredName(), timeout.getStringRep()); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(datafeedId, startTime, endTime, timeout); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + DatafeedParams other = (DatafeedParams) obj; + return Objects.equals(datafeedId, other.datafeedId) && + Objects.equals(startTime, other.startTime) && + Objects.equals(endTime, other.endTime) && + Objects.equals(timeout, other.timeout); + } + } + + public static class Response extends AcknowledgedResponse { + public Response() { + super(); + } + + public Response(boolean acknowledged) { + super(acknowledged); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + readAcknowledged(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + writeAcknowledged(out); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AcknowledgedResponse that = (AcknowledgedResponse) o; + return isAcknowledged() == that.isAcknowledged(); + } + + @Override + public int hashCode() { + return Objects.hash(isAcknowledged()); + } + + } + + static class RequestBuilder extends ActionRequestBuilder { + + RequestBuilder(ElasticsearchClient client, StartDatafeedAction action) { + super(client, action, new Request()); + } + } + + public interface DatafeedTaskMatcher { + + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java new file mode 100644 index 0000000000000..b59677f283378 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedAction.java @@ -0,0 +1,245 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.tasks.BaseTasksRequest; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.ml.MLMetadataField; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; + +public class StopDatafeedAction + extends Action { + + public static final StopDatafeedAction INSTANCE = new StopDatafeedAction(); + public static final String NAME = "cluster:admin/xpack/ml/datafeed/stop"; + public static final TimeValue DEFAULT_TIMEOUT = TimeValue.timeValueMinutes(5); + + private StopDatafeedAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends BaseTasksRequest implements ToXContentObject { + + public static final ParseField TIMEOUT = new ParseField("timeout"); + public static final ParseField FORCE = new ParseField("force"); + public static final ParseField ALLOW_NO_DATAFEEDS = new ParseField("allow_no_datafeeds"); + + public static ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); + + static { + PARSER.declareString((request, datafeedId) -> request.datafeedId = datafeedId, DatafeedConfig.ID); + PARSER.declareString((request, val) -> + request.setStopTimeout(TimeValue.parseTimeValue(val, TIMEOUT.getPreferredName())), TIMEOUT); + PARSER.declareBoolean(Request::setForce, FORCE); + PARSER.declareBoolean(Request::setAllowNoDatafeeds, ALLOW_NO_DATAFEEDS); + } + + public static Request fromXContent(XContentParser parser) { + return parseRequest(null, parser); + } + + public static Request parseRequest(String datafeedId, XContentParser parser) { + Request request = PARSER.apply(parser, null); + if (datafeedId != null) { + request.datafeedId = datafeedId; + } + return request; + } + + private String datafeedId; + private String[] resolvedStartedDatafeedIds; + private TimeValue stopTimeout = DEFAULT_TIMEOUT; + private boolean force = false; + private boolean allowNoDatafeeds = true; + + public Request(String datafeedId) { + this.datafeedId = ExceptionsHelper.requireNonNull(datafeedId, DatafeedConfig.ID.getPreferredName()); + this.resolvedStartedDatafeedIds = new String[] { datafeedId }; + } + + public Request() { + } + + public String getDatafeedId() { + return datafeedId; + } + + public String[] getResolvedStartedDatafeedIds() { + return resolvedStartedDatafeedIds; + } + + public void setResolvedStartedDatafeedIds(String[] resolvedStartedDatafeedIds) 
{ + this.resolvedStartedDatafeedIds = resolvedStartedDatafeedIds; + } + + public TimeValue getStopTimeout() { + return stopTimeout; + } + + public void setStopTimeout(TimeValue stopTimeout) { + this.stopTimeout = ExceptionsHelper.requireNonNull(stopTimeout, TIMEOUT.getPreferredName()); + } + + public boolean isForce() { + return force; + } + + public void setForce(boolean force) { + this.force = force; + } + + public boolean allowNoDatafeeds() { + return allowNoDatafeeds; + } + + public void setAllowNoDatafeeds(boolean allowNoDatafeeds) { + this.allowNoDatafeeds = allowNoDatafeeds; + } + + @Override + public boolean match(Task task) { + for (String id : resolvedStartedDatafeedIds) { + String expectedDescription = MLMetadataField.datafeedTaskId(id); + if (task instanceof StartDatafeedAction.DatafeedTaskMatcher && expectedDescription.equals(task.getDescription())){ + return true; + } + } + return false; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + datafeedId = in.readString(); + resolvedStartedDatafeedIds = in.readStringArray(); + stopTimeout = in.readTimeValue(); + force = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_6_1_0)) { + allowNoDatafeeds = in.readBoolean(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(datafeedId); + out.writeStringArray(resolvedStartedDatafeedIds); + out.writeTimeValue(stopTimeout); + out.writeBoolean(force); + if (out.getVersion().onOrAfter(Version.V_6_1_0)) { + out.writeBoolean(allowNoDatafeeds); + } + } + + @Override + public int hashCode() { + return Objects.hash(datafeedId, stopTimeout, force, allowNoDatafeeds); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(DatafeedConfig.ID.getPreferredName(), datafeedId); + builder.field(TIMEOUT.getPreferredName(), stopTimeout.getStringRep()); + builder.field(FORCE.getPreferredName(), force); + builder.field(ALLOW_NO_DATAFEEDS.getPreferredName(), allowNoDatafeeds); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(datafeedId, other.datafeedId) && + Objects.equals(stopTimeout, other.stopTimeout) && + Objects.equals(force, other.force) && + Objects.equals(allowNoDatafeeds, other.allowNoDatafeeds); + } + } + + public static class Response extends BaseTasksResponse implements Writeable { + + private boolean stopped; + + public Response(boolean stopped) { + super(null, null); + this.stopped = stopped; + } + + public Response(StreamInput in) throws IOException { + super(null, null); + readFrom(in); + } + + public Response() { + super(null, null); + } + + public boolean isStopped() { + return stopped; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + stopped = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(stopped); + } + } + + static class RequestBuilder extends ActionRequestBuilder { + + RequestBuilder(ElasticsearchClient client, StopDatafeedAction action) { + super(client, action, new Request()); + } + } + +} diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobAction.java new file mode 100644 index 0000000000000..d1a453c7e164a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobAction.java @@ -0,0 +1,118 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.ml.calendars.Calendar; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; + +public class UpdateCalendarJobAction extends Action { + public static final UpdateCalendarJobAction INSTANCE = new UpdateCalendarJobAction(); + public static final String NAME = "cluster:admin/xpack/ml/calendars/jobs/update"; + + private UpdateCalendarJobAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client); + } + + @Override + public PutCalendarAction.Response newResponse() { + return new PutCalendarAction.Response(); + } + + public static class Request extends ActionRequest { + + private String calendarId; + private String jobIdsToAddExpression; + private String jobIdsToRemoveExpression; + + public Request() { + } + + /** + * Job id expressions may be a single job, job group or comma separated + * list of job Ids or groups + */ + public Request(String calendarId, String jobIdsToAddExpression, String jobIdsToRemoveExpression) { + this.calendarId = ExceptionsHelper.requireNonNull(calendarId, Calendar.ID.getPreferredName()); + this.jobIdsToAddExpression = jobIdsToAddExpression; + this.jobIdsToRemoveExpression = jobIdsToRemoveExpression; + } + + public String getCalendarId() { + return calendarId; + } + + public String getJobIdsToAddExpression() { + return jobIdsToAddExpression; + } + + public String getJobIdsToRemoveExpression() { + return jobIdsToRemoveExpression; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + calendarId = in.readString(); + jobIdsToAddExpression = in.readOptionalString(); + jobIdsToRemoveExpression = in.readOptionalString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(calendarId); + out.writeOptionalString(jobIdsToAddExpression); + out.writeOptionalString(jobIdsToRemoveExpression); + } + + @Override + public int hashCode() { + return Objects.hash(calendarId, jobIdsToAddExpression, jobIdsToRemoveExpression); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return 
Objects.equals(calendarId, other.calendarId) && Objects.equals(jobIdsToAddExpression, other.jobIdsToAddExpression) + && Objects.equals(jobIdsToRemoveExpression, other.jobIdsToRemoveExpression); + } + } + + public static class RequestBuilder extends ActionRequestBuilder { + + public RequestBuilder(ElasticsearchClient client) { + super(client, INSTANCE, new Request()); + } + } +} + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedAction.java new file mode 100644 index 0000000000000..588fb264190be --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedAction.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedUpdate; + +import java.io.IOException; +import java.util.Objects; + +public class UpdateDatafeedAction extends Action { + + public static final UpdateDatafeedAction INSTANCE = new UpdateDatafeedAction(); + public static final String NAME = "cluster:admin/xpack/ml/datafeeds/update"; + + private UpdateDatafeedAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public PutDatafeedAction.Response newResponse() { + return new PutDatafeedAction.Response(); + } + + public static class Request extends AcknowledgedRequest implements ToXContentObject { + + public static Request parseRequest(String datafeedId, XContentParser parser) { + DatafeedUpdate.Builder update = DatafeedUpdate.PARSER.apply(parser, null); + update.setId(datafeedId); + return new Request(update.build()); + } + + private DatafeedUpdate update; + + public Request(DatafeedUpdate update) { + this.update = update; + } + + public Request() { + } + + public DatafeedUpdate getUpdate() { + return update; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + update = new DatafeedUpdate(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + update.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + update.toXContent(builder, params); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return 
Objects.equals(update, request.update); + } + + @Override + public int hashCode() { + return Objects.hash(update); + } + } + + public static class RequestBuilder extends MasterNodeOperationRequestBuilder { + + public RequestBuilder(ElasticsearchClient client, UpdateDatafeedAction action) { + super(client, action, new Request()); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java new file mode 100644 index 0000000000000..f7998a52d496a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java @@ -0,0 +1,169 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; + +public class UpdateJobAction extends Action { + public static final UpdateJobAction INSTANCE = new UpdateJobAction(); + public static final String NAME = "cluster:admin/xpack/ml/job/update"; + + private UpdateJobAction() { + super(NAME); + } + + @Override + public UpdateJobAction.RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new UpdateJobAction.RequestBuilder(client, this); + } + + @Override + public PutJobAction.Response newResponse() { + return new PutJobAction.Response(); + } + + public static class Request extends AcknowledgedRequest implements ToXContentObject { + + public static UpdateJobAction.Request parseRequest(String jobId, XContentParser parser) { + JobUpdate update = JobUpdate.PARSER.apply(parser, null).setJobId(jobId).build(); + return new UpdateJobAction.Request(jobId, update); + } + + private String jobId; + private JobUpdate update; + + /** Indicates an update that was not triggered by a user */ + private boolean isInternal; + private boolean waitForAck = true; + + public Request(String jobId, JobUpdate update) { + this(jobId, update, false); + } + + private Request(String jobId, JobUpdate update, boolean isInternal) { + this.jobId = jobId; + this.update = update; + this.isInternal = isInternal; + if (MetaData.ALL.equals(jobId)) { + throw ExceptionsHelper.badRequestException("Cannot update more than 1 job at a time"); + } + } + + public Request() { + } + + public static Request internal(String jobId, JobUpdate update) { + return new Request(jobId, update, true); + } + + public String getJobId() { + return jobId; + } + + public 
JobUpdate getJobUpdate() { + return update; + } + + public boolean isInternal() { + return isInternal; + } + + public boolean isWaitForAck() { + return waitForAck; + } + + public void setWaitForAck(boolean waitForAck) { + this.waitForAck = waitForAck; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + jobId = in.readString(); + update = new JobUpdate(in); + if (in.getVersion().onOrAfter(Version.V_6_2_2)) { + isInternal = in.readBoolean(); + } else { + isInternal = false; + } + if (in.getVersion().onOrAfter(Version.V_6_3_0)) { + waitForAck = in.readBoolean(); + } else { + waitForAck = true; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(jobId); + update.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_6_2_2)) { + out.writeBoolean(isInternal); + } + if (out.getVersion().onOrAfter(Version.V_6_3_0)) { + out.writeBoolean(waitForAck); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + // only serialize the update, as the job id is specified as part of the url + update.toXContent(builder, params); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + UpdateJobAction.Request that = (UpdateJobAction.Request) o; + return Objects.equals(jobId, that.jobId) && + Objects.equals(update, that.update) && + isInternal == that.isInternal; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, update, isInternal); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } + + public static class RequestBuilder extends MasterNodeOperationRequestBuilder { + + public RequestBuilder(ElasticsearchClient client, UpdateJobAction action) { + super(client, action, new Request()); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotAction.java new file mode 100644 index 0000000000000..6b62d148c948c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotAction.java @@ -0,0 +1,246 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.StatusToXContentObject; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshotField; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; + +public class UpdateModelSnapshotAction extends Action { + + public static final UpdateModelSnapshotAction INSTANCE = new UpdateModelSnapshotAction(); + public static final String NAME = "cluster:admin/xpack/ml/job/model_snapshots/update"; + + private UpdateModelSnapshotAction() { + super(NAME); + } + + @Override + public UpdateModelSnapshotAction.RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public UpdateModelSnapshotAction.Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); + + static { + PARSER.declareString((request, jobId) -> request.jobId = jobId, Job.ID); + PARSER.declareString((request, snapshotId) -> request.snapshotId = snapshotId, ModelSnapshotField.SNAPSHOT_ID); + PARSER.declareString(Request::setDescription, ModelSnapshot.DESCRIPTION); + PARSER.declareBoolean(Request::setRetain, ModelSnapshot.RETAIN); + } + + public static Request parseRequest(String jobId, String snapshotId, XContentParser parser) { + Request request = PARSER.apply(parser, null); + if (jobId != null) { + request.jobId = jobId; + } + if (snapshotId != null) { + request.snapshotId = snapshotId; + } + return request; + } + + private String jobId; + private String snapshotId; + private String description; + private Boolean retain; + + public Request() { + } + + public Request(String jobId, String snapshotId) { + this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); + this.snapshotId = ExceptionsHelper.requireNonNull(snapshotId, ModelSnapshotField.SNAPSHOT_ID.getPreferredName()); + } + + public String getJobId() { + return jobId; + } + + public String getSnapshotId() { + return snapshotId; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public Boolean getRetain() { + return retain; + } + + public void setRetain(Boolean retain) { + this.retain = retain; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void 
readFrom(StreamInput in) throws IOException { + super.readFrom(in); + jobId = in.readString(); + snapshotId = in.readString(); + description = in.readOptionalString(); + retain = in.readOptionalBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(jobId); + out.writeString(snapshotId); + out.writeOptionalString(description); + out.writeOptionalBoolean(retain); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(ModelSnapshotField.SNAPSHOT_ID.getPreferredName(), snapshotId); + if (description != null) { + builder.field(ModelSnapshot.DESCRIPTION.getPreferredName(), description); + } + if (retain != null) { + builder.field(ModelSnapshot.RETAIN.getPreferredName(), retain); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, snapshotId, description, retain); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(jobId, other.jobId) + && Objects.equals(snapshotId, other.snapshotId) + && Objects.equals(description, other.description) + && Objects.equals(retain, other.retain); + } + } + + public static class Response extends ActionResponse implements StatusToXContentObject { + + private static final ParseField ACKNOWLEDGED = new ParseField("acknowledged"); + private static final ParseField MODEL = new ParseField("model"); + + private ModelSnapshot model; + + public Response() { + + } + + public Response(ModelSnapshot modelSnapshot) { + model = modelSnapshot; + } + + public ModelSnapshot getModel() { + return model; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + model = new ModelSnapshot(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + model.writeTo(out); + } + + @Override + public RestStatus status() { + return RestStatus.OK; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ACKNOWLEDGED.getPreferredName(), true); + builder.field(MODEL.getPreferredName()); + builder = model.toXContent(builder, params); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(model); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(model, other.model); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } + + public static class RequestBuilder extends ActionRequestBuilder { + + public RequestBuilder(ElasticsearchClient client, UpdateModelSnapshotAction action) { + super(client, action, new Request()); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java new file mode 100644 index 0000000000000..31ba85232d504 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java @@ -0,0 +1,196 
@@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.StatusToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; +import org.elasticsearch.xpack.core.ml.job.config.MlFilter; +import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +public class UpdateProcessAction extends + Action { + + public static final UpdateProcessAction INSTANCE = new UpdateProcessAction(); + public static final String NAME = "cluster:internal/xpack/ml/job/update/process"; + + private UpdateProcessAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + static class RequestBuilder extends ActionRequestBuilder { + + RequestBuilder(ElasticsearchClient client, UpdateProcessAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends BaseTasksResponse implements StatusToXContentObject, Writeable { + + private boolean isUpdated; + + public Response() { + super(null, null); + this.isUpdated = true; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + isUpdated = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(isUpdated); + } + + public boolean isUpdated() { + return isUpdated; + } + + @Override + public RestStatus status() { + return RestStatus.ACCEPTED; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("updated", isUpdated); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hashCode(isUpdated); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + + return this.isUpdated == other.isUpdated; + } + } + + public static class Request extends JobTaskRequest { + + private ModelPlotConfig modelPlotConfig; + private List detectorUpdates; + private MlFilter filter; + private boolean updateScheduledEvents = false; + + public Request() { + } + + public Request(String jobId, ModelPlotConfig modelPlotConfig, List detectorUpdates, MlFilter filter, + boolean updateScheduledEvents) { + super(jobId); + this.modelPlotConfig = modelPlotConfig; + this.detectorUpdates = detectorUpdates; + this.filter = filter; + this.updateScheduledEvents = updateScheduledEvents; + } + + 
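The Request above bundles everything the running autodetect process needs in order to pick up a job configuration change: an optional new model plot config, per-detector rule updates, an optional filter, and a flag asking the process to refresh its scheduled (calendar) events. A minimal sketch of assembling one, using only the constructor shown above; the helper method itself is hypothetical and not part of this change, and imports are omitted:

```java
// Hypothetical helper (illustration only, not part of this diff): build the
// process-update request from already-validated pieces of a JobUpdate.
// Passing null for the filter means no filter changed; the final flag asks the
// process to re-read scheduled events for the job.
static UpdateProcessAction.Request processUpdateFor(String jobId,
                                                    ModelPlotConfig newModelPlot,
                                                    List<JobUpdate.DetectorUpdate> detectorUpdates) {
    return new UpdateProcessAction.Request(jobId, newModelPlot, detectorUpdates, null, true);
}
```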
public ModelPlotConfig getModelPlotConfig() { + return modelPlotConfig; + } + + public List getDetectorUpdates() { + return detectorUpdates; + } + + public MlFilter getFilter() { + return filter; + } + + public boolean isUpdateScheduledEvents() { + return updateScheduledEvents; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + modelPlotConfig = in.readOptionalWriteable(ModelPlotConfig::new); + if (in.readBoolean()) { + detectorUpdates = in.readList(JobUpdate.DetectorUpdate::new); + } + if (in.getVersion().onOrAfter(Version.V_6_2_0)) { + filter = in.readOptionalWriteable(MlFilter::new); + updateScheduledEvents = in.readBoolean(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalWriteable(modelPlotConfig); + boolean hasDetectorUpdates = detectorUpdates != null; + out.writeBoolean(hasDetectorUpdates); + if (hasDetectorUpdates) { + out.writeList(detectorUpdates); + } + if (out.getVersion().onOrAfter(Version.V_6_2_0)) { + out.writeOptionalWriteable(filter); + out.writeBoolean(updateScheduledEvents); + } + } + + @Override + public int hashCode() { + return Objects.hash(getJobId(), modelPlotConfig, detectorUpdates, filter, updateScheduledEvents); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + + return Objects.equals(getJobId(), other.getJobId()) && + Objects.equals(modelPlotConfig, other.modelPlotConfig) && + Objects.equals(detectorUpdates, other.detectorUpdates) && + Objects.equals(filter, other.filter) && + Objects.equals(updateScheduledEvents, other.updateScheduledEvents); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateDetectorAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateDetectorAction.java new file mode 100644 index 0000000000000..13948fc5fdc8f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateDetectorAction.java @@ -0,0 +1,138 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.job.config.Detector; + +import java.io.IOException; +import java.util.Objects; + +public class ValidateDetectorAction +extends Action { + + public static final ValidateDetectorAction INSTANCE = new ValidateDetectorAction(); + public static final String NAME = "cluster:admin/xpack/ml/job/validate/detector"; + + protected ValidateDetectorAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, INSTANCE); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class RequestBuilder extends ActionRequestBuilder { + + protected RequestBuilder(ElasticsearchClient client, ValidateDetectorAction action) { + super(client, action, new Request()); + } + + } + + public static class Request extends ActionRequest implements ToXContentObject { + + private Detector detector; + + public static Request parseRequest(XContentParser parser) { + Detector detector = Detector.CONFIG_PARSER.apply(parser, null).build(); + return new Request(detector); + } + + public Request() { + this.detector = null; + } + + public Request(Detector detector) { + this.detector = detector; + } + + public Detector getDetector() { + return detector; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + detector.writeTo(out); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + detector = new Detector(in); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + detector.toXContent(builder, params); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(detector); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(detector, other.detector); + } + + } + + public static class Response extends AcknowledgedResponse { + + public Response() { + super(); + } + + public Response(boolean acknowledged) { + super(acknowledged); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + readAcknowledged(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + writeAcknowledged(out); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java new file mode 100644 index 0000000000000..e0cde4f9358c9 --- /dev/null +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigAction.java @@ -0,0 +1,144 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; + +import java.io.IOException; +import java.util.Date; +import java.util.List; +import java.util.Objects; + +public class ValidateJobConfigAction +extends Action { + + public static final ValidateJobConfigAction INSTANCE = new ValidateJobConfigAction(); + public static final String NAME = "cluster:admin/xpack/ml/job/validate"; + + protected ValidateJobConfigAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, INSTANCE); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class RequestBuilder extends ActionRequestBuilder { + + protected RequestBuilder(ElasticsearchClient client, ValidateJobConfigAction action) { + super(client, action, new Request()); + } + + } + + public static class Request extends ActionRequest { + + private Job job; + + public static Request parseRequest(XContentParser parser) { + Job.Builder job = Job.CONFIG_PARSER.apply(parser, null); + // When jobs are PUT their ID must be supplied in the URL - assume this will + // be valid unless an invalid job ID is specified in the JSON to be validated + job.setId(job.getId() != null ? 
job.getId() : "ok"); + + // Some fields cannot be set at create time + List invalidJobCreationSettings = job.invalidCreateTimeSettings(); + if (invalidJobCreationSettings.isEmpty() == false) { + throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_INVALID_CREATE_SETTINGS, + String.join(",", invalidJobCreationSettings))); + } + + return new Request(job.build(new Date())); + } + + public Request() { + this.job = null; + } + + public Request(Job job) { + this.job = job; + } + + public Job getJob() { + return job; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + job.writeTo(out); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + job = new Job(in); + } + + @Override + public int hashCode() { + return Objects.hash(job); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(job, other.job); + } + + } + + public static class Response extends AcknowledgedResponse { + + public Response() { + super(); + } + + public Response(boolean acknowledged) { + super(acknowledged); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + readAcknowledged(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + writeAcknowledged(out); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/util/PageParams.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/util/PageParams.java new file mode 100644 index 0000000000000..a3c1cb660f7cd --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/util/PageParams.java @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action.util; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +public class PageParams implements ToXContentObject, Writeable { + + public static final ParseField PAGE = new ParseField("page"); + public static final ParseField FROM = new ParseField("from"); + public static final ParseField SIZE = new ParseField("size"); + + public static final int DEFAULT_FROM = 0; + public static final int DEFAULT_SIZE = 100; + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(PAGE.getPreferredName(), + a -> new PageParams(a[0] == null ? DEFAULT_FROM : (int) a[0], a[1] == null ? 
DEFAULT_SIZE : (int) a[1])); + + static { + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), FROM); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), SIZE); + } + + private final int from; + private final int size; + + public static PageParams defaultParams() { + return new PageParams(DEFAULT_FROM, DEFAULT_SIZE); + } + + public PageParams(StreamInput in) throws IOException { + this(in.readVInt(), in.readVInt()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(from); + out.writeVInt(size); + } + + public PageParams() { + this.from = DEFAULT_FROM; + this.size = DEFAULT_SIZE; + } + + public PageParams(int from, int size) { + if (from < 0) { + throw new IllegalArgumentException("Parameter [" + FROM.getPreferredName() + "] cannot be < 0"); + } + if (size < 0) { + throw new IllegalArgumentException("Parameter [" + PageParams.SIZE.getPreferredName() + "] cannot be < 0"); + } + this.from = from; + this.size = size; + } + + public int getFrom() { + return from; + } + + public int getSize() { + return size; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(FROM.getPreferredName(), from); + builder.field(SIZE.getPreferredName(), size); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(from, size); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + PageParams other = (PageParams) obj; + return Objects.equals(from, other.from) && + Objects.equals(size, other.size); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/util/QueryPage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/util/QueryPage.java new file mode 100644 index 0000000000000..706f113aa3423 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/util/QueryPage.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action.util; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * Generic wrapper class for a page of query results and the total number of + * query results.
+ * {@linkplain #count()} is the total number of results but that value may + * not be equal to the actual length of the {@linkplain #results()} list if from + * & take or some cursor was used in the database query. + */ +public final class QueryPage implements ToXContentObject, Writeable { + + public static final ParseField COUNT = new ParseField("count"); + public static final ParseField DEFAULT_RESULTS_FIELD = new ParseField("results_field"); + + private final ParseField resultsField; + private final List results; + private final long count; + + public QueryPage(List results, long count, ParseField resultsField) { + this.results = results; + this.count = count; + this.resultsField = ExceptionsHelper.requireNonNull(resultsField, DEFAULT_RESULTS_FIELD.getPreferredName()); + } + + public QueryPage(StreamInput in, Reader hitReader) throws IOException { + resultsField = new ParseField(in.readString()); + results = in.readList(hitReader); + count = in.readLong(); + } + + public static ResourceNotFoundException emptyQueryPage(ParseField resultsField) { + return new ResourceNotFoundException("Could not find requested " + resultsField.getPreferredName()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(resultsField.getPreferredName()); + out.writeList(results); + out.writeLong(count); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + doXContentBody(builder, params); + builder.endObject(); + return builder; + } + + public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + builder.field(COUNT.getPreferredName(), count); + builder.field(resultsField.getPreferredName(), results); + return builder; + } + + public List results() { + return results; + } + + public long count() { + return count; + } + + public ParseField getResultsField() { + return resultsField; + } + + @Override + public int hashCode() { + return Objects.hash(results, count); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + + if (getClass() != obj.getClass()) { + return false; + } + + @SuppressWarnings("unchecked") + QueryPage other = (QueryPage) obj; + return Objects.equals(results, other.results) && + Objects.equals(count, other.count); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/Calendar.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/Calendar.java new file mode 100644 index 0000000000000..9add81aace357 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/Calendar.java @@ -0,0 +1,172 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.calendars; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.MlMetaIndex; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +/** + * A simple calendar object for scheduled (special) events. + * The calendar consists of a name an a list of job Ids or job groups. + */ +public class Calendar implements ToXContentObject, Writeable { + + public static final String CALENDAR_TYPE = "calendar"; + + public static final ParseField TYPE = new ParseField("type"); + public static final ParseField JOB_IDS = new ParseField("job_ids"); + public static final ParseField ID = new ParseField("calendar_id"); + public static final ParseField DESCRIPTION = new ParseField("description"); + + private static final String DOCUMENT_ID_PREFIX = "calendar_"; + + // For QueryPage + public static final ParseField RESULTS_FIELD = new ParseField("calendars"); + + public static final ObjectParser STRICT_PARSER = createParser(false); + public static final ObjectParser LENIENT_PARSER = createParser(true); + + private static ObjectParser createParser(boolean ignoreUnknownFields) { + ObjectParser parser = new ObjectParser<>(ID.getPreferredName(), ignoreUnknownFields, Builder::new); + + parser.declareString(Builder::setId, ID); + parser.declareStringArray(Builder::setJobIds, JOB_IDS); + parser.declareString((builder, s) -> {}, TYPE); + parser.declareStringOrNull(Builder::setDescription, DESCRIPTION); + + return parser; + } + + public static String documentId(String calendarId) { + return DOCUMENT_ID_PREFIX + calendarId; + } + + private final String id; + private final List jobIds; + private final String description; + + /** + * {@code jobIds} can be a mix of job groups and job Ids + * @param id The calendar Id + * @param jobIds List of job Ids or job groups + * @param description An optional description + */ + public Calendar(String id, List jobIds, @Nullable String description) { + this.id = Objects.requireNonNull(id, ID.getPreferredName() + " must not be null"); + this.jobIds = Objects.requireNonNull(jobIds, JOB_IDS.getPreferredName() + " must not be null"); + this.description = description; + } + + public Calendar(StreamInput in) throws IOException { + id = in.readString(); + jobIds = Arrays.asList(in.readStringArray()); + description = in.readOptionalString(); + } + + public String getId() { + return id; + } + + public String documentId() { + return documentId(id); + } + + public List getJobIds() { + return Collections.unmodifiableList(jobIds); + } + + @Nullable + public String getDescription() { + return description; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(id); + out.writeStringArray(jobIds.toArray(new String[jobIds.size()])); + out.writeOptionalString(description); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ID.getPreferredName(), id); + builder.field(JOB_IDS.getPreferredName(), jobIds); + if (description != 
null) { + builder.field(DESCRIPTION.getPreferredName(), description); + } + if (params.paramAsBoolean(MlMetaIndex.INCLUDE_TYPE_KEY, false)) { + builder.field(TYPE.getPreferredName(), CALENDAR_TYPE); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + + if (!(obj instanceof Calendar)) { + return false; + } + + Calendar other = (Calendar) obj; + return id.equals(other.id) && jobIds.equals(other.jobIds) && Objects.equals(description, other.description); + } + + @Override + public int hashCode() { + return Objects.hash(id, jobIds, description); + } + + public static Builder builder() { + return new Builder(); + } + + public static class Builder { + + private String calendarId; + private List jobIds = Collections.emptyList(); + private String description; + + public String getId() { + return calendarId; + } + + public void setId(String calendarId) { + this.calendarId = calendarId; + } + + public Builder setJobIds(List jobIds) { + this.jobIds = jobIds; + return this; + } + + public Builder setDescription(String description) { + this.description = description; + return this; + } + + public Calendar build() { + return new Calendar(calendarId, jobIds, description); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEvent.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEvent.java new file mode 100644 index 0000000000000..68e1201816dc4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEvent.java @@ -0,0 +1,279 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.calendars; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.MlMetaIndex; +import org.elasticsearch.xpack.core.ml.job.config.Connective; +import org.elasticsearch.xpack.core.ml.job.config.DetectionRule; +import org.elasticsearch.xpack.core.ml.job.config.Operator; +import org.elasticsearch.xpack.core.ml.job.config.RuleAction; +import org.elasticsearch.xpack.core.ml.job.config.RuleCondition; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.Intervals; +import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; + +import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +public class ScheduledEvent implements ToXContentObject, Writeable { + + public static final ParseField DESCRIPTION = new ParseField("description"); + public static final ParseField START_TIME = new ParseField("start_time"); + public static final ParseField END_TIME = new ParseField("end_time"); + public static final ParseField TYPE = new ParseField("type"); + public static final ParseField EVENT_ID = new ParseField("event_id"); + + public static final ParseField RESULTS_FIELD = new ParseField("events"); + + public static final String SCHEDULED_EVENT_TYPE = "scheduled_event"; + public static final String DOCUMENT_ID_PREFIX = "event_"; + + public static final ObjectParser STRICT_PARSER = createParser(false); + public static final ObjectParser LENIENT_PARSER = createParser(true); + + private static ObjectParser createParser(boolean ignoreUnknownFields) { + ObjectParser parser = new ObjectParser<>("scheduled_event", ignoreUnknownFields, Builder::new); + + parser.declareString(ScheduledEvent.Builder::description, DESCRIPTION); + parser.declareField(ScheduledEvent.Builder::startTime, p -> { + if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) { + return ZonedDateTime.ofInstant(Instant.ofEpochMilli(p.longValue()), ZoneOffset.UTC); + } else if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return ZonedDateTime.ofInstant(Instant.ofEpochMilli(TimeUtils.dateStringToEpoch(p.text())), ZoneOffset.UTC); + } + throw new IllegalArgumentException( + "unexpected token [" + p.currentToken() + "] for [" + START_TIME.getPreferredName() + "]"); + }, START_TIME, ObjectParser.ValueType.VALUE); + parser.declareField(ScheduledEvent.Builder::endTime, p -> { + if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) { + return ZonedDateTime.ofInstant(Instant.ofEpochMilli(p.longValue()), ZoneOffset.UTC); + } else if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return ZonedDateTime.ofInstant(Instant.ofEpochMilli(TimeUtils.dateStringToEpoch(p.text())), ZoneOffset.UTC); + } + throw new IllegalArgumentException( + "unexpected token [" + p.currentToken() + "] for [" + END_TIME.getPreferredName() 
+ "]"); + }, END_TIME, ObjectParser.ValueType.VALUE); + + parser.declareString(ScheduledEvent.Builder::calendarId, Calendar.ID); + parser.declareString((builder, s) -> {}, TYPE); + + return parser; + } + + public static String documentId(String eventId) { + return DOCUMENT_ID_PREFIX + eventId; + } + + private final String description; + private final ZonedDateTime startTime; + private final ZonedDateTime endTime; + private final String calendarId; + private final String eventId; + + ScheduledEvent(String description, ZonedDateTime startTime, ZonedDateTime endTime, String calendarId, @Nullable String eventId) { + this.description = Objects.requireNonNull(description); + this.startTime = Objects.requireNonNull(startTime); + this.endTime = Objects.requireNonNull(endTime); + this.calendarId = Objects.requireNonNull(calendarId); + this.eventId = eventId; + } + + public ScheduledEvent(StreamInput in) throws IOException { + description = in.readString(); + startTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(in.readVLong()), ZoneOffset.UTC); + endTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(in.readVLong()), ZoneOffset.UTC); + calendarId = in.readString(); + eventId = in.readOptionalString(); + } + + public String getDescription() { + return description; + } + + public ZonedDateTime getStartTime() { + return startTime; + } + + public ZonedDateTime getEndTime() { + return endTime; + } + + public String getCalendarId() { + return calendarId; + } + + public String getEventId() { + return eventId; + } + + /** + * Convert the scheduled event to a detection rule. + * The rule will have 2 time based conditions for the start and + * end of the event. + * + * The rule's start and end times are aligned with the bucket span + * so the start time is rounded down to a bucket interval and the + * end time rounded up. + * + * @param bucketSpan Bucket span to align to + * @return The event as a detection rule. 
+ */ + public DetectionRule toDetectionRule(TimeValue bucketSpan) { + List conditions = new ArrayList<>(); + + long bucketSpanSecs = bucketSpan.getSeconds(); + + long bucketStartTime = Intervals.alignToFloor(getStartTime().toEpochSecond(), bucketSpanSecs); + conditions.add(RuleCondition.createTime(Operator.GTE, bucketStartTime)); + long bucketEndTime = Intervals.alignToCeil(getEndTime().toEpochSecond(), bucketSpanSecs); + conditions.add(RuleCondition.createTime(Operator.LT, bucketEndTime)); + + DetectionRule.Builder builder = new DetectionRule.Builder(conditions); + builder.setActions(RuleAction.FILTER_RESULTS, RuleAction.SKIP_SAMPLING); + builder.setConditionsConnective(Connective.AND); + return builder.build(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(description); + out.writeVLong(startTime.toInstant().toEpochMilli()); + out.writeVLong(endTime.toInstant().toEpochMilli()); + out.writeString(calendarId); + out.writeOptionalString(eventId); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(DESCRIPTION.getPreferredName(), description); + builder.timeField(START_TIME.getPreferredName(), START_TIME.getPreferredName() + "_string", startTime.toInstant().toEpochMilli()); + builder.timeField(END_TIME.getPreferredName(), END_TIME.getPreferredName() + "_string", endTime.toInstant().toEpochMilli()); + builder.field(Calendar.ID.getPreferredName(), calendarId); + if (eventId != null) { + builder.field(EVENT_ID.getPreferredName(), eventId); + } + if (params.paramAsBoolean(MlMetaIndex.INCLUDE_TYPE_KEY, false)) { + builder.field(TYPE.getPreferredName(), SCHEDULED_EVENT_TYPE); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + + if (!(obj instanceof ScheduledEvent)) { + return false; + } + + ScheduledEvent other = (ScheduledEvent) obj; + // In Java 8 the tests pass with ZonedDateTime.isEquals() or ZonedDateTime.toInstant.equals() + // but in Java 9 & 10 the same tests fail. + // Both isEquals() and toInstant.equals() work the same; convert to epoch seconds and + // compare seconds and nanos are equal. For some reason the nanos are different in Java 9 & 10. + // It's sufficient to compare just the epoch seconds for the purpose of establishing equality + // which only occurs in testing. + // Note ZonedDataTime.equals() fails because the time zone and date-time must be the same + // which isn't the case in tests where the time zone is randomised. 
+ return description.equals(other.description) + && Objects.equals(startTime.toInstant().getEpochSecond(), other.startTime.toInstant().getEpochSecond()) + && Objects.equals(endTime.toInstant().getEpochSecond(), other.endTime.toInstant().getEpochSecond()) + && calendarId.equals(other.calendarId); + } + + @Override + public int hashCode() { + return Objects.hash(description, startTime, endTime, calendarId); + } + + public static class Builder { + private String description; + private ZonedDateTime startTime; + private ZonedDateTime endTime; + private String calendarId; + private String eventId; + + public Builder description(String description) { + this.description = description; + return this; + } + + public Builder startTime(ZonedDateTime startTime) { + this.startTime = startTime; + return this; + } + + public Builder endTime(ZonedDateTime endTime) { + this.endTime = endTime; + return this; + } + + public Builder calendarId(String calendarId) { + this.calendarId = calendarId; + return this; + } + + public String getCalendarId() { + return calendarId; + } + + public Builder eventId(String eventId) { + this.eventId = eventId; + return this; + } + + public ScheduledEvent build() { + if (description == null) { + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.FIELD_CANNOT_BE_NULL, DESCRIPTION.getPreferredName())); + } + + if (startTime == null) { + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.FIELD_CANNOT_BE_NULL, START_TIME.getPreferredName())); + } + + if (endTime == null) { + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.FIELD_CANNOT_BE_NULL, END_TIME.getPreferredName())); + } + + if (calendarId == null) { + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.FIELD_CANNOT_BE_NULL, Calendar.ID.getPreferredName())); + } + + if (startTime.isBefore(endTime) == false) { + throw ExceptionsHelper.badRequestException("Event start time [" + startTime + + "] must come before end time [" + endTime + "]"); + } + + ScheduledEvent event = new ScheduledEvent(description, startTime, endTime, calendarId, eventId); + + return event; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/client/MachineLearningClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/client/MachineLearningClient.java new file mode 100644 index 0000000000000..3308d6891e2c4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/client/MachineLearningClient.java @@ -0,0 +1,384 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.client; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.xpack.core.ml.action.CloseJobAction; +import org.elasticsearch.xpack.core.ml.action.DeleteDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.DeleteFilterAction; +import org.elasticsearch.xpack.core.ml.action.DeleteJobAction; +import org.elasticsearch.xpack.core.ml.action.DeleteModelSnapshotAction; +import org.elasticsearch.xpack.core.ml.action.FlushJobAction; +import org.elasticsearch.xpack.core.ml.action.GetBucketsAction; +import org.elasticsearch.xpack.core.ml.action.GetCategoriesAction; +import org.elasticsearch.xpack.core.ml.action.GetDatafeedsAction; +import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; +import org.elasticsearch.xpack.core.ml.action.GetFiltersAction; +import org.elasticsearch.xpack.core.ml.action.GetInfluencersAction; +import org.elasticsearch.xpack.core.ml.action.GetJobsAction; +import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; +import org.elasticsearch.xpack.core.ml.action.GetModelSnapshotsAction; +import org.elasticsearch.xpack.core.ml.action.GetRecordsAction; +import org.elasticsearch.xpack.core.ml.action.OpenJobAction; +import org.elasticsearch.xpack.core.ml.action.PostDataAction; +import org.elasticsearch.xpack.core.ml.action.PutDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.PutFilterAction; +import org.elasticsearch.xpack.core.ml.action.PutJobAction; +import org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction; +import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.UpdateDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.UpdateJobAction; +import org.elasticsearch.xpack.core.ml.action.UpdateModelSnapshotAction; +import org.elasticsearch.xpack.core.ml.action.ValidateDetectorAction; +import org.elasticsearch.xpack.core.ml.action.ValidateJobConfigAction; + +public class MachineLearningClient { + + private final ElasticsearchClient client; + + public MachineLearningClient(ElasticsearchClient client) { + this.client = client; + } + + public void closeJob(CloseJobAction.Request request, + ActionListener listener) { + client.execute(CloseJobAction.INSTANCE, request, listener); + } + + public ActionFuture closeJob(CloseJobAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(CloseJobAction.INSTANCE, request, listener); + return listener; + } + + public void deleteDatafeed(DeleteDatafeedAction.Request request, + ActionListener listener) { + client.execute(DeleteDatafeedAction.INSTANCE, request, listener); + } + + public ActionFuture deleteDatafeed( + DeleteDatafeedAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(DeleteDatafeedAction.INSTANCE, request, listener); + return listener; + } + + public void deleteFilter(DeleteFilterAction.Request request, + ActionListener listener) { + client.execute(DeleteFilterAction.INSTANCE, request, listener); + } + + public ActionFuture deleteFilter( + DeleteFilterAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(DeleteFilterAction.INSTANCE, request, listener); + return listener; + } 
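Every operation on this client follows the same two-variant pattern: an asynchronous overload that takes an ActionListener, and a blocking overload that returns an ActionFuture backed by a PlainActionFuture. A brief usage sketch with made-up job and snapshot IDs, reusing the UpdateModelSnapshotAction request defined earlier in this change; imports are omitted and an ElasticsearchClient named `client` is assumed to be in scope:

```java
// Assumption: an ElasticsearchClient named "client" is already available.
MachineLearningClient mlClient = new MachineLearningClient(client);

UpdateModelSnapshotAction.Request snapshotRequest =
        new UpdateModelSnapshotAction.Request("my-job", "snapshot-1");
snapshotRequest.setDescription("snapshot kept after the nightly backfill");

// Asynchronous variant: the listener fires on completion or failure.
mlClient.updateModelSnapshot(snapshotRequest, ActionListener.wrap(
        response -> System.out.println("updated: " + response.getModel()),
        Throwable::printStackTrace));

// Blocking variant: the returned ActionFuture can be waited on directly.
UpdateModelSnapshotAction.Response updated =
        mlClient.updateModelSnapshot(snapshotRequest).actionGet();
```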
+ + public void deleteJob(DeleteJobAction.Request request, + ActionListener listener) { + client.execute(DeleteJobAction.INSTANCE, request, listener); + } + + public ActionFuture deleteJob(DeleteJobAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(DeleteJobAction.INSTANCE, request, listener); + return listener; + } + + public void deleteModelSnapshot(DeleteModelSnapshotAction.Request request, + ActionListener listener) { + client.execute(DeleteModelSnapshotAction.INSTANCE, request, listener); + } + + public ActionFuture deleteModelSnapshot( + DeleteModelSnapshotAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(DeleteModelSnapshotAction.INSTANCE, request, listener); + return listener; + } + + public void flushJob(FlushJobAction.Request request, + ActionListener listener) { + client.execute(FlushJobAction.INSTANCE, request, listener); + } + + public ActionFuture flushJob(FlushJobAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(FlushJobAction.INSTANCE, request, listener); + return listener; + } + + public void getBuckets(GetBucketsAction.Request request, + ActionListener listener) { + client.execute(GetBucketsAction.INSTANCE, request, listener); + } + + public ActionFuture getBuckets(GetBucketsAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(GetBucketsAction.INSTANCE, request, listener); + return listener; + } + + public void getCategories(GetCategoriesAction.Request request, + ActionListener listener) { + client.execute(GetCategoriesAction.INSTANCE, request, listener); + } + + public ActionFuture getCategories( + GetCategoriesAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(GetCategoriesAction.INSTANCE, request, listener); + return listener; + } + + public void getDatafeeds(GetDatafeedsAction.Request request, + ActionListener listener) { + client.execute(GetDatafeedsAction.INSTANCE, request, listener); + } + + public ActionFuture getDatafeeds( + GetDatafeedsAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(GetDatafeedsAction.INSTANCE, request, listener); + return listener; + } + + public void getDatafeedsStats(GetDatafeedsStatsAction.Request request, + ActionListener listener) { + client.execute(GetDatafeedsStatsAction.INSTANCE, request, listener); + } + + public ActionFuture getDatafeedsStats( + GetDatafeedsStatsAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(GetDatafeedsStatsAction.INSTANCE, request, listener); + return listener; + } + + public void getFilters(GetFiltersAction.Request request, + ActionListener listener) { + client.execute(GetFiltersAction.INSTANCE, request, listener); + } + + public ActionFuture getFilters(GetFiltersAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(GetFiltersAction.INSTANCE, request, listener); + return listener; + } + + public void getInfluencers(GetInfluencersAction.Request request, + ActionListener listener) { + client.execute(GetInfluencersAction.INSTANCE, request, listener); + } + + public ActionFuture getInfluencers( + GetInfluencersAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(GetInfluencersAction.INSTANCE, request, listener); + return listener; + } + + 
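The results APIs page through QueryPage results using the PageParams class added earlier in this change; count() reports the total number of matches, which can exceed the size of any one page. A rough sketch of fetching the first page of buckets, reusing `mlClient` from the sketch above; it assumes GetBucketsAction.Request has a job-id constructor and a setPageParams setter and that the response exposes getBuckets(), none of which are shown in this excerpt:

```java
// Assumptions (not shown in this diff): GetBucketsAction.Request(String jobId),
// Request.setPageParams(PageParams), and Response.getBuckets() returning a QueryPage.
GetBucketsAction.Request bucketsRequest = new GetBucketsAction.Request("my-job");
bucketsRequest.setPageParams(new PageParams(0, 50));   // from=0, size=50
GetBucketsAction.Response bucketsResponse = mlClient.getBuckets(bucketsRequest).actionGet();
long totalBuckets = bucketsResponse.getBuckets().count();            // total matches
int returnedThisPage = bucketsResponse.getBuckets().results().size(); // this page only
```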
public void getJobs(GetJobsAction.Request request, + ActionListener listener) { + client.execute(GetJobsAction.INSTANCE, request, listener); + } + + public ActionFuture getJobs(GetJobsAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(GetJobsAction.INSTANCE, request, listener); + return listener; + } + + public void getJobsStats(GetJobsStatsAction.Request request, + ActionListener listener) { + client.execute(GetJobsStatsAction.INSTANCE, request, listener); + } + + public ActionFuture getJobsStats( + GetJobsStatsAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(GetJobsStatsAction.INSTANCE, request, listener); + return listener; + } + + public void getModelSnapshots(GetModelSnapshotsAction.Request request, + ActionListener listener) { + client.execute(GetModelSnapshotsAction.INSTANCE, request, listener); + } + + public ActionFuture getModelSnapshots( + GetModelSnapshotsAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(GetModelSnapshotsAction.INSTANCE, request, listener); + return listener; + } + + public void getRecords(GetRecordsAction.Request request, + ActionListener listener) { + client.execute(GetRecordsAction.INSTANCE, request, listener); + } + + public ActionFuture getRecords(GetRecordsAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(GetRecordsAction.INSTANCE, request, listener); + return listener; + } + + public void openJob(OpenJobAction.Request request, + ActionListener listener) { + client.execute(OpenJobAction.INSTANCE, request, listener); + } + + public ActionFuture openJob(OpenJobAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(OpenJobAction.INSTANCE, request, listener); + return listener; + } + + public void postData(PostDataAction.Request request, + ActionListener listener) { + client.execute(PostDataAction.INSTANCE, request, listener); + } + + public ActionFuture postData(PostDataAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(PostDataAction.INSTANCE, request, listener); + return listener; + } + + public void putDatafeed(PutDatafeedAction.Request request, + ActionListener listener) { + client.execute(PutDatafeedAction.INSTANCE, request, listener); + } + + public ActionFuture putDatafeed(PutDatafeedAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(PutDatafeedAction.INSTANCE, request, listener); + return listener; + } + + public void putFilter(PutFilterAction.Request request, + ActionListener listener) { + client.execute(PutFilterAction.INSTANCE, request, listener); + } + + public ActionFuture putFilter(PutFilterAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(PutFilterAction.INSTANCE, request, listener); + return listener; + } + + public void putJob(PutJobAction.Request request, + ActionListener listener) { + client.execute(PutJobAction.INSTANCE, request, listener); + } + + public ActionFuture putJob(PutJobAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(PutJobAction.INSTANCE, request, listener); + return listener; + } + + public void revertModelSnapshot(RevertModelSnapshotAction.Request request, + ActionListener listener) { + client.execute(RevertModelSnapshotAction.INSTANCE, 
request, listener); + } + + public ActionFuture revertModelSnapshot( + RevertModelSnapshotAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(RevertModelSnapshotAction.INSTANCE, request, listener); + return listener; + } + + public void startDatafeed(StartDatafeedAction.Request request, + ActionListener listener) { + client.execute(StartDatafeedAction.INSTANCE, request, listener); + } + + public ActionFuture startDatafeed( + StartDatafeedAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(StartDatafeedAction.INSTANCE, request, listener); + return listener; + } + + public void stopDatafeed(StopDatafeedAction.Request request, + ActionListener listener) { + client.execute(StopDatafeedAction.INSTANCE, request, listener); + } + + public ActionFuture stopDatafeed( + StopDatafeedAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(StopDatafeedAction.INSTANCE, request, listener); + return listener; + } + + public void updateDatafeed(UpdateDatafeedAction.Request request, + ActionListener listener) { + client.execute(UpdateDatafeedAction.INSTANCE, request, listener); + } + + public ActionFuture updateDatafeed( + UpdateDatafeedAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(UpdateDatafeedAction.INSTANCE, request, listener); + return listener; + } + + public void updateJob(UpdateJobAction.Request request, + ActionListener listener) { + client.execute(UpdateJobAction.INSTANCE, request, listener); + } + + public ActionFuture updateJob(UpdateJobAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(UpdateJobAction.INSTANCE, request, listener); + return listener; + } + + public void updateModelSnapshot(UpdateModelSnapshotAction.Request request, + ActionListener listener) { + client.execute(UpdateModelSnapshotAction.INSTANCE, request, listener); + } + + public ActionFuture updateModelSnapshot( + UpdateModelSnapshotAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(UpdateModelSnapshotAction.INSTANCE, request, listener); + return listener; + } + + public void validateDetector(ValidateDetectorAction.Request request, + ActionListener listener) { + client.execute(ValidateDetectorAction.INSTANCE, request, listener); + } + + public ActionFuture validateDetector( + ValidateDetectorAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(ValidateDetectorAction.INSTANCE, request, listener); + return listener; + } + + public void validateJobConfig(ValidateJobConfigAction.Request request, + ActionListener listener) { + client.execute(ValidateJobConfigAction.INSTANCE, request, listener); + } + + public ActionFuture validateJobConfig( + ValidateJobConfigAction.Request request) { + PlainActionFuture listener = PlainActionFuture.newFuture(); + client.execute(ValidateJobConfigAction.INSTANCE, request, listener); + return listener; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/ChunkingConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/ChunkingConfig.java new file mode 100644 index 0000000000000..43adcf4d63afa --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/ChunkingConfig.java @@ -0,0 +1,176 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.datafeed; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.MlParserType; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.EnumMap; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +/** + * The description of how searches should be chunked. + */ +public class ChunkingConfig implements ToXContentObject, Writeable { + + public static final ParseField MODE_FIELD = new ParseField("mode"); + public static final ParseField TIME_SPAN_FIELD = new ParseField("time_span"); + + // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly + public static final ConstructingObjectParser METADATA_PARSER = new ConstructingObjectParser<>( + "chunking_config", true, a -> new ChunkingConfig((Mode) a[0], (TimeValue) a[1])); + public static final ConstructingObjectParser CONFIG_PARSER = new ConstructingObjectParser<>( + "chunking_config", false, a -> new ChunkingConfig((Mode) a[0], (TimeValue) a[1])); + public static final Map> PARSERS = + new EnumMap<>(MlParserType.class); + + static { + PARSERS.put(MlParserType.METADATA, METADATA_PARSER); + PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER); + for (MlParserType parserType : MlParserType.values()) { + ConstructingObjectParser parser = PARSERS.get(parserType); + assert parser != null; + parser.declareField(ConstructingObjectParser.constructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return Mode.fromString(p.text()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, MODE_FIELD, ValueType.STRING); + parser.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return TimeValue.parseTimeValue(p.text(), TIME_SPAN_FIELD.getPreferredName()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, TIME_SPAN_FIELD, ValueType.STRING); + } + } + + private final Mode mode; + private final TimeValue timeSpan; + + public ChunkingConfig(StreamInput in) throws IOException { + mode = Mode.readFromStream(in); + timeSpan = in.readOptionalTimeValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + mode.writeTo(out); + out.writeOptionalTimeValue(timeSpan); + } + + ChunkingConfig(Mode mode, @Nullable TimeValue timeSpan) { + this.mode = ExceptionsHelper.requireNonNull(mode, MODE_FIELD.getPreferredName()); + this.timeSpan = timeSpan; + if (mode == Mode.MANUAL) { + if (timeSpan == null) { + throw new IllegalArgumentException("when chunk mode is manual 
time_span is required"); + } + if (timeSpan.getMillis() <= 0) { + throw new IllegalArgumentException("chunk time_span has to be positive"); + } + } else { + if (timeSpan != null) { + throw new IllegalArgumentException("chunk time_span may only be set when mode is manual"); + } + } + } + + @Nullable + public TimeValue getTimeSpan() { + return timeSpan; + } + + public boolean isEnabled() { + return mode != Mode.OFF; + } + + Mode getMode() { + return mode; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MODE_FIELD.getPreferredName(), mode); + if (timeSpan != null) { + builder.field(TIME_SPAN_FIELD.getPreferredName(), timeSpan.getStringRep()); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(mode, timeSpan); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + + if (getClass() != obj.getClass()) { + return false; + } + + ChunkingConfig other = (ChunkingConfig) obj; + return Objects.equals(this.mode, other.mode) && + Objects.equals(this.timeSpan, other.timeSpan); + } + + public static ChunkingConfig newAuto() { + return new ChunkingConfig(Mode.AUTO, null); + } + + public static ChunkingConfig newOff() { + return new ChunkingConfig(Mode.OFF, null); + } + + public static ChunkingConfig newManual(TimeValue timeSpan) { + return new ChunkingConfig(Mode.MANUAL, timeSpan); + } + + public enum Mode implements Writeable { + AUTO, MANUAL, OFF; + + public static Mode fromString(String value) { + return Mode.valueOf(value.toUpperCase(Locale.ROOT)); + } + + public static Mode readFromStream(StreamInput in) throws IOException { + return in.readEnum(Mode.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(this); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java new file mode 100644 index 0000000000000..d06c911e13cfe --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -0,0 +1,632 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.datafeed; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.core.ml.MlParserType; +import org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.MlStrings; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; +import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.EnumMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Random; +import java.util.concurrent.TimeUnit; + +/** + * Datafeed configuration options. Describes where to proactively pull input + * data from. + *
+ * If a value has not been set it will be null. Object wrappers are + * used around integral types and booleans so they can take null + * values. + */ +public class DatafeedConfig extends AbstractDiffable implements ToXContentObject { + + public static final int DEFAULT_SCROLL_SIZE = 1000; + + private static final int SECONDS_IN_MINUTE = 60; + private static final int TWO_MINS_SECONDS = 2 * SECONDS_IN_MINUTE; + private static final int TWENTY_MINS_SECONDS = 20 * SECONDS_IN_MINUTE; + private static final int HALF_DAY_SECONDS = 12 * 60 * SECONDS_IN_MINUTE; + + // Used for QueryPage + public static final ParseField RESULTS_FIELD = new ParseField("datafeeds"); + + /** + * The field name used to specify document counts in Elasticsearch + * aggregations + */ + public static final String DOC_COUNT = "doc_count"; + + public static final ParseField ID = new ParseField("datafeed_id"); + public static final ParseField QUERY_DELAY = new ParseField("query_delay"); + public static final ParseField FREQUENCY = new ParseField("frequency"); + public static final ParseField INDEXES = new ParseField("indexes"); + public static final ParseField INDICES = new ParseField("indices"); + public static final ParseField TYPES = new ParseField("types"); + public static final ParseField QUERY = new ParseField("query"); + public static final ParseField SCROLL_SIZE = new ParseField("scroll_size"); + public static final ParseField AGGREGATIONS = new ParseField("aggregations"); + public static final ParseField AGGS = new ParseField("aggs"); + public static final ParseField SCRIPT_FIELDS = new ParseField("script_fields"); + public static final ParseField SOURCE = new ParseField("_source"); + public static final ParseField CHUNKING_CONFIG = new ParseField("chunking_config"); + public static final ParseField HEADERS = new ParseField("headers"); + + // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly + public static final ObjectParser METADATA_PARSER = new ObjectParser<>("datafeed_config", true, Builder::new); + public static final ObjectParser CONFIG_PARSER = new ObjectParser<>("datafeed_config", false, Builder::new); + public static final Map> PARSERS = new EnumMap<>(MlParserType.class); + + static { + PARSERS.put(MlParserType.METADATA, METADATA_PARSER); + PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER); + for (MlParserType parserType : MlParserType.values()) { + ObjectParser parser = PARSERS.get(parserType); + assert parser != null; + parser.declareString(Builder::setId, ID); + parser.declareString(Builder::setJobId, Job.ID); + parser.declareStringArray(Builder::setIndices, INDEXES); + parser.declareStringArray(Builder::setIndices, INDICES); + parser.declareStringArray(Builder::setTypes, TYPES); + parser.declareString((builder, val) -> + builder.setQueryDelay(TimeValue.parseTimeValue(val, QUERY_DELAY.getPreferredName())), QUERY_DELAY); + parser.declareString((builder, val) -> + builder.setFrequency(TimeValue.parseTimeValue(val, FREQUENCY.getPreferredName())), FREQUENCY); + parser.declareObject(Builder::setQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), QUERY); + parser.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGREGATIONS); + parser.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGS); + parser.declareObject(Builder::setScriptFields, (p, c) -> { + List parsedScriptFields = new ArrayList<>(); + while (p.nextToken() != 
XContentParser.Token.END_OBJECT) { + parsedScriptFields.add(new SearchSourceBuilder.ScriptField(p)); + } + parsedScriptFields.sort(Comparator.comparing(SearchSourceBuilder.ScriptField::fieldName)); + return parsedScriptFields; + }, SCRIPT_FIELDS); + parser.declareInt(Builder::setScrollSize, SCROLL_SIZE); + // TODO this is to read former _source field. Remove in v7.0.0 + parser.declareBoolean((builder, value) -> {}, SOURCE); + parser.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSERS.get(parserType), CHUNKING_CONFIG); + } + // Headers are only parsed by the metadata parser, so headers supplied in the _body_ of a REST request will be rejected. + // (For config headers are explicitly transferred from the auth headers by code in the put/update datafeed actions.) + METADATA_PARSER.declareObject(Builder::setHeaders, (p, c) -> p.mapStrings(), HEADERS); + } + + private final String id; + private final String jobId; + + /** + * The delay before starting to query a period of time + */ + private final TimeValue queryDelay; + + /** + * The frequency with which queries are executed + */ + private final TimeValue frequency; + + private final List indices; + private final List types; + private final QueryBuilder query; + private final AggregatorFactories.Builder aggregations; + private final List scriptFields; + private final Integer scrollSize; + private final ChunkingConfig chunkingConfig; + private final Map headers; + + private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, List types, + QueryBuilder query, AggregatorFactories.Builder aggregations, List scriptFields, + Integer scrollSize, ChunkingConfig chunkingConfig, Map headers) { + this.id = id; + this.jobId = jobId; + this.queryDelay = queryDelay; + this.frequency = frequency; + this.indices = indices; + this.types = types; + this.query = query; + this.aggregations = aggregations; + this.scriptFields = scriptFields; + this.scrollSize = scrollSize; + this.chunkingConfig = chunkingConfig; + this.headers = Objects.requireNonNull(headers); + } + + public DatafeedConfig(StreamInput in) throws IOException { + this.id = in.readString(); + this.jobId = in.readString(); + this.queryDelay = in.readOptionalTimeValue(); + this.frequency = in.readOptionalTimeValue(); + if (in.readBoolean()) { + this.indices = in.readList(StreamInput::readString); + } else { + this.indices = null; + } + if (in.readBoolean()) { + this.types = in.readList(StreamInput::readString); + } else { + this.types = null; + } + this.query = in.readNamedWriteable(QueryBuilder.class); + this.aggregations = in.readOptionalWriteable(AggregatorFactories.Builder::new); + if (in.readBoolean()) { + this.scriptFields = in.readList(SearchSourceBuilder.ScriptField::new); + } else { + this.scriptFields = null; + } + this.scrollSize = in.readOptionalVInt(); + if (in.getVersion().before(Version.V_5_5_0)) { + // read former _source field + in.readBoolean(); + } + this.chunkingConfig = in.readOptionalWriteable(ChunkingConfig::new); + if (in.getVersion().onOrAfter(Version.V_6_2_0)) { + this.headers = in.readMap(StreamInput::readString, StreamInput::readString); + } else { + this.headers = Collections.emptyMap(); + } + } + + public String getId() { + return id; + } + + public String getJobId() { + return jobId; + } + + public TimeValue getQueryDelay() { + return queryDelay; + } + + public TimeValue getFrequency() { + return frequency; + } + + public List getIndices() { + return indices; + } + + public List getTypes() { + return types; + } + + 
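+ /**
+  * @return the number of hits to request in each scroll search ({@code scroll_size})
+  */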
public Integer getScrollSize() { + return scrollSize; + } + + public QueryBuilder getQuery() { + return query; + } + + public AggregatorFactories.Builder getAggregations() { + return aggregations; + } + + /** + * Returns the histogram's interval as epoch millis. + */ + public long getHistogramIntervalMillis() { + return ExtractorUtils.getHistogramIntervalMillis(aggregations); + } + + /** + * @return {@code true} when there are non-empty aggregations, {@code false} otherwise + */ + public boolean hasAggregations() { + return aggregations != null && aggregations.count() > 0; + } + + public List getScriptFields() { + return scriptFields == null ? Collections.emptyList() : scriptFields; + } + + public ChunkingConfig getChunkingConfig() { + return chunkingConfig; + } + + public Map getHeaders() { + return headers; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(id); + out.writeString(jobId); + out.writeOptionalTimeValue(queryDelay); + out.writeOptionalTimeValue(frequency); + if (indices != null) { + out.writeBoolean(true); + out.writeStringList(indices); + } else { + out.writeBoolean(false); + } + if (types != null) { + out.writeBoolean(true); + out.writeStringList(types); + } else { + out.writeBoolean(false); + } + out.writeNamedWriteable(query); + out.writeOptionalWriteable(aggregations); + if (scriptFields != null) { + out.writeBoolean(true); + out.writeList(scriptFields); + } else { + out.writeBoolean(false); + } + out.writeOptionalVInt(scrollSize); + if (out.getVersion().before(Version.V_5_5_0)) { + // write former _source field + out.writeBoolean(false); + } + out.writeOptionalWriteable(chunkingConfig); + if (out.getVersion().onOrAfter(Version.V_6_2_0)) { + out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + doXContentBody(builder, params); + builder.endObject(); + return builder; + } + + public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + builder.field(ID.getPreferredName(), id); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(QUERY_DELAY.getPreferredName(), queryDelay.getStringRep()); + if (frequency != null) { + builder.field(FREQUENCY.getPreferredName(), frequency.getStringRep()); + } + builder.field(INDICES.getPreferredName(), indices); + builder.field(TYPES.getPreferredName(), types); + builder.field(QUERY.getPreferredName(), query); + if (aggregations != null) { + builder.field(AGGREGATIONS.getPreferredName(), aggregations); + } + if (scriptFields != null) { + builder.startObject(SCRIPT_FIELDS.getPreferredName()); + for (SearchSourceBuilder.ScriptField scriptField : scriptFields) { + scriptField.toXContent(builder, params); + } + builder.endObject(); + } + builder.field(SCROLL_SIZE.getPreferredName(), scrollSize); + if (chunkingConfig != null) { + builder.field(CHUNKING_CONFIG.getPreferredName(), chunkingConfig); + } + if (headers.isEmpty() == false && params.paramAsBoolean(ToXContentParams.FOR_CLUSTER_STATE, false) == true) { + builder.field(HEADERS.getPreferredName(), headers); + } + return builder; + } + + /** + * The lists of indices and types are compared for equality but they are not + * sorted first so this test could fail simply because the indices and types + * lists are in different orders. 
+ */ + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other instanceof DatafeedConfig == false) { + return false; + } + + DatafeedConfig that = (DatafeedConfig) other; + + return Objects.equals(this.id, that.id) + && Objects.equals(this.jobId, that.jobId) + && Objects.equals(this.frequency, that.frequency) + && Objects.equals(this.queryDelay, that.queryDelay) + && Objects.equals(this.indices, that.indices) + && Objects.equals(this.types, that.types) + && Objects.equals(this.query, that.query) + && Objects.equals(this.scrollSize, that.scrollSize) + && Objects.equals(this.aggregations, that.aggregations) + && Objects.equals(this.scriptFields, that.scriptFields) + && Objects.equals(this.chunkingConfig, that.chunkingConfig) + && Objects.equals(this.headers, that.headers); + } + + @Override + public int hashCode() { + return Objects.hash(id, jobId, frequency, queryDelay, indices, types, query, scrollSize, aggregations, scriptFields, + chunkingConfig, headers); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + /** + * Calculates a sensible default frequency for a given bucket span. + *
+ * The default depends on the bucket span:
+ * <ul>
+ * <li> &lt;= 2 mins -&gt; 1 min</li>
+ * <li> &lt;= 20 mins -&gt; bucket span / 2</li>
+ * <li> &lt;= 12 hours -&gt; 10 mins</li>
+ * <li> &gt; 12 hours -&gt; 1 hour</li>
+ * </ul>
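+ *
+ * For example, a 10 minute bucket span gives a default frequency of 5 minutes,
+ * and a 1 hour bucket span gives a default frequency of 10 minutes.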
+ * + * If the datafeed has aggregations, the default frequency is the + * closest multiple of the histogram interval based on the rules above. + * + * @param bucketSpan the bucket span + * @return the default frequency + */ + public TimeValue defaultFrequency(TimeValue bucketSpan) { + TimeValue defaultFrequency = defaultFrequencyTarget(bucketSpan); + if (hasAggregations()) { + long histogramIntervalMillis = getHistogramIntervalMillis(); + long targetFrequencyMillis = defaultFrequency.millis(); + long defaultFrequencyMillis = histogramIntervalMillis > targetFrequencyMillis ? histogramIntervalMillis + : (targetFrequencyMillis / histogramIntervalMillis) * histogramIntervalMillis; + defaultFrequency = TimeValue.timeValueMillis(defaultFrequencyMillis); + } + return defaultFrequency; + } + + private TimeValue defaultFrequencyTarget(TimeValue bucketSpan) { + long bucketSpanSeconds = bucketSpan.seconds(); + if (bucketSpanSeconds <= 0) { + throw new IllegalArgumentException("Bucket span has to be > 0"); + } + + if (bucketSpanSeconds <= TWO_MINS_SECONDS) { + return TimeValue.timeValueSeconds(SECONDS_IN_MINUTE); + } + if (bucketSpanSeconds <= TWENTY_MINS_SECONDS) { + return TimeValue.timeValueSeconds(bucketSpanSeconds / 2); + } + if (bucketSpanSeconds <= HALF_DAY_SECONDS) { + return TimeValue.timeValueMinutes(10); + } + return TimeValue.timeValueHours(1); + } + + public static class Builder { + + private static final TimeValue MIN_DEFAULT_QUERY_DELAY = TimeValue.timeValueMinutes(1); + private static final TimeValue MAX_DEFAULT_QUERY_DELAY = TimeValue.timeValueMinutes(2); + private static final int DEFAULT_AGGREGATION_CHUNKING_BUCKETS = 1000; + + private String id; + private String jobId; + private TimeValue queryDelay; + private TimeValue frequency; + private List indices = Collections.emptyList(); + private List types = Collections.emptyList(); + private QueryBuilder query = QueryBuilders.matchAllQuery(); + private AggregatorFactories.Builder aggregations; + private List scriptFields; + private Integer scrollSize = DEFAULT_SCROLL_SIZE; + private ChunkingConfig chunkingConfig; + private Map headers = Collections.emptyMap(); + + public Builder() { + } + + public Builder(String id, String jobId) { + this(); + this.id = ExceptionsHelper.requireNonNull(id, ID.getPreferredName()); + this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); + } + + public Builder(DatafeedConfig config) { + this.id = config.id; + this.jobId = config.jobId; + this.queryDelay = config.queryDelay; + this.frequency = config.frequency; + this.indices = config.indices; + this.types = config.types; + this.query = config.query; + this.aggregations = config.aggregations; + this.scriptFields = config.scriptFields; + this.scrollSize = config.scrollSize; + this.chunkingConfig = config.chunkingConfig; + this.headers = config.headers; + } + + public void setId(String datafeedId) { + id = ExceptionsHelper.requireNonNull(datafeedId, ID.getPreferredName()); + } + + public void setJobId(String jobId) { + this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); + } + + public void setHeaders(Map headers) { + this.headers = ExceptionsHelper.requireNonNull(headers, HEADERS.getPreferredName()); + } + + public void setIndices(List indices) { + this.indices = ExceptionsHelper.requireNonNull(indices, INDICES.getPreferredName()); + } + + public void setTypes(List types) { + this.types = ExceptionsHelper.requireNonNull(types, TYPES.getPreferredName()); + } + + public void setQueryDelay(TimeValue 
queryDelay) { + TimeUtils.checkNonNegativeMultiple(queryDelay, TimeUnit.MILLISECONDS, QUERY_DELAY); + this.queryDelay = queryDelay; + } + + public void setFrequency(TimeValue frequency) { + TimeUtils.checkPositiveMultiple(frequency, TimeUnit.SECONDS, FREQUENCY); + this.frequency = frequency; + } + + public void setQuery(QueryBuilder query) { + this.query = ExceptionsHelper.requireNonNull(query, QUERY.getPreferredName()); + } + + public void setAggregations(AggregatorFactories.Builder aggregations) { + this.aggregations = aggregations; + } + + public void setScriptFields(List scriptFields) { + List sorted = new ArrayList<>(); + for (SearchSourceBuilder.ScriptField scriptField : scriptFields) { + sorted.add(scriptField); + } + sorted.sort(Comparator.comparing(SearchSourceBuilder.ScriptField::fieldName)); + this.scriptFields = sorted; + } + + public void setScrollSize(int scrollSize) { + if (scrollSize < 0) { + String msg = Messages.getMessage(Messages.DATAFEED_CONFIG_INVALID_OPTION_VALUE, + DatafeedConfig.SCROLL_SIZE.getPreferredName(), scrollSize); + throw ExceptionsHelper.badRequestException(msg); + } + this.scrollSize = scrollSize; + } + + public void setChunkingConfig(ChunkingConfig chunkingConfig) { + this.chunkingConfig = chunkingConfig; + } + + public DatafeedConfig build() { + ExceptionsHelper.requireNonNull(id, ID.getPreferredName()); + ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); + if (!MlStrings.isValidId(id)) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.INVALID_ID, ID.getPreferredName(), id)); + } + if (indices == null || indices.isEmpty() || indices.contains(null) || indices.contains("")) { + throw invalidOptionValue(INDICES.getPreferredName(), indices); + } + if (types == null || types.contains(null) || types.contains("")) { + throw invalidOptionValue(TYPES.getPreferredName(), types); + } + validateAggregations(); + setDefaultChunkingConfig(); + setDefaultQueryDelay(); + return new DatafeedConfig(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize, + chunkingConfig, headers); + } + + void validateAggregations() { + if (aggregations == null) { + return; + } + if (scriptFields != null && !scriptFields.isEmpty()) { + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.DATAFEED_CONFIG_CANNOT_USE_SCRIPT_FIELDS_WITH_AGGS)); + } + List aggregatorFactories = aggregations.getAggregatorFactories(); + if (aggregatorFactories.isEmpty()) { + throw ExceptionsHelper.badRequestException(Messages.DATAFEED_AGGREGATIONS_REQUIRES_DATE_HISTOGRAM); + } + + AggregationBuilder histogramAggregation = ExtractorUtils.getHistogramAggregation(aggregatorFactories); + checkNoMoreHistogramAggregations(histogramAggregation.getSubAggregations()); + checkHistogramAggregationHasChildMaxTimeAgg(histogramAggregation); + checkHistogramIntervalIsPositive(histogramAggregation); + } + + private static void checkNoMoreHistogramAggregations(List aggregations) { + for (AggregationBuilder agg : aggregations) { + if (ExtractorUtils.isHistogram(agg)) { + throw ExceptionsHelper.badRequestException(Messages.DATAFEED_AGGREGATIONS_MAX_ONE_DATE_HISTOGRAM); + } + checkNoMoreHistogramAggregations(agg.getSubAggregations()); + } + } + + static void checkHistogramAggregationHasChildMaxTimeAgg(AggregationBuilder histogramAggregation) { + String timeField = null; + if (histogramAggregation instanceof ValuesSourceAggregationBuilder) { + timeField = ((ValuesSourceAggregationBuilder) histogramAggregation).field(); + } + + 
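+ // Look for a nested max aggregation on the histogram's time field; without one the
+ // datafeed configuration is rejected as invalid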
for (AggregationBuilder agg : histogramAggregation.getSubAggregations()) { + if (agg instanceof MaxAggregationBuilder) { + MaxAggregationBuilder maxAgg = (MaxAggregationBuilder)agg; + if (maxAgg.field().equals(timeField)) { + return; + } + } + } + + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.DATAFEED_DATA_HISTOGRAM_MUST_HAVE_NESTED_MAX_AGGREGATION, timeField)); + } + + private static void checkHistogramIntervalIsPositive(AggregationBuilder histogramAggregation) { + long interval = ExtractorUtils.getHistogramIntervalMillis(histogramAggregation); + if (interval <= 0) { + throw ExceptionsHelper.badRequestException(Messages.DATAFEED_AGGREGATIONS_INTERVAL_MUST_BE_GREATER_THAN_ZERO); + } + } + + private void setDefaultChunkingConfig() { + if (chunkingConfig == null) { + if (aggregations == null) { + chunkingConfig = ChunkingConfig.newAuto(); + } else { + long histogramIntervalMillis = ExtractorUtils.getHistogramIntervalMillis(aggregations); + chunkingConfig = ChunkingConfig.newManual(TimeValue.timeValueMillis( + DEFAULT_AGGREGATION_CHUNKING_BUCKETS * histogramIntervalMillis)); + } + } + } + + private void setDefaultQueryDelay() { + if (queryDelay == null) { + Random random = new Random(jobId.hashCode()); + long delayMillis = random.longs(MIN_DEFAULT_QUERY_DELAY.millis(), MAX_DEFAULT_QUERY_DELAY.millis()) + .findFirst().getAsLong(); + queryDelay = TimeValue.timeValueMillis(delayMillis); + } + } + + private static ElasticsearchException invalidOptionValue(String fieldName, Object value) { + String msg = Messages.getMessage(Messages.DATAFEED_CONFIG_INVALID_OPTION_VALUE, fieldName, value); + throw ExceptionsHelper.badRequestException(msg); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedJobValidator.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedJobValidator.java new file mode 100644 index 0000000000000..b829b3fa44307 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedJobValidator.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.datafeed; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +public final class DatafeedJobValidator { + + private DatafeedJobValidator() {} + + /** + * Validates a datafeedConfig in relation to the job it refers to + * @param datafeedConfig the datafeed config + * @param job the job + */ + public static void validate(DatafeedConfig datafeedConfig, Job job) { + AnalysisConfig analysisConfig = job.getAnalysisConfig(); + if (analysisConfig.getLatency() != null && analysisConfig.getLatency().seconds() > 0) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.DATAFEED_DOES_NOT_SUPPORT_JOB_WITH_LATENCY)); + } + if (datafeedConfig.hasAggregations()) { + checkSummaryCountFieldNameIsSet(analysisConfig); + checkValidHistogramInterval(datafeedConfig, analysisConfig); + checkFrequencyIsMultipleOfHistogramInterval(datafeedConfig); + } + } + + private static void checkSummaryCountFieldNameIsSet(AnalysisConfig analysisConfig) { + if (Strings.isNullOrEmpty(analysisConfig.getSummaryCountFieldName())) { + throw ExceptionsHelper.badRequestException(Messages.getMessage( + Messages.DATAFEED_AGGREGATIONS_REQUIRES_JOB_WITH_SUMMARY_COUNT_FIELD)); + } + } + + private static void checkValidHistogramInterval(DatafeedConfig datafeedConfig, AnalysisConfig analysisConfig) { + long histogramIntervalMillis = datafeedConfig.getHistogramIntervalMillis(); + long bucketSpanMillis = analysisConfig.getBucketSpan().millis(); + if (histogramIntervalMillis > bucketSpanMillis) { + throw ExceptionsHelper.badRequestException(Messages.getMessage( + Messages.DATAFEED_AGGREGATIONS_INTERVAL_MUST_LESS_OR_EQUAL_TO_BUCKET_SPAN, + TimeValue.timeValueMillis(histogramIntervalMillis).getStringRep(), + TimeValue.timeValueMillis(bucketSpanMillis).getStringRep())); + } + + if (bucketSpanMillis % histogramIntervalMillis != 0) { + throw ExceptionsHelper.badRequestException(Messages.getMessage( + Messages.DATAFEED_AGGREGATIONS_INTERVAL_MUST_BE_DIVISOR_OF_BUCKET_SPAN, + TimeValue.timeValueMillis(histogramIntervalMillis).getStringRep(), + TimeValue.timeValueMillis(bucketSpanMillis).getStringRep())); + } + } + + private static void checkFrequencyIsMultipleOfHistogramInterval(DatafeedConfig datafeedConfig) { + TimeValue frequency = datafeedConfig.getFrequency(); + if (frequency != null) { + long histogramIntervalMillis = datafeedConfig.getHistogramIntervalMillis(); + long frequencyMillis = frequency.millis(); + if (frequencyMillis % histogramIntervalMillis != 0) { + throw ExceptionsHelper.badRequestException(Messages.getMessage( + Messages.DATAFEED_FREQUENCY_MUST_BE_MULTIPLE_OF_AGGREGATIONS_INTERVAL, + frequency, TimeValue.timeValueMillis(histogramIntervalMillis).getStringRep())); + } + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java new file mode 100644 index 0000000000000..7343600a6ee37 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.datafeed; + +import org.elasticsearch.Version; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; + +import java.io.IOException; +import java.util.Locale; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +public enum DatafeedState implements Task.Status { + + STARTED, STOPPED, STARTING, STOPPING; + + public static final String NAME = StartDatafeedAction.TASK_NAME; + + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>(NAME, args -> fromString((String) args[0])); + + static { + PARSER.declareString(constructorArg(), new ParseField("state")); + } + + public static DatafeedState fromString(String name) { + return valueOf(name.trim().toUpperCase(Locale.ROOT)); + } + + public static DatafeedState fromStream(StreamInput in) throws IOException { + return in.readEnum(DatafeedState.class); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + DatafeedState state = this; + // STARTING & STOPPING states were introduced in v5.5. + if (out.getVersion().before(Version.V_5_5_0)) { + if (this == STARTING) { + state = STOPPED; + } else if (this == STOPPING) { + state = STARTED; + } + } + out.writeEnum(state); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("state", name().toLowerCase(Locale.ROOT)); + builder.endObject(); + return builder; + } + + @Override + public boolean isFragment() { + return false; + } + + public static DatafeedState fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java new file mode 100644 index 0000000000000..6255be9f4383a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java @@ -0,0 +1,442 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.datafeed; + +import org.elasticsearch.Version; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.core.ml.MlClientHelper; +import org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +/** + * A datafeed update contains partial properties to update a {@link DatafeedConfig}. + * The main difference between this class and {@link DatafeedConfig} is that here all + * fields are nullable. + */ +public class DatafeedUpdate implements Writeable, ToXContentObject { + + public static final ObjectParser PARSER = new ObjectParser<>("datafeed_update", Builder::new); + + static { + PARSER.declareString(Builder::setId, DatafeedConfig.ID); + PARSER.declareString(Builder::setJobId, Job.ID); + PARSER.declareStringArray(Builder::setIndices, DatafeedConfig.INDEXES); + PARSER.declareStringArray(Builder::setIndices, DatafeedConfig.INDICES); + PARSER.declareStringArray(Builder::setTypes, DatafeedConfig.TYPES); + PARSER.declareString((builder, val) -> builder.setQueryDelay( + TimeValue.parseTimeValue(val, DatafeedConfig.QUERY_DELAY.getPreferredName())), DatafeedConfig.QUERY_DELAY); + PARSER.declareString((builder, val) -> builder.setFrequency( + TimeValue.parseTimeValue(val, DatafeedConfig.FREQUENCY.getPreferredName())), DatafeedConfig.FREQUENCY); + PARSER.declareObject(Builder::setQuery, + (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), DatafeedConfig.QUERY); + PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), + DatafeedConfig.AGGREGATIONS); + PARSER.declareObject(Builder::setAggregations,(p, c) -> AggregatorFactories.parseAggregators(p), + DatafeedConfig.AGGS); + PARSER.declareObject(Builder::setScriptFields, (p, c) -> { + List parsedScriptFields = new ArrayList<>(); + while (p.nextToken() != XContentParser.Token.END_OBJECT) { + parsedScriptFields.add(new SearchSourceBuilder.ScriptField(p)); + } + parsedScriptFields.sort(Comparator.comparing(SearchSourceBuilder.ScriptField::fieldName)); + return parsedScriptFields; + }, DatafeedConfig.SCRIPT_FIELDS); + PARSER.declareInt(Builder::setScrollSize, DatafeedConfig.SCROLL_SIZE); + PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.CONFIG_PARSER, DatafeedConfig.CHUNKING_CONFIG); + } + + private final String id; + private final String jobId; + private final TimeValue queryDelay; + private 
final TimeValue frequency; + private final List indices; + private final List types; + private final QueryBuilder query; + private final AggregatorFactories.Builder aggregations; + private final List scriptFields; + private final Integer scrollSize; + private final ChunkingConfig chunkingConfig; + + private DatafeedUpdate(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, List types, + QueryBuilder query, AggregatorFactories.Builder aggregations, List scriptFields, + Integer scrollSize, ChunkingConfig chunkingConfig) { + this.id = id; + this.jobId = jobId; + this.queryDelay = queryDelay; + this.frequency = frequency; + this.indices = indices; + this.types = types; + this.query = query; + this.aggregations = aggregations; + this.scriptFields = scriptFields; + this.scrollSize = scrollSize; + this.chunkingConfig = chunkingConfig; + } + + public DatafeedUpdate(StreamInput in) throws IOException { + this.id = in.readString(); + this.jobId = in.readOptionalString(); + this.queryDelay = in.readOptionalTimeValue(); + this.frequency = in.readOptionalTimeValue(); + if (in.readBoolean()) { + this.indices = in.readList(StreamInput::readString); + } else { + this.indices = null; + } + if (in.readBoolean()) { + this.types = in.readList(StreamInput::readString); + } else { + this.types = null; + } + this.query = in.readOptionalNamedWriteable(QueryBuilder.class); + this.aggregations = in.readOptionalWriteable(AggregatorFactories.Builder::new); + if (in.readBoolean()) { + this.scriptFields = in.readList(SearchSourceBuilder.ScriptField::new); + } else { + this.scriptFields = null; + } + this.scrollSize = in.readOptionalVInt(); + if (in.getVersion().before(Version.V_5_5_0)) { + // TODO for former _source param - remove in v7.0.0 + in.readOptionalBoolean(); + } + this.chunkingConfig = in.readOptionalWriteable(ChunkingConfig::new); + } + + /** + * Get the id of the datafeed to update + */ + public String getId() { + return id; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(id); + out.writeOptionalString(jobId); + out.writeOptionalTimeValue(queryDelay); + out.writeOptionalTimeValue(frequency); + if (indices != null) { + out.writeBoolean(true); + out.writeStringList(indices); + } else { + out.writeBoolean(false); + } + if (types != null) { + out.writeBoolean(true); + out.writeStringList(types); + } else { + out.writeBoolean(false); + } + out.writeOptionalNamedWriteable(query); + out.writeOptionalWriteable(aggregations); + if (scriptFields != null) { + out.writeBoolean(true); + out.writeList(scriptFields); + } else { + out.writeBoolean(false); + } + out.writeOptionalVInt(scrollSize); + if (out.getVersion().before(Version.V_5_5_0)) { + // TODO for former _source param - remove in v7.0.0 + out.writeOptionalBoolean(null); + } + out.writeOptionalWriteable(chunkingConfig); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(DatafeedConfig.ID.getPreferredName(), id); + addOptionalField(builder, Job.ID, jobId); + if (queryDelay != null) { + builder.field(DatafeedConfig.QUERY_DELAY.getPreferredName(), queryDelay.getStringRep()); + } + if (frequency != null) { + builder.field(DatafeedConfig.FREQUENCY.getPreferredName(), frequency.getStringRep()); + } + addOptionalField(builder, DatafeedConfig.INDICES, indices); + addOptionalField(builder, DatafeedConfig.TYPES, types); + addOptionalField(builder, DatafeedConfig.QUERY, query); + 
addOptionalField(builder, DatafeedConfig.AGGREGATIONS, aggregations); + if (scriptFields != null) { + builder.startObject(DatafeedConfig.SCRIPT_FIELDS.getPreferredName()); + for (SearchSourceBuilder.ScriptField scriptField : scriptFields) { + scriptField.toXContent(builder, params); + } + builder.endObject(); + } + addOptionalField(builder, DatafeedConfig.SCROLL_SIZE, scrollSize); + addOptionalField(builder, DatafeedConfig.CHUNKING_CONFIG, chunkingConfig); + builder.endObject(); + return builder; + } + + private void addOptionalField(XContentBuilder builder, ParseField field, Object value) throws IOException { + if (value != null) { + builder.field(field.getPreferredName(), value); + } + } + + String getJobId() { + return jobId; + } + + TimeValue getQueryDelay() { + return queryDelay; + } + + TimeValue getFrequency() { + return frequency; + } + + List getIndices() { + return indices; + } + + List getTypes() { + return types; + } + + Integer getScrollSize() { + return scrollSize; + } + + QueryBuilder getQuery() { + return query; + } + + AggregatorFactories.Builder getAggregations() { + return aggregations; + } + + /** + * Returns the histogram's interval as epoch millis. + */ + long getHistogramIntervalMillis() { + return ExtractorUtils.getHistogramIntervalMillis(aggregations); + } + + /** + * @return {@code true} when there are non-empty aggregations, {@code false} + * otherwise + */ + boolean hasAggregations() { + return aggregations != null && aggregations.count() > 0; + } + + List getScriptFields() { + return scriptFields == null ? Collections.emptyList() : scriptFields; + } + + ChunkingConfig getChunkingConfig() { + return chunkingConfig; + } + + /** + * Applies the update to the given {@link DatafeedConfig} + * @return a new {@link DatafeedConfig} that contains the update + */ + public DatafeedConfig apply(DatafeedConfig datafeedConfig, ThreadContext threadContext) { + if (id.equals(datafeedConfig.getId()) == false) { + throw new IllegalArgumentException("Cannot apply update to datafeedConfig with different id"); + } + + DatafeedConfig.Builder builder = new DatafeedConfig.Builder(datafeedConfig); + if (jobId != null) { + builder.setJobId(jobId); + } + if (queryDelay != null) { + builder.setQueryDelay(queryDelay); + } + if (frequency != null) { + builder.setFrequency(frequency); + } + if (indices != null) { + builder.setIndices(indices); + } + if (types != null) { + builder.setTypes(types); + } + if (query != null) { + builder.setQuery(query); + } + if (aggregations != null) { + builder.setAggregations(aggregations); + } + if (scriptFields != null) { + builder.setScriptFields(scriptFields); + } + if (scrollSize != null) { + builder.setScrollSize(scrollSize); + } + if (chunkingConfig != null) { + builder.setChunkingConfig(chunkingConfig); + } + + if (threadContext != null) { + // Adjust the request, adding security headers from the current thread context + Map headers = threadContext.getHeaders().entrySet().stream() + .filter(e -> MlClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + builder.setHeaders(headers); + } + + return builder.build(); + } + + /** + * The lists of indices and types are compared for equality but they are not + * sorted first so this test could fail simply because the indices and types + * lists are in different orders. 
+ */ + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other instanceof DatafeedUpdate == false) { + return false; + } + + DatafeedUpdate that = (DatafeedUpdate) other; + + return Objects.equals(this.id, that.id) + && Objects.equals(this.jobId, that.jobId) + && Objects.equals(this.frequency, that.frequency) + && Objects.equals(this.queryDelay, that.queryDelay) + && Objects.equals(this.indices, that.indices) + && Objects.equals(this.types, that.types) + && Objects.equals(this.query, that.query) + && Objects.equals(this.scrollSize, that.scrollSize) + && Objects.equals(this.aggregations, that.aggregations) + && Objects.equals(this.scriptFields, that.scriptFields) + && Objects.equals(this.chunkingConfig, that.chunkingConfig); + } + + @Override + public int hashCode() { + return Objects.hash(id, jobId, frequency, queryDelay, indices, types, query, scrollSize, aggregations, scriptFields, + chunkingConfig); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + public static class Builder { + + private String id; + private String jobId; + private TimeValue queryDelay; + private TimeValue frequency; + private List indices; + private List types; + private QueryBuilder query; + private AggregatorFactories.Builder aggregations; + private List scriptFields; + private Integer scrollSize; + private ChunkingConfig chunkingConfig; + + public Builder() { + } + + public Builder(String id) { + this.id = ExceptionsHelper.requireNonNull(id, DatafeedConfig.ID.getPreferredName()); + } + + public Builder(DatafeedUpdate config) { + this.id = config.id; + this.jobId = config.jobId; + this.queryDelay = config.queryDelay; + this.frequency = config.frequency; + this.indices = config.indices; + this.types = config.types; + this.query = config.query; + this.aggregations = config.aggregations; + this.scriptFields = config.scriptFields; + this.scrollSize = config.scrollSize; + this.chunkingConfig = config.chunkingConfig; + } + + public void setId(String datafeedId) { + id = ExceptionsHelper.requireNonNull(datafeedId, DatafeedConfig.ID.getPreferredName()); + } + + public void setJobId(String jobId) { + this.jobId = jobId; + } + + public void setIndices(List indices) { + this.indices = indices; + } + + public void setTypes(List types) { + this.types = types; + } + + public void setQueryDelay(TimeValue queryDelay) { + this.queryDelay = queryDelay; + } + + public void setFrequency(TimeValue frequency) { + this.frequency = frequency; + } + + public void setQuery(QueryBuilder query) { + this.query = query; + } + + public void setAggregations(AggregatorFactories.Builder aggregations) { + this.aggregations = aggregations; + } + + public void setScriptFields(List scriptFields) { + List sorted = new ArrayList<>(scriptFields); + sorted.sort(Comparator.comparing(SearchSourceBuilder.ScriptField::fieldName)); + this.scriptFields = sorted; + } + + public void setScrollSize(int scrollSize) { + this.scrollSize = scrollSize; + } + + public void setChunkingConfig(ChunkingConfig chunkingConfig) { + this.chunkingConfig = chunkingConfig; + } + + public DatafeedUpdate build() { + return new DatafeedUpdate(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize, + chunkingConfig); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/DataExtractor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/DataExtractor.java new file mode 
100644 index 0000000000000..20968b22425a2 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/DataExtractor.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.datafeed.extractor; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Optional; + +public interface DataExtractor { + + /** + * @return {@code true} if the search has not finished yet, or {@code false} otherwise + */ + boolean hasNext(); + + /** + * Returns the next available extracted data. Note that it is possible for the + * extracted data to be empty the last time this method can be called. + * @return an optional input stream with the next available extracted data + * @throws IOException if an error occurs while extracting the data + */ + Optional next() throws IOException; + + /** + * @return {@code true} if the extractor has been cancelled, or {@code false} otherwise + */ + boolean isCancelled(); + + /** + * Cancel the current search. + */ + void cancel(); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtils.java new file mode 100644 index 0000000000000..b0794adae4a69 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtils.java @@ -0,0 +1,184 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.datafeed.extractor; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.rounding.DateTimeUnit; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * Collects common utility methods needed by various {@link DataExtractor} implementations + */ +public final class ExtractorUtils { + + private static final Logger LOGGER = Loggers.getLogger(ExtractorUtils.class); + private static final String EPOCH_MILLIS = "epoch_millis"; + + private ExtractorUtils() {} + + /** + * Combines a user query with a time range query. 
+ */ + public static QueryBuilder wrapInTimeRangeQuery(QueryBuilder userQuery, String timeField, long start, long end) { + QueryBuilder timeQuery = new RangeQueryBuilder(timeField).gte(start).lt(end).format(EPOCH_MILLIS); + return new BoolQueryBuilder().filter(userQuery).filter(timeQuery); + } + + /** + * Checks that a {@link SearchResponse} has an OK status code and no shard failures + */ + public static void checkSearchWasSuccessful(String jobId, SearchResponse searchResponse) throws IOException { + if (searchResponse.status() != RestStatus.OK) { + throw new IOException("[" + jobId + "] Search request returned status code: " + searchResponse.status() + + ". Response was:\n" + searchResponse.toString()); + } + ShardSearchFailure[] shardFailures = searchResponse.getShardFailures(); + if (shardFailures != null && shardFailures.length > 0) { + LOGGER.error("[{}] Search request returned shard failures: {}", jobId, Arrays.toString(shardFailures)); + throw new IOException(ExceptionsHelper.shardFailuresToErrorMsg(jobId, shardFailures)); + } + int unavailableShards = searchResponse.getTotalShards() - searchResponse.getSuccessfulShards(); + if (unavailableShards > 0) { + throw new IOException("[" + jobId + "] Search request encountered [" + unavailableShards + "] unavailable shards"); + } + } + + /** + * Find the (date) histogram in {@code aggFactory} and extract its interval. + * Throws if there is no (date) histogram or if the histogram has sibling + * aggregations. + * @param aggFactory Aggregations factory + * @return The histogram interval + */ + public static long getHistogramIntervalMillis(AggregatorFactories.Builder aggFactory) { + AggregationBuilder histogram = getHistogramAggregation(aggFactory.getAggregatorFactories()); + return getHistogramIntervalMillis(histogram); + } + + /** + * Find and return (date) histogram in {@code aggregations} + * @param aggregations List of aggregations + * @return A {@link HistogramAggregationBuilder} or a {@link DateHistogramAggregationBuilder} + */ + public static AggregationBuilder getHistogramAggregation(List aggregations) { + if (aggregations.isEmpty()) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.DATAFEED_AGGREGATIONS_REQUIRES_DATE_HISTOGRAM)); + } + if (aggregations.size() != 1) { + throw ExceptionsHelper.badRequestException(Messages.DATAFEED_AGGREGATIONS_REQUIRES_DATE_HISTOGRAM_NO_SIBLINGS); + } + + AggregationBuilder agg = aggregations.get(0); + if (isHistogram(agg)) { + return agg; + } else { + return getHistogramAggregation(agg.getSubAggregations()); + } + } + + public static boolean isHistogram(AggregationBuilder aggregationBuilder) { + return aggregationBuilder instanceof HistogramAggregationBuilder + || aggregationBuilder instanceof DateHistogramAggregationBuilder; + } + + /** + * Get the interval from {@code histogramAggregation} or throw an {@code IllegalStateException} + * if {@code histogramAggregation} is not a {@link HistogramAggregationBuilder} or a + * {@link DateHistogramAggregationBuilder} + * + * @param histogramAggregation Must be a {@link HistogramAggregationBuilder} or a + * {@link DateHistogramAggregationBuilder} + * @return The histogram interval + */ + public static long getHistogramIntervalMillis(AggregationBuilder histogramAggregation) { + if (histogramAggregation instanceof HistogramAggregationBuilder) { + return (long) ((HistogramAggregationBuilder) histogramAggregation).interval(); + } else if (histogramAggregation instanceof DateHistogramAggregationBuilder) { + return 
validateAndGetDateHistogramInterval((DateHistogramAggregationBuilder) histogramAggregation); + } else { + throw new IllegalStateException("Invalid histogram aggregation [" + histogramAggregation.getName() + "]"); + } + } + + /** + * Returns the date histogram interval as epoch millis if valid, or throws + * an {@link ElasticsearchException} with the validation error + */ + private static long validateAndGetDateHistogramInterval(DateHistogramAggregationBuilder dateHistogram) { + if (dateHistogram.timeZone() != null && dateHistogram.timeZone().equals(DateTimeZone.UTC) == false) { + throw ExceptionsHelper.badRequestException("ML requires date_histogram.time_zone to be UTC"); + } + + if (dateHistogram.dateHistogramInterval() != null) { + return validateAndGetCalendarInterval(dateHistogram.dateHistogramInterval().toString()); + } else { + return dateHistogram.interval(); + } + } + + static long validateAndGetCalendarInterval(String calendarInterval) { + TimeValue interval; + DateTimeUnit dateTimeUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(calendarInterval); + if (dateTimeUnit != null) { + switch (dateTimeUnit) { + case WEEK_OF_WEEKYEAR: + interval = new TimeValue(7, TimeUnit.DAYS); + break; + case DAY_OF_MONTH: + interval = new TimeValue(1, TimeUnit.DAYS); + break; + case HOUR_OF_DAY: + interval = new TimeValue(1, TimeUnit.HOURS); + break; + case MINUTES_OF_HOUR: + interval = new TimeValue(1, TimeUnit.MINUTES); + break; + case SECOND_OF_MINUTE: + interval = new TimeValue(1, TimeUnit.SECONDS); + break; + case MONTH_OF_YEAR: + case YEAR_OF_CENTURY: + case QUARTER: + throw ExceptionsHelper.badRequestException(invalidDateHistogramCalendarIntervalMessage(calendarInterval)); + default: + throw ExceptionsHelper.badRequestException("Unexpected dateTimeUnit [" + dateTimeUnit + "]"); + } + } else { + interval = TimeValue.parseTimeValue(calendarInterval, "date_histogram.interval"); + } + if (interval.days() > 7) { + throw ExceptionsHelper.badRequestException(invalidDateHistogramCalendarIntervalMessage(calendarInterval)); + } + return interval.millis(); + } + + private static String invalidDateHistogramCalendarIntervalMessage(String interval) { + throw ExceptionsHelper.badRequestException("When specifying a date_histogram calendar interval [" + + interval + "], ML does not accept intervals longer than a week because of " + + "variable lengths of periods greater than a week"); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java new file mode 100644 index 0000000000000..02d8b6f529327 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java @@ -0,0 +1,794 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.Version; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.MlParserType; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.EnumMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import java.util.regex.Pattern; +import java.util.regex.PatternSyntaxException; +import java.util.stream.Collectors; + +/** + * Autodetect analysis configuration options describes which fields are + * analysed and the functions to use. + *

+ * The configuration can contain multiple detectors; a new anomaly detector will + * be created for each detector configuration. The fields + * bucketSpan, summaryCountFieldName and categorizationFieldName + * apply to all detectors. + *
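+ * An illustrative JSON form of this configuration (values are examples only):
+ *   "analysis_config" : {
+ *     "bucket_span" : "5m",
+ *     "detectors" : [ { "function" : "count" } ],
+ *     "influencers" : [ "clientip" ]
+ *   }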

+ * If a value has not been set it will be null + * Object wrappers are used around integral types & booleans so they can take + * null values. + */ +public class AnalysisConfig implements ToXContentObject, Writeable { + /** + * Serialisation names + */ + public static final ParseField ANALYSIS_CONFIG = new ParseField("analysis_config"); + private static final ParseField BUCKET_SPAN = new ParseField("bucket_span"); + private static final ParseField CATEGORIZATION_FIELD_NAME = new ParseField("categorization_field_name"); + static final ParseField CATEGORIZATION_FILTERS = new ParseField("categorization_filters"); + private static final ParseField CATEGORIZATION_ANALYZER = CategorizationAnalyzerConfig.CATEGORIZATION_ANALYZER; + private static final ParseField LATENCY = new ParseField("latency"); + private static final ParseField SUMMARY_COUNT_FIELD_NAME = new ParseField("summary_count_field_name"); + private static final ParseField DETECTORS = new ParseField("detectors"); + private static final ParseField INFLUENCERS = new ParseField("influencers"); + private static final ParseField OVERLAPPING_BUCKETS = new ParseField("overlapping_buckets"); + private static final ParseField RESULT_FINALIZATION_WINDOW = new ParseField("result_finalization_window"); + private static final ParseField MULTIVARIATE_BY_FIELDS = new ParseField("multivariate_by_fields"); + private static final ParseField MULTIPLE_BUCKET_SPANS = new ParseField("multiple_bucket_spans"); + private static final ParseField USER_PER_PARTITION_NORMALIZATION = new ParseField("use_per_partition_normalization"); + + public static final String ML_CATEGORY_FIELD = "mlcategory"; + public static final Set AUTO_CREATED_FIELDS = new HashSet<>(Collections.singletonList(ML_CATEGORY_FIELD)); + + public static final long DEFAULT_RESULT_FINALIZATION_WINDOW = 2L; + + // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser METADATA_PARSER = + new ConstructingObjectParser<>(ANALYSIS_CONFIG.getPreferredName(), true, + a -> new AnalysisConfig.Builder((List) a[0])); + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser CONFIG_PARSER = + new ConstructingObjectParser<>(ANALYSIS_CONFIG.getPreferredName(), false, + a -> new AnalysisConfig.Builder((List) a[0])); + public static final Map> PARSERS = + new EnumMap<>(MlParserType.class); + + static { + PARSERS.put(MlParserType.METADATA, METADATA_PARSER); + PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER); + for (MlParserType parserType : MlParserType.values()) { + ConstructingObjectParser parser = PARSERS.get(parserType); + assert parser != null; + parser.declareObjectArray(ConstructingObjectParser.constructorArg(), + (p, c) -> Detector.PARSERS.get(parserType).apply(p, c).build(), DETECTORS); + parser.declareString((builder, val) -> + builder.setBucketSpan(TimeValue.parseTimeValue(val, BUCKET_SPAN.getPreferredName())), BUCKET_SPAN); + parser.declareString(Builder::setCategorizationFieldName, CATEGORIZATION_FIELD_NAME); + parser.declareStringArray(Builder::setCategorizationFilters, CATEGORIZATION_FILTERS); + // This one is nasty - the syntax for analyzers takes either names or objects at many levels, hence it's not + // possible to simply declare whether the field is a string or object and a completely custom parser is required + parser.declareField(Builder::setCategorizationAnalyzerConfig, + (p, c) -> 
CategorizationAnalyzerConfig.buildFromXContentFragment(p, parserType), + CATEGORIZATION_ANALYZER, ObjectParser.ValueType.OBJECT_OR_STRING); + parser.declareString((builder, val) -> + builder.setLatency(TimeValue.parseTimeValue(val, LATENCY.getPreferredName())), LATENCY); + parser.declareString(Builder::setSummaryCountFieldName, SUMMARY_COUNT_FIELD_NAME); + parser.declareStringArray(Builder::setInfluencers, INFLUENCERS); + parser.declareBoolean(Builder::setOverlappingBuckets, OVERLAPPING_BUCKETS); + parser.declareLong(Builder::setResultFinalizationWindow, RESULT_FINALIZATION_WINDOW); + parser.declareBoolean(Builder::setMultivariateByFields, MULTIVARIATE_BY_FIELDS); + parser.declareStringArray((builder, values) -> builder.setMultipleBucketSpans( + values.stream().map(v -> TimeValue.parseTimeValue(v, MULTIPLE_BUCKET_SPANS.getPreferredName())) + .collect(Collectors.toList())), MULTIPLE_BUCKET_SPANS); + parser.declareBoolean(Builder::setUsePerPartitionNormalization, USER_PER_PARTITION_NORMALIZATION); + } + } + + /** + * These values apply to all detectors + */ + private final TimeValue bucketSpan; + private final String categorizationFieldName; + private final List categorizationFilters; + private final CategorizationAnalyzerConfig categorizationAnalyzerConfig; + private final TimeValue latency; + private final String summaryCountFieldName; + private final List detectors; + private final List influencers; + private final Boolean overlappingBuckets; + private final Long resultFinalizationWindow; + private final Boolean multivariateByFields; + private final List multipleBucketSpans; + private final boolean usePerPartitionNormalization; + + private AnalysisConfig(TimeValue bucketSpan, String categorizationFieldName, List categorizationFilters, + CategorizationAnalyzerConfig categorizationAnalyzerConfig, TimeValue latency, String summaryCountFieldName, + List detectors, List influencers, Boolean overlappingBuckets, Long resultFinalizationWindow, + Boolean multivariateByFields, List multipleBucketSpans, boolean usePerPartitionNormalization) { + this.detectors = detectors; + this.bucketSpan = bucketSpan; + this.latency = latency; + this.categorizationFieldName = categorizationFieldName; + this.categorizationAnalyzerConfig = categorizationAnalyzerConfig; + this.categorizationFilters = categorizationFilters; + this.summaryCountFieldName = summaryCountFieldName; + this.influencers = influencers; + this.overlappingBuckets = overlappingBuckets; + this.resultFinalizationWindow = resultFinalizationWindow; + this.multivariateByFields = multivariateByFields; + this.multipleBucketSpans = multipleBucketSpans; + this.usePerPartitionNormalization = usePerPartitionNormalization; + } + + public AnalysisConfig(StreamInput in) throws IOException { + bucketSpan = in.readTimeValue(); + categorizationFieldName = in.readOptionalString(); + categorizationFilters = in.readBoolean() ? 
in.readList(StreamInput::readString) : null; + if (in.getVersion().onOrAfter(Version.V_6_2_0)) { + categorizationAnalyzerConfig = in.readOptionalWriteable(CategorizationAnalyzerConfig::new); + } else { + categorizationAnalyzerConfig = null; + } + latency = in.readOptionalTimeValue(); + summaryCountFieldName = in.readOptionalString(); + detectors = in.readList(Detector::new); + influencers = in.readList(StreamInput::readString); + overlappingBuckets = in.readOptionalBoolean(); + resultFinalizationWindow = in.readOptionalLong(); + multivariateByFields = in.readOptionalBoolean(); + if (in.readBoolean()) { + final int arraySize = in.readVInt(); + final List spans = new ArrayList<>(arraySize); + for (int i = 0; i < arraySize; i++) { + spans.add(in.readTimeValue()); + } + multipleBucketSpans = spans; + } else { + multipleBucketSpans = null; + } + usePerPartitionNormalization = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeTimeValue(bucketSpan); + out.writeOptionalString(categorizationFieldName); + if (categorizationFilters != null) { + out.writeBoolean(true); + out.writeStringList(categorizationFilters); + } else { + out.writeBoolean(false); + } + if (out.getVersion().onOrAfter(Version.V_6_2_0)) { + out.writeOptionalWriteable(categorizationAnalyzerConfig); + } + out.writeOptionalTimeValue(latency); + out.writeOptionalString(summaryCountFieldName); + out.writeList(detectors); + out.writeStringList(influencers); + out.writeOptionalBoolean(overlappingBuckets); + out.writeOptionalLong(resultFinalizationWindow); + out.writeOptionalBoolean(multivariateByFields); + if (multipleBucketSpans != null) { + out.writeBoolean(true); + out.writeVInt(multipleBucketSpans.size()); + for (TimeValue span : multipleBucketSpans) { + out.writeTimeValue(span); + } + } else { + out.writeBoolean(false); + } + out.writeBoolean(usePerPartitionNormalization); + } + + /** + * The analysis bucket span + * + * @return The bucketspan or null if not set + */ + public TimeValue getBucketSpan() { + return bucketSpan; + } + + public String getCategorizationFieldName() { + return categorizationFieldName; + } + + public List getCategorizationFilters() { + return categorizationFilters; + } + + public CategorizationAnalyzerConfig getCategorizationAnalyzerConfig() { + return categorizationAnalyzerConfig; + } + + /** + * The latency interval during which out-of-order records should be handled. + * + * @return The latency interval or null if not set + */ + public TimeValue getLatency() { + return latency; + } + + /** + * The name of the field that contains counts for pre-summarised input + * + * @return The field name or null if not set + */ + public String getSummaryCountFieldName() { + return summaryCountFieldName; + } + + /** + * The list of analysis detectors. In a valid configuration the list should + * contain at least 1 {@link Detector} + * + * @return The Detectors used in this job + */ + public List getDetectors() { + return detectors; + } + + /** + * The list of influence field names + */ + public List getInfluencers() { + return influencers; + } + + /** + * Return the list of term fields. + * These are the influencer fields, partition field, + * by field and over field of each detector. + * null and empty strings are filtered from the + * config. 
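+ * For example (illustrative field names), a detector with by field "user" and
+ * partition field "host" plus an influencer "clientip" yields the term fields
+ * {"clientip", "host", "user"}.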
+ * + * @return Set of term fields - never null + */ + public Set termFields() { + return termFields(getDetectors(), getInfluencers()); + } + + static SortedSet termFields(List detectors, List influencers) { + SortedSet termFields = new TreeSet<>(); + + detectors.forEach(d -> termFields.addAll(d.getByOverPartitionTerms())); + + for (String i : influencers) { + addIfNotNull(termFields, i); + } + + // remove empty strings + termFields.remove(""); + + return termFields; + } + + public Set extractReferencedFilters() { + return detectors.stream().map(Detector::extractReferencedFilters) + .flatMap(Set::stream).collect(Collectors.toSet()); + } + + public Boolean getOverlappingBuckets() { + return overlappingBuckets; + } + + public Long getResultFinalizationWindow() { + return resultFinalizationWindow; + } + + public Boolean getMultivariateByFields() { + return multivariateByFields; + } + + public List getMultipleBucketSpans() { + return multipleBucketSpans; + } + + public boolean getUsePerPartitionNormalization() { + return usePerPartitionNormalization; + } + + /** + * Return the set of fields required by the analysis. + * These are the influencer fields, metric field, partition field, + * by field and over field of each detector, plus the summary count + * field and the categorization field name of the job. + * null and empty strings are filtered from the + * config. + * + * @return Set of required analysis fields - never null + */ + public Set analysisFields() { + Set analysisFields = termFields(); + + addIfNotNull(analysisFields, categorizationFieldName); + addIfNotNull(analysisFields, summaryCountFieldName); + + for (Detector d : getDetectors()) { + addIfNotNull(analysisFields, d.getFieldName()); + } + + // remove empty strings + analysisFields.remove(""); + + return analysisFields; + } + + private static void addIfNotNull(Set fields, String field) { + if (field != null) { + fields.add(field); + } + } + + public List fields() { + return collectNonNullAndNonEmptyDetectorFields(Detector::getFieldName); + } + + private List collectNonNullAndNonEmptyDetectorFields( + Function fieldGetter) { + Set fields = new HashSet<>(); + + for (Detector d : getDetectors()) { + addIfNotNull(fields, fieldGetter.apply(d)); + } + + // remove empty strings + fields.remove(""); + + return new ArrayList<>(fields); + } + + public List byFields() { + return collectNonNullAndNonEmptyDetectorFields(Detector::getByFieldName); + } + + public List overFields() { + return collectNonNullAndNonEmptyDetectorFields(Detector::getOverFieldName); + } + + + public List partitionFields() { + return collectNonNullAndNonEmptyDetectorFields(Detector::getPartitionFieldName); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(BUCKET_SPAN.getPreferredName(), bucketSpan.getStringRep()); + if (categorizationFieldName != null) { + builder.field(CATEGORIZATION_FIELD_NAME.getPreferredName(), categorizationFieldName); + } + if (categorizationFilters != null) { + builder.field(CATEGORIZATION_FILTERS.getPreferredName(), categorizationFilters); + } + if (categorizationAnalyzerConfig != null) { + // This cannot be builder.field(CATEGORIZATION_ANALYZER.getPreferredName(), categorizationAnalyzerConfig, params); + // because that always writes categorizationAnalyzerConfig as an object, and in the case of a global analyzer it + // gets written as a single string. 
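+ // Illustrative contrast (values are examples): a global analyzer serializes as
+ //   "categorization_analyzer" : "standard"
+ // whereas a custom analyzer serializes as an object, e.g.
+ //   "categorization_analyzer" : { "tokenizer" : "ml_classic", "token_filters" : [ "lowercase" ] }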
+ categorizationAnalyzerConfig.toXContent(builder, params); + } + if (latency != null) { + builder.field(LATENCY.getPreferredName(), latency.getStringRep()); + } + if (summaryCountFieldName != null) { + builder.field(SUMMARY_COUNT_FIELD_NAME.getPreferredName(), summaryCountFieldName); + } + builder.startArray(DETECTORS.getPreferredName()); + for (Detector detector: detectors) { + detector.toXContent(builder, params); + } + builder.endArray(); + builder.field(INFLUENCERS.getPreferredName(), influencers); + if (overlappingBuckets != null) { + builder.field(OVERLAPPING_BUCKETS.getPreferredName(), overlappingBuckets); + } + if (resultFinalizationWindow != null) { + builder.field(RESULT_FINALIZATION_WINDOW.getPreferredName(), resultFinalizationWindow); + } + if (multivariateByFields != null) { + builder.field(MULTIVARIATE_BY_FIELDS.getPreferredName(), multivariateByFields); + } + if (multipleBucketSpans != null) { + builder.field(MULTIPLE_BUCKET_SPANS.getPreferredName(), + multipleBucketSpans.stream().map(TimeValue::getStringRep).collect(Collectors.toList())); + } + if (usePerPartitionNormalization) { + builder.field(USER_PER_PARTITION_NORMALIZATION.getPreferredName(), usePerPartitionNormalization); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AnalysisConfig that = (AnalysisConfig) o; + return Objects.equals(latency, that.latency) && + usePerPartitionNormalization == that.usePerPartitionNormalization && + Objects.equals(bucketSpan, that.bucketSpan) && + Objects.equals(categorizationFieldName, that.categorizationFieldName) && + Objects.equals(categorizationFilters, that.categorizationFilters) && + Objects.equals(categorizationAnalyzerConfig, that.categorizationAnalyzerConfig) && + Objects.equals(summaryCountFieldName, that.summaryCountFieldName) && + Objects.equals(detectors, that.detectors) && + Objects.equals(influencers, that.influencers) && + Objects.equals(overlappingBuckets, that.overlappingBuckets) && + Objects.equals(resultFinalizationWindow, that.resultFinalizationWindow) && + Objects.equals(multivariateByFields, that.multivariateByFields) && + Objects.equals(multipleBucketSpans, that.multipleBucketSpans); + } + + @Override + public int hashCode() { + return Objects.hash( + bucketSpan, categorizationFieldName, categorizationFilters, categorizationAnalyzerConfig, latency, + summaryCountFieldName, detectors, influencers, overlappingBuckets, resultFinalizationWindow, + multivariateByFields, multipleBucketSpans, usePerPartitionNormalization + ); + } + + public static class Builder { + + public static final TimeValue DEFAULT_BUCKET_SPAN = TimeValue.timeValueMinutes(5); + + private List detectors; + private TimeValue bucketSpan = DEFAULT_BUCKET_SPAN; + private TimeValue latency; + private String categorizationFieldName; + private List categorizationFilters; + private CategorizationAnalyzerConfig categorizationAnalyzerConfig; + private String summaryCountFieldName; + private List influencers = new ArrayList<>(); + private Boolean overlappingBuckets; + private Long resultFinalizationWindow; + private Boolean multivariateByFields; + private List multipleBucketSpans; + private boolean usePerPartitionNormalization = false; + + public Builder(List detectors) { + setDetectors(detectors); + } + + public Builder(AnalysisConfig analysisConfig) { + this.detectors = analysisConfig.detectors; + this.bucketSpan = analysisConfig.bucketSpan; + this.latency = 
analysisConfig.latency; + this.categorizationFieldName = analysisConfig.categorizationFieldName; + this.categorizationFilters = analysisConfig.categorizationFilters; + this.categorizationAnalyzerConfig = analysisConfig.categorizationAnalyzerConfig; + this.summaryCountFieldName = analysisConfig.summaryCountFieldName; + this.influencers = analysisConfig.influencers; + this.overlappingBuckets = analysisConfig.overlappingBuckets; + this.resultFinalizationWindow = analysisConfig.resultFinalizationWindow; + this.multivariateByFields = analysisConfig.multivariateByFields; + this.multipleBucketSpans = analysisConfig.multipleBucketSpans; + this.usePerPartitionNormalization = analysisConfig.usePerPartitionNormalization; + } + + public void setDetectors(List detectors) { + if (detectors == null) { + this.detectors = null; + return; + } + // We always assign sequential IDs to the detectors that are correct for this analysis config + int detectorIndex = 0; + List sequentialIndexDetectors = new ArrayList<>(detectors.size()); + for (Detector origDetector : detectors) { + Detector.Builder builder = new Detector.Builder(origDetector); + builder.setDetectorIndex(detectorIndex++); + sequentialIndexDetectors.add(builder.build()); + } + this.detectors = sequentialIndexDetectors; + } + + public void setBucketSpan(TimeValue bucketSpan) { + this.bucketSpan = bucketSpan; + } + + public void setLatency(TimeValue latency) { + this.latency = latency; + } + + public void setCategorizationFieldName(String categorizationFieldName) { + this.categorizationFieldName = categorizationFieldName; + } + + public void setCategorizationFilters(List categorizationFilters) { + this.categorizationFilters = categorizationFilters; + } + + public void setCategorizationAnalyzerConfig(CategorizationAnalyzerConfig categorizationAnalyzerConfig) { + this.categorizationAnalyzerConfig = categorizationAnalyzerConfig; + } + + public void setSummaryCountFieldName(String summaryCountFieldName) { + this.summaryCountFieldName = summaryCountFieldName; + } + + public void setInfluencers(List influencers) { + this.influencers = influencers; + } + + public void setOverlappingBuckets(Boolean overlappingBuckets) { + this.overlappingBuckets = overlappingBuckets; + } + + public void setResultFinalizationWindow(Long resultFinalizationWindow) { + this.resultFinalizationWindow = resultFinalizationWindow; + } + + public void setMultivariateByFields(Boolean multivariateByFields) { + this.multivariateByFields = multivariateByFields; + } + + public void setMultipleBucketSpans(List multipleBucketSpans) { + this.multipleBucketSpans = multipleBucketSpans; + } + + public void setUsePerPartitionNormalization(boolean usePerPartitionNormalization) { + this.usePerPartitionNormalization = usePerPartitionNormalization; + } + + /** + * Checks the configuration is valid + *

    + *
+ * <ol>
+ * <li>Check that, if non-null, BucketSpan and Latency are &gt;= 0</li>
+ * <li>Check that, if non-null, Latency is &lt;= MAX_LATENCY</li>
+ * <li>Check there is at least one detector configured</li>
+ * <li>Check all the detectors are configured correctly</li>
+ * <li>Check that OVERLAPPING_BUCKETS is set appropriately</li>
+ * <li>Check that MULTIPLE_BUCKET_SPANS are set appropriately</li>
+ * <li>If per-partition normalization is configured, at least one detector
+ * must have a partition field and no influencers can be used</li>
+ * </ol>
+ */ + public AnalysisConfig build() { + TimeUtils.checkPositiveMultiple(bucketSpan, TimeUnit.SECONDS, BUCKET_SPAN); + if (latency != null) { + TimeUtils.checkNonNegativeMultiple(latency, TimeUnit.SECONDS, LATENCY); + } + + verifyDetectorAreDefined(); + Detector.Builder.verifyFieldName(summaryCountFieldName); + Detector.Builder.verifyFieldName(categorizationFieldName); + + verifyMlCategoryIsUsedWhenCategorizationFieldNameIsSet(); + verifyCategorizationAnalyzer(); + verifyCategorizationFilters(); + checkFieldIsNotNegativeIfSpecified(RESULT_FINALIZATION_WINDOW.getPreferredName(), resultFinalizationWindow); + verifyMultipleBucketSpans(); + + verifyNoMetricFunctionsWhenSummaryCountFieldNameIsSet(); + + overlappingBuckets = verifyOverlappingBucketsConfig(overlappingBuckets, detectors); + + if (usePerPartitionNormalization) { + checkDetectorsHavePartitionFields(detectors); + checkNoInfluencersAreSet(influencers); + } + + verifyNoInconsistentNestedFieldNames(); + + return new AnalysisConfig(bucketSpan, categorizationFieldName, categorizationFilters, categorizationAnalyzerConfig, + latency, summaryCountFieldName, detectors, influencers, overlappingBuckets, + resultFinalizationWindow, multivariateByFields, multipleBucketSpans, usePerPartitionNormalization); + } + + private void verifyNoMetricFunctionsWhenSummaryCountFieldNameIsSet() { + if (Strings.isNullOrEmpty(summaryCountFieldName) == false && + detectors.stream().anyMatch(d -> DetectorFunction.METRIC.equals(d.getFunction()))) { + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.JOB_CONFIG_FUNCTION_INCOMPATIBLE_PRESUMMARIZED, DetectorFunction.METRIC)); + } + } + + private static void checkFieldIsNotNegativeIfSpecified(String fieldName, Long value) { + if (value != null && value < 0) { + String msg = Messages.getMessage(Messages.JOB_CONFIG_FIELD_VALUE_TOO_LOW, fieldName, 0, value); + throw ExceptionsHelper.badRequestException(msg); + } + } + + private void verifyDetectorAreDefined() { + if (detectors == null || detectors.isEmpty()) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.JOB_CONFIG_NO_DETECTORS)); + } + } + + private void verifyNoInconsistentNestedFieldNames() { + SortedSet termFields = termFields(detectors, influencers); + // We want to outlaw nested fields where a less nested field clashes with one of the nested levels. + // For example, this is not allowed: + // - a + // - a.b + // Nor is this: + // - a.b + // - a.b.c + // But this is OK: + // - a.b + // - a.c + // The sorted set makes it relatively easy to detect the situations we want to avoid. 
+ String prevTermField = null; + for (String termField : termFields) { + if (prevTermField != null && termField.startsWith(prevTermField + ".")) { + throw ExceptionsHelper.badRequestException("Fields [" + prevTermField + "] and [" + termField + + "] cannot both be used in the same analysis_config"); + } + prevTermField = termField; + } + } + + private void verifyMlCategoryIsUsedWhenCategorizationFieldNameIsSet() { + Set byOverPartitionFields = new TreeSet<>(); + detectors.forEach(d -> byOverPartitionFields.addAll(d.getByOverPartitionTerms())); + boolean isMlCategoryUsed = byOverPartitionFields.contains(ML_CATEGORY_FIELD); + if (isMlCategoryUsed && categorizationFieldName == null) { + throw ExceptionsHelper.badRequestException(CATEGORIZATION_FIELD_NAME.getPreferredName() + + " must be set for " + ML_CATEGORY_FIELD + " to be available"); + } + if (categorizationFieldName != null && isMlCategoryUsed == false) { + throw ExceptionsHelper.badRequestException(CATEGORIZATION_FIELD_NAME.getPreferredName() + + " is set but " + ML_CATEGORY_FIELD + " is not used in any detector by/over/partition field"); + } + } + + private void verifyCategorizationAnalyzer() { + if (categorizationAnalyzerConfig == null) { + return; + } + + verifyCategorizationFieldNameSetIfAnalyzerIsSet(); + } + + private void verifyCategorizationFieldNameSetIfAnalyzerIsSet() { + if (categorizationFieldName == null) { + throw ExceptionsHelper.badRequestException(Messages.getMessage( + Messages.JOB_CONFIG_CATEGORIZATION_ANALYZER_REQUIRES_CATEGORIZATION_FIELD_NAME)); + } + } + + private void verifyCategorizationFilters() { + if (categorizationFilters == null || categorizationFilters.isEmpty()) { + return; + } + + verifyCategorizationAnalyzerNotSetIfFiltersAreSet(); + verifyCategorizationFieldNameSetIfFiltersAreSet(); + verifyCategorizationFiltersAreDistinct(); + verifyCategorizationFiltersContainNoneEmpty(); + verifyCategorizationFiltersAreValidRegex(); + } + + private void verifyCategorizationAnalyzerNotSetIfFiltersAreSet() { + if (categorizationAnalyzerConfig != null) { + throw ExceptionsHelper.badRequestException(Messages.getMessage( + Messages.JOB_CONFIG_CATEGORIZATION_FILTERS_INCOMPATIBLE_WITH_CATEGORIZATION_ANALYZER)); + } + } + + private void verifyCategorizationFieldNameSetIfFiltersAreSet() { + if (categorizationFieldName == null) { + throw ExceptionsHelper.badRequestException(Messages.getMessage( + Messages.JOB_CONFIG_CATEGORIZATION_FILTERS_REQUIRE_CATEGORIZATION_FIELD_NAME)); + } + } + + private void verifyCategorizationFiltersAreDistinct() { + if (categorizationFilters.stream().distinct().count() != categorizationFilters.size()) { + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_DUPLICATES)); + } + } + + private void verifyCategorizationFiltersContainNoneEmpty() { + if (categorizationFilters.stream().anyMatch(String::isEmpty)) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_EMPTY)); + } + } + + private void verifyCategorizationFiltersAreValidRegex() { + for (String filter : categorizationFilters) { + if (!isValidRegex(filter)) { + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_INVALID_REGEX, filter)); + } + } + } + + private void verifyMultipleBucketSpans() { + if (multipleBucketSpans == null) { + return; + } + + for (TimeValue span : multipleBucketSpans) { + if ((span.getSeconds() % bucketSpan.getSeconds() != 0L) 
|| (span.compareTo(bucketSpan) <= 0)) { + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.JOB_CONFIG_MULTIPLE_BUCKETSPANS_MUST_BE_MULTIPLE, span, bucketSpan)); + } + } + } + + private static void checkDetectorsHavePartitionFields(List detectors) { + for (Detector detector : detectors) { + if (!Strings.isNullOrEmpty(detector.getPartitionFieldName())) { + return; + } + } + throw ExceptionsHelper.badRequestException(Messages.getMessage( + Messages.JOB_CONFIG_PER_PARTITION_NORMALIZATION_REQUIRES_PARTITION_FIELD)); + } + + private static void checkNoInfluencersAreSet(List influencers) { + if (!influencers.isEmpty()) { + throw ExceptionsHelper.badRequestException(Messages.getMessage( + Messages.JOB_CONFIG_PER_PARTITION_NORMALIZATION_CANNOT_USE_INFLUENCERS)); + } + } + + private static boolean isValidRegex(String exp) { + try { + Pattern.compile(exp); + return true; + } catch (PatternSyntaxException e) { + return false; + } + } + + private static Boolean verifyOverlappingBucketsConfig(Boolean overlappingBuckets, List detectors) { + // If any detector function is rare/freq_rare, mustn't use overlapping buckets + boolean mustNotUse = false; + + List illegalFunctions = new ArrayList<>(); + for (Detector d : detectors) { + if (Detector.NO_OVERLAPPING_BUCKETS_FUNCTIONS.contains(d.getFunction())) { + illegalFunctions.add(d.getFunction()); + mustNotUse = true; + } + } + + if (Boolean.TRUE.equals(overlappingBuckets) && mustNotUse) { + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.JOB_CONFIG_OVERLAPPING_BUCKETS_INCOMPATIBLE_FUNCTION, illegalFunctions.toString())); + } + + return overlappingBuckets; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisLimits.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisLimits.java new file mode 100644 index 0000000000000..569d62a02cf8b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisLimits.java @@ -0,0 +1,223 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.MlParserType; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.EnumMap; +import java.util.Map; +import java.util.Objects; + +/** + * Analysis limits for autodetect. In particular, + * this is a collection of parameters that allow limiting + * the resources used by the job. 
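+ *
+ * An illustrative JSON form (values are examples only):
+ *   "analysis_limits" : {
+ *     "model_memory_limit" : "1024mb",
+ *     "categorization_examples_limit" : 4
+ *   }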
+ */ +public class AnalysisLimits implements ToXContentObject, Writeable { + + /** + * Prior to 6.1 the default model memory size limit was 4GB, and defined in the C++ code. The default + * is now 1GB and defined here in the Java code. Prior to 6.3, a value of null means that + * the old default value should be used. From 6.3 onwards, the value will always be explicit. + */ + public static final long DEFAULT_MODEL_MEMORY_LIMIT_MB = 1024L; + static final long PRE_6_1_DEFAULT_MODEL_MEMORY_LIMIT_MB = 4096L; + + public static final long DEFAULT_CATEGORIZATION_EXAMPLES_LIMIT = 4; + + /** + * Serialisation field names + */ + public static final ParseField MODEL_MEMORY_LIMIT = new ParseField("model_memory_limit"); + public static final ParseField CATEGORIZATION_EXAMPLES_LIMIT = new ParseField("categorization_examples_limit"); + + // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly + public static final ConstructingObjectParser METADATA_PARSER = new ConstructingObjectParser<>( + "analysis_limits", true, a -> new AnalysisLimits( + a[0] == null ? PRE_6_1_DEFAULT_MODEL_MEMORY_LIMIT_MB : (Long) a[0], + a[1] == null ? DEFAULT_CATEGORIZATION_EXAMPLES_LIMIT : (Long) a[1])); + public static final ConstructingObjectParser CONFIG_PARSER = new ConstructingObjectParser<>( + "analysis_limits", false, a -> new AnalysisLimits((Long) a[0], (Long) a[1])); + public static final Map> PARSERS = + new EnumMap<>(MlParserType.class); + + static { + PARSERS.put(MlParserType.METADATA, METADATA_PARSER); + PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER); + for (MlParserType parserType : MlParserType.values()) { + ConstructingObjectParser parser = PARSERS.get(parserType); + assert parser != null; + parser.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return ByteSizeValue.parseBytesSizeValue(p.text(), MODEL_MEMORY_LIMIT.getPreferredName()).getMb(); + } else if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) { + return p.longValue(); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, MODEL_MEMORY_LIMIT, ObjectParser.ValueType.VALUE); + parser.declareLong(ConstructingObjectParser.optionalConstructorArg(), CATEGORIZATION_EXAMPLES_LIMIT); + } + } + + /** + * The model memory limit in MiBs. + * It is initialised to null. + * A value of null will result to the default defined in the C++ code being used. + * However, for jobs created in version 6.1 or higher this will rarely be null because + * the put_job action set it to a new default defined in the Java code. + */ + private final Long modelMemoryLimit; + + /** + * It is initialised to null. + * A value of null will result to the default being used. 
+ */ + private final Long categorizationExamplesLimit; + + public AnalysisLimits(Long categorizationExamplesLimit) { + this(DEFAULT_MODEL_MEMORY_LIMIT_MB, categorizationExamplesLimit); + } + + public AnalysisLimits(Long modelMemoryLimit, Long categorizationExamplesLimit) { + if (modelMemoryLimit != null && modelMemoryLimit < 1) { + String msg = Messages.getMessage(Messages.JOB_CONFIG_MODEL_MEMORY_LIMIT_TOO_LOW, modelMemoryLimit); + throw ExceptionsHelper.badRequestException(msg); + } + if (categorizationExamplesLimit != null && categorizationExamplesLimit < 0) { + String msg = Messages.getMessage(Messages.JOB_CONFIG_FIELD_VALUE_TOO_LOW, CATEGORIZATION_EXAMPLES_LIMIT, 0, + categorizationExamplesLimit); + throw ExceptionsHelper.badRequestException(msg); + } + this.modelMemoryLimit = modelMemoryLimit; + this.categorizationExamplesLimit = categorizationExamplesLimit; + } + + public AnalysisLimits(StreamInput in) throws IOException { + this(in.readOptionalLong(), in.readOptionalLong()); + } + + /** + * Creates a new {@code AnalysisLimits} object after validating it against external limitations + * and filling missing values with their defaults. Validations: + * + *
    + *
+ * <ul>
+ * <li>check model memory limit doesn't exceed the MAX_MODEL_MEM setting</li>
+ * </ul>
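+ *
+ * For example (illustrative values): with a default model memory limit of 1024 and a max of
+ * 512mb, an unset {@code source} yields a model_memory_limit of 512, whereas an explicit
+ * source value of 2048 exceeds the max and is rejected.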
+ * + * @param source an optional {@code AnalysisLimits} whose explicit values will be copied + * @param maxModelMemoryLimit the max allowed model memory limit + * @param defaultModelMemoryLimit the default model memory limit to be used if an explicit value is missing + * @return a new {@code AnalysisLimits} that is validated and has no missing values + */ + public static AnalysisLimits validateAndSetDefaults(@Nullable AnalysisLimits source, @Nullable ByteSizeValue maxModelMemoryLimit, + long defaultModelMemoryLimit) { + + boolean maxModelMemoryIsSet = maxModelMemoryLimit != null && maxModelMemoryLimit.getMb() > 0; + + long modelMemoryLimit = defaultModelMemoryLimit; + if (maxModelMemoryIsSet) { + modelMemoryLimit = Math.min(maxModelMemoryLimit.getMb(), modelMemoryLimit); + } + + long categorizationExamplesLimit = DEFAULT_CATEGORIZATION_EXAMPLES_LIMIT; + + if (source != null) { + if (source.getModelMemoryLimit() != null) { + modelMemoryLimit = source.getModelMemoryLimit(); + } + if (source.getCategorizationExamplesLimit() != null) { + categorizationExamplesLimit = source.getCategorizationExamplesLimit(); + } + } + + if (maxModelMemoryIsSet && modelMemoryLimit > maxModelMemoryLimit.getMb()) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.JOB_CONFIG_MODEL_MEMORY_LIMIT_GREATER_THAN_MAX, + new ByteSizeValue(modelMemoryLimit, ByteSizeUnit.MB), + maxModelMemoryLimit)); + } + + return new AnalysisLimits(modelMemoryLimit, categorizationExamplesLimit); + } + + /** + * Maximum size of the model in MB before the anomaly detector + * will drop new samples to prevent the model using any more + * memory. + * + * @return The set memory limit or null if not set + */ + @Nullable + public Long getModelMemoryLimit() { + return modelMemoryLimit; + } + + /** + * Gets the limit to the number of examples that are stored per category + * + * @return the limit or null if not set + */ + @Nullable + public Long getCategorizationExamplesLimit() { + return categorizationExamplesLimit; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalLong(modelMemoryLimit); + out.writeOptionalLong(categorizationExamplesLimit); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (modelMemoryLimit != null) { + builder.field(MODEL_MEMORY_LIMIT.getPreferredName(), modelMemoryLimit + "mb"); + } + if (categorizationExamplesLimit != null) { + builder.field(CATEGORIZATION_EXAMPLES_LIMIT.getPreferredName(), categorizationExamplesLimit); + } + builder.endObject(); + return builder; + } + + /** + * Overridden equality test + */ + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other instanceof AnalysisLimits == false) { + return false; + } + + AnalysisLimits that = (AnalysisLimits) other; + return Objects.equals(this.modelMemoryLimit, that.modelMemoryLimit) && + Objects.equals(this.categorizationExamplesLimit, that.categorizationExamplesLimit); + } + + @Override + public int hashCode() { + return Objects.hash(modelMemoryLimit, categorizationExamplesLimit); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/CategorizationAnalyzerConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/CategorizationAnalyzerConfig.java new file mode 100644 index 0000000000000..1c2808c70ffcf --- /dev/null +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/CategorizationAnalyzerConfig.java @@ -0,0 +1,625 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.apache.lucene.analysis.Analyzer; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.analyze.TransportAnalyzeAction; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AnalysisRegistry; +import org.elasticsearch.index.analysis.CharFilterFactory; +import org.elasticsearch.index.analysis.CustomAnalyzer; +import org.elasticsearch.index.analysis.CustomAnalyzerProvider; +import org.elasticsearch.index.analysis.TokenFilterFactory; +import org.elasticsearch.index.analysis.TokenizerFactory; +import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.rest.action.admin.indices.RestAnalyzeAction; +import org.elasticsearch.xpack.core.ml.MlParserType; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + + +/** + * Configuration for the categorization analyzer. + * + * The syntax is a subset of what can be supplied to the {@linkplain RestAnalyzeAction _analyze endpoint}. + * To summarise, the first option is to specify the name of an out-of-the-box analyzer: + * + * "categorization_analyzer" : "standard" + * + * + * The second option is to specify a custom analyzer by combining the char_filters, tokenizer + * and token_filters fields. In turn, each of these can be specified as the name of an out-of-the-box + * one or as an object defining a custom one. For example: + * + * "char_filters" : [ + * "html_strip", + * { "type" : "pattern_replace", "pattern": "SQL: .*" } + * ], + * "tokenizer" : "thai", + * "token_filters" : [ + * "lowercase", + * { "type" : "pattern_replace", "pattern": "^[0-9].*" } + * ] + * + * + * Unfortunately there is no easy to to reuse a subset of the _analyze action implementation, so much + * of the code in this file is copied from {@link TransportAnalyzeAction}. Unfortunately the logic required here is + * not quite identical to that of {@link TransportAnalyzeAction}, and the required code is hard to partially reuse. + * TODO: consider refactoring ES core to allow more reuse. 
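+ *
+ * Putting the two options together, a complete custom analyzer configuration might look like
+ * (illustrative, combining the fragments above):
+ *
+ * "categorization_analyzer" : {
+ *   "char_filters" : [ "html_strip" ],
+ *   "tokenizer" : "ml_classic",
+ *   "token_filters" : [ "lowercase" ]
+ * }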
+ */ +public class CategorizationAnalyzerConfig implements ToXContentFragment, Writeable { + + public static final ParseField CATEGORIZATION_ANALYZER = new ParseField("categorization_analyzer"); + private static final ParseField TOKENIZER = RestAnalyzeAction.Fields.TOKENIZER; + private static final ParseField TOKEN_FILTERS = RestAnalyzeAction.Fields.TOKEN_FILTERS; + private static final ParseField CHAR_FILTERS = RestAnalyzeAction.Fields.CHAR_FILTERS; + + /** + * This method is only used in the unit tests - in production code this config is always parsed as a fragment. + */ + public static CategorizationAnalyzerConfig buildFromXContentObject(XContentParser parser, MlParserType parserType) throws IOException { + + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new IllegalArgumentException("Expected start object but got [" + parser.currentToken() + "]"); + } + if (parser.nextToken() != XContentParser.Token.FIELD_NAME + || CATEGORIZATION_ANALYZER.match(parser.currentName(), parser.getDeprecationHandler()) == false) { + throw new IllegalArgumentException("Expected [" + CATEGORIZATION_ANALYZER + "] field but got [" + parser.currentToken() + "]"); + } + parser.nextToken(); + CategorizationAnalyzerConfig categorizationAnalyzerConfig = buildFromXContentFragment(parser, parserType); + parser.nextToken(); + return categorizationAnalyzerConfig; + } + + /** + * Parse a categorization_analyzer from configuration or cluster state. A custom parser is needed + * due to the complexity of the format, with many elements able to be specified as either the name of a built-in + * element or an object containing a custom definition. + * + * The parser is strict when parsing config and lenient when parsing cluster state. + */ + static CategorizationAnalyzerConfig buildFromXContentFragment(XContentParser parser, MlParserType parserType) throws IOException { + + CategorizationAnalyzerConfig.Builder builder = new CategorizationAnalyzerConfig.Builder(); + + XContentParser.Token token = parser.currentToken(); + if (token == XContentParser.Token.VALUE_STRING) { + builder.setAnalyzer(parser.text()); + } else if (token != XContentParser.Token.START_OBJECT) { + throw new IllegalArgumentException("[" + CATEGORIZATION_ANALYZER + "] should be analyzer's name or settings [" + token + "]"); + } else { + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (CHAR_FILTERS.match(currentFieldName, parser.getDeprecationHandler()) + && token == XContentParser.Token.START_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.VALUE_STRING) { + builder.addCharFilter(parser.text()); + } else if (token == XContentParser.Token.START_OBJECT) { + builder.addCharFilter(parser.map()); + } else { + throw new IllegalArgumentException("[" + currentFieldName + "] in [" + CATEGORIZATION_ANALYZER + + "] array element should contain char_filter's name or settings [" + token + "]"); + } + } + } else if (TOKENIZER.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_STRING) { + builder.setTokenizer(parser.text()); + } else if (token == XContentParser.Token.START_OBJECT) { + builder.setTokenizer(parser.map()); + } else { + throw new IllegalArgumentException("[" + currentFieldName + "] in [" + CATEGORIZATION_ANALYZER + + "] should be tokenizer's name or settings 
[" + token + "]"); + } + } else if (TOKEN_FILTERS.match(currentFieldName, parser.getDeprecationHandler()) + && token == XContentParser.Token.START_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.VALUE_STRING) { + builder.addTokenFilter(parser.text()); + } else if (token == XContentParser.Token.START_OBJECT) { + builder.addTokenFilter(parser.map()); + } else { + throw new IllegalArgumentException("[" + currentFieldName + "] in [" + CATEGORIZATION_ANALYZER + + "] array element should contain token_filter's name or settings [" + token + "]"); + } + } + // Be lenient when parsing cluster state - assume unknown fields are from future versions + } else if (parserType == MlParserType.CONFIG) { + throw new IllegalArgumentException("Parameter [" + currentFieldName + "] in [" + CATEGORIZATION_ANALYZER + + "] is unknown or of the wrong type [" + token + "]"); + } + } + } + + return builder.build(); + } + + /** + * Create a categorization_analyzer that mimics what the tokenizer and filters built into the ML C++ + * code do. This is the default analyzer for categorization to ensure that people upgrading from previous versions + * get the same behaviour from their categorization jobs before and after upgrade. + * @param categorizationFilters Categorization filters (if any) from the analysis_config. + * @return The default categorization analyzer. + */ + public static CategorizationAnalyzerConfig buildDefaultCategorizationAnalyzer(List categorizationFilters) { + + CategorizationAnalyzerConfig.Builder builder = new CategorizationAnalyzerConfig.Builder(); + + if (categorizationFilters != null) { + for (String categorizationFilter : categorizationFilters) { + Map charFilter = new HashMap<>(); + charFilter.put("type", "pattern_replace"); + charFilter.put("pattern", categorizationFilter); + builder.addCharFilter(charFilter); + } + } + + builder.setTokenizer("ml_classic"); + + Map tokenFilter = new HashMap<>(); + tokenFilter.put("type", "stop"); + tokenFilter.put("stopwords", Arrays.asList( + "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday", + "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun", + "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December", + "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", + "GMT", "UTC")); + builder.addTokenFilter(tokenFilter); + + return builder.build(); + } + + /** + * Simple store of either a name of a built-in analyzer element or a custom definition. 
+ */ + public static class NameOrDefinition implements ToXContentFragment, Writeable { + + // Exactly one of these two members is not null + public final String name; + public final Settings definition; + + NameOrDefinition(String name) { + this.name = Objects.requireNonNull(name); + this.definition = null; + } + + NameOrDefinition(ParseField field, Map definition) { + this.name = null; + Objects.requireNonNull(definition); + try { + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + builder.map(definition); + this.definition = Settings.builder().loadFromSource(Strings.toString(builder), builder.contentType()).build(); + } catch (IOException e) { + throw new IllegalArgumentException("Failed to parse [" + definition + "] in [" + field.getPreferredName() + "]", e); + } + } + + NameOrDefinition(StreamInput in) throws IOException { + name = in.readOptionalString(); + if (in.readBoolean()) { + definition = Settings.readSettingsFromStream(in); + } else { + definition = null; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalString(name); + boolean isNotNullDefinition = this.definition != null; + out.writeBoolean(isNotNullDefinition); + if (isNotNullDefinition) { + Settings.writeSettingsToStream(definition, out); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (definition == null) { + builder.value(name); + } else { + builder.startObject(); + definition.toXContent(builder, params); + builder.endObject(); + } + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + NameOrDefinition that = (NameOrDefinition) o; + return Objects.equals(name, that.name) && + Objects.equals(definition, that.definition); + } + + @Override + public int hashCode() { + return Objects.hash(name, definition); + } + + @Override + public String toString() { + if (definition == null) { + return name; + } else { + return definition.toDelimitedString(';'); + } + } + } + + private final String analyzer; + private final List charFilters; + private final NameOrDefinition tokenizer; + private final List tokenFilters; + + private CategorizationAnalyzerConfig(String analyzer, List charFilters, NameOrDefinition tokenizer, + List tokenFilters) { + this.analyzer = analyzer; + this.charFilters = Objects.requireNonNull(charFilters); + this.tokenizer = tokenizer; + this.tokenFilters = Objects.requireNonNull(tokenFilters); + } + + public CategorizationAnalyzerConfig(StreamInput in) throws IOException { + analyzer = in.readOptionalString(); + charFilters = in.readList(NameOrDefinition::new); + tokenizer = in.readOptionalWriteable(NameOrDefinition::new); + tokenFilters = in.readList(NameOrDefinition::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalString(analyzer); + out.writeList(charFilters); + out.writeOptionalWriteable(tokenizer); + out.writeList(tokenFilters); + } + + public String getAnalyzer() { + return analyzer; + } + + public List getCharFilters() { + return charFilters; + } + + public NameOrDefinition getTokenizer() { + return tokenizer; + } + + public List getTokenFilters() { + return tokenFilters; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (analyzer != null) { + builder.field(CATEGORIZATION_ANALYZER.getPreferredName(), analyzer); + } else { 
+ builder.startObject(CATEGORIZATION_ANALYZER.getPreferredName()); + if (charFilters.isEmpty() == false) { + builder.startArray(CHAR_FILTERS.getPreferredName()); + for (NameOrDefinition charFilter : charFilters) { + charFilter.toXContent(builder, params); + } + builder.endArray(); + } + if (tokenizer != null) { + builder.field(TOKENIZER.getPreferredName(), tokenizer); + } + if (tokenFilters.isEmpty() == false) { + builder.startArray(TOKEN_FILTERS.getPreferredName()); + for (NameOrDefinition tokenFilter : tokenFilters) { + tokenFilter.toXContent(builder, params); + } + builder.endArray(); + } + builder.endObject(); + } + return builder; + } + + /** + * Convert the config to an {@link Analyzer}. This may be a global analyzer or a newly created custom analyzer. + * In the case of a global analyzer the caller must NOT close it when they have finished with it. In the case of + * a newly created custom analyzer the caller is responsible for closing it. + * @return The first tuple member is the {@link Analyzer}; the second indicates whether the caller is responsible + * for closing it. + */ + public Tuple toAnalyzer(AnalysisRegistry analysisRegistry, Environment environment) throws IOException { + if (analyzer != null) { + Analyzer globalAnalyzer = analysisRegistry.getAnalyzer(analyzer); + if (globalAnalyzer == null) { + throw new IllegalArgumentException("Failed to find global analyzer [" + analyzer + "]"); + } + return new Tuple<>(globalAnalyzer, Boolean.FALSE); + } else { + List charFilterFactoryList = + parseCharFilterFactories(analysisRegistry, environment); + + Tuple tokenizerFactory = parseTokenizerFactory(analysisRegistry, + environment); + + List tokenFilterFactoryList = parseTokenFilterFactories(analysisRegistry, + environment, tokenizerFactory, charFilterFactoryList); + + return new Tuple<>(new CustomAnalyzer(tokenizerFactory.v1(), tokenizerFactory.v2(), + charFilterFactoryList.toArray(new CharFilterFactory[charFilterFactoryList.size()]), + tokenFilterFactoryList.toArray(new TokenFilterFactory[tokenFilterFactoryList.size()])), Boolean.TRUE); + } + } + + + /** + * Get char filter factories for each configured char filter. Each configuration + * element can be the name of an out-of-the-box char filter, or a custom definition. 
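+ * For example (illustrative, reusing the fragments from the class documentation),
+ * "html_strip" is a name, while { "type" : "pattern_replace", "pattern" : "SQL: .*" }
+ * is a custom definition.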
+ */ + private List parseCharFilterFactories(AnalysisRegistry analysisRegistry, + Environment environment) throws IOException { + final List charFilterFactoryList = new ArrayList<>(); + for (NameOrDefinition charFilter : charFilters) { + final CharFilterFactory charFilterFactory; + if (charFilter.name != null) { + AnalysisModule.AnalysisProvider charFilterFactoryFactory = + analysisRegistry.getCharFilterProvider(charFilter.name); + if (charFilterFactoryFactory == null) { + throw new IllegalArgumentException("Failed to find global char filter under [" + charFilter.name + "]"); + } + charFilterFactory = charFilterFactoryFactory.get(environment, charFilter.name); + } else { + String charFilterTypeName = charFilter.definition.get("type"); + if (charFilterTypeName == null) { + throw new IllegalArgumentException("Missing [type] setting for char filter: " + charFilter.definition); + } + AnalysisModule.AnalysisProvider charFilterFactoryFactory = + analysisRegistry.getCharFilterProvider(charFilterTypeName); + if (charFilterFactoryFactory == null) { + throw new IllegalArgumentException("Failed to find global char filter under [" + charFilterTypeName + "]"); + } + Settings settings = augmentSettings(charFilter.definition); + // Need to set anonymous "name" of char_filter + charFilterFactory = charFilterFactoryFactory.get(buildDummyIndexSettings(settings), environment, + "_anonymous_charfilter", settings); + } + if (charFilterFactory == null) { + throw new IllegalArgumentException("Failed to find char filter [" + charFilter + "]"); + } + charFilterFactoryList.add(charFilterFactory); + } + return charFilterFactoryList; + } + + /** + * Get the tokenizer factory for the configured tokenizer. The configuration + * can be the name of an out-of-the-box tokenizer, or a custom definition. + */ + private Tuple parseTokenizerFactory(AnalysisRegistry analysisRegistry, + Environment environment) throws IOException { + final String name; + final TokenizerFactory tokenizerFactory; + if (tokenizer.name != null) { + name = tokenizer.name; + AnalysisModule.AnalysisProvider tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(name); + if (tokenizerFactoryFactory == null) { + throw new IllegalArgumentException("Failed to find global tokenizer under [" + name + "]"); + } + tokenizerFactory = tokenizerFactoryFactory.get(environment, name); + } else { + String tokenizerTypeName = tokenizer.definition.get("type"); + if (tokenizerTypeName == null) { + throw new IllegalArgumentException("Missing [type] setting for tokenizer: " + tokenizer.definition); + } + AnalysisModule.AnalysisProvider tokenizerFactoryFactory = + analysisRegistry.getTokenizerProvider(tokenizerTypeName); + if (tokenizerFactoryFactory == null) { + throw new IllegalArgumentException("Failed to find global tokenizer under [" + tokenizerTypeName + "]"); + } + Settings settings = augmentSettings(tokenizer.definition); + // Need to set anonymous "name" of tokenizer + name = "_anonymous_tokenizer"; + tokenizerFactory = tokenizerFactoryFactory.get(buildDummyIndexSettings(settings), environment, name, settings); + } + return new Tuple<>(name, tokenizerFactory); + } + + /** + * Get token filter factories for each configured token filter. Each configuration + * element can be the name of an out-of-the-box token filter, or a custom definition. 
+ */ + private List parseTokenFilterFactories(AnalysisRegistry analysisRegistry, Environment environment, + Tuple tokenizerFactory, + List charFilterFactoryList) throws IOException { + final List tokenFilterFactoryList = new ArrayList<>(); + for (NameOrDefinition tokenFilter : tokenFilters) { + TokenFilterFactory tokenFilterFactory; + if (tokenFilter.name != null) { + AnalysisModule.AnalysisProvider tokenFilterFactoryFactory; + tokenFilterFactoryFactory = analysisRegistry.getTokenFilterProvider(tokenFilter.name); + if (tokenFilterFactoryFactory == null) { + throw new IllegalArgumentException("Failed to find global token filter under [" + tokenFilter.name + "]"); + } + tokenFilterFactory = tokenFilterFactoryFactory.get(environment, tokenFilter.name); + } else { + String filterTypeName = tokenFilter.definition.get("type"); + if (filterTypeName == null) { + throw new IllegalArgumentException("Missing [type] setting for token filter: " + tokenFilter.definition); + } + AnalysisModule.AnalysisProvider tokenFilterFactoryFactory = + analysisRegistry.getTokenFilterProvider(filterTypeName); + if (tokenFilterFactoryFactory == null) { + throw new IllegalArgumentException("Failed to find global token filter under [" + filterTypeName + "]"); + } + Settings settings = augmentSettings(tokenFilter.definition); + // Need to set anonymous "name" of token_filter + tokenFilterFactory = tokenFilterFactoryFactory.get(buildDummyIndexSettings(settings), environment, + "_anonymous_tokenfilter", settings); + tokenFilterFactory = CustomAnalyzerProvider.checkAndApplySynonymFilter(tokenFilterFactory, tokenizerFactory.v1(), + tokenizerFactory.v2(), tokenFilterFactoryList, charFilterFactoryList, environment); + } + if (tokenFilterFactory == null) { + throw new IllegalArgumentException("Failed to find or create token filter [" + tokenFilter + "]"); + } + tokenFilterFactoryList.add(tokenFilterFactory); + } + return tokenFilterFactoryList; + } + + /** + * The Elasticsearch analysis functionality is designed to work with indices. For + * categorization we have to pretend we've got some index settings. + */ + private IndexSettings buildDummyIndexSettings(Settings settings) { + IndexMetaData metaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(settings).build(); + return new IndexSettings(metaData, Settings.EMPTY); + } + + /** + * The behaviour of Elasticsearch analyzers can vary between versions. + * For categorization we'll always use the latest version of the text analysis. + * The other settings are just to stop classes that expect to be associated with + * an index from complaining. 
+ */ + private Settings augmentSettings(Settings settings) { + return Settings.builder().put(settings) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) + .build(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CategorizationAnalyzerConfig that = (CategorizationAnalyzerConfig) o; + return Objects.equals(analyzer, that.analyzer) && + Objects.equals(charFilters, that.charFilters) && + Objects.equals(tokenizer, that.tokenizer) && + Objects.equals(tokenFilters, that.tokenFilters); + } + + @Override + public int hashCode() { + return Objects.hash(analyzer, charFilters, tokenizer, tokenFilters); + } + + public static class Builder { + + private String analyzer; + private List charFilters = new ArrayList<>(); + private NameOrDefinition tokenizer; + private List tokenFilters = new ArrayList<>(); + + public Builder() { + } + + public Builder(CategorizationAnalyzerConfig categorizationAnalyzerConfig) { + this.analyzer = categorizationAnalyzerConfig.analyzer; + this.charFilters = new ArrayList<>(categorizationAnalyzerConfig.charFilters); + this.tokenizer = categorizationAnalyzerConfig.tokenizer; + this.tokenFilters = new ArrayList<>(categorizationAnalyzerConfig.tokenFilters); + } + + public Builder setAnalyzer(String analyzer) { + this.analyzer = analyzer; + return this; + } + + public Builder addCharFilter(String charFilter) { + this.charFilters.add(new NameOrDefinition(charFilter)); + return this; + } + + public Builder addCharFilter(Map charFilter) { + this.charFilters.add(new NameOrDefinition(CHAR_FILTERS, charFilter)); + return this; + } + + public Builder setTokenizer(String tokenizer) { + this.tokenizer = new NameOrDefinition(tokenizer); + return this; + } + + public Builder setTokenizer(Map tokenizer) { + this.tokenizer = new NameOrDefinition(TOKENIZER, tokenizer); + return this; + } + + public Builder addTokenFilter(String tokenFilter) { + this.tokenFilters.add(new NameOrDefinition(tokenFilter)); + return this; + } + + public Builder addTokenFilter(Map tokenFilter) { + this.tokenFilters.add(new NameOrDefinition(TOKEN_FILTERS, tokenFilter)); + return this; + } + + /** + * Create a config validating only structure, not exact analyzer/tokenizer/filter names + */ + public CategorizationAnalyzerConfig build() { + if (analyzer == null && tokenizer == null) { + throw new IllegalArgumentException(CATEGORIZATION_ANALYZER + " that is not a global analyzer must specify a [" + + TOKENIZER + "] field"); + } + if (analyzer != null && charFilters.isEmpty() == false) { + throw new IllegalArgumentException(CATEGORIZATION_ANALYZER + " that is a global analyzer cannot also specify a [" + + CHAR_FILTERS + "] field"); + } + if (analyzer != null && tokenizer != null) { + throw new IllegalArgumentException(CATEGORIZATION_ANALYZER + " that is a global analyzer cannot also specify a [" + + TOKENIZER + "] field"); + } + if (analyzer != null && tokenFilters.isEmpty() == false) { + throw new IllegalArgumentException(CATEGORIZATION_ANALYZER + " that is a global analyzer cannot also specify a [" + + TOKEN_FILTERS + "] field"); + } + return new CategorizationAnalyzerConfig(analyzer, charFilters, tokenizer, tokenFilters); + } + + /** + * Verify that the builder will build a valid config. 
This is not done as part of the basic build + * because it verifies that the names of analyzers/tokenizers/filters referenced by the config are + * known, and the validity of these names could change over time. + */ + public void verify(AnalysisRegistry analysisRegistry, Environment environment) throws IOException { + Tuple tuple = build().toAnalyzer(analysisRegistry, environment); + if (tuple.v2()) { + tuple.v1().close(); + } + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Condition.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Condition.java new file mode 100644 index 0000000000000..7d3074df0ae28 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Condition.java @@ -0,0 +1,132 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; +import java.util.regex.Pattern; +import java.util.regex.PatternSyntaxException; + +/** + * A class that describes a condition. + * The {@linkplain Operator} enum defines the available + * comparisons a condition can use. 
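As an aside on the `CategorizationAnalyzerConfig.Builder` defined above, a hedged usage sketch follows; the analysis component names (`html_strip`, `whitespace`, `truncate`, `lowercase`) are standard Elasticsearch ones used purely for illustration, and `analysisRegistry`/`environment` are assumed to be available:

```java
CategorizationAnalyzerConfig.Builder builder = new CategorizationAnalyzerConfig.Builder()
        .addCharFilter("html_strip")        // out-of-the-box component referenced by name
        .setTokenizer("whitespace")
        .addTokenFilter("lowercase");

// A custom (inline) definition is a map that must contain a "type" entry,
// mirroring index analysis settings; the values here are illustrative.
Map<String, Object> truncateFilter = new HashMap<>();
truncateFilter.put("type", "truncate");
truncateFilter.put("length", 100);
builder.addTokenFilter(truncateFilter);

CategorizationAnalyzerConfig config = builder.build();   // structural validation only
builder.verify(analysisRegistry, environment);            // also resolves the referenced names
```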
+ */ +public class Condition implements ToXContentObject, Writeable { + public static final ParseField CONDITION_FIELD = new ParseField("condition"); + public static final ParseField VALUE_FIELD = new ParseField("value"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + CONDITION_FIELD.getPreferredName(), a -> new Condition((Operator) a[0], (String) a[1])); + + static { + PARSER.declareField(ConstructingObjectParser.constructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return Operator.fromString(p.text()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, Operator.OPERATOR_FIELD, ValueType.STRING); + PARSER.declareField(ConstructingObjectParser.constructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return p.text(); + } + if (p.currentToken() == XContentParser.Token.VALUE_NULL) { + return null; + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, VALUE_FIELD, ValueType.STRING_OR_NULL); + } + + private final Operator op; + private final String value; + + public Condition(StreamInput in) throws IOException { + op = Operator.readFromStream(in); + value = in.readOptionalString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + op.writeTo(out); + out.writeOptionalString(value); + } + + public Condition(Operator op, String value) { + if (value == null) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.JOB_CONFIG_CONDITION_INVALID_VALUE_NULL)); + } + + if (op.expectsANumericArgument()) { + try { + Double.parseDouble(value); + } catch (NumberFormatException nfe) { + String msg = Messages.getMessage(Messages.JOB_CONFIG_CONDITION_INVALID_VALUE_NUMBER, value); + throw ExceptionsHelper.badRequestException(msg); + } + } else { + try { + Pattern.compile(value); + } catch (PatternSyntaxException e) { + String msg = Messages.getMessage(Messages.JOB_CONFIG_CONDITION_INVALID_VALUE_REGEX, value); + throw ExceptionsHelper.badRequestException(msg); + } + } + this.op = op; + this.value = value; + } + + public Operator getOperator() { + return op; + } + + public String getValue() { + return value; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Operator.OPERATOR_FIELD.getPreferredName(), op); + builder.field(VALUE_FIELD.getPreferredName(), value); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(op, value); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + + if (getClass() != obj.getClass()) { + return false; + } + + Condition other = (Condition) obj; + return Objects.equals(this.op, other.op) && + Objects.equals(this.value, other.value); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Connective.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Connective.java new file mode 100644 index 0000000000000..0b4ad010fdd32 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Connective.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
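The `Condition` constructor above validates the value against the operator; the sketch below illustrates that behaviour. The `Operator` constants are assumptions based on how the enum is used elsewhere (it is not part of this excerpt):

```java
// Numeric operators require a value that parses as a double; other operators compile the value as a regex.
Condition numeric = new Condition(Operator.LT, "5.0");          // accepted
Condition regex   = new Condition(Operator.MATCH, "error.*");   // accepted, compiled as a regular expression
// new Condition(Operator.LT, "five")  -> bad request: not a number
// new Condition(Operator.MATCH, "[")  -> bad request: invalid regular expression
// new Condition(Operator.LT, null)    -> bad request: value must not be null
```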
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Locale; + +public enum Connective implements Writeable { + OR, AND; + + /** + * Case-insensitive from string method. + * + * @param value + * String representation + * @return The connective type + */ + public static Connective fromString(String value) { + return Connective.valueOf(value.toUpperCase(Locale.ROOT)); + } + + public static Connective readFromStream(StreamInput in) throws IOException { + return in.readEnum(Connective.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(this); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DataDescription.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DataDescription.java new file mode 100644 index 0000000000000..9ff578be50b85 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DataDescription.java @@ -0,0 +1,382 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.MlParserType; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.time.DateTimeFormatterTimestampConverter; + +import java.io.IOException; +import java.time.ZoneOffset; +import java.util.EnumMap; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +/** + * Describes the format of the data used in the job and how it should + * be interpreted by autodetect. + *
+ * <p>
+ * Data must either be in a textual delineated format (e.g. csv, tsv) or JSON + * the {@linkplain DataFormat} enum indicates which. {@link #getTimeField()} + * is the name of the field containing the timestamp and {@link #getTimeFormat()} + * is the format code for the date string in as described by + * {@link java.time.format.DateTimeFormatter}. The default quote character for + * delineated formats is {@value #DEFAULT_QUOTE_CHAR} but any other character can be + * used. + */ +public class DataDescription implements ToXContentObject, Writeable { + /** + * Enum of the acceptable data formats. + */ + public enum DataFormat implements Writeable { + XCONTENT, + DELIMITED; + + /** + * Delimited used to be called delineated. We keep supporting that for backwards + * compatibility. + */ + private static final String DEPRECATED_DELINEATED = "DELINEATED"; + + /** + * Case-insensitive from string method. + * Works with either JSON, json, etc. + * + * @param value String representation + * @return The data format + */ + public static DataFormat forString(String value) { + String valueUpperCase = value.toUpperCase(Locale.ROOT); + return DEPRECATED_DELINEATED.equals(valueUpperCase) ? DELIMITED : DataFormat + .valueOf(valueUpperCase); + } + + public static DataFormat readFromStream(StreamInput in) throws IOException { + return in.readEnum(DataFormat.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(this); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } + + private static final ParseField DATA_DESCRIPTION_FIELD = new ParseField("data_description"); + private static final ParseField FORMAT_FIELD = new ParseField("format"); + private static final ParseField TIME_FIELD_NAME_FIELD = new ParseField("time_field"); + private static final ParseField TIME_FORMAT_FIELD = new ParseField("time_format"); + private static final ParseField FIELD_DELIMITER_FIELD = new ParseField("field_delimiter"); + private static final ParseField QUOTE_CHARACTER_FIELD = new ParseField("quote_character"); + + /** + * Special time format string for epoch times (seconds) + */ + public static final String EPOCH = "epoch"; + + /** + * Special time format string for epoch times (milli-seconds) + */ + public static final String EPOCH_MS = "epoch_ms"; + + /** + * By default autodetect expects the timestamp in a field with this name + */ + public static final String DEFAULT_TIME_FIELD = "time"; + + /** + * The default field delimiter expected by the native autodetect + * program. 
+ */ + public static final char DEFAULT_DELIMITER = '\t'; + + /** + * Csv data must have this line ending + */ + public static final char LINE_ENDING = '\n'; + + /** + * The default quote character used to escape text in + * delineated data formats + */ + public static final char DEFAULT_QUOTE_CHAR = '"'; + + private final DataFormat dataFormat; + private final String timeFieldName; + private final String timeFormat; + private final Character fieldDelimiter; + private final Character quoteCharacter; + + // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly + public static final ObjectParser METADATA_PARSER = + new ObjectParser<>(DATA_DESCRIPTION_FIELD.getPreferredName(), true, Builder::new); + public static final ObjectParser CONFIG_PARSER = + new ObjectParser<>(DATA_DESCRIPTION_FIELD.getPreferredName(), false, Builder::new); + public static final Map> PARSERS = new EnumMap<>(MlParserType.class); + + static { + PARSERS.put(MlParserType.METADATA, METADATA_PARSER); + PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER); + for (MlParserType parserType : MlParserType.values()) { + ObjectParser parser = PARSERS.get(parserType); + assert parser != null; + parser.declareString(Builder::setFormat, FORMAT_FIELD); + parser.declareString(Builder::setTimeField, TIME_FIELD_NAME_FIELD); + parser.declareString(Builder::setTimeFormat, TIME_FORMAT_FIELD); + parser.declareField(Builder::setFieldDelimiter, DataDescription::extractChar, FIELD_DELIMITER_FIELD, ValueType.STRING); + parser.declareField(Builder::setQuoteCharacter, DataDescription::extractChar, QUOTE_CHARACTER_FIELD, ValueType.STRING); + } + } + + public DataDescription(DataFormat dataFormat, String timeFieldName, String timeFormat, Character fieldDelimiter, + Character quoteCharacter) { + this.dataFormat = dataFormat; + this.timeFieldName = timeFieldName; + this.timeFormat = timeFormat; + this.fieldDelimiter = fieldDelimiter; + this.quoteCharacter = quoteCharacter; + } + + public DataDescription(StreamInput in) throws IOException { + dataFormat = DataFormat.readFromStream(in); + timeFieldName = in.readString(); + timeFormat = in.readString(); + fieldDelimiter = in.readBoolean() ? (char) in.read() : null; + quoteCharacter = in.readBoolean() ? (char) in.read() : null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + dataFormat.writeTo(out); + out.writeString(timeFieldName); + out.writeString(timeFormat); + if (fieldDelimiter != null) { + out.writeBoolean(true); + out.write(fieldDelimiter); + } else { + out.writeBoolean(false); + } + if (quoteCharacter != null) { + out.writeBoolean(true); + out.write(quoteCharacter); + } else { + out.writeBoolean(false); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (dataFormat != DataFormat.XCONTENT) { + builder.field(FORMAT_FIELD.getPreferredName(), dataFormat); + } + builder.field(TIME_FIELD_NAME_FIELD.getPreferredName(), timeFieldName); + builder.field(TIME_FORMAT_FIELD.getPreferredName(), timeFormat); + if (fieldDelimiter != null) { + builder.field(FIELD_DELIMITER_FIELD.getPreferredName(), String.valueOf(fieldDelimiter)); + } + if (quoteCharacter != null) { + builder.field(QUOTE_CHARACTER_FIELD.getPreferredName(), String.valueOf(quoteCharacter)); + } + builder.endObject(); + return builder; + } + + /** + * The format of the data to be processed. 
+ * Defaults to {@link DataDescription.DataFormat#XCONTENT} + * + * @return The data format + */ + public DataFormat getFormat() { + return dataFormat; + } + + /** + * The name of the field containing the timestamp + * + * @return A String if set or null + */ + public String getTimeField() { + return timeFieldName; + } + + /** + * Either {@value #EPOCH}, {@value #EPOCH_MS} or a SimpleDateTime format string. + * If not set (is null or an empty string) or set to + * {@value #EPOCH_MS} (the default) then the date is assumed to be in + * milliseconds from the epoch. + * + * @return A String if set or null + */ + public String getTimeFormat() { + return timeFormat; + } + + /** + * If the data is in a delineated format with a header e.g. csv or tsv + * this is the delimiter character used. This is only applicable if + * {@linkplain #getFormat()} is {@link DataDescription.DataFormat#DELIMITED}. + * The default value for delimited format is {@value #DEFAULT_DELIMITER}. + * + * @return A char + */ + public Character getFieldDelimiter() { + return fieldDelimiter; + } + + /** + * The quote character used in delineated formats. + * The default value for delimited format is {@value #DEFAULT_QUOTE_CHAR}. + * + * @return The delineated format quote character + */ + public Character getQuoteCharacter() { + return quoteCharacter; + } + + /** + * Returns true if the data described by this object needs + * transforming before processing by autodetect. + * A transformation must be applied if either a timeformat is + * not in seconds since the epoch or the data is in Json format. + * + * @return True if the data should be transformed. + */ + public boolean transform() { + return dataFormat == DataFormat.XCONTENT || isTransformTime(); + } + + /** + * Return true if the time is in a format that needs transforming. + * Anytime format this isn't {@value #EPOCH} or null + * needs transforming. + * + * @return True if the time field needs to be transformed. + */ + public boolean isTransformTime() { + return timeFormat != null && !EPOCH.equals(timeFormat); + } + + /** + * Return true if the time format is {@value #EPOCH_MS} + * + * @return True if the date is in milli-seconds since the epoch. 
+ */ + public boolean isEpochMs() { + return EPOCH_MS.equals(timeFormat); + } + + private static Character extractChar(XContentParser parser) throws IOException { + if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { + String charStr = parser.text(); + if (charStr.length() != 1) { + throw new IllegalArgumentException("String must be a single character, found [" + charStr + "]"); + } + return charStr.charAt(0); + } + throw new IllegalArgumentException("Unsupported token [" + parser.currentToken() + "]"); + } + + /** + * Overridden equality test + */ + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other instanceof DataDescription == false) { + return false; + } + + DataDescription that = (DataDescription) other; + + return this.dataFormat == that.dataFormat && + Objects.equals(this.quoteCharacter, that.quoteCharacter) && + Objects.equals(this.timeFieldName, that.timeFieldName) && + Objects.equals(this.timeFormat, that.timeFormat) && + Objects.equals(this.fieldDelimiter, that.fieldDelimiter); + } + + @Override + public int hashCode() { + return Objects.hash(dataFormat, quoteCharacter, timeFieldName, timeFormat, fieldDelimiter); + } + + public static class Builder { + + private DataFormat dataFormat = DataFormat.XCONTENT; + private String timeFieldName = DEFAULT_TIME_FIELD; + private String timeFormat = EPOCH_MS; + private Character fieldDelimiter; + private Character quoteCharacter; + + public void setFormat(DataFormat format) { + dataFormat = ExceptionsHelper.requireNonNull(format, FORMAT_FIELD.getPreferredName() + " must not be null"); + } + + private void setFormat(String format) { + setFormat(DataFormat.forString(format)); + } + + public void setTimeField(String fieldName) { + timeFieldName = ExceptionsHelper.requireNonNull(fieldName, TIME_FIELD_NAME_FIELD.getPreferredName() + " must not be null"); + } + + public void setTimeFormat(String format) { + ExceptionsHelper.requireNonNull(format, TIME_FORMAT_FIELD.getPreferredName() + " must not be null"); + switch (format) { + case EPOCH: + case EPOCH_MS: + break; + default: + try { + DateTimeFormatterTimestampConverter.ofPattern(format, ZoneOffset.UTC); + } catch (IllegalArgumentException e) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.JOB_CONFIG_INVALID_TIMEFORMAT, format)); + } + } + timeFormat = format; + } + + public void setFieldDelimiter(Character delimiter) { + fieldDelimiter = delimiter; + } + + public void setQuoteCharacter(Character value) { + quoteCharacter = value; + } + + public DataDescription build() { + if (dataFormat == DataFormat.DELIMITED) { + if (fieldDelimiter == null) { + fieldDelimiter = DEFAULT_DELIMITER; + } + if (quoteCharacter == null) { + quoteCharacter = DEFAULT_QUOTE_CHAR; + } + } + return new DataDescription(dataFormat, timeFieldName, timeFormat, fieldDelimiter, quoteCharacter); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DefaultDetectorDescription.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DefaultDetectorDescription.java new file mode 100644 index 0000000000000..ccd53aba55b83 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DefaultDetectorDescription.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
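A hedged sketch of the `DataDescription.Builder` defined above, describing delimited input with an `epoch_ms` timestamp; the field name and delimiter are illustrative:

```java
DataDescription.Builder dataDescription = new DataDescription.Builder();
dataDescription.setFormat(DataDescription.DataFormat.DELIMITED);
dataDescription.setTimeField("timestamp");
dataDescription.setTimeFormat(DataDescription.EPOCH_MS);
dataDescription.setFieldDelimiter(',');
// The quote character is left unset, so build() applies the DELIMITED default of '"'.
DataDescription description = dataDescription.build();
```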
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.xpack.core.ml.utils.MlStrings; + + +public final class DefaultDetectorDescription { + private static final String BY_TOKEN = " by "; + private static final String OVER_TOKEN = " over "; + + private static final String USE_NULL_OPTION = " usenull="; + private static final String PARTITION_FIELD_OPTION = " partitionfield="; + private static final String EXCLUDE_FREQUENT_OPTION = " excludefrequent="; + + private DefaultDetectorDescription() { + // do nothing + } + + /** + * Returns the default description for the given {@code detector} + * + * @param detector the {@code Detector} for which a default description is requested + * @return the default description + */ + public static String of(Detector detector) { + StringBuilder sb = new StringBuilder(); + appendOn(detector, sb); + return sb.toString(); + } + + /** + * Appends to the given {@code StringBuilder} the default description + * for the given {@code detector} + * + * @param detector the {@code Detector} for which a default description is requested + * @param sb the {@code StringBuilder} to append to + */ + public static void appendOn(Detector detector, StringBuilder sb) { + if (isNotNullOrEmpty(detector.getFunction().getFullName())) { + sb.append(detector.getFunction()); + if (isNotNullOrEmpty(detector.getFieldName())) { + sb.append('(').append(quoteField(detector.getFieldName())) + .append(')'); + } + } else if (isNotNullOrEmpty(detector.getFieldName())) { + sb.append(quoteField(detector.getFieldName())); + } + + if (isNotNullOrEmpty(detector.getByFieldName())) { + sb.append(BY_TOKEN).append(quoteField(detector.getByFieldName())); + } + + if (isNotNullOrEmpty(detector.getOverFieldName())) { + sb.append(OVER_TOKEN).append(quoteField(detector.getOverFieldName())); + } + + if (detector.isUseNull()) { + sb.append(USE_NULL_OPTION).append(detector.isUseNull()); + } + + if (isNotNullOrEmpty(detector.getPartitionFieldName())) { + sb.append(PARTITION_FIELD_OPTION).append(quoteField(detector.getPartitionFieldName())); + } + + if (detector.getExcludeFrequent() != null) { + sb.append(EXCLUDE_FREQUENT_OPTION).append(detector.getExcludeFrequent()); + } + } + + private static String quoteField(String field) { + return MlStrings.doubleQuoteIfNotAlphaNumeric(field); + } + + private static boolean isNotNullOrEmpty(String arg) { + return !Strings.isNullOrEmpty(arg); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java new file mode 100644 index 0000000000000..0948e978c886e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java @@ -0,0 +1,263 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
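To illustrate `DefaultDetectorDescription.of` above, a small sketch; the function and field names are illustrative, and `Detector.Builder` is defined later in this change:

```java
Detector.Builder detector = new Detector.Builder("mean", "responsetime");
detector.setByFieldName("airline");
// With no explicit description, the detector falls back to the generated default,
// which for this configuration is expected to be "mean(responsetime) by airline".
String description = DefaultDetectorDescription.of(detector.build());
```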
+ */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.Version; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.MlParserType; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.EnumMap; +import java.util.EnumSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +public class DetectionRule implements ToXContentObject, Writeable { + + public static final ParseField DETECTION_RULE_FIELD = new ParseField("detection_rule"); + public static final ParseField ACTIONS_FIELD = new ParseField("actions", "rule_action"); + public static final ParseField TARGET_FIELD_NAME_FIELD = new ParseField("target_field_name"); + public static final ParseField TARGET_FIELD_VALUE_FIELD = new ParseField("target_field_value"); + public static final ParseField CONDITIONS_CONNECTIVE_FIELD = new ParseField("conditions_connective"); + public static final ParseField CONDITIONS_FIELD = new ParseField("conditions", "rule_conditions"); + + // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly + public static final ObjectParser METADATA_PARSER = + new ObjectParser<>(DETECTION_RULE_FIELD.getPreferredName(), true, Builder::new); + public static final ObjectParser CONFIG_PARSER = + new ObjectParser<>(DETECTION_RULE_FIELD.getPreferredName(), false, Builder::new); + public static final Map> PARSERS = new EnumMap<>(MlParserType.class); + + static { + PARSERS.put(MlParserType.METADATA, METADATA_PARSER); + PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER); + for (MlParserType parserType : MlParserType.values()) { + ObjectParser parser = PARSERS.get(parserType); + assert parser != null; + parser.declareStringArray(Builder::setActions, ACTIONS_FIELD); + parser.declareString(Builder::setTargetFieldName, TARGET_FIELD_NAME_FIELD); + parser.declareString(Builder::setTargetFieldValue, TARGET_FIELD_VALUE_FIELD); + parser.declareField(Builder::setConditionsConnective, p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return Connective.fromString(p.text()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, CONDITIONS_CONNECTIVE_FIELD, ValueType.STRING); + parser.declareObjectArray(Builder::setConditions, (p, c) -> + RuleCondition.PARSERS.get(parserType).apply(p, c), CONDITIONS_FIELD); + } + } + + private final EnumSet actions; + private final String targetFieldName; + private final String targetFieldValue; + private final Connective conditionsConnective; + private final List conditions; + + private DetectionRule(EnumSet actions, @Nullable String targetFieldName, @Nullable String targetFieldValue, + Connective conditionsConnective, List 
conditions) { + this.actions = Objects.requireNonNull(actions); + this.targetFieldName = targetFieldName; + this.targetFieldValue = targetFieldValue; + this.conditionsConnective = Objects.requireNonNull(conditionsConnective); + this.conditions = Collections.unmodifiableList(conditions); + } + + public DetectionRule(StreamInput in) throws IOException { + actions = EnumSet.noneOf(RuleAction.class); + if (in.getVersion().before(Version.V_6_2_0)) { + actions.add(RuleAction.readFromStream(in)); + } else { + int actionsCount = in.readVInt(); + for (int i = 0; i < actionsCount; ++i) { + actions.add(RuleAction.readFromStream(in)); + } + } + + conditionsConnective = Connective.readFromStream(in); + int size = in.readVInt(); + conditions = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + conditions.add(new RuleCondition(in)); + } + targetFieldName = in.readOptionalString(); + targetFieldValue = in.readOptionalString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (out.getVersion().before(Version.V_6_2_0)) { + // Only filter_results is supported prior to 6.2.0 + RuleAction.FILTER_RESULTS.writeTo(out); + } else { + out.writeVInt(actions.size()); + for (RuleAction action : actions) { + action.writeTo(out); + } + } + + conditionsConnective.writeTo(out); + out.writeVInt(conditions.size()); + for (RuleCondition condition : conditions) { + condition.writeTo(out); + } + out.writeOptionalString(targetFieldName); + out.writeOptionalString(targetFieldValue); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ACTIONS_FIELD.getPreferredName(), actions); + builder.field(CONDITIONS_CONNECTIVE_FIELD.getPreferredName(), conditionsConnective); + builder.field(CONDITIONS_FIELD.getPreferredName(), conditions); + if (targetFieldName != null) { + builder.field(TARGET_FIELD_NAME_FIELD.getPreferredName(), targetFieldName); + } + if (targetFieldValue != null) { + builder.field(TARGET_FIELD_VALUE_FIELD.getPreferredName(), targetFieldValue); + } + builder.endObject(); + return builder; + } + + public EnumSet getActions() { + return actions; + } + + @Nullable + public String getTargetFieldName() { + return targetFieldName; + } + + @Nullable + public String getTargetFieldValue() { + return targetFieldValue; + } + + public Connective getConditionsConnective() { + return conditionsConnective; + } + + public List getConditions() { + return conditions; + } + + public Set extractReferencedFilters() { + return conditions.stream().map(RuleCondition::getFilterId).filter(Objects::nonNull).collect(Collectors.toSet()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj instanceof DetectionRule == false) { + return false; + } + + DetectionRule other = (DetectionRule) obj; + return Objects.equals(actions, other.actions) + && Objects.equals(targetFieldName, other.targetFieldName) + && Objects.equals(targetFieldValue, other.targetFieldValue) + && Objects.equals(conditionsConnective, other.conditionsConnective) + && Objects.equals(conditions, other.conditions); + } + + @Override + public int hashCode() { + return Objects.hash(actions, targetFieldName, targetFieldValue, conditionsConnective, conditions); + } + + public static class Builder { + private EnumSet actions = EnumSet.of(RuleAction.FILTER_RESULTS); + private String targetFieldName; + private String targetFieldValue; + private Connective conditionsConnective = Connective.OR; + 
private List conditions = Collections.emptyList(); + + public Builder(List conditions) { + this.conditions = ExceptionsHelper.requireNonNull(conditions, CONDITIONS_FIELD.getPreferredName()); + } + + private Builder() { + } + + public Builder setActions(List actions) { + this.actions.clear(); + actions.stream().map(RuleAction::fromString).forEach(this.actions::add); + return this; + } + + public Builder setActions(EnumSet actions) { + this.actions = Objects.requireNonNull(actions, ACTIONS_FIELD.getPreferredName()); + return this; + } + + public Builder setActions(RuleAction... actions) { + this.actions.clear(); + Arrays.stream(actions).forEach(this.actions::add); + return this; + } + + public Builder setTargetFieldName(String targetFieldName) { + this.targetFieldName = targetFieldName; + return this; + } + + public Builder setTargetFieldValue(String targetFieldValue) { + this.targetFieldValue = targetFieldValue; + return this; + } + + public Builder setConditionsConnective(Connective connective) { + this.conditionsConnective = ExceptionsHelper.requireNonNull(connective, CONDITIONS_CONNECTIVE_FIELD.getPreferredName()); + return this; + } + + public Builder setConditions(List conditions) { + this.conditions = ExceptionsHelper.requireNonNull(conditions, CONDITIONS_FIELD.getPreferredName()); + return this; + } + + public DetectionRule build() { + if (targetFieldValue != null && targetFieldName == null) { + String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_MISSING_TARGET_FIELD_NAME, targetFieldValue); + throw ExceptionsHelper.badRequestException(msg); + } + if (conditions == null || conditions.isEmpty()) { + String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_REQUIRES_AT_LEAST_ONE_CONDITION); + throw ExceptionsHelper.badRequestException(msg); + } + for (RuleCondition condition : conditions) { + if (condition.getType().isCategorical() && targetFieldName != null) { + String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_CONDITION_CATEGORICAL_INVALID_OPTION, + DetectionRule.TARGET_FIELD_NAME_FIELD.getPreferredName()); + throw ExceptionsHelper.badRequestException(msg); + } + } + return new DetectionRule(actions, targetFieldName, targetFieldValue, conditionsConnective, conditions); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java new file mode 100644 index 0000000000000..e5cf4b16f6e73 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java @@ -0,0 +1,786 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
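The `DetectionRule.Builder.build()` validation above requires at least one condition and forbids a `target_field_value` without a `target_field_name`. A hedged construction sketch; `conditions` is assumed to be a non-empty `List<RuleCondition>` built elsewhere, since `RuleCondition` is not shown in this excerpt:

```java
DetectionRule rule = new DetectionRule.Builder(conditions)
        .setActions(RuleAction.FILTER_RESULTS)      // the default action; shown here for clarity
        .setConditionsConnective(Connective.AND)    // all conditions must hold
        .build();
```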
+ */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.MlParserType; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.writer.RecordWriter; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.EnumMap; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + + +/** + * Defines the fields to be used in the analysis. + * fieldname must be set and only one of byFieldName + * and overFieldName should be set. + */ +public class Detector implements ToXContentObject, Writeable { + + public enum ExcludeFrequent implements Writeable { + ALL, + NONE, + BY, + OVER; + + /** + * Case-insensitive from string method. + * Works with either JSON, json, etc. + * + * @param value String representation + * @return The data format + */ + public static ExcludeFrequent forString(String value) { + return valueOf(value.toUpperCase(Locale.ROOT)); + } + + public static ExcludeFrequent readFromStream(StreamInput in) throws IOException { + return in.readEnum(ExcludeFrequent.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(this); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } + + public static final ParseField DETECTOR_DESCRIPTION_FIELD = new ParseField("detector_description"); + public static final ParseField FUNCTION_FIELD = new ParseField("function"); + public static final ParseField FIELD_NAME_FIELD = new ParseField("field_name"); + public static final ParseField BY_FIELD_NAME_FIELD = new ParseField("by_field_name"); + public static final ParseField OVER_FIELD_NAME_FIELD = new ParseField("over_field_name"); + public static final ParseField PARTITION_FIELD_NAME_FIELD = new ParseField("partition_field_name"); + public static final ParseField USE_NULL_FIELD = new ParseField("use_null"); + public static final ParseField EXCLUDE_FREQUENT_FIELD = new ParseField("exclude_frequent"); + // TODO: Remove the deprecated detector_rules setting in 7.0 + public static final ParseField RULES_FIELD = new ParseField("rules", "detector_rules"); + public static final ParseField DETECTOR_INDEX = new ParseField("detector_index"); + + // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly + public static final ObjectParser METADATA_PARSER = new ObjectParser<>("detector", true, Builder::new); + public static final ObjectParser CONFIG_PARSER = new ObjectParser<>("detector", false, Builder::new); + public static 
final Map> PARSERS = new EnumMap<>(MlParserType.class); + + static { + PARSERS.put(MlParserType.METADATA, METADATA_PARSER); + PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER); + for (MlParserType parserType : MlParserType.values()) { + ObjectParser parser = PARSERS.get(parserType); + assert parser != null; + parser.declareString(Builder::setDetectorDescription, DETECTOR_DESCRIPTION_FIELD); + parser.declareString(Builder::setFunction, FUNCTION_FIELD); + parser.declareString(Builder::setFieldName, FIELD_NAME_FIELD); + parser.declareString(Builder::setByFieldName, BY_FIELD_NAME_FIELD); + parser.declareString(Builder::setOverFieldName, OVER_FIELD_NAME_FIELD); + parser.declareString(Builder::setPartitionFieldName, PARTITION_FIELD_NAME_FIELD); + parser.declareBoolean(Builder::setUseNull, USE_NULL_FIELD); + parser.declareField(Builder::setExcludeFrequent, p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return ExcludeFrequent.forString(p.text()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, EXCLUDE_FREQUENT_FIELD, ObjectParser.ValueType.STRING); + parser.declareObjectArray(Builder::setRules, (p, c) -> + DetectionRule.PARSERS.get(parserType).apply(p, c).build(), RULES_FIELD); + parser.declareInt(Builder::setDetectorIndex, DETECTOR_INDEX); + } + } + + public static final String BY = "by"; + public static final String OVER = "over"; + + /** + * The set of functions that do not require a field, by field or over field + */ + public static final EnumSet COUNT_WITHOUT_FIELD_FUNCTIONS = EnumSet.of( + DetectorFunction.COUNT, + DetectorFunction.HIGH_COUNT, + DetectorFunction.LOW_COUNT, + DetectorFunction.NON_ZERO_COUNT, + DetectorFunction.LOW_NON_ZERO_COUNT, + DetectorFunction.HIGH_NON_ZERO_COUNT, + DetectorFunction.TIME_OF_DAY, + DetectorFunction.TIME_OF_WEEK + ); + + /** + * The set of functions that require a fieldname + */ + public static final EnumSet FIELD_NAME_FUNCTIONS = EnumSet.of( + DetectorFunction.DISTINCT_COUNT, + DetectorFunction.LOW_DISTINCT_COUNT, + DetectorFunction.HIGH_DISTINCT_COUNT, + DetectorFunction.INFO_CONTENT, + DetectorFunction.LOW_INFO_CONTENT, + DetectorFunction.HIGH_INFO_CONTENT, + DetectorFunction.METRIC, + DetectorFunction.MEAN, DetectorFunction.AVG, + DetectorFunction.HIGH_MEAN, DetectorFunction.HIGH_AVG, + DetectorFunction.LOW_MEAN, DetectorFunction.LOW_AVG, + DetectorFunction.MEDIAN, + DetectorFunction.LOW_MEDIAN, + DetectorFunction.HIGH_MEDIAN, + DetectorFunction.MIN, + DetectorFunction.MAX, + DetectorFunction.SUM, + DetectorFunction.LOW_SUM, + DetectorFunction.HIGH_SUM, + DetectorFunction.NON_NULL_SUM, + DetectorFunction.LOW_NON_NULL_SUM, + DetectorFunction.HIGH_NON_NULL_SUM, + DetectorFunction.VARP, + DetectorFunction.LOW_VARP, + DetectorFunction.HIGH_VARP, + DetectorFunction.LAT_LONG + ); + + /** + * The set of functions that require a by fieldname + */ + public static final EnumSet BY_FIELD_NAME_FUNCTIONS = EnumSet.of( + DetectorFunction.RARE, + DetectorFunction.FREQ_RARE + ); + + /** + * The set of functions that require a over fieldname + */ + public static final EnumSet OVER_FIELD_NAME_FUNCTIONS = EnumSet.of( + DetectorFunction.FREQ_RARE + ); + + /** + * The set of functions that cannot have an over fieldname + */ + public static final EnumSet NO_OVER_FIELD_NAME_FUNCTIONS = EnumSet.of( + DetectorFunction.NON_ZERO_COUNT, + DetectorFunction.LOW_NON_ZERO_COUNT, + DetectorFunction.HIGH_NON_ZERO_COUNT + ); + + /** + * The set of functions that must not be used with overlapping buckets + */ + public 
static final EnumSet NO_OVERLAPPING_BUCKETS_FUNCTIONS = EnumSet.of( + DetectorFunction.RARE, + DetectorFunction.FREQ_RARE + ); + + /** + * The set of functions that should not be used with overlapping buckets + * as they gain no benefit but have overhead + */ + public static final EnumSet OVERLAPPING_BUCKETS_FUNCTIONS_NOT_NEEDED = EnumSet.of( + DetectorFunction.MIN, + DetectorFunction.MAX, + DetectorFunction.TIME_OF_DAY, + DetectorFunction.TIME_OF_WEEK + ); + + /** + * field names cannot contain any of these characters + * ", \ + */ + public static final Character[] PROHIBITED_FIELDNAME_CHARACTERS = {'"', '\\'}; + public static final String PROHIBITED = String.join(",", + Arrays.stream(PROHIBITED_FIELDNAME_CHARACTERS).map( + c -> Character.toString(c)).collect(Collectors.toList())); + + + private final String detectorDescription; + private final DetectorFunction function; + private final String fieldName; + private final String byFieldName; + private final String overFieldName; + private final String partitionFieldName; + private final boolean useNull; + private final ExcludeFrequent excludeFrequent; + private final List rules; + private final int detectorIndex; + + public Detector(StreamInput in) throws IOException { + detectorDescription = in.readString(); + function = DetectorFunction.fromString(in.readString()); + fieldName = in.readOptionalString(); + byFieldName = in.readOptionalString(); + overFieldName = in.readOptionalString(); + partitionFieldName = in.readOptionalString(); + useNull = in.readBoolean(); + excludeFrequent = in.readBoolean() ? ExcludeFrequent.readFromStream(in) : null; + rules = in.readList(DetectionRule::new); + if (in.getVersion().onOrAfter(Version.V_5_5_0)) { + detectorIndex = in.readInt(); + } else { + // negative means unknown, and is expected for 5.4 jobs + detectorIndex = -1; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(detectorDescription); + out.writeString(function.getFullName()); + out.writeOptionalString(fieldName); + out.writeOptionalString(byFieldName); + out.writeOptionalString(overFieldName); + out.writeOptionalString(partitionFieldName); + out.writeBoolean(useNull); + if (excludeFrequent != null) { + out.writeBoolean(true); + excludeFrequent.writeTo(out); + } else { + out.writeBoolean(false); + } + out.writeList(rules); + if (out.getVersion().onOrAfter(Version.V_5_5_0)) { + out.writeInt(detectorIndex); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(DETECTOR_DESCRIPTION_FIELD.getPreferredName(), detectorDescription); + builder.field(FUNCTION_FIELD.getPreferredName(), function); + if (fieldName != null) { + builder.field(FIELD_NAME_FIELD.getPreferredName(), fieldName); + } + if (byFieldName != null) { + builder.field(BY_FIELD_NAME_FIELD.getPreferredName(), byFieldName); + } + if (overFieldName != null) { + builder.field(OVER_FIELD_NAME_FIELD.getPreferredName(), overFieldName); + } + if (partitionFieldName != null) { + builder.field(PARTITION_FIELD_NAME_FIELD.getPreferredName(), partitionFieldName); + } + if (useNull) { + builder.field(USE_NULL_FIELD.getPreferredName(), useNull); + } + if (excludeFrequent != null) { + builder.field(EXCLUDE_FREQUENT_FIELD.getPreferredName(), excludeFrequent); + } + if (rules.isEmpty() == false) { + builder.field(RULES_FIELD.getPreferredName(), rules); + } + // negative means "unknown", which should only happen for a 5.4 job + if (detectorIndex >= 0 + // 
no point writing this to cluster state, as the indexes will get reassigned on reload anyway + && params.paramAsBoolean(ToXContentParams.FOR_CLUSTER_STATE, false) == false) { + builder.field(DETECTOR_INDEX.getPreferredName(), detectorIndex); + } + builder.endObject(); + return builder; + } + + private Detector(String detectorDescription, DetectorFunction function, String fieldName, String byFieldName, String overFieldName, + String partitionFieldName, boolean useNull, ExcludeFrequent excludeFrequent, List rules, + int detectorIndex) { + this.function = function; + this.fieldName = fieldName; + this.byFieldName = byFieldName; + this.overFieldName = overFieldName; + this.partitionFieldName = partitionFieldName; + this.useNull = useNull; + this.excludeFrequent = excludeFrequent; + this.rules = Collections.unmodifiableList(rules); + this.detectorDescription = detectorDescription != null ? detectorDescription : DefaultDetectorDescription.of(this); + this.detectorIndex = detectorIndex; + } + + public String getDetectorDescription() { + return detectorDescription; + } + + /** + * The analysis function used e.g. count, rare, min etc. + * + * @return The function or null if not set + */ + public DetectorFunction getFunction() { + return function; + } + + /** + * The Analysis field + * + * @return The field to analyse + */ + public String getFieldName() { + return fieldName; + } + + /** + * The 'by' field or null if not set. + * + * @return The 'by' field + */ + public String getByFieldName() { + return byFieldName; + } + + /** + * The 'over' field or null if not set. + * + * @return The 'over' field + */ + public String getOverFieldName() { + return overFieldName; + } + + /** + * Segments the analysis along another field to have completely + * independent baselines for each instance of partitionfield + * + * @return The Partition Field + */ + public String getPartitionFieldName() { + return partitionFieldName; + } + + /** + * Where there isn't a value for the 'by' or 'over' field should a new + * series be used as the 'null' series. + * + * @return true if the 'null' series should be created + */ + public boolean isUseNull() { + return useNull; + } + + /** + * Excludes frequently-occuring metrics from the analysis; + * can apply to 'by' field, 'over' field, or both + * + * @return the value that the user set + */ + public ExcludeFrequent getExcludeFrequent() { + return excludeFrequent; + } + + public List getRules() { + return rules; + } + + /** + * @return the detector index or a negative number if unknown + */ + public int getDetectorIndex() { + return detectorIndex; + } + + /** + * Returns a list with the byFieldName, overFieldName and partitionFieldName that are not null + * + * @return a list with the byFieldName, overFieldName and partitionFieldName that are not null + */ + public List extractAnalysisFields() { + List analysisFields = Arrays.asList(getByFieldName(), + getOverFieldName(), getPartitionFieldName()); + return analysisFields.stream().filter(item -> item != null).collect(Collectors.toList()); + } + + public Set extractReferencedFilters() { + return rules == null ? 
Collections.emptySet() + : rules.stream().map(DetectionRule::extractReferencedFilters) + .flatMap(Set::stream).collect(Collectors.toSet()); + } + + /** + * Returns the set of by/over/partition terms + */ + public Set getByOverPartitionTerms() { + Set terms = new HashSet<>(); + if (byFieldName != null) { + terms.add(byFieldName); + } + if (overFieldName != null) { + terms.add(overFieldName); + } + if (partitionFieldName != null) { + terms.add(partitionFieldName); + } + return terms; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other instanceof Detector == false) { + return false; + } + + Detector that = (Detector) other; + + return Objects.equals(this.detectorDescription, that.detectorDescription) && + Objects.equals(this.function, that.function) && + Objects.equals(this.fieldName, that.fieldName) && + Objects.equals(this.byFieldName, that.byFieldName) && + Objects.equals(this.overFieldName, that.overFieldName) && + Objects.equals(this.partitionFieldName, that.partitionFieldName) && + Objects.equals(this.useNull, that.useNull) && + Objects.equals(this.excludeFrequent, that.excludeFrequent) && + Objects.equals(this.rules, that.rules) && + this.detectorIndex == that.detectorIndex; + } + + @Override + public int hashCode() { + return Objects.hash(detectorDescription, function, fieldName, byFieldName, overFieldName, partitionFieldName, useNull, + excludeFrequent, rules, detectorIndex); + } + + public static class Builder { + + /** + * Functions that do not support rules: + *

+ * <ul>
+ * <li>lat_long - because it is a multivariate feature</li>
+ * <li>metric - because having the same conditions on min,max,mean is
+ * error-prone</li>
+ * </ul>
+ */ + static final EnumSet FUNCTIONS_WITHOUT_RULE_SUPPORT = EnumSet.of( + DetectorFunction.LAT_LONG, DetectorFunction.METRIC); + + private String detectorDescription; + private DetectorFunction function; + private String fieldName; + private String byFieldName; + private String overFieldName; + private String partitionFieldName; + private boolean useNull = false; + private ExcludeFrequent excludeFrequent; + private List rules = Collections.emptyList(); + // negative means unknown, and is expected for v5.4 jobs + private int detectorIndex = -1; + + public Builder() { + } + + public Builder(Detector detector) { + detectorDescription = detector.detectorDescription; + function = detector.function; + fieldName = detector.fieldName; + byFieldName = detector.byFieldName; + overFieldName = detector.overFieldName; + partitionFieldName = detector.partitionFieldName; + useNull = detector.useNull; + excludeFrequent = detector.excludeFrequent; + rules = new ArrayList<>(detector.getRules()); + detectorIndex = detector.detectorIndex; + } + + public Builder(String function, String fieldName) { + this(DetectorFunction.fromString(function), fieldName); + } + + public Builder(DetectorFunction function, String fieldName) { + this.function = function; + this.fieldName = fieldName; + } + + public void setDetectorDescription(String detectorDescription) { + this.detectorDescription = detectorDescription; + } + + public void setFunction(String function) { + this.function = DetectorFunction.fromString(function); + } + + public void setFieldName(String fieldName) { + this.fieldName = fieldName; + } + + public void setByFieldName(String byFieldName) { + this.byFieldName = byFieldName; + } + + public void setOverFieldName(String overFieldName) { + this.overFieldName = overFieldName; + } + + public void setPartitionFieldName(String partitionFieldName) { + this.partitionFieldName = partitionFieldName; + } + + public void setUseNull(boolean useNull) { + this.useNull = useNull; + } + + public void setExcludeFrequent(ExcludeFrequent excludeFrequent) { + this.excludeFrequent = excludeFrequent; + } + + public void setRules(List rules) { + this.rules = rules; + } + + public void setDetectorIndex(int detectorIndex) { + this.detectorIndex = detectorIndex; + } + + public Detector build() { + boolean emptyField = Strings.isEmpty(fieldName); + boolean emptyByField = Strings.isEmpty(byFieldName); + boolean emptyOverField = Strings.isEmpty(overFieldName); + boolean emptyPartitionField = Strings.isEmpty(partitionFieldName); + + if (emptyField && emptyByField && emptyOverField) { + if (!Detector.COUNT_WITHOUT_FIELD_FUNCTIONS.contains(function)) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.JOB_CONFIG_ANALYSIS_FIELD_MUST_BE_SET)); + } + } + + // check functions have required fields + + if (emptyField && Detector.FIELD_NAME_FUNCTIONS.contains(function)) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.JOB_CONFIG_FUNCTION_REQUIRES_FIELDNAME, function)); + } + + if (!emptyField && (Detector.FIELD_NAME_FUNCTIONS.contains(function) == false)) { + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.JOB_CONFIG_FIELDNAME_INCOMPATIBLE_FUNCTION, function)); + } + + if (emptyByField && Detector.BY_FIELD_NAME_FUNCTIONS.contains(function)) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.JOB_CONFIG_FUNCTION_REQUIRES_BYFIELD, function)); + } + + if (emptyOverField && Detector.OVER_FIELD_NAME_FUNCTIONS.contains(function)) { + throw 
ExceptionsHelper.badRequestException(Messages.getMessage(Messages.JOB_CONFIG_FUNCTION_REQUIRES_OVERFIELD, function)); + } + + if (!emptyOverField && Detector.NO_OVER_FIELD_NAME_FUNCTIONS.contains(function)) { + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.JOB_CONFIG_OVERFIELD_INCOMPATIBLE_FUNCTION, function)); + } + + // field names cannot contain certain characters + String[] fields = { fieldName, byFieldName, overFieldName, partitionFieldName }; + for (String field : fields) { + verifyFieldName(field); + } + + DetectorFunction function = this.function == null ? DetectorFunction.METRIC : this.function; + if (rules.isEmpty() == false) { + if (FUNCTIONS_WITHOUT_RULE_SUPPORT.contains(function)) { + String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_NOT_SUPPORTED_BY_FUNCTION, function); + throw ExceptionsHelper.badRequestException(msg); + } + for (DetectionRule rule : rules) { + checkScoping(rule); + } + } + + // partition, by and over field names cannot be duplicates + if (!emptyPartitionField) { + if (partitionFieldName.equals(byFieldName)) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.JOB_CONFIG_DETECTOR_DUPLICATE_FIELD_NAME, + PARTITION_FIELD_NAME_FIELD.getPreferredName(), BY_FIELD_NAME_FIELD.getPreferredName(), + partitionFieldName)); + } + if (partitionFieldName.equals(overFieldName)) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.JOB_CONFIG_DETECTOR_DUPLICATE_FIELD_NAME, + PARTITION_FIELD_NAME_FIELD.getPreferredName(), OVER_FIELD_NAME_FIELD.getPreferredName(), + partitionFieldName)); + } + } + if (!emptyByField && byFieldName.equals(overFieldName)) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.JOB_CONFIG_DETECTOR_DUPLICATE_FIELD_NAME, + BY_FIELD_NAME_FIELD.getPreferredName(), OVER_FIELD_NAME_FIELD.getPreferredName(), + byFieldName)); + } + + // by/over field names cannot be "count", "over', "by" - this requirement dates back to the early + // days of the ML code and could be removed now BUT ONLY IF THE C++ CODE IS CHANGED + // FIRST - see https://github.com/elastic/x-pack-elasticsearch/issues/858 + if (DetectorFunction.COUNT.getFullName().equals(byFieldName)) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.JOB_CONFIG_DETECTOR_COUNT_DISALLOWED, + BY_FIELD_NAME_FIELD.getPreferredName())); + } + if (DetectorFunction.COUNT.getFullName().equals(overFieldName)) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.JOB_CONFIG_DETECTOR_COUNT_DISALLOWED, + OVER_FIELD_NAME_FIELD.getPreferredName())); + } + + if (BY.equals(byFieldName)) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.JOB_CONFIG_DETECTOR_BY_DISALLOWED, + BY_FIELD_NAME_FIELD.getPreferredName())); + } + if (BY.equals(overFieldName)) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.JOB_CONFIG_DETECTOR_BY_DISALLOWED, + OVER_FIELD_NAME_FIELD.getPreferredName())); + } + + if (OVER.equals(byFieldName)) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.JOB_CONFIG_DETECTOR_OVER_DISALLOWED, + BY_FIELD_NAME_FIELD.getPreferredName())); + } + if (OVER.equals(overFieldName)) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.JOB_CONFIG_DETECTOR_OVER_DISALLOWED, + OVER_FIELD_NAME_FIELD.getPreferredName())); + } + + return new Detector(detectorDescription, function, fieldName, byFieldName, overFieldName, partitionFieldName, + useNull, excludeFrequent, 
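The build() method above enforces a number of cross-field constraints before a Detector is constructed: partition, by and over field names must be distinct, and the by/over fields may not be named "count", "by" or "over" (a restriction kept for compatibility with the C++ backend). The stand-alone sketch below mirrors those checks in plain Java; it is illustrative only, the class and method names are invented for the example, and it does not use the x-pack Messages/ExceptionsHelper plumbing used above.

```java
import java.util.Arrays;
import java.util.List;

// Minimal sketch of the cross-field checks performed by Detector.Builder#build():
// partition_field_name, by_field_name and over_field_name must be distinct, and
// by/over may not use the reserved words "count", "by" or "over".
public final class DetectorFieldChecks {

    private static final List<String> RESERVED = Arrays.asList("count", "by", "over");

    static void check(String partitionField, String byField, String overField) {
        requireDistinct("partition_field_name", partitionField, "by_field_name", byField);
        requireDistinct("partition_field_name", partitionField, "over_field_name", overField);
        requireDistinct("by_field_name", byField, "over_field_name", overField);
        requireNotReserved("by_field_name", byField);
        requireNotReserved("over_field_name", overField);
    }

    private static void requireDistinct(String nameA, String valueA, String nameB, String valueB) {
        if (valueA != null && valueA.equals(valueB)) {
            throw new IllegalArgumentException(nameA + " and " + nameB + " must not both be '" + valueA + "'");
        }
    }

    private static void requireNotReserved(String name, String value) {
        if (value != null && RESERVED.contains(value)) {
            throw new IllegalArgumentException(name + " must not be '" + value + "'");
        }
    }

    public static void main(String[] args) {
        check("airline", "clientip", "country");       // passes: all distinct, none reserved
        try {
            check("airline", "airline", "country");    // rejected: duplicate partition/by
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
        try {
            check(null, "count", "country");           // rejected: reserved by_field_name
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
```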
rules, detectorIndex); + } + + public List extractAnalysisFields() { + List analysisFields = Arrays.asList(byFieldName, overFieldName, partitionFieldName); + return analysisFields.stream().filter(item -> item != null).collect(Collectors.toList()); + } + + /** + * Check that the characters used in a field name will not cause problems. + * + * @param field The field name to be validated + */ + public static void verifyFieldName(String field) throws ElasticsearchParseException { + if (field != null && containsInvalidChar(field)) { + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.JOB_CONFIG_INVALID_FIELDNAME_CHARS, field, Detector.PROHIBITED)); + } + if (RecordWriter.CONTROL_FIELD_NAME.equals(field)) { + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.JOB_CONFIG_INVALID_FIELDNAME, field, RecordWriter.CONTROL_FIELD_NAME)); + } + } + + private static boolean containsInvalidChar(String field) { + for (Character ch : Detector.PROHIBITED_FIELDNAME_CHARACTERS) { + if (field.indexOf(ch) >= 0) { + return true; + } + } + return field.chars().anyMatch(Character::isISOControl); + } + + private void checkScoping(DetectionRule rule) throws ElasticsearchParseException { + String targetFieldName = rule.getTargetFieldName(); + checkTargetFieldNameIsValid(extractAnalysisFields(), targetFieldName); + for (RuleCondition condition : rule.getConditions()) { + List validOptions = Collections.emptyList(); + switch (condition.getType()) { + case CATEGORICAL: + case CATEGORICAL_COMPLEMENT: + validOptions = extractAnalysisFields(); + break; + case NUMERICAL_ACTUAL: + case NUMERICAL_TYPICAL: + case NUMERICAL_DIFF_ABS: + validOptions = getValidFieldNameOptionsForNumeric(rule); + break; + case TIME: + default: + break; + } + if (!validOptions.contains(condition.getFieldName())) { + String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_CONDITION_INVALID_FIELD_NAME, validOptions, + condition.getFieldName()); + throw ExceptionsHelper.badRequestException(msg); + } + } + } + + private void checkTargetFieldNameIsValid(List analysisFields, String targetFieldName) + throws ElasticsearchParseException { + if (targetFieldName != null && !analysisFields.contains(targetFieldName)) { + String msg = + Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_INVALID_TARGET_FIELD_NAME, analysisFields, targetFieldName); + throw ExceptionsHelper.badRequestException(msg); + } + } + + private List getValidFieldNameOptionsForNumeric(DetectionRule rule) { + List result = new ArrayList<>(); + if (overFieldName != null) { + result.add(byFieldName == null ? 
overFieldName : byFieldName); + } else if (byFieldName != null) { + result.add(byFieldName); + } + + if (rule.getTargetFieldName() != null) { + ScopingLevel targetLevel = ScopingLevel.from(this, rule.getTargetFieldName()); + result = result.stream().filter(field -> targetLevel.isHigherThan(ScopingLevel.from(this, field))) + .collect(Collectors.toList()); + } + + if (isEmptyFieldNameAllowed(rule)) { + result.add(null); + } + return result; + } + + private boolean isEmptyFieldNameAllowed(DetectionRule rule) { + List analysisFields = extractAnalysisFields(); + return analysisFields.isEmpty() || (rule.getTargetFieldName() != null && analysisFields.size() == 1); + } + + enum ScopingLevel { + PARTITION(3), + OVER(2), + BY(1); + + int level; + + ScopingLevel(int level) { + this.level = level; + } + + boolean isHigherThan(ScopingLevel other) { + return level > other.level; + } + + static ScopingLevel from(Detector.Builder detector, String fieldName) { + if (fieldName.equals(detector.partitionFieldName)) { + return ScopingLevel.PARTITION; + } + if (fieldName.equals(detector.overFieldName)) { + return ScopingLevel.OVER; + } + if (fieldName.equals(detector.byFieldName)) { + return ScopingLevel.BY; + } + throw ExceptionsHelper.badRequestException( + "fieldName '" + fieldName + "' does not match an analysis field"); + } + } + + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectorFunction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectorFunction.java new file mode 100644 index 0000000000000..1f819af3f59e7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectorFunction.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.xpack.core.ml.job.messages.Messages; + +import java.util.Arrays; +import java.util.Collections; +import java.util.Locale; +import java.util.Set; +import java.util.stream.Collectors; + +public enum DetectorFunction { + + COUNT, + LOW_COUNT, + HIGH_COUNT, + NON_ZERO_COUNT("nzc"), + LOW_NON_ZERO_COUNT("low_nzc"), + HIGH_NON_ZERO_COUNT("high_nzc"), + DISTINCT_COUNT("dc"), + LOW_DISTINCT_COUNT("low_dc"), + HIGH_DISTINCT_COUNT("high_dc"), + RARE, + FREQ_RARE, + INFO_CONTENT, + LOW_INFO_CONTENT, + HIGH_INFO_CONTENT, + METRIC, + MEAN, + LOW_MEAN, + HIGH_MEAN, + AVG, + LOW_AVG, + HIGH_AVG, + MEDIAN, + LOW_MEDIAN, + HIGH_MEDIAN, + MIN, + MAX, + SUM, + LOW_SUM, + HIGH_SUM, + NON_NULL_SUM, + LOW_NON_NULL_SUM, + HIGH_NON_NULL_SUM, + VARP, + LOW_VARP, + HIGH_VARP, + TIME_OF_DAY, + TIME_OF_WEEK, + LAT_LONG; + + private Set shortcuts; + + DetectorFunction() { + shortcuts = Collections.emptySet(); + } + + DetectorFunction(String... 
shortcuts) { + this.shortcuts = Arrays.stream(shortcuts).collect(Collectors.toSet()); + } + + public String getFullName() { + return name().toLowerCase(Locale.ROOT); + } + + @Override + public String toString() { + return getFullName(); + } + + public static DetectorFunction fromString(String op) { + for (DetectorFunction function : values()) { + if (function.getFullName().equals(op) || function.shortcuts.contains(op)) { + return function; + } + } + throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_UNKNOWN_FUNCTION, op)); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java new file mode 100644 index 0000000000000..dc109ba084a53 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java @@ -0,0 +1,1225 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.analysis.AnalysisRegistry; +import org.elasticsearch.xpack.core.ml.MlParserType; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.MlStrings; +import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Date; +import java.util.EnumMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.TreeSet; +import java.util.concurrent.TimeUnit; + +/** + * This class represents a configured and created Job. The creation time is set + * to the time the object was constructed and the finished time and last + * data time fields are {@code null} until the job has seen some data or it is + * finished respectively. 
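DetectorFunction, defined just above, allows several functions to be written with a short alias (dc, nzc, high_dc, and so on), and fromString() accepts either the full name or an alias. The following self-contained enum shows the same alias pattern in miniature; MiniFunction is a made-up name for illustration, not part of the codebase.

```java
import java.util.Arrays;
import java.util.Locale;
import java.util.Set;
import java.util.stream.Collectors;

// Toy version of the alias pattern used by DetectorFunction: each constant may carry
// shortcut names, and fromString() resolves either the full name or a shortcut.
enum MiniFunction {
    COUNT,
    DISTINCT_COUNT("dc"),
    NON_ZERO_COUNT("nzc");

    private final Set<String> shortcuts;

    MiniFunction(String... shortcuts) {
        // empty varargs gives an empty set, matching the no-alias case
        this.shortcuts = Arrays.stream(shortcuts).collect(Collectors.toSet());
    }

    String fullName() {
        return name().toLowerCase(Locale.ROOT);
    }

    static MiniFunction fromString(String op) {
        for (MiniFunction f : values()) {
            if (f.fullName().equals(op) || f.shortcuts.contains(op)) {
                return f;
            }
        }
        throw new IllegalArgumentException("Unknown function '" + op + "'");
    }

    public static void main(String[] args) {
        System.out.println(MiniFunction.fromString("dc"));             // DISTINCT_COUNT
        System.out.println(MiniFunction.fromString("distinct_count")); // DISTINCT_COUNT
    }
}
```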
+ */ +public class Job extends AbstractDiffable implements Writeable, ToXContentObject { + + public static final String TYPE = "job"; + + public static final String ANOMALY_DETECTOR_JOB_TYPE = "anomaly_detector"; + + /* + * Field names used in serialization + */ + public static final ParseField ID = new ParseField("job_id"); + public static final ParseField JOB_TYPE = new ParseField("job_type"); + public static final ParseField JOB_VERSION = new ParseField("job_version"); + public static final ParseField GROUPS = new ParseField("groups"); + public static final ParseField ANALYSIS_CONFIG = AnalysisConfig.ANALYSIS_CONFIG; + public static final ParseField ANALYSIS_LIMITS = new ParseField("analysis_limits"); + public static final ParseField CREATE_TIME = new ParseField("create_time"); + public static final ParseField CUSTOM_SETTINGS = new ParseField("custom_settings"); + public static final ParseField DATA_DESCRIPTION = new ParseField("data_description"); + public static final ParseField DESCRIPTION = new ParseField("description"); + public static final ParseField FINISHED_TIME = new ParseField("finished_time"); + public static final ParseField LAST_DATA_TIME = new ParseField("last_data_time"); + public static final ParseField ESTABLISHED_MODEL_MEMORY = new ParseField("established_model_memory"); + public static final ParseField MODEL_PLOT_CONFIG = new ParseField("model_plot_config"); + public static final ParseField RENORMALIZATION_WINDOW_DAYS = new ParseField("renormalization_window_days"); + public static final ParseField BACKGROUND_PERSIST_INTERVAL = new ParseField("background_persist_interval"); + public static final ParseField MODEL_SNAPSHOT_RETENTION_DAYS = new ParseField("model_snapshot_retention_days"); + public static final ParseField RESULTS_RETENTION_DAYS = new ParseField("results_retention_days"); + public static final ParseField MODEL_SNAPSHOT_ID = new ParseField("model_snapshot_id"); + public static final ParseField MODEL_SNAPSHOT_MIN_VERSION = new ParseField("model_snapshot_min_version"); + public static final ParseField RESULTS_INDEX_NAME = new ParseField("results_index_name"); + public static final ParseField DELETED = new ParseField("deleted"); + + // Used for QueryPage + public static final ParseField RESULTS_FIELD = new ParseField("jobs"); + + // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly + public static final ObjectParser METADATA_PARSER = new ObjectParser<>("job_details", true, Builder::new); + public static final ObjectParser CONFIG_PARSER = new ObjectParser<>("job_details", false, Builder::new); + public static final Map> PARSERS = new EnumMap<>(MlParserType.class); + + public static final TimeValue MIN_BACKGROUND_PERSIST_INTERVAL = TimeValue.timeValueHours(1); + public static final ByteSizeValue PROCESS_MEMORY_OVERHEAD = new ByteSizeValue(100, ByteSizeUnit.MB); + + public static final long DEFAULT_MODEL_SNAPSHOT_RETENTION_DAYS = 1; + + static { + PARSERS.put(MlParserType.METADATA, METADATA_PARSER); + PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER); + for (MlParserType parserType : MlParserType.values()) { + ObjectParser parser = PARSERS.get(parserType); + assert parser != null; + parser.declareString(Builder::setId, ID); + parser.declareString(Builder::setJobType, JOB_TYPE); + parser.declareString(Builder::setJobVersion, JOB_VERSION); + parser.declareStringArray(Builder::setGroups, GROUPS); + parser.declareStringOrNull(Builder::setDescription, DESCRIPTION); + 
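The two parsers declared above follow the convention called out in the comment: metadata read back from the cluster state is parsed leniently so that fields added by later enhancements do not break the reader, while user-supplied config is parsed strictly. The sketch below illustrates that split with a toy parser in plain Java rather than Elasticsearch's ObjectParser; every name in it is hypothetical.

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.BiConsumer;

// Conceptual sketch of the lenient-vs-strict parsing split: the "metadata" parser ignores
// unknown fields, while the "config" parser rejects them so user input is validated strictly.
final class MiniParser<T> {

    private final boolean ignoreUnknownFields;
    private final Map<String, BiConsumer<T, String>> fields = new HashMap<>();

    MiniParser(boolean ignoreUnknownFields) {
        this.ignoreUnknownFields = ignoreUnknownFields;
    }

    void declareString(String name, BiConsumer<T, String> setter) {
        fields.put(name, setter);
    }

    void parse(Map<String, String> source, T target) {
        for (Map.Entry<String, String> entry : source.entrySet()) {
            BiConsumer<T, String> setter = fields.get(entry.getKey());
            if (setter != null) {
                setter.accept(target, entry.getValue());
            } else if (ignoreUnknownFields == false) {
                throw new IllegalArgumentException("unknown field [" + entry.getKey() + "]");
            }
        }
    }

    public static void main(String[] args) {
        class JobStub { String id; String description; }

        MiniParser<JobStub> lenient = new MiniParser<>(true);   // like METADATA_PARSER
        MiniParser<JobStub> strict = new MiniParser<>(false);   // like CONFIG_PARSER
        for (MiniParser<JobStub> p : List.of(lenient, strict)) {
            p.declareString("job_id", (j, v) -> j.id = v);
            p.declareString("description", (j, v) -> j.description = v);
        }

        Map<String, String> source = Map.of("job_id", "it-ops", "future_field", "x");
        lenient.parse(source, new JobStub());       // unknown field silently dropped
        try {
            strict.parse(source, new JobStub());
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());     // unknown field [future_field]
        }
    }
}
```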
parser.declareField(Builder::setCreateTime, p -> { + if (p.currentToken() == Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException("unexpected token [" + p.currentToken() + + "] for [" + CREATE_TIME.getPreferredName() + "]"); + }, CREATE_TIME, ValueType.VALUE); + parser.declareField(Builder::setFinishedTime, p -> { + if (p.currentToken() == Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException( + "unexpected token [" + p.currentToken() + "] for [" + FINISHED_TIME.getPreferredName() + "]"); + }, FINISHED_TIME, ValueType.VALUE); + parser.declareField(Builder::setLastDataTime, p -> { + if (p.currentToken() == Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException( + "unexpected token [" + p.currentToken() + "] for [" + LAST_DATA_TIME.getPreferredName() + "]"); + }, LAST_DATA_TIME, ValueType.VALUE); + parser.declareLong(Builder::setEstablishedModelMemory, ESTABLISHED_MODEL_MEMORY); + parser.declareObject(Builder::setAnalysisConfig, AnalysisConfig.PARSERS.get(parserType), ANALYSIS_CONFIG); + parser.declareObject(Builder::setAnalysisLimits, AnalysisLimits.PARSERS.get(parserType), ANALYSIS_LIMITS); + parser.declareObject(Builder::setDataDescription, DataDescription.PARSERS.get(parserType), DATA_DESCRIPTION); + parser.declareObject(Builder::setModelPlotConfig, ModelPlotConfig.PARSERS.get(parserType), MODEL_PLOT_CONFIG); + parser.declareLong(Builder::setRenormalizationWindowDays, RENORMALIZATION_WINDOW_DAYS); + parser.declareString((builder, val) -> builder.setBackgroundPersistInterval( + TimeValue.parseTimeValue(val, BACKGROUND_PERSIST_INTERVAL.getPreferredName())), BACKGROUND_PERSIST_INTERVAL); + parser.declareLong(Builder::setResultsRetentionDays, RESULTS_RETENTION_DAYS); + parser.declareLong(Builder::setModelSnapshotRetentionDays, MODEL_SNAPSHOT_RETENTION_DAYS); + parser.declareField(Builder::setCustomSettings, (p, c) -> p.map(), CUSTOM_SETTINGS, ValueType.OBJECT); + parser.declareStringOrNull(Builder::setModelSnapshotId, MODEL_SNAPSHOT_ID); + parser.declareStringOrNull(Builder::setModelSnapshotMinVersion, MODEL_SNAPSHOT_MIN_VERSION); + parser.declareString(Builder::setResultsIndexName, RESULTS_INDEX_NAME); + parser.declareBoolean(Builder::setDeleted, DELETED); + } + } + + private final String jobId; + private final String jobType; + + /** + * The version when the job was created. + * Will be null for versions before 5.5. 
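The declareField() lambdas above accept create_time, finished_time and last_data_time either as epoch milliseconds or as a date string. Below is a small self-contained illustration of the same dual-format handling; it uses java.time instead of the project's TimeUtils.dateStringToEpoch(), which accepts more string formats than this sketch does.

```java
import java.time.Instant;
import java.time.format.DateTimeParseException;
import java.util.Date;

// Sketch of dual-format timestamp handling: a numeric value is treated as epoch
// milliseconds, a string is parsed as an ISO-8601 instant, anything else is rejected.
final class FlexibleDates {

    static Date parse(Object value) {
        if (value instanceof Number) {
            return new Date(((Number) value).longValue());
        }
        if (value instanceof String) {
            try {
                return Date.from(Instant.parse((String) value));
            } catch (DateTimeParseException e) {
                throw new IllegalArgumentException("cannot parse date [" + value + "]", e);
            }
        }
        throw new IllegalArgumentException("unexpected value [" + value + "]");
    }

    public static void main(String[] args) {
        System.out.println(parse(1526400000000L));          // epoch millis
        System.out.println(parse("2018-05-15T16:00:00Z"));  // ISO-8601 string
    }
}
```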
+ */ + @Nullable + private final Version jobVersion; + + private final List groups; + private final String description; + // TODO: Use java.time for the Dates here: x-pack-elasticsearch#829 + private final Date createTime; + private final Date finishedTime; + private final Date lastDataTime; + private final Long establishedModelMemory; + private final AnalysisConfig analysisConfig; + private final AnalysisLimits analysisLimits; + private final DataDescription dataDescription; + private final ModelPlotConfig modelPlotConfig; + private final Long renormalizationWindowDays; + private final TimeValue backgroundPersistInterval; + private final Long modelSnapshotRetentionDays; + private final Long resultsRetentionDays; + private final Map customSettings; + private final String modelSnapshotId; + private final Version modelSnapshotMinVersion; + private final String resultsIndexName; + private final boolean deleted; + + private Job(String jobId, String jobType, Version jobVersion, List groups, String description, Date createTime, + Date finishedTime, Date lastDataTime, Long establishedModelMemory, + AnalysisConfig analysisConfig, AnalysisLimits analysisLimits, DataDescription dataDescription, + ModelPlotConfig modelPlotConfig, Long renormalizationWindowDays, TimeValue backgroundPersistInterval, + Long modelSnapshotRetentionDays, Long resultsRetentionDays, Map customSettings, + String modelSnapshotId, Version modelSnapshotMinVersion, String resultsIndexName, boolean deleted) { + + this.jobId = jobId; + this.jobType = jobType; + this.jobVersion = jobVersion; + this.groups = groups; + this.description = description; + this.createTime = createTime; + this.finishedTime = finishedTime; + this.lastDataTime = lastDataTime; + this.establishedModelMemory = establishedModelMemory; + this.analysisConfig = analysisConfig; + this.analysisLimits = analysisLimits; + this.dataDescription = dataDescription; + this.modelPlotConfig = modelPlotConfig; + this.renormalizationWindowDays = renormalizationWindowDays; + this.backgroundPersistInterval = backgroundPersistInterval; + this.modelSnapshotRetentionDays = modelSnapshotRetentionDays; + this.resultsRetentionDays = resultsRetentionDays; + this.customSettings = customSettings; + this.modelSnapshotId = modelSnapshotId; + this.modelSnapshotMinVersion = modelSnapshotMinVersion; + this.resultsIndexName = resultsIndexName; + this.deleted = deleted; + } + + public Job(StreamInput in) throws IOException { + jobId = in.readString(); + jobType = in.readString(); + if (in.getVersion().onOrAfter(Version.V_5_5_0)) { + jobVersion = in.readBoolean() ? Version.readVersion(in) : null; + } else { + jobVersion = null; + } + if (in.getVersion().onOrAfter(Version.V_6_1_0)) { + groups = in.readList(StreamInput::readString); + } else { + groups = Collections.emptyList(); + } + description = in.readOptionalString(); + createTime = new Date(in.readVLong()); + finishedTime = in.readBoolean() ? new Date(in.readVLong()) : null; + lastDataTime = in.readBoolean() ? 
new Date(in.readVLong()) : null; + if (in.getVersion().onOrAfter(Version.V_6_1_0)) { + establishedModelMemory = in.readOptionalLong(); + } else { + establishedModelMemory = null; + } + analysisConfig = new AnalysisConfig(in); + analysisLimits = in.readOptionalWriteable(AnalysisLimits::new); + dataDescription = in.readOptionalWriteable(DataDescription::new); + modelPlotConfig = in.readOptionalWriteable(ModelPlotConfig::new); + renormalizationWindowDays = in.readOptionalLong(); + backgroundPersistInterval = in.readOptionalTimeValue(); + modelSnapshotRetentionDays = in.readOptionalLong(); + resultsRetentionDays = in.readOptionalLong(); + customSettings = in.readMap(); + modelSnapshotId = in.readOptionalString(); + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1) && in.readBoolean()) { + modelSnapshotMinVersion = Version.readVersion(in); + } else { + modelSnapshotMinVersion = null; + } + resultsIndexName = in.readString(); + deleted = in.readBoolean(); + } + + /** + * Return the Job Id. + * + * @return The job Id string + */ + public String getId() { + return jobId; + } + + public String getJobType() { + return jobType; + } + + public Version getJobVersion() { + return jobVersion; + } + + public List getGroups() { + return groups; + } + + /** + * The name of the index storing the job's results and state. + * This defaults to {@link #getId()} if a specific index name is not set. + * @return The job's index name + */ + public String getResultsIndexName() { + return AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + resultsIndexName; + } + + /** + * Private version of getResultsIndexName so that a job can be built from another + * job and pass index name validation + * @return The job's index name, minus prefix + */ + private String getResultsIndexNameNoPrefix() { + return resultsIndexName; + } + + /** + * The job description + * + * @return job description + */ + public String getDescription() { + return description; + } + + /** + * The Job creation time. This name is preferred when serialising to the + * REST API. + * + * @return The date the job was created + */ + public Date getCreateTime() { + return createTime; + } + + /** + * The time the job was finished or null if not finished. + * + * @return The date the job was last retired or null + */ + public Date getFinishedTime() { + return finishedTime; + } + + /** + * The last time data was uploaded to the job or null if no + * data has been seen. + * + * @return The date at which the last data was processed + */ + public Date getLastDataTime() { + return lastDataTime; + } + + /** + * The established model memory of the job, or null if model + * memory has not reached equilibrium yet. + * + * @return The established model memory of the job + */ + public Long getEstablishedModelMemory() { + return establishedModelMemory; + } + + /** + * The analysis configuration object + * + * @return The AnalysisConfig + */ + public AnalysisConfig getAnalysisConfig() { + return analysisConfig; + } + + /** + * The analysis options object + * + * @return The AnalysisLimits + */ + public AnalysisLimits getAnalysisLimits() { + return analysisLimits; + } + + public ModelPlotConfig getModelPlotConfig() { + return modelPlotConfig; + } + + /** + * If not set the input data is assumed to be csv with a '_time' field in + * epoch format. 
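The StreamInput constructor above (and the matching writeTo() further down) relies on two wire idioms: optional values are preceded by a boolean presence flag, and fields introduced in a later release are only read or written when the stream version is on or after that release. The stand-alone sketch below shows the presence-flag part using plain java.io streams in place of Elasticsearch's StreamInput/StreamOutput.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Date;

// Sketch of the optional-field wire pattern: write a boolean presence flag, then the value
// only if present; the reader mirrors the same order.
final class OptionalDateWire {

    static void writeOptionalDate(DataOutputStream out, Date date) throws IOException {
        if (date != null) {
            out.writeBoolean(true);
            out.writeLong(date.getTime());
        } else {
            out.writeBoolean(false);
        }
    }

    static Date readOptionalDate(DataInputStream in) throws IOException {
        return in.readBoolean() ? new Date(in.readLong()) : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            writeOptionalDate(out, new Date(0L)); // present, e.g. finished_time
            writeOptionalDate(out, null);         // absent, e.g. last_data_time
        }
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            System.out.println(readOptionalDate(in)); // the epoch
            System.out.println(readOptionalDate(in)); // null
        }
    }
}
```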
+ * + * @return A DataDescription or null + * @see DataDescription + */ + public DataDescription getDataDescription() { + return dataDescription; + } + + /** + * The duration of the renormalization window in days + * + * @return renormalization window in days + */ + public Long getRenormalizationWindowDays() { + return renormalizationWindowDays; + } + + /** + * The background persistence interval + * + * @return background persistence interval + */ + public TimeValue getBackgroundPersistInterval() { + return backgroundPersistInterval; + } + + public Long getModelSnapshotRetentionDays() { + return modelSnapshotRetentionDays; + } + + public Long getResultsRetentionDays() { + return resultsRetentionDays; + } + + public Map getCustomSettings() { + return customSettings; + } + + public String getModelSnapshotId() { + return modelSnapshotId; + } + + public Version getModelSnapshotMinVersion() { + return modelSnapshotMinVersion; + } + + public boolean isDeleted() { + return deleted; + } + + /** + * Get all input data fields mentioned in the job configuration, + * namely analysis fields and the time field. + * + * @return the collection of fields - never null + */ + public Collection allInputFields() { + Set allFields = new TreeSet<>(); + + // analysis fields + if (analysisConfig != null) { + allFields.addAll(analysisConfig.analysisFields()); + } + + // time field + if (dataDescription != null) { + String timeField = dataDescription.getTimeField(); + if (timeField != null) { + allFields.add(timeField); + } + } + + // remove empty strings + allFields.remove(""); + + // the categorisation field isn't an input field + allFields.remove(AnalysisConfig.ML_CATEGORY_FIELD); + + return allFields; + } + + /** + * Make a best estimate of the job's memory footprint using the information available. + * If a job has an established model memory size, then this is the best estimate. + * Otherwise, assume the maximum model memory limit will eventually be required. + * In either case, a fixed overhead is added to account for the memory required by the + * program code and stack. + * @return an estimate of the memory requirement of this job, in bytes + */ + public long estimateMemoryFootprint() { + if (establishedModelMemory != null && establishedModelMemory > 0) { + return establishedModelMemory + PROCESS_MEMORY_OVERHEAD.getBytes(); + } + return ByteSizeUnit.MB.toBytes(analysisLimits.getModelMemoryLimit()) + PROCESS_MEMORY_OVERHEAD.getBytes(); + } + + /** + * Returns the timestamp before which data is not accepted by the job. + * This is the latest record timestamp minus the job latency. + * @param dataCounts the job data counts + * @return the timestamp before which data is not accepted by the job + */ + public long earliestValidTimestamp(DataCounts dataCounts) { + long currentTime = 0; + Date latestRecordTimestamp = dataCounts.getLatestRecordTimeStamp(); + if (latestRecordTimestamp != null) { + TimeValue latency = analysisConfig.getLatency(); + long latencyMillis = latency == null ? 
0 : latency.millis(); + currentTime = latestRecordTimestamp.getTime() - latencyMillis; + } + return currentTime; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(jobId); + out.writeString(jobType); + if (out.getVersion().onOrAfter(Version.V_5_5_0)) { + if (jobVersion != null) { + out.writeBoolean(true); + Version.writeVersion(jobVersion, out); + } else { + out.writeBoolean(false); + } + } + if (out.getVersion().onOrAfter(Version.V_6_1_0)) { + out.writeStringList(groups); + } + out.writeOptionalString(description); + out.writeVLong(createTime.getTime()); + if (finishedTime != null) { + out.writeBoolean(true); + out.writeVLong(finishedTime.getTime()); + } else { + out.writeBoolean(false); + } + if (lastDataTime != null) { + out.writeBoolean(true); + out.writeVLong(lastDataTime.getTime()); + } else { + out.writeBoolean(false); + } + if (out.getVersion().onOrAfter(Version.V_6_1_0)) { + out.writeOptionalLong(establishedModelMemory); + } + analysisConfig.writeTo(out); + out.writeOptionalWriteable(analysisLimits); + out.writeOptionalWriteable(dataDescription); + out.writeOptionalWriteable(modelPlotConfig); + out.writeOptionalLong(renormalizationWindowDays); + out.writeOptionalTimeValue(backgroundPersistInterval); + out.writeOptionalLong(modelSnapshotRetentionDays); + out.writeOptionalLong(resultsRetentionDays); + out.writeMap(customSettings); + out.writeOptionalString(modelSnapshotId); + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (modelSnapshotMinVersion != null) { + out.writeBoolean(true); + Version.writeVersion(modelSnapshotMinVersion, out); + } else { + out.writeBoolean(false); + } + } + out.writeString(resultsIndexName); + out.writeBoolean(deleted); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + doXContentBody(builder, params); + builder.endObject(); + return builder; + } + + public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + final String humanReadableSuffix = "_string"; + + builder.field(ID.getPreferredName(), jobId); + builder.field(JOB_TYPE.getPreferredName(), jobType); + if (jobVersion != null) { + builder.field(JOB_VERSION.getPreferredName(), jobVersion); + } + if (groups.isEmpty() == false) { + builder.field(GROUPS.getPreferredName(), groups); + } + if (description != null) { + builder.field(DESCRIPTION.getPreferredName(), description); + } + builder.timeField(CREATE_TIME.getPreferredName(), CREATE_TIME.getPreferredName() + humanReadableSuffix, createTime.getTime()); + if (finishedTime != null) { + builder.timeField(FINISHED_TIME.getPreferredName(), FINISHED_TIME.getPreferredName() + humanReadableSuffix, + finishedTime.getTime()); + } + if (lastDataTime != null) { + builder.timeField(LAST_DATA_TIME.getPreferredName(), LAST_DATA_TIME.getPreferredName() + humanReadableSuffix, + lastDataTime.getTime()); + } + if (establishedModelMemory != null) { + builder.field(ESTABLISHED_MODEL_MEMORY.getPreferredName(), establishedModelMemory); + } + builder.field(ANALYSIS_CONFIG.getPreferredName(), analysisConfig, params); + if (analysisLimits != null) { + builder.field(ANALYSIS_LIMITS.getPreferredName(), analysisLimits, params); + } + if (dataDescription != null) { + builder.field(DATA_DESCRIPTION.getPreferredName(), dataDescription, params); + } + if (modelPlotConfig != null) { + builder.field(MODEL_PLOT_CONFIG.getPreferredName(), modelPlotConfig, params); + } + if (renormalizationWindowDays 
!= null) { + builder.field(RENORMALIZATION_WINDOW_DAYS.getPreferredName(), renormalizationWindowDays); + } + if (backgroundPersistInterval != null) { + builder.field(BACKGROUND_PERSIST_INTERVAL.getPreferredName(), backgroundPersistInterval.getStringRep()); + } + if (modelSnapshotRetentionDays != null) { + builder.field(MODEL_SNAPSHOT_RETENTION_DAYS.getPreferredName(), modelSnapshotRetentionDays); + } + if (resultsRetentionDays != null) { + builder.field(RESULTS_RETENTION_DAYS.getPreferredName(), resultsRetentionDays); + } + if (customSettings != null) { + builder.field(CUSTOM_SETTINGS.getPreferredName(), customSettings); + } + if (modelSnapshotId != null) { + builder.field(MODEL_SNAPSHOT_ID.getPreferredName(), modelSnapshotId); + } + if (modelSnapshotMinVersion != null) { + builder.field(MODEL_SNAPSHOT_MIN_VERSION.getPreferredName(), modelSnapshotMinVersion); + } + builder.field(RESULTS_INDEX_NAME.getPreferredName(), resultsIndexName); + if (params.paramAsBoolean("all", false)) { + builder.field(DELETED.getPreferredName(), deleted); + } + return builder; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other instanceof Job == false) { + return false; + } + + Job that = (Job) other; + return Objects.equals(this.jobId, that.jobId) + && Objects.equals(this.jobType, that.jobType) + && Objects.equals(this.jobVersion, that.jobVersion) + && Objects.equals(this.groups, that.groups) + && Objects.equals(this.description, that.description) + && Objects.equals(this.createTime, that.createTime) + && Objects.equals(this.finishedTime, that.finishedTime) + && Objects.equals(this.lastDataTime, that.lastDataTime) + && Objects.equals(this.establishedModelMemory, that.establishedModelMemory) + && Objects.equals(this.analysisConfig, that.analysisConfig) + && Objects.equals(this.analysisLimits, that.analysisLimits) && Objects.equals(this.dataDescription, that.dataDescription) + && Objects.equals(this.modelPlotConfig, that.modelPlotConfig) + && Objects.equals(this.renormalizationWindowDays, that.renormalizationWindowDays) + && Objects.equals(this.backgroundPersistInterval, that.backgroundPersistInterval) + && Objects.equals(this.modelSnapshotRetentionDays, that.modelSnapshotRetentionDays) + && Objects.equals(this.resultsRetentionDays, that.resultsRetentionDays) + && Objects.equals(this.customSettings, that.customSettings) + && Objects.equals(this.modelSnapshotId, that.modelSnapshotId) + && Objects.equals(this.modelSnapshotMinVersion, that.modelSnapshotMinVersion) + && Objects.equals(this.resultsIndexName, that.resultsIndexName) + && Objects.equals(this.deleted, that.deleted); + } + + @Override + public int hashCode() { + return Objects.hash(jobId, jobType, jobVersion, groups, description, createTime, finishedTime, lastDataTime, establishedModelMemory, + analysisConfig, analysisLimits, dataDescription, modelPlotConfig, renormalizationWindowDays, + backgroundPersistInterval, modelSnapshotRetentionDays, resultsRetentionDays, customSettings, + modelSnapshotId, modelSnapshotMinVersion, resultsIndexName, deleted); + } + + // Class already extends from AbstractDiffable, so copied from ToXContentToBytes#toString() + @Override + public final String toString() { + return Strings.toString(this); + } + + private static void checkValueNotLessThan(long minVal, String name, Long value) { + if (value != null && value < minVal) { + throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_FIELD_VALUE_TOO_LOW, name, minVal, value)); + } + } + + /** + * 
Returns the job types that are compatible with a node running on {@code nodeVersion} + * @param nodeVersion the version of the node + * @return the compatible job types + */ + public static Set getCompatibleJobTypes(Version nodeVersion) { + Set compatibleTypes = new HashSet<>(); + if (nodeVersion.onOrAfter(Version.V_5_4_0)) { + compatibleTypes.add(ANOMALY_DETECTOR_JOB_TYPE); + } + return compatibleTypes; + } + + public static class Builder implements Writeable, ToXContentObject { + + private String id; + private String jobType = ANOMALY_DETECTOR_JOB_TYPE; + private Version jobVersion; + private List groups = Collections.emptyList(); + private String description; + private AnalysisConfig analysisConfig; + private AnalysisLimits analysisLimits; + private DataDescription dataDescription; + private Date createTime; + private Date finishedTime; + private Date lastDataTime; + private Long establishedModelMemory; + private ModelPlotConfig modelPlotConfig; + private Long renormalizationWindowDays; + private TimeValue backgroundPersistInterval; + private Long modelSnapshotRetentionDays = DEFAULT_MODEL_SNAPSHOT_RETENTION_DAYS; + private Long resultsRetentionDays; + private Map customSettings; + private String modelSnapshotId; + private Version modelSnapshotMinVersion; + private String resultsIndexName; + private boolean deleted; + + public Builder() { + } + + public Builder(String id) { + this.id = id; + } + + public Builder(Job job) { + this.id = job.getId(); + this.jobType = job.getJobType(); + this.jobVersion = job.getJobVersion(); + this.groups = job.getGroups(); + this.description = job.getDescription(); + this.analysisConfig = job.getAnalysisConfig(); + this.analysisLimits = job.getAnalysisLimits(); + this.dataDescription = job.getDataDescription(); + this.createTime = job.getCreateTime(); + this.finishedTime = job.getFinishedTime(); + this.lastDataTime = job.getLastDataTime(); + this.establishedModelMemory = job.getEstablishedModelMemory(); + this.modelPlotConfig = job.getModelPlotConfig(); + this.renormalizationWindowDays = job.getRenormalizationWindowDays(); + this.backgroundPersistInterval = job.getBackgroundPersistInterval(); + this.modelSnapshotRetentionDays = job.getModelSnapshotRetentionDays(); + this.resultsRetentionDays = job.getResultsRetentionDays(); + this.customSettings = job.getCustomSettings(); + this.modelSnapshotId = job.getModelSnapshotId(); + this.modelSnapshotMinVersion = job.getModelSnapshotMinVersion(); + this.resultsIndexName = job.getResultsIndexNameNoPrefix(); + this.deleted = job.isDeleted(); + } + + public Builder(StreamInput in) throws IOException { + id = in.readOptionalString(); + jobType = in.readString(); + if (in.getVersion().onOrAfter(Version.V_5_5_0)) { + jobVersion = in.readBoolean() ? Version.readVersion(in) : null; + } + if (in.getVersion().onOrAfter(Version.V_6_1_0)) { + groups = in.readList(StreamInput::readString); + } else { + groups = Collections.emptyList(); + } + description = in.readOptionalString(); + createTime = in.readBoolean() ? new Date(in.readVLong()) : null; + finishedTime = in.readBoolean() ? new Date(in.readVLong()) : null; + lastDataTime = in.readBoolean() ? 
new Date(in.readVLong()) : null; + if (in.getVersion().onOrAfter(Version.V_6_1_0)) { + establishedModelMemory = in.readOptionalLong(); + } + analysisConfig = in.readOptionalWriteable(AnalysisConfig::new); + analysisLimits = in.readOptionalWriteable(AnalysisLimits::new); + dataDescription = in.readOptionalWriteable(DataDescription::new); + modelPlotConfig = in.readOptionalWriteable(ModelPlotConfig::new); + renormalizationWindowDays = in.readOptionalLong(); + backgroundPersistInterval = in.readOptionalTimeValue(); + modelSnapshotRetentionDays = in.readOptionalLong(); + resultsRetentionDays = in.readOptionalLong(); + customSettings = in.readMap(); + modelSnapshotId = in.readOptionalString(); + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1) && in.readBoolean()) { + modelSnapshotMinVersion = Version.readVersion(in); + } else { + modelSnapshotMinVersion = null; + } + resultsIndexName = in.readOptionalString(); + deleted = in.readBoolean(); + } + + public Builder setId(String id) { + this.id = id; + return this; + } + + public String getId() { + return id; + } + + public void setJobVersion(Version jobVersion) { + this.jobVersion = jobVersion; + } + + private void setJobVersion(String jobVersion) { + this.jobVersion = Version.fromString(jobVersion); + } + + private void setJobType(String jobType) { + this.jobType = jobType; + } + + public void setGroups(List groups) { + this.groups = groups == null ? Collections.emptyList() : groups; + } + + public Builder setCustomSettings(Map customSettings) { + this.customSettings = customSettings; + return this; + } + + public Builder setDescription(String description) { + this.description = description; + return this; + } + + public Builder setAnalysisConfig(AnalysisConfig.Builder configBuilder) { + analysisConfig = ExceptionsHelper.requireNonNull(configBuilder, ANALYSIS_CONFIG.getPreferredName()).build(); + return this; + } + + public AnalysisLimits getAnalysisLimits() { + return analysisLimits; + } + + public Builder setAnalysisLimits(AnalysisLimits analysisLimits) { + this.analysisLimits = ExceptionsHelper.requireNonNull(analysisLimits, ANALYSIS_LIMITS.getPreferredName()); + return this; + } + + public Builder setCreateTime(Date createTime) { + this.createTime = createTime; + return this; + } + + public Builder setFinishedTime(Date finishedTime) { + this.finishedTime = finishedTime; + return this; + } + + /** + * Set the wall clock time of the last data upload + * @param lastDataTime Wall clock time + */ + public Builder setLastDataTime(Date lastDataTime) { + this.lastDataTime = lastDataTime; + return this; + } + + public Builder setEstablishedModelMemory(Long establishedModelMemory) { + this.establishedModelMemory = establishedModelMemory; + return this; + } + + public Builder setDataDescription(DataDescription.Builder description) { + dataDescription = ExceptionsHelper.requireNonNull(description, DATA_DESCRIPTION.getPreferredName()).build(); + return this; + } + + public Builder setModelPlotConfig(ModelPlotConfig modelPlotConfig) { + this.modelPlotConfig = modelPlotConfig; + return this; + } + + public Builder setBackgroundPersistInterval(TimeValue backgroundPersistInterval) { + this.backgroundPersistInterval = backgroundPersistInterval; + return this; + } + + public Builder setRenormalizationWindowDays(Long renormalizationWindowDays) { + this.renormalizationWindowDays = renormalizationWindowDays; + return this; + } + + public Builder setModelSnapshotRetentionDays(Long modelSnapshotRetentionDays) { + this.modelSnapshotRetentionDays = 
modelSnapshotRetentionDays; + return this; + } + + public Builder setResultsRetentionDays(Long resultsRetentionDays) { + this.resultsRetentionDays = resultsRetentionDays; + return this; + } + + public Builder setModelSnapshotId(String modelSnapshotId) { + this.modelSnapshotId = modelSnapshotId; + return this; + } + + public Builder setModelSnapshotMinVersion(Version modelSnapshotMinVersion) { + this.modelSnapshotMinVersion = modelSnapshotMinVersion; + return this; + } + + public Builder setModelSnapshotMinVersion(String modelSnapshotMinVersion) { + this.modelSnapshotMinVersion = Version.fromString(modelSnapshotMinVersion); + return this; + } + + public Builder setResultsIndexName(String resultsIndexName) { + this.resultsIndexName = resultsIndexName; + return this; + } + + public Builder setDeleted(boolean deleted) { + this.deleted = deleted; + return this; + } + + /** + * Return the list of fields that have been set and are invalid to + * be set when the job is created e.g. model snapshot Id should not + * be set at job creation. + * @return List of fields set fields that should not be. + */ + public List invalidCreateTimeSettings() { + List invalidCreateValues = new ArrayList<>(); + if (modelSnapshotId != null) { + invalidCreateValues.add(MODEL_SNAPSHOT_ID.getPreferredName()); + } + if (lastDataTime != null) { + invalidCreateValues.add(LAST_DATA_TIME.getPreferredName()); + } + if (finishedTime != null) { + invalidCreateValues.add(FINISHED_TIME.getPreferredName()); + } + if (createTime != null) { + invalidCreateValues.add(CREATE_TIME.getPreferredName()); + } + return invalidCreateValues; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalString(id); + out.writeString(jobType); + if (out.getVersion().onOrAfter(Version.V_5_5_0)) { + if (jobVersion != null) { + out.writeBoolean(true); + Version.writeVersion(jobVersion, out); + } else { + out.writeBoolean(false); + } + } + if (out.getVersion().onOrAfter(Version.V_6_1_0)) { + out.writeStringList(groups); + } + out.writeOptionalString(description); + if (createTime != null) { + out.writeBoolean(true); + out.writeVLong(createTime.getTime()); + } else { + out.writeBoolean(false); + } + if (finishedTime != null) { + out.writeBoolean(true); + out.writeVLong(finishedTime.getTime()); + } else { + out.writeBoolean(false); + } + if (lastDataTime != null) { + out.writeBoolean(true); + out.writeVLong(lastDataTime.getTime()); + } else { + out.writeBoolean(false); + } + if (out.getVersion().onOrAfter(Version.V_6_1_0)) { + out.writeOptionalLong(establishedModelMemory); + } + out.writeOptionalWriteable(analysisConfig); + out.writeOptionalWriteable(analysisLimits); + out.writeOptionalWriteable(dataDescription); + out.writeOptionalWriteable(modelPlotConfig); + out.writeOptionalLong(renormalizationWindowDays); + out.writeOptionalTimeValue(backgroundPersistInterval); + out.writeOptionalLong(modelSnapshotRetentionDays); + out.writeOptionalLong(resultsRetentionDays); + out.writeMap(customSettings); + out.writeOptionalString(modelSnapshotId); + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (modelSnapshotMinVersion != null) { + out.writeBoolean(true); + Version.writeVersion(modelSnapshotMinVersion, out); + } else { + out.writeBoolean(false); + } + } + out.writeOptionalString(resultsIndexName); + out.writeBoolean(deleted); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (id != null) { + 
builder.field(ID.getPreferredName(), id); + } + builder.field(JOB_TYPE.getPreferredName(), jobType); + if (jobVersion != null) { + builder.field(JOB_VERSION.getPreferredName(), jobVersion); + } + if (description != null) { + builder.field(DESCRIPTION.getPreferredName(), description); + } + if (createTime != null) { + builder.field(CREATE_TIME.getPreferredName(), createTime.getTime()); + } + if (finishedTime != null) { + builder.field(FINISHED_TIME.getPreferredName(), finishedTime.getTime()); + } + if (lastDataTime != null) { + builder.field(LAST_DATA_TIME.getPreferredName(), lastDataTime.getTime()); + } + if (establishedModelMemory != null) { + builder.field(ESTABLISHED_MODEL_MEMORY.getPreferredName(), establishedModelMemory); + } + if (analysisConfig != null) { + builder.field(ANALYSIS_CONFIG.getPreferredName(), analysisConfig, params); + } + if (analysisLimits != null) { + builder.field(ANALYSIS_LIMITS.getPreferredName(), analysisLimits, params); + } + if (dataDescription != null) { + builder.field(DATA_DESCRIPTION.getPreferredName(), dataDescription, params); + } + if (modelPlotConfig != null) { + builder.field(MODEL_PLOT_CONFIG.getPreferredName(), modelPlotConfig, params); + } + if (renormalizationWindowDays != null) { + builder.field(RENORMALIZATION_WINDOW_DAYS.getPreferredName(), renormalizationWindowDays); + } + if (backgroundPersistInterval != null) { + builder.field(BACKGROUND_PERSIST_INTERVAL.getPreferredName(), backgroundPersistInterval.getStringRep()); + } + if (modelSnapshotRetentionDays != null) { + builder.field(MODEL_SNAPSHOT_RETENTION_DAYS.getPreferredName(), modelSnapshotRetentionDays); + } + if (resultsRetentionDays != null) { + builder.field(RESULTS_RETENTION_DAYS.getPreferredName(), resultsRetentionDays); + } + if (customSettings != null) { + builder.field(CUSTOM_SETTINGS.getPreferredName(), customSettings); + } + if (modelSnapshotId != null) { + builder.field(MODEL_SNAPSHOT_ID.getPreferredName(), modelSnapshotId); + } + if (modelSnapshotMinVersion != null) { + builder.field(MODEL_SNAPSHOT_MIN_VERSION.getPreferredName(), modelSnapshotMinVersion); + } + if (resultsIndexName != null) { + builder.field(RESULTS_INDEX_NAME.getPreferredName(), resultsIndexName); + } + if (params.paramAsBoolean("all", false)) { + builder.field(DELETED.getPreferredName(), deleted); + } + + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Job.Builder that = (Job.Builder) o; + return Objects.equals(this.id, that.id) + && Objects.equals(this.jobType, that.jobType) + && Objects.equals(this.jobVersion, that.jobVersion) + && Objects.equals(this.description, that.description) + && Objects.equals(this.analysisConfig, that.analysisConfig) + && Objects.equals(this.analysisLimits, that.analysisLimits) + && Objects.equals(this.dataDescription, that.dataDescription) + && Objects.equals(this.createTime, that.createTime) + && Objects.equals(this.finishedTime, that.finishedTime) + && Objects.equals(this.lastDataTime, that.lastDataTime) + && Objects.equals(this.establishedModelMemory, that.establishedModelMemory) + && Objects.equals(this.modelPlotConfig, that.modelPlotConfig) + && Objects.equals(this.renormalizationWindowDays, that.renormalizationWindowDays) + && Objects.equals(this.backgroundPersistInterval, that.backgroundPersistInterval) + && Objects.equals(this.modelSnapshotRetentionDays, that.modelSnapshotRetentionDays) + && Objects.equals(this.resultsRetentionDays, 
that.resultsRetentionDays) + && Objects.equals(this.customSettings, that.customSettings) + && Objects.equals(this.modelSnapshotId, that.modelSnapshotId) + && Objects.equals(this.modelSnapshotMinVersion, that.modelSnapshotMinVersion) + && Objects.equals(this.resultsIndexName, that.resultsIndexName) + && Objects.equals(this.deleted, that.deleted); + } + + @Override + public int hashCode() { + return Objects.hash(id, jobType, jobVersion, description, analysisConfig, analysisLimits, dataDescription, createTime, + finishedTime, lastDataTime, establishedModelMemory, modelPlotConfig, renormalizationWindowDays, + backgroundPersistInterval, modelSnapshotRetentionDays, resultsRetentionDays, customSettings, modelSnapshotId, + modelSnapshotMinVersion, resultsIndexName, deleted); + } + + /** + * Call this method to validate that the job JSON provided by a user is valid. + * Throws an exception if there are any problems; normal return implies valid. + */ + public void validateInputFields() { + + if (analysisConfig == null) { + throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_MISSING_ANALYSISCONFIG)); + } + + if (dataDescription == null) { + throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_MISSING_DATA_DESCRIPTION)); + } + + checkTimeFieldNotInAnalysisConfig(dataDescription, analysisConfig); + + checkValidBackgroundPersistInterval(); + checkValueNotLessThan(0, RENORMALIZATION_WINDOW_DAYS.getPreferredName(), renormalizationWindowDays); + checkValueNotLessThan(0, MODEL_SNAPSHOT_RETENTION_DAYS.getPreferredName(), modelSnapshotRetentionDays); + checkValueNotLessThan(0, RESULTS_RETENTION_DAYS.getPreferredName(), resultsRetentionDays); + + if (!MlStrings.isValidId(id)) { + throw new IllegalArgumentException(Messages.getMessage(Messages.INVALID_ID, ID.getPreferredName(), id)); + } + if (!MlStrings.hasValidLengthForId(id)) { + throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_ID_TOO_LONG, MlStrings.ID_LENGTH_LIMIT)); + } + + validateGroups(); + + // Results index name not specified in user input means use the default, so is acceptable in this validation + if (!Strings.isNullOrEmpty(resultsIndexName) && !MlStrings.isValidId(resultsIndexName)) { + throw new IllegalArgumentException( + Messages.getMessage(Messages.INVALID_ID, RESULTS_INDEX_NAME.getPreferredName(), resultsIndexName)); + } + + // Creation time is NOT required in user input, hence validated only on build + } + + /** + * This is meant to be called when a new job is created. + * It will optionally validate the model memory limit against the max limit + * and it will set the current version defaults to missing values. + */ + public void validateAnalysisLimitsAndSetDefaults(@Nullable ByteSizeValue maxModelMemoryLimit) { + analysisLimits = AnalysisLimits.validateAndSetDefaults(analysisLimits, maxModelMemoryLimit, + AnalysisLimits.DEFAULT_MODEL_MEMORY_LIMIT_MB); + } + + /** + * Validate the char filter/tokenizer/token filter names used in the categorization analyzer config (if any). + * The overall structure can be validated at parse time, but the exact names need to be checked separately, + * as plugins that provide the functionality can be installed/uninstalled. 
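validateInputFields() above delegates job id validation to MlStrings.isValidId() and MlStrings.hasValidLengthForId(). The sketch below is a rough, hedged approximation of what those checks enforce; the exact pattern and the 64-character limit are assumptions based on the ML documentation, not copied from MlStrings.

```java
import java.util.regex.Pattern;

// Hedged approximation of the job id rules: lowercase alphanumerics, hyphens and
// underscores, starting and ending with an alphanumeric, within a length limit.
final class JobIdCheck {

    private static final Pattern VALID_ID = Pattern.compile("[a-z0-9](?:[a-z0-9_\\-]*[a-z0-9])?");
    private static final int ID_LENGTH_LIMIT = 64; // assumed limit, see MlStrings.ID_LENGTH_LIMIT

    static void validate(String id) {
        if (id == null || !VALID_ID.matcher(id).matches()) {
            throw new IllegalArgumentException("Invalid job_id [" + id + "]");
        }
        if (id.length() > ID_LENGTH_LIMIT) {
            throw new IllegalArgumentException("job_id longer than " + ID_LENGTH_LIMIT + " characters");
        }
    }

    public static void main(String[] args) {
        validate("it-ops-kpi");           // accepted
        try {
            validate("It Ops KPI");       // rejected: uppercase and spaces
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
```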
+ */ + public void validateCategorizationAnalyzer(AnalysisRegistry analysisRegistry, Environment environment) throws IOException { + CategorizationAnalyzerConfig categorizationAnalyzerConfig = analysisConfig.getCategorizationAnalyzerConfig(); + if (categorizationAnalyzerConfig != null) { + new CategorizationAnalyzerConfig.Builder(categorizationAnalyzerConfig).verify(analysisRegistry, environment); + } + } + + private void validateGroups() { + for (String group : this.groups) { + if (MlStrings.isValidId(group) == false) { + throw new IllegalArgumentException(Messages.getMessage(Messages.INVALID_GROUP, group)); + } + } + } + + /** + * Builds a job with the given {@code createTime} and the current version. + * This should be used when a new job is created as opposed to {@link #build()}. + * + * @param createTime The time this job was created + * @return The job + */ + public Job build(Date createTime) { + setCreateTime(createTime); + setJobVersion(Version.CURRENT); + // TODO: Maybe we _could_ accept a value for this supplied at create time - it would + // mean cloned jobs that hadn't been edited much would start with an accurate expected size. + // But on the other hand it would mean jobs that were cloned and then completely changed + // would start with a size that was completely wrong. + setEstablishedModelMemory(null); + return build(); + } + + /** + * Builds a job. + * This should be used when an existing job is being built + * as opposed to {@link #build(Date)}. + * + * @return The job + */ + public Job build() { + + // If at the build stage there are missing values from analysis limits, + // it means we are reading a pre 6.3 job. Since 6.1, the model_memory_limit + // is always populated. So, if the value is missing, we fill with the pre 6.1 + // default. We do not need to check against the max limit here so we pass null. + analysisLimits = AnalysisLimits.validateAndSetDefaults(analysisLimits, null, + AnalysisLimits.PRE_6_1_DEFAULT_MODEL_MEMORY_LIMIT_MB); + + validateInputFields(); + + // Creation time is NOT required in user input, hence validated only on build + ExceptionsHelper.requireNonNull(createTime, CREATE_TIME.getPreferredName()); + + if (Strings.isNullOrEmpty(resultsIndexName)) { + resultsIndexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; + } else if (!resultsIndexName.equals(AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT)) { + // User-defined names are prepended with "custom" + // Conditional guards against multiple prepending due to updates instead of first creation + resultsIndexName = resultsIndexName.startsWith("custom-") + ? 
resultsIndexName + : "custom-" + resultsIndexName; + } + + return new Job( + id, jobType, jobVersion, groups, description, createTime, finishedTime, lastDataTime, establishedModelMemory, + analysisConfig, analysisLimits, dataDescription, modelPlotConfig, renormalizationWindowDays, + backgroundPersistInterval, modelSnapshotRetentionDays, resultsRetentionDays, customSettings, + modelSnapshotId, modelSnapshotMinVersion, resultsIndexName, deleted); + } + + private void checkValidBackgroundPersistInterval() { + if (backgroundPersistInterval != null) { + TimeUtils.checkMultiple(backgroundPersistInterval, TimeUnit.SECONDS, BACKGROUND_PERSIST_INTERVAL); + checkValueNotLessThan(MIN_BACKGROUND_PERSIST_INTERVAL.getSeconds(), BACKGROUND_PERSIST_INTERVAL.getPreferredName(), + backgroundPersistInterval.getSeconds()); + } + } + + static void checkTimeFieldNotInAnalysisConfig(DataDescription dataDescription, AnalysisConfig analysisConfig) { + if (analysisConfig.analysisFields().contains(dataDescription.getTimeField())) { + throw new IllegalArgumentException(Messages.getMessage(Messages.JOB_CONFIG_TIME_FIELD_NOT_ALLOWED_IN_ANALYSIS_CONFIG)); + } + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobState.java new file mode 100644 index 0000000000000..e89149a062b68 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobState.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Locale; + +/** + * Jobs whether running or complete are in one of these states. + * When a job is created it is initialised in to the state closed + * i.e. it is not running. + */ +public enum JobState implements Writeable { + + CLOSING, CLOSED, OPENED, FAILED, OPENING; + + public static JobState fromString(String name) { + return valueOf(name.trim().toUpperCase(Locale.ROOT)); + } + + public static JobState fromStream(StreamInput in) throws IOException { + return in.readEnum(JobState.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + JobState state = this; + // Pre v5.5 the OPENING state didn't exist + if (this == OPENING && out.getVersion().before(Version.V_5_5_0)) { + state = CLOSED; + } + out.writeEnum(state); + } + + public String value() { + return name().toLowerCase(Locale.ROOT); + } + + + /** + * @return {@code true} if state matches any of the given {@code candidates} + */ + public boolean isAnyOf(JobState... 
candidates) { + return Arrays.stream(candidates).anyMatch(candidate -> this == candidate); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskStatus.java new file mode 100644 index 0000000000000..de102798d1ca6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskStatus.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.ml.action.OpenJobAction; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +public class JobTaskStatus implements Task.Status { + + public static final String NAME = OpenJobAction.TASK_NAME; + + private static ParseField STATE = new ParseField("state"); + private static ParseField ALLOCATION_ID = new ParseField("allocation_id"); + + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>(NAME, + args -> new JobTaskStatus((JobState) args[0], (Long) args[1])); + + static { + PARSER.declareField(constructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return JobState.fromString(p.text()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, STATE, ObjectParser.ValueType.STRING); + PARSER.declareLong(constructorArg(), ALLOCATION_ID); + } + + public static JobTaskStatus fromXContent(XContentParser parser) { + try { + return PARSER.parse(parser, null); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private final JobState state; + private final long allocationId; + + public JobTaskStatus(JobState state, long allocationId) { + this.state = Objects.requireNonNull(state); + this.allocationId = allocationId; + } + + public JobTaskStatus(StreamInput in) throws IOException { + state = JobState.fromStream(in); + allocationId = in.readLong(); + } + + public JobState getState() { + return state; + } + + public boolean isStatusStale(PersistentTask task) { + return allocationId != task.getAllocationId(); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + state.writeTo(out); + out.writeLong(allocationId); + } + + @Override + public boolean isFragment() { + return false; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(STATE.getPreferredName(), state.value()); + 
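A rough orientation sketch of how a task status renders and when it is considered stale; the allocation id is made up, and `Strings.toString` is assumed to be the usual `org.elasticsearch.common.Strings` helper for rendering `ToXContent` objects.

```java
import org.elasticsearch.common.Strings;
import org.elasticsearch.xpack.core.ml.job.config.JobState;
import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus;

public class JobTaskStatusExample {
    public static void main(String[] args) {
        // Hypothetical allocation id 3 for an opened job
        JobTaskStatus status = new JobTaskStatus(JobState.OPENED, 3L);

        // Renders roughly as {"state":"opened","allocation_id":3}
        System.out.println(Strings.toString(status));

        // isStatusStale(task) returns true when the persistent task it is
        // compared against has since been reallocated, i.e. the task's
        // allocation id no longer equals 3.
    }
}
```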
builder.field(ALLOCATION_ID.getPreferredName(), allocationId); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + JobTaskStatus that = (JobTaskStatus) o; + return state == that.state && + Objects.equals(allocationId, that.allocationId); + } + + @Override + public int hashCode() { + return Objects.hash(state, allocationId); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java new file mode 100644 index 0000000000000..8644254b92162 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java @@ -0,0 +1,649 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.Version; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.TreeSet; + +public class JobUpdate implements Writeable, ToXContentObject { + public static final ParseField DETECTORS = new ParseField("detectors"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "job_update", args -> new Builder((String) args[0])); + + static { + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), Job.ID); + PARSER.declareStringArray(Builder::setGroups, Job.GROUPS); + PARSER.declareStringOrNull(Builder::setDescription, Job.DESCRIPTION); + PARSER.declareObjectArray(Builder::setDetectorUpdates, DetectorUpdate.PARSER, DETECTORS); + PARSER.declareObject(Builder::setModelPlotConfig, ModelPlotConfig.CONFIG_PARSER, Job.MODEL_PLOT_CONFIG); + PARSER.declareObject(Builder::setAnalysisLimits, AnalysisLimits.CONFIG_PARSER, Job.ANALYSIS_LIMITS); + PARSER.declareString((builder, val) -> builder.setBackgroundPersistInterval( + TimeValue.parseTimeValue(val, Job.BACKGROUND_PERSIST_INTERVAL.getPreferredName())), Job.BACKGROUND_PERSIST_INTERVAL); + PARSER.declareLong(Builder::setRenormalizationWindowDays, Job.RENORMALIZATION_WINDOW_DAYS); + PARSER.declareLong(Builder::setResultsRetentionDays, Job.RESULTS_RETENTION_DAYS); + PARSER.declareLong(Builder::setModelSnapshotRetentionDays, Job.MODEL_SNAPSHOT_RETENTION_DAYS); + PARSER.declareStringArray(Builder::setCategorizationFilters, AnalysisConfig.CATEGORIZATION_FILTERS); + PARSER.declareField(Builder::setCustomSettings, (p, c) -> p.map(), Job.CUSTOM_SETTINGS, ObjectParser.ValueType.OBJECT); + 
PARSER.declareString(Builder::setModelSnapshotId, Job.MODEL_SNAPSHOT_ID); + PARSER.declareString(Builder::setModelSnapshotMinVersion, Job.MODEL_SNAPSHOT_MIN_VERSION); + PARSER.declareLong(Builder::setEstablishedModelMemory, Job.ESTABLISHED_MODEL_MEMORY); + } + + private final String jobId; + private final List groups; + private final String description; + private final List detectorUpdates; + private final ModelPlotConfig modelPlotConfig; + private final AnalysisLimits analysisLimits; + private final Long renormalizationWindowDays; + private final TimeValue backgroundPersistInterval; + private final Long modelSnapshotRetentionDays; + private final Long resultsRetentionDays; + private final List categorizationFilters; + private final Map customSettings; + private final String modelSnapshotId; + private final Version modelSnapshotMinVersion; + private final Long establishedModelMemory; + + private JobUpdate(String jobId, @Nullable List groups, @Nullable String description, + @Nullable List detectorUpdates, @Nullable ModelPlotConfig modelPlotConfig, + @Nullable AnalysisLimits analysisLimits, @Nullable TimeValue backgroundPersistInterval, + @Nullable Long renormalizationWindowDays, @Nullable Long resultsRetentionDays, + @Nullable Long modelSnapshotRetentionDays, @Nullable List categorisationFilters, + @Nullable Map customSettings, @Nullable String modelSnapshotId, + @Nullable Version modelSnapshotMinVersion, @Nullable Long establishedModelMemory) { + this.jobId = jobId; + this.groups = groups; + this.description = description; + this.detectorUpdates = detectorUpdates; + this.modelPlotConfig = modelPlotConfig; + this.analysisLimits = analysisLimits; + this.renormalizationWindowDays = renormalizationWindowDays; + this.backgroundPersistInterval = backgroundPersistInterval; + this.modelSnapshotRetentionDays = modelSnapshotRetentionDays; + this.resultsRetentionDays = resultsRetentionDays; + this.categorizationFilters = categorisationFilters; + this.customSettings = customSettings; + this.modelSnapshotId = modelSnapshotId; + this.modelSnapshotMinVersion = modelSnapshotMinVersion; + this.establishedModelMemory = establishedModelMemory; + } + + public JobUpdate(StreamInput in) throws IOException { + jobId = in.readString(); + if (in.getVersion().onOrAfter(Version.V_6_1_0)) { + String[] groupsArray = in.readOptionalStringArray(); + groups = groupsArray == null ? 
null : Arrays.asList(groupsArray); + } else { + groups = null; + } + description = in.readOptionalString(); + if (in.readBoolean()) { + detectorUpdates = in.readList(DetectorUpdate::new); + } else { + detectorUpdates = null; + } + modelPlotConfig = in.readOptionalWriteable(ModelPlotConfig::new); + analysisLimits = in.readOptionalWriteable(AnalysisLimits::new); + renormalizationWindowDays = in.readOptionalLong(); + backgroundPersistInterval = in.readOptionalTimeValue(); + modelSnapshotRetentionDays = in.readOptionalLong(); + resultsRetentionDays = in.readOptionalLong(); + if (in.readBoolean()) { + categorizationFilters = in.readList(StreamInput::readString); + } else { + categorizationFilters = null; + } + customSettings = in.readMap(); + modelSnapshotId = in.readOptionalString(); + if (in.getVersion().onOrAfter(Version.V_6_3_0) && in.readBoolean()) { + modelSnapshotMinVersion = Version.readVersion(in); + } else { + modelSnapshotMinVersion = null; + } + if (in.getVersion().onOrAfter(Version.V_6_1_0)) { + establishedModelMemory = in.readOptionalLong(); + } else { + establishedModelMemory = null; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(jobId); + if (out.getVersion().onOrAfter(Version.V_6_1_0)) { + String[] groupsArray = groups == null ? null : groups.toArray(new String[groups.size()]); + out.writeOptionalStringArray(groupsArray); + } + out.writeOptionalString(description); + out.writeBoolean(detectorUpdates != null); + if (detectorUpdates != null) { + out.writeList(detectorUpdates); + } + out.writeOptionalWriteable(modelPlotConfig); + out.writeOptionalWriteable(analysisLimits); + out.writeOptionalLong(renormalizationWindowDays); + out.writeOptionalTimeValue(backgroundPersistInterval); + out.writeOptionalLong(modelSnapshotRetentionDays); + out.writeOptionalLong(resultsRetentionDays); + out.writeBoolean(categorizationFilters != null); + if (categorizationFilters != null) { + out.writeStringList(categorizationFilters); + } + out.writeMap(customSettings); + out.writeOptionalString(modelSnapshotId); + if (out.getVersion().onOrAfter(Version.V_6_3_0)) { + if (modelSnapshotMinVersion != null) { + out.writeBoolean(true); + Version.writeVersion(modelSnapshotMinVersion, out); + } else { + out.writeBoolean(false); + } + } + if (out.getVersion().onOrAfter(Version.V_6_1_0)) { + out.writeOptionalLong(establishedModelMemory); + } + } + + public String getJobId() { + return jobId; + } + + public List getGroups() { + return groups; + } + + public String getDescription() { + return description; + } + + public List getDetectorUpdates() { + return detectorUpdates; + } + + public ModelPlotConfig getModelPlotConfig() { + return modelPlotConfig; + } + + public AnalysisLimits getAnalysisLimits() { + return analysisLimits; + } + + public Long getRenormalizationWindowDays() { + return renormalizationWindowDays; + } + + public TimeValue getBackgroundPersistInterval() { + return backgroundPersistInterval; + } + + public Long getModelSnapshotRetentionDays() { + return modelSnapshotRetentionDays; + } + + public Long getResultsRetentionDays() { + return resultsRetentionDays; + } + + public List getCategorizationFilters() { + return categorizationFilters; + } + + public Map getCustomSettings() { + return customSettings; + } + + public String getModelSnapshotId() { + return modelSnapshotId; + } + + public Version getModelSnapshotMinVersion() { + return modelSnapshotMinVersion; + } + + public Long getEstablishedModelMemory() { + return establishedModelMemory; + } + + 
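A minimal usage sketch of the builder defined at the bottom of this class, with a made-up job id and field values; it shows how `getUpdateFields` and `isAutodetectProcessUpdate` report what was set.

```java
import java.util.Arrays;

import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.xpack.core.ml.job.config.JobUpdate;
import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig;

public class JobUpdateExample {
    public static void main(String[] args) {
        JobUpdate update = new JobUpdate.Builder("nightly-cpu")   // hypothetical job id
                .setDescription("Nightly CPU anomaly job")
                .setGroups(Arrays.asList("production"))
                .setModelPlotConfig(new ModelPlotConfig(true, "host"))
                .setBackgroundPersistInterval(TimeValue.timeValueHours(2))
                .build();

        // Only fields that were explicitly set are reported, in sorted order:
        // [background_persist_interval, description, groups, model_plot_config]
        System.out.println(update.getUpdateFields());

        // true, because a model_plot_config change has to be pushed to the
        // running autodetect process
        System.out.println(update.isAutodetectProcessUpdate());
    }
}
```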
public boolean isAutodetectProcessUpdate() { + return modelPlotConfig != null || detectorUpdates != null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + if (groups != null) { + builder.field(Job.GROUPS.getPreferredName(), groups); + } + if (description != null) { + builder.field(Job.DESCRIPTION.getPreferredName(), description); + } + if (detectorUpdates != null) { + builder.field(DETECTORS.getPreferredName(), detectorUpdates); + } + if (modelPlotConfig != null) { + builder.field(Job.MODEL_PLOT_CONFIG.getPreferredName(), modelPlotConfig); + } + if (analysisLimits != null) { + builder.field(Job.ANALYSIS_LIMITS.getPreferredName(), analysisLimits); + } + if (renormalizationWindowDays != null) { + builder.field(Job.RENORMALIZATION_WINDOW_DAYS.getPreferredName(), renormalizationWindowDays); + } + if (backgroundPersistInterval != null) { + builder.field(Job.BACKGROUND_PERSIST_INTERVAL.getPreferredName(), backgroundPersistInterval); + } + if (modelSnapshotRetentionDays != null) { + builder.field(Job.MODEL_SNAPSHOT_RETENTION_DAYS.getPreferredName(), modelSnapshotRetentionDays); + } + if (resultsRetentionDays != null) { + builder.field(Job.RESULTS_RETENTION_DAYS.getPreferredName(), resultsRetentionDays); + } + if (categorizationFilters != null) { + builder.field(AnalysisConfig.CATEGORIZATION_FILTERS.getPreferredName(), categorizationFilters); + } + if (customSettings != null) { + builder.field(Job.CUSTOM_SETTINGS.getPreferredName(), customSettings); + } + if (modelSnapshotId != null) { + builder.field(Job.MODEL_SNAPSHOT_ID.getPreferredName(), modelSnapshotId); + } + if (modelSnapshotMinVersion != null) { + builder.field(Job.MODEL_SNAPSHOT_MIN_VERSION.getPreferredName(), modelSnapshotMinVersion); + } + if (establishedModelMemory != null) { + builder.field(Job.ESTABLISHED_MODEL_MEMORY.getPreferredName(), establishedModelMemory); + } + builder.endObject(); + return builder; + } + + public Set getUpdateFields() { + Set updateFields = new TreeSet<>(); + if (groups != null) { + updateFields.add(Job.GROUPS.getPreferredName()); + } + if (description != null) { + updateFields.add(Job.DESCRIPTION.getPreferredName()); + } + if (detectorUpdates != null) { + updateFields.add(DETECTORS.getPreferredName()); + } + if (modelPlotConfig != null) { + updateFields.add(Job.MODEL_PLOT_CONFIG.getPreferredName()); + } + if (analysisLimits != null) { + updateFields.add(Job.ANALYSIS_LIMITS.getPreferredName()); + } + if (renormalizationWindowDays != null) { + updateFields.add(Job.RENORMALIZATION_WINDOW_DAYS.getPreferredName()); + } + if (backgroundPersistInterval != null) { + updateFields.add(Job.BACKGROUND_PERSIST_INTERVAL.getPreferredName()); + } + if (modelSnapshotRetentionDays != null) { + updateFields.add(Job.MODEL_SNAPSHOT_RETENTION_DAYS.getPreferredName()); + } + if (resultsRetentionDays != null) { + updateFields.add(Job.RESULTS_RETENTION_DAYS.getPreferredName()); + } + if (categorizationFilters != null) { + updateFields.add(AnalysisConfig.CATEGORIZATION_FILTERS.getPreferredName()); + } + if (customSettings != null) { + updateFields.add(Job.CUSTOM_SETTINGS.getPreferredName()); + } + if (modelSnapshotId != null) { + updateFields.add(Job.MODEL_SNAPSHOT_ID.getPreferredName()); + } + if (modelSnapshotMinVersion != null) { + updateFields.add(Job.MODEL_SNAPSHOT_MIN_VERSION.getPreferredName()); + } + if (establishedModelMemory != null) { + 
updateFields.add(Job.ESTABLISHED_MODEL_MEMORY.getPreferredName()); + } + return updateFields; + } + + /** + * Updates {@code source} with the new values in this object returning a new {@link Job}. + * + * @param source Source job to be updated + * @param maxModelMemoryLimit The maximum model memory allowed + * @return A new job equivalent to {@code source} updated. + */ + public Job mergeWithJob(Job source, ByteSizeValue maxModelMemoryLimit) { + Job.Builder builder = new Job.Builder(source); + if (groups != null) { + builder.setGroups(groups); + } + if (description != null) { + builder.setDescription(description); + } + if (detectorUpdates != null && detectorUpdates.isEmpty() == false) { + AnalysisConfig ac = source.getAnalysisConfig(); + int numDetectors = ac.getDetectors().size(); + for (DetectorUpdate dd : detectorUpdates) { + if (dd.getDetectorIndex() >= numDetectors) { + throw ExceptionsHelper.badRequestException("Supplied detector_index [{}] is >= the number of detectors [{}]", + dd.getDetectorIndex(), numDetectors); + } + + Detector.Builder detectorbuilder = new Detector.Builder(ac.getDetectors().get(dd.getDetectorIndex())); + if (dd.getDescription() != null) { + detectorbuilder.setDetectorDescription(dd.getDescription()); + } + if (dd.getRules() != null) { + detectorbuilder.setRules(dd.getRules()); + } + ac.getDetectors().set(dd.getDetectorIndex(), detectorbuilder.build()); + } + + AnalysisConfig.Builder acBuilder = new AnalysisConfig.Builder(ac); + builder.setAnalysisConfig(acBuilder); + } + if (modelPlotConfig != null) { + builder.setModelPlotConfig(modelPlotConfig); + } + if (analysisLimits != null) { + AnalysisLimits validatedLimits = AnalysisLimits.validateAndSetDefaults(analysisLimits, maxModelMemoryLimit, + AnalysisLimits.DEFAULT_MODEL_MEMORY_LIMIT_MB); + builder.setAnalysisLimits(validatedLimits); + } + if (renormalizationWindowDays != null) { + builder.setRenormalizationWindowDays(renormalizationWindowDays); + } + if (backgroundPersistInterval != null) { + builder.setBackgroundPersistInterval(backgroundPersistInterval); + } + if (modelSnapshotRetentionDays != null) { + builder.setModelSnapshotRetentionDays(modelSnapshotRetentionDays); + } + if (resultsRetentionDays != null) { + builder.setResultsRetentionDays(resultsRetentionDays); + } + if (categorizationFilters != null) { + AnalysisConfig.Builder analysisConfigBuilder = new AnalysisConfig.Builder(source.getAnalysisConfig()); + analysisConfigBuilder.setCategorizationFilters(categorizationFilters); + builder.setAnalysisConfig(analysisConfigBuilder); + } + if (customSettings != null) { + builder.setCustomSettings(customSettings); + } + if (modelSnapshotId != null) { + builder.setModelSnapshotId(modelSnapshotId); + } + if (modelSnapshotMinVersion != null) { + builder.setModelSnapshotMinVersion(modelSnapshotMinVersion); + } + if (establishedModelMemory != null) { + // An established model memory of zero means we don't actually know the established model memory + if (establishedModelMemory > 0) { + builder.setEstablishedModelMemory(establishedModelMemory); + } else { + builder.setEstablishedModelMemory(null); + } + } + return builder.build(); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other instanceof JobUpdate == false) { + return false; + } + + JobUpdate that = (JobUpdate) other; + + return Objects.equals(this.jobId, that.jobId) + && Objects.equals(this.groups, that.groups) + && Objects.equals(this.description, that.description) + && 
Objects.equals(this.detectorUpdates, that.detectorUpdates) + && Objects.equals(this.modelPlotConfig, that.modelPlotConfig) + && Objects.equals(this.analysisLimits, that.analysisLimits) + && Objects.equals(this.renormalizationWindowDays, that.renormalizationWindowDays) + && Objects.equals(this.backgroundPersistInterval, that.backgroundPersistInterval) + && Objects.equals(this.modelSnapshotRetentionDays, that.modelSnapshotRetentionDays) + && Objects.equals(this.resultsRetentionDays, that.resultsRetentionDays) + && Objects.equals(this.categorizationFilters, that.categorizationFilters) + && Objects.equals(this.customSettings, that.customSettings) + && Objects.equals(this.modelSnapshotId, that.modelSnapshotId) + && Objects.equals(this.modelSnapshotMinVersion, that.modelSnapshotMinVersion) + && Objects.equals(this.establishedModelMemory, that.establishedModelMemory); + } + + @Override + public int hashCode() { + return Objects.hash(jobId, groups, description, detectorUpdates, modelPlotConfig, analysisLimits, renormalizationWindowDays, + backgroundPersistInterval, modelSnapshotRetentionDays, resultsRetentionDays, categorizationFilters, customSettings, + modelSnapshotId, modelSnapshotMinVersion, establishedModelMemory); + } + + public static class DetectorUpdate implements Writeable, ToXContentObject { + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("detector_update", a -> new DetectorUpdate((int) a[0], (String) a[1], + (List) a[2])); + + static { + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), Detector.DETECTOR_INDEX); + PARSER.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), Job.DESCRIPTION); + PARSER.declareObjectArray(ConstructingObjectParser.optionalConstructorArg(), (parser, parseFieldMatcher) -> + DetectionRule.CONFIG_PARSER.apply(parser, parseFieldMatcher).build(), Detector.RULES_FIELD); + } + + private int detectorIndex; + private String description; + private List rules; + + public DetectorUpdate(int detectorIndex, String description, List rules) { + this.detectorIndex = detectorIndex; + this.description = description; + this.rules = rules; + } + + public DetectorUpdate(StreamInput in) throws IOException { + detectorIndex = in.readInt(); + description = in.readOptionalString(); + if (in.readBoolean()) { + rules = in.readList(DetectionRule::new); + } else { + rules = null; + } + } + + public int getDetectorIndex() { + return detectorIndex; + } + + public String getDescription() { + return description; + } + + public List getRules() { + return rules; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeInt(detectorIndex); + out.writeOptionalString(description); + out.writeBoolean(rules != null); + if (rules != null) { + out.writeList(rules); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + builder.field(Detector.DETECTOR_INDEX.getPreferredName(), detectorIndex); + if (description != null) { + builder.field(Job.DESCRIPTION.getPreferredName(), description); + } + if (rules != null) { + builder.field(Detector.RULES_FIELD.getPreferredName(), rules); + } + builder.endObject(); + + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(detectorIndex, description, rules); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other instanceof DetectorUpdate == false) { 
+ return false; + } + + DetectorUpdate that = (DetectorUpdate) other; + return this.detectorIndex == that.detectorIndex && Objects.equals(this.description, that.description) + && Objects.equals(this.rules, that.rules); + } + } + + public static class Builder { + + private String jobId; + private List groups; + private String description; + private List detectorUpdates; + private ModelPlotConfig modelPlotConfig; + private AnalysisLimits analysisLimits; + private Long renormalizationWindowDays; + private TimeValue backgroundPersistInterval; + private Long modelSnapshotRetentionDays; + private Long resultsRetentionDays; + private List categorizationFilters; + private Map customSettings; + private String modelSnapshotId; + private Version modelSnapshotMinVersion; + private Long establishedModelMemory; + + public Builder(String jobId) { + this.jobId = jobId; + } + + public Builder setJobId(String jobId) { + this.jobId = jobId; + return this; + } + + public Builder setGroups(List groups) { + this.groups = groups; + return this; + } + + public Builder setDescription(String description) { + this.description = description; + return this; + } + + public Builder setDetectorUpdates(List detectorUpdates) { + this.detectorUpdates = detectorUpdates; + return this; + } + + public Builder setModelPlotConfig(ModelPlotConfig modelPlotConfig) { + this.modelPlotConfig = modelPlotConfig; + return this; + } + + public Builder setAnalysisLimits(AnalysisLimits analysisLimits) { + this.analysisLimits = analysisLimits; + return this; + } + + public Builder setRenormalizationWindowDays(Long renormalizationWindowDays) { + this.renormalizationWindowDays = renormalizationWindowDays; + return this; + } + + public Builder setBackgroundPersistInterval(TimeValue backgroundPersistInterval) { + this.backgroundPersistInterval = backgroundPersistInterval; + return this; + } + + public Builder setModelSnapshotRetentionDays(Long modelSnapshotRetentionDays) { + this.modelSnapshotRetentionDays = modelSnapshotRetentionDays; + return this; + } + + public Builder setResultsRetentionDays(Long resultsRetentionDays) { + this.resultsRetentionDays = resultsRetentionDays; + return this; + } + + public Builder setCategorizationFilters(List categorizationFilters) { + this.categorizationFilters = categorizationFilters; + return this; + } + + public Builder setCustomSettings(Map customSettings) { + this.customSettings = customSettings; + return this; + } + + public Builder setModelSnapshotId(String modelSnapshotId) { + this.modelSnapshotId = modelSnapshotId; + return this; + } + + public Builder setModelSnapshotMinVersion(Version modelSnapshotMinVersion) { + this.modelSnapshotMinVersion = modelSnapshotMinVersion; + return this; + } + + public Builder setModelSnapshotMinVersion(String modelSnapshotMinVersion) { + this.modelSnapshotMinVersion = Version.fromString(modelSnapshotMinVersion); + return this; + } + + public Builder setEstablishedModelMemory(Long establishedModelMemory) { + this.establishedModelMemory = establishedModelMemory; + return this; + } + + public JobUpdate build() { + return new JobUpdate(jobId, groups, description, detectorUpdates, modelPlotConfig, analysisLimits, backgroundPersistInterval, + renormalizationWindowDays, resultsRetentionDays, modelSnapshotRetentionDays, categorizationFilters, customSettings, + modelSnapshotId, modelSnapshotMinVersion, establishedModelMemory); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java new file mode 100644 index 0000000000000..de6ee3d509c69 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java @@ -0,0 +1,143 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.MlMetaIndex; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +public class MlFilter implements ToXContentObject, Writeable { + + public static final String DOCUMENT_ID_PREFIX = "filter_"; + + public static final String FILTER_TYPE = "filter"; + + public static final ParseField TYPE = new ParseField("type"); + public static final ParseField ID = new ParseField("filter_id"); + public static final ParseField ITEMS = new ParseField("items"); + + // For QueryPage + public static final ParseField RESULTS_FIELD = new ParseField("filters"); + + public static final ObjectParser STRICT_PARSER = createParser(false); + public static final ObjectParser LENIENT_PARSER = createParser(true); + + private static ObjectParser createParser(boolean ignoreUnknownFields) { + ObjectParser parser = new ObjectParser<>(TYPE.getPreferredName(), ignoreUnknownFields, Builder::new); + + parser.declareString((builder, s) -> {}, TYPE); + parser.declareString(Builder::setId, ID); + parser.declareStringArray(Builder::setItems, ITEMS); + + return parser; + } + + private final String id; + private final List items; + + public MlFilter(String id, List items) { + this.id = Objects.requireNonNull(id, ID.getPreferredName() + " must not be null"); + this.items = Objects.requireNonNull(items, ITEMS.getPreferredName() + " must not be null"); + } + + public MlFilter(StreamInput in) throws IOException { + id = in.readString(); + items = Arrays.asList(in.readStringArray()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(id); + out.writeStringArray(items.toArray(new String[items.size()])); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ID.getPreferredName(), id); + builder.field(ITEMS.getPreferredName(), items); + if (params.paramAsBoolean(MlMetaIndex.INCLUDE_TYPE_KEY, false)) { + builder.field(TYPE.getPreferredName(), FILTER_TYPE); + } + builder.endObject(); + return builder; + } + + public String getId() { + return id; + } + + public List getItems() { + return new ArrayList<>(items); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + + if (!(obj instanceof MlFilter)) { + return false; + } + + MlFilter other = (MlFilter) obj; + return id.equals(other.id) && items.equals(other.items); + 
} + + @Override + public int hashCode() { + return Objects.hash(id, items); + } + + public String documentId() { + return documentId(id); + } + + public static String documentId(String filterId) { + return DOCUMENT_ID_PREFIX + filterId; + } + + public static class Builder { + + private String id; + private List items = Collections.emptyList(); + + public Builder setId(String id) { + this.id = id; + return this; + } + + private Builder() {} + + @Nullable + public String getId() { + return id; + } + + public Builder setItems(List items) { + this.items = items; + return this; + } + + public MlFilter build() { + return new MlFilter(id, items); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/ModelPlotConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/ModelPlotConfig.java new file mode 100644 index 0000000000000..bde19aa2786a4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/ModelPlotConfig.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.MlParserType; + +import java.io.IOException; +import java.util.EnumMap; +import java.util.Map; +import java.util.Objects; + +public class ModelPlotConfig implements ToXContentObject, Writeable { + + private static final ParseField TYPE_FIELD = new ParseField("model_plot_config"); + private static final ParseField ENABLED_FIELD = new ParseField("enabled"); + public static final ParseField TERMS_FIELD = new ParseField("terms"); + + // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly + public static final ConstructingObjectParser METADATA_PARSER = + new ConstructingObjectParser<>(TYPE_FIELD.getPreferredName(), true, + a -> new ModelPlotConfig((boolean) a[0], (String) a[1])); + public static final ConstructingObjectParser CONFIG_PARSER = + new ConstructingObjectParser<>(TYPE_FIELD.getPreferredName(), false, + a -> new ModelPlotConfig((boolean) a[0], (String) a[1])); + public static final Map> PARSERS = + new EnumMap<>(MlParserType.class); + + static { + PARSERS.put(MlParserType.METADATA, METADATA_PARSER); + PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER); + for (MlParserType parserType : MlParserType.values()) { + ConstructingObjectParser parser = PARSERS.get(parserType); + assert parser != null; + parser.declareBoolean(ConstructingObjectParser.constructorArg(), ENABLED_FIELD); + parser.declareString(ConstructingObjectParser.optionalConstructorArg(), TERMS_FIELD); + } + } + + private final boolean enabled; + private final String terms; + + public ModelPlotConfig() { + this(true, null); + } + + public ModelPlotConfig(boolean enabled) { + this(false, null); + } + + public ModelPlotConfig(boolean enabled, String terms) { + 
} + + @Override + public int hashCode() { + return Objects.hash(id, items); + } + + public String documentId() { + return documentId(id); + } + + public static String documentId(String filterId) { + return DOCUMENT_ID_PREFIX + filterId; + } + + public static class Builder { + + private String id; + private List items = Collections.emptyList(); + + public Builder setId(String id) { + this.id = id; + return this; + } + + private Builder() {} + + @Nullable + public String getId() { + return id; + } + + public Builder setItems(List items) { + this.items = items; + return this; + } + + public MlFilter build() { + return new MlFilter(id, items); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/ModelPlotConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/ModelPlotConfig.java new file mode 100644 index 0000000000000..bde19aa2786a4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/ModelPlotConfig.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.MlParserType; + +import java.io.IOException; +import java.util.EnumMap; +import java.util.Map; +import java.util.Objects; + +public class ModelPlotConfig implements ToXContentObject, Writeable { + + private static final ParseField TYPE_FIELD = new ParseField("model_plot_config"); + private static final ParseField ENABLED_FIELD = new ParseField("enabled"); + public static final ParseField TERMS_FIELD = new ParseField("terms"); + + // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly + public static final ConstructingObjectParser METADATA_PARSER = + new ConstructingObjectParser<>(TYPE_FIELD.getPreferredName(), true, + a -> new ModelPlotConfig((boolean) a[0], (String) a[1])); + public static final ConstructingObjectParser CONFIG_PARSER = + new ConstructingObjectParser<>(TYPE_FIELD.getPreferredName(), false, + a -> new ModelPlotConfig((boolean) a[0], (String) a[1])); + public static final Map> PARSERS = + new EnumMap<>(MlParserType.class); + + static { + PARSERS.put(MlParserType.METADATA, METADATA_PARSER); + PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER); + for (MlParserType parserType : MlParserType.values()) { + ConstructingObjectParser parser = PARSERS.get(parserType); + assert parser != null; + parser.declareBoolean(ConstructingObjectParser.constructorArg(), ENABLED_FIELD); + parser.declareString(ConstructingObjectParser.optionalConstructorArg(), TERMS_FIELD); + } + } + + private final boolean enabled; + private final String terms; + + public ModelPlotConfig() { + this(true, null); + } + + public ModelPlotConfig(boolean enabled) { + this(enabled, null); + } + + public ModelPlotConfig(boolean enabled, String terms) { 
this.enabled = enabled; + this.terms = terms; + } + + public ModelPlotConfig(StreamInput in) throws IOException { + enabled = in.readBoolean(); + terms = in.readOptionalString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(enabled); + out.writeOptionalString(terms); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ENABLED_FIELD.getPreferredName(), enabled); + if (terms != null) { + builder.field(TERMS_FIELD.getPreferredName(), terms); + } + builder.endObject(); + return builder; + } + + public boolean isEnabled() { + return enabled; + } + + public String getTerms() { + return this.terms; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other instanceof ModelPlotConfig == false) { + return false; + } + + ModelPlotConfig that = (ModelPlotConfig) other; + return this.enabled == that.enabled && Objects.equals(this.terms, that.terms); + } + + @Override + public int hashCode() { + return Objects.hash(enabled, terms); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Operator.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Operator.java new file mode 100644 index 0000000000000..5813a10c93bb3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Operator.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Locale; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Enum representing logical comparisons on doubles + */ +public enum Operator implements Writeable { + EQ { + @Override + public boolean test(double lhs, double rhs) { + return Double.compare(lhs, rhs) == 0; + } + }, + GT { + @Override + public boolean test(double lhs, double rhs) { + return Double.compare(lhs, rhs) > 0; + } + }, + GTE { + @Override + public boolean test(double lhs, double rhs) { + return Double.compare(lhs, rhs) >= 0; + } + }, + LT { + @Override + public boolean test(double lhs, double rhs) { + return Double.compare(lhs, rhs) < 0; + } + }, + LTE { + @Override + public boolean test(double lhs, double rhs) { + return Double.compare(lhs, rhs) <= 0; + } + }, + MATCH { + @Override + public boolean match(Pattern pattern, String field) { + Matcher match = pattern.matcher(field); + return match.matches(); + } + + @Override + public boolean expectsANumericArgument() { + return false; + } + }; + + public static final ParseField OPERATOR_FIELD = new ParseField("operator"); + + public boolean test(double lhs, double rhs) { + return false; + } + + public boolean match(Pattern pattern, String field) { + return false; + } + + public boolean expectsANumericArgument() { + return true; + } + + public static Operator fromString(String name) { + return valueOf(name.trim().toUpperCase(Locale.ROOT)); + } + + public static Operator readFromStream(StreamInput 
in) throws IOException { + return in.readEnum(Operator.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(this); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/RuleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/RuleAction.java new file mode 100644 index 0000000000000..607961140be4e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/RuleAction.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Locale; + +public enum RuleAction implements Writeable { + FILTER_RESULTS, + SKIP_SAMPLING; + + /** + * Case-insensitive from string method. + * + * @param value String representation + * @return The rule action + */ + public static RuleAction fromString(String value) { + return RuleAction.valueOf(value.toUpperCase(Locale.ROOT)); + } + + public static RuleAction readFromStream(StreamInput in) throws IOException { + return in.readEnum(RuleAction.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(this); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/RuleCondition.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/RuleCondition.java new file mode 100644 index 0000000000000..6ca24c518d8fa --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/RuleCondition.java @@ -0,0 +1,281 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.MlParserType; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.EnumMap; +import java.util.EnumSet; +import java.util.Map; +import java.util.Objects; + +public class RuleCondition implements ToXContentObject, Writeable { + public static final ParseField TYPE_FIELD = new ParseField("type", "condition_type"); + public static final ParseField RULE_CONDITION_FIELD = new ParseField("rule_condition"); + public static final ParseField FIELD_NAME_FIELD = new ParseField("field_name"); + public static final ParseField FIELD_VALUE_FIELD = new ParseField("field_value"); + public static final ParseField FILTER_ID_FIELD = new ParseField(MlFilter.ID.getPreferredName(), "value_filter"); + + // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly + public static final ConstructingObjectParser METADATA_PARSER = + new ConstructingObjectParser<>(RULE_CONDITION_FIELD.getPreferredName(), true, + a -> new RuleCondition((RuleConditionType) a[0], (String) a[1], (String) a[2], (Condition) a[3], (String) a[4])); + public static final ConstructingObjectParser CONFIG_PARSER = + new ConstructingObjectParser<>(RULE_CONDITION_FIELD.getPreferredName(), false, + a -> new RuleCondition((RuleConditionType) a[0], (String) a[1], (String) a[2], (Condition) a[3], (String) a[4])); + public static final Map> PARSERS = + new EnumMap<>(MlParserType.class); + + static { + PARSERS.put(MlParserType.METADATA, METADATA_PARSER); + PARSERS.put(MlParserType.CONFIG, CONFIG_PARSER); + for (MlParserType parserType : MlParserType.values()) { + ConstructingObjectParser parser = PARSERS.get(parserType); + assert parser != null; + parser.declareField(ConstructingObjectParser.constructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return RuleConditionType.fromString(p.text()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, TYPE_FIELD, ValueType.STRING); + parser.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), FIELD_NAME_FIELD); + parser.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), FIELD_VALUE_FIELD); + parser.declareObject(ConstructingObjectParser.optionalConstructorArg(), Condition.PARSER, Condition.CONDITION_FIELD); + parser.declareStringOrNull(ConstructingObjectParser.optionalConstructorArg(), FILTER_ID_FIELD); + } + } + + private final RuleConditionType type; + private final String fieldName; + private final String fieldValue; + private final Condition condition; + private final String filterId; + + public RuleCondition(StreamInput in) throws IOException { + type = RuleConditionType.readFromStream(in); + condition = 
in.readOptionalWriteable(Condition::new); + fieldName = in.readOptionalString(); + fieldValue = in.readOptionalString(); + filterId = in.readOptionalString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + type.writeTo(out); + out.writeOptionalWriteable(condition); + out.writeOptionalString(fieldName); + out.writeOptionalString(fieldValue); + out.writeOptionalString(filterId); + } + + RuleCondition(RuleConditionType type, String fieldName, String fieldValue, Condition condition, String filterId) { + this.type = type; + this.fieldName = fieldName; + this.fieldValue = fieldValue; + this.condition = condition; + this.filterId = filterId; + + verifyFieldsBoundToType(this); + verifyFieldValueRequiresFieldName(this); + } + + public RuleCondition(RuleCondition ruleCondition) { + this.type = ruleCondition.type; + this.fieldName = ruleCondition.fieldName; + this.fieldValue = ruleCondition.fieldValue; + this.condition = ruleCondition.condition; + this.filterId = ruleCondition.filterId; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(TYPE_FIELD.getPreferredName(), type); + if (condition != null) { + builder.field(Condition.CONDITION_FIELD.getPreferredName(), condition); + } + if (fieldName != null) { + builder.field(FIELD_NAME_FIELD.getPreferredName(), fieldName); + } + if (fieldValue != null) { + builder.field(FIELD_VALUE_FIELD.getPreferredName(), fieldValue); + } + if (filterId != null) { + builder.field(FILTER_ID_FIELD.getPreferredName(), filterId); + } + builder.endObject(); + return builder; + } + + public RuleConditionType getType() { + return type; + } + + /** + * The field name for which the rule applies. Can be null, meaning rule + * applies to all results. + */ + public String getFieldName() { + return fieldName; + } + + /** + * The value of the field name for which the rule applies. When set, the + * rule applies only to the results that have the fieldName/fieldValue pair. + * When null, the rule applies to all values for of the specified field + * name. Only applicable when fieldName is not null. + */ + public String getFieldValue() { + return fieldValue; + } + + public Condition getCondition() { + return condition; + } + + /** + * The unique identifier of a filter. Required when the rule type is + * categorical. Should be null for all other types. 
+ */ + public String getFilterId() { + return filterId; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj instanceof RuleCondition == false) { + return false; + } + + RuleCondition other = (RuleCondition) obj; + return Objects.equals(type, other.type) && Objects.equals(fieldName, other.fieldName) + && Objects.equals(fieldValue, other.fieldValue) && Objects.equals(condition, other.condition) + && Objects.equals(filterId, other.filterId); + } + + @Override + public int hashCode() { + return Objects.hash(type, fieldName, fieldValue, condition, filterId); + } + + public static RuleCondition createCategorical(String fieldName, String filterId) { + return new RuleCondition(RuleConditionType.CATEGORICAL, fieldName, null, null, filterId); + } + + public static RuleCondition createNumerical(RuleConditionType conditionType, String fieldName, String fieldValue, + Condition condition ) { + if (conditionType.isNumerical() == false) { + throw new IllegalStateException("Rule condition type [" + conditionType + "] not valid for a numerical condition"); + } + return new RuleCondition(conditionType, fieldName, fieldValue, condition, null); + } + + public static RuleCondition createTime(Operator operator, long epochSeconds) { + return new RuleCondition(RuleConditionType.TIME, null, null, new Condition(operator, Long.toString(epochSeconds)), null); + } + + private static void verifyFieldsBoundToType(RuleCondition ruleCondition) throws ElasticsearchParseException { + switch (ruleCondition.getType()) { + case CATEGORICAL: + case CATEGORICAL_COMPLEMENT: + verifyCategorical(ruleCondition); + break; + case NUMERICAL_ACTUAL: + case NUMERICAL_TYPICAL: + case NUMERICAL_DIFF_ABS: + verifyNumerical(ruleCondition); + break; + case TIME: + verifyTimeRule(ruleCondition); + break; + default: + throw new IllegalStateException(); + } + } + + private static void verifyCategorical(RuleCondition ruleCondition) throws ElasticsearchParseException { + checkCategoricalHasNoField(Condition.CONDITION_FIELD.getPreferredName(), ruleCondition.getCondition()); + checkCategoricalHasNoField(RuleCondition.FIELD_VALUE_FIELD.getPreferredName(), ruleCondition.getFieldValue()); + checkCategoricalHasField(FILTER_ID_FIELD.getPreferredName(), ruleCondition.getFilterId()); + } + + private static void checkCategoricalHasNoField(String fieldName, Object fieldValue) throws ElasticsearchParseException { + if (fieldValue != null) { + String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_CONDITION_CATEGORICAL_INVALID_OPTION, fieldName); + throw ExceptionsHelper.badRequestException(msg); + } + } + + private static void checkCategoricalHasField(String fieldName, Object fieldValue) throws ElasticsearchParseException { + if (fieldValue == null) { + String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_CONDITION_CATEGORICAL_MISSING_OPTION, fieldName); + throw ExceptionsHelper.badRequestException(msg); + } + } + + private static void verifyNumerical(RuleCondition ruleCondition) throws ElasticsearchParseException { + checkNumericalHasNoField(FILTER_ID_FIELD.getPreferredName(), ruleCondition.getFilterId()); + checkNumericalHasField(Condition.CONDITION_FIELD.getPreferredName(), ruleCondition.getCondition()); + if (ruleCondition.getFieldName() != null && ruleCondition.getFieldValue() == null) { + String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_CONDITION_NUMERICAL_WITH_FIELD_NAME_REQUIRES_FIELD_VALUE); + throw ExceptionsHelper.badRequestException(msg); + } + 
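A short sketch of the factory methods above, using made-up field, filter and value names; it assumes the `Condition(Operator, String)` constructor used by `createTime` is publicly accessible.

```java
import org.elasticsearch.xpack.core.ml.job.config.Condition;
import org.elasticsearch.xpack.core.ml.job.config.Operator;
import org.elasticsearch.xpack.core.ml.job.config.RuleCondition;
import org.elasticsearch.xpack.core.ml.job.config.RuleConditionType;

public class RuleConditionExamples {
    public static void main(String[] args) {
        // Numerical condition: holds when the actual value is greater than 100,
        // scoped to results where the "airline" field has the value "AAL"
        RuleCondition numerical = RuleCondition.createNumerical(
                RuleConditionType.NUMERICAL_ACTUAL, "airline", "AAL",
                new Condition(Operator.GT, "100"));

        // Categorical condition: applies to values of the "domain" field that
        // match the MlFilter with id "safe_domains"
        RuleCondition categorical = RuleCondition.createCategorical("domain", "safe_domains");

        // Time condition: applies to buckets before a given epoch-seconds timestamp
        RuleCondition time = RuleCondition.createTime(Operator.LT, 1525132800L);

        System.out.println(numerical.getType() + " " + categorical.getType() + " " + time.getType());
    }
}
```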
checkNumericalConditionOparatorsAreValid(ruleCondition); + } + + private static void checkNumericalHasNoField(String fieldName, Object fieldValue) throws ElasticsearchParseException { + if (fieldValue != null) { + String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_CONDITION_NUMERICAL_INVALID_OPTION, fieldName); + throw ExceptionsHelper.badRequestException(msg); + } + } + + private static void checkNumericalHasField(String fieldName, Object fieldValue) throws ElasticsearchParseException { + if (fieldValue == null) { + String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_CONDITION_NUMERICAL_MISSING_OPTION, fieldName); + throw ExceptionsHelper.badRequestException(msg); + } + } + + private static void verifyFieldValueRequiresFieldName(RuleCondition ruleCondition) throws ElasticsearchParseException { + if (ruleCondition.getFieldValue() != null && ruleCondition.getFieldName() == null) { + String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_CONDITION_MISSING_FIELD_NAME, + ruleCondition.getFieldValue()); + throw ExceptionsHelper.badRequestException(msg); + } + } + + static EnumSet VALID_CONDITION_OPERATORS = EnumSet.of(Operator.LT, Operator.LTE, Operator.GT, Operator.GTE); + + private static void checkNumericalConditionOparatorsAreValid(RuleCondition ruleCondition) throws ElasticsearchParseException { + Operator operator = ruleCondition.getCondition().getOperator(); + if (!VALID_CONDITION_OPERATORS.contains(operator)) { + String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_CONDITION_NUMERICAL_INVALID_OPERATOR, operator); + throw ExceptionsHelper.badRequestException(msg); + } + } + + private static void verifyTimeRule(RuleCondition ruleCondition) { + checkNumericalConditionOparatorsAreValid(ruleCondition); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/RuleConditionType.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/RuleConditionType.java new file mode 100644 index 0000000000000..aa563d001b5ca --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/RuleConditionType.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Locale; + +public enum RuleConditionType implements Writeable { + CATEGORICAL(false, true), + NUMERICAL_ACTUAL(true, false), + NUMERICAL_TYPICAL(true, false), + NUMERICAL_DIFF_ABS(true, false), + TIME(false, false), + CATEGORICAL_COMPLEMENT(false, true); + + private final boolean isNumerical; + private final boolean isCategorical; + + RuleConditionType(boolean isNumerical, boolean isCategorical) { + this.isNumerical = isNumerical; + this.isCategorical = isCategorical; + } + + public boolean isNumerical() { + return isNumerical; + } + + public boolean isCategorical() { + return isCategorical; + } + + /** + * Case-insensitive from string method. 
+ * + * @param value + * String representation + * @return The condition type + */ + public static RuleConditionType fromString(String value) { + return RuleConditionType.valueOf(value.toUpperCase(Locale.ROOT)); + } + + public static RuleConditionType readFromStream(StreamInput in) throws IOException { + return in.readEnum(RuleConditionType.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (this == CATEGORICAL_COMPLEMENT && out.getVersion().before(Version.V_6_3_0)) { + out.writeEnum(CATEGORICAL); + } else { + out.writeEnum(this); + } + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/groups/GroupOrJob.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/groups/GroupOrJob.java new file mode 100644 index 0000000000000..d5be3ffd08871 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/groups/GroupOrJob.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.groups; + +import org.elasticsearch.xpack.core.ml.job.config.Job; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +/** + * An interface to represent either a job or a group of jobs + */ +interface GroupOrJob { + + boolean isGroup(); + List jobs(); + + final class Group implements GroupOrJob { + + private final List jobs; + + Group(List jobs) { + this.jobs = Collections.unmodifiableList(jobs); + } + + @Override + public boolean isGroup() { + return true; + } + + @Override + public List jobs() { + return jobs; + } + } + + final class SingleJob implements GroupOrJob { + + private final Job job; + + SingleJob(Job job) { + this.job = Objects.requireNonNull(job); + } + + @Override + public boolean isGroup() { + return false; + } + + @Override + public List jobs() { + return Collections.singletonList(job); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/groups/GroupOrJobLookup.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/groups/GroupOrJobLookup.java new file mode 100644 index 0000000000000..fde28a84f8d2e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/groups/GroupOrJobLookup.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.job.groups; + +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.NameResolver; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.stream.Collectors; + +/** + * A lookup that allows expanding expressions that may consist of job + * IDs, job group names, wildcard patterns or a comma separated combination + * of the aforementioned to the matching job IDs. + * The lookup is immutable. + */ +public class GroupOrJobLookup { + + private final SortedMap groupOrJobLookup; + + public GroupOrJobLookup(Collection jobs) { + groupOrJobLookup = new TreeMap<>(); + jobs.forEach(this::put); + } + + private void put(Job job) { + if (groupOrJobLookup.containsKey(job.getId())) { + throw new ResourceAlreadyExistsException(Messages.getMessage(Messages.JOB_AND_GROUP_NAMES_MUST_BE_UNIQUE, job.getId())); + } + groupOrJobLookup.put(job.getId(), new GroupOrJob.SingleJob(job)); + for (String groupName : job.getGroups()) { + GroupOrJob oldGroup = groupOrJobLookup.get(groupName); + if (oldGroup == null) { + groupOrJobLookup.put(groupName, new GroupOrJob.Group(Collections.singletonList(job))); + } else { + if (oldGroup.isGroup() == false) { + throw new ResourceAlreadyExistsException(Messages.getMessage(Messages.JOB_AND_GROUP_NAMES_MUST_BE_UNIQUE, groupName)); + } + List groupJobs = new ArrayList<>(oldGroup.jobs()); + groupJobs.add(job); + groupOrJobLookup.put(groupName, new GroupOrJob.Group(groupJobs)); + } + } + } + + public Set expandJobIds(String expression, boolean allowNoJobs) { + return new GroupOrJobResolver().expand(expression, allowNoJobs); + } + + public boolean isGroupOrJob(String id) { + return groupOrJobLookup.containsKey(id); + } + + private class GroupOrJobResolver extends NameResolver { + + private GroupOrJobResolver() { + super(ExceptionsHelper::missingJobException); + } + + @Override + protected Set keys() { + return groupOrJobLookup.keySet(); + } + + @Override + protected Set nameSet() { + return groupOrJobLookup.values().stream() + .filter(groupOrJob -> groupOrJob.isGroup() == false) + .map(groupOrJob -> groupOrJob.jobs().get(0).getId()) + .collect(Collectors.toSet()); + } + + @Override + protected List lookup(String key) { + GroupOrJob groupOrJob = groupOrJobLookup.get(key); + return groupOrJob == null ? Collections.emptyList() : groupOrJob.jobs().stream().map(Job::getId).collect(Collectors.toList()); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java new file mode 100644 index 0000000000000..7e5dc231e057a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -0,0 +1,212 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.job.messages; + +import org.elasticsearch.xpack.core.ml.MachineLearningField; + +import java.text.MessageFormat; +import java.util.Locale; + +/** + * Log and audit message strings + */ +public final class Messages { + + public static final String DATAFEED_AGGREGATIONS_REQUIRES_JOB_WITH_SUMMARY_COUNT_FIELD = + "A job configured with a datafeed with aggregations must set summary_count_field_name; use doc_count or suitable alternative"; + public static final String DATAFEED_CANNOT_DELETE_IN_CURRENT_STATE = "Cannot delete datafeed [{0}] while its status is {1}"; + public static final String DATAFEED_CANNOT_UPDATE_IN_CURRENT_STATE = "Cannot update datafeed [{0}] while its status is {1}"; + public static final String DATAFEED_CONFIG_CANNOT_USE_SCRIPT_FIELDS_WITH_AGGS = + "script_fields cannot be used in combination with aggregations"; + public static final String DATAFEED_CONFIG_INVALID_OPTION_VALUE = "Invalid {0} value ''{1}'' in datafeed configuration"; + public static final String DATAFEED_DOES_NOT_SUPPORT_JOB_WITH_LATENCY = "A job configured with datafeed cannot support latency"; + public static final String DATAFEED_NOT_FOUND = "No datafeed with id [{0}] exists"; + public static final String DATAFEED_AGGREGATIONS_REQUIRES_DATE_HISTOGRAM = + "A date_histogram (or histogram) aggregation is required"; + public static final String DATAFEED_AGGREGATIONS_MAX_ONE_DATE_HISTOGRAM = + "Aggregations can only have 1 date_histogram or histogram aggregation"; + public static final String DATAFEED_AGGREGATIONS_REQUIRES_DATE_HISTOGRAM_NO_SIBLINGS = + "The date_histogram (or histogram) aggregation cannot have sibling aggregations"; + public static final String DATAFEED_AGGREGATIONS_INTERVAL_MUST_BE_GREATER_THAN_ZERO = + "Aggregation interval must be greater than 0"; + public static final String DATAFEED_AGGREGATIONS_INTERVAL_MUST_BE_DIVISOR_OF_BUCKET_SPAN = + "Aggregation interval [{0}] must be a divisor of the bucket_span [{1}]"; + public static final String DATAFEED_AGGREGATIONS_INTERVAL_MUST_LESS_OR_EQUAL_TO_BUCKET_SPAN = + "Aggregation interval [{0}] must be less than or equal to the bucket_span [{1}]"; + public static final String DATAFEED_DATA_HISTOGRAM_MUST_HAVE_NESTED_MAX_AGGREGATION = + "Date histogram must have nested max aggregation for time_field [{0}]"; + public static final String DATAFEED_MISSING_MAX_AGGREGATION_FOR_TIME_FIELD = "Missing max aggregation for time_field [{0}]"; + public static final String DATAFEED_FREQUENCY_MUST_BE_MULTIPLE_OF_AGGREGATIONS_INTERVAL = + "Datafeed frequency [{0}] must be a multiple of the aggregation interval [{1}]"; + + public static final String INCONSISTENT_ID = + "Inconsistent {0}; ''{1}'' specified in the body differs from ''{2}'' specified as a URL argument"; + public static final String INVALID_ID = "Invalid {0}; ''{1}'' can contain lowercase alphanumeric (a-z and 0-9), hyphens or " + + "underscores; must start and end with alphanumeric"; + public static final String INVALID_GROUP = "Invalid group id ''{0}''; must be non-empty string and may contain lowercase alphanumeric" + + " (a-z and 0-9), hyphens or underscores; must start and end with alphanumeric"; + + public static final String JOB_AUDIT_DATAFEED_DATA_SEEN_AGAIN = "Datafeed has started retrieving data again"; + public static final String JOB_AUDIT_CREATED = "Job created"; + public static final String JOB_AUDIT_UPDATED = "Job updated: {0}"; + public static final String JOB_AUDIT_CLOSING = "Job is closing"; + public static final String 
JOB_AUDIT_FORCE_CLOSING = "Job is closing (forced)"; + public static final String JOB_AUDIT_DATAFEED_CONTINUED_REALTIME = "Datafeed continued in real-time"; + public static final String JOB_AUDIT_DATAFEED_DATA_ANALYSIS_ERROR = "Datafeed is encountering errors submitting data for analysis: {0}"; + public static final String JOB_AUDIT_DATAFEED_DATA_EXTRACTION_ERROR = "Datafeed is encountering errors extracting data: {0}"; + public static final String JOB_AUDIT_DATAFEED_LOOKBACK_COMPLETED = "Datafeed lookback completed"; + public static final String JOB_AUDIT_DATAFEED_LOOKBACK_NO_DATA = "Datafeed lookback retrieved no data"; + public static final String JOB_AUDIT_DATAFEED_NO_DATA = "Datafeed has been retrieving no data for a while"; + public static final String JOB_AUDIT_DATAFEED_RECOVERED = "Datafeed has recovered data extraction and analysis"; + public static final String JOB_AUDIT_DATAFEED_STARTED_FROM_TO = "Datafeed started (from: {0} to: {1}) with frequency [{2}]"; + public static final String JOB_AUDIT_DATAFEED_STARTED_REALTIME = "Datafeed started in real-time"; + public static final String JOB_AUDIT_DATAFEED_STOPPED = "Datafeed stopped"; + public static final String JOB_AUDIT_DELETED = "Job deleted"; + public static final String JOB_AUDIT_KILLING = "Killing job"; + public static final String JOB_AUDIT_OLD_RESULTS_DELETED = "Deleted results prior to {1}"; + public static final String JOB_AUDIT_REVERTED = "Job model snapshot reverted to ''{0}''"; + public static final String JOB_AUDIT_SNAPSHOT_DELETED = "Model snapshot [{0}] with description ''{1}'' deleted"; + public static final String JOB_AUDIT_FILTER_UPDATED_ON_PROCESS = "Updated filter [{0}] in running process"; + public static final String JOB_AUDIT_CALENDARS_UPDATED_ON_PROCESS = "Updated calendars in running process"; + public static final String JOB_AUDIT_MEMORY_STATUS_SOFT_LIMIT = "Job memory status changed to soft_limit; memory pruning will now be " + + "more aggressive"; + public static final String JOB_AUDIT_MEMORY_STATUS_HARD_LIMIT = "Job memory status changed to hard_limit at {0}; adjust the " + + "analysis_limits.model_memory_limit setting to ensure all data is analyzed"; + + public static final String JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_DUPLICATES = "categorization_filters contain duplicates"; + public static final String JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_EMPTY = + "categorization_filters are not allowed to contain empty strings"; + public static final String JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_INVALID_REGEX = + "categorization_filters contains invalid regular expression ''{0}''"; + public static final String JOB_CONFIG_CATEGORIZATION_FILTERS_INCOMPATIBLE_WITH_CATEGORIZATION_ANALYZER = + "categorization_filters cannot be used with categorization_analyzer - " + + "instead specify them as pattern_replace char_filters in the analyzer"; + public static final String JOB_CONFIG_CATEGORIZATION_FILTERS_REQUIRE_CATEGORIZATION_FIELD_NAME = + "categorization_filters require setting categorization_field_name"; + public static final String JOB_CONFIG_CATEGORIZATION_ANALYZER_REQUIRES_CATEGORIZATION_FIELD_NAME = + "categorization_analyzer requires setting categorization_field_name"; + public static final String JOB_CONFIG_CONDITION_INVALID_VALUE_NULL = "Invalid condition: the value field cannot be null"; + public static final String JOB_CONFIG_CONDITION_INVALID_VALUE_NUMBER = + "Invalid condition value: cannot parse a double from string ''{0}''"; + public static final String 
JOB_CONFIG_CONDITION_INVALID_VALUE_REGEX = + "Invalid condition value: ''{0}'' is not a valid regular expression"; + public static final String JOB_CONFIG_DETECTION_RULE_CONDITION_CATEGORICAL_INVALID_OPTION = + "Invalid detector rule: a categorical rule_condition does not support {0}"; + public static final String JOB_CONFIG_DETECTION_RULE_CONDITION_CATEGORICAL_MISSING_OPTION = + "Invalid detector rule: a categorical rule_condition requires {0} to be set"; + public static final String JOB_CONFIG_DETECTION_RULE_CONDITION_INVALID_FIELD_NAME = + "Invalid detector rule: field_name has to be one of {0}; actual was ''{1}''"; + public static final String JOB_CONFIG_DETECTION_RULE_CONDITION_MISSING_FIELD_NAME = + "Invalid detector rule: missing field_name in rule_condition where field_value ''{0}'' is set"; + public static final String JOB_CONFIG_DETECTION_RULE_CONDITION_NUMERICAL_INVALID_OPERATOR = + "Invalid detector rule: operator ''{0}'' is not allowed"; + public static final String JOB_CONFIG_DETECTION_RULE_CONDITION_NUMERICAL_INVALID_OPTION = + "Invalid detector rule: a numerical rule_condition does not support {0}"; + public static final String JOB_CONFIG_DETECTION_RULE_CONDITION_NUMERICAL_MISSING_OPTION = + "Invalid detector rule: a numerical rule_condition requires {0} to be set"; + public static final String JOB_CONFIG_DETECTION_RULE_CONDITION_NUMERICAL_WITH_FIELD_NAME_REQUIRES_FIELD_VALUE = + "Invalid detector rule: a numerical rule_condition with field_name requires that field_value is set"; + public static final String JOB_CONFIG_DETECTION_RULE_INVALID_TARGET_FIELD_NAME = + "Invalid detector rule: target_field_name has to be one of {0}; actual was ''{1}''"; + public static final String JOB_CONFIG_DETECTION_RULE_MISSING_TARGET_FIELD_NAME = + "Invalid detector rule: missing target_field_name where target_field_value ''{0}'' is set"; + public static final String JOB_CONFIG_DETECTION_RULE_NOT_SUPPORTED_BY_FUNCTION = + "Invalid detector rule: function {0} does not support rules"; + public static final String JOB_CONFIG_DETECTION_RULE_REQUIRES_AT_LEAST_ONE_CONDITION = + "Invalid detector rule: at least one rule_condition is required"; + public static final String JOB_CONFIG_FIELDNAME_INCOMPATIBLE_FUNCTION = "field_name cannot be used with function ''{0}''"; + public static final String JOB_CONFIG_FIELD_VALUE_TOO_LOW = "{0} cannot be less than {1,number}. Value = {2,number}"; + public static final String JOB_CONFIG_MODEL_MEMORY_LIMIT_TOO_LOW = "model_memory_limit must be at least 1 MiB. Value = {0,number}"; + public static final String JOB_CONFIG_MODEL_MEMORY_LIMIT_GREATER_THAN_MAX = + "model_memory_limit [{0}] must be less than the value of the " + + MachineLearningField.MAX_MODEL_MEMORY_LIMIT.getKey() + + " setting [{1}]"; + public static final String JOB_CONFIG_FUNCTION_INCOMPATIBLE_PRESUMMARIZED = + "The ''{0}'' function cannot be used in jobs that will take pre-summarized input"; + public static final String JOB_CONFIG_FUNCTION_REQUIRES_BYFIELD = "by_field_name must be set when the ''{0}'' function is used"; + public static final String JOB_CONFIG_FUNCTION_REQUIRES_FIELDNAME = "field_name must be set when the ''{0}'' function is used"; + public static final String JOB_CONFIG_FUNCTION_REQUIRES_OVERFIELD = "over_field_name must be set when the ''{0}'' function is used"; + public static final String JOB_CONFIG_ID_ALREADY_TAKEN = "The job cannot be created with the Id ''{0}''. 
The Id is already used."; + public static final String JOB_CONFIG_ID_TOO_LONG = "The job id cannot contain more than {0,number,integer} characters."; + public static final String JOB_CONFIG_INVALID_CREATE_SETTINGS = + "The job is configured with fields [{0}] that are illegal to set at job creation"; + public static final String JOB_CONFIG_INVALID_FIELDNAME_CHARS = + "Invalid field name ''{0}''. Field names including over, by and partition " + + "fields cannot contain any of these characters: {1}"; + public static final String JOB_CONFIG_INVALID_FIELDNAME = + "Invalid field name ''{0}''. Field names including over, by and partition fields cannot be ''{1}''"; + public static final String JOB_CONFIG_INVALID_TIMEFORMAT = "Invalid Time format string ''{0}''"; + public static final String JOB_CONFIG_MISSING_ANALYSISCONFIG = "An analysis_config must be set"; + public static final String JOB_CONFIG_MISSING_DATA_DESCRIPTION = "A data_description must be set"; + public static final String JOB_CONFIG_MULTIPLE_BUCKETSPANS_MUST_BE_MULTIPLE = + "Multiple bucket_span ''{0}'' must be a multiple of the main bucket_span ''{1}''"; + public static final String JOB_CONFIG_ANALYSIS_FIELD_MUST_BE_SET = + "Unless a count or temporal function is used one of field_name, by_field_name or over_field_name must be set"; + public static final String JOB_CONFIG_NO_DETECTORS = "No detectors configured"; + public static final String JOB_CONFIG_OVERFIELD_INCOMPATIBLE_FUNCTION = + "over_field_name cannot be used with function ''{0}''"; + public static final String JOB_CONFIG_OVERLAPPING_BUCKETS_INCOMPATIBLE_FUNCTION = + "Overlapping buckets cannot be used with function ''{0}''"; + public static final String JOB_CONFIG_PER_PARTITION_NORMALIZATION_CANNOT_USE_INFLUENCERS = + "A job configured with Per-Partition Normalization cannot use influencers"; + public static final String JOB_CONFIG_PER_PARTITION_NORMALIZATION_REQUIRES_PARTITION_FIELD = + "If the job is configured with Per-Partition Normalization enabled a detector must have a partition field"; + public static final String JOB_CONFIG_UNKNOWN_FUNCTION = "Unknown function ''{0}''"; + public static final String JOB_CONFIG_UPDATE_ANALYSIS_LIMITS_MODEL_MEMORY_LIMIT_CANNOT_BE_DECREASED = + "Invalid update value for analysis_limits: model_memory_limit cannot be decreased below current usage; " + + "current usage [{0}], update had [{1}]"; + public static final String JOB_CONFIG_DETECTOR_DUPLICATE_FIELD_NAME = + "{0} and {1} cannot be the same: ''{2}''"; + public static final String JOB_CONFIG_DETECTOR_COUNT_DISALLOWED = + "''count'' is not a permitted value for {0}"; + public static final String JOB_CONFIG_DETECTOR_BY_DISALLOWED = + "''by'' is not a permitted value for {0}"; + public static final String JOB_CONFIG_DETECTOR_OVER_DISALLOWED = + "''over'' is not a permitted value for {0}"; + public static final String JOB_CONFIG_MAPPING_TYPE_CLASH = + "This job would cause a mapping clash with existing field [{0}] - avoid the clash by assigning a dedicated results index"; + public static final String JOB_CONFIG_TIME_FIELD_NOT_ALLOWED_IN_ANALYSIS_CONFIG = + "data_description.time_field may not be used in the analysis_config"; + + public static final String JOB_AND_GROUP_NAMES_MUST_BE_UNIQUE = + "job and group names must be unique but job [{0}] and group [{0}] have the same name"; + + public static final String JOB_UNKNOWN_ID = "No known job with id ''{0}''"; + + public static final String REST_CANNOT_DELETE_HIGHEST_PRIORITY = + "Model snapshot ''{0}'' is the active snapshot for job 
''{1}'', so cannot be deleted"; + public static final String REST_INVALID_DATETIME_PARAMS = + "Query param [{0}] with value [{1}] cannot be parsed as a date or converted to a number (epoch)."; + public static final String REST_INVALID_FLUSH_PARAMS_MISSING = "Invalid flush parameters: ''{0}'' has not been specified."; + public static final String REST_INVALID_FLUSH_PARAMS_UNEXPECTED = "Invalid flush parameters: unexpected ''{0}''."; + public static final String REST_JOB_NOT_CLOSED_REVERT = "Can only revert to a model snapshot when the job is closed."; + public static final String REST_NO_SUCH_MODEL_SNAPSHOT = "No model snapshot with id [{0}] exists for job [{1}]"; + public static final String REST_START_AFTER_END = "Invalid time range: end time ''{0}'' is earlier than start time ''{1}''."; + + public static final String FIELD_CANNOT_BE_NULL = "Field [{0}] cannot be null"; + + private Messages() { + } + + /** + * Returns the message parameter + * + * @param message Should be one of the statics defined in this class + */ + public static String getMessage(String message) { + return message; + } + + /** + * Format the message with the supplied arguments + * + * @param message Should be one of the statics defined in this class + * @param args MessageFormat arguments. See {@linkplain MessageFormat#format(Object)}] + */ + public static String getMessage(String message, Object...args) { + return new MessageFormat(message, Locale.ROOT).format(args); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java new file mode 100644 index 0000000000000..1ac842e8898bf --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
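`Messages` keeps every user-facing string in one place and formats it with `java.text.MessageFormat` under `Locale.ROOT`, which is why literal apostrophes are doubled (`''{0}''`) in the templates. A few illustrative calls, using only constants defined above:

```java
// MessageFormat collapses '' to a literal quote and fills the {n} placeholders.
String m1 = Messages.getMessage(Messages.JOB_UNKNOWN_ID, "farequote");
// -> No known job with id 'farequote'

String m2 = Messages.getMessage(Messages.DATAFEED_NOT_FOUND, "datafeed-1");
// -> No datafeed with id [datafeed-1] exists

String m3 = Messages.getMessage(Messages.JOB_CONFIG_ID_TOO_LONG, 64);
// -> The job id cannot contain more than 64 characters.
```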
+ */ +package org.elasticsearch.xpack.core.ml.job.persistence; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.xpack.core.ml.MLMetadataField; +import org.elasticsearch.xpack.core.ml.MlMetadata; + +/** + * Methods for handling index naming related functions + */ +public final class AnomalyDetectorsIndex { + + private AnomalyDetectorsIndex() { + } + + public static String jobResultsIndexPrefix() { + return AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX; + } + + /** + * The name of the alias pointing to the indices where the job's results are stored + * @param jobId Job Id + * @return The read alias + */ + public static String jobResultsAliasedName(String jobId) { + return AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + jobId; + } + + /** + * The name of the alias pointing to the write index for a job + * @param jobId Job Id + * @return The write alias + */ + public static String resultsWriteAlias(String jobId) { + // ".write" rather than simply "write" to avoid the danger of clashing + // with the read alias of a job whose name begins with "write-" + return AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + ".write-" + jobId; + } + + /** + * Retrieves the currently defined physical index from the job state + * @param jobId Job Id + * @return The index name + */ + public static String getPhysicalIndexFromState(ClusterState state, String jobId) { + MlMetadata meta = state.getMetaData().custom(MLMetadataField.TYPE); + return meta.getJobs().get(jobId).getResultsIndexName(); + } + + /** + * The name of the default index where a job's state is stored + * @return The index name + */ + public static String jobStateIndexName() { + return AnomalyDetectorsIndexFields.STATE_INDEX_NAME; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndexFields.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndexFields.java new file mode 100644 index 0000000000000..9cdaf10326dfb --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndexFields.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.persistence; + +public final class AnomalyDetectorsIndexFields { + + public static final String RESULTS_INDEX_PREFIX = ".ml-anomalies-"; + public static final String STATE_INDEX_NAME = ".ml-state"; + public static final String RESULTS_INDEX_DEFAULT = "shared"; + + private AnomalyDetectorsIndexFields() {} +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java new file mode 100644 index 0000000000000..bc420c658d0b5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java @@ -0,0 +1,712 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
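The two persistence classes above centralise the ML index-naming conventions: results live behind per-job read and write aliases built from `.ml-anomalies-`, and state lives in the single `.ml-state` index. For a concrete job id the helpers resolve as follows (values derived directly from the constants above; illustrative only):

```java
String jobId = "farequote";
AnomalyDetectorsIndex.jobResultsIndexPrefix();      // ".ml-anomalies-"
AnomalyDetectorsIndex.jobResultsAliasedName(jobId); // ".ml-anomalies-farequote"        (read alias)
AnomalyDetectorsIndex.resultsWriteAlias(jobId);     // ".ml-anomalies-.write-farequote" (write alias)
AnomalyDetectorsIndex.jobStateIndexName();          // ".ml-state"
```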
+ */ +package org.elasticsearch.xpack.core.ml.job.persistence; + +import org.elasticsearch.Version; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshotField; +import org.elasticsearch.xpack.core.ml.job.results.AnomalyCause; +import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; +import org.elasticsearch.xpack.core.ml.job.results.Bucket; +import org.elasticsearch.xpack.core.ml.job.results.BucketInfluencer; +import org.elasticsearch.xpack.core.ml.job.results.CategoryDefinition; +import org.elasticsearch.xpack.core.ml.job.results.Forecast; +import org.elasticsearch.xpack.core.ml.job.results.ForecastRequestStats; +import org.elasticsearch.xpack.core.ml.job.results.Influence; +import org.elasticsearch.xpack.core.ml.job.results.Influencer; +import org.elasticsearch.xpack.core.ml.job.results.ModelPlot; +import org.elasticsearch.xpack.core.ml.job.results.ReservedFieldNames; +import org.elasticsearch.xpack.core.ml.job.results.Result; +import org.elasticsearch.xpack.core.ml.notifications.AuditMessage; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; + +/** + * Static methods to create Elasticsearch mappings for the autodetect + * persisted objects/documents + *

+ * ElasticSearch automatically recognises array types so they are + * not explicitly mapped as such. For arrays of objects the type + * must be set to nested so the arrays are searched properly + * see https://www.elastic.co/guide/en/elasticsearch/guide/current/nested-objects.html + *

+ * It is expected that indexes to which these mappings are applied have their + * default analyzer set to "keyword", which does not tokenise fields. The + * index-wide default analyzer cannot be set via these mappings, so needs to be + * set in the index settings during index creation. For the results mapping the + * _all field is disabled and a custom all field is used in its place. The index + * settings must have {@code "index.query.default_field": "all_field_values" } set + * for the queries to use the custom all field. The custom all field has its + * analyzer set to "whitespace" by these mappings, so that it gets tokenised + * using whitespace. + */ +public class ElasticsearchMappings { + + public static final String DOC_TYPE = "doc"; + + /** + * String constants used in mappings + */ + public static final String ENABLED = "enabled"; + public static final String ANALYZER = "analyzer"; + public static final String WHITESPACE = "whitespace"; + public static final String NESTED = "nested"; + public static final String COPY_TO = "copy_to"; + public static final String PROPERTIES = "properties"; + public static final String TYPE = "type"; + public static final String DYNAMIC = "dynamic"; + public static final String FIELDS = "fields"; + + /** + * Name of the custom 'all' field for results + */ + public static final String ALL_FIELD_VALUES = "all_field_values"; + + /** + * Name of the Elasticsearch field by which documents are sorted by default + */ + public static final String ES_DOC = "_doc"; + + /** + * Elasticsearch data types + */ + public static final String BOOLEAN = "boolean"; + public static final String DATE = "date"; + public static final String DOUBLE = "double"; + public static final String INTEGER = "integer"; + public static final String KEYWORD = "keyword"; + public static final String LONG = "long"; + public static final String TEXT = "text"; + + static final String RAW = "raw"; + + private ElasticsearchMappings() { + } + + /** + * Creates a default mapping which has a dynamic template that + * treats all dynamically added fields as keywords. This is needed + * so that the per-job term fields will not be automatically added + * as fields of type 'text' to the index mappings of newly rolled indices. + * + * @throws IOException On write error + */ + public static void addDefaultMapping(XContentBuilder builder) throws IOException { + builder.startArray("dynamic_templates") + .startObject() + .startObject("strings_as_keywords") + .field("match", "*") + .startObject("mapping") + .field(TYPE, KEYWORD) + .endObject() + .endObject() + .endObject() + .endArray(); + } + + /** + * Inserts "_meta" containing useful information like the version into the mapping + * template. 
+ * + * @param builder The builder for the mappings + * @throws IOException On write error + */ + public static void addMetaInformation(XContentBuilder builder) throws IOException { + builder.startObject("_meta") + .field("version", Version.CURRENT) + .endObject(); + } + + public static XContentBuilder docMapping() throws IOException { + return docMapping(Collections.emptyList()); + } + + public static XContentBuilder docMapping(Collection extraTermFields) throws IOException { + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + builder.startObject(DOC_TYPE); + addMetaInformation(builder); + addDefaultMapping(builder); + builder.startObject(PROPERTIES); + + // Add result all field for easy searches in kibana + builder.startObject(ALL_FIELD_VALUES) + .field(TYPE, TEXT) + .field(ANALYZER, WHITESPACE) + .endObject(); + + builder.startObject(Job.ID.getPreferredName()) + .field(TYPE, KEYWORD) + .field(COPY_TO, ALL_FIELD_VALUES) + .endObject(); + + builder.startObject(Result.TIMESTAMP.getPreferredName()) + .field(TYPE, DATE) + .endObject(); + + addResultsMapping(builder); + addCategoryDefinitionMapping(builder); + addDataCountsMapping(builder); + addModelSnapshotMapping(builder); + + addTermFields(builder, extraTermFields); + + // end properties + builder.endObject(); + // end mapping + builder.endObject(); + // end doc + builder.endObject(); + + return builder; + } + + /** + * Create the Elasticsearch mapping for results objects + * {@link Bucket}s, {@link AnomalyRecord}s, {@link Influencer} and + * {@link BucketInfluencer} + * + * The mapping has a custom all field containing the *_FIELD_VALUE fields + * e.g. BY_FIELD_VALUE, OVER_FIELD_VALUE, etc. The custom all field {@link #ALL_FIELD_VALUES} + * must be set in the index settings. A custom all field is preferred over the usual + * '_all' field as most fields do not belong in '_all', disabling '_all' and + * using a custom all field simplifies the mapping. + * + * These fields are copied to the custom all field + *

+     * <ul>
+     *     <li>by_field_value</li>
+     *     <li>partition_field_value</li>
+     *     <li>over_field_value</li>
+     *     <li>AnomalyCause.correlated_by_field_value</li>
+     *     <li>AnomalyCause.by_field_value</li>
+     *     <li>AnomalyCause.partition_field_value</li>
+     *     <li>AnomalyCause.over_field_value</li>
+     *     <li>AnomalyRecord.Influencers.influencer_field_values</li>
+     *     <li>Influencer.influencer_field_value</li>
+     * </ul>
+ * + * @throws IOException On write error + */ + private static void addResultsMapping(XContentBuilder builder) throws IOException { + builder.startObject(Result.RESULT_TYPE.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Bucket.ANOMALY_SCORE.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(BucketInfluencer.RAW_ANOMALY_SCORE.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(Bucket.INITIAL_ANOMALY_SCORE.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(Result.IS_INTERIM.getPreferredName()) + .field(TYPE, BOOLEAN) + .endObject() + .startObject(Bucket.EVENT_COUNT.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(Bucket.BUCKET_SPAN.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(Bucket.PROCESSING_TIME_MS.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(Bucket.SCHEDULED_EVENTS.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Bucket.PARTITION_SCORES.getPreferredName()) + .field(TYPE, NESTED) + .startObject(PROPERTIES) + .startObject(AnomalyRecord.PARTITION_FIELD_NAME.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AnomalyRecord.PARTITION_FIELD_VALUE.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Bucket.INITIAL_ANOMALY_SCORE.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(AnomalyRecord.PROBABILITY.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .endObject() + .endObject() + + .startObject(Bucket.BUCKET_INFLUENCERS.getPreferredName()) + .field(TYPE, NESTED) + .startObject(PROPERTIES) + .startObject(Job.ID.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Result.RESULT_TYPE.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(BucketInfluencer.INFLUENCER_FIELD_NAME.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(BucketInfluencer.INITIAL_ANOMALY_SCORE.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(BucketInfluencer.ANOMALY_SCORE.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(BucketInfluencer.RAW_ANOMALY_SCORE.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(BucketInfluencer.PROBABILITY.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(Result.TIMESTAMP.getPreferredName()) + .field(TYPE, DATE) + .endObject() + .startObject(BucketInfluencer.BUCKET_SPAN.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(Result.IS_INTERIM.getPreferredName()) + .field(TYPE, BOOLEAN) + .endObject() + .endObject() + .endObject() + + // Model Plot Output + .startObject(ModelPlot.MODEL_FEATURE.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(ModelPlot.MODEL_LOWER.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(ModelPlot.MODEL_UPPER.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(ModelPlot.MODEL_MEDIAN.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject(); + + addForecastFieldsToMapping(builder); + addAnomalyRecordFieldsToMapping(builder); + addInfluencerFieldsToMapping(builder); + addModelSizeStatsFieldsToMapping(builder); + } + + public static XContentBuilder termFieldsMapping(String type, Collection termFields) { + try { + XContentBuilder builder = jsonBuilder().startObject(); + if (type != null) { + builder.startObject(type); + } + builder.startObject(PROPERTIES); + addTermFields(builder, 
termFields); + builder.endObject(); + if (type != null) { + builder.endObject(); + } + return builder.endObject(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private static void addTermFields(XContentBuilder builder, Collection termFields) throws IOException { + for (String fieldName : termFields) { + if (ReservedFieldNames.isValidFieldName(fieldName)) { + builder.startObject(fieldName).field(TYPE, KEYWORD).endObject(); + } + } + } + + private static void addForecastFieldsToMapping(XContentBuilder builder) throws IOException { + + // Forecast Output + builder.startObject(Forecast.FORECAST_LOWER.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(Forecast.FORECAST_UPPER.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(Forecast.FORECAST_PREDICTION.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(Forecast.FORECAST_ID.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject(); + + // Forecast Stats Output + // re-used: TIMESTAMP, PROCESSING_TIME_MS, PROCESSED_RECORD_COUNT, LATEST_RECORD_TIME + builder.startObject(ForecastRequestStats.START_TIME.getPreferredName()) + .field(TYPE, DATE) + .endObject() + .startObject(ForecastRequestStats.END_TIME.getPreferredName()) + .field(TYPE, DATE) + .endObject() + .startObject(ForecastRequestStats.CREATE_TIME.getPreferredName()) + .field(TYPE, DATE) + .endObject() + .startObject(ForecastRequestStats.EXPIRY_TIME.getPreferredName()) + .field(TYPE, DATE) + .endObject() + .startObject(ForecastRequestStats.MESSAGES.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(ForecastRequestStats.PROGRESS.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(ForecastRequestStats.STATUS.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(ForecastRequestStats.MEMORY_USAGE.getPreferredName()) + .field(TYPE, LONG) + .endObject(); + } + + /** + * AnomalyRecord fields to be added under the 'properties' section of the mapping + * @param builder Add properties to this builder + * @throws IOException On write error + */ + private static void addAnomalyRecordFieldsToMapping(XContentBuilder builder) throws IOException { + builder.startObject(Detector.DETECTOR_INDEX.getPreferredName()) + .field(TYPE, INTEGER) + .endObject() + .startObject(AnomalyRecord.ACTUAL.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(AnomalyRecord.TYPICAL.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(AnomalyRecord.PROBABILITY.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(AnomalyRecord.FUNCTION.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AnomalyRecord.FUNCTION_DESCRIPTION.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AnomalyRecord.BY_FIELD_NAME.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AnomalyRecord.BY_FIELD_VALUE.getPreferredName()) + .field(TYPE, KEYWORD) + .field(COPY_TO, ALL_FIELD_VALUES) + .endObject() + .startObject(AnomalyRecord.FIELD_NAME.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AnomalyRecord.PARTITION_FIELD_NAME.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AnomalyRecord.PARTITION_FIELD_VALUE.getPreferredName()) + .field(TYPE, KEYWORD) + .field(COPY_TO, ALL_FIELD_VALUES) + .endObject() + .startObject(AnomalyRecord.OVER_FIELD_NAME.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + 
.startObject(AnomalyRecord.OVER_FIELD_VALUE.getPreferredName()) + .field(TYPE, KEYWORD) + .field(COPY_TO, ALL_FIELD_VALUES) + .endObject() + .startObject(AnomalyRecord.RECORD_SCORE.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(AnomalyRecord.INITIAL_RECORD_SCORE.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(AnomalyRecord.CAUSES.getPreferredName()) + .field(TYPE, NESTED) + .startObject(PROPERTIES) + .startObject(AnomalyCause.ACTUAL.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(AnomalyCause.TYPICAL.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(AnomalyCause.PROBABILITY.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(AnomalyCause.FUNCTION.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AnomalyCause.FUNCTION_DESCRIPTION.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AnomalyCause.BY_FIELD_NAME.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AnomalyCause.BY_FIELD_VALUE.getPreferredName()) + .field(TYPE, KEYWORD) + .field(COPY_TO, ALL_FIELD_VALUES) + .endObject() + .startObject(AnomalyCause.CORRELATED_BY_FIELD_VALUE.getPreferredName()) + .field(TYPE, KEYWORD) + .field(COPY_TO, ALL_FIELD_VALUES) + .endObject() + .startObject(AnomalyCause.FIELD_NAME.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AnomalyCause.PARTITION_FIELD_NAME.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AnomalyCause.PARTITION_FIELD_VALUE.getPreferredName()) + .field(TYPE, KEYWORD) + .field(COPY_TO, ALL_FIELD_VALUES) + .endObject() + .startObject(AnomalyCause.OVER_FIELD_NAME.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AnomalyCause.OVER_FIELD_VALUE.getPreferredName()) + .field(TYPE, KEYWORD) + .field(COPY_TO, ALL_FIELD_VALUES) + .endObject() + .endObject() + .endObject() + .startObject(AnomalyRecord.INFLUENCERS.getPreferredName()) + /* Array of influences */ + .field(TYPE, NESTED) + .startObject(PROPERTIES) + .startObject(Influence.INFLUENCER_FIELD_NAME.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Influence.INFLUENCER_FIELD_VALUES.getPreferredName()) + .field(TYPE, KEYWORD) + .field(COPY_TO, ALL_FIELD_VALUES) + .endObject() + .endObject() + .endObject(); + } + + private static void addInfluencerFieldsToMapping(XContentBuilder builder) throws IOException { + builder.startObject(Influencer.INFLUENCER_SCORE.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(Influencer.INITIAL_INFLUENCER_SCORE.getPreferredName()) + .field(TYPE, DOUBLE) + .endObject() + .startObject(Influencer.INFLUENCER_FIELD_NAME.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Influencer.INFLUENCER_FIELD_VALUE.getPreferredName()) + .field(TYPE, KEYWORD) + .field(COPY_TO, ALL_FIELD_VALUES) + .endObject(); + } + + /** + * {@link DataCounts} mapping. 
+ * The type is disabled so {@link DataCounts} aren't searchable and + * the '_all' field is disabled + * + * @throws IOException On builder write error + */ + private static void addDataCountsMapping(XContentBuilder builder) throws IOException { + builder.startObject(DataCounts.PROCESSED_RECORD_COUNT.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataCounts.PROCESSED_FIELD_COUNT.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataCounts.INPUT_BYTES.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataCounts.INPUT_RECORD_COUNT.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataCounts.INPUT_FIELD_COUNT.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataCounts.INVALID_DATE_COUNT.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataCounts.MISSING_FIELD_COUNT.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataCounts.OUT_OF_ORDER_TIME_COUNT.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataCounts.EMPTY_BUCKET_COUNT.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataCounts.SPARSE_BUCKET_COUNT.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataCounts.BUCKET_COUNT.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(DataCounts.EARLIEST_RECORD_TIME.getPreferredName()) + .field(TYPE, DATE) + .endObject() + .startObject(DataCounts.LATEST_RECORD_TIME.getPreferredName()) + .field(TYPE, DATE) + .endObject() + .startObject(DataCounts.LATEST_EMPTY_BUCKET_TIME.getPreferredName()) + .field(TYPE, DATE) + .endObject() + .startObject(DataCounts.LATEST_SPARSE_BUCKET_TIME.getPreferredName()) + .field(TYPE, DATE) + .endObject() + .startObject(DataCounts.LAST_DATA_TIME.getPreferredName()) + .field(TYPE, DATE) + .endObject(); + } + + /** + * Create the Elasticsearch mapping for {@linkplain CategoryDefinition}. + * The '_all' field is disabled as the document isn't meant to be searched. + * + * @throws IOException On builder error + */ + private static void addCategoryDefinitionMapping(XContentBuilder builder) throws IOException { + builder.startObject(CategoryDefinition.CATEGORY_ID.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(CategoryDefinition.TERMS.getPreferredName()) + .field(TYPE, TEXT) + .endObject() + .startObject(CategoryDefinition.REGEX.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(CategoryDefinition.MAX_MATCHING_LENGTH.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(CategoryDefinition.EXAMPLES.getPreferredName()) + .field(TYPE, TEXT) + .endObject(); + } + + /** + * Create the Elasticsearch mapping for state. State could potentially be + * huge (target document size is 16MB and there can be many documents) so all + * analysis by Elasticsearch is disabled. The only way to retrieve state is + * by knowing the ID of a particular document. + */ + public static XContentBuilder stateMapping() throws IOException { + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + builder.startObject(DOC_TYPE); + addMetaInformation(builder); + builder.field(ENABLED, false); + builder.endObject(); + builder.endObject(); + + return builder; + } + + /** + * Create the Elasticsearch mapping for {@linkplain ModelSnapshot}. 
+ * The '_all' field is disabled but the type is searchable + */ + private static void addModelSnapshotMapping(XContentBuilder builder) throws IOException { + builder.startObject(ModelSnapshot.DESCRIPTION.getPreferredName()) + .field(TYPE, TEXT) + .endObject() + .startObject(ModelSnapshotField.SNAPSHOT_ID.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(ModelSnapshot.SNAPSHOT_DOC_COUNT.getPreferredName()) + .field(TYPE, INTEGER) + .endObject() + .startObject(ModelSnapshot.RETAIN.getPreferredName()) + .field(TYPE, BOOLEAN) + .endObject() + .startObject(ModelSizeStats.RESULT_TYPE_FIELD.getPreferredName()) + .startObject(PROPERTIES) + .startObject(Job.ID.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(Result.RESULT_TYPE.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(ModelSizeStats.TIMESTAMP_FIELD.getPreferredName()) + .field(TYPE, DATE) + .endObject(); + + addModelSizeStatsFieldsToMapping(builder); + + // end model size stats properties + builder.endObject(); + // end model size stats mapping + builder.endObject(); + + builder.startObject(ModelSnapshot.QUANTILES.getPreferredName()) + .field(ENABLED, false) + .endObject() + .startObject(ModelSnapshot.LATEST_RECORD_TIME.getPreferredName()) + .field(TYPE, DATE) + .endObject() + .startObject(ModelSnapshot.LATEST_RESULT_TIME.getPreferredName()) + .field(TYPE, DATE) + .endObject(); + } + + /** + * {@link ModelSizeStats} fields to be added under the 'properties' section of the mapping + * @param builder Add properties to this builder + * @throws IOException On write error + */ + private static void addModelSizeStatsFieldsToMapping(XContentBuilder builder) throws IOException { + builder.startObject(ModelSizeStats.MODEL_BYTES_FIELD.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(ModelSizeStats.TOTAL_BY_FIELD_COUNT_FIELD.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(ModelSizeStats.TOTAL_OVER_FIELD_COUNT_FIELD.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(ModelSizeStats.TOTAL_PARTITION_FIELD_COUNT_FIELD.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(ModelSizeStats.BUCKET_ALLOCATION_FAILURES_COUNT_FIELD.getPreferredName()) + .field(TYPE, LONG) + .endObject() + .startObject(ModelSizeStats.MEMORY_STATUS_FIELD.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(ModelSizeStats.LOG_TIME_FIELD.getPreferredName()) + .field(TYPE, DATE) + .endObject(); + } + + public static XContentBuilder auditMessageMapping() throws IOException { + return jsonBuilder() + .startObject() + .startObject(AuditMessage.TYPE.getPreferredName()) + .startObject(PROPERTIES) + .startObject(Job.ID.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AuditMessage.LEVEL.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AuditMessage.MESSAGE.getPreferredName()) + .field(TYPE, TEXT) + .startObject(FIELDS) + .startObject(RAW) + .field(TYPE, KEYWORD) + .endObject() + .endObject() + .endObject() + .startObject(AuditMessage.TIMESTAMP.getPreferredName()) + .field(TYPE, DATE) + .endObject() + .startObject(AuditMessage.NODE_NAME.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .endObject() + .endObject() + .endObject(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/JobDataDeleter.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/JobDataDeleter.java new file 
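`ElasticsearchMappings` builds all of these mappings programmatically with `XContentBuilder`; `termFieldsMapping` in particular produces the keyword mappings for a job's extra term fields, filtered through `ReservedFieldNames.isValidFieldName`. A rough usage sketch, assuming `org.elasticsearch.common.Strings.toString(XContentBuilder)` is available to render the builder as JSON:

```java
// Illustrative sketch, not part of the diff.
XContentBuilder mapping = ElasticsearchMappings.termFieldsMapping(
        ElasticsearchMappings.DOC_TYPE, Arrays.asList("airline", "responsetime"));
System.out.println(Strings.toString(mapping));
// Roughly: {"doc":{"properties":{"airline":{"type":"keyword"},"responsetime":{"type":"keyword"}}}}
```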
mode 100644 index 0000000000000..0a7d27f7a0ec4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/JobDataDeleter.java @@ -0,0 +1,170 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.persistence; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.bulk.BulkAction; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.query.ConstantScoreQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.reindex.DeleteByQueryAction; +import org.elasticsearch.index.reindex.DeleteByQueryRequest; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelState; +import org.elasticsearch.xpack.core.ml.job.results.Result; + +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; + +public class JobDataDeleter { + + private static final Logger LOGGER = Loggers.getLogger(JobDataDeleter.class); + + private final Client client; + private final String jobId; + + public JobDataDeleter(Client client, String jobId) { + this.client = Objects.requireNonNull(client); + this.jobId = Objects.requireNonNull(jobId); + } + + /** + * Delete a list of model snapshots and their corresponding state documents. 
+ * + * @param modelSnapshots the model snapshots to delete + */ + public void deleteModelSnapshots(List modelSnapshots, ActionListener listener) { + if (modelSnapshots.isEmpty()) { + listener.onResponse(new BulkResponse(new BulkItemResponse[0], 0L)); + return; + } + + String stateIndexName = AnomalyDetectorsIndex.jobStateIndexName(); + + // TODO: remove in 7.0 + ActionListener docDeleteListener = ActionListener.wrap( + response -> { + // if the doc delete worked then don't bother trying the old types + if (response.hasFailures() == false) { + listener.onResponse(response); + return; + } + BulkRequestBuilder bulkRequestBuilder = client.prepareBulk(); + for (ModelSnapshot modelSnapshot : modelSnapshots) { + for (String stateDocId : modelSnapshot.legacyStateDocumentIds()) { + bulkRequestBuilder.add(client.prepareDelete(stateIndexName, ModelState.TYPE, stateDocId)); + } + + bulkRequestBuilder.add(client.prepareDelete(AnomalyDetectorsIndex.jobResultsAliasedName(modelSnapshot.getJobId()), + ModelSnapshot.TYPE.getPreferredName(), ModelSnapshot.v54DocumentId(modelSnapshot))); + } + + bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + try { + bulkRequestBuilder.execute(ActionListener.wrap( + listener::onResponse, + // ignore problems relating to single type indices - if we're running against a single type + // index then it must be type doc, so just return the response from deleting that type + e -> { + if (e instanceof IllegalArgumentException + && e.getMessage().contains("as the final mapping would have more than 1 type")) { + listener.onResponse(response); + } + listener.onFailure(e); + } + )); + } catch (Exception e) { + listener.onFailure(e); + } + }, + listener::onFailure + ); + + BulkRequestBuilder bulkRequestBuilder = client.prepareBulk(); + for (ModelSnapshot modelSnapshot : modelSnapshots) { + for (String stateDocId : modelSnapshot.stateDocumentIds()) { + bulkRequestBuilder.add(client.prepareDelete(stateIndexName, ElasticsearchMappings.DOC_TYPE, stateDocId)); + } + + bulkRequestBuilder.add(client.prepareDelete(AnomalyDetectorsIndex.jobResultsAliasedName(modelSnapshot.getJobId()), + ElasticsearchMappings.DOC_TYPE, ModelSnapshot.documentId(modelSnapshot))); + } + + bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + try { + executeAsyncWithOrigin(client, ML_ORIGIN, BulkAction.INSTANCE, bulkRequestBuilder.request(), docDeleteListener); + } catch (Exception e) { + listener.onFailure(e); + } + } + + /** + * Asynchronously delete all result types (Buckets, Records, Influencers) from {@code cutOffTime} + * + * @param cutoffEpochMs Results at and after this time will be deleted + * @param listener Response listener + */ + public void deleteResultsFromTime(long cutoffEpochMs, ActionListener listener) { + DeleteByQueryHolder deleteByQueryHolder = new DeleteByQueryHolder(AnomalyDetectorsIndex.jobResultsAliasedName(jobId)); + deleteByQueryHolder.dbqRequest.setRefresh(true); + + QueryBuilder query = QueryBuilders.boolQuery() + .filter(QueryBuilders.existsQuery(Result.RESULT_TYPE.getPreferredName())) + .filter(QueryBuilders.rangeQuery(Result.TIMESTAMP.getPreferredName()).gte(cutoffEpochMs)); + deleteByQueryHolder.searchRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); + deleteByQueryHolder.searchRequest.source(new SearchSourceBuilder().query(query)); + executeAsyncWithOrigin(client, ML_ORIGIN, DeleteByQueryAction.INSTANCE, deleteByQueryHolder.dbqRequest, + ActionListener.wrap(r -> listener.onResponse(true), listener::onFailure)); + } + + /** 
+ * Delete all results marked as interim + */ + public void deleteInterimResults() { + DeleteByQueryHolder deleteByQueryHolder = new DeleteByQueryHolder(AnomalyDetectorsIndex.jobResultsAliasedName(jobId)); + deleteByQueryHolder.dbqRequest.setRefresh(false); + + deleteByQueryHolder.searchRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); + QueryBuilder qb = QueryBuilders.termQuery(Result.IS_INTERIM.getPreferredName(), true); + deleteByQueryHolder.searchRequest.source(new SearchSourceBuilder().query(new ConstantScoreQueryBuilder(qb))); + + try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { + client.execute(DeleteByQueryAction.INSTANCE, deleteByQueryHolder.dbqRequest).get(); + } catch (Exception e) { + LOGGER.error("[" + jobId + "] An error occurred while deleting interim results", e); + } + } + + // Wrapper to ensure safety + private static class DeleteByQueryHolder { + + private final SearchRequest searchRequest; + private final DeleteByQueryRequest dbqRequest; + + private DeleteByQueryHolder(String index) { + // The search request has to be constructed and passed to the DeleteByQueryRequest before more details are set to it + searchRequest = new SearchRequest(index); + dbqRequest = new DeleteByQueryRequest(searchRequest); + dbqRequest.setSlices(5); + dbqRequest.setAbortOnVersionConflict(false); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/JobStorageDeletionTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/JobStorageDeletionTask.java new file mode 100644 index 0000000000000..54faafb8ee584 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/JobStorageDeletionTask.java @@ -0,0 +1,239 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
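`JobDataDeleter` wraps the bulk and delete-by-query plumbing for a single job; note that `DeleteByQueryHolder` constructs the `SearchRequest` first because the `DeleteByQueryRequest` must be built around it before any other details are set. A hypothetical caller sketch, assuming an injected `Client` and an existing `Logger` (neither is part of the diff):

```java
// Hypothetical usage: remove all results at and after a cutoff, e.g. when rolling back.
JobDataDeleter deleter = new JobDataDeleter(client, "farequote");
long cutoffEpochMs = 1525132800000L; // results at and after this timestamp are deleted
deleter.deleteResultsFromTime(cutoffEpochMs, ActionListener.wrap(
        deleted -> logger.info("[farequote] deleted results from [{}] onwards", cutoffEpochMs),
        e -> logger.error("[farequote] failed to delete results", e)));
```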
+ */ +package org.elasticsearch.xpack.core.ml.job.persistence; + +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.query.ConstantScoreQueryBuilder; +import org.elasticsearch.index.query.IdsQueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.DeleteByQueryAction; +import org.elasticsearch.index.reindex.DeleteByQueryRequest; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.xpack.core.ml.action.GetModelSnapshotsAction; +import org.elasticsearch.xpack.core.ml.action.util.PageParams; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.CategorizerState; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles; +import org.elasticsearch.xpack.core.ml.utils.MlIndicesUtils; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Consumer; + +import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; + +/* + Moving this class to plugin-core caused a *lot* of server side logic to be pulled in to plugin-core. This should be considered as needing + refactoring to move it back to core. See DeleteJobAction for its use. +*/ +public class JobStorageDeletionTask extends Task { + + private static final int MAX_SNAPSHOTS_TO_DELETE = 10000; + + private final Logger logger; + + public JobStorageDeletionTask(long id, String type, String action, String description, TaskId parentTask, Map headers) { + super(id, type, action, description, parentTask, headers); + this.logger = Loggers.getLogger(getClass()); + } + + public void delete(String jobId, Client client, ClusterState state, + CheckedConsumer finishedHandler, + Consumer failureHandler) { + + final String indexName = AnomalyDetectorsIndex.getPhysicalIndexFromState(state, jobId); + final String indexPattern = indexName + "-*"; + + ActionListener deleteAliasHandler = ActionListener.wrap(finishedHandler, failureHandler); + + // Step 5. 
DBQ state done, delete the aliases + ActionListener dbqHandler = ActionListener.wrap( + bulkByScrollResponse -> { + if (bulkByScrollResponse.isTimedOut()) { + logger.warn("[{}] DeleteByQuery for indices [{}, {}] timed out.", jobId, indexName, indexPattern); + } + if (!bulkByScrollResponse.getBulkFailures().isEmpty()) { + logger.warn("[{}] {} failures and {} conflicts encountered while running DeleteByQuery on indices [{}, {}].", + jobId, bulkByScrollResponse.getBulkFailures().size(), bulkByScrollResponse.getVersionConflicts(), + indexName, indexPattern); + for (BulkItemResponse.Failure failure : bulkByScrollResponse.getBulkFailures()) { + logger.warn("DBQ failure: " + failure); + } + } + deleteAliases(jobId, client, deleteAliasHandler); + }, + failureHandler); + + // Step 4. Delete categorizer state done, DeleteByQuery on the index, matching all docs with the right job_id + ActionListener deleteCategorizerStateHandler = ActionListener.wrap( + response -> { + logger.info("Running DBQ on [" + indexName + "," + indexPattern + "] for job [" + jobId + "]"); + SearchRequest searchRequest = new SearchRequest(indexName, indexPattern); + DeleteByQueryRequest request = new DeleteByQueryRequest(searchRequest); + ConstantScoreQueryBuilder query = + new ConstantScoreQueryBuilder(new TermQueryBuilder(Job.ID.getPreferredName(), jobId)); + searchRequest.source(new SearchSourceBuilder().query(query)); + searchRequest.indicesOptions(MlIndicesUtils.addIgnoreUnavailable(IndicesOptions.lenientExpandOpen())); + request.setSlices(5); + request.setAbortOnVersionConflict(false); + request.setRefresh(true); + + executeAsyncWithOrigin(client, ML_ORIGIN, DeleteByQueryAction.INSTANCE, request, dbqHandler); + }, + failureHandler); + + // Step 3. Delete quantiles done, delete the categorizer state + ActionListener deleteQuantilesHandler = ActionListener.wrap( + response -> deleteCategorizerState(jobId, client, 1, deleteCategorizerStateHandler), + failureHandler); + + // Step 2. Delete state done, delete the quantiles + ActionListener deleteStateHandler = ActionListener.wrap( + bulkResponse -> deleteQuantiles(jobId, client, deleteQuantilesHandler), + failureHandler); + + // Step 1. 
Delete the model state + deleteModelState(jobId, client, deleteStateHandler); + } + + private void deleteQuantiles(String jobId, Client client, ActionListener finishedHandler) { + // The quantiles type and doc ID changed in v5.5 so delete both the old and new format + SearchRequest searchRequest = new SearchRequest(AnomalyDetectorsIndex.jobStateIndexName()); + DeleteByQueryRequest request = new DeleteByQueryRequest(searchRequest); + // Just use ID here, not type, as trying to delete different types spams the logs with an exception stack trace + IdsQueryBuilder query = new IdsQueryBuilder().addIds(Quantiles.documentId(jobId), + // TODO: remove in 7.0 + Quantiles.v54DocumentId(jobId)); + searchRequest.source(new SearchSourceBuilder().query(query)); + searchRequest.indicesOptions(MlIndicesUtils.addIgnoreUnavailable(IndicesOptions.lenientExpandOpen())); + request.setAbortOnVersionConflict(false); + request.setRefresh(true); + + executeAsyncWithOrigin(client, ML_ORIGIN, DeleteByQueryAction.INSTANCE, request, ActionListener.wrap( + response -> finishedHandler.onResponse(true), + e -> { + // It's not a problem for us if the index wasn't found - it's equivalent to document not found + if (e instanceof IndexNotFoundException) { + finishedHandler.onResponse(true); + } else { + finishedHandler.onFailure(e); + } + })); + } + + private void deleteModelState(String jobId, Client client, ActionListener listener) { + GetModelSnapshotsAction.Request request = new GetModelSnapshotsAction.Request(jobId, null); + request.setPageParams(new PageParams(0, MAX_SNAPSHOTS_TO_DELETE)); + executeAsyncWithOrigin(client, ML_ORIGIN, GetModelSnapshotsAction.INSTANCE, request, ActionListener.wrap( + response -> { + List deleteCandidates = response.getPage().results(); + JobDataDeleter deleter = new JobDataDeleter(client, jobId); + deleter.deleteModelSnapshots(deleteCandidates, listener); + }, + listener::onFailure)); + } + + private void deleteCategorizerState(String jobId, Client client, int docNum, ActionListener finishedHandler) { + // The categorizer state type and doc ID changed in v5.5 so delete both the old and new format + SearchRequest searchRequest = new SearchRequest(AnomalyDetectorsIndex.jobStateIndexName()); + DeleteByQueryRequest request = new DeleteByQueryRequest(searchRequest); + // Just use ID here, not type, as trying to delete different types spams the logs with an exception stack trace + IdsQueryBuilder query = new IdsQueryBuilder().addIds(CategorizerState.documentId(jobId, docNum), + // TODO: remove in 7.0 + CategorizerState.v54DocumentId(jobId, docNum)); + searchRequest.source(new SearchSourceBuilder().query(query)); + searchRequest.indicesOptions(MlIndicesUtils.addIgnoreUnavailable(IndicesOptions.lenientExpandOpen())); + request.setAbortOnVersionConflict(false); + request.setRefresh(true); + + executeAsyncWithOrigin(client, ML_ORIGIN, DeleteByQueryAction.INSTANCE, request, ActionListener.wrap( + response -> { + // If we successfully deleted a document try the next one; if not we're done + if (response.getDeleted() > 0) { + // There's an assumption here that there won't be very many categorizer + // state documents, so the recursion won't go more than, say, 5 levels deep + deleteCategorizerState(jobId, client, docNum + 1, finishedHandler); + return; + } + finishedHandler.onResponse(true); + }, + e -> { + // It's not a problem for us if the index wasn't found - it's equivalent to document not found + if (e instanceof IndexNotFoundException) { + finishedHandler.onResponse(true); + } else { + 
finishedHandler.onFailure(e); + } + })); + } + + private void deleteAliases(String jobId, Client client, ActionListener finishedHandler) { + final String readAliasName = AnomalyDetectorsIndex.jobResultsAliasedName(jobId); + final String writeAliasName = AnomalyDetectorsIndex.resultsWriteAlias(jobId); + + // first find the concrete indices associated with the aliases + GetAliasesRequest aliasesRequest = new GetAliasesRequest().aliases(readAliasName, writeAliasName) + .indicesOptions(IndicesOptions.lenientExpandOpen()); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, aliasesRequest, + ActionListener.wrap( + getAliasesResponse -> { + // remove the aliases from the concrete indices found in the first step + IndicesAliasesRequest removeRequest = buildRemoveAliasesRequest(getAliasesResponse); + if (removeRequest == null) { + // don't error if the job's aliases have already been deleted - carry on and delete the + // rest of the job's data + finishedHandler.onResponse(true); + return; + } + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, removeRequest, + ActionListener.wrap(removeResponse -> finishedHandler.onResponse(true), + finishedHandler::onFailure), + client.admin().indices()::aliases); + }, + finishedHandler::onFailure), client.admin().indices()::getAliases); + } + + private IndicesAliasesRequest buildRemoveAliasesRequest(GetAliasesResponse getAliasesResponse) { + Set aliases = new HashSet<>(); + List indices = new ArrayList<>(); + for (ObjectObjectCursor> entry : getAliasesResponse.getAliases()) { + // The response includes _all_ indices, but only those associated with + // the aliases we asked about will have associated AliasMetaData + if (entry.value.isEmpty() == false) { + indices.add(entry.key); + entry.value.forEach(metadata -> aliases.add(metadata.getAlias())); + } + } + return aliases.isEmpty() ? null : new IndicesAliasesRequest().addAliasAction( + IndicesAliasesRequest.AliasActions.remove() + .aliases(aliases.toArray(new String[aliases.size()])) + .indices(indices.toArray(new String[indices.size()]))); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java new file mode 100644 index 0000000000000..ad8b24e66c643 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.job.process.autodetect.output; + +import org.elasticsearch.Version; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; + +import java.io.IOException; +import java.util.Date; +import java.util.Objects; + +/** + * Simple class to parse and store a flush ID. + */ +public class FlushAcknowledgement implements ToXContentObject, Writeable { + /** + * Field Names + */ + public static final ParseField TYPE = new ParseField("flush"); + public static final ParseField ID = new ParseField("id"); + public static final ParseField LAST_FINALIZED_BUCKET_END = new ParseField("last_finalized_bucket_end"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + TYPE.getPreferredName(), a -> new FlushAcknowledgement((String) a[0], (Date) a[1])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), ID); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException( + "unexpected token [" + p.currentToken() + "] for [" + LAST_FINALIZED_BUCKET_END.getPreferredName() + "]"); + }, LAST_FINALIZED_BUCKET_END, ObjectParser.ValueType.VALUE); + } + + private String id; + private Date lastFinalizedBucketEnd; + + public FlushAcknowledgement(String id, Date lastFinalizedBucketEnd) { + this.id = id; + this.lastFinalizedBucketEnd = lastFinalizedBucketEnd; + } + + public FlushAcknowledgement(StreamInput in) throws IOException { + id = in.readString(); + if (in.getVersion().after(Version.V_5_5_0)) { + lastFinalizedBucketEnd = new Date(in.readVLong()); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(id); + if (out.getVersion().after(Version.V_5_5_0)) { + out.writeVLong(lastFinalizedBucketEnd.getTime()); + } + } + + public String getId() { + return id; + } + + public Date getLastFinalizedBucketEnd() { + return lastFinalizedBucketEnd; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ID.getPreferredName(), id); + if (lastFinalizedBucketEnd != null) { + builder.timeField(LAST_FINALIZED_BUCKET_END.getPreferredName(), LAST_FINALIZED_BUCKET_END.getPreferredName() + "_string", + lastFinalizedBucketEnd.getTime()); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(id, lastFinalizedBucketEnd); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + FlushAcknowledgement other = (FlushAcknowledgement) obj; + return Objects.equals(id, other.id) && + Objects.equals(lastFinalizedBucketEnd, other.lastFinalizedBucketEnd); + } +} + 
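The static parser block in FlushAcknowledgement above, and the parsers in the DataCounts, ModelSizeStats and ModelSnapshot classes added below, all repeat the same idiom: a timestamp field is accepted either as epoch milliseconds (the VALUE_NUMBER branch) or as a date string (the VALUE_STRING branch, delegated to TimeUtils.dateStringToEpoch). A rough, self-contained sketch of that idiom using only JDK types follows; the class and method names are illustrative only and are not part of this patch.

```java
import java.time.Instant;
import java.util.Date;

// Hypothetical standalone sketch of the dual numeric/string timestamp handling
// used by the X-Pack parsers; not the actual ObjectParser-based implementation.
public class TimestampFieldSketch {

    /** Converts either a Long (epoch millis) or an ISO-8601 String into a Date. */
    static Date parseTimestampField(Object value, String fieldName) {
        if (value instanceof Long) {
            // Mirrors the VALUE_NUMBER branch: treat the number as epoch milliseconds.
            return new Date((Long) value);
        } else if (value instanceof String) {
            // Mirrors the VALUE_STRING branch: parse a date string down to epoch milliseconds.
            return Date.from(Instant.parse((String) value));
        }
        throw new IllegalArgumentException("unexpected value [" + value + "] for [" + fieldName + "]");
    }

    public static void main(String[] args) {
        System.out.println(parseTimestampField(1525000000000L, "last_finalized_bucket_end"));
        System.out.println(parseTimestampField("2018-04-29T10:26:40Z", "last_finalized_bucket_end"));
    }
}
```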
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/CategorizerState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/CategorizerState.java new file mode 100644 index 0000000000000..8c08300354698 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/CategorizerState.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.process.autodetect.state; + + +/** + * The categorizer state does not need to be understood on the Java side. + * The Java code only needs to know how to form the document IDs so that + * it can retrieve and delete the correct documents. + */ +public class CategorizerState { + + /** + * Legacy type, now used only as a discriminant in the document ID + */ + public static final String TYPE = "categorizer_state"; + + public static final String documentId(String jobId, int docNum) { + return documentPrefix(jobId) + docNum; + } + + public static final String documentPrefix(String jobId) { + return jobId + "_" + TYPE + "#"; + } + + /** + * This is how the IDs were formed in v5.4 + */ + public static final String v54DocumentId(String jobId, int docNum) { + return v54DocumentPrefix(jobId) + docNum; + } + + public static final String v54DocumentPrefix(String jobId) { + return jobId + "#"; + } + + private CategorizerState() { + } +} + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCounts.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCounts.java new file mode 100644 index 0000000000000..f2545c5abf782 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCounts.java @@ -0,0 +1,643 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.process.autodetect.state; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; + +import java.io.IOException; +import java.util.Date; +import java.util.Objects; + +/** + * Job processed record counts. + *

+ * The getInput... methods return the actual number of + * fields/records sent to the API including invalid records. + * The getProcessed... methods are the number sent to the + * Engine. + *

+ * The inputRecordCount field is calculated so it + * should not be set in deserialisation but it should be serialised + * so the field is visible. + */ + +public class DataCounts implements ToXContentObject, Writeable { + + private static final String DOCUMENT_SUFFIX = "_data_counts"; + public static final String PROCESSED_RECORD_COUNT_STR = "processed_record_count"; + public static final String PROCESSED_FIELD_COUNT_STR = "processed_field_count"; + public static final String INPUT_BYTES_STR = "input_bytes"; + public static final String INPUT_RECORD_COUNT_STR = "input_record_count"; + public static final String INPUT_FIELD_COUNT_STR = "input_field_count"; + public static final String INVALID_DATE_COUNT_STR = "invalid_date_count"; + public static final String MISSING_FIELD_COUNT_STR = "missing_field_count"; + public static final String OUT_OF_ORDER_TIME_COUNT_STR = "out_of_order_timestamp_count"; + public static final String EMPTY_BUCKET_COUNT_STR = "empty_bucket_count"; + public static final String SPARSE_BUCKET_COUNT_STR = "sparse_bucket_count"; + public static final String BUCKET_COUNT_STR = "bucket_count"; + public static final String EARLIEST_RECORD_TIME_STR = "earliest_record_timestamp"; + public static final String LATEST_RECORD_TIME_STR = "latest_record_timestamp"; + public static final String LAST_DATA_TIME_STR = "last_data_time"; + public static final String LATEST_EMPTY_BUCKET_TIME_STR = "latest_empty_bucket_timestamp"; + public static final String LATEST_SPARSE_BUCKET_TIME_STR = "latest_sparse_bucket_timestamp"; + + public static final ParseField PROCESSED_RECORD_COUNT = new ParseField(PROCESSED_RECORD_COUNT_STR); + public static final ParseField PROCESSED_FIELD_COUNT = new ParseField(PROCESSED_FIELD_COUNT_STR); + public static final ParseField INPUT_BYTES = new ParseField(INPUT_BYTES_STR); + public static final ParseField INPUT_RECORD_COUNT = new ParseField(INPUT_RECORD_COUNT_STR); + public static final ParseField INPUT_FIELD_COUNT = new ParseField(INPUT_FIELD_COUNT_STR); + public static final ParseField INVALID_DATE_COUNT = new ParseField(INVALID_DATE_COUNT_STR); + public static final ParseField MISSING_FIELD_COUNT = new ParseField(MISSING_FIELD_COUNT_STR); + public static final ParseField OUT_OF_ORDER_TIME_COUNT = new ParseField(OUT_OF_ORDER_TIME_COUNT_STR); + public static final ParseField EMPTY_BUCKET_COUNT = new ParseField(EMPTY_BUCKET_COUNT_STR); + public static final ParseField SPARSE_BUCKET_COUNT = new ParseField(SPARSE_BUCKET_COUNT_STR); + public static final ParseField BUCKET_COUNT = new ParseField(BUCKET_COUNT_STR); + public static final ParseField EARLIEST_RECORD_TIME = new ParseField(EARLIEST_RECORD_TIME_STR); + public static final ParseField LATEST_RECORD_TIME = new ParseField(LATEST_RECORD_TIME_STR); + public static final ParseField LAST_DATA_TIME = new ParseField(LAST_DATA_TIME_STR); + public static final ParseField LATEST_EMPTY_BUCKET_TIME = new ParseField(LATEST_EMPTY_BUCKET_TIME_STR); + public static final ParseField LATEST_SPARSE_BUCKET_TIME = new ParseField(LATEST_SPARSE_BUCKET_TIME_STR); + + public static final ParseField TYPE = new ParseField("data_counts"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("data_counts", true, + a -> new DataCounts((String) a[0], (long) a[1], (long) a[2], (long) a[3], (long) a[4], (long) a[5], (long) a[6], + (long) a[7], (long) a[8], (long) a[9], (long) a[10], (Date) a[11], (Date) a[12], (Date) a[13], (Date) a[14], + (Date) a[15])); + + static { + 
PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), PROCESSED_RECORD_COUNT); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), PROCESSED_FIELD_COUNT); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), INPUT_BYTES); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), INPUT_FIELD_COUNT); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), INVALID_DATE_COUNT); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), MISSING_FIELD_COUNT); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), OUT_OF_ORDER_TIME_COUNT); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), EMPTY_BUCKET_COUNT); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), SPARSE_BUCKET_COUNT); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), BUCKET_COUNT); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> { + if (p.currentToken() == Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException( + "unexpected token [" + p.currentToken() + "] for [" + EARLIEST_RECORD_TIME.getPreferredName() + "]"); + }, EARLIEST_RECORD_TIME, ValueType.VALUE); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> { + if (p.currentToken() == Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException( + "unexpected token [" + p.currentToken() + "] for [" + LATEST_RECORD_TIME.getPreferredName() + "]"); + }, LATEST_RECORD_TIME, ValueType.VALUE); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> { + if (p.currentToken() == Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException( + "unexpected token [" + p.currentToken() + "] for [" + LAST_DATA_TIME.getPreferredName() + "]"); + }, LAST_DATA_TIME, ValueType.VALUE); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> { + if (p.currentToken() == Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException( + "unexpected token [" + p.currentToken() + "] for [" + LATEST_EMPTY_BUCKET_TIME.getPreferredName() + "]"); + }, LATEST_EMPTY_BUCKET_TIME, ValueType.VALUE); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> { + if (p.currentToken() == Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException( + "unexpected token [" + p.currentToken() + "] for [" + LATEST_SPARSE_BUCKET_TIME.getPreferredName() + "]"); + }, LATEST_SPARSE_BUCKET_TIME, ValueType.VALUE); + PARSER.declareLong((t, u) -> {;}, INPUT_RECORD_COUNT); + } + + public static String documentId(String jobId) { + return jobId + DOCUMENT_SUFFIX; + } + + public static String v54DocumentId(String jobId) { + return jobId + "-data-counts"; + } + + private final String jobId; + private long 
processedRecordCount; + private long processedFieldCount; + private long inputBytes; + private long inputFieldCount; + private long invalidDateCount; + private long missingFieldCount; + private long outOfOrderTimeStampCount; + private long emptyBucketCount; + private long sparseBucketCount; + private long bucketCount; + // TODO: Use java.time for the Dates here: x-pack-elasticsearch#829 + private Date earliestRecordTimeStamp; + private Date latestRecordTimeStamp; + private Date lastDataTimeStamp; + private Date latestEmptyBucketTimeStamp; + private Date latestSparseBucketTimeStamp; + + public DataCounts(String jobId, long processedRecordCount, long processedFieldCount, long inputBytes, + long inputFieldCount, long invalidDateCount, long missingFieldCount, long outOfOrderTimeStampCount, + long emptyBucketCount, long sparseBucketCount, long bucketCount, + Date earliestRecordTimeStamp, Date latestRecordTimeStamp, Date lastDataTimeStamp, + Date latestEmptyBucketTimeStamp, Date latestSparseBucketTimeStamp) { + this.jobId = jobId; + this.processedRecordCount = processedRecordCount; + this.processedFieldCount = processedFieldCount; + this.inputBytes = inputBytes; + this.inputFieldCount = inputFieldCount; + this.invalidDateCount = invalidDateCount; + this.missingFieldCount = missingFieldCount; + this.outOfOrderTimeStampCount = outOfOrderTimeStampCount; + this.emptyBucketCount = emptyBucketCount; + this.sparseBucketCount = sparseBucketCount; + this.bucketCount = bucketCount; + this.latestRecordTimeStamp = latestRecordTimeStamp; + this.earliestRecordTimeStamp = earliestRecordTimeStamp; + this.lastDataTimeStamp = lastDataTimeStamp; + this.latestEmptyBucketTimeStamp = latestEmptyBucketTimeStamp; + this.latestSparseBucketTimeStamp = latestSparseBucketTimeStamp; + } + + public DataCounts(String jobId) { + this.jobId = jobId; + } + + public DataCounts(DataCounts lhs) { + jobId = lhs.jobId; + processedRecordCount = lhs.processedRecordCount; + processedFieldCount = lhs.processedFieldCount; + inputBytes = lhs.inputBytes; + inputFieldCount = lhs.inputFieldCount; + invalidDateCount = lhs.invalidDateCount; + missingFieldCount = lhs.missingFieldCount; + outOfOrderTimeStampCount = lhs.outOfOrderTimeStampCount; + emptyBucketCount = lhs.emptyBucketCount; + sparseBucketCount = lhs.sparseBucketCount; + bucketCount = lhs.bucketCount; + latestRecordTimeStamp = lhs.latestRecordTimeStamp; + earliestRecordTimeStamp = lhs.earliestRecordTimeStamp; + lastDataTimeStamp = lhs.lastDataTimeStamp; + latestEmptyBucketTimeStamp = lhs.latestEmptyBucketTimeStamp; + latestSparseBucketTimeStamp = lhs.latestSparseBucketTimeStamp; + } + + public DataCounts(StreamInput in) throws IOException { + jobId = in.readString(); + processedRecordCount = in.readVLong(); + processedFieldCount = in.readVLong(); + inputBytes = in.readVLong(); + inputFieldCount = in.readVLong(); + invalidDateCount = in.readVLong(); + missingFieldCount = in.readVLong(); + outOfOrderTimeStampCount = in.readVLong(); + emptyBucketCount = in.readVLong(); + sparseBucketCount = in.readVLong(); + bucketCount = in.readVLong(); + if (in.readBoolean()) { + latestRecordTimeStamp = new Date(in.readVLong()); + } + if (in.readBoolean()) { + earliestRecordTimeStamp = new Date(in.readVLong()); + } + if (in.readBoolean()) { + lastDataTimeStamp = new Date(in.readVLong()); + } + if (in.readBoolean()) { + latestEmptyBucketTimeStamp = new Date(in.readVLong()); + } + if (in.readBoolean()) { + latestSparseBucketTimeStamp = new Date(in.readVLong()); + } + in.readVLong(); // throw away 
inputRecordCount + } + + public String getJobid() { + return jobId; + } + + /** + * Number of records processed by this job. + * This value is the number of records passed on to + * the engine, i.e. {@linkplain #getInputRecordCount()} minus + * records with bad dates or out-of-order timestamps. + * + * @return Number of records processed by this job {@code long} + */ + public long getProcessedRecordCount() { + return processedRecordCount; + } + + public void incrementProcessedRecordCount(long additional) { + processedRecordCount += additional; + } + + /** + * Number of data points (processed record count * the number + * of analysed fields) processed by this job. This count does + * not include the time field. + * + * @return Number of data points processed by this job {@code long} + */ + public long getProcessedFieldCount() { + return processedFieldCount; + } + + public void calcProcessedFieldCount(long analysisFieldsPerRecord) { + processedFieldCount = + (processedRecordCount * analysisFieldsPerRecord) + - missingFieldCount; + + // processedFieldCount could be a negative value if no + // records have been written in which case it should be 0 + processedFieldCount = (processedFieldCount < 0) ? 0 : processedFieldCount; + } + + /** + * Total number of input records read. + * This = processed record count + date parse error records count + * + out of order record count. + *

+ * Records with missing fields are counted as they are still written. + * + * @return Total number of input records read {@code long} + */ + public long getInputRecordCount() { + return processedRecordCount + outOfOrderTimeStampCount + + invalidDateCount; + } + + /** + * The total number of bytes sent to this job. + * This value includes the bytes from any records + * that have been discarded for any reason + * e.g. because the date cannot be read + * + * @return Volume in bytes + */ + public long getInputBytes() { + return inputBytes; + } + + public void incrementInputBytes(long additional) { + inputBytes += additional; + } + + /** + * The total number of fields sent to the job + * including fields that aren't analysed. + * + * @return The total number of fields sent to the job + */ + public long getInputFieldCount() { + return inputFieldCount; + } + + public void incrementInputFieldCount(long additional) { + inputFieldCount += additional; + } + + /** + * The number of records with an invalid date field that could + * not be parsed or converted to epoch time. + * + * @return The number of records with an invalid date field + */ + public long getInvalidDateCount() { + return invalidDateCount; + } + + public void incrementInvalidDateCount(long additional) { + invalidDateCount += additional; + } + + + /** + * The number of missing fields that had been + * configured for analysis. + * + * @return The number of missing fields + */ + public long getMissingFieldCount() { + return missingFieldCount; + } + + public void incrementMissingFieldCount(long additional) { + missingFieldCount += additional; + } + + /** + * The number of records with a timestamp that is + * before the time of the latest record. Records should + * be in ascending chronological order + * + * @return The number of records with a timestamp that is before the time of the latest record + */ + public long getOutOfOrderTimeStampCount() { + return outOfOrderTimeStampCount; + } + + public void incrementOutOfOrderTimeStampCount(long additional) { + outOfOrderTimeStampCount += additional; + } + + /** + * The number of buckets with no records in it. Used to measure general data fitness and/or + * configuration problems (bucket span). + * + * @return Number of empty buckets processed by this job {@code long} + */ + public long getEmptyBucketCount() { + return emptyBucketCount; + } + + public void incrementEmptyBucketCount(long additional) { + emptyBucketCount += additional; + } + + /** + * The number of buckets with few records compared to the overall counts. + * Used to measure general data fitness and/or configuration problems (bucket span). + * + * @return Number of sparse buckets processed by this job {@code long} + */ + public long getSparseBucketCount() { + return sparseBucketCount; + } + + public void incrementSparseBucketCount(long additional) { + sparseBucketCount += additional; + } + + /** + * The number of buckets overall. + * + * @return Number of buckets processed by this job {@code long} + */ + public long getBucketCount() { + return bucketCount; + } + + public void incrementBucketCount(long additional) { + bucketCount += additional; + } + /** + * The time of the first record seen. + * + * @return The first record time + */ + public Date getEarliestRecordTimeStamp() { + return earliestRecordTimeStamp; + } + + /** + * If {@code earliestRecordTimeStamp} has not been set (i.e. 
is {@code null}) + * then set it to {@code timeStamp} + * + * @param timeStamp Candidate time + * @throws IllegalStateException if {@code earliestRecordTimeStamp} is already set + */ + public void setEarliestRecordTimeStamp(Date timeStamp) { + if (earliestRecordTimeStamp != null) { + throw new IllegalStateException("earliestRecordTimeStamp can only be set once"); + } + earliestRecordTimeStamp = timeStamp; + } + + + /** + * The time of the latest record seen. + * + * @return Latest record time + */ + public Date getLatestRecordTimeStamp() { + return latestRecordTimeStamp; + } + + public void setLatestRecordTimeStamp(Date latestRecordTimeStamp) { + this.latestRecordTimeStamp = latestRecordTimeStamp; + } + + public void updateLatestRecordTimeStamp(Date latestRecordTimeStamp) { + if (latestRecordTimeStamp != null && + (this.latestRecordTimeStamp == null || + latestRecordTimeStamp.after(this.latestRecordTimeStamp))) { + this.latestRecordTimeStamp = latestRecordTimeStamp; + } + } + + /** + * The wall clock time the latest record was seen. + * + * @return Wall clock time of the lastest record + */ + public Date getLastDataTimeStamp() { + return lastDataTimeStamp; + } + + public void setLastDataTimeStamp(Date lastDataTimeStamp) { + this.lastDataTimeStamp = lastDataTimeStamp; + } + + /** + * The time of the latest empty bucket seen. + * + * @return Latest empty bucket time + */ + public Date getLatestEmptyBucketTimeStamp() { + return latestEmptyBucketTimeStamp; + } + + public void setLatestEmptyBucketTimeStamp(Date latestEmptyBucketTimeStamp) { + this.latestEmptyBucketTimeStamp = latestEmptyBucketTimeStamp; + } + + public void updateLatestEmptyBucketTimeStamp(Date latestEmptyBucketTimeStamp) { + if (latestEmptyBucketTimeStamp != null && + (this.latestEmptyBucketTimeStamp == null || + latestEmptyBucketTimeStamp.after(this.latestEmptyBucketTimeStamp))) { + this.latestEmptyBucketTimeStamp = latestEmptyBucketTimeStamp; + } + } + + /** + * The time of the latest sparse bucket seen. 
+ * + * @return Latest sparse bucket time + */ + public Date getLatestSparseBucketTimeStamp() { + return latestSparseBucketTimeStamp; + } + + public void setLatestSparseBucketTimeStamp(Date latestSparseBucketTimeStamp) { + this.latestSparseBucketTimeStamp = latestSparseBucketTimeStamp; + } + + public void updateLatestSparseBucketTimeStamp(Date latestSparseBucketTimeStamp) { + if (latestSparseBucketTimeStamp != null && + (this.latestSparseBucketTimeStamp == null || + latestSparseBucketTimeStamp.after(this.latestSparseBucketTimeStamp))) { + this.latestSparseBucketTimeStamp = latestSparseBucketTimeStamp; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(jobId); + out.writeVLong(processedRecordCount); + out.writeVLong(processedFieldCount); + out.writeVLong(inputBytes); + out.writeVLong(inputFieldCount); + out.writeVLong(invalidDateCount); + out.writeVLong(missingFieldCount); + out.writeVLong(outOfOrderTimeStampCount); + out.writeVLong(emptyBucketCount); + out.writeVLong(sparseBucketCount); + out.writeVLong(bucketCount); + if (latestRecordTimeStamp != null) { + out.writeBoolean(true); + out.writeVLong(latestRecordTimeStamp.getTime()); + } else { + out.writeBoolean(false); + } + if (earliestRecordTimeStamp != null) { + out.writeBoolean(true); + out.writeVLong(earliestRecordTimeStamp.getTime()); + } else { + out.writeBoolean(false); + } + if (lastDataTimeStamp != null) { + out.writeBoolean(true); + out.writeVLong(lastDataTimeStamp.getTime()); + } else { + out.writeBoolean(false); + } + if (latestEmptyBucketTimeStamp != null) { + out.writeBoolean(true); + out.writeVLong(latestEmptyBucketTimeStamp.getTime()); + } else { + out.writeBoolean(false); + } + if (latestSparseBucketTimeStamp != null) { + out.writeBoolean(true); + out.writeVLong(latestSparseBucketTimeStamp.getTime()); + } else { + out.writeBoolean(false); + } + out.writeVLong(getInputRecordCount()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + doXContentBody(builder, params); + builder.endObject(); + return builder; + } + + public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(PROCESSED_RECORD_COUNT.getPreferredName(), processedRecordCount); + builder.field(PROCESSED_FIELD_COUNT.getPreferredName(), processedFieldCount); + builder.field(INPUT_BYTES.getPreferredName(), inputBytes); + builder.field(INPUT_FIELD_COUNT.getPreferredName(), inputFieldCount); + builder.field(INVALID_DATE_COUNT.getPreferredName(), invalidDateCount); + builder.field(MISSING_FIELD_COUNT.getPreferredName(), missingFieldCount); + builder.field(OUT_OF_ORDER_TIME_COUNT.getPreferredName(), outOfOrderTimeStampCount); + builder.field(EMPTY_BUCKET_COUNT.getPreferredName(), emptyBucketCount); + builder.field(SPARSE_BUCKET_COUNT.getPreferredName(), sparseBucketCount); + builder.field(BUCKET_COUNT.getPreferredName(), bucketCount); + if (earliestRecordTimeStamp != null) { + builder.timeField(EARLIEST_RECORD_TIME.getPreferredName(), EARLIEST_RECORD_TIME.getPreferredName() + "_string", + earliestRecordTimeStamp.getTime()); + } + if (latestRecordTimeStamp != null) { + builder.timeField(LATEST_RECORD_TIME.getPreferredName(), LATEST_RECORD_TIME.getPreferredName() + "_string", + latestRecordTimeStamp.getTime()); + } + if (lastDataTimeStamp != null) { + builder.timeField(LAST_DATA_TIME.getPreferredName(), 
LAST_DATA_TIME.getPreferredName() + "_string", + lastDataTimeStamp.getTime()); + } + if (latestEmptyBucketTimeStamp != null) { + builder.timeField(LATEST_EMPTY_BUCKET_TIME.getPreferredName(), LATEST_EMPTY_BUCKET_TIME.getPreferredName() + "_string", + latestEmptyBucketTimeStamp.getTime()); + } + if (latestSparseBucketTimeStamp != null) { + builder.timeField(LATEST_SPARSE_BUCKET_TIME.getPreferredName(), LATEST_SPARSE_BUCKET_TIME.getPreferredName() + "_string", + latestSparseBucketTimeStamp.getTime()); + } + builder.field(INPUT_RECORD_COUNT.getPreferredName(), getInputRecordCount()); + + return builder; + } + + /** + * Equality test + */ + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other instanceof DataCounts == false) { + return false; + } + + DataCounts that = (DataCounts) other; + + return Objects.equals(this.jobId, that.jobId) && + this.processedRecordCount == that.processedRecordCount && + this.processedFieldCount == that.processedFieldCount && + this.inputBytes == that.inputBytes && + this.inputFieldCount == that.inputFieldCount && + this.invalidDateCount == that.invalidDateCount && + this.missingFieldCount == that.missingFieldCount && + this.outOfOrderTimeStampCount == that.outOfOrderTimeStampCount && + this.emptyBucketCount == that.emptyBucketCount && + this.sparseBucketCount == that.sparseBucketCount && + this.bucketCount == that.bucketCount && + Objects.equals(this.latestRecordTimeStamp, that.latestRecordTimeStamp) && + Objects.equals(this.earliestRecordTimeStamp, that.earliestRecordTimeStamp) && + Objects.equals(this.lastDataTimeStamp, that.lastDataTimeStamp) && + Objects.equals(this.latestEmptyBucketTimeStamp, that.latestEmptyBucketTimeStamp) && + Objects.equals(this.latestSparseBucketTimeStamp, that.latestSparseBucketTimeStamp); + } + + @Override + public int hashCode() { + return Objects.hash(jobId, processedRecordCount, processedFieldCount, + inputBytes, inputFieldCount, invalidDateCount, missingFieldCount, + outOfOrderTimeStampCount, lastDataTimeStamp, emptyBucketCount, sparseBucketCount, bucketCount, + latestRecordTimeStamp, earliestRecordTimeStamp, latestEmptyBucketTimeStamp, latestSparseBucketTimeStamp); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStats.java new file mode 100644 index 0000000000000..2da8226093052 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStats.java @@ -0,0 +1,351 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.job.process.autodetect.state; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.results.Result; +import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; + +import java.io.IOException; +import java.util.Date; +import java.util.Locale; +import java.util.Objects; + +/** + * Provide access to the C++ model memory usage numbers for the Java process. + */ +public class ModelSizeStats implements ToXContentObject, Writeable { + + /** + * Result type + */ + public static final String RESULT_TYPE_VALUE = "model_size_stats"; + public static final ParseField RESULT_TYPE_FIELD = new ParseField(RESULT_TYPE_VALUE); + + /** + * Field Names + */ + public static final ParseField MODEL_BYTES_FIELD = new ParseField("model_bytes"); + public static final ParseField TOTAL_BY_FIELD_COUNT_FIELD = new ParseField("total_by_field_count"); + public static final ParseField TOTAL_OVER_FIELD_COUNT_FIELD = new ParseField("total_over_field_count"); + public static final ParseField TOTAL_PARTITION_FIELD_COUNT_FIELD = new ParseField("total_partition_field_count"); + public static final ParseField BUCKET_ALLOCATION_FAILURES_COUNT_FIELD = new ParseField("bucket_allocation_failures_count"); + public static final ParseField MEMORY_STATUS_FIELD = new ParseField("memory_status"); + public static final ParseField LOG_TIME_FIELD = new ParseField("log_time"); + public static final ParseField TIMESTAMP_FIELD = new ParseField("timestamp"); + + public static final ConstructingObjectParser STRICT_PARSER = createParser(false); + public static final ConstructingObjectParser LENIENT_PARSER = createParser(true); + + private static ConstructingObjectParser createParser(boolean ignoreUnknownFields) { + ConstructingObjectParser parser = new ConstructingObjectParser<>(RESULT_TYPE_FIELD.getPreferredName(), + ignoreUnknownFields, a -> new Builder((String) a[0])); + + parser.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + parser.declareString((modelSizeStat, s) -> {}, Result.RESULT_TYPE); + parser.declareLong(Builder::setModelBytes, MODEL_BYTES_FIELD); + parser.declareLong(Builder::setBucketAllocationFailuresCount, BUCKET_ALLOCATION_FAILURES_COUNT_FIELD); + parser.declareLong(Builder::setTotalByFieldCount, TOTAL_BY_FIELD_COUNT_FIELD); + parser.declareLong(Builder::setTotalOverFieldCount, TOTAL_OVER_FIELD_COUNT_FIELD); + parser.declareLong(Builder::setTotalPartitionFieldCount, TOTAL_PARTITION_FIELD_COUNT_FIELD); + parser.declareField(Builder::setLogTime, p -> { + if (p.currentToken() == Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException( + "unexpected token [" + p.currentToken() + "] for [" + LOG_TIME_FIELD.getPreferredName() + "]"); + }, LOG_TIME_FIELD, ValueType.VALUE); + 
parser.declareField(Builder::setTimestamp, p -> { + if (p.currentToken() == Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException( + "unexpected token [" + p.currentToken() + "] for [" + TIMESTAMP_FIELD.getPreferredName() + "]"); + }, TIMESTAMP_FIELD, ValueType.VALUE); + parser.declareField(Builder::setMemoryStatus, p -> MemoryStatus.fromString(p.text()), MEMORY_STATUS_FIELD, ValueType.STRING); + + return parser; + } + + /** + * The status of the memory monitored by the ResourceMonitor. OK is default, + * SOFT_LIMIT means that the models have done some aggressive pruning to + * keep the memory below the limit, and HARD_LIMIT means that samples have + * been dropped + */ + public enum MemoryStatus implements Writeable { + OK, SOFT_LIMIT, HARD_LIMIT; + + public static MemoryStatus fromString(String statusName) { + return valueOf(statusName.trim().toUpperCase(Locale.ROOT)); + } + + public static MemoryStatus readFromStream(StreamInput in) throws IOException { + return in.readEnum(MemoryStatus.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(this); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } + + private final String jobId; + private final long modelBytes; + private final long totalByFieldCount; + private final long totalOverFieldCount; + private final long totalPartitionFieldCount; + private final long bucketAllocationFailuresCount; + private final MemoryStatus memoryStatus; + private final Date timestamp; + private final Date logTime; + + private ModelSizeStats(String jobId, long modelBytes, long totalByFieldCount, long totalOverFieldCount, + long totalPartitionFieldCount, long bucketAllocationFailuresCount, MemoryStatus memoryStatus, + Date timestamp, Date logTime) { + this.jobId = jobId; + this.modelBytes = modelBytes; + this.totalByFieldCount = totalByFieldCount; + this.totalOverFieldCount = totalOverFieldCount; + this.totalPartitionFieldCount = totalPartitionFieldCount; + this.bucketAllocationFailuresCount = bucketAllocationFailuresCount; + this.memoryStatus = memoryStatus; + this.timestamp = timestamp; + this.logTime = logTime; + } + + public ModelSizeStats(StreamInput in) throws IOException { + jobId = in.readString(); + modelBytes = in.readVLong(); + totalByFieldCount = in.readVLong(); + totalOverFieldCount = in.readVLong(); + totalPartitionFieldCount = in.readVLong(); + bucketAllocationFailuresCount = in.readVLong(); + memoryStatus = MemoryStatus.readFromStream(in); + logTime = new Date(in.readVLong()); + timestamp = in.readBoolean() ? 
new Date(in.readVLong()) : null; + } + + public String getId() { + return documentIdPrefix(jobId) + logTime.getTime(); + } + + public static String documentIdPrefix(String jobId) { + return jobId + "_model_size_stats_"; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(jobId); + out.writeVLong(modelBytes); + out.writeVLong(totalByFieldCount); + out.writeVLong(totalOverFieldCount); + out.writeVLong(totalPartitionFieldCount); + out.writeVLong(bucketAllocationFailuresCount); + memoryStatus.writeTo(out); + out.writeVLong(logTime.getTime()); + boolean hasTimestamp = timestamp != null; + out.writeBoolean(hasTimestamp); + if (hasTimestamp) { + out.writeVLong(timestamp.getTime()); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + doXContentBody(builder); + builder.endObject(); + return builder; + } + + public XContentBuilder doXContentBody(XContentBuilder builder) throws IOException { + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE); + builder.field(MODEL_BYTES_FIELD.getPreferredName(), modelBytes); + builder.field(TOTAL_BY_FIELD_COUNT_FIELD.getPreferredName(), totalByFieldCount); + builder.field(TOTAL_OVER_FIELD_COUNT_FIELD.getPreferredName(), totalOverFieldCount); + builder.field(TOTAL_PARTITION_FIELD_COUNT_FIELD.getPreferredName(), totalPartitionFieldCount); + builder.field(BUCKET_ALLOCATION_FAILURES_COUNT_FIELD.getPreferredName(), bucketAllocationFailuresCount); + builder.field(MEMORY_STATUS_FIELD.getPreferredName(), memoryStatus); + builder.timeField(LOG_TIME_FIELD.getPreferredName(), LOG_TIME_FIELD.getPreferredName() + "_string", logTime.getTime()); + if (timestamp != null) { + builder.timeField(TIMESTAMP_FIELD.getPreferredName(), TIMESTAMP_FIELD.getPreferredName() + "_string", timestamp.getTime()); + } + + return builder; + } + + public String getJobId() { + return jobId; + } + + public long getModelBytes() { + return modelBytes; + } + + public long getTotalByFieldCount() { + return totalByFieldCount; + } + + public long getTotalPartitionFieldCount() { + return totalPartitionFieldCount; + } + + public long getTotalOverFieldCount() { + return totalOverFieldCount; + } + + public long getBucketAllocationFailuresCount() { + return bucketAllocationFailuresCount; + } + + public MemoryStatus getMemoryStatus() { + return memoryStatus; + } + + /** + * The timestamp of the last processed record when this instance was created. + * @return The record time + */ + public Date getTimestamp() { + return timestamp; + } + + /** + * The wall clock time at the point when this instance was created. + * @return The wall clock time + */ + public Date getLogTime() { + return logTime; + } + + @Override + public int hashCode() { + // this.id excluded here as it is generated by the datastore + return Objects.hash(jobId, modelBytes, totalByFieldCount, totalOverFieldCount, totalPartitionFieldCount, + this.bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); + } + + /** + * Compare all the fields. 
+ */ + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other instanceof ModelSizeStats == false) { + return false; + } + + ModelSizeStats that = (ModelSizeStats) other; + + return this.modelBytes == that.modelBytes && this.totalByFieldCount == that.totalByFieldCount + && this.totalOverFieldCount == that.totalOverFieldCount && this.totalPartitionFieldCount == that.totalPartitionFieldCount + && this.bucketAllocationFailuresCount == that.bucketAllocationFailuresCount + && Objects.equals(this.memoryStatus, that.memoryStatus) && Objects.equals(this.timestamp, that.timestamp) + && Objects.equals(this.logTime, that.logTime) + && Objects.equals(this.jobId, that.jobId); + } + + public static class Builder { + + private final String jobId; + private long modelBytes; + private long totalByFieldCount; + private long totalOverFieldCount; + private long totalPartitionFieldCount; + private long bucketAllocationFailuresCount; + private MemoryStatus memoryStatus; + private Date timestamp; + private Date logTime; + + public Builder(String jobId) { + this.jobId = jobId; + memoryStatus = MemoryStatus.OK; + logTime = new Date(); + } + + public Builder(ModelSizeStats modelSizeStats) { + this.jobId = modelSizeStats.jobId; + this.modelBytes = modelSizeStats.modelBytes; + this.totalByFieldCount = modelSizeStats.totalByFieldCount; + this.totalOverFieldCount = modelSizeStats.totalOverFieldCount; + this.totalPartitionFieldCount = modelSizeStats.totalPartitionFieldCount; + this.bucketAllocationFailuresCount = modelSizeStats.bucketAllocationFailuresCount; + this.memoryStatus = modelSizeStats.memoryStatus; + this.timestamp = modelSizeStats.timestamp; + this.logTime = modelSizeStats.logTime; + } + + public Builder setModelBytes(long modelBytes) { + this.modelBytes = modelBytes; + return this; + } + + public Builder setTotalByFieldCount(long totalByFieldCount) { + this.totalByFieldCount = totalByFieldCount; + return this; + } + + public Builder setTotalPartitionFieldCount(long totalPartitionFieldCount) { + this.totalPartitionFieldCount = totalPartitionFieldCount; + return this; + } + + public Builder setTotalOverFieldCount(long totalOverFieldCount) { + this.totalOverFieldCount = totalOverFieldCount; + return this; + } + + public Builder setBucketAllocationFailuresCount(long bucketAllocationFailuresCount) { + this.bucketAllocationFailuresCount = bucketAllocationFailuresCount; + return this; + } + + public Builder setMemoryStatus(MemoryStatus memoryStatus) { + Objects.requireNonNull(memoryStatus, "[" + MEMORY_STATUS_FIELD.getPreferredName() + "] must not be null"); + this.memoryStatus = memoryStatus; + return this; + } + + public Builder setTimestamp(Date timestamp) { + this.timestamp = timestamp; + return this; + } + + public Builder setLogTime(Date logTime) { + this.logTime = logTime; + return this; + } + + public ModelSizeStats build() { + return new ModelSizeStats(jobId, modelBytes, totalByFieldCount, totalOverFieldCount, totalPartitionFieldCount, + bucketAllocationFailuresCount, memoryStatus, timestamp, logTime); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java new file mode 100644 index 0000000000000..1588298918e22 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshot.java @@ -0,0 +1,467 @@ +/* 
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.process.autodetect.state; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.Objects; + +/** + * ModelSnapshot Result POJO + */ +public class ModelSnapshot implements ToXContentObject, Writeable { + /** + * Field Names + */ + public static final ParseField TIMESTAMP = new ParseField("timestamp"); + public static final ParseField DESCRIPTION = new ParseField("description"); + public static final ParseField SNAPSHOT_DOC_COUNT = new ParseField("snapshot_doc_count"); + public static final ParseField LATEST_RECORD_TIME = new ParseField("latest_record_time_stamp"); + public static final ParseField LATEST_RESULT_TIME = new ParseField("latest_result_time_stamp"); + public static final ParseField QUANTILES = new ParseField("quantiles"); + public static final ParseField RETAIN = new ParseField("retain"); + public static final ParseField MIN_VERSION = new ParseField("min_version"); + + // Used for QueryPage + public static final ParseField RESULTS_FIELD = new ParseField("model_snapshots"); + + /** + * Legacy type, now used only as a discriminant in the document ID + */ + public static final ParseField TYPE = new ParseField("model_snapshot"); + + public static final ObjectParser STRICT_PARSER = createParser(false); + public static final ObjectParser LENIENT_PARSER = createParser(true); + + private static ObjectParser createParser(boolean ignoreUnknownFields) { + ObjectParser parser = new ObjectParser<>(TYPE.getPreferredName(), ignoreUnknownFields, Builder::new); + + parser.declareString(Builder::setJobId, Job.ID); + parser.declareString(Builder::setMinVersion, MIN_VERSION); + parser.declareField(Builder::setTimestamp, p -> { + if (p.currentToken() == Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException("unexpected token [" + p.currentToken() + "] for [" + + TIMESTAMP.getPreferredName() + "]"); + }, TIMESTAMP, ValueType.VALUE); + parser.declareString(Builder::setDescription, DESCRIPTION); + 
parser.declareString(Builder::setSnapshotId, ModelSnapshotField.SNAPSHOT_ID); + parser.declareInt(Builder::setSnapshotDocCount, SNAPSHOT_DOC_COUNT); + parser.declareObject(Builder::setModelSizeStats, ignoreUnknownFields ? ModelSizeStats.LENIENT_PARSER : ModelSizeStats.STRICT_PARSER, + ModelSizeStats.RESULT_TYPE_FIELD); + parser.declareField(Builder::setLatestRecordTimeStamp, p -> { + if (p.currentToken() == Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException( + "unexpected token [" + p.currentToken() + "] for [" + LATEST_RECORD_TIME.getPreferredName() + "]"); + }, LATEST_RECORD_TIME, ValueType.VALUE); + parser.declareField(Builder::setLatestResultTimeStamp, p -> { + if (p.currentToken() == Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException( + "unexpected token [" + p.currentToken() + "] for [" + LATEST_RESULT_TIME.getPreferredName() + "]"); + }, LATEST_RESULT_TIME, ValueType.VALUE); + parser.declareObject(Builder::setQuantiles, ignoreUnknownFields ? Quantiles.LENIENT_PARSER : Quantiles.STRICT_PARSER, QUANTILES); + parser.declareBoolean(Builder::setRetain, RETAIN); + + return parser; + } + + + private final String jobId; + + /** + * The minimum version a node should have to be able + * to read this model snapshot. + */ + private final Version minVersion; + + private final Date timestamp; + private final String description; + private final String snapshotId; + private final int snapshotDocCount; + private final ModelSizeStats modelSizeStats; + private final Date latestRecordTimeStamp; + private final Date latestResultTimeStamp; + private final Quantiles quantiles; + private final boolean retain; + + + private ModelSnapshot(String jobId, Version minVersion, Date timestamp, String description, String snapshotId, int snapshotDocCount, + ModelSizeStats modelSizeStats, Date latestRecordTimeStamp, Date latestResultTimeStamp, Quantiles quantiles, + boolean retain) { + this.jobId = jobId; + this.minVersion = minVersion; + this.timestamp = timestamp; + this.description = description; + this.snapshotId = snapshotId; + this.snapshotDocCount = snapshotDocCount; + this.modelSizeStats = modelSizeStats; + this.latestRecordTimeStamp = latestRecordTimeStamp; + this.latestResultTimeStamp = latestResultTimeStamp; + this.quantiles = quantiles; + this.retain = retain; + } + + public ModelSnapshot(StreamInput in) throws IOException { + jobId = in.readString(); + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + minVersion = Version.readVersion(in); + } else { + minVersion = Version.V_5_5_0; + } + timestamp = in.readBoolean() ? new Date(in.readVLong()) : null; + description = in.readOptionalString(); + snapshotId = in.readOptionalString(); + snapshotDocCount = in.readInt(); + modelSizeStats = in.readOptionalWriteable(ModelSizeStats::new); + latestRecordTimeStamp = in.readBoolean() ? new Date(in.readVLong()) : null; + latestResultTimeStamp = in.readBoolean() ? 
new Date(in.readVLong()) : null; + quantiles = in.readOptionalWriteable(Quantiles::new); + retain = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(jobId); + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + Version.writeVersion(minVersion, out); + } + if (timestamp != null) { + out.writeBoolean(true); + out.writeVLong(timestamp.getTime()); + } else { + out.writeBoolean(false); + } + out.writeOptionalString(description); + out.writeOptionalString(snapshotId); + out.writeInt(snapshotDocCount); + out.writeOptionalWriteable(modelSizeStats); + if (latestRecordTimeStamp != null) { + out.writeBoolean(true); + out.writeVLong(latestRecordTimeStamp.getTime()); + } else { + out.writeBoolean(false); + } + if (latestResultTimeStamp != null) { + out.writeBoolean(true); + out.writeVLong(latestResultTimeStamp.getTime()); + } else { + out.writeBoolean(false); + } + out.writeOptionalWriteable(quantiles); + out.writeBoolean(retain); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(MIN_VERSION.getPreferredName(), minVersion); + if (timestamp != null) { + builder.timeField(TIMESTAMP.getPreferredName(), TIMESTAMP.getPreferredName() + "_string", timestamp.getTime()); + } + if (description != null) { + builder.field(DESCRIPTION.getPreferredName(), description); + } + if (snapshotId != null) { + builder.field(ModelSnapshotField.SNAPSHOT_ID.getPreferredName(), snapshotId); + } + builder.field(SNAPSHOT_DOC_COUNT.getPreferredName(), snapshotDocCount); + if (modelSizeStats != null) { + builder.field(ModelSizeStats.RESULT_TYPE_FIELD.getPreferredName(), modelSizeStats); + } + if (latestRecordTimeStamp != null) { + builder.timeField(LATEST_RECORD_TIME.getPreferredName(), LATEST_RECORD_TIME.getPreferredName() + "_string", + latestRecordTimeStamp.getTime()); + } + if (latestResultTimeStamp != null) { + builder.timeField(LATEST_RESULT_TIME.getPreferredName(), LATEST_RESULT_TIME.getPreferredName() + "_string", + latestResultTimeStamp.getTime()); + } + if (quantiles != null) { + builder.field(QUANTILES.getPreferredName(), quantiles); + } + builder.field(RETAIN.getPreferredName(), retain); + builder.endObject(); + return builder; + } + + public String getJobId() { + return jobId; + } + + public Version getMinVersion() { + return minVersion; + } + + public Date getTimestamp() { + return timestamp; + } + + public String getDescription() { + return description; + } + + public String getSnapshotId() { + return snapshotId; + } + + public int getSnapshotDocCount() { + return snapshotDocCount; + } + + public ModelSizeStats getModelSizeStats() { + return modelSizeStats; + } + + public Quantiles getQuantiles() { + return quantiles; + } + + public Date getLatestRecordTimeStamp() { + return latestRecordTimeStamp; + } + + public Date getLatestResultTimeStamp() { + return latestResultTimeStamp; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, minVersion, timestamp, description, snapshotId, quantiles, snapshotDocCount, modelSizeStats, + latestRecordTimeStamp, latestResultTimeStamp, retain); + } + + /** + * Compare all the fields. 
+ */ + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other instanceof ModelSnapshot == false) { + return false; + } + + ModelSnapshot that = (ModelSnapshot) other; + + return Objects.equals(this.jobId, that.jobId) + && Objects.equals(this.minVersion, that.minVersion) + && Objects.equals(this.timestamp, that.timestamp) + && Objects.equals(this.description, that.description) + && Objects.equals(this.snapshotId, that.snapshotId) + && this.snapshotDocCount == that.snapshotDocCount + && Objects.equals(this.modelSizeStats, that.modelSizeStats) + && Objects.equals(this.quantiles, that.quantiles) + && Objects.equals(this.latestRecordTimeStamp, that.latestRecordTimeStamp) + && Objects.equals(this.latestResultTimeStamp, that.latestResultTimeStamp) + && this.retain == that.retain; + } + + public List stateDocumentIds() { + List stateDocumentIds = new ArrayList<>(snapshotDocCount); + // The state documents count suffices are 1-based + for (int i = 1; i <= snapshotDocCount; i++) { + stateDocumentIds.add(ModelState.documentId(jobId, snapshotId, i)); + } + return stateDocumentIds; + } + + /** + * This is how the IDs were formed in v5.4 + */ + public List legacyStateDocumentIds() { + List stateDocumentIds = new ArrayList<>(snapshotDocCount); + // The state documents count suffices are 1-based + for (int i = 1; i <= snapshotDocCount; i++) { + stateDocumentIds.add(ModelState.v54DocumentId(jobId, snapshotId, i)); + } + return stateDocumentIds; + } + + public static String documentIdPrefix(String jobId) { + return jobId + "_" + TYPE + "_"; + } + + public static String documentId(ModelSnapshot snapshot) { + return documentId(snapshot.getJobId(), snapshot.getSnapshotId()); + } + + /** + * This is how the IDs were formed in v5.4 + */ + public static String v54DocumentId(ModelSnapshot snapshot) { + return v54DocumentId(snapshot.getJobId(), snapshot.getSnapshotId()); + } + + public static String documentId(String jobId, String snapshotId) { + return documentIdPrefix(jobId) + snapshotId; + } + + /** + * This is how the IDs were formed in v5.4 + */ + public static String v54DocumentId(String jobId, String snapshotId) { + return jobId + "-" + snapshotId; + } + + public static ModelSnapshot fromJson(BytesReference bytesReference) { + try (InputStream stream = bytesReference.streamInput(); + XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(bytesReference)) + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + return LENIENT_PARSER.apply(parser, null).build(); + } catch (IOException e) { + throw new ElasticsearchParseException("failed to parse modelSnapshot", e); + } + } + + public static class Builder { + private String jobId; + + // Stored snapshot documents created prior to 6.3.0 will have no + // value for min_version. We default it to 5.5.0 as there were + // no model changes between 5.5.0 and 6.3.0. 
+ private Version minVersion = Version.V_5_5_0; + + private Date timestamp; + private String description; + private String snapshotId; + private int snapshotDocCount; + private ModelSizeStats modelSizeStats; + private Date latestRecordTimeStamp; + private Date latestResultTimeStamp; + private Quantiles quantiles; + private boolean retain; + + + public Builder() { + } + + public Builder(String jobId) { + this(); + this.jobId = jobId; + } + + public Builder(ModelSnapshot modelSnapshot) { + this.jobId = modelSnapshot.jobId; + this.timestamp = modelSnapshot.timestamp; + this.description = modelSnapshot.description; + this.snapshotId = modelSnapshot.snapshotId; + this.snapshotDocCount = modelSnapshot.snapshotDocCount; + this.modelSizeStats = modelSnapshot.modelSizeStats; + this.latestRecordTimeStamp = modelSnapshot.latestRecordTimeStamp; + this.latestResultTimeStamp = modelSnapshot.latestResultTimeStamp; + this.quantiles = modelSnapshot.quantiles; + this.retain = modelSnapshot.retain; + this.minVersion = modelSnapshot.minVersion; + } + + public Builder setJobId(String jobId) { + this.jobId = jobId; + return this; + } + + public Builder setMinVersion(Version minVersion) { + this.minVersion = minVersion; + return this; + } + + public Builder setMinVersion(String minVersion) { + this.minVersion = Version.fromString(minVersion); + return this; + } + + public Builder setTimestamp(Date timestamp) { + this.timestamp = timestamp; + return this; + } + + public Builder setDescription(String description) { + this.description = description; + return this; + } + + public Builder setSnapshotId(String snapshotId) { + this.snapshotId = snapshotId; + return this; + } + + public Builder setSnapshotDocCount(int snapshotDocCount) { + this.snapshotDocCount = snapshotDocCount; + return this; + } + + public Builder setModelSizeStats(ModelSizeStats.Builder modelSizeStats) { + this.modelSizeStats = modelSizeStats.build(); + return this; + } + + public Builder setModelSizeStats(ModelSizeStats modelSizeStats) { + this.modelSizeStats = modelSizeStats; + return this; + } + + public Builder setLatestRecordTimeStamp(Date latestRecordTimeStamp) { + this.latestRecordTimeStamp = latestRecordTimeStamp; + return this; + } + + public Builder setLatestResultTimeStamp(Date latestResultTimeStamp) { + this.latestResultTimeStamp = latestResultTimeStamp; + return this; + } + + public Builder setQuantiles(Quantiles quantiles) { + this.quantiles = quantiles; + return this; + } + + public Builder setRetain(boolean value) { + this.retain = value; + return this; + } + + public ModelSnapshot build() { + return new ModelSnapshot(jobId, minVersion, timestamp, description, snapshotId, snapshotDocCount, modelSizeStats, + latestRecordTimeStamp, latestResultTimeStamp, quantiles, retain); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshotField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshotField.java new file mode 100644 index 0000000000000..c7e005ec66d5f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshotField.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.core.ml.job.process.autodetect.state;
+
+import org.elasticsearch.common.ParseField;
+
+public final class ModelSnapshotField {
+
+    public static final ParseField SNAPSHOT_ID = new ParseField("snapshot_id");
+
+    private ModelSnapshotField() {}
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelState.java
new file mode 100644
index 0000000000000..dce791a2b3d26
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelState.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.ml.job.process.autodetect.state;
+
+
+/**
+ * The model state does not need to be understood on the Java side.
+ * The Java code only needs to know how to form the document IDs so that
+ * it can retrieve and delete the correct documents.
+ */
+public class ModelState {
+
+    /**
+     * Legacy type, now used only as a discriminant in the document ID
+     */
+    public static final String TYPE = "model_state";
+
+    public static final String documentId(String jobId, String snapshotId, int docNum) {
+        return jobId + "_" + TYPE + "_" + snapshotId + "#" + docNum;
+    }
+
+    /**
+     * This is how the IDs were formed in v5.4
+     */
+    public static final String v54DocumentId(String jobId, String snapshotId, int docNum) {
+        return jobId + "-" + snapshotId + "#" + docNum;
+    }
+
+    private ModelState() {
+    }
+}
+
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/Quantiles.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/Quantiles.java
new file mode 100644
index 0000000000000..0c167aadb7623
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/Quantiles.java
@@ -0,0 +1,138 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.core.ml.job.process.autodetect.state; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.job.config.Job; + +import java.io.IOException; +import java.util.Date; +import java.util.Objects; + +/** + * Quantiles Result POJO + */ +public class Quantiles implements ToXContentObject, Writeable { + + /** + * Field Names + */ + public static final ParseField TIMESTAMP = new ParseField("timestamp"); + public static final ParseField QUANTILE_STATE = new ParseField("quantile_state"); + + /** + * Legacy type, now used only as a discriminant in the document ID + */ + public static final ParseField TYPE = new ParseField("quantiles"); + + public static final ConstructingObjectParser STRICT_PARSER = createParser(false); + public static final ConstructingObjectParser LENIENT_PARSER = createParser(true); + + private static ConstructingObjectParser createParser(boolean ignoreUnknownFields) { + ConstructingObjectParser parser = new ConstructingObjectParser<>(TYPE.getPreferredName(), ignoreUnknownFields, + a -> new Quantiles((String) a[0], (Date) a[1], (String) a[2])); + + parser.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + parser.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> new Date(p.longValue()), TIMESTAMP, ValueType.LONG); + parser.declareString(ConstructingObjectParser.constructorArg(), QUANTILE_STATE); + + return parser; + } + + public static String documentId(String jobId) { + return jobId + "_" + TYPE; + } + + /** + * This is how the IDs were formed in v5.4 + */ + public static String v54DocumentId(String jobId) { + return jobId + "-" + TYPE; + } + + private final String jobId; + private final Date timestamp; + private final String quantileState; + + public Quantiles(String jobId, Date timestamp, String quantileState) { + this.jobId = jobId; + this.timestamp = Objects.requireNonNull(timestamp); + this.quantileState = Objects.requireNonNull(quantileState); + } + + public Quantiles(StreamInput in) throws IOException { + jobId = in.readString(); + timestamp = new Date(in.readVLong()); + quantileState = in.readOptionalString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(jobId); + out.writeVLong(timestamp.getTime()); + out.writeOptionalString(quantileState); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + if (timestamp != null) { + builder.field(TIMESTAMP.getPreferredName(), timestamp.getTime()); + } + if (quantileState != null) { + builder.field(QUANTILE_STATE.getPreferredName(), quantileState); + } + builder.endObject(); + return builder; + } + + public String getJobId() { + return jobId; + } + + public Date getTimestamp() { + return timestamp; + } + + public String getQuantileState() { + return quantileState; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, timestamp, quantileState); + } + + /** + * Compare all the fields. 
+ */ + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other instanceof Quantiles == false) { + return false; + } + + Quantiles that = (Quantiles) other; + + return Objects.equals(this.jobId, that.jobId) && Objects.equals(this.timestamp, that.timestamp) + && Objects.equals(this.quantileState, that.quantileState); + + + } +} + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/writer/RecordWriter.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/writer/RecordWriter.java new file mode 100644 index 0000000000000..61b904246d50f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/writer/RecordWriter.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.process.autodetect.writer; + +import java.io.IOException; +import java.util.List; + +/** + * Interface for classes that write arrays of strings to the + * Ml analytics processes. + */ +public interface RecordWriter { + /** + * Value must match api::CAnomalyDetector::CONTROL_FIELD_NAME in the C++ + * code. + */ + String CONTROL_FIELD_NAME = "."; + + /** + * Value must match api::CBaseTokenListDataTyper::PRETOKENISED_TOKEN_FIELD in the C++ + * code. + */ + String PRETOKENISED_TOKEN_FIELD = "..."; + + /** + * Write each String in the record array + */ + void writeRecord(String[] record) throws IOException; + + /** + * Write each String in the record list + */ + void writeRecord(List record) throws IOException; + + /** + * Flush the outputIndex stream. + */ + void flush() throws IOException; + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyCause.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyCause.java new file mode 100644 index 0000000000000..50efe24ab0f6f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyCause.java @@ -0,0 +1,357 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.results; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * Anomaly Cause POJO. + * Used as a nested level inside population anomaly records. 
+ */ +public class AnomalyCause implements ToXContentObject, Writeable { + public static final ParseField ANOMALY_CAUSE = new ParseField("anomaly_cause"); + /** + * Result fields + */ + public static final ParseField PROBABILITY = new ParseField("probability"); + public static final ParseField OVER_FIELD_NAME = new ParseField("over_field_name"); + public static final ParseField OVER_FIELD_VALUE = new ParseField("over_field_value"); + public static final ParseField BY_FIELD_NAME = new ParseField("by_field_name"); + public static final ParseField BY_FIELD_VALUE = new ParseField("by_field_value"); + public static final ParseField CORRELATED_BY_FIELD_VALUE = new ParseField("correlated_by_field_value"); + public static final ParseField PARTITION_FIELD_NAME = new ParseField("partition_field_name"); + public static final ParseField PARTITION_FIELD_VALUE = new ParseField("partition_field_value"); + public static final ParseField FUNCTION = new ParseField("function"); + public static final ParseField FUNCTION_DESCRIPTION = new ParseField("function_description"); + public static final ParseField TYPICAL = new ParseField("typical"); + public static final ParseField ACTUAL = new ParseField("actual"); + public static final ParseField INFLUENCERS = new ParseField("influencers"); + + /** + * Metric Results + */ + public static final ParseField FIELD_NAME = new ParseField("field_name"); + + public static final ObjectParser STRICT_PARSER = createParser(false); + public static final ObjectParser LENIENT_PARSER = createParser(true); + + private static ObjectParser createParser(boolean ignoreUnknownFields) { + ObjectParser parser = new ObjectParser<>(ANOMALY_CAUSE.getPreferredName(), ignoreUnknownFields, + AnomalyCause::new); + + parser.declareDouble(AnomalyCause::setProbability, PROBABILITY); + parser.declareString(AnomalyCause::setByFieldName, BY_FIELD_NAME); + parser.declareString(AnomalyCause::setByFieldValue, BY_FIELD_VALUE); + parser.declareString(AnomalyCause::setCorrelatedByFieldValue, CORRELATED_BY_FIELD_VALUE); + parser.declareString(AnomalyCause::setPartitionFieldName, PARTITION_FIELD_NAME); + parser.declareString(AnomalyCause::setPartitionFieldValue, PARTITION_FIELD_VALUE); + parser.declareString(AnomalyCause::setFunction, FUNCTION); + parser.declareString(AnomalyCause::setFunctionDescription, FUNCTION_DESCRIPTION); + parser.declareDoubleArray(AnomalyCause::setTypical, TYPICAL); + parser.declareDoubleArray(AnomalyCause::setActual, ACTUAL); + parser.declareString(AnomalyCause::setFieldName, FIELD_NAME); + parser.declareString(AnomalyCause::setOverFieldName, OVER_FIELD_NAME); + parser.declareString(AnomalyCause::setOverFieldValue, OVER_FIELD_VALUE); + parser.declareObjectArray(AnomalyCause::setInfluencers, ignoreUnknownFields ? 
Influence.LENIENT_PARSER : Influence.STRICT_PARSER, + INFLUENCERS); + + return parser; + } + + private double probability; + private String byFieldName; + private String byFieldValue; + private String correlatedByFieldValue; + private String partitionFieldName; + private String partitionFieldValue; + private String function; + private String functionDescription; + private List typical; + private List actual; + + private String fieldName; + + private String overFieldName; + private String overFieldValue; + + private List influencers; + + public AnomalyCause() { + } + + @SuppressWarnings("unchecked") + public AnomalyCause(StreamInput in) throws IOException { + probability = in.readDouble(); + byFieldName = in.readOptionalString(); + byFieldValue = in.readOptionalString(); + correlatedByFieldValue = in.readOptionalString(); + partitionFieldName = in.readOptionalString(); + partitionFieldValue = in.readOptionalString(); + function = in.readOptionalString(); + functionDescription = in.readOptionalString(); + if (in.readBoolean()) { + typical = (List) in.readGenericValue(); + } + if (in.readBoolean()) { + actual = (List) in.readGenericValue(); + } + fieldName = in.readOptionalString(); + overFieldName = in.readOptionalString(); + overFieldValue = in.readOptionalString(); + if (in.readBoolean()) { + influencers = in.readList(Influence::new); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeDouble(probability); + out.writeOptionalString(byFieldName); + out.writeOptionalString(byFieldValue); + out.writeOptionalString(correlatedByFieldValue); + out.writeOptionalString(partitionFieldName); + out.writeOptionalString(partitionFieldValue); + out.writeOptionalString(function); + out.writeOptionalString(functionDescription); + boolean hasTypical = typical != null; + out.writeBoolean(hasTypical); + if (hasTypical) { + out.writeGenericValue(typical); + } + boolean hasActual = actual != null; + out.writeBoolean(hasActual); + if (hasActual) { + out.writeGenericValue(actual); + } + out.writeOptionalString(fieldName); + out.writeOptionalString(overFieldName); + out.writeOptionalString(overFieldValue); + boolean hasInfluencers = influencers != null; + out.writeBoolean(hasInfluencers); + if (hasInfluencers) { + out.writeList(influencers); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(PROBABILITY.getPreferredName(), probability); + if (byFieldName != null) { + builder.field(BY_FIELD_NAME.getPreferredName(), byFieldName); + } + if (byFieldValue != null) { + builder.field(BY_FIELD_VALUE.getPreferredName(), byFieldValue); + } + if (correlatedByFieldValue != null) { + builder.field(CORRELATED_BY_FIELD_VALUE.getPreferredName(), correlatedByFieldValue); + } + if (partitionFieldName != null) { + builder.field(PARTITION_FIELD_NAME.getPreferredName(), partitionFieldName); + } + if (partitionFieldValue != null) { + builder.field(PARTITION_FIELD_VALUE.getPreferredName(), partitionFieldValue); + } + if (function != null) { + builder.field(FUNCTION.getPreferredName(), function); + } + if (functionDescription != null) { + builder.field(FUNCTION_DESCRIPTION.getPreferredName(), functionDescription); + } + if (typical != null) { + builder.field(TYPICAL.getPreferredName(), typical); + } + if (actual != null) { + builder.field(ACTUAL.getPreferredName(), actual); + } + if (fieldName != null) { + builder.field(FIELD_NAME.getPreferredName(), fieldName); + } + if (overFieldName != 
null) { + builder.field(OVER_FIELD_NAME.getPreferredName(), overFieldName); + } + if (overFieldValue != null) { + builder.field(OVER_FIELD_VALUE.getPreferredName(), overFieldValue); + } + if (influencers != null) { + builder.field(INFLUENCERS.getPreferredName(), influencers); + } + builder.endObject(); + return builder; + } + + + public double getProbability() { + return probability; + } + + public void setProbability(double value) { + probability = value; + } + + + public String getByFieldName() { + return byFieldName; + } + + public void setByFieldName(String value) { + byFieldName = value.intern(); + } + + public String getByFieldValue() { + return byFieldValue; + } + + public void setByFieldValue(String value) { + byFieldValue = value.intern(); + } + + public String getCorrelatedByFieldValue() { + return correlatedByFieldValue; + } + + public void setCorrelatedByFieldValue(String value) { + correlatedByFieldValue = value.intern(); + } + + public String getPartitionFieldName() { + return partitionFieldName; + } + + public void setPartitionFieldName(String field) { + partitionFieldName = field.intern(); + } + + public String getPartitionFieldValue() { + return partitionFieldValue; + } + + public void setPartitionFieldValue(String value) { + partitionFieldValue = value.intern(); + } + + public String getFunction() { + return function; + } + + public void setFunction(String name) { + function = name.intern(); + } + + public String getFunctionDescription() { + return functionDescription; + } + + public void setFunctionDescription(String functionDescription) { + this.functionDescription = functionDescription.intern(); + } + + public List getTypical() { + return typical; + } + + public void setTypical(List typical) { + this.typical = typical; + } + + public List getActual() { + return actual; + } + + public void setActual(List actual) { + this.actual = actual; + } + + public String getFieldName() { + return fieldName; + } + + public void setFieldName(String field) { + fieldName = field.intern(); + } + + public String getOverFieldName() { + return overFieldName; + } + + public void setOverFieldName(String name) { + overFieldName = name.intern(); + } + + public String getOverFieldValue() { + return overFieldValue; + } + + public void setOverFieldValue(String value) { + overFieldValue = value.intern(); + } + + public List getInfluencers() { + return influencers; + } + + public void setInfluencers(List influencers) { + this.influencers = influencers; + } + + @Override + public int hashCode() { + return Objects.hash(probability, + actual, + typical, + byFieldName, + byFieldValue, + correlatedByFieldValue, + fieldName, + function, + functionDescription, + overFieldName, + overFieldValue, + partitionFieldName, + partitionFieldValue, + influencers); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other instanceof AnomalyCause == false) { + return false; + } + + AnomalyCause that = (AnomalyCause)other; + + return this.probability == that.probability && + Objects.deepEquals(this.typical, that.typical) && + Objects.deepEquals(this.actual, that.actual) && + Objects.equals(this.function, that.function) && + Objects.equals(this.functionDescription, that.functionDescription) && + Objects.equals(this.fieldName, that.fieldName) && + Objects.equals(this.byFieldName, that.byFieldName) && + Objects.equals(this.byFieldValue, that.byFieldValue) && + Objects.equals(this.correlatedByFieldValue, that.correlatedByFieldValue) && + 
Objects.equals(this.partitionFieldName, that.partitionFieldName) && + Objects.equals(this.partitionFieldValue, that.partitionFieldValue) && + Objects.equals(this.overFieldName, that.overFieldName) && + Objects.equals(this.overFieldValue, that.overFieldValue) && + Objects.equals(this.influencers, that.influencers); + } + + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java new file mode 100644 index 0000000000000..360bcfaaeadfd --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java @@ -0,0 +1,574 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.results; + +import org.elasticsearch.Version; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Anomaly Record POJO. + * Uses the object wrappers Boolean and Double so null values + * can be returned if the members have not been set. 
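+ * The document ID is derived from the job ID, timestamp, bucket span, detector index and the by/over/partition field values.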
+ */ +public class AnomalyRecord implements ToXContentObject, Writeable { + + /** + * Result type + */ + public static final String RESULT_TYPE_VALUE = "record"; + /** + * Result fields (all detector types) + */ + public static final ParseField PROBABILITY = new ParseField("probability"); + public static final ParseField BY_FIELD_NAME = new ParseField("by_field_name"); + public static final ParseField BY_FIELD_VALUE = new ParseField("by_field_value"); + public static final ParseField CORRELATED_BY_FIELD_VALUE = new ParseField("correlated_by_field_value"); + public static final ParseField PARTITION_FIELD_NAME = new ParseField("partition_field_name"); + public static final ParseField PARTITION_FIELD_VALUE = new ParseField("partition_field_value"); + public static final ParseField FUNCTION = new ParseField("function"); + public static final ParseField FUNCTION_DESCRIPTION = new ParseField("function_description"); + public static final ParseField TYPICAL = new ParseField("typical"); + public static final ParseField ACTUAL = new ParseField("actual"); + public static final ParseField INFLUENCERS = new ParseField("influencers"); + public static final ParseField BUCKET_SPAN = new ParseField("bucket_span"); + + // Used for QueryPage + public static final ParseField RESULTS_FIELD = new ParseField("records"); + + /** + * Metric Results (including population metrics) + */ + public static final ParseField FIELD_NAME = new ParseField("field_name"); + + /** + * Population results + */ + public static final ParseField OVER_FIELD_NAME = new ParseField("over_field_name"); + public static final ParseField OVER_FIELD_VALUE = new ParseField("over_field_value"); + public static final ParseField CAUSES = new ParseField("causes"); + + /** + * Normalization + */ + public static final ParseField RECORD_SCORE = new ParseField("record_score"); + public static final ParseField INITIAL_RECORD_SCORE = new ParseField("initial_record_score"); + + public static final ConstructingObjectParser STRICT_PARSER = createParser(false); + public static final ConstructingObjectParser LENIENT_PARSER = createParser(true); + + + private static ConstructingObjectParser createParser(boolean ignoreUnknownFields) { + // As a record contains fields named after the data fields, the parser for the record should always ignore unknown fields. + // However, it makes sense to offer strict/lenient parsing for other members, e.g. influences, anomaly causes, etc. 
+ ConstructingObjectParser parser = new ConstructingObjectParser<>(RESULT_TYPE_VALUE, true, + a -> new AnomalyRecord((String) a[0], (Date) a[1], (long) a[2])); + + parser.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + parser.declareField(ConstructingObjectParser.constructorArg(), p -> { + if (p.currentToken() == Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException("unexpected token [" + p.currentToken() + "] for [" + + Result.TIMESTAMP.getPreferredName() + "]"); + }, Result.TIMESTAMP, ValueType.VALUE); + parser.declareLong(ConstructingObjectParser.constructorArg(), BUCKET_SPAN); + parser.declareString((anomalyRecord, s) -> {}, Result.RESULT_TYPE); + parser.declareDouble(AnomalyRecord::setProbability, PROBABILITY); + parser.declareDouble(AnomalyRecord::setRecordScore, RECORD_SCORE); + parser.declareDouble(AnomalyRecord::setInitialRecordScore, INITIAL_RECORD_SCORE); + parser.declareInt(AnomalyRecord::setDetectorIndex, Detector.DETECTOR_INDEX); + parser.declareBoolean(AnomalyRecord::setInterim, Result.IS_INTERIM); + parser.declareString(AnomalyRecord::setByFieldName, BY_FIELD_NAME); + parser.declareString(AnomalyRecord::setByFieldValue, BY_FIELD_VALUE); + parser.declareString(AnomalyRecord::setCorrelatedByFieldValue, CORRELATED_BY_FIELD_VALUE); + parser.declareString(AnomalyRecord::setPartitionFieldName, PARTITION_FIELD_NAME); + parser.declareString(AnomalyRecord::setPartitionFieldValue, PARTITION_FIELD_VALUE); + parser.declareString(AnomalyRecord::setFunction, FUNCTION); + parser.declareString(AnomalyRecord::setFunctionDescription, FUNCTION_DESCRIPTION); + parser.declareDoubleArray(AnomalyRecord::setTypical, TYPICAL); + parser.declareDoubleArray(AnomalyRecord::setActual, ACTUAL); + parser.declareString(AnomalyRecord::setFieldName, FIELD_NAME); + parser.declareString(AnomalyRecord::setOverFieldName, OVER_FIELD_NAME); + parser.declareString(AnomalyRecord::setOverFieldValue, OVER_FIELD_VALUE); + parser.declareObjectArray(AnomalyRecord::setCauses, ignoreUnknownFields ? AnomalyCause.LENIENT_PARSER : AnomalyCause.STRICT_PARSER, + CAUSES); + parser.declareObjectArray(AnomalyRecord::setInfluencers, ignoreUnknownFields ? 
Influence.LENIENT_PARSER : Influence.STRICT_PARSER, + INFLUENCERS); + + return parser; + } + + private final String jobId; + private int detectorIndex; + private double probability; + private String byFieldName; + private String byFieldValue; + private String correlatedByFieldValue; + private String partitionFieldName; + private String partitionFieldValue; + private String function; + private String functionDescription; + private List typical; + private List actual; + private boolean isInterim; + + private String fieldName; + + private String overFieldName; + private String overFieldValue; + private List causes; + + private double recordScore; + + private double initialRecordScore; + + private final Date timestamp; + private final long bucketSpan; + + private List influences; + + public AnomalyRecord(String jobId, Date timestamp, long bucketSpan) { + this.jobId = jobId; + this.timestamp = ExceptionsHelper.requireNonNull(timestamp, Result.TIMESTAMP.getPreferredName()); + this.bucketSpan = bucketSpan; + } + + @SuppressWarnings("unchecked") + public AnomalyRecord(StreamInput in) throws IOException { + jobId = in.readString(); + // bwc for removed sequenceNum field + if (in.getVersion().before(Version.V_5_5_0)) { + in.readInt(); + } + detectorIndex = in.readInt(); + probability = in.readDouble(); + byFieldName = in.readOptionalString(); + byFieldValue = in.readOptionalString(); + correlatedByFieldValue = in.readOptionalString(); + partitionFieldName = in.readOptionalString(); + partitionFieldValue = in.readOptionalString(); + function = in.readOptionalString(); + functionDescription = in.readOptionalString(); + fieldName = in.readOptionalString(); + overFieldName = in.readOptionalString(); + overFieldValue = in.readOptionalString(); + if (in.readBoolean()) { + typical = (List) in.readGenericValue(); + } + if (in.readBoolean()) { + actual = (List) in.readGenericValue(); + } + isInterim = in.readBoolean(); + if (in.readBoolean()) { + causes = in.readList(AnomalyCause::new); + } + recordScore = in.readDouble(); + initialRecordScore = in.readDouble(); + timestamp = new Date(in.readLong()); + bucketSpan = in.readLong(); + if (in.readBoolean()) { + influences = in.readList(Influence::new); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(jobId); + // bwc for removed sequenceNum field + if (out.getVersion().before(Version.V_5_5_0)) { + out.writeInt(0); + } + out.writeInt(detectorIndex); + out.writeDouble(probability); + out.writeOptionalString(byFieldName); + out.writeOptionalString(byFieldValue); + out.writeOptionalString(correlatedByFieldValue); + out.writeOptionalString(partitionFieldName); + out.writeOptionalString(partitionFieldValue); + out.writeOptionalString(function); + out.writeOptionalString(functionDescription); + out.writeOptionalString(fieldName); + out.writeOptionalString(overFieldName); + out.writeOptionalString(overFieldValue); + boolean hasTypical = typical != null; + out.writeBoolean(hasTypical); + if (hasTypical) { + out.writeGenericValue(typical); + } + boolean hasActual = actual != null; + out.writeBoolean(hasActual); + if (hasActual) { + out.writeGenericValue(actual); + } + out.writeBoolean(isInterim); + boolean hasCauses = causes != null; + out.writeBoolean(hasCauses); + if (hasCauses) { + out.writeList(causes); + } + out.writeDouble(recordScore); + out.writeDouble(initialRecordScore); + out.writeLong(timestamp.getTime()); + out.writeLong(bucketSpan); + boolean hasInfluencers = influences != null; + 
out.writeBoolean(hasInfluencers); + if (hasInfluencers) { + out.writeList(influences); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + innerToXContent(builder, params); + builder.endObject(); + return builder; + } + + XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE); + builder.field(PROBABILITY.getPreferredName(), probability); + builder.field(RECORD_SCORE.getPreferredName(), recordScore); + builder.field(INITIAL_RECORD_SCORE.getPreferredName(), initialRecordScore); + builder.field(BUCKET_SPAN.getPreferredName(), bucketSpan); + builder.field(Detector.DETECTOR_INDEX.getPreferredName(), detectorIndex); + builder.field(Result.IS_INTERIM.getPreferredName(), isInterim); + builder.timeField(Result.TIMESTAMP.getPreferredName(), Result.TIMESTAMP.getPreferredName() + "_string", timestamp.getTime()); + if (byFieldName != null) { + builder.field(BY_FIELD_NAME.getPreferredName(), byFieldName); + } + if (byFieldValue != null) { + builder.field(BY_FIELD_VALUE.getPreferredName(), byFieldValue); + } + if (correlatedByFieldValue != null) { + builder.field(CORRELATED_BY_FIELD_VALUE.getPreferredName(), correlatedByFieldValue); + } + if (partitionFieldName != null) { + builder.field(PARTITION_FIELD_NAME.getPreferredName(), partitionFieldName); + } + if (partitionFieldValue != null) { + builder.field(PARTITION_FIELD_VALUE.getPreferredName(), partitionFieldValue); + } + if (function != null) { + builder.field(FUNCTION.getPreferredName(), function); + } + if (functionDescription != null) { + builder.field(FUNCTION_DESCRIPTION.getPreferredName(), functionDescription); + } + if (typical != null) { + builder.field(TYPICAL.getPreferredName(), typical); + } + if (actual != null) { + builder.field(ACTUAL.getPreferredName(), actual); + } + if (fieldName != null) { + builder.field(FIELD_NAME.getPreferredName(), fieldName); + } + if (overFieldName != null) { + builder.field(OVER_FIELD_NAME.getPreferredName(), overFieldName); + } + if (overFieldValue != null) { + builder.field(OVER_FIELD_VALUE.getPreferredName(), overFieldValue); + } + if (causes != null) { + builder.field(CAUSES.getPreferredName(), causes); + } + if (influences != null) { + builder.field(INFLUENCERS.getPreferredName(), influences); + } + + Map> inputFields = inputFieldMap(); + for (String fieldName : inputFields.keySet()) { + builder.field(fieldName, inputFields.get(fieldName)); + } + return builder; + } + + private Map> inputFieldMap() { + // LinkedHashSet preserves insertion order when iterating entries + Map> result = new HashMap<>(); + + addInputFieldsToMap(result, byFieldName, byFieldValue); + addInputFieldsToMap(result, overFieldName, overFieldValue); + addInputFieldsToMap(result, partitionFieldName, partitionFieldValue); + + if (influences != null) { + for (Influence inf : influences) { + String fieldName = inf.getInfluencerFieldName(); + for (String fieldValue : inf.getInfluencerFieldValues()) { + addInputFieldsToMap(result, fieldName, fieldValue); + } + } + } + return result; + } + + private void addInputFieldsToMap(Map> inputFields, String fieldName, String fieldValue) { + if (!Strings.isNullOrEmpty(fieldName) && fieldValue != null) { + if (ReservedFieldNames.isValidFieldName(fieldName)) { + inputFields.computeIfAbsent(fieldName, k -> new LinkedHashSet<>()).add(fieldValue); + } + } 
+ } + + public String getJobId() { + return this.jobId; + } + + /** + * Data store ID of this record. + */ + public String getId() { + int valuesHash = Objects.hash(byFieldValue, overFieldValue, partitionFieldValue); + int length = (byFieldValue == null ? 0 : byFieldValue.length()) + + (overFieldValue == null ? 0 : overFieldValue.length()) + + (partitionFieldValue == null ? 0 : partitionFieldValue.length()); + + return jobId + "_record_" + timestamp.getTime() + "_" + bucketSpan + "_" + detectorIndex + "_" + valuesHash + "_" + length; + } + + public int getDetectorIndex() { + return detectorIndex; + } + + public void setDetectorIndex(int detectorIndex) { + this.detectorIndex = detectorIndex; + } + + public double getRecordScore() { + return recordScore; + } + + public void setRecordScore(double recordScore) { + this.recordScore = recordScore; + } + + public double getInitialRecordScore() { + return initialRecordScore; + } + + public void setInitialRecordScore(double initialRecordScore) { + this.initialRecordScore = initialRecordScore; + } + + public Date getTimestamp() { + return timestamp; + } + + /** + * Bucketspan expressed in seconds + */ + public long getBucketSpan() { + return bucketSpan; + } + + public double getProbability() { + return probability; + } + + public void setProbability(double value) { + probability = value; + } + + public String getByFieldName() { + return byFieldName; + } + + public void setByFieldName(String value) { + byFieldName = value.intern(); + } + + public String getByFieldValue() { + return byFieldValue; + } + + public void setByFieldValue(String value) { + byFieldValue = value.intern(); + } + + public String getCorrelatedByFieldValue() { + return correlatedByFieldValue; + } + + public void setCorrelatedByFieldValue(String value) { + correlatedByFieldValue = value.intern(); + } + + public String getPartitionFieldName() { + return partitionFieldName; + } + + public void setPartitionFieldName(String field) { + partitionFieldName = field.intern(); + } + + public String getPartitionFieldValue() { + return partitionFieldValue; + } + + public void setPartitionFieldValue(String value) { + partitionFieldValue = value.intern(); + } + + public String getFunction() { + return function; + } + + public void setFunction(String name) { + function = name.intern(); + } + + public String getFunctionDescription() { + return functionDescription; + } + + public void setFunctionDescription(String functionDescription) { + this.functionDescription = functionDescription.intern(); + } + + public List getTypical() { + return typical; + } + + public void setTypical(List typical) { + this.typical = typical; + } + + public List getActual() { + return actual; + } + + public void setActual(List actual) { + this.actual = actual; + } + + public boolean isInterim() { + return isInterim; + } + + public void setInterim(boolean isInterim) { + this.isInterim = isInterim; + } + + public String getFieldName() { + return fieldName; + } + + public void setFieldName(String field) { + fieldName = field.intern(); + } + + public String getOverFieldName() { + return overFieldName; + } + + public void setOverFieldName(String name) { + overFieldName = name.intern(); + } + + public String getOverFieldValue() { + return overFieldValue; + } + + public void setOverFieldValue(String value) { + overFieldValue = value.intern(); + } + + public List getCauses() { + return causes; + } + + public void setCauses(List causes) { + this.causes = causes; + } + + public void addCause(AnomalyCause cause) { + if (causes == 
null) { + causes = new ArrayList<>(); + } + causes.add(cause); + } + + public List getInfluencers() { + return influences; + } + + public void setInfluencers(List influencers) { + this.influences = influencers; + } + + + @Override + public int hashCode() { + return Objects.hash(jobId, detectorIndex, bucketSpan, probability, recordScore, + initialRecordScore, typical, actual,function, functionDescription, fieldName, + byFieldName, byFieldValue, correlatedByFieldValue, partitionFieldName, + partitionFieldValue, overFieldName, overFieldValue, timestamp, isInterim, + causes, influences, jobId); + } + + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other instanceof AnomalyRecord == false) { + return false; + } + + AnomalyRecord that = (AnomalyRecord) other; + + return Objects.equals(this.jobId, that.jobId) + && this.detectorIndex == that.detectorIndex + && this.bucketSpan == that.bucketSpan + && this.probability == that.probability + && this.recordScore == that.recordScore + && this.initialRecordScore == that.initialRecordScore + && Objects.deepEquals(this.typical, that.typical) + && Objects.deepEquals(this.actual, that.actual) + && Objects.equals(this.function, that.function) + && Objects.equals(this.functionDescription, that.functionDescription) + && Objects.equals(this.fieldName, that.fieldName) + && Objects.equals(this.byFieldName, that.byFieldName) + && Objects.equals(this.byFieldValue, that.byFieldValue) + && Objects.equals(this.correlatedByFieldValue, that.correlatedByFieldValue) + && Objects.equals(this.partitionFieldName, that.partitionFieldName) + && Objects.equals(this.partitionFieldValue, that.partitionFieldValue) + && Objects.equals(this.overFieldName, that.overFieldName) + && Objects.equals(this.overFieldValue, that.overFieldValue) + && Objects.equals(this.timestamp, that.timestamp) + && Objects.equals(this.isInterim, that.isInterim) + && Objects.equals(this.causes, that.causes) + && Objects.equals(this.influences, that.influences); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java new file mode 100644 index 0000000000000..8a88232a559d4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java @@ -0,0 +1,379 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.job.results; + +import org.elasticsearch.Version; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +/** + * Bucket Result POJO + */ +public class Bucket implements ToXContentObject, Writeable { + /* + * Field Names + */ + private static final ParseField JOB_ID = Job.ID; + + public static final ParseField ANOMALY_SCORE = new ParseField("anomaly_score"); + public static final ParseField INITIAL_ANOMALY_SCORE = new ParseField("initial_anomaly_score"); + public static final ParseField EVENT_COUNT = new ParseField("event_count"); + public static final ParseField RECORDS = new ParseField("records"); + public static final ParseField BUCKET_INFLUENCERS = new ParseField("bucket_influencers"); + public static final ParseField BUCKET_SPAN = new ParseField("bucket_span"); + public static final ParseField PROCESSING_TIME_MS = new ParseField("processing_time_ms"); + public static final ParseField PARTITION_SCORES = new ParseField("partition_scores"); + public static final ParseField SCHEDULED_EVENTS = new ParseField("scheduled_events"); + + // Used for QueryPage + public static final ParseField RESULTS_FIELD = new ParseField("buckets"); + + /** + * Result type + */ + public static final String RESULT_TYPE_VALUE = "bucket"; + public static final ParseField RESULT_TYPE_FIELD = new ParseField(RESULT_TYPE_VALUE); + + public static final ConstructingObjectParser STRICT_PARSER = createParser(false); + public static final ConstructingObjectParser LENIENT_PARSER = createParser(true); + + private static ConstructingObjectParser createParser(boolean ignoreUnknownFields) { + ConstructingObjectParser parser = new ConstructingObjectParser<>(RESULT_TYPE_VALUE, ignoreUnknownFields, + a -> new Bucket((String) a[0], (Date) a[1], (long) a[2])); + + parser.declareString(ConstructingObjectParser.constructorArg(), JOB_ID); + parser.declareField(ConstructingObjectParser.constructorArg(), p -> { + if (p.currentToken() == Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException("unexpected token [" + p.currentToken() + "] for [" + + Result.TIMESTAMP.getPreferredName() + "]"); + }, Result.TIMESTAMP, ValueType.VALUE); + parser.declareLong(ConstructingObjectParser.constructorArg(), BUCKET_SPAN); + parser.declareDouble(Bucket::setAnomalyScore, ANOMALY_SCORE); + parser.declareDouble(Bucket::setInitialAnomalyScore, INITIAL_ANOMALY_SCORE); + parser.declareBoolean(Bucket::setInterim, Result.IS_INTERIM); + parser.declareLong(Bucket::setEventCount, EVENT_COUNT); + 
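+        // Nested records, bucket influencers and partition scores are parsed with the same strict/lenient mode as the enclosing bucket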
parser.declareObjectArray(Bucket::setRecords, ignoreUnknownFields ? AnomalyRecord.LENIENT_PARSER : AnomalyRecord.STRICT_PARSER, + RECORDS); + parser.declareObjectArray(Bucket::setBucketInfluencers, ignoreUnknownFields ? + BucketInfluencer.LENIENT_PARSER : BucketInfluencer.STRICT_PARSER, BUCKET_INFLUENCERS); + parser.declareLong(Bucket::setProcessingTimeMs, PROCESSING_TIME_MS); + parser.declareObjectArray(Bucket::setPartitionScores, ignoreUnknownFields ? + PartitionScore.LENIENT_PARSER : PartitionScore.STRICT_PARSER, PARTITION_SCORES); + parser.declareString((bucket, s) -> {}, Result.RESULT_TYPE); + parser.declareStringArray(Bucket::setScheduledEvents, SCHEDULED_EVENTS); + + return parser; + } + + private final String jobId; + private final Date timestamp; + private final long bucketSpan; + private double anomalyScore; + private double initialAnomalyScore; + private List records = new ArrayList<>(); + private long eventCount; + private boolean isInterim; + private List bucketInfluencers = new ArrayList<>(); // Can't use emptyList as might be appended to + private long processingTimeMs; + private List partitionScores = Collections.emptyList(); + private List scheduledEvents = Collections.emptyList(); + + public Bucket(String jobId, Date timestamp, long bucketSpan) { + this.jobId = jobId; + this.timestamp = ExceptionsHelper.requireNonNull(timestamp, Result.TIMESTAMP.getPreferredName()); + this.bucketSpan = bucketSpan; + } + + public Bucket(Bucket other) { + this.jobId = other.jobId; + this.timestamp = other.timestamp; + this.bucketSpan = other.bucketSpan; + this.anomalyScore = other.anomalyScore; + this.initialAnomalyScore = other.initialAnomalyScore; + this.records = new ArrayList<>(other.records); + this.eventCount = other.eventCount; + this.isInterim = other.isInterim; + this.bucketInfluencers = new ArrayList<>(other.bucketInfluencers); + this.processingTimeMs = other.processingTimeMs; + this.partitionScores = new ArrayList<>(other.partitionScores); + this.scheduledEvents = new ArrayList<>(other.scheduledEvents); + } + + public Bucket(StreamInput in) throws IOException { + jobId = in.readString(); + timestamp = new Date(in.readLong()); + anomalyScore = in.readDouble(); + bucketSpan = in.readLong(); + initialAnomalyScore = in.readDouble(); + // bwc for recordCount + if (in.getVersion().before(Version.V_5_5_0)) { + in.readInt(); + } + records = in.readList(AnomalyRecord::new); + eventCount = in.readLong(); + isInterim = in.readBoolean(); + bucketInfluencers = in.readList(BucketInfluencer::new); + processingTimeMs = in.readLong(); + // bwc for perPartitionMaxProbability + if (in.getVersion().before(Version.V_5_5_0)) { + in.readGenericValue(); + } + partitionScores = in.readList(PartitionScore::new); + if (in.getVersion().onOrAfter(Version.V_6_2_0)) { + scheduledEvents = in.readList(StreamInput::readString); + if (scheduledEvents.isEmpty()) { + scheduledEvents = Collections.emptyList(); + } + } else { + scheduledEvents = Collections.emptyList(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(jobId); + out.writeLong(timestamp.getTime()); + out.writeDouble(anomalyScore); + out.writeLong(bucketSpan); + out.writeDouble(initialAnomalyScore); + // bwc for recordCount + if (out.getVersion().before(Version.V_5_5_0)) { + out.writeInt(0); + } + out.writeList(records); + out.writeLong(eventCount); + out.writeBoolean(isInterim); + out.writeList(bucketInfluencers); + out.writeLong(processingTimeMs); + // bwc for perPartitionMaxProbability + if 
(out.getVersion().before(Version.V_5_5_0)) { + out.writeGenericValue(Collections.emptyMap()); + } + out.writeList(partitionScores); + if (out.getVersion().onOrAfter(Version.V_6_2_0)) { + out.writeStringList(scheduledEvents); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(JOB_ID.getPreferredName(), jobId); + builder.timeField(Result.TIMESTAMP.getPreferredName(), Result.TIMESTAMP.getPreferredName() + "_string", timestamp.getTime()); + builder.field(ANOMALY_SCORE.getPreferredName(), anomalyScore); + builder.field(BUCKET_SPAN.getPreferredName(), bucketSpan); + builder.field(INITIAL_ANOMALY_SCORE.getPreferredName(), initialAnomalyScore); + if (records.isEmpty() == false) { + builder.field(RECORDS.getPreferredName(), records); + } + builder.field(EVENT_COUNT.getPreferredName(), eventCount); + builder.field(Result.IS_INTERIM.getPreferredName(), isInterim); + builder.field(BUCKET_INFLUENCERS.getPreferredName(), bucketInfluencers); + builder.field(PROCESSING_TIME_MS.getPreferredName(), processingTimeMs); + if (partitionScores.isEmpty() == false) { + builder.field(PARTITION_SCORES.getPreferredName(), partitionScores); + } + if (scheduledEvents.isEmpty() == false) { + builder.field(SCHEDULED_EVENTS.getPreferredName(), scheduledEvents); + } + builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE); + builder.endObject(); + return builder; + } + + public String getJobId() { + return jobId; + } + + public String getId() { + return jobId + "_bucket_" + timestamp.getTime() + "_" + bucketSpan; + } + + /** + * Timestamp expressed in seconds since the epoch (rather than Java's + * convention of milliseconds). + */ + public long getEpoch() { + return timestamp.getTime() / 1000; + } + + public Date getTimestamp() { + return timestamp; + } + + /** + * Bucketspan expressed in seconds + */ + public long getBucketSpan() { + return bucketSpan; + } + + public double getAnomalyScore() { + return anomalyScore; + } + + public void setAnomalyScore(double anomalyScore) { + this.anomalyScore = anomalyScore; + } + + public double getInitialAnomalyScore() { + return initialAnomalyScore; + } + + public void setInitialAnomalyScore(double initialAnomalyScore) { + this.initialAnomalyScore = initialAnomalyScore; + } + + /** + * Get all the anomaly records associated with this bucket. + * The records are not part of the bucket document. They will + * only be present when the bucket was retrieved and expanded + * to contain the associated records. + * + * @return the anomaly records for the bucket IF the bucket was expanded. + */ + public List getRecords() { + return records; + } + + public void setRecords(List records) { + this.records = Objects.requireNonNull(records); + } + + /** + * The number of records (events) actually processed in this bucket. 
+ */ + public long getEventCount() { + return eventCount; + } + + public void setEventCount(long value) { + eventCount = value; + } + + public boolean isInterim() { + return isInterim; + } + + public void setInterim(boolean isInterim) { + this.isInterim = isInterim; + } + + public long getProcessingTimeMs() { + return processingTimeMs; + } + + public void setProcessingTimeMs(long timeMs) { + processingTimeMs = timeMs; + } + + public List getBucketInfluencers() { + return bucketInfluencers; + } + + public void setBucketInfluencers(List bucketInfluencers) { + this.bucketInfluencers = Objects.requireNonNull(bucketInfluencers); + } + + public void addBucketInfluencer(BucketInfluencer bucketInfluencer) { + bucketInfluencers.add(bucketInfluencer); + } + + public List getPartitionScores() { + return partitionScores; + } + + public void setPartitionScores(List scores) { + partitionScores = Objects.requireNonNull(scores); + } + + public List getScheduledEvents() { + return scheduledEvents; + } + + public void setScheduledEvents(List scheduledEvents) { + this.scheduledEvents = ExceptionsHelper.requireNonNull(scheduledEvents, SCHEDULED_EVENTS.getPreferredName()); + } + + public double partitionInitialAnomalyScore(String partitionValue) { + Optional first = partitionScores.stream().filter(s -> partitionValue.equals(s.getPartitionFieldValue())) + .findFirst(); + + return first.isPresent() ? first.get().getInitialRecordScore() : 0.0; + } + + public double partitionAnomalyScore(String partitionValue) { + Optional first = partitionScores.stream().filter(s -> partitionValue.equals(s.getPartitionFieldValue())) + .findFirst(); + + return first.isPresent() ? first.get().getRecordScore() : 0.0; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, timestamp, eventCount, initialAnomalyScore, anomalyScore, records, + isInterim, bucketSpan, bucketInfluencers, partitionScores, processingTimeMs, scheduledEvents); + } + + /** + * Compare all the fields and embedded anomaly records (if any) + */ + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other instanceof Bucket == false) { + return false; + } + + Bucket that = (Bucket) other; + + return Objects.equals(this.jobId, that.jobId) && Objects.equals(this.timestamp, that.timestamp) + && (this.eventCount == that.eventCount) && (this.bucketSpan == that.bucketSpan) + && (this.anomalyScore == that.anomalyScore) && (this.initialAnomalyScore == that.initialAnomalyScore) + && Objects.equals(this.records, that.records) && Objects.equals(this.isInterim, that.isInterim) + && Objects.equals(this.bucketInfluencers, that.bucketInfluencers) + && Objects.equals(this.partitionScores, that.partitionScores) + && (this.processingTimeMs == that.processingTimeMs) + && Objects.equals(this.scheduledEvents, that.scheduledEvents); + } + + /** + * This method encapsulated the logic for whether a bucket should be normalized. + * Buckets that have a zero anomaly score themselves and no partition scores with + * non-zero score should not be normalized as their score will not change and they + * will just add overhead. 
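The `partitionAnomalyScore` and `partitionInitialAnomalyScore` helpers above return the score of the first matching partition, or 0.0 when the partition value is absent. A JDK-only sketch of that lookup follows; the `Score` class is a simplified stand-in for `PartitionScore`, and the `map(...).orElse(...)` form is equivalent to the `isPresent()/get()` ternary used in the patch:

```java
import java.util.Arrays;
import java.util.List;

// JDK-only sketch of the lookup used by Bucket#partitionAnomalyScore(String).
public class PartitionScoreLookupExample {

    // Simplified, hypothetical stand-in for PartitionScore.
    static final class Score {
        final String partitionFieldValue;
        final double recordScore;
        Score(String partitionFieldValue, double recordScore) {
            this.partitionFieldValue = partitionFieldValue;
            this.recordScore = recordScore;
        }
    }

    static double partitionAnomalyScore(List<Score> scores, String partitionValue) {
        return scores.stream()
                .filter(s -> partitionValue.equals(s.partitionFieldValue))
                .findFirst()
                .map(s -> s.recordScore)   // equivalent to isPresent() ? get().getRecordScore() : 0.0
                .orElse(0.0);
    }

    public static void main(String[] args) {
        List<Score> scores = Arrays.asList(new Score("error", 42.0), new Score("info", 3.0));
        System.out.println(partitionAnomalyScore(scores, "error"));   // 42.0
        System.out.println(partitionAnomalyScore(scores, "warning")); // 0.0
    }
}
```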
+ * + * @return true if the bucket should be normalized or false otherwise + */ + public boolean isNormalizable() { + return anomalyScore > 0.0 || partitionScores.stream().anyMatch(s -> s.getRecordScore() > 0); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencer.java new file mode 100644 index 0000000000000..8b18562ec6d1e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencer.java @@ -0,0 +1,242 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.results; + +import org.elasticsearch.Version; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; + +import java.io.IOException; +import java.util.Date; +import java.util.Objects; + +public class BucketInfluencer implements ToXContentObject, Writeable { + /** + * Result type + */ + public static final String RESULT_TYPE_VALUE = "bucket_influencer"; + public static final ParseField RESULT_TYPE_FIELD = new ParseField(RESULT_TYPE_VALUE); + + /** + * Field names + */ + public static final ParseField INFLUENCER_FIELD_NAME = new ParseField("influencer_field_name"); + public static final ParseField INITIAL_ANOMALY_SCORE = new ParseField("initial_anomaly_score"); + public static final ParseField ANOMALY_SCORE = new ParseField("anomaly_score"); + public static final ParseField RAW_ANOMALY_SCORE = new ParseField("raw_anomaly_score"); + public static final ParseField PROBABILITY = new ParseField("probability"); + public static final ParseField BUCKET_SPAN = new ParseField("bucket_span"); + + /** + * The influencer field name used for time influencers + */ + public static final String BUCKET_TIME = "bucket_time"; + + public static final ConstructingObjectParser STRICT_PARSER = createParser(false); + public static final ConstructingObjectParser LENIENT_PARSER = createParser(true); + + private static ConstructingObjectParser createParser(boolean ignoreUnknownFields) { + ConstructingObjectParser parser = new ConstructingObjectParser<>(RESULT_TYPE_FIELD.getPreferredName(), + ignoreUnknownFields, a -> new BucketInfluencer((String) a[0], (Date) a[1], (long) a[2])); + + parser.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + parser.declareField(ConstructingObjectParser.constructorArg(), p -> { + if (p.currentToken() == Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException("unexpected token [" 
+ p.currentToken() + "] for [" + + Result.TIMESTAMP.getPreferredName() + "]"); + }, Result.TIMESTAMP, ValueType.VALUE); + parser.declareLong(ConstructingObjectParser.constructorArg(), BUCKET_SPAN); + parser.declareString((bucketInfluencer, s) -> {}, Result.RESULT_TYPE); + parser.declareString(BucketInfluencer::setInfluencerFieldName, INFLUENCER_FIELD_NAME); + parser.declareDouble(BucketInfluencer::setInitialAnomalyScore, INITIAL_ANOMALY_SCORE); + parser.declareDouble(BucketInfluencer::setAnomalyScore, ANOMALY_SCORE); + parser.declareDouble(BucketInfluencer::setRawAnomalyScore, RAW_ANOMALY_SCORE); + parser.declareDouble(BucketInfluencer::setProbability, PROBABILITY); + parser.declareBoolean(BucketInfluencer::setIsInterim, Result.IS_INTERIM); + + return parser; + } + + private final String jobId; + private String influenceField; + private double initialAnomalyScore; + private double anomalyScore; + private double rawAnomalyScore; + private double probability; + private boolean isInterim; + private final Date timestamp; + private final long bucketSpan; + + public BucketInfluencer(String jobId, Date timestamp, long bucketSpan) { + this.jobId = jobId; + this.timestamp = ExceptionsHelper.requireNonNull(timestamp, Result.TIMESTAMP.getPreferredName()); + this.bucketSpan = bucketSpan; + } + + public BucketInfluencer(StreamInput in) throws IOException { + jobId = in.readString(); + influenceField = in.readOptionalString(); + initialAnomalyScore = in.readDouble(); + anomalyScore = in.readDouble(); + rawAnomalyScore = in.readDouble(); + probability = in.readDouble(); + isInterim = in.readBoolean(); + timestamp = new Date(in.readLong()); + bucketSpan = in.readLong(); + // bwc for removed sequenceNum field + if (in.getVersion().before(Version.V_5_5_0)) { + in.readInt(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(jobId); + out.writeOptionalString(influenceField); + out.writeDouble(initialAnomalyScore); + out.writeDouble(anomalyScore); + out.writeDouble(rawAnomalyScore); + out.writeDouble(probability); + out.writeBoolean(isInterim); + out.writeLong(timestamp.getTime()); + out.writeLong(bucketSpan); + // bwc for removed sequenceNum field + if (out.getVersion().before(Version.V_5_5_0)) { + out.writeInt(0); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + innerToXContent(builder, params); + builder.endObject(); + return builder; + } + + XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE); + if (influenceField != null) { + builder.field(INFLUENCER_FIELD_NAME.getPreferredName(), influenceField); + } + builder.field(INITIAL_ANOMALY_SCORE.getPreferredName(), initialAnomalyScore); + builder.field(ANOMALY_SCORE.getPreferredName(), anomalyScore); + builder.field(RAW_ANOMALY_SCORE.getPreferredName(), rawAnomalyScore); + builder.field(PROBABILITY.getPreferredName(), probability); + builder.timeField(Result.TIMESTAMP.getPreferredName(), Result.TIMESTAMP.getPreferredName() + "_string", timestamp.getTime()); + builder.field(BUCKET_SPAN.getPreferredName(), bucketSpan); + builder.field(Result.IS_INTERIM.getPreferredName(), isInterim); + return builder; + } + + /** + * Data store ID of this bucket influencer. 
+ */ + public String getId() { + return jobId + "_bucket_influencer_" + timestamp.getTime() + "_" + bucketSpan + + (influenceField == null ? "" : "_" + influenceField); + } + + public String getJobId() { + return jobId; + } + + public double getProbability() { + return probability; + } + + public void setProbability(double probability) { + this.probability = probability; + } + + public String getInfluencerFieldName() { + return influenceField; + } + + public void setInfluencerFieldName(String fieldName) { + this.influenceField = fieldName; + } + + public double getInitialAnomalyScore() { + return initialAnomalyScore; + } + + public void setInitialAnomalyScore(double influenceScore) { + this.initialAnomalyScore = influenceScore; + } + + public double getAnomalyScore() { + return anomalyScore; + } + + public void setAnomalyScore(double score) { + anomalyScore = score; + } + + public double getRawAnomalyScore() { + return rawAnomalyScore; + } + + public void setRawAnomalyScore(double score) { + rawAnomalyScore = score; + } + + public void setIsInterim(boolean isInterim) { + this.isInterim = isInterim; + } + + public boolean isInterim() { + return isInterim; + } + + public Date getTimestamp() { + return timestamp; + } + + @Override + public int hashCode() { + return Objects.hash(influenceField, initialAnomalyScore, anomalyScore, rawAnomalyScore, probability, isInterim, timestamp, jobId, + bucketSpan); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null) { + return false; + } + + if (getClass() != obj.getClass()) { + return false; + } + + BucketInfluencer other = (BucketInfluencer) obj; + + return Objects.equals(influenceField, other.influenceField) && Double.compare(initialAnomalyScore, other.initialAnomalyScore) == 0 + && Double.compare(anomalyScore, other.anomalyScore) == 0 && Double.compare(rawAnomalyScore, other.rawAnomalyScore) == 0 + && Double.compare(probability, other.probability) == 0 && Objects.equals(isInterim, other.isInterim) + && Objects.equals(timestamp, other.timestamp) && Objects.equals(jobId, other.jobId) && bucketSpan == other.bucketSpan; + + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java new file mode 100644 index 0000000000000..98c38241856b6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java @@ -0,0 +1,176 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.job.results; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.job.config.Job; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.TreeSet; + +public class CategoryDefinition implements ToXContentObject, Writeable { + + /** + * Legacy type, now used only as a discriminant in the document ID + */ + public static final ParseField TYPE = new ParseField("category_definition"); + + public static final ParseField CATEGORY_ID = new ParseField("category_id"); + public static final ParseField TERMS = new ParseField("terms"); + public static final ParseField REGEX = new ParseField("regex"); + public static final ParseField MAX_MATCHING_LENGTH = new ParseField("max_matching_length"); + public static final ParseField EXAMPLES = new ParseField("examples"); + + // Used for QueryPage + public static final ParseField RESULTS_FIELD = new ParseField("categories"); + + public static final ConstructingObjectParser STRICT_PARSER = createParser(false); + public static final ConstructingObjectParser LENIENT_PARSER = createParser(true); + + private static ConstructingObjectParser createParser(boolean ignoreUnknownFields) { + ConstructingObjectParser parser = new ConstructingObjectParser<>(TYPE.getPreferredName(), + ignoreUnknownFields, a -> new CategoryDefinition((String) a[0])); + + parser.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + parser.declareLong(CategoryDefinition::setCategoryId, CATEGORY_ID); + parser.declareString(CategoryDefinition::setTerms, TERMS); + parser.declareString(CategoryDefinition::setRegex, REGEX); + parser.declareLong(CategoryDefinition::setMaxMatchingLength, MAX_MATCHING_LENGTH); + parser.declareStringArray(CategoryDefinition::setExamples, EXAMPLES); + + return parser; + } + + private final String jobId; + private long categoryId = 0L; + private String terms = ""; + private String regex = ""; + private long maxMatchingLength = 0L; + private final Set examples; + + public CategoryDefinition(String jobId) { + this.jobId = jobId; + examples = new TreeSet<>(); + } + + public CategoryDefinition(StreamInput in) throws IOException { + jobId = in.readString(); + categoryId = in.readLong(); + terms = in.readString(); + regex = in.readString(); + maxMatchingLength = in.readLong(); + examples = new TreeSet<>(in.readList(StreamInput::readString)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(jobId); + out.writeLong(categoryId); + out.writeString(terms); + out.writeString(regex); + out.writeLong(maxMatchingLength); + out.writeStringList(new ArrayList<>(examples)); + } + + public String getJobId() { + return jobId; + } + + public String getId() { + return jobId + "_" + TYPE + "_" + categoryId; + } + + public long getCategoryId() { + return categoryId; + } + + public void setCategoryId(long categoryId) { + this.categoryId = categoryId; + } + + public String getTerms() { + return terms; + } + + public void setTerms(String terms) { + this.terms = terms; + } + + public String 
getRegex() { + return regex; + } + + public void setRegex(String regex) { + this.regex = regex; + } + + public long getMaxMatchingLength() { + return maxMatchingLength; + } + + public void setMaxMatchingLength(long maxMatchingLength) { + this.maxMatchingLength = maxMatchingLength; + } + + public List getExamples() { + return new ArrayList<>(examples); + } + + public void setExamples(Collection examples) { + this.examples.clear(); + this.examples.addAll(examples); + } + + public void addExample(String example) { + examples.add(example); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(CATEGORY_ID.getPreferredName(), categoryId); + builder.field(TERMS.getPreferredName(), terms); + builder.field(REGEX.getPreferredName(), regex); + builder.field(MAX_MATCHING_LENGTH.getPreferredName(), maxMatchingLength); + builder.field(EXAMPLES.getPreferredName(), examples); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other instanceof CategoryDefinition == false) { + return false; + } + CategoryDefinition that = (CategoryDefinition) other; + return Objects.equals(this.jobId, that.jobId) + && Objects.equals(this.categoryId, that.categoryId) + && Objects.equals(this.terms, that.terms) + && Objects.equals(this.regex, that.regex) + && Objects.equals(this.maxMatchingLength, that.maxMatchingLength) + && Objects.equals(this.examples, that.examples); + } + + @Override + public int hashCode() { + return Objects.hash(jobId, categoryId, terms, regex, maxMatchingLength, examples); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Forecast.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Forecast.java new file mode 100644 index 0000000000000..47f6769a07f9d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Forecast.java @@ -0,0 +1,290 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.results; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; + +import java.io.IOException; +import java.util.Date; +import java.util.Objects; + +/** + * Model Forecast POJO. 
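`CategoryDefinition` keeps its examples in a `TreeSet`, so `addExample`/`setExamples` de-duplicate entries and `getExamples()` returns them in a stable, sorted order. A JDK-only sketch of that behaviour (example strings are made up):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;

// JDK-only sketch of why CategoryDefinition stores examples in a TreeSet:
// duplicates collapse and the examples come back sorted.
public class CategoryExamplesExample {
    public static void main(String[] args) {
        Set<String> examples = new TreeSet<>();
        examples.addAll(Arrays.asList("Node stopped", "Node started", "Node started"));

        List<String> asList = new ArrayList<>(examples); // what getExamples() effectively returns
        System.out.println(asList); // [Node started, Node stopped]
    }
}
```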
+ */ +public class Forecast implements ToXContentObject, Writeable { + /** + * Result type + */ + public static final String RESULT_TYPE_VALUE = "model_forecast"; + public static final ParseField RESULTS_FIELD = new ParseField(RESULT_TYPE_VALUE); + + public static final ParseField FORECAST_ID = new ParseField("forecast_id"); + public static final ParseField PARTITION_FIELD_NAME = new ParseField("partition_field_name"); + public static final ParseField PARTITION_FIELD_VALUE = new ParseField("partition_field_value"); + public static final ParseField BY_FIELD_NAME = new ParseField("by_field_name"); + public static final ParseField BY_FIELD_VALUE = new ParseField("by_field_value"); + public static final ParseField MODEL_FEATURE = new ParseField("model_feature"); + public static final ParseField FORECAST_LOWER = new ParseField("forecast_lower"); + public static final ParseField FORECAST_UPPER = new ParseField("forecast_upper"); + public static final ParseField FORECAST_PREDICTION = new ParseField("forecast_prediction"); + public static final ParseField BUCKET_SPAN = new ParseField("bucket_span"); + public static final ParseField DETECTOR_INDEX = new ParseField("detector_index"); + + public static final ConstructingObjectParser STRICT_PARSER = createParser(false); + + private static ConstructingObjectParser createParser(boolean ignoreUnknownFields) { + ConstructingObjectParser parser = new ConstructingObjectParser<>(RESULT_TYPE_VALUE, ignoreUnknownFields, + a -> new Forecast((String) a[0], (String) a[1], (Date) a[2], (long) a[3], (int) a[4])); + + parser.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + parser.declareString(ConstructingObjectParser.constructorArg(), FORECAST_ID); + parser.declareField(ConstructingObjectParser.constructorArg(), p -> { + if (p.currentToken() == Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException("unexpected token [" + p.currentToken() + "] for [" + + Result.TIMESTAMP.getPreferredName() + "]"); + }, Result.TIMESTAMP, ValueType.VALUE); + parser.declareLong(ConstructingObjectParser.constructorArg(), BUCKET_SPAN); + parser.declareInt(ConstructingObjectParser.constructorArg(), DETECTOR_INDEX); + parser.declareString((modelForecast, s) -> {}, Result.RESULT_TYPE); + parser.declareString(Forecast::setPartitionFieldName, PARTITION_FIELD_NAME); + parser.declareString(Forecast::setPartitionFieldValue, PARTITION_FIELD_VALUE); + parser.declareString(Forecast::setByFieldName, BY_FIELD_NAME); + parser.declareString(Forecast::setByFieldValue, BY_FIELD_VALUE); + parser.declareString(Forecast::setModelFeature, MODEL_FEATURE); + parser.declareDouble(Forecast::setForecastLower, FORECAST_LOWER); + parser.declareDouble(Forecast::setForecastUpper, FORECAST_UPPER); + parser.declareDouble(Forecast::setForecastPrediction, FORECAST_PREDICTION); + + return parser; + } + + private final String jobId; + private final String forecastId; + private final Date timestamp; + private final long bucketSpan; + private int detectorIndex; + private String partitionFieldName; + private String partitionFieldValue; + private String byFieldName; + private String byFieldValue; + private String modelFeature; + private double forecastLower; + private double forecastUpper; + private double forecastPrediction; + + public Forecast(String jobId, String forecastId, Date timestamp, long bucketSpan, int detectorIndex) { + this.jobId = 
Objects.requireNonNull(jobId); + this.forecastId = Objects.requireNonNull(forecastId); + this.timestamp = timestamp; + this.bucketSpan = bucketSpan; + this.detectorIndex = detectorIndex; + } + + public Forecast(StreamInput in) throws IOException { + jobId = in.readString(); + forecastId = in.readString(); + timestamp = new Date(in.readLong()); + partitionFieldName = in.readOptionalString(); + partitionFieldValue = in.readOptionalString(); + byFieldName = in.readOptionalString(); + byFieldValue = in.readOptionalString(); + modelFeature = in.readOptionalString(); + forecastLower = in.readDouble(); + forecastUpper = in.readDouble(); + forecastPrediction = in.readDouble(); + bucketSpan = in.readLong(); + detectorIndex = in.readInt(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(jobId); + out.writeString(forecastId); + out.writeLong(timestamp.getTime()); + out.writeOptionalString(partitionFieldName); + out.writeOptionalString(partitionFieldValue); + out.writeOptionalString(byFieldName); + out.writeOptionalString(byFieldValue); + out.writeOptionalString(modelFeature); + out.writeDouble(forecastLower); + out.writeDouble(forecastUpper); + out.writeDouble(forecastPrediction); + out.writeLong(bucketSpan); + out.writeInt(detectorIndex); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(FORECAST_ID.getPreferredName(), forecastId); + builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE); + builder.field(BUCKET_SPAN.getPreferredName(), bucketSpan); + builder.field(DETECTOR_INDEX.getPreferredName(), detectorIndex); + if (timestamp != null) { + builder.timeField(Result.TIMESTAMP.getPreferredName(), + Result.TIMESTAMP.getPreferredName() + "_string", timestamp.getTime()); + } + if (partitionFieldName != null) { + builder.field(PARTITION_FIELD_NAME.getPreferredName(), partitionFieldName); + } + if (partitionFieldValue != null) { + builder.field(PARTITION_FIELD_VALUE.getPreferredName(), partitionFieldValue); + } + if (byFieldName != null) { + builder.field(BY_FIELD_NAME.getPreferredName(), byFieldName); + } + if (byFieldValue != null) { + builder.field(BY_FIELD_VALUE.getPreferredName(), byFieldValue); + } + if (modelFeature != null) { + builder.field(MODEL_FEATURE.getPreferredName(), modelFeature); + } + builder.field(FORECAST_LOWER.getPreferredName(), forecastLower); + builder.field(FORECAST_UPPER.getPreferredName(), forecastUpper); + builder.field(FORECAST_PREDICTION.getPreferredName(), forecastPrediction); + builder.endObject(); + return builder; + } + + public String getJobId() { + return jobId; + } + + public String getForecastId() { + return forecastId; + } + + public String getId() { + int valuesHash = Objects.hash(byFieldValue, partitionFieldValue); + int length = (byFieldValue == null ? 0 : byFieldValue.length()) + + (partitionFieldValue == null ? 
0 : partitionFieldValue.length()); + return jobId + "_model_forecast_" + forecastId + "_" + timestamp.getTime() + + "_" + bucketSpan + "_" + detectorIndex + "_" + + valuesHash + "_" + length; + } + + public Date getTimestamp() { + return timestamp; + } + + public long getBucketSpan() { + return bucketSpan; + } + + public String getPartitionFieldName() { + return partitionFieldName; + } + + public void setPartitionFieldName(String partitionFieldName) { + this.partitionFieldName = partitionFieldName; + } + + public int getDetectorIndex() { + return detectorIndex; + } + + public String getPartitionFieldValue() { + return partitionFieldValue; + } + + public void setPartitionFieldValue(String partitionFieldValue) { + this.partitionFieldValue = partitionFieldValue; + } + + public String getByFieldName() { + return byFieldName; + } + + public void setByFieldName(String byFieldName) { + this.byFieldName = byFieldName; + } + + public String getByFieldValue() { + return byFieldValue; + } + + public void setByFieldValue(String byFieldValue) { + this.byFieldValue = byFieldValue; + } + + public String getModelFeature() { + return modelFeature; + } + + public void setModelFeature(String modelFeature) { + this.modelFeature = modelFeature; + } + + public double getForecastLower() { + return forecastLower; + } + + public void setForecastLower(double forecastLower) { + this.forecastLower = forecastLower; + } + + public double getForecastUpper() { + return forecastUpper; + } + + public void setForecastUpper(double forecastUpper) { + this.forecastUpper = forecastUpper; + } + + public double getForecastPrediction() { + return forecastPrediction; + } + + public void setForecastPrediction(double forecastPrediction) { + this.forecastPrediction = forecastPrediction; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other instanceof Forecast == false) { + return false; + } + Forecast that = (Forecast) other; + return Objects.equals(this.jobId, that.jobId) && + Objects.equals(this.forecastId, that.forecastId) && + Objects.equals(this.timestamp, that.timestamp) && + Objects.equals(this.partitionFieldValue, that.partitionFieldValue) && + Objects.equals(this.partitionFieldName, that.partitionFieldName) && + Objects.equals(this.byFieldValue, that.byFieldValue) && + Objects.equals(this.byFieldName, that.byFieldName) && + Objects.equals(this.modelFeature, that.modelFeature) && + this.forecastLower == that.forecastLower && + this.forecastUpper == that.forecastUpper && + this.forecastPrediction == that.forecastPrediction && + this.bucketSpan == that.bucketSpan && + this.detectorIndex == that.detectorIndex; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, forecastId, timestamp, partitionFieldName, partitionFieldValue, + byFieldName, byFieldValue, modelFeature, forecastLower, forecastUpper, + forecastPrediction, bucketSpan, detectorIndex); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ForecastRequestStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ForecastRequestStats.java new file mode 100644 index 0000000000000..a9daa78a6362a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ForecastRequestStats.java @@ -0,0 +1,344 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
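`Forecast#getId()` above appends both a hash of the by/partition field values and their combined length, so that different field values rarely collide on the same document ID. A standalone sketch of the same composition with made-up values:

```java
import java.util.Objects;

// Standalone illustration of the Forecast#getId() document ID scheme above (values are made up).
public class ForecastIdExample {
    public static void main(String[] args) {
        String jobId = "my-job";               // hypothetical
        String forecastId = "f-1";             // hypothetical
        long timestampMs = 1525046400000L;
        long bucketSpan = 900L;
        int detectorIndex = 0;
        String byFieldValue = null;            // no by field in this example
        String partitionFieldValue = "host-7";

        int valuesHash = Objects.hash(byFieldValue, partitionFieldValue);
        int length = (byFieldValue == null ? 0 : byFieldValue.length())
                + (partitionFieldValue == null ? 0 : partitionFieldValue.length());

        String docId = jobId + "_model_forecast_" + forecastId + "_" + timestampMs
                + "_" + bucketSpan + "_" + detectorIndex + "_" + valuesHash + "_" + length;
        System.out.println(docId);
    }
}
```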
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.results; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.job.config.Job; + +import java.io.IOException; +import java.time.Instant; +import java.util.List; +import java.util.Locale; +import java.util.Objects; + +/** + * Model ForecastRequestStats POJO. + * + * This information is produced by the autodetect process and contains + * information about errors, progress and counters. There is exactly 1 document + * per forecast request, getting updated while the request is processed. + */ +public class ForecastRequestStats implements ToXContentObject, Writeable { + /** + * Result type + */ + public static final String RESULT_TYPE_VALUE = "model_forecast_request_stats"; + + public static final ParseField RESULTS_FIELD = new ParseField(RESULT_TYPE_VALUE); + public static final ParseField FORECAST_ID = new ParseField("forecast_id"); + public static final ParseField START_TIME = new ParseField("forecast_start_timestamp"); + public static final ParseField END_TIME = new ParseField("forecast_end_timestamp"); + public static final ParseField CREATE_TIME = new ParseField("forecast_create_timestamp"); + public static final ParseField EXPIRY_TIME = new ParseField("forecast_expiry_timestamp"); + public static final ParseField MESSAGES = new ParseField("forecast_messages"); + public static final ParseField PROCESSING_TIME_MS = new ParseField("processing_time_ms"); + public static final ParseField PROGRESS = new ParseField("forecast_progress"); + public static final ParseField PROCESSED_RECORD_COUNT = new ParseField("processed_record_count"); + public static final ParseField STATUS = new ParseField("forecast_status"); + public static final ParseField MEMORY_USAGE = new ParseField("forecast_memory_bytes"); + + public static final ConstructingObjectParser STRICT_PARSER = createParser(false); + public static final ConstructingObjectParser LENIENT_PARSER = createParser(true); + + private static ConstructingObjectParser createParser(boolean ignoreUnknownFields) { + ConstructingObjectParser parser = new ConstructingObjectParser<>(RESULT_TYPE_VALUE, ignoreUnknownFields, + a -> new ForecastRequestStats((String) a[0], (String) a[1])); + + parser.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + parser.declareString(ConstructingObjectParser.constructorArg(), FORECAST_ID); + + parser.declareString((modelForecastRequestStats, s) -> {}, Result.RESULT_TYPE); + parser.declareLong(ForecastRequestStats::setRecordCount, PROCESSED_RECORD_COUNT); + parser.declareStringArray(ForecastRequestStats::setMessages, MESSAGES); + parser.declareField(ForecastRequestStats::setTimeStamp, + p -> Instant.ofEpochMilli(p.longValue()), Result.TIMESTAMP, ValueType.LONG); + parser.declareField(ForecastRequestStats::setStartTime, + p -> Instant.ofEpochMilli(p.longValue()), START_TIME, ValueType.LONG); + parser.declareField(ForecastRequestStats::setEndTime, + p -> Instant.ofEpochMilli(p.longValue()), END_TIME, 
ValueType.LONG); + parser.declareField(ForecastRequestStats::setCreateTime, + p -> Instant.ofEpochMilli(p.longValue()), CREATE_TIME, ValueType.LONG); + parser.declareField(ForecastRequestStats::setExpiryTime, + p -> Instant.ofEpochMilli(p.longValue()), EXPIRY_TIME, ValueType.LONG); + parser.declareDouble(ForecastRequestStats::setProgress, PROGRESS); + parser.declareLong(ForecastRequestStats::setProcessingTime, PROCESSING_TIME_MS); + parser.declareField(ForecastRequestStats::setStatus, p -> ForecastRequestStatus.fromString(p.text()), STATUS, ValueType.STRING); + parser.declareLong(ForecastRequestStats::setMemoryUsage, MEMORY_USAGE); + + return parser; + } + + public enum ForecastRequestStatus implements Writeable { + OK, FAILED, STOPPED, STARTED, FINISHED, SCHEDULED; + + public static ForecastRequestStatus fromString(String statusName) { + return valueOf(statusName.trim().toUpperCase(Locale.ROOT)); + } + + public static ForecastRequestStatus readFromStream(StreamInput in) throws IOException { + return in.readEnum(ForecastRequestStatus.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(this); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } + + private final String jobId; + private final String forecastId; + private long recordCount; + private List messages; + private Instant timestamp = Instant.EPOCH; + private Instant startTime = Instant.EPOCH; + private Instant endTime = Instant.EPOCH; + private Instant createTime = Instant.EPOCH; + private Instant expiryTime = Instant.EPOCH; + private double progress; + private long processingTime; + private long memoryUsage; + private ForecastRequestStatus status = ForecastRequestStatus.OK; + + public ForecastRequestStats(String jobId, String forecastId) { + this.jobId = Objects.requireNonNull(jobId); + this.forecastId = Objects.requireNonNull(forecastId); + } + + public ForecastRequestStats(StreamInput in) throws IOException { + jobId = in.readString(); + forecastId = in.readString(); + recordCount = in.readLong(); + if (in.readBoolean()) { + messages = in.readList(StreamInput::readString); + } else { + messages = null; + } + + timestamp = Instant.ofEpochMilli(in.readVLong()); + startTime = Instant.ofEpochMilli(in.readVLong()); + endTime = Instant.ofEpochMilli(in.readVLong()); + createTime = Instant.ofEpochMilli(in.readVLong()); + expiryTime = Instant.ofEpochMilli(in.readVLong()); + progress = in.readDouble(); + processingTime = in.readLong(); + setMemoryUsage(in.readLong()); + status = ForecastRequestStatus.readFromStream(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(jobId); + out.writeString(forecastId); + out.writeLong(recordCount); + if (messages != null) { + out.writeBoolean(true); + out.writeStringList(messages); + } else { + out.writeBoolean(false); + } + out.writeVLong(timestamp.toEpochMilli()); + out.writeVLong(startTime.toEpochMilli()); + out.writeVLong(endTime.toEpochMilli()); + out.writeVLong(createTime.toEpochMilli()); + out.writeVLong(expiryTime.toEpochMilli()); + out.writeDouble(progress); + out.writeLong(processingTime); + out.writeLong(getMemoryUsage()); + status.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE); + 
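`ForecastRequestStats` initialises all of its timestamps to `Instant.EPOCH` and treats that value as "unset": the XContent output only emits a timestamp once it has moved away from the sentinel. A JDK-only sketch of that convention (field name and values are illustrative):

```java
import java.time.Instant;

// JDK-only sketch of the Instant.EPOCH "unset" sentinel used by ForecastRequestStats.
public class EpochSentinelExample {
    private Instant endTime = Instant.EPOCH;

    void report() {
        if (endTime.equals(Instant.EPOCH) == false) {
            System.out.println("forecast_end_timestamp=" + endTime.toEpochMilli());
        } // otherwise the field is simply omitted, as in toXContent()
    }

    public static void main(String[] args) {
        EpochSentinelExample stats = new EpochSentinelExample();
        stats.report();                 // prints nothing: end time not set yet
        stats.endTime = Instant.now();
        stats.report();                 // now the field is emitted
    }
}
```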
builder.field(FORECAST_ID.getPreferredName(), forecastId); + builder.field(PROCESSED_RECORD_COUNT.getPreferredName(), recordCount); + if (messages != null) { + builder.field(MESSAGES.getPreferredName(), messages); + } + if (timestamp.equals(Instant.EPOCH) == false) { + builder.field(Result.TIMESTAMP.getPreferredName(), timestamp.toEpochMilli()); + } + if (startTime.equals(Instant.EPOCH) == false) { + builder.field(START_TIME.getPreferredName(), startTime.toEpochMilli()); + } + if (endTime.equals(Instant.EPOCH) == false) { + builder.field(END_TIME.getPreferredName(), endTime.toEpochMilli()); + } + if (createTime.equals(Instant.EPOCH) == false) { + builder.field(CREATE_TIME.getPreferredName(), createTime.toEpochMilli()); + } + if (expiryTime.equals(Instant.EPOCH) == false) { + builder.field(EXPIRY_TIME.getPreferredName(), expiryTime.toEpochMilli()); + } + builder.field(PROGRESS.getPreferredName(), progress); + builder.field(PROCESSING_TIME_MS.getPreferredName(), processingTime); + builder.field(MEMORY_USAGE.getPreferredName(), getMemoryUsage()); + builder.field(STATUS.getPreferredName(), status); + builder.endObject(); + return builder; + } + + public String getJobId() { + return jobId; + } + + public String getForecastId() { + return forecastId; + } + + public static String documentId(String jobId, String forecastId) { + return jobId + "_model_forecast_request_stats_" + forecastId; + } + + /** + * Return the document ID used for indexing. As there is 1 and only 1 document + * per forecast request, the id has no dynamic parts. + * + * @return id + */ + public String getId() { + return documentId(jobId, forecastId); + } + + public void setRecordCount(long recordCount) { + this.recordCount = recordCount; + } + + public long getRecordCount() { + return recordCount; + } + + public List getMessages() { + return messages; + } + + public void setMessages(List messages) { + this.messages = messages; + } + + public void setTimeStamp(Instant timestamp) { + this.timestamp = timestamp; + } + + public Instant getTimestamp() { + return timestamp; + } + + public void setStartTime(Instant startTime) { + this.startTime = startTime; + } + + public Instant getStartTime() { + return startTime; + } + + public Instant getEndTime() { + return endTime; + } + + public void setEndTime(Instant endTime) { + this.endTime = endTime; + } + + public void setCreateTime(Instant createTime) { + this.createTime = createTime; + } + + public Instant getCreateTime() { + return createTime; + } + + public void setExpiryTime(Instant expiryTime) { + this.expiryTime = expiryTime; + } + + public Instant getExpiryTime() { + return expiryTime; + } + + /** + * Progress information of the ForecastRequest in the range 0 to 1, + * while 1 means finished + * + * @return progress value + */ + public double getProgress() { + return progress; + } + + public void setProgress(double progress) { + this.progress = progress; + } + + public long getProcessingTime() { + return processingTime; + } + + public void setProcessingTime(long processingTime) { + this.processingTime = processingTime; + } + + public long getMemoryUsage() { + return memoryUsage; + } + + public void setMemoryUsage(long memoryUsage) { + this.memoryUsage = memoryUsage; + } + + public ForecastRequestStatus getStatus() { + return status; + } + + public void setStatus(ForecastRequestStatus jobStatus) { + Objects.requireNonNull(jobStatus, "[" + STATUS.getPreferredName() + "] must not be null"); + this.status = jobStatus; + } + + @Override + public boolean equals(Object other) { + if 
(this == other) { + return true; + } + if (other instanceof ForecastRequestStats == false) { + return false; + } + ForecastRequestStats that = (ForecastRequestStats) other; + return Objects.equals(this.jobId, that.jobId) && + Objects.equals(this.forecastId, that.forecastId) && + this.recordCount == that.recordCount && + Objects.equals(this.messages, that.messages) && + Objects.equals(this.timestamp, that.timestamp) && + Objects.equals(this.startTime, that.startTime) && + Objects.equals(this.endTime, that.endTime) && + Objects.equals(this.createTime, that.createTime) && + Objects.equals(this.expiryTime, that.expiryTime) && + this.progress == that.progress && + this.processingTime == that.processingTime && + this.memoryUsage == that.memoryUsage && + Objects.equals(this.status, that.status); + } + + @Override + public int hashCode() { + return Objects.hash(jobId, forecastId, recordCount, messages, timestamp, startTime, endTime, createTime, expiryTime, + progress, processingTime, memoryUsage, status); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influence.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influence.java new file mode 100644 index 0000000000000..ab6ca54f3a160 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influence.java @@ -0,0 +1,103 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.results; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +/** + * Influence field name and list of influence field values/score pairs + */ +public class Influence implements ToXContentObject, Writeable { + + /** + * Note all publicly exposed field names are "influencer" not "influence" + */ + public static final ParseField INFLUENCER = new ParseField("influencer"); + public static final ParseField INFLUENCER_FIELD_NAME = new ParseField("influencer_field_name"); + public static final ParseField INFLUENCER_FIELD_VALUES = new ParseField("influencer_field_values"); + + public static final ConstructingObjectParser STRICT_PARSER = createParser(false); + public static final ConstructingObjectParser LENIENT_PARSER = createParser(true); + + private static ConstructingObjectParser createParser(boolean ignoreUnknownFields) { + ConstructingObjectParser parser = new ConstructingObjectParser<>(INFLUENCER.getPreferredName(), + ignoreUnknownFields, a -> new Influence((String) a[0], (List) a[1])); + + parser.declareString(ConstructingObjectParser.constructorArg(), INFLUENCER_FIELD_NAME); + parser.declareStringArray(ConstructingObjectParser.constructorArg(), INFLUENCER_FIELD_VALUES); + + return parser; + } + private String field; + private List fieldValues; + + public Influence(String field, List fieldValues) { + this.field = field; + this.fieldValues = fieldValues; + } + + public 
Influence(StreamInput in) throws IOException { + this.field = in.readString(); + this.fieldValues = Arrays.asList(in.readStringArray()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(field); + out.writeStringArray(fieldValues.toArray(new String[fieldValues.size()])); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(INFLUENCER_FIELD_NAME.getPreferredName(), field); + builder.field(INFLUENCER_FIELD_VALUES.getPreferredName(), fieldValues); + builder.endObject(); + return builder; + } + + public String getInfluencerFieldName() { + return field; + } + + public List getInfluencerFieldValues() { + return fieldValues; + } + + @Override + public int hashCode() { + return Objects.hash(field, fieldValues); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null) { + return false; + } + + if (getClass() != obj.getClass()) { + return false; + } + + Influence other = (Influence) obj; + return Objects.equals(field, other.field) && Objects.equals(fieldValues, other.fieldValues); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influencer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influencer.java new file mode 100644 index 0000000000000..97ed643c44dd5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influencer.java @@ -0,0 +1,229 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
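Like the other result classes in this patch, `Influence` implements `Writeable` and can be round-tripped through the stream API. The sketch below (assuming the x-pack core classes and an Elasticsearch runtime on the classpath; field values are made up) shows such a round trip via `BytesStreamOutput`:

```java
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.xpack.core.ml.job.results.Influence;

import java.io.IOException;
import java.util.Arrays;

// Sketch of a Writeable round trip for the Influence class above.
public class InfluenceRoundTripExample {
    public static void main(String[] args) throws IOException {
        Influence original = new Influence("client_ip", Arrays.asList("10.0.0.1", "10.0.0.2"));

        try (BytesStreamOutput out = new BytesStreamOutput()) {
            original.writeTo(out);
            try (StreamInput in = out.bytes().streamInput()) {
                Influence copy = new Influence(in);
                System.out.println(copy.equals(original)); // true
            }
        }
    }
}
```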
+ */ +package org.elasticsearch.xpack.core.ml.job.results; + +import org.elasticsearch.Version; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; + +import java.io.IOException; +import java.util.Date; +import java.util.Objects; + +public class Influencer implements ToXContentObject, Writeable { + /** + * Result type + */ + public static final String RESULT_TYPE_VALUE = "influencer"; + public static final ParseField RESULT_TYPE_FIELD = new ParseField(RESULT_TYPE_VALUE); + + /* + * Field names + */ + public static final ParseField PROBABILITY = new ParseField("probability"); + public static final ParseField BUCKET_SPAN = new ParseField("bucket_span"); + public static final ParseField INFLUENCER_FIELD_NAME = new ParseField("influencer_field_name"); + public static final ParseField INFLUENCER_FIELD_VALUE = new ParseField("influencer_field_value"); + public static final ParseField INITIAL_INFLUENCER_SCORE = new ParseField("initial_influencer_score"); + public static final ParseField INFLUENCER_SCORE = new ParseField("influencer_score"); + + // Used for QueryPage + public static final ParseField RESULTS_FIELD = new ParseField("influencers"); + + // Influencers contain data fields, thus we always parse them leniently + public static final ConstructingObjectParser LENIENT_PARSER = new ConstructingObjectParser<>( + RESULT_TYPE_FIELD.getPreferredName(), true, + a -> new Influencer((String) a[0], (String) a[1], (String) a[2], (Date) a[3], (long) a[4])); + + static { + LENIENT_PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + LENIENT_PARSER.declareString(ConstructingObjectParser.constructorArg(), INFLUENCER_FIELD_NAME); + LENIENT_PARSER.declareString(ConstructingObjectParser.constructorArg(), INFLUENCER_FIELD_VALUE); + LENIENT_PARSER.declareField(ConstructingObjectParser.constructorArg(), p -> { + if (p.currentToken() == Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException("unexpected token [" + p.currentToken() + "] for [" + + Result.TIMESTAMP.getPreferredName() + "]"); + }, Result.TIMESTAMP, ValueType.VALUE); + LENIENT_PARSER.declareLong(ConstructingObjectParser.constructorArg(), BUCKET_SPAN); + LENIENT_PARSER.declareString((influencer, s) -> {}, Result.RESULT_TYPE); + LENIENT_PARSER.declareDouble(Influencer::setProbability, PROBABILITY); + LENIENT_PARSER.declareDouble(Influencer::setInfluencerScore, INFLUENCER_SCORE); + LENIENT_PARSER.declareDouble(Influencer::setInitialInfluencerScore, INITIAL_INFLUENCER_SCORE); + LENIENT_PARSER.declareBoolean(Influencer::setInterim, Result.IS_INTERIM); + } + + private final String jobId; + private final Date timestamp; + private final long bucketSpan; + private String influenceField; + private String influenceValue; + private double 
probability; + private double initialInfluencerScore; + private double influencerScore; + private boolean isInterim; + + public Influencer(String jobId, String fieldName, String fieldValue, Date timestamp, long bucketSpan) { + this.jobId = jobId; + influenceField = fieldName; + influenceValue = fieldValue; + this.timestamp = ExceptionsHelper.requireNonNull(timestamp, Result.TIMESTAMP.getPreferredName()); + this.bucketSpan = bucketSpan; + } + + public Influencer(StreamInput in) throws IOException { + jobId = in.readString(); + timestamp = new Date(in.readLong()); + influenceField = in.readString(); + influenceValue = in.readString(); + probability = in.readDouble(); + initialInfluencerScore = in.readDouble(); + influencerScore = in.readDouble(); + isInterim = in.readBoolean(); + bucketSpan = in.readLong(); + // bwc for removed sequenceNum field + if (in.getVersion().before(Version.V_5_5_0)) { + in.readInt(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(jobId); + out.writeLong(timestamp.getTime()); + out.writeString(influenceField); + out.writeString(influenceValue); + out.writeDouble(probability); + out.writeDouble(initialInfluencerScore); + out.writeDouble(influencerScore); + out.writeBoolean(isInterim); + out.writeLong(bucketSpan); + // bwc for removed sequenceNum field + if (out.getVersion().before(Version.V_5_5_0)) { + out.writeInt(0); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + innerToXContent(builder, params); + builder.endObject(); + return builder; + } + + XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE); + builder.field(INFLUENCER_FIELD_NAME.getPreferredName(), influenceField); + builder.field(INFLUENCER_FIELD_VALUE.getPreferredName(), influenceValue); + if (ReservedFieldNames.isValidFieldName(influenceField)) { + builder.field(influenceField, influenceValue); + } + builder.field(INFLUENCER_SCORE.getPreferredName(), influencerScore); + builder.field(INITIAL_INFLUENCER_SCORE.getPreferredName(), initialInfluencerScore); + builder.field(PROBABILITY.getPreferredName(), probability); + builder.field(BUCKET_SPAN.getPreferredName(), bucketSpan); + builder.field(Result.IS_INTERIM.getPreferredName(), isInterim); + builder.timeField(Result.TIMESTAMP.getPreferredName(), Result.TIMESTAMP.getPreferredName() + "_string", timestamp.getTime()); + return builder; + } + + public String getJobId() { + return jobId; + } + + public String getId() { + return jobId + "_influencer_" + timestamp.getTime() + "_" + bucketSpan + "_" + + influenceField + "_" + influenceValue.hashCode() + "_" + influenceValue.length(); + } + + public double getProbability() { + return probability; + } + + public void setProbability(double probability) { + this.probability = probability; + } + + public Date getTimestamp() { + return timestamp; + } + + public String getInfluencerFieldName() { + return influenceField; + } + + public String getInfluencerFieldValue() { + return influenceValue; + } + + public double getInitialInfluencerScore() { + return initialInfluencerScore; + } + + public void setInitialInfluencerScore(double score) { + initialInfluencerScore = score; + } + + public double getInfluencerScore() { + return influencerScore; + } + + public void setInfluencerScore(double score) { + influencerScore 
= score; + } + + public boolean isInterim() { + return isInterim; + } + + public void setInterim(boolean value) { + isInterim = value; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, timestamp, influenceField, influenceValue, initialInfluencerScore, + influencerScore, probability, isInterim, bucketSpan); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null) { + return false; + } + + if (getClass() != obj.getClass()) { + return false; + } + + Influencer other = (Influencer) obj; + return Objects.equals(jobId, other.jobId) && Objects.equals(timestamp, other.timestamp) + && Objects.equals(influenceField, other.influenceField) + && Objects.equals(influenceValue, other.influenceValue) + && Double.compare(initialInfluencerScore, other.initialInfluencerScore) == 0 + && Double.compare(influencerScore, other.influencerScore) == 0 && Double.compare(probability, other.probability) == 0 + && (isInterim == other.isInterim) && (bucketSpan == other.bucketSpan); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ModelPlot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ModelPlot.java new file mode 100644 index 0000000000000..c331d8b043797 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ModelPlot.java @@ -0,0 +1,388 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.results; + +import org.elasticsearch.Version; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; + +import java.io.IOException; +import java.util.Date; +import java.util.Objects; + +/** + * Model Plot POJO. 
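`Influencer.innerToXContent` above mirrors the raw influencer field/value pair as a top-level field, but only when `ReservedFieldNames.isValidFieldName(...)` confirms the field name does not clash with the result document's own fields. A JDK-only sketch of that guard follows; the reserved-name set and field values here are made up, and the real check lives in `ReservedFieldNames`:

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

// JDK-only sketch of the reserved-field-name guard used in Influencer#innerToXContent above.
public class ReservedNameGuardExample {
    // Hypothetical reserved names; the real list is ReservedFieldNames.
    private static final Set<String> RESERVED = new HashSet<>(
            Arrays.asList("job_id", "timestamp", "influencer_score"));

    public static void main(String[] args) {
        String influenceField = "client_ip";   // hypothetical influencer field name
        String influenceValue = "10.0.0.1";

        Map<String, Object> doc = new LinkedHashMap<>();
        doc.put("influencer_field_name", influenceField);
        doc.put("influencer_field_value", influenceValue);
        if (RESERVED.contains(influenceField) == false) {
            // only mirror the value under its own name when it cannot clash with a result field
            doc.put(influenceField, influenceValue);
        }
        System.out.println(doc);
    }
}
```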
+ */ +public class ModelPlot implements ToXContentObject, Writeable { + /** + * Result type + */ + public static final String RESULT_TYPE_VALUE = "model_plot"; + public static final ParseField RESULTS_FIELD = new ParseField(RESULT_TYPE_VALUE); + + public static final ParseField PARTITION_FIELD_NAME = new ParseField("partition_field_name"); + public static final ParseField PARTITION_FIELD_VALUE = new ParseField("partition_field_value"); + public static final ParseField OVER_FIELD_NAME = new ParseField("over_field_name"); + public static final ParseField OVER_FIELD_VALUE = new ParseField("over_field_value"); + public static final ParseField BY_FIELD_NAME = new ParseField("by_field_name"); + public static final ParseField BY_FIELD_VALUE = new ParseField("by_field_value"); + public static final ParseField MODEL_FEATURE = new ParseField("model_feature"); + public static final ParseField MODEL_LOWER = new ParseField("model_lower"); + public static final ParseField MODEL_UPPER = new ParseField("model_upper"); + public static final ParseField MODEL_MEDIAN = new ParseField("model_median"); + public static final ParseField ACTUAL = new ParseField("actual"); + public static final ParseField BUCKET_SPAN = new ParseField("bucket_span"); + public static final ParseField DETECTOR_INDEX = new ParseField("detector_index"); + + public static final ConstructingObjectParser STRICT_PARSER = createParser(false); + public static final ConstructingObjectParser LENIENT_PARSER = createParser(true); + + private static ConstructingObjectParser createParser(boolean ignoreUnknownFields) { + ConstructingObjectParser parser = new ConstructingObjectParser<>(RESULT_TYPE_VALUE, ignoreUnknownFields, + a -> new ModelPlot((String) a[0], (Date) a[1], (long) a[2], (int) a[3])); + + parser.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + parser.declareField(ConstructingObjectParser.constructorArg(), p -> { + if (p.currentToken() == Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException("unexpected token [" + p.currentToken() + "] for [" + + Result.TIMESTAMP.getPreferredName() + "]"); + }, Result.TIMESTAMP, ValueType.VALUE); + parser.declareLong(ConstructingObjectParser.constructorArg(), BUCKET_SPAN); + parser.declareInt(ConstructingObjectParser.constructorArg(), DETECTOR_INDEX); + parser.declareString((modelPlot, s) -> {}, Result.RESULT_TYPE); + parser.declareString(ModelPlot::setPartitionFieldName, PARTITION_FIELD_NAME); + parser.declareString(ModelPlot::setPartitionFieldValue, PARTITION_FIELD_VALUE); + parser.declareString(ModelPlot::setOverFieldName, OVER_FIELD_NAME); + parser.declareString(ModelPlot::setOverFieldValue, OVER_FIELD_VALUE); + parser.declareString(ModelPlot::setByFieldName, BY_FIELD_NAME); + parser.declareString(ModelPlot::setByFieldValue, BY_FIELD_VALUE); + parser.declareString(ModelPlot::setModelFeature, MODEL_FEATURE); + parser.declareDouble(ModelPlot::setModelLower, MODEL_LOWER); + parser.declareDouble(ModelPlot::setModelUpper, MODEL_UPPER); + parser.declareDouble(ModelPlot::setModelMedian, MODEL_MEDIAN); + parser.declareDouble(ModelPlot::setActual, ACTUAL); + + return parser; + } + + private final String jobId; + private final Date timestamp; + private final long bucketSpan; + private int detectorIndex; + private String partitionFieldName; + private String partitionFieldValue; + private String overFieldName; + private String 
overFieldValue; + private String byFieldName; + private String byFieldValue; + private String modelFeature; + private double modelLower; + private double modelUpper; + private double modelMedian; + /** + * This can be null because buckets where no values were observed will still have a model, but no actual + */ + private Double actual; + + public ModelPlot(String jobId, Date timestamp, long bucketSpan, int detectorIndex) { + this.jobId = jobId; + this.timestamp = timestamp; + this.bucketSpan = bucketSpan; + this.detectorIndex = detectorIndex; + } + + public ModelPlot(StreamInput in) throws IOException { + jobId = in.readString(); + // timestamp isn't optional in v5.5 + if (in.getVersion().before(Version.V_5_5_0)) { + if (in.readBoolean()) { + timestamp = new Date(in.readLong()); + } else { + timestamp = new Date(); + } + } else { + timestamp = new Date(in.readLong()); + } + // bwc for removed id field + if (in.getVersion().before(Version.V_5_5_0)) { + in.readOptionalString(); + } + partitionFieldName = in.readOptionalString(); + partitionFieldValue = in.readOptionalString(); + overFieldName = in.readOptionalString(); + overFieldValue = in.readOptionalString(); + byFieldName = in.readOptionalString(); + byFieldValue = in.readOptionalString(); + modelFeature = in.readOptionalString(); + modelLower = in.readDouble(); + modelUpper = in.readDouble(); + modelMedian = in.readDouble(); + if (in.getVersion().before(Version.V_6_0_0_rc1)) { + actual = in.readDouble(); + } else { + actual = in.readOptionalDouble(); + } + if (in.getVersion().onOrAfter(Version.V_5_5_0)) { + bucketSpan = in.readLong(); + } else { + bucketSpan = 0; + } + if (in.getVersion().onOrAfter(Version.V_6_1_0)) { + detectorIndex = in.readInt(); + } else { + // default to -1 as marker for no detector index + detectorIndex = -1; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(jobId); + // timestamp isn't optional in v5.5 + if (out.getVersion().before(Version.V_5_5_0)) { + boolean hasTimestamp = timestamp != null; + out.writeBoolean(hasTimestamp); + if (hasTimestamp) { + out.writeLong(timestamp.getTime()); + } + } else { + out.writeLong(timestamp.getTime()); + } + // bwc for removed id field + if (out.getVersion().before(Version.V_5_5_0)) { + out.writeOptionalString(null); + } + out.writeOptionalString(partitionFieldName); + out.writeOptionalString(partitionFieldValue); + out.writeOptionalString(overFieldName); + out.writeOptionalString(overFieldValue); + out.writeOptionalString(byFieldName); + out.writeOptionalString(byFieldValue); + out.writeOptionalString(modelFeature); + out.writeDouble(modelLower); + out.writeDouble(modelUpper); + out.writeDouble(modelMedian); + if (out.getVersion().before(Version.V_6_0_0_rc1)) { + if (actual == null) { + // older versions cannot accommodate null, so we have no choice but to propagate the bug of + // https://github.com/elastic/x-pack-elasticsearch/issues/2528 + out.writeDouble(0.0); + } else { + out.writeDouble(actual); + } + } else { + out.writeOptionalDouble(actual); + } + if (out.getVersion().onOrAfter(Version.V_5_5_0)) { + out.writeLong(bucketSpan); + } + if (out.getVersion().onOrAfter(Version.V_6_1_0)) { + out.writeInt(detectorIndex); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE); + 
builder.field(BUCKET_SPAN.getPreferredName(), bucketSpan); + builder.field(DETECTOR_INDEX.getPreferredName(), detectorIndex); + + if (timestamp != null) { + builder.timeField(Result.TIMESTAMP.getPreferredName(), + Result.TIMESTAMP.getPreferredName() + "_string", timestamp.getTime()); + } + if (partitionFieldName != null) { + builder.field(PARTITION_FIELD_NAME.getPreferredName(), partitionFieldName); + } + if (partitionFieldValue != null) { + builder.field(PARTITION_FIELD_VALUE.getPreferredName(), partitionFieldValue); + } + if (overFieldName != null) { + builder.field(OVER_FIELD_NAME.getPreferredName(), overFieldName); + } + if (overFieldValue != null) { + builder.field(OVER_FIELD_VALUE.getPreferredName(), overFieldValue); + } + if (byFieldName != null) { + builder.field(BY_FIELD_NAME.getPreferredName(), byFieldName); + } + if (byFieldValue != null) { + builder.field(BY_FIELD_VALUE.getPreferredName(), byFieldValue); + } + if (modelFeature != null) { + builder.field(MODEL_FEATURE.getPreferredName(), modelFeature); + } + builder.field(MODEL_LOWER.getPreferredName(), modelLower); + builder.field(MODEL_UPPER.getPreferredName(), modelUpper); + builder.field(MODEL_MEDIAN.getPreferredName(), modelMedian); + if (actual != null) { + builder.field(ACTUAL.getPreferredName(), actual); + } + builder.endObject(); + return builder; + } + + public String getJobId() { + return jobId; + } + + public String getId() { + int valuesHash = Objects.hash(byFieldValue, overFieldValue, partitionFieldValue); + int length = (byFieldValue == null ? 0 : byFieldValue.length()) + + (overFieldValue == null ? 0 : overFieldValue.length()) + + (partitionFieldValue == null ? 0 : partitionFieldValue.length()); + return jobId + "_model_plot_" + timestamp.getTime() + "_" + bucketSpan + + "_" + detectorIndex + "_" + valuesHash + "_" + length; + } + + public Date getTimestamp() { + return timestamp; + } + + public long getBucketSpan() { + return bucketSpan; + } + + public int getDetectorIndex() { + return detectorIndex; + } + + public String getPartitionFieldName() { + return partitionFieldName; + } + + public void setPartitionFieldName(String partitionFieldName) { + this.partitionFieldName = partitionFieldName; + } + + public String getPartitionFieldValue() { + return partitionFieldValue; + } + + public void setPartitionFieldValue(String partitionFieldValue) { + this.partitionFieldValue = partitionFieldValue; + } + + public String getOverFieldName() { + return overFieldName; + } + + public void setOverFieldName(String overFieldName) { + this.overFieldName = overFieldName; + } + + public String getOverFieldValue() { + return overFieldValue; + } + + public void setOverFieldValue(String overFieldValue) { + this.overFieldValue = overFieldValue; + } + + public String getByFieldName() { + return byFieldName; + } + + public void setByFieldName(String byFieldName) { + this.byFieldName = byFieldName; + } + + public String getByFieldValue() { + return byFieldValue; + } + + public void setByFieldValue(String byFieldValue) { + this.byFieldValue = byFieldValue; + } + + public String getModelFeature() { + return modelFeature; + } + + public void setModelFeature(String modelFeature) { + this.modelFeature = modelFeature; + } + + public double getModelLower() { + return modelLower; + } + + public void setModelLower(double modelLower) { + this.modelLower = modelLower; + } + + public double getModelUpper() { + return modelUpper; + } + + public void setModelUpper(double modelUpper) { + this.modelUpper = modelUpper; + } + + public double 
getModelMedian() { + return modelMedian; + } + + public void setModelMedian(double modelMedian) { + this.modelMedian = modelMedian; + } + + public Double getActual() { + return actual; + } + + public void setActual(Double actual) { + this.actual = actual; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other instanceof ModelPlot == false) { + return false; + } + ModelPlot that = (ModelPlot) other; + return Objects.equals(this.jobId, that.jobId) && + Objects.equals(this.timestamp, that.timestamp) && + Objects.equals(this.partitionFieldValue, that.partitionFieldValue) && + Objects.equals(this.partitionFieldName, that.partitionFieldName) && + Objects.equals(this.overFieldValue, that.overFieldValue) && + Objects.equals(this.overFieldName, that.overFieldName) && + Objects.equals(this.byFieldValue, that.byFieldValue) && + Objects.equals(this.byFieldName, that.byFieldName) && + Objects.equals(this.modelFeature, that.modelFeature) && + this.modelLower == that.modelLower && + this.modelUpper == that.modelUpper && + this.modelMedian == that.modelMedian && + Objects.equals(this.actual, that.actual) && + this.bucketSpan == that.bucketSpan && + this.detectorIndex == that.detectorIndex; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, timestamp, partitionFieldName, partitionFieldValue, + overFieldName, overFieldValue, byFieldName, byFieldValue, + modelFeature, modelLower, modelUpper, modelMedian, actual, bucketSpan, detectorIndex); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/OverallBucket.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/OverallBucket.java new file mode 100644 index 0000000000000..075001a76b118 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/OverallBucket.java @@ -0,0 +1,199 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
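
ModelPlot documents are not given random IDs; `getId()` above composes a deterministic document ID from the job ID, timestamp, bucket span, detector index, a hash of the by/over/partition field values, and the combined length of those values, so re-indexing the same plot point overwrites the earlier copy rather than duplicating it. A minimal sketch of that composition, with made-up sample values and assuming the classes from this patch are on the classpath:

```java
import java.util.Date;
import org.elasticsearch.xpack.core.ml.job.results.ModelPlot;

// Illustrative only; the job name and field values are invented for the example.
public class ModelPlotIdExample {
    public static void main(String[] args) {
        ModelPlot plot = new ModelPlot("my-job", new Date(1525132800000L), 300L, 0);
        plot.setByFieldValue("host-1");
        plot.setPartitionFieldValue("eu-west");
        // Prints something like: my-job_model_plot_1525132800000_300_0_<valuesHash>_13
        // where 13 is the combined length of "host-1" and "eu-west".
        System.out.println(plot.getId());
    }
}
```
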
+ */ +package org.elasticsearch.xpack.core.ml.job.results; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Date; +import java.util.List; +import java.util.Objects; + +/** + * Overall Bucket Result POJO + */ +public class OverallBucket implements ToXContentObject, Writeable { + + public static final ParseField OVERALL_SCORE = new ParseField("overall_score"); + public static final ParseField BUCKET_SPAN = new ParseField("bucket_span"); + public static final ParseField JOBS = new ParseField("jobs"); + + // Used for QueryPage + public static final ParseField RESULTS_FIELD = new ParseField("overall_buckets"); + + /** + * Result type + */ + public static final String RESULT_TYPE_VALUE = "overall_bucket"; + + private final Date timestamp; + private final long bucketSpan; + private final double overallScore; + private final List jobs; + private final boolean isInterim; + + public OverallBucket(Date timestamp, long bucketSpan, double overallScore, List jobs, boolean isInterim) { + this.timestamp = ExceptionsHelper.requireNonNull(timestamp, Result.TIMESTAMP.getPreferredName()); + this.bucketSpan = bucketSpan; + this.overallScore = overallScore; + this.jobs = jobs; + this.isInterim = isInterim; + } + + public OverallBucket(StreamInput in) throws IOException { + timestamp = new Date(in.readLong()); + bucketSpan = in.readLong(); + overallScore = in.readDouble(); + jobs = in.readList(JobInfo::new); + isInterim = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(timestamp.getTime()); + out.writeLong(bucketSpan); + out.writeDouble(overallScore); + out.writeList(jobs); + out.writeBoolean(isInterim); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.timeField(Result.TIMESTAMP.getPreferredName(), Result.TIMESTAMP.getPreferredName() + "_string", timestamp.getTime()); + builder.field(BUCKET_SPAN.getPreferredName(), bucketSpan); + builder.field(OVERALL_SCORE.getPreferredName(), overallScore); + builder.field(JOBS.getPreferredName(), jobs); + builder.field(Result.IS_INTERIM.getPreferredName(), isInterim); + builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE); + builder.endObject(); + return builder; + } + + public Date getTimestamp() { + return timestamp; + } + + /** + * Bucketspan expressed in seconds + */ + public long getBucketSpan() { + return bucketSpan; + } + + public double getOverallScore() { + return overallScore; + } + + public List getJobs() { + return jobs; + } + + public boolean isInterim() { + return isInterim; + } + + @Override + public int hashCode() { + return Objects.hash(timestamp, bucketSpan, overallScore, jobs, isInterim); + } + + /** + * Compare all the fields and embedded anomaly records (if any) + */ + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other instanceof OverallBucket == false) { + return false; + } + + OverallBucket that = (OverallBucket) other; + + return Objects.equals(this.timestamp, 
that.timestamp) + && this.bucketSpan == that.bucketSpan + && this.overallScore == that.overallScore + && Objects.equals(this.jobs, that.jobs) + && this.isInterim == that.isInterim; + } + + public static class JobInfo implements ToXContentObject, Writeable, Comparable { + + private static final ParseField MAX_ANOMALY_SCORE = new ParseField("max_anomaly_score"); + + private final String jobId; + private final double maxAnomalyScore; + + public JobInfo(String jobId, double maxAnomalyScore) { + this.jobId = Objects.requireNonNull(jobId); + this.maxAnomalyScore = maxAnomalyScore; + } + + public JobInfo(StreamInput in) throws IOException { + jobId = in.readString(); + maxAnomalyScore = in.readDouble(); + } + + public String getJobId() { + return jobId; + } + + public double getMaxAnomalyScore() { + return maxAnomalyScore; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(jobId); + out.writeDouble(maxAnomalyScore); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(MAX_ANOMALY_SCORE.getPreferredName(), maxAnomalyScore); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, maxAnomalyScore); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other instanceof JobInfo == false) { + return false; + } + JobInfo that = (JobInfo) other; + return Objects.equals(this.jobId, that.jobId) && this.maxAnomalyScore == that.maxAnomalyScore; + } + + @Override + public int compareTo(JobInfo other) { + int result = this.jobId.compareTo(other.jobId); + if (result == 0) { + result = Double.compare(this.maxAnomalyScore, other.maxAnomalyScore); + } + return result; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/PartitionScore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/PartitionScore.java new file mode 100644 index 0000000000000..3d0acc8fde667 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/PartitionScore.java @@ -0,0 +1,131 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
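
`OverallBucket.JobInfo` above records the maximum anomaly score each job contributed to an overall bucket, and its `compareTo` orders entries by job ID first and score second. A small sketch of that natural ordering, with invented job names and assuming this patch's classes are on the classpath:

```java
import java.util.Arrays;
import java.util.List;
import org.elasticsearch.xpack.core.ml.job.results.OverallBucket;

public class JobInfoOrderingExample {
    public static void main(String[] args) {
        List<OverallBucket.JobInfo> jobs = Arrays.asList(
                new OverallBucket.JobInfo("job-b", 30.0),
                new OverallBucket.JobInfo("job-a", 75.0),
                new OverallBucket.JobInfo("job-a", 10.0));
        jobs.sort(null); // natural ordering: job ID first, then max anomaly score
        // Prints: job-a -> 10.0, job-a -> 75.0, job-b -> 30.0
        jobs.forEach(j -> System.out.println(j.getJobId() + " -> " + j.getMaxAnomalyScore()));
    }
}
```
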
+ */ +package org.elasticsearch.xpack.core.ml.job.results; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +public class PartitionScore implements ToXContentObject, Writeable { + public static final ParseField PARTITION_SCORE = new ParseField("partition_score"); + + private final String partitionFieldValue; + private final String partitionFieldName; + private final double initialRecordScore; + private double recordScore; + private double probability; + + public static final ConstructingObjectParser STRICT_PARSER = createParser(false); + public static final ConstructingObjectParser LENIENT_PARSER = createParser(true); + + private static ConstructingObjectParser createParser(boolean ignoreUnknownFields) { + ConstructingObjectParser parser = new ConstructingObjectParser<>(PARTITION_SCORE.getPreferredName(), + ignoreUnknownFields, a -> new PartitionScore((String) a[0], (String) a[1], (Double) a[2], (Double) a[3], (Double) a[4])); + + parser.declareString(ConstructingObjectParser.constructorArg(), AnomalyRecord.PARTITION_FIELD_NAME); + parser.declareString(ConstructingObjectParser.constructorArg(), AnomalyRecord.PARTITION_FIELD_VALUE); + parser.declareDouble(ConstructingObjectParser.constructorArg(), AnomalyRecord.INITIAL_RECORD_SCORE); + parser.declareDouble(ConstructingObjectParser.constructorArg(), AnomalyRecord.RECORD_SCORE); + parser.declareDouble(ConstructingObjectParser.constructorArg(), AnomalyRecord.PROBABILITY); + + return parser; + } + + public PartitionScore(String fieldName, String fieldValue, double initialRecordScore, double recordScore, double probability) { + partitionFieldName = fieldName; + partitionFieldValue = fieldValue; + this.initialRecordScore = initialRecordScore; + this.recordScore = recordScore; + this.probability = probability; + } + + public PartitionScore(StreamInput in) throws IOException { + partitionFieldName = in.readString(); + partitionFieldValue = in.readString(); + initialRecordScore = in.readDouble(); + recordScore = in.readDouble(); + probability = in.readDouble(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(partitionFieldName); + out.writeString(partitionFieldValue); + out.writeDouble(initialRecordScore); + out.writeDouble(recordScore); + out.writeDouble(probability); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(AnomalyRecord.PARTITION_FIELD_NAME.getPreferredName(), partitionFieldName); + builder.field(AnomalyRecord.PARTITION_FIELD_VALUE.getPreferredName(), partitionFieldValue); + builder.field(AnomalyRecord.INITIAL_RECORD_SCORE.getPreferredName(), initialRecordScore); + builder.field(AnomalyRecord.RECORD_SCORE.getPreferredName(), recordScore); + builder.field(AnomalyRecord.PROBABILITY.getPreferredName(), probability); + builder.endObject(); + return builder; + } + + public double getInitialRecordScore() { + return initialRecordScore; + } + + public double getRecordScore() { + return recordScore; + } + + public void setRecordScore(double recordScore) { + this.recordScore = recordScore; + 
} + + public String getPartitionFieldName() { + return partitionFieldName; + } + + public String getPartitionFieldValue() { + return partitionFieldValue; + } + + public double getProbability() { + return probability; + } + + public void setProbability(double probability) { + this.probability = probability; + } + + @Override + public int hashCode() { + return Objects.hash(partitionFieldName, partitionFieldValue, probability, initialRecordScore, recordScore); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other instanceof PartitionScore == false) { + return false; + } + + PartitionScore that = (PartitionScore) other; + + // id is excluded from the test as it is generated by the datastore + return Objects.equals(this.partitionFieldValue, that.partitionFieldValue) + && Objects.equals(this.partitionFieldName, that.partitionFieldName) && (this.probability == that.probability) + && (this.initialRecordScore == that.initialRecordScore) && (this.recordScore == that.recordScore); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java new file mode 100644 index 0000000000000..fb9a697ac4644 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java @@ -0,0 +1,189 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.results; + +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshotField; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; +import java.util.regex.Pattern; + + +/** + * Defines the field names that we use for our results. + * Fields from the raw data with these names are not added to any result. Even + * different types of results will not have raw data fields with reserved names + * added to them, as it could create confusion if in some results a given field + * contains raw data and in others it contains some aspect of our output. + */ +public final class ReservedFieldNames { + private static final Pattern DOT_PATTERN = Pattern.compile("\\."); + + /** + * This array should be updated to contain all the field names that appear + * in any documents we store in our results index. (The reason it's any + * documents we store and not just results documents is that Elasticsearch + * 2.x requires mappings for given fields be consistent across all types + * in a given index.) 
+ */ + private static final String[] RESERVED_FIELD_NAME_ARRAY = { + ElasticsearchMappings.ALL_FIELD_VALUES, + + Job.ID.getPreferredName(), + + AnomalyCause.PROBABILITY.getPreferredName(), + AnomalyCause.OVER_FIELD_NAME.getPreferredName(), + AnomalyCause.OVER_FIELD_VALUE.getPreferredName(), + AnomalyCause.BY_FIELD_NAME.getPreferredName(), + AnomalyCause.BY_FIELD_VALUE.getPreferredName(), + AnomalyCause.CORRELATED_BY_FIELD_VALUE.getPreferredName(), + AnomalyCause.PARTITION_FIELD_NAME.getPreferredName(), + AnomalyCause.PARTITION_FIELD_VALUE.getPreferredName(), + AnomalyCause.FUNCTION.getPreferredName(), + AnomalyCause.FUNCTION_DESCRIPTION.getPreferredName(), + AnomalyCause.TYPICAL.getPreferredName(), + AnomalyCause.ACTUAL.getPreferredName(), + AnomalyCause.INFLUENCERS.getPreferredName(), + AnomalyCause.FIELD_NAME.getPreferredName(), + + AnomalyRecord.PROBABILITY.getPreferredName(), + AnomalyRecord.BY_FIELD_NAME.getPreferredName(), + AnomalyRecord.BY_FIELD_VALUE.getPreferredName(), + AnomalyRecord.CORRELATED_BY_FIELD_VALUE.getPreferredName(), + AnomalyRecord.PARTITION_FIELD_NAME.getPreferredName(), + AnomalyRecord.PARTITION_FIELD_VALUE.getPreferredName(), + AnomalyRecord.FUNCTION.getPreferredName(), + AnomalyRecord.FUNCTION_DESCRIPTION.getPreferredName(), + AnomalyRecord.TYPICAL.getPreferredName(), + AnomalyRecord.ACTUAL.getPreferredName(), + AnomalyRecord.INFLUENCERS.getPreferredName(), + AnomalyRecord.FIELD_NAME.getPreferredName(), + AnomalyRecord.OVER_FIELD_NAME.getPreferredName(), + AnomalyRecord.OVER_FIELD_VALUE.getPreferredName(), + AnomalyRecord.CAUSES.getPreferredName(), + AnomalyRecord.RECORD_SCORE.getPreferredName(), + AnomalyRecord.INITIAL_RECORD_SCORE.getPreferredName(), + AnomalyRecord.BUCKET_SPAN.getPreferredName(), + + Bucket.ANOMALY_SCORE.getPreferredName(), + Bucket.BUCKET_INFLUENCERS.getPreferredName(), + Bucket.BUCKET_SPAN.getPreferredName(), + Bucket.EVENT_COUNT.getPreferredName(), + Bucket.INITIAL_ANOMALY_SCORE.getPreferredName(), + Bucket.PROCESSING_TIME_MS.getPreferredName(), + Bucket.PARTITION_SCORES.getPreferredName(), + Bucket.SCHEDULED_EVENTS.getPreferredName(), + + BucketInfluencer.INITIAL_ANOMALY_SCORE.getPreferredName(), BucketInfluencer.ANOMALY_SCORE.getPreferredName(), + BucketInfluencer.RAW_ANOMALY_SCORE.getPreferredName(), BucketInfluencer.PROBABILITY.getPreferredName(), + + CategoryDefinition.CATEGORY_ID.getPreferredName(), + CategoryDefinition.TERMS.getPreferredName(), + CategoryDefinition.REGEX.getPreferredName(), + CategoryDefinition.MAX_MATCHING_LENGTH.getPreferredName(), + CategoryDefinition.EXAMPLES.getPreferredName(), + + DataCounts.PROCESSED_RECORD_COUNT.getPreferredName(), + DataCounts.PROCESSED_FIELD_COUNT.getPreferredName(), + DataCounts.INPUT_BYTES.getPreferredName(), + DataCounts.INPUT_RECORD_COUNT.getPreferredName(), + DataCounts.INPUT_FIELD_COUNT.getPreferredName(), + DataCounts.INVALID_DATE_COUNT.getPreferredName(), + DataCounts.MISSING_FIELD_COUNT.getPreferredName(), + DataCounts.OUT_OF_ORDER_TIME_COUNT.getPreferredName(), + DataCounts.EMPTY_BUCKET_COUNT.getPreferredName(), + DataCounts.SPARSE_BUCKET_COUNT.getPreferredName(), + DataCounts.BUCKET_COUNT.getPreferredName(), + DataCounts.LATEST_RECORD_TIME.getPreferredName(), + DataCounts.EARLIEST_RECORD_TIME.getPreferredName(), + DataCounts.LAST_DATA_TIME.getPreferredName(), + DataCounts.LATEST_EMPTY_BUCKET_TIME.getPreferredName(), + DataCounts.LATEST_SPARSE_BUCKET_TIME.getPreferredName(), + + Detector.DETECTOR_INDEX.getPreferredName(), + + Influence.INFLUENCER_FIELD_NAME.getPreferredName(), + 
Influence.INFLUENCER_FIELD_VALUES.getPreferredName(), + + Influencer.PROBABILITY.getPreferredName(), + Influencer.INFLUENCER_FIELD_NAME.getPreferredName(), + Influencer.INFLUENCER_FIELD_VALUE.getPreferredName(), + Influencer.INITIAL_INFLUENCER_SCORE.getPreferredName(), + Influencer.INFLUENCER_SCORE.getPreferredName(), + Influencer.BUCKET_SPAN.getPreferredName(), + + ModelPlot.PARTITION_FIELD_NAME.getPreferredName(), ModelPlot.PARTITION_FIELD_VALUE.getPreferredName(), + ModelPlot.OVER_FIELD_NAME.getPreferredName(), ModelPlot.OVER_FIELD_VALUE.getPreferredName(), + ModelPlot.BY_FIELD_NAME.getPreferredName(), ModelPlot.BY_FIELD_VALUE.getPreferredName(), + ModelPlot.MODEL_FEATURE.getPreferredName(), ModelPlot.MODEL_LOWER.getPreferredName(), + ModelPlot.MODEL_UPPER.getPreferredName(), ModelPlot.MODEL_MEDIAN.getPreferredName(), + ModelPlot.ACTUAL.getPreferredName(), + + Forecast.FORECAST_LOWER.getPreferredName(), Forecast.FORECAST_UPPER.getPreferredName(), + Forecast.FORECAST_PREDICTION.getPreferredName(), + Forecast.FORECAST_ID.getPreferredName(), + + //re-use: TIMESTAMP + ForecastRequestStats.START_TIME.getPreferredName(), + ForecastRequestStats.END_TIME.getPreferredName(), + ForecastRequestStats.CREATE_TIME.getPreferredName(), + ForecastRequestStats.EXPIRY_TIME.getPreferredName(), + ForecastRequestStats.MESSAGES.getPreferredName(), + ForecastRequestStats.PROGRESS.getPreferredName(), + ForecastRequestStats.STATUS.getPreferredName(), + ForecastRequestStats.MEMORY_USAGE.getPreferredName(), + + ModelSizeStats.MODEL_BYTES_FIELD.getPreferredName(), + ModelSizeStats.TOTAL_BY_FIELD_COUNT_FIELD.getPreferredName(), + ModelSizeStats.TOTAL_OVER_FIELD_COUNT_FIELD.getPreferredName(), + ModelSizeStats.TOTAL_PARTITION_FIELD_COUNT_FIELD.getPreferredName(), + ModelSizeStats.BUCKET_ALLOCATION_FAILURES_COUNT_FIELD.getPreferredName(), + ModelSizeStats.MEMORY_STATUS_FIELD.getPreferredName(), + ModelSizeStats.LOG_TIME_FIELD.getPreferredName(), + + ModelSnapshot.DESCRIPTION.getPreferredName(), + ModelSnapshotField.SNAPSHOT_ID.getPreferredName(), + ModelSnapshot.SNAPSHOT_DOC_COUNT.getPreferredName(), + ModelSnapshot.LATEST_RECORD_TIME.getPreferredName(), + ModelSnapshot.LATEST_RESULT_TIME.getPreferredName(), + ModelSnapshot.RETAIN.getPreferredName(), + + Result.RESULT_TYPE.getPreferredName(), + Result.TIMESTAMP.getPreferredName(), + Result.IS_INTERIM.getPreferredName() + }; + + /** + * Test if fieldName is one of the reserved names or if it contains dots then + * that the segment before the first dot is not a reserved name. A fieldName + * containing dots represents nested fields in which case we only care about + * the top level. + * + * @param fieldName Document field name. This may contain dots '.' + * @return True if fieldName is not a reserved name or the top level segment + * is not a reserved name. + */ + public static boolean isValidFieldName(String fieldName) { + String[] segments = DOT_PATTERN.split(fieldName); + return !RESERVED_FIELD_NAMES.contains(segments[0]); + } + + /** + * A set of all reserved field names in our results. Fields from the raw + * data with these names are not added to any result. 
+ */ + public static final Set RESERVED_FIELD_NAMES = new HashSet<>(Arrays.asList(RESERVED_FIELD_NAME_ARRAY)); + + private ReservedFieldNames() { + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Result.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Result.java new file mode 100644 index 0000000000000..658f094434710 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Result.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.results; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; + +/** + * A wrapper for concrete result objects plus meta information. + * Also contains common attributes for results. + */ +public class Result { + + /** + * Serialisation fields + */ + public static final ParseField TYPE = new ParseField("result"); + public static final ParseField RESULT_TYPE = new ParseField("result_type"); + public static final ParseField TIMESTAMP = new ParseField("timestamp"); + public static final ParseField IS_INTERIM = new ParseField("is_interim"); + + @Nullable + public final String index; + @Nullable + public final T result; + + public Result(String index, T result) { + this.index = index; + this.result = result; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditMessage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditMessage.java new file mode 100644 index 0000000000000..850d89d0a7282 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditMessage.java @@ -0,0 +1,199 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
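
`ReservedFieldNames.isValidFieldName` above only inspects the segment before the first dot, so a nested field is rejected only when its top-level object name collides with a reserved results field. A short illustration, with made-up field names and assuming this patch's classes are on the classpath:

```java
import org.elasticsearch.xpack.core.ml.job.results.ReservedFieldNames;

public class ReservedFieldNamesExample {
    public static void main(String[] args) {
        System.out.println(ReservedFieldNames.isValidFieldName("airline"));             // true
        System.out.println(ReservedFieldNames.isValidFieldName("bucket_span"));         // false, reserved
        System.out.println(ReservedFieldNames.isValidFieldName("bucket_span.actual"));  // false, top-level segment is reserved
        System.out.println(ReservedFieldNames.isValidFieldName("metrics.bucket_span")); // true, only the top level is checked
    }
}
```
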
+ */ +package org.elasticsearch.xpack.core.ml.notifications; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; + +import java.io.IOException; +import java.util.Date; +import java.util.Objects; + +public class AuditMessage implements ToXContentObject, Writeable { + public static final ParseField TYPE = new ParseField("audit_message"); + + public static final ParseField MESSAGE = new ParseField("message"); + public static final ParseField LEVEL = new ParseField("level"); + public static final ParseField TIMESTAMP = new ParseField("timestamp"); + public static final ParseField NODE_NAME = new ParseField("node_name"); + + public static final ObjectParser PARSER = new ObjectParser<>(TYPE.getPreferredName(), true, AuditMessage::new); + + static { + PARSER.declareString(AuditMessage::setJobId, Job.ID); + PARSER.declareString(AuditMessage::setMessage, MESSAGE); + PARSER.declareField(AuditMessage::setLevel, p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return Level.fromString(p.text()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, LEVEL, ValueType.STRING); + PARSER.declareField(AuditMessage::setTimestamp, p -> { + if (p.currentToken() == Token.VALUE_NUMBER) { + return new Date(p.longValue()); + } else if (p.currentToken() == Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(p.text())); + } + throw new IllegalArgumentException("unexpected token [" + p.currentToken() + "] for [" + TIMESTAMP.getPreferredName() + "]"); + }, TIMESTAMP, ValueType.VALUE); + PARSER.declareString(AuditMessage::setNodeName, NODE_NAME); + } + + private String jobId; + private String message; + private Level level; + private Date timestamp; + private String nodeName; + + private AuditMessage() { + + } + + AuditMessage(String jobId, String message, Level level, String nodeName) { + this.jobId = jobId; + this.message = message; + this.level = level; + timestamp = new Date(); + this.nodeName = nodeName; + } + + public AuditMessage(StreamInput in) throws IOException { + jobId = in.readOptionalString(); + message = in.readOptionalString(); + if (in.readBoolean()) { + level = Level.readFromStream(in); + } + if (in.readBoolean()) { + timestamp = new Date(in.readLong()); + } + nodeName = in.readOptionalString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalString(jobId); + out.writeOptionalString(message); + boolean hasLevel = level != null; + out.writeBoolean(hasLevel); + if (hasLevel) { + level.writeTo(out); + } + boolean hasTimestamp = timestamp != null; + out.writeBoolean(hasTimestamp); + if (hasTimestamp) { + out.writeLong(timestamp.getTime()); + } + out.writeOptionalString(nodeName); + } + + public String getJobId() { + return jobId; + } + + public void setJobId(String jobId) { + this.jobId = jobId; + } + + public String getMessage() { + return 
message; + } + + public void setMessage(String message) { + this.message = message; + } + + public Level getLevel() { + return level; + } + + public void setLevel(Level level) { + this.level = level; + } + + public Date getTimestamp() { + return timestamp; + } + + public void setTimestamp(Date timestamp) { + this.timestamp = timestamp; + } + + public String getNodeName() { + return nodeName; + } + + public void setNodeName(String nodeName) { + this.nodeName = nodeName; + } + + public static AuditMessage newInfo(String jobId, String message, String nodeName) { + return new AuditMessage(jobId, message, Level.INFO, nodeName); + } + + public static AuditMessage newWarning(String jobId, String message, String nodeName) { + return new AuditMessage(jobId, message, Level.WARNING, nodeName); + } + + public static AuditMessage newActivity(String jobId, String message, String nodeName) { + return new AuditMessage(jobId, message, Level.ACTIVITY, nodeName); + } + + public static AuditMessage newError(String jobId, String message, String nodeName) { + return new AuditMessage(jobId, message, Level.ERROR, nodeName); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (jobId != null) { + builder.field(Job.ID.getPreferredName(), jobId); + } + if (message != null) { + builder.field(MESSAGE.getPreferredName(), message); + } + if (level != null) { + builder.field(LEVEL.getPreferredName(), level); + } + if (timestamp != null) { + builder.field(TIMESTAMP.getPreferredName(), timestamp.getTime()); + } + if (nodeName != null) { + builder.field(NODE_NAME.getPreferredName(), nodeName); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, message, level, timestamp); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + AuditMessage other = (AuditMessage) obj; + return Objects.equals(jobId, other.jobId) && + Objects.equals(message, other.message) && + Objects.equals(level, other.level) && + Objects.equals(timestamp, other.timestamp); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditorField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditorField.java new file mode 100644 index 0000000000000..ac83b7c37f529 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditorField.java @@ -0,0 +1,13 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.notifications; + +public final class AuditorField { + public static final String NOTIFICATIONS_INDEX = ".ml-notifications"; + + private AuditorField() {} + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/Level.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/Level.java new file mode 100644 index 0000000000000..f54f9cf268077 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/Level.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.notifications; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Locale; + +public enum Level implements Writeable { + INFO, ACTIVITY, WARNING, ERROR; + + /** + * Case-insensitive from string method. + * + * @param value + * String representation + * @return The condition type + */ + public static Level fromString(String value) { + return Level.valueOf(value.toUpperCase(Locale.ROOT)); + } + + public static Level readFromStream(StreamInput in) throws IOException { + return in.readEnum(Level.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(this); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExceptionsHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExceptionsHelper.java new file mode 100644 index 0000000000000..150c539b1ae3b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ExceptionsHelper.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.utils; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; + +public class ExceptionsHelper { + + private ExceptionsHelper() {} + + public static ResourceNotFoundException missingJobException(String jobId) { + return new ResourceNotFoundException(Messages.getMessage(Messages.JOB_UNKNOWN_ID, jobId)); + } + + public static ResourceAlreadyExistsException jobAlreadyExists(String jobId) { + return new ResourceAlreadyExistsException(Messages.getMessage(Messages.JOB_CONFIG_ID_ALREADY_TAKEN, jobId)); + } + + public static ResourceNotFoundException missingDatafeedException(String datafeedId) { + return new ResourceNotFoundException(Messages.getMessage(Messages.DATAFEED_NOT_FOUND, datafeedId)); + } + + public static ElasticsearchException serverError(String msg) { + return new ElasticsearchException(msg); + } + + public static ElasticsearchException serverError(String msg, Throwable cause) { + return new ElasticsearchException(msg, cause); + } + + public static ElasticsearchStatusException conflictStatusException(String msg, Object... args) { + return new ElasticsearchStatusException(msg, RestStatus.CONFLICT, args); + } + + public static ElasticsearchStatusException badRequestException(String msg, Throwable cause, Object... 
args) { + return new ElasticsearchStatusException(msg, RestStatus.BAD_REQUEST, cause, args); + } + + public static ElasticsearchStatusException badRequestException(String msg, Object... args) { + return new ElasticsearchStatusException(msg, RestStatus.BAD_REQUEST, args); + } + + /** + * Creates an error message that explains there are shard failures, displays info + * for the first failure (shard/reason) and kindly asks to see more info in the logs + */ + public static String shardFailuresToErrorMsg(String jobId, ShardSearchFailure[] shardFailures) { + if (shardFailures == null || shardFailures.length == 0) { + throw new IllegalStateException("Invalid call with null or empty shardFailures"); + } + SearchShardTarget shardTarget = shardFailures[0].shard(); + return "[" + jobId + "] Search request returned shard failures; first failure: shard [" + + (shardTarget == null ? "_na" : shardTarget) + "], reason [" + + shardFailures[0].reason() + "]; see logs for more info"; + } + + /** + * A more REST-friendly Object.requireNonNull() + */ + public static T requireNonNull(T obj, String paramName) { + if (obj == null) { + throw new IllegalArgumentException("[" + paramName + "] must not be null."); + } + return obj; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/Intervals.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/Intervals.java new file mode 100644 index 0000000000000..6cbb84d56fb07 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/Intervals.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.utils; + +/** + * A collection of utilities related to intervals + */ +public class Intervals { + + private Intervals() {} + + /** + * Aligns a {@code value} to a multiple of an {@code interval} by rounding down. + * @param value the value to align to a multiple of the {@code interval} + * @param interval the interval + * @return the multiple of the {@code interval} that is less or equal to the {@code value} + */ + public static long alignToFloor(long value, long interval) { + long result = (value / interval) * interval; + if (result == value || value >= 0) { + return result; + } + return result - interval; + } + + /** + * Aligns a {@code value} to a multiple of an {@code interval} by rounding up. + * @param value the value to align to a multiple of the {@code interval} + * @param interval the interval + * @return the multiple of the {@code interval} that is greater or equal to the {@code value} + */ + public static long alignToCeil(long value, long interval) { + long result = alignToFloor(value, interval); + return result == value ? result : result + interval; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndicesUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndicesUtils.java new file mode 100644 index 0000000000000..c916b6664d201 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndicesUtils.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
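
The `Intervals` helpers above are careful about negative values: plain integer division truncates toward zero, so `alignToFloor` subtracts one extra interval for negative inputs that are not already aligned, and `alignToCeil` builds on that result. A quick worked example, assuming this patch's classes are on the classpath:

```java
import org.elasticsearch.xpack.core.ml.utils.Intervals;

public class IntervalsExample {
    public static void main(String[] args) {
        System.out.println(Intervals.alignToFloor(7, 5));   // 5
        System.out.println(Intervals.alignToFloor(-7, 5));  // -10, not -5
        System.out.println(Intervals.alignToFloor(10, 5));  // 10, already aligned
        System.out.println(Intervals.alignToCeil(7, 5));    // 10
        System.out.println(Intervals.alignToCeil(-7, 5));   // -5
    }
}
```
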
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.utils; + +import org.elasticsearch.action.support.IndicesOptions; + +/** + * Common index related operations that ML requires. + */ +public final class MlIndicesUtils { + + private MlIndicesUtils() { + } + + public static IndicesOptions addIgnoreUnavailable(IndicesOptions indicesOptions) { + return IndicesOptions.fromOptions(true, indicesOptions.allowNoIndices(), indicesOptions.expandWildcardsOpen(), + indicesOptions.expandWildcardsClosed(), indicesOptions); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlStrings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlStrings.java new file mode 100644 index 0000000000000..717420061ee30 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlStrings.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.utils; + +import org.elasticsearch.cluster.metadata.MetaData; + +import java.util.regex.Pattern; + +/** + * Another String utilities class. Class name is prefixed with Ml to avoid confusion + * with one of the myriad String utility classes out there. + */ +public final class MlStrings { + + private static final Pattern NEEDS_QUOTING = Pattern.compile("\\W"); + + /** + * Valid user id pattern. + * Matches a string that contains lower case characters, digits, hyphens, underscores or dots. + * The string may start and end only in lower case characters or digits. + * Note that '.' is allowed but not documented. + */ + private static final Pattern VALID_ID_CHAR_PATTERN = Pattern.compile("[a-z0-9](?:[a-z0-9_\\-\\.]*[a-z0-9])?"); + + public static final int ID_LENGTH_LIMIT = 64; + + private MlStrings() { + } + + /** + * Surrounds with double quotes the given {@code input} if it contains + * any non-word characters. Any double quotes contained in {@code input} + * will be escaped. + * + * @param input any non null string + * @return {@code input} when it does not contain non-word characters, or a new string + * that contains {@code input} surrounded by double quotes otherwise + */ + public static String doubleQuoteIfNotAlphaNumeric(String input) { + if (!NEEDS_QUOTING.matcher(input).find()) { + return input; + } + + StringBuilder quoted = new StringBuilder(); + quoted.append('\"'); + + for (int i = 0; i < input.length(); ++i) { + char c = input.charAt(i); + if (c == '\"' || c == '\\') { + quoted.append('\\'); + } + quoted.append(c); + } + + quoted.append('\"'); + return quoted.toString(); + } + + public static boolean isValidId(String id) { + return id != null && VALID_ID_CHAR_PATTERN.matcher(id).matches() && !MetaData.ALL.equals(id); + } + + /** + * Checks if the given {@code id} has a valid length. + * We keep IDs in a length shorter or equal than {@link #ID_LENGTH_LIMIT} + * in order to avoid unfriendly errors when storing docs with + * more than 512 bytes. 
+ * + * @param id the id + * @return {@code true} if the id has a valid length + */ + public static boolean hasValidLengthForId(String id) { + return id.length() <= ID_LENGTH_LIMIT; + } + + /** + * Returns the path to the parent field if {@code fieldPath} is nested + * or {@code fieldPath} itself. + * + * @param fieldPath a field path + * @return the path to the parent field if {code fieldPath} is nested + * or {@code} fieldPath itself + */ + public static String getParentField(String fieldPath) { + if (fieldPath == null) { + return fieldPath; + } + int lastIndexOfDot = fieldPath.lastIndexOf('.'); + if (lastIndexOfDot < 0) { + return fieldPath; + } + return fieldPath.substring(0, lastIndexOfDot); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/NameResolver.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/NameResolver.java new file mode 100644 index 0000000000000..f737a3d9ad7d0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/NameResolver.java @@ -0,0 +1,129 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.utils; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.regex.Regex; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * Expands an expression into the set of matching names. + * It optionally supports aliases to the name set. + */ +public abstract class NameResolver { + + private final Function notFoundExceptionSupplier; + + protected NameResolver(Function notFoundExceptionSupplier) { + this.notFoundExceptionSupplier = Objects.requireNonNull(notFoundExceptionSupplier); + } + + /** + * Expands an expression into the set of matching names. + * For example, given a set of names ["foo-1", "foo-2", "bar-1", bar-2"], + * expressions resolve follows: + *

+ * <ul>
+ *     <li>"foo-1" : ["foo-1"]</li>
+ *     <li>"bar-1" : ["bar-1"]</li>
+ *     <li>"foo-1,foo-2" : ["foo-1", "foo-2"]</li>
+ *     <li>"foo-*" : ["foo-1", "foo-2"]</li>
+ *     <li>"*-1" : ["bar-1", "foo-1"]</li>
+ *     <li>"*" : ["bar-1", "bar-2", "foo-1", "foo-2"]</li>
+ *     <li>"_all" : ["bar-1", "bar-2", "foo-1", "foo-2"]</li>
+ * </ul>
+ * + * @param expression the expression to resolve + * @param allowNoMatch if {@code false}, an error is thrown when no name matches the {@code expression}. + * This only applies to wild card expressions, if {@code expression} is not a + * wildcard then setting this true will not suppress the exception + * @return the sorted set of matching names + */ + public SortedSet expand(String expression, boolean allowNoMatch) { + SortedSet result = new TreeSet<>(); + if (MetaData.ALL.equals(expression) || Regex.isMatchAllPattern(expression)) { + result.addAll(nameSet()); + } else { + String[] tokens = Strings.tokenizeToStringArray(expression, ","); + for (String token : tokens) { + if (Regex.isSimpleMatchPattern(token)) { + List expanded = keys().stream() + .filter(key -> Regex.simpleMatch(token, key)) + .map(this::lookup) + .flatMap(List::stream) + .collect(Collectors.toList()); + if (expanded.isEmpty() && allowNoMatch == false) { + throw notFoundExceptionSupplier.apply(token); + } + result.addAll(expanded); + } else { + List matchingNames = lookup(token); + // allowNoMatch only applies to wildcard expressions, + // this isn't so don't check the allowNoMatch here + if (matchingNames.isEmpty()) { + throw notFoundExceptionSupplier.apply(token); + } + result.addAll(matchingNames); + } + } + } + if (result.isEmpty() && allowNoMatch == false) { + throw notFoundExceptionSupplier.apply(expression); + } + return result; + } + + /** + * @return the set of registered keys + */ + protected abstract Set keys(); + + /** + * @return the set of all names + */ + protected abstract Set nameSet(); + + /** + * Looks up a key and returns the matching names. + * @param key the key to look up + * @return a list of the matching names or {@code null} when no matching names exist + */ + protected abstract List lookup(String key); + + /** + * Creates a {@code NameResolver} that has no aliases + * @param nameSet the set of all names + * @param notFoundExceptionSupplier a supplier of {@link ResourceNotFoundException} to be used when an expression matches no name + * @return the unaliased {@code NameResolver} + */ + public static NameResolver newUnaliased(Set nameSet, Function notFoundExceptionSupplier) { + return new NameResolver(notFoundExceptionSupplier) { + @Override + protected Set keys() { + return nameSet; + } + + @Override + protected Set nameSet() { + return nameSet; + } + + @Override + protected List lookup(String key) { + return nameSet.contains(key) ? Collections.singletonList(key) : Collections.emptyList(); + } + }; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ToXContentParams.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ToXContentParams.java new file mode 100644 index 0000000000000..d120e8cf6685e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/ToXContentParams.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.utils; + + +/** + * Parameters used by machine learning for controlling X Content serialisation. + */ +public final class ToXContentParams { + + /** + * Parameter to indicate whether we are serialising to X Content for cluster state output. 
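
The `NameResolver` added above is the generic machinery for expanding name expressions (wildcards, comma-separated lists, `_all`); `newUnaliased` gives the simplest resolver, where each name only matches itself. A small sketch of the `expand` contract, with invented names and assuming this patch's classes plus the Elasticsearch core classes are on the classpath:

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.xpack.core.ml.utils.NameResolver;

public class NameResolverExample {
    public static void main(String[] args) {
        Set<String> names = new HashSet<>(Arrays.asList("foo-1", "foo-2", "bar-1", "bar-2"));
        NameResolver resolver = NameResolver.newUnaliased(names,
                expression -> new ResourceNotFoundException("no name matches [" + expression + "]"));

        System.out.println(resolver.expand("foo-*", false));           // [foo-1, foo-2]
        System.out.println(resolver.expand("foo-1,bar-1", false));     // [bar-1, foo-1] (results are sorted)
        System.out.println(resolver.expand("_all", false));            // [bar-1, bar-2, foo-1, foo-2]
        System.out.println(resolver.expand("does-not-exist-*", true)); // [] because allowNoMatch is true
    }
}
```
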
+ */ + public static final String FOR_CLUSTER_STATE = "for_cluster_state"; + + private ToXContentParams() { + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/DateTimeFormatterTimestampConverter.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/DateTimeFormatterTimestampConverter.java new file mode 100644 index 0000000000000..556c2f37b485d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/DateTimeFormatterTimestampConverter.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.utils.time; + +import org.elasticsearch.cli.SuppressForbidden; + +import java.time.DateTimeException; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeFormatterBuilder; +import java.time.temporal.ChronoField; +import java.time.temporal.TemporalAccessor; + +/** + *

+ * <p> This class implements {@link TimestampConverter} using the {@link DateTimeFormatter} + * of the Java 8 time API for parsing timestamps and other classes of that API for converting + * timestamps to epoch times. + * + *

Objects of this class are immutable and thread-safe + * + */ +public class DateTimeFormatterTimestampConverter implements TimestampConverter { + private final DateTimeFormatter formatter; + private final boolean hasTimeZone; + private final ZoneId defaultZoneId; + + private DateTimeFormatterTimestampConverter(DateTimeFormatter dateTimeFormatter, boolean hasTimeZone, ZoneId defaultTimezone) { + formatter = dateTimeFormatter; + this.hasTimeZone = hasTimeZone; + defaultZoneId = defaultTimezone; + } + + /** + * Creates a formatter according to the given pattern + * @param pattern the pattern to be used by the formatter, not null. + * See {@link DateTimeFormatter} for the syntax of the accepted patterns + * @param defaultTimezone the timezone to be used for dates without timezone information. + * @return a {@code TimestampConverter} + * @throws IllegalArgumentException if the pattern is invalid or cannot produce a full timestamp + * (e.g. contains a date but not a time) + */ + public static TimestampConverter ofPattern(String pattern, ZoneId defaultTimezone) { + DateTimeFormatter formatter = new DateTimeFormatterBuilder() + .parseLenient() + .appendPattern(pattern) + .parseDefaulting(ChronoField.YEAR_OF_ERA, LocalDate.now(defaultTimezone).getYear()) + .toFormatter(); + + String now = formatter.format(ZonedDateTime.ofInstant(Instant.ofEpochSecond(0), ZoneOffset.UTC)); + try { + TemporalAccessor parsed = formatter.parse(now); + boolean hasTimeZone = parsed.isSupported(ChronoField.INSTANT_SECONDS); + if (hasTimeZone) { + Instant.from(parsed); + } + else { + LocalDateTime.from(parsed); + } + return new DateTimeFormatterTimestampConverter(formatter, hasTimeZone, defaultTimezone); + } + catch (DateTimeException e) { + throw new IllegalArgumentException("Timestamp cannot be derived from pattern: " + pattern); + } + } + + @Override + public long toEpochSeconds(String timestamp) { + return toInstant(timestamp).getEpochSecond(); + } + + @Override + public long toEpochMillis(String timestamp) { + return toInstant(timestamp).toEpochMilli(); + } + + private Instant toInstant(String timestamp) { + TemporalAccessor parsed = formatter.parse(timestamp); + if (hasTimeZone) { + return Instant.from(parsed); + } + return toInstantUnsafelyIgnoringAmbiguity(parsed); + } + + @SuppressForbidden(reason = "TODO https://github.com/elastic/x-pack-elasticsearch/issues/3810") + private Instant toInstantUnsafelyIgnoringAmbiguity(TemporalAccessor parsed) { + return LocalDateTime.from(parsed).atZone(defaultZoneId).toInstant(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimeUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimeUtils.java new file mode 100644 index 0000000000000..6b334972366c9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimeUtils.java @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
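
`DateTimeFormatterTimestampConverter.ofPattern` above validates at construction time that the pattern can yield a complete timestamp, and falls back to the supplied default zone when the pattern carries no zone information. A short usage sketch; the pattern and timestamps are invented, and this patch's classes are assumed to be on the classpath:

```java
import java.time.ZoneOffset;
import org.elasticsearch.xpack.core.ml.utils.time.DateTimeFormatterTimestampConverter;
import org.elasticsearch.xpack.core.ml.utils.time.TimestampConverter;

public class TimestampConverterExample {
    public static void main(String[] args) {
        TimestampConverter converter =
                DateTimeFormatterTimestampConverter.ofPattern("yyyy-MM-dd HH:mm:ss", ZoneOffset.UTC);
        System.out.println(converter.toEpochSeconds("2018-05-01 00:00:00")); // 1525132800
        System.out.println(converter.toEpochMillis("2018-05-01 00:00:30"));  // 1525132830000

        try {
            // Date-only patterns cannot produce a full timestamp and are rejected up front.
            DateTimeFormatterTimestampConverter.ofPattern("yyyy-MM-dd", ZoneOffset.UTC);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // Timestamp cannot be derived from pattern: yyyy-MM-dd
        }
    }
}
```
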
+ */ +package org.elasticsearch.xpack.core.ml.utils.time; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.mapper.DateFieldMapper; + +import java.util.concurrent.TimeUnit; + +public final class TimeUtils { + private TimeUtils() { + // Do nothing + } + + /** + * First tries to parse the date first as a Long and convert that to an + * epoch time. If the long number has more than 10 digits it is considered a + * time in milliseconds else if 10 or less digits it is in seconds. If that + * fails it tries to parse the string using + * {@link DateFieldMapper#DEFAULT_DATE_TIME_FORMATTER} + * + * If the date string cannot be parsed -1 is returned. + * + * @return The epoch time in milliseconds or -1 if the date cannot be + * parsed. + */ + public static long dateStringToEpoch(String date) { + try { + long epoch = Long.parseLong(date); + if (date.trim().length() <= 10) { // seconds + return epoch * 1000; + } else { + return epoch; + } + } catch (NumberFormatException nfe) { + // not a number + } + + try { + return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseMillis(date); + } catch (IllegalArgumentException e) { + } + // Could not do the conversion + return -1; + } + + /** + * Checks that the given {@code timeValue} is a non-negative multiple value of the {@code baseUnit}. + * + *

    + * <ul>
    + *   <li>400ms is valid for base unit of seconds</li>
    + *   <li>450ms is invalid for base unit of seconds but valid for base unit of milliseconds</li>
    + * </ul>
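    + * <p>
    + * A hypothetical usage sketch (the {@code ParseField} name {@code "frequency"} is illustrative, not part of this class):
    + * <pre>{@code
    + * TimeUtils.checkNonNegativeMultiple(TimeValue.timeValueSeconds(30), TimeUnit.SECONDS, new ParseField("frequency"));
    + * // 30s is a non-negative multiple of 1s, so no exception is thrown
    + * }</pre>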
+ */ + public static void checkNonNegativeMultiple(TimeValue timeValue, TimeUnit baseUnit, ParseField field) { + checkNonNegative(timeValue, field); + checkMultiple(timeValue, baseUnit, field); + } + + /** + * Checks that the given {@code timeValue} is a positive multiple value of the {@code baseUnit}. + * + *
    + * <ul>
    + *   <li>400ms is valid for base unit of seconds</li>
    + *   <li>450ms is invalid for base unit of seconds but valid for base unit of milliseconds</li>
    + * </ul>
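    + * <p>
    + * A hypothetical usage sketch (the {@code ParseField} name {@code "bucket_span"} is illustrative, not part of this class):
    + * <pre>{@code
    + * TimeUtils.checkPositiveMultiple(TimeValue.timeValueSeconds(0), TimeUnit.SECONDS, new ParseField("bucket_span"));
    + * // throws IllegalArgumentException because the value must be greater than 0
    + * }</pre>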
+ */ + public static void checkPositiveMultiple(TimeValue timeValue, TimeUnit baseUnit, ParseField field) { + checkPositive(timeValue, field); + checkMultiple(timeValue, baseUnit, field); + } + + private static void checkNonNegative(TimeValue timeValue, ParseField field) { + long nanos = timeValue.getNanos(); + if (nanos < 0) { + throw new IllegalArgumentException(field.getPreferredName() + " cannot be less than 0. Value = " + timeValue.toString()); + } + } + + private static void checkPositive(TimeValue timeValue, ParseField field) { + long nanos = timeValue.getNanos(); + if (nanos <= 0) { + throw new IllegalArgumentException(field.getPreferredName() + " cannot be less or equal than 0. Value = " + + timeValue.toString()); + } + } + + /** + * Check the given {@code timeValue} is a multiple of the {@code baseUnit} + */ + public static void checkMultiple(TimeValue timeValue, TimeUnit baseUnit, ParseField field) { + long nanos = timeValue.getNanos(); + TimeValue base = new TimeValue(1, baseUnit); + long baseNanos = base.getNanos(); + if (nanos % baseNanos != 0) { + throw new IllegalArgumentException(field.getPreferredName() + " has to be a multiple of " + base.toString() + "; actual was '" + + timeValue.toString() + "'"); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimestampConverter.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimestampConverter.java new file mode 100644 index 0000000000000..4d23fe891a009 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimestampConverter.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.utils.time; + +import java.time.format.DateTimeParseException; + +/** + * A converter that enables conversions of textual timestamps to epoch seconds + * or milliseconds according to a given pattern. + */ +public interface TimestampConverter { + /** + * Converts the a textual timestamp into an epoch in seconds + * + * @param timestamp the timestamp to convert, not null. The timestamp is expected to + * be formatted according to the pattern of the formatter. In addition, the pattern is + * assumed to contain both date and time information. + * @return the epoch in seconds for the given timestamp + * @throws DateTimeParseException if unable to parse the given timestamp + */ + long toEpochSeconds(String timestamp); + + /** + * Converts the a textual timestamp into an epoch in milliseconds + * + * @param timestamp the timestamp to convert, not null. The timestamp is expected to + * be formatted according to the pattern of the formatter. In addition, the pattern is + * assumed to contain both date and time information. 
+ * @return the epoch in milliseconds for the given timestamp + * @throws DateTimeParseException if unable to parse the given timestamp + */ + long toEpochMillis(String timestamp); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoredSystem.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoredSystem.java new file mode 100644 index 0000000000000..7567c275156f8 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoredSystem.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.monitoring; + +import java.util.Arrays; +import java.util.Locale; +import java.util.stream.Stream; + +public enum MonitoredSystem { + + ES("es"), + KIBANA("kibana"), + LOGSTASH("logstash"), + BEATS("beats"), + UNKNOWN("unknown"); + + private final String system; + + MonitoredSystem(String system) { + this.system = system; + } + + public String getSystem() { + return system; + } + + public static MonitoredSystem fromSystem(String system) { + switch (system.toLowerCase(Locale.ROOT)) { + case "es": + return ES; + case "kibana": + return KIBANA; + case "logstash": + return LOGSTASH; + case "beats": + return BEATS; + default: + // Return an "unknown" monitored system + // that can easily be filtered out if + // a node receives documents for a new + // system it does not know yet + return UNKNOWN; + } + } + + /** + * Get all {@code MonitoredSystem}s except {@linkplain MonitoredSystem#UNKNOWN UNKNOWN}. + * + * @return Never {@code null}. A filtered {@code Stream} that removes the {@code UNKNOWN} {@code MonitoredSystem}. + */ + public static Stream allSystems() { + return Arrays.stream(MonitoredSystem.values()).filter(s -> s != MonitoredSystem.UNKNOWN); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java new file mode 100644 index 0000000000000..a8cf5b895fb35 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.monitoring; + +import org.elasticsearch.Version; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; + +public class MonitoringFeatureSetUsage extends XPackFeatureSet.Usage { + + @Nullable + private Boolean collectionEnabled; + @Nullable + private Map exporters; + + public MonitoringFeatureSetUsage(StreamInput in) throws IOException { + super(in); + exporters = in.readMap(); + if (in.getVersion().onOrAfter(Version.V_6_3_0)) { + collectionEnabled = in.readOptionalBoolean(); + } + } + + public MonitoringFeatureSetUsage(boolean available, boolean enabled, + boolean collectionEnabled, Map exporters) { + super(XPackField.MONITORING, available, enabled); + this.exporters = exporters; + this.collectionEnabled = collectionEnabled; + } + + public Map getExporters() { + return exporters == null ? Collections.emptyMap() : Collections.unmodifiableMap(exporters); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(exporters); + if (out.getVersion().onOrAfter(Version.V_6_3_0)) { + out.writeOptionalBoolean(collectionEnabled); + } + } + + @Override + protected void innerXContent(XContentBuilder builder, Params params) throws IOException { + super.innerXContent(builder, params); + if (collectionEnabled != null) { + builder.field("collection_enabled", collectionEnabled); + } + if (exporters != null) { + builder.field("enabled_exporters", exporters); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringField.java new file mode 100644 index 0000000000000..9174a9d662410 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringField.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.monitoring; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.unit.TimeValue; + +import static org.elasticsearch.common.settings.Setting.timeSetting; + +public final class MonitoringField { + + /** + * The minimum amount of time allowed for the history duration. + */ + public static final TimeValue HISTORY_DURATION_MINIMUM = TimeValue.timeValueHours(24); + /** + * The default retention duration of the monitoring history data. + *

+ * Expected values: + *

    + * <ul>
    + *     <li>Default: 7 days</li>
    + *     <li>Minimum: 1 day</li>
    + * </ul>
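    + * <p>
    + * A hypothetical override sketch (the {@code 3d} value is purely illustrative):
    + * <pre>{@code
    + * Settings settings = Settings.builder().put("xpack.monitoring.history.duration", "3d").build();
    + * TimeValue duration = MonitoringField.HISTORY_DURATION.get(settings); // resolves to 3 days (illustrative value)
    + * }</pre>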
+ * + * @see MonitoringField#HISTORY_DURATION_MINIMUM + */ + public static final Setting HISTORY_DURATION = timeSetting("xpack.monitoring.history.duration", + TimeValue.timeValueHours(7 * 24), // default value (7 days) + HISTORY_DURATION_MINIMUM, // minimum value + Setting.Property.Dynamic, Setting.Property.NodeScope); + + private MonitoringField() {} +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkAction.java new file mode 100644 index 0000000000000..24dc9be34731d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkAction.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.monitoring.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public class MonitoringBulkAction extends Action { + + public static final MonitoringBulkAction INSTANCE = new MonitoringBulkAction(); + public static final String NAME = "cluster:admin/xpack/monitoring/bulk"; + + private MonitoringBulkAction() { + super(NAME); + } + + @Override + public MonitoringBulkRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new MonitoringBulkRequestBuilder(client); + } + + @Override + public MonitoringBulkResponse newResponse() { + return new MonitoringBulkResponse(); + } +} + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkDoc.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkDoc.java new file mode 100644 index 0000000000000..9ddc2c6d9ec69 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkDoc.java @@ -0,0 +1,155 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.monitoring.action; + +import org.elasticsearch.Version; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; +import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; +import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils; + +import java.io.IOException; +import java.util.Objects; + +public class MonitoringBulkDoc implements Writeable { + + private final MonitoredSystem system; + private final String type; + private final String id; + private final long timestamp; + private final long intervalMillis; + private final BytesReference source; + private final XContentType xContentType; + + public MonitoringBulkDoc(final MonitoredSystem system, + final String type, + @Nullable final String id, + final long timestamp, + final long intervalMillis, + final BytesReference source, + final XContentType xContentType) { + + this.system = Objects.requireNonNull(system); + this.type = Objects.requireNonNull(type); + // We allow strings to be "" because Logstash 5.2 - 5.3 would submit empty _id values for time-based documents + this.id = Strings.isNullOrEmpty(id) ? null : id; + this.timestamp = timestamp; + this.intervalMillis = intervalMillis; + this.source = Objects.requireNonNull(source); + this.xContentType = Objects.requireNonNull(xContentType); + } + + /** + * Read from a stream. + */ + public static MonitoringBulkDoc readFrom(StreamInput in) throws IOException { + final MonitoredSystem system = MonitoredSystem.fromSystem(in.readOptionalString()); + + if (in.getVersion().before(Version.V_6_0_0_rc1)) { + in.readOptionalString(); // Monitoring version, removed in 6.0 rc1 + in.readOptionalString(); // Cluster UUID, removed in 6.0 rc1 + } + + final long timestamp = in.readVLong(); + + if (in.getVersion().before(Version.V_6_0_0_rc1)) { + in.readOptionalWriteable(MonitoringDoc.Node::new);// Source node, removed in 6.0 rc1 + MonitoringIndex.readFrom(in);// Monitoring index, removed in 6.0 rc1 + } + + final String type = in.readOptionalString(); + final String id = in.readOptionalString(); + final BytesReference source = in.readBytesReference(); + final XContentType xContentType = (source != BytesArray.EMPTY) ? 
in.readEnum(XContentType.class) : XContentType.JSON; + + long interval = 0L; + if (in.getVersion().onOrAfter(Version.V_6_0_0_rc1)) { + interval = in.readVLong(); + } + return new MonitoringBulkDoc(system, type, id, timestamp, interval, source, xContentType); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalString(system.getSystem()); + if (out.getVersion().before(Version.V_6_0_0_rc1)) { + out.writeOptionalString(MonitoringTemplateUtils.TEMPLATE_VERSION); + out.writeOptionalString(null); + } + out.writeVLong(timestamp); + if (out.getVersion().before(Version.V_6_0_0_rc1)) { + out.writeOptionalWriteable(null); + MonitoringIndex.IGNORED_DATA.writeTo(out); + } + out.writeOptionalString(type); + out.writeOptionalString(id); + out.writeBytesReference(source); + if (source != BytesArray.EMPTY) { + out.writeEnum(xContentType); + } + if (out.getVersion().onOrAfter(Version.V_6_0_0_rc1)) { + out.writeVLong(intervalMillis); + } + } + + public MonitoredSystem getSystem() { + return system; + } + + public String getType() { + return type; + } + + public String getId() { + return id; + } + + public long getTimestamp() { + return timestamp; + } + + public long getIntervalMillis() { + return intervalMillis; + } + + public BytesReference getSource() { + return source; + } + + public XContentType getXContentType() { + return xContentType; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + MonitoringBulkDoc that = (MonitoringBulkDoc) o; + return timestamp == that.timestamp + && intervalMillis == that.intervalMillis + && system == that.system + && Objects.equals(type, that.type) + && Objects.equals(id, that.id) + && Objects.equals(source, that.source) + && xContentType == that.xContentType; + } + + @Override + public int hashCode() { + return Objects.hash(system, type, id, timestamp, intervalMillis, source, xContentType); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java new file mode 100644 index 0000000000000..adb6b04db35b2 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java @@ -0,0 +1,118 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.monitoring.action; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.client.Requests; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * A monitoring bulk request holds one or more {@link MonitoringBulkDoc}s. + *

+ * Every monitoring document added to the request is associated to a {@link MonitoredSystem}. The monitored system is used + * to resolve the index name in which the document will be indexed into. + */ +public class MonitoringBulkRequest extends ActionRequest { + + private final List docs = new ArrayList<>(); + + /** + * @return the list of {@link MonitoringBulkDoc} to be indexed + */ + public Collection getDocs() { + return Collections.unmodifiableCollection(new ArrayList<>(this.docs)); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (docs.isEmpty()) { + validationException = addValidationError("no monitoring documents added", validationException); + } + for (int i = 0; i < docs.size(); i++) { + MonitoringBulkDoc doc = docs.get(i); + if (doc.getSource() == null || doc.getSource().length() == 0) { + validationException = addValidationError("source is missing for monitoring document [" + i + "]", validationException); + } + } + return validationException; + } + + /** + * Adds a monitoring document to the list of documents to be indexed. + */ + public MonitoringBulkRequest add(MonitoringBulkDoc doc) { + docs.add(doc); + return this; + } + + /** + * Parses a monitoring bulk request and builds the list of documents to be indexed. + */ + public MonitoringBulkRequest add(final MonitoredSystem system, + final String defaultType, + final BytesReference content, + final XContentType xContentType, + final long timestamp, + final long intervalMillis) throws IOException { + + // MonitoringBulkRequest accepts a body request that has the same format as the BulkRequest: + // instead of duplicating the parsing logic here we use a new BulkRequest instance to parse the content. 
+ final BulkRequest bulkRequest = Requests.bulkRequest().add(content, null, defaultType, xContentType); + + for (DocWriteRequest request : bulkRequest.requests()) { + if (request instanceof IndexRequest) { + final IndexRequest indexRequest = (IndexRequest) request; + + // we no longer accept non-timestamped indexes from Kibana, LS, or Beats because we do not use the data + // and it was duplicated anyway; by simply dropping it, we allow BWC for older clients that still send it + if (MonitoringIndex.from(indexRequest.index()) != MonitoringIndex.TIMESTAMPED) { + continue; + } + + final BytesReference source = indexRequest.source(); + if (source.length() == 0) { + throw new IllegalArgumentException("source is missing for monitoring document [" + + indexRequest.index() + "][" + indexRequest.type() + "][" + indexRequest.id() + "]"); + } + + // builds a new monitoring document based on the index request + add(new MonitoringBulkDoc(system, indexRequest.type(), indexRequest.id(), timestamp, intervalMillis, source, xContentType)); + } else { + throw new IllegalArgumentException("monitoring bulk requests should only contain index requests"); + } + } + return this; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + docs.addAll(in.readList(MonitoringBulkDoc::readFrom)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeList(docs); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequestBuilder.java new file mode 100644 index 0000000000000..e028b2ffc7d68 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequestBuilder.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.monitoring.action; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; + +import java.io.IOException; + +public class MonitoringBulkRequestBuilder + extends ActionRequestBuilder { + + public MonitoringBulkRequestBuilder(ElasticsearchClient client) { + super(client, MonitoringBulkAction.INSTANCE, new MonitoringBulkRequest()); + } + + public MonitoringBulkRequestBuilder add(MonitoringBulkDoc doc) { + request.add(doc); + return this; + } + + public MonitoringBulkRequestBuilder add(final MonitoredSystem system, + final String type, + final BytesReference content, + final XContentType xContentType, + final long timestamp, + final long intervalMillis) throws IOException { + request.add(system, type, content, xContentType, timestamp, intervalMillis); + return this; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkResponse.java new file mode 100644 index 0000000000000..12192da0bb22f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkResponse.java @@ -0,0 +1,158 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.monitoring.action; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.RestStatus; + +import java.io.IOException; +import java.util.Objects; + +public class MonitoringBulkResponse extends ActionResponse { + + private long tookInMillis; + private Error error; + private boolean ignored; + + public MonitoringBulkResponse() { + } + + public MonitoringBulkResponse(final long tookInMillis, final boolean ignored) { + this.tookInMillis = tookInMillis; + this.ignored = ignored; + } + + public MonitoringBulkResponse(final long tookInMillis, final Error error) { + this(tookInMillis, false); + this.error = error; + } + + public TimeValue getTook() { + return new TimeValue(tookInMillis); + } + + public long getTookInMillis() { + return tookInMillis; + } + + /** + * Determine if the request was ignored. + * + * @return {@code true} if the request was ignored because collection was disabled. + */ + public boolean isIgnored() { + return ignored; + } + + /** + * Returns HTTP status + * + *

    + * <ul>
    + *   <li>{@link RestStatus#OK} if monitoring bulk request was successful (or ignored because collection is disabled)</li>
    + *   <li>{@link RestStatus#INTERNAL_SERVER_ERROR} if monitoring bulk request was partially successful or failed completely</li>
    + * </ul>
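    + * <p>
    + * A hypothetical caller-side sketch (the {@code logger} is illustrative, not part of this class):
    + * <pre>{@code
    + * if (response.status() != RestStatus.OK) {
    + *     logger.warn("monitoring bulk failed: {}", response.getError());
    + * }
    + * }</pre>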
+ */ + public RestStatus status() { + if (error == null) { + return RestStatus.OK; + } + + return RestStatus.INTERNAL_SERVER_ERROR; + } + + public Error getError() { + return error; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + tookInMillis = in.readVLong(); + error = in.readOptionalWriteable(Error::new); + + if (in.getVersion().onOrAfter(Version.V_6_3_0)) { + ignored = in.readBoolean(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVLong(tookInMillis); + out.writeOptionalWriteable(error); + + if (out.getVersion().onOrAfter(Version.V_6_3_0)) { + out.writeBoolean(ignored); + } + } + + public static class Error implements Writeable, ToXContentObject { + + private final Throwable cause; + private final RestStatus status; + + public Error(Throwable t) { + cause = Objects.requireNonNull(t); + status = ExceptionsHelper.status(t); + } + + public Error(StreamInput in) throws IOException { + this(in.readException()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeException(getCause()); + } + + /** + * The failure message. + */ + public String getMessage() { + return this.cause.toString(); + } + + /** + * The rest status. + */ + public RestStatus getStatus() { + return this.status; + } + + /** + * The actual cause of the failure. + */ + public Throwable getCause() { + return cause; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + ElasticsearchException.generateThrowableXContent(builder, params, cause); + builder.endObject(); + return builder; + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder("Error ["); + sb.append("cause=").append(cause); + sb.append(", status=").append(status); + sb.append(']'); + return sb.toString(); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringIndex.java new file mode 100644 index 0000000000000..8f80b523f0735 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringIndex.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.monitoring.action; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; + +/** + * {@code MonitoringIndex} represents the receivable index from any request. + *

+ * This allows external systems to provide details for an index without having to know its exact name. + */ +public enum MonitoringIndex implements Writeable { + + /** + * A formerly used index format, which is no longer relevant. This is maintained to allow BWC for older clients. + */ + IGNORED_DATA { + @Override + public boolean matchesIndexName(String indexName) { + return false; + } + }, + + /** + * Timestamped data that drives the charts (e.g., memory statistics). + */ + TIMESTAMPED { + @Override + public boolean matchesIndexName(String indexName) { + return Strings.isEmpty(indexName); + } + }; + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeByte((byte)ordinal()); + } + + public static MonitoringIndex readFrom(StreamInput in) throws IOException { + return values()[in.readByte()]; + } + + /** + * Determine if the {@code indexName} matches {@code this} monitoring index. + * + * @param indexName The name of the index. + * @return {@code true} if {@code this} matches the {@code indexName} + */ + public abstract boolean matchesIndexName(String indexName); + + /** + * Find the {@link MonitoringIndex} to use for the request. + * + * @param indexName The name of the index. + * @return Never {@code null}. + * @throws IllegalArgumentException if {@code indexName} is unrecognized + */ + public static MonitoringIndex from(String indexName) { + if (TIMESTAMPED.matchesIndexName(indexName)) { + return TIMESTAMPED; + } else if ("_data".equals(indexName)) { + // we explicitly ignore this where it's used to maintain binary BWC + return IGNORED_DATA; + } + + throw new IllegalArgumentException("unrecognized index name [" + indexName + "]"); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/client/MonitoringClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/client/MonitoringClient.java new file mode 100644 index 0000000000000..2dba6e6a4664f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/client/MonitoringClient.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.monitoring.client; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkAction; +import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkRequest; +import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkRequestBuilder; +import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkResponse; + +import java.util.Map; + +public class MonitoringClient { + + private final Client client; + + @Inject + public MonitoringClient(Client client) { + this.client = client; + } + + + /** + * Creates a request builder that bulk index monitoring documents. + * + * @return The request builder + */ + public MonitoringBulkRequestBuilder prepareMonitoringBulk() { + return new MonitoringBulkRequestBuilder(client); + } + + /** + * Executes a bulk of index operations that concern monitoring documents. 
+ * + * @param request The monitoring bulk request + * @param listener A listener to be notified with a result + */ + public void bulk(MonitoringBulkRequest request, ActionListener listener) { + client.execute(MonitoringBulkAction.INSTANCE, request, listener); + } + + /** + * Executes a bulk of index operations that concern monitoring documents. + * + * @param request The monitoring bulk request + */ + public ActionFuture bulk(MonitoringBulkRequest request) { + return client.execute(MonitoringBulkAction.INSTANCE, request); + } + + public MonitoringClient filterWithHeader(Map headers) { + return new MonitoringClient(client.filterWithHeader(headers)); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/exporter/MonitoringDoc.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/exporter/MonitoringDoc.java new file mode 100644 index 0000000000000..93785053889c8 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/exporter/MonitoringDoc.java @@ -0,0 +1,253 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.monitoring.exporter; + +import org.elasticsearch.Version; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.util.Objects; + +/** + * Base class for all monitoring documents. 
+ */ +public abstract class MonitoringDoc implements ToXContentObject { + + private final String cluster; + private final long timestamp; + private final long intervalMillis; + private final Node node; + private final MonitoredSystem system; + private final String type; + private final String id; + + public MonitoringDoc(final String cluster, + final long timestamp, + final long intervalMillis, + @Nullable final Node node, + final MonitoredSystem system, + final String type, + @Nullable final String id) { + + this.cluster = Objects.requireNonNull(cluster); + this.timestamp = timestamp; + this.intervalMillis = intervalMillis; + this.node = node; + this.system = Objects.requireNonNull(system); + this.type = Objects.requireNonNull(type); + this.id = id; + } + + public String getCluster() { + return cluster; + } + + public long getTimestamp() { + return timestamp; + } + + public long getIntervalMillis() { + return intervalMillis; + } + + public Node getNode() { + return node; + } + + public MonitoredSystem getSystem() { + return system; + } + + public String getType() { + return type; + } + + public String getId() { + return id; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + MonitoringDoc that = (MonitoringDoc) o; + return timestamp == that.timestamp + && intervalMillis == that.intervalMillis + && Objects.equals(cluster, that.cluster) + && Objects.equals(node, that.node) + && system == that.system + && Objects.equals(type, that.type) + && Objects.equals(id, that.id); + } + + @Override + public int hashCode() { + return Objects.hash(cluster, timestamp, intervalMillis, node, system, type, id); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field("cluster_uuid", cluster); + builder.field("timestamp", toUTC(timestamp)); + builder.field("interval_ms", intervalMillis); + builder.field("type", type); + builder.field("source_node", node); + innerToXContent(builder, params); + } + return builder.endObject(); + } + + protected abstract void innerToXContent(XContentBuilder builder, Params params) throws IOException; + + /** + * Converts a timestamp in milliseconds to its {@link String} representation in UTC time. + * + * @param timestamp the timestamp to convert + * @return a string representing the timestamp + */ + public static String toUTC(final long timestamp) { + return new DateTime(timestamp, DateTimeZone.UTC).toString(); + } + + /** + * {@link Node} represents the node of the cluster from which the monitoring document + * has been collected. + */ + public static class Node implements Writeable, ToXContentObject { + + private final String uuid; + private final String host; + private final String transportAddress; + private final String ip; + private final String name; + private final long timestamp; + + public Node(final String uuid, + final String host, + final String transportAddress, + final String ip, + final String name, + final long timestamp) { + this.uuid = uuid; + this.host = host; + this.transportAddress = transportAddress; + this.ip = ip; + this.name = name; + this.timestamp = timestamp; + } + + /** + * Read from a stream. 
+ */ + public Node(StreamInput in) throws IOException { + uuid = in.readOptionalString(); + host = in.readOptionalString(); + transportAddress = in.readOptionalString(); + ip = in.readOptionalString(); + name = in.readOptionalString(); + if (in.getVersion().onOrAfter(Version.V_6_0_0_rc1)) { + timestamp = in.readVLong(); + } else { + // Read the node attributes (removed in 6.0 rc1) + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + in.readString(); + in.readString(); + } + timestamp = 0L; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalString(uuid); + out.writeOptionalString(host); + out.writeOptionalString(transportAddress); + out.writeOptionalString(ip); + out.writeOptionalString(name); + if (out.getVersion().onOrAfter(Version.V_6_0_0_rc1)) { + out.writeVLong(timestamp); + } else { + // Write an empty map of node attributes (removed in 6.0 rc1) + out.writeVInt(0); + } + } + + public String getUUID() { + return uuid; + } + + public String getHost() { + return host; + } + + public String getTransportAddress() { + return transportAddress; + } + + public String getIp() { + return ip; + } + + public String getName() { + return name; + } + + public long getTimestamp() { + return timestamp; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field("uuid", uuid); + builder.field("host", host); + builder.field("transport_address", transportAddress); + builder.field("ip", ip); + builder.field("name", name); + builder.field("timestamp", toUTC(timestamp)); + } + return builder.endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Node node = (Node) o; + return Objects.equals(uuid, node.uuid) + && Objects.equals(host, node.host) + && Objects.equals(transportAddress, node.transportAddress) + && Objects.equals(ip, node.ip) + && Objects.equals(name, node.name) + && Objects.equals(timestamp, node.timestamp); + } + + @Override + public int hashCode() { + return Objects.hash(uuid, host, transportAddress, ip, name, timestamp); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/exporter/MonitoringTemplateUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/exporter/MonitoringTemplateUtils.java new file mode 100644 index 0000000000000..96bc59af1ca63 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/exporter/MonitoringTemplateUtils.java @@ -0,0 +1,263 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.monitoring.exporter; + +import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; +import org.elasticsearch.xpack.core.template.TemplateUtils; +import org.joda.time.format.DateTimeFormatter; +import org.elasticsearch.common.Strings; + +import java.io.IOException; +import java.util.Locale; +import java.util.regex.Pattern; + +public final class MonitoringTemplateUtils { + + private static final String TEMPLATE_FILE = "/monitoring-%s.json"; + private static final String TEMPLATE_VERSION_PROPERTY = Pattern.quote("${monitoring.template.version}"); + + /** + * The last version of X-Pack that updated the templates and pipelines. + *

+ * It may be possible for this to diverge between templates and pipelines, but for now they're the same. + */ + public static final int LAST_UPDATED_VERSION = Version.V_7_0_0_alpha1.id; + + /** + * Current version of templates used in their name to differentiate from breaking changes (separate from product version). + */ + public static final String TEMPLATE_VERSION = "6"; + /** + * The previous version of templates, which we still support via the REST _xpack/monitoring/_bulk endpoint because + * nothing changed for those documents. + */ + public static final String OLD_TEMPLATE_VERSION = "2"; + + /** + * IDs of templates that can be used with {@linkplain #loadTemplate(String) loadTemplate}. + */ + public static final String[] TEMPLATE_IDS = { "alerts", "es", "kibana", "logstash", "beats" }; + + /** + * IDs of templates that can be used with {@linkplain #createEmptyTemplate(String) createEmptyTemplate} that are not managed by a + * Resolver. + *

+ * These should only be used by the HTTP Exporter to create old templates so that older versions can be properly upgraded. Older + * instances will attempt to create a named template based on the templates that they expect (e.g., ".monitoring-es-2") and not the + * ones that we are creating. + */ + public static final String[] OLD_TEMPLATE_IDS = { "data", "es", "kibana", "logstash", "alerts" }; + + /** + * IDs of pipelines that can be used with + */ + public static final String[] PIPELINE_IDS = { TEMPLATE_VERSION, OLD_TEMPLATE_VERSION }; + + private MonitoringTemplateUtils() { } + + /** + * Get a template name for any template ID. + * + * @param id The template identifier. + * @return Never {@code null} {@link String} prefixed by ".monitoring-". + * @see #TEMPLATE_IDS + */ + public static String templateName(final String id) { + return ".monitoring-" + id; + } + + /** + * Get a template name for any template ID for old templates in the previous version. + * + * @param id The template identifier. + * @return Never {@code null} {@link String} prefixed by ".monitoring-" and ended by the {@code OLD_TEMPLATE_VERSION}. + * @see #OLD_TEMPLATE_IDS + */ + public static String oldTemplateName(final String id) { + return ".monitoring-" + id + "-" + OLD_TEMPLATE_VERSION; + } + + public static String loadTemplate(final String id) { + String resource = String.format(Locale.ROOT, TEMPLATE_FILE, id); + return TemplateUtils.loadTemplate(resource, TEMPLATE_VERSION, TEMPLATE_VERSION_PROPERTY); + } + + /** + * Create a template that does nothing but exist and provide a newer {@code version} so that we know that we created it. + * + * @param id The template identifier. + * @return Never {@code null}. + * @see #OLD_TEMPLATE_IDS + * @see #OLD_TEMPLATE_VERSION + */ + public static String createEmptyTemplate(final String id) { + // e.g., { "index_patterns": [ ".monitoring-data-2*" ], "version": 6000002 } + return "{\"index_patterns\":[\".monitoring-" + id + "-" + OLD_TEMPLATE_VERSION + "*\"],\"version\":" + LAST_UPDATED_VERSION + "}"; + } + + /** + * Get a pipeline name for any template ID. + * + * @param id The template identifier. + * @return Never {@code null} {@link String} prefixed by "xpack_monitoring_" and the {@code id}. + * @see #TEMPLATE_IDS + */ + public static String pipelineName(String id) { + return "xpack_monitoring_" + id; + } + + /** + * Create a pipeline that allows documents for different template versions to be upgraded. + *

+ * The expectation is that you will call either {@link Strings#toString(XContentBuilder)} or + * {@link BytesReference#bytes(XContentBuilder)}}. + * + * @param id The API version (e.g., "2") to use + * @param type The type of data you want to format for the request + * @return Never {@code null}. Always an ended-object. + * @throws IllegalArgumentException if {@code apiVersion} is unrecognized + * @see #PIPELINE_IDS + */ + public static XContentBuilder loadPipeline(final String id, final XContentType type) { + switch (id) { + case TEMPLATE_VERSION: + return emptyPipeline(type); + case OLD_TEMPLATE_VERSION: + return pipelineForApiVersion2(type); + } + + throw new IllegalArgumentException("unrecognized pipeline API version [" + id + "]"); + } + + /** + * Create a pipeline to upgrade documents from {@link MonitoringTemplateUtils#OLD_TEMPLATE_VERSION} + *


+     * <pre><code>
+     * {
+     *   "description" : "This pipeline upgrades documents ...",
+     *   "version": 6000001,
+     *   "processors": [ ]
+     * }
+     * </code></pre>
+ * The expectation is that you will call either {@link Strings#toString(XContentBuilder)} or + * {@link BytesReference#bytes(XContentBuilder)}}. + * + * @param type The type of data you want to format for the request + * @return Never {@code null}. Always an ended-object. + * @see #LAST_UPDATED_VERSION + */ + static XContentBuilder pipelineForApiVersion2(final XContentType type) { + try { + // For now: We prepend the API version to the string so that it's easy to parse in the future; if we ever add metadata + // to pipelines, then it would better serve this use case + return XContentBuilder.builder(type.xContent()).startObject() + .field("description", "This pipeline upgrades documents from the older version of the Monitoring API to " + + "the newer version (" + TEMPLATE_VERSION + ") by fixing breaking " + + "changes in those older documents before they are indexed from the older version (" + + OLD_TEMPLATE_VERSION + ").") + .field("version", LAST_UPDATED_VERSION) + .startArray("processors") + .startObject() + // Drop the .monitoring-data-2 index and effectively drop unnecessary data (duplicate or simply unused) + .startObject("script") + .field("source", + "boolean legacyIndex = ctx._index == '.monitoring-data-2';" + + "if (legacyIndex || ctx._index.startsWith('.monitoring-es-2')) {" + + "if (ctx._type == 'cluster_info') {" + + "ctx._type = 'cluster_stats';" + + "ctx._id = null;" + + "} else if (legacyIndex || ctx._type == 'cluster_stats' || ctx._type == 'node') {" + + "String index = ctx._index;" + + "Object clusterUuid = ctx.cluster_uuid;" + + "Object timestamp = ctx.timestamp;" + + + "ctx.clear();" + + + "ctx._id = 'xpack_monitoring_2_drop_bucket';" + + "ctx._index = index;" + + "ctx._type = 'legacy_data';" + + "ctx.timestamp = timestamp;" + + "ctx.cluster_uuid = clusterUuid;" + + "}" + + "if (legacyIndex) {" + + "ctx._index = '<.monitoring-es-" + TEMPLATE_VERSION + "-{now}>';" + + "}" + + "}") + .endObject() + .endObject() + .startObject() + .startObject("rename") + .field("field", "_type") + .field("target_field", "type") + .endObject() + .endObject() + .startObject() + .startObject("set") + .field("field", "_type") + .field("value", "doc") + .endObject() + .endObject() + .startObject() + .startObject("gsub") + .field("field", "_index") + .field("pattern", "(.monitoring-\\w+-)2(-.+)") + .field("replacement", "$1" + TEMPLATE_VERSION + "$2") + .endObject() + .endObject() + .endArray() + .endObject(); + } catch (final IOException e) { + throw new RuntimeException("Failed to create pipeline to upgrade from older version [" + OLD_TEMPLATE_VERSION + + "] to the newer version [" + TEMPLATE_VERSION + "].", e); + } + } + + /** + * Create an empty pipeline. + *

+     * <pre><code>
+     * {
+     *   "description" : "This is a placeholder pipeline ...",
+     *   "version": 6000001,
+     *   "processors": [ ]
+     * }
+     * </code></pre>
+ * The expectation is that you will call either {@link Strings#toString(XContentBuilder)} or + * {@link BytesReference#bytes(XContentBuilder)}}. + * + * @param type The type of data you want to format for the request + * @return Never {@code null}. Always an ended-object. + * @see #LAST_UPDATED_VERSION + */ + public static XContentBuilder emptyPipeline(final XContentType type) { + try { + // For now: We prepend the API version to the string so that it's easy to parse in the future; if we ever add metadata + // to pipelines, then it would better serve this use case + return XContentBuilder.builder(type.xContent()).startObject() + .field("description", "This is a placeholder pipeline for Monitoring API version " + TEMPLATE_VERSION + + " so that future versions may fix breaking changes.") + .field("version", LAST_UPDATED_VERSION) + .startArray("processors").endArray() + .endObject(); + } catch (final IOException e) { + throw new RuntimeException("Failed to create empty pipeline", e); + } + } + + /** + * Get the index name given a specific date format, a monitored system and a timestamp. + * + * @param formatter the {@link DateTimeFormatter} to use to compute the timestamped index name + * @param system the {@link MonitoredSystem} for which the index name is computed + * @param timestamp the timestamp value to use to compute the timestamped index name + * @return the index name as a @{link String} + */ + public static String indexName(final DateTimeFormatter formatter, final MonitoredSystem system, final long timestamp) { + return ".monitoring-" + system.getSystem() + "-" + TEMPLATE_VERSION + "-" + formatter.print(timestamp); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/XPackRestHandler.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/XPackRestHandler.java new file mode 100644 index 0000000000000..5ac0969624bc8 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/XPackRestHandler.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.rest; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.xpack.core.XPackClient; + +import java.io.IOException; + +public abstract class XPackRestHandler extends BaseRestHandler { + + protected static String URI_BASE = "/_xpack"; + + public XPackRestHandler(Settings settings) { + super(settings); + } + + @Override + public final RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + return doPrepareRequest(request, new XPackClient(client)); + } + + protected abstract RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException; +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackInfoAction.java new file mode 100644 index 0000000000000..e1c694742cba4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackInfoAction.java @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.rest.action; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.license.XPackInfoResponse; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.XPackClient; +import org.elasticsearch.xpack.core.action.XPackInfoRequest; +import org.elasticsearch.xpack.core.rest.XPackRestHandler; + +import java.io.IOException; +import java.util.EnumSet; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestRequest.Method.HEAD; +import static org.elasticsearch.rest.RestStatus.OK; + +public class RestXPackInfoAction extends XPackRestHandler { + public RestXPackInfoAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(HEAD, URI_BASE, this); + controller.registerHandler(GET, URI_BASE, this); + } + + @Override + public String getName() { + return "xpack_info_action"; + } + + @Override + public RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException { + + // we piggyback verbosity on "human" output + boolean verbose = request.paramAsBoolean("human", true); + + EnumSet categories = XPackInfoRequest.Category + .toSet(request.paramAsStringArray("categories", new String[] { "_all" })); + return channel -> + client.prepareInfo() + .setVerbose(verbose) + .setCategories(categories) + .execute(new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(XPackInfoResponse infoResponse, XContentBuilder builder) throws Exception { + + builder.startObject(); + + if (infoResponse.getBuildInfo() != null) { + builder.field("build", infoResponse.getBuildInfo(), request); + } + + if (infoResponse.getLicenseInfo() != null) { + builder.field("license", infoResponse.getLicenseInfo(), request); + } else if (categories.contains(XPackInfoRequest.Category.LICENSE)) { + // if the user requested the license info, and there is no license, we should send + // back an explicit null value (indicating there is no license). This is different + // than not adding the license info at all + builder.nullField("license"); + } + + if (infoResponse.getFeatureSetsInfo() != null) { + builder.field("features", infoResponse.getFeatureSetsInfo(), request); + } + + if (verbose) { + builder.field("tagline", "You know, for X"); + } + + builder.endObject(); + return new BytesRestResponse(OK, builder); + } + }); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java new file mode 100644 index 0000000000000..0f09f17dbb066 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.rest.action; + +import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.XPackClient; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.action.XPackUsageRequestBuilder; +import org.elasticsearch.xpack.core.action.XPackUsageResponse; +import org.elasticsearch.xpack.core.rest.XPackRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestStatus.OK; + +public class RestXPackUsageAction extends XPackRestHandler { + public RestXPackUsageAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(GET, URI_BASE + "/usage", this); + } + + @Override + public String getName() { + return "xpack_usage_action"; + } + + @Override + public RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException { + final TimeValue masterTimeout = request.paramAsTime("master_timeout", MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT); + return channel -> new XPackUsageRequestBuilder(client.es()) + .setMasterNodeTimeout(masterTimeout) + .execute(new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(XPackUsageResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + for (XPackFeatureSet.Usage usage : response.getUsages()) { + builder.field(usage.name(), usage); + } + builder.endObject(); + return new BytesRestResponse(OK, builder); + } + }); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupFeatureSetUsage.java new file mode 100644 index 0000000000000..b0377fbcc67e8 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupFeatureSetUsage.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.rollup; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; + +import java.io.IOException; + +public class RollupFeatureSetUsage extends XPackFeatureSet.Usage { + + public RollupFeatureSetUsage(StreamInput input) throws IOException { + super(input); + } + + public RollupFeatureSetUsage(boolean available, boolean enabled) { + super(XPackField.ROLLUP, available, enabled); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupField.java new file mode 100644 index 0000000000000..1e2e011276dc3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupField.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.rollup; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountAggregationBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; + +import java.util.Arrays; +import java.util.List; + +public class RollupField { + // Fields that are used both in core Rollup actions and Rollup plugin + public static final ParseField ID = new ParseField("id"); + public static final String TASK_NAME = "xpack/rollup/job"; + public static final String ROLLUP_META = "_rollup"; + public static final String INTERVAL = "interval"; + public static final String COUNT_FIELD = "_count"; + public static final String VERSION_FIELD = "version"; + public static final String VALUE = "value"; + public static final String TIMESTAMP = "timestamp"; + public static final String FILTER = "filter"; + public static final String NAME = "rollup"; + public static final String TYPE_NAME = "_doc"; + public static final String AGG = "agg"; + public static final String ROLLUP_MISSING = "ROLLUP_MISSING_40710B25931745D4B0B8B310F6912A69"; + public static final List SUPPORTED_METRICS = Arrays.asList(MaxAggregationBuilder.NAME, MinAggregationBuilder.NAME, + SumAggregationBuilder.NAME, AvgAggregationBuilder.NAME, ValueCountAggregationBuilder.NAME); + + /** + * Format to the appropriate Rollup field name convention + * + * @param source Source aggregation to get type and name from + * @param extra The type of value this field is (VALUE, INTERVAL, etc) + * @return formatted field name + */ + public static String formatFieldName(ValuesSourceAggregationBuilder source, String extra) { + return source.field() + "." + source.getType() + "." 
+ extra; + } + + /** + * Format to the appropriate Rollup field name convention + * + * @param field The field we are formatting + * @param type The aggregation type that was used for rollup + * @param extra The type of value this field is (VALUE, INTERVAL, etc) + * @return formatted field name + */ + public static String formatFieldName(String field, String type, String extra) { + return field + "." + type + "." + extra; + } + + /** + * Format to the appropriate Rollup convention for internal Metadata fields (_rollup) + */ + public static String formatMetaField(String extra) { + return RollupField.ROLLUP_META + "." + extra; + } + + /** + * Format to the appropriate Rollup convention for extra Count aggs. + * These are added to averages and bucketing aggs that need a count + */ + public static String formatCountAggName(String field) { + return field + "." + RollupField.COUNT_FIELD; + } + + /** + * Format to the appropriate Rollup convention for agg names that + * might conflict with empty buckets. `value` is appended to agg name. + * E.g. used for averages + */ + public static String formatValueAggName(String field) { + return field + "." + RollupField.VALUE; + } + + /** + * Format into the convention for computed field lookups + */ + public static String formatComputed(String field, String agg) { + return field + "." + agg; + } + + /** + * Format into the convention used by the Indexer's composite agg, so that + * the normal field name is translated into a Rollup fieldname via the agg name + */ + public static String formatIndexerAggName(String field, String agg) { + return field + "." + agg; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/DeleteRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/DeleteRollupJobAction.java new file mode 100644 index 0000000000000..771f5e98d5c68 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/DeleteRollupJobAction.java @@ -0,0 +1,128 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.rollup.action; + + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.rollup.RollupField; + +import java.io.IOException; +import java.util.Objects; + +public class DeleteRollupJobAction extends Action { + + public static final DeleteRollupJobAction INSTANCE = new DeleteRollupJobAction(); + public static final String NAME = "cluster:admin/xpack/rollup/delete"; + + private DeleteRollupJobAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, INSTANCE); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends AcknowledgedRequest implements ToXContent { + private String id; + + public Request(String id) { + this.id = ExceptionsHelper.requireNonNull(id, RollupField.ID.getPreferredName()); + } + + public Request() {} + + public String getId() { + return id; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + id = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(RollupField.ID.getPreferredName(), id); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(id); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(id, other.id); + } + } + + public static class RequestBuilder extends MasterNodeOperationRequestBuilder { + + protected RequestBuilder(ElasticsearchClient client, DeleteRollupJobAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends AcknowledgedResponse { + + public Response() { + super(); + } + + public Response(boolean acknowledged) { + super(acknowledged); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + readAcknowledged(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + writeAcknowledged(out); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java new file mode 100644 index 0000000000000..83e9c7f8a5b42 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java @@ -0,0 +1,176 @@ +/* + * Copyright Elasticsearch 
B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.rollup.action; + + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.rollup.RollupField; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +public class GetRollupCapsAction extends Action { + + public static final GetRollupCapsAction INSTANCE = new GetRollupCapsAction(); + public static final String NAME = "cluster:monitor/xpack/rollup/get/caps"; + public static final ParseField CONFIG = new ParseField("config"); + public static final ParseField STATUS = new ParseField("status"); + + private GetRollupCapsAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, INSTANCE); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends ActionRequest implements ToXContent { + private String indexPattern; + + public Request(String indexPattern) { + if (Strings.isNullOrEmpty(indexPattern) || indexPattern.equals("*")) { + this.indexPattern = MetaData.ALL; + } else { + this.indexPattern = indexPattern; + } + } + + public Request() {} + + public String getIndexPattern() { + return indexPattern; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + this.indexPattern = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(indexPattern); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(RollupField.ID.getPreferredName(), indexPattern); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(indexPattern); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(indexPattern, other.indexPattern); + } + } + + public static class RequestBuilder extends ActionRequestBuilder { + + protected RequestBuilder(ElasticsearchClient client, GetRollupCapsAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends ActionResponse implements Writeable, ToXContentObject { + + private Map jobs = Collections.emptyMap(); + + public Response() { + + } + + public Response(Map 
jobs) { + this.jobs = Objects.requireNonNull(jobs); + } + + Response(StreamInput in) throws IOException { + jobs = in.readMap(StreamInput::readString, RollableIndexCaps::new); + } + + public Map getJobs() { + return jobs; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(jobs, StreamOutput::writeString, (out1, value) -> value.writeTo(out1)); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + for (Map.Entry entry : jobs.entrySet()) { + entry.getValue().toXContent(builder, params); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobs); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Response other = (Response) obj; + return Objects.equals(jobs, other.jobs); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java new file mode 100644 index 0000000000000..2e8ed2852e27f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java @@ -0,0 +1,292 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.rollup.action; + + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.support.tasks.BaseTasksRequest; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.rollup.RollupField; +import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; +import org.elasticsearch.xpack.core.rollup.job.RollupJobStats; +import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +public class GetRollupJobsAction extends Action { + + public static final GetRollupJobsAction INSTANCE = new GetRollupJobsAction(); + public static final String NAME = "cluster:monitor/xpack/rollup/get"; + public static final ParseField JOBS = new ParseField("jobs"); + public static final ParseField CONFIG = new ParseField("config"); + 
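+    // JSON keys used when rendering each job in the response: its RollupJobConfig ("config"), RollupJobStatus ("status") and RollupJobStats ("stats")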
public static final ParseField STATUS = new ParseField("status"); + public static final ParseField STATS = new ParseField("stats"); + + private GetRollupJobsAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, INSTANCE); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends BaseTasksRequest implements ToXContent { + private String id; + + public Request(String id) { + if (Strings.isNullOrEmpty(id) || id.equals("*")) { + this.id = MetaData.ALL; + } else { + this.id = id; + } + } + + public Request() {} + + @Override + public boolean match(Task task) { + // If we are retrieving all the jobs, the task description just needs to start + // with `rollup_` + if (id.equals(MetaData.ALL)) { + return task.getDescription().startsWith(RollupField.NAME + "_"); + } + // Otherwise find the task by ID + return task.getDescription().equals(RollupField.NAME + "_" + id); + } + + public String getId() { + return id; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + id = in.readString(); + if (Strings.isNullOrEmpty(id) || id.equals("*")) { + this.id = MetaData.ALL; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(RollupField.ID.getPreferredName(), id); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(id); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(id, other.id); + } + } + + public static class RequestBuilder extends ActionRequestBuilder { + + protected RequestBuilder(ElasticsearchClient client, GetRollupJobsAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { + + private List jobs; + + public Response(List jobs) { + super(Collections.emptyList(), Collections.emptyList()); + this.jobs = jobs; + } + + public Response(List jobs, List taskFailures, List nodeFailures) { + super(taskFailures, nodeFailures); + this.jobs = jobs; + } + + public Response() { + super(Collections.emptyList(), Collections.emptyList()); + } + + public Response(StreamInput in) throws IOException { + super(Collections.emptyList(), Collections.emptyList()); + readFrom(in); + } + + public List getJobs() { + return jobs; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + jobs = in.readList(JobWrapper::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeList(jobs); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(JOBS.getPreferredName(), jobs); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(jobs); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + 
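+            // same concrete class, so the cast below is safe; two responses are equal when they hold the same job wrappers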
Response other = (Response) obj; + return Objects.equals(jobs, other.jobs); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } + + public static class JobWrapper implements Writeable, ToXContentObject { + private final RollupJobConfig job; + private final RollupJobStats stats; + private final RollupJobStatus status; + + public static final ConstructingObjectParser PARSER + = new ConstructingObjectParser<>(NAME, a -> new JobWrapper((RollupJobConfig) a[0], + (RollupJobStats) a[1], (RollupJobStatus)a[2])); + + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> RollupJobConfig.PARSER.apply(p,c).build(), CONFIG); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), RollupJobStats.PARSER::apply, STATS); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), RollupJobStatus.PARSER::apply, STATUS); + } + + public JobWrapper(RollupJobConfig job, RollupJobStats stats, RollupJobStatus status) { + this.job = job; + this.stats = stats; + this.status = status; + } + + public JobWrapper(StreamInput in) throws IOException { + this.job = new RollupJobConfig(in); + this.stats = new RollupJobStats(in); + this.status = new RollupJobStatus(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + job.writeTo(out); + stats.writeTo(out); + status.writeTo(out); + } + + public RollupJobConfig getJob() { + return job; + } + + public RollupJobStats getStats() { + return stats; + } + + public RollupJobStatus getStatus() { + return status; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(CONFIG.getPreferredName()); + job.toXContent(builder, params); + builder.field(STATUS.getPreferredName(), status); + builder.field(STATS.getPreferredName(), stats); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(job, stats, status); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + JobWrapper other = (JobWrapper) obj; + return Objects.equals(job, other.job) + && Objects.equals(stats, other.stats) + && Objects.equals(status, other.status); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java new file mode 100644 index 0000000000000..bb64d97f1c87d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java @@ -0,0 +1,166 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.rollup.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +public class PutRollupJobAction extends Action { + + public static final PutRollupJobAction INSTANCE = new PutRollupJobAction(); + public static final String NAME = "cluster:admin/xpack/rollup/put"; + + private PutRollupJobAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, INSTANCE); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends AcknowledgedRequest implements IndicesRequest, ToXContentObject { + + private RollupJobConfig config; + private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false); + + public Request(RollupJobConfig config) { + this.config = config; + } + + public Request() { + + } + + public static Request parseRequest(String id, XContentParser parser) { + RollupJobConfig.Builder config = RollupJobConfig.Builder.fromXContent(id, parser); + return new Request(config.build()); + } + + public RollupJobConfig getConfig() { + return config; + } + + public void setConfig(RollupJobConfig config) { + this.config = config; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + this.config = new RollupJobConfig(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + this.config.writeTo(out); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + public ActionRequestValidationException validateMappings(Map> fieldCapsResponse) { + ActionRequestValidationException validationException = new ActionRequestValidationException(); + if (fieldCapsResponse.size() == 0) { + validationException.addValidationError("Could not find any fields in the index/index-pattern that were configured in job"); + return validationException; + } + config.validateMappings(fieldCapsResponse, validationException); + if (validationException.validationErrors().size() > 0) { + return validationException; + } + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return this.config.toXContent(builder, params); + } + + @Override + public String[] indices() { + return new String[]{this.config.getIndexPattern()}; + } + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + @Override + public int hashCode() { + return Objects.hash(config); + } + + 
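+        // as with hashCode above, request equality is determined solely by the wrapped RollupJobConfig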
@Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(config, other.config); + } + } + + public static class RequestBuilder extends MasterNodeOperationRequestBuilder { + + protected RequestBuilder(ElasticsearchClient client, PutRollupJobAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends AcknowledgedResponse { + + public Response() { + super(); + } + + public Response(boolean acknowledged) { + super(acknowledged); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + readAcknowledged(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + writeAcknowledged(out); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollableIndexCaps.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollableIndexCaps.java new file mode 100644 index 0000000000000..91f581f1c09e0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollableIndexCaps.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.rollup.action; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.Objects; + +/** + * Represents the rollup capabilities of a non-rollup index. E.g. 
what values/aggregations + * were rolled up for this index, in what rollup jobs that data is stored and where those + * concrete rollup indices exist + * + * The index name can either be a single index, or an index pattern (logstash-*) + */ +public class RollableIndexCaps implements Writeable, ToXContentFragment { + static ParseField ROLLUP_JOBS = new ParseField("rollup_jobs"); + + private String indexName; + private List jobCaps; + + public RollableIndexCaps(String indexName) { + this.indexName = indexName; + this.jobCaps = new ArrayList<>(); + } + + public RollableIndexCaps(StreamInput in) throws IOException { + this.indexName = in.readString(); + this.jobCaps = in.readList(RollupJobCaps::new); + } + + public void addJobCap(RollupJobCaps jobCap) { + jobCaps.add(jobCap); + } + + public String getIndexName() { + return indexName; + } + + public List getJobCaps() { + return jobCaps; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(indexName); + out.writeList(jobCaps); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(indexName); + jobCaps.sort(Comparator.comparing(RollupJobCaps::getJobID)); + builder.field(ROLLUP_JOBS.getPreferredName(), jobCaps); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + RollableIndexCaps that = (RollableIndexCaps) other; + + return Objects.equals(this.jobCaps, that.jobCaps) + && Objects.equals(this.indexName, that.indexName); + } + + @Override + public int hashCode() { + return Objects.hash(jobCaps, indexName); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java new file mode 100644 index 0000000000000..f37a351c0136d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java @@ -0,0 +1,224 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.rollup.action; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.xpack.core.rollup.RollupField; +import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.BiConsumer; + +/** + * Represents the Rollup capabilities for a specific job on a single rollup index + */ +public class RollupJobCaps implements Writeable, ToXContentObject { + private static ParseField JOB_ID = new ParseField("job_id"); + private static ParseField ROLLUP_INDEX = new ParseField("rollup_index"); + private static ParseField INDEX_PATTERN = new ParseField("index_pattern"); + private static ParseField FIELDS = new ParseField("fields"); + + private String jobID; + private String rollupIndex; + private String indexPattern; + private Map fieldCapLookup = new HashMap<>(); + + // TODO now that these rollup caps are being used more widely (e.g. search), perhaps we should + // store the RollupJob and translate into FieldCaps on demand for json output. 
Would make working with + // it internally a lot easier + public RollupJobCaps(RollupJobConfig job) { + jobID = job.getId(); + rollupIndex = job.getRollupIndex(); + indexPattern = job.getIndexPattern(); + Map dateHistoAggCap = job.getGroupConfig().getDateHisto().toAggCap(); + String dateField = job.getGroupConfig().getDateHisto().getField(); + RollupFieldCaps fieldCaps = fieldCapLookup.get(dateField); + if (fieldCaps == null) { + fieldCaps = new RollupFieldCaps(); + } + fieldCaps.addAgg(dateHistoAggCap); + fieldCapLookup.put(dateField, fieldCaps); + + if (job.getGroupConfig().getHisto() != null) { + Map histoAggCap = job.getGroupConfig().getHisto().toAggCap(); + Arrays.stream(job.getGroupConfig().getHisto().getFields()).forEach(field -> { + RollupFieldCaps caps = fieldCapLookup.get(field); + if (caps == null) { + caps = new RollupFieldCaps(); + } + caps.addAgg(histoAggCap); + fieldCapLookup.put(field, caps); + }); + } + + if (job.getGroupConfig().getTerms() != null) { + Map histoAggCap = job.getGroupConfig().getTerms().toAggCap(); + Arrays.stream(job.getGroupConfig().getTerms().getFields()).forEach(field -> { + RollupFieldCaps caps = fieldCapLookup.get(field); + if (caps == null) { + caps = new RollupFieldCaps(); + } + caps.addAgg(histoAggCap); + fieldCapLookup.put(field, caps); + }); + } + + if (job.getMetricsConfig().size() > 0) { + job.getMetricsConfig().forEach(metricConfig -> { + List> metrics = metricConfig.toAggCap(); + metrics.forEach(m -> { + RollupFieldCaps caps = fieldCapLookup.get(metricConfig.getField()); + if (caps == null) { + caps = new RollupFieldCaps(); + } + caps.addAgg(m); + fieldCapLookup.put(metricConfig.getField(), caps); + }); + }); + } + } + + public RollupJobCaps(StreamInput in) throws IOException { + this.jobID = in.readString(); + this.rollupIndex = in.readString(); + this.indexPattern = in.readString(); + this.fieldCapLookup = in.readMap(StreamInput::readString, RollupFieldCaps::new); + } + + public Map getFieldCaps() { + return fieldCapLookup; + } + + public String getRollupIndex() { + return rollupIndex; + } + + public String getIndexPattern() { + return indexPattern; + } + + public String getJobID() { + return jobID; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(jobID); + out.writeString(rollupIndex); + out.writeString(indexPattern); + out.writeMap(fieldCapLookup, StreamOutput::writeString, (o, value) -> value.writeTo(o)); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(JOB_ID.getPreferredName(), jobID); + builder.field(ROLLUP_INDEX.getPreferredName(), rollupIndex); + builder.field(INDEX_PATTERN.getPreferredName(), indexPattern); + builder.startObject(FIELDS.getPreferredName()); + for (Map.Entry fieldCap : fieldCapLookup.entrySet()) { + builder.array(fieldCap.getKey(), fieldCap.getValue()); + } + builder.endObject(); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + RollupJobCaps that = (RollupJobCaps) other; + + return Objects.equals(this.jobID, that.jobID) + && Objects.equals(this.rollupIndex, that.rollupIndex) + && Objects.equals(this.fieldCapLookup, that.fieldCapLookup); + } + + @Override + public int hashCode() { + return Objects.hash(jobID, rollupIndex, fieldCapLookup); + } + + public static class RollupFieldCaps implements 
Writeable, ToXContentObject { + private List> aggs = new ArrayList<>(); + + RollupFieldCaps() { } + + RollupFieldCaps(StreamInput in) throws IOException { + int size = in.readInt(); + aggs = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + aggs.add(in.readMap()); + } + } + + void addAgg(Map agg) { + aggs.add(agg); + } + + public List> getAggs() { + return aggs; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeInt(aggs.size()); + for (Map agg : aggs) { + out.writeMap(agg); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + for (Map agg : aggs) { + builder.map(agg); + } + return builder; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + RollupFieldCaps that = (RollupFieldCaps) other; + + return Objects.equals(this.aggs, that.aggs); + } + + @Override + public int hashCode() { + return Objects.hash(aggs); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchAction.java new file mode 100644 index 0000000000000..b4d3d6efb7d47 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupSearchAction.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.rollup.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.ElasticsearchClient; + +public class RollupSearchAction extends Action { + + public static final RollupSearchAction INSTANCE = new RollupSearchAction(); + public static final String NAME = "indices:admin/xpack/rollup/search"; + + private RollupSearchAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client); + } + + @Override + public SearchResponse newResponse() { + return new SearchResponse(); + } + + static class RequestBuilder extends ActionRequestBuilder { + RequestBuilder(ElasticsearchClient client) { + super(client, INSTANCE, new SearchRequest()); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StartRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StartRollupJobAction.java new file mode 100644 index 0000000000000..042dbd18d42d4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StartRollupJobAction.java @@ -0,0 +1,166 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.rollup.action; + + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.tasks.BaseTasksRequest; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.rollup.RollupField; + +import java.io.IOException; +import java.util.Collections; +import java.util.Objects; + +public class StartRollupJobAction extends Action { + + public static final StartRollupJobAction INSTANCE = new StartRollupJobAction(); + public static final String NAME = "cluster:admin/xpack/rollup/start"; + + private StartRollupJobAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, INSTANCE); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends BaseTasksRequest implements ToXContent { + private String id; + + public Request(String id) { + this.id = ExceptionsHelper.requireNonNull(id, RollupField.ID.getPreferredName()); + } + + public Request() {} + + public String getId() { + return id; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + id = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(RollupField.ID.getPreferredName(), id); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(id); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(id, other.id); + } + } + + public static class RequestBuilder extends ActionRequestBuilder { + + protected RequestBuilder(ElasticsearchClient client, StartRollupJobAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { + + private boolean started; + + public Response() { + super(Collections.emptyList(), Collections.emptyList()); + } + + public Response(StreamInput in) throws IOException { + super(Collections.emptyList(), Collections.emptyList()); + readFrom(in); + } + + public Response(boolean started) { + super(Collections.emptyList(), Collections.emptyList()); + this.started = started; + } + + public boolean isStarted() { + return started; + } + + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + started = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + 
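+            // serialize the inherited task/node failure lists first, then the started flag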
super.writeTo(out); + out.writeBoolean(started); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("started", started); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return started == response.started; + } + + @Override + public int hashCode() { + return Objects.hash(started); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java new file mode 100644 index 0000000000000..cfb7da0bd9323 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/StopRollupJobAction.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.rollup.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.tasks.BaseTasksRequest; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.rollup.RollupField; + +import java.io.IOException; +import java.util.Collections; +import java.util.Objects; + +public class StopRollupJobAction extends Action { + + public static final StopRollupJobAction INSTANCE = new StopRollupJobAction(); + public static final String NAME = "cluster:admin/xpack/rollup/stop"; + + private StopRollupJobAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, INSTANCE); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends BaseTasksRequest implements ToXContent { + private String id; + + public Request(String id) { + this.id = ExceptionsHelper.requireNonNull(id, RollupField.ID.getPreferredName()); + } + + public Request() {} + + public String getId() { + return id; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + id = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(RollupField.ID.getPreferredName(), id); + return builder; + } + + @Override + public int hashCode() { 
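+            // the job id is the only state carried by this request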
+ return Objects.hash(id); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(id, other.id); + } + } + + public static class RequestBuilder extends ActionRequestBuilder { + + protected RequestBuilder(ElasticsearchClient client, StopRollupJobAction action) { + super(client, action, new Request()); + } + } + + public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { + + private boolean stopped; + + public Response() { + super(Collections.emptyList(), Collections.emptyList()); + } + + public Response(StreamInput in) throws IOException { + super(Collections.emptyList(), Collections.emptyList()); + readFrom(in); + } + + public Response(boolean stopped) { + super(Collections.emptyList(), Collections.emptyList()); + this.stopped = stopped; + } + + public boolean isStopped() { + return stopped; + } + + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + stopped = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(stopped); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("stopped", stopped); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return stopped == response.stopped; + } + + @Override + public int hashCode() { + return Objects.hash(stopped); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistoGroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistoGroupConfig.java new file mode 100644 index 0000000000000..4b4e4cf7b7c81 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistoGroupConfig.java @@ -0,0 +1,308 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.rounding.DateTimeUnit; +import org.elasticsearch.common.rounding.Rounding; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; +import org.elasticsearch.search.aggregations.bucket.composite.DateHistogramValuesSourceBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.xpack.core.rollup.RollupField; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * The configuration object for the histograms in the rollup config + * + * { + * "groups": [ + * "date_histogram": { + * "field" : "foo", + * "interval" : "1d", + * "delay": "30d", + * "time_zone" : "EST" + * } + * ] + * } + */ +public class DateHistoGroupConfig implements Writeable, ToXContentFragment { + private static final String NAME = "date_histo_group_config"; + public static final ObjectParser PARSER + = new ObjectParser<>(NAME, DateHistoGroupConfig.Builder::new); + + private static final ParseField INTERVAL = new ParseField("interval"); + private static final ParseField DELAY = new ParseField("delay"); + private static final ParseField FIELD = new ParseField("field"); + public static final ParseField TIME_ZONE = new ParseField("time_zone"); + + private final DateHistogramInterval interval; + private final String field; + private final DateTimeZone timeZone; + private final DateHistogramInterval delay; + + static { + PARSER.declareField(DateHistoGroupConfig.Builder::setInterval, + p -> new DateHistogramInterval(p.text()), INTERVAL, ObjectParser.ValueType.STRING); + PARSER.declareString(DateHistoGroupConfig.Builder::setField, FIELD); + PARSER.declareField(DateHistoGroupConfig.Builder::setDelay, + p -> new DateHistogramInterval(p.text()), DELAY, ObjectParser.ValueType.LONG); + PARSER.declareField(DateHistoGroupConfig.Builder::setTimeZone, p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return DateTimeZone.forID(p.text()); + } else { + return DateTimeZone.forOffsetHours(p.intValue()); + } + }, TIME_ZONE, ObjectParser.ValueType.LONG); + } + + private DateHistoGroupConfig(DateHistogramInterval interval, + String field, + DateHistogramInterval delay, + DateTimeZone timeZone) { + this.interval = interval; + this.field = field; + this.delay = delay; + this.timeZone = Objects.requireNonNull(timeZone); + } + + DateHistoGroupConfig(StreamInput in) throws IOException { + interval = new DateHistogramInterval(in); + field = in.readString(); + delay = in.readOptionalWriteable(DateHistogramInterval::new); + timeZone = 
in.readTimeZone(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + interval.writeTo(out); + out.writeString(field); + out.writeOptionalWriteable(delay); + out.writeTimeZone(timeZone); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(INTERVAL.getPreferredName(), interval.toString()); + builder.field(FIELD.getPreferredName(), field); + if (delay != null) { + builder.field(DELAY.getPreferredName(), delay.toString()); + } + builder.field(TIME_ZONE.getPreferredName(), timeZone.toString()); + + return builder; + } + + /** + * Get the date field + */ + public String getField() { + return field; + } + + /** + * Get the date interval + */ + public DateHistogramInterval getInterval() { + return interval; + } + + /** + * Get the time delay for this histogram + */ + public DateHistogramInterval getDelay() { + return delay; + } + + /** + * Get the timezone to apply + */ + public DateTimeZone getTimeZone() { + return timeZone; + } + + /** + * Create the rounding for this date histogram + */ + public Rounding createRounding() { + return createRounding(interval.toString(), timeZone, ""); + } + ; + /** + * This returns a set of aggregation builders which represent the configured + * set of date histograms. Used by the rollup indexer to iterate over historical data + */ + public List> toBuilders() { + DateHistogramValuesSourceBuilder vsBuilder = + new DateHistogramValuesSourceBuilder(RollupField.formatIndexerAggName(field, DateHistogramAggregationBuilder.NAME)); + vsBuilder.dateHistogramInterval(interval); + vsBuilder.field(field); + vsBuilder.timeZone(timeZone); + + return Collections.singletonList(vsBuilder); + } + + /** + * @return A map representing this config object as a RollupCaps aggregation object + */ + public Map toAggCap() { + Map map = new HashMap<>(3); + map.put("agg", DateHistogramAggregationBuilder.NAME); + map.put(INTERVAL.getPreferredName(), interval.toString()); + if (delay != null) { + map.put(DELAY.getPreferredName(), delay.toString()); + } + map.put(TIME_ZONE.getPreferredName(), timeZone.toString()); + + return map; + } + + public Map getMetadata() { + return Collections.singletonMap(RollupField.formatMetaField(RollupField.INTERVAL), interval.toString()); + } + + public void validateMappings(Map> fieldCapsResponse, + ActionRequestValidationException validationException) { + + Map fieldCaps = fieldCapsResponse.get(field); + if (fieldCaps != null && fieldCaps.isEmpty() == false) { + if (fieldCaps.containsKey("date") && fieldCaps.size() == 1) { + if (fieldCaps.get("date").isAggregatable()) { + return; + } else { + validationException.addValidationError("The field [" + field + "] must be aggregatable across all indices, " + + "but is not."); + } + + } else { + validationException.addValidationError("The field referenced by a date_histo group must be a [date] type across all " + + "indices in the index pattern. 
Found: " + fieldCaps.keySet().toString() + " for field [" + field + "]"); + } + } + validationException.addValidationError("Could not find a [date] field with name [" + field + "] in any of the indices matching " + + "the index pattern."); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + DateHistoGroupConfig that = (DateHistoGroupConfig) other; + + return Objects.equals(this.interval, that.interval) + && Objects.equals(this.field, that.field) + && Objects.equals(this.delay, that.delay) + && Objects.equals(this.timeZone, that.timeZone); + } + + @Override + public int hashCode() { + return Objects.hash(interval, field, delay, timeZone); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + private static Rounding createRounding(String expr, DateTimeZone timeZone, String settingName) { + DateTimeUnit timeUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(expr); + final Rounding.Builder rounding; + if (timeUnit != null) { + rounding = new Rounding.Builder(timeUnit); + } else { + rounding = new Rounding.Builder(TimeValue.parseTimeValue(expr, settingName)); + } + rounding.timeZone(timeZone); + return rounding.build(); + } + + public static class Builder { + private DateHistogramInterval interval; + private String field; + private DateHistogramInterval delay; + private DateTimeZone timeZone; + + public DateHistogramInterval getInterval() { + return interval; + } + + public DateHistoGroupConfig.Builder setInterval(DateHistogramInterval interval) { + this.interval = interval; + return this; + } + + public String getField() { + return field; + } + + public DateHistoGroupConfig.Builder setField(String field) { + this.field = field; + return this; + } + + public DateTimeZone getTimeZone() { + return timeZone; + } + + public DateHistoGroupConfig.Builder setTimeZone(DateTimeZone timeZone) { + this.timeZone = timeZone; + return this; + } + + public DateHistogramInterval getDelay() { + return delay; + } + + public DateHistoGroupConfig.Builder setDelay(DateHistogramInterval delay) { + this.delay = delay; + return this; + } + + public DateHistoGroupConfig build() { + if (field == null || field.isEmpty()) { + throw new IllegalArgumentException("Parameter [" + FIELD.getPreferredName() + "] is mandatory."); + } + if (timeZone == null) { + timeZone = DateTimeZone.UTC; + } + if (interval == null) { + throw new IllegalArgumentException("Parameter [" + INTERVAL.getPreferredName() + "] is mandatory."); + } + // validate interval + createRounding(interval.toString(), timeZone, INTERVAL.getPreferredName()); + if (delay != null) { + // and delay + TimeValue.parseTimeValue(delay.toString(), INTERVAL.getPreferredName()); + } + return new DateHistoGroupConfig(interval, field, delay, timeZone); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/GroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/GroupConfig.java new file mode 100644 index 0000000000000..dcb393b088618 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/GroupConfig.java @@ -0,0 +1,197 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +/** + * The configuration object for the groups section in the rollup config. + * Basically just a wrapper for histo/date histo/terms objects + * + * { + * "groups": [ + * "date_histogram": {...}, + * "histogram" : {...}, + * "terms" : {...} + * ] + * } + */ +public class GroupConfig implements Writeable, ToXContentObject { + private static final String NAME = "grouping_config"; + private static final ParseField DATE_HISTO = new ParseField("date_histogram"); + private static final ParseField HISTO = new ParseField("histogram"); + private static final ParseField TERMS = new ParseField("terms"); + + private final DateHistoGroupConfig dateHisto; + private final HistoGroupConfig histo; + private final TermsGroupConfig terms; + + public static final ObjectParser PARSER = new ObjectParser<>(NAME, GroupConfig.Builder::new); + + static { + PARSER.declareObject(GroupConfig.Builder::setDateHisto, (p,c) -> DateHistoGroupConfig.PARSER.apply(p,c).build(), DATE_HISTO); + PARSER.declareObject(GroupConfig.Builder::setHisto, (p,c) -> HistoGroupConfig.PARSER.apply(p,c).build(), HISTO); + PARSER.declareObject(GroupConfig.Builder::setTerms, (p,c) -> TermsGroupConfig.PARSER.apply(p,c).build(), TERMS); + } + + private GroupConfig(DateHistoGroupConfig dateHisto, @Nullable HistoGroupConfig histo, @Nullable TermsGroupConfig terms) { + this.dateHisto = Objects.requireNonNull(dateHisto, "A date_histogram group is mandatory"); + this.histo = histo; + this.terms = terms; + } + + GroupConfig(StreamInput in) throws IOException { + dateHisto = new DateHistoGroupConfig(in); + histo = in.readOptionalWriteable(HistoGroupConfig::new); + terms = in.readOptionalWriteable(TermsGroupConfig::new); + } + + public DateHistoGroupConfig getDateHisto() { + return dateHisto; + } + + public HistoGroupConfig getHisto() { + return histo; + } + + public TermsGroupConfig getTerms() { + return terms; + } + + public Set getAllFields() { + Set fields = new HashSet<>(); + fields.add(dateHisto.getField()); + if (histo != null) { + fields.addAll(histo.getAllFields()); + } + if (terms != null) { + fields.addAll(terms.getAllFields()); + } + return fields; + } + + public void validateMappings(Map> fieldCapsResponse, + ActionRequestValidationException validationException) { + dateHisto.validateMappings(fieldCapsResponse, validationException); + if (histo != null) { + histo.validateMappings(fieldCapsResponse, validationException); + } + if (terms != null) { + terms.validateMappings(fieldCapsResponse, validationException); + } + } + + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject(DATE_HISTO.getPreferredName()); + dateHisto.toXContent(builder, params); + 
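+        // date_histogram is mandatory (enforced via Objects.requireNonNull in the constructor),
+        // so it is always serialized here without a null check, unlike histo and terms below.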
builder.endObject(); + if (histo != null) { + builder.startObject(HISTO.getPreferredName()); + histo.toXContent(builder, params); + builder.endObject(); + } + if (terms != null) { + builder.startObject(TERMS.getPreferredName()); + terms.toXContent(builder, params); + builder.endObject(); + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + dateHisto.writeTo(out); + out.writeOptionalWriteable(histo); + out.writeOptionalWriteable(terms); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + GroupConfig that = (GroupConfig) other; + + return Objects.equals(this.dateHisto, that.dateHisto) + && Objects.equals(this.histo, that.histo) + && Objects.equals(this.terms, that.terms); + } + + @Override + public int hashCode() { + return Objects.hash(dateHisto, histo, terms); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + public static class Builder { + private DateHistoGroupConfig dateHisto; + private HistoGroupConfig histo; + private TermsGroupConfig terms; + + public DateHistoGroupConfig getDateHisto() { + return dateHisto; + } + + public GroupConfig.Builder setDateHisto(DateHistoGroupConfig dateHisto) { + this.dateHisto = dateHisto; + return this; + } + + public HistoGroupConfig getHisto() { + return histo; + } + + public GroupConfig.Builder setHisto(HistoGroupConfig histo) { + this.histo = histo; + return this; + } + + public TermsGroupConfig getTerms() { + return terms; + } + + public GroupConfig.Builder setTerms(TermsGroupConfig terms) { + this.terms = terms; + return this; + } + + public GroupConfig build() { + if (dateHisto == null) { + throw new IllegalArgumentException("A date_histogram group is mandatory"); + } + return new GroupConfig(dateHisto, histo, terms); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/HistoGroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/HistoGroupConfig.java new file mode 100644 index 0000000000000..8b8d53b4ce9af --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/HistoGroupConfig.java @@ -0,0 +1,216 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; +import org.elasticsearch.search.aggregations.bucket.composite.HistogramValuesSourceBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; +import org.elasticsearch.xpack.core.rollup.RollupField; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * The configuration object for the histograms in the rollup config + * + * { + * "groups": [ + * "histogram": { + * "fields" : [ "foo", "bar" ], + * "interval" : 123 + * } + * ] + * } + */ +public class HistoGroupConfig implements Writeable, ToXContentFragment { + private static final String NAME = "histo_group_config"; + public static final ObjectParser PARSER + = new ObjectParser<>(NAME, HistoGroupConfig.Builder::new); + + private static final ParseField INTERVAL = new ParseField("interval"); + private static final ParseField FIELDS = new ParseField("fields"); + private static final List MAPPER_TYPES = Stream.of(NumberFieldMapper.NumberType.values()) + .map(NumberFieldMapper.NumberType::typeName) + .collect(Collectors.toList()); + + + private final long interval; + private final String[] fields; + + static { + PARSER.declareLong(HistoGroupConfig.Builder::setInterval, INTERVAL); + PARSER.declareStringArray(HistoGroupConfig.Builder::setFields, FIELDS); + } + + private HistoGroupConfig(long interval, String[] fields) { + this.interval = interval; + this.fields = fields; + } + + HistoGroupConfig(StreamInput in) throws IOException { + interval = in.readVLong(); + fields = in.readStringArray(); + } + + public long getInterval() { + return interval; + } + + public String[] getFields() { + return fields; + } + + /** + * This returns a set of aggregation builders which represent the configured + * set of histograms. 
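+ * (As a sketch with hypothetical values: fields ["foo", "bar"] with an interval of 5 become
+ * two HistogramValuesSourceBuilder instances, one per field, each using interval 5.)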
Used by the rollup indexer to iterate over historical data + */ + public List> toBuilders() { + if (fields.length == 0) { + return Collections.emptyList(); + } + + return Arrays.stream(fields).map(f -> { + HistogramValuesSourceBuilder vsBuilder + = new HistogramValuesSourceBuilder(RollupField.formatIndexerAggName(f, HistogramAggregationBuilder.NAME)); + vsBuilder.interval(interval); + vsBuilder.field(f); + return vsBuilder; + }).collect(Collectors.toList()); + } + + /** + * @return A map representing this config object as a RollupCaps aggregation object + */ + public Map toAggCap() { + Map map = new HashMap<>(2); + map.put("agg", HistogramAggregationBuilder.NAME); + map.put(INTERVAL.getPreferredName(), interval); + return map; + } + + public Map getMetadata() { + return Collections.singletonMap(RollupField.formatMetaField(RollupField.INTERVAL), interval); + } + + public Set getAllFields() { + return Arrays.stream(fields).collect(Collectors.toSet()); + } + + public void validateMappings(Map> fieldCapsResponse, + ActionRequestValidationException validationException) { + + Arrays.stream(fields).forEach(field -> { + Map fieldCaps = fieldCapsResponse.get(field); + if (fieldCaps != null && fieldCaps.isEmpty() == false) { + fieldCaps.forEach((key, value) -> { + if (MAPPER_TYPES.contains(key)) { + if (value.isAggregatable() == false) { + validationException.addValidationError("The field [" + field + "] must be aggregatable across all indices, " + + "but is not."); + } + } else { + validationException.addValidationError("The field referenced by a histo group must be a [numeric] type, " + + "but found " + fieldCaps.keySet().toString() + " for field [" + field + "]"); + } + }); + } else { + validationException.addValidationError("Could not find a [numeric] field with name [" + field + + "] in any of the indices matching the index pattern."); + } + }); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(INTERVAL.getPreferredName(), interval); + builder.field(FIELDS.getPreferredName(), fields); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(interval); + out.writeStringArray(fields); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + HistoGroupConfig that = (HistoGroupConfig) other; + + return Objects.equals(this.interval, that.interval) + && Arrays.equals(this.fields, that.fields); + } + + @Override + public int hashCode() { + return Objects.hash(interval, Arrays.hashCode(fields)); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + public static class Builder { + private long interval = 0; + private List fields; + + public long getInterval() { + return interval; + } + + public HistoGroupConfig.Builder setInterval(long interval) { + this.interval = interval; + return this; + } + + public List getFields() { + return fields; + } + + public HistoGroupConfig.Builder setFields(List fields) { + this.fields = fields; + return this; + } + + public HistoGroupConfig build() { + if (interval <= 0) { + throw new IllegalArgumentException("Parameter [" + INTERVAL.getPreferredName() + "] must be a positive long."); + } + if (fields == null || fields.isEmpty()) { + throw new IllegalArgumentException("Parameter [" + FIELDS + "] must have at least one value."); + } + return new 
HistoGroupConfig(interval, fields.toArray(new String[0])); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/IndexerState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/IndexerState.java new file mode 100644 index 0000000000000..6e211c1df9e3e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/IndexerState.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Locale; + +/** + * IndexerState represents the internal state of the indexer. It + * is also persistent when changing from started/stopped in case the allocated + * task is restarted elsewhere. + */ +public enum IndexerState implements Writeable { + // Indexer is running, but not actively indexing data (e.g. it's idle) + STARTED, + + // Indexer is actively indexing data + INDEXING, + + // Transition state to where an indexer has acknowledged the stop + // but is still in process of halting + STOPPING, + + // Indexer is "paused" and ignoring scheduled triggers + STOPPED, + + // Something (internal or external) has requested the indexer abort and shutdown + ABORTING; + + public final ParseField STATE = new ParseField("job_state"); + + public static IndexerState fromString(String name) { + return valueOf(name.trim().toUpperCase(Locale.ROOT)); + } + + public static IndexerState fromStream(StreamInput in) throws IOException { + return in.readEnum(IndexerState.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + IndexerState state = this; + out.writeEnum(state); + } + + public String value() { + return name().toLowerCase(Locale.ROOT); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java new file mode 100644 index 0000000000000..52f565ee6ae39 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java @@ -0,0 +1,261 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountAggregationBuilder; +import org.elasticsearch.search.aggregations.support.ValueType; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; +import org.elasticsearch.xpack.core.rollup.RollupField; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * The configuration object for the metrics portion of a rollup job config + * + * { + * "metrics": [ + * { + * "field": "foo", + * "metrics": [ "min", "max", "sum"] + * }, + * { + * "field": "bar", + * "metrics": [ "max" ] + * } + * ] + * } + */ +public class MetricConfig implements Writeable, ToXContentFragment { + private static final String NAME = "metric_config"; + + private String field; + private List metrics; + + private static final ParseField FIELD = new ParseField("field"); + private static final ParseField METRICS = new ParseField("metrics"); + + // TODO: replace these with an enum + private static final ParseField MIN = new ParseField("min"); + private static final ParseField MAX = new ParseField("max"); + private static final ParseField SUM = new ParseField("sum"); + private static final ParseField AVG = new ParseField("avg"); + private static final ParseField VALUE_COUNT = new ParseField("value_count"); + + private static final List MAPPER_TYPES; + static { + List types = Stream.of(NumberFieldMapper.NumberType.values()) + .map(NumberFieldMapper.NumberType::typeName) + .collect(Collectors.toList()); + types.add("scaled_float"); // have to add manually since scaled_float is in a module + MAPPER_TYPES = types; + } + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + NAME, a -> new MetricConfig((String)a[0], (List) a[1])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), FIELD); + PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), METRICS); + } + + MetricConfig(String name, List metrics) { + this.field = name; + this.metrics = metrics; + } + + MetricConfig(StreamInput in) throws IOException { + field = in.readString(); + metrics = in.readList(StreamInput::readString); + } + + public String getField() { + return field; + } + + public List getMetrics() { + return metrics; + } + + /** + * This returns a set of aggregation builders which 
represent the configured + * set of metrics. Used by the rollup indexer to iterate over historical data + */ + public List toBuilders() { + if (metrics.size() == 0) { + return Collections.emptyList(); + } + + List aggs = new ArrayList<>(metrics.size()); + for (String metric : metrics) { + ValuesSourceAggregationBuilder.LeafOnly newBuilder; + if (metric.equals(MIN.getPreferredName())) { + newBuilder = new MinAggregationBuilder(RollupField.formatFieldName(field, MinAggregationBuilder.NAME, RollupField.VALUE)); + } else if (metric.equals(MAX.getPreferredName())) { + newBuilder = new MaxAggregationBuilder(RollupField.formatFieldName(field, MaxAggregationBuilder.NAME, RollupField.VALUE)); + } else if (metric.equals(AVG.getPreferredName())) { + // Avgs are sum + count + newBuilder = new SumAggregationBuilder(RollupField.formatFieldName(field, AvgAggregationBuilder.NAME, RollupField.VALUE)); + ValuesSourceAggregationBuilder.LeafOnly countBuilder + = new ValueCountAggregationBuilder( + RollupField.formatFieldName(field, AvgAggregationBuilder.NAME, RollupField.COUNT_FIELD), ValueType.NUMERIC); + countBuilder.field(field); + aggs.add(countBuilder); + } else if (metric.equals(SUM.getPreferredName())) { + newBuilder = new SumAggregationBuilder(RollupField.formatFieldName(field, SumAggregationBuilder.NAME, RollupField.VALUE)); + } else if (metric.equals(VALUE_COUNT.getPreferredName())) { + // TODO allow non-numeric value_counts. + // Hardcoding this is fine for now since the job validation guarantees that all metric fields are numerics + newBuilder = new ValueCountAggregationBuilder( + RollupField.formatFieldName(field, ValueCountAggregationBuilder.NAME, RollupField.VALUE), ValueType.NUMERIC); + } else { + throw new IllegalArgumentException("Unsupported metric type [" + metric + "]"); + } + newBuilder.field(field); + aggs.add(newBuilder); + } + return aggs; + } + + /** + * @return A map representing this config object as a RollupCaps aggregation object + */ + public List> toAggCap() { + return metrics.stream().map(metric -> Collections.singletonMap("agg", (Object)metric)).collect(Collectors.toList()); + } + + public void validateMappings(Map> fieldCapsResponse, + ActionRequestValidationException validationException) { + + Map fieldCaps = fieldCapsResponse.get(field); + if (fieldCaps != null && fieldCaps.isEmpty() == false) { + fieldCaps.forEach((key, value) -> { + if (MAPPER_TYPES.contains(key)) { + if (value.isAggregatable() == false) { + validationException.addValidationError("The field [" + field + "] must be aggregatable across all indices, " + + "but is not."); + } + } else { + validationException.addValidationError("The field referenced by a metric group must be a [numeric] type, but found " + + fieldCaps.keySet().toString() + " for field [" + field + "]"); + } + }); + } else { + validationException.addValidationError("Could not find a [numeric] field with name [" + field + "] in any of the " + + "indices matching the index pattern."); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(FIELD.getPreferredName(), field); + builder.field(METRICS.getPreferredName(), metrics); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(field); + out.writeStringList(metrics); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + 
MetricConfig that = (MetricConfig) other; + + return Objects.equals(this.field, that.field) + && Objects.equals(this.metrics, that.metrics); + } + + @Override + public int hashCode() { + return Objects.hash(field, metrics); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + + public static class Builder { + private String field; + private List metrics; + + public Builder() { + } + + public Builder(MetricConfig config) { + this.field = config.getField(); + this.metrics = config.getMetrics(); + } + + public String getField() { + return field; + } + + public MetricConfig.Builder setField(String field) { + this.field = field; + return this; + } + + public List getMetrics() { + return metrics; + } + + public MetricConfig.Builder setMetrics(List metrics) { + this.metrics = metrics; + return this; + } + + public MetricConfig build() { + if (Strings.isNullOrEmpty(field) == true) { + throw new IllegalArgumentException("Parameter [" + FIELD.getPreferredName() + "] must be a non-null, non-empty string."); + } + if (metrics == null || metrics.isEmpty()) { + throw new IllegalArgumentException("Parameter [" + METRICS.getPreferredName() + + "] must be a non-null, non-empty array of strings."); + } + metrics.forEach(m -> { + if (RollupField.SUPPORTED_METRICS.contains(m) == false) { + throw new IllegalArgumentException("Unsupported metric [" + m + "]. " + + "Supported metrics include: " + RollupField.SUPPORTED_METRICS); + } + }); + return new MetricConfig(field, metrics); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJob.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJob.java new file mode 100644 index 0000000000000..e71186b60e020 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJob.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.persistent.PersistentTaskParams; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +/** + * This class is the main wrapper object that is serialized into the PersistentTask's cluster state. + * It holds the config (RollupJobConfig) and a map of authentication headers. 
Only RollupJobConfig + * is ever serialized to the user, so the headers should never leak + */ +public class RollupJob extends AbstractDiffable implements PersistentTaskParams { + + public static final String NAME = "xpack/rollup/job"; + + private final Map headers; + private final RollupJobConfig config; + + private static final ParseField CONFIG = new ParseField("config"); + private static final ParseField HEADERS = new ParseField("headers"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER + = new ConstructingObjectParser<>(NAME, a -> new RollupJob((RollupJobConfig) a[0], (Map) a[1])); + + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> RollupJobConfig.PARSER.apply(p,c).build(), CONFIG); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> p.mapStrings(), HEADERS); + } + + public RollupJob(RollupJobConfig config, Map headers) { + this.config = Objects.requireNonNull(config); + this.headers = headers == null ? Collections.emptyMap() : headers; + } + + public RollupJob(StreamInput in) throws IOException { + this.config = new RollupJobConfig(in); + headers = in.readMap(StreamInput::readString, StreamInput::readString); + } + + public RollupJobConfig getConfig() { + return config; + } + + public Map getHeaders() { + return headers; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(CONFIG.getPreferredName(), config); + builder.field(HEADERS.getPreferredName(), headers); + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + config.writeTo(out); + out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); + } + + static Diff readJobDiffFrom(StreamInput in) throws IOException { + return AbstractDiffable.readDiffFrom(RollupJob::new, in); + } + + public static RollupJob fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + RollupJob that = (RollupJob) other; + + return Objects.equals(this.config, that.config) + && Objects.equals(this.headers, that.headers); + } + + @Override + public int hashCode() { + return Objects.hash(config, headers); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java new file mode 100644 index 0000000000000..a799cbe944715 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java @@ -0,0 +1,407 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.rollup.RollupField; +import org.elasticsearch.xpack.core.scheduler.Cron; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * This class holds the configuration details of a rollup job, such as the groupings, metrics, what + * index to rollup and where to roll them to. + */ +public class RollupJobConfig implements NamedWriteable, ToXContentObject { + private static final String NAME = "xpack/rollup/jobconfig"; + + public static final ParseField TIMEOUT = new ParseField("timeout"); + public static final ParseField CURRENT = new ParseField("current"); + public static final ParseField CRON = new ParseField("cron"); + public static final ParseField PAGE_SIZE = new ParseField("page_size"); + + private static final ParseField INDEX_PATTERN = new ParseField("index_pattern"); + private static final ParseField ROLLUP_INDEX = new ParseField("rollup_index"); + private static final ParseField GROUPS = new ParseField("groups"); + private static final ParseField METRICS = new ParseField("metrics"); + + private String id; + private String indexPattern; + private String rollupIndex; + private GroupConfig groupConfig; + private List metricsConfig = Collections.emptyList(); + private TimeValue timeout = TimeValue.timeValueSeconds(20); + private String cron; + private int pageSize; + + public static final ObjectParser PARSER = new ObjectParser<>(NAME, false, RollupJobConfig.Builder::new); + + static { + PARSER.declareString(RollupJobConfig.Builder::setId, RollupField.ID); + PARSER.declareObject(RollupJobConfig.Builder::setGroupConfig, (p, c) -> GroupConfig.PARSER.apply(p,c).build(), GROUPS); + PARSER.declareObjectArray(RollupJobConfig.Builder::setMetricsConfig, MetricConfig.PARSER, METRICS); + PARSER.declareString((params, val) -> + params.setTimeout(TimeValue.parseTimeValue(val, TIMEOUT.getPreferredName())), TIMEOUT); + PARSER.declareString(RollupJobConfig.Builder::setIndexPattern, INDEX_PATTERN); + PARSER.declareString(RollupJobConfig.Builder::setRollupIndex, ROLLUP_INDEX); + PARSER.declareString(RollupJobConfig.Builder::setCron, CRON); + PARSER.declareInt(RollupJobConfig.Builder::setPageSize, PAGE_SIZE); + } + + RollupJobConfig(String id, String indexPattern, String rollupIndex, String cron, int pageSize, GroupConfig groupConfig, + List metricsConfig, TimeValue timeout) { + this.id = id; + this.indexPattern = indexPattern; + this.rollupIndex = rollupIndex; + this.groupConfig = groupConfig; + this.metricsConfig = metricsConfig; + this.timeout = timeout; + this.cron = cron; 
+ this.pageSize = pageSize; + } + + public RollupJobConfig(StreamInput in) throws IOException { + id = in.readString(); + indexPattern = in.readString(); + rollupIndex = in.readString(); + cron = in.readString(); + groupConfig = in.readOptionalWriteable(GroupConfig::new); + metricsConfig = in.readList(MetricConfig::new); + timeout = in.readTimeValue(); + pageSize = in.readInt(); + } + + public RollupJobConfig() {} + + public String getId() { + return id; + } + + public GroupConfig getGroupConfig() { + return groupConfig; + } + + public List getMetricsConfig() { + return metricsConfig; + } + + public TimeValue getTimeout() { + return timeout; + } + + public String getIndexPattern() { + return indexPattern; + } + + public String getRollupIndex() { + return rollupIndex; + } + + public String getCron() { + return cron; + } + + public int getPageSize() { + return pageSize; + } + + @Override + public String getWriteableName() { + return NAME; + } + + public Set getAllFields() { + Set fields = new HashSet<>(groupConfig.getAllFields()); + fields.addAll(metricsConfig.stream().map(MetricConfig::getField).collect(Collectors.toSet())); + return fields; + } + + public void validateMappings(Map> fieldCapsResponse, + ActionRequestValidationException validationException) { + groupConfig.validateMappings(fieldCapsResponse, validationException); + for (MetricConfig m : metricsConfig) { + m.validateMappings(fieldCapsResponse, validationException); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field(RollupField.ID.getPreferredName(), id); + builder.field(INDEX_PATTERN.getPreferredName(), indexPattern); + builder.field(ROLLUP_INDEX.getPreferredName(), rollupIndex); + builder.field(CRON.getPreferredName(), cron); + if (groupConfig != null) { + builder.field(GROUPS.getPreferredName(), groupConfig); + } + if (metricsConfig != null) { + builder.startArray(METRICS.getPreferredName()); + for (MetricConfig config : metricsConfig) { + builder.startObject(); + config.toXContent(builder, params); + builder.endObject(); + } + builder.endArray(); + } + if (timeout != null) { + builder.field(TIMEOUT.getPreferredName(), timeout); + } + builder.field(PAGE_SIZE.getPreferredName(), pageSize); + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(id); + out.writeString(indexPattern); + out.writeString(rollupIndex); + out.writeString(cron); + out.writeOptionalWriteable(groupConfig); + out.writeList(metricsConfig); + out.writeTimeValue(timeout); + out.writeInt(pageSize); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + RollupJobConfig that = (RollupJobConfig) other; + + return Objects.equals(this.id, that.id) + && Objects.equals(this.indexPattern, that.indexPattern) + && Objects.equals(this.rollupIndex, that.rollupIndex) + && Objects.equals(this.cron, that.cron) + && Objects.equals(this.groupConfig, that.groupConfig) + && Objects.equals(this.metricsConfig, that.metricsConfig) + && Objects.equals(this.timeout, that.timeout) + && Objects.equals(this.pageSize, that.pageSize); + } + + @Override + public int hashCode() { + return Objects.hash(id, indexPattern, rollupIndex, cron, groupConfig, + metricsConfig, timeout, pageSize); + } + + @Override + public String toString() { + return 
Strings.toString(this, true, true); + } + + /** + * Same as toString() but more explicitly named so the caller knows this is turned into JSON + */ + public String toJSONString() { + return toString(); + } + + public static class Builder implements Writeable, ToXContentObject { + private String id; + private String indexPattern; + private String rollupIndex; + private GroupConfig groupConfig; + private List metricsConfig = Collections.emptyList(); + private TimeValue timeout = TimeValue.timeValueSeconds(20); + private String cron; + private int pageSize = 0; + + public Builder(RollupJobConfig job) { + this.id = job.getId(); + this.indexPattern = job.getIndexPattern(); + this.rollupIndex = job.getRollupIndex(); + this.groupConfig = job.getGroupConfig(); + this.metricsConfig = job.getMetricsConfig(); + this.timeout = job.getTimeout(); + this.cron = job.getCron(); + this.pageSize = job.getPageSize(); + } + + public static RollupJobConfig.Builder fromXContent(String id, XContentParser parser) { + RollupJobConfig.Builder config = RollupJobConfig.PARSER.apply(parser, null); + if (id != null) { + config.setId(id); + } + return config; + } + + public Builder() {} + + public String getId() { + return id; + } + + public RollupJobConfig.Builder setId(String id) { + this.id = id; + return this; + } + + public String getIndexPattern() { + return indexPattern; + } + + public RollupJobConfig.Builder setIndexPattern(String indexPattern) { + this.indexPattern = indexPattern; + return this; + } + + public String getRollupIndex() { + return rollupIndex; + } + + public RollupJobConfig.Builder setRollupIndex(String rollupIndex) { + this.rollupIndex = rollupIndex; + return this; + } + + public GroupConfig getGroupConfig() { + return groupConfig; + } + + public RollupJobConfig.Builder setGroupConfig(GroupConfig groupConfig) { + this.groupConfig = groupConfig; + return this; + } + + public List getMetricsConfig() { + return metricsConfig; + } + + public RollupJobConfig.Builder setMetricsConfig(List metricsConfig) { + this.metricsConfig = metricsConfig; + return this; + } + + public TimeValue getTimeout() { + return timeout; + } + + public RollupJobConfig.Builder setTimeout(TimeValue timeout) { + this.timeout = timeout; + return this; + } + + public String getCron() { + return cron; + } + + public RollupJobConfig.Builder setCron(String cron) { + this.cron = cron; + return this; + } + + public int getPageSize() { + return pageSize; + } + + public RollupJobConfig.Builder setPageSize(int pageSize) { + this.pageSize = pageSize; + return this; + } + + public RollupJobConfig build() { + if (id == null || id.isEmpty()) { + throw new IllegalArgumentException("An ID is mandatory."); + } + if (indexPattern == null || indexPattern.isEmpty()) { + throw new IllegalArgumentException("An index pattern is mandatory."); + } + if (rollupIndex == null || rollupIndex.isEmpty()) { + throw new IllegalArgumentException("A rollup index name is mandatory."); + } + if (cron == null || cron.isEmpty()) { + throw new IllegalArgumentException("A cron schedule is mandatory."); + } + if (pageSize <= 0) { + throw new IllegalArgumentException("Parameter [" + PAGE_SIZE.getPreferredName() + + "] is mandatory and must be a positive long."); + } + // Cron doesn't have a parse helper method to see if the cron is valid, + // so just construct a temporary cron object and if the cron is bad, it'll + // throw an exception + Cron testCron = new Cron(cron); + if (groupConfig == null && (metricsConfig == null || metricsConfig.isEmpty())) { + throw new 
IllegalArgumentException("At least one grouping or metric must be configured."); + } + return new RollupJobConfig(id, indexPattern, rollupIndex, cron, pageSize, groupConfig, + metricsConfig, timeout); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (id != null) { + builder.field(RollupField.ID.getPreferredName(), id); + } + if (indexPattern != null) { + builder.field(INDEX_PATTERN.getPreferredName(), indexPattern); + } + if (indexPattern != null) { + builder.field(ROLLUP_INDEX.getPreferredName(), rollupIndex); + } + if (cron != null) { + builder.field(CRON.getPreferredName(), cron); + } + if (groupConfig != null) { + builder.field(GROUPS.getPreferredName(), groupConfig); + } + if (metricsConfig != null) { + builder.startArray(METRICS.getPreferredName()); + for (MetricConfig config : metricsConfig) { + builder.startObject(); + config.toXContent(builder, params); + builder.endObject(); + } + builder.endArray(); + } + if (timeout != null) { + builder.field(TIMEOUT.getPreferredName(), timeout); + } + builder.field(PAGE_SIZE.getPreferredName(), pageSize); + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(id); + out.writeOptionalString(indexPattern); + out.writeOptionalString(rollupIndex); + out.writeOptionalString(cron); + out.writeOptionalWriteable(groupConfig); + out.writeList(metricsConfig); + out.writeTimeValue(timeout); + out.writeInt(pageSize); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStats.java new file mode 100644 index 0000000000000..06cfb520af552 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStats.java @@ -0,0 +1,156 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * This class holds the runtime statistics of a job. The stats are not used by any internal process + * and are only for external monitoring/reference. Statistics are not persisted with the job, so if the + * allocated task is shutdown/restarted on a different node all the stats will reset. 
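+ *
+ * A hypothetical example of the serialized stats object (field values are illustrative only):
+ *
+ *   {
+ *     "pages_processed" : 42,
+ *     "documents_processed" : 42000,
+ *     "rollups_indexed" : 420,
+ *     "trigger_count" : 7
+ *   }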
+ */ +public class RollupJobStats implements ToXContentObject, Writeable { + + public static final ParseField NAME = new ParseField("job_stats"); + + private static ParseField NUM_PAGES = new ParseField("pages_processed"); + private static ParseField NUM_DOCUMENTS = new ParseField("documents_processed"); + private static ParseField NUM_ROLLUPS = new ParseField("rollups_indexed"); + private static ParseField NUM_INVOCATIONS = new ParseField("trigger_count"); + + private long numPages = 0; + private long numDocuments = 0; + private long numRollups = 0; + private long numInvocations = 0; + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>(NAME.getPreferredName(), + args -> new RollupJobStats((long) args[0], (long) args[1], (long) args[2], (long) args[3])); + + static { + PARSER.declareLong(constructorArg(), NUM_PAGES); + PARSER.declareLong(constructorArg(), NUM_DOCUMENTS); + PARSER.declareLong(constructorArg(), NUM_ROLLUPS); + PARSER.declareLong(constructorArg(), NUM_INVOCATIONS); + } + + public RollupJobStats() { + } + + public RollupJobStats(long numPages, long numDocuments, long numRollups, long numInvocations) { + this.numPages = numPages; + this.numDocuments = numDocuments; + this.numRollups = numRollups; + this.numInvocations = numInvocations; + } + + public RollupJobStats(StreamInput in) throws IOException { + this.numPages = in.readVLong(); + this.numDocuments = in.readVLong(); + this.numRollups = in.readVLong(); + this.numInvocations = in.readVLong(); + } + + public long getNumPages() { + return numPages; + } + + public long getNumDocuments() { + return numDocuments; + } + + public long getNumInvocations() { + return numInvocations; + } + + public long getNumRollups() { + return numRollups; + } + + public void incrementNumPages(long n) { + assert(n >= 0); + numPages += n; + } + + public void incrementNumDocuments(long n) { + assert(n >= 0); + numDocuments += n; + } + + public void incrementNumInvocations(long n) { + assert(n >= 0); + numInvocations += n; + } + + public void incrementNumRollups(long n) { + assert(n >= 0); + numRollups += n; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(numPages); + out.writeVLong(numDocuments); + out.writeVLong(numRollups); + out.writeVLong(numInvocations); + } + + public static RollupJobStats fromXContent(XContentParser parser) { + try { + return PARSER.parse(parser, null); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(NUM_PAGES.getPreferredName(), numPages); + builder.field(NUM_DOCUMENTS.getPreferredName(), numDocuments); + builder.field(NUM_ROLLUPS.getPreferredName(), numRollups); + builder.field(NUM_INVOCATIONS.getPreferredName(), numInvocations); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + RollupJobStats that = (RollupJobStats) other; + + return Objects.equals(this.numPages, that.numPages) + && Objects.equals(this.numDocuments, that.numDocuments) + && Objects.equals(this.numRollups, that.numRollups) + && Objects.equals(this.numInvocations, that.numInvocations); + } + + @Override + public int hashCode() { + return Objects.hash(numPages, numDocuments, numRollups, numInvocations); + } + +} + diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java new file mode 100644 index 0000000000000..86bc95e092ca3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.rollup.job; + + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.tasks.Task; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.TreeMap; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * This class is essentially just a wrapper around the IndexerState and the + * indexer's current position. When the allocated task updates its status, + * it is providing a new version of this. + */ +public class RollupJobStatus implements Task.Status { + public static final String NAME = "xpack/rollup/job"; + + private final IndexerState state; + + @Nullable + private final TreeMap currentPosition; + + private static final ParseField STATE = new ParseField("job_state"); + private static final ParseField CURRENT_POSITION = new ParseField("current_position"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>(NAME, + args -> new RollupJobStatus((IndexerState) args[0], (HashMap) args[1])); + + static { + PARSER.declareField(constructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return IndexerState.fromString(p.text()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, STATE, ObjectParser.ValueType.STRING); + PARSER.declareField(optionalConstructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.START_OBJECT) { + return p.map(); + } + if (p.currentToken() == XContentParser.Token.VALUE_NULL) { + return null; + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, CURRENT_POSITION, ObjectParser.ValueType.VALUE_OBJECT_ARRAY); + } + + public RollupJobStatus(IndexerState state, @Nullable Map position) { + this.state = state; + this.currentPosition = position == null ? null : new TreeMap<>(position); + } + + public RollupJobStatus(StreamInput in) throws IOException { + state = IndexerState.fromStream(in); + currentPosition = in.readBoolean() ? 
new TreeMap<>(in.readMap()) : null; + } + + public IndexerState getState() { + return state; + } + + public Map getPosition() { + return currentPosition; + } + + public static RollupJobStatus fromXContent(XContentParser parser) { + try { + return PARSER.parse(parser, null); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(STATE.getPreferredName(), state.value()); + if (currentPosition != null) { + builder.field(CURRENT_POSITION.getPreferredName(), currentPosition); + } + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + state.writeTo(out); + out.writeBoolean(currentPosition != null); + if (currentPosition != null) { + out.writeMap(currentPosition); + } + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + RollupJobStatus that = (RollupJobStatus) other; + + return Objects.equals(this.state, that.state) + && Objects.equals(this.currentPosition, that.currentPosition); + } + + @Override + public int hashCode() { + return Objects.hash(state, currentPosition); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfig.java new file mode 100644 index 0000000000000..2f1c35a73edb4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfig.java @@ -0,0 +1,193 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.mapper.KeywordFieldMapper; +import org.elasticsearch.index.mapper.TextFieldMapper; +import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; +import org.elasticsearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.xpack.core.rollup.RollupField; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * The configuration object for the histograms in the rollup config + * + * { + * "groups": [ + * "terms": { + * "fields" : [ "foo", "bar" ] + * } + * ] + * } + */ +public class TermsGroupConfig implements Writeable, ToXContentFragment { + private static final String NAME = "term_group_config"; + public static final ObjectParser PARSER = new ObjectParser<>(NAME, TermsGroupConfig.Builder::new); + + private static final ParseField FIELDS = new ParseField("fields"); + private static final List FLOAT_TYPES = Arrays.asList("half_float", "float", "double", "scaled_float"); + private static final List NATURAL_TYPES = Arrays.asList("byte", "short", "integer", "long"); + private final String[] fields; + + static { + PARSER.declareStringArray(TermsGroupConfig.Builder::setFields, FIELDS); + } + + private TermsGroupConfig(String[] fields) { + this.fields = fields; + } + + TermsGroupConfig(StreamInput in) throws IOException { + fields = in.readStringArray(); + } + + public String[] getFields() { + return fields; + } + + /** + * This returns a set of aggregation builders which represent the configured + * set of date histograms. 
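+ * (In this class the builders are TermsValuesSourceBuilder instances: as a sketch with
+ * hypothetical values, fields ["foo", "bar"] yield one builder per field.)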
Used by the rollup indexer to iterate over historical data + */ + public List> toBuilders() { + if (fields.length == 0) { + return Collections.emptyList(); + } + + return Arrays.stream(fields).map(f -> { + TermsValuesSourceBuilder vsBuilder + = new TermsValuesSourceBuilder(RollupField.formatIndexerAggName(f, TermsAggregationBuilder.NAME)); + vsBuilder.field(f); + return vsBuilder; + }).collect(Collectors.toList()); + } + + /** + * @return A map representing this config object as a RollupCaps aggregation object + */ + public Map toAggCap() { + Map map = new HashMap<>(1); + map.put("agg", TermsAggregationBuilder.NAME); + return map; + } + + public Map getMetadata() { + return Collections.emptyMap(); + } + + public Set getAllFields() { + return Arrays.stream(fields).collect(Collectors.toSet()); + } + + public void validateMappings(Map> fieldCapsResponse, + ActionRequestValidationException validationException) { + + Arrays.stream(fields).forEach(field -> { + Map fieldCaps = fieldCapsResponse.get(field); + if (fieldCaps != null && fieldCaps.isEmpty() == false) { + fieldCaps.forEach((key, value) -> { + if (key.equals(KeywordFieldMapper.CONTENT_TYPE) || key.equals(TextFieldMapper.CONTENT_TYPE)) { + if (value.isAggregatable() == false) { + validationException.addValidationError("The field [" + field + "] must be aggregatable across all indices, " + + "but is not."); + } + } else if (FLOAT_TYPES.contains(key)) { + if (value.isAggregatable() == false) { + validationException.addValidationError("The field [" + field + "] must be aggregatable across all indices, " + + "but is not."); + } + } else if (NATURAL_TYPES.contains(key)) { + if (value.isAggregatable() == false) { + validationException.addValidationError("The field [" + field + "] must be aggregatable across all indices, " + + "but is not."); + } + } else { + validationException.addValidationError("The field referenced by a terms group must be a [numeric] or " + + "[keyword/text] type, but found " + fieldCaps.keySet().toString() + " for field [" + field + "]"); + } + }); + } else { + validationException.addValidationError("Could not find a [numeric] or [keyword/text] field with name [" + field + + "] in any of the indices matching the index pattern."); + } + }); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(FIELDS.getPreferredName(), fields); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeStringArray(fields); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + TermsGroupConfig that = (TermsGroupConfig) other; + + return Arrays.equals(this.fields, that.fields); + } + + @Override + public int hashCode() { + return Arrays.hashCode(fields); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + public static class Builder { + private List fields; + + public List getFields() { + return fields; + } + + public TermsGroupConfig.Builder setFields(List fields) { + this.fields = fields; + return this; + } + + public TermsGroupConfig build() { + if (fields == null || fields.isEmpty()) { + throw new IllegalArgumentException("Parameter [" + FIELDS + "] must have at least one value."); + } + return new TermsGroupConfig(fields.toArray(new String[0])); + } + } +} diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java new file mode 100644 index 0000000000000..f0bc4b98db1b4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java @@ -0,0 +1,1512 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.scheduler; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.joda.time.DateTimeZone; +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.DateTimeFormatter; + +import java.io.IOException; +import java.util.Calendar; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.SortedSet; +import java.util.StringTokenizer; +import java.util.TimeZone; +import java.util.TreeSet; + +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArgument; + + +/** + * + * THIS CLASS IS A COPY OF + * + * {@code CronExpression} + * FROM THE QUARTZ PROJECT + * + * + * Provides a parser and evaluator for unix-like cron expressions. Cron + * expressions provide the ability to specify complex time combinations such as + * "At 8:00am every Monday through Friday" or "At 1:30am every + * last Friday of the month". + *

+ * Cron expressions are comprised of 6 required fields and one optional field
+ * separated by white space. The fields respectively are described as follows:
+ *
+ * Fields in cron expressions:
+ *
+ *   Field Name        Allowed Values       Allowed Special Characters
+ *   ----------        --------------       --------------------------
+ *   Seconds           0-59                 , - * /
+ *   Minutes           0-59                 , - * /
+ *   Hours             0-23                 , - * /
+ *   Day-of-month      1-31                 , - * ? / L W
+ *   Month             0-11 or JAN-DEC      , - * /
+ *   Day-of-Week       1-7 or SUN-SAT       , - * ? / L #
+ *   Year (Optional)   empty, 1970-2199     , - * /
+ *
+ * The '*' character is used to specify all values. For example, "*" + * in the minute field means "every minute". + *

+ * The '?' character is allowed for the day-of-month and day-of-week fields. It + * is used to specify 'no specific value'. This is useful when you need to + * specify something in one of the two fields, but not the other. + *

+ * The '-' character is used to specify ranges. For example, "10-12" in
+ * the hour field means "the hours 10, 11 and 12".
+ *

+ * The ',' character is used to specify additional values. For example + * "MON,WED,FRI" in the day-of-week field means "the days Monday, + * Wednesday, and Friday". + *

+ * The '/' character is used to specify increments. For example "0/15"
+ * in the seconds field means "the seconds 0, 15, 30, and 45", and
+ * "5/15" in the seconds field means "the seconds 5, 20, 35, and
+ * 50". Specifying '*' before the '/' is equivalent to specifying 0 as
+ * the value to start with. Essentially, for each field in the expression, there
+ * is a set of numbers that can be turned on or off. For seconds and minutes,
+ * the numbers range from 0 to 59. For hours 0 to 23, for days of the month 0 to
+ * 31, and for months 0 to 11 (JAN to DEC). The "/" character simply helps you turn
+ * on every "nth" value in the given set. Thus "7/6" in the
+ * month field only turns on month "7"; it does NOT mean every 6th
+ * month - please note that subtlety.
+ *

+ * The 'L' character is allowed for the day-of-month and day-of-week fields. + * This character is short-hand for "last", but it has different + * meaning in each of the two fields. For example, the value "L" in + * the day-of-month field means "the last day of the month" - day 31 + * for January, day 28 for February on non-leap years. If used in the + * day-of-week field by itself, it simply means "7" or + * "SAT". But if used in the day-of-week field after another value, it + * means "the last xxx day of the month" - for example "6L" + * means "the last friday of the month". You can also specify an offset + * from the last day of the month, such as "L-3" which would mean the third-to-last + * day of the calendar month. When using the 'L' option, it is important not to + * specify lists, or ranges of values, as you'll get confusing/unexpected results. + *

+ * The 'W' character is allowed for the day-of-month field. This character + * is used to specify the weekday (Monday-Friday) nearest the given day. As an + * example, if you were to specify "15W" as the value for the + * day-of-month field, the meaning is: "the nearest weekday to the 15th of + * the month". So if the 15th is a Saturday, the trigger will fire on + * Friday the 14th. If the 15th is a Sunday, the trigger will fire on Monday the + * 16th. If the 15th is a Tuesday, then it will fire on Tuesday the 15th. + * However if you specify "1W" as the value for day-of-month, and the + * 1st is a Saturday, the trigger will fire on Monday the 3rd, as it will not + * 'jump' over the boundary of a month's days. The 'W' character can only be + * specified when the day-of-month is a single day, not a range or list of days. + *

+ * The 'L' and 'W' characters can also be combined for the day-of-month + * expression to yield 'LW', which translates to "last weekday of the + * month". + *

+ * The '#' character is allowed for the day-of-week field. This character is + * used to specify "the nth" XXX day of the month. For example, the + * value of "6#3" in the day-of-week field means the third Friday of + * the month (day 6 = Friday and "#3" = the 3rd one in the month). + * Other examples: "2#1" = the first Monday of the month and + * "4#5" = the fifth Wednesday of the month. Note that if you specify + * "#5" and there is not 5 of the given day-of-week in the month, then + * no firing will occur that month. If the '#' character is used, there can + * only be one expression in the day-of-week field ("3#1,6#3" is + * not valid, since there are two expressions). + * + *

+ * The legal characters and the names of months and days of the week are not + * case sensitive. + * + *
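+ *
+ * A few illustrative expressions and the schedules they describe (added examples,
+ * following the field semantics above):
+ *
+ *   "0 0/5 * * * ?"         fires every five minutes
+ *   "0 30 10 ? * MON-FRI"   fires at 10:30am every weekday
+ *   "0 0 12 L * ?"          fires at noon on the last day of every month
+ *   "0 0 12 ? * 6#3"        fires at noon on the third Friday of every month
+ *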

+ * NOTES:
+ *
+ *   • Support for specifying both a day-of-week and a day-of-month value is
+ *     not complete (you'll need to use the '?' character in one of these fields).
+ *   • Overflowing ranges are supported - that is, having a larger number on
+ *     the left hand side than the right. You might do 22-2 to catch 10 o'clock
+ *     at night until 2 o'clock in the morning, or you might have NOV-FEB. It is
+ *     very important to note that overuse of overflowing ranges creates ranges
+ *     that don't make sense and no effort has been made to determine which
+ *     interpretation CronExpression chooses. An example would be
+ *     "0 0 14-6 ? * FRI-MON".
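+ *
+ * A minimal usage sketch (illustrative only):
+ *
+ *     Cron cron = new Cron("0 0/5 * * * ?");                              // every five minutes
+ *     long next = cron.getNextValidTimeAfter(System.currentTimeMillis());
+ *     // 'next' is the epoch millisecond of the next matching time, or -1 if none can be computed
+ *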
+ * + * + * @author Sharada Jambula, James House + * @author Contributions from Mads Henderson + * @author Refactoring from CronTrigger to CronExpression by Aaron Craven + */ +public class Cron implements ToXContentFragment { + protected static final TimeZone UTC = DateTimeZone.UTC.toTimeZone(); + protected static final DateTimeFormatter formatter = DateTimeFormat.forPattern("YYYY-MM-dd'T'HH:mm:ss"); + + private static final int SECOND = 0; + private static final int MINUTE = 1; + private static final int HOUR = 2; + private static final int DAY_OF_MONTH = 3; + private static final int MONTH = 4; + private static final int DAY_OF_WEEK = 5; + private static final int YEAR = 6; + private static final int ALL_SPEC_INT = 99; // '*' + private static final int NO_SPEC_INT = 98; // '?' + private static final Integer ALL_SPEC = ALL_SPEC_INT; + private static final Integer NO_SPEC = NO_SPEC_INT; + + private static final Map monthMap = new HashMap<>(20); + private static final Map dayMap = new HashMap<>(60); + static { + monthMap.put("JAN", 0); + monthMap.put("FEB", 1); + monthMap.put("MAR", 2); + monthMap.put("APR", 3); + monthMap.put("MAY", 4); + monthMap.put("JUN", 5); + monthMap.put("JUL", 6); + monthMap.put("AUG", 7); + monthMap.put("SEP", 8); + monthMap.put("OCT", 9); + monthMap.put("NOV", 10); + monthMap.put("DEC", 11); + + dayMap.put("SUN", 1); + dayMap.put("MON", 2); + dayMap.put("TUE", 3); + dayMap.put("WED", 4); + dayMap.put("THU", 5); + dayMap.put("FRI", 6); + dayMap.put("SAT", 7); + } + + private final String expression; + + private transient TreeSet seconds; + private transient TreeSet minutes; + private transient TreeSet hours; + private transient TreeSet daysOfMonth; + private transient TreeSet months; + private transient TreeSet daysOfWeek; + private transient TreeSet years; + + private transient boolean lastdayOfWeek = false; + private transient int nthdayOfWeek = 0; + private transient boolean lastdayOfMonth = false; + private transient boolean nearestWeekday = false; + private transient int lastdayOffset = 0; + private transient boolean expressionParsed = false; + + public static final int MAX_YEAR = Calendar.getInstance(UTC, Locale.ROOT).get(Calendar.YEAR) + 100; + + /** + * Constructs a new CronExpression based on the specified + * parameter. + * + * @param expression String representation of the cron expression the + * new object should represent + * @throws IllegalArgumentException + * if the string expression cannot be parsed into a valid + * CronExpression + */ + public Cron(String expression) { + assert expression != null : "cron expression cannot be null"; + this.expression = expression.toUpperCase(Locale.ROOT); + try { + buildExpression(this.expression); + } catch (Exception e) { + throw illegalArgument("invalid cron expression [{}]", e, expression); + } + } + + /** + * Constructs a new {@code CronExpression} as a copy of an existing + * instance. + * + * @param cron The existing cron expression to be copied + */ + public Cron(Cron cron) { + this(cron.expression); + } + + /** + * Returns the next date/time after the given date/time which + * satisfies the cron expression. + * + * @param time the time since the epoch, or -1 if next time is unsupported (e.g. the cron expression points to + * a time that is previous to the given time) + * @return the next valid time (since the epoch) + */ + public long getNextValidTimeAfter(final long time) { + + // Computation is based on Gregorian year only. 
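+        // The loop below advances field by field (second, minute, hour, day, month, year) to the
+        // next allowed value; whenever a field rolls over, the smaller fields are reset and the
+        // loop restarts, until every field matches or the computation gives up and returns -1.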
+ Calendar cl = new java.util.GregorianCalendar(UTC, Locale.ROOT); + + // move ahead one second, since we're computing the time *after* the + // given time + final long afterTime = time + 1000; + // CronTrigger does not deal with milliseconds + cl.setTimeInMillis(afterTime); + cl.set(Calendar.MILLISECOND, 0); + + boolean gotOne = false; + // loop until we've computed the next time, or we've past the endTime + while (!gotOne) { + + if(cl.get(Calendar.YEAR) > 2999) { // prevent endless loop... + return -1; + } + + SortedSet st = null; + int t = 0; + + int sec = cl.get(Calendar.SECOND); + int min = cl.get(Calendar.MINUTE); + + // get second................................................. + st = seconds.tailSet(sec); + if (st != null && st.size() != 0) { + sec = st.first(); + } else { + sec = seconds.first(); + min++; + cl.set(Calendar.MINUTE, min); + } + cl.set(Calendar.SECOND, sec); + + min = cl.get(Calendar.MINUTE); + int hr = cl.get(Calendar.HOUR_OF_DAY); + t = -1; + + // get minute................................................. + st = minutes.tailSet(min); + if (st != null && st.size() != 0) { + t = min; + min = st.first(); + } else { + min = minutes.first(); + hr++; + } + if (min != t) { + cl.set(Calendar.SECOND, 0); + cl.set(Calendar.MINUTE, min); + setCalendarHour(cl, hr); + continue; + } + cl.set(Calendar.MINUTE, min); + + hr = cl.get(Calendar.HOUR_OF_DAY); + int day = cl.get(Calendar.DAY_OF_MONTH); + t = -1; + + // get hour................................................... + st = hours.tailSet(hr); + if (st != null && st.size() != 0) { + t = hr; + hr = st.first(); + } else { + hr = hours.first(); + day++; + } + if (hr != t) { + cl.set(Calendar.SECOND, 0); + cl.set(Calendar.MINUTE, 0); + cl.set(Calendar.DAY_OF_MONTH, day); + setCalendarHour(cl, hr); + continue; + } + cl.set(Calendar.HOUR_OF_DAY, hr); + + day = cl.get(Calendar.DAY_OF_MONTH); + int mon = cl.get(Calendar.MONTH) + 1; + // '+ 1' because calendar is 0-based for this field, and we are + // 1-based + t = -1; + int tmon = mon; + + // get day................................................... 
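+            // A cron expression may constrain either the day-of-month or the day-of-week, but not
+            // both; the unconstrained field holds NO_SPEC ('?'). The branches below select which
+            // of the two rules drives the day computation.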
+ boolean dayOfMSpec = !daysOfMonth.contains(NO_SPEC); + boolean dayOfWSpec = !daysOfWeek.contains(NO_SPEC); + if (dayOfMSpec && !dayOfWSpec) { // get day by day of month rule + st = daysOfMonth.tailSet(day); + if (lastdayOfMonth) { + if(!nearestWeekday) { + t = day; + day = getLastDayOfMonth(mon, cl.get(Calendar.YEAR)); + day -= lastdayOffset; + if(t > day) { + mon++; + if(mon > 12) { + mon = 1; + tmon = 3333; // ensure test of mon != tmon further below fails + cl.add(Calendar.YEAR, 1); + } + day = 1; + } + } else { + t = day; + day = getLastDayOfMonth(mon, cl.get(Calendar.YEAR)); + day -= lastdayOffset; + + Calendar tcal = Calendar.getInstance(UTC, Locale.ROOT); + tcal.set(Calendar.SECOND, 0); + tcal.set(Calendar.MINUTE, 0); + tcal.set(Calendar.HOUR_OF_DAY, 0); + tcal.set(Calendar.DAY_OF_MONTH, day); + tcal.set(Calendar.MONTH, mon - 1); + tcal.set(Calendar.YEAR, cl.get(Calendar.YEAR)); + + int ldom = getLastDayOfMonth(mon, cl.get(Calendar.YEAR)); + int dow = tcal.get(Calendar.DAY_OF_WEEK); + + if(dow == Calendar.SATURDAY && day == 1) { + day += 2; + } else if(dow == Calendar.SATURDAY) { + day -= 1; + } else if(dow == Calendar.SUNDAY && day == ldom) { + day -= 2; + } else if(dow == Calendar.SUNDAY) { + day += 1; + } + + tcal.set(Calendar.SECOND, sec); + tcal.set(Calendar.MINUTE, min); + tcal.set(Calendar.HOUR_OF_DAY, hr); + tcal.set(Calendar.DAY_OF_MONTH, day); + tcal.set(Calendar.MONTH, mon - 1); + long nTime = tcal.getTimeInMillis(); + if(nTime < afterTime) { + day = 1; + mon++; + } + } + } else if(nearestWeekday) { + t = day; + day = daysOfMonth.first(); + + Calendar tcal = Calendar.getInstance(UTC, Locale.ROOT); + tcal.set(Calendar.SECOND, 0); + tcal.set(Calendar.MINUTE, 0); + tcal.set(Calendar.HOUR_OF_DAY, 0); + tcal.set(Calendar.DAY_OF_MONTH, day); + tcal.set(Calendar.MONTH, mon - 1); + tcal.set(Calendar.YEAR, cl.get(Calendar.YEAR)); + + int ldom = getLastDayOfMonth(mon, cl.get(Calendar.YEAR)); + int dow = tcal.get(Calendar.DAY_OF_WEEK); + + if(dow == Calendar.SATURDAY && day == 1) { + day += 2; + } else if(dow == Calendar.SATURDAY) { + day -= 1; + } else if(dow == Calendar.SUNDAY && day == ldom) { + day -= 2; + } else if(dow == Calendar.SUNDAY) { + day += 1; + } + + + tcal.set(Calendar.SECOND, sec); + tcal.set(Calendar.MINUTE, min); + tcal.set(Calendar.HOUR_OF_DAY, hr); + tcal.set(Calendar.DAY_OF_MONTH, day); + tcal.set(Calendar.MONTH, mon - 1); + long nTime = tcal.getTimeInMillis(); + if(nTime < afterTime) { + day = daysOfMonth.first(); + mon++; + } + } else if (st != null && st.size() != 0) { + t = day; + day = st.first(); + // make sure we don't over-run a short month, such as february + int lastDay = getLastDayOfMonth(mon, cl.get(Calendar.YEAR)); + if (day > lastDay) { + day = daysOfMonth.first(); + mon++; + } + } else { + day = daysOfMonth.first(); + mon++; + } + + if (day != t || mon != tmon) { + cl.set(Calendar.SECOND, 0); + cl.set(Calendar.MINUTE, 0); + cl.set(Calendar.HOUR_OF_DAY, 0); + cl.set(Calendar.DAY_OF_MONTH, day); + cl.set(Calendar.MONTH, mon - 1); + // '- 1' because calendar is 0-based for this field, and we + // are 1-based + continue; + } + } else if (dayOfWSpec && !dayOfMSpec) { // get day by day of week rule + if (lastdayOfWeek) { // are we looking for the last XXX day of + // the month? 
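+                    // e.g. "6L" = the last Friday of the month: first move forward to the desired
+                    // day-of-week, then keep adding whole weeks while the result still falls
+                    // inside the current month.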
+ int dow = daysOfWeek.first(); // desired + // d-o-w + int cDow = cl.get(Calendar.DAY_OF_WEEK); // current d-o-w + int daysToAdd = 0; + if (cDow < dow) { + daysToAdd = dow - cDow; + } + if (cDow > dow) { + daysToAdd = dow + (7 - cDow); + } + + int lDay = getLastDayOfMonth(mon, cl.get(Calendar.YEAR)); + + if (day + daysToAdd > lDay) { // did we already miss the + // last one? + cl.set(Calendar.SECOND, 0); + cl.set(Calendar.MINUTE, 0); + cl.set(Calendar.HOUR_OF_DAY, 0); + cl.set(Calendar.DAY_OF_MONTH, 1); + cl.set(Calendar.MONTH, mon); + // no '- 1' here because we are promoting the month + continue; + } + + // find date of last occurrence of this day in this month... + while ((day + daysToAdd + 7) <= lDay) { + daysToAdd += 7; + } + + day += daysToAdd; + + if (daysToAdd > 0) { + cl.set(Calendar.SECOND, 0); + cl.set(Calendar.MINUTE, 0); + cl.set(Calendar.HOUR_OF_DAY, 0); + cl.set(Calendar.DAY_OF_MONTH, day); + cl.set(Calendar.MONTH, mon - 1); + // '- 1' here because we are not promoting the month + continue; + } + + } else if (nthdayOfWeek != 0) { + // are we looking for the Nth XXX day in the month? + int dow = daysOfWeek.first(); // desired + // d-o-w + int cDow = cl.get(Calendar.DAY_OF_WEEK); // current d-o-w + int daysToAdd = 0; + if (cDow < dow) { + daysToAdd = dow - cDow; + } else if (cDow > dow) { + daysToAdd = dow + (7 - cDow); + } + + boolean dayShifted = false; + if (daysToAdd > 0) { + dayShifted = true; + } + + day += daysToAdd; + int weekOfMonth = day / 7; + if (day % 7 > 0) { + weekOfMonth++; + } + + daysToAdd = (nthdayOfWeek - weekOfMonth) * 7; + day += daysToAdd; + if (daysToAdd < 0 + || day > getLastDayOfMonth(mon, cl + .get(Calendar.YEAR))) { + cl.set(Calendar.SECOND, 0); + cl.set(Calendar.MINUTE, 0); + cl.set(Calendar.HOUR_OF_DAY, 0); + cl.set(Calendar.DAY_OF_MONTH, 1); + cl.set(Calendar.MONTH, mon); + // no '- 1' here because we are promoting the month + continue; + } else if (daysToAdd > 0 || dayShifted) { + cl.set(Calendar.SECOND, 0); + cl.set(Calendar.MINUTE, 0); + cl.set(Calendar.HOUR_OF_DAY, 0); + cl.set(Calendar.DAY_OF_MONTH, day); + cl.set(Calendar.MONTH, mon - 1); + // '- 1' here because we are NOT promoting the month + continue; + } + } else { + int cDow = cl.get(Calendar.DAY_OF_WEEK); // current d-o-w + int dow = daysOfWeek.first(); // desired + // d-o-w + st = daysOfWeek.tailSet(cDow); + if (st != null && st.size() > 0) { + dow = st.first(); + } + + int daysToAdd = 0; + if (cDow < dow) { + daysToAdd = dow - cDow; + } + if (cDow > dow) { + daysToAdd = dow + (7 - cDow); + } + + int lDay = getLastDayOfMonth(mon, cl.get(Calendar.YEAR)); + + if (day + daysToAdd > lDay) { // will we pass the end of + // the month? + cl.set(Calendar.SECOND, 0); + cl.set(Calendar.MINUTE, 0); + cl.set(Calendar.HOUR_OF_DAY, 0); + cl.set(Calendar.DAY_OF_MONTH, 1); + cl.set(Calendar.MONTH, mon); + // no '- 1' here because we are promoting the month + continue; + } else if (daysToAdd > 0) { // are we swithing days? 
+ cl.set(Calendar.SECOND, 0); + cl.set(Calendar.MINUTE, 0); + cl.set(Calendar.HOUR_OF_DAY, 0); + cl.set(Calendar.DAY_OF_MONTH, day + daysToAdd); + cl.set(Calendar.MONTH, mon - 1); + // '- 1' because calendar is 0-based for this field, + // and we are 1-based + continue; + } + } + } else { // dayOfWSpec && !dayOfMSpec + return -1; +// throw new UnsupportedOperationException( +// "Support for specifying both a day-of-week AND a day-of-month parameter is not implemented."); + } + cl.set(Calendar.DAY_OF_MONTH, day); + + mon = cl.get(Calendar.MONTH) + 1; + // '+ 1' because calendar is 0-based for this field, and we are + // 1-based + int year = cl.get(Calendar.YEAR); + t = -1; + + // test for expressions that never generate a valid fire date, + // but keep looping... + if (year > MAX_YEAR) { + return -1; +// throw new ElasticsearchIllegalArgumentException("given time is not supported by cron [" + formatter.print(time) + "]"); + } + + // get month................................................... + st = months.tailSet(mon); + if (st != null && st.size() != 0) { + t = mon; + mon = st.first(); + } else { + mon = months.first(); + year++; + } + if (mon != t) { + cl.set(Calendar.SECOND, 0); + cl.set(Calendar.MINUTE, 0); + cl.set(Calendar.HOUR_OF_DAY, 0); + cl.set(Calendar.DAY_OF_MONTH, 1); + cl.set(Calendar.MONTH, mon - 1); + // '- 1' because calendar is 0-based for this field, and we are + // 1-based + cl.set(Calendar.YEAR, year); + continue; + } + cl.set(Calendar.MONTH, mon - 1); + // '- 1' because calendar is 0-based for this field, and we are + // 1-based + + year = cl.get(Calendar.YEAR); + t = -1; + + // get year................................................... + st = years.tailSet(year); + if (st != null && st.size() != 0) { + t = year; + year = st.first(); + } else { + return -1; +// throw new ElasticsearchIllegalArgumentException("given time is not supported by cron [" + formatter.print(time) + "]"); + } + + if (year != t) { + cl.set(Calendar.SECOND, 0); + cl.set(Calendar.MINUTE, 0); + cl.set(Calendar.HOUR_OF_DAY, 0); + cl.set(Calendar.DAY_OF_MONTH, 1); + cl.set(Calendar.MONTH, 0); + // '- 1' because calendar is 0-based for this field, and we are + // 1-based + cl.set(Calendar.YEAR, year); + continue; + } + cl.set(Calendar.YEAR, year); + + gotOne = true; + } // while( !done ) + + return cl.getTimeInMillis(); + } + + public String expression() { + return expression; + } + + public String getExpressionSummary() { + StringBuilder buf = new StringBuilder(); + + buf.append("seconds: "); + buf.append(expressionSetSummary(seconds)); + buf.append("\n"); + buf.append("minutes: "); + buf.append(expressionSetSummary(minutes)); + buf.append("\n"); + buf.append("hours: "); + buf.append(expressionSetSummary(hours)); + buf.append("\n"); + buf.append("daysOfMonth: "); + buf.append(expressionSetSummary(daysOfMonth)); + buf.append("\n"); + buf.append("months: "); + buf.append(expressionSetSummary(months)); + buf.append("\n"); + buf.append("daysOfWeek: "); + buf.append(expressionSetSummary(daysOfWeek)); + buf.append("\n"); + buf.append("lastdayOfWeek: "); + buf.append(lastdayOfWeek); + buf.append("\n"); + buf.append("nearestWeekday: "); + buf.append(nearestWeekday); + buf.append("\n"); + buf.append("NthDayOfWeek: "); + buf.append(nthdayOfWeek); + buf.append("\n"); + buf.append("lastdayOfMonth: "); + buf.append(lastdayOfMonth); + buf.append("\n"); + buf.append("years: "); + buf.append(expressionSetSummary(years)); + buf.append("\n"); + + return buf.toString(); + } + + @Override + public int hashCode() { + 
return Objects.hash(expression); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + final Cron other = (Cron) obj; + return Objects.equals(this.expression, other.expression); + } + + /** + * Returns the string representation of the CronExpression + * + * @return a string representation of the CronExpression + */ + @Override + public String toString() { + return expression; + } + + /** + * Indicates whether the specified cron expression can be parsed into a + * valid cron expression + * + * @param expression the expression to evaluate + * @return a boolean indicating whether the given expression is a valid cron + * expression + */ + public static boolean isValid(String expression) { + try { + validate(expression); + } catch (IllegalArgumentException pe) { + return false; + } + return true; + } + + public static void validate(String expression) throws IllegalArgumentException { + new Cron(expression); + } + + + //////////////////////////////////////////////////////////////////////////// + // + // Expression Parsing Functions + // + //////////////////////////////////////////////////////////////////////////// + + private void buildExpression(String expression) { + expressionParsed = true; + + try { + + if (seconds == null) { + seconds = new TreeSet(); + } + if (minutes == null) { + minutes = new TreeSet(); + } + if (hours == null) { + hours = new TreeSet(); + } + if (daysOfMonth == null) { + daysOfMonth = new TreeSet(); + } + if (months == null) { + months = new TreeSet(); + } + if (daysOfWeek == null) { + daysOfWeek = new TreeSet(); + } + if (years == null) { + years = new TreeSet(); + } + + int exprOn = SECOND; + + StringTokenizer exprsTok = new StringTokenizer(expression, " \t", + false); + + while (exprsTok.hasMoreTokens() && exprOn <= YEAR) { + String expr = exprsTok.nextToken().trim(); + + // throw an exception if L is used with other days of the month + if(exprOn == DAY_OF_MONTH && expr.indexOf('L') != -1 && expr.length() > 1 && expr.contains(",")) { + throw illegalArgument("support for specifying 'L' and 'LW' with other days of the month is not implemented"); + } + // throw an exception if L is used with other days of the week + if(exprOn == DAY_OF_WEEK && expr.indexOf('L') != -1 && expr.length() > 1 && expr.contains(",")) { + throw illegalArgument("support for specifying 'L' with other days of the week is not implemented"); + } + if(exprOn == DAY_OF_WEEK && expr.indexOf('#') != -1 && expr.indexOf('#', expr.indexOf('#') +1) != -1) { + throw illegalArgument("support for specifying multiple \"nth\" days is not implemented."); + } + + StringTokenizer vTok = new StringTokenizer(expr, ","); + while (vTok.hasMoreTokens()) { + String v = vTok.nextToken(); + storeExpressionVals(0, v, exprOn); + } + + exprOn++; + } + + if (exprOn <= DAY_OF_WEEK) { + throw illegalArgument("unexpected end of expression at pos [{}].", expression.length()); + } + + if (exprOn <= YEAR) { + storeExpressionVals(0, "*", YEAR); + } + + TreeSet dow = getSet(DAY_OF_WEEK); + TreeSet dom = getSet(DAY_OF_MONTH); + + // Copying the logic from the UnsupportedOperationException below + boolean dayOfMSpec = !dom.contains(NO_SPEC); + boolean dayOfWSpec = !dow.contains(NO_SPEC); + + if (!dayOfMSpec || dayOfWSpec) { + if (!dayOfWSpec || dayOfMSpec) { + throw illegalArgument("support for specifying both a day-of-week AND a day-of-month parameter is not implemented."); + } + } + } catch (Exception e) { + throw 
illegalArgument("illegal cron expression format [{}]", e.toString()); + } + } + + private int storeExpressionVals(int pos, String s, int type) throws ElasticsearchParseException { + + int incr = 0; + int i = skipWhiteSpace(pos, s); + if (i >= s.length()) { + return i; + } + char c = s.charAt(i); + if ((c >= 'A') && (c <= 'Z') && (!s.equals("L")) && (!s.equals("LW")) && (!s.matches("^L-[0-9]*[W]?"))) { + String sub = s.substring(i, i + 3); + int sval = -1; + int eval = -1; + if (type == MONTH) { + sval = getMonthNumber(sub) + 1; + if (sval <= 0) { + throw illegalArgument("invalid Month value [{}] at pos [{}]", sub, i); + } + if (s.length() > i + 3) { + c = s.charAt(i + 3); + if (c == '-') { + i += 4; + sub = s.substring(i, i + 3); + eval = getMonthNumber(sub) + 1; + if (eval <= 0) { + throw illegalArgument("invalid Month value [{}] at pos [{}]", sub, i); + } + } + } + } else if (type == DAY_OF_WEEK) { + sval = getDayOfWeekNumber(sub); + if (sval < 0) { + throw illegalArgument("invalid Day-of-Week value [{}] at pos [{}]", sub, i); + } + if (s.length() > i + 3) { + c = s.charAt(i + 3); + if (c == '-') { + i += 4; + sub = s.substring(i, i + 3); + eval = getDayOfWeekNumber(sub); + if (eval < 0) { + throw illegalArgument("invalid Day-of-Week value [{}] at pos [{}]", sub, i); + } + } else if (c == '#') { + try { + i += 4; + nthdayOfWeek = Integer.parseInt(s.substring(i)); + if (nthdayOfWeek < 1 || nthdayOfWeek > 5) { + throw new Exception(); + } + } catch (Exception e) { + throw illegalArgument("a numeric value between 1 and 5 must follow the '#' option at pos [{}]", i); + } + } else if (c == 'L') { + lastdayOfWeek = true; + i++; + } + } + + } else { + throw illegalArgument("illegal characters [{}] at pos [{}] '", sub, i); + } + if (eval != -1) { + incr = 1; + } + addToSet(sval, eval, incr, type); + return (i + 3); + } + + if (c == '?') { + i++; + if ((i + 1) < s.length() + && (s.charAt(i) != ' ' && s.charAt(i + 1) != '\t')) { + throw illegalArgument("illegal character [{}] after '?' at pos [{}]", s.charAt(i), i); + } + if (type != DAY_OF_WEEK && type != DAY_OF_MONTH) { + throw illegalArgument("'?' can only be specified for Day-of-Month or Day-of-Week. at pos [{}]", i); + } + if (type == DAY_OF_WEEK && !lastdayOfMonth) { + int val = daysOfMonth.last(); + if (val == NO_SPEC_INT) { + throw illegalArgument("'?' can only be specified for Day-of-Month -OR- Day-of-Week. at pos [{}]", i); + } + } + + addToSet(NO_SPEC_INT, -1, 0, type); + return i; + } + + if (c == '*' || c == '/') { + if (c == '*' && (i + 1) >= s.length()) { + addToSet(ALL_SPEC_INT, -1, incr, type); + return i + 1; + } else if (c == '/' + && ((i + 1) >= s.length() || s.charAt(i + 1) == ' ' || s + .charAt(i + 1) == '\t')) { + throw illegalArgument("'/' must be followed by an integer. at pos [{}]", i); + } else if (c == '*') { + i++; + } + c = s.charAt(i); + if (c == '/') { // is an increment specified? + i++; + if (i >= s.length()) { + throw illegalArgument("Unexpected end of string. 
at pos [{}]", i); + } + + incr = getNumericValue(s, i); + + i++; + if (incr > 10) { + i++; + } + if (incr > 59 && (type == SECOND || type == MINUTE)) { + throw illegalArgument("increment [{}] > 60 at pos [{}]", incr, i); + } else if (incr > 23 && (type == HOUR)) { + throw illegalArgument("increment [{}] > 24 at pos [{}]", incr, i); + } else if (incr > 31 && (type == DAY_OF_MONTH)) { + throw illegalArgument("increment [{}] > 31 at pos [{}] ", incr, i); + } else if (incr > 7 && (type == DAY_OF_WEEK)) { + throw illegalArgument("increment [{}] > 7 at pos [{}] ", incr, i); + } else if (incr > 12 && (type == MONTH)) { + throw illegalArgument("increment [{}] > 12 at pos [{}]", incr, i); + } + } else { + incr = 1; + } + + addToSet(ALL_SPEC_INT, -1, incr, type); + return i; + } else if (c == 'L') { + i++; + if (type == DAY_OF_MONTH) { + lastdayOfMonth = true; + } + if (type == DAY_OF_WEEK) { + addToSet(7, 7, 0, type); + } + if(type == DAY_OF_MONTH && s.length() > i) { + c = s.charAt(i); + if(c == '-') { + ValueSet vs = getValue(0, s, i+1); + lastdayOffset = vs.value; + if(lastdayOffset > 30) + throw illegalArgument("offset from last day must be <= 30 at pos [{}]", i + 1); + i = vs.pos; + } + if(s.length() > i) { + c = s.charAt(i); + if(c == 'W') { + nearestWeekday = true; + i++; + } + } + } + return i; + } else if (c >= '0' && c <= '9') { + int val = Integer.parseInt(String.valueOf(c)); + i++; + if (i >= s.length()) { + addToSet(val, -1, -1, type); + } else { + c = s.charAt(i); + if (c >= '0' && c <= '9') { + ValueSet vs = getValue(val, s, i); + val = vs.value; + i = vs.pos; + } + i = checkNext(i, s, val, type); + return i; + } + } else { + throw illegalArgument("Unexpected character [{}] at pos [{}] ", c, i); + } + + return i; + } + + private int checkNext(int pos, String s, int val, int type) throws ElasticsearchParseException { + + int end = -1; + int i = pos; + + if (i >= s.length()) { + addToSet(val, end, -1, type); + return i; + } + + char c = s.charAt(pos); + + if (c == 'L') { + if (type == DAY_OF_WEEK) { + if(val < 1 || val > 7) + throw illegalArgument("Day-of-Week values must be between 1 and 7"); + lastdayOfWeek = true; + } else { + throw illegalArgument("'L' option is not valid here. at pos [{}]", i); + } + TreeSet set = getSet(type); + set.add(val); + i++; + return i; + } + + if (c == 'W') { + if (type == DAY_OF_MONTH) { + nearestWeekday = true; + } else { + throw illegalArgument("'W' option is not valid here. at pos [{}]", i); + } + if(val > 31) + throw illegalArgument("the 'W' option does not make sense with values larger than 31 (max number of days in a month) at " + + "pos [{}]", i); + TreeSet set = getSet(type); + set.add(val); + i++; + return i; + } + + if (c == '#') { + if (type != DAY_OF_WEEK) { + throw illegalArgument("'#' option is not valid here. 
at pos [{}]", i); + } + i++; + try { + nthdayOfWeek = Integer.parseInt(s.substring(i)); + if (nthdayOfWeek < 1 || nthdayOfWeek > 5) { + throw new Exception(); + } + } catch (Exception e) { + throw illegalArgument("a numeric value between 1 and 5 must follow the '#' option at pos [{}]", i); + } + + TreeSet set = getSet(type); + set.add(val); + i++; + return i; + } + + if (c == '-') { + i++; + c = s.charAt(i); + int v = Integer.parseInt(String.valueOf(c)); + end = v; + i++; + if (i >= s.length()) { + addToSet(val, end, 1, type); + return i; + } + c = s.charAt(i); + if (c >= '0' && c <= '9') { + ValueSet vs = getValue(v, s, i); + end = vs.value; + i = vs.pos; + } + if (i < s.length() && ((c = s.charAt(i)) == '/')) { + i++; + c = s.charAt(i); + int v2 = Integer.parseInt(String.valueOf(c)); + i++; + if (i >= s.length()) { + addToSet(val, end, v2, type); + return i; + } + c = s.charAt(i); + if (c >= '0' && c <= '9') { + ValueSet vs = getValue(v2, s, i); + int v3 = vs.value; + addToSet(val, end, v3, type); + i = vs.pos; + return i; + } else { + addToSet(val, end, v2, type); + return i; + } + } else { + addToSet(val, end, 1, type); + return i; + } + } + + if (c == '/') { + i++; + c = s.charAt(i); + int v2 = Integer.parseInt(String.valueOf(c)); + i++; + if (i >= s.length()) { + addToSet(val, end, v2, type); + return i; + } + c = s.charAt(i); + if (c >= '0' && c <= '9') { + ValueSet vs = getValue(v2, s, i); + int v3 = vs.value; + addToSet(val, end, v3, type); + i = vs.pos; + return i; + } else { + throw illegalArgument("Unexpected character [{}] after '/' at pos [{}]", c, i); + } + } + + addToSet(val, end, 0, type); + i++; + return i; + } + + private static String expressionSetSummary(java.util.Set set) { + + if (set.contains(NO_SPEC)) { + return "?"; + } + if (set.contains(ALL_SPEC)) { + return "*"; + } + + StringBuilder buf = new StringBuilder(); + + Iterator itr = set.iterator(); + boolean first = true; + while (itr.hasNext()) { + Integer iVal = itr.next(); + String val = iVal.toString(); + if (!first) { + buf.append(","); + } + buf.append(val); + first = false; + } + + return buf.toString(); + } + + private static String expressionSetSummary(java.util.ArrayList list) { + + if (list.contains(NO_SPEC)) { + return "?"; + } + if (list.contains(ALL_SPEC)) { + return "*"; + } + + StringBuilder buf = new StringBuilder(); + + Iterator itr = list.iterator(); + boolean first = true; + while (itr.hasNext()) { + Integer iVal = itr.next(); + String val = iVal.toString(); + if (!first) { + buf.append(","); + } + buf.append(val); + first = false; + } + + return buf.toString(); + } + + private static int skipWhiteSpace(int i, String s) { + for (; i < s.length() && (s.charAt(i) == ' ' || s.charAt(i) == '\t'); i++) { + ; + } + + return i; + } + + private static int findNextWhiteSpace(int i, String s) { + for (; i < s.length() && (s.charAt(i) != ' ' || s.charAt(i) != '\t'); i++) { + ; + } + + return i; + } + + private void addToSet(int val, int end, int incr, int type) throws ElasticsearchParseException { + + TreeSet set = getSet(type); + + if (type == SECOND || type == MINUTE) { + if ((val < 0 || val > 59 || end > 59) && (val != ALL_SPEC_INT)) { + throw illegalArgument("Minute and Second values must be between 0 and 59"); + } + } else if (type == HOUR) { + if ((val < 0 || val > 23 || end > 23) && (val != ALL_SPEC_INT)) { + throw illegalArgument("Hour values must be between 0 and 23"); + } + } else if (type == DAY_OF_MONTH) { + if ((val < 1 || val > 31 || end > 31) && (val != ALL_SPEC_INT) + && (val != 
NO_SPEC_INT)) { + throw illegalArgument("Day of month values must be between 1 and 31"); + } + } else if (type == MONTH) { + if ((val < 1 || val > 12 || end > 12) && (val != ALL_SPEC_INT)) { + throw illegalArgument("Month values must be between 1 and 12"); + } + } else if (type == DAY_OF_WEEK) { + if ((val == 0 || val > 7 || end > 7) && (val != ALL_SPEC_INT) + && (val != NO_SPEC_INT)) { + throw illegalArgument("Day-of-Week values must be between 1 and 7"); + } + } + + if ((incr == 0 || incr == -1) && val != ALL_SPEC_INT) { + if (val != -1) { + set.add(val); + } else { + set.add(NO_SPEC); + } + + return; + } + + int startAt = val; + int stopAt = end; + + if (val == ALL_SPEC_INT && incr <= 0) { + incr = 1; + set.add(ALL_SPEC); // put in a marker, but also fill values + } + + if (type == SECOND || type == MINUTE) { + if (stopAt == -1) { + stopAt = 59; + } + if (startAt == -1 || startAt == ALL_SPEC_INT) { + startAt = 0; + } + } else if (type == HOUR) { + if (stopAt == -1) { + stopAt = 23; + } + if (startAt == -1 || startAt == ALL_SPEC_INT) { + startAt = 0; + } + } else if (type == DAY_OF_MONTH) { + if (stopAt == -1) { + stopAt = 31; + } + if (startAt == -1 || startAt == ALL_SPEC_INT) { + startAt = 1; + } + } else if (type == MONTH) { + if (stopAt == -1) { + stopAt = 12; + } + if (startAt == -1 || startAt == ALL_SPEC_INT) { + startAt = 1; + } + } else if (type == DAY_OF_WEEK) { + if (stopAt == -1) { + stopAt = 7; + } + if (startAt == -1 || startAt == ALL_SPEC_INT) { + startAt = 1; + } + } else if (type == YEAR) { + if (stopAt == -1) { + stopAt = MAX_YEAR; + } + if (startAt == -1 || startAt == ALL_SPEC_INT) { + startAt = 1970; + } + } + + // if the end of the range is before the start, then we need to overflow into + // the next day, month etc. This is done by adding the maximum amount for that + // type, and using modulus max to determine the value being added. + int max = -1; + if (stopAt < startAt) { + switch (type) { + case SECOND : max = 60; break; + case MINUTE : max = 60; break; + case HOUR : max = 24; break; + case MONTH : max = 12; break; + case DAY_OF_WEEK : max = 7; break; + case DAY_OF_MONTH : max = 31; break; + case YEAR : throw new IllegalArgumentException("Start year must be less than stop year"); + default : throw new IllegalArgumentException("Unexpected type encountered"); + } + stopAt += max; + } + + for (int i = startAt; i <= stopAt; i += incr) { + if (max == -1) { + // ie: there's no max to overflow over + set.add(i); + } else { + // take the modulus to get the real value + int i2 = i % max; + + // 1-indexed ranges should not include 0, and should include their max + if (i2 == 0 && (type == MONTH || type == DAY_OF_WEEK || type == DAY_OF_MONTH) ) { + i2 = max; + } + + set.add(i2); + } + } + } + + private TreeSet getSet(int type) { + switch (type) { + case SECOND: + return seconds; + case MINUTE: + return minutes; + case HOUR: + return hours; + case DAY_OF_MONTH: + return daysOfMonth; + case MONTH: + return months; + case DAY_OF_WEEK: + return daysOfWeek; + case YEAR: + return years; + default: + return null; + } + } + + private ValueSet getValue(int v, String s, int i) { + char c = s.charAt(i); + StringBuilder s1 = new StringBuilder(String.valueOf(v)); + while (c >= '0' && c <= '9') { + s1.append(c); + i++; + if (i >= s.length()) { + break; + } + c = s.charAt(i); + } + ValueSet val = new ValueSet(); + + val.pos = (i < s.length()) ? 
i : i + 1; + val.value = Integer.parseInt(s1.toString()); + return val; + } + + private int getNumericValue(String s, int i) { + int endOfVal = findNextWhiteSpace(i, s); + String val = s.substring(i, endOfVal); + return Integer.parseInt(val); + } + + private int getMonthNumber(String s) { + Integer integer = monthMap.get(s); + + if (integer == null) { + return -1; + } + + return integer; + } + + private int getDayOfWeekNumber(String s) { + Integer integer = dayMap.get(s); + + if (integer == null) { + return -1; + } + + return integer; + } + + /** + * Advance the calendar to the particular hour paying particular attention + * to daylight saving problems. + * + * @param cal the calendar to operate on + * @param hour the hour to set + */ + private static void setCalendarHour(Calendar cal, int hour) { + cal.set(java.util.Calendar.HOUR_OF_DAY, hour); + if (cal.get(java.util.Calendar.HOUR_OF_DAY) != hour && hour != 24) { + cal.set(java.util.Calendar.HOUR_OF_DAY, hour + 1); + } + } + + private static boolean isLeapYear(int year) { + return ((year % 4 == 0 && year % 100 != 0) || (year % 400 == 0)); + } + + private int getLastDayOfMonth(int monthNum, int year) { + + switch (monthNum) { + case 1: + return 31; + case 2: + return (isLeapYear(year)) ? 29 : 28; + case 3: + return 31; + case 4: + return 30; + case 5: + return 31; + case 6: + return 30; + case 7: + return 31; + case 8: + return 31; + case 9: + return 30; + case 10: + return 31; + case 11: + return 30; + case 12: + return 31; + default: + throw new IllegalArgumentException("Illegal month number: " + + monthNum); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } + + private static class ValueSet { + int value; + int pos; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java new file mode 100644 index 0000000000000..11107ef86f486 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/SchedulerEngine.java @@ -0,0 +1,189 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.scheduler; + +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.FutureUtils; + +import java.time.Clock; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; + +public class SchedulerEngine { + + public static class Job { + private final String id; + private final Schedule schedule; + + public Job(String id, Schedule schedule) { + this.id = id; + this.schedule = schedule; + } + + public String getId() { + return id; + } + + public Schedule getSchedule() { + return schedule; + } + } + + public static class Event { + private final String jobName; + private final long triggeredTime; + private final long scheduledTime; + + public Event(String jobName, long triggeredTime, long scheduledTime) { + this.jobName = jobName; + this.triggeredTime = triggeredTime; + this.scheduledTime = scheduledTime; + } + + public String getJobName() { + return jobName; + } + + public long getTriggeredTime() { + return triggeredTime; + } + + public long getScheduledTime() { + return scheduledTime; + } + } + + public interface Listener { + void triggered(Event event); + } + + public interface Schedule { + + /** + * Returns the next scheduled time after the given time, according to this schedule. If the given schedule + * cannot resolve the next scheduled time, then {@code -1} is returned. It really depends on the type of + * schedule to determine when {@code -1} is returned. Some schedules (e.g. IntervalSchedule) will never return + * {@code -1} as they can always compute the next scheduled time. {@code Cron} based schedules are good example + * of schedules that may return {@code -1}, for example, when the schedule only points to times that are all + * before the given time (in which case, there is no next scheduled time for the given time). + * + * Example: + * + * cron 0 0 0 * 1 ? 
2013 (only points to days in January 2013) + * + * time 2015-01-01 12:00:00 (this time is in 2015) + * + */ + long nextScheduledTimeAfter(long startTime, long now); + } + + private final Map schedules = ConcurrentCollections.newConcurrentMap(); + private final ScheduledExecutorService scheduler; + private final Clock clock; + private final List listeners = new CopyOnWriteArrayList<>(); + + public SchedulerEngine(Clock clock) { + this.clock = clock; + this.scheduler = Executors.newScheduledThreadPool(1, EsExecutors.daemonThreadFactory("trigger_engine_scheduler")); + } + + public void register(Listener listener) { + listeners.add(listener); + } + + public void unregister(Listener listener) { + listeners.remove(listener); + } + + public void start(Collection jobs) { + jobs.forEach(this::add); + } + + public void stop() { + scheduler.shutdownNow(); + try { + scheduler.awaitTermination(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + + public void add(Job job) { + ActiveSchedule schedule = new ActiveSchedule(job.getId(), job.getSchedule(), clock.millis()); + schedules.compute(schedule.name, (name, previousSchedule) -> { + if (previousSchedule != null) { + previousSchedule.cancel(); + } + return schedule; + }); + } + + public boolean remove(String jobId) { + ActiveSchedule removedSchedule = schedules.remove(jobId); + if (removedSchedule != null) { + removedSchedule.cancel(); + } + return removedSchedule != null; + } + + /** + * @return The number of currently active/triggered jobs + */ + public int jobCount() { + return schedules.size(); + } + + protected void notifyListeners(String name, long triggeredTime, long scheduledTime) { + final Event event = new Event(name, triggeredTime, scheduledTime); + for (Listener listener : listeners) { + listener.triggered(event); + } + } + + class ActiveSchedule implements Runnable { + + private final String name; + private final Schedule schedule; + private final long startTime; + + private volatile ScheduledFuture future; + private volatile long scheduledTime; + + ActiveSchedule(String name, Schedule schedule, long startTime) { + this.name = name; + this.schedule = schedule; + this.startTime = startTime; + this.scheduleNextRun(startTime); + } + + @Override + public void run() { + long triggeredTime = clock.millis(); + notifyListeners(name, triggeredTime, scheduledTime); + scheduleNextRun(triggeredTime); + } + + private void scheduleNextRun(long currentTime) { + this.scheduledTime = schedule.nextScheduledTimeAfter(startTime, currentTime); + if (scheduledTime != -1) { + long delay = Math.max(0, scheduledTime - currentTime); + future = scheduler.schedule(this, delay, TimeUnit.MILLISECONDS); + } + } + + public void cancel() { + FutureUtils.cancel(future); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/ScrollHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/ScrollHelper.java new file mode 100644 index 0000000000000..a481f8803111a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/ScrollHelper.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.ClearScrollRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.search.SearchHit; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.function.Consumer; +import java.util.function.Function; + +public final class ScrollHelper { + + private ScrollHelper() {} + + /** + * This method fetches all results for the given search request, parses them using the given hit parser and calls the + * listener once done. + */ + public static void fetchAllByEntity(Client client, SearchRequest request, final ActionListener> listener, + Function hitParser) { + final List results = new ArrayList<>(); + if (request.scroll() == null) { // we do scroll by default lets see if we can get rid of this at some point. + request.scroll(TimeValue.timeValueSeconds(10L)); + } + final Consumer clearScroll = (response) -> { + if (response != null && response.getScrollId() != null) { + ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + clearScrollRequest.addScrollId(response.getScrollId()); + client.clearScroll(clearScrollRequest, ActionListener.wrap((r) -> {}, (e) -> {})); + } + }; + // This function is MADNESS! But it works, don't think about it too hard... + // simon edit: just watch this if you got this far https://www.youtube.com/watch?v=W-lF106Dgk8 + client.search(request, new ContextPreservingActionListener<>(client.threadPool().getThreadContext().newRestorableContext(true), + new ActionListener() { + private volatile SearchResponse lastResponse = null; + + @Override + public void onResponse(SearchResponse resp) { + try { + lastResponse = resp; + if (resp.getHits().getHits().length > 0) { + for (SearchHit hit : resp.getHits().getHits()) { + final T oneResult = hitParser.apply(hit); + if (oneResult != null) { + results.add(oneResult); + } + } + + if (results.size() > resp.getHits().getTotalHits()) { + clearScroll.accept(lastResponse); + listener.onFailure(new IllegalStateException("scrolling returned more hits [" + results.size() + + "] than expected [" + resp.getHits().getTotalHits() + "] so bailing out to prevent unbounded " + + "memory consumption.")); + } else if (results.size() == resp.getHits().getTotalHits()) { + clearScroll.accept(resp); + // Finally, return the list of the entity + listener.onResponse(Collections.unmodifiableList(results)); + } else { + SearchScrollRequest scrollRequest = new SearchScrollRequest(resp.getScrollId()); + scrollRequest.scroll(request.scroll().keepAlive()); + client.searchScroll(scrollRequest, this); + } + } else { + clearScroll.accept(resp); + // Finally, return the list of the entity + listener.onResponse(Collections.unmodifiableList(results)); + } + } catch (Exception e){ + onFailure(e); // lets clean up things + } + } + + @Override + public void onFailure(Exception t) { + try { + // attempt to clear the scroll request + clearScroll.accept(lastResponse); + } finally { + if (t instanceof IndexNotFoundException) { + // since this is expected to happen at times, we just call 
the listener with an empty list + listener.onResponse(Collections.emptyList()); + } else { + listener.onFailure(t); + } + } + } + })); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityContext.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityContext.java new file mode 100644 index 0000000000000..99788ac1de43a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityContext.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.concurrent.ThreadContext.StoredContext; +import org.elasticsearch.node.Node; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.user.User; + +import java.io.IOException; +import java.util.Objects; +import java.util.function.Consumer; + +/** + * A lightweight utility that can find the current user and authentication information for the local thread. + */ +public class SecurityContext { + + private final Logger logger; + private final ThreadContext threadContext; + private final UserSettings userSettings; + private final String nodeName; + + /** + * Creates a new security context. + * If cryptoService is null, security is disabled and {@link UserSettings#getUser()} + * and {@link UserSettings#getAuthentication()} will always return null. + */ + public SecurityContext(Settings settings, ThreadContext threadContext) { + this.logger = Loggers.getLogger(getClass(), settings); + this.threadContext = threadContext; + this.userSettings = new UserSettings(settings, threadContext); + this.nodeName = Node.NODE_NAME_SETTING.get(settings); + } + + /** Returns the current user information, or null if the current request has no authentication info. */ + public User getUser() { + Authentication authentication = getAuthentication(); + return authentication == null ? null : authentication.getUser(); + } + + /** Returns the authentication information, or null if the current request has no authentication info. */ + public Authentication getAuthentication() { + try { + return Authentication.readFromContext(threadContext); + } catch (IOException e) { + // TODO: this seems bogus, the only way to get an ioexception here is from a corrupt or tampered + // auth header, which should be be audited? + logger.error("failed to read authentication", e); + return null; + } + } + + /** + * Sets the user forcefully to the provided user. There must not be an existing user in the ThreadContext otherwise an exception + * will be thrown. This method is package private for testing. 
+ */ + public void setUser(User user, Version version) { + Objects.requireNonNull(user); + final Authentication.RealmRef authenticatedBy = new Authentication.RealmRef("__attach", "__attach", nodeName); + final Authentication.RealmRef lookedUpBy; + if (user.isRunAs()) { + lookedUpBy = authenticatedBy; + } else { + lookedUpBy = null; + } + setAuthentication(new Authentication(user, authenticatedBy, lookedUpBy, version)); + } + + /** Writes the authentication to the thread context */ + private void setAuthentication(Authentication authentication) { + try { + authentication.writeToContext(threadContext); + } catch (IOException e) { + throw new AssertionError("how can we have a IOException with a user we set", e); + } + } + + /** + * Runs the consumer in a new context as the provided user. The original context is provided to the consumer. When this method + * returns, the original context is restored. + */ + public void executeAsUser(User user, Consumer consumer, Version version) { + final StoredContext original = threadContext.newStoredContext(true); + try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { + setUser(user, version); + consumer.accept(original); + } + } + + /** + * Runs the consumer in a new context after setting a new version of the authentication that is compatible with the version provided. + * The original context is provided to the consumer. When this method returns, the original context is restored. + */ + public void executeAfterRewritingAuthentication(Consumer consumer, Version version) { + final StoredContext original = threadContext.newStoredContext(true); + final Authentication authentication = Objects.requireNonNull(userSettings.getAuthentication()); + try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { + setAuthentication(new Authentication(authentication.getUser(), authentication.getAuthenticatedBy(), + authentication.getLookedUpBy(), version)); + consumer.accept(original); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityExtension.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityExtension.java new file mode 100644 index 0000000000000..190e9f7520b6c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityExtension.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security; + +import org.apache.lucene.util.SPIClassIterator; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.security.authc.AuthenticationFailureHandler; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.ServiceConfigurationError; +import java.util.Set; +import java.util.function.BiConsumer; + +/** + * An SPI extension point allowing to plug in custom functionality in x-pack authentication module. 
+ */ +public interface SecurityExtension { + + /** + * Returns authentication realm implementations added by this extension. + * + * The key of the returned {@link Map} is the type name of the realm, and the value + * is a {@link Realm.Factory} which will construct + * that realm for use in authentication when that realm type is configured. + * + * @param resourceWatcherService Use to watch configuration files for changes + */ + default Map getRealms(ResourceWatcherService resourceWatcherService) { + return Collections.emptyMap(); + } + + /** + * Returns the set of {@link Setting settings} that may be configured for the each type of realm. + * + * Each setting key must be unqualified and is in the same format as will be provided via {@link RealmConfig#settings()}. + * If a given realm-type is not present in the returned map, then it will be treated as if it supported all possible settings. + * + * The life-cycle of an extension dictates that this method will be called before {@link #getRealms(ResourceWatcherService)} + */ + default Map>> getRealmSettings() { return Collections.emptyMap(); } + + /** + * Returns a handler for authentication failures, or null to use the default handler. + * + * Only one installed extension may have an authentication failure handler. If more than + * one extension returns a non-null handler, an error is raised. + */ + default AuthenticationFailureHandler getAuthenticationFailureHandler() { + return null; + } + + /** + * Returns an ordered list of role providers that are used to resolve role names + * to {@link RoleDescriptor} objects. Each provider is invoked in order to + * resolve any role names not resolved by the reserved or native roles stores. + * + * Each role provider is represented as a {@link BiConsumer} which takes a set + * of roles to resolve as the first parameter to consume and an {@link ActionListener} + * as the second parameter to consume. The implementation of the role provider + * should be asynchronous if the computation is lengthy or any disk and/or network + * I/O is involved. The implementation is responsible for resolving whatever roles + * it can into a set of {@link RoleDescriptor} instances. If successful, the + * implementation must invoke {@link ActionListener#onResponse(Object)} to pass along + * the resolved set of role descriptors. If a failure was encountered, the + * implementation must invoke {@link ActionListener#onFailure(Exception)}. + * + * By default, an empty list is returned. 
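 *
 * A provider sketch (editor's illustration, not part of this patch): {@code lookupRole} is a
 * hypothetical synchronous helper that returns a {@link RoleDescriptor} for a known name or
 * {@code null} otherwise.
 * <pre>{@code
 * BiConsumer<Set<String>, ActionListener<Set<RoleDescriptor>>> provider = (names, listener) -> {
 *     Set<RoleDescriptor> resolved = new HashSet<>();
 *     for (String name : names) {
 *         RoleDescriptor descriptor = lookupRole(name);
 *         if (descriptor != null) {
 *             resolved.add(descriptor);
 *         }
 *     }
 *     listener.onResponse(resolved);
 * };
 * }</pre>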
+ * + * @param settings The configured settings for the node + * @param resourceWatcherService Use to watch configuration files for changes + */ + default List, ActionListener>>> + getRolesProviders(Settings settings, ResourceWatcherService resourceWatcherService) { + return Collections.emptyList(); + } + + /** + * Loads the XPackSecurityExtensions from the given class loader + */ + static List loadExtensions(ClassLoader loader) { + SPIClassIterator iterator = SPIClassIterator.get(SecurityExtension.class, loader); + List extensions = new ArrayList<>(); + while (iterator.hasNext()) { + final Class c = iterator.next(); + try { + extensions.add(c.getConstructor().newInstance()); + } catch (Exception e) { + throw new ServiceConfigurationError("failed to load security extension [" + c.getName() + "]", e); + } + } + return extensions; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java new file mode 100644 index 0000000000000..b549cffc0cc33 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; + +public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage { + + private static final String REALMS_XFIELD = "realms"; + private static final String ROLES_XFIELD = "roles"; + private static final String ROLE_MAPPING_XFIELD = "role_mapping"; + private static final String SSL_XFIELD = "ssl"; + private static final String AUDIT_XFIELD = "audit"; + private static final String IP_FILTER_XFIELD = "ipfilter"; + private static final String ANONYMOUS_XFIELD = "anonymous"; + + private Map realmsUsage; + private Map rolesStoreUsage; + private Map sslUsage; + private Map auditUsage; + private Map ipFilterUsage; + private Map anonymousUsage; + private Map roleMappingStoreUsage; + + public SecurityFeatureSetUsage(StreamInput in) throws IOException { + super(in); + realmsUsage = in.readMap(); + rolesStoreUsage = in.readMap(); + sslUsage = in.readMap(); + auditUsage = in.readMap(); + ipFilterUsage = in.readMap(); + if (in.getVersion().before(Version.V_6_0_0_beta1)) { + // system key has been removed but older send its usage, so read the map and ignore + in.readMap(); + } + anonymousUsage = in.readMap(); + roleMappingStoreUsage = in.readMap(); + } + + public SecurityFeatureSetUsage(boolean available, boolean enabled, Map realmsUsage, + Map rolesStoreUsage, Map roleMappingStoreUsage, + Map sslUsage, Map auditUsage, + Map ipFilterUsage, Map anonymousUsage) { + super(XPackField.SECURITY, available, enabled); + this.realmsUsage = realmsUsage; + this.rolesStoreUsage = rolesStoreUsage; + this.roleMappingStoreUsage = roleMappingStoreUsage; + this.sslUsage = sslUsage; + this.auditUsage = 
auditUsage; + this.ipFilterUsage = ipFilterUsage; + this.anonymousUsage = anonymousUsage; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(realmsUsage); + out.writeMap(rolesStoreUsage); + out.writeMap(sslUsage); + out.writeMap(auditUsage); + out.writeMap(ipFilterUsage); + if (out.getVersion().before(Version.V_6_0_0_beta1)) { + // system key has been removed but older versions still expected it so send a empty map + out.writeMap(Collections.emptyMap()); + } + out.writeMap(anonymousUsage); + out.writeMap(roleMappingStoreUsage); + } + + @Override + protected void innerXContent(XContentBuilder builder, Params params) throws IOException { + super.innerXContent(builder, params); + if (enabled) { + builder.field(REALMS_XFIELD, realmsUsage); + builder.field(ROLES_XFIELD, rolesStoreUsage); + builder.field(ROLE_MAPPING_XFIELD, roleMappingStoreUsage); + builder.field(SSL_XFIELD, sslUsage); + builder.field(AUDIT_XFIELD, auditUsage); + builder.field(IP_FILTER_XFIELD, ipFilterUsage); + builder.field(ANONYMOUS_XFIELD, anonymousUsage); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityField.java new file mode 100644 index 0000000000000..8d813925e33dc --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityField.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.xpack.core.XPackField; + +import java.util.Optional; + +public final class SecurityField { + + public static final String NAME4 = XPackField.SECURITY + "4"; + public static final Setting> USER_SETTING = + new Setting<>(setting("user"), (String) null, Optional::ofNullable, Setting.Property.NodeScope); + + private SecurityField() {} + + public static String setting(String setting) { + assert setting != null && setting.startsWith(".") == false; + return settingPrefix() + setting; + } + + public static String settingPrefix() { + return XPackField.featureSettingPrefix(XPackField.SECURITY) + "."; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityLifecycleServiceField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityLifecycleServiceField.java new file mode 100644 index 0000000000000..8e3cc23d8032f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityLifecycleServiceField.java @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
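 *
 * (Editor's note on {@code SecurityField} above; an illustrative sketch, not part of this
 * patch. The helper simply prefixes a setting suffix with the security feature's namespace.)
 * <pre>{@code
 * String key = SecurityField.setting("authc.anonymous.roles");
 * // expected to resolve to "xpack.security.authc.anonymous.roles"
 * }</pre>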
+ */ +package org.elasticsearch.xpack.core.security; + +public final class SecurityLifecycleServiceField { + public static final String SECURITY_TEMPLATE_NAME = "security-index-template"; + + private SecurityLifecycleServiceField() {} +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecuritySettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecuritySettings.java new file mode 100644 index 0000000000000..c48245c054fb8 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecuritySettings.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security; + +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; + +import java.util.Optional; + +public final class SecuritySettings { + + public static Settings addTransportSettings(final Settings settings) { + final Settings.Builder builder = Settings.builder(); + if (NetworkModule.TRANSPORT_TYPE_SETTING.exists(settings)) { + final String transportType = NetworkModule.TRANSPORT_TYPE_SETTING.get(settings); + if (SecurityField.NAME4.equals(transportType) == false) { + throw new IllegalArgumentException("transport type setting [" + NetworkModule.TRANSPORT_TYPE_KEY + + "] must be [" + SecurityField.NAME4 + "] but is [" + transportType + "]"); + } + } else { + // default to security4 + builder.put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4); + } + return builder.build(); + } + + public static Settings addUserSettings(final Settings settings) { + final Settings.Builder builder = Settings.builder(); + String authHeaderSettingName = ThreadContext.PREFIX + "." + UsernamePasswordToken.BASIC_AUTH_HEADER; + if (settings.get(authHeaderSettingName) == null) { + Optional userOptional = SecurityField.USER_SETTING.get(settings); // TODO migrate to securesetting! + userOptional.ifPresent(userSetting -> { + final int i = userSetting.indexOf(":"); + if (i < 0 || i == userSetting.length() - 1) { + throw new IllegalArgumentException("invalid [" + SecurityField.USER_SETTING.getKey() + + "] setting. must be in the form of \":\""); + } + String username = userSetting.substring(0, i); + String password = userSetting.substring(i + 1); + builder.put(authHeaderSettingName, UsernamePasswordToken.basicAuthHeaderValue(username, new SecureString(password))); + }); + } + return builder.build(); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/UserSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/UserSettings.java new file mode 100644 index 0000000000000..7f22f90351ef5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/UserSettings.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
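 *
 * (Editor's note on {@code SecuritySettings.addUserSettings} above; an illustrative sketch, not
 * part of this patch. The {@code xpack.security.user} value is expected in
 * {@code "username:password"} form and is translated into a basic-auth request header.)
 * <pre>{@code
 * Settings nodeSettings = Settings.builder()
 *     .put(SecurityField.USER_SETTING.getKey(), "es_admin:changeme")
 *     .build();
 * Settings headerSettings = SecuritySettings.addUserSettings(nodeSettings);
 * // headerSettings now carries the Authorization header under the thread-context prefix
 * }</pre>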
+ */ +package org.elasticsearch.xpack.core.security; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.user.User; + +import java.io.IOException; + +public final class UserSettings { + private final Logger logger; + private final ThreadContext threadContext; + + UserSettings(Settings settings, ThreadContext threadContext) { + this.logger = Loggers.getLogger(getClass(), settings); + this.threadContext = threadContext; + } + + + /** + * Returns the current user information, or null if the current request has no authentication info. + */ + public User getUser() { + Authentication authentication = getAuthentication(); + return authentication == null ? null : authentication.getUser(); + } + + /** + * Returns the authentication information, or null if the current request has no authentication info. + */ + public Authentication getAuthentication() { + try { + return Authentication.readFromContext(threadContext); + } catch (IOException e) { + // TODO: this seems bogus, the only way to get an ioexception here is from a corrupt or tampered + // auth header, which should be be audited? + logger.error("failed to read authentication", e); + return null; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheAction.java new file mode 100644 index 0000000000000..83e0334bc0b4a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheAction.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.realm; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public class ClearRealmCacheAction extends Action { + + public static final ClearRealmCacheAction INSTANCE = new ClearRealmCacheAction(); + public static final String NAME = "cluster:admin/xpack/security/realm/cache/clear"; + + protected ClearRealmCacheAction() { + super(NAME); + } + + @Override + public ClearRealmCacheRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new ClearRealmCacheRequestBuilder(client, this); + } + + @Override + public ClearRealmCacheResponse newResponse() { + return new ClearRealmCacheResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheRequest.java new file mode 100644 index 0000000000000..3d7436b96160e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheRequest.java @@ -0,0 +1,114 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.action.realm; + +import org.elasticsearch.action.support.nodes.BaseNodeRequest; +import org.elasticsearch.action.support.nodes.BaseNodesRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public class ClearRealmCacheRequest extends BaseNodesRequest { + + String[] realms; + String[] usernames; + + /** + * @return {@code true} if this request targets realms, {@code false} otherwise. + */ + public boolean allRealms() { + return realms == null || realms.length == 0; + } + + /** + * @return The realms that should be evicted. Empty array indicates all realms. + */ + public String[] realms() { + return realms; + } + + /** + * Sets the realms for which caches will be evicted. When not set all the caches of all realms will be + * evicted. + * + * @param realms The realm names + */ + public ClearRealmCacheRequest realms(String... realms) { + this.realms = realms; + return this; + } + + /** + * @return {@code true} if this request targets users, {@code false} otherwise. + */ + public boolean allUsernames() { + return usernames == null || usernames.length == 0; + } + + /** + * @return The usernames of the users that should be evicted. Empty array indicates all users. + */ + public String[] usernames() { + return usernames; + } + + /** + * Sets the usernames of the users that should be evicted from the caches. When not set, all users + * will be evicted. + * + * @param usernames The usernames + */ + public ClearRealmCacheRequest usernames(String... usernames) { + this.usernames = usernames; + return this; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + realms = in.readStringArray(); + usernames = in.readStringArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArrayNullable(realms); + out.writeStringArrayNullable(usernames); + } + + public static class Node extends BaseNodeRequest { + + private String[] realms; + private String[] usernames; + + public Node() { + } + + public Node(ClearRealmCacheRequest request, String nodeId) { + super(nodeId); + this.realms = request.realms; + this.usernames = request.usernames; + } + public String[] getRealms() { return realms; } + public String[] getUsernames() { return usernames; } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + realms = in.readStringArray(); + usernames = in.readStringArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArrayNullable(realms); + out.writeStringArrayNullable(usernames); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheRequestBuilder.java new file mode 100644 index 0000000000000..ae97c4037c40b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheRequestBuilder.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.action.realm; + +import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +public class ClearRealmCacheRequestBuilder extends NodesOperationRequestBuilder { + + public ClearRealmCacheRequestBuilder(ElasticsearchClient client) { + this(client, ClearRealmCacheAction.INSTANCE); + } + + public ClearRealmCacheRequestBuilder(ElasticsearchClient client, ClearRealmCacheAction action) { + super(client, action, new ClearRealmCacheRequest()); + } + + /** + * Sets the realms for which caches will be evicted. When not set all the caches of all realms will be + * evicted. + * + * @param realms The realm names + */ + public ClearRealmCacheRequestBuilder realms(String... realms) { + request.realms(realms); + return this; + } + + /** + * Sets the usernames of the users that should be evicted from the caches. When not set, all users + * will be evicted. + * + * @param usernames The usernames + */ + public ClearRealmCacheRequestBuilder usernames(String... usernames) { + request.usernames(usernames); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheResponse.java new file mode 100644 index 0000000000000..a5094fb5c3613 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheResponse.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
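 *
 * (Editor's note on the {@code ClearRealmCacheRequestBuilder} above; an illustrative sketch, not
 * part of this patch, where {@code client} is an {@code ElasticsearchClient} assumed to be
 * available to the caller.)
 * <pre>{@code
 * ClearRealmCacheResponse response = new ClearRealmCacheRequestBuilder(client)
 *     .realms("ldap1")
 *     .usernames("jdoe")
 *     .get();
 * }</pre>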
+ */ +package org.elasticsearch.xpack.core.security.action.realm; + +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.support.nodes.BaseNodeResponse; +import org.elasticsearch.action.support.nodes.BaseNodesResponse; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; + +import java.io.IOException; +import java.util.List; + +public class ClearRealmCacheResponse extends BaseNodesResponse implements ToXContentFragment { + + public ClearRealmCacheResponse() { + } + + public ClearRealmCacheResponse(ClusterName clusterName, List nodes, List failures) { + super(clusterName, nodes, failures); + } + + @Override + protected List readNodesFrom(StreamInput in) throws IOException { + return in.readList(Node::readNodeResponse); + } + + @Override + protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { + out.writeStreamableList(nodes); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("nodes"); + for (ClearRealmCacheResponse.Node node : getNodes()) { + builder.startObject(node.getNode().getId()); + builder.field("name", node.getNode().getName()); + builder.endObject(); + } + builder.endObject(); + + return builder; + } + + @Override + public String toString() { + try { + XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); + builder.startObject(); + toXContent(builder, EMPTY_PARAMS); + builder.endObject(); + return Strings.toString(builder); + } catch (IOException e) { + return "{ \"error\" : \"" + e.getMessage() + "\"}"; + } + } + + public static class Node extends BaseNodeResponse { + + public Node() { + } + + public Node(DiscoveryNode node) { + super(node); + } + + public static Node readNodeResponse(StreamInput in) throws IOException { + Node node = new Node(); + node.readFrom(in); + return node; + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheAction.java new file mode 100644 index 0000000000000..68a385f2dfa1f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheAction.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * The action for clearing the cache used by native roles that are stored in an index. 
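 *
 * A usage sketch (editor's illustration, not part of this patch), driving the action through the
 * request builder added later in this patch; {@code client} is an assumed
 * {@code ElasticsearchClient}:
 * <pre>{@code
 * ClearRolesCacheResponse response = new ClearRolesCacheRequestBuilder(client)
 *     .names("my_role")
 *     .get();
 * }</pre>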
+ */ +public class ClearRolesCacheAction extends Action { + + public static final ClearRolesCacheAction INSTANCE = new ClearRolesCacheAction(); + public static final String NAME = "cluster:admin/xpack/security/roles/cache/clear"; + + protected ClearRolesCacheAction() { + super(NAME); + } + + @Override + public ClearRolesCacheRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new ClearRolesCacheRequestBuilder(client, this, new ClearRolesCacheRequest()); + } + + @Override + public ClearRolesCacheResponse newResponse() { + return new ClearRolesCacheResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheRequest.java new file mode 100644 index 0000000000000..bb0bb293a880b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheRequest.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.support.nodes.BaseNodeRequest; +import org.elasticsearch.action.support.nodes.BaseNodesRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * The request used to clear the cache for native roles stored in an index. + */ +public class ClearRolesCacheRequest extends BaseNodesRequest { + + String[] names; + + /** + * Sets the roles for which caches will be evicted. When not set all the roles will be evicted from the cache. + * + * @param names The role names + */ + public ClearRolesCacheRequest names(String... names) { + this.names = names; + return this; + } + + /** + * @return an array of role names that will have the cache evicted or null if all + */ + public String[] names() { + return names; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + names = in.readOptionalStringArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalStringArray(names); + } + + public static class Node extends BaseNodeRequest { + private String[] names; + + public Node() { + } + + public Node(ClearRolesCacheRequest request, String nodeId) { + super(nodeId); + this.names = request.names(); + } + + public String[] getNames() { return names; } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + names = in.readOptionalStringArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalStringArray(names); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheRequestBuilder.java new file mode 100644 index 0000000000000..3fa4933f0fb16 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheRequestBuilder.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Request builder for the {@link ClearRolesCacheRequest} + */ +public class ClearRolesCacheRequestBuilder extends NodesOperationRequestBuilder { + + public ClearRolesCacheRequestBuilder(ElasticsearchClient client) { + this(client, ClearRolesCacheAction.INSTANCE, new ClearRolesCacheRequest()); + } + + public ClearRolesCacheRequestBuilder(ElasticsearchClient client, ClearRolesCacheAction action, ClearRolesCacheRequest request) { + super(client, action, request); + } + + /** + * Set the roles to be cleared + * + * @param names the names of the roles that should be cleared + * @return the builder instance + */ + public ClearRolesCacheRequestBuilder names(String... names) { + request.names(names); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheResponse.java new file mode 100644 index 0000000000000..7b3a780f2111e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheResponse.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.support.nodes.BaseNodeResponse; +import org.elasticsearch.action.support.nodes.BaseNodesResponse; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; + +import java.io.IOException; +import java.util.List; + +/** + * The response object that will be returned when clearing the cache of native roles + */ +public class ClearRolesCacheResponse extends BaseNodesResponse implements ToXContentFragment { + + public ClearRolesCacheResponse() { + } + + public ClearRolesCacheResponse(ClusterName clusterName, List nodes, List failures) { + super(clusterName, nodes, failures); + } + + @Override + protected List readNodesFrom(StreamInput in) throws IOException { + return in.readList(Node::readNodeResponse); + } + + @Override + protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { + out.writeStreamableList(nodes); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("nodes"); + for (ClearRolesCacheResponse.Node node: getNodes()) { + builder.startObject(node.getNode().getId()); + builder.field("name", node.getNode().getName()); + builder.endObject(); + } + builder.endObject(); + + return builder; + } + + @Override + public String toString() { + try { + XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); + builder.startObject(); + toXContent(builder, EMPTY_PARAMS); + builder.endObject(); + return Strings.toString(builder); + } catch (IOException e) { + return "{ \"error\" : \"" + e.getMessage() + "\"}"; + } + } + + public static class Node extends BaseNodeResponse { + + public Node() { + } + + public Node(DiscoveryNode node) { + super(node); + } + + public static Node readNodeResponse(StreamInput in) throws IOException { + Node node = new Node(); + node.readFrom(in); + return node; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleAction.java new file mode 100644 index 0000000000000..f7148411c9df4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleAction.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Action for deleting a role from the security index + */ +public class DeleteRoleAction extends Action { + + public static final DeleteRoleAction INSTANCE = new DeleteRoleAction(); + public static final String NAME = "cluster:admin/xpack/security/role/delete"; + + + protected DeleteRoleAction() { + super(NAME); + } + + @Override + public DeleteRoleRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new DeleteRoleRequestBuilder(client, this); + } + + @Override + public DeleteRoleResponse newResponse() { + return new DeleteRoleResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleRequest.java new file mode 100644 index 0000000000000..ff4d416e2002c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleRequest.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * A request delete a role from the security index + */ +public class DeleteRoleRequest extends ActionRequest implements WriteRequest { + + private String name; + private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE; + + public DeleteRoleRequest() { + } + + @Override + public DeleteRoleRequest setRefreshPolicy(RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; + return this; + } + + @Override + public RefreshPolicy getRefreshPolicy() { + return refreshPolicy; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (name == null) { + validationException = addValidationError("role name is missing", validationException); + } + return validationException; + } + + public void name(String name) { + this.name = name; + } + + public String name() { + return name; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + name = in.readString(); + refreshPolicy = RefreshPolicy.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(name); + refreshPolicy.writeTo(out); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleRequestBuilder.java new file mode 100644 index 0000000000000..ef1fab2d8e61b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleRequestBuilder.java @@ -0,0 +1,30 @@ +/* + * Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.WriteRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * A builder for requests to delete a role from the security index + */ +public class DeleteRoleRequestBuilder extends ActionRequestBuilder + implements WriteRequestBuilder { + + public DeleteRoleRequestBuilder(ElasticsearchClient client) { + this(client, DeleteRoleAction.INSTANCE); + } + + public DeleteRoleRequestBuilder(ElasticsearchClient client, DeleteRoleAction action) { + super(client, action, new DeleteRoleRequest()); + } + + public DeleteRoleRequestBuilder name(String name) { + request.name(name); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleResponse.java new file mode 100644 index 0000000000000..33314eb08695b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/DeleteRoleResponse.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Response for a role being deleted from the security index + */ +public class DeleteRoleResponse extends ActionResponse implements ToXContentObject { + + private boolean found = false; + + public DeleteRoleResponse() {} + + public DeleteRoleResponse(boolean found) { + this.found = found; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject().field("found", found).endObject(); + return builder; + } + + public boolean found() { + return this.found; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + found = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(found); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesAction.java new file mode 100644 index 0000000000000..aa770ba160bc8 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesAction.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
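 *
 * (Editor's note on the delete-role request builder and response above; an illustrative sketch,
 * not part of this patch, with {@code client} assumed to be available.)
 * <pre>{@code
 * DeleteRoleResponse response = new DeleteRoleRequestBuilder(client)
 *     .name("ops_read")
 *     .get();
 * if (response.found() == false) {
 *     // the role did not exist in the security index
 * }
 * }</pre>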
+ */ +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Action to retrieve a role from the security index + */ +public class GetRolesAction extends Action { + + public static final GetRolesAction INSTANCE = new GetRolesAction(); + public static final String NAME = "cluster:admin/xpack/security/role/get"; + + + protected GetRolesAction() { + super(NAME); + } + + @Override + public GetRolesRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new GetRolesRequestBuilder(client, this); + } + + @Override + public GetRolesResponse newResponse() { + return new GetRolesResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequest.java new file mode 100644 index 0000000000000..25851be2f015d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequest.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * Request to retrieve roles from the security index + */ +public class GetRolesRequest extends ActionRequest { + + private String[] names = Strings.EMPTY_ARRAY; + + public GetRolesRequest() { + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (names == null) { + validationException = addValidationError("role is missing", validationException); + } + return validationException; + } + + public void names(String... names) { + this.names = names; + } + + public String[] names() { + return names; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + names = in.readStringArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(names); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequestBuilder.java new file mode 100644 index 0000000000000..f630874609219 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesRequestBuilder.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Builder for requests to retrieve a role from the security index + */ +public class GetRolesRequestBuilder extends ActionRequestBuilder { + + public GetRolesRequestBuilder(ElasticsearchClient client) { + this(client, GetRolesAction.INSTANCE); + } + + public GetRolesRequestBuilder(ElasticsearchClient client, GetRolesAction action) { + super(client, action, new GetRolesRequest()); + } + + public GetRolesRequestBuilder names(String... names) { + request.names(names); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesResponse.java new file mode 100644 index 0000000000000..93c9d6bca9b64 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/GetRolesResponse.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.io.IOException; + +/** + * A response for the {@code Get Roles} API that holds the retrieved role descriptors. + */ +public class GetRolesResponse extends ActionResponse { + + private RoleDescriptor[] roles; + + public GetRolesResponse(RoleDescriptor... roles) { + this.roles = roles; + } + + public RoleDescriptor[] roles() { + return roles; + } + + public boolean hasRoles() { + return roles.length > 0; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + int size = in.readVInt(); + roles = new RoleDescriptor[size]; + for (int i = 0; i < size; i++) { + roles[i] = RoleDescriptor.readFrom(in); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(roles.length); + for (RoleDescriptor role : roles) { + RoleDescriptor.writeTo(role, out); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleAction.java new file mode 100644 index 0000000000000..8a7461eaeff15 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleAction.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
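 *
 * (Editor's note on the get-roles request builder and response above; an illustrative sketch,
 * not part of this patch, with {@code client} assumed to be available.)
 * <pre>{@code
 * GetRolesResponse response = new GetRolesRequestBuilder(client)
 *     .names("ops_read", "ops_write")
 *     .get();
 * if (response.hasRoles()) {
 *     for (RoleDescriptor role : response.roles()) {
 *         // inspect role.getName() and its privileges
 *     }
 * }
 * }</pre>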
+ */ +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Action for adding a role to the security index + */ +public class PutRoleAction extends Action { + + public static final PutRoleAction INSTANCE = new PutRoleAction(); + public static final String NAME = "cluster:admin/xpack/security/role/put"; + + + protected PutRoleAction() { + super(NAME); + } + + @Override + public PutRoleRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new PutRoleRequestBuilder(client, this); + } + + @Override + public PutRoleResponse newResponse() { + return new PutRoleResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java new file mode 100644 index 0000000000000..d0f3423fdcfe0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java @@ -0,0 +1,157 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.support.MetadataUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * Request object for adding a role to the security index + */ +public class PutRoleRequest extends ActionRequest implements WriteRequest { + + private String name; + private String[] clusterPrivileges = Strings.EMPTY_ARRAY; + private List indicesPrivileges = new ArrayList<>(); + private String[] runAs = Strings.EMPTY_ARRAY; + private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE; + private Map metadata; + + public PutRoleRequest() { + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (name == null) { + validationException = addValidationError("role name is missing", validationException); + } + if (metadata != null && MetadataUtils.containsReservedMetadata(metadata)) { + validationException = + addValidationError("metadata keys may not start with [" + MetadataUtils.RESERVED_PREFIX + "]", validationException); + } + return validationException; + } + + public void name(String name) { + this.name = name; + } + + public void cluster(String... clusterPrivileges) { + this.clusterPrivileges = clusterPrivileges; + } + + void addIndex(RoleDescriptor.IndicesPrivileges... 
privileges) { + this.indicesPrivileges.addAll(Arrays.asList(privileges)); + } + + public void addIndex(String[] indices, String[] privileges, String[] grantedFields, String[] deniedFields, + @Nullable BytesReference query) { + this.indicesPrivileges.add(RoleDescriptor.IndicesPrivileges.builder() + .indices(indices) + .privileges(privileges) + .grantedFields(grantedFields) + .deniedFields(deniedFields) + .query(query) + .build()); + } + + public void runAs(String... usernames) { + this.runAs = usernames; + } + + @Override + public PutRoleRequest setRefreshPolicy(RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; + return this; + } + + /** + * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}, the default), wait for a refresh ( + * {@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed ignore refreshes entirely ({@linkplain RefreshPolicy#NONE}). + */ + @Override + public WriteRequest.RefreshPolicy getRefreshPolicy() { + return refreshPolicy; + } + + public void metadata(Map metadata) { + this.metadata = metadata; + } + + public String name() { + return name; + } + + public String[] cluster() { + return clusterPrivileges; + } + + public RoleDescriptor.IndicesPrivileges[] indices() { + return indicesPrivileges.toArray(new RoleDescriptor.IndicesPrivileges[indicesPrivileges.size()]); + } + + public String[] runAs() { + return runAs; + } + + public Map metadata() { + return metadata; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + name = in.readString(); + clusterPrivileges = in.readStringArray(); + int indicesSize = in.readVInt(); + indicesPrivileges = new ArrayList<>(indicesSize); + for (int i = 0; i < indicesSize; i++) { + indicesPrivileges.add(RoleDescriptor.IndicesPrivileges.createFrom(in)); + } + runAs = in.readStringArray(); + refreshPolicy = RefreshPolicy.readFrom(in); + metadata = in.readMap(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(name); + out.writeStringArray(clusterPrivileges); + out.writeVInt(indicesPrivileges.size()); + for (RoleDescriptor.IndicesPrivileges index : indicesPrivileges) { + index.writeTo(out); + } + out.writeStringArray(runAs); + refreshPolicy.writeTo(out); + out.writeMap(metadata); + } + + public RoleDescriptor roleDescriptor() { + return new RoleDescriptor(name, + clusterPrivileges, + indicesPrivileges.toArray(new RoleDescriptor.IndicesPrivileges[indicesPrivileges.size()]), + runAs, + metadata); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestBuilder.java new file mode 100644 index 0000000000000..79142a7b5758a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestBuilder.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.WriteRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.io.IOException; +import java.util.Map; + +/** + * Builder for requests to add a role to the administrative index + */ +public class PutRoleRequestBuilder extends ActionRequestBuilder + implements WriteRequestBuilder { + + public PutRoleRequestBuilder(ElasticsearchClient client) { + this(client, PutRoleAction.INSTANCE); + } + + public PutRoleRequestBuilder(ElasticsearchClient client, PutRoleAction action) { + super(client, action, new PutRoleRequest()); + } + + /** + * Populate the put role request from the source and the role's name + */ + public PutRoleRequestBuilder source(String name, BytesReference source, XContentType xContentType) throws IOException { + // we pass false as last parameter because we want to reject the request if field permissions + // are given in 2.x syntax + RoleDescriptor descriptor = RoleDescriptor.parse(name, source, false, xContentType); + assert name.equals(descriptor.getName()); + request.name(name); + request.cluster(descriptor.getClusterPrivileges()); + request.addIndex(descriptor.getIndicesPrivileges()); + request.runAs(descriptor.getRunAs()); + request.metadata(descriptor.getMetadata()); + return this; + } + + public PutRoleRequestBuilder name(String name) { + request.name(name); + return this; + } + + public PutRoleRequestBuilder cluster(String... cluster) { + request.cluster(cluster); + return this; + } + + public PutRoleRequestBuilder runAs(String... runAsUsers) { + request.runAs(runAsUsers); + return this; + } + + public PutRoleRequestBuilder addIndices(String[] indices, String[] privileges, String[] grantedFields, String[] deniedFields, + @Nullable BytesReference query) { + request.addIndex(indices, privileges, grantedFields, deniedFields, query); + return this; + } + + public PutRoleRequestBuilder metadata(Map metadata) { + request.metadata(metadata); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleResponse.java new file mode 100644 index 0000000000000..98115bfd366e5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleResponse.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.role; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContent.Params; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Response when adding a role, includes a boolean for whether the role was + * created or updated. 
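 *
 * A usage sketch (editor's illustration, not part of this patch); {@code client} is an assumed
 * {@code ElasticsearchClient}, and the role grants read access to a hypothetical index pattern:
 * <pre>{@code
 * PutRoleResponse response = new PutRoleRequestBuilder(client)
 *     .name("ops_read")
 *     .cluster("monitor")
 *     .addIndices(new String[] { "logs-*" }, new String[] { "read" }, null, null, null)
 *     .get();
 * boolean created = response.isCreated();
 * }</pre>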
+ */ +public class PutRoleResponse extends ActionResponse implements ToXContentObject { + + private boolean created; + + public PutRoleResponse() { + } + + public PutRoleResponse(boolean created) { + this.created = created; + } + + public boolean isCreated() { + return created; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject().field("created", created).endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(created); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + this.created = in.readBoolean(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingAction.java new file mode 100644 index 0000000000000..79e99f3843491 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingAction.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.rolemapping; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Action for deleting a role-mapping from the + * org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore + */ +public class DeleteRoleMappingAction extends Action { + + public static final DeleteRoleMappingAction INSTANCE = new DeleteRoleMappingAction(); + public static final String NAME = "cluster:admin/xpack/security/role_mapping/delete"; + + private DeleteRoleMappingAction() { + super(NAME); + } + + @Override + public DeleteRoleMappingRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new DeleteRoleMappingRequestBuilder(client, this); + } + + @Override + public DeleteRoleMappingResponse newResponse() { + return new DeleteRoleMappingResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingRequest.java new file mode 100644 index 0000000000000..9d3e1758026ea --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingRequest.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.action.rolemapping; + +import java.io.IOException; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * A request to delete a role-mapping from the org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore + */ +public class DeleteRoleMappingRequest extends ActionRequest implements WriteRequest<DeleteRoleMappingRequest> { + + private String name; + private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE; + + public DeleteRoleMappingRequest() { + } + + @Override + public DeleteRoleMappingRequest setRefreshPolicy(RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; + return this; + } + + @Override + public RefreshPolicy getRefreshPolicy() { + return refreshPolicy; + } + + @Override + public ActionRequestValidationException validate() { + if (name == null) { + return addValidationError("role-mapping name is missing", null); + } else { + return null; + } + } + + public void setName(String name) { + this.name = name; + } + + public String getName() { + return name; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + name = in.readString(); + refreshPolicy = RefreshPolicy.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(name); + refreshPolicy.writeTo(out); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingRequestBuilder.java new file mode 100644 index 0000000000000..106e35896014e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingRequestBuilder.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.core.security.action.rolemapping; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.WriteRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * A builder for requests to delete a role-mapping from the + * org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore + */ +public class DeleteRoleMappingRequestBuilder extends ActionRequestBuilder + implements WriteRequestBuilder { + + public DeleteRoleMappingRequestBuilder(ElasticsearchClient client, + DeleteRoleMappingAction action) { + super(client, action, new DeleteRoleMappingRequest()); + } + + public DeleteRoleMappingRequestBuilder name(String name) { + request.setName(name); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingResponse.java new file mode 100644 index 0000000000000..af470c6c8216a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/DeleteRoleMappingResponse.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.rolemapping; + +import java.io.IOException; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +/** + * Response for a role-mapping being deleted from the + * org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore + */ +public class DeleteRoleMappingResponse extends ActionResponse implements ToXContentObject { + + private boolean found = false; + + /** + * Package private for {@link DeleteRoleMappingAction#newResponse()} + */ + public DeleteRoleMappingResponse() {} + + public DeleteRoleMappingResponse(boolean found) { + this.found = found; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject().field("found", found).endObject(); + return builder; + } + + /** + * If true, indicates the {@link DeleteRoleMappingRequest#getName() named role-mapping} was found and deleted. + * Otherwise, the role-mapping could not be found. + */ + public boolean isFound() { + return this.found; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + found = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(found); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsAction.java new file mode 100644 index 0000000000000..49ada117ae6a1 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsAction.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.rolemapping; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Action to retrieve one or more role-mappings from X-Pack security + + * see org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore + */ +public class GetRoleMappingsAction extends Action<GetRoleMappingsRequest, GetRoleMappingsResponse, GetRoleMappingsRequestBuilder> { + + public static final GetRoleMappingsAction INSTANCE = new GetRoleMappingsAction(); + public static final String NAME = "cluster:admin/xpack/security/role_mapping/get"; + + private GetRoleMappingsAction() { + super(NAME); + } + + @Override + public GetRoleMappingsRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new GetRoleMappingsRequestBuilder(client, this); + } + + @Override + public GetRoleMappingsResponse newResponse() { + return new GetRoleMappingsResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsRequest.java new file mode 100644 index 0000000000000..ff59aa8482d61 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsRequest.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.rolemapping; + +import java.io.IOException; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * Request to retrieve role-mappings from X-Pack security + * + * see org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore + */ +public class GetRoleMappingsRequest extends ActionRequest { + + private String[] names = Strings.EMPTY_ARRAY; + + public GetRoleMappingsRequest() { + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (names == null) { + validationException = addValidationError("role-mapping names are missing", + validationException); + } + return validationException; + } + + /** + * Specify (by name) which role-mappings to retrieve. + * @see ExpressionRoleMapping#getName() + */ + public void setNames(String... names) { + this.names = names; + } + + /** + * @see #setNames(String...)
+ */ + public String[] getNames() { + return names; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + names = in.readStringArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(names); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsRequestBuilder.java new file mode 100644 index 0000000000000..1beaed5d4aea7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsRequestBuilder.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.rolemapping; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Builder for a request to retrieve role-mappings from X-Pack security + * + * see org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore + */ +public class GetRoleMappingsRequestBuilder extends ActionRequestBuilder { + + public GetRoleMappingsRequestBuilder(ElasticsearchClient client, GetRoleMappingsAction action) { + super(client, action, new GetRoleMappingsRequest()); + } + + public GetRoleMappingsRequestBuilder names(String... names) { + request.setNames(names); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsResponse.java new file mode 100644 index 0000000000000..738dfa65aaa01 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/GetRoleMappingsResponse.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.rolemapping; + +import java.io.IOException; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; + +/** + * Response to {@link GetRoleMappingsAction get role-mappings API}. + * + * see org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore + */ +public class GetRoleMappingsResponse extends ActionResponse { + + private ExpressionRoleMapping[] mappings; + + public GetRoleMappingsResponse(ExpressionRoleMapping... 
mappings) { + this.mappings = mappings; + } + + public ExpressionRoleMapping[] mappings() { + return mappings; + } + + public boolean hasMappings() { + return mappings.length > 0; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + int size = in.readVInt(); + mappings = new ExpressionRoleMapping[size]; + for (int i = 0; i < size; i++) { + mappings[i] = new ExpressionRoleMapping(in); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(mappings.length); + for (ExpressionRoleMapping mapping : mappings) { + mapping.writeTo(out); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingAction.java new file mode 100644 index 0000000000000..4e9b427c63acf --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingAction.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.rolemapping; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Action for adding a role-mapping to the security index + */ +public class PutRoleMappingAction extends Action<PutRoleMappingRequest, PutRoleMappingResponse, PutRoleMappingRequestBuilder> { + + public static final PutRoleMappingAction INSTANCE = new PutRoleMappingAction(); + public static final String NAME = "cluster:admin/xpack/security/role_mapping/put"; + + private PutRoleMappingAction() { + super(NAME); + } + + @Override + public PutRoleMappingRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new PutRoleMappingRequestBuilder(client, this); + } + + @Override + public PutRoleMappingResponse newResponse() { + return new PutRoleMappingResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java new file mode 100644 index 0000000000000..0f876f6445317 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java @@ -0,0 +1,154 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.core.security.action.rolemapping; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionParser; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; +import org.elasticsearch.xpack.core.security.support.MetadataUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * Request object for adding/updating a role-mapping to the native store + * + * see org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore + */ +public class PutRoleMappingRequest extends ActionRequest + implements WriteRequest { + + private String name = null; + private boolean enabled = true; + private List roles = Collections.emptyList(); + private RoleMapperExpression rules = null; + private Map metadata = Collections.emptyMap(); + private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE; + + public PutRoleMappingRequest() { + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (name == null) { + validationException = addValidationError("role-mapping name is missing", + validationException); + } + if (roles.isEmpty()) { + validationException = addValidationError("role-mapping roles are missing", + validationException); + } + if (rules == null) { + validationException = addValidationError("role-mapping rules are missing", + validationException); + } + if (MetadataUtils.containsReservedMetadata(metadata)) { + validationException = addValidationError("metadata keys may not start with [" + + MetadataUtils.RESERVED_PREFIX + "]", validationException); + } + return validationException; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public boolean isEnabled() { + return enabled; + } + + public void setEnabled(boolean enabled) { + this.enabled = enabled; + } + + public List getRoles() { + return Collections.unmodifiableList(roles); + } + + public void setRoles(List roles) { + this.roles = new ArrayList<>(roles); + } + + public RoleMapperExpression getRules() { + return rules; + } + + public void setRules(RoleMapperExpression expression) { + this.rules = expression; + } + + @Override + public PutRoleMappingRequest setRefreshPolicy(RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; + return this; + } + + /** + * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}, the default), + * wait for a refresh ({@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed ignore refreshes + * entirely ({@linkplain RefreshPolicy#NONE}). 
+ */ + @Override + public RefreshPolicy getRefreshPolicy() { + return refreshPolicy; + } + + public void setMetadata(Map metadata) { + this.metadata = Objects.requireNonNull(metadata); + } + + public Map getMetadata() { + return metadata; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + this.name = in.readString(); + this.enabled = in.readBoolean(); + this.roles = in.readList(StreamInput::readString); + this.rules = ExpressionParser.readExpression(in); + this.metadata = in.readMap(); + this.refreshPolicy = RefreshPolicy.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(name); + out.writeBoolean(enabled); + out.writeStringList(roles); + ExpressionParser.writeExpression(rules, out); + out.writeMap(metadata); + refreshPolicy.writeTo(out); + } + + public ExpressionRoleMapping getMapping() { + return new ExpressionRoleMapping( + name, + rules, + roles, + metadata, + enabled + ); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequestBuilder.java new file mode 100644 index 0000000000000..1054ab9daa0d7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequestBuilder.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.rolemapping; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Map; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.WriteRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; + +/** + * Builder for requests to add/update a role-mapping to the native store + * + * see org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore + */ +public class PutRoleMappingRequestBuilder extends ActionRequestBuilder implements + WriteRequestBuilder { + + public PutRoleMappingRequestBuilder(ElasticsearchClient client, PutRoleMappingAction action) { + super(client, action, new PutRoleMappingRequest()); + } + + /** + * Populate the put role request from the source and the role's name + */ + public PutRoleMappingRequestBuilder source(String name, BytesReference source, + XContentType xContentType) throws IOException { + ExpressionRoleMapping mapping = ExpressionRoleMapping.parse(name, source, xContentType); + request.setName(name); + request.setEnabled(mapping.isEnabled()); + request.setRoles(mapping.getRoles()); + request.setRules(mapping.getExpression()); + request.setMetadata(mapping.getMetadata()); + return this; + } + + public PutRoleMappingRequestBuilder name(String name) { + request.setName(name); + return this; + } + + public PutRoleMappingRequestBuilder roles(String... 
roles) { + request.setRoles(Arrays.asList(roles)); + return this; + } + + public PutRoleMappingRequestBuilder expression(RoleMapperExpression expression) { + request.setRules(expression); + return this; + } + + public PutRoleMappingRequestBuilder enabled(boolean enabled) { + request.setEnabled(enabled); + return this; + } + + public PutRoleMappingRequestBuilder metadata(Map metadata) { + request.setMetadata(metadata); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingResponse.java new file mode 100644 index 0000000000000..eec74c6d61cb1 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingResponse.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.rolemapping; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContent.Params; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Response when adding/updating a role-mapping. + * + * see org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore + */ +public class PutRoleMappingResponse extends ActionResponse implements ToXContentObject { + + private boolean created; + + public PutRoleMappingResponse() { + } + + public PutRoleMappingResponse(boolean created) { + this.created = created; + } + + public boolean isCreated() { + return created; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject().field("created", created).endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(created); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + this.created = in.readBoolean(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateAction.java new file mode 100644 index 0000000000000..5169162920925 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateAction.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.action.saml; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Action for authenticating using SAML assertions + */ +public final class SamlAuthenticateAction + extends Action { + + public static final String NAME = "cluster:admin/xpack/security/saml/authenticate"; + public static final SamlAuthenticateAction INSTANCE = new SamlAuthenticateAction(); + + private SamlAuthenticateAction() { + super(NAME); + } + + @Override + public SamlAuthenticateRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new SamlAuthenticateRequestBuilder(client); + } + + @Override + public SamlAuthenticateResponse newResponse() { + return new SamlAuthenticateResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateRequest.java new file mode 100644 index 0000000000000..0d6d0f44c7110 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateRequest.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.saml; + +import java.util.List; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; + +/** + * Represents a request to authenticate using SAML assertions. + */ +public final class SamlAuthenticateRequest extends ActionRequest { + + private byte[] saml; + private List validRequestIds; + + public SamlAuthenticateRequest() { + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + public byte[] getSaml() { + return saml; + } + + public void setSaml(byte[] saml) { + this.saml = saml; + } + + public List getValidRequestIds() { + return validRequestIds; + } + + public void setValidRequestIds(List validRequestIds) { + this.validRequestIds = validRequestIds; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateRequestBuilder.java new file mode 100644 index 0000000000000..8b8efb504c46e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateRequestBuilder.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.action.saml; + +import java.util.List; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Request builder used to populate a {@link SamlAuthenticateRequest} + */ +public final class SamlAuthenticateRequestBuilder + extends ActionRequestBuilder { + + public SamlAuthenticateRequestBuilder(ElasticsearchClient client) { + super(client, SamlAuthenticateAction.INSTANCE, new SamlAuthenticateRequest()); + } + + public SamlAuthenticateRequestBuilder saml(byte[] saml) { + request.setSaml(saml); + return this; + } + + public SamlAuthenticateRequestBuilder validRequestIds(List validRequestIds) { + request.setValidRequestIds(validRequestIds); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateResponse.java new file mode 100644 index 0000000000000..84e029adac7f5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateResponse.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.saml; + +import java.io.IOException; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; + +/** + * The response from converting a SAML assertion into a security token. + * Actually nothing SAML specific in this... 
+ */ +public final class SamlAuthenticateResponse extends ActionResponse { + + private String principal; + private String tokenString; + private String refreshToken; + private TimeValue expiresIn; + + public SamlAuthenticateResponse() { + } + + public SamlAuthenticateResponse(String principal, String tokenString, String refreshToken, TimeValue expiresIn) { + this.principal = principal; + this.tokenString = tokenString; + this.refreshToken = refreshToken; + this.expiresIn = expiresIn; + } + + public String getPrincipal() { + return principal; + } + + public String getTokenString() { + return tokenString; + } + + public String getRefreshToken() { + return refreshToken; + } + + public TimeValue getExpiresIn() { + return expiresIn; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(principal); + out.writeString(tokenString); + out.writeString(refreshToken); + out.writeTimeValue(expiresIn); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + principal = in.readString(); + tokenString = in.readString(); + refreshToken = in.readString(); + expiresIn = in.readTimeValue(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionAction.java new file mode 100644 index 0000000000000..c9bbf3a6671ff --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionAction.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.saml; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Action to perform IdP-initiated logout for a SAML-SSO user + */ +public final class SamlInvalidateSessionAction + extends Action { + + public static final String NAME = "cluster:admin/xpack/security/saml/invalidate"; + public static final SamlInvalidateSessionAction INSTANCE = new SamlInvalidateSessionAction(); + + private SamlInvalidateSessionAction() { + super(NAME); + } + + @Override + public SamlInvalidateSessionRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new SamlInvalidateSessionRequestBuilder(client); + } + + @Override + public SamlInvalidateSessionResponse newResponse() { + return new SamlInvalidateSessionResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionRequest.java new file mode 100644 index 0000000000000..b2b49db838f31 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionRequest.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.action.saml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * Represents a request to invalidate a SAML session using a {@code LogoutRequest}. + */ +public final class SamlInvalidateSessionRequest extends ActionRequest { + + @Nullable + private String realmName; + + @Nullable + private String assertionConsumerServiceURL; + + private String queryString; + + public SamlInvalidateSessionRequest() { + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (Strings.isNullOrEmpty(queryString)) { + validationException = addValidationError("queryString is missing", validationException); + } + return validationException; + } + + public String getQueryString() { + return queryString; + } + + public void setQueryString(String queryString) { + this.queryString = queryString; + } + + public String getRealmName() { + return realmName; + } + + public void setRealmName(String realmName) { + this.realmName = realmName; + } + + public String getAssertionConsumerServiceURL() { + return assertionConsumerServiceURL; + } + + public void setAssertionConsumerServiceURL(String assertionConsumerServiceURL) { + this.assertionConsumerServiceURL = assertionConsumerServiceURL; + } + + @Override + public String toString() { + return getClass().getSimpleName() + "{" + + "realmName='" + realmName + '\'' + + ", assertionConsumerServiceURL='" + assertionConsumerServiceURL + '\'' + + ", url-query=" + queryString.length() + " chars" + + '}'; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionRequestBuilder.java new file mode 100644 index 0000000000000..0d9b26a7b650f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionRequestBuilder.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.action.saml; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Request builder used to populate a {@link SamlInvalidateSessionRequest} + */ +public final class SamlInvalidateSessionRequestBuilder + extends ActionRequestBuilder { + + public SamlInvalidateSessionRequestBuilder(ElasticsearchClient client) { + super(client, SamlInvalidateSessionAction.INSTANCE, new SamlInvalidateSessionRequest()); + } + + public SamlInvalidateSessionRequestBuilder queryString(String query) { + request.setQueryString(query); + return this; + } + + public SamlInvalidateSessionRequestBuilder realmName(String name) { + request.setRealmName(name); + return this; + } + + public SamlInvalidateSessionRequestBuilder assertionConsumerService(String url) { + request.setAssertionConsumerServiceURL(url); + return this; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionResponse.java new file mode 100644 index 0000000000000..7b90071ec6c70 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlInvalidateSessionResponse.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.saml; + +import java.io.IOException; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +/** + * Response to an IdP-initiated SAML {@code <LogoutRequest>} + */ +public final class SamlInvalidateSessionResponse extends ActionResponse { + + private String realmName; + private int count; + private String redirectUrl; + + public SamlInvalidateSessionResponse() { + } + + public SamlInvalidateSessionResponse(String realmName, int count, String redirectUrl) { + this.realmName = realmName; + this.count = count; + this.redirectUrl = redirectUrl; + } + + public String getRealmName() { + return realmName; + } + + public int getCount() { + return count; + } + + public String getRedirectUrl() { + return redirectUrl; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(realmName); + out.writeInt(count); + out.writeString(redirectUrl); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + realmName = in.readString(); + count = in.readInt(); + redirectUrl = in.readString(); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutAction.java new file mode 100644 index 0000000000000..bc7b0ed257dd4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutAction.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.saml; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Action for initiating a logout process for a SAML-SSO user + */ +public final class SamlLogoutAction extends Action { + + public static final String NAME = "cluster:admin/xpack/security/saml/logout"; + public static final SamlLogoutAction INSTANCE = new SamlLogoutAction(); + + private SamlLogoutAction() { + super(NAME); + } + + @Override + public SamlLogoutRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new SamlLogoutRequestBuilder(client); + } + + @Override + public SamlLogoutResponse newResponse() { + return new SamlLogoutResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutRequest.java new file mode 100644 index 0000000000000..45088fdd3d93a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutRequest.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.saml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * Represents a request to prepare a SAML {@code <LogoutRequest>}. + */ +public final class SamlLogoutRequest extends ActionRequest { + + private String token; + @Nullable + private String refreshToken; + + public SamlLogoutRequest() { + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (Strings.isNullOrEmpty(token)) { + validationException = addValidationError("token is missing", validationException); + } + return validationException; + } + + public String getToken() { + return token; + } + + public void setToken(String token) { + this.token = token; + } + + public String getRefreshToken() { + return refreshToken; + } + + public void setRefreshToken(String refreshToken) { + this.refreshToken = refreshToken; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutRequestBuilder.java new file mode 100644 index 0000000000000..8d920a0368fd6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutRequestBuilder.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.action.saml; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Request builder used to populate a {@link SamlLogoutRequest} + */ +public final class SamlLogoutRequestBuilder extends ActionRequestBuilder { + + public SamlLogoutRequestBuilder(ElasticsearchClient client) { + super(client, SamlLogoutAction.INSTANCE, new SamlLogoutRequest()); + } + + public SamlLogoutRequestBuilder token(String token) { + request.setToken(token); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutResponse.java new file mode 100644 index 0000000000000..2770b7925bd2f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlLogoutResponse.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.saml; + +import java.io.IOException; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +/** + * Response containing a SAML {@code <LogoutRequest>} for the current user + */ +public final class SamlLogoutResponse extends ActionResponse { + + private String redirectUrl; + + public SamlLogoutResponse() { + } + + public SamlLogoutResponse(String redirectUrl) { + this.redirectUrl = redirectUrl; + } + + public String getRedirectUrl() { + return redirectUrl; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(redirectUrl); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + redirectUrl = in.readString(); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationAction.java new file mode 100644 index 0000000000000..bbc4cdbd7fd5d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationAction.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.action.saml; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Action for initiating an authentication process using SAML assertions + */ +public final class SamlPrepareAuthenticationAction + extends Action { + + public static final String NAME = "cluster:admin/xpack/security/saml/prepare"; + public static final SamlPrepareAuthenticationAction INSTANCE = new SamlPrepareAuthenticationAction(); + + private SamlPrepareAuthenticationAction() { + super(NAME); + } + + @Override + public SamlPrepareAuthenticationRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new SamlPrepareAuthenticationRequestBuilder(client); + } + + @Override + public SamlPrepareAuthenticationResponse newResponse() { + return new SamlPrepareAuthenticationResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationRequest.java new file mode 100644 index 0000000000000..bd1c59a48b42b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationRequest.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.saml; + +import java.io.IOException; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +/** + * Represents a request to prepare a SAML {@code <AuthnRequest>}. 
+ */ +public final class SamlPrepareAuthenticationRequest extends ActionRequest { + + @Nullable + private String realmName; + + @Nullable + private String assertionConsumerServiceURL; + + public SamlPrepareAuthenticationRequest() { + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + public String getRealmName() { + return realmName; + } + + public void setRealmName(String realmName) { + this.realmName = realmName; + } + + public String getAssertionConsumerServiceURL() { + return assertionConsumerServiceURL; + } + + public void setAssertionConsumerServiceURL(String assertionConsumerServiceURL) { + this.assertionConsumerServiceURL = assertionConsumerServiceURL; + } + + @Override + public String toString() { + return getClass().getSimpleName() + "{" + + "realmName=" + realmName + + ", assertionConsumerServiceURL=" + assertionConsumerServiceURL + + '}'; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + realmName = in.readOptionalString(); + assertionConsumerServiceURL = in.readOptionalString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalString(realmName); + out.writeOptionalString(assertionConsumerServiceURL); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationRequestBuilder.java new file mode 100644 index 0000000000000..7474557a76d77 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationRequestBuilder.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.saml; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Request builder used to populate a {@link SamlPrepareAuthenticationRequest} + */ +public final class SamlPrepareAuthenticationRequestBuilder extends ActionRequestBuilder { + + public SamlPrepareAuthenticationRequestBuilder(ElasticsearchClient client) { + super(client, SamlPrepareAuthenticationAction.INSTANCE, new SamlPrepareAuthenticationRequest()); + } + + public SamlPrepareAuthenticationRequestBuilder realmName(String name) { + request.setRealmName(name); + return this; + } + + public SamlPrepareAuthenticationRequestBuilder assertionConsumerService(String url) { + request.setAssertionConsumerServiceURL(url); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationResponse.java new file mode 100644 index 0000000000000..d9ff6f68c6b72 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationResponse.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.saml; + +import java.io.IOException; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +/** + * Response containing a SAML {@code <AuthnRequest>} for a specific realm. + */ +public final class SamlPrepareAuthenticationResponse extends ActionResponse { + + private String realmName; + private String requestId; + private String redirectUrl; + + public SamlPrepareAuthenticationResponse() { + } + + public SamlPrepareAuthenticationResponse(String realmName, String requestId, String redirectUrl) { + this.realmName = realmName; + this.requestId = requestId; + this.redirectUrl = redirectUrl; + } + + public String getRealmName() { + return realmName; + } + + public String getRequestId() { + return requestId; + } + + public String getRedirectUrl() { + return redirectUrl; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(redirectUrl); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + redirectUrl = in.readString(); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenAction.java new file mode 100644 index 0000000000000..98a312b7fcc34 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenAction.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.token; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Action for creating a new token + */ +public final class CreateTokenAction extends Action { + + public static final String NAME = "cluster:admin/xpack/security/token/create"; + public static final CreateTokenAction INSTANCE = new CreateTokenAction(); + + private CreateTokenAction() { + super(NAME); + } + + @Override + public CreateTokenRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new CreateTokenRequestBuilder(client, INSTANCE); + } + + @Override + public CreateTokenResponse newResponse() { + return new CreateTokenResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java new file mode 100644 index 0000000000000..5956e1a661345 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java @@ -0,0 +1,188 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.action.token; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.xpack.core.security.authc.support.CharArrays; + +import java.io.IOException; +import java.util.Arrays; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * Represents a request to create a token based on the provided information. This class accepts the + * fields for an OAuth 2.0 access token request that uses the password grant type or the + * refresh_token grant type. + */ +public final class CreateTokenRequest extends ActionRequest { + + private String grantType; + private String username; + private SecureString password; + private String scope; + private String refreshToken; + + public CreateTokenRequest() {} + + public CreateTokenRequest(String grantType, @Nullable String username, @Nullable SecureString password, @Nullable String scope, + @Nullable String refreshToken) { + this.grantType = grantType; + this.username = username; + this.password = password; + this.scope = scope; + this.refreshToken = refreshToken; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if ("password".equals(grantType)) { + if (Strings.isNullOrEmpty(username)) { + validationException = addValidationError("username is missing", validationException); + } + if (password == null || password.getChars() == null || password.getChars().length == 0) { + validationException = addValidationError("password is missing", validationException); + } + if (refreshToken != null) { + validationException = + addValidationError("refresh_token is not supported with the password grant_type", validationException); + } + } else if ("refresh_token".equals(grantType)) { + if (username != null) { + validationException = + addValidationError("username is not supported with the refresh_token grant_type", validationException); + } + if (password != null) { + validationException = + addValidationError("password is not supported with the refresh_token grant_type", validationException); + } + if (refreshToken == null) { + validationException = addValidationError("refresh_token is missing", validationException); + } + } else { + validationException = addValidationError("grant_type only supports the values: [password, refresh_token]", validationException); + } + + return validationException; + } + + public void setGrantType(String grantType) { + this.grantType = grantType; + } + + public void setUsername(@Nullable String username) { + this.username = username; + } + + public void setPassword(@Nullable SecureString password) { + this.password = password; + } + + public void setScope(@Nullable String scope) { + this.scope = scope; + } + + public void setRefreshToken(@Nullable String refreshToken) { + this.refreshToken = refreshToken; + } + + public String getGrantType() { + return grantType; + } + + @Nullable + public String getUsername() { + return username; + } + + @Nullable + public SecureString getPassword() { + return password; 
+ } + + @Nullable + public String getScope() { + return scope; + } + + @Nullable + public String getRefreshToken() { + return refreshToken; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(grantType); + if (out.getVersion().onOrAfter(Version.V_6_2_0)) { + out.writeOptionalString(username); + if (password == null) { + out.writeOptionalBytesReference(null); + } else { + final byte[] passwordBytes = CharArrays.toUtf8Bytes(password.getChars()); + try { + out.writeOptionalBytesReference(new BytesArray(passwordBytes)); + } finally { + Arrays.fill(passwordBytes, (byte) 0); + } + } + out.writeOptionalString(refreshToken); + } else { + if ("refresh_token".equals(grantType)) { + throw new IllegalArgumentException("a refresh request cannot be sent to an older version"); + } else { + out.writeString(username); + final byte[] passwordBytes = CharArrays.toUtf8Bytes(password.getChars()); + try { + out.writeByteArray(passwordBytes); + } finally { + Arrays.fill(passwordBytes, (byte) 0); + } + } + } + out.writeOptionalString(scope); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + grantType = in.readString(); + if (in.getVersion().onOrAfter(Version.V_6_2_0)) { + username = in.readOptionalString(); + BytesReference bytesRef = in.readOptionalBytesReference(); + if (bytesRef != null) { + byte[] bytes = BytesReference.toBytes(bytesRef); + try { + password = new SecureString(CharArrays.utf8BytesToChars(bytes)); + } finally { + Arrays.fill(bytes, (byte) 0); + } + } else { + password = null; + } + refreshToken = in.readOptionalString(); + } else { + username = in.readString(); + final byte[] passwordBytes = in.readByteArray(); + try { + password = new SecureString(CharArrays.utf8BytesToChars(passwordBytes)); + } finally { + Arrays.fill(passwordBytes, (byte) 0); + } + } + scope = in.readOptionalString(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequestBuilder.java new file mode 100644 index 0000000000000..314a0aaf4aa2d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequestBuilder.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.token; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.settings.SecureString; + +/** + * Request builder used to populate a {@link CreateTokenRequest} + */ +public final class CreateTokenRequestBuilder + extends ActionRequestBuilder { + + public CreateTokenRequestBuilder(ElasticsearchClient client, + Action action) { + super(client, action, new CreateTokenRequest()); + } + + /** + * Specifies the grant type for this request. 
Currently only password is supported + */ + public CreateTokenRequestBuilder setGrantType(String grantType) { + request.setGrantType(grantType); + return this; + } + + /** + * Set the username to be used for authentication with a password grant + */ + public CreateTokenRequestBuilder setUsername(@Nullable String username) { + request.setUsername(username); + return this; + } + + /** + * Set the password credentials associated with the user. These credentials will be used for + * authentication and the resulting token will be for this user + */ + public CreateTokenRequestBuilder setPassword(@Nullable SecureString password) { + request.setPassword(password); + return this; + } + + /** + * Set the scope of the access token. A null scope implies the default scope. If + * the requested scope differs from the scope of the token, the token's scope will be returned + * in the response + */ + public CreateTokenRequestBuilder setScope(@Nullable String scope) { + request.setScope(scope); + return this; + } + + public CreateTokenRequestBuilder setRefreshToken(@Nullable String refreshToken) { + request.setRefreshToken(refreshToken); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java new file mode 100644 index 0000000000000..1cb1029e820e0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.token; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Response containing the token string that was generated from a token creation request. This + * object also contains the scope and expiration date. 
If the scope was not provided or if the + * provided scope matches the scope of the token, then the scope value is null + */ +public final class CreateTokenResponse extends ActionResponse implements ToXContentObject { + + private String tokenString; + private TimeValue expiresIn; + private String scope; + private String refreshToken; + + CreateTokenResponse() {} + + public CreateTokenResponse(String tokenString, TimeValue expiresIn, String scope, String refreshToken) { + this.tokenString = Objects.requireNonNull(tokenString); + this.expiresIn = Objects.requireNonNull(expiresIn); + this.scope = scope; + this.refreshToken = refreshToken; + } + + public String getTokenString() { + return tokenString; + } + + public String getScope() { + return scope; + } + + public TimeValue getExpiresIn() { + return expiresIn; + } + + public String getRefreshToken() { + return refreshToken; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(tokenString); + out.writeTimeValue(expiresIn); + out.writeOptionalString(scope); + if (out.getVersion().onOrAfter(Version.V_6_2_0)) { + out.writeString(refreshToken); + } + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + tokenString = in.readString(); + expiresIn = in.readTimeValue(); + scope = in.readOptionalString(); + if (in.getVersion().onOrAfter(Version.V_6_2_0)) { + refreshToken = in.readString(); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject() + .field("access_token", tokenString) + .field("type", "Bearer") + .field("expires_in", expiresIn.seconds()); + if (refreshToken != null) { + builder.field("refresh_token", refreshToken); + } + // only show the scope if it is not null + if (scope != null) { + builder.field("scope", scope); + } + return builder.endObject(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenAction.java new file mode 100644 index 0000000000000..5f199876d6a99 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenAction.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.action.token; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Action for invalidating a given token + */ +public final class InvalidateTokenAction extends Action { + + public static final String NAME = "cluster:admin/xpack/security/token/invalidate"; + public static final InvalidateTokenAction INSTANCE = new InvalidateTokenAction(); + + private InvalidateTokenAction() { + super(NAME); + } + + @Override + public InvalidateTokenRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new InvalidateTokenRequestBuilder(client); + } + + @Override + public InvalidateTokenResponse newResponse() { + return new InvalidateTokenResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenRequest.java new file mode 100644 index 0000000000000..7a8372fe456d3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenRequest.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.token; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * Request for invalidating a token so that it can no longer be used + */ +public final class InvalidateTokenRequest extends ActionRequest { + + public enum Type { + ACCESS_TOKEN, + REFRESH_TOKEN + } + + private String tokenString; + private Type tokenType; + + public InvalidateTokenRequest() {} + + /** + * @param tokenString the string representation of the token + */ + public InvalidateTokenRequest(String tokenString, Type type) { + this.tokenString = tokenString; + this.tokenType = type; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (Strings.isNullOrEmpty(tokenString)) { + validationException = addValidationError("token string must be provided", null); + } + if (tokenType == null) { + validationException = addValidationError("token type must be provided", validationException); + } + return validationException; + } + + public String getTokenString() { + return tokenString; + } + + void setTokenString(String token) { + this.tokenString = token; + } + + public Type getTokenType() { + return tokenType; + } + + void setTokenType(Type tokenType) { + this.tokenType = tokenType; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(tokenString); + if (out.getVersion().onOrAfter(Version.V_6_2_0)) { + out.writeVInt(tokenType.ordinal()); + } else if (tokenType == Type.REFRESH_TOKEN) { + throw new IllegalArgumentException("refresh token invalidation cannot be serialized with version [" + out.getVersion() + + "]"); + } + } + + 
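To show how the token classes above fit together, here is a minimal sketch of requesting an access token with the OAuth 2.0 password grant type. The `client`, the username `joe`, and the wrapper class are assumptions for illustration only:

```java
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.xpack.core.security.action.token.CreateTokenAction;
import org.elasticsearch.xpack.core.security.action.token.CreateTokenRequestBuilder;
import org.elasticsearch.xpack.core.security.action.token.CreateTokenResponse;

class CreateTokenExample {
    /** Requests an access token for a hypothetical user "joe" using the password grant. */
    static CreateTokenResponse createToken(ElasticsearchClient client, SecureString password) {
        return new CreateTokenRequestBuilder(client, CreateTokenAction.INSTANCE)
                .setGrantType("password")
                .setUsername("joe")
                .setPassword(password)
                .get();
    }
}
```

The returned `CreateTokenResponse` exposes the token string, its expiry, and, when one was issued, a refresh token.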
@Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + tokenString = in.readString(); + if (in.getVersion().onOrAfter(Version.V_6_2_0)) { + tokenType = Type.values()[in.readVInt()]; + } else { + tokenType = Type.ACCESS_TOKEN; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenRequestBuilder.java new file mode 100644 index 0000000000000..861757aff3c02 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenRequestBuilder.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.token; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Request builder that is used to populate a {@link InvalidateTokenRequest} + */ +public final class InvalidateTokenRequestBuilder + extends ActionRequestBuilder { + + public InvalidateTokenRequestBuilder(ElasticsearchClient client) { + super(client, InvalidateTokenAction.INSTANCE, new InvalidateTokenRequest()); + } + + /** + * The string representation of the token that is being invalidated. This is the value returned + * from a create token request. + */ + public InvalidateTokenRequestBuilder setTokenString(String token) { + request.setTokenString(token); + return this; + } + + /** + * Sets the type of the token that should be invalidated + */ + public InvalidateTokenRequestBuilder setType(InvalidateTokenRequest.Type type) { + request.setTokenType(type); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenResponse.java new file mode 100644 index 0000000000000..cebb005b27254 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/InvalidateTokenResponse.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.token; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Response for a invalidation of a token. 
+ */ +public final class InvalidateTokenResponse extends ActionResponse { + + private boolean created; + + public InvalidateTokenResponse() {} + + public InvalidateTokenResponse(boolean created) { + this.created = created; + } + + /** + * If the token is already invalidated then created will be false + */ + public boolean isCreated() { + return created; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(created); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + created = in.readBoolean(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/RefreshTokenAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/RefreshTokenAction.java new file mode 100644 index 0000000000000..a322f3fa847ad --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/RefreshTokenAction.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.token; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public final class RefreshTokenAction extends Action { + + public static final String NAME = "cluster:admin/xpack/security/token/refresh"; + public static final RefreshTokenAction INSTANCE = new RefreshTokenAction(); + + private RefreshTokenAction() { + super(NAME); + } + + @Override + public CreateTokenRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new CreateTokenRequestBuilder(client, INSTANCE); + } + + @Override + public CreateTokenResponse newResponse() { + return new CreateTokenResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateAction.java new file mode 100644 index 0000000000000..b109ae0861a86 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateAction.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
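A companion sketch for the refresh and invalidation paths, again with hypothetical inputs (`client` and the token strings) rather than anything defined in this patch:

```java
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.xpack.core.security.action.token.CreateTokenRequestBuilder;
import org.elasticsearch.xpack.core.security.action.token.CreateTokenResponse;
import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenRequest;
import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenRequestBuilder;
import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenResponse;
import org.elasticsearch.xpack.core.security.action.token.RefreshTokenAction;

class TokenLifecycleExample {
    /** Exchanges a refresh token for a new access token, then invalidates the old access token. */
    static CreateTokenResponse refreshAndInvalidate(ElasticsearchClient client, String refreshToken, String oldAccessToken) {
        CreateTokenResponse refreshed = new CreateTokenRequestBuilder(client, RefreshTokenAction.INSTANCE)
                .setGrantType("refresh_token")
                .setRefreshToken(refreshToken)
                .get();
        InvalidateTokenResponse invalidated = new InvalidateTokenRequestBuilder(client)
                .setTokenString(oldAccessToken)
                .setType(InvalidateTokenRequest.Type.ACCESS_TOKEN)
                .get();
        // isCreated() is false when the token had already been invalidated
        boolean newlyInvalidated = invalidated.isCreated();
        return refreshed;
    }
}
```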
+ */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public class AuthenticateAction extends Action { + + public static final String NAME = "cluster:admin/xpack/security/user/authenticate"; + public static final AuthenticateAction INSTANCE = new AuthenticateAction(); + + public AuthenticateAction() { + super(NAME); + } + + @Override + public AuthenticateRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new AuthenticateRequestBuilder(client); + } + + @Override + public AuthenticateResponse newResponse() { + return new AuthenticateResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequest.java new file mode 100644 index 0000000000000..1b1b5d8db6ca8 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequest.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public class AuthenticateRequest extends ActionRequest implements UserRequest { + + private String username; + + public AuthenticateRequest() {} + + public AuthenticateRequest(String username) { + this.username = username; + } + + @Override + public ActionRequestValidationException validate() { + // we cannot apply our validation rules here as an authenticate request could be for an LDAP user that doesn't fit our restrictions + return null; + } + + public String username() { + return username; + } + + public void username(String username) { + this.username = username; + } + + @Override + public String[] usernames() { + return new String[] { username }; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + username = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(username); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequestBuilder.java new file mode 100644 index 0000000000000..da3ce7956b84e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequestBuilder.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +public class AuthenticateRequestBuilder + extends ActionRequestBuilder { + + public AuthenticateRequestBuilder(ElasticsearchClient client) { + this(client, AuthenticateAction.INSTANCE); + } + + public AuthenticateRequestBuilder(ElasticsearchClient client, AuthenticateAction action) { + super(client, action, new AuthenticateRequest()); + } + + public AuthenticateRequestBuilder username(String username) { + request.username(username); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java new file mode 100644 index 0000000000000..0cf7ace1103d0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.security.user.User; + +import java.io.IOException; + +public class AuthenticateResponse extends ActionResponse { + + private User user; + + public AuthenticateResponse() {} + + public AuthenticateResponse(User user) { + this.user = user; + } + + public User user() { + return user; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + User.writeTo(user, out); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + user = User.readFrom(in); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordAction.java new file mode 100644 index 0000000000000..44b31e3f3ded6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordAction.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
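The authenticate action above can be exercised in a similar way; the username `joe` and the surrounding class are illustrative assumptions:

```java
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequestBuilder;
import org.elasticsearch.xpack.core.security.user.User;

class AuthenticateExample {
    /** Resolves the authenticated view of a hypothetical user "joe". */
    static User authenticate(ElasticsearchClient client) {
        return new AuthenticateRequestBuilder(client)
                .username("joe")
                .get()
                .user();
    }
}
```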
+ */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public class ChangePasswordAction extends Action { + + public static final ChangePasswordAction INSTANCE = new ChangePasswordAction(); + public static final String NAME = "cluster:admin/xpack/security/user/change_password"; + + protected ChangePasswordAction() { + super(NAME); + } + + @Override + public ChangePasswordRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new ChangePasswordRequestBuilder(client); + } + + @Override + public ChangePasswordResponse newResponse() { + return new ChangePasswordResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequest.java new file mode 100644 index 0000000000000..f84b133d984b6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequest.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.security.authc.support.CharArrays; + +import java.io.IOException; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * Request to change a user's password. + */ +public class ChangePasswordRequest extends ActionRequest + implements UserRequest, WriteRequest { + + private String username; + private char[] passwordHash; + private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE; + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (username == null) { + validationException = addValidationError("username is missing", validationException); + } + if (passwordHash == null) { + validationException = addValidationError("password is missing", validationException); + } + return validationException; + } + + public String username() { + return username; + } + + public void username(String username) { + this.username = username; + } + + public char[] passwordHash() { + return passwordHash; + } + + public void passwordHash(char[] passwordHash) { + this.passwordHash = passwordHash; + } + + /** + * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}, the default), wait for a refresh ( + * {@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed ignore refreshes entirely ({@linkplain RefreshPolicy#NONE}). 
+ */ + @Override + public RefreshPolicy getRefreshPolicy() { + return refreshPolicy; + } + + @Override + public ChangePasswordRequest setRefreshPolicy(RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; + return this; + } + + @Override + public String[] usernames() { + return new String[] { username }; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + username = in.readString(); + passwordHash = CharArrays.utf8BytesToChars(BytesReference.toBytes(in.readBytesReference())); + refreshPolicy = RefreshPolicy.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(username); + out.writeBytesReference(new BytesArray(CharArrays.toUtf8Bytes(passwordHash))); + refreshPolicy.writeTo(out); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequestBuilder.java new file mode 100644 index 0000000000000..b7ff68118e3cb --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequestBuilder.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.WriteRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.support.Validation; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.xcontent.XContentUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.CharBuffer; + +/** + * Request to change a user's password. + */ +public class ChangePasswordRequestBuilder + extends ActionRequestBuilder + implements WriteRequestBuilder { + + public ChangePasswordRequestBuilder(ElasticsearchClient client) { + super(client, ChangePasswordAction.INSTANCE, new ChangePasswordRequest()); + } + + public ChangePasswordRequestBuilder username(String username) { + request.username(username); + return this; + } + + public static char[] validateAndHashPassword(SecureString password) { + Validation.Error error = Validation.Users.validatePassword(password.getChars()); + if (error != null) { + ValidationException validationException = new ValidationException(); + validationException.addValidationError(error.toString()); + throw validationException; + } + return Hasher.BCRYPT.hash(password); + } + + /** + * Sets the password. Note: the char[] passed to this method will be cleared. 
+ */ + public ChangePasswordRequestBuilder password(char[] password) { + try (SecureString secureString = new SecureString(password)) { + char[] hash = validateAndHashPassword(secureString); + request.passwordHash(hash); + } + return this; + } + + /** + * Populate the change password request from the source in the provided content type + */ + public ChangePasswordRequestBuilder source(BytesReference source, XContentType xContentType) throws IOException { + // EMPTY is ok here because we never call namedObject + try (InputStream stream = source.streamInput(); + XContentParser parser = xContentType.xContent() + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + XContentUtils.verifyObject(parser); + XContentParser.Token token; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (User.Fields.PASSWORD.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_STRING) { + String password = parser.text(); + final char[] passwordChars = password.toCharArray(); + password(passwordChars); + assert CharBuffer.wrap(passwordChars).chars().noneMatch((i) -> (char) i != (char) 0) : "expected password to " + + "clear the char[] but it did not!"; + } else { + throw new ElasticsearchParseException( + "expected field [{}] to be of type string, but found [{}] instead", currentFieldName, token); + } + } else { + throw new ElasticsearchParseException("failed to parse change password request. unexpected field [{}]", + currentFieldName); + } + } + } + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordResponse.java new file mode 100644 index 0000000000000..4a1c5c33a6d7c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordResponse.java @@ -0,0 +1,13 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionResponse; + +public class ChangePasswordResponse extends ActionResponse { + + public ChangePasswordResponse() {} +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserAction.java new file mode 100644 index 0000000000000..54f99f923dbd9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserAction.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Action for deleting a native user. 
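A small sketch of the change-password flow built from the classes above; `client`, the username, and the new password are placeholders:

```java
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.xpack.core.security.action.user.ChangePasswordRequestBuilder;

class ChangePasswordExample {
    /** Changes the password of a hypothetical native user "joe". */
    static void changePassword(ElasticsearchClient client, char[] newPassword) {
        new ChangePasswordRequestBuilder(client)
                .username("joe")
                .password(newPassword) // validates, hashes, and clears the char[]
                .get();
    }
}
```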
+ */ +public class DeleteUserAction extends Action { + + public static final DeleteUserAction INSTANCE = new DeleteUserAction(); + public static final String NAME = "cluster:admin/xpack/security/user/delete"; + + protected DeleteUserAction() { + super(NAME); + } + + @Override + public DeleteUserRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new DeleteUserRequestBuilder(client, this); + } + + @Override + public DeleteUserResponse newResponse() { + return new DeleteUserResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserRequest.java new file mode 100644 index 0000000000000..6587576b515b3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserRequest.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * A request to delete a native user. + */ +public class DeleteUserRequest extends ActionRequest implements UserRequest, WriteRequest { + + private String username; + private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE; + + public DeleteUserRequest() { + } + + public DeleteUserRequest(String username) { + this.username = username; + } + + @Override + public DeleteUserRequest setRefreshPolicy(RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; + return this; + } + + @Override + public RefreshPolicy getRefreshPolicy() { + return refreshPolicy; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (username == null) { + validationException = addValidationError("username is missing", validationException); + } + return validationException; + } + + public String username() { + return this.username; + } + + public void username(String username) { + this.username = username; + } + + @Override + public String[] usernames() { + return new String[] { username }; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + username = in.readString(); + refreshPolicy = RefreshPolicy.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(username); + refreshPolicy.writeTo(out); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserRequestBuilder.java new file mode 100644 index 0000000000000..1eecf5cf36b86 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserRequestBuilder.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch 
B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.WriteRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +public class DeleteUserRequestBuilder extends ActionRequestBuilder + implements WriteRequestBuilder { + + public DeleteUserRequestBuilder(ElasticsearchClient client) { + this(client, DeleteUserAction.INSTANCE); + } + + public DeleteUserRequestBuilder(ElasticsearchClient client, DeleteUserAction action) { + super(client, action, new DeleteUserRequest()); + } + + public DeleteUserRequestBuilder username(String username) { + request.username(username); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserResponse.java new file mode 100644 index 0000000000000..8b668a9b97b14 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/DeleteUserResponse.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Response when deleting a native user. Returns a single boolean field for whether the user was + * found (and deleted) or not found. + */ +public class DeleteUserResponse extends ActionResponse implements ToXContentObject { + + private boolean found; + + public DeleteUserResponse() { + } + + public DeleteUserResponse(boolean found) { + this.found = found; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject().field("found", found).endObject(); + return builder; + } + + public boolean found() { + return this.found; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + found = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(found); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersAction.java new file mode 100644 index 0000000000000..4b9bf56fb7b91 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersAction.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
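For completeness, deleting a native user through the builder added above might look like the following sketch (all names hypothetical):

```java
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.xpack.core.security.action.user.DeleteUserRequestBuilder;

class DeleteUserExample {
    /** Deletes a hypothetical native user "joe" and reports whether the user existed. */
    static boolean deleteUser(ElasticsearchClient client) {
        return new DeleteUserRequestBuilder(client)
                .username("joe")
                .get()
                .found();
    }
}
```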
+ */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Action for retrieving a user from the security index + */ +public class GetUsersAction extends Action { + + public static final GetUsersAction INSTANCE = new GetUsersAction(); + public static final String NAME = "cluster:admin/xpack/security/user/get"; + + protected GetUsersAction() { + super(NAME); + } + + @Override + public GetUsersRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new GetUsersRequestBuilder(client, this); + } + + @Override + public GetUsersResponse newResponse() { + return new GetUsersResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersRequest.java new file mode 100644 index 0000000000000..3ed0f798b371c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersRequest.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * Request to retrieve a native user. + */ +public class GetUsersRequest extends ActionRequest implements UserRequest { + + private String[] usernames; + + public GetUsersRequest() { + usernames = Strings.EMPTY_ARRAY; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (usernames == null) { + validationException = addValidationError("usernames cannot be null", validationException); + } + return validationException; + } + + public void usernames(String... usernames) { + this.usernames = usernames; + } + + @Override + public String[] usernames() { + return usernames; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + usernames = in.readStringArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(usernames); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersRequestBuilder.java new file mode 100644 index 0000000000000..6226146dd7cf4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersRequestBuilder.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +public class GetUsersRequestBuilder extends ActionRequestBuilder { + + public GetUsersRequestBuilder(ElasticsearchClient client) { + this(client, GetUsersAction.INSTANCE); + } + + public GetUsersRequestBuilder(ElasticsearchClient client, GetUsersAction action) { + super(client, action, new GetUsersRequest()); + } + + public GetUsersRequestBuilder usernames(String... usernames) { + request.usernames(usernames); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java new file mode 100644 index 0000000000000..666b79cfe5db7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.security.user.User; + +import java.io.IOException; +import java.util.Collection; + +/** + * Response containing a User retrieved from the security index + */ +public class GetUsersResponse extends ActionResponse { + + private User[] users; + + public GetUsersResponse(User... users) { + this.users = users; + } + + public GetUsersResponse(Collection users) { + this(users.toArray(new User[users.size()])); + } + + public User[] users() { + return users; + } + + public boolean hasUsers() { + return users != null && users.length > 0; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + int size = in.readVInt(); + if (size < 0) { + users = null; + } else { + users = new User[size]; + for (int i = 0; i < size; i++) { + users[i] = User.readFrom(in); + } + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(users == null ? -1 : users.length); + if (users != null) { + for (User user : users) { + User.writeTo(user, out); + } + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesAction.java new file mode 100644 index 0000000000000..5df11a349a724 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesAction.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
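Retrieving users works the same way; the usernames below are placeholders:

```java
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.xpack.core.security.action.user.GetUsersRequestBuilder;
import org.elasticsearch.xpack.core.security.user.User;

class GetUsersExample {
    /** Fetches two hypothetical native users from the security index. */
    static User[] getUsers(ElasticsearchClient client) {
        return new GetUsersRequestBuilder(client)
                .usernames("joe", "jane")
                .get()
                .users();
    }
}
```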
+ */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +/** + * This action is testing whether a user has the specified + * {@link RoleDescriptor.IndicesPrivileges privileges} + */ +public class HasPrivilegesAction extends Action { + + public static final HasPrivilegesAction INSTANCE = new HasPrivilegesAction(); + public static final String NAME = "cluster:admin/xpack/security/user/has_privileges"; + + private HasPrivilegesAction() { + super(NAME); + } + + @Override + public HasPrivilegesRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new HasPrivilegesRequestBuilder(client); + } + + @Override + public HasPrivilegesResponse newResponse() { + return new HasPrivilegesResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequest.java new file mode 100644 index 0000000000000..101ae00d635fc --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequest.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.io.IOException; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * A request for checking a user's privileges + */ +public class HasPrivilegesRequest extends ActionRequest implements UserRequest { + + private String username; + private String[] clusterPrivileges; + private RoleDescriptor.IndicesPrivileges[] indexPrivileges; + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (clusterPrivileges == null) { + validationException = addValidationError("clusterPrivileges must not be null", validationException); + } + if (indexPrivileges == null) { + validationException = addValidationError("indexPrivileges must not be null", validationException); + } + if (clusterPrivileges != null && clusterPrivileges.length == 0 && indexPrivileges != null && indexPrivileges.length == 0) { + validationException = addValidationError("clusterPrivileges and indexPrivileges cannot both be empty", + validationException); + } + return validationException; + } + + /** + * @return the username that this request applies to. + */ + public String username() { + return username; + } + + /** + * Set the username that the request applies to. 
Must not be {@code null} + */ + public void username(String username) { + this.username = username; + } + + @Override + public String[] usernames() { + return new String[] { username }; + } + + public RoleDescriptor.IndicesPrivileges[] indexPrivileges() { + return indexPrivileges; + } + + public String[] clusterPrivileges() { + return clusterPrivileges; + } + + public void indexPrivileges(RoleDescriptor.IndicesPrivileges... privileges) { + this.indexPrivileges = privileges; + } + + public void clusterPrivileges(String... privileges) { + this.clusterPrivileges = privileges; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + this.username = in.readString(); + this.clusterPrivileges = in.readStringArray(); + int indexSize = in.readVInt(); + indexPrivileges = new RoleDescriptor.IndicesPrivileges[indexSize]; + for (int i = 0; i < indexSize; i++) { + indexPrivileges[i] = RoleDescriptor.IndicesPrivileges.createFrom(in); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(username); + out.writeStringArray(clusterPrivileges); + out.writeVInt(indexPrivileges.length); + for (RoleDescriptor.IndicesPrivileges priv : indexPrivileges) { + priv.writeTo(out); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestBuilder.java new file mode 100644 index 0000000000000..2bf2bdb4d876e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesRequestBuilder.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.io.IOException; + +/** + * Request builder for checking a user's privileges + */ +public class HasPrivilegesRequestBuilder + extends ActionRequestBuilder { + + public HasPrivilegesRequestBuilder(ElasticsearchClient client) { + super(client, HasPrivilegesAction.INSTANCE, new HasPrivilegesRequest()); + } + + /** + * Set the username of the user that should enabled or disabled. 
Must not be {@code null} + */ + public HasPrivilegesRequestBuilder username(String username) { + request.username(username); + return this; + } + + /** + * Set whether the user should be enabled or not + */ + public HasPrivilegesRequestBuilder source(String username, BytesReference source, XContentType xContentType) throws IOException { + final RoleDescriptor role = RoleDescriptor.parsePrivilegesCheck(username + "/has_privileges", source, xContentType); + request.username(username); + request.indexPrivileges(role.getIndicesPrivileges()); + request.clusterPrivileges(role.getClusterPrivileges()); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java new file mode 100644 index 0000000000000..dcc34d75ddbaf --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.user; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +/** + * Response for a {@link HasPrivilegesRequest} + */ +public class HasPrivilegesResponse extends ActionResponse { + private boolean completeMatch; + private Map cluster; + private List index; + + public HasPrivilegesResponse() { + this(true, Collections.emptyMap(), Collections.emptyList()); + } + + public HasPrivilegesResponse(boolean completeMatch, Map cluster, Collection index) { + super(); + this.completeMatch = completeMatch; + this.cluster = new HashMap<>(cluster); + this.index = new ArrayList<>(index); + } + + public boolean isCompleteMatch() { + return completeMatch; + } + + public Map getClusterPrivileges() { + return Collections.unmodifiableMap(cluster); + } + + public List getIndexPrivileges() { + return Collections.unmodifiableList(index); + } + + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + completeMatch = in.readBoolean(); + int count = in.readVInt(); + index = new ArrayList<>(count); + for (int i = 0; i < count; i++) { + final String index = in.readString(); + final Map privileges = in.readMap(StreamInput::readString, StreamInput::readBoolean); + this.index.add(new IndexPrivileges(index, privileges)); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(completeMatch); + out.writeVInt(index.size()); + for (IndexPrivileges index : index) { + out.writeString(index.index); + out.writeMap(index.privileges, StreamOutput::writeString, StreamOutput::writeBoolean); + } + } + + public static class IndexPrivileges { + private final String index; + private final Map privileges; + + public IndexPrivileges(String index, Map privileges) { + this.index = Objects.requireNonNull(index); + this.privileges = Collections.unmodifiableMap(privileges); + } + + 
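
Taken together, the request builder and response above support a straightforward privilege check. The sketch below is illustrative only: the `client` handle, the username, and the JSON body are assumptions modelled on the fields consumed by `RoleDescriptor.parsePrivilegesCheck`, and are not part of this change.

```java
// Illustrative sketch: check whether "jdoe" may monitor the cluster and read "logs-*".
// Assumes an org.elasticsearch.client.ElasticsearchClient is available; imports omitted for brevity.
static boolean checkPrivileges(ElasticsearchClient client) throws IOException {
    String body = "{\"cluster\":[\"monitor\"],"
            + "\"index\":[{\"names\":[\"logs-*\"],\"privileges\":[\"read\"]}]}";
    HasPrivilegesResponse response = new HasPrivilegesRequestBuilder(client)
            .source("jdoe", new BytesArray(body), XContentType.JSON)
            .get();
    for (HasPrivilegesResponse.IndexPrivileges index : response.getIndexPrivileges()) {
        // Each entry reports, per index pattern, which of the requested privileges were granted.
        System.out.println(index.getIndex() + " -> " + index.getPrivileges());
    }
    return response.isCompleteMatch();
}
```
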
public String getIndex() { + return index; + } + + public Map getPrivileges() { + return privileges; + } + + @Override + public String toString() { + return getClass().getSimpleName() + "{" + + "index='" + index + '\'' + + ", privileges=" + privileges + + '}'; + } + + @Override + public int hashCode() { + int result = index.hashCode(); + result = 31 * result + privileges.hashCode(); + return result; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + final IndexPrivileges other = (IndexPrivileges) o; + return this.index.equals(other.index) && this.privileges.equals(other.privileges); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserAction.java new file mode 100644 index 0000000000000..377fb57d00754 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserAction.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Action for putting (adding/updating) a native user. + */ +public class PutUserAction extends Action { + + public static final PutUserAction INSTANCE = new PutUserAction(); + public static final String NAME = "cluster:admin/xpack/security/user/put"; + + protected PutUserAction() { + super(NAME); + } + + @Override + public PutUserRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new PutUserRequestBuilder(client, this); + } + + @Override + public PutUserResponse newResponse() { + return new PutUserResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java new file mode 100644 index 0000000000000..c018ad5f73eda --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequest.java @@ -0,0 +1,183 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.xpack.core.security.support.MetadataUtils; +import org.elasticsearch.xpack.core.security.support.Validation; + +import java.io.IOException; +import java.util.Map; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * Request object to put a native user. + */ +public class PutUserRequest extends ActionRequest implements UserRequest, WriteRequest { + + private String username; + private String[] roles; + private String fullName; + private String email; + private Map metadata; + private char[] passwordHash; + private boolean enabled = true; + private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE; + + public PutUserRequest() { + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (username == null) { + validationException = addValidationError("user is missing", validationException); + } else { + Validation.Error error = Validation.Users.validateUsername(username, false, Settings.EMPTY); + if (error != null) { + validationException = addValidationError(error.toString(), validationException); + } + } + if (roles == null) { + validationException = addValidationError("roles are missing", validationException); + } + if (metadata != null && MetadataUtils.containsReservedMetadata(metadata)) { + validationException = addValidationError("metadata keys may not start with [" + MetadataUtils.RESERVED_PREFIX + "]", + validationException); + } + // we do not check for a password hash here since it is possible that the user exists and we don't want to update the password + return validationException; + } + + public void username(String username) { + this.username = username; + } + + public void roles(String... roles) { + this.roles = roles; + } + + public void fullName(String fullName) { + this.fullName = fullName; + } + + public void email(String email) { + this.email = email; + } + + public void metadata(Map metadata) { + this.metadata = metadata; + } + + public void passwordHash(@Nullable char[] passwordHash) { + this.passwordHash = passwordHash; + } + + public boolean enabled() { + return enabled; + } + + /** + * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}, the default), wait for a refresh ( + * {@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed ignore refreshes entirely ({@linkplain RefreshPolicy#NONE}). 
+ */ + @Override + public RefreshPolicy getRefreshPolicy() { + return refreshPolicy; + } + + @Override + public PutUserRequest setRefreshPolicy(RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; + return this; + } + + public String username() { + return username; + } + + public String[] roles() { + return roles; + } + + public String fullName() { + return fullName; + } + + public String email() { + return email; + } + + public Map metadata() { + return metadata; + } + + @Nullable + public char[] passwordHash() { + return passwordHash; + } + + public void enabled(boolean enabled) { + this.enabled = enabled; + } + + @Override + public String[] usernames() { + return new String[] { username }; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + username = in.readString(); + BytesReference passwordHashRef = in.readBytesReference(); + if (passwordHashRef == BytesArray.EMPTY) { + passwordHash = null; + } else { + passwordHash = CharArrays.utf8BytesToChars(BytesReference.toBytes(passwordHashRef)); + } + roles = in.readStringArray(); + fullName = in.readOptionalString(); + email = in.readOptionalString(); + metadata = in.readBoolean() ? in.readMap() : null; + refreshPolicy = RefreshPolicy.readFrom(in); + enabled = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(username); + BytesReference passwordHashRef; + if (passwordHash == null) { + passwordHashRef = null; + } else { + passwordHashRef = new BytesArray(CharArrays.toUtf8Bytes(passwordHash)); + } + out.writeBytesReference(passwordHashRef); + out.writeStringArray(roles); + out.writeOptionalString(fullName); + out.writeOptionalString(email); + if (metadata == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeMap(metadata); + } + refreshPolicy.writeTo(out); + out.writeBoolean(enabled); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequestBuilder.java new file mode 100644 index 0000000000000..9974716055db6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequestBuilder.java @@ -0,0 +1,182 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.WriteRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.support.Validation; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.xcontent.XContentUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import java.util.Map; +import java.util.Objects; + +public class PutUserRequestBuilder extends ActionRequestBuilder + implements WriteRequestBuilder { + + private final Hasher hasher = Hasher.BCRYPT; + + public PutUserRequestBuilder(ElasticsearchClient client) { + this(client, PutUserAction.INSTANCE); + } + + public PutUserRequestBuilder(ElasticsearchClient client, PutUserAction action) { + super(client, action, new PutUserRequest()); + } + + public PutUserRequestBuilder username(String username) { + request.username(username); + return this; + } + + public PutUserRequestBuilder roles(String... 
roles) { + request.roles(roles); + return this; + } + + public PutUserRequestBuilder password(@Nullable char[] password) { + if (password != null) { + Validation.Error error = Validation.Users.validatePassword(password); + if (error != null) { + ValidationException validationException = new ValidationException(); + validationException.addValidationError(error.toString()); + throw validationException; + } + request.passwordHash(hasher.hash(new SecureString(password))); + } else { + request.passwordHash(null); + } + return this; + } + + public PutUserRequestBuilder metadata(Map metadata) { + request.metadata(metadata); + return this; + } + + public PutUserRequestBuilder fullName(String fullName) { + request.fullName(fullName); + return this; + } + + public PutUserRequestBuilder email(String email) { + request.email(email); + return this; + } + + public PutUserRequestBuilder passwordHash(char[] passwordHash) { + request.passwordHash(passwordHash); + return this; + } + + public PutUserRequestBuilder enabled(boolean enabled) { + request.enabled(enabled); + return this; + } + + /** + * Populate the put user request using the given source and username + */ + public PutUserRequestBuilder source(String username, BytesReference source, XContentType xContentType) throws IOException { + Objects.requireNonNull(xContentType); + username(username); + // EMPTY is ok here because we never call namedObject + try (InputStream stream = source.streamInput(); + XContentParser parser = xContentType.xContent() + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + XContentUtils.verifyObject(parser); + XContentParser.Token token; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (User.Fields.PASSWORD.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_STRING) { + String password = parser.text(); + char[] passwordChars = password.toCharArray(); + password(passwordChars); + Arrays.fill(passwordChars, (char) 0); + } else { + throw new ElasticsearchParseException( + "expected field [{}] to be of type string, but found [{}] instead", currentFieldName, token); + } + } else if (User.Fields.PASSWORD_HASH.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_STRING) { + char[] passwordChars = parser.text().toCharArray(); + passwordHash(passwordChars); + } else { + throw new ElasticsearchParseException( + "expected field [{}] to be of type string, but found [{}] instead", currentFieldName, token); + } + } else if (User.Fields.ROLES.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_STRING) { + roles(Strings.commaDelimitedListToStringArray(parser.text())); + } else { + roles(XContentUtils.readStringArray(parser, false)); + } + } else if (User.Fields.FULL_NAME.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_STRING) { + fullName(parser.text()); + } else if (token != XContentParser.Token.VALUE_NULL) { + throw new ElasticsearchParseException( + "expected field [{}] to be of type string, but found [{}] instead", currentFieldName, token); + } + } else if (User.Fields.EMAIL.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_STRING) { + email(parser.text()); + } else if (token != 
XContentParser.Token.VALUE_NULL) { + throw new ElasticsearchParseException( + "expected field [{}] to be of type string, but found [{}] instead", currentFieldName, token); + } + } else if (User.Fields.METADATA.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.START_OBJECT) { + metadata(parser.map()); + } else { + throw new ElasticsearchParseException( + "expected field [{}] to be of type object, but found [{}] instead", currentFieldName, token); + } + } else if (User.Fields.ENABLED.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_BOOLEAN) { + enabled(parser.booleanValue()); + } else { + throw new ElasticsearchParseException( + "expected field [{}] to be of type boolean, but found [{}] instead", currentFieldName, token); + } + } else if (User.Fields.USERNAME.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == Token.VALUE_STRING) { + if (username.equals(parser.text()) == false) { + throw new IllegalArgumentException("[username] in source does not match the username provided [" + + username + "]"); + } + } else { + throw new ElasticsearchParseException( + "expected field [{}] to be of type string, but found [{}] instead", currentFieldName, token); + } + } else { + throw new ElasticsearchParseException("failed to parse add user request. unexpected field [{}]", currentFieldName); + } + } + return this; + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserResponse.java new file mode 100644 index 0000000000000..30d15e5a3fdcf --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserResponse.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Response when adding a user to the security index. Returns a + * single boolean field for whether the user was created or updated. 
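
For context on how this response pairs with the builder above, a minimal usage sketch follows; the `client` handle and the example role names are assumptions, and error handling is omitted.

```java
// Illustrative sketch: add or update the native user "jane".
PutUserResponse response = new PutUserRequestBuilder(client)
        .username("jane")
        .password("longsecurepassword".toCharArray())    // hashed with bcrypt by the builder
        .roles("kibana_user", "logstash_reader")         // example role names
        .fullName("Jane Doe")
        .email("jane@example.com")
        .enabled(true)
        .get();
boolean created = response.created();                    // false means an existing user was updated
```
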
+ */ +public class PutUserResponse extends ActionResponse implements ToXContentObject { + + private boolean created; + + public PutUserResponse() { + } + + public PutUserResponse(boolean created) { + this.created = created; + } + + public boolean created() { + return created; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject().field("created", created).endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(created); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + this.created = in.readBoolean(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledAction.java new file mode 100644 index 0000000000000..53bb5463c9182 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledAction.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * This action is for setting the enabled flag on a native or reserved user + */ +public class SetEnabledAction extends Action { + + public static final SetEnabledAction INSTANCE = new SetEnabledAction(); + public static final String NAME = "cluster:admin/xpack/security/user/set_enabled"; + + private SetEnabledAction() { + super(NAME); + } + + @Override + public SetEnabledRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new SetEnabledRequestBuilder(client); + } + + @Override + public SetEnabledResponse newResponse() { + return new SetEnabledResponse(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledRequest.java new file mode 100644 index 0000000000000..664a46ae3e727 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledRequest.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.security.support.Validation.Error; +import org.elasticsearch.xpack.core.security.support.Validation.Users; + +import java.io.IOException; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * The request that allows to set a user as enabled or disabled + */ +public class SetEnabledRequest extends ActionRequest implements UserRequest, WriteRequest { + + private Boolean enabled; + private String username; + private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE; + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + Error error = Users.validateUsername(username, true, Settings.EMPTY); + if (error != null) { + validationException = addValidationError(error.toString(), validationException); + } + if (enabled == null) { + validationException = addValidationError("enabled must be set", validationException); + } + return validationException; + } + + /** + * @return whether the user should be set to enabled or not + */ + public Boolean enabled() { + return enabled; + } + + /** + * Set whether the user should be enabled or not. + */ + public void enabled(boolean enabled) { + this.enabled = enabled; + } + + /** + * @return the username that this request applies to. + */ + public String username() { + return username; + } + + /** + * Set the username that the request applies to. Must not be {@code null} + */ + public void username(String username) { + this.username = username; + } + + @Override + public String[] usernames() { + return new String[] { username }; + } + + /** + * Should this request trigger a refresh ({@linkplain RefreshPolicy#IMMEDIATE}, the default), wait for a refresh ( + * {@linkplain RefreshPolicy#WAIT_UNTIL}), or proceed ignore refreshes entirely ({@linkplain RefreshPolicy#NONE}). + */ + @Override + public RefreshPolicy getRefreshPolicy() { + return refreshPolicy; + } + + @Override + public SetEnabledRequest setRefreshPolicy(RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; + return this; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + this.enabled = in.readBoolean(); + this.username = in.readString(); + this.refreshPolicy = RefreshPolicy.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(enabled); + out.writeString(username); + refreshPolicy.writeTo(out); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledRequestBuilder.java new file mode 100644 index 0000000000000..2f4381ade1114 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledRequestBuilder.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.WriteRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Request builder for setting a user as enabled or disabled + */ +public class SetEnabledRequestBuilder extends ActionRequestBuilder + implements WriteRequestBuilder { + + public SetEnabledRequestBuilder(ElasticsearchClient client) { + super(client, SetEnabledAction.INSTANCE, new SetEnabledRequest()); + } + + /** + * Set the username of the user that should enabled or disabled. Must not be {@code null} + */ + public SetEnabledRequestBuilder username(String username) { + request.username(username); + return this; + } + + /** + * Set whether the user should be enabled or not + */ + public SetEnabledRequestBuilder enabled(boolean enabled) { + request.enabled(enabled); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledResponse.java new file mode 100644 index 0000000000000..72d1d16b82e24 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/SetEnabledResponse.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionResponse; + +/** + * Empty response for a {@link SetEnabledRequest} + */ +public class SetEnabledResponse extends ActionResponse { +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/UserRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/UserRequest.java new file mode 100644 index 0000000000000..fe2327805eae5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/UserRequest.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.action.user; + +/** + * Interface for requests that involve user operations + */ +public interface UserRequest { + + /** + * Accessor for the usernames that this request pertains to. null should never be returned! + */ + String[] usernames(); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java new file mode 100644 index 0000000000000..2a2fdd95d61a9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java @@ -0,0 +1,219 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc; + +import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.xpack.core.security.user.InternalUserSerializationHelper; +import org.elasticsearch.xpack.core.security.user.User; + +import java.io.IOException; +import java.util.Base64; +import java.util.Objects; + +// TODO(hub-cap) Clean this up after moving User over - This class can re-inherit its field AUTHENTICATION_KEY in AuthenticationField. +// That interface can be removed +public class Authentication { + + private final User user; + private final RealmRef authenticatedBy; + private final RealmRef lookedUpBy; + private final Version version; + + public Authentication(User user, RealmRef authenticatedBy, RealmRef lookedUpBy) { + this(user, authenticatedBy, lookedUpBy, Version.CURRENT); + } + + public Authentication(User user, RealmRef authenticatedBy, RealmRef lookedUpBy, Version version) { + this.user = Objects.requireNonNull(user); + this.authenticatedBy = Objects.requireNonNull(authenticatedBy); + this.lookedUpBy = lookedUpBy; + this.version = version; + } + + public Authentication(StreamInput in) throws IOException { + this.user = InternalUserSerializationHelper.readFrom(in); + this.authenticatedBy = new RealmRef(in); + if (in.readBoolean()) { + this.lookedUpBy = new RealmRef(in); + } else { + this.lookedUpBy = null; + } + this.version = in.getVersion(); + } + + public User getUser() { + return user; + } + + public RealmRef getAuthenticatedBy() { + return authenticatedBy; + } + + public RealmRef getLookedUpBy() { + return lookedUpBy; + } + + public Version getVersion() { + return version; + } + + public static Authentication readFromContext(ThreadContext ctx) + throws IOException, IllegalArgumentException { + Authentication authentication = ctx.getTransient(AuthenticationField.AUTHENTICATION_KEY); + if (authentication != null) { + assert ctx.getHeader(AuthenticationField.AUTHENTICATION_KEY) != null; + return authentication; + } + + String authenticationHeader = ctx.getHeader(AuthenticationField.AUTHENTICATION_KEY); + if (authenticationHeader == null) { + return null; + } + return deserializeHeaderAndPutInContext(authenticationHeader, ctx); + } + + public static Authentication getAuthentication(ThreadContext context) { + return context.getTransient(AuthenticationField.AUTHENTICATION_KEY); + } + + static Authentication deserializeHeaderAndPutInContext(String header, ThreadContext ctx) + throws IOException, IllegalArgumentException { + assert ctx.getTransient(AuthenticationField.AUTHENTICATION_KEY) == null; + + byte[] bytes = Base64.getDecoder().decode(header); + StreamInput input = StreamInput.wrap(bytes); + Version version = Version.readVersion(input); + input.setVersion(version); + Authentication authentication = new Authentication(input); + ctx.putTransient(AuthenticationField.AUTHENTICATION_KEY, authentication); + return authentication; + } + + /** + * Writes the authentication to the context. 
There must not be an existing authentication in the context and if there is an + * {@link IllegalStateException} will be thrown + */ + public void writeToContext(ThreadContext ctx) + throws IOException, IllegalArgumentException { + ensureContextDoesNotContainAuthentication(ctx); + String header = encode(); + ctx.putTransient(AuthenticationField.AUTHENTICATION_KEY, this); + ctx.putHeader(AuthenticationField.AUTHENTICATION_KEY, header); + } + + void ensureContextDoesNotContainAuthentication(ThreadContext ctx) { + if (ctx.getTransient(AuthenticationField.AUTHENTICATION_KEY) != null) { + if (ctx.getHeader(AuthenticationField.AUTHENTICATION_KEY) == null) { + throw new IllegalStateException("authentication present as a transient but not a header"); + } + throw new IllegalStateException("authentication is already present in the context"); + } + } + + public String encode() throws IOException { + BytesStreamOutput output = new BytesStreamOutput(); + output.setVersion(version); + Version.writeVersion(version, output); + writeTo(output); + return Base64.getEncoder().encodeToString(BytesReference.toBytes(output.bytes())); + } + + public void writeTo(StreamOutput out) throws IOException { + InternalUserSerializationHelper.writeTo(user, out); + authenticatedBy.writeTo(out); + if (lookedUpBy != null) { + out.writeBoolean(true); + lookedUpBy.writeTo(out); + } else { + out.writeBoolean(false); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Authentication that = (Authentication) o; + + if (!user.equals(that.user)) return false; + if (!authenticatedBy.equals(that.authenticatedBy)) return false; + if (lookedUpBy != null ? !lookedUpBy.equals(that.lookedUpBy) : that.lookedUpBy != null) return false; + return version.equals(that.version); + } + + @Override + public int hashCode() { + int result = user.hashCode(); + result = 31 * result + authenticatedBy.hashCode(); + result = 31 * result + (lookedUpBy != null ? 
lookedUpBy.hashCode() : 0); + result = 31 * result + version.hashCode(); + return result; + } + + public static class RealmRef { + + private final String nodeName; + private final String name; + private final String type; + + public RealmRef(String name, String type, String nodeName) { + this.nodeName = nodeName; + this.name = name; + this.type = type; + } + + public RealmRef(StreamInput in) throws IOException { + this.nodeName = in.readString(); + this.name = in.readString(); + this.type = in.readString(); + } + + void writeTo(StreamOutput out) throws IOException { + out.writeString(nodeName); + out.writeString(name); + out.writeString(type); + } + + public String getNodeName() { + return nodeName; + } + + public String getName() { + return name; + } + + public String getType() { + return type; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + RealmRef realmRef = (RealmRef) o; + + if (!nodeName.equals(realmRef.nodeName)) return false; + if (!name.equals(realmRef.name)) return false; + return type.equals(realmRef.type); + } + + @Override + public int hashCode() { + int result = nodeName.hashCode(); + result = 31 * result + name.hashCode(); + result = 31 * result + type.hashCode(); + return result; + } + } +} + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationFailureHandler.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationFailureHandler.java new file mode 100644 index 0000000000000..c366196658322 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationFailureHandler.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.transport.TransportMessage; + +/** + * A AuthenticationFailureHandler is responsible for the handling of a request that has failed authentication. This must + * consist of returning an exception and this exception can have headers to indicate authentication is required or another + * HTTP operation such as a redirect. + *

+ * For example, when using Basic authentication, most clients wait to send credentials until they have been challenged + * for them. In this workflow a client makes a request, the server responds with a 401 status with the header + * WWW-Authenticate: Basic realm=auth-realm, and then the client will send credentials. The same scheme also + * applies for other methods of authentication, with changes to the value provided in the WWW-Authenticate header. + *
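
A minimal sketch of the server side of that exchange, assuming the handler builds the exception directly (the default implementation later in this change achieves the same effect through the `Exceptions.authenticationError` helper):

```java
// Respond with 401 and a Basic challenge so the client retries the request with credentials.
ElasticsearchSecurityException unauthorized = new ElasticsearchSecurityException(
        "missing authentication token", RestStatus.UNAUTHORIZED);
unauthorized.addHeader("WWW-Authenticate", "Basic realm=\"auth-realm\"");
return unauthorized;
```
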

+ * Additionally, some methods of authentication may require a different status code. When using an single sign on system, + * clients will often retrieve a token from a single sign on system that is presented to the server and verified. When a + * client does not provide such a token, then the server can choose to redirect the client to the single sign on system to + * retrieve a token. This can be accomplished in the AuthenticationFailureHandler by setting the + * {@link org.elasticsearch.rest.RestStatus#FOUND} + * with a Location header that contains the location to redirect the user to. + */ +public interface AuthenticationFailureHandler { + + /** + * This method is called when there has been an authentication failure for the given REST request and authentication + * token. + * + * @param request The request that was being authenticated when the exception occurred + * @param token The token that was extracted from the request + * @param context The context of the request that failed authentication that could not be authenticated + * @return ElasticsearchSecurityException with the appropriate headers and message + */ + ElasticsearchSecurityException failedAuthentication(RestRequest request, AuthenticationToken token, ThreadContext context); + + /** + * This method is called when there has been an authentication failure for the given message and token + * + * @param message The transport message that could not be authenticated + * @param token The token that was extracted from the message + * @param action The name of the action that the message is trying to perform + * @param context The context of the request that failed authentication that could not be authenticated + * @return ElasticsearchSecurityException with the appropriate headers and message + */ + ElasticsearchSecurityException failedAuthentication(TransportMessage message, AuthenticationToken token, String action, + ThreadContext context); + + /** + * The method is called when an exception has occurred while processing the REST request. This could be an error that + * occurred while attempting to extract a token or while attempting to authenticate the request + * + * @param request The request that was being authenticated when the exception occurred + * @param e The exception that was thrown + * @param context The context of the request that failed authentication that could not be authenticated + * @return ElasticsearchSecurityException with the appropriate headers and message + */ + ElasticsearchSecurityException exceptionProcessingRequest(RestRequest request, Exception e, ThreadContext context); + + /** + * The method is called when an exception has occurred while processing the transport message. This could be an error that + * occurred while attempting to extract a token or while attempting to authenticate the request + * + * @param message The message that was being authenticated when the exception occurred + * @param action The name of the action that the message is trying to perform + * @param e The exception that was thrown + * @param context The context of the request that failed authentication that could not be authenticated + * @return ElasticsearchSecurityException with the appropriate headers and message + */ + ElasticsearchSecurityException exceptionProcessingRequest(TransportMessage message, String action, Exception e, ThreadContext context); + + /** + * This method is called when a REST request is received and no authentication token could be extracted AND anonymous + * access is disabled. 
If anonymous access is enabled, this method will not be called + * + * @param request The request that did not have a token + * @param context The context of the request that failed authentication that could not be authenticated + * @return ElasticsearchSecurityException with the appropriate headers and message + */ + ElasticsearchSecurityException missingToken(RestRequest request, ThreadContext context); + + /** + * This method is called when a transport message is received and no authentication token could be extracted AND + * anonymous access is disabled. If anonymous access is enabled this method will not be called + * + * @param message The message that did not have a token + * @param action The name of the action that the message is trying to perform + * @param context The context of the request that failed authentication that could not be authenticated + * @return ElasticsearchSecurityException with the appropriate headers and message + */ + ElasticsearchSecurityException missingToken(TransportMessage message, String action, ThreadContext context); + + /** + * This method is called when anonymous access is enabled, a request does not pass authorization with the anonymous + * user, AND the anonymous service is configured to throw an authentication exception instead of an authorization + * exception + * + * @param action the action that failed authorization for anonymous access + * @param context The context of the request that failed authentication that could not be authenticated + * @return ElasticsearchSecurityException with the appropriate headers and message + */ + ElasticsearchSecurityException authenticationRequired(String action, ThreadContext context); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationField.java new file mode 100644 index 0000000000000..a53a58d637a96 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationField.java @@ -0,0 +1,13 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc; + +public final class AuthenticationField { + + public static final String AUTHENTICATION_KEY = "_xpack_security_authentication"; + + private AuthenticationField() {} +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationResult.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationResult.java new file mode 100644 index 0000000000000..0f073ef4ae398 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationResult.java @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.xpack.core.security.user.User; + +import java.util.Objects; + +/** + * Represents the result of an authentication attempt. 
+ * This allows a {@link Realm} to respond in 3 different ways (without needing to + * resort to {@link org.elasticsearch.action.ActionListener#onFailure(Exception)}) + *

+ *   1. Successful authentication of a user
+ *   2. Unable to authenticate user, try another realm (optionally with a diagnostic message)
+ *   3. Unable to authenticate user, terminate authentication (with an error message)
+ * (each of these outcomes is illustrated in the sketch below)
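
A hypothetical helper (the method name and parameters are illustrative, not part of this change) makes the mapping onto those three outcomes concrete:

```java
// Sketch of how a realm implementation might choose between the three result states.
static AuthenticationResult toResult(User user, boolean credentialsPresent,
                                     boolean credentialsValid, boolean lockedOut) {
    if (credentialsPresent == false) {
        return AuthenticationResult.notHandled();                 // outcome 2: no opinion, try another realm
    }
    if (lockedOut) {
        return AuthenticationResult.terminate(                    // outcome 3: stop the authentication chain
                "account [" + user.principal() + "] is locked", null);
    }
    if (credentialsValid) {
        return AuthenticationResult.success(user);                // outcome 1: the user is authenticated
    }
    return AuthenticationResult.unsuccessful(                     // outcome 2: failed here, with a diagnostic message
            "invalid credentials for [" + user.principal() + "]", null);
}
```
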
+ */ +public final class AuthenticationResult { + private static final AuthenticationResult NOT_HANDLED = new AuthenticationResult(Status.CONTINUE, null, null, null); + + public enum Status { + SUCCESS, + CONTINUE, + TERMINATE, + } + + private final Status status; + private final User user; + private final String message; + private final Exception exception; + + private AuthenticationResult(Status status, @Nullable User user, @Nullable String message, @Nullable Exception exception) { + this.status = status; + this.user = user; + this.message = message; + this.exception = exception; + } + + public Status getStatus() { + return status; + } + + public User getUser() { + return user; + } + + public String getMessage() { + return message; + } + + public Exception getException() { + return exception; + } + + /** + * Creates an {@code AuthenticationResult} that indicates that the supplied {@link User} + * has been successfully authenticated. + *

+ * The {@link #getStatus() status} is set to {@link Status#SUCCESS}. + *

+ * Neither the {@link #getMessage() message} nor {@link #getException() exception} are populated. + *

+ * @param user The user that was authenticated. Cannot be {@code null}. + */ + public static AuthenticationResult success(User user) { + Objects.requireNonNull(user); + return new AuthenticationResult(Status.SUCCESS, user, null, null); + } + + /** + * Creates an {@code AuthenticationResult} that indicates that the realm did not handle the + * authentication request in any way, and has no failure messages. + *

+ * The {@link #getStatus() status} is set to {@link Status#CONTINUE}. + *

+ * The {@link #getMessage() message}, {@link #getException() exception}, and {@link #getUser() user} are all set to {@code null}. + *

+ */ + public static AuthenticationResult notHandled() { + return NOT_HANDLED; + } + + /** + * Creates an {@code AuthenticationResult} that indicates that the realm attempted to handle the authentication request but was + * unsuccessful. The reason for the failure is given in the supplied message and optional exception. + *

+ * The {@link #getStatus() status} is set to {@link Status#CONTINUE}. + *

+ * The {@link #getUser() user} is not populated. + *

+ */ + public static AuthenticationResult unsuccessful(String message, @Nullable Exception cause) { + Objects.requireNonNull(message); + return new AuthenticationResult(Status.CONTINUE, null, message, cause); + } + + /** + * Creates an {@code AuthenticationResult} that indicates that the realm attempted to handle the authentication request, was + * unsuccessful and wants to terminate this authentication request. + * The reason for the failure is given in the supplied message and optional exception. + *

+ * The {@link #getStatus() status} is set to {@link Status#TERMINATE}. + *

+ * The {@link #getUser() user} is not populated. + *

+ */ + public static AuthenticationResult terminate(String message, @Nullable Exception cause) { + return new AuthenticationResult(Status.TERMINATE, null, message, cause); + } + + public boolean isAuthenticated() { + return status == Status.SUCCESS; + } + + @Override + public String toString() { + return "AuthenticationResult{" + + "status=" + status + + ", user=" + user + + ", message=" + message + + ", exception=" + exception + + '}'; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationServiceField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationServiceField.java new file mode 100644 index 0000000000000..60921b68036f5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationServiceField.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc; + +import org.elasticsearch.common.settings.Setting; + +import static org.elasticsearch.xpack.core.security.SecurityField.setting; + +public final class AuthenticationServiceField { + + public static final Setting RUN_AS_ENABLED = + Setting.boolSetting(setting("authc.run_as.enabled"), true, Setting.Property.NodeScope); + public static final String RUN_AS_USER_HEADER = "es-security-runas-user"; + + private AuthenticationServiceField() {} +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationToken.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationToken.java new file mode 100644 index 0000000000000..f218d2f9567f7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationToken.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc; + +/** + * Interface for a token that is used for authentication. This token is the representation of the authentication + * information that is presented with a request. The token will be extracted by a {@link Realm} and subsequently + * used by a Realm to attempt authentication of a user. + */ +public interface AuthenticationToken { + + String principal(); + + Object credentials(); + + void clearCredentials(); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandler.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandler.java new file mode 100644 index 0000000000000..8b31e77f9f8b7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/DefaultAuthenticationFailureHandler.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authc; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.transport.TransportMessage; + +import static org.elasticsearch.xpack.core.security.support.Exceptions.authenticationError; + +/** + * The default implementation of a {@link AuthenticationFailureHandler}. This handler will return an exception with a + * RestStatus of 401 and the WWW-Authenticate header with a Basic challenge. + */ +public class DefaultAuthenticationFailureHandler implements AuthenticationFailureHandler { + + @Override + public ElasticsearchSecurityException failedAuthentication(RestRequest request, AuthenticationToken token, + ThreadContext context) { + return authenticationError("unable to authenticate user [{}] for REST request [{}]", token.principal(), request.uri()); + } + + @Override + public ElasticsearchSecurityException failedAuthentication(TransportMessage message, AuthenticationToken token, String action, + ThreadContext context) { + return authenticationError("unable to authenticate user [{}] for action [{}]", token.principal(), action); + } + + @Override + public ElasticsearchSecurityException exceptionProcessingRequest(RestRequest request, Exception e, ThreadContext context) { + if (e instanceof ElasticsearchSecurityException) { + assert ((ElasticsearchSecurityException) e).status() == RestStatus.UNAUTHORIZED; + assert ((ElasticsearchSecurityException) e).getHeader("WWW-Authenticate").size() == 1; + return (ElasticsearchSecurityException) e; + } + return authenticationError("error attempting to authenticate request", e); + } + + @Override + public ElasticsearchSecurityException exceptionProcessingRequest(TransportMessage message, String action, Exception e, + ThreadContext context) { + if (e instanceof ElasticsearchSecurityException) { + assert ((ElasticsearchSecurityException) e).status() == RestStatus.UNAUTHORIZED; + assert ((ElasticsearchSecurityException) e).getHeader("WWW-Authenticate").size() == 1; + return (ElasticsearchSecurityException) e; + } + return authenticationError("error attempting to authenticate request", e); + } + + @Override + public ElasticsearchSecurityException missingToken(RestRequest request, ThreadContext context) { + return authenticationError("missing authentication token for REST request [{}]", request.uri()); + } + + @Override + public ElasticsearchSecurityException missingToken(TransportMessage message, String action, ThreadContext context) { + return authenticationError("missing authentication token for action [{}]", action); + } + + @Override + public ElasticsearchSecurityException authenticationRequired(String action, ThreadContext context) { + return authenticationError("action [{}] requires authentication", action); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/InternalRealmsSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/InternalRealmsSettings.java new file mode 100644 index 0000000000000..0570a2bdad23f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/InternalRealmsSettings.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings; +import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings; +import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings; +import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +public final class InternalRealmsSettings { + private InternalRealmsSettings() {} + + /** + * Provides the {@link Setting setting configuration} for each internal realm type. + * This excludes the ReservedRealm, as it cannot be configured dynamically. + * @return A map from realm-type to a collection of Setting objects. + */ + public static Map>> getSettings() { + Map>> map = new HashMap<>(); + map.put(FileRealmSettings.TYPE, FileRealmSettings.getSettings()); + map.put(NativeRealmSettings.TYPE, NativeRealmSettings.getSettings()); + map.put(LdapRealmSettings.AD_TYPE, LdapRealmSettings.getSettings(LdapRealmSettings.AD_TYPE)); + map.put(LdapRealmSettings.LDAP_TYPE, LdapRealmSettings.getSettings(LdapRealmSettings.LDAP_TYPE)); + map.put(PkiRealmSettings.TYPE, PkiRealmSettings.getSettings()); + map.put(SamlRealmSettings.TYPE, SamlRealmSettings.getSettings()); + return Collections.unmodifiableMap(map); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/KeyAndTimestamp.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/KeyAndTimestamp.java new file mode 100644 index 0000000000000..f7c79b2105352 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/KeyAndTimestamp.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authc; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.UnicodeUtil; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.SecureString; + +import java.io.IOException; +import java.util.Arrays; + +public final class KeyAndTimestamp implements Writeable { + private final SecureString key; + private final long timestamp; + + public KeyAndTimestamp(SecureString key, long timestamp) { + this.key = key; + this.timestamp = timestamp; + } + + KeyAndTimestamp(StreamInput input) throws IOException { + timestamp = input.readVLong(); + byte[] keyBytes = input.readByteArray(); + final char[] ref = new char[keyBytes.length]; + int len = UnicodeUtil.UTF8toUTF16(keyBytes, 0, keyBytes.length, ref); + key = new SecureString(Arrays.copyOfRange(ref, 0, len)); + } + + public long getTimestamp() { return timestamp; } + public SecureString getKey() { return key; } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(timestamp); + BytesRef bytesRef = new BytesRef(key); + out.writeVInt(bytesRef.length); + out.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + KeyAndTimestamp that = (KeyAndTimestamp) o; + + if (timestamp != that.timestamp) return false; + return key.equals(that.key); + } + + @Override + public int hashCode() { + int result = key.hashCode(); + result = 31 * result + (int) (timestamp ^ (timestamp >>> 32)); + return result; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java new file mode 100644 index 0000000000000..234141c77c9a7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java @@ -0,0 +1,148 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.xpack.core.security.user.User; + +import java.util.HashMap; +import java.util.Map; + +/** + * An authentication mechanism to which the default authentication org.elasticsearch.xpack.security.authc.AuthenticationService + * delegates the authentication process. Different realms may be defined, each may be based on different + * authentication mechanism supporting its own specific authentication token type. + */ +public abstract class Realm implements Comparable { + + protected final Logger logger; + protected final String type; + + public String getType() { + return type; + } + + protected RealmConfig config; + + public Realm(String type, RealmConfig config) { + this.type = type; + this.config = config; + this.logger = config.logger(getClass()); + } + + /** + * @return The type of this realm + */ + public String type() { + return type; + } + + /** + * @return The name of this realm. 
+ */ + public String name() { + return config.name; + } + + /** + * @return The order of this realm within the executing realm chain. + */ + public int order() { + return config.order; + } + + @Override + public int compareTo(Realm other) { + int result = Integer.compare(config.order, other.config.order); + if (result == 0) { + // If same order, compare based on the realm name + result = config.name.compareTo(other.config.name); + } + return result; + } + + /** + * @return {@code true} if this realm supports the given authentication token, {@code false} otherwise. + */ + public abstract boolean supports(AuthenticationToken token); + + /** + * Attempts to extract an authentication token from the given context. If an appropriate token + * is found it's returned, otherwise {@code null} is returned. + * + * @param context The context that will provide information about the incoming request + * @return The authentication token or {@code null} if not found + */ + public abstract AuthenticationToken token(ThreadContext context); + + /** + * Authenticates the given token in an asynchronous fashion. + *

+ * A successful authentication will call {@link ActionListener#onResponse} with a + * {@link AuthenticationResult#success successful} result, which includes the user associated with the given token. + *
+ * If the realm does not support, or cannot handle the token, it will call {@link ActionListener#onResponse} with a + * {@link AuthenticationResult#notHandled not-handled} result. + * This can include cases where the token identifies as user that is not known by this realm. + *
+ * If the realm can handle the token, but authentication failed it will typically call {@link ActionListener#onResponse} with a + * {@link AuthenticationResult#unsuccessful failure} result, which includes a diagnostic message regarding the failure. + * This can include cases where the token identifies a valid user, but has an invalid password. + *
+ * If the realm wishes to assert that it has the exclusive right to handle the provided token, but authentication was not successful + * it typically call {@link ActionListener#onResponse} with a + * {@link AuthenticationResult#terminate termination} result, which includes a diagnostic message regarding the failure. + * This can include cases where the token identifies a valid user, but has an invalid password and no other realm is allowed to + * authenticate that user. + *

+ *

+ * The remote address should be {@code null} if the request initiated from the local node. + *

+ * + * @param token The authentication token + * @param listener The listener to pass the authentication result to + */ + public abstract void authenticate(AuthenticationToken token, ActionListener listener); + + /** + * Looks up the user identified the String identifier. A successful lookup will call the {@link ActionListener#onResponse} + * with the {@link User} identified by the username. An unsuccessful lookup call with {@code null} as the argument. If lookup is not + * supported, simply return {@code null} when called. + * + * @param username the String identifier for the user + * @param listener The listener to pass the lookup result to + */ + public abstract void lookupUser(String username, ActionListener listener); + + public Map usageStats() { + Map stats = new HashMap<>(); + stats.put("name", name()); + stats.put("order", order()); + return stats; + } + + @Override + public String toString() { + return type + "/" + config.name; + } + + /** + * A factory interface to construct a security realm. + */ + public interface Factory { + + /** + * Constructs a realm which will be used for authentication. + * + * @param config The configuration for the realm + * @throws Exception an exception may be thrown if there was an error during realm creation + */ + Realm create(RealmConfig config) throws Exception; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmConfig.java new file mode 100644 index 0000000000000..865d0117b81da --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmConfig.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authc; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; + +public class RealmConfig { + + final String name; + final boolean enabled; + final int order; + private final String type; + final Settings settings; + + private final Environment env; + private final Settings globalSettings; + private final ThreadContext threadContext; + + public RealmConfig(String name, Settings settings, Settings globalSettings, Environment env, + ThreadContext threadContext) { + this.name = name; + this.settings = settings; + this.globalSettings = globalSettings; + this.env = env; + enabled = RealmSettings.ENABLED_SETTING.get(settings); + order = RealmSettings.ORDER_SETTING.get(settings); + type = RealmSettings.TYPE_SETTING.get(settings); + this.threadContext = threadContext; + } + + public String name() { + return name; + } + + public boolean enabled() { + return enabled; + } + + public int order() { + return order; + } + + public String type() { + return type; + } + + public Settings settings() { + return settings; + } + + public Settings globalSettings() { + return globalSettings; + } + + public Logger logger(Class clazz) { + return Loggers.getLogger(clazz, globalSettings); + } + + public Environment env() { + return env; + } + + public ThreadContext threadContext() { + return threadContext; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmSettings.java new file mode 100644 index 0000000000000..f7fabab2799af --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmSettings.java @@ -0,0 +1,195 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc; + +import org.elasticsearch.common.settings.AbstractScopedSettings; +import org.elasticsearch.common.settings.SecureSetting; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.security.SecurityExtension; + +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.elasticsearch.common.Strings.isNullOrEmpty; +import static org.elasticsearch.xpack.core.security.SecurityField.setting; + +/** + * Configures the {@link Setting#groupSetting(String, Consumer, Setting.Property...) group setting} for security + * {@link Realm realms}, with validation according to the realm type. + *

+ * The allowable settings for a given realm are dependent on the {@link Realm#type() realm type}, so it is not possible + * to simply provide a list of {@link Setting} objects and rely on the global setting vacomlidation (e.g. A custom realm-type might + * define a setting with the same logical key as an internal realm-type, but a different data type). + *

+ * Instead, realm configuration relies on the validator parameter to + * {@link Setting#groupSetting(String, Consumer, Setting.Property...)} in order to validate each realm in a way that respects the + * declared type. + * Internally, this validation delegates to {@link AbstractScopedSettings#validate(Settings, boolean)} so that validation is reasonably + * aligned + * with the way we validate settings globally. + *

+ *

+ * The allowable settings for each realm-type are determined by calls to {@link InternalRealmsSettings#getSettings()} and + * {@link org.elasticsearch.xpack.core.security.SecurityExtension#getRealmSettings()} + */ +public class RealmSettings { + + public static final String PREFIX = setting("authc.realms."); + + static final Setting TYPE_SETTING = Setting.simpleString("type", Setting.Property.NodeScope); + static final Setting ENABLED_SETTING = Setting.boolSetting("enabled", true, Setting.Property.NodeScope); + static final Setting ORDER_SETTING = Setting.intSetting("order", Integer.MAX_VALUE, Setting.Property.NodeScope); + + /** + * Add the {@link Setting} configuration for all realms to the provided list. + */ + public static void addSettings(List> settingsList, List extensions) { + settingsList.add(getGroupSetting(extensions)); + } + + public static Collection getSettingsFilter(List extensions) { + return getSettingsByRealm(extensions).values().stream() + .flatMap(Collection::stream) + .filter(Setting::isFiltered) + .map(setting -> PREFIX + "*." + setting.getKey()) + .collect(Collectors.toSet()); + } + + /** + * Extract the child {@link Settings} for the {@link #PREFIX realms prefix}. + * The top level names in the returned Settings will be the names of the configured realms. + */ + public static Settings get(Settings settings) { + return settings.getByPrefix(RealmSettings.PREFIX); + } + + /** + * Extracts the realm settings from a global settings object. + * Returns a Map of realm-name to realm-settings. + */ + public static Map getRealmSettings(Settings globalSettings) { + Settings realmsSettings = RealmSettings.get(globalSettings); + return realmsSettings.names().stream() + .collect(Collectors.toMap(Function.identity(), realmsSettings::getAsSettings)); + } + + /** + * Convert the child {@link Setting} for the provided realm into a fully scoped key for use in an error message. + * @see #PREFIX + */ + public static String getFullSettingKey(RealmConfig realm, Setting setting) { + return getFullSettingKey(realm.name(), setting); + } + + /** + * @see #getFullSettingKey(RealmConfig, Setting) + */ + public static String getFullSettingKey(RealmConfig realm, String subKey) { + return getFullSettingKey(realm.name(), subKey); + } + + private static String getFullSettingKey(String name, Setting setting) { + return getFullSettingKey(name, setting.getKey()); + } + + private static String getFullSettingKey(String name, String subKey) { + return PREFIX + name + "." + subKey; + } + + private static Setting getGroupSetting(List extensions) { + return Setting.groupSetting(PREFIX, getSettingsValidator(extensions), Setting.Property.NodeScope); + } + + private static Consumer getSettingsValidator(List extensions) { + final Map>> childSettings = getSettingsByRealm(extensions); + childSettings.forEach(RealmSettings::verify); + return validator(childSettings); + } + + /** + * @return A map from realm-type to a collection of Setting objects. 
+ * @see InternalRealmsSettings#getSettings() + */ + private static Map>> getSettingsByRealm(List extensions) { + final Map>> settingsByRealm = new HashMap<>(InternalRealmsSettings.getSettings()); + if (extensions != null) { + extensions.forEach(ext -> { + final Map>> extSettings = ext.getRealmSettings(); + extSettings.keySet().stream().filter(settingsByRealm::containsKey).forEach(type -> { + throw new IllegalArgumentException("duplicate realm type " + type); + }); + settingsByRealm.putAll(extSettings); + }); + } + return settingsByRealm; + } + + private static void verify(String type, Set> settings) { + Set keys = new HashSet<>(); + settings.forEach(setting -> { + final String key = setting.getKey(); + if (keys.contains(key)) { + throw new IllegalArgumentException("duplicate setting for key " + key + " in realm type " + type); + } + keys.add(key); + if (setting.getProperties().contains(Setting.Property.NodeScope) == false) { + throw new IllegalArgumentException("setting " + key + " in realm type " + type + " does not have NodeScope"); + } + }); + } + + private static Consumer validator(Map>> validSettings) { + return (settings) -> settings.names().forEach(n -> validateRealm(n, settings.getAsSettings(n), validSettings)); + } + + private static void validateRealm(String name, Settings settings, Map>> validSettings) { + final String type = getRealmType(settings); + if (isNullOrEmpty(type)) { + throw new IllegalArgumentException("missing realm type [" + getFullSettingKey(name, TYPE_SETTING) + "] for realm"); + } + validateRealm(name, type, settings, validSettings.get(type)); + } + + public static String getRealmType(Settings settings) { + return TYPE_SETTING.get(settings); + } + + private static void validateRealm(String name, String type, Settings settings, Set> validSettings) { + if (validSettings == null) { + // For backwards compatibility, we assume that is we don't know the valid settings for a realm.type then everything + // is valid. Ideally we would reject these, but XPackExtension doesn't enforce that realm-factories and realm-settings are + // perfectly aligned + return; + } + + // Don't validate secure settings because they might have been cleared already + settings = Settings.builder().put(settings, false).build(); + validSettings.removeIf(s -> s instanceof SecureSetting); + + Set> settingSet = new HashSet<>(validSettings); + settingSet.add(TYPE_SETTING); + settingSet.add(ENABLED_SETTING); + settingSet.add(ORDER_SETTING); + final AbstractScopedSettings validator = new AbstractScopedSettings(settings, settingSet, Setting.Property.NodeScope) { }; + try { + validator.validate(settings, false); + } catch (RuntimeException e) { + throw new IllegalArgumentException("incorrect configuration for realm [" + getFullSettingKey(name, "") + + "] of type " + type, e); + } + } + + private RealmSettings() { + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/TokenMetaData.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/TokenMetaData.java new file mode 100644 index 0000000000000..3b8ea2910d13d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/TokenMetaData.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authc; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractNamedDiffable; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +public final class TokenMetaData extends AbstractNamedDiffable implements ClusterState.Custom { + + /** + * The type of {@link ClusterState} data. + */ + public static final String TYPE = "security_tokens"; + + private final List keys; + + public List getKeys() { + return keys; + } + + private final byte[] currentKeyHash; + + public byte[] getCurrentKeyHash() { + return currentKeyHash; + } + + public TokenMetaData(List keys, byte[] currentKeyHash) { + this.keys = keys; + this.currentKeyHash = currentKeyHash; + } + + public TokenMetaData(StreamInput input) throws IOException { + currentKeyHash = input.readByteArray(); + keys = Collections.unmodifiableList(input.readList(KeyAndTimestamp::new)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeByteArray(currentKeyHash); + out.writeList(keys); + } + + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(ClusterState.Custom.class, TYPE, in); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + // never render this to the user + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + TokenMetaData that = (TokenMetaData)o; + return keys.equals(that.keys) && currentKeyHash.equals(that.currentKeyHash); + } + + @Override + public int hashCode() { + int result = keys.hashCode(); + result = 31 * result + currentKeyHash.hashCode(); + return result; + } + + @Override + public String toString() { + return "TokenMetaData{ everything is secret }"; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_6_0_0_beta2; + } + + @Override + public boolean isPrivate() { + // never sent this to a client + return true; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/ClientReservedRealm.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/ClientReservedRealm.java new file mode 100644 index 0000000000000..c9868f448b40f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/ClientReservedRealm.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authc.esnative; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.core.security.user.UsernamesField; + +public class ClientReservedRealm { + + public static boolean isReserved(String username, Settings settings) { + assert username != null; + switch (username) { + case UsernamesField.ELASTIC_NAME: + case UsernamesField.KIBANA_NAME: + case UsernamesField.LOGSTASH_NAME: + case UsernamesField.BEATS_NAME: + return XPackSettings.RESERVED_REALM_ENABLED_SETTING.get(settings); + default: + return AnonymousUser.isAnonymousUsername(username, settings); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/NativeRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/NativeRealmSettings.java new file mode 100644 index 0000000000000..e41b14099813a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/esnative/NativeRealmSettings.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.esnative; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; + +import java.util.Set; + +public final class NativeRealmSettings { + public static final String TYPE = "native"; + + private NativeRealmSettings() {} + + /** + * @return The {@link Setting setting configuration} for this realm type + */ + public static Set> getSettings() { + return CachingUsernamePasswordRealmSettings.getCachingSettings(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/file/FileRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/file/FileRealmSettings.java new file mode 100644 index 0000000000000..110b8af9d7bcb --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/file/FileRealmSettings.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authc.file; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; + +import java.util.Set; + +public final class FileRealmSettings { + public static final String TYPE = "file"; + + private FileRealmSettings() {} + + /** + * @return The {@link Setting setting configuration} for this realm type + */ + public static Set> getSettings() { + return CachingUsernamePasswordRealmSettings.getCachingSettings(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/ActiveDirectorySessionFactorySettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/ActiveDirectorySessionFactorySettings.java new file mode 100644 index 0000000000000..691b43f24635e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/ActiveDirectorySessionFactorySettings.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.ldap; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; + +import java.util.HashSet; +import java.util.Set; + +public final class ActiveDirectorySessionFactorySettings { + public static final String AD_DOMAIN_NAME_SETTING = "domain_name"; + public static final String AD_GROUP_SEARCH_BASEDN_SETTING = "group_search.base_dn"; + public static final String AD_GROUP_SEARCH_SCOPE_SETTING = "group_search.scope"; + public static final String AD_USER_SEARCH_BASEDN_SETTING = "user_search.base_dn"; + public static final String AD_USER_SEARCH_FILTER_SETTING = "user_search.filter"; + public static final String AD_UPN_USER_SEARCH_FILTER_SETTING = "user_search.upn_filter"; + public static final String AD_DOWN_LEVEL_USER_SEARCH_FILTER_SETTING = "user_search.down_level_filter"; + public static final String AD_USER_SEARCH_SCOPE_SETTING = "user_search.scope"; + public static final Setting AD_LDAP_PORT_SETTING = Setting.intSetting("port.ldap", 389, Setting.Property.NodeScope); + public static final Setting AD_LDAPS_PORT_SETTING = Setting.intSetting("port.ldaps", 636, Setting.Property.NodeScope); + public static final Setting AD_GC_LDAP_PORT_SETTING = Setting.intSetting("port.gc_ldap", 3268, Setting.Property.NodeScope); + public static final Setting AD_GC_LDAPS_PORT_SETTING = Setting.intSetting("port.gc_ldaps", 3269, Setting.Property.NodeScope); + public static final Setting POOL_ENABLED = Setting.boolSetting("user_search.pool.enabled", + settings -> Boolean.toString(PoolingSessionFactorySettings.BIND_DN.exists(settings)), Setting.Property.NodeScope); + + private ActiveDirectorySessionFactorySettings() {} + + public static Set> getSettings() { + Set> settings = new HashSet<>(); + settings.addAll(SessionFactorySettings.getSettings()); + settings.add(Setting.simpleString(AD_DOMAIN_NAME_SETTING, Setting.Property.NodeScope)); + settings.add(Setting.simpleString(AD_GROUP_SEARCH_BASEDN_SETTING, Setting.Property.NodeScope)); + settings.add(Setting.simpleString(AD_GROUP_SEARCH_SCOPE_SETTING, Setting.Property.NodeScope)); + settings.add(Setting.simpleString(AD_USER_SEARCH_BASEDN_SETTING, 
Setting.Property.NodeScope)); + settings.add(Setting.simpleString(AD_USER_SEARCH_FILTER_SETTING, Setting.Property.NodeScope)); + settings.add(Setting.simpleString(AD_UPN_USER_SEARCH_FILTER_SETTING, Setting.Property.NodeScope)); + settings.add(Setting.simpleString(AD_DOWN_LEVEL_USER_SEARCH_FILTER_SETTING, Setting.Property.NodeScope)); + settings.add(Setting.simpleString(AD_USER_SEARCH_SCOPE_SETTING, Setting.Property.NodeScope)); + settings.add(AD_LDAP_PORT_SETTING); + settings.add(AD_LDAPS_PORT_SETTING); + settings.add(AD_GC_LDAP_PORT_SETTING); + settings.add(AD_GC_LDAPS_PORT_SETTING); + settings.add(POOL_ENABLED); + settings.addAll(PoolingSessionFactorySettings.getSettings()); + return settings; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapRealmSettings.java new file mode 100644 index 0000000000000..0bb9f195af7fc --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapRealmSettings.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.ldap; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapMetaDataResolverSettings; +import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; +import org.elasticsearch.xpack.core.security.authc.support.mapper.CompositeRoleMapperSettings; + +import java.util.HashSet; +import java.util.Set; + +public final class LdapRealmSettings { + public static final String LDAP_TYPE = "ldap"; + public static final String AD_TYPE = "active_directory"; + public static final Setting EXECUTION_TIMEOUT = + Setting.timeSetting("timeout.execution", TimeValue.timeValueSeconds(30L), Setting.Property.NodeScope); + + private LdapRealmSettings() {} + + /** + * @param type Either {@link #AD_TYPE} or {@link #LDAP_TYPE} + * @return The {@link Setting setting configuration} for this realm type + */ + public static Set> getSettings(String type) { + Set> settings = new HashSet<>(); + settings.addAll(CachingUsernamePasswordRealmSettings.getCachingSettings()); + settings.addAll(CompositeRoleMapperSettings.getSettings()); + settings.add(LdapRealmSettings.EXECUTION_TIMEOUT); + if (AD_TYPE.equals(type)) { + settings.addAll(ActiveDirectorySessionFactorySettings.getSettings()); + } else { + assert LDAP_TYPE.equals(type) : "type [" + type + "] is unknown. 
expected one of [" + AD_TYPE + ", " + LDAP_TYPE + "]"; + settings.addAll(LdapSessionFactorySettings.getSettings()); + settings.addAll(LdapUserSearchSessionFactorySettings.getSettings()); + } + settings.addAll(LdapMetaDataResolverSettings.getSettings()); + return settings; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapSessionFactorySettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapSessionFactorySettings.java new file mode 100644 index 0000000000000..4fb7e9a1d9346 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapSessionFactorySettings.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.ldap; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; + +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.function.Function; + +public final class LdapSessionFactorySettings { + + public static final Setting> USER_DN_TEMPLATES_SETTING = Setting.listSetting("user_dn_templates", + Collections.emptyList(), Function.identity(), Setting.Property.NodeScope); + + public static Set> getSettings() { + Set> settings = new HashSet<>(); + settings.addAll(SessionFactorySettings.getSettings()); + settings.add(USER_DN_TEMPLATES_SETTING); + return settings; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapUserSearchSessionFactorySettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapUserSearchSessionFactorySettings.java new file mode 100644 index 0000000000000..86f635e7427ff --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/LdapUserSearchSessionFactorySettings.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authc.ldap; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; +import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; + +import java.util.HashSet; +import java.util.Set; +import java.util.function.Function; + +public final class LdapUserSearchSessionFactorySettings { + public static final Setting SEARCH_ATTRIBUTE = new Setting<>("user_search.attribute", + LdapUserSearchSessionFactorySettings.DEFAULT_USERNAME_ATTRIBUTE, + Function.identity(), Setting.Property.NodeScope, Setting.Property.Deprecated); + public static final Setting SEARCH_BASE_DN = Setting.simpleString("user_search.base_dn", Setting.Property.NodeScope); + public static final Setting SEARCH_FILTER = Setting.simpleString("user_search.filter", Setting.Property.NodeScope); + public static final Setting SEARCH_SCOPE = new Setting<>("user_search.scope", (String) null, + s -> LdapSearchScope.resolve(s, LdapSearchScope.SUB_TREE), Setting.Property.NodeScope); + public static final Setting POOL_ENABLED = Setting.boolSetting("user_search.pool.enabled", true, Setting.Property.NodeScope); + private static final String DEFAULT_USERNAME_ATTRIBUTE = "uid"; + + private LdapUserSearchSessionFactorySettings() {} + + public static Set> getSettings() { + Set> settings = new HashSet<>(); + settings.addAll(SessionFactorySettings.getSettings()); + settings.addAll(PoolingSessionFactorySettings.getSettings()); + settings.add(SEARCH_BASE_DN); + settings.add(SEARCH_SCOPE); + settings.add(SEARCH_ATTRIBUTE); + settings.add(POOL_ENABLED); + settings.add(SEARCH_FILTER); + + settings.addAll(SearchGroupsResolverSettings.getSettings()); + settings.addAll(UserAttributeGroupsResolverSettings.getSettings()); + + return settings; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/PoolingSessionFactorySettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/PoolingSessionFactorySettings.java new file mode 100644 index 0000000000000..88ff5485a5474 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/PoolingSessionFactorySettings.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authc.ldap; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.set.Sets; + +import java.util.Optional; +import java.util.Set; + +import static org.elasticsearch.common.settings.SecureSetting.secureString; + +public final class PoolingSessionFactorySettings { + public static final TimeValue DEFAULT_HEALTH_CHECK_INTERVAL = TimeValue.timeValueSeconds(60L); + public static final Setting BIND_DN = Setting.simpleString("bind_dn", Setting.Property.NodeScope, Setting.Property.Filtered); + public static final Setting LEGACY_BIND_PASSWORD = new Setting<>("bind_password", "", SecureString::new, + Setting.Property.NodeScope, Setting.Property.Filtered, Setting.Property.Deprecated); + public static final Setting SECURE_BIND_PASSWORD = secureString("secure_bind_password", LEGACY_BIND_PASSWORD); + + public static final int DEFAULT_CONNECTION_POOL_INITIAL_SIZE = 0; + public static final Setting POOL_INITIAL_SIZE = Setting.intSetting("user_search.pool.initial_size", + DEFAULT_CONNECTION_POOL_INITIAL_SIZE, 0, Setting.Property.NodeScope); + public static final int DEFAULT_CONNECTION_POOL_SIZE = 20; + public static final Setting POOL_SIZE = Setting.intSetting("user_search.pool.size", + DEFAULT_CONNECTION_POOL_SIZE, 1, Setting.Property.NodeScope); + public static final Setting HEALTH_CHECK_INTERVAL = Setting.timeSetting("user_search.pool.health_check.interval", + DEFAULT_HEALTH_CHECK_INTERVAL, Setting.Property.NodeScope); + public static final Setting HEALTH_CHECK_ENABLED = Setting.boolSetting("user_search.pool.health_check.enabled", + true, Setting.Property.NodeScope); + public static final Setting> HEALTH_CHECK_DN = new Setting<>("user_search.pool.health_check.dn", (String) null, + Optional::ofNullable, Setting.Property.NodeScope); + + private PoolingSessionFactorySettings() {} + + public static Set> getSettings() { + return Sets.newHashSet(POOL_INITIAL_SIZE, POOL_SIZE, HEALTH_CHECK_ENABLED, HEALTH_CHECK_INTERVAL, HEALTH_CHECK_DN, BIND_DN, + SECURE_BIND_PASSWORD, LEGACY_BIND_PASSWORD); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/SearchGroupsResolverSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/SearchGroupsResolverSettings.java new file mode 100644 index 0000000000000..67ae3bcf24b2d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/SearchGroupsResolverSettings.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authc.ldap; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; + +import java.util.HashSet; +import java.util.Set; +import java.util.function.Function; + +public final class SearchGroupsResolverSettings { + public static final Setting BASE_DN = Setting.simpleString("group_search.base_dn", + Setting.Property.NodeScope); + public static final Setting SCOPE = new Setting<>("group_search.scope", (String) null, + s -> LdapSearchScope.resolve(s, LdapSearchScope.SUB_TREE), Setting.Property.NodeScope); + public static final Setting USER_ATTRIBUTE = Setting.simpleString( + "group_search.user_attribute", Setting.Property.NodeScope); + private static final String GROUP_SEARCH_DEFAULT_FILTER = "(&" + + "(|(objectclass=groupOfNames)(objectclass=groupOfUniqueNames)" + + "(objectclass=group)(objectclass=posixGroup))" + + "(|(uniqueMember={0})(member={0})(memberUid={0})))"; + public static final Setting FILTER = new Setting<>("group_search.filter", + GROUP_SEARCH_DEFAULT_FILTER, Function.identity(), Setting.Property.NodeScope); + + private SearchGroupsResolverSettings() {} + + public static Set> getSettings() { + Set> settings = new HashSet<>(); + settings.add(BASE_DN); + settings.add(FILTER); + settings.add(USER_ATTRIBUTE); + settings.add(SCOPE); + return settings; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/UserAttributeGroupsResolverSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/UserAttributeGroupsResolverSettings.java new file mode 100644 index 0000000000000..88538a810a5dc --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/UserAttributeGroupsResolverSettings.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.ldap; + +import org.elasticsearch.common.settings.Setting; + +import java.util.Collections; +import java.util.Set; +import java.util.function.Function; + +public final class UserAttributeGroupsResolverSettings { + public static final Setting ATTRIBUTE = new Setting<>("user_group_attribute", "memberOf", + Function.identity(), Setting.Property.NodeScope); + + private UserAttributeGroupsResolverSettings() {} + + public static Set> getSettings() { + return Collections.singleton(ATTRIBUTE); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapLoadBalancingSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapLoadBalancingSettings.java new file mode 100644 index 0000000000000..4d7aff0939754 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapLoadBalancingSettings.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authc.ldap.support; + +import org.elasticsearch.common.settings.Setting; + +import java.util.HashSet; +import java.util.Set; + +public final class LdapLoadBalancingSettings { + public static final String LOAD_BALANCE_SETTINGS = "load_balance"; + public static final String LOAD_BALANCE_TYPE_SETTING = "type"; + public static final String CACHE_TTL_SETTING = "cache_ttl"; + + private LdapLoadBalancingSettings() {} + + public static Set> getSettings() { + Set> settings = new HashSet<>(); + settings.add(Setting.simpleString(LOAD_BALANCE_SETTINGS + "." + LOAD_BALANCE_TYPE_SETTING, Setting.Property.NodeScope)); + settings.add(Setting.simpleString(LOAD_BALANCE_SETTINGS + "." + CACHE_TTL_SETTING, Setting.Property.NodeScope)); + return settings; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapMetaDataResolverSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapMetaDataResolverSettings.java new file mode 100644 index 0000000000000..e284de9c03c3d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapMetaDataResolverSettings.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.ldap.support; + +import org.elasticsearch.common.settings.Setting; + +import java.util.Collections; +import java.util.List; +import java.util.function.Function; + +public final class LdapMetaDataResolverSettings { + public static final Setting> ADDITIONAL_META_DATA_SETTING = Setting.listSetting( + "metadata", Collections.emptyList(), Function.identity(), Setting.Property.NodeScope); + + private LdapMetaDataResolverSettings() {} + + public static List> getSettings() { + return Collections.singletonList(ADDITIONAL_META_DATA_SETTING); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapSearchScope.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapSearchScope.java new file mode 100644 index 0000000000000..3c3d6bc8ab818 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/LdapSearchScope.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authc.ldap.support; + +import com.unboundid.ldap.sdk.SearchScope; + +import java.util.Locale; + +public enum LdapSearchScope { + + BASE(SearchScope.BASE), + ONE_LEVEL(SearchScope.ONE), + SUB_TREE(SearchScope.SUB); + + private final SearchScope scope; + + LdapSearchScope(SearchScope scope) { + this.scope = scope; + } + + public SearchScope scope() { + return scope; + } + + public static LdapSearchScope resolve(String scope, LdapSearchScope defaultScope) { + if (scope == null) { + return defaultScope; + } + switch (scope.toLowerCase(Locale.ENGLISH)) { + case "base": + case "object": return BASE; + case "one_level" : return ONE_LEVEL; + case "sub_tree" : return SUB_TREE; + default: + throw new IllegalArgumentException("unknown search scope [" + scope + "]"); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/SessionFactorySettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/SessionFactorySettings.java new file mode 100644 index 0000000000000..42fc70f25176d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/ldap/support/SessionFactorySettings.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.ldap.support; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; +import java.util.function.Function; + +public final class SessionFactorySettings { + public static final String URLS_SETTING = "url"; + public static final String TIMEOUT_TCP_CONNECTION_SETTING = "timeout.tcp_connect"; + public static final String TIMEOUT_TCP_READ_SETTING = "timeout.tcp_read"; + public static final String TIMEOUT_LDAP_SETTING = "timeout.ldap_search"; + public static final String HOSTNAME_VERIFICATION_SETTING = "hostname_verification"; + public static final String FOLLOW_REFERRALS_SETTING = "follow_referrals"; + public static final Setting IGNORE_REFERRAL_ERRORS_SETTING = Setting.boolSetting( + "ignore_referral_errors", true, Setting.Property.NodeScope); + public static final TimeValue TIMEOUT_DEFAULT = TimeValue.timeValueSeconds(5); + + private SessionFactorySettings() {} + + public static Set> getSettings() { + Set> settings = new HashSet<>(); + settings.addAll(LdapLoadBalancingSettings.getSettings()); + settings.add(Setting.listSetting(URLS_SETTING, Collections.emptyList(), Function.identity(), + Setting.Property.NodeScope)); + settings.add(Setting.timeSetting(TIMEOUT_TCP_CONNECTION_SETTING, TIMEOUT_DEFAULT, Setting.Property.NodeScope)); + settings.add(Setting.timeSetting(TIMEOUT_TCP_READ_SETTING, TIMEOUT_DEFAULT, Setting.Property.NodeScope)); + settings.add(Setting.timeSetting(TIMEOUT_LDAP_SETTING, TIMEOUT_DEFAULT, Setting.Property.NodeScope)); + settings.add(Setting.boolSetting(HOSTNAME_VERIFICATION_SETTING, true, Setting.Property.NodeScope, Setting.Property.Filtered)); + settings.add(Setting.boolSetting(FOLLOW_REFERRALS_SETTING, true, Setting.Property.NodeScope)); + settings.add(IGNORE_REFERRAL_ERRORS_SETTING); + 
settings.addAll(SSLConfigurationSettings.withPrefix("ssl.").getAllSettings()); + return settings; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/pki/PkiRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/pki/PkiRealmSettings.java new file mode 100644 index 0000000000000..a3539b30d3e57 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/pki/PkiRealmSettings.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.pki; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.support.mapper.CompositeRoleMapperSettings; +import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; + +import java.util.HashSet; +import java.util.Set; +import java.util.regex.Pattern; + +public final class PkiRealmSettings { + public static final String TYPE = "pki"; + public static final String DEFAULT_USERNAME_PATTERN = "CN=(.*?)(?:,|$)"; + public static final Setting USERNAME_PATTERN_SETTING = new Setting<>("username_pattern", DEFAULT_USERNAME_PATTERN, + s -> Pattern.compile(s, Pattern.CASE_INSENSITIVE), Setting.Property.NodeScope); + private static final TimeValue DEFAULT_TTL = TimeValue.timeValueMinutes(20); + public static final Setting CACHE_TTL_SETTING = Setting.timeSetting("cache.ttl", DEFAULT_TTL, Setting.Property.NodeScope); + private static final int DEFAULT_MAX_USERS = 100_000; //100k users + public static final Setting CACHE_MAX_USERS_SETTING = Setting.intSetting("cache.max_users", DEFAULT_MAX_USERS, + Setting.Property.NodeScope); + public static final SSLConfigurationSettings SSL_SETTINGS = SSLConfigurationSettings.withoutPrefix(); + + private PkiRealmSettings() {} + + /** + * @return The {@link Setting setting configuration} for this realm type + */ + public static Set> getSettings() { + Set> settings = new HashSet<>(); + settings.add(USERNAME_PATTERN_SETTING); + settings.add(CACHE_TTL_SETTING); + settings.add(CACHE_MAX_USERS_SETTING); + + settings.add(SSL_SETTINGS.truststorePath); + settings.add(SSL_SETTINGS.truststorePassword); + settings.add(SSL_SETTINGS.legacyTruststorePassword); + settings.add(SSL_SETTINGS.truststoreAlgorithm); + settings.add(SSL_SETTINGS.caPaths); + + settings.addAll(CompositeRoleMapperSettings.getSettings()); + + return settings; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/saml/SamlRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/saml/SamlRealmSettings.java new file mode 100644 index 0000000000000..996b886c0dbcf --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/saml/SamlRealmSettings.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authc.saml; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; +import org.elasticsearch.xpack.core.ssl.X509KeyPairSettings; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.function.Function; + +public class SamlRealmSettings { + + public static final String TYPE = "saml"; + private static final String TRANSIENT_NAMEID_FORMAT = "urn:oasis:names:tc:SAML:2.0:nameid-format:transient"; + + // these settings will be used under the prefix xpack.security.authc.realms.REALM_NAME. + private static final String IDP_METADATA_SETTING_PREFIX = "idp.metadata."; + + public static final Setting IDP_ENTITY_ID = Setting.simpleString("idp.entity_id", Setting.Property.NodeScope); + public static final Setting IDP_METADATA_PATH + = Setting.simpleString(IDP_METADATA_SETTING_PREFIX + "path", Setting.Property.NodeScope); + public static final Setting IDP_METADATA_HTTP_REFRESH + = Setting.timeSetting(IDP_METADATA_SETTING_PREFIX + "http.refresh", TimeValue.timeValueHours(1), Setting.Property.NodeScope); + public static final Setting IDP_SINGLE_LOGOUT = Setting.boolSetting("idp.use_single_logout", true, Setting.Property.NodeScope); + + public static final Setting SP_ENTITY_ID = Setting.simpleString("sp.entity_id", Setting.Property.NodeScope); + public static final Setting SP_ACS = Setting.simpleString("sp.acs", Setting.Property.NodeScope); + public static final Setting SP_LOGOUT = Setting.simpleString("sp.logout", Setting.Property.NodeScope); + + public static final Setting NAMEID_FORMAT = new Setting<>("nameid_format", s -> TRANSIENT_NAMEID_FORMAT, Function.identity(), + Setting.Property.NodeScope); + public static final Setting NAMEID_ALLOW_CREATE = Setting.boolSetting("nameid.allow_create", false, + Setting.Property.NodeScope); + public static final Setting NAMEID_SP_QUALIFIER = Setting.simpleString("nameid.sp_qualifier", Setting.Property.NodeScope); + + public static final Setting FORCE_AUTHN = Setting.boolSetting("force_authn", false, Setting.Property.NodeScope); + public static final Setting POPULATE_USER_METADATA = Setting.boolSetting("populate_user_metadata", true, + Setting.Property.NodeScope); + + public static final AttributeSetting PRINCIPAL_ATTRIBUTE = new AttributeSetting("principal"); + public static final AttributeSetting GROUPS_ATTRIBUTE = new AttributeSetting("groups"); + public static final AttributeSetting DN_ATTRIBUTE = new AttributeSetting("dn"); + public static final AttributeSetting NAME_ATTRIBUTE = new AttributeSetting("name"); + public static final AttributeSetting MAIL_ATTRIBUTE = new AttributeSetting("mail"); + + public static final X509KeyPairSettings ENCRYPTION_SETTINGS = new X509KeyPairSettings("encryption.", false); + public static final Setting ENCRYPTION_KEY_ALIAS = + Setting.simpleString("encryption.keystore.alias", Setting.Property.NodeScope); + + public static final X509KeyPairSettings SIGNING_SETTINGS = new X509KeyPairSettings("signing.", false); + public static final Setting SIGNING_KEY_ALIAS = + Setting.simpleString("signing.keystore.alias", Setting.Property.NodeScope); + public static final Setting> SIGNING_MESSAGE_TYPES = Setting.listSetting("signing.saml_messages", + Collections.singletonList("*"), Function.identity(), Setting.Property.NodeScope); + + 
public static final Setting CLOCK_SKEW = Setting.positiveTimeSetting("allowed_clock_skew", TimeValue.timeValueMinutes(3), + Setting.Property.NodeScope); + + public static final String SSL_PREFIX = "ssl."; + + private SamlRealmSettings() { + } + + /** + * @return The {@link Setting setting configuration} for this realm type + */ + public static Set> getSettings() { + final Set> set = Sets.newHashSet(IDP_ENTITY_ID, IDP_METADATA_PATH, IDP_SINGLE_LOGOUT, + SP_ENTITY_ID, SP_ACS, SP_LOGOUT, + NAMEID_FORMAT, NAMEID_ALLOW_CREATE, NAMEID_SP_QUALIFIER, FORCE_AUTHN, + POPULATE_USER_METADATA, CLOCK_SKEW, + ENCRYPTION_KEY_ALIAS, SIGNING_KEY_ALIAS, SIGNING_MESSAGE_TYPES); + set.addAll(ENCRYPTION_SETTINGS.getAllSettings()); + set.addAll(SIGNING_SETTINGS.getAllSettings()); + set.addAll(SSLConfigurationSettings.withPrefix(SSL_PREFIX).getAllSettings()); + set.addAll(PRINCIPAL_ATTRIBUTE.settings()); + set.addAll(GROUPS_ATTRIBUTE.settings()); + set.addAll(DN_ATTRIBUTE.settings()); + set.addAll(NAME_ATTRIBUTE.settings()); + set.addAll(MAIL_ATTRIBUTE.settings()); + return set; + } + + /** + * The SAML realm offers a number of settings that rely on attributes that are populate by the Identity Provider in the SAML Response. + * Each attribute has 2 settings: + *

    + *
  • The name of the SAML attribute to use
  • + *
  • A java pattern (regex) to apply to that attribute value in order to extract the substring that should be used.
  • + *
+ * For example, the Elasticsearch User Principal could be configured to come from the SAML "mail" attribute, and extract only the + * local-port of the user's email address (i.e. the name before the '@'). + * This class encapsulates those 2 settings. + */ + public static final class AttributeSetting { + public static final String ATTRIBUTES_PREFIX = "attributes."; + public static final String ATTRIBUTE_PATTERNS_PREFIX = "attribute_patterns."; + + private final Setting attribute; + private final Setting pattern; + + public AttributeSetting(String name) { + attribute = Setting.simpleString(ATTRIBUTES_PREFIX + name, Setting.Property.NodeScope); + pattern = Setting.simpleString(ATTRIBUTE_PATTERNS_PREFIX + name, Setting.Property.NodeScope); + } + + public Collection> settings() { + return Arrays.asList(getAttribute(), getPattern()); + } + + public String name() { + return getAttribute().getKey(); + } + + public Setting getAttribute() { + return attribute; + } + + public Setting getPattern() { + return pattern; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/BCrypt.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/BCrypt.java new file mode 100644 index 0000000000000..ceb93dc4c853c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/BCrypt.java @@ -0,0 +1,787 @@ +package org.elasticsearch.xpack.core.security.authc.support; + +// Copyright (c) 2006 Damien Miller +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +import org.elasticsearch.common.settings.SecureString; + +import java.security.SecureRandom; + +/** + * BCrypt implements OpenBSD-style Blowfish password hashing using + * the scheme described in "A Future-Adaptable Password Scheme" by + * Niels Provos and David Mazieres. + *
<p>
+ * This password hashing system tries to thwart off-line password + * cracking using a computationally-intensive hashing algorithm, + * based on Bruce Schneier's Blowfish cipher. The work factor of + * the algorithm is parameterised, so it can be increased as + * computers get faster. + *
<p>
+ * Usage is really simple. To hash a password for the first time, + * call the hashpw method with a random salt, like this: + *
<p>
+ * <code>
+ * String pw_hash = BCrypt.hashpw(plain_password, BCrypt.gensalt());
+ * </code>
+ * <p>
+ * To check whether a plaintext password matches one that has been + * hashed previously, use the checkpw method: + *
<p>
+ * <code>
+ * if (BCrypt.checkpw(candidate_password, stored_hash))
+ *     System.out.println("It matches");
+ * else
+ *     System.out.println("It does not match");
+ * </code>
+ * <p>
+ * The gensalt() method takes an optional parameter (log_rounds) + * that determines the computational complexity of the hashing: + *
<p>
+ * <code>
+ * String strong_salt = BCrypt.gensalt(10)
+ * String stronger_salt = BCrypt.gensalt(12)
+ * </code>
+ * <p>
+ * The amount of work increases exponentially (2**log_rounds), so + * each increment is twice as much work. The default log_rounds is + * 10, and the valid range is 4 to 30. + * + * @author Damien Miller + * @version 0.2 + */ +public class BCrypt { + // BCrypt parameters + private static final int GENSALT_DEFAULT_LOG2_ROUNDS = 10; + private static final int BCRYPT_SALT_LEN = 16; + + // Blowfish parameters + private static final int BLOWFISH_NUM_ROUNDS = 16; + + // Initial contents of key schedule + private static final int P_orig[] = { + 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, + 0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89, + 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, + 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, + 0x9216d5d9, 0x8979fb1b + }; + private static final int S_orig[] = { + 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, + 0xb8e1afed, 0x6a267e96, 0xba7c9045, 0xf12c7f99, + 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, + 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, + 0x0d95748f, 0x728eb658, 0x718bcd58, 0x82154aee, + 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, + 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, + 0x8e79dcb0, 0x603a180e, 0x6c9e0e8b, 0xb01e8a3e, + 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, + 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, + 0x55ca396a, 0x2aab10b6, 0xb4cc5c34, 0x1141e8ce, + 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, + 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, + 0xafd6ba33, 0x6c24cf5c, 0x7a325381, 0x28958677, + 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, + 0xef845d5d, 0xe98575b1, 0xdc262302, 0xeb651b88, + 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, + 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, + 0x21c66842, 0xf6e96c9a, 0x670c9c61, 0xabd388f0, + 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, + 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, + 0xa1f1651d, 0x39af0176, 0x66ca593e, 0x82430e88, + 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, + 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, + 0x4ed3aa62, 0x363f7706, 0x1bfedf72, 0x429b023d, + 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, + 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, + 0xe3fe501a, 0xb6794c3b, 0x976ce0bd, 0x04c006ba, + 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, + 0x6dfc511f, 0x9b30952c, 0xcc814544, 0xaf5ebd09, + 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, + 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, + 0x5579c0bd, 0x1a60320a, 0xd6a100c6, 0x402c7279, + 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, + 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, + 0x323db5fa, 0xfd238760, 0x53317b48, 0x3e00df82, + 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, + 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, + 0x695b27b0, 0xbbca58c8, 0xe1ffa35d, 0xb8f011a0, + 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, + 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, + 0xe1ddf2da, 0xa4cb7e33, 0x62fb1341, 0xcee4c6e8, + 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, + 0xd08ed1d0, 0xafc725e0, 0x8e3c5b2f, 0x8e7594b7, + 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, + 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, + 0x2f2f2218, 0xbe0e1777, 0xea752dfe, 0x8b021fa1, + 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, + 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, + 0x165fa266, 0x80957705, 0x93cc7314, 0x211a1477, + 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, + 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 
0xae1e7e49, + 0x00250e2d, 0x2071b35e, 0x226800bb, 0x57b8e0af, + 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, + 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, + 0x83260376, 0x6295cfa9, 0x11c81968, 0x4e734a41, + 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, + 0x08ba6fb5, 0x571be91f, 0xf296ec6b, 0x2a0dd915, + 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, + 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, + 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, + 0xad6ea6b0, 0x49a7df7d, 0x9cee60b8, 0x8fedb266, + 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, + 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, + 0x3f54989a, 0x5b429d65, 0x6b8fe4d6, 0x99f73fd6, + 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, + 0x09686b3f, 0x3ebaefc9, 0x3c971814, 0x6b6a70a1, + 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, + 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, + 0xb03ada37, 0xf0500c0d, 0xf01c1f04, 0x0200b3ff, + 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, + 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, + 0x3ae5e581, 0x37c2dadc, 0xc8b57634, 0x9af3dda7, + 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, + 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, + 0x4e548b38, 0x4f6db908, 0x6f420d03, 0xf60a04bf, + 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, + 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, + 0x5512721f, 0x2e6b7124, 0x501adde6, 0x9f84cd87, + 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, + 0xef1c1847, 0x3215d908, 0xdd433b37, 0x24c2ba16, + 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, + 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, + 0x043556f1, 0xd7a3c76b, 0x3c11183b, 0x5924a509, + 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, + 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, + 0x771fe71c, 0x4e3d06fa, 0x2965dcb9, 0x99e71d0f, + 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, + 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, + 0xf2f74ea7, 0x361d2b3d, 0x1939260f, 0x19c27960, + 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, + 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, + 0xc332ddef, 0xbe6c5aa5, 0x65582185, 0x68ab9802, + 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, + 0x13cca830, 0xeb61bd96, 0x0334fe1e, 0xaa0363cf, + 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, + 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, + 0x648b1eaf, 0x19bdf0ca, 0xa02369b9, 0x655abb50, + 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, + 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, + 0xf837889a, 0x97e32d77, 0x11ed935f, 0x16681281, + 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, + 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, + 0xcdb30aeb, 0x532e3054, 0x8fd948e4, 0x6dbc3128, + 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, + 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, + 0x45eee2b6, 0xa3aaabea, 0xdb6c4f15, 0xfacb4fd0, + 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, + 0xcf62a1f2, 0x5b8d2646, 0xfc8883a0, 0xc1c7b6a3, + 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, + 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, + 0x58428d2a, 0x0c55f5ea, 0x1dadf43e, 0x233f7061, + 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, + 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, + 0xa6078084, 0x19f8509e, 0xe8efd855, 0x61d99735, + 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, + 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, + 0xdb73dbd3, 0x105588cd, 0x675fda79, 
0xe3674340, + 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, + 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, + 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, + 0x411520f7, 0x7602d4f7, 0xbcf46b2e, 0xd4a20068, + 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, + 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, + 0x4d95fc1d, 0x96b591af, 0x70f4ddd3, 0x66a02f45, + 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, + 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, + 0x28507825, 0x530429f4, 0x0a2c86da, 0xe9b66dfb, + 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, + 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, + 0xaace1e7c, 0xd3375fec, 0xce78a399, 0x406b2a42, + 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, + 0x3a6efa74, 0xdd5b4332, 0x6841e7f7, 0xca7820fb, + 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, + 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, + 0x55a867bc, 0xa1159a58, 0xcca92963, 0x99e1db33, + 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, + 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, + 0x95c11548, 0xe4c66d22, 0x48c1133f, 0xc70f86dc, + 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, + 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, + 0x257b7834, 0x602a9c60, 0xdff8e8a3, 0x1f636c1b, + 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, + 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, + 0x85b2a20e, 0xe6ba0d99, 0xde720c8c, 0x2da2f728, + 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, + 0x0a476341, 0x992eff74, 0x3a6f6eab, 0xf4f8fd37, + 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, + 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, + 0xf1290dc7, 0xcc00ffa3, 0xb5390f92, 0x690fed0b, + 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, + 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, + 0x37392eb3, 0xcc115979, 0x8026e297, 0xf42e312d, + 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, + 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, + 0x1a6b1018, 0x11caedfa, 0x3d25bdd8, 0xe2e1c3c9, + 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, + 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, + 0x9dbc8057, 0xf0f7c086, 0x60787bf8, 0x6003604d, + 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, + 0x77a057be, 0xbde8ae24, 0x55464299, 0xbf582e61, + 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, + 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, + 0x7aeb2661, 0x8b1ddf84, 0x846a0e79, 0x915f95e2, + 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, + 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, + 0xb77f19b6, 0xe0a9dc09, 0x662d09a1, 0xc4324633, + 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, + 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, + 0xdcb7da83, 0x573906fe, 0xa1e2ce9b, 0x4fcd7f52, + 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, + 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, + 0xf0177a28, 0xc0f586e0, 0x006058aa, 0x30dc7d62, + 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, + 0x6f05e409, 0x4b7c0188, 0x39720a3d, 0x7c927c24, + 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, + 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, + 0x1e50ef5e, 0xb161e6f8, 0xa28514d9, 0x6c51133c, + 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, + 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, + 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, + 0x5cb0679e, 0x4fa33742, 0xd3822740, 0x99bc9bbe, + 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, + 0x5748ab2f, 0xbc946e79, 0xc6a376d2, 
0x6549c2c8, + 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, + 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, + 0xa1fad5f0, 0x6a2d519a, 0x63ef8ce2, 0x9a86ee22, + 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, + 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, + 0x2826a2f9, 0xa73a3ae1, 0x4ba99586, 0xef5562e9, + 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, + 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, + 0xe990fd5a, 0x9e34d797, 0x2cf0b7d9, 0x022b8b51, + 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, + 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, + 0xe029ac71, 0xe019a5e6, 0x47b0acfd, 0xed93fa9b, + 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, + 0x15056dd4, 0x88f46dba, 0x03a16125, 0x0564f0bd, + 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, + 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, + 0x7533d928, 0xb155fdf5, 0x03563482, 0x8aba3cbb, + 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, + 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, + 0xea7a90c2, 0xfb3e7bce, 0x5121ce64, 0x774fbe32, + 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, + 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, + 0xb39a460a, 0x6445c0dd, 0x586cdecf, 0x1c20c8ae, + 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, + 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, + 0x72eacea8, 0xfa6484bb, 0x8d6612ae, 0xbf3c6f47, + 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, + 0x4040cb08, 0x4eb4e2cc, 0x34d2466a, 0x0115af84, + 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, + 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, + 0x611560b1, 0xe7933fdc, 0xbb3a792b, 0x344525bd, + 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, + 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, + 0x1a908749, 0xd44fbd9a, 0xd0dadecb, 0xd50ada38, + 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, + 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, + 0xbf97222c, 0x15e6fc2a, 0x0f91fc71, 0x9b941525, + 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, + 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, + 0xe0ec6e0e, 0x1698db3b, 0x4c98a0be, 0x3278e964, + 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, + 0xdf359f8d, 0x9b992f2e, 0xe60b6f47, 0x0fe3f11d, + 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, + 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, + 0xf523f357, 0xa6327623, 0x93a83531, 0x56cccd02, + 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, + 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, + 0xe6c6c7bd, 0x327a140a, 0x45e1d006, 0xc3f27b9a, + 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, + 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, + 0x53113ec0, 0x1640e3d3, 0x38abbd60, 0x2547adf0, + 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, + 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, + 0x1948c25c, 0x02fb8a8c, 0x01c36ae4, 0xd6ebe1f9, + 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, + 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6 + }; + + // bcrypt IV: "OrpheanBeholderScryDoubt". The C implementation calls + // this "ciphertext", but it is really plaintext or an IV. We keep + // the name to make code comparison easier. 
+ private static final int bf_crypt_ciphertext[] = { + 0x4f727068, 0x65616e42, 0x65686f6c, + 0x64657253, 0x63727944, 0x6f756274 + }; + + // Table for Base64 encoding + private static final char base64_code[] = { + '.', '/', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', + 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', + 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', + 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', + 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', + '6', '7', '8', '9' + }; + + // Table for Base64 decoding + private static final byte index_64[] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 0, 1, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63, -1, -1, + -1, -1, -1, -1, -1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + -1, -1, -1, -1, -1, -1, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, -1, -1, -1, -1, -1 + }; + + // Expanded Blowfish key + private int P[]; + private int S[]; + + /** + * Encode a byte array using bcrypt's slightly-modified base64 + * encoding scheme. Note that this is *not* compatible with + * the standard MIME-base64 encoding. + * + * @param d the byte array to encode + * @param len the number of bytes to encode + * @return base64-encoded string + * @exception IllegalArgumentException if the length is invalid + */ + private static String encode_base64(byte d[], int len) + throws IllegalArgumentException { + int off = 0; + StringBuffer rs = new StringBuffer(); + int c1, c2; + + if (len <= 0 || len > d.length) + throw new IllegalArgumentException ("Invalid len"); + + while (off < len) { + c1 = d[off++] & 0xff; + rs.append(base64_code[(c1 >> 2) & 0x3f]); + c1 = (c1 & 0x03) << 4; + if (off >= len) { + rs.append(base64_code[c1 & 0x3f]); + break; + } + c2 = d[off++] & 0xff; + c1 |= (c2 >> 4) & 0x0f; + rs.append(base64_code[c1 & 0x3f]); + c1 = (c2 & 0x0f) << 2; + if (off >= len) { + rs.append(base64_code[c1 & 0x3f]); + break; + } + c2 = d[off++] & 0xff; + c1 |= (c2 >> 6) & 0x03; + rs.append(base64_code[c1 & 0x3f]); + rs.append(base64_code[c2 & 0x3f]); + } + return rs.toString(); + } + + /** + * Look up the 3 bits base64-encoded by the specified character, + * range-checking againt conversion table + * @param x the base64-encoded value + * @return the decoded value of x + */ + private static byte char64(char x) { + if ((int)x < 0 || (int)x > index_64.length) + return -1; + return index_64[(int)x]; + } + + /** + * Decode a string encoded using bcrypt's base64 scheme to a + * byte array. Note that this is *not* compatible with + * the standard MIME-base64 encoding. 
+ * @param s the string to decode + * @param maxolen the maximum number of bytes to decode + * @return an array containing the decoded bytes + * @throws IllegalArgumentException if maxolen is invalid + */ + private static byte[] decode_base64(String s, int maxolen) + throws IllegalArgumentException { + StringBuffer rs = new StringBuffer(); + int off = 0, slen = s.length(), olen = 0; + byte ret[]; + byte c1, c2, c3, c4, o; + + if (maxolen <= 0) + throw new IllegalArgumentException ("Invalid maxolen"); + + while (off < slen - 1 && olen < maxolen) { + c1 = char64(s.charAt(off++)); + c2 = char64(s.charAt(off++)); + if (c1 == -1 || c2 == -1) + break; + o = (byte)(c1 << 2); + o |= (c2 & 0x30) >> 4; + rs.append((char)o); + if (++olen >= maxolen || off >= slen) + break; + c3 = char64(s.charAt(off++)); + if (c3 == -1) + break; + o = (byte)((c2 & 0x0f) << 4); + o |= (c3 & 0x3c) >> 2; + rs.append((char)o); + if (++olen >= maxolen || off >= slen) + break; + c4 = char64(s.charAt(off++)); + o = (byte)((c3 & 0x03) << 6); + o |= c4; + rs.append((char)o); + ++olen; + } + + ret = new byte[olen]; + for (off = 0; off < olen; off++) + ret[off] = (byte)rs.charAt(off); + return ret; + } + + /** + * Blowfish encipher a single 64-bit block encoded as + * two 32-bit halves + * @param lr an array containing the two 32-bit half blocks + * @param off the position in the array of the blocks + */ + private void encipher(int lr[], int off) { + int i, n, l = lr[off], r = lr[off + 1]; + + l ^= P[0]; + for (i = 0; i <= BLOWFISH_NUM_ROUNDS - 2;) { + // Feistel substitution on left word + n = S[(l >> 24) & 0xff]; + n += S[0x100 | ((l >> 16) & 0xff)]; + n ^= S[0x200 | ((l >> 8) & 0xff)]; + n += S[0x300 | (l & 0xff)]; + r ^= n ^ P[++i]; + + // Feistel substitution on right word + n = S[(r >> 24) & 0xff]; + n += S[0x100 | ((r >> 16) & 0xff)]; + n ^= S[0x200 | ((r >> 8) & 0xff)]; + n += S[0x300 | (r & 0xff)]; + l ^= n ^ P[++i]; + } + lr[off] = r ^ P[BLOWFISH_NUM_ROUNDS + 1]; + lr[off + 1] = l; + } + + /** + * Cycically extract a word of key material + * @param data the string to extract the data from + * @param offp a "pointer" (as a one-entry array) to the + * current offset into data + * @return the next word of material from data + */ + private static int streamtoword(byte data[], int offp[]) { + int i; + int word = 0; + int off = offp[0]; + + for (i = 0; i < 4; i++) { + word = (word << 8) | (data[off] & 0xff); + off = (off + 1) % data.length; + } + + offp[0] = off; + return word; + } + + /** + * Initialise the Blowfish key schedule + */ + private void init_key() { + P = P_orig.clone(); + S = S_orig.clone(); + } + + /** + * Key the Blowfish cipher + * @param key an array containing the key + */ + private void key(byte key[]) { + int i; + int koffp[] = { 0 }; + int lr[] = { 0, 0 }; + int plen = P.length, slen = S.length; + + for (i = 0; i < plen; i++) + P[i] = P[i] ^ streamtoword(key, koffp); + + for (i = 0; i < plen; i += 2) { + encipher(lr, 0); + P[i] = lr[0]; + P[i + 1] = lr[1]; + } + + for (i = 0; i < slen; i += 2) { + encipher(lr, 0); + S[i] = lr[0]; + S[i + 1] = lr[1]; + } + } + + /** + * Perform the "enhanced key schedule" step described by + * Provos and Mazieres in "A Future-Adaptable Password Scheme" + * http://www.openbsd.org/papers/bcrypt-paper.ps + * @param data salt information + * @param key password information + */ + private void ekskey(byte data[], byte key[]) { + int i; + int koffp[] = { 0 }, doffp[] = { 0 }; + int lr[] = { 0, 0 }; + int plen = P.length, slen = S.length; + + for (i = 0; i < plen; i++) + P[i] 
= P[i] ^ streamtoword(key, koffp); + + for (i = 0; i < plen; i += 2) { + lr[0] ^= streamtoword(data, doffp); + lr[1] ^= streamtoword(data, doffp); + encipher(lr, 0); + P[i] = lr[0]; + P[i + 1] = lr[1]; + } + + for (i = 0; i < slen; i += 2) { + lr[0] ^= streamtoword(data, doffp); + lr[1] ^= streamtoword(data, doffp); + encipher(lr, 0); + S[i] = lr[0]; + S[i + 1] = lr[1]; + } + } + + /** + * Perform the central password hashing step in the + * bcrypt scheme + * @param password the password to hash + * @param salt the binary salt to hash with the password + * @param log_rounds the binary logarithm of the number + * of rounds of hashing to apply + * @param cdata the plaintext to encrypt + * @return an array containing the binary hashed password + */ + public byte[] crypt_raw(byte password[], byte salt[], int log_rounds, + int cdata[]) { + int rounds, i, j; + int clen = cdata.length; + byte ret[]; + + if (log_rounds < 4 || log_rounds > 30) + throw new IllegalArgumentException ("Bad number of rounds"); + rounds = 1 << log_rounds; + if (salt.length != BCRYPT_SALT_LEN) + throw new IllegalArgumentException ("Bad salt length"); + + init_key(); + ekskey(salt, password); + for (i = 0; i != rounds; i++) { + key(password); + key(salt); + } + + for (i = 0; i < 64; i++) { + for (j = 0; j < (clen >> 1); j++) + encipher(cdata, j << 1); + } + + ret = new byte[clen * 4]; + for (i = 0, j = 0; i < clen; i++) { + ret[j++] = (byte)((cdata[i] >> 24) & 0xff); + ret[j++] = (byte)((cdata[i] >> 16) & 0xff); + ret[j++] = (byte)((cdata[i] >> 8) & 0xff); + ret[j++] = (byte)(cdata[i] & 0xff); + } + return ret; + } + + /** + * Hash a password using the OpenBSD bcrypt scheme. + * + * Modified from the original to take a SecureString instead of the original + * + * @param password the password to hash + * @param salt the salt to hash with (perhaps generated + * using BCrypt.gensalt) + * @return the hashed password + */ + public static String hashpw(SecureString password, String salt) { + BCrypt B; + String real_salt; + byte passwordb[], saltb[], hashed[]; + char minor = (char)0; + int rounds, off = 0; + StringBuffer rs = new StringBuffer(); + + if (salt.charAt(0) != '$' || salt.charAt(1) != '2') + throw new IllegalArgumentException ("Invalid salt version"); + if (salt.charAt(2) == '$') + off = 3; + else { + minor = salt.charAt(2); + if (minor != 'a' || salt.charAt(3) != '$') + throw new IllegalArgumentException ("Invalid salt revision"); + off = 4; + } + + // Extract number of rounds + if (salt.charAt(off + 2) > '$') + throw new IllegalArgumentException ("Missing salt rounds"); + rounds = Integer.parseInt(salt.substring(off, off + 2)); + + real_salt = salt.substring(off + 3, off + 25); + + /*************************** ES CHANGE START *************************/ + /* original code before introducing SecureString + try { + passwordb = (password + (minor >= 'a' ? 
"\000" : "")).getBytes("UTF-8"); + } catch (UnsupportedEncodingException uee) { + throw new AssertionError("UTF-8 is not supported"); + } + + */ + + // the next lines are the SecureString replacement for the above commented-out section + if (minor >= 'a') { + try (SecureString secureString = new SecureString(CharArrays.concat(password.getChars(), "\000".toCharArray()))) { + passwordb = CharArrays.toUtf8Bytes(secureString.getChars()); + } + } else { + passwordb = CharArrays.toUtf8Bytes(password.getChars()); + } + /*************************** ES CHANGE END *************************/ + + saltb = decode_base64(real_salt, BCRYPT_SALT_LEN); + + B = new BCrypt(); + hashed = B.crypt_raw(passwordb, saltb, rounds, + bf_crypt_ciphertext.clone()); + + rs.append("$2"); + if (minor >= 'a') + rs.append(minor); + rs.append("$"); + if (rounds < 10) + rs.append("0"); + if (rounds > 30) { + throw new IllegalArgumentException( + "rounds exceeds maximum (30)"); + } + rs.append(Integer.toString(rounds)); + rs.append("$"); + rs.append(encode_base64(saltb, saltb.length)); + rs.append(encode_base64(hashed, + bf_crypt_ciphertext.length * 4 - 1)); + return rs.toString(); + } + + /** + * Generate a salt for use with the BCrypt.hashpw() method + * @param log_rounds the log2 of the number of rounds of + * hashing to apply - the work factor therefore increases as + * 2**log_rounds. + * @param random an instance of SecureRandom to use + * @return an encoded salt value + */ + public static String gensalt(int log_rounds, SecureRandom random) { + StringBuffer rs = new StringBuffer(); + byte rnd[] = new byte[BCRYPT_SALT_LEN]; + + random.nextBytes(rnd); + + rs.append("$2a$"); + if (log_rounds < 10) + rs.append("0"); + if (log_rounds > 30) { + throw new IllegalArgumentException( + "log_rounds exceeds maximum (30)"); + } + rs.append(Integer.toString(log_rounds)); + rs.append("$"); + rs.append(encode_base64(rnd, rnd.length)); + return rs.toString(); + } + + /** + * Generate a salt for use with the BCrypt.hashpw() method + * @param log_rounds the log2 of the number of rounds of + * hashing to apply - the work factor therefore increases as + * 2**log_rounds. + * @return an encoded salt value + */ + public static String gensalt(int log_rounds) { + return gensalt(log_rounds, new SecureRandom()); + } + + /** + * Generate a salt for use with the BCrypt.hashpw() method, + * selecting a reasonable default for the number of hashing + * rounds to apply + * @return an encoded salt value + */ + public static String gensalt() { + return gensalt(GENSALT_DEFAULT_LOG2_ROUNDS); + } + + /** + * Check that a plaintext password matches a previously hashed + * one. 
+ * + * Modified from the original to take a SecureString plaintext and use a constant time comparison + * @param plaintext the plaintext password to verify + * @param hashed the previously-hashed password + * @return true if the passwords match, false otherwise + */ + public static boolean checkpw(SecureString plaintext, String hashed) { + /*************************** ES CHANGE START *************************/ + // this method previously took a string and did its own constant time comparison + return CharArrays.constantTimeEquals(hashed, hashpw(plaintext, hashed)); + /*************************** ES CHANGE END *************************/ + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/CachingUsernamePasswordRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/CachingUsernamePasswordRealmSettings.java new file mode 100644 index 0000000000000..a1d031a5b00c0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/CachingUsernamePasswordRealmSettings.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.support; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.unit.TimeValue; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + +public final class CachingUsernamePasswordRealmSettings { + public static final Setting CACHE_HASH_ALGO_SETTING = Setting.simpleString("cache.hash_algo", Setting.Property.NodeScope); + private static final TimeValue DEFAULT_TTL = TimeValue.timeValueMinutes(20); + public static final Setting CACHE_TTL_SETTING = Setting.timeSetting("cache.ttl", DEFAULT_TTL, Setting.Property.NodeScope); + private static final int DEFAULT_MAX_USERS = 100_000; //100k users + public static final Setting CACHE_MAX_USERS_SETTING = Setting.intSetting("cache.max_users", DEFAULT_MAX_USERS, + Setting.Property.NodeScope); + + private CachingUsernamePasswordRealmSettings() {} + + /** + * Returns the {@link Setting setting configuration} that is common for all caching realms + */ + public static Set> getCachingSettings() { + return new HashSet<>(Arrays.asList(CACHE_HASH_ALGO_SETTING, CACHE_TTL_SETTING, CACHE_MAX_USERS_SETTING)); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/CharArrays.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/CharArrays.java new file mode 100644 index 0000000000000..26df90c31a2de --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/CharArrays.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authc.support; + +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + +/** + * Helper class similar to Arrays to handle conversions for Char arrays + */ +public class CharArrays { + + public static char[] utf8BytesToChars(byte[] utf8Bytes) { + ByteBuffer byteBuffer = ByteBuffer.wrap(utf8Bytes); + CharBuffer charBuffer = StandardCharsets.UTF_8.decode(byteBuffer); + char[] chars = Arrays.copyOfRange(charBuffer.array(), charBuffer.position(), charBuffer.limit()); + byteBuffer.clear(); + charBuffer.clear(); + return chars; + } + + /** + * Like String.indexOf for for an array of chars + */ + static int indexOf(char[] array, char ch) { + for (int i = 0; (i < array.length); i++) { + if (array[i] == ch) { + return i; + } + } + return -1; + } + + /** + * Converts the provided char[] to a UTF-8 byte[]. The provided char[] is not modified by this + * method, so the caller needs to take care of clearing the value if it is sensitive. + */ + public static byte[] toUtf8Bytes(char[] chars) { + CharBuffer charBuffer = CharBuffer.wrap(chars); + ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(charBuffer); + byte[] bytes = Arrays.copyOfRange(byteBuffer.array(), byteBuffer.position(), byteBuffer.limit()); + Arrays.fill(byteBuffer.array(), (byte) 0); // clear sensitive data + return bytes; + } + + public static boolean charsBeginsWith(String prefix, char[] chars) { + if (chars == null || prefix == null) { + return false; + } + + if (prefix.length() > chars.length) { + return false; + } + + for (int i = 0; i < prefix.length(); i++) { + if (chars[i] != prefix.charAt(i)) { + return false; + } + } + + return true; + } + + public static boolean constantTimeEquals(char[] a, char[] b) { + if (a.length != b.length) { + return false; + } + + int equals = 0; + for (int i = 0; i < a.length; i++) { + equals |= a[i] ^ b[i]; + } + + return equals == 0; + } + + public static boolean constantTimeEquals(String a, String b) { + if (a.length() != b.length()) { + return false; + } + + int equals = 0; + for (int i = 0; i < a.length(); i++) { + equals |= a.charAt(i) ^ b.charAt(i); + } + + return equals == 0; + } + + public static char[] concat(char[] a, char[] b) { + final char[] result = new char[a.length + b.length]; + System.arraycopy(a, 0, result, 0, a.length); + System.arraycopy(b, 0, result, a.length, b.length); + return result; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/DnRoleMapperSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/DnRoleMapperSettings.java new file mode 100644 index 0000000000000..034f7a18dbee4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/DnRoleMapperSettings.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authc.support; + +import org.elasticsearch.common.settings.Setting; + +import java.util.Arrays; +import java.util.List; +import java.util.function.Function; + +public final class DnRoleMapperSettings { + + private static final String DEFAULT_FILE_NAME = "role_mapping.yml"; + public static final Setting ROLE_MAPPING_FILE_SETTING = new Setting<>("files.role_mapping", DEFAULT_FILE_NAME, + Function.identity(), Setting.Property.NodeScope); + public static final Setting USE_UNMAPPED_GROUPS_AS_ROLES_SETTING = + Setting.boolSetting("unmapped_groups_as_roles", false, Setting.Property.NodeScope); + + public static List> getSettings() { + return Arrays.asList(USE_UNMAPPED_GROUPS_AS_ROLES_SETTING, ROLE_MAPPING_FILE_SETTING); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java new file mode 100644 index 0000000000000..0d4a1d23e7910 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java @@ -0,0 +1,298 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.support; + +import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.hash.MessageDigests; +import org.elasticsearch.common.settings.SecureString; + +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.util.Base64; +import java.util.Locale; +import java.util.Random; + +public enum Hasher { + + BCRYPT() { + @Override + public char[] hash(SecureString text) { + String salt = BCrypt.gensalt(); + return BCrypt.hashpw(text, salt).toCharArray(); + } + + @Override + public boolean verify(SecureString text, char[] hash) { + String hashStr = new String(hash); + if (!hashStr.startsWith(BCRYPT_PREFIX)) { + return false; + } + return BCrypt.checkpw(text, hashStr); + } + }, + + BCRYPT4() { + @Override + public char[] hash(SecureString text) { + String salt = BCrypt.gensalt(4); + return BCrypt.hashpw(text, salt).toCharArray(); + } + + @Override + public boolean verify(SecureString text, char[] hash) { + String hashStr = new String(hash); + if (!hashStr.startsWith(BCRYPT_PREFIX)) { + return false; + } + return BCrypt.checkpw(text, hashStr); + } + }, + + BCRYPT5() { + @Override + public char[] hash(SecureString text) { + String salt = BCrypt.gensalt(5); + return BCrypt.hashpw(text, salt).toCharArray(); + } + + @Override + public boolean verify(SecureString text, char[] hash) { + String hashStr = new String(hash); + if (!hashStr.startsWith(BCRYPT_PREFIX)) { + return false; + } + return BCrypt.checkpw(text, hashStr); + } + }, + + BCRYPT6() { + @Override + public char[] hash(SecureString text) { + String salt = BCrypt.gensalt(6); + return BCrypt.hashpw(text, salt).toCharArray(); + } + + @Override + public boolean verify(SecureString text, char[] hash) { + String hashStr = new String(hash); + if (!hashStr.startsWith(BCRYPT_PREFIX)) { + return false; + } + return BCrypt.checkpw(text, hashStr); + } + }, + + BCRYPT7() { + @Override + public char[] hash(SecureString text) { + String salt = BCrypt.gensalt(7); + return BCrypt.hashpw(text, salt).toCharArray(); + } + + @Override + public boolean 
verify(SecureString text, char[] hash) { + String hashStr = new String(hash); + if (!hashStr.startsWith(BCRYPT_PREFIX)) { + return false; + } + return BCrypt.checkpw(text, hashStr); + } + }, + + BCRYPT8() { + @Override + public char[] hash(SecureString text) { + String salt = BCrypt.gensalt(8); + return BCrypt.hashpw(text, salt).toCharArray(); + } + + @Override + public boolean verify(SecureString text, char[] hash) { + String hashStr = new String(hash); + if (!hashStr.startsWith(BCRYPT_PREFIX)) { + return false; + } + return BCrypt.checkpw(text, hashStr); + } + }, + + BCRYPT9() { + @Override + public char[] hash(SecureString text) { + String salt = BCrypt.gensalt(9); + return BCrypt.hashpw(text, salt).toCharArray(); + } + + @Override + public boolean verify(SecureString text, char[] hash) { + String hashStr = new String(hash); + if (!hashStr.startsWith(BCRYPT_PREFIX)) { + return false; + } + return BCrypt.checkpw(text, hashStr); + } + }, + + SHA1() { + @Override + public char[] hash(SecureString text) { + byte[] textBytes = CharArrays.toUtf8Bytes(text.getChars()); + MessageDigest md = MessageDigests.sha1(); + md.update(textBytes); + String hash = Base64.getEncoder().encodeToString(md.digest()); + return (SHA1_PREFIX + hash).toCharArray(); + } + + @Override + public boolean verify(SecureString text, char[] hash) { + String hashStr = new String(hash); + if (!hashStr.startsWith(SHA1_PREFIX)) { + return false; + } + byte[] textBytes = CharArrays.toUtf8Bytes(text.getChars()); + MessageDigest md = MessageDigests.sha1(); + md.update(textBytes); + String passwd64 = Base64.getEncoder().encodeToString(md.digest()); + String hashNoPrefix = hashStr.substring(SHA1_PREFIX.length()); + return CharArrays.constantTimeEquals(hashNoPrefix, passwd64); + } + }, + + MD5() { + @Override + public char[] hash(SecureString text) { + MessageDigest md = MessageDigests.md5(); + md.update(CharArrays.toUtf8Bytes(text.getChars())); + String hash = Base64.getEncoder().encodeToString(md.digest()); + return (MD5_PREFIX + hash).toCharArray(); + } + + @Override + public boolean verify(SecureString text, char[] hash) { + String hashStr = new String(hash); + if (!hashStr.startsWith(MD5_PREFIX)) { + return false; + } + hashStr = hashStr.substring(MD5_PREFIX.length()); + MessageDigest md = MessageDigests.md5(); + md.update(CharArrays.toUtf8Bytes(text.getChars())); + String computedHashStr = Base64.getEncoder().encodeToString(md.digest()); + return CharArrays.constantTimeEquals(hashStr, computedHashStr); + } + }, + + SSHA256() { + @Override + public char[] hash(SecureString text) { + MessageDigest md = MessageDigests.sha256(); + md.update(CharArrays.toUtf8Bytes(text.getChars())); + char[] salt = SaltProvider.salt(8); + md.update(CharArrays.toUtf8Bytes(salt)); + String hash = Base64.getEncoder().encodeToString(md.digest()); + char[] result = new char[SSHA256_PREFIX.length() + salt.length + hash.length()]; + System.arraycopy(SSHA256_PREFIX.toCharArray(), 0, result, 0, SSHA256_PREFIX.length()); + System.arraycopy(salt, 0, result, SSHA256_PREFIX.length(), salt.length); + System.arraycopy(hash.toCharArray(), 0, result, SSHA256_PREFIX.length() + salt.length, hash.length()); + return result; + } + + @Override + public boolean verify(SecureString text, char[] hash) { + String hashStr = new String(hash); + if (!hashStr.startsWith(SSHA256_PREFIX)) { + return false; + } + hashStr = hashStr.substring(SSHA256_PREFIX.length()); + char[] saltAndHash = hashStr.toCharArray(); + MessageDigest md = MessageDigests.sha256(); + 
md.update(CharArrays.toUtf8Bytes(text.getChars())); + md.update(new String(saltAndHash, 0, 8).getBytes(StandardCharsets.UTF_8)); + String computedHash = Base64.getEncoder().encodeToString(md.digest()); + return CharArrays.constantTimeEquals(computedHash, new String(saltAndHash, 8, saltAndHash.length - 8)); + } + }, + + NOOP() { + @Override + public char[] hash(SecureString text) { + return text.clone().getChars(); + } + + @Override + public boolean verify(SecureString text, char[] hash) { + return CharArrays.constantTimeEquals(text.getChars(), hash); + } + }; + + private static final String BCRYPT_PREFIX = "$2a$"; + private static final String SHA1_PREFIX = "{SHA}"; + private static final String MD5_PREFIX = "{MD5}"; + private static final String SSHA256_PREFIX = "{SSHA256}"; + + public static Hasher resolve(String name, Hasher defaultHasher) { + if (name == null) { + return defaultHasher; + } + switch (name.toLowerCase(Locale.ROOT)) { + case "bcrypt": + return BCRYPT; + case "bcrypt4": + return BCRYPT4; + case "bcrypt5": + return BCRYPT5; + case "bcrypt6": + return BCRYPT6; + case "bcrypt7": + return BCRYPT7; + case "bcrypt8": + return BCRYPT8; + case "bcrypt9": + return BCRYPT9; + case "sha1": + return SHA1; + case "md5": + return MD5; + case "ssha256": + return SSHA256; + case "noop": + case "clear_text": + return NOOP; + default: + return defaultHasher; + } + } + + public static Hasher resolve(String name) { + Hasher hasher = resolve(name, null); + if (hasher == null) { + throw new IllegalArgumentException("unknown hash function [" + name + "]"); + } + return hasher; + } + + public abstract char[] hash(SecureString data); + + public abstract boolean verify(SecureString data, char[] hash); + + static final class SaltProvider { + + static final char[] ALPHABET = new char[]{ + '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', + 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', + 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', + 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z' + }; + + public static char[] salt(int length) { + Random random = Randomness.get(); + char[] salt = new char[length]; + for (int i = 0; i < length; i++) { + salt[i] = ALPHABET[(random.nextInt(ALPHABET.length))]; + } + return salt; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UsernamePasswordToken.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UsernamePasswordToken.java new file mode 100644 index 0000000000000..4fdf32608dd6a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/UsernamePasswordToken.java @@ -0,0 +1,121 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authc.support; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; + +import java.nio.CharBuffer; +import java.util.Arrays; +import java.util.Base64; +import java.util.Objects; + +import static org.elasticsearch.xpack.core.security.support.Exceptions.authenticationError; + +public class UsernamePasswordToken implements AuthenticationToken { + + public static final String BASIC_AUTH_PREFIX = "Basic "; + public static final String BASIC_AUTH_HEADER = "Authorization"; + private final String username; + private final SecureString password; + + public UsernamePasswordToken(String username, SecureString password) { + this.username = username; + this.password = password; + } + + public static String basicAuthHeaderValue(String username, SecureString passwd) { + CharBuffer chars = CharBuffer.allocate(username.length() + passwd.length() + 1); + byte[] charBytes = null; + try { + chars.put(username).put(':').put(passwd.getChars()); + charBytes = CharArrays.toUtf8Bytes(chars.array()); + + //TODO we still have passwords in Strings in headers. Maybe we can look into using a CharSequence? + String basicToken = Base64.getEncoder().encodeToString(charBytes); + return "Basic " + basicToken; + } finally { + Arrays.fill(chars.array(), (char) 0); + if (charBytes != null) { + Arrays.fill(charBytes, (byte) 0); + } + } + } + + @Override + public String principal() { + return username; + } + + @Override + public SecureString credentials() { + return password; + } + + @Override + public void clearCredentials() { + password.close(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + UsernamePasswordToken that = (UsernamePasswordToken) o; + + return Objects.equals(password, that.password) && + Objects.equals(username, that.username); + } + + @Override + public int hashCode() { + return Objects.hash(username, password.hashCode()); + } + + public static UsernamePasswordToken extractToken(ThreadContext context) { + String authStr = context.getHeader(BASIC_AUTH_HEADER); + if (authStr == null) { + return null; + } + + return extractToken(authStr); + } + + private static UsernamePasswordToken extractToken(String headerValue) { + if (headerValue.startsWith(BASIC_AUTH_PREFIX) == false) { + // the header does not start with 'Basic ' so we cannot use it, but it may be valid for another realm + return null; + } + + // if there is nothing after the prefix, the header is bad + if (headerValue.length() == BASIC_AUTH_PREFIX.length()) { + throw authenticationError("invalid basic authentication header value"); + } + + char[] userpasswd; + try { + userpasswd = CharArrays.utf8BytesToChars(Base64.getDecoder().decode(headerValue.substring(BASIC_AUTH_PREFIX.length()).trim())); + } catch (IllegalArgumentException e) { + throw authenticationError("invalid basic authentication header encoding", e); + } + + int i = CharArrays.indexOf(userpasswd, ':'); + if (i < 0) { + throw authenticationError("invalid basic authentication header value"); + } + + return new UsernamePasswordToken( + new String(Arrays.copyOfRange(userpasswd, 0, i)), + new SecureString(Arrays.copyOfRange(userpasswd, i + 1, userpasswd.length))); + } + + public static void putTokenHeader(ThreadContext context, UsernamePasswordToken token) { + context.putHeader(BASIC_AUTH_HEADER, 
basicAuthHeaderValue(token.username, token.password)); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/CompositeRoleMapperSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/CompositeRoleMapperSettings.java new file mode 100644 index 0000000000000..54fa0c2ffbcd9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/CompositeRoleMapperSettings.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.support.mapper; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings; + +import java.util.Collection; + +public final class CompositeRoleMapperSettings { + private CompositeRoleMapperSettings() {} + + public static Collection> getSettings() { + return DnRoleMapperSettings.getSettings(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java new file mode 100644 index 0000000000000..8e3f8e5593df1 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java @@ -0,0 +1,249 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.support.mapper; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionParser; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.BiConsumer; + +/** + * A representation of a single role-mapping for use in NativeRoleMappingStore. + * Logically, this represents a set of roles that should be applied to any user where a boolean + * expression evaluates to true. 
+ * + * @see RoleMapperExpression + * @see ExpressionParser + */ +public class ExpressionRoleMapping implements ToXContentObject, Writeable { + + private static final ObjectParser PARSER = new ObjectParser<>("role-mapping", Builder::new); + + /** + * The Upgrade API added a 'type' field when converting from 5 to 6. + * We don't use it, but we need to skip it if it exists. + */ + private static final String UPGRADE_API_TYPE_FIELD = "type"; + + static { + PARSER.declareStringArray(Builder::roles, Fields.ROLES); + PARSER.declareField(Builder::rules, ExpressionParser::parseObject, Fields.RULES, ObjectParser.ValueType.OBJECT); + PARSER.declareField(Builder::metadata, XContentParser::map, Fields.METADATA, ObjectParser.ValueType.OBJECT); + PARSER.declareBoolean(Builder::enabled, Fields.ENABLED); + BiConsumer ignored = (b, v) -> { + }; + // skip the doc_type and type fields in case we're parsing directly from the index + PARSER.declareString(ignored, new ParseField(NativeRoleMappingStoreField.DOC_TYPE_FIELD)); + PARSER.declareString(ignored, new ParseField(UPGRADE_API_TYPE_FIELD)); + } + + private final String name; + private final RoleMapperExpression expression; + private final List roles; + private final Map metadata; + private final boolean enabled; + + public ExpressionRoleMapping(String name, RoleMapperExpression expr, List roles, Map metadata, + boolean enabled) { + this.name = name; + this.expression = expr; + this.roles = roles; + this.metadata = metadata; + this.enabled = enabled; + } + + public ExpressionRoleMapping(StreamInput in) throws IOException { + this.name = in.readString(); + this.enabled = in.readBoolean(); + this.roles = in.readList(StreamInput::readString); + this.expression = ExpressionParser.readExpression(in); + this.metadata = in.readMap(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeBoolean(enabled); + out.writeStringList(roles); + ExpressionParser.writeExpression(expression, out); + out.writeMap(metadata); + } + + /** + * The name of this mapping. The name exists for the sole purpose of providing a meaningful identifier for each mapping, so that it may + * be referred to for update, retrieval or deletion. The name does not affect the set of roles that a mapping provides. + */ + public String getName() { + return name; + } + + /** + * The expression that determines whether the roles in this mapping should be applied to any given user. + * If the expression + * {@link RoleMapperExpression#match(org.elasticsearch.xpack.security.authc.support.mapper.expressiondsl.ExpressionModel) matches} a + * org.elasticsearch.xpack.security.authc.support.UserRoleMapper.UserData user, then the user should be assigned this mapping's + * {@link #getRoles() roles} + */ + public RoleMapperExpression getExpression() { + return expression; + } + + /** + * The list of {@link RoleDescriptor roles} (specified by name) that should be assigned to users + * that match the {@link #getExpression() expression} in this mapping. + */ + public List getRoles() { + return Collections.unmodifiableList(roles); + } + + /** + * Meta-data for this mapping. This exists for external systems of user to track information about this mapping such as where it was + * sourced from, when it was loaded, etc. + * This is not used within the mapping process, and does not affect whether the expression matches, nor which roles are assigned. 
+ */ + public Map getMetadata() { + return Collections.unmodifiableMap(metadata); + } + + /** + * Whether this mapping is enabled. Mappings that are not enabled are not applied to users. + */ + public boolean isEnabled() { + return enabled; + } + + @Override + public String toString() { + return getClass().getSimpleName() + "<" + name + " ; " + roles + " = " + Strings.toString(expression) + ">"; + } + + /** + * Parse an {@link ExpressionRoleMapping} from the provided XContent + */ + public static ExpressionRoleMapping parse(String name, BytesReference source, XContentType xContentType) throws IOException { + final NamedXContentRegistry registry = NamedXContentRegistry.EMPTY; + try (InputStream stream = source.streamInput(); + XContentParser parser = xContentType.xContent() + .createParser(registry, LoggingDeprecationHandler.INSTANCE, stream)) { + return parse(name, parser); + } + } + + /** + * Parse an {@link ExpressionRoleMapping} from the provided XContent + */ + public static ExpressionRoleMapping parse(String name, XContentParser parser) throws IOException { + try { + final Builder builder = PARSER.parse(parser, null); + return builder.build(name); + } catch (IllegalArgumentException | IllegalStateException e) { + throw new ParsingException(parser.getTokenLocation(), e.getMessage(), e); + } + } + + /** + * Converts this {@link ExpressionRoleMapping} into XContent that is compatible with + * the format handled by {@link #parse(String, XContentParser)}. + */ + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return toXContent(builder, params, false); + } + + public XContentBuilder toXContent(XContentBuilder builder, Params params, boolean includeDocType) throws IOException { + builder.startObject(); + builder.field(Fields.ENABLED.getPreferredName(), enabled); + builder.startArray(Fields.ROLES.getPreferredName()); + for (String r : roles) { + builder.value(r); + } + builder.endArray(); + builder.field(Fields.RULES.getPreferredName()); + expression.toXContent(builder, params); + + builder.field(Fields.METADATA.getPreferredName(), metadata); + + if (includeDocType) { + builder.field(NativeRoleMappingStoreField.DOC_TYPE_FIELD, NativeRoleMappingStoreField.DOC_TYPE_ROLE_MAPPING); + } + return builder.endObject(); + } + + /** + * Used to facilitate the use of {@link ObjectParser} (via {@link #PARSER}). + */ + private static class Builder { + private RoleMapperExpression rules; + private List roles; + private Map metadata = Collections.emptyMap(); + private Boolean enabled; + + Builder rules(RoleMapperExpression expression) { + this.rules = expression; + return this; + } + + Builder roles(List roles) { + this.roles = roles; + return this; + } + + Builder metadata(Map metadata) { + this.metadata = metadata; + return this; + } + + Builder enabled(boolean enabled) { + this.enabled = enabled; + return this; + } + + private ExpressionRoleMapping build(String name) { + if (roles == null) { + throw missingField(name, Fields.ROLES); + } + if (rules == null) { + throw missingField(name, Fields.RULES); + } + if (enabled == null) { + throw missingField(name, Fields.ENABLED); + } + return new ExpressionRoleMapping(name, rules, roles, metadata, enabled); + } + + private IllegalStateException missingField(String id, ParseField field) { + return new IllegalStateException("failed to parse role-mapping [" + id + "]. 
missing field [" + field + "]"); + } + + } + + public interface Fields { + ParseField ROLES = new ParseField("roles"); + ParseField ENABLED = new ParseField("enabled"); + ParseField RULES = new ParseField("rules"); + ParseField METADATA = new ParseField("metadata"); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/NativeRoleMappingStoreField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/NativeRoleMappingStoreField.java new file mode 100644 index 0000000000000..28cb3d4fbbe66 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/NativeRoleMappingStoreField.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.support.mapper; + +public final class NativeRoleMappingStoreField { + + public static final String DOC_TYPE_FIELD = "doc_type"; + public static final String DOC_TYPE_ROLE_MAPPING = "role-mapping"; + public static final String ID_PREFIX = DOC_TYPE_ROLE_MAPPING + "_"; + public static final String SECURITY_GENERIC_TYPE = "doc"; + + private NativeRoleMappingStoreField() {} +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/AllExpression.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/AllExpression.java new file mode 100644 index 0000000000000..d7a4781903b44 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/AllExpression.java @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; + +/** + * An expression that evaluates to true if-and-only-if all its children + * evaluate to true. + * An all expression with no children is always true. 
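+ * <p>
+ * For illustration only (the field names and values below are hypothetical, not taken from this change),
+ * an {@code all} rule in the JSON DSL wraps an array of child rules that must all match:
+ * <pre>
+ * { "all": [
+ *     { "field": { "realm.name": "ldap1" } },
+ *     { "field": { "groups": "admin" } }
+ * ] }
+ * </pre>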
+ */ +public final class AllExpression implements RoleMapperExpression { + + public static final String NAME = "all"; + + private final List elements; + + AllExpression(List elements) { + assert elements != null; + this.elements = elements; + } + + public AllExpression(StreamInput in) throws IOException { + this(ExpressionParser.readExpressionList(in)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + ExpressionParser.writeExpressionList(elements, out); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public boolean match(ExpressionModel model) { + return elements.stream().allMatch(RoleMapperExpression.predicate(model)); + } + + public List getElements() { + return Collections.unmodifiableList(elements); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + final AllExpression that = (AllExpression) o; + return this.elements.equals(that.elements); + } + + @Override + public int hashCode() { + return elements.hashCode(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startArray(ExpressionParser.Fields.ALL.getPreferredName()); + for (RoleMapperExpression e : elements) { + e.toXContent(builder, params); + } + builder.endArray(); + return builder.endObject(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/AnyExpression.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/AnyExpression.java new file mode 100644 index 0000000000000..7f609e73daf8b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/AnyExpression.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; + +/** + * An expression that evaluates to true if at least one of its children + * evaluate to true. + * An any expression with no children is never true. 
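+ * <p>
+ * For illustration only (hypothetical field names and values), an {@code any} rule matches when at
+ * least one of its child rules matches:
+ * <pre>
+ * { "any": [
+ *     { "field": { "groups": "admin" } },
+ *     { "field": { "groups": "superuser" } }
+ * ] }
+ * </pre>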
+ */ +public final class AnyExpression implements RoleMapperExpression { + + public static final String NAME = "any"; + + private final List elements; + + AnyExpression(List elements) { + assert elements != null; + this.elements = elements; + } + + public AnyExpression(StreamInput in) throws IOException { + this(ExpressionParser.readExpressionList(in)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + ExpressionParser.writeExpressionList(elements, out); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public boolean match(ExpressionModel model) { + return elements.stream().anyMatch(RoleMapperExpression.predicate(model)); + } + + public List getElements() { + return Collections.unmodifiableList(elements); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + final AnyExpression that = (AnyExpression) o; + return this.elements.equals(that.elements); + } + + @Override + public int hashCode() { + return elements.hashCode(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startArray(ExpressionParser.Fields.ANY.getPreferredName()); + for (RoleMapperExpression e : elements) { + e.toXContent(builder, params); + } + builder.endArray(); + return builder.endObject(); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExceptExpression.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExceptExpression.java new file mode 100644 index 0000000000000..b85a59ee70802 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExceptExpression.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl; + +import java.io.IOException; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; + +/** + * A negating expression. That is, this expression evaluates to true if-and-only-if + * its delegate expression evaluate to false. + * Syntactically, except expressions are intended to be children of all + * expressions ({@link AllExpression}). 
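+ * <p>
+ * A sketch of that usage (the field names and values are hypothetical): the {@code except} rule negates
+ * the rule it wraps, and the parser only accepts it among the children of an {@code all} rule:
+ * <pre>
+ * { "all": [
+ *     { "field": { "groups": "users" } },
+ *     { "except": { "field": { "username": "guest" } } }
+ * ] }
+ * </pre>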
+ */ +public final class ExceptExpression implements RoleMapperExpression { + + public static final String NAME = "except"; + + private final RoleMapperExpression expression; + + ExceptExpression(RoleMapperExpression expression) { + assert expression != null; + this.expression = expression; + } + + public ExceptExpression(StreamInput in) throws IOException { + this(ExpressionParser.readExpression(in)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + ExpressionParser.writeExpression(expression, out); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public boolean match(ExpressionModel model) { + return !expression.match(model); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + final ExceptExpression that = (ExceptExpression) o; + return this.expression.equals(that.expression); + } + + @Override + public int hashCode() { + return expression.hashCode(); + } + + RoleMapperExpression getInnerExpression() { + return expression; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ExpressionParser.Fields.EXCEPT.getPreferredName()); + expression.toXContent(builder, params); + return builder.endObject(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModel.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModel.java new file mode 100644 index 0000000000000..8d43f864878af --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModel.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl; + +import org.elasticsearch.common.Numbers; +import org.elasticsearch.common.collect.Tuple; + +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Predicate; + +/** + * Represents the "model" object to be evaluated within a {@link RoleMapperExpression}. + * The model is a flat object, where fields are defined by strings and value is either a + * string, boolean, or number, or a collection of the above. + */ +public class ExpressionModel { + + public static final Predicate NULL_PREDICATE = field -> field.getValue() == null; + private Map>> fields; + + public ExpressionModel() { + this.fields = new HashMap<>(); + } + + /** + * Defines a field using a predicate that corresponds to the type of {@code value} + * + * @see #buildPredicate(Object) + */ + public ExpressionModel defineField(String name, Object value) { + return defineField(name, value, buildPredicate(value)); + } + + /** + * Defines a field using a supplied predicate. + */ + public ExpressionModel defineField(String name, Object value, Predicate predicate) { + this.fields.put(name, new Tuple<>(value, predicate)); + return this; + } + + /** + * Returns {@code true} if the named field, matches any of the provided values. 
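+ * <p>
+ * A minimal usage sketch (the field name and value are hypothetical):
+ * <pre>
+ * ExpressionModel model = new ExpressionModel().defineField("username", "jdoe");
+ * boolean matched = model.test("username",
+ *         Collections.singletonList(new FieldExpression.FieldValue("jdoe"))); // true
+ * </pre>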
+ */ + public boolean test(String field, List values) { + final Tuple> tuple = this.fields.get(field); + final Predicate predicate; + if (tuple == null) { + predicate = NULL_PREDICATE; + } else { + predicate = tuple.v2(); + } + return values.stream().anyMatch(predicate); + } + + /** + * Constructs a {@link Predicate} that matches correctly based on the type of the provided parameter. + */ + static Predicate buildPredicate(Object object) { + if (object == null) { + return NULL_PREDICATE; + } + if (object instanceof Boolean) { + return field -> object.equals(field.getValue()); + } + if (object instanceof Number) { + return field -> numberEquals((Number) object, field.getValue()); + } + if (object instanceof String) { + return field -> field.getAutomaton() == null ? object.equals(field.getValue()) : field.getAutomaton().run((String) object); + } + if (object instanceof Collection) { + return ((Collection) object).stream() + .map(element -> buildPredicate(element)) + .reduce((a, b) -> a.or(b)) + .orElse(fieldValue -> false); + } + throw new IllegalArgumentException("Unsupported value type " + object.getClass()); + } + + /** + * A comparison of {@link Number} objects that compares by floating point when either value is a {@link Float} or {@link Double} + * otherwise compares by {@link Numbers#toLongExact long}. + */ + private static boolean numberEquals(Number left, Object other) { + if (left.equals(other)) { + return true; + } + if ((other instanceof Number) == false) { + return false; + } + Number right = (Number) other; + if (left instanceof Double || left instanceof Float + || right instanceof Double || right instanceof Float) { + return Double.compare(left.doubleValue(), right.doubleValue()) == 0; + } + return Numbers.toLongExact(left) == Numbers.toLongExact(right); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParser.java new file mode 100644 index 0000000000000..d2392b3d172aa --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParser.java @@ -0,0 +1,198 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * Parses the JSON (XContent) based boolean expression DSL into a tree of {@link RoleMapperExpression} objects. 
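+ * <p>
+ * The DSL recognises four rule types, named by {@link Fields}: {@code any}, {@code all}, {@code field}
+ * and {@code except}. As a hypothetical example, a {@code field} rule may list several values, in which
+ * case it matches if any one of them matches:
+ * <pre>
+ * { "field": { "groups": [ "admin", "operators" ] } }
+ * </pre>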
+ */ +public final class ExpressionParser { + + public static RoleMapperExpression readExpression(StreamInput in) throws IOException { + return in.readNamedWriteable(RoleMapperExpression.class); + } + + public static void writeExpression(RoleMapperExpression expression, StreamOutput out) throws IOException { + out.writeNamedWriteable(expression); + } + + static List readExpressionList(StreamInput in) throws IOException { + return in.readNamedWriteableList(RoleMapperExpression.class); + } + + static void writeExpressionList(List list, StreamOutput out) throws IOException { + out.writeNamedWriteableList(list); + } + + /** + * This function exists to be compatible with + * {@link org.elasticsearch.common.xcontent.ContextParser#parse(XContentParser, Object)} + */ + public static RoleMapperExpression parseObject(XContentParser parser, String id) throws IOException { + return new ExpressionParser().parse(id, parser); + } + + /** + * @param name The name of the expression tree within its containing object. Used to provide + * descriptive error messages. + * @param content The XContent (typically JSON) DSL representation of the expression + */ + public RoleMapperExpression parse(String name, XContentSource content) throws IOException { + try (InputStream stream = content.getBytes().streamInput()) { + return parse(name, content.parser(NamedXContentRegistry.EMPTY, stream)); + } + } + + /** + * @param name The name of the expression tree within its containing object. Used to provide + * descriptive error messages. + * @param parser A parser over the XContent (typically JSON) DSL representation of the + * expression + */ + public RoleMapperExpression parse(String name, XContentParser parser) throws IOException { + return parseRulesObject(name, parser, false); + } + + private RoleMapperExpression parseRulesObject(String objectName, XContentParser parser, + boolean allowExcept) throws IOException { + // find the start of the DSL object + XContentParser.Token token; + if (parser.currentToken() == null) { + token = parser.nextToken(); + } else { + token = parser.currentToken(); + } + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("failed to parse rules expression. expected [{}] to be an object but found [{}] instead", + objectName, token); + } + + final String fieldName = readFieldName(objectName, parser); + final RoleMapperExpression expr = parseExpression(parser, fieldName, allowExcept, objectName); + if (parser.nextToken() != XContentParser.Token.END_OBJECT) { + throw new ElasticsearchParseException("failed to parse rules expression. object [{}] contains multiple fields", objectName); + } + return expr; + } + + private RoleMapperExpression parseExpression(XContentParser parser, String field, boolean allowExcept, String objectName) + throws IOException { + + if (Fields.ANY.match(field, parser.getDeprecationHandler())) { + return new AnyExpression(parseExpressionArray(Fields.ANY, parser, false)); + } else if (Fields.ALL.match(field, parser.getDeprecationHandler())) { + return new AllExpression(parseExpressionArray(Fields.ALL, parser, true)); + } else if (Fields.FIELD.match(field, parser.getDeprecationHandler())) { + return parseFieldExpression(parser); + } else if (Fields.EXCEPT.match(field, parser.getDeprecationHandler())) { + if (allowExcept) { + return parseExceptExpression(parser); + } else { + throw new ElasticsearchParseException("failed to parse rules expression. 
field [{}] is not allowed within [{}]", + field, objectName); + } + } else { + throw new ElasticsearchParseException("failed to parse rules expression. field [{}] is not recognised in object [{}]", + field, objectName); + } + } + + private RoleMapperExpression parseFieldExpression(XContentParser parser) throws IOException { + checkStartObject(parser); + final String fieldName = readFieldName(Fields.FIELD.getPreferredName(), parser); + final List values; + if (parser.nextToken() == XContentParser.Token.START_ARRAY) { + values = parseArray(Fields.FIELD, parser, this::parseFieldValue); + } else { + values = Collections.singletonList(parseFieldValue(parser)); + } + if (parser.nextToken() != XContentParser.Token.END_OBJECT) { + throw new ElasticsearchParseException("failed to parse rules expression. object [{}] contains multiple fields", + Fields.FIELD.getPreferredName()); + } + return new FieldExpression(fieldName, values); + } + + private RoleMapperExpression parseExceptExpression(XContentParser parser) throws IOException { + checkStartObject(parser); + return new ExceptExpression(parseRulesObject(Fields.EXCEPT.getPreferredName(), parser, false)); + } + + private void checkStartObject(XContentParser parser) throws IOException { + final XContentParser.Token token = parser.nextToken(); + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("failed to parse rules expression. expected an object but found [{}] instead", token); + } + } + + private String readFieldName(String objectName, XContentParser parser) throws IOException { + if (parser.nextToken() != XContentParser.Token.FIELD_NAME) { + throw new ElasticsearchParseException("failed to parse rules expression. object [{}] does not contain any fields", objectName); + } + return parser.currentName(); + } + + private List parseExpressionArray(ParseField field, XContentParser parser, boolean allowExcept) + throws IOException { + parser.nextToken(); // parseArray requires that the parser is positioned at the START_ARRAY token + return parseArray(field, parser, p -> parseRulesObject(field.getPreferredName(), p, allowExcept)); + } + + private List parseArray(ParseField field, XContentParser parser, CheckedFunction elementParser) + throws IOException { + final XContentParser.Token token = parser.currentToken(); + if (token == XContentParser.Token.START_ARRAY) { + List list = new ArrayList<>(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + list.add(elementParser.apply(parser)); + } + return list; + } else { + throw new ElasticsearchParseException("failed to parse rules expression. field [{}] requires an array", field); + } + } + + private FieldExpression.FieldValue parseFieldValue(XContentParser parser) throws IOException { + switch (parser.currentToken()) { + case VALUE_STRING: + return new FieldExpression.FieldValue(parser.text()); + + case VALUE_BOOLEAN: + return new FieldExpression.FieldValue(parser.booleanValue()); + + case VALUE_NUMBER: + return new FieldExpression.FieldValue(parser.longValue()); + + case VALUE_NULL: + return new FieldExpression.FieldValue(null); + + default: + throw new ElasticsearchParseException("failed to parse rules expression. 
expected a field value but found [{}] instead", + parser.currentToken()); + } + } + + public interface Fields { + ParseField ANY = new ParseField("any"); + ParseField ALL = new ParseField("all"); + ParseField EXCEPT = new ParseField("except"); + ParseField FIELD = new ParseField("field"); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/FieldExpression.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/FieldExpression.java new file mode 100644 index 0000000000000..0e681b110efa4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/FieldExpression.java @@ -0,0 +1,156 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl; + +import org.apache.lucene.util.automaton.CharacterRunAutomaton; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.security.support.Automatons; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +/** + * An expression that evaluates to true if a field (map element) matches + * the provided values. A field expression may have more than one provided value, in which + * case the expression is true if any of the values are matched. 
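+ * <p>
+ * String values that look like wildcard patterns, or Lucene regular expressions wrapped in {@code /},
+ * are compiled to an automaton and matched as patterns rather than compared literally. A hypothetical
+ * example:
+ * <pre>
+ * { "field": { "username": "*@example.com" } }
+ * </pre>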
+ */ +public final class FieldExpression implements RoleMapperExpression { + + public static final String NAME = "field"; + + private final String field; + private final List values; + + public FieldExpression(String field, List values) { + if (field == null || field.isEmpty()) { + throw new IllegalArgumentException("null or empty field name (" + field + ")"); + } + if (values == null || values.isEmpty()) { + throw new IllegalArgumentException("null or empty values (" + values + ")"); + } + this.field = field; + this.values = values; + } + + public FieldExpression(StreamInput in) throws IOException { + this(in.readString(), in.readList(FieldValue::readFrom)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(field); + out.writeList(values); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public boolean match(ExpressionModel model) { + return model.test(field, values); + } + + public String getField() { + return field; + } + + public List getValues() { + return Collections.unmodifiableList(values); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + final FieldExpression that = (FieldExpression) o; + + return this.field.equals(that.field) && this.values.equals(that.values); + } + + @Override + public int hashCode() { + int result = field.hashCode(); + result = 31 * result + values.hashCode(); + return result; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject(ExpressionParser.Fields.FIELD.getPreferredName()); + if (this.values.size() == 1) { + builder.field(this.field); + values.get(0).toXContent(builder, params); + } else { + builder.startArray(this.field); + for (FieldValue fp : values) { + fp.toXContent(builder, params); + } + builder.endArray(); + } + builder.endObject(); + return builder.endObject(); + } + + public static class FieldValue implements ToXContent, Writeable { + private final Object value; + @Nullable + private final CharacterRunAutomaton automaton; + + public FieldValue(Object value) { + this.value = value; + this.automaton = buildAutomaton(value); + } + + private static CharacterRunAutomaton buildAutomaton(Object value) { + if (value instanceof String) { + final String str = (String) value; + if (Regex.isSimpleMatchPattern(str) || isLuceneRegex(str)) { + return new CharacterRunAutomaton(Automatons.patterns(str)); + } + } + return null; + } + + private static boolean isLuceneRegex(String str) { + return str.length() > 1 && str.charAt(0) == '/' && str.charAt(str.length() - 1) == '/'; + } + + public Object getValue() { + return value; + } + + public CharacterRunAutomaton getAutomaton() { + return automaton; + } + + public static FieldValue readFrom(StreamInput in) throws IOException { + return new FieldValue(in.readGenericValue()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeGenericValue(value); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(value); + } + + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/RoleMapperExpression.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/RoleMapperExpression.java new file 
mode 100644 index 0000000000000..89b80d9296eb3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/RoleMapperExpression.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl; + +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.xcontent.ToXContentObject; + +import java.util.function.Predicate; + +/** + * Implementations of this interface represent an expression over a simple object that resolves to + * a boolean value. The "simple object" is provided as a {@link ExpressionModel}. + */ +public interface RoleMapperExpression extends ToXContentObject, NamedWriteable { + + /** + * Determines whether this expression matches against the provided object. + * @param model + */ + boolean match(ExpressionModel model); + + /** + * Adapt this expression to a standard {@link Predicate} + */ + default Predicate asPredicate() { + return this::match; + } + + /** + * Creates an inverted predicate that can test whether an expression matches + * a fixed object. Its purpose is for cases where there is a {@link java.util.stream.Stream} of + * expressions, that need to be filtered against a single map. + */ + static Predicate predicate(ExpressionModel map) { + return expr -> expr.match(map); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/AuthorizationServiceField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/AuthorizationServiceField.java new file mode 100644 index 0000000000000..3dd3dff87143f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/AuthorizationServiceField.java @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authz; + +public final class AuthorizationServiceField { + public static final String INDICES_PERMISSIONS_KEY = "_indices_permissions"; + + private AuthorizationServiceField() {} +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/IndicesAndAliasesResolverField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/IndicesAndAliasesResolverField.java new file mode 100644 index 0000000000000..932febda1808f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/IndicesAndAliasesResolverField.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authz; + +public final class IndicesAndAliasesResolverField { + //placeholder used in the security plugin to indicate that the request is authorized knowing that it will yield an empty response + public static final String NO_INDEX_PLACEHOLDER = "-*"; + + private IndicesAndAliasesResolverField() {} + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java new file mode 100644 index 0000000000000..2e03cbb24a320 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java @@ -0,0 +1,715 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authz; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.core.security.support.MetadataUtils; +import org.elasticsearch.xpack.core.security.support.Validation; +import org.elasticsearch.xpack.core.security.xcontent.XContentUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +/** + * A holder for a Role that contains user-readable information about the Role + * without containing the actual Role object. 
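+ * <p>
+ * For illustration only (the index names, privileges, query and metadata below are hypothetical), the
+ * XContent form handled by {@link #parse(String, XContentParser, boolean)} looks roughly like:
+ * <pre>
+ * {
+ *   "cluster": [ "monitor" ],
+ *   "indices": [ {
+ *     "names": [ "logs-*" ],
+ *     "privileges": [ "read" ],
+ *     "field_security": { "grant": [ "message" ] },
+ *     "query": { "term": { "department": "eng" } }
+ *   } ],
+ *   "run_as": [ "other_user" ],
+ *   "metadata": { "version": 1 }
+ * }
+ * </pre>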
+ */ +public class RoleDescriptor implements ToXContentObject { + + public static final String ROLE_TYPE = "role"; + + private final String name; + private final String[] clusterPrivileges; + private final IndicesPrivileges[] indicesPrivileges; + private final String[] runAs; + private final Map metadata; + private final Map transientMetadata; + + public RoleDescriptor(String name, + @Nullable String[] clusterPrivileges, + @Nullable IndicesPrivileges[] indicesPrivileges, + @Nullable String[] runAs) { + this(name, clusterPrivileges, indicesPrivileges, runAs, null); + } + + public RoleDescriptor(String name, + @Nullable String[] clusterPrivileges, + @Nullable IndicesPrivileges[] indicesPrivileges, + @Nullable String[] runAs, + @Nullable Map metadata) { + this(name, clusterPrivileges, indicesPrivileges, runAs, metadata, null); + } + + + public RoleDescriptor(String name, + @Nullable String[] clusterPrivileges, + @Nullable IndicesPrivileges[] indicesPrivileges, + @Nullable String[] runAs, + @Nullable Map metadata, + @Nullable Map transientMetadata) { + this.name = name; + this.clusterPrivileges = clusterPrivileges != null ? clusterPrivileges : Strings.EMPTY_ARRAY; + this.indicesPrivileges = indicesPrivileges != null ? indicesPrivileges : IndicesPrivileges.NONE; + this.runAs = runAs != null ? runAs : Strings.EMPTY_ARRAY; + this.metadata = metadata != null ? Collections.unmodifiableMap(metadata) : Collections.emptyMap(); + this.transientMetadata = transientMetadata != null ? Collections.unmodifiableMap(transientMetadata) : + Collections.singletonMap("enabled", true); + } + + public String getName() { + return this.name; + } + + public String[] getClusterPrivileges() { + return this.clusterPrivileges; + } + + public IndicesPrivileges[] getIndicesPrivileges() { + return this.indicesPrivileges; + } + + public String[] getRunAs() { + return this.runAs; + } + + public Map getMetadata() { + return metadata; + } + + public Map getTransientMetadata() { + return transientMetadata; + } + + public boolean isUsingDocumentOrFieldLevelSecurity() { + return Arrays.stream(indicesPrivileges).anyMatch(ip -> ip.isUsingDocumentLevelSecurity() || ip.isUsingFieldLevelSecurity()); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("Role["); + sb.append("name=").append(name); + sb.append(", cluster=[").append(Strings.arrayToCommaDelimitedString(clusterPrivileges)); + sb.append("], indicesPrivileges=["); + for (IndicesPrivileges group : indicesPrivileges) { + sb.append(group.toString()).append(","); + } + sb.append("], runAs=[").append(Strings.arrayToCommaDelimitedString(runAs)); + sb.append("], metadata=["); + MetadataUtils.writeValue(sb, metadata); + sb.append("]]"); + return sb.toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + RoleDescriptor that = (RoleDescriptor) o; + + if (!name.equals(that.name)) return false; + if (!Arrays.equals(clusterPrivileges, that.clusterPrivileges)) return false; + if (!Arrays.equals(indicesPrivileges, that.indicesPrivileges)) return false; + if (!metadata.equals(that.getMetadata())) return false; + return Arrays.equals(runAs, that.runAs); + } + + @Override + public int hashCode() { + int result = name.hashCode(); + result = 31 * result + Arrays.hashCode(clusterPrivileges); + result = 31 * result + Arrays.hashCode(indicesPrivileges); + result = 31 * result + Arrays.hashCode(runAs); + result = 31 * result + metadata.hashCode(); + return result; + 
} + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return toXContent(builder, params, false); + } + + /** + * Generates x-content for this {@link RoleDescriptor} instance. + * + * @param builder the x-content builder + * @param params the parameters for x-content generation directives + * @param docCreation {@code true} if the x-content is being generated for creating a document + * in the security index, {@code false} if the x-content being generated + * is for API display purposes + * @return x-content builder + * @throws IOException if there was an error writing the x-content to the builder + */ + public XContentBuilder toXContent(XContentBuilder builder, Params params, boolean docCreation) throws IOException { + builder.startObject(); + builder.array(Fields.CLUSTER.getPreferredName(), clusterPrivileges); + builder.array(Fields.INDICES.getPreferredName(), (Object[]) indicesPrivileges); + if (runAs != null) { + builder.array(Fields.RUN_AS.getPreferredName(), runAs); + } + builder.field(Fields.METADATA.getPreferredName(), metadata); + if (docCreation) { + builder.field(Fields.TYPE.getPreferredName(), ROLE_TYPE); + } else { + builder.field(Fields.TRANSIENT_METADATA.getPreferredName(), transientMetadata); + } + return builder.endObject(); + } + + public static RoleDescriptor readFrom(StreamInput in) throws IOException { + String name = in.readString(); + String[] clusterPrivileges = in.readStringArray(); + int size = in.readVInt(); + IndicesPrivileges[] indicesPrivileges = new IndicesPrivileges[size]; + for (int i = 0; i < size; i++) { + indicesPrivileges[i] = IndicesPrivileges.createFrom(in); + } + String[] runAs = in.readStringArray(); + Map metadata = in.readMap(); + + final Map transientMetadata; + if (in.getVersion().onOrAfter(Version.V_5_2_0)) { + transientMetadata = in.readMap(); + } else { + transientMetadata = Collections.emptyMap(); + } + return new RoleDescriptor(name, clusterPrivileges, indicesPrivileges, runAs, metadata, transientMetadata); + } + + public static void writeTo(RoleDescriptor descriptor, StreamOutput out) throws IOException { + out.writeString(descriptor.name); + out.writeStringArray(descriptor.clusterPrivileges); + out.writeVInt(descriptor.indicesPrivileges.length); + for (IndicesPrivileges group : descriptor.indicesPrivileges) { + group.writeTo(out); + } + out.writeStringArray(descriptor.runAs); + out.writeMap(descriptor.metadata); + if (out.getVersion().onOrAfter(Version.V_5_2_0)) { + out.writeMap(descriptor.transientMetadata); + } + } + + public static RoleDescriptor parse(String name, BytesReference source, boolean allow2xFormat, XContentType xContentType) + throws IOException { + assert name != null; + // EMPTY is safe here because we never use namedObject + try (InputStream stream = source.streamInput(); + XContentParser parser = xContentType.xContent() + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + return parse(name, parser, allow2xFormat); + } + } + + public static RoleDescriptor parse(String name, XContentParser parser, boolean allow2xFormat) throws IOException { + // validate name + Validation.Error validationError = Validation.Roles.validateRoleName(name, true); + if (validationError != null) { + ValidationException ve = new ValidationException(); + ve.addValidationError(validationError.toString()); + throw ve; + } + + // advance to the START_OBJECT token if needed + XContentParser.Token token = parser.currentToken() == null ? 
parser.nextToken() : parser.currentToken(); + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("failed to parse role [{}]. expected an object but found [{}] instead", name, token); + } + String currentFieldName = null; + IndicesPrivileges[] indicesPrivileges = null; + String[] clusterPrivileges = null; + String[] runAsUsers = null; + Map metadata = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Fields.INDEX.match(currentFieldName, parser.getDeprecationHandler()) + || Fields.INDICES.match(currentFieldName, parser.getDeprecationHandler())) { + indicesPrivileges = parseIndices(name, parser, allow2xFormat); + } else if (Fields.RUN_AS.match(currentFieldName, parser.getDeprecationHandler())) { + runAsUsers = readStringArray(name, parser, true); + } else if (Fields.CLUSTER.match(currentFieldName, parser.getDeprecationHandler())) { + clusterPrivileges = readStringArray(name, parser, true); + } else if (Fields.METADATA.match(currentFieldName, parser.getDeprecationHandler())) { + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException( + "expected field [{}] to be of type object, but found [{}] instead", currentFieldName, token); + } + metadata = parser.map(); + } else if (Fields.TRANSIENT_METADATA.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.START_OBJECT) { + // consume object but just drop + parser.map(); + } else { + throw new ElasticsearchParseException("expected field [{}] to be an object, but found [{}] instead", + currentFieldName, token); + } + } else if (Fields.TYPE.match(currentFieldName, parser.getDeprecationHandler())) { + // don't need it + } else { + throw new ElasticsearchParseException("failed to parse role [{}]. unexpected field [{}]", name, currentFieldName); + } + } + return new RoleDescriptor(name, clusterPrivileges, indicesPrivileges, runAsUsers, metadata); + } + + private static String[] readStringArray(String roleName, XContentParser parser, boolean allowNull) throws IOException { + try { + return XContentUtils.readStringArray(parser, allowNull); + } catch (ElasticsearchParseException e) { + // re-wrap in order to add the role name + throw new ElasticsearchParseException("failed to parse role [{}]", e, roleName); + } + } + + public static RoleDescriptor parsePrivilegesCheck(String description, BytesReference source, XContentType xContentType) + throws IOException { + try (InputStream stream = source.streamInput(); + XContentParser parser = xContentType.xContent() + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + // advance to the START_OBJECT token + XContentParser.Token token = parser.nextToken(); + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("failed to parse privileges check [{}]. 
expected an object but found [{}] instead", + description, token); + } + String currentFieldName = null; + IndicesPrivileges[] indexPrivileges = null; + String[] clusterPrivileges = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Fields.INDEX.match(currentFieldName, parser.getDeprecationHandler())) { + indexPrivileges = parseIndices(description, parser, false); + } else if (Fields.CLUSTER.match(currentFieldName, parser.getDeprecationHandler())) { + clusterPrivileges = readStringArray(description, parser, true); + } else { + throw new ElasticsearchParseException("failed to parse privileges check [{}]. unexpected field [{}]", + description, currentFieldName); + } + } + if (indexPrivileges == null && clusterPrivileges == null) { + throw new ElasticsearchParseException("failed to parse privileges check [{}]. fields [{}] and [{}] are both missing", + description, Fields.INDEX, Fields.CLUSTER); + } + if (indexPrivileges != null) { + if (Arrays.stream(indexPrivileges).anyMatch(IndicesPrivileges::isUsingFieldLevelSecurity)) { + throw new ElasticsearchParseException("Field [{}] is not supported in a has_privileges request", + RoleDescriptor.Fields.FIELD_PERMISSIONS); + } + if (Arrays.stream(indexPrivileges).anyMatch(IndicesPrivileges::isUsingDocumentLevelSecurity)) { + throw new ElasticsearchParseException("Field [{}] is not supported in a has_privileges request", Fields.QUERY); + } + } + return new RoleDescriptor(description, clusterPrivileges, indexPrivileges, null); + } + } + + private static RoleDescriptor.IndicesPrivileges[] parseIndices(String roleName, XContentParser parser, + boolean allow2xFormat) throws IOException { + if (parser.currentToken() != XContentParser.Token.START_ARRAY) { + throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. expected field [{}] value " + + "to be an array, but found [{}] instead", roleName, parser.currentName(), parser.currentToken()); + } + List privileges = new ArrayList<>(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + privileges.add(parseIndex(roleName, parser, allow2xFormat)); + } + return privileges.toArray(new IndicesPrivileges[privileges.size()]); + } + + private static RoleDescriptor.IndicesPrivileges parseIndex(String roleName, XContentParser parser, + boolean allow2xFormat) throws IOException { + XContentParser.Token token = parser.currentToken(); + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. 
expected field [{}] value to " + + "be an array of objects, but found an array element of type [{}]", roleName, parser.currentName(), token); + } + String currentFieldName = null; + String[] names = null; + BytesReference query = null; + String[] privileges = null; + String[] grantedFields = null; + String[] deniedFields = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Fields.NAMES.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_STRING) { + names = new String[] { parser.text() }; + } else if (token == XContentParser.Token.START_ARRAY) { + names = readStringArray(roleName, parser, false); + if (names.length == 0) { + throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. [{}] cannot be an empty " + + "array", roleName, currentFieldName); + } + } else { + throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. expected field [{}] " + + "value to be a string or an array of strings, but found [{}] instead", roleName, currentFieldName, token); + } + } else if (Fields.QUERY.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.START_OBJECT) { + XContentBuilder builder = JsonXContent.contentBuilder(); + builder.generator().copyCurrentStructure(parser); + query = BytesReference.bytes(builder); + } else if (token == XContentParser.Token.VALUE_STRING) { + final String text = parser.text(); + if (text.isEmpty() == false) { + query = new BytesArray(text); + } + } else if (token != XContentParser.Token.VALUE_NULL) { + throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. expected field [{}] " + + "value to be null, a string, an array, or an object, but found [{}] instead", roleName, currentFieldName, + token); + } + } else if (Fields.FIELD_PERMISSIONS.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.START_OBJECT) { + token = parser.nextToken(); + do { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + if (Fields.GRANT_FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { + parser.nextToken(); + grantedFields = readStringArray(roleName, parser, true); + if (grantedFields == null) { + throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. {} must not " + + "be null.", roleName, Fields.GRANT_FIELDS); + } + } else if (Fields.EXCEPT_FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { + parser.nextToken(); + deniedFields = readStringArray(roleName, parser, true); + if (deniedFields == null) { + throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. {} must not " + + "be null.", roleName, Fields.EXCEPT_FIELDS); + } + } else { + throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. " + + "\"{}\" only accepts options {} and {}, but got: {}", + roleName, Fields.FIELD_PERMISSIONS, Fields.GRANT_FIELDS, Fields.EXCEPT_FIELDS + , parser.currentName()); + } + } else { + if (token == XContentParser.Token.END_OBJECT) { + throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. 
" + + "\"{}\" must not be empty.", roleName, Fields.FIELD_PERMISSIONS); + } else { + throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. expected {} but " + + "got {}.", roleName, XContentParser.Token.FIELD_NAME, + token); + } + } + } while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT); + } else { + throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. expected {} or {} but got {}" + + " in \"{}\".", roleName, XContentParser.Token.START_OBJECT, + XContentParser.Token.START_ARRAY, token, Fields.FIELD_PERMISSIONS); + } + } else if (Fields.PRIVILEGES.match(currentFieldName, parser.getDeprecationHandler())) { + privileges = readStringArray(roleName, parser, true); + } else if (Fields.FIELD_PERMISSIONS_2X.match(currentFieldName, parser.getDeprecationHandler())) { + if (allow2xFormat) { + grantedFields = readStringArray(roleName, parser, true); + } else { + throw new ElasticsearchParseException("[\"fields\": [...]] format has changed for field" + + " permissions in role [{}], use [\"{}\": {\"{}\":[...]," + "\"{}\":[...]}] instead", + roleName, Fields.FIELD_PERMISSIONS, Fields.GRANT_FIELDS, Fields.EXCEPT_FIELDS, roleName); + } + } else if (Fields.TRANSIENT_METADATA.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.START_OBJECT) { + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + // it is transient metadata, skip it + } + } else { + throw new ElasticsearchParseException("failed to parse transient metadata for role [{}]. expected {} but got {}" + + " in \"{}\".", roleName, XContentParser.Token.START_OBJECT, token, Fields.TRANSIENT_METADATA); + } + } else { + throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. unexpected field [{}]", + roleName, currentFieldName); + } + } + if (names == null) { + throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. missing required [{}] field", + roleName, Fields.NAMES.getPreferredName()); + } + if (privileges == null) { + throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. missing required [{}] field", + roleName, Fields.PRIVILEGES.getPreferredName()); + } + if (deniedFields != null && grantedFields == null) { + throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. {} requires {} if {} is given", + roleName, Fields.FIELD_PERMISSIONS, Fields.GRANT_FIELDS, Fields.EXCEPT_FIELDS); + } + return RoleDescriptor.IndicesPrivileges.builder() + .indices(names) + .privileges(privileges) + .grantedFields(grantedFields) + .deniedFields(deniedFields) + .query(query) + .build(); + } + + /** + * A class representing permissions for a group of indices mapped to + * privileges, field permissions, and a query. 
+ */ + public static class IndicesPrivileges implements ToXContentObject, Streamable { + + private static final IndicesPrivileges[] NONE = new IndicesPrivileges[0]; + + private String[] indices; + private String[] privileges; + private String[] grantedFields = null; + private String[] deniedFields = null; + private BytesReference query; + + private IndicesPrivileges() { + } + + public static Builder builder() { + return new Builder(); + } + + public String[] getIndices() { + return this.indices; + } + + public String[] getPrivileges() { + return this.privileges; + } + + @Nullable + public String[] getGrantedFields() { + return this.grantedFields; + } + + @Nullable + public String[] getDeniedFields() { + return this.deniedFields; + } + + @Nullable + public BytesReference getQuery() { + return this.query; + } + + public boolean isUsingDocumentLevelSecurity() { + return query != null; + } + + public boolean isUsingFieldLevelSecurity() { + return hasDeniedFields() || hasGrantedFields(); + } + + private boolean hasDeniedFields() { + return deniedFields != null && deniedFields.length > 0; + } + + private boolean hasGrantedFields() { + if (grantedFields != null && grantedFields.length >= 0) { + // we treat just '*' as no FLS since that's what the UI defaults to + if (grantedFields.length == 1 && "*".equals(grantedFields[0])) { + return false; + } else { + return true; + } + } + return false; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("IndicesPrivileges["); + sb.append("indices=[").append(Strings.arrayToCommaDelimitedString(indices)); + sb.append("], privileges=[").append(Strings.arrayToCommaDelimitedString(privileges)); + sb.append("], "); + if (grantedFields != null || deniedFields != null) { + sb.append(RoleDescriptor.Fields.FIELD_PERMISSIONS).append("=["); + if (grantedFields == null) { + sb.append(RoleDescriptor.Fields.GRANT_FIELDS).append("=null"); + } else { + sb.append(RoleDescriptor.Fields.GRANT_FIELDS).append("=[") + .append(Strings.arrayToCommaDelimitedString(grantedFields)); + sb.append("]"); + } + if (deniedFields == null) { + sb.append(", ").append(RoleDescriptor.Fields.EXCEPT_FIELDS).append("=null"); + } else { + sb.append(", ").append(RoleDescriptor.Fields.EXCEPT_FIELDS).append("=[") + .append(Strings.arrayToCommaDelimitedString(deniedFields)); + sb.append("]"); + } + sb.append("]"); + } + if (query != null) { + sb.append(", query="); + sb.append(query.utf8ToString()); + } + sb.append("]"); + return sb.toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + IndicesPrivileges that = (IndicesPrivileges) o; + + if (!Arrays.equals(indices, that.indices)) return false; + if (!Arrays.equals(privileges, that.privileges)) return false; + if (!Arrays.equals(grantedFields, that.grantedFields)) return false; + if (!Arrays.equals(deniedFields, that.deniedFields)) return false; + return !(query != null ? !query.equals(that.query) : that.query != null); + } + + @Override + public int hashCode() { + int result = Arrays.hashCode(indices); + result = 31 * result + Arrays.hashCode(privileges); + result = 31 * result + Arrays.hashCode(grantedFields); + result = 31 * result + Arrays.hashCode(deniedFields); + result = 31 * result + (query != null ? 
query.hashCode() : 0); + return result; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.array("names", indices); + builder.array("privileges", privileges); + if (grantedFields != null || deniedFields != null) { + builder.startObject(RoleDescriptor.Fields.FIELD_PERMISSIONS.getPreferredName()); + if (grantedFields != null) { + builder.array(RoleDescriptor.Fields.GRANT_FIELDS.getPreferredName(), grantedFields); + } + if (deniedFields != null) { + builder.array(RoleDescriptor.Fields.EXCEPT_FIELDS.getPreferredName(), deniedFields); + } + builder.endObject(); + } + if (query != null) { + builder.field("query", query.utf8ToString()); + } + return builder.endObject(); + } + + public static IndicesPrivileges createFrom(StreamInput in) throws IOException { + IndicesPrivileges ip = new IndicesPrivileges(); + ip.readFrom(in); + return ip; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + this.indices = in.readStringArray(); + this.grantedFields = in.readOptionalStringArray(); + this.deniedFields = in.readOptionalStringArray(); + this.privileges = in.readStringArray(); + this.query = in.readOptionalBytesReference(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeStringArray(indices); + out.writeOptionalStringArray(grantedFields); + out.writeOptionalStringArray(deniedFields); + out.writeStringArray(privileges); + out.writeOptionalBytesReference(query); + } + + public static class Builder { + + private IndicesPrivileges indicesPrivileges = new IndicesPrivileges(); + + private Builder() { + } + + public Builder indices(String... indices) { + indicesPrivileges.indices = indices; + return this; + } + + public Builder privileges(String... privileges) { + indicesPrivileges.privileges = privileges; + return this; + } + + public Builder grantedFields(String... grantedFields) { + indicesPrivileges.grantedFields = grantedFields; + return this; + } + + public Builder deniedFields(String... deniedFields) { + indicesPrivileges.deniedFields = deniedFields; + return this; + } + + public Builder query(@Nullable String query) { + return query(query == null ? 
null : new BytesArray(query)); + } + + public Builder query(@Nullable BytesReference query) { + if (query == null) { + indicesPrivileges.query = null; + } else { + indicesPrivileges.query = query; + } + return this; + } + + public IndicesPrivileges build() { + if (indicesPrivileges.indices == null || indicesPrivileges.indices.length == 0) { + throw new IllegalArgumentException("indices privileges must refer to at least one index name or index name pattern"); + } + if (indicesPrivileges.privileges == null || indicesPrivileges.privileges.length == 0) { + throw new IllegalArgumentException("indices privileges must define at least one privilege"); + } + return indicesPrivileges; + } + } + } + + public interface Fields { + ParseField CLUSTER = new ParseField("cluster"); + ParseField INDEX = new ParseField("index"); + ParseField INDICES = new ParseField("indices"); + ParseField RUN_AS = new ParseField("run_as"); + ParseField NAMES = new ParseField("names"); + ParseField QUERY = new ParseField("query"); + ParseField PRIVILEGES = new ParseField("privileges"); + ParseField FIELD_PERMISSIONS = new ParseField("field_security"); + ParseField FIELD_PERMISSIONS_2X = new ParseField("fields"); + ParseField GRANT_FIELDS = new ParseField("grant"); + ParseField EXCEPT_FIELDS = new ParseField("except"); + ParseField METADATA = new ParseField("metadata"); + ParseField TRANSIENT_METADATA = new ParseField("transient_metadata"); + ParseField TYPE = new ParseField("type"); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReader.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReader.java new file mode 100644 index 0000000000000..c7d84b3c40fa9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReader.java @@ -0,0 +1,220 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authz.accesscontrol; + +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.FilterDirectoryReader; +import org.apache.lucene.index.FilterLeafReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.util.BitSet; +import org.apache.lucene.util.BitSetIterator; +import org.apache.lucene.util.Bits; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.cache.Cache; +import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.logging.LoggerMessageFormat; +import org.elasticsearch.index.cache.bitset.BitsetFilterCache; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; + +/** + * A reader that only exposes documents via {@link #getLiveDocs()} that matches with the provided role query. 
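+ * <p>
+ * For illustration, a caller holding a {@link BitsetFilterCache} and an already parsed role {@link Query}
+ * might wrap a reader roughly like this (the variable names are assumed, not part of this class):
+ * <pre>{@code
+ * DirectoryReader restricted = DocumentSubsetReader.wrap(reader, bitsetFilterCache, roleQuery);
+ * // numDocs() and getLiveDocs() of "restricted" only reflect documents matching the role query
+ * }</pre>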
+ */ +public final class DocumentSubsetReader extends FilterLeafReader { + + public static DocumentSubsetDirectoryReader wrap(DirectoryReader in, BitsetFilterCache bitsetFilterCache, + Query roleQuery) throws IOException { + return new DocumentSubsetDirectoryReader(in, bitsetFilterCache, roleQuery); + } + + /** + * Cache of the number of live docs for a given (segment, role query) pair. + * This is useful because numDocs() is called eagerly by BaseCompositeReader so computing + * numDocs() lazily doesn't help. Plus it helps reuse the result of the computation either + * between refreshes, or across refreshes if no more documents were deleted in the + * considered segment. The size of the top-level map is bounded by the number of segments + * on the node. + */ + static final Map> NUM_DOCS_CACHE = new ConcurrentHashMap<>(); + + /** + * Compute the number of live documents. This method is SLOW. + */ + private static int computeNumDocs(LeafReader reader, Query roleQuery, BitSet roleQueryBits) { + final Bits liveDocs = reader.getLiveDocs(); + if (roleQueryBits == null) { + return 0; + } else if (liveDocs == null) { + // slow + return roleQueryBits.cardinality(); + } else { + // very slow, but necessary in order to be correct + int numDocs = 0; + DocIdSetIterator it = new BitSetIterator(roleQueryBits, 0L); // we don't use the cost + try { + for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) { + if (liveDocs.get(doc)) { + numDocs++; + } + } + return numDocs; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + } + + /** + * Like {@link #computeNumDocs} but caches results. + */ + private static int getNumDocs(LeafReader reader, Query roleQuery, BitSet roleQueryBits) throws IOException, ExecutionException { + IndexReader.CacheHelper cacheHelper = reader.getReaderCacheHelper(); // this one takes deletes into account + if (cacheHelper == null) { + throw new IllegalStateException("Reader " + reader + " does not support caching"); + } + final boolean[] added = new boolean[] { false }; + Cache perReaderCache = NUM_DOCS_CACHE.computeIfAbsent(cacheHelper.getKey(), + key -> { + added[0] = true; + return CacheBuilder.builder() + // Not configurable, this limit only exists so that if a role query is updated + // then we won't risk OOME because of old role queries that are not used anymore + .setMaximumWeight(1000) + .weigher((k, v) -> 1) // just count + .build(); + }); + if (added[0]) { + IndexReader.ClosedListener closedListener = NUM_DOCS_CACHE::remove; + try { + cacheHelper.addClosedListener(closedListener); + } catch (AlreadyClosedException e) { + closedListener.onClose(cacheHelper.getKey()); + throw e; + } + } + return perReaderCache.computeIfAbsent(roleQuery, q -> computeNumDocs(reader, roleQuery, roleQueryBits)); + } + + public static final class DocumentSubsetDirectoryReader extends FilterDirectoryReader { + + private final Query roleQuery; + private final BitsetFilterCache bitsetFilterCache; + + DocumentSubsetDirectoryReader(final DirectoryReader in, final BitsetFilterCache bitsetFilterCache, final Query roleQuery) + throws IOException { + super(in, new SubReaderWrapper() { + @Override + public LeafReader wrap(LeafReader reader) { + try { + return new DocumentSubsetReader(reader, bitsetFilterCache, roleQuery); + } catch (Exception e) { + throw ExceptionsHelper.convertToElastic(e); + } + } + }); + this.bitsetFilterCache = bitsetFilterCache; + this.roleQuery = roleQuery; + + verifyNoOtherDocumentSubsetDirectoryReaderIsWrapped(in); + } + + 
@Override + protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException { + return new DocumentSubsetDirectoryReader(in, bitsetFilterCache, roleQuery); + } + + private static void verifyNoOtherDocumentSubsetDirectoryReaderIsWrapped(DirectoryReader reader) { + if (reader instanceof FilterDirectoryReader) { + FilterDirectoryReader filterDirectoryReader = (FilterDirectoryReader) reader; + if (filterDirectoryReader instanceof DocumentSubsetDirectoryReader) { + throw new IllegalArgumentException(LoggerMessageFormat.format("Can't wrap [{}] twice", + DocumentSubsetDirectoryReader.class)); + } else { + verifyNoOtherDocumentSubsetDirectoryReaderIsWrapped(filterDirectoryReader.getDelegate()); + } + } + } + + @Override + public CacheHelper getReaderCacheHelper() { + return in.getReaderCacheHelper(); + } + } + + private final BitSet roleQueryBits; + private final int numDocs; + + private DocumentSubsetReader(final LeafReader in, BitsetFilterCache bitsetFilterCache, final Query roleQuery) throws Exception { + super(in); + this.roleQueryBits = bitsetFilterCache.getBitSetProducer(roleQuery).getBitSet(in.getContext()); + this.numDocs = getNumDocs(in, roleQuery, roleQueryBits); + } + + @Override + public Bits getLiveDocs() { + final Bits actualLiveDocs = in.getLiveDocs(); + if (roleQueryBits == null) { + // If we would a null liveDocs then that would mean that no docs are marked as deleted, + // but that isn't the case. No docs match with the role query and therefor all docs are marked as deleted + return new Bits.MatchNoBits(in.maxDoc()); + } else if (actualLiveDocs == null) { + return roleQueryBits; + } else { + // apply deletes when needed: + return new Bits() { + + @Override + public boolean get(int index) { + return roleQueryBits.get(index) && actualLiveDocs.get(index); + } + + @Override + public int length() { + return roleQueryBits.length(); + } + }; + } + } + + @Override + public int numDocs() { + return numDocs; + } + + @Override + public boolean hasDeletions() { + // we always return liveDocs and hide docs: + return true; + } + + @Override + public CacheHelper getCoreCacheHelper() { + return in.getCoreCacheHelper(); + } + + @Override + public CacheHelper getReaderCacheHelper() { + // Not delegated since we change the live docs + return null; + } + + BitSet getRoleQueryBits() { + return roleQueryBits; + } + + Bits getWrappedLiveDocs() { + return in.getLiveDocs(); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java new file mode 100644 index 0000000000000..5779924bb27fb --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java @@ -0,0 +1,471 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authz.accesscontrol; + +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.Fields; +import org.apache.lucene.index.FilterDirectoryReader; +import org.apache.lucene.index.FilterLeafReader; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.FilterIterator; +import org.apache.lucene.util.automaton.CharacterRunAutomaton; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.logging.LoggerMessageFormat; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.mapper.FieldNamesFieldMapper; +import org.elasticsearch.index.mapper.SourceFieldMapper; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +/** + * A {@link FilterLeafReader} that exposes only a subset + * of fields from the underlying wrapped reader. + */ +// based on lucene/test-framework's FieldFilterLeafReader. +public final class FieldSubsetReader extends FilterLeafReader { + + /** + * Wraps a provided DirectoryReader, exposing a subset of fields. + *
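+ * <p>
+ * For illustration, assuming a {@link CharacterRunAutomaton} that accepts the permitted field names,
+ * a filtered reader might be obtained roughly like this:
+ * <pre>{@code
+ * DirectoryReader visibleFieldsOnly = FieldSubsetReader.wrap(reader, filter);
+ * }</pre>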
+ * <p>
+ * Note that for convenience, the returned reader + * can be used normally (e.g. passed to {@link DirectoryReader#openIfChanged(DirectoryReader)}) + * and so on. + * @param in reader to filter + * @param filter fields to filter. + */ + public static DirectoryReader wrap(DirectoryReader in, CharacterRunAutomaton filter) throws IOException { + return new FieldSubsetDirectoryReader(in, filter); + } + + // wraps subreaders with fieldsubsetreaders. + static class FieldSubsetDirectoryReader extends FilterDirectoryReader { + + private final CharacterRunAutomaton filter; + + FieldSubsetDirectoryReader(DirectoryReader in, final CharacterRunAutomaton filter) throws IOException { + super(in, new FilterDirectoryReader.SubReaderWrapper() { + @Override + public LeafReader wrap(LeafReader reader) { + return new FieldSubsetReader(reader, filter); + } + }); + this.filter = filter; + verifyNoOtherFieldSubsetDirectoryReaderIsWrapped(in); + } + + @Override + protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException { + return new FieldSubsetDirectoryReader(in, filter); + } + + /** Return the automaton that is used to filter fields. */ + CharacterRunAutomaton getFilter() { + return filter; + } + + private static void verifyNoOtherFieldSubsetDirectoryReaderIsWrapped(DirectoryReader reader) { + if (reader instanceof FilterDirectoryReader) { + FilterDirectoryReader filterDirectoryReader = (FilterDirectoryReader) reader; + if (filterDirectoryReader instanceof FieldSubsetDirectoryReader) { + throw new IllegalArgumentException(LoggerMessageFormat.format("Can't wrap [{}] twice", + FieldSubsetDirectoryReader.class)); + } else { + verifyNoOtherFieldSubsetDirectoryReaderIsWrapped(filterDirectoryReader.getDelegate()); + } + } + } + + @Override + public CacheHelper getReaderCacheHelper() { + return in.getReaderCacheHelper(); + } + } + + /** List of filtered fields */ + private final FieldInfos fieldInfos; + /** An automaton that only accepts authorized fields. */ + private final CharacterRunAutomaton filter; + + /** + * Wrap a single segment, exposing a subset of its fields. + */ + FieldSubsetReader(LeafReader in, CharacterRunAutomaton filter) { + super(in); + ArrayList filteredInfos = new ArrayList<>(); + for (FieldInfo fi : in.getFieldInfos()) { + if (filter.run(fi.name)) { + filteredInfos.add(fi); + } + } + fieldInfos = new FieldInfos(filteredInfos.toArray(new FieldInfo[filteredInfos.size()])); + this.filter = filter; + } + + /** returns true if this field is allowed. */ + boolean hasField(String field) { + return fieldInfos.fieldInfo(field) != null; + } + + @Override + public FieldInfos getFieldInfos() { + return fieldInfos; + } + + @Override + public Fields getTermVectors(int docID) throws IOException { + Fields f = super.getTermVectors(docID); + if (f == null) { + return null; + } + f = new FieldFilterFields(f); + // we need to check for emptyness, so we can return null: + return f.iterator().hasNext() ? f : null; + } + + /** Filter a map by a {@link CharacterRunAutomaton} that defines the fields to retain. 
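+ * <p>
+ * For illustration, assuming the {@code Automatons} helper from this package and an automaton built from
+ * the pattern {@code "foo*"}:
+ * <pre>{@code
+ * CharacterRunAutomaton include = new CharacterRunAutomaton(Automatons.patterns("foo*"));
+ * Map<String, Object> inner = new HashMap<>();
+ * inner.put("bar", "visible");
+ * Map<String, Object> source = new HashMap<>();
+ * source.put("foo", inner);
+ * source.put("other", "hidden");
+ * Map<String, Object> filtered = filter(source, include, 0);
+ * // "filtered" keeps the "foo" entry (including its nested keys) and drops "other"
+ * }</pre>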
*/ + static Map filter(Map map, CharacterRunAutomaton includeAutomaton, int initialState) { + Map filtered = new HashMap<>(); + for (Map.Entry entry : map.entrySet()) { + String key = entry.getKey(); + + int state = step(includeAutomaton, key, initialState); + if (state == -1) { + continue; + } + + Object value = entry.getValue(); + + if (value instanceof Map) { + state = includeAutomaton.step(state, '.'); + if (state == -1) { + continue; + } + + Map mapValue = (Map) value; + Map filteredValue = filter(mapValue, includeAutomaton, state); + if (filteredValue.isEmpty() == false) { + filtered.put(key, filteredValue); + } + } else if (value instanceof Iterable) { + Iterable iterableValue = (Iterable) value; + List filteredValue = filter(iterableValue, includeAutomaton, state); + if (filteredValue.isEmpty() == false) { + filtered.put(key, filteredValue); + } + } else if (includeAutomaton.isAccept(state)) { + filtered.put(key, value); + } + } + return filtered; + } + + /** Filter a list by a {@link CharacterRunAutomaton} that defines the fields to retain. */ + private static List filter(Iterable iterable, CharacterRunAutomaton includeAutomaton, int initialState) { + List filtered = new ArrayList<>(); + for (Object value : iterable) { + if (value instanceof Map) { + int state = includeAutomaton.step(initialState, '.'); + if (state == -1) { + continue; + } + Map filteredValue = filter((Map)value, includeAutomaton, state); + if (filteredValue.isEmpty() == false) { + filtered.add(filteredValue); + } + } else if (value instanceof Iterable) { + List filteredValue = filter((Iterable) value, includeAutomaton, initialState); + if (filteredValue.isEmpty() == false) { + filtered.add(filteredValue); + } + } else if (includeAutomaton.isAccept(initialState)) { + filtered.add(value); + } + } + return filtered; + } + + /** Step through all characters of the provided string, and return the + * resulting state, or -1 if that did not lead to a valid state. 
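+ * <p>
+ * For illustration, {@code step(automaton, "foo", 0)} behaves roughly like:
+ * <pre>{@code
+ * int state = 0;
+ * state = automaton.step(state, 'f');
+ * state = automaton.step(state, 'o');
+ * state = automaton.step(state, 'o');
+ * // except that the loop stops as soon as a step returns -1
+ * }</pre>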
*/ + private static int step(CharacterRunAutomaton automaton, String key, int state) { + for (int i = 0; state != -1 && i < key.length(); ++i) { + state = automaton.step(state, key.charAt(i)); + } + return state; + } + + @Override + public void document(final int docID, final StoredFieldVisitor visitor) throws IOException { + super.document(docID, new StoredFieldVisitor() { + @Override + public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException { + if (SourceFieldMapper.NAME.equals(fieldInfo.name)) { + // for _source, parse, filter out the fields we care about, and serialize back downstream + BytesReference bytes = new BytesArray(value); + Tuple> result = XContentHelper.convertToMap(bytes, true); + Map transformedSource = filter(result.v2(), filter, 0); + XContentBuilder xContentBuilder = XContentBuilder.builder(result.v1().xContent()).map(transformedSource); + visitor.binaryField(fieldInfo, BytesReference.toBytes(BytesReference.bytes(xContentBuilder))); + } else { + visitor.binaryField(fieldInfo, value); + } + } + + @Override + public void stringField(FieldInfo fieldInfo, byte[] value) throws IOException { + visitor.stringField(fieldInfo, value); + } + + @Override + public void intField(FieldInfo fieldInfo, int value) throws IOException { + visitor.intField(fieldInfo, value); + } + + @Override + public void longField(FieldInfo fieldInfo, long value) throws IOException { + visitor.longField(fieldInfo, value); + } + + @Override + public void floatField(FieldInfo fieldInfo, float value) throws IOException { + visitor.floatField(fieldInfo, value); + } + + @Override + public void doubleField(FieldInfo fieldInfo, double value) throws IOException { + visitor.doubleField(fieldInfo, value); + } + + @Override + public Status needsField(FieldInfo fieldInfo) throws IOException { + return hasField(fieldInfo.name) ? visitor.needsField(fieldInfo) : Status.NO; + } + }); + } + + @Override + public Terms terms(String field) throws IOException { + return wrapTerms(super.terms(field), field); + } + + @Override + public NumericDocValues getNumericDocValues(String field) throws IOException { + return hasField(field) ? super.getNumericDocValues(field) : null; + } + + @Override + public BinaryDocValues getBinaryDocValues(String field) throws IOException { + return hasField(field) ? super.getBinaryDocValues(field) : null; + } + + @Override + public SortedDocValues getSortedDocValues(String field) throws IOException { + return hasField(field) ? super.getSortedDocValues(field) : null; + } + + @Override + public SortedNumericDocValues getSortedNumericDocValues(String field) throws IOException { + return hasField(field) ? super.getSortedNumericDocValues(field) : null; + } + + @Override + public SortedSetDocValues getSortedSetDocValues(String field) throws IOException { + return hasField(field) ? super.getSortedSetDocValues(field) : null; + } + + @Override + public NumericDocValues getNormValues(String field) throws IOException { + return hasField(field) ? super.getNormValues(field) : null; + } + + // we share core cache keys (for e.g. fielddata) + + @Override + public CacheHelper getCoreCacheHelper() { + return in.getCoreCacheHelper(); + } + + @Override + public CacheHelper getReaderCacheHelper() { + return in.getReaderCacheHelper(); + } + + /** + * Filters the Fields instance from the postings. + *
+ * <p>
+ * In addition to only returning fields allowed in this subset, + * the ES internal _field_names (used by exists filter) has special handling, + * to hide terms for fields that don't exist. + */ + class FieldFilterFields extends FilterFields { + + FieldFilterFields(Fields in) { + super(in); + } + + @Override + public int size() { + // this information is not cheap, return -1 like MultiFields does: + return -1; + } + + @Override + public Iterator iterator() { + return new FilterIterator(super.iterator()) { + @Override + protected boolean predicateFunction(String field) { + return hasField(field); + } + }; + } + + @Override + public Terms terms(String field) throws IOException { + return wrapTerms(super.terms(field), field); + } + } + + private Terms wrapTerms(Terms terms, String field) { + if (!hasField(field)) { + return null; + } else if (FieldNamesFieldMapper.NAME.equals(field)) { + // for the _field_names field, fields for the document + // are encoded as postings, where term is the field. + // so we hide terms for fields we filter out. + if (terms != null) { + // check for null, in case term dictionary is not a ghostbuster + // So just because its in fieldinfos and "indexed=true" doesn't mean you can go grab a Terms for it. + // It just means at one point there was a document with that field indexed... + // The fields infos isn't updates/removed even if no docs refer to it + terms = new FieldNamesTerms(terms); + } + return terms; + } else { + return terms; + } + } + + /** + * Terms impl for _field_names (used by exists filter) that filters out terms + * representing fields that should not be visible in this reader. + */ + class FieldNamesTerms extends FilterTerms { + + FieldNamesTerms(Terms in) { + super(in); + } + + @Override + public TermsEnum iterator() throws IOException { + return new FieldNamesTermsEnum(in.iterator()); + } + + // we don't support field statistics (since we filter out terms) + // but this isn't really a big deal: _field_names is not used for ranking. + + @Override + public int getDocCount() throws IOException { + return -1; + } + + @Override + public long getSumDocFreq() throws IOException { + return -1; + } + + @Override + public long getSumTotalTermFreq() throws IOException { + return -1; + } + + @Override + public long size() throws IOException { + return -1; + } + } + + /** + * TermsEnum impl for _field_names (used by exists filter) that filters out terms + * representing fields that should not be visible in this reader. + */ + class FieldNamesTermsEnum extends FilterTermsEnum { + + FieldNamesTermsEnum(TermsEnum in) { + super(in); + } + + /** Return true if term is accepted (matches a field name in this reader). */ + boolean accept(BytesRef term) { + return hasField(term.utf8ToString()); + } + + @Override + public boolean seekExact(BytesRef term) throws IOException { + return accept(term) && in.seekExact(term); + } + + @Override + public SeekStatus seekCeil(BytesRef term) throws IOException { + SeekStatus status = in.seekCeil(term); + if (status == SeekStatus.END || accept(term())) { + return status; + } + return next() == null ? 
SeekStatus.END : SeekStatus.NOT_FOUND; + } + + @Override + public BytesRef next() throws IOException { + BytesRef next; + while ((next = in.next()) != null) { + if (accept(next)) { + break; + } + } + return next; + } + + // we don't support ordinals, but _field_names is not used in this way + + @Override + public void seekExact(long ord) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public long ord() throws IOException { + throw new UnsupportedOperationException(); + } + } + + @Override + public PointValues getPointValues(String fieldName) throws IOException { + if (hasField(fieldName)) { + return super.getPointValues(fieldName); + } else { + return null; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/IndicesAccessControl.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/IndicesAccessControl.java new file mode 100644 index 0000000000000..6df9ad834c1e5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/IndicesAccessControl.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authz.accesscontrol; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.xpack.core.security.authz.IndicesAndAliasesResolverField; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; + +import java.util.Collections; +import java.util.Map; +import java.util.Set; + +/** + * Encapsulates the field and document permissions per concrete index based on the current request. + */ +public class IndicesAccessControl { + + public static final IndicesAccessControl ALLOW_ALL = new IndicesAccessControl(true, Collections.emptyMap()); + public static final IndicesAccessControl ALLOW_NO_INDICES = new IndicesAccessControl(true, + Collections.singletonMap(IndicesAndAliasesResolverField.NO_INDEX_PLACEHOLDER, + new IndicesAccessControl.IndexAccessControl(true, new FieldPermissions(), null))); + + private final boolean granted; + private final Map indexPermissions; + + public IndicesAccessControl(boolean granted, Map indexPermissions) { + this.granted = granted; + this.indexPermissions = indexPermissions; + } + + /** + * @return The document and field permissions for an index if exist, otherwise null is returned. + * If null is being returned this means that there are no field or document level restrictions. + */ + @Nullable + public IndexAccessControl getIndexPermissions(String index) { + return indexPermissions.get(index); + } + + /** + * @return Whether any role / permission group is allowed to access all indices. + */ + public boolean isGranted() { + return granted; + } + + /** + * Encapsulates the field and document permissions for an index. 
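+ * <p>
+ * For illustration, an entry that grants access to a single index, with default field permissions and no
+ * document level restrictions, might be built roughly like this ({@code "my-index"} is an assumed name):
+ * <pre>{@code
+ * IndexAccessControl indexAccess = new IndexAccessControl(true, new FieldPermissions(), null);
+ * IndicesAccessControl accessControl = new IndicesAccessControl(true,
+ *         Collections.singletonMap("my-index", indexAccess));
+ * }</pre>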
+ */ + public static class IndexAccessControl { + + private final boolean granted; + private final FieldPermissions fieldPermissions; + private final Set queries; + + public IndexAccessControl(boolean granted, FieldPermissions fieldPermissions, Set queries) { + this.granted = granted; + this.fieldPermissions = fieldPermissions; + this.queries = queries; + } + + /** + * @return Whether any role / permission group is allowed to this index. + */ + public boolean isGranted() { + return granted; + } + + /** + * @return The allowed fields for this index permissions. + */ + public FieldPermissions getFieldPermissions() { + return fieldPermissions; + } + + /** + * @return The allowed documents expressed as a query for this index permission. If null is returned + * then this means that there are no document level restrictions + */ + @Nullable + public Set getQueries() { + return queries; + } + + @Override + public String toString() { + return "IndexAccessControl{" + + "granted=" + granted + + ", fieldPermissions=" + fieldPermissions + + ", queries=" + queries + + '}'; + } + } + + @Override + public String toString() { + return "IndicesAccessControl{" + + "granted=" + granted + + ", indexPermissions=" + indexPermissions + + '}'; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java new file mode 100644 index 0000000000000..f945b4e24c77c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java @@ -0,0 +1,367 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authz.accesscontrol; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.BulkScorer; +import org.apache.lucene.search.CollectionTerminatedException; +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.ConjunctionDISI; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; +import org.apache.lucene.search.join.BitSetProducer; +import org.apache.lucene.search.join.ToChildBlockJoinQuery; +import org.apache.lucene.util.BitSet; +import org.apache.lucene.util.BitSetIterator; +import org.apache.lucene.util.Bits; +import org.apache.lucene.util.SparseFixedBitSet; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.logging.LoggerMessageFormat; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.cache.bitset.BitsetFilterCache; +import org.elasticsearch.index.engine.EngineException; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.BoostingQueryBuilder; +import org.elasticsearch.index.query.ConstantScoreQueryBuilder; +import org.elasticsearch.index.query.GeoShapeQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.query.Rewriteable; +import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; +import org.elasticsearch.index.shard.IndexSearcherWrapper; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.script.TemplateScript; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetReader.DocumentSubsetDirectoryReader; +import org.elasticsearch.xpack.core.security.support.Exceptions; +import org.elasticsearch.xpack.core.security.user.User; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Function; + +import static org.apache.lucene.search.BooleanClause.Occur.SHOULD; 
+
+/**
+ * An {@link IndexSearcherWrapper} implementation that is used for field and document level security.
+ * <p>
+ * Based on the {@link ThreadContext} this class will enable field and/or document level security.
+ * <p>
+ * Field level security is enabled by wrapping the original {@link DirectoryReader} in a {@link FieldSubsetReader}
+ * in the {@link #wrap(DirectoryReader)} method.
+ * <p>
+ * Document level security is enabled by wrapping the original {@link DirectoryReader} in a {@link DocumentSubsetReader} + * instance. + */ +public class SecurityIndexSearcherWrapper extends IndexSearcherWrapper { + + private final Function queryShardContextProvider; + private final BitsetFilterCache bitsetFilterCache; + private final XPackLicenseState licenseState; + private final ThreadContext threadContext; + private final Logger logger; + private final ScriptService scriptService; + + public SecurityIndexSearcherWrapper(IndexSettings indexSettings, Function queryShardContextProvider, + BitsetFilterCache bitsetFilterCache, ThreadContext threadContext, XPackLicenseState licenseState, + ScriptService scriptService) { + this.scriptService = scriptService; + this.logger = Loggers.getLogger(getClass(), indexSettings.getSettings()); + this.queryShardContextProvider = queryShardContextProvider; + this.bitsetFilterCache = bitsetFilterCache; + this.threadContext = threadContext; + this.licenseState = licenseState; + } + + @Override + protected DirectoryReader wrap(DirectoryReader reader) { + if (licenseState.isSecurityEnabled() == false || licenseState.isDocumentAndFieldLevelSecurityAllowed() == false) { + return reader; + } + + try { + final IndicesAccessControl indicesAccessControl = getIndicesAccessControl(); + + ShardId shardId = ShardUtils.extractShardId(reader); + if (shardId == null) { + throw new IllegalStateException(LoggerMessageFormat.format("couldn't extract shardId from reader [{}]", reader)); + } + + IndicesAccessControl.IndexAccessControl permissions = indicesAccessControl.getIndexPermissions(shardId.getIndexName()); + // No permissions have been defined for an index, so don't intercept the index reader for access control + if (permissions == null) { + return reader; + } + + if (permissions.getQueries() != null) { + BooleanQuery.Builder filter = new BooleanQuery.Builder(); + for (BytesReference bytesReference : permissions.getQueries()) { + QueryShardContext queryShardContext = queryShardContextProvider.apply(shardId); + String templateResult = evaluateTemplate(bytesReference.utf8ToString()); + try (XContentParser parser = XContentFactory.xContent(templateResult) + .createParser(queryShardContext.getXContentRegistry(), LoggingDeprecationHandler.INSTANCE, templateResult)) { + QueryBuilder queryBuilder = queryShardContext.parseInnerQueryBuilder(parser); + verifyRoleQuery(queryBuilder); + failIfQueryUsesClient(queryBuilder, queryShardContext); + Query roleQuery = queryShardContext.toFilter(queryBuilder).query(); + filter.add(roleQuery, SHOULD); + if (queryShardContext.getMapperService().hasNested()) { + // If access is allowed on root doc then also access is allowed on all nested docs of that root document: + BitSetProducer rootDocs = queryShardContext.bitsetFilter( + Queries.newNonNestedFilter(queryShardContext.indexVersionCreated())); + ToChildBlockJoinQuery includeNestedDocs = new ToChildBlockJoinQuery(roleQuery, rootDocs); + filter.add(includeNestedDocs, SHOULD); + } + } + } + + // at least one of the queries should match + filter.setMinimumNumberShouldMatch(1); + reader = DocumentSubsetReader.wrap(reader, bitsetFilterCache, new ConstantScoreQuery(filter.build())); + } + + return permissions.getFieldPermissions().filter(reader); + } catch (IOException e) { + logger.error("Unable to apply field level security"); + throw ExceptionsHelper.convertToElastic(e); + } + } + + @Override + protected IndexSearcher wrap(IndexSearcher searcher) throws EngineException { + if 
(licenseState.isSecurityEnabled() == false || licenseState.isDocumentAndFieldLevelSecurityAllowed() == false) { + return searcher; + } + + final DirectoryReader directoryReader = (DirectoryReader) searcher.getIndexReader(); + if (directoryReader instanceof DocumentSubsetDirectoryReader) { + // The reasons why we return a custom searcher: + // 1) in the case the role query is sparse then large part of the main query can be skipped + // 2) If the role query doesn't match with any docs in a segment, that a segment can be skipped + IndexSearcher indexSearcher = new IndexSearcherWrapper((DocumentSubsetDirectoryReader) directoryReader); + indexSearcher.setQueryCache(indexSearcher.getQueryCache()); + indexSearcher.setQueryCachingPolicy(indexSearcher.getQueryCachingPolicy()); + indexSearcher.setSimilarity(indexSearcher.getSimilarity(true)); + return indexSearcher; + } + return searcher; + } + + static class IndexSearcherWrapper extends IndexSearcher { + + IndexSearcherWrapper(DocumentSubsetDirectoryReader r) { + super(r); + } + + @Override + protected void search(List leaves, Weight weight, Collector collector) throws IOException { + for (LeafReaderContext ctx : leaves) { // search each subreader + final LeafCollector leafCollector; + try { + leafCollector = collector.getLeafCollector(ctx); + } catch (CollectionTerminatedException e) { + // there is no doc of interest in this reader context + // continue with the following leaf + continue; + } + // The reader is always of type DocumentSubsetReader when we get here: + DocumentSubsetReader reader = (DocumentSubsetReader) ctx.reader(); + + BitSet roleQueryBits = reader.getRoleQueryBits(); + if (roleQueryBits == null) { + // nothing matches with the role query, so skip this segment: + continue; + } + + // if the role query result set is sparse then we should use the SparseFixedBitSet for advancing: + if (roleQueryBits instanceof SparseFixedBitSet) { + Scorer scorer = weight.scorer(ctx); + if (scorer != null) { + SparseFixedBitSet sparseFixedBitSet = (SparseFixedBitSet) roleQueryBits; + Bits realLiveDocs = reader.getWrappedLiveDocs(); + try { + intersectScorerAndRoleBits(scorer, sparseFixedBitSet, leafCollector, realLiveDocs); + } catch (CollectionTerminatedException e) { + // collection was terminated prematurely + // continue with the following leaf + } + } + } else { + BulkScorer bulkScorer = weight.bulkScorer(ctx); + if (bulkScorer != null) { + Bits liveDocs = reader.getLiveDocs(); + try { + bulkScorer.score(leafCollector, liveDocs); + } catch (CollectionTerminatedException e) { + // collection was terminated prematurely + // continue with the following leaf + } + } + } + } + } + } + + static void intersectScorerAndRoleBits(Scorer scorer, SparseFixedBitSet roleBits, LeafCollector collector, Bits acceptDocs) throws + IOException { + // ConjunctionDISI uses the DocIdSetIterator#cost() to order the iterators, so if roleBits has the lowest cardinality it should + // be used first: + DocIdSetIterator iterator = ConjunctionDISI.intersectIterators(Arrays.asList(new BitSetIterator(roleBits, + roleBits.approximateCardinality()), scorer.iterator())); + for (int docId = iterator.nextDoc(); docId < DocIdSetIterator.NO_MORE_DOCS; docId = iterator.nextDoc()) { + if (acceptDocs == null || acceptDocs.get(docId)) { + collector.collect(docId); + } + } + } + + String evaluateTemplate(String querySource) throws IOException { + // EMPTY is safe here because we never use namedObject + try (XContentParser parser = 
XContentFactory.xContent(querySource).createParser(NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, querySource)) { + XContentParser.Token token = parser.nextToken(); + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("Unexpected token [" + token + "]"); + } + token = parser.nextToken(); + if (token != XContentParser.Token.FIELD_NAME) { + throw new ElasticsearchParseException("Unexpected token [" + token + "]"); + } + if ("template".equals(parser.currentName())) { + token = parser.nextToken(); + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("Unexpected token [" + token + "]"); + } + Script script = Script.parse(parser); + // Add the user details to the params + Map params = new HashMap<>(); + if (script.getParams() != null) { + params.putAll(script.getParams()); + } + User user = getUser(); + Map userModel = new HashMap<>(); + userModel.put("username", user.principal()); + userModel.put("full_name", user.fullName()); + userModel.put("email", user.email()); + userModel.put("roles", Arrays.asList(user.roles())); + userModel.put("metadata", Collections.unmodifiableMap(user.metadata())); + params.put("_user", userModel); + // Always enforce mustache script lang: + script = new Script(script.getType(), + script.getType() == ScriptType.STORED ? null : "mustache", script.getIdOrCode(), script.getOptions(), params); + TemplateScript compiledTemplate = scriptService.compile(script, TemplateScript.CONTEXT).newInstance(script.getParams()); + return compiledTemplate.execute(); + } else { + return querySource; + } + } + } + + protected IndicesAccessControl getIndicesAccessControl() { + IndicesAccessControl indicesAccessControl = threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); + if (indicesAccessControl == null) { + throw Exceptions.authorizationError("no indices permissions found"); + } + return indicesAccessControl; + } + + protected User getUser(){ + Authentication authentication = Authentication.getAuthentication(threadContext); + return authentication.getUser(); + } + + /** + * Checks whether the role query contains queries we know can't be used as DLS role query. + */ + static void verifyRoleQuery(QueryBuilder queryBuilder) throws IOException { + if (queryBuilder instanceof TermsQueryBuilder) { + TermsQueryBuilder termsQueryBuilder = (TermsQueryBuilder) queryBuilder; + if (termsQueryBuilder.termsLookup() != null) { + throw new IllegalArgumentException("terms query with terms lookup isn't supported as part of a role query"); + } + } else if (queryBuilder instanceof GeoShapeQueryBuilder) { + GeoShapeQueryBuilder geoShapeQueryBuilder = (GeoShapeQueryBuilder) queryBuilder; + if (geoShapeQueryBuilder.shape() == null) { + throw new IllegalArgumentException("geoshape query referring to indexed shapes isn't support as part of a role query"); + } + } else if (queryBuilder.getName().equals("percolate")) { + // actually only if percolate query is referring to an existing document then this is problematic, + // a normal percolate query does work. However we can't check that here as this query builder is inside + // another module. So we don't allow the entire percolate query. I don't think users would ever use + // a percolate query as role query, so this restriction shouldn't prohibit anyone from using dls. 
+ throw new IllegalArgumentException("percolate query isn't support as part of a role query"); + } else if (queryBuilder.getName().equals("has_child")) { + throw new IllegalArgumentException("has_child query isn't support as part of a role query"); + } else if (queryBuilder.getName().equals("has_parent")) { + throw new IllegalArgumentException("has_parent query isn't support as part of a role query"); + } else if (queryBuilder instanceof BoolQueryBuilder) { + BoolQueryBuilder boolQueryBuilder = (BoolQueryBuilder) queryBuilder; + List clauses = new ArrayList<>(); + clauses.addAll(boolQueryBuilder.filter()); + clauses.addAll(boolQueryBuilder.must()); + clauses.addAll(boolQueryBuilder.mustNot()); + clauses.addAll(boolQueryBuilder.should()); + for (QueryBuilder clause : clauses) { + verifyRoleQuery(clause); + } + } else if (queryBuilder instanceof ConstantScoreQueryBuilder) { + verifyRoleQuery(((ConstantScoreQueryBuilder) queryBuilder).innerQuery()); + } else if (queryBuilder instanceof FunctionScoreQueryBuilder) { + verifyRoleQuery(((FunctionScoreQueryBuilder) queryBuilder).query()); + } else if (queryBuilder instanceof BoostingQueryBuilder) { + verifyRoleQuery(((BoostingQueryBuilder) queryBuilder).negativeQuery()); + verifyRoleQuery(((BoostingQueryBuilder) queryBuilder).positiveQuery()); + } + } + + /** + * Fall back validation that verifies that queries during rewrite don't use + * the client to make remote calls. In the case of DLS this can cause a dead + * lock if DLS is also applied on these remote calls. For example in the + * case of terms query with lookup, this can cause recursive execution of + * the DLS query until the get thread pool has been exhausted: + * https://github.com/elastic/x-plugins/issues/3145 + */ + static void failIfQueryUsesClient(QueryBuilder queryBuilder, QueryRewriteContext original) + throws IOException { + QueryRewriteContext copy = new QueryRewriteContext( + original.getXContentRegistry(), original.getWriteableRegistry(), null, original::nowInMillis); + Rewriteable.rewrite(queryBuilder, copy); + if (copy.hasAsyncActions()) { + throw new IllegalStateException("role queries are not allowed to execute additional requests"); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SetSecurityUserProcessor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SetSecurityUserProcessor.java new file mode 100644 index 0000000000000..051a077646320 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SetSecurityUserProcessor.java @@ -0,0 +1,150 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authz.accesscontrol; + +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.ingest.AbstractProcessor; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.user.User; + +import java.util.Arrays; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.ingest.ConfigurationUtils.newConfigurationException; +import static org.elasticsearch.ingest.ConfigurationUtils.readOptionalList; +import static org.elasticsearch.ingest.ConfigurationUtils.readStringProperty; + +/** + * A processor that adds information of the current authenticated user to the document being ingested. + */ +public final class SetSecurityUserProcessor extends AbstractProcessor { + + public static final String TYPE = "set_security_user"; + + private final ThreadContext threadContext; + private final String field; + private final Set properties; + + public SetSecurityUserProcessor(String tag, ThreadContext threadContext, String field, Set properties) { + super(tag); + this.threadContext = threadContext; + this.field = field; + this.properties = properties; + } + + @Override + public void execute(IngestDocument ingestDocument) throws Exception { + Authentication authentication = Authentication.getAuthentication(threadContext); + if (authentication == null) { + throw new IllegalStateException("No user authenticated, only use this processor via authenticated user"); + } + User user = authentication.getUser(); + if (user == null) { + throw new IllegalStateException("No user for authentication"); + } + + Map userObject = new HashMap<>(); + for (Property property : properties) { + switch (property) { + case USERNAME: + if (user.principal() != null) { + userObject.put("username", user.principal()); + } + break; + case FULL_NAME: + if (user.fullName() != null) { + userObject.put("full_name", user.fullName()); + } + break; + case EMAIL: + if (user.email() != null) { + userObject.put("email", user.email()); + } + break; + case ROLES: + if (user.roles() != null && user.roles().length != 0) { + userObject.put("roles", Arrays.asList(user.roles())); + } + break; + case METADATA: + if (user.metadata() != null && user.metadata().isEmpty() == false) { + userObject.put("metadata", user.metadata()); + } + break; + default: + throw new UnsupportedOperationException("unsupported property [" + property + "]"); + } + } + ingestDocument.setFieldValue(field, userObject); + } + + @Override + public String getType() { + return TYPE; + } + + String getField() { + return field; + } + + Set getProperties() { + return properties; + } + + public static final class Factory implements Processor.Factory { + + private final ThreadContext threadContext; + + public Factory(ThreadContext threadContext) { + this.threadContext = threadContext; + } + + @Override + public SetSecurityUserProcessor create(Map processorFactories, String tag, + Map config) throws Exception { + String field = readStringProperty(TYPE, tag, config, "field"); + List propertyNames = readOptionalList(TYPE, tag, config, "properties"); + Set properties; + if (propertyNames != null) { + properties = EnumSet.noneOf(Property.class); + for (String propertyName : propertyNames) { + properties.add(Property.parse(tag, propertyName)); + } + } 
else { + properties = EnumSet.allOf(Property.class); + } + return new SetSecurityUserProcessor(tag, threadContext, field, properties); + } + } + + public enum Property { + + USERNAME, + FULL_NAME, + EMAIL, + ROLES, + METADATA; + + static Property parse(String tag, String value) { + try { + return valueOf(value.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException e) { + // not using the original exception as its message is confusing + // (e.g. 'No enum constant SetSecurityUserProcessor.Property.INVALID') + throw newConfigurationException(TYPE, tag, "properties", "Property value [" + value + "] is in valid"); + } + } + + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java new file mode 100644 index 0000000000000..7c990bd735a41 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authz.permission; + +import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; + +import java.util.function.Predicate; + +/** + * A permission that is based on privileges for cluster wide actions + */ +public final class ClusterPermission { + + public static final ClusterPermission NONE = new ClusterPermission(ClusterPrivilege.NONE); + + private final ClusterPrivilege privilege; + private final Predicate predicate; + + ClusterPermission(ClusterPrivilege privilege) { + this.privilege = privilege; + this.predicate = privilege.predicate(); + } + + public ClusterPrivilege privilege() { + return privilege; + } + + public boolean check(String action) { + return predicate.test(action); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java new file mode 100644 index 0000000000000..144d2877463b8 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java @@ -0,0 +1,211 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authz.permission; + +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.RamUsageEstimator; +import org.apache.lucene.util.automaton.Automata; +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.CharacterRunAutomaton; +import org.apache.lucene.util.automaton.MinimizationOperations; +import org.apache.lucene.util.automaton.Operations; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.FieldSubsetReader; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition.FieldGrantExcludeGroup; +import org.elasticsearch.xpack.core.security.support.Automatons; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.apache.lucene.util.automaton.Operations.subsetOf; + +/** + * Stores patterns to fields which access is granted or denied to and maintains an automaton that can be used to check if permission is + * allowed for a specific field. + * Field permissions are configured via a list of strings that are patterns a field has to match. Two lists determine whether or + * not a field is granted access to: + * 1. It has to match the patterns in grantedFieldsArray + * 2. it must not match the patterns in deniedFieldsArray + */ +public final class FieldPermissions implements Accountable { + + public static final FieldPermissions DEFAULT = new FieldPermissions(); + + private static final long BASE_FIELD_PERM_DEF_BYTES = RamUsageEstimator.shallowSizeOf(new FieldPermissionsDefinition(null, null)); + private static final long BASE_FIELD_GROUP_BYTES = RamUsageEstimator.shallowSizeOf(new FieldGrantExcludeGroup(null, null)); + private static final long BASE_HASHSET_SIZE = RamUsageEstimator.shallowSizeOfInstance(HashSet.class); + private static final long BASE_HASHSET_ENTRY_SIZE; + static { + HashMap map = new HashMap<>(); + map.put(FieldPermissions.class.getName(), new Object()); + long mapEntryShallowSize = RamUsageEstimator.shallowSizeOf(map.entrySet().iterator().next()); + // assume a load factor of 50% + // for each entry, we need two object refs, one for the entry itself + // and one for the free space that is due to the fact hash tables can + // not be fully loaded + BASE_HASHSET_ENTRY_SIZE = mapEntryShallowSize + 2 * RamUsageEstimator.NUM_BYTES_OBJECT_REF; + } + + private final FieldPermissionsDefinition fieldPermissionsDefinition; + // an automaton that represents a union of one more sets of permitted and denied fields + private final CharacterRunAutomaton permittedFieldsAutomaton; + private final boolean permittedFieldsAutomatonIsTotal; + private final Automaton originalAutomaton; + + private final long ramBytesUsed; + + /** Constructor that does not enable field-level security: all fields are accepted. */ + public FieldPermissions() { + this(new FieldPermissionsDefinition(null, null), Automatons.MATCH_ALL); + } + + /** Constructor that enables field-level security based on include/exclude rules. Exclude rules + * have precedence over include rules. 
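+ * <p>
+ * For illustration, assuming the two-argument {@code FieldPermissionsDefinition(String[], String[])}
+ * constructor used elsewhere in this class, granting {@code foo*} while excluding {@code foo.secret}:
+ * <pre>{@code
+ * FieldPermissions permissions = new FieldPermissions(
+ *         new FieldPermissionsDefinition(new String[] { "foo*" }, new String[] { "foo.secret" }));
+ * permissions.grantsAccessTo("foo.bar");    // true
+ * permissions.grantsAccessTo("foo.secret"); // false
+ * }</pre>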
*/ + public FieldPermissions(FieldPermissionsDefinition fieldPermissionsDefinition) { + this(fieldPermissionsDefinition, initializePermittedFieldsAutomaton(fieldPermissionsDefinition)); + } + + /** Constructor that enables field-level security based on include/exclude rules. Exclude rules + * have precedence over include rules. */ + FieldPermissions(FieldPermissionsDefinition fieldPermissionsDefinition, Automaton permittedFieldsAutomaton) { + if (permittedFieldsAutomaton.isDeterministic() == false && permittedFieldsAutomaton.getNumStates() > 1) { + // we only accept deterministic automata so that the CharacterRunAutomaton constructor + // directly wraps the provided automaton + throw new IllegalArgumentException("Only accepts deterministic automata"); + } + this.fieldPermissionsDefinition = fieldPermissionsDefinition; + this.originalAutomaton = permittedFieldsAutomaton; + this.permittedFieldsAutomaton = new CharacterRunAutomaton(permittedFieldsAutomaton); + // we cache the result of isTotal since this might be a costly operation + this.permittedFieldsAutomatonIsTotal = Operations.isTotal(permittedFieldsAutomaton); + + long ramBytesUsed = BASE_FIELD_PERM_DEF_BYTES; + + for (FieldGrantExcludeGroup group : fieldPermissionsDefinition.getFieldGrantExcludeGroups()) { + ramBytesUsed += BASE_FIELD_GROUP_BYTES + BASE_HASHSET_ENTRY_SIZE; + if (group.getGrantedFields() != null) { + ramBytesUsed += RamUsageEstimator.shallowSizeOf(group.getGrantedFields()); + } + if (group.getExcludedFields() != null) { + ramBytesUsed += RamUsageEstimator.shallowSizeOf(group.getExcludedFields()); + } + } + ramBytesUsed += permittedFieldsAutomaton.ramBytesUsed(); + ramBytesUsed += runAutomatonRamBytesUsed(permittedFieldsAutomaton); + this.ramBytesUsed = ramBytesUsed; + } + + /** + * Return an estimation of the ram bytes used by a {@link CharacterRunAutomaton} + * that wraps the given automaton. 
+     */
+    private static long runAutomatonRamBytesUsed(Automaton a) {
+        return a.getNumStates() * 5; // wild guess, better than 0
+    }
+
+    public static Automaton initializePermittedFieldsAutomaton(FieldPermissionsDefinition fieldPermissionsDefinition) {
+        Set<FieldGrantExcludeGroup> groups = fieldPermissionsDefinition.getFieldGrantExcludeGroups();
+        assert groups.size() > 0 : "there must always be at least one group for field inclusion/exclusion";
+        List<Automaton> automatonList =
+                groups.stream()
+                        .map(g -> FieldPermissions.initializePermittedFieldsAutomaton(g.getGrantedFields(), g.getExcludedFields()))
+                        .collect(Collectors.toList());
+        return Automatons.unionAndMinimize(automatonList);
+    }
+
+    private static Automaton initializePermittedFieldsAutomaton(final String[] grantedFields, final String[] deniedFields) {
+        Automaton grantedFieldsAutomaton;
+        if (grantedFields == null || Arrays.stream(grantedFields).anyMatch(Regex::isMatchAllPattern)) {
+            grantedFieldsAutomaton = Automatons.MATCH_ALL;
+        } else {
+            // an automaton that includes metadata fields, including join fields created by the _parent field such
+            // as _parent#type
+            Automaton metaFieldsAutomaton = Operations.concatenate(Automata.makeChar('_'), Automata.makeAnyString());
+            grantedFieldsAutomaton = Operations.union(Automatons.patterns(grantedFields), metaFieldsAutomaton);
+        }
+
+        Automaton deniedFieldsAutomaton;
+        if (deniedFields == null || deniedFields.length == 0) {
+            deniedFieldsAutomaton = Automatons.EMPTY;
+        } else {
+            deniedFieldsAutomaton = Automatons.patterns(deniedFields);
+        }
+
+        grantedFieldsAutomaton = MinimizationOperations.minimize(grantedFieldsAutomaton, Operations.DEFAULT_MAX_DETERMINIZED_STATES);
+        deniedFieldsAutomaton = MinimizationOperations.minimize(deniedFieldsAutomaton, Operations.DEFAULT_MAX_DETERMINIZED_STATES);
+
+        if (subsetOf(deniedFieldsAutomaton, grantedFieldsAutomaton) == false) {
+            throw new ElasticsearchSecurityException("Exceptions for field permissions must be a subset of the " +
+                    "granted fields but " + Strings.arrayToCommaDelimitedString(deniedFields) + " is not a subset of " +
+                    Strings.arrayToCommaDelimitedString(grantedFields));
+        }
+
+        grantedFieldsAutomaton = Automatons.minusAndMinimize(grantedFieldsAutomaton, deniedFieldsAutomaton);
+        return grantedFieldsAutomaton;
+    }
+
+    /**
+     * Returns true if this field permission policy allows access to the field and false if not.
+     * fieldName can be a wildcard.
+     */
+    public boolean grantsAccessTo(String fieldName) {
+        return permittedFieldsAutomatonIsTotal || permittedFieldsAutomaton.run(fieldName);
+    }
+
+    FieldPermissionsDefinition getFieldPermissionsDefinition() {
+        return fieldPermissionsDefinition;
+    }
+
+    /** Return whether field-level security is enabled, i.e. whether any field might be filtered out. */
+    public boolean hasFieldLevelSecurity() {
+        return permittedFieldsAutomatonIsTotal == false;
+    }
+
+    /** Return a wrapped reader that only exposes allowed fields.
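The subset rule enforced above can be illustrated with a short, hypothetical sketch: denied patterns that are not covered by the granted patterns cause construction to fail.

```java
import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions;
import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition;

public class DeniedSubsetExample {
    public static void main(String[] args) {
        FieldPermissionsDefinition invalid = new FieldPermissionsDefinition(
                new String[] { "message" },         // granted
                new String[] { "source.address" }); // denied, but not a subset of "message"
        try {
            new FieldPermissions(invalid);
        } catch (ElasticsearchSecurityException e) {
            // "Exceptions for field permissions must be a subset of the granted fields ..."
            System.out.println(e.getMessage());
        }
    }
}
```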
*/ + public DirectoryReader filter(DirectoryReader reader) throws IOException { + if (hasFieldLevelSecurity() == false) { + return reader; + } + return FieldSubsetReader.wrap(reader, permittedFieldsAutomaton); + } + + // for testing only + Automaton getIncludeAutomaton() { + return originalAutomaton; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + FieldPermissions that = (FieldPermissions) o; + + if (permittedFieldsAutomatonIsTotal != that.permittedFieldsAutomatonIsTotal) return false; + return fieldPermissionsDefinition != null ? + fieldPermissionsDefinition.equals(that.fieldPermissionsDefinition) : that.fieldPermissionsDefinition == null; + } + + @Override + public int hashCode() { + int result = fieldPermissionsDefinition != null ? fieldPermissionsDefinition.hashCode() : 0; + result = 31 * result + (permittedFieldsAutomatonIsTotal ? 1 : 0); + return result; + } + + @Override + public long ramBytesUsed() { + return ramBytesUsed; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissionsCache.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissionsCache.java new file mode 100644 index 0000000000000..4036d2c09587e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissionsCache.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authz.permission; + +import org.apache.lucene.util.automaton.Automaton; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.cache.Cache; +import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition.FieldGrantExcludeGroup; +import org.elasticsearch.xpack.core.security.support.Automatons; + +import java.util.Collection; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.security.SecurityField.setting; + +/** + * A service for managing the caching of {@link FieldPermissions} as these may often need to be combined or created and internally they + * use an {@link org.apache.lucene.util.automaton.Automaton}, which can be costly to create once you account for minimization + */ +public final class FieldPermissionsCache { + + public static final Setting CACHE_SIZE_SETTING = Setting.longSetting( + setting("authz.store.roles.field_permissions.cache.max_size_in_bytes"), 100 * 1024 * 1024, -1L, Property.NodeScope); + private final Cache cache; + + public FieldPermissionsCache(Settings settings) { + this.cache = CacheBuilder.builder() + .setMaximumWeight(CACHE_SIZE_SETTING.get(settings)) + .weigher((key, fieldPermissions) -> fieldPermissions.ramBytesUsed()) + .build(); + } + + /** + * Gets a {@link FieldPermissions} instance that corresponds to the granted and denied 
parameters. The instance may come from the cache
+     * or, if it is newly created, it will be added to the cache
+     */
+    FieldPermissions getFieldPermissions(String[] granted, String[] denied) {
+        return getFieldPermissions(new FieldPermissionsDefinition(granted, denied));
+    }
+
+    /**
+     * Gets a {@link FieldPermissions} instance that corresponds to the granted and denied parameters. The instance may come from the
+     * cache or, if it is newly created, it will be added to the cache
+     */
+    public FieldPermissions getFieldPermissions(FieldPermissionsDefinition fieldPermissionsDefinition) {
+        try {
+            return cache.computeIfAbsent(fieldPermissionsDefinition,
+                    (key) -> new FieldPermissions(key, FieldPermissions.initializePermittedFieldsAutomaton(key)));
+        } catch (ExecutionException e) {
+            throw new ElasticsearchException("unable to compute field permissions", e);
+        }
+    }
+
+    /**
+     * Returns a field permissions object that corresponds to the merging of the given field permissions and caches the instance if one
+     * was not found in the cache.
+     */
+    FieldPermissions getFieldPermissions(Collection<FieldPermissions> fieldPermissionsCollection) {
+        Optional<FieldPermissions> allowAllFieldPermissions = fieldPermissionsCollection.stream()
+                .filter(((Predicate<FieldPermissions>) (FieldPermissions::hasFieldLevelSecurity)).negate())
+                .findFirst();
+        return allowAllFieldPermissions.orElseGet(() -> {
+            final Set<FieldGrantExcludeGroup> fieldGrantExcludeGroups = fieldPermissionsCollection.stream()
+                    .flatMap(fieldPermission -> fieldPermission.getFieldPermissionsDefinition().getFieldGrantExcludeGroups().stream())
+                    .collect(Collectors.toSet());
+            final FieldPermissionsDefinition combined = new FieldPermissionsDefinition(fieldGrantExcludeGroups);
+            try {
+                return cache.computeIfAbsent(combined, (key) -> {
+                    List<Automaton> automatonList = fieldPermissionsCollection.stream()
+                            .map(FieldPermissions::getIncludeAutomaton)
+                            .collect(Collectors.toList());
+                    return new FieldPermissions(key, Automatons.unionAndMinimize(automatonList));
+                });
+            } catch (ExecutionException e) {
+                throw new ElasticsearchException("unable to compute field permissions", e);
+            }
+        });
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissionsDefinition.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissionsDefinition.java
new file mode 100644
index 0000000000000..ad340c0f239da
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissionsDefinition.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.security.authz.permission;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Set;
+
+/**
+ * Represents the definition of a {@link FieldPermissions}. Field permissions are defined as a
+ * collection of grant and exclude definitions where the exclude definition must be a subset of
+ * the grant definition.
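The FieldPermissionsCache above keys on FieldPermissionsDefinition equality, so identical definitions only pay the automaton construction cost once; a minimal sketch with a hypothetical pattern:

```java
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions;
import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache;
import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition;

public class FieldPermissionsCacheExample {
    public static void main(String[] args) {
        FieldPermissionsCache cache = new FieldPermissionsCache(Settings.EMPTY);
        FieldPermissionsDefinition definition =
                new FieldPermissionsDefinition(new String[] { "public.*" }, new String[0]);

        FieldPermissions first = cache.getFieldPermissions(definition);
        FieldPermissions second = cache.getFieldPermissions(definition);
        System.out.println(first == second); // true: the second lookup is served from the cache
    }
}
```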
+ */ +public final class FieldPermissionsDefinition { + + private final Set fieldGrantExcludeGroups; + + public FieldPermissionsDefinition(String[] grant, String[] exclude) { + this(Collections.singleton(new FieldGrantExcludeGroup(grant, exclude))); + } + + public FieldPermissionsDefinition(Set fieldGrantExcludeGroups) { + this.fieldGrantExcludeGroups = Collections.unmodifiableSet(fieldGrantExcludeGroups); + } + + public Set getFieldGrantExcludeGroups() { + return fieldGrantExcludeGroups; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + FieldPermissionsDefinition that = (FieldPermissionsDefinition) o; + + return fieldGrantExcludeGroups != null ? + fieldGrantExcludeGroups.equals(that.fieldGrantExcludeGroups) : + that.fieldGrantExcludeGroups == null; + } + + @Override + public int hashCode() { + return fieldGrantExcludeGroups != null ? fieldGrantExcludeGroups.hashCode() : 0; + } + + public static final class FieldGrantExcludeGroup { + private final String[] grantedFields; + private final String[] excludedFields; + + public FieldGrantExcludeGroup(String[] grantedFields, String[] excludedFields) { + this.grantedFields = grantedFields; + this.excludedFields = excludedFields; + } + + public String[] getGrantedFields() { + return grantedFields; + } + + public String[] getExcludedFields() { + return excludedFields; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + FieldGrantExcludeGroup that = (FieldGrantExcludeGroup) o; + + if (!Arrays.equals(grantedFields, that.grantedFields)) return false; + return Arrays.equals(excludedFields, that.excludedFields); + } + + @Override + public int hashCode() { + int result = Arrays.hashCode(grantedFields); + result = 31 * result + Arrays.hashCode(excludedFields); + return result; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java new file mode 100644 index 0000000000000..ea7a37b205431 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java @@ -0,0 +1,272 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authz.permission; + +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.cluster.metadata.AliasOrIndex; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; +import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; +import org.elasticsearch.xpack.core.security.support.Automatons; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.SortedMap; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Function; +import java.util.function.Predicate; + +import static java.util.Collections.unmodifiableMap; +import static java.util.Collections.unmodifiableSet; + +/** + * A permission that is based on privileges for index related actions executed + * on specific indices + */ +public final class IndicesPermission implements Iterable { + + public static final IndicesPermission NONE = new IndicesPermission(); + + private final Function> loadingFunction; + + private final ConcurrentHashMap> allowedIndicesMatchersForAction = new ConcurrentHashMap<>(); + + private final Group[] groups; + + public IndicesPermission(Group... groups) { + this.groups = groups; + loadingFunction = (action) -> { + List indices = new ArrayList<>(); + for (Group group : groups) { + if (group.actionMatcher.test(action)) { + indices.addAll(Arrays.asList(group.indices)); + } + } + return indexMatcher(indices); + }; + } + + static Predicate indexMatcher(List indices) { + try { + return Automatons.predicate(indices); + } catch (TooComplexToDeterminizeException e) { + Loggers.getLogger(IndicesPermission.class).debug("Index pattern automaton [{}] is too complex", indices); + String description = Strings.collectionToCommaDelimitedString(indices); + if (description.length() > 80) { + description = Strings.cleanTruncate(description, 80) + "..."; + } + throw new ElasticsearchSecurityException("The set of permitted index patterns [{}] is too complex to evaluate", e, description); + } + } + + @Override + public Iterator iterator() { + return Arrays.asList(groups).iterator(); + } + + public Group[] groups() { + return groups; + } + + /** + * @return A predicate that will match all the indices that this permission + * has the privilege for executing the given action on. + */ + public Predicate allowedIndicesMatcher(String action) { + return allowedIndicesMatchersForAction.computeIfAbsent(action, loadingFunction); + } + + /** + * Checks if the permission matches the provided action, without looking at indices. + * To be used in very specific cases where indices actions need to be authorized regardless of their indices. + * The usecase for this is composite actions that are initially only authorized based on the action name (indices are not + * checked on the coordinating node), and properly authorized later at the shard level checking their indices as well. 
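A sketch of how permission groups combine; the index names and patterns are hypothetical, and only the constructors and matchers shown in this diff are used:

```java
import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions;
import org.elasticsearch.xpack.core.security.authz.permission.IndicesPermission;
import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege;

public class IndicesPermissionExample {
    public static void main(String[] args) {
        IndicesPermission permission = new IndicesPermission(
                new IndicesPermission.Group(IndexPrivilege.READ, FieldPermissions.DEFAULT, null, "logs-*"),
                new IndicesPermission.Group(IndexPrivilege.ALL, FieldPermissions.DEFAULT, null, "metrics"));

        // Only groups whose privilege covers the action contribute to the index matcher.
        System.out.println(permission.allowedIndicesMatcher("indices:data/read/search").test("logs-2018-05-01")); // true
        System.out.println(permission.allowedIndicesMatcher("indices:data/write/index").test("logs-2018-05-01")); // false
        // check(action) ignores indices entirely, as described above.
        System.out.println(permission.check("indices:data/write/index")); // true, via the "metrics" group
    }
}
```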
+ */ + public boolean check(String action) { + for (Group group : groups) { + if (group.check(action)) { + return true; + } + } + return false; + } + + public Automaton allowedActionsMatcher(String index) { + List automatonList = new ArrayList<>(); + for (Group group : groups) { + if (group.indexNameMatcher.test(index)) { + automatonList.add(group.privilege.getAutomaton()); + } + } + return automatonList.isEmpty() ? Automatons.EMPTY : Automatons.unionAndMinimize(automatonList); + } + + /** + * Authorizes the provided action against the provided indices, given the current cluster metadata + */ + public Map authorize(String action, Set requestedIndicesOrAliases, + MetaData metaData, FieldPermissionsCache fieldPermissionsCache) { + // now... every index that is associated with the request, must be granted + // by at least one indices permission group + + SortedMap allAliasesAndIndices = metaData.getAliasAndIndexLookup(); + Map> fieldPermissionsByIndex = new HashMap<>(); + Map roleQueriesByIndex = new HashMap<>(); + Map grantedBuilder = new HashMap<>(); + + for (String indexOrAlias : requestedIndicesOrAliases) { + boolean granted = false; + Set concreteIndices = new HashSet<>(); + AliasOrIndex aliasOrIndex = allAliasesAndIndices.get(indexOrAlias); + if (aliasOrIndex != null) { + for (IndexMetaData indexMetaData : aliasOrIndex.getIndices()) { + concreteIndices.add(indexMetaData.getIndex().getName()); + } + } + + for (Group group : groups) { + if (group.check(action, indexOrAlias)) { + granted = true; + for (String index : concreteIndices) { + Set fieldPermissions = fieldPermissionsByIndex.computeIfAbsent(index, (k) -> new HashSet<>()); + fieldPermissionsByIndex.put(indexOrAlias, fieldPermissions); + fieldPermissions.add(group.getFieldPermissions()); + DocumentLevelPermissions permissions = + roleQueriesByIndex.computeIfAbsent(index, (k) -> new DocumentLevelPermissions()); + roleQueriesByIndex.putIfAbsent(indexOrAlias, permissions); + if (group.hasQuery()) { + permissions.addAll(group.getQuery()); + } else { + // if more than one permission matches for a concrete index here and if + // a single permission doesn't have a role query then DLS will not be + // applied even when other permissions do have a role query + permissions.setAllowAll(true); + } + } + } + } + + if (concreteIndices.isEmpty()) { + grantedBuilder.put(indexOrAlias, granted); + } else { + grantedBuilder.put(indexOrAlias, granted); + for (String concreteIndex : concreteIndices) { + grantedBuilder.put(concreteIndex, granted); + } + } + } + + Map indexPermissions = new HashMap<>(); + for (Map.Entry entry : grantedBuilder.entrySet()) { + String index = entry.getKey(); + DocumentLevelPermissions permissions = roleQueriesByIndex.get(index); + final Set roleQueries; + if (permissions != null && permissions.isAllowAll() == false) { + roleQueries = unmodifiableSet(permissions.queries); + } else { + roleQueries = null; + } + + final FieldPermissions fieldPermissions; + final Set indexFieldPermissions = fieldPermissionsByIndex.get(index); + if (indexFieldPermissions != null && indexFieldPermissions.isEmpty() == false) { + fieldPermissions = indexFieldPermissions.size() == 1 ? 
indexFieldPermissions.iterator().next() : + fieldPermissionsCache.getFieldPermissions(indexFieldPermissions); + } else { + fieldPermissions = FieldPermissions.DEFAULT; + } + indexPermissions.put(index, new IndicesAccessControl.IndexAccessControl(entry.getValue(), fieldPermissions, roleQueries)); + } + return unmodifiableMap(indexPermissions); + } + + public static class Group { + private final IndexPrivilege privilege; + private final Predicate actionMatcher; + private final String[] indices; + private final Predicate indexNameMatcher; + + public FieldPermissions getFieldPermissions() { + return fieldPermissions; + } + + private final FieldPermissions fieldPermissions; + private final Set query; + + public Group(IndexPrivilege privilege, FieldPermissions fieldPermissions, @Nullable Set query, String... indices) { + assert indices.length != 0; + this.privilege = privilege; + this.actionMatcher = privilege.predicate(); + this.indices = indices; + this.indexNameMatcher = indexMatcher(Arrays.asList(indices)); + this.fieldPermissions = Objects.requireNonNull(fieldPermissions); + this.query = query; + } + + public IndexPrivilege privilege() { + return privilege; + } + + public String[] indices() { + return indices; + } + + @Nullable + public Set getQuery() { + return query; + } + + private boolean check(String action) { + return actionMatcher.test(action); + } + + private boolean check(String action, String index) { + assert index != null; + return check(action) && indexNameMatcher.test(index); + } + + boolean hasQuery() { + return query != null; + } + } + + private static class DocumentLevelPermissions { + + private Set queries = null; + private boolean allowAll = false; + + private void addAll(Set query) { + if (allowAll == false) { + if (queries == null) { + queries = new HashSet<>(); + } + queries.addAll(query); + } + } + + private boolean isAllowAll() { + return allowAll; + } + + private void setAllowAll(boolean allowAll) { + this.allowAll = allowAll; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java new file mode 100644 index 0000000000000..8fed501ece2c9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java @@ -0,0 +1,166 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authz.permission; + +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; +import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.Privilege; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +public final class Role { + + public static final Role EMPTY = Role.builder("__empty").build(); + + private final String[] names; + private final ClusterPermission cluster; + private final IndicesPermission indices; + private final RunAsPermission runAs; + + Role(String[] names, ClusterPermission cluster, IndicesPermission indices, RunAsPermission runAs) { + this.names = names; + this.cluster = Objects.requireNonNull(cluster); + this.indices = Objects.requireNonNull(indices); + this.runAs = Objects.requireNonNull(runAs); + } + + public String[] names() { + return names; + } + + public ClusterPermission cluster() { + return cluster; + } + + public IndicesPermission indices() { + return indices; + } + + public RunAsPermission runAs() { + return runAs; + } + + public static Builder builder(String... names) { + return new Builder(names, null); + } + + public static Builder builder(String[] names, FieldPermissionsCache fieldPermissionsCache) { + return new Builder(names, fieldPermissionsCache); + } + + public static Builder builder(RoleDescriptor rd, FieldPermissionsCache fieldPermissionsCache) { + return new Builder(rd, fieldPermissionsCache); + } + + /** + * Returns whether at least one group encapsulated by this indices permissions is authorized to execute the + * specified action with the requested indices/aliases. At the same time if field and/or document level security + * is configured for any group also the allowed fields and role queries are resolved. 
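A sketch of assembling a Role through the builder defined below; the role and index names are hypothetical, and only members visible in this diff are called:

```java
import org.elasticsearch.xpack.core.security.authz.permission.Role;
import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege;
import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege;

public class RoleBuilderExample {
    public static void main(String[] args) {
        Role role = Role.builder("logs_reader")
                .cluster(ClusterPrivilege.MONITOR)
                .add(IndexPrivilege.READ, "logs-*")
                .build();

        System.out.println(role.indices().allowedIndicesMatcher("indices:data/read/get").test("logs-1")); // true
        System.out.println(role.runAs().check("some_user")); // false: no run_as privilege was granted
    }
}
```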
+ */ + public IndicesAccessControl authorize(String action, Set requestedIndicesOrAliases, MetaData metaData, + FieldPermissionsCache fieldPermissionsCache) { + Map indexPermissions = indices.authorize( + action, requestedIndicesOrAliases, metaData, fieldPermissionsCache + ); + + // At least one role / indices permission set need to match with all the requested indices/aliases: + boolean granted = true; + for (Map.Entry entry : indexPermissions.entrySet()) { + if (!entry.getValue().isGranted()) { + granted = false; + break; + } + } + return new IndicesAccessControl(granted, indexPermissions); + } + + public static class Builder { + + private final String[] names; + private ClusterPermission cluster = ClusterPermission.NONE; + private RunAsPermission runAs = RunAsPermission.NONE; + private List groups = new ArrayList<>(); + private FieldPermissionsCache fieldPermissionsCache = null; + + private Builder(String[] names, FieldPermissionsCache fieldPermissionsCache) { + this.names = names; + this.fieldPermissionsCache = fieldPermissionsCache; + } + + private Builder(RoleDescriptor rd, @Nullable FieldPermissionsCache fieldPermissionsCache) { + this.names = new String[] { rd.getName() }; + this.fieldPermissionsCache = fieldPermissionsCache; + if (rd.getClusterPrivileges().length == 0) { + cluster = ClusterPermission.NONE; + } else { + this.cluster(ClusterPrivilege.get(Sets.newHashSet(rd.getClusterPrivileges()))); + } + groups.addAll(convertFromIndicesPrivileges(rd.getIndicesPrivileges(), fieldPermissionsCache)); + String[] rdRunAs = rd.getRunAs(); + if (rdRunAs != null && rdRunAs.length > 0) { + this.runAs(new Privilege(Sets.newHashSet(rdRunAs), rdRunAs)); + } + } + + public Builder cluster(ClusterPrivilege privilege) { + cluster = new ClusterPermission(privilege); + return this; + } + + public Builder runAs(Privilege privilege) { + runAs = new RunAsPermission(privilege); + return this; + } + + public Builder add(IndexPrivilege privilege, String... indices) { + groups.add(new IndicesPermission.Group(privilege, FieldPermissions.DEFAULT, null, indices)); + return this; + } + + public Builder add(FieldPermissions fieldPermissions, Set query, IndexPrivilege privilege, String... indices) { + groups.add(new IndicesPermission.Group(privilege, fieldPermissions, query, indices)); + return this; + } + + public Role build() { + IndicesPermission indices = groups.isEmpty() ? IndicesPermission.NONE : + new IndicesPermission(groups.toArray(new IndicesPermission.Group[groups.size()])); + return new Role(names, cluster, indices, runAs); + } + + static List convertFromIndicesPrivileges(RoleDescriptor.IndicesPrivileges[] indicesPrivileges, + @Nullable FieldPermissionsCache fieldPermissionsCache) { + List list = new ArrayList<>(indicesPrivileges.length); + for (RoleDescriptor.IndicesPrivileges privilege : indicesPrivileges) { + final FieldPermissions fieldPermissions; + if (fieldPermissionsCache != null) { + fieldPermissions = fieldPermissionsCache.getFieldPermissions(privilege.getGrantedFields(), privilege.getDeniedFields()); + } else { + fieldPermissions = new FieldPermissions( + new FieldPermissionsDefinition(privilege.getGrantedFields(), privilege.getDeniedFields())); + } + final Set query = privilege.getQuery() == null ? 
null : Collections.singleton(privilege.getQuery()); + list.add(new IndicesPermission.Group(IndexPrivilege.get(Sets.newHashSet(privilege.getPrivileges())), + fieldPermissions, + query, + privilege.getIndices())); + + } + return list; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/RunAsPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/RunAsPermission.java new file mode 100644 index 0000000000000..a8b2f7bfaef68 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/RunAsPermission.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authz.permission; + +import org.elasticsearch.xpack.core.security.authz.privilege.Privilege; + +import java.util.function.Predicate; + +/** + * A permissions that is based on a general privilege that contains patterns of users that this + * user can execute a request as + */ +public final class RunAsPermission { + + public static final RunAsPermission NONE = new RunAsPermission(Privilege.NONE); + + private final Predicate predicate; + + RunAsPermission(Privilege privilege) { + this.predicate = privilege.predicate(); + } + + /** + * Checks if this permission grants run as to the specified user + */ + public boolean check(String username) { + return predicate.test(username); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java new file mode 100644 index 0000000000000..068c722c778a9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java @@ -0,0 +1,138 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authz.privilege; + +import org.apache.lucene.util.automaton.Automaton; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenAction; +import org.elasticsearch.xpack.core.security.action.token.RefreshTokenAction; +import org.elasticsearch.xpack.core.security.support.Automatons; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Predicate; + +import static org.elasticsearch.xpack.core.security.support.Automatons.minusAndMinimize; +import static org.elasticsearch.xpack.core.security.support.Automatons.patterns; + +public final class ClusterPrivilege extends Privilege { + + // shared automatons + private static final Automaton MANAGE_SECURITY_AUTOMATON = patterns("cluster:admin/xpack/security/*"); + private static final Automaton MANAGE_SAML_AUTOMATON = patterns("cluster:admin/xpack/security/saml/*", + InvalidateTokenAction.NAME, RefreshTokenAction.NAME); + private static final Automaton MONITOR_AUTOMATON = patterns("cluster:monitor/*"); + private static final Automaton MONITOR_ML_AUTOMATON = patterns("cluster:monitor/xpack/ml/*"); + private static final Automaton MONITOR_WATCHER_AUTOMATON = patterns("cluster:monitor/xpack/watcher/*"); + private static final Automaton MONITOR_ROLLUP_AUTOMATON = patterns("cluster:monitor/xpack/rollup/*"); + private static final Automaton ALL_CLUSTER_AUTOMATON = patterns("cluster:*", "indices:admin/template/*"); + private static final Automaton MANAGE_AUTOMATON = minusAndMinimize(ALL_CLUSTER_AUTOMATON, MANAGE_SECURITY_AUTOMATON); + private static final Automaton MANAGE_ML_AUTOMATON = patterns("cluster:admin/xpack/ml/*", "cluster:monitor/xpack/ml/*"); + private static final Automaton MANAGE_WATCHER_AUTOMATON = patterns("cluster:admin/xpack/watcher/*", "cluster:monitor/xpack/watcher/*"); + private static final Automaton TRANSPORT_CLIENT_AUTOMATON = patterns("cluster:monitor/nodes/liveness", "cluster:monitor/state"); + private static final Automaton MANAGE_IDX_TEMPLATE_AUTOMATON = patterns("indices:admin/template/*"); + private static final Automaton MANAGE_INGEST_PIPELINE_AUTOMATON = patterns("cluster:admin/ingest/pipeline/*"); + private static final Automaton MANAGE_ROLLUP_AUTOMATON = patterns("cluster:admin/xpack/rollup/*", "cluster:monitor/xpack/rollup/*"); + + public static final ClusterPrivilege NONE = new ClusterPrivilege("none", Automatons.EMPTY); + public static final ClusterPrivilege ALL = new ClusterPrivilege("all", ALL_CLUSTER_AUTOMATON); + public static final ClusterPrivilege MONITOR = new ClusterPrivilege("monitor", MONITOR_AUTOMATON); + public static final ClusterPrivilege MONITOR_ML = new ClusterPrivilege("monitor_ml", MONITOR_ML_AUTOMATON); + public static final ClusterPrivilege MONITOR_WATCHER = new ClusterPrivilege("monitor_watcher", MONITOR_WATCHER_AUTOMATON); + public static final ClusterPrivilege MONITOR_ROLLUP = new ClusterPrivilege("monitor_rollup", MONITOR_ROLLUP_AUTOMATON); + public static final ClusterPrivilege MANAGE = new ClusterPrivilege("manage", MANAGE_AUTOMATON); + public static final ClusterPrivilege MANAGE_ML = new ClusterPrivilege("manage_ml", MANAGE_ML_AUTOMATON); + public static final ClusterPrivilege MANAGE_WATCHER = new ClusterPrivilege("manage_watcher", MANAGE_WATCHER_AUTOMATON); + public static final 
ClusterPrivilege MANAGE_ROLLUP = new ClusterPrivilege("manage_rollup", MANAGE_ROLLUP_AUTOMATON); + public static final ClusterPrivilege MANAGE_IDX_TEMPLATES = + new ClusterPrivilege("manage_index_templates", MANAGE_IDX_TEMPLATE_AUTOMATON); + public static final ClusterPrivilege MANAGE_INGEST_PIPELINES = + new ClusterPrivilege("manage_ingest_pipelines", MANAGE_INGEST_PIPELINE_AUTOMATON); + public static final ClusterPrivilege TRANSPORT_CLIENT = new ClusterPrivilege("transport_client", TRANSPORT_CLIENT_AUTOMATON); + public static final ClusterPrivilege MANAGE_SECURITY = new ClusterPrivilege("manage_security", MANAGE_SECURITY_AUTOMATON); + public static final ClusterPrivilege MANAGE_SAML = new ClusterPrivilege("manage_saml", MANAGE_SAML_AUTOMATON); + public static final ClusterPrivilege MANAGE_PIPELINE = new ClusterPrivilege("manage_pipeline", "cluster:admin/ingest/pipeline/*"); + + public static final Predicate ACTION_MATCHER = ClusterPrivilege.ALL.predicate(); + + private static final Map VALUES = MapBuilder.newMapBuilder() + .put("none", NONE) + .put("all", ALL) + .put("monitor", MONITOR) + .put("monitor_ml", MONITOR_ML) + .put("monitor_watcher", MONITOR_WATCHER) + .put("monitor_rollup", MONITOR_ROLLUP) + .put("manage", MANAGE) + .put("manage_ml", MANAGE_ML) + .put("manage_watcher", MANAGE_WATCHER) + .put("manage_index_templates", MANAGE_IDX_TEMPLATES) + .put("manage_ingest_pipelines", MANAGE_INGEST_PIPELINES) + .put("transport_client", TRANSPORT_CLIENT) + .put("manage_security", MANAGE_SECURITY) + .put("manage_saml", MANAGE_SAML) + .put("manage_pipeline", MANAGE_PIPELINE) + .put("manage_rollup", MANAGE_ROLLUP) + .immutableMap(); + + private static final ConcurrentHashMap, ClusterPrivilege> CACHE = new ConcurrentHashMap<>(); + + private ClusterPrivilege(String name, String... patterns) { + super(name, patterns); + } + + private ClusterPrivilege(String name, Automaton automaton) { + super(Collections.singleton(name), automaton); + } + + private ClusterPrivilege(Set name, Automaton automaton) { + super(name, automaton); + } + + public static ClusterPrivilege get(Set name) { + if (name == null || name.isEmpty()) { + return NONE; + } + return CACHE.computeIfAbsent(name, ClusterPrivilege::resolve); + } + + private static ClusterPrivilege resolve(Set name) { + final int size = name.size(); + if (size == 0) { + throw new IllegalArgumentException("empty set should not be used"); + } + + Set actions = new HashSet<>(); + Set automata = new HashSet<>(); + for (String part : name) { + part = part.toLowerCase(Locale.ROOT); + if (ACTION_MATCHER.test(part)) { + actions.add(actionToPattern(part)); + } else { + ClusterPrivilege privilege = VALUES.get(part); + if (privilege != null && size == 1) { + return privilege; + } else if (privilege != null) { + automata.add(privilege.automaton); + } else { + throw new IllegalArgumentException("unknown cluster privilege [" + name + "]. 
a privilege must be either " + + "one of the predefined fixed cluster privileges [" + + Strings.collectionToCommaDelimitedString(VALUES.entrySet()) + "] or a pattern over one of the available " + + "cluster actions"); + } + } + } + + if (actions.isEmpty() == false) { + automata.add(patterns(actions)); + } + return new ClusterPrivilege(name, Automatons.unionAndMinimize(automata)); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/HealthAndStatsPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/HealthAndStatsPrivilege.java new file mode 100644 index 0000000000000..9cfa1300e4361 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/HealthAndStatsPrivilege.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authz.privilege; + +public final class HealthAndStatsPrivilege extends Privilege { + + public static final HealthAndStatsPrivilege INSTANCE = new HealthAndStatsPrivilege(); + + public static final String NAME = "health_and_stats"; + + private HealthAndStatsPrivilege() { + super(NAME, "cluster:monitor/health*", + "cluster:monitor/stats*", + "indices:monitor/stats*", + "cluster:monitor/nodes/stats*"); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java new file mode 100644 index 0000000000000..723dff61679f8 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java @@ -0,0 +1,148 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
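The resolution logic above can be summarized in a short sketch; the action patterns are hypothetical, and only ClusterPrivilege members shown in this diff are used:

```java
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege;

public class ClusterPrivilegeExample {
    public static void main(String[] args) {
        // A single well-known name resolves to the fixed constant.
        System.out.println(ClusterPrivilege.get(Sets.newHashSet("monitor")) == ClusterPrivilege.MONITOR); // true

        // A mix of names and raw cluster action patterns is unioned into a new privilege.
        ClusterPrivilege combined = ClusterPrivilege.get(Sets.newHashSet("manage_ml", "cluster:admin/xpack/license/*"));
        System.out.println(combined.predicate().test("cluster:admin/xpack/license/put"));       // true
        System.out.println(combined.predicate().test("cluster:admin/xpack/security/user/put")); // false
    }
}
```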
+ */ +package org.elasticsearch.xpack.core.security.authz.privilege; + +import org.apache.lucene.util.automaton.Automaton; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction; +import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistAction; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexAction; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; +import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsAction; +import org.elasticsearch.action.admin.indices.exists.types.TypesExistsAction; +import org.elasticsearch.action.admin.indices.get.GetIndexAction; +import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsAction; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; +import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.xpack.core.security.support.Automatons; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Predicate; + +import static org.elasticsearch.xpack.core.security.support.Automatons.patterns; +import static org.elasticsearch.xpack.core.security.support.Automatons.unionAndMinimize; + +public final class IndexPrivilege extends Privilege { + + private static final Automaton ALL_AUTOMATON = patterns("indices:*", "internal:transport/proxy/indices:*"); + private static final Automaton READ_AUTOMATON = patterns("indices:data/read/*"); + private static final Automaton READ_CROSS_CLUSTER_AUTOMATON = patterns("internal:transport/proxy/indices:data/read/*", + ClusterSearchShardsAction.NAME); + private static final Automaton CREATE_AUTOMATON = patterns("indices:data/write/index*", "indices:data/write/bulk*", + PutMappingAction.NAME); + private static final Automaton INDEX_AUTOMATON = + patterns("indices:data/write/index*", "indices:data/write/bulk*", "indices:data/write/update*", PutMappingAction.NAME); + private static final Automaton DELETE_AUTOMATON = patterns("indices:data/write/delete*", "indices:data/write/bulk*"); + private static final Automaton WRITE_AUTOMATON = patterns("indices:data/write/*", PutMappingAction.NAME); + private static final Automaton MONITOR_AUTOMATON = patterns("indices:monitor/*"); + private static final Automaton MANAGE_AUTOMATON = + unionAndMinimize(Arrays.asList(MONITOR_AUTOMATON, patterns("indices:admin/*"))); + private static final Automaton CREATE_INDEX_AUTOMATON = patterns(CreateIndexAction.NAME); + private static final Automaton DELETE_INDEX_AUTOMATON = patterns(DeleteIndexAction.NAME); + private static final Automaton VIEW_METADATA_AUTOMATON = patterns(GetAliasesAction.NAME, AliasesExistAction.NAME, + GetIndexAction.NAME, IndicesExistsAction.NAME, GetFieldMappingsAction.NAME + "*", GetMappingsAction.NAME, + ClusterSearchShardsAction.NAME, TypesExistsAction.NAME, ValidateQueryAction.NAME + "*", GetSettingsAction.NAME); + + public static final IndexPrivilege NONE = new IndexPrivilege("none", Automatons.EMPTY); + public static final IndexPrivilege ALL = new 
IndexPrivilege("all", ALL_AUTOMATON); + public static final IndexPrivilege READ = new IndexPrivilege("read", READ_AUTOMATON); + public static final IndexPrivilege READ_CROSS_CLUSTER = new IndexPrivilege("read_cross_cluster", READ_CROSS_CLUSTER_AUTOMATON); + public static final IndexPrivilege CREATE = new IndexPrivilege("create", CREATE_AUTOMATON); + public static final IndexPrivilege INDEX = new IndexPrivilege("index", INDEX_AUTOMATON); + public static final IndexPrivilege DELETE = new IndexPrivilege("delete", DELETE_AUTOMATON); + public static final IndexPrivilege WRITE = new IndexPrivilege("write", WRITE_AUTOMATON); + public static final IndexPrivilege MONITOR = new IndexPrivilege("monitor", MONITOR_AUTOMATON); + public static final IndexPrivilege MANAGE = new IndexPrivilege("manage", MANAGE_AUTOMATON); + public static final IndexPrivilege DELETE_INDEX = new IndexPrivilege("delete_index", DELETE_INDEX_AUTOMATON); + public static final IndexPrivilege CREATE_INDEX = new IndexPrivilege("create_index", CREATE_INDEX_AUTOMATON); + public static final IndexPrivilege VIEW_METADATA = new IndexPrivilege("view_index_metadata", VIEW_METADATA_AUTOMATON); + + private static final Map VALUES = MapBuilder.newMapBuilder() + .put("none", NONE) + .put("all", ALL) + .put("manage", MANAGE) + .put("create_index", CREATE_INDEX) + .put("monitor", MONITOR) + .put("read", READ) + .put("index", INDEX) + .put("delete", DELETE) + .put("write", WRITE) + .put("create", CREATE) + .put("delete_index", DELETE_INDEX) + .put("view_index_metadata", VIEW_METADATA) + .put("read_cross_cluster", READ_CROSS_CLUSTER) + .immutableMap(); + + public static final Predicate ACTION_MATCHER = ALL.predicate(); + public static final Predicate CREATE_INDEX_MATCHER = CREATE_INDEX.predicate(); + + private static final ConcurrentHashMap, IndexPrivilege> CACHE = new ConcurrentHashMap<>(); + + private IndexPrivilege(String name, Automaton automaton) { + super(Collections.singleton(name), automaton); + } + + private IndexPrivilege(Set name, Automaton automaton) { + super(name, automaton); + } + + public static IndexPrivilege get(Set name) { + return CACHE.computeIfAbsent(name, (theName) -> { + if (theName.isEmpty()) { + return NONE; + } else { + return resolve(theName); + } + }); + } + + private static IndexPrivilege resolve(Set name) { + final int size = name.size(); + if (size == 0) { + throw new IllegalArgumentException("empty set should not be used"); + } + + Set actions = new HashSet<>(); + Set automata = new HashSet<>(); + for (String part : name) { + part = part.toLowerCase(Locale.ROOT); + if (ACTION_MATCHER.test(part)) { + actions.add(actionToPattern(part)); + } else { + IndexPrivilege indexPrivilege = VALUES.get(part); + if (indexPrivilege != null && size == 1) { + return indexPrivilege; + } else if (indexPrivilege != null) { + automata.add(indexPrivilege.automaton); + } else { + throw new IllegalArgumentException("unknown index privilege [" + part + "]. 
a privilege must be either " + + "one of the predefined fixed indices privileges [" + + Strings.collectionToCommaDelimitedString(VALUES.entrySet()) + "] or a pattern over one of the available index" + + " actions"); + } + } + } + + if (actions.isEmpty() == false) { + automata.add(patterns(actions)); + } + return new IndexPrivilege(name, unionAndMinimize(automata)); + } + + static Map values() { + return VALUES; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/Privilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/Privilege.java new file mode 100644 index 0000000000000..54db92dacae88 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/Privilege.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authz.privilege; + +import org.apache.lucene.util.automaton.Automaton; +import org.elasticsearch.xpack.core.security.support.Automatons; + +import java.util.Collections; +import java.util.Set; +import java.util.function.Predicate; + +import static org.elasticsearch.xpack.core.security.support.Automatons.patterns; + +public class Privilege { + + public static final Privilege NONE = new Privilege(Collections.singleton("none"), Automatons.EMPTY); + public static final Privilege ALL = new Privilege(Collections.singleton("all"), Automatons.MATCH_ALL); + + protected final Set name; + protected final Automaton automaton; + protected final Predicate predicate; + + public Privilege(String name, String... patterns) { + this(Collections.singleton(name), patterns); + } + + public Privilege(Set name, String... patterns) { + this(name, patterns(patterns)); + } + + public Privilege(Set name, Automaton automaton) { + this.name = name; + this.automaton = automaton; + this.predicate = Automatons.predicate(automaton); + } + + public Set name() { + return name; + } + + public Predicate predicate() { + return predicate; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Privilege privilege = (Privilege) o; + + if (name != null ? !name.equals(privilege.name) : privilege.name != null) return false; + + return true; + } + + @Override + public int hashCode() { + return name != null ? name.hashCode() : 0; + } + + static String actionToPattern(String text) { + return text + "*"; + } + + @Override + public String toString() { + return name.toString(); + } + + public Automaton getAutomaton() { + return automaton; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java new file mode 100644 index 0000000000000..f1527429b323e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
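IndexPrivilege resolution follows the same pattern; a minimal sketch with a hypothetical mix of a named privilege and a raw index action pattern:

```java
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege;

public class IndexPrivilegeExample {
    public static void main(String[] args) {
        // Named privileges and raw index action patterns may be mixed; the result is their union.
        IndexPrivilege privilege = IndexPrivilege.get(Sets.newHashSet("read", "indices:admin/refresh*"));

        System.out.println(privilege.predicate().test("indices:data/read/search")); // true, via "read"
        System.out.println(privilege.predicate().test("indices:admin/refresh"));    // true, via the raw pattern
        System.out.println(privilege.predicate().test("indices:data/write/index")); // false
    }
}
```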
+ */ +package org.elasticsearch.xpack.core.security.authz.privilege; + +import org.elasticsearch.xpack.core.security.support.Automatons; + +import java.util.Collections; +import java.util.function.Predicate; + +public final class SystemPrivilege extends Privilege { + + public static SystemPrivilege INSTANCE = new SystemPrivilege(); + + private static final Predicate PREDICATE = Automatons.predicate(Automatons. + minusAndMinimize(Automatons.patterns( + "internal:*", + "indices:monitor/*", // added for monitoring + "cluster:monitor/*", // added for monitoring + "cluster:admin/reroute", // added for DiskThresholdDecider.DiskListener + "indices:admin/mapping/put", // needed for recovery and shrink api + "indices:admin/template/put", // needed for the TemplateUpgradeService + "indices:admin/template/delete", // needed for the TemplateUpgradeService + "indices:admin/seq_no/global_checkpoint_sync*" // needed for global checkpoint syncs + ), Automatons.patterns("internal:transport/proxy/*"))); // no proxy actions for system user! + + private SystemPrivilege() { + super(Collections.singleton("internal")); + } + + @Override + public Predicate predicate() { + return PREDICATE; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java new file mode 100644 index 0000000000000..059c4dfbb6547 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
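A short sketch of what the SystemPrivilege automaton above allows and carves out; the action names are illustrative strings chosen to match the listed patterns:

```java
import org.elasticsearch.xpack.core.security.authz.privilege.SystemPrivilege;

public class SystemPrivilegeExample {
    public static void main(String[] args) {
        // Internal and monitoring actions are allowed for the system user...
        System.out.println(SystemPrivilege.INSTANCE.predicate().test("internal:index/shard/recovery/start")); // true
        System.out.println(SystemPrivilege.INSTANCE.predicate().test("cluster:monitor/nodes/stats"));         // true
        // ...but proxied internal actions and regular user actions are not.
        System.out.println(SystemPrivilege.INSTANCE.predicate().test("internal:transport/proxy/indices:data/read/get")); // false
        System.out.println(SystemPrivilege.INSTANCE.predicate().test("indices:data/write/index"));            // false
    }
}
```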
+ */ +package org.elasticsearch.xpack.core.security.authz.store; + +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkAction; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.support.MetadataUtils; +import org.elasticsearch.xpack.core.security.user.KibanaUser; +import org.elasticsearch.xpack.core.security.user.UsernamesField; +import org.elasticsearch.xpack.core.watcher.execution.TriggeredWatchStoreField; +import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; +import org.elasticsearch.xpack.core.watcher.watch.Watch; + +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Set; + +public class ReservedRolesStore { + + public static final RoleDescriptor SUPERUSER_ROLE_DESCRIPTOR = new RoleDescriptor("superuser", + new String[] { "all" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("all").build()}, + new String[] { "*" }, + MetadataUtils.DEFAULT_RESERVED_METADATA); + public static final Role SUPERUSER_ROLE = Role.builder(SUPERUSER_ROLE_DESCRIPTOR, null).build(); + private static final Map RESERVED_ROLES = initializeReservedRoles(); + + private static Map initializeReservedRoles() { + return MapBuilder.newMapBuilder() + .put("superuser", new RoleDescriptor("superuser", new String[] { "all" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("all").build()}, + new String[] { "*" }, + MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put("transport_client", new RoleDescriptor("transport_client", new String[] { "transport_client" }, null, null, + MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put("kibana_user", new RoleDescriptor("kibana_user", null, new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices(".kibana*").privileges("manage", "read", "index", "delete") + .build() }, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put("monitoring_user", new RoleDescriptor("monitoring_user", + new String[] { "cluster:monitor/main" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder() + .indices(".monitoring-*").privileges("read", "read_cross_cluster").build() + }, + null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put("remote_monitoring_agent", new RoleDescriptor("remote_monitoring_agent", + new String[] { + "manage_index_templates", "manage_ingest_pipelines", "monitor", + "cluster:monitor/xpack/watcher/watch/get", + "cluster:admin/xpack/watcher/watch/put", + "cluster:admin/xpack/watcher/watch/delete", + }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices(".monitoring-*").privileges("all").build() }, + null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put("ingest_admin", new RoleDescriptor("ingest_admin", new String[] { "manage_index_templates", "manage_pipeline" }, + null, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + // reporting_user doesn't have any privileges in Elasticsearch, and Kibana authorizes privileges based on this role + .put("reporting_user", new RoleDescriptor("reporting_user", null, null, + null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put("kibana_dashboard_only_user", new RoleDescriptor( + "kibana_dashboard_only_user", + null, + new 
RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder() + .indices(".kibana*").privileges("read", "view_index_metadata").build() + }, + null, + MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put(KibanaUser.ROLE_NAME, new RoleDescriptor(KibanaUser.ROLE_NAME, + new String[] { "monitor", "manage_index_templates", MonitoringBulkAction.NAME, "manage_saml" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices(".kibana*", ".reporting-*").privileges("all").build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices(".monitoring-*").privileges("read", "read_cross_cluster").build() + }, + null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put("logstash_system", new RoleDescriptor("logstash_system", new String[] { "monitor", MonitoringBulkAction.NAME}, + null, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put(UsernamesField.BEATS_ROLE, new RoleDescriptor(UsernamesField.BEATS_ROLE, + new String[] { "monitor", MonitoringBulkAction.NAME}, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put("machine_learning_user", new RoleDescriptor("machine_learning_user", new String[] { "monitor_ml" }, + new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(".ml-anomalies*", + ".ml-notifications").privileges("view_index_metadata", "read").build() }, + null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put("machine_learning_admin", new RoleDescriptor("machine_learning_admin", new String[] { "manage_ml" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices(".ml-*").privileges("view_index_metadata", "read") + .build() }, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put("watcher_admin", new RoleDescriptor("watcher_admin", new String[] { "manage_watcher" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices(Watch.INDEX, TriggeredWatchStoreField.INDEX_NAME, + HistoryStoreField.INDEX_PREFIX + "*").privileges("read").build() }, + null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put("watcher_user", new RoleDescriptor("watcher_user", new String[] { "monitor_watcher" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices(Watch.INDEX) + .privileges("read") + .build(), + RoleDescriptor.IndicesPrivileges.builder().indices(HistoryStoreField.INDEX_PREFIX + "*") + .privileges("read") + .build() }, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put("logstash_admin", new RoleDescriptor("logstash_admin", null, new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices(".logstash*") + .privileges("create", "delete", "index", "manage", "read").build() }, + null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put("rollup_user", new RoleDescriptor("rollup_user", new String[] { "monitor_rollup" }, + null, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put("rollup_admin", new RoleDescriptor("rollup_admin", new String[] { "manage_rollup" }, + null, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + .immutableMap(); + } + + public static boolean isReserved(String role) { + return RESERVED_ROLES.containsKey(role) || UsernamesField.SYSTEM_ROLE.equals(role) || UsernamesField.XPACK_ROLE.equals(role); + } + + public Map usageStats() { + return Collections.emptyMap(); + } + + public RoleDescriptor roleDescriptor(String role) { + return RESERVED_ROLES.get(role); + } + + public Collection roleDescriptors() { + return RESERVED_ROLES.values(); + } + + 
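A minimal sketch of querying the reserved roles registry defined above; it only uses the isReserved and roleDescriptor members shown in this diff:

```java
import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore;

public class ReservedRolesExample {
    public static void main(String[] args) {
        // Built-in role names cannot be redefined by users.
        System.out.println(ReservedRolesStore.isReserved("superuser"));   // true
        System.out.println(ReservedRolesStore.isReserved("my_app_role")); // false
        // Descriptors for the built-in roles can be looked up by name.
        System.out.println(new ReservedRolesStore().roleDescriptor("kibana_user").getName()); // "kibana_user"
    }
}
```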
public static Set names() { + return RESERVED_ROLES.keySet(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/client/SecurityClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/client/SecurityClient.java new file mode 100644 index 0000000000000..af1cfe0579e03 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/client/SecurityClient.java @@ -0,0 +1,319 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.client; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheRequest; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheRequestBuilder; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheResponse; +import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheAction; +import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheRequest; +import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheRequestBuilder; +import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheResponse; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleAction; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequest; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequestBuilder; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleResponse; +import org.elasticsearch.xpack.core.security.action.role.GetRolesAction; +import org.elasticsearch.xpack.core.security.action.role.GetRolesRequest; +import org.elasticsearch.xpack.core.security.action.role.GetRolesRequestBuilder; +import org.elasticsearch.xpack.core.security.action.role.GetRolesResponse; +import org.elasticsearch.xpack.core.security.action.role.PutRoleAction; +import org.elasticsearch.xpack.core.security.action.role.PutRoleRequest; +import org.elasticsearch.xpack.core.security.action.role.PutRoleRequestBuilder; +import org.elasticsearch.xpack.core.security.action.role.PutRoleResponse; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingRequestBuilder; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsRequest; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsRequestBuilder; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsResponse; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequestBuilder; +import org.elasticsearch.xpack.core.security.action.saml.SamlAuthenticateAction; +import org.elasticsearch.xpack.core.security.action.saml.SamlAuthenticateRequest; +import 
org.elasticsearch.xpack.core.security.action.saml.SamlAuthenticateRequestBuilder; +import org.elasticsearch.xpack.core.security.action.saml.SamlAuthenticateResponse; +import org.elasticsearch.xpack.core.security.action.saml.SamlPrepareAuthenticationRequestBuilder; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenAction; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenRequest; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenRequestBuilder; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenResponse; +import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenAction; +import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenRequest; +import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenRequestBuilder; +import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenResponse; +import org.elasticsearch.xpack.core.security.action.token.RefreshTokenAction; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordAction; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordRequest; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordRequestBuilder; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordResponse; +import org.elasticsearch.xpack.core.security.action.user.DeleteUserAction; +import org.elasticsearch.xpack.core.security.action.user.DeleteUserRequest; +import org.elasticsearch.xpack.core.security.action.user.DeleteUserRequestBuilder; +import org.elasticsearch.xpack.core.security.action.user.DeleteUserResponse; +import org.elasticsearch.xpack.core.security.action.user.GetUsersAction; +import org.elasticsearch.xpack.core.security.action.user.GetUsersRequest; +import org.elasticsearch.xpack.core.security.action.user.GetUsersRequestBuilder; +import org.elasticsearch.xpack.core.security.action.user.GetUsersResponse; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesAction; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequest; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequestBuilder; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse; +import org.elasticsearch.xpack.core.security.action.user.PutUserAction; +import org.elasticsearch.xpack.core.security.action.user.PutUserRequest; +import org.elasticsearch.xpack.core.security.action.user.PutUserRequestBuilder; +import org.elasticsearch.xpack.core.security.action.user.PutUserResponse; +import org.elasticsearch.xpack.core.security.action.user.SetEnabledAction; +import org.elasticsearch.xpack.core.security.action.user.SetEnabledRequest; +import org.elasticsearch.xpack.core.security.action.user.SetEnabledRequestBuilder; +import org.elasticsearch.xpack.core.security.action.user.SetEnabledResponse; + +import java.io.IOException; +import java.util.List; + +/** + * A wrapper to elasticsearch clients that exposes all Security related APIs + */ +public class SecurityClient { + + private final ElasticsearchClient client; + + public SecurityClient(ElasticsearchClient client) { + this.client = client; + } + + /**************** + * authc things * + ****************/ + + /** + * Clears the realm caches. It's possible to clear all user entries from all realms in the cluster or alternatively + * select the realms (by their unique names) and/or users (by their usernames) that should be evicted. 
+ */ + @SuppressWarnings("unchecked") + public ClearRealmCacheRequestBuilder prepareClearRealmCache() { + return new ClearRealmCacheRequestBuilder(client); + } + + /** + * Clears the realm caches. It's possible to clear all user entries from all realms in the cluster or alternatively + * select the realms (by their unique names) and/or users (by their usernames) that should be evicted. + */ + @SuppressWarnings("unchecked") + public void clearRealmCache(ClearRealmCacheRequest request, ActionListener listener) { + client.execute(ClearRealmCacheAction.INSTANCE, request, listener); + } + + /** + * Clears the realm caches. It's possible to clear all user entries from all realms in the cluster or alternatively + * select the realms (by their unique names) and/or users (by their usernames) that should be evicted. + */ + @SuppressWarnings("unchecked") + public ActionFuture clearRealmCache(ClearRealmCacheRequest request) { + return client.execute(ClearRealmCacheAction.INSTANCE, request); + } + + /**************** + * authz things * + ****************/ + + /** + * Clears the roles cache. This API only works for the native roles that are stored in an elasticsearch index. It is + * possible to clear the cache of all roles or to specify the names of individual roles that should have their cache + * cleared. + */ + public ClearRolesCacheRequestBuilder prepareClearRolesCache() { + return new ClearRolesCacheRequestBuilder(client); + } + + /** + * Clears the roles cache. This API only works for the native roles that are stored in an elasticsearch index. It is + * possible to clear the cache of all roles or to specify the names of individual roles that should have their cache + * cleared. + */ + public void clearRolesCache(ClearRolesCacheRequest request, ActionListener listener) { + client.execute(ClearRolesCacheAction.INSTANCE, request, listener); + } + + /** + * Clears the roles cache. This API only works for the native roles that are stored in an elasticsearch index. It is + * possible to clear the cache of all roles or to specify the names of individual roles that should have their cache + * cleared. + */ + public ActionFuture clearRolesCache(ClearRolesCacheRequest request) { + return client.execute(ClearRolesCacheAction.INSTANCE, request); + } + + /** + * Permissions / Privileges + */ + public HasPrivilegesRequestBuilder prepareHasPrivileges(String username) { + return new HasPrivilegesRequestBuilder(client).username(username); + } + + public HasPrivilegesRequestBuilder prepareHasPrivileges(String username, BytesReference source, XContentType xContentType) + throws IOException { + return new HasPrivilegesRequestBuilder(client).source(username, source, xContentType); + } + + public void hasPrivileges(HasPrivilegesRequest request, ActionListener listener) { + client.execute(HasPrivilegesAction.INSTANCE, request, listener); + } + + /** User Management */ + + public GetUsersRequestBuilder prepareGetUsers(String... 
usernames) { + return new GetUsersRequestBuilder(client).usernames(usernames); + } + + public void getUsers(GetUsersRequest request, ActionListener listener) { + client.execute(GetUsersAction.INSTANCE, request, listener); + } + + public DeleteUserRequestBuilder prepareDeleteUser(String username) { + return new DeleteUserRequestBuilder(client).username(username); + } + + public void deleteUser(DeleteUserRequest request, ActionListener listener) { + client.execute(DeleteUserAction.INSTANCE, request, listener); + } + + public PutUserRequestBuilder preparePutUser(String username, BytesReference source, XContentType xContentType) throws IOException { + return new PutUserRequestBuilder(client).source(username, source, xContentType); + } + + public PutUserRequestBuilder preparePutUser(String username, char[] password, String... roles) { + return new PutUserRequestBuilder(client).username(username).password(password).roles(roles); + } + + public void putUser(PutUserRequest request, ActionListener listener) { + client.execute(PutUserAction.INSTANCE, request, listener); + } + + /** + * Populates the {@link ChangePasswordRequest} with the username and password. Note: the passed in char[] will be cleared by this + * method. + */ + public ChangePasswordRequestBuilder prepareChangePassword(String username, char[] password) { + return new ChangePasswordRequestBuilder(client).username(username).password(password); + } + + public ChangePasswordRequestBuilder prepareChangePassword(String username, BytesReference source, XContentType xContentType) + throws IOException { + return new ChangePasswordRequestBuilder(client).username(username).source(source, xContentType); + } + + public void changePassword(ChangePasswordRequest request, ActionListener listener) { + client.execute(ChangePasswordAction.INSTANCE, request, listener); + } + + public SetEnabledRequestBuilder prepareSetEnabled(String username, boolean enabled) { + return new SetEnabledRequestBuilder(client).username(username).enabled(enabled); + } + + public void setEnabled(SetEnabledRequest request, ActionListener listener) { + client.execute(SetEnabledAction.INSTANCE, request, listener); + } + + /** Role Management */ + + public GetRolesRequestBuilder prepareGetRoles(String... names) { + return new GetRolesRequestBuilder(client).names(names); + } + + public void getRoles(GetRolesRequest request, ActionListener listener) { + client.execute(GetRolesAction.INSTANCE, request, listener); + } + + public DeleteRoleRequestBuilder prepareDeleteRole(String name) { + return new DeleteRoleRequestBuilder(client).name(name); + } + + public void deleteRole(DeleteRoleRequest request, ActionListener listener) { + client.execute(DeleteRoleAction.INSTANCE, request, listener); + } + + public PutRoleRequestBuilder preparePutRole(String name) { + return new PutRoleRequestBuilder(client).name(name); + } + + public PutRoleRequestBuilder preparePutRole(String name, BytesReference source, XContentType xContentType) throws IOException { + return new PutRoleRequestBuilder(client).source(name, source, xContentType); + } + + public void putRole(PutRoleRequest request, ActionListener listener) { + client.execute(PutRoleAction.INSTANCE, request, listener); + } + + /** Role Mappings */ + + public GetRoleMappingsRequestBuilder prepareGetRoleMappings(String... 
names) { + return new GetRoleMappingsRequestBuilder(client, GetRoleMappingsAction.INSTANCE) + .names(names); + } + + public void getRoleMappings(GetRoleMappingsRequest request, + ActionListener listener) { + client.execute(GetRoleMappingsAction.INSTANCE, request, listener); + } + + public PutRoleMappingRequestBuilder preparePutRoleMapping( + String name, BytesReference content, XContentType xContentType) throws IOException { + return new PutRoleMappingRequestBuilder(client, PutRoleMappingAction.INSTANCE).source(name, content, xContentType); + } + + public DeleteRoleMappingRequestBuilder prepareDeleteRoleMapping(String name) { + return new DeleteRoleMappingRequestBuilder(client, DeleteRoleMappingAction.INSTANCE) + .name(name); + } + + public CreateTokenRequestBuilder prepareCreateToken() { + return new CreateTokenRequestBuilder(client, CreateTokenAction.INSTANCE); + } + + public void createToken(CreateTokenRequest request, ActionListener listener) { + client.execute(CreateTokenAction.INSTANCE, request, listener); + } + + public InvalidateTokenRequestBuilder prepareInvalidateToken(String token) { + return new InvalidateTokenRequestBuilder(client).setTokenString(token); + } + + public void invalidateToken(InvalidateTokenRequest request, ActionListener listener) { + client.execute(InvalidateTokenAction.INSTANCE, request, listener); + } + + public SamlAuthenticateRequestBuilder prepareSamlAuthenticate(byte[] xmlContent, List validIds) { + final SamlAuthenticateRequestBuilder builder = new SamlAuthenticateRequestBuilder(client); + builder.saml(xmlContent); + builder.validRequestIds(validIds); + return builder; + } + + public void samlAuthenticate(SamlAuthenticateRequest request, ActionListener< SamlAuthenticateResponse> listener) { + client.execute(SamlAuthenticateAction.INSTANCE, request, listener); + } + + public SamlPrepareAuthenticationRequestBuilder prepareSamlPrepareAuthentication() { + return new SamlPrepareAuthenticationRequestBuilder(client); + } + + public CreateTokenRequestBuilder prepareRefreshToken(String refreshToken) { + return new CreateTokenRequestBuilder(client, RefreshTokenAction.INSTANCE) + .setRefreshToken(refreshToken) + .setGrantType("refresh_token"); + } + + public void refreshToken(CreateTokenRequest request, ActionListener listener) { + client.execute(RefreshTokenAction.INSTANCE, request, listener); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/index/IndexAuditTrailField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/index/IndexAuditTrailField.java new file mode 100644 index 0000000000000..340dad82844a0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/index/IndexAuditTrailField.java @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.index; + +public final class IndexAuditTrailField { + public static final String INDEX_NAME_PREFIX = ".security_audit_log"; + + private IndexAuditTrailField() {} +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/rest/RestRequestFilter.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/rest/RestRequestFilter.java new file mode 100644 index 0000000000000..aec5b3a04d255 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/rest/RestRequestFilter.java @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.rest; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.rest.RestRequest; + +import java.io.IOException; +import java.net.SocketAddress; +import java.util.Map; +import java.util.Set; + +/** + * Identifies an object that supplies a filter for the content of a {@link RestRequest}. This interface should be implemented by a + * {@link org.elasticsearch.rest.RestHandler} that expects there will be sensitive content in the body of the request such as a password + */ +public interface RestRequestFilter { + + /** + * Wraps the RestRequest and returns a version that provides the filtered content + */ + default RestRequest getFilteredRequest(RestRequest restRequest) throws IOException { + Set fields = getFilteredFields(); + if (restRequest.hasContent() && fields.isEmpty() == false) { + return new RestRequest(restRequest.getXContentRegistry(), restRequest.params(), restRequest.path(), restRequest.getHeaders()) { + + private BytesReference filteredBytes = null; + + @Override + public Method method() { + return restRequest.method(); + } + + @Override + public String uri() { + return restRequest.uri(); + } + + @Override + public boolean hasContent() { + return true; + } + + @Nullable + @Override + public SocketAddress getRemoteAddress() { + return restRequest.getRemoteAddress(); + } + + @Nullable + @Override + public SocketAddress getLocalAddress() { + return restRequest.getLocalAddress(); + } + + @Override + public BytesReference content() { + if (filteredBytes == null) { + BytesReference content = restRequest.content(); + Tuple> result = XContentHelper.convertToMap(content, true); + Map transformedSource = XContentMapValues.filter(result.v2(), null, + fields.toArray(Strings.EMPTY_ARRAY)); + try { + XContentBuilder xContentBuilder = XContentBuilder.builder(result.v1().xContent()).map(transformedSource); + filteredBytes = BytesReference.bytes(xContentBuilder); + } catch (IOException e) { + throw new ElasticsearchException("failed to parse request", e); + } + } + return filteredBytes; + } + }; + } else { + return restRequest; + } + } + + /** + * The list of fields that should be filtered. 
This can be a dot separated pattern to match sub objects and also supports wildcards + */ + Set getFilteredFields(); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java new file mode 100644 index 0000000000000..36e0b8ddb009b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java @@ -0,0 +1,148 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.support; + +import org.apache.lucene.util.automaton.Automata; +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.CharacterRunAutomaton; +import org.apache.lucene.util.automaton.RegExp; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.function.Predicate; + +import static org.apache.lucene.util.automaton.MinimizationOperations.minimize; +import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES; +import static org.apache.lucene.util.automaton.Operations.concatenate; +import static org.apache.lucene.util.automaton.Operations.minus; +import static org.apache.lucene.util.automaton.Operations.union; +import static org.elasticsearch.common.Strings.collectionToDelimitedString; + +public final class Automatons { + + public static final Automaton EMPTY = Automata.makeEmpty(); + public static final Automaton MATCH_ALL = Automata.makeAnyString(); + + static final char WILDCARD_STRING = '*'; // String equality with support for wildcards + static final char WILDCARD_CHAR = '?'; // Char equality with support for wildcards + static final char WILDCARD_ESCAPE = '\\'; // Escape character + + private Automatons() { + } + + /** + * Builds and returns an automaton that will represent the union of all the given patterns. + */ + public static Automaton patterns(String... patterns) { + return patterns(Arrays.asList(patterns)); + } + + /** + * Builds and returns an automaton that will represent the union of all the given patterns. + */ + public static Automaton patterns(Collection patterns) { + if (patterns.isEmpty()) { + return EMPTY; + } + Automaton automaton = null; + for (String pattern : patterns) { + final Automaton patternAutomaton = minimize(pattern(pattern), DEFAULT_MAX_DETERMINIZED_STATES); + automaton = automaton == null ? patternAutomaton : unionAndMinimize(Arrays.asList(automaton, patternAutomaton)); + } + // the automaton is always minimized and deterministic + return automaton; + } + + /** + * Builds and returns an automaton that represents the given pattern. + */ + static Automaton pattern(String pattern) { + if (pattern.startsWith("/")) { // it's a lucene regexp + if (pattern.length() == 1 || !pattern.endsWith("/")) { + throw new IllegalArgumentException("invalid pattern [" + pattern + "]. patterns starting with '/' " + + "indicate regular expression pattern and therefore must also end with '/'." 
+ + " other patterns (those that do not start with '/') will be treated as simple wildcard patterns"); + } + String regex = pattern.substring(1, pattern.length() - 1); + return new RegExp(regex).toAutomaton(); + } else if (pattern.equals("*")) { + return MATCH_ALL; + } else { + return wildcard(pattern); + } + } + + /** + * Builds and returns an automaton that represents the given pattern. + */ + @SuppressWarnings("fallthrough") // explicit fallthrough at end of switch + static Automaton wildcard(String text) { + List automata = new ArrayList<>(); + for (int i = 0; i < text.length();) { + final char c = text.charAt(i); + int length = 1; + switch(c) { + case WILDCARD_STRING: + automata.add(Automata.makeAnyString()); + break; + case WILDCARD_CHAR: + automata.add(Automata.makeAnyChar()); + break; + case WILDCARD_ESCAPE: + // add the next codepoint instead, if it exists + if (i + length < text.length()) { + final char nextChar = text.charAt(i + length); + length += 1; + automata.add(Automata.makeChar(nextChar)); + break; + } // else fallthru, lenient parsing with a trailing \ + default: + automata.add(Automata.makeChar(c)); + } + i += length; + } + return concatenate(automata); + } + + public static Automaton unionAndMinimize(Collection automata) { + Automaton res = union(automata); + return minimize(res, DEFAULT_MAX_DETERMINIZED_STATES); + } + + public static Automaton minusAndMinimize(Automaton a1, Automaton a2) { + Automaton res = minus(a1, a2, DEFAULT_MAX_DETERMINIZED_STATES); + return minimize(res, DEFAULT_MAX_DETERMINIZED_STATES); + } + + public static Predicate predicate(String... patterns) { + return predicate(Arrays.asList(patterns)); + } + + public static Predicate predicate(Collection patterns) { + return predicate(patterns(patterns), collectionToDelimitedString(patterns, "|")); + } + + public static Predicate predicate(Automaton automaton) { + return predicate(automaton, "Predicate for " + automaton); + } + + private static Predicate predicate(Automaton automaton, final String toString) { + CharacterRunAutomaton runAutomaton = new CharacterRunAutomaton(automaton, DEFAULT_MAX_DETERMINIZED_STATES); + return new Predicate() { + @Override + public boolean test(String s) { + return runAutomaton.run(s); + } + + @Override + public String toString() { + return toString; + } + }; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Exceptions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Exceptions.java new file mode 100644 index 0000000000000..9cf09482a5268 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Exceptions.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.support; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.XPackField; + +public class Exceptions { + + private Exceptions() { + } + + public static ElasticsearchSecurityException authenticationError(String msg, Throwable cause, Object... 
args) { + ElasticsearchSecurityException e = new ElasticsearchSecurityException(msg, RestStatus.UNAUTHORIZED, cause, args); + e.addHeader("WWW-Authenticate", "Basic realm=\"" + XPackField.SECURITY + "\" charset=\"UTF-8\""); + return e; + } + + public static ElasticsearchSecurityException authenticationError(String msg, Object... args) { + ElasticsearchSecurityException e = new ElasticsearchSecurityException(msg, RestStatus.UNAUTHORIZED, args); + e.addHeader("WWW-Authenticate", "Basic realm=\"" + XPackField.SECURITY + "\" charset=\"UTF-8\""); + return e; + } + + public static ElasticsearchSecurityException authorizationError(String msg, Object... args) { + return new ElasticsearchSecurityException(msg, RestStatus.FORBIDDEN, args); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/MetadataUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/MetadataUtils.java new file mode 100644 index 0000000000000..ff457cb8d06ec --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/MetadataUtils.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.support; + +import java.lang.reflect.Array; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; + +public class MetadataUtils { + + public static final String RESERVED_PREFIX = "_"; + public static final String RESERVED_METADATA_KEY = RESERVED_PREFIX + "reserved"; + public static final Map DEFAULT_RESERVED_METADATA = Collections.singletonMap(RESERVED_METADATA_KEY, true); + + private MetadataUtils() { + } + + public static void writeValue(StringBuilder sb, Object object) { + if (object == null) { + sb.append(object); + } else if (object instanceof Map) { + sb.append("{"); + for (Map.Entry entry : ((Map) object).entrySet()) { + sb.append(entry.getKey()).append("="); + writeValue(sb, entry.getValue()); + } + sb.append("}"); + + } else if (object instanceof Collection) { + sb.append("["); + boolean first = true; + for (Object item : (Collection) object) { + if (!first) { + sb.append(","); + } + writeValue(sb, item); + first = false; + } + sb.append("]"); + } else if (object.getClass().isArray()) { + sb.append("["); + for (int i = 0; i < Array.getLength(object); i++) { + if (i != 0) { + sb.append(","); + } + writeValue(sb, Array.get(object, i)); + } + sb.append("]"); + } else { + sb.append(object); + } + } + + public static void verifyNoReservedMetadata(Map metadata) { + for (String key : metadata.keySet()) { + if (key.startsWith(RESERVED_PREFIX)) { + throw new IllegalArgumentException("invalid user metadata. 
[" + key + "] is a reserved for internal use"); + } + } + } + + public static boolean containsReservedMetadata(Map metadata) { + for (String key : metadata.keySet()) { + if (key.startsWith(RESERVED_PREFIX)) { + return true; + } + } + return false; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/NoOpLogger.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/NoOpLogger.java new file mode 100644 index 0000000000000..9338b2242d1e5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/NoOpLogger.java @@ -0,0 +1,2231 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.support; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.Marker; +import org.apache.logging.log4j.message.EntryMessage; +import org.apache.logging.log4j.message.Message; +import org.apache.logging.log4j.message.MessageFactory; +import org.apache.logging.log4j.util.MessageSupplier; +import org.apache.logging.log4j.util.Supplier; + +/** + * A logger that doesn't log anything. + */ +public class NoOpLogger implements Logger { + + public static NoOpLogger INSTANCE = new NoOpLogger(); + + private NoOpLogger() { + + } + + @Override + public void catching(Level level, Throwable t) { + + } + + @Override + public void catching(Throwable t) { + + } + + @Override + public void debug(Marker marker, Message msg) { + + } + + @Override + public void debug(Marker marker, Message msg, Throwable t) { + + } + + @Override + public void debug(Marker marker, MessageSupplier msgSupplier) { + + } + + @Override + public void debug(Marker marker, MessageSupplier msgSupplier, Throwable t) { + + } + + @Override + public void debug(Marker marker, CharSequence message) { + + } + + @Override + public void debug(Marker marker, CharSequence message, Throwable t) { + + } + + @Override + public void debug(Marker marker, Object message) { + + } + + @Override + public void debug(Marker marker, Object message, Throwable t) { + + } + + @Override + public void debug(Marker marker, String message) { + + } + + @Override + public void debug(Marker marker, String message, Object... params) { + + } + + @Override + public void debug(Marker marker, String message, Supplier... paramSuppliers) { + + } + + @Override + public void debug(Marker marker, String message, Throwable t) { + + } + + @Override + public void debug(Marker marker, Supplier msgSupplier) { + + } + + @Override + public void debug(Marker marker, Supplier msgSupplier, Throwable t) { + + } + + @Override + public void debug(Message msg) { + + } + + @Override + public void debug(Message msg, Throwable t) { + + } + + @Override + public void debug(MessageSupplier msgSupplier) { + + } + + @Override + public void debug(MessageSupplier msgSupplier, Throwable t) { + + } + + @Override + public void debug(CharSequence message) { + + } + + @Override + public void debug(CharSequence message, Throwable t) { + + } + + @Override + public void debug(Object message) { + + } + + @Override + public void debug(Object message, Throwable t) { + + } + + @Override + public void debug(String message) { + + } + + @Override + public void debug(String message, Object... 
params) { + + } + + @Override + public void debug(String message, Supplier... paramSuppliers) { + + } + + @Override + public void debug(String message, Throwable t) { + + } + + @Override + public void debug(Supplier msgSupplier) { + + } + + @Override + public void debug(Supplier msgSupplier, Throwable t) { + + } + + @Override + public void debug(Marker marker, String message, Object p0) { + + } + + @Override + public void debug(Marker marker, String message, Object p0, Object p1) { + + } + + @Override + public void debug(Marker marker, String message, Object p0, Object p1, Object p2) { + + } + + @Override + public void debug(Marker marker, String message, Object p0, Object p1, Object p2, Object p3) { + + } + + @Override + public void debug(Marker marker, String message, Object p0, Object p1, Object p2, Object p3, Object p4) { + + } + + @Override + public void debug(Marker marker, String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5) { + + } + + @Override + public void debug(Marker marker, String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6) { + + } + + @Override + public void debug( + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7) { + + } + + @Override + public void debug( + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8) { + + } + + @Override + public void debug( + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8, + Object p9) { + + } + + @Override + public void debug(String message, Object p0) { + + } + + @Override + public void debug(String message, Object p0, Object p1) { + + } + + @Override + public void debug(String message, Object p0, Object p1, Object p2) { + + } + + @Override + public void debug(String message, Object p0, Object p1, Object p2, Object p3) { + + } + + @Override + public void debug(String message, Object p0, Object p1, Object p2, Object p3, Object p4) { + + } + + @Override + public void debug(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5) { + + } + + @Override + public void debug(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6) { + + } + + @Override + public void debug(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7) { + + } + + @Override + public void debug(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7, Object p8) { + + } + + @Override + public void debug( + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8, + Object p9) { + + } + + @Override + public void entry() { + + } + + @Override + public void entry(Object... 
params) { + + } + + @Override + public void error(Marker marker, Message msg) { + + } + + @Override + public void error(Marker marker, Message msg, Throwable t) { + + } + + @Override + public void error(Marker marker, MessageSupplier msgSupplier) { + + } + + @Override + public void error(Marker marker, MessageSupplier msgSupplier, Throwable t) { + + } + + @Override + public void error(Marker marker, CharSequence message) { + + } + + @Override + public void error(Marker marker, CharSequence message, Throwable t) { + + } + + @Override + public void error(Marker marker, Object message) { + + } + + @Override + public void error(Marker marker, Object message, Throwable t) { + + } + + @Override + public void error(Marker marker, String message) { + + } + + @Override + public void error(Marker marker, String message, Object... params) { + + } + + @Override + public void error(Marker marker, String message, Supplier... paramSuppliers) { + + } + + @Override + public void error(Marker marker, String message, Throwable t) { + + } + + @Override + public void error(Marker marker, Supplier msgSupplier) { + + } + + @Override + public void error(Marker marker, Supplier msgSupplier, Throwable t) { + + } + + @Override + public void error(Message msg) { + + } + + @Override + public void error(Message msg, Throwable t) { + + } + + @Override + public void error(MessageSupplier msgSupplier) { + + } + + @Override + public void error(MessageSupplier msgSupplier, Throwable t) { + + } + + @Override + public void error(CharSequence message) { + + } + + @Override + public void error(CharSequence message, Throwable t) { + + } + + @Override + public void error(Object message) { + + } + + @Override + public void error(Object message, Throwable t) { + + } + + @Override + public void error(String message) { + + } + + @Override + public void error(String message, Object... params) { + + } + + @Override + public void error(String message, Supplier... 
paramSuppliers) { + + } + + @Override + public void error(String message, Throwable t) { + + } + + @Override + public void error(Supplier msgSupplier) { + + } + + @Override + public void error(Supplier msgSupplier, Throwable t) { + + } + + @Override + public void error(Marker marker, String message, Object p0) { + + } + + @Override + public void error(Marker marker, String message, Object p0, Object p1) { + + } + + @Override + public void error(Marker marker, String message, Object p0, Object p1, Object p2) { + + } + + @Override + public void error(Marker marker, String message, Object p0, Object p1, Object p2, Object p3) { + + } + + @Override + public void error(Marker marker, String message, Object p0, Object p1, Object p2, Object p3, Object p4) { + + } + + @Override + public void error(Marker marker, String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5) { + + } + + @Override + public void error(Marker marker, String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6) { + + } + + @Override + public void error( + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7) { + + } + + @Override + public void error( + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8) { + + } + + @Override + public void error( + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8, + Object p9) { + + } + + @Override + public void error(String message, Object p0) { + + } + + @Override + public void error(String message, Object p0, Object p1) { + + } + + @Override + public void error(String message, Object p0, Object p1, Object p2) { + + } + + @Override + public void error(String message, Object p0, Object p1, Object p2, Object p3) { + + } + + @Override + public void error(String message, Object p0, Object p1, Object p2, Object p3, Object p4) { + + } + + @Override + public void error(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5) { + + } + + @Override + public void error(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6) { + + } + + @Override + public void error(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7) { + + } + + @Override + public void error(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7, Object p8) { + + } + + @Override + public void error( + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8, + Object p9) { + + } + + @Override + public void exit() { + + } + + @Override + public R exit(R result) { + return null; + } + + @Override + public void fatal(Marker marker, Message msg) { + + } + + @Override + public void fatal(Marker marker, Message msg, Throwable t) { + + } + + @Override + public void fatal(Marker marker, MessageSupplier msgSupplier) { + + } + + @Override + public void fatal(Marker marker, MessageSupplier msgSupplier, Throwable t) { + + } + + @Override + public void fatal(Marker marker, CharSequence message) { + + } + + @Override + public void fatal(Marker marker, CharSequence message, Throwable t) { + + } + + @Override + public void fatal(Marker marker, Object message) { + + } + + @Override + 
public void fatal(Marker marker, Object message, Throwable t) { + + } + + @Override + public void fatal(Marker marker, String message) { + + } + + @Override + public void fatal(Marker marker, String message, Object... params) { + + } + + @Override + public void fatal(Marker marker, String message, Supplier... paramSuppliers) { + + } + + @Override + public void fatal(Marker marker, String message, Throwable t) { + + } + + @Override + public void fatal(Marker marker, Supplier msgSupplier) { + + } + + @Override + public void fatal(Marker marker, Supplier msgSupplier, Throwable t) { + + } + + @Override + public void fatal(Message msg) { + + } + + @Override + public void fatal(Message msg, Throwable t) { + + } + + @Override + public void fatal(MessageSupplier msgSupplier) { + + } + + @Override + public void fatal(MessageSupplier msgSupplier, Throwable t) { + + } + + @Override + public void fatal(CharSequence message) { + + } + + @Override + public void fatal(CharSequence message, Throwable t) { + + } + + @Override + public void fatal(Object message) { + + } + + @Override + public void fatal(Object message, Throwable t) { + + } + + @Override + public void fatal(String message) { + + } + + @Override + public void fatal(String message, Object... params) { + + } + + @Override + public void fatal(String message, Supplier... paramSuppliers) { + + } + + @Override + public void fatal(String message, Throwable t) { + + } + + @Override + public void fatal(Supplier msgSupplier) { + + } + + @Override + public void fatal(Supplier msgSupplier, Throwable t) { + + } + + @Override + public void fatal(Marker marker, String message, Object p0) { + + } + + @Override + public void fatal(Marker marker, String message, Object p0, Object p1) { + + } + + @Override + public void fatal(Marker marker, String message, Object p0, Object p1, Object p2) { + + } + + @Override + public void fatal(Marker marker, String message, Object p0, Object p1, Object p2, Object p3) { + + } + + @Override + public void fatal(Marker marker, String message, Object p0, Object p1, Object p2, Object p3, Object p4) { + + } + + @Override + public void fatal(Marker marker, String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5) { + + } + + @Override + public void fatal(Marker marker, String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6) { + + } + + @Override + public void fatal( + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7) { + + } + + @Override + public void fatal( + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8) { + + } + + @Override + public void fatal( + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8, + Object p9) { + + } + + @Override + public void fatal(String message, Object p0) { + + } + + @Override + public void fatal(String message, Object p0, Object p1) { + + } + + @Override + public void fatal(String message, Object p0, Object p1, Object p2) { + + } + + @Override + public void fatal(String message, Object p0, Object p1, Object p2, Object p3) { + + } + + @Override + public void fatal(String message, Object p0, Object p1, Object p2, Object p3, Object p4) { + + } + + @Override + public void fatal(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5) { + + } 
+ + @Override + public void fatal(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6) { + + } + + @Override + public void fatal(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7) { + + } + + @Override + public void fatal(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7, Object p8) { + + } + + @Override + public void fatal( + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8, + Object p9) { + + } + + @Override + public Level getLevel() { + return null; + } + + @Override + public MF getMessageFactory() { + return null; + } + + @Override + public String getName() { + return null; + } + + @Override + public void info(Marker marker, Message msg) { + + } + + @Override + public void info(Marker marker, Message msg, Throwable t) { + + } + + @Override + public void info(Marker marker, MessageSupplier msgSupplier) { + + } + + @Override + public void info(Marker marker, MessageSupplier msgSupplier, Throwable t) { + + } + + @Override + public void info(Marker marker, CharSequence message) { + + } + + @Override + public void info(Marker marker, CharSequence message, Throwable t) { + + } + + @Override + public void info(Marker marker, Object message) { + + } + + @Override + public void info(Marker marker, Object message, Throwable t) { + + } + + @Override + public void info(Marker marker, String message) { + + } + + @Override + public void info(Marker marker, String message, Object... params) { + + } + + @Override + public void info(Marker marker, String message, Supplier... paramSuppliers) { + + } + + @Override + public void info(Marker marker, String message, Throwable t) { + + } + + @Override + public void info(Marker marker, Supplier msgSupplier) { + + } + + @Override + public void info(Marker marker, Supplier msgSupplier, Throwable t) { + + } + + @Override + public void info(Message msg) { + + } + + @Override + public void info(Message msg, Throwable t) { + + } + + @Override + public void info(MessageSupplier msgSupplier) { + + } + + @Override + public void info(MessageSupplier msgSupplier, Throwable t) { + + } + + @Override + public void info(CharSequence message) { + + } + + @Override + public void info(CharSequence message, Throwable t) { + + } + + @Override + public void info(Object message) { + + } + + @Override + public void info(Object message, Throwable t) { + + } + + @Override + public void info(String message) { + + } + + @Override + public void info(String message, Object... params) { + + } + + @Override + public void info(String message, Supplier... 
paramSuppliers) { + + } + + @Override + public void info(String message, Throwable t) { + + } + + @Override + public void info(Supplier msgSupplier) { + + } + + @Override + public void info(Supplier msgSupplier, Throwable t) { + + } + + @Override + public void info(Marker marker, String message, Object p0) { + + } + + @Override + public void info(Marker marker, String message, Object p0, Object p1) { + + } + + @Override + public void info(Marker marker, String message, Object p0, Object p1, Object p2) { + + } + + @Override + public void info(Marker marker, String message, Object p0, Object p1, Object p2, Object p3) { + + } + + @Override + public void info(Marker marker, String message, Object p0, Object p1, Object p2, Object p3, Object p4) { + + } + + @Override + public void info(Marker marker, String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5) { + + } + + @Override + public void info(Marker marker, String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6) { + + } + + @Override + public void info( + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7) { + + } + + @Override + public void info( + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8) { + + } + + @Override + public void info( + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8, + Object p9) { + + } + + @Override + public void info(String message, Object p0) { + + } + + @Override + public void info(String message, Object p0, Object p1) { + + } + + @Override + public void info(String message, Object p0, Object p1, Object p2) { + + } + + @Override + public void info(String message, Object p0, Object p1, Object p2, Object p3) { + + } + + @Override + public void info(String message, Object p0, Object p1, Object p2, Object p3, Object p4) { + + } + + @Override + public void info(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5) { + + } + + @Override + public void info(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6) { + + } + + @Override + public void info(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7) { + + } + + @Override + public void info(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7, Object p8) { + + } + + @Override + public void info( + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8, + Object p9) { + + } + + @Override + public boolean isDebugEnabled() { + return false; + } + + @Override + public boolean isDebugEnabled(Marker marker) { + return false; + } + + @Override + public boolean isEnabled(Level level) { + return false; + } + + @Override + public boolean isEnabled(Level level, Marker marker) { + return false; + } + + @Override + public boolean isErrorEnabled() { + return false; + } + + @Override + public boolean isErrorEnabled(Marker marker) { + return false; + } + + @Override + public boolean isFatalEnabled() { + return false; + } + + @Override + public boolean isFatalEnabled(Marker marker) { + return false; + } + + @Override + public boolean isInfoEnabled() { + return false; + } + + @Override + public boolean 
isInfoEnabled(Marker marker) { + return false; + } + + @Override + public boolean isTraceEnabled() { + return false; + } + + @Override + public boolean isTraceEnabled(Marker marker) { + return false; + } + + @Override + public boolean isWarnEnabled() { + return false; + } + + @Override + public boolean isWarnEnabled(Marker marker) { + return false; + } + + @Override + public void log(Level level, Marker marker, Message msg) { + + } + + @Override + public void log(Level level, Marker marker, Message msg, Throwable t) { + + } + + @Override + public void log(Level level, Marker marker, MessageSupplier msgSupplier) { + + } + + @Override + public void log(Level level, Marker marker, MessageSupplier msgSupplier, Throwable t) { + + } + + @Override + public void log(Level level, Marker marker, CharSequence message) { + + } + + @Override + public void log(Level level, Marker marker, CharSequence message, Throwable t) { + + } + + @Override + public void log(Level level, Marker marker, Object message) { + + } + + @Override + public void log(Level level, Marker marker, Object message, Throwable t) { + + } + + @Override + public void log(Level level, Marker marker, String message) { + + } + + @Override + public void log(Level level, Marker marker, String message, Object... params) { + + } + + @Override + public void log(Level level, Marker marker, String message, Supplier... paramSuppliers) { + + } + + @Override + public void log(Level level, Marker marker, String message, Throwable t) { + + } + + @Override + public void log(Level level, Marker marker, Supplier msgSupplier) { + + } + + @Override + public void log(Level level, Marker marker, Supplier msgSupplier, Throwable t) { + + } + + @Override + public void log(Level level, Message msg) { + + } + + @Override + public void log(Level level, Message msg, Throwable t) { + + } + + @Override + public void log(Level level, MessageSupplier msgSupplier) { + + } + + @Override + public void log(Level level, MessageSupplier msgSupplier, Throwable t) { + + } + + @Override + public void log(Level level, CharSequence message) { + + } + + @Override + public void log(Level level, CharSequence message, Throwable t) { + + } + + @Override + public void log(Level level, Object message) { + + } + + @Override + public void log(Level level, Object message, Throwable t) { + + } + + @Override + public void log(Level level, String message) { + + } + + @Override + public void log(Level level, String message, Object... params) { + + } + + @Override + public void log(Level level, String message, Supplier... 
paramSuppliers) { + + } + + @Override + public void log(Level level, String message, Throwable t) { + + } + + @Override + public void log(Level level, Supplier msgSupplier) { + + } + + @Override + public void log(Level level, Supplier msgSupplier, Throwable t) { + + } + + @Override + public void log(Level level, Marker marker, String message, Object p0) { + + } + + @Override + public void log(Level level, Marker marker, String message, Object p0, Object p1) { + + } + + @Override + public void log(Level level, Marker marker, String message, Object p0, Object p1, Object p2) { + + } + + @Override + public void log(Level level, Marker marker, String message, Object p0, Object p1, Object p2, Object p3) { + + } + + @Override + public void log(Level level, Marker marker, String message, Object p0, Object p1, Object p2, Object p3, Object p4) { + + } + + @Override + public void log(Level level, Marker marker, String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5) { + + } + + @Override + public void log( + Level level, + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6) { + + } + + @Override + public void log( + Level level, + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7) { + + } + + @Override + public void log( + Level level, + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8) { + + } + + @Override + public void log( + Level level, + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8, + Object p9) { + + } + + @Override + public void log(Level level, String message, Object p0) { + + } + + @Override + public void log(Level level, String message, Object p0, Object p1) { + + } + + @Override + public void log(Level level, String message, Object p0, Object p1, Object p2) { + + } + + @Override + public void log(Level level, String message, Object p0, Object p1, Object p2, Object p3) { + + } + + @Override + public void log(Level level, String message, Object p0, Object p1, Object p2, Object p3, Object p4) { + + } + + @Override + public void log(Level level, String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5) { + + } + + @Override + public void log(Level level, String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6) { + + } + + @Override + public void log(Level level, String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7) { + + } + + @Override + public void log( + Level level, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8) { + + } + + @Override + public void log( + Level level, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8, + Object p9) { + + } + + @Override + public void printf(Level level, Marker marker, String format, Object... params) { + + } + + @Override + public void printf(Level level, String format, Object... 
params) { + + } + + @Override + public T throwing(Level level, T t) { + return null; + } + + @Override + public T throwing(T t) { + return null; + } + + @Override + public void trace(Marker marker, Message msg) { + + } + + @Override + public void trace(Marker marker, Message msg, Throwable t) { + + } + + @Override + public void trace(Marker marker, MessageSupplier msgSupplier) { + + } + + @Override + public void trace(Marker marker, MessageSupplier msgSupplier, Throwable t) { + + } + + @Override + public void trace(Marker marker, CharSequence message) { + + } + + @Override + public void trace(Marker marker, CharSequence message, Throwable t) { + + } + + @Override + public void trace(Marker marker, Object message) { + + } + + @Override + public void trace(Marker marker, Object message, Throwable t) { + + } + + @Override + public void trace(Marker marker, String message) { + + } + + @Override + public void trace(Marker marker, String message, Object... params) { + + } + + @Override + public void trace(Marker marker, String message, Supplier... paramSuppliers) { + + } + + @Override + public void trace(Marker marker, String message, Throwable t) { + + } + + @Override + public void trace(Marker marker, Supplier msgSupplier) { + + } + + @Override + public void trace(Marker marker, Supplier msgSupplier, Throwable t) { + + } + + @Override + public void trace(Message msg) { + + } + + @Override + public void trace(Message msg, Throwable t) { + + } + + @Override + public void trace(MessageSupplier msgSupplier) { + + } + + @Override + public void trace(MessageSupplier msgSupplier, Throwable t) { + + } + + @Override + public void trace(CharSequence message) { + + } + + @Override + public void trace(CharSequence message, Throwable t) { + + } + + @Override + public void trace(Object message) { + + } + + @Override + public void trace(Object message, Throwable t) { + + } + + @Override + public void trace(String message) { + + } + + @Override + public void trace(String message, Object... params) { + + } + + @Override + public void trace(String message, Supplier... 
paramSuppliers) { + + } + + @Override + public void trace(String message, Throwable t) { + + } + + @Override + public void trace(Supplier msgSupplier) { + + } + + @Override + public void trace(Supplier msgSupplier, Throwable t) { + + } + + @Override + public void trace(Marker marker, String message, Object p0) { + + } + + @Override + public void trace(Marker marker, String message, Object p0, Object p1) { + + } + + @Override + public void trace(Marker marker, String message, Object p0, Object p1, Object p2) { + + } + + @Override + public void trace(Marker marker, String message, Object p0, Object p1, Object p2, Object p3) { + + } + + @Override + public void trace(Marker marker, String message, Object p0, Object p1, Object p2, Object p3, Object p4) { + + } + + @Override + public void trace(Marker marker, String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5) { + + } + + @Override + public void trace(Marker marker, String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6) { + + } + + @Override + public void trace( + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7) { + + } + + @Override + public void trace( + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8) { + + } + + @Override + public void trace( + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8, + Object p9) { + + } + + @Override + public void trace(String message, Object p0) { + + } + + @Override + public void trace(String message, Object p0, Object p1) { + + } + + @Override + public void trace(String message, Object p0, Object p1, Object p2) { + + } + + @Override + public void trace(String message, Object p0, Object p1, Object p2, Object p3) { + + } + + @Override + public void trace(String message, Object p0, Object p1, Object p2, Object p3, Object p4) { + + } + + @Override + public void trace(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5) { + + } + + @Override + public void trace(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6) { + + } + + @Override + public void trace(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7) { + + } + + @Override + public void trace(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7, Object p8) { + + } + + @Override + public void trace( + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8, + Object p9) { + + } + + @Override + public EntryMessage traceEntry() { + return null; + } + + @Override + public EntryMessage traceEntry(String format, Object... params) { + return null; + } + + @Override + public EntryMessage traceEntry(Supplier... paramSuppliers) { + return null; + } + + @Override + public EntryMessage traceEntry(String format, Supplier... 
paramSuppliers) { + return null; + } + + @Override + public EntryMessage traceEntry(Message message) { + return null; + } + + @Override + public void traceExit() { + + } + + @Override + public R traceExit(R result) { + return null; + } + + @Override + public R traceExit(String format, R result) { + return null; + } + + @Override + public void traceExit(EntryMessage message) { + + } + + @Override + public R traceExit(EntryMessage message, R result) { + return null; + } + + @Override + public R traceExit(Message message, R result) { + return null; + } + + @Override + public void warn(Marker marker, Message msg) { + + } + + @Override + public void warn(Marker marker, Message msg, Throwable t) { + + } + + @Override + public void warn(Marker marker, MessageSupplier msgSupplier) { + + } + + @Override + public void warn(Marker marker, MessageSupplier msgSupplier, Throwable t) { + + } + + @Override + public void warn(Marker marker, CharSequence message) { + + } + + @Override + public void warn(Marker marker, CharSequence message, Throwable t) { + + } + + @Override + public void warn(Marker marker, Object message) { + + } + + @Override + public void warn(Marker marker, Object message, Throwable t) { + + } + + @Override + public void warn(Marker marker, String message) { + + } + + @Override + public void warn(Marker marker, String message, Object... params) { + + } + + @Override + public void warn(Marker marker, String message, Supplier... paramSuppliers) { + + } + + @Override + public void warn(Marker marker, String message, Throwable t) { + + } + + @Override + public void warn(Marker marker, Supplier msgSupplier) { + + } + + @Override + public void warn(Marker marker, Supplier msgSupplier, Throwable t) { + + } + + @Override + public void warn(Message msg) { + + } + + @Override + public void warn(Message msg, Throwable t) { + + } + + @Override + public void warn(MessageSupplier msgSupplier) { + + } + + @Override + public void warn(MessageSupplier msgSupplier, Throwable t) { + + } + + @Override + public void warn(CharSequence message) { + + } + + @Override + public void warn(CharSequence message, Throwable t) { + + } + + @Override + public void warn(Object message) { + + } + + @Override + public void warn(Object message, Throwable t) { + + } + + @Override + public void warn(String message) { + + } + + @Override + public void warn(String message, Object... params) { + + } + + @Override + public void warn(String message, Supplier... 
paramSuppliers) { + + } + + @Override + public void warn(String message, Throwable t) { + + } + + @Override + public void warn(Supplier msgSupplier) { + + } + + @Override + public void warn(Supplier msgSupplier, Throwable t) { + + } + + @Override + public void warn(Marker marker, String message, Object p0) { + + } + + @Override + public void warn(Marker marker, String message, Object p0, Object p1) { + + } + + @Override + public void warn(Marker marker, String message, Object p0, Object p1, Object p2) { + + } + + @Override + public void warn(Marker marker, String message, Object p0, Object p1, Object p2, Object p3) { + + } + + @Override + public void warn(Marker marker, String message, Object p0, Object p1, Object p2, Object p3, Object p4) { + + } + + @Override + public void warn(Marker marker, String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5) { + + } + + @Override + public void warn(Marker marker, String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6) { + + } + + @Override + public void warn( + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7) { + + } + + @Override + public void warn( + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8) { + + } + + @Override + public void warn( + Marker marker, + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8, + Object p9) { + + } + + @Override + public void warn(String message, Object p0) { + + } + + @Override + public void warn(String message, Object p0, Object p1) { + + } + + @Override + public void warn(String message, Object p0, Object p1, Object p2) { + + } + + @Override + public void warn(String message, Object p0, Object p1, Object p2, Object p3) { + + } + + @Override + public void warn(String message, Object p0, Object p1, Object p2, Object p3, Object p4) { + + } + + @Override + public void warn(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5) { + + } + + @Override + public void warn(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6) { + + } + + @Override + public void warn(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7) { + + } + + @Override + public void warn(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7, Object p8) { + + } + + @Override + public void warn( + String message, + Object p0, + Object p1, + Object p2, + Object p3, + Object p4, + Object p5, + Object p6, + Object p7, + Object p8, + Object p9) { + + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Validation.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Validation.java new file mode 100644 index 0000000000000..55caed434114a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Validation.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.support; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.xpack.core.security.authc.esnative.ClientReservedRealm; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; + +import java.util.Locale; +import java.util.Set; + +import static java.util.Collections.unmodifiableSet; + +public final class Validation { + + static final int MIN_NAME_LENGTH = 1; + static final int MAX_NAME_LENGTH = 1024; + + static final Set VALID_NAME_CHARS = unmodifiableSet(Sets.newHashSet( + ' ', '!', '"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/', + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', + '@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', + 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\', ']', '^', '_', + '`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', + 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~' + )); + + private static final String INVALID_NAME_MESSAGE = + "%1s names must be at least " + MIN_NAME_LENGTH + " and no more than " + MAX_NAME_LENGTH + " characters. " + + "They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the " + + "Basic Latin (ASCII) block. Leading or trailing whitespace is not allowed."; + + private static boolean isValidUserOrRoleName(String name) { + if (name.length() < MIN_NAME_LENGTH || name.length() > MAX_NAME_LENGTH) { + return false; + } + + for (char character : name.toCharArray()) { + if (!VALID_NAME_CHARS.contains(character)) { + return false; + } + } + + // We only check against the space character here (U+0020) since it's the only whitespace character in the + // set that we allow. + // + // Note for the future if we allow the full unicode range: the String and Character methods that deal with + // whitespace don't work for the whole range. They match characters that are considered whitespace to the Java + // language, which doesn't include characters like IDEOGRAPHIC SPACE (U+3000). The best approach here may be + // to match against java.util.regex.Pattern's \p{Space} class (which is by default broader than \s) or make a + // list from the codepoints listed in this page https://en.wikipedia.org/wiki/Whitespace_character + if (name.startsWith(" ") || name.endsWith(" ")) { + return false; + } + + return true; + } + + public static final class Users { + + private static final int MIN_PASSWD_LENGTH = 6; + + /** + * Validate the username + * @param username the username to validate + * @param allowReserved whether or not to allow reserved user names + * @param settings the settings which may contain information about reserved users + * @return {@code null} if valid + */ + public static Error validateUsername(String username, boolean allowReserved, Settings settings) { + if (!isValidUserOrRoleName(username)) { + return new Error(String.format(Locale.ROOT, INVALID_NAME_MESSAGE, "User")); + } + if (allowReserved == false && ClientReservedRealm.isReserved(username, settings)) { + return new Error("Username [" + username + "] is reserved and may not be used."); + } + return null; + } + + public static Error validatePassword(char[] password) { + return password.length >= MIN_PASSWD_LENGTH ? 
+ null : + new Error("passwords must be at least [" + MIN_PASSWD_LENGTH + "] characters long"); + } + + } + + public static final class Roles { + + public static Error validateRoleName(String roleName) { + return validateRoleName(roleName, false); + } + + public static Error validateRoleName(String roleName, boolean allowReserved) { + if (!isValidUserOrRoleName(roleName)) { + return new Error(String.format(Locale.ROOT, INVALID_NAME_MESSAGE, "Role")); + } + if (allowReserved == false && ReservedRolesStore.isReserved(roleName)) { + return new Error("Role [" + roleName + "] is reserved and may not be used."); + } + return null; + } + } + + public static class Error { + + private final String message; + + private Error(String message) { + this.message = message; + } + + @Override + public String toString() { + return message; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/SSLExceptionHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/SSLExceptionHelper.java new file mode 100644 index 0000000000000..ddc02ac55f7c2 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/SSLExceptionHelper.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.transport; + + +import io.netty.handler.codec.DecoderException; +import io.netty.handler.ssl.NotSslRecordException; + +import javax.net.ssl.SSLException; + +public class SSLExceptionHelper { + + private SSLExceptionHelper() { + } + + public static boolean isNotSslRecordException(Throwable e) { + return e instanceof NotSslRecordException && e.getCause() == null; + } + + public static boolean isCloseDuringHandshakeException(Throwable e) { + return e instanceof SSLException + && e.getCause() == null + && "Received close_notify during handshake".equals(e.getMessage()); + } + + public static boolean isReceivedCertificateUnknownException(Throwable e) { + return e instanceof DecoderException + && e.getCause() instanceof SSLException + && "Received fatal alert: certificate_unknown".equals(e.getCause().getMessage()); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java new file mode 100644 index 0000000000000..a7ef1f0c02f4f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java @@ -0,0 +1,215 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.transport.netty4; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelOutboundHandlerAdapter; +import io.netty.channel.ChannelPromise; +import io.netty.handler.ssl.SslHandler; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TcpChannel; +import org.elasticsearch.transport.TcpTransport; +import org.elasticsearch.transport.netty4.Netty4Transport; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.transport.SSLExceptionHelper; +import org.elasticsearch.xpack.core.ssl.SSLConfiguration; +import org.elasticsearch.xpack.core.ssl.SSLService; + +import javax.net.ssl.SSLEngine; + +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.core.security.SecurityField.setting; + +/** + * Implementation of a transport that extends the {@link Netty4Transport} to add SSL and IP Filtering + */ +public class SecurityNetty4Transport extends Netty4Transport { + + private final SSLService sslService; + private final SSLConfiguration sslConfiguration; + private final Map profileConfiguration; + private final boolean sslEnabled; + + public SecurityNetty4Transport( + final Settings settings, + final ThreadPool threadPool, + final NetworkService networkService, + final BigArrays bigArrays, + final NamedWriteableRegistry namedWriteableRegistry, + final CircuitBreakerService circuitBreakerService, + final SSLService sslService) { + super(settings, threadPool, networkService, bigArrays, namedWriteableRegistry, circuitBreakerService); + this.sslService = sslService; + this.sslEnabled = XPackSettings.TRANSPORT_SSL_ENABLED.get(settings); + final Settings transportSSLSettings = settings.getByPrefix(setting("transport.ssl.")); + if (sslEnabled) { + this.sslConfiguration = sslService.sslConfiguration(transportSSLSettings, Settings.EMPTY); + Map profileSettingsMap = settings.getGroups("transport.profiles.", true); + Map profileConfiguration = new HashMap<>(profileSettingsMap.size() + 1); + for (Map.Entry entry : profileSettingsMap.entrySet()) { + Settings profileSettings = entry.getValue(); + final Settings profileSslSettings = profileSslSettings(profileSettings); + SSLConfiguration configuration = sslService.sslConfiguration(profileSslSettings, transportSSLSettings); + profileConfiguration.put(entry.getKey(), configuration); + } + + if (profileConfiguration.containsKey(TcpTransport.DEFAULT_PROFILE) == false) { + profileConfiguration.put(TcpTransport.DEFAULT_PROFILE, sslConfiguration); + } + + this.profileConfiguration = Collections.unmodifiableMap(profileConfiguration); + } else { + this.profileConfiguration = Collections.emptyMap(); + this.sslConfiguration = null; + } + } + + @Override + protected void doStart() { + super.doStart(); + } + + @Override + public final ChannelHandler getServerChannelInitializer(String name) { + if (sslEnabled) { + SSLConfiguration configuration = profileConfiguration.get(name); + if 
(configuration == null) { + throw new IllegalStateException("unknown profile: " + name); + } + return getSslChannelInitializer(name, configuration); + } else { + return getNoSslChannelInitializer(name); + } + } + + protected ChannelHandler getNoSslChannelInitializer(final String name) { + return super.getServerChannelInitializer(name); + } + + @Override + protected ChannelHandler getClientChannelInitializer() { + return new SecurityClientChannelInitializer(); + } + + @Override + protected void onException(TcpChannel channel, Exception e) { + if (!lifecycle.started()) { + // just close and ignore - we are already stopped and just need to make sure we release all resources + TcpChannel.closeChannel(channel, false); + } else if (SSLExceptionHelper.isNotSslRecordException(e)) { + if (logger.isTraceEnabled()) { + logger.trace( + new ParameterizedMessage("received plaintext traffic on an encrypted channel, closing connection {}", channel), e); + } else { + logger.warn("received plaintext traffic on an encrypted channel, closing connection {}", channel); + } + TcpChannel.closeChannel(channel, false); + } else if (SSLExceptionHelper.isCloseDuringHandshakeException(e)) { + if (logger.isTraceEnabled()) { + logger.trace(new ParameterizedMessage("connection {} closed during ssl handshake", channel), e); + } else { + logger.warn("connection {} closed during handshake", channel); + } + TcpChannel.closeChannel(channel, false); + } else if (SSLExceptionHelper.isReceivedCertificateUnknownException(e)) { + if (logger.isTraceEnabled()) { + logger.trace(new ParameterizedMessage("client did not trust server's certificate, closing connection {}", channel), e); + } else { + logger.warn("client did not trust this server's certificate, closing connection {}", channel); + } + TcpChannel.closeChannel(channel, false); + } else { + super.onException(channel, e); + } + } + + public class SslChannelInitializer extends ServerChannelInitializer { + private final SSLConfiguration configuration; + + public SslChannelInitializer(String name, SSLConfiguration configuration) { + super(name); + this.configuration = configuration; + } + + @Override + protected void initChannel(Channel ch) throws Exception { + super.initChannel(ch); + SSLEngine serverEngine = sslService.createSSLEngine(configuration, null, -1); + serverEngine.setUseClientMode(false); + final SslHandler sslHandler = new SslHandler(serverEngine); + ch.pipeline().addFirst("sslhandler", sslHandler); + } + } + + protected ServerChannelInitializer getSslChannelInitializer(final String name, final SSLConfiguration configuration) { + return new SslChannelInitializer(name, sslConfiguration); + } + + private class SecurityClientChannelInitializer extends ClientChannelInitializer { + + private final boolean hostnameVerificationEnabled; + + SecurityClientChannelInitializer() { + this.hostnameVerificationEnabled = sslEnabled && sslConfiguration.verificationMode().isHostnameVerificationEnabled(); + } + + @Override + protected void initChannel(Channel ch) throws Exception { + super.initChannel(ch); + if (sslEnabled) { + ch.pipeline().addFirst(new ClientSslHandlerInitializer(sslConfiguration, sslService, hostnameVerificationEnabled)); + } + } + } + + private static class ClientSslHandlerInitializer extends ChannelOutboundHandlerAdapter { + + private final boolean hostnameVerificationEnabled; + private final SSLConfiguration sslConfiguration; + private final SSLService sslService; + + private ClientSslHandlerInitializer(SSLConfiguration sslConfiguration, SSLService sslService, 
boolean hostnameVerificationEnabled) { + this.sslConfiguration = sslConfiguration; + this.hostnameVerificationEnabled = hostnameVerificationEnabled; + this.sslService = sslService; + } + + @Override + public void connect(ChannelHandlerContext ctx, SocketAddress remoteAddress, + SocketAddress localAddress, ChannelPromise promise) throws Exception { + final SSLEngine sslEngine; + if (hostnameVerificationEnabled) { + InetSocketAddress inetSocketAddress = (InetSocketAddress) remoteAddress; + // we create the socket based on the name given. don't reverse DNS + sslEngine = sslService.createSSLEngine(sslConfiguration, inetSocketAddress.getHostString(), + inetSocketAddress.getPort()); + } else { + sslEngine = sslService.createSSLEngine(sslConfiguration, null, -1); + } + + sslEngine.setUseClientMode(true); + ctx.pipeline().replace(this, "ssl", new SslHandler(sslEngine)); + super.connect(ctx, remoteAddress, localAddress, promise); + } + } + + public static Settings profileSslSettings(Settings profileSettings) { + return profileSettings.getByPrefix(setting("ssl.")); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/AnonymousUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/AnonymousUser.java new file mode 100644 index 0000000000000..36354ff58b318 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/AnonymousUser.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.user; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.security.support.MetadataUtils; + +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.xpack.core.security.SecurityField.setting; + +/** + * The user object for the anonymous user. 
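+ *
+ * A minimal usage sketch (assuming a {@code Settings} instance built by the caller; variable names
+ * are illustrative): the anonymous user only takes effect when anonymous roles have been configured,
+ * so callers should check {@link #isAnonymousEnabled(Settings)} first.
+ *
+ * <pre>
+ * AnonymousUser anonymous = new AnonymousUser(settings);   // principal defaults to "_anonymous"
+ * if (AnonymousUser.isAnonymousEnabled(settings)) {
+ *     String[] roles = anonymous.roles();                  // roles taken from the anonymous roles setting
+ * }
+ * </pre>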
+ */ +public class AnonymousUser extends User { + + public static final String DEFAULT_ANONYMOUS_USERNAME = "_anonymous"; + public static final Setting USERNAME_SETTING = + new Setting<>(setting("authc.anonymous.username"), DEFAULT_ANONYMOUS_USERNAME, s -> s, Property.NodeScope); + public static final Setting> ROLES_SETTING = + Setting.listSetting(setting("authc.anonymous.roles"), Collections.emptyList(), s -> s, Property.NodeScope); + + public AnonymousUser(Settings settings) { + super(USERNAME_SETTING.get(settings), ROLES_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY), null, null, + MetadataUtils.DEFAULT_RESERVED_METADATA, isAnonymousEnabled(settings)); + } + + public static boolean isAnonymousEnabled(Settings settings) { + return ROLES_SETTING.exists(settings) && ROLES_SETTING.get(settings).isEmpty() == false; + } + + public static boolean isAnonymousUsername(String username, Settings settings) { + // this is possibly the same check but we should not let anything use the default name either + return USERNAME_SETTING.get(settings).equals(username) || DEFAULT_ANONYMOUS_USERNAME.equals(username); + } + + public static void addSettings(List> settingsList) { + settingsList.add(USERNAME_SETTING); + settingsList.add(ROLES_SETTING); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java new file mode 100644 index 0000000000000..dfa437fa8d2c4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.user; + +import org.elasticsearch.Version; +import org.elasticsearch.xpack.core.security.support.MetadataUtils; + +/** + * Built in user for beats internals. Currently used for Beats monitoring. + */ +public class BeatsSystemUser extends User { + + public static final String NAME = UsernamesField.BEATS_NAME; + public static final String ROLE_NAME = UsernamesField.BEATS_ROLE; + public static final Version DEFINED_SINCE = Version.V_6_3_0; + public static final BuiltinUserInfo USER_INFO = new BuiltinUserInfo(NAME, ROLE_NAME, DEFINED_SINCE); + + public BeatsSystemUser(boolean enabled) { + super(NAME, new String[]{ ROLE_NAME }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, enabled); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BuiltinUserInfo.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BuiltinUserInfo.java new file mode 100644 index 0000000000000..0ecd457ba0951 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BuiltinUserInfo.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.user; + +import org.elasticsearch.Version; + +/** + * BuiltinUserInfo provides common user meta data for newly introduced pre defined System Users. 
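+ *
+ * For example, {@code BeatsSystemUser} publishes {@code new BuiltinUserInfo(NAME, ROLE_NAME, DEFINED_SINCE)}
+ * as its {@code USER_INFO} constant, so callers can look up the user's name, default role and the
+ * version it was first defined in without instantiating the user itself.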
+ */ +public class BuiltinUserInfo { + private final String name; + private final String role; + private final Version definedSince; + + public BuiltinUserInfo(String name, String role, Version definedSince) { + this.name = name; + this.role = role; + this.definedSince = definedSince; + } + + /** Get the builtin users name. */ + public String getName() { + return name; + } + + /** Get the builtin users default role name. */ + public String getRole() { + return role; + } + + /** Get version the builtin user was introduced with. */ + public Version getDefinedSince() { + return definedSince; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/ElasticUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/ElasticUser.java new file mode 100644 index 0000000000000..ec618a4f4821c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/ElasticUser.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.user; + +import org.elasticsearch.xpack.core.security.support.MetadataUtils; + + +/** + * The reserved {@code elastic} superuser. Has full permission/access to the cluster/indices and can + * run as any other user. + */ +public class ElasticUser extends User { + + public static final String NAME = UsernamesField.ELASTIC_NAME; + // used for testing in a different package + public static final String ROLE_NAME = UsernamesField.ELASTIC_ROLE; + + public ElasticUser(boolean enabled) { + super(NAME, new String[] { ROLE_NAME }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, enabled); + } +} + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUserSerializationHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUserSerializationHelper.java new file mode 100644 index 0000000000000..fa41828a7bba8 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUserSerializationHelper.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.user; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public class InternalUserSerializationHelper { + public static User readFrom(StreamInput input) throws IOException { + final boolean isInternalUser = input.readBoolean(); + final String username = input.readString(); + if (isInternalUser) { + if (SystemUser.is(username)) { + return SystemUser.INSTANCE; + } else if (XPackUser.is(username)) { + return XPackUser.INSTANCE; + } else if (XPackSecurityUser.is(username)) { + return XPackSecurityUser.INSTANCE; + } + throw new IllegalStateException("user [" + username + "] is not an internal user"); + } + return User.partialReadFrom(username, input); + } + public static void writeTo(User user, StreamOutput output) throws IOException { + if (SystemUser.is(user)) { + output.writeBoolean(true); + output.writeString(SystemUser.NAME); + } else if (XPackUser.is(user)) { + output.writeBoolean(true); + output.writeString(XPackUser.NAME); + } else if (XPackSecurityUser.is(user)) { + output.writeBoolean(true); + output.writeString(XPackSecurityUser.NAME); + } else { + User.writeTo(user, output); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/KibanaUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/KibanaUser.java new file mode 100644 index 0000000000000..8dfa149987d0e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/KibanaUser.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.user; + +import org.elasticsearch.xpack.core.security.support.MetadataUtils; + +/** + * Built in user for the kibana server + */ +public class KibanaUser extends User { + + public static final String NAME = UsernamesField.KIBANA_NAME; + public static final String ROLE_NAME = UsernamesField.KIBANA_ROLE; + + public KibanaUser(boolean enabled) { + super(NAME, new String[]{ ROLE_NAME }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, enabled); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/LogstashSystemUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/LogstashSystemUser.java new file mode 100644 index 0000000000000..ce37d742a191f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/LogstashSystemUser.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.user; + +import org.elasticsearch.Version; +import org.elasticsearch.xpack.core.security.support.MetadataUtils; + +/** + * Built in user for logstash internals. Currently used for Logstash monitoring. 
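+ *
+ * Like {@code BeatsSystemUser}, this user is created with only an enabled flag, e.g.
+ * {@code new LogstashSystemUser(true)}, and exposes its name, default role and defining version
+ * through {@code USER_INFO}.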
+ */ +public class LogstashSystemUser extends User { + + public static final String NAME = UsernamesField.LOGSTASH_NAME; + public static final String ROLE_NAME = UsernamesField.LOGSTASH_ROLE; + public static final Version DEFINED_SINCE = Version.V_5_2_0; + public static final BuiltinUserInfo USER_INFO = new BuiltinUserInfo(NAME, ROLE_NAME, DEFINED_SINCE); + + public LogstashSystemUser(boolean enabled) { + super(NAME, new String[]{ ROLE_NAME }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, enabled); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/SystemUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/SystemUser.java new file mode 100644 index 0000000000000..4569c2a68a09b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/SystemUser.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.user; + +import org.elasticsearch.xpack.core.security.authz.privilege.SystemPrivilege; + +import java.util.function.Predicate; + +/** + * Internal user that is applied to all requests made elasticsearch itself + */ +public class SystemUser extends User { + + public static final String NAME = UsernamesField.SYSTEM_NAME; + public static final String ROLE_NAME = UsernamesField.SYSTEM_ROLE; + + public static final User INSTANCE = new SystemUser(); + + private static final Predicate PREDICATE = SystemPrivilege.INSTANCE.predicate(); + + private SystemUser() { + super(NAME, ROLE_NAME); + } + + @Override + public boolean equals(Object o) { + return o == INSTANCE; + } + + @Override + public int hashCode() { + return System.identityHashCode(this); + } + + public static boolean is(User user) { + return INSTANCE.equals(user); + } + + public static boolean is(String principal) { + return NAME.equals(principal); + } + + public static boolean isAuthorized(String action) { + return PREDICATE.test(action); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/User.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/User.java new file mode 100644 index 0000000000000..e8161b9a7e239 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/User.java @@ -0,0 +1,247 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.user; + +import org.elasticsearch.Version; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.security.support.MetadataUtils; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.Map; + +/** + * An authenticated user + */ +public class User implements ToXContentObject { + + private final String username; + private final String[] roles; + private final User authenticatedUser; + private final Map metadata; + private final boolean enabled; + + @Nullable private final String fullName; + @Nullable private final String email; + + public User(String username, String... roles) { + this(username, roles, null, null, null, true); + } + + public User(String username, String[] roles, User authenticatedUser) { + this(username, roles, null, null, null, true, authenticatedUser); + } + + public User(User user, User authenticatedUser) { + this(user.principal(), user.roles(), user.fullName(), user.email(), user.metadata(), user.enabled(), authenticatedUser); + } + + public User(String username, String[] roles, String fullName, String email, Map metadata, boolean enabled) { + this(username, roles, fullName, email, metadata, enabled, null); + } + + private User(String username, String[] roles, String fullName, String email, Map metadata, boolean enabled, + User authenticatedUser) { + this.username = username; + this.roles = roles == null ? Strings.EMPTY_ARRAY : roles; + this.metadata = metadata != null ? Collections.unmodifiableMap(metadata) : Collections.emptyMap(); + this.fullName = fullName; + this.email = email; + this.enabled = enabled; + assert (authenticatedUser == null || authenticatedUser.isRunAs() == false) : "the authenticated user should not be a run_as user"; + this.authenticatedUser = authenticatedUser; + } + + /** + * @return The principal of this user - effectively serving as the + * unique identity of of the user. + */ + public String principal() { + return this.username; + } + + /** + * @return The roles this user is associated with. The roles are + * identified by their unique names and each represents as + * set of permissions + */ + public String[] roles() { + return this.roles; + } + + /** + * @return The metadata that is associated with this user. Can never be {@code null}. + */ + public Map metadata() { + return metadata; + } + + /** + * @return The full name of this user. May be {@code null}. + */ + public String fullName() { + return fullName; + } + + /** + * @return The email of this user. May be {@code null}. + */ + public String email() { + return email; + } + + /** + * @return whether the user is enabled or not + */ + public boolean enabled() { + return enabled; + } + + /** + * @return The user that was originally authenticated. + * This may be the user itself, or a different user which used runAs. + */ + public User authenticatedUser() { + return authenticatedUser == null ? this : authenticatedUser; + } + + /** Return true if this user was not the originally authenticated user, false otherwise. 
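A run-as user is modelled by nesting the authenticated user inside the effective user, for example
{@code new User(effectiveUser, authenticatedUser)}: on that instance {@code principal()} returns the
run-as principal, {@code authenticatedUser()} returns the original principal, and this method returns
{@code true}.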
*/ + public boolean isRunAs() { + return authenticatedUser != null; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("User[username=").append(username); + sb.append(",roles=[").append(Strings.arrayToCommaDelimitedString(roles)).append("]"); + sb.append(",fullName=").append(fullName); + sb.append(",email=").append(email); + sb.append(",metadata="); + MetadataUtils.writeValue(sb, metadata); + if (authenticatedUser != null) { + sb.append(",authenticatedUser=[").append(authenticatedUser.toString()).append("]"); + } + sb.append("]"); + return sb.toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o instanceof User == false) return false; + + User user = (User) o; + + if (!username.equals(user.username)) return false; + // Probably incorrect - comparing Object[] arrays with Arrays.equals + if (!Arrays.equals(roles, user.roles)) return false; + if (authenticatedUser != null ? !authenticatedUser.equals(user.authenticatedUser) : user.authenticatedUser != null) return false; + if (!metadata.equals(user.metadata)) return false; + if (fullName != null ? !fullName.equals(user.fullName) : user.fullName != null) return false; + return !(email != null ? !email.equals(user.email) : user.email != null); + + } + + @Override + public int hashCode() { + int result = username.hashCode(); + result = 31 * result + Arrays.hashCode(roles); + result = 31 * result + (authenticatedUser != null ? authenticatedUser.hashCode() : 0); + result = 31 * result + metadata.hashCode(); + result = 31 * result + (fullName != null ? fullName.hashCode() : 0); + result = 31 * result + (email != null ? email.hashCode() : 0); + return result; + } + + @Override + public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Fields.USERNAME.getPreferredName(), principal()); + builder.array(Fields.ROLES.getPreferredName(), roles()); + builder.field(Fields.FULL_NAME.getPreferredName(), fullName()); + builder.field(Fields.EMAIL.getPreferredName(), email()); + builder.field(Fields.METADATA.getPreferredName(), metadata()); + builder.field(Fields.ENABLED.getPreferredName(), enabled()); + return builder.endObject(); + } + + public static User partialReadFrom(String username, StreamInput input) throws IOException { + String[] roles = input.readStringArray(); + Map metadata = input.readMap(); + String fullName = input.readOptionalString(); + String email = input.readOptionalString(); + boolean enabled = input.readBoolean(); + User outerUser = new User(username, roles, fullName, email, metadata, enabled, null); + boolean hasInnerUser = input.readBoolean(); + if (hasInnerUser) { + User innerUser = readFrom(input); + if (input.getVersion().onOrBefore(Version.V_5_4_0)) { + // backcompat: runas user was read first, so reverse outer and inner + return new User(innerUser, outerUser); + } else { + return new User(outerUser, innerUser); + } + } else { + return outerUser; + } + } + + public static User readFrom(StreamInput input) throws IOException { + final boolean isInternalUser = input.readBoolean(); + assert isInternalUser == false: "should always return false. 
Internal users should use the InternalUserSerializationHelper"; + final String username = input.readString(); + return partialReadFrom(username, input); + } + + public static void writeTo(User user, StreamOutput output) throws IOException { + if (user.authenticatedUser == null) { + // no backcompat necessary, since there is no inner user + writeUser(user, output); + } else if (output.getVersion().onOrBefore(Version.V_5_4_0)) { + // backcompat: write runas user as the "inner" user + writeUser(user.authenticatedUser, output); + output.writeBoolean(true); + writeUser(user, output); + } else { + writeUser(user, output); + output.writeBoolean(true); + writeUser(user.authenticatedUser, output); + } + output.writeBoolean(false); // last user written, regardless of bwc, does not have an inner user + } + + /** Write just the given {@link User}, but not the inner {@link #authenticatedUser}. */ + private static void writeUser(User user, StreamOutput output) throws IOException { + output.writeBoolean(false); // not a system user + output.writeString(user.username); + output.writeStringArray(user.roles); + output.writeMap(user.metadata); + output.writeOptionalString(user.fullName); + output.writeOptionalString(user.email); + output.writeBoolean(user.enabled); + } + + public interface Fields { + ParseField USERNAME = new ParseField("username"); + ParseField PASSWORD = new ParseField("password"); + ParseField PASSWORD_HASH = new ParseField("password_hash"); + ParseField ROLES = new ParseField("roles"); + ParseField FULL_NAME = new ParseField("full_name"); + ParseField EMAIL = new ParseField("email"); + ParseField METADATA = new ParseField("metadata"); + ParseField ENABLED = new ParseField("enabled"); + ParseField TYPE = new ParseField("type"); + } +} + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/UsernamesField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/UsernamesField.java new file mode 100644 index 0000000000000..3b691b927b4a3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/UsernamesField.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.user; + +public final class UsernamesField { + public static final String ELASTIC_NAME = "elastic"; + public static final String ELASTIC_ROLE = "superuser"; + public static final String KIBANA_NAME = "kibana"; + public static final String KIBANA_ROLE = "kibana_system"; + public static final String SYSTEM_NAME = "_system"; + public static final String SYSTEM_ROLE = "_system"; + public static final String XPACK_SECURITY_NAME = "_xpack_security"; + public static final String XPACK_SECURITY_ROLE = "superuser"; + public static final String XPACK_NAME = "_xpack"; + public static final String XPACK_ROLE = "_xpack"; + public static final String LOGSTASH_NAME = "logstash_system"; + public static final String LOGSTASH_ROLE = "logstash_system"; + public static final String BEATS_NAME = "beats_system"; + public static final String BEATS_ROLE = "beats_system"; + + private UsernamesField() {} +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackSecurityUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackSecurityUser.java new file mode 100644 index 0000000000000..906d354837783 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackSecurityUser.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.user; + +/** + * internal user that manages xpack security. Has all cluster/indices permissions. + */ +public class XPackSecurityUser extends User { + + public static final String NAME = UsernamesField.XPACK_SECURITY_NAME; + public static final XPackSecurityUser INSTANCE = new XPackSecurityUser(); + private static final String ROLE_NAME = UsernamesField.XPACK_SECURITY_ROLE; + + private XPackSecurityUser() { + super(NAME, ROLE_NAME); + } + + @Override + public boolean equals(Object o) { + return INSTANCE == o; + } + + @Override + public int hashCode() { + return System.identityHashCode(this); + } + + public static boolean is(User user) { + return INSTANCE.equals(user); + } + + public static boolean is(String principal) { + return NAME.equals(principal); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackUser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackUser.java new file mode 100644 index 0000000000000..38c9fe84aa934 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackUser.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.user; + +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField; +import org.elasticsearch.xpack.core.security.support.MetadataUtils; + +/** + * XPack internal user that manages xpack. Has all cluster/indices permissions for x-pack to operate excluding security permissions. 
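+ *
+ * Concretely, {@code ROLE} grants the {@code all} cluster privilege, the {@code all} index privilege on
+ * every index matched by the pattern {@code /@&~(\.security.*)/} (intended to cover everything except
+ * the {@code .security} indices), {@code read} on the security audit trail indices, and run-as for any
+ * user.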
+ */ +public class XPackUser extends User { + + public static final String NAME = UsernamesField.XPACK_NAME; + public static final String ROLE_NAME = UsernamesField.XPACK_ROLE; + public static final Role ROLE = Role.builder(new RoleDescriptor(ROLE_NAME, new String[] { "all" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("/@&~(\\.security.*)/").privileges("all").build(), + RoleDescriptor.IndicesPrivileges.builder().indices(IndexAuditTrailField.INDEX_NAME_PREFIX + "-*") + .privileges("read").build() + }, + new String[] { "*" }, + MetadataUtils.DEFAULT_RESERVED_METADATA), null).build(); + public static final XPackUser INSTANCE = new XPackUser(); + + private XPackUser() { + super(NAME, ROLE_NAME); + } + + @Override + public boolean equals(Object o) { + return INSTANCE == o; + } + + @Override + public int hashCode() { + return System.identityHashCode(this); + } + + public static boolean is(User user) { + return INSTANCE.equals(user); + } + + public static boolean is(String principal) { + return NAME.equals(principal); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/xcontent/XContentUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/xcontent/XContentUtils.java new file mode 100644 index 0000000000000..c4ad5a9026ab4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/xcontent/XContentUtils.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.xcontent; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class XContentUtils { + + private XContentUtils() { + } + + /** + * Ensures that we're currently on the start of an object, or that the next token is a start of an object. + * + * @throws ElasticsearchParseException if the current or the next token is a {@code START_OBJECT} + */ + public static void verifyObject(XContentParser parser) throws IOException, ElasticsearchParseException { + if (parser.currentToken() == XContentParser.Token.START_OBJECT) { + return; + } + XContentParser.Token token = parser.nextToken(); + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("expected an object, but found token [{}]", parser.currentToken()); + } + } + + public static String[] readStringArray(XContentParser parser, boolean allowNull) throws IOException { + if (parser.currentToken() == XContentParser.Token.VALUE_NULL) { + if (allowNull) { + return null; + } + throw new ElasticsearchParseException("could not parse [{}] field. expected a string array but found null value instead", + parser.currentName()); + } + if (parser.currentToken() != XContentParser.Token.START_ARRAY) { + throw new ElasticsearchParseException("could not parse [{}] field. 
expected a string array but found [{}] value instead", + parser.currentName(), parser.currentToken()); + } + + List list = new ArrayList<>(); + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.VALUE_STRING) { + list.add(parser.text()); + } else { + throw new ElasticsearchParseException("could not parse [{}] field. expected a string array but one of the value in the " + + "array is [{}]", parser.currentName(), token); + } + } + return list.toArray(new String[list.size()]); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertUtils.java new file mode 100644 index 0000000000000..557ef3f1de73d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertUtils.java @@ -0,0 +1,667 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ssl; + +import org.bouncycastle.asn1.ASN1Encodable; +import org.bouncycastle.asn1.ASN1ObjectIdentifier; +import org.bouncycastle.asn1.DERSequence; +import org.bouncycastle.asn1.DERTaggedObject; +import org.bouncycastle.asn1.DERUTF8String; +import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers; +import org.bouncycastle.asn1.pkcs.PrivateKeyInfo; +import org.bouncycastle.asn1.x500.X500Name; +import org.bouncycastle.asn1.x509.AuthorityKeyIdentifier; +import org.bouncycastle.asn1.x509.BasicConstraints; +import org.bouncycastle.asn1.x509.Extension; +import org.bouncycastle.asn1.x509.ExtensionsGenerator; +import org.bouncycastle.asn1.x509.GeneralName; +import org.bouncycastle.asn1.x509.GeneralNames; +import org.bouncycastle.asn1.x509.Time; +import org.bouncycastle.cert.CertIOException; +import org.bouncycastle.cert.X509CertificateHolder; +import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; +import org.bouncycastle.cert.jcajce.JcaX509ExtensionUtils; +import org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder; +import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.bouncycastle.openssl.PEMEncryptedKeyPair; +import org.bouncycastle.openssl.PEMKeyPair; +import org.bouncycastle.openssl.PEMParser; +import org.bouncycastle.openssl.X509TrustedCertificateBlock; +import org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter; +import org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder; +import org.bouncycastle.operator.ContentSigner; +import org.bouncycastle.operator.OperatorCreationException; +import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder; +import org.bouncycastle.pkcs.PKCS10CertificationRequest; +import org.bouncycastle.pkcs.jcajce.JcaPKCS10CertificationRequestBuilder; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.network.InetAddressHelper; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import javax.net.ssl.KeyManager; +import javax.net.ssl.KeyManagerFactory; 
+import javax.net.ssl.TrustManager; +import javax.net.ssl.TrustManagerFactory; +import javax.net.ssl.X509ExtendedKeyManager; +import javax.net.ssl.X509ExtendedTrustManager; +import javax.security.auth.x500.X500Principal; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.Reader; +import java.math.BigInteger; +import java.net.InetAddress; +import java.net.SocketException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.Key; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.PrivateKey; +import java.security.SecureRandom; +import java.security.UnrecoverableKeyException; +import java.security.cert.Certificate; +import java.security.cert.CertificateException; +import java.security.cert.CertificateFactory; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings.getKeyStoreType; + +/** + * Utility methods that deal with {@link Certificate}, {@link KeyStore}, {@link X509ExtendedTrustManager}, {@link X509ExtendedKeyManager} + * and other certificate related objects. + */ +public class CertUtils { + + static final String CN_OID = "2.5.4.3"; + + private static final int SERIAL_BIT_LENGTH = 20 * 8; + static final BouncyCastleProvider BC_PROV = new BouncyCastleProvider(); + + private CertUtils() { + } + + /** + * Resolves a path with or without an {@link Environment} as we may be running in a transport client where we do not have access to + * the environment + */ + @SuppressForbidden(reason = "we don't have the environment to resolve files from when running in a transport client") + static Path resolvePath(String path, @Nullable Environment environment) { + if (environment != null) { + return environment.configFile().resolve(path); + } + return PathUtils.get(path).normalize(); + } + + /** + * Creates a {@link KeyStore} from a PEM encoded certificate and key file + */ + static KeyStore getKeyStoreFromPEM(Path certificatePath, Path keyPath, char[] keyPassword) + throws IOException, CertificateException, KeyStoreException, NoSuchAlgorithmException { + final PrivateKey key; + try (Reader reader = Files.newBufferedReader(keyPath, StandardCharsets.UTF_8)) { + key = CertUtils.readPrivateKey(reader, () -> keyPassword); + } + final Certificate[] certificates = readCertificates(Collections.singletonList(certificatePath)); + return getKeyStore(certificates, key, keyPassword); + } + + + /** + * Returns a {@link X509ExtendedKeyManager} that is built from the provided private key and certificate chain + */ + public static X509ExtendedKeyManager keyManager(Certificate[] certificateChain, PrivateKey privateKey, char[] password) + throws NoSuchAlgorithmException, UnrecoverableKeyException, KeyStoreException, IOException, CertificateException { + KeyStore keyStore = getKeyStore(certificateChain, privateKey, password); + return keyManager(keyStore, password, 
KeyManagerFactory.getDefaultAlgorithm()); + } + + private static KeyStore getKeyStore(Certificate[] certificateChain, PrivateKey privateKey, char[] password) + throws KeyStoreException, IOException, NoSuchAlgorithmException, CertificateException { + KeyStore keyStore = KeyStore.getInstance("jks"); + keyStore.load(null, null); + // password must be non-null for keystore... + keyStore.setKeyEntry("key", privateKey, password, certificateChain); + return keyStore; + } + + /** + * Returns a {@link X509ExtendedKeyManager} that is built from the provided keystore + */ + static X509ExtendedKeyManager keyManager(KeyStore keyStore, char[] password, String algorithm) + throws NoSuchAlgorithmException, UnrecoverableKeyException, KeyStoreException { + KeyManagerFactory kmf = KeyManagerFactory.getInstance(algorithm); + kmf.init(keyStore, password); + KeyManager[] keyManagers = kmf.getKeyManagers(); + for (KeyManager keyManager : keyManagers) { + if (keyManager instanceof X509ExtendedKeyManager) { + return (X509ExtendedKeyManager) keyManager; + } + } + throw new IllegalStateException("failed to find a X509ExtendedKeyManager"); + } + + public static X509ExtendedKeyManager getKeyManager(X509KeyPairSettings keyPair, Settings settings, + @Nullable String trustStoreAlgorithm, Environment environment) { + if (trustStoreAlgorithm == null) { + trustStoreAlgorithm = TrustManagerFactory.getDefaultAlgorithm(); + } + final KeyConfig keyConfig = createKeyConfig(keyPair, settings, trustStoreAlgorithm); + if (keyConfig == null) { + return null; + } else { + return keyConfig.createKeyManager(environment); + } + } + + static KeyConfig createKeyConfig(X509KeyPairSettings keyPair, Settings settings, String trustStoreAlgorithm) { + String keyPath = keyPair.keyPath.get(settings).orElse(null); + String keyStorePath = keyPair.keystorePath.get(settings).orElse(null); + + if (keyPath != null && keyStorePath != null) { + throw new IllegalArgumentException("you cannot specify a keystore and key file"); + } + + if (keyPath != null) { + SecureString keyPassword = keyPair.keyPassword.get(settings); + String certPath = keyPair.certificatePath.get(settings).orElse(null); + if (certPath == null) { + throw new IllegalArgumentException("you must specify the certificates [" + keyPair.certificatePath.getKey() + + "] to use with the key [" + keyPair.keyPath.getKey() + "]"); + } + return new PEMKeyConfig(keyPath, keyPassword, certPath); + } + + if (keyStorePath != null) { + SecureString keyStorePassword = keyPair.keystorePassword.get(settings); + String keyStoreAlgorithm = keyPair.keystoreAlgorithm.get(settings); + String keyStoreType = getKeyStoreType(keyPair.keystoreType, settings, keyStorePath); + SecureString keyStoreKeyPassword = keyPair.keystoreKeyPassword.get(settings); + if (keyStoreKeyPassword.length() == 0) { + keyStoreKeyPassword = keyStorePassword; + } + return new StoreKeyConfig(keyStorePath, keyStoreType, keyStorePassword, keyStoreKeyPassword, keyStoreAlgorithm, + trustStoreAlgorithm); + } + return null; + + } + + /** + * Creates a {@link X509ExtendedTrustManager} based on the provided certificates + * + * @param certificates the certificates to trust + * @return a trust manager that trusts the provided certificates + */ + public static X509ExtendedTrustManager trustManager(Certificate[] certificates) + throws NoSuchAlgorithmException, UnrecoverableKeyException, KeyStoreException, IOException, CertificateException { + KeyStore store = trustStore(certificates); + return trustManager(store, 
TrustManagerFactory.getDefaultAlgorithm()); + } + + static KeyStore trustStore(Certificate[] certificates) + throws KeyStoreException, IOException, NoSuchAlgorithmException, CertificateException { + assert certificates != null : "Cannot create trust store with null certificates"; + KeyStore store = KeyStore.getInstance("jks"); + store.load(null, null); + int counter = 0; + for (Certificate certificate : certificates) { + store.setCertificateEntry("cert" + counter, certificate); + counter++; + } + return store; + } + + /** + * Loads the truststore and creates a {@link X509ExtendedTrustManager} + * + * @param trustStorePath the path to the truststore + * @param trustStorePassword the password to the truststore + * @param trustStoreAlgorithm the algorithm to use for the truststore + * @param env the environment to use for file resolution. May be {@code null} + * @return a trust manager with the trust material from the store + */ + public static X509ExtendedTrustManager trustManager(String trustStorePath, String trustStoreType, char[] trustStorePassword, + String trustStoreAlgorithm, @Nullable Environment env) + throws NoSuchAlgorithmException, UnrecoverableKeyException, KeyStoreException, IOException, CertificateException { + KeyStore trustStore = readKeyStore(resolvePath(trustStorePath, env), trustStoreType, trustStorePassword); + return trustManager(trustStore, trustStoreAlgorithm); + } + + static KeyStore readKeyStore(Path path, String type, char[] password) + throws IOException, KeyStoreException, CertificateException, NoSuchAlgorithmException { + try (InputStream in = Files.newInputStream(path)) { + KeyStore store = KeyStore.getInstance(type); + assert password != null; + store.load(in, password); + return store; + } + } + + /** + * Creates a {@link X509ExtendedTrustManager} based on the trust material in the provided {@link KeyStore} + */ + static X509ExtendedTrustManager trustManager(KeyStore keyStore, String algorithm) + throws NoSuchAlgorithmException, UnrecoverableKeyException, KeyStoreException, IOException, CertificateException { + TrustManagerFactory tmf = TrustManagerFactory.getInstance(algorithm); + tmf.init(keyStore); + TrustManager[] trustManagers = tmf.getTrustManagers(); + for (TrustManager trustManager : trustManagers) { + if (trustManager instanceof X509ExtendedTrustManager) { + return (X509ExtendedTrustManager) trustManager; + } + } + throw new IllegalStateException("failed to find a X509ExtendedTrustManager"); + } + + /** + * Reads the provided paths and parses them into {@link Certificate} objects + * + * @param certPaths the paths to the PEM encoded certificates + * @param environment the environment to resolve files against. 
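The `trustStore` and `trustManager` helpers above wrap the standard JSSE flow. A plain-JDK sketch of the same steps, with a hypothetical `ca.crt` path and no error handling; the parallel key-manager helpers earlier in this file follow the same shape with `KeyManagerFactory`.

```java
import javax.net.ssl.TrustManager;
import javax.net.ssl.TrustManagerFactory;
import javax.net.ssl.X509ExtendedTrustManager;

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.KeyStore;
import java.security.cert.Certificate;
import java.security.cert.CertificateFactory;

public class TrustManagerExample {
    public static void main(String[] args) throws Exception {
        // Read one PEM/DER certificate with the stock JDK factory (CertUtils uses BouncyCastle's
        // PEMParser instead, but the JSSE steps that follow are the same). The path is hypothetical.
        Certificate caCert;
        try (InputStream in = Files.newInputStream(Paths.get("ca.crt"))) {
            caCert = CertificateFactory.getInstance("X.509").generateCertificate(in);
        }

        // Equivalent of trustStore(Certificate[]): an in-memory keystore holding only trusted entries.
        KeyStore store = KeyStore.getInstance("jks");
        store.load(null, null);
        store.setCertificateEntry("cert0", caCert);

        // Equivalent of trustManager(KeyStore, String): initialise a factory and pick the
        // X509ExtendedTrustManager out of the managers it returns.
        TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
        tmf.init(store);
        for (TrustManager tm : tmf.getTrustManagers()) {
            if (tm instanceof X509ExtendedTrustManager) {
                System.out.println("trust manager: " + tm.getClass().getName());
            }
        }
    }
}
```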
May be {@code null} + * @return an array of {@link Certificate} objects + */ + public static Certificate[] readCertificates(List certPaths, @Nullable Environment environment) + throws CertificateException, IOException { + final List resolvedPaths = certPaths.stream().map(p -> resolvePath(p, environment)).collect(Collectors.toList()); + return readCertificates(resolvedPaths); + } + + public static Certificate[] readCertificates(List certPaths) throws CertificateException, IOException { + List certificates = new ArrayList<>(certPaths.size()); + CertificateFactory certFactory = CertificateFactory.getInstance("X.509"); + for (Path path : certPaths) { + try (Reader reader = Files.newBufferedReader(path, StandardCharsets.UTF_8)) { + readCertificates(reader, certificates, certFactory); + } + } + return certificates.toArray(new Certificate[certificates.size()]); + } + + /** + * Reads the certificates from the provided reader + */ + static void readCertificates(Reader reader, List certificates, CertificateFactory certFactory) + throws IOException, CertificateException { + try (PEMParser pemParser = new PEMParser(reader)) { + + Object parsed = pemParser.readObject(); + if (parsed == null) { + throw new IllegalArgumentException("could not parse pem certificate"); + } + + while (parsed != null) { + X509CertificateHolder holder; + if (parsed instanceof X509CertificateHolder) { + holder = (X509CertificateHolder) parsed; + } else if (parsed instanceof X509TrustedCertificateBlock) { + X509TrustedCertificateBlock certificateBlock = (X509TrustedCertificateBlock) parsed; + holder = certificateBlock.getCertificateHolder(); + } else { + String msg = "parsed an unsupported object [" + parsed.getClass().getSimpleName() + "]"; + if (parsed instanceof PEMEncryptedKeyPair || parsed instanceof PEMKeyPair || parsed instanceof PrivateKeyInfo) { + msg = msg + ". 
Encountered a PEM Key while expecting a PEM certificate."; + } + throw new IllegalArgumentException(msg); + } + certificates.add(certFactory.generateCertificate(new ByteArrayInputStream(holder.getEncoded()))); + parsed = pemParser.readObject(); + } + } + } + + /** + * Reads the private key from the reader and optionally uses the password supplier to retrieve a password if the key is encrypted + */ + public static PrivateKey readPrivateKey(Reader reader, Supplier passwordSupplier) throws IOException { + try (PEMParser parser = new PEMParser(reader)) { + PrivateKeyInfo privateKeyInfo = innerReadPrivateKey(parser, passwordSupplier); + if (parser.readObject() != null) { + throw new IllegalStateException("key file contained more that one entry"); + } + JcaPEMKeyConverter converter = new JcaPEMKeyConverter(); + converter.setProvider(BC_PROV); + return converter.getPrivateKey(privateKeyInfo); + } + } + + private static PrivateKeyInfo innerReadPrivateKey(PEMParser parser, Supplier passwordSupplier) throws IOException { + final Object parsed = parser.readObject(); + if (parsed == null) { + throw new IllegalStateException("key file did not contain a supported key"); + } + + PrivateKeyInfo privateKeyInfo; + if (parsed instanceof PEMEncryptedKeyPair) { + char[] keyPassword = passwordSupplier.get(); + if (keyPassword == null) { + throw new IllegalArgumentException("cannot read encrypted key without a password"); + } + // we have an encrypted key pair so we need to decrypt it + PEMEncryptedKeyPair encryptedKeyPair = (PEMEncryptedKeyPair) parsed; + privateKeyInfo = encryptedKeyPair + .decryptKeyPair(new JcePEMDecryptorProviderBuilder().setProvider(BC_PROV).build(keyPassword)) + .getPrivateKeyInfo(); + } else if (parsed instanceof PEMKeyPair) { + privateKeyInfo = ((PEMKeyPair) parsed).getPrivateKeyInfo(); + } else if (parsed instanceof PrivateKeyInfo) { + privateKeyInfo = (PrivateKeyInfo) parsed; + } else if (parsed instanceof ASN1ObjectIdentifier) { + // skip this object and recurse into this method again to read the next object + return innerReadPrivateKey(parser, passwordSupplier); + } else { + String msg = "parsed an unsupported object [" + parsed.getClass().getSimpleName() + "]"; + if (parsed instanceof X509CertificateHolder || parsed instanceof X509TrustedCertificateBlock) { + msg = msg + ". Encountered a PEM Certificate while expecting a PEM Key."; + } + throw new IllegalArgumentException(msg); + } + + return privateKeyInfo; + } + + /** + * Read all certificate-key pairs from a PKCS#12 container. + * + * @param path The path to the PKCS#12 container file. + * @param password The password for the container file + * @param keyPassword A supplier for the password for each key. The key alias is supplied as an argument to the function, and it should + * return the password for that key. If it returns {@code null}, then the key-pair for that alias is not read. 
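A usage sketch for `readPrivateKey` as declared above. The path and password are hypothetical; the password supplier is consulted only when the parser encounters an encrypted PEM entry.

```java
import org.elasticsearch.xpack.core.ssl.CertUtils;

import java.io.Reader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.PrivateKey;

public class ReadPrivateKeyExample {
    public static void main(String[] args) throws Exception {
        char[] password = "changeme".toCharArray(); // hypothetical; only used if the key is encrypted
        try (Reader reader = Files.newBufferedReader(Paths.get("node.key"), StandardCharsets.UTF_8)) {
            // The supplier is invoked only for a PEMEncryptedKeyPair entry.
            PrivateKey key = CertUtils.readPrivateKey(reader, () -> password);
            System.out.println(key.getAlgorithm());
        }
    }
}
```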
+ */ + public static Map readPkcs12KeyPairs(Path path, char[] password, Function keyPassword, Environment + env) + throws CertificateException, NoSuchAlgorithmException, KeyStoreException, IOException, UnrecoverableKeyException { + final KeyStore store = readKeyStore(path, "PKCS12", password); + final Enumeration enumeration = store.aliases(); + final Map map = new HashMap<>(store.size()); + while (enumeration.hasMoreElements()) { + final String alias = enumeration.nextElement(); + if (store.isKeyEntry(alias)) { + final char[] pass = keyPassword.apply(alias); + map.put(store.getCertificate(alias), store.getKey(alias, pass)); + } + } + return map; + } + + /** + * Generates a CA certificate + */ + public static X509Certificate generateCACertificate(X500Principal x500Principal, KeyPair keyPair, int days) + throws OperatorCreationException, CertificateException, CertIOException, NoSuchAlgorithmException { + return generateSignedCertificate(x500Principal, null, keyPair, null, null, true, days, null); + } + + /** + * Generates a signed certificate using the provided CA private key and + * information from the CA certificate + * + * @param principal + * the principal of the certificate; commonly referred to as the + * distinguished name (DN) + * @param subjectAltNames + * the subject alternative names that should be added to the + * certificate as an X509v3 extension. May be {@code null} + * @param keyPair + * the key pair that will be associated with the certificate + * @param caCert + * the CA certificate. If {@code null}, this results in a self signed + * certificate + * @param caPrivKey + * the CA private key. If {@code null}, this results in a self signed + * certificate + * @param days + * no of days certificate will be valid from now + * @return a signed {@link X509Certificate} + */ + public static X509Certificate generateSignedCertificate(X500Principal principal, GeneralNames subjectAltNames, KeyPair keyPair, + X509Certificate caCert, PrivateKey caPrivKey, int days) + throws OperatorCreationException, CertificateException, CertIOException, NoSuchAlgorithmException { + return generateSignedCertificate(principal, subjectAltNames, keyPair, caCert, caPrivKey, false, days, null); + } + + /** + * Generates a signed certificate using the provided CA private key and + * information from the CA certificate + * + * @param principal + * the principal of the certificate; commonly referred to as the + * distinguished name (DN) + * @param subjectAltNames + * the subject alternative names that should be added to the + * certificate as an X509v3 extension. May be {@code null} + * @param keyPair + * the key pair that will be associated with the certificate + * @param caCert + * the CA certificate. If {@code null}, this results in a self signed + * certificate + * @param caPrivKey + * the CA private key. If {@code null}, this results in a self signed + * certificate + * @param days + * no of days certificate will be valid from now + * @param signatureAlgorithm + * algorithm used for signing certificate. 
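A usage sketch for `readPkcs12KeyPairs` shown at the start of this hunk. The container path and passwords are hypothetical, and `null` is passed for the `Environment` argument on the assumption that it is acceptable here, since the method body shown does not resolve the path against it.

```java
import org.elasticsearch.xpack.core.ssl.CertUtils;

import java.nio.file.Paths;
import java.security.Key;
import java.security.cert.Certificate;
import java.util.Map;

public class ReadPkcs12Example {
    public static void main(String[] args) throws Exception {
        // Per the javadoc above, the per-alias function may return null to skip an alias.
        Map<Certificate, Key> pairs = CertUtils.readPkcs12KeyPairs(
                Paths.get("bundle.p12"),                 // hypothetical container
                "container-pass".toCharArray(),          // hypothetical container password
                alias -> "key-pass".toCharArray(),       // hypothetical per-key password
                null);                                   // Environment is not used for path resolution here
        pairs.forEach((cert, key) ->
                System.out.println(cert.getType() + " / " + key.getAlgorithm()));
    }
}
```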
If {@code null} or + * empty, then use default algorithm {@link CertUtils#getDefaultSignatureAlgorithm(PrivateKey)} + * @return a signed {@link X509Certificate} + */ + public static X509Certificate generateSignedCertificate(X500Principal principal, GeneralNames subjectAltNames, KeyPair keyPair, + X509Certificate caCert, PrivateKey caPrivKey, int days, String signatureAlgorithm) + throws OperatorCreationException, CertificateException, CertIOException, NoSuchAlgorithmException { + return generateSignedCertificate(principal, subjectAltNames, keyPair, caCert, caPrivKey, false, days, signatureAlgorithm); + } + + /** + * Generates a signed certificate + * + * @param principal + * the principal of the certificate; commonly referred to as the + * distinguished name (DN) + * @param subjectAltNames + * the subject alternative names that should be added to the + * certificate as an X509v3 extension. May be {@code null} + * @param keyPair + * the key pair that will be associated with the certificate + * @param caCert + * the CA certificate. If {@code null}, this results in a self signed + * certificate + * @param caPrivKey + * the CA private key. If {@code null}, this results in a self signed + * certificate + * @param isCa + * whether or not the generated certificate is a CA + * @param days + * no of days certificate will be valid from now + * @param signatureAlgorithm + * algorithm used for signing certificate. If {@code null} or + * empty, then use default algorithm {@link CertUtils#getDefaultSignatureAlgorithm(PrivateKey)} + * @return a signed {@link X509Certificate} + */ + private static X509Certificate generateSignedCertificate(X500Principal principal, GeneralNames subjectAltNames, KeyPair keyPair, + X509Certificate caCert, PrivateKey caPrivKey, boolean isCa, int days, String signatureAlgorithm) + throws NoSuchAlgorithmException, CertificateException, CertIOException, OperatorCreationException { + Objects.requireNonNull(keyPair, "Key-Pair must not be null"); + final DateTime notBefore = new DateTime(DateTimeZone.UTC); + if (days < 1) { + throw new IllegalArgumentException("the certificate must be valid for at least one day"); + } + final DateTime notAfter = notBefore.plusDays(days); + final BigInteger serial = CertUtils.getSerial(); + JcaX509ExtensionUtils extUtils = new JcaX509ExtensionUtils(); + + X500Name subject = X500Name.getInstance(principal.getEncoded()); + final X500Name issuer; + final AuthorityKeyIdentifier authorityKeyIdentifier; + if (caCert != null) { + if (caCert.getBasicConstraints() < 0) { + throw new IllegalArgumentException("ca certificate is not a CA!"); + } + issuer = X500Name.getInstance(caCert.getIssuerX500Principal().getEncoded()); + authorityKeyIdentifier = extUtils.createAuthorityKeyIdentifier(caCert.getPublicKey()); + } else { + issuer = subject; + authorityKeyIdentifier = extUtils.createAuthorityKeyIdentifier(keyPair.getPublic()); + } + + JcaX509v3CertificateBuilder builder = + new JcaX509v3CertificateBuilder(issuer, serial, + new Time(notBefore.toDate(), Locale.ROOT), new Time(notAfter.toDate(), Locale.ROOT), subject, keyPair.getPublic()); + + builder.addExtension(Extension.subjectKeyIdentifier, false, extUtils.createSubjectKeyIdentifier(keyPair.getPublic())); + builder.addExtension(Extension.authorityKeyIdentifier, false, authorityKeyIdentifier); + if (subjectAltNames != null) { + builder.addExtension(Extension.subjectAlternativeName, false, subjectAltNames); + } + builder.addExtension(Extension.basicConstraints, isCa, new BasicConstraints(isCa)); + + PrivateKey 
signingKey = caPrivKey != null ? caPrivKey : keyPair.getPrivate(); + ContentSigner signer = new JcaContentSignerBuilder( + (Strings.isNullOrEmpty(signatureAlgorithm)) ? getDefaultSignatureAlgorithm(signingKey) : signatureAlgorithm) + .setProvider(CertUtils.BC_PROV).build(signingKey); + X509CertificateHolder certificateHolder = builder.build(signer); + return new JcaX509CertificateConverter().getCertificate(certificateHolder); + } + + /** + * Based on the private key algorithm {@link PrivateKey#getAlgorithm()} + * determines default signing algorithm used by CertUtils + * + * @param key + * {@link PrivateKey} + * @return algorithm + */ + private static String getDefaultSignatureAlgorithm(PrivateKey key) { + String signatureAlgorithm = null; + switch (key.getAlgorithm()) { + case "RSA": + signatureAlgorithm = "SHA256withRSA"; + break; + case "DSA": + signatureAlgorithm = "SHA256withDSA"; + break; + case "EC": + signatureAlgorithm = "SHA256withECDSA"; + break; + default: + throw new IllegalArgumentException("Unsupported algorithm : " + key.getAlgorithm() + + " for signature, allowed values for private key algorithm are [RSA, DSA, EC]"); + } + return signatureAlgorithm; + } + + /** + * Generates a certificate signing request + * + * @param keyPair the key pair that will be associated by the certificate generated from the certificate signing request + * @param principal the principal of the certificate; commonly referred to as the distinguished name (DN) + * @param sanList the subject alternative names that should be added to the certificate as an X509v3 extension. May be + * {@code null} + * @return a certificate signing request + */ + static PKCS10CertificationRequest generateCSR(KeyPair keyPair, X500Principal principal, GeneralNames sanList) + throws IOException, OperatorCreationException { + Objects.requireNonNull(keyPair, "Key-Pair must not be null"); + Objects.requireNonNull(keyPair.getPublic(), "Public-Key must not be null"); + Objects.requireNonNull(principal, "Principal must not be null"); + JcaPKCS10CertificationRequestBuilder builder = new JcaPKCS10CertificationRequestBuilder(principal, keyPair.getPublic()); + if (sanList != null) { + ExtensionsGenerator extGen = new ExtensionsGenerator(); + extGen.addExtension(Extension.subjectAlternativeName, false, sanList); + builder.addAttribute(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest, extGen.generate()); + } + + return builder.build(new JcaContentSignerBuilder("SHA256withRSA").setProvider(CertUtils.BC_PROV).build(keyPair.getPrivate())); + } + + /** + * Gets a random serial for a certificate that is generated from a {@link SecureRandom} + */ + public static BigInteger getSerial() { + SecureRandom random = new SecureRandom(); + BigInteger serial = new BigInteger(SERIAL_BIT_LENGTH, random); + assert serial.compareTo(BigInteger.valueOf(0L)) >= 0; + return serial; + } + + /** + * Generates a RSA key pair with the provided key size (in bits) + */ + public static KeyPair generateKeyPair(int keysize) throws NoSuchAlgorithmException { + // generate a private key + KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA"); + keyPairGenerator.initialize(keysize); + return keyPairGenerator.generateKeyPair(); + } + + /** + * Converts the {@link InetAddress} objects into a {@link GeneralNames} object that is used to represent subject alternative names. 
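Putting the public helpers together, a sketch that generates a CA and one CA-signed instance certificate. The DNs, key size, and validity period are illustrative; with an RSA key, the default signature algorithm resolved by `getDefaultSignatureAlgorithm` is `SHA256withRSA`.

```java
import org.bouncycastle.asn1.x509.GeneralNames;
import org.elasticsearch.xpack.core.ssl.CertUtils;

import javax.security.auth.x500.X500Principal;
import java.security.KeyPair;
import java.security.cert.X509Certificate;

public class SignedCertificateExample {
    public static void main(String[] args) throws Exception {
        // Self-signed CA: 2048-bit RSA key, one year of validity.
        KeyPair caKeyPair = CertUtils.generateKeyPair(2048);
        X509Certificate caCert = CertUtils.generateCACertificate(
                new X500Principal("CN=Example Autogenerated CA"), caKeyPair, 365);

        // Instance certificate signed by that CA.
        KeyPair nodeKeyPair = CertUtils.generateKeyPair(2048);
        GeneralNames san = null; // no subject alternative names in this minimal sketch
        X509Certificate nodeCert = CertUtils.generateSignedCertificate(
                new X500Principal("CN=node01"), san, nodeKeyPair, caCert, caKeyPair.getPrivate(), 365);

        System.out.println(nodeCert.getSubjectX500Principal());
    }
}
```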
+ */ + public static GeneralNames getSubjectAlternativeNames(boolean resolveName, Set addresses) throws SocketException { + Set generalNameList = new HashSet<>(); + for (InetAddress address : addresses) { + if (address.isAnyLocalAddress()) { + // it is a wildcard address + for (InetAddress inetAddress : InetAddressHelper.getAllAddresses()) { + addSubjectAlternativeNames(resolveName, inetAddress, generalNameList); + } + } else { + addSubjectAlternativeNames(resolveName, address, generalNameList); + } + } + return new GeneralNames(generalNameList.toArray(new GeneralName[generalNameList.size()])); + } + + @SuppressForbidden(reason = "need to use getHostName to resolve DNS name and getHostAddress to ensure we resolved the name") + private static void addSubjectAlternativeNames(boolean resolveName, InetAddress inetAddress, Set list) { + String hostaddress = inetAddress.getHostAddress(); + String ip = NetworkAddress.format(inetAddress); + list.add(new GeneralName(GeneralName.iPAddress, ip)); + if (resolveName && (inetAddress.isLinkLocalAddress() == false)) { + String possibleHostName = inetAddress.getHostName(); + if (possibleHostName.equals(hostaddress) == false) { + list.add(new GeneralName(GeneralName.dNSName, possibleHostName)); + } + } + } + + /** + * Creates an X.509 {@link GeneralName} for use as a Common Name in the certificate's Subject Alternative Names + * extension. A common name is a name with a tag of {@link GeneralName#otherName OTHER}, with an object-id that references + * the {@link #CN_OID cn} attribute, an explicit tag of '0', and a DER encoded UTF8 string for the name. + * This usage of using the {@code cn} OID as a Subject Alternative Name is non-standard and will not be + * recognised by other X.509/TLS implementations. + */ + public static GeneralName createCommonName(String cn) { + final ASN1Encodable[] sequence = { new ASN1ObjectIdentifier(CN_OID), new DERTaggedObject(true, 0, new DERUTF8String(cn)) }; + return new GeneralName(GeneralName.otherName, new DERSequence(sequence)); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertificateGenerateTool.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertificateGenerateTool.java new file mode 100644 index 0000000000000..eb3a92ce5e879 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertificateGenerateTool.java @@ -0,0 +1,729 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
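For callers that assemble subject alternative names by hand rather than from `InetAddress` objects, a small sketch using the same BouncyCastle types as the methods above; the IP, DNS name, and common name are invented, and the common-name entry uses the non-standard OTHER-name form documented for `createCommonName`.

```java
import org.bouncycastle.asn1.x509.GeneralName;
import org.bouncycastle.asn1.x509.GeneralNames;
import org.elasticsearch.xpack.core.ssl.CertUtils;

public class SubjectAltNamesExample {
    public static void main(String[] args) {
        GeneralName ip = new GeneralName(GeneralName.iPAddress, "10.0.0.5");      // IP SAN
        GeneralName dns = new GeneralName(GeneralName.dNSName, "node01.example"); // DNS SAN
        GeneralName cn = CertUtils.createCommonName("node01");                    // non-standard cn OTHER-name
        GeneralNames sanList = new GeneralNames(new GeneralName[] { ip, dns, cn });
        System.out.println(sanList);
    }
}
```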
+ */ +package org.elasticsearch.xpack.core.ssl; + +import joptsimple.ArgumentAcceptingOptionSpec; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.bouncycastle.asn1.DERIA5String; +import org.bouncycastle.asn1.x509.GeneralName; +import org.bouncycastle.asn1.x509.GeneralNames; +import org.bouncycastle.openssl.PEMEncryptor; +import org.bouncycastle.openssl.jcajce.JcaPEMWriter; +import org.bouncycastle.openssl.jcajce.JcePEMEncryptorBuilder; +import org.bouncycastle.pkcs.PKCS10CertificationRequest; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.env.Environment; + +import javax.security.auth.x500.X500Principal; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.Reader; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.attribute.PosixFilePermission; +import java.security.KeyPair; +import java.security.KeyStore; +import java.security.PrivateKey; +import java.security.cert.Certificate; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; +import java.util.regex.Pattern; +import java.util.zip.ZipEntry; +import java.util.zip.ZipOutputStream; + +/** + * CLI tool to make generation of certificates or certificate requests easier for users + * @deprecated Replaced by {@link CertificateTool} + */ +@Deprecated +public class CertificateGenerateTool extends EnvironmentAwareCommand { + + private static final String AUTO_GEN_CA_DN = "CN=Elastic Certificate Tool Autogenerated CA"; + private static final String DESCRIPTION = "Simplifies certificate creation for use with the Elastic Stack"; + private static final String DEFAULT_CSR_FILE = "csr-bundle.zip"; + private static final String DEFAULT_CERT_FILE = "certificate-bundle.zip"; + private static final int DEFAULT_DAYS = 3 * 365; + private static final int FILE_EXTENSION_LENGTH = 4; + static final int MAX_FILENAME_LENGTH = 255 - FILE_EXTENSION_LENGTH; + private static final Pattern ALLOWED_FILENAME_CHAR_PATTERN = + Pattern.compile("[a-zA-Z0-9!@#$%^&{}\\[\\]()_+\\-=,.~'` ]{1," + MAX_FILENAME_LENGTH + "}"); + private static final int DEFAULT_KEY_SIZE = 2048; + + /** + * Wraps the certgen object parser. 
+ */ + private static class InputFileParser { + private static final ObjectParser, Void> PARSER = new ObjectParser<>("certgen"); + + // if the class initializer here runs before the main method, logging will not have been configured; this will lead to status logger + // error messages from the class initializer for ParseField since it creates Logger instances; therefore, we bury the initialization + // of the parser in this class so that we can defer initialization until after logging has been initialized + static { + @SuppressWarnings("unchecked") final ConstructingObjectParser instanceParser = + new ConstructingObjectParser<>( + "instances", + a -> new CertificateInformation( + (String) a[0], (String) (a[1] == null ? a[0] : a[1]), + (List) a[2], (List) a[3], (List) a[4])); + instanceParser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("name")); + instanceParser.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("filename")); + instanceParser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), new ParseField("ip")); + instanceParser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), new ParseField("dns")); + instanceParser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), new ParseField("cn")); + + PARSER.declareObjectArray(List::addAll, instanceParser, new ParseField("instances")); + } + } + + private final OptionSpec outputPathSpec; + private final OptionSpec csrSpec; + private final OptionSpec caCertPathSpec; + private final OptionSpec caKeyPathSpec; + private final OptionSpec caPasswordSpec; + private final OptionSpec caDnSpec; + private final OptionSpec keysizeSpec; + private final OptionSpec inputFileSpec; + private final OptionSpec daysSpec; + private final ArgumentAcceptingOptionSpec p12Spec; + + CertificateGenerateTool() { + super(DESCRIPTION); + outputPathSpec = parser.accepts("out", "path of the zip file that the output should be written to") + .withRequiredArg(); + csrSpec = parser.accepts("csr", "only generate certificate signing requests"); + caCertPathSpec = parser.accepts("cert", "path to an existing ca certificate").availableUnless(csrSpec).withRequiredArg(); + caKeyPathSpec = parser.accepts("key", "path to an existing ca private key") + .availableIf(caCertPathSpec) + .requiredIf(caCertPathSpec) + .withRequiredArg(); + caPasswordSpec = parser.accepts("pass", "password for an existing ca private key or the generated ca private key") + .availableUnless(csrSpec) + .withOptionalArg(); + caDnSpec = parser.accepts("dn", "distinguished name to use for the generated ca. 
defaults to " + AUTO_GEN_CA_DN) + .availableUnless(caCertPathSpec) + .availableUnless(csrSpec) + .withRequiredArg(); + keysizeSpec = parser.accepts("keysize", "size in bits of RSA keys").withRequiredArg().ofType(Integer.class); + inputFileSpec = parser.accepts("in", "file containing details of the instances in yaml format").withRequiredArg(); + daysSpec = parser.accepts("days", "number of days that the generated certificates are valid") + .availableUnless(csrSpec) + .withRequiredArg() + .ofType(Integer.class); + p12Spec = parser.accepts("p12", "output a p12 (PKCS#12) version for each certificate/key pair, with optional password") + .availableUnless(csrSpec) + .withOptionalArg(); + } + + public static void main(String[] args) throws Exception { + new CertificateGenerateTool().main(args, Terminal.DEFAULT); + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + final boolean csrOnly = options.has(csrSpec); + printIntro(terminal, csrOnly); + final Path outputFile = getOutputFile(terminal, outputPathSpec.value(options), csrOnly ? DEFAULT_CSR_FILE : DEFAULT_CERT_FILE); + final String inputFile = inputFileSpec.value(options); + final int keysize = options.has(keysizeSpec) ? keysizeSpec.value(options) : DEFAULT_KEY_SIZE; + if (csrOnly) { + Collection certificateInformations = getCertificateInformationList(terminal, inputFile); + generateAndWriteCsrs(outputFile, certificateInformations, keysize); + } else { + final String dn = options.has(caDnSpec) ? caDnSpec.value(options) : AUTO_GEN_CA_DN; + final boolean prompt = options.has(caPasswordSpec); + final char[] keyPass = options.hasArgument(caPasswordSpec) ? caPasswordSpec.value(options).toCharArray() : null; + final int days = options.hasArgument(daysSpec) ? daysSpec.value(options) : DEFAULT_DAYS; + final char[] p12Password; + if (options.hasArgument(p12Spec)) { + p12Password = p12Spec.value(options).toCharArray(); + } else if (options.has(p12Spec)) { + p12Password = new char[0]; + } else { + p12Password = null; + } + CAInfo caInfo = getCAInfo(terminal, dn, caCertPathSpec.value(options), caKeyPathSpec.value(options), keyPass, prompt, env, + keysize, days); + Collection certificateInformations = getCertificateInformationList(terminal, inputFile); + generateAndWriteSignedCertificates(outputFile, certificateInformations, caInfo, keysize, days, p12Password); + } + printConclusion(terminal, csrOnly, outputFile); + } + + @Override + protected void printAdditionalHelp(Terminal terminal) { + terminal.println("Simplifies the generation of certificate signing requests and signed"); + terminal.println("certificates. The tool runs interactively unless the 'in' and 'out' parameters"); + terminal.println("are specified. 
In the interactive mode, the tool will prompt for required"); + terminal.println("values that have not been provided through the use of command line options."); + terminal.println(""); + } + + /** + * Checks for output file in the user specified options or prompts the user for the output file + * + * @param terminal terminal to communicate with a user + * @param outputPath user specified output file, may be {@code null} + * @return a {@link Path} to the output file + */ + static Path getOutputFile(Terminal terminal, String outputPath, String defaultFilename) throws IOException { + Path file; + if (outputPath != null) { + file = resolvePath(outputPath); + } else { + file = resolvePath(defaultFilename); + String input = terminal.readText("Please enter the desired output file [" + file + "]: "); + if (input.isEmpty() == false) { + file = resolvePath(input); + } + } + return file.toAbsolutePath(); + } + + @SuppressForbidden(reason = "resolve paths against CWD for a CLI tool") + private static Path resolvePath(String pathStr) { + return PathUtils.get(pathStr).normalize(); + } + + /** + * This method handles the collection of information about each instance that is necessary to generate a certificate. The user may + * be prompted or the information can be gathered from a file + * @param terminal the terminal to use for user interaction + * @param inputFile an optional file that will be used to load the instance information + * @return a {@link Collection} of {@link CertificateInformation} that represents each instance + */ + static Collection getCertificateInformationList(Terminal terminal, String inputFile) + throws Exception { + if (inputFile != null) { + return parseAndValidateFile(terminal, resolvePath(inputFile).toAbsolutePath()); + } + Map map = new HashMap<>(); + boolean done = false; + while (done == false) { + String name = terminal.readText("Enter instance name: "); + if (name.isEmpty() == false) { + final boolean isNameValidFilename = Name.isValidFilename(name); + String filename = terminal.readText("Enter name for directories and files " + (isNameValidFilename ? "[" + name + "]" : "") + + ": " ); + if (filename.isEmpty() && isNameValidFilename) { + filename = name; + } + String ipAddresses = terminal.readText("Enter IP Addresses for instance (comma-separated if more than one) []: "); + String dnsNames = terminal.readText("Enter DNS names for instance (comma-separated if more than one) []: "); + List ipList = Arrays.asList(Strings.splitStringByCommaToArray(ipAddresses)); + List dnsList = Arrays.asList(Strings.splitStringByCommaToArray(dnsNames)); + List commonNames = null; + + CertificateInformation information = new CertificateInformation(name, filename, ipList, dnsList, commonNames); + List validationErrors = information.validate(); + if (validationErrors.isEmpty()) { + if (map.containsKey(name)) { + terminal.println("Overwriting previously defined instance information [" + name + "]"); + } + map.put(name, information); + } else { + for (String validationError : validationErrors) { + terminal.println(validationError); + } + terminal.println("Skipping entry as invalid values were found"); + } + } else { + terminal.println("A name must be provided"); + } + + String exit = terminal.readText("Would you like to specify another instance? 
Press 'y' to continue entering instance " + + "information: "); + if ("y".equals(exit) == false) { + done = true; + } + } + return map.values(); + } + + static Collection parseAndValidateFile(Terminal terminal, Path file) throws Exception { + final Collection config = parseFile(file); + boolean hasError = false; + for (CertificateInformation certInfo : config) { + final List errors = certInfo.validate(); + if (errors.size() > 0) { + hasError = true; + terminal.println(Terminal.Verbosity.SILENT, "Configuration for instance " + certInfo.name.originalName + + " has invalid details"); + for (String message : errors) { + terminal.println(Terminal.Verbosity.SILENT, " * " + message); + } + terminal.println(""); + } + } + if (hasError) { + throw new UserException(ExitCodes.CONFIG, "File " + file + " contains invalid configuration details (see messages above)"); + } + return config; + } + + /** + * Parses the input file to retrieve the certificate information + * @param file the file to parse + * @return a collection of certificate information + */ + static Collection parseFile(Path file) throws Exception { + try (Reader reader = Files.newBufferedReader(file)) { + // EMPTY is safe here because we never use namedObject + XContentParser xContentParser = XContentType.YAML.xContent() + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, reader); + return InputFileParser.PARSER.parse(xContentParser, new ArrayList<>(), null); + } + } + + /** + * Generates certificate signing requests and writes them out to the specified file in zip format + * @param outputFile the file to write the output to. This file must not already exist + * @param certInfo the details to use in the certificate signing requests + */ + static void generateAndWriteCsrs(Path outputFile, Collection certInfo, int keysize) throws Exception { + fullyWriteFile(outputFile, (outputStream, pemWriter) -> { + for (CertificateInformation certificateInformation : certInfo) { + KeyPair keyPair = CertUtils.generateKeyPair(keysize); + GeneralNames sanList = getSubjectAlternativeNamesValue(certificateInformation.ipAddresses, certificateInformation.dnsNames, + certificateInformation.commonNames); + PKCS10CertificationRequest csr = CertUtils.generateCSR(keyPair, certificateInformation.name.x500Principal, sanList); + + final String dirName = certificateInformation.name.filename + "/"; + ZipEntry zipEntry = new ZipEntry(dirName); + assert zipEntry.isDirectory(); + outputStream.putNextEntry(zipEntry); + + // write csr + outputStream.putNextEntry(new ZipEntry(dirName + certificateInformation.name.filename + ".csr")); + pemWriter.writeObject(csr); + pemWriter.flush(); + outputStream.closeEntry(); + + // write private key + outputStream.putNextEntry(new ZipEntry(dirName + certificateInformation.name.filename + ".key")); + pemWriter.writeObject(keyPair.getPrivate()); + pemWriter.flush(); + outputStream.closeEntry(); + } + }); + } + + /** + * Returns the CA certificate and private key that will be used to sign certificates. These may be specified by the user or + * automatically generated + * + * @param terminal the terminal to use for prompting the user + * @param dn the distinguished name to use for the CA + * @param caCertPath the path to the CA certificate or {@code null} if not provided + * @param caKeyPath the path to the CA private key or {@code null} if not provided + * @param prompt whether we should prompt the user for a password + * @param keyPass the password to the private key. 
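The `parseFile` method above reads instance definitions from YAML through the `instances` parser. A hypothetical input file for the `in` option, using only the fields that parser declares (`name`, `filename`, `ip`, `dns`, `cn`); the host names and addresses are invented.

```yaml
instances:
  - name: "node01"
    filename: "node01"
    ip:
      - "10.0.0.5"
    dns:
      - "node01.example.com"
  - name: "CN=kibana,OU=example,O=example.com"
    filename: "kibana"
```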
If not present and the key is encrypted the user will be prompted + * @param env the environment for this tool to resolve files with + * @param keysize the size of the key in bits + * @param days the number of days that the certificate should be valid for + * @return CA cert and private key + */ + static CAInfo getCAInfo(Terminal terminal, String dn, String caCertPath, String caKeyPath, char[] keyPass, boolean prompt, + Environment env, int keysize, int days) throws Exception { + if (caCertPath != null) { + assert caKeyPath != null; + final String resolvedCaCertPath = resolvePath(caCertPath).toAbsolutePath().toString(); + Certificate[] certificates = CertUtils.readCertificates(Collections.singletonList(resolvedCaCertPath), env); + if (certificates.length != 1) { + throw new IllegalArgumentException("expected a single certificate in file [" + caCertPath + "] but found [" + + certificates.length + "]"); + } + Certificate caCert = certificates[0]; + PrivateKey privateKey = readPrivateKey(caKeyPath, keyPass, terminal, prompt); + return new CAInfo((X509Certificate) caCert, privateKey); + } + + // generate the CA keys and cert + X500Principal x500Principal = new X500Principal(dn); + KeyPair keyPair = CertUtils.generateKeyPair(keysize); + Certificate caCert = CertUtils.generateCACertificate(x500Principal, keyPair, days); + final char[] password; + if (prompt) { + password = terminal.readSecret("Enter password for CA private key: "); + } else { + password = keyPass; + } + return new CAInfo((X509Certificate) caCert, keyPair.getPrivate(), true, password); + } + + /** + * Generates signed certificates in PEM format stored in a zip file + * @param outputFile the file that the certificates will be written to. This file must not exist + * @param certificateInformations details for creation of the certificates + * @param caInfo the CA information to sign the certificates with + * @param keysize the size of the key in bits + * @param days the number of days that the certificate should be valid for + */ + static void generateAndWriteSignedCertificates(Path outputFile, Collection certificateInformations, + CAInfo caInfo, int keysize, int days, char[] pkcs12Password) throws Exception { + fullyWriteFile(outputFile, (outputStream, pemWriter) -> { + // write out the CA info first if it was generated + writeCAInfoIfGenerated(outputStream, pemWriter, caInfo); + + for (CertificateInformation certificateInformation : certificateInformations) { + KeyPair keyPair = CertUtils.generateKeyPair(keysize); + Certificate certificate = CertUtils.generateSignedCertificate(certificateInformation.name.x500Principal, + getSubjectAlternativeNamesValue(certificateInformation.ipAddresses, certificateInformation.dnsNames, + certificateInformation.commonNames), + keyPair, caInfo.caCert, caInfo.privateKey, days); + + final String dirName = certificateInformation.name.filename + "/"; + ZipEntry zipEntry = new ZipEntry(dirName); + assert zipEntry.isDirectory(); + outputStream.putNextEntry(zipEntry); + + // write cert + final String entryBase = dirName + certificateInformation.name.filename; + outputStream.putNextEntry(new ZipEntry(entryBase + ".crt")); + pemWriter.writeObject(certificate); + pemWriter.flush(); + outputStream.closeEntry(); + + // write private key + outputStream.putNextEntry(new ZipEntry(entryBase + ".key")); + pemWriter.writeObject(keyPair.getPrivate()); + pemWriter.flush(); + outputStream.closeEntry(); + + if (pkcs12Password != null) { + final KeyStore pkcs12 = KeyStore.getInstance("PKCS12"); + pkcs12.load(null); + 
pkcs12.setKeyEntry(certificateInformation.name.originalName, keyPair.getPrivate(), pkcs12Password, + new Certificate[]{certificate}); + + outputStream.putNextEntry(new ZipEntry(entryBase + ".p12")); + pkcs12.store(outputStream, pkcs12Password); + outputStream.closeEntry(); + } + } + }); + } + + /** + * This method handles the deletion of a file in the case of a partial write + * @param file the file that is being written to + * @param writer writes the contents of the file + */ + private static void fullyWriteFile(Path file, Writer writer) throws Exception { + boolean success = false; + try (OutputStream outputStream = Files.newOutputStream(file, StandardOpenOption.CREATE_NEW); + ZipOutputStream zipOutputStream = new ZipOutputStream(outputStream, StandardCharsets.UTF_8); + JcaPEMWriter pemWriter = new JcaPEMWriter(new OutputStreamWriter(zipOutputStream, StandardCharsets.UTF_8))) { + writer.write(zipOutputStream, pemWriter); + + // set permissions to 600 + PosixFileAttributeView view = Files.getFileAttributeView(file, PosixFileAttributeView.class); + if (view != null) { + view.setPermissions(Sets.newHashSet(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE)); + } + + success = true; + } finally { + if (success == false) { + Files.deleteIfExists(file); + } + } + } + + /** + * This method handles writing out the certificate authority cert and private key if the certificate authority was generated by + * this invocation of the tool + * @param outputStream the output stream to write to + * @param pemWriter the writer for PEM objects + * @param info the certificate authority information + */ + private static void writeCAInfoIfGenerated(ZipOutputStream outputStream, JcaPEMWriter pemWriter, CAInfo info) throws Exception { + if (info.generated) { + final String caDirName = "ca/"; + ZipEntry zipEntry = new ZipEntry(caDirName); + assert zipEntry.isDirectory(); + outputStream.putNextEntry(zipEntry); + outputStream.putNextEntry(new ZipEntry(caDirName + "ca.crt")); + pemWriter.writeObject(info.caCert); + pemWriter.flush(); + outputStream.closeEntry(); + outputStream.putNextEntry(new ZipEntry(caDirName + "ca.key")); + if (info.password != null && info.password.length > 0) { + try { + PEMEncryptor encryptor = new JcePEMEncryptorBuilder("DES-EDE3-CBC").setProvider(CertUtils.BC_PROV).build(info.password); + pemWriter.writeObject(info.privateKey, encryptor); + } finally { + // we can safely nuke the password chars now + Arrays.fill(info.password, (char) 0); + } + } else { + pemWriter.writeObject(info.privateKey); + } + pemWriter.flush(); + outputStream.closeEntry(); + } + } + + private static void printIntro(Terminal terminal, boolean csr) { + terminal.println("******************************************************************************"); + terminal.println("Note: The 'elasticsearch-certgen' tool has been deprecated in favour of the"); + terminal.println(" 'elasticsearch-certutil' tool. This command will be removed in a future"); + terminal.println(" release."); + terminal.println("******************************************************************************"); + terminal.println(""); + + terminal.println("This tool assists you in the generation of X.509 certificates and certificate"); + terminal.println("signing requests for use with SSL in the Elastic stack. 
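The encrypted `ca.key` entry written by `writeCAInfoIfGenerated` relies on BouncyCastle's PEM encryptor. A standalone sketch of that encryption step with a hypothetical output path and password; the generated key is only there to make the example self-contained.

```java
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.bouncycastle.openssl.PEMEncryptor;
import org.bouncycastle.openssl.jcajce.JcaPEMWriter;
import org.bouncycastle.openssl.jcajce.JcePEMEncryptorBuilder;

import java.io.Writer;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.KeyPair;
import java.security.KeyPairGenerator;

public class EncryptedPemKeyExample {
    public static void main(String[] args) throws Exception {
        KeyPairGenerator generator = KeyPairGenerator.getInstance("RSA");
        generator.initialize(2048);
        KeyPair keyPair = generator.generateKeyPair();

        char[] password = "changeme".toCharArray(); // hypothetical password
        PEMEncryptor encryptor = new JcePEMEncryptorBuilder("DES-EDE3-CBC")
                .setProvider(new BouncyCastleProvider())
                .build(password);

        // Write the private key as an encrypted PEM block, as the tool does for ca.key.
        try (Writer out = Files.newBufferedWriter(Paths.get("example.key"));
             JcaPEMWriter pemWriter = new JcaPEMWriter(out)) {
            pemWriter.writeObject(keyPair.getPrivate(), encryptor);
        }
    }
}
```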
Depending on the command"); + terminal.println("line option specified, you may be prompted for the following:"); + terminal.println(""); + terminal.println("* The path to the output file"); + if (csr) { + terminal.println(" * The output file is a zip file containing the certificate signing requests"); + terminal.println(" and private keys for each instance."); + } else { + terminal.println(" * The output file is a zip file containing the signed certificates and"); + terminal.println(" private keys for each instance. If a Certificate Authority was generated,"); + terminal.println(" the certificate and private key will also be included in the output file."); + } + terminal.println("* Information about each instance"); + terminal.println(" * An instance is any piece of the Elastic Stack that requires a SSL certificate."); + terminal.println(" Depending on your configuration, Elasticsearch, Logstash, Kibana, and Beats"); + terminal.println(" may all require a certificate and private key."); + terminal.println(" * The minimum required value for each instance is a name. This can simply be the"); + terminal.println(" hostname, which will be used as the Common Name of the certificate. A full"); + terminal.println(" distinguished name may also be used."); + terminal.println(" * A filename value may be required for each instance. This is necessary when the"); + terminal.println(" name would result in an invalid file or directory name. The name provided here"); + terminal.println(" is used as the directory name (within the zip) and the prefix for the key and"); + terminal.println(" certificate files. The filename is required if you are prompted and the name"); + terminal.println(" is not displayed in the prompt."); + terminal.println(" * IP addresses and DNS names are optional. Multiple values can be specified as a"); + terminal.println(" comma separated string. If no IP addresses or DNS names are provided, you may"); + terminal.println(" disable hostname verification in your SSL configuration."); + + if (csr == false) { + terminal.println("* Certificate Authority private key password"); + terminal.println(" * The password may be left empty if desired."); + } + terminal.println(""); + terminal.println("Let's get started..."); + terminal.println(""); + } + + private static void printConclusion(Terminal terminal, boolean csr, Path outputFile) { + if (csr) { + terminal.println("Certificate signing requests written to " + outputFile); + terminal.println(""); + terminal.println("This file should be properly secured as it contains the private keys for all"); + terminal.println("instances."); + terminal.println(""); + terminal.println("After unzipping the file, there will be a directory for each instance containing"); + terminal.println("the certificate signing request and the private key. Provide the certificate"); + terminal.println("signing requests to your certificate authority. 
Once you have received the"); + terminal.println("signed certificate, copy the signed certificate, key, and CA certificate to the"); + terminal.println("configuration directory of the Elastic product that they will be used for and"); + terminal.println("follow the SSL configuration instructions in the product guide."); + } else { + terminal.println("Certificates written to " + outputFile); + terminal.println(""); + terminal.println("This file should be properly secured as it contains the private keys for all"); + terminal.println("instances and the certificate authority."); + terminal.println(""); + terminal.println("After unzipping the file, there will be a directory for each instance containing"); + terminal.println("the certificate and private key. Copy the certificate, key, and CA certificate"); + terminal.println("to the configuration directory of the Elastic product that they will be used for"); + terminal.println("and follow the SSL configuration instructions in the product guide."); + terminal.println(""); + terminal.println("For client applications, you may only need to copy the CA certificate and"); + terminal.println("configure the client to trust this certificate."); + } + } + + /** + * Helper method to read a private key and support prompting of user for a key. To avoid passwords being placed as an argument we + * can prompt the user for their password if we encounter an encrypted key. + * @param path the path to the private key + * @param password the password provided by the user or {@code null} + * @param terminal the terminal to use for user interaction + * @param prompt whether to prompt the user or not + * @return the {@link PrivateKey} that was read from the file + */ + private static PrivateKey readPrivateKey(String path, char[] password, Terminal terminal, boolean prompt) + throws Exception { + AtomicReference passwordReference = new AtomicReference<>(password); + try (Reader reader = Files.newBufferedReader(resolvePath(path), StandardCharsets.UTF_8)) { + return CertUtils.readPrivateKey(reader, () -> { + if (password != null || prompt == false) { + return password; + } + char[] promptedValue = terminal.readSecret("Enter password for CA private key: "); + passwordReference.set(promptedValue); + return promptedValue; + }); + } finally { + if (passwordReference.get() != null) { + Arrays.fill(passwordReference.get(), (char) 0); + } + } + } + + private static GeneralNames getSubjectAlternativeNamesValue(List ipAddresses, List dnsNames, List commonNames) { + Set generalNameList = new HashSet<>(); + for (String ip : ipAddresses) { + generalNameList.add(new GeneralName(GeneralName.iPAddress, ip)); + } + + for (String dns : dnsNames) { + generalNameList.add(new GeneralName(GeneralName.dNSName, dns)); + } + + for (String cn : commonNames) { + generalNameList.add(CertUtils.createCommonName(cn)); + } + + if (generalNameList.isEmpty()) { + return null; + } + return new GeneralNames(generalNameList.toArray(new GeneralName[0])); + } + + static class CertificateInformation { + final Name name; + final List ipAddresses; + final List dnsNames; + final List commonNames; + + CertificateInformation(String name, String filename, List ipAddresses, List dnsNames, List commonNames) { + this.name = Name.fromUserProvidedName(name, filename); + this.ipAddresses = ipAddresses == null ? Collections.emptyList() : ipAddresses; + this.dnsNames = dnsNames == null ? Collections.emptyList() : dnsNames; + this.commonNames = commonNames == null ? 
Collections.emptyList() : commonNames; + } + + List validate() { + List errors = new ArrayList<>(); + if (name.error != null) { + errors.add(name.error); + } + for (String ip : ipAddresses) { + if (InetAddresses.isInetAddress(ip) == false) { + errors.add("[" + ip + "] is not a valid IP address"); + } + } + for (String dnsName : dnsNames) { + if (DERIA5String.isIA5String(dnsName) == false) { + errors.add("[" + dnsName + "] is not a valid DNS name"); + } + } + return errors; + } + } + + static class Name { + + final String originalName; + final X500Principal x500Principal; + final String filename; + final String error; + + private Name(String name, X500Principal x500Principal, String filename, String error) { + this.originalName = name; + this.x500Principal = x500Principal; + this.filename = filename; + this.error = error; + } + + static Name fromUserProvidedName(String name, String filename) { + if ("ca".equals(name)) { + return new Name(name, null, null, "[ca] may not be used as an instance name"); + } + + final X500Principal principal; + try { + if (name.contains("=")) { + principal = new X500Principal(name); + } else { + principal = new X500Principal("CN=" + name); + } + } catch (IllegalArgumentException e) { + String error = "[" + name + "] could not be converted to a valid DN\n" + e.getMessage() + "\n" + + ExceptionsHelper.stackTrace(e); + return new Name(name, null, null, error); + } + + boolean validFilename = isValidFilename(filename); + if (validFilename == false) { + return new Name(name, principal, null, "[" + filename + "] is not a valid filename"); + } + return new Name(name, principal, resolvePath(filename).toString(), null); + } + + static boolean isValidFilename(String name) { + return ALLOWED_FILENAME_CHAR_PATTERN.matcher(name).matches() + && ALLOWED_FILENAME_CHAR_PATTERN.matcher(resolvePath(name).toString()).matches() + && name.startsWith(".") == false; + } + + @Override + public String toString() { + return getClass().getSimpleName() + + "{original=[" + originalName + "] principal=[" + x500Principal + + "] file=[" + filename + "] err=[" + error + "]}"; + } + } + + static class CAInfo { + final X509Certificate caCert; + final PrivateKey privateKey; + final boolean generated; + final char[] password; + + CAInfo(X509Certificate caCert, PrivateKey privateKey) { + this(caCert, privateKey, false, null); + } + + CAInfo(X509Certificate caCert, PrivateKey privateKey, boolean generated, char[] password) { + this.caCert = caCert; + this.privateKey = privateKey; + this.generated = generated; + this.password = password; + } + } + + private interface Writer { + void write(ZipOutputStream zipOutputStream, JcaPEMWriter pemWriter) throws Exception; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertificateTool.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertificateTool.java new file mode 100644 index 0000000000000..60e3b3e556ae1 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertificateTool.java @@ -0,0 +1,1181 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ssl; + +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import joptsimple.OptionSpecBuilder; +import org.bouncycastle.asn1.DERIA5String; +import org.bouncycastle.asn1.x509.GeneralName; +import org.bouncycastle.asn1.x509.GeneralNames; +import org.bouncycastle.openssl.PEMEncryptor; +import org.bouncycastle.openssl.jcajce.JcaPEMWriter; +import org.bouncycastle.openssl.jcajce.JcePEMEncryptorBuilder; +import org.bouncycastle.pkcs.PKCS10CertificationRequest; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.LoggingAwareMultiCommand; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.Terminal.Verbosity; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.env.Environment; + +import javax.security.auth.x500.X500Principal; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.Reader; +import java.nio.CharBuffer; +import java.nio.charset.CharsetEncoder; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.attribute.PosixFilePermission; +import java.security.Key; +import java.security.KeyPair; +import java.security.KeyStore; +import java.security.PrivateKey; +import java.security.cert.Certificate; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import java.util.zip.ZipEntry; +import java.util.zip.ZipOutputStream; + +/** + * CLI tool to make generation of certificates or certificate requests easier for users + */ +public class CertificateTool extends LoggingAwareMultiCommand { + + private static final String AUTO_GEN_CA_DN = "CN=Elastic Certificate Tool Autogenerated CA"; + private static final String DESCRIPTION = "Simplifies certificate creation for use with the Elastic Stack"; + private static final String DEFAULT_CSR_ZIP = "csr-bundle.zip"; + private static final String DEFAULT_CERT_ZIP = "certificate-bundle.zip"; + private static final String DEFAULT_CA_ZIP = "elastic-stack-ca.zip"; + private static final String DEFAULT_CA_P12 = "elastic-stack-ca.p12"; + + static 
final String DEFAULT_CERT_NAME = "instance"; + + /** + * Used to test whether passwords are ASCII (which PKCS/PBE requires) + */ + private static final CharsetEncoder ASCII_ENCODER = StandardCharsets.US_ASCII.newEncoder(); + + private static final int DEFAULT_DAYS = 3 * 365; + private static final int FILE_EXTENSION_LENGTH = 4; + static final int MAX_FILENAME_LENGTH = 255 - FILE_EXTENSION_LENGTH; + private static final Pattern ALLOWED_FILENAME_CHAR_PATTERN = + Pattern.compile("[a-zA-Z0-9!@#$%^&{}\\[\\]()_+\\-=,.~'` ]{1," + MAX_FILENAME_LENGTH + "}"); + private static final int DEFAULT_KEY_SIZE = 2048; + + /** + * Wraps the certgen object parser. + */ + private static class CertificateToolParser { + private static final ObjectParser, Void> PARSER = new ObjectParser<>("certgen"); + + // if the class initializer here runs before the main method, logging will not have been configured; this will lead to status logger + // error messages from the class initializer for ParseField since it creates Logger instances; therefore, we bury the initialization + // of the parser in this class so that we can defer initialization until after logging has been initialized + static { + @SuppressWarnings("unchecked") final ConstructingObjectParser instanceParser = + new ConstructingObjectParser<>( + "instances", + a -> new CertificateInformation( + (String) a[0], (String) (a[1] == null ? a[0] : a[1]), + (List) a[2], (List) a[3], (List) a[4])); + instanceParser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("name")); + instanceParser.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("filename")); + instanceParser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), new ParseField("ip")); + instanceParser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), new ParseField("dns")); + instanceParser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), new ParseField("cn")); + + PARSER.declareObjectArray(List::addAll, instanceParser, new ParseField("instances")); + } + } + + + public static void main(String[] args) throws Exception { + new CertificateTool().main(args, Terminal.DEFAULT); + } + + CertificateTool() { + super(DESCRIPTION); + subcommands.put("csr", new SigningRequestCommand()); + subcommands.put("cert", new GenerateCertificateCommand()); + subcommands.put("ca", new CertificateAuthorityCommand()); + } + + + static final String INTRO_TEXT = "This tool assists you in the generation of X.509 certificates and certificate\n" + + "signing requests for use with SSL/TLS in the Elastic stack."; + + static final String INSTANCE_EXPLANATION = + " * An instance is any piece of the Elastic Stack that requires a SSL certificate.\n" + + " Depending on your configuration, Elasticsearch, Logstash, Kibana, and Beats\n" + + " may all require a certificate and private key.\n" + + " * The minimum required value for each instance is a name. This can simply be the\n" + + " hostname, which will be used as the Common Name of the certificate. A full\n" + + " distinguished name may also be used.\n" + + " * A filename value may be required for each instance. This is necessary when the\n" + + " name would result in an invalid file or directory name. The name provided here\n" + + " is used as the directory name (within the zip) and the prefix for the key and\n" + + " certificate files. 
The filename is required if you are prompted and the name\n" + + " is not displayed in the prompt.\n" + + " * IP addresses and DNS names are optional. Multiple values can be specified as a\n" + + " comma separated string. If no IP addresses or DNS names are provided, you may\n" + + " disable hostname verification in your SSL configuration."; + + static final String CA_EXPLANATION = + " * All certificates generated by this tool will be signed by a certificate authority (CA).\n" + + " * The tool can automatically generate a new CA for you, or you can provide your own with the\n" + + " -ca or -ca-cert command line options."; + + + abstract static class CertificateCommand extends EnvironmentAwareCommand { + // Common option for multiple commands. + // Not every command uses every option, but where they are common we want to keep them consistent + final OptionSpec outputPathSpec; + final OptionSpec outputPasswordSpec; + final OptionSpec keysizeSpec; + + OptionSpec pemFormatSpec; + OptionSpec daysSpec; + + OptionSpec caPkcs12PathSpec; + OptionSpec caCertPathSpec; + OptionSpec caKeyPathSpec; + OptionSpec caPasswordSpec; + OptionSpec caDnSpec; + OptionSpec keepCaKeySpec; + + OptionSpec multipleNodesSpec; + OptionSpec nameSpec; + OptionSpec dnsNamesSpec; + OptionSpec ipAddressesSpec; + + OptionSpec inputFileSpec; + + CertificateCommand(String description) { + super(description); + outputPathSpec = parser.accepts("out", "path to the output file that should be produced").withRequiredArg(); + outputPasswordSpec = parser.accepts("pass", "password for generated private keys").withOptionalArg(); + keysizeSpec = parser.accepts("keysize", "size in bits of RSA keys").withRequiredArg().ofType(Integer.class); + } + + final void acceptCertificateGenerationOptions() { + pemFormatSpec = parser.accepts("pem", "output certificates and keys in PEM format instead of PKCS#12"); + daysSpec = parser.accepts("days", "number of days that the generated certificates are valid") + .withRequiredArg().ofType(Integer.class); + } + + final void acceptsCertificateAuthority() { + caPkcs12PathSpec = parser.accepts("ca", "path to an existing ca key pair (in PKCS#12 format)").withRequiredArg(); + caCertPathSpec = parser.accepts("ca-cert", "path to an existing ca certificate") + .availableUnless(caPkcs12PathSpec) + .withRequiredArg(); + caKeyPathSpec = parser.accepts("ca-key", "path to an existing ca private key") + .availableIf(caCertPathSpec) + .requiredIf(caCertPathSpec) + .withRequiredArg(); + + keepCaKeySpec = parser.accepts("keep-ca-key", "retain the CA private key for future use") + .availableUnless(caPkcs12PathSpec) + .availableUnless(caCertPathSpec); + + caPasswordSpec = parser.accepts("ca-pass", "password for an existing ca private key or the generated ca private key") + .withOptionalArg(); + + acceptsCertificateAuthorityName(); + } + + void acceptsCertificateAuthorityName() { + OptionSpecBuilder builder = parser.accepts("ca-dn", + "distinguished name to use for the generated ca. 
defaults to " + AUTO_GEN_CA_DN); + if (caPkcs12PathSpec != null) { + builder = builder.availableUnless(caPkcs12PathSpec); + } + if (caCertPathSpec != null) { + builder = builder.availableUnless(caCertPathSpec); + } + caDnSpec = builder.withRequiredArg(); + } + + final void acceptInstanceDetails() { + multipleNodesSpec = parser.accepts("multiple", "generate files for multiple instances"); + nameSpec = parser.accepts("name", "name of the generated certificate").availableUnless(multipleNodesSpec).withRequiredArg(); + dnsNamesSpec = parser.accepts("dns", "comma separated DNS names").availableUnless(multipleNodesSpec).withRequiredArg(); + ipAddressesSpec = parser.accepts("ip", "comma separated IP addresses").availableUnless(multipleNodesSpec).withRequiredArg(); + } + + final void acceptInputFile() { + inputFileSpec = parser.accepts("in", "file containing details of the instances in yaml format").withRequiredArg(); + } + + // For testing + OptionParser getParser() { + return parser; + } + + /** + * Checks for output file in the user specified options or prompts the user for the output file. + * The resulting path is stored in the {@code config} parameter. + */ + Path resolveOutputPath(Terminal terminal, OptionSet options, String defaultFilename) throws IOException { + return resolveOutputPath(terminal, outputPathSpec.value(options), defaultFilename); + } + + static Path resolveOutputPath(Terminal terminal, String userOption, String defaultFilename) { + Path file; + if (userOption != null) { + file = CertificateTool.resolvePath(userOption); + } else { + file = CertificateTool.resolvePath(defaultFilename); + String input = terminal.readText("Please enter the desired output file [" + file + "]: "); + if (input.isEmpty() == false) { + file = CertificateTool.resolvePath(input); + } + } + return file.toAbsolutePath(); + } + + final int getKeySize(OptionSet options) { + if (options.has(keysizeSpec)) { + return keysizeSpec.value(options); + } else { + return DEFAULT_KEY_SIZE; + } + } + + final int getDays(OptionSet options) { + if (options.has(daysSpec)) { + return daysSpec.value(options); + } else { + return DEFAULT_DAYS; + } + } + + boolean keepCaKey(OptionSet options) { + return options.has(keepCaKeySpec); + } + + boolean usePemFormat(OptionSet options) { + return options.has(pemFormatSpec); + } + + boolean useOutputPassword(OptionSet options) { + return options.has(outputPasswordSpec); + } + + char[] getOutputPassword(OptionSet options) { + return getChars(outputPasswordSpec.value(options)); + } + + protected Path resolvePath(OptionSet options, OptionSpec spec) { + final String value = spec.value(options); + if (Strings.isNullOrEmpty(value)) { + return null; + } + return CertificateTool.resolvePath(value); + } + + /** + * Returns the CA certificate and private key that will be used to sign certificates. 
These may be specified by the user or + * automatically generated + * + * @return CA cert and private key + */ + CAInfo getCAInfo(Terminal terminal, OptionSet options, Environment env) throws Exception { + if (options.has(caPkcs12PathSpec)) { + return loadPkcs12CA(terminal, options, env); + } else if (options.has(caCertPathSpec)) { + return loadPemCA(terminal, options, env); + } else { + return generateCA(terminal, options); + } + } + + private CAInfo loadPkcs12CA(Terminal terminal, OptionSet options, Environment env) throws Exception { + Path path = resolvePath(options, caPkcs12PathSpec); + char[] passwordOption = getChars(caPasswordSpec.value(options)); + + Map keys = withPassword("CA (" + path + ")", passwordOption, + terminal, password -> CertUtils.readPkcs12KeyPairs(path, password, a -> password, env)); + + if (keys.size() != 1) { + throw new IllegalArgumentException("expected a single key in file [" + path.toAbsolutePath() + "] but found [" + + keys.size() + "]"); + } + final Map.Entry pair = keys.entrySet().iterator().next(); + return new CAInfo((X509Certificate) pair.getKey(), (PrivateKey) pair.getValue()); + } + + private CAInfo loadPemCA(Terminal terminal, OptionSet options, Environment env) throws Exception { + if (options.hasArgument(caKeyPathSpec) == false) { + throw new UserException(ExitCodes.USAGE, "Option " + caCertPathSpec + " also requires " + caKeyPathSpec); + } + Path cert = resolvePath(options, caCertPathSpec); + Path key = resolvePath(options, caKeyPathSpec); + String password = caPasswordSpec.value(options); + + final String resolvedCaCertPath = cert.toAbsolutePath().toString(); + Certificate[] certificates = CertUtils.readCertificates(Collections.singletonList(resolvedCaCertPath), env); + if (certificates.length != 1) { + throw new IllegalArgumentException("expected a single certificate in file [" + resolvedCaCertPath + "] but found [" + + certificates.length + "]"); + } + X509Certificate caCert = (X509Certificate) certificates[0]; + PrivateKey privateKey = readPrivateKey(key, getChars(password), terminal); + return new CAInfo(caCert, privateKey); + } + + CAInfo generateCA(Terminal terminal, OptionSet options) throws Exception { + String dn = caDnSpec.value(options); + if (Strings.isNullOrEmpty(dn)) { + dn = AUTO_GEN_CA_DN; + } + X500Principal x500Principal = new X500Principal(dn); + KeyPair keyPair = CertUtils.generateKeyPair(getKeySize(options)); + X509Certificate caCert = CertUtils.generateCACertificate(x500Principal, keyPair, getDays(options)); + + if (options.hasArgument(caPasswordSpec)) { + char[] password = getChars(caPasswordSpec.value(options)); + return new CAInfo(caCert, keyPair.getPrivate(), true, password); + } + if (options.has(caPasswordSpec)) { + return withPassword("CA Private key", null, terminal, p -> new CAInfo(caCert, keyPair.getPrivate(), true, p.clone())); + } + return new CAInfo(caCert, keyPair.getPrivate(), true, null); + } + + /** + * This method handles the collection of information about each instance that is necessary to generate a certificate. 
The user may + * be prompted or the information can be gathered from a file + * + * @return a {@link Collection} of {@link CertificateInformation} that represents each instance + */ + Collection getCertificateInformationList(Terminal terminal, OptionSet options) + throws Exception { + final Path input = resolvePath(options, inputFileSpec); + if (input != null) { + return parseAndValidateFile(terminal, input.toAbsolutePath()); + } + if (options.has(multipleNodesSpec)) { + return readMultipleCertificateInformation(terminal); + } else { + final Function> splitByComma = v -> Arrays.stream(Strings.splitStringByCommaToArray(v)); + final List dns = dnsNamesSpec.values(options).stream().flatMap(splitByComma).collect(Collectors.toList()); + final List ip = ipAddressesSpec.values(options).stream().flatMap(splitByComma).collect(Collectors.toList()); + final List cn = null; + final String name = getCertificateName(options); + final String fileName; + if (Name.isValidFilename(name)) { + fileName = name; + } else { + fileName = requestFileName(terminal, name); + } + CertificateInformation information = new CertificateInformation(name, fileName, ip, dns, cn); + List validationErrors = information.validate(); + if (validationErrors.isEmpty()) { + return Collections.singleton(information); + } else { + validationErrors.forEach(terminal::println); + return Collections.emptyList(); + } + } + } + + protected String getCertificateName(OptionSet options) { + return options.has(nameSpec) ? nameSpec.value(options) : DEFAULT_CERT_NAME; + } + + static Collection readMultipleCertificateInformation(Terminal terminal) { + Map map = new HashMap<>(); + boolean done = false; + while (done == false) { + String name = terminal.readText("Enter instance name: "); + if (name.isEmpty() == false) { + String filename = requestFileName(terminal, name); + String ipAddresses = terminal.readText("Enter IP Addresses for instance (comma-separated if more than one) []: "); + String dnsNames = terminal.readText("Enter DNS names for instance (comma-separated if more than one) []: "); + List ipList = Arrays.asList(Strings.splitStringByCommaToArray(ipAddresses)); + List dnsList = Arrays.asList(Strings.splitStringByCommaToArray(dnsNames)); + List commonNames = null; + + CertificateInformation information = new CertificateInformation(name, filename, ipList, dnsList, commonNames); + List validationErrors = information.validate(); + if (validationErrors.isEmpty()) { + if (map.containsKey(name)) { + terminal.println("Overwriting previously defined instance information [" + name + "]"); + } + map.put(name, information); + } else { + for (String validationError : validationErrors) { + terminal.println(validationError); + } + terminal.println("Skipping entry as invalid values were found"); + } + } else { + terminal.println("A name must be provided"); + } + + String exit = terminal.readText("Would you like to specify another instance? Press 'y' to continue entering instance " + + "information: "); + if ("y".equals(exit) == false) { + done = true; + } + } + return map.values(); + } + + private static String requestFileName(Terminal terminal, String certName) { + final boolean isNameValidFilename = Name.isValidFilename(certName); + while (true) { + String filename = terminal.readText("Enter name for directories and files of " + certName + + (isNameValidFilename ? 
" [" + certName + "]" : "") + ": "); + if (filename.isEmpty() && isNameValidFilename) { + return certName; + } + if (Name.isValidFilename(filename)) { + return filename; + } else { + terminal.println(Terminal.Verbosity.SILENT, "'" + filename + "' is not a valid filename"); + continue; + } + } + } + + /** + * This method handles writing out the certificate authority in PEM format to a zip file. + * + * @param outputStream the output stream to write to + * @param pemWriter the writer for PEM objects + * @param info the certificate authority information + * @param includeKey if true, write the CA key in PEM format + */ + static void writeCAInfo(ZipOutputStream outputStream, JcaPEMWriter pemWriter, CAInfo info, boolean includeKey) + throws Exception { + final String caDirName = createCaDirectory(outputStream); + outputStream.putNextEntry(new ZipEntry(caDirName + "ca.crt")); + pemWriter.writeObject(info.certAndKey.cert); + pemWriter.flush(); + outputStream.closeEntry(); + if (includeKey) { + outputStream.putNextEntry(new ZipEntry(caDirName + "ca.key")); + if (info.password != null && info.password.length > 0) { + try { + PEMEncryptor encryptor = getEncrypter(info.password); + pemWriter.writeObject(info.certAndKey.key, encryptor); + } finally { + // we can safely nuke the password chars now + Arrays.fill(info.password, (char) 0); + } + } else { + pemWriter.writeObject(info.certAndKey.key); + } + pemWriter.flush(); + outputStream.closeEntry(); + } + } + + /** + * This method handles writing out the certificate authority in PKCS#12 format to a zip file. + * + * @param outputStream the output stream to write to + * @param info the certificate authority information + * @param terminal used to prompt for a password (if not already supplied) + */ + static void writeCAInfo(ZipOutputStream outputStream, CAInfo info, Terminal terminal) throws Exception { + final String dirName = createCaDirectory(outputStream); + final String fileName = dirName + "ca.p12"; + outputStream.putNextEntry(new ZipEntry(fileName)); + withPassword("Generated CA", info.password, terminal, caPassword -> { + writePkcs12(fileName, outputStream, "ca", info.certAndKey, null, caPassword, null); + return null; + }); + outputStream.closeEntry(); + } + + private static String createCaDirectory(ZipOutputStream outputStream) throws IOException { + final String caDirName = "ca/"; + ZipEntry zipEntry = new ZipEntry(caDirName); + assert zipEntry.isDirectory(); + outputStream.putNextEntry(zipEntry); + return caDirName; + } + + static void writePkcs12(String fileName, OutputStream output, String alias, CertificateAndKey pair, X509Certificate caCert, + char[] password, Terminal terminal) throws Exception { + final KeyStore pkcs12 = KeyStore.getInstance("PKCS12"); + pkcs12.load(null); + withPassword(fileName, password, terminal, p12Password -> { + if (isAscii(p12Password)) { + pkcs12.setKeyEntry(alias, pair.key, p12Password, new Certificate[] { pair.cert }); + if (caCert != null) { + pkcs12.setCertificateEntry("ca", caCert); + } + pkcs12.store(output, p12Password); + return null; + } else { + throw new UserException(ExitCodes.CONFIG, "PKCS#12 passwords must be plain ASCII"); + } + }); + } + } + + static class SigningRequestCommand extends CertificateCommand { + + SigningRequestCommand() { + super("generate certificate signing requests"); + acceptInstanceDetails(); + acceptInputFile(); + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + terminal.println(INTRO_TEXT); + 
terminal.println(""); + terminal.println("The 'csr' mode generates certificate signing requests that can be sent to"); + terminal.println("a trusted certificate authority"); + terminal.println(" * By default, this generates a single CSR for a single instance."); + terminal.println(" * You can use the '-multiple' option to generate CSRs for multiple" ); + terminal.println(" instances, each with their own private key."); + terminal.println(" * The '-in' option allows for the CSR generation to be automated"); + terminal.println(" by describing the details of each instance in a YAML file"); + terminal.println(""); + terminal.println(INSTANCE_EXPLANATION); + terminal.println(""); + terminal.println("The 'csr' mode produces a single zip file which contains the certificate"); + terminal.println("signing requests and private keys for each instance."); + terminal.println(" * Each certificate signing request is provided as a standard PEM encoding of a PKCS#10 CSR."); + terminal.println(" * Each key is provided as a PEM encoding of an RSA private key"); + terminal.println(""); + + final Path output = resolveOutputPath(terminal, options, DEFAULT_CSR_ZIP); + final int keySize = getKeySize(options); + Collection certificateInformations = getCertificateInformationList(terminal, options); + generateAndWriteCsrs(output, keySize, certificateInformations); + + terminal.println(""); + terminal.println("Certificate signing requests have been written to " + output); + terminal.println(""); + terminal.println("This file should be properly secured as it contains the private keys for all"); + terminal.println("instances."); + terminal.println(""); + terminal.println("After unzipping the file, there will be a directory for each instance containing"); + terminal.println("the certificate signing request and the private key. Provide the certificate"); + terminal.println("signing requests to your certificate authority. 
Once you have received the");
+ terminal.println("signed certificate, copy the signed certificate, key, and CA certificate to the");
+ terminal.println("configuration directory of the Elastic product that they will be used for and");
+ terminal.println("follow the SSL configuration instructions in the product guide.");
+ }
+
+ /**
+ * Generates certificate signing requests and writes them out to the specified file in zip format
+ *
+ * @param certInfo the details to use in the certificate signing requests
+ */
+ void generateAndWriteCsrs(Path output, int keySize, Collection<CertificateInformation> certInfo) throws Exception {
+ fullyWriteZipFile(output, (outputStream, pemWriter) -> {
+ for (CertificateInformation certificateInformation : certInfo) {
+ KeyPair keyPair = CertUtils.generateKeyPair(keySize);
+ GeneralNames sanList = getSubjectAlternativeNamesValue(certificateInformation.ipAddresses,
+ certificateInformation.dnsNames, certificateInformation.commonNames);
+ PKCS10CertificationRequest csr = CertUtils.generateCSR(keyPair, certificateInformation.name.x500Principal, sanList);
+
+ final String dirName = certificateInformation.name.filename + "/";
+ ZipEntry zipEntry = new ZipEntry(dirName);
+ assert zipEntry.isDirectory();
+ outputStream.putNextEntry(zipEntry);
+
+ // write csr
+ outputStream.putNextEntry(new ZipEntry(dirName + certificateInformation.name.filename + ".csr"));
+ pemWriter.writeObject(csr);
+ pemWriter.flush();
+ outputStream.closeEntry();
+
+ // write private key
+ outputStream.putNextEntry(new ZipEntry(dirName + certificateInformation.name.filename + ".key"));
+ pemWriter.writeObject(keyPair.getPrivate());
+ pemWriter.flush();
+ outputStream.closeEntry();
+ }
+ });
+ }
+ }
+
+ static class GenerateCertificateCommand extends CertificateCommand {
+
+ GenerateCertificateCommand() {
+ super("generate X.509 certificates and keys");
+ acceptCertificateGenerationOptions();
+ acceptInstanceDetails();
+ acceptsCertificateAuthority();
+ acceptInputFile();
+ }
+
+ @Override
+ protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception {
+ terminal.println(INTRO_TEXT);
+ terminal.println("");
+ terminal.println("The 'cert' mode generates X.509 certificates and private keys.");
+ terminal.println(" * By default, this generates a single certificate and key for use");
+ terminal.println(" on a single instance.");
+ terminal.println(" * The '-multiple' option will prompt you to enter details for multiple");
+ terminal.println(" instances and will generate a certificate and key for each one");
+ terminal.println(" * The '-in' option allows for the certificate generation to be automated by describing");
+ terminal.println(" the details of each instance in a YAML file");
+ terminal.println("");
+ terminal.println(INSTANCE_EXPLANATION);
+ terminal.println("");
+ terminal.println(CA_EXPLANATION);
+ terminal.println("");
+ terminal.println("By default the 'cert' mode produces a single PKCS#12 output file which holds:");
+ terminal.println(" * The instance certificate");
+ terminal.println(" * The private key for the instance certificate");
+ terminal.println(" * The CA certificate");
+ terminal.println("");
+ terminal.println("If you specify any of the following options:");
+ terminal.println(" * -pem (PEM formatted output)");
+ terminal.println(" * -keep-ca-key (retain generated CA key)");
+ terminal.println(" * -multiple (generate multiple certificates)");
+ terminal.println(" * -in (generate certificates from an input file)");
+ terminal.println("then the output will be a
zip file containing individual certificate/key files"); + terminal.println(""); + + CAInfo caInfo = getCAInfo(terminal, options, env); + Collection certInfo = getCertificateInformationList(terminal, options); + final boolean keepCaKey = keepCaKey(options); + final boolean usePemFormat = usePemFormat(options); + final boolean writeZipFile = options.has(multipleNodesSpec) || options.has(inputFileSpec) || keepCaKey || usePemFormat; + + final String outputName; + if (writeZipFile) { + outputName = DEFAULT_CERT_ZIP; + } else if (options.has(nameSpec)) { + outputName = nameSpec.value(options) + ".p12"; + } else { + outputName = "elastic-certificates.p12"; + } + final Path output = resolveOutputPath(terminal, options, outputName); + + generateAndWriteSignedCertificates(output, writeZipFile, options, certInfo, caInfo, terminal); + + terminal.println(""); + terminal.println("Certificates written to " + output); + terminal.println(""); + if (certInfo.size() > 1) { + terminal.println(Terminal.Verbosity.NORMAL, "This file should be properly secured as it contains the private keys for "); + terminal.print(Terminal.Verbosity.NORMAL, "all instances"); + } else { + terminal.println(Terminal.Verbosity.NORMAL, "This file should be properly secured as it contains the private key for "); + terminal.print(Terminal.Verbosity.NORMAL, "your instance"); + } + if (caInfo.generated && keepCaKey) { + terminal.println(Terminal.Verbosity.NORMAL, " and for the certificate authority."); + } else { + terminal.println(Terminal.Verbosity.NORMAL, "."); + } + terminal.println(""); + final String filesDescription; + if (writeZipFile) { + terminal.println("After unzipping the file, there will be a directory for each instance."); + if (usePemFormat) { + terminal.println("Each instance has a certificate and private key."); + filesDescription = "the certificate, key, and CA certificate"; + } else { + terminal.println("Each instance has a single PKCS#12 (.p12) file containing the instance"); + terminal.println("certificate, instance private key and the CA certificate"); + filesDescription = "this '.p12' file"; + } + } else { + terminal.println("This file is a self contained file and can be copied and used 'as is'"); + filesDescription = "this '.p12' file"; + } + terminal.println("For each Elastic product that you wish to configure, you should copy"); + terminal.println(filesDescription + " to the relevant configuration directory"); + terminal.println("and then follow the SSL configuration instructions in the product guide."); + terminal.println(""); + if (usePemFormat || caInfo.generated == false) { + terminal.println("For client applications, you may only need to copy the CA certificate and"); + terminal.println("configure the client to trust this certificate."); + } + } + + /** + * Generates signed certificates in either PKCS#12 format or PEM format, wrapped in a zip file if necessary. 
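+ * <p>
+ * Illustrative sketch of the resulting layout, inferred from the implementation below (names such as
+ * {@code node-1} are invented examples): when a zip file is written, each instance gets a directory named
+ * after its configured filename. In PEM mode that directory holds {@code node-1/node-1.crt} and
+ * {@code node-1/node-1.key}, and a newly generated CA is written as {@code ca/ca.crt} (plus {@code ca/ca.key}
+ * when the CA key is kept). In PKCS#12 mode each instance directory holds a single {@code node-1/node-1.p12}
+ * that also bundles the CA certificate, and a kept, newly generated CA is written as {@code ca/ca.p12}.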
+ * + * @param output the output file (either zip, or PKCS#12) + * @param writeZipFile if true, output a zip file, otherwise output a single PKCS#12 file + * @param options the current command line options + * @param certs the certificates to write to the file + * @param caInfo the CA information to sign the certificates with + * @param terminal the terminal to use if prompting for passwords + */ + void generateAndWriteSignedCertificates(Path output, boolean writeZipFile, OptionSet options, + Collection certs, CAInfo caInfo, Terminal terminal) + throws Exception { + + checkDirectory(output, terminal); + + final int keySize = getKeySize(options); + final int days = getDays(options); + final char[] outputPassword = super.getOutputPassword(options); + if (writeZipFile) { + final boolean usePem = usePemFormat(options); + final boolean usePassword = super.useOutputPassword(options); + fullyWriteZipFile(output, (outputStream, pemWriter) -> { + // write out the CA info first if it was generated + if (caInfo.generated) { + final boolean writeCAKey = keepCaKey(options); + if (usePem) { + writeCAInfo(outputStream, pemWriter, caInfo, writeCAKey); + } else if (writeCAKey) { + writeCAInfo(outputStream, caInfo, terminal); + } + } + + for (CertificateInformation certificateInformation : certs) { + CertificateAndKey pair = generateCertificateAndKey(certificateInformation, caInfo, keySize, days); + + final String dirName = certificateInformation.name.filename + "/"; + ZipEntry zipEntry = new ZipEntry(dirName); + assert zipEntry.isDirectory(); + outputStream.putNextEntry(zipEntry); + + final String entryBase = dirName + certificateInformation.name.filename; + + if (usePem) { + // write cert + outputStream.putNextEntry(new ZipEntry(entryBase + ".crt")); + pemWriter.writeObject(pair.cert); + pemWriter.flush(); + outputStream.closeEntry(); + + // write private key + final String keyFileName = entryBase + ".key"; + outputStream.putNextEntry(new ZipEntry(keyFileName)); + if (usePassword) { + withPassword(keyFileName, outputPassword, terminal, password -> { + pemWriter.writeObject(pair.key, getEncrypter(password)); + return null; + }); + } else { + pemWriter.writeObject(pair.key); + } + pemWriter.flush(); + outputStream.closeEntry(); + } else { + final String fileName = entryBase + ".p12"; + outputStream.putNextEntry(new ZipEntry(fileName)); + writePkcs12(fileName, outputStream, certificateInformation.name.originalName, pair, caInfo.certAndKey.cert, + outputPassword, terminal); + outputStream.closeEntry(); + } + } + }); + } else { + assert certs.size() == 1; + CertificateInformation certificateInformation = certs.iterator().next(); + CertificateAndKey pair = generateCertificateAndKey(certificateInformation, caInfo, keySize, days); + fullyWriteFile(output, stream -> writePkcs12(output.getFileName().toString(), stream, + certificateInformation.name.originalName, pair, caInfo.certAndKey.cert, outputPassword, terminal)); + } + } + + private CertificateAndKey generateCertificateAndKey(CertificateInformation certificateInformation, CAInfo caInfo, + int keySize, int days) throws Exception { + KeyPair keyPair = CertUtils.generateKeyPair(keySize); + Certificate certificate = CertUtils.generateSignedCertificate(certificateInformation.name.x500Principal, + getSubjectAlternativeNamesValue(certificateInformation.ipAddresses, certificateInformation.dnsNames, + certificateInformation.commonNames), + keyPair, caInfo.certAndKey.cert, caInfo.certAndKey.key, days); + return new CertificateAndKey((X509Certificate) certificate, 
keyPair.getPrivate()); + } + + } + + static class CertificateAuthorityCommand extends CertificateCommand { + + CertificateAuthorityCommand() { + super("generate a new local certificate authority"); + acceptCertificateGenerationOptions(); + acceptsCertificateAuthorityName(); + super.caPasswordSpec = super.outputPasswordSpec; + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + terminal.println(INTRO_TEXT); + terminal.println(""); + terminal.println("The 'ca' mode generates a new 'certificate authority'"); + terminal.println("This will create a new X.509 certificate and private key that can be used"); + terminal.println("to sign certificate when running in 'cert' mode."); + terminal.println(""); + terminal.println("Use the 'ca-dn' option if you wish to configure the 'distinguished name'"); + terminal.println("of the certificate authority"); + terminal.println(""); + terminal.println("By default the 'ca' mode produces a single PKCS#12 output file which holds:"); + terminal.println(" * The CA certificate"); + terminal.println(" * The CA's private key"); + terminal.println(""); + terminal.println("If you elect to generate PEM format certificates (the -pem option), then the output will"); + terminal.println("be a zip file containing individual files for the CA certificate and private key"); + terminal.println(""); + + CAInfo caInfo = generateCA(terminal, options); + final boolean writeZipFile = usePemFormat(options); + final Path output = resolveOutputPath(terminal, options, writeZipFile ? DEFAULT_CA_ZIP : DEFAULT_CA_P12); + writeCertificateAuthority(output, caInfo, writeZipFile, terminal); + } + + private void writeCertificateAuthority(Path output, CAInfo caInfo, boolean writePemZip, Terminal terminal) throws Exception { + checkDirectory(output, terminal); + if (writePemZip) { + fullyWriteZipFile(output, (outputStream, pemWriter) -> writeCAInfo(outputStream, pemWriter, caInfo, true)); + } else { + final String fileName = output.getFileName().toString(); + fullyWriteFile(output, outputStream -> + writePkcs12(fileName, outputStream, "ca", caInfo.certAndKey, null, caInfo.password, terminal)); + } + } + } + + @SuppressForbidden(reason = "resolve paths against CWD for a CLI tool") + static Path resolvePath(String pathStr) { + return PathUtils.get(pathStr).normalize(); + } + + static Collection parseAndValidateFile(Terminal terminal, Path file) throws Exception { + final Collection config = parseFile(file); + boolean hasError = false; + for (CertificateInformation certInfo : config) { + final List errors = certInfo.validate(); + if (errors.size() > 0) { + hasError = true; + terminal.println(Verbosity.SILENT, "Configuration for instance " + certInfo.name.originalName + " has invalid details"); + for (String message : errors) { + terminal.println(Verbosity.SILENT, " * " + message); + } + terminal.println(""); + } + } + if (hasError) { + throw new UserException(ExitCodes.CONFIG, "File " + file + " contains invalid configuration details (see messages above)"); + } + return config; + } + + /** + * Parses the input file to retrieve the certificate information + * + * @param file the file to parse + * @return a collection of certificate information + */ + static Collection parseFile(Path file) throws Exception { + try (Reader reader = Files.newBufferedReader(file)) { + // EMPTY is safe here because we never use namedObject + XContentParser xContentParser = XContentType.YAML.xContent() + .createParser(NamedXContentRegistry.EMPTY, 
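+ /* Illustrative example of the YAML consumed here, matching the "instances" structure declared in
+  * CertificateToolParser above (the names, IP and DNS values are invented for illustration):
+  *
+  *   instances:
+  *     - name: "node-1"
+  *       ip: ["10.0.0.1"]
+  *       dns: ["node-1.example.com"]
+  *     - name: "CN=node-2,OU=ops,DC=example,DC=com"
+  *       filename: "node-2"
+  */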
LoggingDeprecationHandler.INSTANCE, reader); + return CertificateToolParser.PARSER.parse(xContentParser, new ArrayList<>(), null); + } + } + + private static PEMEncryptor getEncrypter(char[] password) { + return new JcePEMEncryptorBuilder("DES-EDE3-CBC").setProvider(CertUtils.BC_PROV).build(password); + } + + private static T withPassword(String description, char[] password, Terminal terminal, + CheckedFunction body) throws E { + if (password == null) { + char[] promptedValue = terminal.readSecret("Enter password for " + description + " : "); + try { + return body.apply(promptedValue); + } finally { + Arrays.fill(promptedValue, (char) 0); + } + } else { + return body.apply(password); + } + } + + /** + * This method handles the deletion of a file in the case of a partial write + * + * @param file the file that is being written to + * @param writer writes the contents of the file + */ + private static void fullyWriteZipFile(Path file, Writer writer) throws Exception { + fullyWriteFile(file, outputStream -> { + try (ZipOutputStream zipOutputStream = new ZipOutputStream(outputStream, StandardCharsets.UTF_8); + JcaPEMWriter pemWriter = new JcaPEMWriter(new OutputStreamWriter(zipOutputStream, StandardCharsets.UTF_8))) { + writer.write(zipOutputStream, pemWriter); + } + }); + } + + /** + * Checks whether the parent directories of {@code path} exist, and offers to create them if needed. + */ + private static void checkDirectory(Path path, Terminal terminal) throws UserException { + final Path parent = path.getParent(); + if (Files.isDirectory(parent)) { + return; + } + if (Files.exists(parent)) { + terminal.println(Terminal.Verbosity.SILENT, "Path " + parent + " exists, but is not a directory. Cannot write to " + path); + throw new UserException(ExitCodes.CANT_CREATE, "Cannot write to " + path); + } + if (terminal.promptYesNo("Directory " + parent + " does not exist. Do you want to create it?", true)) { + try { + Files.createDirectories(parent); + } catch (IOException e) { + throw new UserException(ExitCodes.CANT_CREATE, "Cannot create directory " + parent, e); + } + } else { + throw new UserException(ExitCodes.CANT_CREATE, "Directory " + parent + " does not exist"); + } + + } + + /** + * This method handles the deletion of a file in the case of a partial write + * + * @param file the file that is being written to + * @param writer writes the contents of the file + */ + private static void fullyWriteFile(Path file, CheckedConsumer writer) throws Exception { + assert file != null; + assert writer != null; + + boolean success = false; + if (Files.exists(file)) { + throw new UserException(ExitCodes.IO_ERROR, "Output file '" + file + "' already exists"); + } + try (OutputStream outputStream = Files.newOutputStream(file, StandardOpenOption.CREATE_NEW)) { + writer.accept(outputStream); + + // set permissions to 600 + PosixFileAttributeView view = Files.getFileAttributeView(file, PosixFileAttributeView.class); + if (view != null) { + view.setPermissions(Sets.newHashSet(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE)); + } + + success = true; + } finally { + if (success == false) { + Files.deleteIfExists(file); + } + } + } + + /** + * Helper method to read a private key and support prompting of user for a key. To avoid passwords being placed as an argument we + * can prompt the user for their password if we encounter an encrypted key. 
+ * + * @param path the path to the private key + * @param password the password provided by the user or {@code null} + * @param terminal the terminal to use for user interaction + * @return the {@link PrivateKey} that was read from the file + */ + private static PrivateKey readPrivateKey(Path path, char[] password, Terminal terminal) + throws Exception { + AtomicReference passwordReference = new AtomicReference<>(password); + try (Reader reader = Files.newBufferedReader(path, StandardCharsets.UTF_8)) { + return CertUtils.readPrivateKey(reader, () -> { + if (password != null) { + return password; + } + char[] promptedValue = terminal.readSecret("Enter password for CA private key (" + path.getFileName() + ") : "); + passwordReference.set(promptedValue); + return promptedValue; + }); + } finally { + if (passwordReference.get() != null) { + Arrays.fill(passwordReference.get(), (char) 0); + } + } + } + + private static GeneralNames getSubjectAlternativeNamesValue(List ipAddresses, List dnsNames, List commonNames) { + Set generalNameList = new HashSet<>(); + for (String ip : ipAddresses) { + generalNameList.add(new GeneralName(GeneralName.iPAddress, ip)); + } + + for (String dns : dnsNames) { + generalNameList.add(new GeneralName(GeneralName.dNSName, dns)); + } + + for (String cn : commonNames) { + generalNameList.add(CertUtils.createCommonName(cn)); + } + + if (generalNameList.isEmpty()) { + return null; + } + return new GeneralNames(generalNameList.toArray(new GeneralName[0])); + } + + private static boolean isAscii(char[] str) { + return ASCII_ENCODER.canEncode(CharBuffer.wrap(str)); + } + + private static char[] getChars(String password) { + return password == null ? null : password.toCharArray(); + } + + + static class CertificateInformation { + final Name name; + final List ipAddresses; + final List dnsNames; + final List commonNames; + + CertificateInformation(String name, String filename, List ipAddresses, List dnsNames, List commonNames) { + this.name = Name.fromUserProvidedName(name, filename); + this.ipAddresses = ipAddresses == null ? Collections.emptyList() : ipAddresses; + this.dnsNames = dnsNames == null ? Collections.emptyList() : dnsNames; + this.commonNames = commonNames == null ? 
Collections.emptyList() : commonNames; + } + + List validate() { + List errors = new ArrayList<>(); + if (name.error != null) { + errors.add(name.error); + } + for (String ip : ipAddresses) { + if (InetAddresses.isInetAddress(ip) == false) { + errors.add("[" + ip + "] is not a valid IP address"); + } + } + for (String dnsName : dnsNames) { + if (DERIA5String.isIA5String(dnsName) == false) { + errors.add("[" + dnsName + "] is not a valid DNS name"); + } + } + return errors; + } + } + + static class Name { + + final String originalName; + final X500Principal x500Principal; + final String filename; + final String error; + + private Name(String name, X500Principal x500Principal, String filename, String error) { + this.originalName = name; + this.x500Principal = x500Principal; + this.filename = filename; + this.error = error; + } + + static Name fromUserProvidedName(String name, String filename) { + if ("ca".equals(name)) { + return new Name(name, null, null, "[ca] may not be used as an instance name"); + } + if (name == null) { + return new Name("", null, null, "instance name may not be null"); + } + + final X500Principal principal; + try { + if (name.contains("=")) { + principal = new X500Principal(name); + } else { + principal = new X500Principal("CN=" + name); + } + } catch (IllegalArgumentException e) { + String error = "[" + name + "] could not be converted to a valid DN\n" + e.getMessage() + "\n" + + ExceptionsHelper.stackTrace(e); + return new Name(name, null, null, error); + } + + boolean validFilename = isValidFilename(filename); + if (validFilename == false) { + return new Name(name, principal, null, "[" + filename + "] is not a valid filename"); + } + return new Name(name, principal, resolvePath(filename).toString(), null); + } + + static boolean isValidFilename(String name) { + return ALLOWED_FILENAME_CHAR_PATTERN.matcher(name).matches() + && ALLOWED_FILENAME_CHAR_PATTERN.matcher(resolvePath(name).toString()).matches() + && name.startsWith(".") == false; + } + + @Override + public String toString() { + return getClass().getSimpleName() + + "{original=[" + originalName + "] principal=[" + x500Principal + + "] file=[" + filename + "] err=[" + error + "]}"; + } + } + + static class CertificateAndKey { + final X509Certificate cert; + final PrivateKey key; + + CertificateAndKey(X509Certificate cert, PrivateKey key) { + this.cert = cert; + this.key = key; + } + } + + static class CAInfo { + final CertificateAndKey certAndKey; + final boolean generated; + final char[] password; + + CAInfo(X509Certificate caCert, PrivateKey privateKey) { + this(caCert, privateKey, false, null); + } + + CAInfo(X509Certificate caCert, PrivateKey privateKey, boolean generated, char[] password) { + this.certAndKey = new CertificateAndKey(caCert, privateKey); + this.generated = generated; + this.password = password; + } + } + + private interface Writer { + void write(ZipOutputStream zipOutputStream, JcaPEMWriter pemWriter) throws Exception; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertificateTrustRestrictions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertificateTrustRestrictions.java new file mode 100644 index 0000000000000..43a805cd4e026 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/CertificateTrustRestrictions.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.ssl;
+
+import org.elasticsearch.xpack.core.security.support.Automatons;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Set;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+/**
+ * In-memory representation of the trusted names for a "trust group".
+ *
+ * @see RestrictedTrustManager
+ */
+class CertificateTrustRestrictions {
+
+ private final Set<Predicate<String>> trustedNames;
+
+ CertificateTrustRestrictions(Collection<String> trustedNames) {
+ this.trustedNames = trustedNames.stream().map(Automatons::predicate).collect(Collectors.toSet());
+ }
+
+ /**
+ * @return The names (X509 certificate subjectAlternativeNames) of the nodes that are
+ * allowed to connect to this cluster (for the targeted interface).
+ */
+ Set<Predicate<String>> getTrustedNames() {
+ return Collections.unmodifiableSet(trustedNames);
+ }
+
+ @Override
+ public String toString() {
+ return "{trustedNames=" + trustedNames + '}';
+ }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/DefaultJDKTrustConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/DefaultJDKTrustConfig.java
new file mode 100644
index 0000000000000..073fc06c13704
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/DefaultJDKTrustConfig.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.ssl;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.xpack.core.ssl.cert.CertificateInfo;
+
+import javax.net.ssl.TrustManagerFactory;
+import javax.net.ssl.X509ExtendedTrustManager;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.security.GeneralSecurityException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * This class represents a trust configuration that corresponds to the default trusted certificates of the JDK
+ */
+class DefaultJDKTrustConfig extends TrustConfig {
+
+ static final DefaultJDKTrustConfig INSTANCE = new DefaultJDKTrustConfig();
+
+ private DefaultJDKTrustConfig() {
+ }
+
+ @Override
+ X509ExtendedTrustManager createTrustManager(@Nullable Environment environment) {
+ try {
+ return CertUtils.trustManager(null, TrustManagerFactory.getDefaultAlgorithm());
+ } catch (Exception e) {
+ throw new ElasticsearchException("failed to initialize a TrustManagerFactory", e);
+ }
+ }
+
+ @Override
+ /**
+ * We don't return the list of JDK certificates here, because they are not managed by Elasticsearch, and the purpose
+ * of this method is to obtain information about certificates (files/stores) that X-Pack directly manages.
+ */ + Collection certificates(Environment environment) throws GeneralSecurityException, IOException { + return Collections.emptyList(); + } + + @Override + List filesToMonitor(@Nullable Environment environment) { + return Collections.emptyList(); + } + + @Override + public String toString() { + return "JDK trusted certs"; + } + + @Override + public boolean equals(Object o) { + return o == this; + } + + @Override + public int hashCode() { + return System.identityHashCode(this); + } + + /** + * Merges the default trust configuration with the provided {@link TrustConfig} + * @param trustConfig the trust configuration to merge with + * @return a {@link TrustConfig} that represents a combination of both trust configurations + */ + static TrustConfig merge(TrustConfig trustConfig) { + if (trustConfig == null) { + return INSTANCE; + } else { + return new CombiningTrustConfig(Arrays.asList(INSTANCE, trustConfig)); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/KeyConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/KeyConfig.java new file mode 100644 index 0000000000000..823f0da367eb2 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/KeyConfig.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ssl; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.ssl.cert.CertificateInfo; + +import javax.net.ssl.X509ExtendedKeyManager; +import javax.net.ssl.X509ExtendedTrustManager; + +import java.nio.file.Path; +import java.security.PrivateKey; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +abstract class KeyConfig extends TrustConfig { + + static final KeyConfig NONE = new KeyConfig() { + @Override + X509ExtendedKeyManager createKeyManager(@Nullable Environment environment) { + return null; + } + + @Override + X509ExtendedTrustManager createTrustManager(@Nullable Environment environment) { + return null; + } + + @Override + Collection certificates(Environment environment) { + return Collections.emptyList(); + } + + @Override + List filesToMonitor(@Nullable Environment environment) { + return Collections.emptyList(); + } + + @Override + public String toString() { + return "NONE"; + } + + @Override + public boolean equals(Object o) { + return o == this; + } + + @Override + public int hashCode() { + return System.identityHashCode(this); + } + + @Override + List privateKeys(@Nullable Environment environment) { + return Collections.emptyList(); + } + }; + + abstract X509ExtendedKeyManager createKeyManager(@Nullable Environment environment); + + abstract List privateKeys(@Nullable Environment environment); + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PEMKeyConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PEMKeyConfig.java new file mode 100644 index 0000000000000..c130d69b917dc --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PEMKeyConfig.java @@ -0,0 +1,150 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ssl; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.ssl.cert.CertificateInfo; + +import javax.net.ssl.X509ExtendedKeyManager; +import javax.net.ssl.X509ExtendedTrustManager; + +import java.io.IOException; +import java.io.Reader; +import java.io.UncheckedIOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.PrivateKey; +import java.security.UnrecoverableKeyException; +import java.security.cert.Certificate; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +/** + * Implementation of a key configuration that is backed by a PEM encoded key file and one or more certificates + */ +class PEMKeyConfig extends KeyConfig { + + private final String keyPath; + private final SecureString keyPassword; + private final String certPath; + + /** + * Creates a new key configuration backed by the key and certificate chain provided + * + * @param keyPath the path to the key file + * @param keyPassword the password for the key. + * @param certChainPath the path to the file containing the certificate chain + */ + PEMKeyConfig(String keyPath, SecureString keyPassword, String certChainPath) { + this.keyPath = Objects.requireNonNull(keyPath, "key file must be specified"); + this.keyPassword = Objects.requireNonNull(keyPassword).clone(); + this.certPath = Objects.requireNonNull(certChainPath, "certificate must be specified"); + } + + @Override + X509ExtendedKeyManager createKeyManager(@Nullable Environment environment) { + try { + PrivateKey privateKey = readPrivateKey(CertUtils.resolvePath(keyPath, environment), keyPassword); + if (privateKey == null) { + throw new IllegalArgumentException("private key [" + keyPath + "] could not be loaded"); + } + Certificate[] certificateChain = getCertificateChain(environment); + + return CertUtils.keyManager(certificateChain, privateKey, keyPassword.getChars()); + } catch (IOException | UnrecoverableKeyException | NoSuchAlgorithmException | CertificateException | KeyStoreException e) { + throw new ElasticsearchException("failed to initialize a KeyManagerFactory", e); + } + } + + private Certificate[] getCertificateChain(@Nullable Environment environment) throws CertificateException, IOException { + return CertUtils.readCertificates(Collections.singletonList(certPath), environment); + } + + @Override + Collection certificates(Environment environment) throws CertificateException, IOException { + final Certificate[] chain = getCertificateChain(environment); + final List info = new ArrayList<>(chain.length); + for (int i = 0; i < chain.length; i++) { + final Certificate cert = chain[i]; + if (cert instanceof X509Certificate) { + info.add(new CertificateInfo(certPath, "PEM", null, i == 0, (X509Certificate) cert)); + } + } + return info; + } + + @Override + List privateKeys(@Nullable Environment environment) { + try { + return Collections.singletonList(readPrivateKey(CertUtils.resolvePath(keyPath, 
environment), keyPassword)); + } catch (IOException e) { + throw new UncheckedIOException("failed to read key", e); + } + } + + private static PrivateKey readPrivateKey(Path keyPath, SecureString keyPassword) throws IOException { + try (Reader reader = Files.newBufferedReader(keyPath, StandardCharsets.UTF_8)) { + return CertUtils.readPrivateKey(reader, keyPassword::getChars); + } + } + + @Override + X509ExtendedTrustManager createTrustManager(@Nullable Environment environment) { + try { + Certificate[] certificates = getCertificateChain(environment); + return CertUtils.trustManager(certificates); + } catch (Exception e) { + throw new ElasticsearchException("failed to initialize a TrustManagerFactory", e); + } + } + + @Override + List filesToMonitor(@Nullable Environment environment) { + List paths = new ArrayList<>(2); + paths.add(CertUtils.resolvePath(keyPath, environment)); + paths.add(CertUtils.resolvePath(certPath, environment)); + return paths; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + PEMKeyConfig that = (PEMKeyConfig) o; + + if (keyPath != null ? !keyPath.equals(that.keyPath) : that.keyPath != null) return false; + if (keyPassword != null ? !keyPassword.equals(that.keyPassword) : that.keyPassword != null) return false; + return certPath != null ? certPath.equals(that.certPath) : that.certPath == null; + + } + + @Override + public int hashCode() { + int result = keyPath != null ? keyPath.hashCode() : 0; + result = 31 * result + (keyPassword != null ? keyPassword.hashCode() : 0); + result = 31 * result + (certPath != null ? certPath.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "keyPath=[" + keyPath + + "], certPaths=[" + certPath + + "]"; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PEMTrustConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PEMTrustConfig.java new file mode 100644 index 0000000000000..e191d0e15475f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/PEMTrustConfig.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ssl; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.ssl.cert.CertificateInfo; + +import javax.net.ssl.X509ExtendedTrustManager; + +import java.io.IOException; +import java.nio.file.Path; +import java.security.cert.Certificate; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +/** + * Implementation of trust configuration that is backed by PEM encoded certificate files. 
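+ * <p>
+ * As a rough usage sketch within this package (the certificate paths and the {@code environment} below are
+ * placeholders, not values defined by this change), a trust configuration over PEM CA files might be built as:
+ * <pre>{@code
+ * // each path is later resolved via CertUtils.resolvePath(path, environment)
+ * TrustConfig trust = new PEMTrustConfig(Arrays.asList("ca.crt", "other-ca.crt"));
+ * X509ExtendedTrustManager trustManager = trust.createTrustManager(environment);
+ * }</pre>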
+ */ +class PEMTrustConfig extends TrustConfig { + + private final List caPaths; + + /** + * Create a new trust configuration that is built from the certificate files + * @param caPaths the paths to the certificate files to trust + */ + PEMTrustConfig(List caPaths) { + this.caPaths = Objects.requireNonNull(caPaths, "ca paths must be specified"); + } + + @Override + X509ExtendedTrustManager createTrustManager(@Nullable Environment environment) { + try { + Certificate[] certificates = CertUtils.readCertificates(caPaths, environment); + return CertUtils.trustManager(certificates); + } catch (Exception e) { + throw new ElasticsearchException("failed to initialize a TrustManagerFactory", e); + } + } + + @Override + Collection certificates(Environment environment) throws CertificateException, IOException { + final List info = new ArrayList<>(caPaths.size()); + for (String path : caPaths) { + Certificate[] chain = CertUtils.readCertificates(Collections.singletonList(path), environment); + for (final Certificate cert : chain) { + if (cert instanceof X509Certificate) { + info.add(new CertificateInfo(path, "PEM", null, false, (X509Certificate) cert)); + } + } + } + return info; + } + + @Override + List filesToMonitor(@Nullable Environment environment) { + List paths = new ArrayList<>(caPaths.size()); + for (String path : caPaths) { + paths.add(CertUtils.resolvePath(path, environment)); + } + return paths; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + PEMTrustConfig that = (PEMTrustConfig) o; + + return caPaths != null ? caPaths.equals(that.caPaths) : that.caPaths == null; + + } + + @Override + public int hashCode() { + return caPaths != null ? caPaths.hashCode() : 0; + } + + @Override + public String toString() { + return "ca=[" + Strings.collectionToCommaDelimitedString(caPaths) + "]"; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustConfig.java new file mode 100644 index 0000000000000..85022fde92896 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustConfig.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ssl; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.ssl.cert.CertificateInfo; + +import javax.net.ssl.X509ExtendedTrustManager; + +import java.io.IOException; +import java.nio.file.Path; +import java.security.GeneralSecurityException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +/** + * An implementation of {@link TrustConfig} that constructs a {@link RestrictedTrustManager}. + * This implementation always wraps another TrustConfig to perform the + * underlying certificate validation. 
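+ * <p>
+ * A minimal sketch of the intended wiring within this package (the {@code settings} object and the file name below
+ * are placeholders): the referenced file is an Elasticsearch settings file whose {@code trust.subject_name} values
+ * list the certificate names that remain trusted.
+ * <pre>{@code
+ * TrustConfig delegate = new PEMTrustConfig(Arrays.asList("ca.crt"));
+ * TrustConfig restricted = new RestrictedTrustConfig(settings, "trust_restrictions.yml", delegate);
+ * }</pre>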
+ */ +public final class RestrictedTrustConfig extends TrustConfig { + + private static final String RESTRICTIONS_KEY_SUBJECT_NAME = "trust.subject_name"; + private final Settings settings; + private final String groupConfigPath; + private final TrustConfig delegate; + + RestrictedTrustConfig(Settings settings, String groupConfigPath, TrustConfig delegate) { + this.settings = settings; + this.groupConfigPath = Objects.requireNonNull(groupConfigPath); + this.delegate = Objects.requireNonNull(delegate); + } + + @Override + RestrictedTrustManager createTrustManager(@Nullable Environment environment) { + try { + final X509ExtendedTrustManager delegateTrustManager = delegate.createTrustManager(environment); + final CertificateTrustRestrictions trustGroupConfig = readTrustGroup(resolveGroupConfigPath(environment)); + return new RestrictedTrustManager(settings, delegateTrustManager, trustGroupConfig); + } catch (IOException e) { + throw new ElasticsearchException("failed to initialize TrustManager for {}", e, toString()); + } + } + + @Override + Collection certificates(Environment environment) throws GeneralSecurityException, IOException { + return delegate.certificates(environment); + } + + @Override + List filesToMonitor(@Nullable Environment environment) { + List files = new ArrayList<>(delegate.filesToMonitor(environment)); + files.add(resolveGroupConfigPath(environment)); + return Collections.unmodifiableList(files); + } + + @Override + public String toString() { + return "restrictedTrust=[" + groupConfigPath + ']'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + RestrictedTrustConfig that = (RestrictedTrustConfig) o; + return this.groupConfigPath.equals(that.groupConfigPath) && this.delegate.equals(that.delegate); + } + + @Override + public int hashCode() { + int result = groupConfigPath.hashCode(); + result = 31 * result + delegate.hashCode(); + return result; + } + + private Path resolveGroupConfigPath(@Nullable Environment environment) { + return CertUtils.resolvePath(groupConfigPath, environment); + } + + private CertificateTrustRestrictions readTrustGroup(Path path) throws IOException { + Settings settings = Settings.builder().loadFromPath(path).build(); + final List trustNodeNames = settings.getAsList(RESTRICTIONS_KEY_SUBJECT_NAME); + return new CertificateTrustRestrictions(trustNodeNames); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustManager.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustManager.java new file mode 100644 index 0000000000000..895642dd557fd --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/RestrictedTrustManager.java @@ -0,0 +1,178 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ssl; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.bouncycastle.asn1.ASN1ObjectIdentifier; +import org.bouncycastle.asn1.ASN1Primitive; +import org.bouncycastle.asn1.ASN1Sequence; +import org.bouncycastle.asn1.ASN1String; +import org.bouncycastle.asn1.ASN1TaggedObject; +import org.bouncycastle.asn1.DERTaggedObject; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; + +import javax.net.ssl.SSLEngine; +import javax.net.ssl.X509ExtendedTrustManager; + +import java.net.Socket; +import java.security.cert.CertificateException; +import java.security.cert.CertificateParsingException; +import java.security.cert.X509Certificate; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +/** + * An X509 trust manager that only trusts connections from a restricted set of predefined network entities (nodes, clients, etc). + * The trusted entities are defined as a list of predicates on {@link CertificateTrustRestrictions} that are applied to the + * common-names of the certificate. + * The common-names are read as subject-alternative-names with type 'Other' and a 'cn' OID. + * The underlying certificate validation is delegated to another TrustManager. + */ +public final class RestrictedTrustManager extends X509ExtendedTrustManager { + + private final Logger logger; + private final X509ExtendedTrustManager delegate; + private final CertificateTrustRestrictions trustRestrictions; + private final int SAN_CODE_OTHERNAME = 0; + + public RestrictedTrustManager(Settings settings, X509ExtendedTrustManager delegate, CertificateTrustRestrictions restrictions) { + this.logger = Loggers.getLogger(getClass(), settings); + this.delegate = delegate; + this.trustRestrictions = restrictions; + logger.debug("Configured with trust restrictions: [{}]", restrictions); + } + + @Override + public void checkClientTrusted(X509Certificate[] chain, String authType, Socket socket) throws CertificateException { + delegate.checkClientTrusted(chain, authType, socket); + verifyTrust(chain); + } + + @Override + public void checkServerTrusted(X509Certificate[] chain, String authType, Socket socket) throws CertificateException { + delegate.checkServerTrusted(chain, authType, socket); + verifyTrust(chain); + } + + @Override + public void checkClientTrusted(X509Certificate[] chain, String authType, SSLEngine engine) throws CertificateException { + delegate.checkClientTrusted(chain, authType, engine); + verifyTrust(chain); + } + + @Override + public void checkServerTrusted(X509Certificate[] chain, String authType, SSLEngine engine) throws CertificateException { + delegate.checkServerTrusted(chain, authType, engine); + verifyTrust(chain); + } + + @Override + public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException { + delegate.checkClientTrusted(chain, authType); + verifyTrust(chain); + } + + @Override + public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException { + delegate.checkServerTrusted(chain, authType); + verifyTrust(chain); + } + + @Override + public X509Certificate[] getAcceptedIssuers() { + return delegate.getAcceptedIssuers(); + } + + private void verifyTrust(X509Certificate[] chain) throws 
CertificateException { + if (chain.length == 0) { + throw new CertificateException("No certificate presented"); + } + final X509Certificate certificate = chain[0]; + Set names = readCommonNames(certificate); + if (verifyCertificateNames(names)) { + logger.debug(() -> new ParameterizedMessage("Trusting certificate [{}] [{}] with common-names [{}]", + certificate.getSubjectDN(), certificate.getSerialNumber().toString(16), names)); + } else { + logger.info("Rejecting certificate [{}] [{}] with common-names [{}]", + certificate.getSubjectDN(), certificate.getSerialNumber().toString(16), names); + throw new CertificateException("Certificate for " + certificate.getSubjectDN() + + " with common-names " + names + + " does not match the trusted names " + trustRestrictions.getTrustedNames()); + } + } + + private boolean verifyCertificateNames(Set names) { + for (Predicate trust : trustRestrictions.getTrustedNames()) { + final Optional match = names.stream().filter(trust).findFirst(); + if (match.isPresent()) { + logger.debug("Name [{}] matches trusted pattern [{}]", match.get(), trust); + return true; + } + } + return false; + } + + private Set readCommonNames(X509Certificate certificate) throws CertificateParsingException { + return getSubjectAlternativeNames(certificate).stream() + .filter(pair -> ((Integer) pair.get(0)).intValue() == SAN_CODE_OTHERNAME) + .map(pair -> pair.get(1)) + .map(value -> { + ASN1Sequence seq = ASN1Sequence.getInstance(value); + if (seq.size() != 2) { + String message = "Incorrect sequence length for 'other name' [" + seq + "]"; + assert false : message; + logger.warn(message); + return null; + } + final String id = ASN1ObjectIdentifier.getInstance(seq.getObjectAt(0)).getId(); + if (CertUtils.CN_OID.equals(id)) { + ASN1TaggedObject tagged = DERTaggedObject.getInstance(seq.getObjectAt(1)); + // The JRE's handling of OtherNames is buggy. + // The internal sun classes go to a lot of trouble to parse the GeneralNames into real object + // And then java.security.cert.X509Certificate just turns them back into bytes + // But in doing so, it ends up wrapping the "other name" bytes with a second tag + // Specifically: sun.security.x509.OtherName(DerValue) never decodes the tagged "nameValue" + // But: sun.security.x509.OtherName.encode() wraps the nameValue in a DER Tag. + // So, there's a good chance that our tagged nameValue contains... a tagged name value. + if (tagged.getObject() instanceof ASN1TaggedObject) { + tagged = (ASN1TaggedObject) tagged.getObject(); + } + final ASN1Primitive nameValue = tagged.getObject(); + if (nameValue instanceof ASN1String) { + final String cn = ((ASN1String) nameValue).getString(); + logger.trace("Read cn [{}] from ASN1Sequence [{}]", cn, seq); + return cn; + } else { + logger.warn("Certificate [{}] has 'otherName' [{}] with unsupported name-value type [{}]", + certificate.getSubjectDN(), seq, nameValue.getClass().getSimpleName()); + return null; + } + } else { + logger.debug("Certificate [{}] has 'otherName' [{}] with unsupported object-id [{}]", + certificate.getSubjectDN(), seq, id); + return null; + } + }) + .filter(Objects::nonNull) + .collect(Collectors.toSet()); + } + + + private Collection> getSubjectAlternativeNames(X509Certificate certificate) throws CertificateParsingException { + final Collection> sans = certificate.getSubjectAlternativeNames(); + logger.trace("Certificate [{}] has subject alternative names [{}]", certificate.getSubjectDN(), sans); + return sans == null ? 
Collections.emptyList() : sans; + } +} + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLClientAuth.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLClientAuth.java new file mode 100644 index 0000000000000..673f251deb669 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLClientAuth.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ssl; + +import javax.net.ssl.SSLParameters; + +import java.util.Locale; + +/** + * The client authentication mode that is used for SSL servers + */ +public enum SSLClientAuth { + + NONE() { + public boolean enabled() { + return false; + } + + public void configure(SSLParameters sslParameters) { + // nothing to do here + assert !sslParameters.getWantClientAuth(); + assert !sslParameters.getNeedClientAuth(); + } + }, + OPTIONAL() { + public boolean enabled() { + return true; + } + + public void configure(SSLParameters sslParameters) { + sslParameters.setWantClientAuth(true); + } + }, + REQUIRED() { + public boolean enabled() { + return true; + } + + public void configure(SSLParameters sslParameters) { + sslParameters.setNeedClientAuth(true); + } + }; + + /** + * @return true if client authentication is enabled + */ + public abstract boolean enabled(); + + /** + * Configure client authentication of the provided {@link SSLParameters} + */ + public abstract void configure(SSLParameters sslParameters); + + public static SSLClientAuth parse(String value) { + assert value != null; + switch (value.toLowerCase(Locale.ROOT)) { + case "none": + return NONE; + case "optional": + return OPTIONAL; + case "required": + return REQUIRED; + default: + throw new IllegalArgumentException("could not resolve ssl client auth. unknown value [" + value + "]"); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfiguration.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfiguration.java new file mode 100644 index 0000000000000..a9ba62998bd6f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfiguration.java @@ -0,0 +1,272 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ssl; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.ssl.cert.CertificateInfo; + +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.TrustManagerFactory; + +import java.io.IOException; +import java.nio.file.Path; +import java.security.GeneralSecurityException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings.getKeyStoreType; + + +/** + * Represents the configuration for an SSLContext + */ +public final class SSLConfiguration { + + // These settings are never registered, but they exist so that we can parse the values defined under grouped settings. Also, some are + // implemented as optional settings, which provides a declarative manner for fallback as we typically fallback to values from a + // different configuration + static final SSLConfigurationSettings SETTINGS_PARSER = SSLConfigurationSettings.withoutPrefix(); + + private final KeyConfig keyConfig; + private final TrustConfig trustConfig; + private final List ciphers; + private final List supportedProtocols; + private final SSLClientAuth sslClientAuth; + private final VerificationMode verificationMode; + + /** + * Creates a new SSLConfiguration from the given settings. There is no fallback configuration when invoking this constructor so + * un-configured aspects will take on their default values. + * + * @param settings the SSL specific settings; only the settings under a *.ssl. prefix + */ + SSLConfiguration(Settings settings) { + this.keyConfig = createKeyConfig(settings, (SSLConfiguration) null); + this.trustConfig = createTrustConfig(settings, keyConfig, null); + this.ciphers = getListOrDefault(SETTINGS_PARSER.ciphers, settings, XPackSettings.DEFAULT_CIPHERS); + this.supportedProtocols = getListOrDefault(SETTINGS_PARSER.supportedProtocols, settings, XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS); + this.sslClientAuth = SETTINGS_PARSER.clientAuth.get(settings).orElse(XPackSettings.CLIENT_AUTH_DEFAULT); + this.verificationMode = SETTINGS_PARSER.verificationMode.get(settings).orElse(XPackSettings.VERIFICATION_MODE_DEFAULT); + } + + /** + * Creates a new SSLConfiguration from the given settings and global/default SSLConfiguration. If the settings do not contain a value + * for a given aspect, the value from the global configuration will be used. + * + * @param settings the SSL specific settings; only the settings under a *.ssl. 
prefix + * @param globalSSLConfiguration the default configuration that is used as a fallback + */ + SSLConfiguration(Settings settings, SSLConfiguration globalSSLConfiguration) { + Objects.requireNonNull(globalSSLConfiguration); + this.keyConfig = createKeyConfig(settings, globalSSLConfiguration); + this.trustConfig = createTrustConfig(settings, keyConfig, globalSSLConfiguration); + this.ciphers = getListOrDefault(SETTINGS_PARSER.ciphers, settings, globalSSLConfiguration.cipherSuites()); + this.supportedProtocols = getListOrDefault(SETTINGS_PARSER.supportedProtocols, settings, + globalSSLConfiguration.supportedProtocols()); + this.sslClientAuth = SETTINGS_PARSER.clientAuth.get(settings).orElse(globalSSLConfiguration.sslClientAuth()); + this.verificationMode = SETTINGS_PARSER.verificationMode.get(settings).orElse(globalSSLConfiguration.verificationMode()); + } + + /** + * The configuration for the key, if any, that will be used as part of this ssl configuration + */ + KeyConfig keyConfig() { + return keyConfig; + } + + /** + * The configuration of trust material that will be used as part of this ssl configuration + */ + TrustConfig trustConfig() { + return trustConfig; + } + + /** + * The cipher suites that will be used for this ssl configuration + */ + List cipherSuites() { + return ciphers; + } + + /** + * The protocols that are supported by this configuration + */ + List supportedProtocols() { + return supportedProtocols; + } + + /** + * The verification mode for this configuration; this mode controls certificate and hostname verification + */ + public VerificationMode verificationMode() { + return verificationMode; + } + + /** + * The client auth configuration + */ + SSLClientAuth sslClientAuth() { + return sslClientAuth; + } + + /** + * Provides the list of paths to files that back this configuration + */ + List filesToMonitor(@Nullable Environment environment) { + if (keyConfig() == trustConfig()) { + return keyConfig().filesToMonitor(environment); + } + List paths = new ArrayList<>(keyConfig().filesToMonitor(environment)); + paths.addAll(trustConfig().filesToMonitor(environment)); + return paths; + } + + @Override + public String toString() { + return "SSLConfiguration{" + + "keyConfig=[" + keyConfig + + "], trustConfig=" + trustConfig + + "], cipherSuites=[" + ciphers + + "], supportedProtocols=[" + supportedProtocols + + "], sslClientAuth=[" + sslClientAuth + + "], verificationMode=[" + verificationMode + + "]}"; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof SSLConfiguration)) return false; + + SSLConfiguration that = (SSLConfiguration) o; + + if (this.keyConfig() != null ? !this.keyConfig().equals(that.keyConfig()) : that.keyConfig() != null) { + return false; + } + if (this.trustConfig() != null ? !this.trustConfig().equals(that.trustConfig()) : that.trustConfig() != null) { + return false; + } + if (this.cipherSuites() != null ? !this.cipherSuites().equals(that.cipherSuites()) : that.cipherSuites() != null) { + return false; + } + if (!this.supportedProtocols().equals(that.supportedProtocols())) { + return false; + } + if (this.verificationMode() != that.verificationMode()) { + return false; + } + if (this.sslClientAuth() != that.sslClientAuth()) { + return false; + } + return this.supportedProtocols() != null ? + this.supportedProtocols().equals(that.supportedProtocols()) : that.supportedProtocols() == null; + } + + @Override + public int hashCode() { + int result = this.keyConfig() != null ? 
this.keyConfig().hashCode() : 0; + result = 31 * result + (this.trustConfig() != null ? this.trustConfig().hashCode() : 0); + result = 31 * result + (this.cipherSuites() != null ? this.cipherSuites().hashCode() : 0); + result = 31 * result + (this.supportedProtocols() != null ? this.supportedProtocols().hashCode() : 0); + result = 31 * result + this.verificationMode().hashCode(); + result = 31 * result + this.sslClientAuth().hashCode(); + return result; + } + + private static KeyConfig createKeyConfig(Settings settings, SSLConfiguration global) { + final String trustStoreAlgorithm = SETTINGS_PARSER.truststoreAlgorithm.get(settings); + final KeyConfig config = CertUtils.createKeyConfig(SETTINGS_PARSER.x509KeyPair, settings, trustStoreAlgorithm); + if (config != null) { + return config; + } + if (global != null) { + return global.keyConfig(); + } + if (System.getProperty("javax.net.ssl.keyStore") != null) { + // TODO: we should not support loading a keystore from sysprops... + try (SecureString keystorePassword = new SecureString(System.getProperty("javax.net.ssl.keyStorePassword", ""))) { + return new StoreKeyConfig(System.getProperty("javax.net.ssl.keyStore"), "jks", keystorePassword, keystorePassword, + System.getProperty("ssl.KeyManagerFactory.algorithm", KeyManagerFactory.getDefaultAlgorithm()), + System.getProperty("ssl.TrustManagerFactory.algorithm", TrustManagerFactory.getDefaultAlgorithm())); + } + } + return KeyConfig.NONE; + } + + private static TrustConfig createTrustConfig(Settings settings, KeyConfig keyConfig, SSLConfiguration global) { + final TrustConfig trustConfig = createCertChainTrustConfig(settings, keyConfig, global); + return SETTINGS_PARSER.trustRestrictionsPath.get(settings) + .map(path -> (TrustConfig) new RestrictedTrustConfig(settings, path, trustConfig)) + .orElse(trustConfig); + } + + private static TrustConfig createCertChainTrustConfig(Settings settings, KeyConfig keyConfig, SSLConfiguration global) { + String trustStorePath = SETTINGS_PARSER.truststorePath.get(settings).orElse(null); + + List caPaths = getListOrNull(SETTINGS_PARSER.caPaths, settings); + if (trustStorePath != null && caPaths != null) { + throw new IllegalArgumentException("you cannot specify a truststore and ca files"); + } + + VerificationMode verificationMode = SETTINGS_PARSER.verificationMode.get(settings).orElseGet(() -> { + if (global != null) { + return global.verificationMode(); + } + return XPackSettings.VERIFICATION_MODE_DEFAULT; + }); + if (verificationMode.isCertificateVerificationEnabled() == false) { + return TrustAllConfig.INSTANCE; + } else if (caPaths != null) { + return new PEMTrustConfig(caPaths); + } else if (trustStorePath != null) { + SecureString trustStorePassword = SETTINGS_PARSER.truststorePassword.get(settings); + String trustStoreAlgorithm = SETTINGS_PARSER.truststoreAlgorithm.get(settings); + String trustStoreType = getKeyStoreType(SETTINGS_PARSER.truststoreType, settings, trustStorePath); + return new StoreTrustConfig(trustStorePath, trustStoreType, trustStorePassword, trustStoreAlgorithm); + } else if (global == null && System.getProperty("javax.net.ssl.trustStore") != null) { + try (SecureString truststorePassword = new SecureString(System.getProperty("javax.net.ssl.trustStorePassword", ""))) { + return new StoreTrustConfig(System.getProperty("javax.net.ssl.trustStore"), "jks", truststorePassword, + System.getProperty("ssl.TrustManagerFactory.algorithm", TrustManagerFactory.getDefaultAlgorithm())); + } + } else if (global != null && keyConfig == 
global.keyConfig()) { + return global.trustConfig(); + } else if (keyConfig != KeyConfig.NONE) { + return DefaultJDKTrustConfig.merge(keyConfig); + } else { + return DefaultJDKTrustConfig.INSTANCE; + } + } + + private static List getListOrNull(Setting> listSetting, Settings settings) { + return getListOrDefault(listSetting, settings, null); + } + + private static List getListOrDefault(Setting> listSetting, Settings settings, List defaultValue) { + if (listSetting.exists(settings)) { + return listSetting.get(settings); + } + return defaultValue; + } + + /** + * Returns information about each certificate that referenced by this SSL configurations. + * This includes certificates used for identity (with a private key) and those used for trust, but excludes + * certificates that are provided by the JRE. + * @see TrustConfig#certificates(Environment) + */ + List getDefinedCertificates(@Nullable Environment environment) throws GeneralSecurityException, IOException { + List certificates = new ArrayList<>(); + certificates.addAll(keyConfig.certificates(environment)); + certificates.addAll(trustConfig.certificates(environment)); + return certificates; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloader.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloader.java new file mode 100644 index 0000000000000..2217513c03fe7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloader.java @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ssl; + +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.watcher.FileChangesListener; +import org.elasticsearch.watcher.FileWatcher; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.watcher.ResourceWatcherService.Frequency; + +import javax.net.ssl.SSLContext; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArraySet; + +/** + * Ensures that the files backing an {@link SSLConfiguration} are monitored for changes and the underlying key/trust material is reloaded + * and the {@link SSLContext} has existing sessions invalidated to force the use of the new key/trust material + */ +public class SSLConfigurationReloader extends AbstractComponent { + + private final ConcurrentHashMap pathToChangeListenerMap = new ConcurrentHashMap<>(); + private final Environment environment; + private final ResourceWatcherService resourceWatcherService; + private final SSLService sslService; + + public SSLConfigurationReloader(Settings settings, Environment env, SSLService sslService, ResourceWatcherService resourceWatcher) { + super(settings); + this.environment = env; + this.resourceWatcherService = resourceWatcher; + this.sslService = sslService; + startWatching(sslService.getLoadedSSLConfigurations()); + } + + /** + * Collects all of the directories that need to be monitored for the provided {@link SSLConfiguration} instances and ensures that + * they are being watched for changes + */ + private void startWatching(Collection sslConfigurations) { + for (SSLConfiguration sslConfiguration : sslConfigurations) { + for (Path directory : directoriesToMonitor(sslConfiguration.filesToMonitor(environment))) { + pathToChangeListenerMap.compute(directory, (path, listener) -> { + if (listener != null) { + listener.addSSLConfiguration(sslConfiguration); + return listener; + } + + ChangeListener changeListener = new ChangeListener(); + changeListener.addSSLConfiguration(sslConfiguration); + FileWatcher fileWatcher = new FileWatcher(path); + fileWatcher.addListener(changeListener); + try { + resourceWatcherService.add(fileWatcher, Frequency.HIGH); + return changeListener; + } catch (IOException e) { + logger.error("failed to start watching directory [{}] for ssl configuration [{}]", path, sslConfiguration); + } + return null; + }); + } + } + } + + /** + * Reloads the ssl context associated with this configuration. 
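As described in the class documentation, this causes the underlying key/trust material to be reloaded and existing sessions on the {@link SSLContext} to be invalidated.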
It is visible so that tests can override as needed + */ + void reloadSSLContext(SSLConfiguration configuration) { + logger.debug("reloading ssl configuration [{}]", configuration); + sslService.sslContextHolder(configuration).reload(); + } + + /** + * Returns a unique set of directories that need to be monitored based on the provided file paths + */ + private static Set directoriesToMonitor(List filePaths) { + Set paths = new HashSet<>(); + for (Path path : filePaths) { + paths.add(path.getParent()); + } + return paths; + } + + private class ChangeListener implements FileChangesListener { + + private final CopyOnWriteArraySet sslConfigurations = new CopyOnWriteArraySet<>(); + + /** + * Adds the given ssl configuration to those that have files within the directory watched by this change listener + */ + private void addSSLConfiguration(SSLConfiguration sslConfiguration) { + sslConfigurations.add(sslConfiguration); + } + + @Override + public void onFileCreated(Path file) { + onFileChanged(file); + } + + @Override + public void onFileDeleted(Path file) { + onFileChanged(file); + } + + @Override + public void onFileChanged(Path file) { + boolean reloaded = false; + for (SSLConfiguration sslConfiguration : sslConfigurations) { + if (sslConfiguration.filesToMonitor(environment).contains(file)) { + reloadSSLContext(sslConfiguration); + reloaded = true; + } + } + + if (reloaded) { + logger.info("reloaded [{}] and updated ssl contexts using this file", file); + } + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java new file mode 100644 index 0000000000000..8952619408b4a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java @@ -0,0 +1,224 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ssl; + +import org.elasticsearch.common.settings.SecureSetting; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; + +import javax.net.ssl.TrustManagerFactory; + +import java.security.KeyStore; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Optional; +import java.util.function.Function; + +/** + * Bridges SSLConfiguration into the {@link Settings} framework, using {@link Setting} objects. 
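+ * <p>
+ * As an illustrative sketch (where {@code sslSettings} stands in for a {@link Settings} object holding the
+ * un-prefixed keys), values can be read through the parsed {@link Setting} fields:
+ * <pre>{@code
+ * SSLConfigurationSettings ssl = SSLConfigurationSettings.withoutPrefix();
+ * List<String> ciphers = ssl.ciphers.get(sslSettings);               // key "cipher_suites"
+ * Optional<String> truststore = ssl.truststorePath.get(sslSettings); // key "truststore.path"
+ * }</pre>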
+ */ +public class SSLConfigurationSettings { + + final X509KeyPairSettings x509KeyPair; + + public final Setting> ciphers; + public final Setting> supportedProtocols; + + public final Setting> truststorePath; + public final Setting truststorePassword; + public final Setting truststoreAlgorithm; + public final Setting> truststoreType; + public final Setting> trustRestrictionsPath; + public final Setting> caPaths; + public final Setting> clientAuth; + public final Setting> verificationMode; + + // public for PKI realm + public final Setting legacyTruststorePassword; + + private final List> allSettings; + + /** + * We explicitly default to "jks" here (rather than {@link KeyStore#getDefaultType()}) for backwards compatibility. + * Older versions of X-Pack only supported JKS and never looked at the JVM's configured default. + */ + private static final String DEFAULT_KEYSTORE_TYPE = "jks"; + private static final String PKCS12_KEYSTORE_TYPE = "PKCS12"; + + private static final Function>> CIPHERS_SETTING_TEMPLATE = key -> Setting.listSetting(key, Collections + .emptyList(), Function.identity(), Property.NodeScope, Property.Filtered); + public static final Setting> CIPHERS_SETTING_PROFILES = Setting.affixKeySetting("transport.profiles.", + "xpack.security.ssl.cipher_suites", CIPHERS_SETTING_TEMPLATE); + + private static final Function>> SUPPORTED_PROTOCOLS_TEMPLATE = key -> Setting.listSetting(key, + Collections.emptyList(), Function.identity(), Property.NodeScope, Property.Filtered); + public static final Setting> SUPPORTED_PROTOCOLS_PROFILES = Setting.affixKeySetting("transport.profiles.", + "xpack.security.ssl.supported_protocols", SUPPORTED_PROTOCOLS_TEMPLATE) ; + + public static final Setting> KEYSTORE_PATH_PROFILES = Setting.affixKeySetting("transport.profiles.", + "xpack.security.ssl.keystore.path", X509KeyPairSettings.KEYSTORE_PATH_TEMPLATE); + + public static final Setting LEGACY_KEYSTORE_PASSWORD_PROFILES = Setting.affixKeySetting("transport.profiles.", + "xpack.security.ssl.keystore.password", X509KeyPairSettings.LEGACY_KEYSTORE_PASSWORD_TEMPLATE); + + public static final Setting KEYSTORE_PASSWORD_PROFILES = Setting.affixKeySetting("transport.profiles.", + "xpack.security.ssl.keystore.secure_password", X509KeyPairSettings.KEYSTORE_PASSWORD_TEMPLATE); + + public static final Setting LEGACY_KEYSTORE_KEY_PASSWORD_PROFILES = Setting.affixKeySetting("transport.profiles.", + "xpack.security.ssl.keystore.key_password", X509KeyPairSettings.LEGACY_KEYSTORE_KEY_PASSWORD_TEMPLATE); + + public static final Setting KEYSTORE_KEY_PASSWORD_PROFILES = Setting.affixKeySetting("transport.profiles.", + "xpack.security.ssl.keystore.secure_key_password", X509KeyPairSettings.KEYSTORE_KEY_PASSWORD_TEMPLATE); + + private static final Function>> TRUST_STORE_PATH_TEMPLATE = key -> new Setting<>(key, s -> null, + Optional::ofNullable, Property.NodeScope, Property.Filtered); + public static final Setting> TRUST_STORE_PATH_PROFILES = Setting.affixKeySetting("transport.profiles.", + "xpack.security.ssl.truststore.path", TRUST_STORE_PATH_TEMPLATE); + + public static final Setting> KEY_PATH_PROFILES = Setting.affixKeySetting("transport.profiles.", + "xpack.security.ssl.key", X509KeyPairSettings.KEY_PATH_TEMPLATE); + + private static final Function> LEGACY_TRUSTSTORE_PASSWORD_TEMPLATE = key -> + new Setting<>(key, "", SecureString::new, Property.Deprecated, Property.Filtered, Property.NodeScope); + public static final Setting LEGACY_TRUSTSTORE_PASSWORD_PROFILES = Setting.affixKeySetting("transport.profiles.", + 
"xpack.security.ssl.truststore.password", LEGACY_TRUSTSTORE_PASSWORD_TEMPLATE); + + private static final Function> TRUSTSTORE_PASSWORD_TEMPLATE = key -> + SecureSetting.secureString(key, LEGACY_TRUSTSTORE_PASSWORD_TEMPLATE.apply(key.replace("truststore.secure_password", + "truststore.password"))); + public static final Setting TRUSTSTORE_PASSWORD_PROFILES = Setting.affixKeySetting("transport.profiles.", + "xpack.security.ssl.truststore.secure_password", TRUSTSTORE_PASSWORD_TEMPLATE); + + public static final Setting KEY_STORE_ALGORITHM_PROFILES = Setting.affixKeySetting("transport.profiles.", + "xpack.security.ssl.keystore.algorithm", X509KeyPairSettings.KEY_STORE_ALGORITHM_TEMPLATE); + + private static final Function> TRUST_STORE_ALGORITHM_TEMPLATE = key -> + new Setting<>(key, s -> TrustManagerFactory.getDefaultAlgorithm(), + Function.identity(), Property.NodeScope, Property.Filtered); + public static final Setting TRUST_STORE_ALGORITHM_PROFILES = Setting.affixKeySetting("transport.profiles.", + "xpack.security.ssl.truststore.algorithm", TRUST_STORE_ALGORITHM_TEMPLATE); + + public static final Setting> KEY_STORE_TYPE_PROFILES = Setting.affixKeySetting("transport.profiles.", + "xpack.security.ssl.keystore.type", X509KeyPairSettings.KEY_STORE_TYPE_TEMPLATE); + + private static final Function>> TRUST_STORE_TYPE_TEMPLATE = + X509KeyPairSettings.KEY_STORE_TYPE_TEMPLATE; + public static final Setting> TRUST_STORE_TYPE_PROFILES = Setting.affixKeySetting("transport.profiles.", + "xpack.security.ssl.truststore.type", TRUST_STORE_TYPE_TEMPLATE); + + private static final Function>> TRUST_RESTRICTIONS_TEMPLATE = key -> new Setting<>(key, s -> null, + Optional::ofNullable, Property.NodeScope, Property.Filtered); + public static final Setting> TRUST_RESTRICTIONS_PROFILES = Setting.affixKeySetting("transport.profiles.", + "xpack.security.ssl.trust_restrictions", TRUST_RESTRICTIONS_TEMPLATE); + + public static final Setting LEGACY_KEY_PASSWORD_PROFILES = Setting.affixKeySetting("transport.profiles.", + "xpack.security.ssl.key_passphrase", X509KeyPairSettings.LEGACY_KEY_PASSWORD_TEMPLATE); + + public static final Setting KEY_PASSWORD_PROFILES = Setting.affixKeySetting("transport.profiles.", + "xpack.security.ssl.secure_key_passphrase", X509KeyPairSettings.KEY_PASSWORD_TEMPLATE); + + public static final Setting> CERT_PROFILES = Setting.affixKeySetting("transport.profiles.", + "xpack.security.ssl.certificate", X509KeyPairSettings.CERT_TEMPLATE); + + private static final Function>> CAPATH_SETTING_TEMPLATE = key -> Setting.listSetting(key, Collections + .emptyList(), Function.identity(), Property.NodeScope, Property.Filtered); + public static final Setting> CAPATH_SETTING_PROFILES = Setting.affixKeySetting("transport.profiles.", + "xpack.security.ssl.certificate_authorities", CAPATH_SETTING_TEMPLATE); + + private static final Function>> CLIENT_AUTH_SETTING_TEMPLATE = + key -> new Setting<>(key, (String) null, s -> s == null ? Optional.empty() : Optional.of(SSLClientAuth.parse(s)), + Property.NodeScope, Property.Filtered); + public static final Setting> CLIENT_AUTH_SETTING_PROFILES = Setting.affixKeySetting("transport.profiles.", + "xpack.security.ssl.client_authentication", CLIENT_AUTH_SETTING_TEMPLATE); + + private static final Function>> VERIFICATION_MODE_SETTING_TEMPLATE = + key -> new Setting<>(key, (String) null, s -> s == null ? 
Optional.empty() : Optional.of(VerificationMode.parse(s)), + Property.NodeScope, Property.Filtered); + public static final Setting> VERIFICATION_MODE_SETTING_PROFILES = Setting.affixKeySetting( + "transport.profiles.", "xpack.security.ssl.verification_mode", VERIFICATION_MODE_SETTING_TEMPLATE); + + /** + * @see #withoutPrefix + * @see #withPrefix + * @param prefix The prefix under which each setting should be defined. Must be either the empty string ("") or a string + * ending in "." + */ + private SSLConfigurationSettings(String prefix) { + assert prefix != null : "Prefix cannot be null (but can be blank)"; + + x509KeyPair = new X509KeyPairSettings(prefix, true); + ciphers = CIPHERS_SETTING_TEMPLATE.apply(prefix + "cipher_suites"); + supportedProtocols = SUPPORTED_PROTOCOLS_TEMPLATE.apply(prefix + "supported_protocols"); + truststorePath = TRUST_STORE_PATH_TEMPLATE.apply(prefix + "truststore.path"); + legacyTruststorePassword = LEGACY_TRUSTSTORE_PASSWORD_TEMPLATE.apply(prefix + "truststore.password"); + truststorePassword = TRUSTSTORE_PASSWORD_TEMPLATE.apply(prefix + "truststore.secure_password"); + truststoreAlgorithm = TRUST_STORE_ALGORITHM_TEMPLATE.apply(prefix + "truststore.algorithm"); + truststoreType = TRUST_STORE_TYPE_TEMPLATE.apply(prefix + "truststore.type"); + trustRestrictionsPath = TRUST_RESTRICTIONS_TEMPLATE.apply(prefix + "trust_restrictions.path"); + caPaths = CAPATH_SETTING_TEMPLATE.apply(prefix + "certificate_authorities"); + clientAuth = CLIENT_AUTH_SETTING_TEMPLATE.apply(prefix + "client_authentication"); + verificationMode = VERIFICATION_MODE_SETTING_TEMPLATE.apply(prefix + "verification_mode"); + + final List> settings = CollectionUtils.arrayAsArrayList(ciphers, supportedProtocols, + truststorePath, truststorePassword, truststoreAlgorithm, truststoreType, trustRestrictionsPath, + caPaths, clientAuth, verificationMode, legacyTruststorePassword); + settings.addAll(x509KeyPair.getAllSettings()); + this.allSettings = Collections.unmodifiableList(settings); + } + + public static String getKeyStoreType(Setting> setting, Settings settings, String path) { + return setting.get(settings).orElseGet(() -> inferKeyStoreType(path)); + } + + private static String inferKeyStoreType(String path) { + String name = path == null ? "" : path.toLowerCase(Locale.ROOT); + if (name.endsWith(".p12") || name.endsWith(".pfx") || name.endsWith(".pkcs12")) { + return PKCS12_KEYSTORE_TYPE; + } else { + return DEFAULT_KEYSTORE_TYPE; + } + } + + public List> getAllSettings() { + return allSettings; + } + + /** + * Construct settings that are un-prefixed. That is, they can be used to read from a {@link Settings} object where the configuration + * keys are the root names of the Settings. + */ + public static SSLConfigurationSettings withoutPrefix() { + return new SSLConfigurationSettings(""); + } + + /** + * Construct settings that have a prefixed. That is, they can be used to read from a {@link Settings} object where the configuration + * keys are prefixed-children of the Settings. + * @param prefix A string that must end in "ssl." 
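+ *               For example, {@code withPrefix("xpack.security.transport.ssl.")} would read keys such as
+ *               {@code "xpack.security.transport.ssl.verification_mode"}; the prefix shown is illustrative only.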
+ */ + public static SSLConfigurationSettings withPrefix(String prefix) { + assert prefix.endsWith("ssl.") : "The ssl config prefix (" + prefix + ") should end in 'ssl.'"; + return new SSLConfigurationSettings(prefix); + } + + + public static Collection> getProfileSettings() { + return Arrays.asList(CIPHERS_SETTING_PROFILES, SUPPORTED_PROTOCOLS_PROFILES, KEYSTORE_PATH_PROFILES, + LEGACY_KEYSTORE_PASSWORD_PROFILES, KEYSTORE_PASSWORD_PROFILES, LEGACY_KEYSTORE_KEY_PASSWORD_PROFILES, + KEYSTORE_KEY_PASSWORD_PROFILES, TRUST_STORE_PATH_PROFILES, LEGACY_TRUSTSTORE_PASSWORD_PROFILES, + TRUSTSTORE_PASSWORD_PROFILES, KEY_STORE_ALGORITHM_PROFILES, TRUST_STORE_ALGORITHM_PROFILES, + KEY_STORE_TYPE_PROFILES, TRUST_STORE_TYPE_PROFILES, TRUST_RESTRICTIONS_PROFILES, + KEY_PATH_PROFILES, LEGACY_KEY_PASSWORD_PROFILES, KEY_PASSWORD_PROFILES,CERT_PROFILES,CAPATH_SETTING_PROFILES, + CLIENT_AUTH_SETTING_PROFILES, VERIFICATION_MODE_SETTING_PROFILES); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java new file mode 100644 index 0000000000000..c59a2889c28db --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java @@ -0,0 +1,913 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ssl; + +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; +import org.apache.lucene.util.SetOnce; +import org.bouncycastle.operator.OperatorCreationException; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.common.socket.SocketAccess; +import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.core.ssl.cert.CertificateInfo; + +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLParameters; +import javax.net.ssl.SSLSessionContext; +import javax.net.ssl.SSLSocket; +import javax.net.ssl.SSLSocketFactory; +import javax.net.ssl.X509ExtendedKeyManager; +import javax.net.ssl.X509ExtendedTrustManager; +import javax.security.auth.DestroyFailedException; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.Socket; +import java.security.GeneralSecurityException; +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.Principal; +import java.security.PrivateKey; +import java.security.UnrecoverableKeyException; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import 
java.util.Set; + +/** + * Provides access to {@link SSLEngine} and {@link SSLSocketFactory} objects based on a provided configuration. All + * configurations loaded by this service must be configured on construction. + */ +public class SSLService extends AbstractComponent { + + private final Map sslContexts; + private final SSLConfiguration globalSSLConfiguration; + private final SetOnce transportSSLConfiguration = new SetOnce<>(); + private final Environment env; + + /** + * Create a new SSLService that parses the settings for the ssl contexts that need to be created, creates them, and then caches them + * for use later + */ + public SSLService(Settings settings, Environment environment) throws CertificateException, UnrecoverableKeyException, + NoSuchAlgorithmException, IOException, DestroyFailedException, KeyStoreException, OperatorCreationException { + super(settings); + this.env = environment; + this.globalSSLConfiguration = new SSLConfiguration(settings.getByPrefix(XPackSettings.GLOBAL_SSL_PREFIX)); + this.sslContexts = loadSSLConfigurations(); + } + + private SSLService(Settings settings, Environment environment, SSLConfiguration globalSSLConfiguration, + Map sslContexts) { + super(settings); + this.env = environment; + this.globalSSLConfiguration = globalSSLConfiguration; + this.sslContexts = sslContexts; + } + + /** + * Creates a new SSLService that supports dynamic creation of SSLContext instances. Instances created by this service will not be + * cached and will not be monitored for reloading. This dynamic server does have access to the cached and monitored instances that + * have been created during initialization + */ + public SSLService createDynamicSSLService() { + return new SSLService(settings, env, globalSSLConfiguration, sslContexts) { + + @Override + Map loadSSLConfigurations() { + // we don't need to load anything... + return Collections.emptyMap(); + } + + /** + * Returns the existing {@link SSLContextHolder} for the configuration + * @throws IllegalArgumentException if not found + */ + @Override + SSLContextHolder sslContextHolder(SSLConfiguration sslConfiguration) { + SSLContextHolder holder = sslContexts.get(sslConfiguration); + if (holder == null) { + // normally we'd throw here but let's create a new one that is not cached and will not be monitored for changes! + holder = createSslContext(sslConfiguration); + } + return holder; + } + }; + } + + /** + * Create a new {@link SSLIOSessionStrategy} based on the provided settings. The settings are used to identify the SSL configuration + * that should be used to create the context. + * + * @param settings the settings used to identify the ssl configuration, typically under a *.ssl. prefix. An empty settings will return + * a context created from the default configuration + * @return Never {@code null}. 
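+ * <p>
+ * A sketch of the intended call pattern (the {@code "xpack.http.ssl."} prefix and the {@code sslService} variable
+ * below are illustrative placeholders):
+ * <pre>{@code
+ * SSLIOSessionStrategy strategy = sslService.sslIOSessionStrategy(settings.getByPrefix("xpack.http.ssl."));
+ * }</pre>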
+ */ + public SSLIOSessionStrategy sslIOSessionStrategy(Settings settings) { + SSLConfiguration config = sslConfiguration(settings); + SSLContext sslContext = sslContext(config); + String[] ciphers = supportedCiphers(sslParameters(sslContext).getCipherSuites(), config.cipherSuites(), false); + String[] supportedProtocols = config.supportedProtocols().toArray(Strings.EMPTY_ARRAY); + HostnameVerifier verifier; + + if (config.verificationMode().isHostnameVerificationEnabled()) { + verifier = SSLIOSessionStrategy.getDefaultHostnameVerifier(); + } else { + verifier = NoopHostnameVerifier.INSTANCE; + } + + return sslIOSessionStrategy(sslContext, supportedProtocols, ciphers, verifier); + } + + /** + * The {@link SSLParameters} that are associated with the {@code sslContext}. + *
* <p>
+ * This method exists to simplify testing since {@link SSLContext#getSupportedSSLParameters()} is {@code final}. + * + * @param sslContext The SSL context for the current SSL settings + * @return Never {@code null}. + */ + SSLParameters sslParameters(SSLContext sslContext) { + return sslContext.getSupportedSSLParameters(); + } + + /** + * This method only exists to simplify testing of {@link #sslIOSessionStrategy(Settings)} because {@link SSLIOSessionStrategy} does + * not expose any of the parameters that you give it. + * + * @param sslContext SSL Context used to handle SSL / TCP requests + * @param protocols Supported protocols + * @param ciphers Supported ciphers + * @param verifier Hostname verifier + * @return Never {@code null}. + */ + SSLIOSessionStrategy sslIOSessionStrategy(SSLContext sslContext, String[] protocols, String[] ciphers, HostnameVerifier verifier) { + return new SSLIOSessionStrategy(sslContext, protocols, ciphers, verifier); + } + + /** + * Create a new {@link SSLSocketFactory} based on the provided settings. The settings are used to identify the ssl configuration that + * should be used to create the socket factory. The socket factory will also properly configure the ciphers and protocols on each + * socket that is created + * @param settings the settings used to identify the ssl configuration, typically under a *.ssl. prefix. An empty settings will return + * a factory created from the default configuration + * @return Never {@code null}. + */ + public SSLSocketFactory sslSocketFactory(Settings settings) { + SSLConfiguration sslConfiguration = sslConfiguration(settings); + SSLSocketFactory socketFactory = sslContext(sslConfiguration).getSocketFactory(); + return new SecuritySSLSocketFactory(socketFactory, sslConfiguration.supportedProtocols().toArray(Strings.EMPTY_ARRAY), + supportedCiphers(socketFactory.getSupportedCipherSuites(), sslConfiguration.cipherSuites(), false)); + } + + /** + * Creates an {@link SSLEngine} based on the provided settings. The settings are used to identify the ssl configuration that should be + * used to create the engine. This SSLEngine cannot be used for hostname verification since the engine will not be created with the + * host and port. This method is useful to obtain an SSLEngine that will be used for server connections or client connections that + * will not use hostname verification. + * @param settings the settings used to identify the ssl configuration, typically under a *.ssl. prefix. An empty settings will return + * a SSLEngine created from the default configuration + * @param fallbackSettings the settings that should be used for the fallback of the SSLConfiguration. Using {@link Settings#EMPTY} + * results in a fallback to the global configuration + * @return {@link SSLEngine} + */ + public SSLEngine createSSLEngine(Settings settings, Settings fallbackSettings) { + return createSSLEngine(settings, fallbackSettings, null, -1); + } + + /** + * Creates an {@link SSLEngine} based on the provided settings. The settings are used to identify the ssl configuration that should be + * used to create the engine. This SSLEngine can be used for a connection that requires hostname verification assuming the provided + * host and port are correct. The SSLEngine created by this method is most useful for clients with hostname verification enabled + * @param settings the settings used to identify the ssl configuration, typically under a *.ssl. prefix. 
An empty settings will return + * a SSLEngine created from the default configuration + * @param fallbackSettings the settings that should be used for the fallback of the SSLConfiguration. Using {@link Settings#EMPTY} + * results in a fallback to the global configuration + * @param host the host of the remote endpoint. If using hostname verification, this should match what is in the remote endpoint's + * certificate + * @param port the port of the remote endpoint + * @return {@link SSLEngine} + */ + public SSLEngine createSSLEngine(Settings settings, Settings fallbackSettings, String host, int port) { + SSLConfiguration configuration = sslConfiguration(settings, fallbackSettings); + return createSSLEngine(configuration, host, port); + } + + /** + * Creates an {@link SSLEngine} based on the provided configuration. This SSLEngine can be used for a connection that requires + * hostname verification assuming the provided + * host and port are correct. The SSLEngine created by this method is most useful for clients with hostname verification enabled + * @param configuration the ssl configuration + * @param host the host of the remote endpoint. If using hostname verification, this should match what is in the remote endpoint's + * certificate + * @param port the port of the remote endpoint + * @return {@link SSLEngine} + * @see #sslConfiguration(Settings, Settings) + */ + public SSLEngine createSSLEngine(SSLConfiguration configuration, String host, int port) { + SSLContext sslContext = sslContext(configuration); + SSLEngine sslEngine = sslContext.createSSLEngine(host, port); + String[] ciphers = supportedCiphers(sslEngine.getSupportedCipherSuites(), configuration.cipherSuites(), false); + String[] supportedProtocols = configuration.supportedProtocols().toArray(Strings.EMPTY_ARRAY); + SSLParameters parameters = new SSLParameters(ciphers, supportedProtocols); + if (configuration.verificationMode().isHostnameVerificationEnabled() && host != null) { + // By default, a SSLEngine will not perform hostname verification. In order to perform hostname verification + // we need to specify a EndpointIdentificationAlgorithm. We use the HTTPS algorithm to prevent against + // man in the middle attacks for all of our connections. + parameters.setEndpointIdentificationAlgorithm("HTTPS"); + } + // we use the cipher suite order so that we can prefer the ciphers we set first in the list + parameters.setUseCipherSuitesOrder(true); + configuration.sslClientAuth().configure(parameters); + + // many SSLEngine options can be configured using either SSLParameters or direct methods on the engine itself, but there is one + // tricky aspect; if you set a value directly on the engine and then later set the SSLParameters the value set directly on the + // engine will be overwritten by the value in the SSLParameters + sslEngine.setSSLParameters(parameters); + return sslEngine; + } + + /** + * Returns whether the provided settings results in a valid configuration that can be used for server connections + * @param sslConfiguration the configuration to check + */ + public boolean isConfigurationValidForServerUsage(SSLConfiguration sslConfiguration) { + return sslConfiguration.keyConfig() != KeyConfig.NONE; + } + + /** + * Indicates whether client authentication is enabled for a particular configuration + * @param settings the settings used to identify the ssl configuration, typically under a *.ssl. prefix. 
The global configuration + * will be used for fallback + */ + public boolean isSSLClientAuthEnabled(Settings settings) { + return isSSLClientAuthEnabled(settings, Settings.EMPTY); + } + + /** + * Indicates whether client authentication is enabled for a particular configuration + * @param settings the settings used to identify the ssl configuration, typically under a *.ssl. prefix + * @param fallback the settings that should be used for the fallback of the SSLConfiguration. Using {@link Settings#EMPTY} + * results in a fallback to the global configuration + */ + public boolean isSSLClientAuthEnabled(Settings settings, Settings fallback) { + SSLConfiguration sslConfiguration = sslConfiguration(settings, fallback); + return isSSLClientAuthEnabled(sslConfiguration); + } + + /** + * Indicates whether client authentication is enabled for a particular configuration + */ + public boolean isSSLClientAuthEnabled(SSLConfiguration sslConfiguration) { + return sslConfiguration.sslClientAuth().enabled(); + } + + /** + * Returns the {@link VerificationMode} that is specified in the settings (or the default) + * @param settings the settings used to identify the ssl configuration, typically under a *.ssl. prefix + * @param fallback the settings that should be used for the fallback of the SSLConfiguration. Using {@link Settings#EMPTY} + * results in a fallback to the global configuration + */ + public VerificationMode getVerificationMode(Settings settings, Settings fallback) { + SSLConfiguration sslConfiguration = sslConfiguration(settings, fallback); + return sslConfiguration.verificationMode(); + } + + /** + * Returns the {@link SSLContext} for the global configuration. Mainly used for testing + */ + SSLContext sslContext() { + return sslContextHolder(globalSSLConfiguration).sslContext(); + } + + /** + * Returns the {@link SSLContext} for the configuration + */ + SSLContext sslContext(SSLConfiguration configuration) { + return sslContextHolder(configuration).sslContext(); + } + + /** + * Returns the existing {@link SSLContextHolder} for the configuration + * @throws IllegalArgumentException if not found + */ + SSLContextHolder sslContextHolder(SSLConfiguration sslConfiguration) { + SSLContextHolder holder = sslContexts.get(sslConfiguration); + if (holder == null) { + throw new IllegalArgumentException("did not find a SSLContext for [" + sslConfiguration.toString() + "]"); + } + return holder; + } + + /** + * Returns the existing {@link SSLConfiguration} for the given settings + * @param settings the settings for the ssl configuration + * @return the ssl configuration for the provided settings. If the settings are empty, the global configuration is returned + */ + SSLConfiguration sslConfiguration(Settings settings) { + if (settings.isEmpty()) { + return globalSSLConfiguration; + } + return new SSLConfiguration(settings, globalSSLConfiguration); + } + + /** + * Returns the existing {@link SSLConfiguration} for the given settings and applies the provided fallback settings instead of the global + * configuration + * @param settings the settings for the ssl configuration + * @param fallbackSettings the settings that should be used for the fallback of the SSLConfiguration. Using {@link Settings#EMPTY} + * results in a fallback to the global configuration + * @return the ssl configuration for the provided settings. 
If the settings are empty, the global configuration is returned + */ + public SSLConfiguration sslConfiguration(Settings settings, Settings fallbackSettings) { + if (settings.isEmpty() && fallbackSettings.isEmpty()) { + return globalSSLConfiguration; + } + SSLConfiguration fallback = sslConfiguration(fallbackSettings); + return new SSLConfiguration(settings, fallback); + } + + /** + * Accessor to the loaded ssl configuration objects at the current point in time. This is useful for testing + */ + Collection getLoadedSSLConfigurations() { + return Collections.unmodifiableSet(new HashSet<>(sslContexts.keySet())); + } + + /** + * Returns the intersection of the supported ciphers with the requested ciphers. This method will also optionally log if unsupported + * ciphers were requested. + * @throws IllegalArgumentException if no supported ciphers are in the requested ciphers + */ + String[] supportedCiphers(String[] supportedCiphers, List requestedCiphers, boolean log) { + List supportedCiphersList = new ArrayList<>(requestedCiphers.size()); + List unsupportedCiphers = new LinkedList<>(); + boolean found; + for (String requestedCipher : requestedCiphers) { + found = false; + for (String supportedCipher : supportedCiphers) { + if (supportedCipher.equals(requestedCipher)) { + found = true; + supportedCiphersList.add(requestedCipher); + break; + } + } + + if (!found) { + unsupportedCiphers.add(requestedCipher); + } + } + + if (supportedCiphersList.isEmpty()) { + throw new IllegalArgumentException("none of the ciphers " + Arrays.toString(requestedCiphers.toArray()) + + " are supported by this JVM"); + } + + if (log && !unsupportedCiphers.isEmpty()) { + logger.error("unsupported ciphers [{}] were requested but cannot be used in this JVM, however there are supported ciphers " + + "that will be used [{}]. 
If you are trying to use ciphers with a key length greater than 128 bits on an Oracle JVM, " + + "you will need to install the unlimited strength JCE policy files.", unsupportedCiphers, supportedCiphersList); + } + + return supportedCiphersList.toArray(new String[supportedCiphersList.size()]); + } + + /** + * Creates an {@link SSLContext} based on the provided configuration + * @param sslConfiguration the configuration to use for context creation + * @return the created SSLContext + */ + private SSLContextHolder createSslContext(SSLConfiguration sslConfiguration) { + if (logger.isDebugEnabled()) { + logger.debug("using ssl settings [{}]", sslConfiguration); + } + ReloadableTrustManager trustManager = + new ReloadableTrustManager(sslConfiguration.trustConfig().createTrustManager(env), sslConfiguration.trustConfig()); + ReloadableX509KeyManager keyManager = + new ReloadableX509KeyManager(sslConfiguration.keyConfig().createKeyManager(env), sslConfiguration.keyConfig()); + return createSslContext(keyManager, trustManager, sslConfiguration); + } + + /** + * Creates an {@link SSLContext} based on the provided configuration and trust/key managers + * @param sslConfiguration the configuration to use for context creation + * @param keyManager the key manager to use + * @param trustManager the trust manager to use + * @return the created SSLContext + */ + private SSLContextHolder createSslContext(ReloadableX509KeyManager keyManager, ReloadableTrustManager trustManager, + SSLConfiguration sslConfiguration) { + // Initialize sslContext + try { + SSLContext sslContext = SSLContext.getInstance(sslContextAlgorithm(sslConfiguration.supportedProtocols())); + sslContext.init(new X509ExtendedKeyManager[] { keyManager }, new X509ExtendedTrustManager[] { trustManager }, null); + + // check the supported ciphers and log them here to prevent spamming logs on every call + supportedCiphers(sslContext.getSupportedSSLParameters().getCipherSuites(), sslConfiguration.cipherSuites(), true); + + return new SSLContextHolder(sslContext, trustManager, keyManager); + } catch (NoSuchAlgorithmException | KeyManagementException e) { + throw new ElasticsearchException("failed to initialize the SSLContext", e); + } + } + + /** + * Parses the settings to load all SSLConfiguration objects that will be used. 
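An illustrative aside before the method body (not part of the diff): the sketch below shows the kind of per-feature settings that `loadSSLConfigurations()` groups into distinct `SSLConfiguration` instances. The concrete key names are assumptions based on the prefixes referenced in the code (transport, HTTP, realms, monitoring exporters), not values confirmed by this change.

```java
import org.elasticsearch.common.settings.Settings;

class SslSettingsLayoutSketch {
    // Each ssl.* block below would get its own SSLConfiguration, falling back to the
    // global defaults (and transport profiles fall back to the transport configuration).
    static Settings example() {
        return Settings.builder()
            .put("xpack.ssl.verification_mode", "full")                                    // assumed global prefix
            .put("xpack.security.transport.ssl.enabled", true)
            .put("xpack.security.http.ssl.enabled", true)                                  // assumed HTTP prefix
            .put("xpack.security.authc.realms.ldap1.ssl.verification_mode", "certificate") // assumed realm prefix
            .put("xpack.monitoring.exporters.remote.ssl.verification_mode", "full")
            .build();
    }
}
```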
+ */ + Map loadSSLConfigurations() throws CertificateException, + UnrecoverableKeyException, NoSuchAlgorithmException, IOException, DestroyFailedException, KeyStoreException, + OperatorCreationException { + Map sslConfigurations = new HashMap<>(); + sslConfigurations.put(globalSSLConfiguration, createSslContext(globalSSLConfiguration)); + + final Settings transportSSLSettings = settings.getByPrefix(XPackSettings.TRANSPORT_SSL_PREFIX); + List sslSettingsList = new ArrayList<>(); + sslSettingsList.add(getHttpTransportSSLSettings(settings)); + sslSettingsList.add(settings.getByPrefix("xpack.http.ssl.")); + sslSettingsList.addAll(getRealmsSSLSettings(settings)); + sslSettingsList.addAll(getMonitoringExporterSettings(settings)); + + sslSettingsList.forEach((sslSettings) -> + sslConfigurations.computeIfAbsent(new SSLConfiguration(sslSettings, globalSSLConfiguration), this::createSslContext)); + + // transport is special because we want to use a auto-generated key when there isn't one + final SSLConfiguration transportSSLConfiguration = new SSLConfiguration(transportSSLSettings, globalSSLConfiguration); + this.transportSSLConfiguration.set(transportSSLConfiguration); + List profileSettings = getTransportProfileSSLSettings(settings); + sslConfigurations.computeIfAbsent(transportSSLConfiguration, this::createSslContext); + profileSettings.forEach((profileSetting) -> + sslConfigurations.computeIfAbsent(new SSLConfiguration(profileSetting, transportSSLConfiguration), this::createSslContext)); + return Collections.unmodifiableMap(sslConfigurations); + } + + + /** + * Returns information about each certificate that is referenced by any SSL configuration. + * This includes certificates used for identity (with a private key) and those used for trust, but excludes + * certificates that are provided by the JRE. + * Due to the nature of KeyStores, this may include certificates that are available, but never used + * such as a CA certificate that is no longer in use, or a server certificate for an unrelated host. + * @see TrustConfig#certificates(Environment) + */ + public Set getLoadedCertificates() throws GeneralSecurityException, IOException { + Set certificates = new HashSet<>(); + for (SSLConfiguration config : this.getLoadedSSLConfigurations()) { + certificates.addAll(config.getDefinedCertificates(env)); + } + return certificates; + } + + /** + * This socket factory wraps an existing SSLSocketFactory and sets the protocols and ciphers on each SSLSocket after it is created. This + * is needed even though the SSLContext is configured properly as the configuration does not flow down to the sockets created by the + * SSLSocketFactory obtained from the SSLContext. 
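An illustrative aside (not part of the diff): callers obtain such a factory through `sslSocketFactory(Settings)` above, and the sockets it produces already have the configured protocols and cipher suites applied. The endpoint below is hypothetical.

```java
import java.io.IOException;
import java.net.Socket;
import javax.net.ssl.SSLSocketFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.core.ssl.SSLService;

class SslSocketSketch {
    static Socket connect(SSLService sslService, Settings sslSettings) throws IOException {
        SSLSocketFactory factory = sslService.sslSocketFactory(sslSettings);
        // No further configuration is needed; the wrapper sets protocols/ciphers on each socket.
        return factory.createSocket("ldap.example.com", 636);
    }
}
```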
+ */ + private static class SecuritySSLSocketFactory extends SSLSocketFactory { + + private final SSLSocketFactory delegate; + private final String[] supportedProtocols; + private final String[] ciphers; + + SecuritySSLSocketFactory(SSLSocketFactory delegate, String[] supportedProtocols, String[] ciphers) { + this.delegate = delegate; + this.supportedProtocols = supportedProtocols; + this.ciphers = ciphers; + } + + @Override + public String[] getDefaultCipherSuites() { + return ciphers; + } + + @Override + public String[] getSupportedCipherSuites() { + return delegate.getSupportedCipherSuites(); + } + + @Override + public Socket createSocket() throws IOException { + SSLSocket sslSocket = createWithPermissions(delegate::createSocket); + configureSSLSocket(sslSocket); + return sslSocket; + } + + @Override + public Socket createSocket(Socket socket, String host, int port, boolean autoClose) throws IOException { + SSLSocket sslSocket = createWithPermissions(() -> delegate.createSocket(socket, host, port, autoClose)); + configureSSLSocket(sslSocket); + return sslSocket; + } + + @Override + public Socket createSocket(String host, int port) throws IOException { + SSLSocket sslSocket = createWithPermissions(() -> delegate.createSocket(host, port)); + configureSSLSocket(sslSocket); + return sslSocket; + } + + @Override + public Socket createSocket(String host, int port, InetAddress localHost, int localPort) throws IOException { + SSLSocket sslSocket = createWithPermissions(() -> delegate.createSocket(host, port, localHost, localPort)); + configureSSLSocket(sslSocket); + return sslSocket; + } + + @Override + public Socket createSocket(InetAddress host, int port) throws IOException { + SSLSocket sslSocket = createWithPermissions(() -> delegate.createSocket(host, port)); + configureSSLSocket(sslSocket); + return sslSocket; + } + + @Override + public Socket createSocket(InetAddress address, int port, InetAddress localAddress, int localPort) throws IOException { + SSLSocket sslSocket = createWithPermissions(() -> delegate.createSocket(address, port, localAddress, localPort)); + configureSSLSocket(sslSocket); + return sslSocket; + } + + private void configureSSLSocket(SSLSocket socket) { + SSLParameters parameters = new SSLParameters(ciphers, supportedProtocols); + // we use the cipher suite order so that we can prefer the ciphers we set first in the list + parameters.setUseCipherSuitesOrder(true); + socket.setSSLParameters(parameters); + } + + private static SSLSocket createWithPermissions(CheckedSupplier supplier) throws IOException { + return (SSLSocket) SocketAccess.doPrivileged(supplier); + } + } + + /** + * Wraps a trust manager to delegate to. If the trust material needs to be reloaded, then the delegate will be switched after + * reloading + */ + final class ReloadableTrustManager extends X509ExtendedTrustManager { + + private volatile X509ExtendedTrustManager trustManager; + private final TrustConfig trustConfig; + + ReloadableTrustManager(X509ExtendedTrustManager trustManager, TrustConfig trustConfig) { + this.trustManager = trustManager == null ? 
new EmptyX509TrustManager() : trustManager; + this.trustConfig = trustConfig; + } + + @Override + public void checkClientTrusted(X509Certificate[] x509Certificates, String s, Socket socket) throws CertificateException { + trustManager.checkClientTrusted(x509Certificates, s, socket); + } + + @Override + public void checkServerTrusted(X509Certificate[] x509Certificates, String s, Socket socket) throws CertificateException { + trustManager.checkServerTrusted(x509Certificates, s, socket); + } + + @Override + public void checkClientTrusted(X509Certificate[] x509Certificates, String s, SSLEngine sslEngine) throws CertificateException { + trustManager.checkClientTrusted(x509Certificates, s, sslEngine); + } + + @Override + public void checkServerTrusted(X509Certificate[] x509Certificates, String s, SSLEngine sslEngine) throws CertificateException { + trustManager.checkServerTrusted(x509Certificates, s, sslEngine); + } + + @Override + public void checkClientTrusted(X509Certificate[] x509Certificates, String s) throws CertificateException { + trustManager.checkClientTrusted(x509Certificates, s); + } + + @Override + public void checkServerTrusted(X509Certificate[] x509Certificates, String s) throws CertificateException { + trustManager.checkServerTrusted(x509Certificates, s); + } + + @Override + public X509Certificate[] getAcceptedIssuers() { + return trustManager.getAcceptedIssuers(); + } + + void reload() { + X509ExtendedTrustManager loadedTrustManager = trustConfig.createTrustManager(env); + if (loadedTrustManager == null) { + this.trustManager = new EmptyX509TrustManager(); + } else { + this.trustManager = loadedTrustManager; + } + } + + X509ExtendedTrustManager getTrustManager() { + return trustManager; + } + } + + /** + * Wraps a key manager and delegates all calls to it. When the key material needs to be reloaded, then the delegate is swapped after + * a new one has been loaded + */ + final class ReloadableX509KeyManager extends X509ExtendedKeyManager { + + private volatile X509ExtendedKeyManager keyManager; + private final KeyConfig keyConfig; + + ReloadableX509KeyManager(X509ExtendedKeyManager keyManager, KeyConfig keyConfig) { + this.keyManager = keyManager == null ? 
new EmptyKeyManager() : keyManager; + this.keyConfig = keyConfig; + } + + @Override + public String[] getClientAliases(String s, Principal[] principals) { + return keyManager.getClientAliases(s, principals); + } + + @Override + public String chooseClientAlias(String[] strings, Principal[] principals, Socket socket) { + return keyManager.chooseClientAlias(strings, principals, socket); + } + + @Override + public String[] getServerAliases(String s, Principal[] principals) { + return keyManager.getServerAliases(s, principals); + } + + @Override + public String chooseServerAlias(String s, Principal[] principals, Socket socket) { + return keyManager.chooseServerAlias(s, principals, socket); + } + + @Override + public X509Certificate[] getCertificateChain(String s) { + return keyManager.getCertificateChain(s); + } + + @Override + public PrivateKey getPrivateKey(String s) { + return keyManager.getPrivateKey(s); + } + + @Override + public String chooseEngineClientAlias(String[] strings, Principal[] principals, SSLEngine engine) { + return keyManager.chooseEngineClientAlias(strings, principals, engine); + } + + @Override + public String chooseEngineServerAlias(String s, Principal[] principals, SSLEngine engine) { + return keyManager.chooseEngineServerAlias(s, principals, engine); + } + + void reload() { + X509ExtendedKeyManager loadedKeyManager = keyConfig.createKeyManager(env); + if (loadedKeyManager == null) { + this.keyManager = new EmptyKeyManager(); + } else { + this.keyManager = loadedKeyManager; + } + } + + // pkg-private accessor for testing + X509ExtendedKeyManager getKeyManager() { + return keyManager; + } + } + + /** + * A struct for holding the SSLContext and the backing key manager and trust manager + */ + static final class SSLContextHolder { + + private final SSLContext context; + private final ReloadableTrustManager trustManager; + private final ReloadableX509KeyManager keyManager; + + SSLContextHolder(SSLContext context, ReloadableTrustManager trustManager, ReloadableX509KeyManager keyManager) { + this.context = context; + this.trustManager = trustManager; + this.keyManager = keyManager; + } + + SSLContext sslContext() { + return context; + } + + ReloadableX509KeyManager keyManager() { + return keyManager; + } + + ReloadableTrustManager trustManager() { + return trustManager; + } + + synchronized void reload() { + trustManager.reload(); + keyManager.reload(); + invalidateSessions(context.getClientSessionContext()); + invalidateSessions(context.getServerSessionContext()); + } + + /** + * Invalidates the sessions in the provided {@link SSLSessionContext} + */ + private static void invalidateSessions(SSLSessionContext sslSessionContext) { + Enumeration sessionIds = sslSessionContext.getIds(); + while (sessionIds.hasMoreElements()) { + byte[] sessionId = sessionIds.nextElement(); + sslSessionContext.getSession(sessionId).invalidate(); + } + } + } + + /** + * This is an empty key manager that is used in case a loaded key manager is null + */ + private static final class EmptyKeyManager extends X509ExtendedKeyManager { + + @Override + public String[] getClientAliases(String s, Principal[] principals) { + return new String[0]; + } + + @Override + public String chooseClientAlias(String[] strings, Principal[] principals, Socket socket) { + return null; + } + + @Override + public String[] getServerAliases(String s, Principal[] principals) { + return new String[0]; + } + + @Override + public String chooseServerAlias(String s, Principal[] principals, Socket socket) { + return null; + } + + 
@Override + public X509Certificate[] getCertificateChain(String s) { + return new X509Certificate[0]; + } + + @Override + public PrivateKey getPrivateKey(String s) { + return null; + } + } + + /** + * This is an empty trust manager that is used in case a loaded trust manager is null + */ + static final class EmptyX509TrustManager extends X509ExtendedTrustManager { + + @Override + public void checkClientTrusted(X509Certificate[] x509Certificates, String s, Socket socket) throws CertificateException { + throw new CertificateException("no certificates are trusted"); + } + + @Override + public void checkServerTrusted(X509Certificate[] x509Certificates, String s, Socket socket) throws CertificateException { + throw new CertificateException("no certificates are trusted"); + } + + @Override + public void checkClientTrusted(X509Certificate[] x509Certificates, String s, SSLEngine sslEngine) throws CertificateException { + throw new CertificateException("no certificates are trusted"); + } + + @Override + public void checkServerTrusted(X509Certificate[] x509Certificates, String s, SSLEngine sslEngine) throws CertificateException { + throw new CertificateException("no certificates are trusted"); + } + + @Override + public void checkClientTrusted(X509Certificate[] x509Certificates, String s) throws CertificateException { + throw new CertificateException("no certificates are trusted"); + } + + @Override + public void checkServerTrusted(X509Certificate[] x509Certificates, String s) throws CertificateException { + throw new CertificateException("no certificates are trusted"); + } + + @Override + public X509Certificate[] getAcceptedIssuers() { + return new X509Certificate[0]; + } + } + + private static List getRealmsSSLSettings(Settings settings) { + List sslSettings = new ArrayList<>(); + Settings realmsSettings = settings.getByPrefix(SecurityField.setting("authc.realms.")); + for (String name : realmsSettings.names()) { + Settings realmSSLSettings = realmsSettings.getAsSettings(name).getByPrefix("ssl."); + if (realmSSLSettings.isEmpty() == false) { + sslSettings.add(realmSSLSettings); + } + } + return sslSettings; + } + + private static List getTransportProfileSSLSettings(Settings settings) { + List sslSettings = new ArrayList<>(); + Map profiles = settings.getGroups("transport.profiles.", true); + for (Entry entry : profiles.entrySet()) { + Settings profileSettings = entry.getValue().getByPrefix("xpack.security.ssl."); + if (profileSettings.isEmpty() == false) { + sslSettings.add(profileSettings); + } + } + return sslSettings; + } + + public static Settings getHttpTransportSSLSettings(Settings settings) { + Settings httpSSLSettings = settings.getByPrefix(XPackSettings.HTTP_SSL_PREFIX); + if (httpSSLSettings.isEmpty()) { + return httpSSLSettings; + } + + Settings.Builder builder = Settings.builder().put(httpSSLSettings); + if (builder.get("client_authentication") == null) { + builder.put("client_authentication", XPackSettings.HTTP_CLIENT_AUTH_DEFAULT); + } + return builder.build(); + } + + private static List getMonitoringExporterSettings(Settings settings) { + List sslSettings = new ArrayList<>(); + Map exportersSettings = settings.getGroups("xpack.monitoring.exporters."); + for (Entry entry : exportersSettings.entrySet()) { + Settings exporterSSLSettings = entry.getValue().getByPrefix("ssl."); + if (exporterSSLSettings.isEmpty() == false) { + sslSettings.add(exporterSSLSettings); + } + } + return sslSettings; + } + + /** + * Maps the supported protocols to an appropriate ssl context algorithm. 
We make an attempt to use the "best" algorithm when + * possible. The names in this method are taken from the + * JCA Standard Algorithm Name + * Documentation for Java 8. + */ + private static String sslContextAlgorithm(List supportedProtocols) { + if (supportedProtocols.isEmpty()) { + return "TLSv1.2"; + } + + String algorithm = "SSL"; + for (String supportedProtocol : supportedProtocols) { + switch (supportedProtocol) { + case "TLSv1.2": + return "TLSv1.2"; + case "TLSv1.1": + if ("TLSv1.2".equals(algorithm) == false) { + algorithm = "TLSv1.1"; + } + break; + case "TLSv1": + switch (algorithm) { + case "TLSv1.2": + case "TLSv1.1": + break; + default: + algorithm = "TLSv1"; + } + break; + case "SSLv3": + switch (algorithm) { + case "SSLv2": + case "SSL": + algorithm = "SSLv3"; + } + break; + case "SSLv2": + case "SSLv2Hello": + break; + default: + throw new IllegalArgumentException("found unexpected value in supported protocols: " + supportedProtocol); + } + } + return algorithm; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/StoreKeyConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/StoreKeyConfig.java new file mode 100644 index 0000000000000..ea9c9267d6591 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/StoreKeyConfig.java @@ -0,0 +1,192 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ssl; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.ssl.cert.CertificateInfo; + +import javax.net.ssl.X509ExtendedKeyManager; +import javax.net.ssl.X509ExtendedTrustManager; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.GeneralSecurityException; +import java.security.Key; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.PrivateKey; +import java.security.UnrecoverableKeyException; +import java.security.cert.Certificate; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Enumeration; +import java.util.List; +import java.util.Objects; + +/** + * A key configuration that is backed by a {@link KeyStore} + */ +class StoreKeyConfig extends KeyConfig { + + final String keyStorePath; + final String keyStoreType; + final SecureString keyStorePassword; + final String keyStoreAlgorithm; + final SecureString keyPassword; + final String trustStoreAlgorithm; + + /** + * Creates a new configuration that can be used to load key and trust material from a {@link KeyStore} + * @param keyStorePath the path to the keystore file + * @param keyStoreType the type of the keystore file + * @param keyStorePassword the password for the keystore + * @param keyPassword the password for the private key in the keystore + * @param keyStoreAlgorithm the algorithm for the keystore + * @param trustStoreAlgorithm the algorithm to use when loading as a truststore + */ + StoreKeyConfig(String 
keyStorePath, String keyStoreType, SecureString keyStorePassword, SecureString keyPassword, + String keyStoreAlgorithm, String trustStoreAlgorithm) { + this.keyStorePath = Objects.requireNonNull(keyStorePath, "keystore path must be specified"); + this.keyStoreType = Objects.requireNonNull(keyStoreType, "keystore type must be specified"); + // since we support reloading the keystore, we must store the passphrase in memory for the life of the node, so we + // clone the password and never close it during our uses below + this.keyStorePassword = Objects.requireNonNull(keyStorePassword, "keystore password must be specified").clone(); + this.keyPassword = Objects.requireNonNull(keyPassword).clone(); + this.keyStoreAlgorithm = keyStoreAlgorithm; + this.trustStoreAlgorithm = trustStoreAlgorithm; + } + + @Override + X509ExtendedKeyManager createKeyManager(@Nullable Environment environment) { + try { + KeyStore ks = getKeyStore(environment); + checkKeyStore(ks); + return CertUtils.keyManager(ks, keyPassword.getChars(), keyStoreAlgorithm); + } catch (IOException | CertificateException | NoSuchAlgorithmException | UnrecoverableKeyException | KeyStoreException e) { + throw new ElasticsearchException("failed to initialize a KeyManagerFactory", e); + } + } + + @Override + X509ExtendedTrustManager createTrustManager(@Nullable Environment environment) { + try { + return CertUtils.trustManager(keyStorePath, keyStoreType, keyStorePassword.getChars(), trustStoreAlgorithm, environment); + } catch (Exception e) { + throw new ElasticsearchException("failed to initialize a TrustManagerFactory", e); + } + } + + @Override + Collection certificates(Environment environment) throws GeneralSecurityException, IOException { + final Path path = CertUtils.resolvePath(keyStorePath, environment); + final KeyStore trustStore = CertUtils.readKeyStore(path, keyStoreType, keyStorePassword.getChars()); + final List certificates = new ArrayList<>(); + final Enumeration aliases = trustStore.aliases(); + while (aliases.hasMoreElements()) { + String alias = aliases.nextElement(); + final Certificate[] chain = trustStore.getCertificateChain(alias); + if (chain == null) { + continue; + } + for (int i = 0; i < chain.length; i++) { + final Certificate certificate = chain[i]; + if (certificate instanceof X509Certificate) { + certificates.add(new CertificateInfo(keyStorePath, keyStoreType, alias, i == 0, (X509Certificate) certificate)); + } + } + } + return certificates; + } + + @Override + List filesToMonitor(@Nullable Environment environment) { + return Collections.singletonList(CertUtils.resolvePath(keyStorePath, environment)); + } + + @Override + List privateKeys(@Nullable Environment environment) { + try { + KeyStore keyStore = getKeyStore(environment); + List privateKeys = new ArrayList<>(); + for (Enumeration e = keyStore.aliases(); e.hasMoreElements(); ) { + final String alias = e.nextElement(); + if (keyStore.isKeyEntry(alias)) { + Key key = keyStore.getKey(alias, keyPassword.getChars()); + if (key instanceof PrivateKey) { + privateKeys.add((PrivateKey) key); + } + } + } + return privateKeys; + } catch (Exception e) { + throw new ElasticsearchException("failed to list keys", e); + } + } + + private KeyStore getKeyStore(@Nullable Environment environment) + throws KeyStoreException, CertificateException, NoSuchAlgorithmException, IOException { + try (InputStream in = Files.newInputStream(CertUtils.resolvePath(keyStorePath, environment))) { + KeyStore ks = KeyStore.getInstance(keyStoreType); + ks.load(in, 
keyStorePassword.getChars()); + return ks; + } + } + + private void checkKeyStore(KeyStore keyStore) throws KeyStoreException { + Enumeration aliases = keyStore.aliases(); + while (aliases.hasMoreElements()) { + String alias = aliases.nextElement(); + if (keyStore.isKeyEntry(alias)) { + return; + } + } + throw new IllegalArgumentException("the keystore [" + keyStorePath + "] does not contain a private key entry"); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + StoreKeyConfig that = (StoreKeyConfig) o; + + if (keyStorePath != null ? !keyStorePath.equals(that.keyStorePath) : that.keyStorePath != null) return false; + if (keyStorePassword != null ? !keyStorePassword.equals(that.keyStorePassword) : that.keyStorePassword != null) + return false; + if (keyStoreAlgorithm != null ? !keyStoreAlgorithm.equals(that.keyStoreAlgorithm) : that.keyStoreAlgorithm != null) + return false; + if (keyPassword != null ? !keyPassword.equals(that.keyPassword) : that.keyPassword != null) return false; + return trustStoreAlgorithm != null ? trustStoreAlgorithm.equals(that.trustStoreAlgorithm) : that.trustStoreAlgorithm == null; + } + + @Override + public int hashCode() { + int result = keyStorePath != null ? keyStorePath.hashCode() : 0; + result = 31 * result + (keyStorePassword != null ? keyStorePassword.hashCode() : 0); + result = 31 * result + (keyStoreAlgorithm != null ? keyStoreAlgorithm.hashCode() : 0); + result = 31 * result + (keyPassword != null ? keyPassword.hashCode() : 0); + result = 31 * result + (trustStoreAlgorithm != null ? trustStoreAlgorithm.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "keyStorePath=[" + keyStorePath + + "], keyStoreType=[" + keyStoreType + + "], keyStoreAlgorithm=[" + keyStoreAlgorithm + + "], trustStoreAlgorithm=[" + trustStoreAlgorithm + + "]"; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/StoreTrustConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/StoreTrustConfig.java new file mode 100644 index 0000000000000..9b06249000c72 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/StoreTrustConfig.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ssl; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.ssl.cert.CertificateInfo; + +import javax.net.ssl.X509ExtendedTrustManager; + +import java.io.IOException; +import java.nio.file.Path; +import java.security.GeneralSecurityException; +import java.security.KeyStore; +import java.security.cert.Certificate; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Enumeration; +import java.util.List; +import java.util.Objects; + +/** + * Trust configuration that is backed by a {@link java.security.KeyStore} + */ +class StoreTrustConfig extends TrustConfig { + + final String trustStorePath; + final String trustStoreType; + final SecureString trustStorePassword; + final String trustStoreAlgorithm; + + /** + * Create a new configuration based on the provided parameters + * + * @param trustStorePath the path to the truststore + * @param trustStorePassword the password for the truststore + * @param trustStoreAlgorithm the algorithm to use for reading the truststore + */ + StoreTrustConfig(String trustStorePath, String trustStoreType, SecureString trustStorePassword, String trustStoreAlgorithm) { + this.trustStorePath = trustStorePath; + this.trustStoreType = trustStoreType; + // since we support reloading the truststore, we must store the passphrase in memory for the life of the node, so we + // clone the password and never close it during our uses below + this.trustStorePassword = Objects.requireNonNull(trustStorePassword, "truststore password must be specified").clone(); + this.trustStoreAlgorithm = trustStoreAlgorithm; + } + + @Override + X509ExtendedTrustManager createTrustManager(@Nullable Environment environment) { + try { + return CertUtils.trustManager(trustStorePath, trustStoreType, trustStorePassword.getChars(), trustStoreAlgorithm, environment); + } catch (Exception e) { + throw new ElasticsearchException("failed to initialize a TrustManagerFactory", e); + } + } + + @Override + Collection certificates(Environment environment) throws GeneralSecurityException, IOException { + final Path path = CertUtils.resolvePath(trustStorePath, environment); + final KeyStore trustStore = CertUtils.readKeyStore(path, trustStoreType, trustStorePassword.getChars()); + final List certificates = new ArrayList<>(); + final Enumeration aliases = trustStore.aliases(); + while (aliases.hasMoreElements()) { + String alias = aliases.nextElement(); + final Certificate certificate = trustStore.getCertificate(alias); + if (certificate instanceof X509Certificate) { + final boolean hasKey = trustStore.isKeyEntry(alias); + certificates.add(new CertificateInfo(trustStorePath, trustStoreType, alias, hasKey, (X509Certificate) certificate)); + } + } + return certificates; + } + + @Override + List filesToMonitor(@Nullable Environment environment) { + if (trustStorePath == null) { + return Collections.emptyList(); + } + return Collections.singletonList(CertUtils.resolvePath(trustStorePath, environment)); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + StoreTrustConfig that = (StoreTrustConfig) o; + + if (trustStorePath != null ? 
!trustStorePath.equals(that.trustStorePath) : that.trustStorePath != null) return false; + if (trustStorePassword != null ? !trustStorePassword.equals(that.trustStorePassword) : that.trustStorePassword != null) + return false; + return trustStoreAlgorithm != null ? trustStoreAlgorithm.equals(that.trustStoreAlgorithm) : that.trustStoreAlgorithm == null; + } + + @Override + public int hashCode() { + int result = trustStorePath != null ? trustStorePath.hashCode() : 0; + result = 31 * result + (trustStorePassword != null ? trustStorePassword.hashCode() : 0); + result = 31 * result + (trustStoreAlgorithm != null ? trustStoreAlgorithm.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "trustStorePath=[" + trustStorePath + + "], trustStoreAlgorithm=[" + trustStoreAlgorithm + + "]"; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheck.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheck.java new file mode 100644 index 0000000000000..f7a8c29a45a12 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TLSLicenseBootstrapCheck.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ssl; + +import org.elasticsearch.bootstrap.BootstrapCheck; +import org.elasticsearch.bootstrap.BootstrapContext; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.xpack.core.XPackSettings; + +/** + * Bootstrap check to ensure that if we are starting up with a production license in the local clusterstate TLS is enabled + */ +public final class TLSLicenseBootstrapCheck implements BootstrapCheck { + @Override + public BootstrapCheckResult check(BootstrapContext context) { + if (XPackSettings.TRANSPORT_SSL_ENABLED.get(context.settings) == false) { + License license = LicenseService.getLicense(context.metaData); + if (license != null && license.isProductionLicense()) { + return BootstrapCheckResult.failure("Transport SSL must be enabled for setups with production licenses. Please set " + + "[xpack.security.transport.ssl.enabled] to [true] or disable security by setting [xpack.security.enabled] " + + "to [false]"); + } + } + return BootstrapCheckResult.success(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TrustAllConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TrustAllConfig.java new file mode 100644 index 0000000000000..cf6aa340b098d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TrustAllConfig.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ssl; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.ssl.cert.CertificateInfo; + +import javax.net.ssl.SSLEngine; +import javax.net.ssl.X509ExtendedTrustManager; + +import java.io.IOException; +import java.net.Socket; +import java.nio.file.Path; +import java.security.GeneralSecurityException; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +/** + * A trust manager that trusts all certificates + */ +class TrustAllConfig extends TrustConfig { + + public static final TrustAllConfig INSTANCE = new TrustAllConfig(); + + /** + * The {@link X509ExtendedTrustManager} that will trust all certificates. All methods are implemented as a no-op and do not throw + * exceptions regardless of the certificate presented. + */ + private static final X509ExtendedTrustManager TRUST_MANAGER = new X509ExtendedTrustManager() { + @Override + public void checkClientTrusted(X509Certificate[] x509Certificates, String s, Socket socket) throws CertificateException { + } + + @Override + public void checkServerTrusted(X509Certificate[] x509Certificates, String s, Socket socket) throws CertificateException { + } + + @Override + public void checkClientTrusted(X509Certificate[] x509Certificates, String s, SSLEngine sslEngine) throws CertificateException { + } + + @Override + public void checkServerTrusted(X509Certificate[] x509Certificates, String s, SSLEngine sslEngine) throws CertificateException { + } + + @Override + public void checkClientTrusted(X509Certificate[] x509Certificates, String s) throws CertificateException { + } + + @Override + public void checkServerTrusted(X509Certificate[] x509Certificates, String s) throws CertificateException { + } + + @Override + public X509Certificate[] getAcceptedIssuers() { + return new X509Certificate[0]; + } + }; + + private TrustAllConfig() { + } + + @Override + X509ExtendedTrustManager createTrustManager(@Nullable Environment environment) { + return TRUST_MANAGER; + } + + @Override + Collection certificates(Environment environment) throws GeneralSecurityException, IOException { + return Collections.emptyList(); + } + + @Override + List filesToMonitor(@Nullable Environment environment) { + return Collections.emptyList(); + } + + @Override + public String toString() { + return "trust all"; + } + + @Override + public boolean equals(Object o) { + return o == this; + } + + @Override + public int hashCode() { + return System.identityHashCode(this); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TrustConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TrustConfig.java new file mode 100644 index 0000000000000..b6f638f3b69c6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TrustConfig.java @@ -0,0 +1,128 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ssl; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.ssl.cert.CertificateInfo; + +import javax.net.ssl.X509ExtendedTrustManager; + +import java.io.IOException; +import java.nio.file.Path; +import java.security.GeneralSecurityException; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + +/** + * The configuration of trust material for SSL usage + */ +abstract class TrustConfig { + + /** + * Creates a {@link X509ExtendedTrustManager} based on the provided configuration + * @param environment the environment to resolve files against or null in the case of running in a transport client + */ + abstract X509ExtendedTrustManager createTrustManager(@Nullable Environment environment); + + abstract Collection certificates(@Nullable Environment environment) throws GeneralSecurityException, IOException; + + /** + * Returns a list of files that should be monitored for changes + * @param environment the environment to resolve files against or null in the case of running in a transport client + */ + abstract List filesToMonitor(@Nullable Environment environment); + + /** + * {@inheritDoc}. Declared as abstract to force implementors to provide a custom implementation + */ + public abstract String toString(); + + /** + * {@inheritDoc}. Declared as abstract to force implementors to provide a custom implementation + */ + public abstract boolean equals(Object o); + + /** + * {@inheritDoc}. Declared as abstract to force implementors to provide a custom implementation + */ + public abstract int hashCode(); + + /** + * A trust configuration that is a combination of a trust configuration with the default JDK trust configuration. This trust + * configuration returns a trust manager verifies certificates against both the default JDK trusted configurations and the specific + * {@link TrustConfig} provided. 
+ */ + static class CombiningTrustConfig extends TrustConfig { + + private final List trustConfigs; + + CombiningTrustConfig(List trustConfig) { + this.trustConfigs = Collections.unmodifiableList(trustConfig); + } + + @Override + X509ExtendedTrustManager createTrustManager(@Nullable Environment environment) { + Optional matchAll = trustConfigs.stream().filter(TrustAllConfig.INSTANCE::equals).findAny(); + if (matchAll.isPresent()) { + return matchAll.get().createTrustManager(environment); + } + + try { + return CertUtils.trustManager(trustConfigs.stream() + .flatMap((tc) -> Arrays.stream(tc.createTrustManager(environment).getAcceptedIssuers())) + .collect(Collectors.toList()) + .toArray(new X509Certificate[0])); + } catch (Exception e) { + throw new ElasticsearchException("failed to create trust manager", e); + } + } + + @Override + Collection certificates(Environment environment) throws GeneralSecurityException, IOException { + List certificates = new ArrayList<>(); + for (TrustConfig tc : trustConfigs) { + certificates.addAll(tc.certificates(environment)); + } + return certificates; + } + + @Override + List filesToMonitor(@Nullable Environment environment) { + return trustConfigs.stream().flatMap((tc) -> tc.filesToMonitor(environment).stream()).collect(Collectors.toList()); + } + + @Override + public String toString() { + return "Combining Trust Config{" + trustConfigs.stream().map(TrustConfig::toString).collect(Collectors.joining(", ")) + "}"; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof CombiningTrustConfig)) { + return false; + } + + CombiningTrustConfig that = (CombiningTrustConfig) o; + return trustConfigs.equals(that.trustConfigs); + } + + @Override + public int hashCode() { + return trustConfigs.hashCode(); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/VerificationMode.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/VerificationMode.java new file mode 100644 index 0000000000000..9166739614053 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/VerificationMode.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ssl; + +import java.util.Locale; + +/** + * Represents the verification mode to be used for SSL connections. 
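An illustrative aside (not part of the diff): a minimal sketch of how the three modes map onto the two verification flags defined by this enum.

```java
import org.elasticsearch.xpack.core.ssl.VerificationMode;

class VerificationModeSketch {
    static void demo() {
        VerificationMode full = VerificationMode.parse("full");        // hostname + certificate verification
        VerificationMode cert = VerificationMode.parse("certificate"); // certificate verification only
        VerificationMode none = VerificationMode.parse("none");        // neither check is performed

        assert full.isHostnameVerificationEnabled() && full.isCertificateVerificationEnabled();
        assert cert.isHostnameVerificationEnabled() == false && cert.isCertificateVerificationEnabled();
        assert none.isHostnameVerificationEnabled() == false && none.isCertificateVerificationEnabled() == false;
    }
}
```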
+ */ +public enum VerificationMode { + NONE { + @Override + public boolean isHostnameVerificationEnabled() { + return false; + } + + @Override + public boolean isCertificateVerificationEnabled() { + return false; + } + }, + CERTIFICATE { + @Override + public boolean isHostnameVerificationEnabled() { + return false; + } + + @Override + public boolean isCertificateVerificationEnabled() { + return true; + } + }, + FULL { + @Override + public boolean isHostnameVerificationEnabled() { + return true; + } + + @Override + public boolean isCertificateVerificationEnabled() { + return true; + } + }; + + /** + * @return true if hostname verification is enabled + */ + public abstract boolean isHostnameVerificationEnabled(); + + /** + * @return true if certificate verification is enabled + */ + public abstract boolean isCertificateVerificationEnabled(); + + public static VerificationMode parse(String value) { + assert value != null; + switch (value.toLowerCase(Locale.ROOT)) { + case "none": + return NONE; + case "certificate": + return CERTIFICATE; + case "full": + return FULL; + default: + throw new IllegalArgumentException("could not resolve verification mode. unknown value [" + value + "]"); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/X509KeyPairSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/X509KeyPairSettings.java new file mode 100644 index 0000000000000..33b78b2da8609 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/X509KeyPairSettings.java @@ -0,0 +1,121 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ssl; + +import org.elasticsearch.common.settings.SecureSetting; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.util.CollectionUtils; + +import javax.net.ssl.KeyManagerFactory; + +import java.security.KeyStore; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.function.Function; + +/** + * An encapsulation of the configuration options for X.509 Key Pair support in X-Pack security. + * The most common use is as the private key and associated certificate for SSL/TLS support, but it can also be used for providing + * signing or encryption keys (if they are X.509 based). + * This class supports using a {@link java.security.KeyStore} (with configurable {@link KeyStore#getType() type}) or PEM based files. 
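An illustrative aside (not part of the diff): a sketch of the setting keys the class derives from a prefix. The prefix shown is hypothetical; any prefix ending in a dot works the same way.

```java
import org.elasticsearch.xpack.core.ssl.X509KeyPairSettings;

class KeyPairSettingsSketch {
    static void demo() {
        // Registers keystore-based keys (keystore.path, keystore.type, keystore.algorithm,
        // keystore.secure_password, keystore.secure_key_password), PEM-based keys
        // (key, certificate, secure_key_passphrase) and, because the second argument is true,
        // the deprecated non-secure password variants as well.
        X509KeyPairSettings keyPair = new X509KeyPairSettings("xpack.security.http.ssl.", true);
        keyPair.getAllSettings().forEach(setting -> System.out.println(setting.getKey()));
    }
}
```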
+ */ +public class X509KeyPairSettings { + + static final Function>> KEYSTORE_PATH_TEMPLATE = key -> new Setting<>(key, s -> null, + Optional::ofNullable, Setting.Property.NodeScope, Setting.Property.Filtered); + + static final Function> LEGACY_KEYSTORE_PASSWORD_TEMPLATE = key -> new Setting<>(key, "", + SecureString::new, Setting.Property.Deprecated, Setting.Property.Filtered, Setting.Property.NodeScope); + static final Function> KEYSTORE_PASSWORD_TEMPLATE = key -> SecureSetting.secureString(key, + LEGACY_KEYSTORE_PASSWORD_TEMPLATE.apply(key.replace("keystore.secure_password", "keystore.password"))); + + static final Function> KEY_STORE_ALGORITHM_TEMPLATE = key -> + new Setting<>(key, s -> KeyManagerFactory.getDefaultAlgorithm(), + Function.identity(), Setting.Property.NodeScope, Setting.Property.Filtered); + + static final Function>> KEY_STORE_TYPE_TEMPLATE = key -> + new Setting<>(key, s -> null, Optional::ofNullable, Setting.Property.NodeScope, Setting.Property.Filtered); + + static final Function> LEGACY_KEYSTORE_KEY_PASSWORD_TEMPLATE = key -> new Setting<>(key, "", + SecureString::new, Setting.Property.Deprecated, Setting.Property.Filtered, Setting.Property.NodeScope); + static final Function> KEYSTORE_KEY_PASSWORD_TEMPLATE = key -> + SecureSetting.secureString(key, LEGACY_KEYSTORE_KEY_PASSWORD_TEMPLATE.apply(key.replace("keystore.secure_key_password", + "keystore.key_password"))); + + static final Function>> KEY_PATH_TEMPLATE = key -> new Setting<>(key, s -> null, + Optional::ofNullable, Setting.Property.NodeScope, Setting.Property.Filtered); + + static final Function>> CERT_TEMPLATE = key -> new Setting<>(key, s -> null, + Optional::ofNullable, Setting.Property.NodeScope, Setting.Property.Filtered); + + static final Function> LEGACY_KEY_PASSWORD_TEMPLATE = key -> new Setting<>(key, "", + SecureString::new, Setting.Property.Deprecated, Setting.Property.Filtered, Setting.Property.NodeScope); + static final Function> KEY_PASSWORD_TEMPLATE = key -> + SecureSetting.secureString(key, LEGACY_KEY_PASSWORD_TEMPLATE.apply(key.replace("secure_key_passphrase", + "key_passphrase"))); + + + private final String prefix; + + // Specify private cert/key pair via keystore + final Setting> keystorePath; + final Setting keystorePassword; + final Setting keystoreAlgorithm; + final Setting> keystoreType; + final Setting keystoreKeyPassword; + + // Specify private cert/key pair via key and certificate files + final Setting> keyPath; + final Setting keyPassword; + final Setting> certificatePath; + + // Optional support for legacy (non secure) passwords + // pkg private for tests + final Setting legacyKeystorePassword; + final Setting legacyKeystoreKeyPassword; + final Setting legacyKeyPassword; + + private final List> allSettings; + + public X509KeyPairSettings(String prefix, boolean acceptNonSecurePasswords) { + keystorePath = KEYSTORE_PATH_TEMPLATE.apply(prefix + "keystore.path"); + keystorePassword = KEYSTORE_PASSWORD_TEMPLATE.apply(prefix + "keystore.secure_password"); + keystoreAlgorithm = KEY_STORE_ALGORITHM_TEMPLATE.apply(prefix + "keystore.algorithm"); + keystoreType = KEY_STORE_TYPE_TEMPLATE.apply(prefix + "keystore.type"); + keystoreKeyPassword = KEYSTORE_KEY_PASSWORD_TEMPLATE.apply(prefix + "keystore.secure_key_password"); + + keyPath = KEY_PATH_TEMPLATE.apply(prefix + "key"); + keyPassword = KEY_PASSWORD_TEMPLATE.apply(prefix + "secure_key_passphrase"); + certificatePath = CERT_TEMPLATE.apply(prefix + "certificate"); + + legacyKeystorePassword = LEGACY_KEYSTORE_PASSWORD_TEMPLATE.apply(prefix + 
"keystore.password"); + legacyKeystoreKeyPassword = LEGACY_KEYSTORE_KEY_PASSWORD_TEMPLATE.apply(prefix + "keystore.key_password"); + legacyKeyPassword = LEGACY_KEY_PASSWORD_TEMPLATE.apply(prefix + "key_passphrase"); + this.prefix = prefix; + + final List> settings = CollectionUtils.arrayAsArrayList( + keystorePath, keystorePassword, keystoreAlgorithm, keystoreType, keystoreKeyPassword, + keyPath, keyPassword, certificatePath); + if (acceptNonSecurePasswords) { + settings.add(legacyKeystorePassword); + settings.add(legacyKeystoreKeyPassword); + settings.add(legacyKeyPassword); + } + allSettings = Collections.unmodifiableList(settings); + } + + + public Collection> getAllSettings() { + return allSettings; + } + + public String getPrefix() { + return prefix; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/GetCertificateInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/GetCertificateInfoAction.java new file mode 100644 index 0000000000000..54e7548b8038b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/GetCertificateInfoAction.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ssl.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.ssl.cert.CertificateInfo; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; + +/** + * Action to obtain information about X.509 (SSL/TLS) certificates that are being used by X-Pack. + * The primary use case is for tracking the expiry dates of certificates. 
+ */ +public class GetCertificateInfoAction + extends Action { + + public static final GetCertificateInfoAction INSTANCE = new GetCertificateInfoAction(); + public static final String NAME = "cluster:monitor/xpack/ssl/certificates/get"; + + private GetCertificateInfoAction() { + super(NAME); + } + + @Override + public GetCertificateInfoAction.RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new GetCertificateInfoAction.RequestBuilder(client, this); + } + + @Override + public GetCertificateInfoAction.Response newResponse() { + return new GetCertificateInfoAction.Response(); + } + + public static class Request extends ActionRequest { + + @Override + public ActionRequestValidationException validate() { + return null; + } + + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private Collection certificates; + + public Response() { + } + + public Response(Collection certificates) { + this.certificates = certificates; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray(); + for (CertificateInfo cert : certificates) { + cert.toXContent(builder, params); + } + return builder.endArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(certificates.size()); + for (CertificateInfo cert : certificates) { + cert.writeTo(out); + } + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + this.certificates = new ArrayList<>(); + int count = in.readVInt(); + for (int i = 0; i < count; i++) { + certificates.add(new CertificateInfo(in)); + } + } + } + + public static class RequestBuilder extends ActionRequestBuilder { + + public RequestBuilder(ElasticsearchClient client, GetCertificateInfoAction action) { + super(client, action, new Request()); + } + + public RequestBuilder(ElasticsearchClient client) { + this(client, GetCertificateInfoAction.INSTANCE); + } + } + +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/TransportGetCertificateInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/TransportGetCertificateInfoAction.java new file mode 100644 index 0000000000000..3670efeeeeee9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/action/TransportGetCertificateInfoAction.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ssl.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.core.ssl.cert.CertificateInfo; + +import java.io.IOException; +import java.security.GeneralSecurityException; +import java.util.Collection; + +public class TransportGetCertificateInfoAction extends HandledTransportAction { + + private final SSLService sslService; + + @Inject + public TransportGetCertificateInfoAction(Settings settings, ThreadPool threadPool, + TransportService transportService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + SSLService sslService) { + super(settings, GetCertificateInfoAction.NAME, threadPool, transportService, actionFilters, + indexNameExpressionResolver, GetCertificateInfoAction.Request::new); + this.sslService = sslService; + } + + @Override + protected void doExecute(GetCertificateInfoAction.Request request, + ActionListener listener) { + try { + Collection certificates = sslService.getLoadedCertificates(); + listener.onResponse(new GetCertificateInfoAction.Response(certificates)); + } catch (GeneralSecurityException | IOException e) { + listener.onFailure(e); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/cert/CertificateInfo.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/cert/CertificateInfo.java new file mode 100644 index 0000000000000..cb05a67886393 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/cert/CertificateInfo.java @@ -0,0 +1,131 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
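Purely for orientation (not part of this change), a caller that already holds an `ElasticsearchClient` could fetch the loaded certificates through the request builder declared above; the `client` and `logger` references in this sketch are assumed to exist in scope.

```java
// Minimal sketch: ask the cluster for the certificates currently loaded by the SSL configuration.
new GetCertificateInfoAction.RequestBuilder(client)
        .execute(ActionListener.wrap(
                response -> logger.info("retrieved SSL certificate information"),
                e -> logger.warn("failed to retrieve SSL certificate information", e)));
```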
+ */ +package org.elasticsearch.xpack.core.ssl.cert; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.security.cert.X509Certificate; +import java.util.Objects; + +/** + * Simple model of an X.509 certificate that is known to X-Pack + */ +public class CertificateInfo implements ToXContentObject, Writeable { + private final String path; + private final String format; + private final String alias; + private final String subjectDn; + private final String serialNumber; + private final boolean hasPrivateKey; + private final DateTime expiry; + + public CertificateInfo(String path, String format, String alias, boolean hasPrivateKey, X509Certificate certificate) { + Objects.requireNonNull(certificate, "Certificate cannot be null"); + this.path = Objects.requireNonNull(path, "Certificate path cannot be null"); + this.format = Objects.requireNonNull(format, "Certificate format cannot be null"); + this.alias = alias; + this.subjectDn = Objects.requireNonNull(certificate.getSubjectDN().getName()); + this.serialNumber = certificate.getSerialNumber().toString(16); + this.hasPrivateKey = hasPrivateKey; + this.expiry = new DateTime(certificate.getNotAfter(), DateTimeZone.UTC); + } + + public CertificateInfo(StreamInput in) throws IOException { + this.path = in.readString(); + this.format = in.readString(); + this.alias = in.readOptionalString(); + this.subjectDn = in.readString(); + this.serialNumber = in.readString(); + this.hasPrivateKey = in.readBoolean(); + this.expiry = new DateTime(in.readLong(), DateTimeZone.UTC); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(path); + out.writeString(format); + out.writeOptionalString(alias); + out.writeString(subjectDn); + out.writeString(serialNumber); + out.writeBoolean(hasPrivateKey); + out.writeLong(expiry.getMillis()); + } + + public String path() { + return path; + } + + public String format() { + return format; + } + + public String alias() { + return alias; + } + + public String subjectDn() { + return subjectDn; + } + + public String serialNumber() { + return serialNumber; + } + + public DateTime expiry() { + return expiry; + } + + public boolean hasPrivateKey() { + return hasPrivateKey; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field("path", path) + .field("format", format) + .field("alias", alias) + .field("subject_dn", subjectDn) + .field("serial_number", serialNumber) + .field("has_private_key", hasPrivateKey) + .timeField("expiry", expiry) + .endObject(); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + + final CertificateInfo that = (CertificateInfo) other; + return this.path.equals(that.path) + && this.format.equals(that.format) + && this.hasPrivateKey == that.hasPrivateKey + && Objects.equals(this.alias, that.alias) + && Objects.equals(this.serialNumber, that.serialNumber) + && Objects.equals(this.subjectDn, that.subjectDn) + && Objects.equals(this.expiry, that.expiry); + } + + @Override + public int hashCode() { + int 
result = path.hashCode(); + result = 31 * result + (alias != null ? alias.hashCode() : 0); + result = 31 * result + (serialNumber != null ? serialNumber.hashCode() : 0); + return result; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/rest/RestGetCertificateInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/rest/RestGetCertificateInfoAction.java new file mode 100644 index 0000000000000..d7d7d7042ed3c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/rest/RestGetCertificateInfoAction.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ssl.rest; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.ssl.action.GetCertificateInfoAction; +import org.elasticsearch.xpack.core.ssl.action.GetCertificateInfoAction.Response; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +/** + * A REST handler to obtain information about TLS/SSL (X.509) certificates + * @see GetCertificateInfoAction + */ +public class RestGetCertificateInfoAction extends BaseRestHandler { + + public RestGetCertificateInfoAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(GET, "/_xpack/ssl/certificates", this); + } + + @Override + public String getName() { + return "xpack_ssl_get_certificates"; + } + + @Override + protected final RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + return channel -> new GetCertificateInfoAction.RequestBuilder(client, GetCertificateInfoAction.INSTANCE) + .execute(new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(Response response, XContentBuilder builder) throws Exception { + return new BytesRestResponse(RestStatus.OK, response.toXContent(builder, request)); + } + }); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java new file mode 100644 index 0000000000000..893c91f056c57 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java @@ -0,0 +1,191 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
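As an aside, the REST handler above simply streams the `CertificateInfo` array produced by `toXContent`. A minimal sketch of building one entry from a certificate file with the standard JDK factory follows; the helper name, file path and "PEM" format label are illustrative only.

```java
// Hypothetical helper: wrap an on-disk certificate in the model class defined above.
static CertificateInfo describeCertificate(String path) throws Exception {
    CertificateFactory factory = CertificateFactory.getInstance("X.509");
    try (InputStream in = Files.newInputStream(Paths.get(path))) {
        X509Certificate certificate = (X509Certificate) factory.generateCertificate(in);
        return new CertificateInfo(path, "PEM", null, false, certificate);
    }
}
// GET /_xpack/ssl/certificates then renders each entry roughly as:
// {"path":"...","format":"PEM","alias":null,"subject_dn":"CN=...","serial_number":"...",
//  "has_private_key":false,"expiry":"2019-12-31T00:00:00.000Z"}
```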
+ */ +package org.elasticsearch.xpack.core.template; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.compress.NotXContentException; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.Streams; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.Map; +import java.util.function.Predicate; +import java.util.regex.Pattern; + +import static org.elasticsearch.common.xcontent.XContentHelper.convertToMap; + +/** + * Handling versioned templates for time-based indices in x-pack + */ +public class TemplateUtils { + + private TemplateUtils() {} + + /** + * Loads a JSON template as a resource and puts it into the provided map + */ + public static void loadTemplateIntoMap(String resource, Map map, String templateName, String version, + String versionProperty, Logger logger) { + final String template = loadTemplate(resource, version, versionProperty); + try (XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, template)) { + map.put(templateName, IndexTemplateMetaData.Builder.fromXContent(parser, templateName)); + } catch (IOException e) { + // TODO: should we handle this with a thrown exception? + logger.error("Error loading template [{}] as part of metadata upgrading", templateName); + } + } + + /** + * Loads a built-in template and returns its source. + */ + public static String loadTemplate(String resource, String version, String versionProperty) { + try { + BytesReference source = load(resource); + validate(source); + + return filter(source, version, versionProperty); + } catch (Exception e) { + throw new IllegalArgumentException("Unable to load template [" + resource + "]", e); + } + } + + /** + * Loads a resource from the classpath and returns it as a {@link BytesReference} + */ + public static BytesReference load(String name) throws IOException { + try (InputStream is = TemplateUtils.class.getResourceAsStream(name)) { + try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { + Streams.copy(is, out); + return new BytesArray(out.toByteArray()); + } + } + } + + /** + * Parses and validates that the source is not empty. 
+ */ + public static void validate(BytesReference source) { + if (source == null) { + throw new ElasticsearchParseException("Template must not be null"); + } + + try { + XContentHelper.convertToMap(source, false, XContentType.JSON).v2(); + } catch (NotXContentException e) { + throw new ElasticsearchParseException("Template must not be empty"); + } catch (Exception e) { + throw new ElasticsearchParseException("Invalid template", e); + } + } + + /** + * Filters the source: replaces any template version property with the version number + */ + public static String filter(BytesReference source, String version, String versionProperty) { + return Pattern.compile(versionProperty) + .matcher(source.utf8ToString()) + .replaceAll(version); + } + + /** + * Checks if a versioned template exists, and if it exists checks if the version is greater than or equal to the current version. + * @param templateName Name of the index template + * @param state Cluster state + */ + public static boolean checkTemplateExistsAndVersionIsGTECurrentVersion(String templateName, ClusterState state) { + IndexTemplateMetaData templateMetaData = state.metaData().templates().get(templateName); + if (templateMetaData == null) { + return false; + } + + return templateMetaData.version() != null && templateMetaData.version() >= Version.CURRENT.id; + } + + /** + * Checks if a versioned template exists, and if it exists checks if it is up-to-date with current version. + * @param versionKey The property in the mapping's _meta field which stores the version info + * @param templateName Name of the index template + * @param state Cluster state + * @param logger Logger + */ + public static boolean checkTemplateExistsAndIsUpToDate( + String templateName, String versionKey, ClusterState state, Logger logger) { + + return checkTemplateExistsAndVersionMatches(templateName, versionKey, state, logger, + Version.CURRENT::equals); + } + + /** + * Checks if template with given name exists and if it matches the version predicate given + * @param versionKey The property in the mapping's _meta field which stores the version info + * @param templateName Name of the index template + * @param state Cluster state + * @param logger Logger + * @param predicate Predicate to execute on version check + */ + public static boolean checkTemplateExistsAndVersionMatches( + String templateName, String versionKey, ClusterState state, Logger logger, Predicate predicate) { + + IndexTemplateMetaData templateMeta = state.metaData().templates().get(templateName); + if (templateMeta == null) { + return false; + } + ImmutableOpenMap mappings = templateMeta.getMappings(); + // check all mappings contain correct version in _meta + // we have to parse the source here which is annoying + for (Object typeMapping : mappings.values().toArray()) { + CompressedXContent typeMappingXContent = (CompressedXContent) typeMapping; + try { + Map typeMappingMap = convertToMap( + new BytesArray(typeMappingXContent.uncompressed()), false, + XContentType.JSON).v2(); + // should always contain one entry with key = typename + assert (typeMappingMap.size() == 1); + String key = typeMappingMap.keySet().iterator().next(); + // get the actual mapping entries + @SuppressWarnings("unchecked") + Map mappingMap = (Map) typeMappingMap.get(key); + if (containsCorrectVersion(versionKey, mappingMap, predicate) == false) { + return false; + } + } catch (ElasticsearchParseException e) { + logger.error(new ParameterizedMessage( + "Cannot parse the template [{}]", templateName), e); + throw new 
IllegalStateException("Cannot parse the template " + templateName, e); + } + } + return true; + } + + private static boolean containsCorrectVersion(String versionKey, Map typeMappingMap, + Predicate predicate) { + @SuppressWarnings("unchecked") + Map meta = (Map) typeMappingMap.get("_meta"); + if (meta == null) { + // pre 5.0, cannot be up to date + return false; + } + return predicate.test(Version.fromString((String) meta.get(versionKey))); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/IndexUpgradeCheckVersion.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/IndexUpgradeCheckVersion.java new file mode 100644 index 0000000000000..e09f73a688e57 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/IndexUpgradeCheckVersion.java @@ -0,0 +1,13 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.upgrade; + +public final class IndexUpgradeCheckVersion { + public static final int UPRADE_VERSION = 6; + + private IndexUpgradeCheckVersion() {} + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/IndexUpgradeServiceFields.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/IndexUpgradeServiceFields.java new file mode 100644 index 0000000000000..e666ab89f5378 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/IndexUpgradeServiceFields.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.upgrade; + +import org.elasticsearch.action.support.IndicesOptions; + +public final class IndexUpgradeServiceFields { + + public static final IndicesOptions UPGRADE_INDEX_OPTIONS = IndicesOptions.strictSingleIndexNoExpandForbidClosed(); + + private IndexUpgradeServiceFields() {} +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/UpgradeActionRequired.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/UpgradeActionRequired.java new file mode 100644 index 0000000000000..1bc4d92f33d90 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/UpgradeActionRequired.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
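Since `TemplateUtils.filter` above is a plain regex substitution over the template source, a minimal sketch with a made-up version property token may help; the token name and template body are hypothetical.

```java
// Hypothetical template source containing a version placeholder.
BytesReference source = new BytesArray(
        "{ \"index_patterns\": [ \".example-*\" ], \"version\": ${example.template.version} }");
// Every match of the (regex) version property is replaced with the supplied version string.
String rendered = TemplateUtils.filter(source, "6", "\\$\\{example\\.template\\.version\\}");
// rendered now reads: { "index_patterns": [ ".example-*" ], "version": 6 }
```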
+ */ +package org.elasticsearch.xpack.core.upgrade; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Locale; + +/** + * Indicates the type of the upgrade required for the index + */ +public enum UpgradeActionRequired implements Writeable { + NOT_APPLICABLE, // Indicates that the check is not applicable to this index type, the next check will be performed + UP_TO_DATE, // Indicates that the check finds this index to be up to date - no additional checks are required + REINDEX, // The index should be reindex + UPGRADE; // The index should go through the upgrade procedure + + public static UpgradeActionRequired fromString(String value) { + return UpgradeActionRequired.valueOf(value.toUpperCase(Locale.ROOT)); + } + + public static UpgradeActionRequired readFromStream(StreamInput in) throws IOException { + return in.readEnum(UpgradeActionRequired.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(this); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/UpgradeField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/UpgradeField.java new file mode 100644 index 0000000000000..48060c5772550 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/UpgradeField.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.upgrade; + +import org.elasticsearch.cluster.metadata.IndexMetaData; + +public final class UpgradeField { + // this is the required index.format setting for 6.0 services (watcher and security) to start up + // this index setting is set by the upgrade API or automatically when a 6.0 index template is created + private static final int EXPECTED_INDEX_FORMAT_VERSION = 6; + + private UpgradeField() {} + + /** + * Checks the format of an internal index and returns true if the index is up to date or false if upgrade is required + */ + public static boolean checkInternalIndexFormat(IndexMetaData indexMetaData) { + return indexMetaData.getSettings().getAsInt(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 0) == EXPECTED_INDEX_FORMAT_VERSION; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/actions/IndexUpgradeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/actions/IndexUpgradeAction.java new file mode 100644 index 0000000000000..f5b5844e5f267 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/actions/IndexUpgradeAction.java @@ -0,0 +1,164 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
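As a small illustration of the `Writeable` contract above (assumed to run inside a method that declares `throws IOException`), the enum round-trips through the in-memory stream implementations:

```java
try (BytesStreamOutput out = new BytesStreamOutput()) {
    UpgradeActionRequired.REINDEX.writeTo(out);
    try (StreamInput in = out.bytes().streamInput()) {
        UpgradeActionRequired roundTripped = UpgradeActionRequired.readFromStream(in);
        assert roundTripped == UpgradeActionRequired.REINDEX;
    }
}
```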
+ */ +package org.elasticsearch.xpack.core.upgrade.actions; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.xpack.core.upgrade.IndexUpgradeServiceFields.UPGRADE_INDEX_OPTIONS; + +public class IndexUpgradeAction extends Action { + + public static final IndexUpgradeAction INSTANCE = new IndexUpgradeAction(); + public static final String NAME = "cluster:admin/xpack/upgrade"; + + private IndexUpgradeAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public BulkByScrollResponse newResponse() { + return new BulkByScrollResponse(); + } + + public static class Request extends MasterNodeReadRequest implements IndicesRequest { + + private String index = null; + + /** + * Should this task store its result? + */ + private boolean shouldStoreResult; + + // for serialization + public Request() { + + } + + public Request(String index) { + this.index = index; + } + + public Request(StreamInput in) throws IOException { + super(in); + index = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(index); + } + + public String index() { + return index; + } + + /** + * Sets the index. + */ + @SuppressWarnings("unchecked") + public final Request index(String index) { + this.index = index; + return this; + } + + @Override + public String[] indices() { + return new String[]{index}; + } + + @Override + public IndicesOptions indicesOptions() { + return UPGRADE_INDEX_OPTIONS; + } + + /** + * Should this task store its result after it has finished? 
+ */ + public Request setShouldStoreResult(boolean shouldStoreResult) { + this.shouldStoreResult = shouldStoreResult; + return this; + } + + @Override + public boolean getShouldStoreResult() { + return shouldStoreResult; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (index == null) { + validationException = addValidationError("index is missing", validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Objects.equals(index, request.index); + } + + @Override + public int hashCode() { + return Objects.hash(index); + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, getDescription(), parentTaskId, headers) { + @Override + public boolean shouldCancelChildrenOnCancellation() { + return true; + } + }; + } + } + + public static class RequestBuilder extends MasterNodeReadOperationRequestBuilder { + + protected RequestBuilder(ElasticsearchClient client, IndexUpgradeAction action) { + super(client, action, new Request()); + } + + public RequestBuilder setIndex(String index) { + request.index(index); + return this; + } + } + +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/actions/IndexUpgradeInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/actions/IndexUpgradeInfoAction.java new file mode 100644 index 0000000000000..74df1caecdde0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/upgrade/actions/IndexUpgradeInfoAction.java @@ -0,0 +1,201 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
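For orientation only, a hypothetical caller could trigger the upgrade of a single internal index and keep the task result; `client`, `logger` and the index name are placeholders, not part of this change.

```java
IndexUpgradeAction.Request upgradeRequest = new IndexUpgradeAction.Request(".watches")
        .setShouldStoreResult(true);
client.execute(IndexUpgradeAction.INSTANCE, upgradeRequest, ActionListener.wrap(
        bulkResponse -> logger.info("upgrade re-indexed [{}] documents", bulkResponse.getCreated()),
        e -> logger.warn("index upgrade failed", e)));
```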
+ */ +package org.elasticsearch.xpack.core.upgrade.actions; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.upgrade.UpgradeActionRequired; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class IndexUpgradeInfoAction extends Action { + + public static final IndexUpgradeInfoAction INSTANCE = new IndexUpgradeInfoAction(); + public static final String NAME = "cluster:admin/xpack/upgrade/info"; + + private IndexUpgradeInfoAction() { + super(NAME); + } + + @Override + public RequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RequestBuilder(client, this); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Response extends ActionResponse implements ToXContentObject { + private Map actions; + + public Response() { + + } + + public Response(Map actions) { + this.actions = actions; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + actions = in.readMap(StreamInput::readString, UpgradeActionRequired::readFromStream); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(actions, StreamOutput::writeString, (out1, value) -> value.writeTo(out1)); + } + + public Map getActions() { + return actions; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.startObject("indices"); + for (Map.Entry entry : actions.entrySet()) { + builder.startObject(entry.getKey()); + { + builder.field("action_required", entry.getValue().toString()); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return Objects.equals(actions, response.actions); + } + + @Override + public int hashCode() { + return Objects.hash(actions); + } + } + + public static class Request extends MasterNodeReadRequest implements IndicesRequest.Replaceable { + + private String[] indices = null; + private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true); + + // for serialization + public Request() { + + } + + public Request(String... 
indices) { + this.indices = indices; + } + + public Request(StreamInput in) throws IOException { + super(in); + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + } + + @Override + public String[] indices() { + return indices; + } + + @Override + public Request indices(String... indices) { + this.indices = indices; + return this; + } + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + public void indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (indices == null) { + validationException = addValidationError("index/indices is missing", validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return Arrays.equals(indices, request.indices) && + Objects.equals(indicesOptions.toString(), request.indicesOptions.toString()); + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(indices), indicesOptions.toString()); + } + } + + public static class RequestBuilder extends MasterNodeReadOperationRequestBuilder { + + protected RequestBuilder(ElasticsearchClient client, IndexUpgradeInfoAction action) { + super(client, action, new Request()); + } + + public RequestBuilder setIndices(String... indices) { + request.indices(indices); + return this; + } + + public RequestBuilder setIndicesOptions(IndicesOptions indicesOptions) { + request.indicesOptions(indicesOptions); + return this; + } + } + +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherFeatureSetUsage.java new file mode 100644 index 0000000000000..393c05b8fb3e0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherFeatureSetUsage.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
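To make the XContent shape of the info response above concrete, here is a minimal sketch of building one by hand (the index name is hypothetical, imports elided):

```java
Map<String, UpgradeActionRequired> actions = new HashMap<>();
actions.put(".example-index", UpgradeActionRequired.UPGRADE);
IndexUpgradeInfoAction.Response response = new IndexUpgradeInfoAction.Response(actions);
// toXContent renders: {"indices":{".example-index":{"action_required":"upgrade"}}}
```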
+ */ +package org.elasticsearch.xpack.core.watcher; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; + +import java.io.IOException; +import java.util.Map; + +public class WatcherFeatureSetUsage extends XPackFeatureSet.Usage { + + private final Map stats; + + public WatcherFeatureSetUsage(StreamInput in) throws IOException { + super(in); + stats = in.readMap(); + } + + public WatcherFeatureSetUsage(boolean available, boolean enabled, Map stats) { + super(XPackField.WATCHER, available, enabled); + this.stats = stats; + } + + public Map stats() { + return stats; + } + + @Override + protected void innerXContent(XContentBuilder builder, Params params) throws IOException { + super.innerXContent(builder, params); + if (enabled) { + for (Map.Entry entry : stats.entrySet()) { + builder.field(entry.getKey(), entry.getValue()); + } + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(stats); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherField.java new file mode 100644 index 0000000000000..b7ad6ee423d28 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherField.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher; + +import org.elasticsearch.common.settings.SecureSetting; +import org.elasticsearch.common.settings.Setting; + +import java.io.InputStream; + +public final class WatcherField { + + public static final Setting ENCRYPTION_KEY_SETTING = + SecureSetting.secureFile("xpack.watcher.encryption_key", null); + + private WatcherField() {} +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherMetaData.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherMetaData.java new file mode 100644 index 0000000000000..3a490f08b79e5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherMetaData.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher; + +import org.elasticsearch.cluster.AbstractNamedDiffable; +import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.Objects; + +public class WatcherMetaData extends AbstractNamedDiffable implements MetaData.Custom { + + public static final String TYPE = "watcher"; + + private final boolean manuallyStopped; + + public WatcherMetaData(boolean manuallyStopped) { + this.manuallyStopped = manuallyStopped; + } + + public boolean manuallyStopped() { + return manuallyStopped; + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public EnumSet context() { + return EnumSet.of(MetaData.XContentContext.GATEWAY); + } + + public WatcherMetaData(StreamInput streamInput) throws IOException { + this(streamInput.readBoolean()); + } + + public static NamedDiff readDiffFrom(StreamInput streamInput) throws IOException { + return readDiffFrom(MetaData.Custom.class, TYPE, streamInput); + } + + @Override + public void writeTo(StreamOutput streamOutput) throws IOException { + streamOutput.writeBoolean(manuallyStopped); + } + + @Override + public String toString() { + return "manuallyStopped["+ manuallyStopped +"]"; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + WatcherMetaData action = (WatcherMetaData) o; + + return manuallyStopped == action.manuallyStopped; + } + + @Override + public int hashCode() { + return Objects.hash(manuallyStopped); + } + + public static MetaData.Custom fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token; + Boolean manuallyStopped = null; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + switch (token) { + case FIELD_NAME: + currentFieldName = parser.currentName(); + break; + case VALUE_BOOLEAN: + if (Field.MANUALLY_STOPPED.match(currentFieldName, parser.getDeprecationHandler())) { + manuallyStopped = parser.booleanValue(); + } + break; + } + } + if (manuallyStopped != null) { + return new WatcherMetaData(manuallyStopped); + } + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(Field.MANUALLY_STOPPED.getPreferredName(), manuallyStopped); + return builder; + } + + interface Field { + + ParseField MANUALLY_STOPPED = new ParseField("manually_stopped"); + + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherState.java new file mode 100644 index 0000000000000..ac7002667df0a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherState.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
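As a usage sketch only (the `clusterState` reference is assumed to be in scope), the custom metadata above is read back out of the cluster state to decide whether watcher was stopped deliberately:

```java
WatcherMetaData watcherMetaData = clusterState.metaData().custom(WatcherMetaData.TYPE);
boolean manuallyStopped = watcherMetaData != null && watcherMetaData.manuallyStopped();
// Within the cluster metadata XContent this appears as: "watcher" : { "manually_stopped" : true }
```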
+ */ +package org.elasticsearch.xpack.core.watcher; + +/** + * Encapsulates the state of the watcher plugin. + */ +public enum WatcherState { + + /** + * The watcher plugin is not running and not functional. + */ + STOPPED(0), + + /** + * The watcher plugin is performing the necessary operations to get into a started state. + */ + STARTING(1), + + /** + * The watcher plugin is running and completely functional. + */ + STARTED(2), + + /** + * The watcher plugin is shutting down and not functional. + */ + STOPPING(3); + + private final byte id; + + WatcherState(int id) { + this.id = (byte) id; + } + + public byte getId() { + return id; + } + + public static WatcherState fromId(byte id) { + switch (id) { + case 0: + return STOPPED; + case 1: + return STARTING; + case 2: + return STARTED; + default: //3 + assert id == 3 : "unknown watcher state id [" + id + "]"; + return STOPPING; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/Action.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/Action.java new file mode 100644 index 0000000000000..3fe07dddacbca --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/Action.java @@ -0,0 +1,151 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.actions; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.logging.LoggerMessageFormat; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Locale; + +public interface Action extends ToXContentObject { + + String type(); + + abstract class Result implements ToXContentFragment { + + public enum Status { + SUCCESS, + FAILURE, + PARTIAL_FAILURE, + ACKNOWLEDGED, + THROTTLED, + CONDITION_FAILED, + SIMULATED; + + public String value() { + return name().toLowerCase(Locale.ROOT); + } + } + + protected final String type; + protected final Status status; + + protected Result(String type, Status status) { + this.type = type; + this.status = status; + } + + public String type() { + return type; + } + + public Status status() { + return status; + } + + /** + * {@code StoppedResult} is a {@link Result} with a {@link #reason()}. + *
+     * <p>
+ * Any {@code StoppedResult} should provide a reason why it is stopped. + */ + public static class StoppedResult extends Result { + + private static ParseField REASON = new ParseField("reason"); + + private final String reason; + + protected StoppedResult(String type, Status status, String reason, Object... args) { + super(type, status); + this.reason = LoggerMessageFormat.format(reason, args); + } + + public String reason() { + return reason; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field(REASON.getPreferredName(), reason); + } + + } + + /** + * {@code Failure} is a {@link StoppedResult} with a status of {@link Status#FAILURE} for actiosn that have failed unexpectedly + * (e.g., an exception was thrown in a place that wouldn't expect one, like transformation or an HTTP request). + */ + public static class Failure extends StoppedResult { + + public Failure(String type, String reason, Object... args) { + super(type, Status.FAILURE, reason, args); + } + } + + public static class FailureWithException extends Result { + + private final Exception exception; + + public FailureWithException(String type, Exception exception) { + super(type, Status.FAILURE); + this.exception = exception; + } + + public Exception getException() { + return exception; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + ElasticsearchException.generateFailureXContent(builder, params, exception, true); + return builder; + } + } + + /** + * {@code Throttled} is a {@link StoppedResult} with a status of {@link Status#THROTTLED} for actions that have been throttled. + */ + public static class Throttled extends StoppedResult { + + public Throttled(String type, String reason) { + super(type, Status.THROTTLED, reason); + } + + } + + /** + * {@code Acknowledged} is a {@link StoppedResult} with a status of {@link Status#ACKNOWLEDGED} for actions that + * have been throttled. + */ + public static class Acknowledged extends StoppedResult { + + public Acknowledged(String type, String reason) { + super(type, Status.ACKNOWLEDGED, reason); + } + } + + /** + * {@code ConditionFailed} is a {@link StoppedResult} with a status of {@link Status#FAILURE} for actions that have been skipped + * because the action's condition failed (either expected or unexpected). + */ + public static class ConditionFailed extends StoppedResult { + + public ConditionFailed(String type, String reason, Object... args) { + super(type, Status.CONDITION_FAILED, reason, args); + } + + } + } + + interface Builder { + + A build(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionFactory.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionFactory.java new file mode 100644 index 0000000000000..3618b2de4080b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionFactory.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.actions; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +/** + * Parses xcontent to a concrete action of the same type. + */ +public abstract class ActionFactory { + + protected final Logger actionLogger; + + protected ActionFactory(Logger actionLogger) { + this.actionLogger = actionLogger; + } + + /** + * Parses the given xcontent and creates a concrete action + */ + public abstract ExecutableAction parseExecutable(String watchId, String actionId, XContentParser parser) throws IOException; +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionRegistry.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionRegistry.java new file mode 100644 index 0000000000000..37a639ca82285 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionRegistry.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.actions; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.watcher.condition.ConditionRegistry; +import org.elasticsearch.xpack.core.watcher.support.WatcherUtils; +import org.elasticsearch.xpack.core.watcher.transform.TransformRegistry; + +import java.io.IOException; +import java.time.Clock; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +public class ActionRegistry { + + private final Map parsers; + private final ConditionRegistry conditionRegistry; + private final TransformRegistry transformRegistry; + private final Clock clock; + private final XPackLicenseState licenseState; + + public ActionRegistry(Map parsers, + ConditionRegistry conditionRegistry, TransformRegistry transformRegistry, + Clock clock, + XPackLicenseState licenseState) { + this.parsers = parsers; + this.conditionRegistry = conditionRegistry; + this.transformRegistry = transformRegistry; + this.clock = clock; + this.licenseState = licenseState; + } + + ActionFactory factory(String type) { + return parsers.get(type); + } + + public List parseActions(String watchId, XContentParser parser) throws IOException { + if (parser.currentToken() != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("could not parse actions for watch [{}]. expected an object but found [{}] instead", + watchId, parser.currentToken()); + } + List actions = new ArrayList<>(); + + String id = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + id = parser.currentName(); + if (WatcherUtils.isValidId(id) == false) { + throw new ElasticsearchParseException("could not parse action [{}] for watch [{}]. 
id contains whitespace", id, + watchId); + } + } else if (token == XContentParser.Token.START_OBJECT && id != null) { + actions.add(ActionWrapper.parse(watchId, id, parser, this, clock, licenseState)); + } + } + return actions; + } + + public TransformRegistry getTransformRegistry() { + return transformRegistry; + } + + public ConditionRegistry getConditionRegistry() { + return conditionRegistry; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionStatus.java new file mode 100644 index 0000000000000..fb397b8f09a5b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionStatus.java @@ -0,0 +1,503 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.actions; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.util.Locale; +import java.util.Objects; + +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArgument; +import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.dateTimeFormatter; + +public class ActionStatus implements ToXContentObject { + + private AckStatus ackStatus; + @Nullable private Execution lastExecution; + @Nullable private Execution lastSuccessfulExecution; + @Nullable private Throttle lastThrottle; + + public ActionStatus(DateTime now) { + this(new AckStatus(now, AckStatus.State.AWAITS_SUCCESSFUL_EXECUTION), null, null, null); + } + + public ActionStatus(AckStatus ackStatus, @Nullable Execution lastExecution, @Nullable Execution lastSuccessfulExecution, + @Nullable Throttle lastThrottle) { + this.ackStatus = ackStatus; + this.lastExecution = lastExecution; + this.lastSuccessfulExecution = lastSuccessfulExecution; + this.lastThrottle = lastThrottle; + } + + public AckStatus ackStatus() { + return ackStatus; + } + + public Execution lastExecution() { + return lastExecution; + } + + public Execution lastSuccessfulExecution() { + return lastSuccessfulExecution; + } + + public Throttle lastThrottle() { + return lastThrottle; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ActionStatus that = (ActionStatus) o; + + return Objects.equals(ackStatus, that.ackStatus) && + Objects.equals(lastExecution, that.lastExecution) && + Objects.equals(lastSuccessfulExecution, that.lastSuccessfulExecution) && + Objects.equals(lastThrottle, that.lastThrottle); + } + + @Override + public int hashCode() { + return Objects.hash(ackStatus, lastExecution, lastSuccessfulExecution, lastThrottle); + } + + public void update(DateTime timestamp, Action.Result result) { + switch (result.status()) { + + case FAILURE: + String reason = result 
instanceof Action.Result.Failure ? ((Action.Result.Failure) result).reason() : ""; + lastExecution = Execution.failure(timestamp, reason); + return; + + case THROTTLED: + reason = result instanceof Action.Result.Throttled ? ((Action.Result.Throttled) result).reason() : ""; + lastThrottle = new Throttle(timestamp, reason); + return; + + case SUCCESS: + case SIMULATED: + lastExecution = Execution.successful(timestamp); + lastSuccessfulExecution = lastExecution; + if (ackStatus.state == AckStatus.State.AWAITS_SUCCESSFUL_EXECUTION) { + ackStatus = new AckStatus(timestamp, AckStatus.State.ACKABLE); + } + } + } + + public boolean onAck(DateTime timestamp) { + if (ackStatus.state == AckStatus.State.ACKABLE) { + ackStatus = new AckStatus(timestamp, AckStatus.State.ACKED); + return true; + } + return false; + } + + public boolean resetAckStatus(DateTime timestamp) { + if (ackStatus.state != AckStatus.State.AWAITS_SUCCESSFUL_EXECUTION) { + ackStatus = new AckStatus(timestamp, AckStatus.State.AWAITS_SUCCESSFUL_EXECUTION); + return true; + } + return false; + } + + public static void writeTo(ActionStatus status, StreamOutput out) throws IOException { + AckStatus.writeTo(status.ackStatus, out); + out.writeBoolean(status.lastExecution != null); + if (status.lastExecution != null) { + Execution.writeTo(status.lastExecution, out); + } + out.writeBoolean(status.lastSuccessfulExecution != null); + if (status.lastSuccessfulExecution != null) { + Execution.writeTo(status.lastSuccessfulExecution, out); + } + out.writeBoolean(status.lastThrottle != null); + if (status.lastThrottle != null) { + Throttle.writeTo(status.lastThrottle, out); + } + } + + public static ActionStatus readFrom(StreamInput in) throws IOException { + AckStatus ackStatus = AckStatus.readFrom(in); + Execution lastExecution = in.readBoolean() ? Execution.readFrom(in) : null; + Execution lastSuccessfulExecution = in.readBoolean() ? Execution.readFrom(in) : null; + Throttle lastThrottle = in.readBoolean() ? 
Throttle.readFrom(in) : null; + return new ActionStatus(ackStatus, lastExecution, lastSuccessfulExecution, lastThrottle); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Field.ACK_STATUS.getPreferredName(), ackStatus, params); + if (lastExecution != null) { + builder.field(Field.LAST_EXECUTION.getPreferredName(), lastExecution, params); + } + if (lastSuccessfulExecution != null) { + builder.field(Field.LAST_SUCCESSFUL_EXECUTION.getPreferredName(), lastSuccessfulExecution, params); + } + if (lastThrottle != null) { + builder.field(Field.LAST_THROTTLE.getPreferredName(), lastThrottle, params); + } + return builder.endObject(); + } + + public static ActionStatus parse(String watchId, String actionId, XContentParser parser) throws IOException { + AckStatus ackStatus = null; + Execution lastExecution = null; + Execution lastSuccessfulExecution = null; + Throttle lastThrottle = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Field.ACK_STATUS.match(currentFieldName, parser.getDeprecationHandler())) { + ackStatus = AckStatus.parse(watchId, actionId, parser); + } else if (Field.LAST_EXECUTION.match(currentFieldName, parser.getDeprecationHandler())) { + lastExecution = Execution.parse(watchId, actionId, parser); + } else if (Field.LAST_SUCCESSFUL_EXECUTION.match(currentFieldName, parser.getDeprecationHandler())) { + lastSuccessfulExecution = Execution.parse(watchId, actionId, parser); + } else if (Field.LAST_THROTTLE.match(currentFieldName, parser.getDeprecationHandler())) { + lastThrottle = Throttle.parse(watchId, actionId, parser); + } else { + throw new ElasticsearchParseException("could not parse action status for [{}/{}]. unexpected field [{}]", watchId, + actionId, currentFieldName); + } + } + if (ackStatus == null) { + throw new ElasticsearchParseException("could not parse action status for [{}/{}]. 
missing required field [{}]", watchId, + actionId, Field.ACK_STATUS.getPreferredName()); + } + return new ActionStatus(ackStatus, lastExecution, lastSuccessfulExecution, lastThrottle); + } + + public static class AckStatus implements ToXContentObject { + + public enum State { + AWAITS_SUCCESSFUL_EXECUTION((byte) 1), + ACKABLE((byte) 2), + ACKED((byte) 3); + + private byte value; + + State(byte value) { + this.value = value; + } + + static State resolve(byte value) { + switch (value) { + case 1 : return AWAITS_SUCCESSFUL_EXECUTION; + case 2 : return ACKABLE; + case 3 : return ACKED; + default: + throw illegalArgument("unknown action ack status state value [{}]", value); + } + } + } + + private final DateTime timestamp; + private final State state; + + public AckStatus(DateTime timestamp, State state) { + this.timestamp = timestamp.toDateTime(DateTimeZone.UTC); + this.state = state; + } + + public DateTime timestamp() { + return timestamp; + } + + public State state() { + return state; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + AckStatus ackStatus = (AckStatus) o; + + return Objects.equals(timestamp, ackStatus.timestamp) && Objects.equals(state, ackStatus.state); + } + + @Override + public int hashCode() { + return Objects.hash(timestamp, state); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field(Field.TIMESTAMP.getPreferredName()).value(dateTimeFormatter.printer().print(timestamp)) + .field(Field.ACK_STATUS_STATE.getPreferredName(), state.name().toLowerCase(Locale.ROOT)) + .endObject(); + } + + public static AckStatus parse(String watchId, String actionId, XContentParser parser) throws IOException { + DateTime timestamp = null; + State state = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Field.TIMESTAMP.match(currentFieldName, parser.getDeprecationHandler())) { + timestamp = dateTimeFormatter.parser().parseDateTime(parser.text()); + } else if (Field.ACK_STATUS_STATE.match(currentFieldName, parser.getDeprecationHandler())) { + state = State.valueOf(parser.text().toUpperCase(Locale.ROOT)); + } else { + throw new ElasticsearchParseException("could not parse action status for [{}/{}]. unexpected field [{}.{}]", watchId, + actionId, Field.ACK_STATUS.getPreferredName(), currentFieldName); + } + } + if (timestamp == null) { + throw new ElasticsearchParseException("could not parse action status for [{}/{}]. missing required field [{}.{}]", + watchId, actionId, Field.ACK_STATUS.getPreferredName(), Field.TIMESTAMP.getPreferredName()); + } + if (state == null) { + throw new ElasticsearchParseException("could not parse action status for [{}/{}]. 
missing required field [{}.{}]", + watchId, actionId, Field.ACK_STATUS.getPreferredName(), Field.ACK_STATUS_STATE.getPreferredName()); + } + return new AckStatus(timestamp, state); + } + + static void writeTo(AckStatus status, StreamOutput out) throws IOException { + out.writeLong(status.timestamp.getMillis()); + out.writeByte(status.state.value); + } + + static AckStatus readFrom(StreamInput in) throws IOException { + DateTime timestamp = new DateTime(in.readLong(), DateTimeZone.UTC); + State state = State.resolve(in.readByte()); + return new AckStatus(timestamp, state); + } + } + + public static class Execution implements ToXContentObject { + + public static Execution successful(DateTime timestamp) { + return new Execution(timestamp, true, null); + } + + public static Execution failure(DateTime timestamp, String reason) { + return new Execution(timestamp, false, reason); + } + + private final DateTime timestamp; + private final boolean successful; + private final String reason; + + private Execution(DateTime timestamp, boolean successful, String reason) { + this.timestamp = timestamp.toDateTime(DateTimeZone.UTC); + this.successful = successful; + this.reason = reason; + } + + public DateTime timestamp() { + return timestamp; + } + + public boolean successful() { + return successful; + } + + public String reason() { + return reason; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Execution execution = (Execution) o; + + return Objects.equals(successful, execution.successful) && + Objects.equals(timestamp, execution.timestamp) && + Objects.equals(reason, execution.reason); + } + + @Override + public int hashCode() { + return Objects.hash(timestamp, successful, reason); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Field.TIMESTAMP.getPreferredName()).value(dateTimeFormatter.printer().print(timestamp)); + builder.field(Field.EXECUTION_SUCCESSFUL.getPreferredName(), successful); + if (reason != null) { + builder.field(Field.REASON.getPreferredName(), reason); + } + return builder.endObject(); + } + + public static Execution parse(String watchId, String actionId, XContentParser parser) throws IOException { + DateTime timestamp = null; + Boolean successful = null; + String reason = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Field.TIMESTAMP.match(currentFieldName, parser.getDeprecationHandler())) { + timestamp = dateTimeFormatter.parser().parseDateTime(parser.text()); + } else if (Field.EXECUTION_SUCCESSFUL.match(currentFieldName, parser.getDeprecationHandler())) { + successful = parser.booleanValue(); + } else if (Field.REASON.match(currentFieldName, parser.getDeprecationHandler())) { + reason = parser.text(); + } else { + throw new ElasticsearchParseException("could not parse action status for [{}/{}]. unexpected field [{}.{}]", watchId, + actionId, Field.LAST_EXECUTION.getPreferredName(), currentFieldName); + } + } + if (timestamp == null) { + throw new ElasticsearchParseException("could not parse action status for [{}/{}]. 
missing required field [{}.{}]", + watchId, actionId, Field.LAST_EXECUTION.getPreferredName(), Field.TIMESTAMP.getPreferredName()); + } + if (successful == null) { + throw new ElasticsearchParseException("could not parse action status for [{}/{}]. missing required field [{}.{}]", + watchId, actionId, Field.LAST_EXECUTION.getPreferredName(), Field.EXECUTION_SUCCESSFUL.getPreferredName()); + } + if (successful) { + return successful(timestamp); + } + if (reason == null) { + throw new ElasticsearchParseException("could not parse action status for [{}/{}]. missing required field for unsuccessful" + + " execution [{}.{}]", watchId, actionId, Field.LAST_EXECUTION.getPreferredName(), Field.REASON.getPreferredName()); + } + return failure(timestamp, reason); + } + + public static void writeTo(Execution execution, StreamOutput out) throws IOException { + out.writeLong(execution.timestamp.getMillis()); + out.writeBoolean(execution.successful); + if (!execution.successful) { + out.writeString(execution.reason); + } + } + + public static Execution readFrom(StreamInput in) throws IOException { + DateTime timestamp = new DateTime(in.readLong(), DateTimeZone.UTC); + boolean successful = in.readBoolean(); + if (successful) { + return successful(timestamp); + } + return failure(timestamp, in.readString()); + } + } + + public static class Throttle implements ToXContentObject { + + private final DateTime timestamp; + private final String reason; + + public Throttle(DateTime timestamp, String reason) { + this.timestamp = timestamp.toDateTime(DateTimeZone.UTC); + this.reason = reason; + } + + public DateTime timestamp() { + return timestamp; + } + + public String reason() { + return reason; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Throttle throttle = (Throttle) o; + return Objects.equals(timestamp, throttle.timestamp) && Objects.equals(reason, throttle.reason); + } + + @Override + public int hashCode() { + return Objects.hash(timestamp, reason); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field(Field.TIMESTAMP.getPreferredName()).value(dateTimeFormatter.printer().print(timestamp)) + .field(Field.REASON.getPreferredName(), reason) + .endObject(); + } + + public static Throttle parse(String watchId, String actionId, XContentParser parser) throws IOException { + DateTime timestamp = null; + String reason = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Field.TIMESTAMP.match(currentFieldName, parser.getDeprecationHandler())) { + timestamp = dateTimeFormatter.parser().parseDateTime(parser.text()); + } else if (Field.REASON.match(currentFieldName, parser.getDeprecationHandler())) { + reason = parser.text(); + } else { + throw new ElasticsearchParseException("could not parse action status for [{}/{}]. unexpected field [{}.{}]", watchId, + actionId, Field.LAST_THROTTLE.getPreferredName(), currentFieldName); + } + } + if (timestamp == null) { + throw new ElasticsearchParseException("could not parse action status for [{}/{}]. 
missing required field [{}.{}]", + watchId, actionId, Field.LAST_THROTTLE.getPreferredName(), Field.TIMESTAMP.getPreferredName()); + } + if (reason == null) { + throw new ElasticsearchParseException("could not parse action status for [{}/{}]. missing required field [{}.{}]", + watchId, actionId, Field.LAST_THROTTLE.getPreferredName(), Field.REASON.getPreferredName()); + } + return new Throttle(timestamp, reason); + } + + static void writeTo(Throttle throttle, StreamOutput out) throws IOException { + out.writeLong(throttle.timestamp.getMillis()); + out.writeString(throttle.reason); + } + + static Throttle readFrom(StreamInput in) throws IOException { + DateTime timestamp = new DateTime(in.readLong(), DateTimeZone.UTC); + return new Throttle(timestamp, in.readString()); + } + } + + interface Field { + ParseField ACK_STATUS = new ParseField("ack"); + ParseField ACK_STATUS_STATE = new ParseField("state"); + + ParseField LAST_EXECUTION = new ParseField("last_execution"); + ParseField LAST_SUCCESSFUL_EXECUTION = new ParseField("last_successful_execution"); + ParseField EXECUTION_SUCCESSFUL = new ParseField("successful"); + + ParseField LAST_THROTTLE = new ParseField("last_throttle"); + + ParseField TIMESTAMP = new ParseField("timestamp"); + ParseField REASON = new ParseField("reason"); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionWrapper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionWrapper.java new file mode 100644 index 0000000000000..47d3500f2e920 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionWrapper.java @@ -0,0 +1,241 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.actions; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.watcher.actions.throttler.ActionThrottler; +import org.elasticsearch.xpack.core.watcher.actions.throttler.Throttler; +import org.elasticsearch.xpack.core.watcher.actions.throttler.ThrottlerField; +import org.elasticsearch.xpack.core.watcher.condition.Condition; +import org.elasticsearch.xpack.core.watcher.condition.ExecutableCondition; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; +import org.elasticsearch.xpack.core.watcher.transform.ExecutableTransform; +import org.elasticsearch.xpack.core.watcher.transform.Transform; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.core.watcher.watch.WatchField; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.time.Clock; +import java.util.Objects; + +import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; + +public class ActionWrapper implements ToXContentObject { + + private String id; + @Nullable + private final ExecutableCondition condition; + @Nullable + private final ExecutableTransform transform; + private final ActionThrottler throttler; + private final ExecutableAction action; + + public ActionWrapper(String id, ActionThrottler throttler, + @Nullable ExecutableCondition condition, + @Nullable ExecutableTransform transform, + ExecutableAction action) { + this.id = id; + this.condition = condition; + this.throttler = throttler; + this.transform = transform; + this.action = action; + } + + public String id() { + return id; + } + + public ExecutableCondition condition() { + return condition; + } + + public ExecutableTransform transform() { + return transform; + } + + public Throttler throttler() { + return throttler; + } + + public ExecutableAction action() { + return action; + } + + /** + * Execute the current {@link #action()}. + *

+ * This executes in the order of: + * <ol> + *     <li>Throttling</li> + *     <li>Conditional Check</li> + *     <li>Transformation</li> + *     <li>Action</li> + * </ol>
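+ * If this action was already executed as part of the current watch execution, the previously recorded result is returned and the action is not executed again.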
+ * + * @param ctx The current watch's context + * @return Never {@code null} + */ + public ActionWrapperResult execute(WatchExecutionContext ctx) { + ActionWrapperResult result = ctx.actionsResults().get(id); + if (result != null) { + return result; + } + if (!ctx.skipThrottling(id)) { + Throttler.Result throttleResult = throttler.throttle(id, ctx); + if (throttleResult.throttle()) { + if (throttleResult.type() == Throttler.Type.ACK) { + return new ActionWrapperResult(id, new Action.Result.Acknowledged(action.type(), throttleResult.reason())); + } else { + return new ActionWrapperResult(id, new Action.Result.Throttled(action.type(), throttleResult.reason())); + } + } + } + Condition.Result conditionResult = null; + if (condition != null) { + try { + conditionResult = condition.execute(ctx); + if (conditionResult.met() == false) { + ctx.watch().status().actionStatus(id).resetAckStatus(DateTime.now(DateTimeZone.UTC)); + return new ActionWrapperResult(id, conditionResult, null, + new Action.Result.ConditionFailed(action.type(), "condition not met. skipping")); + } + } catch (RuntimeException e) { + action.logger().error( + (Supplier) () -> new ParameterizedMessage( + "failed to execute action [{}/{}]. failed to execute condition", ctx.watch().id(), id), e); + return new ActionWrapperResult(id, new Action.Result.ConditionFailed(action.type(), + "condition failed. skipping: {}", e.getMessage())); + } + } + Payload payload = ctx.payload(); + Transform.Result transformResult = null; + if (transform != null) { + try { + transformResult = transform.execute(ctx, payload); + if (transformResult.status() == Transform.Result.Status.FAILURE) { + action.logger().error("failed to execute action [{}/{}]. failed to transform payload. {}", ctx.watch().id(), id, + transformResult.reason()); + String msg = "Failed to transform payload"; + return new ActionWrapperResult(id, conditionResult, transformResult, new Action.Result.Failure(action.type(), msg)); + } + payload = transformResult.payload(); + } catch (Exception e) { + action.logger().error( + (Supplier) () -> new ParameterizedMessage( + "failed to execute action [{}/{}]. 
failed to transform payload.", ctx.watch().id(), id), e); + return new ActionWrapperResult(id, conditionResult, null, new Action.Result.FailureWithException(action.type(), e)); + } + } + try { + Action.Result actionResult = action.execute(id, ctx, payload); + return new ActionWrapperResult(id, conditionResult, transformResult, actionResult); + } catch (Exception e) { + action.logger().error( + (Supplier) () -> new ParameterizedMessage("failed to execute action [{}/{}]", ctx.watch().id(), id), e); + return new ActionWrapperResult(id, new Action.Result.FailureWithException(action.type(), e)); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ActionWrapper that = (ActionWrapper) o; + + return Objects.equals(id, that.id) && + Objects.equals(condition, that.condition) && + Objects.equals(transform, that.transform) && + Objects.equals(action, that.action); + } + + @Override + public int hashCode() { + return Objects.hash(id, condition, transform, action); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + TimeValue throttlePeriod = throttler.throttlePeriod(); + if (throttlePeriod != null) { + builder.humanReadableField(ThrottlerField.THROTTLE_PERIOD.getPreferredName(), + ThrottlerField.THROTTLE_PERIOD_HUMAN.getPreferredName(), throttlePeriod); + } + if (condition != null) { + builder.startObject(WatchField.CONDITION.getPreferredName()) + .field(condition.type(), condition, params) + .endObject(); + } + if (transform != null) { + builder.startObject(Transform.TRANSFORM.getPreferredName()) + .field(transform.type(), transform, params) + .endObject(); + } + builder.field(action.type(), action, params); + return builder.endObject(); + } + + static ActionWrapper parse(String watchId, String actionId, XContentParser parser, ActionRegistry actionRegistry, Clock clock, + XPackLicenseState licenseState) throws IOException { + + assert parser.currentToken() == XContentParser.Token.START_OBJECT; + + ExecutableCondition condition = null; + ExecutableTransform transform = null; + TimeValue throttlePeriod = null; + ExecutableAction action = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else { + if (WatchField.CONDITION.match(currentFieldName, parser.getDeprecationHandler())) { + condition = actionRegistry.getConditionRegistry().parseExecutable(watchId, parser); + } else if (Transform.TRANSFORM.match(currentFieldName, parser.getDeprecationHandler())) { + transform = actionRegistry.getTransformRegistry().parse(watchId, parser); + } else if (ThrottlerField.THROTTLE_PERIOD.match(currentFieldName, parser.getDeprecationHandler())) { + throttlePeriod = timeValueMillis(parser.longValue()); + } else if (ThrottlerField.THROTTLE_PERIOD_HUMAN.match(currentFieldName, parser.getDeprecationHandler())) { + try { + throttlePeriod = WatcherDateTimeUtils.parseTimeValue(parser, ThrottlerField.THROTTLE_PERIOD_HUMAN.toString()); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse action [{}/{}]. 
failed to parse field [{}] as time value", + pe, watchId, actionId, currentFieldName); + } + } else { + // it's the type of the action + ActionFactory actionFactory = actionRegistry.factory(currentFieldName); + if (actionFactory == null) { + throw new ElasticsearchParseException("could not parse action [{}/{}]. unknown action type [{}]", watchId, + actionId, currentFieldName); + } + action = actionFactory.parseExecutable(watchId, actionId, parser); + } + } + } + if (action == null) { + throw new ElasticsearchParseException("could not parse watch action [{}/{}]. missing action type", watchId, actionId); + } + + ActionThrottler throttler = new ActionThrottler(clock, throttlePeriod, licenseState); + return new ActionWrapper(actionId, throttler, condition, transform, action); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionWrapperField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionWrapperField.java new file mode 100644 index 0000000000000..f3a45e4c11c47 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionWrapperField.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.actions; + +import org.elasticsearch.common.ParseField; + +public final class ActionWrapperField { + public static final ParseField ID = new ParseField("id"); + public static final ParseField TYPE = new ParseField("type"); + public static final ParseField STATUS = new ParseField("status"); + + private ActionWrapperField() {} +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionWrapperResult.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionWrapperResult.java new file mode 100644 index 0000000000000..f2a29e159c02f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionWrapperResult.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.actions; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.watcher.condition.Condition; +import org.elasticsearch.xpack.core.watcher.transform.Transform; +import org.elasticsearch.xpack.core.watcher.watch.WatchField; + +import java.io.IOException; +import java.util.Objects; + +public class ActionWrapperResult implements ToXContentObject { + + private final String id; + @Nullable + private final Condition.Result condition; + @Nullable + private final Transform.Result transform; + private final Action.Result action; + + public ActionWrapperResult(String id, Action.Result action) { + this(id, null, null, action); + } + + public ActionWrapperResult(String id, @Nullable Condition.Result condition, @Nullable Transform.Result transform, + Action.Result action) { + this.id = id; + this.condition = condition; + this.transform = transform; + this.action = action; + } + + public String id() { + return id; + } + + public Condition.Result condition() { + return condition; + } + + public Transform.Result transform() { + return transform; + } + + public Action.Result action() { + return action; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ActionWrapperResult result = (ActionWrapperResult) o; + + return Objects.equals(id, result.id) && + Objects.equals(condition, result.condition) && + Objects.equals(transform, result.transform) && + Objects.equals(action, result.action); + } + + @Override + public int hashCode() { + return Objects.hash(id, condition, transform, action); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ActionWrapperField.ID.getPreferredName(), id); + builder.field(ActionWrapperField.TYPE.getPreferredName(), action.type()); + builder.field(ActionWrapperField.STATUS.getPreferredName(), action.status().value()); + if (condition != null) { + builder.field(WatchField.CONDITION.getPreferredName(), condition, params); + } + if (transform != null) { + builder.field(Transform.TRANSFORM.getPreferredName(), transform, params); + } + action.toXContent(builder, params); + return builder.endObject(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ExecutableAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ExecutableAction.java new file mode 100644 index 0000000000000..457f10249e599 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ExecutableAction.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.actions; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; + +import java.io.IOException; + +public abstract class ExecutableAction implements ToXContentObject { + + protected final A action; + protected final Logger logger; + + protected ExecutableAction(A action, Logger logger) { + this.action = action; + this.logger = logger; + } + + /** + * @return the type of this action + */ + public String type() { + return action.type(); + } + + public A action() { + return action; + } + + /** + * yack... needed to expose that for testing purposes + */ + public Logger logger() { + return logger; + } + + public abstract Action.Result execute(String actionId, WatchExecutionContext context, Payload payload) throws Exception; + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ExecutableAction that = (ExecutableAction) o; + + return action.equals(that.action); + } + + @Override + public int hashCode() { + return action.hashCode(); + } + + @Override + public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return action.toXContent(builder, params); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/AckThrottler.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/AckThrottler.java new file mode 100644 index 0000000000000..370e77dcb2e60 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/AckThrottler.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.actions.throttler; + +import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; +import org.elasticsearch.xpack.core.watcher.actions.ActionStatus.AckStatus; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; + +import static org.elasticsearch.xpack.core.watcher.actions.throttler.Throttler.Type.ACK; +import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.formatDate; + +public class AckThrottler implements Throttler { + + @Override + public Result throttle(String actionId, WatchExecutionContext ctx) { + ActionStatus actionStatus = ctx.watch().status().actionStatus(actionId); + AckStatus ackStatus = actionStatus.ackStatus(); + if (ackStatus.state() == AckStatus.State.ACKED) { + return Result.throttle(ACK, "action [{}] was acked at [{}]", actionId, formatDate(ackStatus.timestamp())); + } + return Result.NO; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/ActionThrottler.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/ActionThrottler.java new file mode 100644 index 0000000000000..d02ee20def0df --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/ActionThrottler.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.actions.throttler; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; + +import java.time.Clock; + +import static org.elasticsearch.xpack.core.watcher.actions.throttler.Throttler.Type.LICENSE; + +public class ActionThrottler implements Throttler { + + private static final AckThrottler ACK_THROTTLER = new AckThrottler(); + + private final XPackLicenseState licenseState; + private final PeriodThrottler periodThrottler; + private final AckThrottler ackThrottler; + + public ActionThrottler(Clock clock, @Nullable TimeValue throttlePeriod, XPackLicenseState licenseState) { + this(new PeriodThrottler(clock, throttlePeriod), ACK_THROTTLER, licenseState); + } + + ActionThrottler(PeriodThrottler periodThrottler, AckThrottler ackThrottler, XPackLicenseState licenseState) { + this.periodThrottler = periodThrottler; + this.ackThrottler = ackThrottler; + this.licenseState = licenseState; + } + + public TimeValue throttlePeriod() { + return periodThrottler != null ? 
periodThrottler.period() : null; + } + + @Override + public Result throttle(String actionId, WatchExecutionContext ctx) { + if (licenseState.isWatcherAllowed() == false) { + return Result.throttle(LICENSE, "watcher license does not allow action execution"); + } + if (periodThrottler != null) { + Result throttleResult = periodThrottler.throttle(actionId, ctx); + if (throttleResult.throttle()) { + return throttleResult; + } + } + return ackThrottler.throttle(actionId, ctx); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/PeriodThrottler.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/PeriodThrottler.java new file mode 100644 index 0000000000000..34f4af70b8354 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/PeriodThrottler.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.actions.throttler; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; + +import java.time.Clock; + +import static org.elasticsearch.xpack.core.watcher.actions.throttler.Throttler.Type.PERIOD; + +/** + * This throttler throttles the action based on its last successful execution time. If the time passed since + * the last successful execution is lower than the given period, the aciton will be throttled. + */ +public class PeriodThrottler implements Throttler { + + @Nullable private final TimeValue period; + private final Clock clock; + + public PeriodThrottler(Clock clock, TimeValue period) { + this.period = period; + this.clock = clock; + } + + public TimeValue period() { + return period; + } + + @Override + public Result throttle(String actionId, WatchExecutionContext ctx) { + TimeValue period = this.period; + if (period == null) { + // falling back on the throttle period of the watch + period = ctx.watch().throttlePeriod(); + } + if (period == null) { + // falling back on the default throttle period of watcher + period = ctx.defaultThrottlePeriod(); + } + ActionStatus status = ctx.watch().status().actionStatus(actionId); + if (status.lastSuccessfulExecution() == null) { + return Result.NO; + } + TimeValue timeElapsed = TimeValue.timeValueMillis(clock.millis() - status.lastSuccessfulExecution().timestamp().getMillis()); + if (timeElapsed.getMillis() <= period.getMillis()) { + return Result.throttle(PERIOD, "throttling interval is set to [{}] but time elapsed since last execution is [{}]", + period, timeElapsed); + } + return Result.NO; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/Throttler.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/Throttler.java new file mode 100644 index 0000000000000..af0b8ae02c5a3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/Throttler.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.actions.throttler; + +import org.elasticsearch.common.logging.LoggerMessageFormat; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; + +import static org.elasticsearch.xpack.core.watcher.actions.throttler.Throttler.Type.NONE; + +public interface Throttler { + + Result throttle(String actionId, WatchExecutionContext ctx); + + enum Type { + // throttling happened because a user actively acknowledged the action, which means it is muted until the condition becomes false + // the current implementation uses an implementation of a throttler to decide that an action should not be executed because + // it has been acked/muted before + ACK, + + // throttling happened because of license reasons + LICENSE, + + // time based throttling for a certain period of time + PERIOD, + + // no throttling, used to indicate a not throttledresult + NONE; + } + + class Result { + + public static final Result NO = new Result(NONE, false, null); + + private Type type; + private final boolean throttle; + private final String reason; + + private Result(Type type, boolean throttle, String reason) { + this.type = type; + this.throttle = throttle; + this.reason = reason; + } + + public static Result throttle(Type type, String reason, Object... args) { + return new Result(type, true, LoggerMessageFormat.format(reason, args)); + } + + public boolean throttle() { + return throttle; + } + + public String reason() { + return reason; + } + + public Type type() { + return type; + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/ThrottlerField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/ThrottlerField.java new file mode 100644 index 0000000000000..7d0e6698f917c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/ThrottlerField.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.actions.throttler; + +import org.elasticsearch.common.ParseField; + +public final class ThrottlerField { + public static final ParseField THROTTLE_PERIOD = new ParseField("throttle_period_in_millis"); + public static final ParseField THROTTLE_PERIOD_HUMAN = new ParseField("throttle_period"); + + private ThrottlerField() {} +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/client/WatchSourceBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/client/WatchSourceBuilder.java new file mode 100644 index 0000000000000..b82e9b641095c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/client/WatchSourceBuilder.java @@ -0,0 +1,224 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.client; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.actions.throttler.ThrottlerField; +import org.elasticsearch.xpack.core.watcher.condition.AlwaysCondition; +import org.elasticsearch.xpack.core.watcher.condition.Condition; +import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.core.watcher.input.none.NoneInput; +import org.elasticsearch.xpack.core.watcher.support.Exceptions; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.core.watcher.transform.Transform; +import org.elasticsearch.xpack.core.watcher.trigger.Trigger; +import org.elasticsearch.xpack.core.watcher.watch.WatchField; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; + +public class WatchSourceBuilder implements ToXContentObject { + + private Trigger trigger; + private Input input = NoneInput.INSTANCE; + private Condition condition = AlwaysCondition.INSTANCE; + private Transform transform = null; + private Map actions = new HashMap<>(); + private TimeValue defaultThrottlePeriod = null; + private Map metadata; + + public WatchSourceBuilder trigger(Trigger.Builder trigger) { + return trigger(trigger.build()); + } + + public WatchSourceBuilder trigger(Trigger trigger) { + this.trigger = trigger; + return this; + } + + public WatchSourceBuilder input(Input.Builder input) { + return input(input.build()); + } + + public WatchSourceBuilder input(Input input) { + this.input = input; + return this; + } + + public WatchSourceBuilder condition(Condition condition) { + this.condition = condition; + return this; + } + + public WatchSourceBuilder transform(Transform transform) { + this.transform = transform; + return this; + } + + public WatchSourceBuilder transform(Transform.Builder transform) { + return transform(transform.build()); + } + + public WatchSourceBuilder defaultThrottlePeriod(TimeValue throttlePeriod) { + this.defaultThrottlePeriod = throttlePeriod; + return this; + } + + public WatchSourceBuilder addAction(String id, Action.Builder action) { + return addAction(id, null, null, action.build()); + } + + public WatchSourceBuilder addAction(String id, TimeValue throttlePeriod, Action.Builder action) { + return addAction(id, throttlePeriod, null, action.build()); + } + + public WatchSourceBuilder addAction(String id, Transform.Builder transform, Action.Builder action) { + return addAction(id, null, transform.build(), action.build()); + } + + public WatchSourceBuilder addAction(String id, Condition condition, Action.Builder action) { + return addAction(id, null, condition, null, action.build()); + } + + public WatchSourceBuilder addAction(String id, TimeValue throttlePeriod, Transform.Builder transform, Action.Builder action) { + return addAction(id, throttlePeriod, 
transform.build(), action.build()); + } + + public WatchSourceBuilder addAction(String id, TimeValue throttlePeriod, Transform transform, Action action) { + actions.put(id, new TransformedAction(id, action, throttlePeriod, null, transform)); + return this; + } + + public WatchSourceBuilder addAction(String id, TimeValue throttlePeriod, Condition condition, Transform.Builder transform, + Action.Builder action) { + return addAction(id, throttlePeriod, condition, transform.build(), action.build()); + } + + public WatchSourceBuilder addAction(String id, TimeValue throttlePeriod, Condition condition, Transform transform, Action action) { + actions.put(id, new TransformedAction(id, action, throttlePeriod, condition, transform)); + return this; + } + + public WatchSourceBuilder metadata(Map metadata) { + this.metadata = metadata; + return this; + } + + public XContentSource build() throws IOException { + try (XContentBuilder builder = jsonBuilder()) { + return new XContentSource(toXContent(builder, ToXContent.EMPTY_PARAMS)); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + if (trigger == null) { + throw Exceptions.illegalState("failed to build watch source. no trigger defined"); + } + builder.startObject(WatchField.TRIGGER.getPreferredName()) + .field(trigger.type(), trigger, params) + .endObject(); + + builder.startObject(WatchField.INPUT.getPreferredName()) + .field(input.type(), input, params) + .endObject(); + + builder.startObject(WatchField.CONDITION.getPreferredName()) + .field(condition.type(), condition, params) + .endObject(); + + if (transform != null) { + builder.startObject(WatchField.TRANSFORM.getPreferredName()) + .field(transform.type(), transform, params) + .endObject(); + } + + if (defaultThrottlePeriod != null) { + builder.humanReadableField(WatchField.THROTTLE_PERIOD.getPreferredName(), + WatchField.THROTTLE_PERIOD_HUMAN.getPreferredName(), defaultThrottlePeriod); + } + + builder.startObject(WatchField.ACTIONS.getPreferredName()); + for (Map.Entry entry : actions.entrySet()) { + builder.field(entry.getKey(), entry.getValue(), params); + } + builder.endObject(); + + if (metadata != null) { + builder.field(WatchField.METADATA.getPreferredName(), metadata); + } + + return builder.endObject(); + } + + /** + * Returns a {@link org.elasticsearch.common.bytes.BytesReference} + * containing the {@link ToXContent} output in binary format. 
Builds the + * request as the provided contentType + */ + public final BytesReference buildAsBytes(XContentType contentType) { + try { + WatcherParams params = WatcherParams.builder().hideSecrets(false).build(); + return XContentHelper.toXContent(this, contentType, params,false); + } catch (Exception e) { + throw new ElasticsearchException("Failed to build ToXContent", e); + } + } + + static class TransformedAction implements ToXContentObject { + + private final String id; + private final Action action; + @Nullable private final TimeValue throttlePeriod; + @Nullable private final Condition condition; + @Nullable private final Transform transform; + + TransformedAction(String id, Action action, @Nullable TimeValue throttlePeriod, + @Nullable Condition condition, @Nullable Transform transform) { + this.id = id; + this.throttlePeriod = throttlePeriod; + this.condition = condition; + this.transform = transform; + this.action = action; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (throttlePeriod != null) { + builder.humanReadableField(ThrottlerField.THROTTLE_PERIOD.getPreferredName(), + ThrottlerField.THROTTLE_PERIOD_HUMAN.getPreferredName(), throttlePeriod); + } + if (condition != null) { + builder.startObject(WatchField.CONDITION.getPreferredName()) + .field(condition.type(), condition, params) + .endObject(); + } + if (transform != null) { + builder.startObject(Transform.TRANSFORM.getPreferredName()) + .field(transform.type(), transform, params) + .endObject(); + } + builder.field(action.type(), action, params); + return builder.endObject(); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/client/WatcherClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/client/WatcherClient.java new file mode 100644 index 0000000000000..10c4f0fffc37c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/client/WatcherClient.java @@ -0,0 +1,326 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.client; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchRequestBuilder; +import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.activate.ActivateWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.activate.ActivateWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.activate.ActivateWatchRequestBuilder; +import org.elasticsearch.xpack.core.watcher.transport.actions.activate.ActivateWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.delete.DeleteWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.delete.DeleteWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.delete.DeleteWatchRequestBuilder; +import org.elasticsearch.xpack.core.watcher.transport.actions.delete.DeleteWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchRequestBuilder; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchRequestBuilder; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequestBuilder; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceRequestBuilder; +import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsRequestBuilder; +import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsResponse; + +import java.util.Map; + +public class WatcherClient { + + private final Client client; + + public WatcherClient(Client client) { + this.client = client; + } + + /** + * Creates a request builder that gets an watch by id + * + * @param id the id of the watch + * @return The request builder + */ + public GetWatchRequestBuilder prepareGetWatch(String id) { + return new GetWatchRequestBuilder(client, id); + } + + /** + * Creates a request builder 
that gets an watch + * + * @return the request builder + */ + public GetWatchRequestBuilder prepareGetWatch() { + return new GetWatchRequestBuilder(client); + } + + /** + * Gets an watch from the watch index + * + * @param request The get watch request + * @param listener The listener for the get watch response containing the GetResponse for this watch + */ + public void getWatch(GetWatchRequest request, ActionListener listener) { + client.execute(GetWatchAction.INSTANCE, request, listener); + } + + /** + * Gets an watch from the watch index + * + * @param request The get watch request with the watch id + * @return The response containing the GetResponse for this watch + */ + public ActionFuture getWatch(GetWatchRequest request) { + return client.execute(GetWatchAction.INSTANCE, request); + } + + /** + * Creates a request builder to delete an watch by id + * + * @param id the id of the watch + * @return The request builder + */ + public DeleteWatchRequestBuilder prepareDeleteWatch(String id) { + return new DeleteWatchRequestBuilder(client, id); + } + + /** + * Creates a request builder that deletes an watch + * + * @return The request builder + */ + public DeleteWatchRequestBuilder prepareDeleteWatch() { + return new DeleteWatchRequestBuilder(client); + } + + /** + * Deletes an watch + * + * @param request The delete request with the watch id to be deleted + * @param listener The listener for the delete watch response containing the DeleteResponse for this action + */ + public void deleteWatch(DeleteWatchRequest request, ActionListener listener) { + client.execute(DeleteWatchAction.INSTANCE, request, listener); + } + + /** + * Deletes an watch + * + * @param request The delete request with the watch id to be deleted + * @return The response containing the DeleteResponse for this action + */ + public ActionFuture deleteWatch(DeleteWatchRequest request) { + return client.execute(DeleteWatchAction.INSTANCE, request); + } + + /** + * Creates a request builder to build a request to put an watch + * + * @param id The id of the watch to put + * @return The builder to create the watch + */ + public PutWatchRequestBuilder preparePutWatch(String id) { + return new PutWatchRequestBuilder(client, id); + } + + /** + * Creates a request builder to build a request to put a watch + * + * @return The builder + */ + public PutWatchRequestBuilder preparePutWatch() { + return new PutWatchRequestBuilder(client); + } + + /** + * Adds the given watch to the watcher + * + * @param request The request containing the watch to be added + * @param listener The listener for the response containing the IndexResponse for this watch + */ + public void putWatch(PutWatchRequest request, ActionListener listener) { + client.execute(PutWatchAction.INSTANCE, request, listener); + } + + /** + * Adds a new watch + * + * @param request The request containing the watch to be added + * @return The response containing the IndexResponse for this watch + */ + public ActionFuture putWatch(PutWatchRequest request) { + return client.execute(PutWatchAction.INSTANCE, request); + } + + /** + * Gets the watcher stats + * + * @param request The request for the watcher stats + * @return The response containing the StatsResponse for this action + */ + public ActionFuture watcherStats(WatcherStatsRequest request) { + return client.execute(WatcherStatsAction.INSTANCE, request); + } + + /** + * Creates a request builder to build a request to get the watcher stats + * + * @return The builder get the watcher stats + */ + public 
WatcherStatsRequestBuilder prepareWatcherStats() { + return new WatcherStatsRequestBuilder(client); + } + + /** + * Gets the watcher stats + * + * @param request The request for the watcher stats + * @param listener The listener for the response containing the WatcherStatsResponse + */ + public void watcherStats(WatcherStatsRequest request, ActionListener listener) { + client.execute(WatcherStatsAction.INSTANCE, request, listener); + } + + /** + * Creates a request builder to ack a watch by id + * + * @param id the id of the watch + * @return The request builder + */ + public AckWatchRequestBuilder prepareAckWatch(String id) { + return new AckWatchRequestBuilder(client, id); + } + + /** + * Ack a watch + * + * @param request The ack request with the watch id to be acked + * @param listener The listener for the ack watch response + */ + public void ackWatch(AckWatchRequest request, ActionListener listener) { + client.execute(AckWatchAction.INSTANCE, request, listener); + } + + /** + * Acks a watch + * + * @param request The ack request with the watch id to be acked + * @return The AckWatchResponse + */ + public ActionFuture ackWatch(AckWatchRequest request) { + return client.execute(AckWatchAction.INSTANCE, request); + } + + /** + * Creates a request builder to activate a watch by id + * + * @param id the id of the watch + * @param activate indicates whether to activate or deactivate the watch + * @return The request builder + */ + public ActivateWatchRequestBuilder prepareActivateWatch(String id, boolean activate) { + return new ActivateWatchRequestBuilder(client, id, activate); + } + + /** + * Activate a watch + * + * @param request The activate request with the watch id + * @param listener The listener for the activate watch response + */ + public void activateWatch(ActivateWatchRequest request, ActionListener listener) { + client.execute(ActivateWatchAction.INSTANCE, request, listener); + } + + /** + * Activates a watch + * + * @param request The de/activate request with the watch id. + */ + public ActionFuture activateWatch(ActivateWatchRequest request) { + return client.execute(ActivateWatchAction.INSTANCE, request); + } + + + /** + * Prepare a watch service request. + */ + public WatcherServiceRequestBuilder prepareWatchService() { + return new WatcherServiceRequestBuilder(client); + } + + /** + * Perform an watcher service request to either start, stop or restart the service. + */ + public void watcherService(WatcherServiceRequest request, ActionListener listener) { + client.execute(WatcherServiceAction.INSTANCE, request, listener); + } + + /** + * Perform an watcher service request to either start, stop or restart the service. 
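+ * + * @return The response containing the WatcherServiceResponse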
+ */ + public ActionFuture watcherService(WatcherServiceRequest request) { + return client.execute(WatcherServiceAction.INSTANCE, request); + } + + + + /** + * Creates a request builder to execute a watch by id + * + * @param id the id of the watch + * @return The request builder + */ + public ExecuteWatchRequestBuilder prepareExecuteWatch(String id) { + return new ExecuteWatchRequestBuilder(client, id); + } + + /** + * Creates a request builder that executes a watch + * + * @return The request builder + */ + public ExecuteWatchRequestBuilder prepareExecuteWatch() { + return new ExecuteWatchRequestBuilder(client); + } + + /** + * executes a watch + * + * @param request The run request with the watch id to be executed + * @param listener The listener for the run watch response + */ + public void executeWatch(ExecuteWatchRequest request, ActionListener listener) { + client.execute(ExecuteWatchAction.INSTANCE, request, listener); + } + + /** + * Executes an watch + * + * @param request The execute request with the watch id to be executed + * @return The AckWatchResponse + */ + public ActionFuture executeWatch(ExecuteWatchRequest request) { + return client.execute(ExecuteWatchAction.INSTANCE, request); + } + + public WatcherClient filterWithHeader(Map headers) { + return new WatcherClient(client.filterWithHeader(headers)); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/secret/Secret.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/secret/Secret.java new file mode 100644 index 0000000000000..46ecd299c8157 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/secret/Secret.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.common.secret; + +import org.elasticsearch.xpack.core.watcher.crypto.CryptoService; + +import java.io.IOException; +import java.util.Arrays; + +public class Secret { + + protected final char[] text; + + public Secret(char[] text) { + this.text = text; + } + + public char[] text(CryptoService service) { + if (service == null) { + return text; + } + return service.decrypt(text); + } + + public String value() throws IOException { + return new String(text); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Secret secret = (Secret) o; + + return Arrays.equals(text, secret.text); + } + + @Override + public int hashCode() { + return Arrays.hashCode(text); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/stats/Counters.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/stats/Counters.java new file mode 100644 index 0000000000000..bdf923a79d0e0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/stats/Counters.java @@ -0,0 +1,138 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.common.stats; + +import com.carrotsearch.hppc.ObjectLongHashMap; +import com.carrotsearch.hppc.cursors.ObjectLongCursor; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Helper class to create simple usage stat counters based on longs + * Internally this is a map mapping from String to a long, which is the counter + * Calling toNestedMap() will create a nested map, where each dot of the key name will nest deeper + * The main reason for this class is that the stats producer should not be worried about how the map is actually nested + */ +public class Counters implements Streamable { + + private ObjectLongHashMap counters = new ObjectLongHashMap<>(); + + public Counters(String ... names) { + for (String name : names) { + set(name); + } + } + + /** + * Sets a counter. This ensures that the counter is there, even though it is never incremented. + * @param name Name of the counter + */ + public void set(String name) { + counters.put(name, 0); + } + + /** + * Increment the counter by one + * @param name Name of the counter + */ + public void inc(String name) { + inc(name, 1); + } + + /** + * Increment the counter by configured number + * @param name The name of the counter + * @param count Incremental value + */ + public void inc(String name, long count) { + counters.addTo(name, count); + } + + public long get(String name) { + return counters.get(name); + } + + public long size() { + return counters.size(); + } + + public boolean hasCounters() { + return size() > 0; + } + + /** + * Convert the counters to a nested map, using the "." 
as a splitter to create deeper maps + * @return A nested map with all the current configured counters + */ + @SuppressWarnings("unchecked") + public Map toNestedMap() { + Map map = new HashMap<>(); + for (ObjectLongCursor counter : counters) { + if (counter.key.contains(".")) { + String[] parts = counter.key.split("\\."); + Map curr = map; + for (int i = 0; i < parts.length; i++) { + String part = parts[i]; + boolean isLast = i == parts.length - 1; + if (isLast == false) { + if (curr.containsKey(part) == false) { + curr.put(part, new HashMap()); + curr = (Map) curr.get(part); + } else { + curr = (Map) curr.get(part); + } + } else { + curr.put(part, counter.value); + } + } + } else { + map.put(counter.key, counter.value); + } + } + + return map; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + int counters = in.readVInt(); + for (int i = 0; i < counters; i++) { + inc(in.readString(), in.readVLong()); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(counters.size()); + for (ObjectLongCursor cursor : counters) { + out.writeString(cursor.key); + out.writeVLong(cursor.value); + } + } + + public static Counters read(StreamInput in) throws IOException { + Counters counters = new Counters(); + counters.readFrom(in); + return counters; + } + + public static Counters merge(List counters) { + Counters result = new Counters(); + for (Counters c : counters) { + for (ObjectLongCursor cursor : c.counters) { + result.inc(cursor.key, cursor.value); + } + } + + return result; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/xcontent/XContentUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/xcontent/XContentUtils.java new file mode 100644 index 0000000000000..da8ac3ef9d8f1 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/xcontent/XContentUtils.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.common.xcontent; + +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public class XContentUtils { + + private XContentUtils() { + } + + // TODO open this up in core + public static Object readValue(XContentParser parser, XContentParser.Token token) throws IOException { + if (token == XContentParser.Token.VALUE_NULL) { + return null; + } else if (token == XContentParser.Token.VALUE_STRING) { + return parser.text(); + } else if (token == XContentParser.Token.VALUE_NUMBER) { + XContentParser.NumberType numberType = parser.numberType(); + if (numberType == XContentParser.NumberType.INT) { + return parser.intValue(); + } else if (numberType == XContentParser.NumberType.LONG) { + return parser.longValue(); + } else if (numberType == XContentParser.NumberType.FLOAT) { + return parser.floatValue(); + } else if (numberType == XContentParser.NumberType.DOUBLE) { + return parser.doubleValue(); + } + } else if (token == XContentParser.Token.VALUE_BOOLEAN) { + return parser.booleanValue(); + } else if (token == XContentParser.Token.START_OBJECT) { + return parser.map(); + } else if (token == XContentParser.Token.START_ARRAY) { + return parser.list(); + } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { + return parser.binaryValue(); + } + return null; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/condition/AlwaysCondition.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/condition/AlwaysCondition.java new file mode 100644 index 0000000000000..20aafa7c2769f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/condition/AlwaysCondition.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.condition; + +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +public class AlwaysCondition implements Condition { + public static final String TYPE = "always"; + public static final Condition INSTANCE = new AlwaysCondition(); + + protected AlwaysCondition() { } + + @Override + public boolean equals(Object obj) { + return obj instanceof AlwaysCondition; + } + + @Override + public int hashCode() { + // All instances have to produce the same hashCode because they are all equal + return 0; + } + + @Override + public String type() { + return TYPE; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject().endObject(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/condition/Condition.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/condition/Condition.java new file mode 100644 index 0000000000000..c21f906685f2e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/condition/Condition.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.core.watcher.condition; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Locale; +import java.util.Map; + +public interface Condition extends ToXContentObject { + + /** + * @return the type of this condition + */ + String type(); + + class Result implements ToXContentObject { // don't make this final - we can't mock final classes :( + + public Map getResolvedValues() { + return resolveValues; + } + + public enum Status { + SUCCESS, FAILURE + } + + private final String type; + private final Status status; + private final String reason; + private final boolean met; + @Nullable + private final Map resolveValues; + + public Result(Map resolveValues, String type, boolean met) { + // TODO: FAILURE status is never used, but some code assumes that it is used + this.status = Status.SUCCESS; + this.type = type; + this.met = met; + this.reason = null; + this.resolveValues = resolveValues; + } + + public String type() { + return type; + } + + public Status status() { + return status; + } + + public boolean met() { + return met; + } + + public String reason() { + assert status == Status.FAILURE; + return reason; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("type", type); + builder.field("status", status.name().toLowerCase(Locale.ROOT)); + switch (status) { + case SUCCESS: + assert reason == null; + builder.field("met", met); + break; + case FAILURE: + assert reason != null && !met; + builder.field("reason", reason); + break; + default: + assert false; + } + if (resolveValues != null) { + builder.startObject(type).field("resolved_values", resolveValues).endObject(); + } + return builder.endObject(); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/condition/ConditionFactory.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/condition/ConditionFactory.java new file mode 100644 index 0000000000000..37b923ff73bf7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/condition/ConditionFactory.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.condition; + +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.time.Clock; + +/** + * Parses xcontent to a concrete condition of the same type.
+ */ +public interface ConditionFactory { + + /** + * Parses the given xcontent and creates a concrete condition + * @param watchId The id of the watch + * @param parser The parser that contains the condition content + */ + ExecutableCondition parse(Clock clock, String watchId, XContentParser parser) throws IOException; + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/condition/ConditionRegistry.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/condition/ConditionRegistry.java new file mode 100644 index 0000000000000..769f95f7c4948 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/condition/ConditionRegistry.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.condition; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.time.Clock; +import java.util.Map; + +public class ConditionRegistry { + + private final Map factories; + private final Clock clock; + + public ConditionRegistry(Map factories, Clock clock) { + this.clock = clock; + this.factories = factories; + } + + /** + * Parses the xcontent and returns the appropriate executable condition. Expecting the following format: + * <pre>
+     *     {
+     *         "condition_type" : {
+     *             ...              //condition body
+     *         }
+     *     }
+     * </pre>
+ * + * @param watchId The id of the watch + * @param parser The parser that contains the condition content + */ + public ExecutableCondition parseExecutable(String watchId, XContentParser parser) throws IOException { + ExecutableCondition condition = null; + ConditionFactory factory; + + String type = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + type = parser.currentName(); + } else if (type == null) { + throw new ElasticsearchParseException("could not parse condition for watch [{}]. invalid definition. expected a field " + + "indicating the condition type, but found", watchId, token); + } else { + factory = factories.get(type); + if (factory == null) { + throw new ElasticsearchParseException("could not parse condition for watch [{}]. unknown condition type [{}]", + watchId, type); + } + condition = factory.parse(clock, watchId, parser); + } + } + if (condition == null) { + throw new ElasticsearchParseException("could not parse condition for watch [{}]. missing required condition type field", + watchId); + } + return condition; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/condition/ExecutableCondition.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/condition/ExecutableCondition.java new file mode 100644 index 0000000000000..deae53bd0c7d3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/condition/ExecutableCondition.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.condition; + +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; + +public interface ExecutableCondition extends Condition { + + /** + * Executes this condition + */ + Result execute(WatchExecutionContext ctx); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java new file mode 100644 index 0000000000000..b1f3a32769ec9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java @@ -0,0 +1,212 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.core.watcher.crypto; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.watcher.WatcherField; +import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.core.security.authc.support.CharArrays; + +import javax.crypto.BadPaddingException; +import javax.crypto.Cipher; +import javax.crypto.IllegalBlockSizeException; +import javax.crypto.SecretKey; +import javax.crypto.spec.IvParameterSpec; +import javax.crypto.spec.SecretKeySpec; + +import java.io.IOException; +import java.io.InputStream; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.util.Arrays; +import java.util.Base64; +import java.util.List; + +/** + * Service that provides cryptographic methods based on a shared system key + */ +public class CryptoService extends AbstractComponent { + + public static final String KEY_ALGO = "HmacSHA512"; + public static final int KEY_SIZE = 1024; + + public static final String ENCRYPTED_TEXT_PREFIX = "::es_encrypted::"; + + // the encryption used in this class was picked when Java 7 was still the min. supported + // version. The use of counter mode was chosen to simplify the need to deal with padding + // and for its speed. 128 bit key length is chosen due to the JCE policy that ships by + // default with the Oracle JDK. + // TODO: with better support in Java 8, we should consider moving to use AES GCM as it + // also provides authentication of the encrypted data, which is something that we are + // missing here. + private static final String DEFAULT_ENCRYPTION_ALGORITHM = "AES/CTR/NoPadding"; + private static final String DEFAULT_KEY_ALGORITH = "AES"; + private static final int DEFAULT_KEY_LENGTH = 128; + + private static final Setting ENCRYPTION_ALGO_SETTING = + new Setting<>(SecurityField.setting("encryption.algorithm"), s -> DEFAULT_ENCRYPTION_ALGORITHM, s -> s, Property.NodeScope); + private static final Setting ENCRYPTION_KEY_LENGTH_SETTING = + Setting.intSetting(SecurityField.setting("encryption_key.length"), DEFAULT_KEY_LENGTH, Property.NodeScope); + private static final Setting ENCRYPTION_KEY_ALGO_SETTING = + new Setting<>(SecurityField.setting("encryption_key.algorithm"), DEFAULT_KEY_ALGORITH, s -> s, Property.NodeScope); + + private final SecureRandom secureRandom = new SecureRandom(); + private final String encryptionAlgorithm; + private final int ivLength; + /* + * The encryption key is derived from the system key. + */ + private final SecretKey encryptionKey; + + public CryptoService(Settings settings) throws IOException { + super(settings); + this.encryptionAlgorithm = ENCRYPTION_ALGO_SETTING.get(settings); + final int keyLength = ENCRYPTION_KEY_LENGTH_SETTING.get(settings); + this.ivLength = keyLength / 8; + String keyAlgorithm = ENCRYPTION_KEY_ALGO_SETTING.get(settings); + + if (keyLength % 8 != 0) { + throw new IllegalArgumentException("invalid key length [" + keyLength + "]. 
value must be a multiple of 8"); + } + + SecretKey systemKey = readSystemKey(WatcherField.ENCRYPTION_KEY_SETTING.get(settings)); + try { + encryptionKey = encryptionKey(systemKey, keyLength, keyAlgorithm); + } catch (NoSuchAlgorithmException nsae) { + throw new ElasticsearchException("failed to start crypto service. could not load encryption key", nsae); + } + assert encryptionKey != null : "the encryption key should never be null"; + } + + private static SecretKey readSystemKey(InputStream in) throws IOException { + final int keySizeBytes = KEY_SIZE / 8; + final byte[] keyBytes = new byte[keySizeBytes]; + final int read = Streams.readFully(in, keyBytes); + if (read != keySizeBytes) { + throw new IllegalArgumentException( + "key size did not match expected value; was the key generated with elasticsearch-syskeygen?"); + } + return new SecretKeySpec(keyBytes, KEY_ALGO); + } + + /** + * Encrypts the provided char array and returns the encrypted values in a char array + * @param chars the characters to encrypt + * @return character array representing the encrypted data + */ + public char[] encrypt(char[] chars) { + byte[] charBytes = CharArrays.toUtf8Bytes(chars); + String base64 = Base64.getEncoder().encodeToString(encryptInternal(charBytes, encryptionKey)); + return ENCRYPTED_TEXT_PREFIX.concat(base64).toCharArray(); + } + + /** + * Decrypts the provided char array and returns the plain-text chars + * @param chars the data to decrypt + * @return plaintext chars + */ + public char[] decrypt(char[] chars) { + if (!isEncrypted(chars)) { + // Not encrypted + return chars; + } + + String encrypted = new String(chars, ENCRYPTED_TEXT_PREFIX.length(), chars.length - ENCRYPTED_TEXT_PREFIX.length()); + byte[] bytes; + try { + bytes = Base64.getDecoder().decode(encrypted); + } catch (IllegalArgumentException e) { + throw new ElasticsearchException("unable to decode encrypted data", e); + } + + byte[] decrypted = decryptInternal(bytes, encryptionKey); + return CharArrays.utf8BytesToChars(decrypted); + } + + /** + * Checks whether the given chars are encrypted + * @param chars the chars to check if they are encrypted + * @return true if the data is encrypted + */ + protected boolean isEncrypted(char[] chars) { + return CharArrays.charsBeginsWith(ENCRYPTED_TEXT_PREFIX, chars); + } + + private byte[] encryptInternal(byte[] bytes, SecretKey key) { + byte[] iv = new byte[ivLength]; + secureRandom.nextBytes(iv); + Cipher cipher = cipher(Cipher.ENCRYPT_MODE, encryptionAlgorithm, key, iv); + try { + byte[] encrypted = cipher.doFinal(bytes); + byte[] output = new byte[iv.length + encrypted.length]; + System.arraycopy(iv, 0, output, 0, iv.length); + System.arraycopy(encrypted, 0, output, iv.length, encrypted.length); + return output; + } catch (BadPaddingException | IllegalBlockSizeException e) { + throw new ElasticsearchException("error encrypting data", e); + } + } + + private byte[] decryptInternal(byte[] bytes, SecretKey key) { + if (bytes.length < ivLength) { + logger.error("received data for decryption with size [{}] that is less than IV length [{}]", bytes.length, ivLength); + throw new IllegalArgumentException("invalid data to decrypt"); + } + + byte[] iv = new byte[ivLength]; + System.arraycopy(bytes, 0, iv, 0, ivLength); + byte[] data = new byte[bytes.length - ivLength]; + System.arraycopy(bytes, ivLength, data, 0, bytes.length - ivLength); + + Cipher cipher = cipher(Cipher.DECRYPT_MODE, encryptionAlgorithm, key, iv); + try { + return cipher.doFinal(data); + } catch (BadPaddingException |
IllegalBlockSizeException e) { + throw new IllegalStateException("error decrypting data", e); + } + } + + + private static Cipher cipher(int mode, String encryptionAlgorithm, SecretKey key, byte[] initializationVector) { + try { + Cipher cipher = Cipher.getInstance(encryptionAlgorithm); + cipher.init(mode, key, new IvParameterSpec(initializationVector)); + return cipher; + } catch (Exception e) { + throw new ElasticsearchException("error creating cipher", e); + } + } + + private static SecretKey encryptionKey(SecretKey systemKey, int keyLength, String algorithm) throws NoSuchAlgorithmException { + byte[] bytes = systemKey.getEncoded(); + if ((bytes.length * 8) < keyLength) { + throw new IllegalArgumentException("at least " + keyLength + " bits should be provided as key data"); + } + + MessageDigest messageDigest = MessageDigest.getInstance("SHA-256"); + byte[] digest = messageDigest.digest(bytes); + assert digest.length == (256 / 8); + + if ((digest.length * 8) < keyLength) { + throw new IllegalArgumentException("requested key length is too large"); + } + byte[] truncatedDigest = Arrays.copyOfRange(digest, 0, (keyLength / 8)); + + return new SecretKeySpec(truncatedDigest, algorithm); + } + + public static void addSettings(List> settings) { + settings.add(ENCRYPTION_KEY_LENGTH_SETTING); + settings.add(ENCRYPTION_KEY_ALGO_SETTING); + settings.add(ENCRYPTION_ALGO_SETTING); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/ActionExecutionMode.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/ActionExecutionMode.java new file mode 100644 index 0000000000000..9f8f623f487ef --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/ActionExecutionMode.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.execution; + +import java.util.Locale; + +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArgument; + +public enum ActionExecutionMode { + + /** + * The action will be simulated (not actually executed) and it will be throttled if needed. + */ + SIMULATE((byte) 1, false, true), + + /** + * The action will be simulated (not actually executed) and it will not be throttled. + */ + FORCE_SIMULATE((byte) 2, true, true), + + /** + * The action will be executed and it will be throttled if needed. + */ + EXECUTE((byte) 3, false, false), + + /** + * The action will be executed and it will not be throttled. 
+ */ + FORCE_EXECUTE((byte) 4, true, false), + + /** + * The action will be skipped (it won't be executed nor simulated) - effectively it will be forcefully throttled + */ + SKIP((byte) 5, false, false); + + private final byte id; + private final boolean force; + private final boolean simulate; + + ActionExecutionMode(byte id, boolean froce, boolean simulate) { + this.id = id; + this.force = froce; + this.simulate = simulate; + } + + public final byte id() { + return id; + } + + public final boolean simulate() { + return simulate; + } + + public final boolean force() { + return force; + } + + public static ActionExecutionMode resolve(byte id) { + switch (id) { + case 1: return SIMULATE; + case 2: return FORCE_SIMULATE; + case 3: return EXECUTE; + case 4: return FORCE_EXECUTE; + case 5: return SKIP; + } + throw illegalArgument("unknown action execution mode id [{}]", id); + } + + public static ActionExecutionMode resolve(String key) { + if (key == null) { + return null; + } + switch (key.toLowerCase(Locale.ROOT)) { + case "simulate": return SIMULATE; + case "force_simulate": return FORCE_SIMULATE; + case "execute": return EXECUTE; + case "force_execute": return FORCE_EXECUTE; + case "skip": return SKIP; + } + throw illegalArgument("unknown action execution mode [{}]", key); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/ExecutionPhase.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/ExecutionPhase.java new file mode 100644 index 0000000000000..f9032ebeaa127 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/ExecutionPhase.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.execution; + +import java.util.Locale; + +public enum ExecutionPhase { + + // awaiting execution of the watch + AWAITS_EXECUTION(false), + // initial phase, watch execution has started, but the input is not yet processed + STARTED(false), + // input is being executed + INPUT(false), + // condition phase is being executed + CONDITION(false), + // transform phase (optional, depends if a global transform was configured in the watch) + WATCH_TRANSFORM(false), + // actions phase, all actions, including specific action transforms + ACTIONS(false), + // missing watch, failed execution of input/condition/transform, + ABORTED(true), + // successful run + FINISHED(true); + + private final boolean sealed; + + ExecutionPhase(boolean sealed) { + this.sealed = sealed; + } + + public boolean sealed() { + return sealed; + } + + public String id() { + return name().toLowerCase(Locale.ROOT); + } + + public static ExecutionPhase resolve(String id) { + return valueOf(id.toUpperCase(Locale.ROOT)); + } + + @Override + public String toString() { + return id(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/ExecutionState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/ExecutionState.java new file mode 100644 index 0000000000000..8323b40adab62 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/ExecutionState.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.execution; + +import java.util.Locale; + +public enum ExecutionState { + + // the condition of the watch was not met + EXECUTION_NOT_NEEDED, + + // Execution has been throttled due to time-based throttling - this might only affect a single action though + THROTTLED, + + // Execution has been throttled due to ack-based throttling/muting of an action - this might only affect a single action though + ACKNOWLEDGED, + + // regular execution + EXECUTED, + + // an error in the condition or the execution of the input + FAILED, + + // a rejection due to a filled up threadpool + THREADPOOL_REJECTION, + + // the execution was scheduled, but in between the watch was deleted + NOT_EXECUTED_WATCH_MISSING, + + // even though the execution was scheduled, it was not executed, because the watch was already queued in the thread pool + NOT_EXECUTED_ALREADY_QUEUED, + + // this can happen when a watch was executed, but not completely finished (the triggered watch entry was not deleted), and then + // watcher is restarted (manually or due to host switch) - the triggered watch will be executed but the history entry already + // exists + EXECUTED_MULTIPLE_TIMES; + + public String id() { + return name().toLowerCase(Locale.ROOT); + } + + public static ExecutionState resolve(String id) { + return valueOf(id.toUpperCase(Locale.ROOT)); + } + + @Override + public String toString() { + return id(); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/QueuedWatch.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/QueuedWatch.java new file mode 100644 index 0000000000000..b6b67057769aa --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/QueuedWatch.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.execution; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; + +public class QueuedWatch implements Streamable, ToXContentObject { + + private String watchId; + private String watchRecordId; + private DateTime triggeredTime; + private DateTime executionTime; + + public QueuedWatch() { + } + + public QueuedWatch(WatchExecutionContext ctx) { + this.watchId = ctx.id().watchId(); + this.watchRecordId = ctx.id().value(); + this.triggeredTime = ctx.triggerEvent().triggeredTime(); + this.executionTime = ctx.executionTime(); + } + + public String watchId() { + return watchId; + } + + public DateTime triggeredTime() { + return triggeredTime; + } + + public void triggeredTime(DateTime triggeredTime) { + this.triggeredTime = triggeredTime; + } + + public DateTime executionTime() { + return executionTime; + } + + public void executionTime(DateTime executionTime) { + this.executionTime = executionTime; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + watchId = in.readString(); + watchRecordId = in.readString(); + triggeredTime = new DateTime(in.readVLong(), DateTimeZone.UTC); + executionTime = new DateTime(in.readVLong(), DateTimeZone.UTC); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(watchId); + out.writeString(watchRecordId); + out.writeVLong(triggeredTime.getMillis()); + out.writeVLong(executionTime.getMillis()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("watch_id", watchId); + builder.field("watch_record_id", watchRecordId); + builder.timeField("triggered_time", triggeredTime); + builder.timeField("execution_time", executionTime); + builder.endObject(); + return builder; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/TriggeredWatchStoreField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/TriggeredWatchStoreField.java new file mode 100644 index 0000000000000..ac4a0c16c9c4e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/TriggeredWatchStoreField.java @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.execution; + +public final class TriggeredWatchStoreField { + + public static final String INDEX_NAME = ".triggered_watches"; + public static final String DOC_TYPE = "doc"; +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionContext.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionContext.java new file mode 100644 index 0000000000000..4cdd4bb0e3575 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionContext.java @@ -0,0 +1,246 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.execution; + +import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.xpack.core.watcher.actions.ActionWrapperResult; +import org.elasticsearch.xpack.core.watcher.condition.Condition; +import org.elasticsearch.xpack.core.watcher.history.WatchRecord; +import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.core.watcher.transform.Transform; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.joda.time.DateTime; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; + +public abstract class WatchExecutionContext { + + private final Wid id; + private final DateTime executionTime; + private final TriggerEvent triggerEvent; + private final TimeValue defaultThrottlePeriod; + + private ExecutionPhase phase = ExecutionPhase.AWAITS_EXECUTION; + private long relativeStartTime; + private Watch watch; + + private Payload payload; + private Map vars = new HashMap<>(); + + private Input.Result inputResult; + private Condition.Result conditionResult; + private Transform.Result transformResult; + private ConcurrentMap actionsResults = ConcurrentCollections.newConcurrentMap(); + private String nodeId; + + public WatchExecutionContext(String watchId, DateTime executionTime, TriggerEvent triggerEvent, TimeValue defaultThrottlePeriod) { + this.id = new Wid(watchId, executionTime); + this.executionTime = executionTime; + this.triggerEvent = triggerEvent; + this.defaultThrottlePeriod = defaultThrottlePeriod; + } + + /** + * @return true if the watch associated with this context is known to watcher (i.e. it's stored + * in watcher. This plays a key role in how we handle execution. For example, if + * the watch is known, but then the watch is not there (perhaps deleted in between) + * we abort execution. It also plays a part (along with {@link #recordExecution()} + * in the decision of whether the watch record should be stored and if the watch + * status should be updated. 
+ */ + public abstract boolean knownWatch(); + + /** + * @return true if this action should be simulated + */ + public abstract boolean simulateAction(String actionId); + + public abstract boolean skipThrottling(String actionId); + + /** + * @return true if execution is allowed (this depends on the type of the watch context) + */ + public abstract boolean shouldBeExecuted(); + + /** + * @return true if this execution should be recorded in the .watcher-history index + */ + public abstract boolean recordExecution(); + + public Watch watch() { + return watch; + } + + public void ensureWatchExists(CheckedSupplier supplier) throws Exception { + if (watch == null) { + watch = supplier.get(); + } + } + + public Wid id() { + return id; + } + + public DateTime executionTime() { + return executionTime; + } + + /** + * @return The default throttle period in the system. + */ + public TimeValue defaultThrottlePeriod() { + return defaultThrottlePeriod; + } + + public boolean overrideRecordOnConflict() { + return false; + } + + public TriggerEvent triggerEvent() { + return triggerEvent; + } + + public Payload payload() { + return payload; + } + + public Map vars() { + return vars; + } + + public ExecutionPhase executionPhase() { + return phase; + } + + /** + * @param nodeId The node id this watch execution context runs on + */ + public void setNodeId(String nodeId) { + this.nodeId = nodeId; + } + + /** + * @return The node this watch execution context runs on, which will be stored in the watch history + */ + public String getNodeId() { + return nodeId; + } + + public void start() { + assert phase == ExecutionPhase.AWAITS_EXECUTION; + relativeStartTime = System.nanoTime(); + phase = ExecutionPhase.STARTED; + } + + public void beforeInput() { + assert phase == ExecutionPhase.STARTED; + phase = ExecutionPhase.INPUT; + } + + public void onInputResult(Input.Result inputResult) { + assert !phase.sealed(); + this.inputResult = inputResult; + if (inputResult.status() == Input.Result.Status.SUCCESS) { + this.payload = inputResult.payload(); + } + } + + public Input.Result inputResult() { + return inputResult; + } + + public void beforeCondition() { + assert phase == ExecutionPhase.INPUT; + phase = ExecutionPhase.CONDITION; + } + + public void onConditionResult(Condition.Result conditionResult) { + assert !phase.sealed(); + this.conditionResult = conditionResult; + watch().status().onCheck(conditionResult.met(), executionTime); + } + + public Condition.Result conditionResult() { + return conditionResult; + } + + public void beforeWatchTransform() { + assert phase == ExecutionPhase.CONDITION; + this.phase = ExecutionPhase.WATCH_TRANSFORM; + } + + public void onWatchTransformResult(Transform.Result result) { + assert !phase.sealed(); + this.transformResult = result; + if (result.status() == Transform.Result.Status.SUCCESS) { + this.payload = result.payload(); + } + } + + public Transform.Result transformResult() { + return transformResult; + } + + public void beforeActions() { + assert phase == ExecutionPhase.CONDITION || phase == ExecutionPhase.WATCH_TRANSFORM; + phase = ExecutionPhase.ACTIONS; + } + + public void onActionResult(ActionWrapperResult result) { + assert !phase.sealed(); + actionsResults.put(result.id(), result); + watch().status().onActionResult(result.id(), executionTime, result.action()); + } + + public Map actionsResults() { + return Collections.unmodifiableMap(actionsResults); + } + + public WatchRecord abortBeforeExecution(ExecutionState state, String message) { + assert !phase.sealed(); + phase 
= ExecutionPhase.ABORTED; + return new WatchRecord.MessageWatchRecord(id, triggerEvent, state, message, getNodeId()); + } + + public WatchRecord abortFailedExecution(String message) { + assert !phase.sealed(); + phase = ExecutionPhase.ABORTED; + long executionTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - relativeStartTime); + WatchExecutionResult result = new WatchExecutionResult(this, executionTime); + watch().status().setExecutionState(WatchRecord.getState(result)); + return new WatchRecord.MessageWatchRecord(this, result, message); + } + + public WatchRecord abortFailedExecution(Exception e) { + assert !phase.sealed(); + phase = ExecutionPhase.ABORTED; + long executionTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - relativeStartTime); + WatchExecutionResult result = new WatchExecutionResult(this, executionTime); + watch().status().setExecutionState(WatchRecord.getState(result)); + return new WatchRecord.ExceptionWatchRecord(this, result, e); + } + + public WatchRecord finish() { + assert !phase.sealed(); + phase = ExecutionPhase.FINISHED; + long executionTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - relativeStartTime); + WatchExecutionResult result = new WatchExecutionResult(this, executionTime); + watch().status().setExecutionState(WatchRecord.getState(result)); + return new WatchRecord.MessageWatchRecord(this, result); + } + + public WatchExecutionSnapshot createSnapshot(Thread executionThread) { + return new WatchExecutionSnapshot(this, executionThread.getStackTrace()); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionResult.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionResult.java new file mode 100644 index 0000000000000..cce6a1a25114c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionResult.java @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.execution; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.watcher.actions.ActionWrapperResult; +import org.elasticsearch.xpack.core.watcher.condition.Condition; +import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; +import org.elasticsearch.xpack.core.watcher.transform.Transform; +import org.joda.time.DateTime; + +import java.io.IOException; +import java.util.Map; + +public class WatchExecutionResult implements ToXContentObject { + + private final DateTime executionTime; + private final long executionDurationMs; + @Nullable private final Input.Result inputResult; + @Nullable private final Condition.Result conditionResult; + @Nullable private final Transform.Result transformResult; + private final Map actionsResults; + + public WatchExecutionResult(WatchExecutionContext context, long executionDurationMs) { + this(context.executionTime(), executionDurationMs, context.inputResult(), context.conditionResult(), context.transformResult(), + context.actionsResults()); + } + + private WatchExecutionResult(DateTime executionTime, long executionDurationMs, Input.Result inputResult, + Condition.Result conditionResult, @Nullable Transform.Result transformResult, + Map actionsResults) { + this.executionTime = executionTime; + this.inputResult = inputResult; + this.conditionResult = conditionResult; + this.transformResult = transformResult; + this.actionsResults = actionsResults; + this.executionDurationMs = executionDurationMs; + } + + public DateTime executionTime() { + return executionTime; + } + + public long executionDurationMs() { + return executionDurationMs; + } + + public Input.Result inputResult() { + return inputResult; + } + + public Condition.Result conditionResult() { + return conditionResult; + } + + public Transform.Result transformResult() { + return transformResult; + } + + public Map actionsResults() { + return actionsResults; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + WatcherDateTimeUtils.writeDate(Field.EXECUTION_TIME.getPreferredName(), builder, executionTime); + builder.field(Field.EXECUTION_DURATION.getPreferredName(), executionDurationMs); + + if (inputResult != null) { + builder.field(Field.INPUT.getPreferredName(), inputResult, params); + } + if (conditionResult != null) { + builder.field(Field.CONDITION.getPreferredName(), conditionResult, params); + } + if (transformResult != null) { + builder.field(Transform.TRANSFORM.getPreferredName(), transformResult, params); + } + builder.startArray(Field.ACTIONS.getPreferredName()); + for (ActionWrapperResult result : actionsResults.values()) { + result.toXContent(builder, params); + } + builder.endArray(); + builder.endObject(); + return builder; + } + + public interface Field { + ParseField EXECUTION_TIME = new ParseField("execution_time"); + ParseField EXECUTION_DURATION = new ParseField("execution_duration"); + ParseField INPUT = new ParseField("input"); + ParseField CONDITION = new ParseField("condition"); + ParseField ACTIONS = new ParseField("actions"); + ParseField TYPE = new ParseField("type"); + } +} diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionSnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionSnapshot.java new file mode 100644 index 0000000000000..986011d35205f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionSnapshot.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.execution; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.watcher.actions.ActionWrapperResult; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.util.Map; + +public class WatchExecutionSnapshot implements Streamable, ToXContentObject { + + private String watchId; + private String watchRecordId; + private DateTime triggeredTime; + private DateTime executionTime; + private ExecutionPhase phase; + private String[] executedActions; + private StackTraceElement[] executionStackTrace; + + public WatchExecutionSnapshot() { + } + + public WatchExecutionSnapshot(WatchExecutionContext context, StackTraceElement[] executionStackTrace) { + watchId = context.id().watchId(); + watchRecordId = context.id().value(); + triggeredTime = context.triggerEvent().triggeredTime(); + executionTime = context.executionTime(); + phase = context.executionPhase(); + if (phase == ExecutionPhase.ACTIONS) { + Map actionResults = context.actionsResults(); + executedActions = new String[actionResults.size()]; + int i = 0; + for (ActionWrapperResult actionResult : actionResults.values()) { + executedActions[i++] = actionResult.id(); + } + } + this.executionStackTrace = executionStackTrace; + } + + public String watchId() { + return watchId; + } + + public String watchRecordId() { + return watchRecordId; + } + + public DateTime triggeredTime() { + return triggeredTime; + } + + public DateTime executionTime() { + return executionTime; + } + + public ExecutionPhase executionPhase() { + return phase; + } + + public StackTraceElement[] executionStackTrace() { + return executionStackTrace; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + watchId = in.readString(); + watchRecordId = in.readString(); + triggeredTime = new DateTime(in.readVLong(), DateTimeZone.UTC); + executionTime = new DateTime(in.readVLong(), DateTimeZone.UTC); + phase = ExecutionPhase.resolve(in.readString()); + int size = in.readVInt(); + executionStackTrace = new StackTraceElement[size]; + for (int i = 0; i < size; i++) { + String declaringClass = in.readString(); + String methodName = in.readString(); + String fileName = in.readOptionalString(); + int lineNumber = in.readInt(); + executionStackTrace[i] = new StackTraceElement(declaringClass, methodName, fileName, lineNumber); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(watchId); + out.writeString(watchRecordId); + out.writeVLong(triggeredTime.getMillis()); + 
out.writeVLong(executionTime.getMillis()); + out.writeString(phase.id()); + out.writeVInt(executionStackTrace.length); + for (StackTraceElement element : executionStackTrace) { + out.writeString(element.getClassName()); + out.writeString(element.getMethodName()); + out.writeOptionalString(element.getFileName()); + out.writeInt(element.getLineNumber()); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("watch_id", watchId); + builder.field("watch_record_id", watchRecordId); + builder.timeField("triggered_time", triggeredTime); + builder.timeField("execution_time", executionTime); + builder.field("execution_phase", phase); + if (executedActions != null) { + builder.startArray("executed_actions"); + for (String executedAction : executedActions) { + builder.value(executedAction); + } + builder.endArray(); + } + if (params.paramAsBoolean("emit_stacktraces", false)) { + builder.startArray("stack_trace"); + for (StackTraceElement element : executionStackTrace) { + builder.value(element.toString()); + } + builder.endArray(); + } + builder.endObject(); + return builder; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/Wid.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/Wid.java new file mode 100644 index 0000000000000..a7a8fb4bea3cb --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/Wid.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.execution; + +import org.joda.time.DateTime; +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; + +import java.util.UUID; + +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArgument; + +/** + * A representation class of a watch id, its execution time and a random UUID + * This class exists to be able to store several events from the same possible execution time and the same watch + * in the triggered store index or the history store + * + * One 'specialty' of this class is the handling of the underscore in the value. Nothing except the watchId should contain an + * underscore, otherwise this class will not be able to extract the proper watch id, when a a single string is handed over in its ctor + * + * This is also the reason why UUID.randomUUID() is used instead of UUIDs.base64UUID(), as the latter one contains underscores. 
Also this + * is not dependant on having time based uuids here, as the time is already included in the value + */ +public class Wid { + + private static final DateTimeFormatter formatter = ISODateTimeFormat.dateTime(); + + private final String watchId; + private final String value; + + public Wid(String watchId, DateTime executionTime) { + this.watchId = watchId; + this.value = watchId + "_" + UUID.randomUUID().toString() + "-" + formatter.print(executionTime); + } + + public Wid(String value) { + this.value = value; + int index = value.lastIndexOf("_"); + if (index <= 0) { + throw illegalArgument("invalid watcher execution id [{}]", value); + } + this.watchId = value.substring(0, index); + } + + public String value() { + return value; + } + + public String watchId() { + return watchId; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Wid wid = (Wid) o; + + return value.equals(wid.value); + } + + @Override + public int hashCode() { + return value.hashCode(); + } + + @Override + public String toString() { + return value; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/history/HistoryStoreField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/history/HistoryStoreField.java new file mode 100644 index 0000000000000..e655be8beff6b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/history/HistoryStoreField.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.history; + +import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField; +import org.joda.time.DateTime; +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.DateTimeFormatter; + +public final class HistoryStoreField { + + public static final String INDEX_PREFIX = ".watcher-history-"; + public static final String INDEX_PREFIX_WITH_TEMPLATE = INDEX_PREFIX + WatcherIndexTemplateRegistryField.INDEX_TEMPLATE_VERSION + "-"; + static final DateTimeFormatter indexTimeFormat = DateTimeFormat.forPattern("YYYY.MM.dd"); + + /** + * Calculates the correct history index name for a given time + */ + public static String getHistoryIndexNameForTime(DateTime time) { + return INDEX_PREFIX_WITH_TEMPLATE + indexTimeFormat.print(time); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/history/WatchRecord.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/history/WatchRecord.java new file mode 100644 index 0000000000000..74e7b2115faa9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/history/WatchRecord.java @@ -0,0 +1,310 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.history; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.actions.ActionWrapperResult; +import org.elasticsearch.xpack.core.watcher.condition.ExecutableCondition; +import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionResult; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.input.ExecutableInput; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.core.watcher.watch.WatchField; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +public abstract class WatchRecord implements ToXContentObject { + + public static final ParseField WATCH_ID = new ParseField("watch_id"); + public static final ParseField STATE = new ParseField("state"); + public static final ParseField TRIGGER_EVENT = new ParseField("trigger_event"); + public static final ParseField NODE = new ParseField("node"); + private static final ParseField MESSAGES = new ParseField("messages"); + private static final ParseField STATUS = new ParseField("status"); + private static final ParseField VARS = new ParseField("vars"); + private static final ParseField METADATA = new ParseField("metadata"); + private static final ParseField EXECUTION_RESULT = new ParseField("result"); + private static final ParseField EXCEPTION = new ParseField("exception"); + + protected final Wid id; + protected final Watch watch; + private final String nodeId; + protected final TriggerEvent triggerEvent; + protected final ExecutionState state; + + // only emitted to xcontent in "debug" mode + protected final Map vars; + + @Nullable protected final ExecutableInput input; + @Nullable protected final ExecutableCondition condition; + @Nullable protected final Map metadata; + @Nullable protected final WatchExecutionResult executionResult; + + private WatchRecord(Wid id, TriggerEvent triggerEvent, ExecutionState state, Map vars, ExecutableInput input, + ExecutableCondition condition, Map metadata, Watch watch, WatchExecutionResult executionResult, + String nodeId) { + this.id = id; + this.triggerEvent = triggerEvent; + this.state = state; + this.vars = vars; + this.input = input; + this.condition = condition; + this.metadata = metadata; + this.executionResult = executionResult; + this.watch = watch; + this.nodeId = nodeId; + } + + private WatchRecord(Wid id, TriggerEvent triggerEvent, ExecutionState state, String nodeId) { + this(id, triggerEvent, state, Collections.emptyMap(), null, null, null, null, null, nodeId); + } + + private WatchRecord(WatchRecord record, ExecutionState state) { + this(record.id, record.triggerEvent, state, record.vars, record.input, record.condition, record.metadata, record.watch, + record.executionResult, 
record.nodeId);
+    }
+
+    private WatchRecord(WatchExecutionContext context, ExecutionState state) {
+        this(context.id(), context.triggerEvent(), state, context.vars(),
+                context.watch() != null ? context.watch().input() : null,
+                context.watch() != null ? context.watch().condition() : null,
+                context.watch() != null ? context.watch().metadata() : null,
+                context.watch(),
+                null, context.getNodeId());
+    }
+
+    private WatchRecord(WatchExecutionContext context, WatchExecutionResult executionResult) {
+        this(context.id(), context.triggerEvent(), getState(executionResult), context.vars(), context.watch().input(),
+                context.watch().condition(), context.watch().metadata(), context.watch(), executionResult, context.getNodeId());
+    }
+
+    public static ExecutionState getState(WatchExecutionResult executionResult) {
+        if (executionResult == null || executionResult.conditionResult() == null) {
+            return ExecutionState.FAILED;
+        }
+        if (executionResult.conditionResult().met()) {
+            final Collection<ActionWrapperResult> values = executionResult.actionsResults().values();
+            // acknowledged as state wins because the user had explicitly set this, whereas throttled may happen due to execution
+            if (values.stream().anyMatch((r) -> r.action().status() == Action.Result.Status.ACKNOWLEDGED)) {
+                return ExecutionState.ACKNOWLEDGED;
+            } else if (values.stream().anyMatch((r) -> r.action().status() == Action.Result.Status.THROTTLED)) {
+                return ExecutionState.THROTTLED;
+            } else {
+                return ExecutionState.EXECUTED;
+            }
+        } else {
+            return ExecutionState.EXECUTION_NOT_NEEDED;
+        }
+    }
+
+    public Wid id() {
+        return id;
+    }
+
+    public TriggerEvent triggerEvent() {
+        return triggerEvent;
+    }
+
+    public String watchId() {
+        return id.watchId();
+    }
+
+    public ExecutableInput input() { return input; }
+
+    public ExecutionState state() {
+        return state;
+    }
+
+    public Map metadata() {
+        return metadata;
+    }
+
+    public WatchExecutionResult result() {
+        return executionResult;
+    }
+
+    public String getNodeId() {
+        return nodeId;
+    }
+
+    @Override
+    public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(WATCH_ID.getPreferredName(), id.watchId());
+        builder.field(NODE.getPreferredName(), nodeId);
+        builder.field(STATE.getPreferredName(), state.id());
+
+        if (watch != null && watch.status() != null) {
+            builder.field(STATUS.getPreferredName(), watch.status(), params);
+        }
+
+        builder.field(TRIGGER_EVENT.getPreferredName());
+        triggerEvent.recordXContent(builder, params);
+
+        if (!vars.isEmpty() && WatcherParams.debug(params)) {
+            builder.field(VARS.getPreferredName(), vars);
+        }
+
+        if (input != null) {
+            builder.startObject(WatchField.INPUT.getPreferredName())
+                    .field(input.type(), input, params)
+                    .endObject();
+        }
+        if (condition != null) {
+            builder.startObject(WatchField.CONDITION.getPreferredName())
+                    .field(condition.type(), condition, params)
+                    .endObject();
+        }
+        if (metadata != null) {
+            builder.field(METADATA.getPreferredName(), metadata);
+        }
+        if (executionResult != null) {
+            builder.field(EXECUTION_RESULT.getPreferredName(), executionResult, params);
+        }
+        innerToXContent(builder, params);
+        builder.endObject();
+        return builder;
+    }
+
+    abstract void innerToXContent(XContentBuilder builder, Params params) throws IOException;
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+
+        WatchRecord entry = (WatchRecord) o;
+        return Objects.equals(id, entry.id);
+    }
+
+    @Override
+    public int hashCode() {
+        return id.hashCode();
+    }
+
+    @Override
+    public String toString() {
+        return id.toString();
+    }
+
+    public static class MessageWatchRecord extends WatchRecord {
+        @Nullable private final String[] messages;
+
+        /**
+         * Called when the execution was aborted before it started
+         */
+        public MessageWatchRecord(Wid id, TriggerEvent triggerEvent, ExecutionState state, String message, String nodeId) {
+            super(id, triggerEvent, state, nodeId);
+            this.messages = new String[] { message };
+        }
+
+        /**
+         * Called when the execution was aborted due to an error during execution (the given result should reflect
+         * where exactly the execution failed)
+         */
+        public MessageWatchRecord(WatchExecutionContext context, WatchExecutionResult executionResult, String message) {
+            super(context, executionResult);
+            this.messages = new String[] { message };
+        }
+
+        /**
+         * Called when the execution finished.
+         */
+        public MessageWatchRecord(WatchExecutionContext context, WatchExecutionResult executionResult) {
+            super(context, executionResult);
+            this.messages = Strings.EMPTY_ARRAY;
+        }
+
+        public MessageWatchRecord(WatchRecord record, ExecutionState state, String message) {
+            super(record, state);
+            if (record instanceof MessageWatchRecord) {
+                MessageWatchRecord messageWatchRecord = (MessageWatchRecord) record;
+                if (messageWatchRecord.messages.length == 0) {
+                    this.messages = new String[] { message };
+                } else {
+                    String[] newMessages = new String[messageWatchRecord.messages.length + 1];
+                    System.arraycopy(messageWatchRecord.messages, 0, newMessages, 0, messageWatchRecord.messages.length);
+                    newMessages[messageWatchRecord.messages.length] = message;
+                    this.messages = newMessages;
+                }
+            } else {
+                messages = new String[] { message };
+            }
+        }
+
+        public String[] messages() {
+            return messages;
+        }
+
+        @Override
+        void innerToXContent(XContentBuilder builder, Params params) throws IOException {
+            if (messages != null) {
+                builder.array(MESSAGES.getPreferredName(), messages);
+            }
+        }
+    }
+
+    public static class ExceptionWatchRecord extends WatchRecord {
+
+        private static final Map<String, String> STACK_TRACE_ENABLED_PARAMS = MapBuilder.<String, String>newMapBuilder()
+                .put(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "false")
+                .immutableMap();
+
+        @Nullable private final Exception exception;
+
+        public ExceptionWatchRecord(WatchExecutionContext context, WatchExecutionResult executionResult, Exception exception) {
+            super(context, executionResult);
+            this.exception = exception;
+        }
+
+        public ExceptionWatchRecord(WatchRecord record, Exception exception) {
+            super(record, ExecutionState.FAILED);
+            this.exception = exception;
+        }
+
+        public ExceptionWatchRecord(WatchExecutionContext context, Exception exception) {
+            super(context, ExecutionState.FAILED);
+            this.exception = exception;
+        }
+
+        public Exception getException() {
+            return exception;
+        }
+
+        @Override
+        void innerToXContent(XContentBuilder builder, Params params) throws IOException {
+            if (exception != null) {
+                if (exception instanceof ElasticsearchException) {
+                    ElasticsearchException elasticsearchException = (ElasticsearchException) exception;
+                    builder.startObject(EXCEPTION.getPreferredName());
+                    Params delegatingParams = new DelegatingMapParams(STACK_TRACE_ENABLED_PARAMS, params);
+                    elasticsearchException.toXContent(builder, delegatingParams);
+                    builder.endObject();
+                } else {
+                    builder.startObject(EXCEPTION.getPreferredName())
+                            .field("type", ElasticsearchException.getExceptionName(exception))
+                            .field("reason",
exception.getMessage())
+                            .endObject();
+                }
+            }
+        }
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/input/ExecutableInput.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/input/ExecutableInput.java
new file mode 100644
index 0000000000000..1c08af3cf903f
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/input/ExecutableInput.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.watcher.input;
+
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext;
+import org.elasticsearch.xpack.core.watcher.watch.Payload;
+
+import java.io.IOException;
+
+public abstract class ExecutableInput<I extends Input, R extends Input.Result> implements ToXContentObject {
+
+    protected final I input;
+    protected final Logger logger;
+
+    protected ExecutableInput(I input, Logger logger) {
+        this.input = input;
+        this.logger = logger;
+    }
+
+    /**
+     * @return the type of this input
+     */
+    public final String type() {
+        return input.type();
+    }
+
+    public I input() {
+        return input;
+    }
+
+    /**
+     * Executes this input
+     */
+    public abstract R execute(WatchExecutionContext ctx, @Nullable Payload payload);
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        return input.toXContent(builder, params);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+
+        ExecutableInput that = (ExecutableInput) o;
+
+        return input.equals(that.input);
+    }
+
+    @Override
+    public int hashCode() {
+        return input.hashCode();
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/input/Input.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/input/Input.java
new file mode 100644
index 0000000000000..3188f5e951c02
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/input/Input.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.core.watcher.input; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.watcher.watch.Payload; + +import java.io.IOException; +import java.util.Locale; + +public interface Input extends ToXContentObject { + + String type(); + + abstract class Result implements ToXContentObject { + + private static final ParseField STATUS = new ParseField("status"); + private static final ParseField TYPE = new ParseField("type"); + private static final ParseField PAYLOAD = new ParseField("payload"); + + public enum Status { + SUCCESS, FAILURE + } + + protected final String type; + protected final Status status; + private final Payload payload; + @Nullable private final Exception exception; + + protected Result(String type, Payload payload) { + this.status = Status.SUCCESS; + this.type = type; + this.payload = payload; + this.exception = null; + } + + protected Result(String type, Exception e) { + this.status = Status.FAILURE; + this.type = type; + this.payload = Payload.EMPTY; + this.exception = e; + } + + public String type() { + return type; + } + + public Status status() { + return status; + } + + public Payload payload() { + return payload; + } + + public Exception getException() { + assert status == Status.FAILURE; + return exception; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(TYPE.getPreferredName(), type); + builder.field(STATUS.getPreferredName(), status.name().toLowerCase(Locale.ROOT)); + switch (status) { + case SUCCESS: + assert payload != null; + builder.field(PAYLOAD.getPreferredName(), payload, params); + break; + case FAILURE: + assert exception != null; + ElasticsearchException.generateFailureXContent(builder, params, exception, true); + break; + default: + assert false; + } + typeXContent(builder, params); + return builder.endObject(); + } + + protected abstract XContentBuilder typeXContent(XContentBuilder builder, Params params) throws IOException; + } + + interface Builder { + I build(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/input/none/NoneInput.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/input/none/NoneInput.java new file mode 100644 index 0000000000000..656689604879b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/input/none/NoneInput.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.input.none; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.core.watcher.watch.Payload; + +import java.io.IOException; + +public class NoneInput implements Input { + + public static final String TYPE = "none"; + public static final NoneInput INSTANCE = new NoneInput(); + + private NoneInput() { + } + + @Override + public String type() { + return TYPE; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject().endObject(); + } + + public static NoneInput parse(String watchId, XContentParser parser) throws IOException { + if (parser.currentToken() != XContentParser.Token.START_OBJECT) { + String formattedMessage = "could not parse [{}] input for watch [{}]. expected an empty object but found [{}] instead"; + throw new ElasticsearchParseException(formattedMessage, TYPE, watchId, parser.currentToken()); + } + if (parser.nextToken() != XContentParser.Token.END_OBJECT) { + String formattedMessage = "could not parse [{}] input for watch [{}]. expected an empty object but found [{}] instead"; + throw new ElasticsearchParseException(formattedMessage, TYPE, watchId, parser.currentToken()); + } + return INSTANCE; + } + + public static Builder builder() { + return Builder.INSTANCE; + } + + public static class Result extends Input.Result { + + public static final Result INSTANCE = new Result(); + + private Result() { + super(TYPE, Payload.EMPTY); + } + + @Override + protected XContentBuilder typeXContent(XContentBuilder builder, Params params) throws IOException { + return builder; + } + } + + public static class Builder implements Input.Builder { + + private static final Builder INSTANCE = new Builder(); + + private Builder() { + } + + @Override + public NoneInput build() { + return NoneInput.INSTANCE; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/Exceptions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/Exceptions.java new file mode 100644 index 0000000000000..a8082a09355ae --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/Exceptions.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.support; + +import java.io.IOException; + +import static org.elasticsearch.common.logging.LoggerMessageFormat.format; + +public class Exceptions { + + private Exceptions() { + } + + public static IllegalArgumentException illegalArgument(String msg, Object... args) { + return new IllegalArgumentException(format(msg, args)); + } + + public static IllegalArgumentException illegalArgument(String msg, Throwable cause, Object... args) { + return new IllegalArgumentException(format(msg, args), cause); + } + + public static IllegalStateException illegalState(String msg, Object... args) { + return new IllegalStateException(format(msg, args)); + } + + public static IllegalStateException illegalState(String msg, Throwable cause, Object... 
args) { + return new IllegalStateException(format(msg, args), cause); + } + + public static IOException ioException(String msg, Object... args) { + return new IOException(format(msg, args)); + } + + public static IOException ioException(String msg, Throwable cause, Object... args) { + return new IOException(format(msg, args), cause); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherDateTimeUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherDateTimeUtils.java new file mode 100644 index 0000000000000..097d136c629bd --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherDateTimeUtils.java @@ -0,0 +1,198 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.support; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.joda.DateMathParser; +import org.elasticsearch.common.joda.FormatDateTimeFormatter; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.time.Clock; +import java.util.Locale; +import java.util.Objects; +import java.util.concurrent.TimeUnit; +public class WatcherDateTimeUtils { + + public static final FormatDateTimeFormatter dateTimeFormatter = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER; + public static final DateMathParser dateMathParser = new DateMathParser(dateTimeFormatter); + + private WatcherDateTimeUtils() { + } + + public static DateTime convertToDate(Object value, Clock clock) { + if (value instanceof DateTime) { + return (DateTime) value; + } + if (value instanceof String) { + return parseDateMath((String) value, DateTimeZone.UTC, clock); + } + if (value instanceof Number) { + return new DateTime(((Number) value).longValue(), DateTimeZone.UTC); + } + return null; + } + + public static DateTime parseDate(String dateAsText) { + return parseDate(dateAsText, null); + } + + public static DateTime parseDate(String format, DateTimeZone timeZone) { + DateTime dateTime = dateTimeFormatter.parser().parseDateTime(format); + return timeZone != null ? 
dateTime.toDateTime(timeZone) : dateTime; + } + + public static String formatDate(DateTime date) { + return dateTimeFormatter.printer().print(date); + } + + public static DateTime parseDateMath(String fieldName, XContentParser parser, DateTimeZone timeZone, Clock clock) throws IOException { + if (parser.currentToken() == XContentParser.Token.VALUE_NULL) { + throw new ElasticsearchParseException("could not parse date/time expected date field [{}] to not be null but was null", + fieldName); + } + return parseDateMathOrNull(fieldName, parser, timeZone, clock); + } + + public static DateTime parseDateMathOrNull(String fieldName, XContentParser parser, DateTimeZone timeZone, + Clock clock) throws IOException { + XContentParser.Token token = parser.currentToken(); + if (token == XContentParser.Token.VALUE_NUMBER) { + return new DateTime(parser.longValue(), timeZone); + } + if (token == XContentParser.Token.VALUE_STRING) { + try { + return parseDateMath(parser.text(), timeZone, clock); + } catch (ElasticsearchParseException epe) { + throw new ElasticsearchParseException("could not parse date/time. expected date field [{}] to be either a number or a " + + "DateMath string but found [{}] instead", epe, fieldName, parser.text()); + } + } + if (token == XContentParser.Token.VALUE_NULL) { + return null; + } + throw new ElasticsearchParseException("could not parse date/time. expected date field [{}] to be either a number or a string but " + + "found [{}] instead", fieldName, token); + } + + public static DateTime parseDateMath(String valueString, DateTimeZone timeZone, final Clock clock) { + return new DateTime(dateMathParser.parse(valueString, clock::millis), timeZone); + } + + public static DateTime parseDate(String fieldName, XContentParser parser, DateTimeZone timeZone) throws IOException { + XContentParser.Token token = parser.currentToken(); + if (token == XContentParser.Token.VALUE_NUMBER) { + return new DateTime(parser.longValue(), timeZone); + } + if (token == XContentParser.Token.VALUE_STRING) { + return parseDate(parser.text(), timeZone); + } + if (token == XContentParser.Token.VALUE_NULL) { + return null; + } + throw new ElasticsearchParseException("could not parse date/time. expected date field [{}] to be either a number or a string but " + + "found [{}] instead", fieldName, token); + } + + public static XContentBuilder writeDate(String fieldName, XContentBuilder builder, DateTime date) throws IOException { + if (date == null) { + return builder.nullField(fieldName); + } + return builder.field(fieldName, formatDate(date)); + } + + public static void writeDate(StreamOutput out, DateTime date) throws IOException { + out.writeLong(date.getMillis()); + } + + public static DateTime readDate(StreamInput in, DateTimeZone timeZone) throws IOException { + return new DateTime(in.readLong(), timeZone); + } + + public static void writeOptionalDate(StreamOutput out, DateTime date) throws IOException { + if (date == null) { + out.writeBoolean(false); + return; + } + out.writeBoolean(true); + out.writeLong(date.getMillis()); + } + + public static DateTime readOptionalDate(StreamInput in, DateTimeZone timeZone) throws IOException { + return in.readBoolean() ? 
new DateTime(in.readLong(), timeZone) : null; + } + + public static TimeValue parseTimeValue(XContentParser parser, String settingName) throws IOException { + final XContentParser.Token token = parser.currentToken(); + if (token == XContentParser.Token.VALUE_NULL) { + return null; + } + if (token == XContentParser.Token.VALUE_STRING) { + try { + TimeValue value = parseTimeValueSupportingFractional(parser.text(), settingName); + if (value.millis() < 0) { + throw new ElasticsearchParseException("could not parse time value [{}]. Time value cannot be negative.", parser.text()); + } + return value; + } catch (ElasticsearchParseException epe) { + throw new ElasticsearchParseException("failed to parse time unit", epe); + } + + } + throw new ElasticsearchParseException("could not parse time value. expected either a string or a null value but found [{}] " + + "instead", token); + } + + /** + * Parse a {@link TimeValue} with support for fractional values. + */ + public static TimeValue parseTimeValueSupportingFractional(@Nullable String sValue, String settingName) { + // TODO we can potentially remove this in 6.x + // This code is lifted almost straight from 2.x's TimeValue.java + Objects.requireNonNull(settingName); + if (sValue == null) { + return null; + } + try { + long millis; + String lowerSValue = sValue.toLowerCase(Locale.ROOT).trim(); + if (lowerSValue.endsWith("ms")) { + millis = (long) (Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 2))); + } else if (lowerSValue.endsWith("s")) { + millis = (long) (Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 1)) * 1000); + } else if (lowerSValue.endsWith("m")) { + millis = (long) (Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 1)) * 60 * 1000); + } else if (lowerSValue.endsWith("h")) { + millis = (long) (Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 1)) * 60 * 60 * 1000); + } else if (lowerSValue.endsWith("d")) { + millis = (long) (Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 1)) * 24 * 60 * 60 * 1000); + } else if (lowerSValue.endsWith("w")) { + millis = (long) (Double.parseDouble(lowerSValue.substring(0, lowerSValue.length() - 1)) * 7 * 24 * 60 * 60 * 1000); + } else if (lowerSValue.equals("-1")) { + // Allow this special value to be unit-less: + millis = -1; + } else if (lowerSValue.equals("0")) { + // Allow this special value to be unit-less: + millis = 0; + } else { + throw new ElasticsearchParseException( + "Failed to parse setting [{}] with value [{}] as a time value: unit is missing or unrecognized", + settingName, sValue); + } + return new TimeValue(millis, TimeUnit.MILLISECONDS); + } catch (NumberFormatException e) { + throw new ElasticsearchParseException("Failed to parse [{}]", e, sValue); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java new file mode 100644 index 0000000000000..4cf0898bae2ff --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.support; + +public final class WatcherIndexTemplateRegistryField { + // history (please add a comment why you increased the version here) + // version 1: initial + // version 2: added mappings for jira action + // version 3: include watch status in history + // version 6: upgrade to ES 6, removal of _status field + // version 7: add full exception stack traces for better debugging + // Note: if you change this, also inform the kibana team around the watcher-ui + public static final String INDEX_TEMPLATE_VERSION = "7"; + public static final String HISTORY_TEMPLATE_NAME = ".watch-history-" + INDEX_TEMPLATE_VERSION; + public static final String TRIGGERED_TEMPLATE_NAME = ".triggered_watches"; + public static final String WATCHES_TEMPLATE_NAME = ".watches"; + public static final String[] TEMPLATE_NAMES = new String[] { + HISTORY_TEMPLATE_NAME, TRIGGERED_TEMPLATE_NAME, WATCHES_TEMPLATE_NAME + }; + + private WatcherIndexTemplateRegistryField() {} +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherUtils.java new file mode 100644 index 0000000000000..15d56db578b09 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherUtils.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.support; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.joda.time.DateTime; + +import java.io.IOException; +import java.lang.reflect.Array; +import java.util.HashMap; +import java.util.Map; +import java.util.regex.Pattern; + +import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.formatDate; + +public final class WatcherUtils { + + private static final Pattern NO_WS_PATTERN = Pattern.compile("\\S+"); + + private WatcherUtils() { + } + + public static Map responseToData(ToXContentObject response) throws IOException { + return XContentHelper.convertToMap(XContentHelper.toXContent(response, XContentType.JSON, false), false, XContentType.JSON).v2(); + } + + public static Map flattenModel(Map map) { + Map result = new HashMap<>(); + flattenModel("", map, result); + return result; + } + + private static void flattenModel(String key, Object value, Map result) { + if (value == null) { + result.put(key, null); + return; + } + if (value instanceof Map) { + for (Map.Entry entry : ((Map) value).entrySet()) { + if ("".equals(key)) { + flattenModel(entry.getKey(), entry.getValue(), result); + } else { + flattenModel(key + "." + entry.getKey(), entry.getValue(), result); + } + } + return; + } + if (value instanceof Iterable) { + int i = 0; + for (Object item : (Iterable) value) { + flattenModel(key + "." + i++, item, result); + } + return; + } + if (value.getClass().isArray()) { + for (int i = 0; i < Array.getLength(value); i++) { + flattenModel(key + "." 
+ i, Array.get(value, i), result); + } + return; + } + if (value instanceof DateTime) { + result.put(key, formatDate((DateTime) value)); + return; + } + if (value instanceof TimeValue) { + result.put(key, String.valueOf(((TimeValue) value).getMillis())); + return; + } + result.put(key, String.valueOf(value)); + } + + public static boolean isValidId(String id) { + return Strings.isEmpty(id) == false && NO_WS_PATTERN.matcher(id).matches(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/ObjectPath.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/ObjectPath.java new file mode 100644 index 0000000000000..67ef405238aba --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/ObjectPath.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.support.xcontent; + +import org.elasticsearch.common.Strings; + +import java.lang.reflect.Array; +import java.util.List; +import java.util.Map; + +public class ObjectPath { + + private ObjectPath() { + } + + public static T eval(String path, Object object) { + return (T) evalContext(path, object); + } + + private static Object evalContext(String path, Object ctx) { + final String[] parts; + if (path == null || path.isEmpty()) parts = Strings.EMPTY_ARRAY; + else parts = path.split("\\."); + StringBuilder resolved = new StringBuilder(); + for (String part : parts) { + if (ctx == null) { + return null; + } + if (ctx instanceof Map) { + ctx = ((Map) ctx).get(part); + if (resolved.length() != 0) { + resolved.append("."); + } + resolved.append(part); + } else if (ctx instanceof List) { + try { + int index = Integer.parseInt(part); + ctx = ((List) ctx).get(index); + if (resolved.length() != 0) { + resolved.append("."); + } + resolved.append(part); + } catch (NumberFormatException nfe) { + return null; + } + } else if (ctx.getClass().isArray()) { + try { + int index = Integer.parseInt(part); + ctx = Array.get(ctx, index); + if (resolved.length() != 0) { + resolved.append("."); + } + resolved.append(part); + } catch (NumberFormatException nfe) { + return null; + } + } else { + return null; + } + } + return ctx; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherParams.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherParams.java new file mode 100644 index 0000000000000..a530108a6fae6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherParams.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.support.xcontent; + +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.xpack.core.watcher.watch.Watch; + +import java.util.HashMap; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.unmodifiableMap; + +public class WatcherParams extends ToXContent.DelegatingMapParams { + + public static final WatcherParams HIDE_SECRETS = WatcherParams.builder().hideSecrets(true).build(); + + private static final String HIDE_SECRETS_KEY = "hide_secrets"; + private static final String HIDE_HEADERS = "hide_headers"; + private static final String DEBUG_KEY = "debug"; + + public static boolean hideSecrets(ToXContent.Params params) { + return wrap(params).hideSecrets(); + } + + public static boolean debug(ToXContent.Params params) { + return wrap(params).debug(); + } + + public static boolean hideHeaders(ToXContent.Params params) { + return wrap(params).hideHeaders(); + } + + private WatcherParams(Map params, ToXContent.Params delegate) { + super(params, delegate); + } + + private boolean hideSecrets() { + return paramAsBoolean(HIDE_SECRETS_KEY, true); + } + + private boolean debug() { + return paramAsBoolean(DEBUG_KEY, false); + } + + private boolean hideHeaders() { + return paramAsBoolean(HIDE_HEADERS, true); + } + + public static WatcherParams wrap(ToXContent.Params params) { + return params instanceof WatcherParams ? + (WatcherParams) params : + new WatcherParams(emptyMap(), params); + } + + public static Builder builder() { + return builder(ToXContent.EMPTY_PARAMS); + } + + public static Builder builder(ToXContent.Params delegate) { + return new Builder(delegate); + } + + public static class Builder { + + private final ToXContent.Params delegate; + private final Map params = new HashMap<>(); + + private Builder(ToXContent.Params delegate) { + this.delegate = delegate; + } + + public Builder hideSecrets(boolean hideSecrets) { + params.put(HIDE_SECRETS_KEY, String.valueOf(hideSecrets)); + return this; + } + + public Builder hideHeaders(boolean hideHeaders) { + params.put(HIDE_HEADERS, String.valueOf(hideHeaders)); + return this; + } + + public Builder debug(boolean debug) { + params.put(DEBUG_KEY, String.valueOf(debug)); + return this; + } + + public Builder includeStatus(boolean includeStatus) { + params.put(Watch.INCLUDE_STATUS_KEY, String.valueOf(includeStatus)); + return this; + } + + public Builder put(String key, Object value) { + params.put(key, String.valueOf(value)); + return this; + } + + public WatcherParams build() { + return new WatcherParams(unmodifiableMap(new HashMap<>(params)), delegate); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java new file mode 100644 index 0000000000000..6b97512c23722 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java @@ -0,0 +1,304 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.core.watcher.support.xcontent;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.xcontent.DeprecationHandler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentLocation;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.xpack.core.watcher.common.secret.Secret;
+import org.elasticsearch.xpack.core.watcher.crypto.CryptoService;
+import org.joda.time.DateTime;
+
+import java.io.IOException;
+import java.nio.CharBuffer;
+import java.time.Clock;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * An xcontent parser that is used by watcher. This is a special parser that is
+ * aware of watcher services. In particular, it is aware of the {@link Clock} in use
+ * and of the CryptoService. The former (clock) may be used when the current time
+ * is required during the parse phase of a construct. The latter (crypto service) is used
+ * to encode secret values (e.g. passwords, security tokens, etc.) to {@link Secret}s.
+ * {@link Secret}s are encrypted values that are stored in memory and are decrypted
+ * on demand when needed.
+ */
+public class WatcherXContentParser implements XContentParser {
+
+    public static final String REDACTED_PASSWORD = "::es_redacted::";
+
+    public static Secret secretOrNull(XContentParser parser) throws IOException {
+        String text = parser.textOrNull();
+        if (text == null) {
+            return null;
+        }
+
+        char[] chars = text.toCharArray();
+        boolean isEncryptedAlready = text.startsWith(CryptoService.ENCRYPTED_TEXT_PREFIX);
+        if (isEncryptedAlready) {
+            return new Secret(chars);
+        }
+
+        if (parser instanceof WatcherXContentParser) {
+            WatcherXContentParser watcherParser = (WatcherXContentParser) parser;
+            if (REDACTED_PASSWORD.equals(text)) {
+                if (watcherParser.allowRedactedPasswords) {
+                    return null;
+                } else {
+                    throw new ElasticsearchParseException("found redacted password in field [{}]", parser.currentName());
+                }
+            } else if (watcherParser.cryptoService != null) {
+                return new Secret(watcherParser.cryptoService.encrypt(chars));
+            }
+        }
+
+        return new Secret(chars);
+    }
+
+    private final DateTime parseTime;
+    private final XContentParser parser;
+    @Nullable private final CryptoService cryptoService;
+    private final boolean allowRedactedPasswords;
+
+    public WatcherXContentParser(XContentParser parser, DateTime parseTime, @Nullable CryptoService cryptoService,
+                                 boolean allowRedactedPasswords) {
+        this.parseTime = parseTime;
+        this.parser = parser;
+        this.cryptoService = cryptoService;
+        this.allowRedactedPasswords = allowRedactedPasswords;
+    }
+
+    public DateTime getParseDateTime() { return parseTime; }
+
+    @Override
+    public XContentType contentType() {
+        return parser.contentType();
+    }
+
+    @Override
+    public Token nextToken() throws IOException {
+        return parser.nextToken();
+    }
+
+    @Override
+    public void skipChildren() throws IOException {
+        parser.skipChildren();
+    }
+
+    @Override
+    public Token currentToken() {
+        return parser.currentToken();
+    }
+
+    @Override
+    public String currentName() throws IOException {
+        return parser.currentName();
+    }
+
+    @Override
+    public Map<String, Object> map() throws IOException {
+        return parser.map();
+    }
+
+    @Override
+    public Map<String, Object> mapOrdered() throws IOException {
+        return
parser.mapOrdered(); + } + + @Override + public Map mapStrings() throws IOException { + return parser.mapStrings(); + } + + @Override + public Map mapStringsOrdered() throws IOException { + return parser.mapStringsOrdered(); + } + + @Override + public List list() throws IOException { + return parser.list(); + } + + @Override + public List listOrderedMap() throws IOException { + return parser.listOrderedMap(); + } + + @Override + public String text() throws IOException { + return parser.text(); + } + + @Override + public String textOrNull() throws IOException { + return parser.textOrNull(); + } + + @Override + public CharBuffer charBufferOrNull() throws IOException { + return parser.charBufferOrNull(); + } + + @Override + public CharBuffer charBuffer() throws IOException { + return parser.charBuffer(); + } + + @Override + public Object objectText() throws IOException { + return parser.objectText(); + } + + @Override + public Object objectBytes() throws IOException { + return parser.objectBytes(); + } + + @Override + public boolean hasTextCharacters() { + return parser.hasTextCharacters(); + } + + @Override + public char[] textCharacters() throws IOException { + return parser.textCharacters(); + } + + @Override + public int textLength() throws IOException { + return parser.textLength(); + } + + @Override + public int textOffset() throws IOException { + return parser.textOffset(); + } + + @Override + public Number numberValue() throws IOException { + return parser.numberValue(); + } + + @Override + public NumberType numberType() throws IOException { + return parser.numberType(); + } + + @Override + public short shortValue(boolean coerce) throws IOException { + return parser.shortValue(coerce); + } + + @Override + public int intValue(boolean coerce) throws IOException { + return parser.intValue(coerce); + } + + @Override + public long longValue(boolean coerce) throws IOException { + return parser.longValue(coerce); + } + + @Override + public float floatValue(boolean coerce) throws IOException { + return parser.floatValue(coerce); + } + + @Override + public double doubleValue(boolean coerce) throws IOException { + return parser.doubleValue(coerce); + } + + @Override + public short shortValue() throws IOException { + return parser.shortValue(); + } + + @Override + public int intValue() throws IOException { + return parser.intValue(); + } + + @Override + public long longValue() throws IOException { + return parser.longValue(); + } + + @Override + public float floatValue() throws IOException { + return parser.floatValue(); + } + + @Override + public double doubleValue() throws IOException { + return parser.doubleValue(); + } + + @Override + public boolean isBooleanValue() throws IOException { + return parser.isBooleanValue(); + } + + @Override + public boolean booleanValue() throws IOException { + return parser.booleanValue(); + } + + @Override + @SuppressWarnings("deprecated") + public boolean isBooleanValueLenient() throws IOException { + return parser.isBooleanValueLenient(); + } + + @Override + @SuppressWarnings("deprecated") + public boolean booleanValueLenient() throws IOException { + return parser.booleanValueLenient(); + } + + @Override + public byte[] binaryValue() throws IOException { + return parser.binaryValue(); + } + + @Override + public XContentLocation getTokenLocation() { + return parser.getTokenLocation(); + } + + @Override + public T namedObject(Class categoryClass, String name, Object context) throws IOException { + return parser.namedObject(categoryClass, name, context); + } + 
+ @Override + public NamedXContentRegistry getXContentRegistry() { + return parser.getXContentRegistry(); + } + + @Override + public boolean isClosed() { + return parser.isClosed(); + } + + @Override + public void close() throws IOException { + parser.close(); + } + + @Override + public DeprecationHandler getDeprecationHandler() { + return parser.getDeprecationHandler(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/XContentSource.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/XContentSource.java new file mode 100644 index 0000000000000..1661c5c27ee40 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/XContentSource.java @@ -0,0 +1,136 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.support.xcontent; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.watcher.common.xcontent.XContentUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; +import java.util.Map; + +/** + * Encapsulates the xcontent source + */ +public class XContentSource implements ToXContent { + + private final BytesReference bytes; + private final XContentType contentType; + private Object data; + + /** + * Constructs a new XContentSource out of the given bytes reference. + */ + public XContentSource(BytesReference bytes, XContentType xContentType) throws ElasticsearchParseException { + if (xContentType == null) { + throw new IllegalArgumentException("xContentType must not be null"); + } + this.bytes = bytes; + this.contentType = xContentType; + } + + /** + * Constructs a new xcontent source from the bytes of the given xcontent builder + */ + public XContentSource(XContentBuilder builder) { + this(BytesReference.bytes(builder), builder.contentType()); + } + + /** + * @return The bytes reference of the source + */ + public BytesReference getBytes() { + return bytes; + } + + /** + * @return true if the top level value of the source is a map + */ + public boolean isMap() { + return data() instanceof Map; + } + + /** + * @return The source as a map + */ + public Map getAsMap() { + return (Map) data(); + } + + /** + * @return true if the top level value of the source is a list + */ + public boolean isList() { + return data() instanceof List; + } + + /** + * @return The source as a list + */ + public List getAsList() { + return (List) data(); + } + + /** + * Extracts a value identified by the given path in the source. 
+ * + * @param path a dot notation path to the requested value + * @return The extracted value or {@code null} if no value is associated with the given path + */ + public T getValue(String path) { + return (T) ObjectPath.eval(path, data()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + // EMPTY is safe here because we never use namedObject + try (InputStream stream = bytes.streamInput(); + XContentParser parser = parser(NamedXContentRegistry.EMPTY, stream)) { + parser.nextToken(); + builder.generator().copyCurrentStructure(parser); + return builder; + } + } + + public XContentParser parser(NamedXContentRegistry xContentRegistry, InputStream stream) throws IOException { + return contentType.xContent().createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream); + } + + public static XContentSource readFrom(StreamInput in) throws IOException { + return new XContentSource(in.readBytesReference(), in.readEnum(XContentType.class)); + } + + public static void writeTo(XContentSource source, StreamOutput out) throws IOException { + out.writeBytesReference(source.bytes); + out.writeEnum(source.contentType); + } + + private Object data() { + if (data == null) { + // EMPTY is safe here because we never use namedObject + try (InputStream stream = bytes.streamInput(); + XContentParser parser = parser(NamedXContentRegistry.EMPTY, stream)) { + data = XContentUtils.readValue(parser, parser.nextToken()); + } catch (IOException ex) { + throw new ElasticsearchException("failed to read value", ex); + } + } + return data; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/ExecutableTransform.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/ExecutableTransform.java new file mode 100644 index 0000000000000..85dd4a916713b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/ExecutableTransform.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.transform; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; + +import java.io.IOException; + +public abstract class ExecutableTransform implements ToXContentFragment { + + protected final T transform; + protected final Logger logger; + + public ExecutableTransform(T transform, Logger logger) { + this.transform = transform; + this.logger = logger; + } + + public final String type() { + return transform.type(); + } + + public T transform() { + return transform; + } + + public abstract R execute(WatchExecutionContext ctx, Payload payload); + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return transform.toXContent(builder, params); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ExecutableTransform that = (ExecutableTransform) o; + + return transform.equals(that.transform); + } + + @Override + public int hashCode() { + return transform.hashCode(); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/Transform.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/Transform.java new file mode 100644 index 0000000000000..b3e318aad6bd2 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/Transform.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.transform; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.watcher.watch.Payload; + +import java.io.IOException; +import java.util.Locale; + +public interface Transform extends ToXContentObject { + + ParseField TRANSFORM = new ParseField("transform"); + + String type(); + + abstract class Result implements ToXContentObject { + + private static final ParseField TYPE = new ParseField("type"); + private static final ParseField STATUS = new ParseField("status"); + private static final ParseField PAYLOAD = new ParseField("payload"); + private static final ParseField REASON = new ParseField("reason"); + + public enum Status { + SUCCESS, FAILURE + } + + protected final String type; + protected final Status status; + @Nullable protected final Payload payload; + @Nullable protected final String reason; + @Nullable protected final Exception exception; + + public Result(String type, Payload payload) { + this.type = type; + this.status = Status.SUCCESS; + this.payload = payload; + this.reason = null; + this.exception = null; + } + + public Result(String type, String reason) { + this.type = type; + this.status = Status.FAILURE; + this.reason = reason; + this.payload = null; + this.exception = null; + } + + public Result(String type, Exception e) { + this.type = type; + this.status = Status.FAILURE; + this.reason = e.getMessage(); + this.payload = null; + this.exception = e; + } + + public String type() { + return type; + } + + public Status status() { + return status; + } + + public Payload payload() { + assert status == Status.SUCCESS; + return payload; + } + + public String reason() { + assert status == Status.FAILURE; + return reason; + } + + @Override + public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(TYPE.getPreferredName(), type); + builder.field(STATUS.getPreferredName(), status.name().toLowerCase(Locale.ROOT)); + switch (status) { + case SUCCESS: + assert exception == null; + builder.field(PAYLOAD.getPreferredName(), payload, params); + break; + case FAILURE: + assert payload == null; + builder.field(REASON.getPreferredName(), reason); + ElasticsearchException.generateFailureXContent(builder, params, exception, true); + break; + default: + assert false; + } + typeXContent(builder, params); + return builder.endObject(); + } + + protected abstract XContentBuilder typeXContent(XContentBuilder builder, Params params) throws IOException; + + } + + interface Builder { + + T build(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/TransformFactory.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/TransformFactory.java new file mode 100644 index 0000000000000..3d15431c45194 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/TransformFactory.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.core.watcher.transform;
+
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+public abstract class TransformFactory<T extends Transform, R extends Transform.Result, E extends ExecutableTransform<T, R>> {
+
+    protected final Logger transformLogger;
+
+    public TransformFactory(Logger transformLogger) {
+        this.transformLogger = transformLogger;
+    }
+
+    /**
+     * @return The type of the transform
+     */
+    public abstract String type();
+
+    /**
+     * Parses the given xcontent and creates a concrete transform
+     *
+     * @param watchId The id of the watch
+     * @param parser  The parser that contains the transform content
+     */
+    public abstract T parseTransform(String watchId, XContentParser parser) throws IOException;
+
+    /**
+     * Creates an executable transform out of the given transform.
+     */
+    public abstract E createExecutable(T transform);
+
+    public E parseExecutable(String watchId, XContentParser parser) throws IOException {
+        T transform = parseTransform(watchId, parser);
+        return createExecutable(transform);
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/TransformRegistry.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/TransformRegistry.java
new file mode 100644
index 0000000000000..4d729c10c7d5b
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/TransformRegistry.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.watcher.transform;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.xpack.core.watcher.transform.chain.ChainTransform;
+import org.elasticsearch.xpack.core.watcher.transform.chain.ChainTransformFactory;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+public class TransformRegistry {
+
+    private final Map<String, TransformFactory> factories;
+
+    public TransformRegistry(Settings settings, Map<String, TransformFactory> factories) {
+        Map<String, TransformFactory> map = new HashMap<>(factories);
+        map.put(ChainTransform.TYPE, new ChainTransformFactory(settings, this));
+        this.factories = Collections.unmodifiableMap(map);
+    }
+
+    public TransformFactory factory(String type) {
+        return factories.get(type);
+    }
+
+    public ExecutableTransform parse(String watchId, XContentParser parser) throws IOException {
+        String type = null;
+        XContentParser.Token token;
+        ExecutableTransform transform = null;
+        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+            if (token == XContentParser.Token.FIELD_NAME) {
+                type = parser.currentName();
+            } else if (type != null) {
+                transform = parse(watchId, type, parser);
+            }
+        }
+        return transform;
+    }
+
+    private ExecutableTransform parse(String watchId, String type, XContentParser parser) throws IOException {
+        TransformFactory factory = factories.get(type);
+        if (factory == null) {
+            throw new ElasticsearchParseException("could not parse transform for watch [{}], unknown transform type [{}]", watchId, type);
+        }
+        return factory.parseExecutable(watchId, parser);
+    }
+
+    public Transform parseTransform(String watchId, String type, XContentParser parser) throws IOException {
+
TransformFactory factory = factories.get(type); + if (factory == null) { + throw new ElasticsearchParseException("could not parse transform for watch [{}], unknown transform type [{}]", watchId, type); + } + return factory.parseTransform(watchId, parser); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/chain/ChainTransform.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/chain/ChainTransform.java new file mode 100644 index 0000000000000..e150f58bc5539 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/chain/ChainTransform.java @@ -0,0 +1,168 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transform.chain; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.transform.Transform; +import org.elasticsearch.xpack.core.watcher.transform.TransformRegistry; +import org.elasticsearch.xpack.core.watcher.watch.Payload; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +public class ChainTransform implements Transform { + + public static final String TYPE = "chain"; + + private final List transforms; + + public ChainTransform(Transform... transforms) { + this(Arrays.asList(transforms)); + } + + public ChainTransform(List transforms) { + this.transforms = Collections.unmodifiableList(transforms); + } + + @Override + public String type() { + return TYPE; + } + + public List getTransforms() { + return transforms; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ChainTransform that = (ChainTransform) o; + + return transforms.equals(that.transforms); + } + + @Override + public int hashCode() { + return transforms.hashCode(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray(); + for (Transform transform : transforms) { + builder.startObject() + .field(transform.type(), transform, params) + .endObject(); + } + return builder.endArray(); + } + + static ChainTransform parse(String watchId, XContentParser parser, TransformRegistry transformRegistry) throws IOException { + XContentParser.Token token = parser.currentToken(); + if (token != XContentParser.Token.START_ARRAY) { + throw new ElasticsearchParseException("could not parse [{}] transform for watch [{}]. expected an array of transform objects," + + " but found [{}] instead", TYPE, watchId, token); + } + + List transforms = new ArrayList<>(); + + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("could not parse [{}] transform for watch [{}]. 
expected a transform object, but " + + "found [{}] instead", TYPE, watchId, token); + } + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else { + transforms.add(transformRegistry.parseTransform(watchId, currentFieldName, parser)); + } + } + } + return new ChainTransform(transforms); + } + + public static Builder builder(Transform... transforms) { + return new Builder(transforms); + } + + public static class Result extends Transform.Result { + + private final List results; + + public Result(Payload payload, List results) { + super(TYPE, payload); + this.results = Collections.unmodifiableList(results); + } + + public Result(Exception e, List results) { + super(TYPE, e); + this.results = Collections.unmodifiableList(results); + } + + public Result(String errorMessage, List results) { + super(TYPE, errorMessage); + this.results = Collections.unmodifiableList(results); + } + + public List results() { + return results; + } + + @Override + protected XContentBuilder typeXContent(XContentBuilder builder, Params params) throws IOException { + if (!results.isEmpty()) { + builder.startObject(type); + builder.startArray(Field.RESULTS.getPreferredName()); + for (Transform.Result result : results) { + result.toXContent(builder, params); + } + builder.endArray(); + builder.endObject(); + } + return builder; + } + } + + public static class Builder implements Transform.Builder { + + private final List transforms = new ArrayList<>(); + + public Builder(Transform... transforms) { + add(transforms); + } + + public Builder add(Transform... transforms) { + Collections.addAll(this.transforms, transforms); + return this; + } + + public Builder add(Transform.Builder... transforms) { + for (Transform.Builder transform: transforms) { + this.transforms.add(transform.build()); + } + return this; + } + + @Override + public ChainTransform build() { + return new ChainTransform(transforms); + } + } + + interface Field { + ParseField RESULTS = new ParseField("results"); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/chain/ChainTransformFactory.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/chain/ChainTransformFactory.java new file mode 100644 index 0000000000000..403f1d02909d5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/chain/ChainTransformFactory.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
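// Illustrative sketch (assumed helper, not in the original change): composing a chain
// from two existing transforms via the ChainTransform.Builder defined above.
static ChainTransform chainOf(Transform first, Transform second) {
    return ChainTransform.builder(first, second).build();
}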
+ */ +package org.elasticsearch.xpack.core.watcher.transform.chain; + +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.transform.ExecutableTransform; +import org.elasticsearch.xpack.core.watcher.transform.Transform; +import org.elasticsearch.xpack.core.watcher.transform.TransformFactory; +import org.elasticsearch.xpack.core.watcher.transform.TransformRegistry; + +import java.io.IOException; +import java.util.ArrayList; + +public final class ChainTransformFactory extends TransformFactory { + + private final TransformRegistry registry; + + public ChainTransformFactory(Settings settings, TransformRegistry registry) { + super(Loggers.getLogger(ExecutableChainTransform.class, settings)); + this.registry = registry; + } + + @Override + public String type() { + return ChainTransform.TYPE; + } + + @Override + public ChainTransform parseTransform(String watchId, XContentParser parser) throws IOException { + return ChainTransform.parse(watchId, parser, registry); + } + + @Override + public ExecutableChainTransform createExecutable(ChainTransform chainTransform) { + ArrayList executables = new ArrayList<>(); + for (Transform transform : chainTransform.getTransforms()) { + TransformFactory factory = registry.factory(transform.type()); + executables.add(factory.createExecutable(transform)); + } + return new ExecutableChainTransform(chainTransform, transformLogger, executables); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/chain/ExecutableChainTransform.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/chain/ExecutableChainTransform.java new file mode 100644 index 0000000000000..5887a2bdd6d38 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/chain/ExecutableChainTransform.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transform.chain; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.transform.ExecutableTransform; +import org.elasticsearch.xpack.core.watcher.transform.Transform; +import org.elasticsearch.xpack.core.watcher.watch.Payload; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.common.logging.LoggerMessageFormat.format; +import static org.elasticsearch.xpack.core.watcher.transform.chain.ChainTransform.TYPE; + +public class ExecutableChainTransform extends ExecutableTransform { + + private final List transforms; + + public ExecutableChainTransform(ChainTransform transform, Logger logger, ExecutableTransform... 
transforms) { + this(transform, logger, Arrays.asList(transforms)); + } + + public ExecutableChainTransform(ChainTransform transform, Logger logger, List transforms) { + super(transform, logger); + this.transforms = Collections.unmodifiableList(transforms); + } + + public List executableTransforms() { + return transforms; + } + + @Override + public ChainTransform.Result execute(WatchExecutionContext ctx, Payload payload) { + List results = new ArrayList<>(); + try { + return doExecute(ctx, payload, results); + } catch (Exception e) { + logger.error((Supplier) () -> new ParameterizedMessage("failed to execute [{}] transform for [{}]", TYPE, ctx.id()), e); + return new ChainTransform.Result(e, results); + } + } + + + ChainTransform.Result doExecute(WatchExecutionContext ctx, Payload payload, List results) throws IOException { + for (ExecutableTransform transform : transforms) { + Transform.Result result = transform.execute(ctx, payload); + results.add(result); + if (result.status() == Transform.Result.Status.FAILURE) { + return new ChainTransform.Result(format("failed to execute [{}] transform for [{}]. failed to execute sub-transform [{}]", + ChainTransform.TYPE, ctx.id(), transform.type()), results); + } + payload = result.payload(); + } + return new ChainTransform.Result(payload, results); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchAction.java new file mode 100644 index 0000000000000..d895ae31c5014 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchAction.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.ack; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * This action acks a watch in memory, and the index + */ +public class AckWatchAction extends Action { + + public static final AckWatchAction INSTANCE = new AckWatchAction(); + public static final String NAME = "cluster:admin/xpack/watcher/watch/ack"; + + private AckWatchAction() { + super(NAME); + } + + @Override + public AckWatchResponse newResponse() { + return new AckWatchResponse(); + } + + @Override + public AckWatchRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new AckWatchRequestBuilder(client); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchRequest.java new file mode 100644 index 0000000000000..d56bb5779e229 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchRequest.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.transport.actions.ack; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.watcher.support.WatcherUtils; + +import java.io.IOException; +import java.util.Locale; + +/** + * A ack watch request to ack a watch by name (id) + */ +public class AckWatchRequest extends ActionRequest { + + private String watchId; + private String[] actionIds = Strings.EMPTY_ARRAY; + + public AckWatchRequest() { + this(null, (String[]) null); + } + + public AckWatchRequest(String watchId, String... actionIds) { + this.watchId = watchId; + this.actionIds = actionIds; + } + + public AckWatchRequest(StreamInput in) throws IOException { + super(in); + watchId = in.readString(); + actionIds = in.readStringArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(watchId); + out.writeStringArray(actionIds); + } + + /** + * @return The id of the watch to be acked + */ + public String getWatchId() { + return watchId; + } + + /** + * @param actionIds The ids of the actions to be acked + */ + public void setActionIds(String... actionIds) { + this.actionIds = actionIds; + } + + /** + * @return The ids of the actions to be acked + */ + public String[] getActionIds() { + return actionIds; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (watchId == null){ + validationException = ValidateActions.addValidationError("watch id is missing", validationException); + } else if (WatcherUtils.isValidId(watchId) == false) { + validationException = ValidateActions.addValidationError("watch id contains whitespace", validationException); + } + if (actionIds != null) { + for (String actionId : actionIds) { + if (actionId == null) { + validationException = ValidateActions.addValidationError( + String.format(Locale.ROOT, "action id may not be null"), validationException); + } else if (WatcherUtils.isValidId(actionId) == false) { + validationException = ValidateActions.addValidationError( + String.format(Locale.ROOT, "action id [%s] contains whitespace", actionId), validationException); + } + } + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("ack [").append(watchId).append("]"); + if (actionIds.length > 0) { + sb.append("["); + for (int i = 0; i < actionIds.length; i++) { + if (i > 0) { + sb.append(", "); + } + sb.append(actionIds[i]); + } + sb.append("]"); + } + return sb.toString(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchRequestBuilder.java new file mode 100644 index 0000000000000..16e0d20eaf3f0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchRequestBuilder.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.ack; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * A ack watch action request builder. + */ +public class AckWatchRequestBuilder extends ActionRequestBuilder { + + public AckWatchRequestBuilder(ElasticsearchClient client) { + super(client, AckWatchAction.INSTANCE, new AckWatchRequest()); + } + + public AckWatchRequestBuilder(ElasticsearchClient client, String id) { + super(client, AckWatchAction.INSTANCE, new AckWatchRequest(id)); + } + + public AckWatchRequestBuilder setActionIds(String... actionIds) { + request.setActionIds(actionIds); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchResponse.java new file mode 100644 index 0000000000000..188c49963151f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/ack/AckWatchResponse.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.ack; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; + +import java.io.IOException; + +/** + * This class contains the status of the watch. If the watch was successfully acked + * this will be reflected in the watch status. + */ +public class AckWatchResponse extends ActionResponse { + + private WatchStatus status; + + public AckWatchResponse() { + } + + public AckWatchResponse(@Nullable WatchStatus status) { + this.status = status; + } + + /** + * @return The watch status + */ + public WatchStatus getStatus() { + return status; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + status = in.readBoolean() ? WatchStatus.read(in) : null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(status != null); + if (status != null) { + status.writeTo(out); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchAction.java new file mode 100644 index 0000000000000..2832b779c402c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchAction.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
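// Illustrative sketch (assumed helper and example ids): acking a single action of a
// watch through the AckWatchRequestBuilder shown above.
static AckWatchRequestBuilder ackEmailAction(ElasticsearchClient client) {
    return new AckWatchRequestBuilder(client, "my-watch").setActionIds("email_admin");
}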
+ */ +package org.elasticsearch.xpack.core.watcher.transport.actions.activate; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * This action acks a watch in memory, and the index + */ +public class ActivateWatchAction extends Action { + + public static final ActivateWatchAction INSTANCE = new ActivateWatchAction(); + public static final String NAME = "cluster:admin/xpack/watcher/watch/activate"; + + private ActivateWatchAction() { + super(NAME); + } + + @Override + public ActivateWatchResponse newResponse() { + return new ActivateWatchResponse(); + } + + @Override + public ActivateWatchRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new ActivateWatchRequestBuilder(client); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchRequest.java new file mode 100644 index 0000000000000..326c951bedfac --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchRequest.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.activate; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.watcher.support.WatcherUtils; + +import java.io.IOException; + +/** + * A ack watch request to ack a watch by name (id) + */ +public class ActivateWatchRequest extends ActionRequest { + + private String watchId; + private boolean activate; + + public ActivateWatchRequest() { + this(null, true); + } + + public ActivateWatchRequest(String watchId, boolean activate) { + this.watchId = watchId; + this.activate = activate; + } + + public ActivateWatchRequest(StreamInput in) throws IOException { + super(in); + watchId = in.readString(); + activate = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(watchId); + out.writeBoolean(activate); + } + + /** + * @return The id of the watch to be acked + */ + public String getWatchId() { + return watchId; + } + + /** + * @return {@code true} if the request is for activating the watch, {@code false} if its + * for deactivating it. 
+ */ + public boolean isActivate() { + return activate; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (watchId == null){ + validationException = ValidateActions.addValidationError("watch id is missing", validationException); + } else if (WatcherUtils.isValidId(watchId) == false) { + validationException = ValidateActions.addValidationError("watch id contains whitespace", validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public String toString() { + return activate ? + "activate [" + watchId + "]" : + "deactivate [" + watchId + "]"; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchRequestBuilder.java new file mode 100644 index 0000000000000..0122b055ae060 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchRequestBuilder.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.activate; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * A activate watch action request builder. + */ +public class ActivateWatchRequestBuilder extends ActionRequestBuilder { + + public ActivateWatchRequestBuilder(ElasticsearchClient client) { + super(client, ActivateWatchAction.INSTANCE, new ActivateWatchRequest()); + } + + public ActivateWatchRequestBuilder(ElasticsearchClient client, String id, boolean activate) { + super(client, ActivateWatchAction.INSTANCE, new ActivateWatchRequest(id, activate)); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchResponse.java new file mode 100644 index 0000000000000..0c92fc046722a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/activate/ActivateWatchResponse.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.activate; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; + +import java.io.IOException; + +/** + * This class contains the status of the watch. If the watch was successfully de/activates + * this will reflected the new state of the watch. 
+ */ +public class ActivateWatchResponse extends ActionResponse { + + private WatchStatus status; + + public ActivateWatchResponse() { + } + + public ActivateWatchResponse(@Nullable WatchStatus status) { + this.status = status; + } + + /** + * @return The watch status + */ + public WatchStatus getStatus() { + return status; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + status = in.readBoolean() ? WatchStatus.read(in) : null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(status != null); + if (status != null) { + status.writeTo(out); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/delete/DeleteWatchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/delete/DeleteWatchAction.java new file mode 100644 index 0000000000000..ffbe6539f60d9 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/delete/DeleteWatchAction.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.delete; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * This action deletes an watch from in memory, the scheduler and the index + */ +public class DeleteWatchAction extends Action { + + public static final DeleteWatchAction INSTANCE = new DeleteWatchAction(); + public static final String NAME = "cluster:admin/xpack/watcher/watch/delete"; + + private DeleteWatchAction() { + super(NAME); + } + + @Override + public DeleteWatchResponse newResponse() { + return new DeleteWatchResponse(); + } + + @Override + public DeleteWatchRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new DeleteWatchRequestBuilder(client); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/delete/DeleteWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/delete/DeleteWatchRequest.java new file mode 100644 index 0000000000000..f8c1a71f1ebc7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/delete/DeleteWatchRequest.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.transport.actions.delete; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.xpack.core.watcher.support.WatcherUtils; + +import java.io.IOException; + +/** + * A delete watch request to delete an watch by name (id) + */ +public class DeleteWatchRequest extends ActionRequest { + + private String id; + private long version = Versions.MATCH_ANY; + + public DeleteWatchRequest() { + this(null); + } + + public DeleteWatchRequest(String id) { + this.id = id; + } + + /** + * @return The name of the watch to be deleted + */ + public String getId() { + return id; + } + + /** + * Sets the name of the watch to be deleted + */ + public void setId(String id) { + this.id = id; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (id == null){ + validationException = ValidateActions.addValidationError("watch id is missing", validationException); + } else if (WatcherUtils.isValidId(id) == false) { + validationException = ValidateActions.addValidationError("watch id contains whitespace", validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + id = in.readString(); + version = in.readLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + out.writeLong(version); + } + + @Override + public String toString() { + return "delete [" + id + "]"; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/delete/DeleteWatchRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/delete/DeleteWatchRequestBuilder.java new file mode 100644 index 0000000000000..1ba3fe12ea47d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/delete/DeleteWatchRequestBuilder.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.delete; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * A delete document action request builder. 
+ */ +public class DeleteWatchRequestBuilder extends ActionRequestBuilder { + + public DeleteWatchRequestBuilder(ElasticsearchClient client) { + super(client, DeleteWatchAction.INSTANCE, new DeleteWatchRequest()); + } + + public DeleteWatchRequestBuilder(ElasticsearchClient client, String id) { + super(client, DeleteWatchAction.INSTANCE, new DeleteWatchRequest(id)); + } + + /** + * Sets the id of the watch to be deleted + */ + public DeleteWatchRequestBuilder setId(String id) { + this.request().setId(id); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/delete/DeleteWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/delete/DeleteWatchResponse.java new file mode 100644 index 0000000000000..bda5ecabaa022 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/delete/DeleteWatchResponse.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.delete; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public class DeleteWatchResponse extends ActionResponse { + + private String id; + private long version; + private boolean found; + + public DeleteWatchResponse() { + } + + public DeleteWatchResponse(String id, long version, boolean found) { + this.id = id; + this.version = version; + this.found = found; + } + + public String getId() { + return id; + } + + public long getVersion() { + return version; + } + + public boolean isFound() { + return found; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + id = in.readString(); + version = in.readVLong(); + found = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + out.writeVLong(version); + out.writeBoolean(found); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchAction.java new file mode 100644 index 0000000000000..f8a8b009e44b7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchAction.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.transport.actions.execute; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * This action executes a watch, either ignoring the schedule and condition or just the schedule and can execute a subset of the actions, + * optionally persisting the history entry + */ +public class ExecuteWatchAction extends Action { + + public static final ExecuteWatchAction INSTANCE = new ExecuteWatchAction(); + public static final String NAME = "cluster:admin/xpack/watcher/watch/execute"; + + private ExecuteWatchAction() { + super(NAME); + } + + @Override + public ExecuteWatchResponse newResponse() { + return new ExecuteWatchResponse(); + } + + @Override + public ExecuteWatchRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new ExecuteWatchRequestBuilder(client); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java new file mode 100644 index 0000000000000..bece3e5a6f50a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java @@ -0,0 +1,281 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.execute; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; +import org.elasticsearch.xpack.core.watcher.execution.ActionExecutionMode; +import org.elasticsearch.xpack.core.watcher.support.WatcherUtils; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +/** + * An execute watch request to execute a watch by id + */ +public class ExecuteWatchRequest extends ActionRequest { + + public static final String INLINE_WATCH_ID = "_inlined_"; + + private String id; + private boolean ignoreCondition = false; + private boolean recordExecution = false; + @Nullable private Map triggerData = null; + @Nullable private Map alternativeInput = null; + private Map actionModes = new HashMap<>(); + private BytesReference watchSource; + private XContentType xContentType = XContentType.JSON; + + private boolean debug = false; + + public ExecuteWatchRequest() { + } + + /** + * @param id the id of the watch to execute + */ + public ExecuteWatchRequest(String id) { + this.id = id; + } + + public ExecuteWatchRequest(StreamInput in) throws IOException { + super(in); + id = in.readOptionalString(); + ignoreCondition = in.readBoolean(); + recordExecution = in.readBoolean(); + if (in.readBoolean()){ + alternativeInput = in.readMap(); + } + if (in.readBoolean()) { + triggerData = in.readMap(); + } + long 
actionModesCount = in.readLong(); + actionModes = new HashMap<>(); + for (int i = 0; i < actionModesCount; i++) { + actionModes.put(in.readString(), ActionExecutionMode.resolve(in.readByte())); + } + if (in.readBoolean()) { + watchSource = in.readBytesReference(); + xContentType = in.readEnum(XContentType.class); + } + debug = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalString(id); + out.writeBoolean(ignoreCondition); + out.writeBoolean(recordExecution); + out.writeBoolean(alternativeInput != null); + if (alternativeInput != null) { + out.writeMap(alternativeInput); + } + out.writeBoolean(triggerData != null); + if (triggerData != null) { + out.writeMap(triggerData); + } + out.writeLong(actionModes.size()); + for (Map.Entry entry : actionModes.entrySet()) { + out.writeString(entry.getKey()); + out.writeByte(entry.getValue().id()); + } + out.writeBoolean(watchSource != null); + if (watchSource != null) { + out.writeBytesReference(watchSource); + out.writeEnum(xContentType); + } + out.writeBoolean(debug); + } + + /** + * @return The id of the watch to be executed + */ + public String getId() { + return id; + } + + /** + * Sets the id of the watch to be executed + */ + public void setId(String id) { + this.id = id; + } + + /** + * @return Should the condition for this execution be ignored + */ + public boolean isIgnoreCondition() { + return ignoreCondition; + } + + /** + * @param ignoreCondition set if the condition for this execution be ignored + */ + public void setIgnoreCondition(boolean ignoreCondition) { + this.ignoreCondition = ignoreCondition; + } + + /** + * @return Should this execution be recorded in the history index + */ + public boolean isRecordExecution() { + return recordExecution; + } + + /** + * @param recordExecution Sets if this execution be recorded in the history index + */ + public void setRecordExecution(boolean recordExecution) { + this.recordExecution = recordExecution; + } + + /** + * @return The alertnative input to use (may be null) + */ + public Map getAlternativeInput() { + return alternativeInput; + } + + /** + * @param alternativeInput Set's the alernative input + */ + public void setAlternativeInput(Map alternativeInput) { + this.alternativeInput = alternativeInput; + } + + /** + * @param data The data that should be associated with the trigger event. 
+ */ + public void setTriggerData(Map data) throws IOException { + this.triggerData = data; + } + + /** + * @param event the trigger event to use + */ + public void setTriggerEvent(TriggerEvent event) throws IOException { + setTriggerData(event.data()); + } + + /** + * @return the trigger to use + */ + public Map getTriggerData() { + return triggerData; + } + + /** + * @return the source of the watch to execute + */ + public BytesReference getWatchSource() { + return watchSource; + } + + public XContentType getXContentType() { + return xContentType; + } + + /** + * @param watchSource instead of using an existing watch use this non persisted watch + */ + public void setWatchSource(BytesReference watchSource, XContentType xContentType) { + this.watchSource = watchSource; + this.xContentType = xContentType; + } + + /** + * @param watchSource instead of using an existing watch use this non persisted watch + */ + public void setWatchSource(WatchSourceBuilder watchSource) { + this.watchSource = watchSource.buildAsBytes(XContentType.JSON); + this.xContentType = XContentType.JSON; + } + + /** + * @return the execution modes for the actions. These modes determine the nature of the execution + * of the watch actions while the watch is executing. + */ + public Map getActionModes() { + return actionModes; + } + + /** + * Sets the action execution mode for the give action (identified by its id). + * + * @param actionId the action id. + * @param actionMode the execution mode of the action. + */ + public void setActionMode(String actionId, ActionExecutionMode actionMode) { + actionModes.put(actionId, actionMode); + } + + /** + * @return whether the watch should execute in debug mode. In debug mode the execution {@code vars} + * will be returned as part of the watch record. + */ + public boolean isDebug() { + return debug; + } + + /** + * @param debug indicates whether the watch should execute in debug mode. 
In debug mode the + * returned watch record will hold the execution {@code vars} + */ + public void setDebug(boolean debug) { + this.debug = debug; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (id == null && watchSource == null){ + validationException = ValidateActions.addValidationError("a watch execution request must either have a watch id or an inline " + + "watch source, but both are missing", validationException); + } + if (id != null && WatcherUtils.isValidId(id) == false) { + validationException = ValidateActions.addValidationError("watch id contains whitespace", validationException); + } + for (String actionId : actionModes.keySet()) { + if (actionId == null) { + validationException = ValidateActions.addValidationError( + String.format(Locale.ROOT, "action id may not be null"), validationException); + } else if (WatcherUtils.isValidId(actionId) == false) { + validationException = ValidateActions.addValidationError( + String.format(Locale.ROOT, "action id [%s] contains whitespace", actionId), validationException); + } + } + if (watchSource != null && id != null) { + validationException = ValidateActions.addValidationError("a watch execution request must either have a watch id or an inline " + + "watch source but not both", validationException); + } + if (watchSource != null && recordExecution) { + validationException = ValidateActions.addValidationError("the execution of an inline watch cannot be recorded", + validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public String toString() { + return "execute[" + id + "]"; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequestBuilder.java new file mode 100644 index 0000000000000..ee1f94da3df0d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequestBuilder.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.execute; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; +import org.elasticsearch.xpack.core.watcher.execution.ActionExecutionMode; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; + +import java.io.IOException; +import java.util.Map; + +/** + * A execute watch action request builder. 
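// Illustrative sketch (assumed helper, ids, and mode constant): executing a stored watch
// while ignoring its condition and simulating one action, using the request defined above.
static ExecuteWatchRequest exampleExecuteRequest() {
    ExecuteWatchRequest request = new ExecuteWatchRequest("my-watch");
    request.setIgnoreCondition(true);                                   // skip the condition check
    request.setActionMode("email_admin", ActionExecutionMode.SIMULATE); // assumed mode constant
    request.setDebug(true);                                             // keep execution vars in the record
    return request;
}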
+ */ +public class ExecuteWatchRequestBuilder extends ActionRequestBuilder { + + public ExecuteWatchRequestBuilder(ElasticsearchClient client) { + super(client, ExecuteWatchAction.INSTANCE, new ExecuteWatchRequest()); + } + + public ExecuteWatchRequestBuilder(ElasticsearchClient client, String watchName) { + super(client, ExecuteWatchAction.INSTANCE, new ExecuteWatchRequest(watchName)); + } + + /** + * Sets the id of the watch to be executed + */ + public ExecuteWatchRequestBuilder setId(String id) { + this.request().setId(id); + return this; + } + + /** + * @param ignoreCondition set if the condition for this execution be ignored + */ + public ExecuteWatchRequestBuilder setIgnoreCondition(boolean ignoreCondition) { + request.setIgnoreCondition(ignoreCondition); + return this; + } + + /** + * @param recordExecution Sets if this execution be recorded in the history index and reflected in the watch + */ + public ExecuteWatchRequestBuilder setRecordExecution(boolean recordExecution) { + request.setRecordExecution(recordExecution); + return this; + } + + /** + * @param alternativeInput Set's the alernative input + */ + public ExecuteWatchRequestBuilder setAlternativeInput(Map alternativeInput) { + request.setAlternativeInput(alternativeInput); + return this; + } + + /** + * @param data The data that should be associated with the trigger event + */ + public ExecuteWatchRequestBuilder setTriggerData(Map data) throws IOException { + request.setTriggerData(data); + return this; + } + + /** + * @param triggerEvent the trigger event to use + */ + public ExecuteWatchRequestBuilder setTriggerEvent(TriggerEvent triggerEvent) throws IOException { + request.setTriggerEvent(triggerEvent); + return this; + } + + /** + * @param watchSource instead of using an existing watch use this non persisted watch + */ + public ExecuteWatchRequestBuilder setWatchSource(BytesReference watchSource, XContentType xContentType) { + request.setWatchSource(watchSource, xContentType); + return this; + } + + /** + * @param watchSource instead of using an existing watch use this non persisted watch + */ + public ExecuteWatchRequestBuilder setWatchSource(WatchSourceBuilder watchSource) { + request.setWatchSource(watchSource); + return this; + } + + /** + * Sets the mode in which the given action (identified by its id) will be handled. + * + * @param actionId The id of the action + * @param actionMode The mode in which the action will be handled in the execution + */ + public ExecuteWatchRequestBuilder setActionMode(String actionId, ActionExecutionMode actionMode) { + request.setActionMode(actionId, actionMode); + return this; + } + + /** + * @param debug indicates whether the watch should execute in debug mode. In debug mode the + * returned watch record will hold the execution {@code vars} + */ + public ExecuteWatchRequestBuilder setDebug(boolean debug) { + request.setDebug(debug); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchResponse.java new file mode 100644 index 0000000000000..c0eac9b3c61d2 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchResponse.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.execute; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; + +import java.io.IOException; + +/** + * This class contains the WatchHistory generated by running the watch + */ +public class ExecuteWatchResponse extends ActionResponse { + + private String recordId; + private XContentSource recordSource; + + public ExecuteWatchResponse() { + } + + public ExecuteWatchResponse(String recordId, BytesReference recordSource, XContentType contentType) { + this.recordId = recordId; + this.recordSource = new XContentSource(recordSource, contentType); + } + + /** + * @return The id of the watch record holding the watch execution result. + */ + public String getRecordId() { + return recordId; + } + + /** + * @return The watch record source + */ + public XContentSource getRecordSource() { + return recordSource; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + recordId = in.readString(); + recordSource = XContentSource.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(recordId); + XContentSource.writeTo(recordSource, out); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchAction.java new file mode 100644 index 0000000000000..a68223c6f6c53 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchAction.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.get; + +import org.elasticsearch.client.ElasticsearchClient; + +/** + * This action gets an watch by name + */ +public class GetWatchAction extends org.elasticsearch.action.Action { + + public static final GetWatchAction INSTANCE = new GetWatchAction(); + public static final String NAME = "cluster:monitor/xpack/watcher/watch/get"; + + private GetWatchAction() { + super(NAME); + } + + @Override + public GetWatchResponse newResponse() { + return new GetWatchResponse(); + } + + @Override + public GetWatchRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new GetWatchRequestBuilder(client); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchRequest.java new file mode 100644 index 0000000000000..a2e7dc171a750 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchRequest.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.get; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.core.watcher.support.WatcherUtils; + +import java.io.IOException; + +/** + * The request to get the watch by name (id) + */ +public class GetWatchRequest extends ActionRequest { + + private String id; + + public GetWatchRequest() { + } + + /** + * @param id name (id) of the watch to retrieve + */ + public GetWatchRequest(String id) { + this.id = id; + } + + public GetWatchRequest(StreamInput in) throws IOException { + super(in); + id = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + } + + GetWatchRequest setId(String id) { + this.id = id; + return this; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (id == null) { + validationException = ValidateActions.addValidationError("watch id is missing", validationException); + } else if (WatcherUtils.isValidId(id) == false) { + validationException = ValidateActions.addValidationError("watch id contains whitespace", validationException); + } + + return validationException; + } + + + /** + * @return The name of the watch to retrieve + */ + public String getId() { + return id; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public String toString() { + return "get [" + id +"]"; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchRequestBuilder.java new file mode 100644 index 0000000000000..178cebcaf7262 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchRequestBuilder.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.get; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * A delete document action request builder. 
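// Illustrative sketch (assumed helper and id): requesting a single watch by id with the
// GetWatchRequest defined above; a well-formed id yields no validation errors.
static GetWatchRequest exampleGetRequest() {
    GetWatchRequest request = new GetWatchRequest("my-watch");
    assert request.validate() == null;
    return request;
}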
+ */ +public class GetWatchRequestBuilder extends ActionRequestBuilder { + + public GetWatchRequestBuilder(ElasticsearchClient client, String id) { + super(client, GetWatchAction.INSTANCE, new GetWatchRequest(id)); + } + + + public GetWatchRequestBuilder(ElasticsearchClient client) { + super(client, GetWatchAction.INSTANCE, new GetWatchRequest()); + } + + public GetWatchRequestBuilder setId(String id) { + request.setId(id); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchResponse.java new file mode 100644 index 0000000000000..ad1ed234a6ede --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/get/GetWatchResponse.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.get; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; + +import java.io.IOException; + +public class GetWatchResponse extends ActionResponse { + + private String id; + private WatchStatus status; + private boolean found = false; + private XContentSource source; + private long version; + + public GetWatchResponse() { + } + + /** + * ctor for missing watch + */ + public GetWatchResponse(String id) { + this.id = id; + this.found = false; + this.source = null; + version = Versions.NOT_FOUND; + } + + /** + * ctor for found watch + */ + public GetWatchResponse(String id, long version, WatchStatus status, BytesReference source, XContentType contentType) { + this.id = id; + this.status = status; + this.found = true; + this.source = new XContentSource(source, contentType); + this.version = version; + } + + public String getId() { + return id; + } + + public WatchStatus getStatus() { + return status; + } + + public boolean isFound() { + return found; + } + + public XContentSource getSource() { + return source; + } + + public long getVersion() { + return version; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + id = in.readString(); + found = in.readBoolean(); + if (found) { + status = WatchStatus.read(in); + source = XContentSource.readFrom(in); + version = in.readZLong(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + out.writeBoolean(found); + if (found) { + status.writeTo(out); + XContentSource.writeTo(source, out); + out.writeZLong(version); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchAction.java new file mode 100644 index 
0000000000000..9b0024ee43bd6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchAction.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.put; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * This action puts an watch into the watch index and adds it to the scheduler + */ +public class PutWatchAction extends Action { + + public static final PutWatchAction INSTANCE = new PutWatchAction(); + public static final String NAME = "cluster:admin/xpack/watcher/watch/put"; + + private PutWatchAction() { + super(NAME); + } + + @Override + public PutWatchResponse newResponse() { + return new PutWatchResponse(); + } + + @Override + public PutWatchRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new PutWatchRequestBuilder(client); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchRequest.java new file mode 100644 index 0000000000000..1985602d4df30 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchRequest.java @@ -0,0 +1,145 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.put; + + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; +import org.elasticsearch.xpack.core.watcher.support.WatcherUtils; + +import java.io.IOException; + +/** + * This request class contains the data needed to create a watch along with the name of the watch. + * The name of the watch will become the ID of the indexed document. 
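+ * <p>
+ * A minimal usage sketch; the watch id and the JSON source below are made-up values for
+ * illustration only:
+ * <pre>{@code
+ * BytesReference source = new BytesArray("{\"trigger\":{},\"input\":{},\"actions\":{}}");
+ * PutWatchRequest request = new PutWatchRequest("my_watch", source, XContentType.JSON);
+ * request.setActive(true);            // register the watch as active (the default)
+ * assert request.validate() == null;  // id and source are both set, so validation passes
+ * }</pre>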
+ */ +public class PutWatchRequest extends ActionRequest { + + private String id; + private BytesReference source; + private boolean active = true; + private XContentType xContentType = XContentType.JSON; + private long version = Versions.MATCH_ANY; + + public PutWatchRequest() { + } + + public PutWatchRequest(String id, BytesReference source, XContentType xContentType) { + this.id = id; + this.source = source; + this.xContentType = xContentType; + } + + public PutWatchRequest(StreamInput in) throws IOException { + super(in); + id = in.readString(); + source = in.readBytesReference(); + active = in.readBoolean(); + xContentType = in.readEnum(XContentType.class); + version = in.readZLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + out.writeBytesReference(source); + out.writeBoolean(active); + out.writeEnum(xContentType); + out.writeZLong(version); + } + + /** + * @return The name that will be the ID of the indexed document + */ + public String getId() { + return id; + } + + /** + * Set the watch name + */ + public void setId(String id) { + this.id = id; + } + + /** + * @return The source of the watch + */ + public BytesReference getSource() { + return source; + } + + /** + * Set the source of the watch + */ + public void setSource(WatchSourceBuilder source) { + setSource(source.buildAsBytes(XContentType.JSON), XContentType.JSON); + } + + /** + * Set the source of the watch + */ + public void setSource(BytesReference source, XContentType xContentType) { + this.source = source; + this.xContentType = xContentType; + } + + /** + * @return The initial active state of the watch (defaults to {@code true}, e.g. "active") + */ + public boolean isActive() { + return active; + } + + /** + * Sets the initial active state of the watch + */ + public void setActive(boolean active) { + this.active = active; + } + + /** + * Get the content type for the source + */ + public XContentType xContentType() { + return xContentType; + } + + public long getVersion() { + return version; + } + + public void setVersion(long version) { + this.version = version; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (id == null) { + validationException = ValidateActions.addValidationError("watch id is missing", validationException); + } else if (WatcherUtils.isValidId(id) == false) { + validationException = ValidateActions.addValidationError("watch id contains whitespace", validationException); + } + if (source == null) { + validationException = ValidateActions.addValidationError("watch source is missing", validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchRequestBuilder.java new file mode 100644 index 0000000000000..3d9ad2a01ac5c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchRequestBuilder.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.put; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; + +public class PutWatchRequestBuilder extends ActionRequestBuilder { + + public PutWatchRequestBuilder(ElasticsearchClient client) { + super(client, PutWatchAction.INSTANCE, new PutWatchRequest()); + } + + public PutWatchRequestBuilder(ElasticsearchClient client, String id) { + super(client, PutWatchAction.INSTANCE, new PutWatchRequest()); + request.setId(id); + } + + /** + * @param id The watch id to be created + */ + public PutWatchRequestBuilder setId(String id){ + request.setId(id); + return this; + } + + /** + * @param source the source of the watch to be created + * @param xContentType the content type of the source + */ + public PutWatchRequestBuilder setSource(BytesReference source, XContentType xContentType) { + request.setSource(source, xContentType); + return this; + } + + /** + * @param source the source of the watch to be created + */ + public PutWatchRequestBuilder setSource(WatchSourceBuilder source) { + request.setSource(source); + return this; + } + + /** + * @param active Sets whether the watcher is in/active by default + */ + public PutWatchRequestBuilder setActive(boolean active) { + request.setActive(active); + return this; + } + + /** + * @param version Sets the version to be set when running the update + */ + public PutWatchRequestBuilder setVersion(long version) { + request.setVersion(version); + return this; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchResponse.java new file mode 100644 index 0000000000000..b84901ecac163 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/PutWatchResponse.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.transport.actions.put; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * The Response for a put watch action + */ +public class PutWatchResponse extends ActionResponse { + + private String id; + private long version; + private boolean created; + + public PutWatchResponse() { + } + + public PutWatchResponse(String id, long version, boolean created) { + this.id = id; + this.version = version; + this.created = created; + } + + public String getId() { + return id; + } + + public long getVersion() { + return version; + } + + public boolean isCreated() { + return created; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(id); + out.writeVLong(version); + out.writeBoolean(created); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + id = in.readString(); + version = in.readVLong(); + created = in.readBoolean(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceAction.java new file mode 100644 index 0000000000000..05cddee273c73 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceAction.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.service; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + + +public class WatcherServiceAction extends Action { + + public static final WatcherServiceAction INSTANCE = new WatcherServiceAction(); + public static final String NAME = "cluster:admin/xpack/watcher/service"; + + private WatcherServiceAction() { + super(NAME); + } + + @Override + public WatcherServiceResponse newResponse() { + return new WatcherServiceResponse(); + } + + @Override + public WatcherServiceRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new WatcherServiceRequestBuilder(client); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceRequest.java new file mode 100644 index 0000000000000..64fc7abc2ae1b --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceRequest.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.transport.actions.service; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Locale; + +public class WatcherServiceRequest extends MasterNodeRequest { + + public enum Command { START, STOP } + + private Command command; + + public WatcherServiceRequest() { + } + + /** + * Starts the watcher service if not already started. + */ + public WatcherServiceRequest start() { + command = Command.START; + return this; + } + + /** + * Stops the watcher service if not already stopped. + */ + public WatcherServiceRequest stop() { + command = Command.STOP; + return this; + } + + public Command getCommand() { + return command; + } + + @Override + public ActionRequestValidationException validate() { + if (command == null) { + return ValidateActions.addValidationError("no command specified", null); + } else { + return null; + } + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + command = Command.valueOf(in.readString().toUpperCase(Locale.ROOT)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(command.name().toLowerCase(Locale.ROOT)); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceRequestBuilder.java new file mode 100644 index 0000000000000..b43d02aac83f8 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceRequestBuilder.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.service; + +import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +public class WatcherServiceRequestBuilder extends MasterNodeOperationRequestBuilder { + + public WatcherServiceRequestBuilder(ElasticsearchClient client) { + super(client, WatcherServiceAction.INSTANCE, new WatcherServiceRequest()); + } + + /** + * Starts watcher if not already started. + */ + public WatcherServiceRequestBuilder start() { + request.start(); + return this; + } + + /** + * Stops watcher if not already stopped. + */ + public WatcherServiceRequestBuilder stop() { + request.stop(); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceResponse.java new file mode 100644 index 0000000000000..61ac4435f89e6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceResponse.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.service; + +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public class WatcherServiceResponse extends AcknowledgedResponse { + + public WatcherServiceResponse() { + } + + public WatcherServiceResponse(boolean acknowledged) { + super(acknowledged); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + readAcknowledged(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + writeAcknowledged(out); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsAction.java new file mode 100644 index 0000000000000..0c111090f5413 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsAction.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.stats; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * This Action gets the stats for the watcher plugin + */ +public class WatcherStatsAction extends Action { + + public static final WatcherStatsAction INSTANCE = new WatcherStatsAction(); + public static final String NAME = "cluster:monitor/xpack/watcher/stats/dist"; + + private WatcherStatsAction() { + super(NAME); + } + + @Override + public WatcherStatsResponse newResponse() { + return new WatcherStatsResponse(); + } + + @Override + public WatcherStatsRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new WatcherStatsRequestBuilder(client); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsRequest.java new file mode 100644 index 0000000000000..4fdbd2ac407ce --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsRequest.java @@ -0,0 +1,121 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.transport.actions.stats; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.nodes.BaseNodeRequest; +import org.elasticsearch.action.support.nodes.BaseNodesRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * The Request to get the watcher stats + */ +public class WatcherStatsRequest extends BaseNodesRequest { + + private boolean includeCurrentWatches; + private boolean includeQueuedWatches; + private boolean includeStats; + + public WatcherStatsRequest() { + } + + public boolean includeCurrentWatches() { + return includeCurrentWatches; + } + + public void includeCurrentWatches(boolean currentWatches) { + this.includeCurrentWatches = currentWatches; + } + + public boolean includeQueuedWatches() { + return includeQueuedWatches; + } + + public void includeQueuedWatches(boolean includeQueuedWatches) { + this.includeQueuedWatches = includeQueuedWatches; + } + + public boolean includeStats() { + return includeStats; + } + + public void includeStats(boolean includeStats) { + this.includeStats = includeStats; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + includeCurrentWatches = in.readBoolean(); + includeQueuedWatches = in.readBoolean(); + includeStats = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(includeCurrentWatches); + out.writeBoolean(includeQueuedWatches); + out.writeBoolean(includeStats); + } + + @Override + public String toString() { + return "watcher_stats"; + } + + public static class Node extends BaseNodeRequest { + + private boolean includeCurrentWatches; + private boolean includeQueuedWatches; + private boolean includeStats; + + public Node() {} + + public Node(WatcherStatsRequest request, String nodeId) { + super(nodeId); + includeCurrentWatches = request.includeCurrentWatches(); + includeQueuedWatches = request.includeQueuedWatches(); + includeStats = request.includeStats(); + } + + public boolean includeCurrentWatches() { + return includeCurrentWatches; + } + + public boolean includeQueuedWatches() { + return includeQueuedWatches; + } + + public boolean includeStats() { + return includeStats; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + includeCurrentWatches = in.readBoolean(); + includeQueuedWatches = in.readBoolean(); + includeStats = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(includeCurrentWatches); + out.writeBoolean(includeQueuedWatches); + out.writeBoolean(includeStats); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsRequestBuilder.java new file mode 100644 index 0000000000000..d2ca4e98db648 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsRequestBuilder.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.stats; + +import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Watcher stats request builder. + */ +public class WatcherStatsRequestBuilder extends NodesOperationRequestBuilder { + + public WatcherStatsRequestBuilder(ElasticsearchClient client) { + super(client, WatcherStatsAction.INSTANCE, new WatcherStatsRequest()); + } + + public WatcherStatsRequestBuilder setIncludeCurrentWatches(boolean includeCurrentWatches) { + request().includeCurrentWatches(includeCurrentWatches); + return this; + } + + public WatcherStatsRequestBuilder setIncludeQueuedWatches(boolean includeQueuedWatches) { + request().includeQueuedWatches(includeQueuedWatches); + return this; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsResponse.java new file mode 100644 index 0000000000000..13b3ddb60b290 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsResponse.java @@ -0,0 +1,258 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.transport.actions.stats; + +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.support.nodes.BaseNodeResponse; +import org.elasticsearch.action.support.nodes.BaseNodesResponse; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.watcher.WatcherMetaData; +import org.elasticsearch.xpack.core.watcher.WatcherState; +import org.elasticsearch.xpack.core.watcher.common.stats.Counters; +import org.elasticsearch.xpack.core.watcher.execution.QueuedWatch; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionSnapshot; + +import java.io.IOException; +import java.util.List; +import java.util.Locale; + +public class WatcherStatsResponse extends BaseNodesResponse + implements ToXContentObject { + + private WatcherMetaData watcherMetaData; + + public WatcherStatsResponse() { + } + + public WatcherStatsResponse(ClusterName clusterName, WatcherMetaData watcherMetaData, + List nodes, List failures) { + super(clusterName, nodes, failures); + this.watcherMetaData = watcherMetaData; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(watcherMetaData.manuallyStopped()); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + watcherMetaData = new WatcherMetaData(in.readBoolean()); + } + + @Override + protected List readNodesFrom(StreamInput in) throws IOException { + return 
in.readList(Node::readNodeResponse); + } + + @Override + protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { + out.writeStreamableList(nodes); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + watcherMetaData.toXContent(builder, params); + builder.startArray("stats"); + for (Node node : getNodes()) { + node.toXContent(builder, params); + } + builder.endArray(); + + return builder; + } + + /** + * Sum all watches across all nodes to get a total count of watches in the cluster + * + * @return The sum of all watches being executed + */ + public long getWatchesCount() { + return getNodes().stream().mapToLong(WatcherStatsResponse.Node::getWatchesCount).sum(); + } + + public WatcherMetaData watcherMetaData() { + return watcherMetaData; + } + + public static class Node extends BaseNodeResponse implements ToXContentObject { + + private long watchesCount; + private WatcherState watcherState; + private long threadPoolQueueSize; + private long threadPoolMaxSize; + private List snapshots; + private List queuedWatches; + private Counters stats; + + public Node() { + } + + public Node(DiscoveryNode node) { + super(node); + } + + /** + * @return The current execution thread pool queue size + */ + public long getThreadPoolQueueSize() { + return threadPoolQueueSize; + } + + public void setThreadPoolQueueSize(long threadPoolQueueSize) { + this.threadPoolQueueSize = threadPoolQueueSize; + } + + /** + * @return The max number of threads in the execution thread pool + */ + public long getThreadPoolMaxSize() { + return threadPoolMaxSize; + } + + public void setThreadPoolMaxSize(long threadPoolMaxSize) { + this.threadPoolMaxSize = threadPoolMaxSize; + } + + /** + * @return The number of watches currently registered in the system + */ + public long getWatchesCount() { + return watchesCount; + } + + public void setWatchesCount(long watchesCount) { + this.watchesCount = watchesCount; + } + + /** + * @return The state of the watch service. 
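+ *         (i.e. the lifecycle state of the watcher service on this node, such as started or stopped)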
+ */ + public WatcherState getWatcherState() { + return watcherState; + } + + public void setWatcherState(WatcherState watcherServiceState) { + this.watcherState = watcherServiceState; + } + + @Nullable + public List getSnapshots() { + return snapshots; + } + + public void setSnapshots(List snapshots) { + this.snapshots = snapshots; + } + + @Nullable + public List getQueuedWatches() { + return queuedWatches; + } + + public void setQueuedWatches(List queuedWatches) { + this.queuedWatches = queuedWatches; + } + + public Counters getStats() { + return stats; + } + + public void setStats(Counters stats) { + this.stats = stats; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + watchesCount = in.readLong(); + threadPoolQueueSize = in.readLong(); + threadPoolMaxSize = in.readLong(); + watcherState = WatcherState.fromId(in.readByte()); + + if (in.readBoolean()) { + snapshots = in.readStreamableList(WatchExecutionSnapshot::new); + } + if (in.readBoolean()) { + queuedWatches = in.readStreamableList(QueuedWatch::new); + } + if (in.readBoolean()) { + stats = Counters.read(in); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeLong(watchesCount); + out.writeLong(threadPoolQueueSize); + out.writeLong(threadPoolMaxSize); + out.writeByte(watcherState.getId()); + + out.writeBoolean(snapshots != null); + if (snapshots != null) { + out.writeStreamableList(snapshots); + } + out.writeBoolean(queuedWatches != null); + if (queuedWatches != null) { + out.writeStreamableList(queuedWatches); + } + out.writeBoolean(stats != null); + if (stats != null) { + stats.writeTo(out); + } + } + + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) + throws IOException { + builder.startObject(); + builder.field("node_id", getNode().getId()); + builder.field("watcher_state", watcherState.toString().toLowerCase(Locale.ROOT)); + builder.field("watch_count", watchesCount); + builder.startObject("execution_thread_pool"); + builder.field("queue_size", threadPoolQueueSize); + builder.field("max_size", threadPoolMaxSize); + builder.endObject(); + + if (snapshots != null) { + builder.startArray("current_watches"); + for (WatchExecutionSnapshot snapshot : snapshots) { + snapshot.toXContent(builder, params); + } + builder.endArray(); + } + if (queuedWatches != null) { + builder.startArray("queued_watches"); + for (QueuedWatch queuedWatch : queuedWatches) { + queuedWatch.toXContent(builder, params); + } + builder.endArray(); + } + if (stats != null && stats.hasCounters()) { + builder.field("stats", stats.toNestedMap()); + } + builder.endObject(); + return builder; + } + + static WatcherStatsResponse.Node readNodeResponse(StreamInput in) + throws IOException { + WatcherStatsResponse.Node node = new WatcherStatsResponse.Node(); + node.readFrom(in); + return node; + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/trigger/Trigger.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/trigger/Trigger.java new file mode 100644 index 0000000000000..d7fbeb5ec6c7f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/trigger/Trigger.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.trigger; + +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public interface Trigger extends ToXContentObject { + + String type(); + + interface Parser { + + String type(); + + T parse(XContentParser parser) throws IOException; + } + + interface Builder { + + T build(); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/trigger/TriggerEvent.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/trigger/TriggerEvent.java new file mode 100644 index 0000000000000..1dfac8a8fcb97 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/trigger/TriggerEvent.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.trigger; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; +import org.joda.time.DateTime; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public abstract class TriggerEvent implements ToXContentObject { + + private final String jobName; + protected final DateTime triggeredTime; + protected final Map data; + + public TriggerEvent(String jobName, DateTime triggeredTime) { + this.jobName = jobName; + this.triggeredTime = triggeredTime; + this.data = new HashMap<>(); + data.put(Field.TRIGGERED_TIME.getPreferredName(), triggeredTime); + } + + public String jobName() { + return jobName; + } + + public abstract String type(); + + public DateTime triggeredTime() { + return triggeredTime; + } + + public final Map data() { + return data; + } + + @Override + public String toString() { + return new StringBuilder("[") + .append("name=[").append(jobName).append("],") + .append("triggered_time=[").append(triggeredTime).append("],") + .append("data=[").append(data).append("]") + .append("]") + .toString(); + } + + public void recordXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Field.TYPE.getPreferredName(), type()); + WatcherDateTimeUtils.writeDate(Field.TRIGGERED_TIME.getPreferredName(), builder, triggeredTime); + recordDataXContent(builder, params); + builder.endObject(); + } + + public abstract void recordDataXContent(XContentBuilder builder, Params params) throws IOException; + + protected interface Field { + ParseField TYPE = new ParseField("type"); + ParseField TRIGGERED_TIME = new ParseField("triggered_time"); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/Payload.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/Payload.java new file mode 100644 index 0000000000000..7ef57c37f3046 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/Payload.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.watch; + +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.xpack.core.watcher.support.WatcherUtils.responseToData; + +public interface Payload extends ToXContentObject { + + Simple EMPTY = new Simple(Collections.emptyMap()); + + Map data(); + + class Simple implements Payload { + + private final Map data; + + public Simple() { + this(new HashMap<>()); + } + + public Simple(String key, Object value) { + this(new MapBuilder().put(key, value).map()); + } + + public Simple(Map data) { + this.data = data; + } + + @Override + public Map data() { + return data; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.map(data); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Simple simple = (Simple) o; + + if (!data.equals(simple.data)) return false; + + return true; + } + + @Override + public int hashCode() { + return data.hashCode(); + } + + @Override + public String toString() { + return "simple[" + Objects.toString(data) + "]"; + } + } + + class XContent extends Simple { + public XContent(ToXContentObject response) throws IOException { + super(responseToData(response)); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/Watch.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/Watch.java new file mode 100644 index 0000000000000..75034752c3cc6 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/Watch.java @@ -0,0 +1,164 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.watch; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; +import org.elasticsearch.xpack.core.watcher.actions.ActionWrapper; +import org.elasticsearch.xpack.core.watcher.condition.ExecutableCondition; +import org.elasticsearch.xpack.core.watcher.input.ExecutableInput; +import org.elasticsearch.xpack.core.watcher.transform.ExecutableTransform; +import org.elasticsearch.xpack.core.watcher.trigger.Trigger; +import org.joda.time.DateTime; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public class Watch implements ToXContentObject { + + public static final String INCLUDE_STATUS_KEY = "include_status"; + public static final String INDEX = ".watches"; + public static final String DOC_TYPE = "doc"; + + private final String id; + private final Trigger trigger; + private final ExecutableInput input; + private final ExecutableCondition condition; + @Nullable private final ExecutableTransform transform; + private final List actions; + @Nullable private final TimeValue throttlePeriod; + @Nullable private final Map metadata; + private final WatchStatus status; + + private transient long version; + + public Watch(String id, Trigger trigger, ExecutableInput input, ExecutableCondition condition, @Nullable ExecutableTransform transform, + @Nullable TimeValue throttlePeriod, List actions, @Nullable Map metadata, + WatchStatus status, long version) { + this.id = id; + this.trigger = trigger; + this.input = input; + this.condition = condition; + this.transform = transform; + this.actions = actions; + this.throttlePeriod = throttlePeriod; + this.metadata = metadata; + this.status = status; + this.version = version; + } + + public String id() { + return id; + } + + public Trigger trigger() { + return trigger; + } + + public ExecutableInput input() { return input;} + + public ExecutableCondition condition() { + return condition; + } + + public ExecutableTransform transform() { + return transform; + } + + public TimeValue throttlePeriod() { + return throttlePeriod; + } + + public List actions() { + return actions; + } + + public Map metadata() { + return metadata; + } + + public WatchStatus status() { + return status; + } + + public long version() { + return version; + } + + public void version(long version) { + this.version = version; + } + + /** + * Sets the state of this watch to in/active + * + * @return {@code true} if the status of this watch changed, {@code false} otherwise. + */ + public boolean setState(boolean active, DateTime now) { + return status.setActive(active, now); + } + + /** + * Acks this watch. + * + * @return {@code true} if the status of this watch changed, {@code false} otherwise. + */ + public boolean ack(DateTime now, String... 
actions) { + return status.onAck(now, actions); + } + + public boolean acked(String actionId) { + ActionStatus actionStatus = status.actionStatus(actionId); + return actionStatus.ackStatus().state() == ActionStatus.AckStatus.State.ACKED; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Watch watch = (Watch) o; + return watch.id.equals(id); + } + + @Override + public int hashCode() { + return id.hashCode(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(WatchField.TRIGGER.getPreferredName()).startObject().field(trigger.type(), trigger, params).endObject(); + builder.field(WatchField.INPUT.getPreferredName()).startObject().field(input.type(), input, params).endObject(); + builder.field(WatchField.CONDITION.getPreferredName()).startObject().field(condition.type(), condition, params).endObject(); + if (transform != null) { + builder.field(WatchField.TRANSFORM.getPreferredName()).startObject().field(transform.type(), transform, params).endObject(); + } + if (throttlePeriod != null) { + builder.humanReadableField(WatchField.THROTTLE_PERIOD.getPreferredName(), + WatchField.THROTTLE_PERIOD_HUMAN.getPreferredName(), throttlePeriod); + } + builder.startObject(WatchField.ACTIONS.getPreferredName()); + for (ActionWrapper action : actions) { + builder.field(action.id(), action, params); + } + builder.endObject(); + if (metadata != null) { + builder.field(WatchField.METADATA.getPreferredName(), metadata); + } + if (params.paramAsBoolean(INCLUDE_STATUS_KEY, false)) { + builder.field(WatchField.STATUS.getPreferredName(), status, params); + } + builder.endObject(); + return builder; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchField.java new file mode 100644 index 0000000000000..dbc3ce76c9517 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchField.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.watcher.watch; + +import org.elasticsearch.common.ParseField; + +public final class WatchField { + public static final ParseField TRIGGER = new ParseField("trigger"); + public static final ParseField INPUT = new ParseField("input"); + public static final ParseField CONDITION = new ParseField("condition"); + public static final ParseField ACTIONS = new ParseField("actions"); + public static final ParseField TRANSFORM = new ParseField("transform"); + public static final ParseField THROTTLE_PERIOD = new ParseField("throttle_period_in_millis"); + public static final ParseField THROTTLE_PERIOD_HUMAN = new ParseField("throttle_period"); + public static final ParseField METADATA = new ParseField("metadata"); + public static final ParseField STATUS = new ParseField("status"); + public static final ParseField VERSION = new ParseField("_version"); + public static final String ALL_ACTIONS_ID = "_all"; + + private WatchField() {} +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java new file mode 100644 index 0000000000000..69d114bd2b045 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java @@ -0,0 +1,415 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.watcher.watch; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; +import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherXContentParser; +import org.joda.time.DateTime; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.unmodifiableMap; +import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.parseDate; +import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.readDate; +import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.readOptionalDate; +import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.writeDate; +import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.writeOptionalDate; +import static org.joda.time.DateTimeZone.UTC; + +public class WatchStatus implements ToXContentObject, Streamable { + + public static final String INCLUDE_STATE = "include_state"; + + private State state; + + @Nullable private ExecutionState executionState; + @Nullable private DateTime 
lastChecked; + @Nullable private DateTime lastMetCondition; + @Nullable private long version; + @Nullable private Map headers; + private Map actions; + + // for serialization + private WatchStatus() { + } + + public WatchStatus(DateTime now, Map actions) { + this(-1, new State(true, now), null, null, null, actions, Collections.emptyMap()); + } + + private WatchStatus(long version, State state, ExecutionState executionState, DateTime lastChecked, DateTime lastMetCondition, + Map actions, Map headers) { + this.version = version; + this.lastChecked = lastChecked; + this.lastMetCondition = lastMetCondition; + this.actions = actions; + this.state = state; + this.executionState = executionState; + this.headers = headers; + } + + public State state() { + return state; + } + + public boolean checked() { + return lastChecked != null; + } + + public DateTime lastChecked() { + return lastChecked; + } + + public ActionStatus actionStatus(String actionId) { + return actions.get(actionId); + } + + public long version() { + return version; + } + + public void version(long version) { + this.version = version; + } + + public void setExecutionState(ExecutionState executionState) { + this.executionState = executionState; + } + + public ExecutionState getExecutionState() { + return executionState; + } + + public Map getHeaders() { + return headers; + } + + public void setHeaders(Map headers) { + this.headers = headers; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + WatchStatus that = (WatchStatus) o; + + return Objects.equals(lastChecked, that.lastChecked) && + Objects.equals(lastMetCondition, that.lastMetCondition) && + Objects.equals(version, that.version) && + Objects.equals(executionState, that.executionState) && + Objects.equals(actions, that.actions) && + Objects.equals(headers, that.headers); + } + + @Override + public int hashCode() { + return Objects.hash(lastChecked, lastMetCondition, actions, version, executionState, headers); + } + + /** + * Called whenever an watch is checked, ie. the condition of the watch is evaluated to see if + * the watch should be executed. + * + * @param metCondition indicates whether the watch's condition was met. + */ + public void onCheck(boolean metCondition, DateTime timestamp) { + lastChecked = timestamp; + if (metCondition) { + lastMetCondition = timestamp; + } else { + for (ActionStatus status : actions.values()) { + status.resetAckStatus(timestamp); + } + } + } + + public void onActionResult(String actionId, DateTime timestamp, Action.Result result) { + ActionStatus status = actions.get(actionId); + status.update(timestamp, result); + } + + /** + * Notifies this status that the givne actions were acked. If the current state of one of these actions is + * {@link ActionStatus.AckStatus.State#ACKABLE ACKABLE}, + * then we'll it'll change to {@link ActionStatus.AckStatus.State#ACKED ACKED} + * (when set to {@link ActionStatus.AckStatus.State#ACKED ACKED}, the AckThrottler + * will throttle the execution of the action. + * + * @return {@code true} if the state of changed due to the ack, {@code false} otherwise. + */ + boolean onAck(DateTime timestamp, String... 
actionIds) { + boolean changed = false; + boolean containsAll = false; + for (String actionId : actionIds) { + if (actionId.equals(WatchField.ALL_ACTIONS_ID)) { + containsAll = true; + break; + } + } + if (containsAll) { + for (ActionStatus status : actions.values()) { + changed |= status.onAck(timestamp); + } + return changed; + } + + for (String actionId : actionIds) { + ActionStatus status = actions.get(actionId); + if (status != null) { + changed |= status.onAck(timestamp); + } + } + + return changed; + } + + boolean setActive(boolean active, DateTime now) { + boolean change = this.state.active != active; + if (change) { + this.state = new State(active, now); + } + return change; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(version); + writeOptionalDate(out, lastChecked); + writeOptionalDate(out, lastMetCondition); + out.writeInt(actions.size()); + for (Map.Entry entry : actions.entrySet()) { + out.writeString(entry.getKey()); + ActionStatus.writeTo(entry.getValue(), out); + } + out.writeBoolean(state.active); + writeDate(out, state.timestamp); + out.writeBoolean(executionState != null); + if (executionState != null) { + out.writeString(executionState.id()); + } + boolean statusHasHeaders = headers != null && headers.isEmpty() == false; + out.writeBoolean(statusHasHeaders); + if (statusHasHeaders) { + out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); + } + } + + @Override + public void readFrom(StreamInput in) throws IOException { + version = in.readLong(); + lastChecked = readOptionalDate(in, UTC); + lastMetCondition = readOptionalDate(in, UTC); + int count = in.readInt(); + Map actions = new HashMap<>(count); + for (int i = 0; i < count; i++) { + actions.put(in.readString(), ActionStatus.readFrom(in)); + } + this.actions = unmodifiableMap(actions); + state = new State(in.readBoolean(), readDate(in, UTC)); + boolean executionStateExists = in.readBoolean(); + if (executionStateExists) { + executionState = ExecutionState.resolve(in.readString()); + } + if (in.readBoolean()) { + headers = in.readMap(StreamInput::readString, StreamInput::readString); + } + } + + public static WatchStatus read(StreamInput in) throws IOException { + WatchStatus status = new WatchStatus(); + status.readFrom(in); + return status; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (params.paramAsBoolean(INCLUDE_STATE, true)) { + builder.field(Field.STATE.getPreferredName(), state, params); + } + if (lastChecked != null) { + builder.timeField(Field.LAST_CHECKED.getPreferredName(), lastChecked); + } + if (lastMetCondition != null) { + builder.timeField(Field.LAST_MET_CONDITION.getPreferredName(), lastMetCondition); + } + if (actions != null) { + builder.startObject(Field.ACTIONS.getPreferredName()); + for (Map.Entry entry : actions.entrySet()) { + builder.field(entry.getKey(), entry.getValue(), params); + } + builder.endObject(); + } + if (executionState != null) { + builder.field(Field.EXECUTION_STATE.getPreferredName(), executionState.id()); + } + if (headers != null && headers.isEmpty() == false && WatcherParams.hideHeaders(params) == false) { + builder.field(Field.HEADERS.getPreferredName(), headers); + } + builder.field(Field.VERSION.getPreferredName(), version); + return builder.endObject(); + } + + public static WatchStatus parse(String watchId, WatcherXContentParser parser) throws IOException { + State state = null; + ExecutionState 
executionState = null; + DateTime lastChecked = null; + DateTime lastMetCondition = null; + Map actions = null; + long version = -1; + Map headers = Collections.emptyMap(); + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Field.STATE.match(currentFieldName, parser.getDeprecationHandler())) { + try { + state = State.parse(parser); + } catch (ElasticsearchParseException e) { + throw new ElasticsearchParseException("could not parse watch status for [{}]. failed to parse field [{}]", + e, watchId, currentFieldName); + } + } else if (Field.VERSION.match(currentFieldName, parser.getDeprecationHandler())) { + if (token.isValue()) { + version = parser.longValue(); + } else { + throw new ElasticsearchParseException("could not parse watch status for [{}]. expecting field [{}] to hold a long " + + "value, found [{}] instead", watchId, currentFieldName, token); + } + } else if (Field.LAST_CHECKED.match(currentFieldName, parser.getDeprecationHandler())) { + if (token.isValue()) { + lastChecked = parseDate(currentFieldName, parser, UTC); + } else { + throw new ElasticsearchParseException("could not parse watch status for [{}]. expecting field [{}] to hold a date " + + "value, found [{}] instead", watchId, currentFieldName, token); + } + } else if (Field.LAST_MET_CONDITION.match(currentFieldName, parser.getDeprecationHandler())) { + if (token.isValue()) { + lastMetCondition = parseDate(currentFieldName, parser, UTC); + } else { + throw new ElasticsearchParseException("could not parse watch status for [{}]. expecting field [{}] to hold a date " + + "value, found [{}] instead", watchId, currentFieldName, token); + } + } else if (Field.EXECUTION_STATE.match(currentFieldName, parser.getDeprecationHandler())) { + if (token.isValue()) { + executionState = ExecutionState.resolve(parser.text()); + } else { + throw new ElasticsearchParseException("could not parse watch status for [{}]. expecting field [{}] to hold a string " + + "value, found [{}] instead", watchId, currentFieldName, token); + } + } else if (Field.ACTIONS.match(currentFieldName, parser.getDeprecationHandler())) { + actions = new HashMap<>(); + if (token == XContentParser.Token.START_OBJECT) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else { + ActionStatus actionStatus = ActionStatus.parse(watchId, currentFieldName, parser); + actions.put(currentFieldName, actionStatus); + } + } + } else { + throw new ElasticsearchParseException("could not parse watch status for [{}]. expecting field [{}] to be an object, " + + "found [{}] instead", watchId, currentFieldName, token); + } + } else if (Field.HEADERS.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.START_OBJECT) { + headers = parser.mapStrings(); + } + } + } + + // if the watch status doesn't have a state, we assume active + // this is to support old watches that weren't upgraded yet to + // contain the state + if (state == null) { + state = new State(true, parser.getParseDateTime()); + } + actions = actions == null ? 
emptyMap() : unmodifiableMap(actions); + + return new WatchStatus(version, state, executionState, lastChecked, lastMetCondition, actions, headers); + } + + public static class State implements ToXContentObject { + + final boolean active; + final DateTime timestamp; + + public State(boolean active, DateTime timestamp) { + this.active = active; + this.timestamp = timestamp; + } + + public boolean isActive() { + return active; + } + + public DateTime getTimestamp() { + return timestamp; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Field.ACTIVE.getPreferredName(), active); + writeDate(Field.TIMESTAMP.getPreferredName(), builder, timestamp); + return builder.endObject(); + } + + public static State parse(XContentParser parser) throws IOException { + if (parser.currentToken() != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("expected an object but found [{}] instead", parser.currentToken()); + } + boolean active = true; + DateTime timestamp = DateTime.now(UTC); + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Field.ACTIVE.match(currentFieldName, parser.getDeprecationHandler())) { + active = parser.booleanValue(); + } else if (Field.TIMESTAMP.match(currentFieldName, parser.getDeprecationHandler())) { + timestamp = parseDate(currentFieldName, parser, UTC); + } + } + return new State(active, timestamp); + } + } + + public interface Field { + ParseField STATE = new ParseField("state"); + ParseField ACTIVE = new ParseField("active"); + ParseField TIMESTAMP = new ParseField("timestamp"); + ParseField LAST_CHECKED = new ParseField("last_checked"); + ParseField LAST_MET_CONDITION = new ParseField("last_met_condition"); + ParseField ACTIONS = new ParseField("actions"); + ParseField VERSION = new ParseField("version"); + ParseField EXECUTION_STATE = new ParseField("execution_state"); + ParseField HEADERS = new ParseField("headers"); + } +} diff --git a/x-pack/plugin/core/src/main/plugin-metadata/plugin-security.policy b/x-pack/plugin/core/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..0cd7a32bcc47b --- /dev/null +++ b/x-pack/plugin/core/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,34 @@ +grant { + // bouncy castle + permission java.security.SecurityPermission "putProviderProperty.BC"; + + // needed for x-pack security extension + permission java.security.SecurityPermission "createPolicy.JavaPolicy"; + permission java.security.SecurityPermission "getPolicy"; + permission java.security.SecurityPermission "setPolicy"; + permission java.util.PropertyPermission "*", "read,write"; + + // needed for multiple server implementations used in tests + permission java.net.SocketPermission "*", "accept,connect"; +}; + +grant codeBase "${codebase.netty-common}" { + // for reading the system-wide configuration for the backlog of established sockets + permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read"; +}; + +grant codeBase "${codebase.netty-transport}" { + // Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854 + // the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely! 
+ permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write"; +}; + +grant codeBase "${codebase.elasticsearch-rest-client}" { + // rest client uses system properties which gets the default proxy + permission java.net.NetPermission "getProxySelector"; +}; + +grant codeBase "${codebase.httpasyncclient}" { + // rest client uses system properties which gets the default proxy + permission java.net.NetPermission "getProxySelector"; +}; diff --git a/x-pack/plugin/core/src/main/resources/logstash-index-template.json b/x-pack/plugin/core/src/main/resources/logstash-index-template.json new file mode 100644 index 0000000000000..1456b2d7b5e08 --- /dev/null +++ b/x-pack/plugin/core/src/main/resources/logstash-index-template.json @@ -0,0 +1,50 @@ +{ + "index_patterns" : [ ".logstash" ], + "settings": { + "index": { + "number_of_shards": 1, + "auto_expand_replicas": "0-1", + "codec": "best_compression" + } + }, + "mappings" : { + "doc" : { + "_meta": { + "logstash-version": "${logstash.template.version}" + }, + "dynamic": "strict", + "properties":{ + "description":{ + "type":"text" + }, + "last_modified":{ + "type":"date" + }, + "pipeline_metadata":{ + "properties":{ + "version":{ + "type":"short" + }, + "type":{ + "type":"keyword" + } + } + }, + "pipeline":{ + "type":"text" + }, + "pipeline_settings": { + "dynamic": false, + "type": "object" + }, + "username":{ + "type":"keyword" + }, + "metadata":{ + "type":"object", + "dynamic":false + } + } + } + } +} diff --git a/x-pack/plugin/core/src/main/resources/monitoring-alerts.json b/x-pack/plugin/core/src/main/resources/monitoring-alerts.json new file mode 100644 index 0000000000000..01186bca77db5 --- /dev/null +++ b/x-pack/plugin/core/src/main/resources/monitoring-alerts.json @@ -0,0 +1,60 @@ +{ + "index_patterns": [ ".monitoring-alerts-${monitoring.template.version}" ], + "version": 7000001, + "settings": { + "index": { + "number_of_shards": 1, + "number_of_replicas": 0, + "auto_expand_replicas": "0-1", + "format": 6, + "codec": "best_compression" + } + }, + "mappings": { + "doc": { + "dynamic": false, + "properties": { + "timestamp": { + "type": "date" + }, + "update_timestamp": { + "type": "date" + }, + "resolved_timestamp": { + "type": "date" + }, + "prefix": { + "type": "text" + }, + "message": { + "type": "text" + }, + "suffix": { + "type": "text" + }, + "metadata": { + "properties": { + "cluster_uuid": { + "type": "keyword" + }, + "link": { + "type": "keyword" + }, + "severity": { + "type": "short" + }, + "type": { + "type": "keyword" + }, + "version": { + "type": "keyword" + }, + "watch": { + "type": "keyword" + } + } + } + } + } + } +} diff --git a/x-pack/plugin/core/src/main/resources/monitoring-beats.json b/x-pack/plugin/core/src/main/resources/monitoring-beats.json new file mode 100644 index 0000000000000..68e6c06ad09f6 --- /dev/null +++ b/x-pack/plugin/core/src/main/resources/monitoring-beats.json @@ -0,0 +1,314 @@ +{ + "index_patterns": [ + ".monitoring-beats-${monitoring.template.version}-*" + ], + "settings": { + "index.auto_expand_replicas": "0-1", + "index.codec": "best_compression", + "index.format": 6, + "index.number_of_replicas": 0, + "index.number_of_shards": 1 + }, + "version": 7000001, + "mappings": { + "doc": { + "dynamic": false, + "properties": { + "beats_stats": { + "properties": { + "beat": { + "properties": { + "host": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "uuid": { + "type": "keyword" + }, + "version": { + "type": "keyword" + } + } + }, + "metrics": 
{ + "properties": { + "beat": { + "properties": { + "cpu": { + "properties": { + "system": { + "properties": { + "ticks": { + "type": "long" + }, + "time": { + "properties": { + "ms": { + "type": "long" + } + } + } + } + }, + "total": { + "properties": { + "value": { + "type": "long" + }, + "ticks": { + "type": "long" + }, + "time": { + "properties": { + "ms": { + "type": "long" + } + } + } + } + }, + "user": { + "properties": { + "ticks": { + "type": "long" + }, + "time": { + "properties": { + "ms": { + "type": "long" + } + } + } + } + } + } + }, + "info": { + "properties": { + "ephemeral_id": { + "type": "keyword" + }, + "uptime": { + "properties": { + "ms": { + "type": "long" + } + } + } + } + }, + "memstats": { + "properties": { + "gc_next": { + "type": "long" + }, + "memory_alloc": { + "type": "long" + }, + "memory_total": { + "type": "long" + }, + "rss": { + "type": "long" + } + } + } + } + }, + "libbeat": { + "properties": { + "config": { + "properties": { + "module": { + "properties": { + "running": { + "type": "long" + }, + "starts": { + "type": "long" + }, + "stops": { + "type": "long" + } + } + }, + "reloads": { + "type": "long" + } + } + }, + "output": { + "properties": { + "events": { + "properties": { + "acked": { + "type": "long" + }, + "active": { + "type": "long" + }, + "batches": { + "type": "long" + }, + "dropped": { + "type": "long" + }, + "duplicates": { + "type": "long" + }, + "failed": { + "type": "long" + }, + "total": { + "type": "long" + } + } + }, + "read": { + "properties": { + "bytes": { + "type": "long" + }, + "errors": { + "type": "long" + } + } + }, + "type": { + "type": "keyword" + }, + "write": { + "properties": { + "bytes": { + "type": "long" + }, + "errors": { + "type": "long" + } + } + } + } + }, + "pipeline": { + "properties": { + "clients": { + "type": "long" + }, + "events": { + "properties": { + "active": { + "type": "long" + }, + "dropped": { + "type": "long" + }, + "failed": { + "type": "long" + }, + "filtered": { + "type": "long" + }, + "published": { + "type": "long" + }, + "retry": { + "type": "long" + }, + "total": { + "type": "long" + } + } + }, + "queue": { + "properties": { + "acked": { + "type": "long" + } + } + } + } + } + } + }, + "system": { + "properties": { + "load": { + "properties": { + "1": { + "type": "double" + }, + "15": { + "type": "double" + }, + "5": { + "type": "double" + }, + "norm": { + "properties": { + "1": { + "type": "double" + }, + "15": { + "type": "double" + }, + "5": { + "type": "double" + } + } + } + } + } + } + } + } + }, + "tags": { + "type": "keyword" + }, + "timestamp": { + "format": "date_time", + "type": "date" + } + } + }, + "cluster_uuid": { + "type": "keyword" + }, + "interval_ms": { + "type": "long" + }, + "source_node": { + "properties": { + "host": { + "type": "keyword" + }, + "ip": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "transport_address": { + "type": "keyword" + }, + "uuid": { + "type": "keyword" + } + } + }, + "timestamp": { + "format": "date_time", + "type": "date" + }, + "type": { + "type": "keyword" + } + } + } + } +} diff --git a/x-pack/plugin/core/src/main/resources/monitoring-es.json b/x-pack/plugin/core/src/main/resources/monitoring-es.json new file mode 100644 index 0000000000000..a1726a7a74ae4 --- /dev/null +++ b/x-pack/plugin/core/src/main/resources/monitoring-es.json @@ -0,0 +1,923 @@ +{ + "index_patterns": [ ".monitoring-es-${monitoring.template.version}-*" ], + "version": 7000001, + "settings": { + "index.number_of_shards": 1, + "index.number_of_replicas": 0, 
+ "index.auto_expand_replicas": "0-1", + "index.format": 6, + "index.codec": "best_compression" + }, + "mappings": { + "doc": { + "date_detection": false, + "dynamic": false, + "properties": { + "cluster_uuid": { + "type": "keyword" + }, + "state_uuid": { + "type": "keyword" + }, + "timestamp": { + "type": "date", + "format": "date_time" + }, + "interval_ms": { + "type": "long" + }, + "type": { + "type": "keyword" + }, + "source_node": { + "properties": { + "uuid": { + "type": "keyword" + }, + "host": { + "type": "keyword" + }, + "transport_address": { + "type": "keyword" + }, + "ip": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "timestamp": { + "type": "date", + "format": "date_time" + } + } + }, + "indices_stats": { + "properties": { + "_all": { + "properties": { + "primaries": { + "properties": { + "docs": { + "properties": { + "count": { + "type": "long" + } + } + }, + "indexing": { + "properties": { + "index_total": { + "type": "long" + }, + "index_time_in_millis": { + "type": "long" + } + } + }, + "search": { + "properties": { + "query_total": { + "type": "long" + }, + "query_time_in_millis": { + "type": "long" + } + } + } + } + }, + "total": { + "properties": { + "docs": { + "properties": { + "count": { + "type": "long" + } + } + }, + "indexing": { + "properties": { + "index_total": { + "type": "long" + }, + "index_time_in_millis": { + "type": "long" + } + } + }, + "search": { + "properties": { + "query_total": { + "type": "long" + }, + "query_time_in_millis": { + "type": "long" + } + } + } + } + } + } + } + } + }, + "index_stats": { + "properties": { + "index": { + "type": "keyword" + }, + "primaries": { + "properties": { + "docs": { + "properties": { + "count": { + "type": "long" + } + } + }, + "fielddata" : { + "properties": { + "memory_size_in_bytes": { + "type": "long" + }, + "evictions": { + "type": "long" + } + } + }, + "store": { + "properties": { + "size_in_bytes": { + "type": "long" + } + } + }, + "indexing": { + "properties": { + "index_total": { + "type": "long" + }, + "index_time_in_millis": { + "type": "long" + }, + "throttle_time_in_millis": { + "type": "long" + } + } + }, + "merges": { + "properties": { + "total_size_in_bytes": { + "type": "long" + } + } + }, + "query_cache": { + "properties": { + "memory_size_in_bytes": { + "type": "long" + }, + "evictions": { + "type": "long" + }, + "hit_count": { + "type": "long" + }, + "miss_count": { + "type": "long" + } + } + }, + "request_cache": { + "properties": { + "memory_size_in_bytes": { + "type": "long" + }, + "evictions": { + "type": "long" + }, + "hit_count": { + "type": "long" + }, + "miss_count": { + "type": "long" + } + } + }, + "search": { + "properties": { + "query_total": { + "type": "long" + }, + "query_time_in_millis": { + "type": "long" + } + } + }, + "segments": { + "properties": { + "count": { + "type": "integer" + }, + "memory_in_bytes": { + "type": "long" + }, + "terms_memory_in_bytes": { + "type": "long" + }, + "points_memory_in_bytes": { + "type": "long" + }, + "stored_fields_memory_in_bytes": { + "type": "long" + }, + "term_vectors_memory_in_bytes": { + "type": "long" + }, + "norms_memory_in_bytes": { + "type": "long" + }, + "doc_values_memory_in_bytes": { + "type": "long" + }, + "index_writer_memory_in_bytes": { + "type": "long" + }, + "version_map_memory_in_bytes": { + "type": "long" + }, + "fixed_bit_set_memory_in_bytes": { + "type": "long" + } + } + }, + "refresh": { + "properties": { + "total_time_in_millis": { + "type": "long" + } + } + } + } + }, + "total": { + "properties": 
{ + "docs": { + "properties": { + "count": { + "type": "long" + } + } + }, + "fielddata" : { + "properties": { + "memory_size_in_bytes": { + "type": "long" + }, + "evictions": { + "type": "long" + } + } + }, + "store": { + "properties": { + "size_in_bytes": { + "type": "long" + } + } + }, + "indexing": { + "properties": { + "index_total": { + "type": "long" + }, + "index_time_in_millis": { + "type": "long" + }, + "throttle_time_in_millis": { + "type": "long" + } + } + }, + "merges": { + "properties": { + "total_size_in_bytes": { + "type": "long" + } + } + }, + "query_cache": { + "properties": { + "memory_size_in_bytes": { + "type": "long" + }, + "evictions": { + "type": "long" + }, + "hit_count": { + "type": "long" + }, + "miss_count": { + "type": "long" + } + } + }, + "request_cache": { + "properties": { + "memory_size_in_bytes": { + "type": "long" + }, + "evictions": { + "type": "long" + }, + "hit_count": { + "type": "long" + }, + "miss_count": { + "type": "long" + } + } + }, + "search": { + "properties": { + "query_total": { + "type": "long" + }, + "query_time_in_millis": { + "type": "long" + } + } + }, + "segments": { + "properties": { + "count": { + "type": "integer" + }, + "memory_in_bytes": { + "type": "long" + }, + "terms_memory_in_bytes": { + "type": "long" + }, + "points_memory_in_bytes": { + "type": "long" + }, + "stored_fields_memory_in_bytes": { + "type": "long" + }, + "term_vectors_memory_in_bytes": { + "type": "long" + }, + "norms_memory_in_bytes": { + "type": "long" + }, + "doc_values_memory_in_bytes": { + "type": "long" + }, + "index_writer_memory_in_bytes": { + "type": "long" + }, + "version_map_memory_in_bytes": { + "type": "long" + }, + "fixed_bit_set_memory_in_bytes": { + "type": "long" + } + } + }, + "refresh": { + "properties": { + "total_time_in_millis": { + "type": "long" + } + } + } + } + } + } + }, + "cluster_stats": { + "properties": { + "nodes": { + "type": "object" + }, + "indices": { + "type": "object" + } + } + }, + "cluster_state": { + "properties": { + "version": { + "type": "long" + }, + "nodes_hash": { + "type": "integer" + }, + "master_node": { + "type": "keyword" + }, + "state_uuid": { + "type": "keyword" + }, + "status": { + "type": "keyword" + }, + "nodes": { + "type": "object" + }, + "shards": { + "type": "object" + } + } + }, + "node_stats": { + "properties": { + "node_id": { + "type": "keyword" + }, + "node_master": { + "type": "boolean" + }, + "mlockall": { + "type": "boolean" + }, + "indices": { + "properties": { + "docs": { + "properties": { + "count": { + "type": "long" + } + } + }, + "fielddata" : { + "properties": { + "memory_size_in_bytes": { + "type": "long" + }, + "evictions": { + "type": "long" + } + } + }, + "indexing": { + "properties": { + "index_time_in_millis": { + "type": "long" + }, + "index_total": { + "type": "long" + }, + "throttle_time_in_millis": { + "type": "long" + } + } + }, + "query_cache": { + "properties": { + "memory_size_in_bytes": { + "type": "long" + }, + "evictions": { + "type": "long" + }, + "hit_count": { + "type": "long" + }, + "miss_count": { + "type": "long" + } + } + }, + "request_cache": { + "properties": { + "memory_size_in_bytes": { + "type": "long" + }, + "evictions": { + "type": "long" + }, + "hit_count": { + "type": "long" + }, + "miss_count": { + "type": "long" + } + } + }, + "search": { + "properties": { + "query_time_in_millis": { + "type": "long" + }, + "query_total": { + "type": "long" + } + } + }, + "segments": { + "properties": { + "count": { + "type": "integer" + }, + "memory_in_bytes": { + 
"type": "long" + }, + "terms_memory_in_bytes": { + "type": "long" + }, + "points_memory_in_bytes": { + "type": "long" + }, + "stored_fields_memory_in_bytes": { + "type": "long" + }, + "term_vectors_memory_in_bytes": { + "type": "long" + }, + "norms_memory_in_bytes": { + "type": "long" + }, + "doc_values_memory_in_bytes": { + "type": "long" + }, + "index_writer_memory_in_bytes": { + "type": "long" + }, + "version_map_memory_in_bytes": { + "type": "long" + }, + "fixed_bit_set_memory_in_bytes": { + "type": "long" + } + } + }, + "store": { + "properties": { + "size_in_bytes": { + "type": "long" + } + } + } + } + }, + "fs": { + "properties": { + "total": { + "properties": { + "total_in_bytes": { + "type": "long" + }, + "free_in_bytes": { + "type": "long" + }, + "available_in_bytes": { + "type": "long" + } + } + }, + "data": { + "properties": { + "spins": { + "type": "boolean" + } + } + }, + "io_stats": { + "properties": { + "total": { + "properties": { + "operations": { + "type": "long" + }, + "read_operations": { + "type": "long" + }, + "write_operations": { + "type": "long" + }, + "read_kilobytes": { + "type": "long" + }, + "write_kilobytes": { + "type": "long" + } + } + } + } + } + } + }, + "os": { + "properties": { + "cgroup": { + "properties": { + "cpuacct": { + "properties": { + "control_group": { + "type": "keyword" + }, + "usage_nanos": { + "type": "long" + } + } + }, + "cpu": { + "properties": { + "cfs_quota_micros": { + "type": "long" + }, + "control_group": { + "type": "keyword" + }, + "stat": { + "properties": { + "number_of_elapsed_periods": { + "type": "long" + }, + "number_of_times_throttled": { + "type": "long" + }, + "time_throttled_nanos": { + "type": "long" + } + } + } + } + }, + "memory": { + "properties": { + "control_group": { + "type": "keyword" + }, + "limit_in_bytes": { + "type": "keyword" + }, + "usage_in_bytes": { + "type": "keyword" + } + } + } + } + }, + "cpu": { + "properties": { + "load_average": { + "properties": { + "1m": { + "type": "half_float" + }, + "5m": { + "type": "half_float" + }, + "15m": { + "type": "half_float" + } + } + } + } + } + } + }, + "process": { + "properties": { + "open_file_descriptors": { + "type": "long" + }, + "max_file_descriptors": { + "type": "long" + }, + "cpu": { + "properties": { + "percent": { + "type": "half_float" + } + } + } + } + }, + "jvm": { + "properties": { + "mem": { + "properties": { + "heap_used_in_bytes": { + "type": "long" + }, + "heap_used_percent": { + "type": "half_float" + }, + "heap_max_in_bytes": { + "type": "long" + } + } + }, + "gc": { + "properties": { + "collectors": { + "properties": { + "young": { + "properties": { + "collection_count": { + "type": "long" + }, + "collection_time_in_millis": { + "type": "long" + } + } + }, + "old": { + "properties": { + "collection_count": { + "type": "long" + }, + "collection_time_in_millis": { + "type": "long" + } + } + } + } + } + } + } + } + }, + "thread_pool": { + "properties": { + "bulk": { + "properties": { + "threads": { + "type": "integer" + }, + "queue": { + "type": "integer" + }, + "rejected": { + "type": "long" + } + } + }, + "generic": { + "properties": { + "threads": { + "type": "integer" + }, + "queue": { + "type": "integer" + }, + "rejected": { + "type": "long" + } + } + }, + "get": { + "properties": { + "threads": { + "type": "integer" + }, + "queue": { + "type": "integer" + }, + "rejected": { + "type": "long" + } + } + }, + "index": { + "properties": { + "threads": { + "type": "integer" + }, + "queue": { + "type": "integer" + }, + "rejected": { + "type": 
"long" + } + } + }, + "maanagement": { + "properties": { + "threads": { + "type": "integer" + }, + "queue": { + "type": "integer" + }, + "rejected": { + "type": "long" + } + } + }, + "search": { + "properties": { + "threads": { + "type": "integer" + }, + "queue": { + "type": "integer" + }, + "rejected": { + "type": "long" + } + } + }, + "watcher": { + "properties": { + "threads": { + "type": "integer" + }, + "queue": { + "type": "integer" + }, + "rejected": { + "type": "long" + } + } + } + } + } + } + }, + "index_recovery": { + "type": "object" + }, + "shard": { + "properties": { + "state": { + "type": "keyword" + }, + "primary": { + "type": "boolean" + }, + "index": { + "type": "keyword" + }, + "relocating_node": { + "type": "keyword" + }, + "shard": { + "type": "long" + }, + "node": { + "type": "keyword" + } + } + }, + "job_stats": { + "properties": { + "job_id": { + "type": "keyword" + }, + "state": { + "type": "keyword" + }, + "data_counts": { + "properties": { + "input_bytes": { + "type": "long" + }, + "processed_record_count": { + "type": "long" + }, + "empty_bucket_count": { + "type": "long" + }, + "sparse_bucket_count": { + "type": "long" + }, + "bucket_count": { + "type": "long" + }, + "earliest_record_timestamp": { + "type": "date" + }, + "latest_record_timestamp": { + "type": "date" + } + } + }, + "model_size_stats": { + "properties": { + "model_bytes": { + "type": "long" + }, + "bucket_allocation_failures_count": { + "type": "long" + } + } + }, + "node": { + "properties": { + "id": { + "type": "keyword" + } + } + } + } + } + } + } + } +} diff --git a/x-pack/plugin/core/src/main/resources/monitoring-kibana.json b/x-pack/plugin/core/src/main/resources/monitoring-kibana.json new file mode 100644 index 0000000000000..51e9f7cdd1906 --- /dev/null +++ b/x-pack/plugin/core/src/main/resources/monitoring-kibana.json @@ -0,0 +1,225 @@ +{ + "index_patterns": [ ".monitoring-kibana-${monitoring.template.version}-*" ], + "version": 7000001, + "settings": { + "index.number_of_shards": 1, + "index.number_of_replicas": 0, + "index.auto_expand_replicas": "0-1", + "index.format": 6, + "index.codec": "best_compression" + }, + "mappings": { + "doc": { + "dynamic": false, + "properties": { + "cluster_uuid": { + "type": "keyword" + }, + "timestamp": { + "type": "date", + "format": "date_time" + }, + "interval_ms": { + "type": "long" + }, + "type": { + "type": "keyword" + }, + "source_node": { + "properties": { + "uuid": { + "type": "keyword" + }, + "host": { + "type": "keyword" + }, + "transport_address": { + "type": "keyword" + }, + "ip": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "timestamp": { + "type": "date", + "format": "date_time" + } + } + }, + "kibana_stats": { + "properties": { + "kibana": { + "properties": { + "uuid": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "host": { + "type": "keyword" + }, + "transport_address": { + "type": "keyword" + }, + "version": { + "type": "keyword" + }, + "snapshot": { + "type": "boolean" + }, + "status": { + "type": "keyword" + }, + "statuses": { + "properties": { + "name": { + "type": "keyword" + }, + "state": { + "type": "keyword" + } + } + } + } + }, + "cloud": { + "properties": { + "name": { + "type": "keyword" + }, + "id": { + "type": "keyword" + }, + "vm_type": { + "type": "keyword" + }, + "region": { + "type": "keyword" + }, + "zone": { + "type": "keyword" + }, + "metadata": { + "type": "object" + } + } + }, + "os": { + "properties": { + "load": { + "properties": { + "1m": { + "type": "half_float" + }, + 
"5m": { + "type": "half_float" + }, + "15m": { + "type": "half_float" + } + } + }, + "memory": { + "properties": { + "total_in_bytes": { + "type": "float" + }, + "free_in_bytes": { + "type": "float" + }, + "used_in_bytes": { + "type": "float" + } + } + }, + "uptime_in_millis": { + "type": "long" + } + } + }, + "process": { + "properties": { + "memory": { + "properties": { + "heap": { + "properties": { + "total_in_bytes": { + "type": "float" + }, + "used_in_bytes": { + "type": "float" + }, + "size_limit": { + "type": "float" + } + } + }, + "resident_set_size_in_bytes": { + "type": "float" + } + } + }, + "event_loop_delay": { + "type": "float" + }, + "uptime_in_millis": { + "type": "long" + } + } + }, + "sockets": { + "properties": { + "http": { + "properties": { + "total": { + "type": "long" + } + } + }, + "https": { + "properties": { + "total": { + "type": "long" + } + } + } + } + }, + "timestamp": { + "type": "date" + }, + "requests": { + "properties": { + "disconnects": { + "type": "long" + }, + "total": { + "type": "long" + }, + "status_codes": { + "type": "object" + } + } + }, + "response_times": { + "properties": { + "average": { + "type": "float" + }, + "max": { + "type": "float" + } + } + }, + "concurrent_connections": { + "type": "long" + } + } + } + } + } + } +} diff --git a/x-pack/plugin/core/src/main/resources/monitoring-logstash.json b/x-pack/plugin/core/src/main/resources/monitoring-logstash.json new file mode 100644 index 0000000000000..cadefd6864dc5 --- /dev/null +++ b/x-pack/plugin/core/src/main/resources/monitoring-logstash.json @@ -0,0 +1,412 @@ +{ + "index_patterns": [ ".monitoring-logstash-${monitoring.template.version}-*" ], + "version": 7000001, + "settings": { + "index.number_of_shards": 1, + "index.number_of_replicas": 0, + "index.auto_expand_replicas": "0-1", + "index.format": 6, + "index.codec": "best_compression" + }, + "mappings": { + "doc": { + "dynamic": false, + "properties": { + "cluster_uuid": { + "type": "keyword" + }, + "timestamp": { + "type": "date", + "format": "date_time" + }, + "interval_ms": { + "type": "long" + }, + "type": { + "type": "keyword" + }, + "source_node": { + "properties": { + "uuid": { + "type": "keyword" + }, + "host": { + "type": "keyword" + }, + "transport_address": { + "type": "keyword" + }, + "ip": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "timestamp": { + "type": "date", + "format": "date_time" + } + } + }, + "logstash_stats": { + "type": "object", + "properties": { + "logstash": { + "properties": { + "uuid": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "ephemeral_id": { + "type": "keyword" + }, + "host": { + "type": "keyword" + }, + "http_address": { + "type": "keyword" + }, + "version": { + "type": "keyword" + }, + "snapshot": { + "type": "boolean" + }, + "status": { + "type": "keyword" + }, + "pipeline": { + "properties": { + "workers": { + "type": "short" + }, + "batch_size": { + "type": "long" + } + } + } + } + }, + "events": { + "properties": { + "filtered": { + "type": "long" + }, + "in": { + "type": "long" + }, + "out": { + "type": "long" + }, + "duration_in_millis": { + "type": "long" + } + } + }, + "timestamp": { + "type": "date" + }, + "jvm": { + "properties": { + "uptime_in_millis": { + "type": "long" + }, + "gc": { + "properties": { + "collectors": { + "properties": { + "old": { + "properties": { + "collection_count": { + "type": "long" + }, + "collection_time_in_millis": { + "type": "long" + } + } + }, + "young": { + "properties": { + "collection_count": { + "type": 
"long" + }, + "collection_time_in_millis": { + "type": "long" + } + } + } + } + } + } + }, + "mem": { + "properties": { + "heap_max_in_bytes": { + "type": "long" + }, + "heap_used_in_bytes": { + "type": "long" + }, + "heap_used_percent": { + "type": "long" + } + } + } + } + }, + "os": { + "properties": { + "cpu": { + "properties": { + "load_average": { + "properties": { + "1m": { + "type": "half_float" + }, + "5m": { + "type": "half_float" + }, + "15m": { + "type": "half_float" + } + } + } + } + }, + "cgroup": { + "properties": { + "cpuacct": { + "properties": { + "control_group": { + "type": "keyword" + }, + "usage_nanos": { + "type": "long" + } + } + }, + "cpu": { + "properties": { + "control_group": { + "type": "keyword" + }, + "stat": { + "properties": { + "number_of_elapsed_periods": { + "type": "long" + }, + "number_of_times_throttled": { + "type": "long" + }, + "time_throttled_nanos": { + "type": "long" + } + } + } + } + } + } + } + } + }, + "process": { + "properties": { + "cpu": { + "properties": { + "percent": { + "type": "long" + } + } + }, + "max_file_descriptors": { + "type": "long" + }, + "open_file_descriptors": { + "type": "long" + } + } + }, + "reloads": { + "properties": { + "failures": { + "type": "long" + }, + "successes": { + "type": "long" + } + } + }, + "queue": { + "properties": { + "events_count": { + "type": "long" + }, + "type": { + "type": "keyword" + } + } + }, + "pipelines": { + "type": "nested", + "properties": { + "id": { + "type": "keyword" + }, + "hash": { + "type": "keyword" + }, + "ephemeral_id": { + "type": "keyword" + }, + "events": { + "properties": { + "in": { + "type": "long" + }, + "filtered": { + "type": "long" + }, + "out": { + "type": "long" + }, + "duration_in_millis":{ + "type": "long" + }, + "queue_push_duration_in_millis": { + "type": "long" + } + } + }, + "queue": { + "properties": { + "events_count": { + "type": "long" + }, + "type": { + "type": "keyword" + }, + "max_queue_size_in_bytes": { + "type": "long" + }, + "queue_size_in_bytes": { + "type": "long" + } + } + }, + "vertices": { + "type": "nested", + "properties": { + "id": { + "type": "keyword" + }, + "pipeline_ephemeral_id": { "type": "keyword" }, + "events_in": { "type": "long" }, + "events_out": { "type": "long" }, + "duration_in_millis": { "type": "long" }, + "queue_push_duration_in_millis": { "type": "long" }, + "long_counters": { + "type": "nested", + "properties": { + "name": { + "type": "keyword" + }, + "value": { + "type": "long" + } + } + }, + "double_gauges": { + "type": "nested", + "properties": { + "name": { + "type": "keyword" + }, + "value": { + "type": "double" + } + } + } + } + }, + "reloads": { + "properties": { + "failures": { + "type": "long" + }, + "successes": { + "type": "long" + } + } + } + } + }, + "workers": { + "type": "short" + }, + "batch_size": { + "type": "integer" + } + } + }, + "logstash_state": { + "properties": { + "uuid": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "host": { + "type": "keyword" + }, + "http_address": { + "type": "keyword" + }, + "ephemeral_id": { + "type": "keyword" + }, + "version": { + "type": "keyword" + }, + "snapshot": { + "type": "boolean" + }, + "status": { + "type": "keyword" + }, + "pipeline": { + "properties": { + "id": { + "type": "keyword" + }, + "hash": { + "type": "keyword" + }, + "ephemeral_id": { + "type": "keyword" + }, + "workers": { + "type": "short" + }, + "batch_size": { + "type": "integer" + }, + "format": { + "type": "keyword" + }, + "version": { + "type": "keyword" + }, + 
"representation": { + "enabled": false + } + } + } + } + } + } + } + } +} diff --git a/x-pack/plugin/core/src/main/resources/rollup-dynamic-template.json b/x-pack/plugin/core/src/main/resources/rollup-dynamic-template.json new file mode 100644 index 0000000000000..94336c60c4d68 --- /dev/null +++ b/x-pack/plugin/core/src/main/resources/rollup-dynamic-template.json @@ -0,0 +1,26 @@ +{ + "_meta":{ + "_rollup": { + "ROLLUP_METADATA_PLACEHOLDER":"ROLLUP_METADATA_PLACEHOLDER" + }, + "rollup-version": "${rollup.dynamic_template.version}" + }, + "dynamic_templates": [ + { + "strings": { + "match_mapping_type": "string", + "mapping": { + "type": "keyword" + } + } + }, + { + "date_histograms": { + "path_match": "*.date_histogram.timestamp", + "mapping": { + "type": "date" + } + } + } + ] +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/resources/security-index-template.json b/x-pack/plugin/core/src/main/resources/security-index-template.json new file mode 100644 index 0000000000000..778f44a93bf3a --- /dev/null +++ b/x-pack/plugin/core/src/main/resources/security-index-template.json @@ -0,0 +1,182 @@ +{ + "index_patterns" : [ ".security-*" ], + "order" : 1000, + "settings" : { + "number_of_shards" : 1, + "number_of_replicas" : 0, + "auto_expand_replicas" : "0-all", + "index.priority": 1000, + "index.format": 6, + "analysis" : { + "filter" : { + "email" : { + "type" : "pattern_capture", + "preserve_original" : true, + "patterns" : [ + "([^@]+)", + "(\\p{L}+)", + "(\\d+)", + "@(.+)" + ] + } + }, + "analyzer" : { + "email" : { + "tokenizer" : "uax_url_email", + "filter" : [ + "email", + "lowercase", + "unique" + ] + } + } + } + }, + "mappings" : { + "doc" : { + "_meta": { + "security-version": "${security.template.version}" + }, + "dynamic" : "strict", + "properties" : { + "username" : { + "type" : "keyword" + }, + "roles" : { + "type" : "keyword" + }, + "password" : { + "type" : "keyword", + "index" : false, + "doc_values": false + }, + "full_name" : { + "type" : "text" + }, + "email" : { + "type" : "text", + "analyzer" : "email" + }, + "metadata" : { + "type" : "object", + "dynamic" : true + }, + "enabled": { + "type": "boolean" + }, + "cluster" : { + "type" : "keyword" + }, + "indices" : { + "type" : "object", + "properties" : { + "field_security" : { + "properties" : { + "grant": { + "type": "keyword" + }, + "except": { + "type": "keyword" + } + } + }, + "names" : { + "type" : "keyword" + }, + "privileges" : { + "type" : "keyword" + }, + "query" : { + "type" : "keyword" + } + } + }, + "name" : { + "type" : "keyword" + }, + "run_as" : { + "type" : "keyword" + }, + "doc_type" : { + "type" : "keyword" + }, + "type" : { + "type" : "keyword" + }, + "expiration_time" : { + "type" : "date", + "format" : "epoch_millis" + }, + "creation_time" : { + "type" : "date", + "format" : "epoch_millis" + }, + "rules" : { + "type" : "object", + "dynamic" : true + }, + "refresh_token" : { + "type" : "object", + "properties" : { + "token" : { + "type" : "keyword" + }, + "refreshed" : { + "type" : "boolean" + }, + "invalidated" : { + "type" : "boolean" + }, + "client" : { + "type" : "object", + "properties" : { + "type" : { + "type" : "keyword" + }, + "user" : { + "type" : "keyword" + }, + "realm" : { + "type" : "keyword" + } + } + } + } + }, + "access_token" : { + "type" : "object", + "properties" : { + "user_token" : { + "type" : "object", + "properties" : { + "id" : { + "type" : "keyword" + }, + "expiration_time" : { + "type" : "date", + "format" : "epoch_millis" + }, + "version" : { + "type" : 
"integer" + }, + "metadata" : { + "type" : "object", + "dynamic" : true + }, + "authentication" : { + "type" : "binary" + } + } + }, + "invalidated" : { + "type" : "boolean" + }, + "realm" : { + "type" : "keyword" + } + } + } + } + } + } +} diff --git a/x-pack/plugin/core/src/main/resources/security_audit_log.json b/x-pack/plugin/core/src/main/resources/security_audit_log.json new file mode 100644 index 0000000000000..f5decbb4019be --- /dev/null +++ b/x-pack/plugin/core/src/main/resources/security_audit_log.json @@ -0,0 +1,87 @@ +{ + "index_patterns": [ ".security_audit_log*" ], + "order": 1000, + "settings": { + "index.format": 6 + }, + "mappings": { + "doc": { + "_meta": { + "security-version": "${security.template.version}" + }, + "dynamic" : "strict", + "properties": { + "@timestamp": { + "type": "date", + "format": "date_time", + "doc_values": true + }, + "node_name": { + "type": "keyword" + }, + "node_host_name": { + "type": "keyword" + }, + "node_host_address": { + "type": "keyword" + }, + "layer": { + "type": "keyword" + }, + "event_type": { + "type": "keyword" + }, + "origin_address": { + "type": "keyword" + }, + "origin_type": { + "type": "keyword" + }, + "principal": { + "type": "keyword" + }, + "roles": { + "type": "keyword" + }, + "run_by_principal": { + "type": "keyword" + }, + "run_as_principal": { + "type": "keyword" + }, + "action": { + "type": "keyword" + }, + "indices": { + "type": "keyword" + }, + "request": { + "type": "keyword" + }, + "request_body": { + "type": "keyword", + "index": false, + "doc_values": false + }, + "uri": { + "type": "keyword" + }, + "realm": { + "type": "keyword" + }, + "run_by_realm": { + "type": "keyword" + }, + "run_as_realm": { + "type": "keyword" + }, + "transport_profile": { + "type": "keyword" + }, + "rule": { + "type": "keyword" + } + } + } + } +} diff --git a/x-pack/plugin/core/src/main/resources/triggered-watches.json b/x-pack/plugin/core/src/main/resources/triggered-watches.json new file mode 100644 index 0000000000000..fb345260008ca --- /dev/null +++ b/x-pack/plugin/core/src/main/resources/triggered-watches.json @@ -0,0 +1,40 @@ +{ + "index_patterns": [ ".triggered_watches*" ], + "order": 2147483647, + "settings": { + "index.number_of_shards": 1, + "index.auto_expand_replicas": "0-1", + "index.refresh_interval" : "-1", + "index.format": 6, + "index.priority": 900 + }, + "mappings": { + "doc": { + "dynamic" : "strict", + "properties": { + "trigger_event": { + "type": "object", + "dynamic": true, + "enabled" : false, + "properties": { + "schedule": { + "type": "object", + "dynamic": true, + "properties": { + "triggered_time": { + "type": "date" + }, + "scheduled_time": { + "type": "date" + } + } + } + } + }, + "state": { + "type": "keyword" + } + } + } + } +} diff --git a/x-pack/plugin/core/src/main/resources/watch-history.json b/x-pack/plugin/core/src/main/resources/watch-history.json new file mode 100644 index 0000000000000..a26305b35542a --- /dev/null +++ b/x-pack/plugin/core/src/main/resources/watch-history.json @@ -0,0 +1,603 @@ +{ + "index_patterns": [ ".watcher-history-${xpack.watcher.template.version}*" ], + "order": 2147483647, + "settings": { + "xpack.watcher.template.version": "${xpack.watcher.template.version}", + "index.number_of_shards": 1, + "index.number_of_replicas": 0, + "index.auto_expand_replicas": "0-1", + "index.format": 6 + }, + "mappings": { + "doc": { + "dynamic_templates": [ + { + "disabled_payload_fields": { + "path_match": "result\\.(input(\\..+)*|(transform(\\..+)*)|(actions\\.transform(\\..+)*))\\.payload", 
+ "match_pattern": "regex", + "mapping": { + "type": "object", + "enabled": false + } + } + }, + { + "disabled_search_request_body_fields": { + "path_match": "result\\.(input(\\..+)*|(transform(\\..+)*)|(actions\\.transform(\\..+)*))\\.search\\.request\\.(body|template)", + "match_pattern": "regex", + "mapping": { + "type": "object", + "enabled": false + } + } + }, + { + "disabled_exception_fields": { + "path_match": "result\\.(input(\\..+)*|(transform(\\..+)*)|(actions\\.transform(\\..+)*)|actions)\\.error", + "match_pattern": "regex", + "mapping": { + "type": "object", + "enabled": false + } + } + }, + { + "disabled_jira_custom_fields": { + "path_match": "result.actions.jira.fields.customfield_*", + "mapping": { + "type": "object", + "enabled": false + } + } + } + ], + "dynamic": false, + "properties": { + "watch_id": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "trigger_event": { + "type": "object", + "dynamic": true, + "properties": { + "type" : { + "type" : "keyword" + }, + "triggered_time": { + "type": "date" + }, + "manual": { + "type": "object", + "dynamic": true, + "properties": { + "schedule": { + "type": "object", + "dynamic": true, + "properties": { + "scheduled_time": { + "type": "date" + } + } + } + } + }, + "schedule": { + "type": "object", + "dynamic": true, + "properties": { + "scheduled_time": { + "type": "date" + } + } + } + } + }, + "vars" : { + "type" : "object", + "enabled" : false + }, + "input": { + "type": "object", + "enabled": false + }, + "condition": { + "type": "object", + "enabled": false + }, + "state": { + "type": "keyword" + }, + "status": { + "type": "object", + "enabled" : false, + "dynamic" : true + }, + "messages": { + "type": "text" + }, + "exception" : { + "type" : "object", + "enabled" : false + }, + "result": { + "type": "object", + "dynamic": true, + "properties": { + "execution_time": { + "type": "date" + }, + "execution_duration": { + "type": "long" + }, + "input": { + "type": "object", + "dynamic": true, + "properties": { + "type" : { + "type" : "keyword" + }, + "status" : { + "type" : "keyword" + }, + "payload" : { + "type" : "object", + "enabled" : false + }, + "search": { + "type": "object", + "dynamic": true, + "properties": { + "request": { + "type": "object", + "dynamic": true, + "properties": { + "search_type": { + "type": "keyword" + }, + "indices": { + "type": "keyword" + }, + "types": { + "type": "keyword" + } + } + } + } + }, + "http": { + "type": "object", + "dynamic": true, + "properties": { + "request": { + "type": "object", + "dynamic": true, + "properties": { + "path": { + "type": "keyword" + }, + "host": { + "type": "keyword" + } + } + } + } + } + } + }, + "condition" : { + "type" : "object", + "dynamic" : true, + "properties" : { + "type" : { + "type" : "keyword" + }, + "status" : { + "type" : "keyword" + }, + "met" : { + "type" : "boolean" + }, + "compare" : { + "type" : "object", + "enabled" : false + }, + "array_compare" : { + "type" : "object", + "enabled" : false + }, + "script" : { + "type" : "object", + "enabled" : false + } + } + }, + "transform" : { + "type" : "object", + "dynamic" : true, + "properties" : { + "type" : { + "type" : "keyword" + }, + "search" : { + "type" : "object", + "dynamic" : true, + "properties" : { + "request" : { + "type" : "object", + "dynamic" : true, + "properties" : { + "indices" : { + "type" : "keyword" + }, + "types" : { + "type" : "keyword" + } + } + } + } + } + } + }, + "actions": { + "type": "nested", + "include_in_parent": true, + "dynamic": true, + "properties": { 
+ "id" : { + "type" : "keyword" + }, + "type" : { + "type" : "keyword" + }, + "status" : { + "type" : "keyword" + }, + "reason" : { + "type" : "keyword" + }, + "email": { + "type": "object", + "dynamic": true, + "properties": { + "message": { + "type": "object", + "dynamic": true, + "properties": { + "id": { + "type": "keyword" + }, + "from": { + "type": "keyword" + }, + "reply_to": { + "type": "keyword" + }, + "to": { + "type": "keyword" + }, + "cc": { + "type": "keyword" + }, + "bcc": { + "type": "keyword" + } + } + } + } + }, + "webhook": { + "type": "object", + "dynamic": true, + "properties": { + "request": { + "type": "object", + "dynamic": true, + "properties": { + "path": { + "type": "keyword" + }, + "host": { + "type": "keyword" + } + } + } + } + }, + "index": { + "type": "object", + "dynamic": true, + "properties": { + "response": { + "type": "object", + "dynamic": true, + "properties": { + "index": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "id": { + "type": "keyword" + } + } + } + } + }, + "hipchat" : { + "type": "object", + "dynamic": true, + "properties": { + "account": { + "type": "keyword" + }, + "sent_messages": { + "type": "nested", + "include_in_parent": true, + "dynamic": true, + "properties": { + "status": { + "type": "keyword" + }, + "reason": { + "type": "text" + }, + "request" : { + "type" : "object", + "enabled" : false + }, + "response" : { + "type" : "object", + "enabled" : false + }, + "room" : { + "type": "keyword" + }, + "user" : { + "type": "keyword" + }, + "message" : { + "type" : "object", + "dynamic" : true, + "properties" : { + "message_format" : { + "type" : "keyword" + }, + "color" : { + "type" : "keyword" + }, + "notify" : { + "type" : "boolean" + }, + "message" : { + "type" : "text" + }, + "from" : { + "type" : "text" + } + } + } + } + } + } + }, + "jira" : { + "type": "object", + "dynamic": true, + "properties": { + "account": { + "type": "keyword" + }, + "reason": { + "type": "text" + }, + "request" : { + "type" : "object", + "enabled" : false + }, + "response" : { + "type" : "object", + "enabled" : false + }, + "fields": { + "type": "object", + "dynamic": true, + "properties": { + "summary": { + "type": "text" + }, + "description": { + "type": "text" + }, + "labels" : { + "type": "text" + }, + "project" : { + "type" : "object", + "dynamic" : true, + "properties" : { + "key" : { + "type" : "keyword" + }, + "id" : { + "type" : "keyword" + } + } + }, + "issuetype" : { + "type" : "object", + "dynamic" : true, + "properties" : { + "name" : { + "type": "keyword" + }, + "id" : { + "type" : "keyword" + } + } + } + } + }, + "result": { + "type": "object", + "dynamic": true, + "properties" : { + "id" : { + "type" : "keyword" + }, + "key" : { + "type" : "keyword" + }, + "self" : { + "type" : "keyword" + } + } + } + } + }, + "slack" : { + "type": "object", + "dynamic": true, + "properties": { + "account": { + "type": "keyword" + }, + "sent_messages": { + "type": "nested", + "include_in_parent": true, + "dynamic": true, + "properties": { + "status": { + "type": "keyword" + }, + "reason": { + "type": "text" + }, + "request" : { + "type" : "object", + "enabled" : false + }, + "response" : { + "type" : "object", + "enabled" : false + }, + "to" : { + "type": "keyword" + }, + "message" : { + "type" : "object", + "dynamic" : true, + "properties" : { + "from" : { + "type" : "text" + }, + "icon" : { + "type" : "keyword" + }, + "text" : { + "type" : "text" + }, + "attachments" : { + "type" : "nested", + "include_in_parent": true, + "dynamic" : 
true, + "properties" : { + "color" : { + "type" : "keyword" + } + } + } + } + } + } + } + } + }, + "pagerduty" : { + "type": "object", + "dynamic": true, + "properties": { + "account": { + "type": "keyword" + }, + "sent_event": { + "type": "nested", + "include_in_parent": true, + "dynamic": true, + "properties": { + "reason": { + "type": "text" + }, + "request" : { + "type" : "object", + "enabled" : false + }, + "response" : { + "type" : "object", + "enabled" : false + }, + "event" : { + "type" : "object", + "dynamic" : true, + "properties" : { + "type" : { + "type" : "keyword" + }, + "client" : { + "type" : "text" + }, + "client_url" : { + "type" : "keyword" + }, + "account" : { + "type" : "keyword" + }, + "attach_payload" : { + "type" : "boolean" + }, + "incident_key" : { + "type" : "keyword" + }, + "description" : { + "type" : "text" + }, + "context" : { + "type" : "nested", + "include_in_parent": true, + "dynamic" : true, + "properties" : { + "type" : { + "type" : "keyword" + }, + "href" : { + "type" : "keyword" + }, + "src" : { + "type" : "keyword" + }, + "alt" : { + "type" : "text" + } + } + } + } + } + } + } + } + } + } + } + } + }, + "metadata": { + "type": "object", + "dynamic": true + } + } + } + } +} diff --git a/x-pack/plugin/core/src/main/resources/watches.json b/x-pack/plugin/core/src/main/resources/watches.json new file mode 100644 index 0000000000000..ad744c44f1119 --- /dev/null +++ b/x-pack/plugin/core/src/main/resources/watches.json @@ -0,0 +1,62 @@ +{ + "index_patterns": [ ".watches*" ], + "order": 2147483647, + "settings": { + "index.number_of_shards": 1, + "index.number_of_replicas": 0, + "index.auto_expand_replicas": "0-1", + "index.format": 6, + "index.priority": 800 + }, + "mappings": { + "doc": { + "dynamic" : "strict", + "properties": { + "status": { + "type": "object", + "enabled" : false, + "dynamic" : true + }, + "trigger" : { + "type": "object", + "enabled" : false, + "dynamic" : true + }, + "input": { + "type": "object", + "enabled" : false, + "dynamic" : true + }, + "condition": { + "type": "object", + "enabled" : false, + "dynamic" : true + }, + "throttle_period": { + "type" : "keyword", + "index" : false, + "doc_values" : false + }, + "throttle_period_in_millis": { + "type" : "long", + "index" : false, + "doc_values" : false + }, + "transform": { + "type" : "object", + "enabled" : false, + "dynamic" : true + }, + "actions": { + "type" : "object", + "enabled" : false, + "dynamic" : true + }, + "metadata" : { + "type" : "object", + "dynamic": true + } + } + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/action/MockIndicesRequest.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/action/MockIndicesRequest.java new file mode 100644 index 0000000000000..cc20a10857dcc --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/action/MockIndicesRequest.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.action; + +import org.elasticsearch.action.support.IndicesOptions; + +public class MockIndicesRequest extends ActionRequest implements IndicesRequest, CompositeIndicesRequest { + + private final String[] indices; + private final IndicesOptions indicesOptions; + + public MockIndicesRequest(IndicesOptions indicesOptions, String... 
indices) { + this.indices = indices; + this.indicesOptions = indicesOptions; + } + + @Override + public String[] indices() { + return indices; + } + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/http/netty4/Netty4HttpMockUtil.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/http/netty4/Netty4HttpMockUtil.java new file mode 100644 index 0000000000000..87e3e78cbc4a3 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/http/netty4/Netty4HttpMockUtil.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.http.netty4; + +import org.elasticsearch.transport.netty4.Netty4OpenChannelsHandler; + +import static org.mockito.Mockito.mock; + +/** Allows setting a mock into Netty4HttpServerTransport */ +public class Netty4HttpMockUtil { + + /** + * We don't really need to start Netty for these tests, but we can't create a pipeline + * with a null handler. So we set it to a mock for tests. + */ + public static void setOpenChannelsHandlerToMock(Netty4HttpServerTransport transport) throws Exception { + transport.serverOpenChannels = mock(Netty4OpenChannelsHandler.class); + } + +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicenseServiceTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicenseServiceTestCase.java new file mode 100644 index 0000000000000..2f110f4f8a9e8 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicenseServiceTestCase.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
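For context on how a test might consume `MockIndicesRequest` (the license-service test harness continues below): the snippet is a hypothetical usage sketch with an invented class name and index names, showing a composite indices request being built to exercise index-name handling without a real transport action.

```java
import org.elasticsearch.action.MockIndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;

public class MockIndicesRequestExample {
    public static void main(String[] args) {
        // A stand-in composite indices request, as an authorization or audit test
        // might build one to drive index-name resolution logic directly.
        MockIndicesRequest request = new MockIndicesRequest(
                IndicesOptions.strictExpandOpenAndForbidClosed(), "logs-2018", "metrics-*");
        System.out.println(String.join(", ", request.indices()));
    }
}
```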
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.component.Lifecycle; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.watcher.watch.ClockMock; +import org.junit.After; +import org.junit.Before; + +import java.nio.file.Path; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public abstract class AbstractLicenseServiceTestCase extends ESTestCase { + + protected LicenseService licenseService; + protected ClusterService clusterService; + protected ResourceWatcherService resourceWatcherService; + protected ClockMock clock; + protected DiscoveryNodes discoveryNodes; + protected Environment environment; + protected String licenseType; + + @Before + public void init() throws Exception { + clusterService = mock(ClusterService.class); + clock = ClockMock.frozen(); + discoveryNodes = mock(DiscoveryNodes.class); + resourceWatcherService = mock(ResourceWatcherService.class); + environment = mock(Environment.class); + } + + protected void setInitialState(License license, XPackLicenseState licenseState, Settings settings) { + setInitialState(license, licenseState, settings, randomBoolean() ? "trial" : "basic"); + } + + protected void setInitialState(License license, XPackLicenseState licenseState, Settings settings, String selfGeneratedType) { + Path tempDir = createTempDir(); + when(environment.configFile()).thenReturn(tempDir); + licenseType = selfGeneratedType; + settings = Settings.builder().put(settings).put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), licenseType).build(); + licenseService = new LicenseService(settings, clusterService, clock, environment, resourceWatcherService, licenseState); + ClusterState state = mock(ClusterState.class); + final ClusterBlocks noBlock = ClusterBlocks.builder().build(); + when(state.blocks()).thenReturn(noBlock); + MetaData metaData = mock(MetaData.class); + when(metaData.custom(LicensesMetaData.TYPE)).thenReturn(new LicensesMetaData(license, null)); + when(state.metaData()).thenReturn(metaData); + final DiscoveryNode mockNode = getLocalNode(); + when(discoveryNodes.getMasterNode()).thenReturn(mockNode); + when(discoveryNodes.isLocalNodeElectedMaster()).thenReturn(false); + when(state.nodes()).thenReturn(discoveryNodes); + when(state.getNodes()).thenReturn(discoveryNodes); // it is really ridiculous we have nodes() and getNodes()... 
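The remainder of `setInitialState` continues below. As a minimal sketch of how a concrete test might wire this harness, with an invented test class name and an assertion only on the lifecycle state (the real tests in this package assert on much richer licensing behavior):

```java
package org.elasticsearch.license;

import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.settings.Settings;

import static org.mockito.Mockito.mock;

public class ExampleLicenseServiceTests extends AbstractLicenseServiceTestCase {

    public void testServiceMovesToStartedState() throws Exception {
        // No pre-installed license in the mocked cluster state; a mocked license
        // state is enough for wiring purposes.
        setInitialState(null, mock(XPackLicenseState.class), Settings.EMPTY);

        licenseService.start();
        assertEquals(Lifecycle.State.STARTED, licenseService.lifecycleState());
        // the base class stops the service in its @After hook
    }
}
```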
+ when(clusterService.state()).thenReturn(state); + when(clusterService.lifecycleState()).thenReturn(Lifecycle.State.STARTED); + when(clusterService.getClusterName()).thenReturn(new ClusterName("a")); + when(clusterService.localNode()).thenReturn(mockNode); + } + + protected DiscoveryNode getLocalNode() { + return new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + licenseService.stop(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java new file mode 100644 index 0000000000000..e9c9ba95bfd38 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.analysis.common.CommonAnalysisPlugin; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.XPackClientPlugin; +import org.elasticsearch.xpack.core.XPackSettings; + +import java.util.Arrays; +import java.util.Collection; +import java.util.concurrent.CountDownLatch; + +public abstract class AbstractLicensesIntegrationTestCase extends ESIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), false).build(); + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(LocalStateCompositeXPackPlugin.class, CommonAnalysisPlugin.class); + } + + @Override + protected Collection> transportClientPlugins() { + return Arrays.asList(XPackClientPlugin.class, CommonAnalysisPlugin.class); + } + + @Override + protected Settings transportClientSettings() { + // Plugin should be loaded on the transport client as well + return nodeSettings(0); + } + + protected void putLicense(final License license) throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(1); + ClusterService clusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName()); + clusterService.submitStateUpdateTask("putting license", new ClusterStateUpdateTask() { + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); + mdBuilder.putCustom(LicensesMetaData.TYPE, new LicensesMetaData(license, null)); + return ClusterState.builder(currentState).metaData(mdBuilder).build(); + } + + @Override + public void onFailure(String source, @Nullable Exception e) { + 
logger.error("error on metaData cleanup after test", e); + } + }); + latch.await(); + } + + protected void putLicenseTombstone() throws InterruptedException { + putLicense(LicensesMetaData.LICENSE_TOMBSTONE); + } + + protected void wipeAllLicenses() throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(1); + ClusterService clusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName()); + clusterService.submitStateUpdateTask("delete licensing metadata", new ClusterStateUpdateTask() { + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); + mdBuilder.removeCustom(LicensesMetaData.TYPE); + return ClusterState.builder(currentState).metaData(mdBuilder).build(); + } + + @Override + public void onFailure(String source, @Nullable Exception e) { + logger.error("error on metaData cleanup after test", e); + } + }); + latch.await(); + } + + protected void assertLicenseActive(boolean active) throws InterruptedException { + boolean success = awaitBusy(() -> { + for (XPackLicenseState licenseState : internalCluster().getDataNodeInstances(XPackLicenseState.class)) { + if (licenseState.isActive() == active) { + return true; + } + } + return false; + }); + assertTrue(success); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/ExpirationCallbackTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/ExpirationCallbackTests.java new file mode 100644 index 0000000000000..1aff695546268 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/ExpirationCallbackTests.java @@ -0,0 +1,154 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class ExpirationCallbackTests extends ESTestCase { + + public void testPostExpirationDelay() throws Exception { + TimeValue expiryDuration = TimeValue.timeValueSeconds(randomIntBetween(5, 10)); + TimeValue min = TimeValue.timeValueSeconds(1); + TimeValue max = TimeValue.timeValueSeconds(4); + TimeValue frequency = TimeValue.timeValueSeconds(1); + NoopPostExpirationCallback post = new NoopPostExpirationCallback(min, max, frequency); + long now = System.currentTimeMillis(); + long expiryDate = now + expiryDuration.getMillis(); + assertThat(post.delay(expiryDate, now), + equalTo(TimeValue.timeValueMillis(expiryDuration.getMillis() + min.getMillis()))); // before license expiry + assertThat(post.delay(expiryDate, expiryDate), equalTo(min)); // on license expiry + int latestValidTriggerDelay = (int) (expiryDuration.getMillis() + max.getMillis()); + int earliestValidTriggerDelay = (int) (expiryDuration.getMillis() + min.getMillis()); + assertExpirationCallbackDelay(post, expiryDuration.millis(), latestValidTriggerDelay, earliestValidTriggerDelay); + } + + public void testPreExpirationDelay() throws Exception { + TimeValue expiryDuration = TimeValue.timeValueSeconds(randomIntBetween(5, 10)); + TimeValue min = TimeValue.timeValueSeconds(1); + TimeValue max = TimeValue.timeValueSeconds(4); + TimeValue frequency = TimeValue.timeValueSeconds(1); + NoopPreExpirationCallback pre = new NoopPreExpirationCallback(min, max, frequency); + long now = System.currentTimeMillis(); + long expiryDate = now + expiryDuration.getMillis(); + assertThat(pre.delay(expiryDate, expiryDate), nullValue()); // on license expiry + int latestValidTriggerDelay = (int) (expiryDuration.getMillis() - min.getMillis()); + int earliestValidTriggerDelay = (int) (expiryDuration.getMillis() - max.getMillis()); + assertExpirationCallbackDelay(pre, expiryDuration.millis(), latestValidTriggerDelay, earliestValidTriggerDelay); + } + + public void testPostExpirationWithNullMax() throws Exception { + int postExpirySeconds = randomIntBetween(5, 10); + TimeValue postExpiryDuration = TimeValue.timeValueSeconds(postExpirySeconds); + TimeValue min = TimeValue.timeValueSeconds(postExpirySeconds - randomIntBetween(1, 3)); + + final ExpirationCallback.Post post = new NoopPostExpirationCallback(min, null, timeValueMillis(10)); + long now = System.currentTimeMillis(); + assertThat(post.delay(now - postExpiryDuration.millis(), now), equalTo(TimeValue.timeValueMillis(0))); + } + + public void testPreExpirationWithNullMin() throws Exception { + int expirySeconds = randomIntBetween(5, 10); + TimeValue expiryDuration = TimeValue.timeValueSeconds(expirySeconds); + TimeValue max = TimeValue.timeValueSeconds(expirySeconds + randomIntBetween(1, 10)); + + final ExpirationCallback.Pre pre = new NoopPreExpirationCallback(null, max, timeValueMillis(10)); + long now = System.currentTimeMillis(); + assertThat(pre.delay(expiryDuration.millis() + now, now), equalTo(TimeValue.timeValueMillis(0))); + } + + public void testPreExpirationScheduleTime() throws Exception { + TimeValue expiryDuration = TimeValue.timeValueSeconds(randomIntBetween(5, 10)); + TimeValue min = TimeValue.timeValueSeconds(1); + TimeValue max = TimeValue.timeValueSeconds(4); + TimeValue 
frequency = TimeValue.timeValueSeconds(1); + NoopPreExpirationCallback pre = new NoopPreExpirationCallback(min, max, frequency); + int latestValidTriggerDelay = (int) (expiryDuration.getMillis() - min.getMillis()); + int earliestValidTriggerDelay = (int) (expiryDuration.getMillis() - max.getMillis()); + assertExpirationCallbackScheduleTime(pre, expiryDuration.millis(), latestValidTriggerDelay, earliestValidTriggerDelay); + } + + public void testPostExpirationScheduleTime() throws Exception { + TimeValue expiryDuration = TimeValue.timeValueSeconds(randomIntBetween(5, 10)); + TimeValue min = TimeValue.timeValueSeconds(1); + TimeValue max = TimeValue.timeValueSeconds(4); + TimeValue frequency = TimeValue.timeValueSeconds(1); + NoopPostExpirationCallback pre = new NoopPostExpirationCallback(min, max, frequency); + int latestValidTriggerDelay = (int) (expiryDuration.getMillis() + max.getMillis()); + int earliestValidTriggerDelay = (int) (expiryDuration.getMillis() + min.getMillis()); + assertExpirationCallbackScheduleTime(pre, expiryDuration.millis(), latestValidTriggerDelay, earliestValidTriggerDelay); + } + + private void assertExpirationCallbackDelay(ExpirationCallback expirationCallback, long expiryDuration, + int latestValidTriggerDelay, int earliestValidTriggerDelay) { + long now = System.currentTimeMillis(); + long expiryDate = now + expiryDuration; + // bounds + assertThat(expirationCallback.delay(expiryDate, now + earliestValidTriggerDelay), equalTo(TimeValue.timeValueMillis(0))); + assertThat(expirationCallback.delay(expiryDate, now + latestValidTriggerDelay), equalTo(TimeValue.timeValueMillis(0))); + // in match + assertThat(expirationCallback.delay(expiryDate, + now + randomIntBetween(earliestValidTriggerDelay, latestValidTriggerDelay)), + equalTo(TimeValue.timeValueMillis(0))); + // out of bounds + int deltaBeforeEarliestMatch = between(1, earliestValidTriggerDelay); + assertThat(expirationCallback.delay(expiryDate, now + deltaBeforeEarliestMatch), + equalTo(TimeValue.timeValueMillis(earliestValidTriggerDelay - deltaBeforeEarliestMatch))); + int deltaAfterLatestMatch = between(latestValidTriggerDelay + 1, Integer.MAX_VALUE); // after expiry and after max + assertThat(expirationCallback.delay(expiryDate, expiryDate + deltaAfterLatestMatch), nullValue()); + } + + public void assertExpirationCallbackScheduleTime(ExpirationCallback expirationCallback, long expiryDuration, + int latestValidTriggerDelay, int earliestValidTriggerDelay) { + long now = System.currentTimeMillis(); + long expiryDate = now + expiryDuration; + int validTriggerInterval = between(earliestValidTriggerDelay, latestValidTriggerDelay); + assertThat(expirationCallback.nextScheduledTimeForExpiry(expiryDate, + now + validTriggerInterval, now + validTriggerInterval), + equalTo(now + validTriggerInterval)); + assertThat(expirationCallback.nextScheduledTimeForExpiry(expiryDate, now, now + validTriggerInterval), + equalTo(now + validTriggerInterval + expirationCallback.getFrequency())); + + int deltaBeforeEarliestMatch = between(1, earliestValidTriggerDelay - 1); + assertThat(expirationCallback.nextScheduledTimeForExpiry(expiryDate, now, now + deltaBeforeEarliestMatch), + equalTo(now + deltaBeforeEarliestMatch + + expirationCallback.delay(expiryDate, now + deltaBeforeEarliestMatch).getMillis())); + assertThat(expirationCallback.nextScheduledTimeForExpiry(expiryDate, + now + deltaBeforeEarliestMatch, now + deltaBeforeEarliestMatch), + equalTo(now + deltaBeforeEarliestMatch + + expirationCallback.delay(expiryDate, now + 
deltaBeforeEarliestMatch).getMillis())); + + int deltaAfterLatestMatch = between(latestValidTriggerDelay + 1, Integer.MAX_VALUE); // after expiry and after max + assertThat(expirationCallback.nextScheduledTimeForExpiry(expiryDate, now, now + deltaAfterLatestMatch), equalTo(-1L)); + assertThat(expirationCallback.nextScheduledTimeForExpiry(expiryDate, + now + deltaAfterLatestMatch, now + deltaAfterLatestMatch), + equalTo(-1L)); + } + + private static class NoopPostExpirationCallback extends ExpirationCallback.Post { + + NoopPostExpirationCallback(TimeValue min, TimeValue max, TimeValue frequency) { + super(min, max, frequency); + } + + @Override + public void on(License license) {} + } + + private static class NoopPreExpirationCallback extends ExpirationCallback.Pre { + + NoopPreExpirationCallback(TimeValue min, TimeValue max, TimeValue frequency) { + super(min, max, frequency); + } + + @Override + public void on(License license) {} + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseClusterChangeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseClusterChangeTests.java new file mode 100644 index 0000000000000..146e52717f5d6 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseClusterChangeTests.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.junit.After; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class LicenseClusterChangeTests extends AbstractLicenseServiceTestCase { + + private TestUtils.AssertingLicenseState licenseState; + + @Before + public void setup() { + licenseState = new TestUtils.AssertingLicenseState(); + setInitialState(null, licenseState, Settings.EMPTY); + licenseService.start(); + } + + @After + public void teardown() { + licenseService.stop(); + } + + + public void testNotificationOnNewLicense() throws Exception { + ClusterState oldState = ClusterState.builder(new ClusterName("a")).build(); + final License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); + MetaData metaData = MetaData.builder().putCustom(LicensesMetaData.TYPE, new LicensesMetaData(license, null)).build(); + ClusterState newState = ClusterState.builder(new ClusterName("a")).metaData(metaData).build(); + licenseService.clusterChanged(new ClusterChangedEvent("simulated", newState, oldState)); + assertThat(licenseState.activeUpdates.size(), equalTo(1)); + 
assertTrue(licenseState.activeUpdates.get(0)); + } + + public void testNoNotificationOnExistingLicense() throws Exception { + final License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); + MetaData metaData = MetaData.builder().putCustom(LicensesMetaData.TYPE, new LicensesMetaData(license, null)).build(); + ClusterState newState = ClusterState.builder(new ClusterName("a")).metaData(metaData).build(); + ClusterState oldState = ClusterState.builder(newState).build(); + licenseService.clusterChanged(new ClusterChangedEvent("simulated", newState, oldState)); + assertThat(licenseState.activeUpdates.size(), equalTo(0)); + } + + public void testSelfGeneratedLicenseGeneration() throws Exception { + DiscoveryNode master = new DiscoveryNode("b", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); + ClusterState oldState = ClusterState.builder(new ClusterName("a")) + .nodes(DiscoveryNodes.builder().masterNodeId(master.getId()).add(master)).build(); + when(discoveryNodes.isLocalNodeElectedMaster()).thenReturn(true); + ClusterState newState = ClusterState.builder(oldState).nodes(discoveryNodes).build(); + + licenseService.clusterChanged(new ClusterChangedEvent("simulated", newState, oldState)); + ArgumentCaptor stateUpdater = ArgumentCaptor.forClass(ClusterStateUpdateTask.class); + verify(clusterService, times(1)).submitStateUpdateTask(any(), stateUpdater.capture()); + ClusterState stateWithLicense = stateUpdater.getValue().execute(newState); + LicensesMetaData licenseMetaData = stateWithLicense.metaData().custom(LicensesMetaData.TYPE); + assertNotNull(licenseMetaData); + assertNotNull(licenseMetaData.getLicense()); + assertEquals(licenseType, licenseMetaData.getLicense().type()); + long expiration; + if (licenseType.equals("basic")) { + expiration = LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS; + } else { + expiration = LicenseService.NON_BASIC_SELF_GENERATED_LICENSE_DURATION.millis() + clock.millis(); + } + assertEquals(expiration, licenseMetaData.getLicense().expiryDate()); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseOperationModeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseOperationModeTests.java new file mode 100644 index 0000000000000..7a6dc03c7bc2a --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseOperationModeTests.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.test.ESTestCase; + +import java.util.Locale; + +import static org.elasticsearch.license.License.OperationMode; +import static org.hamcrest.Matchers.equalTo; + +/** + * Tests {@link License.OperationMode} for correctness. + *
<p>
+ * If you change the behavior of these tests, then it means that licensing changes across the products! + */ +public class LicenseOperationModeTests extends ESTestCase { + public void testResolveTrial() { + // assert 1.x BWC + assertResolve(OperationMode.TRIAL, "nONE", "DEv", "deveLopment"); + // assert expected (2.x+) variant + assertResolve(OperationMode.TRIAL, "tRiAl", "trial"); + } + + public void testResolveBasic() { + // assert expected (2.x+) variant (note: no 1.x variant of BASIC) + assertResolve(OperationMode.BASIC, "bAsIc", "basic"); + } + + public void testResolveStandard() { + // assert expected (2.x+) variant (note: no 1.x variant of STANDARD) + assertResolve(OperationMode.STANDARD, "StAnDARd", "standard"); + } + + public void testResolveGold() { + // assert expected (2.x+) variant (note: no different 1.x variant of GOLD) + assertResolve(OperationMode.GOLD, "SiLvEr", "gOlD", "silver", "gold"); + } + + public void testResolvePlatinum() { + // assert 1.x BWC + assertResolve(OperationMode.PLATINUM, "iNtErNaL"); + // assert expected (2.x+) variant + assertResolve(OperationMode.PLATINUM, "PlAtINum", "platinum"); + } + + public void testResolveUnknown() { + // 'enterprise' is a type that exists in cloud but should be rejected under normal operation + // See https://github.com/elastic/x-plugins/issues/3371 + String[] types = { "unknown", "fake", "enterprise" }; + + for (String type : types) { + try { + OperationMode.resolve(type); + + fail(String.format(Locale.ROOT, "[%s] should not be recognized as an operation mode", type)); + } + catch (IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("unknown type [" + type + "]")); + } + } + } + + private static void assertResolve(OperationMode expected, String... types) { + for (String type : types) { + assertThat(OperationMode.resolve(type), equalTo(expected)); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseOperationModeUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseOperationModeUpdateTests.java new file mode 100644 index 0000000000000..a69331287918b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseOperationModeUpdateTests.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.watcher.FileWatcher; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.junit.Before; + +import java.nio.file.Path; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; + +public class LicenseOperationModeUpdateTests extends ESTestCase { + + private OperationModeFileWatcher operationModeFileWatcher; + private Path licenseModeFile; + private ResourceWatcherService resourceWatcherService; + + @Before + public void init() throws Exception { + licenseModeFile = createTempFile(); + resourceWatcherService = mock(ResourceWatcherService.class); + operationModeFileWatcher = new OperationModeFileWatcher(resourceWatcherService, licenseModeFile, logger, () -> {}); + } + + public void testLicenseOperationModeUpdate() throws Exception { + String type = randomFrom("trial", "basic", "standard", "gold", "platinum"); + License license = License.builder() + .uid("id") + .expiryDate(0) + .issueDate(0) + .issuedTo("elasticsearch") + .issuer("issuer") + .type(type) + .maxNodes(1) + .build(); + + assertThat(license.operationMode(), equalTo(License.OperationMode.resolve(type))); + OperationModeFileWatcherTests.writeMode("gold", licenseModeFile); + license.setOperationModeFileWatcher(operationModeFileWatcher); + verifyZeroInteractions(resourceWatcherService); + assertThat(license.operationMode(), equalTo(License.OperationMode.resolve(type))); + } + + public void testCloudInternalLicenseOperationModeUpdate() throws Exception { + License license = License.builder() + .uid("id") + .expiryDate(0) + .issueDate(0) + .issuedTo("elasticsearch") + .issuer("issuer") + .type("cloud_internal") + .maxNodes(1) + .build(); + + assertThat(license.operationMode(), equalTo(License.OperationMode.PLATINUM)); + OperationModeFileWatcherTests.writeMode("gold", licenseModeFile); + license.setOperationModeFileWatcher(operationModeFileWatcher); + verify(resourceWatcherService, times(1)).add(any(FileWatcher.class), eq(ResourceWatcherService.Frequency.HIGH)); + assertThat(license.operationMode(), equalTo(License.OperationMode.GOLD)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseRegistrationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseRegistrationTests.java new file mode 100644 index 0000000000000..2a237f090e2fd --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseRegistrationTests.java @@ -0,0 +1,128 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.Settings; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; + +import java.util.UUID; + +import static org.elasticsearch.license.TestUtils.dateMath; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class LicenseRegistrationTests extends AbstractLicenseServiceTestCase { + + public void testSelfGeneratedTrialLicense() throws Exception { + XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); + setInitialState(null, licenseState, Settings.EMPTY, "trial"); + when(discoveryNodes.isLocalNodeElectedMaster()).thenReturn(true); + licenseService.start(); + + ClusterState state = ClusterState.builder(new ClusterName("a")).build(); + ArgumentCaptor stateUpdater = ArgumentCaptor.forClass(ClusterStateUpdateTask.class); + verify(clusterService, Mockito.times(1)).submitStateUpdateTask(any(), stateUpdater.capture()); + ClusterState stateWithLicense = stateUpdater.getValue().execute(state); + LicensesMetaData licenseMetaData = stateWithLicense.metaData().custom(LicensesMetaData.TYPE); + assertNotNull(licenseMetaData); + assertNotNull(licenseMetaData.getLicense()); + assertFalse(licenseMetaData.isEligibleForTrial()); + assertEquals("trial", licenseMetaData.getLicense().type()); + assertEquals(clock.millis() + LicenseService.NON_BASIC_SELF_GENERATED_LICENSE_DURATION.millis(), + licenseMetaData.getLicense().expiryDate()); + } + + public void testSelfGeneratedBasicLicense() throws Exception { + XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); + setInitialState(null, licenseState, Settings.EMPTY, "basic"); + when(discoveryNodes.isLocalNodeElectedMaster()).thenReturn(true); + licenseService.start(); + + ClusterState state = ClusterState.builder(new ClusterName("a")).build(); + ArgumentCaptor stateUpdater = ArgumentCaptor.forClass(ClusterStateUpdateTask.class); + verify(clusterService, Mockito.times(1)).submitStateUpdateTask(any(), stateUpdater.capture()); + ClusterState stateWithLicense = stateUpdater.getValue().execute(state); + LicensesMetaData licenseMetaData = stateWithLicense.metaData().custom(LicensesMetaData.TYPE); + assertNotNull(licenseMetaData); + assertNotNull(licenseMetaData.getLicense()); + assertTrue(licenseMetaData.isEligibleForTrial()); + assertEquals("basic", licenseMetaData.getLicense().type()); + assertEquals(LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS, licenseMetaData.getLicense().expiryDate()); + } + + public void testNonSelfGeneratedBasicLicenseIsReplaced() throws Exception { + long now = System.currentTimeMillis(); + String uid = UUID.randomUUID().toString(); + final License.Builder builder = License.builder() + .uid(uid) + .version(License.VERSION_CURRENT) + .expiryDate(dateMath("now+2h", now)) + .startDate(now) + .issueDate(now) + .type("basic") + .issuedTo("customer") + .issuer("elasticsearch") + .maxNodes(5); + License license = TestUtils.generateSignedLicense(builder); + + XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); + setInitialState(license, licenseState, Settings.EMPTY); + when(discoveryNodes.isLocalNodeElectedMaster()).thenReturn(true); + licenseService.start(); + + MetaData.Builder mdBuilder = 
MetaData.builder(); + mdBuilder.putCustom(LicensesMetaData.TYPE, new LicensesMetaData(license, null)); + ClusterState state = ClusterState.builder(new ClusterName("a")).metaData(mdBuilder.build()).build(); + ArgumentCaptor stateUpdater = ArgumentCaptor.forClass(ClusterStateUpdateTask.class); + verify(clusterService, Mockito.times(1)).submitStateUpdateTask(any(), stateUpdater.capture()); + ClusterState stateWithLicense = stateUpdater.getValue().execute(state); + LicensesMetaData licenseMetaData = stateWithLicense.metaData().custom(LicensesMetaData.TYPE); + assertNotNull(licenseMetaData); + assertNotNull(licenseMetaData.getLicense()); + assertTrue(licenseMetaData.isEligibleForTrial()); + assertEquals("basic", licenseMetaData.getLicense().type()); + assertEquals(LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS, licenseMetaData.getLicense().expiryDate()); + assertEquals(uid, licenseMetaData.getLicense().uid()); + } + + public void testExpiredSelfGeneratedBasicLicenseIsExtended() throws Exception { + long now = System.currentTimeMillis(); + String uid = UUID.randomUUID().toString(); + License.Builder builder = License.builder() + .uid(uid) + .issuedTo("name") + .maxNodes(1000) + .issueDate(dateMath("now-10h", now)) + .type("basic") + .expiryDate(dateMath("now-2h", now)); + License license = SelfGeneratedLicense.create(builder); + + XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); + setInitialState(license, licenseState, Settings.EMPTY); + when(discoveryNodes.isLocalNodeElectedMaster()).thenReturn(true); + licenseService.start(); + + MetaData.Builder mdBuilder = MetaData.builder(); + mdBuilder.putCustom(LicensesMetaData.TYPE, new LicensesMetaData(license, null)); + ClusterState state = ClusterState.builder(new ClusterName("a")).metaData(mdBuilder.build()).build(); + ArgumentCaptor stateUpdater = ArgumentCaptor.forClass(ClusterStateUpdateTask.class); + verify(clusterService, Mockito.times(1)).submitStateUpdateTask(any(), stateUpdater.capture()); + ClusterState stateWithLicense = stateUpdater.getValue().execute(state); + LicensesMetaData licenseMetaData = stateWithLicense.metaData().custom(LicensesMetaData.TYPE); + assertNotNull(licenseMetaData); + assertNotNull(licenseMetaData.getLicense()); + assertTrue(licenseMetaData.isEligibleForTrial()); + assertEquals("basic", licenseMetaData.getLicense().type()); + assertEquals(LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS, licenseMetaData.getLicense().expiryDate()); + assertEquals(uid, licenseMetaData.getLicense().uid()); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseScheduleTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseScheduleTests.java new file mode 100644 index 0000000000000..977061154dcde --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseScheduleTests.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; +import org.junit.Before; + +import static org.hamcrest.Matchers.equalTo; + +public class LicenseScheduleTests extends ESTestCase { + + private License license; + private SchedulerEngine.Schedule schedule; + + @Before + public void setup() throws Exception { + license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(12)); + schedule = LicenseService.nextLicenseCheck(license); + } + + public void testEnabledLicenseSchedule() throws Exception { + int expiryDuration = (int) (license.expiryDate() - license.issueDate()); + long triggeredTime = license.issueDate() + between(0, expiryDuration); + assertThat(schedule.nextScheduledTimeAfter(license.issueDate(), triggeredTime), equalTo(license.expiryDate())); + } + + public void testGraceLicenseSchedule() throws Exception { + long triggeredTime = license.expiryDate() + between(1, + ((int) LicenseService.GRACE_PERIOD_DURATION.getMillis())); + assertThat(schedule.nextScheduledTimeAfter(license.issueDate(), triggeredTime), + equalTo(license.expiryDate() + LicenseService.GRACE_PERIOD_DURATION.getMillis())); + } + + public void testExpiredLicenseSchedule() throws Exception { + long triggeredTime = license.expiryDate() + LicenseService.GRACE_PERIOD_DURATION.getMillis() + + randomIntBetween(1, 1000); + assertThat(schedule.nextScheduledTimeAfter(license.issueDate(), triggeredTime), + equalTo(-1L)); + } + + public void testInvalidLicenseSchedule() throws Exception { + long triggeredTime = license.issueDate() - randomIntBetween(1, 1000); + assertThat(schedule.nextScheduledTimeAfter(triggeredTime, triggeredTime), + equalTo(license.issueDate())); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseSerializationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseSerializationTests.java new file mode 100644 index 0000000000000..d7cf5ab50fb48 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseSerializationTests.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; + +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.Map; +import java.util.UUID; + +import static org.hamcrest.core.IsEqual.equalTo; +import static org.hamcrest.core.IsNull.notNullValue; +import static org.hamcrest.core.IsNull.nullValue; + +public class LicenseSerializationTests extends ESTestCase { + public void testSimpleIssueExpiryDate() throws Exception { + long now = System.currentTimeMillis(); + String issueDate = TestUtils.dateMathString("now", now); + String expiryDate = TestUtils.dateMathString("now+10d/d", now); + String licenseSpecs = TestUtils.generateLicenseSpecString(new TestUtils.LicenseSpec(issueDate, expiryDate)); + License generatedLicense = License.fromSource(new BytesArray(licenseSpecs.getBytes(StandardCharsets.UTF_8)), XContentType.JSON); + assertThat(generatedLicense.issueDate(), equalTo(DateUtils.beginningOfTheDay(issueDate))); + assertThat(generatedLicense.expiryDate(), equalTo(DateUtils.endOfTheDay(expiryDate))); + } + + public void testLicensesFields() throws Exception { + TestUtils.LicenseSpec randomLicenseSpec = TestUtils.generateRandomLicenseSpec(License.VERSION_START); + String licenseSpecsSource = TestUtils.generateLicenseSpecString(randomLicenseSpec); + final License fromSource = + License.fromSource(new BytesArray(licenseSpecsSource.getBytes(StandardCharsets.UTF_8)), XContentType.JSON); + TestUtils.assertLicenseSpec(randomLicenseSpec, fromSource); + } + + public void testLicenseRestView() throws Exception { + long now = System.currentTimeMillis(); + String expiredLicenseExpiryDate = TestUtils.dateMathString("now-1d/d", now); + String validLicenseIssueDate = TestUtils.dateMathString("now-10d/d", now); + String invalidLicenseIssueDate = TestUtils.dateMathString("now+1d/d", now); + String validLicenseExpiryDate = TestUtils.dateMathString("now+2d/d", now); + + License license = TestUtils.generateLicenses(new TestUtils.LicenseSpec(validLicenseIssueDate, expiredLicenseExpiryDate)); + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + license.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(License.REST_VIEW_MODE, "true"))); + builder.flush(); + Map map = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); + + // should have an extra status field, human readable issue_data and expiry_date + assertThat(map.get("status"), notNullValue()); + assertThat(map.get("issue_date"), notNullValue()); + assertThat(map.get("expiry_date"), notNullValue()); + assertThat(map.get("status"), equalTo("expired")); + builder = XContentFactory.contentBuilder(XContentType.JSON); + license.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.flush(); + map = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); + assertThat(map.get("status"), nullValue()); + + license = TestUtils.generateLicenses(new TestUtils.LicenseSpec(validLicenseIssueDate, validLicenseExpiryDate)); + builder = XContentFactory.contentBuilder(XContentType.JSON); + 
license.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(License.REST_VIEW_MODE, "true"))); + builder.flush(); + map = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); + + // should have an extra status field, human readable issue_data and expiry_date + assertThat(map.get("status"), notNullValue()); + assertThat(map.get("issue_date"), notNullValue()); + assertThat(map.get("expiry_date"), notNullValue()); + assertThat(map.get("status"), equalTo("active")); + builder = XContentFactory.contentBuilder(XContentType.JSON); + license.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.flush(); + map = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); + assertThat(map.get("status"), nullValue()); + + license = TestUtils.generateLicenses(new TestUtils.LicenseSpec(invalidLicenseIssueDate, validLicenseExpiryDate)); + builder = XContentFactory.contentBuilder(XContentType.JSON); + license.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(License.REST_VIEW_MODE, "true"))); + builder.flush(); + map = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); + + // should have an extra status field, human readable issue_data and expiry_date + assertThat(map.get("status"), notNullValue()); + assertThat(map.get("issue_date"), notNullValue()); + assertThat(map.get("expiry_date"), notNullValue()); + assertThat(map.get("status"), equalTo("invalid")); + builder = XContentFactory.contentBuilder(XContentType.JSON); + license.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.flush(); + map = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); + assertThat(map.get("status"), nullValue()); + } + + public void testLicenseRestViewNonExpiringBasic() throws Exception { + long now = System.currentTimeMillis(); + + License.Builder specBuilder = License.builder() + .uid(UUID.randomUUID().toString()) + .issuedTo("test") + .maxNodes(1000) + .issueDate(now) + .type("basic") + .expiryDate(LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS); + License license = SelfGeneratedLicense.create(specBuilder); + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + license.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(License.REST_VIEW_MODE, "true"))); + builder.flush(); + Map map = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); + + // should have an extra status field, human readable issue_data and no expiry_date + assertThat(map.get("status"), notNullValue()); + assertThat(map.get("type"), equalTo("basic")); + assertThat(map.get("issue_date"), notNullValue()); + assertThat(map.get("expiry_date"), nullValue()); + assertThat(map.get("expiry_date_in_millis"), nullValue()); + assertThat(map.get("status"), equalTo("active")); + builder = XContentFactory.contentBuilder(XContentType.JSON); + license.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.flush(); + map = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); + assertThat(map.get("status"), nullValue()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterNotRecoveredTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterNotRecoveredTests.java new file mode 100644 index 0000000000000..4e7356ad63d13 --- /dev/null +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterNotRecoveredTests.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.analysis.common.CommonAnalysisPlugin; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.transport.Netty4Plugin; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; + +import java.util.Arrays; +import java.util.Collection; + +import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; + +@ESIntegTestCase.ClusterScope(scope = TEST, numDataNodes = 0, numClientNodes = 0, maxNumDataNodes = 0, transportClientRatio = 0, + autoMinMasterNodes = false) +public class LicenseServiceClusterNotRecoveredTests extends AbstractLicensesIntegrationTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return nodeSettingsBuilder(nodeOrdinal).build(); + } + + private Settings.Builder nodeSettingsBuilder(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put("node.data", true) + .put("resource.reload.interval.high", "500ms") // for license mode file watcher + .put(NetworkModule.HTTP_ENABLED.getKey(), true); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(LocalStateCompositeXPackPlugin.class, CommonAnalysisPlugin.class, Netty4Plugin.class); + } + + @Override + protected Collection<Class<? extends Plugin>> transportClientPlugins() { + return nodePlugins(); + } + + public void testClusterNotRecovered() throws Exception { + logger.info("--> start one master out of two [recovery state]"); + internalCluster().startNode(nodeSettingsBuilder(0).put("discovery.zen.minimum_master_nodes", 2).put("node.master", true)); + logger.info("--> start second master out of two [recovered state]"); + internalCluster().startNode(nodeSettingsBuilder(1).put("discovery.zen.minimum_master_nodes", 2).put("node.master", true)); + assertLicenseActive(true); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterTests.java new file mode 100644 index 0000000000000..ad508ddb7bc77 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterTests.java @@ -0,0 +1,172 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.analysis.common.CommonAnalysisPlugin; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.env.Environment; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase.ClusterScope; +import org.elasticsearch.transport.Netty4Plugin; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.XPackPlugin; + +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Collection; + +import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.nullValue; + +@ClusterScope(scope = TEST, numDataNodes = 0, numClientNodes = 0, maxNumDataNodes = 0, transportClientRatio = 0) +public class LicenseServiceClusterTests extends AbstractLicensesIntegrationTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return nodeSettingsBuilder(nodeOrdinal).build(); + } + + private Settings.Builder nodeSettingsBuilder(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put("node.data", true) + .put("resource.reload.interval.high", "500ms") // for license mode file watcher + .put(NetworkModule.HTTP_ENABLED.getKey(), true); + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(LocalStateCompositeXPackPlugin.class, CommonAnalysisPlugin.class, Netty4Plugin.class); + } + + @Override + protected Collection> transportClientPlugins() { + return nodePlugins(); + } + + public void testClusterRestartWithLicense() throws Exception { + wipeAllLicenses(); + + int numNodes = randomIntBetween(1, 5); + logger.info("--> starting {} node(s)", numNodes); + internalCluster().startNodes(numNodes); + ensureGreen(); + + logger.info("--> put signed license"); + LicensingClient licensingClient = new LicensingClient(client()); + License license = TestUtils.generateSignedLicense(TimeValue.timeValueMinutes(1)); + putLicense(license); + assertThat(licensingClient.prepareGetLicense().get().license(), equalTo(license)); + assertOperationMode(license.operationMode()); + + logger.info("--> restart all nodes"); + internalCluster().fullRestart(); + ensureYellow(); + licensingClient = new LicensingClient(client()); + logger.info("--> get and check signed license"); + assertThat(licensingClient.prepareGetLicense().get().license(), equalTo(license)); + logger.info("--> remove licenses"); + licensingClient.prepareDeleteLicense().get(); + assertOperationMode(License.OperationMode.MISSING); + + logger.info("--> restart all nodes"); + internalCluster().fullRestart(); + licensingClient = new LicensingClient(client()); + ensureYellow(); + assertThat(licensingClient.prepareGetLicense().get().license(), nullValue()); + assertOperationMode(License.OperationMode.MISSING); + + + wipeAllLicenses(); + } + + public void testCloudInternalLicense() throws Exception { + wipeAllLicenses(); + + int numNodes = randomIntBetween(1, 5); + logger.info("--> starting {} node(s)", numNodes); + internalCluster().startNodes(numNodes); + ensureGreen(); + + logger.info("--> put signed license"); + LicensingClient licensingClient = new LicensingClient(client()); + License license = TestUtils.generateSignedLicense("cloud_internal", 
License.VERSION_CURRENT, System.currentTimeMillis(), + TimeValue.timeValueMinutes(1)); + putLicense(license); + assertThat(licensingClient.prepareGetLicense().get().license(), equalTo(license)); + assertOperationMode(License.OperationMode.PLATINUM); + writeCloudInternalMode("gold"); + assertOperationMode(License.OperationMode.GOLD); + writeCloudInternalMode("basic"); + assertOperationMode(License.OperationMode.BASIC); + } + + public void testClusterRestartWhileEnabled() throws Exception { + wipeAllLicenses(); + internalCluster().startNode(); + ensureGreen(); + assertLicenseActive(true); + logger.info("--> restart node"); + internalCluster().fullRestart(); + ensureYellow(); + logger.info("--> await node for enabled"); + assertLicenseActive(true); + } + + public void testClusterRestartWhileGrace() throws Exception { + wipeAllLicenses(); + internalCluster().startNode(); + assertLicenseActive(true); + putLicense(TestUtils.generateSignedLicense(TimeValue.timeValueMillis(0))); + ensureGreen(); + assertLicenseActive(true); + logger.info("--> restart node"); + internalCluster().fullRestart(); + ensureYellow(); + logger.info("--> await node for grace_period"); + assertLicenseActive(true); + } + + public void testClusterRestartWhileExpired() throws Exception { + wipeAllLicenses(); + internalCluster().startNode(); + ensureGreen(); + assertLicenseActive(true); + putLicense(TestUtils.generateExpiredNonBasicLicense(System.currentTimeMillis() - LicenseService.GRACE_PERIOD_DURATION.getMillis())); + assertLicenseActive(false); + logger.info("--> restart node"); + internalCluster().fullRestart(); + ensureYellow(); + logger.info("--> await node for disabled"); + assertLicenseActive(false); + } + + private void assertOperationMode(License.OperationMode operationMode) throws InterruptedException { + boolean success = awaitBusy(() -> { + for (XPackLicenseState licenseState : internalCluster().getDataNodeInstances(XPackLicenseState.class)) { + if (licenseState.getOperationMode() == operationMode) { + return true; + } + } + return false; + }); + assertTrue(success); + } + + private void writeCloudInternalMode(String mode) throws Exception { + for (Environment environment : internalCluster().getDataOrMasterNodeInstances(Environment.class)) { + Path licenseModePath = XPackPlugin.resolveConfigFile(environment, "license_mode"); + Files.createDirectories(licenseModePath.getParent()); + Files.write(licenseModePath, mode.getBytes(StandardCharsets.UTF_8)); + } + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseTLSTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseTLSTests.java new file mode 100644 index 0000000000000..588dbabb9db8a --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseTLSTests.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.Version; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; + +import java.net.InetAddress; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.hamcrest.Matchers.containsString; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +public class LicenseTLSTests extends AbstractLicenseServiceTestCase { + + private InetAddress inetAddress; + + public void testApplyLicenseInDevMode() throws Exception { + License newLicense = TestUtils.generateSignedLicense(randomFrom("gold", "platinum"), TimeValue.timeValueHours(24L)); + PutLicenseRequest request = new PutLicenseRequest(); + request.acknowledge(true); + request.license(newLicense); + Settings settings = Settings.builder().put("xpack.security.enabled", true).build(); + XPackLicenseState licenseState = new XPackLicenseState(settings); + inetAddress = InetAddress.getLoopbackAddress(); + + setInitialState(null, licenseState, settings); + licenseService.start(); + PlainActionFuture responseFuture = new PlainActionFuture<>(); + licenseService.registerLicense(request, responseFuture); + verify(clusterService).submitStateUpdateTask(any(String.class), any(ClusterStateUpdateTask.class)); + + inetAddress = TransportAddress.META_ADDRESS; + settings = Settings.builder() + .put("xpack.security.enabled", true) + .put("discovery.type", "single-node") + .build(); + licenseService.stop(); + licenseState = new XPackLicenseState(settings); + setInitialState(null, licenseState, settings); + licenseService.start(); + licenseService.registerLicense(request, responseFuture); + verify(clusterService, times(2)).submitStateUpdateTask(any(String.class), any(ClusterStateUpdateTask.class)); + } + + public void testApplyLicenseInProdMode() throws Exception { + final String licenseType = randomFrom("GOLD", "PLATINUM"); + License newLicense = TestUtils.generateSignedLicense(licenseType, TimeValue.timeValueHours(24L)); + PutLicenseRequest request = new PutLicenseRequest(); + request.acknowledge(true); + request.license(newLicense); + Settings settings = Settings.builder().put("xpack.security.enabled", true).build(); + XPackLicenseState licenseState = new XPackLicenseState(settings); + inetAddress = TransportAddress.META_ADDRESS; + + setInitialState(null, licenseState, settings); + licenseService.start(); + PlainActionFuture responseFuture = new PlainActionFuture<>(); + IllegalStateException e = expectThrows(IllegalStateException.class, () -> licenseService.registerLicense(request, responseFuture)); + assertThat(e.getMessage(), + containsString("Cannot install a [" + licenseType + "] license unless TLS is configured or security is disabled")); + + settings = Settings.builder().put("xpack.security.enabled", false).build(); + licenseService.stop(); + licenseState = new XPackLicenseState(settings); + setInitialState(null, licenseState, settings); + licenseService.start(); + licenseService.registerLicense(request, responseFuture); + verify(clusterService).submitStateUpdateTask(any(String.class), any(ClusterStateUpdateTask.class)); + + settings = Settings.builder() + .put("xpack.security.enabled", 
true) + .put("xpack.security.transport.ssl.enabled", true) + .build(); + licenseService.stop(); + licenseState = new XPackLicenseState(settings); + setInitialState(null, licenseState, settings); + licenseService.start(); + licenseService.registerLicense(request, responseFuture); + verify(clusterService, times(2)).submitStateUpdateTask(any(String.class), any(ClusterStateUpdateTask.class)); + } + + @Override + protected DiscoveryNode getLocalNode() { + return new DiscoveryNode("localnode", new TransportAddress(inetAddress, randomIntBetween(9300, 9399)), + emptyMap(), emptySet(), Version.CURRENT); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseUtilsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseUtilsTests.java new file mode 100644 index 0000000000000..26ed6f5e446db --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseUtilsTests.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class LicenseUtilsTests extends ESTestCase { + + public void testNewExpirationException() { + for (String feature : Arrays.asList("feature", randomAlphaOfLength(5), null, "")) { + ElasticsearchSecurityException exception = LicenseUtils.newComplianceException(feature); + assertNotNull(exception); + assertThat(exception.getMetadataKeys(), contains(LicenseUtils.EXPIRED_FEATURE_METADATA)); + assertThat(exception.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), hasSize(1)); + assertThat(exception.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA).iterator().next(), equalTo(feature)); + } + } + + public void testIsLicenseExpiredException() { + ElasticsearchSecurityException exception = LicenseUtils.newComplianceException("feature"); + assertTrue(LicenseUtils.isLicenseExpiredException(exception)); + + exception = new ElasticsearchSecurityException("msg"); + assertFalse(LicenseUtils.isLicenseExpiredException(exception)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesAcknowledgementTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesAcknowledgementTests.java new file mode 100644 index 0000000000000..8e1c363c814da --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesAcknowledgementTests.java @@ -0,0 +1,118 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; + +import static org.elasticsearch.common.unit.TimeValue.timeValueHours; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +public class LicensesAcknowledgementTests extends AbstractLicenseServiceTestCase { + + public void testAcknowledgment() throws Exception { + XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); + setInitialState(TestUtils.generateSignedLicense("gold", timeValueHours(2)), licenseState, Settings.EMPTY); + licenseService.start(); + // try installing a signed license + long issueDate = System.currentTimeMillis() - TimeValue.timeValueHours(24 * 2).getMillis(); + License signedLicense = TestUtils.generateSignedLicense("trial", License.VERSION_CURRENT, issueDate, timeValueHours(10)); + PutLicenseRequest putLicenseRequest = new PutLicenseRequest().license(signedLicense); + // ensure acknowledgement message was part of the response + licenseService.registerLicense(putLicenseRequest, new AssertingLicensesUpdateResponse(false, LicensesStatus.VALID, true)); + assertThat(licenseService.getLicense(), not(signedLicense)); + verify(clusterService, times(0)).submitStateUpdateTask(any(String.class), any(ClusterStateUpdateTask.class)); + + // try installing a signed license with acknowledgement + putLicenseRequest = new PutLicenseRequest().license(signedLicense).acknowledge(true); + // ensure license was installed and no acknowledgment message was returned + licenseService.registerLicense(putLicenseRequest, new AssertingLicensesUpdateResponse(true, LicensesStatus.VALID, false)); + verify(clusterService, times(1)).submitStateUpdateTask(any(String.class), any(ClusterStateUpdateTask.class)); + } + + public void testRejectUpgradeToProductionWithoutTLS() throws Exception { + XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); + setInitialState(TestUtils.generateSignedLicense("trial", timeValueHours(2)), licenseState, Settings.EMPTY); + licenseService.start(); + // try installing a signed license + License signedLicense = TestUtils.generateSignedLicense("platinum", timeValueHours(10)); + PutLicenseRequest putLicenseRequest = new PutLicenseRequest().license(signedLicense); + // ensure acknowledgement message was part of the response + IllegalStateException ise = expectThrows(IllegalStateException.class, () -> + licenseService.registerLicense(putLicenseRequest, new AssertingLicensesUpdateResponse(false, LicensesStatus.VALID, true))); + assertEquals("Cannot install a [PLATINUM] license unless TLS is configured or security is disabled", ise.getMessage()); + } + + public void testUpgradeToProductionWithoutTLSAndSecurityDisabled() throws Exception { + XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); + setInitialState(TestUtils.generateSignedLicense("trial", timeValueHours(2)), licenseState, Settings.builder() + .put("xpack.security.enabled", false).build()); + licenseService.start(); + // try installing a signed license + License signedLicense = TestUtils.generateSignedLicense("platinum", timeValueHours(10)); + PutLicenseRequest putLicenseRequest = new PutLicenseRequest().license(signedLicense); + 
licenseService.registerLicense(putLicenseRequest, new AssertingLicensesUpdateResponse(false, LicensesStatus.VALID, true)); + assertThat(licenseService.getLicense(), not(signedLicense)); + verify(clusterService, times(1)).submitStateUpdateTask(any(String.class), any(ClusterStateUpdateTask.class)); + + // try installing a signed license with acknowledgement + putLicenseRequest = new PutLicenseRequest().license(signedLicense).acknowledge(true); + // ensure license was installed and no acknowledgment message was returned + licenseService.registerLicense(putLicenseRequest, new AssertingLicensesUpdateResponse(true, LicensesStatus.VALID, false)); + verify(clusterService, times(2)).submitStateUpdateTask(any(String.class), any(ClusterStateUpdateTask.class)); + } + + public void testUpgradeToProductionWithTLSAndSecurity() throws Exception { + XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); + setInitialState(TestUtils.generateSignedLicense("trial", timeValueHours(2)), licenseState, Settings.builder() + .put("xpack.security.enabled", true) + .put("xpack.security.transport.ssl.enabled", true).build()); + licenseService.start(); + // try installing a signed license + License signedLicense = TestUtils.generateSignedLicense("platinum", timeValueHours(10)); + PutLicenseRequest putLicenseRequest = new PutLicenseRequest().license(signedLicense); + licenseService.registerLicense(putLicenseRequest, new AssertingLicensesUpdateResponse(false, LicensesStatus.VALID, true)); + assertThat(licenseService.getLicense(), not(signedLicense)); + verify(clusterService, times(1)).submitStateUpdateTask(any(String.class), any(ClusterStateUpdateTask.class)); + + // try installing a signed license with acknowledgement + putLicenseRequest = new PutLicenseRequest().license(signedLicense).acknowledge(true); + // ensure license was installed and no acknowledgment message was returned + licenseService.registerLicense(putLicenseRequest, new AssertingLicensesUpdateResponse(true, LicensesStatus.VALID, false)); + verify(clusterService, times(2)).submitStateUpdateTask(any(String.class), any(ClusterStateUpdateTask.class)); + } + + private static class AssertingLicensesUpdateResponse implements ActionListener { + private final boolean expectedAcknowledgement; + private final LicensesStatus expectedStatus; + private final boolean expectAckMessages; + + AssertingLicensesUpdateResponse(boolean expectedAcknowledgement, LicensesStatus expectedStatus, + boolean expectAckMessages) { + this.expectedAcknowledgement = expectedAcknowledgement; + this.expectedStatus = expectedStatus; + this.expectAckMessages = expectAckMessages; + } + + @Override + public void onResponse(PutLicenseResponse licensesUpdateResponse) { + assertThat(licensesUpdateResponse.isAcknowledged(), equalTo(expectedAcknowledgement)); + assertThat(licensesUpdateResponse.status(), equalTo(expectedStatus)); + assertEquals(licensesUpdateResponse.acknowledgeMessages().isEmpty(), expectAckMessages == false); + } + + @Override + public void onFailure(Exception throwable) { + throw new RuntimeException(throwable); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesManagerServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesManagerServiceTests.java new file mode 100644 index 0000000000000..540dbd891bd9b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesManagerServiceTests.java @@ -0,0 +1,148 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.XPackSettings; +import org.junit.Before; + +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +public class LicensesManagerServiceTests extends ESSingleNodeTestCase { + + @Override + protected Collection> getPlugins() { + return Collections.singletonList(LocalStateCompositeXPackPlugin.class); + } + + @Override + protected Settings nodeSettings() { + return Settings.builder() + .put(super.nodeSettings()) + .put(XPackSettings.SECURITY_ENABLED.getKey(), false) + .put(XPackSettings.MONITORING_ENABLED.getKey(), false) + .put(XPackSettings.WATCHER_ENABLED.getKey(), false) + .put(XPackSettings.GRAPH_ENABLED.getKey(), false) + .put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false) + .build(); + } + + @Override + protected boolean resetNodeAfterTest() { + return true; + } + + @Before + public void waitForTrialLicenseToBeGenerated() throws Exception { + assertBusy(() -> assertNotNull(getInstanceFromNode(ClusterService.class).state().metaData().custom(LicensesMetaData.TYPE))); + } + + public void testStoreAndGetLicenses() throws Exception { + LicenseService licenseService = getInstanceFromNode(LicenseService.class); + ClusterService clusterService = getInstanceFromNode(ClusterService.class); + License goldLicense = TestUtils.generateSignedLicense("gold", TimeValue.timeValueHours(1)); + TestUtils.registerAndAckSignedLicenses(licenseService, goldLicense, LicensesStatus.VALID); + License silverLicense = TestUtils.generateSignedLicense("silver", TimeValue.timeValueHours(2)); + TestUtils.registerAndAckSignedLicenses(licenseService, silverLicense, LicensesStatus.VALID); + License platinumLicense = TestUtils.generateSignedLicense("platinum", TimeValue.timeValueHours(1)); + TestUtils.registerAndAckSignedLicenses(licenseService, platinumLicense, LicensesStatus.VALID); + LicensesMetaData licensesMetaData = clusterService.state().metaData().custom(LicensesMetaData.TYPE); + assertThat(licensesMetaData.getLicense(), equalTo(platinumLicense)); + final License getLicenses = licenseService.getLicense(); + assertThat(getLicenses, equalTo(platinumLicense)); + } + + // TODO: Add test/feature blocking the registration of basic license + + public void testEffectiveLicenses() throws Exception { + final LicenseService licenseService = getInstanceFromNode(LicenseService.class); + final ClusterService clusterService = getInstanceFromNode(ClusterService.class); + License goldLicense = TestUtils.generateSignedLicense("gold", TimeValue.timeValueSeconds(5)); + // put gold license + TestUtils.registerAndAckSignedLicenses(licenseService, goldLicense, LicensesStatus.VALID); + LicensesMetaData licensesMetaData = 
clusterService.state().metaData().custom(LicensesMetaData.TYPE); + assertThat(LicenseService.getLicense(licensesMetaData), equalTo(goldLicense)); + + License platinumLicense = TestUtils.generateSignedLicense("platinum", TimeValue.timeValueSeconds(3)); + // put platinum license + TestUtils.registerAndAckSignedLicenses(licenseService, platinumLicense, LicensesStatus.VALID); + licensesMetaData = clusterService.state().metaData().custom(LicensesMetaData.TYPE); + assertThat(LicenseService.getLicense(licensesMetaData), equalTo(platinumLicense)); + } + + public void testInvalidLicenseStorage() throws Exception { + LicenseService licenseService = getInstanceFromNode(LicenseService.class); + ClusterService clusterService = getInstanceFromNode(ClusterService.class); + License signedLicense = TestUtils.generateSignedLicense(TimeValue.timeValueMinutes(2)); + + // modify content of signed license + License tamperedLicense = License.builder() + .fromLicenseSpec(signedLicense, signedLicense.signature()) + .expiryDate(signedLicense.expiryDate() + 10 * 24 * 60 * 60 * 1000L) + .validate() + .build(); + + TestUtils.registerAndAckSignedLicenses(licenseService, tamperedLicense, LicensesStatus.INVALID); + + // ensure that the invalid license never made it to cluster state + LicensesMetaData licensesMetaData = clusterService.state().metaData().custom(LicensesMetaData.TYPE); + assertThat(licensesMetaData.getLicense(), not(equalTo(tamperedLicense))); + } + + public void testRemoveLicenses() throws Exception { + LicenseService licenseService = getInstanceFromNode(LicenseService.class); + ClusterService clusterService = getInstanceFromNode(ClusterService.class); + + // generate signed licenses + License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(1)); + TestUtils.registerAndAckSignedLicenses(licenseService, license, LicensesStatus.VALID); + LicensesMetaData licensesMetaData = clusterService.state().metaData().custom(LicensesMetaData.TYPE); + assertThat(licensesMetaData.getLicense(), not(LicensesMetaData.LICENSE_TOMBSTONE)); + + // remove signed licenses + removeAndAckSignedLicenses(licenseService); + licensesMetaData = clusterService.state().metaData().custom(LicensesMetaData.TYPE); + assertThat(licensesMetaData.getLicense(), equalTo(LicensesMetaData.LICENSE_TOMBSTONE)); + } + + private void removeAndAckSignedLicenses(final LicenseService licenseService) { + final CountDownLatch latch = new CountDownLatch(1); + final AtomicBoolean success = new AtomicBoolean(false); + licenseService.removeLicense(new DeleteLicenseRequest(), new ActionListener() { + @Override + public void onResponse(ClusterStateUpdateResponse clusterStateUpdateResponse) { + if (clusterStateUpdateResponse.isAcknowledged()) { + success.set(true); + } + latch.countDown(); + } + + @Override + public void onFailure(Exception throwable) { + latch.countDown(); + } + }); + try { + latch.await(); + } catch (InterruptedException e) { + fail(e.getMessage()); + } + assertThat("remove license(s) failed", success.get(), equalTo(true)); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetaDataSerializationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetaDataSerializationTests.java new file mode 100644 index 0000000000000..b85a3480fa739 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetaDataSerializationTests.java @@ -0,0 +1,151 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.RepositoriesMetaData; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContent.Params; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; +import java.util.UUID; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class LicensesMetaDataSerializationTests extends ESTestCase { + + public void testXContentSerializationOneSignedLicense() throws Exception { + License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(2)); + LicensesMetaData licensesMetaData = new LicensesMetaData(license, null); + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + builder.startObject("licenses"); + licensesMetaData.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + builder.endObject(); + LicensesMetaData licensesMetaDataFromXContent = getLicensesMetaDataFromXContent(createParser(builder)); + assertThat(licensesMetaDataFromXContent.getLicense(), equalTo(license)); + assertNull(licensesMetaDataFromXContent.getMostRecentTrialVersion()); + } + + public void testXContentSerializationOneSignedLicenseWithUsedTrial() throws Exception { + License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(2)); + LicensesMetaData licensesMetaData = new LicensesMetaData(license, Version.CURRENT); + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + builder.startObject("licenses"); + licensesMetaData.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + builder.endObject(); + LicensesMetaData licensesMetaDataFromXContent = getLicensesMetaDataFromXContent(createParser(builder)); + assertThat(licensesMetaDataFromXContent.getLicense(), equalTo(license)); + assertEquals(licensesMetaDataFromXContent.getMostRecentTrialVersion(), Version.CURRENT); + } + + public void testLicenseMetadataParsingDoesNotSwallowOtherMetaData() throws Exception { + new Licensing(Settings.EMPTY); // makes sure LicensePlugin is registered in Custom MetaData + License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(2)); + LicensesMetaData licensesMetaData = new LicensesMetaData(license, Version.CURRENT); + RepositoryMetaData repositoryMetaData = new RepositoryMetaData("repo", "fs", Settings.EMPTY); + RepositoriesMetaData repositoriesMetaData = new RepositoriesMetaData(repositoryMetaData); + final MetaData.Builder metaDataBuilder = MetaData.builder(); + if (randomBoolean()) { // random order of insertion + 
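// whichever custom entry is written first, parsing the licenses metadata must not swallow the repositories metadata in the round-trip below +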
metaDataBuilder.putCustom(licensesMetaData.getWriteableName(), licensesMetaData); + metaDataBuilder.putCustom(repositoriesMetaData.getWriteableName(), repositoriesMetaData); + } else { + metaDataBuilder.putCustom(repositoriesMetaData.getWriteableName(), repositoriesMetaData); + metaDataBuilder.putCustom(licensesMetaData.getWriteableName(), licensesMetaData); + } + // serialize metadata + XContentBuilder builder = XContentFactory.jsonBuilder(); + Params params = new ToXContent.MapParams(Collections.singletonMap(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_GATEWAY)); + builder.startObject(); + builder = metaDataBuilder.build().toXContent(builder, params); + builder.endObject(); + // deserialize metadata again + MetaData metaData = MetaData.Builder.fromXContent(createParser(builder)); + // check that custom metadata still present + assertThat(metaData.custom(licensesMetaData.getWriteableName()), notNullValue()); + assertThat(metaData.custom(repositoriesMetaData.getWriteableName()), notNullValue()); + } + + public void testXContentSerializationOneTrial() throws Exception { + long issueDate = System.currentTimeMillis(); + License.Builder specBuilder = License.builder() + .uid(UUID.randomUUID().toString()) + .issuedTo("customer") + .maxNodes(5) + .issueDate(issueDate) + .type(randomBoolean() ? "trial" : "basic") + .expiryDate(issueDate + TimeValue.timeValueHours(2).getMillis()); + final License trialLicense = SelfGeneratedLicense.create(specBuilder); + LicensesMetaData licensesMetaData = new LicensesMetaData(trialLicense, Version.CURRENT); + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + builder.startObject("licenses"); + licensesMetaData.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + builder.endObject(); + LicensesMetaData licensesMetaDataFromXContent = getLicensesMetaDataFromXContent(createParser(builder)); + assertThat(licensesMetaDataFromXContent.getLicense(), equalTo(trialLicense)); + assertEquals(licensesMetaDataFromXContent.getMostRecentTrialVersion(), Version.CURRENT); + } + + public void testLicenseTombstoneFromXContext() throws Exception { + final XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + builder.startObject("licenses"); + builder.nullField("license"); + builder.endObject(); + builder.endObject(); + LicensesMetaData metaDataFromXContent = getLicensesMetaDataFromXContent(createParser(builder)); + assertThat(metaDataFromXContent.getLicense(), equalTo(LicensesMetaData.LICENSE_TOMBSTONE)); + } + + public void testLicenseTombstoneWithUsedTrialFromXContext() throws Exception { + final XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + builder.startObject("licenses"); + builder.nullField("license"); + builder.field("trial_license", Version.CURRENT.toString()); + builder.endObject(); + builder.endObject(); + LicensesMetaData metaDataFromXContent = getLicensesMetaDataFromXContent(createParser(builder)); + assertThat(metaDataFromXContent.getLicense(), equalTo(LicensesMetaData.LICENSE_TOMBSTONE)); + assertEquals(metaDataFromXContent.getMostRecentTrialVersion(), Version.CURRENT); + } + + private static LicensesMetaData getLicensesMetaDataFromXContent(XContentParser parser) throws Exception { + parser.nextToken(); // consume null + parser.nextToken(); // consume "licenses" + LicensesMetaData licensesMetaDataFromXContent = LicensesMetaData.fromXContent(parser); + parser.nextToken(); // consume endObject + assertThat(parser.nextToken(), nullValue()); + return 
licensesMetaDataFromXContent; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(Stream.concat( + new Licensing(Settings.EMPTY).getNamedXContent().stream(), + ClusterModule.getNamedXWriteables().stream() + ).collect(Collectors.toList())); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesTransportTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesTransportTests.java new file mode 100644 index 0000000000000..a48132ef3d79d --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesTransportTests.java @@ -0,0 +1,233 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.XPackSettings; + +import java.nio.charset.StandardCharsets; +import java.util.Collection; +import java.util.Collections; +import java.util.UUID; + +import static org.elasticsearch.license.TestUtils.dateMath; +import static org.elasticsearch.license.TestUtils.generateExpiredNonBasicLicense; +import static org.elasticsearch.license.TestUtils.generateSignedLicense; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.not; + +public class LicensesTransportTests extends ESSingleNodeTestCase { + + @Override + protected boolean resetNodeAfterTest() { + return true; + } + + @Override + protected Collection> getPlugins() { + return Collections.singletonList(LocalStateCompositeXPackPlugin.class); + } + + @Override + protected Settings nodeSettings() { + Settings.Builder newSettings = Settings.builder(); + newSettings.put(super.nodeSettings()); + newSettings.put(XPackSettings.SECURITY_ENABLED.getKey(), false); +// newSettings.put(XPackSettings.MONITORING_ENABLED.getKey(), false); +// newSettings.put(XPackSettings.WATCHER_ENABLED.getKey(), false); + newSettings.put(Node.NODE_DATA_SETTING.getKey(), true); + return newSettings.build(); + } + + public void testEmptyGetLicense() throws Exception { + // basic license is added async, we should wait for it + assertBusy(() -> { + try { + final ActionFuture getLicenseFuture = + new GetLicenseRequestBuilder(client().admin().cluster(), GetLicenseAction.INSTANCE).execute(); + final GetLicenseResponse getLicenseResponse; + getLicenseResponse = getLicenseFuture.get(); + assertNotNull(getLicenseResponse.license()); + assertThat(getLicenseResponse.license().operationMode(), equalTo(License.OperationMode.BASIC)); + } catch (Exception e) { + throw new RuntimeException("unexpected exception", e); + } + }); + } + + public void testPutLicense() throws Exception { + License signedLicense = generateSignedLicense(TimeValue.timeValueMinutes(2)); + + // put license + PutLicenseRequestBuilder putLicenseRequestBuilder = + new PutLicenseRequestBuilder(client().admin().cluster(), 
PutLicenseAction.INSTANCE).setLicense(signedLicense) + .setAcknowledge(true); + PutLicenseResponse putLicenseResponse = putLicenseRequestBuilder.get(); + assertThat(putLicenseResponse.isAcknowledged(), equalTo(true)); + assertThat(putLicenseResponse.status(), equalTo(LicensesStatus.VALID)); + + // get and check license + GetLicenseResponse getLicenseResponse = new GetLicenseRequestBuilder(client().admin().cluster(), GetLicenseAction.INSTANCE).get(); + assertThat(getLicenseResponse.license(), equalTo(signedLicense)); + } + + public void testPutLicenseFromString() throws Exception { + License signedLicense = generateSignedLicense(TimeValue.timeValueMinutes(2)); + String licenseString = TestUtils.dumpLicense(signedLicense); + + // put license source + PutLicenseRequestBuilder putLicenseRequestBuilder = + new PutLicenseRequestBuilder(client().admin().cluster(), PutLicenseAction.INSTANCE) + .setLicense(new BytesArray(licenseString.getBytes(StandardCharsets.UTF_8)), XContentType.JSON) + .setAcknowledge(true); + PutLicenseResponse putLicenseResponse = putLicenseRequestBuilder.get(); + assertThat(putLicenseResponse.isAcknowledged(), equalTo(true)); + assertThat(putLicenseResponse.status(), equalTo(LicensesStatus.VALID)); + + // get and check license + GetLicenseResponse getLicenseResponse = new GetLicenseRequestBuilder(client().admin().cluster(), GetLicenseAction.INSTANCE).get(); + assertThat(getLicenseResponse.license(), equalTo(signedLicense)); + } + + public void testPutInvalidLicense() throws Exception { + License signedLicense = generateSignedLicense(TimeValue.timeValueMinutes(2)); + + // modify content of signed license + License tamperedLicense = License.builder() + .fromLicenseSpec(signedLicense, signedLicense.signature()) + .expiryDate(signedLicense.expiryDate() + 10 * 24 * 60 * 60 * 1000L) + .validate() + .build(); + + PutLicenseRequestBuilder builder = new PutLicenseRequestBuilder(client().admin().cluster(), PutLicenseAction.INSTANCE); + builder.setLicense(tamperedLicense); + + // try to put license (should be invalid) + final PutLicenseResponse putLicenseResponse = builder.get(); + assertThat(putLicenseResponse.status(), equalTo(LicensesStatus.INVALID)); + + // try to get invalid license + GetLicenseResponse getLicenseResponse = new GetLicenseRequestBuilder(client().admin().cluster(), GetLicenseAction.INSTANCE).get(); + assertThat(getLicenseResponse.license(), not(tamperedLicense)); + } + + public void testPutBasicLicenseIsInvalid() throws Exception { + License signedLicense = generateSignedLicense("basic", License.VERSION_CURRENT, -1, TimeValue.timeValueMinutes(2)); + + PutLicenseRequestBuilder builder = new PutLicenseRequestBuilder(client().admin().cluster(), PutLicenseAction.INSTANCE); + builder.setLicense(signedLicense); + + // try to put license (should be invalid) + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, builder::get); + assertEquals(iae.getMessage(), "Registering basic licenses is not allowed."); + + // try to get invalid license + GetLicenseResponse getLicenseResponse = new GetLicenseRequestBuilder(client().admin().cluster(), GetLicenseAction.INSTANCE).get(); + assertThat(getLicenseResponse.license(), not(signedLicense)); + } + + public void testPutExpiredLicense() throws Exception { + License expiredLicense = generateExpiredNonBasicLicense(); + PutLicenseRequestBuilder builder = new PutLicenseRequestBuilder(client().admin().cluster(), PutLicenseAction.INSTANCE); + builder.setLicense(expiredLicense); + PutLicenseResponse putLicenseResponse = 
builder.get(); + assertThat(putLicenseResponse.status(), equalTo(LicensesStatus.EXPIRED)); + // get license should not return the expired license + GetLicenseResponse getLicenseResponse = new GetLicenseRequestBuilder(client().admin().cluster(), GetLicenseAction.INSTANCE).get(); + assertThat(getLicenseResponse.license(), not(expiredLicense)); + } + + public void testPutLicensesSimple() throws Exception { + License goldSignedLicense = generateSignedLicense("gold", TimeValue.timeValueMinutes(5)); + PutLicenseRequestBuilder putLicenseRequestBuilder = + new PutLicenseRequestBuilder(client().admin().cluster(), PutLicenseAction.INSTANCE).setLicense(goldSignedLicense) + .setAcknowledge(true); + PutLicenseResponse putLicenseResponse = putLicenseRequestBuilder.get(); + assertThat(putLicenseResponse.status(), equalTo(LicensesStatus.VALID)); + GetLicenseResponse getLicenseResponse = new GetLicenseRequestBuilder(client().admin().cluster(), GetLicenseAction.INSTANCE).get(); + assertThat(getLicenseResponse.license(), equalTo(goldSignedLicense)); + + License platinumSignedLicense = generateSignedLicense("platinum", TimeValue.timeValueMinutes(2)); + putLicenseRequestBuilder.setLicense(platinumSignedLicense); + putLicenseResponse = putLicenseRequestBuilder.get(); + assertThat(putLicenseResponse.isAcknowledged(), equalTo(true)); + assertThat(putLicenseResponse.status(), equalTo(LicensesStatus.VALID)); + getLicenseResponse = new GetLicenseRequestBuilder(client().admin().cluster(), GetLicenseAction.INSTANCE).get(); + assertThat(getLicenseResponse.license(), equalTo(platinumSignedLicense)); + } + + public void testRemoveLicensesSimple() throws Exception { + License goldLicense = generateSignedLicense("gold", TimeValue.timeValueMinutes(5)); + PutLicenseRequestBuilder putLicenseRequestBuilder = + new PutLicenseRequestBuilder(client().admin().cluster(), PutLicenseAction.INSTANCE).setLicense(goldLicense) + .setAcknowledge(true); + PutLicenseResponse putLicenseResponse = putLicenseRequestBuilder.get(); + assertThat(putLicenseResponse.isAcknowledged(), equalTo(true)); + assertThat(putLicenseResponse.status(), equalTo(LicensesStatus.VALID)); + GetLicenseResponse getLicenseResponse = new GetLicenseRequestBuilder(client().admin().cluster(), GetLicenseAction.INSTANCE).get(); + assertThat(getLicenseResponse.license(), equalTo(goldLicense)); + // delete all licenses + DeleteLicenseRequestBuilder deleteLicenseRequestBuilder = + new DeleteLicenseRequestBuilder(client().admin().cluster(), DeleteLicenseAction.INSTANCE); + DeleteLicenseResponse deleteLicenseResponse = deleteLicenseRequestBuilder.get(); + assertThat(deleteLicenseResponse.isAcknowledged(), equalTo(true)); + // get licenses (expected no licenses) + getLicenseResponse = new GetLicenseRequestBuilder(client().admin().cluster(), GetLicenseAction.INSTANCE).get(); + assertNull(getLicenseResponse.license()); + } + + public void testLicenseIsRejectWhenStartDateLaterThanNow() throws Exception { + long now = System.currentTimeMillis(); + final License.Builder builder = License.builder() + .uid(UUID.randomUUID().toString()) + .version(License.VERSION_CURRENT) + .expiryDate(dateMath("now+2h", now)) + .startDate(dateMath("now+1h", now)) + .issueDate(now) + .type(License.OperationMode.TRIAL.toString()) + .issuedTo("customer") + .issuer("elasticsearch") + .maxNodes(5); + License license = TestUtils.generateSignedLicense(builder); + + PutLicenseRequestBuilder putLicenseRequestBuilder = + new PutLicenseRequestBuilder(client().admin().cluster(), 
PutLicenseAction.INSTANCE).setLicense(license) + .setAcknowledge(true); + PutLicenseResponse putLicenseResponse = putLicenseRequestBuilder.get(); + assertThat(putLicenseResponse.isAcknowledged(), equalTo(true)); + assertThat(putLicenseResponse.status(), equalTo(LicensesStatus.INVALID)); + } + + public void testLicenseIsAcceptedWhenStartDateBeforeThanNow() throws Exception { + long now = System.currentTimeMillis(); + final License.Builder builder = License.builder() + .uid(UUID.randomUUID().toString()) + .version(License.VERSION_CURRENT) + .expiryDate(dateMath("now+2h", now)) + .startDate(now) + .issueDate(now) + .type(License.OperationMode.TRIAL.toString()) + .issuedTo("customer") + .issuer("elasticsearch") + .maxNodes(5); + License license = TestUtils.generateSignedLicense(builder); + + PutLicenseRequestBuilder putLicenseRequestBuilder = + new PutLicenseRequestBuilder(client().admin().cluster(), PutLicenseAction.INSTANCE).setLicense(license) + .setAcknowledge(true); + PutLicenseResponse putLicenseResponse = putLicenseRequestBuilder.get(); + assertThat(putLicenseResponse.isAcknowledged(), equalTo(true)); + assertThat(putLicenseResponse.status(), equalTo(LicensesStatus.VALID)); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/OperationModeFileWatcherTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/OperationModeFileWatcherTests.java new file mode 100644 index 0000000000000..b4efed1bacc32 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/OperationModeFileWatcherTests.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.equalTo; + +public class OperationModeFileWatcherTests extends ESTestCase { + private ResourceWatcherService watcherService; + private TestThreadPool threadPool; + private Path licenseModePath; + private OperationModeFileWatcher operationModeFileWatcher; + private AtomicReference onChangeCounter; + + @Before + public void setup() throws Exception { + threadPool = new TestThreadPool("license mode file watcher tests"); + Settings settings = Settings.builder() + .put("resource.reload.interval.high", "10ms") + .build(); + watcherService = new ResourceWatcherService(settings, + threadPool); + watcherService.start(); + licenseModePath = createTempFile(); + onChangeCounter = new AtomicReference<>(new CountDownLatch(1)); + operationModeFileWatcher = new OperationModeFileWatcher(watcherService, licenseModePath, logger, + () -> onChangeCounter.get().countDown()); + } + + @After + public void shutdown() throws InterruptedException { + terminate(threadPool); + watcherService.stop(); + } + + public void testInit() throws Exception { + onChangeCounter.set(new CountDownLatch(2)); + writeMode("gold"); + assertThat(operationModeFileWatcher.getCurrentOperationMode(), equalTo(License.OperationMode.PLATINUM)); + operationModeFileWatcher.init(); + assertTrue(onChangeCounter.get().await(5, TimeUnit.SECONDS)); + assertThat(operationModeFileWatcher.getCurrentOperationMode(), equalTo(License.OperationMode.GOLD)); + } + + public void testUpdateModeFromFile() throws Exception { + Files.delete(licenseModePath); + operationModeFileWatcher.init(); + assertThat(operationModeFileWatcher.getCurrentOperationMode(), equalTo(License.OperationMode.PLATINUM)); + writeMode("gold"); + assertTrue(onChangeCounter.get().await(5, TimeUnit.SECONDS)); + assertThat(operationModeFileWatcher.getCurrentOperationMode(), equalTo(License.OperationMode.GOLD)); + onChangeCounter.set(new CountDownLatch(1)); + writeMode("basic"); + assertTrue(onChangeCounter.get().await(5, TimeUnit.SECONDS)); + assertThat(operationModeFileWatcher.getCurrentOperationMode(), equalTo(License.OperationMode.BASIC)); + } + + public void testDeleteModeFromFile() throws Exception { + Files.delete(licenseModePath); + operationModeFileWatcher.init(); + writeMode("gold"); + assertTrue(onChangeCounter.get().await(5, TimeUnit.SECONDS)); + assertThat(operationModeFileWatcher.getCurrentOperationMode(), equalTo(License.OperationMode.GOLD)); + onChangeCounter.set(new CountDownLatch(1)); + + Files.delete(licenseModePath); + assertTrue(onChangeCounter.get().await(5, TimeUnit.SECONDS)); + assertThat(operationModeFileWatcher.getCurrentOperationMode(), equalTo(License.OperationMode.PLATINUM)); + } + + public void testInvalidModeFromFile() throws Exception { + writeMode("invalid"); + operationModeFileWatcher.init(); + assertThat(operationModeFileWatcher.getCurrentOperationMode(), equalTo(License.OperationMode.PLATINUM)); + 
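// an unparsable mode value is ignored: the watcher keeps reporting the PLATINUM default both before and after the file-change notification +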
operationModeFileWatcher.onFileChanged(licenseModePath); + assertThat(operationModeFileWatcher.getCurrentOperationMode(), equalTo(License.OperationMode.PLATINUM)); + } + + public void testLicenseModeFileIsDirectory() throws Exception { + licenseModePath = createTempDir(); + operationModeFileWatcher.init(); + assertThat(operationModeFileWatcher.getCurrentOperationMode(), equalTo(License.OperationMode.PLATINUM)); + operationModeFileWatcher.onFileChanged(licenseModePath); + assertThat(operationModeFileWatcher.getCurrentOperationMode(), equalTo(License.OperationMode.PLATINUM)); + } + + public void testLicenseModeFileCreatedAfterInit() throws Exception { + Files.delete(licenseModePath); + operationModeFileWatcher.init(); + assertThat(operationModeFileWatcher.getCurrentOperationMode(), equalTo(License.OperationMode.PLATINUM)); + onChangeCounter.set(new CountDownLatch(1)); + Path tempFile = createTempFile(); + writeMode("gold", tempFile); + licenseModePath = tempFile; + assertTrue(onChangeCounter.get().await(5, TimeUnit.SECONDS)); + assertThat(operationModeFileWatcher.getCurrentOperationMode(), equalTo(License.OperationMode.GOLD)); + } + + private void writeMode(String mode) throws IOException { + writeMode(mode, licenseModePath); + } + + static void writeMode(String mode, Path file) throws IOException { + Files.write(file, mode.getBytes(StandardCharsets.UTF_8), StandardOpenOption.CREATE); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/PutLicenseResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/PutLicenseResponseTests.java new file mode 100644 index 0000000000000..d4b7900fa5bc8 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/PutLicenseResponseTests.java @@ -0,0 +1,103 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.sameInstance; + +public class PutLicenseResponseTests extends ESTestCase { + @SuppressWarnings("unchecked") + public void testSerialization() throws Exception { + boolean acknowledged = randomBoolean(); + LicensesStatus status = randomFrom(LicensesStatus.VALID, LicensesStatus.INVALID, LicensesStatus.EXPIRED); + Map ackMessages = randomAckMessages(); + + PutLicenseResponse response = new PutLicenseResponse(acknowledged, status, "", ackMessages); + + XContentBuilder contentBuilder = XContentFactory.jsonBuilder(); + response.toXContent(contentBuilder, ToXContent.EMPTY_PARAMS); + + Map map = XContentHelper.convertToMap(BytesReference.bytes(contentBuilder), false, + contentBuilder.contentType()).v2(); + assertThat(map.containsKey("acknowledged"), equalTo(true)); + boolean actualAcknowledged = (boolean) map.get("acknowledged"); + assertThat(actualAcknowledged, equalTo(acknowledged)); + + assertThat(map.containsKey("license_status"), equalTo(true)); + String actualStatus = (String) map.get("license_status"); + assertThat(actualStatus, equalTo(status.name().toLowerCase(Locale.ROOT))); + + assertThat(map.containsKey("acknowledge"), equalTo(true)); + Map> actualAckMessages = (Map>) map.get("acknowledge"); + assertTrue(actualAckMessages.containsKey("message")); + actualAckMessages.remove("message"); + assertThat(actualAckMessages.keySet(), equalTo(ackMessages.keySet())); + for (Map.Entry> entry : actualAckMessages.entrySet()) { + assertArrayEquals(entry.getValue().toArray(), ackMessages.get(entry.getKey())); + } + } + + public void testStreamSerialization() throws IOException { + boolean acknowledged = randomBoolean(); + LicensesStatus status = randomFrom(LicensesStatus.VALID, LicensesStatus.INVALID, LicensesStatus.EXPIRED); + Map ackMessages = randomAckMessages(); + + // write the steam so that we can attempt to read it back + BytesStreamOutput output = new BytesStreamOutput(); + + PutLicenseResponse response = new PutLicenseResponse(acknowledged, status, "", ackMessages); + // write it out + response.writeTo(output); + + StreamInput input = output.bytes().streamInput(); + + // read it back in + response.readFrom(input); + + assertThat(response.isAcknowledged(), equalTo(acknowledged)); + assertThat(response.status(), equalTo(status)); + assertThat(response.acknowledgeMessages(), not(sameInstance(ackMessages))); + assertThat(response.acknowledgeMessages().size(), equalTo(ackMessages.size())); + + for (String key : ackMessages.keySet()) { + assertArrayEquals(ackMessages.get(key), response.acknowledgeMessages().get(key)); + } + } + + private static Map randomAckMessages() { + int nFeatures = randomIntBetween(1, 5); + + Map ackMessages = new HashMap<>(); + + for (int i = 0; i < nFeatures; i++) { + String feature = randomAlphaOfLengthBetween(9, 15); + int nMessages = 
randomIntBetween(1, 5); + String[] messages = new String[nMessages]; + for (int j = 0; j < nMessages; j++) { + messages[j] = randomAlphaOfLengthBetween(10, 30); + } + ackMessages.put(feature, messages); + } + + return ackMessages; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/SelfGeneratedLicenseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/SelfGeneratedLicenseTests.java new file mode 100644 index 0000000000000..bf48079781410 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/SelfGeneratedLicenseTests.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Base64; +import java.util.Collections; +import java.util.UUID; + +import static org.elasticsearch.license.CryptUtils.encrypt; +import static org.hamcrest.Matchers.equalTo; + + +public class SelfGeneratedLicenseTests extends ESTestCase { + + public void testBasic() throws Exception { + long issueDate = System.currentTimeMillis(); + License.Builder specBuilder = License.builder() + .uid(UUID.randomUUID().toString()) + .issuedTo("customer") + .maxNodes(5) + .type(randomBoolean() ? "trial" : "basic") + .issueDate(issueDate) + .expiryDate(issueDate + TimeValue.timeValueHours(2).getMillis()); + License trialLicense = SelfGeneratedLicense.create(specBuilder); + assertThat(SelfGeneratedLicense.verify(trialLicense), equalTo(true)); + } + + public void testTampered() throws Exception { + long issueDate = System.currentTimeMillis(); + License.Builder specBuilder = License.builder() + .uid(UUID.randomUUID().toString()) + .issuedTo("customer") + .type(randomBoolean() ? 
"trial" : "basic") + .maxNodes(5) + .issueDate(issueDate) + .expiryDate(issueDate + TimeValue.timeValueHours(2).getMillis()); + License trialLicense = SelfGeneratedLicense.create(specBuilder); + final String originalSignature = trialLicense.signature(); + License tamperedLicense = License.builder().fromLicenseSpec(trialLicense, originalSignature) + .expiryDate(System.currentTimeMillis() + TimeValue.timeValueHours(5).getMillis()) + .build(); + assertThat(SelfGeneratedLicense.verify(trialLicense), equalTo(true)); + assertThat(SelfGeneratedLicense.verify(tamperedLicense), equalTo(false)); + } + + public void testFrom1x() throws Exception { + long issueDate = System.currentTimeMillis(); + License.Builder specBuilder = License.builder() + .uid(UUID.randomUUID().toString()) + .issuedTo("customer") + .type("subscription") + .subscriptionType("trial") + .issuer("elasticsearch") + .feature("") + .version(License.VERSION_START) + .maxNodes(5) + .issueDate(issueDate) + .expiryDate(issueDate + TimeValue.timeValueHours(2).getMillis()); + License pre20TrialLicense = specBuilder.build(); + License license = SelfGeneratedLicense.create(License.builder().fromPre20LicenseSpec(pre20TrialLicense).type("trial")); + assertThat(SelfGeneratedLicense.verify(license), equalTo(true)); + } + + public void testTrialLicenseVerifyWithOlderVersion() throws Exception { + long issueDate = System.currentTimeMillis(); + License.Builder specBuilder = License.builder() + .issuedTo("customer") + .maxNodes(5) + .issueDate(issueDate) + .expiryDate(issueDate + TimeValue.timeValueHours(2).getMillis()) + .feature("") + .subscriptionType("trial") + .version(1); + License trialLicenseV1 = createTrialLicense(specBuilder); + assertThat(SelfGeneratedLicense.verify(trialLicenseV1), equalTo(true)); + } + + private static License createTrialLicense(License.Builder specBuilder) { + License spec = specBuilder + .type(randomBoolean() ? "trial" : "basic") + .issuer("elasticsearch") + .uid(UUID.randomUUID().toString()) + .build(); + final String signature; + try { + XContentBuilder contentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + spec.toXContent(contentBuilder, new ToXContent.MapParams(Collections.singletonMap(License.LICENSE_SPEC_VIEW_MODE, "true"))); + byte[] encrypt = encrypt(BytesReference.toBytes(BytesReference.bytes(contentBuilder))); + byte[] bytes = new byte[4 + 4 + encrypt.length]; + ByteBuffer byteBuffer = ByteBuffer.wrap(bytes); + byteBuffer.putInt(-spec.version()) + .putInt(encrypt.length) + .put(encrypt); + signature = Base64.getEncoder().encodeToString(bytes); + } catch (IOException e) { + throw new IllegalStateException(e); + } + return License.builder().fromLicenseSpec(spec, signature).build(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java new file mode 100644 index 0000000000000..55b14a4d79280 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartBasicLicenseTests.java @@ -0,0 +1,127 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.transport.Netty4Plugin; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.XPackClientPlugin; + +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Collection; + +import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; + +@ESIntegTestCase.ClusterScope(scope = SUITE) +public class StartBasicLicenseTests extends AbstractLicensesIntegrationTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put("node.data", true) + .put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "basic") + .put(NetworkModule.HTTP_ENABLED.getKey(), true).build(); + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(LocalStateCompositeXPackPlugin.class, Netty4Plugin.class); + } + + @Override + protected Collection> transportClientPlugins() { + return Arrays.asList(XPackClientPlugin.class, Netty4Plugin.class); + } + + public void testStartBasicLicense() throws Exception { + LicensingClient licensingClient = new LicensingClient(client()); + License license = TestUtils.generateSignedLicense("trial", License.VERSION_CURRENT, -1, TimeValue.timeValueHours(24)); + licensingClient.preparePutLicense(license).get(); + + assertBusy(() -> { + GetLicenseResponse getLicenseResponse = licensingClient.prepareGetLicense().get(); + assertEquals("trial", getLicenseResponse.license().type()); + }); + + // Testing that you can start a basic license when you have no license + if (randomBoolean()) { + licensingClient.prepareDeleteLicense().get(); + assertNull(licensingClient.prepareGetLicense().get().license()); + } + + RestClient restClient = getRestClient(); + Response response = restClient.performRequest("GET", "/_xpack/license/basic_status"); + String body = Streams.copyToString(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8)); + assertEquals(200, response.getStatusLine().getStatusCode()); + assertEquals("{\"eligible_to_start_basic\":true}", body); + + Response response2 = restClient.performRequest("POST", "/_xpack/license/start_basic?acknowledge=true"); + String body2 = Streams.copyToString(new InputStreamReader(response2.getEntity().getContent(), StandardCharsets.UTF_8)); + assertEquals(200, response2.getStatusLine().getStatusCode()); + assertTrue(body2.contains("\"acknowledged\":true")); + assertTrue(body2.contains("\"basic_was_started\":true")); + + assertBusy(() -> { + GetLicenseResponse currentLicense = licensingClient.prepareGetLicense().get(); + assertEquals("basic", currentLicense.license().type()); + }); + + long expirationMillis = licensingClient.prepareGetLicense().get().license().expiryDate(); + assertEquals(LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS, expirationMillis); + + Response response3 = restClient.performRequest("GET", "/_xpack/license"); + String body3 = Streams.copyToString(new 
InputStreamReader(response3.getEntity().getContent(), StandardCharsets.UTF_8)); + assertTrue(body3.contains("\"type\" : \"basic\"")); + assertFalse(body3.contains("expiry_date")); + assertFalse(body3.contains("expiry_date_in_millis")); + + + Response response4 = restClient.performRequest("GET", "/_xpack/license/basic_status"); + String body4 = Streams.copyToString(new InputStreamReader(response4.getEntity().getContent(), StandardCharsets.UTF_8)); + assertEquals(200, response3.getStatusLine().getStatusCode()); + assertEquals("{\"eligible_to_start_basic\":false}", body4); + + ResponseException ex = expectThrows(ResponseException.class, + () -> restClient.performRequest("POST", "/_xpack/license/start_basic")); + Response response5 = ex.getResponse(); + String body5 = Streams.copyToString(new InputStreamReader(response5.getEntity().getContent(), StandardCharsets.UTF_8)); + assertEquals(403, response5.getStatusLine().getStatusCode()); + assertTrue(body5.contains("\"basic_was_started\":false")); + assertTrue(body5.contains("\"acknowledged\":true")); + assertTrue(body5.contains("\"error_message\":\"Operation failed: Current license is basic.\"")); + } + + public void testUnacknowledgedStartBasicLicense() throws Exception { + LicensingClient licensingClient = new LicensingClient(client()); + License license = TestUtils.generateSignedLicense("trial", License.VERSION_CURRENT, -1, TimeValue.timeValueHours(24)); + licensingClient.preparePutLicense(license).get(); + + assertBusy(() -> { + GetLicenseResponse getLicenseResponse = licensingClient.prepareGetLicense().get(); + assertEquals("trial", getLicenseResponse.license().type()); + }); + + Response response2 = getRestClient().performRequest("POST", "/_xpack/license/start_basic"); + String body2 = Streams.copyToString(new InputStreamReader(response2.getEntity().getContent(), StandardCharsets.UTF_8)); + assertEquals(200, response2.getStatusLine().getStatusCode()); + assertTrue(body2.contains("\"acknowledged\":false")); + assertTrue(body2.contains("\"basic_was_started\":false")); + assertTrue(body2.contains("\"error_message\":\"Operation failed: Needs acknowledgement.\"")); + assertTrue(body2.contains("\"message\":\"This license update requires acknowledgement. To acknowledge the license, " + + "please read the following messages and call /start_basic again, this time with the \\\"acknowledge=true\\\"")); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java new file mode 100644 index 0000000000000..b7a09d24b1359 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java @@ -0,0 +1,127 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.transport.Netty4Plugin; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.XPackClientPlugin; + +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Collection; + +import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; + +@ESIntegTestCase.ClusterScope(scope = SUITE) +public class StartTrialLicenseTests extends AbstractLicensesIntegrationTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put("node.data", true) + .put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "basic") + .put(NetworkModule.HTTP_ENABLED.getKey(), true).build(); + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(LocalStateCompositeXPackPlugin.class, Netty4Plugin.class); + } + + @Override + protected Collection> transportClientPlugins() { + return Arrays.asList(XPackClientPlugin.class, Netty4Plugin.class); + } + + public void testStartTrial() throws Exception { + LicensingClient licensingClient = new LicensingClient(client()); + ensureStartingWithBasic(); + + RestClient restClient = getRestClient(); + Response response = restClient.performRequest("GET", "/_xpack/license/trial_status"); + String body = Streams.copyToString(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8)); + assertEquals(200, response.getStatusLine().getStatusCode()); + assertEquals("{\"eligible_to_start_trial\":true}", body); + + // Test that starting will fail without acknowledgement + Response response2 = restClient.performRequest("POST", "/_xpack/license/start_trial"); + String body2 = Streams.copyToString(new InputStreamReader(response2.getEntity().getContent(), StandardCharsets.UTF_8)); + assertEquals(200, response2.getStatusLine().getStatusCode()); + assertTrue(body2.contains("\"trial_was_started\":false")); + assertTrue(body2.contains("\"error_message\":\"Operation failed: Needs acknowledgement.\"")); + assertTrue(body2.contains("\"acknowledged\":false")); + + assertBusy(() -> { + GetLicenseResponse getLicenseResponse = licensingClient.prepareGetLicense().get(); + assertEquals("basic", getLicenseResponse.license().type()); + }); + + String type = randomFrom(LicenseService.VALID_TRIAL_TYPES); + + Response response3 = restClient.performRequest("POST", "/_xpack/license/start_trial?acknowledge=true&type=" + type); + String body3 = Streams.copyToString(new InputStreamReader(response3.getEntity().getContent(), StandardCharsets.UTF_8)); + assertEquals(200, response3.getStatusLine().getStatusCode()); + assertTrue(body3.contains("\"trial_was_started\":true")); + assertTrue(body3.contains("\"type\":\"" + type + "\"")); + assertTrue(body3.contains("\"acknowledged\":true")); + + assertBusy(() -> { + GetLicenseResponse postTrialLicenseResponse = licensingClient.prepareGetLicense().get(); + assertEquals(type, postTrialLicenseResponse.license().type()); + }); + + Response response4 = restClient.performRequest("GET", 
"/_xpack/license/trial_status"); + String body4 = Streams.copyToString(new InputStreamReader(response4.getEntity().getContent(), StandardCharsets.UTF_8)); + assertEquals(200, response4.getStatusLine().getStatusCode()); + assertEquals("{\"eligible_to_start_trial\":false}", body4); + + String secondAttemptType = randomFrom(LicenseService.VALID_TRIAL_TYPES); + + ResponseException ex = expectThrows(ResponseException.class, + () -> restClient.performRequest("POST", "/_xpack/license/start_trial?acknowledge=true&type=" + secondAttemptType)); + Response response5 = ex.getResponse(); + String body5 = Streams.copyToString(new InputStreamReader(response5.getEntity().getContent(), StandardCharsets.UTF_8)); + assertEquals(403, response5.getStatusLine().getStatusCode()); + assertTrue(body5.contains("\"trial_was_started\":false")); + assertTrue(body5.contains("\"error_message\":\"Operation failed: Trial was already activated.\"")); + } + + public void testInvalidType() throws Exception { + ensureStartingWithBasic(); + + ResponseException ex = expectThrows(ResponseException.class, () -> + getRestClient().performRequest("POST", "/_xpack/license/start_trial?type=basic")); + Response response = ex.getResponse(); + String body = Streams.copyToString(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8)); + assertEquals(400, response.getStatusLine().getStatusCode()); + assertTrue(body.contains("\"type\":\"illegal_argument_exception\"")); + assertTrue(body.contains("\"reason\":\"Cannot start trial of type [basic]. Valid trial types are [")); + } + + private void ensureStartingWithBasic() throws Exception { + LicensingClient licensingClient = new LicensingClient(client()); + GetLicenseResponse getLicenseResponse = licensingClient.prepareGetLicense().get(); + + if ("basic".equals(getLicenseResponse.license().type()) == false) { + licensingClient.preparePostStartBasic().setAcknowledge(true).get(); + } + + assertBusy(() -> { + GetLicenseResponse postTrialLicenseResponse = licensingClient.prepareGetLicense().get(); + assertEquals("basic", postTrialLicenseResponse.license().type()); + }); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/TestUtils.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/TestUtils.java new file mode 100644 index 0000000000000..e8fa6f32a9bb5 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/TestUtils.java @@ -0,0 +1,378 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import com.carrotsearch.randomizedtesting.RandomizedTest; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.joda.DateMathParser; +import org.elasticsearch.common.joda.FormatDateTimeFormatter; +import org.elasticsearch.common.joda.Joda; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.license.licensor.LicenseSigner; +import org.hamcrest.MatcherAssert; +import org.joda.time.format.DateTimeFormatter; +import org.junit.Assert; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; + +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomBoolean; +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomInt; +import static org.apache.lucene.util.LuceneTestCase.createTempFile; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.ESTestCase.randomAlphaOfLength; +import static org.elasticsearch.test.ESTestCase.randomFrom; +import static org.elasticsearch.test.ESTestCase.randomIntBetween; +import static org.hamcrest.core.IsEqual.equalTo; +import static org.junit.Assert.assertThat; + +public class TestUtils { + + private static final FormatDateTimeFormatter formatDateTimeFormatter = Joda.forPattern("yyyy-MM-dd"); + private static final DateMathParser dateMathParser = new DateMathParser(formatDateTimeFormatter); + private static final DateTimeFormatter dateTimeFormatter = formatDateTimeFormatter.printer(); + + public static String dateMathString(String time, final long now) { + return dateTimeFormatter.print(dateMathParser.parse(time, () -> now)); + } + + public static long dateMath(String time, final long now) { + return dateMathParser.parse(time, () -> now); + } + + public static LicenseSpec generateRandomLicenseSpec(int version) { + boolean datesInMillis = randomBoolean(); + long now = System.currentTimeMillis(); + String uid = UUID.randomUUID().toString(); + String feature = "feature__" + randomInt(); + String issuer = "issuer__" + randomInt(); + String issuedTo = "issuedTo__" + randomInt(); + final String type; + final String subscriptionType; + if (version < License.VERSION_NO_FEATURE_TYPE) { + subscriptionType = randomFrom("gold", "silver", "platinum"); + type = "subscription";//randomFrom("subscription", "internal", "development"); + } else { + subscriptionType = null; + type = randomFrom("basic", "dev", "gold", "silver", "platinum"); + } + int maxNodes = RandomizedTest.randomIntBetween(5, 100); + if (datesInMillis) { + long issueDateInMillis = dateMath("now", now); + long expiryDateInMillis = dateMath("now+10d/d", now); + return new LicenseSpec(version, uid, feature, issueDateInMillis, expiryDateInMillis, type, subscriptionType, issuedTo, issuer, + maxNodes); + } else { + String issueDate = dateMathString("now", now); + String expiryDate = 
dateMathString("now+10d/d", now); + return new LicenseSpec(version, uid, feature, issueDate, expiryDate, type, subscriptionType, issuedTo, issuer, maxNodes); + } + } + + public static String generateLicenseSpecString(LicenseSpec licenseSpec) throws IOException { + XContentBuilder licenses = jsonBuilder(); + licenses.startObject(); + licenses.startArray("licenses"); + licenses.startObject() + .field("uid", licenseSpec.uid) + .field("type", licenseSpec.type) + .field("subscription_type", licenseSpec.subscriptionType) + .field("issued_to", licenseSpec.issuedTo) + .field("issuer", licenseSpec.issuer) + .field("feature", licenseSpec.feature) + .field("max_nodes", licenseSpec.maxNodes); + + if (licenseSpec.issueDate != null) { + licenses.field("issue_date", licenseSpec.issueDate); + } else { + licenses.field("issue_date_in_millis", licenseSpec.issueDateInMillis); + } + if (licenseSpec.expiryDate != null) { + licenses.field("expiry_date", licenseSpec.expiryDate); + } else { + licenses.field("expiry_date_in_millis", licenseSpec.expiryDateInMillis); + } + licenses.field("version", licenseSpec.version); + licenses.endObject(); + licenses.endArray(); + licenses.endObject(); + return Strings.toString(licenses); + } + + public static License generateLicenses(LicenseSpec spec) { + License.Builder builder = License.builder() + .uid(spec.uid) + .feature(spec.feature) + .type(spec.type) + .subscriptionType(spec.subscriptionType) + .issuedTo(spec.issuedTo) + .issuer(spec.issuer) + .maxNodes(spec.maxNodes); + + if (spec.expiryDate != null) { + builder.expiryDate(DateUtils.endOfTheDay(spec.expiryDate)); + } else { + builder.expiryDate(spec.expiryDateInMillis); + } + if (spec.issueDate != null) { + builder.issueDate(DateUtils.beginningOfTheDay(spec.issueDate)); + } else { + builder.issueDate(spec.issueDateInMillis); + } + return builder.build(); + } + + public static void assertLicenseSpec(LicenseSpec spec, License license) { + MatcherAssert.assertThat(license.uid(), equalTo(spec.uid)); + MatcherAssert.assertThat(license.issuedTo(), equalTo(spec.issuedTo)); + MatcherAssert.assertThat(license.issuer(), equalTo(spec.issuer)); + MatcherAssert.assertThat(license.type(), equalTo(spec.type)); + MatcherAssert.assertThat(license.maxNodes(), equalTo(spec.maxNodes)); + if (spec.issueDate != null) { + MatcherAssert.assertThat(license.issueDate(), equalTo(DateUtils.beginningOfTheDay(spec.issueDate))); + } else { + MatcherAssert.assertThat(license.issueDate(), equalTo(spec.issueDateInMillis)); + } + if (spec.expiryDate != null) { + MatcherAssert.assertThat(license.expiryDate(), equalTo(DateUtils.endOfTheDay(spec.expiryDate))); + } else { + MatcherAssert.assertThat(license.expiryDate(), equalTo(spec.expiryDateInMillis)); + } + } + + public static class LicenseSpec { + public final int version; + public final String feature; + public final String issueDate; + public final long issueDateInMillis; + public final String expiryDate; + public final long expiryDateInMillis; + public final String uid; + public final String type; + public final String subscriptionType; + public final String issuedTo; + public final String issuer; + public final int maxNodes; + + public LicenseSpec(String issueDate, String expiryDate) { + this(License.VERSION_CURRENT, UUID.randomUUID().toString(), "feature", issueDate, expiryDate, "trial", "none", "customer", + "elasticsearch", 5); + } + + public LicenseSpec(int version, String uid, String feature, long issueDateInMillis, long expiryDateInMillis, String type, + String subscriptionType, String 
issuedTo, String issuer, int maxNodes) { + this.version = version; + this.feature = feature; + this.issueDateInMillis = issueDateInMillis; + this.issueDate = null; + this.expiryDateInMillis = expiryDateInMillis; + this.expiryDate = null; + this.uid = uid; + this.type = type; + this.subscriptionType = subscriptionType; + this.issuedTo = issuedTo; + this.issuer = issuer; + this.maxNodes = maxNodes; + } + + public LicenseSpec(int version, String uid, String feature, String issueDate, String expiryDate, String type, + String subscriptionType, String issuedTo, String issuer, int maxNodes) { + this.version = version; + this.feature = feature; + this.issueDate = issueDate; + this.issueDateInMillis = -1; + this.expiryDate = expiryDate; + this.expiryDateInMillis = -1; + this.uid = uid; + this.type = type; + this.subscriptionType = subscriptionType; + this.issuedTo = issuedTo; + this.issuer = issuer; + this.maxNodes = maxNodes; + } + } + + public static Path getTestPriKeyPath() throws Exception { + return getResourcePath("/private.key"); + } + + public static Path getTestPubKeyPath() throws Exception { + return getResourcePath("/public.key"); + } + + public static String dumpLicense(License license) throws Exception { + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + builder.startObject(); + builder.startObject("license"); + license.toInnerXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + builder.endObject(); + return Strings.toString(builder); + } + + public static License generateSignedLicense(TimeValue expiryDuration) throws Exception { + return generateSignedLicense(null, -1, expiryDuration); + } + + public static License generateSignedLicense(String type, TimeValue expiryDuration) throws Exception { + return generateSignedLicense(type, -1, expiryDuration); + } + + public static License generateSignedLicense(long issueDate, TimeValue expiryDuration) throws Exception { + return generateSignedLicense(null, issueDate, expiryDuration); + } + + public static License generateSignedLicense(String type, long issueDate, TimeValue expiryDuration) throws Exception { + return generateSignedLicense(type, randomIntBetween(License.VERSION_START, License.VERSION_CURRENT), issueDate, expiryDuration); + } + + /** + * This method which chooses the license type randomly if the type is null. However, it will not randomly + * choose trial or basic types as those types can only be self-generated. + */ + public static License generateSignedLicense(String type, int version, long issueDate, TimeValue expiryDuration) throws Exception { + long issue = (issueDate != -1L) ? issueDate : System.currentTimeMillis() - TimeValue.timeValueHours(2).getMillis(); + final String licenseType; + if (version < License.VERSION_NO_FEATURE_TYPE) { + licenseType = randomFrom("subscription", "internal", "development"); + } else { + licenseType = (type != null) ? type : randomFrom("silver", "dev", "gold", "platinum"); + } + final License.Builder builder = License.builder() + .uid(UUID.randomUUID().toString()) + .version(version) + .expiryDate(System.currentTimeMillis() + expiryDuration.getMillis()) + .issueDate(issue) + .type(licenseType) + .issuedTo("customer") + .issuer("elasticsearch") + .maxNodes(5); + if (version == License.VERSION_START) { + builder.subscriptionType((type != null) ? 
type : randomFrom("dev", "gold", "platinum", "silver")); + builder.feature(randomAlphaOfLength(10)); + } + LicenseSigner signer = new LicenseSigner(getTestPriKeyPath(), getTestPubKeyPath()); + return signer.sign(builder.build()); + } + + public static License generateSignedLicense(License.Builder builder) throws Exception { + LicenseSigner signer = new LicenseSigner(getTestPriKeyPath(), getTestPubKeyPath()); + return signer.sign(builder.build()); + } + + public static License generateExpiredNonBasicLicense(long expiryDate) throws Exception { + return generateExpiredNonBasicLicense(randomFrom("silver", "dev", "gold", "platinum"), expiryDate); + } + + public static License generateExpiredNonBasicLicense() throws Exception { + return generateExpiredNonBasicLicense(randomFrom("silver", "dev", "gold", "platinum")); + } + + public static License generateExpiredNonBasicLicense(String type) throws Exception { + return generateExpiredNonBasicLicense(type, + System.currentTimeMillis() - TimeValue.timeValueHours(randomIntBetween(1, 10)).getMillis()); + } + + public static License generateExpiredNonBasicLicense(String type, long expiryDate) throws Exception { + final License.Builder builder = License.builder() + .uid(UUID.randomUUID().toString()) + .version(License.VERSION_CURRENT) + .expiryDate(expiryDate) + .issueDate(expiryDate - TimeValue.timeValueMinutes(10).getMillis()) + .type(type) + .issuedTo("customer") + .issuer("elasticsearch") + .maxNodes(5); + LicenseSigner signer = new LicenseSigner(getTestPriKeyPath(), getTestPubKeyPath()); + return signer.sign(builder.build()); + } + + private static Path getResourcePath(String resource) throws Exception { + Path resourceFile = createTempFile(); + try (InputStream resourceInput = TestUtils.class.getResourceAsStream(resource)) { + Files.copy(resourceInput, resourceFile, StandardCopyOption.REPLACE_EXISTING); + } + return resourceFile; + } + + public static void registerAndAckSignedLicenses(final LicenseService licenseService, License license, + final LicensesStatus expectedStatus) { + PutLicenseRequest putLicenseRequest = new PutLicenseRequest().license(license).acknowledge(true); + final CountDownLatch latch = new CountDownLatch(1); + final AtomicReference status = new AtomicReference<>(); + licenseService.registerLicense(putLicenseRequest, new ActionListener() { + @Override + public void onResponse(PutLicenseResponse licensesUpdateResponse) { + status.set(licensesUpdateResponse.status()); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + latch.countDown(); + } + }); + try { + latch.await(); + } catch (InterruptedException e) { + Assert.fail(e.getMessage()); + } + assertThat(status.get(), equalTo(expectedStatus)); + } + + public static class AssertingLicenseState extends XPackLicenseState { + public final List modeUpdates = new ArrayList<>(); + public final List activeUpdates = new ArrayList<>(); + + public AssertingLicenseState() { + super(Settings.EMPTY); + } + + @Override + void update(License.OperationMode mode, boolean active) { + modeUpdates.add(mode); + activeUpdates.add(active); + } + } + + /** + * A license state that makes the {@link #update(License.OperationMode, boolean)} + * method public for use in tests. 
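To make the intent of these license-state test helpers concrete, here is a minimal sketch of how a test might drive them. The test method name and the JUnit-style assertions are assumed; `UpdatableLicenseState`, `update(...)` and `isWatcherAllowed()` are the utilities defined in this changeset.

```java
// assumes: import org.elasticsearch.common.settings.Settings;
//          import org.elasticsearch.license.License.OperationMode;
public void testWatcherFollowsLicenseMode() {
    // Flip the effective license mode in-process instead of installing a real license.
    XPackLicenseState licenseState = new TestUtils.UpdatableLicenseState(Settings.EMPTY);

    licenseState.update(OperationMode.PLATINUM, true);   // active platinum license
    assertTrue(licenseState.isWatcherAllowed());

    licenseState.update(OperationMode.BASIC, true);      // basic does not include watcher
    assertFalse(licenseState.isWatcherAllowed());
}
```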
+ */ + public static class UpdatableLicenseState extends XPackLicenseState { + public UpdatableLicenseState() { + this(Settings.EMPTY); + } + + public UpdatableLicenseState(Settings settings) { + super(settings); + } + + @Override + public void update(License.OperationMode mode, boolean active) { + super.update(mode, active); + } + } + + public static void putLicense(MetaData.Builder builder, License license) { + builder.putCustom(LicensesMetaData.TYPE, new LicensesMetaData(license, null)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java new file mode 100644 index 0000000000000..335932df770e8 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java @@ -0,0 +1,417 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.license; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.License.OperationMode; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackSettings; + +import java.util.Arrays; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import static org.elasticsearch.license.License.OperationMode.BASIC; +import static org.elasticsearch.license.License.OperationMode.GOLD; +import static org.elasticsearch.license.License.OperationMode.MISSING; +import static org.elasticsearch.license.License.OperationMode.PLATINUM; +import static org.elasticsearch.license.License.OperationMode.STANDARD; +import static org.elasticsearch.license.License.OperationMode.TRIAL; +import static org.hamcrest.Matchers.is; + +/** + * Unit tests for the {@link XPackLicenseState} + */ +public class XPackLicenseStateTests extends ESTestCase { + + /** Creates a license state with the given license type and active state, and checks the given method returns expected. */ + void assertAllowed(OperationMode mode, boolean active, Predicate predicate, boolean expected) { + XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); + licenseState.update(mode, active); + assertEquals(expected, predicate.test(licenseState)); + } + + /** + * Checks the ack message going from the {@code from} license type to {@code to} license type. + * TODO: check the actual messages, not just the number of them! This was copied from previous license tests... 
+ */ + void assertAckMesssages(String feature, OperationMode from, OperationMode to, int expectedMessages) { + String[] gotMessages = XPackLicenseState.ACKNOWLEDGMENT_MESSAGES.get(feature).apply(from, to); + assertEquals(expectedMessages, gotMessages.length); + } + + static T randomFrom(T[] values, Predicate filter) { + return randomFrom(Arrays.stream(values).filter(filter).collect(Collectors.toList())); + } + + static OperationMode randomMode() { + return randomFrom(OperationMode.values()); + } + + public static OperationMode randomTrialStandardGoldOrPlatinumMode() { + return randomFrom(TRIAL, STANDARD, GOLD, PLATINUM); + } + + public static OperationMode randomTrialOrPlatinumMode() { + return randomFrom(TRIAL, PLATINUM); + } + + public static OperationMode randomTrialBasicStandardGoldOrPlatinumMode() { + return randomFrom(TRIAL, BASIC, STANDARD, GOLD, PLATINUM); + } + + public static OperationMode randomBasicStandardOrGold() { + return randomFrom(BASIC, STANDARD, GOLD); + } + + public void testSecurityDefaults() { + XPackLicenseState licenseState = + new XPackLicenseState(Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build()); + assertThat(licenseState.isAuthAllowed(), is(true)); + assertThat(licenseState.isIpFilteringAllowed(), is(true)); + assertThat(licenseState.isAuditingAllowed(), is(true)); + assertThat(licenseState.isStatsAndHealthAllowed(), is(true)); + assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(true)); + assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.ALL)); + assertThat(licenseState.isCustomRoleProvidersAllowed(), is(true)); + + licenseState = new XPackLicenseState(Settings.EMPTY); + assertThat(licenseState.isAuthAllowed(), is(true)); + assertThat(licenseState.isIpFilteringAllowed(), is(true)); + assertThat(licenseState.isAuditingAllowed(), is(true)); + assertThat(licenseState.isStatsAndHealthAllowed(), is(true)); + assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(true)); + assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.ALL)); + assertThat(licenseState.isCustomRoleProvidersAllowed(), is(true)); + } + + public void testSecurityBasic() { + XPackLicenseState licenseState = new XPackLicenseState(randomFrom(Settings.EMPTY, + Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build())); + licenseState.update(BASIC, true); + + assertThat(licenseState.isAuthAllowed(), is(false)); + assertThat(licenseState.isIpFilteringAllowed(), is(false)); + assertThat(licenseState.isAuditingAllowed(), is(false)); + assertThat(licenseState.isStatsAndHealthAllowed(), is(true)); + assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(false)); + assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.NONE)); + assertThat(licenseState.isCustomRoleProvidersAllowed(), is(false)); + } + + public void testSecurityBasicExpired() { + XPackLicenseState licenseState = new XPackLicenseState(randomFrom(Settings.EMPTY, + Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build())); + licenseState.update(BASIC, false); + + assertThat(licenseState.isAuthAllowed(), is(false)); + assertThat(licenseState.isIpFilteringAllowed(), is(false)); + assertThat(licenseState.isAuditingAllowed(), is(false)); + assertThat(licenseState.isStatsAndHealthAllowed(), is(false)); + assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(false)); + assertThat(licenseState.allowedRealmType(), 
is(XPackLicenseState.AllowedRealmType.NONE)); + assertThat(licenseState.isCustomRoleProvidersAllowed(), is(false)); + } + + public void testSecurityStandard() { + XPackLicenseState licenseState = new XPackLicenseState(randomFrom(Settings.EMPTY, + Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build())); + licenseState.update(STANDARD, true); + + assertThat(licenseState.isAuthAllowed(), is(true)); + assertThat(licenseState.isIpFilteringAllowed(), is(false)); + assertThat(licenseState.isAuditingAllowed(), is(false)); + assertThat(licenseState.isStatsAndHealthAllowed(), is(true)); + assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(false)); + assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.NATIVE)); + assertThat(licenseState.isCustomRoleProvidersAllowed(), is(false)); + } + + public void testSecurityStandardExpired() { + XPackLicenseState licenseState = new XPackLicenseState(randomFrom(Settings.EMPTY, + Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build())); + licenseState.update(STANDARD, false); + + assertThat(licenseState.isAuthAllowed(), is(true)); + assertThat(licenseState.isIpFilteringAllowed(), is(false)); + assertThat(licenseState.isAuditingAllowed(), is(false)); + assertThat(licenseState.isStatsAndHealthAllowed(), is(false)); + assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(false)); + assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.NATIVE)); + assertThat(licenseState.isCustomRoleProvidersAllowed(), is(false)); + } + + public void testSecurityGold() { + XPackLicenseState licenseState = new XPackLicenseState(randomFrom(Settings.EMPTY, + Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build())); + licenseState.update(GOLD, true); + + assertThat(licenseState.isAuthAllowed(), is(true)); + assertThat(licenseState.isIpFilteringAllowed(), is(true)); + assertThat(licenseState.isAuditingAllowed(), is(true)); + assertThat(licenseState.isStatsAndHealthAllowed(), is(true)); + assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(false)); + assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.DEFAULT)); + assertThat(licenseState.isCustomRoleProvidersAllowed(), is(false)); + } + + public void testSecurityGoldExpired() { + XPackLicenseState licenseState = new XPackLicenseState(randomFrom(Settings.EMPTY, + Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build())); + licenseState.update(GOLD, false); + + assertThat(licenseState.isAuthAllowed(), is(true)); + assertThat(licenseState.isIpFilteringAllowed(), is(true)); + assertThat(licenseState.isAuditingAllowed(), is(true)); + assertThat(licenseState.isStatsAndHealthAllowed(), is(false)); + assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(false)); + assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.DEFAULT)); + assertThat(licenseState.isCustomRoleProvidersAllowed(), is(false)); + } + + public void testSecurityPlatinum() { + XPackLicenseState licenseState = new XPackLicenseState(randomFrom(Settings.EMPTY, + Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build())); + licenseState.update(PLATINUM, true); + + assertThat(licenseState.isAuthAllowed(), is(true)); + assertThat(licenseState.isIpFilteringAllowed(), is(true)); + assertThat(licenseState.isAuditingAllowed(), is(true)); + assertThat(licenseState.isStatsAndHealthAllowed(), is(true)); + 
assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(true)); + assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.ALL)); + assertThat(licenseState.isCustomRoleProvidersAllowed(), is(true)); + } + + public void testSecurityPlatinumExpired() { + XPackLicenseState licenseState = new XPackLicenseState(randomFrom(Settings.EMPTY, + Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build())); + licenseState.update(PLATINUM, false); + + assertThat(licenseState.isAuthAllowed(), is(true)); + assertThat(licenseState.isIpFilteringAllowed(), is(true)); + assertThat(licenseState.isAuditingAllowed(), is(true)); + assertThat(licenseState.isStatsAndHealthAllowed(), is(false)); + assertThat(licenseState.isDocumentAndFieldLevelSecurityAllowed(), is(true)); + assertThat(licenseState.allowedRealmType(), is(XPackLicenseState.AllowedRealmType.ALL)); + assertThat(licenseState.isCustomRoleProvidersAllowed(), is(false)); + } + + public void testSecurityAckBasicToNotGoldOrStandard() { + OperationMode toMode = randomFrom(OperationMode.values(), mode -> mode != GOLD && mode != STANDARD); + assertAckMesssages(XPackField.SECURITY, BASIC, toMode, 0); + } + + public void testSecurityAckAnyToTrialOrPlatinum() { + assertAckMesssages(XPackField.SECURITY, randomMode(), randomTrialOrPlatinumMode(), 0); + } + + public void testSecurityAckTrialStandardGoldOrPlatinumToBasic() { + assertAckMesssages(XPackField.SECURITY, randomTrialStandardGoldOrPlatinumMode(), BASIC, 3); + } + + public void testSecurityAckAnyToStandard() { + OperationMode from = randomFrom(BASIC, GOLD, PLATINUM, TRIAL); + assertAckMesssages(XPackField.SECURITY, from, STANDARD, 4); + } + + public void testSecurityAckBasicStandardTrialOrPlatinumToGold() { + OperationMode from = randomFrom(BASIC, PLATINUM, TRIAL, STANDARD); + assertAckMesssages(XPackField.SECURITY, from, GOLD, 2); + } + + public void testMonitoringAckBasicToAny() { + assertAckMesssages(XPackField.MONITORING, BASIC, randomMode(), 0); + } + + public void testMonitoringAckAnyToTrialGoldOrPlatinum() { + assertAckMesssages(XPackField.MONITORING, randomMode(), randomTrialStandardGoldOrPlatinumMode(), 0); + } + + public void testMonitoringAckNotBasicToBasic() { + OperationMode from = randomFrom(STANDARD, GOLD, PLATINUM, TRIAL); + assertAckMesssages(XPackField.MONITORING, from, BASIC, 2); + } + + public void testMonitoringAllowed() { + assertAllowed(randomMode(), true, XPackLicenseState::isMonitoringAllowed, true); + assertAllowed(randomMode(), false, XPackLicenseState::isMonitoringAllowed, false); + } + + public void testMonitoringUpdateRetention() { + assertAllowed(STANDARD, true, XPackLicenseState::isUpdateRetentionAllowed, true); + assertAllowed(GOLD, true, XPackLicenseState::isUpdateRetentionAllowed, true); + assertAllowed(PLATINUM, true, XPackLicenseState::isUpdateRetentionAllowed, true); + assertAllowed(TRIAL, true, XPackLicenseState::isUpdateRetentionAllowed, true); + assertAllowed(BASIC, true, XPackLicenseState::isUpdateRetentionAllowed, false); + assertAllowed(MISSING, false, XPackLicenseState::isUpdateRetentionAllowed, false); + } + + public void testWatcherPlatinumGoldTrialStandard() throws Exception { + assertAllowed(TRIAL, true, XPackLicenseState::isWatcherAllowed, true); + assertAllowed(GOLD, true, XPackLicenseState::isWatcherAllowed, true); + assertAllowed(PLATINUM, true, XPackLicenseState::isWatcherAllowed, true); + assertAllowed(STANDARD, true, XPackLicenseState::isWatcherAllowed, true); + } + + public void 
testWatcherBasicLicense() throws Exception { + assertAllowed(BASIC, true, XPackLicenseState::isWatcherAllowed, false); + } + + public void testWatcherInactive() { + assertAllowed(BASIC, false, XPackLicenseState::isWatcherAllowed, false); + } + + public void testWatcherInactivePlatinumGoldTrial() throws Exception { + assertAllowed(TRIAL, false, XPackLicenseState::isWatcherAllowed, false); + assertAllowed(GOLD, false, XPackLicenseState::isWatcherAllowed, false); + assertAllowed(PLATINUM, false, XPackLicenseState::isWatcherAllowed, false); + assertAllowed(STANDARD, false, XPackLicenseState::isWatcherAllowed, false); + } + + public void testGraphPlatinumTrial() throws Exception { + assertAllowed(TRIAL, true, XPackLicenseState::isGraphAllowed, true); + assertAllowed(PLATINUM, true, XPackLicenseState::isGraphAllowed, true); + } + + public void testGraphBasic() throws Exception { + assertAllowed(BASIC, true, XPackLicenseState::isGraphAllowed, false); + } + + public void testGraphStandard() throws Exception { + assertAllowed(STANDARD, true, XPackLicenseState::isGraphAllowed, false); + } + + public void testGraphInactiveBasic() { + assertAllowed(BASIC, false, XPackLicenseState::isGraphAllowed, false); + } + + public void testGraphInactivePlatinumTrial() throws Exception { + assertAllowed(TRIAL, false, XPackLicenseState::isMachineLearningAllowed, false); + assertAllowed(PLATINUM, false, XPackLicenseState::isMachineLearningAllowed, false); + } + + public void testMachineLearningPlatinumTrial() throws Exception { + assertAllowed(TRIAL, true, XPackLicenseState::isMachineLearningAllowed, true); + assertAllowed(PLATINUM, true, XPackLicenseState::isMachineLearningAllowed, true); + } + + public void testMachineLearningBasic() throws Exception { + assertAllowed(BASIC, true, XPackLicenseState::isMachineLearningAllowed, false); + } + + public void testMachineLearningStandard() throws Exception { + assertAllowed(STANDARD, true, XPackLicenseState::isMachineLearningAllowed, false); + } + + public void testMachineLearningInactiveBasic() { + assertAllowed(BASIC, false, XPackLicenseState::isMachineLearningAllowed, false); + } + + public void testMachineLearningInactivePlatinumTrial() throws Exception { + assertAllowed(TRIAL, false, XPackLicenseState::isMachineLearningAllowed, false); + assertAllowed(PLATINUM, false, XPackLicenseState::isMachineLearningAllowed, false); + } + + public void testLogstashPlatinumGoldTrialStandard() throws Exception { + assertAllowed(TRIAL, true, XPackLicenseState::isLogstashAllowed, true); + assertAllowed(GOLD, true, XPackLicenseState::isLogstashAllowed, true); + assertAllowed(PLATINUM, true, XPackLicenseState::isLogstashAllowed, true); + assertAllowed(STANDARD, true, XPackLicenseState::isLogstashAllowed, true); + } + + public void testLogstashBasicLicense() throws Exception { + assertAllowed(BASIC, true, XPackLicenseState::isLogstashAllowed, false); + } + + public void testLogstashInactive() { + assertAllowed(BASIC, false, XPackLicenseState::isLogstashAllowed, false); + assertAllowed(TRIAL, false, XPackLicenseState::isLogstashAllowed, false); + assertAllowed(GOLD, false, XPackLicenseState::isLogstashAllowed, false); + assertAllowed(PLATINUM, false, XPackLicenseState::isLogstashAllowed, false); + assertAllowed(STANDARD, false, XPackLicenseState::isLogstashAllowed, false); + } + + public void testSqlDefaults() { + XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); + assertThat(licenseState.isSqlAllowed(), is(true)); + assertThat(licenseState.isJdbcAllowed(), 
is(true)); + } + + public void testSqlBasic() { + XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); + licenseState.update(BASIC, true); + + assertThat(licenseState.isSqlAllowed(), is(true)); + assertThat(licenseState.isJdbcAllowed(), is(false)); + } + + public void testSqlBasicExpired() { + XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); + licenseState.update(BASIC, false); + + assertThat(licenseState.isSqlAllowed(), is(false)); + assertThat(licenseState.isJdbcAllowed(), is(false)); + } + + public void testSqlStandard() { + XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); + licenseState.update(STANDARD, true); + + assertThat(licenseState.isSqlAllowed(), is(true)); + assertThat(licenseState.isJdbcAllowed(), is(false)); + } + + public void testSqlStandardExpired() { + XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); + licenseState.update(STANDARD, false); + + assertThat(licenseState.isSqlAllowed(), is(false)); + assertThat(licenseState.isJdbcAllowed(), is(false)); + } + + public void testSqlGold() { + XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); + licenseState.update(GOLD, true); + + assertThat(licenseState.isSqlAllowed(), is(true)); + assertThat(licenseState.isJdbcAllowed(), is(false)); + } + + public void testSqlGoldExpired() { + XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); + licenseState.update(GOLD, false); + + assertThat(licenseState.isSqlAllowed(), is(false)); + assertThat(licenseState.isJdbcAllowed(), is(false)); + } + + public void testSqlPlatinum() { + XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); + licenseState.update(PLATINUM, true); + + assertThat(licenseState.isSqlAllowed(), is(true)); + assertThat(licenseState.isJdbcAllowed(), is(true)); + } + + public void testSqlPlatinumExpired() { + XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); + licenseState.update(PLATINUM, false); + + assertThat(licenseState.isSqlAllowed(), is(false)); + assertThat(licenseState.isJdbcAllowed(), is(false)); + } + + public void testSqlAckAnyToTrialOrPlatinum() { + assertAckMesssages(XPackField.SQL, randomMode(), randomTrialOrPlatinumMode(), 0); + } + + public void testSqlAckTrialOrPlatinumToNotTrialOrPlatinum() { + assertAckMesssages(XPackField.SQL, randomTrialOrPlatinumMode(), randomBasicStandardOrGold(), 1); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/script/MockMustacheScriptEngine.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/script/MockMustacheScriptEngine.java new file mode 100644 index 0000000000000..4f9b125a9fd6b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/script/MockMustacheScriptEngine.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.script; + +import org.elasticsearch.common.settings.Settings; + +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.function.Function; + +/** + * A mock script engine that registers itself under the 'mustache' name so that + * TextTemplateEngine (watcher) + * uses it and adds validation that watcher tests don't rely on mustache templating/ + */ +public class MockMustacheScriptEngine extends MockScriptEngine { + + public static final String NAME = "mustache"; + + public static class TestPlugin extends MockScriptPlugin { + @Override + public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { + return new MockMustacheScriptEngine(); + } + + @Override + protected Map, Object>> pluginScripts() { + return Collections.emptyMap(); + } + } + + @Override + public String getType() { + return NAME; + } + + @Override + public T compile(String name, String script, ScriptContext context, Map params) { + if (script.contains("{{") && script.contains("}}")) { + throw new IllegalArgumentException("Fix your test to not rely on mustache"); + } + if (context.instanceClazz.equals(TemplateScript.class) == false) { + throw new IllegalArgumentException("mock mustache only understands template scripts, not [" + context.name + "]"); + } + return context.factoryClazz.cast((TemplateScript.Factory) vars -> + new TemplateScript(vars) { + @Override + public String execute() { + return script; + } + }); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/test/SecuritySettingsSourceField.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/SecuritySettingsSourceField.java new file mode 100644 index 0000000000000..12082b770436d --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/SecuritySettingsSourceField.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.test; + +import org.elasticsearch.common.settings.SecureString; + +public final class SecuritySettingsSourceField { + public static final SecureString TEST_PASSWORD_SECURE_STRING = new SecureString("x-pack-test-password".toCharArray()); + public static final String TEST_PASSWORD = "x-pack-test-password"; + + private SecuritySettingsSourceField() {} +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/test/TestMatchers.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/TestMatchers.java new file mode 100644 index 0000000000000..9fd1d64323eb5 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/TestMatchers.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.test; + +import java.nio.file.Files; +import java.nio.file.LinkOption; +import java.nio.file.Path; +import java.util.function.Predicate; +import java.util.regex.Pattern; + +import org.hamcrest.CustomMatcher; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; + +public class TestMatchers extends Matchers { + + public static Matcher pathExists(Path path, LinkOption... 
options) { + return new CustomMatcher("Path " + path + " exists") { + @Override + public boolean matches(Object item) { + return Files.exists(path, options); + } + }; + } + + public static Matcher matchesPattern(String regex) { + return matchesPattern(Pattern.compile(regex)); + } + + public static Matcher matchesPattern(Pattern pattern) { + return predicate("Matches " + pattern.pattern(), String.class, pattern.asPredicate()); + } + + private static Matcher predicate(String description, Class type, Predicate stringPredicate) { + return new CustomMatcher(description) { + @Override + public boolean matches(Object item) { + if (type.isInstance(item)) { + return stringPredicate.test(type.cast(item)); + } else { + return false; + } + } + }; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/Headers.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/Headers.java new file mode 100644 index 0000000000000..a5d735be7668a --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/Headers.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.test.http; + +import org.elasticsearch.common.SuppressForbidden; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * A helper class to not leak the internal headers class into our tests + * Currently setting multiple values for a single header is not supported, as it was not needed yet + */ +@SuppressForbidden(reason = "use http server") +public class Headers { + + final com.sun.net.httpserver.Headers headers; + + /** + * Creates a class with empty headers + */ + Headers() { + this.headers = new com.sun.net.httpserver.Headers(); + } + + /** + * Creates a class headers from http + * @param headers The internal sun webserver headers object + */ + Headers(com.sun.net.httpserver.Headers headers) { + this.headers = headers; + } + + /** + * @param name The name of header + * @return A list of values for this header + */ + public List get(String name) { + return headers.get(name); + } + + /** + * Adds a new header to this headers object + * @param name Name of the header + * @param value Value of the header + */ + void add(String name, String value) { + this.headers.compute(name, (k, v) -> { + if (v == null) { + return Collections.singletonList(value); + } else { + List list = new ArrayList<>(); + list.addAll(v); + list.add(value); + return list; + } + }); + } + + /** + * @param name Name of the header + * @return Returns the first header value or null if none exists + */ + String getFirst(String name) { + return headers.getFirst(name); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/MockRequest.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/MockRequest.java new file mode 100644 index 0000000000000..7a6a81620ade3 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/MockRequest.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
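For orientation, the matchers above might be used from a test roughly as sketched here. The file and regular expression are made-up example values, and `assertThat`, `createTempFile`, `Path` and `UUID` are assumed to come from the usual Hamcrest, `LuceneTestCase` and JDK imports.

```java
// assumes: import static org.elasticsearch.test.TestMatchers.pathExists;
//          import static org.elasticsearch.test.TestMatchers.matchesPattern;
public void testMatchersSketch() throws Exception {
    Path auditLog = createTempFile();                    // hypothetical file under test
    assertThat(auditLog, pathExists(auditLog));          // fails with "Path ... exists" if missing

    String uid = UUID.randomUUID().toString();
    assertThat(uid, matchesPattern("[0-9a-f-]{36}"));    // regex-backed string matcher
}
```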
+ */ +package org.elasticsearch.test.http; + +import org.elasticsearch.common.SuppressForbidden; + +import java.net.URI; +import java.util.Locale; + +/** + * A request parsed by the MockWebServer + */ +public class MockRequest { + + private final String method; + private final URI uri; + private final Headers headers; + private String body = null; + + @SuppressForbidden(reason = "use http server header class") + MockRequest(String method, URI uri, com.sun.net.httpserver.Headers headers) { + this.method = method; + this.uri = uri; + this.headers = new Headers(headers); + } + + /** + * @return The HTTP method of the incoming request + */ + public String getMethod() { + return method; + } + + /** + * @return The URI of the incoming request + */ + public URI getUri() { + return uri; + } + + /** + * @return The specific value of a request header, null if it does not exist + */ + public String getHeader(String name) { + return headers.getFirst(name); + } + + /** + * @return All headers associated with this request + */ + public Headers getHeaders() { + return headers; + } + + /** + * @return The body the incoming request had, null if no body was found + */ + public String getBody() { + return body; + } + + @Override + public String toString() { + return String.format(Locale.ROOT, "%s %s", method, uri); + } + + /** + * @param body Sets the body of the incoming request + */ + void setBody(String body) { + this.body = body; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/MockResponse.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/MockResponse.java new file mode 100644 index 0000000000000..e2d9a41dee3a0 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/MockResponse.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.test.http; + +import org.elasticsearch.common.unit.TimeValue; + +/** + * A response to be sent via the mock webserver. 
Parts of the response can be configured + */ +public class MockResponse { + + private String body = null; + private int statusCode = 200; + private TimeValue bodyDelay = null; + private Headers headers = new Headers(); + private TimeValue beforeReplyDelay = null; + + /** + * @param body The body to be returned if the response is sent by the webserver + * @return The updated mock response + */ + public MockResponse setBody(String body) { + this.body = body; + return this; + } + + /** + * @param statusCode The status code to be returned if the response is sent by the webserver, defaults to 200 + * @return The updated mock response + */ + public MockResponse setResponseCode(int statusCode) { + this.statusCode = statusCode; + return this; + } + + /** + * @param timeValue Allows to specify a delay between sending of headers and the body to inject artificial latency + * @return The updated mock response + */ + public MockResponse setBodyDelay(TimeValue timeValue) { + this.bodyDelay = timeValue; + return this; + } + + /** + * @param timeValue Allows to specify a delay before anything is sent back to the client + * @return The updated mock response + */ + public MockResponse setBeforeReplyDelay(TimeValue timeValue) { + this.beforeReplyDelay = timeValue; + return this; + } + + /** + * Adds a new header to a response + * @param name Header name + * @param value header value + * @return The updated mock response + */ + public MockResponse addHeader(String name, String value) { + headers.add(name, value); + return this; + } + + /** + * @return the body of the request + */ + String getBody() { + return body; + } + + /** + * @return The HTTP status code + */ + int getStatusCode() { + return statusCode; + } + + /** + * @return The time to delay the between sending the headers and the body + */ + TimeValue getBodyDelay() { + return bodyDelay; + } + + /** + * @return All configured headers for this request + */ + Headers getHeaders() { + return headers; + } + + /** + * @return The time to delay before the first byte is being returned + */ + TimeValue getBeforeReplyDelay() { + return beforeReplyDelay; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/MockWebServer.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/MockWebServer.java new file mode 100644 index 0000000000000..2e2210fac9a6d --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/test/http/MockWebServer.java @@ -0,0 +1,270 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
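As a point of reference, a response for the mock server could be composed like this; the status code, body, header and delay are arbitrary example values, while the fluent setters are the ones defined on `MockResponse` above.

```java
MockResponse slowJsonResponse = new MockResponse()
        .setResponseCode(200)
        .setBody("{\"acknowledged\":true}")
        .addHeader("Content-Type", "application/json")
        .setBodyDelay(TimeValue.timeValueMillis(500));   // headers arrive first, body after 500ms
```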
+ */ +package org.elasticsearch.test.http; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpServer; +import com.sun.net.httpserver.HttpsConfigurator; +import com.sun.net.httpserver.HttpsParameters; +import com.sun.net.httpserver.HttpsServer; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.mocksocket.MockHttpServer; + +import javax.net.ssl.SSLContext; +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.test.ESTestCase.terminate; + +/** + * A MockWebServer to test against. Holds a list of responses, which can be enqueed. + * The webserver has to enqueue at least the amount of responses with the number of requests that happen, otherwise errors + * will be returned. + *

+ * Each response that was executed also contains the request, so you can check if requests happened in the correct order. + */ +@SuppressForbidden(reason = "use http server") +public class MockWebServer implements Closeable { + + private HttpServer server; + private final Queue responses = ConcurrentCollections.newQueue(); + private final Queue requests = ConcurrentCollections.newQueue(); + private final Logger logger; + private final SSLContext sslContext; + private final boolean needClientAuth; + private final Set latches = ConcurrentCollections.newConcurrentSet(); + private String hostname; + private int port; + + /** + * Instantiates a webserver without https + */ + public MockWebServer() { + this(null, false); + } + + /** + * Instantiates a webserver with https + * @param sslContext The SSL context to be used for encryption + * @param needClientAuth Should clientAuth be used, which requires a client side certificate + */ + public MockWebServer(SSLContext sslContext, boolean needClientAuth) { + this.needClientAuth = needClientAuth; + this.logger = ESLoggerFactory.getLogger(this.getClass()); + this.sslContext = sslContext; + } + + /** + * Starts the webserver and binds it to an arbitrary ephemeral port + * The webserver will be able to serve requests once this method returns + * + * @throws IOException in case of a binding or other I/O errors + */ + public void start() throws IOException { + InetSocketAddress address = new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0); + if (sslContext != null) { + HttpsServer httpsServer = MockHttpServer.createHttps(address, 0); + httpsServer.setHttpsConfigurator(new CustomHttpsConfigurator(sslContext, needClientAuth)); + server = httpsServer; + } else { + server = MockHttpServer.createHttp(address, 0); + } + + server.start(); + // Uses #InetSocketAddress.getHostString() to prevent reverse dns lookups, eager binding, so we can find out host/port regardless + // if the webserver was already shut down + this.hostname = server.getAddress().getHostString(); + this.port = server.getAddress().getPort(); + server.createContext("/", s -> { + try { + MockResponse response = responses.poll(); + MockRequest request = createRequest(s); + requests.add(request); + + if (logger.isDebugEnabled()) { + logger.debug("[{}:{}] incoming HTTP request [{} {}], returning status [{}] body [{}]", getHostName(), getPort(), + s.getRequestMethod(), s.getRequestURI(), response.getStatusCode(), getStartOfBody(response)); + } + + sleepIfNeeded(response.getBeforeReplyDelay()); + + s.getResponseHeaders().putAll(response.getHeaders().headers); + + if (Strings.isEmpty(response.getBody())) { + s.sendResponseHeaders(response.getStatusCode(), 0); + } else { + byte[] responseAsBytes = response.getBody().getBytes(StandardCharsets.UTF_8); + s.sendResponseHeaders(response.getStatusCode(), responseAsBytes.length); + sleepIfNeeded(response.getBodyDelay()); + if ("HEAD".equals(request.getMethod()) == false) { + try (OutputStream responseBody = s.getResponseBody()) { + responseBody.write(responseAsBytes); + } + } + } + } catch (Exception e) { + logger.error((Supplier) () -> new ParameterizedMessage("failed to respond to request [{} {}]", + s.getRequestMethod(), s.getRequestURI()), e); + } finally { + s.close(); + } + + }); + logger.info("bound HTTP mock server to [{}:{}]", getHostName(), getPort()); + } + + /** + * A custom HttpsConfigurator that takes the SSL context and the required client authentication into account + * Also configured the protocols and cipher suites to 
match the security default ones + */ + @SuppressForbidden(reason = "use http server") + private static final class CustomHttpsConfigurator extends HttpsConfigurator { + + private final boolean needClientAuth; + + CustomHttpsConfigurator(SSLContext sslContext, boolean needClientAuth) { + super(sslContext); + this.needClientAuth = needClientAuth; + } + + @Override + public void configure(HttpsParameters params) { + params.setNeedClientAuth(needClientAuth); + } + } + + /** + * Sleep the specified amount of time, if the time value is not null + */ + private void sleepIfNeeded(TimeValue timeValue) throws InterruptedException { + if (timeValue == null) { + return; + } + + CountDownLatch latch = new CountDownLatch(1); + latches.add(latch); + try { + latch.await(timeValue.millis(), TimeUnit.MILLISECONDS); + } finally { + latches.remove(latch); + } + } + + /** + * Creates a MockRequest from an incoming HTTP request, that can later be checked in your test assertions + */ + private MockRequest createRequest(HttpExchange exchange) throws IOException { + MockRequest request = new MockRequest(exchange.getRequestMethod(), exchange.getRequestURI(), exchange.getRequestHeaders()); + if (exchange.getRequestBody() != null) { + String body = Streams.copyToString(new InputStreamReader(exchange.getRequestBody(), StandardCharsets.UTF_8)); + if (Strings.isEmpty(body) == false) { + request.setBody(body); + } + } + return request; + } + + /** + * @return The hostname the server is bound to. + */ + public String getHostName() { + return hostname; + } + + /** + * @return The tcp port that the server is bound to + */ + public int getPort() { + return port; + } + + /** + * Adds a response to the response queue that is used when a request comes in + * Note: Every response is only processed once + * @param response The created mock response + */ + public void enqueue(MockResponse response) { + if (logger.isTraceEnabled()) { + logger.trace("[{}:{}] Enqueueing response [{}], status [{}] body [{}]", getHostName(), getPort(), responses.size(), + response.getStatusCode(), getStartOfBody(response)); + } + responses.add(response); + } + + /** + * @return The requests that have been made to this mock web server + */ + public List requests() { + return new ArrayList<>(requests); + } + + /** + * Removes the first request in the list of requests and returns it to the caller. + * This can be used as a queue if you are sure the order of your requests. + */ + public MockRequest takeRequest() { + return requests.poll(); + } + + /** + * A utility method to peek into the requests and find out if #MockWebServer.takeRequests will not throw an out of bound exception + * @return true if more requests are available, false otherwise + */ + public boolean hasMoreRequests() { + return requests.isEmpty() == false; + } + + /** + * Closes down the webserver. Also tries to stop all the currently sleeping requests first by counting down their respective + * latches. 
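Putting the pieces together, a test could use the server roughly as sketched below. The `/_test` path and the use of plain `HttpURLConnection` are illustrative only (any HTTP client would do); the rest is the API defined in this file.

```java
public void testMockWebServerSketch() throws Exception {
    try (MockWebServer webServer = new MockWebServer()) {
        webServer.start();
        webServer.enqueue(new MockResponse().setResponseCode(200).setBody("{}"));

        // Hypothetical client call against the ephemeral port the server bound to.
        URL url = new URL("http://" + webServer.getHostName() + ":" + webServer.getPort() + "/_test");
        HttpURLConnection connection = (HttpURLConnection) url.openConnection();
        assertEquals(200, connection.getResponseCode());

        // The captured request can then be asserted on.
        MockRequest request = webServer.takeRequest();
        assertEquals("GET", request.getMethod());
        assertEquals("/_test", request.getUri().getPath());
    }
}
```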
+ */ + @Override + public void close() { + logger.debug("[{}:{}] Counting down all latches before terminating executor", getHostName(), getPort()); + latches.forEach(CountDownLatch::countDown); + + if (server.getExecutor() instanceof ExecutorService) { + try { + terminate((ExecutorService) server.getExecutor()); + } catch (InterruptedException e) { + } + } + server.stop(0); + } + + /** + * Helper method to return the first 20 chars of a request's body + * @param response The MockResponse to inspect + * @return Returns the first 20 chars or an empty string if the response body is not configured + */ + private String getStartOfBody(MockResponse response) { + if (Strings.isEmpty(response.getBody())) { + return ""; + } + int length = Math.min(20, response.getBody().length()); + return response.getBody().substring(0, length).replaceAll("\n", ""); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java new file mode 100644 index 0000000000000..a243b8c995d23 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ClientHelper; + +import java.util.concurrent.CountDownLatch; + +import static org.mockito.Matchers.anyObject; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ClientHelperTests extends ESTestCase { + + public void testStashContext() { + final String origin = randomAlphaOfLengthBetween(4, 16); + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + + final boolean setOtherValues = randomBoolean(); + if (setOtherValues) { + threadContext.putTransient("foo", "bar"); + threadContext.putHeader("foo", "bar"); + } + + assertNull(threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME)); + ThreadContext.StoredContext storedContext = ClientHelper.stashWithOrigin(threadContext, origin); + assertEquals(origin, threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME)); + assertNull(threadContext.getTransient("foo")); + assertNull(threadContext.getTransient("bar")); + + storedContext.close(); + assertNull(threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME)); + + if (setOtherValues) { + assertEquals("bar", threadContext.getTransient("foo")); + assertEquals("bar", threadContext.getHeader("foo")); + } + } + + public void testExecuteAsyncWrapsListener() throws Exception { + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final String headerName = randomAlphaOfLengthBetween(4, 16); + final 
String headerValue = randomAlphaOfLengthBetween(4, 16); + final String origin = randomAlphaOfLengthBetween(4, 16); + final CountDownLatch latch = new CountDownLatch(2); + final ActionListener listener = ActionListener.wrap(v -> { + assertNull(threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME)); + assertEquals(headerValue, threadContext.getHeader(headerName)); + latch.countDown(); + }, e -> fail(e.getMessage())); + + final ClusterHealthRequest request = new ClusterHealthRequest(); + threadContext.putHeader(headerName, headerValue); + + ClientHelper.executeAsyncWithOrigin(threadContext, origin, request, listener, (req, listener1) -> { + assertSame(request, req); + assertEquals(origin, threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME)); + assertNull(threadContext.getHeader(headerName)); + latch.countDown(); + listener1.onResponse(null); + }); + + latch.await(); + } + + public void testExecuteWithClient() throws Exception { + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final Client client = mock(Client.class); + final ThreadPool threadPool = mock(ThreadPool.class); + when(client.threadPool()).thenReturn(threadPool); + when(threadPool.getThreadContext()).thenReturn(threadContext); + + final String headerName = randomAlphaOfLengthBetween(4, 16); + final String headerValue = randomAlphaOfLengthBetween(4, 16); + final String origin = randomAlphaOfLengthBetween(4, 16); + final CountDownLatch latch = new CountDownLatch(2); + final ActionListener listener = ActionListener.wrap(v -> { + assertNull(threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME)); + assertEquals(headerValue, threadContext.getHeader(headerName)); + latch.countDown(); + }, e -> fail(e.getMessage())); + + doAnswer(invocationOnMock -> { + assertEquals(origin, threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME)); + assertNull(threadContext.getHeader(headerName)); + latch.countDown(); + ((ActionListener)invocationOnMock.getArguments()[2]).onResponse(null); + return null; + }).when(client).execute(anyObject(), anyObject(), anyObject()); + + threadContext.putHeader(headerName, headerValue); + ClientHelper.executeAsyncWithOrigin(client, origin, ClusterHealthAction.INSTANCE, new ClusterHealthRequest(), listener); + + latch.await(); + } + + public void testClientWithOrigin() throws Exception { + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final Client client = mock(Client.class); + final ThreadPool threadPool = mock(ThreadPool.class); + when(client.threadPool()).thenReturn(threadPool); + when(threadPool.getThreadContext()).thenReturn(threadContext); + when(client.settings()).thenReturn(Settings.EMPTY); + + final String headerName = randomAlphaOfLengthBetween(4, 16); + final String headerValue = randomAlphaOfLengthBetween(4, 16); + final String origin = randomAlphaOfLengthBetween(4, 16); + final CountDownLatch latch = new CountDownLatch(2); + final ActionListener listener = ActionListener.wrap(v -> { + assertNull(threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME)); + assertEquals(headerValue, threadContext.getHeader(headerName)); + latch.countDown(); + }, e -> fail(e.getMessage())); + + + doAnswer(invocationOnMock -> { + assertEquals(origin, threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME)); + assertNull(threadContext.getHeader(headerName)); + latch.countDown(); + ((ActionListener)invocationOnMock.getArguments()[2]).onResponse(null); + return null; + 
}).when(client).execute(anyObject(), anyObject(), anyObject()); + + threadContext.putHeader(headerName, headerValue); + Client clientWithOrigin = ClientHelper.clientWithOrigin(client, origin); + clientWithOrigin.execute(null, null, listener); + latch.await(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java new file mode 100644 index 0000000000000..0becdffb7ea7d --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java @@ -0,0 +1,398 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilter; +import org.elasticsearch.bootstrap.BootstrapCheck; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.analysis.TokenizerFactory; +import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.ingest.Processor; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.AnalysisPlugin; +import org.elasticsearch.plugins.ClusterPlugin; +import org.elasticsearch.plugins.DiscoveryPlugin; +import org.elasticsearch.plugins.IngestPlugin; +import org.elasticsearch.plugins.MapperPlugin; +import org.elasticsearch.plugins.NetworkPlugin; +import org.elasticsearch.plugins.PersistentTaskPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.threadpool.ExecutorBuilder; +import org.elasticsearch.threadpool.ThreadPool; +import 
org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportInterceptor; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.xpack.core.ssl.SSLService; + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.BiConsumer; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.function.Supplier; +import java.util.function.UnaryOperator; +import java.util.stream.Collectors; + +import static java.util.stream.Collectors.toList; + +public class LocalStateCompositeXPackPlugin extends XPackPlugin implements ScriptPlugin, ActionPlugin, IngestPlugin, NetworkPlugin, + ClusterPlugin, DiscoveryPlugin, MapperPlugin, AnalysisPlugin, PersistentTaskPlugin { + + private XPackLicenseState licenseState; + private SSLService sslService; + private LicenseService licenseService; + protected List plugins = new ArrayList<>(); + + public LocalStateCompositeXPackPlugin(final Settings settings, final Path configPath) throws Exception { + super(settings, configPath); + } + + //Get around all the setOnce nonsense in the plugin + @Override + protected SSLService getSslService() { + return sslService; + } + + @Override + protected void setSslService(SSLService sslService) { + this.sslService = sslService; + } + + @Override + protected LicenseService getLicenseService() { + return licenseService; + } + + @Override + protected void setLicenseService(LicenseService licenseService) { + this.licenseService = licenseService; + } + + @Override + protected XPackLicenseState getLicenseState() { + return licenseState; + } + + @Override + protected void setLicenseState(XPackLicenseState licenseState) { + this.licenseState = licenseState; + } + + @Override + public Collection createGuiceModules() { + ArrayList modules = new ArrayList<>(); + modules.addAll(super.createGuiceModules()); + filterPlugins(Plugin.class).stream().forEach(p -> + modules.addAll(p.createGuiceModules()) + ); + return modules; + } + + @Override + public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, ScriptService scriptService, + NamedXContentRegistry xContentRegistry, Environment environment, + NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { + List components = new ArrayList<>(); + components.addAll(super.createComponents(client, clusterService, threadPool, resourceWatcherService, scriptService, + xContentRegistry, environment, nodeEnvironment, namedWriteableRegistry)); + + filterPlugins(Plugin.class).stream().forEach(p -> + components.addAll(p.createComponents(client, clusterService, threadPool, resourceWatcherService, scriptService, + xContentRegistry, environment, nodeEnvironment, namedWriteableRegistry)) + ); + return components; + } + + @Override + public Collection getRestHeaders() { + List headers = new ArrayList<>(); + headers.addAll(super.getRestHeaders()); + filterPlugins(ActionPlugin.class).stream().forEach(p -> headers.addAll(p.getRestHeaders())); + return headers; + } + + @Override + public List> getSettings() { + ArrayList> settings = new ArrayList<>(); + settings.addAll(super.getSettings()); + + filterPlugins(Plugin.class).stream().forEach(p -> + 
settings.addAll(p.getSettings()) + ); + return settings; + } + + @Override + public List getSettingsFilter() { + List filters = new ArrayList<>(); + filters.addAll(super.getSettingsFilter()); + filterPlugins(Plugin.class).stream().forEach(p -> + filters.addAll(p.getSettingsFilter()) + ); + return filters; + } + + @Override + public List> getActions() { + List> actions = new ArrayList<>(); + actions.addAll(super.getActions()); + filterPlugins(ActionPlugin.class).stream().forEach(p -> + actions.addAll(p.getActions()) + ); + return actions; + } + + @Override + public List getActionFilters() { + List filters = new ArrayList<>(); + filters.addAll(super.getActionFilters()); + filterPlugins(ActionPlugin.class).stream().forEach(p -> + filters.addAll(p.getActionFilters()) + ); + return filters; + } + + @Override + public List getRestHandlers(Settings settings, RestController restController, ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster) { + List handlers = new ArrayList<>(); + handlers.addAll(super.getRestHandlers(settings, restController, clusterSettings, indexScopedSettings, settingsFilter, + indexNameExpressionResolver, nodesInCluster)); + filterPlugins(ActionPlugin.class).stream().forEach(p -> + handlers.addAll(p.getRestHandlers(settings, restController, clusterSettings, indexScopedSettings, + settingsFilter, indexNameExpressionResolver, nodesInCluster)) + ); + return handlers; + } + + @Override + public List getNamedWriteables() { + List entries = new ArrayList<>(); + entries.addAll(super.getNamedWriteables()); + for (Plugin p : plugins) { + entries.addAll(p.getNamedWriteables()); + } + return entries; + } + + @Override + public List getNamedXContent() { + List entries = new ArrayList<>(); + entries.addAll(super.getNamedXContent()); + for (Plugin p : plugins) { + entries.addAll(p.getNamedXContent()); + } + return entries; + } + + // End of the XPackPlugin overrides + + @Override + public Settings additionalSettings() { + Settings.Builder builder = Settings.builder(); + builder.put(super.additionalSettings()); + filterPlugins(Plugin.class).stream().forEach(p -> + builder.put(p.additionalSettings()) + ); + return builder.build(); + } + + + @Override + public List getContexts() { + List contexts = new ArrayList<>(); + contexts.addAll(super.getContexts()); + filterPlugins(ScriptPlugin.class).stream().forEach(p -> contexts.addAll(p.getContexts())); + return contexts; + } + + @Override + public Map getProcessors(Processor.Parameters parameters) { + Map processors = new HashMap<>(); + filterPlugins(IngestPlugin.class).stream().forEach(p -> processors.putAll(p.getProcessors(parameters))); + return processors; + } + + @Override + public List getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry, ThreadContext threadContext) { + List interceptors = new ArrayList<>(); + filterPlugins(NetworkPlugin.class).stream().forEach(p -> interceptors.addAll(p.getTransportInterceptors(namedWriteableRegistry, + threadContext))); + return interceptors; + } + + @Override + public Map> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedWriteableRegistry namedWriteableRegistry, + NetworkService networkService) { + Map> transports = new HashMap<>(); + transports.putAll(super.getTransports(settings, threadPool, bigArrays, pageCacheRecycler, + 
circuitBreakerService, namedWriteableRegistry, networkService)); + filterPlugins(NetworkPlugin.class).stream().forEach(p -> transports.putAll(p.getTransports(settings, threadPool, bigArrays, + pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService))); + return transports; + + + } + + @Override + public Map> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, + CircuitBreakerService circuitBreakerService, + NamedWriteableRegistry namedWriteableRegistry, + NamedXContentRegistry xContentRegistry, + NetworkService networkService, + HttpServerTransport.Dispatcher dispatcher) { + Map> transports = new HashMap<>(); + filterPlugins(NetworkPlugin.class).stream().forEach(p -> transports.putAll(p.getHttpTransports(settings, threadPool, bigArrays, + circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, dispatcher))); + return transports; + } + + @Override + public List getBootstrapChecks() { + List checks = new ArrayList<>(); + filterPlugins(Plugin.class).stream().forEach(p -> checks.addAll(p.getBootstrapChecks())); + return Collections.unmodifiableList(checks); + } + + @Override + public UnaryOperator getRestHandlerWrapper(ThreadContext threadContext) { + + // There can be only one. + List> items = filterPlugins(ActionPlugin.class).stream().map(p -> + p.getRestHandlerWrapper(threadContext)).filter(Objects::nonNull).collect(Collectors.toList()); + + if (items.size() > 1) { + throw new UnsupportedOperationException("Only the security ActionPlugin should override this"); + } else if (items.size() == 1) { + return items.get(0); + } else { + return null; + } + } + + @Override + public List> getExecutorBuilders(final Settings settings) { + List> builders = new ArrayList<>(); + filterPlugins(Plugin.class).stream().forEach(p -> builders.addAll(p.getExecutorBuilders(settings))); + return builders; + } + @Override + public UnaryOperator> getIndexTemplateMetaDataUpgrader() { + return templates -> { + for(Plugin p: plugins) { + templates = p.getIndexTemplateMetaDataUpgrader().apply(templates); + } + return templates; + }; + } + + @Override + public Map> getTokenizers() { + Map> tokenizers = new HashMap<>(); + filterPlugins(AnalysisPlugin.class).stream().forEach(p -> tokenizers.putAll(p.getTokenizers())); + return tokenizers; + } + + @Override + public void onIndexModule(IndexModule indexModule) { + super.onIndexModule(indexModule); + filterPlugins(Plugin.class).stream().forEach(p -> p.onIndexModule(indexModule)); + } + + @Override + public Map> getInitialClusterStateCustomSupplier() { + Map> suppliers = new HashMap<>(); + filterPlugins(ClusterPlugin.class).stream().forEach(p -> suppliers.putAll(p.getInitialClusterStateCustomSupplier())); + return suppliers; + } + + @Override + public Function> getFieldFilter() { + List>> items = filterPlugins(MapperPlugin.class).stream().map(p -> + p.getFieldFilter()).collect(Collectors.toList()); + if (items.size() > 1) { + throw new UnsupportedOperationException("Only the security MapperPlugin should override this"); + } else if (items.size() == 1) { + return items.get(0); + } else { + // return the same default from MapperPlugin + return MapperPlugin.NOOP_FIELD_FILTER; + } + } + + @Override + public BiConsumer getJoinValidator() { + // There can be only one. 
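+ // Only the security DiscoveryPlugin is expected to supply a join validator; finding more than one is a hard error below.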
+ List> items = filterPlugins(DiscoveryPlugin.class).stream().map(p -> + p.getJoinValidator()).collect(Collectors.toList()); + if (items.size() > 1) { + throw new UnsupportedOperationException("Only the security DiscoveryPlugin should override this"); + } else if (items.size() == 1) { + return items.get(0); + } else { + return null; + } + } + + @Override + public List> getPersistentTasksExecutor(ClusterService clusterService, + ThreadPool threadPool, Client client) { + return filterPlugins(PersistentTaskPlugin.class).stream() + .map(p -> p.getPersistentTasksExecutor(clusterService, threadPool, client)) + .flatMap(List::stream) + .collect(toList()); + } + + private List filterPlugins(Class type) { + return plugins.stream().filter(x -> type.isAssignableFrom(x.getClass())).map(p -> ((T)p)) + .collect(Collectors.toList()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/TestXPackTransportClient.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/TestXPackTransportClient.java new file mode 100644 index 0000000000000..30c370c14c27d --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/TestXPackTransportClient.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core; + +import io.netty.util.ThreadDeathWatcher; +import io.netty.util.concurrent.GlobalEventExecutor; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.xpack.core.security.SecurityField; + +import java.util.Arrays; +import java.util.Collection; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.test.ESTestCase.getTestTransportPlugin; + +/** + * TransportClient.Builder that installs the XPackPlugin by default. + */ +@SuppressWarnings({"unchecked","varargs"}) +public class TestXPackTransportClient extends TransportClient { + + @SafeVarargs + public TestXPackTransportClient(Settings settings, Class... plugins) { + this(settings, Arrays.asList(plugins)); + } + + public TestXPackTransportClient(Settings settings, Collection> plugins) { + super(settings, Settings.EMPTY, addPlugins(plugins, getTestTransportPlugin()), null); + } + + @Override + public void close() { + super.close(); + if (NetworkModule.TRANSPORT_TYPE_SETTING.exists(settings) == false + || NetworkModule.TRANSPORT_TYPE_SETTING.get(settings).equals(SecurityField.NAME4)) { + try { + GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + try { + ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java new file mode 100644 index 0000000000000..ed76a9a27809c --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackSettingsTests.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.XPackSettings; + +import javax.crypto.Cipher; + +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.not; + +public class XPackSettingsTests extends ESTestCase { + + public void testDefaultSSLCiphers() throws Exception { + assertThat(XPackSettings.DEFAULT_CIPHERS, hasItem("TLS_RSA_WITH_AES_128_CBC_SHA")); + + final boolean useAES256 = Cipher.getMaxAllowedKeyLength("AES") > 128; + if (useAES256) { + logger.info("AES 256 is available"); + assertThat(XPackSettings.DEFAULT_CIPHERS, hasItem("TLS_RSA_WITH_AES_256_CBC_SHA")); + } else { + logger.info("AES 256 is not available"); + assertThat(XPackSettings.DEFAULT_CIPHERS, not(hasItem("TLS_RSA_WITH_AES_256_CBC_SHA"))); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/TransportXPackInfoActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/TransportXPackInfoActionTests.java new file mode 100644 index 0000000000000..f87d9f33cf5e8 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/TransportXPackInfoActionTests.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackInfoResponse; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.license.XPackInfoResponse.FeatureSetsInfo.FeatureSet; + +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.core.IsNull.notNullValue; +import static org.hamcrest.core.IsNull.nullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportXPackInfoActionTests extends ESTestCase { + + public void testDoExecute() throws Exception { + + LicenseService licenseService = mock(LicenseService.class); + + final Set featureSets = new HashSet<>(); + int featureSetCount = randomIntBetween(0, 5); + for (int i = 0; i < featureSetCount; i++) { + XPackFeatureSet fs = mock(XPackFeatureSet.class); + when(fs.name()).thenReturn(randomAlphaOfLength(5)); + when(fs.description()).thenReturn(randomAlphaOfLength(10)); + when(fs.available()).thenReturn(randomBoolean()); + 
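+ // enabled is stubbed independently of available so both flag combinations can occur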
when(fs.enabled()).thenReturn(randomBoolean()); + featureSets.add(fs); + } + + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportXPackInfoAction action = new TransportXPackInfoAction(Settings.EMPTY, mock(ThreadPool.class), transportService, + mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), licenseService, featureSets); + + License license = mock(License.class); + long expiryDate = randomLong(); + when(license.expiryDate()).thenReturn(expiryDate); + License.Status status = randomFrom(License.Status.values()); + when(license.status()).thenReturn(status); + String type = randomAlphaOfLength(10); + when(license.type()).thenReturn(type); + License.OperationMode mode = randomFrom(License.OperationMode.values()); + when(license.operationMode()).thenReturn(mode); + String uid = randomAlphaOfLength(30); + when(license.uid()).thenReturn(uid); + when(licenseService.getLicense()).thenReturn(license); + + XPackInfoRequest request = new XPackInfoRequest(); + request.setVerbose(randomBoolean()); + + EnumSet categories = EnumSet.noneOf(XPackInfoRequest.Category.class); + int maxCategoryCount = randomIntBetween(0, XPackInfoRequest.Category.values().length); + for (int i = 0; i < maxCategoryCount; i++) { + categories.add(randomFrom(XPackInfoRequest.Category.values())); + } + request.setCategories(categories); + + final CountDownLatch latch = new CountDownLatch(1); + final AtomicReference response = new AtomicReference<>(); + final AtomicReference error = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(XPackInfoResponse infoResponse) { + response.set(infoResponse); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + error.set(e); + latch.countDown(); + } + }); + + if (!latch.await(5, TimeUnit.SECONDS)) { + fail("waiting too long for "); + } + + assertThat(error.get(), nullValue()); + assertThat(response.get(), notNullValue()); + + if (request.getCategories().contains(XPackInfoRequest.Category.BUILD)) { + assertThat(response.get().getBuildInfo(), notNullValue()); + } else { + assertThat(response.get().getBuildInfo(), nullValue()); + } + + if (request.getCategories().contains(XPackInfoRequest.Category.LICENSE)) { + assertThat(response.get().getLicenseInfo(), notNullValue()); + assertThat(response.get().getLicenseInfo().getExpiryDate(), is(expiryDate)); + assertThat(response.get().getLicenseInfo().getStatus(), is(status)); + assertThat(response.get().getLicenseInfo().getType(), is(type)); + assertThat(response.get().getLicenseInfo().getMode(), is(mode.name().toLowerCase(Locale.ROOT))); + assertThat(response.get().getLicenseInfo().getUid(), is(uid)); + } else { + assertThat(response.get().getLicenseInfo(), nullValue()); + } + + if (request.getCategories().contains(XPackInfoRequest.Category.FEATURES)) { + assertThat(response.get().getFeatureSetsInfo(), notNullValue()); + Map features = response.get().getFeatureSetsInfo().getFeatureSets(); + assertThat(features.size(), is(featureSets.size())); + for (XPackFeatureSet fs : featureSets) { + assertThat(features, hasKey(fs.name())); + assertThat(features.get(fs.name()).name(), equalTo(fs.name())); + if (!request.isVerbose()) { + assertThat(features.get(fs.name()).description(), is(nullValue())); + } else { + assertThat(features.get(fs.name()).description(), is(fs.description())); + } + 
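+ // available and enabled are asserted for every feature set, regardless of the verbose flag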
assertThat(features.get(fs.name()).available(), equalTo(fs.available())); + assertThat(features.get(fs.name()).enabled(), equalTo(fs.enabled())); + } + } else { + assertThat(response.get().getFeatureSetsInfo(), nullValue()); + } + + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/IteratingActionListenerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/IteratingActionListenerTests.java new file mode 100644 index 0000000000000..0648d774075d5 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/IteratingActionListenerTests.java @@ -0,0 +1,139 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.common; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.collect.HppcMaps.Object; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ESTestCase; +import org.junit.Assert; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiConsumer; + +import static org.hamcrest.Matchers.sameInstance; + +public class IteratingActionListenerTests extends ESTestCase { + + public void testIteration() { + final int numberOfItems = scaledRandomIntBetween(1, 32); + final int numberOfIterations = scaledRandomIntBetween(1, numberOfItems); + List items = new ArrayList<>(numberOfItems); + for (int i = 0; i < numberOfItems; i++) { + items.add(new Object()); + } + + final AtomicInteger iterations = new AtomicInteger(0); + final BiConsumer> consumer = (listValue, listener) -> { + final int current = iterations.incrementAndGet(); + if (current == numberOfIterations) { + listener.onResponse(items.get(current - 1)); + } else { + listener.onResponse(null); + } + }; + + IteratingActionListener iteratingListener = new IteratingActionListener<>(ActionListener.wrap((object) -> { + assertNotNull(object); + assertThat(object, sameInstance(items.get(numberOfIterations - 1))); + }, (e) -> { + logger.error("unexpected exception", e); + fail("exception should not have been thrown"); + }), consumer, items, new ThreadContext(Settings.EMPTY)); + iteratingListener.run(); + + // we never really went async, its all chained together so verify this for sanity + assertEquals(numberOfIterations, iterations.get()); + } + + public void testIterationDoesntAllowThreadContextLeak() { + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final int numberOfItems = scaledRandomIntBetween(1, 32); + final int numberOfIterations = scaledRandomIntBetween(1, numberOfItems); + List items = new ArrayList<>(numberOfItems); + for (int i = 0; i < numberOfItems; i++) { + items.add(new Object()); + } + + threadContext.putHeader("outside", "listener"); + final AtomicInteger iterations = new AtomicInteger(0); + final BiConsumer> consumer = (listValue, listener) -> { + final int current = iterations.incrementAndGet(); + assertEquals("listener", threadContext.getHeader("outside")); + if (current == numberOfIterations) { + threadContext.putHeader("foo", "bar"); + 
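+ // the header set on the final iteration must be visible to the delegate listener but must not leak out of run()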
listener.onResponse(items.get(current - 1)); + } else { + listener.onResponse(null); + } + }; + + IteratingActionListener iteratingListener = new IteratingActionListener<>(ActionListener.wrap((object) -> { + assertNotNull(object); + assertThat(object, sameInstance(items.get(numberOfIterations - 1))); + assertEquals("bar", threadContext.getHeader("foo")); + assertEquals("listener", threadContext.getHeader("outside")); + }, (e) -> { + logger.error("unexpected exception", e); + fail("exception should not have been thrown"); + }), consumer, items, threadContext); + iteratingListener.run(); + + // we never really went async, its all chained together so verify this for sanity + assertEquals(numberOfIterations, iterations.get()); + assertNull(threadContext.getHeader("foo")); + assertEquals("listener", threadContext.getHeader("outside")); + } + + public void testIterationEmptyList() { + IteratingActionListener listener = new IteratingActionListener<>(ActionListener.wrap(Assert::assertNull, + (e) -> { + logger.error("unexpected exception", e); + fail("exception should not have been thrown"); + }), (listValue, iteratingListener) -> { + fail("consumer should not have been called!!!"); + }, Collections.emptyList(), new ThreadContext(Settings.EMPTY)); + listener.run(); + } + + public void testFailure() { + final int numberOfItems = scaledRandomIntBetween(1, 32); + final int numberOfIterations = scaledRandomIntBetween(1, numberOfItems); + List items = new ArrayList<>(numberOfItems); + for (int i = 0; i < numberOfItems; i++) { + items.add(new Object()); + } + + final AtomicInteger iterations = new AtomicInteger(0); + final BiConsumer> consumer = (listValue, listener) -> { + final int current = iterations.incrementAndGet(); + if (current == numberOfIterations) { + listener.onFailure(new ElasticsearchException("expected exception")); + } else { + listener.onResponse(null); + } + }; + + final AtomicBoolean onFailureCalled = new AtomicBoolean(false); + IteratingActionListener iteratingListener = new IteratingActionListener<>(ActionListener.wrap((object) -> { + fail("onResponse should not have been called, but was called with: " + object); + }, (e) -> { + assertEquals("expected exception", e.getMessage()); + assertTrue(onFailureCalled.compareAndSet(false, true)); + }), consumer, items, new ThreadContext(Settings.EMPTY)); + iteratingListener.run(); + + // we never really went async, its all chained together so verify this for sanity + assertEquals(numberOfIterations, iterations.get()); + assertTrue(onFailureCalled.get()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationChecksTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationChecksTests.java new file mode 100644 index 0000000000000..8757b83b136a9 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationChecksTests.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.deprecation; + +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static org.hamcrest.core.IsEqual.equalTo; + +public class DeprecationChecksTests extends ESTestCase { + + public void testFilterChecks() throws IOException { + DeprecationIssue issue = DeprecationIssueTests.createTestInstance(); + int numChecksPassed = randomIntBetween(0, 5); + int numChecksFailed = 10 - numChecksPassed; + List> checks = new ArrayList<>(); + for (int i = 0; i < numChecksFailed; i++) { + checks.add(() -> issue); + } + for (int i = 0; i < numChecksPassed; i++) { + checks.add(() -> null); + } + List filteredIssues = DeprecationInfoAction.filterChecks(checks, Supplier::get); + assertThat(filteredIssues.size(), equalTo(numChecksFailed)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoActionRequestTests.java new file mode 100644 index 0000000000000..3eac1f1ace6c6 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoActionRequestTests.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.deprecation; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +public class DeprecationInfoActionRequestTests extends AbstractWireSerializingTestCase { + + @Override + protected DeprecationInfoAction.Request createTestInstance() { + return new DeprecationInfoAction.Request(randomAlphaOfLength(10)); + } + + @Override + protected Writeable.Reader instanceReader() { + return DeprecationInfoAction.Request::new; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoActionResponseTests.java new file mode 100644 index 0000000000000..160641be46986 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationInfoActionResponseTests.java @@ -0,0 +1,127 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.deprecation; + +import org.elasticsearch.Build; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.test.AbstractStreamableTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.core.IsEqual.equalTo; + +public class DeprecationInfoActionResponseTests extends AbstractStreamableTestCase { + + @Override + protected DeprecationInfoAction.Response createTestInstance() { + List clusterIssues = Stream.generate(DeprecationIssueTests::createTestInstance) + .limit(randomIntBetween(0, 10)).collect(Collectors.toList()); + List nodeIssues = Stream.generate(DeprecationIssueTests::createTestInstance) + .limit(randomIntBetween(0, 10)).collect(Collectors.toList()); + Map> indexIssues = new HashMap<>(); + for (int i = 0; i < randomIntBetween(0, 10); i++) { + List perIndexIssues = Stream.generate(DeprecationIssueTests::createTestInstance) + .limit(randomIntBetween(0, 10)).collect(Collectors.toList()); + indexIssues.put(randomAlphaOfLength(10), perIndexIssues); + } + return new DeprecationInfoAction.Response(clusterIssues, nodeIssues, indexIssues); + } + + @Override + protected DeprecationInfoAction.Response createBlankInstance() { + return new DeprecationInfoAction.Response(); + } + + public void testFrom() throws IOException { + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("_all"); + mapping.field("enabled", false); + mapping.endObject().endObject(); + + MetaData metadata = MetaData.builder().put(IndexMetaData.builder("test") + .putMapping("testUnderscoreAll", Strings.toString(mapping)) + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0)) + .build(); + + DiscoveryNode discoveryNode = DiscoveryNode.createLocal(Settings.EMPTY, + new TransportAddress(TransportAddress.META_ADDRESS, 9300), "test"); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metaData(metadata).build(); + List nodeInfos = Collections.singletonList(new NodeInfo(Version.CURRENT, Build.CURRENT, + discoveryNode, null, null, null, null, + null, null, null, null, null, null)); + List nodeStats = Collections.singletonList(new NodeStats(discoveryNode, 0L, null, + null, null, null, null, null, null, null, null, + null, null, null, null)); + IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(Settings.EMPTY); + IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, + 
true, true); + boolean clusterIssueFound = randomBoolean(); + boolean nodeIssueFound = randomBoolean(); + boolean indexIssueFound = randomBoolean(); + DeprecationIssue foundIssue = DeprecationIssueTests.createTestInstance(); + List> clusterSettingsChecks = + Collections.unmodifiableList(Arrays.asList( + (s) -> clusterIssueFound ? foundIssue : null + )); + List, List, DeprecationIssue>> nodeSettingsChecks = + Collections.unmodifiableList(Arrays.asList( + (ln, ls) -> nodeIssueFound ? foundIssue : null + )); + + List> indexSettingsChecks = + Collections.unmodifiableList(Arrays.asList( + (idx) -> indexIssueFound ? foundIssue : null + )); + + DeprecationInfoAction.Response response = DeprecationInfoAction.Response.from(nodeInfos, nodeStats, state, + resolver, Strings.EMPTY_ARRAY, indicesOptions, + clusterSettingsChecks, nodeSettingsChecks, indexSettingsChecks); + + if (clusterIssueFound) { + assertThat(response.getClusterSettingsIssues(), equalTo(Collections.singletonList(foundIssue))); + } else { + assertThat(response.getClusterSettingsIssues(), empty()); + } + + if (nodeIssueFound) { + assertThat(response.getNodeSettingsIssues(), equalTo(Collections.singletonList(foundIssue))); + } else { + assertTrue(response.getNodeSettingsIssues().isEmpty()); + } + + if (indexIssueFound) { + assertThat(response.getIndexSettingsIssues(), equalTo(Collections.singletonMap("test", + Collections.singletonList(foundIssue)))); + } else { + assertTrue(response.getIndexSettingsIssues().isEmpty()); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationIssueTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationIssueTests.java new file mode 100644 index 0000000000000..db14865e8a9ab --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/deprecation/DeprecationIssueTests.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.deprecation; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; +import static org.elasticsearch.xpack.core.deprecation.DeprecationIssue.Level; +import static org.hamcrest.core.IsEqual.equalTo; + +public class DeprecationIssueTests extends ESTestCase { + DeprecationIssue issue; + + static DeprecationIssue createTestInstance() { + String details = randomBoolean() ? 
randomAlphaOfLength(10) : null; + return new DeprecationIssue(randomFrom(Level.values()), randomAlphaOfLength(10), + randomAlphaOfLength(10), details); + } + + @Before + public void setup() { + issue = createTestInstance(); + } + + public void testEqualsAndHashCode() { + DeprecationIssue other = new DeprecationIssue(issue.getLevel(), issue.getMessage(), issue.getUrl(), issue.getDetails()); + assertThat(issue, equalTo(other)); + assertThat(other, equalTo(issue)); + assertThat(issue.hashCode(), equalTo(other.hashCode())); + } + + public void testSerialization() throws IOException { + BytesStreamOutput out = new BytesStreamOutput(); + issue.writeTo(out); + StreamInput in = out.bytes().streamInput(); + DeprecationIssue other = new DeprecationIssue(in); + assertThat(issue, equalTo(other)); + } + + public void testToXContent() throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + issue.toXContent(builder, EMPTY_PARAMS); + Map toXContentMap = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); + String level = (String) toXContentMap.get("level"); + String message = (String) toXContentMap.get("message"); + String url = (String) toXContentMap.get("url"); + if (issue.getDetails() != null) { + assertTrue(toXContentMap.containsKey("details")); + } + String details = (String) toXContentMap.get("details"); + DeprecationIssue other = new DeprecationIssue(Level.fromString(level), message, url, details); + assertThat(issue, equalTo(other)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/CloseJobActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/CloseJobActionRequestTests.java new file mode 100644 index 0000000000000..a8224d31d8d0b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/CloseJobActionRequestTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.CloseJobAction.Request; + +public class CloseJobActionRequestTests extends AbstractStreamableXContentTestCase { + + @Override + protected Request createTestInstance() { + Request request = new Request(randomAlphaOfLengthBetween(1, 20)); + if (randomBoolean()) { + request.setCloseTimeout(TimeValue.timeValueMillis(randomNonNegativeLong())); + } + if (randomBoolean()) { + request.setForce(randomBoolean()); + } + if (randomBoolean()) { + request.setAllowNoJobs(randomBoolean()); + } + return request; + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + @Override + protected Request doParseInstance(XContentParser parser) { + return Request.parseRequest(null, parser); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/CloseJobActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/CloseJobActionResponseTests.java new file mode 100644 index 0000000000000..c558a5cba0c45 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/CloseJobActionResponseTests.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.CloseJobAction.Response; + +public class CloseJobActionResponseTests extends AbstractStreamableTestCase { + + @Override + protected Response createTestInstance() { + return new Response(randomBoolean()); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventActionRequestTests.java new file mode 100644 index 0000000000000..e6f35c76b788a --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventActionRequestTests.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.DeleteCalendarEventAction.Request; + +public class DeleteCalendarEventActionRequestTests extends AbstractStreamableTestCase { + + @Override + protected Request createTestInstance() { + return new Request(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedRequestTests.java new file mode 100644 index 0000000000000..47cf3dc5eec5c --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedRequestTests.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.DeleteDatafeedAction.Request; + +public class DeleteDatafeedRequestTests extends AbstractStreamableTestCase { + + @Override + protected Request createTestInstance() { + return new Request(randomAlphaOfLengthBetween(1, 20)); + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteExpiredDataActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteExpiredDataActionResponseTests.java new file mode 100644 index 0000000000000..34a03fb2e407f --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteExpiredDataActionResponseTests.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction.Response; + +public class DeleteExpiredDataActionResponseTests extends AbstractStreamableTestCase { + + @Override + protected Response createTestInstance() { + return new Response(randomBoolean()); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteJobRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteJobRequestTests.java new file mode 100644 index 0000000000000..2482e9b562526 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/DeleteJobRequestTests.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.DeleteJobAction; + +public class DeleteJobRequestTests extends AbstractStreamableTestCase { + + @Override + protected DeleteJobAction.Request createTestInstance() { + DeleteJobAction.Request request = new DeleteJobAction.Request(randomAlphaOfLengthBetween(1, 20)); + request.setForce(randomBoolean()); + return request; + } + + @Override + protected DeleteJobAction.Request createBlankInstance() { + return new DeleteJobAction.Request(); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ForecastJobActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ForecastJobActionRequestTests.java new file mode 100644 index 0000000000000..cdcac09e0736b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ForecastJobActionRequestTests.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.ForecastJobAction.Request; + +import static org.hamcrest.Matchers.equalTo; + +public class ForecastJobActionRequestTests extends AbstractStreamableXContentTestCase { + + @Override + protected Request doParseInstance(XContentParser parser) { + return Request.parseRequest(null, parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request createTestInstance() { + Request request = new Request(randomAlphaOfLengthBetween(1, 20)); + if (randomBoolean()) { + request.setDuration(TimeValue.timeValueSeconds(randomIntBetween(1, 1_000_000)).getStringRep()); + } + if (randomBoolean()) { + request.setExpiresIn(TimeValue.timeValueSeconds(randomIntBetween(0, 1_000_000)).getStringRep()); + } + return request; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + public void testSetDuration_GivenZero() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new Request().setDuration("0")); + assertThat(e.getMessage(), equalTo("[duration] must be positive: [0ms]")); + } + + public void testSetDuration_GivenNegative() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new Request().setDuration("-1s")); + assertThat(e.getMessage(), equalTo("[duration] must be positive: [-1]")); + } + + public void testSetExpiresIn_GivenZero() { + Request request = new Request(); + request.setExpiresIn("0"); + assertThat(request.getExpiresIn(), equalTo(TimeValue.ZERO)); + } + + public void testSetExpiresIn_GivenNegative() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new Request().setExpiresIn("-1s")); + assertThat(e.getMessage(), equalTo("[expires_in] must be non-negative: [-1]")); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ForecastJobActionResponseTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ForecastJobActionResponseTests.java new file mode 100644 index 0000000000000..b6c0759bceb3e --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ForecastJobActionResponseTests.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.ForecastJobAction.Response; + +public class ForecastJobActionResponseTests extends AbstractStreamableTestCase { + + @Override + protected Response createTestInstance() { + return new Response(randomBoolean(), randomAlphaOfLength(20)); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetBucketActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetBucketActionRequestTests.java new file mode 100644 index 0000000000000..eb5ccb59f3d72 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetBucketActionRequestTests.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.GetBucketsAction.Request; +import org.elasticsearch.xpack.core.ml.action.util.PageParams; + +public class GetBucketActionRequestTests extends AbstractStreamableXContentTestCase { + + @Override + protected Request createTestInstance() { + GetBucketsAction.Request request = new GetBucketsAction.Request(randomAlphaOfLengthBetween(1, 20)); + + if (randomBoolean()) { + request.setTimestamp(String.valueOf(randomLong())); + } else { + if (randomBoolean()) { + request.setStart(String.valueOf(randomLong())); + } + if (randomBoolean()) { + request.setEnd(String.valueOf(randomLong())); + } + if (randomBoolean()) { + request.setExcludeInterim(randomBoolean()); + } + if (randomBoolean()) { + request.setAnomalyScore(randomDouble()); + } + if (randomBoolean()) { + int from = randomInt(10000); + int size = randomInt(10000); + request.setPageParams(new PageParams(from, size)); + } + if (randomBoolean()) { + request.setSort("anomaly_score"); + } + request.setDescending(randomBoolean()); + } + if (randomBoolean()) { + request.setExpand(randomBoolean()); + } + if (randomBoolean()) { + request.setExcludeInterim(randomBoolean()); + } + return request; + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request createBlankInstance() { + return new GetBucketsAction.Request(); + } + + @Override + protected Request doParseInstance(XContentParser parser) { + return GetBucketsAction.Request.parseRequest(null, parser); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetBucketActionResponseTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetBucketActionResponseTests.java new file mode 100644 index 0000000000000..4fbb7a9249641 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetBucketActionResponseTests.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.GetBucketsAction.Response; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; +import org.elasticsearch.xpack.core.ml.job.results.Bucket; +import org.elasticsearch.xpack.core.ml.job.results.BucketInfluencer; +import org.elasticsearch.xpack.core.ml.job.results.PartitionScore; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.List; + +public class GetBucketActionResponseTests extends AbstractStreamableTestCase { + + @Override + protected Response createTestInstance() { + int listSize = randomInt(10); + List hits = new ArrayList<>(listSize); + for (int j = 0; j < listSize; j++) { + String jobId = "foo"; + Bucket bucket = new Bucket(jobId, new Date(randomLong()), randomNonNegativeLong()); + if (randomBoolean()) { + bucket.setAnomalyScore(randomDouble()); + } + if (randomBoolean()) { + int size = randomInt(10); + List bucketInfluencers = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + BucketInfluencer bucketInfluencer = new BucketInfluencer("foo", bucket.getTimestamp(), bucket.getBucketSpan()); + bucketInfluencer.setAnomalyScore(randomDouble()); + bucketInfluencer.setInfluencerFieldName(randomAlphaOfLengthBetween(1, 20)); + bucketInfluencer.setInitialAnomalyScore(randomDouble()); + bucketInfluencer.setProbability(randomDouble()); + bucketInfluencer.setRawAnomalyScore(randomDouble()); + bucketInfluencers.add(bucketInfluencer); + } + bucket.setBucketInfluencers(bucketInfluencers); + } + if (randomBoolean()) { + bucket.setEventCount(randomNonNegativeLong()); + } + if (randomBoolean()) { + bucket.setInitialAnomalyScore(randomDouble()); + } + if (randomBoolean()) { + bucket.setInterim(randomBoolean()); + } + if (randomBoolean()) { + int size = randomInt(10); + List partitionScores = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + partitionScores.add(new PartitionScore(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20), + randomDouble(), randomDouble(), randomDouble())); + } + bucket.setPartitionScores(partitionScores); + } + if (randomBoolean()) { + bucket.setProcessingTimeMs(randomLong()); + } + if (randomBoolean()) { + int size = randomInt(10); + List records = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + AnomalyRecord anomalyRecord = new AnomalyRecord(jobId, new Date(randomLong()), randomNonNegativeLong()); + anomalyRecord.setActual(Collections.singletonList(randomDouble())); + anomalyRecord.setTypical(Collections.singletonList(randomDouble())); + anomalyRecord.setProbability(randomDouble()); + anomalyRecord.setInterim(randomBoolean()); + records.add(anomalyRecord); + } + bucket.setRecords(records); + } + hits.add(bucket); + } + QueryPage buckets = new QueryPage<>(hits, listSize, Bucket.RESULTS_FIELD); + return 
new Response(buckets); + } + + @Override + protected Response createBlankInstance() { + return new GetBucketsAction.Response(); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCalendarEventsActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCalendarEventsActionRequestTests.java new file mode 100644 index 0000000000000..eccfd933e5a52 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCalendarEventsActionRequestTests.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.GetCalendarEventsAction; +import org.elasticsearch.xpack.core.ml.action.util.PageParams; + +public class GetCalendarEventsActionRequestTests extends AbstractStreamableXContentTestCase { + + @Override + protected GetCalendarEventsAction.Request createTestInstance() { + String id = randomAlphaOfLengthBetween(1, 20); + GetCalendarEventsAction.Request request = new GetCalendarEventsAction.Request(id); + if (randomBoolean()) { + request.setStart(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + request.setEnd(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + request.setJobId(randomAlphaOfLength(8)); + } + if (randomBoolean()) { + request.setPageParams(new PageParams(randomIntBetween(0, 10), randomIntBetween(1, 10))); + } + return request; + } + + @Override + protected GetCalendarEventsAction.Request createBlankInstance() { + return new GetCalendarEventsAction.Request(); + } + + @Override + protected GetCalendarEventsAction.Request doParseInstance(XContentParser parser) { + return GetCalendarEventsAction.Request.parseRequest(null, parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public void testValidate() { + GetCalendarEventsAction.Request request = new GetCalendarEventsAction.Request("cal-name"); + request.setJobId("foo"); + + ActionRequestValidationException validationException = request.validate(); + assertNotNull(validationException); + assertEquals("Validation Failed: 1: If job_id is used calendar_id must be '_all';", validationException.getMessage()); + + request = new GetCalendarEventsAction.Request("_all"); + request.setJobId("foo"); + assertNull(request.validate()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCalendarsActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCalendarsActionRequestTests.java new file mode 100644 index 0000000000000..33584567ef40d --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCalendarsActionRequestTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.GetCalendarsAction; +import org.elasticsearch.xpack.core.ml.action.util.PageParams; + +public class GetCalendarsActionRequestTests extends AbstractStreamableXContentTestCase { + + @Override + protected GetCalendarsAction.Request createTestInstance() { + GetCalendarsAction.Request request = new GetCalendarsAction.Request(); + if (randomBoolean()) { + request.setCalendarId(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + request.setPageParams(PageParams.defaultParams()); + } + return request; + } + + @Override + protected GetCalendarsAction.Request createBlankInstance() { + return new GetCalendarsAction.Request(); + } + + @Override + protected GetCalendarsAction.Request doParseInstance(XContentParser parser) { + return GetCalendarsAction.Request.parseRequest(null, parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesRequestTests.java new file mode 100644 index 0000000000000..31cf1767b7c63 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesRequestTests.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.util.PageParams; + +public class GetCategoriesRequestTests extends AbstractStreamableXContentTestCase { + + @Override + protected GetCategoriesAction.Request createTestInstance() { + String jobId = randomAlphaOfLength(10); + GetCategoriesAction.Request request = new GetCategoriesAction.Request(jobId); + if (randomBoolean()) { + request.setCategoryId(randomNonNegativeLong()); + } else { + int from = randomInt(10000); + int size = randomInt(10000); + request.setPageParams(new PageParams(from, size)); + } + return request; + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected GetCategoriesAction.Request createBlankInstance() { + return new GetCategoriesAction.Request(); + } + + @Override + protected GetCategoriesAction.Request doParseInstance(XContentParser parser) { + return GetCategoriesAction.Request.parseRequest(null, parser); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesResponseTests.java new file mode 100644 index 0000000000000..c77d000bb5f2d --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetCategoriesResponseTests.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.job.results.CategoryDefinition; + +import java.util.Collections; + +public class GetCategoriesResponseTests extends AbstractStreamableTestCase { + + @Override + protected GetCategoriesAction.Response createTestInstance() { + CategoryDefinition definition = new CategoryDefinition(randomAlphaOfLength(10)); + QueryPage queryPage = + new QueryPage<>(Collections.singletonList(definition), 1L, CategoryDefinition.RESULTS_FIELD); + return new GetCategoriesAction.Response(queryPage); + } + + @Override + protected GetCategoriesAction.Response createBlankInstance() { + return new GetCategoriesAction.Response(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedStatsActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedStatsActionRequestTests.java new file mode 100644 index 0000000000000..37a539c00218b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedStatsActionRequestTests.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction.Request; + +public class GetDatafeedStatsActionRequestTests extends AbstractWireSerializingTestCase { + + @Override + protected Request createTestInstance() { + Request request = new Request(randomBoolean() ? MetaData.ALL : randomAlphaOfLengthBetween(1, 20)); + request.setAllowNoDatafeeds(randomBoolean()); + return request; + } + + @Override + protected Writeable.Reader instanceReader() { + return Request::new; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedStatsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedStatsActionResponseTests.java new file mode 100644 index 0000000000000..fce0529e558d1 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedStatsActionResponseTests.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction.Response; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; + +import java.io.IOException; +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.is; + +public class GetDatafeedStatsActionResponseTests extends AbstractStreamableTestCase<Response> { + + @Override + protected Response createTestInstance() { + final Response result; + + int listSize = randomInt(10); + List<Response.DatafeedStats> datafeedStatsList = new ArrayList<>(listSize); + for (int j = 0; j < listSize; j++) { + String datafeedId = randomAlphaOfLength(10); + DatafeedState datafeedState = randomFrom(DatafeedState.values()); + + DiscoveryNode node = null; + if (randomBoolean()) { + node = new DiscoveryNode("_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.CURRENT); + } + String explanation = null; + if (randomBoolean()) { + explanation = randomAlphaOfLength(3); + } + Response.DatafeedStats datafeedStats = new Response.DatafeedStats(datafeedId, datafeedState, node, explanation); + datafeedStatsList.add(datafeedStats); + } + + result = new Response(new QueryPage<>(datafeedStatsList, datafeedStatsList.size(), DatafeedConfig.RESULTS_FIELD)); + + return result; + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } + + @SuppressWarnings("unchecked") + public void testDatafeedStatsToXContent() throws IOException { + Map<String, String> attributes = new HashMap<>(); + attributes.put("ml.enabled", "true"); + attributes.put("ml.max_open_jobs", "5"); + attributes.put("non-ml-attribute", "should be filtered out"); + TransportAddress transportAddress = new TransportAddress(TransportAddress.META_ADDRESS, 9000); + + DiscoveryNode node = new DiscoveryNode("df-node-name", "df-node-id", transportAddress, attributes, + EnumSet.noneOf(DiscoveryNode.Role.class), + Version.CURRENT); + + Response.DatafeedStats stats = new Response.DatafeedStats("df-id", DatafeedState.STARTED, node, null); + + XContentType xContentType = randomFrom(XContentType.values()); + BytesReference bytes; + try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { + stats.toXContent(builder, ToXContent.EMPTY_PARAMS); + bytes = BytesReference.bytes(builder); + } + + Map<String, Object> dfStatsMap = XContentHelper.convertToMap(bytes, randomBoolean(), xContentType).v2(); + + assertThat(dfStatsMap.size(), is(equalTo(3))); + assertThat(dfStatsMap, hasEntry("datafeed_id", "df-id")); + assertThat(dfStatsMap, hasEntry("state", "started")); + assertThat(dfStatsMap, hasKey("node")); + + Map<String, Object> nodeMap = (Map<String, Object>) dfStatsMap.get("node"); +
assertThat(nodeMap, hasEntry("id", "df-node-id")); + assertThat(nodeMap, hasEntry("name", "df-node-name")); + assertThat(nodeMap, hasKey("ephemeral_id")); + assertThat(nodeMap, hasKey("transport_address")); + assertThat(nodeMap, hasKey("attributes")); + + Map nodeAttributes = (Map) nodeMap.get("attributes"); + assertThat(nodeAttributes.size(), is(equalTo(2))); + assertThat(nodeAttributes, hasEntry("ml.enabled", "true")); + assertThat(nodeAttributes, hasEntry("ml.max_open_jobs", "5")); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsActionRequestTests.java new file mode 100644 index 0000000000000..5fbdfd2e8cb03 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsActionRequestTests.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.ml.action.GetDatafeedsAction.Request; + +public class GetDatafeedsActionRequestTests extends AbstractWireSerializingTestCase { + + @Override + protected Request createTestInstance() { + Request request = new Request(randomBoolean() ? MetaData.ALL : randomAlphaOfLengthBetween(1, 20)); + request.setAllowNoDatafeeds(randomBoolean()); + return request; + } + + @Override + protected Writeable.Reader instanceReader() { + return Request::new; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsActionResponseTests.java new file mode 100644 index 0000000000000..f5eef5f1776f7 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsActionResponseTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.GetDatafeedsAction.Response; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfigTests; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class GetDatafeedsActionResponseTests extends AbstractStreamableTestCase { + + @Override + protected Response createTestInstance() { + int listSize = randomInt(10); + List datafeedList = new ArrayList<>(listSize); + for (int j = 0; j < listSize; j++) { + datafeedList.add(DatafeedConfigTests.createRandomizedDatafeedConfig(randomAlphaOfLength(10))); + } + return new Response(new QueryPage<>(datafeedList, datafeedList.size(), DatafeedConfig.RESULTS_FIELD)); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedWriteableRegistry(searchModule.getNamedWriteables()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetFiltersActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetFiltersActionRequestTests.java new file mode 100644 index 0000000000000..3d385d280788e --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetFiltersActionRequestTests.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.GetFiltersAction.Request; +import org.elasticsearch.xpack.core.ml.action.util.PageParams; + +public class GetFiltersActionRequestTests extends AbstractStreamableTestCase { + + + @Override + protected Request createTestInstance() { + Request request = new Request(); + if (randomBoolean()) { + request.setFilterId(randomAlphaOfLengthBetween(1, 20)); + } else { + if (randomBoolean()) { + int from = randomInt(10000); + int size = randomInt(10000); + request.setPageParams(new PageParams(from, size)); + } + } + return request; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetFiltersActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetFiltersActionResponseTests.java new file mode 100644 index 0000000000000..c8465c87587e9 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetFiltersActionResponseTests.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.GetFiltersAction.Response; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.job.config.MlFilter; + +import java.util.Collections; + +public class GetFiltersActionResponseTests extends AbstractStreamableTestCase { + + @Override + protected Response createTestInstance() { + final QueryPage result; + + MlFilter doc = new MlFilter( + randomAlphaOfLengthBetween(1, 20), Collections.singletonList(randomAlphaOfLengthBetween(1, 20))); + result = new QueryPage<>(Collections.singletonList(doc), 1, MlFilter.RESULTS_FIELD); + return new Response(result); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersActionRequestTests.java new file mode 100644 index 0000000000000..c150e87cf3c10 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersActionRequestTests.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.GetInfluencersAction.Request; +import org.elasticsearch.xpack.core.ml.action.util.PageParams; + +public class GetInfluencersActionRequestTests extends AbstractStreamableXContentTestCase { + + @Override + protected Request doParseInstance(XContentParser parser) { + return GetInfluencersAction.Request.parseRequest(null, parser); + } + + @Override + protected Request createTestInstance() { + Request request = new Request(randomAlphaOfLengthBetween(1, 20)); + if (randomBoolean()) { + String start = randomBoolean() ? randomAlphaOfLengthBetween(1, 20) : String.valueOf(randomNonNegativeLong()); + request.setStart(start); + } + if (randomBoolean()) { + String end = randomBoolean() ? 
randomAlphaOfLengthBetween(1, 20) : String.valueOf(randomNonNegativeLong()); + request.setEnd(end); + } + if (randomBoolean()) { + request.setInfluencerScore(randomDouble()); + } + if (randomBoolean()) { + request.setExcludeInterim(randomBoolean()); + } + if (randomBoolean()) { + request.setSort(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + request.setDescending(randomBoolean()); + } + if (randomBoolean()) { + int from = randomInt(10000); + int size = randomInt(10000); + request.setPageParams(new PageParams(from, size)); + } + return request; + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersActionResponseTests.java new file mode 100644 index 0000000000000..daeda3ed82a95 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetInfluencersActionResponseTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.GetInfluencersAction.Response; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.job.results.Influencer; + +import java.util.ArrayList; +import java.util.Date; +import java.util.List; + +public class GetInfluencersActionResponseTests extends AbstractStreamableTestCase<Response> { + + @Override + protected Response createTestInstance() { + int listSize = randomInt(10); + List<Influencer> hits = new ArrayList<>(listSize); + for (int j = 0; j < listSize; j++) { + Influencer influencer = new Influencer(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20), + randomAlphaOfLengthBetween(1, 20), new Date(randomNonNegativeLong()), randomNonNegativeLong()); + influencer.setInfluencerScore(randomDouble()); + influencer.setInitialInfluencerScore(randomDouble()); + influencer.setProbability(randomDouble()); + influencer.setInterim(randomBoolean()); + hits.add(influencer); + } + QueryPage<Influencer> buckets = new QueryPage<>(hits, listSize, Influencer.RESULTS_FIELD); + return new Response(buckets); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobStatsActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobStatsActionRequestTests.java new file mode 100644 index 0000000000000..913618de38b58 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobStatsActionRequestTests.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction.Request; + +public class GetJobStatsActionRequestTests extends AbstractStreamableTestCase { + + @Override + protected Request createTestInstance() { + Request request = new Request(randomBoolean() ? MetaData.ALL : randomAlphaOfLengthBetween(1, 20)); + request.setAllowNoJobs(randomBoolean()); + return request; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobStatsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobStatsActionResponseTests.java new file mode 100644 index 0000000000000..ff979a8570aba --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobStatsActionResponseTests.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction.Response; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.config.JobState; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCountsTests; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; + +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.List; + +import static org.elasticsearch.common.unit.TimeValue.parseTimeValue; + +public class GetJobStatsActionResponseTests extends AbstractStreamableTestCase { + + @Override + protected Response createTestInstance() { + final Response result; + + int listSize = randomInt(10); + List jobStatsList = new ArrayList<>(listSize); + for (int j = 0; j < listSize; j++) { + String jobId = randomAlphaOfLength(10); + + DataCounts dataCounts = new DataCountsTests().createTestInstance(); + + ModelSizeStats sizeStats = null; + if (randomBoolean()) { + sizeStats = new ModelSizeStats.Builder("foo").build(); + } + JobState jobState = randomFrom(EnumSet.allOf(JobState.class)); + + DiscoveryNode node = null; + if (randomBoolean()) { + node = new DiscoveryNode("_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.CURRENT); + } + String explanation = null; + if (randomBoolean()) { + explanation = randomAlphaOfLength(3); + } + TimeValue openTime = null; + if (randomBoolean()) { + openTime = parseTimeValue(randomPositiveTimeValue(), "open_time-Test"); + } + Response.JobStats jobStats = new Response.JobStats(jobId, dataCounts, sizeStats, jobState, node, explanation, openTime); + jobStatsList.add(jobStats); + } + + result = new Response(new QueryPage<>(jobStatsList, jobStatsList.size(), 
Job.RESULTS_FIELD)); + + return result; + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobsActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobsActionRequestTests.java new file mode 100644 index 0000000000000..326df1b2989f2 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobsActionRequestTests.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.ml.action.GetJobsAction.Request; + +public class GetJobsActionRequestTests extends AbstractWireSerializingTestCase { + + @Override + protected Request createTestInstance() { + Request request = new Request(randomBoolean() ? MetaData.ALL : randomAlphaOfLengthBetween(1, 20)); + request.setAllowNoJobs(randomBoolean()); + return request; + } + + @Override + protected Writeable.Reader instanceReader() { + return Request::new; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobsActionResponseTests.java new file mode 100644 index 0000000000000..fee8a3a8e826b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobsActionResponseTests.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.GetJobsAction.Response; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.config.JobTests; + +import java.util.ArrayList; +import java.util.List; + +public class GetJobsActionResponseTests extends AbstractStreamableTestCase { + + @Override + protected Response createTestInstance() { + final Response result; + + int listSize = randomInt(10); + List jobList = new ArrayList<>(listSize); + for (int j = 0; j < listSize; j++) { + jobList.add(JobTests.createRandomizedJob()); + } + + result = new Response(new QueryPage<>(jobList, jobList.size(), Job.RESULTS_FIELD)); + + return result; + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsActionRequestTests.java new file mode 100644 index 0000000000000..3c9275de357fc --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsActionRequestTests.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.GetModelSnapshotsAction.Request; +import org.elasticsearch.xpack.core.ml.action.util.PageParams; + +public class GetModelSnapshotsActionRequestTests extends AbstractStreamableXContentTestCase { + + @Override + protected Request doParseInstance(XContentParser parser) { + return GetModelSnapshotsAction.Request.parseRequest(null, null, parser); + } + + @Override + protected Request createTestInstance() { + Request request = new Request(randomAlphaOfLengthBetween(1, 20), randomBoolean() ? null : randomAlphaOfLengthBetween(1, 20)); + if (randomBoolean()) { + request.setStart(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + request.setEnd(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + request.setSort(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + request.setDescOrder(randomBoolean()); + } + if (randomBoolean()) { + int from = randomInt(10000); + int size = randomInt(10000); + request.setPageParams(new PageParams(from, size)); + } + return request; + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsActionResponseTests.java new file mode 100644 index 0000000000000..0cf34c66f249e --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetModelSnapshotsActionResponseTests.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.GetModelSnapshotsAction.Response; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshotTests; + +import java.util.ArrayList; +import java.util.List; + +public class GetModelSnapshotsActionResponseTests extends AbstractStreamableTestCase { + + @Override + protected Response createTestInstance() { + int listSize = randomInt(10); + List hits = new ArrayList<>(listSize); + for (int j = 0; j < listSize; j++) { + hits.add(ModelSnapshotTests.createRandomized()); + } + QueryPage snapshots = new QueryPage<>(hits, listSize, ModelSnapshot.RESULTS_FIELD); + return new Response(snapshots); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsActionRequestTests.java new file mode 100644 index 0000000000000..9799010204cd6 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsActionRequestTests.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.GetOverallBucketsAction.Request; + +public class GetOverallBucketsActionRequestTests extends AbstractStreamableXContentTestCase { + + @Override + protected Request createTestInstance() { + Request request = new Request(randomAlphaOfLengthBetween(1, 20)); + + if (randomBoolean()) { + request.setTopN(randomIntBetween(1, 1000)); + } + if (randomBoolean()) { + request.setBucketSpan(TimeValue.timeValueSeconds(randomIntBetween(1, 1_000_000))); + } + if (randomBoolean()) { + request.setStart(randomNonNegativeLong()); + } + if (randomBoolean()) { + request.setExcludeInterim(randomBoolean()); + } + if (randomBoolean()) { + request.setOverallScore(randomDouble()); + } + if (randomBoolean()) { + request.setEnd(randomNonNegativeLong()); + } + request.setAllowNoJobs(randomBoolean()); + return request; + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + @Override + protected Request doParseInstance(XContentParser parser) { + return Request.parseRequest(null, parser); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsActionResponseTests.java new file mode 100644 index 0000000000000..8873b340c12ba --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsActionResponseTests.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.GetOverallBucketsAction.Response; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.job.results.OverallBucket; + +import java.util.ArrayList; +import java.util.Date; +import java.util.List; + +public class GetOverallBucketsActionResponseTests extends AbstractStreamableTestCase<Response> { + + @Override + protected Response createTestInstance() { + int listSize = randomInt(10); + List<OverallBucket> hits = new ArrayList<>(listSize); + String jobId = randomAlphaOfLengthBetween(1, 20); + for (int bucketIndex = 0; bucketIndex < listSize; bucketIndex++) { + int jobsCount = randomInt(5); + List<OverallBucket.JobInfo> jobs = new ArrayList<>(jobsCount); + for (int jobIndex = 0; jobIndex < jobsCount; jobIndex++) { + jobs.add(new OverallBucket.JobInfo(jobId, randomDouble())); + } + hits.add(new OverallBucket(new Date(randomNonNegativeLong()), randomNonNegativeLong(), randomDouble(), jobs, randomBoolean())); + } + QueryPage<OverallBucket> snapshots = new QueryPage<>(hits, listSize, OverallBucket.RESULTS_FIELD); + return new Response(snapshots); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetRecordsActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetRecordsActionRequestTests.java new file mode 100644 index 0000000000000..d249b0a8e7024 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetRecordsActionRequestTests.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.GetRecordsAction.Request; +import org.elasticsearch.xpack.core.ml.action.util.PageParams; + +public class GetRecordsActionRequestTests extends AbstractStreamableXContentTestCase<Request> { + + @Override + protected Request doParseInstance(XContentParser parser) { + return GetRecordsAction.Request.parseRequest(null, parser); + } + + @Override + protected Request createTestInstance() { + Request request = new Request(randomAlphaOfLengthBetween(1, 20)); + if (randomBoolean()) { + String start = randomBoolean() ? randomAlphaOfLengthBetween(1, 20) : String.valueOf(randomNonNegativeLong()); + request.setStart(start); + } + if (randomBoolean()) { + String end = randomBoolean() ?
randomAlphaOfLengthBetween(1, 20) : String.valueOf(randomNonNegativeLong()); + request.setEnd(end); + } + if (randomBoolean()) { + request.setSort(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + request.setDescending(randomBoolean()); + } + if (randomBoolean()) { + request.setRecordScore(randomDouble()); + } + if (randomBoolean()) { + request.setExcludeInterim(randomBoolean()); + } + if (randomBoolean()) { + int from = randomInt(10000); + int size = randomInt(10000); + request.setPageParams(new PageParams(from, size)); + } + return request; + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetRecordsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetRecordsActionResponseTests.java new file mode 100644 index 0000000000000..78630e0cf0ffd --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetRecordsActionResponseTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.GetRecordsAction.Response; +import org.elasticsearch.xpack.core.ml.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; + +import java.util.ArrayList; +import java.util.Date; +import java.util.List; + +public class GetRecordsActionResponseTests extends AbstractStreamableTestCase<Response> { + + @Override + protected Response createTestInstance() { + int listSize = randomInt(10); + List<AnomalyRecord> hits = new ArrayList<>(listSize); + String jobId = randomAlphaOfLengthBetween(1, 20); + for (int j = 0; j < listSize; j++) { + AnomalyRecord record = new AnomalyRecord(jobId, new Date(), 600); + hits.add(record); + } + QueryPage<AnomalyRecord> snapshots = new QueryPage<>(hits, listSize, AnomalyRecord.RESULTS_FIELD); + return new Response(snapshots); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/MlInfoActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/MlInfoActionResponseTests.java new file mode 100644 index 0000000000000..7fb216d445d15 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/MlInfoActionResponseTests.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.MlInfoAction.Response; + +import java.util.HashMap; +import java.util.Map; + +public class MlInfoActionResponseTests extends AbstractStreamableTestCase { + + @Override + protected Response createTestInstance() { + int size = randomInt(10); + Map info = new HashMap<>(); + for (int j = 0; j < size; j++) { + info.put(randomAlphaOfLength(20), randomAlphaOfLength(20)); + } + return new Response(info); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/OpenJobActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/OpenJobActionRequestTests.java new file mode 100644 index 0000000000000..de85907a83e6b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/OpenJobActionRequestTests.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.OpenJobAction.Request; + +public class OpenJobActionRequestTests extends AbstractStreamableXContentTestCase { + + @Override + protected Request createTestInstance() { + OpenJobAction.JobParams params = new OpenJobAction.JobParams(randomAlphaOfLengthBetween(1, 20)); + if (randomBoolean()) { + params.setTimeout(TimeValue.timeValueMillis(randomNonNegativeLong())); + } + return new Request(params); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + @Override + protected Request doParseInstance(XContentParser parser) { + return Request.parseRequest(null, parser); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PersistJobActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PersistJobActionRequestTests.java new file mode 100644 index 0000000000000..cf210403dd6b7 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PersistJobActionRequestTests.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; + +public class PersistJobActionRequestTests extends AbstractStreamableTestCase { + @Override + protected PersistJobAction.Request createBlankInstance() { + return new PersistJobAction.Request(); + } + + @Override + protected PersistJobAction.Request createTestInstance() { + return new PersistJobAction.Request(randomAlphaOfLength(10)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PersistJobActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PersistJobActionResponseTests.java new file mode 100644 index 0000000000000..746e40445afd8 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PersistJobActionResponseTests.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; + +public class PersistJobActionResponseTests extends AbstractStreamableTestCase { + @Override + protected PersistJobAction.Response createBlankInstance() { + return new PersistJobAction.Response(); + } + + @Override + protected PersistJobAction.Response createTestInstance() { + return new PersistJobAction.Response(randomBoolean()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventActionRequestTests.java new file mode 100644 index 0000000000000..ce6a64be6c572 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventActionRequestTests.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.PostCalendarEventsAction; +import org.elasticsearch.xpack.core.ml.calendars.ScheduledEvent; +import org.elasticsearch.xpack.core.ml.calendars.ScheduledEventTests; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class PostCalendarEventActionRequestTests extends AbstractStreamableTestCase { + + @Override + protected PostCalendarEventsAction.Request createTestInstance() { + String id = randomAlphaOfLengthBetween(1, 20); + return createTestInstance(id); + } + + private PostCalendarEventsAction.Request createTestInstance(String calendarId) { + int numEvents = randomIntBetween(1, 10); + List events = new ArrayList<>(); + for (int i=0; i PostCalendarEventsAction.Request.parseRequest("bar", parser)); + assertEquals("Inconsistent calendar_id; 'foo' specified in the body differs from 'bar' specified as a URL argument", + e.getMessage()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PostDataActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PostDataActionRequestTests.java new file mode 100644 index 0000000000000..ba4a3ff06477d --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PostDataActionRequestTests.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription.DataFormat; + +public class PostDataActionRequestTests extends AbstractStreamableTestCase { + @Override + protected PostDataAction.Request createTestInstance() { + PostDataAction.Request request = new PostDataAction.Request(randomAlphaOfLengthBetween(1, 20)); + if (randomBoolean()) { + request.setResetStart(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + request.setResetEnd(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + request.setDataDescription(new DataDescription(randomFrom(DataFormat.values()), + randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20), + randomAlphaOfLength(1).charAt(0), randomAlphaOfLength(1).charAt(0))); + } + if (randomBoolean()) { + request.setContent(new BytesArray(new byte[0]), randomFrom(XContentType.values())); + } + return request; + } + + @Override + protected PostDataAction.Request createBlankInstance() { + return new PostDataAction.Request(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PostDataActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PostDataActionResponseTests.java new file mode 100644 index 0000000000000..2f1a9d2e27d7b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PostDataActionResponseTests.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCountsTests; + +public class PostDataActionResponseTests extends AbstractStreamableTestCase { + + @Override + protected PostDataAction.Response createTestInstance() { + DataCounts counts = new DataCountsTests().createTestInstance(); + return new PostDataAction.Response(counts); + } + + @Override + protected PostDataAction.Response createBlankInstance() { + return new PostDataAction.Response("foo") ; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PostDataFlushRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PostDataFlushRequestTests.java new file mode 100644 index 0000000000000..a4fd8c3c47069 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PostDataFlushRequestTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.FlushJobAction.Request; + +public class PostDataFlushRequestTests extends AbstractStreamableTestCase { + + @Override + protected Request createTestInstance() { + Request request = new Request(randomAlphaOfLengthBetween(1, 20)); + request.setCalcInterim(randomBoolean()); + if (randomBoolean()) { + request.setStart(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + request.setEnd(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + request.setAdvanceTime(randomAlphaOfLengthBetween(1, 20)); + } + return request; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + public void testNullJobIdThrows() { + expectThrows(IllegalArgumentException.class, () -> new Request(null)); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PostDataFlushResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PostDataFlushResponseTests.java new file mode 100644 index 0000000000000..14f067e38ad87 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PostDataFlushResponseTests.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.FlushJobAction.Response; +import org.joda.time.DateTime; + +public class PostDataFlushResponseTests extends AbstractStreamableTestCase { + + @Override + protected Response createTestInstance() { + return new Response(randomBoolean(), new DateTime(randomDateTimeZone()).toDate()); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedActionRequestTests.java new file mode 100644 index 0000000000000..b7828e4b97682 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedActionRequestTests.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.PreviewDatafeedAction.Request; + +public class PreviewDatafeedActionRequestTests extends AbstractStreamableTestCase { + + @Override + protected Request createTestInstance() { + return new Request(randomAlphaOfLength(10)); + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionRequestTests.java new file mode 100644 index 0000000000000..7d111a31c9d51 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionRequestTests.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.calendars.CalendarTests; +import org.elasticsearch.xpack.core.ml.job.config.JobTests; + +public class PutCalendarActionRequestTests extends AbstractStreamableXContentTestCase { + + private final String calendarId = JobTests.randomValidJobId(); + + @Override + protected PutCalendarAction.Request createTestInstance() { + return new PutCalendarAction.Request(CalendarTests.testInstance(calendarId)); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected PutCalendarAction.Request createBlankInstance() { + return new PutCalendarAction.Request(); + } + + @Override + protected PutCalendarAction.Request doParseInstance(XContentParser parser) { + return PutCalendarAction.Request.parseRequest(calendarId, parser); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionResponseTests.java new file mode 100644 index 0000000000000..941de884554bf --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutCalendarActionResponseTests.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.calendars.CalendarTests; + +public class PutCalendarActionResponseTests extends AbstractStreamableTestCase { + + @Override + protected PutCalendarAction.Response createTestInstance() { + return new PutCalendarAction.Response(CalendarTests.testInstance()); + } + + @Override + protected PutCalendarAction.Response createBlankInstance() { + return new PutCalendarAction.Response(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedActionRequestTests.java new file mode 100644 index 0000000000000..f01a360f909a0 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedActionRequestTests.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.PutDatafeedAction.Request; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfigTests; +import org.junit.Before; + +import java.util.Collections; + +public class PutDatafeedActionRequestTests extends AbstractStreamableXContentTestCase { + + private String datafeedId; + + @Before + public void setUpDatafeedId() { + datafeedId = DatafeedConfigTests.randomValidDatafeedId(); + } + + @Override + protected Request createTestInstance() { + DatafeedConfig.Builder datafeedConfig = new DatafeedConfig.Builder(datafeedId, randomAlphaOfLength(10)); + datafeedConfig.setIndices(Collections.singletonList(randomAlphaOfLength(10))); + datafeedConfig.setTypes(Collections.singletonList(randomAlphaOfLength(10))); + return new Request(datafeedConfig.build()); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + @Override + protected Request doParseInstance(XContentParser parser) { + return Request.parseRequest(datafeedId, parser); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedWriteableRegistry(searchModule.getNamedWriteables()); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedActionResponseTests.java new file mode 100644 index 
0000000000000..13e00e27f68aa --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedActionResponseTests.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.PutDatafeedAction.Response; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfigTests; + +import java.util.Arrays; +import java.util.Collections; + +public class PutDatafeedActionResponseTests extends AbstractStreamableTestCase { + + @Override + protected Response createTestInstance() { + DatafeedConfig.Builder datafeedConfig = new DatafeedConfig.Builder( + DatafeedConfigTests.randomValidDatafeedId(), randomAlphaOfLength(10)); + datafeedConfig.setIndices(Arrays.asList(randomAlphaOfLength(10))); + datafeedConfig.setTypes(Arrays.asList(randomAlphaOfLength(10))); + return new Response(datafeedConfig.build()); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedWriteableRegistry(searchModule.getNamedWriteables()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionRequestTests.java new file mode 100644 index 0000000000000..21845922470f0 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionRequestTests.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.PutFilterAction.Request; +import org.elasticsearch.xpack.core.ml.job.config.MlFilter; + +import java.util.ArrayList; +import java.util.List; + +public class PutFilterActionRequestTests extends AbstractStreamableXContentTestCase { + + private final String filterId = randomAlphaOfLengthBetween(1, 20); + + @Override + protected Request createTestInstance() { + int size = randomInt(10); + List items = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + items.add(randomAlphaOfLengthBetween(1, 20)); + } + MlFilter filter = new MlFilter(filterId, items); + return new PutFilterAction.Request(filter); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request createBlankInstance() { + return new PutFilterAction.Request(); + } + + @Override + protected Request doParseInstance(XContentParser parser) { + return PutFilterAction.Request.parseRequest(filterId, parser); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutJobActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutJobActionRequestTests.java new file mode 100644 index 0000000000000..039954f141421 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutJobActionRequestTests.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.PutJobAction.Request; +import org.elasticsearch.xpack.core.ml.job.config.Job; + +import java.io.IOException; +import java.util.Date; + +import static org.elasticsearch.xpack.core.ml.job.config.JobTests.buildJobBuilder; +import static org.elasticsearch.xpack.core.ml.job.config.JobTests.randomValidJobId; + +public class PutJobActionRequestTests extends AbstractStreamableXContentTestCase { + + private final String jobId = randomValidJobId(); + + @Override + protected Request createTestInstance() { + Job.Builder jobConfiguration = buildJobBuilder(jobId, null); + return new Request(jobConfiguration); + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request doParseInstance(XContentParser parser) { + return Request.parseRequest(jobId, parser); + } + + public void testParseRequest_InvalidCreateSetting() throws IOException { + Job.Builder jobConfiguration = buildJobBuilder(jobId, null); + jobConfiguration.setLastDataTime(new Date()); + BytesReference bytes = XContentHelper.toXContent(jobConfiguration, XContentType.JSON, false); + XContentParser parser = createParser(XContentType.JSON.xContent(), bytes); + expectThrows(IllegalArgumentException.class, () -> Request.parseRequest(jobId, parser)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutJobActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutJobActionResponseTests.java new file mode 100644 index 0000000000000..090ae23f2cbb0 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutJobActionResponseTests.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.PutJobAction.Response; +import org.elasticsearch.xpack.core.ml.job.config.Job; + +import static org.elasticsearch.xpack.core.ml.job.config.JobTests.buildJobBuilder; +import static org.elasticsearch.xpack.core.ml.job.config.JobTests.randomValidJobId; + +public class PutJobActionResponseTests extends AbstractStreamableTestCase { + + @Override + protected Response createTestInstance() { + Job.Builder builder = buildJobBuilder(randomValidJobId()); + return new Response(builder.build()); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotActionRequestTests.java new file mode 100644 index 0000000000000..cd1b48cb31aed --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotActionRequestTests.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction; +import org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction.Request; + +public class RevertModelSnapshotActionRequestTests extends AbstractStreamableXContentTestCase { + + @Override + protected Request createTestInstance() { + RevertModelSnapshotAction.Request request = + new RevertModelSnapshotAction.Request(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + if (randomBoolean()) { + request.setDeleteInterveningResults(randomBoolean()); + } + return request; + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request createBlankInstance() { + return new RevertModelSnapshotAction.Request(); + } + + @Override + protected Request doParseInstance(XContentParser parser) { + return RevertModelSnapshotAction.Request.parseRequest(null, null, parser); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotActionResponseTests.java new file mode 100644 index 0000000000000..f24a8ccb9e180 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotActionResponseTests.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction.Response; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshotTests; + +public class RevertModelSnapshotActionResponseTests extends AbstractStreamableTestCase { + + @Override + protected Response createTestInstance() { + return new RevertModelSnapshotAction.Response(ModelSnapshotTests.createRandomized()); + } + + @Override + protected Response createBlankInstance() { + return new RevertModelSnapshotAction.Response(); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedActionRequestTests.java new file mode 100644 index 0000000000000..bae610c5e3694 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedActionRequestTests.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction.DatafeedParams; +import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction.Request; + +import static org.hamcrest.Matchers.equalTo; + +public class StartDatafeedActionRequestTests extends AbstractStreamableXContentTestCase { + + @Override + protected Request createTestInstance() { + DatafeedParams params = new DatafeedParams(randomAlphaOfLength(10), randomNonNegativeLong()); + if (randomBoolean()) { + params.setEndTime(randomNonNegativeLong()); + } + if (randomBoolean()) { + params.setTimeout(TimeValue.timeValueMillis(randomNonNegativeLong())); + } + return new Request(params); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + @Override + protected Request doParseInstance(XContentParser parser) { + return Request.parseRequest(null, parser); + } + + public void testParseDateOrThrow() { + assertEquals(0L, StartDatafeedAction.DatafeedParams.parseDateOrThrow("0", + StartDatafeedAction.START_TIME, () -> System.currentTimeMillis())); + assertEquals(0L, StartDatafeedAction.DatafeedParams.parseDateOrThrow("1970-01-01T00:00:00Z", + StartDatafeedAction.START_TIME, () -> System.currentTimeMillis())); + assertThat(StartDatafeedAction.DatafeedParams.parseDateOrThrow("now", + StartDatafeedAction.START_TIME, () -> 123456789L), equalTo(123456789L)); + + Exception e = expectThrows(ElasticsearchParseException.class, + () -> StartDatafeedAction.DatafeedParams.parseDateOrThrow("not-a-date", + StartDatafeedAction.START_TIME, () -> System.currentTimeMillis())); + assertEquals("Query param [start] with value [not-a-date] cannot be parsed as a date or converted to a number (epoch).", + e.getMessage()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedActionRequestTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedActionRequestTests.java new file mode 100644 index 0000000000000..1983b314b44c0 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StopDatafeedActionRequestTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction.Request; + +public class StopDatafeedActionRequestTests extends AbstractStreamableXContentTestCase { + + @Override + protected Request createTestInstance() { + Request request = new Request(randomAlphaOfLengthBetween(1, 20)); + if (randomBoolean()) { + request.setStopTimeout(TimeValue.timeValueMillis(randomNonNegativeLong())); + } + if (randomBoolean()) { + request.setForce(randomBoolean()); + } + if (randomBoolean()) { + request.setAllowNoDatafeeds(randomBoolean()); + } + return request; + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + @Override + protected Request doParseInstance(XContentParser parser) { + return Request.parseRequest(null, parser); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobActionResquestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobActionResquestTests.java new file mode 100644 index 0000000000000..b11c571bf78b8 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateCalendarJobActionResquestTests.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; + +public class UpdateCalendarJobActionResquestTests extends AbstractStreamableTestCase { + + @Override + protected UpdateCalendarJobAction.Request createTestInstance() { + return new UpdateCalendarJobAction.Request(randomAlphaOfLength(10), + randomBoolean() ? null : randomAlphaOfLength(10), + randomBoolean() ? null : randomAlphaOfLength(10)); + } + + @Override + protected UpdateCalendarJobAction.Request createBlankInstance() { + return new UpdateCalendarJobAction.Request(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedActionRequestTests.java new file mode 100644 index 0000000000000..af883ee916462 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedActionRequestTests.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.UpdateDatafeedAction.Request; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfigTests; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedUpdateTests; +import org.junit.Before; + +import java.util.Collections; + +public class UpdateDatafeedActionRequestTests extends AbstractStreamableXContentTestCase { + + private String datafeedId; + + @Before + public void setUpDatafeedId() { + datafeedId = DatafeedConfigTests.randomValidDatafeedId(); + } + + @Override + protected Request createTestInstance() { + return new Request(DatafeedUpdateTests.createRandomized(datafeedId)); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + @Override + protected Request doParseInstance(XContentParser parser) { + return Request.parseRequest(datafeedId, parser); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedWriteableRegistry(searchModule.getNamedWriteables()); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateJobActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateJobActionRequestTests.java new file mode 100644 index 0000000000000..3b09017147886 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateJobActionRequestTests.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisLimits; +import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; + +public class UpdateJobActionRequestTests + extends AbstractStreamableTestCase { + + @Override + protected UpdateJobAction.Request createTestInstance() { + String jobId = randomAlphaOfLength(10); + // no need to randomize JobUpdate this is already tested in: JobUpdateTests + JobUpdate.Builder jobUpdate = new JobUpdate.Builder(jobId); + jobUpdate.setAnalysisLimits(new AnalysisLimits(100L, 100L)); + UpdateJobAction.Request request = new UpdateJobAction.Request(jobId, jobUpdate.build()); + request.setWaitForAck(randomBoolean()); + return request; + } + + @Override + protected UpdateJobAction.Request createBlankInstance() { + return new UpdateJobAction.Request(); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotActionRequestTests.java new file mode 100644 index 0000000000000..8ccb8bb2e1916 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotActionRequestTests.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.UpdateModelSnapshotAction.Request; + +public class UpdateModelSnapshotActionRequestTests + extends AbstractStreamableXContentTestCase { + + @Override + protected Request doParseInstance(XContentParser parser) { + return UpdateModelSnapshotAction.Request.parseRequest(null, null, parser); + } + + @Override + protected Request createTestInstance() { + Request request = new Request(randomAlphaOfLengthBetween(1, 20), + randomAlphaOfLengthBetween(1, 20)); + if (randomBoolean()) { + request.setDescription(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + request.setRetain(randomBoolean()); + } + return request; + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotActionResponseTests.java new file mode 100644 index 0000000000000..a8694476ee259 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateModelSnapshotActionResponseTests.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.UpdateModelSnapshotAction.Response; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshotTests; + +public class UpdateModelSnapshotActionResponseTests + extends AbstractStreamableTestCase { + + @Override + protected Response createTestInstance() { + return new Response(ModelSnapshotTests.createRandomized()); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessActionRequestTests.java new file mode 100644 index 0000000000000..f7ee459bb1944 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessActionRequestTests.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; +import org.elasticsearch.xpack.core.ml.job.config.MlFilter; +import org.elasticsearch.xpack.core.ml.job.config.MlFilterTests; +import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; + +import java.util.ArrayList; +import java.util.List; + +public class UpdateProcessActionRequestTests extends AbstractStreamableTestCase { + + + @Override + protected UpdateProcessAction.Request createTestInstance() { + ModelPlotConfig config = null; + if (randomBoolean()) { + config = new ModelPlotConfig(randomBoolean(), randomAlphaOfLength(10)); + } + List updates = null; + if (randomBoolean()) { + updates = new ArrayList<>(); + int detectorUpdateCount = randomIntBetween(0, 5); + for (int i = 0; i < detectorUpdateCount; i++) { + updates.add(new JobUpdate.DetectorUpdate(randomInt(), randomAlphaOfLength(10), null)); + } + } + MlFilter filter = null; + if (randomBoolean()) { + filter = MlFilterTests.createTestFilter(); + } + return new UpdateProcessAction.Request(randomAlphaOfLength(10), config, updates, filter, randomBoolean()); + } + + @Override + protected UpdateProcessAction.Request createBlankInstance() { + return new UpdateProcessAction.Request(); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ValidateDetectorActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ValidateDetectorActionRequestTests.java new file mode 100644 index 0000000000000..d49908b1f1bae --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ValidateDetectorActionRequestTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.xpack.core.ml.action.ValidateDetectorAction.Request; +import org.elasticsearch.xpack.core.ml.job.config.Detector; + +public class ValidateDetectorActionRequestTests extends AbstractStreamableXContentTestCase { + + @Override + protected Request createTestInstance() { + Detector.Builder detector; + if (randomBoolean()) { + detector = new Detector.Builder(randomFrom(Detector.COUNT_WITHOUT_FIELD_FUNCTIONS), null); + } else { + detector = new Detector.Builder(randomFrom(Detector.FIELD_NAME_FUNCTIONS), randomAlphaOfLengthBetween(1, 20)); + } + return new Request(detector.build()); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + @Override + protected Request doParseInstance(XContentParser parser) { + return Request.parseRequest(parser); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigActionRequestTests.java new file mode 100644 index 0000000000000..8fed16271e63f --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/ValidateJobConfigActionRequestTests.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.ml.action.ValidateJobConfigAction.Request; +import org.elasticsearch.xpack.core.ml.job.config.Job; + +import java.io.IOException; +import java.util.Date; + +import static org.elasticsearch.xpack.core.ml.job.config.JobTests.buildJobBuilder; +import static org.elasticsearch.xpack.core.ml.job.config.JobTests.randomValidJobId; + +public class ValidateJobConfigActionRequestTests extends AbstractStreamableTestCase { + + @Override + protected Request createTestInstance() { + return new Request(buildJobBuilder(randomValidJobId(), new Date()).build()); + } + + @Override + protected Request createBlankInstance() { + return new Request(); + } + + public void testParseRequest_InvalidCreateSetting() throws IOException { + String jobId = randomValidJobId(); + Job.Builder jobConfiguration = buildJobBuilder(jobId, null); + jobConfiguration.setLastDataTime(new Date()); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + XContentBuilder xContentBuilder = jobConfiguration.toXContent(builder, ToXContent.EMPTY_PARAMS); + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, 
DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + BytesReference.bytes(xContentBuilder).streamInput()); + + expectThrows(IllegalArgumentException.class, () -> Request.parseRequest(parser)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/util/PageParamsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/util/PageParamsTests.java new file mode 100644 index 0000000000000..d2f00e717b4fe --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/util/PageParamsTests.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action.util; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; + +public class PageParamsTests extends AbstractSerializingTestCase { + + @Override + protected PageParams doParseInstance(XContentParser parser) { + return PageParams.PARSER.apply(parser, null); + } + + @Override + protected PageParams createTestInstance() { + int from = randomInt(10000); + int size = randomInt(10000); + return new PageParams(from, size); + } + + @Override + protected Reader instanceReader() { + return PageParams::new; + } + + public void testValidate_GivenFromIsMinusOne() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new PageParams(-1, 100)); + assertEquals("Parameter [from] cannot be < 0", e.getMessage()); + } + + public void testValidate_GivenFromIsMinusTen() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new PageParams(-10, 100)); + assertEquals("Parameter [from] cannot be < 0", e.getMessage()); + } + + public void testValidate_GivenSizeIsMinusOne() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new PageParams(0, -1)); + assertEquals("Parameter [size] cannot be < 0", e.getMessage()); + } + + public void testValidate_GivenSizeIsMinusHundred() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new PageParams(0, -100)); + assertEquals("Parameter [size] cannot be < 0", e.getMessage()); + } + + @Override + protected PageParams mutateInstance(PageParams instance) { + int from = instance.getFrom(); + int size = instance.getSize(); + int amountToAdd = between(1, 20); + switch (between(0, 1)) { + case 0: + from += amountToAdd; + break; + case 1: + size += amountToAdd; + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new PageParams(from, size); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/util/QueryPageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/util/QueryPageTests.java new file mode 100644 index 0000000000000..8bded1972f2eb --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/util/QueryPageTests.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.action.util; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.ml.job.results.Influencer; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; + +public class QueryPageTests extends AbstractWireSerializingTestCase> { + + @Override + protected QueryPage createTestInstance() { + int hitCount = randomIntBetween(0, 10); + ArrayList hits = new ArrayList<>(); + for (int i = 0; i < hitCount; i++) { + hits.add(new Influencer(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20), + randomAlphaOfLengthBetween(1, 20), new Date(), randomNonNegativeLong())); + } + return new QueryPage<>(hits, hitCount, new ParseField("test")); + } + + @Override + protected Reader> instanceReader() { + return (in) -> new QueryPage<>(in, Influencer::new); + } + + @Override + protected QueryPage mutateInstance(QueryPage instance) throws IOException { + ParseField resultsField = instance.getResultsField(); + List page = instance.results(); + long count = instance.count(); + switch (between(0, 1)) { + case 0: + page = new ArrayList<>(page); + page.add(new Influencer(randomAlphaOfLengthBetween(10, 20), randomAlphaOfLengthBetween(10, 20), + randomAlphaOfLengthBetween(10, 20), new Date(randomNonNegativeLong()), randomNonNegativeLong())); + break; + case 1: + count += between(1, 20); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new QueryPage<>(page, count, resultsField); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/calendars/CalendarTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/calendars/CalendarTests.java new file mode 100644 index 0000000000000..b51115a47c388 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/calendars/CalendarTests.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.calendars; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ml.job.config.JobTests; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class CalendarTests extends AbstractSerializingTestCase { + + public static Calendar testInstance() { + return testInstance(JobTests.randomValidJobId()); + } + + public static Calendar testInstance(String calendarId) { + int size = randomInt(10); + List items = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + items.add(randomAlphaOfLengthBetween(1, 20)); + } + String description = null; + if (randomBoolean()) { + description = randomAlphaOfLength(20); + } + return new Calendar(calendarId, items, description); + } + + @Override + protected Calendar createTestInstance() { + return testInstance(); + } + + @Override + protected Writeable.Reader instanceReader() { + return Calendar::new; + } + + @Override + protected Calendar doParseInstance(XContentParser parser) throws IOException { + return Calendar.STRICT_PARSER.apply(parser, null).build(); + } + + public void testNullId() { + NullPointerException ex = expectThrows(NullPointerException.class, () -> new Calendar(null, Collections.emptyList(), null)); + assertEquals(Calendar.ID.getPreferredName() + " must not be null", ex.getMessage()); + } + + public void testDocumentId() { + assertThat(Calendar.documentId("foo"), equalTo("calendar_foo")); + } + + public void testStrictParser() throws IOException { + String json = "{\"foo\":\"bar\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> Calendar.STRICT_PARSER.apply(parser, null)); + + assertThat(e.getMessage(), containsString("unknown field [foo]")); + } + } + + public void testLenientParser() throws IOException { + String json = "{\"foo\":\"bar\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + Calendar.LENIENT_PARSER.apply(parser, null); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEventTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEventTests.java new file mode 100644 index 0000000000000..f98eb9d5dcecc --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEventTests.java @@ -0,0 +1,128 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.calendars; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ml.job.config.Connective; +import org.elasticsearch.xpack.core.ml.job.config.DetectionRule; +import org.elasticsearch.xpack.core.ml.job.config.Operator; +import org.elasticsearch.xpack.core.ml.job.config.RuleAction; +import org.elasticsearch.xpack.core.ml.job.config.RuleCondition; +import org.elasticsearch.xpack.core.ml.job.config.RuleConditionType; +import org.joda.time.DateTime; + +import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.util.EnumSet; +import java.util.List; + +import static org.hamcrest.Matchers.containsString; + +public class ScheduledEventTests extends AbstractSerializingTestCase { + + public static ScheduledEvent createScheduledEvent(String calendarId) { + ZonedDateTime start = ZonedDateTime.ofInstant(Instant.ofEpochMilli(new DateTime(randomDateTimeZone()).getMillis()), ZoneOffset.UTC); + return new ScheduledEvent(randomAlphaOfLength(10), start, start.plusSeconds(randomIntBetween(1, 10000)), + calendarId, null); + } + + @Override + protected ScheduledEvent createTestInstance() { + return createScheduledEvent(randomAlphaOfLengthBetween(1, 20)); + } + + @Override + protected Writeable.Reader instanceReader() { + return ScheduledEvent::new; + } + + @Override + protected ScheduledEvent doParseInstance(XContentParser parser) throws IOException { + return ScheduledEvent.STRICT_PARSER.apply(parser, null).build(); + } + + public void testToDetectionRule() { + long bucketSpanSecs = 300; + ScheduledEvent event = createTestInstance(); + DetectionRule rule = event.toDetectionRule(TimeValue.timeValueSeconds(bucketSpanSecs)); + + assertEquals(Connective.AND, rule.getConditionsConnective()); + assertEquals(rule.getActions(), EnumSet.of(RuleAction.FILTER_RESULTS, RuleAction.SKIP_SAMPLING)); + assertNull(rule.getTargetFieldName()); + assertNull(rule.getTargetFieldValue()); + + List conditions = rule.getConditions(); + assertEquals(2, conditions.size()); + assertEquals(RuleConditionType.TIME, conditions.get(0).getType()); + assertEquals(RuleConditionType.TIME, conditions.get(1).getType()); + assertEquals(Operator.GTE, conditions.get(0).getCondition().getOperator()); + assertEquals(Operator.LT, conditions.get(1).getCondition().getOperator()); + + // Check times are aligned with the bucket + long conditionStartTime = Long.parseLong(conditions.get(0).getCondition().getValue()); + assertEquals(0, conditionStartTime % bucketSpanSecs); + long bucketCount = conditionStartTime / bucketSpanSecs; + assertEquals(bucketSpanSecs * bucketCount, conditionStartTime); + + long conditionEndTime = Long.parseLong(conditions.get(1).getCondition().getValue()); + assertEquals(0, conditionEndTime % bucketSpanSecs); + + long eventTime = event.getEndTime().toEpochSecond() - conditionStartTime; + long numbBucketsInEvent = (eventTime + bucketSpanSecs -1) / bucketSpanSecs; + assertEquals(bucketSpanSecs * (bucketCount + numbBucketsInEvent), conditionEndTime); + } + + public void testBuild() { + ScheduledEvent.Builder builder = new ScheduledEvent.Builder(); + + ElasticsearchStatusException e = 
expectThrows(ElasticsearchStatusException.class, builder::build); + assertEquals("Field [description] cannot be null", e.getMessage()); + builder.description("foo"); + e = expectThrows(ElasticsearchStatusException.class, builder::build); + assertEquals("Field [start_time] cannot be null", e.getMessage()); + ZonedDateTime now = ZonedDateTime.now(); + builder.startTime(now); + e = expectThrows(ElasticsearchStatusException.class, builder::build); + assertEquals("Field [end_time] cannot be null", e.getMessage()); + builder.endTime(now.plusHours(1)); + e = expectThrows(ElasticsearchStatusException.class, builder::build); + assertEquals("Field [calendar_id] cannot be null", e.getMessage()); + builder.calendarId("foo"); + builder.build(); + + + builder = new ScheduledEvent.Builder().description("f").calendarId("c"); + builder.startTime(now); + builder.endTime(now.minusHours(2)); + + e = expectThrows(ElasticsearchStatusException.class, builder::build); + assertThat(e.getMessage(), containsString("must come before end time")); + } + + public void testStrictParser() throws IOException { + String json = "{\"foo\":\"bar\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> ScheduledEvent.STRICT_PARSER.apply(parser, null)); + + assertThat(e.getMessage(), containsString("unknown field [foo]")); + } + } + + public void testLenientParser() throws IOException { + String json = "{\"foo\":\"bar\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + ScheduledEvent.LENIENT_PARSER.apply(parser, null); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/ChunkingConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/ChunkingConfigTests.java new file mode 100644 index 0000000000000..ef89200b765ec --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/ChunkingConfigTests.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.datafeed; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.is; + +public class ChunkingConfigTests extends AbstractSerializingTestCase { + + @Override + protected ChunkingConfig createTestInstance() { + return createRandomizedChunk(); + } + + @Override + protected Writeable.Reader instanceReader() { + return ChunkingConfig::new; + } + + @Override + protected ChunkingConfig doParseInstance(XContentParser parser) { + return ChunkingConfig.CONFIG_PARSER.apply(parser, null); + } + + public void testConstructorGivenAutoAndTimeSpan() { + expectThrows(IllegalArgumentException.class, () -> new ChunkingConfig(ChunkingConfig.Mode.AUTO, TimeValue.timeValueMillis(1000))); + } + + public void testConstructorGivenOffAndTimeSpan() { + expectThrows(IllegalArgumentException.class, () -> new ChunkingConfig(ChunkingConfig.Mode.OFF, TimeValue.timeValueMillis(1000))); + } + + public void testConstructorGivenManualAndNoTimeSpan() { + expectThrows(IllegalArgumentException.class, () -> new ChunkingConfig(ChunkingConfig.Mode.MANUAL, null)); + } + + public void testIsEnabled() { + assertThat(ChunkingConfig.newAuto().isEnabled(), is(true)); + assertThat(ChunkingConfig.newManual(TimeValue.timeValueMillis(1000)).isEnabled(), is(true)); + assertThat(ChunkingConfig.newOff().isEnabled(), is(false)); + } + + public static ChunkingConfig createRandomizedChunk() { + ChunkingConfig.Mode mode = randomFrom(ChunkingConfig.Mode.values()); + TimeValue timeSpan = null; + if (mode == ChunkingConfig.Mode.MANUAL) { + // time span is required to be at least 1 millis, so we use a custom method to generate a time value here + timeSpan = randomPositiveSecondsMinutesHours(); + } + return new ChunkingConfig(mode, timeSpan); + } + + private static TimeValue randomPositiveSecondsMinutesHours() { + return new TimeValue(randomIntBetween(1, 1000), randomFrom(Arrays.asList(TimeUnit.SECONDS, TimeUnit.MINUTES, TimeUnit.HOURS))); + } + + @Override + protected ChunkingConfig mutateInstance(ChunkingConfig instance) throws IOException { + ChunkingConfig.Mode mode = instance.getMode(); + TimeValue timeSpan = instance.getTimeSpan(); + switch (between(0, 1)) { + case 0: + List modes = new ArrayList<>(Arrays.asList(ChunkingConfig.Mode.values())); + modes.remove(mode); + mode = randomFrom(modes); + if (mode == ChunkingConfig.Mode.MANUAL) { + timeSpan = randomPositiveSecondsMinutesHours(); + } else { + timeSpan = null; + } + break; + case 1: + if (timeSpan == null) { + timeSpan = randomPositiveSecondsMinutesHours(); + } else { + timeSpan = new TimeValue(timeSpan.getMillis() + between(10, 10000)); + } + // only manual mode allows a timespan + mode = ChunkingConfig.Mode.MANUAL; + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new ChunkingConfig(mode, timeSpan); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java new file mode 100644 index 0000000000000..a3f74d25531e4 --- /dev/null +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java @@ -0,0 +1,599 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.datafeed; + +import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.script.Script; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig.Mode; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.TimeZone; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.not; + +public class DatafeedConfigTests extends AbstractSerializingTestCase { + + @Override + protected DatafeedConfig createTestInstance() { + return createRandomizedDatafeedConfig(randomAlphaOfLength(10)); + } + + public static DatafeedConfig createRandomizedDatafeedConfig(String jobId) { + return createRandomizedDatafeedConfig(jobId, 3600000); + } + + public static DatafeedConfig createRandomizedDatafeedConfig(String jobId, long bucketSpanMillis) { + DatafeedConfig.Builder builder = new DatafeedConfig.Builder(randomValidDatafeedId(), jobId); + builder.setIndices(randomStringList(1, 10)); + builder.setTypes(randomStringList(0, 10)); + if (randomBoolean()) { + builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), 
randomAlphaOfLength(10))); + } + boolean addScriptFields = randomBoolean(); + if (addScriptFields) { + int scriptsSize = randomInt(3); + List scriptFields = new ArrayList<>(scriptsSize); + for (int scriptIndex = 0; scriptIndex < scriptsSize; scriptIndex++) { + scriptFields.add(new SearchSourceBuilder.ScriptField(randomAlphaOfLength(10), mockScript(randomAlphaOfLength(10)), + randomBoolean())); + } + builder.setScriptFields(scriptFields); + } + Long aggHistogramInterval = null; + if (randomBoolean() && addScriptFields == false) { + // can only test with a single agg as the xcontent order gets randomized by test base class and then + // the actual xcontent isn't the same and test fail. + // Testing with a single agg is ok as we don't have special list writeable / xconent logic + AggregatorFactories.Builder aggs = new AggregatorFactories.Builder(); + aggHistogramInterval = randomNonNegativeLong(); + aggHistogramInterval = aggHistogramInterval> bucketSpanMillis ? bucketSpanMillis : aggHistogramInterval; + aggHistogramInterval = aggHistogramInterval <= 0 ? 1 : aggHistogramInterval; + MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); + aggs.addAggregator(AggregationBuilders.dateHistogram("buckets") + .interval(aggHistogramInterval).subAggregation(maxTime).field("time")); + builder.setAggregations(aggs); + } + if (randomBoolean()) { + builder.setScrollSize(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + if (aggHistogramInterval == null) { + builder.setFrequency(TimeValue.timeValueSeconds(randomIntBetween(1, 1_000_000))); + } else { + builder.setFrequency(TimeValue.timeValueMillis(randomIntBetween(1, 5) * aggHistogramInterval)); + } + } + if (randomBoolean()) { + builder.setQueryDelay(TimeValue.timeValueMillis(randomIntBetween(1, 1_000_000))); + } + if (randomBoolean()) { + builder.setChunkingConfig(ChunkingConfigTests.createRandomizedChunk()); + } + return builder.build(); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedWriteableRegistry(searchModule.getNamedWriteables()); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + + public static List randomStringList(int min, int max) { + int size = scaledRandomIntBetween(min, max); + List list = new ArrayList<>(); + for (int i = 0; i < size; i++) { + list.add(randomAlphaOfLength(10)); + } + return list; + } + + @Override + protected Writeable.Reader instanceReader() { + return DatafeedConfig::new; + } + + @Override + protected DatafeedConfig doParseInstance(XContentParser parser) { + return DatafeedConfig.CONFIG_PARSER.apply(parser, null).build(); + } + + private static final String FUTURE_DATAFEED = "{\n" + + " \"datafeed_id\": \"farequote-datafeed\",\n" + + " \"job_id\": \"farequote\",\n" + + " \"frequency\": \"1h\",\n" + + " \"indices\": [\"farequote1\", \"farequote2\"],\n" + + " \"tomorrows_technology_today\": \"amazing\",\n" + + " \"scroll_size\": 1234\n" + + "}"; + + public void testFutureConfigParse() throws IOException { + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_DATAFEED); + IllegalArgumentException e = 
expectThrows(IllegalArgumentException.class, + () -> DatafeedConfig.CONFIG_PARSER.apply(parser, null).build()); + assertEquals("[datafeed_config] unknown field [tomorrows_technology_today], parser not found", e.getMessage()); + } + + public void testFutureMetadataParse() throws IOException { + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_DATAFEED); + // Unlike the config version of this test, the metadata parser should tolerate the unknown future field + assertNotNull(DatafeedConfig.METADATA_PARSER.apply(parser, null).build()); + } + + public void testCopyConstructor() { + for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) { + DatafeedConfig datafeedConfig = createTestInstance(); + DatafeedConfig copy = new DatafeedConfig.Builder(datafeedConfig).build(); + assertEquals(datafeedConfig, copy); + } + } + + public void testDefaults() { + DatafeedConfig.Builder expectedDatafeedConfig = new DatafeedConfig.Builder("datafeed1", "job1"); + expectedDatafeedConfig.setIndices(Collections.singletonList("index")); + expectedDatafeedConfig.setQueryDelay(TimeValue.timeValueMinutes(1)); + expectedDatafeedConfig.setScrollSize(1000); + DatafeedConfig.Builder defaultFeedBuilder = new DatafeedConfig.Builder("datafeed1", "job1"); + defaultFeedBuilder.setIndices(Collections.singletonList("index")); + DatafeedConfig defaultFeed = defaultFeedBuilder.build(); + + + assertThat(defaultFeed.getScrollSize(), equalTo(1000)); + assertThat(defaultFeed.getQueryDelay().seconds(), greaterThanOrEqualTo(60L)); + assertThat(defaultFeed.getQueryDelay().seconds(), lessThan(120L)); + } + + public void testDefaultQueryDelay() { + DatafeedConfig.Builder feedBuilder1 = new DatafeedConfig.Builder("datafeed1", "job1"); + feedBuilder1.setIndices(Arrays.asList("foo")); + DatafeedConfig.Builder feedBuilder2 = new DatafeedConfig.Builder("datafeed2", "job1"); + feedBuilder2.setIndices(Arrays.asList("foo")); + DatafeedConfig.Builder feedBuilder3 = new DatafeedConfig.Builder("datafeed3", "job2"); + feedBuilder3.setIndices(Arrays.asList("foo")); + DatafeedConfig feed1 = feedBuilder1.build(); + DatafeedConfig feed2 = feedBuilder2.build(); + DatafeedConfig feed3 = feedBuilder3.build(); + + // Two datafeeds with the same job id should have the same random query delay + assertThat(feed1.getQueryDelay(), equalTo(feed2.getQueryDelay())); + // But the query delay of a datafeed with a different job id should differ too + assertThat(feed1.getQueryDelay(), not(equalTo(feed3.getQueryDelay()))); + } + + public void testCheckValid_GivenNullIndices() throws IOException { + DatafeedConfig.Builder conf = new DatafeedConfig.Builder("datafeed1", "job1"); + expectThrows(IllegalArgumentException.class, () -> conf.setIndices(null)); + } + + public void testCheckValid_GivenEmptyIndices() throws IOException { + DatafeedConfig.Builder conf = new DatafeedConfig.Builder("datafeed1", "job1"); + conf.setIndices(Collections.emptyList()); + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, conf::build); + assertEquals(Messages.getMessage(Messages.DATAFEED_CONFIG_INVALID_OPTION_VALUE, "indices", "[]"), e.getMessage()); + } + + public void testCheckValid_GivenIndicesContainsOnlyNulls() throws IOException { + List indices = new ArrayList<>(); + indices.add(null); + indices.add(null); + DatafeedConfig.Builder conf = new DatafeedConfig.Builder("datafeed1", "job1"); + conf.setIndices(indices); + ElasticsearchException e = 
ESTestCase.expectThrows(ElasticsearchException.class, conf::build); + assertEquals(Messages.getMessage(Messages.DATAFEED_CONFIG_INVALID_OPTION_VALUE, "indices", "[null, null]"), e.getMessage()); + } + + public void testCheckValid_GivenIndicesContainsOnlyEmptyStrings() throws IOException { + List indices = new ArrayList<>(); + indices.add(""); + indices.add(""); + DatafeedConfig.Builder conf = new DatafeedConfig.Builder("datafeed1", "job1"); + conf.setIndices(indices); + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, conf::build); + assertEquals(Messages.getMessage(Messages.DATAFEED_CONFIG_INVALID_OPTION_VALUE, "indices", "[, ]"), e.getMessage()); + } + + public void testCheckValid_GivenNegativeQueryDelay() throws IOException { + DatafeedConfig.Builder conf = new DatafeedConfig.Builder("datafeed1", "job1"); + IllegalArgumentException e = ESTestCase.expectThrows(IllegalArgumentException.class, + () -> conf.setQueryDelay(TimeValue.timeValueMillis(-10))); + assertEquals("query_delay cannot be less than 0. Value = -10", e.getMessage()); + } + + public void testCheckValid_GivenZeroFrequency() throws IOException { + DatafeedConfig.Builder conf = new DatafeedConfig.Builder("datafeed1", "job1"); + IllegalArgumentException e = ESTestCase.expectThrows(IllegalArgumentException.class, () -> conf.setFrequency(TimeValue.ZERO)); + assertEquals("frequency cannot be less or equal than 0. Value = 0s", e.getMessage()); + } + + public void testCheckValid_GivenNegativeFrequency() throws IOException { + DatafeedConfig.Builder conf = new DatafeedConfig.Builder("datafeed1", "job1"); + IllegalArgumentException e = ESTestCase.expectThrows(IllegalArgumentException.class, + () -> conf.setFrequency(TimeValue.timeValueMinutes(-1))); + assertEquals("frequency cannot be less or equal than 0. 
Value = -1", e.getMessage()); + } + + public void testCheckValid_GivenNegativeScrollSize() throws IOException { + DatafeedConfig.Builder conf = new DatafeedConfig.Builder("datafeed1", "job1"); + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, () -> conf.setScrollSize(-1000)); + assertEquals(Messages.getMessage(Messages.DATAFEED_CONFIG_INVALID_OPTION_VALUE, "scroll_size", -1000L), e.getMessage()); + } + + public void testBuild_GivenScriptFieldsAndAggregations() { + DatafeedConfig.Builder datafeed = new DatafeedConfig.Builder("datafeed1", "job1"); + datafeed.setIndices(Collections.singletonList("my_index")); + datafeed.setTypes(Collections.singletonList("my_type")); + datafeed.setScriptFields(Collections.singletonList(new SearchSourceBuilder.ScriptField(randomAlphaOfLength(10), + mockScript(randomAlphaOfLength(10)), randomBoolean()))); + datafeed.setAggregations(new AggregatorFactories.Builder().addAggregator(AggregationBuilders.avg("foo"))); + + ElasticsearchException e = expectThrows(ElasticsearchException.class, datafeed::build); + + assertThat(e.getMessage(), equalTo("script_fields cannot be used in combination with aggregations")); + } + + public void testHasAggregations_GivenNull() { + DatafeedConfig.Builder builder = new DatafeedConfig.Builder("datafeed1", "job1"); + builder.setIndices(Collections.singletonList("myIndex")); + builder.setTypes(Collections.singletonList("myType")); + DatafeedConfig datafeedConfig = builder.build(); + + assertThat(datafeedConfig.hasAggregations(), is(false)); + } + + public void testHasAggregations_NonEmpty() { + DatafeedConfig.Builder builder = new DatafeedConfig.Builder("datafeed1", "job1"); + builder.setIndices(Collections.singletonList("myIndex")); + builder.setTypes(Collections.singletonList("myType")); + MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); + builder.setAggregations(new AggregatorFactories.Builder().addAggregator( + AggregationBuilders.dateHistogram("time").interval(300000).subAggregation(maxTime).field("time"))); + DatafeedConfig datafeedConfig = builder.build(); + + assertThat(datafeedConfig.hasAggregations(), is(true)); + } + + public void testBuild_GivenEmptyAggregations() { + DatafeedConfig.Builder builder = new DatafeedConfig.Builder("datafeed1", "job1"); + builder.setIndices(Collections.singletonList("myIndex")); + builder.setTypes(Collections.singletonList("myType")); + builder.setAggregations(new AggregatorFactories.Builder()); + + ElasticsearchException e = expectThrows(ElasticsearchException.class, builder::build); + + assertThat(e.getMessage(), equalTo("A date_histogram (or histogram) aggregation is required")); + } + + public void testBuild_GivenHistogramWithDefaultInterval() { + DatafeedConfig.Builder builder = new DatafeedConfig.Builder("datafeed1", "job1"); + builder.setIndices(Collections.singletonList("myIndex")); + builder.setTypes(Collections.singletonList("myType")); + MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); + builder.setAggregations(new AggregatorFactories.Builder().addAggregator( + AggregationBuilders.histogram("time").subAggregation(maxTime).field("time")) + ); + + ElasticsearchException e = expectThrows(ElasticsearchException.class, builder::build); + + assertThat(e.getMessage(), equalTo("Aggregation interval must be greater than 0")); + } + + public void testBuild_GivenDateHistogramWithInvalidTimeZone() { + MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); + 
DateHistogramAggregationBuilder dateHistogram = AggregationBuilders.dateHistogram("bucket").field("time") + .interval(300000L).timeZone(DateTimeZone.forTimeZone(TimeZone.getTimeZone("EST"))).subAggregation(maxTime); + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> createDatafeedWithDateHistogram(dateHistogram)); + + assertThat(e.getMessage(), equalTo("ML requires date_histogram.time_zone to be UTC")); + } + + public void testBuild_GivenDateHistogramWithDefaultInterval() { + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> createDatafeedWithDateHistogram((String) null)); + + assertThat(e.getMessage(), equalTo("Aggregation interval must be greater than 0")); + } + + public void testBuild_GivenValidDateHistogram() { + long millisInDay = 24 * 3600000L; + + assertThat(createDatafeedWithDateHistogram("1s").getHistogramIntervalMillis(), equalTo(1000L)); + assertThat(createDatafeedWithDateHistogram("2s").getHistogramIntervalMillis(), equalTo(2000L)); + assertThat(createDatafeedWithDateHistogram("1m").getHistogramIntervalMillis(), equalTo(60000L)); + assertThat(createDatafeedWithDateHistogram("2m").getHistogramIntervalMillis(), equalTo(120000L)); + assertThat(createDatafeedWithDateHistogram("1h").getHistogramIntervalMillis(), equalTo(3600000L)); + assertThat(createDatafeedWithDateHistogram("2h").getHistogramIntervalMillis(), equalTo(7200000L)); + assertThat(createDatafeedWithDateHistogram("1d").getHistogramIntervalMillis(), equalTo(millisInDay)); + assertThat(createDatafeedWithDateHistogram("7d").getHistogramIntervalMillis(), equalTo(7 * millisInDay)); + + assertThat(createDatafeedWithDateHistogram(7 * millisInDay + 1).getHistogramIntervalMillis(), + equalTo(7 * millisInDay + 1)); + } + + public void testBuild_GivenDateHistogramWithMoreThanCalendarWeek() { + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> createDatafeedWithDateHistogram("8d")); + + assertThat(e.getMessage(), containsString("When specifying a date_histogram calendar interval [8d]")); + } + + public void testDefaultChunkingConfig_GivenAggregations() { + assertThat(createDatafeedWithDateHistogram("1s").getChunkingConfig(), + equalTo(ChunkingConfig.newManual(TimeValue.timeValueSeconds(1000)))); + assertThat(createDatafeedWithDateHistogram("2h").getChunkingConfig(), + equalTo(ChunkingConfig.newManual(TimeValue.timeValueHours(2000)))); + } + + public void testChunkingConfig_GivenExplicitSetting() { + DatafeedConfig.Builder builder = new DatafeedConfig.Builder(createDatafeedWithDateHistogram("30s")); + builder.setChunkingConfig(ChunkingConfig.newAuto()); + assertThat(builder.build().getChunkingConfig(), equalTo(ChunkingConfig.newAuto())); + } + + public void testCheckHistogramAggregationHasChildMaxTimeAgg() { + DateHistogramAggregationBuilder dateHistogram = AggregationBuilders.dateHistogram("time_agg").field("max_time"); + + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> DatafeedConfig.Builder.checkHistogramAggregationHasChildMaxTimeAgg(dateHistogram)); + + assertThat(e.getMessage(), containsString("Date histogram must have nested max aggregation for time_field [max_time]")); + } + + public void testValidateAggregations_GivenMulitpleHistogramAggs() { + DateHistogramAggregationBuilder nestedDateHistogram = AggregationBuilders.dateHistogram("nested_time"); + AvgAggregationBuilder avg = AggregationBuilders.avg("avg").subAggregation(nestedDateHistogram); + TermsAggregationBuilder nestedTerms = 
AggregationBuilders.terms("nested_terms"); + + DateHistogramAggregationBuilder dateHistogram = AggregationBuilders.dateHistogram("time"); + + MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); + dateHistogram.subAggregation(avg).subAggregation(nestedTerms).subAggregation(maxTime).field("time"); + + TermsAggregationBuilder toplevelTerms = AggregationBuilders.terms("top_level"); + toplevelTerms.subAggregation(dateHistogram); + + DatafeedConfig.Builder builder = new DatafeedConfig.Builder("foo", "bar"); + builder.setAggregations(new AggregatorFactories.Builder().addAggregator(toplevelTerms)); + ElasticsearchException e = expectThrows(ElasticsearchException.class, builder::validateAggregations); + + assertEquals("Aggregations can only have 1 date_histogram or histogram aggregation", e.getMessage()); + } + + public void testDefaultFrequency_GivenNegative() { + DatafeedConfig datafeed = createTestInstance(); + ESTestCase.expectThrows(IllegalArgumentException.class, () -> datafeed.defaultFrequency(TimeValue.timeValueSeconds(-1))); + } + + public void testDefaultFrequency_GivenNoAggregations() { + DatafeedConfig.Builder datafeedBuilder = new DatafeedConfig.Builder("feed", "job"); + datafeedBuilder.setIndices(Arrays.asList("my_index")); + DatafeedConfig datafeed = datafeedBuilder.build(); + + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(1))); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(30))); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(60))); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(90))); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(120))); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(121))); + + assertEquals(TimeValue.timeValueSeconds(61), datafeed.defaultFrequency(TimeValue.timeValueSeconds(122))); + assertEquals(TimeValue.timeValueSeconds(75), datafeed.defaultFrequency(TimeValue.timeValueSeconds(150))); + assertEquals(TimeValue.timeValueSeconds(150), datafeed.defaultFrequency(TimeValue.timeValueSeconds(300))); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueSeconds(1200))); + + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueSeconds(1201))); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueSeconds(1800))); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueHours(1))); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueHours(2))); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueHours(12))); + + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(12 * 3600 + 1))); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(13))); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(24))); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(48))); + } + + public void testDefaultFrequency_GivenAggregationsWithHistogramInterval_1_Second() { + DatafeedConfig datafeed = createDatafeedWithDateHistogram("1s"); + + 
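+        // The default frequency scales with the job's bucket span: 1m for short spans, roughly half the
+        // span for mid-range spans, capped at 10m, and 1h once the span exceeds 12 hours.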
assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(60))); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(90))); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(120))); + assertEquals(TimeValue.timeValueSeconds(125), datafeed.defaultFrequency(TimeValue.timeValueSeconds(250))); + assertEquals(TimeValue.timeValueSeconds(250), datafeed.defaultFrequency(TimeValue.timeValueSeconds(500))); + + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueHours(1))); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(13))); + } + + public void testDefaultFrequency_GivenAggregationsWithHistogramInterval_1_Minute() { + DatafeedConfig datafeed = createDatafeedWithDateHistogram("1m"); + + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(60))); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(90))); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(120))); + assertEquals(TimeValue.timeValueMinutes(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(180))); + assertEquals(TimeValue.timeValueMinutes(2), datafeed.defaultFrequency(TimeValue.timeValueSeconds(240))); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueMinutes(20))); + + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueSeconds(20 * 60 + 1))); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueHours(6))); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueHours(12))); + + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(13))); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(72))); + } + + public void testDefaultFrequency_GivenAggregationsWithHistogramInterval_10_Minutes() { + DatafeedConfig datafeed = createDatafeedWithDateHistogram("10m"); + + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueMinutes(10))); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueMinutes(20))); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueMinutes(30))); + assertEquals(TimeValue.timeValueMinutes(10), datafeed.defaultFrequency(TimeValue.timeValueMinutes(12 * 60))); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueMinutes(13 * 60))); + } + + public void testDefaultFrequency_GivenAggregationsWithHistogramInterval_1_Hour() { + DatafeedConfig datafeed = createDatafeedWithDateHistogram("1h"); + + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(1))); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueSeconds(3601))); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(2))); + assertEquals(TimeValue.timeValueHours(1), datafeed.defaultFrequency(TimeValue.timeValueHours(12))); + } + + public static String randomValidDatafeedId() { + CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz".toCharArray()); + return 
generator.ofCodePointsLength(random(), 10, 10); + } + + private static DatafeedConfig createDatafeedWithDateHistogram(String interval) { + MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); + DateHistogramAggregationBuilder dateHistogram = AggregationBuilders.dateHistogram("buckets").subAggregation(maxTime).field("time"); + if (interval != null) { + dateHistogram.dateHistogramInterval(new DateHistogramInterval(interval)); + } + return createDatafeedWithDateHistogram(dateHistogram); + } + + private static DatafeedConfig createDatafeedWithDateHistogram(Long interval) { + MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); + DateHistogramAggregationBuilder dateHistogram = AggregationBuilders.dateHistogram("buckets").subAggregation(maxTime).field("time"); + if (interval != null) { + dateHistogram.interval(interval); + } + return createDatafeedWithDateHistogram(dateHistogram); + } + + private static DatafeedConfig createDatafeedWithDateHistogram(DateHistogramAggregationBuilder dateHistogram) { + DatafeedConfig.Builder builder = new DatafeedConfig.Builder("datafeed1", "job1"); + builder.setIndices(Collections.singletonList("myIndex")); + builder.setTypes(Collections.singletonList("myType")); + builder.setAggregations(new AggregatorFactories.Builder().addAggregator(dateHistogram)); + return builder.build(); + } + + @Override + protected DatafeedConfig mutateInstance(DatafeedConfig instance) throws IOException { + DatafeedConfig.Builder builder = new DatafeedConfig.Builder(instance); + switch (between(0, 10)) { + case 0: + builder.setId(instance.getId() + randomValidDatafeedId()); + break; + case 1: + builder.setJobId(instance.getJobId() + randomAlphaOfLength(5)); + break; + case 2: + builder.setQueryDelay(new TimeValue(instance.getQueryDelay().millis() + between(100, 100000))); + break; + case 3: + if (instance.getFrequency() == null) { + builder.setFrequency(new TimeValue(between(1, 10) * 1000)); + } else { + builder.setFrequency(new TimeValue(instance.getFrequency().millis() + between(1, 10) * 1000)); + } + break; + case 4: + List indices = new ArrayList<>(instance.getIndices()); + indices.add(randomAlphaOfLengthBetween(1, 20)); + builder.setIndices(indices); + break; + case 5: + List types = new ArrayList<>(instance.getTypes()); + types.add(randomAlphaOfLengthBetween(1, 20)); + builder.setTypes(types); + break; + case 6: + BoolQueryBuilder query = new BoolQueryBuilder(); + if (instance.getQuery() != null) { + query.must(instance.getQuery()); + } + query.filter(new TermQueryBuilder(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))); + builder.setQuery(query); + break; + case 7: + if (instance.hasAggregations()) { + builder.setAggregations(null); + } else { + AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder(); + String timeField = randomAlphaOfLength(10); + aggBuilder + .addAggregator(new DateHistogramAggregationBuilder(timeField).field(timeField).interval(between(10000, 3600000)) + .subAggregation(new MaxAggregationBuilder(timeField).field(timeField))); + builder.setAggregations(aggBuilder); + if (instance.getScriptFields().isEmpty() == false) { + builder.setScriptFields(Collections.emptyList()); + } + } + break; + case 8: + ArrayList scriptFields = new ArrayList<>(instance.getScriptFields()); + scriptFields.add(new ScriptField(randomAlphaOfLengthBetween(1, 10), new Script("foo"), true)); + builder.setScriptFields(scriptFields); + builder.setAggregations(null); + break; + case 9: + 
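+                // branch 9: grow the scroll size so the mutated config can never equal the original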
builder.setScrollSize(instance.getScrollSize() + between(1, 100)); + break; + case 10: + if (instance.getChunkingConfig() == null || instance.getChunkingConfig().getMode() == Mode.AUTO) { + ChunkingConfig newChunkingConfig = ChunkingConfig.newManual(new TimeValue(randomNonNegativeLong())); + builder.setChunkingConfig(newChunkingConfig); + } else { + builder.setChunkingConfig(ChunkingConfig.newAuto()); + } + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return builder.build(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java new file mode 100644 index 0000000000000..d059e567d1588 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java @@ -0,0 +1,279 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.datafeed; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.script.Script; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig.Mode; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class DatafeedUpdateTests extends AbstractSerializingTestCase { + + @Override + protected DatafeedUpdate createTestInstance() { + return createRandomized(DatafeedConfigTests.randomValidDatafeedId()); + } + + public static DatafeedUpdate createRandomized(String datafeedId) { + DatafeedUpdate.Builder builder = new DatafeedUpdate.Builder(datafeedId); + if (randomBoolean()) { + builder.setJobId(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + builder.setQueryDelay(TimeValue.timeValueMillis(randomIntBetween(1, Integer.MAX_VALUE))); + } + if (randomBoolean()) { + builder.setFrequency(TimeValue.timeValueSeconds(randomIntBetween(1, Integer.MAX_VALUE))); + } + if (randomBoolean()) { + builder.setIndices(DatafeedConfigTests.randomStringList(1, 10)); + } + if (randomBoolean()) { + builder.setTypes(DatafeedConfigTests.randomStringList(1, 10)); + } + if (randomBoolean()) { + builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), 
randomAlphaOfLength(10))); + } + if (randomBoolean()) { + int scriptsSize = randomInt(3); + List scriptFields = new ArrayList<>(scriptsSize); + for (int scriptIndex = 0; scriptIndex < scriptsSize; scriptIndex++) { + scriptFields.add(new SearchSourceBuilder.ScriptField(randomAlphaOfLength(10), mockScript(randomAlphaOfLength(10)), + randomBoolean())); + } + builder.setScriptFields(scriptFields); + } + if (randomBoolean()) { + // can only test with a single agg as the xcontent order gets randomized by test base class and then + // the actual xcontent isn't the same and test fail. + // Testing with a single agg is ok as we don't have special list writeable / xconent logic + AggregatorFactories.Builder aggs = new AggregatorFactories.Builder(); + aggs.addAggregator(AggregationBuilders.avg(randomAlphaOfLength(10)).field(randomAlphaOfLength(10))); + builder.setAggregations(aggs); + } + if (randomBoolean()) { + builder.setScrollSize(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + builder.setChunkingConfig(ChunkingConfigTests.createRandomizedChunk()); + } + return builder.build(); + } + + @Override + protected Writeable.Reader instanceReader() { + return DatafeedUpdate::new; + } + + @Override + protected DatafeedUpdate doParseInstance(XContentParser parser) { + return DatafeedUpdate.PARSER.apply(parser, null).build(); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedWriteableRegistry(searchModule.getNamedWriteables()); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + + public void testApply_failBecauseTargetDatafeedHasDifferentId() { + DatafeedConfig datafeed = DatafeedConfigTests.createRandomizedDatafeedConfig("foo"); + expectThrows(IllegalArgumentException.class, () -> createRandomized(datafeed.getId() + "_2").apply(datafeed, null)); + } + + public void testApply_givenEmptyUpdate() { + DatafeedConfig datafeed = DatafeedConfigTests.createRandomizedDatafeedConfig("foo"); + DatafeedConfig updatedDatafeed = new DatafeedUpdate.Builder(datafeed.getId()).build().apply(datafeed, null); + assertThat(datafeed, equalTo(updatedDatafeed)); + } + + public void testApply_givenPartialUpdate() { + DatafeedConfig datafeed = DatafeedConfigTests.createRandomizedDatafeedConfig("foo"); + DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeed.getId()); + update.setScrollSize(datafeed.getScrollSize() + 1); + + DatafeedUpdate.Builder updated = new DatafeedUpdate.Builder(datafeed.getId()); + updated.setScrollSize(datafeed.getScrollSize() + 1); + DatafeedConfig updatedDatafeed = update.build().apply(datafeed, null); + + DatafeedConfig.Builder expectedDatafeed = new DatafeedConfig.Builder(datafeed); + expectedDatafeed.setScrollSize(datafeed.getScrollSize() + 1); + assertThat(updatedDatafeed, equalTo(expectedDatafeed.build())); + } + + public void testApply_givenFullUpdateNoAggregations() { + DatafeedConfig.Builder datafeedBuilder = new DatafeedConfig.Builder("foo", "foo-feed"); + datafeedBuilder.setIndices(Collections.singletonList("i_1")); + datafeedBuilder.setTypes(Collections.singletonList("t_1")); + DatafeedConfig datafeed = datafeedBuilder.build(); + + DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeed.getId()); + 
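+        // Set every updatable field (except aggregations) so the update is verified to overwrite the original values.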
update.setJobId("bar"); + update.setIndices(Collections.singletonList("i_2")); + update.setTypes(Collections.singletonList("t_2")); + update.setQueryDelay(TimeValue.timeValueSeconds(42)); + update.setFrequency(TimeValue.timeValueSeconds(142)); + update.setQuery(QueryBuilders.termQuery("a", "b")); + update.setScriptFields(Collections.singletonList(new SearchSourceBuilder.ScriptField("a", mockScript("b"), false))); + update.setScrollSize(8000); + update.setChunkingConfig(ChunkingConfig.newManual(TimeValue.timeValueHours(1))); + + DatafeedConfig updatedDatafeed = update.build().apply(datafeed, null); + + assertThat(updatedDatafeed.getJobId(), equalTo("bar")); + assertThat(updatedDatafeed.getIndices(), equalTo(Collections.singletonList("i_2"))); + assertThat(updatedDatafeed.getTypes(), equalTo(Collections.singletonList("t_2"))); + assertThat(updatedDatafeed.getQueryDelay(), equalTo(TimeValue.timeValueSeconds(42))); + assertThat(updatedDatafeed.getFrequency(), equalTo(TimeValue.timeValueSeconds(142))); + assertThat(updatedDatafeed.getQuery(), equalTo(QueryBuilders.termQuery("a", "b"))); + assertThat(updatedDatafeed.hasAggregations(), is(false)); + assertThat(updatedDatafeed.getScriptFields(), + equalTo(Collections.singletonList(new SearchSourceBuilder.ScriptField("a", mockScript("b"), false)))); + assertThat(updatedDatafeed.getScrollSize(), equalTo(8000)); + assertThat(updatedDatafeed.getChunkingConfig(), equalTo(ChunkingConfig.newManual(TimeValue.timeValueHours(1)))); + } + + public void testApply_givenAggregations() { + DatafeedConfig.Builder datafeedBuilder = new DatafeedConfig.Builder("foo", "foo-feed"); + datafeedBuilder.setIndices(Collections.singletonList("i_1")); + datafeedBuilder.setTypes(Collections.singletonList("t_1")); + DatafeedConfig datafeed = datafeedBuilder.build(); + + DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeed.getId()); + MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); + update.setAggregations(new AggregatorFactories.Builder().addAggregator( + AggregationBuilders.histogram("a").interval(300000).field("time").subAggregation(maxTime))); + + DatafeedConfig updatedDatafeed = update.build().apply(datafeed, null); + + assertThat(updatedDatafeed.getIndices(), equalTo(Collections.singletonList("i_1"))); + assertThat(updatedDatafeed.getTypes(), equalTo(Collections.singletonList("t_1"))); + assertThat(updatedDatafeed.getAggregations(), + equalTo(new AggregatorFactories.Builder().addAggregator( + AggregationBuilders.histogram("a").interval(300000).field("time").subAggregation(maxTime)))); + } + + @Override + protected DatafeedUpdate mutateInstance(DatafeedUpdate instance) { + DatafeedUpdate.Builder builder = new DatafeedUpdate.Builder(instance); + switch (between(0, 10)) { + case 0: + builder.setId(instance.getId() + DatafeedConfigTests.randomValidDatafeedId()); + break; + case 1: + builder.setJobId(instance.getJobId() + randomAlphaOfLength(5)); + break; + case 2: + if (instance.getQueryDelay() == null) { + builder.setQueryDelay(new TimeValue(between(100, 100000))); + } else { + builder.setQueryDelay(new TimeValue(instance.getQueryDelay().millis() + between(100, 100000))); + } + break; + case 3: + if (instance.getFrequency() == null) { + builder.setFrequency(new TimeValue(between(1, 10) * 1000)); + } else { + builder.setFrequency(new TimeValue(instance.getFrequency().millis() + between(1, 10) * 1000)); + } + break; + case 4: + List indices; + if (instance.getIndices() == null) { + indices = new ArrayList<>(); + } else { + indices 
= new ArrayList<>(instance.getIndices()); + } + indices.add(randomAlphaOfLengthBetween(1, 20)); + builder.setIndices(indices); + break; + case 5: + List types; + if (instance.getTypes() == null) { + types = new ArrayList<>(); + } else { + types = new ArrayList<>(instance.getTypes()); + } + types.add(randomAlphaOfLengthBetween(1, 20)); + builder.setTypes(types); + break; + case 6: + BoolQueryBuilder query = new BoolQueryBuilder(); + if (instance.getQuery() != null) { + query.must(instance.getQuery()); + } + query.filter(new TermQueryBuilder(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))); + builder.setQuery(query); + break; + case 7: + if (instance.hasAggregations()) { + builder.setAggregations(null); + } else { + AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder(); + String timeField = randomAlphaOfLength(10); + aggBuilder.addAggregator(new DateHistogramAggregationBuilder(timeField).field(timeField).interval(between(10000, 3600000)) + .subAggregation(new MaxAggregationBuilder(timeField).field(timeField))); + builder.setAggregations(aggBuilder); + if (instance.getScriptFields().isEmpty() == false) { + builder.setScriptFields(Collections.emptyList()); + } + } + break; + case 8: + ArrayList scriptFields = new ArrayList<>(instance.getScriptFields()); + scriptFields.add(new ScriptField(randomAlphaOfLengthBetween(1, 10), new Script("foo"), true)); + builder.setScriptFields(scriptFields); + builder.setAggregations(null); + break; + case 9: + if (instance.getScrollSize() == null) { + builder.setScrollSize(between(1, 100)); + } else { + builder.setScrollSize(instance.getScrollSize() + between(1, 100)); + } + break; + case 10: + if (instance.getChunkingConfig() == null || instance.getChunkingConfig().getMode() == Mode.AUTO) { + ChunkingConfig newChunkingConfig = ChunkingConfig.newManual(new TimeValue(randomNonNegativeLong())); + builder.setChunkingConfig(newChunkingConfig); + } else { + builder.setChunkingConfig(null); + } + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return builder.build(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtilsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtilsTests.java new file mode 100644 index 0000000000000..b1eb13b5d73c5 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtilsTests.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.datafeed.extractor; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; +import org.elasticsearch.test.ESTestCase; +import org.joda.time.DateTimeZone; + +import java.util.TimeZone; + +import static org.hamcrest.Matchers.equalTo; + +public class ExtractorUtilsTests extends ESTestCase { + + public void testGetHistogramAggregation_DateHistogramHasSibling() { + AvgAggregationBuilder avg = AggregationBuilders.avg("avg"); + DateHistogramAggregationBuilder dateHistogram = AggregationBuilders.dateHistogram("time"); + + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> ExtractorUtils.getHistogramAggregation( + new AggregatorFactories.Builder().addAggregator(avg).addAggregator(dateHistogram).getAggregatorFactories())); + assertEquals("The date_histogram (or histogram) aggregation cannot have sibling aggregations", e.getMessage()); + + TermsAggregationBuilder terms = AggregationBuilders.terms("terms"); + terms.subAggregation(dateHistogram); + terms.subAggregation(avg); + e = expectThrows(ElasticsearchException.class, + () -> ExtractorUtils.getHistogramAggregation( + new AggregatorFactories.Builder().addAggregator(terms).getAggregatorFactories())); + assertEquals("The date_histogram (or histogram) aggregation cannot have sibling aggregations", e.getMessage()); + } + + public void testGetHistogramAggregation() { + AvgAggregationBuilder avg = AggregationBuilders.avg("avg"); + TermsAggregationBuilder nestedTerms = AggregationBuilders.terms("nested_terms"); + + DateHistogramAggregationBuilder dateHistogram = AggregationBuilders.dateHistogram("time"); + AggregationBuilder histogramAggregationBuilder = ExtractorUtils.getHistogramAggregation( + new AggregatorFactories.Builder().addAggregator(dateHistogram).getAggregatorFactories()); + assertEquals(dateHistogram, histogramAggregationBuilder); + + dateHistogram.subAggregation(avg).subAggregation(nestedTerms); + histogramAggregationBuilder = ExtractorUtils.getHistogramAggregation( + new AggregatorFactories.Builder().addAggregator(dateHistogram).getAggregatorFactories()); + assertEquals(dateHistogram, histogramAggregationBuilder); + + TermsAggregationBuilder toplevelTerms = AggregationBuilders.terms("top_level"); + toplevelTerms.subAggregation(dateHistogram); + histogramAggregationBuilder = ExtractorUtils.getHistogramAggregation( + new AggregatorFactories.Builder().addAggregator(toplevelTerms).getAggregatorFactories()); + + assertEquals(dateHistogram, histogramAggregationBuilder); + } + + public void testGetHistogramAggregation_MissingHistogramAgg() { + TermsAggregationBuilder terms = AggregationBuilders.terms("top_level"); + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> ExtractorUtils.getHistogramAggregation( + new AggregatorFactories.Builder().addAggregator(terms).getAggregatorFactories())); + assertEquals("A date_histogram (or histogram) aggregation is required", e.getMessage()); + } + + public void 
testGetHistogramIntervalMillis_GivenDateHistogramWithInvalidTimeZone() { + MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); + DateHistogramAggregationBuilder dateHistogram = AggregationBuilders.dateHistogram("bucket").field("time") + .interval(300000L).timeZone(DateTimeZone.forTimeZone(TimeZone.getTimeZone("EST"))).subAggregation(maxTime); + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> ExtractorUtils.getHistogramIntervalMillis(dateHistogram)); + + assertThat(e.getMessage(), equalTo("ML requires date_histogram.time_zone to be UTC")); + } + + public void testIsHistogram() { + assertTrue(ExtractorUtils.isHistogram(AggregationBuilders.dateHistogram("time"))); + assertTrue(ExtractorUtils.isHistogram(AggregationBuilders.histogram("time"))); + assertFalse(ExtractorUtils.isHistogram(AggregationBuilders.max("time"))); + } + + public void testValidateAndGetCalendarInterval() { + assertEquals(300 * 1000L, ExtractorUtils.validateAndGetCalendarInterval("5m")); + assertEquals(7200 * 1000L, ExtractorUtils.validateAndGetCalendarInterval("2h")); + assertEquals(86400L * 1000L, ExtractorUtils.validateAndGetCalendarInterval("1d")); + } + + public void testValidateAndGetCalendarInterval_intervalIsLongerThanAWeek() { + expectThrows(ElasticsearchException.class, + () -> ExtractorUtils.validateAndGetCalendarInterval("8d")); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/integration/MlRestTestStateCleaner.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/integration/MlRestTestStateCleaner.java new file mode 100644 index 0000000000000..8e326e3556b59 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/integration/MlRestTestStateCleaner.java @@ -0,0 +1,114 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.integration; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class MlRestTestStateCleaner { + + private final Logger logger; + private final RestClient adminClient; + private final ESRestTestCase testCase; + + public MlRestTestStateCleaner(Logger logger, RestClient adminClient, ESRestTestCase testCase) { + this.logger = logger; + this.adminClient = adminClient; + this.testCase = testCase; + } + + public void clearMlMetadata() throws IOException { + deleteAllDatafeeds(); + deleteAllJobs(); + // indices will be deleted by the ESIntegTestCase class + } + + @SuppressWarnings("unchecked") + private void deleteAllDatafeeds() throws IOException { + Map clusterStateAsMap = testCase.entityAsMap(adminClient.performRequest("GET", "/_cluster/state", + Collections.singletonMap("filter_path", "metadata.ml.datafeeds"))); + List> datafeeds = + (List>) XContentMapValues.extractValue("metadata.ml.datafeeds", clusterStateAsMap); + if (datafeeds == null) { + return; + } + + try { + int statusCode = adminClient.performRequest("POST", "/_xpack/ml/datafeeds/_all/_stop") + .getStatusLine().getStatusCode(); + if (statusCode != 200) { + logger.error("Got status code " + statusCode + " when stopping datafeeds"); + } + } catch (Exception e1) { + logger.warn("failed to stop all datafeeds. Forcing stop", e1); + try { + int statusCode = adminClient + .performRequest("POST", "/_xpack/ml/datafeeds/_all/_stop?force=true") + .getStatusLine().getStatusCode(); + if (statusCode != 200) { + logger.error("Got status code " + statusCode + " when stopping datafeeds"); + } + } catch (Exception e2) { + logger.warn("Force-closing all data feeds failed", e2); + } + throw new RuntimeException( + "Had to resort to force-stopping datafeeds, something went wrong?", e1); + } + + for (Map datafeed : datafeeds) { + String datafeedId = (String) datafeed.get("datafeed_id"); + int statusCode = adminClient.performRequest("DELETE", "/_xpack/ml/datafeeds/" + datafeedId).getStatusLine().getStatusCode(); + if (statusCode != 200) { + logger.error("Got status code " + statusCode + " when deleting datafeed " + datafeedId); + } + } + } + + private void deleteAllJobs() throws IOException { + Map clusterStateAsMap = testCase.entityAsMap(adminClient.performRequest("GET", "/_cluster/state", + Collections.singletonMap("filter_path", "metadata.ml.jobs"))); + @SuppressWarnings("unchecked") + List> jobConfigs = + (List>) XContentMapValues.extractValue("metadata.ml.jobs", clusterStateAsMap); + if (jobConfigs == null) { + return; + } + + try { + int statusCode = adminClient + .performRequest("POST", "/_xpack/ml/anomaly_detectors/_all/_close") + .getStatusLine().getStatusCode(); + if (statusCode != 200) { + logger.error("Got status code " + statusCode + " when closing all jobs"); + } + } catch (Exception e1) { + logger.warn("failed to close all jobs. 
Forcing closed", e1); + try { + adminClient.performRequest("POST", + "/_xpack/ml/anomaly_detectors/_all/_close?force=true"); + } catch (Exception e2) { + logger.warn("Force-closing all jobs failed", e2); + } + throw new RuntimeException("Had to resort to force-closing jobs, something went wrong?", + e1); + } + + for (Map jobConfig : jobConfigs) { + String jobId = (String) jobConfig.get("job_id"); + int statusCode = adminClient.performRequest("DELETE", "/_xpack/ml/anomaly_detectors/" + jobId).getStatusLine().getStatusCode(); + if (statusCode != 200) { + logger.error("Got status code " + statusCode + " when deleting job " + jobId); + } + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfigTests.java new file mode 100644 index 0000000000000..e64c01d7d0c3b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfigTests.java @@ -0,0 +1,965 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.writer.RecordWriter; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +import static org.hamcrest.Matchers.equalTo; + +public class AnalysisConfigTests extends AbstractSerializingTestCase { + + @Override + protected AnalysisConfig createTestInstance() { + return createRandomized().build(); + } + + public static AnalysisConfig.Builder createRandomized() { + boolean isCategorization = randomBoolean(); + List detectors = new ArrayList<>(); + int numDetectors = randomIntBetween(1, 10); + for (int i = 0; i < numDetectors; i++) { + Detector.Builder builder = new Detector.Builder("count", null); + builder.setPartitionFieldName(isCategorization ? 
"mlcategory" : "part"); + detectors.add(builder.build()); + } + AnalysisConfig.Builder builder = new AnalysisConfig.Builder(detectors); + + TimeValue bucketSpan = AnalysisConfig.Builder.DEFAULT_BUCKET_SPAN; + if (randomBoolean()) { + bucketSpan = TimeValue.timeValueSeconds(randomIntBetween(1, 1_000_000)); + builder.setBucketSpan(bucketSpan); + } + if (isCategorization) { + builder.setCategorizationFieldName(randomAlphaOfLength(10)); + if (randomBoolean()) { + builder.setCategorizationFilters(Arrays.asList(generateRandomStringArray(10, 10, false))); + } else { + CategorizationAnalyzerConfig.Builder analyzerBuilder = new CategorizationAnalyzerConfig.Builder(); + if (rarely()) { + analyzerBuilder.setAnalyzer(randomAlphaOfLength(10)); + } else { + if (randomBoolean()) { + for (String pattern : generateRandomStringArray(3, 40, false)) { + Map charFilter = new HashMap<>(); + charFilter.put("type", "pattern_replace"); + charFilter.put("pattern", pattern); + analyzerBuilder.addCharFilter(charFilter); + } + } + + Map tokenizer = new HashMap<>(); + tokenizer.put("type", "pattern"); + tokenizer.put("pattern", randomAlphaOfLength(10)); + analyzerBuilder.setTokenizer(tokenizer); + + if (randomBoolean()) { + for (String pattern : generateRandomStringArray(4, 40, false)) { + Map tokenFilter = new HashMap<>(); + tokenFilter.put("type", "pattern_replace"); + tokenFilter.put("pattern", pattern); + analyzerBuilder.addTokenFilter(tokenFilter); + } + } + } + builder.setCategorizationAnalyzerConfig(analyzerBuilder.build()); + } + } + if (randomBoolean()) { + builder.setLatency(TimeValue.timeValueSeconds(randomIntBetween(1, 1_000_000))); + } + if (randomBoolean()) { + int numBucketSpans = randomIntBetween(0, 10); + List multipleBucketSpans = new ArrayList<>(); + for (int i = 2; i <= numBucketSpans; i++) { + multipleBucketSpans.add(TimeValue.timeValueSeconds(bucketSpan.getSeconds() * i)); + } + builder.setMultipleBucketSpans(multipleBucketSpans); + } + if (randomBoolean()) { + builder.setMultivariateByFields(randomBoolean()); + } + if (randomBoolean()) { + builder.setOverlappingBuckets(randomBoolean()); + } + if (randomBoolean()) { + builder.setResultFinalizationWindow(randomNonNegativeLong()); + } + + boolean usePerPartitionNormalisation = randomBoolean(); + builder.setUsePerPartitionNormalization(usePerPartitionNormalisation); + if (!usePerPartitionNormalisation) { // influencers can't be used with per partition normalisation + builder.setInfluencers(Arrays.asList(generateRandomStringArray(10, 10, false))); + } + return builder; + } + + @Override + protected Writeable.Reader instanceReader() { + return AnalysisConfig::new; + } + + @Override + protected AnalysisConfig doParseInstance(XContentParser parser) { + return AnalysisConfig.CONFIG_PARSER.apply(parser, null).build(); + } + + public void testFieldConfiguration_singleDetector_notPreSummarised() { + // Single detector, not pre-summarised + Detector.Builder det = new Detector.Builder("max", "responsetime"); + det.setByFieldName("airline"); + det.setPartitionFieldName("sourcetype"); + AnalysisConfig ac = createConfigWithDetectors(Collections.singletonList(det.build())); + + Set termFields = new TreeSet<>(Arrays.asList("airline", "sourcetype")); + Set analysisFields = new TreeSet<>(Arrays.asList("responsetime", "airline", "sourcetype")); + + assertEquals(termFields.size(), ac.termFields().size()); + assertEquals(analysisFields.size(), ac.analysisFields().size()); + + for (String s : ac.termFields()) { + assertTrue(termFields.contains(s)); + } + + for (String s 
: termFields) { + assertTrue(ac.termFields().contains(s)); + } + + for (String s : ac.analysisFields()) { + assertTrue(analysisFields.contains(s)); + } + + for (String s : analysisFields) { + assertTrue(ac.analysisFields().contains(s)); + } + + assertEquals(1, ac.fields().size()); + assertTrue(ac.fields().contains("responsetime")); + + assertEquals(1, ac.byFields().size()); + assertTrue(ac.byFields().contains("airline")); + + assertEquals(1, ac.partitionFields().size()); + assertTrue(ac.partitionFields().contains("sourcetype")); + + assertNull(ac.getSummaryCountFieldName()); + + // Single detector, pre-summarised + analysisFields.add("summaryCount"); + AnalysisConfig.Builder builder = new AnalysisConfig.Builder(ac); + builder.setSummaryCountFieldName("summaryCount"); + ac = builder.build(); + + for (String s : ac.analysisFields()) { + assertTrue(analysisFields.contains(s)); + } + + for (String s : analysisFields) { + assertTrue(ac.analysisFields().contains(s)); + } + + assertEquals("summaryCount", ac.getSummaryCountFieldName()); + assertEquals(1, ac.getDetectors().size()); + assertEquals(0, ac.getDetectors().get(0).getDetectorIndex()); + } + + public void testFieldConfiguration_multipleDetectors_NotPreSummarised() { + // Multiple detectors, not pre-summarised + List detectors = new ArrayList<>(); + + Detector.Builder det = new Detector.Builder("metric", "metric1"); + det.setByFieldName("by_one"); + det.setPartitionFieldName("partition_one"); + detectors.add(det.build()); + + det = new Detector.Builder("metric", "metric2"); + det.setByFieldName("by_two"); + det.setOverFieldName("over_field"); + detectors.add(det.build()); + + det = new Detector.Builder("metric", "metric2"); + det.setByFieldName("by_two"); + det.setPartitionFieldName("partition_two"); + detectors.add(det.build()); + + AnalysisConfig.Builder builder = new AnalysisConfig.Builder(detectors); + builder.setInfluencers(Collections.singletonList("Influencer_Field")); + AnalysisConfig ac = builder.build(); + + Set termFields = new TreeSet<>(Arrays.asList( + "by_one", "by_two", "over_field", + "partition_one", "partition_two", "Influencer_Field")); + Set analysisFields = new TreeSet<>(Arrays.asList( + "metric1", "metric2", "by_one", "by_two", "over_field", + "partition_one", "partition_two", "Influencer_Field")); + + assertEquals(termFields.size(), ac.termFields().size()); + assertEquals(analysisFields.size(), ac.analysisFields().size()); + + for (String s : ac.termFields()) { + assertTrue(s, termFields.contains(s)); + } + + for (String s : termFields) { + assertTrue(s, ac.termFields().contains(s)); + } + + for (String s : ac.analysisFields()) { + assertTrue(analysisFields.contains(s)); + } + + for (String s : analysisFields) { + assertTrue(ac.analysisFields().contains(s)); + } + + assertEquals(2, ac.fields().size()); + assertTrue(ac.fields().contains("metric1")); + assertTrue(ac.fields().contains("metric2")); + + assertEquals(2, ac.byFields().size()); + assertTrue(ac.byFields().contains("by_one")); + assertTrue(ac.byFields().contains("by_two")); + + assertEquals(1, ac.overFields().size()); + assertTrue(ac.overFields().contains("over_field")); + + assertEquals(2, ac.partitionFields().size()); + assertTrue(ac.partitionFields().contains("partition_one")); + assertTrue(ac.partitionFields().contains("partition_two")); + + assertNull(ac.getSummaryCountFieldName()); + + assertEquals(3, ac.getDetectors().size()); + int expectedDetectorIndex = 0; + for (Detector detector : ac.getDetectors()) { + assertEquals(expectedDetectorIndex++, 
detector.getDetectorIndex()); + } + } + + public void testFieldConfiguration_singleDetector_PreSummarised() { + // Multiple detectors, pre-summarised + AnalysisConfig.Builder builder = createConfigBuilder(); + builder.setSummaryCountFieldName("summaryCount"); + AnalysisConfig ac = builder.build(); + + assertTrue(ac.analysisFields().contains("summaryCount")); + assertEquals("summaryCount", ac.getSummaryCountFieldName()); + + builder = createConfigBuilder(); + builder.setBucketSpan(TimeValue.timeValueSeconds(1000)); + builder.setMultipleBucketSpans(Arrays.asList( + TimeValue.timeValueSeconds(5000), TimeValue.timeValueSeconds(10000), TimeValue.timeValueSeconds(24000))); + ac = builder.build(); + assertTrue(ac.getMultipleBucketSpans().contains(TimeValue.timeValueSeconds(5000))); + assertTrue(ac.getMultipleBucketSpans().contains(TimeValue.timeValueSeconds(10000))); + assertTrue(ac.getMultipleBucketSpans().contains(TimeValue.timeValueSeconds(24000))); + + assertEquals(1, ac.getDetectors().size()); + assertEquals(0, ac.getDetectors().get(0).getDetectorIndex()); + } + + public void testBuild_GivenMlCategoryUsedAsByFieldButNoCategorizationFieldName() { + Detector.Builder detector = new Detector.Builder(); + detector.setFunction("count"); + detector.setByFieldName("mlcategory"); + AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + ac.setCategorizationFieldName(null); + + ElasticsearchException e = expectThrows(ElasticsearchException.class, ac::build); + assertThat(e.getMessage(), equalTo("categorization_field_name must be set for mlcategory to be available")); + } + + public void testBuild_GivenMlCategoryUsedAsOverFieldButNoCategorizationFieldName() { + Detector.Builder detector = new Detector.Builder(); + detector.setFunction("count"); + detector.setOverFieldName("mlcategory"); + AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + ac.setCategorizationFieldName(null); + + ElasticsearchException e = expectThrows(ElasticsearchException.class, ac::build); + assertThat(e.getMessage(), equalTo("categorization_field_name must be set for mlcategory to be available")); + } + + public void testBuild_GivenMlCategoryUsedAsPartitionFieldButNoCategorizationFieldName() { + Detector.Builder detector = new Detector.Builder(); + detector.setFunction("count"); + detector.setPartitionFieldName("mlcategory"); + AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + ac.setCategorizationFieldName(null); + + ElasticsearchException e = expectThrows(ElasticsearchException.class, ac::build); + assertThat(e.getMessage(), equalTo("categorization_field_name must be set for mlcategory to be available")); + } + + public void testBuild_GivenCategorizationFieldNameButNoUseOfMlCategory() { + Detector.Builder detector = new Detector.Builder(); + detector.setFunction("count"); + detector.setOverFieldName("foo"); + AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + ac.setCategorizationFieldName("msg"); + + ElasticsearchException e = expectThrows(ElasticsearchException.class, ac::build); + assertThat(e.getMessage(), equalTo("categorization_field_name is set but mlcategory is " + + "not used in any detector by/over/partition field")); + } + + public void testBuild_GivenMlCategoryUsedAsByFieldAndCategorizationFieldName() { + Detector.Builder detector = new Detector.Builder(); + detector.setFunction("count"); + 
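+ // Happy path: a detector that references mlcategory together with a categorization_field_name should build without throwing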
detector.setOverFieldName("mlcategory"); + AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + ac.setCategorizationFieldName("msg"); + ac.build(); + } + + public void testBuild_GivenNestedFieldOverlapsNonNested() { + Detector.Builder detector1 = new Detector.Builder(); + detector1.setFunction("count"); + detector1.setByFieldName("a"); + Detector.Builder detector2 = new Detector.Builder(); + detector2.setFunction("count"); + detector2.setPartitionFieldName("a.b"); + AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Arrays.asList(detector1.build(), detector2.build())); + + ElasticsearchException e = expectThrows(ElasticsearchException.class, ac::build); + assertThat(e.getMessage(), equalTo("Fields [a] and [a.b] cannot both be used in the same analysis_config")); + } + + public void testBuild_GivenOverlappingNestedFields() { + Detector.Builder detector = new Detector.Builder(); + detector.setFunction("count"); + detector.setByFieldName("a.b.c"); + AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + ac.setInfluencers(Arrays.asList("a.b", "d")); + + ElasticsearchException e = expectThrows(ElasticsearchException.class, ac::build); + assertThat(e.getMessage(), equalTo("Fields [a.b] and [a.b.c] cannot both be used in the same analysis_config")); + } + + public void testBuild_GivenNonOverlappingNestedFields() { + Detector.Builder detector = new Detector.Builder(); + detector.setFunction("count"); + detector.setByFieldName("a.b.c"); + AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + ac.setInfluencers(Arrays.asList("a.b.c", "a.b.d")); + + ac.build(); + } + + public void testEquals_GivenSameReference() { + AnalysisConfig config = createFullyPopulatedNonRandomConfig(); + assertTrue(config.equals(config)); + } + + public void testEquals_GivenDifferentClass() { + assertFalse(createFullyPopulatedNonRandomConfig().equals("a string")); + } + + public void testEquals_GivenNull() { + assertFalse(createFullyPopulatedNonRandomConfig().equals(null)); + } + + public void testEquals_GivenEqualConfig() { + AnalysisConfig config1 = createFullyPopulatedNonRandomConfig(); + AnalysisConfig config2 = createFullyPopulatedNonRandomConfig(); + + assertTrue(config1.equals(config2)); + assertTrue(config2.equals(config1)); + assertEquals(config1.hashCode(), config2.hashCode()); + } + + public void testEquals_GivenDifferentBucketSpan() { + AnalysisConfig.Builder builder = createConfigBuilder(); + builder.setBucketSpan(TimeValue.timeValueSeconds(1800)); + AnalysisConfig config1 = builder.build(); + + builder = createConfigBuilder(); + builder.setBucketSpan(TimeValue.timeValueHours(1)); + AnalysisConfig config2 = builder.build(); + + assertFalse(config1.equals(config2)); + assertFalse(config2.equals(config1)); + } + + public void testEquals_GivenCategorizationField() { + AnalysisConfig.Builder builder = createValidCategorizationConfig(); + builder.setCategorizationFieldName("foo"); + AnalysisConfig config1 = builder.build(); + + builder = createValidCategorizationConfig(); + builder.setCategorizationFieldName("bar"); + AnalysisConfig config2 = builder.build(); + + assertFalse(config1.equals(config2)); + assertFalse(config2.equals(config1)); + } + + public void testEquals_GivenDifferentDetector() { + AnalysisConfig config1 = createConfigWithDetectors(Collections.singletonList(new Detector.Builder("min", "low_count").build())); + + AnalysisConfig config2 = 
createConfigWithDetectors(Collections.singletonList(new Detector.Builder("min", "high_count").build())); + + assertFalse(config1.equals(config2)); + assertFalse(config2.equals(config1)); + } + + public void testEquals_GivenDifferentInfluencers() { + AnalysisConfig.Builder builder = createConfigBuilder(); + builder.setInfluencers(Collections.singletonList("foo")); + AnalysisConfig config1 = builder.build(); + + builder = createConfigBuilder(); + builder.setInfluencers(Collections.singletonList("bar")); + AnalysisConfig config2 = builder.build(); + + assertFalse(config1.equals(config2)); + assertFalse(config2.equals(config1)); + } + + public void testEquals_GivenDifferentLatency() { + AnalysisConfig.Builder builder = createConfigBuilder(); + builder.setLatency(TimeValue.timeValueSeconds(1800)); + AnalysisConfig config1 = builder.build(); + + builder = createConfigBuilder(); + builder.setLatency(TimeValue.timeValueSeconds(1801)); + AnalysisConfig config2 = builder.build(); + + assertFalse(config1.equals(config2)); + assertFalse(config2.equals(config1)); + } + + public void testEquals_GivenSummaryCountField() { + AnalysisConfig.Builder builder = createConfigBuilder(); + builder.setSummaryCountFieldName("foo"); + AnalysisConfig config1 = builder.build(); + + builder = createConfigBuilder(); + builder.setSummaryCountFieldName("bar"); + AnalysisConfig config2 = builder.build(); + + assertFalse(config1.equals(config2)); + assertFalse(config2.equals(config1)); + } + + public void testEquals_GivenMultivariateByField() { + AnalysisConfig.Builder builder = createConfigBuilder(); + builder.setMultivariateByFields(true); + AnalysisConfig config1 = builder.build(); + + builder = createConfigBuilder(); + builder.setMultivariateByFields(false); + AnalysisConfig config2 = builder.build(); + + assertFalse(config1.equals(config2)); + assertFalse(config2.equals(config1)); + } + + public void testEquals_GivenDifferentCategorizationFilters() { + AnalysisConfig.Builder configBuilder1 = createValidCategorizationConfig(); + AnalysisConfig.Builder configBuilder2 = createValidCategorizationConfig(); + configBuilder1.setCategorizationFilters(Arrays.asList("foo", "bar")); + configBuilder2.setCategorizationFilters(Arrays.asList("foo", "foobar")); + AnalysisConfig config1 = configBuilder1.build(); + AnalysisConfig config2 = configBuilder2.build(); + + assertFalse(config1.equals(config2)); + assertFalse(config2.equals(config1)); + } + + public void testExtractReferencedLists() { + DetectionRule rule1 = new DetectionRule.Builder(Collections.singletonList(RuleCondition.createCategorical("foo", + "filter1"))).build(); + DetectionRule rule2 = new DetectionRule.Builder(Collections.singletonList(RuleCondition.createCategorical("foo", + "filter2"))).build(); + Detector.Builder detector1 = new Detector.Builder("count", null); + detector1.setByFieldName("foo"); + detector1.setRules(Collections.singletonList(rule1)); + Detector.Builder detector2 = new Detector.Builder("count", null); + detector2.setRules(Collections.singletonList(rule2)); + detector2.setByFieldName("foo"); + AnalysisConfig config = new AnalysisConfig.Builder( + Arrays.asList(detector1.build(), detector2.build(), new Detector.Builder("count", null).build())).build(); + + assertEquals(new HashSet<>(Arrays.asList("filter1", "filter2")), config.extractReferencedFilters()); + } + + private static AnalysisConfig createFullyPopulatedNonRandomConfig() { + Detector.Builder detector = new Detector.Builder("min", "count"); + detector.setOverFieldName("mlcategory"); + 
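+ // Deterministic, fully-populated config backing the equals/hashCode tests above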
AnalysisConfig.Builder builder = new AnalysisConfig.Builder( + Collections.singletonList(detector.build())); + builder.setBucketSpan(TimeValue.timeValueHours(1)); + builder.setCategorizationFieldName("cat"); + builder.setCategorizationAnalyzerConfig( + CategorizationAnalyzerConfig.buildDefaultCategorizationAnalyzer(Collections.singletonList("foo"))); + builder.setInfluencers(Collections.singletonList("myInfluencer")); + builder.setLatency(TimeValue.timeValueSeconds(3600)); + builder.setSummaryCountFieldName("sumCount"); + return builder.build(); + } + + private static AnalysisConfig createConfigWithDetectors(List detectors) { + return new AnalysisConfig.Builder(detectors).build(); + } + + private static AnalysisConfig.Builder createConfigBuilder() { + return new AnalysisConfig.Builder(Collections.singletonList(new Detector.Builder("min", "count").build())); + } + + public void testVerify_GivenNegativeBucketSpan() { + AnalysisConfig.Builder config = createValidConfig(); + config.setBucketSpan(TimeValue.timeValueSeconds(-1)); + + IllegalArgumentException e = ESTestCase.expectThrows(IllegalArgumentException.class, config::build); + + assertEquals("bucket_span cannot be less or equal than 0. Value = -1", e.getMessage()); + } + + public void testVerify_GivenNegativeLatency() { + AnalysisConfig.Builder analysisConfig = createValidConfig(); + analysisConfig.setLatency(TimeValue.timeValueSeconds(-1)); + + IllegalArgumentException e = ESTestCase.expectThrows(IllegalArgumentException.class, analysisConfig::build); + + assertEquals("latency cannot be less than 0. Value = -1", e.getMessage()); + } + + public void testVerify_GivenDefaultConfig_ShouldBeInvalidDueToNoDetectors() { + AnalysisConfig.Builder analysisConfig = createValidConfig(); + analysisConfig.setDetectors(null); + + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, analysisConfig::build); + + assertEquals(Messages.getMessage(Messages.JOB_CONFIG_NO_DETECTORS), e.getMessage()); + } + + public void testVerify_GivenValidConfig() { + AnalysisConfig.Builder analysisConfig = createValidConfig(); + analysisConfig.build(); + } + + public void testVerify_GivenValidConfigWithCategorizationFieldNameAndCategorizationFilters() { + AnalysisConfig.Builder analysisConfig = createValidCategorizationConfig(); + analysisConfig.setCategorizationFilters(Arrays.asList("foo", "bar")); + + analysisConfig.build(); + } + + public void testVerify_GivenValidConfigWithCategorizationFieldNameAndCategorizationAnalyzerConfig() { + AnalysisConfig.Builder analysisConfig = createValidCategorizationConfig(); + analysisConfig.setCategorizationAnalyzerConfig( + CategorizationAnalyzerConfig.buildDefaultCategorizationAnalyzer(Arrays.asList("foo", "bar"))); + + analysisConfig.build(); + } + + public void testVerify_GivenBothCategorizationFiltersAndCategorizationAnalyzerConfig() { + AnalysisConfig.Builder analysisConfig = createValidCategorizationConfig(); + analysisConfig.setCategorizationFilters(Arrays.asList("foo", "bar")); + analysisConfig.setCategorizationAnalyzerConfig( + CategorizationAnalyzerConfig.buildDefaultCategorizationAnalyzer(Collections.singletonList("baz"))); + + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, analysisConfig::build); + + assertEquals(Messages.getMessage(Messages.JOB_CONFIG_CATEGORIZATION_FILTERS_INCOMPATIBLE_WITH_CATEGORIZATION_ANALYZER), + e.getMessage()); + } + + public void testVerify_GivenFieldIsControlField() { + AnalysisConfig.Builder analysisConfig = createValidConfig(); + if 
(randomBoolean()) { + analysisConfig.setSummaryCountFieldName(RecordWriter.CONTROL_FIELD_NAME); + } else { + analysisConfig.setCategorizationFieldName(RecordWriter.CONTROL_FIELD_NAME); + } + + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, analysisConfig::build); + + assertEquals(Messages.getMessage(Messages.JOB_CONFIG_INVALID_FIELDNAME, RecordWriter.CONTROL_FIELD_NAME, + RecordWriter.CONTROL_FIELD_NAME), e.getMessage()); + } + + public void testVerify_OverlappingBuckets() { + List detectors; + Detector detector; + + boolean onByDefault = false; + + // Uncomment this when overlappingBuckets turned on by default + if (onByDefault) { + // Test overlappingBuckets unset + AnalysisConfig.Builder analysisConfig = createValidConfig(); + analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(5000L)); + detectors = new ArrayList<>(); + detector = new Detector.Builder("count", null).build(); + detectors.add(detector); + detector = new Detector.Builder("mean", "value").build(); + detectors.add(detector); + analysisConfig.setDetectors(detectors); + AnalysisConfig ac = analysisConfig.build(); + assertTrue(ac.getOverlappingBuckets()); + + // Test overlappingBuckets unset + analysisConfig = createValidConfig(); + analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(5000L)); + detectors = new ArrayList<>(); + detector = new Detector.Builder("count", null).build(); + detectors.add(detector); + detector = new Detector.Builder("rare", "value").build(); + detectors.add(detector); + analysisConfig.setDetectors(detectors); + ac = analysisConfig.build(); + assertFalse(ac.getOverlappingBuckets()); + + // Test overlappingBuckets unset + analysisConfig = createValidConfig(); + analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(5000L)); + detectors = new ArrayList<>(); + detector = new Detector.Builder("count", null).build(); + detectors.add(detector); + detector = new Detector.Builder("min", "value").build(); + detectors.add(detector); + detector = new Detector.Builder("max", "value").build(); + detectors.add(detector); + analysisConfig.setDetectors(detectors); + ac = analysisConfig.build(); + assertFalse(ac.getOverlappingBuckets()); + } + + // Test overlappingBuckets set + AnalysisConfig.Builder analysisConfig = createValidConfig(); + analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(5000L)); + detectors = new ArrayList<>(); + detector = new Detector.Builder("count", null).build(); + detectors.add(detector); + Detector.Builder builder = new Detector.Builder("rare", null); + builder.setByFieldName("value"); + detectors.add(builder.build()); + analysisConfig.setOverlappingBuckets(false); + analysisConfig.setDetectors(detectors); + assertFalse(analysisConfig.build().getOverlappingBuckets()); + + // Test overlappingBuckets set + analysisConfig = createValidConfig(); + analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(5000L)); + analysisConfig.setOverlappingBuckets(true); + detectors = new ArrayList<>(); + detector = new Detector.Builder("count", null).build(); + detectors.add(detector); + builder = new Detector.Builder("rare", null); + builder.setByFieldName("value"); + detectors.add(builder.build()); + analysisConfig.setDetectors(detectors); + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, analysisConfig::build); + assertEquals("Overlapping buckets cannot be used with function '[rare]'", e.getMessage()); + + // Test overlappingBuckets set + analysisConfig = createValidConfig(); + 
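+ // Explicitly disabling overlapping buckets must be honoured even for functions that would otherwise allow them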
analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(5000L)); + analysisConfig.setOverlappingBuckets(false); + detectors = new ArrayList<>(); + detector = new Detector.Builder("count", null).build(); + detectors.add(detector); + detector = new Detector.Builder("mean", "value").build(); + detectors.add(detector); + analysisConfig.setDetectors(detectors); + AnalysisConfig ac = analysisConfig.build(); + assertFalse(ac.getOverlappingBuckets()); + } + + public void testVerify_GivenMetricAndSummaryCountField() { + Detector d = new Detector.Builder("metric", "my_metric").build(); + AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Collections.singletonList(d)); + ac.setSummaryCountFieldName("my_summary_count"); + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, ac::build); + assertEquals(Messages.getMessage(Messages.JOB_CONFIG_FUNCTION_INCOMPATIBLE_PRESUMMARIZED, DetectorFunction.METRIC), e.getMessage()); + } + + public void testMultipleBucketsConfig() { + AnalysisConfig.Builder ac = createValidConfig(); + ac.setMultipleBucketSpans(Arrays.asList( + TimeValue.timeValueSeconds(10L), + TimeValue.timeValueSeconds(15L), + TimeValue.timeValueSeconds(20L), + TimeValue.timeValueSeconds(25L), + TimeValue.timeValueSeconds(30L), + TimeValue.timeValueSeconds(35L))); + List detectors = new ArrayList<>(); + Detector detector = new Detector.Builder("count", null).build(); + detectors.add(detector); + ac.setDetectors(detectors); + + ac.setBucketSpan(TimeValue.timeValueSeconds(4L)); + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, ac::build); + assertEquals(Messages.getMessage(Messages.JOB_CONFIG_MULTIPLE_BUCKETSPANS_MUST_BE_MULTIPLE, "10s", "4s"), e.getMessage()); + + ac.setBucketSpan(TimeValue.timeValueSeconds(5L)); + ac.build(); + + AnalysisConfig.Builder ac2 = createValidConfig(); + ac2.setBucketSpan(TimeValue.timeValueSeconds(5L)); + ac2.setDetectors(detectors); + ac2.setMultipleBucketSpans(Arrays.asList( + TimeValue.timeValueSeconds(10L), + TimeValue.timeValueSeconds(15L), + TimeValue.timeValueSeconds(20L), + TimeValue.timeValueSeconds(25L), + TimeValue.timeValueSeconds(30L))); + assertFalse(ac.equals(ac2)); + ac2.setMultipleBucketSpans(Arrays.asList( + TimeValue.timeValueSeconds(10L), + TimeValue.timeValueSeconds(15L), + TimeValue.timeValueSeconds(20L), + TimeValue.timeValueSeconds(25L), + TimeValue.timeValueSeconds(30L), + TimeValue.timeValueSeconds(35L))); + + ac.setBucketSpan(TimeValue.timeValueSeconds(222L)); + ac.setMultipleBucketSpans(Collections.emptyList()); + ac.build(); + + ac.setMultipleBucketSpans(Collections.singletonList(TimeValue.timeValueSeconds(222L))); + e = ESTestCase.expectThrows(ElasticsearchException.class, ac::build); + assertEquals(Messages.getMessage(Messages.JOB_CONFIG_MULTIPLE_BUCKETSPANS_MUST_BE_MULTIPLE, "3.7m", "3.7m"), e.getMessage()); + + ac.setMultipleBucketSpans(Arrays.asList(TimeValue.timeValueSeconds(-444L), TimeValue.timeValueSeconds(-888L))); + e = ESTestCase.expectThrows(ElasticsearchException.class, ac::build); + assertEquals(Messages.getMessage(Messages.JOB_CONFIG_MULTIPLE_BUCKETSPANS_MUST_BE_MULTIPLE, -444, "3.7m"), e.getMessage()); + } + + public void testVerify_GivenCategorizationFiltersButNoCategorizationFieldName() { + AnalysisConfig.Builder config = createValidConfig(); + config.setCategorizationFilters(Collections.singletonList("foo")); + + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, config::build); + + 
assertEquals(Messages.getMessage(Messages.JOB_CONFIG_CATEGORIZATION_FILTERS_REQUIRE_CATEGORIZATION_FIELD_NAME), e.getMessage()); + } + + public void testVerify_GivenDuplicateCategorizationFilters() { + AnalysisConfig.Builder config = createValidCategorizationConfig(); + config.setCategorizationFilters(Arrays.asList("foo", "bar", "foo")); + + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, config::build); + + assertEquals(Messages.getMessage(Messages.JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_DUPLICATES), e.getMessage()); + } + + public void testVerify_GivenEmptyCategorizationFilter() { + AnalysisConfig.Builder config = createValidCategorizationConfig(); + config.setCategorizationFilters(Arrays.asList("foo", "")); + + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, config::build); + + assertEquals(Messages.getMessage(Messages.JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_EMPTY), e.getMessage()); + } + + + public void testCheckDetectorsHavePartitionFields() { + AnalysisConfig.Builder config = createValidConfig(); + config.setUsePerPartitionNormalization(true); + + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, config::build); + + assertEquals(Messages.getMessage(Messages.JOB_CONFIG_PER_PARTITION_NORMALIZATION_REQUIRES_PARTITION_FIELD), e.getMessage()); + } + + public void testCheckDetectorsHavePartitionFields_doesntThrowWhenValid() { + AnalysisConfig.Builder config = createValidConfig(); + Detector.Builder builder = new Detector.Builder(config.build().getDetectors().get(0)); + builder.setPartitionFieldName("pField"); + config.build().getDetectors().set(0, builder.build()); + config.setUsePerPartitionNormalization(true); + + config.build(); + } + + public void testCheckNoInfluencersAreSet() { + + AnalysisConfig.Builder config = createValidConfig(); + Detector.Builder builder = new Detector.Builder(config.build().getDetectors().get(0)); + builder.setPartitionFieldName("pField"); + config.build().getDetectors().set(0, builder.build()); + config.setInfluencers(Arrays.asList("inf1", "inf2")); + config.setUsePerPartitionNormalization(true); + + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, config::build); + + assertEquals(Messages.getMessage(Messages.JOB_CONFIG_PER_PARTITION_NORMALIZATION_CANNOT_USE_INFLUENCERS), e.getMessage()); + } + + public void testVerify_GivenCategorizationFiltersContainInvalidRegex() { + AnalysisConfig.Builder config = createValidCategorizationConfig(); + config.setCategorizationFilters(Arrays.asList("foo", "(")); + + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, config::build); + + assertEquals(Messages.getMessage(Messages.JOB_CONFIG_CATEGORIZATION_FILTERS_CONTAINS_INVALID_REGEX, "("), e.getMessage()); + } + + private static AnalysisConfig.Builder createValidConfig() { + List detectors = new ArrayList<>(); + Detector detector = new Detector.Builder("count", null).build(); + detectors.add(detector); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(detectors); + analysisConfig.setBucketSpan(TimeValue.timeValueHours(1)); + analysisConfig.setLatency(TimeValue.ZERO); + return analysisConfig; + } + + private static AnalysisConfig.Builder createValidCategorizationConfig() { + Detector.Builder detector = new Detector.Builder("count", null); + detector.setByFieldName("mlcategory"); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + 
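+ // Minimal config that legitimately uses mlcategory, so categorization-specific settings can be validated against it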
analysisConfig.setBucketSpan(TimeValue.timeValueHours(1)); + analysisConfig.setLatency(TimeValue.ZERO); + analysisConfig.setCategorizationFieldName("msg"); + return analysisConfig; + } + + @Override + protected AnalysisConfig mutateInstance(AnalysisConfig instance) { + AnalysisConfig.Builder builder = new AnalysisConfig.Builder(instance); + switch (between(0, 12)) { + case 0: + List detectors = new ArrayList<>(instance.getDetectors()); + Detector.Builder detector = new Detector.Builder(); + detector.setFunction("mean"); + detector.setFieldName(randomAlphaOfLengthBetween(10, 20)); + detectors.add(detector.build()); + builder.setDetectors(detectors); + break; + case 1: + builder.setBucketSpan(new TimeValue(instance.getBucketSpan().millis() + (between(1, 1000) * 1000))); + builder.setMultipleBucketSpans(Collections.emptyList()); + break; + case 2: + if (instance.getLatency() == null) { + builder.setLatency(new TimeValue(between(1, 1000) * 1000)); + } else { + builder.setLatency(new TimeValue(instance.getLatency().millis() + (between(1, 1000) * 1000))); + } + break; + case 3: + if (instance.getCategorizationFieldName() == null) { + String categorizationFieldName = instance.getCategorizationFieldName() + randomAlphaOfLengthBetween(1, 10); + builder.setCategorizationFieldName(categorizationFieldName); + List newDetectors = new ArrayList<>(instance.getDetectors()); + Detector.Builder catDetector = new Detector.Builder(); + catDetector.setFunction("mean"); + catDetector.setFieldName(randomAlphaOfLengthBetween(10, 20)); + catDetector.setPartitionFieldName("mlcategory"); + newDetectors.add(catDetector.build()); + builder.setDetectors(newDetectors); + } else { + builder.setCategorizationFieldName(instance.getCategorizationFieldName() + randomAlphaOfLengthBetween(1, 10)); + } + break; + case 4: + List filters; + if (instance.getCategorizationFilters() == null) { + filters = new ArrayList<>(); + } else { + filters = new ArrayList<>(instance.getCategorizationFilters()); + } + filters.add(randomAlphaOfLengthBetween(1, 20)); + builder.setCategorizationFilters(filters); + builder.setCategorizationAnalyzerConfig(null); + if (instance.getCategorizationFieldName() == null) { + builder.setCategorizationFieldName(randomAlphaOfLengthBetween(1, 10)); + List newDetectors = new ArrayList<>(instance.getDetectors()); + Detector.Builder catDetector = new Detector.Builder(); + catDetector.setFunction("mean"); + catDetector.setFieldName(randomAlphaOfLengthBetween(10, 20)); + catDetector.setPartitionFieldName("mlcategory"); + newDetectors.add(catDetector.build()); + builder.setDetectors(newDetectors); + } + break; + case 5: + builder.setCategorizationFilters(null); + builder.setCategorizationAnalyzerConfig(CategorizationAnalyzerConfig.buildDefaultCategorizationAnalyzer( + Collections.singletonList(randomAlphaOfLengthBetween(1, 20)))); + if (instance.getCategorizationFieldName() == null) { + builder.setCategorizationFieldName(randomAlphaOfLengthBetween(1, 10)); + List newDetectors = new ArrayList<>(instance.getDetectors()); + Detector.Builder catDetector = new Detector.Builder(); + catDetector.setFunction("count"); + catDetector.setByFieldName("mlcategory"); + newDetectors.add(catDetector.build()); + builder.setDetectors(newDetectors); + } + break; + case 6: + builder.setSummaryCountFieldName(instance.getSummaryCountFieldName() + randomAlphaOfLengthBetween(1, 5)); + break; + case 7: + List influencers = new ArrayList<>(instance.getInfluencers()); + influencers.add(randomAlphaOfLengthBetween(5, 10)); + 
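+ // influencers are incompatible with per-partition normalization, so it is switched off when one is added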
builder.setInfluencers(influencers); + builder.setUsePerPartitionNormalization(false); + break; + case 8: + if (instance.getOverlappingBuckets() == null) { + builder.setOverlappingBuckets(randomBoolean()); + } else { + builder.setOverlappingBuckets(instance.getOverlappingBuckets() == false); + } + break; + case 9: + if (instance.getResultFinalizationWindow() == null) { + builder.setResultFinalizationWindow(between(1, 100) * 1000L); + } else { + builder.setResultFinalizationWindow(instance.getResultFinalizationWindow() + (between(1, 100) * 1000)); + } + break; + case 10: + if (instance.getMultivariateByFields() == null) { + builder.setMultivariateByFields(randomBoolean()); + } else { + builder.setMultivariateByFields(instance.getMultivariateByFields() == false); + } + break; + case 11: + List multipleBucketSpans; + if (instance.getMultipleBucketSpans() == null) { + multipleBucketSpans = new ArrayList<>(); + } else { + multipleBucketSpans = new ArrayList<>(instance.getMultipleBucketSpans()); + } + multipleBucketSpans.add(new TimeValue(between(2, 10) * instance.getBucketSpan().millis())); + builder.setMultipleBucketSpans(multipleBucketSpans); + break; + case 12: + boolean usePerPartitionNormalization = instance.getUsePerPartitionNormalization() == false; + builder.setUsePerPartitionNormalization(usePerPartitionNormalization); + if (usePerPartitionNormalization) { + builder.setInfluencers(Collections.emptyList()); + } + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return builder.build(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisLimitsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisLimitsTests.java new file mode 100644 index 0000000000000..f751556fefa60 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisLimitsTests.java @@ -0,0 +1,202 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class AnalysisLimitsTests extends AbstractSerializingTestCase { + + @Override + protected AnalysisLimits createTestInstance() { + return createRandomized(); + } + + public static AnalysisLimits createRandomized() { + return new AnalysisLimits(randomBoolean() ? (long) randomIntBetween(1, 1000000) : null, + randomBoolean() ? 
randomNonNegativeLong() : null); + } + + @Override + protected Writeable.Reader instanceReader() { + return AnalysisLimits::new; + } + + @Override + protected AnalysisLimits doParseInstance(XContentParser parser) { + return AnalysisLimits.CONFIG_PARSER.apply(parser, null); + } + + public void testParseModelMemoryLimitGivenNegativeNumber() throws IOException { + String json = "{\"model_memory_limit\": -1}"; + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json); + XContentParseException e = expectThrows(XContentParseException.class, () -> AnalysisLimits.CONFIG_PARSER.apply(parser, null)); + assertThat(ExceptionsHelper.detailedMessage(e), containsString("model_memory_limit must be at least 1 MiB. Value = -1")); + } + + public void testParseModelMemoryLimitGivenZero() throws IOException { + String json = "{\"model_memory_limit\": 0}"; + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json); + XContentParseException e = expectThrows(XContentParseException.class, () -> AnalysisLimits.CONFIG_PARSER.apply(parser, null)); + assertThat(ExceptionsHelper.detailedMessage(e), containsString("model_memory_limit must be at least 1 MiB. Value = 0")); + } + + public void testParseModelMemoryLimitGivenPositiveNumber() throws IOException { + String json = "{\"model_memory_limit\": 2048}"; + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json); + + AnalysisLimits limits = AnalysisLimits.CONFIG_PARSER.apply(parser, null); + + assertThat(limits.getModelMemoryLimit(), equalTo(2048L)); + } + + public void testParseModelMemoryLimitGivenNegativeString() throws IOException { + String json = "{\"model_memory_limit\":\"-4MB\"}"; + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json); + XContentParseException e = expectThrows(XContentParseException.class, () -> AnalysisLimits.CONFIG_PARSER.apply(parser, null)); + assertThat(ExceptionsHelper.detailedMessage(e), containsString("Values less than -1 bytes are not supported: -4mb")); + } + + public void testParseModelMemoryLimitGivenZeroString() throws IOException { + String json = "{\"model_memory_limit\":\"0MB\"}"; + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json); + XContentParseException e = expectThrows(XContentParseException.class, () -> AnalysisLimits.CONFIG_PARSER.apply(parser, null)); + assertThat(ExceptionsHelper.detailedMessage(e), containsString("model_memory_limit must be at least 1 MiB. Value = 0")); + } + + public void testParseModelMemoryLimitGivenLessThanOneMBString() throws IOException { + String json = "{\"model_memory_limit\":\"1000Kb\"}"; + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json); + XContentParseException e = expectThrows(XContentParseException.class, () -> AnalysisLimits.CONFIG_PARSER.apply(parser, null)); + assertThat(ExceptionsHelper.detailedMessage(e), containsString("model_memory_limit must be at least 1 MiB. 
Value = 0")); + } + + public void testParseModelMemoryLimitGivenStringMultipleOfMBs() throws IOException { + String json = "{\"model_memory_limit\":\"4g\"}"; + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json); + + AnalysisLimits limits = AnalysisLimits.CONFIG_PARSER.apply(parser, null); + + assertThat(limits.getModelMemoryLimit(), equalTo(4096L)); + } + + public void testParseModelMemoryLimitGivenStringNonMultipleOfMBs() throws IOException { + String json = "{\"model_memory_limit\":\"1300kb\"}"; + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json); + + AnalysisLimits limits = AnalysisLimits.CONFIG_PARSER.apply(parser, null); + + assertThat(limits.getModelMemoryLimit(), equalTo(1L)); + } + + public void testModelMemoryDefault() { + AnalysisLimits limits = new AnalysisLimits(randomNonNegativeLong()); + assertThat(limits.getModelMemoryLimit(), equalTo(AnalysisLimits.DEFAULT_MODEL_MEMORY_LIMIT_MB)); + } + + public void testEquals_GivenEqual() { + AnalysisLimits analysisLimits1 = new AnalysisLimits(10L, 20L); + AnalysisLimits analysisLimits2 = new AnalysisLimits(10L, 20L); + + assertTrue(analysisLimits1.equals(analysisLimits1)); + assertTrue(analysisLimits1.equals(analysisLimits2)); + assertTrue(analysisLimits2.equals(analysisLimits1)); + } + + + public void testEquals_GivenDifferentModelMemoryLimit() { + AnalysisLimits analysisLimits1 = new AnalysisLimits(10L, 20L); + AnalysisLimits analysisLimits2 = new AnalysisLimits(11L, 20L); + + assertFalse(analysisLimits1.equals(analysisLimits2)); + assertFalse(analysisLimits2.equals(analysisLimits1)); + } + + + public void testEquals_GivenDifferentCategorizationExamplesLimit() { + AnalysisLimits analysisLimits1 = new AnalysisLimits(10L, 20L); + AnalysisLimits analysisLimits2 = new AnalysisLimits(10L, 21L); + + assertFalse(analysisLimits1.equals(analysisLimits2)); + assertFalse(analysisLimits2.equals(analysisLimits1)); + } + + + public void testHashCode_GivenEqual() { + AnalysisLimits analysisLimits1 = new AnalysisLimits(5555L, 3L); + AnalysisLimits analysisLimits2 = new AnalysisLimits(5555L, 3L); + + assertEquals(analysisLimits1.hashCode(), analysisLimits2.hashCode()); + } + + public void testVerify_GivenNegativeCategorizationExamplesLimit() { + ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> new AnalysisLimits(1L, -1L)); + String errorMessage = Messages.getMessage(Messages.JOB_CONFIG_FIELD_VALUE_TOO_LOW, + AnalysisLimits.CATEGORIZATION_EXAMPLES_LIMIT, 0, -1L); + assertEquals(errorMessage, e.getMessage()); + } + + public void testVerify_GivenValid() { + new AnalysisLimits(null, 1L); + new AnalysisLimits(1L, null); + new AnalysisLimits(1L, 1L); + } + + protected AnalysisLimits mutateInstance(AnalysisLimits instance) throws IOException { + Long memoryModelLimit = instance.getModelMemoryLimit(); + Long categorizationExamplesLimit = instance.getCategorizationExamplesLimit(); + switch (between(0, 1)) { + case 0: + if (memoryModelLimit == null) { + memoryModelLimit = randomNonNegativeLong(); + } else { + if (randomBoolean()) { + memoryModelLimit = null; + } else { + memoryModelLimit += between(1, 10000); + } + } + break; + case 1: + if (categorizationExamplesLimit == null) { + categorizationExamplesLimit = randomNonNegativeLong(); + } else { + if (randomBoolean()) { + categorizationExamplesLimit = 
null; + } else { + categorizationExamplesLimit += between(1, 10000); + } + } + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new AnalysisLimits(memoryModelLimit, categorizationExamplesLimit); + }; +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DataDescriptionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DataDescriptionTests.java new file mode 100644 index 0000000000000..48f36823e3119 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DataDescriptionTests.java @@ -0,0 +1,321 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription.DataFormat; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.core.Is.is; + +public class DataDescriptionTests extends AbstractSerializingTestCase { + + public void testDefault() { + DataDescription dataDescription = new DataDescription.Builder().build(); + assertThat(dataDescription.getFormat(), equalTo(DataFormat.XCONTENT)); + assertThat(dataDescription.getTimeField(), equalTo("time")); + assertThat(dataDescription.getTimeFormat(), equalTo("epoch_ms")); + assertThat(dataDescription.getFieldDelimiter(), is(nullValue())); + assertThat(dataDescription.getQuoteCharacter(), is(nullValue())); + } + + public void testDefaultDelimited() { + DataDescription.Builder dataDescriptionBuilder = new DataDescription.Builder(); + dataDescriptionBuilder.setFormat(DataFormat.DELIMITED); + DataDescription dataDescription = dataDescriptionBuilder.build(); + + assertThat(dataDescription.getFormat(), equalTo(DataFormat.DELIMITED)); + assertThat(dataDescription.getTimeField(), equalTo("time")); + assertThat(dataDescription.getTimeFormat(), equalTo("epoch_ms")); + assertThat(dataDescription.getFieldDelimiter(), is('\t')); + assertThat(dataDescription.getQuoteCharacter(), is('"')); + } + + public void testVerify_GivenValidFormat() { + DataDescription.Builder description = new DataDescription.Builder(); + description.setTimeFormat("epoch"); + description.setTimeFormat("epoch_ms"); + description.setTimeFormat("yyyy-MM-dd HH"); + String goodFormat = "yyyy.MM.dd G 'at' HH:mm:ss z"; + description.setTimeFormat(goodFormat); + } + + public void testVerify_GivenInValidFormat() { + DataDescription.Builder description = new 
DataDescription.Builder(); + expectThrows(IllegalArgumentException.class, () -> description.setTimeFormat(null)); + + ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> description.setTimeFormat("invalid")); + assertEquals(Messages.getMessage(Messages.JOB_CONFIG_INVALID_TIMEFORMAT, "invalid"), e.getMessage()); + + e = expectThrows(ElasticsearchException.class, () -> description.setTimeFormat("")); + assertEquals(Messages.getMessage(Messages.JOB_CONFIG_INVALID_TIMEFORMAT, ""), e.getMessage()); + + e = expectThrows(ElasticsearchException.class, () -> description.setTimeFormat("y-M-dd")); + assertEquals(Messages.getMessage(Messages.JOB_CONFIG_INVALID_TIMEFORMAT, "y-M-dd"), e.getMessage()); + expectThrows(ElasticsearchException.class, () -> description.setTimeFormat("YYY-mm-UU hh:mm:ssY")); + } + + public void testTransform_GivenDelimitedAndEpoch() { + DataDescription.Builder dd = new DataDescription.Builder(); + dd.setFormat(DataFormat.DELIMITED); + dd.setTimeFormat("epoch"); + assertFalse(dd.build().transform()); + } + + public void testTransform_GivenDelimitedAndEpochMs() { + DataDescription.Builder dd = new DataDescription.Builder(); + dd.setFormat(DataFormat.DELIMITED); + dd.setTimeFormat("epoch_ms"); + assertTrue(dd.build().transform()); + } + + public void testIsTransformTime_GivenTimeFormatIsEpoch() { + DataDescription.Builder dd = new DataDescription.Builder(); + dd.setTimeFormat("epoch"); + assertFalse(dd.build().isTransformTime()); + } + + public void testIsTransformTime_GivenTimeFormatIsEpochMs() { + DataDescription.Builder dd = new DataDescription.Builder(); + dd.setTimeFormat("epoch_ms"); + assertTrue(dd.build().isTransformTime()); + } + + public void testIsTransformTime_GivenTimeFormatPattern() { + DataDescription.Builder dd = new DataDescription.Builder(); + dd.setTimeFormat("yyyy-MM-dd HH:mm:ss.SSSZ"); + assertTrue(dd.build().isTransformTime()); + } + + public void testEquals_GivenDifferentDateFormat() { + DataDescription.Builder description1 = new DataDescription.Builder(); + description1.setFormat(DataFormat.XCONTENT); + description1.setQuoteCharacter('"'); + description1.setTimeField("timestamp"); + description1.setTimeFormat("epoch"); + description1.setFieldDelimiter(','); + + DataDescription.Builder description2 = new DataDescription.Builder(); + description2.setFormat(DataFormat.DELIMITED); + description2.setQuoteCharacter('"'); + description2.setTimeField("timestamp"); + description2.setTimeFormat("epoch"); + description2.setFieldDelimiter(','); + + assertFalse(description1.build().equals(description2.build())); + assertFalse(description2.build().equals(description1.build())); + } + + public void testEquals_GivenDifferentQuoteCharacter() { + DataDescription.Builder description1 = new DataDescription.Builder(); + description1.setFormat(DataFormat.XCONTENT); + description1.setQuoteCharacter('"'); + description1.setTimeField("timestamp"); + description1.setTimeFormat("epoch"); + description1.setFieldDelimiter(','); + + DataDescription.Builder description2 = new DataDescription.Builder(); + description2.setFormat(DataFormat.XCONTENT); + description2.setQuoteCharacter('\''); + description2.setTimeField("timestamp"); + description2.setTimeFormat("epoch"); + description2.setFieldDelimiter(','); + + assertFalse(description1.build().equals(description2.build())); + assertFalse(description2.build().equals(description1.build())); + } + + public void testEquals_GivenDifferentTimeField() { + DataDescription.Builder description1 = new 
DataDescription.Builder(); + description1.setFormat(DataFormat.XCONTENT); + description1.setQuoteCharacter('"'); + description1.setTimeField("timestamp"); + description1.setTimeFormat("epoch"); + description1.setFieldDelimiter(','); + + DataDescription.Builder description2 = new DataDescription.Builder(); + description2.setFormat(DataFormat.XCONTENT); + description2.setQuoteCharacter('"'); + description2.setTimeField("time"); + description2.setTimeFormat("epoch"); + description2.setFieldDelimiter(','); + + assertFalse(description1.build().equals(description2.build())); + assertFalse(description2.build().equals(description1.build())); + } + + public void testEquals_GivenDifferentTimeFormat() { + DataDescription.Builder description1 = new DataDescription.Builder(); + description1.setFormat(DataFormat.XCONTENT); + description1.setQuoteCharacter('"'); + description1.setTimeField("timestamp"); + description1.setTimeFormat("epoch"); + description1.setFieldDelimiter(','); + + DataDescription.Builder description2 = new DataDescription.Builder(); + description2.setFormat(DataFormat.XCONTENT); + description2.setQuoteCharacter('"'); + description2.setTimeField("timestamp"); + description2.setTimeFormat("epoch_ms"); + description2.setFieldDelimiter(','); + + assertFalse(description1.build().equals(description2.build())); + assertFalse(description2.build().equals(description1.build())); + } + + public void testEquals_GivenDifferentFieldDelimiter() { + DataDescription.Builder description1 = new DataDescription.Builder(); + description1.setFormat(DataFormat.XCONTENT); + description1.setQuoteCharacter('"'); + description1.setTimeField("timestamp"); + description1.setTimeFormat("epoch"); + description1.setFieldDelimiter(','); + + DataDescription.Builder description2 = new DataDescription.Builder(); + description2.setFormat(DataFormat.XCONTENT); + description2.setQuoteCharacter('"'); + description2.setTimeField("timestamp"); + description2.setTimeFormat("epoch"); + description2.setFieldDelimiter(';'); + + assertFalse(description1.build().equals(description2.build())); + assertFalse(description2.build().equals(description1.build())); + } + + public void testInvalidDataFormat() throws Exception { + BytesArray json = new BytesArray("{ \"format\":\"INEXISTENT_FORMAT\" }"); + XContentParser parser = JsonXContent.jsonXContent + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json.streamInput()); + XContentParseException ex = expectThrows(XContentParseException.class, + () -> DataDescription.CONFIG_PARSER.apply(parser, null)); + assertThat(ex.getMessage(), containsString("[data_description] failed to parse field [format]")); + Throwable cause = ex.getCause(); + assertNotNull(cause); + assertThat(cause, instanceOf(IllegalArgumentException.class)); + assertThat(cause.getMessage(), + containsString("No enum constant org.elasticsearch.xpack.core.ml.job.config.DataDescription.DataFormat.INEXISTENT_FORMAT")); + } + + public void testInvalidFieldDelimiter() throws Exception { + BytesArray json = new BytesArray("{ \"field_delimiter\":\",,\" }"); + XContentParser parser = JsonXContent.jsonXContent + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json.streamInput()); + XContentParseException ex = expectThrows(XContentParseException.class, + () -> DataDescription.CONFIG_PARSER.apply(parser, null)); + assertThat(ex.getMessage(), containsString("[data_description] failed to parse field [field_delimiter]")); + Throwable cause = ex.getCause(); 
+ assertNotNull(cause); + assertThat(cause, instanceOf(IllegalArgumentException.class)); + assertThat(cause.getMessage(), + containsString("String must be a single character, found [,,]")); + } + + public void testInvalidQuoteCharacter() throws Exception { + BytesArray json = new BytesArray("{ \"quote_character\":\"''\" }"); + XContentParser parser = JsonXContent.jsonXContent + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json.streamInput()); + XContentParseException ex = expectThrows(XContentParseException.class, + () -> DataDescription.CONFIG_PARSER.apply(parser, null)); + assertThat(ex.getMessage(), containsString("[data_description] failed to parse field [quote_character]")); + Throwable cause = ex.getCause(); + assertNotNull(cause); + assertThat(cause, instanceOf(IllegalArgumentException.class)); + assertThat(cause.getMessage(), containsString("String must be a single character, found ['']")); + } + + @Override + protected DataDescription createTestInstance() { + DataDescription.Builder dataDescription = new DataDescription.Builder(); + if (randomBoolean()) { + dataDescription.setFormat(randomFrom(DataFormat.values())); + } + if (randomBoolean()) { + dataDescription.setTimeField(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + String format; + if (randomBoolean()) { + format = DataDescription.EPOCH; + } else if (randomBoolean()) { + format = DataDescription.EPOCH_MS; + } else { + format = "yyy.MM.dd G 'at' HH:mm:ss z"; + } + dataDescription.setTimeFormat(format); + } + if (randomBoolean()) { + dataDescription.setFieldDelimiter(randomAlphaOfLength(1).charAt(0)); + } + if (randomBoolean()) { + dataDescription.setQuoteCharacter(randomAlphaOfLength(1).charAt(0)); + } + return dataDescription.build(); + } + + @Override + protected Reader instanceReader() { + return DataDescription::new; + } + + @Override + protected DataDescription doParseInstance(XContentParser parser) { + return DataDescription.CONFIG_PARSER.apply(parser, null).build(); + } + + protected DataDescription mutateInstance(DataDescription instance) throws java.io.IOException { + DataFormat format = instance.getFormat(); + String timeField = instance.getTimeField(); + String timeFormat = instance.getTimeFormat(); + Character delimiter = instance.getFieldDelimiter(); + Character quoteChar = instance.getQuoteCharacter(); + switch (between(0, 4)) { + case 0: + if (format == DataFormat.DELIMITED) { + format = DataFormat.XCONTENT; + } else { + format = DataFormat.DELIMITED; + } + break; + case 1: + timeField += randomAlphaOfLengthBetween(1, 10); + break; + case 2: + timeFormat = "yyyy-MM-dd-HH-mm-ss"; + break; + case 3: + if (delimiter == null) { + delimiter = randomAlphaOfLength(1).charAt(0); + } else { + delimiter = null; + } + break; + case 4: + if (quoteChar == null) { + quoteChar = randomAlphaOfLength(1).charAt(0); + } else { + quoteChar = null; + } + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new DataDescription(format, timeField, timeFormat, delimiter, quoteChar); + }; +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRuleTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRuleTests.java new file mode 100644 index 0000000000000..3aaf99ab730f8 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRuleTests.java @@ -0,0 +1,170 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.List; + +public class DetectionRuleTests extends AbstractSerializingTestCase { + + public void testExtractReferencedLists() { + RuleCondition numericalCondition = + new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, "field", "value", new Condition(Operator.GT, "5"), null); + List conditions = Arrays.asList( + numericalCondition, + RuleCondition.createCategorical("foo", "filter1"), + RuleCondition.createCategorical("bar", "filter2")); + + DetectionRule rule = new DetectionRule.Builder(conditions).build(); + + assertEquals(new HashSet<>(Arrays.asList("filter1", "filter2")), rule.extractReferencedFilters()); + } + + public void testEqualsGivenSameObject() { + DetectionRule rule = createFullyPopulated().build(); + assertTrue(rule.equals(rule)); + } + + public void testEqualsGivenString() { + assertFalse(createFullyPopulated().build().equals("a string")); + } + + public void testEqualsGivenDifferentTargetFieldName() { + DetectionRule rule1 = createFullyPopulated().build(); + DetectionRule rule2 = createFullyPopulated().setTargetFieldName("targetField2").build(); + assertFalse(rule1.equals(rule2)); + assertFalse(rule2.equals(rule1)); + } + + public void testEqualsGivenDifferentTargetFieldValue() { + DetectionRule rule1 = createFullyPopulated().build(); + DetectionRule rule2 = createFullyPopulated().setTargetFieldValue("targetValue2").build(); + assertFalse(rule1.equals(rule2)); + assertFalse(rule2.equals(rule1)); + } + + public void testEqualsGivenDifferentConnective() { + DetectionRule rule1 = createFullyPopulated().build(); + DetectionRule rule2 = createFullyPopulated().setConditionsConnective(Connective.OR).build(); + assertFalse(rule1.equals(rule2)); + assertFalse(rule2.equals(rule1)); + } + + public void testEqualsGivenRules() { + DetectionRule rule1 = createFullyPopulated().build(); + DetectionRule rule2 = createFullyPopulated().setConditions(createRule("10")).build(); + assertFalse(rule1.equals(rule2)); + assertFalse(rule2.equals(rule1)); + } + + public void testEqualsGivenEqual() { + DetectionRule rule1 = createFullyPopulated().build(); + DetectionRule rule2 = createFullyPopulated().build(); + assertTrue(rule1.equals(rule2)); + assertTrue(rule2.equals(rule1)); + assertEquals(rule1.hashCode(), rule2.hashCode()); + } + + private static DetectionRule.Builder createFullyPopulated() { + return new DetectionRule.Builder(createRule("5")) + .setActions(EnumSet.of(RuleAction.FILTER_RESULTS, RuleAction.SKIP_SAMPLING)) + .setTargetFieldName("targetField") + .setTargetFieldValue("targetValue") + .setConditionsConnective(Connective.AND); + } + + private static List createRule(String value) { + Condition condition = new Condition(Operator.GT, value); + return Collections.singletonList(new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, null, null, condition, null)); + } + + @Override + protected DetectionRule createTestInstance() { + int size = 1 + randomInt(20); + List 
ruleConditions = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + // no need for random condition (it is already tested) + ruleConditions.addAll(createRule(Double.toString(randomDouble()))); + } + DetectionRule.Builder builder = new DetectionRule.Builder(ruleConditions); + + if (randomBoolean()) { + EnumSet actions = EnumSet.noneOf(RuleAction.class); + int actionsCount = randomIntBetween(1, RuleAction.values().length); + for (int i = 0; i < actionsCount; ++i) { + actions.add(randomFrom(RuleAction.values())); + } + builder.setActions(actions); + } + + if (randomBoolean()) { + builder.setConditionsConnective(randomFrom(Connective.values())); + } + + if (randomBoolean()) { + builder.setTargetFieldName(randomAlphaOfLengthBetween(1, 20)); + builder.setTargetFieldValue(randomAlphaOfLengthBetween(1, 20)); + } + + return builder.build(); + } + + @Override + protected Reader instanceReader() { + return DetectionRule::new; + } + + @Override + protected DetectionRule doParseInstance(XContentParser parser) { + return DetectionRule.CONFIG_PARSER.apply(parser, null).build(); + } + + @Override + protected DetectionRule mutateInstance(DetectionRule instance) throws IOException { + List conditions = instance.getConditions(); + EnumSet actions = instance.getActions(); + String targetFieldName = instance.getTargetFieldName(); + String targetFieldValue = instance.getTargetFieldValue(); + Connective connective = instance.getConditionsConnective(); + + switch (between(0, 3)) { + case 0: + conditions = new ArrayList<>(conditions); + conditions.addAll(createRule(Double.toString(randomDouble()))); + break; + case 1: + targetFieldName = randomAlphaOfLengthBetween(5, 10); + break; + case 2: + targetFieldValue = randomAlphaOfLengthBetween(5, 10); + if (targetFieldName == null) { + targetFieldName = randomAlphaOfLengthBetween(5, 10); + } + break; + case 3: + if (connective == Connective.AND) { + connective = Connective.OR; + } else { + connective = Connective.AND; + } + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + + return new DetectionRule.Builder(conditions).setActions(actions).setTargetFieldName(targetFieldName) + .setTargetFieldValue(targetFieldValue).setConditionsConnective(connective).build(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DetectorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DetectorTests.java new file mode 100644 index 0000000000000..1296928d68478 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DetectorTests.java @@ -0,0 +1,609 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.writer.RecordWriter; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class DetectorTests extends AbstractSerializingTestCase { + + public void testEquals_GivenEqual() { + Detector.Builder builder = new Detector.Builder("mean", "field"); + builder.setByFieldName("by_field"); + builder.setOverFieldName("over_field"); + builder.setPartitionFieldName("partition"); + builder.setUseNull(false); + Detector detector1 = builder.build(); + + builder = new Detector.Builder("mean", "field"); + builder.setByFieldName("by_field"); + builder.setOverFieldName("over_field"); + builder.setPartitionFieldName("partition"); + builder.setUseNull(false); + Detector detector2 = builder.build(); + + assertTrue(detector1.equals(detector2)); + assertTrue(detector2.equals(detector1)); + assertEquals(detector1.hashCode(), detector2.hashCode()); + } + + public void testEquals_GivenDifferentDetectorDescription() { + Detector detector1 = createDetector().build(); + Detector.Builder builder = createDetector(); + builder.setDetectorDescription("bar"); + Detector detector2 = builder.build(); + + assertFalse(detector1.equals(detector2)); + } + + public void testEquals_GivenDifferentByFieldName() { + Detector detector1 = createDetector().build(); + Detector detector2 = createDetector().build(); + + assertEquals(detector1, detector2); + + Detector.Builder builder = new Detector.Builder(detector2); + builder.setByFieldName("by2"); + Condition condition = new Condition(Operator.GT, "5"); + DetectionRule rule = new DetectionRule.Builder( + Collections.singletonList(new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, "by2", "val", condition, null))) + .setActions(RuleAction.FILTER_RESULTS).setTargetFieldName("over_field") + .setTargetFieldValue("targetValue") + .setConditionsConnective(Connective.AND) + .build(); + builder.setRules(Collections.singletonList(rule)); + detector2 = builder.build(); + assertFalse(detector1.equals(detector2)); + } + + public void testExtractAnalysisFields() { + Detector detector = createDetector().build(); + assertEquals(Arrays.asList("by_field", "over_field", "partition"), detector.extractAnalysisFields()); + Detector.Builder builder = new Detector.Builder(detector); + builder.setPartitionFieldName(null); + detector = builder.build(); + assertEquals(Arrays.asList("by_field", "over_field"), detector.extractAnalysisFields()); + builder = new Detector.Builder(detector); + Condition condition = new Condition(Operator.GT, "5"); + DetectionRule rule = new DetectionRule.Builder( + Collections.singletonList(new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, null, null, condition, null))) + .setActions(RuleAction.FILTER_RESULTS) + .setTargetFieldName("over_field") + .setTargetFieldValue("targetValue") + .setConditionsConnective(Connective.AND) + .build(); + 
builder.setRules(Collections.singletonList(rule)); + builder.setByFieldName(null); + detector = builder.build(); + assertEquals(Collections.singletonList("over_field"), detector.extractAnalysisFields()); + builder = new Detector.Builder(detector); + rule = new DetectionRule.Builder( + Collections.singletonList(new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, null, null, condition, null))) + .setActions(RuleAction.FILTER_RESULTS) + .setConditionsConnective(Connective.AND) + .build(); + builder.setRules(Collections.singletonList(rule)); + builder.setOverFieldName(null); + detector = builder.build(); + assertTrue(detector.extractAnalysisFields().isEmpty()); + } + + public void testExtractReferencedLists() { + Detector.Builder builder = createDetector(); + builder.setRules(Arrays.asList( + new DetectionRule.Builder(Collections.singletonList(RuleCondition.createCategorical("by_field", "list1"))).build(), + new DetectionRule.Builder(Collections.singletonList(RuleCondition.createCategorical("by_field", "list2"))).build())); + + Detector detector = builder.build(); + assertEquals(new HashSet<>(Arrays.asList("list1", "list2")), detector.extractReferencedFilters()); + } + + public void testInvalid_GivenFieldIsControlField() { + Detector.Builder detector = new Detector.Builder("mean", "field"); + if (randomBoolean()) { + detector.setByFieldName(RecordWriter.CONTROL_FIELD_NAME); + } else if (randomBoolean()) { + detector.setOverFieldName(RecordWriter.CONTROL_FIELD_NAME); + } else { + detector.setPartitionFieldName(RecordWriter.CONTROL_FIELD_NAME); + } + + ElasticsearchException e = expectThrows(ElasticsearchException.class , detector::build); + + assertEquals(Messages.getMessage(Messages.JOB_CONFIG_INVALID_FIELDNAME, RecordWriter.CONTROL_FIELD_NAME, + RecordWriter.CONTROL_FIELD_NAME), e.getMessage()); + } + + private Detector.Builder createDetector() { + Detector.Builder detector = new Detector.Builder("mean", "field"); + detector.setByFieldName("by_field"); + detector.setOverFieldName("over_field"); + detector.setPartitionFieldName("partition"); + detector.setUseNull(true); + Condition condition = new Condition(Operator.GT, "5"); + DetectionRule rule = new DetectionRule.Builder( + Collections.singletonList(new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, "by_field", "val", condition, null))) + .setActions(RuleAction.FILTER_RESULTS) + .setTargetFieldName("over_field") + .setTargetFieldValue("targetValue") + .setConditionsConnective(Connective.AND) + .build(); + detector.setRules(Collections.singletonList(rule)); + return detector; + } + + @Override + protected Detector createTestInstance() { + DetectorFunction function; + Detector.Builder detector; + if (randomBoolean()) { + detector = new Detector.Builder(function = randomFrom(Detector.COUNT_WITHOUT_FIELD_FUNCTIONS), null); + } else { + EnumSet functions = EnumSet.copyOf(Detector.FIELD_NAME_FUNCTIONS); + functions.removeAll(Detector.Builder.FUNCTIONS_WITHOUT_RULE_SUPPORT); + detector = new Detector.Builder(function = randomFrom(functions), randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + detector.setDetectorDescription(randomAlphaOfLengthBetween(1, 20)); + } + String fieldName = null; + if (randomBoolean()) { + detector.setPartitionFieldName(fieldName = randomAlphaOfLengthBetween(6, 20)); + } else if (randomBoolean() && Detector.NO_OVER_FIELD_NAME_FUNCTIONS.contains(function) == false) { + detector.setOverFieldName(fieldName = randomAlphaOfLengthBetween(6, 20)); + } else if (randomBoolean()) { + 
detector.setByFieldName(fieldName = randomAlphaOfLengthBetween(6, 20)); + } + if (randomBoolean()) { + detector.setExcludeFrequent(randomFrom(Detector.ExcludeFrequent.values())); + } + if (randomBoolean()) { + int size = randomInt(10); + List rules = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + // no need for random DetectionRule (it is already tested) + Condition condition = new Condition(Operator.GT, "5"); + rules.add(new DetectionRule.Builder( + Collections.singletonList(new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, null, null, condition, null))) + .setTargetFieldName(fieldName).build()); + } + detector.setRules(rules); + } + if (randomBoolean()) { + detector.setUseNull(randomBoolean()); + } + return detector.build(); + } + + @Override + protected Reader instanceReader() { + return Detector::new; + } + + @Override + protected Detector doParseInstance(XContentParser parser) { + return Detector.CONFIG_PARSER.apply(parser, null).build(); + } + + public void testVerifyFieldNames_givenInvalidChars() { + Collection testCaseArguments = getCharactersAndValidity(); + for (Object [] args : testCaseArguments) { + String character = (String) args[0]; + boolean valid = (boolean) args[1]; + Detector.Builder detector = createDetectorWithValidFieldNames(); + verifyFieldName(detector, character, valid); + detector = createDetectorWithValidFieldNames(); + verifyByFieldName(detector, character, valid); + detector = createDetectorWithValidFieldNames(); + verifyOverFieldName(detector, character, valid); + detector = createDetectorWithValidFieldNames(); + verifyPartitionFieldName(detector, character, valid); + } + } + + public void testVerifyFunctionForPreSummariedInput() { + Collection testCaseArguments = getCharactersAndValidity(); + for (Object [] args : testCaseArguments) { + String character = (String) args[0]; + boolean valid = (boolean) args[1]; + Detector.Builder detector = createDetectorWithValidFieldNames(); + verifyFieldName(detector, character, valid); + detector = createDetectorWithValidFieldNames(); + verifyByFieldName(new Detector.Builder(detector.build()), character, valid); + verifyOverFieldName(new Detector.Builder(detector.build()), character, valid); + verifyByFieldName(new Detector.Builder(detector.build()), character, valid); + verifyPartitionFieldName(new Detector.Builder(detector.build()), character, valid); + } + } + + private static void verifyFieldName(Detector.Builder detector, String character, boolean valid) { + Detector.Builder updated = createDetectorWithSpecificFieldName(detector.build().getFieldName() + character); + if (valid == false) { + expectThrows(ElasticsearchException.class , updated::build); + } + } + + private static void verifyByFieldName(Detector.Builder detector, String character, boolean valid) { + detector.setByFieldName(detector.build().getByFieldName() + character); + if (valid == false) { + expectThrows(ElasticsearchException.class , detector::build); + } + } + + private static void verifyOverFieldName(Detector.Builder detector, String character, boolean valid) { + detector.setOverFieldName(detector.build().getOverFieldName() + character); + if (valid == false) { + expectThrows(ElasticsearchException.class , detector::build); + } + } + + private static void verifyPartitionFieldName(Detector.Builder detector, String character, boolean valid) { + detector.setPartitionFieldName(detector.build().getPartitionFieldName() + character); + if (valid == false) { + expectThrows(ElasticsearchException.class , detector::build); + } + } + + private 
static Detector.Builder createDetectorWithValidFieldNames() { + Detector.Builder d = new Detector.Builder("metric", "field"); + d.setByFieldName("by_field"); + d.setOverFieldName("over_field"); + d.setPartitionFieldName("partition"); + return d; + } + + private static Detector.Builder createDetectorWithSpecificFieldName(String fieldName) { + Detector.Builder d = new Detector.Builder("metric", fieldName); + d.setByFieldName("by_field"); + d.setOverFieldName("over_field"); + d.setPartitionFieldName("partition"); + return d; + } + + private static Collection getCharactersAndValidity() { + return Arrays.asList(new Object[][]{ + // char, isValid? + {"a", true}, + {"[", true}, + {"]", true}, + {"(", true}, + {")", true}, + {"=", true}, + {"-", true}, + {" ", true}, + {"\"", false}, + {"\\", false}, + {"\t", false}, + {"\n", false}, + }); + } + + public void testVerify_GivenFunctionOnly() { + // if nothing else is set the count functions (excluding distinct count) + // are the only allowable functions + new Detector.Builder(DetectorFunction.COUNT, null).build(); + + EnumSet difference = EnumSet.allOf(DetectorFunction.class); + difference.remove(DetectorFunction.COUNT); + difference.remove(DetectorFunction.HIGH_COUNT); + difference.remove(DetectorFunction.LOW_COUNT); + difference.remove(DetectorFunction.NON_ZERO_COUNT); + difference.remove(DetectorFunction.LOW_NON_ZERO_COUNT); + difference.remove(DetectorFunction.HIGH_NON_ZERO_COUNT); + difference.remove(DetectorFunction.TIME_OF_DAY); + difference.remove(DetectorFunction.TIME_OF_WEEK); + for (DetectorFunction f : difference) { + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> new Detector.Builder(f, null).build()); + assertThat(e.getMessage(), equalTo("Unless a count or temporal function is used one of field_name," + + " by_field_name or over_field_name must be set")); + } + } + + public void testVerify_GivenFunctionsNotSupportingOverField() { + EnumSet noOverFieldFunctions = EnumSet.of( + DetectorFunction.NON_ZERO_COUNT, + DetectorFunction.LOW_NON_ZERO_COUNT, + DetectorFunction.HIGH_NON_ZERO_COUNT + ); + for (DetectorFunction f: noOverFieldFunctions) { + Detector.Builder builder = new Detector.Builder(f, null); + builder.setOverFieldName("over_field"); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> builder.build()); + assertThat(e.getMessage(), equalTo("over_field_name cannot be used with function '" + f + "'")); + } + } + + public void testVerify_GivenFunctionsCannotHaveJustOverField() { + EnumSet difference = EnumSet.allOf(DetectorFunction.class); + difference.remove(DetectorFunction.COUNT); + difference.remove(DetectorFunction.LOW_COUNT); + difference.remove(DetectorFunction.HIGH_COUNT); + difference.remove(DetectorFunction.TIME_OF_DAY); + difference.remove(DetectorFunction.TIME_OF_WEEK); + for (DetectorFunction f: difference) { + Detector.Builder builder = new Detector.Builder(f, null); + builder.setOverFieldName("over_field"); + expectThrows(ElasticsearchStatusException.class, () -> builder.build()); + } + } + + public void testVerify_GivenFunctionsCanHaveJustOverField() { + EnumSet noOverFieldFunctions = EnumSet.of( + DetectorFunction.COUNT, + DetectorFunction.LOW_COUNT, + DetectorFunction.HIGH_COUNT + ); + for (DetectorFunction f: noOverFieldFunctions) { + Detector.Builder builder = new Detector.Builder(f, null); + builder.setOverFieldName("over_field"); + builder.build(); + } + } + + public void testVerify_GivenFunctionsCannotHaveFieldName() { + for 
(DetectorFunction f : Detector.COUNT_WITHOUT_FIELD_FUNCTIONS) { + Detector.Builder builder = new Detector.Builder(f, "field"); + builder.setByFieldName("b"); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> builder.build()); + assertThat(e.getMessage(), equalTo("field_name cannot be used with function '" + f + "'")); + } + + // Nor rare + { + Detector.Builder builder = new Detector.Builder(DetectorFunction.RARE, "field"); + builder.setByFieldName("b"); + builder.setOverFieldName("over_field"); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> builder.build()); + assertThat(e.getMessage(), equalTo("field_name cannot be used with function 'rare'")); + } + + // Nor freq_rare + { + Detector.Builder builder = new Detector.Builder(DetectorFunction.FREQ_RARE, "field"); + builder.setByFieldName("b"); + builder.setOverFieldName("over_field"); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> builder.build()); + assertThat(e.getMessage(), equalTo("field_name cannot be used with function 'freq_rare'")); + } + } + + public void testVerify_GivenFunctionsRequiringFieldName() { + // some functions require a fieldname + for (DetectorFunction f : Detector.FIELD_NAME_FUNCTIONS) { + Detector.Builder builder = new Detector.Builder(f, "f"); + builder.build(); + } + } + + public void testVerify_GivenFieldNameFunctionsAndOverField() { + // some functions require a fieldname + for (DetectorFunction f : Detector.FIELD_NAME_FUNCTIONS) { + Detector.Builder builder = new Detector.Builder(f, "f"); + builder.setOverFieldName("some_over_field"); + builder.build(); + } + } + + public void testVerify_GivenFieldNameFunctionsAndByField() { + // some functions require a fieldname + for (DetectorFunction f : Detector.FIELD_NAME_FUNCTIONS) { + Detector.Builder builder = new Detector.Builder(f, "f"); + builder.setByFieldName("some_by_field"); + builder.build(); + } + } + + public void testVerify_GivenCountFunctionsWithByField() { + // some functions require a fieldname + for (DetectorFunction f : Detector.COUNT_WITHOUT_FIELD_FUNCTIONS) { + Detector.Builder builder = new Detector.Builder(f, null); + builder.setByFieldName("some_by_field"); + builder.build(); + } + } + + public void testVerify_GivenCountFunctionsWithOverField() { + EnumSet functions = EnumSet.copyOf(Detector.COUNT_WITHOUT_FIELD_FUNCTIONS); + functions.removeAll(Detector.NO_OVER_FIELD_NAME_FUNCTIONS); + for (DetectorFunction f : functions) { + Detector.Builder builder = new Detector.Builder(f, null); + builder.setOverFieldName("some_over_field"); + builder.build(); + } + } + + public void testVerify_GivenCountFunctionsWithByAndOverFields() { + EnumSet functions = EnumSet.copyOf(Detector.COUNT_WITHOUT_FIELD_FUNCTIONS); + functions.removeAll(Detector.NO_OVER_FIELD_NAME_FUNCTIONS); + for (DetectorFunction f : functions) { + Detector.Builder builder = new Detector.Builder(f, null); + builder.setByFieldName("some_over_field"); + builder.setOverFieldName("some_by_field"); + builder.build(); + } + } + + public void testVerify_GivenRareAndFreqRareWithByAndOverFields() { + for (DetectorFunction f : EnumSet.of(DetectorFunction.RARE, DetectorFunction.FREQ_RARE)) { + Detector.Builder builder = new Detector.Builder(f, null); + builder.setOverFieldName("over_field"); + builder.setByFieldName("by_field"); + builder.build(); + } + } + + public void testVerify_GivenFunctionsThatCanHaveByField() { + for (DetectorFunction f : EnumSet.of(DetectorFunction.COUNT, 
DetectorFunction.HIGH_COUNT, DetectorFunction.LOW_COUNT, + DetectorFunction.RARE, DetectorFunction.NON_ZERO_COUNT, DetectorFunction.LOW_NON_ZERO_COUNT, + DetectorFunction.HIGH_NON_ZERO_COUNT)) { + Detector.Builder builder = new Detector.Builder(f, null); + builder.setByFieldName("b"); + builder.build(); + } + } + + public void testVerify_GivenInvalidRuleTargetFieldName() { + Detector.Builder detector = new Detector.Builder("mean", "metricVale"); + detector.setByFieldName("metricName"); + detector.setPartitionFieldName("instance"); + RuleCondition ruleCondition = + new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, "metricName", "metricVale", new Condition(Operator.LT, "5"), null); + DetectionRule rule = new DetectionRule.Builder(Collections.singletonList(ruleCondition)).setTargetFieldName("instancE").build(); + detector.setRules(Collections.singletonList(rule)); + + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, detector::build); + + assertEquals(Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_INVALID_TARGET_FIELD_NAME, + "[metricName, instance]", "instancE"), + e.getMessage()); + } + + public void testVerify_GivenValidRule() { + Detector.Builder detector = new Detector.Builder("mean", "metricVale"); + detector.setByFieldName("metricName"); + detector.setPartitionFieldName("instance"); + RuleCondition ruleCondition = + new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, "metricName", "CPU", new Condition(Operator.LT, "5"), null); + DetectionRule rule = new DetectionRule.Builder(Collections.singletonList(ruleCondition)).setTargetFieldName("instance").build(); + detector.setRules(Collections.singletonList(rule)); + detector.build(); + } + + public void testVerify_GivenCategoricalRuleOnAllPartitioningFields() { + Detector.Builder detector = new Detector.Builder("count", null); + detector.setPartitionFieldName("my_partition"); + detector.setOverFieldName("my_over"); + detector.setByFieldName("my_by"); + DetectionRule rule = new DetectionRule.Builder(Arrays.asList( + RuleCondition.createCategorical("my_partition", "my_filter_id"), + RuleCondition.createCategorical("my_over", "my_filter_id"), + RuleCondition.createCategorical("my_by", "my_filter_id") + )).build(); + detector.setRules(Collections.singletonList(rule)); + + detector.build(); + } + + public void testVerify_GivenCategoricalRuleOnInvalidField() { + Detector.Builder detector = new Detector.Builder("mean", "my_metric"); + detector.setPartitionFieldName("my_partition"); + detector.setOverFieldName("my_over"); + detector.setByFieldName("my_by"); + DetectionRule rule = new DetectionRule.Builder(Collections.singletonList( + RuleCondition.createCategorical("my_metric", "my_filter_id") + )).build(); + detector.setRules(Collections.singletonList(rule)); + + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, detector::build); + + assertEquals(Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_CONDITION_INVALID_FIELD_NAME, + "[my_by, my_over, my_partition]", "my_metric"), + e.getMessage()); + } + + public void testVerify_GivenSameByAndPartition() { + Detector.Builder detector = new Detector.Builder("count", ""); + detector.setByFieldName("x"); + detector.setPartitionFieldName("x"); + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, detector::build); + + assertEquals("partition_field_name and by_field_name cannot be the same: 'x'", e.getMessage()); + } + + public void testVerify_GivenSameByAndOver() { + Detector.Builder detector = new 
Detector.Builder("count", ""); + detector.setByFieldName("x"); + detector.setOverFieldName("x"); + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, detector::build); + + assertEquals("by_field_name and over_field_name cannot be the same: 'x'", e.getMessage()); + } + + public void testVerify_GivenSameOverAndPartition() { + Detector.Builder detector = new Detector.Builder("count", ""); + detector.setOverFieldName("x"); + detector.setPartitionFieldName("x"); + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, detector::build); + + assertEquals("partition_field_name and over_field_name cannot be the same: 'x'", e.getMessage()); + } + + public void testVerify_GivenByIsCount() { + Detector.Builder detector = new Detector.Builder("count", ""); + detector.setByFieldName("count"); + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, detector::build); + + assertEquals("'count' is not a permitted value for by_field_name", e.getMessage()); + } + + public void testVerify_GivenOverIsCount() { + Detector.Builder detector = new Detector.Builder("count", ""); + detector.setOverFieldName("count"); + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, detector::build); + + assertEquals("'count' is not a permitted value for over_field_name", e.getMessage()); + } + + public void testVerify_GivenByIsBy() { + Detector.Builder detector = new Detector.Builder("count", ""); + detector.setByFieldName("by"); + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, detector::build); + + assertEquals("'by' is not a permitted value for by_field_name", e.getMessage()); + } + + public void testVerify_GivenOverIsBy() { + Detector.Builder detector = new Detector.Builder("count", ""); + detector.setOverFieldName("by"); + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, detector::build); + + assertEquals("'by' is not a permitted value for over_field_name", e.getMessage()); + } + + public void testVerify_GivenByIsOver() { + Detector.Builder detector = new Detector.Builder("count", ""); + detector.setByFieldName("over"); + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, detector::build); + + assertEquals("'over' is not a permitted value for by_field_name", e.getMessage()); + } + + public void testVerify_GivenOverIsOver() { + Detector.Builder detector = new Detector.Builder("count", ""); + detector.setOverFieldName("over"); + ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, detector::build); + + assertEquals("'over' is not a permitted value for over_field_name", e.getMessage()); + } + + public void testExcludeFrequentForString() { + assertEquals(Detector.ExcludeFrequent.ALL, Detector.ExcludeFrequent.forString("all")); + assertEquals(Detector.ExcludeFrequent.ALL, Detector.ExcludeFrequent.forString("ALL")); + assertEquals(Detector.ExcludeFrequent.NONE, Detector.ExcludeFrequent.forString("none")); + assertEquals(Detector.ExcludeFrequent.NONE, Detector.ExcludeFrequent.forString("NONE")); + assertEquals(Detector.ExcludeFrequent.BY, Detector.ExcludeFrequent.forString("by")); + assertEquals(Detector.ExcludeFrequent.BY, Detector.ExcludeFrequent.forString("BY")); + assertEquals(Detector.ExcludeFrequent.OVER, Detector.ExcludeFrequent.forString("over")); + assertEquals(Detector.ExcludeFrequent.OVER, Detector.ExcludeFrequent.forString("OVER")); + } +} diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java new file mode 100644 index 0000000000000..c1fae72af27f7 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java @@ -0,0 +1,672 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.MachineLearningField; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class JobTests extends AbstractSerializingTestCase { + + private static final String FUTURE_JOB = "{\n" + + " \"job_id\": \"farequote\",\n" + + " \"create_time\": 1234567890000,\n" + + " \"tomorrows_technology_today\": \"wow\",\n" + + " \"analysis_config\": {\n" + + " \"bucket_span\": \"1h\",\n" + + " \"something_new\": \"gasp\",\n" + + " \"detectors\": [{\"function\": \"metric\", \"field_name\": \"responsetime\", \"by_field_name\": \"airline\"}]\n" + + " },\n" + + " \"data_description\": {\n" + + " \"time_field\": \"time\",\n" + + " \"the_future\": 123\n" + + " }\n" + + "}"; + + @Override + protected Job createTestInstance() { + return createRandomizedJob(); + } + + @Override + protected Writeable.Reader instanceReader() { + return Job::new; + } + + @Override + protected Job doParseInstance(XContentParser parser) { + return Job.CONFIG_PARSER.apply(parser, null).build(); + } + + public void testFutureConfigParse() throws IOException { + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_JOB); + IllegalArgumentException e = 
expectThrows(IllegalArgumentException.class, + () -> Job.CONFIG_PARSER.apply(parser, null).build()); + assertEquals("[job_details] unknown field [tomorrows_technology_today], parser not found", e.getMessage()); + } + + public void testFutureMetadataParse() throws IOException { + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_JOB); + // Unlike the config version of this test, the metadata parser should tolerate the unknown future field + assertNotNull(Job.METADATA_PARSER.apply(parser, null).build()); + } + + public void testConstructor_GivenEmptyJobConfiguration() { + Job job = buildJobBuilder("foo").build(); + + assertEquals("foo", job.getId()); + assertNotNull(job.getCreateTime()); + assertNotNull(job.getAnalysisConfig()); + assertNotNull(job.getAnalysisLimits()); + assertNull(job.getCustomSettings()); + assertNotNull(job.getDataDescription()); + assertNull(job.getDescription()); + assertNull(job.getFinishedTime()); + assertNull(job.getLastDataTime()); + assertNull(job.getModelPlotConfig()); + assertNull(job.getRenormalizationWindowDays()); + assertNull(job.getBackgroundPersistInterval()); + assertThat(job.getModelSnapshotRetentionDays(), equalTo(1L)); + assertNull(job.getResultsRetentionDays()); + assertNotNull(job.allInputFields()); + assertFalse(job.allInputFields().isEmpty()); + } + + public void testNoId() { + expectThrows(IllegalArgumentException.class, () -> buildJobBuilder("").build()); + } + + public void testEnsureModelMemoryLimitSet() { + Job.Builder builder = buildJobBuilder("foo"); + builder.setAnalysisLimits(new AnalysisLimits(null, null)); + builder.validateAnalysisLimitsAndSetDefaults(new ByteSizeValue(0L)); + Job job = builder.build(); + assertEquals("foo", job.getId()); + assertNotNull(job.getAnalysisLimits()); + assertThat(job.getAnalysisLimits().getModelMemoryLimit(), equalTo(AnalysisLimits.DEFAULT_MODEL_MEMORY_LIMIT_MB)); + assertThat(job.getAnalysisLimits().getCategorizationExamplesLimit(), equalTo(4L)); + + builder.setAnalysisLimits(new AnalysisLimits(AnalysisLimits.DEFAULT_MODEL_MEMORY_LIMIT_MB * 2, 5L)); + builder.validateAnalysisLimitsAndSetDefaults(null); + job = builder.build(); + assertNotNull(job.getAnalysisLimits()); + assertThat(job.getAnalysisLimits().getModelMemoryLimit(), equalTo(AnalysisLimits.DEFAULT_MODEL_MEMORY_LIMIT_MB * 2)); + assertThat(job.getAnalysisLimits().getCategorizationExamplesLimit(), equalTo(5L)); + } + + public void testValidateAnalysisLimitsAndSetDefaults_whenMaxIsLessThanTheDefault() { + Job.Builder builder = buildJobBuilder("foo"); + builder.validateAnalysisLimitsAndSetDefaults(new ByteSizeValue(512L, ByteSizeUnit.MB)); + + Job job = builder.build(); + assertNotNull(job.getAnalysisLimits()); + assertThat(job.getAnalysisLimits().getModelMemoryLimit(), equalTo(512L)); + assertThat(job.getAnalysisLimits().getCategorizationExamplesLimit(), equalTo(4L)); + } + + public void testValidateAnalysisLimitsAndSetDefaults_throwsWhenMaxLimitIsExceeded() { + Job.Builder builder = buildJobBuilder("foo"); + builder.setAnalysisLimits(new AnalysisLimits(4096L, null)); + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> builder.validateAnalysisLimitsAndSetDefaults(new ByteSizeValue(1000L, ByteSizeUnit.MB))); + assertEquals("model_memory_limit [4gb] must be less than the value of the " + + MachineLearningField.MAX_MODEL_MEMORY_LIMIT.getKey() + " setting [1000mb]", e.getMessage()); + + 
builder.validateAnalysisLimitsAndSetDefaults(new ByteSizeValue(8192L, ByteSizeUnit.MB)); + } + + public void testEquals_GivenDifferentClass() { + Job job = buildJobBuilder("foo").build(); + assertFalse(job.equals("a string")); + } + + public void testEquals_GivenDifferentIds() { + Date createTime = new Date(); + Job.Builder builder = buildJobBuilder("foo"); + builder.setCreateTime(createTime); + Job job1 = builder.build(); + builder.setId("bar"); + Job job2 = builder.build(); + assertFalse(job1.equals(job2)); + } + + public void testEquals_GivenDifferentRenormalizationWindowDays() { + Date date = new Date(); + Job.Builder jobDetails1 = new Job.Builder("foo"); + jobDetails1.setDataDescription(new DataDescription.Builder()); + jobDetails1.setAnalysisConfig(createAnalysisConfig()); + jobDetails1.setRenormalizationWindowDays(3L); + jobDetails1.setCreateTime(date); + Job.Builder jobDetails2 = new Job.Builder("foo"); + jobDetails2.setDataDescription(new DataDescription.Builder()); + jobDetails2.setRenormalizationWindowDays(4L); + jobDetails2.setAnalysisConfig(createAnalysisConfig()); + jobDetails2.setCreateTime(date); + assertFalse(jobDetails1.build().equals(jobDetails2.build())); + } + + public void testEquals_GivenDifferentBackgroundPersistInterval() { + Date date = new Date(); + Job.Builder jobDetails1 = new Job.Builder("foo"); + jobDetails1.setDataDescription(new DataDescription.Builder()); + jobDetails1.setAnalysisConfig(createAnalysisConfig()); + jobDetails1.setBackgroundPersistInterval(TimeValue.timeValueSeconds(10000L)); + jobDetails1.setCreateTime(date); + Job.Builder jobDetails2 = new Job.Builder("foo"); + jobDetails2.setDataDescription(new DataDescription.Builder()); + jobDetails2.setBackgroundPersistInterval(TimeValue.timeValueSeconds(8000L)); + jobDetails2.setAnalysisConfig(createAnalysisConfig()); + jobDetails2.setCreateTime(date); + assertFalse(jobDetails1.build().equals(jobDetails2.build())); + } + + public void testEquals_GivenDifferentModelSnapshotRetentionDays() { + Date date = new Date(); + Job.Builder jobDetails1 = new Job.Builder("foo"); + jobDetails1.setDataDescription(new DataDescription.Builder()); + jobDetails1.setAnalysisConfig(createAnalysisConfig()); + jobDetails1.setModelSnapshotRetentionDays(10L); + jobDetails1.setCreateTime(date); + Job.Builder jobDetails2 = new Job.Builder("foo"); + jobDetails2.setDataDescription(new DataDescription.Builder()); + jobDetails2.setModelSnapshotRetentionDays(8L); + jobDetails2.setAnalysisConfig(createAnalysisConfig()); + jobDetails2.setCreateTime(date); + assertFalse(jobDetails1.build().equals(jobDetails2.build())); + } + + public void testEquals_GivenDifferentResultsRetentionDays() { + Date date = new Date(); + Job.Builder jobDetails1 = new Job.Builder("foo"); + jobDetails1.setDataDescription(new DataDescription.Builder()); + jobDetails1.setAnalysisConfig(createAnalysisConfig()); + jobDetails1.setCreateTime(date); + jobDetails1.setResultsRetentionDays(30L); + Job.Builder jobDetails2 = new Job.Builder("foo"); + jobDetails2.setDataDescription(new DataDescription.Builder()); + jobDetails2.setResultsRetentionDays(4L); + jobDetails2.setAnalysisConfig(createAnalysisConfig()); + jobDetails2.setCreateTime(date); + assertFalse(jobDetails1.build().equals(jobDetails2.build())); + } + + public void testEquals_GivenDifferentCustomSettings() { + Job.Builder jobDetails1 = buildJobBuilder("foo"); + Map customSettings1 = new HashMap<>(); + customSettings1.put("key1", "value1"); + jobDetails1.setCustomSettings(customSettings1); + Job.Builder 
jobDetails2 = buildJobBuilder("foo"); + Map customSettings2 = new HashMap<>(); + customSettings2.put("key2", "value2"); + jobDetails2.setCustomSettings(customSettings2); + assertFalse(jobDetails1.build().equals(jobDetails2.build())); + } + + // JobConfigurationTests: + + /** + * Test the {@link AnalysisConfig#analysisFields()} method which produces a + * list of analysis fields from the detectors + */ + public void testAnalysisConfigRequiredFields() { + Detector.Builder d1 = new Detector.Builder("max", "field"); + d1.setByFieldName("by_field"); + + Detector.Builder d2 = new Detector.Builder("median", "field2"); + d2.setOverFieldName("over_field"); + + AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Arrays.asList(d1.build(), d2.build())); + ac.setSummaryCountFieldName("agg"); + + Set analysisFields = ac.build().analysisFields(); + assertTrue(analysisFields.size() == 5); + + assertTrue(analysisFields.contains("agg")); + assertTrue(analysisFields.contains("field")); + assertTrue(analysisFields.contains("by_field")); + assertTrue(analysisFields.contains("field2")); + assertTrue(analysisFields.contains("over_field")); + + assertFalse(analysisFields.contains("max")); + assertFalse(analysisFields.contains("median")); + assertFalse(analysisFields.contains("")); + + Detector.Builder d3 = new Detector.Builder("count", null); + d3.setByFieldName("by2"); + d3.setPartitionFieldName("partition"); + + ac = new AnalysisConfig.Builder(Arrays.asList(d1.build(), d2.build(), d3.build())); + + analysisFields = ac.build().analysisFields(); + assertTrue(analysisFields.size() == 6); + + assertTrue(analysisFields.contains("partition")); + assertTrue(analysisFields.contains("field")); + assertTrue(analysisFields.contains("by_field")); + assertTrue(analysisFields.contains("by2")); + assertTrue(analysisFields.contains("field2")); + assertTrue(analysisFields.contains("over_field")); + + assertFalse(analysisFields.contains("count")); + assertFalse(analysisFields.contains("max")); + assertFalse(analysisFields.contains("median")); + assertFalse(analysisFields.contains("")); + } + + // JobConfigurationVerifierTests: + + public void testCopyConstructor() { + for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) { + Job job = createTestInstance(); + Job copy = new Job.Builder(job).build(); + assertEquals(job, copy); + } + } + + public void testCheckValidId_IdTooLong() { + Job.Builder builder = buildJobBuilder("foo"); + builder.setId("averyveryveryaveryveryveryaveryveryveryaveryveryveryaveryveryveryaveryveryverylongid"); + expectThrows(IllegalArgumentException.class, builder::build); + } + + public void testCheckValidId_GivenAllValidChars() { + Job.Builder builder = buildJobBuilder("foo"); + builder.setId("abcdefghijklmnopqrstuvwxyz-._0123456789"); + builder.build(); + } + + public void testCheckValidId_ProhibitedChars() { + String invalidChars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ!@#$%^&*()+?\"'~±/\\[]{},<>="; + Job.Builder builder = buildJobBuilder("foo"); + for (char c : invalidChars.toCharArray()) { + builder.setId(Character.toString(c)); + String errorMessage = Messages.getMessage(Messages.INVALID_ID, Job.ID.getPreferredName(), Character.toString(c)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, builder::build); + assertEquals(errorMessage, e.getMessage()); + } + } + + public void testCheckValidId_startsWithUnderscore() { + Job.Builder builder = buildJobBuilder("_foo"); + String errorMessage = Messages.getMessage(Messages.INVALID_ID, Job.ID.getPreferredName(), "_foo"); + IllegalArgumentException e 
= expectThrows(IllegalArgumentException.class, builder::build); + assertEquals(errorMessage, e.getMessage()); + } + + public void testCheckValidId_endsWithUnderscore() { + Job.Builder builder = buildJobBuilder("foo_"); + String errorMessage = Messages.getMessage(Messages.INVALID_ID, Job.ID.getPreferredName(), "foo_"); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, builder::build); + assertEquals(errorMessage, e.getMessage()); + } + + public void testCheckValidId_ControlChars() { + Job.Builder builder = buildJobBuilder("foo"); + builder.setId("two\nlines"); + expectThrows(IllegalArgumentException.class, builder::build); + } + + public void jobConfigurationTest() { + Job.Builder builder = new Job.Builder(); + expectThrows(IllegalArgumentException.class, builder::build); + builder.setId("bad id with spaces"); + expectThrows(IllegalArgumentException.class, builder::build); + builder.setId("bad_id_with_UPPERCASE_chars"); + expectThrows(IllegalArgumentException.class, builder::build); + builder.setId("a very very very very very very very very very very very very very very very very very very very very long id"); + expectThrows(IllegalArgumentException.class, builder::build); + builder.setId(null); + expectThrows(IllegalArgumentException.class, builder::build); + + Detector.Builder d = new Detector.Builder("max", "a"); + d.setByFieldName("b"); + AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Collections.singletonList(d.build())); + builder.setAnalysisConfig(ac); + builder.build(); + builder.setAnalysisLimits(new AnalysisLimits(-1L, null)); + expectThrows(IllegalArgumentException.class, builder::build); + AnalysisLimits limits = new AnalysisLimits(1000L, 4L); + builder.setAnalysisLimits(limits); + builder.build(); + DataDescription.Builder dc = new DataDescription.Builder(); + dc.setTimeFormat("YYY_KKKKajsatp*"); + builder.setDataDescription(dc); + expectThrows(IllegalArgumentException.class, builder::build); + dc = new DataDescription.Builder(); + builder.setDataDescription(dc); + expectThrows(IllegalArgumentException.class, builder::build); + builder.build(); + } + + public void testVerify_GivenNegativeRenormalizationWindowDays() { + String errorMessage = Messages.getMessage(Messages.JOB_CONFIG_FIELD_VALUE_TOO_LOW, + "renormalization_window_days", 0, -1); + Job.Builder builder = buildJobBuilder("foo"); + builder.setRenormalizationWindowDays(-1L); + IllegalArgumentException e = ESTestCase.expectThrows(IllegalArgumentException.class, builder::build); + assertEquals(errorMessage, e.getMessage()); + } + + public void testVerify_GivenNegativeModelSnapshotRetentionDays() { + String errorMessage = Messages.getMessage(Messages.JOB_CONFIG_FIELD_VALUE_TOO_LOW, "model_snapshot_retention_days", 0, -1); + Job.Builder builder = buildJobBuilder("foo"); + builder.setModelSnapshotRetentionDays(-1L); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, builder::build); + + assertEquals(errorMessage, e.getMessage()); + } + + public void testVerify_GivenLowBackgroundPersistInterval() { + String errorMessage = Messages.getMessage(Messages.JOB_CONFIG_FIELD_VALUE_TOO_LOW, "background_persist_interval", 3600, 3599); + Job.Builder builder = buildJobBuilder("foo"); + builder.setBackgroundPersistInterval(TimeValue.timeValueSeconds(3599L)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, builder::build); + assertEquals(errorMessage, e.getMessage()); + } + + public void testVerify_GivenNegativeResultsRetentionDays() { + String 
errorMessage = Messages.getMessage(Messages.JOB_CONFIG_FIELD_VALUE_TOO_LOW, + "results_retention_days", 0, -1); + Job.Builder builder = buildJobBuilder("foo"); + builder.setResultsRetentionDays(-1L); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, builder::build); + assertEquals(errorMessage, e.getMessage()); + } + + public void testBuilder_setsDefaultIndexName() { + Job.Builder builder = buildJobBuilder("foo"); + Job job = builder.build(); + assertEquals(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT, + job.getResultsIndexName()); + } + + public void testBuilder_setsIndexName() { + Job.Builder builder = buildJobBuilder("foo"); + builder.setResultsIndexName("carol"); + Job job = builder.build(); + assertEquals(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-carol", job.getResultsIndexName()); + } + + public void testBuilder_withInvalidIndexNameThrows() { + Job.Builder builder = buildJobBuilder("foo"); + builder.setResultsIndexName("_bad^name"); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, builder::build); + assertEquals(Messages.getMessage(Messages.INVALID_ID, Job.RESULTS_INDEX_NAME.getPreferredName(), "_bad^name"), e.getMessage()); + } + + public void testBuilder_buildWithCreateTime() { + Job.Builder builder = buildJobBuilder("foo"); + Date now = new Date(); + Job job = builder.setEstablishedModelMemory(randomNonNegativeLong()).build(now); + assertEquals(now, job.getCreateTime()); + assertEquals(Version.CURRENT, job.getJobVersion()); + assertNull(job.getEstablishedModelMemory()); + } + + public void testJobWithoutVersion() throws IOException { + Job.Builder builder = buildJobBuilder("foo"); + Job job = builder.build(); + assertThat(job.getJobVersion(), is(nullValue())); + + // Assert parsing a job without version works as expected + XContentType xContentType = randomFrom(XContentType.values()); + BytesReference bytes = XContentHelper.toXContent(job, xContentType, false); + try(XContentParser parser = createParser(xContentType.xContent(), bytes)) { + Job parsed = doParseInstance(parser); + assertThat(parsed, equalTo(job)); + } + } + + public void testBuilder_buildRequiresDataDescription() { + Job.Builder builder = new Job.Builder("no-data-description"); + builder.setAnalysisConfig(createAnalysisConfig()); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, builder::build); + assertThat(e.getMessage(), equalTo("A data_description must be set")); + } + + public void testBuilder_givenTimeFieldInAnalysisConfig() { + DataDescription.Builder dataDescriptionBuilder = new DataDescription.Builder(); + // field name used here matches what's in createAnalysisConfig() + dataDescriptionBuilder.setTimeField("client"); + + Job.Builder jobBuilder = new Job.Builder("time-field-in-analysis-config"); + jobBuilder.setAnalysisConfig(createAnalysisConfig()); + jobBuilder.setDataDescription(dataDescriptionBuilder); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, jobBuilder::build); + assertThat(e.getMessage(), equalTo(Messages.getMessage(Messages.JOB_CONFIG_TIME_FIELD_NOT_ALLOWED_IN_ANALYSIS_CONFIG))); + } + + public void testGetCompatibleJobTypes_givenVersionBefore_V_5_4() { + assertThat(Job.getCompatibleJobTypes(Version.V_5_0_0).isEmpty(), is(true)); + assertThat(Job.getCompatibleJobTypes(Version.V_5_3_0).isEmpty(), is(true)); + assertThat(Job.getCompatibleJobTypes(Version.V_5_3_2).isEmpty(), is(true)); + } + + public void 
testGetCompatibleJobTypes_givenVersionAfter_V_5_4() { + assertThat(Job.getCompatibleJobTypes(Version.V_5_4_0), contains(Job.ANOMALY_DETECTOR_JOB_TYPE)); + assertThat(Job.getCompatibleJobTypes(Version.V_5_4_0).size(), equalTo(1)); + assertThat(Job.getCompatibleJobTypes(Version.V_5_5_0), contains(Job.ANOMALY_DETECTOR_JOB_TYPE)); + assertThat(Job.getCompatibleJobTypes(Version.V_5_5_0).size(), equalTo(1)); + } + + public void testInvalidCreateTimeSettings() { + Job.Builder builder = new Job.Builder("invalid-settings"); + builder.setModelSnapshotId("snapshot-foo"); + assertEquals(Collections.singletonList(Job.MODEL_SNAPSHOT_ID.getPreferredName()), builder.invalidCreateTimeSettings()); + + builder.setCreateTime(new Date()); + builder.setFinishedTime(new Date()); + builder.setLastDataTime(new Date()); + + Set expected = new HashSet<>(); + expected.add(Job.CREATE_TIME.getPreferredName()); + expected.add(Job.FINISHED_TIME.getPreferredName()); + expected.add(Job.LAST_DATA_TIME.getPreferredName()); + expected.add(Job.MODEL_SNAPSHOT_ID.getPreferredName()); + + assertEquals(expected, new HashSet<>(builder.invalidCreateTimeSettings())); + } + + public void testEmptyGroup() { + Job.Builder builder = buildJobBuilder("foo"); + builder.setGroups(Arrays.asList("foo-group", "")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, builder::build); + assertThat(e.getMessage(), containsString("Invalid group id ''")); + } + + public void testInvalidGroup() { + Job.Builder builder = buildJobBuilder("foo"); + builder.setGroups(Arrays.asList("foo-group", "$$$")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, builder::build); + assertThat(e.getMessage(), containsString("Invalid group id '$$$'")); + } + + public void testEstimateMemoryFootprint_GivenEstablished() { + Job.Builder builder = buildJobBuilder("established"); + long establishedModelMemory = randomIntBetween(10_000, 2_000_000_000); + builder.setEstablishedModelMemory(establishedModelMemory); + if (randomBoolean()) { + builder.setAnalysisLimits(new AnalysisLimits(randomNonNegativeLong(), null)); + } + assertEquals(establishedModelMemory + Job.PROCESS_MEMORY_OVERHEAD.getBytes(), builder.build().estimateMemoryFootprint()); + } + + public void testEstimateMemoryFootprint_GivenLimitAndNotEstablished() { + Job.Builder builder = buildJobBuilder("limit"); + if (rarely()) { + // An "established" model memory of 0 means "not established". Generally this won't be set, so getEstablishedModelMemory() + // will return null, but if it returns 0 we shouldn't estimate the job's memory requirement to be 0. + builder.setEstablishedModelMemory(0L); + } + ByteSizeValue limit = new ByteSizeValue(randomIntBetween(100, 10000), ByteSizeUnit.MB); + builder.setAnalysisLimits(new AnalysisLimits(limit.getMb(), null)); + assertEquals(limit.getBytes() + Job.PROCESS_MEMORY_OVERHEAD.getBytes(), builder.build().estimateMemoryFootprint()); + } + + public void testEstimateMemoryFootprint_GivenNoLimitAndNotEstablished() { + Job.Builder builder = buildJobBuilder("nolimit"); + if (rarely()) { + // An "established" model memory of 0 means "not established". Generally this won't be set, so getEstablishedModelMemory() + // will return null, but if it returns 0 we shouldn't estimate the job's memory requirement to be 0. 
+ builder.setEstablishedModelMemory(0L); + } + assertEquals(ByteSizeUnit.MB.toBytes(AnalysisLimits.PRE_6_1_DEFAULT_MODEL_MEMORY_LIMIT_MB) + + Job.PROCESS_MEMORY_OVERHEAD.getBytes(), builder.build().estimateMemoryFootprint()); + } + + public void testEarliestValidTimestamp_GivenEmptyDataCounts() { + assertThat(createRandomizedJob().earliestValidTimestamp(new DataCounts("foo")), equalTo(0L)); + } + + public void testEarliestValidTimestamp_GivenDataCountsAndZeroLatency() { + Job.Builder builder = buildJobBuilder("foo"); + DataCounts dataCounts = new DataCounts(builder.getId()); + dataCounts.setLatestRecordTimeStamp(new Date(123456789L)); + + assertThat(builder.build().earliestValidTimestamp(dataCounts), equalTo(123456789L)); + } + + public void testEarliestValidTimestamp_GivenDataCountsAndLatency() { + Job.Builder builder = buildJobBuilder("foo"); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(builder.build().getAnalysisConfig()); + analysisConfig.setLatency(TimeValue.timeValueMillis(1000L)); + builder.setAnalysisConfig(analysisConfig); + + DataCounts dataCounts = new DataCounts(builder.getId()); + dataCounts.setLatestRecordTimeStamp(new Date(123456789L)); + + assertThat(builder.build().earliestValidTimestamp(dataCounts), equalTo(123455789L)); + } + + public static Job.Builder buildJobBuilder(String id, Date date) { + Job.Builder builder = new Job.Builder(id); + builder.setCreateTime(date); + AnalysisConfig.Builder ac = createAnalysisConfig(); + DataDescription.Builder dc = new DataDescription.Builder(); + builder.setAnalysisConfig(ac); + builder.setDataDescription(dc); + return builder; + } + + public static Job.Builder buildJobBuilder(String id) { + return buildJobBuilder(id, new Date()); + } + + public static String randomValidJobId() { + CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz".toCharArray()); + return generator.ofCodePointsLength(random(), 10, 10); + } + + public static AnalysisConfig.Builder createAnalysisConfig() { + Detector.Builder d1 = new Detector.Builder("info_content", "domain"); + d1.setOverFieldName("client"); + Detector.Builder d2 = new Detector.Builder("min", "field"); + return new AnalysisConfig.Builder(Arrays.asList(d1.build(), d2.build())); + } + + public static Job createRandomizedJob() { + String jobId = randomValidJobId(); + Job.Builder builder = new Job.Builder(jobId); + if (randomBoolean()) { + builder.setDescription(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + builder.setJobVersion(Version.CURRENT); + } + if (randomBoolean()) { + int groupsNum = randomIntBetween(0, 10); + List groups = new ArrayList<>(groupsNum); + for (int i = 0; i < groupsNum; i++) { + groups.add(randomValidJobId()); + } + builder.setGroups(groups); + } + builder.setCreateTime(new Date(randomNonNegativeLong())); + if (randomBoolean()) { + builder.setFinishedTime(new Date(randomNonNegativeLong())); + } + if (randomBoolean()) { + builder.setLastDataTime(new Date(randomNonNegativeLong())); + } + if (randomBoolean()) { + builder.setEstablishedModelMemory(randomNonNegativeLong()); + } + builder.setAnalysisConfig(AnalysisConfigTests.createRandomized()); + builder.setAnalysisLimits(AnalysisLimits.validateAndSetDefaults(AnalysisLimitsTests.createRandomized(), null, + AnalysisLimits.DEFAULT_MODEL_MEMORY_LIMIT_MB)); + + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setFormat(randomFrom(DataDescription.DataFormat.values())); + builder.setDataDescription(dataDescription); + + if 
(randomBoolean()) { + builder.setModelPlotConfig(new ModelPlotConfig(randomBoolean(), randomAlphaOfLength(10))); + } + if (randomBoolean()) { + builder.setRenormalizationWindowDays(randomNonNegativeLong()); + } + if (randomBoolean()) { + builder.setBackgroundPersistInterval(TimeValue.timeValueHours(randomIntBetween(1, 24))); + } + if (randomBoolean()) { + builder.setModelSnapshotRetentionDays(randomNonNegativeLong()); + } + if (randomBoolean()) { + builder.setResultsRetentionDays(randomNonNegativeLong()); + } + if (randomBoolean()) { + builder.setCustomSettings(Collections.singletonMap(randomAlphaOfLength(10), randomAlphaOfLength(10))); + } + if (randomBoolean()) { + builder.setModelSnapshotId(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + builder.setModelSnapshotMinVersion(Version.CURRENT); + } + if (randomBoolean()) { + builder.setResultsIndexName(randomValidJobId()); + } + return builder.build(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java new file mode 100644 index 0000000000000..7a976c89cdb40 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java @@ -0,0 +1,230 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; + +public class JobUpdateTests extends AbstractSerializingTestCase { + + @Override + protected JobUpdate createTestInstance() { + JobUpdate.Builder update = new JobUpdate.Builder(randomAlphaOfLength(4)); + if (randomBoolean()) { + int groupsNum = randomIntBetween(0, 10); + List groups = new ArrayList<>(groupsNum); + for (int i = 0; i < groupsNum; i++) { + groups.add(JobTests.randomValidJobId()); + } + update.setGroups(groups); + } + if (randomBoolean()) { + update.setDescription(randomAlphaOfLength(20)); + } + if (randomBoolean()) { + int size = randomInt(10); + List detectorUpdates = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + String detectorDescription = null; + if (randomBoolean()) { + detectorDescription = randomAlphaOfLength(12); + } + List detectionRules = null; + if (randomBoolean()) { + detectionRules = new ArrayList<>(); + Condition condition = new Condition(Operator.GT, "5"); + detectionRules.add(new DetectionRule.Builder( + Collections.singletonList(new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, null, null, condition, null))) + .setTargetFieldName("foo").build()); + } + detectorUpdates.add(new JobUpdate.DetectorUpdate(i, detectorDescription, detectionRules)); + } + 
update.setDetectorUpdates(detectorUpdates); + } + if (randomBoolean()) { + update.setModelPlotConfig(new ModelPlotConfig(randomBoolean(), randomAlphaOfLength(10))); + } + if (randomBoolean()) { + update.setAnalysisLimits(AnalysisLimitsTests.createRandomized()); + } + if (randomBoolean()) { + update.setRenormalizationWindowDays(randomNonNegativeLong()); + } + if (randomBoolean()) { + update.setBackgroundPersistInterval(TimeValue.timeValueHours(randomIntBetween(1, 24))); + } + if (randomBoolean()) { + update.setModelSnapshotRetentionDays(randomNonNegativeLong()); + } + if (randomBoolean()) { + update.setResultsRetentionDays(randomNonNegativeLong()); + } + if (randomBoolean()) { + update.setCategorizationFilters(Arrays.asList(generateRandomStringArray(10, 10, false))); + } + if (randomBoolean()) { + update.setCustomSettings(Collections.singletonMap(randomAlphaOfLength(10), randomAlphaOfLength(10))); + } + if (randomBoolean()) { + update.setModelSnapshotId(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + update.setModelSnapshotMinVersion(Version.CURRENT); + } + if (randomBoolean()) { + update.setEstablishedModelMemory(randomNonNegativeLong()); + } + + return update.build(); + } + + @Override + protected Writeable.Reader instanceReader() { + return JobUpdate::new; + } + + @Override + protected JobUpdate doParseInstance(XContentParser parser) { + return JobUpdate.PARSER.apply(parser, null).build(); + } + + public void testMergeWithJob() { + List detectorUpdates = new ArrayList<>(); + List detectionRules1 = Collections.singletonList(new DetectionRule.Builder( + Collections.singletonList(new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, null, null, new Condition(Operator.GT, "5") + , null))) + .setTargetFieldName("mlcategory").build()); + detectorUpdates.add(new JobUpdate.DetectorUpdate(0, "description-1", detectionRules1)); + List detectionRules2 = Collections.singletonList(new DetectionRule.Builder(Collections.singletonList( + new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, null, null, new Condition(Operator.GT, "5"), null))) + .setTargetFieldName("host").build()); + detectorUpdates.add(new JobUpdate.DetectorUpdate(1, "description-2", detectionRules2)); + + ModelPlotConfig modelPlotConfig = new ModelPlotConfig(randomBoolean(), randomAlphaOfLength(10)); + AnalysisLimits analysisLimits = new AnalysisLimits(randomNonNegativeLong(), randomNonNegativeLong()); + List categorizationFilters = Arrays.asList(generateRandomStringArray(10, 10, false)); + Map customSettings = Collections.singletonMap(randomAlphaOfLength(10), randomAlphaOfLength(10)); + + JobUpdate.Builder updateBuilder = new JobUpdate.Builder("foo"); + updateBuilder.setGroups(Arrays.asList("group-1", "group-2")); + updateBuilder.setDescription("updated_description"); + updateBuilder.setDetectorUpdates(detectorUpdates); + updateBuilder.setModelPlotConfig(modelPlotConfig); + updateBuilder.setAnalysisLimits(analysisLimits); + updateBuilder.setBackgroundPersistInterval(TimeValue.timeValueHours(randomIntBetween(1, 24))); + updateBuilder.setResultsRetentionDays(randomNonNegativeLong()); + updateBuilder.setModelSnapshotRetentionDays(randomNonNegativeLong()); + updateBuilder.setRenormalizationWindowDays(randomNonNegativeLong()); + updateBuilder.setCategorizationFilters(categorizationFilters); + updateBuilder.setCustomSettings(customSettings); + updateBuilder.setModelSnapshotId(randomAlphaOfLength(10)); + JobUpdate update = updateBuilder.build(); + + Job.Builder jobBuilder = new Job.Builder("foo"); + 
jobBuilder.setGroups(Arrays.asList("group-1")); + Detector.Builder d1 = new Detector.Builder("info_content", "domain"); + d1.setOverFieldName("mlcategory"); + Detector.Builder d2 = new Detector.Builder("min", "field"); + d2.setOverFieldName("host"); + AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Arrays.asList(d1.build(), d2.build())); + ac.setCategorizationFieldName("cat_field"); + jobBuilder.setAnalysisConfig(ac); + jobBuilder.setDataDescription(new DataDescription.Builder()); + jobBuilder.setCreateTime(new Date()); + + Job updatedJob = update.mergeWithJob(jobBuilder.build(), new ByteSizeValue(0L)); + + assertEquals(update.getGroups(), updatedJob.getGroups()); + assertEquals(update.getDescription(), updatedJob.getDescription()); + assertEquals(update.getModelPlotConfig(), updatedJob.getModelPlotConfig()); + assertEquals(update.getAnalysisLimits(), updatedJob.getAnalysisLimits()); + assertEquals(update.getRenormalizationWindowDays(), updatedJob.getRenormalizationWindowDays()); + assertEquals(update.getBackgroundPersistInterval(), updatedJob.getBackgroundPersistInterval()); + assertEquals(update.getModelSnapshotRetentionDays(), updatedJob.getModelSnapshotRetentionDays()); + assertEquals(update.getResultsRetentionDays(), updatedJob.getResultsRetentionDays()); + assertEquals(update.getCategorizationFilters(), updatedJob.getAnalysisConfig().getCategorizationFilters()); + assertEquals(update.getCustomSettings(), updatedJob.getCustomSettings()); + assertEquals(update.getModelSnapshotId(), updatedJob.getModelSnapshotId()); + for (JobUpdate.DetectorUpdate detectorUpdate : update.getDetectorUpdates()) { + assertNotNull(updatedJob.getAnalysisConfig().getDetectors().get(detectorUpdate.getDetectorIndex()).getDetectorDescription()); + assertEquals(detectorUpdate.getDescription(), + updatedJob.getAnalysisConfig().getDetectors().get(detectorUpdate.getDetectorIndex()).getDetectorDescription()); + assertNotNull(updatedJob.getAnalysisConfig().getDetectors().get(detectorUpdate.getDetectorIndex()).getDetectorDescription()); + assertEquals(detectorUpdate.getRules(), + updatedJob.getAnalysisConfig().getDetectors().get(detectorUpdate.getDetectorIndex()).getRules()); + } + } + + public void testIsAutodetectProcessUpdate() { + JobUpdate update = new JobUpdate.Builder("foo").build(); + assertFalse(update.isAutodetectProcessUpdate()); + update = new JobUpdate.Builder("foo").setModelPlotConfig(new ModelPlotConfig(true, "ff")).build(); + assertTrue(update.isAutodetectProcessUpdate()); + update = new JobUpdate.Builder("foo").setDetectorUpdates(Collections.singletonList(mock(JobUpdate.DetectorUpdate.class))).build(); + assertTrue(update.isAutodetectProcessUpdate()); + } + + public void testUpdateAnalysisLimitWithValueGreaterThanMax() { + Job.Builder jobBuilder = new Job.Builder("foo"); + Detector.Builder d1 = new Detector.Builder("info_content", "domain"); + d1.setOverFieldName("mlcategory"); + Detector.Builder d2 = new Detector.Builder("min", "field"); + d2.setOverFieldName("host"); + AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Arrays.asList(d1.build(), d2.build())); + ac.setCategorizationFieldName("cat_field"); + jobBuilder.setAnalysisConfig(ac); + jobBuilder.setDataDescription(new DataDescription.Builder()); + jobBuilder.setCreateTime(new Date()); + jobBuilder.setAnalysisLimits(new AnalysisLimits(256L, null)); + + JobUpdate update = new JobUpdate.Builder("foo").setAnalysisLimits(new AnalysisLimits(1024L, null)).build(); + + ElasticsearchStatusException e = 
expectThrows(ElasticsearchStatusException.class, + () -> update.mergeWithJob(jobBuilder.build(), new ByteSizeValue(512L, ByteSizeUnit.MB))); + assertEquals("model_memory_limit [1gb] must be less than the value of the xpack.ml.max_model_memory_limit setting [512mb]", + e.getMessage()); + } + + public void testUpdate_withAnalysisLimitsPreviouslyUndefined() { + Job.Builder jobBuilder = new Job.Builder("foo"); + Detector.Builder d1 = new Detector.Builder("info_content", "domain"); + AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Collections.singletonList(d1.build())); + jobBuilder.setAnalysisConfig(ac); + jobBuilder.setDataDescription(new DataDescription.Builder()); + jobBuilder.setCreateTime(new Date()); + jobBuilder.validateAnalysisLimitsAndSetDefaults(null); + + JobUpdate update = new JobUpdate.Builder("foo").setAnalysisLimits(new AnalysisLimits(2048L, 5L)).build(); + Job updated = update.mergeWithJob(jobBuilder.build(), new ByteSizeValue(0L)); + assertThat(updated.getAnalysisLimits().getModelMemoryLimit(), equalTo(2048L)); + assertThat(updated.getAnalysisLimits().getCategorizationExamplesLimit(), equalTo(5L)); + + JobUpdate updateAboveMaxLimit = new JobUpdate.Builder("foo").setAnalysisLimits(new AnalysisLimits(8000L, null)).build(); + + Exception e = expectThrows(ElasticsearchStatusException.class, + () -> updateAboveMaxLimit.mergeWithJob(jobBuilder.build(), new ByteSizeValue(5000L, ByteSizeUnit.MB))); + assertEquals("model_memory_limit [7.8gb] must be less than the value of the xpack.ml.max_model_memory_limit setting [4.8gb]", + e.getMessage()); + + updateAboveMaxLimit.mergeWithJob(jobBuilder.build(), new ByteSizeValue(10000L, ByteSizeUnit.MB)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java new file mode 100644 index 0000000000000..08efc1c883fca --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ml.job.results.CategoryDefinition; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class MlFilterTests extends AbstractSerializingTestCase<MlFilter> { + + public static MlFilter createTestFilter() { + return new MlFilterTests().createTestInstance(); + } + + @Override + protected MlFilter createTestInstance() { + int size = randomInt(10); + List<String> items = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + items.add(randomAlphaOfLengthBetween(1, 20)); + } + return new MlFilter(randomAlphaOfLengthBetween(1, 20), items); + } + + @Override + protected Reader<MlFilter> instanceReader() { + return MlFilter::new; + } + + @Override + protected MlFilter doParseInstance(XContentParser parser) { + return MlFilter.STRICT_PARSER.apply(parser, null).build(); + } + + public void testNullId() { + NullPointerException ex = expectThrows(NullPointerException.class, () -> new MlFilter(null, Collections.emptyList())); + assertEquals(MlFilter.ID.getPreferredName() + " must not be null", ex.getMessage()); + } + + public void testNullItems() { + NullPointerException ex = + expectThrows(NullPointerException.class, () -> new MlFilter(randomAlphaOfLengthBetween(1, 20), null)); + assertEquals(MlFilter.ITEMS.getPreferredName() + " must not be null", ex.getMessage()); + } + + public void testDocumentId() { + assertThat(MlFilter.documentId("foo"), equalTo("filter_foo")); + } + + public void testStrictParser() throws IOException { + String json = "{\"filter_id\":\"filter_1\", \"items\": [], \"foo\":\"bar\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> MlFilter.STRICT_PARSER.apply(parser, null)); + + assertThat(e.getMessage(), containsString("unknown field [foo]")); + } + } + + public void testLenientParser() throws IOException { + String json = "{\"filter_id\":\"filter_1\", \"items\": [], \"foo\":\"bar\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + MlFilter.LENIENT_PARSER.apply(parser, null); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/ModelPlotConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/ModelPlotConfigTests.java new file mode 100644 index 0000000000000..aa54a174194c4 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/ModelPlotConfigTests.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class ModelPlotConfigTests extends AbstractSerializingTestCase { + + public void testConstructorDefaults() { + assertThat(new ModelPlotConfig().isEnabled(), is(true)); + assertThat(new ModelPlotConfig().getTerms(), is(nullValue())); + } + + @Override + protected ModelPlotConfig createTestInstance() { + return new ModelPlotConfig(randomBoolean(), randomAlphaOfLengthBetween(1, 30)); + } + + @Override + protected Reader instanceReader() { + return ModelPlotConfig::new; + } + + @Override + protected ModelPlotConfig doParseInstance(XContentParser parser) { + return ModelPlotConfig.CONFIG_PARSER.apply(parser, null); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/RuleConditionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/RuleConditionTests.java new file mode 100644 index 0000000000000..882c590983aae --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/RuleConditionTests.java @@ -0,0 +1,245 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.config; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; + +public class RuleConditionTests extends AbstractSerializingTestCase { + + @Override + protected RuleCondition createTestInstance() { + Condition condition = null; + String fieldName = null; + String valueFilter = null; + String fieldValue = null; + RuleConditionType type = randomFrom(RuleConditionType.values()); + if (type.isCategorical()) { + valueFilter = randomAlphaOfLengthBetween(1, 20); + if (randomBoolean()) { + fieldName = randomAlphaOfLengthBetween(1, 20); + } + } else { + // no need to randomize, it is properly randomly tested in + // ConditionTest + condition = new Condition(Operator.LT, Long.toString(randomLong())); + if (randomBoolean()) { + fieldName = randomAlphaOfLengthBetween(1, 20); + fieldValue = randomAlphaOfLengthBetween(1, 20); + } + } + return new RuleCondition(type, fieldName, fieldValue, condition, valueFilter); + } + + @Override + protected Reader instanceReader() { + return RuleCondition::new; + } + + @Override + protected RuleCondition doParseInstance(XContentParser parser) { + return RuleCondition.CONFIG_PARSER.apply(parser, null); + } + + public void testConstructor() { + RuleCondition condition = new RuleCondition(RuleConditionType.CATEGORICAL, null, null, null, "valueFilter"); + assertEquals(RuleConditionType.CATEGORICAL, condition.getType()); + assertNull(condition.getFieldName()); + assertNull(condition.getFieldValue()); + assertNull(condition.getCondition()); + } + + public void testEqualsGivenSameObject() { + RuleCondition condition = new RuleCondition(RuleConditionType.CATEGORICAL, null, null, null, "valueFilter"); + 
assertTrue(condition.equals(condition)); + } + + public void testEqualsGivenString() { + assertFalse(new RuleCondition(RuleConditionType.CATEGORICAL, null, null, null, "filter").equals("a string")); + } + + public void testEqualsGivenDifferentType() { + RuleCondition condition1 = createFullyPopulated(); + RuleCondition condition2 = new RuleCondition(RuleConditionType.CATEGORICAL, null, null, null, "valueFilter"); + assertFalse(condition1.equals(condition2)); + assertFalse(condition2.equals(condition1)); + } + + public void testEqualsGivenDifferentFieldName() { + RuleCondition condition1 = createFullyPopulated(); + RuleCondition condition2 = new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, "metricNameaaa", "cpu", + new Condition(Operator.LT, "5"), null); + assertFalse(condition1.equals(condition2)); + assertFalse(condition2.equals(condition1)); + } + + public void testEqualsGivenDifferentFieldValue() { + RuleCondition condition1 = createFullyPopulated(); + RuleCondition condition2 = new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, "metricName", "cpuaaa", + new Condition(Operator.LT, "5"), null); + assertFalse(condition1.equals(condition2)); + assertFalse(condition2.equals(condition1)); + } + + public void testEqualsGivenDifferentCondition() { + RuleCondition condition1 = createFullyPopulated(); + RuleCondition condition2 = new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, "metricName", "cpu", + new Condition(Operator.GT, "5"), null); + assertFalse(condition1.equals(condition2)); + assertFalse(condition2.equals(condition1)); + } + + public void testEqualsGivenDifferentValueFilter() { + RuleCondition condition1 = new RuleCondition(RuleConditionType.CATEGORICAL, null, null, null, "myFilter"); + RuleCondition condition2 = new RuleCondition(RuleConditionType.CATEGORICAL, null, null, null, "myFilteraaa"); + assertFalse(condition1.equals(condition2)); + assertFalse(condition2.equals(condition1)); + } + + private static RuleCondition createFullyPopulated() { + return new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, "metricName", "cpu", new Condition(Operator.LT, "5"), null); + } + + public void testVerify_GivenCategoricalWithCondition() { + Condition condition = new Condition(Operator.MATCH, "text"); + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> new RuleCondition(RuleConditionType.CATEGORICAL, null, null, condition, null)); + assertEquals("Invalid detector rule: a categorical rule_condition does not support condition", e.getMessage()); + } + + public void testVerify_GivenCategoricalWithFieldValue() { + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> new RuleCondition(RuleConditionType.CATEGORICAL, "metric", "CPU", null, null)); + assertEquals("Invalid detector rule: a categorical rule_condition does not support field_value", e.getMessage()); + } + + public void testVerify_GivenCategoricalWithoutFilterId() { + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> new RuleCondition(RuleConditionType.CATEGORICAL, null, null, null, null)); + assertEquals("Invalid detector rule: a categorical rule_condition requires filter_id to be set", e.getMessage()); + } + + public void testVerify_GivenNumericalActualWithFilterId() { + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, null, null, null, "myFilter")); + assertEquals("Invalid detector rule: a numerical rule_condition does not support filter_id", e.getMessage()); + } 
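The constructor checks above pin down the shape of a valid rule condition: a categorical condition carries only a filter_id (plus an optional field_name), while a numerical one needs a Condition and, whenever a field_name is given, a field_value as well. As a rough sketch of how conditions that satisfy those checks are assembled into a rule and handed to a job update, using only constructors and builders these tests already exercise (the wrapper class and its main method are illustrative, not part of the patch):

```java
package org.elasticsearch.xpack.core.ml.job.config;

import java.util.Collections;

public class RuleConditionUsageSketch {
    public static void main(String[] args) {
        // Numerical condition: "actual value greater than 5", scoped to one value of a field.
        Condition gtFive = new Condition(Operator.GT, "5");
        RuleCondition numerical =
                new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, "metricName", "cpu", gtFive, null);

        // Categorical condition: only a filter_id (and optionally a field_name) is allowed.
        RuleCondition categorical =
                new RuleCondition(RuleConditionType.CATEGORICAL, "metric", null, null, "myFilter");

        // Conditions are grouped into a DetectionRule, optionally targeting a field.
        DetectionRule rule = new DetectionRule.Builder(Collections.singletonList(numerical))
                .setTargetFieldName("host")
                .build();

        // A per-detector update carries the new rules: (detector index, description, rules).
        JobUpdate.DetectorUpdate detectorUpdate =
                new JobUpdate.DetectorUpdate(0, "updated description", Collections.singletonList(rule));
        System.out.println(detectorUpdate + " / " + categorical);
    }
}
```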
+ + public void testVerify_GivenNumericalActualWithoutCondition() { + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, null, null, null, null)); + assertEquals("Invalid detector rule: a numerical rule_condition requires condition to be set", e.getMessage()); + } + + public void testVerify_GivenNumericalActualWithFieldNameButNoFieldValue() { + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, "metric", null, new Condition(Operator.LT, "5"), null)); + assertEquals("Invalid detector rule: a numerical rule_condition with field_name requires that field_value is set", e.getMessage()); + } + + public void testVerify_GivenNumericalTypicalWithFilterId() { + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, null, null, null, "myFilter")); + assertEquals("Invalid detector rule: a numerical rule_condition does not support filter_id", e.getMessage()); + } + + public void testVerify_GivenNumericalTypicalWithoutCondition() { + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, null, null, null, null)); + assertEquals("Invalid detector rule: a numerical rule_condition requires condition to be set", e.getMessage()); + } + + public void testVerify_GivenNumericalDiffAbsWithFilterId() { + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> new RuleCondition(RuleConditionType.NUMERICAL_DIFF_ABS, null, null, null, "myFilter")); + assertEquals("Invalid detector rule: a numerical rule_condition does not support filter_id", e.getMessage()); + } + + public void testVerify_GivenNumericalDiffAbsWithoutCondition() { + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> new RuleCondition(RuleConditionType.NUMERICAL_DIFF_ABS, null, null, null, null)); + assertEquals("Invalid detector rule: a numerical rule_condition requires condition to be set", e.getMessage()); + } + + public void testVerify_GivenFieldValueWithoutFieldName() { + Condition condition = new Condition(Operator.LTE, "5"); + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> new RuleCondition(RuleConditionType.NUMERICAL_DIFF_ABS, null, "foo", condition, null)); + assertEquals("Invalid detector rule: missing field_name in rule_condition where field_value 'foo' is set", e.getMessage()); + } + + public void testVerify_GivenNumericalAndOperatorEquals() { + Condition condition = new Condition(Operator.EQ, "5"); + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, null, null, condition, null)); + assertEquals("Invalid detector rule: operator 'eq' is not allowed", e.getMessage()); + } + + public void testVerify_GivenNumericalAndOperatorMatch() { + Condition condition = new Condition(Operator.MATCH, "aaa"); + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, null, null, condition, null)); + assertEquals("Invalid detector rule: operator 'match' is not allowed", e.getMessage()); + } + + public void testVerify_GivenDetectionRuleWithInvalidCondition() { + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, "metricName", "CPU", new 
Condition(Operator.LT, "invalid"), + null)); + assertEquals(Messages.getMessage(Messages.JOB_CONFIG_CONDITION_INVALID_VALUE_NUMBER, "invalid"), e.getMessage()); + } + + public void testVerify_GivenValidCategorical() { + // no validation error: + new RuleCondition(RuleConditionType.CATEGORICAL, "metric", null, null, "myFilter"); + new RuleCondition(RuleConditionType.CATEGORICAL_COMPLEMENT, "metric", null, null, "myFilter"); + } + + public void testVerify_GivenValidNumericalActual() { + // no validation error: + new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, "metric", "cpu", new Condition(Operator.GT, "5"), null); + } + + public void testVerify_GivenValidNumericalTypical() { + // no validation error: + new RuleCondition(RuleConditionType.NUMERICAL_ACTUAL, "metric", "cpu", new Condition(Operator.GTE, "5"), null); + } + + public void testVerify_GivenValidNumericalDiffAbs() { + // no validation error: + new RuleCondition(RuleConditionType.NUMERICAL_DIFF_ABS, "metric", "cpu", new Condition(Operator.LT, "5"), null); + } + + public void testCreateTimeBased() { + RuleCondition timeBased = RuleCondition.createTime(Operator.GTE, 100L); + assertEquals(RuleConditionType.TIME, timeBased.getType()); + assertEquals(Operator.GTE, timeBased.getCondition().getOperator()); + assertEquals("100", timeBased.getCondition().getValue()); + assertNull(timeBased.getFieldName()); + assertNull(timeBased.getFieldValue()); + assertNull(timeBased.getFilterId()); + } + + public void testCreateTimeBased_GivenOperatorMatch() { + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> RuleCondition.createTime(Operator.MATCH, 100L)); + assertEquals("Invalid detector rule: operator 'match' is not allowed", e.getMessage()); + } + + public void testCreateNumerical() { + RuleCondition ruleCondition = RuleCondition.createNumerical(RuleConditionType.NUMERICAL_ACTUAL, "foo", "bar", + new Condition(Operator.GTE, "100")); + assertEquals(RuleConditionType.NUMERICAL_ACTUAL, ruleCondition.getType()); + assertEquals(Operator.GTE, ruleCondition.getCondition().getOperator()); + assertEquals("100", ruleCondition.getCondition().getValue()); + assertEquals("foo", ruleCondition.getFieldName()); + assertEquals("bar", ruleCondition.getFieldValue()); + assertNull(ruleCondition.getFilterId()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/groups/GroupOrJobLookupTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/groups/GroupOrJobLookupTests.java new file mode 100644 index 0000000000000..8543f02cec56c --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/groups/GroupOrJobLookupTests.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.job.groups; + +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.groups.GroupOrJobLookup; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.contains; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class GroupOrJobLookupTests extends ESTestCase { + + public void testEmptyLookup_GivenAllowNoJobs() { + GroupOrJobLookup lookup = new GroupOrJobLookup(Collections.emptyList()); + + assertThat(lookup.expandJobIds("_all", true).isEmpty(), is(true)); + assertThat(lookup.expandJobIds("*", true).isEmpty(), is(true)); + assertThat(lookup.expandJobIds("foo*", true).isEmpty(), is(true)); + expectThrows(ResourceNotFoundException.class, () -> lookup.expandJobIds("foo", true)); + } + + public void testEmptyLookup_GivenNotAllowNoJobs() { + GroupOrJobLookup lookup = new GroupOrJobLookup(Collections.emptyList()); + + expectThrows(ResourceNotFoundException.class, () -> lookup.expandJobIds("_all", false)); + expectThrows(ResourceNotFoundException.class, () -> lookup.expandJobIds("*", false)); + expectThrows(ResourceNotFoundException.class, () -> lookup.expandJobIds("foo*", false)); + expectThrows(ResourceNotFoundException.class, () -> lookup.expandJobIds("foo", true)); + } + + public void testAllIsNotExpandedInCommaSeparatedExpression() { + GroupOrJobLookup lookup = new GroupOrJobLookup(Collections.emptyList()); + ResourceNotFoundException e = expectThrows(ResourceNotFoundException.class, () -> lookup.expandJobIds("foo-*,_all", true)); + assertThat(e.getMessage(), equalTo("No known job with id '_all'")); + } + + public void testConstructor_GivenJobWithSameIdAsPreviousGroupName() { + List jobs = new ArrayList<>(); + jobs.add(mockJob("foo", Arrays.asList("foo-group"))); + jobs.add(mockJob("foo-group", Collections.emptyList())); + ResourceAlreadyExistsException e = expectThrows(ResourceAlreadyExistsException.class, () -> new GroupOrJobLookup(jobs)); + assertThat(e.getMessage(), + equalTo("job and group names must be unique but job [foo-group] and group [foo-group] have the same name")); + } + + public void testConstructor_GivenGroupWithSameNameAsPreviousJobId() { + List jobs = new ArrayList<>(); + jobs.add(mockJob("foo", Collections.emptyList())); + jobs.add(mockJob("foo-2", Arrays.asList("foo"))); + ResourceAlreadyExistsException e = expectThrows(ResourceAlreadyExistsException.class, () -> new GroupOrJobLookup(jobs)); + assertThat(e.getMessage(), + equalTo("job and group names must be unique but job [foo] and group [foo] have the same name")); + } + + public void testLookup() { + List jobs = new ArrayList<>(); + jobs.add(mockJob("foo-1", Arrays.asList("foo-group", "ones"))); + jobs.add(mockJob("foo-2", Arrays.asList("foo-group", "twos"))); + jobs.add(mockJob("bar-1", Arrays.asList("bar-group", "ones"))); + jobs.add(mockJob("bar-2", Arrays.asList("bar-group", "twos"))); + jobs.add(mockJob("nogroup", Collections.emptyList())); + GroupOrJobLookup groupOrJobLookup = new GroupOrJobLookup(jobs); + + assertThat(groupOrJobLookup.expandJobIds("_all", false), contains("bar-1", "bar-2", "foo-1", "foo-2", "nogroup")); + 
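// Each token in the expression is matched against job ids, group names and wildcards; the union of the matches comes back in sorted order, which is why the bar-* jobs appear ahead of the foo-* ones in the expectations below. +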
assertThat(groupOrJobLookup.expandJobIds("*", false), contains("bar-1", "bar-2", "foo-1", "foo-2", "nogroup")); + assertThat(groupOrJobLookup.expandJobIds("bar-1", false), contains("bar-1")); + assertThat(groupOrJobLookup.expandJobIds("foo-1", false), contains("foo-1")); + assertThat(groupOrJobLookup.expandJobIds("foo-2, bar-1", false), contains("bar-1", "foo-2")); + assertThat(groupOrJobLookup.expandJobIds("foo-group", false), contains("foo-1", "foo-2")); + assertThat(groupOrJobLookup.expandJobIds("bar-group", false), contains("bar-1", "bar-2")); + assertThat(groupOrJobLookup.expandJobIds("ones", false), contains("bar-1", "foo-1")); + assertThat(groupOrJobLookup.expandJobIds("twos", false), contains("bar-2", "foo-2")); + assertThat(groupOrJobLookup.expandJobIds("foo-group, nogroup", false), contains("foo-1", "foo-2", "nogroup")); + assertThat(groupOrJobLookup.expandJobIds("*-group", false), contains("bar-1", "bar-2", "foo-1", "foo-2")); + assertThat(groupOrJobLookup.expandJobIds("foo-group,foo-1,foo-2", false), contains("foo-1", "foo-2")); + assertThat(groupOrJobLookup.expandJobIds("foo-group,*-2", false), contains("bar-2", "foo-1", "foo-2")); + } + + public void testIsGroupOrJob() { + List jobs = new ArrayList<>(); + jobs.add(mockJob("foo-1", Arrays.asList("foo-group", "ones"))); + jobs.add(mockJob("foo-2", Arrays.asList("foo-group", "twos"))); + jobs.add(mockJob("bar-1", Arrays.asList("bar-group", "ones"))); + jobs.add(mockJob("nogroup", Collections.emptyList())); + GroupOrJobLookup groupOrJobLookup = new GroupOrJobLookup(jobs); + + assertTrue(groupOrJobLookup.isGroupOrJob("foo-1")); + assertTrue(groupOrJobLookup.isGroupOrJob("twos")); + assertTrue(groupOrJobLookup.isGroupOrJob("nogroup")); + assertFalse(groupOrJobLookup.isGroupOrJob("missing")); + } + + private static Job mockJob(String jobId, List groups) { + Job job = mock(Job.class); + when(job.getId()).thenReturn(jobId); + when(job.getGroups()).thenReturn(groups); + return job; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/groups/GroupOrJobTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/groups/GroupOrJobTests.java new file mode 100644 index 0000000000000..a0059ca940bfd --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/groups/GroupOrJobTests.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.job.groups; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.groups.GroupOrJob; + +import java.util.Arrays; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; + +public class GroupOrJobTests extends ESTestCase { + + public void testSingleJob() { + Job job = mock(Job.class); + GroupOrJob groupOrJob = new GroupOrJob.SingleJob(job); + assertThat(groupOrJob.isGroup(), is(false)); + assertThat(groupOrJob.jobs(), contains(job)); + expectThrows(UnsupportedOperationException.class, () -> groupOrJob.jobs().add(mock(Job.class))); + } + + public void testGroup() { + Job job1 = mock(Job.class); + Job job2 = mock(Job.class); + GroupOrJob groupOrJob = new GroupOrJob.Group(Arrays.asList(job1, job2)); + assertThat(groupOrJob.isGroup(), is(true)); + assertThat(groupOrJob.jobs(), contains(job1, job2)); + expectThrows(UnsupportedOperationException.class, () -> groupOrJob.jobs().add(mock(Job.class))); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java new file mode 100644 index 0000000000000..2b644c4aa5be0 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.job.persistence; + +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonParseException; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonToken; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles; +import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; +import org.elasticsearch.xpack.core.ml.job.results.CategoryDefinition; +import org.elasticsearch.xpack.core.ml.job.results.ReservedFieldNames; +import org.elasticsearch.xpack.core.ml.job.results.Result; + +import java.io.BufferedInputStream; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + + +public class ElasticsearchMappingsTests extends ESTestCase { + + public void testReservedFields() throws Exception { + Set overridden = new HashSet<>(); + + // These are not reserved because they're Elasticsearch keywords, not + // field names + overridden.add(ElasticsearchMappings.ANALYZER); + overridden.add(ElasticsearchMappings.COPY_TO); + overridden.add(ElasticsearchMappings.DYNAMIC); + overridden.add(ElasticsearchMappings.ENABLED); + overridden.add(ElasticsearchMappings.NESTED); + overridden.add(ElasticsearchMappings.PROPERTIES); + overridden.add(ElasticsearchMappings.TYPE); + overridden.add(ElasticsearchMappings.WHITESPACE); + + // These are not reserved because they're data types, not field names + overridden.add(Result.TYPE.getPreferredName()); + overridden.add(DataCounts.TYPE.getPreferredName()); + overridden.add(CategoryDefinition.TYPE.getPreferredName()); + overridden.add(ModelSizeStats.RESULT_TYPE_FIELD.getPreferredName()); + overridden.add(ModelSnapshot.TYPE.getPreferredName()); + overridden.add(Quantiles.TYPE.getPreferredName()); + + Set expected = collectResultsDocFieldNames(); + + expected.removeAll(overridden); + + if (ReservedFieldNames.RESERVED_FIELD_NAMES.size() != expected.size()) { + Set diff = new HashSet<>(ReservedFieldNames.RESERVED_FIELD_NAMES); + diff.removeAll(expected); + StringBuilder errorMessage = new StringBuilder("Fields in ReservedFieldNames but not in expected: ").append(diff); + + diff = new HashSet<>(expected); + diff.removeAll(ReservedFieldNames.RESERVED_FIELD_NAMES); + errorMessage.append("\nFields in expected but not in ReservedFieldNames: ").append(diff); + fail(errorMessage.toString()); + } + assertEquals(ReservedFieldNames.RESERVED_FIELD_NAMES.size(), expected.size()); + + for (String s : expected) { + // By comparing like this the failure messages say which string is missing + String reserved = ReservedFieldNames.RESERVED_FIELD_NAMES.contains(s) ? 
s : null; + assertEquals(s, reserved); + } + } + + @SuppressWarnings("unchecked") + public void testTermFieldMapping() throws IOException { + + XContentBuilder builder = ElasticsearchMappings.termFieldsMapping(null, Arrays.asList("apple", "strawberry", + AnomalyRecord.BUCKET_SPAN.getPreferredName())); + + XContentParser parser = createParser(builder); + Map properties = (Map) parser.map().get(ElasticsearchMappings.PROPERTIES); + + Map instanceMapping = (Map) properties.get("apple"); + assertNotNull(instanceMapping); + String dataType = (String)instanceMapping.get(ElasticsearchMappings.TYPE); + assertEquals(ElasticsearchMappings.KEYWORD, dataType); + + instanceMapping = (Map) properties.get("strawberry"); + assertNotNull(instanceMapping); + dataType = (String)instanceMapping.get(ElasticsearchMappings.TYPE); + assertEquals(ElasticsearchMappings.KEYWORD, dataType); + + // check no mapping for the reserved field + instanceMapping = (Map) properties.get(AnomalyRecord.BUCKET_SPAN.getPreferredName()); + assertNull(instanceMapping); + } + + private Set collectResultsDocFieldNames() throws IOException { + // Only the mappings for the results index should be added below. Do NOT add mappings for other indexes here. + + XContentBuilder builder = ElasticsearchMappings.docMapping(); + BufferedInputStream inputStream = + new BufferedInputStream(new ByteArrayInputStream(Strings.toString(builder).getBytes(StandardCharsets.UTF_8))); + JsonParser parser = new JsonFactory().createParser(inputStream); + Set fieldNames = new HashSet<>(); + boolean isAfterPropertiesStart = false; + try { + JsonToken token = parser.nextToken(); + while (token != null) { + switch (token) { + case START_OBJECT: + break; + case FIELD_NAME: + String fieldName = parser.getCurrentName(); + if (isAfterPropertiesStart) { + fieldNames.add(fieldName); + } else { + if (ElasticsearchMappings.PROPERTIES.equals(fieldName)) { + isAfterPropertiesStart = true; + } + } + break; + default: + break; + } + token = parser.nextToken(); + } + } catch (JsonParseException e) { + fail("Cannot parse JSON: " + e); + } + + return fieldNames; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCountsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCountsTests.java new file mode 100644 index 0000000000000..091e6887701d7 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/DataCountsTests.java @@ -0,0 +1,187 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.job.process.autodetect.state; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.joda.time.DateTime; + +import java.util.Date; + +import static org.hamcrest.Matchers.greaterThan; + +public class DataCountsTests extends AbstractSerializingTestCase { + + public static DataCounts createTestInstance(String jobId) { + return new DataCounts(jobId, randomIntBetween(1, 1_000_000), + randomIntBetween(1, 1_000_000), randomIntBetween(1, 1_000_000), randomIntBetween(1, 1_000_000), + randomIntBetween(1, 1_000_000), randomIntBetween(1, 1_000_000), randomIntBetween(1, 1_000_000), + randomIntBetween(1, 1_000_000), randomIntBetween(1, 1_000_000), randomIntBetween(1, 1_000_000), + new DateTime(randomDateTimeZone()).toDate(), new DateTime(randomDateTimeZone()).toDate(), + new DateTime(randomDateTimeZone()).toDate(), new DateTime(randomDateTimeZone()).toDate(), + new DateTime(randomDateTimeZone()).toDate()); + } + + @Override + public DataCounts createTestInstance() { + return createTestInstance(randomAlphaOfLength(10)); + } + + @Override + protected Writeable.Reader instanceReader() { + return DataCounts::new; + } + + @Override + protected DataCounts doParseInstance(XContentParser parser) { + return DataCounts.PARSER.apply(parser, null); + } + + public void testCountsEquals_GivenEqualCounts() { + DataCounts counts1 = createCounts(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + DataCounts counts2 = createCounts(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + + assertTrue(counts1.equals(counts2)); + assertTrue(counts2.equals(counts1)); + } + + public void testCountsHashCode_GivenEqualCounts() { + DataCounts counts1 = createCounts(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + DataCounts counts2 = createCounts(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + assertEquals(counts1.hashCode(), counts2.hashCode()); + } + + public void testCountsCopyConstructor() { + DataCounts counts1 = createCounts(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + DataCounts counts2 = new DataCounts(counts1); + + assertEquals(counts1.hashCode(), counts2.hashCode()); + } + + public void testCountCreatedZero() throws Exception { + DataCounts counts = new DataCounts(randomAlphaOfLength(16)); + assertAllFieldsEqualZero(counts); + } + + public void testCountCopyCreatedFieldsNotZero() throws Exception { + DataCounts counts1 = createCounts(1, 200, 400, 3, 4, 5, 6, 7, 8, 9, 1479211200000L, 1479384000000L, 13, 14, 15); + assertAllFieldsGreaterThanZero(counts1); + + DataCounts counts2 = new DataCounts(counts1); + assertAllFieldsGreaterThanZero(counts2); + } + + public void testIncrements() { + DataCounts counts = new DataCounts(randomAlphaOfLength(16)); + + counts.incrementInputBytes(15); + assertEquals(15, counts.getInputBytes()); + + counts.incrementInvalidDateCount(20); + assertEquals(20, counts.getInvalidDateCount()); + + counts.incrementMissingFieldCount(25); + assertEquals(25, counts.getMissingFieldCount()); + + counts.incrementOutOfOrderTimeStampCount(30); + assertEquals(30, counts.getOutOfOrderTimeStampCount()); + + counts.incrementProcessedRecordCount(40); + assertEquals(40, counts.getProcessedRecordCount()); + } + + public void testGetInputRecordCount() { + DataCounts counts = new DataCounts(randomAlphaOfLength(16)); + counts.incrementProcessedRecordCount(5); + assertEquals(5, counts.getInputRecordCount()); + + 
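// The input record count is derived from the processed, out-of-order and invalid-date counters, so each increment below raises it by the same amount. +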
counts.incrementOutOfOrderTimeStampCount(2); + assertEquals(7, counts.getInputRecordCount()); + + counts.incrementInvalidDateCount(1); + assertEquals(8, counts.getInputRecordCount()); + } + + public void testCalcProcessedFieldCount() { + DataCounts counts = new DataCounts(randomAlphaOfLength(16), 10L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, new Date(), new Date(), + new Date(), new Date(), new Date()); + counts.calcProcessedFieldCount(3); + + assertEquals(30, counts.getProcessedFieldCount()); + + counts = new DataCounts(randomAlphaOfLength(16), 10L, 0L, 0L, 0L, 0L, 5L, 0L, 0L, 0L, 0L, new Date(), new Date(), + new Date(), new Date(), new Date()); + counts.calcProcessedFieldCount(3); + assertEquals(25, counts.getProcessedFieldCount()); + } + + public void testEquals() { + DataCounts counts1 = new DataCounts( + randomAlphaOfLength(16), 10L, 5000L, 2000L, 300L, 6L, 15L, 0L, 0L, 0L, 0L, new Date(), new Date(1435000000L), + new Date(), new Date(), new Date()); + DataCounts counts2 = new DataCounts(counts1); + + assertEquals(counts1, counts2); + counts2.incrementInputBytes(1); + assertFalse(counts1.equals(counts2)); + } + + public void testSetEarliestRecordTimestamp_doesnotOverwrite() { + DataCounts counts = new DataCounts(randomAlphaOfLength(12)); + counts.setEarliestRecordTimeStamp(new Date(100L)); + + expectThrows(IllegalStateException.class, () -> counts.setEarliestRecordTimeStamp(new Date(200L))); + assertEquals(new Date(100L), counts.getEarliestRecordTimeStamp()); + } + + public void testDocumentId() { + DataCounts dataCounts = createTestInstance(); + String jobId = dataCounts.getJobid(); + assertEquals(jobId + "_data_counts", DataCounts.documentId(jobId)); + } + + private void assertAllFieldsEqualZero(DataCounts stats) throws Exception { + assertEquals(0L, stats.getProcessedRecordCount()); + assertEquals(0L, stats.getProcessedFieldCount()); + assertEquals(0L, stats.getInputBytes()); + assertEquals(0L, stats.getInputFieldCount()); + assertEquals(0L, stats.getInputRecordCount()); + assertEquals(0L, stats.getInvalidDateCount()); + assertEquals(0L, stats.getMissingFieldCount()); + assertEquals(0L, stats.getOutOfOrderTimeStampCount()); + } + + private void assertAllFieldsGreaterThanZero(DataCounts stats) throws Exception { + assertThat(stats.getProcessedRecordCount(), greaterThan(0L)); + assertThat(stats.getProcessedFieldCount(), greaterThan(0L)); + assertThat(stats.getInputBytes(), greaterThan(0L)); + assertThat(stats.getInputFieldCount(), greaterThan(0L)); + assertThat(stats.getInputRecordCount(), greaterThan(0L)); + assertThat(stats.getInputRecordCount(), greaterThan(0L)); + assertThat(stats.getInvalidDateCount(), greaterThan(0L)); + assertThat(stats.getMissingFieldCount(), greaterThan(0L)); + assertThat(stats.getOutOfOrderTimeStampCount(), greaterThan(0L)); + assertThat(stats.getLatestRecordTimeStamp().getTime(), greaterThan(0L)); + } + + private static DataCounts createCounts( + long processedRecordCount, long processedFieldCount, long inputBytes, long inputFieldCount, + long invalidDateCount, long missingFieldCount, long outOfOrderTimeStampCount, + long emptyBucketCount, long sparseBucketCount, long bucketCount, + long earliestRecordTime, long latestRecordTime, long lastDataTimeStamp, long latestEmptyBucketTimeStamp, + long latestSparseBucketTimeStamp) { + + DataCounts counts = new DataCounts("foo", processedRecordCount, processedFieldCount, inputBytes, + inputFieldCount, invalidDateCount, missingFieldCount, outOfOrderTimeStampCount, + emptyBucketCount, sparseBucketCount, bucketCount, + new 
Date(earliestRecordTime), new Date(latestRecordTime), + new Date(lastDataTimeStamp), new Date(latestEmptyBucketTimeStamp), new Date(latestSparseBucketTimeStamp)); + + return counts; + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStatsTests.java new file mode 100644 index 0000000000000..e66fea90f049b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSizeStatsTests.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.process.autodetect.state; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats.MemoryStatus; + +import java.io.IOException; +import java.util.Date; + +import static org.hamcrest.Matchers.containsString; + +public class ModelSizeStatsTests extends AbstractSerializingTestCase { + + public void testDefaultConstructor() { + ModelSizeStats stats = new ModelSizeStats.Builder("foo").build(); + assertEquals(0, stats.getModelBytes()); + assertEquals(0, stats.getTotalByFieldCount()); + assertEquals(0, stats.getTotalOverFieldCount()); + assertEquals(0, stats.getTotalPartitionFieldCount()); + assertEquals(0, stats.getBucketAllocationFailuresCount()); + assertEquals(MemoryStatus.OK, stats.getMemoryStatus()); + } + + public void testSetMemoryStatus_GivenNull() { + ModelSizeStats.Builder stats = new ModelSizeStats.Builder("foo"); + + NullPointerException ex = expectThrows(NullPointerException.class, () -> stats.setMemoryStatus(null)); + + assertEquals("[memory_status] must not be null", ex.getMessage()); + } + + public void testSetMemoryStatus_GivenSoftLimit() { + ModelSizeStats.Builder stats = new ModelSizeStats.Builder("foo"); + + stats.setMemoryStatus(MemoryStatus.SOFT_LIMIT); + + assertEquals(MemoryStatus.SOFT_LIMIT, stats.build().getMemoryStatus()); + } + + @Override + protected ModelSizeStats createTestInstance() { + return createRandomized(); + } + + public static ModelSizeStats createRandomized() { + ModelSizeStats.Builder stats = new ModelSizeStats.Builder("foo"); + if (randomBoolean()) { + stats.setBucketAllocationFailuresCount(randomNonNegativeLong()); + } + if (randomBoolean()) { + stats.setModelBytes(randomNonNegativeLong()); + } + if (randomBoolean()) { + stats.setTotalByFieldCount(randomNonNegativeLong()); + } + if (randomBoolean()) { + stats.setTotalOverFieldCount(randomNonNegativeLong()); + } + if (randomBoolean()) { + stats.setTotalPartitionFieldCount(randomNonNegativeLong()); + } + if (randomBoolean()) { + stats.setLogTime(new Date(TimeValue.parseTimeValue(randomTimeValue(), "test").millis())); + } + if (randomBoolean()) { + stats.setTimestamp(new Date(TimeValue.parseTimeValue(randomTimeValue(), "test").millis())); + } + if (randomBoolean()) { + stats.setMemoryStatus(randomFrom(MemoryStatus.values())); + } + return stats.build(); + } + + 
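The randomized instance above is built the same way the assertions around it expect: every counter defaults to zero with memory_status OK (testDefaultConstructor above), and the document id checked in testId below is derived from the job id and the log time. A minimal sketch of that Builder usage, restricted to setters this file already calls (the wrapper class and the literal values are illustrative only):

```java
package org.elasticsearch.xpack.core.ml.job.process.autodetect.state;

import java.util.Date;

public class ModelSizeStatsUsageSketch {
    public static void main(String[] args) {
        ModelSizeStats.Builder builder = new ModelSizeStats.Builder("job-foo");
        builder.setModelBytes(512L);                                      // bytes used by the model
        builder.setMemoryStatus(ModelSizeStats.MemoryStatus.SOFT_LIMIT);  // memory pressure indicator
        builder.setLogTime(new Date(100));                                // feeds the document id suffix
        ModelSizeStats stats = builder.build();

        // Per testId below, the id is "<job_id>_model_size_stats_<log_time_millis>".
        System.out.println(stats.getId());           // job-foo_model_size_stats_100
        System.out.println(stats.getMemoryStatus()); // SOFT_LIMIT
    }
}
```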
@Override + protected Reader instanceReader() { + return ModelSizeStats::new; + } + + @Override + protected ModelSizeStats doParseInstance(XContentParser parser) { + return ModelSizeStats.STRICT_PARSER.apply(parser, null).build(); + } + + public void testId() { + ModelSizeStats stats = new ModelSizeStats.Builder("job-foo").setLogTime(new Date(100)).build(); + assertEquals("job-foo_model_size_stats_100", stats.getId()); + } + + public void testStrictParser() throws IOException { + String json = "{\"job_id\":\"job_1\", \"foo\":\"bar\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> ModelSizeStats.STRICT_PARSER.apply(parser, null)); + + assertThat(e.getMessage(), containsString("unknown field [foo]")); + } + } + + public void testLenientParser() throws IOException { + String json = "{\"job_id\":\"job_1\", \"foo\":\"bar\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + ModelSizeStats.LENIENT_PARSER.apply(parser, null); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshotTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshotTests.java new file mode 100644 index 0000000000000..4026be67461ee --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelSnapshotTests.java @@ -0,0 +1,222 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.job.process.autodetect.state; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Date; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class ModelSnapshotTests extends AbstractSerializingTestCase { + private static final Date DEFAULT_TIMESTAMP = new Date(); + private static final String DEFAULT_DESCRIPTION = "a snapshot"; + private static final String DEFAULT_ID = "my_id"; + private static final int DEFAULT_DOC_COUNT = 7; + private static final Date DEFAULT_LATEST_RESULT_TIMESTAMP = new Date(12345678901234L); + private static final Date DEFAULT_LATEST_RECORD_TIMESTAMP = new Date(12345678904321L); + private static final boolean DEFAULT_RETAIN = true; + + public void testCopyBuilder() { + ModelSnapshot modelSnapshot1 = createFullyPopulated().build(); + ModelSnapshot modelSnapshot2 = new ModelSnapshot.Builder(modelSnapshot1).build(); + assertEquals(modelSnapshot1, modelSnapshot2); + } + + public void testEquals_GivenSameObject() { + ModelSnapshot modelSnapshot = createFullyPopulated().build(); + assertTrue(modelSnapshot.equals(modelSnapshot)); + } + + public void testEquals_GivenObjectOfDifferentClass() { + ModelSnapshot modelSnapshot = createFullyPopulated().build(); + assertFalse(modelSnapshot.equals("a string")); + } + + public void testEquals_GivenEqualModelSnapshots() { + ModelSnapshot modelSnapshot1 = createFullyPopulated().build(); + ModelSnapshot modelSnapshot2 = createFullyPopulated().build(); + + assertEquals(modelSnapshot1, modelSnapshot2); + assertEquals(modelSnapshot2, modelSnapshot1); + assertEquals(modelSnapshot1.hashCode(), modelSnapshot2.hashCode()); + } + + public void testEquals_GivenDifferentTimestamp() { + ModelSnapshot modelSnapshot1 = createFullyPopulated().build(); + ModelSnapshot modelSnapshot2 = createFullyPopulated().setTimestamp( + new Date(modelSnapshot1.getTimestamp().getTime() + 1)).build(); + + assertFalse(modelSnapshot1.equals(modelSnapshot2)); + assertFalse(modelSnapshot2.equals(modelSnapshot1)); + } + + public void testEquals_GivenDifferentDescription() { + ModelSnapshot modelSnapshot1 = createFullyPopulated().build(); + ModelSnapshot modelSnapshot2 = createFullyPopulated() + .setDescription(modelSnapshot1.getDescription() + " blah").build(); + + assertFalse(modelSnapshot1.equals(modelSnapshot2)); + assertFalse(modelSnapshot2.equals(modelSnapshot1)); + } + + public void testEquals_GivenDifferentId() { + ModelSnapshot modelSnapshot1 = createFullyPopulated().build(); + ModelSnapshot modelSnapshot2 = createFullyPopulated() + .setSnapshotId(modelSnapshot1.getSnapshotId() + "_2").build(); + + assertFalse(modelSnapshot1.equals(modelSnapshot2)); + assertFalse(modelSnapshot2.equals(modelSnapshot1)); + } + + public void testEquals_GivenDifferentDocCount() { + ModelSnapshot modelSnapshot1 = createFullyPopulated().build(); + 
ModelSnapshot modelSnapshot2 = createFullyPopulated() + .setSnapshotDocCount(modelSnapshot1.getSnapshotDocCount() + 1).build(); + + assertFalse(modelSnapshot1.equals(modelSnapshot2)); + assertFalse(modelSnapshot2.equals(modelSnapshot1)); + } + + public void testEquals_GivenDifferentModelSizeStats() { + ModelSnapshot modelSnapshot1 = createFullyPopulated().build(); + ModelSizeStats.Builder modelSizeStats = new ModelSizeStats.Builder("foo"); + modelSizeStats.setModelBytes(42L); + ModelSnapshot modelSnapshot2 = createFullyPopulated().setModelSizeStats(modelSizeStats).build(); + + assertFalse(modelSnapshot1.equals(modelSnapshot2)); + assertFalse(modelSnapshot2.equals(modelSnapshot1)); + } + + public void testEquals_GivenDifferentQuantiles() { + ModelSnapshot modelSnapshot1 = createFullyPopulated().build(); + ModelSnapshot modelSnapshot2 = createFullyPopulated() + .setQuantiles(new Quantiles("foo", modelSnapshot1.getQuantiles().getTimestamp(), + "different state")).build(); + + assertFalse(modelSnapshot1.equals(modelSnapshot2)); + assertFalse(modelSnapshot2.equals(modelSnapshot1)); + } + + public void testEquals_GivenDifferentLatestResultTimestamp() { + ModelSnapshot modelSnapshot1 = createFullyPopulated().build(); + ModelSnapshot modelSnapshot2 = createFullyPopulated().setLatestResultTimeStamp( + new Date(modelSnapshot1.getLatestResultTimeStamp().getTime() + 1)).build(); + + assertFalse(modelSnapshot1.equals(modelSnapshot2)); + assertFalse(modelSnapshot2.equals(modelSnapshot1)); + } + + public void testEquals_GivenDifferentLatestRecordTimestamp() { + ModelSnapshot modelSnapshot1 = createFullyPopulated().build(); + ModelSnapshot modelSnapshot2 = createFullyPopulated().setLatestRecordTimeStamp( + new Date(modelSnapshot1.getLatestRecordTimeStamp().getTime() + 1)).build(); + + assertFalse(modelSnapshot1.equals(modelSnapshot2)); + assertFalse(modelSnapshot2.equals(modelSnapshot1)); + } + + private static ModelSnapshot.Builder createFullyPopulated() { + ModelSnapshot.Builder modelSnapshot = new ModelSnapshot.Builder(); + modelSnapshot.setJobId("foo"); + modelSnapshot.setMinVersion(Version.CURRENT); + modelSnapshot.setTimestamp(DEFAULT_TIMESTAMP); + modelSnapshot.setDescription(DEFAULT_DESCRIPTION); + modelSnapshot.setSnapshotId(DEFAULT_ID); + modelSnapshot.setSnapshotDocCount(DEFAULT_DOC_COUNT); + ModelSizeStats.Builder modelSizeStatsBuilder = new ModelSizeStats.Builder("foo"); + modelSizeStatsBuilder.setLogTime(null); + modelSnapshot.setModelSizeStats(modelSizeStatsBuilder); + modelSnapshot.setLatestResultTimeStamp(DEFAULT_LATEST_RESULT_TIMESTAMP); + modelSnapshot.setLatestRecordTimeStamp(DEFAULT_LATEST_RECORD_TIMESTAMP); + modelSnapshot.setQuantiles(new Quantiles("foo", DEFAULT_TIMESTAMP, "state")); + modelSnapshot.setRetain(DEFAULT_RETAIN); + return modelSnapshot; + } + + @Override + protected ModelSnapshot createTestInstance() { + return createRandomized(); + } + + public static ModelSnapshot createRandomized() { + ModelSnapshot.Builder modelSnapshot = new ModelSnapshot.Builder(randomAlphaOfLengthBetween(1, 20)); + modelSnapshot.setMinVersion(Version.CURRENT); + modelSnapshot.setTimestamp(new Date(TimeValue.parseTimeValue(randomTimeValue(), "test").millis())); + modelSnapshot.setDescription(randomAlphaOfLengthBetween(1, 20)); + modelSnapshot.setSnapshotId(randomAlphaOfLengthBetween(1, 20)); + modelSnapshot.setSnapshotDocCount(randomInt()); + modelSnapshot.setModelSizeStats(ModelSizeStatsTests.createRandomized()); + modelSnapshot.setLatestResultTimeStamp( + new 
Date(TimeValue.parseTimeValue(randomTimeValue(), "test").millis())); + modelSnapshot.setLatestRecordTimeStamp( + new Date(TimeValue.parseTimeValue(randomTimeValue(), "test").millis())); + modelSnapshot.setQuantiles(QuantilesTests.createRandomized()); + modelSnapshot.setRetain(randomBoolean()); + return modelSnapshot.build(); + } + + @Override + protected Reader instanceReader() { + return ModelSnapshot::new; + } + + @Override + protected ModelSnapshot doParseInstance(XContentParser parser) { + return ModelSnapshot.STRICT_PARSER.apply(parser, null).build(); + } + + public void testDocumentId() { + ModelSnapshot snapshot1 = new ModelSnapshot.Builder("foo").setSnapshotId("1").build(); + ModelSnapshot snapshot2 = new ModelSnapshot.Builder("foo").setSnapshotId("2").build(); + ModelSnapshot snapshot3 = new ModelSnapshot.Builder("bar").setSnapshotId("1").build(); + + assertEquals("foo_model_snapshot_1", ModelSnapshot.documentId(snapshot1)); + assertEquals("foo_model_snapshot_2", ModelSnapshot.documentId(snapshot2)); + assertEquals("bar_model_snapshot_1", ModelSnapshot.documentId(snapshot3)); + } + + public void testStateDocumentIds_GivenDocCountIsOne() { + ModelSnapshot snapshot = new ModelSnapshot.Builder("foo").setSnapshotId("1").setSnapshotDocCount(1).build(); + assertThat(snapshot.stateDocumentIds(), equalTo(Arrays.asList("foo_model_state_1#1"))); + } + + public void testStateDocumentIds_GivenDocCountIsThree() { + ModelSnapshot snapshot = new ModelSnapshot.Builder("foo").setSnapshotId("123456789").setSnapshotDocCount(3).build(); + assertThat(snapshot.stateDocumentIds(), + equalTo(Arrays.asList("foo_model_state_123456789#1", "foo_model_state_123456789#2", "foo_model_state_123456789#3"))); + } + + public void testStrictParser() throws IOException { + String json = "{\"foo\":\"bar\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> ModelSnapshot.STRICT_PARSER.apply(parser, null)); + + assertThat(e.getMessage(), containsString("unknown field [foo]")); + } + } + + public void testLenientParser() throws IOException { + String json = "{\"foo\":\"bar\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + ModelSnapshot.LENIENT_PARSER.apply(parser, null); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/QuantilesTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/QuantilesTests.java new file mode 100644 index 0000000000000..84c1a161f1ee4 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/QuantilesTests.java @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.job.process.autodetect.state; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.io.IOException; +import java.util.Date; + +import static org.hamcrest.Matchers.containsString; + +public class QuantilesTests extends AbstractSerializingTestCase { + + public void testEquals_GivenSameObject() { + Quantiles quantiles = new Quantiles("foo", new Date(0L), "foo"); + assertTrue(quantiles.equals(quantiles)); + } + + + public void testEquals_GivenDifferentClassObject() { + Quantiles quantiles = new Quantiles("foo", new Date(0L), "foo"); + assertFalse(quantiles.equals("not a quantiles object")); + } + + + public void testEquals_GivenEqualQuantilesObject() { + Quantiles quantiles1 = new Quantiles("foo", new Date(0L), "foo"); + + Quantiles quantiles2 = new Quantiles("foo", new Date(0L), "foo"); + + assertTrue(quantiles1.equals(quantiles2)); + assertTrue(quantiles2.equals(quantiles1)); + } + + + public void testEquals_GivenDifferentState() { + Quantiles quantiles1 = new Quantiles("foo", new Date(0L), "bar1"); + + Quantiles quantiles2 = new Quantiles("foo", new Date(0L), "bar2"); + + assertFalse(quantiles1.equals(quantiles2)); + assertFalse(quantiles2.equals(quantiles1)); + } + + + public void testHashCode_GivenEqualObject() { + Quantiles quantiles1 = new Quantiles("foo", new Date(0L), "foo"); + + Quantiles quantiles2 = new Quantiles("foo", new Date(0L), "foo"); + + assertEquals(quantiles1.hashCode(), quantiles2.hashCode()); + } + + public void testDocumentId() { + Quantiles quantiles = createTestInstance(); + String jobId = quantiles.getJobId(); + assertEquals(jobId + "_quantiles", Quantiles.documentId(jobId)); + } + + @Override + protected Quantiles createTestInstance() { + return createRandomized(); + } + + public static Quantiles createRandomized() { + return new Quantiles(randomAlphaOfLengthBetween(1, 20), + new Date(TimeValue.parseTimeValue(randomTimeValue(), "test").millis()), + randomAlphaOfLengthBetween(0, 1000)); + } + + @Override + protected Reader instanceReader() { + return Quantiles::new; + } + + @Override + protected Quantiles doParseInstance(XContentParser parser) { + return Quantiles.STRICT_PARSER.apply(parser, null); + } + + public void testStrictParser() throws IOException { + String json = "{\"job_id\":\"job_1\", \"timestamp\": 123456789, \"quantile_state\":\"...\", \"foo\":\"bar\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> Quantiles.STRICT_PARSER.apply(parser, null)); + + assertThat(e.getMessage(), containsString("unknown field [foo]")); + } + } + + public void testLenientParser() throws IOException { + String json = "{\"job_id\":\"job_1\", \"timestamp\": 123456789, \"quantile_state\":\"...\", \"foo\":\"bar\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + Quantiles.LENIENT_PARSER.apply(parser, null); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyCauseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyCauseTests.java new file mode 100644 index 0000000000000..033392eddce27 --- /dev/null +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyCauseTests.java @@ -0,0 +1,115 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.results; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.Matchers.containsString; + +public class AnomalyCauseTests extends AbstractSerializingTestCase<AnomalyCause> { + + @Override + protected AnomalyCause createTestInstance() { + AnomalyCause anomalyCause = new AnomalyCause(); + if (randomBoolean()) { + int size = randomInt(10); + List<Double> actual = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + actual.add(randomDouble()); + } + anomalyCause.setActual(actual); + } + if (randomBoolean()) { + int size = randomInt(10); + List<Double> typical = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + typical.add(randomDouble()); + } + anomalyCause.setTypical(typical); + } + if (randomBoolean()) { + anomalyCause.setByFieldName(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + anomalyCause.setByFieldValue(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + anomalyCause.setCorrelatedByFieldValue(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + anomalyCause.setOverFieldName(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + anomalyCause.setOverFieldValue(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + anomalyCause.setPartitionFieldName(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + anomalyCause.setPartitionFieldValue(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + anomalyCause.setFunction(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + anomalyCause.setFunctionDescription(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + anomalyCause.setFieldName(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + anomalyCause.setProbability(randomDouble()); + } + if (randomBoolean()) { + int size = randomInt(10); + List<Influence> influencers = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + int fieldValuesSize = randomInt(10); + List<String> fieldValues = new ArrayList<>(fieldValuesSize); + for (int j = 0; j < fieldValuesSize; j++) { + fieldValues.add(randomAlphaOfLengthBetween(1, 20)); + } + influencers.add(new Influence(randomAlphaOfLengthBetween(1, 20), fieldValues)); + } + anomalyCause.setInfluencers(influencers); + } + return anomalyCause; + } + + @Override + protected Reader<AnomalyCause> instanceReader() { + return AnomalyCause::new; + } + + @Override + protected AnomalyCause doParseInstance(XContentParser parser) { + return AnomalyCause.STRICT_PARSER.apply(parser, null); + } + + public void testStrictParser() throws IOException { + String json = "{\"foo\":\"bar\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> AnomalyCause.STRICT_PARSER.apply(parser, null)); + + assertThat(e.getMessage(), containsString("unknown field [foo]")); + } + } + +
public void testLenientParser() throws IOException { + String json = "{\"foo\":\"bar\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + AnomalyCause.LENIENT_PARSER.apply(parser, null); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecordTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecordTests.java new file mode 100644 index 0000000000000..b66fa4b2889d5 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecordTests.java @@ -0,0 +1,218 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.results; + +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.hamcrest.Matchers.containsString; + +public class AnomalyRecordTests extends AbstractSerializingTestCase { + + @Override + protected AnomalyRecord createTestInstance() { + return createTestInstance("foo"); + } + + public AnomalyRecord createTestInstance(String jobId) { + AnomalyRecord anomalyRecord = new AnomalyRecord(jobId, new Date(randomNonNegativeLong()), randomNonNegativeLong()); + anomalyRecord.setActual(Collections.singletonList(randomDouble())); + anomalyRecord.setTypical(Collections.singletonList(randomDouble())); + anomalyRecord.setProbability(randomDouble()); + anomalyRecord.setRecordScore(randomDouble()); + anomalyRecord.setInitialRecordScore(randomDouble()); + anomalyRecord.setInterim(randomBoolean()); + if (randomBoolean()) { + anomalyRecord.setFieldName(randomAlphaOfLength(12)); + } + if (randomBoolean()) { + anomalyRecord.setByFieldName(randomAlphaOfLength(12)); + anomalyRecord.setByFieldValue(randomAlphaOfLength(12)); + } + if (randomBoolean()) { + anomalyRecord.setPartitionFieldName(randomAlphaOfLength(12)); + anomalyRecord.setPartitionFieldValue(randomAlphaOfLength(12)); + } + if (randomBoolean()) { + anomalyRecord.setOverFieldName(randomAlphaOfLength(12)); + anomalyRecord.setOverFieldValue(randomAlphaOfLength(12)); + } + anomalyRecord.setFunction(randomAlphaOfLengthBetween(5, 20)); + anomalyRecord.setFunctionDescription(randomAlphaOfLengthBetween(5, 20)); + if (randomBoolean()) { + anomalyRecord.setCorrelatedByFieldValue(randomAlphaOfLength(16)); + } + if (randomBoolean()) { + int count = randomIntBetween(0, 9); + List influences = new ArrayList<>(); + for (int i=0; i causes = new ArrayList<>(); + for (int i=0; i instanceReader() { + return AnomalyRecord::new; + } + + @Override + protected AnomalyRecord doParseInstance(XContentParser parser) { + return AnomalyRecord.STRICT_PARSER.apply(parser, null); 
+ } + + @SuppressWarnings("unchecked") + public void testToXContentIncludesInputFields() throws IOException { + AnomalyRecord record = createTestInstance(); + record.setByFieldName("byfn"); + record.setByFieldValue("byfv"); + record.setOverFieldName("overfn"); + record.setOverFieldValue("overfv"); + record.setPartitionFieldName("partfn"); + record.setPartitionFieldValue("partfv"); + + Influence influence1 = new Influence("inffn", Arrays.asList("inffv1", "inffv2")); + Influence influence2 = new Influence("inffn", Arrays.asList("inffv1", "inffv2")); + record.setInfluencers(Arrays.asList(influence1, influence2)); + + BytesReference bytes = XContentHelper.toXContent(record, XContentType.JSON, false); + XContentParser parser = createParser(XContentType.JSON.xContent(), bytes); + Map map = parser.map(); + List serialisedByFieldValues = (List) map.get(record.getByFieldName()); + assertEquals(Collections.singletonList(record.getByFieldValue()), serialisedByFieldValues); + List serialisedOverFieldValues = (List) map.get(record.getOverFieldName()); + assertEquals(Collections.singletonList(record.getOverFieldValue()), serialisedOverFieldValues); + List serialisedPartFieldValues = (List) map.get(record.getPartitionFieldName()); + assertEquals(Collections.singletonList(record.getPartitionFieldValue()), serialisedPartFieldValues); + + List serialisedInfFieldValues1 = (List) map.get(influence1.getInfluencerFieldName()); + assertEquals(influence1.getInfluencerFieldValues(), serialisedInfFieldValues1); + List serialisedInfFieldValues2 = (List) map.get(influence2.getInfluencerFieldName()); + assertEquals(influence2.getInfluencerFieldValues(), serialisedInfFieldValues2); + } + + public void testToXContentOrdersDuplicateInputFields() throws IOException { + AnomalyRecord record = createTestInstance(); + record.setByFieldName("car-make"); + record.setByFieldValue("ford"); + record.setOverFieldName("number-of-wheels"); + record.setOverFieldValue("4"); + record.setPartitionFieldName("spoiler"); + record.setPartitionFieldValue("yes"); + + Influence influence1 = new Influence("car-make", Collections.singletonList("VW")); + Influence influence2 = new Influence("number-of-wheels", Collections.singletonList("18")); + Influence influence3 = new Influence("spoiler", Collections.singletonList("no")); + record.setInfluencers(Arrays.asList(influence1, influence2, influence3)); + + // influencer fields with the same name as a by/over/partitiion field + // come second in the list + BytesReference bytes = XContentHelper.toXContent(record, XContentType.JSON, false); + XContentParser parser = createParser(XContentType.JSON.xContent(), bytes); + Map map = parser.map(); + List serialisedCarMakeFieldValues = (List) map.get("car-make"); + assertEquals(Arrays.asList("ford", "VW"), serialisedCarMakeFieldValues); + List serialisedNumberOfWheelsFieldValues = (List) map.get("number-of-wheels"); + assertEquals(Arrays.asList("4", "18"), serialisedNumberOfWheelsFieldValues); + List serialisedSpoilerFieldValues = (List) map.get("spoiler"); + assertEquals(Arrays.asList("yes", "no"), serialisedSpoilerFieldValues); + } + + @SuppressWarnings("unchecked") + public void testToXContentDoesNotIncludesReservedWordInputFields() throws IOException { + AnomalyRecord record = createTestInstance(); + record.setByFieldName(AnomalyRecord.BUCKET_SPAN.getPreferredName()); + record.setByFieldValue("bar"); + + BytesReference bytes = XContentHelper.toXContent(record, XContentType.JSON, false); + XContentParser parser = createParser(XContentType.JSON.xContent(), 
bytes); + Object value = parser.map().get(AnomalyRecord.BUCKET_SPAN.getPreferredName()); + assertNotEquals("bar", value); + assertEquals((Long)record.getBucketSpan(), (Long)value); + } + + public void testId() { + AnomalyRecord record = new AnomalyRecord("test-job", new Date(1000), 60L); + String byFieldValue = null; + String overFieldValue = null; + String partitionFieldValue = null; + + int valuesHash = Objects.hash(byFieldValue, overFieldValue, partitionFieldValue); + assertEquals("test-job_record_1000_60_0_" + valuesHash + "_0", record.getId()); + + int length = 0; + if (randomBoolean()) { + byFieldValue = randomAlphaOfLength(10); + length += byFieldValue.length(); + record.setByFieldValue(byFieldValue); + } + if (randomBoolean()) { + overFieldValue = randomAlphaOfLength(10); + length += overFieldValue.length(); + record.setOverFieldValue(overFieldValue); + } + if (randomBoolean()) { + partitionFieldValue = randomAlphaOfLength(10); + length += partitionFieldValue.length(); + record.setPartitionFieldValue(partitionFieldValue); + } + + valuesHash = Objects.hash(byFieldValue, overFieldValue, partitionFieldValue); + assertEquals("test-job_record_1000_60_0_" + valuesHash + "_" + length, record.getId()); + } + + public void testStrictParser_IsLenientOnTopLevelFields() throws IOException { + String json = "{\"job_id\":\"job_1\", \"timestamp\": 123544456, \"bucket_span\": 3600, \"foo\":\"bar\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + AnomalyRecord.STRICT_PARSER.apply(parser, null); + } + } + + public void testStrictParser_IsStrictOnNestedFields() throws IOException { + String json = "{\"job_id\":\"job_1\", \"timestamp\": 123544456, \"bucket_span\": 3600, \"foo\":\"bar\"," + + " \"causes\":[{\"cause_foo\":\"bar\"}]}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + XContentParseException e = expectThrows(XContentParseException.class, + () -> AnomalyRecord.STRICT_PARSER.apply(parser, null)); + assertThat(e.getCause().getMessage(), containsString("[anomaly_cause] unknown field [cause_foo]")); + } + } + + public void testLenientParser() throws IOException { + String json = "{\"job_id\":\"job_1\", \"timestamp\": 123544456, \"bucket_span\": 3600, \"foo\":\"bar\"," + + " \"causes\":[{\"cause_foo\":\"bar\"}]}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + AnomalyRecord.LENIENT_PARSER.apply(parser, null); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencerTests.java new file mode 100644 index 0000000000000..8c56c96c09609 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/results/BucketInfluencerTests.java @@ -0,0 +1,162 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
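(Editor's aside: every result class in this diff ships the same STRICT_PARSER/LENIENT_PARSER pair that the tests above exercise, where the strict variant rejects unknown fields and the lenient one skips them. The production parsers themselves are not part of this diff; the snippet below is only a hedged sketch of how such a pair is commonly declared with Elasticsearch's ObjectParser, using a made-up `Example` class with a single `job_id` field.)

```java
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ObjectParser;

// Hypothetical illustration only -- not the parser code of AnomalyRecord or AnomalyCause.
public class Example {

    // ignoreUnknownFields = false: an unknown field fails the parse (the "strict" behaviour tested above).
    public static final ObjectParser<Example, Void> STRICT_PARSER = createParser(false);
    // ignoreUnknownFields = true: unknown fields are silently skipped (the "lenient" behaviour).
    public static final ObjectParser<Example, Void> LENIENT_PARSER = createParser(true);

    private static ObjectParser<Example, Void> createParser(boolean ignoreUnknownFields) {
        ObjectParser<Example, Void> parser = new ObjectParser<>("example", ignoreUnknownFields, Example::new);
        parser.declareString(Example::setJobId, new ParseField("job_id"));
        return parser;
    }

    private String jobId;

    public void setJobId(String jobId) {
        this.jobId = jobId;
    }
}
```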
+ */ +package org.elasticsearch.xpack.core.ml.job.results; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.io.IOException; +import java.util.Date; + +import static org.hamcrest.Matchers.containsString; + +public class BucketInfluencerTests extends AbstractSerializingTestCase { + + @Override + protected BucketInfluencer createTestInstance() { + BucketInfluencer bucketInfluencer = new BucketInfluencer(randomAlphaOfLengthBetween(1, 20), new Date(randomNonNegativeLong()), + randomNonNegativeLong()); + if (randomBoolean()) { + bucketInfluencer.setAnomalyScore(randomDouble()); + } + if (randomBoolean()) { + bucketInfluencer.setInfluencerFieldName(randomAlphaOfLengthBetween(1, 20)); + } + if (randomBoolean()) { + bucketInfluencer.setInitialAnomalyScore(randomDouble()); + } + if (randomBoolean()) { + bucketInfluencer.setProbability(randomDouble()); + } + if (randomBoolean()) { + bucketInfluencer.setRawAnomalyScore(randomDouble()); + } + if (randomBoolean()) { + bucketInfluencer.setIsInterim(randomBoolean()); + } + return bucketInfluencer; + } + + @Override + protected Reader instanceReader() { + return BucketInfluencer::new; + } + + @Override + protected BucketInfluencer doParseInstance(XContentParser parser) { + return BucketInfluencer.STRICT_PARSER.apply(parser, null); + } + + public void testEquals_GivenNull() { + assertFalse(new BucketInfluencer(randomAlphaOfLengthBetween(1, 20), new Date(), 600).equals(null)); + } + + public void testEquals_GivenDifferentClass() { + assertFalse(new BucketInfluencer(randomAlphaOfLengthBetween(1, 20), new Date(), 600).equals("a string")); + } + + public void testEquals_GivenEqualInfluencers() { + BucketInfluencer bucketInfluencer1 = new BucketInfluencer("foo", new Date(123), 600); + bucketInfluencer1.setAnomalyScore(42.0); + bucketInfluencer1.setInfluencerFieldName("foo"); + bucketInfluencer1.setInitialAnomalyScore(67.3); + bucketInfluencer1.setProbability(0.0003); + bucketInfluencer1.setRawAnomalyScore(3.14); + + BucketInfluencer bucketInfluencer2 = new BucketInfluencer("foo", new Date(123), 600); + bucketInfluencer2.setAnomalyScore(42.0); + bucketInfluencer2.setInfluencerFieldName("foo"); + bucketInfluencer2.setInitialAnomalyScore(67.3); + bucketInfluencer2.setProbability(0.0003); + bucketInfluencer2.setRawAnomalyScore(3.14); + + assertTrue(bucketInfluencer1.equals(bucketInfluencer2)); + assertTrue(bucketInfluencer2.equals(bucketInfluencer1)); + assertEquals(bucketInfluencer1.hashCode(), bucketInfluencer2.hashCode()); + } + + public void testEquals_GivenDifferentAnomalyScore() { + BucketInfluencer bucketInfluencer1 = new BucketInfluencer("foo", new Date(123), 600); + bucketInfluencer1.setAnomalyScore(42.0); + + BucketInfluencer bucketInfluencer2 = new BucketInfluencer("foo", new Date(123), 600); + bucketInfluencer2.setAnomalyScore(42.1); + + assertFalse(bucketInfluencer1.equals(bucketInfluencer2)); + assertFalse(bucketInfluencer2.equals(bucketInfluencer1)); + } + + public void testEquals_GivenDifferentFieldName() { + BucketInfluencer bucketInfluencer1 = new BucketInfluencer("foo", new Date(123), 600); + bucketInfluencer1.setInfluencerFieldName("foo"); + + BucketInfluencer bucketInfluencer2 = new BucketInfluencer("foo", new Date(123), 600); + bucketInfluencer2.setInfluencerFieldName("bar"); + + assertFalse(bucketInfluencer1.equals(bucketInfluencer2)); + 
assertFalse(bucketInfluencer2.equals(bucketInfluencer1)); + } + + public void testEquals_GivenDifferentInitialAnomalyScore() { + BucketInfluencer bucketInfluencer1 = new BucketInfluencer("foo", new Date(123), 600); + bucketInfluencer1.setInitialAnomalyScore(42.0); + + BucketInfluencer bucketInfluencer2 = new BucketInfluencer("foo", new Date(123), 600); + bucketInfluencer2.setInitialAnomalyScore(42.1); + + assertFalse(bucketInfluencer1.equals(bucketInfluencer2)); + assertFalse(bucketInfluencer2.equals(bucketInfluencer1)); + } + + public void testEquals_GivenRawAnomalyScore() { + BucketInfluencer bucketInfluencer1 = new BucketInfluencer("foo", new Date(123), 600); + bucketInfluencer1.setRawAnomalyScore(42.0); + + BucketInfluencer bucketInfluencer2 = new BucketInfluencer("foo", new Date(123), 600); + bucketInfluencer2.setRawAnomalyScore(42.1); + + assertFalse(bucketInfluencer1.equals(bucketInfluencer2)); + assertFalse(bucketInfluencer2.equals(bucketInfluencer1)); + } + + public void testEquals_GivenDifferentProbability() { + BucketInfluencer bucketInfluencer1 = new BucketInfluencer("foo", new Date(123), 600); + bucketInfluencer1.setProbability(0.001); + + BucketInfluencer bucketInfluencer2 = new BucketInfluencer("foo", new Date(123), 600); + bucketInfluencer2.setProbability(0.002); + + assertFalse(bucketInfluencer1.equals(bucketInfluencer2)); + assertFalse(bucketInfluencer2.equals(bucketInfluencer1)); + } + + public void testId() { + BucketInfluencer influencer = new BucketInfluencer("job-foo", new Date(1000), 300L); + assertEquals("job-foo_bucket_influencer_1000_300", influencer.getId()); + + influencer.setInfluencerFieldName("field-with-influence"); + assertEquals("job-foo_bucket_influencer_1000_300_field-with-influence", influencer.getId()); + } + + public void testStrictParser() throws IOException { + String json = "{\"job_id\":\"job_1\", \"timestamp\": 123544456, \"bucket_span\": 3600, \"foo\":\"bar\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> BucketInfluencer.STRICT_PARSER.apply(parser, null)); + + assertThat(e.getMessage(), containsString("unknown field [foo]")); + } + } + + public void testLenientParser() throws IOException { + String json = "{\"job_id\":\"job_1\", \"timestamp\": 123544456, \"bucket_span\": 3600, \"foo\":\"bar\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + BucketInfluencer.LENIENT_PARSER.apply(parser, null); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/results/InfluencerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/results/InfluencerTests.java new file mode 100644 index 0000000000000..7b2da1d795539 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/results/InfluencerTests.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.job.results; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.io.IOException; +import java.util.Date; +import java.util.Objects; + +public class InfluencerTests extends AbstractSerializingTestCase { + + public Influencer createTestInstance(String jobId) { + Influencer influencer = new Influencer(jobId, randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20), + new Date(randomNonNegativeLong()), randomNonNegativeLong()); + influencer.setInterim(randomBoolean()); + influencer.setInfluencerScore(randomDouble()); + influencer.setInitialInfluencerScore(randomDouble()); + influencer.setProbability(randomDouble()); + return influencer; + } + @Override + protected Influencer createTestInstance() { + return createTestInstance(randomAlphaOfLengthBetween(1, 20)); + } + + @Override + protected Reader instanceReader() { + return Influencer::new; + } + + @Override + protected Influencer doParseInstance(XContentParser parser) { + return Influencer.LENIENT_PARSER.apply(parser, null); + } + + public void testToXContentIncludesNameValueField() throws IOException { + Influencer influencer = createTestInstance("foo"); + BytesReference bytes = XContentHelper.toXContent(influencer, XContentType.JSON, false); + XContentParser parser = createParser(XContentType.JSON.xContent(), bytes); + String serialisedFieldName = (String) parser.map().get(influencer.getInfluencerFieldName()); + assertNotNull(serialisedFieldName); + assertEquals(influencer.getInfluencerFieldValue(), serialisedFieldName); + } + + public void testToXContentDoesNotIncludeNameValueFieldWhenReservedWord() throws IOException { + Influencer influencer = new Influencer("foo", Influencer.INFLUENCER_SCORE.getPreferredName(), "bar", new Date(), 300L); + BytesReference bytes = XContentHelper.toXContent(influencer, XContentType.JSON, false); + XContentParser parser = createParser(XContentType.JSON.xContent(), bytes); + Object serialisedFieldValue = parser.map().get(Influencer.INFLUENCER_SCORE.getPreferredName()); + assertNotNull(serialisedFieldValue); + assertNotEquals("bar", serialisedFieldValue); + } + + public void testId() { + String influencerFieldValue = "wopr"; + Influencer influencer = new Influencer("job-foo", "host", influencerFieldValue, new Date(1000), 300L); + int valueHash = Objects.hashCode(influencerFieldValue); + assertEquals("job-foo_influencer_1000_300_host_" + valueHash + "_" + influencerFieldValue.length(), influencer.getId()); + } + + public void testLenientParser() throws IOException { + String json = "{\"job_id\":\"job_1\", \"timestamp\": 123544456, \"bucket_span\": 3600," + + "\"influencer_field_name\":\"foo_1\", \"influencer_field_value\": \"foo_2\", \"foo\":\"bar\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) { + Influencer.LENIENT_PARSER.apply(parser, null); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/notifications/AuditMessageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/notifications/AuditMessageTests.java new file mode 100644 index 0000000000000..f5c30c5900394 --- /dev/null +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/notifications/AuditMessageTests.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.notifications; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.junit.Before; + +import java.util.Date; + +public class AuditMessageTests extends AbstractSerializingTestCase { + private long startMillis; + + @Before + public void setStartTime() { + startMillis = System.currentTimeMillis(); + } + + public void testNewInfo() { + AuditMessage info = AuditMessage.newInfo("foo", "some info", "some_node"); + assertEquals("foo", info.getJobId()); + assertEquals("some info", info.getMessage()); + assertEquals(Level.INFO, info.getLevel()); + assertDateBetweenStartAndNow(info.getTimestamp()); + } + + public void testNewWarning() { + AuditMessage warning = AuditMessage.newWarning("bar", "some warning", "some_node"); + assertEquals("bar", warning.getJobId()); + assertEquals("some warning", warning.getMessage()); + assertEquals(Level.WARNING, warning.getLevel()); + assertDateBetweenStartAndNow(warning.getTimestamp()); + } + + + public void testNewError() { + AuditMessage error = AuditMessage.newError("foo", "some error", "some_node"); + assertEquals("foo", error.getJobId()); + assertEquals("some error", error.getMessage()); + assertEquals(Level.ERROR, error.getLevel()); + assertDateBetweenStartAndNow(error.getTimestamp()); + } + + public void testNewActivity() { + AuditMessage error = AuditMessage.newActivity("foo", "some error", "some_node"); + assertEquals("foo", error.getJobId()); + assertEquals("some error", error.getMessage()); + assertEquals(Level.ACTIVITY, error.getLevel()); + assertDateBetweenStartAndNow(error.getTimestamp()); + } + + private void assertDateBetweenStartAndNow(Date timestamp) { + long timestampMillis = timestamp.getTime(); + assertTrue(timestampMillis >= startMillis); + assertTrue(timestampMillis <= System.currentTimeMillis()); + } + + @Override + protected AuditMessage doParseInstance(XContentParser parser) { + return AuditMessage.PARSER.apply(parser, null); + } + + @Override + protected AuditMessage createTestInstance() { + return new AuditMessage(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 200), + randomFrom(Level.values()), randomAlphaOfLengthBetween(1, 20)); + } + + @Override + protected Reader instanceReader() { + return AuditMessage::new; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/monitoring/test/MockPainlessScriptEngine.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/monitoring/test/MockPainlessScriptEngine.java new file mode 100644 index 0000000000000..6c0e5e156cac1 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/monitoring/test/MockPainlessScriptEngine.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.monitoring.test; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.MockScriptPlugin; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.script.SearchScript; + +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.function.Function; + +/** + * A mock script engine that registers itself under the 'painless' name so that watches that use it can still be used in tests. + */ +public class MockPainlessScriptEngine extends MockScriptEngine { + + public static final String NAME = "painless"; + + public static class TestPlugin extends MockScriptPlugin { + @Override + public ScriptEngine getScriptEngine(Settings settings, Collection<ScriptContext<?>> contexts) { + return new MockPainlessScriptEngine(); + } + + @Override + protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { + return Collections.emptyMap(); + } + } + + @Override + public String getType() { + return NAME; + } + + @Override + public <T> T compile(String name, String script, ScriptContext<T> context, Map<String, String> options) { + MockCompiledScript compiledScript = new MockCompiledScript(name, options, script, p -> script); + if (context.instanceClazz.equals(ExecutableScript.class)) { + return context.factoryClazz.cast((ExecutableScript.Factory) compiledScript::createExecutableScript); + } else if (context.instanceClazz.equals(SearchScript.class)) { + return context.factoryClazz.cast((SearchScript.Factory) compiledScript::createSearchScript); + } + throw new IllegalArgumentException("mock painless does not know how to handle context [" + context.name + "]"); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java new file mode 100644 index 0000000000000..7522f474e77b2 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java @@ -0,0 +1,128 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.rollup; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.rollup.job.DateHistoGroupConfig; +import org.elasticsearch.xpack.core.rollup.job.GroupConfig; +import org.elasticsearch.xpack.core.rollup.job.HistoGroupConfig; +import org.elasticsearch.xpack.core.rollup.job.MetricConfig; +import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; +import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig; + +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +public class ConfigTestHelpers { + + public static RollupJobConfig.Builder getRollupJob(String jobId) { + RollupJobConfig.Builder builder = new RollupJobConfig.Builder(); + builder.setId(jobId); + builder.setCron(getCronString()); + builder.setTimeout(new TimeValue(ESTestCase.randomIntBetween(1,100))); + builder.setIndexPattern(ESTestCase.randomAlphaOfLengthBetween(1,10)); + builder.setRollupIndex(ESTestCase.randomAlphaOfLengthBetween(1,10)); + builder.setGroupConfig(ConfigTestHelpers.getGroupConfig().build()); + builder.setPageSize(ESTestCase.randomIntBetween(1,10)); + if (ESTestCase.randomBoolean()) { + List metrics = IntStream.range(1, ESTestCase.randomIntBetween(1,10)) + .mapToObj(n -> ConfigTestHelpers.getMetricConfig().build()) + .collect(Collectors.toList()); + + builder.setMetricsConfig(metrics); + } + return builder; + } + + public static GroupConfig.Builder getGroupConfig() { + GroupConfig.Builder groupBuilder = new GroupConfig.Builder(); + groupBuilder.setDateHisto(getDateHisto().build()); + if (ESTestCase.randomBoolean()) { + groupBuilder.setHisto(getHisto().build()); + } + if (ESTestCase.randomBoolean()) { + groupBuilder.setTerms(getTerms().build()); + } + return groupBuilder; + } + + public static MetricConfig.Builder getMetricConfig() { + MetricConfig.Builder builder = new MetricConfig.Builder(); + builder.setField(ESTestCase.randomAlphaOfLength(15)); // large names so we don't accidentally collide + List metrics = new ArrayList<>(); + if (ESTestCase.randomBoolean()) { + metrics.add("min"); + } + if (ESTestCase.randomBoolean()) { + metrics.add("max"); + } + if (ESTestCase.randomBoolean()) { + metrics.add("sum"); + } + if (ESTestCase.randomBoolean()) { + metrics.add("avg"); + } + if (ESTestCase.randomBoolean()) { + metrics.add("value_count"); + } + if (metrics.size() == 0) { + metrics.add("min"); + } + builder.setMetrics(metrics); + return builder; + } + + private static final String[] TIME_SUFFIXES = new String[]{"d", "h", "ms", "s", "m"}; + public static String randomPositiveTimeValue() { + return ESTestCase.randomIntBetween(1, 1000) + ESTestCase.randomFrom(TIME_SUFFIXES); + } + + public static DateHistoGroupConfig.Builder getDateHisto() { + DateHistoGroupConfig.Builder dateHistoBuilder = new DateHistoGroupConfig.Builder(); + dateHistoBuilder.setInterval(new DateHistogramInterval(randomPositiveTimeValue())); + if (ESTestCase.randomBoolean()) { + dateHistoBuilder.setTimeZone(ESTestCase.randomDateTimeZone()); + } + if (ESTestCase.randomBoolean()) { + dateHistoBuilder.setDelay(new DateHistogramInterval(randomPositiveTimeValue())); + } + dateHistoBuilder.setField(ESTestCase.randomAlphaOfLengthBetween(1, 10 )); + return dateHistoBuilder; + } + + public static HistoGroupConfig.Builder getHisto() { + HistoGroupConfig.Builder histoBuilder = new 
HistoGroupConfig.Builder(); + histoBuilder.setInterval(ESTestCase.randomIntBetween(1,10000)); + histoBuilder.setFields(getFields()); + return histoBuilder; + } + + public static TermsGroupConfig.Builder getTerms() { + TermsGroupConfig.Builder builder = new TermsGroupConfig.Builder(); + builder.setFields(getFields()); + return builder; + } + + public static List getFields() { + return IntStream.range(0, ESTestCase.randomIntBetween(1,10)) + .mapToObj(n -> ESTestCase.randomAlphaOfLengthBetween(1,10)) + .collect(Collectors.toList()); + } + + public static String getCronString() { + return (ESTestCase.randomBoolean() ? "*" : String.valueOf(ESTestCase.randomIntBetween(0, 59))) + //second + " " + (ESTestCase.randomBoolean() ? "*" : String.valueOf(ESTestCase.randomIntBetween(0, 59))) + //minute + " " + (ESTestCase.randomBoolean() ? "*" : String.valueOf(ESTestCase.randomIntBetween(0, 23))) + //hour + " " + (ESTestCase.randomBoolean() ? "*" : String.valueOf(ESTestCase.randomIntBetween(1, 31))) + //day of month + " " + (ESTestCase.randomBoolean() ? "*" : String.valueOf(ESTestCase.randomIntBetween(1, 12))) + //month + " ?" + //day of week + " " + (ESTestCase.randomBoolean() ? "*" : String.valueOf(ESTestCase.randomIntBetween(1970, 2199))); //year + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/RollupRestTestStateCleaner.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/RollupRestTestStateCleaner.java new file mode 100644 index 0000000000000..5c8bdd30ea523 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/RollupRestTestStateCleaner.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
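(Editor's aside: `getCronString()` above assembles a seven-field, Quartz-style expression consisting of second, minute, hour, day-of-month, month, a literal `?` for day-of-week, and year, so it can produce values such as `* 26 3 * 11 ? 2107`. A hedged usage sketch follows; the test method is hypothetical and would need to live in an `ESTestCase` subclass, since the helper relies on the randomized-testing context.)

```java
// Hypothetical test method exercising the helper added in this diff.
public void testCronStringShape() {
    String cron = ConfigTestHelpers.getCronString();
    String[] fields = cron.split(" ");
    assertEquals(7, fields.length);   // second minute hour day-of-month month day-of-week year
    assertEquals("?", fields[5]);     // day-of-week is always the literal "?"
}
```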
+ */ +package org.elasticsearch.xpack.core.rollup; + +import org.apache.http.HttpStatus; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.rollup.job.RollupJob; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.junit.Assert.assertEquals; + +public class RollupRestTestStateCleaner { + + private final Logger logger; + private final RestClient adminClient; + private final ESRestTestCase testCase; + + public RollupRestTestStateCleaner(Logger logger, RestClient adminClient, ESRestTestCase testCase) { + this.logger = logger; + this.adminClient = adminClient; + this.testCase = testCase; + } + + public void clearRollupMetadata() throws Exception { + deleteAllJobs(); + waitForPendingTasks(); + // indices will be deleted by the ESIntegTestCase class + } + + private void waitForPendingTasks() throws Exception { + ESTestCase.assertBusy(() -> { + try { + Response response = adminClient.performRequest("GET", "/_cat/tasks", + Collections.singletonMap("detailed", "true")); + if (response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) { + try (BufferedReader responseReader = new BufferedReader( + new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8))) { + int activeTasks = 0; + String line; + StringBuilder tasksListString = new StringBuilder(); + while ((line = responseReader.readLine()) != null) { + + // We only care about Rollup jobs, otherwise this fails too easily due to unrelated tasks + if (line.startsWith(RollupJob.NAME) == true) { + activeTasks++; + tasksListString.append(line); + tasksListString.append('\n'); + } + } + assertEquals(activeTasks + " active tasks found:\n" + tasksListString, 0, activeTasks); + } + } + } catch (IOException e) { + throw new AssertionError("Error getting active tasks list", e); + } + }); + } + + @SuppressWarnings("unchecked") + private void deleteAllJobs() throws Exception { + Response response = adminClient.performRequest("GET", "/_xpack/rollup/job/_all"); + Map<String, Object> jobs = testCase.entityAsMap(response); + @SuppressWarnings("unchecked") + List<Map<String, Object>> jobConfigs = + (List<Map<String, Object>>) XContentMapValues.extractValue("jobs", jobs); + + if (jobConfigs == null) { + return; + } + + for (Map<String, Object> jobConfig : jobConfigs) { + logger.debug(jobConfig); + String jobId = (String) ((Map) jobConfig.get("config")).get("id"); + logger.debug("Deleting job " + jobId); + try { + response = adminClient.performRequest("DELETE", "/_xpack/rollup/job/" + jobId); + } catch (Exception e) { + // ok + } + } + } + + private static String responseEntityToString(Response response) throws Exception { + try (BufferedReader reader = new BufferedReader(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8))) { + return reader.lines().collect(Collectors.joining("\n")); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/DateHistoGroupConfigSerializingTests.java 
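(Editor's aside: a hedged sketch of how `RollupRestTestStateCleaner` might be wired into a REST test's cleanup. The `@After` hook and the `logger`/`adminClient()` members inherited from `ESRestTestCase` are assumptions about the calling test class, not something this diff shows.)

```java
// Hypothetical cleanup hook in a rollup REST test (requires org.junit.After):
@After
public void clearRollupState() throws Exception {
    new RollupRestTestStateCleaner(logger, adminClient(), this).clearRollupMetadata();
}
```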
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/DateHistoGroupConfigSerializingTests.java new file mode 100644 index 0000000000000..034f16a100084 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/DateHistoGroupConfigSerializingTests.java @@ -0,0 +1,145 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class DateHistoGroupConfigSerializingTests extends AbstractSerializingTestCase { + @Override + protected DateHistoGroupConfig doParseInstance(XContentParser parser) throws IOException { + return DateHistoGroupConfig.PARSER.apply(parser, null).build(); + } + + @Override + protected Writeable.Reader instanceReader() { + return DateHistoGroupConfig::new; + } + + @Override + protected DateHistoGroupConfig createTestInstance() { + return ConfigTestHelpers.getDateHisto().build(); + } + + public void testValidateNoMapping() throws IOException { + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + DateHistoGroupConfig config = new DateHistoGroupConfig.Builder() + .setField("my_field") + .setInterval(new DateHistogramInterval("1d")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().get(0), equalTo("Could not find a [date] field with name [my_field] in any of the " + + "indices matching the index pattern.")); + } + + public void testValidateNomatchingField() throws IOException { + + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + // Have to mock fieldcaps because the ctor's aren't public... + FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + responseMap.put("some_other_field", Collections.singletonMap("date", fieldCaps)); + + DateHistoGroupConfig config = new DateHistoGroupConfig.Builder() + .setField("my_field") + .setInterval(new DateHistogramInterval("1d")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().get(0), equalTo("Could not find a [date] field with name [my_field] in any of the " + + "indices matching the index pattern.")); + } + + public void testValidateFieldWrongType() throws IOException { + + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + // Have to mock fieldcaps because the ctor's aren't public... 
+ FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + responseMap.put("my_field", Collections.singletonMap("keyword", fieldCaps)); + + DateHistoGroupConfig config = new DateHistoGroupConfig.Builder() + .setField("my_field") + .setInterval(new DateHistogramInterval("1d")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().get(0), equalTo("The field referenced by a date_histo group must be a [date] type across all " + + "indices in the index pattern. Found: [keyword] for field [my_field]")); + } + + public void testValidateFieldMixtureTypes() throws IOException { + + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + // Have to mock fieldcaps because the ctor's aren't public... + FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + Map types = new HashMap<>(2); + types.put("date", fieldCaps); + types.put("keyword", fieldCaps); + responseMap.put("my_field", types); + + DateHistoGroupConfig config = new DateHistoGroupConfig.Builder() + .setField("my_field") + .setInterval(new DateHistogramInterval("1d")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().get(0), equalTo("The field referenced by a date_histo group must be a [date] type across all " + + "indices in the index pattern. Found: [date, keyword] for field [my_field]")); + } + + public void testValidateFieldMatchingNotAggregatable() throws IOException { + + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + // Have to mock fieldcaps because the ctor's aren't public... + FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + when(fieldCaps.isAggregatable()).thenReturn(false); + responseMap.put("my_field", Collections.singletonMap("date", fieldCaps)); + + DateHistoGroupConfig config = new DateHistoGroupConfig.Builder() + .setField("my_field") + .setInterval(new DateHistogramInterval("1d")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().get(0), equalTo("The field [my_field] must be aggregatable across all indices, but is not.")); + } + + public void testValidateMatchingField() throws IOException { + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + // Have to mock fieldcaps because the ctor's aren't public... + FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + when(fieldCaps.isAggregatable()).thenReturn(true); + responseMap.put("my_field", Collections.singletonMap("date", fieldCaps)); + + DateHistoGroupConfig config = new DateHistoGroupConfig.Builder() + .setField("my_field") + .setInterval(new DateHistogramInterval("1d")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().size(), equalTo(0)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/GroupConfigSerializingTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/GroupConfigSerializingTests.java new file mode 100644 index 0000000000000..c220f10aeab27 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/GroupConfigSerializingTests.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; + +import java.io.IOException; + +public class GroupConfigSerializingTests extends AbstractSerializingTestCase { + @Override + protected GroupConfig doParseInstance(XContentParser parser) throws IOException { + return GroupConfig.PARSER.apply(parser, null).build(); + } + + @Override + protected Writeable.Reader instanceReader() { + return GroupConfig::new; + } + + @Override + protected GroupConfig createTestInstance() { + return ConfigTestHelpers.getGroupConfig().build(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/HistoGroupConfigSerializingTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/HistoGroupConfigSerializingTests.java new file mode 100644 index 0000000000000..18a64bc2adfd6 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/HistoGroupConfigSerializingTests.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class HistoGroupConfigSerializingTests extends AbstractSerializingTestCase { + @Override + protected HistoGroupConfig doParseInstance(XContentParser parser) throws IOException { + return HistoGroupConfig.PARSER.apply(parser, null).build(); + } + + @Override + protected Writeable.Reader instanceReader() { + return HistoGroupConfig::new; + } + + @Override + protected HistoGroupConfig createTestInstance() { + return ConfigTestHelpers.getHisto().build(); + } + + public void testValidateNoMapping() throws IOException { + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + HistoGroupConfig config = new HistoGroupConfig.Builder() + .setFields(Collections.singletonList("my_field")) + .setInterval(123) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().get(0), equalTo("Could not find a [numeric] field with name [my_field] in any of the " + + "indices matching the index pattern.")); + } + + public void testValidateNomatchingField() throws IOException { + + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + // Have to mock fieldcaps because the ctor's aren't public... 
+ FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + responseMap.put("some_other_field", Collections.singletonMap("long", fieldCaps)); + + HistoGroupConfig config = new HistoGroupConfig.Builder() + .setFields(Collections.singletonList("my_field")) + .setInterval(123) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().get(0), equalTo("Could not find a [numeric] field with name [my_field] in any of the " + + "indices matching the index pattern.")); + } + + public void testValidateFieldWrongType() throws IOException { + + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + // Have to mock fieldcaps because the ctor's aren't public... + FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + responseMap.put("my_field", Collections.singletonMap("keyword", fieldCaps)); + + HistoGroupConfig config = new HistoGroupConfig.Builder() + .setFields(Collections.singletonList("my_field")) + .setInterval(123) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().get(0), equalTo("The field referenced by a histo group must be a [numeric] type, but " + + "found [keyword] for field [my_field]")); + } + + public void testValidateFieldMatchingNotAggregatable() throws IOException { + + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + // Have to mock fieldcaps because the ctor's aren't public... + FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + when(fieldCaps.isAggregatable()).thenReturn(false); + responseMap.put("my_field", Collections.singletonMap("long", fieldCaps)); + + HistoGroupConfig config = new HistoGroupConfig.Builder() + .setFields(Collections.singletonList("my_field")) + .setInterval(123) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().get(0), equalTo("The field [my_field] must be aggregatable across all indices, but is not.")); + } + + public void testValidateMatchingField() throws IOException { + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + // Have to mock fieldcaps because the ctor's aren't public... + FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + when(fieldCaps.isAggregatable()).thenReturn(true); + responseMap.put("my_field", Collections.singletonMap("long", fieldCaps)); + + HistoGroupConfig config = new HistoGroupConfig.Builder() + .setFields(Collections.singletonList("my_field")) + .setInterval(123) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().size(), equalTo(0)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/IndexerStateEnumTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/IndexerStateEnumTests.java new file mode 100644 index 0000000000000..ec17a37e23b2b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/IndexerStateEnumTests.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class IndexerStateEnumTests extends ESTestCase { + + public void testValidOrdinals() { + assertThat(IndexerState.STARTED.ordinal(), equalTo(0)); + assertThat(IndexerState.INDEXING.ordinal(), equalTo(1)); + assertThat(IndexerState.STOPPING.ordinal(), equalTo(2)); + assertThat(IndexerState.STOPPED.ordinal(), equalTo(3)); + assertThat(IndexerState.ABORTING.ordinal(), equalTo(4)); + } + + public void testwriteTo() throws Exception { + try (BytesStreamOutput out = new BytesStreamOutput()) { + IndexerState.STARTED.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(in.readVInt(), equalTo(0)); + } + } + + try (BytesStreamOutput out = new BytesStreamOutput()) { + IndexerState.INDEXING.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(in.readVInt(), equalTo(1)); + } + } + + try (BytesStreamOutput out = new BytesStreamOutput()) { + IndexerState.STOPPING.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(in.readVInt(), equalTo(2)); + } + } + + try (BytesStreamOutput out = new BytesStreamOutput()) { + IndexerState.STOPPED.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(in.readVInt(), equalTo(3)); + } + } + + try (BytesStreamOutput out = new BytesStreamOutput()) { + IndexerState.ABORTING.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(in.readVInt(), equalTo(4)); + } + } + } + + public void testReadFrom() throws Exception { + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(0); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(IndexerState.fromStream(in), equalTo(IndexerState.STARTED)); + } + } + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(1); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(IndexerState.fromStream(in), equalTo(IndexerState.INDEXING)); + } + } + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(2); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(IndexerState.fromStream(in), equalTo(IndexerState.STOPPING)); + } + } + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(3); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(IndexerState.fromStream(in), equalTo(IndexerState.STOPPED)); + } + } + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(4); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(IndexerState.fromStream(in), equalTo(IndexerState.ABORTING)); + } + } + } + + public void testInvalidReadFrom() throws Exception { + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(randomIntBetween(3, Integer.MAX_VALUE)); + try (StreamInput in = out.bytes().streamInput()) { + IndexerState.fromStream(in); + fail("Expected IOException"); + } catch(IOException e) { + assertThat(e.getMessage(), containsString("Unknown IndexerState ordinal [")); + } + + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/JobWrapperSerializingTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/JobWrapperSerializingTests.java 
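(Editor's aside: the testwriteTo/testReadFrom tests above pin the wire format to the enum ordinal by checking raw vints; a direct round-trip is implied but not spelled out. The sketch below uses only the classes already imported in that test file; the method itself is hypothetical.)

```java
// Hypothetical companion test: round-trip every IndexerState through the stream format.
public void testRoundTrip() throws Exception {
    for (IndexerState state : IndexerState.values()) {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            state.writeTo(out);                                   // ordinal written as a vint
            try (StreamInput in = out.bytes().streamInput()) {
                assertThat(IndexerState.fromStream(in), equalTo(state));
            }
        }
    }
}
```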
new file mode 100644
index 0000000000000..5515aeaaf4829
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/JobWrapperSerializingTests.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.rollup.job;
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractSerializingTestCase;
+import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers;
+import org.elasticsearch.xpack.core.rollup.action.GetRollupJobsAction;
+
+import java.io.IOException;
+import java.util.Collections;
+
+public class JobWrapperSerializingTests extends AbstractSerializingTestCase<GetRollupJobsAction.JobWrapper> {
+    @Override
+    protected GetRollupJobsAction.JobWrapper doParseInstance(XContentParser parser) throws IOException {
+        return GetRollupJobsAction.JobWrapper.PARSER.apply(parser, null);
+    }
+
+    @Override
+    protected Writeable.Reader<GetRollupJobsAction.JobWrapper> instanceReader() {
+        return GetRollupJobsAction.JobWrapper::new;
+    }
+
+    @Override
+    protected GetRollupJobsAction.JobWrapper createTestInstance() {
+        IndexerState state = null;
+        int num = randomIntBetween(0,3);
+        if (num == 0) {
+            state = IndexerState.STOPPED;
+        } else if (num == 1) {
+            state = IndexerState.STARTED;
+        } else if (num == 2) {
+            state = IndexerState.STOPPING;
+        } else if (num == 3) {
+            state = IndexerState.ABORTING;
+        }
+
+        return new GetRollupJobsAction.JobWrapper(ConfigTestHelpers.getRollupJob(randomAlphaOfLength(5)).build(),
+            new RollupJobStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()),
+            new RollupJobStatus(state, Collections.emptyMap()));
+    }
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/MetricsConfigSerializingTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/MetricsConfigSerializingTests.java
new file mode 100644
index 0000000000000..92a0976f532b7
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/MetricsConfigSerializingTests.java
@@ -0,0 +1,195 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class MetricsConfigSerializingTests extends AbstractSerializingTestCase { + @Override + protected MetricConfig doParseInstance(XContentParser parser) throws IOException { + return MetricConfig.PARSER.apply(parser, null); + } + + @Override + protected Writeable.Reader instanceReader() { + return MetricConfig::new; + } + + @Override + protected MetricConfig createTestInstance() { + return ConfigTestHelpers.getMetricConfig().build(); + } + + public void testValidateNoMapping() throws IOException { + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + MetricConfig config = new MetricConfig.Builder() + .setField("my_field") + .setMetrics(Collections.singletonList("max")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().get(0), equalTo("Could not find a [numeric] field with name [my_field] in any of the " + + "indices matching the index pattern.")); + } + + public void testValidateNomatchingField() throws IOException { + + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + // Have to mock fieldcaps because the ctor's aren't public... + FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + responseMap.put("some_other_field", Collections.singletonMap("date", fieldCaps)); + + MetricConfig config = new MetricConfig.Builder() + .setField("my_field") + .setMetrics(Collections.singletonList("max")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().get(0), equalTo("Could not find a [numeric] field with name [my_field] in any of the " + + "indices matching the index pattern.")); + } + + public void testValidateFieldWrongType() throws IOException { + + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + // Have to mock fieldcaps because the ctor's aren't public... + FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + responseMap.put("my_field", Collections.singletonMap("keyword", fieldCaps)); + + MetricConfig config = new MetricConfig.Builder() + .setField("my_field") + .setMetrics(Collections.singletonList("max")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().get(0), equalTo("The field referenced by a metric group must be a [numeric] type, " + + "but found [keyword] for field [my_field]")); + } + + public void testValidateFieldMatchingNotAggregatable() throws IOException { + + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + // Have to mock fieldcaps because the ctor's aren't public... 
+ FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + when(fieldCaps.isAggregatable()).thenReturn(false); + responseMap.put("my_field", Collections.singletonMap("long", fieldCaps)); + + MetricConfig config = new MetricConfig.Builder() + .setField("my_field") + .setMetrics(Collections.singletonList("max")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().get(0), equalTo("The field [my_field] must be aggregatable across all indices, but is not.")); + } + + public void testValidateMatchingField() throws IOException { + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + // Have to mock fieldcaps because the ctor's aren't public... + FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + when(fieldCaps.isAggregatable()).thenReturn(true); + responseMap.put("my_field", Collections.singletonMap("long", fieldCaps)); + + MetricConfig config = new MetricConfig.Builder() + .setField("my_field") + .setMetrics(Collections.singletonList("max")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().size(), equalTo(0)); + + + fieldCaps = mock(FieldCapabilities.class); + when(fieldCaps.isAggregatable()).thenReturn(true); + responseMap.put("my_field", Collections.singletonMap("double", fieldCaps)); + config = new MetricConfig.Builder() + .setField("my_field") + .setMetrics(Collections.singletonList("max")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().size(), equalTo(0)); + + fieldCaps = mock(FieldCapabilities.class); + when(fieldCaps.isAggregatable()).thenReturn(true); + responseMap.put("my_field", Collections.singletonMap("float", fieldCaps)); + config = new MetricConfig.Builder() + .setField("my_field") + .setMetrics(Collections.singletonList("max")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().size(), equalTo(0)); + + fieldCaps = mock(FieldCapabilities.class); + when(fieldCaps.isAggregatable()).thenReturn(true); + responseMap.put("my_field", Collections.singletonMap("short", fieldCaps)); + config = new MetricConfig.Builder() + .setField("my_field") + .setMetrics(Collections.singletonList("max")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().size(), equalTo(0)); + + fieldCaps = mock(FieldCapabilities.class); + when(fieldCaps.isAggregatable()).thenReturn(true); + responseMap.put("my_field", Collections.singletonMap("byte", fieldCaps)); + config = new MetricConfig.Builder() + .setField("my_field") + .setMetrics(Collections.singletonList("max")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().size(), equalTo(0)); + + fieldCaps = mock(FieldCapabilities.class); + when(fieldCaps.isAggregatable()).thenReturn(true); + responseMap.put("my_field", Collections.singletonMap("half_float", fieldCaps)); + config = new MetricConfig.Builder() + .setField("my_field") + .setMetrics(Collections.singletonList("max")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().size(), equalTo(0)); + + fieldCaps = mock(FieldCapabilities.class); + when(fieldCaps.isAggregatable()).thenReturn(true); + responseMap.put("my_field", Collections.singletonMap("scaled_float", fieldCaps)); + config = new MetricConfig.Builder() + .setField("my_field") + .setMetrics(Collections.singletonList("max")) + .build(); + config.validateMappings(responseMap, e); + 
assertThat(e.validationErrors().size(), equalTo(0)); + + fieldCaps = mock(FieldCapabilities.class); + when(fieldCaps.isAggregatable()).thenReturn(true); + responseMap.put("my_field", Collections.singletonMap("integer", fieldCaps)); + config = new MetricConfig.Builder() + .setField("my_field") + .setMetrics(Collections.singletonList("max")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().size(), equalTo(0)); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfigTests.java new file mode 100644 index 0000000000000..a5f03aab51830 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfigTests.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; +import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; + +import static org.hamcrest.Matchers.equalTo; + + +public class RollupJobConfigTests extends AbstractSerializingTestCase { + + @Override + protected RollupJobConfig createTestInstance() { + return ConfigTestHelpers.getRollupJob(randomAlphaOfLengthBetween(1,10)).build(); + } + + @Override + protected Writeable.Reader instanceReader() { + return RollupJobConfig::new; + } + + @Override + protected RollupJobConfig doParseInstance(XContentParser parser) { + return RollupJobConfig.PARSER.apply(parser, null).build(); + } + + public void testEmptyIndexPattern() { + RollupJobConfig.Builder builder = ConfigTestHelpers.getRollupJob(randomAlphaOfLengthBetween(1, 10)); + builder.setIndexPattern(null); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, builder::build); + assertThat(e.getMessage(), equalTo("An index pattern is mandatory.")); + + builder = ConfigTestHelpers.getRollupJob(randomAlphaOfLengthBetween(1, 10)); + builder.setIndexPattern(""); + e = expectThrows(IllegalArgumentException.class, builder::build); + assertThat(e.getMessage(), equalTo("An index pattern is mandatory.")); + } + + public void testEmptyCron() { + RollupJobConfig.Builder builder = ConfigTestHelpers.getRollupJob(randomAlphaOfLengthBetween(1, 10)); + builder.setCron(null); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, builder::build); + assertThat(e.getMessage(), equalTo("A cron schedule is mandatory.")); + + builder = ConfigTestHelpers.getRollupJob(randomAlphaOfLengthBetween(1, 10)); + builder.setCron(""); + e = expectThrows(IllegalArgumentException.class, builder::build); + assertThat(e.getMessage(), equalTo("A cron schedule is mandatory.")); + } + + public void testEmptyID() { + RollupJobConfig.Builder builder = ConfigTestHelpers.getRollupJob(randomAlphaOfLengthBetween(1, 10)); + builder.setId(null); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, builder::build); + assertThat(e.getMessage(), equalTo("An ID is mandatory.")); + + builder = ConfigTestHelpers.getRollupJob(randomAlphaOfLengthBetween(1, 
10));
+        builder.setId("");
+        e = expectThrows(IllegalArgumentException.class, builder::build);
+        assertThat(e.getMessage(), equalTo("An ID is mandatory."));
+    }
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatsTests.java
new file mode 100644
index 0000000000000..0091b21dc40d0
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatsTests.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.rollup.job;
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractSerializingTestCase;
+import org.elasticsearch.xpack.core.rollup.job.RollupJobStats;
+
+public class RollupJobStatsTests extends AbstractSerializingTestCase<RollupJobStats> {
+
+    @Override
+    protected RollupJobStats createTestInstance() {
+        return randomStats();
+    }
+
+    @Override
+    protected Writeable.Reader<RollupJobStats> instanceReader() {
+        return RollupJobStats::new;
+    }
+
+    @Override
+    protected RollupJobStats doParseInstance(XContentParser parser) {
+        return RollupJobStats.fromXContent(parser);
+    }
+
+    public static RollupJobStats randomStats() {
+        return new RollupJobStats(randomNonNegativeLong(), randomNonNegativeLong(),
+            randomNonNegativeLong(), randomNonNegativeLong());
+    }
+}
+
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatusTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatusTests.java
new file mode 100644
index 0000000000000..cc9b9e81cfbe9
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatusTests.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.rollup.job;
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractSerializingTestCase;
+import org.elasticsearch.xpack.core.rollup.job.IndexerState;
+import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class RollupJobStatusTests extends AbstractSerializingTestCase<RollupJobStatus> {
+    private Map<String, Object> randomPosition() {
+        if (randomBoolean()) {
+            return null;
+        }
+        int numFields = randomIntBetween(1, 5);
+        Map<String, Object> position = new HashMap<>();
+        for (int i = 0; i < numFields; i++) {
+            Object value;
+            if (randomBoolean()) {
+                value = randomLong();
+            } else {
+                value = randomAlphaOfLengthBetween(1, 10);
+            }
+            position.put(randomAlphaOfLengthBetween(3, 10), value);
+        }
+        return position;
+    }
+
+    @Override
+    protected RollupJobStatus createTestInstance() {
+        return new RollupJobStatus(randomFrom(IndexerState.values()), randomPosition());
+    }
+
+    @Override
+    protected Writeable.Reader<RollupJobStatus> instanceReader() {
+        return RollupJobStatus::new;
+    }
+
+    @Override
+    protected RollupJobStatus doParseInstance(XContentParser parser) {
+        return RollupJobStatus.fromXContent(parser);
+    }
+
+}
+
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobTests.java
new file mode 100644
index 0000000000000..915cfc2fe3575
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobTests.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.Diffable; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractDiffableSerializationTestCase; +import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +public class RollupJobTests extends AbstractDiffableSerializationTestCase { + @Override + protected Writeable.Reader diffReader() { + return RollupJob::readJobDiffFrom; + } + + @Override + protected ToXContent doParseInstance(XContentParser parser) throws IOException { + return RollupJob.fromXContent(parser); + } + + @Override + protected Writeable.Reader instanceReader() { + return RollupJob::new; + } + + @Override + protected Writeable createTestInstance() { + if (randomBoolean()) { + return new RollupJob(ConfigTestHelpers.getRollupJob(randomAlphaOfLength(5)).build(), null); + } + + Map headers = Collections.emptyMap(); + if (randomBoolean()) { + headers = new HashMap<>(1); + headers.put("foo", "bar"); + } + return new RollupJob(ConfigTestHelpers.getRollupJob(randomAlphaOfLength(5)).build(), headers); + } + + @Override + protected Diffable makeTestChanges(Diffable testInstance) { + RollupJob other = (RollupJob) testInstance; + if (randomBoolean()) { + if (other.getHeaders().isEmpty()) { + Map headers = new HashMap<>(1); + headers.put("foo", "bar"); + return new RollupJob(other.getConfig(), headers); + } else { + return new RollupJob(other.getConfig(), null); + } + } else { + return new RollupJob(ConfigTestHelpers.getRollupJob(randomAlphaOfLength(5)).build(), other.getHeaders()); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfigSerializingTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfigSerializingTests.java new file mode 100644 index 0000000000000..140b0d9c04b95 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfigSerializingTests.java @@ -0,0 +1,158 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.rollup.job; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; +import org.elasticsearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; +import org.elasticsearch.xpack.core.rollup.RollupField; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TermsGroupConfigSerializingTests extends AbstractSerializingTestCase { + + private static final List FLOAT_TYPES = Arrays.asList("half_float", "float", "double", "scaled_float"); + private static final List NATURAL_TYPES = Arrays.asList("byte", "short", "integer", "long"); + + @Override + protected TermsGroupConfig doParseInstance(XContentParser parser) throws IOException { + return TermsGroupConfig.PARSER.apply(parser, null).build(); + } + + @Override + protected Writeable.Reader instanceReader() { + return TermsGroupConfig::new; + } + + @Override + protected TermsGroupConfig createTestInstance() { + return ConfigTestHelpers.getTerms().build(); + } + + public void testValidateNoMapping() throws IOException { + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + TermsGroupConfig config = new TermsGroupConfig.Builder() + .setFields(Collections.singletonList("my_field")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().get(0), equalTo("Could not find a [numeric] or [keyword/text] field with name " + + "[my_field] in any of the indices matching the index pattern.")); + } + + public void testValidateNomatchingField() throws IOException { + + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + // Have to mock fieldcaps because the ctor's aren't public... + FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + responseMap.put("some_other_field", Collections.singletonMap("keyword", fieldCaps)); + + TermsGroupConfig config = new TermsGroupConfig.Builder() + .setFields(Collections.singletonList("my_field")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().get(0), equalTo("Could not find a [numeric] or [keyword/text] field with name " + + "[my_field] in any of the indices matching the index pattern.")); + } + + public void testValidateFieldWrongType() throws IOException { + + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + // Have to mock fieldcaps because the ctor's aren't public... 
+ FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + responseMap.put("my_field", Collections.singletonMap("geo_point", fieldCaps)); + + TermsGroupConfig config = new TermsGroupConfig.Builder() + .setFields(Collections.singletonList("my_field")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().get(0), equalTo("The field referenced by a terms group must be a [numeric] or " + + "[keyword/text] type, but found [geo_point] for field [my_field]")); + } + + public void testValidateFieldMatchingNotAggregatable() throws IOException { + + + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + + // Have to mock fieldcaps because the ctor's aren't public... + FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + when(fieldCaps.isAggregatable()).thenReturn(false); + responseMap.put("my_field", Collections.singletonMap(getRandomType(), fieldCaps)); + + TermsGroupConfig config = new TermsGroupConfig.Builder() + .setFields(Collections.singletonList("my_field")) + .build(); + config.validateMappings(responseMap, e); + assertThat(e.validationErrors().get(0), equalTo("The field [my_field] must be aggregatable across all indices, but is not.")); + } + + public void testValidateMatchingField() throws IOException { + ActionRequestValidationException e = new ActionRequestValidationException(); + Map> responseMap = new HashMap<>(); + String type = getRandomType(); + + // Have to mock fieldcaps because the ctor's aren't public... + FieldCapabilities fieldCaps = mock(FieldCapabilities.class); + when(fieldCaps.isAggregatable()).thenReturn(true); + responseMap.put("my_field", Collections.singletonMap(type, fieldCaps)); + + TermsGroupConfig config = new TermsGroupConfig.Builder() + .setFields(Collections.singletonList("my_field")) + .build(); + config.validateMappings(responseMap, e); + if (e.validationErrors().size() != 0) { + fail(e.getMessage()); + } + + List> builders = config.toBuilders(); + assertThat(builders.size(), equalTo(1)); + } + + private String getRandomType() { + int n = randomIntBetween(0,8); + if (n == 0) { + return "keyword"; + } else if (n == 1) { + return "text"; + } else if (n == 2) { + return "long"; + } else if (n == 3) { + return "integer"; + } else if (n == 4) { + return "short"; + } else if (n == 5) { + return "float"; + } else if (n == 6) { + return "double"; + } else if (n == 7) { + return "scaled_float"; + } else if (n == 8) { + return "half_float"; + } + return "long"; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/audit/logfile/CapturingLogger.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/audit/logfile/CapturingLogger.java new file mode 100644 index 0000000000000..2091f8fb75fde --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/audit/logfile/CapturingLogger.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.audit.logfile; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.appender.AbstractAppender; +import org.apache.logging.log4j.core.config.Configuration; +import org.apache.logging.log4j.core.config.LoggerConfig; +import org.apache.logging.log4j.core.filter.RegexFilter; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.logging.Loggers; + +import java.util.ArrayList; +import java.util.List; + +public class CapturingLogger { + + public static Logger newCapturingLogger(final Level level) throws IllegalAccessException { + final StackTraceElement caller = Thread.currentThread().getStackTrace()[2]; + final String name = caller.getClassName() + "." + caller.getMethodName() + "." + level.toString(); + final Logger logger = ESLoggerFactory.getLogger(name); + Loggers.setLevel(logger, level); + final MockAppender appender = new MockAppender(name); + appender.start(); + Loggers.addAppender(logger, appender); + return logger; + } + + private static MockAppender getMockAppender(final String name) { + final LoggerContext ctx = (LoggerContext) LogManager.getContext(false); + final Configuration config = ctx.getConfiguration(); + final LoggerConfig loggerConfig = config.getLoggerConfig(name); + return (MockAppender) loggerConfig.getAppenders().get(name); + } + + public static boolean isEmpty(final String name) { + final MockAppender appender = getMockAppender(name); + return appender.isEmpty(); + } + + public static List output(final String name, final Level level) { + final MockAppender appender = getMockAppender(name); + return appender.output(level); + } + + private static class MockAppender extends AbstractAppender { + + public final List error = new ArrayList<>(); + public final List warn = new ArrayList<>(); + public final List info = new ArrayList<>(); + public final List debug = new ArrayList<>(); + public final List trace = new ArrayList<>(); + + private MockAppender(final String name) throws IllegalAccessException { + super(name, RegexFilter.createFilter(".*(\n.*)*", new String[0], false, null, null), null); + } + + @Override + public void append(LogEvent event) { + switch (event.getLevel().toString()) { + // we can not keep a reference to the event here because Log4j is using a thread + // local instance under the hood + case "ERROR": + error.add(event.getMessage().getFormattedMessage()); + break; + case "WARN": + warn.add(event.getMessage().getFormattedMessage()); + break; + case "INFO": + info.add(event.getMessage().getFormattedMessage()); + break; + case "DEBUG": + debug.add(event.getMessage().getFormattedMessage()); + break; + case "TRACE": + trace.add(event.getMessage().getFormattedMessage()); + break; + default: + throw invalidLevelException(event.getLevel()); + } + } + + private IllegalArgumentException invalidLevelException(Level level) { + return new IllegalArgumentException("invalid level, expected [ERROR|WARN|INFO|DEBUG|TRACE] but was [" + level + "]"); + } + + public boolean isEmpty() { + return error.isEmpty() && warn.isEmpty() && info.isEmpty() && debug.isEmpty() && trace.isEmpty(); + } + + public List output(Level level) { + switch (level.toString()) { + case "ERROR": + return error; + case "WARN": + return warn; + case "INFO": + return info; + case "DEBUG": + return debug; + 
case "TRACE": + return trace; + default: + throw invalidLevelException(level); + } + } + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModelPredicateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModelPredicateTests.java new file mode 100644 index 0000000000000..1754e2f8c0235 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModelPredicateTests.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression.FieldValue; + +import java.math.BigInteger; +import java.util.function.Predicate; + +import static org.hamcrest.Matchers.is; + +public class ExpressionModelPredicateTests extends ESTestCase { + + public void testNullValue() throws Exception { + final Predicate predicate = ExpressionModel.buildPredicate(null); + assertThat(predicate.test(new FieldValue(null)), is(true)); + assertThat(predicate.test(new FieldValue("")), is(false)); + assertThat(predicate.test(new FieldValue(1)), is(false)); + assertThat(predicate.test(new FieldValue(true)), is(false)); + } + + public void testBooleanValue() throws Exception { + final boolean matchValue = randomBoolean(); + final Predicate predicate = ExpressionModel.buildPredicate(matchValue); + assertThat(predicate.test(new FieldValue(matchValue)), is(true)); + Object value = !matchValue; + assertThat(predicate.test(new FieldValue(value)), is(false)); + assertThat(predicate.test(new FieldValue(String.valueOf(matchValue))), is(false)); + assertThat(predicate.test(new FieldValue("")), is(false)); + assertThat(predicate.test(new FieldValue(1)), is(false)); + assertThat(predicate.test(new FieldValue(null)), is(false)); + } + + public void testLongValue() throws Exception { + final int intValue = randomInt(); + final long longValue = intValue; + final Predicate predicate = ExpressionModel.buildPredicate(longValue); + + assertThat(predicate.test(new FieldValue(longValue)), is(true)); + assertThat(predicate.test(new FieldValue(intValue)), is(true)); + assertThat(predicate.test(new FieldValue(new BigInteger(String.valueOf(longValue)))), is(true)); + + assertThat(predicate.test(new FieldValue(longValue - 1)), is(false)); + assertThat(predicate.test(new FieldValue(intValue + 1)), is(false)); + assertThat(predicate.test(new FieldValue(String.valueOf(longValue))), is(false)); + assertThat(predicate.test(new FieldValue("")), is(false)); + assertThat(predicate.test(new FieldValue(true)), is(false)); + assertThat(predicate.test(new FieldValue(null)), is(false)); + } + + public void testSimpleAutomatonValue() throws Exception { + final String prefix = randomAlphaOfLength(3); + FieldValue fieldValue = new FieldValue(prefix + "*"); + + assertThat(ExpressionModel.buildPredicate(prefix).test(fieldValue), is(true)); + assertThat(ExpressionModel.buildPredicate(prefix + randomAlphaOfLengthBetween(1, 5)).test(fieldValue), is(true)); + + assertThat(ExpressionModel.buildPredicate("_" + 
prefix).test(fieldValue), is(false)); + assertThat(ExpressionModel.buildPredicate(prefix.substring(0, 1)).test(fieldValue), is(false)); + + assertThat(ExpressionModel.buildPredicate("").test(fieldValue), is(false)); + assertThat(ExpressionModel.buildPredicate(1).test(fieldValue), is(false)); + assertThat(ExpressionModel.buildPredicate(true).test(fieldValue), is(false)); + assertThat(ExpressionModel.buildPredicate(null).test(fieldValue), is(false)); + } + + public void testEmptyStringValue() throws Exception { + final Predicate predicate = ExpressionModel.buildPredicate(""); + + assertThat(predicate.test(new FieldValue("")), is(true)); + + assertThat(predicate.test(new FieldValue(randomAlphaOfLengthBetween(1, 3))), is(false)); + assertThat(predicate.test(new FieldValue(1)), is(false)); + assertThat(predicate.test(new FieldValue(true)), is(false)); + assertThat(predicate.test(new FieldValue(null)), is(false)); + } + + public void testRegexAutomatonValue() throws Exception { + final String substring = randomAlphaOfLength(5); + final FieldValue fieldValue = new FieldValue("/.*" + substring + ".*/"); + + assertThat(ExpressionModel.buildPredicate(substring).test(fieldValue), is(true)); + assertThat(ExpressionModel.buildPredicate(randomAlphaOfLengthBetween(2, 4) + substring + randomAlphaOfLengthBetween(1, 5)) + .test(fieldValue), is(true)); + + assertThat(ExpressionModel.buildPredicate(substring.substring(1, 3)).test(fieldValue), is(false)); + + assertThat(ExpressionModel.buildPredicate("").test(fieldValue), is(false)); + assertThat(ExpressionModel.buildPredicate(1).test(fieldValue), is(false)); + assertThat(ExpressionModel.buildPredicate(true).test(fieldValue), is(false)); + assertThat(ExpressionModel.buildPredicate(null).test(fieldValue), is(false)); + } + +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParserTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParserTests.java new file mode 100644 index 0000000000000..070f36b72e920 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParserTests.java @@ -0,0 +1,164 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl; + +import com.carrotsearch.randomizedtesting.WriterOutputStream; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.XPackClientPlugin; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression.FieldValue; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; + +import java.io.IOException; +import java.io.StringWriter; +import java.util.Arrays; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.iterableWithSize; +import static org.hamcrest.Matchers.notNullValue; + +public class ExpressionParserTests extends ESTestCase { + + public void testParseSimpleFieldExpression() throws Exception { + String json = "{ \"field\": { \"username\" : \"*@shield.gov\" } }"; + FieldExpression field = checkExpressionType(parse(json), FieldExpression.class); + assertThat(field.getField(), equalTo("username")); + assertThat(field.getValues(), iterableWithSize(1)); + final FieldValue value = field.getValues().get(0); + assertThat(value.getValue(), equalTo("*@shield.gov")); + assertThat(value.getAutomaton(), notNullValue()); + assertThat(value.getAutomaton().run("bob@shield.gov"), equalTo(true)); + assertThat(value.getAutomaton().run("bob@example.net"), equalTo(false)); + assertThat(json(field), equalTo(json.replaceAll("\\s", ""))); + } + + public void testParseComplexExpression() throws Exception { + String json = "{ \"any\": [" + + " { \"field\": { \"username\" : \"*@shield.gov\" } }, " + + " { \"all\": [" + + " { \"field\": { \"username\" : \"/.*\\\\@avengers\\\\.(net|org)/\" } }, " + + " { \"field\": { \"groups\" : [ \"admin\", \"operators\" ] } }, " + + " { \"except\":" + + " { \"field\": { \"groups\" : \"disavowed\" } }" + + " }" + + " ] }" + + "] }"; + final RoleMapperExpression expr = parse(json); + + assertThat(expr, instanceOf(AnyExpression.class)); + AnyExpression any = (AnyExpression) expr; + + assertThat(any.getElements(), iterableWithSize(2)); + + final FieldExpression fieldShield = checkExpressionType(any.getElements().get(0), + FieldExpression.class); + assertThat(fieldShield.getField(), equalTo("username")); + assertThat(fieldShield.getValues(), iterableWithSize(1)); + final FieldValue valueShield = fieldShield.getValues().get(0); + assertThat(valueShield.getValue(), equalTo("*@shield.gov")); + assertThat(valueShield.getAutomaton(), notNullValue()); + assertThat(valueShield.getAutomaton().run("fury@shield.gov"), equalTo(true)); + assertThat(valueShield.getAutomaton().run("fury@shield.net"), equalTo(false)); + + final AllExpression all = checkExpressionType(any.getElements().get(1), + AllExpression.class); + assertThat(all.getElements(), iterableWithSize(3)); + + final FieldExpression fieldAvengers = checkExpressionType(all.getElements().get(0), + FieldExpression.class); + assertThat(fieldAvengers.getField(), 
equalTo("username")); + assertThat(fieldAvengers.getValues(), iterableWithSize(1)); + final FieldValue valueAvengers = fieldAvengers.getValues().get(0); + assertThat(valueAvengers.getAutomaton().run("stark@avengers.net"), equalTo(true)); + assertThat(valueAvengers.getAutomaton().run("romanov@avengers.org"), equalTo(true)); + assertThat(valueAvengers.getAutomaton().run("fury@shield.gov"), equalTo(false)); + + final FieldExpression fieldGroupsAdmin = checkExpressionType(all.getElements().get(1), + FieldExpression.class); + assertThat(fieldGroupsAdmin.getField(), equalTo("groups")); + assertThat(fieldGroupsAdmin.getValues(), iterableWithSize(2)); + assertThat(fieldGroupsAdmin.getValues().get(0).getValue(), equalTo("admin")); + assertThat(fieldGroupsAdmin.getValues().get(1).getValue(), equalTo("operators")); + + final ExceptExpression except = checkExpressionType(all.getElements().get(2), + ExceptExpression.class); + final FieldExpression fieldDisavowed = checkExpressionType(except.getInnerExpression(), + FieldExpression.class); + assertThat(fieldDisavowed.getField(), equalTo("groups")); + assertThat(fieldDisavowed.getValues(), iterableWithSize(1)); + assertThat(fieldDisavowed.getValues().get(0).getValue(), equalTo("disavowed")); + + ExpressionModel hawkeye = new ExpressionModel(); + hawkeye.defineField("username", "hawkeye@avengers.org"); + hawkeye.defineField("groups", Arrays.asList("operators")); + assertThat(expr.match(hawkeye), equalTo(true)); + + ExpressionModel captain = new ExpressionModel(); + captain.defineField("username", "america@avengers.net"); + assertThat(expr.match(captain), equalTo(false)); + + ExpressionModel warmachine = new ExpressionModel(); + warmachine.defineField("username", "warmachine@avengers.net"); + warmachine.defineField("groups", Arrays.asList("admin", "disavowed")); + assertThat(expr.match(warmachine), equalTo(false)); + + ExpressionModel fury = new ExpressionModel(); + fury.defineField("username", "fury@shield.gov"); + fury.defineField("groups", Arrays.asList("classified", "directors")); + assertThat(expr.asPredicate().test(fury), equalTo(true)); + + assertThat(json(expr), equalTo(json.replaceAll("\\s", ""))); + } + + public void testWriteAndReadFromStream() throws Exception { + String json = "{ \"any\": [" + + " { \"field\": { \"username\" : \"*@shield.gov\" } }, " + + " { \"all\": [" + + " { \"field\": { \"username\" : \"/.*\\\\@avengers\\\\.(net|org)/\" } }, " + + " { \"field\": { \"groups\" : [ \"admin\", \"operators\" ] } }, " + + " { \"except\":" + + " { \"field\": { \"groups\" : \"disavowed\" } }" + + " }" + + " ] }" + + "] }"; + final RoleMapperExpression exprSource = parse(json); + final BytesStreamOutput out = new BytesStreamOutput(); + ExpressionParser.writeExpression(exprSource, out); + + final Settings settings = Settings.builder().put("path.home", createTempDir()).build(); + final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin(settings).getNamedWriteables()); + final NamedWriteableAwareStreamInput input = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry); + final RoleMapperExpression exprResult = ExpressionParser.readExpression(input); + assertThat(json(exprResult), equalTo(json.replaceAll("\\s", ""))); + } + + private T checkExpressionType(RoleMapperExpression expr, Class type) { + assertThat(expr, instanceOf(type)); + return type.cast(expr); + } + + private RoleMapperExpression parse(String json) throws IOException { + return new ExpressionParser().parse("rules", new XContentSource(new 
BytesArray(json), + XContentType.JSON)); + } + + private String json(RoleMapperExpression node) throws IOException { + final StringWriter writer = new StringWriter(); + try (XContentBuilder builder = XContentFactory.jsonBuilder(new WriterOutputStream(writer))) { + node.toXContent(builder, ToXContent.EMPTY_PARAMS); + } + return writer.toString(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java new file mode 100644 index 0000000000000..0fb3094fca5a1 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java @@ -0,0 +1,247 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authz.accesscontrol; + +import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.Bits; +import org.elasticsearch.core.internal.io.IOUtils; +import org.apache.lucene.util.TestUtil; +import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.cache.bitset.BitsetFilterCache; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetReader; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class DocumentSubsetReaderTests extends ESTestCase { + + private Directory directory; + private DirectoryReader directoryReader; + private BitsetFilterCache bitsetFilterCache; + + @Before + public void setUpDirectory() { + // We check it is empty at the end of the test, so make sure it is empty in the + // beginning as well so that we can easily distinguish from garbage added by + // this test and garbage not cleaned up by other tests. 
+ assertTrue(DocumentSubsetReader.NUM_DOCS_CACHE.toString(), + DocumentSubsetReader.NUM_DOCS_CACHE.isEmpty()); + directory = newDirectory(); + IndexSettings settings = IndexSettingsModule.newIndexSettings("_index", Settings.EMPTY); + bitsetFilterCache = new BitsetFilterCache(settings, new BitsetFilterCache.Listener() { + @Override + public void onCache(ShardId shardId, Accountable accountable) { + + } + + @Override + public void onRemoval(ShardId shardId, Accountable accountable) { + + } + }); + } + + @After + public void cleanDirectory() throws Exception { + if (directoryReader != null) { + directoryReader.close(); + } + assertTrue(DocumentSubsetReader.NUM_DOCS_CACHE.toString(), + DocumentSubsetReader.NUM_DOCS_CACHE.isEmpty()); + directory.close(); + bitsetFilterCache.close(); + } + + public void testSearch() throws Exception { + IndexWriter iw = new IndexWriter(directory, newIndexWriterConfig()); + + Document document = new Document(); + document.add(new StringField("field", "value1", Field.Store.NO)); + iw.addDocument(document); + + document = new Document(); + document.add(new StringField("field", "value2", Field.Store.NO)); + iw.addDocument(document); + + document = new Document(); + document.add(new StringField("field", "value3", Field.Store.NO)); + iw.addDocument(document); + + document = new Document(); + document.add(new StringField("field", "value4", Field.Store.NO)); + iw.addDocument(document); + + iw.forceMerge(1); + iw.deleteDocuments(new Term("field", "value3")); + iw.close(); + openDirectoryReader(); + + IndexSearcher indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache, + new TermQuery(new Term("field", "value1")))); + assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1)); + TopDocs result = indexSearcher.search(new MatchAllDocsQuery(), 1); + assertThat(result.totalHits, equalTo(1L)); + assertThat(result.scoreDocs[0].doc, equalTo(0)); + + indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache, + new TermQuery(new Term("field", "value2")))); + assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1)); + result = indexSearcher.search(new MatchAllDocsQuery(), 1); + assertThat(result.totalHits, equalTo(1L)); + assertThat(result.scoreDocs[0].doc, equalTo(1)); + + // this doc has been marked as deleted: + indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache, + new TermQuery(new Term("field", "value3")))); + assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(0)); + result = indexSearcher.search(new MatchAllDocsQuery(), 1); + assertThat(result.totalHits, equalTo(0L)); + + indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache, + new TermQuery(new Term("field", "value4")))); + assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1)); + result = indexSearcher.search(new MatchAllDocsQuery(), 1); + assertThat(result.totalHits, equalTo(1L)); + assertThat(result.scoreDocs[0].doc, equalTo(3)); + } + + public void testLiveDocs() throws Exception { + int numDocs = scaledRandomIntBetween(16, 128); + IndexWriter iw = new IndexWriter( + directory, + new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) + ); + + for (int i = 0; i < numDocs; i++) { + Document document = new Document(); + document.add(new StringField("field", "value" + i, Field.Store.NO)); + iw.addDocument(document); + } + + iw.forceMerge(1); + iw.close(); + + openDirectoryReader(); + assertThat("should 
have one segment after force merge", directoryReader.leaves().size(), equalTo(1)); + + for (int i = 0; i < numDocs; i++) { + Query roleQuery = new TermQuery(new Term("field", "value" + i)); + DirectoryReader wrappedReader = DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache, roleQuery); + + LeafReader leafReader = wrappedReader.leaves().get(0).reader(); + assertThat(leafReader.hasDeletions(), is(true)); + assertThat(leafReader.numDocs(), equalTo(1)); + Bits liveDocs = leafReader.getLiveDocs(); + assertThat(liveDocs.length(), equalTo(numDocs)); + for (int docId = 0; docId < numDocs; docId++) { + if (docId == i) { + assertThat("docId [" + docId +"] should match", liveDocs.get(docId), is(true)); + } else { + assertThat("docId [" + docId +"] should not match", liveDocs.get(docId), is(false)); + } + } + } + } + + public void testWrapTwice() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + iw.close(); + IndexSettings settings = IndexSettingsModule.newIndexSettings("_index", Settings.EMPTY); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(settings, new BitsetFilterCache.Listener() { + @Override + public void onCache(ShardId shardId, Accountable accountable) { + } + + @Override + public void onRemoval(ShardId shardId, Accountable accountable) { + } + }); + DirectoryReader directoryReader = DocumentSubsetReader.wrap(DirectoryReader.open(dir), bitsetFilterCache, new MatchAllDocsQuery()); + try { + DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache, new MatchAllDocsQuery()); + fail("shouldn't be able to wrap DocumentSubsetDirectoryReader twice"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("Can't wrap [class org.elasticsearch.xpack.core.security.authz.accesscontrol" + + ".DocumentSubsetReader$DocumentSubsetDirectoryReader] twice")); + } + + bitsetFilterCache.close(); + directoryReader.close(); + dir.close(); + } + + /** Same test as in FieldSubsetReaderTests, test that core cache key (needed for NRT) is working */ + public void testCoreCacheKey() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + iwc.setMaxBufferedDocs(100); + iwc.setMergePolicy(NoMergePolicy.INSTANCE); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add two docs, id:0 and id:1 + Document doc = new Document(); + Field idField = new StringField("id", "", Field.Store.NO); + doc.add(idField); + idField.setStringValue("0"); + iw.addDocument(doc); + idField.setStringValue("1"); + iw.addDocument(doc); + + // open reader + DirectoryReader ir = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(iw), new ShardId("_index", "_na_", 0)); + ir = DocumentSubsetReader.wrap(ir, bitsetFilterCache, new MatchAllDocsQuery()); + assertEquals(2, ir.numDocs()); + assertEquals(1, ir.leaves().size()); + + // delete id:0 and reopen + iw.deleteDocuments(new Term("id", "0")); + DirectoryReader ir2 = DirectoryReader.openIfChanged(ir); + + // we should have the same cache key as before + assertEquals(1, ir2.numDocs()); + assertEquals(1, ir2.leaves().size()); + assertSame(ir.leaves().get(0).reader().getCoreCacheHelper().getKey(), + ir2.leaves().get(0).reader().getCoreCacheHelper().getKey()); + // However we don't support caching on the reader cache key since we override deletes + assertNull(ir.leaves().get(0).reader().getReaderCacheHelper()); + assertNull(ir2.leaves().get(0).reader().getReaderCacheHelper()); + + 
TestUtil.checkReader(ir); + IOUtils.close(ir, ir2, iw, dir); + } + + private void openDirectoryReader() throws IOException { + directoryReader = DirectoryReader.open(directory); + directoryReader = ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId("_index", "_na_", 0)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java new file mode 100644 index 0000000000000..4c74e7f5d9059 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java @@ -0,0 +1,1177 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authz.accesscontrol; + +import org.apache.lucene.analysis.MockAnalyzer; +import org.apache.lucene.document.BinaryDocValuesField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.IntPoint; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.SortedDocValuesField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.document.StoredField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.Fields; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.index.PointValues.IntersectVisitor; +import org.apache.lucene.index.PointValues.Relation; +import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.index.TermsEnum.SeekStatus; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.core.internal.io.IOUtils; +import org.apache.lucene.util.TestUtil; +import org.apache.lucene.util.automaton.Automata; +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.CharacterRunAutomaton; +import org.apache.lucene.util.automaton.Operations; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; +import 
org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.mapper.FieldNamesFieldMapper; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; +import org.elasticsearch.xpack.core.security.support.Automatons; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.Matchers.equalTo; + +/** Simple tests for this filterreader */ +public class FieldSubsetReaderTests extends ESTestCase { + + /** + * test filtering two string fields + */ + public void testIndexed() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new Document(); + doc.add(new StringField("fieldA", "test", Field.Store.NO)); + doc.add(new StringField("fieldB", "test", Field.Store.NO)); + iw.addDocument(doc); + + // open reader + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); + + // see only one field + LeafReader segmentReader = ir.leaves().get(0).reader(); + Set seenFields = new HashSet<>(); + for (FieldInfo info : segmentReader.getFieldInfos()) { + seenFields.add(info.name); + } + assertEquals(Collections.singleton("fieldA"), seenFields); + assertNotNull(segmentReader.terms("fieldA")); + assertNull(segmentReader.terms("fieldB")); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** + * test filtering two int points + */ + public void testPoints() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 points + Document doc = new Document(); + doc.add(new IntPoint("fieldA", 1)); + doc.add(new IntPoint("fieldB", 2)); + iw.addDocument(doc); + + // open reader + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); + + // see only one field + LeafReader segmentReader = ir.leaves().get(0).reader(); + PointValues points = segmentReader.getPointValues("fieldA"); + assertNull(segmentReader.getPointValues("fieldB")); + + // size statistic + assertEquals(1, points.size()); + + // doccount statistic + assertEquals(1, points.getDocCount()); + + // min statistic + assertNotNull(points.getMinPackedValue()); + + // max statistic + assertNotNull(points.getMaxPackedValue()); + + // bytes per dimension + assertEquals(Integer.BYTES, points.getBytesPerDimension()); + + // number of dimensions + assertEquals(1, points.getNumDimensions()); + + // walk the trees: we should see stuff in fieldA + AtomicBoolean sawDoc = new AtomicBoolean(false); + points.intersect(new IntersectVisitor() { + @Override + public void visit(int docID) throws IOException { + throw new IllegalStateException("should not get here"); + } + + @Override + public void visit(int docID, byte[] packedValue) throws IOException { + sawDoc.set(true); + } + + @Override + public Relation compare(byte[] 
minPackedValue, byte[] maxPackedValue) { + return Relation.CELL_CROSSES_QUERY; + } + }); + assertTrue(sawDoc.get()); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** + * test filtering two stored fields (string) + */ + public void testStoredFieldsString() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new Document(); + doc.add(new StoredField("fieldA", "testA")); + doc.add(new StoredField("fieldB", "testB")); + iw.addDocument(doc); + + // open reader + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); + + // see only one field + Document d2 = ir.document(0); + assertEquals(1, d2.getFields().size()); + assertEquals("testA", d2.get("fieldA")); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** + * test filtering two stored fields (binary) + */ + public void testStoredFieldsBinary() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new Document(); + doc.add(new StoredField("fieldA", new BytesRef("testA"))); + doc.add(new StoredField("fieldB", new BytesRef("testB"))); + iw.addDocument(doc); + + // open reader + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); + + // see only one field + Document d2 = ir.document(0); + assertEquals(1, d2.getFields().size()); + assertEquals(new BytesRef("testA"), d2.getBinaryValue("fieldA")); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** + * test filtering two stored fields (int) + */ + public void testStoredFieldsInt() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new Document(); + doc.add(new StoredField("fieldA", 1)); + doc.add(new StoredField("fieldB", 2)); + iw.addDocument(doc); + + // open reader + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); + + // see only one field + Document d2 = ir.document(0); + assertEquals(1, d2.getFields().size()); + assertEquals(1, d2.getField("fieldA").numericValue()); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** + * test filtering two stored fields (long) + */ + public void testStoredFieldsLong() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new Document(); + doc.add(new StoredField("fieldA", 1L)); + doc.add(new StoredField("fieldB", 2L)); + iw.addDocument(doc); + + // open reader + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); + + // see only one field + Document d2 = ir.document(0); + assertEquals(1, d2.getFields().size()); + assertEquals(1L, d2.getField("fieldA").numericValue()); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** + * test filtering two stored fields (float) + */ + public void testStoredFieldsFloat() throws Exception { + Directory dir = newDirectory(); + 
IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new Document(); + doc.add(new StoredField("fieldA", 1F)); + doc.add(new StoredField("fieldB", 2F)); + iw.addDocument(doc); + + // open reader + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); + + // see only one field + Document d2 = ir.document(0); + assertEquals(1, d2.getFields().size()); + assertEquals(1F, d2.getField("fieldA").numericValue()); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** + * test filtering two stored fields (double) + */ + public void testStoredFieldsDouble() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new Document(); + doc.add(new StoredField("fieldA", 1D)); + doc.add(new StoredField("fieldB", 2D)); + iw.addDocument(doc); + + // open reader + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); + + // see only one field + Document d2 = ir.document(0); + assertEquals(1, d2.getFields().size()); + assertEquals(1D, d2.getField("fieldA").numericValue()); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** + * test filtering two vector fields + */ + public void testVectors() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new Document(); + FieldType ft = new FieldType(StringField.TYPE_NOT_STORED); + ft.setStoreTermVectors(true); + doc.add(new Field("fieldA", "testA", ft)); + doc.add(new Field("fieldB", "testB", ft)); + iw.addDocument(doc); + + // open reader + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); + + // see only one field + Fields vectors = ir.getTermVectors(0); + Set seenFields = new HashSet<>(); + for (String field : vectors) { + seenFields.add(field); + } + assertEquals(Collections.singleton("fieldA"), seenFields); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** + * test filtering two text fields + */ + public void testNorms() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new Document(); + doc.add(new TextField("fieldA", "test", Field.Store.NO)); + doc.add(new TextField("fieldB", "test", Field.Store.NO)); + iw.addDocument(doc); + + // open reader + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); + + // see only one field + LeafReader segmentReader = ir.leaves().get(0).reader(); + assertNotNull(segmentReader.getNormValues("fieldA")); + assertNull(segmentReader.getNormValues("fieldB")); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** + * test filtering two numeric dv fields + */ + public void testNumericDocValues() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new 
Document(); + doc.add(new NumericDocValuesField("fieldA", 1)); + doc.add(new NumericDocValuesField("fieldB", 2)); + iw.addDocument(doc); + + // open reader + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); + + // see only one field + LeafReader segmentReader = ir.leaves().get(0).reader(); + NumericDocValues values = segmentReader.getNumericDocValues("fieldA"); + assertNotNull(values); + assertTrue(values.advanceExact(0)); + assertEquals(1, values.longValue()); + assertNull(segmentReader.getNumericDocValues("fieldB")); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** + * test filtering two binary dv fields + */ + public void testBinaryDocValues() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new Document(); + doc.add(new BinaryDocValuesField("fieldA", new BytesRef("testA"))); + doc.add(new BinaryDocValuesField("fieldB", new BytesRef("testB"))); + iw.addDocument(doc); + + // open reader + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); + + // see only one field + LeafReader segmentReader = ir.leaves().get(0).reader(); + BinaryDocValues values = segmentReader.getBinaryDocValues("fieldA"); + assertNotNull(values); + assertTrue(values.advanceExact(0)); + assertEquals(new BytesRef("testA"), values.binaryValue()); + assertNull(segmentReader.getBinaryDocValues("fieldB")); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** + * test filtering two sorted dv fields + */ + public void testSortedDocValues() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new Document(); + doc.add(new SortedDocValuesField("fieldA", new BytesRef("testA"))); + doc.add(new SortedDocValuesField("fieldB", new BytesRef("testB"))); + iw.addDocument(doc); + + // open reader + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); + + // see only one field + LeafReader segmentReader = ir.leaves().get(0).reader(); + SortedDocValues values = segmentReader.getSortedDocValues("fieldA"); + assertNotNull(values); + assertTrue(values.advanceExact(0)); + assertEquals(new BytesRef("testA"), values.binaryValue()); + assertNull(segmentReader.getSortedDocValues("fieldB")); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** + * test filtering two sortedset dv fields + */ + public void testSortedSetDocValues() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new Document(); + doc.add(new SortedSetDocValuesField("fieldA", new BytesRef("testA"))); + doc.add(new SortedSetDocValuesField("fieldB", new BytesRef("testB"))); + iw.addDocument(doc); + + // open reader + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); + + // see only one field + LeafReader segmentReader = ir.leaves().get(0).reader(); + SortedSetDocValues dv = segmentReader.getSortedSetDocValues("fieldA"); + assertNotNull(dv); + 
assertTrue(dv.advanceExact(0)); + assertEquals(0, dv.nextOrd()); + assertEquals(SortedSetDocValues.NO_MORE_ORDS, dv.nextOrd()); + assertEquals(new BytesRef("testA"), dv.lookupOrd(0)); + assertNull(segmentReader.getSortedSetDocValues("fieldB")); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** + * test filtering two sortednumeric dv fields + */ + public void testSortedNumericDocValues() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new Document(); + doc.add(new SortedNumericDocValuesField("fieldA", 1)); + doc.add(new SortedNumericDocValuesField("fieldB", 2)); + iw.addDocument(doc); + + // open reader + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); + + // see only one field + LeafReader segmentReader = ir.leaves().get(0).reader(); + SortedNumericDocValues dv = segmentReader.getSortedNumericDocValues("fieldA"); + assertNotNull(dv); + assertTrue(dv.advanceExact(0)); + assertEquals(1, dv.docValueCount()); + assertEquals(1, dv.nextValue()); + assertNull(segmentReader.getSortedNumericDocValues("fieldB")); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** + * test we have correct fieldinfos metadata + */ + public void testFieldInfos() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new Document(); + doc.add(new StringField("fieldA", "test", Field.Store.NO)); + doc.add(new StringField("fieldB", "test", Field.Store.NO)); + iw.addDocument(doc); + + // open reader + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); + + // see only one field + LeafReader segmentReader = ir.leaves().get(0).reader(); + FieldInfos infos = segmentReader.getFieldInfos(); + assertEquals(1, infos.size()); + assertNotNull(infos.fieldInfo("fieldA")); + assertNull(infos.fieldInfo("fieldB")); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** + * test special handling for _source field. 
+ */ + public void testSourceFilteringIntegration() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new Document(); + doc.add(new StringField("fieldA", "testA", Field.Store.NO)); + doc.add(new StringField("fieldB", "testB", Field.Store.NO)); + byte bytes[] = "{\"fieldA\":\"testA\", \"fieldB\":\"testB\"}".getBytes(StandardCharsets.UTF_8); + doc.add(new StoredField(SourceFieldMapper.NAME, bytes, 0, bytes.length)); + iw.addDocument(doc); + + // open reader + Automaton automaton = Automatons.patterns(Arrays.asList("fieldA", SourceFieldMapper.NAME)); + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(automaton)); + + // see only one field + Document d2 = ir.document(0); + assertEquals(1, d2.getFields().size()); + assertEquals("{\"fieldA\":\"testA\"}", d2.getBinaryValue(SourceFieldMapper.NAME).utf8ToString()); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + public void testSourceFiltering() { + // include on top-level value + Map map = new HashMap<>(); + map.put("foo", 3); + map.put("bar", "baz"); + + CharacterRunAutomaton include = new CharacterRunAutomaton(Automata.makeString("foo")); + Map filtered = FieldSubsetReader.filter(map, include, 0); + Map expected = new HashMap<>(); + expected.put("foo", 3); + + assertEquals(expected, filtered); + + // include on inner wildcard + map = new HashMap<>(); + Map subMap = new HashMap<>(); + subMap.put("bar", 42); + subMap.put("baz", 6); + map.put("foo", subMap); + map.put("bar", "baz"); + + include = new CharacterRunAutomaton(Automatons.patterns("foo.*")); + filtered = FieldSubsetReader.filter(map, include, 0); + expected = new HashMap<>(); + expected.put("foo", subMap); + + assertEquals(expected, filtered); + + // include on leading wildcard + include = new CharacterRunAutomaton(Automatons.patterns("*.bar")); + filtered = FieldSubsetReader.filter(map, include, 0); + expected = new HashMap<>(); + subMap = new HashMap<>(); + subMap.put("bar", 42); + expected.put("foo", subMap); + + assertEquals(expected, filtered); + + // include on inner value + include = new CharacterRunAutomaton(Automatons.patterns("foo.bar")); + filtered = FieldSubsetReader.filter(map, include, 0); + + assertEquals(expected, filtered); + + // exclude on exact value + include = new CharacterRunAutomaton(Operations.minus( + Automata.makeAnyString(), Automatons.patterns("foo.bar"), + Operations.DEFAULT_MAX_DETERMINIZED_STATES)); + filtered = FieldSubsetReader.filter(map, include, 0); + expected = new HashMap<>(); + expected.put("bar", "baz"); + expected.put("foo", Collections.singletonMap("baz", 6)); + + assertEquals(expected, filtered); + + // exclude on wildcard + include = new CharacterRunAutomaton(Operations.minus( + Automata.makeAnyString(), Automatons.patterns("foo.*"), + Operations.DEFAULT_MAX_DETERMINIZED_STATES)); + filtered = FieldSubsetReader.filter(map, include, 0); + expected = Collections.singletonMap("bar", "baz"); + + assertEquals(expected, filtered); + + // include on inner array + map = new HashMap<>(); + List subArray = new ArrayList<>(); + subMap = new HashMap<>(); + subMap.put("bar", 42); + subMap.put("baz", "foo"); + subArray.add(subMap); + subArray.add(12); + map.put("foo", subArray); + + include = new CharacterRunAutomaton(Automatons.patterns("foo.bar")); + filtered = FieldSubsetReader.filter(map, include, 0); + expected = new HashMap<>(); + 
subArray = new ArrayList<>(); + subMap = new HashMap<>(); + subMap.put("bar", 42); + subArray.add(subMap); + expected.put("foo", subArray); + + assertEquals(expected, filtered); + + // include on inner array 2 + include = new CharacterRunAutomaton(Automatons.patterns("foo")); + filtered = FieldSubsetReader.filter(map, include, 0); + expected = new HashMap<>(); + subArray = new ArrayList<>(); + subArray.add(12); + expected.put("foo", subArray); + + assertEquals(expected, filtered); + + // exclude on inner array + include = new CharacterRunAutomaton(Operations.minus( + Automata.makeAnyString(), Automatons.patterns("foo.baz"), + Operations.DEFAULT_MAX_DETERMINIZED_STATES)); + filtered = FieldSubsetReader.filter(map, include, 0); + expected = new HashMap<>(); + subArray = new ArrayList<>(); + subMap = new HashMap<>(); + subMap.put("bar", 42); + subArray.add(subMap); + subArray.add(12); + expected.put("foo", subArray); + + assertEquals(expected, filtered); + + // exclude on inner array 2 + include = new CharacterRunAutomaton(Operations.minus( + Automata.makeAnyString(), Automatons.patterns("foo"), + Operations.DEFAULT_MAX_DETERMINIZED_STATES)); + filtered = FieldSubsetReader.filter(map, include, 0); + expected = new HashMap<>(); + subArray = new ArrayList<>(); + subMap = new HashMap<>(); + subMap.put("bar", 42); + subMap.put("baz", "foo"); + subArray.add(subMap); + expected.put("foo", subArray); + + assertEquals(expected, filtered); + } + + /** + * test special handling for _field_names field. + */ + public void testFieldNames() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new Document(); + doc.add(new StringField("fieldA", "test", Field.Store.NO)); + doc.add(new StringField("fieldB", "test", Field.Store.NO)); + doc.add(new StringField(FieldNamesFieldMapper.NAME, "fieldA", Field.Store.NO)); + doc.add(new StringField(FieldNamesFieldMapper.NAME, "fieldB", Field.Store.NO)); + iw.addDocument(doc); + + // open reader + Set fields = new HashSet<>(); + fields.add("fieldA"); + Automaton automaton = Automatons.patterns(Arrays.asList("fieldA", FieldNamesFieldMapper.NAME)); + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(automaton)); + + // see only one field + LeafReader segmentReader = ir.leaves().get(0).reader(); + Terms terms = segmentReader.terms(FieldNamesFieldMapper.NAME); + TermsEnum termsEnum = terms.iterator(); + assertEquals(new BytesRef("fieldA"), termsEnum.next()); + assertNull(termsEnum.next()); + + // seekExact + termsEnum = terms.iterator(); + assertTrue(termsEnum.seekExact(new BytesRef("fieldA"))); + assertFalse(termsEnum.seekExact(new BytesRef("fieldB"))); + + // seekCeil + termsEnum = terms.iterator(); + assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(new BytesRef("fieldA"))); + assertEquals(SeekStatus.NOT_FOUND, termsEnum.seekCeil(new BytesRef("field0000"))); + assertEquals(new BytesRef("fieldA"), termsEnum.term()); + assertEquals(SeekStatus.END, termsEnum.seekCeil(new BytesRef("fieldAAA"))); + assertEquals(SeekStatus.END, termsEnum.seekCeil(new BytesRef("fieldB"))); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** + * test special handling for _field_names field (three fields, to exercise termsenum better) + */ + public void testFieldNamesThreeFields() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new 
IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new Document(); + doc.add(new StringField("fieldA", "test", Field.Store.NO)); + doc.add(new StringField("fieldB", "test", Field.Store.NO)); + doc.add(new StringField("fieldC", "test", Field.Store.NO)); + doc.add(new StringField(FieldNamesFieldMapper.NAME, "fieldA", Field.Store.NO)); + doc.add(new StringField(FieldNamesFieldMapper.NAME, "fieldB", Field.Store.NO)); + doc.add(new StringField(FieldNamesFieldMapper.NAME, "fieldC", Field.Store.NO)); + iw.addDocument(doc); + + // open reader + Automaton automaton = Automatons.patterns(Arrays.asList("fieldA", "fieldC", FieldNamesFieldMapper.NAME)); + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(automaton)); + + // see only two fields + LeafReader segmentReader = ir.leaves().get(0).reader(); + Terms terms = segmentReader.terms(FieldNamesFieldMapper.NAME); + TermsEnum termsEnum = terms.iterator(); + assertEquals(new BytesRef("fieldA"), termsEnum.next()); + assertEquals(new BytesRef("fieldC"), termsEnum.next()); + assertNull(termsEnum.next()); + + // seekExact + termsEnum = terms.iterator(); + assertTrue(termsEnum.seekExact(new BytesRef("fieldA"))); + assertFalse(termsEnum.seekExact(new BytesRef("fieldB"))); + assertTrue(termsEnum.seekExact(new BytesRef("fieldC"))); + + // seekCeil + termsEnum = terms.iterator(); + assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(new BytesRef("fieldA"))); + assertEquals(SeekStatus.NOT_FOUND, termsEnum.seekCeil(new BytesRef("fieldB"))); + assertEquals(new BytesRef("fieldC"), termsEnum.term()); + assertEquals(SeekStatus.END, termsEnum.seekCeil(new BytesRef("fieldD"))); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** + * test _field_names where a field is permitted, but doesn't exist in the segment. 
+ */ + public void testFieldNamesMissing() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new Document(); + doc.add(new StringField("fieldA", "test", Field.Store.NO)); + doc.add(new StringField("fieldB", "test", Field.Store.NO)); + doc.add(new StringField(FieldNamesFieldMapper.NAME, "fieldA", Field.Store.NO)); + doc.add(new StringField(FieldNamesFieldMapper.NAME, "fieldB", Field.Store.NO)); + iw.addDocument(doc); + + // open reader + Automaton automaton = Automatons.patterns(Arrays.asList("fieldA", "fieldC", FieldNamesFieldMapper.NAME)); + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(automaton)); + + // see only one field + LeafReader segmentReader = ir.leaves().get(0).reader(); + Terms terms = segmentReader.terms(FieldNamesFieldMapper.NAME); + + // seekExact + TermsEnum termsEnum = terms.iterator(); + assertFalse(termsEnum.seekExact(new BytesRef("fieldC"))); + + // seekCeil + termsEnum = terms.iterator(); + assertEquals(SeekStatus.END, termsEnum.seekCeil(new BytesRef("fieldC"))); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** + * test where _field_names does not exist + */ + public void testFieldNamesOldIndex() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new Document(); + doc.add(new StringField("fieldA", "test", Field.Store.NO)); + doc.add(new StringField("fieldB", "test", Field.Store.NO)); + iw.addDocument(doc); + + // open reader + Automaton automaton = Automatons.patterns(Arrays.asList("fieldA", SourceFieldMapper.NAME)); + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(automaton)); + + // see only one field + LeafReader segmentReader = ir.leaves().get(0).reader(); + assertNull(segmentReader.terms(FieldNamesFieldMapper.NAME)); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** test that core cache key (needed for NRT) is working */ + public void testCoreCacheKey() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + iwc.setMaxBufferedDocs(100); + iwc.setMergePolicy(NoMergePolicy.INSTANCE); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add two docs, id:0 and id:1 + Document doc = new Document(); + Field idField = new StringField("id", "", Field.Store.NO); + doc.add(idField); + idField.setStringValue("0"); + iw.addDocument(doc); + idField.setStringValue("1"); + iw.addDocument(doc); + + // open reader + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("id"))); + assertEquals(2, ir.numDocs()); + assertEquals(1, ir.leaves().size()); + + // delete id:0 and reopen + iw.deleteDocuments(new Term("id", "0")); + DirectoryReader ir2 = DirectoryReader.openIfChanged(ir); + + // we should have the same cache key as before + assertEquals(1, ir2.numDocs()); + assertEquals(1, ir2.leaves().size()); + assertSame(ir.leaves().get(0).reader().getCoreCacheHelper().getKey(), + ir2.leaves().get(0).reader().getCoreCacheHelper().getKey()); + + TestUtil.checkReader(ir); + IOUtils.close(ir, ir2, iw, dir); + } + + /** + * test filtering the only vector fields + */ + public void testFilterAwayAllVectors() throws Exception { + 
Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + + // add document with 2 fields + Document doc = new Document(); + FieldType ft = new FieldType(StringField.TYPE_NOT_STORED); + ft.setStoreTermVectors(true); + doc.add(new Field("fieldA", "testA", ft)); + doc.add(new StringField("fieldB", "testB", Field.Store.NO)); // no vectors + iw.addDocument(doc); + + // open reader + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldB"))); + + // sees no fields + assertNull(ir.getTermVectors(0)); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + /** + * test filtering an index with no fields + */ + public void testEmpty() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + iw.addDocument(new Document()); + + // open reader + DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); + + // see no fields + LeafReader segmentReader = ir.leaves().get(0).reader(); + Set seenFields = new HashSet<>(); + for (FieldInfo info : segmentReader.getFieldInfos()) { + seenFields.add(info.name); + } + assertEquals(0, seenFields.size()); + assertNull(segmentReader.terms("foo")); + + // see no vectors + assertNull(segmentReader.getTermVectors(0)); + + // see no stored fields + Document document = segmentReader.document(0); + assertEquals(0, document.getFields().size()); + + TestUtil.checkReader(ir); + IOUtils.close(ir, iw, dir); + } + + public void testWrapTwice() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + IndexWriter iw = new IndexWriter(dir, iwc); + iw.close(); + + final DirectoryReader directoryReader = FieldSubsetReader.wrap(DirectoryReader.open(dir), + new CharacterRunAutomaton(Automata.makeString("fieldA"))); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> FieldSubsetReader.wrap(directoryReader, + new CharacterRunAutomaton(Automata.makeString("fieldA")))); + assertThat(e.getMessage(), equalTo("Can't wrap [class org.elasticsearch.xpack.core.security.authz.accesscontrol" + + ".FieldSubsetReader$FieldSubsetDirectoryReader] twice")); + directoryReader.close(); + dir.close(); + } + + @SuppressWarnings("unchecked") + public void testMappingsFilteringDuelWithSourceFiltering() throws Exception { + MetaData metaData = MetaData.builder() + .put(IndexMetaData.builder("index") + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)) + .putMapping("doc", MAPPING_TEST_ITEM)).build(); + + { + FieldPermissionsDefinition definition = new FieldPermissionsDefinition(new String[]{"*inner1"}, Strings.EMPTY_ARRAY); + FieldPermissions fieldPermissions = new FieldPermissions(definition); + ImmutableOpenMap> mappings = metaData.findMappings(new String[]{"index"}, + new String[]{"doc"}, index -> fieldPermissions::grantsAccessTo); + ImmutableOpenMap index = mappings.get("index"); + Map sourceAsMap = index.get("doc").getSourceAsMap(); + assertEquals(1, sourceAsMap.size()); + Map properties = (Map) sourceAsMap.get("properties"); + assertEquals(2, properties.size()); + Map objectMapping = (Map) properties.get("object"); + assertEquals(1, objectMapping.size()); + Map 
objectProperties = (Map) objectMapping.get("properties"); + assertEquals(1, objectProperties.size()); + assertTrue(objectProperties.containsKey("inner1")); + Map nestedMapping = (Map) properties.get("nested"); + assertEquals(2, nestedMapping.size()); + assertEquals("nested", nestedMapping.get("type")); + Map nestedProperties = (Map) nestedMapping.get("properties"); + assertEquals(1, nestedProperties.size()); + assertTrue(nestedProperties.containsKey("inner1")); + + Automaton automaton = FieldPermissions.initializePermittedFieldsAutomaton(definition); + CharacterRunAutomaton include = new CharacterRunAutomaton(automaton); + Map stringObjectMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), DOC_TEST_ITEM, false); + Map filtered = FieldSubsetReader.filter(stringObjectMap, include, 0); + assertEquals(2, filtered.size()); + Map object = (Map)filtered.get("object"); + assertEquals(1, object.size()); + assertTrue(object.containsKey("inner1")); + List> nested = (List>)filtered.get("nested"); + assertEquals(2, nested.size()); + for (Map objectMap : nested) { + assertEquals(1, objectMap.size()); + assertTrue(objectMap.containsKey("inner1")); + } + } + { + FieldPermissionsDefinition definition = new FieldPermissionsDefinition(new String[]{"object*"}, Strings.EMPTY_ARRAY); + FieldPermissions fieldPermissions = new FieldPermissions(definition); + ImmutableOpenMap> mappings = metaData.findMappings(new String[]{"index"}, + new String[]{"doc"}, index -> fieldPermissions::grantsAccessTo); + ImmutableOpenMap index = mappings.get("index"); + Map sourceAsMap = index.get("doc").getSourceAsMap(); + assertEquals(1, sourceAsMap.size()); + Map properties = (Map) sourceAsMap.get("properties"); + assertEquals(1, properties.size()); + Map objectMapping = (Map) properties.get("object"); + assertEquals(1, objectMapping.size()); + Map objectProperties = (Map) objectMapping.get("properties"); + assertEquals(2, objectProperties.size()); + Map inner1 = (Map) objectProperties.get("inner1"); + assertEquals(2, inner1.size()); + assertEquals("text", inner1.get("type")); + Map inner1Fields = (Map) inner1.get("fields"); + assertEquals(1, inner1Fields.size()); + Map inner1Keyword = (Map) inner1Fields.get("keyword"); + assertEquals(1, inner1Keyword.size()); + assertEquals("keyword", inner1Keyword.get("type")); + Map inner2 = (Map) objectProperties.get("inner2"); + assertEquals(1, inner2.size()); + assertEquals("keyword", inner2.get("type")); + + Automaton automaton = FieldPermissions.initializePermittedFieldsAutomaton(definition); + CharacterRunAutomaton include = new CharacterRunAutomaton(automaton); + Map stringObjectMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), DOC_TEST_ITEM, false); + Map filtered = FieldSubsetReader.filter(stringObjectMap, include, 0); + assertEquals(1, filtered.size()); + Map object = (Map)filtered.get("object"); + assertEquals(2, object.size()); + assertTrue(object.containsKey("inner1")); + assertTrue(object.containsKey("inner2")); + } + { + FieldPermissionsDefinition definition = new FieldPermissionsDefinition(new String[]{"object"}, Strings.EMPTY_ARRAY); + FieldPermissions fieldPermissions = new FieldPermissions(definition); + ImmutableOpenMap> mappings = metaData.findMappings(new String[]{"index"}, + new String[]{"doc"}, index -> fieldPermissions::grantsAccessTo); + ImmutableOpenMap index = mappings.get("index"); + Map sourceAsMap = index.get("doc").getSourceAsMap(); + assertEquals(1, sourceAsMap.size()); + Map properties = (Map) sourceAsMap.get("properties"); + 
assertEquals(1, properties.size()); + Map objectMapping = (Map) properties.get("object"); + assertEquals(1, objectMapping.size()); + Map objectProperties = (Map) objectMapping.get("properties"); + assertEquals(0, objectProperties.size()); + + Automaton automaton = FieldPermissions.initializePermittedFieldsAutomaton(definition); + CharacterRunAutomaton include = new CharacterRunAutomaton(automaton); + Map stringObjectMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), DOC_TEST_ITEM, false); + Map filtered = FieldSubsetReader.filter(stringObjectMap, include, 0); + //TODO FLS filters out empty objects from source, although they are granted access. + //When filtering mappings though we keep them. + assertEquals(0, filtered.size()); + /*assertEquals(1, filtered.size()); + Map object = (Map)filtered.get("object"); + assertEquals(0, object.size());*/ + } + { + FieldPermissionsDefinition definition = new FieldPermissionsDefinition(new String[]{"nested.inner2"}, Strings.EMPTY_ARRAY); + FieldPermissions fieldPermissions = new FieldPermissions(definition); + ImmutableOpenMap> mappings = metaData.findMappings(new String[]{"index"}, + new String[]{"doc"}, index -> fieldPermissions::grantsAccessTo); + ImmutableOpenMap index = mappings.get("index"); + Map sourceAsMap = index.get("doc").getSourceAsMap(); + assertEquals(1, sourceAsMap.size()); + Map properties = (Map) sourceAsMap.get("properties"); + assertEquals(1, properties.size()); + Map nestedMapping = (Map) properties.get("nested"); + assertEquals(2, nestedMapping.size()); + assertEquals("nested", nestedMapping.get("type")); + Map nestedProperties = (Map) nestedMapping.get("properties"); + assertEquals(1, nestedProperties.size()); + assertTrue(nestedProperties.containsKey("inner2")); + + Automaton automaton = FieldPermissions.initializePermittedFieldsAutomaton(definition); + CharacterRunAutomaton include = new CharacterRunAutomaton(automaton); + Map stringObjectMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), DOC_TEST_ITEM, false); + Map filtered = FieldSubsetReader.filter(stringObjectMap, include, 0); + assertEquals(1, filtered.size()); + List> nested = (List>)filtered.get("nested"); + assertEquals(2, nested.size()); + for (Map objectMap : nested) { + assertEquals(1, objectMap.size()); + assertTrue(objectMap.containsKey("inner2")); + } + } + } + + private static final String DOC_TEST_ITEM = "{\n" + + " \"field_text\" : \"text\",\n" + + " \"object\" : {\n" + + " \"inner1\" : \"text\",\n" + + " \"inner2\" : \"keyword\"\n" + + " },\n" + + " \"nested\" : [\n" + + " {\n" + + " \"inner1\" : 1,\n" + + " \"inner2\" : \"2017/12/12\"\n" + + " },\n" + + " {\n" + + " \"inner1\" : 2,\n" + + " \"inner2\" : \"2017/11/11\"\n" + + " }\n" + + " ]\n" + + "}"; + + private static final String MAPPING_TEST_ITEM = "{\n" + + " \"doc\": {\n" + + " \"properties\" : {\n" + + " \"field_text\" : {\n" + + " \"type\":\"text\"\n" + + " },\n" + + " \"object\" : {\n" + + " \"properties\" : {\n" + + " \"inner1\" : {\n" + + " \"type\": \"text\",\n" + + " \"fields\" : {\n" + + " \"keyword\" : {\n" + + " \"type\" : \"keyword\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"inner2\" : {\n" + + " \"type\": \"keyword\"\n" + + " }\n" + + " }\n" + + " },\n" + + " \"nested\" : {\n" + + " \"type\":\"nested\",\n" + + " \"properties\" : {\n" + + " \"inner1\" : {\n" + + " \"type\": \"integer\"\n" + + " },\n" + + " \"inner2\" : {\n" + + " \"type\": \"date\"\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + "}"; +} diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperIntegrationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperIntegrationTests.java new file mode 100644 index 0000000000000..08c1d010d99dd --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperIntegrationTests.java @@ -0,0 +1,162 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authz.accesscontrol; + +import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TotalHitCountCollector; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.Accountable; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.cache.bitset.BitsetFilterCache; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.query.ParsedQuery; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; + +import java.util.Collections; + +import static java.util.Collections.singleton; +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + +public class SecurityIndexSearcherWrapperIntegrationTests extends ESTestCase { + + public void testDLS() throws Exception { + ShardId shardId = new ShardId("_index", "_na_", 0); + MapperService mapperService = mock(MapperService.class); + ScriptService scriptService = mock(ScriptService.class); + when(mapperService.documentMapper()).thenReturn(null); + when(mapperService.simpleMatchToIndexNames(anyString())) + .then(invocationOnMock -> Collections.singletonList((String) 
invocationOnMock.getArguments()[0])); + + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + IndicesAccessControl.IndexAccessControl indexAccessControl = new IndicesAccessControl.IndexAccessControl(true, new + FieldPermissions(), + singleton(new BytesArray("{\"match_all\" : {}}"))); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), Settings.EMPTY); + Client client = mock(Client.class); + when(client.settings()).thenReturn(Settings.EMPTY); + final long nowInMillis = randomNonNegativeLong(); + QueryShardContext realQueryShardContext = new QueryShardContext(shardId.id(), indexSettings, null, null, mapperService, null, + null, xContentRegistry(), writableRegistry(), client, null, () -> nowInMillis, null); + QueryShardContext queryShardContext = spy(realQueryShardContext); + IndexSettings settings = IndexSettingsModule.newIndexSettings("_index", Settings.EMPTY); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(settings, new BitsetFilterCache.Listener() { + @Override + public void onCache(ShardId shardId, Accountable accountable) { + } + + @Override + public void onRemoval(ShardId shardId, Accountable accountable) { + + } + }); + XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true); + when(licenseState.isSecurityEnabled()).thenReturn(true); + SecurityIndexSearcherWrapper wrapper = new SecurityIndexSearcherWrapper(indexSettings, s -> queryShardContext, + bitsetFilterCache, threadContext, licenseState, scriptService) { + + @Override + protected IndicesAccessControl getIndicesAccessControl() { + return new IndicesAccessControl(true, singletonMap("_index", indexAccessControl)); + } + }; + + Directory directory = newDirectory(); + IndexWriter iw = new IndexWriter( + directory, + new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) + ); + + int numValues = scaledRandomIntBetween(2, 16); + String[] values = new String[numValues]; + for (int i = 0; i < numValues; i++) { + values[i] = "value" + i; + } + int[] valuesHitCount = new int[numValues]; + + int numDocs = scaledRandomIntBetween(32, 128); + int commitAfter = scaledRandomIntBetween(1, numDocs); + logger.info("Going to index [{}] documents with [{}] unique values and commit after [{}] documents have been indexed", + numDocs, numValues, commitAfter); + + for (int doc = 1; doc <= numDocs; doc++) { + int valueIndex = (numValues - 1) % doc; + + Document document = new Document(); + String id = String.valueOf(doc); + document.add(new StringField("id", id, Field.Store.NO)); + String value = values[valueIndex]; + document.add(new StringField("field", value, Field.Store.NO)); + iw.addDocument(document); + if (doc % 11 == 0) { + iw.deleteDocuments(new Term("id", id)); + } else { + if (commitAfter % commitAfter == 0) { + iw.commit(); + } + valuesHitCount[valueIndex]++; + } + } + iw.close(); + StringBuilder valueToHitCountOutput = new StringBuilder(); + for (int i = 0; i < numValues; i++) { + valueToHitCountOutput.append(values[i]).append('\t').append(valuesHitCount[i]).append('\n'); + } + logger.info("Value count matrix:\n{}", valueToHitCountOutput); + + DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(directory), shardId); + for (int i = 0; i < numValues; i++) { + ParsedQuery parsedQuery = new ParsedQuery(new TermQuery(new Term("field", values[i]))); + doReturn(new TermQueryBuilder("field", 
values[i])).when(queryShardContext).parseInnerQueryBuilder(any(XContentParser.class)); + when(queryShardContext.toFilter(new TermsQueryBuilder("field", values[i]))).thenReturn(parsedQuery); + DirectoryReader wrappedDirectoryReader = wrapper.wrap(directoryReader); + IndexSearcher indexSearcher = wrapper.wrap(new IndexSearcher(wrappedDirectoryReader)); + + int expectedHitCount = valuesHitCount[i]; + logger.info("Going to verify hit count with query [{}] with expected total hits [{}]", parsedQuery.query(), expectedHitCount); + TotalHitCountCollector countCollector = new TotalHitCountCollector(); + indexSearcher.search(new MatchAllDocsQuery(), countCollector); + assertThat(countCollector.getTotalHits(), equalTo(expectedHitCount)); + assertThat(wrappedDirectoryReader.numDocs(), equalTo(expectedHitCount)); + } + + bitsetFilterCache.close(); + directoryReader.close(); + directory.close(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java new file mode 100644 index 0000000000000..d02d7e5fe975c --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java @@ -0,0 +1,681 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.security.authz.accesscontrol; + +import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.Field.Store; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.BulkScorer; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.Weight; +import org.apache.lucene.search.join.ScoreMode; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.RAMDirectory; +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.BitSet; +import org.apache.lucene.util.FixedBitSet; +import org.elasticsearch.core.internal.io.IOUtils; +import org.apache.lucene.util.SparseFixedBitSet; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; 
+import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.cache.bitset.BitsetFilterCache; +import org.elasticsearch.index.mapper.FieldNamesFieldMapper; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.BoostingQueryBuilder; +import org.elasticsearch.index.query.ConstantScoreQueryBuilder; +import org.elasticsearch.index.query.GeoShapeQueryBuilder; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.TermsLookup; +import org.elasticsearch.join.query.HasChildQueryBuilder; +import org.elasticsearch.join.query.HasParentQueryBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.script.TemplateScript; +import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetReader.DocumentSubsetDirectoryReader; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; +import org.elasticsearch.xpack.core.security.user.User; +import org.junit.After; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.IdentityHashMap; +import java.util.Map; +import java.util.Set; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.security.authz.accesscontrol.SecurityIndexSearcherWrapper.intersectScorerAndRoleBits; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +public class SecurityIndexSearcherWrapperUnitTests extends ESTestCase { + + private static final Set META_FIELDS; + static { + final Set metaFields = new HashSet<>(Arrays.asList(MapperService.getAllMetaFields())); + metaFields.add(SourceFieldMapper.NAME); + metaFields.add(FieldNamesFieldMapper.NAME); + metaFields.add(SeqNoFieldMapper.NAME); + META_FIELDS = Collections.unmodifiableSet(metaFields); + } + + private ThreadContext threadContext; + private ScriptService scriptService; + private 
SecurityIndexSearcherWrapper securityIndexSearcherWrapper; + private ElasticsearchDirectoryReader esIn; + private XPackLicenseState licenseState; + private IndexSettings indexSettings; + + @Before + public void setup() throws Exception { + Index index = new Index("_index", "testUUID"); + scriptService = mock(ScriptService.class); + indexSettings = IndexSettingsModule.newIndexSettings(index, Settings.EMPTY); + + ShardId shardId = new ShardId(index, 0); + licenseState = mock(XPackLicenseState.class); + when(licenseState.isSecurityEnabled()).thenReturn(true); + when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true); + threadContext = new ThreadContext(Settings.EMPTY); + IndexShard indexShard = mock(IndexShard.class); + when(indexShard.shardId()).thenReturn(shardId); + + Directory directory = new RAMDirectory(); + IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig()); + writer.close(); + + DirectoryReader in = DirectoryReader.open(directory); // unfortunately DirectoryReader isn't mock friendly + esIn = ElasticsearchDirectoryReader.wrap(in, shardId); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + esIn.close(); + } + + public void testDefaultMetaFields() throws Exception { + securityIndexSearcherWrapper = + new SecurityIndexSearcherWrapper(indexSettings, null, null, threadContext, licenseState, scriptService) { + @Override + protected IndicesAccessControl getIndicesAccessControl() { + IndicesAccessControl.IndexAccessControl indexAccessControl = new IndicesAccessControl.IndexAccessControl(true, + new FieldPermissions(fieldPermissionDef(new String[]{}, null)), null); + return new IndicesAccessControl(true, singletonMap("_index", indexAccessControl)); + } + }; + + FieldSubsetReader.FieldSubsetDirectoryReader result = + (FieldSubsetReader.FieldSubsetDirectoryReader) securityIndexSearcherWrapper.wrap(esIn); + assertThat(result.getFilter().run("_uid"), is(true)); + assertThat(result.getFilter().run("_id"), is(true)); + assertThat(result.getFilter().run("_version"), is(true)); + assertThat(result.getFilter().run("_type"), is(true)); + assertThat(result.getFilter().run("_source"), is(true)); + assertThat(result.getFilter().run("_routing"), is(true)); + assertThat(result.getFilter().run("_timestamp"), is(true)); + assertThat(result.getFilter().run("_ttl"), is(true)); + assertThat(result.getFilter().run("_size"), is(true)); + assertThat(result.getFilter().run("_index"), is(true)); + assertThat(result.getFilter().run("_field_names"), is(true)); + assertThat(result.getFilter().run("_seq_no"), is(true)); + assertThat(result.getFilter().run("_some_random_meta_field"), is(true)); + assertThat(result.getFilter().run("some_random_regular_field"), is(false)); + } + + public void testWrapReaderWhenFeatureDisabled() throws Exception { + when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(false); + securityIndexSearcherWrapper = + new SecurityIndexSearcherWrapper(indexSettings, null, null, threadContext, licenseState, scriptService); + DirectoryReader reader = securityIndexSearcherWrapper.wrap(esIn); + assertThat(reader, sameInstance(esIn)); + } + + public void testWrapSearcherWhenFeatureDisabled() throws Exception { + securityIndexSearcherWrapper = + new SecurityIndexSearcherWrapper(indexSettings, null, null, threadContext, licenseState, scriptService); + IndexSearcher indexSearcher = new IndexSearcher(esIn); + IndexSearcher result = securityIndexSearcherWrapper.wrap(indexSearcher); + assertThat(result, 
sameInstance(indexSearcher)); + } + + public void testWildcards() throws Exception { + Set expected = new HashSet<>(META_FIELDS); + expected.add("field1_a"); + expected.add("field1_b"); + expected.add("field1_c"); + assertResolved(new FieldPermissions(fieldPermissionDef(new String[] {"field1*"}, null)), expected, "field", "field2"); + } + + public void testDotNotion() throws Exception { + Set expected = new HashSet<>(META_FIELDS); + expected.add("foo.bar"); + assertResolved(new FieldPermissions(fieldPermissionDef(new String[] {"foo.bar"}, null)), expected, "foo", "foo.baz", "bar.foo"); + + expected = new HashSet<>(META_FIELDS); + expected.add("foo.bar"); + assertResolved(new FieldPermissions(fieldPermissionDef(new String[] {"foo.*"}, null)), expected, "foo", "bar"); + } + + public void testDelegateSimilarity() throws Exception { + IndexSettings settings = IndexSettingsModule.newIndexSettings("_index", Settings.EMPTY); + BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(settings, new BitsetFilterCache.Listener() { + @Override + public void onCache(ShardId shardId, Accountable accountable) { + } + + @Override + public void onRemoval(ShardId shardId, Accountable accountable) { + + } + }); + DirectoryReader directoryReader = DocumentSubsetReader.wrap(esIn, bitsetFilterCache, new MatchAllDocsQuery()); + IndexSearcher indexSearcher = new IndexSearcher(directoryReader); + securityIndexSearcherWrapper = + new SecurityIndexSearcherWrapper(indexSettings, null, null, threadContext, licenseState, scriptService); + IndexSearcher result = securityIndexSearcherWrapper.wrap(indexSearcher); + assertThat(result, not(sameInstance(indexSearcher))); + assertThat(result.getSimilarity(true), sameInstance(indexSearcher.getSimilarity(true))); + bitsetFilterCache.close(); + } + + public void testIntersectScorerAndRoleBits() throws Exception { + securityIndexSearcherWrapper = + new SecurityIndexSearcherWrapper(indexSettings, null, null, threadContext, licenseState, scriptService); + final Directory directory = newDirectory(); + IndexWriter iw = new IndexWriter( + directory, + new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) + ); + + Document document = new Document(); + document.add(new StringField("field1", "value1", Field.Store.NO)); + document.add(new StringField("field2", "value1", Field.Store.NO)); + iw.addDocument(document); + + document = new Document(); + document.add(new StringField("field1", "value2", Field.Store.NO)); + document.add(new StringField("field2", "value1", Field.Store.NO)); + iw.addDocument(document); + + document = new Document(); + document.add(new StringField("field1", "value3", Field.Store.NO)); + document.add(new StringField("field2", "value1", Field.Store.NO)); + iw.addDocument(document); + + document = new Document(); + document.add(new StringField("field1", "value4", Field.Store.NO)); + document.add(new StringField("field2", "value1", Field.Store.NO)); + iw.addDocument(document); + + iw.commit(); + iw.deleteDocuments(new Term("field1", "value3")); + iw.close(); + DirectoryReader directoryReader = DirectoryReader.open(directory); + IndexSearcher searcher = new IndexSearcher(directoryReader); + Weight weight = searcher.createNormalizedWeight(new TermQuery(new Term("field2", "value1")), false); + + LeafReaderContext leaf = directoryReader.leaves().get(0); + + SparseFixedBitSet sparseFixedBitSet = query(leaf, "field1", "value1"); + LeafCollector leafCollector = new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) 
throws IOException { + assertThat(doc, equalTo(0)); + } + }; + intersectScorerAndRoleBits(weight.scorer(leaf), sparseFixedBitSet, leafCollector, leaf.reader().getLiveDocs()); + + sparseFixedBitSet = query(leaf, "field1", "value2"); + leafCollector = new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + assertThat(doc, equalTo(1)); + } + }; + intersectScorerAndRoleBits(weight.scorer(leaf), sparseFixedBitSet, leafCollector, leaf.reader().getLiveDocs()); + + + sparseFixedBitSet = query(leaf, "field1", "value3"); + leafCollector = new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + fail("docId [" + doc + "] should have been deleted"); + } + }; + intersectScorerAndRoleBits(weight.scorer(leaf), sparseFixedBitSet, leafCollector, leaf.reader().getLiveDocs()); + + sparseFixedBitSet = query(leaf, "field1", "value4"); + leafCollector = new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + assertThat(doc, equalTo(3)); + } + }; + intersectScorerAndRoleBits(weight.scorer(leaf), sparseFixedBitSet, leafCollector, leaf.reader().getLiveDocs()); + + directoryReader.close(); + directory.close(); + } + + private void assertResolved(FieldPermissions permissions, Set<String> expected, String... fieldsToTest) { + for (String field : expected) { + assertThat(field, permissions.grantsAccessTo(field), is(true)); + } + for (String field : fieldsToTest) { + assertThat(field, permissions.grantsAccessTo(field), is(expected.contains(field))); + } + } + + public void testFieldPermissionsWithFieldExceptions() throws Exception { + securityIndexSearcherWrapper = + new SecurityIndexSearcherWrapper(indexSettings, null, null, threadContext, licenseState, null); + String[] grantedFields = new String[]{}; + String[] deniedFields; + Set<String> expected = new HashSet<>(META_FIELDS); + // Presence of fields in a role with an empty array implies access to no fields except the meta fields + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, randomBoolean() ? null : new String[]{})), + expected, "foo", "bar"); + + // make sure meta fields cannot be denied access to + deniedFields = META_FIELDS.toArray(new String[0]); + assertResolved(new FieldPermissions(fieldPermissionDef(null, deniedFields)), + new HashSet<>(Arrays.asList("foo", "bar", "_some_plugin_meta_field"))); + + // check we can add all fields with * + grantedFields = new String[]{"*"}; + expected = new HashSet<>(META_FIELDS); + expected.add("foo"); + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, randomBoolean() ? null : new String[]{})), expected); + + // same with null + grantedFields = null; + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, randomBoolean() ? 
null : new String[]{})), expected); + + // check we remove only excluded fields + grantedFields = new String[]{"*"}; + deniedFields = new String[]{"xfield"}; + expected = new HashSet<>(META_FIELDS); + expected.add("foo"); + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), expected, "xfield"); + + // same with null + grantedFields = null; + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), expected, "xfield"); + + // some other checks + grantedFields = new String[]{"field*"}; + deniedFields = new String[]{"field1", "field2"}; + expected = new HashSet<>(META_FIELDS); + expected.add("field3"); + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), expected, "field1", "field2"); + + grantedFields = new String[]{"field1", "field2"}; + deniedFields = new String[]{"field2"}; + expected = new HashSet<>(META_FIELDS); + expected.add("field1"); + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), expected, "field1", "field2"); + + grantedFields = new String[]{"field*"}; + deniedFields = new String[]{"field2"}; + expected = new HashSet<>(META_FIELDS); + expected.add("field1"); + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), expected, "field2"); + + deniedFields = new String[]{"field*"}; + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), + META_FIELDS, "field1", "field2"); + + // empty array for allowed fields always means no field is allowed + grantedFields = new String[]{}; + deniedFields = new String[]{}; + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), + META_FIELDS, "field1", "field2"); + + // make sure all field can be explicitly allowed + grantedFields = new String[]{"*"}; + deniedFields = randomBoolean() ? 
null : new String[]{}; + expected = new HashSet<>(META_FIELDS); + expected.add("field1"); + assertResolved(new FieldPermissions(fieldPermissionDef(grantedFields, deniedFields)), expected); + } + + private SparseFixedBitSet query(LeafReaderContext leaf, String field, String value) throws IOException { + SparseFixedBitSet sparseFixedBitSet = new SparseFixedBitSet(leaf.reader().maxDoc()); + TermsEnum tenum = leaf.reader().terms(field).iterator(); + while (tenum.next().utf8ToString().equals(value) == false) { + } + PostingsEnum penum = tenum.postings(null); + sparseFixedBitSet.or(penum); + return sparseFixedBitSet; + } + + public void testIndexSearcherWrapperSparseNoDeletions() throws IOException { + doTestIndexSearcherWrapper(true, false); + } + + public void testIndexSearcherWrapperDenseNoDeletions() throws IOException { + doTestIndexSearcherWrapper(false, false); + } + + public void testIndexSearcherWrapperSparseWithDeletions() throws IOException { + doTestIndexSearcherWrapper(true, true); + } + + public void testIndexSearcherWrapperDenseWithDeletions() throws IOException { + doTestIndexSearcherWrapper(false, true); + } + + public void testTemplating() throws Exception { + User user = new User("_username", new String[]{"role1", "role2"}, "_full_name", "_email", + Collections.singletonMap("key", "value"), true); + securityIndexSearcherWrapper = + new SecurityIndexSearcherWrapper(indexSettings, null, null, threadContext, licenseState, scriptService) { + + @Override + protected User getUser() { + return user; + } + }; + + TemplateScript.Factory compiledTemplate = templateParams -> + new TemplateScript(templateParams) { + @Override + public String execute() { + return "rendered_text"; + } + }; + + when(scriptService.compile(any(Script.class), eq(TemplateScript.CONTEXT))).thenReturn(compiledTemplate); + + XContentBuilder builder = jsonBuilder(); + String query = Strings.toString(new TermQueryBuilder("field", "{{_user.username}}").toXContent(builder, ToXContent.EMPTY_PARAMS)); + Script script = new Script(ScriptType.INLINE, "mustache", query, Collections.singletonMap("custom", "value")); + builder = jsonBuilder().startObject().field("template"); + script.toXContent(builder, ToXContent.EMPTY_PARAMS); + String querySource = Strings.toString(builder.endObject()); + + securityIndexSearcherWrapper.evaluateTemplate(querySource); + ArgumentCaptor + + + + + + + +
+ + + + + + diff --git a/x-pack/plugin/security/licenses/guava-19.0.jar.sha1 b/x-pack/plugin/security/licenses/guava-19.0.jar.sha1 new file mode 100644 index 0000000000000..02f937f821f45 --- /dev/null +++ b/x-pack/plugin/security/licenses/guava-19.0.jar.sha1 @@ -0,0 +1 @@ +6ce200f6b23222af3d8abb6b6459e6c44f4bb0e9 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/guava-LICENSE.txt b/x-pack/plugin/security/licenses/guava-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/x-pack/plugin/security/licenses/guava-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/security/licenses/guava-NOTICE.txt b/x-pack/plugin/security/licenses/guava-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/security/licenses/httpclient-LICENSE.txt b/x-pack/plugin/security/licenses/httpclient-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/x-pack/plugin/security/licenses/httpclient-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. 
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. 
+ +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. 
Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/x-pack/plugin/security/licenses/httpclient-NOTICE.txt b/x-pack/plugin/security/licenses/httpclient-NOTICE.txt new file mode 100644 index 0000000000000..91e5c40c4c6d3 --- /dev/null +++ b/x-pack/plugin/security/licenses/httpclient-NOTICE.txt @@ -0,0 +1,6 @@ +Apache HttpComponents Client +Copyright 1999-2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + diff --git a/x-pack/plugin/security/licenses/httpclient-cache-4.5.2.jar.sha1 b/x-pack/plugin/security/licenses/httpclient-cache-4.5.2.jar.sha1 new file mode 100644 index 0000000000000..75fbd3009da8e --- /dev/null +++ b/x-pack/plugin/security/licenses/httpclient-cache-4.5.2.jar.sha1 @@ -0,0 +1 @@ +bd50ea83908dbf2f387a333216e66d2f0c5079bd \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/java-support-7.3.0.jar.sha1 b/x-pack/plugin/security/licenses/java-support-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..edc1658c49607 --- /dev/null +++ b/x-pack/plugin/security/licenses/java-support-7.3.0.jar.sha1 @@ -0,0 +1 @@ +288ecc17f2025ad14f768163d42808987d5ffcd6 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/log4j-slf4j-impl-2.9.1.jar.sha1 b/x-pack/plugin/security/licenses/log4j-slf4j-impl-2.9.1.jar.sha1 new file mode 100644 index 0000000000000..66119e87e211f --- /dev/null +++ b/x-pack/plugin/security/licenses/log4j-slf4j-impl-2.9.1.jar.sha1 @@ -0,0 +1 @@ +0a97a849b18b3798c4af1a2ca5b10c66cef17e3a \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/log4j-slf4j-impl-LICENSE.txt b/x-pack/plugin/security/licenses/log4j-slf4j-impl-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/x-pack/plugin/security/licenses/log4j-slf4j-impl-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/x-pack/plugin/security/licenses/log4j-slf4j-impl-NOTICE.txt b/x-pack/plugin/security/licenses/log4j-slf4j-impl-NOTICE.txt new file mode 100644 index 0000000000000..ea99ef1d4726b --- /dev/null +++ b/x-pack/plugin/security/licenses/log4j-slf4j-impl-NOTICE.txt @@ -0,0 +1,8 @@ + +Apache Log4j SLF4J Binding +Copyright 1999-2017 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + diff --git a/x-pack/plugin/security/licenses/metrics-core-3.2.2.jar.sha1 b/x-pack/plugin/security/licenses/metrics-core-3.2.2.jar.sha1 new file mode 100644 index 0000000000000..dedc1dfa8d0e4 --- /dev/null +++ b/x-pack/plugin/security/licenses/metrics-core-3.2.2.jar.sha1 @@ -0,0 +1 @@ +cd9886f498ee2ab2d994f0c779e5553b2c450416 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/metrics-core-LICENSE.txt b/x-pack/plugin/security/licenses/metrics-core-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/x-pack/plugin/security/licenses/metrics-core-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/security/licenses/metrics-core-NOTICE.txt b/x-pack/plugin/security/licenses/metrics-core-NOTICE.txt new file mode 100644 index 0000000000000..255f6a193ad0f --- /dev/null +++ b/x-pack/plugin/security/licenses/metrics-core-NOTICE.txt @@ -0,0 +1,4 @@ +Dropwizard +Copyright 2010-2013 Coda Hale and Yammer, Inc., 2014-2016 Dropwizard Team + +This product includes software developed by Coda Hale and Yammer, Inc. 
diff --git a/x-pack/plugin/security/licenses/opensaml-core-3.3.0.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-core-3.3.0.jar.sha1 new file mode 100644 index 0000000000000..52e48aecf6c4b --- /dev/null +++ b/x-pack/plugin/security/licenses/opensaml-core-3.3.0.jar.sha1 @@ -0,0 +1 @@ +6fac68342891abec3c22d53e14c706ba3e58918b \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-messaging-api-3.3.0.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-messaging-api-3.3.0.jar.sha1 new file mode 100644 index 0000000000000..64af335f0d3ca --- /dev/null +++ b/x-pack/plugin/security/licenses/opensaml-messaging-api-3.3.0.jar.sha1 @@ -0,0 +1 @@ +5da0ff5d28546b3af8cc1487b4717fdeb675b8c4 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-messaging-impl-3.3.0.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-messaging-impl-3.3.0.jar.sha1 new file mode 100644 index 0000000000000..7f65533789acf --- /dev/null +++ b/x-pack/plugin/security/licenses/opensaml-messaging-impl-3.3.0.jar.sha1 @@ -0,0 +1 @@ +38b21389971105f32099d04c6f63b4af505364ca \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-profile-api-3.3.0.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-profile-api-3.3.0.jar.sha1 new file mode 100644 index 0000000000000..e9ad1a88ba192 --- /dev/null +++ b/x-pack/plugin/security/licenses/opensaml-profile-api-3.3.0.jar.sha1 @@ -0,0 +1 @@ +e4c72301b98cf4967c49c450de7da2dbc1f6b8d0 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-profile-impl-3.3.0.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-profile-impl-3.3.0.jar.sha1 new file mode 100644 index 0000000000000..eb5ba5d3e1da1 --- /dev/null +++ b/x-pack/plugin/security/licenses/opensaml-profile-impl-3.3.0.jar.sha1 @@ -0,0 +1 @@ +25c28fb4ab027fcaacaa268902cffc4451ac840c \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-saml-api-3.3.0.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-saml-api-3.3.0.jar.sha1 new file mode 100644 index 0000000000000..76a2bfe45b20e --- /dev/null +++ b/x-pack/plugin/security/licenses/opensaml-saml-api-3.3.0.jar.sha1 @@ -0,0 +1 @@ +c9611395e073206e59816b0b5ce5166450e8101e \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-saml-impl-3.3.0.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-saml-impl-3.3.0.jar.sha1 new file mode 100644 index 0000000000000..8fb734014758e --- /dev/null +++ b/x-pack/plugin/security/licenses/opensaml-saml-impl-3.3.0.jar.sha1 @@ -0,0 +1 @@ +391ac88f96a9f8f522d693c168d4c65fad20535d \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-security-api-3.3.0.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-security-api-3.3.0.jar.sha1 new file mode 100644 index 0000000000000..880f45d51fc73 --- /dev/null +++ b/x-pack/plugin/security/licenses/opensaml-security-api-3.3.0.jar.sha1 @@ -0,0 +1 @@ +89477899f0836040e9a584b451895a61d923bf96 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-security-impl-3.3.0.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-security-impl-3.3.0.jar.sha1 new file mode 100644 index 0000000000000..1a0ebae336345 --- /dev/null +++ b/x-pack/plugin/security/licenses/opensaml-security-impl-3.3.0.jar.sha1 @@ -0,0 +1 @@ +48cf37a5080ee406aef21a49045f5e1d15ea46e6 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-soap-api-3.3.0.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-soap-api-3.3.0.jar.sha1 new 
file mode 100644 index 0000000000000..cf4b5e7092ba4 --- /dev/null +++ b/x-pack/plugin/security/licenses/opensaml-soap-api-3.3.0.jar.sha1 @@ -0,0 +1 @@ +4e900056cd80c1f0bd72497c26a48664089e04a8 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-soap-impl-3.3.0.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-soap-impl-3.3.0.jar.sha1 new file mode 100644 index 0000000000000..4a80173835652 --- /dev/null +++ b/x-pack/plugin/security/licenses/opensaml-soap-impl-3.3.0.jar.sha1 @@ -0,0 +1 @@ +ea912fe660d11ad443775974e3208f0563edcebd \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-storage-api-3.3.0.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-storage-api-3.3.0.jar.sha1 new file mode 100644 index 0000000000000..42a82939319c5 --- /dev/null +++ b/x-pack/plugin/security/licenses/opensaml-storage-api-3.3.0.jar.sha1 @@ -0,0 +1 @@ +7492688b067dca0568554ec4c7abf9f0b5e1f682 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-storage-impl-3.3.0.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-storage-impl-3.3.0.jar.sha1 new file mode 100644 index 0000000000000..cc653a0b383a5 --- /dev/null +++ b/x-pack/plugin/security/licenses/opensaml-storage-impl-3.3.0.jar.sha1 @@ -0,0 +1 @@ +1244ecd4e8eccf74eb178906b0e9cac8a62bcbf7 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-xmlsec-api-3.3.0.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-xmlsec-api-3.3.0.jar.sha1 new file mode 100644 index 0000000000000..6c29986c3b90d --- /dev/null +++ b/x-pack/plugin/security/licenses/opensaml-xmlsec-api-3.3.0.jar.sha1 @@ -0,0 +1 @@ +e824f1e3ec14080412a4ab4b0807a13933d9be80 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/opensaml-xmlsec-impl-3.3.0.jar.sha1 b/x-pack/plugin/security/licenses/opensaml-xmlsec-impl-3.3.0.jar.sha1 new file mode 100644 index 0000000000000..2ec1aa75d6e2c --- /dev/null +++ b/x-pack/plugin/security/licenses/opensaml-xmlsec-impl-3.3.0.jar.sha1 @@ -0,0 +1 @@ +569ae8fc7c84817c5324e9f9b7958adf700a94c1 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/shibboleth-LICENSE.txt b/x-pack/plugin/security/licenses/shibboleth-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/x-pack/plugin/security/licenses/shibboleth-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/x-pack/plugin/security/licenses/shibboleth-NOTICE.txt b/x-pack/plugin/security/licenses/shibboleth-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/security/licenses/slf4j-api-1.6.2.jar.sha1 b/x-pack/plugin/security/licenses/slf4j-api-1.6.2.jar.sha1 new file mode 100644 index 0000000000000..a2f93ea55802b --- /dev/null +++ b/x-pack/plugin/security/licenses/slf4j-api-1.6.2.jar.sha1 @@ -0,0 +1 @@ +8619e95939167fb37245b5670135e4feb0ec7d50 \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/slf4j-api-LICENSE.txt b/x-pack/plugin/security/licenses/slf4j-api-LICENSE.txt new file mode 100644 index 0000000000000..c8e72c2cd4738 --- /dev/null +++ b/x-pack/plugin/security/licenses/slf4j-api-LICENSE.txt @@ -0,0 +1,21 @@ + Copyright (c) 2004-2017 QOS.ch + All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/x-pack/plugin/security/licenses/slf4j-api-NOTICE.txt b/x-pack/plugin/security/licenses/slf4j-api-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/security/licenses/xmlsec-2.0.8.jar.sha1 b/x-pack/plugin/security/licenses/xmlsec-2.0.8.jar.sha1 new file mode 100644 index 0000000000000..eea95c3ce5826 --- /dev/null +++ b/x-pack/plugin/security/licenses/xmlsec-2.0.8.jar.sha1 @@ -0,0 +1 @@ +f5995bd4cd75816568c3b26d2552d957316ba8dc \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/xmlsec-LICENSE.txt b/x-pack/plugin/security/licenses/xmlsec-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/x-pack/plugin/security/licenses/xmlsec-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/x-pack/plugin/security/licenses/xmlsec-NOTICE.txt b/x-pack/plugin/security/licenses/xmlsec-NOTICE.txt new file mode 100644 index 0000000000000..ab7c6276cd836 --- /dev/null +++ b/x-pack/plugin/security/licenses/xmlsec-NOTICE.txt @@ -0,0 +1,9 @@ + +Apache XML Security for Java +Copyright 2000-2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + + + diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-certgen b/x-pack/plugin/security/src/main/bin/elasticsearch-certgen new file mode 100644 index 0000000000000..d56ae2f4d1adb --- /dev/null +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-certgen @@ -0,0 +1,20 @@ +#!/bin/bash + +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License; +# you may not use this file except in compliance with the Elastic License. + +source "`dirname "$0"`"/elasticsearch-env + +source "`dirname "$0"`"/x-pack-security-env + +exec \ + "$JAVA" \ + $ES_JAVA_OPTS \ + -Des.path.home="$ES_HOME" \ + -Des.path.conf="$ES_PATH_CONF" \ + -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ + -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ + -cp "$ES_CLASSPATH" \ + org.elasticsearch.xpack.core.ssl.CertificateGenerateTool \ + "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat new file mode 100644 index 0000000000000..d44ca227c07fd --- /dev/null +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat @@ -0,0 +1,25 @@ +@echo off + +rem Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +rem or more contributor license agreements. Licensed under the Elastic License; +rem you may not use this file except in compliance with the Elastic License. + +setlocal enabledelayedexpansion +setlocal enableextensions + +call "%~dp0elasticsearch-env.bat" || exit /b 1 + +call "%~dp0x-pack-security-env.bat" || exit /b 1 + +%JAVA% ^ + %ES_JAVA_OPTS% ^ + -Des.path.home="%ES_HOME%" ^ + -Des.path.conf="%ES_PATH_CONF%" ^ + -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ + -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ + -cp "%ES_CLASSPATH%" ^ + org.elasticsearch.xpack.core.ssl.CertificateGenerateTool ^ + %* + +endlocal +endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-certutil b/x-pack/plugin/security/src/main/bin/elasticsearch-certutil new file mode 100644 index 0000000000000..c2502bd734ffe --- /dev/null +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-certutil @@ -0,0 +1,20 @@ +#!/bin/bash + +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License; +# you may not use this file except in compliance with the Elastic License. 
+ +source "`dirname "$0"`"/elasticsearch-env + +source "`dirname "$0"`"/x-pack-security-env + +exec \ + "$JAVA" \ + $ES_JAVA_OPTS \ + -Des.path.home="$ES_HOME" \ + -Des.path.conf="$ES_PATH_CONF" \ + -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ + -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ + -cp "$ES_CLASSPATH" \ + org.elasticsearch.xpack.core.ssl.CertificateTool \ + "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat new file mode 100644 index 0000000000000..4426fb87d3ba6 --- /dev/null +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat @@ -0,0 +1,25 @@ +@echo off + +rem Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +rem or more contributor license agreements. Licensed under the Elastic License; +rem you may not use this file except in compliance with the Elastic License. + +setlocal enabledelayedexpansion +setlocal enableextensions + +call "%~dp0elasticsearch-env.bat" || exit /b 1 + +call "%~dp0x-pack-security-env.bat" || exit /b 1 + +%JAVA% ^ + %ES_JAVA_OPTS% ^ + -Des.path.home="%ES_HOME%" ^ + -Des.path.conf="%ES_PATH_CONF%" ^ + -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ + -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ + -cp "%ES_CLASSPATH%" ^ + org.elasticsearch.xpack.core.ssl.CertificateTool ^ + %* + +endlocal +endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate new file mode 100755 index 0000000000000..eb3c81febdfb7 --- /dev/null +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate @@ -0,0 +1,20 @@ +#!/bin/bash + +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License; +# you may not use this file except in compliance with the Elastic License. + +source "`dirname "$0"`"/elasticsearch-env + +source "`dirname "$0"`"/x-pack-security-env + +exec \ + "$JAVA" \ + $ES_JAVA_OPTS \ + -Des.path.home="$ES_HOME" \ + -Des.path.conf="$ES_PATH_CONF" \ + -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ + -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ + -cp "$ES_CLASSPATH" \ + org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool \ + "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat new file mode 100644 index 0000000000000..79090b6490790 --- /dev/null +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat @@ -0,0 +1,25 @@ +@echo off + +rem Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +rem or more contributor license agreements. Licensed under the Elastic License; +rem you may not use this file except in compliance with the Elastic License. 
+ +setlocal enabledelayedexpansion +setlocal enableextensions + +call "%~dp0elasticsearch-env.bat" || exit /b 1 + +call "%~dp0x-pack-security-env.bat" || exit /b 1 + +%JAVA% ^ + %ES_JAVA_OPTS% ^ + -Des.path.home="%ES_HOME%" ^ + -Des.path.conf="%ES_PATH_CONF%" ^ + -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ + -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ + -cp "%ES_CLASSPATH%" ^ + org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool ^ + %* + +endlocal +endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata b/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata new file mode 100644 index 0000000000000..92200d82e1264 --- /dev/null +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata @@ -0,0 +1,20 @@ +#!/bin/bash + +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License; +# you may not use this file except in compliance with the Elastic License. + +source "`dirname "$0"`"/elasticsearch-env + +source "`dirname "$0"`"/x-pack-security-env + +exec \ + "$JAVA" \ + $ES_JAVA_OPTS \ + -Des.path.home="$ES_HOME" \ + -Des.path.conf="$ES_PATH_CONF" \ + -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ + -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ + -cp "$ES_CLASSPATH" \ + org.elasticsearch.xpack.security.authc.saml.SamlMetadataCommand \ + "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat new file mode 100644 index 0000000000000..9e5625d0b912e --- /dev/null +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat @@ -0,0 +1,25 @@ +@echo off + +rem Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +rem or more contributor license agreements. Licensed under the Elastic License; +rem you may not use this file except in compliance with the Elastic License. + +setlocal enabledelayedexpansion +setlocal enableextensions + +call "%~dp0elasticsearch-env.bat" || exit /b 1 + +call "%~dp0x-pack-security-env.bat" || exit /b 1 + +%JAVA% ^ + %ES_JAVA_OPTS% ^ + -Des.path.home="%ES_HOME%" ^ + -Des.path.conf="%ES_PATH_CONF%" ^ + -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ + -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ + -cp "%ES_CLASSPATH%" ^ + org.elasticsearch.xpack.security.authc.saml.SamlMetadataCommand ^ + %* + +endlocal +endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords b/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords new file mode 100644 index 0000000000000..e6aaa00d64796 --- /dev/null +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords @@ -0,0 +1,20 @@ +#!/bin/bash + +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License; +# you may not use this file except in compliance with the Elastic License. 
+
+source "`dirname "$0"`"/elasticsearch-env
+
+source "`dirname "$0"`"/x-pack-security-env
+
+exec \
+  "$JAVA" \
+  $ES_JAVA_OPTS \
+  -Des.path.home="$ES_HOME" \
+  -Des.path.conf="$ES_PATH_CONF" \
+  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
+  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
+  -cp "$ES_CLASSPATH" \
+  org.elasticsearch.xpack.security.authc.esnative.tool.SetupPasswordTool \
+  "$@"
diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat
new file mode 100644
index 0000000000000..b449ca09a6c30
--- /dev/null
+++ b/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat
@@ -0,0 +1,25 @@
+@echo off
+
+rem Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+rem or more contributor license agreements. Licensed under the Elastic License;
+rem you may not use this file except in compliance with the Elastic License.
+
+setlocal enabledelayedexpansion
+setlocal enableextensions
+
+call "%~dp0elasticsearch-env.bat" || exit /b 1
+
+call "%~dp0x-pack-security-env.bat" || exit /b 1
+
+%JAVA% ^
+  %ES_JAVA_OPTS% ^
+  -Des.path.home="%ES_HOME%" ^
+  -Des.path.conf="%ES_PATH_CONF%" ^
+  -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^
+  -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^
+  -cp "%ES_CLASSPATH%" ^
+  org.elasticsearch.xpack.security.authc.esnative.tool.SetupPasswordTool ^
+  %*
+
+endlocal
+endlocal
diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen b/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen
new file mode 100755
index 0000000000000..e8c4f10c044c3
--- /dev/null
+++ b/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+# or more contributor license agreements. Licensed under the Elastic License;
+# you may not use this file except in compliance with the Elastic License.
+
+source "`dirname "$0"`"/elasticsearch-env
+
+source "`dirname "$0"`"/x-pack-security-env
+
+exec \
+  "$JAVA" \
+  $ES_JAVA_OPTS \
+  -Des.path.home="$ES_HOME" \
+  -Des.path.conf="$ES_PATH_CONF" \
+  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
+  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
+  -cp "$ES_CLASSPATH" \
+  org.elasticsearch.xpack.security.crypto.tool.SystemKeyTool \
+  "$@"
diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat
new file mode 100644
index 0000000000000..3ee9dcb3ba9cb
--- /dev/null
+++ b/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat
@@ -0,0 +1,25 @@
+@echo off
+
+rem Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+rem or more contributor license agreements. Licensed under the Elastic License;
+rem you may not use this file except in compliance with the Elastic License.
+
+setlocal enabledelayedexpansion
+setlocal enableextensions
+
+call "%~dp0elasticsearch-env.bat" || exit /b 1
+
+call "%~dp0x-pack-security-env.bat" || exit /b 1
+
+%JAVA% ^
+  %ES_JAVA_OPTS% ^
+  -Des.path.home="%ES_HOME%" ^
+  -Des.path.conf="%ES_PATH_CONF%" ^
+  -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^
+  -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^
+  -cp "%ES_CLASSPATH%" ^
+  org.elasticsearch.xpack.security.crypto.tool.SystemKeyTool ^
+  %*
+
+endlocal
+endlocal
diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-users b/x-pack/plugin/security/src/main/bin/elasticsearch-users
new file mode 100755
index 0000000000000..2d9ed8df93dc4
--- /dev/null
+++ b/x-pack/plugin/security/src/main/bin/elasticsearch-users
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+# or more contributor license agreements. Licensed under the Elastic License;
+# you may not use this file except in compliance with the Elastic License.
+
+source "`dirname "$0"`"/elasticsearch-env
+
+source "`dirname "$0"`"/x-pack-security-env
+
+exec \
+  "$JAVA" \
+  $ES_JAVA_OPTS \
+  -Des.path.home="$ES_HOME" \
+  -Des.path.conf="$ES_PATH_CONF" \
+  -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \
+  -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \
+  -cp "$ES_CLASSPATH" \
+  org.elasticsearch.xpack.security.authc.file.tool.UsersTool \
+  "$@"
diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat
new file mode 100644
index 0000000000000..b32b9398f9971
--- /dev/null
+++ b/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat
@@ -0,0 +1,25 @@
+@echo off
+
+rem Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+rem or more contributor license agreements. Licensed under the Elastic License;
+rem you may not use this file except in compliance with the Elastic License.
+
+setlocal enabledelayedexpansion
+setlocal enableextensions
+
+call "%~dp0elasticsearch-env.bat" || exit /b 1
+
+call "%~dp0x-pack-security-env.bat" || exit /b 1
+
+%JAVA% ^
+  %ES_JAVA_OPTS% ^
+  -Des.path.home="%ES_HOME%" ^
+  -Des.path.conf="%ES_PATH_CONF%" ^
+  -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^
+  -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^
+  -cp "%ES_CLASSPATH%" ^
+  org.elasticsearch.xpack.security.authc.file.tool.UsersTool ^
+  %*
+
+endlocal
+endlocal
diff --git a/x-pack/plugin/security/src/main/bin/x-pack-security-env b/x-pack/plugin/security/src/main/bin/x-pack-security-env
new file mode 100644
index 0000000000000..fd35535be8cca
--- /dev/null
+++ b/x-pack/plugin/security/src/main/bin/x-pack-security-env
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+# or more contributor license agreements. Licensed under the Elastic License;
+# you may not use this file except in compliance with the Elastic License.
+
+source "`dirname "$0"`"/x-pack-env
+
+# include x-pack-security jars in classpath
+ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack/x-pack-security/*"
diff --git a/x-pack/plugin/security/src/main/bin/x-pack-security-env.bat b/x-pack/plugin/security/src/main/bin/x-pack-security-env.bat
new file mode 100644
index 0000000000000..610f5835d28c2
--- /dev/null
+++ b/x-pack/plugin/security/src/main/bin/x-pack-security-env.bat
@@ -0,0 +1,7 @@
+rem Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+rem or more contributor license agreements. Licensed under the Elastic License;
+rem you may not use this file except in compliance with the Elastic License.
+
+call "%~dp0x-pack-env.bat" || exit /b 1
+
+set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack/x-pack-security/*
diff --git a/x-pack/plugin/security/src/main/config/role_mapping.yml b/x-pack/plugin/security/src/main/config/role_mapping.yml
new file mode 100644
index 0000000000000..68c82f7e7c05b
--- /dev/null
+++ b/x-pack/plugin/security/src/main/config/role_mapping.yml
@@ -0,0 +1,14 @@
+# Role mapping configuration file which has elasticsearch roles as keys
+# that map to one or more user or group distinguished names
+
+#roleA: this is an elasticsearch role
+#  - groupA-DN  this is a group distinguished name
+#  - groupB-DN
+#  - user1-DN   this is the full user distinguished name
+
+#power_user:
+#  - "cn=admins,dc=example,dc=com"
+#user:
+#  - "cn=users,dc=example,dc=com"
+#  - "cn=admins,dc=example,dc=com"
+#  - "cn=John Doe,cn=other users,dc=example,dc=com"
diff --git a/x-pack/plugin/security/src/main/config/roles.yml b/x-pack/plugin/security/src/main/config/roles.yml
new file mode 100644
index 0000000000000..68e003b8cdedd
--- /dev/null
+++ b/x-pack/plugin/security/src/main/config/roles.yml
@@ -0,0 +1,3 @@
+# The default roles file is empty as the preferred method of defining roles is
+# through the API/UI. File based roles are useful in error scenarios when the
+# API based roles may not be available.
diff --git a/x-pack/plugin/security/src/main/config/users b/x-pack/plugin/security/src/main/config/users
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/x-pack/plugin/security/src/main/config/users_roles b/x-pack/plugin/security/src/main/config/users_roles
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheck.java
new file mode 100644
index 0000000000000..dcc2308f9f0c1
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheck.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.security; + +import org.elasticsearch.bootstrap.BootstrapCheck; +import org.elasticsearch.bootstrap.BootstrapContext; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings; +import org.elasticsearch.xpack.core.security.transport.netty4.SecurityNetty4Transport; +import org.elasticsearch.xpack.core.ssl.SSLConfiguration; +import org.elasticsearch.xpack.core.ssl.SSLService; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.core.XPackSettings.HTTP_SSL_ENABLED; +import static org.elasticsearch.xpack.core.security.SecurityField.setting; + +class PkiRealmBootstrapCheck implements BootstrapCheck { + + private final SSLService sslService; + private final List sslConfigurations; + + PkiRealmBootstrapCheck(Settings settings, SSLService sslService) { + this.sslService = sslService; + this.sslConfigurations = loadSslConfigurations(settings); + } + + /** + * {@link SSLConfiguration} may depend on {@link org.elasticsearch.common.settings.SecureSettings} that can only be read during startup. + * We need to preload these during component configuration. + */ + private List loadSslConfigurations(Settings settings) { + final List list = new ArrayList<>(); + if (HTTP_SSL_ENABLED.get(settings)) { + list.add(sslService.sslConfiguration(SSLService.getHttpTransportSSLSettings(settings), Settings.EMPTY)); + } + + if (XPackSettings.TRANSPORT_SSL_ENABLED.get(settings)) { + final Settings transportSslSettings = settings.getByPrefix(setting("transport.ssl.")); + list.add(sslService.sslConfiguration(transportSslSettings, Settings.EMPTY)); + + settings.getGroups("transport.profiles.").values().stream() + .map(SecurityNetty4Transport::profileSslSettings) + .map(s -> sslService.sslConfiguration(s, transportSslSettings)) + .forEach(list::add); + } + + return list; + } + + /** + * If a PKI realm is enabled, checks to see if SSL and Client authentication are enabled on at + * least one network communication layer. + */ + @Override + public BootstrapCheckResult check(BootstrapContext context) { + final Settings settings = context.settings; + final boolean pkiRealmEnabled = settings.getGroups(RealmSettings.PREFIX).values().stream() + .filter(s -> PkiRealmSettings.TYPE.equals(s.get("type"))) + .anyMatch(s -> s.getAsBoolean("enabled", true)); + if (pkiRealmEnabled) { + for (SSLConfiguration configuration : this.sslConfigurations) { + if (sslService.isSSLClientAuthEnabled(configuration)) { + return BootstrapCheckResult.success(); + } + } + return BootstrapCheckResult.failure( + "a PKI realm is enabled but cannot be used as neither HTTP or Transport have SSL and client authentication enabled"); + } else { + return BootstrapCheckResult.success(); + } + } + + @Override + public boolean alwaysEnforce() { + return true; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java new file mode 100644 index 0000000000000..4e9c6d1e5d754 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -0,0 +1,988 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilter; +import org.elasticsearch.action.support.DestructiveOperations; +import org.elasticsearch.bootstrap.BootstrapCheck; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.inject.util.Providers; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.logging.LoggerMessageFormat; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.index.IndexModule; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.ingest.Processor; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.ClusterPlugin; +import org.elasticsearch.plugins.DiscoveryPlugin; +import org.elasticsearch.plugins.ExtensiblePlugin; +import org.elasticsearch.plugins.IngestPlugin; +import org.elasticsearch.plugins.MapperPlugin; +import org.elasticsearch.plugins.NetworkPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.script.ScriptService; +import 
org.elasticsearch.threadpool.ExecutorBuilder; +import org.elasticsearch.threadpool.FixedExecutorBuilder; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportInterceptor; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.SecurityExtension; +import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.core.security.SecuritySettings; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction; +import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheAction; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleAction; +import org.elasticsearch.xpack.core.security.action.role.GetRolesAction; +import org.elasticsearch.xpack.core.security.action.role.PutRoleAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingAction; +import org.elasticsearch.xpack.core.security.action.saml.SamlAuthenticateAction; +import org.elasticsearch.xpack.core.security.action.saml.SamlInvalidateSessionAction; +import org.elasticsearch.xpack.core.security.action.saml.SamlLogoutAction; +import org.elasticsearch.xpack.core.security.action.saml.SamlPrepareAuthenticationAction; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenAction; +import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenAction; +import org.elasticsearch.xpack.core.security.action.token.RefreshTokenAction; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordAction; +import org.elasticsearch.xpack.core.security.action.user.DeleteUserAction; +import org.elasticsearch.xpack.core.security.action.user.GetUsersAction; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesAction; +import org.elasticsearch.xpack.core.security.action.user.PutUserAction; +import org.elasticsearch.xpack.core.security.action.user.SetEnabledAction; +import org.elasticsearch.xpack.core.security.authc.AuthenticationFailureHandler; +import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; +import org.elasticsearch.xpack.core.security.authc.DefaultAuthenticationFailureHandler; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.TokenMetaData; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.SecurityIndexSearcherWrapper; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.SetSecurityUserProcessor; +import 
org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; +import org.elasticsearch.xpack.security.authz.store.FileRolesStore; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; +import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.core.ssl.TLSLicenseBootstrapCheck; +import org.elasticsearch.xpack.core.ssl.action.GetCertificateInfoAction; +import org.elasticsearch.xpack.core.ssl.action.TransportGetCertificateInfoAction; +import org.elasticsearch.xpack.core.ssl.rest.RestGetCertificateInfoAction; +import org.elasticsearch.xpack.security.action.filter.SecurityActionFilter; +import org.elasticsearch.xpack.security.action.interceptor.BulkShardRequestInterceptor; +import org.elasticsearch.xpack.security.action.interceptor.IndicesAliasesRequestInterceptor; +import org.elasticsearch.xpack.security.action.interceptor.RequestInterceptor; +import org.elasticsearch.xpack.security.action.interceptor.ResizeRequestInterceptor; +import org.elasticsearch.xpack.security.action.interceptor.SearchRequestInterceptor; +import org.elasticsearch.xpack.security.action.interceptor.UpdateRequestInterceptor; +import org.elasticsearch.xpack.security.action.realm.TransportClearRealmCacheAction; +import org.elasticsearch.xpack.security.action.role.TransportClearRolesCacheAction; +import org.elasticsearch.xpack.security.action.role.TransportDeleteRoleAction; +import org.elasticsearch.xpack.security.action.role.TransportGetRolesAction; +import org.elasticsearch.xpack.security.action.role.TransportPutRoleAction; +import org.elasticsearch.xpack.security.action.rolemapping.TransportDeleteRoleMappingAction; +import org.elasticsearch.xpack.security.action.rolemapping.TransportGetRoleMappingsAction; +import org.elasticsearch.xpack.security.action.rolemapping.TransportPutRoleMappingAction; +import org.elasticsearch.xpack.security.action.saml.TransportSamlAuthenticateAction; +import org.elasticsearch.xpack.security.action.saml.TransportSamlInvalidateSessionAction; +import org.elasticsearch.xpack.security.action.saml.TransportSamlLogoutAction; +import org.elasticsearch.xpack.security.action.saml.TransportSamlPrepareAuthenticationAction; +import org.elasticsearch.xpack.security.action.token.TransportCreateTokenAction; +import org.elasticsearch.xpack.security.action.token.TransportInvalidateTokenAction; +import org.elasticsearch.xpack.security.action.token.TransportRefreshTokenAction; +import org.elasticsearch.xpack.security.action.user.TransportAuthenticateAction; +import org.elasticsearch.xpack.security.action.user.TransportChangePasswordAction; +import org.elasticsearch.xpack.security.action.user.TransportDeleteUserAction; +import org.elasticsearch.xpack.security.action.user.TransportGetUsersAction; +import org.elasticsearch.xpack.security.action.user.TransportHasPrivilegesAction; +import org.elasticsearch.xpack.security.action.user.TransportPutUserAction; +import org.elasticsearch.xpack.security.action.user.TransportSetEnabledAction; +import org.elasticsearch.xpack.security.audit.AuditTrail; +import org.elasticsearch.xpack.security.audit.AuditTrailService; +import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail; +import 
org.elasticsearch.xpack.security.audit.index.IndexNameResolver; +import org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail; +import org.elasticsearch.xpack.security.authc.AuthenticationService; +import org.elasticsearch.xpack.security.authc.InternalRealms; +import org.elasticsearch.xpack.security.authc.Realms; +import org.elasticsearch.xpack.security.authc.TokenService; +import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; +import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; +import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; +import org.elasticsearch.xpack.security.authz.AuthorizationService; +import org.elasticsearch.xpack.security.authz.SecuritySearchOperationListener; +import org.elasticsearch.xpack.security.authz.accesscontrol.OptOutQueryCache; +import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; +import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; +import org.elasticsearch.xpack.security.rest.SecurityRestFilter; +import org.elasticsearch.xpack.security.rest.action.RestAuthenticateAction; +import org.elasticsearch.xpack.security.rest.action.oauth2.RestGetTokenAction; +import org.elasticsearch.xpack.security.rest.action.oauth2.RestInvalidateTokenAction; +import org.elasticsearch.xpack.security.rest.action.realm.RestClearRealmCacheAction; +import org.elasticsearch.xpack.security.rest.action.role.RestClearRolesCacheAction; +import org.elasticsearch.xpack.security.rest.action.role.RestDeleteRoleAction; +import org.elasticsearch.xpack.security.rest.action.role.RestGetRolesAction; +import org.elasticsearch.xpack.security.rest.action.role.RestPutRoleAction; +import org.elasticsearch.xpack.security.rest.action.rolemapping.RestDeleteRoleMappingAction; +import org.elasticsearch.xpack.security.rest.action.rolemapping.RestGetRoleMappingsAction; +import org.elasticsearch.xpack.security.rest.action.rolemapping.RestPutRoleMappingAction; +import org.elasticsearch.xpack.security.rest.action.saml.RestSamlAuthenticateAction; +import org.elasticsearch.xpack.security.rest.action.saml.RestSamlInvalidateSessionAction; +import org.elasticsearch.xpack.security.rest.action.saml.RestSamlLogoutAction; +import org.elasticsearch.xpack.security.rest.action.saml.RestSamlPrepareAuthenticationAction; +import org.elasticsearch.xpack.security.rest.action.user.RestChangePasswordAction; +import org.elasticsearch.xpack.security.rest.action.user.RestDeleteUserAction; +import org.elasticsearch.xpack.security.rest.action.user.RestGetUsersAction; +import org.elasticsearch.xpack.security.rest.action.user.RestHasPrivilegesAction; +import org.elasticsearch.xpack.security.rest.action.user.RestPutUserAction; +import org.elasticsearch.xpack.security.rest.action.user.RestSetEnabledAction; +import org.elasticsearch.xpack.security.support.IndexLifecycleManager; +import org.elasticsearch.xpack.security.transport.SecurityServerTransportInterceptor; +import org.elasticsearch.xpack.security.transport.filter.IPFilter; +import org.elasticsearch.xpack.security.transport.netty4.SecurityNetty4HttpServerTransport; +import org.elasticsearch.xpack.security.transport.netty4.SecurityNetty4ServerTransport; +import org.elasticsearch.xpack.core.template.TemplateUtils; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.time.Clock; +import java.util.ArrayList; +import java.util.Arrays; +import 
java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.function.BiConsumer; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.function.Supplier; +import java.util.function.UnaryOperator; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING; +import static org.elasticsearch.xpack.core.XPackSettings.HTTP_SSL_ENABLED; +import static org.elasticsearch.xpack.core.security.SecurityLifecycleServiceField.SECURITY_TEMPLATE_NAME; +import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.elasticsearch.xpack.security.support.IndexLifecycleManager.INTERNAL_INDEX_FORMAT; + +public class Security extends Plugin implements ActionPlugin, IngestPlugin, NetworkPlugin, ClusterPlugin, + DiscoveryPlugin, MapperPlugin, ExtensiblePlugin { + + private static final Logger logger = Loggers.getLogger(Security.class); + + static final Setting> AUDIT_OUTPUTS_SETTING = + Setting.listSetting(SecurityField.setting("audit.outputs"), + s -> s.keySet().contains(SecurityField.setting("audit.outputs")) ? + Collections.emptyList() : Collections.singletonList(LoggingAuditTrail.NAME), + Function.identity(), Property.NodeScope); + + private final Settings settings; + private final Environment env; + private final boolean enabled; + private final boolean transportClientMode; + /* what a PITA that we need an extra indirection to initialize this. Yet, once we got rid of guice we can thing about how + * to fix this or make it simpler. Today we need several service that are created in createComponents but we need to register + * an instance of TransportInterceptor way earlier before createComponents is called. */ + private final SetOnce securityInterceptor = new SetOnce<>(); + private final SetOnce ipFilter = new SetOnce<>(); + private final SetOnce authcService = new SetOnce<>(); + private final SetOnce auditTrailService = new SetOnce<>(); + private final SetOnce securityContext = new SetOnce<>(); + private final SetOnce threadContext = new SetOnce<>(); + private final SetOnce tokenService = new SetOnce<>(); + private final SetOnce securityActionFilter = new SetOnce<>(); + private final List bootstrapChecks; + private final List securityExtensions = new ArrayList<>(); + + public Security(Settings settings, final Path configPath) { + this(settings, configPath, Collections.emptyList()); + } + + Security(Settings settings, final Path configPath, List extensions) { + this.settings = settings; + this.transportClientMode = XPackPlugin.transportClientMode(settings); + this.env = transportClientMode ? 
null : new Environment(settings, configPath); + this.enabled = XPackSettings.SECURITY_ENABLED.get(settings); + if (enabled && transportClientMode == false) { + validateAutoCreateIndex(settings); + } + + if (enabled) { + // we load them all here otherwise we can't access secure settings since they are closed once the checks are + // fetched + final List checks = new ArrayList<>(); + checks.addAll(Arrays.asList( + new TokenSSLBootstrapCheck(), + new PkiRealmBootstrapCheck(settings, getSslService()), + new TLSLicenseBootstrapCheck())); + checks.addAll(InternalRealms.getBootstrapChecks(settings, env)); + this.bootstrapChecks = Collections.unmodifiableList(checks); + } else { + this.bootstrapChecks = Collections.emptyList(); + } + this.securityExtensions.addAll(extensions); + } + + @Override + public Collection createGuiceModules() { + List modules = new ArrayList<>(); + if (enabled == false || transportClientMode) { + modules.add(b -> b.bind(IPFilter.class).toProvider(Providers.of(null))); + } + + if (transportClientMode) { + if (enabled == false) { + return modules; + } + modules.add(b -> { + // for transport client we still must inject these ssl classes with guice + b.bind(SSLService.class).toInstance(getSslService()); + }); + + return modules; + } + modules.add(b -> XPackPlugin.bindFeatureSet(b, SecurityFeatureSet.class)); + + + if (enabled == false) { + modules.add(b -> { + b.bind(Realms.class).toProvider(Providers.of(null)); // for SecurityFeatureSet + b.bind(CompositeRolesStore.class).toProvider(Providers.of(null)); // for SecurityFeatureSet + b.bind(NativeRoleMappingStore.class).toProvider(Providers.of(null)); // for SecurityFeatureSet + b.bind(AuditTrailService.class) + .toInstance(new AuditTrailService(settings, Collections.emptyList(), getLicenseState())); + }); + return modules; + } + + // we can't load that at construction time since the license plugin might not have been loaded at that point + // which might not be the case during Plugin class instantiation. Once nodeModules are pulled + // everything should have been loaded + modules.add(b -> { + if (XPackSettings.AUDIT_ENABLED.get(settings)) { + b.bind(AuditTrail.class).to(AuditTrailService.class); // interface used by some actions... 
+ } + }); + return modules; + } + + // overridable by tests + protected Clock getClock() { + return Clock.systemUTC(); + } + protected SSLService getSslService() { return XPackPlugin.getSharedSslService(); } + protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } + + @Override + public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, ScriptService scriptService, + NamedXContentRegistry xContentRegistry, Environment environment, + NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { + try { + return createComponents(client, threadPool, clusterService, resourceWatcherService); + } catch (final Exception e) { + throw new IllegalStateException("security initialization failed", e); + } + } + + // pkg private for testing - tests want to pass in their set of extensions hence we are not using the extension service directly + Collection createComponents(Client client, ThreadPool threadPool, ClusterService clusterService, + ResourceWatcherService resourceWatcherService) throws Exception { + if (enabled == false) { + return Collections.emptyList(); + } + + threadContext.set(threadPool.getThreadContext()); + List components = new ArrayList<>(); + securityContext.set(new SecurityContext(settings, threadPool.getThreadContext())); + components.add(securityContext.get()); + + // audit trails construction + IndexAuditTrail indexAuditTrail = null; + Set auditTrails = new LinkedHashSet<>(); + if (XPackSettings.AUDIT_ENABLED.get(settings)) { + List outputs = AUDIT_OUTPUTS_SETTING.get(settings); + if (outputs.isEmpty()) { + throw new IllegalArgumentException("Audit logging is enabled but there are zero output types in " + + XPackSettings.AUDIT_ENABLED.getKey()); + } + + for (String output : outputs) { + switch (output) { + case LoggingAuditTrail.NAME: + auditTrails.add(new LoggingAuditTrail(settings, clusterService, threadPool)); + break; + case IndexAuditTrail.NAME: + indexAuditTrail = new IndexAuditTrail(settings, client, threadPool, clusterService); + auditTrails.add(indexAuditTrail); + break; + default: + throw new IllegalArgumentException("Unknown audit trail output [" + output + "]"); + } + } + } + final AuditTrailService auditTrailService = + new AuditTrailService(settings, new ArrayList<>(auditTrails), getLicenseState()); + components.add(auditTrailService); + this.auditTrailService.set(auditTrailService); + + final SecurityLifecycleService securityLifecycleService = + new SecurityLifecycleService(settings, clusterService, threadPool, client, indexAuditTrail); + final TokenService tokenService = new TokenService(settings, Clock.systemUTC(), client, securityLifecycleService, clusterService); + this.tokenService.set(tokenService); + components.add(tokenService); + + // realms construction + final NativeUsersStore nativeUsersStore = new NativeUsersStore(settings, client, securityLifecycleService); + final NativeRoleMappingStore nativeRoleMappingStore = new NativeRoleMappingStore(settings, client, securityLifecycleService); + final AnonymousUser anonymousUser = new AnonymousUser(settings); + final ReservedRealm reservedRealm = new ReservedRealm(env, settings, nativeUsersStore, + anonymousUser, securityLifecycleService, threadPool.getThreadContext()); + Map realmFactories = new HashMap<>(InternalRealms.getFactories(threadPool, resourceWatcherService, + getSslService(), nativeUsersStore, nativeRoleMappingStore, securityLifecycleService)); + for 
(SecurityExtension extension : securityExtensions) { + Map newRealms = extension.getRealms(resourceWatcherService); + for (Map.Entry entry : newRealms.entrySet()) { + if (realmFactories.put(entry.getKey(), entry.getValue()) != null) { + throw new IllegalArgumentException("Realm type [" + entry.getKey() + "] is already registered"); + } + } + } + final Realms realms = new Realms(settings, env, realmFactories, getLicenseState(), threadPool.getThreadContext(), reservedRealm); + components.add(nativeUsersStore); + components.add(nativeRoleMappingStore); + components.add(realms); + components.add(reservedRealm); + + securityLifecycleService.addSecurityIndexHealthChangeListener(nativeRoleMappingStore::onSecurityIndexHealthChange); + securityLifecycleService.addSecurityIndexOutOfDateListener(nativeRoleMappingStore::onSecurityIndexOutOfDateChange); + + AuthenticationFailureHandler failureHandler = null; + String extensionName = null; + for (SecurityExtension extension : securityExtensions) { + AuthenticationFailureHandler extensionFailureHandler = extension.getAuthenticationFailureHandler(); + if (extensionFailureHandler != null && failureHandler != null) { + throw new IllegalStateException("Extensions [" + extensionName + "] and [" + extension.toString() + "] " + + "both set an authentication failure handler"); + } + failureHandler = extensionFailureHandler; + extensionName = extension.toString(); + } + if (failureHandler == null) { + logger.debug("Using default authentication failure handler"); + failureHandler = new DefaultAuthenticationFailureHandler(); + } else { + logger.debug("Using authentication failure handler from extension [" + extensionName + "]"); + } + + authcService.set(new AuthenticationService(settings, realms, auditTrailService, failureHandler, threadPool, + anonymousUser, tokenService)); + components.add(authcService.get()); + + final FileRolesStore fileRolesStore = new FileRolesStore(settings, env, resourceWatcherService, getLicenseState()); + final NativeRolesStore nativeRolesStore = new NativeRolesStore(settings, client, getLicenseState(), securityLifecycleService); + final ReservedRolesStore reservedRolesStore = new ReservedRolesStore(); + List, ActionListener>>> rolesProviders = new ArrayList<>(); + for (SecurityExtension extension : securityExtensions) { + rolesProviders.addAll(extension.getRolesProviders(settings, resourceWatcherService)); + } + final CompositeRolesStore allRolesStore = new CompositeRolesStore(settings, fileRolesStore, nativeRolesStore, + reservedRolesStore, rolesProviders, threadPool.getThreadContext(), getLicenseState()); + securityLifecycleService.addSecurityIndexHealthChangeListener(allRolesStore::onSecurityIndexHealthChange); + securityLifecycleService.addSecurityIndexOutOfDateListener(allRolesStore::onSecurityIndexOutOfDateChange); + // to keep things simple, just invalidate all cached entries on license change. 
this happens so rarely that the impact should be + // minimal + getLicenseState().addListener(allRolesStore::invalidateAll); + final AuthorizationService authzService = new AuthorizationService(settings, allRolesStore, clusterService, + auditTrailService, failureHandler, threadPool, anonymousUser); + components.add(nativeRolesStore); // used by roles actions + components.add(reservedRolesStore); // used by roles actions + components.add(allRolesStore); // for SecurityFeatureSet and clear roles cache + components.add(authzService); + + components.add(securityLifecycleService); + + ipFilter.set(new IPFilter(settings, auditTrailService, clusterService.getClusterSettings(), getLicenseState())); + components.add(ipFilter.get()); + DestructiveOperations destructiveOperations = new DestructiveOperations(settings, clusterService.getClusterSettings()); + securityInterceptor.set(new SecurityServerTransportInterceptor(settings, threadPool, authcService.get(), + authzService, getLicenseState(), getSslService(), securityContext.get(), destructiveOperations)); + + final Set requestInterceptors; + if (XPackSettings.DLS_FLS_ENABLED.get(settings)) { + requestInterceptors = Collections.unmodifiableSet(Sets.newHashSet( + new SearchRequestInterceptor(settings, threadPool, getLicenseState()), + new UpdateRequestInterceptor(settings, threadPool, getLicenseState()), + new BulkShardRequestInterceptor(settings, threadPool, getLicenseState()), + new ResizeRequestInterceptor(settings, threadPool, getLicenseState(), auditTrailService), + new IndicesAliasesRequestInterceptor(threadPool.getThreadContext(), getLicenseState(), auditTrailService))); + } else { + requestInterceptors = Collections.emptySet(); + } + + securityActionFilter.set(new SecurityActionFilter(settings, authcService.get(), authzService, getLicenseState(), + requestInterceptors, threadPool, securityContext.get(), destructiveOperations)); + + return components; + } + + @Override + public Settings additionalSettings() { + return additionalSettings(settings, enabled, transportClientMode); + } + + // visible for tests + static Settings additionalSettings(final Settings settings, final boolean enabled, final boolean transportClientMode) { + if (enabled && transportClientMode == false) { + final Settings.Builder builder = Settings.builder(); + + builder.put(SecuritySettings.addTransportSettings(settings)); + + if (NetworkModule.HTTP_TYPE_SETTING.exists(settings)) { + final String httpType = NetworkModule.HTTP_TYPE_SETTING.get(settings); + if (httpType.equals(SecurityField.NAME4)) { + SecurityNetty4HttpServerTransport.overrideSettings(builder, settings); + } else { + final String message = String.format( + Locale.ROOT, + "http type setting [%s] must be [%s] but is [%s]", + NetworkModule.HTTP_TYPE_KEY, + SecurityField.NAME4, + httpType); + throw new IllegalArgumentException(message); + } + } else { + // default to security4 + builder.put(NetworkModule.HTTP_TYPE_KEY, SecurityField.NAME4); + SecurityNetty4HttpServerTransport.overrideSettings(builder, settings); + } + builder.put(SecuritySettings.addUserSettings(settings)); + return builder.build(); + } else { + return Settings.EMPTY; + } + } + + @Override + public List> getSettings() { + return getSettings(transportClientMode, securityExtensions); + } + + /** + * Get the {@link Setting setting configuration} for all security components, including those defined in extensions. 
+ */ + public static List> getSettings(boolean transportClientMode, List securityExtensions) { + List> settingsList = new ArrayList<>(); + + if (transportClientMode) { + return settingsList; + } + + // The following just apply in node mode + + // IP Filter settings + IPFilter.addSettings(settingsList); + + // audit settings + settingsList.add(AUDIT_OUTPUTS_SETTING); + LoggingAuditTrail.registerSettings(settingsList); + IndexAuditTrail.registerSettings(settingsList); + + // authentication settings + AnonymousUser.addSettings(settingsList); + RealmSettings.addSettings(settingsList, securityExtensions); + NativeRolesStore.addSettings(settingsList); + ReservedRealm.addSettings(settingsList); + AuthenticationService.addSettings(settingsList); + AuthorizationService.addSettings(settingsList); + settingsList.add(CompositeRolesStore.CACHE_SIZE_SETTING); + settingsList.add(FieldPermissionsCache.CACHE_SIZE_SETTING); + settingsList.add(TokenService.TOKEN_EXPIRATION); + settingsList.add(TokenService.DELETE_INTERVAL); + settingsList.add(TokenService.DELETE_TIMEOUT); + settingsList.add(SecurityServerTransportInterceptor.TRANSPORT_TYPE_PROFILE_SETTING); + settingsList.addAll(SSLConfigurationSettings.getProfileSettings()); + + // hide settings + settingsList.add(Setting.listSetting(SecurityField.setting("hide_settings"), Collections.emptyList(), Function.identity(), + Property.NodeScope, Property.Filtered)); + return settingsList; + } + + @Override + public Collection getRestHeaders() { + if (transportClientMode) { + return Collections.emptyList(); + } + Set headers = new HashSet<>(); + headers.add(UsernamePasswordToken.BASIC_AUTH_HEADER); + if (AuthenticationServiceField.RUN_AS_ENABLED.get(settings)) { + headers.add(AuthenticationServiceField.RUN_AS_USER_HEADER); + } + return headers; + } + + @Override + public List getSettingsFilter() { + List asArray = settings.getAsList(SecurityField.setting("hide_settings")); + ArrayList settingsFilter = new ArrayList<>(asArray); + settingsFilter.addAll(RealmSettings.getSettingsFilter(securityExtensions)); + // hide settings where we don't define them - they are part of a group... + settingsFilter.add("transport.profiles.*." + SecurityField.setting("*")); + return settingsFilter; + } + + @Override + public List getBootstrapChecks() { + return bootstrapChecks; + } + + @Override + public void onIndexModule(IndexModule module) { + if (enabled) { + assert getLicenseState() != null; + if (XPackSettings.DLS_FLS_ENABLED.get(settings)) { + module.setSearcherWrapper(indexService -> + new SecurityIndexSearcherWrapper(indexService.getIndexSettings(), + shardId -> indexService.newQueryShardContext(shardId.id(), + // we pass a null index reader, which is legal and will disable rewrite optimizations + // based on index statistics, which is probably safer... + null, + () -> { + throw new IllegalArgumentException("permission filters are not allowed to use the current timestamp"); + + }, null), + indexService.cache().bitsetFilterCache(), + indexService.getThreadPool().getThreadContext(), getLicenseState(), + indexService.getScriptService())); + /* We need to forcefully overwrite the query cache implementation to use security's opt out query cache implementation. + * This impl. disabled the query cache if field level security is used for a particular request. If we wouldn't do + * forcefully overwrite the query cache implementation then we leave the system vulnerable to leakages of data to + * unauthorized users. 
*/ + module.forceQueryCacheProvider((settings, cache) -> new OptOutQueryCache(settings, cache, threadContext.get())); + } + + // in order to prevent scroll ids from being maliciously crafted and/or guessed, a listener is added that + // attaches information to the scroll context so that we can validate the user that created the scroll against + // the user that is executing a scroll operation + module.addSearchOperationListener( + new SecuritySearchOperationListener(threadContext.get(), getLicenseState(), auditTrailService.get())); + } + } + + @Override + public List> getActions() { + if (enabled == false) { + return emptyList(); + } + return Arrays.asList( + new ActionHandler<>(ClearRealmCacheAction.INSTANCE, TransportClearRealmCacheAction.class), + new ActionHandler<>(ClearRolesCacheAction.INSTANCE, TransportClearRolesCacheAction.class), + new ActionHandler<>(GetUsersAction.INSTANCE, TransportGetUsersAction.class), + new ActionHandler<>(PutUserAction.INSTANCE, TransportPutUserAction.class), + new ActionHandler<>(DeleteUserAction.INSTANCE, TransportDeleteUserAction.class), + new ActionHandler<>(GetRolesAction.INSTANCE, TransportGetRolesAction.class), + new ActionHandler<>(PutRoleAction.INSTANCE, TransportPutRoleAction.class), + new ActionHandler<>(DeleteRoleAction.INSTANCE, TransportDeleteRoleAction.class), + new ActionHandler<>(ChangePasswordAction.INSTANCE, TransportChangePasswordAction.class), + new ActionHandler<>(AuthenticateAction.INSTANCE, TransportAuthenticateAction.class), + new ActionHandler<>(SetEnabledAction.INSTANCE, TransportSetEnabledAction.class), + new ActionHandler<>(HasPrivilegesAction.INSTANCE, TransportHasPrivilegesAction.class), + new ActionHandler<>(GetRoleMappingsAction.INSTANCE, TransportGetRoleMappingsAction.class), + new ActionHandler<>(PutRoleMappingAction.INSTANCE, TransportPutRoleMappingAction.class), + new ActionHandler<>(DeleteRoleMappingAction.INSTANCE, TransportDeleteRoleMappingAction.class), + new ActionHandler<>(CreateTokenAction.INSTANCE, TransportCreateTokenAction.class), + new ActionHandler<>(InvalidateTokenAction.INSTANCE, TransportInvalidateTokenAction.class), + new ActionHandler<>(GetCertificateInfoAction.INSTANCE, TransportGetCertificateInfoAction.class), + new ActionHandler<>(RefreshTokenAction.INSTANCE, TransportRefreshTokenAction.class), + new ActionHandler<>(SamlPrepareAuthenticationAction.INSTANCE, TransportSamlPrepareAuthenticationAction.class), + new ActionHandler<>(SamlAuthenticateAction.INSTANCE, TransportSamlAuthenticateAction.class), + new ActionHandler<>(SamlLogoutAction.INSTANCE, TransportSamlLogoutAction.class), + new ActionHandler<>(SamlInvalidateSessionAction.INSTANCE, TransportSamlInvalidateSessionAction.class) + ); + } + + @Override + public List getActionFilters() { + if (enabled == false) { + return emptyList(); + } + // registering the security filter only for nodes + if (transportClientMode == false) { + return singletonList(securityActionFilter.get()); + } + return emptyList(); + } + + @Override + public List getRestHandlers(Settings settings, RestController restController, ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster) { + if (enabled == false) { + return emptyList(); + } + return Arrays.asList( + new RestAuthenticateAction(settings, restController, securityContext.get(), getLicenseState()), + new RestClearRealmCacheAction(settings, restController, getLicenseState()), + new 
RestClearRolesCacheAction(settings, restController, getLicenseState()), + new RestGetUsersAction(settings, restController, getLicenseState()), + new RestPutUserAction(settings, restController, getLicenseState()), + new RestDeleteUserAction(settings, restController, getLicenseState()), + new RestGetRolesAction(settings, restController, getLicenseState()), + new RestPutRoleAction(settings, restController, getLicenseState()), + new RestDeleteRoleAction(settings, restController, getLicenseState()), + new RestChangePasswordAction(settings, restController, securityContext.get(), getLicenseState()), + new RestSetEnabledAction(settings, restController, getLicenseState()), + new RestHasPrivilegesAction(settings, restController, securityContext.get(), getLicenseState()), + new RestGetRoleMappingsAction(settings, restController, getLicenseState()), + new RestPutRoleMappingAction(settings, restController, getLicenseState()), + new RestDeleteRoleMappingAction(settings, restController, getLicenseState()), + new RestGetTokenAction(settings, restController, getLicenseState()), + new RestInvalidateTokenAction(settings, restController, getLicenseState()), + new RestGetCertificateInfoAction(settings, restController), + new RestSamlPrepareAuthenticationAction(settings, restController, getLicenseState()), + new RestSamlAuthenticateAction(settings, restController, getLicenseState()), + new RestSamlLogoutAction(settings, restController, getLicenseState()), + new RestSamlInvalidateSessionAction(settings, restController, getLicenseState()) + ); + } + + @Override + public Map getProcessors(Processor.Parameters parameters) { + return Collections.singletonMap(SetSecurityUserProcessor.TYPE, new SetSecurityUserProcessor.Factory(parameters.threadContext)); + } + + + static boolean indexAuditLoggingEnabled(Settings settings) { + if (XPackSettings.AUDIT_ENABLED.get(settings)) { + List outputs = AUDIT_OUTPUTS_SETTING.get(settings); + for (String output : outputs) { + if (output.equals(IndexAuditTrail.NAME)) { + return true; + } + } + } + return false; + } + + static void validateAutoCreateIndex(Settings settings) { + String value = settings.get("action.auto_create_index"); + if (value == null) { + return; + } + + final boolean indexAuditingEnabled = Security.indexAuditLoggingEnabled(settings); + if (indexAuditingEnabled) { + String auditIndex = IndexAuditTrailField.INDEX_NAME_PREFIX + "*"; + String errorMessage = LoggerMessageFormat.format( + "the [action.auto_create_index] setting value [{}] is too" + + " restrictive. 
disable [action.auto_create_index] or set it to include " + + "[{}]", (Object) value, auditIndex); + if (Booleans.isFalse(value)) { + throw new IllegalArgumentException(errorMessage); + } + + if (Booleans.isTrue(value)) { + return; + } + + String[] matches = Strings.commaDelimitedListToStringArray(value); + List indices = new ArrayList<>(); + DateTime now = new DateTime(DateTimeZone.UTC); + // just use daily rollover + + indices.add(IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, now, IndexNameResolver.Rollover.DAILY)); + indices.add(IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, now.plusDays(1), + IndexNameResolver.Rollover.DAILY)); + indices.add(IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, now.plusMonths(1), + IndexNameResolver.Rollover.DAILY)); + indices.add(IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, now.plusMonths(2), + IndexNameResolver.Rollover.DAILY)); + indices.add(IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, now.plusMonths(3), + IndexNameResolver.Rollover.DAILY)); + indices.add(IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, now.plusMonths(4), + IndexNameResolver.Rollover.DAILY)); + indices.add(IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, now.plusMonths(5), + IndexNameResolver.Rollover.DAILY)); + indices.add(IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, now.plusMonths(6), + IndexNameResolver.Rollover.DAILY)); + + for (String index : indices) { + boolean matched = false; + for (String match : matches) { + char c = match.charAt(0); + if (c == '-') { + if (Regex.simpleMatch(match.substring(1), index)) { + throw new IllegalArgumentException(errorMessage); + } + } else if (c == '+') { + if (Regex.simpleMatch(match.substring(1), index)) { + matched = true; + break; + } + } else { + if (Regex.simpleMatch(match, index)) { + matched = true; + break; + } + } + } + if (!matched) { + throw new IllegalArgumentException(errorMessage); + } + } + + logger.warn("the [action.auto_create_index] setting is configured to be restrictive [{}]. 
" + + " for the next 6 months audit indices are allowed to be created, but please make sure" + + " that any future history indices after 6 months with the pattern " + + "[.security_audit_log*] are allowed to be created", value); + } + } + + @Override + public List getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry, ThreadContext threadContext) { + if (transportClientMode || enabled == false) { // don't register anything if we are not enabled + // interceptors are not installed if we are running on the transport client + return Collections.emptyList(); + } + return Collections.singletonList(new TransportInterceptor() { + @Override + public TransportRequestHandler interceptHandler(String action, String executor, + boolean forceExecution, + TransportRequestHandler actualHandler) { + assert securityInterceptor.get() != null; + return securityInterceptor.get().interceptHandler(action, executor, forceExecution, actualHandler); + } + + @Override + public AsyncSender interceptSender(AsyncSender sender) { + assert securityInterceptor.get() != null; + return securityInterceptor.get().interceptSender(sender); + } + }); + } + + @Override + public Map> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedWriteableRegistry namedWriteableRegistry, + NetworkService networkService) { + if (transportClientMode || enabled == false) { // don't register anything if we are not enabled, or in transport client mode + return Collections.emptyMap(); + } + return Collections.singletonMap(SecurityField.NAME4, () -> new SecurityNetty4ServerTransport(settings, threadPool, + networkService, bigArrays, namedWriteableRegistry, circuitBreakerService, ipFilter.get(), getSslService())); + } + + @Override + public Map> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, + CircuitBreakerService circuitBreakerService, + NamedWriteableRegistry namedWriteableRegistry, + NamedXContentRegistry xContentRegistry, + NetworkService networkService, + HttpServerTransport.Dispatcher dispatcher) { + if (enabled == false) { // don't register anything if we are not enabled + return Collections.emptyMap(); + } + return Collections.singletonMap(SecurityField.NAME4, () -> new SecurityNetty4HttpServerTransport(settings, + networkService, bigArrays, ipFilter.get(), getSslService(), threadPool, xContentRegistry, dispatcher)); + } + + @Override + public UnaryOperator getRestHandlerWrapper(ThreadContext threadContext) { + if (enabled == false || transportClientMode) { + return null; + } + final boolean ssl = HTTP_SSL_ENABLED.get(settings); + Settings httpSSLSettings = SSLService.getHttpTransportSSLSettings(settings); + boolean extractClientCertificate = ssl && getSslService().isSSLClientAuthEnabled(httpSSLSettings); + return handler -> new SecurityRestFilter(getLicenseState(), threadContext, authcService.get(), handler, + extractClientCertificate); + } + + @Override + public List> getExecutorBuilders(final Settings settings) { + if (enabled && transportClientMode == false) { + return Collections.singletonList( + new FixedExecutorBuilder(settings, TokenService.THREAD_POOL_NAME, 1, 1000, "xpack.security.authc.token.thread_pool")); + } + return Collections.emptyList(); + } + + @Override + public UnaryOperator> getIndexTemplateMetaDataUpgrader() { + return templates -> { + templates.remove(SECURITY_TEMPLATE_NAME); + final XContent xContent = XContentFactory.xContent(XContentType.JSON); + 
final byte[] auditTemplate = TemplateUtils.loadTemplate("/" + IndexAuditTrail.INDEX_TEMPLATE_NAME + ".json", + Version.CURRENT.toString(), IndexLifecycleManager.TEMPLATE_VERSION_PATTERN).getBytes(StandardCharsets.UTF_8); + + try (XContentParser parser = xContent + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, auditTemplate)) { + IndexTemplateMetaData auditMetadata = new IndexTemplateMetaData.Builder( + IndexTemplateMetaData.Builder.fromXContent(parser, IndexAuditTrail.INDEX_TEMPLATE_NAME)) + .settings(IndexAuditTrail.customAuditIndexSettings(settings, logger)) + .build(); + templates.put(IndexAuditTrail.INDEX_TEMPLATE_NAME, auditMetadata); + } catch (IOException e) { + // TODO: should we handle this with a thrown exception? + logger.error("Error loading template [{}] as part of metadata upgrading", IndexAuditTrail.INDEX_TEMPLATE_NAME); + } + + return templates; + }; + } + + @Override + public Map> getInitialClusterStateCustomSupplier() { + if (enabled) { + return Collections.singletonMap(TokenMetaData.TYPE, () -> tokenService.get().getTokenMetaData()); + } else { + return Collections.emptyMap(); + } + } + + @Override + public Function> getFieldFilter() { + if (enabled) { + return index -> { + if (getLicenseState().isSecurityEnabled() == false || getLicenseState().isDocumentAndFieldLevelSecurityAllowed() == false) { + return MapperPlugin.NOOP_FIELD_PREDICATE; + } + IndicesAccessControl indicesAccessControl = threadContext.get().getTransient( + AuthorizationServiceField.INDICES_PERMISSIONS_KEY); + IndicesAccessControl.IndexAccessControl indexPermissions = indicesAccessControl.getIndexPermissions(index); + if (indexPermissions == null) { + return MapperPlugin.NOOP_FIELD_PREDICATE; + } + if (indexPermissions.isGranted() == false) { + throw new IllegalStateException("unexpected call to getFieldFilter for index [" + index + "] which is not granted"); + } + FieldPermissions fieldPermissions = indexPermissions.getFieldPermissions(); + if (fieldPermissions == null) { + return MapperPlugin.NOOP_FIELD_PREDICATE; + } + return fieldPermissions::grantsAccessTo; + }; + } + return MapperPlugin.super.getFieldFilter(); + } + + @Override + public BiConsumer getJoinValidator() { + if (enabled) { + return new ValidateTLSOnJoin(XPackSettings.TRANSPORT_SSL_ENABLED.get(settings), + DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings)) + .andThen(new ValidateUpgradedSecurityIndex()); + } + return null; + } + + static final class ValidateTLSOnJoin implements BiConsumer { + private final boolean isTLSEnabled; + private final String discoveryType; + + ValidateTLSOnJoin(boolean isTLSEnabled, String discoveryType) { + this.isTLSEnabled = isTLSEnabled; + this.discoveryType = discoveryType; + } + + @Override + public void accept(DiscoveryNode node, ClusterState state) { + License license = LicenseService.getLicense(state.metaData()); + if (license != null && license.isProductionLicense() && + isTLSEnabled == false && "single-node".equals(discoveryType) == false) { + throw new IllegalStateException("TLS setup is required for license type [" + license.operationMode().name() + "]"); + } + } + } + + static final class ValidateUpgradedSecurityIndex implements BiConsumer { + @Override + public void accept(DiscoveryNode node, ClusterState state) { + if (state.getNodes().getMinNodeVersion().before(Version.V_7_0_0_alpha1)) { + IndexMetaData indexMetaData = state.getMetaData().getIndices().get(SECURITY_INDEX_NAME); + if (indexMetaData != null && INDEX_FORMAT_SETTING.get(indexMetaData.getSettings()) 
< INTERNAL_INDEX_FORMAT) { + throw new IllegalStateException("Security index is not on the current version [" + INTERNAL_INDEX_FORMAT + "] - " + + "The Upgrade API must be run for 7.x nodes to join the cluster"); + } + } + } + } + + @Override + public void reloadSPI(ClassLoader loader) { + securityExtensions.addAll(SecurityExtension.loadExtensions(loader)); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java new file mode 100644 index 0000000000000..1be3b4cd6795b --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java @@ -0,0 +1,159 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.SecurityFeatureSetUsage; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.security.authc.Realms; +import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; +import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; +import org.elasticsearch.xpack.security.transport.filter.IPFilter; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.core.XPackSettings.HTTP_SSL_ENABLED; +import static org.elasticsearch.xpack.core.XPackSettings.TRANSPORT_SSL_ENABLED; + +/** + * Indicates whether the features of Security are currently in use + */ +public class SecurityFeatureSet implements XPackFeatureSet { + + private final Settings settings; + private final XPackLicenseState licenseState; + @Nullable + private final Realms realms; + @Nullable + private final CompositeRolesStore rolesStore; + @Nullable + private final NativeRoleMappingStore roleMappingStore; + @Nullable + private final IPFilter ipFilter; + + @Inject + public SecurityFeatureSet(Settings settings, @Nullable XPackLicenseState licenseState, + @Nullable Realms realms, @Nullable CompositeRolesStore rolesStore, + @Nullable NativeRoleMappingStore roleMappingStore, + @Nullable IPFilter ipFilter) { + this.licenseState = licenseState; + this.realms = realms; + this.rolesStore = rolesStore; + this.roleMappingStore = roleMappingStore; + this.settings = settings; + this.ipFilter = ipFilter; + } + + @Override + public String name() { + return XPackField.SECURITY; + } + + @Override + public String description() { + return "Security for the Elastic Stack"; + } + + @Override + public boolean available() { + return licenseState != null && licenseState.isSecurityAvailable(); + } + + @Override + public boolean enabled() { + return licenseState != null && 
licenseState.isSecurityEnabled(); + } + + @Override + public Map nativeCodeInfo() { + return null; + } + + @Override + public void usage(ActionListener listener) { + Map realmsUsage = buildRealmsUsage(realms); + Map sslUsage = sslUsage(settings); + Map auditUsage = auditUsage(settings); + Map ipFilterUsage = ipFilterUsage(ipFilter); + Map anonymousUsage = singletonMap("enabled", AnonymousUser.isAnonymousEnabled(settings)); + + final AtomicReference> rolesUsageRef = new AtomicReference<>(); + final AtomicReference> roleMappingUsageRef = new AtomicReference<>(); + final CountDown countDown = new CountDown(2); + final Runnable doCountDown = () -> { + if (countDown.countDown()) { + listener.onResponse(new SecurityFeatureSetUsage(available(), enabled(), realmsUsage, + rolesUsageRef.get(), roleMappingUsageRef.get(), + sslUsage, auditUsage, ipFilterUsage, anonymousUsage)); + } + }; + + final ActionListener> rolesStoreUsageListener = + ActionListener.wrap(rolesStoreUsage -> { + rolesUsageRef.set(rolesStoreUsage); + doCountDown.run(); + }, listener::onFailure); + + final ActionListener> roleMappingStoreUsageListener = + ActionListener.wrap(nativeRoleMappingStoreUsage -> { + Map usage = singletonMap("native", nativeRoleMappingStoreUsage); + roleMappingUsageRef.set(usage); + doCountDown.run(); + }, listener::onFailure); + + if (rolesStore == null) { + rolesStoreUsageListener.onResponse(Collections.emptyMap()); + } else { + rolesStore.usageStats(rolesStoreUsageListener); + } + if (roleMappingStore == null) { + roleMappingStoreUsageListener.onResponse(Collections.emptyMap()); + } else { + roleMappingStore.usageStats(roleMappingStoreUsageListener); + } + } + + static Map buildRealmsUsage(Realms realms) { + if (realms == null) { + return Collections.emptyMap(); + } + return realms.usageStats(); + } + + static Map sslUsage(Settings settings) { + Map map = new HashMap<>(2); + map.put("http", singletonMap("enabled", HTTP_SSL_ENABLED.get(settings))); + map.put("transport", singletonMap("enabled", TRANSPORT_SSL_ENABLED.get(settings))); + return map; + } + + static Map auditUsage(Settings settings) { + Map map = new HashMap<>(2); + map.put("enabled", XPackSettings.AUDIT_ENABLED.get(settings)); + map.put("outputs", Security.AUDIT_OUTPUTS_SETTING.get(settings)); + return map; + } + + static Map ipFilterUsage(@Nullable IPFilter ipFilter) { + if (ipFilter == null) { + return IPFilter.DISABLED_USAGE_STATS; + } + return ipFilter.usageStats(); + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityLifecycleService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityLifecycleService.java new file mode 100644 index 0000000000000..fd9bf875b3465 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityLifecycleService.java @@ -0,0 +1,235 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.health.ClusterIndexHealth; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.component.LifecycleListener; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail; +import org.elasticsearch.xpack.security.support.IndexLifecycleManager; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Predicate; + +/** + * This class is used to provide a lifecycle for services that is based on the cluster's state + * rather than the typical lifecycle that is used to start services as part of the node startup. + * + * This type of lifecycle is necessary for services that need to perform actions that require the + * cluster to be in a certain state; some examples are storing index templates and creating indices. + * These actions would most likely fail from within a plugin if executed in the + * {@link org.elasticsearch.common.component.AbstractLifecycleComponent#doStart()} method. + * However, if the startup of these services waits for the cluster to form and recover indices then + * it will be successful. This lifecycle service allows for this to happen by listening for + * {@link ClusterChangedEvent} and checking if the services can start. Additionally, the service + * also provides hooks for stop and close functionality. 
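As a minimal sketch of the pattern described above (illustrative only, not part of this change; `startService` is a hypothetical startup callback), a component can defer its startup until the cluster state has been recovered:

```java
// Illustrative sketch of the cluster-state-driven lifecycle described above.
// The listener defers service startup until the state has been recovered from disk.
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.gateway.GatewayService;

public class DeferredStartupListener implements ClusterStateListener {

    private final Runnable startService; // hypothetical startup callback supplied by the owning plugin
    private volatile boolean started = false;

    public DeferredStartupListener(Runnable startService) {
        this.startService = startService;
    }

    @Override
    public void clusterChanged(ClusterChangedEvent event) {
        if (event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) {
            return; // state not yet recovered; wait for the next cluster changed event
        }
        if (started == false) {
            started = true;
            startService.run(); // now safe to store templates or create indices
        }
    }
}
```

Such a listener would be registered with the `ClusterService`, much like the class below registers itself in its constructor.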
+ */ +public class SecurityLifecycleService extends AbstractComponent implements ClusterStateListener { + + public static final String INTERNAL_SECURITY_INDEX = IndexLifecycleManager.INTERNAL_SECURITY_INDEX; + public static final String SECURITY_INDEX_NAME = ".security"; + + private static final Version MIN_READ_VERSION = Version.V_5_0_0; + + private final Settings settings; + private final ThreadPool threadPool; + private final IndexAuditTrail indexAuditTrail; + + private final IndexLifecycleManager securityIndex; + + public SecurityLifecycleService(Settings settings, ClusterService clusterService, + ThreadPool threadPool, Client client, + @Nullable IndexAuditTrail indexAuditTrail) { + super(settings); + this.settings = settings; + this.threadPool = threadPool; + this.indexAuditTrail = indexAuditTrail; + this.securityIndex = new IndexLifecycleManager(settings, client, SECURITY_INDEX_NAME); + clusterService.addListener(this); + clusterService.addLifecycleListener(new LifecycleListener() { + @Override + public void beforeStop() { + close(); + } + }); + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + final ClusterState state = event.state(); + if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { + // wait until the gateway has recovered from disk, otherwise we think we don't have the + // .security index but they may not have been restored from the cluster state on disk + logger.debug("lifecycle service waiting until state has been recovered"); + return; + } + + securityIndex.clusterChanged(event); + + try { + if (Security.indexAuditLoggingEnabled(settings) && + indexAuditTrail.state() == IndexAuditTrail.State.INITIALIZED) { + if (indexAuditTrail.canStart(event)) { + threadPool.generic().execute(new AbstractRunnable() { + + @Override + public void onFailure(Exception throwable) { + logger.error("failed to start index audit trail services", throwable); + assert false : "security lifecycle services startup failed"; + } + + @Override + public void doRun() { + indexAuditTrail.start(); + } + }); + } + } + } catch (Exception e) { + logger.error("failed to start index audit trail", e); + } + } + + IndexLifecycleManager securityIndex() { + return securityIndex; + } + + /** + * Returns {@code true} if the security index exists + */ + public boolean isSecurityIndexExisting() { + return securityIndex.indexExists(); + } + + /** + * Returns true if the security index does not exist or it exists and has the current + * value for the index.format index setting + */ + public boolean isSecurityIndexUpToDate() { + return securityIndex.isIndexUpToDate(); + } + + /** + * Returns true if the security index exists and all primary shards are active + */ + public boolean isSecurityIndexAvailable() { + return securityIndex.isAvailable(); + } + + /** + * Returns true if the security index does not exist or the mappings are up to date + * based on the version in the _meta field + */ + public boolean isSecurityIndexMappingUpToDate() { + return securityIndex().isMappingUpToDate(); + } + + /** + * Test whether the effective (active) version of the security mapping meets the + * requiredVersion. + * + * @return true if the effective version passes the predicate, or the security + * mapping does not exist (null version). Otherwise, false. + */ + public boolean checkSecurityMappingVersion(Predicate requiredVersion) { + return securityIndex.checkMappingVersion(requiredVersion); + } + + /** + * Adds a listener which will be notified when the security index health changes. 
The previous and + * current health will be provided to the listener so that the listener can determine if any action + * needs to be taken. + */ + public void addSecurityIndexHealthChangeListener(BiConsumer listener) { + securityIndex.addIndexHealthChangeListener(listener); + } + + /** + * Adds a listener which will be notified when the security index out of date value changes. The previous and + * current value will be provided to the listener so that the listener can determine if any action + * needs to be taken. + */ + void addSecurityIndexOutOfDateListener(BiConsumer listener) { + securityIndex.addIndexOutOfDateListener(listener); + } + + // this is called in a lifecycle listener beforeStop on the cluster service + private void close() { + if (indexAuditTrail != null) { + try { + indexAuditTrail.stop(); + } catch (Exception e) { + logger.error("failed to stop audit trail module", e); + } + } + } + + public static boolean securityIndexMappingSufficientToRead(ClusterState clusterState, Logger logger) { + return checkMappingVersions(clusterState, logger, MIN_READ_VERSION::onOrBefore); + } + + static boolean securityIndexMappingUpToDate(ClusterState clusterState, Logger logger) { + return checkMappingVersions(clusterState, logger, Version.CURRENT::equals); + } + + private static boolean checkMappingVersions(ClusterState clusterState, Logger logger, Predicate versionPredicate) { + return IndexLifecycleManager.checkIndexMappingVersionMatches(SECURITY_INDEX_NAME, clusterState, logger, versionPredicate); + } + + public static List indexNames() { + return Collections.unmodifiableList(Arrays.asList(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX)); + } + + /** + * Prepares the security index by creating it if it doesn't exist or updating the mappings if the mappings are + * out of date. After any tasks have been executed, the runnable is then executed. + */ + public void prepareIndexIfNeededThenExecute(final Consumer consumer, final Runnable andThen) { + securityIndex.prepareIndexIfNeededThenExecute(consumer, andThen); + } + + /** + * Checks if the security index is out of date with the current version. If the index does not exist + * we treat the index as up to date as we expect it to be created with the current format. + */ + public boolean isSecurityIndexOutOfDate() { + return securityIndex.isIndexUpToDate() == false; + } + + /** + * Is the move from {@code previousHealth} to {@code currentHealth} a move from an unhealthy ("RED") index state to a healthy + * ("non-RED") state. 
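A usage sketch of the health-change hook and the helpers described here (names such as `retryPendingSecurityReads()` are hypothetical, chosen only for illustration):

```java
import org.elasticsearch.cluster.health.ClusterIndexHealth;

import java.util.function.BiConsumer;

// Sketch only: react when the .security index leaves or re-enters an unusable state.
class SecurityIndexHealthWatcher {

    void register(SecurityLifecycleService lifecycleService) {
        BiConsumer<ClusterIndexHealth, ClusterIndexHealth> onChange = (previousHealth, currentHealth) -> {
            if (SecurityLifecycleService.isMoveFromRedToNonRed(previousHealth, currentHealth)) {
                retryPendingSecurityReads(); // the index is readable again
            } else if (SecurityLifecycleService.isIndexDeleted(previousHealth, currentHealth)) {
                // the index was deleted; any state derived from it should be invalidated
            }
        };
        lifecycleService.addSecurityIndexHealthChangeListener(onChange);
    }

    private void retryPendingSecurityReads() {
        // hypothetical reaction, e.g. re-run reads that were queued while the index was RED
    }
}
```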
+ */ + public static boolean isMoveFromRedToNonRed(ClusterIndexHealth previousHealth, ClusterIndexHealth currentHealth) { + return (previousHealth == null || previousHealth.getStatus() == ClusterHealthStatus.RED) + && currentHealth != null && currentHealth.getStatus() != ClusterHealthStatus.RED; + } + + /** + * Is the move from {@code previousHealth} to {@code currentHealth} a move from index-exists to index-deleted + */ + public static boolean isIndexDeleted(ClusterIndexHealth previousHealth, ClusterIndexHealth currentHealth) { + return previousHealth != null && currentHealth == null; + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/TokenSSLBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/TokenSSLBootstrapCheck.java new file mode 100644 index 0000000000000..cc6da23247987 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/TokenSSLBootstrapCheck.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security; + +import org.elasticsearch.bootstrap.BootstrapCheck; +import org.elasticsearch.bootstrap.BootstrapContext; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.xpack.core.XPackSettings; + +import java.util.Locale; + +/** + * Bootstrap check to ensure that the user has enabled HTTPS when using the token service + */ +final class TokenSSLBootstrapCheck implements BootstrapCheck { + + @Override + public BootstrapCheckResult check(BootstrapContext context) { + final Boolean httpEnabled = NetworkModule.HTTP_ENABLED.get(context.settings); + final Boolean httpsEnabled = XPackSettings.HTTP_SSL_ENABLED.get(context.settings); + final Boolean tokenServiceEnabled = XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.get(context.settings); + if (httpEnabled && httpsEnabled == false && tokenServiceEnabled) { + final String message = String.format( + Locale.ROOT, + "HTTPS is required in order to use the token service; " + + "please enable HTTPS using the [%s] setting or disable the token service using the [%s] setting", + XPackSettings.HTTP_SSL_ENABLED.getKey(), + XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey()); + return BootstrapCheckResult.failure(message); + } else { + return BootstrapCheckResult.success(); + } + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/SecurityActionMapper.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/SecurityActionMapper.java new file mode 100644 index 0000000000000..409317bbf89cc --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/SecurityActionMapper.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action; + +import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; +import org.elasticsearch.action.search.ClearScrollAction; +import org.elasticsearch.action.search.ClearScrollRequest; +import org.elasticsearch.transport.TransportRequest; + +/** + * This class analyzes an incoming request and its action name, and returns the security action name for it. + * In many cases the action name is the same as the original one used in es core, but in some exceptional cases it might need + * to be converted. For instance a clear_scroll that targets all opened scrolls gets converted to a different action that requires + * cluster privileges instead of the default indices privileges, still valid for clear scrolls that target specific scroll ids. + */ +public class SecurityActionMapper { + + static final String CLUSTER_PERMISSION_SCROLL_CLEAR_ALL_NAME = "cluster:admin/indices/scroll/clear_all"; + static final String CLUSTER_PERMISSION_ANALYZE = "cluster:admin/analyze"; + + /** + * Returns the security specific action name given the incoming action name and request + */ + public String action(String action, TransportRequest request) { + switch (action) { + case ClearScrollAction.NAME: + assert request instanceof ClearScrollRequest; + boolean isClearAllScrollRequest = ((ClearScrollRequest) request).scrollIds().contains("_all"); + if (isClearAllScrollRequest) { + return CLUSTER_PERMISSION_SCROLL_CLEAR_ALL_NAME; + } + break; + case AnalyzeAction.NAME: + case AnalyzeAction.NAME + "[s]": + assert request instanceof AnalyzeRequest; + String[] indices = ((AnalyzeRequest) request).indices(); + if (indices == null || (indices.length == 1 && indices[0] == null)) { + return CLUSTER_PERMISSION_ANALYZE; + } + break; + } + return action; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilter.java new file mode 100644 index 0000000000000..353b4b9729b9c --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilter.java @@ -0,0 +1,186 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.filter; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexAction; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; +import org.elasticsearch.action.admin.indices.open.OpenIndexAction; +import org.elasticsearch.action.support.ActionFilter; +import org.elasticsearch.action.support.ActionFilterChain; +import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.action.support.DestructiveOperations; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authz.privilege.HealthAndStatsPrivilege; +import org.elasticsearch.xpack.core.security.support.Automatons; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.security.action.SecurityActionMapper; +import org.elasticsearch.xpack.security.action.interceptor.RequestInterceptor; +import org.elasticsearch.xpack.security.authc.AuthenticationService; +import org.elasticsearch.xpack.security.authz.AuthorizationService; +import org.elasticsearch.xpack.security.authz.AuthorizationUtils; + +import java.io.IOException; +import java.util.Set; +import java.util.function.Predicate; + +public class SecurityActionFilter extends AbstractComponent implements ActionFilter { + + private static final Predicate LICENSE_EXPIRATION_ACTION_MATCHER = HealthAndStatsPrivilege.INSTANCE.predicate(); + private static final Predicate SECURITY_ACTION_MATCHER = Automatons.predicate("cluster:admin/xpack/security*"); + + private final AuthenticationService authcService; + private final AuthorizationService authzService; + private final SecurityActionMapper actionMapper = new SecurityActionMapper(); + private final Set requestInterceptors; + private final XPackLicenseState licenseState; + private final ThreadContext threadContext; + private final SecurityContext securityContext; + private final DestructiveOperations destructiveOperations; + + public SecurityActionFilter(Settings settings, AuthenticationService authcService, AuthorizationService authzService, + XPackLicenseState licenseState, Set requestInterceptors, ThreadPool threadPool, + SecurityContext securityContext, DestructiveOperations destructiveOperations) { + super(settings); + this.authcService = authcService; + this.authzService = authzService; + this.licenseState = licenseState; + this.requestInterceptors = requestInterceptors; + this.threadContext = threadPool.getThreadContext(); + this.securityContext = securityContext; + this.destructiveOperations = destructiveOperations; + } + + @Override + public void apply(Task task, String action, Request request, + ActionListener listener, + ActionFilterChain chain) { + + /* + A functional requirement - 
when the license of security is disabled (invalid/expires), security will continue + to operate normally, except all read operations will be blocked. + */ + if (licenseState.isStatsAndHealthAllowed() == false && LICENSE_EXPIRATION_ACTION_MATCHER.test(action)) { + logger.error("blocking [{}] operation due to expired license. Cluster health, cluster stats and indices stats \n" + + "operations are blocked on license expiration. All data operations (read and write) continue to work. \n" + + "If you have a new license, please update it. Otherwise, please reach out to your support contact.", action); + throw LicenseUtils.newComplianceException(XPackField.SECURITY); + } + + final boolean securityEnabled = licenseState.isSecurityEnabled(); + if (securityEnabled && licenseState.isAuthAllowed()) { + final ActionListener contextPreservingListener = + ContextPreservingActionListener.wrapPreservingContext(listener, threadContext); + ActionListener authenticatedListener = ActionListener.wrap( + (aVoid) -> chain.proceed(task, action, request, contextPreservingListener), contextPreservingListener::onFailure); + final boolean useSystemUser = AuthorizationUtils.shouldReplaceUserWithSystem(threadContext, action); + try { + if (useSystemUser) { + securityContext.executeAsUser(SystemUser.INSTANCE, (original) -> { + try { + applyInternal(action, request, authenticatedListener); + } catch (IOException e) { + listener.onFailure(e); + } + }, Version.CURRENT); + } else if (AuthorizationUtils.shouldSetUserBasedOnActionOrigin(threadContext)) { + AuthorizationUtils.switchUserBasedOnActionOriginAndExecute(threadContext, securityContext, (original) -> { + try { + applyInternal(action, request, authenticatedListener); + } catch (IOException e) { + listener.onFailure(e); + } + }); + } else { + try (ThreadContext.StoredContext ignore = threadContext.newStoredContext(true)) { + applyInternal(action, request, authenticatedListener); + } + } + } catch (Exception e) { + listener.onFailure(e); + } + } else if (SECURITY_ACTION_MATCHER.test(action)) { + if (securityEnabled == false && licenseState.isTrialLicense()) { + listener.onFailure(new ElasticsearchException("Security must be explicitly enabled when using a trial license. " + + "Enable security by setting [xpack.security.enabled] to [true] in the elasticsearch.yml file " + + "and restart the node.")); + } else { + listener.onFailure(LicenseUtils.newComplianceException(XPackField.SECURITY)); + } + } else { + chain.proceed(task, action, request, listener); + } + } + + @Override + public int order() { + return Integer.MIN_VALUE; + } + + private void applyInternal(String action, Request request, + ActionListener listener) throws IOException { + if (CloseIndexAction.NAME.equals(action) || OpenIndexAction.NAME.equals(action) || DeleteIndexAction.NAME.equals(action)) { + IndicesRequest indicesRequest = (IndicesRequest) request; + try { + destructiveOperations.failDestructive(indicesRequest.indices()); + } catch(IllegalArgumentException e) { + listener.onFailure(e); + return; + } + } + + /* + here we fallback on the system user. Internal system requests are requests that are triggered by + the system itself (e.g. pings, update mappings, share relocation, etc...) and were not originated + by user interaction. Since these requests are triggered by es core modules, they are security + agnostic and therefore not associated with any user. When these requests execute locally, they + are executed directly on their relevant action. 
Since there is no other way a request can make + it to the action without an associated user (not via REST or transport - this is taken care of by + the {@link Rest} filter and the {@link ServerTransport} filter respectively), it's safe to assume a system user + here if a request is not associated with any other user. + */ + final String securityAction = actionMapper.action(action, request); + authcService.authenticate(securityAction, request, SystemUser.INSTANCE, + ActionListener.wrap((authc) -> authorizeRequest(authc, securityAction, request, listener), listener::onFailure)); + } + + private void authorizeRequest(Authentication authentication, String securityAction, Request request, + ActionListener listener) { + if (authentication == null) { + listener.onFailure(new IllegalArgumentException("authentication must be non null for authorization")); + } else { + final AuthorizationUtils.AsyncAuthorizer asyncAuthorizer = new AuthorizationUtils.AsyncAuthorizer(authentication, listener, + (userRoles, runAsRoles) -> { + authzService.authorize(authentication, securityAction, request, userRoles, runAsRoles); + /* + * We use a separate concept for code that needs to be run after authentication and authorization that could + * affect the running of the action. This is done to make it more clear of the state of the request. + */ + for (RequestInterceptor interceptor : requestInterceptors) { + if (interceptor.supports(request)) { + interceptor.intercept(request, authentication, runAsRoles != null ? runAsRoles : userRoles, securityAction); + } + } + listener.onResponse(null); + }); + asyncAuthorizer.authorize(authzService); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/BulkShardRequestInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/BulkShardRequestInterceptor.java new file mode 100644 index 0000000000000..cbcdce98eaaaf --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/BulkShardRequestInterceptor.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.interceptor; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.bulk.BulkItemRequest; +import org.elasticsearch.action.bulk.BulkShardRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; +import org.elasticsearch.xpack.core.security.authz.permission.Role; + +/** + * Similar to {@link UpdateRequestInterceptor}, but checks if there are update requests embedded in a bulk request. 
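For illustration (assuming field or document level security applies to a hypothetical `secured-index`), a bulk that mixes index and update operations is the case this interceptor guards against: the index item would be allowed, while the embedded update item causes the shard-level bulk to be rejected with a 400 response.

```java
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;

// Illustration only: under FLS/DLS the embedded update operation is rejected by the interceptor.
BulkRequest bulk = new BulkRequest()
        .add(new IndexRequest("secured-index", "doc", "1").source("title", "new document"))
        .add(new UpdateRequest("secured-index", "doc", "2").doc("title", "partial update"));
```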
+ */ +public class BulkShardRequestInterceptor extends AbstractComponent implements RequestInterceptor { + + private final ThreadContext threadContext; + private final XPackLicenseState licenseState; + + public BulkShardRequestInterceptor(Settings settings, ThreadPool threadPool, XPackLicenseState licenseState) { + super(settings); + this.threadContext = threadPool.getThreadContext(); + this.licenseState = licenseState; + } + + @Override + public void intercept(BulkShardRequest request, Authentication authentication, Role userPermissions, String action) { + if (licenseState.isSecurityEnabled() == false || licenseState.isDocumentAndFieldLevelSecurityAllowed() == false) { + return; + } + IndicesAccessControl indicesAccessControl = threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); + + for (BulkItemRequest bulkItemRequest : request.items()) { + IndicesAccessControl.IndexAccessControl indexAccessControl = indicesAccessControl.getIndexPermissions(bulkItemRequest.index()); + if (indexAccessControl != null) { + boolean fls = indexAccessControl.getFieldPermissions().hasFieldLevelSecurity(); + boolean dls = indexAccessControl.getQueries() != null; + if (fls || dls) { + if (bulkItemRequest.request() instanceof UpdateRequest) { + throw new ElasticsearchSecurityException("Can't execute a bulk request with update requests embedded if " + + "field or document level security is enabled", RestStatus.BAD_REQUEST); + } + } + } + logger.trace("intercepted bulk request for index [{}] without any update requests, continuing execution", + bulkItemRequest.index()); + } + } + + @Override + public boolean supports(TransportRequest request) { + return request instanceof BulkShardRequest; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/FieldAndDocumentLevelSecurityRequestInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/FieldAndDocumentLevelSecurityRequestInterceptor.java new file mode 100644 index 0000000000000..5116e9b09f882 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/FieldAndDocumentLevelSecurityRequestInterceptor.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.interceptor; + +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; +import org.elasticsearch.xpack.core.security.authz.permission.Role; + +/** + * Base class for interceptors that disables features when field level security is configured for indices a request + * is going to execute on. 
+ */ +abstract class FieldAndDocumentLevelSecurityRequestInterceptor extends AbstractComponent implements + RequestInterceptor { + + private final ThreadContext threadContext; + private final XPackLicenseState licenseState; + + FieldAndDocumentLevelSecurityRequestInterceptor(Settings settings, ThreadContext threadContext, + XPackLicenseState licenseState) { + super(settings); + this.threadContext = threadContext; + this.licenseState = licenseState; + } + + @Override + public void intercept(Request request, Authentication authentication, Role userPermissions, String action) { + if (licenseState.isSecurityEnabled() == false || licenseState.isDocumentAndFieldLevelSecurityAllowed() == false) { + return; + } + final IndicesAccessControl indicesAccessControl = threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); + for (String index : request.indices()) { + IndicesAccessControl.IndexAccessControl indexAccessControl = indicesAccessControl.getIndexPermissions(index); + if (indexAccessControl != null) { + boolean fieldLevelSecurityEnabled = indexAccessControl.getFieldPermissions().hasFieldLevelSecurity(); + boolean documentLevelSecurityEnabled = indexAccessControl.getQueries() != null; + if (fieldLevelSecurityEnabled || documentLevelSecurityEnabled) { + if (fieldLevelSecurityEnabled || documentLevelSecurityEnabled) { + logger.trace("intercepted request for index [{}] with field level access controls [{}] document level access " + + "controls [{}]. disabling conflicting features", index, fieldLevelSecurityEnabled, + documentLevelSecurityEnabled); + } + disableFeatures(request, fieldLevelSecurityEnabled, documentLevelSecurityEnabled); + return; + } + } + logger.trace("intercepted request for index [{}] without field or document level access controls", index); + } + } + + protected abstract void disableFeatures(Request request, boolean fieldLevelSecurityEnabled, boolean documentLevelSecurityEnabled); + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptor.java new file mode 100644 index 0000000000000..76b776c71f6b6 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptor.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.interceptor; + +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.Operations; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.support.Exceptions; +import org.elasticsearch.xpack.security.audit.AuditTrailService; + +import java.util.HashMap; +import java.util.Map; + +public final class IndicesAliasesRequestInterceptor implements RequestInterceptor { + + private final ThreadContext threadContext; + private final XPackLicenseState licenseState; + private final AuditTrailService auditTrailService; + + public IndicesAliasesRequestInterceptor(ThreadContext threadContext, XPackLicenseState licenseState, + AuditTrailService auditTrailService) { + this.threadContext = threadContext; + this.licenseState = licenseState; + this.auditTrailService = auditTrailService; + } + + @Override + public void intercept(IndicesAliasesRequest request, Authentication authentication, Role userPermissions, String action) { + if (licenseState.isSecurityEnabled() == false) { + return; + } + + if (licenseState.isDocumentAndFieldLevelSecurityAllowed()) { + IndicesAccessControl indicesAccessControl = threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); + for (IndicesAliasesRequest.AliasActions aliasAction : request.getAliasActions()) { + if (aliasAction.actionType() == IndicesAliasesRequest.AliasActions.Type.ADD) { + for (String index : aliasAction.indices()) { + IndicesAccessControl.IndexAccessControl indexAccessControl = indicesAccessControl.getIndexPermissions(index); + if (indexAccessControl != null) { + final boolean fls = indexAccessControl.getFieldPermissions().hasFieldLevelSecurity(); + final boolean dls = indexAccessControl.getQueries() != null; + if (fls || dls) { + throw new ElasticsearchSecurityException("Alias requests are not allowed for users who have " + + "field or document level security enabled on one of the indices", RestStatus.BAD_REQUEST); + } + } + } + } + } + } + + Map permissionsMap = new HashMap<>(); + for (IndicesAliasesRequest.AliasActions aliasAction : request.getAliasActions()) { + if (aliasAction.actionType() == IndicesAliasesRequest.AliasActions.Type.ADD) { + for (String index : aliasAction.indices()) { + Automaton indexPermissions = permissionsMap.computeIfAbsent(index, userPermissions.indices()::allowedActionsMatcher); + for (String alias : aliasAction.aliases()) { + Automaton aliasPermissions = + permissionsMap.computeIfAbsent(alias, userPermissions.indices()::allowedActionsMatcher); + if (Operations.subsetOf(aliasPermissions, indexPermissions) == false) { + // TODO we've already audited a access granted event so this is going to look ugly + auditTrailService.accessDenied(authentication, action, request, userPermissions.names()); + throw Exceptions.authorizationError("Adding an alias is not allowed when the alias " + + "has more 
permissions than any of the indices"); + } + } + } + } + } + } + + @Override + public boolean supports(TransportRequest request) { + return request instanceof IndicesAliasesRequest; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/RequestInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/RequestInterceptor.java new file mode 100644 index 0000000000000..c994626a7f402 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/RequestInterceptor.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.interceptor; + +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authz.permission.Role; + +/** + * A request interceptor can introspect a request and modify it. + */ +public interface RequestInterceptor { + + /** + * If {@link #supports(TransportRequest)} returns true this interceptor will introspect the request + * and potentially modify it. + */ + void intercept(Request request, Authentication authentication, Role userPermissions, String action); + + /** + * Returns whether this request interceptor should intercept the specified request. + */ + boolean supports(TransportRequest request); + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptor.java new file mode 100644 index 0000000000000..a4d5eecb92f03 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptor.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.interceptor; + +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.Operations; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.support.Exceptions; +import org.elasticsearch.xpack.security.audit.AuditTrailService; + +public final class ResizeRequestInterceptor extends AbstractComponent implements RequestInterceptor { + + private final ThreadContext threadContext; + private final XPackLicenseState licenseState; + private final AuditTrailService auditTrailService; + + public ResizeRequestInterceptor(Settings settings, ThreadPool threadPool, XPackLicenseState licenseState, + AuditTrailService auditTrailService) { + super(settings); + this.threadContext = threadPool.getThreadContext(); + this.licenseState = licenseState; + this.auditTrailService = auditTrailService; + } + + @Override + public void intercept(ResizeRequest request, Authentication authentication, Role userPermissions, String action) { + if (licenseState.isSecurityEnabled() == false) { + return; + } + + if (licenseState.isDocumentAndFieldLevelSecurityAllowed()) { + IndicesAccessControl indicesAccessControl = threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); + IndicesAccessControl.IndexAccessControl indexAccessControl = indicesAccessControl.getIndexPermissions(request.getSourceIndex()); + if (indexAccessControl != null) { + final boolean fls = indexAccessControl.getFieldPermissions().hasFieldLevelSecurity(); + final boolean dls = indexAccessControl.getQueries() != null; + if (fls || dls) { + throw new ElasticsearchSecurityException("Resize requests are not allowed for users when " + + "field or document level security is enabled on the source index", RestStatus.BAD_REQUEST); + } + } + } + + // ensure that the user would have the same level of access OR less on the target index + final Automaton sourceIndexPermissions = userPermissions.indices().allowedActionsMatcher(request.getSourceIndex()); + final Automaton targetIndexPermissions = userPermissions.indices().allowedActionsMatcher(request.getTargetIndexRequest().index()); + if (Operations.subsetOf(targetIndexPermissions, sourceIndexPermissions) == false) { + // TODO we've already audited a access granted event so this is going to look ugly + auditTrailService.accessDenied(authentication, action, request, userPermissions.names()); + throw Exceptions.authorizationError("Resizing an index is not allowed when the target index " + + "has more permissions than the source index"); + } + } + + @Override + public boolean supports(TransportRequest request) { + return request instanceof ResizeRequest; + } +} diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/SearchRequestInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/SearchRequestInterceptor.java new file mode 100644 index 0000000000000..3ceaa02ee7286 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/SearchRequestInterceptor.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.interceptor; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportRequest; + +/** + * If field level security is enabled this interceptor disables the request cache for search requests. + */ +public class SearchRequestInterceptor extends FieldAndDocumentLevelSecurityRequestInterceptor { + + public SearchRequestInterceptor(Settings settings, ThreadPool threadPool, XPackLicenseState licenseState) { + super(settings, threadPool.getThreadContext(), licenseState); + } + + @Override + public void disableFeatures(SearchRequest request, boolean fieldLevelSecurityEnabled, boolean documentLevelSecurityEnabled) { + request.requestCache(false); + + if (documentLevelSecurityEnabled) { + if (request.source() != null && request.source().suggest() != null) { + throw new ElasticsearchSecurityException("Suggest isn't supported if document level security is enabled", + RestStatus.BAD_REQUEST); + } + if (request.source() != null && request.source().profile()) { + throw new ElasticsearchSecurityException("A search request cannot be profiled if document level security is enabled", + RestStatus.BAD_REQUEST); + } + } + } + + @Override + public boolean supports(TransportRequest request) { + return request instanceof SearchRequest; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/UpdateRequestInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/UpdateRequestInterceptor.java new file mode 100644 index 0000000000000..40b63d943d818 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/interceptor/UpdateRequestInterceptor.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.interceptor; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportRequest; + +/** + * A request interceptor that fails update request if field or document level security is enabled. + *

+ * It can be dangerous for users if a document were to be updated via a role that has fls or dls enabled, + * because only the fields that the role can see would be used to perform the update, and the user may unknowingly + * remove the other fields, which are not visible to them, from the document being updated. + */ +public class UpdateRequestInterceptor extends FieldAndDocumentLevelSecurityRequestInterceptor { + + public UpdateRequestInterceptor(Settings settings, ThreadPool threadPool, XPackLicenseState licenseState) { + super(settings, threadPool.getThreadContext(), licenseState); + } + + @Override + protected void disableFeatures(UpdateRequest updateRequest, boolean fieldLevelSecurityEnabled, boolean documentLevelSecurityEnabled) { + throw new ElasticsearchSecurityException("Can't execute an update request if field or document level security is enabled", + RestStatus.BAD_REQUEST); + } + + @Override + public boolean supports(TransportRequest request) { + return request instanceof UpdateRequest; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/realm/TransportClearRealmCacheAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/realm/TransportClearRealmCacheAction.java new file mode 100644 index 0000000000000..1f7a307396adb --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/realm/TransportClearRealmCacheAction.java @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.realm; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.nodes.TransportNodesAction; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheRequest; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheResponse; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.security.authc.Realms; +import org.elasticsearch.xpack.security.authc.support.CachingRealm; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.List; + +public class TransportClearRealmCacheAction extends TransportNodesAction { + + private final Realms realms; + + @Inject + public TransportClearRealmCacheAction(Settings settings, ThreadPool threadPool, + ClusterService clusterService, TransportService transportService, + ActionFilters actionFilters, Realms realms, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, ClearRealmCacheAction.NAME, threadPool, clusterService, transportService, actionFilters, + indexNameExpressionResolver, ClearRealmCacheRequest::new, ClearRealmCacheRequest.Node::new, ThreadPool.Names.MANAGEMENT, + ClearRealmCacheResponse.Node.class); + this.realms = realms; + } + + @Override + protected ClearRealmCacheResponse 
newResponse(ClearRealmCacheRequest request, + List responses, List failures) { + return new ClearRealmCacheResponse(clusterService.getClusterName(), responses, failures); + } + + @Override + protected ClearRealmCacheRequest.Node newNodeRequest(String nodeId, ClearRealmCacheRequest request) { + return new ClearRealmCacheRequest.Node(request, nodeId); + } + + @Override + protected ClearRealmCacheResponse.Node newNodeResponse() { + return new ClearRealmCacheResponse.Node(); + } + + @Override + protected ClearRealmCacheResponse.Node nodeOperation(ClearRealmCacheRequest.Node nodeRequest) throws ElasticsearchException { + if (nodeRequest.getRealms() == null || nodeRequest.getRealms().length == 0) { + for (Realm realm : realms) { + clearCache(realm, nodeRequest.getUsernames()); + } + return new ClearRealmCacheResponse.Node(clusterService.localNode()); + } + + for (String realmName : nodeRequest.getRealms()) { + Realm realm = realms.realm(realmName); + if (realm == null) { + throw new IllegalArgumentException("could not find active realm [" + realmName + "]"); + } + clearCache(realm, nodeRequest.getUsernames()); + } + return new ClearRealmCacheResponse.Node(clusterService.localNode()); + } + + private void clearCache(Realm realm, String[] usernames) { + if (!(realm instanceof CachingRealm)) { + return; + } + CachingRealm cachingRealm = (CachingRealm) realm; + + if (usernames != null && usernames.length != 0) { + for (String username : usernames) { + cachingRealm.expire(username); + } + } else { + cachingRealm.expireAll(); + } + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportClearRolesCacheAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportClearRolesCacheAction.java new file mode 100644 index 0000000000000..92e524b0c50aa --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportClearRolesCacheAction.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.role; + +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.nodes.TransportNodesAction; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheAction; +import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheRequest; +import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheResponse; +import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.List; + +public class TransportClearRolesCacheAction extends TransportNodesAction { + + private final CompositeRolesStore rolesStore; + + @Inject + public TransportClearRolesCacheAction(Settings settings, ThreadPool threadPool, + ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, + CompositeRolesStore rolesStore, IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, ClearRolesCacheAction.NAME, threadPool, clusterService, transportService, + actionFilters, indexNameExpressionResolver, ClearRolesCacheRequest::new, ClearRolesCacheRequest.Node::new, + ThreadPool.Names.MANAGEMENT, ClearRolesCacheResponse.Node.class); + this.rolesStore = rolesStore; + } + + @Override + protected ClearRolesCacheResponse newResponse(ClearRolesCacheRequest request, + List responses, List failures) { + return new ClearRolesCacheResponse(clusterService.getClusterName(), responses, failures); + } + + @Override + protected ClearRolesCacheRequest.Node newNodeRequest(String nodeId, ClearRolesCacheRequest request) { + return new ClearRolesCacheRequest.Node(request, nodeId); + } + + @Override + protected ClearRolesCacheResponse.Node newNodeResponse() { + return new ClearRolesCacheResponse.Node(); + } + + @Override + protected ClearRolesCacheResponse.Node nodeOperation(ClearRolesCacheRequest.Node request) { + if (request.getNames() == null || request.getNames().length == 0) { + rolesStore.invalidateAll(); + } else { + for (String role : request.getNames()) { + rolesStore.invalidate(role); + } + } + return new ClearRolesCacheResponse.Node(clusterService.localNode()); + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportDeleteRoleAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportDeleteRoleAction.java new file mode 100644 index 0000000000000..756e330ff3acf --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportDeleteRoleAction.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.role; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleAction; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequest; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleResponse; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; +import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; + +public class TransportDeleteRoleAction extends HandledTransportAction { + + private final NativeRolesStore rolesStore; + + @Inject + public TransportDeleteRoleAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, NativeRolesStore rolesStore, + TransportService transportService) { + super(settings, DeleteRoleAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, + DeleteRoleRequest::new); + this.rolesStore = rolesStore; + } + + @Override + protected void doExecute(DeleteRoleRequest request, ActionListener listener) { + if (ReservedRolesStore.isReserved(request.name())) { + listener.onFailure(new IllegalArgumentException("role [" + request.name() + "] is reserved and cannot be deleted")); + return; + } + + try { + rolesStore.deleteRole(request, new ActionListener() { + @Override + public void onResponse(Boolean found) { + listener.onResponse(new DeleteRoleResponse(found)); + } + + @Override + public void onFailure(Exception t) { + listener.onFailure(t); + } + }); + } catch (Exception e) { + logger.error((Supplier) () -> new ParameterizedMessage("failed to delete role [{}]", request.name()), e); + listener.onFailure(e); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesAction.java new file mode 100644 index 0000000000000..74f93ddff2f64 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesAction.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.role; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.role.GetRolesAction; +import org.elasticsearch.xpack.core.security.action.role.GetRolesRequest; +import org.elasticsearch.xpack.core.security.action.role.GetRolesResponse; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; +import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; + +import java.util.ArrayList; +import java.util.List; + +public class TransportGetRolesAction extends HandledTransportAction { + + private final NativeRolesStore nativeRolesStore; + private final ReservedRolesStore reservedRolesStore; + + @Inject + public TransportGetRolesAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + NativeRolesStore nativeRolesStore, TransportService transportService, + ReservedRolesStore reservedRolesStore) { + super(settings, GetRolesAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, + GetRolesRequest::new); + this.nativeRolesStore = nativeRolesStore; + this.reservedRolesStore = reservedRolesStore; + } + + @Override + protected void doExecute(final GetRolesRequest request, final ActionListener listener) { + final String[] requestedRoles = request.names(); + final boolean specificRolesRequested = requestedRoles != null && requestedRoles.length > 0; + final List rolesToSearchFor = new ArrayList<>(); + final List roles = new ArrayList<>(); + + if (specificRolesRequested) { + for (String role : requestedRoles) { + if (ReservedRolesStore.isReserved(role)) { + RoleDescriptor rd = reservedRolesStore.roleDescriptor(role); + if (rd != null) { + roles.add(rd); + } else { + listener.onFailure(new IllegalStateException("unable to obtain reserved role [" + role + "]")); + return; + } + } else { + rolesToSearchFor.add(role); + } + } + } else { + roles.addAll(reservedRolesStore.roleDescriptors()); + } + + if (specificRolesRequested && rolesToSearchFor.isEmpty()) { + // specific roles were requested but they were built in only, no need to hit the store + listener.onResponse(new GetRolesResponse(roles.toArray(new RoleDescriptor[roles.size()]))); + } else { + String[] roleNames = rolesToSearchFor.toArray(new String[rolesToSearchFor.size()]); + nativeRolesStore.getRoleDescriptors(roleNames, ActionListener.wrap((foundRoles) -> { + roles.addAll(foundRoles); + listener.onResponse(new GetRolesResponse(roles.toArray(new RoleDescriptor[roles.size()]))); + }, listener::onFailure)); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleAction.java new file mode 100644 index 0000000000000..874a11131c5b7 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleAction.java @@ -0,0 +1,59 @@ +/* + * 
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.role; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.role.PutRoleAction; +import org.elasticsearch.xpack.core.security.action.role.PutRoleRequest; +import org.elasticsearch.xpack.core.security.action.role.PutRoleResponse; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; +import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; + +public class TransportPutRoleAction extends HandledTransportAction { + + private final NativeRolesStore rolesStore; + + @Inject + public TransportPutRoleAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + NativeRolesStore rolesStore, TransportService transportService) { + super(settings, PutRoleAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, PutRoleRequest::new); + this.rolesStore = rolesStore; + } + + @Override + protected void doExecute(final PutRoleRequest request, final ActionListener listener) { + final String name = request.roleDescriptor().getName(); + if (ReservedRolesStore.isReserved(name)) { + listener.onFailure(new IllegalArgumentException("role [" + name + "] is reserved and cannot be modified.")); + return; + } + + rolesStore.putRole(request, request.roleDescriptor(), new ActionListener() { + @Override + public void onResponse(Boolean created) { + if (created) { + logger.info("added role [{}]", request.name()); + } else { + logger.info("updated role [{}]", request.name()); + } + listener.onResponse(new PutRoleResponse(created)); + } + + @Override + public void onFailure(Exception t) { + listener.onFailure(t); + } + }); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportDeleteRoleMappingAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportDeleteRoleMappingAction.java new file mode 100644 index 0000000000000..dd2bf593cbe38 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportDeleteRoleMappingAction.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.security.action.rolemapping;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingAction;
+import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingRequest;
+import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingResponse;
+import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore;
+
+public class TransportDeleteRoleMappingAction
+        extends HandledTransportAction<DeleteRoleMappingRequest, DeleteRoleMappingResponse> {
+
+    private final NativeRoleMappingStore roleMappingStore;
+
+    @Inject
+    public TransportDeleteRoleMappingAction(Settings settings, ThreadPool threadPool,
+                                            ActionFilters actionFilters,
+                                            IndexNameExpressionResolver indexNameExpressionResolver,
+                                            TransportService transportService,
+                                            NativeRoleMappingStore roleMappingStore) {
+        super(settings, DeleteRoleMappingAction.NAME, threadPool, transportService, actionFilters,
+                indexNameExpressionResolver, DeleteRoleMappingRequest::new);
+        this.roleMappingStore = roleMappingStore;
+    }
+
+    @Override
+    protected void doExecute(DeleteRoleMappingRequest request,
+                             ActionListener<DeleteRoleMappingResponse> listener) {
+        roleMappingStore.deleteRoleMapping(request, new ActionListener<Boolean>() {
+            @Override
+            public void onResponse(Boolean found) {
+                listener.onResponse(new DeleteRoleMappingResponse(found));
+            }
+
+            @Override
+            public void onFailure(Exception t) {
+                listener.onFailure(t);
+            }
+        });
+    }
+}
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsAction.java
new file mode 100644
index 0000000000000..f87d8147fba6d
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsAction.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.security.action.rolemapping; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsRequest; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsResponse; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; + +public class TransportGetRoleMappingsAction + extends HandledTransportAction { + + private final NativeRoleMappingStore roleMappingStore; + + @Inject + public TransportGetRoleMappingsAction(Settings settings, ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + TransportService transportService, + NativeRoleMappingStore nativeRoleMappingStore) { + super(settings, GetRoleMappingsAction.NAME, threadPool, transportService, actionFilters, + indexNameExpressionResolver, GetRoleMappingsRequest::new); + this.roleMappingStore = nativeRoleMappingStore; + } + + @Override + protected void doExecute(final GetRoleMappingsRequest request, + final ActionListener listener) { + final Set names; + if (request.getNames() == null || request.getNames().length == 0) { + names = null; + } else { + names = new HashSet<>(Arrays.asList(request.getNames())); + } + this.roleMappingStore.getRoleMappings(names, ActionListener.wrap( + mappings -> { + ExpressionRoleMapping[] array = mappings.toArray( + new ExpressionRoleMapping[mappings.size()] + ); + listener.onResponse(new GetRoleMappingsResponse(array)); + }, + listener::onFailure + )); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingAction.java new file mode 100644 index 0000000000000..8c07d86e4162e --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingAction.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.rolemapping; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse; +import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; + +public class TransportPutRoleMappingAction + extends HandledTransportAction { + + private final NativeRoleMappingStore roleMappingStore; + + @Inject + public TransportPutRoleMappingAction(Settings settings, ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + TransportService transportService, + NativeRoleMappingStore roleMappingStore) { + super(settings, PutRoleMappingAction.NAME, threadPool, transportService, actionFilters, + indexNameExpressionResolver, PutRoleMappingRequest::new); + this.roleMappingStore = roleMappingStore; + } + + @Override + protected void doExecute(final PutRoleMappingRequest request, + final ActionListener listener) { + roleMappingStore.putRoleMapping(request, ActionListener.wrap( + created -> listener.onResponse(new PutRoleMappingResponse(created)), + listener::onFailure + )); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java new file mode 100644 index 0000000000000..48f846579c42d --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.saml; + +import java.util.Map; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.saml.SamlAuthenticateAction; +import org.elasticsearch.xpack.core.security.action.saml.SamlAuthenticateRequest; +import org.elasticsearch.xpack.core.security.action.saml.SamlAuthenticateResponse; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.security.authc.AuthenticationService; +import org.elasticsearch.xpack.security.authc.TokenService; +import org.elasticsearch.xpack.security.authc.saml.SamlRealm; +import org.elasticsearch.xpack.security.authc.saml.SamlToken; + +/** + * Transport action responsible for taking saml content and turning it into a token. + */ +public final class TransportSamlAuthenticateAction extends HandledTransportAction { + + private final AuthenticationService authenticationService; + private final TokenService tokenService; + + @Inject + public TransportSamlAuthenticateAction(Settings settings, ThreadPool threadPool, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + AuthenticationService authenticationService, TokenService tokenService) { + super(settings, SamlAuthenticateAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, + SamlAuthenticateRequest::new); + this.authenticationService = authenticationService; + this.tokenService = tokenService; + } + + @Override + protected void doExecute(SamlAuthenticateRequest request, + ActionListener listener) { + final SamlToken saml = new SamlToken(request.getSaml(), request.getValidRequestIds()); + logger.trace("Attempting to authenticate SamlToken [{}]", saml); + final ThreadContext threadContext = threadPool.getThreadContext(); + Authentication originatingAuthentication = Authentication.getAuthentication(threadContext); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + authenticationService.authenticate(SamlAuthenticateAction.NAME, request, saml, ActionListener.wrap(authentication -> { + final Map tokenMeta = threadContext.getTransient(SamlRealm.CONTEXT_TOKEN_DATA); + tokenService.createUserToken(authentication, originatingAuthentication, + ActionListener.wrap(tuple -> { + final String tokenString = tokenService.getUserTokenString(tuple.v1()); + final TimeValue expiresIn = tokenService.getExpirationDelay(); + listener.onResponse( + new SamlAuthenticateResponse(authentication.getUser().principal(), tokenString, tuple.v2(), expiresIn)); + }, listener::onFailure), tokenMeta); + }, e -> { + logger.debug(() -> new ParameterizedMessage("SamlToken [{}] could not be authenticated", saml), e); + listener.onFailure(e); + })); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionAction.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionAction.java new file mode 100644 index 0000000000000..143b3ffd64bc8 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionAction.java @@ -0,0 +1,135 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.saml; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.GroupedActionListener; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.saml.SamlInvalidateSessionAction; +import org.elasticsearch.xpack.core.security.action.saml.SamlInvalidateSessionRequest; +import org.elasticsearch.xpack.core.security.action.saml.SamlInvalidateSessionResponse; +import org.elasticsearch.xpack.security.authc.Realms; +import org.elasticsearch.xpack.security.authc.TokenService; +import org.elasticsearch.xpack.security.authc.UserToken; +import org.elasticsearch.xpack.security.authc.saml.SamlLogoutRequestHandler; +import org.elasticsearch.xpack.security.authc.saml.SamlRealm; +import org.elasticsearch.xpack.security.authc.saml.SamlRedirect; +import org.elasticsearch.xpack.security.authc.saml.SamlUtils; +import org.opensaml.saml.saml2.core.LogoutResponse; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.security.authc.saml.SamlRealm.findSamlRealms; + +/** + * Transport action responsible for taking a SAML {@code LogoutRequest} and invalidating any associated Security Tokens + */ +public final class TransportSamlInvalidateSessionAction + extends HandledTransportAction { + + private final TokenService tokenService; + private final Realms realms; + + @Inject + public TransportSamlInvalidateSessionAction(Settings settings, ThreadPool threadPool, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + TokenService tokenService, Realms realms) { + super(settings, SamlInvalidateSessionAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, + SamlInvalidateSessionRequest::new); + this.tokenService = tokenService; + this.realms = realms; + } + + @Override + protected void doExecute(SamlInvalidateSessionRequest request, + ActionListener listener) { + List realms = findSamlRealms(this.realms, request.getRealmName(), request.getAssertionConsumerServiceURL()); + if (realms.isEmpty()) { + listener.onFailure(SamlUtils.samlException("Cannot find any matching realm for [{}]", request)); + } else if (realms.size() > 1) { + listener.onFailure(SamlUtils.samlException("Found 
multiple matching realms [{}] for [{}]", realms, request)); + } else { + invalidateSession(realms.get(0), request, listener); + } + } + + private void invalidateSession(SamlRealm realm, SamlInvalidateSessionRequest request, + ActionListener listener) { + try { + final SamlLogoutRequestHandler.Result result = realm.getLogoutHandler().parseFromQueryString(request.getQueryString()); + findAndInvalidateTokens(realm, result, ActionListener.wrap(count -> listener.onResponse( + new SamlInvalidateSessionResponse(realm.name(), count, buildLogoutResponseUrl(realm, result)) + ), listener::onFailure)); + } catch (ElasticsearchSecurityException e) { + logger.info("Failed to invalidate SAML session", e); + listener.onFailure(e); + } + } + + private String buildLogoutResponseUrl(SamlRealm realm, SamlLogoutRequestHandler.Result result) { + final LogoutResponse response = realm.buildLogoutResponse(result.getRequestId()); + return new SamlRedirect(response, realm.getSigningConfiguration()).getRedirectUrl(result.getRelayState()); + } + + private void findAndInvalidateTokens(SamlRealm realm, SamlLogoutRequestHandler.Result result, ActionListener listener) { + final Map tokenMetadata = realm.createTokenMetadata(result.getNameId(), result.getSession()); + if (Strings.hasText((String) tokenMetadata.get(SamlRealm.TOKEN_METADATA_NAMEID_VALUE)) == false) { + // If we don't have a valid name-id to match against, don't do anything + logger.debug("Logout request [{}] has no NameID value, so cannot invalidate any sessions", result); + listener.onResponse(0); + return; + } + + tokenService.findActiveTokensForRealm(realm.name(), ActionListener.wrap(tokens -> { + List> sessionTokens = filterTokens(tokens, tokenMetadata); + logger.debug("Found [{}] token pairs to invalidate for SAML metadata [{}]", sessionTokens.size(), tokenMetadata); + if (sessionTokens.isEmpty()) { + listener.onResponse(0); + } else { + GroupedActionListener groupedListener = new GroupedActionListener<>( + ActionListener.wrap(collection -> listener.onResponse(collection.size()), listener::onFailure), + sessionTokens.size(), Collections.emptyList() + ); + sessionTokens.forEach(tuple -> invalidateTokenPair(tuple, groupedListener)); + } + }, e -> listener.onFailure(e) + )); + } + + private void invalidateTokenPair(Tuple tokenPair, ActionListener listener) { + // Invalidate the refresh token first, so the client doesn't trigger a refresh once the access token is invalidated + tokenService.invalidateRefreshToken(tokenPair.v2(), ActionListener.wrap(ignore -> tokenService.invalidateAccessToken( + tokenPair.v1(), + ActionListener.wrap(listener::onResponse, e -> { + logger.info("Failed to invalidate SAML access_token [{}] - {}", tokenPair.v1().getId(), e.toString()); + listener.onFailure(e); + })), listener::onFailure)); + } + + private List> filterTokens(Collection> tokens, Map requiredMetadata) { + return tokens.stream() + .filter(tup -> { + Map actualMetadata = tup.v1().getMetadata(); + return requiredMetadata.entrySet().stream().allMatch(e -> Objects.equals(actualMetadata.get(e.getKey()), e.getValue())); + }) + .collect(Collectors.toList()); + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java new file mode 100644 index 0000000000000..16eb56760469e --- /dev/null +++ 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java @@ -0,0 +1,157 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.saml; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.saml.SamlLogoutAction; +import org.elasticsearch.xpack.core.security.action.saml.SamlLogoutRequest; +import org.elasticsearch.xpack.core.security.action.saml.SamlLogoutResponse; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.authc.Realms; +import org.elasticsearch.xpack.security.authc.TokenService; +import org.elasticsearch.xpack.security.authc.saml.SamlNameId; +import org.elasticsearch.xpack.security.authc.saml.SamlRealm; +import org.elasticsearch.xpack.security.authc.saml.SamlRedirect; +import org.elasticsearch.xpack.security.authc.saml.SamlUtils; +import org.opensaml.saml.saml2.core.LogoutRequest; + +import java.io.IOException; +import java.util.Map; + +/** + * Transport action responsible for generating a SAML {@code <LogoutRequest>} as a redirect binding URL. 
+ */ +public final class TransportSamlLogoutAction + extends HandledTransportAction { + + private final Realms realms; + private final TokenService tokenService; + + @Inject + public TransportSamlLogoutAction(Settings settings, ThreadPool threadPool, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + Realms realms, TokenService tokenService) { + super(settings, SamlLogoutAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, + SamlLogoutRequest::new); + this.realms = realms; + this.tokenService = tokenService; + } + + @Override + protected void doExecute(SamlLogoutRequest request, + ActionListener listener) { + invalidateRefreshToken(request.getRefreshToken(), ActionListener.wrap(ignore -> { + try { + final String token = request.getToken(); + tokenService.getAuthenticationAndMetaData(token, ActionListener.wrap( + tuple -> { + Authentication authentication = tuple.v1(); + final Map tokenMetadata = tuple.v2(); + SamlLogoutResponse response = buildResponse(authentication, tokenMetadata); + tokenService.invalidateAccessToken(token, ActionListener.wrap( + created -> { + if (logger.isTraceEnabled()) { + logger.trace("SAML Logout User [{}], Token [{}...{}]", + authentication.getUser().principal(), + token.substring(0, 8), + token.substring(token.length() - 8) + ); + } + listener.onResponse(response); + }, + listener::onFailure + )); + }, listener::onFailure + )); + } catch (IOException | ElasticsearchException e) { + logger.debug("Internal exception during SAML logout", e); + listener.onFailure(e); + } + }, listener::onFailure)); + } + + private void invalidateRefreshToken(String refreshToken, ActionListener listener) { + if (refreshToken == null) { + listener.onResponse(null); + } else { + tokenService.invalidateRefreshToken(refreshToken, listener); + } + } + + private SamlLogoutResponse buildResponse(Authentication authentication, Map tokenMetadata) { + if (authentication == null) { + throw SamlUtils.samlException("No active authentication"); + } + final User user = authentication.getUser(); + if (user == null) { + throw SamlUtils.samlException("No active user"); + } + + final SamlRealm realm = findRealm(authentication); + final String tokenRealm = getMetadataString(tokenMetadata, SamlRealm.TOKEN_METADATA_REALM); + if (realm.name().equals(tokenRealm) == false) { + throw SamlUtils.samlException("Authenticating realm [{}] does not match token realm [{}]", realm, tokenRealm); + } + + final SamlNameId nameId = new SamlNameId( + getMetadataString(tokenMetadata, SamlRealm.TOKEN_METADATA_NAMEID_FORMAT), + getMetadataString(tokenMetadata, SamlRealm.TOKEN_METADATA_NAMEID_VALUE), + getMetadataString(tokenMetadata, SamlRealm.TOKEN_METADATA_NAMEID_QUALIFIER), + getMetadataString(tokenMetadata, SamlRealm.TOKEN_METADATA_NAMEID_SP_QUALIFIER), + getMetadataString(tokenMetadata, SamlRealm.TOKEN_METADATA_NAMEID_SP_PROVIDED_ID) + ); + final String session = getMetadataString(tokenMetadata, SamlRealm.TOKEN_METADATA_SESSION); + final LogoutRequest logout = realm.buildLogoutRequest(nameId.asXml(), session); + if (logout == null) { + return new SamlLogoutResponse(null); + } + final String uri = new SamlRedirect(logout, realm.getSigningConfiguration()).getRedirectUrl(); + return new SamlLogoutResponse(uri); + } + + private String getMetadataString(Map metadata, String key) { + final Object value = metadata.get(key); + if (value == null) { + if (metadata.containsKey(key)) { + return null; + } + throw 
SamlUtils.samlException("Access token does not have SAML metadata [{}]", key); + } + if (value instanceof String) { + return (String) value; + } else { + throw SamlUtils.samlException("In access token, SAML metadata [{}] is [{}] rather than String", key, value.getClass()); + } + } + + private SamlRealm findRealm(Authentication authentication) { + final Authentication.RealmRef ref = authentication.getAuthenticatedBy(); + if (ref == null || Strings.isNullOrEmpty(ref.getName())) { + throw SamlUtils.samlException("Authentication {} has no authenticating realm", authentication); + } + final Realm realm = realms.realm(ref.getName()); + if (realm == null) { + throw SamlUtils.samlException("Authenticating realm {} does not exist", ref.getName()); + } + if (realm instanceof SamlRealm) { + return (SamlRealm) realm; + } else { + throw SamlUtils.samlException("Authenticating realm {} is not a SAML realm", realm); + } + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlPrepareAuthenticationAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlPrepareAuthenticationAction.java new file mode 100644 index 0000000000000..7bba1974b3dc8 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlPrepareAuthenticationAction.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.saml; + +import java.util.List; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.saml.SamlPrepareAuthenticationAction; +import org.elasticsearch.xpack.core.security.action.saml.SamlPrepareAuthenticationRequest; +import org.elasticsearch.xpack.core.security.action.saml.SamlPrepareAuthenticationResponse; +import org.elasticsearch.xpack.security.authc.Realms; +import org.elasticsearch.xpack.security.authc.saml.SamlRealm; +import org.elasticsearch.xpack.security.authc.saml.SamlRedirect; +import org.elasticsearch.xpack.security.authc.saml.SamlUtils; +import org.opensaml.saml.saml2.core.AuthnRequest; + +import static org.elasticsearch.xpack.security.authc.saml.SamlRealm.findSamlRealms; + +/** + * Transport action responsible for generating a SAML {@code <AuthnRequest>} as a redirect binding URL. 
+ */ +public final class TransportSamlPrepareAuthenticationAction + extends HandledTransportAction { + + private final Realms realms; + + @Inject + public TransportSamlPrepareAuthenticationAction(Settings settings, ThreadPool threadPool, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + Realms realms) { + super(settings, SamlPrepareAuthenticationAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, + SamlPrepareAuthenticationRequest::new); + this.realms = realms; + } + + @Override + protected void doExecute(SamlPrepareAuthenticationRequest request, + ActionListener listener) { + List realms = findSamlRealms(this.realms, request.getRealmName(), request.getAssertionConsumerServiceURL() ); + if (realms.isEmpty()) { + listener.onFailure(SamlUtils.samlException("Cannot find any matching realm for [{}]", request)); + } else if (realms.size() > 1) { + listener.onFailure(SamlUtils.samlException("Found multiple matching realms [{}] for [{}]", realms, request)); + } else { + prepareAuthentication(realms.get(0), listener); + } + } + + private void prepareAuthentication(SamlRealm realm, ActionListener listener) { + final AuthnRequest authnRequest = realm.buildAuthenticationRequest(); + try { + String redirectUrl = new SamlRedirect(authnRequest, realm.getSigningConfiguration()).getRedirectUrl(); + listener.onResponse(new SamlPrepareAuthenticationResponse( + realm.name(), + authnRequest.getID(), + redirectUrl + )); + } catch (ElasticsearchException e) { + listener.onFailure(e); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java new file mode 100644 index 0000000000000..4a96c3c88d88d --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.token; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenAction; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenRequest; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenResponse; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.security.authc.AuthenticationService; +import org.elasticsearch.xpack.security.authc.TokenService; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; + +import java.util.Collections; + +/** + * Transport action responsible for creating a token based on a request. 
Requests provide user + * credentials that can be different than those of the user that is currently authenticated so we + * always re-authenticate within this action. This authenticated user will be the user that the + * token represents + */ +public final class TransportCreateTokenAction extends HandledTransportAction { + + private static final String DEFAULT_SCOPE = "full"; + private final TokenService tokenService; + private final AuthenticationService authenticationService; + + @Inject + public TransportCreateTokenAction(Settings settings, ThreadPool threadPool, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + TokenService tokenService, AuthenticationService authenticationService) { + super(settings, CreateTokenAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, + CreateTokenRequest::new); + this.tokenService = tokenService; + this.authenticationService = authenticationService; + } + + @Override + protected void doExecute(CreateTokenRequest request, ActionListener listener) { + Authentication originatingAuthentication = Authentication.getAuthentication(threadPool.getThreadContext()); + try (ThreadContext.StoredContext ignore = threadPool.getThreadContext().stashContext()) { + final UsernamePasswordToken authToken = new UsernamePasswordToken(request.getUsername(), request.getPassword()); + authenticationService.authenticate(CreateTokenAction.NAME, request, authToken, + ActionListener.wrap(authentication -> { + request.getPassword().close(); + tokenService.createUserToken(authentication, originatingAuthentication, ActionListener.wrap(tuple -> { + final String tokenStr = tokenService.getUserTokenString(tuple.v1()); + final String scope = getResponseScopeValue(request.getScope()); + + final CreateTokenResponse response = + new CreateTokenResponse(tokenStr, tokenService.getExpirationDelay(), scope, tuple.v2()); + listener.onResponse(response); + }, e -> { + // clear the request password + request.getPassword().close(); + listener.onFailure(e); + }), Collections.emptyMap()); + }, e -> { + // clear the request password + request.getPassword().close(); + listener.onFailure(e); + })); + } + } + + static String getResponseScopeValue(String requestScope) { + final String scope; + // the OAuth2.0 RFC requires the scope to be provided in the + // response if it differs from the user provided scope. If the + // scope was not provided then it does not need to be returned. + // if the scope is not supported, the value of the scope that the + // token is for must be returned + if (requestScope != null) { + scope = DEFAULT_SCOPE; // this is the only non-null value that is currently supported + } else { + scope = null; + } + return scope; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportInvalidateTokenAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportInvalidateTokenAction.java new file mode 100644 index 0000000000000..63a130b30047d --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportInvalidateTokenAction.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.security.action.token;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenAction;
+import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenRequest;
+import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenResponse;
+import org.elasticsearch.xpack.security.authc.TokenService;
+
+/**
+ * Transport action responsible for handling invalidation of tokens
+ */
+public final class TransportInvalidateTokenAction
+        extends HandledTransportAction<InvalidateTokenRequest, InvalidateTokenResponse> {
+
+    private final TokenService tokenService;
+
+    @Inject
+    public TransportInvalidateTokenAction(Settings settings, ThreadPool threadPool, TransportService transportService,
+                                          ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
+                                          TokenService tokenService) {
+        super(settings, InvalidateTokenAction.NAME, threadPool, transportService, actionFilters,
+                indexNameExpressionResolver, InvalidateTokenRequest::new);
+        this.tokenService = tokenService;
+    }
+
+    @Override
+    protected void doExecute(InvalidateTokenRequest request, ActionListener<InvalidateTokenResponse> listener) {
+        final ActionListener<Boolean> invalidateListener =
+                ActionListener.wrap(created -> listener.onResponse(new InvalidateTokenResponse(created)), listener::onFailure);
+        if (request.getTokenType() == InvalidateTokenRequest.Type.ACCESS_TOKEN) {
+            tokenService.invalidateAccessToken(request.getTokenString(), invalidateListener);
+        } else {
+            assert request.getTokenType() == InvalidateTokenRequest.Type.REFRESH_TOKEN;
+            tokenService.invalidateRefreshToken(request.getTokenString(), invalidateListener);
+        }
+    }
+}
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportRefreshTokenAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportRefreshTokenAction.java
new file mode 100644
index 0000000000000..0462fe6cddeaf
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportRefreshTokenAction.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.security.action.token; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenRequest; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenResponse; +import org.elasticsearch.xpack.core.security.action.token.RefreshTokenAction; +import org.elasticsearch.xpack.security.authc.TokenService; + +import static org.elasticsearch.xpack.security.action.token.TransportCreateTokenAction.getResponseScopeValue; + +public class TransportRefreshTokenAction extends HandledTransportAction { + + private final TokenService tokenService; + + @Inject + public TransportRefreshTokenAction(Settings settings, ThreadPool threadPool, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + TokenService tokenService) { + super(settings, RefreshTokenAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, + CreateTokenRequest::new); + this.tokenService = tokenService; + } + + @Override + protected void doExecute(CreateTokenRequest request, ActionListener listener) { + tokenService.refreshToken(request.getRefreshToken(), ActionListener.wrap(tuple -> { + final String tokenStr = tokenService.getUserTokenString(tuple.v1()); + final String scope = getResponseScopeValue(request.getScope()); + + final CreateTokenResponse response = + new CreateTokenResponse(tokenStr, tokenService.getExpirationDelay(), scope, tuple.v2()); + listener.onResponse(response); + }, listener::onFailure)); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java new file mode 100644 index 0000000000000..6386917a1e98f --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.security.action.user;
+
+import org.elasticsearch.ElasticsearchSecurityException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.security.SecurityContext;
+import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction;
+import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest;
+import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse;
+import org.elasticsearch.xpack.core.security.user.SystemUser;
+import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.xpack.core.security.user.XPackUser;
+
+public class TransportAuthenticateAction extends HandledTransportAction<AuthenticateRequest, AuthenticateResponse> {
+
+    private final SecurityContext securityContext;
+
+    @Inject
+    public TransportAuthenticateAction(Settings settings, ThreadPool threadPool, TransportService transportService,
+                                       ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
+                                       SecurityContext securityContext) {
+        super(settings, AuthenticateAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
+                AuthenticateRequest::new);
+        this.securityContext = securityContext;
+    }
+
+    @Override
+    protected void doExecute(AuthenticateRequest request, ActionListener<AuthenticateResponse> listener) {
+        final User runAsUser = securityContext.getUser();
+        final User authUser = runAsUser == null ? null : runAsUser.authenticatedUser();
+        if (authUser == null) {
+            listener.onFailure(new ElasticsearchSecurityException("did not find an authenticated user"));
+        } else if (SystemUser.is(authUser) || XPackUser.is(authUser)) {
+            listener.onFailure(new IllegalArgumentException("user [" + authUser.principal() + "] is internal"));
+        } else if (SystemUser.is(runAsUser) || XPackUser.is(runAsUser)) {
+            listener.onFailure(new IllegalArgumentException("user [" + runAsUser.principal() + "] is internal"));
+        } else {
+            listener.onResponse(new AuthenticateResponse(runAsUser));
+        }
+    }
+}
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordAction.java
new file mode 100644
index 0000000000000..047b47dfa256b
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordAction.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.security.action.user; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordAction; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordRequest; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordResponse; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; + +public class TransportChangePasswordAction extends HandledTransportAction { + + private final NativeUsersStore nativeUsersStore; + + @Inject + public TransportChangePasswordAction(Settings settings, ThreadPool threadPool, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + NativeUsersStore nativeUsersStore) { + super(settings, ChangePasswordAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, + ChangePasswordRequest::new); + this.nativeUsersStore = nativeUsersStore; + } + + @Override + protected void doExecute(ChangePasswordRequest request, ActionListener listener) { + final String username = request.username(); + if (AnonymousUser.isAnonymousUsername(username, settings)) { + listener.onFailure(new IllegalArgumentException("user [" + username + "] is anonymous and cannot be modified via the API")); + return; + } else if (SystemUser.NAME.equals(username) || XPackUser.NAME.equals(username)) { + listener.onFailure(new IllegalArgumentException("user [" + username + "] is internal")); + return; + } + nativeUsersStore.changePassword(request, new ActionListener() { + @Override + public void onResponse(Void v) { + listener.onResponse(new ChangePasswordResponse()); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserAction.java new file mode 100644 index 0000000000000..4ada1ab0e3331 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserAction.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.user; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.user.DeleteUserAction; +import org.elasticsearch.xpack.core.security.action.user.DeleteUserRequest; +import org.elasticsearch.xpack.core.security.action.user.DeleteUserResponse; +import org.elasticsearch.xpack.core.security.authc.esnative.ClientReservedRealm; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; + +public class TransportDeleteUserAction extends HandledTransportAction { + + private final NativeUsersStore usersStore; + + @Inject + public TransportDeleteUserAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, NativeUsersStore usersStore, + TransportService transportService) { + super(settings, DeleteUserAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, + DeleteUserRequest::new); + this.usersStore = usersStore; + } + + @Override + protected void doExecute(DeleteUserRequest request, final ActionListener listener) { + final String username = request.username(); + if (ClientReservedRealm.isReserved(username, settings)) { + if (AnonymousUser.isAnonymousUsername(username, settings)) { + listener.onFailure(new IllegalArgumentException("user [" + username + "] is anonymous and cannot be deleted")); + return; + } else { + listener.onFailure(new IllegalArgumentException("user [" + username + "] is reserved and cannot be deleted")); + return; + } + } else if (SystemUser.NAME.equals(username) || XPackUser.NAME.equals(username)) { + listener.onFailure(new IllegalArgumentException("user [" + username + "] is internal")); + return; + } + + usersStore.deleteUser(request, new ActionListener() { + @Override + public void onResponse(Boolean found) { + listener.onResponse(new DeleteUserResponse(found)); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersAction.java new file mode 100644 index 0000000000000..4a57a918c1af0 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersAction.java @@ -0,0 +1,103 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.user; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.GroupedActionListener; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.user.GetUsersAction; +import org.elasticsearch.xpack.core.security.action.user.GetUsersRequest; +import org.elasticsearch.xpack.core.security.action.user.GetUsersResponse; +import org.elasticsearch.xpack.core.security.authc.esnative.ClientReservedRealm; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; +import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +public class TransportGetUsersAction extends HandledTransportAction { + + private final NativeUsersStore usersStore; + private final ReservedRealm reservedRealm; + + @Inject + public TransportGetUsersAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, NativeUsersStore usersStore, + TransportService transportService, ReservedRealm reservedRealm) { + super(settings, GetUsersAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, + GetUsersRequest::new); + this.usersStore = usersStore; + this.reservedRealm = reservedRealm; + } + + @Override + protected void doExecute(final GetUsersRequest request, final ActionListener listener) { + final String[] requestedUsers = request.usernames(); + final boolean specificUsersRequested = requestedUsers != null && requestedUsers.length > 0; + final List usersToSearchFor = new ArrayList<>(); + final List users = new ArrayList<>(); + final List realmLookup = new ArrayList<>(); + if (specificUsersRequested) { + for (String username : requestedUsers) { + if (ClientReservedRealm.isReserved(username, settings)) { + realmLookup.add(username); + } else if (SystemUser.NAME.equals(username) || XPackUser.NAME.equals(username)) { + listener.onFailure(new IllegalArgumentException("user [" + username + "] is internal")); + return; + } else { + usersToSearchFor.add(username); + } + } + } + + final ActionListener>> sendingListener = ActionListener.wrap((userLists) -> { + users.addAll(userLists.stream().flatMap(Collection::stream).filter(Objects::nonNull).collect(Collectors.toList())); + listener.onResponse(new GetUsersResponse(users)); + }, listener::onFailure); + final GroupedActionListener> groupListener = + new GroupedActionListener<>(sendingListener, 2, Collections.emptyList()); + // We have two sources for the users object, the reservedRealm and the usersStore, we query both at the same time with a + // GroupedActionListener + if (realmLookup.isEmpty()) { + if (specificUsersRequested == false) { + // we get all users from the realm + reservedRealm.users(groupListener); + } else { + 
groupListener.onResponse(Collections.emptyList());// pass an empty list to inform the group listener + // - no real lookups necessary + } + } else { + // nested group listener action here - for each of the users we got and fetch it concurrently - once we are done we notify + // the "global" group listener. + GroupedActionListener realmGroupListener = new GroupedActionListener<>(groupListener, realmLookup.size(), + Collections.emptyList()); + for (String user : realmLookup) { + reservedRealm.lookupUser(user, realmGroupListener); + } + } + + // user store lookups + if (specificUsersRequested && usersToSearchFor.isEmpty()) { + groupListener.onResponse(Collections.emptyList()); // no users requested notify + } else { + // go and get all users from the users store and pass it directly on to the group listener + usersStore.getUsers(usersToSearchFor.toArray(new String[usersToSearchFor.size()]), groupListener); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java new file mode 100644 index 0000000000000..dbc2d8f82bd94 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.user; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.Operations; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesAction; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequest; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.permission.IndicesPermission; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.Privilege; +import org.elasticsearch.xpack.core.security.support.Automatons; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.authz.AuthorizationService; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +/** + * Transport action that tests whether a user has the specified + * {@link 
RoleDescriptor.IndicesPrivileges privileges} + */ +public class TransportHasPrivilegesAction extends HandledTransportAction { + + private final AuthorizationService authorizationService; + + @Inject + public TransportHasPrivilegesAction(Settings settings, ThreadPool threadPool, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + AuthorizationService authorizationService) { + super(settings, HasPrivilegesAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, + HasPrivilegesRequest::new); + this.authorizationService = authorizationService; + } + + @Override + protected void doExecute(HasPrivilegesRequest request, ActionListener listener) { + final String username = request.username(); + + final User user = Authentication.getAuthentication(threadPool.getThreadContext()).getUser(); + if (user.principal().equals(username) == false) { + listener.onFailure(new IllegalArgumentException("users may only check the privileges of their own account")); + return; + } + + authorizationService.roles(user, ActionListener.wrap( + role -> checkPrivileges(request, role, listener), + listener::onFailure)); + } + + private void checkPrivileges(HasPrivilegesRequest request, Role userRole, + ActionListener listener) { + logger.debug(() -> new ParameterizedMessage("Check whether role [{}] has privileges cluster=[{}] index=[{}]", + Strings.arrayToCommaDelimitedString(userRole.names()), Strings.arrayToCommaDelimitedString(request.clusterPrivileges()), + Strings.arrayToCommaDelimitedString(request.indexPrivileges()))); + + Map cluster = new HashMap<>(); + for (String checkAction : request.clusterPrivileges()) { + final ClusterPrivilege checkPrivilege = ClusterPrivilege.get(Collections.singleton(checkAction)); + final ClusterPrivilege rolePrivilege = userRole.cluster().privilege(); + cluster.put(checkAction, testPrivilege(checkPrivilege, rolePrivilege.getAutomaton())); + } + boolean allMatch = cluster.values().stream().allMatch(Boolean::booleanValue); + + final Map predicateCache = new HashMap<>(); + + final Map indices = new LinkedHashMap<>(); + for (RoleDescriptor.IndicesPrivileges check : request.indexPrivileges()) { + for (String index : check.getIndices()) { + final Map privileges = new HashMap<>(); + final HasPrivilegesResponse.IndexPrivileges existing = indices.get(index); + if (existing != null) { + privileges.putAll(existing.getPrivileges()); + } + for (String privilege : check.getPrivileges()) { + if (testIndexMatch(index, privilege, userRole, predicateCache)) { + logger.debug(() -> new ParameterizedMessage("Role [{}] has [{}] on [{}]", + Strings.arrayToCommaDelimitedString(userRole.names()), privilege, index)); + privileges.put(privilege, true); + } else { + logger.debug(() -> new ParameterizedMessage("Role [{}] does not have [{}] on [{}]", + Strings.arrayToCommaDelimitedString(userRole.names()), privilege, index)); + privileges.put(privilege, false); + allMatch = false; + } + } + indices.put(index, new HasPrivilegesResponse.IndexPrivileges(index, privileges)); + } + } + listener.onResponse(new HasPrivilegesResponse(allMatch, cluster, indices.values())); + } + + private boolean testIndexMatch(String checkIndex, String checkPrivilegeName, Role userRole, + Map predicateCache) { + final IndexPrivilege checkPrivilege = IndexPrivilege.get(Collections.singleton(checkPrivilegeName)); + + final Automaton checkIndexAutomaton = Automatons.patterns(checkIndex); + + List privilegeAutomatons = new ArrayList<>(); + for 
(IndicesPermission.Group group : userRole.indices().groups()) { + final Automaton groupIndexAutomaton = predicateCache.computeIfAbsent(group, g -> Automatons.patterns(g.indices())); + if (testIndex(checkIndexAutomaton, groupIndexAutomaton)) { + final IndexPrivilege rolePrivilege = group.privilege(); + if (rolePrivilege.name().contains(checkPrivilegeName)) { + return true; + } + privilegeAutomatons.add(rolePrivilege.getAutomaton()); + } + } + return testPrivilege(checkPrivilege, Automatons.unionAndMinimize(privilegeAutomatons)); + } + + private static boolean testIndex(Automaton checkIndex, Automaton roleIndex) { + return Operations.subsetOf(checkIndex, roleIndex); + } + + private static boolean testPrivilege(Privilege checkPrivilege, Automaton roleAutomaton) { + return Operations.subsetOf(checkPrivilege.getAutomaton(), roleAutomaton); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportPutUserAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportPutUserAction.java new file mode 100644 index 0000000000000..1d2f792b29809 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportPutUserAction.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.user; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.user.PutUserAction; +import org.elasticsearch.xpack.core.security.action.user.PutUserRequest; +import org.elasticsearch.xpack.core.security.action.user.PutUserResponse; +import org.elasticsearch.xpack.core.security.authc.esnative.ClientReservedRealm; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; + +public class TransportPutUserAction extends HandledTransportAction { + + private final NativeUsersStore usersStore; + + @Inject + public TransportPutUserAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + NativeUsersStore usersStore, TransportService transportService) { + super(settings, PutUserAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, PutUserRequest::new); + this.usersStore = usersStore; + } + + @Override + protected void doExecute(final PutUserRequest request, final ActionListener listener) { + final String username = request.username(); + if (ClientReservedRealm.isReserved(username, settings)) { + if (AnonymousUser.isAnonymousUsername(username, settings)) { + listener.onFailure(new 
IllegalArgumentException("user [" + username + "] is anonymous and cannot be modified via the API")); + return; + } else { + listener.onFailure(new IllegalArgumentException("user [" + username + "] is reserved and only the " + + "password can be changed")); + return; + } + } else if (SystemUser.NAME.equals(username) || XPackUser.NAME.equals(username)) { + listener.onFailure(new IllegalArgumentException("user [" + username + "] is internal")); + return; + } + + usersStore.putUser(request, new ActionListener() { + @Override + public void onResponse(Boolean created) { + if (created) { + logger.info("added user [{}]", request.username()); + } else { + logger.info("updated user [{}]", request.username()); + } + listener.onResponse(new PutUserResponse(created)); + } + + @Override + public void onFailure(Exception e) { + logger.error((Supplier) () -> new ParameterizedMessage("failed to put user [{}]", request.username()), e); + listener.onFailure(e); + } + }); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledAction.java new file mode 100644 index 0000000000000..a8c884ccc16ae --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledAction.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.user; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.user.SetEnabledAction; +import org.elasticsearch.xpack.core.security.action.user.SetEnabledRequest; +import org.elasticsearch.xpack.core.security.action.user.SetEnabledResponse; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; + +/** + * Transport action that handles setting a native or reserved user to enabled + */ +public class TransportSetEnabledAction extends HandledTransportAction { + + private final NativeUsersStore usersStore; + + @Inject + public TransportSetEnabledAction(Settings settings, ThreadPool threadPool, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + NativeUsersStore usersStore) { + super(settings, SetEnabledAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, + SetEnabledRequest::new); + this.usersStore = usersStore; + } + + @Override + protected void doExecute(SetEnabledRequest request, ActionListener listener) { + final String username = request.username(); + // make sure the user is not disabling 
themselves + if (Authentication.getAuthentication(threadPool.getThreadContext()).getUser().principal().equals(request.username())) { + listener.onFailure(new IllegalArgumentException("users may not update the enabled status of their own account")); + return; + } else if (SystemUser.NAME.equals(username) || XPackUser.NAME.equals(username)) { + listener.onFailure(new IllegalArgumentException("user [" + username + "] is internal")); + return; + } else if (AnonymousUser.isAnonymousUsername(username, settings)) { + listener.onFailure(new IllegalArgumentException("user [" + username + "] is anonymous and cannot be modified using the api")); + return; + } + + usersStore.setEnabled(username, request.enabled(), request.getRefreshPolicy(), new ActionListener() { + @Override + public void onResponse(Void v) { + listener.onResponse(new SetEnabledResponse()); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditLevel.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditLevel.java new file mode 100644 index 0000000000000..c8c5fa6420054 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditLevel.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.audit; + +import java.util.Arrays; +import java.util.EnumSet; +import java.util.List; +import java.util.Locale; + +public enum AuditLevel { + + + ANONYMOUS_ACCESS_DENIED, + AUTHENTICATION_FAILED, + REALM_AUTHENTICATION_FAILED, + ACCESS_GRANTED, + ACCESS_DENIED, + TAMPERED_REQUEST, + CONNECTION_GRANTED, + CONNECTION_DENIED, + SYSTEM_ACCESS_GRANTED, + AUTHENTICATION_SUCCESS, + RUN_AS_GRANTED, + RUN_AS_DENIED; + + static EnumSet parse(List levels) { + EnumSet enumSet = EnumSet.noneOf(AuditLevel.class); + for (String level : levels) { + String lowerCaseLevel = level.trim().toLowerCase(Locale.ROOT); + switch (lowerCaseLevel) { + case "_all": + enumSet.addAll(Arrays.asList(AuditLevel.values())); + break; + case "anonymous_access_denied": + enumSet.add(ANONYMOUS_ACCESS_DENIED); + break; + case "authentication_failed": + enumSet.add(AUTHENTICATION_FAILED); + break; + case "realm_authentication_failed": + enumSet.add(REALM_AUTHENTICATION_FAILED); + break; + case "access_granted": + enumSet.add(ACCESS_GRANTED); + break; + case "access_denied": + enumSet.add(ACCESS_DENIED); + break; + case "tampered_request": + enumSet.add(TAMPERED_REQUEST); + break; + case "connection_granted": + enumSet.add(CONNECTION_GRANTED); + break; + case "connection_denied": + enumSet.add(CONNECTION_DENIED); + break; + case "system_access_granted": + enumSet.add(SYSTEM_ACCESS_GRANTED); + break; + case "authentication_success": + enumSet.add(AUTHENTICATION_SUCCESS); + break; + case "run_as_granted": + enumSet.add(RUN_AS_GRANTED); + break; + case "run_as_denied": + enumSet.add(RUN_AS_DENIED); + break; + default: + throw new IllegalArgumentException("invalid event name specified [" + level + "]"); + } + } + return enumSet; + } + + public static EnumSet parse(List includeLevels, List excludeLevels) { + EnumSet included = parse(includeLevels); + EnumSet excluded = parse(excludeLevels); + included.removeAll(excluded); + return 
included; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java new file mode 100644 index 0000000000000..3f19d28192583 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.audit; + +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.transport.TransportMessage; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule; + +import java.net.InetAddress; + +public interface AuditTrail { + + String name(); + + void authenticationSuccess(String realm, User user, RestRequest request); + + void authenticationSuccess(String realm, User user, String action, TransportMessage message); + + void anonymousAccessDenied(String action, TransportMessage message); + + void anonymousAccessDenied(RestRequest request); + + void authenticationFailed(RestRequest request); + + void authenticationFailed(String action, TransportMessage message); + + void authenticationFailed(AuthenticationToken token, String action, TransportMessage message); + + void authenticationFailed(AuthenticationToken token, RestRequest request); + + void authenticationFailed(String realm, AuthenticationToken token, String action, TransportMessage message); + + void authenticationFailed(String realm, AuthenticationToken token, RestRequest request); + + void accessGranted(Authentication authentication, String action, TransportMessage message, String[] roleNames); + + void accessDenied(Authentication authentication, String action, TransportMessage message, String[] roleNames); + + void tamperedRequest(RestRequest request); + + void tamperedRequest(String action, TransportMessage message); + + void tamperedRequest(User user, String action, TransportMessage request); + + void connectionGranted(InetAddress inetAddress, String profile, SecurityIpFilterRule rule); + + void connectionDenied(InetAddress inetAddress, String profile, SecurityIpFilterRule rule); + + void runAsGranted(Authentication authentication, String action, TransportMessage message, String[] roleNames); + + void runAsDenied(Authentication authentication, String action, TransportMessage message, String[] roleNames); + + void runAsDenied(Authentication authentication, RestRequest request, String[] roleNames); +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java new file mode 100644 index 0000000000000..3cd12b1a7ceb9 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java @@ -0,0 +1,222 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
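As an aside, the include/exclude handling in AuditLevel.parse shown just above composes two independently parsed sets and then subtracts the exclusions from the inclusions. The following is only a rough, self-contained sketch of that behaviour; the class name, the reduced enum and the sample values are invented for illustration and are not part of the change itself.

import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import java.util.Locale;

public class AuditLevelParseExample {

    enum Level { ACCESS_GRANTED, ACCESS_DENIED, AUTHENTICATION_FAILED }

    // Parse the include and exclude lists independently, then remove the
    // excluded events from the included set (mirrors parse(include, exclude) above).
    static EnumSet<Level> parse(List<String> include, List<String> exclude) {
        EnumSet<Level> included = toSet(include);
        included.removeAll(toSet(exclude));
        return included;
    }

    static EnumSet<Level> toSet(List<String> names) {
        EnumSet<Level> set = EnumSet.noneOf(Level.class);
        for (String name : names) {
            String normalized = name.trim().toLowerCase(Locale.ROOT);
            if ("_all".equals(normalized)) {
                set.addAll(EnumSet.allOf(Level.class));
            } else {
                set.add(Level.valueOf(normalized.toUpperCase(Locale.ROOT)));
            }
        }
        return set;
    }

    public static void main(String[] args) {
        // "_all" minus "access_denied" leaves ACCESS_GRANTED and AUTHENTICATION_FAILED
        System.out.println(parse(Arrays.asList("_all"), Arrays.asList("access_denied")));
    }
}

In other words, an exclusion always wins over an inclusion for the same event type, which is why excluding an event from "_all" is a convenient way to audit everything but a handful of noisy categories.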
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.audit; + +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.transport.TransportMessage; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule; + +import java.net.InetAddress; +import java.util.Collections; +import java.util.List; + +public class AuditTrailService extends AbstractComponent implements AuditTrail { + + private final XPackLicenseState licenseState; + private final List auditTrails; + + @Override + public String name() { + return "service"; + } + + public AuditTrailService(Settings settings, List auditTrails, XPackLicenseState licenseState) { + super(settings); + this.auditTrails = Collections.unmodifiableList(auditTrails); + this.licenseState = licenseState; + } + + /** Returns the audit trail implementations that this service delegates to. */ + public List getAuditTrails() { + return auditTrails; + } + + @Override + public void authenticationSuccess(String realm, User user, RestRequest request) { + if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.authenticationSuccess(realm, user, request); + } + } + } + + @Override + public void authenticationSuccess(String realm, User user, String action, TransportMessage message) { + if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.authenticationSuccess(realm, user, action, message); + } + } + } + + @Override + public void anonymousAccessDenied(String action, TransportMessage message) { + if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.anonymousAccessDenied(action, message); + } + } + } + + @Override + public void anonymousAccessDenied(RestRequest request) { + if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.anonymousAccessDenied(request); + } + } + } + + @Override + public void authenticationFailed(RestRequest request) { + if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.authenticationFailed(request); + } + } + } + + @Override + public void authenticationFailed(String action, TransportMessage message) { + if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.authenticationFailed(action, message); + } + } + } + + @Override + public void authenticationFailed(AuthenticationToken token, String action, TransportMessage message) { + if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.authenticationFailed(token, action, message); + } + } + } + + @Override + public void authenticationFailed(String realm, AuthenticationToken token, String action, TransportMessage message) { + if 
(licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.authenticationFailed(realm, token, action, message); + } + } + } + + @Override + public void authenticationFailed(AuthenticationToken token, RestRequest request) { + if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.authenticationFailed(token, request); + } + } + } + + @Override + public void authenticationFailed(String realm, AuthenticationToken token, RestRequest request) { + if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.authenticationFailed(realm, token, request); + } + } + } + + @Override + public void accessGranted(Authentication authentication, String action, TransportMessage message, String[] roleNames) { + if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.accessGranted(authentication, action, message, roleNames); + } + } + } + + @Override + public void accessDenied(Authentication authentication, String action, TransportMessage message, String[] roleNames) { + if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.accessDenied(authentication, action, message, roleNames); + } + } + } + + @Override + public void tamperedRequest(RestRequest request) { + if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.tamperedRequest(request); + } + } + } + + @Override + public void tamperedRequest(String action, TransportMessage message) { + if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.tamperedRequest(action, message); + } + } + } + + @Override + public void tamperedRequest(User user, String action, TransportMessage request) { + if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.tamperedRequest(user, action, request); + } + } + } + + @Override + public void connectionGranted(InetAddress inetAddress, String profile, SecurityIpFilterRule rule) { + if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.connectionGranted(inetAddress, profile, rule); + } + } + } + + @Override + public void connectionDenied(InetAddress inetAddress, String profile, SecurityIpFilterRule rule) { + if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.connectionDenied(inetAddress, profile, rule); + } + } + } + + @Override + public void runAsGranted(Authentication authentication, String action, TransportMessage message, String[] roleNames) { + if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.runAsGranted(authentication, action, message, roleNames); + } + } + } + + @Override + public void runAsDenied(Authentication authentication, String action, TransportMessage message, String[] roleNames) { + if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.runAsDenied(authentication, action, message, 
roleNames); + } + } + } + + @Override + public void runAsDenied(Authentication authentication, RestRequest request, String[] roleNames) { + if (licenseState.isSecurityEnabled() && licenseState.isAuditingAllowed()) { + for (AuditTrail auditTrail : auditTrails) { + auditTrail.runAsDenied(authentication, request, roleNames); + } + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditUtil.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditUtil.java new file mode 100644 index 0000000000000..0d2cbc24ee12c --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditUtil.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.audit; + +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.transport.TransportMessage; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + +public class AuditUtil { + + public static String restRequestContent(RestRequest request) { + if (request.hasContent()) { + try { + return XContentHelper.convertToJson(request.content(), false, false, request.getXContentType()); + } catch (IOException ioe) { + return "Invalid Format: " + request.content().utf8ToString(); + } + } + return ""; + } + + public static Set indices(TransportMessage message) { + if (message instanceof IndicesRequest) { + return arrayToSetOrNull(((IndicesRequest) message).indices()); + } + return null; + } + + private static Set arrayToSetOrNull(String[] indices) { + return indices == null ? null : new HashSet<>(Arrays.asList(indices)); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java new file mode 100644 index 0000000000000..e16a09c8a2a44 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java @@ -0,0 +1,1194 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.audit.index; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.elasticsearch.action.bulk.BulkProcessor; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.AliasOrIndex; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportMessage; +import org.elasticsearch.xpack.core.XPackClientPlugin; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.core.template.TemplateUtils; +import org.elasticsearch.xpack.security.audit.AuditLevel; +import org.elasticsearch.xpack.security.audit.AuditTrail; +import org.elasticsearch.xpack.security.rest.RemoteHostHeader; +import org.elasticsearch.xpack.security.support.IndexLifecycleManager; +import org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.Closeable; +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.net.UnknownHostException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.EnumSet; +import java.util.List; 
+import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.clientWithOrigin; +import static org.elasticsearch.xpack.core.security.SecurityField.setting; +import static org.elasticsearch.xpack.security.audit.AuditLevel.ACCESS_DENIED; +import static org.elasticsearch.xpack.security.audit.AuditLevel.ACCESS_GRANTED; +import static org.elasticsearch.xpack.security.audit.AuditLevel.ANONYMOUS_ACCESS_DENIED; +import static org.elasticsearch.xpack.security.audit.AuditLevel.AUTHENTICATION_FAILED; +import static org.elasticsearch.xpack.security.audit.AuditLevel.AUTHENTICATION_SUCCESS; +import static org.elasticsearch.xpack.security.audit.AuditLevel.CONNECTION_DENIED; +import static org.elasticsearch.xpack.security.audit.AuditLevel.CONNECTION_GRANTED; +import static org.elasticsearch.xpack.security.audit.AuditLevel.REALM_AUTHENTICATION_FAILED; +import static org.elasticsearch.xpack.security.audit.AuditLevel.RUN_AS_DENIED; +import static org.elasticsearch.xpack.security.audit.AuditLevel.RUN_AS_GRANTED; +import static org.elasticsearch.xpack.security.audit.AuditLevel.SYSTEM_ACCESS_GRANTED; +import static org.elasticsearch.xpack.security.audit.AuditLevel.TAMPERED_REQUEST; +import static org.elasticsearch.xpack.security.audit.AuditLevel.parse; +import static org.elasticsearch.xpack.security.audit.AuditUtil.indices; +import static org.elasticsearch.xpack.security.audit.AuditUtil.restRequestContent; +import static org.elasticsearch.xpack.security.audit.index.IndexNameResolver.resolve; +import static org.elasticsearch.xpack.security.support.IndexLifecycleManager.SECURITY_VERSION_STRING; + +/** + * Audit trail implementation that writes events into an index. 
+ */ +public class IndexAuditTrail extends AbstractComponent implements AuditTrail { + + public static final String NAME = "index"; + public static final String DOC_TYPE = "doc"; + public static final String INDEX_TEMPLATE_NAME = "security_audit_log"; + + private static final int DEFAULT_BULK_SIZE = 1000; + private static final int MAX_BULK_SIZE = 10000; + private static final int DEFAULT_MAX_QUEUE_SIZE = 10000; + private static final TimeValue DEFAULT_FLUSH_INTERVAL = TimeValue.timeValueSeconds(1); + private static final IndexNameResolver.Rollover DEFAULT_ROLLOVER = IndexNameResolver.Rollover.DAILY; + private static final Setting ROLLOVER_SETTING = + new Setting<>(setting("audit.index.rollover"), (s) -> DEFAULT_ROLLOVER.name(), + s -> IndexNameResolver.Rollover.valueOf(s.toUpperCase(Locale.ENGLISH)), Property.NodeScope); + private static final Setting QUEUE_SIZE_SETTING = + Setting.intSetting(setting("audit.index.queue_max_size"), DEFAULT_MAX_QUEUE_SIZE, 1, Property.NodeScope); + private static final String DEFAULT_CLIENT_NAME = "security-audit-client"; + + private static final List DEFAULT_EVENT_INCLUDES = Arrays.asList( + ACCESS_DENIED.toString(), + ACCESS_GRANTED.toString(), + ANONYMOUS_ACCESS_DENIED.toString(), + AUTHENTICATION_FAILED.toString(), + REALM_AUTHENTICATION_FAILED.toString(), + CONNECTION_DENIED.toString(), + CONNECTION_GRANTED.toString(), + TAMPERED_REQUEST.toString(), + RUN_AS_DENIED.toString(), + RUN_AS_GRANTED.toString(), + AUTHENTICATION_SUCCESS.toString() + ); + private static final String FORBIDDEN_INDEX_SETTING = "index.mapper.dynamic"; + + private static final Setting INDEX_SETTINGS = + Setting.groupSetting(setting("audit.index.settings.index."), Property.NodeScope); + private static final Setting> INCLUDE_EVENT_SETTINGS = + Setting.listSetting(setting("audit.index.events.include"), DEFAULT_EVENT_INCLUDES, Function.identity(), + Property.NodeScope); + private static final Setting> EXCLUDE_EVENT_SETTINGS = + Setting.listSetting(setting("audit.index.events.exclude"), Collections.emptyList(), + Function.identity(), Property.NodeScope); + private static final Setting INCLUDE_REQUEST_BODY = + Setting.boolSetting(setting("audit.index.events.emit_request_body"), false, Property.NodeScope); + private static final Setting REMOTE_CLIENT_SETTINGS = + Setting.groupSetting(setting("audit.index.client."), Property.NodeScope); + private static final Setting BULK_SIZE_SETTING = + Setting.intSetting(setting("audit.index.bulk_size"), DEFAULT_BULK_SIZE, 1, MAX_BULK_SIZE, Property.NodeScope); + private static final Setting FLUSH_TIMEOUT_SETTING = + Setting.timeSetting(setting("audit.index.flush_interval"), DEFAULT_FLUSH_INTERVAL, + TimeValue.timeValueMillis(1L), Property.NodeScope); + + private final AtomicReference state = new AtomicReference<>(State.INITIALIZED); + private final String nodeName; + private final Client client; + private final QueueConsumer queueConsumer; + private final ThreadPool threadPool; + private final ClusterService clusterService; + private final boolean indexToRemoteCluster; + private final EnumSet events; + private final IndexNameResolver.Rollover rollover; + private final boolean includeRequestBody; + + private BulkProcessor bulkProcessor; + private String nodeHostName; + private String nodeHostAddress; + + @Override + public String name() { + return NAME; + } + + public IndexAuditTrail(Settings settings, Client client, ThreadPool threadPool, ClusterService clusterService) { + super(settings); + this.threadPool = threadPool; + this.clusterService = 
clusterService; + this.nodeName = Node.NODE_NAME_SETTING.get(settings); + final int maxQueueSize = QUEUE_SIZE_SETTING.get(settings); + this.queueConsumer = new QueueConsumer(EsExecutors.threadName(settings, "audit-queue-consumer"), createQueue(maxQueueSize)); + this.rollover = ROLLOVER_SETTING.get(settings); + this.events = parse(INCLUDE_EVENT_SETTINGS.get(settings), EXCLUDE_EVENT_SETTINGS.get(settings)); + this.indexToRemoteCluster = REMOTE_CLIENT_SETTINGS.get(settings).names().size() > 0; + this.includeRequestBody = INCLUDE_REQUEST_BODY.get(settings); + + if (indexToRemoteCluster == false) { + // in the absence of client settings for remote indexing, fall back to the client that was passed in. + this.client = clientWithOrigin(client, SECURITY_ORIGIN); + } else { + this.client = initializeRemoteClient(settings, logger); + } + + } + + public State state() { + return state.get(); + } + + /** + * This method determines if this service can be started based on the state in the {@link ClusterChangedEvent} and + * if the node is the master or not. When using remote indexing, a call to the remote cluster will be made to retrieve + * the state and the same rules will be applied. In order for the service to start, the following must be true: + *

+ * <ol>
+ *    <li>The cluster must not have a {@link GatewayService#STATE_NOT_RECOVERED_BLOCK}; in other words the gateway
+ *        must have recovered from disk already.</li>
+ *    <li>The current node must be the master OR the security_audit_log index template must exist</li>
+ *    <li>The current audit index must not exist or have all primary shards active. The current audit index name
+ *        is determined by the rollover settings and current time</li>
+ * </ol>
+ * + * @param event the {@link ClusterChangedEvent} containing the up to date cluster state + * @return true if all requirements are met and the service can be started + */ + public boolean canStart(ClusterChangedEvent event) { + if (indexToRemoteCluster) { + // just return true as we do not determine whether we can start or not based on the local cluster state, but must base it off + // of the remote cluster state and this method is called on the cluster state update thread, so we do not really want to + // execute remote calls on this thread + return true; + } + synchronized (this) { + return canStart(event.state()); + } + } + + private boolean canStart(ClusterState clusterState) { + if (clusterState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { + // wait until the gateway has recovered from disk, otherwise we think may not have audit indices + // but they may not have been restored from the cluster state on disk + logger.debug("index audit trail waiting until gateway has recovered from disk"); + return false; + } + + if (TemplateUtils.checkTemplateExistsAndVersionMatches(INDEX_TEMPLATE_NAME, SECURITY_VERSION_STRING, + clusterState, logger, Version.CURRENT::onOrAfter) == false) { + logger.debug("security audit index template [{}] is not up to date", INDEX_TEMPLATE_NAME); + return false; + } + + String index = getIndexName(); + IndexMetaData metaData = clusterState.metaData().index(index); + if (metaData == null) { + logger.debug("security audit index [{}] does not exist, so service can start", index); + return true; + } + + if (clusterState.routingTable().index(index).allPrimaryShardsActive()) { + logger.debug("security audit index [{}] all primary shards started, so service can start", index); + return true; + } + logger.debug("security audit index [{}] does not have all primary shards started, so service cannot start", index); + return false; + } + + private String getIndexName() { + final Message first = peek(); + final String index; + if (first == null) { + index = resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, DateTime.now(DateTimeZone.UTC), rollover); + } else { + index = resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, first.timestamp, rollover); + } + return index; + } + + /** + * Starts the service. The state is moved to {@link org.elasticsearch.xpack.security.audit.index.IndexAuditTrail.State#STARTING} + * at the beginning of the method. The service's components are initialized and if the current node is the master, the index + * template will be stored. The state is moved {@link org.elasticsearch.xpack.security.audit.index.IndexAuditTrail.State#STARTED} + * and before returning the queue of messages that came before the service started is drained. 
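The start-up flow described above moves the trail through INITIALIZED, STARTING and STARTED, and resets to INITIALIZED whenever setup fails so that it can be retried. The snippet below is a minimal standalone sketch of that compare-and-set style of lifecycle, not the implementation that follows; the class and method names are invented for illustration.

import java.util.concurrent.atomic.AtomicReference;

public class LifecycleSketch {

    enum State { INITIALIZED, STARTING, STARTED, STOPPED }

    private final AtomicReference<State> state = new AtomicReference<>(State.INITIALIZED);

    // Only one caller can win the INITIALIZED -> STARTING transition; a failed
    // compare-and-set means another thread already started (or stopped) the component.
    boolean start() {
        if (state.compareAndSet(State.INITIALIZED, State.STARTING) == false) {
            return false;
        }
        // ... asynchronous setup would happen here; on failure the state would be
        // reset to INITIALIZED so that start() can be attempted again ...
        return state.compareAndSet(State.STARTING, State.STARTED);
    }

    public static void main(String[] args) {
        LifecycleSketch sketch = new LifecycleSketch();
        System.out.println(sketch.start()); // true: INITIALIZED -> STARTING -> STARTED
        System.out.println(sketch.start()); // false: already STARTED
    }
}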
+ */ + public void start() { + if (state.compareAndSet(State.INITIALIZED, State.STARTING)) { + this.nodeHostName = clusterService.localNode().getHostName(); + this.nodeHostAddress = clusterService.localNode().getHostAddress(); + if (indexToRemoteCluster) { + client.admin().cluster().prepareState().execute(new ActionListener() { + @Override + public void onResponse(ClusterStateResponse clusterStateResponse) { + logger.trace("remote cluster state is [{}] [{}]", + clusterStateResponse.getClusterName(), clusterStateResponse.getState()); + if (canStart(clusterStateResponse.getState())) { + updateCurrentIndexMappingsIfNecessary(clusterStateResponse.getState()); + } else if (TemplateUtils.checkTemplateExistsAndVersionMatches(INDEX_TEMPLATE_NAME, + SECURITY_VERSION_STRING, clusterStateResponse.getState(), logger, + Version.CURRENT::onOrAfter) == false) { + putTemplate(customAuditIndexSettings(settings, logger), + e -> { + logger.error("failed to put audit trail template", e); + transitionStartingToInitialized(); + }); + } else { + // for some reason we can't start up since the remote cluster is not fully setup. in this case + // we try to wait for yellow status (all primaries started up) this will also wait for + // state recovery etc. + String indexName = getIndexName(); + // if this index doesn't exists the call will fail with a not_found exception... + client.admin().cluster().prepareHealth().setIndices(indexName).setWaitForYellowStatus().execute( + ActionListener.wrap( + (x) -> { + logger.debug("have yellow status on remote index [{}] ", indexName); + transitionStartingToInitialized(); + start(); + }, + (e) -> { + logger.error("failed to get wait for yellow status on remote index [" + indexName + "]", e); + transitionStartingToInitialized(); + })); + } + } + + @Override + public void onFailure(Exception e) { + transitionStartingToInitialized(); + logger.error("failed to get remote cluster state", e); + } + }); + } else { + updateCurrentIndexMappingsIfNecessary(clusterService.state()); + } + } + } + + // pkg private for tests + void updateCurrentIndexMappingsIfNecessary(ClusterState state) { + final String index = getIndexName(); + + AliasOrIndex aliasOrIndex = state.getMetaData().getAliasAndIndexLookup().get(index); + if (aliasOrIndex != null) { + // check mappings + final List indices = aliasOrIndex.getIndices(); + if (aliasOrIndex.isAlias() && indices.size() > 1) { + throw new IllegalStateException("Alias [" + index + "] points to more than one index: " + + indices.stream().map(imd -> imd.getIndex().getName()).collect(Collectors.toList())); + } + IndexMetaData indexMetaData = indices.get(0); + MappingMetaData docMapping = indexMetaData.mapping("doc"); + if (docMapping == null) { + if (indexToRemoteCluster || state.nodes().isLocalNodeElectedMaster()) { + putAuditIndexMappingsAndStart(index); + } else { + logger.trace("audit index [{}] is missing mapping for type [{}]", index, DOC_TYPE); + transitionStartingToInitialized(); + } + } else { + @SuppressWarnings("unchecked") + Map meta = (Map) docMapping.sourceAsMap().get("_meta"); + if (meta == null) { + logger.info("Missing _meta field in mapping [{}] of index [{}]", docMapping.type(), index); + throw new IllegalStateException("Cannot read security-version string in index " + index); + } + + final String versionString = (String) meta.get(SECURITY_VERSION_STRING); + if (versionString != null && Version.fromString(versionString).onOrAfter(Version.CURRENT)) { + innerStart(); + } else { + if (indexToRemoteCluster || 
state.nodes().isLocalNodeElectedMaster()) { + putAuditIndexMappingsAndStart(index); + } else if (versionString == null) { + logger.trace("audit index [{}] mapping is missing meta field [{}]", index, SECURITY_VERSION_STRING); + transitionStartingToInitialized(); + } else { + logger.trace("audit index [{}] has the incorrect version [{}]", index, versionString); + transitionStartingToInitialized(); + } + } + } + } else { + innerStart(); + } + } + + private void putAuditIndexMappingsAndStart(String index) { + putAuditIndexMappings(index, getPutIndexTemplateRequest(Settings.EMPTY).mappings().get(DOC_TYPE), + ActionListener.wrap(ignore -> { + logger.trace("updated mappings on audit index [{}]", index); + innerStart(); + }, e -> { + logger.error(new ParameterizedMessage("failed to update mappings on audit index [{}]", index), e); + transitionStartingToInitialized(); // reset to initialized so we can retry + })); + } + + private void transitionStartingToInitialized() { + if (state.compareAndSet(State.STARTING, State.INITIALIZED) == false) { + final String message = "state transition from starting to initialized failed, current value: " + state.get(); + assert false : message; + logger.error(message); + } + } + + void innerStart() { + initializeBulkProcessor(); + queueConsumer.start(); + if (state.compareAndSet(State.STARTING, State.STARTED) == false) { + final String message = "state transition from starting to started failed, current value: " + state.get(); + assert false : message; + logger.error(message); + } else { + logger.trace("successful state transition from starting to started, current value: [{}]", state.get()); + } + } + + public synchronized void stop() { + if (state.compareAndSet(State.STARTED, State.STOPPING)) { + queueConsumer.close(); + } + + if (state() != State.STOPPED) { + try { + if (bulkProcessor != null) { + if (bulkProcessor.awaitClose(10, TimeUnit.SECONDS) == false) { + logger.warn("index audit trail failed to store all pending events after waiting for 10s"); + } + } + } catch (InterruptedException exc) { + Thread.currentThread().interrupt(); + } finally { + if (indexToRemoteCluster) { + client.close(); + } + state.set(State.STOPPED); + } + } + } + + @Override + public void authenticationSuccess(String realm, User user, RestRequest request) { + if (events.contains(AUTHENTICATION_SUCCESS)) { + try { + enqueue(message("authentication_success", new Tuple<>(realm, realm), user, null, request), "authentication_success"); + } catch (final Exception e) { + logger.warn("failed to index audit event: [authentication_success]", e); + } + } + } + + @Override + public void authenticationSuccess(String realm, User user, String action, TransportMessage message) { + if (events.contains(AUTHENTICATION_SUCCESS)) { + try { + enqueue(message("authentication_success", action, user, null, new Tuple<>(realm, realm), null, message), + "authentication_success"); + } catch (final Exception e) { + logger.warn("failed to index audit event: [authentication_success]", e); + } + } + } + + @Override + public void anonymousAccessDenied(String action, TransportMessage message) { + if (events.contains(ANONYMOUS_ACCESS_DENIED)) { + try { + enqueue(message("anonymous_access_denied", action, (User) null, null, null, indices(message), message), + "anonymous_access_denied"); + } catch (Exception e) { + logger.warn("failed to index audit event: [anonymous_access_denied]", e); + } + } + } + + @Override + public void anonymousAccessDenied(RestRequest request) { + if (events.contains(ANONYMOUS_ACCESS_DENIED)) { + 
try { + enqueue(message("anonymous_access_denied", null, null, null, null, request), "anonymous_access_denied"); + } catch (Exception e) { + logger.warn("failed to index audit event: [anonymous_access_denied]", e); + } + } + } + + @Override + public void authenticationFailed(String action, TransportMessage message) { + if (events.contains(AUTHENTICATION_FAILED)) { + try { + enqueue(message("authentication_failed", action, (User) null, null, null, indices(message), message), + "authentication_failed"); + } catch (Exception e) { + logger.warn("failed to index audit event: [authentication_failed]", e); + } + } + } + + @Override + public void authenticationFailed(RestRequest request) { + if (events.contains(AUTHENTICATION_FAILED)) { + try { + enqueue(message("authentication_failed", null, null, null, null, request), "authentication_failed"); + } catch (Exception e) { + logger.warn("failed to index audit event: [authentication_failed]", e); + } + } + } + + @Override + public void authenticationFailed(AuthenticationToken token, String action, TransportMessage message) { + if (events.contains(AUTHENTICATION_FAILED)) { + if (XPackUser.is(token.principal()) == false) { + try { + enqueue(message("authentication_failed", action, token, null, indices(message), message), "authentication_failed"); + } catch (Exception e) { + logger.warn("failed to index audit event: [authentication_failed]", e); + } + } + } + } + + @Override + public void authenticationFailed(AuthenticationToken token, RestRequest request) { + if (events.contains(AUTHENTICATION_FAILED)) { + if (XPackUser.is(token.principal()) == false) { + try { + enqueue(message("authentication_failed", null, token, null, null, request), "authentication_failed"); + } catch (Exception e) { + logger.warn("failed to index audit event: [authentication_failed]", e); + } + } + } + } + + @Override + public void authenticationFailed(String realm, AuthenticationToken token, String action, TransportMessage message) { + if (events.contains(REALM_AUTHENTICATION_FAILED)) { + if (XPackUser.is(token.principal()) == false) { + try { + enqueue(message("realm_authentication_failed", action, token, realm, indices(message), message), + "realm_authentication_failed"); + } catch (Exception e) { + logger.warn("failed to index audit event: [authentication_failed]", e); + } + } + } + } + + @Override + public void authenticationFailed(String realm, AuthenticationToken token, RestRequest request) { + if (events.contains(REALM_AUTHENTICATION_FAILED)) { + if (XPackUser.is(token.principal()) == false) { + try { + enqueue(message("realm_authentication_failed", null, token, realm, null, request), "realm_authentication_failed"); + } catch (Exception e) { + logger.warn("failed to index audit event: [authentication_failed]", e); + } + } + } + } + + @Override + public void accessGranted(Authentication authentication, String action, TransportMessage message, String[] roleNames) { + final User user = authentication.getUser(); + final boolean isSystem = SystemUser.is(user) || XPackUser.is(user); + final boolean logSystemAccessGranted = isSystem && events.contains(SYSTEM_ACCESS_GRANTED); + final boolean shouldLog = logSystemAccessGranted || (isSystem == false && events.contains(ACCESS_GRANTED)); + if (shouldLog) { + try { + assert authentication.getAuthenticatedBy() != null; + final String authRealmName = authentication.getAuthenticatedBy().getName(); + final String lookRealmName = authentication.getLookedUpBy() == null ? 
null : authentication.getLookedUpBy().getName(); + enqueue(message("access_granted", action, user, roleNames, new Tuple(authRealmName, lookRealmName), indices(message), + message), "access_granted"); + } catch (final Exception e) { + logger.warn("failed to index audit event: [access_granted]", e); + } + } + } + + @Override + public void accessDenied(Authentication authentication, String action, TransportMessage message, String[] roleNames) { + if (events.contains(ACCESS_DENIED) && (XPackUser.is(authentication.getUser()) == false)) { + try { + assert authentication.getAuthenticatedBy() != null; + final String authRealmName = authentication.getAuthenticatedBy().getName(); + final String lookRealmName = authentication.getLookedUpBy() == null ? null : authentication.getLookedUpBy().getName(); + enqueue(message("access_denied", action, authentication.getUser(), roleNames, new Tuple(authRealmName, lookRealmName), + indices(message), message), "access_denied"); + } catch (final Exception e) { + logger.warn("failed to index audit event: [access_denied]", e); + } + } + } + + @Override + public void tamperedRequest(RestRequest request) { + if (events.contains(TAMPERED_REQUEST)) { + try { + enqueue(message("tampered_request", null, null, null, null, request), "tampered_request"); + } catch (Exception e) { + logger.warn("failed to index audit event: [tampered_request]", e); + } + } + } + + @Override + public void tamperedRequest(String action, TransportMessage message) { + if (events.contains(TAMPERED_REQUEST)) { + try { + enqueue(message("tampered_request", action, (User) null, null, null, indices(message), message), "tampered_request"); + } catch (Exception e) { + logger.warn("failed to index audit event: [tampered_request]", e); + } + } + } + + @Override + public void tamperedRequest(User user, String action, TransportMessage request) { + if (events.contains(TAMPERED_REQUEST) && XPackUser.is(user) == false) { + try { + enqueue(message("tampered_request", action, user, null, null, indices(request), request), "tampered_request"); + } catch (Exception e) { + logger.warn("failed to index audit event: [tampered_request]", e); + } + } + } + + @Override + public void connectionGranted(InetAddress inetAddress, String profile, SecurityIpFilterRule rule) { + if (events.contains(CONNECTION_GRANTED)) { + try { + enqueue(message("ip_filter", "connection_granted", inetAddress, profile, rule), "connection_granted"); + } catch (Exception e) { + logger.warn("failed to index audit event: [connection_granted]", e); + } + } + } + + @Override + public void connectionDenied(InetAddress inetAddress, String profile, SecurityIpFilterRule rule) { + if (events.contains(CONNECTION_DENIED)) { + try { + enqueue(message("ip_filter", "connection_denied", inetAddress, profile, rule), "connection_denied"); + } catch (Exception e) { + logger.warn("failed to index audit event: [connection_denied]", e); + } + } + } + + @Override + public void runAsGranted(Authentication authentication, String action, TransportMessage message, String[] roleNames) { + if (events.contains(RUN_AS_GRANTED)) { + try { + assert authentication.getAuthenticatedBy() != null; + final String authRealmName = authentication.getAuthenticatedBy().getName(); + final String lookRealmName = authentication.getLookedUpBy() == null ? 
null : authentication.getLookedUpBy().getName(); + enqueue(message("run_as_granted", action, authentication.getUser(), roleNames, new Tuple<>(authRealmName, lookRealmName), + null, message), "run_as_granted"); + } catch (final Exception e) { + logger.warn("failed to index audit event: [run_as_granted]", e); + } + } + } + + @Override + public void runAsDenied(Authentication authentication, String action, TransportMessage message, String[] roleNames) { + if (events.contains(RUN_AS_DENIED)) { + try { + assert authentication.getAuthenticatedBy() != null; + final String authRealmName = authentication.getAuthenticatedBy().getName(); + final String lookRealmName = authentication.getLookedUpBy() == null ? null : authentication.getLookedUpBy().getName(); + enqueue(message("run_as_denied", action, authentication.getUser(), roleNames, new Tuple<>(authRealmName, lookRealmName), + null, message), "run_as_denied"); + } catch (final Exception e) { + logger.warn("failed to index audit event: [run_as_denied]", e); + } + } + } + + @Override + public void runAsDenied(Authentication authentication, RestRequest request, String[] roleNames) { + if (events.contains(RUN_AS_DENIED)) { + try { + assert authentication.getAuthenticatedBy() != null; + final String authRealmName = authentication.getAuthenticatedBy().getName(); + final String lookRealmName = authentication.getLookedUpBy() == null ? null : authentication.getLookedUpBy().getName(); + enqueue(message("run_as_denied", new Tuple<>(authRealmName, lookRealmName), authentication.getUser(), roleNames, request), + "run_as_denied"); + } catch (final Exception e) { + logger.warn("failed to index audit event: [run_as_denied]", e); + } + } + } + + private Message message(String type, @Nullable String action, @Nullable User user, @Nullable String[] roleNames, + @Nullable Tuple realms, @Nullable Set indices, TransportMessage message) + throws Exception { + + Message msg = new Message().start(); + common("transport", type, msg.builder); + originAttributes(message, msg.builder, clusterService.localNode(), threadPool.getThreadContext()); + + if (action != null) { + msg.builder.field(Field.ACTION, action); + } + addUserAndRealmFields(msg.builder, type, user, realms); + if (roleNames != null) { + msg.builder.array(Field.ROLE_NAMES, roleNames); + } + if (indices != null) { + msg.builder.array(Field.INDICES, indices.toArray(Strings.EMPTY_ARRAY)); + } + msg.builder.field(Field.REQUEST, message.getClass().getSimpleName()); + + return msg.end(); + } + + private void addUserAndRealmFields(XContentBuilder builder, String type, @Nullable User user, @Nullable Tuple realms) + throws IOException { + if (user != null) { + if (user.isRunAs()) { + if ("run_as_granted".equals(type) || "run_as_denied".equals(type)) { + builder.field(Field.PRINCIPAL, user.authenticatedUser().principal()); + builder.field(Field.RUN_AS_PRINCIPAL, user.principal()); + if (realms != null) { + // realms.v1() is the authenticating realm + builder.field(Field.REALM, realms.v1()); + // realms.v2() is the lookup realm + builder.field(Field.RUN_AS_REALM, realms.v2()); + } + } else { + // TODO: this doesn't make sense... 
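// ----------------------------------------------------------------------------
// [Editorial sketch, not part of this change] The realm tuple handled above is
// always (v1 = authenticating realm, v2 = lookup realm). For the run_as_granted
// and run_as_denied event types the authenticating user is written as
// "principal" and the effective user as "run_as_principal"; for every other
// event type involving a run-as user the mapping is inverted into
// "principal"/"run_by_principal". A minimal, self-contained illustration of the
// two resulting field sets (user and realm names are hypothetical):
import java.util.LinkedHashMap;
import java.util.Map;

class RunAsAuditFieldsSketch {
    static Map<String, String> runAsGrantedFields() {
        Map<String, String> fields = new LinkedHashMap<>();
        fields.put("principal", "admin");          // user.authenticatedUser().principal()
        fields.put("run_as_principal", "joe");     // user.principal()
        fields.put("realm", "native1");            // realms.v1(), the authenticating realm
        fields.put("run_as_realm", "ldap1");       // realms.v2(), the lookup realm
        return fields;
    }

    static Map<String, String> otherRunAsEventFields() {
        Map<String, String> fields = new LinkedHashMap<>();
        fields.put("principal", "joe");            // user.principal()
        fields.put("run_by_principal", "admin");   // user.authenticatedUser().principal()
        fields.put("realm", "ldap1");              // realms.v2(), the lookup realm
        fields.put("run_by_realm", "native1");     // realms.v1(), the authenticating realm
        return fields;
    }
}
// ----------------------------------------------------------------------------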
+ builder.field(Field.PRINCIPAL, user.principal()); + builder.field(Field.RUN_BY_PRINCIPAL, user.authenticatedUser().principal()); + if (realms != null) { + // realms.v2() is the lookup realm + builder.field(Field.REALM, realms.v2()); + // realms.v1() is the authenticating realm + builder.field(Field.RUN_BY_REALM, realms.v1()); + } + } + } else { + builder.field(Field.PRINCIPAL, user.principal()); + if (realms != null) { + // realms.v1() is the authenticating realm + builder.field(Field.REALM, realms.v1()); + } + } + } + } + + // FIXME - clean up the message generation + private Message message(String type, @Nullable String action, @Nullable AuthenticationToken token, + @Nullable String realm, @Nullable Set indices, TransportMessage message) throws Exception { + + Message msg = new Message().start(); + common("transport", type, msg.builder); + originAttributes(message, msg.builder, clusterService.localNode(), threadPool.getThreadContext()); + + if (action != null) { + msg.builder.field(Field.ACTION, action); + } + if (token != null) { + msg.builder.field(Field.PRINCIPAL, token.principal()); + } + if (realm != null) { + msg.builder.field(Field.REALM, realm); + } + if (indices != null) { + msg.builder.array(Field.INDICES, indices.toArray(Strings.EMPTY_ARRAY)); + } + msg.builder.field(Field.REQUEST, message.getClass().getSimpleName()); + + return msg.end(); + } + + private Message message(String type, @Nullable String action, @Nullable AuthenticationToken token, + @Nullable String realm, @Nullable Set indices, RestRequest request) throws Exception { + + Message msg = new Message().start(); + common("rest", type, msg.builder); + + if (action != null) { + msg.builder.field(Field.ACTION, action); + } + + if (token != null) { + msg.builder.field(Field.PRINCIPAL, token.principal()); + } + + if (realm != null) { + msg.builder.field(Field.REALM, realm); + } + if (indices != null) { + msg.builder.array(Field.INDICES, indices.toArray(Strings.EMPTY_ARRAY)); + } + if (includeRequestBody) { + msg.builder.field(Field.REQUEST_BODY, restRequestContent(request)); + } + msg.builder.field(Field.ORIGIN_TYPE, "rest"); + SocketAddress address = request.getRemoteAddress(); + if (address instanceof InetSocketAddress) { + msg.builder.field(Field.ORIGIN_ADDRESS, NetworkAddress.format(((InetSocketAddress) request.getRemoteAddress()) + .getAddress())); + } else { + msg.builder.field(Field.ORIGIN_ADDRESS, address); + } + msg.builder.field(Field.URI, request.uri()); + return msg.end(); + } + + private Message message(String type, @Nullable Tuple realms, @Nullable User user, @Nullable String[] roleNames, + RestRequest request) throws Exception { + + Message msg = new Message().start(); + common("rest", type, msg.builder); + + addUserAndRealmFields(msg.builder, type, user, realms); + if (roleNames != null) { + msg.builder.array(Field.ROLE_NAMES, roleNames); + } + if (includeRequestBody) { + msg.builder.field(Field.REQUEST_BODY, restRequestContent(request)); + } + msg.builder.field(Field.ORIGIN_TYPE, "rest"); + SocketAddress address = request.getRemoteAddress(); + if (address instanceof InetSocketAddress) { + msg.builder.field(Field.ORIGIN_ADDRESS, NetworkAddress.format(((InetSocketAddress) request.getRemoteAddress()) + .getAddress())); + } else { + msg.builder.field(Field.ORIGIN_ADDRESS, address); + } + msg.builder.field(Field.URI, request.uri()); + + return msg.end(); + } + + private Message message(String layer, String type, InetAddress originAddress, String profile, + SecurityIpFilterRule rule) throws IOException { + + 
Message msg = new Message().start(); + common(layer, type, msg.builder); + + msg.builder.field(Field.ORIGIN_ADDRESS, NetworkAddress.format(originAddress)); + msg.builder.field(Field.TRANSPORT_PROFILE, profile); + msg.builder.field(Field.RULE, rule); + + return msg.end(); + } + + private XContentBuilder common(String layer, String type, XContentBuilder builder) throws IOException { + builder.field(Field.NODE_NAME, nodeName); + builder.field(Field.NODE_HOST_NAME, nodeHostName); + builder.field(Field.NODE_HOST_ADDRESS, nodeHostAddress); + builder.field(Field.LAYER, layer); + builder.field(Field.TYPE, type); + return builder; + } + + private static XContentBuilder originAttributes(TransportMessage message, XContentBuilder builder, + DiscoveryNode localNode, ThreadContext threadContext) throws IOException { + + // first checking if the message originated in a rest call + InetSocketAddress restAddress = RemoteHostHeader.restRemoteAddress(threadContext); + if (restAddress != null) { + builder.field(Field.ORIGIN_TYPE, "rest"); + builder.field(Field.ORIGIN_ADDRESS, NetworkAddress.format(restAddress.getAddress())); + return builder; + } + + // we'll see if was originated in a remote node + TransportAddress address = message.remoteAddress(); + if (address != null) { + builder.field(Field.ORIGIN_TYPE, "transport"); + builder.field(Field.ORIGIN_ADDRESS, + NetworkAddress.format(address.address().getAddress())); + return builder; + } + + // the call was originated locally on this node + builder.field(Field.ORIGIN_TYPE, "local_node"); + builder.field(Field.ORIGIN_ADDRESS, localNode.getHostAddress()); + return builder; + } + + void enqueue(Message message, String type) { + State currentState = state(); + if (currentState != State.STOPPING && currentState != State.STOPPED) { + boolean accepted = queueConsumer.offer(message); + if (!accepted) { + logger.warn("failed to index audit event: [{}]. internal queue is full, which may be caused by a high indexing rate or " + + "issue with the destination", type); + } + } + } + + // for testing to ensure we get the proper timestamp and index name... 
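// ----------------------------------------------------------------------------
// [Editorial sketch, not part of this change] enqueue() above intentionally uses
// the non-blocking BlockingQueue#offer rather than put(): when the bounded audit
// queue (QUEUE_SIZE_SETTING) is full, the event is dropped and only a warning is
// logged, so audit indexing can never stall the thread that produced the event.
// A minimal, self-contained model of that drop-on-overflow policy (queue size
// and event type are hypothetical):
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

class DropOnOverflowQueueSketch {
    private final BlockingQueue<String> queue = new LinkedBlockingQueue<>(1000);

    /** Returns false instead of blocking when the queue is full; the caller logs a warning. */
    boolean enqueue(String auditEvent) {
        return queue.offer(auditEvent);
    }
}
// ----------------------------------------------------------------------------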
+ Message peek() { + return queueConsumer.peek(); + } + + Client initializeRemoteClient(Settings settings, Logger logger) { + Settings clientSettings = REMOTE_CLIENT_SETTINGS.get(settings); + List hosts = clientSettings.getAsList("hosts"); + if (hosts.isEmpty()) { + throw new ElasticsearchException("missing required setting " + + "[" + REMOTE_CLIENT_SETTINGS.getKey() + ".hosts] for remote audit log indexing"); + } + + final int processors = EsExecutors.PROCESSORS_SETTING.get(settings); + if (EsExecutors.PROCESSORS_SETTING.exists(clientSettings)) { + final int clientProcessors = EsExecutors.PROCESSORS_SETTING.get(clientSettings); + if (clientProcessors != processors) { + final String message = String.format( + Locale.ROOT, + "explicit processor setting [%d] for audit trail remote client does not match inherited processor setting [%d]", + clientProcessors, + processors); + throw new IllegalStateException(message); + } + } + + if (clientSettings.get("cluster.name", "").isEmpty()) { + throw new ElasticsearchException("missing required setting " + + "[" + REMOTE_CLIENT_SETTINGS.getKey() + ".cluster.name] for remote audit log indexing"); + } + + List> hostPortPairs = new ArrayList<>(); + + for (String host : hosts) { + List hostPort = Arrays.asList(host.trim().split(":")); + if (hostPort.size() != 1 && hostPort.size() != 2) { + logger.warn("invalid host:port specified: [{}] for setting [{}.hosts]", REMOTE_CLIENT_SETTINGS.getKey(), host); + } + hostPortPairs.add(new Tuple<>(hostPort.get(0), hostPort.size() == 2 ? Integer.valueOf(hostPort.get(1)) : 9300)); + } + + if (hostPortPairs.size() == 0) { + throw new ElasticsearchException("no valid host:port pairs specified for setting [" + + REMOTE_CLIENT_SETTINGS.getKey() + ".hosts]"); + } + final Settings theClientSetting = + Settings.builder() + .put(clientSettings.filter((s) -> s.startsWith("hosts") == false)) // hosts is not a valid setting + .put(EsExecutors.PROCESSORS_SETTING.getKey(), processors) + .build(); + final TransportClient transportClient = new TransportClient(Settings.builder() + .put("node.name", DEFAULT_CLIENT_NAME + "-" + Node.NODE_NAME_SETTING.get(settings)) + .put(theClientSetting).build(), Settings.EMPTY, remoteTransportClientPlugins(), null) {}; + for (Tuple pair : hostPortPairs) { + try { + transportClient.addTransportAddress(new TransportAddress(InetAddress.getByName(pair.v1()), pair.v2())); + } catch (UnknownHostException e) { + throw new ElasticsearchException("could not find host {}", e, pair.v1()); + } + } + + logger.info("forwarding audit events to remote cluster [{}] using hosts [{}]", + clientSettings.get("cluster.name", ""), hostPortPairs.toString()); + return transportClient; + } + + public static Settings customAuditIndexSettings(Settings nodeSettings, Logger logger) { + Settings newSettings = Settings.builder() + .put(INDEX_SETTINGS.get(nodeSettings), false) + .build(); + if (newSettings.names().isEmpty()) { + return Settings.EMPTY; + } + + // Filter out forbidden settings: + Settings.Builder builder = Settings.builder(); + builder.put(newSettings.filter(k -> { + String name = "index." + k; + if (FORBIDDEN_INDEX_SETTING.equals(name)) { + logger.warn("overriding the default [{}} setting is forbidden. 
ignoring...", name); + return false; + } + return true; + })); + return builder.build(); + } + + private void putTemplate(Settings customSettings, Consumer consumer) { + try { + final PutIndexTemplateRequest request = getPutIndexTemplateRequest(customSettings); + + client.admin().indices().putTemplate(request, ActionListener.wrap((response) -> { + if (response.isAcknowledged()) { + // now we may need to update the mappings of the current index + client.admin().cluster().prepareState().execute(ActionListener.wrap( + stateResponse -> updateCurrentIndexMappingsIfNecessary(stateResponse.getState()), + consumer)); + } else { + consumer.accept(new IllegalStateException("failed to put index template for audit logging")); + } + }, consumer)); + } catch (Exception e) { + logger.debug("unexpected exception while putting index template", e); + consumer.accept(e); + } + } + + private PutIndexTemplateRequest getPutIndexTemplateRequest(Settings customSettings) { + final byte[] template = TemplateUtils.loadTemplate("/" + INDEX_TEMPLATE_NAME + ".json", + Version.CURRENT.toString(), IndexLifecycleManager.TEMPLATE_VERSION_PATTERN).getBytes(StandardCharsets.UTF_8); + final PutIndexTemplateRequest request = new PutIndexTemplateRequest(INDEX_TEMPLATE_NAME).source(template, XContentType.JSON); + if (customSettings != null && customSettings.names().size() > 0) { + Settings updatedSettings = Settings.builder() + .put(request.settings()) + .put(customSettings) + .build(); + request.settings(updatedSettings); + } + return request; + } + + private void putAuditIndexMappings(String index, String mappings, ActionListener listener) { + client.admin().indices().preparePutMapping(index) + .setType(DOC_TYPE) + .setSource(mappings, XContentType.JSON) + .execute(ActionListener.wrap((response) -> { + if (response.isAcknowledged()) { + listener.onResponse(null); + } else { + listener.onFailure(new IllegalStateException("failed to put mappings for audit logging index [" + index + "]")); + } + }, + listener::onFailure)); + } + + BlockingQueue createQueue(int maxQueueSize) { + return new LinkedBlockingQueue<>(maxQueueSize); + } + + private void initializeBulkProcessor() { + + final int bulkSize = BULK_SIZE_SETTING.get(settings); + final TimeValue interval = FLUSH_TIMEOUT_SETTING.get(settings); + + bulkProcessor = BulkProcessor.builder(client, new BulkProcessor.Listener() { + @Override + public void beforeBulk(long executionId, BulkRequest request) { + } + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { + if (response.hasFailures()) { + logger.info("failed to bulk index audit events: [{}]", response.buildFailureMessage()); + } + } + + @Override + public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + logger.error(new ParameterizedMessage("failed to bulk index audit events: [{}]", failure.getMessage()), failure); + } + }).setBulkActions(bulkSize) + .setFlushInterval(interval) + .setConcurrentRequests(1) + .build(); + } + + // method for testing to allow different plugins such as mock transport... 
+ List> remoteTransportClientPlugins() { + return Arrays.asList(XPackClientPlugin.class); + } + + public static void registerSettings(List> settings) { + settings.add(INDEX_SETTINGS); + settings.add(EXCLUDE_EVENT_SETTINGS); + settings.add(INCLUDE_EVENT_SETTINGS); + settings.add(ROLLOVER_SETTING); + settings.add(BULK_SIZE_SETTING); + settings.add(FLUSH_TIMEOUT_SETTING); + settings.add(QUEUE_SIZE_SETTING); + settings.add(REMOTE_CLIENT_SETTINGS); + settings.add(INCLUDE_REQUEST_BODY); + } + + private final class QueueConsumer extends Thread implements Closeable { + private final AtomicBoolean open = new AtomicBoolean(true); + private final BlockingQueue eventQueue; + private final Message shutdownSentinelMessage; + + QueueConsumer(String name, BlockingQueue eventQueue) { + super(name); + this.eventQueue = eventQueue; + try { + shutdownSentinelMessage = new Message(); + } catch (IOException e) { + throw new AssertionError(e); + } + } + + @Override + public void close() { + if (open.compareAndSet(true, false)) { + try { + eventQueue.put(shutdownSentinelMessage); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + } + + @Override + public void run() { + while (open.get()) { + try { + final Message message = eventQueue.take(); + if (message == shutdownSentinelMessage || open.get() == false) { + break; + } + final IndexRequest indexRequest = client.prepareIndex() + .setIndex(resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, message.timestamp, rollover)) + .setType(DOC_TYPE).setSource(message.builder).request(); + bulkProcessor.add(indexRequest); + } catch (InterruptedException e) { + logger.debug("index audit queue consumer interrupted", e); + close(); + break; + } catch (Exception e) { + // log the exception and keep going + logger.warn("failed to index audit message from queue", e); + } + } + eventQueue.clear(); + } + + public boolean offer(Message message) { + if (open.get()) { + return eventQueue.offer(message); + } + return false; + } + + public Message peek() { + return eventQueue.peek(); + } + } + + static class Message { + + final DateTime timestamp; + final XContentBuilder builder; + + Message() throws IOException { + this.timestamp = DateTime.now(DateTimeZone.UTC); + this.builder = XContentFactory.jsonBuilder(); + } + + Message start() throws IOException { + builder.startObject(); + builder.timeField(Field.TIMESTAMP, timestamp); + return this; + } + + Message end() throws IOException { + builder.endObject(); + return this; + } + } + + interface Field { + String TIMESTAMP = "@timestamp"; + String NODE_NAME = "node_name"; + String NODE_HOST_NAME = "node_host_name"; + String NODE_HOST_ADDRESS = "node_host_address"; + String LAYER = "layer"; + String TYPE = "event_type"; + String ORIGIN_ADDRESS = "origin_address"; + String ORIGIN_TYPE = "origin_type"; + String PRINCIPAL = "principal"; + String ROLE_NAMES = "roles"; + String RUN_AS_PRINCIPAL = "run_as_principal"; + String RUN_AS_REALM = "run_as_realm"; + String RUN_BY_PRINCIPAL = "run_by_principal"; + String RUN_BY_REALM = "run_by_realm"; + String ACTION = "action"; + String INDICES = "indices"; + String REQUEST = "request"; + String REQUEST_BODY = "request_body"; + String URI = "uri"; + String REALM = "realm"; + String TRANSPORT_PROFILE = "transport_profile"; + String RULE = "rule"; + } + + public enum State { + INITIALIZED, + STARTING, + STARTED, + STOPPING, + STOPPED + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexNameResolver.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexNameResolver.java new file mode 100644 index 0000000000000..5a65bf813054a --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexNameResolver.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.audit.index; + +import org.joda.time.DateTime; +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.DateTimeFormatter; + +public class IndexNameResolver { + + public enum Rollover { + HOURLY ("-yyyy.MM.dd.HH"), + DAILY ("-yyyy.MM.dd"), + WEEKLY ("-yyyy.w"), + MONTHLY ("-yyyy.MM"); + + private final DateTimeFormatter formatter; + + Rollover(String format) { + this.formatter = DateTimeFormat.forPattern(format); + } + + DateTimeFormatter formatter() { + return formatter; + } + } + + private IndexNameResolver() {} + + public static String resolve(DateTime timestamp, Rollover rollover) { + return rollover.formatter().print(timestamp); + } + + public static String resolve(String indexNamePrefix, DateTime timestamp, Rollover rollover) { + return indexNamePrefix + resolve(timestamp, rollover); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java new file mode 100644 index 0000000000000..3b9a42179a577 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java @@ -0,0 +1,857 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.audit.logfile; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.node.Node; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportMessage; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.support.Automatons; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.security.audit.AuditLevel; +import org.elasticsearch.xpack.security.audit.AuditTrail; +import org.elasticsearch.xpack.security.rest.RemoteHostHeader; +import org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.util.Arrays; +import java.util.Collections; +import java.util.EnumSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.TreeMap; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.elasticsearch.common.Strings.arrayToCommaDelimitedString; +import static org.elasticsearch.xpack.core.security.SecurityField.setting; +import static org.elasticsearch.xpack.security.audit.AuditLevel.ACCESS_DENIED; +import static org.elasticsearch.xpack.security.audit.AuditLevel.ACCESS_GRANTED; +import static org.elasticsearch.xpack.security.audit.AuditLevel.ANONYMOUS_ACCESS_DENIED; +import static org.elasticsearch.xpack.security.audit.AuditLevel.AUTHENTICATION_FAILED; +import static org.elasticsearch.xpack.security.audit.AuditLevel.AUTHENTICATION_SUCCESS; +import static org.elasticsearch.xpack.security.audit.AuditLevel.CONNECTION_DENIED; +import static org.elasticsearch.xpack.security.audit.AuditLevel.CONNECTION_GRANTED; +import static org.elasticsearch.xpack.security.audit.AuditLevel.REALM_AUTHENTICATION_FAILED; +import static org.elasticsearch.xpack.security.audit.AuditLevel.RUN_AS_DENIED; +import static org.elasticsearch.xpack.security.audit.AuditLevel.RUN_AS_GRANTED; +import static org.elasticsearch.xpack.security.audit.AuditLevel.SYSTEM_ACCESS_GRANTED; +import static org.elasticsearch.xpack.security.audit.AuditLevel.TAMPERED_REQUEST; +import static org.elasticsearch.xpack.security.audit.AuditLevel.parse; +import static 
org.elasticsearch.xpack.security.audit.AuditUtil.restRequestContent; + +public class LoggingAuditTrail extends AbstractComponent implements AuditTrail, ClusterStateListener { + + public static final String NAME = "logfile"; + public static final Setting HOST_ADDRESS_SETTING = + Setting.boolSetting(setting("audit.logfile.prefix.emit_node_host_address"), false, Property.NodeScope, Property.Dynamic); + public static final Setting HOST_NAME_SETTING = + Setting.boolSetting(setting("audit.logfile.prefix.emit_node_host_name"), false, Property.NodeScope, Property.Dynamic); + public static final Setting NODE_NAME_SETTING = + Setting.boolSetting(setting("audit.logfile.prefix.emit_node_name"), true, Property.NodeScope, Property.Dynamic); + private static final List DEFAULT_EVENT_INCLUDES = Arrays.asList( + ACCESS_DENIED.toString(), + ACCESS_GRANTED.toString(), + ANONYMOUS_ACCESS_DENIED.toString(), + AUTHENTICATION_FAILED.toString(), + CONNECTION_DENIED.toString(), + TAMPERED_REQUEST.toString(), + RUN_AS_DENIED.toString(), + RUN_AS_GRANTED.toString() + ); + public static final Setting> INCLUDE_EVENT_SETTINGS = + Setting.listSetting(setting("audit.logfile.events.include"), DEFAULT_EVENT_INCLUDES, Function.identity(), Property.NodeScope, + Property.Dynamic); + public static final Setting> EXCLUDE_EVENT_SETTINGS = + Setting.listSetting(setting("audit.logfile.events.exclude"), Collections.emptyList(), Function.identity(), Property.NodeScope, + Property.Dynamic); + public static final Setting INCLUDE_REQUEST_BODY = + Setting.boolSetting(setting("audit.logfile.events.emit_request_body"), false, Property.NodeScope, Property.Dynamic); + private static final String FILTER_POLICY_PREFIX = setting("audit.logfile.events.ignore_filters."); + // because of the default wildcard value (*) for the field filter, a policy with + // an unspecified filter field will match events that have any value for that + // particular field, as well as events with that particular field missing + private static final Setting.AffixSetting> FILTER_POLICY_IGNORE_PRINCIPALS = + Setting.affixKeySetting(FILTER_POLICY_PREFIX, "users", (key) -> Setting.listSetting(key, Collections.singletonList("*"), + Function.identity(), Property.NodeScope, Property.Dynamic)); + private static final Setting.AffixSetting> FILTER_POLICY_IGNORE_REALMS = + Setting.affixKeySetting(FILTER_POLICY_PREFIX, "realms", (key) -> Setting.listSetting(key, Collections.singletonList("*"), + Function.identity(), Property.NodeScope, Property.Dynamic)); + private static final Setting.AffixSetting> FILTER_POLICY_IGNORE_ROLES = + Setting.affixKeySetting(FILTER_POLICY_PREFIX, "roles", (key) -> Setting.listSetting(key, Collections.singletonList("*"), + Function.identity(), Property.NodeScope, Property.Dynamic)); + private static final Setting.AffixSetting> FILTER_POLICY_IGNORE_INDICES = + Setting.affixKeySetting(FILTER_POLICY_PREFIX, "indices", (key) -> Setting.listSetting(key, Collections.singletonList("*"), + Function.identity(), Property.NodeScope, Property.Dynamic)); + + private final Logger logger; + final EventFilterPolicyRegistry eventFilterPolicyRegistry; + private final ThreadContext threadContext; + // package for testing + volatile EnumSet events; + boolean includeRequestBody; + LocalNodeInfo localNodeInfo; + + @Override + public String name() { + return NAME; + } + + public LoggingAuditTrail(Settings settings, ClusterService clusterService, ThreadPool threadPool) { + this(settings, clusterService, Loggers.getLogger(LoggingAuditTrail.class), 
threadPool.getThreadContext()); + } + + LoggingAuditTrail(Settings settings, ClusterService clusterService, Logger logger, ThreadContext threadContext) { + super(settings); + this.logger = logger; + this.events = parse(INCLUDE_EVENT_SETTINGS.get(settings), EXCLUDE_EVENT_SETTINGS.get(settings)); + this.includeRequestBody = INCLUDE_REQUEST_BODY.get(settings); + this.threadContext = threadContext; + this.localNodeInfo = new LocalNodeInfo(settings, null); + this.eventFilterPolicyRegistry = new EventFilterPolicyRegistry(settings); + clusterService.addListener(this); + clusterService.getClusterSettings().addSettingsUpdateConsumer(newSettings -> { + final LocalNodeInfo localNodeInfo = this.localNodeInfo; + final Settings.Builder builder = Settings.builder().put(localNodeInfo.settings).put(newSettings, false); + this.localNodeInfo = new LocalNodeInfo(builder.build(), localNodeInfo.localNode); + this.includeRequestBody = INCLUDE_REQUEST_BODY.get(newSettings); + // `events` is a volatile field! Keep `events` write last so that + // `localNodeInfo` and `includeRequestBody` writes happen-before! `events` is + // always read before `localNodeInfo` and `includeRequestBody`. + this.events = parse(INCLUDE_EVENT_SETTINGS.get(newSettings), EXCLUDE_EVENT_SETTINGS.get(newSettings)); + }, Arrays.asList(HOST_ADDRESS_SETTING, HOST_NAME_SETTING, NODE_NAME_SETTING, INCLUDE_EVENT_SETTINGS, EXCLUDE_EVENT_SETTINGS, + INCLUDE_REQUEST_BODY)); + clusterService.getClusterSettings().addAffixUpdateConsumer(FILTER_POLICY_IGNORE_PRINCIPALS, (policyName, filtersList) -> { + final Optional policy = eventFilterPolicyRegistry.get(policyName); + final EventFilterPolicy newPolicy = policy.orElse(new EventFilterPolicy(policyName, settings)) + .changePrincipalsFilter(filtersList); + this.eventFilterPolicyRegistry.set(policyName, newPolicy); + }, (policyName, filtersList) -> EventFilterPolicy.parsePredicate(filtersList)); + clusterService.getClusterSettings().addAffixUpdateConsumer(FILTER_POLICY_IGNORE_REALMS, (policyName, filtersList) -> { + final Optional policy = eventFilterPolicyRegistry.get(policyName); + final EventFilterPolicy newPolicy = policy.orElse(new EventFilterPolicy(policyName, settings)) + .changeRealmsFilter(filtersList); + this.eventFilterPolicyRegistry.set(policyName, newPolicy); + }, (policyName, filtersList) -> EventFilterPolicy.parsePredicate(filtersList)); + clusterService.getClusterSettings().addAffixUpdateConsumer(FILTER_POLICY_IGNORE_ROLES, (policyName, filtersList) -> { + final Optional policy = eventFilterPolicyRegistry.get(policyName); + final EventFilterPolicy newPolicy = policy.orElse(new EventFilterPolicy(policyName, settings)) + .changeRolesFilter(filtersList); + this.eventFilterPolicyRegistry.set(policyName, newPolicy); + }, (policyName, filtersList) -> EventFilterPolicy.parsePredicate(filtersList)); + clusterService.getClusterSettings().addAffixUpdateConsumer(FILTER_POLICY_IGNORE_INDICES, (policyName, filtersList) -> { + final Optional policy = eventFilterPolicyRegistry.get(policyName); + final EventFilterPolicy newPolicy = policy.orElse(new EventFilterPolicy(policyName, settings)) + .changeIndicesFilter(filtersList); + this.eventFilterPolicyRegistry.set(policyName, newPolicy); + }, (policyName, filtersList) -> EventFilterPolicy.parsePredicate(filtersList)); + } + + @Override + public void authenticationSuccess(String realm, User user, RestRequest request) { + if (events.contains(AUTHENTICATION_SUCCESS) && (eventFilterPolicyRegistry.ignorePredicate() + .test(new AuditEventMetaInfo(Optional.of(user), 
Optional.of(realm), Optional.empty(), Optional.empty())) == false)) { + if (includeRequestBody) { + logger.info("{}[rest] [authentication_success]\t{}, realm=[{}], uri=[{}], params=[{}], request_body=[{}]", + localNodeInfo.prefix, principal(user), realm, request.uri(), request.params(), restRequestContent(request)); + } else { + logger.info("{}[rest] [authentication_success]\t{}, realm=[{}], uri=[{}], params=[{}]", localNodeInfo.prefix, + principal(user), realm, request.uri(), request.params()); + } + } + } + + @Override + public void authenticationSuccess(String realm, User user, String action, TransportMessage message) { + if (events.contains(AUTHENTICATION_SUCCESS)) { + final Optional indices = indices(message); + if (eventFilterPolicyRegistry.ignorePredicate() + .test(new AuditEventMetaInfo(Optional.of(user), Optional.of(realm), Optional.empty(), indices)) == false) { + final LocalNodeInfo localNodeInfo = this.localNodeInfo; + if (indices.isPresent()) { + logger.info("{}[transport] [authentication_success]\t{}, {}, realm=[{}], action=[{}], indices=[{}], request=[{}]", + localNodeInfo.prefix, originAttributes(threadContext, message, localNodeInfo), principal(user), realm, action, + arrayToCommaDelimitedString(indices.get()), message.getClass().getSimpleName()); + } else { + logger.info("{}[transport] [authentication_success]\t{}, {}, realm=[{}], action=[{}], request=[{}]", + localNodeInfo.prefix, originAttributes(threadContext, message, localNodeInfo), principal(user), realm, action, + message.getClass().getSimpleName()); + } + } + } + } + + @Override + public void anonymousAccessDenied(String action, TransportMessage message) { + if (events.contains(ANONYMOUS_ACCESS_DENIED)) { + final Optional indices = indices(message); + if (eventFilterPolicyRegistry.ignorePredicate() + .test(new AuditEventMetaInfo(Optional.empty(), Optional.empty(), indices)) == false) { + final LocalNodeInfo localNodeInfo = this.localNodeInfo; + if (indices.isPresent()) { + logger.info("{}[transport] [anonymous_access_denied]\t{}, action=[{}], indices=[{}], request=[{}]", + localNodeInfo.prefix, originAttributes(threadContext, message, localNodeInfo), action, + arrayToCommaDelimitedString(indices.get()), message.getClass().getSimpleName()); + } else { + logger.info("{}[transport] [anonymous_access_denied]\t{}, action=[{}], request=[{}]", localNodeInfo.prefix, + originAttributes(threadContext, message, localNodeInfo), action, message.getClass().getSimpleName()); + } + } + } + } + + @Override + public void anonymousAccessDenied(RestRequest request) { + if (events.contains(ANONYMOUS_ACCESS_DENIED) + && (eventFilterPolicyRegistry.ignorePredicate().test(AuditEventMetaInfo.EMPTY) == false)) { + if (includeRequestBody) { + logger.info("{}[rest] [anonymous_access_denied]\t{}, uri=[{}], request_body=[{}]", localNodeInfo.prefix, + hostAttributes(request), request.uri(), restRequestContent(request)); + } else { + logger.info("{}[rest] [anonymous_access_denied]\t{}, uri=[{}]", localNodeInfo.prefix, hostAttributes(request), + request.uri()); + } + } + } + + @Override + public void authenticationFailed(AuthenticationToken token, String action, TransportMessage message) { + if (events.contains(AUTHENTICATION_FAILED)) { + final Optional indices = indices(message); + if (eventFilterPolicyRegistry.ignorePredicate() + .test(new AuditEventMetaInfo(Optional.of(token), Optional.empty(), indices)) == false) { + final LocalNodeInfo localNodeInfo = this.localNodeInfo; + if (indices.isPresent()) { + logger.info("{}[transport] 
[authentication_failed]\t{}, principal=[{}], action=[{}], indices=[{}], request=[{}]", + localNodeInfo.prefix, originAttributes(threadContext, message, localNodeInfo), token.principal(), action, + arrayToCommaDelimitedString(indices.get()), message.getClass().getSimpleName()); + } else { + logger.info("{}[transport] [authentication_failed]\t{}, principal=[{}], action=[{}], request=[{}]", + localNodeInfo.prefix, originAttributes(threadContext, message, localNodeInfo), token.principal(), action, + message.getClass().getSimpleName()); + } + } + } + } + + @Override + public void authenticationFailed(RestRequest request) { + if (events.contains(AUTHENTICATION_FAILED) + && (eventFilterPolicyRegistry.ignorePredicate().test(AuditEventMetaInfo.EMPTY) == false)) { + if (includeRequestBody) { + logger.info("{}[rest] [authentication_failed]\t{}, uri=[{}], request_body=[{}]", localNodeInfo.prefix, + hostAttributes(request), request.uri(), restRequestContent(request)); + } else { + logger.info("{}[rest] [authentication_failed]\t{}, uri=[{}]", localNodeInfo.prefix, hostAttributes(request), request.uri()); + } + } + } + + @Override + public void authenticationFailed(String action, TransportMessage message) { + if (events.contains(AUTHENTICATION_FAILED)) { + final Optional indices = indices(message); + if (eventFilterPolicyRegistry.ignorePredicate() + .test(new AuditEventMetaInfo(Optional.empty(), Optional.empty(), indices)) == false) { + final LocalNodeInfo localNodeInfo = this.localNodeInfo; + if (indices.isPresent()) { + logger.info("{}[transport] [authentication_failed]\t{}, action=[{}], indices=[{}], request=[{}]", localNodeInfo.prefix, + originAttributes(threadContext, message, localNodeInfo), action, arrayToCommaDelimitedString(indices.get()), + message.getClass().getSimpleName()); + } else { + logger.info("{}[transport] [authentication_failed]\t{}, action=[{}], request=[{}]", localNodeInfo.prefix, + originAttributes(threadContext, message, localNodeInfo), action, message.getClass().getSimpleName()); + } + } + } + } + + @Override + public void authenticationFailed(AuthenticationToken token, RestRequest request) { + if (events.contains(AUTHENTICATION_FAILED) + && (eventFilterPolicyRegistry.ignorePredicate() + .test(new AuditEventMetaInfo(Optional.of(token), Optional.empty(), Optional.empty())) == false)) { + if (includeRequestBody) { + logger.info("{}[rest] [authentication_failed]\t{}, principal=[{}], uri=[{}], request_body=[{}]", localNodeInfo.prefix, + hostAttributes(request), token.principal(), request.uri(), restRequestContent(request)); + } else { + logger.info("{}[rest] [authentication_failed]\t{}, principal=[{}], uri=[{}]", localNodeInfo.prefix, hostAttributes(request), + token.principal(), request.uri()); + } + } + } + + @Override + public void authenticationFailed(String realm, AuthenticationToken token, String action, TransportMessage message) { + if (events.contains(REALM_AUTHENTICATION_FAILED)) { + final Optional indices = indices(message); + if (eventFilterPolicyRegistry.ignorePredicate() + .test(new AuditEventMetaInfo(Optional.of(token), Optional.of(realm), indices)) == false) { + final LocalNodeInfo localNodeInfo = this.localNodeInfo; + if (indices.isPresent()) { + logger.info( + "{}[transport] [realm_authentication_failed]\trealm=[{}], {}, principal=[{}], action=[{}], indices=[{}], " + + "request=[{}]", + localNodeInfo.prefix, realm, originAttributes(threadContext, message, localNodeInfo), token.principal(), action, + arrayToCommaDelimitedString(indices.get()), 
message.getClass().getSimpleName()); + } else { + logger.info("{}[transport] [realm_authentication_failed]\trealm=[{}], {}, principal=[{}], action=[{}], request=[{}]", + localNodeInfo.prefix, realm, originAttributes(threadContext, message, localNodeInfo), token.principal(), action, + message.getClass().getSimpleName()); + } + } + } + } + + @Override + public void authenticationFailed(String realm, AuthenticationToken token, RestRequest request) { + if (events.contains(REALM_AUTHENTICATION_FAILED) + && (eventFilterPolicyRegistry.ignorePredicate() + .test(new AuditEventMetaInfo(Optional.of(token), Optional.of(realm), Optional.empty())) == false)) { + if (includeRequestBody) { + logger.info("{}[rest] [realm_authentication_failed]\trealm=[{}], {}, principal=[{}], uri=[{}], request_body=[{}]", + localNodeInfo.prefix, realm, hostAttributes(request), token.principal(), request.uri(), + restRequestContent(request)); + } else { + logger.info("{}[rest] [realm_authentication_failed]\trealm=[{}], {}, principal=[{}], uri=[{}]", localNodeInfo.prefix, realm, + hostAttributes(request), token.principal(), request.uri()); + } + } + } + + @Override + public void accessGranted(Authentication authentication, String action, TransportMessage message, String[] roleNames) { + final User user = authentication.getUser(); + final boolean isSystem = SystemUser.is(user) || XPackUser.is(user); + if ((isSystem && events.contains(SYSTEM_ACCESS_GRANTED)) || ((isSystem == false) && events.contains(ACCESS_GRANTED))) { + final Optional indices = indices(message); + if (eventFilterPolicyRegistry.ignorePredicate().test(new AuditEventMetaInfo(Optional.of(user), + Optional.of(effectiveRealmName(authentication)), Optional.of(roleNames), indices)) == false) { + final LocalNodeInfo localNodeInfo = this.localNodeInfo; + if (indices.isPresent()) { + logger.info("{}[transport] [access_granted]\t{}, {}, roles=[{}], action=[{}], indices=[{}], request=[{}]", + localNodeInfo.prefix, originAttributes(threadContext, message, localNodeInfo), subject(authentication), + arrayToCommaDelimitedString(roleNames), action, arrayToCommaDelimitedString(indices.get()), + message.getClass().getSimpleName()); + } else { + logger.info("{}[transport] [access_granted]\t{}, {}, roles=[{}], action=[{}], request=[{}]", localNodeInfo.prefix, + originAttributes(threadContext, message, localNodeInfo), subject(authentication), + arrayToCommaDelimitedString(roleNames), action, message.getClass().getSimpleName()); + } + } + } + } + + @Override + public void accessDenied(Authentication authentication, String action, TransportMessage message, String[] roleNames) { + if (events.contains(ACCESS_DENIED)) { + final Optional indices = indices(message); + if (eventFilterPolicyRegistry.ignorePredicate().test(new AuditEventMetaInfo(Optional.of(authentication.getUser()), + Optional.of(effectiveRealmName(authentication)), Optional.of(roleNames), indices)) == false) { + final LocalNodeInfo localNodeInfo = this.localNodeInfo; + if (indices.isPresent()) { + logger.info("{}[transport] [access_denied]\t{}, {}, roles=[{}], action=[{}], indices=[{}], request=[{}]", + localNodeInfo.prefix, originAttributes(threadContext, message, localNodeInfo), subject(authentication), + arrayToCommaDelimitedString(roleNames), action, arrayToCommaDelimitedString(indices.get()), + message.getClass().getSimpleName()); + } else { + logger.info("{}[transport] [access_denied]\t{}, {}, roles=[{}], action=[{}], request=[{}]", localNodeInfo.prefix, + originAttributes(threadContext, message, localNodeInfo), 
subject(authentication), + arrayToCommaDelimitedString(roleNames), action, message.getClass().getSimpleName()); + } + } + } + } + + @Override + public void tamperedRequest(RestRequest request) { + if (events.contains(TAMPERED_REQUEST) && (eventFilterPolicyRegistry.ignorePredicate().test(AuditEventMetaInfo.EMPTY) == false)) { + if (includeRequestBody) { + logger.info("{}[rest] [tampered_request]\t{}, uri=[{}], request_body=[{}]", localNodeInfo.prefix, hostAttributes(request), + request.uri(), restRequestContent(request)); + } else { + logger.info("{}[rest] [tampered_request]\t{}, uri=[{}]", localNodeInfo.prefix, hostAttributes(request), request.uri()); + } + } + } + + @Override + public void tamperedRequest(String action, TransportMessage message) { + if (events.contains(TAMPERED_REQUEST)) { + final Optional indices = indices(message); + if (eventFilterPolicyRegistry.ignorePredicate() + .test(new AuditEventMetaInfo(Optional.empty(), Optional.empty(), indices)) == false) { + final LocalNodeInfo localNodeInfo = this.localNodeInfo; + if (indices.isPresent()) { + logger.info("{}[transport] [tampered_request]\t{}, action=[{}], indices=[{}], request=[{}]", localNodeInfo.prefix, + originAttributes(threadContext, message, localNodeInfo), action, arrayToCommaDelimitedString(indices.get()), + message.getClass().getSimpleName()); + } else { + logger.info("{}[transport] [tampered_request]\t{}, action=[{}], request=[{}]", localNodeInfo.prefix, + originAttributes(threadContext, message, localNodeInfo), action, message.getClass().getSimpleName()); + } + } + } + } + + @Override + public void tamperedRequest(User user, String action, TransportMessage request) { + if (events.contains(TAMPERED_REQUEST)) { + final Optional indices = indices(request); + if (eventFilterPolicyRegistry.ignorePredicate() + .test(new AuditEventMetaInfo(Optional.of(user), Optional.empty(), Optional.empty(), indices)) == false) { + final LocalNodeInfo localNodeInfo = this.localNodeInfo; + if (indices.isPresent()) { + logger.info("{}[transport] [tampered_request]\t{}, {}, action=[{}], indices=[{}], request=[{}]", localNodeInfo.prefix, + originAttributes(threadContext, request, localNodeInfo), principal(user), action, + arrayToCommaDelimitedString(indices.get()), request.getClass().getSimpleName()); + } else { + logger.info("{}[transport] [tampered_request]\t{}, {}, action=[{}], request=[{}]", localNodeInfo.prefix, + originAttributes(threadContext, request, localNodeInfo), principal(user), action, + request.getClass().getSimpleName()); + } + } + } + } + + @Override + public void connectionGranted(InetAddress inetAddress, String profile, SecurityIpFilterRule rule) { + if (events.contains(CONNECTION_GRANTED) && (eventFilterPolicyRegistry.ignorePredicate().test(AuditEventMetaInfo.EMPTY) == false)) { + logger.info("{}[ip_filter] [connection_granted]\torigin_address=[{}], transport_profile=[{}], rule=[{}]", localNodeInfo.prefix, + NetworkAddress.format(inetAddress), profile, rule); + } + } + + @Override + public void connectionDenied(InetAddress inetAddress, String profile, SecurityIpFilterRule rule) { + if (events.contains(CONNECTION_DENIED) && (eventFilterPolicyRegistry.ignorePredicate().test(AuditEventMetaInfo.EMPTY) == false)) { + logger.info("{}[ip_filter] [connection_denied]\torigin_address=[{}], transport_profile=[{}], rule=[{}]", localNodeInfo.prefix, + NetworkAddress.format(inetAddress), profile, rule); + } + } + + @Override + public void runAsGranted(Authentication authentication, String action, TransportMessage message, String[] 
roleNames) { + if (events.contains(RUN_AS_GRANTED)) { + final Optional indices = indices(message); + if (eventFilterPolicyRegistry.ignorePredicate().test(new AuditEventMetaInfo(Optional.of(authentication.getUser()), + Optional.of(effectiveRealmName(authentication)), Optional.of(roleNames), indices)) == false) { + final LocalNodeInfo localNodeInfo = this.localNodeInfo; + if (indices.isPresent()) { + logger.info("{}[transport] [run_as_granted]\t{}, {}, roles=[{}], action=[{}], indices=[{}], request=[{}]", + localNodeInfo.prefix, originAttributes(threadContext, message, localNodeInfo), runAsSubject(authentication), + arrayToCommaDelimitedString(roleNames), action, arrayToCommaDelimitedString(indices.get()), + message.getClass().getSimpleName()); + } else { + logger.info("{}[transport] [run_as_granted]\t{}, {}, roles=[{}], action=[{}], request=[{}]", localNodeInfo.prefix, + originAttributes(threadContext, message, localNodeInfo), runAsSubject(authentication), + arrayToCommaDelimitedString(roleNames), action, + message.getClass().getSimpleName()); + } + } + } + } + + @Override + public void runAsDenied(Authentication authentication, String action, TransportMessage message, String[] roleNames) { + if (events.contains(RUN_AS_DENIED)) { + final Optional indices = indices(message); + if (eventFilterPolicyRegistry.ignorePredicate().test(new AuditEventMetaInfo(Optional.of(authentication.getUser()), + Optional.of(effectiveRealmName(authentication)), Optional.of(roleNames), indices)) == false) { + final LocalNodeInfo localNodeInfo = this.localNodeInfo; + if (indices.isPresent()) { + logger.info("{}[transport] [run_as_denied]\t{}, {}, roles=[{}], action=[{}], indices=[{}], request=[{}]", + localNodeInfo.prefix, originAttributes(threadContext, message, localNodeInfo), runAsSubject(authentication), + arrayToCommaDelimitedString(roleNames), action, arrayToCommaDelimitedString(indices.get()), + message.getClass().getSimpleName()); + } else { + logger.info("{}[transport] [run_as_denied]\t{}, {}, roles=[{}], action=[{}], request=[{}]", localNodeInfo.prefix, + originAttributes(threadContext, message, localNodeInfo), runAsSubject(authentication), + arrayToCommaDelimitedString(roleNames), action, message.getClass().getSimpleName()); + } + } + } + } + + @Override + public void runAsDenied(Authentication authentication, RestRequest request, String[] roleNames) { + if (events.contains(RUN_AS_DENIED) + && (eventFilterPolicyRegistry.ignorePredicate().test(new AuditEventMetaInfo(Optional.of(authentication.getUser()), + Optional.of(effectiveRealmName(authentication)), Optional.of(roleNames), Optional.empty())) == false)) { + if (includeRequestBody) { + logger.info("{}[rest] [run_as_denied]\t{}, {}, roles=[{}], uri=[{}], request_body=[{}]", localNodeInfo.prefix, + hostAttributes(request), runAsSubject(authentication), arrayToCommaDelimitedString(roleNames), request.uri(), + restRequestContent(request)); + } else { + logger.info("{}[rest] [run_as_denied]\t{}, {}, roles=[{}], uri=[{}]", localNodeInfo.prefix, hostAttributes(request), + runAsSubject(authentication), arrayToCommaDelimitedString(roleNames), request.uri()); + } + } + } + + static String runAsSubject(Authentication authentication) { + final StringBuilder sb = new StringBuilder("principal=["); + sb.append(authentication.getUser().authenticatedUser().principal()); + sb.append("], realm=["); + sb.append(authentication.getAuthenticatedBy().getName()); + sb.append("], run_as_principal=["); + sb.append(authentication.getUser().principal()); + if 
(authentication.getLookedUpBy() != null) { + sb.append("], run_as_realm=[").append(authentication.getLookedUpBy().getName()); + } + sb.append("]"); + return sb.toString(); + } + + static String subject(Authentication authentication) { + final StringBuilder sb = new StringBuilder("principal=["); + sb.append(authentication.getUser().principal()).append("], realm=["); + if (authentication.getUser().isRunAs()) { + sb.append(authentication.getLookedUpBy().getName()).append("], run_by_principal=["); + sb.append(authentication.getUser().authenticatedUser().principal()).append("], run_by_realm=["); + } + sb.append(authentication.getAuthenticatedBy().getName()).append("]"); + return sb.toString(); + } + + private static String hostAttributes(RestRequest request) { + String formattedAddress; + final SocketAddress socketAddress = request.getRemoteAddress(); + if (socketAddress instanceof InetSocketAddress) { + formattedAddress = NetworkAddress.format(((InetSocketAddress) socketAddress).getAddress()); + } else { + formattedAddress = socketAddress.toString(); + } + return "origin_address=[" + formattedAddress + "]"; + } + + protected static String originAttributes(ThreadContext threadContext, TransportMessage message, LocalNodeInfo localNodeInfo) { + return restOriginTag(threadContext).orElse(transportOriginTag(message).orElse(localNodeInfo.localOriginTag)); + } + + private static Optional restOriginTag(ThreadContext threadContext) { + final InetSocketAddress restAddress = RemoteHostHeader.restRemoteAddress(threadContext); + if (restAddress == null) { + return Optional.empty(); + } + return Optional.of(new StringBuilder("origin_type=[rest], origin_address=[").append(NetworkAddress.format(restAddress.getAddress())) + .append("]") + .toString()); + } + + private static Optional transportOriginTag(TransportMessage message) { + final TransportAddress address = message.remoteAddress(); + if (address == null) { + return Optional.empty(); + } + return Optional.of( + new StringBuilder("origin_type=[transport], origin_address=[").append(NetworkAddress.format(address.address().getAddress())) + .append("]") + .toString()); + } + + static Optional indices(TransportMessage message) { + if (message instanceof IndicesRequest) { + final String[] indices = ((IndicesRequest) message).indices(); + if ((indices != null) && (indices.length != 0)) { + return Optional.of(((IndicesRequest) message).indices()); + } + } + return Optional.empty(); + } + + static String effectiveRealmName(Authentication authentication) { + return authentication.getLookedUpBy() != null ? authentication.getLookedUpBy().getName() + : authentication.getAuthenticatedBy().getName(); + } + + static String principal(User user) { + final StringBuilder builder = new StringBuilder("principal=["); + builder.append(user.principal()); + if (user.isRunAs()) { + builder.append("], run_by_principal=[").append(user.authenticatedUser().principal()); + } + return builder.append("]").toString(); + } + + public static void registerSettings(List> settings) { + settings.add(HOST_ADDRESS_SETTING); + settings.add(HOST_NAME_SETTING); + settings.add(NODE_NAME_SETTING); + settings.add(INCLUDE_EVENT_SETTINGS); + settings.add(EXCLUDE_EVENT_SETTINGS); + settings.add(INCLUDE_REQUEST_BODY); + settings.add(FILTER_POLICY_IGNORE_PRINCIPALS); + settings.add(FILTER_POLICY_IGNORE_INDICES); + settings.add(FILTER_POLICY_IGNORE_ROLES); + settings.add(FILTER_POLICY_IGNORE_REALMS); + } + + /** + * Builds the predicate for a single policy filter. 
The predicate matches events + * that will be ignored, aka filtered out, aka not logged. The event can be + * filtered by the following fields : `user`, `realm`, `role` and `index`. + * Predicates on each field are ANDed together to form the filter predicate of + * the policy. + */ + private static final class EventFilterPolicy { + private final String name; + private final Predicate ignorePrincipalsPredicate; + private final Predicate ignoreRealmsPredicate; + private final Predicate ignoreRolesPredicate; + private final Predicate ignoreIndicesPredicate; + + EventFilterPolicy(String name, Settings settings) { + this(name, parsePredicate(FILTER_POLICY_IGNORE_PRINCIPALS.getConcreteSettingForNamespace(name).get(settings)), + parsePredicate(FILTER_POLICY_IGNORE_REALMS.getConcreteSettingForNamespace(name).get(settings)), + parsePredicate(FILTER_POLICY_IGNORE_ROLES.getConcreteSettingForNamespace(name).get(settings)), + parsePredicate(FILTER_POLICY_IGNORE_INDICES.getConcreteSettingForNamespace(name).get(settings))); + } + + /** + * An empty filter list for a field will match events with that field missing. + * An event with an undefined field has the field value the empty string ("") or + * a singleton list of the empty string ([""]). + */ + EventFilterPolicy(String name, Predicate ignorePrincipalsPredicate, Predicate ignoreRealmsPredicate, + Predicate ignoreRolesPredicate, Predicate ignoreIndicesPredicate) { + this.name = name; + this.ignorePrincipalsPredicate = ignorePrincipalsPredicate; + this.ignoreRealmsPredicate = ignoreRealmsPredicate; + this.ignoreRolesPredicate = ignoreRolesPredicate; + this.ignoreIndicesPredicate = ignoreIndicesPredicate; + } + + private EventFilterPolicy changePrincipalsFilter(List filtersList) { + return new EventFilterPolicy(name, parsePredicate(filtersList), ignoreRealmsPredicate, ignoreRolesPredicate, + ignoreIndicesPredicate); + } + + private EventFilterPolicy changeRealmsFilter(List filtersList) { + return new EventFilterPolicy(name, ignorePrincipalsPredicate, parsePredicate(filtersList), ignoreRolesPredicate, + ignoreIndicesPredicate); + } + + private EventFilterPolicy changeRolesFilter(List filtersList) { + return new EventFilterPolicy(name, ignorePrincipalsPredicate, ignoreRealmsPredicate, parsePredicate(filtersList), + ignoreIndicesPredicate); + } + + private EventFilterPolicy changeIndicesFilter(List filtersList) { + return new EventFilterPolicy(name, ignorePrincipalsPredicate, ignoreRealmsPredicate, ignoreRolesPredicate, + parsePredicate(filtersList)); + } + + static Predicate parsePredicate(List l) { + return Automatons.predicate(emptyStringBuildsEmptyAutomaton(l)); + } + + /** + * It is a requirement that empty string filters match empty string fields. In + * this case we require automatons from empty string to match the empty string. + * `Automatons.predicate("").test("") == false` + * `Automatons.predicate("//").test("") == true` + */ + private static List emptyStringBuildsEmptyAutomaton(List l) { + if (l.isEmpty()) { + return Collections.singletonList("//"); + } + return l.stream().map(f -> f.isEmpty() ? "//" : f).collect(Collectors.toList()); + } + + /** + * ANDs the predicates of this filter policy. The `indices` and `roles` fields + * of an audit event are multi-valued and all values should match the filter + * predicate of the corresponding field. 
+ */ + Predicate ignorePredicate() { + return eventInfo -> ignorePrincipalsPredicate.test(eventInfo.principal) && ignoreRealmsPredicate.test(eventInfo.realm) + && eventInfo.roles.get().allMatch(ignoreRolesPredicate) && eventInfo.indices.get().allMatch(ignoreIndicesPredicate); + } + + @Override + public String toString() { + return "[users]:" + ignorePrincipalsPredicate.toString() + "&[realms]:" + ignoreRealmsPredicate.toString() + "&[roles]:" + + ignoreRolesPredicate.toString() + "&[indices]:" + ignoreIndicesPredicate.toString(); + } + } + + /** + * Builds the filter predicates for all the policies. Predicates of all policies + * are ORed together, so that an audit event matching any policy is ignored. + */ + static final class EventFilterPolicyRegistry { + private volatile Map policyMap; + private volatile Predicate predicate; + + private EventFilterPolicyRegistry(Settings settings) { + final MapBuilder mapBuilder = MapBuilder.newMapBuilder(); + for (final String policyName : settings.getGroups(FILTER_POLICY_PREFIX, true).keySet()) { + mapBuilder.put(policyName, new EventFilterPolicy(policyName, settings)); + } + policyMap = mapBuilder.immutableMap(); + // precompute predicate + predicate = buildIgnorePredicate(policyMap); + } + + private Optional get(String policyName) { + return Optional.ofNullable(policyMap.get(policyName)); + } + + private synchronized void set(String policyName, EventFilterPolicy eventFilterPolicy) { + policyMap = MapBuilder.newMapBuilder(policyMap).put(policyName, eventFilterPolicy).immutableMap(); + // precompute predicate + predicate = buildIgnorePredicate(policyMap); + } + + Predicate ignorePredicate() { + return predicate; + } + + private static Predicate buildIgnorePredicate(Map policyMap) { + return policyMap.values().stream().map(EventFilterPolicy::ignorePredicate).reduce(x -> false, (x, y) -> x.or(y)); + } + + @Override + public String toString() { + final Map treeMap = new TreeMap<>(policyMap); + final StringBuilder sb = new StringBuilder(); + for (final Map.Entry entry : treeMap.entrySet()) { + sb.append(entry.getKey()).append(":").append(entry.getValue().toString()); + } + return sb.toString(); + } + } + + /** + * Abstraction for the fields of the audit event that are used for filtering. If + * an event has a missing field (one of `user`, `realm`, `roles` and `indices`) + * the value for the field will be the empty string or a singleton stream of the + * empty string. + */ + static final class AuditEventMetaInfo { + final String principal; + final String realm; + final Supplier> roles; + final Supplier> indices; + + // empty is used for events can be filtered out only by the lack of a field + static final AuditEventMetaInfo EMPTY = new AuditEventMetaInfo(Optional.empty(), Optional.empty(), Optional.empty()); + + /** + * If a field is missing for an event, its value for filtering purposes is the + * empty string or a singleton stream of the empty string. This a allows a + * policy to filter by the missing value using the empty string, ie + * `ignore_filters.users: ["", "elastic"]` will filter events with a missing + * user field (such as `anonymous_access_denied`) as well as events from the + * "elastic" username. + */ + AuditEventMetaInfo(Optional user, Optional realm, Optional roles, Optional indices) { + this.principal = user.map(u -> u.principal()).orElse(""); + this.realm = realm.orElse(""); + // Supplier indirection and lazy generation of Streams serves 2 purposes: + // 1. 
streams might not get generated due to short circuiting logical + // conditions on the `principal` and `realm` fields + // 2. reusability of the AuditEventMetaInfo instance: in this case Streams have + // to be regenerated as they cannot be operated upon twice + this.roles = () -> roles.filter(r -> r.length != 0).map(Arrays::stream).orElse(Stream.of("")); + this.indices = () -> indices.filter(i -> i.length != 0).map(Arrays::stream).orElse(Stream.of("")); + } + + AuditEventMetaInfo(Optional authenticationToken, Optional realm, Optional indices) { + this.principal = authenticationToken.map(u -> u.principal()).orElse(""); + this.realm = realm.orElse(""); + this.roles = () -> Stream.of(""); + this.indices = () -> indices.filter(r -> r.length != 0).map(i -> Arrays.stream(i)).orElse(Stream.of("")); + } + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + updateLocalNodeInfo(event.state().getNodes().getLocalNode()); + } + + void updateLocalNodeInfo(DiscoveryNode newLocalNode) { + // check if local node changed + final LocalNodeInfo localNodeInfo = this.localNodeInfo; + if ((localNodeInfo.localNode == null) || (localNodeInfo.localNode.equals(newLocalNode) == false)) { + // no need to synchronize, called only from the cluster state applier thread + this.localNodeInfo = new LocalNodeInfo(localNodeInfo.settings, newLocalNode); + } + } + + static class LocalNodeInfo { + private final Settings settings; + private final DiscoveryNode localNode; + final String prefix; + private final String localOriginTag; + + LocalNodeInfo(Settings settings, @Nullable DiscoveryNode newLocalNode) { + this.settings = settings; + this.localNode = newLocalNode; + this.prefix = resolvePrefix(settings, newLocalNode); + this.localOriginTag = localOriginTag(newLocalNode); + } + + static String resolvePrefix(Settings settings, @Nullable DiscoveryNode localNode) { + final StringBuilder builder = new StringBuilder(); + if (HOST_ADDRESS_SETTING.get(settings)) { + final String address = localNode != null ? localNode.getHostAddress() : null; + if (address != null) { + builder.append("[").append(address).append("] "); + } + } + if (HOST_NAME_SETTING.get(settings)) { + final String hostName = localNode != null ? localNode.getHostName() : null; + if (hostName != null) { + builder.append("[").append(hostName).append("] "); + } + } + if (NODE_NAME_SETTING.get(settings)) { + final String name = Node.NODE_NAME_SETTING.get(settings); + if (name != null) { + builder.append("[").append(name).append("] "); + } + } + return builder.toString(); + } + + private static String localOriginTag(@Nullable DiscoveryNode localNode) { + if (localNode == null) { + return "origin_type=[local_node]"; + } + return new StringBuilder("origin_type=[local_node], origin_address=[").append(localNode.getHostAddress()) + .append("]") + .toString(); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java new file mode 100644 index 0000000000000..8bae951e88360 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java @@ -0,0 +1,604 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.node.Node; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportMessage; +import org.elasticsearch.xpack.core.common.IteratingActionListener; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import org.elasticsearch.xpack.core.security.authc.AuthenticationFailureHandler; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.support.Exceptions; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.audit.AuditTrail; +import org.elasticsearch.xpack.security.audit.AuditTrailService; + +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.function.BiConsumer; +import java.util.function.Consumer; + +/** + * An authentication service that delegates the authentication process to its configured {@link Realm realms}. + * This service also supports request level caching of authenticated users (i.e. once a user authenticated + * successfully, it is set on the request context to avoid subsequent redundant authentication process) + */ +public class AuthenticationService extends AbstractComponent { + + private final Realms realms; + private final AuditTrail auditTrail; + private final AuthenticationFailureHandler failureHandler; + private final ThreadContext threadContext; + private final String nodeName; + private final AnonymousUser anonymousUser; + private final TokenService tokenService; + private final boolean runAsEnabled; + private final boolean isAnonymousUserEnabled; + + public AuthenticationService(Settings settings, Realms realms, AuditTrailService auditTrail, + AuthenticationFailureHandler failureHandler, ThreadPool threadPool, + AnonymousUser anonymousUser, TokenService tokenService) { + super(settings); + this.nodeName = Node.NODE_NAME_SETTING.get(settings); + this.realms = realms; + this.auditTrail = auditTrail; + this.failureHandler = failureHandler; + this.threadContext = threadPool.getThreadContext(); + this.anonymousUser = anonymousUser; + this.runAsEnabled = AuthenticationServiceField.RUN_AS_ENABLED.get(settings); + this.isAnonymousUserEnabled = AnonymousUser.isAnonymousEnabled(settings); + this.tokenService = tokenService; + } + + /** + * Authenticates the user that is associated with the given request. 
If the user was authenticated successfully (i.e. + * a user was indeed associated with the request and the credentials were verified to be valid), the method returns + * the user and that user is then "attached" to the request's context. + * + * @param request The request to be authenticated + */ + public void authenticate(RestRequest request, ActionListener authenticationListener) { + createAuthenticator(request, authenticationListener).authenticateAsync(); + } + + /** + * Authenticates the user that is associated with the given message. If the user was authenticated successfully (i.e. + * a user was indeed associated with the request and the credentials were verified to be valid), the method returns + * the user and that user is then "attached" to the message's context. If no user was found to be attached to the given + * message, then the given fallback user will be returned instead. + * + * @param action The action of the message + * @param message The message to be authenticated + * @param fallbackUser The default user that will be assumed if no other user is attached to the message. Can be + * {@code null}, in which case there will be no fallback user and the success/failure of the + * authentication will be based on the whether there's an attached user to in the message and + * if there is, whether its credentials are valid. + */ + public void authenticate(String action, TransportMessage message, User fallbackUser, ActionListener listener) { + createAuthenticator(action, message, fallbackUser, listener).authenticateAsync(); + } + + /** + * Authenticates the username and password that are provided as parameters. This will not look + * at the values in the ThreadContext for Authentication. + * + * @param action The action of the message + * @param message The message that resulted in this authenticate call + * @param token The token (credentials) to be authenticated + */ + public void authenticate(String action, TransportMessage message, + AuthenticationToken token, ActionListener listener) { + new Authenticator(action, message, null, listener).authenticateToken(token); + } + + // pkg private method for testing + Authenticator createAuthenticator(RestRequest request, ActionListener listener) { + return new Authenticator(request, listener); + } + + // pkg private method for testing + Authenticator createAuthenticator(String action, TransportMessage message, User fallbackUser, ActionListener listener) { + return new Authenticator(action, message, fallbackUser, listener); + } + + /** + * This class is responsible for taking a request and executing the authentication. The authentication is executed in an asynchronous + * fashion in order to avoid blocking calls on a network thread. 
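As a rough usage sketch of the asynchronous `authenticate` API described above; the surrounding handler class, variable names, and logging are assumptions for illustration, not code from this change:

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.xpack.core.security.authc.Authentication;
import org.elasticsearch.xpack.security.authc.AuthenticationService;

// Hypothetical caller: drives the async authenticate(...) API and reacts to the result.
class AuthenticateCallSketch {
    void handle(AuthenticationService authenticationService, RestRequest restRequest) {
        authenticationService.authenticate(restRequest, ActionListener.wrap(
            authentication -> {
                // success: the Authentication has also been written into the thread context
                System.out.println("authenticated as " + authentication.getUser().principal());
            },
            e -> {
                // failure: an exception produced by the configured failure handler
                System.err.println("authentication failed: " + e.getMessage());
            }));
    }
}
```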
This class also performs the auditing necessary around authentication + */ + class Authenticator { + + private final AuditableRequest request; + private final User fallbackUser; + + private final ActionListener listener; + private RealmRef authenticatedBy = null; + private RealmRef lookedupBy = null; + private AuthenticationToken authenticationToken = null; + + Authenticator(RestRequest request, ActionListener listener) { + this(new AuditableRestRequest(auditTrail, failureHandler, threadContext, request), null, listener); + } + + Authenticator(String action, TransportMessage message, User fallbackUser, ActionListener listener) { + this(new AuditableTransportRequest(auditTrail, failureHandler, threadContext, action, message), fallbackUser, listener); + } + + private Authenticator(AuditableRequest auditableRequest, User fallbackUser, ActionListener listener) { + this.request = auditableRequest; + this.fallbackUser = fallbackUser; + this.listener = listener; + } + + /** + * This method starts the authentication process. The authentication process can be broken down into distinct operations. In order, + * these operations are: + * + *
+ * <ol>
+ * <li>look for existing authentication {@link #lookForExistingAuthentication(Consumer)}</li>
+ * <li>look for a user token</li>
+ * <li>token extraction {@link #extractToken(Consumer)}</li>
+ * <li>token authentication {@link #consumeToken(AuthenticationToken)}</li>
+ * <li>user lookup for run as if necessary {@link #consumeUser(User, Map)} and
+ * {@link #lookupRunAsUser(User, String, Consumer)}</li>
+ * <li>write authentication into the context {@link #finishAuthentication(User)}</li>
+ * </ol>
+ */ + private void authenticateAsync() { + lookForExistingAuthentication((authentication) -> { + if (authentication != null) { + listener.onResponse(authentication); + } else { + tokenService.getAndValidateToken(threadContext, ActionListener.wrap(userToken -> { + if (userToken != null) { + writeAuthToContext(userToken.getAuthentication()); + } else { + extractToken(this::consumeToken); + } + }, e -> { + if (e instanceof ElasticsearchSecurityException && + tokenService.isExpiredTokenException((ElasticsearchSecurityException) e) == false) { + // intentionally ignore the returned exception; we call this primarily + // for the auditing as we already have a purpose built exception + request.tamperedRequest(); + } + listener.onFailure(e); + })); + } + }); + } + + /** + * Looks to see if the request contains an existing {@link Authentication} and if so, that authentication will be used. The + * consumer is called if no exception was thrown while trying to read the authentication and may be called with a {@code null} + * value + */ + private void lookForExistingAuthentication(Consumer authenticationConsumer) { + Runnable action; + try { + final Authentication authentication = Authentication.readFromContext(threadContext); + if (authentication != null && request instanceof AuditableRestRequest) { + action = () -> listener.onFailure(request.tamperedRequest()); + } else { + action = () -> authenticationConsumer.accept(authentication); + } + } catch (Exception e) { + logger.error((Supplier) + () -> new ParameterizedMessage("caught exception while trying to read authentication from request [{}]", request), + e); + action = () -> listener.onFailure(request.tamperedRequest()); + } + + // While we could place this call in the try block, the issue is that we catch all exceptions and could catch exceptions that + // have nothing to do with a tampered request. + action.run(); + } + + /** + * Attempts to extract an {@link AuthenticationToken} from the request by iterating over the {@link Realms} and calling + * {@link Realm#token(ThreadContext)}. The first non-null token that is returned will be used. The consumer is only called if + * no exception was caught during the extraction process and may be called with a {@code null} token. + */ + // pkg-private accessor testing token extraction with a consumer + void extractToken(Consumer consumer) { + Runnable action = () -> consumer.accept(null); + try { + if (authenticationToken != null) { + action = () -> consumer.accept(authenticationToken); + } else { + for (Realm realm : realms) { + final AuthenticationToken token = realm.token(threadContext); + if (token != null) { + action = () -> consumer.accept(token); + break; + } + } + } + } catch (Exception e) { + logger.warn("An exception occurred while attempting to find authentication credentials", e); + action = () -> listener.onFailure(request.exceptionProcessingRequest(e, null)); + } + + action.run(); + } + + /** + * Consumes the {@link AuthenticationToken} provided by the caller. In the case of a {@code null} token, {@link #handleNullToken()} + * is called. In the case of a {@code non-null} token, the realms are iterated over and the first realm that returns a non-null + * {@link User} is the authenticating realm and iteration is stopped. 
This user is then passed to {@link #consumeUser(User, Map)} + * if no exception was caught while trying to authenticate the token + */ + private void consumeToken(AuthenticationToken token) { + if (token == null) { + handleNullToken(); + } else { + authenticationToken = token; + final List realmsList = realms.asList(); + final Map> messages = new LinkedHashMap<>(); + final BiConsumer> realmAuthenticatingConsumer = (realm, userListener) -> { + if (realm.supports(authenticationToken)) { + realm.authenticate(authenticationToken, ActionListener.wrap((result) -> { + assert result != null : "Realm " + realm + " produced a null authentication result"; + if (result.getStatus() == AuthenticationResult.Status.SUCCESS) { + // user was authenticated, populate the authenticated by information + authenticatedBy = new RealmRef(realm.name(), realm.type(), nodeName); + userListener.onResponse(result.getUser()); + } else { + // the user was not authenticated, call this so we can audit the correct event + request.realmAuthenticationFailed(authenticationToken, realm.name()); + if (result.getStatus() == AuthenticationResult.Status.TERMINATE) { + logger.info("Authentication of [{}] was terminated by realm [{}] - {}", + authenticationToken.principal(), realm.name(), result.getMessage()); + userListener.onFailure(Exceptions.authenticationError(result.getMessage(), result.getException())); + } else { + if (result.getMessage() != null) { + messages.put(realm, new Tuple<>(result.getMessage(), result.getException())); + } + userListener.onResponse(null); + } + } + }, (ex) -> { + logger.warn(new ParameterizedMessage( + "An error occurred while attempting to authenticate [{}] against realm [{}]", + authenticationToken.principal(), realm.name()), ex); + userListener.onFailure(ex); + })); + } else { + userListener.onResponse(null); + } + }; + final IteratingActionListener authenticatingListener = + new IteratingActionListener<>(ActionListener.wrap( + (user) -> consumeUser(user, messages), + (e) -> listener.onFailure(request.exceptionProcessingRequest(e, token))), + realmAuthenticatingConsumer, realmsList, threadContext); + try { + authenticatingListener.run(); + } catch (Exception e) { + listener.onFailure(request.exceptionProcessingRequest(e, token)); + } + } + } + + /** + * Handles failed extraction of an authentication token. This can happen in a few different scenarios: + * + *
+ * <ul>
+ * <li>this is an initial request from a client without preemptive authentication, so we must return an authentication
+ * challenge</li>
+ * <li>this is a request made internally within a node and there is a fallback user, which is typically the
+ * {@link SystemUser}</li>
+ * <li>anonymous access is enabled and this will be considered an anonymous request</li>
+ * </ul>
+ * + * Regardless of the scenario, this method will call the listener with either failure or success. + */ + // pkg-private for tests + void handleNullToken() { + final Authentication authentication; + if (fallbackUser != null) { + RealmRef authenticatedBy = new RealmRef("__fallback", "__fallback", nodeName); + authentication = new Authentication(fallbackUser, authenticatedBy, null); + } else if (isAnonymousUserEnabled) { + RealmRef authenticatedBy = new RealmRef("__anonymous", "__anonymous", nodeName); + authentication = new Authentication(anonymousUser, authenticatedBy, null); + } else { + authentication = null; + } + + Runnable action; + if (authentication != null) { + action = () -> writeAuthToContext(authentication); + } else { + action = () -> listener.onFailure(request.anonymousAccessDenied()); + } + + // we assign the listener call to an action to avoid calling the listener within a try block and auditing the wrong thing when + // an exception bubbles up even after successful authentication + action.run(); + } + + /** + * Consumes the {@link User} that resulted from attempting to authenticate a token against the {@link Realms}. When the user is + * {@code null}, authentication fails and does not proceed. When there is a user, the request is inspected to see if the run as + * functionality is in use. When run as is not in use, {@link #finishAuthentication(User)} is called, otherwise we try to lookup + * the run as user in {@link #lookupRunAsUser(User, String, Consumer)} + */ + private void consumeUser(User user, Map> messages) { + if (user == null) { + messages.forEach((realm, tuple) -> { + final String message = tuple.v1(); + final String cause = tuple.v2() == null ? "" : " (Caused by " + tuple.v2() + ")"; + logger.warn("Authentication to realm {} failed - {}{}", realm.name(), message, cause); + }); + listener.onFailure(request.authenticationFailed(authenticationToken)); + } else { + if (runAsEnabled) { + final String runAsUsername = threadContext.getHeader(AuthenticationServiceField.RUN_AS_USER_HEADER); + if (runAsUsername != null && runAsUsername.isEmpty() == false) { + lookupRunAsUser(user, runAsUsername, this::finishAuthentication); + } else if (runAsUsername == null) { + finishAuthentication(user); + } else { + assert runAsUsername.isEmpty() : "the run as username may not be empty"; + logger.debug("user [{}] attempted to runAs with an empty username", user.principal()); + listener.onFailure(request.runAsDenied( + new Authentication(new User(runAsUsername, null, user), authenticatedBy, lookedupBy), authenticationToken)); + } + } else { + finishAuthentication(user); + } + } + } + + /** + * Iterates over the realms and attempts to lookup the run as user by the given username. The consumer will be called regardless of + * if the user is found or not, with a non-null user. 
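A minimal, hedged sketch of how a caller would request run-as, based on the `AuthenticationServiceField.RUN_AS_USER_HEADER` check in `consumeUser` above; the class and username below are hypothetical:

```java
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField;

// Illustrative only: run-as is requested by placing the target username in the
// thread context before the request is authenticated. The authenticated (real)
// user must itself be authorized to run as the named user.
class RunAsSketch {
    void runAs(ThreadContext threadContext) {
        threadContext.putHeader(AuthenticationServiceField.RUN_AS_USER_HEADER, "jdoe");
        // an empty header value would be rejected by consumeUser(...) with a runAsDenied audit event
    }
}
```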
We do not fail requests if the run as user is not found as that can leak the + * names of users that exist using a timing attack + */ + private void lookupRunAsUser(final User user, String runAsUsername, Consumer userConsumer) { + final List realmsList = realms.asList(); + final BiConsumer> realmLookupConsumer = (realm, lookupUserListener) -> + realm.lookupUser(runAsUsername, ActionListener.wrap((lookedupUser) -> { + if (lookedupUser != null) { + lookedupBy = new RealmRef(realm.name(), realm.type(), nodeName); + lookupUserListener.onResponse(lookedupUser); + } else { + lookupUserListener.onResponse(null); + } + }, lookupUserListener::onFailure)); + + final IteratingActionListener userLookupListener = + new IteratingActionListener<>(ActionListener.wrap((lookupUser) -> { + if (lookupUser == null) { + // the user does not exist, but we still create a User object, which will later be rejected by authz + userConsumer.accept(new User(runAsUsername, null, user)); + } else { + userConsumer.accept(new User(lookupUser, user)); + } + }, + (e) -> listener.onFailure(request.exceptionProcessingRequest(e, authenticationToken))), + realmLookupConsumer, realmsList, threadContext); + try { + userLookupListener.run(); + } catch (Exception e) { + listener.onFailure(request.exceptionProcessingRequest(e, authenticationToken)); + } + } + + /** + * Finishes the authentication process by ensuring the returned user is enabled and that the run as user is enabled if there is + * one. If authentication is successful, this method also ensures that the authentication is written to the ThreadContext + */ + void finishAuthentication(User finalUser) { + if (finalUser.enabled() == false || finalUser.authenticatedUser().enabled() == false) { + // TODO: these should be different log messages if the runas vs auth user is disabled? + logger.debug("user [{}] is disabled. 
failing authentication", finalUser); + listener.onFailure(request.authenticationFailed(authenticationToken)); + } else { + final Authentication finalAuth = new Authentication(finalUser, authenticatedBy, lookedupBy); + writeAuthToContext(finalAuth); + } + } + + /** + * Writes the authentication to the {@link ThreadContext} and then calls the listener if + * successful + */ + void writeAuthToContext(Authentication authentication) { + request.authenticationSuccess(authentication.getAuthenticatedBy().getName(), authentication.getUser()); + Runnable action = () -> listener.onResponse(authentication); + try { + authentication.writeToContext(threadContext); + } catch (Exception e) { + action = () -> listener.onFailure(request.exceptionProcessingRequest(e, authenticationToken)); + } + + // we assign the listener call to an action to avoid calling the listener within a try block and auditing the wrong thing + // when an exception bubbles up even after successful authentication + action.run(); + } + + private void authenticateToken(AuthenticationToken token) { + this.consumeToken(token); + } + } + + abstract static class AuditableRequest { + + final AuditTrail auditTrail; + final AuthenticationFailureHandler failureHandler; + final ThreadContext threadContext; + + AuditableRequest(AuditTrail auditTrail, AuthenticationFailureHandler failureHandler, ThreadContext threadContext) { + this.auditTrail = auditTrail; + this.failureHandler = failureHandler; + this.threadContext = threadContext; + } + + abstract void realmAuthenticationFailed(AuthenticationToken token, String realm); + + abstract ElasticsearchSecurityException tamperedRequest(); + + abstract ElasticsearchSecurityException exceptionProcessingRequest(Exception e, @Nullable AuthenticationToken token); + + abstract ElasticsearchSecurityException authenticationFailed(AuthenticationToken token); + + abstract ElasticsearchSecurityException anonymousAccessDenied(); + + abstract ElasticsearchSecurityException runAsDenied(Authentication authentication, AuthenticationToken token); + + abstract void authenticationSuccess(String realm, User user); + + } + + static class AuditableTransportRequest extends AuditableRequest { + + private final String action; + private final TransportMessage message; + + AuditableTransportRequest(AuditTrail auditTrail, AuthenticationFailureHandler failureHandler, ThreadContext threadContext, + String action, TransportMessage message) { + super(auditTrail, failureHandler, threadContext); + this.action = action; + this.message = message; + } + + @Override + void authenticationSuccess(String realm, User user) { + auditTrail.authenticationSuccess(realm, user, action, message); + } + + @Override + void realmAuthenticationFailed(AuthenticationToken token, String realm) { + auditTrail.authenticationFailed(realm, token, action, message); + } + + @Override + ElasticsearchSecurityException tamperedRequest() { + auditTrail.tamperedRequest(action, message); + return new ElasticsearchSecurityException("failed to verify signed authentication information"); + } + + @Override + ElasticsearchSecurityException exceptionProcessingRequest(Exception e, @Nullable AuthenticationToken token) { + if (token != null) { + auditTrail.authenticationFailed(token, action, message); + } else { + auditTrail.authenticationFailed(action, message); + } + return failureHandler.exceptionProcessingRequest(message, action, e, threadContext); + } + + @Override + ElasticsearchSecurityException authenticationFailed(AuthenticationToken token) { + 
auditTrail.authenticationFailed(token, action, message); + return failureHandler.failedAuthentication(message, token, action, threadContext); + } + + @Override + ElasticsearchSecurityException anonymousAccessDenied() { + auditTrail.anonymousAccessDenied(action, message); + return failureHandler.missingToken(message, action, threadContext); + } + + @Override + ElasticsearchSecurityException runAsDenied(Authentication authentication, AuthenticationToken token) { + auditTrail.runAsDenied(authentication, action, message, Role.EMPTY.names()); + return failureHandler.failedAuthentication(message, token, action, threadContext); + } + + @Override + public String toString() { + return "transport request action [" + action + "]"; + } + + } + + static class AuditableRestRequest extends AuditableRequest { + + private final RestRequest request; + + @SuppressWarnings("unchecked") + AuditableRestRequest(AuditTrail auditTrail, AuthenticationFailureHandler failureHandler, ThreadContext threadContext, + RestRequest request) { + super(auditTrail, failureHandler, threadContext); + this.request = request; + } + + @Override + void authenticationSuccess(String realm, User user) { + auditTrail.authenticationSuccess(realm, user, request); + } + + @Override + void realmAuthenticationFailed(AuthenticationToken token, String realm) { + auditTrail.authenticationFailed(realm, token, request); + } + + @Override + ElasticsearchSecurityException tamperedRequest() { + auditTrail.tamperedRequest(request); + return new ElasticsearchSecurityException("rest request attempted to inject a user"); + } + + @Override + ElasticsearchSecurityException exceptionProcessingRequest(Exception e, @Nullable AuthenticationToken token) { + if (token != null) { + auditTrail.authenticationFailed(token, request); + } else { + auditTrail.authenticationFailed(request); + } + return failureHandler.exceptionProcessingRequest(request, e, threadContext); + } + + @Override + ElasticsearchSecurityException authenticationFailed(AuthenticationToken token) { + auditTrail.authenticationFailed(token, request); + return failureHandler.failedAuthentication(request, token, threadContext); + } + + @Override + ElasticsearchSecurityException anonymousAccessDenied() { + auditTrail.anonymousAccessDenied(request); + return failureHandler.missingToken(request, threadContext); + } + + @Override + ElasticsearchSecurityException runAsDenied(Authentication authentication, AuthenticationToken token) { + auditTrail.runAsDenied(authentication, request, Role.EMPTY.names()); + return failureHandler.failedAuthentication(request, token, threadContext); + } + + @Override + public String toString() { + return "rest request uri [" + request.uri() + "]"; + } + } + + public static void addSettings(List> settings) { + settings.add(AuthenticationServiceField.RUN_AS_ENABLED); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/BytesKey.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/BytesKey.java new file mode 100644 index 0000000000000..1534b78899f8b --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/BytesKey.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.StringHelper; + +import java.util.Arrays; + +/** + * Simple wrapper around bytes so that it can be used as a cache key. The hashCode is computed + * once upon creation and cached. + */ +public class BytesKey { + + final byte[] bytes; + private final int hashCode; + + public BytesKey(byte[] bytes) { + this.bytes = bytes; + this.hashCode = StringHelper.murmurhash3_x86_32(bytes, 0, bytes.length, StringHelper.GOOD_FAST_HASH_SEED); + } + + @Override + public int hashCode() { + return hashCode; + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other instanceof BytesKey == false) { + return false; + } + + BytesKey otherBytes = (BytesKey) other; + return Arrays.equals(otherBytes.bytes, bytes); + } + + @Override + public String toString() { + return new BytesRef(bytes).toString(); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredTokenRemover.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredTokenRemover.java new file mode 100644 index 0000000000000..a46d6131c6035 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredTokenRemover.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.DeleteByQueryAction; +import org.elasticsearch.index.reindex.DeleteByQueryRequest; +import org.elasticsearch.index.reindex.ScrollableHitSource; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.threadpool.ThreadPool.Names; +import org.elasticsearch.xpack.security.SecurityLifecycleService; + +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.action.support.TransportActions.isShardNotAvailableException; +import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; + +/** + * Responsible for cleaning the invalidated tokens from the invalidated tokens index. 
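Stepping back to the `BytesKey` wrapper introduced above, a minimal usage sketch (with hypothetical names and values) showing why precomputing the murmur3 hash helps when the key is reused for map lookups:

```java
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.xpack.security.authc.BytesKey;

// Illustrative only: the hash is computed once in the BytesKey constructor, so
// repeated map operations do not re-hash the wrapped byte array.
class BytesKeyUsageSketch {
    public static void main(String[] args) {
        Map<BytesKey, String> cache = new HashMap<>();
        cache.put(new BytesKey("some-salt".getBytes(StandardCharsets.UTF_8)), "cached value");

        // equals() compares the wrapped bytes, so an equal-content key hits the same entry
        BytesKey sameBytes = new BytesKey("some-salt".getBytes(StandardCharsets.UTF_8));
        System.out.println(cache.get(sameBytes)); // prints "cached value"
    }
}
```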
+ */ +final class ExpiredTokenRemover extends AbstractRunnable { + + private final Client client; + private final AtomicBoolean inProgress = new AtomicBoolean(false); + private final Logger logger; + private final TimeValue timeout; + + ExpiredTokenRemover(Settings settings, Client client) { + this.client = client; + this.logger = Loggers.getLogger(getClass(), settings); + this.timeout = TokenService.DELETE_TIMEOUT.get(settings); + } + + @Override + public void doRun() { + SearchRequest searchRequest = new SearchRequest(SecurityLifecycleService.SECURITY_INDEX_NAME); + DeleteByQueryRequest expiredDbq = new DeleteByQueryRequest(searchRequest); + if (timeout != TimeValue.MINUS_ONE) { + expiredDbq.setTimeout(timeout); + searchRequest.source().timeout(timeout); + } + final Instant now = Instant.now(); + searchRequest.source() + .query(QueryBuilders.boolQuery() + .filter(QueryBuilders.termsQuery("doc_type", TokenService.INVALIDATED_TOKEN_DOC_TYPE, "token")) + .filter(QueryBuilders.boolQuery() + .should(QueryBuilders.rangeQuery("expiration_time").lte(now.toEpochMilli())) + .should(QueryBuilders.rangeQuery("creation_time").lte(now.minus(24L, ChronoUnit.HOURS).toEpochMilli())))); + executeAsyncWithOrigin(client, SECURITY_ORIGIN, DeleteByQueryAction.INSTANCE, expiredDbq, + ActionListener.wrap(r -> { + debugDbqResponse(r); + markComplete(); + }, this::onFailure)); + } + + void submit(ThreadPool threadPool) { + if (inProgress.compareAndSet(false, true)) { + threadPool.executor(Names.GENERIC).submit(this); + } + } + + private void debugDbqResponse(BulkByScrollResponse response) { + if (logger.isDebugEnabled()) { + logger.debug("delete by query of tokens finished with [{}] deletions, [{}] bulk failures, [{}] search failures", + response.getDeleted(), response.getBulkFailures().size(), response.getSearchFailures().size()); + for (BulkItemResponse.Failure failure : response.getBulkFailures()) { + logger.debug(new ParameterizedMessage("deletion failed for index [{}], type [{}], id [{}]", + failure.getIndex(), failure.getType(), failure.getId()), failure.getCause()); + } + for (ScrollableHitSource.SearchFailure failure : response.getSearchFailures()) { + logger.debug(new ParameterizedMessage("search failed for index [{}], shard [{}] on node [{}]", + failure.getIndex(), failure.getShardId(), failure.getNodeId()), failure.getReason()); + } + } + } + + boolean isExpirationInProgress() { + return inProgress.get(); + } + + @Override + public void onFailure(Exception e) { + if (isShardNotAvailableException(e)) { + logger.debug("failed to delete expired tokens", e); + } else { + logger.error("failed to delete expired tokens", e); + } + markComplete(); + } + + private void markComplete() { + if (inProgress.compareAndSet(true, false) == false) { + throw new IllegalStateException("in progress was set to false but should have been true!"); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java new file mode 100644 index 0000000000000..017f4a6e04990 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java @@ -0,0 +1,131 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc; + +import org.elasticsearch.bootstrap.BootstrapCheck; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.env.Environment; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings; +import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings; +import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings; +import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.elasticsearch.xpack.security.authc.esnative.NativeRealm; +import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; +import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; +import org.elasticsearch.xpack.security.authc.file.FileRealm; +import org.elasticsearch.xpack.security.authc.ldap.LdapRealm; +import org.elasticsearch.xpack.security.authc.pki.PkiRealm; +import org.elasticsearch.xpack.security.authc.saml.SamlRealm; +import org.elasticsearch.xpack.security.authc.support.RoleMappingFileBootstrapCheck; +import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * Provides a single entry point into dealing with all standard XPack security {@link Realm realms}. + * This class does not handle extensions. + * @see Realms for the component that manages configured realms (including custom extension realms) + */ +public final class InternalRealms { + + /** + * The list of all internal realm types, excluding {@link ReservedRealm#TYPE}. + */ + private static final Set XPACK_TYPES = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( + NativeRealmSettings.TYPE, FileRealmSettings.TYPE, LdapRealmSettings.AD_TYPE, LdapRealmSettings.LDAP_TYPE, PkiRealmSettings.TYPE, + SamlRealmSettings.TYPE + ))); + + /** + * The list of all standard realm types, which are those provided by x-pack and do not have extensive + * interaction with third party sources + */ + private static final Set STANDARD_TYPES = + Collections.unmodifiableSet(Sets.difference(XPACK_TYPES, Collections.singleton(SamlRealmSettings.TYPE))); + + /** + * Determines whether type is an internal realm-type that is provided by x-pack, + * including the {@link ReservedRealm} + */ + static boolean isXPackRealm(String type) { + if (XPACK_TYPES.contains(type)) { + return true; + } + return ReservedRealm.TYPE.equals(type); + } + + /** + * Determines whether type is an internal realm-type that is provided by x-pack, + * excluding the {@link ReservedRealm} and realms that have extensive interaction with + * third party sources + */ + static boolean isStandardRealm(String type) { + return STANDARD_TYPES.contains(type); + } + + /** + * Creates {@link Realm.Factory factories} for each internal realm type. 
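To make the realm-type classification above concrete, a small sketch (hypothetical, and placed in the same package because the helpers are package-private) of how `isXPackRealm` and `isStandardRealm` would answer for a few realm types:

```java
package org.elasticsearch.xpack.security.authc;

import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings;
import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings;
import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm;

// Illustrative only: SAML is an x-pack realm but not a "standard" one, the reserved
// realm is x-pack only via the explicit check, and unknown types are neither.
class RealmTypeSketch {
    public static void main(String[] args) {
        System.out.println(InternalRealms.isXPackRealm(NativeRealmSettings.TYPE));    // true
        System.out.println(InternalRealms.isStandardRealm(NativeRealmSettings.TYPE)); // true
        System.out.println(InternalRealms.isXPackRealm(SamlRealmSettings.TYPE));      // true
        System.out.println(InternalRealms.isStandardRealm(SamlRealmSettings.TYPE));   // false
        System.out.println(InternalRealms.isXPackRealm(ReservedRealm.TYPE));          // true
        System.out.println(InternalRealms.isStandardRealm("my_custom_realm"));        // false
    }
}
```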
+ * This excludes the {@link ReservedRealm}, as it cannot be created dynamically. + * @return A map from realm-type to Factory + */ + public static Map getFactories(ThreadPool threadPool, ResourceWatcherService resourceWatcherService, + SSLService sslService, NativeUsersStore nativeUsersStore, + NativeRoleMappingStore nativeRoleMappingStore, + SecurityLifecycleService securityLifecycleService) { + + Map map = new HashMap<>(); + map.put(FileRealmSettings.TYPE, config -> new FileRealm(config, resourceWatcherService)); + map.put(NativeRealmSettings.TYPE, config -> { + final NativeRealm nativeRealm = new NativeRealm(config, nativeUsersStore); + securityLifecycleService.addSecurityIndexHealthChangeListener(nativeRealm::onSecurityIndexHealthChange); + return nativeRealm; + }); + map.put(LdapRealmSettings.AD_TYPE, config -> new LdapRealm(LdapRealmSettings.AD_TYPE, config, sslService, + resourceWatcherService, nativeRoleMappingStore, threadPool)); + map.put(LdapRealmSettings.LDAP_TYPE, config -> new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, + sslService, resourceWatcherService, nativeRoleMappingStore, threadPool)); + map.put(PkiRealmSettings.TYPE, config -> new PkiRealm(config, resourceWatcherService, nativeRoleMappingStore)); + map.put(SamlRealmSettings.TYPE, config -> SamlRealm.create(config, sslService, resourceWatcherService, nativeRoleMappingStore)); + return Collections.unmodifiableMap(map); + } + + private InternalRealms() { + } + + public static List getBootstrapChecks(final Settings globalSettings, final Environment env) { + final List checks = new ArrayList<>(); + final Map settingsByRealm = RealmSettings.getRealmSettings(globalSettings); + settingsByRealm.forEach((name, settings) -> { + final RealmConfig realmConfig = new RealmConfig(name, settings, globalSettings, env, null); + switch (realmConfig.type()) { + case LdapRealmSettings.AD_TYPE: + case LdapRealmSettings.LDAP_TYPE: + case PkiRealmSettings.TYPE: + final BootstrapCheck check = RoleMappingFileBootstrapCheck.create(realmConfig); + if (check != null) { + checks.add(check); + } + } + }); + return checks; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java new file mode 100644 index 0000000000000..3831959752382 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java @@ -0,0 +1,284 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; + +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.license.XPackLicenseState.AllowedRealmType; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings; +import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; +import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; + + +/** + * Serves as a realms registry (also responsible for ordering the realms appropriately) + */ +public class Realms extends AbstractComponent implements Iterable { + + private final Environment env; + private final Map factories; + private final XPackLicenseState licenseState; + private final ThreadContext threadContext; + private final ReservedRealm reservedRealm; + + protected List realms; + // a list of realms that are considered standard in that they are provided by x-pack and + // interact with a 3rd party source on a limited basis + List standardRealmsOnly; + // a list of realms that are considered native, that is they only interact with x-pack and no 3rd party auth sources + List nativeRealmsOnly; + + public Realms(Settings settings, Environment env, Map factories, XPackLicenseState licenseState, + ThreadContext threadContext, ReservedRealm reservedRealm) throws Exception { + super(settings); + this.env = env; + this.factories = factories; + this.licenseState = licenseState; + this.threadContext = threadContext; + this.reservedRealm = reservedRealm; + assert factories.get(ReservedRealm.TYPE) == null; + this.realms = initRealms(); + // pre-computing a list of internal only realms allows us to have much cheaper iteration than a custom iterator + // and is also simpler in terms of logic. These lists are small, so the duplication should not be a real issue here + List standardRealms = new ArrayList<>(); + List nativeRealms = new ArrayList<>(); + for (Realm realm : realms) { + // don't add the reserved realm here otherwise we end up with only this realm... 
+ if (InternalRealms.isStandardRealm(realm.type())) { + standardRealms.add(realm); + } + + if (FileRealmSettings.TYPE.equals(realm.type()) || NativeRealmSettings.TYPE.equals(realm.type())) { + nativeRealms.add(realm); + } + } + + for (List realmList : Arrays.asList(standardRealms, nativeRealms)) { + if (realmList.isEmpty()) { + addNativeRealms(realmList); + } + + assert realmList.contains(reservedRealm) == false; + realmList.add(0, reservedRealm); + assert realmList.get(0) == reservedRealm; + } + + this.standardRealmsOnly = Collections.unmodifiableList(standardRealms); + this.nativeRealmsOnly = Collections.unmodifiableList(nativeRealms); + } + + @Override + public Iterator iterator() { + if (licenseState.isSecurityEnabled() == false || licenseState.isAuthAllowed() == false) { + return Collections.emptyIterator(); + } + + AllowedRealmType allowedRealmType = licenseState.allowedRealmType(); + switch (allowedRealmType) { + case ALL: + return realms.iterator(); + case DEFAULT: + return standardRealmsOnly.iterator(); + case NATIVE: + return nativeRealmsOnly.iterator(); + default: + throw new IllegalStateException("authentication should not be enabled"); + } + } + + public Stream stream() { + return StreamSupport.stream(this.spliterator(), false); + } + + public List asList() { + if (licenseState.isSecurityEnabled() == false || licenseState.isAuthAllowed() == false) { + return Collections.emptyList(); + } + + AllowedRealmType allowedRealmType = licenseState.allowedRealmType(); + switch (allowedRealmType) { + case ALL: + return Collections.unmodifiableList(realms); + case DEFAULT: + return Collections.unmodifiableList(standardRealmsOnly); + case NATIVE: + return Collections.unmodifiableList(nativeRealmsOnly); + default: + throw new IllegalStateException("authentication should not be enabled"); + } + } + + public Realm realm(String name) { + for (Realm realm : realms) { + if (name.equals(realm.name())) { + return realm; + } + } + return null; + } + + public Realm.Factory realmFactory(String type) { + return factories.get(type); + } + + protected List initRealms() throws Exception { + Settings realmsSettings = RealmSettings.get(settings); + Set internalTypes = new HashSet<>(); + List realms = new ArrayList<>(); + for (String name : realmsSettings.names()) { + Settings realmSettings = realmsSettings.getAsSettings(name); + String type = realmSettings.get("type"); + if (type == null) { + throw new IllegalArgumentException("missing realm type for [" + name + "] realm"); + } + Realm.Factory factory = factories.get(type); + if (factory == null) { + throw new IllegalArgumentException("unknown realm type [" + type + "] set for realm [" + name + "]"); + } + RealmConfig config = new RealmConfig(name, realmSettings, settings, env, threadContext); + if (!config.enabled()) { + if (logger.isDebugEnabled()) { + logger.debug("realm [{}/{}] is disabled", type, name); + } + continue; + } + if (FileRealmSettings.TYPE.equals(type) || NativeRealmSettings.TYPE.equals(type)) { + // this is an internal realm factory, let's make sure we didn't already registered one + // (there can only be one instance of an internal realm) + if (internalTypes.contains(type)) { + throw new IllegalArgumentException("multiple [" + type + "] realms are configured. 
[" + type + + "] is an internal realm and therefore there can only be one such realm configured"); + } + internalTypes.add(type); + } + realms.add(factory.create(config)); + } + + if (!realms.isEmpty()) { + Collections.sort(realms); + } else { + // there is no "realms" configuration, add the defaults + addNativeRealms(realms); + } + // always add built in first! + realms.add(0, reservedRealm); + return realms; + } + + public Map usageStats() { + Map realmMap = new HashMap<>(); + for (Realm realm : this) { + if (ReservedRealm.TYPE.equals(realm.type())) { + continue; + } + realmMap.compute(realm.type(), (key, value) -> { + if (value == null) { + Object realmTypeUsage = convertToMapOfLists(realm.usageStats()); + return realmTypeUsage; + } + assert value instanceof Map; + combineMaps((Map) value, realm.usageStats()); + return value; + }); + } + + final AllowedRealmType allowedRealmType = licenseState.allowedRealmType(); + // iterate over the factories so we can add enabled & available info + for (String type : factories.keySet()) { + assert ReservedRealm.TYPE.equals(type) == false; + realmMap.compute(type, (key, value) -> { + if (value == null) { + return MapBuilder.newMapBuilder() + .put("enabled", false) + .put("available", isRealmTypeAvailable(allowedRealmType, type)) + .map(); + } + + assert value instanceof Map; + Map realmTypeUsage = (Map) value; + realmTypeUsage.put("enabled", true); + // the realms iterator returned this type so it must be enabled + assert isRealmTypeAvailable(allowedRealmType, type); + realmTypeUsage.put("available", true); + return value; + }); + } + + return realmMap; + } + + private void addNativeRealms(List realms) throws Exception { + Realm.Factory fileRealm = factories.get(FileRealmSettings.TYPE); + if (fileRealm != null) { + + realms.add(fileRealm.create(new RealmConfig("default_" + FileRealmSettings.TYPE, Settings.EMPTY, + settings, env, threadContext))); + } + Realm.Factory indexRealmFactory = factories.get(NativeRealmSettings.TYPE); + if (indexRealmFactory != null) { + realms.add(indexRealmFactory.create(new RealmConfig("default_" + NativeRealmSettings.TYPE, Settings.EMPTY, + settings, env, threadContext))); + } + } + + private static void combineMaps(Map mapA, Map mapB) { + for (Entry entry : mapB.entrySet()) { + mapA.compute(entry.getKey(), (key, value) -> { + if (value == null) { + return new ArrayList<>(Collections.singletonList(entry.getValue())); + } + + assert value instanceof List; + ((List) value).add(entry.getValue()); + return value; + }); + } + } + + private static Map convertToMapOfLists(Map map) { + Map converted = new HashMap<>(map.size()); + for (Entry entry : map.entrySet()) { + converted.put(entry.getKey(), new ArrayList<>(Collections.singletonList(entry.getValue()))); + } + return converted; + } + + public static boolean isRealmTypeAvailable(AllowedRealmType enabledRealmType, String type) { + switch (enabledRealmType) { + case ALL: + return true; + case NONE: + return false; + case NATIVE: + return FileRealmSettings.TYPE.equals(type) || NativeRealmSettings.TYPE.equals(type); + case DEFAULT: + return InternalRealms.isStandardRealm(type) || ReservedRealm.TYPE.equals(type); + default: + throw new IllegalStateException("unknown enabled realm type [" + enabledRealmType + "]"); + } + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java new file mode 100644 index 0000000000000..305c6caeba649 
--- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -0,0 +1,1453 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; +import org.elasticsearch.core.internal.io.IOUtils; +import org.apache.lucene.util.StringHelper; +import org.apache.lucene.util.UnicodeUtil; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest.OpType; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.get.MultiGetItemResponse; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.index.IndexAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ack.AckedRequest; +import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.cache.Cache; +import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.hash.MessageDigests; +import org.elasticsearch.common.io.stream.InputStreamStreamInput; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.iterable.Iterables; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.engine.DocumentMissingException; +import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.index.query.BoolQueryBuilder; +import 
org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.security.ScrollHelper; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.KeyAndTimestamp; +import org.elasticsearch.xpack.core.security.authc.TokenMetaData; +import org.elasticsearch.xpack.security.SecurityLifecycleService; + +import javax.crypto.Cipher; +import javax.crypto.CipherInputStream; +import javax.crypto.CipherOutputStream; +import javax.crypto.NoSuchPaddingException; +import javax.crypto.SecretKey; +import javax.crypto.SecretKeyFactory; +import javax.crypto.spec.GCMParameterSpec; +import javax.crypto.spec.PBEKeySpec; +import javax.crypto.spec.SecretKeySpec; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.Closeable; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.security.GeneralSecurityException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.security.spec.InvalidKeySpecException; +import java.time.Clock; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.temporal.ChronoUnit; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Base64; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Supplier; + +import static org.elasticsearch.action.support.TransportActions.isShardNotAvailableException; +import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; +import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; + +/** + * Service responsible for the creation, validation, and other management of {@link UserToken} + * objects for authentication + */ +public final class TokenService extends AbstractComponent { + + /** + * The parameters below are used to generate the cryptographic key that is used to encrypt the + * values returned by this service. 
These parameters are based off of the + * OWASP Password Storage + * Cheat Sheet and the + * NIST Digital Identity Guidelines + */ + private static final int ITERATIONS = 100000; + private static final String KDF_ALGORITHM = "PBKDF2withHMACSHA512"; + private static final int SALT_BYTES = 32; + private static final int KEY_BYTES = 64; + private static final int IV_BYTES = 12; + private static final int VERSION_BYTES = 4; + private static final String ENCRYPTION_CIPHER = "AES/GCM/NoPadding"; + private static final String EXPIRED_TOKEN_WWW_AUTH_VALUE = "Bearer realm=\"" + XPackField.SECURITY + + "\", error=\"invalid_token\", error_description=\"The access token expired\""; + private static final String MALFORMED_TOKEN_WWW_AUTH_VALUE = "Bearer realm=\"" + XPackField.SECURITY + + "\", error=\"invalid_token\", error_description=\"The access token is malformed\""; + private static final String TYPE = "doc"; + + public static final String THREAD_POOL_NAME = XPackField.SECURITY + "-token-key"; + public static final Setting TOKEN_EXPIRATION = Setting.timeSetting("xpack.security.authc.token.timeout", + TimeValue.timeValueMinutes(20L), TimeValue.timeValueSeconds(1L), Property.NodeScope); + public static final Setting DELETE_INTERVAL = Setting.timeSetting("xpack.security.authc.token.delete.interval", + TimeValue.timeValueMinutes(30L), Property.NodeScope); + public static final Setting DELETE_TIMEOUT = Setting.timeSetting("xpack.security.authc.token.delete.timeout", + TimeValue.MINUS_ONE, Property.NodeScope); + + static final String INVALIDATED_TOKEN_DOC_TYPE = "invalidated-token"; + static final int MINIMUM_BYTES = VERSION_BYTES + SALT_BYTES + IV_BYTES + 1; + private static final int MINIMUM_BASE64_BYTES = Double.valueOf(Math.ceil((4 * MINIMUM_BYTES) / 3)).intValue(); + + private final SecureRandom secureRandom = new SecureRandom(); + private final ClusterService clusterService; + private final Clock clock; + private final TimeValue expirationDelay; + private final TimeValue deleteInterval; + private final Client client; + private final SecurityLifecycleService lifecycleService; + private final ExpiredTokenRemover expiredTokenRemover; + private final boolean enabled; + private volatile TokenKeys keyCache; + private volatile long lastExpirationRunMs; + private final AtomicLong createdTimeStamps = new AtomicLong(-1); + + /** + * Creates a new token service + * + * @param settings the node settings + * @param clock the clock that will be used for comparing timestamps + * @param client the client to use when checking for revocations + */ + public TokenService(Settings settings, Clock clock, Client client, + SecurityLifecycleService lifecycleService, ClusterService clusterService) throws GeneralSecurityException { + super(settings); + byte[] saltArr = new byte[SALT_BYTES]; + secureRandom.nextBytes(saltArr); + + final SecureString tokenPassphrase = generateTokenKey(); + this.clock = clock.withZone(ZoneOffset.UTC); + this.expirationDelay = TOKEN_EXPIRATION.get(settings); + this.client = client; + this.lifecycleService = lifecycleService; + this.lastExpirationRunMs = client.threadPool().relativeTimeInMillis(); + this.deleteInterval = DELETE_INTERVAL.get(settings); + this.enabled = isTokenServiceEnabled(settings); + this.expiredTokenRemover = new ExpiredTokenRemover(settings, client); + ensureEncryptionCiphersSupported(); + KeyAndCache keyAndCache = new KeyAndCache(new KeyAndTimestamp(tokenPassphrase, createdTimeStamps.incrementAndGet()), + new BytesKey(saltArr)); + keyCache = new 
TokenKeys(Collections.singletonMap(keyAndCache.getKeyHash(), keyAndCache), keyAndCache.getKeyHash()); + this.clusterService = clusterService; + initialize(clusterService); + getTokenMetaData(); + } + + public static Boolean isTokenServiceEnabled(Settings settings) { + return XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.get(settings); + } + + /** + * Create a token based on the provided authentication and metadata. + * The created token will be stored in the security index. + */ + public void createUserToken(Authentication authentication, Authentication originatingClientAuth, + ActionListener> listener, Map metadata) throws IOException { + ensureEnabled(); + if (authentication == null) { + listener.onFailure(new IllegalArgumentException("authentication must be provided")); + } else if (originatingClientAuth == null) { + listener.onFailure(new IllegalArgumentException("originating client authentication must be provided")); + } else { + final Instant created = clock.instant(); + final Instant expiration = getExpirationTime(created); + final Version version = clusterService.state().nodes().getMinNodeVersion(); + final Authentication matchingVersionAuth = version.equals(authentication.getVersion()) ? authentication : + new Authentication(authentication.getUser(), authentication.getAuthenticatedBy(), authentication.getLookedUpBy(), + version); + final UserToken userToken = new UserToken(version, matchingVersionAuth, expiration, metadata); + final String refreshToken = UUIDs.randomBase64UUID(); + + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + builder.startObject(); + builder.field("doc_type", "token"); + builder.field("creation_time", created.toEpochMilli()); + builder.startObject("refresh_token") + .field("token", refreshToken) + .field("invalidated", false) + .field("refreshed", false) + .startObject("client") + .field("type", "unassociated_client") + .field("user", originatingClientAuth.getUser().principal()) + .field("realm", originatingClientAuth.getAuthenticatedBy().getName()) + .endObject() + .endObject(); + builder.startObject("access_token") + .field("invalidated", false) + .field("user_token", userToken) + .field("realm", authentication.getAuthenticatedBy().getName()) + .endObject(); + builder.endObject(); + IndexRequest request = + client.prepareIndex(SecurityLifecycleService.SECURITY_INDEX_NAME, TYPE, getTokenDocumentId(userToken)) + .setOpType(OpType.CREATE) + .setSource(builder) + .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) + .request(); + lifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> + executeAsyncWithOrigin(client, SECURITY_ORIGIN, IndexAction.INSTANCE, request, + ActionListener.wrap(indexResponse -> listener.onResponse(new Tuple<>(userToken, refreshToken)), + listener::onFailure)) + ); + } + } + } + + /** + * Looks in the context to see if the request provided a header with a user token and if so the + * token is validated, which includes authenticated decryption and verification that the token + * has not been revoked or is expired. 
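+ * The token is read from the {@code Authorization} header when the header starts with {@code Bearer }
+ * followed by the encoded token. If no token is present, or the token cannot be decoded because it was
+ * not issued by this service, the listener is completed with a {@code null} value rather than a failure.
+ * An illustrative call (the surrounding handler names are assumptions, not part of this class) might be:
+ * <pre>{@code
+ * tokenService.getAndValidateToken(threadPool.getThreadContext(), ActionListener.wrap(
+ *     userToken -> completeAuthentication(userToken), // hypothetical caller-side handling
+ *     listener::onFailure));
+ * }</pre>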
+ */ + void getAndValidateToken(ThreadContext ctx, ActionListener listener) { + if (enabled) { + final String token = getFromHeader(ctx); + if (token == null) { + listener.onResponse(null); + } else { + try { + decodeAndValidateToken(token, ActionListener.wrap(listener::onResponse, e -> { + if (e instanceof IOException) { + // could happen with a token that is not ours + logger.debug("invalid token", e); + listener.onResponse(null); + } else { + listener.onFailure(e); + } + })); + } catch (IOException e) { + // could happen with a token that is not ours + logger.debug("invalid token", e); + listener.onResponse(null); + } + } + } else { + listener.onResponse(null); + } + } + + /** + * Reads the authentication and metadata from the given token. + * This method does not validate whether the token is expired or not. + */ + public void getAuthenticationAndMetaData(String token, ActionListener>> listener) + throws IOException { + decodeToken(token, ActionListener.wrap( + userToken -> { + if (userToken == null) { + listener.onFailure(new ElasticsearchSecurityException("supplied token is not valid")); + } else { + listener.onResponse(new Tuple<>(userToken.getAuthentication(), userToken.getMetadata())); + } + }, + listener::onFailure + )); + } + + private void decodeAndValidateToken(String token, ActionListener listener) throws IOException { + decodeToken(token, ActionListener.wrap(userToken -> { + if (userToken != null) { + Instant currentTime = clock.instant(); + if (currentTime.isAfter(userToken.getExpirationTime())) { + // token expired + listener.onFailure(expiredTokenException()); + } else { + checkIfTokenIsRevoked(userToken, listener); + } + } else { + listener.onResponse(null); + } + }, listener::onFailure)); + } + + /* + * Asynchronously decodes the string representation of a {@link UserToken}. The process for + * this is asynchronous as we may need to compute a key, which can be computationally expensive + * so this should not block the current thread, which is typically a network thread. A second + * reason for being asynchronous is that we can restrain the amount of resources consumed by + * the key computation to a single thread. + */ + void decodeToken(String token, ActionListener listener) throws IOException { + // We intentionally do not use try-with resources since we need to keep the stream open if we need to compute a key! + byte[] bytes = token.getBytes(StandardCharsets.UTF_8); + StreamInput in = new InputStreamStreamInput(Base64.getDecoder().wrap(new ByteArrayInputStream(bytes)), bytes.length); + if (in.available() < MINIMUM_BASE64_BYTES) { + logger.debug("invalid token"); + listener.onResponse(null); + } else { + // the token exists and the value is at least as long as we'd expect + final Version version = Version.readVersion(in); + in.setVersion(version); + final BytesKey decodedSalt = new BytesKey(in.readByteArray()); + final BytesKey passphraseHash = new BytesKey(in.readByteArray()); + KeyAndCache keyAndCache = keyCache.get(passphraseHash); + if (keyAndCache != null) { + getKeyAsync(decodedSalt, keyAndCache, ActionListener.wrap(decodeKey -> { + try { + final byte[] iv = in.readByteArray(); + final Cipher cipher = getDecryptionCipher(iv, decodeKey, version, decodedSalt); + if (version.onOrAfter(Version.V_6_2_0)) { + // we only have the id and need to get the token from the doc! 
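+ // The lookup below fails the listener if the stored document is missing the expected
+ // access_token/user_token structure, while an unavailable index or shard is treated as an
+ // unresolvable token (the listener is completed with null) rather than as a hard failure.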
+ decryptTokenId(in, cipher, version, ActionListener.wrap(tokenId -> + lifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + final GetRequest getRequest = + client.prepareGet(SecurityLifecycleService.SECURITY_INDEX_NAME, TYPE, + getTokenDocumentId(tokenId)).request(); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, getRequest, + ActionListener.wrap(response -> { + if (response.isExists()) { + Map accessTokenSource = + (Map) response.getSource().get("access_token"); + if (accessTokenSource == null) { + listener.onFailure(new IllegalStateException("token document is missing " + + "the access_token field")); + } else if (accessTokenSource.containsKey("user_token") == false) { + listener.onFailure(new IllegalStateException("token document is missing " + + "the user_token field")); + } else { + Map userTokenSource = + (Map) accessTokenSource.get("user_token"); + listener.onResponse(UserToken.fromSourceMap(userTokenSource)); + } + } else { + listener.onFailure( + new IllegalStateException("token document is missing and must be present")); + } + }, e -> { + // if the index or the shard is not there / available we assume that + // the token is not valid + if (isShardNotAvailableException(e)) { + logger.warn("failed to get token [{}] since index is not available", tokenId); + listener.onResponse(null); + } else { + logger.error(new ParameterizedMessage("failed to get token [{}]", tokenId), e); + listener.onFailure(e); + } + }), client::get); + }), listener::onFailure)); + } else { + decryptToken(in, cipher, version, listener); + } + } catch (GeneralSecurityException e) { + // could happen with a token that is not ours + logger.warn("invalid token", e); + listener.onResponse(null); + } finally { + in.close(); + } + }, e -> { + IOUtils.closeWhileHandlingException(in); + listener.onFailure(e); + })); + } else { + IOUtils.closeWhileHandlingException(in); + logger.debug("invalid key {} key: {}", passphraseHash, keyCache.cache.keySet()); + listener.onResponse(null); + } + } + } + + private void getKeyAsync(BytesKey decodedSalt, KeyAndCache keyAndCache, ActionListener listener) { + final SecretKey decodeKey = keyAndCache.getKey(decodedSalt); + if (decodeKey != null) { + listener.onResponse(decodeKey); + } else { + /* As a measure of protected against DOS, we can pass requests requiring a key + * computation off to a single thread executor. For normal usage, the initial + * request(s) that require a key computation will be delayed and there will be + * some additional latency. + */ + client.threadPool().executor(THREAD_POOL_NAME) + .submit(new KeyComputingRunnable(decodedSalt, listener, keyAndCache)); + } + } + + private static void decryptToken(StreamInput in, Cipher cipher, Version version, ActionListener listener) throws + IOException { + try (CipherInputStream cis = new CipherInputStream(in, cipher); StreamInput decryptedInput = new InputStreamStreamInput(cis)) { + decryptedInput.setVersion(version); + listener.onResponse(new UserToken(decryptedInput)); + } + } + + private static void decryptTokenId(StreamInput in, Cipher cipher, Version version, ActionListener listener) throws IOException { + try (CipherInputStream cis = new CipherInputStream(in, cipher); StreamInput decryptedInput = new InputStreamStreamInput(cis)) { + decryptedInput.setVersion(version); + listener.onResponse(decryptedInput.readString()); + } + } + + /** + * This method performs the steps necessary to invalidate a token so that it may no longer be + * used. 
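+ * The supplied token string is decoded first; a value that cannot be decoded is rejected as a malformed token.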
The process of invalidation involves a step that is needed for backwards compatibility + * with versions prior to 6.2.0; this step records an entry to indicate that a token with a + * given id has been expired. The second step is to record the invalidation for tokens that + * have been created on versions on or after 6.2; this step involves performing an update to + * the token document and setting the invalidated field to true + */ + public void invalidateAccessToken(String tokenString, ActionListener listener) { + ensureEnabled(); + if (Strings.isNullOrEmpty(tokenString)) { + listener.onFailure(new IllegalArgumentException("token must be provided")); + } else { + maybeStartTokenRemover(); + try { + decodeToken(tokenString, ActionListener.wrap(userToken -> { + if (userToken == null) { + listener.onFailure(malformedTokenException()); + } else { + final long expirationEpochMilli = getExpirationTime().toEpochMilli(); + indexBwcInvalidation(userToken, listener, new AtomicInteger(0), expirationEpochMilli); + } + }, listener::onFailure)); + } catch (IOException e) { + logger.error("received a malformed token as part of a invalidation request", e); + listener.onFailure(malformedTokenException()); + } + } + } + + /** + * This method performs the steps necessary to invalidate a token so that it may no longer be used. + * + * @see #invalidateAccessToken(String, ActionListener) + */ + public void invalidateAccessToken(UserToken userToken, ActionListener listener) { + ensureEnabled(); + if (userToken == null) { + listener.onFailure(new IllegalArgumentException("token must be provided")); + } else { + maybeStartTokenRemover(); + final long expirationEpochMilli = getExpirationTime().toEpochMilli(); + indexBwcInvalidation(userToken, listener, new AtomicInteger(0), expirationEpochMilli); + } + } + + public void invalidateRefreshToken(String refreshToken, ActionListener listener) { + ensureEnabled(); + if (Strings.isNullOrEmpty(refreshToken)) { + listener.onFailure(new IllegalArgumentException("refresh token must be provided")); + } else { + maybeStartTokenRemover(); + findTokenFromRefreshToken(refreshToken, + ActionListener.wrap(tuple -> { + final String docId = tuple.v1().getHits().getAt(0).getId(); + final long docVersion = tuple.v1().getHits().getAt(0).getVersion(); + indexInvalidation(docId, Version.CURRENT, listener, tuple.v2(), "refresh_token", docVersion); + }, listener::onFailure), new AtomicInteger(0)); + } + } + + /** + * Performs the actual bwc invalidation of a token and then kicks off the new invalidation method + * + * @param userToken the token to invalidate + * @param listener the listener to notify upon completion + * @param attemptCount the number of attempts to invalidate that have already been tried + * @param expirationEpochMilli the expiration time as milliseconds since the epoch + */ + private void indexBwcInvalidation(UserToken userToken, ActionListener listener, AtomicInteger attemptCount, + long expirationEpochMilli) { + if (attemptCount.get() > 5) { + listener.onFailure(invalidGrantException("failed to invalidate token")); + } else { + final String invalidatedTokenId = getInvalidatedTokenDocumentId(userToken); + IndexRequest indexRequest = client.prepareIndex(SecurityLifecycleService.SECURITY_INDEX_NAME, TYPE, invalidatedTokenId) + .setOpType(OpType.CREATE) + .setSource("doc_type", INVALIDATED_TOKEN_DOC_TYPE, "expiration_time", expirationEpochMilli) + .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) + .request(); + final String tokenDocId = getTokenDocumentId(userToken); + final 
Version version = userToken.getVersion(); + lifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, indexRequest, + ActionListener.wrap(indexResponse -> { + ActionListener wrappedListener = + ActionListener.wrap(ignore -> listener.onResponse(true), listener::onFailure); + indexInvalidation(tokenDocId, version, wrappedListener, attemptCount, "access_token", 1L); + }, e -> { + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof VersionConflictEngineException) { + // expected since something else could have invalidated + ActionListener wrappedListener = + ActionListener.wrap(ignore -> listener.onResponse(false), listener::onFailure); + indexInvalidation(tokenDocId, version, wrappedListener, attemptCount, "access_token", 1L); + } else if (isShardNotAvailableException(e)) { + attemptCount.incrementAndGet(); + indexBwcInvalidation(userToken, listener, attemptCount, expirationEpochMilli); + } else { + listener.onFailure(e); + } + }), client::index)); + } + } + + /** + * Performs the actual invalidation of a token + * + * @param tokenDocId the id of the token doc to invalidate + * @param listener the listener to notify upon completion + * @param attemptCount the number of attempts to invalidate that have already been tried + * @param srcPrefix the prefix to use when constructing the doc to update + * @param documentVersion the expected version of the document we will update + */ + private void indexInvalidation(String tokenDocId, Version version, ActionListener listener, AtomicInteger attemptCount, + String srcPrefix, long documentVersion) { + if (attemptCount.get() > 5) { + listener.onFailure(invalidGrantException("failed to invalidate token")); + } else { + UpdateRequest request = client.prepareUpdate(SecurityLifecycleService.SECURITY_INDEX_NAME, TYPE, tokenDocId) + .setDoc(srcPrefix, Collections.singletonMap("invalidated", true)) + .setVersion(documentVersion) + .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) + .request(); + lifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, + ActionListener.wrap(updateResponse -> { + if (updateResponse.getGetResult() != null + && updateResponse.getGetResult().sourceAsMap().containsKey(srcPrefix) + && ((Map) updateResponse.getGetResult().sourceAsMap().get(srcPrefix)) + .containsKey("invalidated")) { + final boolean prevInvalidated = (boolean) + ((Map) updateResponse.getGetResult().sourceAsMap().get(srcPrefix)) + .get("invalidated"); + listener.onResponse(prevInvalidated == false); + } else { + listener.onResponse(true); + } + }, e -> { + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof DocumentMissingException) { + if (version.onOrAfter(Version.V_6_2_0)) { + // the document should always be there! 
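+ // for tokens created on or after 6.2.0 the token document must exist, so surface the
+ // DocumentMissingException instead of treating the token as already invalidated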
+ listener.onFailure(e); + } else { + listener.onResponse(false); + } + } else if (cause instanceof VersionConflictEngineException + || isShardNotAvailableException(cause)) { + attemptCount.incrementAndGet(); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, + client.prepareGet(SecurityLifecycleService.SECURITY_INDEX_NAME, TYPE, tokenDocId).request(), + ActionListener.wrap(getResult -> { + if (getResult.isExists()) { + Map source = getResult.getSource(); + Map accessTokenSource = + (Map) source.get("access_token"); + if (accessTokenSource == null) { + listener.onFailure(new IllegalArgumentException("token document is " + + "missing access_token field")); + } else { + Boolean invalidated = (Boolean) accessTokenSource.get("invalidated"); + if (invalidated == null) { + listener.onFailure(new IllegalStateException( + "token document missing invalidated value")); + } else if (invalidated) { + listener.onResponse(false); + } else { + indexInvalidation(tokenDocId, version, listener, attemptCount, srcPrefix, + getResult.getVersion()); + } + } + } else if (version.onOrAfter(Version.V_6_2_0)) { + logger.warn("could not find token document [{}] but there should " + + "be one as token has version [{}]", tokenDocId, version); + listener.onFailure(invalidGrantException("could not invalidate the token")); + } else { + listener.onResponse(false); + } + }, + e1 -> { + if (isShardNotAvailableException(e1)) { + // don't increment count; call again + indexInvalidation(tokenDocId, version, listener, attemptCount, srcPrefix, + documentVersion); + } else { + listener.onFailure(e1); + } + }), client::get); + } else { + listener.onFailure(e); + } + }), client::update)); + } + } + + /** + * Uses the refresh token to refresh its associated token and returns the new token with an + * updated expiration date to the listener + */ + public void refreshToken(String refreshToken, ActionListener> listener) { + ensureEnabled(); + findTokenFromRefreshToken(refreshToken, + ActionListener.wrap(tuple -> { + final Authentication userAuth = Authentication.readFromContext(client.threadPool().getThreadContext()); + final String tokenDocId = tuple.v1().getHits().getHits()[0].getId(); + innerRefresh(tokenDocId, userAuth, listener, tuple.v2()); + }, listener::onFailure), + new AtomicInteger(0)); + } + + private void findTokenFromRefreshToken(String refreshToken, ActionListener> listener, + AtomicInteger attemptCount) { + if (attemptCount.get() > 5) { + listener.onFailure(invalidGrantException("could not refresh the requested token")); + } else { + SearchRequest request = client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME) + .setQuery(QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery("doc_type", "token")) + .filter(QueryBuilders.termQuery("refresh_token.token", refreshToken))) + .setVersion(true) + .request(); + + lifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, + ActionListener.wrap(searchResponse -> { + if (searchResponse.isTimedOut()) { + attemptCount.incrementAndGet(); + findTokenFromRefreshToken(refreshToken, listener, attemptCount); + } else if (searchResponse.getHits().getHits().length < 1) { + logger.info("could not find token document with refresh_token [{}]", refreshToken); + listener.onFailure(invalidGrantException("could not refresh the requested token")); + } else if (searchResponse.getHits().getHits().length > 1) { + listener.onFailure(new 
IllegalStateException("multiple tokens share the same refresh token")); + } else { + listener.onResponse(new Tuple<>(searchResponse, attemptCount)); + } + }, e -> { + if (isShardNotAvailableException(e)) { + logger.debug("failed to search for token document, retrying", e); + attemptCount.incrementAndGet(); + findTokenFromRefreshToken(refreshToken, listener, attemptCount); + } else { + listener.onFailure(e); + } + }), + client::search)); + } + } + + /** + * Performs the actual refresh of the token with retries in case of certain exceptions that + * may be recoverable. The refresh involves retrieval of the token document and then + * updating the token document to indicate that the document has been refreshed. + */ + private void innerRefresh(String tokenDocId, Authentication userAuth, ActionListener> listener, + AtomicInteger attemptCount) { + if (attemptCount.getAndIncrement() > 5) { + listener.onFailure(invalidGrantException("could not refresh the requested token")); + } else { + GetRequest getRequest = client.prepareGet(SecurityLifecycleService.SECURITY_INDEX_NAME, TYPE, tokenDocId).request(); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, getRequest, + ActionListener.wrap(response -> { + if (response.isExists()) { + final Map source = response.getSource(); + final Optional invalidSource = checkTokenDocForRefresh(source, userAuth); + + if (invalidSource.isPresent()) { + listener.onFailure(invalidSource.get()); + } else { + final Map userTokenSource = (Map) + ((Map) source.get("access_token")).get("user_token"); + final String authString = (String) userTokenSource.get("authentication"); + final Integer version = (Integer) userTokenSource.get("version"); + final Map metadata = (Map) userTokenSource.get("metadata"); + + Version authVersion = Version.fromId(version); + try (StreamInput in = StreamInput.wrap(Base64.getDecoder().decode(authString))) { + in.setVersion(authVersion); + Authentication authentication = new Authentication(in); + UpdateRequest updateRequest = + client.prepareUpdate(SecurityLifecycleService.SECURITY_INDEX_NAME, TYPE, tokenDocId) + .setVersion(response.getVersion()) + .setDoc("refresh_token", Collections.singletonMap("refreshed", true)) + .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) + .request(); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, updateRequest, + ActionListener.wrap( + updateResponse -> createUserToken(authentication, userAuth, listener, metadata), + e -> { + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof VersionConflictEngineException || + isShardNotAvailableException(e)) { + innerRefresh(tokenDocId, userAuth, + listener, attemptCount); + } else { + listener.onFailure(e); + } + }), + client::update); + } + } + } else { + logger.info("could not find token document [{}] for refresh", tokenDocId); + listener.onFailure(invalidGrantException("could not refresh the requested token")); + } + }, e -> { + if (isShardNotAvailableException(e)) { + innerRefresh(tokenDocId, userAuth, listener, attemptCount); + } else { + listener.onFailure(e); + } + }), client::get); + } + } + + /** + * Performs checks on the retrieved source and returns an {@link Optional} with the exception + * if there is an issue + */ + private Optional checkTokenDocForRefresh(Map source, Authentication userAuth) { + final Map refreshTokenSrc = (Map) source.get("refresh_token"); + final Map accessTokenSrc = (Map) source.get("access_token"); + if (refreshTokenSrc == null || refreshTokenSrc.isEmpty()) { + 
return Optional.of(invalidGrantException("token document is missing the refresh_token object")); + } else if (accessTokenSrc == null || accessTokenSrc.isEmpty()) { + return Optional.of(invalidGrantException("token document is missing the access_token object")); + } else { + final Boolean refreshed = (Boolean) refreshTokenSrc.get("refreshed"); + final Boolean invalidated = (Boolean) refreshTokenSrc.get("invalidated"); + final Long creationEpochMilli = (Long) source.get("creation_time"); + final Instant creationTime = creationEpochMilli == null ? null : Instant.ofEpochMilli(creationEpochMilli); + final Map userTokenSrc = (Map) accessTokenSrc.get("user_token"); + if (refreshed == null) { + return Optional.of(invalidGrantException("token document is missing refreshed value")); + } else if (invalidated == null) { + return Optional.of(invalidGrantException("token document is missing invalidated value")); + } else if (creationEpochMilli == null) { + return Optional.of(invalidGrantException("token document is missing creation time value")); + } else if (refreshed) { + return Optional.of(invalidGrantException("token has already been refreshed")); + } else if (invalidated) { + return Optional.of(invalidGrantException("token has been invalidated")); + } else if (clock.instant().isAfter(creationTime.plus(24L, ChronoUnit.HOURS))) { + return Optional.of(invalidGrantException("refresh token is expired")); + } else if (userTokenSrc == null || userTokenSrc.isEmpty()) { + return Optional.of(invalidGrantException("token document is missing the user token info")); + } else if (userTokenSrc.get("authentication") == null) { + return Optional.of(invalidGrantException("token is missing authentication info")); + } else if (userTokenSrc.get("version") == null) { + return Optional.of(invalidGrantException("token is missing version value")); + } else if (userTokenSrc.get("metadata") == null) { + return Optional.of(invalidGrantException("token is missing metadata")); + } else { + return checkClient(refreshTokenSrc, userAuth); + } + } + } + + private Optional checkClient(Map refreshTokenSource, Authentication userAuth) { + Map clientInfo = (Map) refreshTokenSource.get("client"); + if (clientInfo == null) { + return Optional.of(invalidGrantException("token is missing client information")); + } else if (userAuth.getUser().principal().equals(clientInfo.get("user")) == false) { + return Optional.of(invalidGrantException("tokens must be refreshed by the creating client")); + } else if (userAuth.getAuthenticatedBy().getName().equals(clientInfo.get("realm")) == false) { + return Optional.of(invalidGrantException("tokens must be refreshed by the creating client")); + } else { + return Optional.empty(); + } + } + + /** + * Find all stored refresh and access tokens that have not been invalidated or expired, and were issued against + * the specified realm. 
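+ * Each result is supplied to the listener as a {@link Tuple} of the parsed access token and its associated
+ * refresh token, gathered by scrolling over the matching documents in the security index.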
+ */ + public void findActiveTokensForRealm(String realmName, ActionListener>> listener) { + ensureEnabled(); + + if (Strings.isNullOrEmpty(realmName)) { + listener.onFailure(new IllegalArgumentException("Realm name is required")); + return; + } + + final Instant now = clock.instant(); + final BoolQueryBuilder boolQuery = QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery("doc_type", "token")) + .filter(QueryBuilders.termQuery("access_token.realm", realmName)) + .filter(QueryBuilders.boolQuery() + .should(QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery("access_token.invalidated", false)) + .must(QueryBuilders.rangeQuery("access_token.user_token.expiration_time").gte(now.toEpochMilli())) + ) + .should(QueryBuilders.termQuery("refresh_token.invalidated", false)) + ); + + final SearchRequest request = client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME) + .setScroll(TimeValue.timeValueSeconds(10L)) + .setQuery(boolQuery) + .setVersion(false) + .setSize(1000) + .setFetchSource(true) + .request(); + + final Supplier supplier = client.threadPool().getThreadContext().newRestorableContext(false); + lifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> + ScrollHelper.fetchAllByEntity(client, request, new ContextPreservingActionListener<>(supplier, listener), this::parseHit)); + } + + private Tuple parseHit(SearchHit hit) { + final Map source = hit.getSourceAsMap(); + if (source == null) { + throw new IllegalStateException("token document did not have source but source should have been fetched"); + } + + try { + return parseTokensFromDocument(source); + } catch (IOException e) { + throw invalidGrantException("cannot read token from document"); + } + } + + /** + * @return A {@link Tuple} of access-token and refresh-token-id + */ + private Tuple parseTokensFromDocument(Map source) throws IOException { + final String refreshToken = (String) ((Map) source.get("refresh_token")).get("token"); + + final Map userTokenSource = (Map) + ((Map) source.get("access_token")).get("user_token"); + final String id = (String) userTokenSource.get("id"); + final Integer version = (Integer) userTokenSource.get("version"); + final String authString = (String) userTokenSource.get("authentication"); + final Long expiration = (Long) userTokenSource.get("expiration_time"); + final Map metadata = (Map) userTokenSource.get("metadata"); + + Version authVersion = Version.fromId(version); + try (StreamInput in = StreamInput.wrap(Base64.getDecoder().decode(authString))) { + in.setVersion(authVersion); + Authentication authentication = new Authentication(in); + return new Tuple<>(new UserToken(id, Version.fromId(version), authentication, Instant.ofEpochMilli(expiration), metadata), + refreshToken); + } + } + + private static String getInvalidatedTokenDocumentId(UserToken userToken) { + return getInvalidatedTokenDocumentId(userToken.getId()); + } + + private static String getInvalidatedTokenDocumentId(String id) { + return INVALIDATED_TOKEN_DOC_TYPE + "_" + id; + } + + private static String getTokenDocumentId(UserToken userToken) { + return getTokenDocumentId(userToken.getId()); + } + + private static String getTokenDocumentId(String id) { + return "token_" + id; + } + + private void ensureEnabled() { + if (enabled == false) { + throw new IllegalStateException("tokens are not enabled"); + } + } + + /** + * Checks if the token has been stored as a revoked token to ensure we do not allow tokens that + * have been explicitly cleared. 
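+ * A single multi-get looks up both the backwards compatible invalidation marker document and the token
+ * document itself, so that either form of invalidation causes the token to be rejected.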
+ */ + private void checkIfTokenIsRevoked(UserToken userToken, ActionListener listener) { + if (lifecycleService.isSecurityIndexExisting() == false) { + // index doesn't exist so the token is considered valid. + listener.onResponse(userToken); + } else { + lifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + MultiGetRequest mGetRequest = client.prepareMultiGet() + .add(SecurityLifecycleService.SECURITY_INDEX_NAME, TYPE, getInvalidatedTokenDocumentId(userToken)) + .add(SecurityLifecycleService.SECURITY_INDEX_NAME, TYPE, getTokenDocumentId(userToken)) + .request(); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, + mGetRequest, + new ActionListener() { + + @Override + public void onResponse(MultiGetResponse response) { + MultiGetItemResponse[] itemResponse = response.getResponses(); + if (itemResponse[0].isFailed()) { + onFailure(itemResponse[0].getFailure().getFailure()); + } else if (itemResponse[0].getResponse().isExists()) { + listener.onFailure(expiredTokenException()); + } else if (itemResponse[1].isFailed()) { + onFailure(itemResponse[1].getFailure().getFailure()); + } else if (itemResponse[1].getResponse().isExists()) { + Map source = itemResponse[1].getResponse().getSource(); + Map accessTokenSource = (Map) source.get("access_token"); + if (accessTokenSource == null) { + listener.onFailure(new IllegalStateException("token document is missing access_token field")); + } else { + Boolean invalidated = (Boolean) accessTokenSource.get("invalidated"); + if (invalidated == null) { + listener.onFailure(new IllegalStateException("token document is missing invalidated field")); + } else if (invalidated) { + listener.onFailure(expiredTokenException()); + } else { + listener.onResponse(userToken); + } + } + } else if (userToken.getVersion().onOrAfter(Version.V_6_2_0)) { + listener.onFailure(new IllegalStateException("token document is missing and must be present")); + } else { + listener.onResponse(userToken); + } + } + + @Override + public void onFailure(Exception e) { + // if the index or the shard is not there / available we assume that + // the token is not valid + if (isShardNotAvailableException(e)) { + logger.warn("failed to get token [{}] since index is not available", userToken.getId()); + listener.onResponse(null); + } else { + logger.error(new ParameterizedMessage("failed to get token [{}]", userToken.getId()), e); + listener.onFailure(e); + } + } + }, client::multiGet); + }); + } + } + + + public TimeValue getExpirationDelay() { + return expirationDelay; + } + + private Instant getExpirationTime() { + return getExpirationTime(clock.instant()); + } + + private Instant getExpirationTime(Instant now) { + return now.plusSeconds(expirationDelay.getSeconds()); + } + + private void maybeStartTokenRemover() { + if (lifecycleService.isSecurityIndexAvailable()) { + if (client.threadPool().relativeTimeInMillis() - lastExpirationRunMs > deleteInterval.getMillis()) { + expiredTokenRemover.submit(client.threadPool()); + lastExpirationRunMs = client.threadPool().relativeTimeInMillis(); + } + } + } + + /** + * Gets the token from the Authorization header if the header begins with + * Bearer + */ + private String getFromHeader(ThreadContext threadContext) { + String header = threadContext.getHeader("Authorization"); + if (Strings.hasLength(header) && header.startsWith("Bearer ") + && header.length() > "Bearer ".length()) { + return header.substring("Bearer ".length()); + } + return null; + } + + /** + * Serializes a token to a String 
containing an encrypted representation of the token + */ + public String getUserTokenString(UserToken userToken) throws IOException, GeneralSecurityException { + // we know that the minimum length is larger than the default of the ByteArrayOutputStream so set the size to this explicitly + try (ByteArrayOutputStream os = new ByteArrayOutputStream(MINIMUM_BASE64_BYTES); + OutputStream base64 = Base64.getEncoder().wrap(os); + StreamOutput out = new OutputStreamStreamOutput(base64)) { + out.setVersion(userToken.getVersion()); + KeyAndCache keyAndCache = keyCache.activeKeyCache; + Version.writeVersion(userToken.getVersion(), out); + out.writeByteArray(keyAndCache.getSalt().bytes); + out.writeByteArray(keyAndCache.getKeyHash().bytes); + final byte[] initializationVector = getNewInitializationVector(); + out.writeByteArray(initializationVector); + try (CipherOutputStream encryptedOutput = + new CipherOutputStream(out, getEncryptionCipher(initializationVector, keyAndCache, userToken.getVersion())); + StreamOutput encryptedStreamOutput = new OutputStreamStreamOutput(encryptedOutput)) { + encryptedStreamOutput.setVersion(userToken.getVersion()); + if (userToken.getVersion().onOrAfter(Version.V_6_2_0)) { + encryptedStreamOutput.writeString(userToken.getId()); + } else { + userToken.writeTo(encryptedStreamOutput); + } + encryptedStreamOutput.close(); + return new String(os.toByteArray(), StandardCharsets.UTF_8); + } + } + } + + private void ensureEncryptionCiphersSupported() throws NoSuchPaddingException, NoSuchAlgorithmException { + Cipher.getInstance(ENCRYPTION_CIPHER); + SecretKeyFactory.getInstance(KDF_ALGORITHM); + } + + private Cipher getEncryptionCipher(byte[] iv, KeyAndCache keyAndCache, Version version) throws GeneralSecurityException { + Cipher cipher = Cipher.getInstance(ENCRYPTION_CIPHER); + BytesKey salt = keyAndCache.getSalt(); + try { + cipher.init(Cipher.ENCRYPT_MODE, keyAndCache.getOrComputeKey(salt), new GCMParameterSpec(128, iv), secureRandom); + } catch (ExecutionException e) { + throw new ElasticsearchSecurityException("Failed to compute secret key for active salt", e); + } + cipher.updateAAD(ByteBuffer.allocate(4).putInt(version.id).array()); + cipher.updateAAD(salt.bytes); + return cipher; + } + + private Cipher getDecryptionCipher(byte[] iv, SecretKey key, Version version, + BytesKey salt) throws GeneralSecurityException { + Cipher cipher = Cipher.getInstance(ENCRYPTION_CIPHER); + cipher.init(Cipher.DECRYPT_MODE, key, new GCMParameterSpec(128, iv), secureRandom); + cipher.updateAAD(ByteBuffer.allocate(4).putInt(version.id).array()); + cipher.updateAAD(salt.bytes); + return cipher; + } + + private byte[] getNewInitializationVector() { + final byte[] initializationVector = new byte[IV_BYTES]; + secureRandom.nextBytes(initializationVector); + return initializationVector; + } + + /** + * Generates a secret key based off of the provided password and salt. + * This method is computationally expensive. + */ + static SecretKey computeSecretKey(char[] rawPassword, byte[] salt) + throws NoSuchAlgorithmException, InvalidKeySpecException { + SecretKeyFactory secretKeyFactory = SecretKeyFactory.getInstance(KDF_ALGORITHM); + PBEKeySpec keySpec = new PBEKeySpec(rawPassword, salt, ITERATIONS, 128); + SecretKey tmp = secretKeyFactory.generateSecret(keySpec); + return new SecretKeySpec(tmp.getEncoded(), "AES"); + } + + /** + * Creates an {@link ElasticsearchSecurityException} that indicates the token was expired. It + * is up to the client to re-authenticate and obtain a new token. 
The format for this response + * is defined in + */ + private static ElasticsearchSecurityException expiredTokenException() { + ElasticsearchSecurityException e = + new ElasticsearchSecurityException("token expired", RestStatus.UNAUTHORIZED); + e.addHeader("WWW-Authenticate", EXPIRED_TOKEN_WWW_AUTH_VALUE); + return e; + } + + /** + * Creates an {@link ElasticsearchSecurityException} that indicates the token was expired. It + * is up to the client to re-authenticate and obtain a new token. The format for this response + * is defined in + */ + private static ElasticsearchSecurityException malformedTokenException() { + ElasticsearchSecurityException e = + new ElasticsearchSecurityException("token malformed", RestStatus.UNAUTHORIZED); + e.addHeader("WWW-Authenticate", MALFORMED_TOKEN_WWW_AUTH_VALUE); + return e; + } + + /** + * Creates an {@link ElasticsearchSecurityException} that indicates the request contained an invalid grant + */ + private static ElasticsearchSecurityException invalidGrantException(String detail) { + ElasticsearchSecurityException e = + new ElasticsearchSecurityException("invalid_grant", RestStatus.BAD_REQUEST); + e.addHeader("error_description", detail); + return e; + } + + boolean isExpiredTokenException(ElasticsearchSecurityException e) { + final List headers = e.getHeader("WWW-Authenticate"); + return headers != null && headers.stream().anyMatch(EXPIRED_TOKEN_WWW_AUTH_VALUE::equals); + } + + boolean isExpirationInProgress() { + return expiredTokenRemover.isExpirationInProgress(); + } + + private class KeyComputingRunnable extends AbstractRunnable { + + private final BytesKey decodedSalt; + private final ActionListener listener; + private final KeyAndCache keyAndCache; + + KeyComputingRunnable(BytesKey decodedSalt, ActionListener listener, KeyAndCache keyAndCache) { + this.decodedSalt = decodedSalt; + this.listener = listener; + this.keyAndCache = keyAndCache; + } + + @Override + protected void doRun() { + try { + final SecretKey computedKey = keyAndCache.getOrComputeKey(decodedSalt); + listener.onResponse(computedKey); + } catch (ExecutionException e) { + if (e.getCause() != null && + (e.getCause() instanceof GeneralSecurityException || e.getCause() instanceof IOException + || e.getCause() instanceof IllegalArgumentException)) { + // this could happen if another realm supports the Bearer token so we should + // see if another realm can use this token! + logger.debug("unable to decode bearer token", e); + listener.onResponse(null); + } else { + listener.onFailure(e); + } + } + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + } + + /** + * Creates a new key unless present that is newer than the current active key and returns the corresponding metadata. Note: + * this method doesn't modify the metadata used in this token service. 
See {@link #refreshMetaData(TokenMetaData)} + */ + synchronized TokenMetaData generateSpareKey() { + KeyAndCache maxKey = keyCache.cache.values().stream().max(Comparator.comparingLong(v -> v.keyAndTimestamp.getTimestamp())).get(); + KeyAndCache currentKey = keyCache.activeKeyCache; + if (currentKey == maxKey) { + long timestamp = createdTimeStamps.incrementAndGet(); + while (true) { + byte[] saltArr = new byte[SALT_BYTES]; + secureRandom.nextBytes(saltArr); + SecureString tokenKey = generateTokenKey(); + KeyAndCache keyAndCache = new KeyAndCache(new KeyAndTimestamp(tokenKey, timestamp), new BytesKey(saltArr)); + if (keyCache.cache.containsKey(keyAndCache.getKeyHash())) { + continue; // collision -- generate a new key + } + return newTokenMetaData(keyCache.currentTokenKeyHash, Iterables.concat(keyCache.cache.values(), + Collections.singletonList(keyAndCache))); + } + } + return newTokenMetaData(keyCache.currentTokenKeyHash, keyCache.cache.values()); + } + + /** + * Rotate the current active key to the spare key created in the previous {@link #generateSpareKey()} call. + */ + synchronized TokenMetaData rotateToSpareKey() { + KeyAndCache maxKey = keyCache.cache.values().stream().max(Comparator.comparingLong(v -> v.keyAndTimestamp.getTimestamp())).get(); + if (maxKey == keyCache.activeKeyCache) { + throw new IllegalStateException("call generateSpareKey first"); + } + return newTokenMetaData(maxKey.getKeyHash(), keyCache.cache.values()); + } + + /** + * Prunes the keys and keeps up to the latest N keys around + * + * @param numKeysToKeep the number of keys to keep. + */ + synchronized TokenMetaData pruneKeys(int numKeysToKeep) { + if (keyCache.cache.size() <= numKeysToKeep) { + return getTokenMetaData(); // nothing to do + } + Map map = new HashMap<>(keyCache.cache.size() + 1); + KeyAndCache currentKey = keyCache.get(keyCache.currentTokenKeyHash); + ArrayList entries = new ArrayList<>(keyCache.cache.values()); + Collections.sort(entries, + (left, right) -> Long.compare(right.keyAndTimestamp.getTimestamp(), left.keyAndTimestamp.getTimestamp())); + for (KeyAndCache value : entries) { + if (map.size() < numKeysToKeep || value.keyAndTimestamp.getTimestamp() >= currentKey + .keyAndTimestamp.getTimestamp()) { + logger.debug("keeping key {} ", value.getKeyHash()); + map.put(value.getKeyHash(), value); + } else { + logger.debug("prune key {} ", value.getKeyHash()); + } + } + assert map.isEmpty() == false; + assert map.containsKey(keyCache.currentTokenKeyHash); + return newTokenMetaData(keyCache.currentTokenKeyHash, map.values()); + } + + /** + * Returns the current in-use metdata of this {@link TokenService} + */ + public synchronized TokenMetaData getTokenMetaData() { + return newTokenMetaData(keyCache.currentTokenKeyHash, keyCache.cache.values()); + } + + private TokenMetaData newTokenMetaData(BytesKey activeTokenKey, Iterable iterable) { + List list = new ArrayList<>(); + for (KeyAndCache v : iterable) { + list.add(v.keyAndTimestamp); + } + return new TokenMetaData(list, activeTokenKey.bytes); + } + + /** + * Refreshes the current in-use metadata. 
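+ * Existing key caches are reused for keys that are already known so that previously computed secret keys
+ * do not need to be derived again.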
+ */ + synchronized void refreshMetaData(TokenMetaData metaData) { + BytesKey currentUsedKeyHash = new BytesKey(metaData.getCurrentKeyHash()); + byte[] saltArr = new byte[SALT_BYTES]; + Map map = new HashMap<>(metaData.getKeys().size()); + long maxTimestamp = createdTimeStamps.get(); + for (KeyAndTimestamp key : metaData.getKeys()) { + secureRandom.nextBytes(saltArr); + KeyAndCache keyAndCache = new KeyAndCache(key, new BytesKey(saltArr)); + maxTimestamp = Math.max(keyAndCache.keyAndTimestamp.getTimestamp(), maxTimestamp); + if (keyCache.cache.containsKey(keyAndCache.getKeyHash()) == false) { + map.put(keyAndCache.getKeyHash(), keyAndCache); + } else { + map.put(keyAndCache.getKeyHash(), keyCache.get(keyAndCache.getKeyHash())); // maintain the cache we already have + } + } + if (map.containsKey(currentUsedKeyHash) == false) { + // this won't leak any secrets it's only exposing the current set of hashes + throw new IllegalStateException("Current key is not in the map: " + map.keySet() + " key: " + currentUsedKeyHash); + } + createdTimeStamps.set(maxTimestamp); + keyCache = new TokenKeys(Collections.unmodifiableMap(map), currentUsedKeyHash); + logger.debug("refreshed keys current: {}, keys: {}", currentUsedKeyHash, keyCache.cache.keySet()); + } + + private SecureString generateTokenKey() { + byte[] keyBytes = new byte[KEY_BYTES]; + byte[] encode = new byte[0]; + char[] ref = new char[0]; + try { + secureRandom.nextBytes(keyBytes); + encode = Base64.getUrlEncoder().withoutPadding().encode(keyBytes); + ref = new char[encode.length]; + int len = UnicodeUtil.UTF8toUTF16(encode, 0, encode.length, ref); + return new SecureString(Arrays.copyOfRange(ref, 0, len)); + } finally { + Arrays.fill(keyBytes, (byte) 0x00); + Arrays.fill(encode, (byte) 0x00); + Arrays.fill(ref, (char) 0x00); + } + } + + synchronized String getActiveKeyHash() { + return new BytesRef(Base64.getUrlEncoder().withoutPadding().encode(this.keyCache.currentTokenKeyHash.bytes)).utf8ToString(); + } + + void rotateKeysOnMaster(ActionListener listener) { + logger.info("rotate keys on master"); + TokenMetaData tokenMetaData = generateSpareKey(); + clusterService.submitStateUpdateTask("publish next key to prepare key rotation", + new TokenMetadataPublishAction( + ActionListener.wrap((res) -> { + if (res.isAcknowledged()) { + TokenMetaData metaData = rotateToSpareKey(); + clusterService.submitStateUpdateTask("publish next key to prepare key rotation", + new TokenMetadataPublishAction(listener, metaData)); + } else { + listener.onFailure(new IllegalStateException("not acked")); + } + }, listener::onFailure), tokenMetaData)); + } + + private final class TokenMetadataPublishAction extends AckedClusterStateUpdateTask { + + private final TokenMetaData tokenMetaData; + + protected TokenMetadataPublishAction(ActionListener listener, TokenMetaData tokenMetaData) { + super(new AckedRequest() { + @Override + public TimeValue ackTimeout() { + return AcknowledgedRequest.DEFAULT_ACK_TIMEOUT; + } + + @Override + public TimeValue masterNodeTimeout() { + return AcknowledgedRequest.DEFAULT_MASTER_NODE_TIMEOUT; + } + }, listener); + this.tokenMetaData = tokenMetaData; + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + if (tokenMetaData.equals(currentState.custom(TokenMetaData.TYPE))) { + return currentState; + } + return ClusterState.builder(currentState).putCustom(TokenMetaData.TYPE, tokenMetaData).build(); + } + + @Override + protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { + return new 
ClusterStateUpdateResponse(acknowledged); + } + + } + + private void initialize(ClusterService clusterService) { + clusterService.addListener(event -> { + ClusterState state = event.state(); + if (state.getBlocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) { + return; + } + + TokenMetaData custom = event.state().custom(TokenMetaData.TYPE); + if (custom != null && custom.equals(getTokenMetaData()) == false) { + logger.info("refresh keys"); + try { + refreshMetaData(custom); + } catch (Exception e) { + logger.warn("refreshing metadata failed", e); + } + logger.info("refreshed keys"); + } + }); + } + + /** + * For testing + */ + void clearActiveKeyCache() { + this.keyCache.activeKeyCache.keyCache.invalidateAll(); + } + + static final class KeyAndCache implements Closeable { + private final KeyAndTimestamp keyAndTimestamp; + private final Cache keyCache; + private final BytesKey salt; + private final BytesKey keyHash; + + private KeyAndCache(KeyAndTimestamp keyAndTimestamp, BytesKey salt) { + this.keyAndTimestamp = keyAndTimestamp; + keyCache = CacheBuilder.builder() + .setExpireAfterAccess(TimeValue.timeValueMinutes(60L)) + .setMaximumWeight(500L) + .build(); + try { + SecretKey secretKey = computeSecretKey(keyAndTimestamp.getKey().getChars(), salt.bytes); + keyCache.put(salt, secretKey); + } catch (Exception e) { + throw new IllegalStateException(e); + } + this.salt = salt; + this.keyHash = calculateKeyHash(keyAndTimestamp.getKey()); + } + + private SecretKey getKey(BytesKey salt) { + return keyCache.get(salt); + } + + public SecretKey getOrComputeKey(BytesKey decodedSalt) throws ExecutionException { + return keyCache.computeIfAbsent(decodedSalt, (salt) -> { + try (SecureString closeableChars = keyAndTimestamp.getKey().clone()) { + return computeSecretKey(closeableChars.getChars(), salt.bytes); + } + }); + } + + @Override + public void close() throws IOException { + keyAndTimestamp.getKey().close(); + } + + BytesKey getKeyHash() { + return keyHash; + } + + private static BytesKey calculateKeyHash(SecureString key) { + MessageDigest messageDigest = MessageDigests.sha256(); + BytesRefBuilder b = new BytesRefBuilder(); + try { + b.copyChars(key); + BytesRef bytesRef = b.toBytesRef(); + try { + messageDigest.update(bytesRef.bytes, bytesRef.offset, bytesRef.length); + return new BytesKey(Arrays.copyOfRange(messageDigest.digest(), 0, 8)); + } finally { + Arrays.fill(bytesRef.bytes, (byte) 0x00); + } + } finally { + Arrays.fill(b.bytes(), (byte) 0x00); + } + } + + BytesKey getSalt() { + return salt; + } + } + + + private static final class TokenKeys { + final Map cache; + final BytesKey currentTokenKeyHash; + final KeyAndCache activeKeyCache; + + private TokenKeys(Map cache, BytesKey currentTokenKeyHash) { + this.cache = cache; + this.currentTokenKeyHash = currentTokenKeyHash; + this.activeKeyCache = cache.get(currentTokenKeyHash); + } + + KeyAndCache get(BytesKey passphraseHash) { + return cache.get(passphraseHash); + } + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/UserToken.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/UserToken.java new file mode 100644 index 0000000000000..0d7d33ed55c0b --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/UserToken.java @@ -0,0 +1,156 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc; + +import org.elasticsearch.Version; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.security.authc.Authentication; + +import java.io.IOException; +import java.time.Instant; +import java.util.Base64; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +/** + * This token is a combination of a {@link Authentication} object with an expiry. This token can be + * serialized for use later. Note, if serializing this token to a entity outside of the cluster, + * care must be taken to encrypt and validate the serialized bytes or they cannot be trusted. + * + * Additionally, care must also be used when transporting these tokens as a stolen token can be + * used by an adversary to gain access. For this reason, TLS must be enabled for these tokens to + * be used. + */ +public final class UserToken implements Writeable, ToXContentObject { + + private final Version version; + private final String id; + private final Authentication authentication; + private final Instant expirationTime; + private final Map metadata; + + /** + * Create a new token with an autogenerated id + */ + UserToken(Authentication authentication, Instant expirationTime) { + this(Version.CURRENT, authentication, expirationTime, Collections.emptyMap()); + } + + /** + * Create a new token with an autogenerated id + */ + UserToken(Version version, Authentication authentication, Instant expirationTime, Map metadata) { + this(UUIDs.randomBase64UUID(), version, authentication, expirationTime, metadata); + } + + /** + * Create a new token from an existing id + */ + UserToken(String id, Version version, Authentication authentication, Instant expirationTime, Map metadata) { + this.version = Objects.requireNonNull(version); + this.id = Objects.requireNonNull(id); + this.authentication = Objects.requireNonNull(authentication); + this.expirationTime = Objects.requireNonNull(expirationTime); + this.metadata = metadata; + } + + /** + * Creates a new token based on the values from the stream + */ + UserToken(StreamInput input) throws IOException { + this.version = input.getVersion(); + this.id = input.readString(); + this.authentication = new Authentication(input); + this.expirationTime = Instant.ofEpochSecond(input.readLong(), input.readInt()); + if (version.before(Version.V_6_2_0)) { + this.metadata = Collections.emptyMap(); + } else { + this.metadata = input.readMap(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(id); + authentication.writeTo(out); + out.writeLong(expirationTime.getEpochSecond()); + out.writeInt(expirationTime.getNano()); + if (out.getVersion().onOrAfter(Version.V_6_2_0)) { + out.writeMap(metadata); + } + } + + /** + * Get the authentication + */ + Authentication getAuthentication() { + return authentication; + } + + /** + * Get the expiration time + */ + Instant getExpirationTime() { + return expirationTime; + } + + /** + * The ID of this token + */ + public String getId() { + return id; + } + + /** + * The 
version of the node this token was created on + */ + Version getVersion() { + return version; + } + + /** + * The metadata associated with this token + */ + public Map getMetadata() { + return metadata; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("id", id); + builder.field("expiration_time", expirationTime.toEpochMilli()); + builder.field("version", version.id); + builder.field("metadata", metadata); + try (BytesStreamOutput output = new BytesStreamOutput()) { + output.setVersion(version); + authentication.writeTo(output); + builder.field("authentication", output.bytes().toBytesRef().bytes); + } + return builder.endObject(); + } + + static UserToken fromSourceMap(Map source) throws IOException { + final String id = (String) source.get("id"); + final Long expirationEpochMilli = (Long) source.get("expiration_time"); + final Integer versionId = (Integer) source.get("version"); + final Map metadata = (Map) source.get("metadata"); + final String authString = (String) source.get("authentication"); + final Version version = Version.fromId(versionId); + try (StreamInput in = StreamInput.wrap(Base64.getDecoder().decode(authString))) { + in.setVersion(version); + Authentication authentication = new Authentication(in); + return new UserToken(id, version, authentication, Instant.ofEpochMilli(expirationEpochMilli), metadata); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java new file mode 100644 index 0000000000000..b149fec3d3db8 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java @@ -0,0 +1,394 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.esnative; + +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.Appender; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.appender.AbstractAppender; +import org.apache.logging.log4j.core.config.Configuration; +import org.apache.logging.log4j.core.config.LoggerConfig; +import org.apache.logging.log4j.core.layout.PatternLayout; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.LoggingAwareMultiCommand; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.Terminal.Verbosity; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.common.socket.SocketAccess; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.security.authz.store.FileRolesStore; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.authc.file.FileUserPasswdStore; +import org.elasticsearch.xpack.security.authc.file.FileUserRolesStore; + +import javax.net.ssl.HttpsURLConnection; + +import java.io.BufferedReader; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.net.HttpURLConnection; +import java.net.URI; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.security.SecurityField.setting; + +/** + * This is the command-line tool used for migrating users and roles from the file-based realm into the new native realm using the API for + * import. It reads from the files and tries its best to add the users, showing an error if it was incapable of importing them. Any existing + * users or roles are skipped. 
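+ * <p>
+ * Illustrative invocation only; the launcher script name shown below is an assumption and is not defined in this file.
+ * The {@code native} subcommand and the {@code -u/-p/-U/-n/-r} options are the ones declared by this class:
+ * <pre>
+ * bin/elasticsearch-migrate native -U http://localhost:9200 -u elastic -p changeme -n user1,user2 -r role1,role2
+ * </pre>
+ * Omitting {@code -n}/{@code -r} migrates every user and role found in the files.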
+ */ +public class ESNativeRealmMigrateTool extends LoggingAwareMultiCommand { + + public static void main(String[] args) throws Exception { + exit(new ESNativeRealmMigrateTool().main(args, Terminal.DEFAULT)); + } + + public ESNativeRealmMigrateTool() { + super("Imports file-based users and roles to the native security realm"); + subcommands.put("native", newMigrateUserOrRoles()); + } + + protected MigrateUserOrRoles newMigrateUserOrRoles() { + return new MigrateUserOrRoles(); + } + + /** + * Command to migrate users and roles to the native realm + */ + public static class MigrateUserOrRoles extends EnvironmentAwareCommand { + + private final OptionSpec username; + private final OptionSpec password; + private final OptionSpec url; + private final OptionSpec usersToMigrateCsv; + private final OptionSpec rolesToMigrateCsv; + + public MigrateUserOrRoles() { + super("Migrates users or roles from file to native realm"); + this.username = parser.acceptsAll(Arrays.asList("u", "username"), + "User used to authenticate with Elasticsearch") + .withRequiredArg(); + this.password = parser.acceptsAll(Arrays.asList("p", "password"), + "Password used to authenticate with Elasticsearch") + .withRequiredArg(); + this.url = parser.acceptsAll(Arrays.asList("U", "url"), + "URL of Elasticsearch host") + .withRequiredArg(); + this.usersToMigrateCsv = parser.acceptsAll(Arrays.asList("n", "users"), + "Users to migrate from file to native realm") + .withRequiredArg(); + this.rolesToMigrateCsv = parser.acceptsAll(Arrays.asList("r", "roles"), + "Roles to migrate from file to native realm") + .withRequiredArg(); + } + + // Visible for testing + public OptionParser getParser() { + return this.parser; + } + + @Override + protected void printAdditionalHelp(Terminal terminal) { + terminal.println("This tool migrates file based users[1] and roles[2] to the native realm in"); + terminal.println("elasticsearch, saving the administrator from needing to manually transition"); + terminal.println("them from the file."); + } + + // Visible for testing + @Override + public void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + terminal.println("starting migration of users and roles..."); + importUsers(terminal, env, options); + importRoles(terminal, env, options); + terminal.println("users and roles imported."); + } + + @SuppressForbidden(reason = "We call connect in doPrivileged and provide SocketPermission") + private String postURL(Settings settings, Environment env, String method, String urlString, + OptionSet options, @Nullable String bodyString) throws Exception { + URI uri = new URI(urlString); + URL url = uri.toURL(); + HttpURLConnection conn; + // If using SSL, need a custom service because it's likely a self-signed certificate + if ("https".equalsIgnoreCase(uri.getScheme())) { + Settings sslSettings = settings.getByPrefix(setting("http.ssl.")); + final SSLService sslService = new SSLService(settings, env); + final HttpsURLConnection httpsConn = (HttpsURLConnection) url.openConnection(); + AccessController.doPrivileged((PrivilegedAction) () -> { + // Requires permission java.lang.RuntimePermission "setFactory"; + httpsConn.setSSLSocketFactory(sslService.sslSocketFactory(sslSettings)); + return null; + }); + conn = httpsConn; + } else { + conn = (HttpURLConnection) url.openConnection(); + } + conn.setRequestMethod(method); + conn.setReadTimeout(30 * 1000); // 30 second timeout + // Add basic-auth header + conn.setRequestProperty("Authorization", + 
UsernamePasswordToken.basicAuthHeaderValue(username.value(options), + new SecureString(password.value(options).toCharArray()))); + conn.setRequestProperty("Content-Type", XContentType.JSON.mediaType()); + conn.setDoOutput(true); // we'll be sending a body + SocketAccess.doPrivileged(conn::connect); + if (bodyString != null) { + try (OutputStream out = conn.getOutputStream()) { + out.write(bodyString.getBytes(StandardCharsets.UTF_8)); + } catch (Exception e) { + try { + conn.disconnect(); + } catch (Exception e2) { + // Ignore exceptions if we weren't able to close the connection after an error + } + throw e; + } + } + try (BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) { + StringBuilder sb = new StringBuilder(); + String line = null; + while ((line = reader.readLine()) != null) { + sb.append(line); + } + return sb.toString(); + } catch (IOException e) { + try (BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getErrorStream(), StandardCharsets.UTF_8))) { + StringBuilder sb = new StringBuilder(); + String line = null; + while ((line = reader.readLine()) != null) { + sb.append(line); + } + throw new IOException(sb.toString(), e); + } + } finally { + conn.disconnect(); + } + } + + Set getUsersThatExist(Terminal terminal, Settings settings, Environment env, OptionSet options) throws Exception { + Set existingUsers = new HashSet<>(); + String allUsersJson = postURL(settings, env, "GET", this.url.value(options) + "/_xpack/security/user/", options, null); + // EMPTY is safe here because we never use namedObject + try (XContentParser parser = JsonXContent.jsonXContent + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, allUsersJson)) { + XContentParser.Token token = parser.nextToken(); + String userName; + if (token == XContentParser.Token.START_OBJECT) { + while ((token = parser.nextToken()) == XContentParser.Token.FIELD_NAME) { + userName = parser.currentName(); + existingUsers.add(userName); + parser.nextToken(); + parser.skipChildren(); + } + } else { + throw new ElasticsearchException("failed to retrieve users, expecting an object but got: " + token); + } + } + terminal.println("found existing users: " + existingUsers); + return existingUsers; + } + + static String createUserJson(String[] roles, char[] password) throws IOException { + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + { + builder.field("password_hash", new String(password)); + builder.startArray("roles"); + for (String role : roles) { + builder.value(role); + } + builder.endArray(); + } + builder.endObject(); + return Strings.toString(builder); + } + + void importUsers(Terminal terminal, Environment env, OptionSet options) throws FileNotFoundException { + String usersCsv = usersToMigrateCsv.value(options); + String[] usersToMigrate = (usersCsv != null) ? 
usersCsv.split(",") : Strings.EMPTY_ARRAY; + Path usersFile = FileUserPasswdStore.resolveFile(env); + Path usersRolesFile = FileUserRolesStore.resolveFile(env); + if (Files.exists(usersFile) == false) { + throw new FileNotFoundException("users file [" + usersFile + "] does not exist"); + } else if (Files.exists(usersRolesFile) == false) { + throw new FileNotFoundException("users_roles file [" + usersRolesFile + "] does not exist"); + } + + terminal.println("importing users from [" + usersFile + "]..."); + final Logger logger = getTerminalLogger(terminal); + Map userToHashedPW = FileUserPasswdStore.parseFile(usersFile, logger, env.settings()); + Map userToRoles = FileUserRolesStore.parseFile(usersRolesFile, logger); + Set existingUsers; + try { + existingUsers = getUsersThatExist(terminal, env.settings(), env, options); + } catch (Exception e) { + throw new ElasticsearchException("failed to get users that already exist, skipping user import", e); + } + if (usersToMigrate.length == 0) { + usersToMigrate = userToHashedPW.keySet().toArray(new String[userToHashedPW.size()]); + } + for (String user : usersToMigrate) { + if (userToHashedPW.containsKey(user) == false) { + terminal.println("user [" + user + "] was not found in files, skipping"); + continue; + } else if (existingUsers.contains(user)) { + terminal.println("user [" + user + "] already exists, skipping"); + continue; + } + terminal.println("migrating user [" + user + "]"); + String reqBody = "n/a"; + try { + reqBody = createUserJson(userToRoles.get(user), userToHashedPW.get(user)); + String resp = postURL(env.settings(), env, "POST", + this.url.value(options) + "/_xpack/security/user/" + user, options, reqBody); + terminal.println(resp); + } catch (Exception e) { + throw new ElasticsearchException("failed to migrate user [" + user + "] with body: " + reqBody, e); + } + } + } + + Set getRolesThatExist(Terminal terminal, Settings settings, Environment env, OptionSet options) throws Exception { + Set existingRoles = new HashSet<>(); + String allRolesJson = postURL(settings, env, "GET", this.url.value(options) + "/_xpack/security/role/", options, null); + // EMPTY is safe here because we never use namedObject + try (XContentParser parser = JsonXContent.jsonXContent + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, allRolesJson)) { + XContentParser.Token token = parser.nextToken(); + String roleName; + if (token == XContentParser.Token.START_OBJECT) { + while ((token = parser.nextToken()) == XContentParser.Token.FIELD_NAME) { + roleName = parser.currentName(); + existingRoles.add(roleName); + parser.nextToken(); + parser.skipChildren(); + } + } else { + throw new ElasticsearchException("failed to retrieve roles, expecting an object but got: " + token); + } + } + terminal.println("found existing roles: " + existingRoles); + return existingRoles; + } + + static String createRoleJson(RoleDescriptor rd) throws IOException { + XContentBuilder builder = jsonBuilder(); + rd.toXContent(builder, ToXContent.EMPTY_PARAMS, true); + return Strings.toString(builder); + } + + void importRoles(Terminal terminal, Environment env, OptionSet options) throws FileNotFoundException { + String rolesCsv = rolesToMigrateCsv.value(options); + String[] rolesToMigrate = (rolesCsv != null) ? 
rolesCsv.split(",") : Strings.EMPTY_ARRAY; + Path rolesFile = FileRolesStore.resolveFile(env).toAbsolutePath(); + if (Files.exists(rolesFile) == false) { + throw new FileNotFoundException("roles.yml file [" + rolesFile + "] does not exist"); + } + terminal.println("importing roles from [" + rolesFile + "]..."); + Logger logger = getTerminalLogger(terminal); + Map roles = FileRolesStore.parseRoleDescriptors(rolesFile, logger, true, Settings.EMPTY); + Set existingRoles; + try { + existingRoles = getRolesThatExist(terminal, env.settings(), env, options); + } catch (Exception e) { + throw new ElasticsearchException("failed to get roles that already exist, skipping role import", e); + } + if (rolesToMigrate.length == 0) { + rolesToMigrate = roles.keySet().toArray(new String[roles.size()]); + } + for (String roleName : rolesToMigrate) { + if (roles.containsKey(roleName) == false) { + terminal.println("no role [" + roleName + "] found, skipping"); + continue; + } else if (existingRoles.contains(roleName)) { + terminal.println("role [" + roleName + "] already exists, skipping"); + continue; + } + terminal.println("migrating role [" + roleName + "]"); + String reqBody = "n/a"; + try { + reqBody = createRoleJson(roles.get(roleName)); + String resp = postURL(env.settings(), env, "POST", + this.url.value(options) + "/_xpack/security/role/" + roleName, options, reqBody); + terminal.println(resp); + } catch (Exception e) { + throw new ElasticsearchException("failed to migrate role [" + roleName + "] with body: " + reqBody, e); + } + } + } + } + + /** + * Creates a new Logger that is detached from the ROOT logger and only has an appender that will output log messages to the terminal + */ + static Logger getTerminalLogger(final Terminal terminal) { + final Logger logger = ESLoggerFactory.getLogger(ESNativeRealmMigrateTool.class); + Loggers.setLevel(logger, Level.ALL); + + // create appender + final Appender appender = new AbstractAppender(ESNativeRealmMigrateTool.class.getName(), null, + PatternLayout.newBuilder().withPattern("%m").build()) { + @Override + public void append(LogEvent event) { + switch (event.getLevel().getStandardLevel()) { + case FATAL: + case ERROR: + terminal.println(Verbosity.NORMAL, event.getMessage().getFormattedMessage()); + break; + case OFF: + break; + default: + terminal.println(Verbosity.VERBOSE, event.getMessage().getFormattedMessage()); + break; + } + } + }; + appender.start(); + + // get the config, detach from parent, remove appenders, add custom appender + final LoggerContext ctx = (LoggerContext) LogManager.getContext(false); + final Configuration config = ctx.getConfiguration(); + final LoggerConfig loggerConfig = config.getLoggerConfig(ESNativeRealmMigrateTool.class.getName()); + loggerConfig.setParent(null); + loggerConfig.getAppenders().forEach((s, a) -> Loggers.removeAppender(logger, a)); + Loggers.addAppender(logger, appender); + return logger; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java new file mode 100644 index 0000000000000..6b8f9eb703db0 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.esnative; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.health.ClusterIndexHealth; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; + +/** + * User/password realm that is backed by an Elasticsearch index + */ +public class NativeRealm extends CachingUsernamePasswordRealm { + + private final NativeUsersStore userStore; + + public NativeRealm(RealmConfig config, NativeUsersStore usersStore) { + super(NativeRealmSettings.TYPE, config); + this.userStore = usersStore; + } + + @Override + protected void doLookupUser(String username, ActionListener listener) { + userStore.getUser(username, listener); + } + + @Override + protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { + userStore.verifyPassword(token.principal(), token.credentials(), listener); + } + + public void onSecurityIndexHealthChange(ClusterIndexHealth previousHealth, ClusterIndexHealth currentHealth) { + final boolean movedFromRedToNonRed = (previousHealth == null || previousHealth.getStatus() == ClusterHealthStatus.RED) + && currentHealth != null && currentHealth.getStatus() != ClusterHealthStatus.RED; + final boolean indexDeleted = previousHealth != null && currentHealth == null; + + if (movedFromRedToNonRed || indexDeleted) { + clearCache(); + } + } + + // method is used for testing to verify cache expiration since expireAll is final + void clearCache() { + expireAll(); + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java new file mode 100644 index 0000000000000..d4d71523fea50 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java @@ -0,0 +1,642 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.esnative; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.action.support.TransportActions; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; +import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.Requests; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.engine.DocumentMissingException; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.xpack.core.security.ScrollHelper; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheRequest; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheResponse; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordRequest; +import org.elasticsearch.xpack.core.security.action.user.DeleteUserRequest; +import org.elasticsearch.xpack.core.security.action.user.PutUserRequest; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.esnative.ClientReservedRealm; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.user.User.Fields; +import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.security.SecurityLifecycleService; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; +import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; + +/** + * NativeUsersStore is a store for users that reads from an Elasticsearch index. 
This store is responsible for fetching the full + * {@link User} object, which includes the names of the roles assigned to the user. + * <p>
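+ * Native users are stored in the security index under type {@code doc} with document ids of the form
+ * {@code user-&lt;username&gt;}; reserved users use ids of the form {@code reserved-user-&lt;username&gt;}
+ * (see {@code getIdForUser}).
+ * <p>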
+ * No caching is done by this class, it is handled at a higher level and no polling for changes is done by this class. Modification + * operations make a best effort attempt to clear the cache on all nodes for the user that was modified. + */ +public class NativeUsersStore extends AbstractComponent { + + public static final String INDEX_TYPE = "doc"; + static final String USER_DOC_TYPE = "user"; + public static final String RESERVED_USER_TYPE = "reserved-user"; + + + private final Hasher hasher = Hasher.BCRYPT; + private final Client client; + + private volatile SecurityLifecycleService securityLifecycleService; + + public NativeUsersStore(Settings settings, Client client, SecurityLifecycleService securityLifecycleService) { + super(settings); + this.client = client; + this.securityLifecycleService = securityLifecycleService; + } + + /** + * Blocking version of {@code getUser} that blocks until the User is returned + */ + public void getUser(String username, ActionListener listener) { + getUserAndPassword(username, ActionListener.wrap((uap) -> { + listener.onResponse(uap == null ? null : uap.user()); + }, listener::onFailure)); + } + + /** + * Retrieve a list of users, if userNames is null or empty, fetch all users + */ + public void getUsers(String[] userNames, final ActionListener> listener) { + final Consumer handleException = (t) -> { + if (t instanceof IndexNotFoundException) { + logger.trace("could not retrieve users because security index does not exist"); + // We don't invoke the onFailure listener here, instead just pass an empty list + listener.onResponse(Collections.emptyList()); + } else { + listener.onFailure(t); + } + }; + + if (securityLifecycleService.isSecurityIndexExisting() == false) { + // TODO remove this short circuiting and fix tests that fail without this! + listener.onResponse(Collections.emptyList()); + } else if (userNames.length == 1) { // optimization for single user lookup + final String username = userNames[0]; + getUserAndPassword(username, ActionListener.wrap( + (uap) -> listener.onResponse(uap == null ? Collections.emptyList() : Collections.singletonList(uap.user())), + handleException)); + } else { + securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + final QueryBuilder query; + if (userNames == null || userNames.length == 0) { + query = QueryBuilders.termQuery(Fields.TYPE.getPreferredName(), USER_DOC_TYPE); + } else { + final String[] users = Arrays.asList(userNames).stream() + .map(s -> getIdForUser(USER_DOC_TYPE, s)).toArray(String[]::new); + query = QueryBuilders.boolQuery().filter(QueryBuilders.idsQuery(INDEX_TYPE).addIds(users)); + } + final Supplier supplier = client.threadPool().getThreadContext().newRestorableContext(false); + try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN)) { + SearchRequest request = client.prepareSearch(SECURITY_INDEX_NAME) + .setScroll(TimeValue.timeValueSeconds(10L)) + .setQuery(query) + .setSize(1000) + .setFetchSource(true) + .request(); + request.indicesOptions().ignoreUnavailable(); + ScrollHelper.fetchAllByEntity(client, request, new ContextPreservingActionListener<>(supplier, listener), (hit) -> { + UserAndPassword u = transformUser(hit.getId(), hit.getSourceAsMap()); + return u != null ? 
u.user() : null; + }); + } + }); + } + } + + /** + * Async method to retrieve a user and their password + */ + private void getUserAndPassword(final String user, final ActionListener listener) { + if (securityLifecycleService.isSecurityIndexExisting() == false) { + // TODO remove this short circuiting and fix tests that fail without this! + listener.onResponse(null); + } else { + securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, + client.prepareGet(SECURITY_INDEX_NAME, + INDEX_TYPE, getIdForUser(USER_DOC_TYPE, user)).request(), + new ActionListener() { + @Override + public void onResponse(GetResponse response) { + listener.onResponse(transformUser(response.getId(), response.getSource())); + } + + @Override + public void onFailure(Exception t) { + if (t instanceof IndexNotFoundException) { + logger.trace( + (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + "could not retrieve user [{}] because security index does not exist", user), t); + } else { + logger.error(new ParameterizedMessage("failed to retrieve user [{}]", user), t); + } + // We don't invoke the onFailure listener here, instead + // we call the response with a null user + listener.onResponse(null); + } + }, client::get)); + } + } + + /** + * Async method to change the password of a native or reserved user. If a reserved user does not exist, the document will be created + * with a hash of the provided password. + */ + public void changePassword(final ChangePasswordRequest request, final ActionListener listener) { + final String username = request.username(); + assert SystemUser.NAME.equals(username) == false && XPackUser.NAME.equals(username) == false : username + "is internal!"; + final String docType; + if (ClientReservedRealm.isReserved(username, settings)) { + docType = RESERVED_USER_TYPE; + } else { + docType = USER_DOC_TYPE; + } + + securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, + client.prepareUpdate(SECURITY_INDEX_NAME, INDEX_TYPE, getIdForUser(docType, username)) + .setDoc(Requests.INDEX_CONTENT_TYPE, Fields.PASSWORD.getPreferredName(), + String.valueOf(request.passwordHash())) + .setRefreshPolicy(request.getRefreshPolicy()).request(), + new ActionListener() { + @Override + public void onResponse(UpdateResponse updateResponse) { + assert updateResponse.getResult() == DocWriteResponse.Result.UPDATED; + clearRealmCache(request.username(), listener, null); + } + + @Override + public void onFailure(Exception e) { + if (isIndexNotFoundOrDocumentMissing(e)) { + if (docType.equals(RESERVED_USER_TYPE)) { + createReservedUser(username, request.passwordHash(), request.getRefreshPolicy(), listener); + } else { + logger.debug((org.apache.logging.log4j.util.Supplier) () -> + new ParameterizedMessage("failed to change password for user [{}]", request.username()), e); + ValidationException validationException = new ValidationException(); + validationException.addValidationError("user must exist in order to change password"); + listener.onFailure(validationException); + } + } else { + listener.onFailure(e); + } + } + }, client::update); + }); + } + + /** + * Asynchronous method to create a reserved user with the given password hash. 
The cache for the user will be cleared after the document + * has been indexed + */ + private void createReservedUser(String username, char[] passwordHash, RefreshPolicy refresh, ActionListener listener) { + securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, + client.prepareIndex(SECURITY_INDEX_NAME, INDEX_TYPE, + getIdForUser(RESERVED_USER_TYPE, username)) + .setSource(Fields.PASSWORD.getPreferredName(), String.valueOf(passwordHash), + Fields.ENABLED.getPreferredName(), true, + Fields.TYPE.getPreferredName(), RESERVED_USER_TYPE) + .setRefreshPolicy(refresh).request(), + new ActionListener() { + @Override + public void onResponse(IndexResponse indexResponse) { + clearRealmCache(username, listener, null); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }, client::index); + }); + } + + /** + * Asynchronous method to put a user. A put user request without a password hash is treated as an update and will fail with a + * {@link ValidationException} if the user does not exist. If a password hash is provided, then we issue a update request with an + * upsert document as well; the upsert document sets the enabled flag of the user to true but if the document already exists, this + * method will not modify the enabled value. + */ + public void putUser(final PutUserRequest request, final ActionListener listener) { + if (request.passwordHash() == null) { + updateUserWithoutPassword(request, listener); + } else { + indexUser(request, listener); + } + } + + /** + * Handles updating a user that should already exist where their password should not change + */ + private void updateUserWithoutPassword(final PutUserRequest putUserRequest, final ActionListener listener) { + assert putUserRequest.passwordHash() == null; + // We must have an existing document + securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, + client.prepareUpdate(SECURITY_INDEX_NAME, INDEX_TYPE, + getIdForUser(USER_DOC_TYPE, putUserRequest.username())) + .setDoc(Requests.INDEX_CONTENT_TYPE, + Fields.USERNAME.getPreferredName(), putUserRequest.username(), + Fields.ROLES.getPreferredName(), putUserRequest.roles(), + Fields.FULL_NAME.getPreferredName(), putUserRequest.fullName(), + Fields.EMAIL.getPreferredName(), putUserRequest.email(), + Fields.METADATA.getPreferredName(), putUserRequest.metadata(), + Fields.ENABLED.getPreferredName(), putUserRequest.enabled(), + Fields.TYPE.getPreferredName(), USER_DOC_TYPE) + .setRefreshPolicy(putUserRequest.getRefreshPolicy()) + .request(), + new ActionListener() { + @Override + public void onResponse(UpdateResponse updateResponse) { + assert updateResponse.getResult() == DocWriteResponse.Result.UPDATED; + clearRealmCache(putUserRequest.username(), listener, false); + } + + @Override + public void onFailure(Exception e) { + Exception failure = e; + if (isIndexNotFoundOrDocumentMissing(e)) { + // if the index doesn't exist we can never update a user + // if the document doesn't exist, then this update is not valid + logger.debug((org.apache.logging.log4j.util.Supplier) + () -> new ParameterizedMessage("failed to update user document with username [{}]", + putUserRequest.username()), e); + ValidationException validationException = new ValidationException(); + validationException + .addValidationError("password must be specified unless you 
are updating an existing user"); + failure = validationException; + } + listener.onFailure(failure); + } + }, client::update); + }); + } + + private void indexUser(final PutUserRequest putUserRequest, final ActionListener listener) { + assert putUserRequest.passwordHash() != null; + securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, + client.prepareIndex(SECURITY_INDEX_NAME, INDEX_TYPE, + getIdForUser(USER_DOC_TYPE, putUserRequest.username())) + .setSource(Fields.USERNAME.getPreferredName(), putUserRequest.username(), + Fields.PASSWORD.getPreferredName(), String.valueOf(putUserRequest.passwordHash()), + Fields.ROLES.getPreferredName(), putUserRequest.roles(), + Fields.FULL_NAME.getPreferredName(), putUserRequest.fullName(), + Fields.EMAIL.getPreferredName(), putUserRequest.email(), + Fields.METADATA.getPreferredName(), putUserRequest.metadata(), + Fields.ENABLED.getPreferredName(), putUserRequest.enabled(), + Fields.TYPE.getPreferredName(), USER_DOC_TYPE) + .setRefreshPolicy(putUserRequest.getRefreshPolicy()) + .request(), + new ActionListener() { + @Override + public void onResponse(IndexResponse updateResponse) { + clearRealmCache(putUserRequest.username(), listener, + updateResponse.getResult() == DocWriteResponse.Result.CREATED); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }, client::index); + }); + } + + /** + * Asynchronous method that will update the enabled flag of a user. If the user is reserved and the document does not exist, a document + * will be created. If the user is not reserved, the user must exist otherwise the operation will fail. + */ + public void setEnabled(final String username, final boolean enabled, final RefreshPolicy refreshPolicy, + final ActionListener listener) { + if (ClientReservedRealm.isReserved(username, settings)) { + setReservedUserEnabled(username, enabled, refreshPolicy, true, listener); + } else { + setRegularUserEnabled(username, enabled, refreshPolicy, listener); + } + } + + private void setRegularUserEnabled(final String username, final boolean enabled, final RefreshPolicy refreshPolicy, + final ActionListener listener) { + securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, + client.prepareUpdate(SECURITY_INDEX_NAME, INDEX_TYPE, + getIdForUser(USER_DOC_TYPE, username)) + .setDoc(Requests.INDEX_CONTENT_TYPE, Fields.ENABLED.getPreferredName(), enabled) + .setRefreshPolicy(refreshPolicy) + .request(), + new ActionListener() { + @Override + public void onResponse(UpdateResponse updateResponse) { + clearRealmCache(username, listener, null); + } + + @Override + public void onFailure(Exception e) { + Exception failure = e; + if (isIndexNotFoundOrDocumentMissing(e)) { + // if the index doesn't exist we can never update a user + // if the document doesn't exist, then this update is not valid + logger.debug((org.apache.logging.log4j.util.Supplier) + () -> new ParameterizedMessage("failed to {} user [{}]", + enabled ? "enable" : "disable", username), e); + ValidationException validationException = new ValidationException(); + validationException.addValidationError("only existing users can be " + + (enabled ? 
"enabled" : "disabled")); + failure = validationException; + } + listener.onFailure(failure); + } + }, client::update); + }); + } + + private void setReservedUserEnabled(final String username, final boolean enabled, final RefreshPolicy refreshPolicy, + boolean clearCache, final ActionListener listener) { + securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, + client.prepareUpdate(SECURITY_INDEX_NAME, INDEX_TYPE, + getIdForUser(RESERVED_USER_TYPE, username)) + .setDoc(Requests.INDEX_CONTENT_TYPE, Fields.ENABLED.getPreferredName(), enabled) + .setUpsert(XContentType.JSON, + Fields.PASSWORD.getPreferredName(), "", + Fields.ENABLED.getPreferredName(), enabled, + Fields.TYPE.getPreferredName(), RESERVED_USER_TYPE) + .setRefreshPolicy(refreshPolicy) + .request(), + new ActionListener() { + @Override + public void onResponse(UpdateResponse updateResponse) { + if (clearCache) { + clearRealmCache(username, listener, null); + } else { + listener.onResponse(null); + } + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }, client::update); + }); + } + + public void deleteUser(final DeleteUserRequest deleteUserRequest, final ActionListener listener) { + securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + DeleteRequest request = client.prepareDelete(SECURITY_INDEX_NAME, + INDEX_TYPE, getIdForUser(USER_DOC_TYPE, deleteUserRequest.username())).request(); + request.setRefreshPolicy(deleteUserRequest.getRefreshPolicy()); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, + new ActionListener() { + @Override + public void onResponse(DeleteResponse deleteResponse) { + clearRealmCache(deleteUserRequest.username(), listener, + deleteResponse.getResult() == DocWriteResponse.Result.DELETED); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }, client::delete); + }); + } + + /** + * This method is used to verify the username and credentials against those stored in the system. + * + * @param username username to lookup the user by + * @param password the plaintext password to verify + */ + void verifyPassword(String username, final SecureString password, ActionListener listener) { + getUserAndPassword(username, ActionListener.wrap((userAndPassword) -> { + if (userAndPassword == null || userAndPassword.passwordHash() == null) { + listener.onResponse(AuthenticationResult.notHandled()); + } else if (hasher.verify(password, userAndPassword.passwordHash())) { + listener.onResponse(AuthenticationResult.success(userAndPassword.user())); + } else { + listener.onResponse(AuthenticationResult.unsuccessful("Password authentication failed for " + username, null)); + } + }, listener::onFailure)); + } + + void getReservedUserInfo(String username, ActionListener listener) { + if (securityLifecycleService.isSecurityIndexExisting() == false) { + // TODO remove this short circuiting and fix tests that fail without this! 
+ listener.onResponse(null); + } else { + securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, + client.prepareGet(SECURITY_INDEX_NAME, INDEX_TYPE, + getIdForUser(RESERVED_USER_TYPE, username)).request(), + new ActionListener() { + @Override + public void onResponse(GetResponse getResponse) { + if (getResponse.isExists()) { + Map sourceMap = getResponse.getSourceAsMap(); + String password = (String) sourceMap.get(Fields.PASSWORD.getPreferredName()); + Boolean enabled = (Boolean) sourceMap.get(Fields.ENABLED.getPreferredName()); + if (password == null) { + listener.onFailure(new IllegalStateException("password hash must not be null!")); + } else if (enabled == null) { + listener.onFailure(new IllegalStateException("enabled must not be null!")); + } else if (password.isEmpty()) { + listener.onResponse((enabled ? ReservedRealm.ENABLED_DEFAULT_USER_INFO : ReservedRealm + .DISABLED_DEFAULT_USER_INFO).deepClone()); + } else { + listener.onResponse(new ReservedUserInfo(password.toCharArray(), enabled, false)); + } + } else { + listener.onResponse(null); + } + } + + @Override + public void onFailure(Exception e) { + if (TransportActions.isShardNotAvailableException(e)) { + logger.trace((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + "could not retrieve built in user [{}] info since security index unavailable", username), + e); + } + listener.onFailure(e); + } + }, client::get)); + } + } + + void getAllReservedUserInfo(ActionListener> listener) { + securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, + client.prepareSearch(SECURITY_INDEX_NAME) + .setQuery(QueryBuilders.termQuery(Fields.TYPE.getPreferredName(), RESERVED_USER_TYPE)) + .setFetchSource(true).request(), + new ActionListener() { + @Override + public void onResponse(SearchResponse searchResponse) { + Map userInfos = new HashMap<>(); + assert searchResponse.getHits().getTotalHits() <= 10 : + "there are more than 10 reserved users we need to change this to retrieve them all!"; + for (SearchHit searchHit : searchResponse.getHits().getHits()) { + Map sourceMap = searchHit.getSourceAsMap(); + String password = (String) sourceMap.get(Fields.PASSWORD.getPreferredName()); + Boolean enabled = (Boolean) sourceMap.get(Fields.ENABLED.getPreferredName()); + final String id = searchHit.getId(); + assert id != null && id.startsWith(RESERVED_USER_TYPE) : + "id [" + id + "] does not start with reserved-user prefix"; + final String username = id.substring(RESERVED_USER_TYPE.length() + 1); + if (password == null) { + listener.onFailure(new IllegalStateException("password hash must not be null!")); + return; + } else if (enabled == null) { + listener.onFailure(new IllegalStateException("enabled must not be null!")); + return; + } else { + userInfos.put(username, new ReservedUserInfo(password.toCharArray(), enabled, false)); + } + } + listener.onResponse(userInfos); + } + + @Override + public void onFailure(Exception e) { + if (e instanceof IndexNotFoundException) { + logger.trace("could not retrieve built in users since security index does not exist", e); + listener.onResponse(Collections.emptyMap()); + } else { + logger.error("failed to retrieve built in users", e); + listener.onFailure(e); + } + } + }, client::search)); + } + + private void clearRealmCache(String username, ActionListener listener, Response 
response) { + SecurityClient securityClient = new SecurityClient(client); + ClearRealmCacheRequest request = securityClient.prepareClearRealmCache() + .usernames(username).request(); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, + new ActionListener() { + @Override + public void onResponse(ClearRealmCacheResponse nodes) { + listener.onResponse(response); + } + + @Override + public void onFailure(Exception e) { + logger.error(new ParameterizedMessage("unable to clear realm cache for user [{}]", username), e); + ElasticsearchException exception = new ElasticsearchException("clearing the cache for [" + username + + "] failed. please clear the realm cache manually", e); + listener.onFailure(exception); + } + }, securityClient::clearRealmCache); + } + + @Nullable + private UserAndPassword transformUser(final String id, final Map sourceMap) { + if (sourceMap == null) { + return null; + } + assert id != null && id.startsWith(USER_DOC_TYPE) : "id [" + id + "] does not start with user prefix"; + final String username = id.substring(USER_DOC_TYPE.length() + 1); + try { + String password = (String) sourceMap.get(Fields.PASSWORD.getPreferredName()); + String[] roles = ((List) sourceMap.get(Fields.ROLES.getPreferredName())).toArray(Strings.EMPTY_ARRAY); + String fullName = (String) sourceMap.get(Fields.FULL_NAME.getPreferredName()); + String email = (String) sourceMap.get(Fields.EMAIL.getPreferredName()); + Boolean enabled = (Boolean) sourceMap.get(Fields.ENABLED.getPreferredName()); + if (enabled == null) { + // fallback mechanism as a user from 2.x may not have the enabled field + enabled = Boolean.TRUE; + } + Map metadata = (Map) sourceMap.get(Fields.METADATA.getPreferredName()); + return new UserAndPassword(new User(username, roles, fullName, email, metadata, enabled), password.toCharArray()); + } catch (Exception e) { + logger.error(new ParameterizedMessage("error in the format of data for user [{}]", username), e); + return null; + } + } + + private static boolean isIndexNotFoundOrDocumentMissing(Exception e) { + if (e instanceof ElasticsearchException) { + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof IndexNotFoundException || cause instanceof DocumentMissingException) { + return true; + } + } + return false; + } + + /** + * Gets the document id for the given user and user type (reserved user or regular user). + */ + public static String getIdForUser(final String docType, final String userName) { + return docType + "-" + userName; + } + + static final class ReservedUserInfo { + + public final char[] passwordHash; + public final boolean enabled; + public final boolean hasEmptyPassword; + + ReservedUserInfo(char[] passwordHash, boolean enabled, boolean hasEmptyPassword) { + this.passwordHash = passwordHash; + this.enabled = enabled; + this.hasEmptyPassword = hasEmptyPassword; + } + + ReservedUserInfo deepClone() { + return new ReservedUserInfo(Arrays.copyOf(passwordHash, passwordHash.length), enabled, hasEmptyPassword); + } + + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java new file mode 100644 index 0000000000000..601942b694a76 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java @@ -0,0 +1,239 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.esnative; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.SecureSetting; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.esnative.ClientReservedRealm; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.support.Exceptions; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; +import org.elasticsearch.xpack.core.security.user.ElasticUser; +import org.elasticsearch.xpack.core.security.user.KibanaUser; +import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore.ReservedUserInfo; +import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +/** + * A realm for predefined users. These users can only be modified in terms of changing their passwords; no other modifications are allowed. + * This realm is always enabled. 
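+ * <p>
+ * The reserved users handled by this realm are {@link ElasticUser}, {@link KibanaUser}, {@link LogstashSystemUser},
+ * {@link BeatsSystemUser} and, when anonymous access is enabled, the configured {@link AnonymousUser}.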
+ */ +public class ReservedRealm extends CachingUsernamePasswordRealm { + + public static final String TYPE = "reserved"; + + private final ReservedUserInfo bootstrapUserInfo; + static final char[] EMPTY_PASSWORD_HASH = Hasher.BCRYPT.hash(new SecureString("".toCharArray())); + static final ReservedUserInfo DISABLED_DEFAULT_USER_INFO = new ReservedUserInfo(EMPTY_PASSWORD_HASH, false, true); + static final ReservedUserInfo ENABLED_DEFAULT_USER_INFO = new ReservedUserInfo(EMPTY_PASSWORD_HASH, true, true); + + public static final Setting ACCEPT_DEFAULT_PASSWORD_SETTING = Setting.boolSetting( + SecurityField.setting("authc.accept_default_password"), true, Setting.Property.NodeScope, Setting.Property.Filtered, + Setting.Property.Deprecated); + public static final Setting BOOTSTRAP_ELASTIC_PASSWORD = SecureSetting.secureString("bootstrap.password", + KeyStoreWrapper.SEED_SETTING); + + private final NativeUsersStore nativeUsersStore; + private final AnonymousUser anonymousUser; + private final boolean realmEnabled; + private final boolean anonymousEnabled; + private final SecurityLifecycleService securityLifecycleService; + + public ReservedRealm(Environment env, Settings settings, NativeUsersStore nativeUsersStore, AnonymousUser anonymousUser, + SecurityLifecycleService securityLifecycleService, ThreadContext threadContext) { + super(TYPE, new RealmConfig(TYPE, Settings.EMPTY, settings, env, threadContext)); + this.nativeUsersStore = nativeUsersStore; + this.realmEnabled = XPackSettings.RESERVED_REALM_ENABLED_SETTING.get(settings); + this.anonymousUser = anonymousUser; + this.anonymousEnabled = AnonymousUser.isAnonymousEnabled(settings); + this.securityLifecycleService = securityLifecycleService; + final char[] hash = BOOTSTRAP_ELASTIC_PASSWORD.get(settings).length() == 0 ? 
EMPTY_PASSWORD_HASH : + Hasher.BCRYPT.hash(BOOTSTRAP_ELASTIC_PASSWORD.get(settings)); + bootstrapUserInfo = new ReservedUserInfo(hash, true, hash == EMPTY_PASSWORD_HASH); + } + + @Override + protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { + if (realmEnabled == false) { + listener.onResponse(AuthenticationResult.notHandled()); + } else if (ClientReservedRealm.isReserved(token.principal(), config.globalSettings()) == false) { + listener.onResponse(AuthenticationResult.notHandled()); + } else { + getUserInfo(token.principal(), ActionListener.wrap((userInfo) -> { + AuthenticationResult result; + if (userInfo != null) { + try { + if (userInfo.hasEmptyPassword) { + result = AuthenticationResult.terminate("failed to authenticate user [" + token.principal() + "]", null); + } else if (Hasher.BCRYPT.verify(token.credentials(), userInfo.passwordHash)) { + final User user = getUser(token.principal(), userInfo); + result = AuthenticationResult.success(user); + } else { + result = AuthenticationResult.terminate("failed to authenticate user [" + token.principal() + "]", null); + } + } finally { + assert userInfo.passwordHash != DISABLED_DEFAULT_USER_INFO.passwordHash : "default user info must be cloned"; + assert userInfo.passwordHash != ENABLED_DEFAULT_USER_INFO.passwordHash : "default user info must be cloned"; + assert userInfo.passwordHash != bootstrapUserInfo.passwordHash : "bootstrap user info must be cloned"; + Arrays.fill(userInfo.passwordHash, (char) 0); + } + } else { + result = AuthenticationResult.terminate("failed to authenticate user [" + token.principal() + "]", null); + } + // we want the finally block to clear out the chars before we proceed further so we handle the result here + listener.onResponse(result); + }, listener::onFailure)); + } + } + + @Override + protected void doLookupUser(String username, ActionListener listener) { + if (realmEnabled == false) { + if (anonymousEnabled && AnonymousUser.isAnonymousUsername(username, config.globalSettings())) { + listener.onResponse(anonymousUser); + } + listener.onResponse(null); + } else if (ClientReservedRealm.isReserved(username, config.globalSettings()) == false) { + listener.onResponse(null); + } else if (AnonymousUser.isAnonymousUsername(username, config.globalSettings())) { + listener.onResponse(anonymousEnabled ? anonymousUser : null); + } else { + getUserInfo(username, ActionListener.wrap((userInfo) -> { + if (userInfo != null) { + listener.onResponse(getUser(username, userInfo)); + } else { + // this was a reserved username - don't allow this to go to another realm... + listener.onFailure(Exceptions.authenticationError("failed to lookup user [{}]", username)); + } + }, listener::onFailure)); + } + } + + private User getUser(String username, ReservedUserInfo userInfo) { + assert username != null; + switch (username) { + case ElasticUser.NAME: + return new ElasticUser(userInfo.enabled); + case KibanaUser.NAME: + return new KibanaUser(userInfo.enabled); + case LogstashSystemUser.NAME: + return new LogstashSystemUser(userInfo.enabled); + case BeatsSystemUser.NAME: + return new BeatsSystemUser(userInfo.enabled); + default: + if (anonymousEnabled && anonymousUser.principal().equals(username)) { + return anonymousUser; + } + return null; + } + } + + + public void users(ActionListener> listener) { + if (realmEnabled == false) { + listener.onResponse(anonymousEnabled ? 
Collections.singletonList(anonymousUser) : Collections.emptyList()); + } else { + nativeUsersStore.getAllReservedUserInfo(ActionListener.wrap((reservedUserInfos) -> { + List users = new ArrayList<>(4); + + ReservedUserInfo userInfo = reservedUserInfos.get(ElasticUser.NAME); + users.add(new ElasticUser(userInfo == null || userInfo.enabled)); + + userInfo = reservedUserInfos.get(KibanaUser.NAME); + users.add(new KibanaUser(userInfo == null || userInfo.enabled)); + + userInfo = reservedUserInfos.get(LogstashSystemUser.NAME); + users.add(new LogstashSystemUser(userInfo == null || userInfo.enabled)); + + userInfo = reservedUserInfos.get(BeatsSystemUser.NAME); + users.add(new BeatsSystemUser(userInfo == null || userInfo.enabled)); + + if (anonymousEnabled) { + users.add(anonymousUser); + } + + listener.onResponse(users); + }, (e) -> { + logger.error("failed to retrieve reserved users", e); + listener.onResponse(anonymousEnabled ? Collections.singletonList(anonymousUser) : Collections.emptyList()); + })); + } + } + + + private void getUserInfo(final String username, ActionListener listener) { + if (userIsDefinedForCurrentSecurityMapping(username) == false) { + logger.debug("Marking user [{}] as disabled because the security mapping is not at the required version", username); + listener.onResponse(DISABLED_DEFAULT_USER_INFO.deepClone()); + } else if (securityLifecycleService.isSecurityIndexExisting() == false) { + listener.onResponse(getDefaultUserInfo(username)); + } else { + nativeUsersStore.getReservedUserInfo(username, ActionListener.wrap((userInfo) -> { + if (userInfo == null) { + listener.onResponse(getDefaultUserInfo(username)); + } else { + listener.onResponse(userInfo); + } + }, (e) -> { + logger.error((Supplier) () -> + new ParameterizedMessage("failed to retrieve password hash for reserved user [{}]", username), e); + listener.onResponse(null); + })); + } + } + + private ReservedUserInfo getDefaultUserInfo(String username) { + if (ElasticUser.NAME.equals(username)) { + return bootstrapUserInfo.deepClone(); + } else { + return ENABLED_DEFAULT_USER_INFO.deepClone(); + } + } + + private boolean userIsDefinedForCurrentSecurityMapping(String username) { + final Version requiredVersion = getDefinedVersion(username); + return securityLifecycleService.checkSecurityMappingVersion(requiredVersion::onOrBefore); + } + + private Version getDefinedVersion(String username) { + switch (username) { + case LogstashSystemUser.NAME: + return LogstashSystemUser.DEFINED_SINCE; + case BeatsSystemUser.NAME: + return BeatsSystemUser.DEFINED_SINCE; + default: + return Version.V_5_0_0; + } + } + + public static void addSettings(List> settingsList) { + settingsList.add(ACCEPT_DEFAULT_PASSWORD_SETTING); + settingsList.add(BOOTSTRAP_ELASTIC_PASSWORD); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/UserAndPassword.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/UserAndPassword.java new file mode 100644 index 0000000000000..58351ac2879b5 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/UserAndPassword.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.esnative; + +import org.elasticsearch.xpack.core.security.user.User; + +/** + * Like User, but includes the hashed password + * + * NOT to be used for password verification + * + * NOTE that this purposefully does not serialize the {@code passwordHash} + * field, because this is not meant to be used for security other than + * retrieving the UserAndPassword from the index before local authentication. + */ +class UserAndPassword { + + private final User user; + private final char[] passwordHash; + + UserAndPassword(User user, char[] passwordHash) { + this.user = user; + this.passwordHash = passwordHash; + } + + public User user() { + return this.user; + } + + public char[] passwordHash() { + return this.passwordHash; + } + + @Override + public boolean equals(Object o) { + return false; // Don't use this for user comparison + } + + @Override + public int hashCode() { + int result = this.user.hashCode(); + result = 31 * result + passwordHash().hashCode(); + return result; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/CommandLineHttpClient.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/CommandLineHttpClient.java new file mode 100644 index 0000000000000..f14911402d60f --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/CommandLineHttpClient.java @@ -0,0 +1,168 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.esnative.tool; + +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.common.socket.SocketAccess; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.authc.esnative.tool.HttpResponse.HttpResponseBuilder; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.UncheckedIOException; +import java.net.HttpURLConnection; +import java.net.InetAddress; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Collections; +import java.util.List; + +import javax.net.ssl.HttpsURLConnection; + +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PORT; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_HOST; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT; + +/** + * A simple http client for usage in command line tools. 
This client only uses internal jdk classes and does + not rely on any external http libraries. + */ +public class CommandLineHttpClient { + + /** + * Timeout HTTP(s) reads after 35 seconds. + * The default timeout for discovering a master is 30s, and we want this timeout to be longer than that, otherwise querying a disconnected node + * will trigger a client-side timeout rather than giving clear error details. + */ + private static final int READ_TIMEOUT = 35 * 1000; + + private final Settings settings; + private final Environment env; + + public CommandLineHttpClient(Settings settings, Environment env) { + this.settings = settings; + this.env = env; + } + + /** + * General purpose HTTP(S) call with JSON Content-Type and Authorization Header. + * SSL settings are read from the settings file, if any. + * + * @param user + * user in the authorization header. + * @param password + * password in the authorization header. + * @param requestBodySupplier + * supplier for the JSON string body of the request. + * @param responseHandler + * handler of the response Input Stream. + * @return the HTTP response, holding the status code and the parsed body. + */ + @SuppressForbidden(reason = "We call connect in doPrivileged and provide SocketPermission") + public HttpResponse execute(String method, URL url, String user, SecureString password, + CheckedSupplier requestBodySupplier, + CheckedFunction responseHandler) throws Exception { + final HttpURLConnection conn; + // If using SSL, need a custom service because it's likely a self-signed certificate + if ("https".equalsIgnoreCase(url.getProtocol())) { + final SSLService sslService = new SSLService(settings, env); + final HttpsURLConnection httpsConn = (HttpsURLConnection) url.openConnection(); + AccessController.doPrivileged((PrivilegedAction) () -> { + final Settings sslSettings = SSLService.getHttpTransportSSLSettings(settings); + // Requires permission java.lang.RuntimePermission "setFactory"; + httpsConn.setSSLSocketFactory(sslService.sslSocketFactory(sslSettings)); + final boolean isHostnameVerificationEnabled = + sslService.getVerificationMode(sslSettings, Settings.EMPTY).isHostnameVerificationEnabled(); + if (isHostnameVerificationEnabled == false) { + httpsConn.setHostnameVerifier((hostname, session) -> true); + } + return null; + }); + conn = httpsConn; + } else { + conn = (HttpURLConnection) url.openConnection(); + } + conn.setRequestMethod(method); + conn.setReadTimeout(READ_TIMEOUT); + // Add basic-auth header + String token = UsernamePasswordToken.basicAuthHeaderValue(user, password); + conn.setRequestProperty("Authorization", token); + conn.setRequestProperty("Content-Type", XContentType.JSON.mediaType()); + String bodyString = requestBodySupplier.get(); + conn.setDoOutput(bodyString != null); // set true if we are sending a body + SocketAccess.doPrivileged(conn::connect); + if (bodyString != null) { + try (OutputStream out = conn.getOutputStream()) { + out.write(bodyString.getBytes(StandardCharsets.UTF_8)); + } catch (Exception e) { + Releasables.closeWhileHandlingException(conn::disconnect); + throw e; + } + } + // this throws IOException if there is a network problem + final int responseCode = conn.getResponseCode(); + HttpResponseBuilder responseBuilder = null; + try (InputStream inputStream = conn.getInputStream()) { + responseBuilder = responseHandler.apply(inputStream); + } catch (IOException e) { + // this IOException is thrown if the HTTP response code is 'BAD' (>= 400) + try (InputStream errorStream = conn.getErrorStream()) { + responseBuilder =
responseHandler.apply(errorStream); + } + } finally { + Releasables.closeWhileHandlingException(conn::disconnect); + } + responseBuilder.withHttpStatus(responseCode); + return responseBuilder.build(); + } + + String getDefaultURL() { + final String scheme = XPackSettings.HTTP_SSL_ENABLED.get(settings) ? "https" : "http"; + List httpPublishHost = SETTING_HTTP_PUBLISH_HOST.get(settings); + if (httpPublishHost.isEmpty()) { + httpPublishHost = NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING.get(settings); + } + + // we cannot do custom name resolution here... + NetworkService networkService = new NetworkService(Collections.emptyList()); + try { + InetAddress publishAddress = networkService.resolvePublishHostAddresses(httpPublishHost.toArray(Strings.EMPTY_ARRAY)); + int port = SETTING_HTTP_PUBLISH_PORT.get(settings); + if (port <= 0) { + int[] ports = SETTING_HTTP_PORT.get(settings).ports(); + if (ports.length > 0) { + port = ports[0]; + } + + // this sucks but a port can be specified with a value of 0, we'll never be able to connect to it so just default to + // what we know + if (port <= 0) { + throw new IllegalStateException("unable to determine http port from settings, please use the -u option to provide the" + + " url"); + } + } + return scheme + "://" + InetAddresses.toUriString(publishAddress) + ":" + port; + } catch (IOException e) { + throw new UncheckedIOException("failed to resolve default URL", e); + } + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/HttpResponse.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/HttpResponse.java new file mode 100644 index 0000000000000..97335fd0706fb --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/HttpResponse.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.esnative.tool; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; + +import java.io.UnsupportedEncodingException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +/** + * Simple http response with status and response body as key value map. To be + * used with {@link CommandLineHttpClient}. 
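+ * <p> + * Instances are created through the nested {@link HttpResponseBuilder}; an illustrative (not prescriptive) flow is + * {@code new HttpResponseBuilder().withResponseBody(json).withHttpStatus(200).build()}, where the body string is parsed + * as JSON into the key-value map.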
+ */ +final class HttpResponse { + private final int httpStatus; + private final Map responseBody; + + HttpResponse(final int httpStatus, final Map responseBody) { + this.httpStatus = httpStatus; + Map response = new HashMap<>(); + response.putAll(responseBody); + this.responseBody = Collections.unmodifiableMap(response); + } + + int getHttpStatus() { + return httpStatus; + } + + Map getResponseBody() { + return responseBody; + } + + static class HttpResponseBuilder { + private int httpStatus; + private Map responseBody; + + HttpResponseBuilder withHttpStatus(final int httpStatus) { + this.httpStatus = httpStatus; + return this; + } + + HttpResponseBuilder withResponseBody(final String responseJson) + throws ElasticsearchParseException, UnsupportedEncodingException { + if (responseJson == null || responseJson.trim().isEmpty()) { + throw new ElasticsearchParseException( + "Invalid string provided as http response body, Failed to parse content to form response body."); + } + this.responseBody = XContentHelper.convertToMap(XContentType.JSON.xContent(), responseJson, false); + return this; + } + + HttpResponse build() { + HttpResponse httpResponse = new HttpResponse(this.httpStatus, this.responseBody); + return httpResponse; + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java new file mode 100644 index 0000000000000..9675453186ea8 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java @@ -0,0 +1,588 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.esnative.tool; + +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.bouncycastle.util.io.Streams; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.LoggingAwareMultiCommand; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.Terminal.Verbosity; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.support.Validation; +import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; +import org.elasticsearch.xpack.core.security.user.ElasticUser; +import org.elasticsearch.xpack.core.security.user.KibanaUser; +import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; +import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; +import org.elasticsearch.xpack.security.authc.esnative.tool.HttpResponse.HttpResponseBuilder; + +import javax.net.ssl.SSLException; +import java.io.IOException; +import java.io.InputStream; +import java.net.HttpURLConnection; +import java.net.MalformedURLException; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.security.SecureRandom; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.BiFunction; + +import static java.util.Arrays.asList; + +/** + * A tool to set passwords of reserved users (elastic, kibana and + * logstash_system). Can run in `interactive` or `auto` mode. In `auto` mode + * generates random passwords and prints them on the console. In `interactive` + * mode prompts for each individual user's password. This tool only runs once, + * if successful. After the elastic user password is set you have to use the + * `security` API to manipulate passwords. 
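+ * <p> + * An illustrative invocation (the launcher script path is assumed here, not defined by this class): running + * {@code bin/elasticsearch-setup-passwords auto -u "https://localhost:9200"} generates and prints random passwords without + * prompting for each value, while the {@code interactive} subcommand prompts for every password; {@code -b/--batch} + * additionally suppresses the confirmation prompt.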
+ */ +public class SetupPasswordTool extends LoggingAwareMultiCommand { + + private static final char[] CHARS = ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789").toCharArray(); + public static final List USERS = asList(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME); + + private final BiFunction clientFunction; + private final CheckedFunction keyStoreFunction; + + private CommandLineHttpClient client; + + SetupPasswordTool() { + this((environment, settings) -> { + return new CommandLineHttpClient(settings, environment); + }, (environment) -> { + KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.load(environment.configFile()); + if (keyStoreWrapper == null) { + throw new UserException(ExitCodes.CONFIG, + "Elasticsearch keystore file is missing [" + KeyStoreWrapper.keystorePath(environment.configFile()) + "]"); + } + return keyStoreWrapper; + }); + } + + SetupPasswordTool(BiFunction clientFunction, + CheckedFunction keyStoreFunction) { + super("Sets the passwords for reserved users"); + subcommands.put("auto", newAutoSetup()); + subcommands.put("interactive", newInteractiveSetup()); + this.clientFunction = clientFunction; + this.keyStoreFunction = keyStoreFunction; + } + + protected AutoSetup newAutoSetup() { + return new AutoSetup(); + } + + protected InteractiveSetup newInteractiveSetup() { + return new InteractiveSetup(); + } + + public static void main(String[] args) throws Exception { + exit(new SetupPasswordTool().main(args, Terminal.DEFAULT)); + } + + // Visible for testing + OptionParser getParser() { + return this.parser; + } + + /** + * This class sets the passwords using automatically generated random passwords. + * The passwords will be printed to the console. + */ + class AutoSetup extends SetupCommand { + + AutoSetup() { + super("Uses randomly generated passwords"); + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + terminal.println(Verbosity.VERBOSE, "Running with configuration path: " + env.configFile()); + setupOptions(options, env); + checkElasticKeystorePasswordValid(terminal, env); + checkClusterHealth(terminal); + + if (shouldPrompt) { + terminal.println("Initiating the setup of passwords for reserved users " + String.join(",", USERS) + "."); + terminal.println("The passwords will be randomly generated and printed to the console."); + boolean shouldContinue = terminal.promptYesNo("Please confirm that you would like to continue", false); + terminal.println("\n"); + if (shouldContinue == false) { + throw new UserException(ExitCodes.OK, "User cancelled operation"); + } + } + + SecureRandom secureRandom = new SecureRandom(); + changePasswords((user) -> generatePassword(secureRandom, user), + (user, password) -> changedPasswordCallback(terminal, user, password), terminal); + } + + private SecureString generatePassword(SecureRandom secureRandom, String user) { + int passwordLength = 20; // Generate 20 character passwords + char[] characters = new char[passwordLength]; + for (int i = 0; i < passwordLength; ++i) { + characters[i] = CHARS[secureRandom.nextInt(CHARS.length)]; + } + return new SecureString(characters); + } + + private void changedPasswordCallback(Terminal terminal, String user, SecureString password) { + terminal.println("Changed password for user " + user + "\n" + "PASSWORD " + user + " = " + password + "\n"); + } + + } + + /** + * This class sets the passwords using input prompted on the console + */ + class InteractiveSetup extends SetupCommand 
{ + + InteractiveSetup() { + super("Uses passwords entered by a user"); + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + terminal.println(Verbosity.VERBOSE, "Running with configuration path: " + env.configFile()); + setupOptions(options, env); + checkElasticKeystorePasswordValid(terminal, env); + checkClusterHealth(terminal); + + if (shouldPrompt) { + terminal.println("Initiating the setup of passwords for reserved users " + String.join(",", USERS) + "."); + terminal.println("You will be prompted to enter passwords as the process progresses."); + boolean shouldContinue = terminal.promptYesNo("Please confirm that you would like to continue", false); + terminal.println("\n"); + if (shouldContinue == false) { + throw new UserException(ExitCodes.OK, "User cancelled operation"); + } + } + + changePasswords(user -> promptForPassword(terminal, user), + (user, password) -> changedPasswordCallback(terminal, user, password), terminal); + } + + private SecureString promptForPassword(Terminal terminal, String user) throws UserException { + // loop for two consecutive good passwords + while (true) { + SecureString password1 = new SecureString(terminal.readSecret("Enter password for [" + user + "]: ")); + Validation.Error err = Validation.Users.validatePassword(password1.getChars()); + if (err != null) { + terminal.println(err.toString()); + terminal.println("Try again."); + password1.close(); + continue; + } + try (SecureString password2 = new SecureString(terminal.readSecret("Reenter password for [" + user + "]: "))) { + if (password1.equals(password2) == false) { + terminal.println("Passwords do not match."); + terminal.println("Try again."); + password1.close(); + continue; + } + } + return password1; + } + } + + private void changedPasswordCallback(Terminal terminal, String user, SecureString password) { + terminal.println("Changed password for user [" + user + "]"); + } + } + + /** + * An abstract class that provides functionality common to both the auto and + * interactive setup modes. + */ + private abstract class SetupCommand extends EnvironmentAwareCommand { + + boolean shouldPrompt; + + private OptionSpec urlOption; + private OptionSpec noPromptOption; + + private String elasticUser = ElasticUser.NAME; + private SecureString elasticUserPassword; + private KeyStoreWrapper keyStoreWrapper; + private URL url; + + SetupCommand(String description) { + super(description); + setParser(); + } + + @Override + public void close() { + if (keyStoreWrapper != null) { + keyStoreWrapper.close(); + } + if (elasticUserPassword != null) { + elasticUserPassword.close(); + } + } + + void setupOptions(OptionSet options, Environment env) throws Exception { + keyStoreWrapper = keyStoreFunction.apply(env); + // TODO: We currently do not support keystore passwords + keyStoreWrapper.decrypt(new char[0]); + + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(env.settings(), true); + if (settingsBuilder.getSecureSettings() == null) { + settingsBuilder.setSecureSettings(keyStoreWrapper); + } + Settings settings = settingsBuilder.build(); + elasticUserPassword = ReservedRealm.BOOTSTRAP_ELASTIC_PASSWORD.get(settings); + + client = clientFunction.apply(env, settings); + + String providedUrl = urlOption.value(options); + url = new URL(providedUrl == null ? 
client.getDefaultURL() : providedUrl); + setShouldPrompt(options); + + } + + private void setParser() { + urlOption = parser.acceptsAll(asList("u", "url"), "The url for the change password request.").withRequiredArg(); + noPromptOption = parser.acceptsAll(asList("b", "batch"), + "If enabled, run the change password process without prompting the user.").withOptionalArg(); + } + + private void setShouldPrompt(OptionSet options) { + String optionalNoPrompt = noPromptOption.value(options); + if (options.has(noPromptOption)) { + shouldPrompt = optionalNoPrompt != null && Booleans.parseBoolean(optionalNoPrompt) == false; + } else { + shouldPrompt = true; + } + } + + /** + * Validates the bootstrap password from the local keystore by making an + * '_authenticate' call. Returns silently if server is reachable and password is + * valid. Throws {@link UserException} otherwise. + * + * @param terminal where to write verbose info. + */ + void checkElasticKeystorePasswordValid(Terminal terminal, Environment env) throws Exception { + URL route = createURL(url, "/_xpack/security/_authenticate", "?pretty"); + terminal.println(Verbosity.VERBOSE, ""); + terminal.println(Verbosity.VERBOSE, "Testing if bootstrap password is valid for " + route.toString()); + try { + final HttpResponse httpResponse = client.execute("GET", route, elasticUser, elasticUserPassword, () -> null, + is -> responseBuilder(is, terminal)); + final int httpCode = httpResponse.getHttpStatus(); + + // keystore password is not valid + if (httpCode == HttpURLConnection.HTTP_UNAUTHORIZED) { + terminal.println(""); + terminal.println("Failed to authenticate user '" + elasticUser + "' against " + route.toString()); + terminal.println("Possible causes include:"); + terminal.println(" * The password for the '" + elasticUser + "' user has already been changed on this cluster"); + terminal.println(" * Your elasticsearch node is running against a different keystore"); + terminal.println(" This tool used the keystore at " + KeyStoreWrapper.keystorePath(env.configFile())); + terminal.println(""); + throw new UserException(ExitCodes.CONFIG, "Failed to verify bootstrap password"); + } else if (httpCode != HttpURLConnection.HTTP_OK) { + terminal.println(""); + terminal.println("Unexpected response code [" + httpCode + "] from calling GET " + route.toString()); + XPackSecurityFeatureConfig xPackSecurityFeatureConfig = getXPackSecurityConfig(terminal); + if (xPackSecurityFeatureConfig.isAvailable == false) { + terminal.println("It doesn't look like the X-Pack security feature is available on this Elasticsearch node."); + terminal.println("Please check if you have installed a license that allows access to X-Pack Security feature."); + terminal.println(""); + throw new UserException(ExitCodes.CONFIG, "X-Pack Security is not available."); + } + if (xPackSecurityFeatureConfig.isEnabled == false) { + terminal.println("It doesn't look like the X-Pack security feature is enabled on this Elasticsearch node."); + terminal.println("Please check if you have enabled X-Pack security in your elasticsearch.yml configuration file."); + terminal.println(""); + throw new UserException(ExitCodes.CONFIG, "X-Pack Security is disabled by configuration."); + } + terminal.println("X-Pack security feature is available and enabled on this Elasticsearch node."); + terminal.println("Possible causes include:"); + terminal.println(" * The relative path of the URL is incorrect. 
Is there a proxy in-between?"); + terminal.println(" * The protocol (http/https) does not match the port."); + terminal.println(" * Is this really an Elasticsearch server?"); + terminal.println(""); + throw new UserException(ExitCodes.CONFIG, "Unknown error"); + } + } catch (SSLException e) { + terminal.println(""); + terminal.println("SSL connection to " + route.toString() + " failed: " + e.getMessage()); + terminal.println("Please check the elasticsearch SSL settings under " + XPackSettings.HTTP_SSL_PREFIX); + terminal.println(Verbosity.VERBOSE, ""); + terminal.println(Verbosity.VERBOSE, ExceptionsHelper.stackTrace(e)); + terminal.println(""); + throw new UserException(ExitCodes.CONFIG, + "Failed to establish SSL connection to elasticsearch at " + route.toString() + ". ", e); + } catch (IOException e) { + terminal.println(""); + terminal.println("Connection failure to: " + route.toString() + " failed: " + e.getMessage()); + terminal.println(Verbosity.VERBOSE, ""); + terminal.println(Verbosity.VERBOSE, ExceptionsHelper.stackTrace(e)); + terminal.println(""); + throw new UserException(ExitCodes.CONFIG, + "Failed to connect to elasticsearch at " + route.toString() + ". Is the URL correct and elasticsearch running?", e); + } + } + + @SuppressWarnings("unchecked") + private XPackSecurityFeatureConfig getXPackSecurityConfig(Terminal terminal) throws Exception { + // Get x-pack security info. + URL route = createURL(url, "/_xpack", "?categories=features&human=false&pretty"); + final HttpResponse httpResponse = + client.execute("GET", route, elasticUser, elasticUserPassword, () -> null, is -> responseBuilder(is, terminal)); + if (httpResponse.getHttpStatus() != HttpURLConnection.HTTP_OK) { + terminal.println(""); + terminal.println("Unexpected response code [" + httpResponse.getHttpStatus() + "] from calling GET " + route.toString()); + if (httpResponse.getHttpStatus() == HttpURLConnection.HTTP_BAD_REQUEST) { + terminal.println("It doesn't look like the X-Pack is available on this Elasticsearch node."); + terminal.println("Please check that you have followed all installation instructions and that this tool"); + terminal.println(" is pointing to the correct Elasticsearch server."); + terminal.println(""); + throw new UserException(ExitCodes.CONFIG, "X-Pack is not available on this Elasticsearch node."); + } else { + terminal.println("* Try running this tool again."); + terminal.println("* Verify that the tool is pointing to the correct Elasticsearch server."); + terminal.println("* Check the elasticsearch logs for additional error details."); + terminal.println(""); + throw new UserException(ExitCodes.TEMP_FAILURE, "Failed to determine x-pack security feature configuration."); + } + } + final XPackSecurityFeatureConfig xPackSecurityFeatureConfig; + if (httpResponse.getHttpStatus() == HttpURLConnection.HTTP_OK && httpResponse.getResponseBody() != null) { + Map features = (Map) httpResponse.getResponseBody().get("features"); + if (features != null) { + Map featureInfo = (Map) features.get("security"); + if (featureInfo != null) { + xPackSecurityFeatureConfig = + new XPackSecurityFeatureConfig(Boolean.parseBoolean(featureInfo.get("available").toString()), + Boolean.parseBoolean(featureInfo.get("enabled").toString())); + return xPackSecurityFeatureConfig; + } + } + } + terminal.println(""); + terminal.println("Unexpected response from calling GET " + route.toString()); + terminal.println("* Try running this tool again."); + terminal.println("* Verify that the tool is pointing to the correct 
Elasticsearch server."); + terminal.println("* Check the elasticsearch logs for additional error details."); + terminal.println(""); + throw new UserException(ExitCodes.TEMP_FAILURE, "Failed to determine x-pack security feature configuration."); + } + + void checkClusterHealth(Terminal terminal) throws Exception { + URL route = createURL(url, "/_cluster/health", "?pretty"); + terminal.println(Verbosity.VERBOSE, ""); + terminal.println(Verbosity.VERBOSE, "Checking cluster health: " + route.toString()); + final HttpResponse httpResponse = client.execute("GET", route, elasticUser, elasticUserPassword, () -> null, + is -> responseBuilder(is, terminal)); + if (httpResponse.getHttpStatus() != HttpURLConnection.HTTP_OK) { + terminal.println(""); + terminal.println("Failed to determine the health of the cluster running at " + url); + terminal.println("Unexpected response code [" + httpResponse.getHttpStatus() + "] from calling GET " + route.toString()); + final String cause = getErrorCause(httpResponse); + if (cause != null) { + terminal.println("Cause: " + cause); + } + } else { + final String clusterStatus = Objects.toString(httpResponse.getResponseBody().get("status"), ""); + if (clusterStatus.isEmpty()) { + terminal.println(""); + terminal.println("Failed to determine the health of the cluster running at " + url); + terminal.println("Could not find a 'status' value at " + route.toString()); + } else if ("red".equalsIgnoreCase(clusterStatus)) { + terminal.println(""); + terminal.println("Your cluster health is currently RED."); + terminal.println("This means that some cluster data is unavailable and your cluster is not fully functional."); + } else { + // Cluster is yellow/green -> all OK + return; + } + } + terminal.println(""); + terminal.println( + "It is recommended that you resolve the issues with your cluster before running elasticsearch-setup-passwords."); + terminal.println("It is very likely that the password changes will fail when run against an unhealthy cluster."); + terminal.println(""); + if (shouldPrompt) { + final boolean keepGoing = terminal.promptYesNo("Do you want to continue with the password setup process", false); + if (keepGoing == false) { + throw new UserException(ExitCodes.OK, "User cancelled operation"); + } + terminal.println(""); + } + } + + /** + * Sets one user's password using the elastic superUser credentials. + * + * @param user The user who's password will change. + * @param password the new password of the user. 
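+ * + * (For reference, this issues the equivalent of {@code PUT /_xpack/security/user/[username]/_password} with a JSON body + * of the form {@code {"password": "..."}}, as built by the request body supplier below.)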
+ */ + private void changeUserPassword(String user, SecureString password, Terminal terminal) throws Exception { + URL route = createURL(url, "/_xpack/security/user/" + user + "/_password", "?pretty"); + terminal.println(Verbosity.VERBOSE, ""); + terminal.println(Verbosity.VERBOSE, "Trying user password change call " + route.toString()); + try { + // the supplier should own its resources + SecureString supplierPassword = password.clone(); + final HttpResponse httpResponse = client.execute("PUT", route, elasticUser, elasticUserPassword, () -> { + try { + XContentBuilder xContentBuilder = JsonXContent.contentBuilder(); + xContentBuilder.startObject().field("password", supplierPassword.toString()).endObject(); + return Strings.toString(xContentBuilder); + } finally { + supplierPassword.close(); + } + }, is -> responseBuilder(is, terminal)); + if (httpResponse.getHttpStatus() != HttpURLConnection.HTTP_OK) { + terminal.println(""); + terminal.println( + "Unexpected response code [" + httpResponse.getHttpStatus() + "] from calling PUT " + route.toString()); + String cause = getErrorCause(httpResponse); + if (cause != null) { + terminal.println("Cause: " + cause); + terminal.println(""); + } + terminal.println("Possible next steps:"); + terminal.println("* Try running this tool again."); + terminal.println("* Try running with the --verbose parameter for additional messages."); + terminal.println("* Check the elasticsearch logs for additional error details."); + terminal.println("* Use the change password API manually. "); + terminal.println(""); + throw new UserException(ExitCodes.TEMP_FAILURE, "Failed to set password for user [" + user + "]."); + } + } catch (IOException e) { + terminal.println(""); + terminal.println("Connection failure to: " + route.toString() + " failed: " + e.getMessage()); + terminal.println(Verbosity.VERBOSE, ""); + terminal.println(Verbosity.VERBOSE, ExceptionsHelper.stackTrace(e)); + terminal.println(""); + throw new UserException(ExitCodes.TEMP_FAILURE, "Failed to set password for user [" + user + "].", e); + } + } + + /** + * Collects passwords for all the users, then issues the set requests. It fails on the + * first failed request; in that case, rerun the tool to redo all the operations. + * + * @param passwordFn Function to generate or prompt for each user's password. + * @param successCallback Callback for each successful operation. + */ + void changePasswords(CheckedFunction passwordFn, + CheckedBiConsumer successCallback, Terminal terminal) throws Exception { + Map passwordsMap = new HashMap<>(USERS.size()); + try { + for (String user : USERS) { + passwordsMap.put(user, passwordFn.apply(user)); + } + /* + * Change elastic user last. This tool will not run again after the elastic user + * password has been changed, even if changing the password for a subsequent user fails. + * Stay safe and change elastic last.
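+ * For example, if changing the kibana password fails midway, the elastic bootstrap password is still usable and the + * tool can simply be rerun.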
+ */ + Map.Entry superUserEntry = null; + for (Map.Entry entry : passwordsMap.entrySet()) { + if (entry.getKey().equals(elasticUser)) { + superUserEntry = entry; + continue; + } + changeUserPassword(entry.getKey(), entry.getValue(), terminal); + successCallback.accept(entry.getKey(), entry.getValue()); + } + // change elastic superuser + if (superUserEntry != null) { + changeUserPassword(superUserEntry.getKey(), superUserEntry.getValue(), terminal); + successCallback.accept(superUserEntry.getKey(), superUserEntry.getValue()); + } + } finally { + passwordsMap.forEach((user, pass) -> pass.close()); + } + } + + private HttpResponseBuilder responseBuilder(InputStream is, Terminal terminal) throws IOException { + HttpResponseBuilder httpResponseBuilder = new HttpResponseBuilder(); + if (is != null) { + byte[] bytes = Streams.readAll(is); + String responseBody = new String(bytes, StandardCharsets.UTF_8); + terminal.println(Verbosity.VERBOSE, responseBody); + httpResponseBuilder.withResponseBody(responseBody); + } else { + terminal.println(Verbosity.VERBOSE, ""); + } + return httpResponseBuilder; + } + + private URL createURL(URL url, String path, String query) throws MalformedURLException, URISyntaxException { + return new URL(url, (url.toURI().getPath() + path).replaceAll("/+", "/") + query); + } + } + + private String getErrorCause(HttpResponse httpResponse) { + final Object error = httpResponse.getResponseBody().get("error"); + if (error == null) { + return null; + } + if (error instanceof Map) { + Object reason = ((Map) error).get("reason"); + if (reason != null) { + return reason.toString(); + } + final Object root = ((Map) error).get("root_cause"); + if (root != null && root instanceof Map) { + reason = ((Map) root).get("reason"); + if (reason != null) { + return reason.toString(); + } + final Object type = ((Map) root).get("type"); + if (type != null) { + return (String) type; + } + } + return String.valueOf(((Map) error).get("type")); + } + return error.toString(); + } + + /** + * This class is used to capture x-pack security feature configuration. + */ + static class XPackSecurityFeatureConfig { + final boolean isAvailable; + final boolean isEnabled; + + XPackSecurityFeatureConfig(boolean isAvailable, boolean isEnabled) { + this.isAvailable = isAvailable; + this.isEnabled = isEnabled; + } + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java new file mode 100644 index 0000000000000..9e85b4505210e --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.file; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; + +import java.util.Map; + +public class FileRealm extends CachingUsernamePasswordRealm { + + private final FileUserPasswdStore userPasswdStore; + private final FileUserRolesStore userRolesStore; + + public FileRealm(RealmConfig config, ResourceWatcherService watcherService) { + this(config, new FileUserPasswdStore(config, watcherService), new FileUserRolesStore(config, watcherService)); + } + + // pkg private for testing + FileRealm(RealmConfig config, FileUserPasswdStore userPasswdStore, FileUserRolesStore userRolesStore) { + super(FileRealmSettings.TYPE, config); + this.userPasswdStore = userPasswdStore; + userPasswdStore.addListener(this::expireAll); + this.userRolesStore = userRolesStore; + userRolesStore.addListener(this::expireAll); + } + + @Override + protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { + final AuthenticationResult result = userPasswdStore.verifyPassword(token.principal(), token.credentials(), () -> { + String[] roles = userRolesStore.roles(token.principal()); + return new User(token.principal(), roles); + }); + listener.onResponse(result); + } + + @Override + protected void doLookupUser(String username, ActionListener listener) { + if (userPasswdStore.userExists(username)) { + String[] roles = userRolesStore.roles(username); + listener.onResponse(new User(username, roles)); + } else { + listener.onResponse(null); + } + } + + @Override + public Map usageStats() { + Map stats = super.usageStats(); + // here we can determine the size based on the in mem user store + stats.put("size", userPasswdStore.usersCount()); + return stats; + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStore.java new file mode 100644 index 0000000000000..93d222fc791fd --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStore.java @@ -0,0 +1,203 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.file; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.watcher.FileChangesListener; +import org.elasticsearch.watcher.FileWatcher; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.support.NoOpLogger; +import org.elasticsearch.xpack.core.security.support.Validation; +import org.elasticsearch.xpack.core.security.support.Validation.Users; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.support.SecurityFiles; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.CopyOnWriteArrayList; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.unmodifiableMap; + +public class FileUserPasswdStore { + + private final Logger logger; + + private final Path file; + private final Hasher hasher = Hasher.BCRYPT; + private final Settings settings; + private final CopyOnWriteArrayList listeners; + + private volatile Map users; + + public FileUserPasswdStore(RealmConfig config, ResourceWatcherService watcherService) { + this(config, watcherService, () -> {}); + } + + FileUserPasswdStore(RealmConfig config, ResourceWatcherService watcherService, Runnable listener) { + logger = config.logger(FileUserPasswdStore.class); + file = resolveFile(config.env()); + settings = config.globalSettings(); + users = parseFileLenient(file, logger, settings); + listeners = new CopyOnWriteArrayList<>(Collections.singletonList(listener)); + FileWatcher watcher = new FileWatcher(file.getParent()); + watcher.addListener(new FileListener()); + try { + watcherService.add(watcher, ResourceWatcherService.Frequency.HIGH); + } catch (IOException e) { + throw new ElasticsearchException("failed to start watching users file [{}]", e, file.toAbsolutePath()); + } + } + + public void addListener(Runnable listener) { + listeners.add(listener); + } + + public int usersCount() { + return users.size(); + } + + public AuthenticationResult verifyPassword(String username, SecureString password, java.util.function.Supplier user) { + char[] hash = users.get(username); + if (hash == null) { + return AuthenticationResult.notHandled(); + } + if (hasher.verify(password, hash) == false) { + return AuthenticationResult.unsuccessful("Password authentication failed for " + username, null); + } + return AuthenticationResult.success(user.get()); + } + + public boolean userExists(String username) { + return users != null && users.containsKey(username); + } + + public static Path resolveFile(Environment env) { + return XPackPlugin.resolveConfigFile(env, "users"); + } + + /** + 
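* Each non-comment line of the {@code users} file is expected to have the form {@code username:hash}, where the hash is + * a bcrypt hash written by the associated tools; this is the format assumed by the parsing code below. +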
* Internally in this class, we try to load the file, but if for some reason we can't, we're being more lenient by + * logging the error and skipping all users. This is aligned with how we handle other auto-loaded files in security. + */ + static Map parseFileLenient(Path path, Logger logger, Settings settings) { + try { + Map map = parseFile(path, logger, settings); + return map == null ? emptyMap() : map; + } catch (Exception e) { + logger.error( + (Supplier) () -> new ParameterizedMessage( + "failed to parse users file [{}]. skipping/removing all users...", path.toAbsolutePath()), e); + return emptyMap(); + } + } + + /** + * Parses the users file. + * + * Returns {@code null}, if the {@code users} file does not exist. + */ + public static Map parseFile(Path path, @Nullable Logger logger, Settings settings) { + if (logger == null) { + logger = NoOpLogger.INSTANCE; + } + logger.trace("reading users file [{}]...", path.toAbsolutePath()); + + if (Files.exists(path) == false) { + return null; + } + + List lines; + try { + lines = Files.readAllLines(path, StandardCharsets.UTF_8); + } catch (IOException ioe) { + throw new IllegalStateException("could not read users file [" + path.toAbsolutePath() + "]", ioe); + } + + Map users = new HashMap<>(); + + final boolean allowReserved = XPackSettings.RESERVED_REALM_ENABLED_SETTING.get(settings) == false; + int lineNr = 0; + for (String line : lines) { + lineNr++; + if (line.startsWith("#")) { // comment + continue; + } + + // only trim the line because we have a format, our tool generates the formatted text and we shouldn't be lenient + // and allow spaces in the format + line = line.trim(); + int i = line.indexOf(":"); + if (i <= 0 || i == line.length() - 1) { + logger.error("invalid entry in users file [{}], line [{}]. skipping...", path.toAbsolutePath(), lineNr); + continue; + } + String username = line.substring(0, i); + Validation.Error validationError = Users.validateUsername(username, allowReserved, settings); + if (validationError != null) { + logger.error("invalid username [{}] in users file [{}], skipping... ({})", username, path.toAbsolutePath(), + validationError); + continue; + } + String hash = line.substring(i + 1); + users.put(username, hash.toCharArray()); + } + + logger.debug("parsed [{}] users from file [{}]", users.size(), path.toAbsolutePath()); + return unmodifiableMap(users); + } + + public static void writeFile(Map users, Path path) { + SecurityFiles.writeFileAtomically( + path, + users, + e -> String.format(Locale.ROOT, "%s:%s", e.getKey(), new String(e.getValue()))); + } + + void notifyRefresh() { + listeners.forEach(Runnable::run); + } + + private class FileListener implements FileChangesListener { + @Override + public void onFileCreated(Path file) { + onFileChanged(file); + } + + @Override + public void onFileDeleted(Path file) { + onFileChanged(file); + } + + @Override + public void onFileChanged(Path file) { + if (file.equals(FileUserPasswdStore.this.file)) { + logger.info("users file [{}] changed. updating users... 
)", file.toAbsolutePath()); + users = parseFileLenient(file, logger, settings); + notifyRefresh(); + } + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserRolesStore.java new file mode 100644 index 0000000000000..1631fef60ea89 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserRolesStore.java @@ -0,0 +1,227 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.file; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.watcher.FileChangesListener; +import org.elasticsearch.watcher.FileWatcher; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.support.NoOpLogger; +import org.elasticsearch.xpack.core.security.support.Validation; +import org.elasticsearch.xpack.security.support.SecurityFiles; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.regex.Pattern; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.unmodifiableMap; +import static org.elasticsearch.common.Strings.collectionToCommaDelimitedString; + +public class FileUserRolesStore { + + private static final Pattern USERS_DELIM = Pattern.compile("\\s*,\\s*"); + + private final Logger logger; + + private final Path file; + private final CopyOnWriteArrayList listeners; + private volatile Map userRoles; + + FileUserRolesStore(RealmConfig config, ResourceWatcherService watcherService) { + this(config, watcherService, () -> {}); + } + + FileUserRolesStore(RealmConfig config, ResourceWatcherService watcherService, Runnable listener) { + logger = config.logger(FileUserRolesStore.class); + file = resolveFile(config.env()); + userRoles = parseFileLenient(file, logger); + listeners = new CopyOnWriteArrayList<>(Collections.singletonList(listener)); + FileWatcher watcher = new FileWatcher(file.getParent()); + watcher.addListener(new FileListener()); + try { + watcherService.add(watcher, ResourceWatcherService.Frequency.HIGH); + } catch (IOException e) { + throw new ElasticsearchException("failed to start watching the user roles file [" + file.toAbsolutePath() + "]", e); + } + } + + public void addListener(Runnable listener) { + listeners.add(listener); + } + + int entriesCount() { + return userRoles.size(); + } + + public String[] roles(String username) { + if (userRoles == null) { + return Strings.EMPTY_ARRAY; + } + String[] roles = userRoles.get(username); + return roles == null ? 
Strings.EMPTY_ARRAY : userRoles.get(username); + } + + public static Path resolveFile(Environment env) { + return XPackPlugin.resolveConfigFile(env, "users_roles"); + } + + /** + * Internally in this class, we try to load the file, but if for some reason we can't, we're being more lenient by + * logging the error and skipping all entries. This is aligned with how we handle other auto-loaded files in security. + */ + static Map parseFileLenient(Path path, Logger logger) { + try { + Map map = parseFile(path, logger); + return map == null ? emptyMap() : map; + } catch (Exception e) { + logger.error( + (Supplier) () -> new ParameterizedMessage("failed to parse users_roles file [{}]. skipping/removing all entries...", + path.toAbsolutePath()), + e); + return emptyMap(); + } + } + + /** + * Parses the users_roles file. + * + * Returns {@code null} if the {@code users_roles} file does not exist. The read file holds a mapping per + * line of the form "role -> users" while the returned map holds entries of the form "user -> roles". + */ + public static Map parseFile(Path path, @Nullable Logger logger) { + if (logger == null) { + logger = NoOpLogger.INSTANCE; + } + logger.trace("reading users_roles file [{}]...", path.toAbsolutePath()); + + if (Files.exists(path) == false) { + return null; + } + + List lines; + try { + lines = Files.readAllLines(path, StandardCharsets.UTF_8); + } catch (IOException ioe) { + throw new ElasticsearchException("could not read users_roles file [" + path.toAbsolutePath() + "]", ioe); + } + + Map> userToRoles = new HashMap<>(); + + int lineNr = 0; + for (String line : lines) { + lineNr++; + if (line.startsWith("#")) { // comment + continue; + } + int i = line.indexOf(":"); + if (i <= 0 || i == line.length() - 1) { + logger.error("invalid entry in users_roles file [{}], line [{}]. skipping...", path.toAbsolutePath(), lineNr); + continue; + } + String role = line.substring(0, i).trim(); + Validation.Error validationError = Validation.Roles.validateRoleName(role, true); + if (validationError != null) { + logger.error("invalid role entry in users_roles file [{}], line [{}] - {}. skipping...", path.toAbsolutePath(), lineNr, + validationError); + continue; + } + String usersStr = line.substring(i + 1).trim(); + if (Strings.isEmpty(usersStr)) { + logger.error("invalid entry for role [{}] in users_roles file [{}], line [{}]. no users found. skipping...", role, + path.toAbsolutePath(), lineNr); + continue; + } + String[] roleUsers = USERS_DELIM.split(usersStr); + if (roleUsers.length == 0) { + logger.error("invalid entry for role [{}] in users_roles file [{}], line [{}]. no users found.
skipping...", role, + path.toAbsolutePath(), lineNr); + continue; + } + + for (String user : roleUsers) { + List roles = userToRoles.get(user); + if (roles == null) { + roles = new ArrayList<>(); + userToRoles.put(user, roles); + } + roles.add(role); + } + } + + Map usersRoles = new HashMap<>(); + for (Map.Entry> entry : userToRoles.entrySet()) { + usersRoles.put(entry.getKey(), entry.getValue().toArray(new String[entry.getValue().size()])); + } + + logger.debug("parsed [{}] user to role mappings from file [{}]", usersRoles.size(), path.toAbsolutePath()); + return unmodifiableMap(usersRoles); + } + + /** + * Accepts a mapping of user -> list of roles + */ + public static void writeFile(Map userToRoles, Path path) { + HashMap> roleToUsers = new HashMap<>(); + for (Map.Entry entry : userToRoles.entrySet()) { + for (String role : entry.getValue()) { + List users = roleToUsers.get(role); + if (users == null) { + users = new ArrayList<>(); + roleToUsers.put(role, users); + } + users.add(entry.getKey()); + } + } + + SecurityFiles.writeFileAtomically( + path, + roleToUsers, + e -> String.format(Locale.ROOT, "%s:%s", e.getKey(), collectionToCommaDelimitedString(e.getValue()))); + } + + void notifyRefresh() { + listeners.forEach(Runnable::run); + } + + private class FileListener implements FileChangesListener { + @Override + public void onFileCreated(Path file) { + onFileChanged(file); + } + + @Override + public void onFileDeleted(Path file) { + onFileChanged(file); + } + + @Override + public void onFileChanged(Path file) { + if (file.equals(FileUserRolesStore.this.file)) { + logger.info("users roles file [{}] changed. updating users roles...", file.toAbsolutePath()); + userRoles = parseFileLenient(file, logger); + notifyRefresh(); + } + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java new file mode 100644 index 0000000000000..9823ab0ad071b --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java @@ -0,0 +1,503 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.file.tool; + +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.LoggingAwareMultiCommand; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.security.authz.store.FileRolesStore; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; +import org.elasticsearch.xpack.core.security.support.Validation; +import org.elasticsearch.xpack.core.security.support.Validation.Users; +import org.elasticsearch.xpack.security.authc.file.FileUserPasswdStore; +import org.elasticsearch.xpack.security.authc.file.FileUserRolesStore; +import org.elasticsearch.xpack.security.support.FileAttributesChecker; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +public class UsersTool extends LoggingAwareMultiCommand { + + public static void main(String[] args) throws Exception { + exit(new UsersTool().main(args, Terminal.DEFAULT)); + } + + UsersTool() { + super("Manages elasticsearch native users"); + subcommands.put("useradd", newAddUserCommand()); + subcommands.put("userdel", newDeleteUserCommand()); + subcommands.put("passwd", newPasswordCommand()); + subcommands.put("roles", newRolesCommand()); + subcommands.put("list", newListCommand()); + } + + protected AddUserCommand newAddUserCommand() { + return new AddUserCommand(); + } + + protected DeleteUserCommand newDeleteUserCommand() { + return new DeleteUserCommand(); + } + + protected PasswordCommand newPasswordCommand() { + return new PasswordCommand(); + } + + protected RolesCommand newRolesCommand() { + return new RolesCommand(); + } + + protected ListCommand newListCommand() { + return new ListCommand(); + } + + static class AddUserCommand extends EnvironmentAwareCommand { + + private final OptionSpec passwordOption; + private final OptionSpec rolesOption; + private final OptionSpec arguments; + + AddUserCommand() { + super("Adds a native user"); + + this.passwordOption = parser.acceptsAll(Arrays.asList("p", "password"), + "The user password") + .withRequiredArg(); + this.rolesOption = parser.acceptsAll(Arrays.asList("r", "roles"), + "Comma-separated list of the roles of the user") + .withRequiredArg().defaultsTo(""); + this.arguments = parser.nonOptions("username"); + } + + @Override + protected void printAdditionalHelp(Terminal terminal) { + terminal.println("Adds a file based user to elasticsearch (via internal realm). The user will"); + terminal.println("be added to the users file and its roles will be added to the"); + terminal.println("users_roles file. 
If non-default files are used (different file"); + terminal.println("locations are configured in elasticsearch.yml) the appropriate files"); + terminal.println("will be resolved from the settings and the user and its roles will be"); + terminal.println("added to them."); + terminal.println(""); + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + + String username = parseUsername(arguments.values(options), env.settings()); + final boolean allowReserved = XPackSettings.RESERVED_REALM_ENABLED_SETTING.get(env.settings()) == false; + Validation.Error validationError = Users.validateUsername(username, allowReserved, env.settings()); + if (validationError != null) { + throw new UserException(ExitCodes.DATA_ERROR, "Invalid username [" + username + "]... " + validationError); + } + + char[] password = parsePassword(terminal, passwordOption.value(options)); + String[] roles = parseRoles(terminal, env, rolesOption.value(options)); + + Path passwordFile = FileUserPasswdStore.resolveFile(env); + Path rolesFile = FileUserRolesStore.resolveFile(env); + FileAttributesChecker attributesChecker = new FileAttributesChecker(passwordFile, rolesFile); + + Map users = FileUserPasswdStore.parseFile(passwordFile, null, env.settings()); + if (users == null) { + throw new UserException(ExitCodes.CONFIG, "Configuration file [users] is missing"); + } + if (users.containsKey(username)) { + throw new UserException(ExitCodes.CODE_ERROR, "User [" + username + "] already exists"); + } + Hasher hasher = Hasher.BCRYPT; + users = new HashMap<>(users); // make modifiable + users.put(username, hasher.hash(new SecureString(password))); + FileUserPasswdStore.writeFile(users, passwordFile); + + if (roles.length > 0) { + Map userRoles = new HashMap<>(FileUserRolesStore.parseFile(rolesFile, null)); + userRoles.put(username, roles); + FileUserRolesStore.writeFile(userRoles, rolesFile); + } + + attributesChecker.check(terminal); + } + } + + static class DeleteUserCommand extends EnvironmentAwareCommand { + + private final OptionSpec arguments; + + DeleteUserCommand() { + super("Deletes a file based user"); + this.arguments = parser.nonOptions("username"); + } + + @Override + protected void printAdditionalHelp(Terminal terminal) { + terminal.println("Removes an existing file based user from elasticsearch. The user will be"); + terminal.println("removed from the users file and its roles will be removed from the"); + terminal.println("users_roles file. 
If non-default files are used (different file"); + terminal.println("locations are configured in elasticsearch.yml) the appropriate files"); + terminal.println("will be resolved from the settings and the user and its roles will be"); + terminal.println("removed from them."); + terminal.println(""); + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + + String username = parseUsername(arguments.values(options), env.settings()); + Path passwordFile = FileUserPasswdStore.resolveFile(env); + Path rolesFile = FileUserRolesStore.resolveFile(env); + FileAttributesChecker attributesChecker = new FileAttributesChecker(passwordFile, rolesFile); + + Map users = FileUserPasswdStore.parseFile(passwordFile, null, env.settings()); + if (users == null) { + throw new UserException(ExitCodes.CONFIG, "Configuration file [users] is missing"); + } + if (users.containsKey(username) == false) { + throw new UserException(ExitCodes.NO_USER, "User [" + username + "] doesn't exist"); + } + if (Files.exists(passwordFile)) { + users = new HashMap<>(users); + char[] passwd = users.remove(username); + if (passwd != null) { + FileUserPasswdStore.writeFile(users, passwordFile); + } + } + + Map userRoles = new HashMap<>(FileUserRolesStore.parseFile(rolesFile, null)); + if (Files.exists(rolesFile)) { + String[] roles = userRoles.remove(username); + if (roles != null) { + FileUserRolesStore.writeFile(userRoles, rolesFile); + } + } + + attributesChecker.check(terminal); + } + } + + static class PasswordCommand extends EnvironmentAwareCommand { + + private final OptionSpec passwordOption; + private final OptionSpec arguments; + + PasswordCommand() { + super("Changes the password of an existing file based user"); + this.passwordOption = parser.acceptsAll(Arrays.asList("p", "password"), + "The user password") + .withRequiredArg(); + this.arguments = parser.nonOptions("username"); + } + + @Override + protected void printAdditionalHelp(Terminal terminal) { + terminal.println("The passwd command changes passwords for files based users. The tool"); + terminal.println("prompts twice for a replacement password. The second entry is compared"); + terminal.println("against the first and both are required to match in order for the"); + terminal.println("password to be changed. 
If non-default users file is used (a different"); + terminal.println("file location is configured in elasticsearch.yml) the appropriate file"); + terminal.println("will be resolved from the settings."); + terminal.println(""); + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + + String username = parseUsername(arguments.values(options), env.settings()); + char[] password = parsePassword(terminal, passwordOption.value(options)); + + Path file = FileUserPasswdStore.resolveFile(env); + FileAttributesChecker attributesChecker = new FileAttributesChecker(file); + Map users = new HashMap<>(FileUserPasswdStore.parseFile(file, null, env.settings())); + if (users == null) { + throw new UserException(ExitCodes.CONFIG, "Configuration file [users] is missing"); + } + if (users.containsKey(username) == false) { + throw new UserException(ExitCodes.NO_USER, "User [" + username + "] doesn't exist"); + } + users.put(username, Hasher.BCRYPT.hash(new SecureString(password))); + FileUserPasswdStore.writeFile(users, file); + + attributesChecker.check(terminal); + } + } + + static class RolesCommand extends EnvironmentAwareCommand { + + private final OptionSpec addOption; + private final OptionSpec removeOption; + private final OptionSpec arguments; + + RolesCommand() { + super("Edit roles of an existing user"); + this.addOption = parser.acceptsAll(Arrays.asList("a", "add"), + "Adds supplied roles to the specified user") + .withRequiredArg().defaultsTo(""); + this.removeOption = parser.acceptsAll(Arrays.asList("r", "remove"), + "Remove supplied roles from the specified user") + .withRequiredArg().defaultsTo(""); + this.arguments = parser.nonOptions("username"); + } + + @Override + protected void printAdditionalHelp(Terminal terminal) { + terminal.println("The roles command allows editing roles for file based users."); + terminal.println("You can also list a user's roles by omitting the -a and -r"); + terminal.println("parameters."); + terminal.println(""); + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + + String username = parseUsername(arguments.values(options), env.settings()); + String[] addRoles = parseRoles(terminal, env, addOption.value(options)); + String[] removeRoles = parseRoles(terminal, env, removeOption.value(options)); + + // check if just need to return data as no write operation happens + // Nothing to add, just list the data for a username + boolean readOnlyUserListing = removeRoles.length == 0 && addRoles.length == 0; + if (readOnlyUserListing) { + listUsersAndRoles(terminal, env, username); + return; + } + + Path usersFile = FileUserPasswdStore.resolveFile(env); + Path rolesFile = FileUserRolesStore.resolveFile(env); + FileAttributesChecker attributesChecker = new FileAttributesChecker(usersFile, rolesFile); + + Map usersMap = FileUserPasswdStore.parseFile(usersFile, null, env.settings()); + if (!usersMap.containsKey(username)) { + throw new UserException(ExitCodes.NO_USER, "User [" + username + "] doesn't exist"); + } + + Map userRoles = FileUserRolesStore.parseFile(rolesFile, null); + List roles = new ArrayList<>(); + if (userRoles.get(username) != null) { + roles.addAll(Arrays.asList(userRoles.get(username))); + } + roles.addAll(Arrays.asList(addRoles)); + roles.removeAll(Arrays.asList(removeRoles)); + + Map userRolesToWrite = new HashMap<>(userRoles.size()); + userRolesToWrite.putAll(userRoles); + if (roles.size() == 0) { + 
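+                // no roles remain for this user after applying the edit, so drop the mapping entirely rather
+                // than writing an empty entry to the users_roles file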
userRolesToWrite.remove(username); + } else { + userRolesToWrite.put(username, new LinkedHashSet<>(roles).toArray(new String[]{})); + } + FileUserRolesStore.writeFile(userRolesToWrite, rolesFile); + + attributesChecker.check(terminal); + } + } + + static class ListCommand extends EnvironmentAwareCommand { + + private final OptionSpec arguments; + + ListCommand() { + super("List existing file based users and their corresponding roles"); + this.arguments = parser.nonOptions("username"); + } + + @Override + protected void printAdditionalHelp(Terminal terminal) { + terminal.println(""); + terminal.println(""); + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + + String username = null; + if (options.has(arguments)) { + username = arguments.value(options); + } + listUsersAndRoles(terminal, env, username); + } + } + + // pkg private for tests + static void listUsersAndRoles(Terminal terminal, Environment env, String username) throws Exception { + Path userRolesFilePath = FileUserRolesStore.resolveFile(env); + Map userRoles = FileUserRolesStore.parseFile(userRolesFilePath, null); + if (userRoles == null) { + throw new UserException(ExitCodes.CONFIG, "Configuration file [users_roles] is missing"); + } + + Path userFilePath = FileUserPasswdStore.resolveFile(env); + Map users = FileUserPasswdStore.parseFile(userFilePath, null, env.settings()); + if (users == null) { + throw new UserException(ExitCodes.CONFIG, "Configuration file [users] is missing"); + } + + Path rolesFilePath = FileRolesStore.resolveFile(env); + Set knownRoles = Sets.union(FileRolesStore.parseFileForRoleNames(rolesFilePath, null), ReservedRolesStore.names()); + if (knownRoles == null) { + throw new UserException(ExitCodes.CONFIG, "Configuration file [roles.xml] is missing"); + } + + if (username != null) { + if (!users.containsKey(username)) { + throw new UserException(ExitCodes.NO_USER, "User [" + username + "] doesn't exist"); + } + + if (userRoles.containsKey(username)) { + String[] roles = userRoles.get(username); + Set unknownRoles = Sets.difference(Sets.newHashSet(roles), knownRoles); + String[] markedRoles = markUnknownRoles(roles, unknownRoles); + terminal.println(String.format(Locale.ROOT, "%-15s: %s", username, Arrays.stream(markedRoles).map(s -> s == null ? + "-" : s).collect(Collectors.joining(",")))); + if (!unknownRoles.isEmpty()) { + // at least one role is marked... so printing the legend + Path rolesFile = FileRolesStore.resolveFile(env).toAbsolutePath(); + terminal.println(""); + terminal.println(" [*] Role is not in the [" + rolesFile.toAbsolutePath() + "] file. 
If the role has been created " + + "using the API, please disregard this message."); + } + } else { + terminal.println(String.format(Locale.ROOT, "%-15s: -", username)); + } + } else { + boolean unknownRolesFound = false; + boolean usersExist = false; + for (Map.Entry entry : userRoles.entrySet()) { + String[] roles = entry.getValue(); + Set unknownRoles = Sets.difference(Sets.newHashSet(roles), knownRoles); + String[] markedRoles = markUnknownRoles(roles, unknownRoles); + terminal.println(String.format(Locale.ROOT, "%-15s: %s", entry.getKey(), String.join(",", markedRoles))); + unknownRolesFound = unknownRolesFound || !unknownRoles.isEmpty(); + usersExist = true; + } + // list users without roles + Set usersWithoutRoles = Sets.newHashSet(users.keySet()); + usersWithoutRoles.removeAll(userRoles.keySet()); + for (String user : usersWithoutRoles) { + terminal.println(String.format(Locale.ROOT, "%-15s: -", user)); + usersExist = true; + } + + if (!usersExist) { + terminal.println("No users found"); + return; + } + + if (unknownRolesFound) { + // at least one role is marked... so printing the legend + Path rolesFile = FileRolesStore.resolveFile(env).toAbsolutePath(); + terminal.println(""); + terminal.println(" [*] Role is not in the [" + rolesFile.toAbsolutePath() + "] file. If the role has been created " + + "using the API, please disregard this message."); + } + } + } + + private static String[] markUnknownRoles(String[] roles, Set unknownRoles) { + if (unknownRoles.isEmpty()) { + return roles; + } + String[] marked = new String[roles.length]; + for (int i = 0; i < roles.length; i++) { + if (unknownRoles.contains(roles[i])) { + marked[i] = roles[i] + "*"; + } else { + marked[i] = roles[i]; + } + } + return marked; + } + + // pkg private for testing + static String parseUsername(List args, Settings settings) throws UserException { + if (args.isEmpty()) { + throw new UserException(ExitCodes.USAGE, "Missing username argument"); + } else if (args.size() > 1) { + throw new UserException(ExitCodes.USAGE, "Expected a single username argument, found extra: " + args.toString()); + } + String username = args.get(0); + final boolean allowReserved = XPackSettings.RESERVED_REALM_ENABLED_SETTING.get(settings) == false; + Validation.Error validationError = Users.validateUsername(username, allowReserved, settings); + if (validationError != null) { + throw new UserException(ExitCodes.DATA_ERROR, "Invalid username [" + username + "]... " + validationError); + } + return username; + } + + // pkg private for testing + static char[] parsePassword(Terminal terminal, String passwordStr) throws UserException { + char[] password; + if (passwordStr != null) { + password = passwordStr.toCharArray(); + Validation.Error validationError = Users.validatePassword(password); + if (validationError != null) { + throw new UserException(ExitCodes.DATA_ERROR, "Invalid password..." + validationError); + } + } else { + password = terminal.readSecret("Enter new password: "); + Validation.Error validationError = Users.validatePassword(password); + if (validationError != null) { + throw new UserException(ExitCodes.DATA_ERROR, "Invalid password..." 
+ validationError); + } + char[] retyped = terminal.readSecret("Retype new password: "); + if (Arrays.equals(password, retyped) == false) { + throw new UserException(ExitCodes.DATA_ERROR, "Password mismatch"); + } + } + return password; + } + + private static void verifyRoles(Terminal terminal, Environment env, String[] roles) { + Path rolesFile = FileRolesStore.resolveFile(env); + assert Files.exists(rolesFile); + Set knownRoles = Sets.union(FileRolesStore.parseFileForRoleNames(rolesFile, null), ReservedRolesStore.names()); + Set unknownRoles = Sets.difference(Sets.newHashSet(roles), knownRoles); + if (!unknownRoles.isEmpty()) { + terminal.println(String.format(Locale.ROOT, "Warning: The following roles [%s] are not in the [%s] file. Make sure the names " + + "are correct. If the names are correct and the roles were created using the API please disregard this message. " + + "Nonetheless the user will still be associated with all specified roles", + Strings.collectionToCommaDelimitedString(unknownRoles), rolesFile.toAbsolutePath())); + terminal.println("Known roles: " + knownRoles.toString()); + } + } + + // pkg private for testing + static String[] parseRoles(Terminal terminal, Environment env, String rolesStr) throws UserException { + if (rolesStr.isEmpty()) { + return Strings.EMPTY_ARRAY; + } + String[] roles = rolesStr.split(","); + for (String role : roles) { + Validation.Error validationError = Validation.Roles.validateRoleName(role, true); + if (validationError != null) { + throw new UserException(ExitCodes.DATA_ERROR, "Invalid role [" + role + "]... " + validationError); + } + } + + verifyRoles(terminal, env, roles); + + return roles; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolver.java new file mode 100644 index 0000000000000..8e009154cad12 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolver.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.ldap; + +import com.unboundid.ldap.sdk.Attribute; +import com.unboundid.ldap.sdk.Filter; +import com.unboundid.ldap.sdk.LDAPInterface; +import com.unboundid.ldap.sdk.SearchRequest; +import com.unboundid.ldap.sdk.SearchResultEntry; +import com.unboundid.ldap.sdk.SearchScope; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession.GroupsResolver; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.security.authc.ldap.ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING; +import static org.elasticsearch.xpack.security.authc.ldap.ActiveDirectorySessionFactory.buildDnFromDomain; +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.OBJECT_CLASS_PRESENCE_FILTER; +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.search; +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.searchForEntry; +import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.IGNORE_REFERRAL_ERRORS_SETTING; +import static org.elasticsearch.xpack.security.authc.ldap.ActiveDirectorySIDUtil.convertToString; + +class ActiveDirectoryGroupsResolver implements GroupsResolver { + + private static final String TOKEN_GROUPS = "tokenGroups"; + private final String baseDn; + private final LdapSearchScope scope; + private final boolean ignoreReferralErrors; + + ActiveDirectoryGroupsResolver(Settings settings) { + this.baseDn = settings.get("group_search.base_dn", buildDnFromDomain(settings.get(AD_DOMAIN_NAME_SETTING))); + this.scope = LdapSearchScope.resolve(settings.get("group_search.scope"), LdapSearchScope.SUB_TREE); + this.ignoreReferralErrors = IGNORE_REFERRAL_ERRORS_SETTING.get(settings); + } + + @Override + public void resolve(LDAPInterface connection, String userDn, TimeValue timeout, Logger logger, Collection attributes, + ActionListener> listener) { + buildGroupQuery(connection, userDn, timeout, + ignoreReferralErrors, ActionListener.wrap((filter) -> { + if (filter == null) { + listener.onResponse(Collections.emptyList()); + } else { + logger.debug("group SID to DN [{}] search filter: [{}]", userDn, filter); + search(connection, baseDn, scope.scope(), filter, + Math.toIntExact(timeout.seconds()), ignoreReferralErrors, + ActionListener.wrap((results) -> { + List groups = results.stream() + .map(SearchResultEntry::getDN) + .collect(Collectors.toList()); + listener.onResponse(Collections.unmodifiableList(groups)); + }, + listener::onFailure), + SearchRequest.NO_ATTRIBUTES); + } + }, listener::onFailure)); + } + + @Override + public String[] attributes() { + // we have to return null since the tokenGroups attribute is computed and can only be retrieved using a BASE level search + return null; + } + + static void buildGroupQuery(LDAPInterface connection, String userDn, TimeValue timeout, + boolean ignoreReferralErrors, ActionListener listener) { + searchForEntry(connection, userDn, SearchScope.BASE, OBJECT_CLASS_PRESENCE_FILTER, + Math.toIntExact(timeout.seconds()), ignoreReferralErrors, + ActionListener.wrap((entry) -> { + if (entry == null 
|| entry.hasAttribute(TOKEN_GROUPS) == false) {
+                        listener.onResponse(null);
+                    } else {
+                        final byte[][] tokenGroupSIDBytes = entry.getAttributeValueByteArrays(TOKEN_GROUPS);
+                        List<Filter> orFilters = Arrays.stream(tokenGroupSIDBytes)
+                                .map((sidBytes) -> Filter.createEqualityFilter("objectSid", convertToString(sidBytes)))
+                                .collect(Collectors.toList());
+                        listener.onResponse(Filter.createORFilter(orFilters));
+                    }
+                }, listener::onFailure),
+                TOKEN_GROUPS);
+    }
+
+}
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySIDUtil.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySIDUtil.java
new file mode 100644
index 0000000000000..8cb6e128d17e5
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySIDUtil.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+/*
+ * This code is sourced from:
+ * http://svn.apache.org/repos/asf/directory/studio/tags/2.0.0.v20170904-M13/plugins/valueeditors/src/main/java/org/apache/directory/studio/valueeditors/msad/InPlaceMsAdObjectSidValueEditor.java
+ */
+
+package org.elasticsearch.xpack.security.authc.ldap;
+
+import org.apache.commons.codec.binary.Hex;
+
+class ActiveDirectorySIDUtil {
+
+    static String convertToString( byte[] bytes )
+    {
+        /*
+         * The binary data structure, from http://msdn.microsoft.com/en-us/library/cc230371(PROT.10).aspx:
+         * byte[0] - Revision (1 byte): An 8-bit unsigned integer that specifies the revision level of
+         * the SID structure. This value MUST be set to 0x01.
+         * byte[1] - SubAuthorityCount (1 byte): An 8-bit unsigned integer that specifies the number of
+         * elements in the SubAuthority array. The maximum number of elements allowed is 15.
+         * byte[2-7] - IdentifierAuthority (6 bytes): A SID_IDENTIFIER_AUTHORITY structure that contains
+         * information, which indicates the authority under which the SID was created. It describes the
+         * entity that created the SID and manages the account.
+         * Six element arrays of 8-bit unsigned integers that specify the top-level authority
+         * big-endian!
+         * and then - SubAuthority (variable): A variable length array of unsigned 32-bit integers that
+         * uniquely identifies a principal relative to the IdentifierAuthority. Its length is determined
+         * by SubAuthorityCount. little-endian!
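+         *
+         * As an illustrative worked example (a well-known SID, not a value taken from this change): the
+         * 16-byte value 01 02 000000000005 20000000 20020000 has revision 0x01, a SubAuthorityCount of 0x02,
+         * the big-endian IdentifierAuthority 5 and the little-endian sub-authorities 32 and 544, so this
+         * method renders it as "S-1-5-32-544".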
+ */ + + if ( ( bytes == null ) || ( bytes.length < 8 ) ) + { + throw new IllegalArgumentException("Invalid SID"); + } + + char[] hex = Hex.encodeHex( bytes ); + StringBuffer sb = new StringBuffer(); + + // start with 'S' + sb.append( 'S' ); + + // revision + int revision = Integer.parseInt( new String( hex, 0, 2 ), 16 ); + sb.append( '-' ); + sb.append( revision ); + + // get count + int count = Integer.parseInt( new String( hex, 2, 2 ), 16 ); + + // check length + if ( bytes.length != ( 8 + count * 4 ) ) + { + throw new IllegalArgumentException("Invalid SID"); + } + + // get authority, big-endian + long authority = Long.parseLong( new String( hex, 4, 12 ), 16 ); + sb.append( '-' ); + sb.append( authority ); + + // sub-authorities, little-endian + for ( int i = 0; i < count; i++ ) + { + StringBuffer rid = new StringBuffer(); + + for ( int k = 3; k >= 0; k-- ) + { + rid.append( hex[16 + ( i * 8 ) + ( k * 2 )] ); + rid.append( hex[16 + ( i * 8 ) + ( k * 2 ) + 1] ); + } + + long subAuthority = Long.parseLong( rid.toString(), 16 ); + sb.append( '-' ); + sb.append( subAuthority ); + } + + return sb.toString(); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactory.java new file mode 100644 index 0000000000000..295e00e19a8a9 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactory.java @@ -0,0 +1,544 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.ldap; + +import com.unboundid.ldap.sdk.Filter; +import com.unboundid.ldap.sdk.LDAPConnection; +import com.unboundid.ldap.sdk.LDAPConnectionPool; +import com.unboundid.ldap.sdk.LDAPException; +import com.unboundid.ldap.sdk.LDAPInterface; +import com.unboundid.ldap.sdk.SearchResultEntry; +import com.unboundid.ldap.sdk.SimpleBindRequest; +import com.unboundid.ldap.sdk.controls.AuthorizationIdentityRequestControl; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.common.cache.Cache; +import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.ActiveDirectorySessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; +import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapMetaDataResolver; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession.GroupsResolver; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils; + +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.ExecutionException; + +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.attributesToSearchFor; +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.createFilter; +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.search; +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.searchForEntry; + +/** + * This Class creates LdapSessions authenticating via the custom Active Directory protocol. (that being + * authenticating with a principal name, "username@domain", then searching through the directory to find the + * user entry in Active Directory that matches the user name). This eliminates the need for user templates, and simplifies + * the configuration for windows admins that may not be familiar with LDAP concepts. 
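+ * <p>
+ * As an illustrative example (the domain and user below are hypothetical, not taken from this change): with the
+ * realm's AD domain name set to example.com, a login for "jdoe" first binds as "jdoe@example.com" and the user
+ * entry is then located with the default search filter
+ * (&(objectClass=user)(|(sAMAccountName=jdoe)(userPrincipalName=jdoe@example.com))).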
+ */ +class ActiveDirectorySessionFactory extends PoolingSessionFactory { + + private static final String NETBIOS_NAME_FILTER_TEMPLATE = "(netbiosname={0})"; + + final DefaultADAuthenticator defaultADAuthenticator; + final DownLevelADAuthenticator downLevelADAuthenticator; + final UpnADAuthenticator upnADAuthenticator; + + private final int ldapPort; + + ActiveDirectorySessionFactory(RealmConfig config, SSLService sslService, ThreadPool threadPool) throws LDAPException { + super(config, sslService, new ActiveDirectoryGroupsResolver(config.settings()), + ActiveDirectorySessionFactorySettings.POOL_ENABLED, + PoolingSessionFactorySettings.BIND_DN.exists(config.settings())? getBindDN(config.settings()) : null, + () -> { + if (PoolingSessionFactorySettings.BIND_DN.exists(config.settings())) { + final String healthCheckDn = PoolingSessionFactorySettings.BIND_DN.get(config.settings()); + if (healthCheckDn.isEmpty() && healthCheckDn.indexOf('=') > 0) { + return healthCheckDn; + } + } + return config.settings().get(ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_BASEDN_SETTING, + config.settings().get(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING)); + }, threadPool); + Settings settings = config.settings(); + String domainName = settings.get(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING); + if (domainName == null) { + throw new IllegalArgumentException("missing [" + ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING + + "] setting for active directory"); + } + String domainDN = buildDnFromDomain(domainName); + ldapPort = ActiveDirectorySessionFactorySettings.AD_LDAP_PORT_SETTING.get(settings); + final int ldapsPort = ActiveDirectorySessionFactorySettings.AD_LDAPS_PORT_SETTING.get(settings); + final int gcLdapPort = ActiveDirectorySessionFactorySettings.AD_GC_LDAP_PORT_SETTING.get(settings); + final int gcLdapsPort = ActiveDirectorySessionFactorySettings.AD_GC_LDAPS_PORT_SETTING.get(settings); + + defaultADAuthenticator = new DefaultADAuthenticator(config, timeout, ignoreReferralErrors, logger, groupResolver, + metaDataResolver, domainDN, threadPool); + downLevelADAuthenticator = new DownLevelADAuthenticator(config, timeout, ignoreReferralErrors, logger, groupResolver, + metaDataResolver, domainDN, sslService, threadPool, ldapPort, ldapsPort, gcLdapPort, gcLdapsPort); + upnADAuthenticator = new UpnADAuthenticator(config, timeout, ignoreReferralErrors, logger, groupResolver, + metaDataResolver, domainDN, threadPool); + + } + + @Override + protected List getDefaultLdapUrls(Settings settings) { + return Collections.singletonList("ldap://" + settings.get(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING) + + ":" + ldapPort); + } + + @Override + public boolean supportsUnauthenticatedSession() { + // Strictly, we only support unauthenticated sessions if there is a bind_dn or a connection pool, but the + // getUnauthenticatedSession... methods handle the situations correctly, so it's OK to always return true here. 
+ return true; + } + + @Override + void getSessionWithPool(LDAPConnectionPool connectionPool, String user, SecureString password, ActionListener listener) { + getADAuthenticator(user).authenticate(connectionPool, user, password, threadPool, listener); + } + + @Override + void getSessionWithoutPool(String username, SecureString password, ActionListener listener) { + try { + final LDAPConnection connection = LdapUtils.privilegedConnect(serverSet::getConnection); + getADAuthenticator(username).authenticate(connection, username, password, ActionListener.wrap(listener::onResponse, e -> { + IOUtils.closeWhileHandlingException(connection); + listener.onFailure(e); + })); + } catch (LDAPException e) { + listener.onFailure(e); + } + } + + @Override + void getUnauthenticatedSessionWithPool(LDAPConnectionPool connectionPool, String user, ActionListener listener) { + getADAuthenticator(user).searchForDN(connectionPool, user, null, Math.toIntExact(timeout.seconds()), ActionListener.wrap(entry -> { + if (entry == null) { + listener.onResponse(null); + } else { + final String dn = entry.getDN(); + listener.onResponse(new LdapSession(logger, config, connectionPool, dn, groupResolver, metaDataResolver, timeout, null)); + } + }, listener::onFailure)); + } + + @Override + void getUnauthenticatedSessionWithoutPool(String user, ActionListener listener) { + if (PoolingSessionFactorySettings.BIND_DN.exists(config.settings()) == false) { + listener.onResponse(null); + return; + } + try { + final LDAPConnection connection = LdapUtils.privilegedConnect(serverSet::getConnection); + LdapUtils.maybeForkThenBind(connection, bindCredentials, threadPool, new AbstractRunnable() { + + @Override + public void onFailure(Exception e) { + IOUtils.closeWhileHandlingException(connection); + listener.onFailure(e); + } + + @Override + protected void doRun() throws Exception { + getADAuthenticator(user).searchForDN(connection, user, null, Math.toIntExact(timeout.getSeconds()), + ActionListener.wrap(entry -> { + if (entry == null) { + IOUtils.close(connection); + listener.onResponse(null); + } else { + listener.onResponse(new LdapSession(logger, config, connection, entry.getDN(), groupResolver, + metaDataResolver, timeout, null)); + } + }, e -> { + IOUtils.closeWhileHandlingException(connection); + listener.onFailure(e); + })); + + } + }); + } catch (LDAPException e) { + listener.onFailure(e); + } + } + + /** + * @param domain active directory domain name + * @return LDAP DN, distinguished name, of the root of the domain + */ + static String buildDnFromDomain(String domain) { + return "DC=" + domain.replace(".", ",DC="); + } + + static String getBindDN(Settings settings) { + String bindDN = PoolingSessionFactorySettings.BIND_DN.get(settings); + if (bindDN.isEmpty() == false && bindDN.indexOf('\\') < 0 && bindDN.indexOf('@') < 0 && bindDN.indexOf('=') < 0) { + bindDN = bindDN + "@" + settings.get(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING); + } + return bindDN; + } + + ADAuthenticator getADAuthenticator(String username) { + if (username.indexOf('\\') > 0) { + return downLevelADAuthenticator; + } else if (username.indexOf("@") > 0) { + return upnADAuthenticator; + } + return defaultADAuthenticator; + } + + abstract static class ADAuthenticator { + + private final RealmConfig realm; + final TimeValue timeout; + final boolean ignoreReferralErrors; + final Logger logger; + final GroupsResolver groupsResolver; + final LdapMetaDataResolver metaDataResolver; + final String userSearchDN; + final LdapSearchScope 
userSearchScope; + final String userSearchFilter; + final String bindDN; + final SecureString bindPassword; + final ThreadPool threadPool; + + ADAuthenticator(RealmConfig realm, TimeValue timeout, boolean ignoreReferralErrors, Logger logger, GroupsResolver groupsResolver, + LdapMetaDataResolver metaDataResolver, String domainDN, String userSearchFilterSetting, String defaultUserSearchFilter, + ThreadPool threadPool) { + this.realm = realm; + this.timeout = timeout; + this.ignoreReferralErrors = ignoreReferralErrors; + this.logger = logger; + this.groupsResolver = groupsResolver; + this.metaDataResolver = metaDataResolver; + final Settings settings = realm.settings(); + this.bindDN = getBindDN(settings); + this.bindPassword = PoolingSessionFactorySettings.SECURE_BIND_PASSWORD.get(settings); + this.threadPool = threadPool; + userSearchDN = settings.get(ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_BASEDN_SETTING, domainDN); + userSearchScope = LdapSearchScope.resolve(settings.get(ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_SCOPE_SETTING), + LdapSearchScope.SUB_TREE); + userSearchFilter = settings.get(userSearchFilterSetting, defaultUserSearchFilter); + } + + final void authenticate(LDAPConnection connection, String username, SecureString password, ActionListener listener) { + final byte[] passwordBytes = CharArrays.toUtf8Bytes(password.getChars()); + final SimpleBindRequest userBind = new SimpleBindRequest(bindUsername(username), passwordBytes, + new AuthorizationIdentityRequestControl()); + LdapUtils.maybeForkThenBind(connection, userBind, threadPool, new ActionRunnable(listener) { + @Override + protected void doRun() throws Exception { + final ActionRunnable searchRunnable = new ActionRunnable(listener) { + @Override + protected void doRun() throws Exception { + searchForDN(connection, username, password, Math.toIntExact(timeout.seconds()), ActionListener.wrap((entry) -> { + if (entry == null) { + // we did not find the user, cannot authenticate in this realm + listener.onFailure(new ElasticsearchSecurityException( + "search for user [" + username + "] by principal name yielded no results")); + } else { + listener.onResponse(new LdapSession(logger, realm, connection, entry.getDN(), groupsResolver, + metaDataResolver, timeout, null)); + } + }, e -> { + listener.onFailure(e); + })); + } + }; + if (bindDN.isEmpty()) { + searchRunnable.run(); + } else { + final SimpleBindRequest bind = new SimpleBindRequest(bindDN, CharArrays.toUtf8Bytes(bindPassword.getChars())); + LdapUtils.maybeForkThenBind(connection, bind, threadPool, searchRunnable); + } + } + }); + } + + final void authenticate(LDAPConnectionPool pool, String username, SecureString password, ThreadPool threadPool, + ActionListener listener) { + final byte[] passwordBytes = CharArrays.toUtf8Bytes(password.getChars()); + final SimpleBindRequest bind = new SimpleBindRequest(bindUsername(username), passwordBytes); + LdapUtils.maybeForkThenBindAndRevert(pool, bind, threadPool, new ActionRunnable(listener) { + @Override + protected void doRun() throws Exception { + searchForDN(pool, username, password, Math.toIntExact(timeout.seconds()), ActionListener.wrap((entry) -> { + if (entry == null) { + // we did not find the user, cannot authenticate in this realm + listener.onFailure(new ElasticsearchSecurityException( + "search for user [" + username + "] by principal name yielded no results")); + } else { + listener.onResponse( + new LdapSession(logger, realm, pool, entry.getDN(), groupsResolver, metaDataResolver, timeout, null)); 
+ } + }, e -> { + listener.onFailure(e); + })); + } + }); + } + + String bindUsername(String username) { + return username; + } + + // pkg-private for testing + final String getUserSearchFilter() { + return userSearchFilter; + } + + abstract void searchForDN(LDAPInterface connection, String username, SecureString password, int timeLimitSeconds, + ActionListener listener); + } + + /** + * This authenticator is used for usernames that do not contain an `@` or `/`. It attempts a bind with the provided username combined + * with the domain name specified in settings. On AD DS this will work for both upn@domain and samaccountname@domain; AD LDS will only + * support the upn format + */ + static class DefaultADAuthenticator extends ADAuthenticator { + + final String domainName; + + DefaultADAuthenticator(RealmConfig realm, TimeValue timeout, boolean ignoreReferralErrors, Logger logger, + GroupsResolver groupsResolver, LdapMetaDataResolver metaDataResolver, String domainDN, ThreadPool threadPool) { + super(realm, timeout, ignoreReferralErrors, logger, groupsResolver, metaDataResolver, domainDN, + ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_FILTER_SETTING, + "(&(objectClass=user)(|(sAMAccountName={0})(userPrincipalName={0}@" + domainName(realm) + ")))", threadPool); + domainName = domainName(realm); + } + + private static String domainName(RealmConfig realm) { + return realm.settings().get(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING); + } + + @Override + void searchForDN(LDAPInterface connection, String username, SecureString password, + int timeLimitSeconds, ActionListener listener) { + try { + searchForEntry(connection, userSearchDN, userSearchScope.scope(), + createFilter(userSearchFilter, username), timeLimitSeconds, + ignoreReferralErrors, listener, + attributesToSearchFor(groupsResolver.attributes())); + } catch (LDAPException e) { + listener.onFailure(e); + } + } + + @Override + String bindUsername(String username) { + return username + "@" + domainName; + } + } + + /** + * Active Directory calls the format DOMAIN\\username down-level credentials and + * this class contains the logic necessary to authenticate this form of a username + */ + static class DownLevelADAuthenticator extends ADAuthenticator { + static final String DOWN_LEVEL_FILTER = "(&(objectClass=user)(sAMAccountName={0}))"; + Cache domainNameCache = CacheBuilder.builder().setMaximumWeight(100).build(); + + final String domainDN; + final Settings settings; + final SSLService sslService; + final RealmConfig config; + private final int ldapPort; + private final int ldapsPort; + private final int gcLdapPort; + private final int gcLdapsPort; + + DownLevelADAuthenticator(RealmConfig config, TimeValue timeout, boolean ignoreReferralErrors, Logger logger, + GroupsResolver groupsResolver, LdapMetaDataResolver metaDataResolver, String domainDN, SSLService sslService, + ThreadPool threadPool, int ldapPort, int ldapsPort, int gcLdapPort, int gcLdapsPort) { + super(config, timeout, ignoreReferralErrors, logger, groupsResolver, metaDataResolver, domainDN, + ActiveDirectorySessionFactorySettings.AD_DOWN_LEVEL_USER_SEARCH_FILTER_SETTING, DOWN_LEVEL_FILTER, threadPool); + this.domainDN = domainDN; + this.settings = config.settings(); + this.sslService = sslService; + this.config = config; + this.ldapPort = ldapPort; + this.ldapsPort = ldapsPort; + this.gcLdapPort = gcLdapPort; + this.gcLdapsPort = gcLdapsPort; + } + + @Override + void searchForDN(LDAPInterface connection, String username, SecureString password, int 
timeLimitSeconds, + ActionListener listener) { + String[] parts = username.split("\\\\"); + assert parts.length == 2; + final String netBiosDomainName = parts[0]; + final String accountName = parts[1]; + + netBiosDomainNameToDn(connection, netBiosDomainName, username, password, timeLimitSeconds, ActionListener.wrap((domainDN) -> { + if (domainDN == null) { + listener.onResponse(null); + } else { + searchForEntry(connection, domainDN, LdapSearchScope.SUB_TREE.scope(), createFilter(userSearchFilter, accountName), + timeLimitSeconds, ignoreReferralErrors, listener, attributesToSearchFor(groupsResolver.attributes())); + } + }, listener::onFailure)); + } + + void netBiosDomainNameToDn(LDAPInterface ldapInterface, String netBiosDomainName, String username, SecureString password, + int timeLimitSeconds, ActionListener listener) { + LDAPConnection ldapConnection = null; + try { + final Filter filter = createFilter(NETBIOS_NAME_FILTER_TEMPLATE, netBiosDomainName); + final String cachedName = domainNameCache.get(netBiosDomainName); + if (cachedName != null) { + listener.onResponse(cachedName); + } else if (usingGlobalCatalog(ldapInterface) == false) { + search(ldapInterface, "CN=Configuration," + domainDN, LdapSearchScope.SUB_TREE.scope(), filter, timeLimitSeconds, + ignoreReferralErrors, + ActionListener.wrap((results) -> handleSearchResults(results, netBiosDomainName, domainNameCache, listener), + listener::onFailure), + "ncname"); + } else { + // the global catalog does not replicate the necessary information to map a + // netbios dns name to a DN so we need to instead connect to the normal ports. + // This code uses the standard ports to avoid adding even more settings and is + // probably ok as most AD users do not use non-standard ports + if (ldapInterface instanceof LDAPConnection) { + ldapConnection = (LDAPConnection) ldapInterface; + } else { + ldapConnection = LdapUtils.privilegedConnect(((LDAPConnectionPool) ldapInterface)::getConnection); + } + final LDAPConnection finalLdapConnection = ldapConnection; + final LDAPConnection searchConnection = LdapUtils.privilegedConnect( + () -> new LDAPConnection(finalLdapConnection.getSocketFactory(), connectionOptions(config, sslService, logger), + finalLdapConnection.getConnectedAddress(), + finalLdapConnection.getSSLSession() != null ? ldapsPort : ldapPort)); + final byte[] passwordBytes = CharArrays.toUtf8Bytes(password.getChars()); + final SimpleBindRequest bind = bindDN.isEmpty() + ? 
new SimpleBindRequest(username, passwordBytes) + : new SimpleBindRequest(bindDN, CharArrays.toUtf8Bytes(bindPassword.getChars())); + LdapUtils.maybeForkThenBind(searchConnection, bind, threadPool, new ActionRunnable(listener) { + @Override + protected void doRun() throws Exception { + search(searchConnection, "CN=Configuration," + domainDN, LdapSearchScope.SUB_TREE.scope(), filter, + timeLimitSeconds, ignoreReferralErrors, + ActionListener.wrap( + results -> { + IOUtils.close(searchConnection); + handleSearchResults(results, netBiosDomainName, domainNameCache, listener); + }, e -> { + IOUtils.closeWhileHandlingException(searchConnection); + listener.onFailure(e); + }), + "ncname"); + } + + @Override + public void onFailure(Exception e) { + IOUtils.closeWhileHandlingException(searchConnection); + listener.onFailure(e); + }; + }); + } + } catch (LDAPException e) { + listener.onFailure(e); + } finally { + if (ldapInterface instanceof LDAPConnectionPool && ldapConnection != null) { + ((LDAPConnectionPool) ldapInterface).releaseConnection(ldapConnection); + } + } + } + + static void handleSearchResults(List results, String netBiosDomainName, + Cache domainNameCache, + ActionListener listener) { + Optional entry = results.stream() + .filter((r) -> r.hasAttribute("ncname")) + .findFirst(); + if (entry.isPresent()) { + final String value = entry.get().getAttributeValue("ncname"); + try { + domainNameCache.computeIfAbsent(netBiosDomainName, (s) -> value); + } catch (ExecutionException e) { + throw new AssertionError("failed to load constant non-null value", e); + } + listener.onResponse(value); + } else { + listener.onResponse(null); + } + } + + boolean usingGlobalCatalog(LDAPInterface ldap) throws LDAPException { + if (ldap instanceof LDAPConnection) { + return usingGlobalCatalog((LDAPConnection) ldap); + } else { + LDAPConnectionPool pool = (LDAPConnectionPool) ldap; + LDAPConnection connection = null; + try { + connection = LdapUtils.privilegedConnect(pool::getConnection); + return usingGlobalCatalog(connection); + } finally { + if (connection != null) { + pool.releaseConnection(connection); + } + } + } + } + + private boolean usingGlobalCatalog(LDAPConnection ldapConnection) { + return ldapConnection.getConnectedPort() == gcLdapPort || ldapConnection.getConnectedPort() == gcLdapsPort; + } + } + + /** + * Authenticates user principal names provided by the user (eq user@domain). Note this authenticator does not currently support + * UPN suffixes that are different than the actual domain name. 
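+     * <p>
+     * Illustrative example (the user below is hypothetical): a login of "jdoe@example.com" keeps the full UPN and
+     * the entry is located with the filter (&(objectClass=user)(userPrincipalName=jdoe@example.com)) under the
+     * configured user search base DN.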
+ */ + static class UpnADAuthenticator extends ADAuthenticator { + + static final String UPN_USER_FILTER = "(&(objectClass=user)(userPrincipalName={1}))"; + + UpnADAuthenticator(RealmConfig config, TimeValue timeout, boolean ignoreReferralErrors, Logger logger, + GroupsResolver groupsResolver, LdapMetaDataResolver metaDataResolver, String domainDN, ThreadPool threadPool) { + super(config, timeout, ignoreReferralErrors, logger, groupsResolver, metaDataResolver, domainDN, + ActiveDirectorySessionFactorySettings.AD_UPN_USER_SEARCH_FILTER_SETTING, UPN_USER_FILTER, threadPool); + if (userSearchFilter.contains("{0}")) { + new DeprecationLogger(logger).deprecated("The use of the account name variable {0} in the setting [" + + RealmSettings.getFullSettingKey(config, ActiveDirectorySessionFactorySettings.AD_UPN_USER_SEARCH_FILTER_SETTING) + + "] has been deprecated and will be removed in a future version!"); + } + } + + @Override + void searchForDN(LDAPInterface connection, String username, SecureString password, int timeLimitSeconds, + ActionListener listener) { + String[] parts = username.split("@"); + assert parts.length == 2 : "there should have only been two values for " + username + " after splitting on '@'"; + final String accountName = parts[0]; + try { + Filter filter = createFilter(userSearchFilter, accountName, username); + searchForEntry(connection, userSearchDN, LdapSearchScope.SUB_TREE.scope(), filter, + timeLimitSeconds, ignoreReferralErrors, listener, + attributesToSearchFor(groupsResolver.attributes())); + } catch (LDAPException e) { + listener.onFailure(e); + } + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java new file mode 100644 index 0000000000000..ceb28ada76a97 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java @@ -0,0 +1,311 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.ldap; + +import com.unboundid.ldap.sdk.LDAPException; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.threadpool.ThreadPool.Names; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.LdapSessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapLoadBalancing; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; +import org.elasticsearch.xpack.security.authc.ldap.support.SessionFactory; +import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; +import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.security.authc.support.UserRoleMapper.UserData; +import org.elasticsearch.xpack.security.authc.support.mapper.CompositeRoleMapper; +import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; + +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.Supplier; + + +/** + * Authenticates username/password tokens against ldap, locates groups and maps them to roles. 
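+ * <p>
+ * A brief note on the two modes of operation (summarised from {@link #sessionFactory}): the realm either builds
+ * the user DN from one of the configured user DN templates, or, when user search settings are present, first
+ * locates the user entry with a separate bind and then authenticates as the entry that was found; exactly one of
+ * the two modes must be configured.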
+ */ +public final class LdapRealm extends CachingUsernamePasswordRealm { + + private final SessionFactory sessionFactory; + private final UserRoleMapper roleMapper; + private final ThreadPool threadPool; + private final TimeValue executionTimeout; + + + public LdapRealm(String type, RealmConfig config, SSLService sslService, + ResourceWatcherService watcherService, + NativeRoleMappingStore nativeRoleMappingStore, ThreadPool threadPool) + throws LDAPException { + this(type, config, sessionFactory(config, sslService, threadPool, type), + new CompositeRoleMapper(type, config, watcherService, nativeRoleMappingStore), + threadPool); + } + + // pkg private for testing + LdapRealm(String type, RealmConfig config, SessionFactory sessionFactory, + UserRoleMapper roleMapper, ThreadPool threadPool) { + super(type, config); + this.sessionFactory = sessionFactory; + this.roleMapper = roleMapper; + this.threadPool = threadPool; + this.executionTimeout = LdapRealmSettings.EXECUTION_TIMEOUT.get(config.settings()); + roleMapper.refreshRealmOnChange(this); + } + + static SessionFactory sessionFactory(RealmConfig config, SSLService sslService, ThreadPool threadPool, String type) + throws LDAPException { + + final SessionFactory sessionFactory; + if (LdapRealmSettings.AD_TYPE.equals(type)) { + sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); + } else { + assert LdapRealmSettings.LDAP_TYPE.equals(type) : "type [" + type + "] is unknown. expected one of [" + + LdapRealmSettings.AD_TYPE + ", " + LdapRealmSettings.LDAP_TYPE + "]"; + final boolean hasSearchSettings = LdapUserSearchSessionFactory.hasUserSearchSettings(config); + final boolean hasTemplates = LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING.exists(config.settings()); + if (hasSearchSettings == false) { + if (hasTemplates == false) { + throw new IllegalArgumentException("settings were not found for either user search [" + + RealmSettings.getFullSettingKey(config, LdapUserSearchSessionFactory.SEARCH_PREFIX) + + "] or user template [" + + RealmSettings.getFullSettingKey(config, LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING) + + "] modes of operation. " + + "Please provide the settings for the mode you wish to use. For more details refer to the ldap " + + "authentication section of the X-Pack guide."); + } + sessionFactory = new LdapSessionFactory(config, sslService, threadPool); + } else if (hasTemplates) { + throw new IllegalArgumentException("settings were found for both user search [" + + RealmSettings.getFullSettingKey(config, LdapUserSearchSessionFactory.SEARCH_PREFIX) + + "] and user template [" + + RealmSettings.getFullSettingKey(config, LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING) + + "] modes of operation. " + + "Please remove the settings for the mode you do not wish to use. For more details refer to the ldap " + + "authentication section of the X-Pack guide."); + } else { + sessionFactory = new LdapUserSearchSessionFactory(config, sslService, threadPool); + } + } + return sessionFactory; + } + + /** + * Given a username and password, open a connection to ldap, bind to authenticate, retrieve groups, map to roles and build the user. + * This user will then be passed to the listener + */ + @Override + protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { + // we submit to the threadpool because authentication using LDAP will execute blocking I/O for a bind request and we don't want + // network threads stuck waiting for a socket to connect. 
After the bind, then all interaction with LDAP should be async + final CancellableLdapRunnable cancellableLdapRunnable = new CancellableLdapRunnable<>(listener, + ex -> AuthenticationResult.unsuccessful("Authentication against realm [" + this.toString() + "] failed", ex), + () -> sessionFactory.session(token.principal(), token.credentials(), + contextPreservingListener(new LdapSessionActionListener("authenticate", token.principal(), listener))), logger + ); + threadPool.generic().execute(cancellableLdapRunnable); + threadPool.schedule(executionTimeout, Names.SAME, cancellableLdapRunnable::maybeTimeout); + } + + @Override + protected void doLookupUser(String username, ActionListener userActionListener) { + if (sessionFactory.supportsUnauthenticatedSession()) { + // we submit to the threadpool because authentication using LDAP will execute blocking I/O for a bind request and we don't want + // network threads stuck waiting for a socket to connect. After the bind, then all interaction with LDAP should be async + final ActionListener sessionListener = ActionListener.wrap( + result -> userActionListener.onResponse(result.getUser()), + userActionListener::onFailure); + final CancellableLdapRunnable cancellableLdapRunnable = new CancellableLdapRunnable<>(userActionListener, e -> null, + () -> sessionFactory.unauthenticatedSession(username, + contextPreservingListener(new LdapSessionActionListener("lookup", username, sessionListener))), logger); + threadPool.generic().execute(cancellableLdapRunnable); + threadPool.schedule(executionTimeout, Names.SAME, cancellableLdapRunnable::maybeTimeout); + } else { + userActionListener.onResponse(null); + } + } + + /** + * Wraps the provided sessionListener to preserve the {@link ThreadContext} associated with the + * current thread. + * Responses headers are not preserved, as they are not needed. Response output should not yet exist, nor should + * any be produced within the realm/ldap-session. 
+ */ + private ContextPreservingActionListener contextPreservingListener(LdapSessionActionListener sessionListener) { + final Supplier toRestore = config.threadContext().newRestorableContext(false); + return new ContextPreservingActionListener<>(toRestore, + sessionListener); + } + + @Override + public Map usageStats() { + Map usage = super.usageStats(); + usage.put("load_balance_type", LdapLoadBalancing.resolve(config.settings()).toString()); + usage.put("ssl", sessionFactory.isSslUsed()); + usage.put("user_search", LdapUserSearchSessionFactory.hasUserSearchSettings(config)); + return usage; + } + + private static void buildUser(LdapSession session, String username, ActionListener listener, + UserRoleMapper roleMapper) { + if (session == null) { + listener.onResponse(AuthenticationResult.notHandled()); + } else { + boolean loadingGroups = false; + try { + final Consumer onFailure = e -> { + IOUtils.closeWhileHandlingException(session); + listener.onFailure(e); + }; + session.resolve(ActionListener.wrap((ldapData) -> { + final Map metadata = MapBuilder.newMapBuilder() + .put("ldap_dn", session.userDn()) + .put("ldap_groups", ldapData.groups) + .putAll(ldapData.metaData) + .map(); + final UserData user = new UserData(username, session.userDn(), ldapData.groups, + metadata, session.realm()); + roleMapper.resolveRoles(user, ActionListener.wrap( + roles -> { + IOUtils.close(session); + String[] rolesArray = roles.toArray(new String[roles.size()]); + listener.onResponse(AuthenticationResult.success( + new User(username, rolesArray, null, null, metadata, true)) + ); + }, onFailure + )); + }, onFailure)); + loadingGroups = true; + } finally { + if (loadingGroups == false) { + session.close(); + } + } + } + } + + + /** + * A special {@link ActionListener} that encapsulates the handling of a LdapSession, which is used to return a user. This class handles + * cases where the session is null or where an exception may be caught after a session has been established, which requires the + * closing of the session. + */ + private class LdapSessionActionListener implements ActionListener { + + private final AtomicReference ldapSessionAtomicReference = new AtomicReference<>(); + private String action; + private final String username; + private final ActionListener resultListener; + + LdapSessionActionListener(String action, String username, ActionListener resultListener) { + this.action = action; + this.username = username; + this.resultListener = resultListener; + } + + @Override + public void onResponse(LdapSession session) { + if (session == null) { + resultListener.onResponse(AuthenticationResult.notHandled()); + } else { + ldapSessionAtomicReference.set(session); + buildUser(session, username, resultListener, roleMapper); + } + } + + @Override + public void onFailure(Exception e) { + if (ldapSessionAtomicReference.get() != null) { + IOUtils.closeWhileHandlingException(ldapSessionAtomicReference.get()); + } + if (logger.isDebugEnabled()) { + logger.debug(new ParameterizedMessage("Exception occurred during {} for {}", action, LdapRealm.this), e); + } + resultListener.onResponse(AuthenticationResult.unsuccessful(action + " failed", e)); + } + + } + + /** + * A runnable that allows us to terminate and call the listener. We use this as a runnable can + * be queued and not executed for a long time or ever and this causes user requests to appear + * to hang. In these cases at least we can provide a response. 
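+     * <p>
+     * Execution and timeout race on a single atomic state: whichever of {@link #doRun()} and
+     * {@link #maybeTimeout()} first moves it away from {@code AWAITING_EXECUTION} proceeds, and the other becomes
+     * a no-op.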
+ */ + static class CancellableLdapRunnable extends AbstractRunnable { + + private final Runnable in; + private final ActionListener listener; + private final Function defaultValue; + private final Logger logger; + private final AtomicReference state = new AtomicReference<>(LdapRunnableState.AWAITING_EXECUTION); + + CancellableLdapRunnable(ActionListener listener, Function defaultValue, Runnable in, Logger logger) { + this.listener = listener; + this.defaultValue = Objects.requireNonNull(defaultValue); + this.in = in; + this.logger = logger; + } + + @Override + public void onFailure(Exception e) { + logger.error("execution of ldap runnable failed", e); + final T result = defaultValue.apply(e); + listener.onResponse(result); + } + + @Override + protected void doRun() throws Exception { + if (state.compareAndSet(LdapRunnableState.AWAITING_EXECUTION, LdapRunnableState.EXECUTING)) { + in.run(); + } else { + logger.trace("skipping execution of ldap runnable as the current state is [{}]", state.get()); + } + } + + @Override + public void onRejection(Exception e) { + listener.onFailure(e); + } + + /** + * If the execution of this runnable has not already started, the runnable is cancelled and we pass an exception to the user + * listener + */ + void maybeTimeout() { + if (state.compareAndSet(LdapRunnableState.AWAITING_EXECUTION, LdapRunnableState.TIMED_OUT)) { + logger.warn("skipping execution of ldap runnable as it has been waiting for " + + "execution too long"); + listener.onFailure(new ElasticsearchTimeoutException("timed out waiting for " + + "execution of ldap runnable")); + } + } + + enum LdapRunnableState { + AWAITING_EXECUTION, + EXECUTING, + TIMED_OUT + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactory.java new file mode 100644 index 0000000000000..36d14aa67c0de --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactory.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.ldap; + +import com.unboundid.ldap.sdk.LDAPConnection; +import com.unboundid.ldap.sdk.LDAPException; +import com.unboundid.ldap.sdk.SimpleBindRequest; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.LdapSessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.ldap.SearchGroupsResolverSettings; +import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapMetaDataResolver; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession.GroupsResolver; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils; +import org.elasticsearch.xpack.security.authc.ldap.support.SessionFactory; + +import java.text.MessageFormat; +import java.util.Locale; + +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.escapedRDNValue; + +/** + * This factory creates LDAP connections via iterating through user templates. + * + * Note that even though there is a separate factory for Active Directory, this factory would work against AD. A template + * for each user context would need to be supplied. + */ +public class LdapSessionFactory extends SessionFactory { + + private final String[] userDnTemplates; + private final GroupsResolver groupResolver; + private final LdapMetaDataResolver metaDataResolver; + + public LdapSessionFactory(RealmConfig config, SSLService sslService, ThreadPool threadPool) { + super(config, sslService, threadPool); + Settings settings = config.settings(); + userDnTemplates = LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY); + if (userDnTemplates.length == 0) { + throw new IllegalArgumentException("missing required LDAP setting [" + + RealmSettings.getFullSettingKey(config, LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING) + "]"); + } + logger.info("Realm [{}] is in user-dn-template mode: [{}]", config.name(), userDnTemplates); + groupResolver = groupResolver(settings); + metaDataResolver = new LdapMetaDataResolver(settings, ignoreReferralErrors); + } + + /** + * This iterates through the configured user templates attempting to open. If all attempts fail, the last exception + * is kept as the cause of the thrown exception + * + * @param username a relative name, Not a distinguished name, that will be inserted into the template. 
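As a side note, the template substitution that buildDnFromTemplate performs further down can be reproduced with plain JDK classes. The template and username here are invented for the example.

```java
import javax.naming.ldap.Rdn;
import java.text.MessageFormat;
import java.util.Locale;

// The username is RDN-escaped and then substituted into a MessageFormat template such as
// "cn={0},ou=people,dc=example,dc=com", so characters like commas cannot alter the DN structure.
public class DnTemplateExample {
    public static void main(String[] args) {
        String template = "cn={0},ou=people,dc=example,dc=com";
        String username = "smith, john";                 // contains a comma that must be escaped
        String escaped = Rdn.escapeValue(username);      // -> "smith\, john"
        String dn = new MessageFormat(template, Locale.ROOT)
                .format(new Object[] { escaped }, new StringBuffer(), null)
                .toString();
        System.out.println(dn);  // cn=smith\, john,ou=people,dc=example,dc=com
    }
}
```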
+ */ + @Override + public void session(String username, SecureString password, ActionListener listener) { + try { + new AbstractRunnable() { + final LDAPConnection connection = LdapUtils.privilegedConnect(serverSet::getConnection); + final byte[] passwordBytes = CharArrays.toUtf8Bytes(password.getChars()); + Exception containerException = null; + int loopIndex = 0; + + @Override + protected void doRun() throws Exception { + listener.onResponse( + (new LdapSession(logger, config, connection, ((SimpleBindRequest) connection.getLastBindRequest()).getBindDN(), + groupResolver, metaDataResolver, timeout, null))); + } + + @Override + public void onFailure(Exception e) { + // record failure + if (containerException == null) { + containerException = e; + } else { + containerException.addSuppressed(e); + } + + if (loopIndex > userDnTemplates.length) { + listener.onFailure(new IllegalStateException("User DN template iteration index out of bounds.")); + } else if (loopIndex == userDnTemplates.length) { + // loop break + IOUtils.closeWhileHandlingException(connection); + listener.onFailure(containerException); + } else { + loop(); + } + } + + // loop body + void loop() { + final String template = userDnTemplates[loopIndex++]; + final SimpleBindRequest bind = new SimpleBindRequest(buildDnFromTemplate(username, template), passwordBytes); + LdapUtils.maybeForkThenBind(connection, bind, threadPool, this); + } + }.loop(); + } catch (LDAPException e) { + listener.onFailure(e); + } + } + + /** + * Securely escapes the username and inserts it into the template using MessageFormat + * + * @param username username to insert into the DN template. Any commas, equals or plus will be escaped. + * @return DN (distinquished name) build from the template. + */ + String buildDnFromTemplate(String username, String template) { + //this value must be escaped to avoid manipulation of the template DN. + String escapedUsername = escapedRDNValue(username); + return new MessageFormat(template, Locale.ROOT).format(new Object[] { escapedUsername }, new StringBuffer(), null).toString(); + } + + static GroupsResolver groupResolver(Settings settings) { + if (SearchGroupsResolverSettings.BASE_DN.exists(settings)) { + return new SearchGroupsResolver(settings); + } + return new UserAttributeGroupsResolver(settings); + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactory.java new file mode 100644 index 0000000000000..2ec87888d8c13 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactory.java @@ -0,0 +1,257 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.ldap; + +import com.unboundid.ldap.sdk.Filter; +import com.unboundid.ldap.sdk.LDAPConnection; +import com.unboundid.ldap.sdk.LDAPConnectionPool; +import com.unboundid.ldap.sdk.LDAPException; +import com.unboundid.ldap.sdk.LDAPInterface; +import com.unboundid.ldap.sdk.SearchResultEntry; +import com.unboundid.ldap.sdk.SimpleBindRequest; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.LdapUserSearchSessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.ldap.SearchGroupsResolverSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; +import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession.GroupsResolver; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils; + +import static org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings.BIND_DN; +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.attributesToSearchFor; +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.createFilter; +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.searchForEntry; + +class LdapUserSearchSessionFactory extends PoolingSessionFactory { + + static final String SEARCH_PREFIX = "user_search."; + + private final String userSearchBaseDn; + private final LdapSearchScope scope; + private final String searchFilter; + + LdapUserSearchSessionFactory(RealmConfig config, SSLService sslService, ThreadPool threadPool) throws LDAPException { + super(config, sslService, groupResolver(config.settings()), LdapUserSearchSessionFactorySettings.POOL_ENABLED, + BIND_DN.exists(config.settings()) ? 
BIND_DN.get(config.settings()) : null, + () -> { + if (BIND_DN.exists(config.settings())) { + return BIND_DN.get(config.settings()); + } else { + return LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN.get(config.settings()); + } + }, threadPool); + Settings settings = config.settings(); + if (LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN.exists(settings)) { + userSearchBaseDn = LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN.get(settings); + } else { + throw new IllegalArgumentException("[" + RealmSettings.getFullSettingKey(config, + LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN) + "] must be specified"); + } + scope = LdapUserSearchSessionFactorySettings.SEARCH_SCOPE.get(settings); + searchFilter = getSearchFilter(config); + logger.info("Realm [{}] is in user-search mode - base_dn=[{}], search filter=[{}]", + config.name(), userSearchBaseDn, searchFilter); + } + + static boolean hasUserSearchSettings(RealmConfig config) { + return config.settings().getByPrefix("user_search.").isEmpty() == false; + } + + /** + * Sets up a LDAPSession using the connection pool that potentially holds existing connections to the server + */ + @Override + void getSessionWithPool(LDAPConnectionPool connectionPool, String user, SecureString password, ActionListener listener) { + findUser(user, connectionPool, ActionListener.wrap((entry) -> { + if (entry == null) { + listener.onResponse(null); + } else { + final String dn = entry.getDN(); + final byte[] passwordBytes = CharArrays.toUtf8Bytes(password.getChars()); + final SimpleBindRequest bind = new SimpleBindRequest(dn, passwordBytes); + LdapUtils.maybeForkThenBindAndRevert(connectionPool, bind, threadPool, new ActionRunnable(listener) { + @Override + protected void doRun() throws Exception { + listener.onResponse(new LdapSession(logger, config, connectionPool, dn, groupResolver, metaDataResolver, timeout, + entry.getAttributes())); + } + }); + } + }, listener::onFailure)); + } + + /** + * Sets up a LDAPSession using the following process: + *

+ * 1. Opening a new connection to the LDAP server
+ * 2. Executes a bind request using the bind user
+ * 3. Executes a search to find the DN of the user
+ * 4. Closes the opened connection
+ * 5. Opens a new connection to the LDAP server
+ * 6. Executes a bind request using the found DN and provided password
+ * 7. Creates a new LDAPSession with the bound connection
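A synchronous, simplified sketch of these steps against the UnboundID SDK follows. The host name, DNs, and passwords are placeholders, and the factory itself performs the same work asynchronously on a single connection by re-binding as the bind user rather than opening a second connection.

```java
import com.unboundid.ldap.sdk.Filter;
import com.unboundid.ldap.sdk.LDAPConnection;
import com.unboundid.ldap.sdk.LDAPException;
import com.unboundid.ldap.sdk.SearchResultEntry;
import com.unboundid.ldap.sdk.SearchScope;
import com.unboundid.ldap.sdk.SimpleBindRequest;

// Find the user's DN with the bind ("service") user, then verify the user's own credentials.
public class UserSearchThenBind {
    public static void main(String[] args) throws LDAPException {
        try (LDAPConnection connection = new LDAPConnection("ldap.example.com", 389)) {
            // bind as the configured bind user
            connection.bind(new SimpleBindRequest("cn=search-user,dc=example,dc=com", "service-password"));

            // search for the entry of the user that is authenticating
            Filter filter = Filter.createEqualityFilter("uid", "jsmith");
            SearchResultEntry entry = connection.searchForEntry("ou=people,dc=example,dc=com", SearchScope.SUB, filter);
            if (entry == null) {
                System.out.println("no such user");
                return;
            }

            // bind again, this time as the DN that was found, with the user-supplied password
            connection.bind(new SimpleBindRequest(entry.getDN(), "user-password"));
            System.out.println("authenticated " + entry.getDN());
            // the realm would now wrap the connection and the entry's attributes in an LdapSession
        }
    }
}
```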
+ */ + @Override + void getSessionWithoutPool(String user, SecureString password, ActionListener listener) { + try { + final LDAPConnection connection = LdapUtils.privilegedConnect(serverSet::getConnection); + LdapUtils.maybeForkThenBind(connection, bindCredentials, threadPool, new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + findUser(user, connection, ActionListener.wrap((entry) -> { + if (entry == null) { + IOUtils.close(connection); + listener.onResponse(null); + } else { + final String dn = entry.getDN(); + final byte[] passwordBytes = CharArrays.toUtf8Bytes(password.getChars()); + final SimpleBindRequest userBind = new SimpleBindRequest(dn, passwordBytes); + LdapUtils.maybeForkThenBind(connection, userBind, threadPool, new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + LdapUtils.maybeForkThenBind(connection, bindCredentials, threadPool, new AbstractRunnable() { + + @Override + protected void doRun() throws Exception { + listener.onResponse(new LdapSession(logger, config, connection, dn, groupResolver, + metaDataResolver, timeout, entry.getAttributes())); + } + + @Override + public void onFailure(Exception e) { + IOUtils.closeWhileHandlingException(connection); + listener.onFailure(e); + } + }); + } + + @Override + public void onFailure(Exception e) { + IOUtils.closeWhileHandlingException(connection); + listener.onFailure(e); + } + }); + } + }, e -> { + IOUtils.closeWhileHandlingException(connection); + listener.onFailure(e); + })); + } + @Override + public void onFailure(Exception e) { + IOUtils.closeWhileHandlingException(connection); + listener.onFailure(e); + } + }); + } catch (LDAPException e) { + listener.onFailure(e); + } + } + + @Override + public boolean supportsUnauthenticatedSession() { + return true; + } + + @Override + void getUnauthenticatedSessionWithPool(LDAPConnectionPool connectionPool, String user, ActionListener listener) { + findUser(user, connectionPool, ActionListener.wrap((entry) -> { + if (entry == null) { + listener.onResponse(null); + } else { + final String dn = entry.getDN(); + LdapSession session = new LdapSession(logger, config, connectionPool, dn, groupResolver, metaDataResolver, timeout, + entry.getAttributes()); + listener.onResponse(session); + } + }, listener::onFailure)); + } + + @Override + void getUnauthenticatedSessionWithoutPool(String user, ActionListener listener) { + try { + final LDAPConnection connection = LdapUtils.privilegedConnect(serverSet::getConnection); + LdapUtils.maybeForkThenBind(connection, bindCredentials, threadPool, new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + findUser(user, connection, ActionListener.wrap((entry) -> { + if (entry == null) { + IOUtils.close(connection); + listener.onResponse(null); + } else { + listener.onResponse(new LdapSession(logger, config, connection, entry.getDN(), groupResolver, metaDataResolver, + timeout, entry.getAttributes())); + } + }, e -> { + IOUtils.closeWhileHandlingException(connection); + listener.onFailure(e); + })); + } + + @Override + public void onFailure(Exception e) { + IOUtils.closeWhileHandlingException(connection); + listener.onFailure(e); + } + }); + } catch (LDAPException e) { + listener.onFailure(e); + } + } + + private void findUser(String user, LDAPInterface ldapInterface, ActionListener listener) { + final Filter filter; + try { + filter = createFilter(searchFilter, user); + } catch (LDAPException e) { + listener.onFailure(e); + return; + } + + searchForEntry(ldapInterface, 
userSearchBaseDn, scope.scope(), + filter, Math.toIntExact(timeout.seconds()), ignoreReferralErrors, listener, + attributesToSearchFor(groupResolver.attributes(), metaDataResolver.attributeNames())); + } + + private static GroupsResolver groupResolver(Settings settings) { + if (SearchGroupsResolverSettings.BASE_DN.exists(settings)) { + return new SearchGroupsResolver(settings); + } + return new UserAttributeGroupsResolver(settings); + } + + static String getSearchFilter(RealmConfig config) { + final Settings settings = config.settings(); + final boolean hasAttribute = LdapUserSearchSessionFactorySettings.SEARCH_ATTRIBUTE.exists(settings); + final boolean hasFilter = LdapUserSearchSessionFactorySettings.SEARCH_FILTER.exists(settings); + if (hasAttribute && hasFilter) { + throw new IllegalArgumentException("search attribute setting [" + + RealmSettings.getFullSettingKey(config, LdapUserSearchSessionFactorySettings.SEARCH_ATTRIBUTE) + + "] and filter setting [" + + RealmSettings.getFullSettingKey(config, LdapUserSearchSessionFactorySettings.SEARCH_FILTER) + + "] cannot be combined!"); + } else if (hasFilter) { + return LdapUserSearchSessionFactorySettings.SEARCH_FILTER.get(settings); + } else if (hasAttribute) { + return "(" + LdapUserSearchSessionFactorySettings.SEARCH_ATTRIBUTE.get(settings) + "={0})"; + } else { + return "(uid={0})"; + } + } + + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/PoolingSessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/PoolingSessionFactory.java new file mode 100644 index 0000000000000..367bd525036e2 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/PoolingSessionFactory.java @@ -0,0 +1,201 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.ldap; + +import com.unboundid.ldap.sdk.BindRequest; +import com.unboundid.ldap.sdk.GetEntryLDAPConnectionPoolHealthCheck; +import com.unboundid.ldap.sdk.LDAPConnectionPool; +import com.unboundid.ldap.sdk.LDAPConnectionPoolHealthCheck; +import com.unboundid.ldap.sdk.LDAPException; +import com.unboundid.ldap.sdk.ServerSet; +import com.unboundid.ldap.sdk.SimpleBindRequest; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapMetaDataResolver; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils; +import org.elasticsearch.xpack.security.authc.ldap.support.SessionFactory; + +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD; +import static org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings.SECURE_BIND_PASSWORD; + +/** + * Base class for LDAP session factories that can make use of a connection pool + */ +abstract class PoolingSessionFactory extends SessionFactory implements Releasable { + + private final boolean useConnectionPool; + private final LDAPConnectionPool connectionPool; + + final SimpleBindRequest bindCredentials; + final LdapMetaDataResolver metaDataResolver; + final LdapSession.GroupsResolver groupResolver; + + + /** + * @param config the configuration for the realm + * @param sslService the ssl service to get a socket factory or context from + * @param groupResolver the resolver to use to find groups belonging to a user + * @param poolingEnabled the setting that should be used to determine if connection pooling is enabled + * @param bindDn the DN of the user to be used for pooled connections (or null to perform anonymous bind) + * @param healthCheckDNSupplier a supplier for the dn to query for health checks + * @param threadPool a thread pool used for async queries execution + */ + PoolingSessionFactory(RealmConfig config, SSLService sslService, LdapSession.GroupsResolver groupResolver, + Setting poolingEnabled, @Nullable String bindDn, Supplier healthCheckDNSupplier, + ThreadPool threadPool) throws LDAPException { + super(config, sslService, threadPool); + this.groupResolver = groupResolver; + this.metaDataResolver = new LdapMetaDataResolver(config.settings(), ignoreReferralErrors); + + final byte[] bindPassword; + if (LEGACY_BIND_PASSWORD.exists(config.settings())) { + if (SECURE_BIND_PASSWORD.exists(config.settings())) { + throw new IllegalArgumentException("You cannot specify both [" + + RealmSettings.getFullSettingKey(config, LEGACY_BIND_PASSWORD) + "] and [" + + 
RealmSettings.getFullSettingKey(config, SECURE_BIND_PASSWORD) + "]"); + } else { + bindPassword = CharArrays.toUtf8Bytes(LEGACY_BIND_PASSWORD.get(config.settings()).getChars()); + } + } else if (SECURE_BIND_PASSWORD.exists(config.settings())) { + bindPassword = CharArrays.toUtf8Bytes(SECURE_BIND_PASSWORD.get(config.settings()).getChars()); + } else { + bindPassword = null; + } + + if (bindDn == null) { + bindCredentials = new SimpleBindRequest(); + } else { + bindCredentials = new SimpleBindRequest(bindDn, bindPassword); + } + + this.useConnectionPool = poolingEnabled.get(config.settings()); + if (useConnectionPool) { + this.connectionPool = createConnectionPool(config, serverSet, timeout, logger, bindCredentials, healthCheckDNSupplier); + } else { + this.connectionPool = null; + } + } + + @Override + public final void session(String user, SecureString password, ActionListener listener) { + if (useConnectionPool) { + getSessionWithPool(connectionPool, user, password, listener); + } else { + getSessionWithoutPool(user, password, listener); + } + } + + @Override + public final void unauthenticatedSession(String user, ActionListener listener) { + if (useConnectionPool) { + getUnauthenticatedSessionWithPool(connectionPool, user, listener); + } else { + getUnauthenticatedSessionWithoutPool(user, listener); + } + } + + /** + * Attempts to get a {@link LdapSession} using the provided credentials and makes use of the provided connection pool + */ + abstract void getSessionWithPool(LDAPConnectionPool connectionPool, String user, SecureString password, + ActionListener listener); + + /** + * Attempts to get a {@link LdapSession} using the provided credentials and opens a new connection to the ldap server + */ + abstract void getSessionWithoutPool(String user, SecureString password, ActionListener listener); + + /** + * Attempts to search using a pooled connection for the user and provides an unauthenticated {@link LdapSession} to the listener if the + * user is found + */ + abstract void getUnauthenticatedSessionWithPool(LDAPConnectionPool connectionPool, String user, ActionListener listener); + + /** + * Attempts to search using a new connection for the user and provides an unauthenticated {@link LdapSession} to the listener if the + * user is found + */ + abstract void getUnauthenticatedSessionWithoutPool(String user, ActionListener listener); + + /** + * Creates the connection pool that will be used by the session factory and initializes the health check support + */ + static LDAPConnectionPool createConnectionPool(RealmConfig config, ServerSet serverSet, TimeValue timeout, Logger logger, + BindRequest bindRequest, + Supplier healthCheckDnSupplier) throws LDAPException { + Settings settings = config.settings(); + final int initialSize = PoolingSessionFactorySettings.POOL_INITIAL_SIZE.get(settings); + final int size = PoolingSessionFactorySettings.POOL_SIZE.get(settings); + LDAPConnectionPool pool = null; + boolean success = false; + try { + pool = LdapUtils.privilegedConnect(() -> new LDAPConnectionPool(serverSet, bindRequest, initialSize, size)); + pool.setRetryFailedOperationsDueToInvalidConnections(true); + if (PoolingSessionFactorySettings.HEALTH_CHECK_ENABLED.get(settings)) { + String entryDn = PoolingSessionFactorySettings.HEALTH_CHECK_DN.get(settings).orElseGet(healthCheckDnSupplier); + final long healthCheckInterval = PoolingSessionFactorySettings.HEALTH_CHECK_INTERVAL.get(settings).millis(); + if (entryDn != null) { + // Checks the status of the LDAP connection at a specified interval in 
the background. We do not check on + // create as the LDAP server may require authentication to get an entry and a bind request has not been executed + // yet so we could end up never getting a connection. We do not check on checkout as we always set retry operations + // and the pool will handle a bad connection without the added latency on every operation + LDAPConnectionPoolHealthCheck healthCheck = new GetEntryLDAPConnectionPoolHealthCheck(entryDn, timeout.millis(), + false, false, false, true, false); + pool.setHealthCheck(healthCheck); + pool.setHealthCheckIntervalMillis(healthCheckInterval); + } else { + logger.warn(new ParameterizedMessage("[{}] and [{}} have not been specified or are not valid distinguished names," + + "so connection health checking is disabled", RealmSettings.getFullSettingKey(config, + PoolingSessionFactorySettings.BIND_DN), + RealmSettings.getFullSettingKey(config, PoolingSessionFactorySettings.HEALTH_CHECK_DN))); + } + } + + success = true; + return pool; + } finally { + if (success == false && pool != null) { + pool.close(); + } + } + } + + /** + * This method is used to cleanup the connection pool if one is being used + */ + @Override + public final void close() { + if (connectionPool != null) { + connectionPool.close(); + } + } + + /** + * For tests use only + * + * @return the connection pool for LDAP queries + */ + LDAPConnectionPool getConnectionPool() { + return connectionPool; + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolver.java new file mode 100644 index 0000000000000..c641be947d8dd --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolver.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
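Looking back at createConnectionPool in PoolingSessionFactory above, the pool and its background-only health check can be sketched directly against the UnboundID SDK. The host, DNs, pool sizes, and interval below are placeholders for the example.

```java
import com.unboundid.ldap.sdk.GetEntryLDAPConnectionPoolHealthCheck;
import com.unboundid.ldap.sdk.LDAPConnectionPool;
import com.unboundid.ldap.sdk.LDAPException;
import com.unboundid.ldap.sdk.SimpleBindRequest;
import com.unboundid.ldap.sdk.SingleServerSet;

// A pool of pre-bound connections with retry-on-invalid-connection and a health check that
// only runs in the background, mirroring the comment about avoiding checks on create/checkout.
public class PooledConnectionsSketch {
    public static void main(String[] args) throws LDAPException {
        SingleServerSet servers = new SingleServerSet("ldap.example.com", 389);
        SimpleBindRequest bind = new SimpleBindRequest("cn=search-user,dc=example,dc=com", "service-password");

        LDAPConnectionPool pool = new LDAPConnectionPool(servers, bind, 1, 20);
        pool.setRetryFailedOperationsDueToInvalidConnections(true);

        pool.setHealthCheck(new GetEntryLDAPConnectionPoolHealthCheck(
                "cn=search-user,dc=example,dc=com",
                5_000L,   // max response time in millis
                false,    // no check when a connection is created
                false,    // no check on checkout
                false,    // no check on release
                true,     // do check during background health checking
                false));  // no check after an exception
        pool.setHealthCheckIntervalMillis(60_000L);

        pool.close();
    }
}
```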
+ */ +package org.elasticsearch.xpack.security.authc.ldap; + +import com.unboundid.ldap.sdk.Attribute; +import com.unboundid.ldap.sdk.Filter; +import com.unboundid.ldap.sdk.LDAPException; +import com.unboundid.ldap.sdk.LDAPInterface; +import com.unboundid.ldap.sdk.SearchRequest; +import com.unboundid.ldap.sdk.SearchScope; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.ldap.SearchGroupsResolverSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession.GroupsResolver; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; + +import static org.elasticsearch.common.Strings.isNullOrEmpty; +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.OBJECT_CLASS_PRESENCE_FILTER; +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.createFilter; +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.search; +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.searchForEntry; +import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.IGNORE_REFERRAL_ERRORS_SETTING; + +/** + * Resolves the groups for a user by executing a search with a filter usually that contains a group + * object class with a attribute that matches an ID of the user + */ +class SearchGroupsResolver implements GroupsResolver { + + private final String baseDn; + private final String filter; + private final String userAttribute; + private final LdapSearchScope scope; + private final boolean ignoreReferralErrors; + + SearchGroupsResolver(Settings settings) { + if (SearchGroupsResolverSettings.BASE_DN.exists(settings)) { + baseDn = SearchGroupsResolverSettings.BASE_DN.get(settings); + } else { + throw new IllegalArgumentException("base_dn must be specified"); + } + filter = SearchGroupsResolverSettings.FILTER.get(settings); + userAttribute = SearchGroupsResolverSettings.USER_ATTRIBUTE.get(settings); + scope = SearchGroupsResolverSettings.SCOPE.get(settings); + this.ignoreReferralErrors = IGNORE_REFERRAL_ERRORS_SETTING.get(settings); + } + + @Override + public void resolve(LDAPInterface connection, String userDn, TimeValue timeout, Logger logger, + Collection attributes, ActionListener> listener) { + getUserId(userDn, attributes, connection, timeout, ActionListener.wrap((userId) -> { + if (userId == null) { + listener.onResponse(Collections.emptyList()); + } else { + try { + Filter userFilter = createFilter(filter, userId); + + search(connection, baseDn, scope.scope(), userFilter, + Math.toIntExact(timeout.seconds()), ignoreReferralErrors, + ActionListener.wrap( + (results) -> listener.onResponse(results + .stream() + .map((r) -> r.getDN()) + .collect(Collectors.toList()) + ), + listener::onFailure), + SearchRequest.NO_ATTRIBUTES); + } catch (LDAPException e) { + listener.onFailure(e); + } + } + }, listener::onFailure)); + } + + @Override + public String[] attributes() { + if (Strings.hasLength(userAttribute)) { + return new String[] { userAttribute }; + } + return null; + } + + private void getUserId(String dn, Collection attributes, LDAPInterface connection, + TimeValue timeout, ActionListener listener) { + 
if (isNullOrEmpty(userAttribute) || userAttribute.equals("dn")) { + listener.onResponse(dn); + } else if (attributes != null) { + final String value = attributes.stream() + .filter((attribute) -> attribute.getName().equals(userAttribute)) + .map(Attribute::getValue) + .findFirst() + .orElse(null); + listener.onResponse(value); + } else { + readUserAttribute(connection, dn, timeout, listener); + } + } + + void readUserAttribute(LDAPInterface connection, String userDn, TimeValue timeout, + ActionListener listener) { + searchForEntry(connection, userDn, SearchScope.BASE, OBJECT_CLASS_PRESENCE_FILTER, + Math.toIntExact(timeout.seconds()), ignoreReferralErrors, + ActionListener.wrap((entry) -> { + if (entry == null || entry.hasAttribute(userAttribute) == false) { + listener.onResponse(null); + } else { + listener.onResponse(entry.getAttributeValue(userAttribute)); + } + }, listener::onFailure), + userAttribute); + } + + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolver.java new file mode 100644 index 0000000000000..6f5393d591e02 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolver.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.ldap; + +import com.unboundid.ldap.sdk.Attribute; +import com.unboundid.ldap.sdk.LDAPInterface; +import com.unboundid.ldap.sdk.SearchScope; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.ldap.UserAttributeGroupsResolverSettings; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession.GroupsResolver; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.OBJECT_CLASS_PRESENCE_FILTER; +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.searchForEntry; +import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.IGNORE_REFERRAL_ERRORS_SETTING; + +/** +* Resolves the groups of a user based on the value of a attribute of the user's ldap entry +*/ +class UserAttributeGroupsResolver implements GroupsResolver { + + private final String attribute; + private final boolean ignoreReferralErrors; + + UserAttributeGroupsResolver(Settings settings) { + this(UserAttributeGroupsResolverSettings.ATTRIBUTE.get(settings), IGNORE_REFERRAL_ERRORS_SETTING.get(settings)); + } + + private UserAttributeGroupsResolver(String attribute, boolean ignoreReferralErrors) { + this.attribute = Objects.requireNonNull(attribute); + this.ignoreReferralErrors = ignoreReferralErrors; + } + + @Override + public void resolve(LDAPInterface connection, String userDn, TimeValue timeout, Logger logger, Collection attributes, + ActionListener> listener) { + if (attributes != null) { + List list = 
attributes.stream().filter((attr) -> attr.getName().equals(attribute)) + .flatMap(attr -> Arrays.stream(attr.getValues())).collect(Collectors.toList()); + listener.onResponse(Collections.unmodifiableList(list)); + } else { + searchForEntry(connection, userDn, SearchScope.BASE, OBJECT_CLASS_PRESENCE_FILTER, Math.toIntExact(timeout.seconds()), + ignoreReferralErrors, ActionListener.wrap((entry) -> { + if (entry == null || entry.hasAttribute(attribute) == false) { + listener.onResponse(Collections.emptyList()); + } else { + listener.onResponse(Collections.unmodifiableList(Arrays.asList(entry.getAttributeValues(attribute)))); + } + }, listener::onFailure), attribute); + } + } + + @Override + public String[] attributes() { + return new String[] { attribute }; + } + + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapLoadBalancing.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapLoadBalancing.java new file mode 100644 index 0000000000000..3ac40eb374aef --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapLoadBalancing.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.ldap.support; + +import com.unboundid.ldap.sdk.FailoverServerSet; +import com.unboundid.ldap.sdk.LDAPConnectionOptions; +import com.unboundid.ldap.sdk.RoundRobinDNSServerSet; +import com.unboundid.ldap.sdk.RoundRobinServerSet; +import com.unboundid.ldap.sdk.ServerSet; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapLoadBalancingSettings; + +import javax.net.SocketFactory; +import java.util.Locale; + +/** + * Enumeration representing the various supported {@link ServerSet} types that can be used with out built in realms. 
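For reference, the FAILOVER and ROUND_ROBIN variants defined below reduce to the following UnboundID server sets; the host names and ports are placeholders for the example.

```java
import com.unboundid.ldap.sdk.FailoverServerSet;
import com.unboundid.ldap.sdk.LDAPConnection;
import com.unboundid.ldap.sdk.LDAPException;
import com.unboundid.ldap.sdk.RoundRobinServerSet;

// Two load-balancing strategies over the same list of servers: failover prefers the first
// reachable server, round robin rotates through them.
public class ServerSetSketch {
    public static void main(String[] args) throws LDAPException {
        String[] hosts = { "ldap1.example.com", "ldap2.example.com" };
        int[] ports = { 389, 389 };

        FailoverServerSet failover = new FailoverServerSet(hosts, ports);
        failover.setReOrderOnFailover(true); // go back to the preferred server once it recovers

        RoundRobinServerSet roundRobin = new RoundRobinServerSet(hosts, ports);

        // either set hands out connections through the same call
        LDAPConnection connection = roundRobin.getConnection();
        connection.close();
    }
}
```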
+ */ +public enum LdapLoadBalancing { + + FAILOVER() { + @Override + ServerSet buildServerSet(String[] addresses, int[] ports, Settings settings, @Nullable SocketFactory socketFactory, + @Nullable LDAPConnectionOptions options) { + FailoverServerSet serverSet = new FailoverServerSet(addresses, ports, socketFactory, options); + serverSet.setReOrderOnFailover(true); + return serverSet; + } + }, + + ROUND_ROBIN() { + @Override + ServerSet buildServerSet(String[] addresses, int[] ports, Settings settings, @Nullable SocketFactory socketFactory, + @Nullable LDAPConnectionOptions options) { + return new RoundRobinServerSet(addresses, ports, socketFactory, options); + } + }, + + DNS_ROUND_ROBIN() { + @Override + ServerSet buildServerSet(String[] addresses, int[] ports, Settings settings, @Nullable SocketFactory socketFactory, + @Nullable LDAPConnectionOptions options) { + if (addresses.length != 1) { + throw new IllegalArgumentException(toString() + " can only be used with a single url"); + } + if (InetAddresses.isInetAddress(addresses[0])) { + throw new IllegalArgumentException(toString() + " can only be used with a DNS name"); + } + TimeValue dnsTtl = settings.getAsTime(LdapLoadBalancingSettings.CACHE_TTL_SETTING, CACHE_TTL_DEFAULT); + return new RoundRobinDNSServerSet(addresses[0], ports[0], + RoundRobinDNSServerSet.AddressSelectionMode.ROUND_ROBIN, dnsTtl.millis(), null, socketFactory, options); + } + }, + + DNS_FAILOVER() { + @Override + ServerSet buildServerSet(String[] addresses, int[] ports, Settings settings, @Nullable SocketFactory socketFactory, + @Nullable LDAPConnectionOptions options) { + if (addresses.length != 1) { + throw new IllegalArgumentException(toString() + " can only be used with a single url"); + } + if (InetAddresses.isInetAddress(addresses[0])) { + throw new IllegalArgumentException(toString() + " can only be used with a DNS name"); + } + TimeValue dnsTtl = settings.getAsTime(LdapLoadBalancingSettings.CACHE_TTL_SETTING, CACHE_TTL_DEFAULT); + return new RoundRobinDNSServerSet(addresses[0], ports[0], + RoundRobinDNSServerSet.AddressSelectionMode.FAILOVER, dnsTtl.millis(), null, socketFactory, options); + } + }; + + public static final String LOAD_BALANCE_TYPE_DEFAULT = LdapLoadBalancing.FAILOVER.toString(); + public static final TimeValue CACHE_TTL_DEFAULT = TimeValue.timeValueHours(1L); + + abstract ServerSet buildServerSet(String[] addresses, int[] ports, Settings settings, @Nullable SocketFactory socketFactory, + @Nullable LDAPConnectionOptions options); + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + + public static LdapLoadBalancing resolve(Settings settings) { + Settings loadBalanceSettings = settings.getAsSettings(LdapLoadBalancingSettings.LOAD_BALANCE_SETTINGS); + String type = loadBalanceSettings.get(LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING, LOAD_BALANCE_TYPE_DEFAULT); + try { + return valueOf(type.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException ilae) { + throw new IllegalArgumentException("unknown load balance type [" + type + "]", ilae); + } + } + + public static ServerSet serverSet(String[] addresses, int[] ports, Settings settings, @Nullable SocketFactory socketFactory, + @Nullable LDAPConnectionOptions options) { + LdapLoadBalancing loadBalancing = resolve(settings); + Settings loadBalanceSettings = settings.getAsSettings(LdapLoadBalancingSettings.LOAD_BALANCE_SETTINGS); + return loadBalancing.buildServerSet(addresses, ports, loadBalanceSettings, socketFactory, options); + } + +} diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapMetaDataResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapMetaDataResolver.java new file mode 100644 index 0000000000000..e957c29fe2ba2 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapMetaDataResolver.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.ldap.support; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; + +import com.unboundid.ldap.sdk.Attribute; +import com.unboundid.ldap.sdk.LDAPInterface; +import com.unboundid.ldap.sdk.SearchResultEntry; +import com.unboundid.ldap.sdk.SearchScope; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapMetaDataResolverSettings; + +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.OBJECT_CLASS_PRESENCE_FILTER; +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.searchForEntry; + +public class LdapMetaDataResolver { + + private final String[] attributeNames; + private final boolean ignoreReferralErrors; + + public LdapMetaDataResolver(Settings settings, boolean ignoreReferralErrors) { + this(LdapMetaDataResolverSettings.ADDITIONAL_META_DATA_SETTING.get(settings), ignoreReferralErrors); + } + + LdapMetaDataResolver(Collection attributeNames, boolean ignoreReferralErrors) { + this.attributeNames = attributeNames.toArray(new String[attributeNames.size()]); + this.ignoreReferralErrors = ignoreReferralErrors; + } + + public String[] attributeNames() { + return attributeNames; + } + + public void resolve(LDAPInterface connection, String userDn, TimeValue timeout, Logger logger, + Collection attributes, + ActionListener> listener) { + if (this.attributeNames.length == 0) { + listener.onResponse(Collections.emptyMap()); + } else if (attributes != null) { + listener.onResponse(toMap(name -> findAttribute(attributes, name))); + } else { + searchForEntry(connection, userDn, SearchScope.BASE, OBJECT_CLASS_PRESENCE_FILTER, + Math.toIntExact(timeout.seconds()), ignoreReferralErrors, + ActionListener.wrap((SearchResultEntry entry) -> { + if (entry == null) { + listener.onResponse(Collections.emptyMap()); + } else { + listener.onResponse(toMap(entry::getAttribute)); + } + }, listener::onFailure), this.attributeNames); + } + } + + private Attribute findAttribute(Collection attributes, String name) { + return attributes.stream() + .filter(attr -> attr.getName().equals(name)) + .findFirst().orElse(null); + } + + private Map toMap(Function attributes) { + return Collections.unmodifiableMap( + Arrays.stream(this.attributeNames).map(attributes).filter(Objects::nonNull) + .collect(Collectors.toMap( + attr -> attr.getName(), + attr -> { + final String[] values = attr.getValues(); + return values.length == 1 ? 
values[0] : Arrays.asList(values); + }) + ) + ); + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapSession.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapSession.java new file mode 100644 index 0000000000000..bd1f0408a0c1d --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapSession.java @@ -0,0 +1,147 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.ldap.support; + +import com.unboundid.ldap.sdk.Attribute; +import com.unboundid.ldap.sdk.LDAPConnection; +import com.unboundid.ldap.sdk.LDAPInterface; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; + +import java.util.Collection; +import java.util.List; +import java.util.Map; + +/** + * Represents a LDAP connection with an authenticated/bound user that needs closing. + */ +public class LdapSession implements Releasable { + + protected final Logger logger; + protected final RealmConfig realm; + protected final LDAPInterface connection; + protected final String userDn; + protected final GroupsResolver groupsResolver; + private LdapMetaDataResolver metaDataResolver; + protected final TimeValue timeout; + protected final Collection attributes; + + /** + * This object is intended to be constructed by the LdapConnectionFactory + * + * This constructor accepts a logger with which the connection can log. Since this connection + * can be instantiated very frequently, it's best to have the logger for this connection created + * outside of and be reused across all connections. We can't keep a static logger in this class + * since we want the logger to be contextual (i.e. aware of the settings and its environment). + */ + public LdapSession(Logger logger, RealmConfig realm, LDAPInterface connection, String userDn, GroupsResolver groupsResolver, + LdapMetaDataResolver metaDataResolver, TimeValue timeout, Collection attributes) { + this.logger = logger; + this.realm = realm; + this.connection = connection; + this.userDn = userDn; + this.groupsResolver = groupsResolver; + this.metaDataResolver = metaDataResolver; + this.timeout = timeout; + this.attributes = attributes; + } + + /** + * LDAP connections should be closed to clean up resources. 
+ */ + @Override + public void close() { + // Only if it is an LDAPConnection do we need to close it, otherwise it is a connection pool and we will close all of the + // connections in the pool + if (connection instanceof LDAPConnection) { + ((LDAPConnection) connection).close(); + } + } + + /** + * @return the fully distinguished name of the user bound to this connection + */ + public String userDn() { + return userDn; + } + + /** + * @return the realm for which this session was created + */ + public RealmConfig realm() { + return realm; + } + + /** + * @return the connection to the LDAP/AD server of this session + */ + public LDAPInterface getConnection() { + return connection; + } + + /** + * Asynchronously retrieves a list of group distinguished names + */ + public void groups(ActionListener> listener) { + groupsResolver.resolve(connection, userDn, timeout, logger, attributes, listener); + } + + public void metaData(ActionListener> listener) { + metaDataResolver.resolve(connection, userDn, timeout, logger, attributes, listener); + } + + public void resolve(ActionListener listener) { + logger.debug("Resolving LDAP groups + meta-data for user [{}]", userDn); + groups(ActionListener.wrap( + groups -> { + logger.debug("Resolved {} LDAP groups [{}] for user [{}]", groups.size(), groups, userDn); + metaData(ActionListener.wrap( + meta -> { + logger.debug("Resolved {} meta-data fields [{}] for user [{}]", meta.size(), meta, userDn); + listener.onResponse(new LdapUserData(groups, meta)); + }, + listener::onFailure)); + }, + listener::onFailure)); + } + + public static class LdapUserData { + public final List groups; + public final Map metaData; + + public LdapUserData(List groups, Map metaData) { + this.groups = groups; + this.metaData = metaData; + } + } + + /** + * A GroupsResolver is used to resolve the group names of a given LDAP user + */ + public interface GroupsResolver { + + /** + * Asynchronously resolve the group name for the given ldap user + * @param ldapConnection an authenticated {@link LDAPConnection} to be used for LDAP queries + * @param userDn the distinguished name of the ldap user + * @param timeout the timeout for any ldap operation + * @param logger the logger to use if necessary + * @param attributes a collection of attributes that were previously retrieved for the user such as during a user search. + * {@code null} indicates that the attributes have not been attempted to be retrieved + * @param listener the listener to call on a result or on failure + */ + void resolve(LDAPInterface ldapConnection, String userDn, TimeValue timeout, Logger logger, Collection attributes, + ActionListener> listener); + + /** + * Returns the attributes that this resolvers uses. If no attributes are required, return {@code null}. + */ + String[] attributes(); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapUtils.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapUtils.java new file mode 100644 index 0000000000000..90cecd1e48a10 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapUtils.java @@ -0,0 +1,674 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
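Referring back to the GroupsResolver contract declared in LdapSession above, a trivial, hypothetical implementation makes the asynchronous shape of the interface concrete; it is illustrative only and not something the realms ship.

```java
import com.unboundid.ldap.sdk.Attribute;
import com.unboundid.ldap.sdk.LDAPInterface;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession.GroupsResolver;

import java.util.Collection;
import java.util.Collections;
import java.util.List;

// Answers from a fixed list instead of querying LDAP; the real resolvers issue asynchronous searches.
class StaticGroupsResolver implements GroupsResolver {

    private final List<String> groups;

    StaticGroupsResolver(List<String> groups) {
        this.groups = Collections.unmodifiableList(groups);
    }

    @Override
    public void resolve(LDAPInterface connection, String userDn, TimeValue timeout, Logger logger,
                        Collection<Attribute> attributes, ActionListener<List<String>> listener) {
        // nothing to look up, so respond immediately on the calling thread
        listener.onResponse(groups);
    }

    @Override
    public String[] attributes() {
        return null; // no attributes of the user entry are needed
    }
}
```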
+ */ +package org.elasticsearch.xpack.security.authc.ldap.support; + +import com.unboundid.ldap.sdk.AsyncRequestID; +import com.unboundid.ldap.sdk.AsyncSearchResultListener; +import com.unboundid.ldap.sdk.BindRequest; +import com.unboundid.ldap.sdk.DN; +import com.unboundid.ldap.sdk.DereferencePolicy; +import com.unboundid.ldap.sdk.Filter; +import com.unboundid.ldap.sdk.LDAPConnection; +import com.unboundid.ldap.sdk.LDAPConnectionPool; +import com.unboundid.ldap.sdk.LDAPException; +import com.unboundid.ldap.sdk.LDAPInterface; +import com.unboundid.ldap.sdk.LDAPURL; +import com.unboundid.ldap.sdk.ResultCode; +import com.unboundid.ldap.sdk.SearchRequest; +import com.unboundid.ldap.sdk.SearchResult; +import com.unboundid.ldap.sdk.SearchResultEntry; +import com.unboundid.ldap.sdk.SearchResultReference; +import com.unboundid.ldap.sdk.SearchScope; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.core.internal.io.IOUtils; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.SpecialPermission; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.support.Exceptions; + +import javax.naming.ldap.Rdn; + +import java.security.AccessController; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; +import java.text.MessageFormat; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.stream.Collectors; + +public final class LdapUtils { + + public static final Filter OBJECT_CLASS_PRESENCE_FILTER = Filter.createPresenceFilter("objectClass"); + + private static final Logger LOGGER = ESLoggerFactory.getLogger(LdapUtils.class); + + private LdapUtils() { + } + + public static DN dn(String dn) { + try { + return new DN(dn); + } catch (LDAPException e) { + throw new IllegalArgumentException("invalid DN [" + dn + "]", e); + } + } + + public static T privilegedConnect(CheckedSupplier supplier) + throws LDAPException { + SpecialPermission.check(); + try { + return AccessController.doPrivileged((PrivilegedExceptionAction) supplier::get); + } catch (PrivilegedActionException e) { + throw (LDAPException) e.getCause(); + } + } + + public static String relativeName(DN dn) { + return dn.getRDNString().split("=")[1].trim(); + } + + public static String escapedRDNValue(String rdn) { + // We can't use UnboundID RDN here because it expects attribute=value, not just value + return Rdn.escapeValue(rdn); + } + + /** + * If necessary, fork before executing the runnable. A deadlock will happen if + * the same thread which handles bind responses blocks on the bind call, waiting + * for the response which he itself should handle. 
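A JDK-only model of this fork-if-on-the-reader-thread rule is sketched below. The thread-name test stands in for LdapUtils.isLdapConnectionThread and the executor is a placeholder for the generic thread pool; both are assumptions made for the example.

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// If the current thread is the one that must read the bind response off the socket, hand the
// blocking bind to another executor; otherwise run it inline to avoid needless forking.
public class MaybeForkSketch {

    private static final ExecutorService GENERIC = Executors.newCachedThreadPool();

    static void maybeForkAndRun(Runnable blockingBind) {
        if (Thread.currentThread().getName().startsWith("Connection reader")) {
            // blocking here would deadlock: this thread handles the bind response itself
            GENERIC.execute(blockingBind);
        } else {
            // already on a thread that is safe to block
            blockingBind.run();
        }
    }

    public static void main(String[] args) {
        maybeForkAndRun(() -> System.out.println("bind executed on " + Thread.currentThread().getName()));
        GENERIC.shutdown();
    }
}
```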
+ */ + private static void maybeForkAndRun(ThreadPool threadPool, Runnable runnable) { + if (isLdapConnectionThread(Thread.currentThread())) { + // only fork if binding on the LDAPConnectionReader thread + threadPool.executor(ThreadPool.Names.GENERIC).execute(runnable); + } else { + // avoids repeated forking + runnable.run(); + } + } + + /** + * This method submits the {@code bind} request over one connection from the + * pool. The bind authentication is then reverted and the connection is returned + * to the pool, so that the connection can be safely reused, see + * {@code LDAPConnectionPool#bindAndRevertAuthentication}. This validates the + * bind credentials. + * + * Bind calls are blocking and if a bind is executed on the LDAP Connection + * Reader thread (as returned by {@code LdapUtils#isLdapConnectionThread}), the + * thread will be blocked until it is interrupted by something else such as a + * timeout timer. Do not call bind outside this method or + * {@link LdapUtils#maybeForkThenBind(LDAPConnection, BindRequest, ThreadPool, AbstractRunnable)} + * + * @param ldapPool + * The LDAP connection pool on which to submit the bind operation. + * @param bind + * The request object of the bind operation. + * @param threadPool + * The threads that will call the blocking bind operation, in case + * the calling thread is a connection reader, see: + * {@code LdapUtils#isLdapConnectionThread}. + * @param runnable + * The runnable that continues the program flow after the bind + * operation. It is executed on the same thread as the prior bind. + */ + public static void maybeForkThenBindAndRevert(LDAPConnectionPool ldapPool, BindRequest bind, ThreadPool threadPool, + AbstractRunnable runnable) { + final Runnable bindRunnable = new AbstractRunnable() { + @Override + @SuppressForbidden(reason = "Bind allowed if forking of the LDAP Connection Reader Thread.") + protected void doRun() throws Exception { + privilegedConnect(() -> ldapPool.bindAndRevertAuthentication(bind.duplicate())); + runnable.run(); + } + + @Override + public void onFailure(Exception e) { + runnable.onFailure(e); + } + + @Override + public void onAfter() { + runnable.onAfter(); + } + }; + maybeForkAndRun(threadPool, bindRunnable); + } + + /** + * This method submits the {@code bind} request over the ldap connection. Its + * authentication status changes. The connection can be subsequently reused. + * This validates the bind credentials. + * + * Bind calls are blocking and if a bind is executed on the LDAP Connection + * Reader thread (as returned by {@code LdapUtils#isLdapConnectionThread}), the + * thread will be blocked until it is interrupted by something else such as a + * timeout timer. Do not call bind outside this method or + * {@link LdapUtils#maybeForkThenBind(LDAPConnection, BindRequest, ThreadPool, AbstractRunnable)} + * + * @param ldap + * The LDAP connection on which to submit the bind operation. + * @param bind + * The request object of the bind operation. + * @param threadPool + * The threads that will call the blocking bind operation, in case + * the calling thread is a connection reader, see: + * {@code LdapUtils#isLdapConnectionThread}. + * @param runnable + * The runnable that continues the program flow after the bind + * operation. It is executed on the same thread as the prior bind. 
+ */ + public static void maybeForkThenBind(LDAPConnection ldap, BindRequest bind, ThreadPool threadPool, AbstractRunnable runnable) { + final Runnable bindRunnable = new AbstractRunnable() { + @Override + @SuppressForbidden(reason = "Bind allowed if forking of the LDAP Connection Reader Thread.") + protected void doRun() throws Exception { + privilegedConnect(() -> ldap.bind(bind.duplicate())); + runnable.run(); + } + + @Override + public void onFailure(Exception e) { + runnable.onFailure(e); + } + + @Override + public void onAfter() { + runnable.onAfter(); + } + }; + maybeForkAndRun(threadPool, bindRunnable); + } + + /** + * This method performs an asynchronous ldap search operation that could have multiple results + */ + public static void searchForEntry(LDAPInterface ldap, String baseDN, SearchScope scope, + Filter filter, int timeLimitSeconds, + boolean ignoreReferralErrors, + ActionListener listener, + String... attributes) { + if (ldap instanceof LDAPConnection) { + searchForEntry((LDAPConnection) ldap, baseDN, scope, filter, timeLimitSeconds, + ignoreReferralErrors, listener, attributes); + } else if (ldap instanceof LDAPConnectionPool) { + searchForEntry((LDAPConnectionPool) ldap, baseDN, scope, filter, timeLimitSeconds, + ignoreReferralErrors, listener, attributes); + } else { + throw new IllegalArgumentException("unsupported LDAPInterface implementation: " + ldap); + } + } + + /** + * This method performs an asynchronous ldap search operation that only expects at most one + * result. + * If more than one result is found then this is an error + * If no results are found, then {@code null} will be returned. + * If the LDAP server returns an error {@link ResultCode} then this is handled as a + * {@link ActionListener#onFailure(Exception) failure} + */ + public static void searchForEntry(LDAPConnection ldap, String baseDN, SearchScope scope, + Filter filter, int timeLimitSeconds, + boolean ignoreReferralErrors, + ActionListener listener, + String... attributes) { + LdapSearchResultListener searchResultListener = new SingleEntryListener(ldap, listener, + filter, ignoreReferralErrors); + try { + SearchRequest request = new SearchRequest(searchResultListener, baseDN, scope, + DereferencePolicy.NEVER, 0, timeLimitSeconds, false, filter, attributes); + searchResultListener.setSearchRequest(request); + ldap.asyncSearch(request); + } catch (LDAPException e) { + listener.onFailure(e); + } + } + + /** + * This method performs an asynchronous ldap search operation that only expects at most one + * result. + * If more than one result is found then this is an error. + * If no results are found, then {@code null} will be returned. + * If the LDAP server returns an error {@link ResultCode} then this is handled as a + * {@link ActionListener#onFailure(Exception) failure} + */ + public static void searchForEntry(LDAPConnectionPool ldap, String baseDN, SearchScope scope, + Filter filter, int timeLimitSeconds, + boolean ignoreReferralErrors, + ActionListener listener, + String... attributes) { + boolean searching = false; + LDAPConnection ldapConnection = null; + try { + ldapConnection = privilegedConnect(ldap::getConnection); + final LDAPConnection finalConnection = ldapConnection; + searchForEntry(finalConnection, baseDN, scope, filter, timeLimitSeconds, + ignoreReferralErrors, ActionListener.wrap( + entry -> { + assert isLdapConnectionThread(Thread.currentThread()) : "Expected current thread [" + Thread.currentThread() + + "] to be an LDAPConnectionReader Thread. 
Probably the new library has changed the thread's name."; + IOUtils.close(() -> ldap.releaseConnection(finalConnection)); + listener.onResponse(entry); + }, + e -> { + IOUtils.closeWhileHandlingException( + () -> ldap.releaseConnection(finalConnection) + ); + listener.onFailure(e); + }), attributes); + searching = true; + } catch (LDAPException e) { + listener.onFailure(e); + } finally { + if (searching == false) { + final LDAPConnection finalConnection = ldapConnection; + IOUtils.closeWhileHandlingException(() -> ldap.releaseConnection(finalConnection)); + } + } + } + + /** + * This method performs an asynchronous ldap search operation that could have multiple results + */ + public static void search(LDAPInterface ldap, String baseDN, SearchScope scope, + Filter filter, int timeLimitSeconds, + boolean ignoreReferralErrors, + ActionListener> listener, + String... attributes) { + if (ldap instanceof LDAPConnection) { + search((LDAPConnection) ldap, baseDN, scope, filter, timeLimitSeconds, + ignoreReferralErrors, listener, attributes); + } else if (ldap instanceof LDAPConnectionPool) { + search((LDAPConnectionPool) ldap, baseDN, scope, filter, timeLimitSeconds, + ignoreReferralErrors, listener, attributes); + } else { + throw new IllegalArgumentException("unsupported LDAPInterface implementation: " + ldap); + } + } + + /** + * This method performs an asynchronous ldap search operation that could have multiple results + */ + public static void search(LDAPConnection ldap, String baseDN, SearchScope scope, + Filter filter, int timeLimitSeconds, + boolean ignoreReferralErrors, + ActionListener> listener, + String... attributes) { + LdapSearchResultListener searchResultListener = new LdapSearchResultListener( + ldap, + ignoreReferralErrors, + ActionListener.wrap( + searchResult -> { + assert isLdapConnectionThread(Thread.currentThread()) : "Expected current thread [" + Thread.currentThread() + + "] to be an LDAPConnectionReader Thread. Probably the new library has changed the thread's name."; + listener.onResponse(Collections.unmodifiableList(searchResult.getSearchEntries())); + }, + listener::onFailure), + 1); + try { + SearchRequest request = new SearchRequest(searchResultListener, baseDN, scope, + DereferencePolicy.NEVER, 0, timeLimitSeconds, false, filter, attributes); + searchResultListener.setSearchRequest(request); + ldap.asyncSearch(request); + } catch (LDAPException e) { + listener.onFailure(e); + } + } + + /** + * This method performs an asynchronous ldap search operation that could have multiple results + */ + public static void search(LDAPConnectionPool ldap, String baseDN, SearchScope scope, + Filter filter, int timeLimitSeconds, + boolean ignoreReferralErrors, + ActionListener> listener, + String... 
attributes) { + boolean searching = false; + LDAPConnection ldapConnection = null; + try { + ldapConnection = privilegedConnect(ldap::getConnection); + final LDAPConnection finalConnection = ldapConnection; + search(finalConnection, baseDN, scope, filter, timeLimitSeconds, ignoreReferralErrors, ActionListener.wrap(searchResult -> { + IOUtils.closeWhileHandlingException(() -> ldap.releaseConnection(finalConnection)); + listener.onResponse(searchResult); + }, (e) -> { + IOUtils.closeWhileHandlingException(() -> ldap.releaseConnection(finalConnection)); + listener.onFailure(e); + }), attributes); + searching = true; + } catch (LDAPException e) { + listener.onFailure(e); + } finally { + if (searching == false && ldapConnection != null) { + final LDAPConnection finalConnection = ldapConnection; + IOUtils.closeWhileHandlingException(() -> ldap.releaseConnection(finalConnection)); + } + } + } + + static boolean isLdapConnectionThread(Thread thread) { + return Thread.currentThread().getName().startsWith("Connection reader for connection "); + } + + /** + * Returns true if the provide {@link SearchResult} was successfully completed + * by the server. + * Note: Referrals are not considered a successful response for the + * purposes of this method. + */ + private static boolean isSuccess(SearchResult searchResult) { + switch (searchResult.getResultCode().intValue()) { + case ResultCode.SUCCESS_INT_VALUE: + case ResultCode.COMPARE_FALSE_INT_VALUE: + case ResultCode.COMPARE_TRUE_INT_VALUE: + return true; + default: + return false; + } + } + + private static SearchResult emptyResult(SearchResult parentResult) { + return new SearchResult( + parentResult.getMessageID(), + ResultCode.SUCCESS, + "Empty result", + parentResult.getMatchedDN(), + null, + 0, + 0, + null + ); + } + + private static LDAPException toException(SearchResult searchResult) { + return new LDAPException( + searchResult.getResultCode(), + searchResult.getDiagnosticMessage(), + searchResult.getMatchedDN(), + searchResult.getReferralURLs(), + searchResult.getResponseControls() + ); + } + + public static Filter createFilter(String filterTemplate, String... arguments) throws LDAPException { + return Filter.create(new MessageFormat(filterTemplate, Locale.ROOT) + .format(encodeFilterValues(arguments), new StringBuffer(), null) + .toString()); + } + + public static String[] attributesToSearchFor(String[] attributes) { + return attributes == null ? new String[] { SearchRequest.NO_ATTRIBUTES } : attributes; + } + + public static String[] attributesToSearchFor(String[]... args) { + List attributes = new ArrayList<>(); + for (String[] array : args) { + if (array != null) { + attributes.addAll(Arrays.asList(array)); + } + } + return attributes.isEmpty() ? attributesToSearchFor((String[]) null) + : attributes.toArray(new String[attributes.size()]); + } + + private static String[] encodeFilterValues(String... 
arguments) { + for (int i = 0; i < arguments.length; i++) { + arguments[i] = Filter.encodeValue(arguments[i]); + } + return arguments; + } + + private static class SingleEntryListener extends LdapSearchResultListener { + + SingleEntryListener(LDAPConnection ldapConnection, + ActionListener listener, Filter filter, + boolean ignoreReferralErrors) { + super(ldapConnection, ignoreReferralErrors, ActionListener.wrap(searchResult -> { + final List entryList = searchResult.getSearchEntries(); + if (entryList.size() > 1) { + listener.onFailure(Exceptions.authenticationError( + "multiple search results found for [{}]", filter)); + } else if (entryList.size() == 1) { + listener.onResponse(entryList.get(0)); + } else { + listener.onResponse(null); + } + }, listener::onFailure) + , 1); + } + } + + private static class LdapSearchResultListener implements AsyncSearchResultListener { + + private final List entryList = new ArrayList<>(); + private final List referenceList = new ArrayList<>(); + protected final SetOnce searchRequestRef = new SetOnce<>(); + + private final LDAPConnection ldapConnection; + private final boolean ignoreReferralErrors; + private final ActionListener listener; + private final int depth; + + LdapSearchResultListener(LDAPConnection ldapConnection, boolean ignoreReferralErrors, + ActionListener listener, int depth) { + this.ldapConnection = ldapConnection; + this.listener = listener; + this.depth = depth; + this.ignoreReferralErrors = ignoreReferralErrors; + } + + @Override + public void searchEntryReturned(SearchResultEntry searchEntry) { + entryList.add(searchEntry); + } + + @Override + public void searchReferenceReturned(SearchResultReference searchReference) { + referenceList.add(searchReference); + } + + @Override + public void searchResultReceived(AsyncRequestID requestID, SearchResult searchResult) { + // Whenever we get a search result we need to check for a referral. + // A referral is a mechanism for an LDAP server to reference an object stored in a + // different LDAP server/partition. There are cases where we need to follow a referral + // in order to get the actual object we are searching for + final String[] referralUrls = referenceList.stream() + .flatMap((ref) -> Arrays.stream(ref.getReferralURLs())) + .collect(Collectors.toList()) + .toArray(Strings.EMPTY_ARRAY); + final SearchRequest request = searchRequestRef.get(); + if (referralUrls.length == 0 || request.followReferrals(ldapConnection) == false) { + // either no referrals to follow or we have explicitly disabled referral following + // on the connection so we just create a new search result that has the values we've + // collected. 
The search result passed to this method will not have of the entries + // as we are using a result listener and the results are not being collected by the + // LDAP library + LOGGER.trace("LDAP Search {} => {} ({})", request, searchResult, entryList); + if (isSuccess(searchResult)) { + SearchResult resultWithValues = new SearchResult(searchResult.getMessageID(), + searchResult.getResultCode(), searchResult.getDiagnosticMessage(), + searchResult.getMatchedDN(), referralUrls, entryList, referenceList, + entryList.size(), referenceList.size(), + searchResult.getResponseControls()); + listener.onResponse(resultWithValues); + } else { + listener.onFailure(toException(searchResult)); + } + } else if (depth >= ldapConnection.getConnectionOptions().getReferralHopLimit()) { + // we've gone through too many levels of referrals so we terminate with the values + // collected so far and the proper result code to indicate the search was + // terminated early + LOGGER.trace("Referral limit exceeded {} => {} ({})", + request, searchResult, entryList); + listener.onFailure(new LDAPException(ResultCode.REFERRAL_LIMIT_EXCEEDED, + "Referral limit exceeded (" + depth + ")", + searchResult.getMatchedDN(), referralUrls, + searchResult.getResponseControls())); + } else { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("LDAP referred elsewhere {} => {}", + request, Arrays.toString(referralUrls)); + } + // there are referrals to follow, so we start the process to follow the referrals + final CountDown countDown = new CountDown(referralUrls.length); + final List referralUrlsList = new ArrayList<>(Arrays.asList(referralUrls)); + + ActionListener referralListener = ActionListener.wrap( + innerResult -> { + // synchronize here since we are possibly sending out a lot of requests + // and the result lists are not thread safe and this also provides us + // with a consistent view + synchronized (this) { + if (innerResult.getSearchEntries() != null) { + entryList.addAll(innerResult.getSearchEntries()); + } + if (innerResult.getSearchReferences() != null) { + referenceList.addAll(innerResult.getSearchReferences()); + } + } + + // count down and once all referrals have been traversed then we can + // create the results + if (countDown.countDown()) { + SearchResult resultWithValues = new SearchResult( + searchResult.getMessageID(), searchResult.getResultCode(), + searchResult.getDiagnosticMessage(), + searchResult.getMatchedDN(), + referralUrlsList.toArray(Strings.EMPTY_ARRAY), entryList, + referenceList, entryList.size(), referenceList.size(), + searchResult.getResponseControls()); + listener.onResponse(resultWithValues); + } + }, listener::onFailure); + + for (String referralUrl : referralUrls) { + try { + // for each referral follow it and any other referrals returned until we + // get to a depth that is greater than or equal to the referral hop limit + // or all referrals have been followed. 
Each time referrals are followed + // from a search result, the depth increases by 1 + followReferral(ldapConnection, referralUrl, request, referralListener, + depth + 1, ignoreReferralErrors, searchResult); + } catch (LDAPException e) { + LOGGER.warn((Supplier) () -> new ParameterizedMessage( + "caught exception while trying to follow referral [{}]", + referralUrl), e); + if (ignoreReferralErrors) { + // Needed in order for the countDown to be correct + referralListener.onResponse(emptyResult(searchResult)); + } else { + listener.onFailure(e); + } + } + } + } + } + + void setSearchRequest(SearchRequest searchRequest) { + this.searchRequestRef.set(searchRequest); + } + } + + /** + * Performs the actual connection and following of a referral given a URL string. + * This referral is being followed as it may contain a result that is relevant to our search + */ + private static void followReferral(LDAPConnection ldapConnection, String urlString, + SearchRequest searchRequest, + ActionListener listener, int depth, + boolean ignoreErrors, SearchResult originatingResult) + throws LDAPException { + + final LDAPURL referralURL = new LDAPURL(urlString); + final String host = referralURL.getHost(); + // the host must be present in order to follow a referral + if (host == null) { + // nothing to really do since a null host cannot really be handled, so we treat it as + // an error + throw new LDAPException(ResultCode.UNAVAILABLE, "Null referral host in " + urlString); + } + + // the referral URL often contains information necessary about the LDAP request such as + // the base DN, scope, and filter. If it does not, then we reuse the values from the + // originating search request + final String requestBaseDN; + if (referralURL.baseDNProvided()) { + requestBaseDN = referralURL.getBaseDN().toString(); + } else { + requestBaseDN = searchRequest.getBaseDN(); + } + + final SearchScope requestScope; + if (referralURL.scopeProvided()) { + requestScope = referralURL.getScope(); + } else { + requestScope = searchRequest.getScope(); + } + + final Filter requestFilter; + if (referralURL.filterProvided()) { + requestFilter = referralURL.getFilter(); + } else { + requestFilter = searchRequest.getFilter(); + } + + // in order to follow the referral we need to open a new connection and we do so using the + // referral connector on the ldap connection + final LDAPConnection referralConn = + privilegedConnect(() -> ldapConnection.getReferralConnector().getReferralConnection(referralURL, ldapConnection)); + final LdapSearchResultListener ldapListener = new LdapSearchResultListener( + referralConn, ignoreErrors, + ActionListener.wrap( + searchResult -> { + IOUtils.close(referralConn); + listener.onResponse(searchResult); + }, + e -> { + IOUtils.closeWhileHandlingException(referralConn); + if (ignoreErrors) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(new ParameterizedMessage( + "Failed to retrieve results from referral URL [{}]." 
+ + " Treating as 'no results'", + referralURL), e); + } + listener.onResponse(emptyResult(originatingResult)); + } else { + listener.onFailure(e); + } + }), + depth); + boolean success = false; + try { + final SearchRequest referralSearchRequest = + new SearchRequest(ldapListener, searchRequest.getControls(), + requestBaseDN, requestScope, searchRequest.getDereferencePolicy(), + searchRequest.getSizeLimit(), searchRequest.getTimeLimitSeconds(), + searchRequest.typesOnly(), requestFilter, + searchRequest.getAttributes()); + ldapListener.setSearchRequest(searchRequest); + referralConn.asyncSearch(referralSearchRequest); + success = true; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(referralConn); + } + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactory.java new file mode 100644 index 0000000000000..aabea2eb854e7 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactory.java @@ -0,0 +1,260 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.ldap.support; + +import com.unboundid.ldap.sdk.LDAPConnectionOptions; +import com.unboundid.ldap.sdk.LDAPException; +import com.unboundid.ldap.sdk.LDAPURL; +import com.unboundid.ldap.sdk.ServerSet; +import com.unboundid.util.ssl.HostNameSSLSocketVerifier; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; +import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.core.ssl.VerificationMode; + +import javax.net.SocketFactory; + +import java.util.Arrays; +import java.util.List; +import java.util.regex.Pattern; + +/** + * This factory holds settings needed for authenticating to LDAP and creating LdapConnections. + * Each created LdapConnection needs to be closed or else connections will pill up consuming + * resources. + *
<p>
+ * A typical usage pattern looks like this: + *
<pre>
+ * ConnectionFactory factory = ...
+ * try (LdapConnection session = factory.session(...)) {
+ * ...do stuff with the session
+ * }
+ * </pre>
+ */ +public abstract class SessionFactory { + + private static final Pattern STARTS_WITH_LDAPS = Pattern.compile("^ldaps:.*", + Pattern.CASE_INSENSITIVE); + private static final Pattern STARTS_WITH_LDAP = Pattern.compile("^ldap:.*", + Pattern.CASE_INSENSITIVE); + + protected final Logger logger; + protected final RealmConfig config; + protected final TimeValue timeout; + protected final SSLService sslService; + protected final ThreadPool threadPool; + + protected final ServerSet serverSet; + protected final boolean sslUsed; + protected final boolean ignoreReferralErrors; + + protected SessionFactory(RealmConfig config, SSLService sslService, ThreadPool threadPool) { + this.config = config; + this.logger = config.logger(getClass()); + final Settings settings = config.settings(); + TimeValue searchTimeout = settings.getAsTime(SessionFactorySettings.TIMEOUT_LDAP_SETTING, SessionFactorySettings.TIMEOUT_DEFAULT); + if (searchTimeout.millis() < 1000L) { + logger.warn("ldap_search timeout [{}] is less than the minimum supported search " + + "timeout of 1s. using 1s", + searchTimeout.millis()); + searchTimeout = TimeValue.timeValueSeconds(1L); + } + this.timeout = searchTimeout; + this.sslService = sslService; + this.threadPool = threadPool; + LDAPServers ldapServers = ldapServers(settings); + this.serverSet = serverSet(config, sslService, ldapServers); + this.sslUsed = ldapServers.ssl; + this.ignoreReferralErrors = SessionFactorySettings.IGNORE_REFERRAL_ERRORS_SETTING.get(settings); + } + + /** + * Authenticates the given user and opens a new connection that bound to it (meaning, all + * operations under the returned connection will be executed on behalf of the authenticated + * user. + * + * @param user The name of the user to authenticate the connection with. + * @param password The password of the user + * @param listener the listener to call on a failure or result + */ + public abstract void session(String user, SecureString password, + ActionListener listener); + + /** + * Returns a flag to indicate if this session factory supports unauthenticated sessions. 
+ * This means that a session can be established without providing any credentials in a call to + * {@link #unauthenticatedSession(String, ActionListener)} + * + * @return true if the factory supports unauthenticated sessions + */ + public boolean supportsUnauthenticatedSession() { + return false; + } + + /** + * Returns an {@link LdapSession} for the user identified by the String parameter + * + * @param username the identifier for the user + * @param listener the listener to call on a failure or result + */ + public void unauthenticatedSession(String username, ActionListener listener) { + throw new UnsupportedOperationException("unauthenticated sessions are not supported"); + } + + protected static LDAPConnectionOptions connectionOptions(RealmConfig config, + SSLService sslService, Logger logger) { + Settings realmSettings = config.settings(); + LDAPConnectionOptions options = new LDAPConnectionOptions(); + options.setConnectTimeoutMillis(Math.toIntExact( + realmSettings.getAsTime(SessionFactorySettings.TIMEOUT_TCP_CONNECTION_SETTING, + SessionFactorySettings.TIMEOUT_DEFAULT).millis() + )); + options.setFollowReferrals(realmSettings.getAsBoolean(SessionFactorySettings.FOLLOW_REFERRALS_SETTING, true)); + options.setResponseTimeoutMillis( + realmSettings.getAsTime(SessionFactorySettings.TIMEOUT_TCP_READ_SETTING, SessionFactorySettings.TIMEOUT_DEFAULT).millis() + ); + options.setAllowConcurrentSocketFactoryUse(true); + + final SSLConfigurationSettings sslConfigurationSettings = + SSLConfigurationSettings.withoutPrefix(); + final Settings realmSSLSettings = realmSettings.getByPrefix("ssl."); + final boolean verificationModeExists = + sslConfigurationSettings.verificationMode.exists(realmSSLSettings); + final boolean hostnameVerficationExists = + realmSettings.get(SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING, null) != null; + + if (verificationModeExists && hostnameVerficationExists) { + throw new IllegalArgumentException("[" + SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING + "] and [" + + sslConfigurationSettings.verificationMode.getKey() + + "] may not be used at the same time"); + } else if (verificationModeExists) { + VerificationMode verificationMode = sslService.getVerificationMode(realmSSLSettings, + Settings.EMPTY); + if (verificationMode == VerificationMode.FULL) { + options.setSSLSocketVerifier(new HostNameSSLSocketVerifier(true)); + } + } else if (hostnameVerficationExists) { + new DeprecationLogger(logger).deprecated("the setting [{}] has been deprecated and " + + "will be removed in a future version. use [{}] instead", + RealmSettings.getFullSettingKey(config, SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING), + RealmSettings.getFullSettingKey(config, "ssl." 
+ + sslConfigurationSettings.verificationMode.getKey())); + if (realmSettings.getAsBoolean(SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING, true)) { + options.setSSLSocketVerifier(new HostNameSSLSocketVerifier(true)); + } + } else { + options.setSSLSocketVerifier(new HostNameSSLSocketVerifier(true)); + } + return options; + } + + private LDAPServers ldapServers(Settings settings) { + // Parse LDAP urls + List ldapUrls = settings.getAsList(SessionFactorySettings.URLS_SETTING, getDefaultLdapUrls(settings)); + if (ldapUrls == null || ldapUrls.isEmpty()) { + throw new IllegalArgumentException("missing required LDAP setting [" + SessionFactorySettings.URLS_SETTING + + "]"); + } + return new LDAPServers(ldapUrls.toArray(new String[ldapUrls.size()])); + } + + protected List getDefaultLdapUrls(Settings settings) { + return null; + } + + private ServerSet serverSet(RealmConfig realmConfig, SSLService clientSSLService, + LDAPServers ldapServers) { + Settings settings = realmConfig.settings(); + SocketFactory socketFactory = null; + if (ldapServers.ssl()) { + socketFactory = clientSSLService.sslSocketFactory(settings.getByPrefix("ssl.")); + if (settings.getAsBoolean(SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING, true)) { + logger.debug("using encryption for LDAP connections with hostname verification"); + } else { + logger.debug("using encryption for LDAP connections without hostname verification"); + } + } + return LdapLoadBalancing.serverSet(ldapServers.addresses(), ldapServers.ports(), settings, + socketFactory, connectionOptions(realmConfig, sslService, logger)); + } + + // package private to use for testing + ServerSet getServerSet() { + return serverSet; + } + + public boolean isSslUsed() { + return sslUsed; + } + + public static class LDAPServers { + + private final String[] addresses; + private final int[] ports; + private final boolean ssl; + + public LDAPServers(String[] urls) { + ssl = secureUrls(urls); + addresses = new String[urls.length]; + ports = new int[urls.length]; + for (int i = 0; i < urls.length; i++) { + try { + LDAPURL url = new LDAPURL(urls[i]); + addresses[i] = url.getHost(); + ports[i] = url.getPort(); + } catch (LDAPException e) { + throw new IllegalArgumentException("unable to parse configured LDAP url [" + + urls[i] + "]", e); + } + } + } + + public String[] addresses() { + return addresses; + } + + public int[] ports() { + return ports; + } + + public boolean ssl() { + return ssl; + } + + /** + * @param ldapUrls URLS in the form of "ldap://..." or "ldaps://..." + */ + private boolean secureUrls(String[] ldapUrls) { + if (ldapUrls.length == 0) { + return true; + } + + final boolean allSecure = Arrays.stream(ldapUrls) + .allMatch(s -> STARTS_WITH_LDAPS.matcher(s).find()); + final boolean allClear = Arrays.stream(ldapUrls) + .allMatch(s -> STARTS_WITH_LDAP.matcher(s).find()); + + if (!allSecure && !allClear) { + //No mixing is allowed because we use the same socketfactory + throw new IllegalArgumentException( + "configured LDAP protocols are not all equal (ldaps://.. 
and ldap://..): [" + + Strings.arrayToCommaDelimitedString(ldapUrls) + "]"); + } + + return allSecure; + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java new file mode 100644 index 0000000000000..a956351f86e4b --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java @@ -0,0 +1,262 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.pki; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.cache.Cache; +import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.hash.MessageDigests; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ReleasableLock; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.ssl.CertUtils; +import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; +import org.elasticsearch.xpack.security.authc.BytesKey; +import org.elasticsearch.xpack.security.authc.support.CachingRealm; +import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.security.authc.support.mapper.CompositeRoleMapper; +import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; + +import javax.net.ssl.X509TrustManager; + +import java.security.MessageDigest; +import java.security.cert.Certificate; +import java.security.cert.CertificateEncodingException; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class PkiRealm extends Realm implements CachingRealm { + + public static final String PKI_CERT_HEADER_NAME = "__SECURITY_CLIENT_CERTIFICATE"; + + // For client based cert validation, the auth type must be specified but UNKNOWN is an acceptable value + private static final String AUTH_TYPE = "UNKNOWN"; + + // the lock is used in an odd manner; when iterating over the cache we cannot have modifiers other than deletes using + // the iterator but 
when not iterating we can modify the cache without external locking. When making normal modifications to the cache + // the read lock is obtained so that we can allow concurrent modifications; however when we need to iterate over the keys or values of + // the cache the write lock must obtained to prevent any modifications + private final ReleasableLock readLock; + private final ReleasableLock writeLock; + + { + final ReadWriteLock iterationLock = new ReentrantReadWriteLock(); + readLock = new ReleasableLock(iterationLock.readLock()); + writeLock = new ReleasableLock(iterationLock.writeLock()); + } + + private final X509TrustManager trustManager; + private final Pattern principalPattern; + private final UserRoleMapper roleMapper; + private final Cache cache; + + public PkiRealm(RealmConfig config, ResourceWatcherService watcherService, NativeRoleMappingStore nativeRoleMappingStore) { + this(config, new CompositeRoleMapper(PkiRealmSettings.TYPE, config, watcherService, nativeRoleMappingStore)); + } + + // pkg private for testing + PkiRealm(RealmConfig config, UserRoleMapper roleMapper) { + super(PkiRealmSettings.TYPE, config); + this.trustManager = trustManagers(config); + this.principalPattern = PkiRealmSettings.USERNAME_PATTERN_SETTING.get(config.settings()); + this.roleMapper = roleMapper; + this.cache = CacheBuilder.builder() + .setExpireAfterWrite(PkiRealmSettings.CACHE_TTL_SETTING.get(config.settings())) + .setMaximumWeight(PkiRealmSettings.CACHE_MAX_USERS_SETTING.get(config.settings())) + .build(); + } + + @Override + public boolean supports(AuthenticationToken token) { + return token instanceof X509AuthenticationToken; + } + + @Override + public X509AuthenticationToken token(ThreadContext context) { + return token(context.getTransient(PKI_CERT_HEADER_NAME), principalPattern, logger); + } + + @Override + public void authenticate(AuthenticationToken authToken, ActionListener listener) { + X509AuthenticationToken token = (X509AuthenticationToken)authToken; + try { + final BytesKey fingerprint = computeFingerprint(token.credentials()[0]); + User user = cache.get(fingerprint); + if (user != null) { + listener.onResponse(AuthenticationResult.success(user)); + } else if (isCertificateChainTrusted(trustManager, token, logger) == false) { + listener.onResponse(AuthenticationResult.unsuccessful("Certificate for " + token.dn() + " is not trusted", null)); + } else { + final Map metadata = Collections.singletonMap("pki_dn", token.dn()); + final UserRoleMapper.UserData userData = new UserRoleMapper.UserData(token.principal(), + token.dn(), Collections.emptySet(), metadata, this.config); + roleMapper.resolveRoles(userData, ActionListener.wrap(roles -> { + final User computedUser = + new User(token.principal(), roles.toArray(new String[roles.size()]), null, null, metadata, true); + try (ReleasableLock ignored = readLock.acquire()) { + cache.put(fingerprint, computedUser); + } + listener.onResponse(AuthenticationResult.success(computedUser)); + }, listener::onFailure)); + } + } catch (CertificateEncodingException e) { + listener.onResponse(AuthenticationResult.unsuccessful("Certificate for " + token.dn() + " has encoding issues", e)); + } + } + + @Override + public void lookupUser(String username, ActionListener listener) { + listener.onResponse(null); + } + + static X509AuthenticationToken token(Object pkiHeaderValue, Pattern principalPattern, Logger logger) { + if (pkiHeaderValue == null) { + return null; + } + + assert pkiHeaderValue instanceof X509Certificate[]; + X509Certificate[] certificates 
= (X509Certificate[]) pkiHeaderValue; + if (certificates.length == 0) { + return null; + } + + String dn = certificates[0].getSubjectX500Principal().toString(); + Matcher matcher = principalPattern.matcher(dn); + if (!matcher.find()) { + if (logger.isDebugEnabled()) { + logger.debug("certificate authentication succeeded for [{}] but could not extract principal from DN", dn); + } + return null; + } + + String principal = matcher.group(1); + if (Strings.isNullOrEmpty(principal)) { + if (logger.isDebugEnabled()) { + logger.debug("certificate authentication succeeded for [{}] but extracted principal was empty", dn); + } + return null; + } + return new X509AuthenticationToken(certificates, principal, dn); + } + + static boolean isCertificateChainTrusted(X509TrustManager trustManager, X509AuthenticationToken token, Logger logger) { + if (trustManager != null) { + try { + trustManager.checkClientTrusted(token.credentials(), AUTH_TYPE); + return true; + } catch (CertificateException e) { + if (logger.isTraceEnabled()) { + logger.trace((Supplier) + () -> new ParameterizedMessage("failed certificate validation for principal [{}]", token.principal()), e); + } else if (logger.isDebugEnabled()) { + logger.debug("failed certificate validation for principal [{}]", token.principal()); + } + } + return false; + } + + // No extra trust managers specified, so at this point we can be considered authenticated. + return true; + } + + static X509TrustManager trustManagers(RealmConfig realmConfig) { + final Settings settings = realmConfig.settings(); + final Environment env = realmConfig.env(); + List certificateAuthorities = settings.getAsList(PkiRealmSettings.SSL_SETTINGS.caPaths.getKey(), null); + String truststorePath = PkiRealmSettings.SSL_SETTINGS.truststorePath.get(settings).orElse(null); + if (truststorePath == null && certificateAuthorities == null) { + return null; + } else if (truststorePath != null && certificateAuthorities != null) { + final String pathKey = RealmSettings.getFullSettingKey(realmConfig, PkiRealmSettings.SSL_SETTINGS.truststorePath); + final String caKey = RealmSettings.getFullSettingKey(realmConfig, PkiRealmSettings.SSL_SETTINGS.caPaths); + throw new IllegalArgumentException("[" + pathKey + "] and [" + caKey + "] cannot be used at the same time"); + } else if (truststorePath != null) { + return trustManagersFromTruststore(truststorePath, realmConfig); + } + return trustManagersFromCAs(settings, env); + } + + private static X509TrustManager trustManagersFromTruststore(String truststorePath, RealmConfig realmConfig) { + final Settings settings = realmConfig.settings(); + if (PkiRealmSettings.SSL_SETTINGS.truststorePassword.exists(settings) == false + && PkiRealmSettings.SSL_SETTINGS.legacyTruststorePassword.exists(settings) == false) { + throw new IllegalArgumentException("Neither [" + + RealmSettings.getFullSettingKey(realmConfig, PkiRealmSettings.SSL_SETTINGS.truststorePassword) + "] or [" + + RealmSettings.getFullSettingKey(realmConfig, PkiRealmSettings.SSL_SETTINGS.legacyTruststorePassword) + "] is configured" + ); + } + try (SecureString password = PkiRealmSettings.SSL_SETTINGS.truststorePassword.get(settings)) { + String trustStoreAlgorithm = PkiRealmSettings.SSL_SETTINGS.truststoreAlgorithm.get(settings); + String trustStoreType = SSLConfigurationSettings.getKeyStoreType(PkiRealmSettings.SSL_SETTINGS.truststoreType, + settings, truststorePath); + try { + return CertUtils.trustManager(truststorePath, trustStoreType, password.getChars(), trustStoreAlgorithm, realmConfig.env()); + } 
catch (Exception e) { + throw new IllegalArgumentException("failed to load specified truststore", e); + } + } + } + + private static X509TrustManager trustManagersFromCAs(Settings settings, Environment env) { + List certificateAuthorities = settings.getAsList(PkiRealmSettings.SSL_SETTINGS.caPaths.getKey(), null); + assert certificateAuthorities != null; + try { + Certificate[] certificates = CertUtils.readCertificates(certificateAuthorities, env); + return CertUtils.trustManager(certificates); + } catch (Exception e) { + throw new ElasticsearchException("failed to load certificate authorities for PKI realm", e); + } + } + + @Override + public void expire(String username) { + try (ReleasableLock ignored = writeLock.acquire()) { + Iterator userIterator = cache.values().iterator(); + while (userIterator.hasNext()) { + if (userIterator.next().principal().equals(username)) { + userIterator.remove(); + // do not break since there is no guarantee username is unique in this realm + } + } + } + } + + @Override + public void expireAll() { + try (ReleasableLock ignored = readLock.acquire()) { + cache.invalidateAll(); + } + } + + private static BytesKey computeFingerprint(X509Certificate certificate) throws CertificateEncodingException { + MessageDigest digest = MessageDigests.sha256(); + digest.update(certificate.getEncoded()); + return new BytesKey(digest.digest()); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/X509AuthenticationToken.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/X509AuthenticationToken.java new file mode 100644 index 0000000000000..8603a662efa4c --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/X509AuthenticationToken.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.pki; + +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; + +import java.security.cert.X509Certificate; + +public class X509AuthenticationToken implements AuthenticationToken { + + private final String principal; + private final String dn; + private X509Certificate[] credentials; + + public X509AuthenticationToken(X509Certificate[] certificates, String principal, String dn) { + this.principal = principal; + this.credentials = certificates; + this.dn = dn; + } + + @Override + public String principal() { + return principal; + } + + @Override + public X509Certificate[] credentials() { + return credentials; + } + + public String dn() { + return dn; + } + + @Override + public void clearCredentials() { + credentials = null; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/IdpConfiguration.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/IdpConfiguration.java new file mode 100644 index 0000000000000..0e1e40fde45f0 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/IdpConfiguration.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.saml; + +import java.util.List; +import java.util.function.Supplier; + +import org.opensaml.security.credential.Credential; + +/** + * A simple container class that holds all configuration related to a SAML Identity Provider (IdP). + */ +class IdpConfiguration { + + private final String entityId; + private final Supplier> signingCredentials; + + IdpConfiguration(String entityId, Supplier> signingCredentials) { + this.entityId = entityId; + this.signingCredentials = signingCredentials; + } + + /** + * The SAML identifier (as a URI) for the IDP + */ + String getEntityId() { + return entityId; + } + + /** + * A list of credentials that the IDP uses for signing messages. + * A trusted message should be signed with any one (or more) of these credentials. + */ + List getSigningCredentials() { + return signingCredentials.get(); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlAttributes.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlAttributes.java new file mode 100644 index 0000000000000..6915d1aabf635 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlAttributes.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.saml; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.opensaml.saml.saml2.core.Attribute; +import org.opensaml.saml.saml2.core.NameIDType; + +/** + * An lightweight collection of SAML attributes + */ +public class SamlAttributes { + + public static final String NAMEID_SYNTHENTIC_ATTRIBUTE = "nameid"; + public static final String PERSISTENT_NAMEID_SYNTHENTIC_ATTRIBUTE = "nameid:persistent"; + + private final SamlNameId name; + private final String session; + private final List attributes; + + SamlAttributes(SamlNameId name, String session, List attributes) { + this.name = name; + this.session = session; + this.attributes = attributes; + } + + /** + * Finds all values for the specified attribute + * + * @param attributeId The name of the attribute - either its {@code name} or @{code friendlyName} + * @return A list of all matching attribute values (may be empty). + */ + List getAttributeValues(String attributeId) { + if (attributeId.equals(NAMEID_SYNTHENTIC_ATTRIBUTE)) { + return name == null ? 
Collections.emptyList() : Collections.singletonList(name.value); + } + if (name != null && NameIDType.PERSISTENT.equals(name.format) && attributeId.equals(PERSISTENT_NAMEID_SYNTHENTIC_ATTRIBUTE)) { + return Collections.singletonList(name.value); + } + if (Strings.isNullOrEmpty(attributeId)) { + return Collections.emptyList(); + } + return attributes.stream() + .filter(attr -> attributeId.equals(attr.name) || attributeId.equals(attr.friendlyName)) + .flatMap(attr -> attr.values.stream()) + .collect(Collectors.toList()); + } + + List attributes() { + return Collections.unmodifiableList(attributes); + } + + SamlNameId name() { + return name; + } + + String session() { + return session; + } + + @Override + public String toString() { + return getClass().getSimpleName() + "(" + name + ")[" + session + "]{" + attributes + "}"; + } + + static class SamlAttribute { + final String name; + final String friendlyName; + final List values; + + SamlAttribute(Attribute attribute) { + this(attribute.getName(), attribute.getFriendlyName(), + attribute.getAttributeValues().stream().map(x -> x.getDOM().getTextContent()).collect(Collectors.toList())); + } + + SamlAttribute(String name, @Nullable String friendlyName, List values) { + this.name = Objects.requireNonNull(name, "Attribute name cannot be null"); + this.friendlyName = friendlyName; + this.values = Collections.unmodifiableList(values); + } + + @Override + public String toString() { + if (Strings.isNullOrEmpty(friendlyName)) { + return name + '=' + values; + } else { + return friendlyName + '(' + name + ")=" + values; + } + } + } + + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticator.java new file mode 100644 index 0000000000000..93bbe2c1a7567 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticator.java @@ -0,0 +1,332 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.saml; + +import java.time.Clock; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Objects; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.opensaml.core.xml.XMLObject; +import org.opensaml.saml.saml2.core.Assertion; +import org.opensaml.saml.saml2.core.Attribute; +import org.opensaml.saml.saml2.core.AttributeStatement; +import org.opensaml.saml.saml2.core.Audience; +import org.opensaml.saml.saml2.core.AudienceRestriction; +import org.opensaml.saml.saml2.core.Conditions; +import org.opensaml.saml.saml2.core.EncryptedAssertion; +import org.opensaml.saml.saml2.core.EncryptedAttribute; +import org.opensaml.saml.saml2.core.Response; +import org.opensaml.saml.saml2.core.Status; +import org.opensaml.saml.saml2.core.StatusCode; +import org.opensaml.saml.saml2.core.StatusDetail; +import org.opensaml.saml.saml2.core.StatusMessage; +import org.opensaml.saml.saml2.core.Subject; +import org.opensaml.saml.saml2.core.SubjectConfirmation; +import org.opensaml.saml.saml2.core.SubjectConfirmationData; +import org.opensaml.xmlsec.encryption.support.DecryptionException; +import org.w3c.dom.Document; +import org.w3c.dom.Element; + +import static org.elasticsearch.xpack.security.authc.saml.SamlUtils.samlException; +import static org.opensaml.saml.saml2.core.SubjectConfirmation.METHOD_BEARER; + +/** + * Processes the IdP's SAML Response for our AuthnRequest, validates it, and extracts the relevant properties. + */ +class SamlAuthenticator extends SamlRequestHandler { + + private static final String RESPONSE_TAG_NAME = "Response"; + + SamlAuthenticator(RealmConfig realmConfig, + Clock clock, + IdpConfiguration idp, + SpConfiguration sp, + TimeValue maxSkew) { + super(realmConfig, clock, idp, sp, maxSkew); + } + + /** + * Processes the provided SAML response within the provided token and, if valid, extracts the relevant attributes from it. 
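+     * <p>
+     * Illustrative call site (the {@code authenticator} and {@code token} names, and the
+     * "groups" attribute, are examples only, not part of this class):
+     * <pre>
+     * SamlAttributes attributes = authenticator.authenticate(token);
+     * List&lt;String&gt; groups = attributes.getAttributeValues("groups");
+     * </pre>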
+ * + * @throws org.elasticsearch.ElasticsearchSecurityException If the SAML is invalid for this realm/configuration + */ + SamlAttributes authenticate(SamlToken token) { + final Element root = parseSamlMessage(token.getContent()); + if (RESPONSE_TAG_NAME.equals(root.getLocalName()) && SAML_NAMESPACE.equals(root.getNamespaceURI())) { + try { + return authenticateResponse(root, token.getAllowedSamlRequestIds()); + } catch (ElasticsearchSecurityException e) { + logger.trace("Rejecting SAML response {} because {}", SamlUtils.toString(root), e.getMessage()); + throw e; + } + } else { + throw samlException("SAML content [{}] should have a root element of Namespace=[{}] Tag=[{}]", + root, SAML_NAMESPACE, RESPONSE_TAG_NAME); + } + } + + private SamlAttributes authenticateResponse(Element element, Collection allowedSamlRequestIds) { + final Response response = buildXmlObject(element, Response.class); + if (response == null) { + throw samlException("Cannot convert element {} into Response object", element); + } + if (logger.isTraceEnabled()) { + logger.trace(SamlUtils.describeSamlObject(response)); + } + if (Strings.hasText(response.getInResponseTo()) && allowedSamlRequestIds.contains(response.getInResponseTo()) == false) { + logger.debug("The SAML Response with ID {} is unsolicited. A user might have used a stale URL or the Identity Provider " + + "incorrectly populates the InResponseTo attribute", response.getID()); + throw samlException("SAML content is in-response-to {} but expected one of {} ", + response.getInResponseTo(), allowedSamlRequestIds); + } + + final Status status = response.getStatus(); + if (status == null || status.getStatusCode() == null) { + throw samlException("SAML Response has no status code"); + } + if (isSuccess(status) == false) { + throw samlException("SAML Response is not a 'success' response: Code={} Message={} Detail={}", + status.getStatusCode().getValue(), getMessage(status), getDetail(status)); + } + + checkResponseDestination(response); + + Tuple> details = extractDetails(response, allowedSamlRequestIds); + final Assertion assertion = details.v1(); + final SamlNameId nameId = SamlNameId.forSubject(assertion.getSubject()); + final String session = getSessionIndex(assertion); + final List attributes = details.v2().stream() + .map(SamlAttributes.SamlAttribute::new) + .collect(Collectors.toList()); + if (logger.isTraceEnabled()) { + StringBuilder sb = new StringBuilder(); + sb.append("The SAML Assertion contained the following attributes: \n"); + for (SamlAttributes.SamlAttribute attr : attributes) { + sb.append(attr).append("\n"); + } + logger.trace(sb.toString()); + } + if (attributes.isEmpty() && nameId == null) { + logger.debug("The Attribute Statements of SAML Response with ID {} contained no attributes and the SAML Assertion Subject did" + + "not contain a SAML NameID. Please verify that the Identity Provider configuration with regards to attribute " + + "release is correct. ", response.getID()); + throw samlException("Could not process any SAML attributes in {}", response.getElementQName()); + } + + return new SamlAttributes(nameId, session, attributes); + } + + private String getMessage(Status status) { + final StatusMessage sm = status.getStatusMessage(); + return sm == null ? null : sm.getMessage(); + } + + private String getDetail(Status status) { + final StatusDetail sd = status.getStatusDetail(); + return sd == null ? 
null : SamlUtils.toString(sd.getDOM()); + } + + private boolean isSuccess(Status status) { + return status.getStatusCode().getValue().equals(StatusCode.SUCCESS); + } + + private String getSessionIndex(Assertion assertion) { + return assertion.getAuthnStatements().stream().map(as -> as.getSessionIndex()).filter(Objects::nonNull).findFirst().orElse(null); + } + + private void checkResponseDestination(Response response) { + final String asc = getSpConfiguration().getAscUrl(); + if (asc.equals(response.getDestination()) == false) { + throw samlException("SAML response " + response.getID() + " is for destination " + response.getDestination() + + " but this realm uses " + asc); + } + } + + private Tuple> extractDetails(Response response, Collection allowedSamlRequestIds) { + final boolean requireSignedAssertions; + if (response.isSigned()) { + validateSignature(response.getSignature()); + requireSignedAssertions = false; + } else { + requireSignedAssertions = true; + } + + checkIssuer(response.getIssuer(), response); + + final int assertionCount = response.getAssertions().size() + response.getEncryptedAssertions().size(); + if (assertionCount > 1) { + throw samlException("Expecting only 1 assertion, but response contains multiple (" + assertionCount + ")"); + } + for (Assertion assertion : response.getAssertions()) { + return new Tuple<>(assertion, processAssertion(assertion, requireSignedAssertions, allowedSamlRequestIds)); + } + for (EncryptedAssertion encrypted : response.getEncryptedAssertions()) { + Assertion assertion = decrypt(encrypted); + moveToNewDocument(assertion); + assertion.getDOM().setIdAttribute("ID", true); + return new Tuple<>(assertion, processAssertion(assertion, requireSignedAssertions, allowedSamlRequestIds)); + } + throw samlException("No assertions found in SAML response"); + } + + private void moveToNewDocument(XMLObject xmlObject) { + final Element element = xmlObject.getDOM(); + final Document doc = element.getOwnerDocument().getImplementation().createDocument(null, null, null); + doc.adoptNode(element); + doc.appendChild(element); + } + + private Assertion decrypt(EncryptedAssertion encrypted) { + if (decrypter == null) { + throw samlException("SAML assertion [" + text(encrypted, 32) + "] is encrypted, but no decryption key is available"); + } + try { + return decrypter.decrypt(encrypted); + } catch (DecryptionException e) { + logger.debug(() -> new ParameterizedMessage("Failed to decrypt SAML assertion [{}] with [{}]", + text(encrypted, 512), describe(getSpConfiguration().getEncryptionCredentials())), e); + throw samlException("Failed to decrypt SAML assertion " + text(encrypted, 32), e); + } + } + + private List processAssertion(Assertion assertion, boolean requireSignature, Collection allowedSamlRequestIds) { + if (logger.isTraceEnabled()) { + logger.trace("(Possibly decrypted) Assertion: {}", SamlUtils.samlObjectToString(assertion)); + logger.trace(SamlUtils.describeSamlObject(assertion)); + } + // Do not further process unsigned Assertions + if (assertion.isSigned()) { + validateSignature(assertion.getSignature()); + } else if (requireSignature) { + throw samlException("Assertion [{}] is not signed, but a signature is required", assertion.getElementQName()); + } + + checkConditions(assertion.getConditions()); + checkIssuer(assertion.getIssuer(), assertion); + checkSubject(assertion.getSubject(), assertion, allowedSamlRequestIds); + + List attributes = new ArrayList<>(); + for (AttributeStatement statement : assertion.getAttributeStatements()) { + 
logger.trace("SAML AttributeStatement has [{}] attributes and [{}] encrypted attributes", + statement.getAttributes().size(), statement.getEncryptedAttributes().size()); + attributes.addAll(statement.getAttributes()); + for (EncryptedAttribute enc : statement.getEncryptedAttributes()) { + final Attribute attribute = decrypt(enc); + if (attribute != null) { + logger.trace("Successfully decrypted attribute: {}", SamlUtils.samlObjectToString(attribute)); + attributes.add(attribute); + } + } + } + return attributes; + } + + private Attribute decrypt(EncryptedAttribute encrypted) { + if (decrypter == null) { + logger.info("SAML message has encrypted attribute [" + text(encrypted, 32) + "], but no encryption key has been configured"); + return null; + } + try { + return decrypter.decrypt(encrypted); + } catch (DecryptionException e) { + logger.info("Failed to decrypt SAML attribute " + text(encrypted, 32), e); + return null; + } + } + + private void checkConditions(Conditions conditions) { + if (conditions != null) { + if (logger.isTraceEnabled()) { + logger.trace("SAML Assertion was intended for the following Service providers: {}", + conditions.getAudienceRestrictions().stream().map(r -> text(r, 32)) + .collect(Collectors.joining(" | "))); + logger.trace("SAML Assertion is only valid between: " + conditions.getNotBefore() + " and " + conditions.getNotOnOrAfter()); + } + checkAudienceRestrictions(conditions.getAudienceRestrictions()); + checkLifetimeRestrictions(conditions); + } + } + + private void checkSubject(Subject assertionSubject, XMLObject parent, Collection<String> allowedSamlRequestIds) { + + if (assertionSubject == null) { + throw samlException("SAML Assertion ({}) has no Subject", text(parent, 16)); + } + final List<SubjectConfirmationData> confirmationData = assertionSubject.getSubjectConfirmations().stream() + .filter(data -> data.getMethod().equals(METHOD_BEARER)) + .map(SubjectConfirmation::getSubjectConfirmationData).filter(Objects::nonNull).collect(Collectors.toList()); + if (confirmationData.size() != 1) { + throw samlException("SAML Assertion subject contains {} bearer SubjectConfirmation, while exactly one was expected.", + confirmationData.size()); + } + if (logger.isTraceEnabled()) { + logger.trace("SAML Assertion Subject Confirmation intended recipient is: " + confirmationData.get(0).getRecipient()); + logger.trace("SAML Assertion Subject Confirmation is only valid before: " + confirmationData.get(0).getNotOnOrAfter()); + logger.trace("SAML Assertion Subject Confirmation is in response to: " + confirmationData.get(0).getInResponseTo()); + } + checkRecipient(confirmationData.get(0)); + checkLifetimeRestrictions(confirmationData.get(0)); + checkInResponseTo(confirmationData.get(0), allowedSamlRequestIds); + } + + private void checkRecipient(SubjectConfirmationData subjectConfirmationData) { + final SpConfiguration sp = getSpConfiguration(); + if (sp.getAscUrl().equals(subjectConfirmationData.getRecipient()) == false) { + throw samlException("SAML Assertion SubjectConfirmationData Recipient {} does not match expected value {}", + subjectConfirmationData.getRecipient(), sp.getAscUrl()); + } + } + + private void checkInResponseTo(SubjectConfirmationData subjectConfirmationData, Collection<String> allowedSamlRequestIds) { + // Allow for IdP initiated SSO where InResponseTo MUST be missing + if (Strings.hasText(subjectConfirmationData.getInResponseTo()) + && allowedSamlRequestIds.contains(subjectConfirmationData.getInResponseTo()) == false) { + throw samlException("SAML Assertion SubjectConfirmationData is
in-response-to {} but expected one of {} ", + subjectConfirmationData.getInResponseTo(), allowedSamlRequestIds); + } + } + + private void checkAudienceRestrictions(List restrictions) { + final String spEntityId = this.getSpConfiguration().getEntityId(); + final Predicate predicate = ar -> + ar.getAudiences().stream().map(Audience::getAudienceURI).anyMatch(spEntityId::equals); + if (restrictions.stream().allMatch(predicate) == false) { + throw samlException("Conditions [{}] do not match required audience [{}]", + restrictions.stream().map(r -> text(r, 32)).collect(Collectors.joining(" | ")), getSpConfiguration().getEntityId()); + } + } + + private void checkLifetimeRestrictions(Conditions conditions) { + // In order to compensate for clock skew we construct 2 alternate realities + // - a "future now" that is now + the maximum skew we will tolerate. Essentially "if our clock is 2min slow, what time is it now?" + // - a "past now" that is now - the maximum skew we will tolerate. Essentially "if our clock is 2min fast, what time is it now?" + final Instant now = now(); + final Instant futureNow = now.plusMillis(maxSkewInMillis()); + final Instant pastNow = now.minusMillis(maxSkewInMillis()); + if (conditions.getNotBefore() != null && futureNow.isBefore(toInstant(conditions.getNotBefore()))) { + throw samlException("Rejecting SAML assertion because [{}] is before [{}]", futureNow, conditions.getNotBefore()); + } + if (conditions.getNotOnOrAfter() != null && pastNow.isBefore(toInstant(conditions.getNotOnOrAfter())) == false) { + throw samlException("Rejecting SAML assertion because [{}] is on/after [{}]", pastNow, conditions.getNotOnOrAfter()); + } + } + + private void checkLifetimeRestrictions(SubjectConfirmationData subjectConfirmationData) { + validateNotOnOrAfter(subjectConfirmationData.getNotOnOrAfter()); + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthnRequestBuilder.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthnRequestBuilder.java new file mode 100644 index 0000000000000..531213a942217 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthnRequestBuilder.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.saml; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Strings; +import org.opensaml.saml.saml2.core.AuthnRequest; +import org.opensaml.saml.saml2.core.NameID; +import org.opensaml.saml.saml2.core.NameIDPolicy; +import org.opensaml.saml.saml2.metadata.EntityDescriptor; +import org.opensaml.saml.saml2.metadata.IDPSSODescriptor; + +import java.time.Clock; + +/** + * Generates a SAML {@link AuthnRequest} from a simplified set of parameters. 
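+ * <p>
+ * A minimal usage sketch, assuming an {@link SpConfiguration} and the IdP's {@link EntityDescriptor} are already
+ * available; the binding URIs shown are only illustrative, callers pass whichever bindings their realm is configured for:
+ * <pre>{@code
+ * AuthnRequest authnRequest = new SamlAuthnRequestBuilder(
+ *         spConfiguration, SAMLConstants.SAML2_POST_BINDING_URI,
+ *         idpDescriptor, SAMLConstants.SAML2_REDIRECT_BINDING_URI, Clock.systemUTC())
+ *     .forceAuthn(Boolean.TRUE)
+ *     .build();
+ * }</pre>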
+ */ +class SamlAuthnRequestBuilder extends SamlMessageBuilder { + + private final String spBinding; + private final String idpBinding; + private Boolean forceAuthn; + private NameIDPolicySettings nameIdSettings; + + SamlAuthnRequestBuilder(SpConfiguration spConfig, String spBinding, EntityDescriptor idpDescriptor, String idBinding, Clock clock) { + super(idpDescriptor, spConfig, clock); + this.spBinding = spBinding; + this.idpBinding = idBinding; + this.nameIdSettings = new NameIDPolicySettings(NameID.TRANSIENT, false, null); + } + + SamlAuthnRequestBuilder forceAuthn(Boolean forceAuthn) { + this.forceAuthn = forceAuthn; + return this; + } + + SamlAuthnRequestBuilder nameIDPolicy(NameIDPolicySettings settings) { + this.nameIdSettings = settings; + return this; + } + + AuthnRequest build() { + final String destination = getIdpLocation(); + + final AuthnRequest request = SamlUtils.buildObject(AuthnRequest.class, AuthnRequest.DEFAULT_ELEMENT_NAME); + request.setID(buildId()); + request.setIssueInstant(now()); + request.setDestination(destination); + request.setProtocolBinding(spBinding); + request.setAssertionConsumerServiceURL(serviceProvider.getAscUrl()); + request.setIssuer(buildIssuer()); + if (nameIdSettings != null) { + request.setNameIDPolicy(buildNameIDPolicy()); + } + request.setForceAuthn(forceAuthn); + return request; + } + + private NameIDPolicy buildNameIDPolicy() { + NameIDPolicy nameIDPolicy = SamlUtils.buildObject(NameIDPolicy.class, NameIDPolicy.DEFAULT_ELEMENT_NAME); + nameIDPolicy.setFormat(nameIdSettings.format); + nameIDPolicy.setAllowCreate(nameIdSettings.allowCreate); + nameIDPolicy.setSPNameQualifier(Strings.isNullOrEmpty(nameIdSettings.spNameQualifier) ? null : nameIdSettings.spNameQualifier); + return nameIDPolicy; + } + + private String getIdpLocation() { + final String location = getIdentityProviderEndpoint(idpBinding, IDPSSODescriptor::getSingleSignOnServices); + if (location == null) { + throw new ElasticsearchException("Cannot find [{}]/[{}] in descriptor [{}]", + IDPSSODescriptor.DEFAULT_ELEMENT_NAME, idpBinding, identityProvider.getID()); + } + return location; + } + + static class NameIDPolicySettings { + private final String format; + private final boolean allowCreate; + private final String spNameQualifier; + + NameIDPolicySettings(String format, boolean allowCreate, String spNameQualifier) { + this.format = format; + this.allowCreate = allowCreate; + this.spNameQualifier = spNameQualifier; + } + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutRequestHandler.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutRequestHandler.java new file mode 100644 index 0000000000000..3e827952f45cb --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutRequestHandler.java @@ -0,0 +1,253 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.saml; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.time.Clock; +import java.util.Base64; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.zip.Inflater; +import java.util.zip.InflaterInputStream; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.core.internal.io.Streams; +import org.elasticsearch.rest.RestUtils; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.opensaml.saml.common.SAMLObject; +import org.opensaml.saml.saml2.core.EncryptedID; +import org.opensaml.saml.saml2.core.LogoutRequest; +import org.opensaml.saml.saml2.core.NameID; +import org.opensaml.xmlsec.crypto.XMLSigningUtil; +import org.opensaml.xmlsec.encryption.support.DecryptionException; +import org.opensaml.xmlsec.signature.Signature; +import org.w3c.dom.Element; + +import static org.elasticsearch.xpack.security.authc.saml.SamlUtils.samlException; + +/** + * Processes a LogoutRequest for an IdP-initiated logout. + */ +public class SamlLogoutRequestHandler extends SamlRequestHandler { + + private static final String REQUEST_TAG_NAME = "LogoutRequest"; + + SamlLogoutRequestHandler(RealmConfig realmConfig, Clock clock, IdpConfiguration idp, SpConfiguration sp, TimeValue maxSkew) { + super(realmConfig, clock, idp, sp, maxSkew); + } + + /** + * Processes the provided LogoutRequest and extracts the NameID and SessionIndex. + * Returns these in a {@link SamlAttributes} object with an empty attributes list. + *
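+ * If the query string does not carry a {@code Signature} parameter, the LogoutRequest XML itself must contain an enveloped signature; a request that is signed in neither way is rejected.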

+ * The recommended binding for Logout (for maximum interoperability) is HTTP-Redirect. + * Under this binding the signature is applied to the query-string (including parameter + * names and url-encoded/base64-encoded/deflated values). Therefore in order to properly + * validate the signature, this method operates on a raw query- string. + * + * @throws ElasticsearchSecurityException If the SAML is invalid for this realm/configuration + */ + public Result parseFromQueryString(String queryString) { + final ParsedQueryString parsed = parseQueryStringAndValidateSignature(queryString); + + final Element root = parseSamlMessage(inflate(decodeBase64(parsed.samlRequest))); + if (REQUEST_TAG_NAME.equals(root.getLocalName()) && SAML_NAMESPACE.equals(root.getNamespaceURI())) { + try { + final LogoutRequest logoutRequest = buildXmlObject(root, LogoutRequest.class); + return parseLogout(logoutRequest, parsed.hasSignature == false, parsed.relayState); + } catch (ElasticsearchSecurityException e) { + logger.trace("Rejecting SAML logout request {} because {}", SamlUtils.toString(root), e.getMessage()); + throw e; + } + } else { + throw samlException("SAML content [{}] should have a root element of Namespace=[{}] Tag=[{}]", + root, SAML_NAMESPACE, REQUEST_TAG_NAME); + } + } + + private ParsedQueryString parseQueryStringAndValidateSignature(String queryString) { + final String signatureInput = queryString.replaceAll("&Signature=.*$", ""); + final Map parameters = new HashMap<>(); + RestUtils.decodeQueryString(queryString, 0, parameters); + final String samlRequest = parameters.get("SAMLRequest"); + if (samlRequest == null) { + throw samlException("Could not parse SAMLRequest from query string: [{}]", queryString); + } + + final String relayState = parameters.get("RelayState"); + final String signatureAlgorithm = parameters.get("SigAlg"); + final String signature = parameters.get("Signature"); + if (signature == null || signatureAlgorithm == null) { + return new ParsedQueryString(samlRequest, false, relayState); + } + + validateSignature(signatureInput, signatureAlgorithm, signature); + return new ParsedQueryString(samlRequest, true, relayState); + } + + private Result parseLogout(LogoutRequest logoutRequest, boolean requireSignature, String relayState) { + final Signature signature = logoutRequest.getSignature(); + if (signature == null) { + if (requireSignature) { + throw samlException("Logout request is not signed"); + } + } else { + validateSignature(signature); + } + + checkIssuer(logoutRequest.getIssuer(), logoutRequest); + checkDestination(logoutRequest); + validateNotOnOrAfter(logoutRequest.getNotOnOrAfter()); + + return new Result(logoutRequest.getID(), SamlNameId.fromXml(getNameID(logoutRequest)), getSessionIndex(logoutRequest), relayState); + } + + private void validateSignature(String inputString, String signatureAlgorithm, String signature) { + final byte[] sigBytes = decodeBase64(signature); + final byte[] inputBytes = inputString.getBytes(StandardCharsets.US_ASCII); + final String signatureText = Strings.cleanTruncate(signature, 32); + checkIdpSignature(credential -> { + if (XMLSigningUtil.verifyWithURI(credential, signatureAlgorithm, sigBytes, inputBytes)) { + logger.debug(() -> new ParameterizedMessage("SAML Signature [{}] matches credentials [{}] [{}]", + signatureText, credential.getEntityId(), credential.getPublicKey())); + return true; + } else { + logger.debug(() -> new ParameterizedMessage("SAML Signature [{}] failed against credentials [{}] [{}]", + signatureText, 
credential.getEntityId(), credential.getPublicKey())); + return false; + } + }, signatureText); + } + + private byte[] decodeBase64(String content) { + try { + return Base64.getDecoder().decode(content.replaceAll("\\s+", "")); + } catch (IllegalArgumentException e) { + logger.info("Failed to decode base64 string [{}] - {}", content, e.toString()); + throw samlException("SAML message cannot be Base64 decoded", e); + } + } + + private byte[] inflate(byte[] bytes) { + Inflater inflater = new Inflater(true); + try (ByteArrayInputStream in = new ByteArrayInputStream(bytes); + InflaterInputStream inflate = new InflaterInputStream(in, inflater); + ByteArrayOutputStream out = new ByteArrayOutputStream(bytes.length * 3 / 2)) { + Streams.copy(inflate, out); + return out.toByteArray(); + } catch (IOException e) { + throw samlException("SAML message cannot be inflated", e); + } + } + + private NameID getNameID(LogoutRequest logoutRequest) { + final NameID nameID = logoutRequest.getNameID(); + if (nameID == null) { + final EncryptedID encryptedID = logoutRequest.getEncryptedID(); + if (encryptedID != null) { + final SAMLObject samlObject = decrypt(encryptedID); + if (samlObject instanceof NameID) { + return (NameID) samlObject; + } + } + } + return nameID; + } + + private SAMLObject decrypt(EncryptedID encrypted) { + if (decrypter == null) { + throw samlException("SAML EncryptedID [" + text(encrypted, 32) + "] is encrypted, but no decryption key is available"); + } + try { + return decrypter.decrypt(encrypted); + } catch (DecryptionException e) { + logger.debug(() -> new ParameterizedMessage("Failed to decrypt SAML EncryptedID [{}] with [{}]", + text(encrypted, 512), describe(getSpConfiguration().getEncryptionCredentials())), e); + throw samlException("Failed to decrypt SAML EncryptedID " + text(encrypted, 32), e); + } + } + + private String getSessionIndex(LogoutRequest logoutRequest) { + return logoutRequest.getSessionIndexes() + .stream() + .map(as -> as.getSessionIndex()) + .filter(Objects::nonNull) + .findFirst() + .orElse(null); + } + + private void checkDestination(LogoutRequest request) { + final String url = getSpConfiguration().getLogoutUrl(); + if (url == null) { + throw samlException("SAML request " + request.getID() + " is for destination " + request.getDestination() + + " but this realm is not configured for logout"); + } + if (url.equals(request.getDestination()) == false) { + throw samlException("SAML request " + request.getID() + " is for destination " + request.getDestination() + + " but this realm uses " + url); + } + } + + static class ParsedQueryString { + final String samlRequest; + final boolean hasSignature; + final String relayState; + + ParsedQueryString(String samlRequest, boolean hasSignature, String relayState) { + this.samlRequest = samlRequest; + this.hasSignature = hasSignature; + this.relayState = relayState; + } + } + + public static class Result { + private final String requestId; + private final SamlNameId nameId; + private final String session; + private final String relayState; + + public Result(String requestId, SamlNameId nameId, String session, String relayState) { + this.requestId = requestId; + this.nameId = nameId; + this.session = session; + this.relayState = relayState; + } + + public String getRequestId() { + return requestId; + } + + public SamlNameId getNameId() { + return nameId; + } + + public String getSession() { + return session; + } + + public String getRelayState() { + return relayState; + } + + @Override + public String toString() { + return 
"SamlLogoutRequestHandler.Result{" + + "requestId='" + requestId + '\'' + + ", nameId=" + nameId + + ", session='" + session + '\'' + + ", relayState='" + relayState + '\'' + + '}'; + } + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutRequestMessageBuilder.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutRequestMessageBuilder.java new file mode 100644 index 0000000000000..1a25c5d9281c5 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutRequestMessageBuilder.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.saml; + +import java.time.Clock; + +import org.elasticsearch.common.Strings; +import org.opensaml.saml.common.xml.SAMLConstants; +import org.opensaml.saml.saml2.core.Issuer; +import org.opensaml.saml.saml2.core.LogoutRequest; +import org.opensaml.saml.saml2.core.NameID; +import org.opensaml.saml.saml2.core.SessionIndex; +import org.opensaml.saml.saml2.metadata.EntityDescriptor; +import org.opensaml.saml.saml2.metadata.SSODescriptor; + +/** + * Constructs {@code <LogoutRequest<} objects for use in a SAML Single-Sign-Out flow. + */ +class SamlLogoutRequestMessageBuilder extends SamlMessageBuilder { + private final NameID nameId; + private final String session; + + SamlLogoutRequestMessageBuilder(Clock clock, SpConfiguration serviceProvider, EntityDescriptor identityProvider, + NameID nameId, String session) { + super(identityProvider, serviceProvider, clock); + this.nameId = nameId; + this.session = session; + } + + LogoutRequest build() { + final String logoutUrl = getLogoutUrl(); + if (Strings.isNullOrEmpty(logoutUrl)) { + logger.debug("Cannot perform logout because the IDP {} does not provide a logout service", identityProvider.getEntityID()); + return null; + } + + final SessionIndex sessionIndex = SamlUtils.buildObject(SessionIndex.class, SessionIndex.DEFAULT_ELEMENT_NAME); + sessionIndex.setSessionIndex(session); + + final Issuer issuer = buildIssuer(); + + final LogoutRequest request = SamlUtils.buildObject(LogoutRequest.class, LogoutRequest.DEFAULT_ELEMENT_NAME); + request.setID(buildId()); + request.setIssueInstant(now()); + request.setDestination(logoutUrl); + request.setNameID(nameId); + request.getSessionIndexes().add(sessionIndex); + request.setIssuer(issuer); + return request; + } + + protected String getLogoutUrl() { + return getIdentityProviderEndpoint(SAMLConstants.SAML2_REDIRECT_BINDING_URI, SSODescriptor::getSingleLogoutServices); + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutResponseBuilder.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutResponseBuilder.java new file mode 100644 index 0000000000000..6861ea90ed0c1 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutResponseBuilder.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.saml; + +import java.time.Clock; + +import org.elasticsearch.common.Strings; +import org.opensaml.saml.common.xml.SAMLConstants; +import org.opensaml.saml.saml2.core.LogoutResponse; +import org.opensaml.saml.saml2.core.Status; +import org.opensaml.saml.saml2.core.StatusCode; +import org.opensaml.saml.saml2.metadata.EntityDescriptor; +import org.opensaml.saml.saml2.metadata.SSODescriptor; + +import static org.elasticsearch.xpack.security.authc.saml.SamlUtils.samlException; + +/** + * Constructs {@code <LogoutRespond<} objects for use in a SAML Single-Sign-Out flow. + */ +class SamlLogoutResponseBuilder extends SamlMessageBuilder { + private final String inResponseTo; + private final String statusValue; + + SamlLogoutResponseBuilder(Clock clock, SpConfiguration serviceProvider, EntityDescriptor identityProvider, + String inResponseTo, String statusValue) { + super(identityProvider, serviceProvider, clock); + this.inResponseTo = inResponseTo; + this.statusValue = statusValue; + } + + LogoutResponse build() { + final String destination = getLogoutUrl(); + if (Strings.isNullOrEmpty(destination)) { + throw samlException("Cannot send LogoutResponse because the IDP {} does not provide a logout service", + identityProvider.getEntityID()); + } + + final LogoutResponse res = SamlUtils.buildObject(LogoutResponse.class, LogoutResponse.DEFAULT_ELEMENT_NAME); + res.setID(buildId()); + res.setIssueInstant(now()); + res.setDestination(destination); + res.setIssuer(buildIssuer()); + res.setInResponseTo(inResponseTo); + + final Status status = SamlUtils.buildObject(Status.class, Status.DEFAULT_ELEMENT_NAME); + final StatusCode statusCode= SamlUtils.buildObject(StatusCode.class, StatusCode.DEFAULT_ELEMENT_NAME); + statusCode.setValue(this.statusValue); + status.setStatusCode(statusCode); + res.setStatus(status); + return res; + } + + protected String getLogoutUrl() { + return getIdentityProviderEndpoint(SAMLConstants.SAML2_REDIRECT_BINDING_URI, SSODescriptor::getSingleLogoutServices); + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMessageBuilder.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMessageBuilder.java new file mode 100644 index 0000000000000..bdc8da0dfca12 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMessageBuilder.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.saml; + +import java.time.Clock; +import java.util.Collection; +import java.util.List; +import java.util.function.Function; +import java.util.stream.Collectors; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.logging.Loggers; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.opensaml.saml.saml2.core.Issuer; +import org.opensaml.saml.saml2.metadata.Endpoint; +import org.opensaml.saml.saml2.metadata.EntityDescriptor; +import org.opensaml.saml.saml2.metadata.IDPSSODescriptor; + +/** + * Abstract base class for objects that build some sort of {@link org.opensaml.saml.common.SAMLObject} + */ +public abstract class SamlMessageBuilder { + + protected final Logger logger; + protected final Clock clock; + protected final SpConfiguration serviceProvider; + protected final EntityDescriptor identityProvider; + + public SamlMessageBuilder(EntityDescriptor identityProvider, SpConfiguration serviceProvider, Clock clock) { + this.logger = Loggers.getLogger(getClass()); + this.identityProvider = identityProvider; + this.serviceProvider = serviceProvider; + this.clock = clock; + } + + protected String getIdentityProviderEndpoint(String binding, + Function<IDPSSODescriptor, ? extends Collection<? extends Endpoint>> selector) { + final List<String> locations = identityProvider.getRoleDescriptors(IDPSSODescriptor.DEFAULT_ELEMENT_NAME).stream() + .map(rd -> (IDPSSODescriptor) rd) + .flatMap(idp -> selector.apply(idp).stream()) + .filter(endp -> binding.equals(endp.getBinding())) + .map(sso -> sso.getLocation()) + .collect(Collectors.toList()); + if (locations.isEmpty()) { + return null; + } + if (locations.size() > 1) { + throw new ElasticsearchException("Found multiple locations for binding [{}] in descriptor [{}] - [{}]", + binding, identityProvider.getID(), locations); + } + return locations.get(0); + } + + protected DateTime now() { + return new DateTime(clock.millis(), DateTimeZone.UTC); + } + + protected Issuer buildIssuer() { + Issuer issuer = SamlUtils.buildObject(Issuer.class, Issuer.DEFAULT_ELEMENT_NAME); + issuer.setValue(this.serviceProvider.getEntityId()); + return issuer; + } + + protected String buildId() { + // 20 bytes (160 bits) of randomness as recommended by the SAML spec + return SamlUtils.generateSecureNCName(20); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java new file mode 100644 index 0000000000000..ce63a7bd5485e --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java @@ -0,0 +1,491 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.security.authc.saml; + +import java.io.IOException; +import java.io.InputStream; +import java.io.Reader; +import java.io.Writer; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.Key; +import java.security.PrivateKey; +import java.security.cert.Certificate; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.SuppressForbidden; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.LocaleUtils; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; +import org.elasticsearch.xpack.core.ssl.CertUtils; +import org.elasticsearch.xpack.security.authc.saml.SamlSpMetadataBuilder.ContactInfo; +import org.opensaml.core.xml.config.XMLObjectProviderRegistrySupport; +import org.opensaml.core.xml.io.MarshallingException; +import org.opensaml.saml.saml2.core.AuthnRequest; +import org.opensaml.saml.saml2.metadata.EntityDescriptor; +import org.opensaml.saml.saml2.metadata.impl.EntityDescriptorMarshaller; +import org.opensaml.security.credential.Credential; +import org.opensaml.security.x509.BasicX509Credential; +import org.opensaml.xmlsec.signature.Signature; +import org.opensaml.xmlsec.signature.support.SignatureConstants; +import org.opensaml.xmlsec.signature.support.Signer; +import org.w3c.dom.Element; +import org.xml.sax.SAXException; + +import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getRealmType; +import static org.elasticsearch.xpack.security.authc.saml.SamlRealm.require; + +/** + * CLI tool to generate SAML Metadata for a Service Provider (realm) + */ +public class SamlMetadataCommand extends EnvironmentAwareCommand { + + static final String METADATA_SCHEMA = "saml-schema-metadata-2.0.xsd"; + + private final OptionSpec outputPathSpec; + private final OptionSpec batchSpec; + private final OptionSpec realmSpec; + private final OptionSpec localeSpec; + private final OptionSpec serviceNameSpec; + private final OptionSpec attributeSpec; + private final OptionSpec orgNameSpec; + private final OptionSpec orgDisplayNameSpec; + private final OptionSpec orgUrlSpec; + private final OptionSpec contactsSpec; + private final OptionSpec signingPkcs12PathSpec; + private final OptionSpec signingCertPathSpec; + private final 
OptionSpec signingKeyPathSpec; + private final OptionSpec keyPasswordSpec; + private final CheckedFunction keyStoreFunction; + private KeyStoreWrapper keyStoreWrapper; + + public static void main(String[] args) throws Exception { + new SamlMetadataCommand().main(args, Terminal.DEFAULT); + } + + public SamlMetadataCommand() { + this((environment) -> { + KeyStoreWrapper ksWrapper = KeyStoreWrapper.load(environment.configFile()); + return ksWrapper; + }); + } + + public SamlMetadataCommand(CheckedFunction keyStoreFunction) { + super("Generate Service Provider Metadata for a SAML realm"); + outputPathSpec = parser.accepts("out", "path of the xml file that should be generated").withRequiredArg(); + batchSpec = parser.accepts("batch", "Do not prompt"); + realmSpec = parser.accepts("realm", "name of the elasticsearch realm for which metadata should be generated").withRequiredArg(); + localeSpec = parser.accepts("locale", "the locale to be used for elements that require a language").withRequiredArg(); + serviceNameSpec = parser.accepts("service-name", "the name to apply to the attribute consuming service").withRequiredArg(); + attributeSpec = parser.accepts("attribute", "additional SAML attributes to request").withRequiredArg(); + orgNameSpec = parser.accepts("organisation-name", "the name of the organisation operating this service").withRequiredArg(); + orgDisplayNameSpec = parser.accepts("organisation-display-name", "the display-name of the organisation operating this service") + .availableIf(orgNameSpec).withRequiredArg(); + orgUrlSpec = parser.accepts("organisation-url", "the URL of the organisation operating this service") + .requiredIf(orgNameSpec).withRequiredArg(); + contactsSpec = parser.accepts("contacts", "Include contact information in metadata").availableUnless(batchSpec); + signingPkcs12PathSpec = parser.accepts("signing-bundle", "path to an existing key pair (in PKCS#12 format) to be used for " + + "signing ") + .withRequiredArg(); + signingCertPathSpec = parser.accepts("signing-cert", "path to an existing signing certificate") + .availableUnless(signingPkcs12PathSpec) + .withRequiredArg(); + signingKeyPathSpec = parser.accepts("signing-key", "path to an existing signing private key") + .availableIf(signingCertPathSpec) + .requiredIf(signingCertPathSpec) + .withRequiredArg(); + keyPasswordSpec = parser.accepts("signing-key-password", "password for an existing signing private key or keypair") + .withOptionalArg(); + this.keyStoreFunction = keyStoreFunction; + } + + @Override + public void close() throws IOException { + super.close(); + if (keyStoreWrapper != null) { + keyStoreWrapper.close(); + } + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + // OpenSAML prints a lot of _stuff_ at info level, that really isn't needed in a command line tool. 
+ Loggers.setLevel(Loggers.getLogger("org.opensaml"), Level.WARN); + + final Logger logger = Loggers.getLogger(getClass()); + SamlUtils.initialize(logger); + + final EntityDescriptor descriptor = buildEntityDescriptor(terminal, options, env); + Element element = possiblySignDescriptor(terminal, options, descriptor, env); + + final Path xml = writeOutput(terminal, options, element); + validateXml(terminal, xml); + } + + // package-protected for testing + EntityDescriptor buildEntityDescriptor(Terminal terminal, OptionSet options, Environment env) throws Exception { + final boolean batch = options.has(batchSpec); + + final RealmConfig realm = findRealm(terminal, options, env); + terminal.println(Terminal.Verbosity.VERBOSE, + "Using realm configuration\n=====\n" + realm.settings().toDelimitedString('\n') + "====="); + final Locale locale = findLocale(options); + terminal.println(Terminal.Verbosity.VERBOSE, "Using locale: " + locale.toLanguageTag()); + + final SpConfiguration spConfig = SamlRealm.getSpConfiguration(realm); + final SamlSpMetadataBuilder builder = new SamlSpMetadataBuilder(locale, spConfig.getEntityId()) + .assertionConsumerServiceUrl(spConfig.getAscUrl()) + .singleLogoutServiceUrl(spConfig.getLogoutUrl()) + .encryptionCredentials(spConfig.getEncryptionCredentials()) + .signingCredential(spConfig.getSigningConfiguration().getCredential()) + .authnRequestsSigned(spConfig.getSigningConfiguration().shouldSign(AuthnRequest.DEFAULT_ELEMENT_LOCAL_NAME)) + .nameIdFormat(SamlRealmSettings.NAMEID_FORMAT.get(realm.settings())) + .serviceName(option(serviceNameSpec, options, env.settings().get("cluster.name"))); + + Map attributes = getAttributeNames(options, realm); + for (String attr : attributes.keySet()) { + final String name; + String friendlyName; + final String settingName = attributes.get(attr); + final String attributeSource = settingName == null ? "command line" : '"' + settingName + '"'; + if (attr.contains(":")) { + name = attr; + if (batch) { + friendlyName = settingName; + } else { + friendlyName = terminal.readText("What is the friendly name for " + + attributeSource + + " attribute \"" + attr + "\" [default: " + + (settingName == null ? 
"none" : settingName) + + "] "); + if (Strings.isNullOrEmpty(friendlyName)) { + friendlyName = settingName; + } + } + } else { + if (batch) { + throw new UserException(ExitCodes.CONFIG, "Option " + batchSpec.toString() + " is specified, but attribute " + + attr + " appears to be a FriendlyName value"); + } + friendlyName = attr; + name = requireText(terminal, + "What is the standard (urn) name for " + attributeSource + " attribute \"" + attr + "\" (required): "); + } + terminal.println(Terminal.Verbosity.VERBOSE, "Requesting attribute '" + name + "' (FriendlyName: '" + friendlyName + "')"); + builder.withAttribute(friendlyName, name); + } + + if (options.has(orgNameSpec) && options.has(orgUrlSpec)) { + String name = orgNameSpec.value(options); + builder.organization(name, option(orgDisplayNameSpec, options, name), orgUrlSpec.value(options)); + } + + if (options.has(contactsSpec)) { + terminal.println("\nPlease enter the personal details for each contact to be included in the metadata"); + do { + final String givenName = requireText(terminal, "What is the given name for the contact: "); + final String surName = requireText(terminal, "What is the surname for the contact: "); + final String displayName = givenName + ' ' + surName; + final String email = requireText(terminal, "What is the email address for " + displayName + ": "); + String type; + while (true) { + type = requireText(terminal, "What is the contact type for " + displayName + ": "); + if (ContactInfo.TYPES.containsKey(type)) { + break; + } else { + terminal.println("Type '" + type + "' is not valid. Valid values are " + + Strings.collectionToCommaDelimitedString(ContactInfo.TYPES.keySet())); + } + } + builder.withContact(type, givenName, surName, email); + } while (terminal.promptYesNo("Enter details for another contact", true)); + } + + return builder.build(); + } + + // package-protected for testing + Element possiblySignDescriptor(Terminal terminal, OptionSet options, EntityDescriptor descriptor, Environment env) + throws UserException { + try { + final EntityDescriptorMarshaller marshaller = new EntityDescriptorMarshaller(); + if (options.has(signingPkcs12PathSpec) || (options.has(signingCertPathSpec) && options.has(signingKeyPathSpec))) { + Signature signature = (Signature) XMLObjectProviderRegistrySupport.getBuilderFactory() + .getBuilder(Signature.DEFAULT_ELEMENT_NAME) + .buildObject(Signature.DEFAULT_ELEMENT_NAME); + signature.setSigningCredential(buildSigningCredential(terminal, options, env)); + signature.setSignatureAlgorithm(SignatureConstants.ALGO_ID_SIGNATURE_RSA_SHA256); + signature.setCanonicalizationAlgorithm(SignatureConstants.ALGO_ID_C14N_EXCL_OMIT_COMMENTS); + descriptor.setSignature(signature); + Element element = marshaller.marshall(descriptor); + Signer.signObject(signature); + return element; + } else { + return marshaller.marshall(descriptor); + } + } catch (Exception e) { + String errorMessage; + if (e instanceof MarshallingException) { + errorMessage = "Error serializing Metadata to file"; + } else if (e instanceof org.opensaml.xmlsec.signature.support.SignatureException) { + errorMessage = "Error attempting to sign Metadata"; + } else { + errorMessage = "Error building signing credentials from provided keyPair"; + } + terminal.println(Terminal.Verbosity.SILENT, errorMessage); + terminal.println("The following errors were found:"); + printExceptions(terminal, e); + throw new UserException(ExitCodes.CANT_CREATE, "Unable to create metadata document"); + } + } + + private Path writeOutput(Terminal 
terminal, OptionSet options, Element element) throws Exception { + final Path outputFile = resolvePath(option(outputPathSpec, options, "saml-elasticsearch-metadata.xml")); + final Writer writer = Files.newBufferedWriter(outputFile); + SamlUtils.print(element, writer, true); + terminal.println("\nWrote SAML metadata to " + outputFile); + return outputFile; + } + + private Credential buildSigningCredential(Terminal terminal, OptionSet options, Environment env) throws + Exception { + X509Certificate signingCertificate; + PrivateKey signingKey; + char[] password = getChars(keyPasswordSpec.value(options)); + if (options.has(signingPkcs12PathSpec)) { + Path p12Path = resolvePath(signingPkcs12PathSpec.value(options)); + Map keys = withPassword("certificate bundle (" + p12Path + ")", password, + terminal, keyPassword -> CertUtils.readPkcs12KeyPairs(p12Path, keyPassword, a -> keyPassword, env)); + + if (keys.size() != 1) { + throw new IllegalArgumentException("expected a single key in file [" + p12Path.toAbsolutePath() + "] but found [" + + keys.size() + "]"); + } + final Map.Entry pair = keys.entrySet().iterator().next(); + signingCertificate = (X509Certificate) pair.getKey(); + signingKey = (PrivateKey) pair.getValue(); + } else { + Path cert = resolvePath(signingCertPathSpec.value(options)); + Path key = resolvePath(signingKeyPathSpec.value(options)); + final String resolvedSigningCertPath = cert.toAbsolutePath().toString(); + Certificate[] certificates = CertUtils.readCertificates(Collections.singletonList(resolvedSigningCertPath), env); + if (certificates.length != 1) { + throw new IllegalArgumentException("expected a single certificate in file [" + resolvedSigningCertPath + "] but found [" + + certificates.length + "]"); + } + signingCertificate = (X509Certificate) certificates[0]; + signingKey = readSigningKey(key, password, terminal); + } + return new BasicX509Credential(signingCertificate, signingKey); + } + + private static T withPassword(String description, char[] password, Terminal terminal, + CheckedFunction body) throws E { + if (password == null) { + char[] promptedValue = terminal.readSecret("Enter password for " + description + " : "); + try { + return body.apply(promptedValue); + } finally { + Arrays.fill(promptedValue, (char) 0); + } + } else { + return body.apply(password); + } + } + + private static char[] getChars(String password) { + return password == null ? 
null : password.toCharArray(); + } + + private static PrivateKey readSigningKey(Path path, char[] password, Terminal terminal) + throws Exception { + AtomicReference passwordReference = new AtomicReference<>(password); + try (Reader reader = Files.newBufferedReader(path, StandardCharsets.UTF_8)) { + return CertUtils.readPrivateKey(reader, () -> { + if (password != null) { + return password; + } + char[] promptedValue = terminal.readSecret("Enter password for the signing key (" + path.getFileName() + ") : "); + passwordReference.set(promptedValue); + return promptedValue; + }); + } finally { + if (passwordReference.get() != null) { + Arrays.fill(passwordReference.get(), (char) 0); + } + } + } + private void validateXml(Terminal terminal, Path xml) throws Exception { + try (InputStream xmlInput = Files.newInputStream(xml)) { + SamlUtils.validate(xmlInput, METADATA_SCHEMA); + terminal.println(Terminal.Verbosity.VERBOSE, "The generated metadata file conforms to the SAML metadata schema"); + } catch (SAXException e) { + terminal.println(Terminal.Verbosity.SILENT, "Error - The generated metadata file does not conform to the SAML metadata schema"); + terminal.println("While validating " + xml.toString() + " the follow errors were found:"); + printExceptions(terminal, e); + throw new UserException(ExitCodes.CODE_ERROR, "Generated metadata is not valid"); + } + } + + private void printExceptions(Terminal terminal, Throwable throwable) { + terminal.println(" - " + throwable.getMessage()); + for (Throwable sup : throwable.getSuppressed()) { + printExceptions(terminal, sup); + } + if (throwable.getCause() != null && throwable.getCause() != throwable) { + printExceptions(terminal, throwable.getCause()); + } + } + + @SuppressForbidden(reason = "CLI tool working from current directory") + private Path resolvePath(String name) { + return PathUtils.get(name).normalize(); + } + + private String requireText(Terminal terminal, String prompt) { + String value = null; + while (Strings.isNullOrEmpty(value)) { + value = terminal.readText(prompt); + } + return value; + } + + private T option(OptionSpec spec, OptionSet options, T defaultValue) { + if (options.has(spec)) { + return spec.value(options); + } else { + return defaultValue; + } + } + + /** + * Map of saml-attribute name to configuration-setting name + */ + private Map getAttributeNames(OptionSet options, RealmConfig realm) { + Map attributes = new LinkedHashMap<>(); + for (String a : attributeSpec.values(options)) { + attributes.put(a, null); + } + final Settings attributeSettings = realm.settings().getByPrefix(SamlRealmSettings.AttributeSetting.ATTRIBUTES_PREFIX); + for (String key : sorted(attributeSettings.keySet())) { + final String attr = attributeSettings.get(key); + attributes.put(attr, key); + } + return attributes; + } + + // We sort this Set so that it is deterministic for testing + private SortedSet sorted(Set strings) { + return new TreeSet<>(strings); + } + + private RealmConfig findRealm(Terminal terminal, OptionSet options, Environment env) throws UserException, IOException, Exception { + + keyStoreWrapper = keyStoreFunction.apply(env); + final Settings settings; + if (keyStoreWrapper != null) { + // TODO: We currently do not support keystore passwords + keyStoreWrapper.decrypt(new char[0]); + + final Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(env.settings(), true); + if (settingsBuilder.getSecureSettings() == null) { + settingsBuilder.setSecureSettings(keyStoreWrapper); + } + settings = 
settingsBuilder.build(); + } else { + settings = env.settings(); + } + + final Map realms = RealmSettings.getRealmSettings(settings); + if (options.has(realmSpec)) { + final String name = realmSpec.value(options); + final Settings realmSettings = realms.get(name); + if (realmSettings == null) { + throw new UserException(ExitCodes.CONFIG, "No such realm '" + name + "' defined in " + env.configFile()); + } + final String realmType = getRealmType(realmSettings); + if (isSamlRealm(realmType)) { + return buildRealm(name, realmSettings, env); + } else { + throw new UserException(ExitCodes.CONFIG, "Realm '" + name + "' is not a SAML realm (is '" + realmType + "')"); + } + } else { + final List> saml = realms.entrySet().stream() + .filter(entry -> isSamlRealm(getRealmType(entry.getValue()))) + .collect(Collectors.toList()); + if (saml.isEmpty()) { + throw new UserException(ExitCodes.CONFIG, "There is no SAML realm configured in " + env.configFile()); + } + if (saml.size() > 1) { + terminal.println("Using configuration in " + env.configFile()); + terminal.println("Found multiple SAML realms: " + saml.stream().map(Map.Entry::getKey).collect(Collectors.joining(", "))); + terminal.println("Use the -" + optionName(realmSpec) + " option to specify an explicit realm"); + throw new UserException(ExitCodes.CONFIG, + "Found multiple SAML realms, please specify one with '-" + optionName(realmSpec) + "'"); + } + final Map.Entry entry = saml.get(0); + terminal.println("Building metadata for SAML realm " + entry.getKey()); + return buildRealm(entry.getKey(), entry.getValue(), env); + } + } + + private String optionName(OptionSpec spec) { + return spec.options().get(0); + } + + private RealmConfig buildRealm(String name, Settings settings, Environment env) { + return new RealmConfig(name, settings, env.settings(), env, new ThreadContext(env.settings())); + } + + private boolean isSamlRealm(String realmType) { + return SamlRealmSettings.TYPE.equals(realmType); + } + + private Locale findLocale(OptionSet options) { + if (options.has(localeSpec)) { + return LocaleUtils.parse(localeSpec.value(options)); + } else { + return Locale.getDefault(); + } + } + + // For testing + OptionParser getParser() { + return parser; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlNameId.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlNameId.java new file mode 100644 index 0000000000000..35ff8f879615e --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlNameId.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.saml; + +import org.opensaml.saml.saml2.core.NameID; +import org.opensaml.saml.saml2.core.Subject; + +/** + * Lightweight (non-XML) representation of a SAML {@code NameID} element + */ +public class SamlNameId { + final String format; + final String value; + final String idpNameQualifier; + final String spNameQualifier; + final String spProvidedId; + + public SamlNameId(String format, String value, String idpNameQualifier, String spNameQualifier, String spProvidedId) { + this.format = format; + this.value = value; + this.idpNameQualifier = idpNameQualifier; + this.spNameQualifier = spNameQualifier; + this.spProvidedId = spProvidedId; + } + + @Override + public String toString() { + return "NameId(" + format + ")=" + value; + } + + public NameID asXml() { + final NameID nameId = SamlUtils.buildObject(NameID.class, NameID.DEFAULT_ELEMENT_NAME); + nameId.setFormat(format); + nameId.setValue(value); + nameId.setNameQualifier(idpNameQualifier); + nameId.setSPNameQualifier(spNameQualifier); + nameId.setSPProvidedID(spProvidedId); + return nameId; + } + + static SamlNameId fromXml(NameID name) { + if (name == null) { + return null; + } + return new SamlNameId(name.getFormat(), name.getValue(), name.getNameQualifier(), + name.getSPNameQualifier(), name.getSPProvidedID()); + } + + static SamlNameId forSubject(Subject subject) { + if (subject == null) { + return null; + } + final NameID name = subject.getNameID(); + return fromXml(name); + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java new file mode 100644 index 0000000000000..d7d231af68002 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java @@ -0,0 +1,754 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.saml; + +import net.shibboleth.utilities.java.support.component.ComponentInitializationException; +import net.shibboleth.utilities.java.support.resolver.CriteriaSet; +import net.shibboleth.utilities.java.support.resolver.ResolverException; +import net.shibboleth.utilities.java.support.xml.BasicParserPool; +import org.apache.http.client.HttpClient; +import org.apache.http.conn.ssl.DefaultHostnameVerifier; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.SSLConnectionSocketFactory; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.SpecialPermission; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.CheckedRunnable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.watcher.FileChangesListener; +import org.elasticsearch.watcher.FileWatcher; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.ssl.CertUtils; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.core.ssl.X509KeyPairSettings; +import org.elasticsearch.xpack.security.authc.Realms; +import org.elasticsearch.xpack.security.authc.TokenService; +import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.opensaml.core.criterion.EntityIdCriterion; +import org.opensaml.saml.common.xml.SAMLConstants; +import org.opensaml.saml.criterion.EntityRoleCriterion; +import org.opensaml.saml.metadata.resolver.MetadataResolver; +import org.opensaml.saml.metadata.resolver.impl.AbstractReloadingMetadataResolver; +import org.opensaml.saml.metadata.resolver.impl.FilesystemMetadataResolver; +import org.opensaml.saml.metadata.resolver.impl.HTTPMetadataResolver; +import org.opensaml.saml.metadata.resolver.impl.PredicateRoleDescriptorResolver; +import org.opensaml.saml.saml2.core.AuthnRequest; +import org.opensaml.saml.saml2.core.LogoutRequest; +import org.opensaml.saml.saml2.core.LogoutResponse; +import org.opensaml.saml.saml2.core.NameID; +import org.opensaml.saml.saml2.core.StatusCode; +import org.opensaml.saml.saml2.metadata.EntityDescriptor; +import org.opensaml.saml.saml2.metadata.IDPSSODescriptor; 
+import org.opensaml.saml.security.impl.MetadataCredentialResolver; +import org.opensaml.security.credential.Credential; +import org.opensaml.security.credential.UsageType; +import org.opensaml.security.criteria.UsageCriterion; +import org.opensaml.security.x509.X509Credential; +import org.opensaml.security.x509.impl.X509KeyManagerX509CredentialAdapter; +import org.opensaml.xmlsec.keyinfo.impl.BasicProviderKeyInfoCredentialResolver; +import org.opensaml.xmlsec.keyinfo.impl.provider.InlineX509DataProvider; + +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.X509KeyManager; +import java.io.IOException; +import java.nio.file.Path; +import java.security.AccessController; +import java.security.GeneralSecurityException; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; +import java.time.Clock; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.CLOCK_SKEW; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.DN_ATTRIBUTE; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.ENCRYPTION_KEY_ALIAS; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.ENCRYPTION_SETTINGS; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.FORCE_AUTHN; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.GROUPS_ATTRIBUTE; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.IDP_ENTITY_ID; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.IDP_METADATA_HTTP_REFRESH; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.IDP_METADATA_PATH; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.IDP_SINGLE_LOGOUT; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.MAIL_ATTRIBUTE; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.NAMEID_ALLOW_CREATE; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.NAMEID_FORMAT; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.NAMEID_SP_QUALIFIER; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.NAME_ATTRIBUTE; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.POPULATE_USER_METADATA; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.PRINCIPAL_ATTRIBUTE; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SIGNING_KEY_ALIAS; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SIGNING_MESSAGE_TYPES; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SIGNING_SETTINGS; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SP_ACS; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SP_ENTITY_ID; +import static 
org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.SP_LOGOUT; +import static org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings.TYPE; + +/** + * This class is {@link Releasable} because it uses a library that runs timers and timer tasks in the background + * and offers no way to opt out of them; releasing the realm stops those tasks. + */ +public final class SamlRealm extends Realm implements Releasable { + + public static final String USER_METADATA_NAMEID_VALUE = "saml_" + SamlAttributes.NAMEID_SYNTHENTIC_ATTRIBUTE; + public static final String USER_METADATA_NAMEID_FORMAT = USER_METADATA_NAMEID_VALUE + "_format"; + + public static final String CONTEXT_TOKEN_DATA = "_xpack_saml_tokendata"; + public static final String TOKEN_METADATA_NAMEID_VALUE = "saml_nameid_val"; + public static final String TOKEN_METADATA_NAMEID_FORMAT = "saml_nameid_fmt"; + public static final String TOKEN_METADATA_NAMEID_QUALIFIER = "saml_nameid_qual"; + public static final String TOKEN_METADATA_NAMEID_SP_QUALIFIER = "saml_nameid_sp_qual"; + public static final String TOKEN_METADATA_NAMEID_SP_PROVIDED_ID = "saml_nameid_sp_id"; + public static final String TOKEN_METADATA_SESSION = "saml_session"; + public static final String TOKEN_METADATA_REALM = "saml_realm"; + // Although we only use this for IDP metadata loading, the SSLService only loads configurations where "ssl." is a top-level element + // in the realm group configuration, so it has to have this name. + + private final List<Releasable> releasables; + + private final SamlAuthenticator authenticator; + private final SamlLogoutRequestHandler logoutHandler; + private final UserRoleMapper roleMapper; + + private final Supplier<EntityDescriptor> idpDescriptor; + + private final SpConfiguration serviceProvider; + private final SamlAuthnRequestBuilder.NameIDPolicySettings nameIdPolicy; + private final Boolean forceAuthn; + private final boolean useSingleLogout; + private final Boolean populateUserMetadata; + + private final AttributeParser principalAttribute; + private final AttributeParser groupsAttribute; + private final AttributeParser dnAttribute; + private final AttributeParser nameAttribute; + private final AttributeParser mailAttribute; + + + /** + * Factory for SAML realm.
+ * This is not a constructor as it needs to initialise a number of components before delegating to + * {@link #SamlRealm} + */ + public static SamlRealm create(RealmConfig config, SSLService sslService, ResourceWatcherService watcherService, + UserRoleMapper roleMapper) throws Exception { + final Logger logger = config.logger(SamlRealm.class); + SamlUtils.initialize(logger); + + if (TokenService.isTokenServiceEnabled(config.globalSettings()) == false) { + throw new IllegalStateException("SAML requires that the token service be enabled (" + + XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey() + ")"); + } + + final Tuple> tuple + = initializeResolver(logger, config, sslService, watcherService); + final AbstractReloadingMetadataResolver metadataResolver = tuple.v1(); + final Supplier idpDescriptor = tuple.v2(); + + final SpConfiguration serviceProvider = getSpConfiguration(config); + + final Clock clock = Clock.systemUTC(); + final IdpConfiguration idpConfiguration = getIdpConfiguration(config, metadataResolver, idpDescriptor); + final TimeValue maxSkew = CLOCK_SKEW.get(config.settings()); + final SamlAuthenticator authenticator = new SamlAuthenticator(config, clock, idpConfiguration, serviceProvider, maxSkew); + final SamlLogoutRequestHandler logoutHandler = + new SamlLogoutRequestHandler(config, clock, idpConfiguration, serviceProvider, maxSkew); + + final SamlRealm realm = new SamlRealm(config, roleMapper, authenticator, logoutHandler, idpDescriptor, serviceProvider); + + // the metadata resolver needs to be destroyed since it runs a timer task in the background and destroying stops it! + realm.releasables.add(() -> metadataResolver.destroy()); + + return realm; + } + + // For testing + SamlRealm(RealmConfig config, UserRoleMapper roleMapper, SamlAuthenticator authenticator, SamlLogoutRequestHandler logoutHandler, + Supplier idpDescriptor, SpConfiguration spConfiguration) throws Exception { + super(TYPE, config); + + this.roleMapper = roleMapper; + this.authenticator = authenticator; + this.logoutHandler = logoutHandler; + + this.idpDescriptor = idpDescriptor; + this.serviceProvider = spConfiguration; + + this.nameIdPolicy = new SamlAuthnRequestBuilder.NameIDPolicySettings(require(config, NAMEID_FORMAT), + NAMEID_ALLOW_CREATE.get(config.settings()), NAMEID_SP_QUALIFIER.get(config.settings())); + this.forceAuthn = FORCE_AUTHN.exists(config.settings()) ? 
FORCE_AUTHN.get(config.settings()) : null; + this.useSingleLogout = IDP_SINGLE_LOGOUT.get(config.settings()); + this.populateUserMetadata = POPULATE_USER_METADATA.get(config.settings()); + this.principalAttribute = AttributeParser.forSetting(logger, PRINCIPAL_ATTRIBUTE, config, true); + + this.groupsAttribute = AttributeParser.forSetting(logger, GROUPS_ATTRIBUTE, config, false); + this.dnAttribute = AttributeParser.forSetting(logger, DN_ATTRIBUTE, config, false); + this.nameAttribute = AttributeParser.forSetting(logger, NAME_ATTRIBUTE, config, false); + this.mailAttribute = AttributeParser.forSetting(logger, MAIL_ATTRIBUTE, config, false); + + this.releasables = new ArrayList<>(); + } + + static String require(RealmConfig config, Setting setting) { + final String value = setting.get(config.settings()); + if (value.isEmpty()) { + throw new IllegalArgumentException("The configuration setting [" + RealmSettings.getFullSettingKey(config, setting) + + "] is required"); + } + return value; + } + + private static IdpConfiguration getIdpConfiguration(RealmConfig config, MetadataResolver metadataResolver, + Supplier idpDescriptor) { + final MetadataCredentialResolver resolver = new MetadataCredentialResolver(); + + final PredicateRoleDescriptorResolver roleDescriptorResolver = new PredicateRoleDescriptorResolver(metadataResolver); + resolver.setRoleDescriptorResolver(roleDescriptorResolver); + + final InlineX509DataProvider keyInfoProvider = new InlineX509DataProvider(); + resolver.setKeyInfoCredentialResolver(new BasicProviderKeyInfoCredentialResolver(Collections.singletonList(keyInfoProvider))); + + try { + roleDescriptorResolver.initialize(); + resolver.initialize(); + } catch (ComponentInitializationException e) { + throw new IllegalStateException("Cannot initialise SAML IDP resolvers for realm " + config.name(), e); + } + + final String entityID = idpDescriptor.get().getEntityID(); + return new IdpConfiguration(entityID, () -> { + try { + final Iterable credentials = resolver.resolve(new CriteriaSet( + new EntityIdCriterion(entityID), + new EntityRoleCriterion(IDPSSODescriptor.DEFAULT_ELEMENT_NAME), + new UsageCriterion(UsageType.SIGNING))); + return CollectionUtils.iterableAsArrayList(credentials); + } catch (ResolverException e) { + throw new IllegalStateException("Cannot resolve SAML IDP credentials resolver for realm " + config.name(), e); + } + }); + } + + static SpConfiguration getSpConfiguration(RealmConfig config) throws IOException, GeneralSecurityException { + final String serviceProviderId = require(config, SP_ENTITY_ID); + final String assertionConsumerServiceURL = require(config, SP_ACS); + final String logoutUrl = SP_LOGOUT.get(config.settings()); + return new SpConfiguration(serviceProviderId, assertionConsumerServiceURL, + logoutUrl, buildSigningConfiguration(config), buildEncryptionCredential(config)); + } + + + // Package-private for testing + static List buildEncryptionCredential(RealmConfig config) throws IOException, GeneralSecurityException { + return buildCredential(config, ENCRYPTION_SETTINGS, ENCRYPTION_KEY_ALIAS, true); + } + + static SigningConfiguration buildSigningConfiguration(RealmConfig config) throws IOException, GeneralSecurityException { + final List credentials = buildCredential(config, SIGNING_SETTINGS, SIGNING_KEY_ALIAS, false); + + if (credentials == null || credentials.isEmpty()) { + if (SIGNING_MESSAGE_TYPES.exists(config.settings())) { + throw new IllegalArgumentException("The setting [" + RealmSettings.getFullSettingKey(config, SIGNING_MESSAGE_TYPES) 
+ + "] cannot be specified if there are no signing credentials"); + } else { + return new SigningConfiguration(Collections.emptySet(), null); + } + } else { + final List types = SIGNING_MESSAGE_TYPES.get(config.settings()); + return new SigningConfiguration(Sets.newHashSet(types), credentials.get(0)); + } + } + + private static List buildCredential(RealmConfig config, X509KeyPairSettings keyPairSettings, + Setting aliasSetting, final boolean allowMultiple) { + final X509KeyManager keyManager = CertUtils.getKeyManager(keyPairSettings, config.settings(), null, config.env()); + if (keyManager == null) { + return null; + } + + final Set aliases = new HashSet<>(); + final String configuredAlias = aliasSetting.get(config.settings()); + if (Strings.isNullOrEmpty(configuredAlias)) { + + final String[] serverAliases = keyManager.getServerAliases("RSA", null); + if (serverAliases != null) { + aliases.addAll(Arrays.asList(serverAliases)); + } + + if (aliases.isEmpty()) { + throw new IllegalArgumentException( + "The configured key store for " + RealmSettings.getFullSettingKey(config, keyPairSettings.getPrefix()) + + " does not contain any RSA key pairs"); + } else if (allowMultiple == false && aliases.size() > 1) { + throw new IllegalArgumentException( + "The configured key store for " + RealmSettings.getFullSettingKey(config, keyPairSettings.getPrefix()) + + " has multiple keys but no alias has been specified (from setting " + + RealmSettings.getFullSettingKey(config, aliasSetting) + ")"); + } + } else { + aliases.add(configuredAlias); + } + + final List credentials = new ArrayList<>(); + for (String alias : aliases) { + if (keyManager.getPrivateKey(alias) == null) { + throw new IllegalArgumentException( + "The configured key store for " + RealmSettings.getFullSettingKey(config, keyPairSettings.getPrefix()) + + " does not have a key associated with alias [" + alias + "] " + + ((Strings.isNullOrEmpty(configuredAlias) == false) + ? 
"(from setting " + RealmSettings.getFullSettingKey(config, aliasSetting) + ")" + : "")); + } + + final String keyType = keyManager.getPrivateKey(alias).getAlgorithm(); + if (keyType.equals("RSA") == false) { + throw new IllegalArgumentException("The key associated with alias [" + alias + "] " + "(from setting " + + RealmSettings.getFullSettingKey(config, aliasSetting) + ") uses unsupported key algorithm type [" + keyType + + "], only RSA is supported"); + } + credentials.add(new X509KeyManagerX509CredentialAdapter(keyManager, alias)); + } + + return credentials; + } + + public static List findSamlRealms(Realms realms, String realmName, String acsUrl) { + Stream stream = realms.stream().filter(r -> r instanceof SamlRealm).map(r -> (SamlRealm) r); + if (Strings.hasText(realmName)) { + stream = stream.filter(r -> realmName.equals(r.name())); + } + if (Strings.hasText(acsUrl)) { + stream = stream.filter(r -> acsUrl.equals(r.assertionConsumerServiceURL())); + } + return stream.collect(Collectors.toList()); + } + + @Override + public boolean supports(AuthenticationToken token) { + return token instanceof SamlToken; + } + + /** + * Always returns {@code null} as there is no support for reading a SAML token out of a request + * + * @see org.elasticsearch.xpack.security.action.saml.TransportSamlAuthenticateAction + */ + @Override + public AuthenticationToken token(ThreadContext threadContext) { + return null; + } + + @Override + public void authenticate(AuthenticationToken authenticationToken, ActionListener listener) { + if (authenticationToken instanceof SamlToken) { + try { + final SamlToken token = (SamlToken) authenticationToken; + final SamlAttributes attributes = authenticator.authenticate(token); + logger.debug("Parsed token [{}] to attributes [{}]", token, attributes); + buildUser(attributes, listener); + } catch (ElasticsearchSecurityException e) { + if (SamlUtils.isSamlException(e)) { + listener.onResponse(AuthenticationResult.unsuccessful("Provided SAML response is not valid for realm " + this, e)); + } else { + listener.onFailure(e); + } + } + } else { + listener.onResponse(AuthenticationResult.notHandled()); + } + } + + private void buildUser(SamlAttributes attributes, ActionListener listener) { + final String principal = resolveSingleValueAttribute(attributes, principalAttribute, PRINCIPAL_ATTRIBUTE.name()); + if (Strings.isNullOrEmpty(principal)) { + listener.onResponse(AuthenticationResult.unsuccessful( + principalAttribute + " not found in " + attributes.attributes(), null)); + return; + } + + final Map userMeta = new HashMap<>(); + if (populateUserMetadata) { + for (SamlAttributes.SamlAttribute a : attributes.attributes()) { + userMeta.put("saml(" + a.name + ")", a.values); + if (Strings.hasText(a.friendlyName)) { + userMeta.put("saml_" + a.friendlyName, a.values); + } + } + } + if (attributes.name() != null) { + userMeta.put(USER_METADATA_NAMEID_VALUE, attributes.name().value); + userMeta.put(USER_METADATA_NAMEID_FORMAT, attributes.name().format); + } + + final Map tokenMetadata = createTokenMetadata(attributes.name(), attributes.session()); + + final List groups = groupsAttribute.getAttribute(attributes); + final String dn = resolveSingleValueAttribute(attributes, dnAttribute, DN_ATTRIBUTE.name()); + final String name = resolveSingleValueAttribute(attributes, nameAttribute, NAME_ATTRIBUTE.name()); + final String mail = resolveSingleValueAttribute(attributes, mailAttribute, MAIL_ATTRIBUTE.name()); + UserRoleMapper.UserData userData = new UserRoleMapper.UserData(principal, dn, 
groups, userMeta, config); + roleMapper.resolveRoles(userData, ActionListener.wrap(roles -> { + final User user = new User(principal, roles.toArray(new String[roles.size()]), name, mail, userMeta, true); + config.threadContext().putTransient(CONTEXT_TOKEN_DATA, tokenMetadata); + listener.onResponse(AuthenticationResult.success(user)); + }, listener::onFailure)); + } + + public Map createTokenMetadata(SamlNameId nameId, String session) { + final Map tokenMeta = new HashMap<>(); + if (nameId != null) { + tokenMeta.put(TOKEN_METADATA_NAMEID_VALUE, nameId.value); + tokenMeta.put(TOKEN_METADATA_NAMEID_FORMAT, nameId.format); + tokenMeta.put(TOKEN_METADATA_NAMEID_QUALIFIER, nameId.idpNameQualifier); + tokenMeta.put(TOKEN_METADATA_NAMEID_SP_QUALIFIER, nameId.spNameQualifier); + tokenMeta.put(TOKEN_METADATA_NAMEID_SP_PROVIDED_ID, nameId.spProvidedId); + } else { + tokenMeta.put(TOKEN_METADATA_NAMEID_VALUE, null); + tokenMeta.put(TOKEN_METADATA_NAMEID_FORMAT, null); + tokenMeta.put(TOKEN_METADATA_NAMEID_QUALIFIER, null); + tokenMeta.put(TOKEN_METADATA_NAMEID_SP_QUALIFIER, null); + tokenMeta.put(TOKEN_METADATA_NAMEID_SP_PROVIDED_ID, null); + } + tokenMeta.put(TOKEN_METADATA_SESSION, session); + tokenMeta.put(TOKEN_METADATA_REALM, name()); + return tokenMeta; + } + + private String resolveSingleValueAttribute(SamlAttributes attributes, AttributeParser parser, String name) { + final List list = parser.getAttribute(attributes); + switch (list.size()) { + case 0: + return null; + case 1: + return list.get(0); + default: + logger.info("SAML assertion contains multiple values for attribute [{}] returning first one", name); + return list.get(0); + } + } + + @Override + public void lookupUser(String username, ActionListener listener) { + // saml will not support user lookup initially + listener.onResponse(null); + } + + static Tuple> initializeResolver(Logger logger, RealmConfig config, + SSLService sslService, + ResourceWatcherService watcherService) + throws ResolverException, ComponentInitializationException, PrivilegedActionException, IOException { + final String metadataUrl = require(config, IDP_METADATA_PATH); + if (metadataUrl.startsWith("http://")) { + throw new IllegalArgumentException("The [http] protocol is not supported as it is insecure. Use [https] instead"); + } else if (metadataUrl.startsWith("https://")) { + return parseHttpMetadata(metadataUrl, config, sslService); + } else { + return parseFileSystemMetadata(logger, metadataUrl, config, watcherService); + } + } + + private static Tuple> parseHttpMetadata(String metadataUrl, + RealmConfig config, + SSLService sslService) + throws ResolverException, ComponentInitializationException, PrivilegedActionException { + final String entityId = require(config, IDP_ENTITY_ID); + + HttpClientBuilder builder = HttpClientBuilder.create(); + // ssl setup + Settings sslSettings = config.settings().getByPrefix(SamlRealmSettings.SSL_PREFIX); + boolean isHostnameVerificationEnabled = sslService.getVerificationMode(sslSettings, Settings.EMPTY).isHostnameVerificationEnabled(); + HostnameVerifier verifier = isHostnameVerificationEnabled ? 
new DefaultHostnameVerifier() : NoopHostnameVerifier.INSTANCE; + SSLConnectionSocketFactory factory = new SSLConnectionSocketFactory(sslService.sslSocketFactory(sslSettings), verifier); + builder.setSSLSocketFactory(factory); + + HTTPMetadataResolver resolver = new PrivilegedHTTPMetadataResolver(builder.build(), metadataUrl); + TimeValue refresh = IDP_METADATA_HTTP_REFRESH.get(config.settings()); + resolver.setMinRefreshDelay(refresh.millis()); + resolver.setMaxRefreshDelay(refresh.millis()); + initialiseResolver(resolver, config); + + return new Tuple<>(resolver, () -> { + // for some reason the resolver supports its own trust engine and custom socket factories. + // we do not use these as we'd rather rely on the JDK versions for TLS security! + SpecialPermission.check(); + try { + return AccessController.doPrivileged((PrivilegedExceptionAction) + () -> resolveEntityDescriptor(resolver, entityId, metadataUrl)); + } catch (PrivilegedActionException e) { + throw ExceptionsHelper.convertToRuntime((Exception) ExceptionsHelper.unwrapCause(e)); + } + }); + } + + private static final class PrivilegedHTTPMetadataResolver extends HTTPMetadataResolver { + + PrivilegedHTTPMetadataResolver(final HttpClient client, final String metadataURL) throws ResolverException { + super(client, metadataURL); + } + + @Override + protected byte[] fetchMetadata() throws ResolverException { + try { + return AccessController.doPrivileged( + (PrivilegedExceptionAction) () -> PrivilegedHTTPMetadataResolver.super.fetchMetadata()); + } catch (final PrivilegedActionException e) { + throw (ResolverException) e.getCause(); + } + } + + } + + @SuppressForbidden(reason = "uses toFile") + private static Tuple> parseFileSystemMetadata( + Logger logger, String metadataPath, RealmConfig config, ResourceWatcherService watcherService) + throws ResolverException, ComponentInitializationException, IOException, PrivilegedActionException { + + final String entityId = require(config, IDP_ENTITY_ID); + final Path path = config.env().configFile().resolve(metadataPath); + final FilesystemMetadataResolver resolver = new FilesystemMetadataResolver(path.toFile()); + + if (IDP_METADATA_HTTP_REFRESH.exists(config.settings())) { + logger.info("Ignoring setting [{}] because the IdP metadata is being loaded from a file", + RealmSettings.getFullSettingKey(config, IDP_METADATA_HTTP_REFRESH)); + } + + // We don't want to rely on the internal OpenSAML refresh timer, but we can't turn it off, so just set it to run once a day. 
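+ // Actual reloads are driven by the FileWatcher/FileListener registered below (which calls resolver.refresh()), not by this timer.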
+ // @TODO : Submit a patch to OpenSAML to optionally disable the timer + final long oneDayMs = TimeValue.timeValueHours(24).millis(); + resolver.setMinRefreshDelay(oneDayMs); + resolver.setMaxRefreshDelay(oneDayMs); + initialiseResolver(resolver, config); + + FileWatcher watcher = new FileWatcher(path); + watcher.addListener(new FileListener(logger, resolver::refresh)); + watcherService.add(watcher, ResourceWatcherService.Frequency.MEDIUM); + return new Tuple<>(resolver, () -> resolveEntityDescriptor(resolver, entityId, path.toString())); + } + + private static EntityDescriptor resolveEntityDescriptor(AbstractReloadingMetadataResolver resolver, String entityId, + String sourceLocation) { + try { + final EntityDescriptor descriptor = resolver.resolveSingle(new CriteriaSet(new EntityIdCriterion(entityId))); + if (descriptor == null) { + throw SamlUtils.samlException("Cannot find metadata for entity [{}] in [{}]", entityId, sourceLocation); + } + return descriptor; + } catch (ResolverException e) { + throw SamlUtils.samlException("Cannot resolve entity metadata", e); + } + } + + + @Override + public void close() { + Releasables.close(releasables); + } + + private static void initialiseResolver(AbstractReloadingMetadataResolver resolver, RealmConfig config) + throws ComponentInitializationException, PrivilegedActionException { + resolver.setRequireValidMetadata(true); + BasicParserPool pool = new BasicParserPool(); + pool.initialize(); + resolver.setParserPool(pool); + resolver.setId(config.name()); + SpecialPermission.check(); + AccessController.doPrivileged((PrivilegedExceptionAction) () -> { + resolver.initialize(); + return null; + }); + } + + public String serviceProviderEntityId() { + return this.serviceProvider.getEntityId(); + } + + public String assertionConsumerServiceURL() { + return this.serviceProvider.getAscUrl(); + } + + public AuthnRequest buildAuthenticationRequest() { + final AuthnRequest authnRequest = new SamlAuthnRequestBuilder( + serviceProvider, + SAMLConstants.SAML2_POST_BINDING_URI, + idpDescriptor.get(), + SAMLConstants.SAML2_REDIRECT_BINDING_URI, + Clock.systemUTC()) + .nameIDPolicy(nameIdPolicy) + .forceAuthn(forceAuthn) + .build(); + if (logger.isTraceEnabled()) { + logger.trace("Constructed SAML Authentication Request: {}", SamlUtils.samlObjectToString(authnRequest)); + } + return authnRequest; + } + + /** + * Creates a SAML {@link LogoutRequest Single LogOut request} for the provided session, if the + * realm and IdP configuration support SLO. 
Otherwise returns {@code null} + * + * @see SamlRealmSettings#IDP_SINGLE_LOGOUT + */ + public LogoutRequest buildLogoutRequest(NameID nameId, String session) { + if (useSingleLogout) { + final LogoutRequest logoutRequest = new SamlLogoutRequestMessageBuilder( + Clock.systemUTC(), serviceProvider, idpDescriptor.get(), nameId, session).build(); + if (logoutRequest != null && logger.isTraceEnabled()) { + logger.trace("Constructed SAML Logout Request: {}", SamlUtils.samlObjectToString(logoutRequest)); + } + return logoutRequest; + } else { + return null; + } + + } + + /** + * Creates a SAML {@link org.opensaml.saml.saml2.core.LogoutResponse} to the provided requestID + */ + public LogoutResponse buildLogoutResponse(String inResponseTo) { + final LogoutResponse logoutResponse = new SamlLogoutResponseBuilder( + Clock.systemUTC(), serviceProvider, idpDescriptor.get(), inResponseTo, StatusCode.SUCCESS).build(); + if (logoutResponse != null && logger.isTraceEnabled()) { + logger.trace("Constructed SAML Logout Response: {}", SamlUtils.samlObjectToString(logoutResponse)); + } + return logoutResponse; + } + + public SigningConfiguration getSigningConfiguration() { + return serviceProvider.getSigningConfiguration(); + } + + public SamlLogoutRequestHandler getLogoutHandler() { + return this.logoutHandler; + } + + private static class FileListener implements FileChangesListener { + + private final Logger logger; + private final CheckedRunnable onChange; + + private FileListener(Logger logger, CheckedRunnable onChange) { + this.logger = logger; + this.onChange = onChange; + } + + @Override + public void onFileCreated(Path file) { + onFileChanged(file); + } + + @Override + public void onFileDeleted(Path file) { + onFileChanged(file); + } + + @Override + public void onFileChanged(Path file) { + try { + onChange.run(); + } catch (Exception e) { + logger.warn(new ParameterizedMessage("An error occurred while reloading file {}", file), e); + } + } + } + + static final class AttributeParser { + private final String name; + private final Function> parser; + + AttributeParser(String name, Function> parser) { + this.name = name; + this.parser = parser; + } + + List getAttribute(SamlAttributes attributes) { + return parser.apply(attributes); + } + + @Override + public String toString() { + return name; + } + + static AttributeParser forSetting(Logger logger, SamlRealmSettings.AttributeSetting setting, RealmConfig realmConfig, + boolean required) { + final Settings settings = realmConfig.settings(); + if (setting.getAttribute().exists(settings)) { + String attributeName = setting.getAttribute().get(settings); + if (setting.getPattern().exists(settings)) { + Pattern regex = Pattern.compile(setting.getPattern().get(settings)); + return new AttributeParser( + "SAML Attribute [" + attributeName + "] with pattern [" + regex.pattern() + "] for [" + setting.name() + "]", + attributes -> attributes.getAttributeValues(attributeName).stream().map(s -> { + final Matcher matcher = regex.matcher(s); + if (matcher.find() == false) { + logger.debug("Attribute [{}] is [{}], which does not match [{}]", attributeName, s, regex.pattern()); + return null; + } + final String value = matcher.group(1); + if (Strings.isNullOrEmpty(value)) { + logger.debug("Attribute [{}] is [{}], which does match [{}] but group(1) is empty", + attributeName, s, regex.pattern()); + return null; + } + return value; + }).filter(Objects::nonNull).collect(Collectors.toList()) + ); + } else { + return new AttributeParser( + "SAML Attribute [" + attributeName + 
"] for [" + setting.name() + "]", + attributes -> attributes.getAttributeValues(attributeName)); + } + } else if (required) { + throw new SettingsException("Setting" + RealmSettings.getFullSettingKey(realmConfig, setting.getAttribute()) + + " is required"); + } else if (setting.getPattern().exists(settings)) { + throw new SettingsException("Setting" + RealmSettings.getFullSettingKey(realmConfig, setting.getPattern()) + + " cannot be set unless " + RealmSettings.getFullSettingKey(realmConfig, setting.getAttribute()) + " is also set"); + } else { + return new AttributeParser("No SAML attribute for [" + setting.name() + "]", attributes -> Collections.emptyList()); + } + } + + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRedirect.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRedirect.java new file mode 100644 index 0000000000000..b728fb03bcdd1 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRedirect.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.saml; + +import org.elasticsearch.ElasticsearchException; +import org.opensaml.core.xml.util.XMLObjectSupport; +import org.opensaml.saml.common.SAMLObject; +import org.opensaml.saml.saml2.core.RequestAbstractType; +import org.opensaml.saml.saml2.core.StatusResponseType; +import org.opensaml.xmlsec.signature.support.SignatureConstants; + +import java.io.ByteArrayOutputStream; +import java.io.UnsupportedEncodingException; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.zip.Deflater; +import java.util.zip.DeflaterOutputStream; + +public class SamlRedirect { + + private final SAMLObject samlObject; + private final String destination; + private final String parameterName; + private final SigningConfiguration signing; + + public SamlRedirect(RequestAbstractType request, SigningConfiguration signing) { + this.samlObject = request; + this.destination = request.getDestination(); + this.parameterName = "SAMLRequest"; + this.signing = signing; + } + + public SamlRedirect(StatusResponseType response, SigningConfiguration signing) { + this.samlObject = response; + this.destination = response.getDestination(); + this.parameterName = "SAMLResponse"; + this.signing = signing; + } + + public String getRedirectUrl() throws ElasticsearchException { + return getRedirectUrl(null); + } + + public String getRedirectUrl(String relayState) throws ElasticsearchException { + try { + final String request = deflateAndBase64Encode(this.samlObject); + String queryParam = parameterName + "=" + urlEncode(request); + if (relayState != null) { + queryParam += "&RelayState=" + urlEncode(relayState); + } + if (signing.shouldSign(this.samlObject)) { + final String algo = SignatureConstants.ALGO_ID_SIGNATURE_RSA_SHA256; + queryParam += "&SigAlg=" + urlEncode(algo); + final byte[] sig = signing.sign(queryParam, algo); + queryParam += "&Signature=" + urlEncode(base64Encode(sig)); + } + return withParameters(queryParam); + } catch (Exception e) { + throw new ElasticsearchException("Cannot construct SAML redirect", e); + } + } + + private String withParameters(String queryParam) { + if 
(destination.indexOf('?') == -1) { + return destination + "?" + queryParam; + } else if (destination.endsWith("?")) { + return destination + queryParam; + } else { + return destination + "&" + queryParam; + } + } + + private String base64Encode(byte[] bytes) { + return Base64.getEncoder().encodeToString(bytes); + } + + private String urlEncode(String param) throws UnsupportedEncodingException { + return URLEncoder.encode(param, StandardCharsets.US_ASCII.name()); + } + + protected String deflateAndBase64Encode(SAMLObject message) + throws Exception { + Deflater deflater = new Deflater(Deflater.DEFLATED, true); + try (ByteArrayOutputStream bytesOut = new ByteArrayOutputStream(); + DeflaterOutputStream deflaterStream = new DeflaterOutputStream(bytesOut, deflater)) { + String messageStr = SamlUtils.toString(XMLObjectSupport.marshall(message)); + deflaterStream.write(messageStr.getBytes(StandardCharsets.UTF_8)); + deflaterStream.finish(); + return base64Encode(bytesOut.toByteArray()); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRequestHandler.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRequestHandler.java new file mode 100644 index 0000000000000..2cb33b2c770e3 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRequestHandler.java @@ -0,0 +1,308 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.saml; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.security.support.RestorableContextClassLoader; +import org.joda.time.DateTime; +import org.opensaml.core.xml.XMLObject; +import org.opensaml.core.xml.io.Unmarshaller; +import org.opensaml.core.xml.io.UnmarshallerFactory; +import org.opensaml.core.xml.io.UnmarshallingException; +import org.opensaml.saml.saml2.core.Issuer; +import org.opensaml.saml.saml2.encryption.Decrypter; +import org.opensaml.saml.security.impl.SAMLSignatureProfileValidator; +import org.opensaml.security.credential.Credential; +import org.opensaml.security.x509.X509Credential; +import org.opensaml.xmlsec.encryption.support.ChainingEncryptedKeyResolver; +import org.opensaml.xmlsec.encryption.support.EncryptedKeyResolver; +import org.opensaml.xmlsec.encryption.support.InlineEncryptedKeyResolver; +import org.opensaml.xmlsec.encryption.support.SimpleKeyInfoReferenceEncryptedKeyResolver; +import org.opensaml.xmlsec.encryption.support.SimpleRetrievalMethodEncryptedKeyResolver; +import org.opensaml.xmlsec.keyinfo.KeyInfoCredentialResolver; +import org.opensaml.xmlsec.keyinfo.impl.ChainingKeyInfoCredentialResolver; +import org.opensaml.xmlsec.keyinfo.impl.CollectionKeyInfoCredentialResolver; +import org.opensaml.xmlsec.keyinfo.impl.LocalKeyInfoCredentialResolver; +import org.opensaml.xmlsec.keyinfo.impl.provider.DEREncodedKeyValueProvider; 
+import org.opensaml.xmlsec.keyinfo.impl.provider.InlineX509DataProvider; +import org.opensaml.xmlsec.keyinfo.impl.provider.KeyInfoReferenceProvider; +import org.opensaml.xmlsec.keyinfo.impl.provider.RSAKeyValueProvider; +import org.opensaml.xmlsec.signature.Signature; +import org.opensaml.xmlsec.signature.support.SignatureException; +import org.opensaml.xmlsec.signature.support.SignatureValidator; +import org.w3c.dom.Document; +import org.w3c.dom.Element; +import org.xml.sax.SAXException; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.security.PrivilegedActionException; +import java.security.cert.CertificateEncodingException; +import java.security.cert.X509Certificate; +import java.time.Clock; +import java.time.Instant; +import java.util.Arrays; +import java.util.Base64; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import javax.xml.parsers.DocumentBuilder; + +import static org.elasticsearch.xpack.security.authc.saml.SamlUtils.samlException; +import static org.opensaml.core.xml.config.XMLObjectProviderRegistrySupport.getUnmarshallerFactory; + +public class SamlRequestHandler { + + protected static final String SAML_NAMESPACE = "urn:oasis:names:tc:SAML:2.0:protocol"; + + private static final String[] XSD_FILES = new String[] { "/org/elasticsearch/xpack/security/authc/saml/saml-schema-protocol-2.0.xsd", + "/org/elasticsearch/xpack/security/authc/saml/saml-schema-assertion-2.0.xsd", + "/org/elasticsearch/xpack/security/authc/saml/xenc-schema.xsd", + "/org/elasticsearch/xpack/security/authc/saml/xmldsig-core-schema.xsd" }; + + private static final ThreadLocal THREAD_LOCAL_DOCUMENT_BUILDER = ThreadLocal.withInitial(() -> { + try { + return SamlUtils.getHardenedBuilder(XSD_FILES); + } catch (Exception e) { + throw samlException("Could not load XSD schema file", e); + } + }); + + protected final Logger logger; + + @Nullable + protected final Decrypter decrypter; + + private final Clock clock; + private final IdpConfiguration idp; + private final SpConfiguration sp; + private final TimeValue maxSkew; + private final UnmarshallerFactory unmarshallerFactory; + + public SamlRequestHandler(RealmConfig realmConfig, Clock clock, IdpConfiguration idp, SpConfiguration sp, TimeValue maxSkew) { + this.logger = Loggers.getLogger(getClass(), realmConfig.globalSettings()); + this.clock = clock; + this.idp = idp; + this.sp = sp; + this.maxSkew = maxSkew; + this.unmarshallerFactory = getUnmarshallerFactory(); + if (sp.getEncryptionCredentials().isEmpty()) { + this.decrypter = null; + } else { + this.decrypter = new Decrypter(null, createResolverForEncryptionKeys(), createResolverForEncryptedKeyElements()); + } + } + + private KeyInfoCredentialResolver createResolverForEncryptionKeys() { + final CollectionKeyInfoCredentialResolver collectionKeyInfoCredentialResolver = + new CollectionKeyInfoCredentialResolver(Collections.unmodifiableCollection(sp.getEncryptionCredentials())); + final LocalKeyInfoCredentialResolver localKeyInfoCredentialResolver = + new LocalKeyInfoCredentialResolver(Arrays.asList(new InlineX509DataProvider(), new KeyInfoReferenceProvider(), + new RSAKeyValueProvider(), new DEREncodedKeyValueProvider()), collectionKeyInfoCredentialResolver); + return new ChainingKeyInfoCredentialResolver(Arrays.asList(localKeyInfoCredentialResolver, collectionKeyInfoCredentialResolver)); + } + + private EncryptedKeyResolver 
createResolverForEncryptedKeyElements() { + return new ChainingEncryptedKeyResolver(Arrays.asList(new InlineEncryptedKeyResolver(), + new SimpleRetrievalMethodEncryptedKeyResolver(), new SimpleKeyInfoReferenceEncryptedKeyResolver())); + } + + protected SpConfiguration getSpConfiguration() { + return sp; + } + + protected String describe(X509Certificate certificate) { + return "X509Certificate{Subject=" + certificate.getSubjectDN() + "; SerialNo=" + + certificate.getSerialNumber().toString(16) + "}"; + } + + protected String describe(Collection credentials) { + return credentials.stream().map(credential -> describe(credential.getEntityCertificate())).collect(Collectors.joining(",")); + } + + void validateSignature(Signature signature) { + final String signatureText = text(signature, 32); + SAMLSignatureProfileValidator profileValidator = new SAMLSignatureProfileValidator(); + try { + profileValidator.validate(signature); + } catch (SignatureException e) { + throw samlSignatureException(idp.getSigningCredentials(), signatureText, e); + } + + checkIdpSignature(credential -> { + try (RestorableContextClassLoader ignore = new RestorableContextClassLoader(SignatureValidator.class)) { + SignatureValidator.validate(signature, credential); + logger.debug(() -> new ParameterizedMessage("SAML Signature [{}] matches credentials [{}] [{}]", + signatureText, credential.getEntityId(), credential.getPublicKey())); + return true; + } catch (PrivilegedActionException e) { + logger.warn("SecurityException while attempting to validate SAML signature", e); + return false; + } + }, signatureText); + } + + /** + * Tests whether the provided function returns {@code true} for any of the IdP's signing credentials. + * @throws ElasticsearchSecurityException - A SAML exception if not matching credential is found. + */ + protected void checkIdpSignature(CheckedFunction check, String signatureText) { + final Predicate predicate = credential -> { + try { + return check.apply(credential); + } catch (SignatureException | SecurityException e) { + logger.debug(() -> new ParameterizedMessage("SAML Signature [{}] does not match credentials [{}] [{}] -- {}", + signatureText, credential.getEntityId(), credential.getPublicKey(), e)); + logger.trace("SAML Signature failure caused by", e); + return false; + } catch (Exception e) { + logger.warn("Exception while attempting to validate SAML Signature", e); + return false; + } + }; + final List credentials = idp.getSigningCredentials(); + if (credentials.stream().anyMatch(predicate) == false) { + throw samlSignatureException(credentials, signatureText); + } + } + + /** + * Constructs a SAML specific exception with a consistent message regarding SAML Signature validation failures + */ + private ElasticsearchSecurityException samlSignatureException(List credentials, String signature, Exception cause) { + logger.warn("The XML Signature of this SAML message cannot be validated. Please verify that the saml realm uses the correct SAML" + + "metadata file/URL for this Identity Provider"); + final String msg = "SAML Signature [{}] could not be validated against [{}]"; + return samlException(msg, cause, signature, describeCredentials(credentials)); + } + + private ElasticsearchSecurityException samlSignatureException(List credentials, String signature) { + logger.warn("The XML Signature of this SAML message cannot be validated. 
Please verify that the saml realm uses the correct SAML" + + "metadata file/URL for this Identity Provider"); + final String msg = "SAML Signature [{}] could not be validated against [{}]"; + return samlException(msg, signature, describeCredentials(credentials)); + } + + private String describeCredentials(List credentials) { + return credentials.stream() + .map(c -> { + if (c == null) { + return ""; + } + byte[] encoded; + if (c instanceof X509Credential) { + X509Credential x = (X509Credential) c; + try { + encoded = x.getEntityCertificate().getEncoded(); + } catch (CertificateEncodingException e) { + encoded = c.getPublicKey().getEncoded(); + } + } else { + encoded = c.getPublicKey().getEncoded(); + } + return Base64.getEncoder().encodeToString(encoded).substring(0, 64) + "..."; + }) + .collect(Collectors.joining(",")); + } + + protected void checkIssuer(Issuer issuer, XMLObject parent) { + if (issuer == null) { + throw samlException("Element {} ({}) has no issuer, but expected {}", + parent.getElementQName(), text(parent, 16), idp.getEntityId()); + } + if (idp.getEntityId().equals(issuer.getValue()) == false) { + throw samlException("SAML Issuer {} does not match expected value {}", issuer.getValue(), idp.getEntityId()); + } + } + + protected long maxSkewInMillis() { + return this.maxSkew.millis(); + } + + protected java.time.Instant now() { + return clock.instant(); + } + + /** + * Converts a Joda DateTime into a Java Instant + */ + protected Instant toInstant(DateTime dateTime) { + if (dateTime == null) { + return null; + } + return Instant.ofEpochMilli(dateTime.getMillis()); + } + + // Package private for testing + T buildXmlObject(Element element, Class type) { + try { + Unmarshaller unmarshaller = unmarshallerFactory.getUnmarshaller(element); + if (unmarshaller == null) { + throw samlException("XML element [{}] cannot be unmarshalled to SAML type [{}] (no unmarshaller)", + element.getTagName(), type); + } + final XMLObject object = unmarshaller.unmarshall(element); + if (type.isInstance(object)) { + return type.cast(object); + } + Object[] args = new Object[] { element.getTagName(), type.getName(), object == null ? "" : object.getClass().getName() }; + throw samlException("SAML object [{}] is incorrect type. 
Expected [{}] but was [{}]", args); + } catch (UnmarshallingException e) { + throw samlException("Failed to unmarshall SAML content [{}", e, element.getTagName()); + } + } + + protected String text(XMLObject xml, int length) { + final Element dom = xml.getDOM(); + if (dom == null) { + return null; + } + final String text = dom.getTextContent().trim(); + if (text.length() >= length) { + return Strings.cleanTruncate(text, length) + "..."; + } else { + return text; + } + } + + protected Element parseSamlMessage(byte[] content) { + final Element root; + try (ByteArrayInputStream input = new ByteArrayInputStream(content)) { + // This will parse and validate the input + final Document doc = THREAD_LOCAL_DOCUMENT_BUILDER.get().parse(input); + root = doc.getDocumentElement(); + if (logger.isTraceEnabled()) { + logger.trace("Received SAML Message: {} \n", SamlUtils.toString(root, true)); + } + } catch (SAXException | IOException e) { + throw samlException("Failed to parse SAML message", e); + } + return root; + } + + protected void validateNotOnOrAfter(DateTime notOnOrAfter) { + if (notOnOrAfter == null) { + return; + } + final Instant now = now(); + final Instant pastNow = now.minusMillis(this.maxSkew.millis()); + if (pastNow.isBefore(toInstant(notOnOrAfter)) == false) { + throw samlException("Rejecting SAML assertion because [{}] is on/after [{}]", pastNow, notOnOrAfter); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlSpMetadataBuilder.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlSpMetadataBuilder.java new file mode 100644 index 0000000000000..3ef8c903f2748 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlSpMetadataBuilder.java @@ -0,0 +1,420 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.saml; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.MapBuilder; +import org.opensaml.saml.common.xml.SAMLConstants; +import org.opensaml.saml.saml2.core.NameID; +import org.opensaml.saml.saml2.metadata.AssertionConsumerService; +import org.opensaml.saml.saml2.metadata.AttributeConsumingService; +import org.opensaml.saml.saml2.metadata.ContactPerson; +import org.opensaml.saml.saml2.metadata.ContactPersonTypeEnumeration; +import org.opensaml.saml.saml2.metadata.EmailAddress; +import org.opensaml.saml.saml2.metadata.EntityDescriptor; +import org.opensaml.saml.saml2.metadata.GivenName; +import org.opensaml.saml.saml2.metadata.KeyDescriptor; +import org.opensaml.saml.saml2.metadata.NameIDFormat; +import org.opensaml.saml.saml2.metadata.Organization; +import org.opensaml.saml.saml2.metadata.OrganizationDisplayName; +import org.opensaml.saml.saml2.metadata.OrganizationName; +import org.opensaml.saml.saml2.metadata.OrganizationURL; +import org.opensaml.saml.saml2.metadata.RequestedAttribute; +import org.opensaml.saml.saml2.metadata.SPSSODescriptor; +import org.opensaml.saml.saml2.metadata.ServiceName; +import org.opensaml.saml.saml2.metadata.SingleLogoutService; +import org.opensaml.saml.saml2.metadata.SurName; +import org.opensaml.saml.saml2.metadata.impl.AssertionConsumerServiceBuilder; +import org.opensaml.saml.saml2.metadata.impl.AttributeConsumingServiceBuilder; +import org.opensaml.saml.saml2.metadata.impl.ContactPersonBuilder; +import org.opensaml.saml.saml2.metadata.impl.EmailAddressBuilder; +import org.opensaml.saml.saml2.metadata.impl.EntityDescriptorBuilder; +import org.opensaml.saml.saml2.metadata.impl.GivenNameBuilder; +import org.opensaml.saml.saml2.metadata.impl.KeyDescriptorBuilder; +import org.opensaml.saml.saml2.metadata.impl.NameIDFormatBuilder; +import org.opensaml.saml.saml2.metadata.impl.OrganizationBuilder; +import org.opensaml.saml.saml2.metadata.impl.OrganizationDisplayNameBuilder; +import org.opensaml.saml.saml2.metadata.impl.OrganizationNameBuilder; +import org.opensaml.saml.saml2.metadata.impl.OrganizationURLBuilder; +import org.opensaml.saml.saml2.metadata.impl.RequestedAttributeBuilder; +import org.opensaml.saml.saml2.metadata.impl.SPSSODescriptorBuilder; +import org.opensaml.saml.saml2.metadata.impl.ServiceNameBuilder; +import org.opensaml.saml.saml2.metadata.impl.SingleLogoutServiceBuilder; +import org.opensaml.saml.saml2.metadata.impl.SurNameBuilder; +import org.opensaml.security.credential.UsageType; +import org.opensaml.security.x509.X509Credential; +import org.opensaml.xmlsec.keyinfo.KeyInfoSupport; +import org.opensaml.xmlsec.signature.KeyInfo; +import org.opensaml.xmlsec.signature.impl.KeyInfoBuilder; + +import java.security.cert.CertificateEncodingException; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +/** + * Constructs SAML Metadata to describe a Service Provider. + * This metadata is used to configure Identity Providers that will interact with the Service Provider. 
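+ * <p>
+ * A minimal usage sketch; the entity ID, ACS URL and requested attribute below are illustrative values only,
+ * not taken from any calling code:
+ * <pre>{@code
+ * EntityDescriptor descriptor = new SamlSpMetadataBuilder(Locale.getDefault(), "https://sp.example.org/")
+ *     .serviceName("my-service")
+ *     .assertionConsumerServiceUrl("https://sp.example.org/saml/acs")
+ *     .withAttribute("uid", "urn:oid:0.9.2342.19200300.100.1.1")
+ *     .build();
+ * }</pre>
+ * Note that {@link #build()} declares {@code throws Exception}, so callers must handle or propagate it.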
+ */ +public class SamlSpMetadataBuilder { + + private final Locale locale; + private final String entityId; + private final Map attributeNames; + private final List contacts; + + private String serviceName; + private String nameIdFormat; + private String assertionConsumerServiceUrl; + private String singleLogoutServiceUrl; + private Boolean authnRequestsSigned; + private X509Certificate signingCertificate; + private List encryptionCertificates = new ArrayList<>(); + private OrganizationInfo organization; + + /** + * @param locale The locale to use for element that require {@code xml:lang} attributes + * @param entityId The URI for the Service Provider entity + */ + public SamlSpMetadataBuilder(Locale locale, String entityId) { + this.locale = locale; + this.entityId = entityId; + this.attributeNames = new LinkedHashMap<>(); + this.contacts = new ArrayList<>(); + this.serviceName = "Elasticsearch"; + this.nameIdFormat = NameID.TRANSIENT; + this.authnRequestsSigned = Boolean.FALSE; + } + + /** + * The format that the service provider expects for incoming NameID element. + */ + public SamlSpMetadataBuilder nameIdFormat(String nameIdFormat) { + this.nameIdFormat = nameIdFormat; + return this; + } + + /** + * The name of the service, for use in a {@link AttributeConsumingService} + */ + public SamlSpMetadataBuilder serviceName(String serviceName) { + this.serviceName = serviceName; + return this; + } + + /** + * Request a named attribute be provided as part of assertions. Specified in a {@link AttributeConsumingService} + */ + public SamlSpMetadataBuilder withAttribute(String friendlyName, String name) { + if (Strings.isNullOrEmpty(name)) { + throw new IllegalArgumentException("Attribute name cannot be empty (friendly name was [" + friendlyName + "])"); + } + this.attributeNames.put(name, friendlyName); + return this; + } + + /** + * The (POST) URL to be used to accept SAML assertions (authentication results) + */ + public SamlSpMetadataBuilder assertionConsumerServiceUrl(String acsUrl) { + this.assertionConsumerServiceUrl = acsUrl; + return this; + } + + /** + * The (GET/Redirect) URL to be used to handle SAML logout / session termination + */ + public SamlSpMetadataBuilder singleLogoutServiceUrl(String slsUrl) { + this.singleLogoutServiceUrl = slsUrl; + return this; + } + + /** + * Whether this Service Provider signs {@link org.opensaml.saml.saml2.core.AuthnRequest} messages. + */ + public SamlSpMetadataBuilder authnRequestsSigned(Boolean authnRequestsSigned) { + this.authnRequestsSigned = authnRequestsSigned; + return this; + } + + /** + * The certificate that the service provider users to sign SAML requests. + */ + public SamlSpMetadataBuilder signingCertificate(X509Certificate signingCertificate) { + this.signingCertificate = signingCertificate; + return this; + } + + /** + * The certificate credential that should be used to send encrypted data to the service provider. + */ + public SamlSpMetadataBuilder signingCredential(X509Credential credential) { + return signingCertificate(credential == null ? null : credential.getEntityCertificate()); + } + + /** + * The certificate that should be used to send encrypted data to the service provider. + */ + public SamlSpMetadataBuilder encryptionCertificates(Collection encryptionCertificates) { + if (encryptionCertificates != null) { + this.encryptionCertificates.addAll(encryptionCertificates); + } + return this; + } + + /** + * The certificate credential that should be used to send encrypted data to the service provider. 
+ */ + public SamlSpMetadataBuilder encryptionCredentials(Collection credentials) { + return encryptionCertificates(credentials == null ? Collections.emptyList() + : credentials.stream().map(credential -> credential.getEntityCertificate()).collect(Collectors.toList())); + } + + /** + * The organisation that operates the service provider + */ + public SamlSpMetadataBuilder organization(OrganizationInfo organization) { + this.organization = organization; + return this; + } + + /** + * The organisation that operates the service provider + */ + public SamlSpMetadataBuilder organization(String orgName, String displayName, String url) { + return organization(new OrganizationInfo(orgName, displayName, url)); + } + + /** + * A contact within the organisation that operates the service provider + */ + public SamlSpMetadataBuilder withContact(ContactInfo contact) { + this.contacts.add(contact); + return this; + } + + /** + * A contact within the organisation that operates the service provider + * + * @param type Must be one of the standard types on {@link ContactPersonTypeEnumeration} + */ + public SamlSpMetadataBuilder withContact(String type, String givenName, String surName, String email) { + return withContact(new ContactInfo(ContactInfo.getType(type), givenName, surName, email)); + } + + /** + * Constructs an {@link EntityDescriptor} that contains a single {@link SPSSODescriptor}. + */ + public EntityDescriptor build() throws Exception { + final SPSSODescriptor spRoleDescriptor = new SPSSODescriptorBuilder().buildObject(); + spRoleDescriptor.removeAllSupportedProtocols(); + spRoleDescriptor.addSupportedProtocol(SAMLConstants.SAML20P_NS); + spRoleDescriptor.setWantAssertionsSigned(true); + spRoleDescriptor.setAuthnRequestsSigned(this.authnRequestsSigned); + + if (Strings.hasLength(nameIdFormat)) { + spRoleDescriptor.getNameIDFormats().add(buildNameIdFormat()); + } + spRoleDescriptor.getAssertionConsumerServices().add(buildAssertionConsumerService()); + if (attributeNames.size() > 0) { + spRoleDescriptor.getAttributeConsumingServices().add(buildAttributeConsumerService()); + } + if (Strings.hasText(singleLogoutServiceUrl)) { + spRoleDescriptor.getSingleLogoutServices().add(buildSingleLogoutService()); + } + + spRoleDescriptor.getKeyDescriptors().addAll(buildKeyDescriptors()); + + final EntityDescriptor descriptor = new EntityDescriptorBuilder().buildObject(); + descriptor.setEntityID(this.entityId); + descriptor.getRoleDescriptors().add(spRoleDescriptor); + if (organization != null) { + descriptor.setOrganization(buildOrganization()); + } + contacts.forEach(c -> descriptor.getContactPersons().add(buildContact(c))); + + return descriptor; + } + + private NameIDFormat buildNameIdFormat() { + final NameIDFormat format = new NameIDFormatBuilder().buildObject(); + format.setFormat(this.nameIdFormat); + return format; + } + + private AssertionConsumerService buildAssertionConsumerService() { + if (Strings.isNullOrEmpty(assertionConsumerServiceUrl)) { + throw new IllegalStateException("AssertionConsumerService URL has not been specified"); + } + final AssertionConsumerService acs = new AssertionConsumerServiceBuilder().buildObject(); + acs.setBinding(SAMLConstants.SAML2_POST_BINDING_URI); + acs.setIndex(1); + acs.setIsDefault(Boolean.TRUE); + acs.setLocation(assertionConsumerServiceUrl); + return acs; + } + + private AttributeConsumingService buildAttributeConsumerService() { + final AttributeConsumingService service = new AttributeConsumingServiceBuilder().buildObject(); + service.setIndex(1); + 
service.setIsDefault(true); + service.getNames().add(buildServiceName()); + attributeNames.forEach((name, friendlyName) -> { + service.getRequestAttributes().add(buildRequestedAttribute(friendlyName, name)); + }); + return service; + } + + private ServiceName buildServiceName() { + final ServiceName name = new ServiceNameBuilder().buildObject(); + name.setValue(serviceName); + name.setXMLLang(locale.toLanguageTag()); + return name; + } + + private RequestedAttribute buildRequestedAttribute(String friendlyName, String name) { + final RequestedAttribute attribute = new RequestedAttributeBuilder().buildObject(); + if (Strings.hasText(friendlyName)) { + attribute.setFriendlyName(friendlyName); + } + attribute.setName(name); + attribute.setNameFormat(RequestedAttribute.URI_REFERENCE); + return attribute; + } + + private SingleLogoutService buildSingleLogoutService() { + final SingleLogoutService service = new SingleLogoutServiceBuilder().buildObject(); + // The draft Interoperable SAML 2 profile requires redirect binding. + // That's annoying, because they require POST binding for the ACS so now SPs need to + // support 2 bindings that have different signature passing rules, etc. *sigh* + service.setBinding(SAMLConstants.SAML2_REDIRECT_BINDING_URI); + service.setLocation(singleLogoutServiceUrl); + return service; + } + + private List buildKeyDescriptors() throws CertificateEncodingException { + if (encryptionCertificates.isEmpty() && signingCertificate == null) { + return Collections.emptyList(); + } + if (encryptionCertificates.size() == 1 && Objects.equals(encryptionCertificates.get(0), signingCertificate)) { + return Collections.singletonList(buildKeyDescriptor(encryptionCertificates.get(0), UsageType.UNSPECIFIED)); + } + List keys = new ArrayList<>(); + if (signingCertificate != null) { + keys.add(buildKeyDescriptor(signingCertificate, UsageType.SIGNING)); + } + for( X509Certificate encryptionCertificate : encryptionCertificates) { + keys.add(buildKeyDescriptor(encryptionCertificate, UsageType.ENCRYPTION)); + } + return keys; + } + + private KeyDescriptor buildKeyDescriptor(X509Certificate certificate, UsageType usageType) throws CertificateEncodingException { + final KeyDescriptor descriptor = new KeyDescriptorBuilder().buildObject(); + descriptor.setUse(usageType); + final KeyInfo keyInfo = new KeyInfoBuilder().buildObject(); + KeyInfoSupport.addCertificate(keyInfo, certificate); + descriptor.setKeyInfo(keyInfo); + return descriptor; + } + + private Organization buildOrganization() { + final String lang = locale.toLanguageTag(); + final OrganizationName name = new OrganizationNameBuilder().buildObject(); + name.setValue(this.organization.organizationName); + name.setXMLLang(lang); + final OrganizationDisplayName displayName = new OrganizationDisplayNameBuilder().buildObject(); + displayName.setValue(this.organization.displayName); + displayName.setXMLLang(lang); + final OrganizationURL url = new OrganizationURLBuilder().buildObject(); + url.setValue(this.organization.url); + url.setXMLLang(lang); + + final Organization org = new OrganizationBuilder().buildObject(); + org.getOrganizationNames().add(name); + org.getDisplayNames().add(displayName); + org.getURLs().add(url); + return org; + } + + private ContactPerson buildContact(ContactInfo contact) { + final GivenName givenName = new GivenNameBuilder().buildObject(); + givenName.setName(contact.givenName); + final SurName surName = new SurNameBuilder().buildObject(); + surName.setName(contact.surName); + final EmailAddress email = new 
EmailAddressBuilder().buildObject(); + email.setAddress(contact.email); + + final ContactPerson person = new ContactPersonBuilder().buildObject(); + person.setType(contact.type); + person.setGivenName(givenName); + person.setSurName(surName); + person.getEmailAddresses().add(email); + return person; + } + + + public static class OrganizationInfo { + public final String organizationName; + public final String displayName; + public final String url; + + public OrganizationInfo(String organizationName, String displayName, String url) { + if (Strings.isNullOrEmpty(organizationName)) { + throw new IllegalArgumentException("Organization Name is required"); + } + if (Strings.isNullOrEmpty(displayName)) { + throw new IllegalArgumentException("Organization Display Name is required"); + } + if (Strings.isNullOrEmpty(url)) { + throw new IllegalArgumentException("Organization URL is required"); + } + this.organizationName = organizationName; + this.displayName = displayName; + this.url = url; + } + } + + public static class ContactInfo { + static final Map TYPES = + MapBuilder.newMapBuilder(new LinkedHashMap<>()) + .put(ContactPersonTypeEnumeration.ADMINISTRATIVE.toString(), ContactPersonTypeEnumeration.ADMINISTRATIVE) + .put(ContactPersonTypeEnumeration.BILLING.toString(), ContactPersonTypeEnumeration.BILLING) + .put(ContactPersonTypeEnumeration.SUPPORT.toString(), ContactPersonTypeEnumeration.SUPPORT) + .put(ContactPersonTypeEnumeration.TECHNICAL.toString(), ContactPersonTypeEnumeration.TECHNICAL) + .put(ContactPersonTypeEnumeration.OTHER.toString(), ContactPersonTypeEnumeration.OTHER) + .map(); + + public final ContactPersonTypeEnumeration type; + public final String givenName; + public final String surName; + public final String email; + + public ContactInfo(ContactPersonTypeEnumeration type, String givenName, String surName, String email) { + this.type = Objects.requireNonNull(type, "Contact Person Type is required"); + this.givenName = givenName; + this.surName = surName; + this.email = email; + } + + private static ContactPersonTypeEnumeration getType(String name) { + final ContactPersonTypeEnumeration type = TYPES.get(name.toLowerCase(Locale.ROOT)); + if (type == null) { + throw new IllegalArgumentException("Invalid contact type " + name + " allowed values are " + + Strings.collectionToCommaDelimitedString(TYPES.keySet())); + } + return type; + } + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlToken.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlToken.java new file mode 100644 index 0000000000000..3420733f74a61 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlToken.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.saml; + +import java.util.List; + +import org.apache.commons.codec.binary.Hex; +import org.elasticsearch.common.Strings; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; + +/** + * A very lightweight {@link AuthenticationToken} to hold SAML content. + * Due to the nature of SAML, it is impossible to know the {@link #principal() principal} for the token + * until it is parsed and validated, so this token always returns a placeholder value. 
+ * @see SamlRealm#authenticate + */ +public class SamlToken implements AuthenticationToken { + private byte[] content; + private final List allowedSamlRequestIds; + + /** + * @param content The content of the SAML message. This should be raw XML. In particular it should not be + * base64 encoded. + */ + public SamlToken(byte[] content, List allowedSamlRequestIds) { + this.content = content; + this.allowedSamlRequestIds = allowedSamlRequestIds; + } + + @Override + public String principal() { + return ""; + } + + @Override + public Object credentials() { + return content; + } + + @Override + public void clearCredentials() { + content = null; + } + + public byte[] getContent() { + return content; + } + + public List getAllowedSamlRequestIds() { + return allowedSamlRequestIds; + } + + @Override + public String toString() { + return getClass().getSimpleName() + "{" + Strings.cleanTruncate(Hex.encodeHexString(content), 128) + "...}"; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlUtils.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlUtils.java new file mode 100644 index 0000000000000..9b8d73c2e6142 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlUtils.java @@ -0,0 +1,372 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.saml; + +import javax.xml.XMLConstants; +import javax.xml.namespace.QName; +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.transform.OutputKeys; +import javax.xml.transform.Transformer; +import javax.xml.transform.TransformerConfigurationException; +import javax.xml.transform.TransformerException; +import javax.xml.transform.TransformerFactory; +import javax.xml.transform.dom.DOMSource; +import javax.xml.transform.stream.StreamResult; +import javax.xml.transform.stream.StreamSource; +import javax.xml.validation.Schema; +import javax.xml.validation.SchemaFactory; +import javax.xml.validation.Validator; +import java.io.IOException; +import java.io.InputStream; +import java.io.StringWriter; +import java.io.Writer; +import java.net.URISyntaxException; +import java.security.AccessController; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.SpecialPermission; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.hash.MessageDigests; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.xpack.security.support.RestorableContextClassLoader; +import org.opensaml.core.config.InitializationService; +import org.opensaml.core.xml.XMLObject; +import org.opensaml.core.xml.XMLObjectBuilderFactory; +import org.opensaml.core.xml.config.XMLObjectProviderRegistrySupport; +import 
org.opensaml.core.xml.io.MarshallingException; +import org.opensaml.core.xml.util.XMLObjectSupport; +import org.opensaml.saml.common.SAMLObject; +import org.opensaml.saml.saml2.core.Assertion; +import org.opensaml.saml.saml2.core.Response; +import org.slf4j.LoggerFactory; +import org.w3c.dom.Element; +import org.w3c.dom.bootstrap.DOMImplementationRegistry; +import org.w3c.dom.ls.DOMImplementationLS; +import org.w3c.dom.ls.LSInput; +import org.w3c.dom.ls.LSResourceResolver; +import org.xml.sax.SAXException; +import org.xml.sax.SAXParseException; + +public class SamlUtils { + + private static final String SAML_EXCEPTION_KEY = "es.security.saml"; + private static final String SAML_MARSHALLING_ERROR_STRING = "_unserializable_"; + + private static final AtomicBoolean INITIALISED = new AtomicBoolean(false); + private static final SecureRandom SECURE_RANDOM = new SecureRandom(); + + private static XMLObjectBuilderFactory builderFactory = null; + private static final Logger LOGGER = ESLoggerFactory.getLogger(SamlUtils.class); + + /** + * This is needed in order to initialize the underlying OpenSAML library. + * It must be called before doing anything that potentially interacts with OpenSAML (whether in server code, or in tests). + * The initialization happens within do privileged block as the underlying Apache XML security library has a permission check. + * The initialization happens with a specific context classloader as OpenSAML loads resources from its jar file. + */ + static void initialize(Logger logger) throws PrivilegedActionException { + if (INITIALISED.compareAndSet(false, true)) { + // We want to force these classes to be loaded _before_ we fiddle with the context classloader + LoggerFactory.getLogger(InitializationService.class); + SpecialPermission.check(); + AccessController.doPrivileged((PrivilegedExceptionAction) () -> { + logger.debug("Initializing OpenSAML"); + try (RestorableContextClassLoader ignore = new RestorableContextClassLoader(InitializationService.class)) { + InitializationService.initialize(); + } + logger.debug("Initialized OpenSAML"); + return null; + }); + builderFactory = XMLObjectProviderRegistrySupport.getBuilderFactory(); + } + } + + /** + * Constructs an exception that can be distinguished (via {@link #isSamlException} as a SAML specific exception + * Used to distinguish "expected" exceptions (such as SAML signature failures, or missing attributes) that should be treated as a + * simple authentication failure (with a clear cause) + */ + public static ElasticsearchSecurityException samlException(String msg, Object... args) { + final ElasticsearchSecurityException exception = new ElasticsearchSecurityException(msg, args); + exception.addMetadata(SAML_EXCEPTION_KEY); + return exception; + } + + /** + * @see #samlException(String, Object...) + */ + public static ElasticsearchSecurityException samlException(String msg, Exception cause, Object... args) { + final ElasticsearchSecurityException exception = new ElasticsearchSecurityException(msg, cause, args); + exception.addMetadata(SAML_EXCEPTION_KEY); + return exception; + } + + /** + * @see #samlException(String, Object...) 
+ */ + public static boolean isSamlException(ElasticsearchSecurityException exception) { + return exception != null && exception.getMetadata(SAML_EXCEPTION_KEY) != null; + } + + public static T buildObject(Class type, QName elementName) { + final XMLObject obj = builderFactory.getBuilder(elementName).buildObject(elementName); + if (type.isInstance(obj)) { + return type.cast(obj); + } else { + throw new IllegalArgumentException("Object for element " + elementName.getLocalPart() + " is of type " + obj.getClass() + + " not " + type); + } + } + + public static String generateSecureNCName(int numberBytes) { + final byte[] randomBytes = new byte[numberBytes]; + SECURE_RANDOM.nextBytes(randomBytes); + // NCNames (https://www.w3.org/TR/xmlschema-2/#NCName) can't start with a number, so start them all with "_" to be safe + return "_".concat(MessageDigests.toHexString(randomBytes)); + } + + static String toString(Element element, boolean pretty) { + try { + StringWriter writer = new StringWriter(); + print(element, writer, pretty); + return writer.toString(); + } catch (TransformerException e) { + return "[" + element.getNamespaceURI() + "]" + element.getLocalName(); + } + } + + static String toString(Element element) { + return toString(element, false); + } + + static void print(Element element, Writer writer, boolean pretty) throws TransformerException { + final Transformer serializer = getHardenedXMLTransformer(); + if (pretty) { + serializer.setOutputProperty(OutputKeys.INDENT, "yes"); + } + serializer.transform(new DOMSource(element), new StreamResult(writer)); + } + + static String samlObjectToString(SAMLObject object) { + try { + return toString(XMLObjectSupport.marshall(object), true); + } catch (MarshallingException e) { + LOGGER.info("Error marshalling SAMLObject ", e); + return SAML_MARSHALLING_ERROR_STRING; + } + } + + static String describeSamlObject(SAMLObject object) { + if (Response.class.isInstance(object)) { + Response response = (Response) object; + StringBuilder sb = new StringBuilder(); + sb.append("SAML Response: [\n"); + sb.append(" Destination: ").append(response.getDestination()).append("\n"); + sb.append(" Response ID: ").append(response.getID()).append("\n"); + sb.append(" In response to: ").append(response.getInResponseTo()).append("\n"); + sb.append(" Response issued at:").append(response.getIssueInstant()).append("\n"); + if (response.getIssuer() != null) { + sb.append(" Issuer: ").append(response.getIssuer().getValue()).append("\n"); + } + sb.append(" Number of unencrypted Assertions: ").append(response.getAssertions().size()).append("\n"); + sb.append(" Number of encrypted Assertions: ").append(response.getEncryptedAssertions().size()).append("\n"); + sb.append("]"); + return sb.toString(); + + } else if (Assertion.class.isInstance(object)) { + Assertion assertion = (Assertion) object; + StringBuilder sb = new StringBuilder(); + sb.append("SAML Assertion: [\n"); + sb.append(" Response ID: ").append(assertion.getID()).append("\n"); + sb.append(" Response issued at: ").append(assertion.getIssueInstant()).append("\n"); + if (assertion.getIssuer() != null) { + sb.append(" Issuer: ").append(assertion.getIssuer().getValue()).append("\n"); + } + sb.append(" Number of attribute statements: ").append(assertion.getAttributeStatements().size()).append("\n"); + sb.append(" Number of authentication statements: ").append(assertion.getAuthnStatements().size()).append("\n"); + sb.append("]"); + return sb.toString(); + } + return samlObjectToString(object); + } + + 
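A short caller-side sketch of how these tagged exceptions are meant to be consumed; `parseAndValidateSamlResponse` and `samlXmlBytes` are invented placeholders.

```java
// Hypothetical caller: SAML-tagged failures become plain authentication failures,
// anything else is treated as an unexpected error.
try {
    parseAndValidateSamlResponse(samlXmlBytes); // invented placeholder for the SAML processing code
} catch (ElasticsearchSecurityException e) {
    if (SamlUtils.isSamlException(e)) {
        // expected SAML failure (bad signature, missing attribute, ...): report authentication failure
    } else {
        throw e; // unexpected problem, let it propagate
    }
}
```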
@SuppressForbidden(reason = "This is the only allowed way to construct a Transformer") + public static Transformer getHardenedXMLTransformer() throws TransformerConfigurationException { + final TransformerFactory tfactory = TransformerFactory.newInstance(); + tfactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true); + tfactory.setAttribute(XMLConstants.ACCESS_EXTERNAL_DTD, ""); + tfactory.setAttribute(XMLConstants.ACCESS_EXTERNAL_STYLESHEET, ""); + tfactory.setAttribute("indent-number", 2); + Transformer transformer = tfactory.newTransformer(); + transformer.setErrorListener(new ErrorListener()); + return transformer; + } + + static void validate(InputStream xml, String xsdName) throws Exception { + SchemaFactory schemaFactory = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI); + try (InputStream xsdStream = loadSchema(xsdName); + ResourceResolver resolver = new ResourceResolver()) { + schemaFactory.setResourceResolver(resolver); + Schema schema = schemaFactory.newSchema(new StreamSource(xsdStream)); + Validator validator = schema.newValidator(); + validator.setProperty(XMLConstants.ACCESS_EXTERNAL_DTD, ""); + validator.setProperty(XMLConstants.ACCESS_EXTERNAL_SCHEMA, ""); + validator.validate(new StreamSource(xml)); + } + } + + private static InputStream loadSchema(String name) { + if (name.endsWith(".xsd") && name.indexOf('/') == -1 && name.indexOf('\\') == -1) { + return SamlUtils.class.getResourceAsStream(name); + } else { + return null; + } + } + + private static class ResourceResolver implements LSResourceResolver, AutoCloseable { + private final DOMImplementationLS domLS; + private final List streams; + + private ResourceResolver() throws InstantiationException, IllegalAccessException, ClassNotFoundException { + // Seriously, who thought this was a good idea for an API ??? 
+ DOMImplementationRegistry registry = DOMImplementationRegistry.newInstance(); + domLS = (DOMImplementationLS) registry.getDOMImplementation("LS"); + streams = new ArrayList<>(); + } + + @Override + public LSInput resolveResource(String type, String namespaceURI, String publicId, String systemId, String baseURI) { + InputStream stream = loadSchema(systemId); + if (stream == null) { + return null; + } + streams.add(stream); + final LSInput input = domLS.createLSInput(); + input.setByteStream(stream); + return input; + } + + @Override + public void close() throws IOException { + IOUtils.close(streams); + } + } + + /** + * Constructs a DocumentBuilder with all the necessary features for it to be secure + * + * @throws ParserConfigurationException if one of the features can't be set on the DocumentBuilderFactory + */ + @SuppressForbidden(reason = "This is the only allowed way to construct a DocumentBuilder") + public static DocumentBuilder getHardenedBuilder(String[] schemaFiles) throws ParserConfigurationException { + final DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); + dbf.setNamespaceAware(true); + // Ensure that Schema Validation is enabled for the factory + dbf.setValidating(true); + // Disallow internal and external entity expansion + dbf.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true); + dbf.setFeature("http://xml.org/sax/features/external-general-entities", false); + dbf.setFeature("http://xml.org/sax/features/external-parameter-entities", false); + dbf.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false); + dbf.setFeature("http://xml.org/sax/features/validation", true); + dbf.setFeature("http://apache.org/xml/features/nonvalidating/load-dtd-grammar", false); + dbf.setIgnoringComments(true); + // This is required, otherwise schema validation causes signature invalidation + dbf.setFeature("http://apache.org/xml/features/validation/schema/normalized-value", false); + // Make sure that URL schema namespaces are not resolved/downloaded from URLs we do not control + dbf.setAttribute(XMLConstants.ACCESS_EXTERNAL_DTD, "file,jar"); + dbf.setAttribute(XMLConstants.ACCESS_EXTERNAL_SCHEMA, "file,jar"); + dbf.setFeature("http://apache.org/xml/features/honour-all-schemaLocations", true); + // Ensure we do not resolve XIncludes. Defaults to false, but set it explicitly to be future-proof + dbf.setXIncludeAware(false); + // Ensure we do not expand entity reference nodes + dbf.setExpandEntityReferences(false); + // Further limit danger from denial of service attacks + dbf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true); + dbf.setAttribute("http://apache.org/xml/features/validation/schema", true); + dbf.setAttribute("http://apache.org/xml/features/validation/schema-full-checking", true); + dbf.setAttribute("http://java.sun.com/xml/jaxp/properties/schemaLanguage", + XMLConstants.W3C_XML_SCHEMA_NS_URI); + // We ship our own xsd files for schema validation since we do not trust anyone else. + dbf.setAttribute("http://java.sun.com/xml/jaxp/properties/schemaSource", resolveSchemaFilePaths(schemaFiles)); + DocumentBuilder documentBuilder = dbf.newDocumentBuilder(); + documentBuilder.setErrorHandler(new ErrorHandler()); + return documentBuilder; + } + + private static String[] resolveSchemaFilePaths(String[] relativePaths) { + + return Arrays.stream(relativePaths). 
+ map(file -> { + try { + return SamlUtils.class.getResource(file).toURI().toString(); + } catch (URISyntaxException e) { + LOGGER.warn("Error resolving schema file path", e); + return null; + } + }).filter(Objects::nonNull).toArray(String[]::new); + } + + private static class ErrorListener implements javax.xml.transform.ErrorListener { + + @Override + public void warning(TransformerException e) throws TransformerException { + LOGGER.debug("XML transformation error", e); + throw e; + } + + @Override + public void error(TransformerException e) throws TransformerException { + LOGGER.debug("XML transformation error", e); + throw e; + } + + @Override + public void fatalError(TransformerException e) throws TransformerException { + LOGGER.debug("XML transformation error", e); + throw e; + } + } + + private static class ErrorHandler implements org.xml.sax.ErrorHandler { + /** + * Enabling schema validation with `setValidating(true)` in our + * DocumentBuilderFactory requires that we provide our own + * ErrorHandler implementation + * + * @throws SAXException If the document we attempt to parse is not valid according to the specified schema. + */ + @Override + public void warning(SAXParseException e) throws SAXException { + LOGGER.debug("XML Parser error ", e); + throw e; + } + + @Override + public void error(SAXParseException e) throws SAXException { + LOGGER.debug("XML Parser error ", e); + throw e; + } + + @Override + public void fatalError(SAXParseException e) throws SAXException { + LOGGER.debug("XML Parser error ", e); + throw e; + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SigningConfiguration.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SigningConfiguration.java new file mode 100644 index 0000000000000..349d72d5369e8 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SigningConfiguration.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.saml; + +import java.nio.charset.StandardCharsets; +import java.util.Set; + +import org.opensaml.saml.common.SAMLObject; +import org.opensaml.security.SecurityException; +import org.opensaml.security.x509.X509Credential; +import org.opensaml.xmlsec.crypto.XMLSigningUtil; + +/** + * Encapsulates the rules and credentials for how and when Elasticsearch should sign outgoing SAML messages. 
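To make the signing rules of the configuration class below concrete, a small sketch (assuming `credential` holds any loaded `X509Credential`; the element names are simply SAML message types).

```java
// Sketch of SigningConfiguration.shouldSign(): nothing is signed without a credential,
// otherwise a message type is signed if it is listed explicitly or "*" is configured.
SigningConfiguration authnOnly = new SigningConfiguration(Collections.singleton("AuthnRequest"), credential);
boolean signsAuthn  = authnOnly.shouldSign("AuthnRequest");    // true
boolean signsLogout = authnOnly.shouldSign("LogoutRequest");   // false

SigningConfiguration everything = new SigningConfiguration(Collections.singleton("*"), credential);
boolean wildcard = everything.shouldSign("LogoutRequest");     // true

SigningConfiguration disabled = new SigningConfiguration(Collections.singleton("*"), null);
boolean never = disabled.shouldSign("AuthnRequest");           // false - no signing credential
```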
+ */ +class SigningConfiguration { + + private final Set messageTypes; + private final X509Credential credential; + + SigningConfiguration(Set messageTypes, X509Credential credential) { + this.messageTypes = messageTypes; + this.credential = credential; + } + + boolean shouldSign(SAMLObject object) { + return shouldSign(object.getElementQName().getLocalPart()); + } + + boolean shouldSign(String elementName) { + if (credential == null) { + return false; + } + return messageTypes.contains(elementName) || messageTypes.contains("*"); + } + + byte[] sign(String text, String algo) throws SecurityException { + return sign(text.getBytes(StandardCharsets.UTF_8), algo); + } + + byte[] sign(byte[] content, String algo) throws SecurityException { + return XMLSigningUtil.signWithURI(this.credential, algo, content); + } + + X509Credential getCredential() { + return credential; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SpConfiguration.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SpConfiguration.java new file mode 100644 index 0000000000000..984deb0a6938f --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SpConfiguration.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.saml; + +import org.elasticsearch.common.Nullable; +import org.opensaml.security.x509.X509Credential; + +import java.util.Collections; +import java.util.List; + +/** + * A simple container class that holds all configuration related to a SAML Service Provider (SP). + */ +public class SpConfiguration { + + private final String entityId; + private final String ascUrl; + private final String logoutUrl; + private final SigningConfiguration signingConfiguration; + private final List encryptionCredentials; + + public SpConfiguration(final String entityId, final String ascUrl, final String logoutUrl, + final SigningConfiguration signingConfiguration, @Nullable final List encryptionCredential) { + this.entityId = entityId; + this.ascUrl = ascUrl; + this.logoutUrl = logoutUrl; + this.signingConfiguration = signingConfiguration; + if (encryptionCredential != null) { + this.encryptionCredentials = Collections.unmodifiableList(encryptionCredential); + } else { + this.encryptionCredentials = Collections.emptyList(); + } + } + + /** + * The SAML identifier (as a URI) for the Sp + */ + String getEntityId() { + return entityId; + } + + String getAscUrl() { + return ascUrl; + } + + String getLogoutUrl() { + return logoutUrl; + } + + List getEncryptionCredentials() { + return encryptionCredentials; + } + + SigningConfiguration getSigningConfiguration() { + return signingConfiguration; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingRealm.java new file mode 100644 index 0000000000000..4c18ac2df6d6e --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingRealm.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.support; + +import org.elasticsearch.xpack.core.security.authc.Realm; + +/** + * This interface allows a {@link Realm} to indicate that it supports caching user credentials + * and expose the ability to clear the cache for a given String identifier or all of the cache + */ +public interface CachingRealm { + + /** + * Expires a single user from the cache identified by the String argument + * @param username the identifier of the user to be cleared + */ + void expire(String username); + + /** + * Expires all of the data that has been cached in this realm + */ + void expireAll(); +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java new file mode 100644 index 0000000000000..e5a90c0855fdc --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java @@ -0,0 +1,213 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.support; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.cache.Cache; +import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.user.User; + +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ExecutionException; + +public abstract class CachingUsernamePasswordRealm extends UsernamePasswordRealm implements CachingRealm { + + private final Cache cache; + final Hasher hasher; + + protected CachingUsernamePasswordRealm(String type, RealmConfig config) { + super(type, config); + hasher = Hasher.resolve(CachingUsernamePasswordRealmSettings.CACHE_HASH_ALGO_SETTING.get(config.settings()), Hasher.SSHA256); + TimeValue ttl = CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.get(config.settings()); + if (ttl.getNanos() > 0) { + cache = CacheBuilder.builder() + .setExpireAfterWrite(ttl) + .setMaximumWeight(CachingUsernamePasswordRealmSettings.CACHE_MAX_USERS_SETTING.get(config.settings())) + .build(); + } else { + cache = null; + } + } + + public final void expire(String username) { + if (cache != null) { + logger.trace("invalidating cache for user [{}] in realm [{}]", username, name()); + cache.invalidate(username); + } + } + + public final void expireAll() { + if (cache != null) { + logger.trace("invalidating cache for all users in realm [{}]", name()); + cache.invalidateAll(); + } + } + + /** + * If the user exists in the cache (keyed by the principal
name), then the password is validated + * against a hash also stored in the cache. Otherwise the subclass authenticates the user via + * doAuthenticate + * @param authToken The authentication token + * @param listener to be called at completion + */ + @Override + public final void authenticate(AuthenticationToken authToken, ActionListener listener) { + UsernamePasswordToken token = (UsernamePasswordToken) authToken; + try { + if (cache == null) { + doAuthenticate(token, listener); + } else { + authenticateWithCache(token, listener); + } + } catch (Exception e) { + // each realm should handle exceptions, if we get one here it should be considered fatal + listener.onFailure(e); + } + } + + private void authenticateWithCache(UsernamePasswordToken token, ActionListener listener) { + UserWithHash userWithHash = cache.get(token.principal()); + if (userWithHash == null) { + if (logger.isDebugEnabled()) { + logger.debug("user [{}] not found in cache for realm [{}], proceeding with normal authentication", + token.principal(), name()); + } + doAuthenticateAndCache(token, ActionListener.wrap((result) -> { + if (result.isAuthenticated()) { + final User user = result.getUser(); + logger.debug("realm [{}] authenticated user [{}], with roles [{}]", name(), token.principal(), user.roles()); + } + listener.onResponse(result); + }, listener::onFailure)); + } else if (userWithHash.hasHash()) { + if (userWithHash.verify(token.credentials())) { + if (userWithHash.user.enabled()) { + User user = userWithHash.user; + logger.debug("realm [{}] authenticated user [{}], with roles [{}]", name(), token.principal(), user.roles()); + listener.onResponse(AuthenticationResult.success(user)); + } else { + // We successfully authenticated, but the cached user is disabled. + // Reload the primary record to check whether the user is still disabled + cache.invalidate(token.principal()); + doAuthenticateAndCache(token, ActionListener.wrap((result) -> { + if (result.isAuthenticated()) { + final User user = result.getUser(); + logger.debug("realm [{}] authenticated user [{}] (enabled:{}), with roles [{}]", name(), token.principal(), + user.enabled(), user.roles()); + } + listener.onResponse(result); + }, listener::onFailure)); + } + } else { + cache.invalidate(token.principal()); + doAuthenticateAndCache(token, ActionListener.wrap((result) -> { + if (result.isAuthenticated()) { + final User user = result.getUser(); + logger.debug("cached user's password changed. realm [{}] authenticated user [{}], with roles [{}]", + name(), token.principal(), user.roles()); + } + listener.onResponse(result); + }, listener::onFailure)); + } + } else { + cache.invalidate(token.principal()); + doAuthenticateAndCache(token, ActionListener.wrap((result) -> { + if (result.isAuthenticated()) { + final User user = result.getUser(); + logger.debug("cached user came from a lookup and could not be used for authentication. 
" + + "realm [{}] authenticated user [{}] with roles [{}]", name(), token.principal(), user.roles()); + } + listener.onResponse(result); + }, listener::onFailure)); + } + } + + private void doAuthenticateAndCache(UsernamePasswordToken token, ActionListener listener) { + ActionListener wrapped = ActionListener.wrap((result) -> { + Objects.requireNonNull(result, "AuthenticationResult cannot be null"); + if (result.getStatus() == AuthenticationResult.Status.SUCCESS) { + UserWithHash userWithHash = new UserWithHash(result.getUser(), token.credentials(), hasher); + // it doesn't matter if we already computed it elsewhere + cache.put(token.principal(), userWithHash); + } + listener.onResponse(result); + }, listener::onFailure); + + doAuthenticate(token, wrapped); + } + + @Override + public Map usageStats() { + Map stats = super.usageStats(); + stats.put("size", cache.count()); + return stats; + } + + protected abstract void doAuthenticate(UsernamePasswordToken token, ActionListener listener); + + @Override + public final void lookupUser(String username, ActionListener listener) { + if (cache != null) { + UserWithHash withHash = cache.get(username); + if (withHash == null) { + try { + doLookupUser(username, ActionListener.wrap((user) -> { + Runnable action = () -> listener.onResponse(null); + if (user != null) { + UserWithHash userWithHash = new UserWithHash(user, null, null); + try { + // computeIfAbsent is used here to avoid overwriting a value from a concurrent authenticate call as it + // contains the password hash, which provides a performance boost and we shouldn't just erase that + cache.computeIfAbsent(username, (n) -> userWithHash); + action = () -> listener.onResponse(userWithHash.user); + } catch (ExecutionException e) { + action = () -> listener.onFailure(e); + } + } + action.run(); + }, listener::onFailure)); + } catch (Exception e) { + listener.onFailure(e); + } + } else { + listener.onResponse(withHash.user); + } + } else { + doLookupUser(username, listener); + } + } + + protected abstract void doLookupUser(String username, ActionListener listener); + + private static class UserWithHash { + User user; + char[] hash; + Hasher hasher; + + UserWithHash(User user, SecureString password, Hasher hasher) { + this.user = user; + this.hash = password == null ? null : hasher.hash(password); + this.hasher = hasher; + } + + boolean verify(SecureString password) { + return hash != null && hasher.verify(password, hash); + } + + boolean hasHash() { + return hash != null; + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DnRoleMapper.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DnRoleMapper.java new file mode 100644 index 0000000000000..6516b02f68d0b --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/DnRoleMapper.java @@ -0,0 +1,229 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.support; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; + +import com.unboundid.ldap.sdk.DN; +import com.unboundid.ldap.sdk.LDAPException; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.env.Environment; +import org.elasticsearch.watcher.FileChangesListener; +import org.elasticsearch.watcher.FileWatcher; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.unmodifiableMap; +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.dn; +import static org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils.relativeName; + +/** + * This class loads and monitors the file defining the mappings of DNs to internal ES Roles. + */ +public class DnRoleMapper implements UserRoleMapper { + + protected final Logger logger; + protected final RealmConfig config; + + private final Path file; + private final boolean useUnmappedGroupsAsRoles; + private final CopyOnWriteArrayList listeners = new CopyOnWriteArrayList<>(); + private volatile Map> dnRoles; + + public DnRoleMapper(RealmConfig config, ResourceWatcherService watcherService) { + this.config = config; + this.logger = config.logger(getClass()); + + useUnmappedGroupsAsRoles = DnRoleMapperSettings.USE_UNMAPPED_GROUPS_AS_ROLES_SETTING.get(config.settings()); + file = resolveFile(config.settings(), config.env()); + dnRoles = parseFileLenient(file, logger, config.type(), config.name()); + FileWatcher watcher = new FileWatcher(file.getParent()); + watcher.addListener(new FileListener()); + try { + watcherService.add(watcher, ResourceWatcherService.Frequency.HIGH); + } catch (IOException e) { + throw new ElasticsearchException("failed to start file watcher for role mapping file [" + file.toAbsolutePath() + "]", e); + } + } + + @Override + public void refreshRealmOnChange(CachingUsernamePasswordRealm realm) { + addListener(realm::expireAll); + } + + synchronized void addListener(Runnable listener) { + listeners.add(Objects.requireNonNull(listener, "listener cannot be null")); + } + + public static Path resolveFile(Settings settings, Environment env) { + String location = DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING.get(settings); + return XPackPlugin.resolveConfigFile(env, location); + } + + /** + * Internally in this class, we try to load the file, but if for some reason we can't, we're being more lenient by + * logging the error and skipping/removing all mappings. This is aligned with how we handle other auto-loaded files + * in security. 
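A brief sketch of what the lenient loading buys: a missing or malformed mapping file yields an empty map rather than failing the realm. The mapping file is YAML that maps each role name to a list of DNs; the path, `environment` and `logger` below are hypothetical.

```java
// role_mapping.yml (conceptually):
//   superuser: ["cn=admins,ou=groups,dc=example,dc=com"]
//   user:      ["cn=users,ou=groups,dc=example,dc=com", "cn=jdoe,ou=people,dc=example,dc=com"]
Path mappingFile = environment.configFile().resolve("role_mapping.yml"); // hypothetical location
Map<DN, Set<String>> dnToRoles = DnRoleMapper.parseFileLenient(mappingFile, logger, "ldap", "ldap1");
// dnToRoles is empty if the file is missing or unparseable; parseFile(..., true) throws instead.
```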
+ */ + public static Map> parseFileLenient(Path path, Logger logger, String realmType, String realmName) { + try { + return parseFile(path, logger, realmType, realmName, false); + } catch (Exception e) { + logger.error( + (Supplier) () -> new ParameterizedMessage( + "failed to parse role mappings file [{}]. skipping/removing all mappings...", path.toAbsolutePath()), e); + return emptyMap(); + } + } + + public static Map> parseFile(Path path, Logger logger, String realmType, String realmName, boolean strict) { + + logger.trace("reading realm [{}/{}] role mappings file [{}]...", realmType, realmName, path.toAbsolutePath()); + + if (Files.exists(path) == false) { + final ParameterizedMessage message = new ParameterizedMessage( + "Role mapping file [{}] for realm [{}] does not exist.", + path.toAbsolutePath(), realmName); + if (strict) { + throw new ElasticsearchException(message.getFormattedMessage()); + } else { + logger.warn(message.getFormattedMessage() + " Role mapping will be skipped."); + return emptyMap(); + } + } + + try { + Settings settings = Settings.builder().loadFromPath(path).build(); + + Map> dnToRoles = new HashMap<>(); + Set roles = settings.names(); + for (String role : roles) { + for (String providedDn : settings.getAsList(role)) { + try { + DN dn = new DN(providedDn); + Set dnRoles = dnToRoles.get(dn); + if (dnRoles == null) { + dnRoles = new HashSet<>(); + dnToRoles.put(dn, dnRoles); + } + dnRoles.add(role); + } catch (LDAPException e) { + ParameterizedMessage message = new ParameterizedMessage( + "invalid DN [{}] found in [{}] role mappings [{}] for realm [{}/{}].", + providedDn, + realmType, + path.toAbsolutePath(), + realmType, + realmName); + if (strict) { + throw new ElasticsearchException(message.getFormattedMessage(), e); + } else { + logger.error(message.getFormattedMessage() + " skipping...", e); + } + } + } + + } + + logger.debug("[{}] role mappings found in file [{}] for realm [{}/{}]", dnToRoles.size(), path.toAbsolutePath(), realmType, + realmName); + return unmodifiableMap(dnToRoles); + } catch (IOException | SettingsException e) { + throw new ElasticsearchException("could not read realm [" + realmType + "/" + realmName + "] role mappings file [" + + path.toAbsolutePath() + "]", e); + } + } + + int mappingsCount() { + return dnRoles.size(); + } + + @Override + public void resolveRoles(UserData user, ActionListener> listener) { + try { + listener.onResponse(resolveRoles(user.getDn(), user.getGroups())); + } catch (Exception e) { + listener.onFailure(e); + } + } + + /** + * This will map the groupDN's to ES Roles + */ + public Set resolveRoles(String userDnString, Collection groupDns) { + Set roles = new HashSet<>(); + for (String groupDnString : groupDns) { + DN groupDn = dn(groupDnString); + if (dnRoles.containsKey(groupDn)) { + roles.addAll(dnRoles.get(groupDn)); + } else if (useUnmappedGroupsAsRoles) { + roles.add(relativeName(groupDn)); + } + } + if (logger.isDebugEnabled()) { + logger.debug("the roles [{}], are mapped from these [{}] groups [{}] using file [{}] for realm [{}/{}]", roles, config.type(), + groupDns, file.getFileName(), config.type(), config.name()); + } + + DN userDn = dn(userDnString); + Set rolesMappedToUserDn = dnRoles.get(userDn); + if (rolesMappedToUserDn != null) { + roles.addAll(rolesMappedToUserDn); + } + if (logger.isDebugEnabled()) { + logger.debug("the roles [{}], are mapped from the user [{}] using file [{}] for realm [{}/{}]", + (rolesMappedToUserDn == null) ? 
Collections.emptySet() : rolesMappedToUserDn, userDnString, file.getFileName(), + config.type(), config.name()); + } + return roles; + } + + public void notifyRefresh() { + listeners.forEach(Runnable::run); + } + + private class FileListener implements FileChangesListener { + @Override + public void onFileCreated(Path file) { + onFileChanged(file); + } + + @Override + public void onFileDeleted(Path file) { + onFileChanged(file); + } + + @Override + public void onFileChanged(Path file) { + if (file.equals(DnRoleMapper.this.file)) { + logger.info("role mappings file [{}] changed for realm [{}/{}]. updating mappings...", file.toAbsolutePath(), + config.type(), config.name()); + dnRoles = parseFileLenient(file, logger, config.type(), config.name()); + notifyRefresh(); + } + } + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/RoleMappingFileBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/RoleMappingFileBootstrapCheck.java new file mode 100644 index 0000000000000..c4193c1921946 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/RoleMappingFileBootstrapCheck.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.support; + +import java.nio.file.Path; + +import org.elasticsearch.bootstrap.BootstrapCheck; +import org.elasticsearch.bootstrap.BootstrapContext; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings; + +/** + * A BootstrapCheck that {@link DnRoleMapper} files exist and are valid (valid YAML and valid DNs) + */ +public class RoleMappingFileBootstrapCheck implements BootstrapCheck { + + private final RealmConfig realmConfig; + private final Path path; + + RoleMappingFileBootstrapCheck(RealmConfig config, Path path) { + this.realmConfig = config; + this.path = path; + } + + @Override + public BootstrapCheckResult check(BootstrapContext context) { + try { + DnRoleMapper.parseFile(path, realmConfig.logger(getClass()), realmConfig.type(), realmConfig.name(), true); + return BootstrapCheckResult.success(); + } catch (Exception e) { + return BootstrapCheckResult.failure(e.getMessage()); + } + + } + + @Override + public boolean alwaysEnforce() { + return true; + } + + public static BootstrapCheck create(RealmConfig realmConfig) { + if (realmConfig.enabled() && DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING.exists(realmConfig.settings())) { + Path file = DnRoleMapper.resolveFile(realmConfig.settings(), realmConfig.env()); + return new RoleMappingFileBootstrapCheck(realmConfig, file); + } + return null; + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UserRoleMapper.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UserRoleMapper.java new file mode 100644 index 0000000000000..ffdab15e3b507 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UserRoleMapper.java @@ -0,0 +1,246 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.support; + +import com.unboundid.ldap.sdk.DN; +import com.unboundid.ldap.sdk.LDAPException; +import com.unboundid.util.LDAPSDKUsageException; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.util.automaton.CharacterRunAutomaton; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionModel; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; +import org.elasticsearch.xpack.core.security.authz.permission.Role; + +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.function.Predicate; + +/** + * Where a realm uses an authentication method that does not have in-built support for X-Pack + * {@link Role roles}, it may delegate to an implementation of this class the + * responsibility for determining the set of roles that an authenticated user should have. + */ +public interface UserRoleMapper { + /** + * Determines the set of roles that should be applied to the user. + */ + void resolveRoles(UserData user, ActionListener> listener); + + /** + * Informs the mapper that the provided realm should be refreshed when + * the set of role-mappings changes. The realm may be updated for the local node only, or across + * the whole cluster depending on whether this role-mapper has node-local data or cluster-wide + * data. + */ + void refreshRealmOnChange(CachingUsernamePasswordRealm realm); + + /** + * A representation of a user for whom roles should be mapped. + * The user has been authenticated, but does not yet have any roles. + */ + class UserData { + private final String username; + @Nullable + private final String dn; + private final Set groups; + private final Map metadata; + private final RealmConfig realm; + + public UserData(String username, @Nullable String dn, Collection groups, + Map metadata, RealmConfig realm) { + this.username = username; + this.dn = dn; + this.groups = groups == null || groups.isEmpty() + ? Collections.emptySet() : Collections.unmodifiableSet(new HashSet<>(groups)); + this.metadata = metadata == null || metadata.isEmpty() + ? Collections.emptyMap() : Collections.unmodifiableMap(metadata); + this.realm = realm; + } + + /** + * Formats the user data as a {@link ExpressionModel}. + * The model does not have nested values - all values are simple Java values, but keys may + * contain dots. + * For example, the {@link #metadata} values will be stored in the model with a key of + * "metadata.KEY" where KEY is the key from the metadata object. + */ + public ExpressionModel asModel() { + final ExpressionModel model = new ExpressionModel(); + model.defineField("username", username); + model.defineField("dn", dn, new DistinguishedNamePredicate(dn)); + model.defineField("groups", groups, groups.stream() + .>map(DistinguishedNamePredicate::new) + .reduce(Predicate::or) + .orElse(fieldValue -> false) + ); + metadata.keySet().forEach(k -> model.defineField("metadata."
+ k, metadata.get(k))); + model.defineField("realm.name", realm.name()); + return model; + } + + @Override + public String toString() { + return "UserData{" + + "username:" + username + + "; dn:" + dn + + "; groups:" + groups + + "; metadata:" + metadata + + "; realm=" + realm.name() + + '}'; + } + + /** + * The username for the authenticated user. + */ + public String getUsername() { + return username; + } + + /** + * The distinguished name of the authenticated user, if applicable to the + * authentication method used. Otherwise, null. + */ + @Nullable + public String getDn() { + return dn; + } + + /** + * The groups to which the user belongs in the originating user store. Should be empty + * if the user store or authentication method does not support groups. + */ + public Set getGroups() { + return groups; + } + + /** + * Any additional metadata that was provided at authentication time. The set of keys will + * vary according to the authenticating realm. + */ + public Map getMetadata() { + return metadata; + } + + /** + * The realm that authenticated the user. + */ + public RealmConfig getRealm() { + return realm; + } + } + + /** + * A specialised predicate for fields that might be a DistinguishedName (e.g. "dn" or "groups"). + * + * The X500 specs define how to compare DistinguishedNames (but we mostly rely on {@link DN#equals(Object)}), + * which means "CN=me,DC=example,DC=com" should be equal to "cn=me, dc=Example, dc=COM" (and other variations). + + * The {@link FieldExpression} class doesn't know about special rules for special data types, but the + * {@link ExpressionModel} class can take a custom {@code Predicate} that tests whether the data in the model + * matches the {@link FieldExpression.FieldValue value} in the expression. + * + * The string constructor parameter may or may not actually parse as a DN - the "dn" field should + * always be a DN, however groups will be a DN if they're from an LDAP/AD realm, but often won't be for a SAML realm. + * + * Because the {@link FieldExpression.FieldValue} might be a pattern ({@link CharacterRunAutomaton automaton}), + * we sometimes need to do more complex matching than just comparing a DN for equality. + * + */ + class DistinguishedNamePredicate implements Predicate { + private static final Logger LOGGER = Loggers.getLogger(DistinguishedNamePredicate.class); + + private final String string; + private final DN dn; + + public DistinguishedNamePredicate(String string) { + this.string = string; + this.dn = parseDn(string); + } + + private static DN parseDn(String string) { + if (string == null) { + return null; + } else { + try { + return new DN(string); + } catch (LDAPException | LDAPSDKUsageException e) { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(new ParameterizedMessage("failed to parse [{}] as a DN", string), e); + } + return null; + } + } + } + + @Override + public String toString() { + return string; + } + + @Override + public boolean test(FieldExpression.FieldValue fieldValue) { + final CharacterRunAutomaton automaton = fieldValue.getAutomaton(); + if (automaton != null) { + if (automaton.run(string)) { + return true; + } + if (dn != null && automaton.run(dn.toNormalizedString())) { + return true; + } + if (automaton.run(string.toLowerCase(Locale.ROOT)) || automaton.run(string.toUpperCase(Locale.ROOT))) { + return true; + } + if (dn == null) { + return false; + } + + assert fieldValue.getValue() instanceof String : "FieldValue " + fieldValue + " has automaton but value is " + + (fieldValue.getValue() == null ? 
"" : fieldValue.getValue().getClass()); + String pattern = (String) fieldValue.getValue(); + + // If the pattern is "*,dc=example,dc=com" then the rule is actually trying to express a DN sub-tree match. + // We can use dn.isDescendantOf for that + if (pattern.startsWith("*,")) { + final String suffix = pattern.substring(2); + // if the suffix has a wildcard, then it's not a pure sub-tree match + if (suffix.indexOf('*') == -1) { + final DN dnSuffix = parseDn(suffix); + if (dnSuffix != null && dn.isDescendantOf(dnSuffix, false)) { + return true; + } + } + } + + return false; + } + if (fieldValue.getValue() instanceof String) { + final String testString = (String) fieldValue.getValue(); + if (testString.equalsIgnoreCase(string)) { + return true; + } + if (dn == null) { + return false; + } + + final DN testDn = parseDn(testString); + if (testDn != null) { + return dn.equals(testDn); + } + return testString.equalsIgnoreCase(dn.toNormalizedString()); + } + return string == null && fieldValue.getValue() == null; + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UsernamePasswordRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UsernamePasswordRealm.java new file mode 100644 index 0000000000000..b50cba349dc56 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/UsernamePasswordRealm.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.support; + +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; + +abstract class UsernamePasswordRealm extends Realm { + + UsernamePasswordRealm(String type, RealmConfig config) { + super(type, config); + } + + @Override + public UsernamePasswordToken token(ThreadContext threadContext) { + return UsernamePasswordToken.extractToken(threadContext); + } + + public boolean supports(AuthenticationToken token) { + return token instanceof UsernamePasswordToken; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/CompositeRoleMapper.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/CompositeRoleMapper.java new file mode 100644 index 0000000000000..0814469cfcea7 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/CompositeRoleMapper.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.support.mapper; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.GroupedActionListener; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; +import org.elasticsearch.xpack.security.authc.support.DnRoleMapper; +import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; + +/** + * A {@link UserRoleMapper} that composes one or more delegate role-mappers. + * During {@link #resolveRoles(UserData, ActionListener) role resolution}, each of the delegates is + * queried, and the individual results are merged into a single {@link Set} which includes all the roles from each mapper. + */ +public class CompositeRoleMapper implements UserRoleMapper { + + private List delegates; + + public CompositeRoleMapper(String realmType, RealmConfig realmConfig, + ResourceWatcherService watcherService, + NativeRoleMappingStore nativeRoleMappingStore) { + this(new DnRoleMapper(realmConfig, watcherService), nativeRoleMappingStore); + } + + private CompositeRoleMapper(UserRoleMapper... delegates) { + this.delegates = new ArrayList<>(Arrays.asList(delegates)); + } + + @Override + public void resolveRoles(UserData user, ActionListener> listener) { + GroupedActionListener> groupListener = new GroupedActionListener<>(ActionListener.wrap( + composite -> listener.onResponse(composite.stream().flatMap(Set::stream).collect(Collectors.toSet())), listener::onFailure + ), delegates.size(), Collections.emptyList()); + this.delegates.forEach(mapper -> mapper.resolveRoles(user, groupListener)); + } + + @Override + public void refreshRealmOnChange(CachingUsernamePasswordRealm realm) { + this.delegates.forEach(mapper -> mapper.refreshRealmOnChange(realm)); + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java new file mode 100644 index 0000000000000..0fcaf297c0f59 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java @@ -0,0 +1,380 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
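To illustrate how the composite mapper above behaves, a hypothetical sketch; `realmConfig`, `watcherService`, `nativeRoleMappingStore`, `userData` and `logger` are placeholders for objects the realm already has.

```java
// Hypothetical usage: the composite mapper queries every delegate (file-based and native)
// and the listener receives the union of the role sets they resolve.
CompositeRoleMapper mapper = new CompositeRoleMapper("ldap", realmConfig, watcherService, nativeRoleMappingStore);
mapper.resolveRoles(userData, ActionListener.wrap(
        roles -> logger.info("mapped roles {}", roles),   // union of roles from all delegates
        e -> logger.warn("role mapping failed", e)));
```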
+ */ +package org.elasticsearch.xpack.security.authc.support.mapper; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.health.ClusterIndexHealth; +import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.xpack.core.security.ScrollHelper; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheResponse; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingRequest; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionModel; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; +import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.elasticsearch.action.DocWriteResponse.Result.CREATED; +import static org.elasticsearch.action.DocWriteResponse.Result.DELETED; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; +import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.elasticsearch.xpack.security.SecurityLifecycleService.isIndexDeleted; +import static org.elasticsearch.xpack.security.SecurityLifecycleService.isMoveFromRedToNonRed; + +/** + * This store reads + writes {@link ExpressionRoleMapping role mappings} in an Elasticsearch + * {@link SecurityLifecycleService#SECURITY_INDEX_NAME index}. + *
+ * The store is responsible for all read and write operations as well as
+ * {@link #resolveRoles(UserData, ActionListener) resolving roles}.
+ * <p>
+ * No caching is done by this class, it is handled at a higher level and no polling for changes + * is done by this class. Modification operations make a best effort attempt to clear the cache + * on all nodes for the user that was modified. + */ +public class NativeRoleMappingStore extends AbstractComponent implements UserRoleMapper { + + static final String DOC_TYPE_FIELD = "doc_type"; + static final String DOC_TYPE_ROLE_MAPPING = "role-mapping"; + + private static final String ID_PREFIX = DOC_TYPE_ROLE_MAPPING + "_"; + + private static final String SECURITY_GENERIC_TYPE = "doc"; + + private static final ActionListener NO_OP_ACTION_LISTENER = new ActionListener() { + @Override + public void onResponse(Object o) { + // nothing + } + + @Override + public void onFailure(Exception e) { + // nothing + } + }; + + private final Client client; + private final SecurityLifecycleService securityLifecycleService; + private final List realmsToRefresh = new CopyOnWriteArrayList<>(); + + public NativeRoleMappingStore(Settings settings, Client client, SecurityLifecycleService securityLifecycleService) { + super(settings); + this.client = client; + this.securityLifecycleService = securityLifecycleService; + } + + private String getNameFromId(String id) { + assert id.startsWith(ID_PREFIX); + return id.substring(ID_PREFIX.length()); + } + + private String getIdForName(String name) { + return ID_PREFIX + name; + } + + /** + * Loads all mappings from the index. + * package private for unit testing + */ + void loadMappings(ActionListener> listener) { + if (securityLifecycleService.isSecurityIndexOutOfDate()) { + listener.onFailure(new IllegalStateException( + "Security index is not on the current version - the native realm will not be operational until " + + "the upgrade API is run on the security index")); + return; + } + final QueryBuilder query = QueryBuilders.termQuery(DOC_TYPE_FIELD, DOC_TYPE_ROLE_MAPPING); + final Supplier supplier = client.threadPool().getThreadContext().newRestorableContext(false); + try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN)) { + SearchRequest request = client.prepareSearch(SECURITY_INDEX_NAME) + .setScroll(TimeValue.timeValueSeconds(10L)) + .setTypes(SECURITY_GENERIC_TYPE) + .setQuery(query) + .setSize(1000) + .setFetchSource(true) + .request(); + request.indicesOptions().ignoreUnavailable(); + ScrollHelper.fetchAllByEntity(client, request, + new ContextPreservingActionListener<>(supplier, ActionListener.wrap((Collection mappings) -> + listener.onResponse(mappings.stream().filter(Objects::nonNull).collect(Collectors.toList())), + ex -> { + logger.error(new ParameterizedMessage("failed to load role mappings from index [{}] skipping all mappings.", + SECURITY_INDEX_NAME), ex); + listener.onResponse(Collections.emptyList()); + })), + doc -> buildMapping(getNameFromId(doc.getId()), doc.getSourceRef())); + } + } + + private ExpressionRoleMapping buildMapping(String id, BytesReference source) { + try (InputStream stream = source.streamInput(); + XContentParser parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + return ExpressionRoleMapping.parse(id, parser); + } catch (Exception e) { + logger.warn(new ParameterizedMessage("Role mapping [{}] cannot be parsed and will be skipped", id), e); + return null; + } + } + + /** + * Stores (create or update) a single mapping in the index + */ + public void putRoleMapping(PutRoleMappingRequest 
request, ActionListener listener) { + modifyMapping(request.getName(), this::innerPutMapping, request, listener); + } + + /** + * Deletes a named mapping from the index + */ + public void deleteRoleMapping(DeleteRoleMappingRequest request, ActionListener listener) { + modifyMapping(request.getName(), this::innerDeleteMapping, request, listener); + } + + private void modifyMapping(String name, CheckedBiConsumer, Exception> inner, + Request request, ActionListener listener) { + if (securityLifecycleService.isSecurityIndexOutOfDate()) { + listener.onFailure(new IllegalStateException( + "Security index is not on the current version - the native realm will not be operational until " + + "the upgrade API is run on the security index")); + } else { + try { + inner.accept(request, ActionListener.wrap(r -> refreshRealms(listener, r), listener::onFailure)); + } catch (Exception e) { + logger.error(new ParameterizedMessage("failed to modify role-mapping [{}]", name), e); + listener.onFailure(e); + } + } + } + + private void innerPutMapping(PutRoleMappingRequest request, ActionListener listener) { + final ExpressionRoleMapping mapping = request.getMapping(); + securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + final XContentBuilder xContentBuilder; + try { + xContentBuilder = mapping.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS, true); + } catch (IOException e) { + listener.onFailure(e); + return; + } + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, + client.prepareIndex(SECURITY_INDEX_NAME, SECURITY_GENERIC_TYPE, getIdForName(mapping.getName())) + .setSource(xContentBuilder) + .setRefreshPolicy(request.getRefreshPolicy()) + .request(), + new ActionListener() { + @Override + public void onResponse(IndexResponse indexResponse) { + boolean created = indexResponse.getResult() == CREATED; + listener.onResponse(created); + } + + @Override + public void onFailure(Exception e) { + logger.error(new ParameterizedMessage("failed to put role-mapping [{}]", mapping.getName()), e); + listener.onFailure(e); + } + }, client::index); + }); + } + + private void innerDeleteMapping(DeleteRoleMappingRequest request, ActionListener listener) throws IOException { + if (securityLifecycleService.isSecurityIndexOutOfDate()) { + listener.onFailure(new IllegalStateException( + "Security index is not on the current version - the native realm will not be operational until " + + "the upgrade API is run on the security index")); + return; + } + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, + client.prepareDelete(SECURITY_INDEX_NAME, SECURITY_GENERIC_TYPE, getIdForName(request.getName())) + .setRefreshPolicy(request.getRefreshPolicy()) + .request(), + new ActionListener() { + + @Override + public void onResponse(DeleteResponse deleteResponse) { + boolean deleted = deleteResponse.getResult() == DELETED; + listener.onResponse(deleted); + } + + @Override + public void onFailure(Exception e) { + logger.error(new ParameterizedMessage("failed to delete role-mapping [{}]", request.getName()), e); + listener.onFailure(e); + + } + }, client::delete); + } + + /** + * Retrieves one or more mappings from the index. + * If names is null or {@link Set#isEmpty empty}, then this retrieves all mappings. + * Otherwise it retrieves the specified mappings by name. 
+ */ + public void getRoleMappings(Set names, ActionListener> listener) { + if (names == null || names.isEmpty()) { + getMappings(listener); + } else { + getMappings(new ActionListener>() { + @Override + public void onResponse(List mappings) { + final List filtered = mappings.stream() + .filter(m -> names.contains(m.getName())) + .collect(Collectors.toList()); + listener.onResponse(filtered); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } + } + + private void getMappings(ActionListener> listener) { + if (securityLifecycleService.isSecurityIndexAvailable()) { + loadMappings(listener); + } else { + logger.info("The security index is not yet available - no role mappings can be loaded"); + if (logger.isDebugEnabled()) { + logger.debug("Security Index [{}] [exists: {}] [available: {}] [mapping up to date: {}]", + SECURITY_INDEX_NAME, + securityLifecycleService.isSecurityIndexExisting(), + securityLifecycleService.isSecurityIndexAvailable(), + securityLifecycleService.isSecurityIndexMappingUpToDate() + ); + } + listener.onResponse(Collections.emptyList()); + } + } + + /** + * Provides usage statistics for this store. + * The resulting map contains the keys + *
+ * <ul>
+ * <li><code>size</code> - The total number of mappings stored in the index</li>
+ * <li><code>enabled</code> - The number of mappings that are
+ * {@link ExpressionRoleMapping#isEnabled() enabled}</li>
+ * </ul>
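+ * For example, a store holding three mappings of which two are enabled reports {@code size=3} and {@code enabled=2}.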
+ */ + public void usageStats(ActionListener> listener) { + if (securityLifecycleService.isSecurityIndexExisting() == false) { + reportStats(listener, Collections.emptyList()); + } else { + getMappings(ActionListener.wrap(mappings -> reportStats(listener, mappings), listener::onFailure)); + } + } + + private void reportStats(ActionListener> listener, List mappings) { + Map usageStats = new HashMap<>(); + usageStats.put("size", mappings.size()); + usageStats.put("enabled", mappings.stream().filter(ExpressionRoleMapping::isEnabled).count()); + listener.onResponse(usageStats); + } + + public void onSecurityIndexHealthChange(ClusterIndexHealth previousHealth, ClusterIndexHealth currentHealth) { + if (isMoveFromRedToNonRed(previousHealth, currentHealth) || isIndexDeleted(previousHealth, currentHealth)) { + refreshRealms(NO_OP_ACTION_LISTENER, null); + } + } + + public void onSecurityIndexOutOfDateChange(boolean prevOutOfDate, boolean outOfDate) { + assert prevOutOfDate != outOfDate : "this method should only be called if the two values are different"; + refreshRealms(NO_OP_ACTION_LISTENER, null); + } + + private void refreshRealms(ActionListener listener, Result result) { + String[] realmNames = this.realmsToRefresh.toArray(new String[realmsToRefresh.size()]); + final SecurityClient securityClient = new SecurityClient(client); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, + securityClient.prepareClearRealmCache().realms(realmNames).request(), + ActionListener.wrap( + response -> { + logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + "Cleared cached in realms [{}] due to role mapping change", Arrays.toString(realmNames))); + listener.onResponse(result); + }, + ex -> { + logger.warn("Failed to clear cache for realms [{}]", Arrays.toString(realmNames)); + listener.onFailure(ex); + }), + securityClient::clearRealmCache); + } + + @Override + public void resolveRoles(UserData user, ActionListener> listener) { + getRoleMappings(null, ActionListener.wrap( + mappings -> { + final ExpressionModel model = user.asModel(); + Stream stream = mappings.stream() + .filter(ExpressionRoleMapping::isEnabled) + .filter(m -> m.getExpression().match(model)); + if (logger.isTraceEnabled()) { + stream = stream.map(m -> { + logger.trace("User [{}] matches role-mapping [{}] with roles [{}]", user.getUsername(), m.getName(), + m.getRoles()); + return m; + }); + } + final Set roles = stream.flatMap(m -> m.getRoles().stream()).collect(Collectors.toSet()); + logger.debug("Mapping user [{}] to roles [{}]", user, roles); + listener.onResponse(roles); + }, listener::onFailure + )); + } + + /** + * Indicates that the provided realm should have its cache cleared if this store is updated + * (that is, {@link #putRoleMapping(PutRoleMappingRequest, ActionListener)} or + * {@link #deleteRoleMapping(DeleteRoleMappingRequest, ActionListener)} are called). + * @see ClearRealmCacheAction + */ + @Override + public void refreshRealmOnChange(CachingUsernamePasswordRealm realm) { + realmsToRefresh.add(realm.name()); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java new file mode 100644 index 0000000000000..8ab48a0320602 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java @@ -0,0 +1,585 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authz; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.CompositeIndicesRequest; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.bulk.BulkAction; +import org.elasticsearch.action.bulk.BulkItemRequest; +import org.elasticsearch.action.bulk.BulkShardRequest; +import org.elasticsearch.action.bulk.TransportShardBulkAction; +import org.elasticsearch.action.delete.DeleteAction; +import org.elasticsearch.action.get.MultiGetAction; +import org.elasticsearch.action.index.IndexAction; +import org.elasticsearch.action.search.ClearScrollAction; +import org.elasticsearch.action.search.MultiSearchAction; +import org.elasticsearch.action.search.SearchScrollAction; +import org.elasticsearch.action.search.SearchTransportService; +import org.elasticsearch.action.support.replication.TransportReplicationAction.ConcreteShardRequest; +import org.elasticsearch.action.termvectors.MultiTermVectorsAction; +import org.elasticsearch.action.update.UpdateAction; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportActionProxy; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordAction; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesAction; +import org.elasticsearch.xpack.core.security.action.user.UserRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.AuthenticationFailureHandler; +import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings; +import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; +import org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; +import org.elasticsearch.xpack.core.security.support.Automatons; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; 
+import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.user.XPackSecurityUser; +import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.elasticsearch.xpack.security.audit.AuditTrailService; +import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; +import org.elasticsearch.xpack.security.authz.IndicesAndAliasesResolver.ResolvedIndices; +import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Predicate; + +import static org.elasticsearch.xpack.core.security.SecurityField.setting; +import static org.elasticsearch.xpack.core.security.support.Exceptions.authorizationError; + +public class AuthorizationService extends AbstractComponent { + public static final Setting ANONYMOUS_AUTHORIZATION_EXCEPTION_SETTING = + Setting.boolSetting(setting("authc.anonymous.authz_exception"), true, Property.NodeScope); + public static final String ORIGINATING_ACTION_KEY = "_originating_action_name"; + public static final String ROLE_NAMES_KEY = "_effective_role_names"; + + private static final Predicate MONITOR_INDEX_PREDICATE = IndexPrivilege.MONITOR.predicate(); + private static final Predicate SAME_USER_PRIVILEGE = Automatons.predicate( + ChangePasswordAction.NAME, AuthenticateAction.NAME, HasPrivilegesAction.NAME); + + private static final String INDEX_SUB_REQUEST_PRIMARY = IndexAction.NAME + "[p]"; + private static final String INDEX_SUB_REQUEST_REPLICA = IndexAction.NAME + "[r]"; + private static final String DELETE_SUB_REQUEST_PRIMARY = DeleteAction.NAME + "[p]"; + private static final String DELETE_SUB_REQUEST_REPLICA = DeleteAction.NAME + "[r]"; + + private final ClusterService clusterService; + private final CompositeRolesStore rolesStore; + private final AuditTrailService auditTrail; + private final IndicesAndAliasesResolver indicesAndAliasesResolver; + private final AuthenticationFailureHandler authcFailureHandler; + private final ThreadContext threadContext; + private final AnonymousUser anonymousUser; + private final FieldPermissionsCache fieldPermissionsCache; + private final boolean isAnonymousEnabled; + private final boolean anonymousAuthzExceptionEnabled; + + public AuthorizationService(Settings settings, CompositeRolesStore rolesStore, ClusterService clusterService, + AuditTrailService auditTrail, AuthenticationFailureHandler authcFailureHandler, + ThreadPool threadPool, AnonymousUser anonymousUser) { + super(settings); + this.rolesStore = rolesStore; + this.clusterService = clusterService; + this.auditTrail = auditTrail; + this.indicesAndAliasesResolver = new IndicesAndAliasesResolver(settings, clusterService); + this.authcFailureHandler = authcFailureHandler; + this.threadContext = threadPool.getThreadContext(); + this.anonymousUser = anonymousUser; + this.isAnonymousEnabled = AnonymousUser.isAnonymousEnabled(settings); + this.anonymousAuthzExceptionEnabled = ANONYMOUS_AUTHORIZATION_EXCEPTION_SETTING.get(settings); + this.fieldPermissionsCache = new FieldPermissionsCache(settings); + } + + /** + * Verifies that the given user can execute the given request (and action). 
If the user doesn't + * have the appropriate privileges for this action/request, an {@link ElasticsearchSecurityException} + * will be thrown. + * + * @param authentication The authentication information + * @param action The action + * @param request The request + * @throws ElasticsearchSecurityException If the given user is no allowed to execute the given request + */ + public void authorize(Authentication authentication, String action, TransportRequest request, Role userRole, + Role runAsRole) throws ElasticsearchSecurityException { + final TransportRequest originalRequest = request; + if (request instanceof ConcreteShardRequest) { + request = ((ConcreteShardRequest) request).getRequest(); + assert TransportActionProxy.isProxyRequest(request) == false : "expected non-proxy request for action: " + action; + } else { + request = TransportActionProxy.unwrapRequest(request); + if (TransportActionProxy.isProxyRequest(originalRequest) && TransportActionProxy.isProxyAction(action) == false) { + throw new IllegalStateException("originalRequest is a proxy request for: [" + request + "] but action: [" + + action + "] isn't"); + } + } + // prior to doing any authorization lets set the originating action in the context only + putTransientIfNonExisting(ORIGINATING_ACTION_KEY, action); + + // first we need to check if the user is the system. If it is, we'll just authorize the system access + if (SystemUser.is(authentication.getUser())) { + if (SystemUser.isAuthorized(action)) { + putTransientIfNonExisting(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, IndicesAccessControl.ALLOW_ALL); + putTransientIfNonExisting(ROLE_NAMES_KEY, new String[] { SystemUser.ROLE_NAME }); + auditTrail.accessGranted(authentication, action, request, new String[] { SystemUser.ROLE_NAME }); + return; + } + throw denial(authentication, action, request, new String[] { SystemUser.ROLE_NAME }); + } + + // get the roles of the authenticated user, which may be different than the effective + Role permission = userRole; + + // check if the request is a run as request + final boolean isRunAs = authentication.getUser().isRunAs(); + if (isRunAs) { + // if we are running as a user we looked up then the authentication must contain a lookedUpBy. If it doesn't then this user + // doesn't really exist but the authc service allowed it through to avoid leaking users that exist in the system + if (authentication.getLookedUpBy() == null) { + throw denyRunAs(authentication, action, request, permission.names()); + } else if (permission.runAs().check(authentication.getUser().principal())) { + auditTrail.runAsGranted(authentication, action, request, permission.names()); + permission = runAsRole; + } else { + throw denyRunAs(authentication, action, request, permission.names()); + } + } + putTransientIfNonExisting(ROLE_NAMES_KEY, permission.names()); + + // first, we'll check if the action is a cluster action. If it is, we'll only check it against the cluster permissions + if (ClusterPrivilege.ACTION_MATCHER.test(action)) { + ClusterPermission cluster = permission.cluster(); + if (cluster.check(action) || checkSameUserPermissions(action, request, authentication)) { + putTransientIfNonExisting(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, IndicesAccessControl.ALLOW_ALL); + auditTrail.accessGranted(authentication, action, request, permission.names()); + return; + } + throw denial(authentication, action, request, permission.names()); + } + + // ok... 
this is not a cluster action, let's verify it's an indices action + if (!IndexPrivilege.ACTION_MATCHER.test(action)) { + throw denial(authentication, action, request, permission.names()); + } + + //composite actions are explicitly listed and will be authorized at the sub-request / shard level + if (isCompositeAction(action)) { + if (request instanceof CompositeIndicesRequest == false) { + throw new IllegalStateException("Composite actions must implement " + CompositeIndicesRequest.class.getSimpleName() + + ", " + request.getClass().getSimpleName() + " doesn't"); + } + // we check if the user can execute the action, without looking at indices, which will be authorized at the shard level + if (permission.indices().check(action)) { + auditTrail.accessGranted(authentication, action, request, permission.names()); + return; + } + throw denial(authentication, action, request, permission.names()); + } else if (isTranslatedToBulkAction(action)) { + if (request instanceof CompositeIndicesRequest == false) { + throw new IllegalStateException("Bulk translated actions must implement " + CompositeIndicesRequest.class.getSimpleName() + + ", " + request.getClass().getSimpleName() + " doesn't"); + } + // we check if the user can execute the action, without looking at indices, which will be authorized at the shard level + if (permission.indices().check(action)) { + auditTrail.accessGranted(authentication, action, request, permission.names()); + return; + } + throw denial(authentication, action, request, permission.names()); + } else if (TransportActionProxy.isProxyAction(action)) { + // we authorize proxied actions once they are "unwrapped" on the next node + if (TransportActionProxy.isProxyRequest(originalRequest) == false) { + throw new IllegalStateException("originalRequest is not a proxy request: [" + originalRequest + "] but action: [" + + action + "] is a proxy action"); + } + if (permission.indices().check(action)) { + auditTrail.accessGranted(authentication, action, request, permission.names()); + return; + } else { + // we do this here in addition to the denial below since we might run into an assertion on scroll request below if we + // don't have permission to read cross cluster but wrap a scroll request. + throw denial(authentication, action, request, permission.names()); + } + } + + // some APIs are indices requests that are not actually associated with indices. For example, + // search scroll request, is categorized under the indices context, but doesn't hold indices names + // (in this case, the security check on the indices was done on the search request that initialized + // the scroll. Given that scroll is implemented using a context on the node holding the shard, we + // piggyback on it and enhance the context with the original authentication. This serves as our method + // to validate the scroll id only stays with the same user! + if (request instanceof IndicesRequest == false && request instanceof IndicesAliasesRequest == false) { + //note that clear scroll shard level actions can originate from a clear scroll all, which doesn't require any + //indices permission as it's categorized under cluster. This is why the scroll check is performed + //even before checking if the user has any indices permission. 
+ if (isScrollRelatedAction(action)) { + // if the action is a search scroll action, we first authorize that the user can execute the action for some + // index and if they cannot, we can fail the request early before we allow the execution of the action and in + // turn the shard actions + if (SearchScrollAction.NAME.equals(action) && permission.indices().check(action) == false) { + throw denial(authentication, action, request, permission.names()); + } else { + // we store the request as a transient in the ThreadContext in case of a authorization failure at the shard + // level. If authorization fails we will audit a access_denied message and will use the request to retrieve + // information such as the index and the incoming address of the request + auditTrail.accessGranted(authentication, action, request, permission.names()); + return; + } + } else { + assert false : + "only scroll related requests are known indices api that don't support retrieving the indices they relate to"; + throw denial(authentication, action, request, permission.names()); + } + } + + final boolean allowsRemoteIndices = request instanceof IndicesRequest + && IndicesAndAliasesResolver.allowsRemoteIndices((IndicesRequest) request); + + // If this request does not allow remote indices + // then the user must have permission to perform this action on at least 1 local index + if (allowsRemoteIndices == false && permission.indices().check(action) == false) { + throw denial(authentication, action, request, permission.names()); + } + + final MetaData metaData = clusterService.state().metaData(); + final AuthorizedIndices authorizedIndices = new AuthorizedIndices(authentication.getUser(), permission, action, metaData); + final ResolvedIndices resolvedIndices = resolveIndexNames(authentication, action, request, + metaData, authorizedIndices, permission); + assert !resolvedIndices.isEmpty() + : "every indices request needs to have its indices set thus the resolved indices must not be empty"; + + // If this request does reference any remote indices + // then the user must have permission to perform this action on at least 1 local index + if (resolvedIndices.getRemote().isEmpty() && permission.indices().check(action) == false) { + throw denial(authentication, action, request, permission.names()); + } + + //all wildcard expressions have been resolved and only the security plugin could have set '-*' here. + //'-*' matches no indices so we allow the request to go through, which will yield an empty response + if (resolvedIndices.isNoIndicesPlaceholder()) { + putTransientIfNonExisting(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, IndicesAccessControl.ALLOW_NO_INDICES); + auditTrail.accessGranted(authentication, action, request, permission.names()); + return; + } + + final Set localIndices = new HashSet<>(resolvedIndices.getLocal()); + IndicesAccessControl indicesAccessControl = permission.authorize(action, localIndices, metaData, fieldPermissionsCache); + if (!indicesAccessControl.isGranted()) { + throw denial(authentication, action, request, permission.names()); + } else if (hasSecurityIndexAccess(indicesAccessControl) + && MONITOR_INDEX_PREDICATE.test(action) == false + && isSuperuser(authentication.getUser()) == false) { + // only the XPackUser is allowed to work with this index, but we should allow indices monitoring actions through for debugging + // purposes. 
These monitor requests also sometimes resolve indices concretely and then requests them + logger.debug("user [{}] attempted to directly perform [{}] against the security index [{}]", + authentication.getUser().principal(), action, SecurityLifecycleService.SECURITY_INDEX_NAME); + throw denial(authentication, action, request, permission.names()); + } else { + putTransientIfNonExisting(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, indicesAccessControl); + } + + //if we are creating an index we need to authorize potential aliases created at the same time + if (IndexPrivilege.CREATE_INDEX_MATCHER.test(action)) { + assert request instanceof CreateIndexRequest; + Set aliases = ((CreateIndexRequest) request).aliases(); + if (!aliases.isEmpty()) { + Set aliasesAndIndices = Sets.newHashSet(localIndices); + for (Alias alias : aliases) { + aliasesAndIndices.add(alias.name()); + } + indicesAccessControl = permission.authorize("indices:admin/aliases", aliasesAndIndices, metaData, fieldPermissionsCache); + if (!indicesAccessControl.isGranted()) { + throw denial(authentication, "indices:admin/aliases", request, permission.names()); + } + // no need to re-add the indicesAccessControl in the context, + // because the create index call doesn't do anything FLS or DLS + } + } + + if (action.equals(TransportShardBulkAction.ACTION_NAME)) { + // is this is performing multiple actions on the index, then check each of those actions. + assert request instanceof BulkShardRequest + : "Action " + action + " requires " + BulkShardRequest.class + " but was " + request.getClass(); + + authorizeBulkItems(authentication, (BulkShardRequest) request, permission, metaData, localIndices, authorizedIndices); + } + + auditTrail.accessGranted(authentication, action, request, permission.names()); + } + + private boolean hasSecurityIndexAccess(IndicesAccessControl indicesAccessControl) { + for (String index : SecurityLifecycleService.indexNames()) { + final IndicesAccessControl.IndexAccessControl indexPermissions = indicesAccessControl.getIndexPermissions(index); + if (indexPermissions != null && indexPermissions.isGranted()) { + return true; + } + } + return false; + } + + /** + * Performs authorization checks on the items within a {@link BulkShardRequest}. + * This inspects the {@link BulkItemRequest items} within the request, computes + * an implied action for each item's {@link DocWriteRequest#opType()}, + * and then checks whether that action is allowed on the targeted index. Items + * that fail this checks are {@link BulkItemRequest#abort(String, Exception) + * aborted}, with an + * {@link #denial(Authentication, String, TransportRequest, String[]) access + * denied} exception. Because a shard level request is for exactly 1 index, and + * there are a small number of possible item {@link DocWriteRequest.OpType + * types}, the number of distinct authorization checks that need to be performed + * is very small, but the results must be cached, to avoid adding a high + * overhead to each bulk request. 
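+ * For example, a shard-level request targets exactly one index and each item maps to one of three
+ * possible actions (index, update or delete), so at most three distinct checks are computed per request.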
+ */ + private void authorizeBulkItems(Authentication authentication, BulkShardRequest request, Role permission, + MetaData metaData, Set indices, AuthorizedIndices authorizedIndices) { + // Maps original-index -> expanded-index-name (expands date-math, but not aliases) + final Map resolvedIndexNames = new HashMap<>(); + // Maps (resolved-index , action) -> is-granted + final Map, Boolean> indexActionAuthority = new HashMap<>(); + for (BulkItemRequest item : request.items()) { + String resolvedIndex = resolvedIndexNames.computeIfAbsent(item.index(), key -> { + final ResolvedIndices resolvedIndices = indicesAndAliasesResolver.resolveIndicesAndAliases(item.request(), metaData, + authorizedIndices); + if (resolvedIndices.getRemote().size() != 0) { + throw illegalArgument("Bulk item should not write to remote indices, but request writes to " + + String.join(",", resolvedIndices.getRemote())); + } + if (resolvedIndices.getLocal().size() != 1) { + throw illegalArgument("Bulk item should write to exactly 1 index, but request writes to " + + String.join(",", resolvedIndices.getLocal())); + } + final String resolved = resolvedIndices.getLocal().get(0); + if (indices.contains(resolved) == false) { + throw illegalArgument("Found bulk item that writes to index " + resolved + " but the request writes to " + indices); + } + return resolved; + }); + final String itemAction = getAction(item); + final Tuple indexAndAction = new Tuple<>(resolvedIndex, itemAction); + final boolean granted = indexActionAuthority.computeIfAbsent(indexAndAction, key -> { + final IndicesAccessControl itemAccessControl = permission.authorize(itemAction, Collections.singleton(resolvedIndex), + metaData, fieldPermissionsCache); + return itemAccessControl.isGranted(); + }); + if (granted == false) { + item.abort(resolvedIndex, denial(authentication, itemAction, request, permission.names())); + } + } + } + + private IllegalArgumentException illegalArgument(String message) { + assert false : message; + return new IllegalArgumentException(message); + } + + private static String getAction(BulkItemRequest item) { + final DocWriteRequest docWriteRequest = item.request(); + switch (docWriteRequest.opType()) { + case INDEX: + case CREATE: + return IndexAction.NAME; + case UPDATE: + return UpdateAction.NAME; + case DELETE: + return DeleteAction.NAME; + } + throw new IllegalArgumentException("No equivalent action for opType [" + docWriteRequest.opType() + "]"); + } + + private ResolvedIndices resolveIndexNames(Authentication authentication, String action, TransportRequest request, + MetaData metaData, AuthorizedIndices authorizedIndices, Role permission) { + try { + return indicesAndAliasesResolver.resolve(request, metaData, authorizedIndices); + } catch (Exception e) { + auditTrail.accessDenied(authentication, action, request, permission.names()); + throw e; + } + } + + private void putTransientIfNonExisting(String key, Object value) { + Object existing = threadContext.getTransient(key); + if (existing == null) { + threadContext.putTransient(key, value); + } + } + + public void roles(User user, ActionListener roleActionListener) { + // we need to special case the internal users in this method, if we apply the anonymous roles to every user including these system + // user accounts then we run into the chance of a deadlock because then we need to get a role that we may be trying to get as the + // internal user. 
The SystemUser is special cased as it has special privileges to execute internal actions and should never be + // passed into this method. The XPackUser has the Superuser role and we can simply return that + if (SystemUser.is(user)) { + throw new IllegalArgumentException("the user [" + user.principal() + "] is the system user and we should never try to get its" + + " roles"); + } + if (XPackUser.is(user)) { + assert XPackUser.INSTANCE.roles().length == 1; + roleActionListener.onResponse(XPackUser.ROLE); + return; + } + if (XPackSecurityUser.is(user)) { + roleActionListener.onResponse(ReservedRolesStore.SUPERUSER_ROLE); + return; + } + + Set roleNames = new HashSet<>(); + Collections.addAll(roleNames, user.roles()); + if (isAnonymousEnabled && anonymousUser.equals(user) == false) { + if (anonymousUser.roles().length == 0) { + throw new IllegalStateException("anonymous is only enabled when the anonymous user has roles"); + } + Collections.addAll(roleNames, anonymousUser.roles()); + } + + if (roleNames.isEmpty()) { + roleActionListener.onResponse(Role.EMPTY); + } else if (roleNames.contains(ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR.getName())) { + roleActionListener.onResponse(ReservedRolesStore.SUPERUSER_ROLE); + } else { + rolesStore.roles(roleNames, fieldPermissionsCache, roleActionListener); + } + } + + private static boolean isCompositeAction(String action) { + return action.equals(BulkAction.NAME) || + action.equals(MultiGetAction.NAME) || + action.equals(MultiTermVectorsAction.NAME) || + action.equals(MultiSearchAction.NAME) || + action.equals("indices:data/read/mpercolate") || + action.equals("indices:data/read/msearch/template") || + action.equals("indices:data/read/search/template") || + action.equals("indices:data/write/reindex") || + action.equals("indices:data/read/sql") || + action.equals("indices:data/read/sql/translate"); + } + + private static boolean isTranslatedToBulkAction(String action) { + return action.equals(IndexAction.NAME) || + action.equals(DeleteAction.NAME) || + action.equals(INDEX_SUB_REQUEST_PRIMARY) || + action.equals(INDEX_SUB_REQUEST_REPLICA) || + action.equals(DELETE_SUB_REQUEST_PRIMARY) || + action.equals(DELETE_SUB_REQUEST_REPLICA); + } + + private static boolean isScrollRelatedAction(String action) { + return action.equals(SearchScrollAction.NAME) || + action.equals(SearchTransportService.FETCH_ID_SCROLL_ACTION_NAME) || + action.equals(SearchTransportService.QUERY_FETCH_SCROLL_ACTION_NAME) || + action.equals(SearchTransportService.QUERY_SCROLL_ACTION_NAME) || + action.equals(SearchTransportService.FREE_CONTEXT_SCROLL_ACTION_NAME) || + action.equals(ClearScrollAction.NAME) || + action.equals("indices:data/read/sql/close_cursor") || + action.equals(SearchTransportService.CLEAR_SCROLL_CONTEXTS_ACTION_NAME); + } + + static boolean checkSameUserPermissions(String action, TransportRequest request, Authentication authentication) { + final boolean actionAllowed = SAME_USER_PRIVILEGE.test(action); + if (actionAllowed) { + if (request instanceof UserRequest == false) { + assert false : "right now only a user request should be allowed"; + return false; + } + UserRequest userRequest = (UserRequest) request; + String[] usernames = userRequest.usernames(); + if (usernames == null || usernames.length != 1 || usernames[0] == null) { + assert false : "this role should only be used for actions to apply to a single user"; + return false; + } + final String username = usernames[0]; + final boolean sameUsername = authentication.getUser().principal().equals(username); + if 
(sameUsername && ChangePasswordAction.NAME.equals(action)) { + return checkChangePasswordAction(authentication); + } + + assert AuthenticateAction.NAME.equals(action) || HasPrivilegesAction.NAME.equals(action) || sameUsername == false + : "Action '" + action + "' should not be possible when sameUsername=" + sameUsername; + return sameUsername; + } + return false; + } + + private static boolean checkChangePasswordAction(Authentication authentication) { + // we need to verify that this user was authenticated by or looked up by a realm type that support password changes + // otherwise we open ourselves up to issues where a user in a different realm could be created with the same username + // and do malicious things + final boolean isRunAs = authentication.getUser().isRunAs(); + final String realmType; + if (isRunAs) { + realmType = authentication.getLookedUpBy().getType(); + } else { + realmType = authentication.getAuthenticatedBy().getType(); + } + + assert realmType != null; + // ensure the user was authenticated by a realm that we can change a password for. The native realm is an internal realm and + // right now only one can exist in the realm configuration - if this changes we should update this check + return ReservedRealm.TYPE.equals(realmType) || NativeRealmSettings.TYPE.equals(realmType); + } + + ElasticsearchSecurityException denial(Authentication authentication, String action, TransportRequest request, String[] roleNames) { + auditTrail.accessDenied(authentication, action, request, roleNames); + return denialException(authentication, action); + } + + private ElasticsearchSecurityException denyRunAs(Authentication authentication, String action, TransportRequest request, + String[] roleNames) { + auditTrail.runAsDenied(authentication, action, request, roleNames); + return denialException(authentication, action); + } + + private ElasticsearchSecurityException denialException(Authentication authentication, String action) { + final User authUser = authentication.getUser().authenticatedUser(); + // Special case for anonymous user + if (isAnonymousEnabled && anonymousUser.equals(authUser)) { + if (anonymousAuthzExceptionEnabled == false) { + throw authcFailureHandler.authenticationRequired(action, threadContext); + } + } + // check for run as + if (authentication.getUser().isRunAs()) { + return authorizationError("action [{}] is unauthorized for user [{}] run as [{}]", action, authUser.principal(), + authentication.getUser().principal()); + } + return authorizationError("action [{}] is unauthorized for user [{}]", action, authUser.principal()); + } + + static boolean isSuperuser(User user) { + return Arrays.stream(user.roles()) + .anyMatch(ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR.getName()::equals); + } + + public static void addSettings(List> settings) { + settings.add(ANONYMOUS_AUTHORIZATION_EXCEPTION_SETTING); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java new file mode 100644 index 0000000000000..67e21aadcbceb --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java @@ -0,0 +1,181 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authz; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.AuthenticationField; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.support.Automatons; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.XPackSecurityUser; +import org.elasticsearch.xpack.core.security.user.XPackUser; + +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Predicate; + +import static org.elasticsearch.xpack.core.ClientHelper.DEPRECATION_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.MONITORING_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.PERSISTENT_TASK_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.ROLLUP_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; + +public final class AuthorizationUtils { + + private static final Predicate INTERNAL_PREDICATE = Automatons.predicate("internal:*"); + + private AuthorizationUtils() {} + + /** + * This method is used to determine if a request should be executed as the system user, even if the request already + * has a user associated with it. + * + * In order for the user to be replaced by the system user one of the following conditions must be true: + * + *
+ * <ul>
+ * <li>the action is an internal action and no user is associated with the request</li>
+ * <li>the action is an internal action and the thread context contains a non-internal action as the
+ * originating action</li>
+ * </ul>
+ * + * @param threadContext the {@link ThreadContext} that contains the headers and context associated with the request + * @param action the action name that is being executed + * @return true if the system user should be used to execute a request + */ + public static boolean shouldReplaceUserWithSystem(ThreadContext threadContext, String action) { + // the action must be internal OR the thread context must be a system context. + if (threadContext.isSystemContext() == false && isInternalAction(action) == false) { + return false; + } + + // there is no authentication object AND we are executing in a system context OR an internal action + // AND there + Authentication authentication = threadContext.getTransient(AuthenticationField.AUTHENTICATION_KEY); + if (authentication == null && threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME) == null) { + return true; + } + + // we have a internal action being executed by a user other than the system user, lets verify that there is a + // originating action that is not a internal action. We verify that there must be a originating action as an + // internal action should never be called by user code from a client + final String originatingAction = threadContext.getTransient(AuthorizationService.ORIGINATING_ACTION_KEY); + if (originatingAction != null && isInternalAction(originatingAction) == false) { + return true; + } + + // either there was no originating action or the originating action was an internal action, + // we should not replace under these circumstances + return false; + } + + /** + * Returns true if the thread context contains the origin of the action and does not have any authentication + */ + public static boolean shouldSetUserBasedOnActionOrigin(ThreadContext context) { + final String actionOrigin = context.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME); + final Authentication authentication = context.getTransient(AuthenticationField.AUTHENTICATION_KEY); + return actionOrigin != null && authentication == null; + } + + /** + * Stashes the current context and executes the consumer as the proper user based on the origin of the action. + * + * This method knows nothing about listeners so it is important that callers ensure their listeners preserve their + * context and restore it appropriately. 
+ */ + public static void switchUserBasedOnActionOriginAndExecute(ThreadContext threadContext, SecurityContext securityContext, + Consumer consumer) { + final String actionOrigin = threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME); + if (actionOrigin == null) { + assert false : "cannot switch user if there is no action origin"; + throw new IllegalStateException("cannot switch user if there is no action origin"); + } + + switch (actionOrigin) { + case SECURITY_ORIGIN: + securityContext.executeAsUser(XPackSecurityUser.INSTANCE, consumer, Version.CURRENT); + break; + case WATCHER_ORIGIN: + case ML_ORIGIN: + case MONITORING_ORIGIN: + case DEPRECATION_ORIGIN: + case PERSISTENT_TASK_ORIGIN: + case ROLLUP_ORIGIN: + securityContext.executeAsUser(XPackUser.INSTANCE, consumer, Version.CURRENT); + break; + default: + assert false : "action.origin [" + actionOrigin + "] is unknown!"; + throw new IllegalStateException("action.origin [" + actionOrigin + "] should always be a known value"); + } + } + + private static boolean isInternalAction(String action) { + return INTERNAL_PREDICATE.test(action); + } + + /** + * A base class to authorize authorize a given {@link Authentication} against it's users or run-as users roles. + * This class fetches the roles for the users asynchronously and then authenticates the in the callback. + */ + public static class AsyncAuthorizer { + + private final ActionListener listener; + private final BiConsumer consumer; + private final Authentication authentication; + private volatile Role userRoles; + private volatile Role runAsRoles; + private CountDown countDown = new CountDown(2); // we expect only two responses!! + + public AsyncAuthorizer(Authentication authentication, ActionListener listener, BiConsumer consumer) { + this.consumer = consumer; + this.listener = listener; + this.authentication = authentication; + } + + public void authorize(AuthorizationService service) { + if (SystemUser.is(authentication.getUser().authenticatedUser())) { + assert authentication.getUser().isRunAs() == false; + setUserRoles(null); // we can inform the listener immediately - nothing to fetch for us on system user + setRunAsRoles(null); + } else { + service.roles(authentication.getUser().authenticatedUser(), ActionListener.wrap(this::setUserRoles, listener::onFailure)); + if (authentication.getUser().isRunAs()) { + service.roles(authentication.getUser(), ActionListener.wrap(this::setRunAsRoles, listener::onFailure)); + } else { + setRunAsRoles(null); + } + } + } + + private void setUserRoles(Role roles) { + this.userRoles = roles; + maybeRun(); + } + + private void setRunAsRoles(Role roles) { + this.runAsRoles = roles; + maybeRun(); + } + + private void maybeRun() { + if (countDown.countDown()) { + try { + consumer.accept(userRoles, runAsRoles); + } catch (Exception e) { + listener.onFailure(e); + } + } + } + + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java new file mode 100644 index 0000000000000..3f257b7f0ce91 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.security.authz;
+
+import org.elasticsearch.cluster.metadata.AliasOrIndex;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.xpack.core.security.authz.permission.Role;
+import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.xpack.security.SecurityLifecycleService;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Predicate;
+
+import static org.elasticsearch.xpack.security.authz.AuthorizationService.isSuperuser;
+
+/**
+ * Abstraction used to make sure that we lazily load authorized indices only when requested and at most once per request. Also
+ * makes sure that authorized indices don't get updated throughout the same request for the same user.
+ */
+class AuthorizedIndices {
+    private final User user;
+    private final String action;
+    private final MetaData metaData;
+    private final Role userRoles;
+    private List<String> authorizedIndices;
+
+    AuthorizedIndices(User user, Role userRoles, String action, MetaData metaData) {
+        this.user = user;
+        this.userRoles = userRoles;
+        this.action = action;
+        this.metaData = metaData;
+    }
+
+    List<String> get() {
+        if (authorizedIndices == null) {
+            authorizedIndices = load();
+        }
+        return authorizedIndices;
+    }
+
+    private List<String> load() {
+        Predicate<String> predicate = userRoles.indices().allowedIndicesMatcher(action);
+
+        List<String> indicesAndAliases = new ArrayList<>();
+        // TODO: can this be done smarter? There are usually more indices/aliases in the cluster than indices defined in roles
+        for (Map.Entry<String, AliasOrIndex> entry : metaData.getAliasAndIndexLookup().entrySet()) {
+            String aliasOrIndex = entry.getKey();
+            if (predicate.test(aliasOrIndex)) {
+                indicesAndAliases.add(aliasOrIndex);
+            }
+        }
+
+        if (isSuperuser(user) == false) {
+            // we should filter out all of the security indices from wildcards
+            indicesAndAliases.removeAll(SecurityLifecycleService.indexNames());
+        }
+        return Collections.unmodifiableList(indicesAndAliases);
+    }
+}
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java
new file mode 100644
index 0000000000000..5c0000d430432
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java
@@ -0,0 +1,505 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.security.authz; + +import org.elasticsearch.action.AliasesRequest; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; +import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.metadata.AliasOrIndex; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest; +import org.elasticsearch.xpack.core.security.authz.IndicesAndAliasesResolverField; + +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.SortedMap; +import java.util.concurrent.CopyOnWriteArraySet; +import java.util.stream.Collectors; + +public class IndicesAndAliasesResolver { + + private static final ResolvedIndices NO_INDEX_PLACEHOLDER_RESOLVED = + ResolvedIndices.local(IndicesAndAliasesResolverField.NO_INDEX_PLACEHOLDER); + //`*,-*` what we replace indices with if we need Elasticsearch to return empty responses without throwing exception + private static final String[] NO_INDICES_ARRAY = new String[] { "*", "-*" }; + static final List NO_INDICES_LIST = Arrays.asList(NO_INDICES_ARRAY); + + private final IndexNameExpressionResolver nameExpressionResolver; + private final RemoteClusterResolver remoteClusterResolver; + + public IndicesAndAliasesResolver(Settings settings, ClusterService clusterService) { + this.nameExpressionResolver = new IndexNameExpressionResolver(settings); + this.remoteClusterResolver = new RemoteClusterResolver(settings, clusterService.getClusterSettings()); + } + + /** + * Resolves, and if necessary updates, the list of index names in the provided request in accordance with the user's + * authorizedIndices. + *

+ * Wildcards are expanded at this phase to ensure that all security and execution decisions are made against a fixed set of index names
+ * that is consistent and does not change during the life of the request.
+ * <p>
+ * If the provided request is of a type that {@link #allowsRemoteIndices(IndicesRequest) allows remote indices},
+ * then the index names will be categorized into those that refer to {@link ResolvedIndices#getLocal() local indices}, and those that
+ * refer to {@link ResolvedIndices#getRemote() remote indices}. This categorization follows the standard
+ * {@link RemoteClusterAware#buildRemoteIndexName(String, String) remote index-name format} and also respects the currently defined
+ * {@link RemoteClusterAware#getRemoteClusterNames() remote clusters}.
+ * <p>
+ * Thus an index name N will be considered to be remote if-and-only-if all of the following are true:
+ * <ul>
+ * <li>the request supports remote indices</li>
+ * <li>N is in the format {@code cluster:index}; it is allowable for {@code cluster} and {@code index} to contain wildcards,
+ *     but the separator ({@code :}) must be explicit</li>
+ * <li>{@code cluster} matches one or more remote cluster names that are registered within this cluster</li>
+ * </ul>
+ * In which case, any wildcards in the {@code cluster} portion of the name will be expanded and the resulting remote-index-name(s)
+ * will be added to the remote index list.
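+ * For example, assuming remote clusters named {@code clusterA} and {@code clusterB} are registered (names chosen purely for
+ * illustration), the expression {@code cluster*:logs-2018} would be expanded to {@code clusterA:logs-2018} and
+ * {@code clusterB:logs-2018} in the remote index list.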
+ * Otherwise, N will be added to the local index list. + */ + + public ResolvedIndices resolve(TransportRequest request, MetaData metaData, AuthorizedIndices authorizedIndices) { + if (request instanceof IndicesAliasesRequest) { + ResolvedIndices indices = ResolvedIndices.empty(); + IndicesAliasesRequest indicesAliasesRequest = (IndicesAliasesRequest) request; + for (IndicesRequest indicesRequest : indicesAliasesRequest.getAliasActions()) { + indices = ResolvedIndices.add(indices, resolveIndicesAndAliases(indicesRequest, metaData, authorizedIndices)); + } + return indices; + } + + // if for some reason we are missing an action... just for safety we'll reject + if (request instanceof IndicesRequest == false) { + throw new IllegalStateException("Request [" + request + "] is not an Indices request, but should be."); + } + return resolveIndicesAndAliases((IndicesRequest) request, metaData, authorizedIndices); + } + + ResolvedIndices resolveIndicesAndAliases(IndicesRequest indicesRequest, MetaData metaData, + AuthorizedIndices authorizedIndices) { + boolean indicesReplacedWithNoIndices = false; + final ResolvedIndices indices; + if (indicesRequest instanceof PutMappingRequest && ((PutMappingRequest) indicesRequest).getConcreteIndex() != null) { + /* + * This is a special case since PutMappingRequests from dynamic mapping updates have a concrete index + * if this index is set and it's in the list of authorized indices we are good and don't need to put + * the list of indices in there, if we do so it will result in an invalid request and the update will fail. + */ + assert indicesRequest.indices() == null || indicesRequest.indices().length == 0 + : "indices are: " + Arrays.toString(indicesRequest.indices()); // Arrays.toString() can handle null values - all good + return ResolvedIndices.local(((PutMappingRequest) indicesRequest).getConcreteIndex().getName()); + } else if (indicesRequest instanceof IndicesRequest.Replaceable) { + IndicesRequest.Replaceable replaceable = (IndicesRequest.Replaceable) indicesRequest; + final boolean replaceWildcards = indicesRequest.indicesOptions().expandWildcardsOpen() + || indicesRequest.indicesOptions().expandWildcardsClosed(); + IndicesOptions indicesOptions = indicesRequest.indicesOptions(); + if (indicesRequest instanceof IndicesExistsRequest) { + //indices exists api should never throw exception, make sure that ignore_unavailable and allow_no_indices are true + //we have to mimic what TransportIndicesExistsAction#checkBlock does in es core + indicesOptions = IndicesOptions.fromOptions(true, true, + indicesOptions.expandWildcardsOpen(), indicesOptions.expandWildcardsClosed()); + } + + ResolvedIndices result = ResolvedIndices.empty(); + // check for all and return list of authorized indices + if (IndexNameExpressionResolver.isAllIndices(indicesList(indicesRequest.indices()))) { + if (replaceWildcards) { + for (String authorizedIndex : authorizedIndices.get()) { + if (isIndexVisible(authorizedIndex, indicesOptions, metaData)) { + result = ResolvedIndices.add(result, ResolvedIndices.local(authorizedIndex)); + } + } + } + // if we cannot replace wildcards the indices list stays empty. Same if there are no authorized indices. + // we honour allow_no_indices like es core does. 
+ } else { + final ResolvedIndices split; + if (allowsRemoteIndices(indicesRequest)) { + split = remoteClusterResolver.splitLocalAndRemoteIndexNames(indicesRequest.indices()); + } else { + split = ResolvedIndices.local(indicesRequest.indices()); + } + List replaced = replaceWildcardsWithAuthorizedIndices(split.getLocal(), indicesOptions, metaData, + authorizedIndices.get(), replaceWildcards); + if (indicesOptions.ignoreUnavailable()) { + //out of all the explicit names (expanded from wildcards and original ones that were left untouched) + //remove all the ones that the current user is not authorized for and ignore them + replaced = replaced.stream().filter(authorizedIndices.get()::contains).collect(Collectors.toList()); + } + result = new ResolvedIndices(new ArrayList<>(replaced), split.getRemote()); + } + if (result.isEmpty()) { + if (indicesOptions.allowNoIndices()) { + //this is how we tell es core to return an empty response, we can let the request through being sure + //that the '-*' wildcard expression will be resolved to no indices. We can't let empty indices through + //as that would be resolved to _all by es core. + replaceable.indices(NO_INDICES_ARRAY); + indicesReplacedWithNoIndices = true; + indices = NO_INDEX_PLACEHOLDER_RESOLVED; + } else { + throw new IndexNotFoundException(Arrays.toString(indicesRequest.indices())); + } + } else { + replaceable.indices(result.toArray()); + indices = result; + } + } else { + if (containsWildcards(indicesRequest)) { + //an alias can still contain '*' in its name as of 5.0. Such aliases cannot be referred to when using + //the security plugin, otherwise the following exception gets thrown + throw new IllegalStateException("There are no external requests known to support wildcards that don't support replacing " + + "their indices"); + } + //NOTE: shard level requests do support wildcards (as they hold the original indices options) but don't support + // replacing their indices. + //That is fine though because they never contain wildcards, as they get replaced as part of the authorization of their + //corresponding parent request on the coordinating node. Hence wildcards don't need to get replaced nor exploded for + // shard level requests. + List resolvedNames = new ArrayList<>(); + for (String name : indicesRequest.indices()) { + resolvedNames.add(nameExpressionResolver.resolveDateMathExpression(name)); + } + indices = new ResolvedIndices(resolvedNames, new ArrayList<>()); + } + + if (indicesRequest instanceof AliasesRequest) { + //special treatment for AliasesRequest since we need to replace wildcards among the specified aliases too. + //AliasesRequest extends IndicesRequest.Replaceable, hence its indices have already been properly replaced. + AliasesRequest aliasesRequest = (AliasesRequest) indicesRequest; + if (aliasesRequest.expandAliasesWildcards()) { + List aliases = replaceWildcardsWithAuthorizedAliases(aliasesRequest.aliases(), + loadAuthorizedAliases(authorizedIndices.get(), metaData)); + aliasesRequest.aliases(aliases.toArray(new String[aliases.size()])); + } + if (indicesReplacedWithNoIndices) { + if (indicesRequest instanceof GetAliasesRequest == false) { + throw new IllegalStateException(GetAliasesRequest.class.getSimpleName() + " is the only known " + + "request implementing " + AliasesRequest.class.getSimpleName() + " that may allow no indices. 
Found [" + + indicesRequest.getClass().getName() + "] which ended up with an empty set of indices."); + } + //if we replaced the indices with '-*' we shouldn't be adding the aliases to the list otherwise the request will + //not get authorized. Leave only '-*' and ignore the rest, result will anyway be empty. + } else { + return ResolvedIndices.add(indices, ResolvedIndices.local(aliasesRequest.aliases())); + } + } + return indices; + } + + public static boolean allowsRemoteIndices(IndicesRequest request) { + return request instanceof SearchRequest || request instanceof FieldCapabilitiesRequest + || request instanceof GraphExploreRequest; + } + + private List loadAuthorizedAliases(List authorizedIndices, MetaData metaData) { + List authorizedAliases = new ArrayList<>(); + SortedMap existingAliases = metaData.getAliasAndIndexLookup(); + for (String authorizedIndex : authorizedIndices) { + AliasOrIndex aliasOrIndex = existingAliases.get(authorizedIndex); + if (aliasOrIndex != null && aliasOrIndex.isAlias()) { + authorizedAliases.add(authorizedIndex); + } + } + return authorizedAliases; + } + + private List replaceWildcardsWithAuthorizedAliases(String[] aliases, List authorizedAliases) { + List finalAliases = new ArrayList<>(); + + //IndicesAliasesRequest doesn't support empty aliases (validation fails) but GetAliasesRequest does (in which case empty means _all) + boolean matchAllAliases = aliases.length == 0; + if (matchAllAliases) { + finalAliases.addAll(authorizedAliases); + } + + for (String aliasPattern : aliases) { + if (aliasPattern.equals(MetaData.ALL)) { + matchAllAliases = true; + finalAliases.addAll(authorizedAliases); + } else if (Regex.isSimpleMatchPattern(aliasPattern)) { + for (String authorizedAlias : authorizedAliases) { + if (Regex.simpleMatch(aliasPattern, authorizedAlias)) { + finalAliases.add(authorizedAlias); + } + } + } else { + finalAliases.add(aliasPattern); + } + } + + //Throw exception if the wildcards expansion to authorized aliases resulted in no indices. + //We always need to replace wildcards for security reasons, to make sure that the operation is executed on the aliases that we + //authorized it to execute on. Empty set gets converted to _all by es core though, and unlike with indices, here we don't have + //a special expression to replace empty set with, which gives us the guarantee that nothing will be returned. + //This is because existing aliases can contain all kinds of special characters, they are only validated since 5.1. + if (finalAliases.isEmpty()) { + String indexName = matchAllAliases ? 
MetaData.ALL : Arrays.toString(aliases); + throw new IndexNotFoundException(indexName); + } + return finalAliases; + } + + private boolean containsWildcards(IndicesRequest indicesRequest) { + if (IndexNameExpressionResolver.isAllIndices(indicesList(indicesRequest.indices()))) { + return true; + } + for (String index : indicesRequest.indices()) { + if (Regex.isSimpleMatchPattern(index)) { + return true; + } + } + return false; + } + + //TODO Investigate reusing code from vanilla es to resolve index names and wildcards + private List replaceWildcardsWithAuthorizedIndices(Iterable indices, IndicesOptions indicesOptions, MetaData metaData, + List authorizedIndices, boolean replaceWildcards) { + //the order matters when it comes to exclusions + List finalIndices = new ArrayList<>(); + boolean wildcardSeen = false; + for (String index : indices) { + String aliasOrIndex; + boolean minus = false; + if (index.charAt(0) == '-' && wildcardSeen) { + aliasOrIndex = index.substring(1); + minus = true; + } else { + aliasOrIndex = index; + } + + // we always need to check for date math expressions + final String dateMathName = nameExpressionResolver.resolveDateMathExpression(aliasOrIndex); + if (dateMathName != aliasOrIndex) { + assert dateMathName.equals(aliasOrIndex) == false; + if (replaceWildcards && Regex.isSimpleMatchPattern(dateMathName)) { + // continue + aliasOrIndex = dateMathName; + } else if (authorizedIndices.contains(dateMathName) && isIndexVisible(dateMathName, indicesOptions, metaData, true)) { + if (minus) { + finalIndices.remove(dateMathName); + } else { + finalIndices.add(dateMathName); + } + } else { + if (indicesOptions.ignoreUnavailable() == false) { + throw new IndexNotFoundException(dateMathName); + } + } + } + + if (replaceWildcards && Regex.isSimpleMatchPattern(aliasOrIndex)) { + wildcardSeen = true; + Set resolvedIndices = new HashSet<>(); + for (String authorizedIndex : authorizedIndices) { + if (Regex.simpleMatch(aliasOrIndex, authorizedIndex) && isIndexVisible(authorizedIndex, indicesOptions, metaData)) { + resolvedIndices.add(authorizedIndex); + } + } + if (resolvedIndices.isEmpty()) { + //es core honours allow_no_indices for each wildcard expression, we do the same here by throwing index not found. + if (indicesOptions.allowNoIndices() == false) { + throw new IndexNotFoundException(aliasOrIndex); + } + } else { + if (minus) { + finalIndices.removeAll(resolvedIndices); + } else { + finalIndices.addAll(resolvedIndices); + } + } + } else if (dateMathName == aliasOrIndex) { + // we can use == here to compare strings since the name expression resolver returns the same instance, but add an assert + // to ensure we catch this if it changes + + assert dateMathName.equals(aliasOrIndex); + //MetaData#convertFromWildcards checks if the index exists here and throws IndexNotFoundException if not (based on + // ignore_unavailable). We only add/remove the index: if the index is missing or the current user is not authorized + // to access it either an AuthorizationException will be thrown later in AuthorizationService, or the index will be + // removed from the list, based on the ignore_unavailable option. 
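+                // neither a wildcard being replaced nor a date math expression: honour exclusions by removing the name,
+                // otherwise pass it through verbatim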
+ if (minus) { + finalIndices.remove(aliasOrIndex); + } else { + finalIndices.add(aliasOrIndex); + } + } + } + return finalIndices; + } + + private static boolean isIndexVisible(String index, IndicesOptions indicesOptions, MetaData metaData) { + return isIndexVisible(index, indicesOptions, metaData, false); + } + + private static boolean isIndexVisible(String index, IndicesOptions indicesOptions, MetaData metaData, boolean dateMathExpression) { + AliasOrIndex aliasOrIndex = metaData.getAliasAndIndexLookup().get(index); + if (aliasOrIndex.isAlias()) { + //it's an alias, ignore expandWildcardsOpen and expandWildcardsClosed. + //complicated to support those options with aliases pointing to multiple indices... + //TODO investigate supporting expandWildcards option for aliases too, like es core does. + return indicesOptions.ignoreAliases() == false; + } + assert aliasOrIndex.getIndices().size() == 1 : "concrete index must point to a single index"; + IndexMetaData indexMetaData = aliasOrIndex.getIndices().get(0); + if (indexMetaData.getState() == IndexMetaData.State.CLOSE && (indicesOptions.expandWildcardsClosed() || dateMathExpression)) { + return true; + } + if (indexMetaData.getState() == IndexMetaData.State.OPEN && (indicesOptions.expandWildcardsOpen() || dateMathExpression)) { + return true; + } + return false; + } + + private static List indicesList(String[] list) { + return (list == null) ? null : Arrays.asList(list); + } + + private static class RemoteClusterResolver extends RemoteClusterAware { + + private final CopyOnWriteArraySet clusters; + + private RemoteClusterResolver(Settings settings, ClusterSettings clusterSettings) { + super(settings); + clusters = new CopyOnWriteArraySet<>(buildRemoteClustersSeeds(settings).keySet()); + listenForUpdates(clusterSettings); + } + + @Override + protected Set getRemoteClusterNames() { + return clusters; + } + + @Override + protected void updateRemoteCluster(String clusterAlias, List addresses) { + if (addresses.isEmpty()) { + clusters.remove(clusterAlias); + } else { + clusters.add(clusterAlias); + } + } + + ResolvedIndices splitLocalAndRemoteIndexNames(String... indices) { + final Map> map = super.groupClusterIndices(indices, exists -> false); + final List local = map.remove(LOCAL_CLUSTER_GROUP_KEY); + final List remote = map.entrySet().stream() + .flatMap(e -> e.getValue().stream().map(v -> e.getKey() + REMOTE_CLUSTER_INDEX_SEPARATOR + v)) + .collect(Collectors.toList()); + return new ResolvedIndices(local == null ? Collections.emptyList() : local, remote); + } + } + + /** + * Stores a collection of index names separated into "local" and "remote". + * This allows the resolution and categorization to take place exactly once per-request. + */ + public static class ResolvedIndices { + private final List local; + private final List remote; + + ResolvedIndices(List local, List remote) { + this.local = local; + this.remote = remote; + } + + /** + * Constructs a new instance of this class where both the {@link #getLocal() local} and {@link #getRemote() remote} index lists + * are empty. + */ + private static ResolvedIndices empty() { + return new ResolvedIndices(Collections.emptyList(), Collections.emptyList()); + } + + /** + * Constructs a new instance of this class where both the {@link #getLocal() local} index list is populated with names + * and the {@link #getRemote() remote} index list is empty. + */ + private static ResolvedIndices local(String... 
names) { + return new ResolvedIndices(Arrays.asList(names), Collections.emptyList()); + } + + /** + * Returns the collection of index names that have been stored as "local" indices. + * This is a List because order may be important. For example [ "a*" , "-a1" ] is interpreted differently + * to [ "-a1", "a*" ]. As a consequence, this list may contain duplicates. + */ + public List getLocal() { + return Collections.unmodifiableList(local); + } + + /** + * Returns the collection of index names that have been stored as "remote" indices. + */ + public List getRemote() { + return Collections.unmodifiableList(remote); + } + + /** + * @return true if both the {@link #getLocal() local} and {@link #getRemote() remote} index lists are empty. + */ + public boolean isEmpty() { + return local.isEmpty() && remote.isEmpty(); + } + + /** + * @return true if the {@link #getRemote() remote} index lists is empty, and the local index list contains the + * {@link IndicesAndAliasesResolverField#NO_INDEX_PLACEHOLDER no-index-placeholder} and nothing else. + */ + public boolean isNoIndicesPlaceholder() { + return remote.isEmpty() && local.size() == 1 && local.contains(IndicesAndAliasesResolverField.NO_INDEX_PLACEHOLDER); + } + + private String[] toArray() { + final String[] array = new String[local.size() + remote.size()]; + int i = 0; + for (String index : local) { + array[i++] = index; + } + for (String index : remote) { + array[i++] = index; + } + return array; + } + + /** + * Returns a new ResolvedIndices contains the {@link #getLocal() local} and {@link #getRemote() remote} + * index lists from b appended to the corresponding lists in a. + */ + private static ResolvedIndices add(ResolvedIndices a, ResolvedIndices b) { + List local = new ArrayList<>(a.local.size() + b.local.size()); + local.addAll(a.local); + local.addAll(b.local); + + List remote = new ArrayList<>(a.remote.size() + b.remote.size()); + remote.addAll(a.remote); + remote.addAll(b.remote); + return new ResolvedIndices(local, remote); + } + + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListener.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListener.java new file mode 100644 index 0000000000000..6658d095b9c11 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListener.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authz; + +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.shard.SearchOperationListener; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.search.SearchContextMissingException; +import org.elasticsearch.search.internal.ScrollContext; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.security.audit.AuditTrailService; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.AuthenticationField; + +import static org.elasticsearch.xpack.security.authz.AuthorizationService.ORIGINATING_ACTION_KEY; +import static org.elasticsearch.xpack.security.authz.AuthorizationService.ROLE_NAMES_KEY; + +/** + * A {@link SearchOperationListener} that is used to provide authorization for scroll requests. + * + * In order to identify the user associated with a scroll request, we replace the {@link ScrollContext} + * on creation with a custom implementation that holds the {@link Authentication} object. When + * this context is accessed again in {@link SearchOperationListener#onPreQueryPhase(SearchContext)} + * the ScrollContext is inspected for the authentication, which is compared to the currently + * authentication. + */ +public final class SecuritySearchOperationListener implements SearchOperationListener { + + private final ThreadContext threadContext; + private final XPackLicenseState licenseState; + private final AuditTrailService auditTrailService; + + public SecuritySearchOperationListener(ThreadContext threadContext, XPackLicenseState licenseState, AuditTrailService auditTrail) { + this.threadContext = threadContext; + this.licenseState = licenseState; + this.auditTrailService = auditTrail; + } + + /** + * Adds the {@link Authentication} to the {@link ScrollContext} + */ + @Override + public void onNewScrollContext(SearchContext searchContext) { + if (licenseState.isSecurityEnabled() && licenseState.isAuthAllowed()) { + searchContext.scrollContext().putInContext(AuthenticationField.AUTHENTICATION_KEY, + Authentication.getAuthentication(threadContext)); + } + } + + /** + * Checks for the {@link ScrollContext} if it exists and compares the {@link Authentication} + * object from the scroll context with the current authentication context + */ + @Override + public void validateSearchContext(SearchContext searchContext, TransportRequest request) { + if (licenseState.isSecurityEnabled() && licenseState.isAuthAllowed()) { + if (searchContext.scrollContext() != null) { + final Authentication originalAuth = searchContext.scrollContext().getFromContext(AuthenticationField.AUTHENTICATION_KEY); + final Authentication current = Authentication.getAuthentication(threadContext); + final String action = threadContext.getTransient(ORIGINATING_ACTION_KEY); + ensureAuthenticatedUserIsSame(originalAuth, current, auditTrailService, searchContext.id(), action, request, + threadContext.getTransient(ROLE_NAMES_KEY)); + } + } + } + + /** + * Compares the {@link Authentication} that was stored in the {@link ScrollContext} with the + * current authentication. We cannot guarantee that all of the details of the authentication will + * be the same. Some things that could differ include the roles, the name of the authenticating + * (or lookup) realm. To work around this we compare the username and the originating realm type. 
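+ * For example, the same LDAP user may be authenticated by a realm named {@code ldap1} on one node and {@code ldap2} on another
+ * (realm names here are hypothetical); both realms share the {@code ldap} type, so the scroll is still considered to belong
+ * to the same user.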
+ */ + static void ensureAuthenticatedUserIsSame(Authentication original, Authentication current, AuditTrailService auditTrailService, + long id, String action, TransportRequest request, String[] roleNames) { + // this is really a best effort attempt since we cannot guarantee principal uniqueness + // and realm names can change between nodes. + final boolean samePrincipal = original.getUser().principal().equals(current.getUser().principal()); + final boolean sameRealmType; + if (original.getUser().isRunAs()) { + if (current.getUser().isRunAs()) { + sameRealmType = original.getLookedUpBy().getType().equals(current.getLookedUpBy().getType()); + } else { + sameRealmType = original.getLookedUpBy().getType().equals(current.getAuthenticatedBy().getType()); + } + } else if (current.getUser().isRunAs()) { + sameRealmType = original.getAuthenticatedBy().getType().equals(current.getLookedUpBy().getType()); + } else { + sameRealmType = original.getAuthenticatedBy().getType().equals(current.getAuthenticatedBy().getType()); + } + + final boolean sameUser = samePrincipal && sameRealmType; + if (sameUser == false) { + auditTrailService.accessDenied(current, action, request, roleNames); + throw new SearchContextMissingException(id); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/FieldExtractor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/FieldExtractor.java new file mode 100644 index 0000000000000..a19f1029d64f9 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/FieldExtractor.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authz.accesscontrol; + +import org.apache.lucene.index.PrefixCodedTerms.TermIterator; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.DisjunctionMaxQuery; +import org.apache.lucene.search.DocValuesFieldExistsQuery; +import org.apache.lucene.search.DocValuesNumbersQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.MultiPhraseQuery; +import org.apache.lucene.search.PhraseQuery; +import org.apache.lucene.search.PointInSetQuery; +import org.apache.lucene.search.PointRangeQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.SynonymQuery; +import org.apache.lucene.search.TermInSetQuery; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.Weight; +import org.apache.lucene.search.spans.SpanTermQuery; + +import java.util.HashSet; +import java.util.Set; + +/** + * Extracts fields from a query, or throws UnsupportedOperationException. + *

+ * Lucene queries have {@link Weight#extractTerms}, but this is really geared at things + * such as highlighting, not security. For example terms in a Boolean {@code MUST_NOT} clause + * are not included, TermsQuery doesn't implement the method as it could be terribly slow, etc. + */ +class FieldExtractor { + + /** + * Populates {@code fields} with the set of fields used by the query, or throws + * UnsupportedOperationException if it doesn't know how to do this. + */ + static void extractFields(Query query, Set fields) throws UnsupportedOperationException { + // NOTE: we expect a rewritten query, so we only need logic for "atomic" queries here: + if (query instanceof BooleanQuery) { + // extract from all clauses + BooleanQuery q = (BooleanQuery) query; + for (BooleanClause clause : q.clauses()) { + extractFields(clause.getQuery(), fields); + } + } else if (query instanceof DisjunctionMaxQuery) { + // extract from all clauses + DisjunctionMaxQuery q = (DisjunctionMaxQuery) query; + for (Query clause : q.getDisjuncts()) { + extractFields(clause, fields); + } + } else if (query instanceof SpanTermQuery) { + // we just do SpanTerm, other spans are trickier, they could contain + // the evil FieldMaskingSpanQuery: so SpanQuery.getField cannot be trusted. + fields.add(((SpanTermQuery)query).getField()); + } else if (query instanceof TermQuery) { + fields.add(((TermQuery)query).getTerm().field()); + } else if (query instanceof SynonymQuery) { + SynonymQuery q = (SynonymQuery) query; + // all terms must have the same field + fields.add(q.getTerms().get(0).field()); + } else if (query instanceof PhraseQuery) { + PhraseQuery q = (PhraseQuery) query; + // all terms must have the same field + fields.add(q.getTerms()[0].field()); + } else if (query instanceof MultiPhraseQuery) { + MultiPhraseQuery q = (MultiPhraseQuery) query; + // all terms must have the same field + fields.add(q.getTermArrays()[0][0].field()); + } else if (query instanceof PointRangeQuery) { + fields.add(((PointRangeQuery)query).getField()); + } else if (query instanceof PointInSetQuery) { + fields.add(((PointInSetQuery)query).getField()); + } else if (query instanceof DocValuesFieldExistsQuery) { + fields.add(((DocValuesFieldExistsQuery)query).getField()); + } else if (query instanceof DocValuesNumbersQuery) { + fields.add(((DocValuesNumbersQuery)query).getField()); + } else if (query instanceof IndexOrDocValuesQuery) { + // Both queries are supposed to be equivalent, so if any of them can be extracted, we are good + try { + Set dvQueryFields = new HashSet<>(1); + extractFields(((IndexOrDocValuesQuery) query).getRandomAccessQuery(), dvQueryFields); + fields.addAll(dvQueryFields); + } catch (UnsupportedOperationException e) { + extractFields(((IndexOrDocValuesQuery) query).getIndexQuery(), fields); + } + } else if (query instanceof TermInSetQuery) { + // TermInSetQuery#field is inaccessible + TermInSetQuery termInSetQuery = (TermInSetQuery) query; + TermIterator termIterator = termInSetQuery.getTermData().iterator(); + // there should only be one field + if (termIterator.next() != null) { + fields.add(termIterator.field()); + } + } else if (query instanceof MatchAllDocsQuery) { + // no field + } else if (query instanceof MatchNoDocsQuery) { + // no field + } else { + throw new UnsupportedOperationException(); // we don't know how to get the fields from it + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCache.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCache.java new file mode 100644 index 0000000000000..e15ff2f4d0c67 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCache.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authz.accesscontrol; + +import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.Weight; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.cache.query.QueryCache; +import org.elasticsearch.indices.IndicesQueryCache; +import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; + +import java.util.HashSet; +import java.util.Objects; +import java.util.Set; + +/** + * Opts out of the query cache if field level security is active for the current request, + * and its unsafe to cache. + */ +public final class OptOutQueryCache extends AbstractIndexComponent implements QueryCache { + + private final IndicesQueryCache indicesQueryCache; + private final ThreadContext context; + private final String indexName; + + public OptOutQueryCache(IndexSettings indexSettings, IndicesQueryCache indicesQueryCache, ThreadContext context) { + super(indexSettings); + this.indicesQueryCache = indicesQueryCache; + this.context = Objects.requireNonNull(context, "threadContext must not be null"); + this.indexName = indexSettings.getIndex().getName(); + } + + @Override + public void close() throws ElasticsearchException { + clear("close"); + } + + @Override + public void clear(String reason) { + logger.debug("full cache clear, reason [{}]", reason); + indicesQueryCache.clearIndex(index().getName()); + } + + @Override + public Weight doCache(Weight weight, QueryCachingPolicy policy) { + IndicesAccessControl indicesAccessControl = context.getTransient( + AuthorizationServiceField.INDICES_PERMISSIONS_KEY); + if (indicesAccessControl == null) { + logger.debug("opting out of the query cache. current request doesn't hold indices permissions"); + return weight; + } + + IndicesAccessControl.IndexAccessControl indexAccessControl = indicesAccessControl.getIndexPermissions(indexName); + if (indexAccessControl != null && indexAccessControl.getFieldPermissions().hasFieldLevelSecurity()) { + if (cachingIsSafe(weight, indexAccessControl)) { + logger.trace("not opting out of the query cache. request for index [{}] is safe to cache", indexName); + return indicesQueryCache.doCache(weight, policy); + } else { + logger.trace("opting out of the query cache. request for index [{}] is unsafe to cache", indexName); + return weight; + } + } else { + logger.trace("not opting out of the query cache. request for index [{}] has field level security disabled", indexName); + return indicesQueryCache.doCache(weight, policy); + } + } + + /** + * Returns true if its safe to use the query cache for this query. 
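+ * Caching is treated as safe only when every field used by the query can be extracted, none of those fields is an internal
+ * field (i.e. its name starts with an underscore), and all of them are granted by the current field level security permissions.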
+ */ + static boolean cachingIsSafe(Weight weight, IndicesAccessControl.IndexAccessControl permissions) { + // support caching for common queries, by inspecting the field + // TODO: If in the future there is a Query#extractFields() then we can do a better job + Set fields = new HashSet<>(); + try { + FieldExtractor.extractFields(weight.getQuery(), fields); + } catch (UnsupportedOperationException ok) { + // we don't know how to safely extract the fields of this query, don't cache. + return false; + } + + // we successfully extracted the set of fields: check each one + for (String field : fields) { + // don't cache any internal fields (e.g. _field_names), these are complicated. + if (field.startsWith("_") || permissions.getFieldPermissions().grantsAccessTo(field) == false) { + return false; + } + } + // we can cache, all fields are ok + return true; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java new file mode 100644 index 0000000000000..1f604406a7120 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java @@ -0,0 +1,371 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authz.store; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.health.ClusterIndexHealth; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.cache.Cache; +import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.ReleasableLock; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.common.IteratingActionListener; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition.FieldGrantExcludeGroup; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.Privilege; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; +import org.elasticsearch.xpack.security.SecurityLifecycleService; + +import java.util.ArrayList; +import 
java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.BiConsumer; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.security.SecurityField.setting; +import static org.elasticsearch.xpack.security.SecurityLifecycleService.isIndexDeleted; +import static org.elasticsearch.xpack.security.SecurityLifecycleService.isMoveFromRedToNonRed; + +/** + * A composite roles store that combines built in roles, file-based roles, and index-based roles. Checks the built in roles first, then the + * file roles, and finally the index roles. + */ +public class CompositeRolesStore extends AbstractComponent { + + // the lock is used in an odd manner; when iterating over the cache we cannot have modifiers other than deletes using + // the iterator but when not iterating we can modify the cache without external locking. When making normal modifications to the cache + // the read lock is obtained so that we can allow concurrent modifications; however when we need to iterate over the keys or values of + // the cache the write lock must obtained to prevent any modifications + private final ReleasableLock readLock; + private final ReleasableLock writeLock; + + { + final ReadWriteLock iterationLock = new ReentrantReadWriteLock(); + readLock = new ReleasableLock(iterationLock.readLock()); + writeLock = new ReleasableLock(iterationLock.writeLock()); + } + + public static final Setting CACHE_SIZE_SETTING = + Setting.intSetting(setting("authz.store.roles.cache.max_size"), 10000, Property.NodeScope); + + private final FileRolesStore fileRolesStore; + private final NativeRolesStore nativeRolesStore; + private final ReservedRolesStore reservedRolesStore; + private final XPackLicenseState licenseState; + private final Cache, Role> roleCache; + private final Set negativeLookupCache; + private final ThreadContext threadContext; + private final AtomicLong numInvalidation = new AtomicLong(); + private final List, ActionListener>>> customRolesProviders; + + public CompositeRolesStore(Settings settings, FileRolesStore fileRolesStore, NativeRolesStore nativeRolesStore, + ReservedRolesStore reservedRolesStore, + List, ActionListener>>> rolesProviders, + ThreadContext threadContext, XPackLicenseState licenseState) { + super(settings); + this.fileRolesStore = fileRolesStore; + // invalidating all on a file based role update is heavy handed to say the least, but in general this should be infrequent so the + // impact isn't really worth the added complexity of only clearing the changed values + fileRolesStore.addListener(this::invalidateAll); + this.nativeRolesStore = nativeRolesStore; + this.reservedRolesStore = reservedRolesStore; + this.licenseState = licenseState; + CacheBuilder, Role> builder = CacheBuilder.builder(); + final int cacheSize = CACHE_SIZE_SETTING.get(settings); + if (cacheSize >= 0) { + builder.setMaximumWeight(cacheSize); + } + this.roleCache = builder.build(); + this.threadContext = threadContext; + this.negativeLookupCache = ConcurrentCollections.newConcurrentSet(); + this.customRolesProviders = Collections.unmodifiableList(rolesProviders); + } + + public void roles(Set roleNames, FieldPermissionsCache 
fieldPermissionsCache, ActionListener roleActionListener) { + Role existing = roleCache.get(roleNames); + if (existing != null) { + roleActionListener.onResponse(existing); + } else { + final long invalidationCounter = numInvalidation.get(); + roleDescriptors(roleNames, ActionListener.wrap( + (descriptors) -> { + final Role role; + if (licenseState.isDocumentAndFieldLevelSecurityAllowed()) { + role = buildRoleFromDescriptors(descriptors, fieldPermissionsCache); + } else { + final Set filtered = descriptors.stream() + .filter((rd) -> rd.isUsingDocumentOrFieldLevelSecurity() == false) + .collect(Collectors.toSet()); + role = buildRoleFromDescriptors(filtered, fieldPermissionsCache); + } + + if (role != null) { + try (ReleasableLock ignored = readLock.acquire()) { + /* this is kinda spooky. We use a read/write lock to ensure we don't modify the cache if we hold the write + * lock (fetching stats for instance - which is kinda overkill?) but since we fetching stuff in an async + * fashion we need to make sure that if the cache got invalidated since we started the request we don't + * put a potential stale result in the cache, hence the numInvalidation.get() comparison to the number of + * invalidation when we started. we just try to be on the safe side and don't cache potentially stale + * results*/ + if (invalidationCounter == numInvalidation.get()) { + roleCache.computeIfAbsent(roleNames, (s) -> role); + } + } + } + roleActionListener.onResponse(role); + }, + roleActionListener::onFailure)); + } + } + + private void roleDescriptors(Set roleNames, ActionListener> roleDescriptorActionListener) { + final Set filteredRoleNames = roleNames.stream().filter((s) -> { + if (negativeLookupCache.contains(s)) { + logger.debug("Requested role [{}] does not exist (cached)", s); + return false; + } else { + return true; + } + }).collect(Collectors.toSet()); + final Set builtInRoleDescriptors = getBuiltInRoleDescriptors(filteredRoleNames); + Set remainingRoleNames = difference(filteredRoleNames, builtInRoleDescriptors); + if (remainingRoleNames.isEmpty()) { + roleDescriptorActionListener.onResponse(Collections.unmodifiableSet(builtInRoleDescriptors)); + } else { + nativeRolesStore.getRoleDescriptors(remainingRoleNames.toArray(Strings.EMPTY_ARRAY), ActionListener.wrap((descriptors) -> { + logger.debug(() -> new ParameterizedMessage("Roles [{}] were resolved from the native index store", names(descriptors))); + builtInRoleDescriptors.addAll(descriptors); + callCustomRoleProvidersIfEnabled(builtInRoleDescriptors, filteredRoleNames, roleDescriptorActionListener); + }, e -> { + logger.warn("role retrieval failed from the native roles store", e); + callCustomRoleProvidersIfEnabled(builtInRoleDescriptors, filteredRoleNames, roleDescriptorActionListener); + })); + } + } + + private void callCustomRoleProvidersIfEnabled(Set builtInRoleDescriptors, Set filteredRoleNames, + ActionListener> roleDescriptorActionListener) { + if (builtInRoleDescriptors.size() != filteredRoleNames.size()) { + final Set missing = difference(filteredRoleNames, builtInRoleDescriptors); + assert missing.isEmpty() == false : "the missing set should not be empty if the sizes didn't match"; + if (licenseState.isCustomRoleProvidersAllowed() && !customRolesProviders.isEmpty()) { + new IteratingActionListener<>(roleDescriptorActionListener, (rolesProvider, listener) -> { + // resolve descriptors with role provider + rolesProvider.accept(missing, ActionListener.wrap((resolvedDescriptors) -> { + logger.debug(() -> + new ParameterizedMessage("Roles 
[{}] were resolved by [{}]", names(resolvedDescriptors), rolesProvider)); + builtInRoleDescriptors.addAll(resolvedDescriptors); + // remove resolved descriptors from the set of roles still needed to be resolved + for (RoleDescriptor descriptor : resolvedDescriptors) { + missing.remove(descriptor.getName()); + } + if (missing.isEmpty()) { + // no more roles to resolve, send the response + listener.onResponse(Collections.unmodifiableSet(builtInRoleDescriptors)); + } else { + // still have roles to resolve, keep trying with the next roles provider + listener.onResponse(null); + } + }, listener::onFailure)); + }, customRolesProviders, threadContext, () -> { + negativeLookupCache.addAll(missing); + return builtInRoleDescriptors; + }).run(); + } else { + logger.debug(() -> + new ParameterizedMessage("Requested roles [{}] do not exist", Strings.collectionToCommaDelimitedString(missing))); + negativeLookupCache.addAll(missing); + roleDescriptorActionListener.onResponse(Collections.unmodifiableSet(builtInRoleDescriptors)); + } + } else { + roleDescriptorActionListener.onResponse(Collections.unmodifiableSet(builtInRoleDescriptors)); + } + } + + private Set getBuiltInRoleDescriptors(Set roleNames) { + final Set descriptors = reservedRolesStore.roleDescriptors().stream() + .filter((rd) -> roleNames.contains(rd.getName())) + .collect(Collectors.toCollection(HashSet::new)); + if (descriptors.size() > 0) { + logger.debug(() -> new ParameterizedMessage("Roles [{}] are builtin roles", names(descriptors))); + } + final Set difference = difference(roleNames, descriptors); + if (difference.isEmpty() == false) { + final Set fileRoles = fileRolesStore.roleDescriptors(difference); + logger.debug(() -> + new ParameterizedMessage("Roles [{}] were resolved from [{}]", names(fileRoles), fileRolesStore.getFile())); + descriptors.addAll(fileRoles); + } + + return descriptors; + } + + private String names(Collection descriptors) { + return descriptors.stream().map(RoleDescriptor::getName).collect(Collectors.joining(",")); + } + + private Set difference(Set roleNames, Set descriptors) { + Set foundNames = descriptors.stream().map(RoleDescriptor::getName).collect(Collectors.toSet()); + return Sets.difference(roleNames, foundNames); + } + + public static Role buildRoleFromDescriptors(Set roleDescriptors, FieldPermissionsCache fieldPermissionsCache) { + if (roleDescriptors.isEmpty()) { + return Role.EMPTY; + } + Set clusterPrivileges = new HashSet<>(); + Set runAs = new HashSet<>(); + Map, MergeableIndicesPrivilege> indicesPrivilegesMap = new HashMap<>(); + List roleNames = new ArrayList<>(roleDescriptors.size()); + for (RoleDescriptor descriptor : roleDescriptors) { + roleNames.add(descriptor.getName()); + if (descriptor.getClusterPrivileges() != null) { + clusterPrivileges.addAll(Arrays.asList(descriptor.getClusterPrivileges())); + } + if (descriptor.getRunAs() != null) { + runAs.addAll(Arrays.asList(descriptor.getRunAs())); + } + IndicesPrivileges[] indicesPrivileges = descriptor.getIndicesPrivileges(); + for (IndicesPrivileges indicesPrivilege : indicesPrivileges) { + Set key = Sets.newHashSet(indicesPrivilege.getIndices()); + // if a index privilege is an explicit denial, then we treat it as non-existent since we skipped these in the past when + // merging + final boolean isExplicitDenial = + indicesPrivileges.length == 1 && "none".equalsIgnoreCase(indicesPrivilege.getPrivileges()[0]); + if (isExplicitDenial == false) { + indicesPrivilegesMap.compute(key, (k, value) -> { + if (value == null) { + return new 
MergeableIndicesPrivilege(indicesPrivilege.getIndices(), indicesPrivilege.getPrivileges(), + indicesPrivilege.getGrantedFields(), indicesPrivilege.getDeniedFields(), indicesPrivilege.getQuery()); + } else { + value.merge(new MergeableIndicesPrivilege(indicesPrivilege.getIndices(), indicesPrivilege.getPrivileges(), + indicesPrivilege.getGrantedFields(), indicesPrivilege.getDeniedFields(), indicesPrivilege.getQuery())); + return value; + } + }); + } + } + } + + final Set clusterPrivs = clusterPrivileges.isEmpty() ? null : clusterPrivileges; + final Privilege runAsPrivilege = runAs.isEmpty() ? Privilege.NONE : new Privilege(runAs, runAs.toArray(Strings.EMPTY_ARRAY)); + Role.Builder builder = Role.builder(roleNames.toArray(new String[roleNames.size()]), fieldPermissionsCache) + .cluster(ClusterPrivilege.get(clusterPrivs)) + .runAs(runAsPrivilege); + indicesPrivilegesMap.entrySet().forEach((entry) -> { + MergeableIndicesPrivilege privilege = entry.getValue(); + builder.add(fieldPermissionsCache.getFieldPermissions(privilege.fieldPermissionsDefinition), privilege.query, + IndexPrivilege.get(privilege.privileges), privilege.indices.toArray(Strings.EMPTY_ARRAY)); + }); + return builder.build(); + } + + public void invalidateAll() { + numInvalidation.incrementAndGet(); + negativeLookupCache.clear(); + try (ReleasableLock ignored = readLock.acquire()) { + roleCache.invalidateAll(); + } + } + + public void invalidate(String role) { + numInvalidation.incrementAndGet(); + + // the cache cannot be modified while doing this operation per the terms of the cache iterator + try (ReleasableLock ignored = writeLock.acquire()) { + Iterator> keyIter = roleCache.keys().iterator(); + while (keyIter.hasNext()) { + Set key = keyIter.next(); + if (key.contains(role)) { + keyIter.remove(); + } + } + } + negativeLookupCache.remove(role); + } + + public void usageStats(ActionListener> listener) { + final Map usage = new HashMap<>(2); + usage.put("file", fileRolesStore.usageStats()); + nativeRolesStore.usageStats(ActionListener.wrap(map -> { + usage.put("native", map); + listener.onResponse(usage); + }, listener::onFailure)); + } + + public void onSecurityIndexHealthChange(ClusterIndexHealth previousHealth, ClusterIndexHealth currentHealth) { + if (isMoveFromRedToNonRed(previousHealth, currentHealth) || isIndexDeleted(previousHealth, currentHealth)) { + invalidateAll(); + } + } + + public void onSecurityIndexOutOfDateChange(boolean prevOutOfDate, boolean outOfDate) { + assert prevOutOfDate != outOfDate : "this method should only be called if the two values are different"; + invalidateAll(); + } + + /** + * A mutable class that can be used to represent the combination of one or more {@link IndicesPrivileges} + */ + private static class MergeableIndicesPrivilege { + private Set indices; + private Set privileges; + private FieldPermissionsDefinition fieldPermissionsDefinition; + private Set query = null; + + MergeableIndicesPrivilege(String[] indices, String[] privileges, @Nullable String[] grantedFields, @Nullable String[] deniedFields, + @Nullable BytesReference query) { + this.indices = Sets.newHashSet(Objects.requireNonNull(indices)); + this.privileges = Sets.newHashSet(Objects.requireNonNull(privileges)); + this.fieldPermissionsDefinition = new FieldPermissionsDefinition(grantedFields, deniedFields); + if (query != null) { + this.query = Sets.newHashSet(query); + } + } + + void merge(MergeableIndicesPrivilege other) { + assert indices.equals(other.indices) : "index names must be equivalent in order to merge"; + Set 
groups = new HashSet<>(); + groups.addAll(this.fieldPermissionsDefinition.getFieldGrantExcludeGroups()); + groups.addAll(other.fieldPermissionsDefinition.getFieldGrantExcludeGroups()); + this.fieldPermissionsDefinition = new FieldPermissionsDefinition(groups); + this.privileges.addAll(other.privileges); + + if (this.query == null || other.query == null) { + this.query = null; + } else { + this.query.addAll(other.query); + } + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java new file mode 100644 index 0000000000000..f2d78806da0dc --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java @@ -0,0 +1,340 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authz.store; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.yaml.YamlXContent; +import org.elasticsearch.env.Environment; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.watcher.FileChangesListener; +import org.elasticsearch.watcher.FileWatcher; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; +import org.elasticsearch.xpack.core.security.support.NoOpLogger; +import org.elasticsearch.xpack.core.security.support.Validation; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.regex.Pattern; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.unmodifiableMap; + +public class FileRolesStore extends AbstractComponent { + + private static final Pattern IN_SEGMENT_LINE = Pattern.compile("^\\s+.+"); + private static final Pattern SKIP_LINE = Pattern.compile("(^#.*|^\\s*)"); + + private final Path file; + private final XPackLicenseState licenseState; + private final List listeners = new ArrayList<>(); + + private volatile Map permissions; + + public FileRolesStore(Settings settings, Environment env, ResourceWatcherService watcherService, XPackLicenseState licenseState) + throws IOException { + this(settings, env, watcherService, () -> {}, licenseState); + } + + 
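+    // package-private constructor that registers an initial listener for roles-file changes; the file is parsed eagerly here
+    // and kept under watch by the resource watcher service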
FileRolesStore(Settings settings, Environment env, ResourceWatcherService watcherService, Runnable listener, + XPackLicenseState licenseState) throws IOException { + super(settings); + this.file = resolveFile(env); + if (listener != null) { + listeners.add(listener); + } + this.licenseState = licenseState; + FileWatcher watcher = new FileWatcher(file.getParent()); + watcher.addListener(new FileListener()); + watcherService.add(watcher, ResourceWatcherService.Frequency.HIGH); + permissions = parseFile(file, logger, settings, licenseState); + } + + public Set roleDescriptors(Set roleNames) { + Set descriptors = new HashSet<>(); + roleNames.forEach((name) -> { + RoleDescriptor descriptor = permissions.get(name); + if (descriptor != null) { + descriptors.add(descriptor); + } + }); + return descriptors; + } + + public Map usageStats() { + Map usageStats = new HashMap<>(); + usageStats.put("size", permissions.size()); + + boolean dls = false; + boolean fls = false; + for (RoleDescriptor descriptor : permissions.values()) { + for (IndicesPrivileges indicesPrivileges : descriptor.getIndicesPrivileges()) { + fls = fls || indicesPrivileges.getGrantedFields() != null || indicesPrivileges.getDeniedFields() != null; + dls = dls || indicesPrivileges.getQuery() != null; + } + if (fls && dls) { + break; + } + } + usageStats.put("fls", fls); + usageStats.put("dls", dls); + + return usageStats; + } + + public void addListener(Runnable runnable) { + Objects.requireNonNull(runnable); + synchronized (this) { + listeners.add(runnable); + } + } + + public Path getFile() { + return file; + } + + public static Path resolveFile(Environment env) { + return XPackPlugin.resolveConfigFile(env, "roles.yml"); + } + + public static Set parseFileForRoleNames(Path path, Logger logger) { + return parseRoleDescriptors(path, logger, false, Settings.EMPTY).keySet(); + } + + public static Map parseFile(Path path, Logger logger, Settings settings, XPackLicenseState licenseState) { + return parseFile(path, logger, true, settings, licenseState); + } + + public static Map parseFile(Path path, Logger logger, boolean resolvePermission, + Settings settings, XPackLicenseState licenseState) { + if (logger == null) { + logger = NoOpLogger.INSTANCE; + } + + Map roles = new HashMap<>(); + logger.debug("attempting to read roles file located at [{}]", path.toAbsolutePath()); + if (Files.exists(path)) { + try { + List roleSegments = roleSegments(path); + final boolean flsDlsLicensed = licenseState.isDocumentAndFieldLevelSecurityAllowed(); + for (String segment : roleSegments) { + RoleDescriptor descriptor = parseRoleDescriptor(segment, path, logger, resolvePermission, settings); + if (descriptor != null) { + if (ReservedRolesStore.isReserved(descriptor.getName())) { + logger.warn("role [{}] is reserved. the relevant role definition in the mapping file will be ignored", + descriptor.getName()); + } else if (flsDlsLicensed == false && descriptor.isUsingDocumentOrFieldLevelSecurity()) { + logger.warn("role [{}] uses document and/or field level security, which is not enabled by the current license" + + ". this role will be ignored", descriptor.getName()); + // we still put the role in the map to avoid unnecessary negative lookups + roles.put(descriptor.getName(), descriptor); + } else { + roles.put(descriptor.getName(), descriptor); + } + } + } + } catch (IOException ioe) { + logger.error( + (Supplier) () -> new ParameterizedMessage( + "failed to read roles file [{}]. 
skipping all roles...", + path.toAbsolutePath()), + ioe); + return emptyMap(); + } + } else { + logger.debug("roles file does not exist"); + return emptyMap(); + } + + logger.info("parsed [{}] roles from file [{}]", roles.size(), path.toAbsolutePath()); + return unmodifiableMap(roles); + } + + public static Map parseRoleDescriptors(Path path, Logger logger, boolean resolvePermission, Settings settings) { + if (logger == null) { + logger = NoOpLogger.INSTANCE; + } + + Map roles = new HashMap<>(); + logger.trace("attempting to read roles file located at [{}]", path.toAbsolutePath()); + if (Files.exists(path)) { + try { + List roleSegments = roleSegments(path); + for (String segment : roleSegments) { + RoleDescriptor rd = parseRoleDescriptor(segment, path, logger, resolvePermission, settings); + if (rd != null) { + roles.put(rd.getName(), rd); + } + } + } catch (IOException ioe) { + logger.error( + (Supplier) () -> new ParameterizedMessage( + "failed to read roles file [{}]. skipping all roles...", + path.toAbsolutePath()), + ioe); + return emptyMap(); + } + } + return unmodifiableMap(roles); + } + + @Nullable + static RoleDescriptor parseRoleDescriptor(String segment, Path path, Logger logger, boolean resolvePermissions, Settings settings) { + String roleName = null; + try { + // EMPTY is safe here because we never use namedObject + XContentParser parser = YamlXContent.yamlXContent + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, segment); + XContentParser.Token token = parser.nextToken(); + if (token == XContentParser.Token.START_OBJECT) { + token = parser.nextToken(); + if (token == XContentParser.Token.FIELD_NAME) { + roleName = parser.currentName(); + Validation.Error validationError = Validation.Roles.validateRoleName(roleName); + if (validationError != null) { + logger.error("invalid role definition [{}] in roles file [{}]. invalid role name - {}. skipping role... ", + roleName, path.toAbsolutePath(), validationError); + return null; + } + + if (resolvePermissions == false) { + return new RoleDescriptor(roleName, null, null, null); + } + + token = parser.nextToken(); + if (token == XContentParser.Token.START_OBJECT) { + // we pass true as last parameter because we do not want to reject files if field permissions + // are given in 2.x syntax + RoleDescriptor descriptor = RoleDescriptor.parse(roleName, parser, true); + return checkDescriptor(descriptor, path, logger, settings); + } else { + logger.error("invalid role definition [{}] in roles file [{}]. skipping role...", roleName, path.toAbsolutePath()); + return null; + } + } + } + logger.error("invalid role definition [{}] in roles file [{}]. skipping role...", roleName, path.toAbsolutePath()); + } catch (ElasticsearchParseException e) { + assert roleName != null; + if (logger.isDebugEnabled()) { + final String finalRoleName = roleName; + logger.debug((Supplier) () -> new ParameterizedMessage("parsing exception for role [{}]", finalRoleName), e); + } else { + logger.error(e.getMessage() + ". skipping role..."); + } + } catch (IOException e) { + if (roleName != null) { + final String finalRoleName = roleName; + logger.error( + (Supplier) () -> new ParameterizedMessage( + "invalid role definition [{}] in roles file [{}]. skipping role...", + finalRoleName, + path), + e); + } else { + logger.error( + (Supplier) () -> new ParameterizedMessage( + "invalid role definition in roles file [{}]. 
skipping role...", + path), + e); + } + } + return null; + } + + @Nullable + private static RoleDescriptor checkDescriptor(RoleDescriptor descriptor, Path path, Logger logger, Settings settings) { + String roleName = descriptor.getName(); + // first check if FLS/DLS is enabled on the role... + for (RoleDescriptor.IndicesPrivileges privilege : descriptor.getIndicesPrivileges()) { + if ((privilege.getQuery() != null || privilege.getGrantedFields() != null || privilege.getDeniedFields() != null) + && XPackSettings.DLS_FLS_ENABLED.get(settings) == false) { + logger.error("invalid role definition [{}] in roles file [{}]. document and field level security is not " + + "enabled. set [{}] to [true] in the configuration file. skipping role...", roleName, path + .toAbsolutePath(), XPackSettings.DLS_FLS_ENABLED.getKey()); + return null; + } + } + return descriptor; + } + + private static List roleSegments(Path path) throws IOException { + List segments = new ArrayList<>(); + StringBuilder builder = null; + for (String line : Files.readAllLines(path, StandardCharsets.UTF_8)) { + if (!SKIP_LINE.matcher(line).matches()) { + if (IN_SEGMENT_LINE.matcher(line).matches()) { + if (builder != null) { + builder.append(line).append("\n"); + } + } else { + if (builder != null) { + segments.add(builder.toString()); + } + builder = new StringBuilder(line).append("\n"); + } + } + } + if (builder != null) { + segments.add(builder.toString()); + } + return segments; + } + + private class FileListener implements FileChangesListener { + + @Override + public void onFileCreated(Path file) { + onFileChanged(file); + } + + @Override + public void onFileDeleted(Path file) { + onFileChanged(file); + } + + @Override + public void onFileChanged(Path file) { + if (file.equals(FileRolesStore.this.file)) { + try { + permissions = parseFile(file, logger, settings, licenseState); + logger.info("updated roles (roles file [{}] {})", file.toAbsolutePath(), Files.exists(file) ? "changed" : "removed"); + } catch (Exception e) { + logger.error( + (Supplier) () -> new ParameterizedMessage( + "could not reload roles file [{}]. Current roles remain unmodified", file.toAbsolutePath()), e); + return; + } + + synchronized (FileRolesStore.this) { + listeners.forEach(Runnable::run); + } + } + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java new file mode 100644 index 0000000000000..4f0bb5b2e3c32 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java @@ -0,0 +1,377 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authz.store; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.MultiSearchResponse; +import org.elasticsearch.action.search.MultiSearchResponse.Item; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.action.support.TransportActions; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.security.ScrollHelper; +import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheRequest; +import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheResponse; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequest; +import org.elasticsearch.xpack.core.security.action.role.PutRoleRequest; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.SecurityLifecycleService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Supplier; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.existsQuery; +import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; +import static org.elasticsearch.xpack.core.security.SecurityField.setting; +import static org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ROLE_TYPE; + +/** + * NativeRolesStore is a {@code RolesStore} that, instead of reading from a + * file, reads from an Elasticsearch index. Unlike the file-based roles + * store, NativeRolesStore can be used to add a role to the store by inserting + * the document into the administrative index.
+ * + * No caching is done by this class, it is handled at a higher level + */ +public class NativeRolesStore extends AbstractComponent { + + // these are no longer used, but leave them around for users upgrading + private static final Setting CACHE_SIZE_SETTING = + Setting.intSetting(setting("authz.store.roles.index.cache.max_size"), 10000, Property.NodeScope, Property.Deprecated); + private static final Setting CACHE_TTL_SETTING = Setting.timeSetting(setting("authz.store.roles.index.cache.ttl"), + TimeValue.timeValueMinutes(20), Property.NodeScope, Property.Deprecated); + private static final String ROLE_DOC_TYPE = "doc"; + + private final Client client; + private final XPackLicenseState licenseState; + + private SecurityClient securityClient; + private final SecurityLifecycleService securityLifecycleService; + + public NativeRolesStore(Settings settings, Client client, XPackLicenseState licenseState, + SecurityLifecycleService securityLifecycleService) { + super(settings); + this.client = client; + this.securityClient = new SecurityClient(client); + this.licenseState = licenseState; + this.securityLifecycleService = securityLifecycleService; + } + + /** + * Retrieve a list of roles, if rolesToGet is null or empty, fetch all roles + */ + public void getRoleDescriptors(String[] names, final ActionListener> listener) { + if (securityLifecycleService.isSecurityIndexExisting() == false) { + // TODO remove this short circuiting and fix tests that fail without this! + listener.onResponse(Collections.emptyList()); + } else if (names != null && names.length == 1) { + getRoleDescriptor(Objects.requireNonNull(names[0]), ActionListener.wrap(roleDescriptor -> + listener.onResponse(roleDescriptor == null ? Collections.emptyList() : Collections.singletonList(roleDescriptor)), + listener::onFailure)); + } else { + securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + QueryBuilder query; + if (names == null || names.length == 0) { + query = QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE); + } else { + final String[] roleNames = Arrays.stream(names).map(s -> getIdForUser(s)).toArray(String[]::new); + query = QueryBuilders.boolQuery().filter(QueryBuilders.idsQuery(ROLE_DOC_TYPE).addIds(roleNames)); + } + final Supplier supplier = client.threadPool().getThreadContext().newRestorableContext(false); + try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN)) { + SearchRequest request = client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME) + .setScroll(TimeValue.timeValueSeconds(10L)) + .setQuery(query) + .setSize(1000) + .setFetchSource(true) + .request(); + request.indicesOptions().ignoreUnavailable(); + ScrollHelper.fetchAllByEntity(client, request, new ContextPreservingActionListener<>(supplier, listener), + (hit) -> transformRole(hit.getId(), hit.getSourceRef(), logger, licenseState)); + } + }); + } + } + + public void deleteRole(final DeleteRoleRequest deleteRoleRequest, final ActionListener listener) { + securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + DeleteRequest request = client.prepareDelete(SecurityLifecycleService.SECURITY_INDEX_NAME, + ROLE_DOC_TYPE, getIdForUser(deleteRoleRequest.name())).request(); + request.setRefreshPolicy(deleteRoleRequest.getRefreshPolicy()); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, + new ActionListener() { + @Override + public void 
onResponse(DeleteResponse deleteResponse) { + clearRoleCache(deleteRoleRequest.name(), listener, + deleteResponse.getResult() == DocWriteResponse.Result.DELETED); + } + + @Override + public void onFailure(Exception e) { + logger.error("failed to delete role from the index", e); + listener.onFailure(e); + } + }, client::delete); + }); + } + + public void putRole(final PutRoleRequest request, final RoleDescriptor role, final ActionListener listener) { + if (licenseState.isDocumentAndFieldLevelSecurityAllowed()) { + innerPutRole(request, role, listener); + } else if (role.isUsingDocumentOrFieldLevelSecurity()) { + listener.onFailure(LicenseUtils.newComplianceException("field and document level security")); + } else { + innerPutRole(request, role, listener); + } + } + + // pkg-private for testing + void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final ActionListener listener) { + securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + final XContentBuilder xContentBuilder; + try { + xContentBuilder = role.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS, true); + } catch (IOException e) { + listener.onFailure(e); + return; + } + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, + client.prepareIndex(SecurityLifecycleService.SECURITY_INDEX_NAME, ROLE_DOC_TYPE, getIdForUser(role.getName())) + .setSource(xContentBuilder) + .setRefreshPolicy(request.getRefreshPolicy()) + .request(), + new ActionListener() { + @Override + public void onResponse(IndexResponse indexResponse) { + final boolean created = indexResponse.getResult() == DocWriteResponse.Result.CREATED; + clearRoleCache(role.getName(), listener, created); + } + + @Override + public void onFailure(Exception e) { + logger.error(new ParameterizedMessage("failed to put role [{}]", request.name()), e); + listener.onFailure(e); + } + }, client::index); + }); + } + + public void usageStats(ActionListener> listener) { + Map usageStats = new HashMap<>(); + if (securityLifecycleService.isSecurityIndexExisting() == false) { + usageStats.put("size", 0L); + usageStats.put("fls", false); + usageStats.put("dls", false); + listener.onResponse(usageStats); + } else { + securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, + client.prepareMultiSearch() + .add(client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME) + .setQuery(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) + .setSize(0)) + .add(client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME) + .setQuery(QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) + .must(QueryBuilders.boolQuery() + .should(existsQuery("indices.field_security.grant")) + .should(existsQuery("indices.field_security.except")) + // for backwardscompat with 2.x + .should(existsQuery("indices.fields")))) + .setSize(0) + .setTerminateAfter(1)) + .add(client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME) + .setQuery(QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) + .filter(existsQuery("indices.query"))) + .setSize(0) + .setTerminateAfter(1)) + .request(), + new ActionListener() { + @Override + public void onResponse(MultiSearchResponse items) { + Item[] responses = items.getResponses(); + if (responses[0].isFailure()) { + 
usageStats.put("size", 0); + } else { + usageStats.put("size", responses[0].getResponse().getHits().getTotalHits()); + } + + if (responses[1].isFailure()) { + usageStats.put("fls", false); + } else { + usageStats.put("fls", responses[1].getResponse().getHits().getTotalHits() > 0L); + } + + if (responses[2].isFailure()) { + usageStats.put("dls", false); + } else { + usageStats.put("dls", responses[2].getResponse().getHits().getTotalHits() > 0L); + } + listener.onResponse(usageStats); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }, client::multiSearch)); + } + } + + private void getRoleDescriptor(final String roleId, ActionListener roleActionListener) { + if (securityLifecycleService.isSecurityIndexExisting() == false) { + // TODO remove this short circuiting and fix tests that fail without this! + roleActionListener.onResponse(null); + } else { + securityLifecycleService.prepareIndexIfNeededThenExecute(roleActionListener::onFailure, () -> + executeGetRoleRequest(roleId, new ActionListener() { + @Override + public void onResponse(GetResponse response) { + final RoleDescriptor descriptor = transformRole(response); + roleActionListener.onResponse(descriptor); + } + + @Override + public void onFailure(Exception e) { + // if the index or the shard is not there / available we just claim the role is not there + if (TransportActions.isShardNotAvailableException(e)) { + logger.warn((org.apache.logging.log4j.util.Supplier) () -> + new ParameterizedMessage("failed to load role [{}] index not available", roleId), e); + roleActionListener.onResponse(null); + } else { + logger.error(new ParameterizedMessage("failed to load role [{}]", roleId), e); + roleActionListener.onFailure(e); + } + } + })); + } + } + + private void executeGetRoleRequest(String role, ActionListener listener) { + securityLifecycleService.prepareIndexIfNeededThenExecute(listener::onFailure, () -> + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, + client.prepareGet(SecurityLifecycleService.SECURITY_INDEX_NAME, + ROLE_DOC_TYPE, getIdForUser(role)).request(), + listener, + client::get)); + } + + private void clearRoleCache(final String role, ActionListener listener, Response response) { + ClearRolesCacheRequest request = new ClearRolesCacheRequest().names(role); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, + new ActionListener() { + @Override + public void onResponse(ClearRolesCacheResponse nodes) { + listener.onResponse(response); + } + + @Override + public void onFailure(Exception e) { + logger.error(new ParameterizedMessage("unable to clear cache for role [{}]", role), e); + ElasticsearchException exception = new ElasticsearchException("clearing the cache for [" + role + + "] failed. 
please clear the role cache manually", e); + listener.onFailure(exception); + } + }, securityClient::clearRolesCache); + } + + @Nullable + private RoleDescriptor transformRole(GetResponse response) { + if (response.isExists() == false) { + return null; + } + + return transformRole(response.getId(), response.getSourceAsBytesRef(), logger, licenseState); + } + + @Nullable + static RoleDescriptor transformRole(String id, BytesReference sourceBytes, Logger logger, XPackLicenseState licenseState) { + assert id.startsWith(ROLE_TYPE) : "[" + id + "] does not have role prefix"; + final String name = id.substring(ROLE_TYPE.length() + 1); + try { + // we pass true as last parameter because we do not want to reject permissions if the field permissions + // are given in 2.x syntax + RoleDescriptor roleDescriptor = RoleDescriptor.parse(name, sourceBytes, true, XContentType.JSON); + if (licenseState.isDocumentAndFieldLevelSecurityAllowed()) { + return roleDescriptor; + } else { + final boolean dlsEnabled = + Arrays.stream(roleDescriptor.getIndicesPrivileges()).anyMatch(IndicesPrivileges::isUsingDocumentLevelSecurity); + final boolean flsEnabled = + Arrays.stream(roleDescriptor.getIndicesPrivileges()).anyMatch(IndicesPrivileges::isUsingFieldLevelSecurity); + if (dlsEnabled || flsEnabled) { + List unlicensedFeatures = new ArrayList<>(2); + if (flsEnabled) { + unlicensedFeatures.add("fls"); + } + if (dlsEnabled) { + unlicensedFeatures.add("dls"); + } + Map transientMap = new HashMap<>(2); + transientMap.put("unlicensed_features", unlicensedFeatures); + transientMap.put("enabled", false); + return new RoleDescriptor(roleDescriptor.getName(), roleDescriptor.getClusterPrivileges(), + roleDescriptor.getIndicesPrivileges(), roleDescriptor.getRunAs(), roleDescriptor.getMetadata(), transientMap); + } else { + return roleDescriptor; + } + + } + } catch (Exception e) { + logger.error(new ParameterizedMessage("error in the format of data for role [{}]", name), e); + return null; + } + } + + public static void addSettings(List> settings) { + settings.add(CACHE_SIZE_SETTING); + settings.add(CACHE_TTL_SETTING); + } + + /** + * Gets the document's id field for the given role name. + */ + private static String getIdForUser(final String roleName) { + return ROLE_TYPE + "-" + roleName; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/crypto/tool/SystemKeyTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/crypto/tool/SystemKeyTool.java new file mode 100644 index 0000000000000..bc2a0d415b3ee --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/crypto/tool/SystemKeyTool.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.crypto.tool; + +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackPlugin; + +import javax.crypto.KeyGenerator; +import javax.crypto.SecretKey; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.attribute.PosixFilePermission; +import java.security.NoSuchAlgorithmException; +import java.util.List; +import java.util.Locale; +import java.util.Set; + +public class SystemKeyTool extends EnvironmentAwareCommand { + + static final String KEY_ALGO = "HmacSHA512"; + static final int KEY_SIZE = 1024; + + private final OptionSpec arguments; + + SystemKeyTool() { + super("system key tool"); + arguments = parser.nonOptions("key path"); + } + + public static final Set PERMISSION_OWNER_READ_WRITE = Sets.newHashSet(PosixFilePermission.OWNER_READ, + PosixFilePermission.OWNER_WRITE); + + public static void main(String[] args) throws Exception { + final SystemKeyTool tool = new SystemKeyTool(); + int status = main(tool, args, Terminal.DEFAULT); + if (status != ExitCodes.OK) { + exit(status); + } + } + + static int main(SystemKeyTool tool, String[] args, Terminal terminal) throws Exception { + return tool.main(args, terminal); + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + final Path keyPath; + + if (options.hasArgument(arguments)) { + List args = arguments.values(options); + if (args.size() > 1) { + throw new UserException(ExitCodes.USAGE, "No more than one key path can be supplied"); + } + keyPath = parsePath(args.get(0)); + } else { + keyPath = XPackPlugin.resolveConfigFile(env, "system_key"); + } + + // write the key + terminal.println(Terminal.Verbosity.VERBOSE, "generating..."); + byte[] key = generateKey(); + terminal.println(String.format(Locale.ROOT, "Storing generated key in [%s]...", keyPath.toAbsolutePath())); + Files.write(keyPath, key, StandardOpenOption.CREATE_NEW); + + // set permissions to 600 + PosixFileAttributeView view = Files.getFileAttributeView(keyPath, PosixFileAttributeView.class); + if (view != null) { + view.setPermissions(PERMISSION_OWNER_READ_WRITE); + terminal.println("Ensure the generated key can be read by the user that Elasticsearch runs as, " + + "permissions are set to owner read/write only"); + } + } + + static byte[] generateKey() { + return generateSecretKey(KEY_SIZE).getEncoded(); + } + + static SecretKey generateSecretKey(int keyLength) { + try { + KeyGenerator generator = KeyGenerator.getInstance(KEY_ALGO); + generator.init(keyLength); + return generator.generateKey(); + } catch (NoSuchAlgorithmException e) { + throw new ElasticsearchException("failed to generate key", e); + } + } + + + @SuppressForbidden(reason = "Parsing command line path") + private static Path parsePath(String path) { + return PathUtils.get(path); + } + +} \ No newline at end of file diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/RemoteHostHeader.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/RemoteHostHeader.java new file mode 100644 index 0000000000000..dcee6535cf337 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/RemoteHostHeader.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.rest; + +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.rest.RestRequest; + +import java.net.InetSocketAddress; +import java.net.SocketAddress; + +public class RemoteHostHeader { + + static final String KEY = "_rest_remote_address"; + + /** + * Extracts the remote address from the given rest request and puts it in the request context. This will + * then be copied to the subsequent action requests. + */ + public static void process(RestRequest request, ThreadContext threadContext) { + threadContext.putTransient(KEY, request.getRemoteAddress()); + } + + /** + * Extracts the rest remote address from the message context. If not found, returns {@code null}. Transport + * messages that were created by rest handlers should have this in their context. + */ + public static InetSocketAddress restRemoteAddress(ThreadContext threadContext) { + SocketAddress address = threadContext.getTransient(KEY); + if (address != null && address instanceof InetSocketAddress) { + return (InetSocketAddress) address; + } + return null; + } + + public static void putRestRemoteAddress(ThreadContext threadContext, SocketAddress address) { + threadContext.putTransient(KEY, address); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/SecurityRestFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/SecurityRestFilter.java new file mode 100644 index 0000000000000..0f4da8b847c58 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/SecurityRestFilter.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.security.rest; + +import io.netty.handler.ssl.SslHandler; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.http.netty4.Netty4HttpRequest; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestRequest.Method; +import org.elasticsearch.xpack.core.security.rest.RestRequestFilter; +import org.elasticsearch.xpack.security.authc.AuthenticationService; +import org.elasticsearch.xpack.security.transport.ServerTransportFilter; + +import java.io.IOException; + +public class SecurityRestFilter implements RestHandler { + + private static final Logger logger = ESLoggerFactory.getLogger(SecurityRestFilter.class); + + private final RestHandler restHandler; + private final AuthenticationService service; + private final XPackLicenseState licenseState; + private final ThreadContext threadContext; + private final boolean extractClientCertificate; + + public SecurityRestFilter(XPackLicenseState licenseState, ThreadContext threadContext, AuthenticationService service, + RestHandler restHandler, boolean extractClientCertificate) { + this.restHandler = restHandler; + this.service = service; + this.licenseState = licenseState; + this.threadContext = threadContext; + this.extractClientCertificate = extractClientCertificate; + } + + @Override + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { + if (licenseState.isSecurityEnabled() && licenseState.isAuthAllowed() && request.method() != Method.OPTIONS) { + // CORS - allow for preflight unauthenticated OPTIONS request + if (extractClientCertificate) { + Netty4HttpRequest nettyHttpRequest = (Netty4HttpRequest) request; + SslHandler handler = nettyHttpRequest.getChannel().pipeline().get(SslHandler.class); + assert handler != null; + ServerTransportFilter.extractClientCertificates(logger, threadContext, handler.engine(), nettyHttpRequest.getChannel()); + } + service.authenticate(maybeWrapRestRequest(request), ActionListener.wrap( + authentication -> { + RemoteHostHeader.process(request, threadContext); + restHandler.handleRequest(request, channel, client); + }, e -> { + try { + channel.sendResponse(new BytesRestResponse(channel, e)); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.error((Supplier) () -> + new ParameterizedMessage("failed to send failure response for uri [{}]", request.uri()), inner); + } + })); + } else { + restHandler.handleRequest(request, channel, client); + } + } + + RestRequest maybeWrapRestRequest(RestRequest restRequest) throws IOException { + if (restHandler instanceof RestRequestFilter) { + return ((RestRequestFilter)restHandler).getFilteredRequest(restRequest); + } + return restRequest; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java new file mode 100644 index 0000000000000..b280b3a89a204 
--- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.rest.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse; +import org.elasticsearch.xpack.core.security.user.User; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +public class RestAuthenticateAction extends SecurityBaseRestHandler { + + private final SecurityContext securityContext; + + public RestAuthenticateAction(Settings settings, RestController controller, SecurityContext securityContext, + XPackLicenseState licenseState) { + super(settings, licenseState); + this.securityContext = securityContext; + controller.registerHandler(GET, "/_xpack/security/_authenticate", this); + } + + @Override + public String getName() { + return "xpack_security_authenticate_action"; + } + + @Override + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + final User user = securityContext.getUser(); + if (user == null) { + return restChannel -> { throw new IllegalStateException("we should never have a null user and invoke this consumer"); }; + } + final String username = user.principal(); + + return channel -> client.execute(AuthenticateAction.INSTANCE, new AuthenticateRequest(username), + new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(AuthenticateResponse authenticateResponse, XContentBuilder builder) throws Exception { + authenticateResponse.user().toXContent(builder, ToXContent.EMPTY_PARAMS); + return new BytesRestResponse(RestStatus.OK, builder); + } + }); + + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandler.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandler.java new file mode 100644 index 0000000000000..0b2642ae5bec4 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandler.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.rest.action; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.xpack.core.XPackField; + +import java.io.IOException; + +/** + * Base class for security rest handlers. This handler takes care of ensuring that the license + * level is valid so that security can be used! + */ +public abstract class SecurityBaseRestHandler extends BaseRestHandler { + + protected final XPackLicenseState licenseState; + + /** + * @param settings the node's settings + * @param licenseState the license state that will be used to determine if security is licensed + */ + protected SecurityBaseRestHandler(Settings settings, XPackLicenseState licenseState) { + super(settings); + this.licenseState = licenseState; + } + + /** + * Calls the {@link #innerPrepareRequest(RestRequest, NodeClient)} method and then checks the + * license state. If the license state allows auth, the result from + * {@link #innerPrepareRequest(RestRequest, NodeClient)} is returned, otherwise a default error + * response will be returned indicating that security is not licensed. + * + * Note: the implementing rest handler is called before the license is checked so that we do not + * trip the unused parameters check. + */ + protected final RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + RestChannelConsumer consumer = innerPrepareRequest(request, client); + final Exception failedFeature = checkFeatureAvailable(request); + if (failedFeature == null) { + return consumer; + } else { + return channel -> channel.sendResponse(new BytesRestResponse(channel, failedFeature)); + } + } + + /** + * Check whether the given request is allowed within the current license state and setup, + * and return the name of any unlicensed feature. + * By default this returns an exception if security is not available under the current license or + * security is not enabled. + * Sub-classes can override this method if they have additional requirements. + * + * @return {@code null} if all required features are available, otherwise an exception to be + * sent to the requestor + */ + protected Exception checkFeatureAvailable(RestRequest request) { + if (licenseState.isSecurityAvailable() == false) { + return LicenseUtils.newComplianceException(XPackField.SECURITY); + } else if (licenseState.isSecurityEnabled() == false) { + if (licenseState.isTrialLicense()) { + return new ElasticsearchException("Security must be explicitly enabled when using a trial license. " + + "Enable security by setting [xpack.security.enabled] to [true] in the elasticsearch.yml file " + + "and restart the node."); + } else { + return new IllegalStateException("Security is not enabled but a security rest handler is registered"); + } + } else { + return null; + } + } + + + /** + * Implementers should implement this method as they normally would for + * {@link BaseRestHandler#prepareRequest(RestRequest, NodeClient)} and ensure that all request + * parameters are consumed prior to returning a value.
The returned value is not guaranteed to + * be executed unless security is licensed and all request parameters are known. + */ + protected abstract RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException; +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenAction.java new file mode 100644 index 0000000000000..e2cfb44a2580d --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenAction.java @@ -0,0 +1,200 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.rest.action.oauth2; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenAction; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenRequest; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenRequestBuilder; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenResponse; +import org.elasticsearch.xpack.core.security.action.token.RefreshTokenAction; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Locale; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +/** + * An implementation of an OAuth2-esque API for retrieval of an access token. + * This API does not conform to the RFC completely as it uses XContent for the request body + * instead of form-encoded data.
This is a relatively common modification of the OAuth2 + * specification as this aspect does not make the most sense since the response body is + * expected to be JSON + */ +public final class RestGetTokenAction extends SecurityBaseRestHandler { + + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("token_request", + a -> new CreateTokenRequest((String) a[0], (String) a[1], (SecureString) a[2], (String) a[3], (String) a[4])); + static { + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("grant_type")); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("username")); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), parser -> new SecureString( + Arrays.copyOfRange(parser.textCharacters(), parser.textOffset(), parser.textOffset() + parser.textLength())), + new ParseField("password"), ValueType.STRING); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("scope")); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("refresh_token")); + } + + public RestGetTokenAction(Settings settings, RestController controller, XPackLicenseState xPackLicenseState) { + super(settings, xPackLicenseState); + controller.registerHandler(POST, "/_xpack/security/oauth2/token", this); + } + + @Override + public String getName() { + return "xpack_security_get_token_action"; + } + + @Override + protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client)throws IOException { + try (XContentParser parser = request.contentParser()) { + final CreateTokenRequest tokenRequest = PARSER.parse(parser, null); + final Action action = + "refresh_token".equals(tokenRequest.getGrantType()) ? RefreshTokenAction.INSTANCE : CreateTokenAction.INSTANCE; + return channel -> client.execute(action, tokenRequest, + // this doesn't use the RestBuilderListener since we need to override the + // handling of failures in some cases. 
+ new CreateTokenResponseActionListener(channel, request, logger)); + } + } + + static class CreateTokenResponseActionListener implements ActionListener { + + private final RestChannel channel; + private final RestRequest request; + private final Logger logger; + + CreateTokenResponseActionListener(RestChannel restChannel, RestRequest restRequest, + Logger logger) { + this.channel = restChannel; + this.request = restRequest; + this.logger = logger; + } + + @Override + public void onResponse(CreateTokenResponse createTokenResponse) { + try (XContentBuilder builder = channel.newBuilder()) { + channel.sendResponse(new BytesRestResponse(RestStatus.OK, createTokenResponse.toXContent(builder, request))); + } catch (IOException e) { + onFailure(e); + } + } + + @Override + public void onFailure(Exception e) { + if (e instanceof ActionRequestValidationException) { + ActionRequestValidationException validationException = (ActionRequestValidationException) e; + final TokenRequestError error; + if (validationException.validationErrors().stream().anyMatch(s -> s.contains("grant_type"))) { + error = TokenRequestError.UNSUPPORTED_GRANT_TYPE; + } else { + error = TokenRequestError.INVALID_REQUEST; + } + + sendTokenErrorResponse(error, validationException.getMessage(), e); + } else if (e instanceof ElasticsearchSecurityException && "invalid_grant".equals(e.getMessage()) && + ((ElasticsearchSecurityException) e).getHeader("error_description").size() == 1) { + sendTokenErrorResponse(TokenRequestError.INVALID_GRANT, + ((ElasticsearchSecurityException) e).getHeader("error_description").get(0), e); + } else { + sendFailure(e); + } + } + + void sendTokenErrorResponse(TokenRequestError error, String description, Exception e) { + try (XContentBuilder builder = channel.newErrorBuilder()) { + // defined by https://tools.ietf.org/html/rfc6749#section-5.2 + builder.startObject() + .field("error", error.toString().toLowerCase(Locale.ROOT)) + .field("error_description", description) + .endObject(); + channel.sendResponse(new BytesRestResponse(RestStatus.BAD_REQUEST, builder)); + } catch (IOException ioe) { + ioe.addSuppressed(e); + sendFailure(e); + } + } + + void sendFailure(Exception e) { + try { + channel.sendResponse(new BytesRestResponse(channel, e)); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.error("failed to send failure response", inner); + } + } + } + + // defined by https://tools.ietf.org/html/rfc6749#section-5.2 + enum TokenRequestError { + /** + * The request is missing a required parameter, includes an unsupported + * parameter value (other than grant type), repeats a parameter, + * includes multiple credentials, utilizes more than one mechanism for + * authenticating the client, or is otherwise malformed. + */ + INVALID_REQUEST, + + /** + * Client authentication failed (e.g., unknown client, no client + * authentication included, or unsupported authentication method). The + * authorization server MAY return an HTTP 401 (Unauthorized) status + * code to indicate which HTTP authentication schemes are supported. If + * the client attempted to authenticate via the "Authorization" request + * header field, the authorization server MUST respond with an HTTP 401 + * (Unauthorized) status code and include the "WWW-Authenticate" + * response header field matching the authentication scheme used by the + * client. 
+ */ + INVALID_CLIENT, + + /** + * The provided authorization grant (e.g., authorization code, resource + * owner credentials) or refresh token is invalid, expired, revoked, + * does not match the redirection URI used in the authorization request, + * or was issued to another client. + */ + INVALID_GRANT, + + /** + * The authenticated client is not authorized to use this authorization + * grant type. + */ + UNAUTHORIZED_CLIENT, + + /** + * The authorization grant type is not supported by the authorization + * server. + */ + UNSUPPORTED_GRANT_TYPE, + + /** + * The requested scope is invalid, unknown, malformed, or exceeds the + * scope granted by the resource owner. + */ + INVALID_SCOPE + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestInvalidateTokenAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestInvalidateTokenAction.java new file mode 100644 index 0000000000000..d76f4da7d8ce0 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestInvalidateTokenAction.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.rest.action.oauth2; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenAction; +import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenRequest; +import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenResponse; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.DELETE; + +/** + * Rest handler for handling access token invalidation requests + */ +public final class RestInvalidateTokenAction extends SecurityBaseRestHandler { + + static final ConstructingObjectParser, Void> PARSER = + new ConstructingObjectParser<>("invalidate_token", a -> new Tuple<>((String) a[0], (String) a[1])); + static { + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("token")); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), new ParseField("refresh_token")); + } + + public RestInvalidateTokenAction(Settings settings, RestController controller, XPackLicenseState xPackLicenseState) { + super(settings, xPackLicenseState); + controller.registerHandler(DELETE, "/_xpack/security/oauth2/token", this); + } + + @Override + public String getName() { + return "xpack_security_invalidate_token_action"; + } + + 
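+    // Editorial sketch, not part of the original patch: the body parsed by PARSER above carries exactly one of the two declared fields, +    // so an invalidation request against the endpoint registered in the constructor would look roughly like +    //   DELETE /_xpack/security/oauth2/token   with a body of   {"token": "..."}   or   {"refresh_token": "..."} +    // where the "..." values are placeholders for the token string to invalidate; sending both fields is rejected by innerPrepareRequest below. +   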
@Override + protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + try (XContentParser parser = request.contentParser()) { + final Tuple tuple = PARSER.parse(parser, null); + final String token = tuple.v1(); + final String refreshToken = tuple.v2(); + + final String tokenString; + final InvalidateTokenRequest.Type type; + if (Strings.hasLength(token) && Strings.hasLength(refreshToken)) { + throw new IllegalArgumentException("only one of [token, refresh_token] may be sent per request"); + } else if (Strings.hasLength(token)) { + tokenString = token; + type = InvalidateTokenRequest.Type.ACCESS_TOKEN; + } else if (Strings.hasLength(refreshToken)) { + tokenString = refreshToken; + type = InvalidateTokenRequest.Type.REFRESH_TOKEN; + } else { + tokenString = null; + type = null; + } + + final InvalidateTokenRequest tokenRequest = new InvalidateTokenRequest(tokenString, type); + return channel -> client.execute(InvalidateTokenAction.INSTANCE, tokenRequest, + new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(InvalidateTokenResponse invalidateResp, + XContentBuilder builder) throws Exception { + return new BytesRestResponse(RestStatus.OK, builder.startObject() + .field("created", invalidateResp.isCreated()) + .endObject()); + } + }); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/realm/RestClearRealmCacheAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/realm/RestClearRealmCacheAction.java new file mode 100644 index 0000000000000..cc507fdfb517f --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/realm/RestClearRealmCacheAction.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.rest.action.realm; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestActions.NodesResponseRestListener; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheRequest; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +public final class RestClearRealmCacheAction extends SecurityBaseRestHandler { + + public RestClearRealmCacheAction(Settings settings, RestController controller, XPackLicenseState licenseState) { + super(settings, licenseState); + controller.registerHandler(POST, "/_xpack/security/realm/{realms}/_clear_cache", this); + } + + @Override + public String getName() { + return "xpack_security_clear_realm_cache_action"; + } + + @Override + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + String[] realms = request.paramAsStringArrayOrEmptyIfAll("realms"); + String[] usernames = request.paramAsStringArrayOrEmptyIfAll("usernames"); + + ClearRealmCacheRequest req = new ClearRealmCacheRequest().realms(realms).usernames(usernames); + + return channel -> new SecurityClient(client).clearRealmCache(req, new NodesResponseRestListener<>(channel)); + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestClearRolesCacheAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestClearRolesCacheAction.java new file mode 100644 index 0000000000000..e60ce90aa88a4 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestClearRolesCacheAction.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.rest.action.role; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestActions.NodesResponseRestListener; +import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheRequest; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +public final class RestClearRolesCacheAction extends SecurityBaseRestHandler { + + public RestClearRolesCacheAction(Settings settings, RestController controller, XPackLicenseState licenseState) { + super(settings, licenseState); + controller.registerHandler(POST, "/_xpack/security/role/{name}/_clear_cache", this); + } + + @Override + public String getName() { + return "xpack_security_clear_roles_cache_action"; + } + + @Override + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + String[] roles = request.paramAsStringArrayOrEmptyIfAll("name"); + + ClearRolesCacheRequest req = new ClearRolesCacheRequest().names(roles); + + return channel -> new SecurityClient(client).clearRolesCache(req, new NodesResponseRestListener<>(channel)); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestDeleteRoleAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestDeleteRoleAction.java new file mode 100644 index 0000000000000..f4ec87747045f --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestDeleteRoleAction.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
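The roles cache handler works the same way, taking the role names to evict from the `{name}` path segment. An illustrative call (role name is a placeholder):

```sh
# Drop the cached definition of the 'logstash_writer' role on all nodes.
curl -u elastic -X POST "localhost:9200/_xpack/security/role/logstash_writer/_clear_cache"
```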
+ */ +package org.elasticsearch.xpack.security.rest.action.role; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleResponse; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.DELETE; + +/** + * Rest endpoint to delete a Role from the security index + */ +public class RestDeleteRoleAction extends SecurityBaseRestHandler { + + public RestDeleteRoleAction(Settings settings, RestController controller, XPackLicenseState licenseState) { + super(settings, licenseState); + controller.registerHandler(DELETE, "/_xpack/security/role/{name}", this); + } + + @Override + public String getName() { + return "xpack_security_delete_role_action"; + } + + @Override + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + final String name = request.param("name"); + final String refresh = request.param("refresh"); + + return channel -> new SecurityClient(client).prepareDeleteRole(name) + .setRefreshPolicy(refresh) + .execute(new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(DeleteRoleResponse response, XContentBuilder builder) throws Exception { + return new BytesRestResponse( + response.found() ? RestStatus.OK : RestStatus.NOT_FOUND, + builder.startObject().field("found", response.found()).endObject()); + } + }); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestGetRolesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestGetRolesAction.java new file mode 100644 index 0000000000000..b4c394dfa513b --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestGetRolesAction.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
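As the `buildResponse` method above shows, deleting a role answers with `{"found": true}` and `200 OK`, or `{"found": false}` and `404 Not Found` when the role does not exist. A sketch of a call (role name and refresh value are placeholders; the `refresh` parameter is passed through to the refresh policy as-is):

```sh
# Delete a role and make the change visible immediately.
curl -u elastic -X DELETE "localhost:9200/_xpack/security/role/logstash_writer?refresh=true"
```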
+ */ +package org.elasticsearch.xpack.security.rest.action.role; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.role.GetRolesResponse; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +/** + * Rest endpoint to retrieve a Role from the security index + */ +public class RestGetRolesAction extends SecurityBaseRestHandler { + public RestGetRolesAction(Settings settings, RestController controller, XPackLicenseState licenseState) { + super(settings, licenseState); + controller.registerHandler(GET, "/_xpack/security/role/", this); + controller.registerHandler(GET, "/_xpack/security/role/{name}", this); + } + + @Override + public String getName() { + return "xpack_security_get_roles_action"; + } + + @Override + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + final String[] roles = request.paramAsStringArray("name", Strings.EMPTY_ARRAY); + return channel -> new SecurityClient(client).prepareGetRoles(roles).execute(new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(GetRolesResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + for (RoleDescriptor role : response.roles()) { + builder.field(role.getName(), role); + } + builder.endObject(); + + // if the user asked for specific roles, but none of them were found + // we'll return an empty result and 404 status code + if (roles.length != 0 && response.roles().length == 0) { + return new BytesRestResponse(RestStatus.NOT_FOUND, builder); + } + + // either the user asked for all roles, or at least one of the roles + // the user asked for was found + return new BytesRestResponse(RestStatus.OK, builder); + } + }); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestPutRoleAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestPutRoleAction.java new file mode 100644 index 0000000000000..ea22cdb3cf9f9 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/role/RestPutRoleAction.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
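Role retrieval either lists every role (no name given) or looks up specific names, returning `404` only when names were requested and none matched. Illustrative calls (the role name is a placeholder):

```sh
# List all roles, then fetch a single role by name.
curl -u elastic "localhost:9200/_xpack/security/role/"
curl -u elastic "localhost:9200/_xpack/security/role/logstash_writer"
```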
+ */ +package org.elasticsearch.xpack.security.rest.action.role; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.role.PutRoleRequestBuilder; +import org.elasticsearch.xpack.core.security.action.role.PutRoleResponse; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestRequest.Method.PUT; + +/** + * Rest endpoint to add a Role to the security index + */ +public class RestPutRoleAction extends SecurityBaseRestHandler { + + public RestPutRoleAction(Settings settings, RestController controller, XPackLicenseState licenseState) { + super(settings, licenseState); + controller.registerHandler(POST, "/_xpack/security/role/{name}", this); + controller.registerHandler(PUT, "/_xpack/security/role/{name}", this); + } + + @Override + public String getName() { + return "xpack_security_put_role_action"; + } + + @Override + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + PutRoleRequestBuilder requestBuilder = new SecurityClient(client) + .preparePutRole(request.param("name"), request.requiredContent(), request.getXContentType()) + .setRefreshPolicy(request.param("refresh")); + return channel -> requestBuilder.execute(new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(PutRoleResponse putRoleResponse, XContentBuilder builder) throws Exception { + return new BytesRestResponse(RestStatus.OK, builder.startObject().field("role", putRoleResponse).endObject()); + } + }); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestDeleteRoleMappingAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestDeleteRoleMappingAction.java new file mode 100644 index 0000000000000..ce1f9ad05954f --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestDeleteRoleMappingAction.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
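The PUT/POST role handler hands the raw request body to `PutRoleRequestBuilder`, so the body format is defined elsewhere in this change. The sketch below assumes the usual role-descriptor fields (`cluster`, `indices`) and is illustrative only:

```sh
# Create or update a role; the body fields are assumed from the role-descriptor format, not from this file.
curl -u elastic -X PUT "localhost:9200/_xpack/security/role/logstash_writer" \
  -H 'Content-Type: application/json' -d '{
  "cluster": ["monitor"],
  "indices": [ { "names": ["logstash-*"], "privileges": ["write", "create_index"] } ]
}'
```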
+ */ +package org.elasticsearch.xpack.security.rest.action.rolemapping; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingResponse; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.DELETE; + +/** + * Rest endpoint to delete a role-mapping from the {@link org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore} + */ +public class RestDeleteRoleMappingAction extends SecurityBaseRestHandler { + + public RestDeleteRoleMappingAction(Settings settings, RestController controller, XPackLicenseState licenseState) { + super(settings, licenseState); + controller.registerHandler(DELETE, "/_xpack/security/role_mapping/{name}", this); + } + + @Override + public String getName() { + return "xpack_security_delete_role_mapping_action"; + } + + @Override + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + final String name = request.param("name"); + final String refresh = request.param("refresh"); + + return channel -> new SecurityClient(client).prepareDeleteRoleMapping(name) + .setRefreshPolicy(refresh) + .execute(new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(DeleteRoleMappingResponse response, XContentBuilder builder) throws Exception { + return new BytesRestResponse(response.isFound() ? RestStatus.OK : RestStatus.NOT_FOUND, + builder.startObject().field("found", response.isFound()).endObject()); + } + }); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestGetRoleMappingsAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestGetRoleMappingsAction.java new file mode 100644 index 0000000000000..4d1f3d969fa41 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestGetRoleMappingsAction.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.rest.action.rolemapping; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsResponse; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +/** + * Rest endpoint to retrieve a role-mapping from the org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore + */ +public class RestGetRoleMappingsAction extends SecurityBaseRestHandler { + + public RestGetRoleMappingsAction(Settings settings, RestController controller, XPackLicenseState licenseState) { + super(settings, licenseState); + controller.registerHandler(GET, "/_xpack/security/role_mapping/", this); + controller.registerHandler(GET, "/_xpack/security/role_mapping/{name}", this); + } + + @Override + public String getName() { + return "xpack_security_get_role_mappings_action"; + } + + @Override + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + final String[] names = request.paramAsStringArrayOrEmptyIfAll("name"); + return channel -> new SecurityClient(client).prepareGetRoleMappings(names) + .execute(new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(GetRoleMappingsResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + for (ExpressionRoleMapping mapping : response.mappings()) { + builder.field(mapping.getName(), mapping); + } + builder.endObject(); + + // if the request specified mapping names, but nothing was found then return a 404 result + if (names.length != 0 && response.mappings().length == 0) { + return new BytesRestResponse(RestStatus.NOT_FOUND, builder); + } else { + return new BytesRestResponse(RestStatus.OK, builder); + } + } + }); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestPutRoleMappingAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestPutRoleMappingAction.java new file mode 100644 index 0000000000000..088364eb95c02 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestPutRoleMappingAction.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
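Role-mapping lookups mirror the role API: with no name every mapping is returned, while a request for specific names that match nothing yields a `404` with an empty object. For example (mapping name is a placeholder):

```sh
# Fetch all role mappings, then one mapping by name.
curl -u elastic "localhost:9200/_xpack/security/role_mapping/"
curl -u elastic "localhost:9200/_xpack/security/role_mapping/admins"
```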
+ */ +package org.elasticsearch.xpack.security.rest.action.rolemapping; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequestBuilder; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestRequest.Method.PUT; + +/** + * Rest endpoint to add a role-mapping to the native store + * + * @see org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore + */ +public class RestPutRoleMappingAction extends SecurityBaseRestHandler { + + public RestPutRoleMappingAction(Settings settings, RestController controller, XPackLicenseState licenseState) { + super(settings, licenseState); + controller.registerHandler(POST, "/_xpack/security/role_mapping/{name}", this); + controller.registerHandler(PUT, "/_xpack/security/role_mapping/{name}", this); + } + + @Override + public String getName() { + return "xpack_security_put_role_mappings_action"; + } + + @Override + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + final String name = request.param("name"); + PutRoleMappingRequestBuilder requestBuilder = new SecurityClient(client) + .preparePutRoleMapping(name, request.requiredContent(), request.getXContentType()) + .setRefreshPolicy(request.param("refresh")); + return channel -> requestBuilder.execute( + new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(PutRoleMappingResponse response, XContentBuilder builder) throws Exception { + return new BytesRestResponse(RestStatus.OK, builder.startObject().field("role_mapping", response).endObject()); + } + }); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/RestSamlAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/RestSamlAuthenticateAction.java new file mode 100644 index 0000000000000..8f241b0b14f83 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/RestSamlAuthenticateAction.java @@ -0,0 +1,103 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
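As with roles, the body of a PUT role-mapping request is parsed by `PutRoleMappingRequestBuilder` rather than in this handler; the example below assumes the standard mapping fields (`roles`, `enabled`, `rules`), and the group DN is a placeholder:

```sh
# Map members of an LDAP group onto the 'superuser' role.
curl -u elastic -X PUT "localhost:9200/_xpack/security/role_mapping/admins" \
  -H 'Content-Type: application/json' -d '{
  "roles": ["superuser"],
  "enabled": true,
  "rules": { "field": { "groups": "cn=admins,dc=example,dc=com" } }
}'
```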
+ */ +package org.elasticsearch.xpack.security.rest.action.saml; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.saml.SamlAuthenticateRequestBuilder; +import org.elasticsearch.xpack.core.security.action.saml.SamlAuthenticateResponse; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import java.io.IOException; +import java.util.Base64; +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +/** + * A REST handler that attempts to authenticate a user based on the provided SAML response/assertion. + */ +public class RestSamlAuthenticateAction extends SamlBaseRestHandler implements RestHandler { + + static class Input { + String content; + List ids; + + void setContent(String content) { + this.content = content; + } + + void setIds(List ids) { + this.ids = ids; + } + } + + static final ObjectParser PARSER = new ObjectParser<>("saml_authenticate", Input::new); + + static { + PARSER.declareString(Input::setContent, new ParseField("content")); + PARSER.declareStringArray(Input::setIds, new ParseField("ids")); + } + + public RestSamlAuthenticateAction(Settings settings, RestController controller, + XPackLicenseState licenseState) { + super(settings, licenseState); + controller.registerHandler(POST, "/_xpack/security/saml/authenticate", this); + } + + @Override + public String getName() { + return "xpack_security_saml_authenticate_action"; + } + + @Override + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + try (XContentParser parser = request.contentParser()) { + final Input input = PARSER.parse(parser, null); + logger.trace("SAML Authenticate: [{}...] 
[{}]", Strings.cleanTruncate(input.content, 128), input.ids); + return channel -> { + final byte[] bytes = decodeBase64(input.content); + final SamlAuthenticateRequestBuilder requestBuilder = new SecurityClient(client).prepareSamlAuthenticate(bytes, input.ids); + requestBuilder.execute(new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(SamlAuthenticateResponse response, XContentBuilder builder) throws Exception { + builder.startObject() + .field("username", response.getPrincipal()) + .field("access_token", response.getTokenString()) + .field("refresh_token", response.getRefreshToken()) + .field("expires_in", response.getExpiresIn().seconds()) + .endObject(); + return new BytesRestResponse(RestStatus.OK, builder); + } + }); + }; + } + } + + private byte[] decodeBase64(String content) { + content = content.replaceAll("\\s+", ""); + try { + return Base64.getDecoder().decode(content); + } catch (IllegalArgumentException e) { + logger.info("Failed to decode base64 string [{}] - {}", content, e.toString()); + throw e; + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/RestSamlInvalidateSessionAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/RestSamlInvalidateSessionAction.java new file mode 100644 index 0000000000000..ac287a13d1625 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/RestSamlInvalidateSessionAction.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.rest.action.saml; + +import java.io.IOException; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.saml.SamlInvalidateSessionAction; +import org.elasticsearch.xpack.core.security.action.saml.SamlInvalidateSessionRequest; +import org.elasticsearch.xpack.core.security.action.saml.SamlInvalidateSessionResponse; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +/** + * Invalidates any security tokens associated with the provided SAML session. 
+ * The session identity is provided in a SAML {@code <LogoutRequest>} + */ +public class RestSamlInvalidateSessionAction extends SamlBaseRestHandler { + + static final ObjectParser PARSER = + new ObjectParser<>("saml_invalidate_session", SamlInvalidateSessionRequest::new); + + static { + PARSER.declareString(SamlInvalidateSessionRequest::setQueryString, new ParseField("queryString")); + PARSER.declareString(SamlInvalidateSessionRequest::setAssertionConsumerServiceURL, new ParseField("acs")); + PARSER.declareString(SamlInvalidateSessionRequest::setRealmName, new ParseField("realm")); + } + + public RestSamlInvalidateSessionAction(Settings settings, RestController controller, XPackLicenseState licenseState) { + super(settings, licenseState); + controller.registerHandler(POST, "/_xpack/security/saml/invalidate", this); + } + + @Override + public String getName() { + return "xpack_security_saml_invalidate_action"; + } + + @Override + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + try (XContentParser parser = request.contentParser()) { + final SamlInvalidateSessionRequest invalidateRequest = PARSER.parse(parser, this); + return channel -> client.execute(SamlInvalidateSessionAction.INSTANCE, invalidateRequest, + new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(SamlInvalidateSessionResponse resp, XContentBuilder builder) throws Exception { + builder.startObject(); + builder.field("realm", resp.getRealmName()); + builder.field("invalidated", resp.getCount()); + builder.field("redirect", resp.getRedirectUrl()); + builder.endObject(); + return new BytesRestResponse(RestStatus.OK, builder); + } + }); + } + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/RestSamlLogoutAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/RestSamlLogoutAction.java new file mode 100644 index 0000000000000..bc81f2d6babd2 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/RestSamlLogoutAction.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
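This handler takes the raw logout query string (`queryString`) plus optional `acs` or `realm` hints, and reports the realm, how many tokens were invalidated, and the redirect back to the IdP. Illustrative request (query string and realm name are placeholders):

```sh
# Invalidate the session named in an IdP-initiated LogoutRequest.
curl -u elastic -X POST "localhost:9200/_xpack/security/saml/invalidate" \
  -H 'Content-Type: application/json' -d '{
  "queryString": "SAMLRequest=nZFda8IwFIb...",
  "realm": "saml1"
}'
```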
+ */ +package org.elasticsearch.xpack.security.rest.action.saml; + +import java.io.IOException; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.saml.SamlLogoutAction; +import org.elasticsearch.xpack.core.security.action.saml.SamlLogoutRequest; +import org.elasticsearch.xpack.core.security.action.saml.SamlLogoutResponse; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +/** + * Invalidates the provided security token, and if the associated SAML realm support logout, generates + * a SAML logout request ({@code <LogoutRequest>}). + * This logout request is returned in the REST response as a redirect URI, and the REST client should + * make it available to the browser. + */ +public class RestSamlLogoutAction extends SamlBaseRestHandler { + + static final ObjectParser PARSER = new ObjectParser<>("saml_logout", SamlLogoutRequest::new); + + static { + PARSER.declareString(SamlLogoutRequest::setToken, new ParseField("token")); + PARSER.declareString(SamlLogoutRequest::setRefreshToken, new ParseField("refresh_token")); + } + + public RestSamlLogoutAction(Settings settings, RestController controller, XPackLicenseState licenseState) { + super(settings, licenseState); + controller.registerHandler(POST, "/_xpack/security/saml/logout", this); + } + + @Override + public String getName() { + return "xpack_security_saml_logout_action"; + } + + @Override + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + try (XContentParser parser = request.contentParser()) { + final SamlLogoutRequest logoutRequest = PARSER.parse(parser, null); + return channel -> client.execute(SamlLogoutAction.INSTANCE, logoutRequest, + new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(SamlLogoutResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + builder.field("redirect", response.getRedirectUrl()); + builder.endObject(); + return new BytesRestResponse(RestStatus.OK, builder); + } + }); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/RestSamlPrepareAuthenticationAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/RestSamlPrepareAuthenticationAction.java new file mode 100644 index 0000000000000..e5b6cdc494283 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/RestSamlPrepareAuthenticationAction.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
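For SP-initiated logout the client posts the tokens it obtained when authenticating, and gets back the IdP redirect URL (when the realm supports logout). A sketch (both token values are placeholders):

```sh
# Invalidate the current access/refresh token pair and obtain the IdP logout redirect.
curl -u elastic -X POST "localhost:9200/_xpack/security/saml/logout" \
  -H 'Content-Type: application/json' -d '{
  "token": "dGhpcyBpcyBub3QgYSByZWFsIHRva2Vu",
  "refresh_token": "vLBPvmAB6KvwvJZr27cS"
}'
```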
+ */ +package org.elasticsearch.xpack.security.rest.action.saml; + +import java.io.IOException; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.saml.SamlPrepareAuthenticationAction; +import org.elasticsearch.xpack.core.security.action.saml.SamlPrepareAuthenticationRequest; +import org.elasticsearch.xpack.core.security.action.saml.SamlPrepareAuthenticationResponse; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +/** + * Generates a SAML authentication request ({@code }) based on the provided + * parameters. + * The request is returned in the REST response, and the REST client should make it available + * to the browser. + */ +public class RestSamlPrepareAuthenticationAction extends SamlBaseRestHandler { + + static final ObjectParser PARSER = new ObjectParser<>("saml_prepare_authn", + SamlPrepareAuthenticationRequest::new); + + static { + PARSER.declareString(SamlPrepareAuthenticationRequest::setAssertionConsumerServiceURL, new ParseField("acs")); + PARSER.declareString(SamlPrepareAuthenticationRequest::setRealmName, new ParseField("realm")); + } + + public RestSamlPrepareAuthenticationAction(Settings settings, RestController controller, XPackLicenseState licenseState) { + super(settings, licenseState); + controller.registerHandler(POST, "/_xpack/security/saml/prepare", this); + } + + @Override + public String getName() { + return "xpack_security_saml_prepare_authentication_action"; + } + + @Override + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + try (XContentParser parser = request.contentParser()) { + final SamlPrepareAuthenticationRequest authenticationRequest = PARSER.parse(parser, null); + return channel -> client.execute(SamlPrepareAuthenticationAction.INSTANCE, authenticationRequest, + new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(SamlPrepareAuthenticationResponse response, XContentBuilder builder) + throws Exception { + builder.startObject(); + builder.field("realm", response.getRealmName()); + builder.field("id", response.getRequestId()); + builder.field("redirect", response.getRedirectUrl()); + builder.endObject(); + return new BytesRestResponse(RestStatus.OK, builder); + } + }); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/SamlBaseRestHandler.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/SamlBaseRestHandler.java new file mode 100644 index 0000000000000..c136fdf95f68e --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/SamlBaseRestHandler.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
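Preparing authentication needs either the realm name or the ACS URL; the response carries the realm, the generated request id, and the redirect URL that should be handed to the browser. For example (realm name is a placeholder):

```sh
# Ask Elasticsearch to build a SAML authentication request for the 'saml1' realm.
curl -u elastic -X POST "localhost:9200/_xpack/security/saml/prepare" \
  -H 'Content-Type: application/json' -d '{ "realm": "saml1" }'
```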
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.rest.action.saml; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; +import org.elasticsearch.xpack.security.authc.Realms; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +/** + * An abstract implementation of {@link SecurityBaseRestHandler} that performs a license check for the SAML realm type + */ +public abstract class SamlBaseRestHandler extends SecurityBaseRestHandler { + + private static final String SAML_REALM_TYPE = SamlRealmSettings.TYPE; + + public SamlBaseRestHandler(Settings settings, XPackLicenseState licenseState) { + super(settings, licenseState); + } + + @Override + protected Exception checkFeatureAvailable(RestRequest request) { + Exception failedFeature = super.checkFeatureAvailable(request); + if (failedFeature != null) { + return failedFeature; + } else if (Realms.isRealmTypeAvailable(licenseState.allowedRealmType(), SAML_REALM_TYPE)) { + return null; + } else { + logger.info("The '{}' realm is not available under the current license", SAML_REALM_TYPE); + return LicenseUtils.newComplianceException(SAML_REALM_TYPE); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java new file mode 100644 index 0000000000000..b47881de2db34 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.rest.action.user; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordResponse; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.core.security.rest.RestRequestFilter; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import java.io.IOException; +import java.util.Collections; +import java.util.Set; + +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestRequest.Method.PUT; + +public class RestChangePasswordAction extends SecurityBaseRestHandler implements RestRequestFilter { + + private final SecurityContext securityContext; + + public RestChangePasswordAction(Settings settings, RestController controller, SecurityContext securityContext, + XPackLicenseState licenseState) { + super(settings, licenseState); + this.securityContext = securityContext; + controller.registerHandler(POST, "/_xpack/security/user/{username}/_password", this); + controller.registerHandler(PUT, "/_xpack/security/user/{username}/_password", this); + controller.registerHandler(POST, "/_xpack/security/user/_password", this); + controller.registerHandler(PUT, "/_xpack/security/user/_password", this); + } + + @Override + public String getName() { + return "xpack_security_change_password_action"; + } + + @Override + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + final User user = securityContext.getUser(); + final String username; + if (request.param("username") == null) { + username = user.principal(); + } else { + username = request.param("username"); + } + + final String refresh = request.param("refresh"); + return channel -> + new SecurityClient(client) + .prepareChangePassword(username, request.requiredContent(), request.getXContentType()) + .setRefreshPolicy(refresh) + .execute(new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(ChangePasswordResponse changePasswordResponse, + XContentBuilder builder) throws Exception { + return new BytesRestResponse(RestStatus.OK, builder.startObject().endObject()); + } + }); + } + + private static final Set FILTERED_FIELDS = Collections.singleton("password"); + + @Override + public Set getFilteredFields() { + return FILTERED_FIELDS; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestDeleteUserAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestDeleteUserAction.java new file mode 100644 index 0000000000000..a8590388398d8 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestDeleteUserAction.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
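When no `{username}` is supplied, the handler above falls back to the authenticated user from the `SecurityContext`, and the `password` field is excluded from request tracing via `getFilteredFields`. An illustrative call (username and password are placeholders):

```sh
# Change another user's password; omit the username segment to change your own.
curl -u elastic -X POST "localhost:9200/_xpack/security/user/jdoe/_password" \
  -H 'Content-Type: application/json' -d '{ "password": "new-placeholder-password" }'
```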
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.rest.action.user; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.user.DeleteUserResponse; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.DELETE; + +/** + * Rest action to delete a user from the security index + */ +public class RestDeleteUserAction extends SecurityBaseRestHandler { + + public RestDeleteUserAction(Settings settings, RestController controller, XPackLicenseState licenseState) { + super(settings, licenseState); + controller.registerHandler(DELETE, "/_xpack/security/user/{username}", this); + } + + @Override + public String getName() { + return "xpack_security_delete_user_action"; + } + + @Override + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + final String username = request.param("username"); + final String refresh = request.param("refresh"); + return channel -> new SecurityClient(client).prepareDeleteUser(username) + .setRefreshPolicy(refresh) + .execute(new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(DeleteUserResponse response, XContentBuilder builder) throws Exception { + return new BytesRestResponse(response.found() ? RestStatus.OK : RestStatus.NOT_FOUND, + builder.startObject() + .field("found", response.found()) + .endObject()); + } + }); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUsersAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUsersAction.java new file mode 100644 index 0000000000000..1ab80954e9b53 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUsersAction.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.rest.action.user; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.user.GetUsersResponse; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +/** + * Rest action to retrieve a user from the security index + */ +public class RestGetUsersAction extends SecurityBaseRestHandler { + + public RestGetUsersAction(Settings settings, RestController controller, XPackLicenseState licenseState) { + super(settings, licenseState); + controller.registerHandler(GET, "/_xpack/security/user/", this); + controller.registerHandler(GET, "/_xpack/security/user/{username}", this); + } + + @Override + public String getName() { + return "xpack_security_get_users_action"; + } + + @Override + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + String[] usernames = request.paramAsStringArray("username", Strings.EMPTY_ARRAY); + + return channel -> new SecurityClient(client).prepareGetUsers(usernames).execute(new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(GetUsersResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + for (User user : response.users()) { + builder.field(user.principal(), user); + } + builder.endObject(); + + // if the user asked for specific users, but none of them were found + // we'll return an empty result and 404 status code + if (usernames.length != 0 && response.users().length == 0) { + return new BytesRestResponse(RestStatus.NOT_FOUND, builder); + } + + // either the user asked for all users, or at least one of the users + // was found + return new BytesRestResponse(RestStatus.OK, builder); + } + }); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java new file mode 100644 index 0000000000000..cc566c212cfb8 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesAction.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
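User lookups follow the same pattern as roles: no username lists everything, while asking for specific users that do not exist yields a `404`. For example (username is a placeholder):

```sh
# List all native users, then fetch a single user.
curl -u elastic "localhost:9200/_xpack/security/user/"
curl -u elastic "localhost:9200/_xpack/security/user/jdoe"
```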
+ */ +package org.elasticsearch.xpack.security.rest.action.user; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequestBuilder; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestRequest.Method.POST; + +/** + * REST handler that tests whether a user has the specified + * {@link RoleDescriptor.IndicesPrivileges privileges} + */ +public class RestHasPrivilegesAction extends SecurityBaseRestHandler { + + private final SecurityContext securityContext; + + public RestHasPrivilegesAction(Settings settings, RestController controller, SecurityContext securityContext, + XPackLicenseState licenseState) { + super(settings, licenseState); + this.securityContext = securityContext; + controller.registerHandler(GET, "/_xpack/security/user/{username}/_has_privileges", this); + controller.registerHandler(POST, "/_xpack/security/user/{username}/_has_privileges", this); + controller.registerHandler(GET, "/_xpack/security/user/_has_privileges", this); + controller.registerHandler(POST, "/_xpack/security/user/_has_privileges", this); + } + + @Override + public String getName() { + return "xpack_security_has_priviledges_action"; + } + + @Override + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + final String username = getUsername(request); + HasPrivilegesRequestBuilder requestBuilder = new SecurityClient(client) + .prepareHasPrivileges(username, request.requiredContent(), request.getXContentType()); + return channel -> requestBuilder.execute(new HasPrivilegesRestResponseBuilder(username, channel)); + } + + private String getUsername(RestRequest request) { + final String username = request.param("username"); + if (username != null) { + return username; + } + return securityContext.getUser().principal(); + } + + static class HasPrivilegesRestResponseBuilder extends RestBuilderListener { + private String username; + + HasPrivilegesRestResponseBuilder(String username, RestChannel channel) { + super(channel); + this.username = username; + } + + @Override + public RestResponse buildResponse(HasPrivilegesResponse response, XContentBuilder builder) throws Exception { + builder.startObject() + .field("username", username) + .field("has_all_requested", response.isCompleteMatch()); + + builder.field("cluster"); + builder.map(response.getClusterPrivileges()); + + builder.startObject("index"); + for (HasPrivilegesResponse.IndexPrivileges index : response.getIndexPrivileges()) { + builder.field(index.getIndex()); + builder.map(index.getPrivileges()); + } + 
builder.endObject(); + + builder.endObject(); + return new BytesRestResponse(RestStatus.OK, builder); + } + + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestPutUserAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestPutUserAction.java new file mode 100644 index 0000000000000..1b35e5684d91d --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestPutUserAction.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.rest.action.user; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.user.PutUserRequestBuilder; +import org.elasticsearch.xpack.core.security.action.user.PutUserResponse; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.core.security.rest.RestRequestFilter; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import java.io.IOException; +import java.util.Collections; +import java.util.Set; + +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestRequest.Method.PUT; + +/** + * Rest endpoint to add a User to the security index + */ +public class RestPutUserAction extends SecurityBaseRestHandler implements RestRequestFilter { + + public RestPutUserAction(Settings settings, RestController controller, XPackLicenseState licenseState) { + super(settings, licenseState); + controller.registerHandler(POST, "/_xpack/security/user/{username}", this); + controller.registerHandler(PUT, "/_xpack/security/user/{username}", this); + } + + @Override + public String getName() { + return "xpack_security_put_user_action"; + } + + @Override + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + PutUserRequestBuilder requestBuilder = new SecurityClient(client) + .preparePutUser(request.param("username"), request.requiredContent(), request.getXContentType()) + .setRefreshPolicy(request.param("refresh")); + + return channel -> requestBuilder.execute(new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(PutUserResponse putUserResponse, XContentBuilder builder) throws Exception { + return new BytesRestResponse(RestStatus.OK, + builder.startObject() + .field("user", putUserResponse) + .endObject()); + } + }); + } + + private static final Set FILTERED_FIELDS = Collections.unmodifiableSet(Sets.newHashSet("password", "passwordHash")); + + @Override + public Set getFilteredFields() { + return FILTERED_FIELDS; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestSetEnabledAction.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestSetEnabledAction.java new file mode 100644 index 0000000000000..8d796b9c6180f --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestSetEnabledAction.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.rest.action.user; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.action.user.SetEnabledResponse; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestRequest.Method.PUT; + +/** + * REST handler for enabling and disabling users. The username is required and we use the path to determine if the user is being + * enabled or disabled. + */ +public class RestSetEnabledAction extends SecurityBaseRestHandler { + + public RestSetEnabledAction(Settings settings, RestController controller, XPackLicenseState licenseState) { + super(settings, licenseState); + controller.registerHandler(POST, "/_xpack/security/user/{username}/_enable", this); + controller.registerHandler(PUT, "/_xpack/security/user/{username}/_enable", this); + controller.registerHandler(POST, "/_xpack/security/user/{username}/_disable", this); + controller.registerHandler(PUT, "/_xpack/security/user/{username}/_disable", this); + } + + @Override + public String getName() { + return "xpack_security_set_enabled_action"; + } + + @Override + public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + final boolean enabled = request.path().endsWith("_enable"); + assert enabled || request.path().endsWith("_disable"); + final String username = request.param("username"); + return channel -> new SecurityClient(client).prepareSetEnabled(username, enabled) + .execute(new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(SetEnabledResponse setEnabledResponse, XContentBuilder builder) throws Exception { + return new BytesRestResponse(RestStatus.OK, builder.startObject().endObject()); + } + }); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/AbstractSecurityModule.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/AbstractSecurityModule.java new file mode 100644 index 0000000000000..0dfb369bc371f --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/AbstractSecurityModule.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
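Whether the user is being enabled or disabled is derived purely from the path suffix (`_enable` vs `_disable`); the request has no body and the response is an empty object on success. For example (username is a placeholder):

```sh
# Disable a user without deleting it, then re-enable it later.
curl -u elastic -X PUT "localhost:9200/_xpack/security/user/jdoe/_disable"
curl -u elastic -X PUT "localhost:9200/_xpack/security/user/jdoe/_enable"
```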
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.support; + +import org.elasticsearch.client.Client; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.XPackSettings; + +public abstract class AbstractSecurityModule extends AbstractModule { + + protected final Settings settings; + protected final boolean clientMode; + protected final boolean securityEnabled; + + public AbstractSecurityModule(Settings settings) { + this.settings = settings; + this.clientMode = TransportClient.CLIENT_TYPE.equals(settings.get(Client.CLIENT_TYPE_SETTING_S.getKey())); + this.securityEnabled = XPackSettings.SECURITY_ENABLED.get(settings); + } + + @Override + protected final void configure() { + configure(clientMode); + } + + protected abstract void configure(boolean clientMode); + + public abstract static class Node extends AbstractSecurityModule { + + protected Node(Settings settings) { + super(settings); + } + + @Override + protected final void configure(boolean clientMode) { + assert !clientMode : "[" + getClass().getSimpleName() + "] is a node only module"; + configureNode(); + } + + protected abstract void configureNode(); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/FileAttributesChecker.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/FileAttributesChecker.java new file mode 100644 index 0000000000000..6ef8461db82fb --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/FileAttributesChecker.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.support; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.attribute.PosixFileAttributes; +import java.nio.file.attribute.PosixFilePermissions; + +import org.elasticsearch.cli.Terminal; + +/** + * A utility for cli tools to capture file attributes + * before writing files, and to warn if the permissions/group/owner changes. + */ +public class FileAttributesChecker { + + // the paths to check + private final Path[] paths; + + // captured attributes for each path + private final PosixFileAttributes[] attributes; + + /** Create a checker for the given paths, which will warn to the given terminal if changes are made. */ + public FileAttributesChecker(Path... paths) throws IOException { + this.paths = paths; + this.attributes = new PosixFileAttributes[paths.length]; + + for (int i = 0; i < paths.length; ++i) { + if (Files.exists(paths[i]) == false) continue; // missing file, so changes later don't matter + PosixFileAttributeView view = Files.getFileAttributeView(paths[i], PosixFileAttributeView.class); + if (view == null) continue; // not posix + this.attributes[i] = view.readAttributes(); + } + } + + /** Check if attributes of the paths have changed, warning to the given terminal if they have. 
*/ + public void check(Terminal terminal) throws IOException { + for (int i = 0; i < paths.length; ++i) { + if (attributes[i] == null) { + // we couldn't get attributes in setup, so we can't check them now + continue; + } + + PosixFileAttributeView view = Files.getFileAttributeView(paths[i], PosixFileAttributeView.class); + PosixFileAttributes newAttributes = view.readAttributes(); + PosixFileAttributes oldAttributes = attributes[i]; + if (oldAttributes.permissions().equals(newAttributes.permissions()) == false) { + terminal.println(Terminal.Verbosity.SILENT, "WARNING: The file permissions of [" + paths[i] + "] have changed " + + "from [" + PosixFilePermissions.toString(oldAttributes.permissions()) + "] " + + "to [" + PosixFilePermissions.toString(newAttributes.permissions()) + "]"); + terminal.println(Terminal.Verbosity.SILENT, + "Please ensure that the user account running Elasticsearch has read access to this file!"); + } + if (oldAttributes.owner().getName().equals(newAttributes.owner().getName()) == false) { + terminal.println(Terminal.Verbosity.SILENT, "WARNING: Owner of file [" + paths[i] + "] " + + "used to be [" + oldAttributes.owner().getName() + "], " + + "but now is [" + newAttributes.owner().getName() + "]"); + } + if (oldAttributes.group().getName().equals(newAttributes.group().getName()) == false) { + terminal.println(Terminal.Verbosity.SILENT, "WARNING: Group of file [" + paths[i] + "] " + + "used to be [" + oldAttributes.group().getName() + "], " + + "but now is [" + newAttributes.group().getName() + "]"); + } + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/IndexLifecycleManager.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/IndexLifecycleManager.java new file mode 100644 index 0000000000000..e2e278c70820f --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/IndexLifecycleManager.java @@ -0,0 +1,373 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.security.support;
+
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.ResourceAlreadyExistsException;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
+import org.elasticsearch.action.support.ActiveShardCount;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.health.ClusterIndexHealth;
+import org.elasticsearch.cluster.metadata.AliasOrIndex;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.xpack.core.template.TemplateUtils;
+import org.elasticsearch.xpack.core.upgrade.IndexUpgradeCheckVersion;
+
+import java.nio.charset.StandardCharsets;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+import java.util.function.Predicate;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING;
+import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN;
+import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
+import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME;
+import static org.elasticsearch.xpack.core.security.SecurityLifecycleServiceField.SECURITY_TEMPLATE_NAME;
+
+/**
+ * Manages the lifecycle of a single index, its template, mapping and data upgrades/migrations.
+ */ +public class IndexLifecycleManager extends AbstractComponent { + + public static final String INTERNAL_SECURITY_INDEX = ".security-" + IndexUpgradeCheckVersion.UPRADE_VERSION; + public static final int INTERNAL_INDEX_FORMAT = 6; + public static final String SECURITY_VERSION_STRING = "security-version"; + public static final String TEMPLATE_VERSION_PATTERN = + Pattern.quote("${security.template.version}"); + + private final String indexName; + private final Client client; + + private final List> indexHealthChangeListeners = new CopyOnWriteArrayList<>(); + private final List> indexOutOfDateListeners = new CopyOnWriteArrayList<>(); + + private volatile State indexState = new State(false, false, false, false, null); + + public IndexLifecycleManager(Settings settings, Client client, String indexName) { + super(settings); + this.client = client; + this.indexName = indexName; + } + + public boolean checkMappingVersion(Predicate requiredVersion) { + // pull value into local variable for consistent view + final State currentIndexState = this.indexState; + return currentIndexState.mappingVersion == null || requiredVersion.test(currentIndexState.mappingVersion); + } + + public boolean indexExists() { + return this.indexState.indexExists; + } + + /** + * Returns whether the index is on the current format if it exists. If the index does not exist + * we treat the index as up to date as we expect it to be created with the current format. + */ + public boolean isIndexUpToDate() { + return this.indexState.isIndexUpToDate; + } + + public boolean isAvailable() { + return this.indexState.indexAvailable; + } + + public boolean isMappingUpToDate() { + return this.indexState.mappingUpToDate; + } + + /** + * Adds a listener which will be notified when the security index health changes. The previous and + * current health will be provided to the listener so that the listener can determine if any action + * needs to be taken. + */ + public void addIndexHealthChangeListener(BiConsumer listener) { + indexHealthChangeListeners.add(listener); + } + + /** + * Adds a listener which will be notified when the security index out of date value changes. The previous and + * current value will be provided to the listener so that the listener can determine if any action + * needs to be taken. 
+ */ + public void addIndexOutOfDateListener(BiConsumer listener) { + indexOutOfDateListeners.add(listener); + } + + public void clusterChanged(ClusterChangedEvent event) { + final boolean previousUpToDate = this.indexState.isIndexUpToDate; + processClusterState(event.state()); + checkIndexHealthChange(event); + if (previousUpToDate != this.indexState.isIndexUpToDate) { + notifyIndexOutOfDateListeners(previousUpToDate, this.indexState.isIndexUpToDate); + } + } + + private void processClusterState(ClusterState clusterState) { + assert clusterState != null; + final IndexMetaData securityIndex = resolveConcreteIndex(indexName, clusterState.metaData()); + final boolean indexExists = securityIndex != null; + final boolean isIndexUpToDate = indexExists == false || + INDEX_FORMAT_SETTING.get(securityIndex.getSettings()).intValue() == INTERNAL_INDEX_FORMAT; + final boolean indexAvailable = checkIndexAvailable(clusterState); + final boolean mappingIsUpToDate = indexExists == false || checkIndexMappingUpToDate(clusterState); + final Version mappingVersion = oldestIndexMappingVersion(clusterState); + this.indexState = new State(indexExists, isIndexUpToDate, indexAvailable, mappingIsUpToDate, mappingVersion); + } + + private void checkIndexHealthChange(ClusterChangedEvent event) { + final ClusterState state = event.state(); + final ClusterState previousState = event.previousState(); + final IndexMetaData indexMetaData = resolveConcreteIndex(indexName, state.metaData()); + final IndexMetaData previousIndexMetaData = resolveConcreteIndex(indexName, previousState.metaData()); + if (indexMetaData != null) { + final ClusterIndexHealth currentHealth = + new ClusterIndexHealth(indexMetaData, state.getRoutingTable().index(indexMetaData.getIndex())); + final ClusterIndexHealth previousHealth = previousIndexMetaData != null ? new ClusterIndexHealth(previousIndexMetaData, + previousState.getRoutingTable().index(previousIndexMetaData.getIndex())) : null; + + if (previousHealth == null || previousHealth.getStatus() != currentHealth.getStatus()) { + notifyIndexHealthChangeListeners(previousHealth, currentHealth); + } + } else if (previousIndexMetaData != null) { + final ClusterIndexHealth previousHealth = + new ClusterIndexHealth(previousIndexMetaData, previousState.getRoutingTable().index(previousIndexMetaData.getIndex())); + notifyIndexHealthChangeListeners(previousHealth, null); + } + } + + private void notifyIndexHealthChangeListeners(ClusterIndexHealth previousHealth, ClusterIndexHealth currentHealth) { + for (BiConsumer consumer : indexHealthChangeListeners) { + try { + consumer.accept(previousHealth, currentHealth); + } catch (Exception e) { + logger.warn(new ParameterizedMessage("failed to notify listener [{}] of index health change", consumer), e); + } + } + } + + private void notifyIndexOutOfDateListeners(boolean previous, boolean current) { + for (BiConsumer consumer : indexOutOfDateListeners) { + try { + consumer.accept(previous, current); + } catch (Exception e) { + logger.warn(new ParameterizedMessage("failed to notify listener [{}] of index out of date change", consumer), e); + } + } + } + + private boolean checkIndexAvailable(ClusterState state) { + final IndexRoutingTable routingTable = getIndexRoutingTable(state); + if (routingTable != null && routingTable.allPrimaryShardsActive()) { + return true; + } + logger.debug("Security index [{}] is not yet active", indexName); + return false; + } + + + /** + * Returns the routing-table for this index, or null if the index does not exist. 
+ */ + private IndexRoutingTable getIndexRoutingTable(ClusterState clusterState) { + IndexMetaData metaData = resolveConcreteIndex(indexName, clusterState.metaData()); + if (metaData == null) { + return null; + } else { + return clusterState.routingTable().index(metaData.getIndex()); + } + } + + public static boolean checkTemplateExistsAndVersionMatches( + String templateName, ClusterState state, Logger logger, Predicate predicate) { + + return TemplateUtils.checkTemplateExistsAndVersionMatches(templateName, SECURITY_VERSION_STRING, + state, logger, predicate); + } + + private boolean checkIndexMappingUpToDate(ClusterState clusterState) { + return checkIndexMappingVersionMatches(clusterState, Version.CURRENT::equals); + } + + private boolean checkIndexMappingVersionMatches(ClusterState clusterState, + Predicate predicate) { + return checkIndexMappingVersionMatches(indexName, clusterState, logger, predicate); + } + + public static boolean checkIndexMappingVersionMatches(String indexName, + ClusterState clusterState, Logger logger, + Predicate predicate) { + return loadIndexMappingVersions(indexName, clusterState, logger) + .stream().allMatch(predicate); + } + + private Version oldestIndexMappingVersion(ClusterState clusterState) { + final Set versions = loadIndexMappingVersions(indexName, clusterState, logger); + return versions.stream().min(Version::compareTo).orElse(null); + } + + private static Set loadIndexMappingVersions(String indexName, + ClusterState clusterState, Logger logger) { + Set versions = new HashSet<>(); + IndexMetaData indexMetaData = resolveConcreteIndex(indexName, clusterState.metaData()); + if (indexMetaData != null) { + for (Object object : indexMetaData.getMappings().values().toArray()) { + MappingMetaData mappingMetaData = (MappingMetaData) object; + if (mappingMetaData.type().equals(MapperService.DEFAULT_MAPPING)) { + continue; + } + versions.add(readMappingVersion(indexName, mappingMetaData, logger)); + } + } + return versions; + } + + /** + * Resolves a concrete index name or alias to a {@link IndexMetaData} instance. Requires + * that if supplied with an alias, the alias resolves to at most one concrete index. 
+ */ + private static IndexMetaData resolveConcreteIndex(final String indexOrAliasName, final MetaData metaData) { + final AliasOrIndex aliasOrIndex = metaData.getAliasAndIndexLookup().get(indexOrAliasName); + if (aliasOrIndex != null) { + final List indices = aliasOrIndex.getIndices(); + if (aliasOrIndex.isAlias() && indices.size() > 1) { + throw new IllegalStateException("Alias [" + indexOrAliasName + "] points to more than one index: " + + indices.stream().map(imd -> imd.getIndex().getName()).collect(Collectors.toList())); + } + return indices.get(0); + } + return null; + } + + private static Version readMappingVersion(String indexName, MappingMetaData mappingMetaData, + Logger logger) { + try { + Map meta = + (Map) mappingMetaData.sourceAsMap().get("_meta"); + if (meta == null) { + logger.info("Missing _meta field in mapping [{}] of index [{}]", mappingMetaData.type(), indexName); + throw new IllegalStateException("Cannot read security-version string in index " + indexName); + } + return Version.fromString((String) meta.get(SECURITY_VERSION_STRING)); + } catch (ElasticsearchParseException e) { + logger.error(new ParameterizedMessage( + "Cannot parse the mapping for index [{}]", indexName), e); + throw new ElasticsearchException( + "Cannot parse the mapping for index [{}]", e, indexName); + } + } + + /** + * Prepares the index by creating it if it doesn't exist or updating the mappings if the mappings are + * out of date. After any tasks have been executed, the runnable is then executed. + */ + public void prepareIndexIfNeededThenExecute(final Consumer consumer, final Runnable andThen) { + final State indexState = this.indexState; // use a local copy so all checks execute against the same state! + // TODO we should improve this so we don't fire off a bunch of requests to do the same thing (create or update mappings) + if (indexState.indexExists && indexState.isIndexUpToDate == false) { + consumer.accept(new IllegalStateException( + "Security index is not on the current version. 
Security features relying on the index will not be available until " + + "the upgrade API is run on the security index")); + } else if (indexState.indexExists == false) { + Tuple mappingAndSettings = loadMappingAndSettingsSourceFromTemplate(); + CreateIndexRequest request = new CreateIndexRequest(INTERNAL_SECURITY_INDEX) + .alias(new Alias(SECURITY_INDEX_NAME)) + .mapping("doc", mappingAndSettings.v1(), XContentType.JSON) + .waitForActiveShards(ActiveShardCount.ALL) + .settings(mappingAndSettings.v2()); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, + new ActionListener() { + @Override + public void onResponse(CreateIndexResponse createIndexResponse) { + if (createIndexResponse.isAcknowledged()) { + andThen.run(); + } else { + consumer.accept(new ElasticsearchException("Failed to create security index")); + } + } + + @Override + public void onFailure(Exception e) { + final Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof ResourceAlreadyExistsException) { + // the index already exists - it was probably just created so this + // node hasn't yet received the cluster state update with the index + andThen.run(); + } else { + consumer.accept(e); + } + } + }, client.admin().indices()::create); + } else if (indexState.mappingUpToDate == false) { + PutMappingRequest request = new PutMappingRequest(INTERNAL_SECURITY_INDEX) + .source(loadMappingAndSettingsSourceFromTemplate().v1(), XContentType.JSON) + .type("doc"); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, + ActionListener.wrap(putMappingResponse -> { + if (putMappingResponse.isAcknowledged()) { + andThen.run(); + } else { + consumer.accept(new IllegalStateException("put mapping request was not acknowledged")); + } + }, consumer), client.admin().indices()::putMapping); + } else { + andThen.run(); + } + } + + private Tuple loadMappingAndSettingsSourceFromTemplate() { + final byte[] template = TemplateUtils.loadTemplate("/" + SECURITY_TEMPLATE_NAME + ".json", + Version.CURRENT.toString(), IndexLifecycleManager.TEMPLATE_VERSION_PATTERN).getBytes(StandardCharsets.UTF_8); + PutIndexTemplateRequest request = new PutIndexTemplateRequest(SECURITY_TEMPLATE_NAME).source(template, XContentType.JSON); + return new Tuple<>(request.mappings().get("doc"), request.settings()); + } + /** + * Holder class so we can update all values at once + */ + private static class State { + private final boolean indexExists; + private final boolean isIndexUpToDate; + private final boolean indexAvailable; + private final boolean mappingUpToDate; + private final Version mappingVersion; + + private State(boolean indexExists, boolean isIndexUpToDate, boolean indexAvailable, + boolean mappingUpToDate, Version mappingVersion) { + this.indexExists = indexExists; + this.isIndexUpToDate = isIndexUpToDate; + this.indexAvailable = indexAvailable; + this.mappingUpToDate = mappingUpToDate; + this.mappingVersion = mappingVersion; + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/RestorableContextClassLoader.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/RestorableContextClassLoader.java new file mode 100644 index 0000000000000..f2e36ebf98273 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/RestorableContextClassLoader.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.support; + +import java.security.AccessController; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; + +import org.elasticsearch.SpecialPermission; + +/** + * A try-with-resource compatible object for configuring a thread {@link Thread#contextClassLoader}. + * On construction this class will set the current (or provided) thread's context class loader. + * On {@link #close()}, it restores the previous value of the class loader. + */ +public class RestorableContextClassLoader implements AutoCloseable { + + private final Thread thread; + private ClassLoader restore; + + public RestorableContextClassLoader(Class fromClass) throws PrivilegedActionException { + this(Thread.currentThread(), fromClass.getClassLoader()); + } + + public RestorableContextClassLoader(Thread thread, ClassLoader setClassLoader) throws PrivilegedActionException { + this.thread = thread; + SpecialPermission.check(); + AccessController.doPrivileged((PrivilegedExceptionAction) () -> { + restore = thread.getContextClassLoader(); + thread.setContextClassLoader(setClassLoader); + return null; + }); + } + + @Override + public void close() throws PrivilegedActionException { + SpecialPermission.check(); + AccessController.doPrivileged((PrivilegedExceptionAction) () -> { + this.thread.setContextClassLoader(this.restore); + return null; + }); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityFiles.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityFiles.java new file mode 100644 index 0000000000000..de829fb66c910 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityFiles.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.support; + +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.env.Environment; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.io.Writer; +import java.nio.charset.StandardCharsets; +import java.nio.file.AtomicMoveNotSupportedException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.attribute.PosixFileAttributes; +import java.util.Locale; +import java.util.Map; +import java.util.function.Function; + +import static java.nio.file.StandardCopyOption.ATOMIC_MOVE; +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; +import static java.nio.file.StandardOpenOption.CREATE; +import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING; +import static java.nio.file.StandardOpenOption.WRITE; + +public class SecurityFiles { + + private SecurityFiles() { + } + + /** + * Atomically writes to the specified file a line per entry in the specified map using the specified transform to convert each entry to + * a line. 
The writing is done atomically in the following sense: first the lines are written to a temporary file and if the writing + * succeeds then the temporary file is moved to the specified path, replacing the file if it exists. If a failure occurs, any existing + * file is preserved, and the temporary file is cleaned up. + * + * @param the key type of the map entries + * @param the value type of the map entries + * @param path the path + * @param map the map whose entries to transform into lines + * @param transform the transform to convert each map entry to a line + */ + public static void writeFileAtomically(final Path path, final Map map, final Function, String> transform) { + Path tempFile = null; + try { + tempFile = Files.createTempFile(path.getParent(), path.getFileName().toString(), "tmp"); + try (Writer writer = Files.newBufferedWriter(tempFile, StandardCharsets.UTF_8, CREATE, TRUNCATE_EXISTING, WRITE)) { + for (final Map.Entry entry : map.entrySet()) { + final StringBuilder sb = new StringBuilder(); + final String line = sb.append(transform.apply(entry)).append(System.lineSeparator()).toString(); + writer.write(line); + } + } + // get original permissions + if (Files.exists(path)) { + boolean supportsPosixAttributes = + Environment.getFileStore(path).supportsFileAttributeView(PosixFileAttributeView.class); + if (supportsPosixAttributes) { + setPosixAttributesOnTempFile(path, tempFile); + } + } + + try { + Files.move(tempFile, path, REPLACE_EXISTING, ATOMIC_MOVE); + } catch (final AtomicMoveNotSupportedException e) { + Files.move(tempFile, path, REPLACE_EXISTING); + } + } catch (final IOException e) { + throw new UncheckedIOException(String.format(Locale.ROOT, "could not write file [%s]", path.toAbsolutePath()), e); + } finally { + // we are ignoring exceptions here, so we do not need handle whether or not tempFile was initialized nor if the file exists + IOUtils.deleteFilesIgnoringExceptions(tempFile); + } + } + + static void setPosixAttributesOnTempFile(Path path, Path tempFile) throws IOException { + PosixFileAttributes attributes = Files.getFileAttributeView(path, PosixFileAttributeView.class).readAttributes(); + PosixFileAttributeView tempFileView = Files.getFileAttributeView(tempFile, PosixFileAttributeView.class); + + tempFileView.setPermissions(attributes.permissions()); + + // Make an attempt to set the username and group to match. If it fails, silently ignore the failure as the user + // will be notified by the FileAttributeChecker that the ownership has changed and needs to be corrected + try { + tempFileView.setOwner(attributes.owner()); + } catch (Exception e) { + } + + try { + tempFileView.setGroup(attributes.group()); + } catch (Exception e) { + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java new file mode 100644 index 0000000000000..785425ade9bd6 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java @@ -0,0 +1,314 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.transport; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.DestructiveOperations; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TcpTransport; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportInterceptor; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponseHandler; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.TransportService.ContextRestoreResponseHandler; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.transport.netty4.SecurityNetty4Transport; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.authc.AuthenticationService; +import org.elasticsearch.xpack.security.authz.AuthorizationService; +import org.elasticsearch.xpack.security.authz.AuthorizationUtils; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.Executor; +import java.util.function.Function; + +import static org.elasticsearch.xpack.core.security.SecurityField.setting; + +public class SecurityServerTransportInterceptor extends AbstractComponent implements TransportInterceptor { + + private static final Function> TRANSPORT_TYPE_SETTING_TEMPLATE = (key) -> new Setting<>(key, + "node", v + -> { + if (v.equals("node") || v.equals("client")) { + return v; + } + throw new IllegalArgumentException("type must be one of [client, node]"); + }, Setting.Property.NodeScope); + private static final String TRANSPORT_TYPE_SETTING_KEY = "xpack.security.type"; + + public static final Setting TRANSPORT_TYPE_PROFILE_SETTING = Setting.affixKeySetting("transport.profiles.", + TRANSPORT_TYPE_SETTING_KEY, TRANSPORT_TYPE_SETTING_TEMPLATE); + + private final AuthenticationService authcService; + private final AuthorizationService authzService; + private final SSLService sslService; + private final Map profileFilters; + private final XPackLicenseState licenseState; + private final ThreadPool threadPool; + private final Settings settings; + private final SecurityContext securityContext; + private final boolean reservedRealmEnabled; + + public SecurityServerTransportInterceptor(Settings settings, + ThreadPool threadPool, + AuthenticationService authcService, + AuthorizationService authzService, + XPackLicenseState licenseState, + SSLService sslService, + SecurityContext securityContext, + DestructiveOperations destructiveOperations) 
{ + super(settings); + this.settings = settings; + this.threadPool = threadPool; + this.authcService = authcService; + this.authzService = authzService; + this.licenseState = licenseState; + this.sslService = sslService; + this.securityContext = securityContext; + this.profileFilters = initializeProfileFilters(destructiveOperations); + this.reservedRealmEnabled = XPackSettings.RESERVED_REALM_ENABLED_SETTING.get(settings); + } + + @Override + public AsyncSender interceptSender(AsyncSender sender) { + return new AsyncSender() { + @Override + public void sendRequest(Transport.Connection connection, String action, TransportRequest request, + TransportRequestOptions options, TransportResponseHandler handler) { + if (licenseState.isSecurityEnabled() && licenseState.isAuthAllowed()) { + // the transport in core normally does this check, BUT since we are serializing to a string header we need to do it + // ourselves otherwise we wind up using a version newer than what we can actually send + final Version minVersion = Version.min(connection.getVersion(), Version.CURRENT); + + // Sometimes a system action gets executed like a internal create index request or update mappings request + // which means that the user is copied over to system actions so we need to change the user + if (AuthorizationUtils.shouldReplaceUserWithSystem(threadPool.getThreadContext(), action)) { + securityContext.executeAsUser(SystemUser.INSTANCE, (original) -> sendWithUser(connection, action, request, options, + new ContextRestoreResponseHandler<>(threadPool.getThreadContext().wrapRestorable(original) + , handler), sender), minVersion); + } else if (AuthorizationUtils.shouldSetUserBasedOnActionOrigin(threadPool.getThreadContext())) { + AuthorizationUtils.switchUserBasedOnActionOriginAndExecute(threadPool.getThreadContext(), securityContext, + (original) -> sendWithUser(connection, action, request, options, + new ContextRestoreResponseHandler<>(threadPool.getThreadContext().wrapRestorable(original) + , handler), sender)); + } else if (securityContext.getAuthentication() != null && + securityContext.getAuthentication().getVersion().equals(minVersion) == false) { + // re-write the authentication since we want the authentication version to match the version of the connection + securityContext.executeAfterRewritingAuthentication(original -> sendWithUser(connection, action, request, options, + new ContextRestoreResponseHandler<>(threadPool.getThreadContext().wrapRestorable(original), handler), sender), + minVersion); + } else { + sendWithUser(connection, action, request, options, handler, sender); + } + } else { + sender.sendRequest(connection, action, request, options, handler); + } + } + }; + } + + private void sendWithUser(Transport.Connection connection, String action, TransportRequest request, + TransportRequestOptions options, TransportResponseHandler handler, + AsyncSender sender) { + // There cannot be a request outgoing from this node that is not associated with a user. 
+ if (securityContext.getAuthentication() == null) { + // we use an assertion here to ensure we catch this in our testing infrastructure, but leave the ISE for cases we do not catch + // in tests and may be hit by a user + assertNoAuthentication(action); + throw new IllegalStateException("there should always be a user when sending a message for action [" + action + "]"); + } + + try { + sender.sendRequest(connection, action, request, options, handler); + } catch (Exception e) { + handler.handleException(new TransportException("failed sending request", e)); + } + } + + // pkg-private method to allow overriding for tests + void assertNoAuthentication(String action) { + assert false : "there should always be a user when sending a message for action [" + action + "]"; + } + + @Override + public TransportRequestHandler interceptHandler(String action, String executor, + boolean forceExecution, + TransportRequestHandler actualHandler) { + return new ProfileSecuredRequestHandler<>(logger, action, forceExecution, executor, actualHandler, profileFilters, + licenseState, threadPool); + } + + protected Map initializeProfileFilters(DestructiveOperations destructiveOperations) { + Map profileSettingsMap = settings.getGroups("transport.profiles.", true); + Map profileFilters = new HashMap<>(profileSettingsMap.size() + 1); + + final Settings transportSSLSettings = settings.getByPrefix(setting("transport.ssl.")); + final boolean transportSSLEnabled = XPackSettings.TRANSPORT_SSL_ENABLED.get(settings); + for (Map.Entry entry : profileSettingsMap.entrySet()) { + Settings profileSettings = entry.getValue(); + final Settings profileSslSettings = SecurityNetty4Transport.profileSslSettings(profileSettings); + final boolean extractClientCert = transportSSLEnabled && + sslService.isSSLClientAuthEnabled(profileSslSettings, transportSSLSettings); + String type = TRANSPORT_TYPE_SETTING_TEMPLATE.apply(TRANSPORT_TYPE_SETTING_KEY).get(entry.getValue()); + switch (type) { + case "client": + profileFilters.put(entry.getKey(), new ServerTransportFilter.ClientProfile(authcService, authzService, + threadPool.getThreadContext(), extractClientCert, destructiveOperations, reservedRealmEnabled, + securityContext)); + break; + case "node": + profileFilters.put(entry.getKey(), new ServerTransportFilter.NodeProfile(authcService, authzService, + threadPool.getThreadContext(), extractClientCert, destructiveOperations, reservedRealmEnabled, + securityContext)); + break; + default: + throw new IllegalStateException("unknown profile type: " + type); + } + } + + if (!profileFilters.containsKey(TcpTransport.DEFAULT_PROFILE)) { + final boolean extractClientCert = transportSSLEnabled && sslService.isSSLClientAuthEnabled(transportSSLSettings); + profileFilters.put(TcpTransport.DEFAULT_PROFILE, new ServerTransportFilter.NodeProfile(authcService, authzService, + threadPool.getThreadContext(), extractClientCert, destructiveOperations, reservedRealmEnabled, securityContext)); + } + + return Collections.unmodifiableMap(profileFilters); + } + + public static class ProfileSecuredRequestHandler implements TransportRequestHandler { + + private final String action; + private final TransportRequestHandler handler; + private final Map profileFilters; + private final XPackLicenseState licenseState; + private final ThreadContext threadContext; + private final String executorName; + private final ThreadPool threadPool; + private final boolean forceExecution; + private final Logger logger; + + ProfileSecuredRequestHandler(Logger logger, String action, 
boolean forceExecution, String executorName, + TransportRequestHandler handler, Map profileFilters, + XPackLicenseState licenseState, ThreadPool threadPool) { + this.logger = logger; + this.action = action; + this.executorName = executorName; + this.handler = handler; + this.profileFilters = profileFilters; + this.licenseState = licenseState; + this.threadContext = threadPool.getThreadContext(); + this.threadPool = threadPool; + this.forceExecution = forceExecution; + } + + AbstractRunnable getReceiveRunnable(T request, TransportChannel channel, Task task) { + return new AbstractRunnable() { + @Override + public boolean isForceExecution() { + return forceExecution; + } + + @Override + public void onFailure(Exception e) { + try { + channel.sendResponse(e); + } catch (Exception e1) { + e1.addSuppressed(e); + logger.warn("failed to send exception response for action [" + action + "]", e1); + } + } + + @Override + protected void doRun() throws Exception { + handler.messageReceived(request, channel, task); + } + }; + } + + @Override + public String toString() { + return "ProfileSecuredRequestHandler{" + + "action='" + action + '\'' + + ", executorName='" + executorName + '\'' + + ", forceExecution=" + forceExecution + + '}'; + } + + @Override + public void messageReceived(T request, TransportChannel channel, Task task) throws Exception { + final AbstractRunnable receiveMessage = getReceiveRunnable(request, channel, task); + try (ThreadContext.StoredContext ctx = threadContext.newStoredContext(true)) { + if (licenseState.isSecurityEnabled() && licenseState.isAuthAllowed()) { + String profile = channel.getProfileName(); + ServerTransportFilter filter = profileFilters.get(profile); + + if (filter == null) { + if (TransportService.DIRECT_RESPONSE_PROFILE.equals(profile)) { + // apply the default filter to local requests. We never know what the request is or who sent it... + filter = profileFilters.get("default"); + } else { + String msg = "transport profile [" + profile + "] is not associated with a transport filter"; + throw new IllegalStateException(msg); + } + } + assert filter != null; + final Thread executingThread = Thread.currentThread(); + + CheckedConsumer consumer = (x) -> { + final Executor executor; + if (executingThread == Thread.currentThread()) { + // only fork off if we get called on another thread this means we moved to + // an async execution and in this case we need to go back to the thread pool + // that was actually executing it. 
it's also possible that the + // thread-pool we are supposed to execute on is `SAME` in that case + // the handler is OK with executing on a network thread and we can just continue even if + // we are on another thread due to async operations + executor = threadPool.executor(ThreadPool.Names.SAME); + } else { + executor = threadPool.executor(executorName); + } + + try { + executor.execute(receiveMessage); + } catch (Exception e) { + receiveMessage.onFailure(e); + } + + }; + ActionListener filterListener = ActionListener.wrap(consumer, receiveMessage::onFailure); + filter.inbound(action, request, channel, filterListener); + } else { + receiveMessage.run(); + } + } + } + + @Override + public void messageReceived(T request, TransportChannel channel) throws Exception { + throw new UnsupportedOperationException("task parameter is required for this operation"); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java new file mode 100644 index 0000000000000..161ac3678aeab --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java @@ -0,0 +1,223 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.transport; + +import io.netty.channel.Channel; +import io.netty.handler.ssl.SslHandler; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexAction; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; +import org.elasticsearch.action.admin.indices.open.OpenIndexAction; +import org.elasticsearch.action.support.DestructiveOperations; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.transport.TaskTransportChannel; +import org.elasticsearch.transport.TcpTransportChannel; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.netty4.NettyTcpChannel; +import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.user.KibanaUser; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.action.SecurityActionMapper; +import org.elasticsearch.xpack.security.authc.AuthenticationService; +import org.elasticsearch.xpack.security.authc.pki.PkiRealm; +import org.elasticsearch.xpack.security.authz.AuthorizationService; +import org.elasticsearch.xpack.security.authz.AuthorizationUtils; + +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLPeerUnverifiedException; + +import java.io.IOException; +import java.security.cert.Certificate; +import java.security.cert.X509Certificate; + 
+import static org.elasticsearch.xpack.core.security.support.Exceptions.authenticationError;
+
+/**
+ * This interface allows intercepting messages as they come in and executing logic on them.
+ * This is used in x-pack security to execute the authentication/authorization on incoming
+ * messages.
+ * Note that this filter only applies to nodes, not to clients.
+ */
+public interface ServerTransportFilter {
+
+    /**
+     * Called just after the given request was received by the transport. Any exception
+     * thrown by this method will stop the request from being handled and the error will
+     * be sent back to the sender.
+     */
+    void inbound(String action, TransportRequest request, TransportChannel transportChannel, ActionListener<Void> listener)
+            throws IOException;
+
+    /**
+     * The server transport filter that should be used on nodes, as it ensures that an incoming
+     * request is properly authenticated and authorized.
+     */
+    class NodeProfile implements ServerTransportFilter {
+        private static final Logger logger = Loggers.getLogger(NodeProfile.class);
+
+        private final AuthenticationService authcService;
+        private final AuthorizationService authzService;
+        private final SecurityActionMapper actionMapper = new SecurityActionMapper();
+        private final ThreadContext threadContext;
+        private final boolean extractClientCert;
+        private final DestructiveOperations destructiveOperations;
+        private final boolean reservedRealmEnabled;
+        private final SecurityContext securityContext;
+
+        NodeProfile(AuthenticationService authcService, AuthorizationService authzService,
+                    ThreadContext threadContext, boolean extractClientCert, DestructiveOperations destructiveOperations,
+                    boolean reservedRealmEnabled, SecurityContext securityContext) {
+            this.authcService = authcService;
+            this.authzService = authzService;
+            this.threadContext = threadContext;
+            this.extractClientCert = extractClientCert;
+            this.destructiveOperations = destructiveOperations;
+            this.reservedRealmEnabled = reservedRealmEnabled;
+            this.securityContext = securityContext;
+        }
+
+        @Override
+        public void inbound(String action, TransportRequest request, TransportChannel transportChannel, ActionListener<Void> listener)
+                throws IOException {
+            if (CloseIndexAction.NAME.equals(action) || OpenIndexAction.NAME.equals(action) || DeleteIndexAction.NAME.equals(action)) {
+                IndicesRequest indicesRequest = (IndicesRequest) request;
+                try {
+                    destructiveOperations.failDestructive(indicesRequest.indices());
+                } catch (IllegalArgumentException e) {
+                    listener.onFailure(e);
+                    return;
+                }
+            }
+            /*
+             here we don't have a fallback user, as all incoming requests are
+             expected to have a user attached (either in headers or in context).
+             We can make this assumption because on nodes we make sure all outgoing
+             requests from all the nodes are attached with a user (either a serialized
+             user or an authentication token)
+             */
+            String securityAction = actionMapper.action(action, request);
+
+            TransportChannel unwrappedChannel = transportChannel;
+            if (unwrappedChannel instanceof TaskTransportChannel) {
+                unwrappedChannel = ((TaskTransportChannel) unwrappedChannel).getChannel();
+            }
+
+            if (extractClientCert && (unwrappedChannel instanceof TcpTransportChannel) &&
+                    ((TcpTransportChannel) unwrappedChannel).getChannel() instanceof NettyTcpChannel) {
+                Channel channel = ((NettyTcpChannel) ((TcpTransportChannel) unwrappedChannel).getChannel()).getLowLevelChannel();
+                SslHandler sslHandler = channel.pipeline().get(SslHandler.class);
+                if (channel.isOpen()) {
+                    assert sslHandler != null : "channel [" +
channel + "] did not have a ssl handler. pipeline " + channel.pipeline(); + extractClientCertificates(logger, threadContext, sslHandler.engine(), channel); + } + } + + final Version version = transportChannel.getVersion().equals(Version.V_5_4_0) ? Version.CURRENT : transportChannel.getVersion(); + authcService.authenticate(securityAction, request, (User)null, ActionListener.wrap((authentication) -> { + if (reservedRealmEnabled && authentication.getVersion().before(Version.V_5_2_0) && + KibanaUser.NAME.equals(authentication.getUser().authenticatedUser().principal())) { + executeAsCurrentVersionKibanaUser(securityAction, request, transportChannel, listener, authentication); + } else if (securityAction.equals(TransportService.HANDSHAKE_ACTION_NAME) && + SystemUser.is(authentication.getUser()) == false) { + securityContext.executeAsUser(SystemUser.INSTANCE, (ctx) -> { + final Authentication replaced = Authentication.getAuthentication(threadContext); + final AuthorizationUtils.AsyncAuthorizer asyncAuthorizer = + new AuthorizationUtils.AsyncAuthorizer(replaced, listener, (userRoles, runAsRoles) -> { + authzService.authorize(replaced, securityAction, request, userRoles, runAsRoles); + listener.onResponse(null); + }); + asyncAuthorizer.authorize(authzService); + }, version); + } else { + final AuthorizationUtils.AsyncAuthorizer asyncAuthorizer = + new AuthorizationUtils.AsyncAuthorizer(authentication, listener, (userRoles, runAsRoles) -> { + authzService.authorize(authentication, securityAction, request, userRoles, runAsRoles); + listener.onResponse(null); + }); + asyncAuthorizer.authorize(authzService); + } + }, listener::onFailure)); + } + + private void executeAsCurrentVersionKibanaUser(String securityAction, TransportRequest request, TransportChannel transportChannel, + ActionListener listener, Authentication authentication) { + // the authentication came from an older node - so let's replace the user with our version + final User kibanaUser = new KibanaUser(authentication.getUser().enabled()); + if (kibanaUser.enabled()) { + securityContext.executeAsUser(kibanaUser, (original) -> { + final Authentication replacedUserAuth = securityContext.getAuthentication(); + final AuthorizationUtils.AsyncAuthorizer asyncAuthorizer = + new AuthorizationUtils.AsyncAuthorizer(replacedUserAuth, listener, (userRoles, runAsRoles) -> { + authzService.authorize(replacedUserAuth, securityAction, request, userRoles, runAsRoles); + listener.onResponse(null); + }); + asyncAuthorizer.authorize(authzService); + }, transportChannel.getVersion()); + } else { + throw new IllegalStateException("a disabled user should never be sent. " + kibanaUser); + } + } + } + + static void extractClientCertificates(Logger logger, ThreadContext threadContext, SSLEngine sslEngine, Channel channel) { + try { + Certificate[] certs = sslEngine.getSession().getPeerCertificates(); + if (certs instanceof X509Certificate[]) { + threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, certs); + } + } catch (SSLPeerUnverifiedException e) { + // this happens when client authentication is optional and the client does not provide credentials. 
If client
+            // authentication was required then this connection should be closed before ever getting into this class
+            assert sslEngine.getNeedClientAuth() == false;
+            assert sslEngine.getWantClientAuth();
+            if (logger.isTraceEnabled()) {
+                logger.trace(
+                        (Supplier) () -> new ParameterizedMessage(
+                                "SSL Peer did not present a certificate on channel [{}]", channel), e);
+            } else if (logger.isDebugEnabled()) {
+                logger.debug("SSL Peer did not present a certificate on channel [{}]", channel);
+            }
+        }
+    }
+
+    /**
+     * A server transport filter that rejects internal calls; it should be used on connections
+     * that only clients connect to. This ensures that no client can send any internal actions
+     * or shard level actions. As it extends NodeProfile, authentication/authorization is
+     * done as well.
+     */
+    class ClientProfile extends NodeProfile {
+
+        ClientProfile(AuthenticationService authcService, AuthorizationService authzService,
+                      ThreadContext threadContext, boolean extractClientCert, DestructiveOperations destructiveOperations,
+                      boolean reservedRealmEnabled, SecurityContext securityContext) {
+            super(authcService, authzService, threadContext, extractClientCert, destructiveOperations, reservedRealmEnabled,
+                    securityContext);
+        }
+
+        @Override
+        public void inbound(String action, TransportRequest request, TransportChannel transportChannel, ActionListener<Void> listener)
+                throws IOException {
+            // TODO is ']' sufficient to mark as shard action?
+            final boolean isInternalOrShardAction = action.startsWith("internal:") || action.endsWith("]");
+            if (isInternalOrShardAction && TransportService.HANDSHAKE_ACTION_NAME.equals(action) == false) {
+                throw authenticationError("executing internal/shard actions is considered malicious and forbidden");
+            }
+            super.inbound(action, request, transportChannel, listener);
+        }
+    }
+
+}
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/filter/IPFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/filter/IPFilter.java
new file mode 100644
index 0000000000000..586e9cd65071b
--- /dev/null
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/filter/IPFilter.java
@@ -0,0 +1,304 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security.transport.filter;
+
+
+import io.netty.handler.ipfilter.IpFilterRuleType;
+import org.apache.logging.log4j.Logger;
+import org.apache.lucene.util.SetOnce;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.BoundTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.license.XPackLicenseState;
+import org.elasticsearch.transport.TcpTransport;
+import org.elasticsearch.xpack.security.audit.AuditTrailService;
+
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import static java.util.Collections.unmodifiableMap;
+import static org.elasticsearch.xpack.core.security.SecurityField.setting;
+
+public class IPFilter {
+
+    /**
+     * .http has been chosen for handling HTTP filters, which are not part of the profiles.
+     * The profiles are only handled for the transport protocol, so we need our own kind of profile
+     * for HTTP. This name starts with a dot, because no profile name can ever start like that due to
+     * how we handle settings.
+     */
+    public static final String HTTP_PROFILE_NAME = ".http";
+
+    public static final Setting<Boolean> ALLOW_BOUND_ADDRESSES_SETTING =
+            Setting.boolSetting(setting("filter.always_allow_bound_address"), true, Property.NodeScope);
+
+    public static final Setting<Boolean> IP_FILTER_ENABLED_HTTP_SETTING = Setting.boolSetting(setting("http.filter.enabled"),
+            true, Property.Dynamic, Property.NodeScope);
+
+    public static final Setting<Boolean> IP_FILTER_ENABLED_SETTING = Setting.boolSetting(setting("transport.filter.enabled"),
+            true, Property.Dynamic, Property.NodeScope);
+
+    public static final Setting<List<String>> TRANSPORT_FILTER_ALLOW_SETTING = Setting.listSetting(setting("transport.filter.allow"),
+            Collections.emptyList(), Function.identity(), Property.Dynamic, Property.NodeScope);
+
+    public static final Setting<List<String>> TRANSPORT_FILTER_DENY_SETTING = Setting.listSetting(setting("transport.filter.deny"),
+            Collections.emptyList(), Function.identity(), Property.Dynamic, Property.NodeScope);
+
+    public static final Setting.AffixSetting<List<String>> PROFILE_FILTER_DENY_SETTING = Setting.affixKeySetting("transport.profiles.",
+            "xpack.security.filter.deny", key -> Setting.listSetting(key, Collections.emptyList(), Function.identity(),
+            Property.Dynamic, Property.NodeScope));
+    public static final Setting.AffixSetting<List<String>> PROFILE_FILTER_ALLOW_SETTING = Setting.affixKeySetting("transport.profiles.",
+            "xpack.security.filter.allow", key -> Setting.listSetting(key, Collections.emptyList(), Function.identity(),
+            Property.Dynamic, Property.NodeScope));
+
+    private static final Setting<List<String>> HTTP_FILTER_ALLOW_FALLBACK =
+            Setting.listSetting("transport.profiles.default.xpack.security.filter.allow", TRANSPORT_FILTER_ALLOW_SETTING, s -> s,
+            Property.NodeScope);
+    public static final Setting<List<String>> HTTP_FILTER_ALLOW_SETTING = Setting.listSetting(setting("http.filter.allow"),
+            HTTP_FILTER_ALLOW_FALLBACK, Function.identity(), Property.Dynamic, Property.NodeScope);
+
+    private static final Setting<List<String>> HTTP_FILTER_DENY_FALLBACK =
Setting.listSetting("transport.profiles.default.xpack.security.filter.deny", TRANSPORT_FILTER_DENY_SETTING, s -> s, + Property.NodeScope); + public static final Setting> HTTP_FILTER_DENY_SETTING = Setting.listSetting(setting("http.filter.deny"), + HTTP_FILTER_DENY_FALLBACK, Function.identity(), Property.Dynamic, Property.NodeScope); + + public static final Map DISABLED_USAGE_STATS = new MapBuilder() + .put("http", false) + .put("transport", false) + .immutableMap(); + + public static final SecurityIpFilterRule DEFAULT_PROFILE_ACCEPT_ALL = new SecurityIpFilterRule(true, "default:accept_all") { + + @Override + public boolean matches(InetSocketAddress remoteAddress) { + return true; + } + + @Override + public IpFilterRuleType ruleType() { + return IpFilterRuleType.ACCEPT; + } + }; + + private final AuditTrailService auditTrail; + private final XPackLicenseState licenseState; + private final boolean alwaysAllowBoundAddresses; + + private final Logger logger; + private volatile Map rules = Collections.emptyMap(); + private volatile boolean isIpFilterEnabled; + private volatile boolean isHttpFilterEnabled; + private final Set profiles; + private volatile List transportAllowFilter; + private volatile List transportDenyFilter; + private volatile List httpAllowFilter; + private volatile List httpDenyFilter; + private final SetOnce boundTransportAddress = new SetOnce<>(); + private final SetOnce boundHttpTransportAddress = new SetOnce<>(); + private final SetOnce> profileBoundAddress = new SetOnce<>(); + private final Map> profileAllowRules = Collections.synchronizedMap(new HashMap<>()); + private final Map> profileDenyRules = Collections.synchronizedMap(new HashMap<>()); + + public IPFilter(final Settings settings, AuditTrailService auditTrail, ClusterSettings clusterSettings, + XPackLicenseState licenseState) { + this.logger = Loggers.getLogger(getClass(), settings); + this.auditTrail = auditTrail; + this.licenseState = licenseState; + this.alwaysAllowBoundAddresses = ALLOW_BOUND_ADDRESSES_SETTING.get(settings); + httpDenyFilter = HTTP_FILTER_DENY_SETTING.get(settings); + httpAllowFilter = HTTP_FILTER_ALLOW_SETTING.get(settings); + transportAllowFilter = TRANSPORT_FILTER_ALLOW_SETTING.get(settings); + transportDenyFilter = TRANSPORT_FILTER_DENY_SETTING.get(settings); + isHttpFilterEnabled = IP_FILTER_ENABLED_HTTP_SETTING.get(settings); + isIpFilterEnabled = IP_FILTER_ENABLED_SETTING.get(settings); + + this.profiles = settings.getGroups("transport.profiles.",true).keySet().stream().filter(k -> TcpTransport + .DEFAULT_PROFILE.equals(k) == false).collect(Collectors.toSet()); // exclude default profile -- it's handled differently + for (String profile : profiles) { + Setting> allowSetting = PROFILE_FILTER_ALLOW_SETTING.getConcreteSettingForNamespace(profile); + profileAllowRules.put(profile, allowSetting.get(settings)); + Setting> denySetting = PROFILE_FILTER_DENY_SETTING.getConcreteSettingForNamespace(profile); + profileDenyRules.put(profile, denySetting.get(settings)); + } + clusterSettings.addSettingsUpdateConsumer(IP_FILTER_ENABLED_HTTP_SETTING, this::setHttpFiltering); + clusterSettings.addSettingsUpdateConsumer(IP_FILTER_ENABLED_SETTING, this::setTransportFiltering); + clusterSettings.addSettingsUpdateConsumer(TRANSPORT_FILTER_ALLOW_SETTING, this::setTransportAllowFilter); + clusterSettings.addSettingsUpdateConsumer(TRANSPORT_FILTER_DENY_SETTING, this::setTransportDenyFilter); + clusterSettings.addSettingsUpdateConsumer(HTTP_FILTER_ALLOW_SETTING, this::setHttpAllowFilter); + 
clusterSettings.addSettingsUpdateConsumer(HTTP_FILTER_DENY_SETTING, this::setHttpDenyFilter); + clusterSettings.addAffixUpdateConsumer(PROFILE_FILTER_ALLOW_SETTING, this::setProfileAllowRules, (a,b) -> {}); + clusterSettings.addAffixUpdateConsumer(PROFILE_FILTER_DENY_SETTING, this::setProfileDenyRules, (a,b) -> {}); + updateRules(); + } + + public Map usageStats() { + Map map = new HashMap<>(2); + final boolean httpFilterEnabled = isHttpFilterEnabled && (httpAllowFilter.isEmpty() == false || httpDenyFilter.isEmpty() == false); + final boolean transportFilterEnabled = isIpFilterEnabled && + (transportAllowFilter.isEmpty() == false || transportDenyFilter.isEmpty() == false); + map.put("http", httpFilterEnabled); + map.put("transport", transportFilterEnabled); + return map; + } + + private void setProfileAllowRules(String profile, List rules) { + profileAllowRules.put(profile, rules); + updateRules(); + } + + private void setProfileDenyRules(String profile, List rules) { + profileDenyRules.put(profile, rules); + updateRules(); + } + + private void setHttpDenyFilter(List filter) { + this.httpDenyFilter = filter; + updateRules(); + } + + private void setHttpAllowFilter(List filter) { + this.httpAllowFilter = filter; + updateRules(); + } + + private void setTransportDenyFilter(List filter) { + this.transportDenyFilter = filter; + updateRules(); + } + + private void setTransportAllowFilter(List filter) { + this.transportAllowFilter = filter; + updateRules(); + } + + private void setTransportFiltering(boolean enabled) { + this.isIpFilterEnabled = enabled; + updateRules(); + } + + private void setHttpFiltering(boolean enabled) { + this.isHttpFilterEnabled = enabled; + updateRules(); + } + + public boolean accept(String profile, InetSocketAddress peerAddress) { + if (licenseState.isSecurityEnabled() == false || licenseState.isIpFilteringAllowed() == false) { + return true; + } + + if (!rules.containsKey(profile)) { + // FIXME we need to audit here + return true; + } + + for (SecurityIpFilterRule rule : rules.get(profile)) { + if (rule.matches(peerAddress)) { + boolean isAllowed = rule.ruleType() == IpFilterRuleType.ACCEPT; + if (isAllowed) { + auditTrail.connectionGranted(peerAddress.getAddress(), profile, rule); + } else { + auditTrail.connectionDenied(peerAddress.getAddress(), profile, rule); + } + return isAllowed; + } + } + + auditTrail.connectionGranted(peerAddress.getAddress(), profile, DEFAULT_PROFILE_ACCEPT_ALL); + return true; + } + + private synchronized void updateRules() { + this.rules = parseSettings(); + } + + private Map parseSettings() { + if (isIpFilterEnabled || isHttpFilterEnabled) { + Map profileRules = new HashMap<>(); + if (isHttpFilterEnabled && boundHttpTransportAddress.get() != null) { + TransportAddress[] localAddresses = boundHttpTransportAddress.get().boundAddresses(); + profileRules.put(HTTP_PROFILE_NAME, createRules(httpAllowFilter, httpDenyFilter, localAddresses)); + } + + if (isIpFilterEnabled && boundTransportAddress.get() != null) { + TransportAddress[] localAddresses = boundTransportAddress.get().boundAddresses(); + profileRules.put(TcpTransport.DEFAULT_PROFILE, createRules(transportAllowFilter, transportDenyFilter, localAddresses)); + for (String profile : profiles) { + BoundTransportAddress profileBoundTransportAddress = profileBoundAddress.get().get(profile); + if (profileBoundTransportAddress == null) { + // this could happen if a user updates the settings dynamically with a new profile + logger.warn("skipping ip filter rules for profile [{}] since the profile 
is not bound to any addresses", profile); + continue; + } + final List allowRules = this.profileAllowRules.getOrDefault(profile, Collections.emptyList()); + final List denyRules = this.profileDenyRules.getOrDefault(profile, Collections.emptyList()); + profileRules.put(profile, createRules(allowRules, denyRules, profileBoundTransportAddress.boundAddresses())); + } + } + + logger.debug("loaded ip filtering profiles: {}", profileRules.keySet()); + return unmodifiableMap(profileRules); + } else { + return Collections.emptyMap(); + } + + } + + private SecurityIpFilterRule[] createRules(List allow, List deny, TransportAddress[] boundAddresses) { + List rules = new ArrayList<>(); + // if we are always going to allow the bound addresses, then the rule for them should be the first rule in the list + if (alwaysAllowBoundAddresses) { + assert boundAddresses != null && boundAddresses.length > 0; + rules.add(new SecurityIpFilterRule(true, boundAddresses)); + } + + // add all rules to the same list. Allow takes precedence so they must come first! + for (String value : allow) { + rules.add(new SecurityIpFilterRule(true, value)); + } + for (String value : deny) { + rules.add(new SecurityIpFilterRule(false, value)); + } + + return rules.toArray(new SecurityIpFilterRule[rules.size()]); + } + + public void setBoundTransportAddress(BoundTransportAddress boundTransportAddress, + Map profileBoundAddress) { + this.boundTransportAddress.set(boundTransportAddress); + this.profileBoundAddress.set(profileBoundAddress); + updateRules(); + } + + public void setBoundHttpTransportAddress(BoundTransportAddress boundHttpTransportAddress) { + this.boundHttpTransportAddress.set(boundHttpTransportAddress); + updateRules(); + } + + public static void addSettings(List> settings) { + settings.add(ALLOW_BOUND_ADDRESSES_SETTING); + settings.add(IP_FILTER_ENABLED_SETTING); + settings.add(IP_FILTER_ENABLED_HTTP_SETTING); + settings.add(HTTP_FILTER_ALLOW_SETTING); + settings.add(HTTP_FILTER_DENY_SETTING); + settings.add(TRANSPORT_FILTER_ALLOW_SETTING); + settings.add(TRANSPORT_FILTER_DENY_SETTING); + settings.add(PROFILE_FILTER_ALLOW_SETTING); + settings.add(PROFILE_FILTER_DENY_SETTING); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/filter/PatternRule.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/filter/PatternRule.java new file mode 100644 index 0000000000000..9d255497ce51b --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/filter/PatternRule.java @@ -0,0 +1,159 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.transport.filter; + +import io.netty.handler.ipfilter.IpFilterRule; +import io.netty.handler.ipfilter.IpFilterRuleType; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.network.NetworkAddress; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.NetworkInterface; +import java.net.SocketException; +import java.util.regex.Pattern; + +/** + * The Class PatternRule represents an IP filter rule using string patterns. + *
+ * Rule Syntax: + *
+ *

+ * Rule ::= [n|i]:address          n stands for computer name, i for ip address
+ * address ::= <regex> | localhost
+ * regex is a regular expression with '*' as a multi-character and '?' as a single-character wildcard
+ * 
+ *
+ * Example: allow localhost: + *
+ * new PatternRule(IpFilterRuleType.ACCEPT, "n:localhost") + *
+ * Example: allow local lan: + *
+ * new PatternRule(IpFilterRuleType.ACCEPT, "i:192.168.0.*") + *
+ * Example: block all + *
+ * new PatternRule(IpFilterRuleType.REJECT, "n:*") + *
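+ * Clauses may also be combined with commas. A hypothetical rule that blocks both an internal host name
+ * pattern and a private network range (the names below are illustrative only):
+ * new PatternRule(IpFilterRuleType.REJECT, "n:*.internal,i:10.0.0.*")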
+ */ +// this has been adopted from Netty3 there is no replacement in netty4 for this. +final class PatternRule implements IpFilterRule { + + private final Pattern ipPattern; + private final Pattern namePattern; + private final IpFilterRuleType ruleType; + private final boolean localhost; + private final String pattern; + + /** + * Instantiates a new pattern rule. + * + * @param ruleType indicates if this is an allow or block rule + * @param pattern the filter pattern + */ + PatternRule(IpFilterRuleType ruleType, String pattern) { + this.ruleType = ruleType; + this.pattern = pattern; + Pattern namePattern = null; + Pattern ipPattern = null; + boolean localhost = false; + if (pattern != null) { + String[] acls = pattern.split(","); + String ip = ""; + String name = ""; + for (String c : acls) { + c = c.trim(); + if ("n:localhost".equals(c)) { + localhost = true; + } else if (c.startsWith("n:")) { + name = addRule(name, c.substring(2)); + } else if (c.startsWith("i:")) { + ip = addRule(ip, c.substring(2)); + } + } + if (ip.length() != 0) { + ipPattern = Pattern.compile(ip); + } + if (name.length() != 0) { + namePattern = Pattern.compile(name); + } + } + this.ipPattern = ipPattern; + this.namePattern = namePattern; + this.localhost = localhost; + } + + /** + * returns the pattern. + * + * @return the pattern + */ + String getPattern() { + return pattern; + } + + private static String addRule(String pattern, String rule) { + if (rule == null || rule.length() == 0) { + return pattern; + } + if (pattern.length() != 0) { + pattern += "|"; + } + rule = rule.replaceAll("\\.", "\\\\."); + rule = rule.replaceAll("\\*", ".*"); + rule = rule.replaceAll("\\?", "."); + pattern += '(' + rule + ')'; + return pattern; + } + + private boolean isLocalhost(InetAddress address) { + try { + return address.isAnyLocalAddress() || address.isLoopbackAddress() || NetworkInterface.getByInetAddress(address) != null; + } catch (SocketException e) { + // not defined - ie. it's not a local address + return false; + } + } + + + @Override + public boolean matches(InetSocketAddress remoteAddress) { + InetAddress inetAddress = remoteAddress.getAddress(); + if (localhost) { + if (isLocalhost(inetAddress)) { + return true; + } + } + if (ipPattern != null) { + String format = NetworkAddress.format(inetAddress); + if (ipPattern.matcher(format).matches()) { + return true; + } + } + + return checkHostName(inetAddress); + } + + @SuppressForbidden(reason = "we compare the hostname of the address this is how netty3 did it and we keep it for BWC") + private boolean checkHostName(InetAddress address) { + if (namePattern != null) { + if (namePattern.matcher(address.getHostName()).matches()) { + return true; + } + } + return false; + } + + @Override + public IpFilterRuleType ruleType() { + return ruleType; + } + + boolean isLocalhost() { + return localhost; + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/filter/SecurityIpFilterRule.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/filter/SecurityIpFilterRule.java new file mode 100644 index 0000000000000..226dad8c343ed --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/filter/SecurityIpFilterRule.java @@ -0,0 +1,226 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.transport.filter; + +import io.netty.handler.ipfilter.IpFilterRule; +import io.netty.handler.ipfilter.IpFilterRuleType; +import io.netty.handler.ipfilter.IpSubnetFilterRule; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.StringJoiner; +import java.util.StringTokenizer; + +/** + * decorator class to have a useful toString() method for an IpFilterRule + * as this is needed for audit logging + */ +public class SecurityIpFilterRule implements IpFilterRule, ToXContentFragment { + + public static final SecurityIpFilterRule ACCEPT_ALL = new SecurityIpFilterRule(true, "accept_all") { + @Override + public boolean matches(InetSocketAddress remoteAddress) { + return true; + } + + @Override + public IpFilterRuleType ruleType() { + return IpFilterRuleType.ACCEPT; + } + }; + + public static final SecurityIpFilterRule DENY_ALL = new SecurityIpFilterRule(true, "deny_all") { + @Override + public boolean matches(InetSocketAddress remoteAddress) { + return true; + } + + @Override + public IpFilterRuleType ruleType() { + return IpFilterRuleType.REJECT; + } + }; + + private final IpFilterRule ipFilterRule; + private final String ruleSpec; + + public SecurityIpFilterRule(boolean isAllowRule, String ruleSpec) { + this.ipFilterRule = getRule(isAllowRule, ruleSpec); + this.ruleSpec = ruleSpec; + } + + SecurityIpFilterRule(boolean isAllowRule, TransportAddress... addresses) { + this.ruleSpec = getRuleSpec(addresses); + this.ipFilterRule = getRule(isAllowRule, ruleSpec); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + if (ruleType() == IpFilterRuleType.ACCEPT) { + builder.append("allow "); + } else { + builder.append("deny "); + } + + builder.append(ruleSpec); + return builder.toString(); + } + + static Tuple parseSubnetMask(String address) throws UnknownHostException { + int p = address.indexOf('/'); + if (p < 0) { + throw new UnknownHostException("Invalid CIDR notation used: " + address); + } + if (p == address.length() -1) { + throw new IllegalArgumentException("address must not end with a '/"); + } + String addrString = address.substring(0, p); + String maskString = address.substring(p + 1); + InetAddress addr = InetAddress.getByName(addrString); + int mask; + if (maskString.indexOf('.') < 0) { + mask = parseInt(maskString, -1); + } else { + mask = getNetMask(maskString); + if (addr instanceof Inet6Address) { + mask += 96; + } + } + if (mask < 0) { + throw new UnknownHostException("Invalid mask length used: " + maskString); + } + return new Tuple<>(addr, mask); + } + + + /** + * Get the Subnet's Netmask in Decimal format.
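+ * The length is derived by counting the set bits across the four octets; for example a hypothetical mask
+ * of "255.255.252.0" corresponds to a /22.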
+ * i.e.: getNetMask("255.255.255.0") returns the integer CIDR mask + * + * @param netMask a network mask + * @return the integer CIDR mask + */ + private static int getNetMask(String netMask) { + StringTokenizer nm = new StringTokenizer(netMask, "."); + int i = 0; + int[] netmask = new int[4]; + while (nm.hasMoreTokens()) { + netmask[i] = Integer.parseInt(nm.nextToken()); + i++; + } + int mask1 = 0; + for (i = 0; i < 4; i++) { + mask1 += Integer.bitCount(netmask[i]); + } + return mask1; + } + + /** + * @param intstr a string containing an integer. + * @param def the default if the string does not contain a valid + * integer. + * @return the inetAddress from the integer + */ + private static int parseInt(String intstr, int def) { + Integer res; + if (intstr == null) { + return def; + } + try { + res = Integer.decode(intstr); + } catch (Exception e) { + res = def; + } + return res.intValue(); + } + + static IpFilterRule getRule(boolean isAllowRule, String value) { + IpFilterRuleType filterRuleType = isAllowRule ? IpFilterRuleType.ACCEPT : IpFilterRuleType.REJECT; + String[] values = value.split(","); + if (Arrays.stream(values).anyMatch("_all"::equals)) { + // all rule was found. It should be the only rule! + if (values.length != 1) { + throw new IllegalArgumentException("rules that specify _all may not have other values!"); + } + return isAllowRule ? ACCEPT_ALL : DENY_ALL; + } + + if (value.contains("/")) { + // subnet rule... + if (values.length != 1) { + throw new IllegalArgumentException("multiple subnet filters cannot be specified in a single rule!"); + } + try { + Tuple inetAddressIntegerTuple = parseSubnetMask(value); + return new IpSubnetFilterRule(inetAddressIntegerTuple.v1(), inetAddressIntegerTuple.v2(), filterRuleType); + } catch (UnknownHostException e) { + String ruleType = (isAllowRule ? "allow " : "deny "); + throw new ElasticsearchException("unable to create ip filter for rule [" + ruleType + " " + value + "]", e); + } + } else { + // pattern rule - not netmask + StringJoiner rules = new StringJoiner(","); + for (String pattern : values) { + if (InetAddresses.isInetAddress(pattern)) { + // we want the inet addresses to be normalized especially in the IPv6 case where :0:0: is equivalent to :: + // that's why we convert the address here and then format since PatternRule also uses the formatting to normalize + // the value we are matching against + InetAddress inetAddress = InetAddresses.forString(pattern); + pattern = "i:" + NetworkAddress.format(inetAddress); + } else { + pattern = "n:" + pattern; + } + rules.add(pattern); + } + return new PatternRule(filterRuleType, rules.toString()); + } + } + + static String getRuleSpec(TransportAddress... 
addresses) { + StringBuilder ruleSpec = new StringBuilder(); + boolean firstAdded = false; + for (TransportAddress transportAddress : addresses) { + if (firstAdded) { + ruleSpec.append(","); + } else { + firstAdded = true; + } + + ruleSpec.append(NetworkAddress.format(transportAddress.address().getAddress())); + } + return ruleSpec.toString(); + } + + @Override + public boolean matches(InetSocketAddress remoteAddress) { + return ipFilterRule.matches(remoteAddress); + } + + @Override + public IpFilterRuleType ruleType() { + return ipFilterRule.ruleType(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilter.java new file mode 100644 index 0000000000000..6f7c4f143cb1c --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilter.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.transport.netty4; + +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.ipfilter.AbstractRemoteAddressFilter; +import org.elasticsearch.xpack.security.transport.filter.IPFilter; + +import java.net.InetSocketAddress; + +@ChannelHandler.Sharable +class IpFilterRemoteAddressFilter extends AbstractRemoteAddressFilter { + + private final IPFilter filter; + private final String profile; + + IpFilterRemoteAddressFilter(final IPFilter filter, final String profile) { + this.filter = filter; + this.profile = profile; + } + + @Override + protected boolean accept(final ChannelHandlerContext ctx, final InetSocketAddress remoteAddress) throws Exception { + // at this stage no auth has happened, so we do not have any principal anyway + return filter.accept(profile, remoteAddress); + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransport.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransport.java new file mode 100644 index 0000000000000..5b4543ccaf275 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransport.java @@ -0,0 +1,127 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.transport.netty4; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.ssl.SslHandler; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.http.netty4.Netty4HttpServerTransport; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.netty4.Netty4Utils; +import org.elasticsearch.xpack.core.ssl.SSLConfiguration; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.transport.filter.IPFilter; + +import javax.net.ssl.SSLEngine; + +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION; +import static org.elasticsearch.xpack.core.XPackSettings.HTTP_SSL_ENABLED; +import static org.elasticsearch.xpack.core.security.transport.SSLExceptionHelper.isCloseDuringHandshakeException; +import static org.elasticsearch.xpack.core.security.transport.SSLExceptionHelper.isNotSslRecordException; +import static org.elasticsearch.xpack.core.security.transport.SSLExceptionHelper.isReceivedCertificateUnknownException; + +public class SecurityNetty4HttpServerTransport extends Netty4HttpServerTransport { + + private final IPFilter ipFilter; + private final Settings sslSettings; + private final SSLService sslService; + private final SSLConfiguration sslConfiguration; + + public SecurityNetty4HttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, IPFilter ipFilter, + SSLService sslService, ThreadPool threadPool, NamedXContentRegistry xContentRegistry, + Dispatcher dispatcher) { + super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher); + this.ipFilter = ipFilter; + final boolean ssl = HTTP_SSL_ENABLED.get(settings); + this.sslSettings = SSLService.getHttpTransportSSLSettings(settings); + this.sslService = sslService; + if (ssl) { + this.sslConfiguration = sslService.sslConfiguration(sslSettings, Settings.EMPTY); + if (sslService.isConfigurationValidForServerUsage(sslConfiguration) == false) { + throw new IllegalArgumentException("a key must be provided to run as a server. 
the key should be configured using the " + + "[xpack.security.http.ssl.key] or [xpack.security.http.ssl.keystore.path] setting"); + } + } else { + this.sslConfiguration = null; + } + + } + + @Override + protected void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + Netty4Utils.maybeDie(cause); + if (!lifecycle.started()) { + return; + } + + if (isNotSslRecordException(cause)) { + if (logger.isTraceEnabled()) { + logger.trace(new ParameterizedMessage("received plaintext http traffic on a https channel, closing connection {}", + ctx.channel()), cause); + } else { + logger.warn("received plaintext http traffic on a https channel, closing connection {}", ctx.channel()); + } + ctx.channel().close(); + } else if (isCloseDuringHandshakeException(cause)) { + if (logger.isTraceEnabled()) { + logger.trace(new ParameterizedMessage("connection {} closed during ssl handshake", ctx.channel()), cause); + } else { + logger.warn("connection {} closed during ssl handshake", ctx.channel()); + } + ctx.channel().close(); + } else if (isReceivedCertificateUnknownException(cause)) { + if (logger.isTraceEnabled()) { + logger.trace(new ParameterizedMessage("http client did not trust server's certificate, closing connection {}", + ctx.channel()), cause); + } else { + logger.warn("http client did not trust this server's certificate, closing connection {}", ctx.channel()); + } + ctx.channel().close(); + } else { + super.exceptionCaught(ctx, cause); + } + } + + @Override + protected void doStart() { + super.doStart(); + ipFilter.setBoundHttpTransportAddress(this.boundAddress()); + } + + @Override + public ChannelHandler configureServerChannelHandler() { + return new HttpSslChannelHandler(); + } + + private final class HttpSslChannelHandler extends HttpChannelHandler { + HttpSslChannelHandler() { + super(SecurityNetty4HttpServerTransport.this, detailedErrorsEnabled, threadPool.getThreadContext()); + } + + @Override + protected void initChannel(Channel ch) throws Exception { + super.initChannel(ch); + if (sslConfiguration != null) { + SSLEngine sslEngine = sslService.createSSLEngine(sslConfiguration, null, -1); + sslEngine.setUseClientMode(false); + ch.pipeline().addFirst("ssl", new SslHandler(sslEngine)); + } + ch.pipeline().addFirst("ip_filter", new IpFilterRemoteAddressFilter(ipFilter, IPFilter.HTTP_PROFILE_NAME)); + } + } + + public static void overrideSettings(Settings.Builder settingsBuilder, Settings settings) { + if (HTTP_SSL_ENABLED.get(settings) && SETTING_HTTP_COMPRESSION.exists(settings) == false) { + settingsBuilder.put(SETTING_HTTP_COMPRESSION.getKey(), false); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4ServerTransport.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4ServerTransport.java new file mode 100644 index 0000000000000..e0794d037e33d --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4ServerTransport.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.transport.netty4; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandler; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.transport.netty4.SecurityNetty4Transport; +import org.elasticsearch.xpack.core.ssl.SSLConfiguration; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.transport.filter.IPFilter; + +public class SecurityNetty4ServerTransport extends SecurityNetty4Transport { + + @Nullable private final IPFilter authenticator; + + public SecurityNetty4ServerTransport( + final Settings settings, + final ThreadPool threadPool, + final NetworkService networkService, + final BigArrays bigArrays, + final NamedWriteableRegistry namedWriteableRegistry, + final CircuitBreakerService circuitBreakerService, + @Nullable final IPFilter authenticator, + final SSLService sslService) { + super(settings, threadPool, networkService, bigArrays, namedWriteableRegistry, circuitBreakerService, sslService); + this.authenticator = authenticator; + } + + @Override + protected void doStart() { + super.doStart(); + if (authenticator != null) { + authenticator.setBoundTransportAddress(boundAddress(), profileBoundAddresses()); + } + } + + @Override + protected ChannelHandler getNoSslChannelInitializer(final String name) { + return new IPFilterServerChannelInitializer(name); + } + + @Override + protected ServerChannelInitializer getSslChannelInitializer(final String name, final SSLConfiguration configuration) { + return new SecurityServerChannelInitializer(name, configuration); + } + + public class IPFilterServerChannelInitializer extends ServerChannelInitializer { + + IPFilterServerChannelInitializer(final String name) { + super(name); + } + + @Override + protected void initChannel(final Channel ch) throws Exception { + super.initChannel(ch); + maybeAddIPFilter(ch, name); + } + } + + public class SecurityServerChannelInitializer extends SslChannelInitializer { + + SecurityServerChannelInitializer(final String name, final SSLConfiguration configuration) { + super(name, configuration); + } + + @Override + protected void initChannel(final Channel ch) throws Exception { + super.initChannel(ch); + maybeAddIPFilter(ch, name); + } + + } + + private void maybeAddIPFilter(final Channel ch, final String name) { + if (authenticator != null) { + ch.pipeline().addFirst("ipfilter", new IpFilterRemoteAddressFilter(authenticator, name)); + } + } + +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContext.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContext.java new file mode 100644 index 0000000000000..a4e88ec70f203 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContext.java @@ -0,0 +1,233 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.transport.nio; + +import org.elasticsearch.nio.BytesWriteOperation; +import org.elasticsearch.nio.InboundChannelBuffer; +import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.nio.SocketChannelContext; +import org.elasticsearch.nio.SocketSelector; +import org.elasticsearch.nio.WriteOperation; +import org.elasticsearch.nio.utils.ExceptionsHelper; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiConsumer; +import java.util.function.Consumer; + +/** + * Provides a TLS/SSL read/write layer over a channel. This context will use a {@link SSLDriver} to handshake + * with the peer channel. Once the handshake is complete, any data from the peer channel will be decrypted + * before being passed to the {@link ReadConsumer}. Outbound data will + * be encrypted before being flushed to the channel. + */ +public final class SSLChannelContext extends SocketChannelContext { + + private final LinkedList queued = new LinkedList<>(); + private final SSLDriver sslDriver; + private final ReadConsumer readConsumer; + private final InboundChannelBuffer buffer; + private final AtomicBoolean isClosing = new AtomicBoolean(false); + + SSLChannelContext(NioSocketChannel channel, SocketSelector selector, Consumer exceptionHandler, SSLDriver sslDriver, + ReadConsumer readConsumer, InboundChannelBuffer buffer) { + super(channel, selector, exceptionHandler); + this.sslDriver = sslDriver; + this.readConsumer = readConsumer; + this.buffer = buffer; + } + + @Override + public void register() throws IOException { + super.register(); + sslDriver.init(); + } + + @Override + public void sendMessage(ByteBuffer[] buffers, BiConsumer listener) { + if (isClosing.get()) { + listener.accept(null, new ClosedChannelException()); + return; + } + + BytesWriteOperation writeOperation = new BytesWriteOperation(this, buffers, listener); + SocketSelector selector = getSelector(); + if (selector.isOnCurrentThread() == false) { + // If this message is being sent from another thread, we queue the write to be handled by the + // network thread + selector.queueWrite(writeOperation); + return; + } + + selector.queueWriteInChannelBuffer(writeOperation); + } + + @Override + public void queueWriteOperation(WriteOperation writeOperation) { + getSelector().assertOnSelectorThread(); + if (writeOperation instanceof CloseNotifyOperation) { + sslDriver.initiateClose(); + } else { + queued.add((BytesWriteOperation) writeOperation); + } + } + + @Override + public void flushChannel() throws IOException { + if (hasIOException()) { + return; + } + // If there is currently data in the outbound write buffer, flush the buffer. + if (sslDriver.hasFlushPending()) { + // If the data is not completely flushed, exit. We cannot produce new write data until the + // existing data has been fully flushed. + flushToChannel(sslDriver.getNetworkWriteBuffer()); + if (sslDriver.hasFlushPending()) { + return; + } + } + + // If the driver is ready for application writes, we can attempt to proceed with any queued writes. 
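+ // The loop below drains the queue one operation at a time: an operation that is already fully
+ // encrypted has necessarily been flushed, so its listener is completed and it is removed; otherwise
+ // as much of it as the driver accepts is encrypted and the resulting network bytes are flushed.
+ // The loop stops as soon as a flush is left pending, because no new data can be produced until the
+ // outbound write buffer has drained.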
+ if (sslDriver.readyForApplicationWrites()) { + BytesWriteOperation currentOperation = queued.peekFirst(); + while (sslDriver.hasFlushPending() == false && currentOperation != null) { + // If the current operation has been fully consumed (encrypted) we now know that it has been + // sent (as we only get to this point if the write buffer has been fully flushed). + if (currentOperation.isFullyFlushed()) { + queued.removeFirst(); + getSelector().executeListener(currentOperation.getListener(), null); + currentOperation = queued.peekFirst(); + } else { + try { + // Attempt to encrypt application write data. The encrypted data ends up in the + // outbound write buffer. + int bytesEncrypted = sslDriver.applicationWrite(currentOperation.getBuffersToWrite()); + if (bytesEncrypted == 0) { + break; + } + currentOperation.incrementIndex(bytesEncrypted); + // Flush the write buffer to the channel + flushToChannel(sslDriver.getNetworkWriteBuffer()); + } catch (IOException e) { + queued.removeFirst(); + getSelector().executeFailedListener(currentOperation.getListener(), e); + throw e; + } + } + } + } else { + // We are not ready for application writes, check if the driver has non-application writes. We + // only want to continue producing new writes if the outbound write buffer is fully flushed. + while (sslDriver.hasFlushPending() == false && sslDriver.needsNonApplicationWrite()) { + sslDriver.nonApplicationWrite(); + // If non-application writes were produced, flush the outbound write buffer. + if (sslDriver.hasFlushPending()) { + flushToChannel(sslDriver.getNetworkWriteBuffer()); + } + } + } + } + + @Override + public boolean hasQueuedWriteOps() { + getSelector().assertOnSelectorThread(); + if (sslDriver.readyForApplicationWrites()) { + return sslDriver.hasFlushPending() || queued.isEmpty() == false; + } else { + return sslDriver.hasFlushPending() || sslDriver.needsNonApplicationWrite(); + } + } + + @Override + public int read() throws IOException { + int bytesRead = 0; + if (hasIOException()) { + return bytesRead; + } + bytesRead = readFromChannel(sslDriver.getNetworkReadBuffer()); + if (bytesRead == 0) { + return bytesRead; + } + + sslDriver.read(buffer); + + int bytesConsumed = Integer.MAX_VALUE; + while (bytesConsumed > 0 && buffer.getIndex() > 0) { + bytesConsumed = readConsumer.consumeReads(buffer); + buffer.release(bytesConsumed); + } + + return bytesRead; + } + + @Override + public boolean selectorShouldClose() { + return isPeerClosed() || hasIOException() || sslDriver.isClosed(); + } + + @Override + public void closeChannel() { + if (isClosing.compareAndSet(false, true)) { + WriteOperation writeOperation = new CloseNotifyOperation(this); + SocketSelector selector = getSelector(); + if (selector.isOnCurrentThread() == false) { + selector.queueWrite(writeOperation); + return; + } + selector.queueWriteInChannelBuffer(writeOperation); + } + } + + @Override + public void closeFromSelector() throws IOException { + getSelector().assertOnSelectorThread(); + if (channel.isOpen()) { + // Set to true in order to reject new writes before queuing with selector + isClosing.set(true); + ArrayList closingExceptions = new ArrayList<>(2); + try { + super.closeFromSelector(); + } catch (IOException e) { + closingExceptions.add(e); + } + try { + buffer.close(); + for (BytesWriteOperation op : queued) { + getSelector().executeFailedListener(op.getListener(), new ClosedChannelException()); + } + queued.clear(); + sslDriver.close(); + } catch (IOException e) { + closingExceptions.add(e); + } + 
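+ // surface any collected close failures as a single exception, with the remainder attached as suppressed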
ExceptionsHelper.rethrowAndSuppress(closingExceptions); + } + } + + private static class CloseNotifyOperation implements WriteOperation { + + private static final BiConsumer LISTENER = (v, t) -> {}; + private final SocketChannelContext channelContext; + + private CloseNotifyOperation(SocketChannelContext channelContext) { + this.channelContext = channelContext; + } + + @Override + public BiConsumer getListener() { + return LISTENER; + } + + @Override + public SocketChannelContext getChannel() { + return channelContext; + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLDriver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLDriver.java new file mode 100644 index 0000000000000..a44d39a0d7a56 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLDriver.java @@ -0,0 +1,584 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.transport.nio; + +import org.elasticsearch.nio.InboundChannelBuffer; +import org.elasticsearch.nio.utils.ExceptionsHelper; + +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLEngineResult; +import javax.net.ssl.SSLException; +import javax.net.ssl.SSLSession; +import java.nio.ByteBuffer; +import java.util.ArrayList; + +/** + * SSLDriver is a class that wraps the {@link SSLEngine} and attempts to simplify the API. The basic usage is + * to create an SSLDriver class and call {@link #init()}. This initiates the SSL/TLS handshaking process. + * + * When the SSLDriver is handshaking or closing, reads and writes will be consumed/produced internally to + * advance the handshake or close process. Alternatively, when the SSLDriver is in application mode, it will + * decrypt data off the wire to be consumed by the application and will encrypt data provided by the + * application to be written to the wire. + * + * Handling reads from a channel with this class is very simple. When data has been read, call + * {@link #read(InboundChannelBuffer)}. If the data is application data, it will be decrypted and placed into + * the buffer passed as an argument. Otherwise, it will be consumed internally and advance the SSL/TLS close + * or handshake process. + * + * Producing writes for a channel is more complicated. If there is existing data in the outbound write buffer + * as indicated by {@link #hasFlushPending()}, that data must be written to the channel before more outbound + * data can be produced. If no flushes are pending, {@link #needsNonApplicationWrite()} can be called to + * determine if this driver needs to produce more data to advance the handshake or close process. If that + * method returns true, {@link #nonApplicationWrite()} should be called (and the data produced then flushed + * to the channel) until no further non-application writes are needed. + * + * If no non-application writes are needed, {@link #readyForApplicationWrites()} can be called to determine + * if the driver is ready to consume application data. (Note: It is possible that + * {@link #readyForApplicationWrites()} and {@link #needsNonApplicationWrite()} can both return false if the + * driver is waiting on non-application data from the peer.) 
If the driver indicates it is ready for + * application writes, {@link #applicationWrite(ByteBuffer[])} can be called. This method will encrypt + * application data and place it in the write buffer for flushing to a channel. + * + * If you are ready to close the channel {@link #initiateClose()} should be called. After that is called, the + * driver will start producing non-application writes related to notifying the peer connection that this + * connection is closing. When {@link #isClosed()} returns true, this SSL connection is closed and the + * channel should be closed. + */ +public class SSLDriver implements AutoCloseable { + + private static final ByteBuffer[] EMPTY_BUFFER_ARRAY = new ByteBuffer[0]; + + private final SSLEngine engine; + private final boolean isClientMode; + // This should only be accessed by the network thread associated with this channel, so nothing needs to + // be volatile. + private Mode currentMode = new HandshakeMode(); + private ByteBuffer networkWriteBuffer; + private ByteBuffer networkReadBuffer; + + public SSLDriver(SSLEngine engine, boolean isClientMode) { + this.engine = engine; + this.isClientMode = isClientMode; + SSLSession session = engine.getSession(); + this.networkReadBuffer = ByteBuffer.allocate(session.getPacketBufferSize()); + this.networkWriteBuffer = ByteBuffer.allocate(session.getPacketBufferSize()); + this.networkWriteBuffer.position(this.networkWriteBuffer.limit()); + } + + public void init() throws SSLException { + engine.setUseClientMode(isClientMode); + if (currentMode.isHandshake()) { + engine.beginHandshake(); + ((HandshakeMode) currentMode).startHandshake(); + } else { + throw new AssertionError("Attempted to init outside from non-handshaking mode: " + currentMode.modeName()); + } + } + + /** + * Requests a TLS renegotiation. This means the we will request that the peer performs another handshake + * prior to the continued exchange of application data. This can only be requested if we are currently in + * APPLICATION mode. 
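+ * After this call the driver is back in handshake mode, so {@link #readyForApplicationWrites()} returns
+ * false until the renegotiation handshake has completed.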
+ * + * @throws SSLException if the handshake cannot be initiated + */ + public void renegotiate() throws SSLException { + if (currentMode.isApplication()) { + currentMode = new HandshakeMode(); + engine.beginHandshake(); + ((HandshakeMode) currentMode).startHandshake(); + } else { + throw new IllegalStateException("Attempted to renegotiate while in invalid mode: " + currentMode.modeName()); + } + } + + public boolean hasFlushPending() { + return networkWriteBuffer.hasRemaining(); + } + + public boolean isHandshaking() { + return currentMode.isHandshake(); + } + + public ByteBuffer getNetworkWriteBuffer() { + return networkWriteBuffer; + } + + public ByteBuffer getNetworkReadBuffer() { + return networkReadBuffer; + } + + public void read(InboundChannelBuffer buffer) throws SSLException { + currentMode.read(buffer); + } + + public boolean readyForApplicationWrites() { + return currentMode.isApplication(); + } + + public boolean needsNonApplicationWrite() { + return currentMode.needsNonApplicationWrite(); + } + + public int applicationWrite(ByteBuffer[] buffers) throws SSLException { + assert readyForApplicationWrites() : "Should not be called if driver is not ready for application writes"; + return currentMode.write(buffers); + } + + public void nonApplicationWrite() throws SSLException { + assert currentMode.isApplication() == false : "Should not be called if driver is in application mode"; + if (currentMode.isApplication() == false) { + currentMode.write(EMPTY_BUFFER_ARRAY); + } else { + throw new AssertionError("Attempted to non-application write from invalid mode: " + currentMode.modeName()); + } + } + + public void initiateClose() { + closingInternal(); + } + + public boolean isClosed() { + return currentMode.isClose() && ((CloseMode) currentMode).isCloseDone(); + } + + @Override + public void close() throws SSLException { + ArrayList closingExceptions = new ArrayList<>(2); + closingInternal(); + CloseMode closeMode = (CloseMode) this.currentMode; + if (closeMode.needToSendClose) { + closingExceptions.add(new SSLException("Closed engine without completely sending the close alert message.")); + engine.closeOutbound(); + } + + if (closeMode.needToReceiveClose) { + closingExceptions.add(new SSLException("Closed engine without receiving the close alert message.")); + closeMode.closeInboundAndSwallowPeerDidNotCloseException(); + } + ExceptionsHelper.rethrowAndSuppress(closingExceptions); + } + + private SSLEngineResult unwrap(InboundChannelBuffer buffer) throws SSLException { + while (true) { + SSLEngineResult result = engine.unwrap(networkReadBuffer, buffer.sliceBuffersFrom(buffer.getIndex())); + buffer.incrementIndex(result.bytesProduced()); + switch (result.getStatus()) { + case OK: + networkReadBuffer.compact(); + return result; + case BUFFER_UNDERFLOW: + // There is not enough space in the network buffer for an entire SSL packet. Compact the + // current data and expand the buffer if necessary. + int currentCapacity = networkReadBuffer.capacity(); + ensureNetworkReadBufferSize(); + if (currentCapacity == networkReadBuffer.capacity()) { + networkReadBuffer.compact(); + } + return result; + case BUFFER_OVERFLOW: + // There is not enough space in the application buffer for the decrypted message. Expand + // the application buffer to ensure that it has enough space. 
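+ // after the application buffer has grown, the enclosing loop retries the unwrap with the larger destination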
+ ensureApplicationBufferSize(buffer); + break; + case CLOSED: + assert engine.isInboundDone() : "We received close_notify so read should be done"; + closingInternal(); + return result; + default: + throw new IllegalStateException("Unexpected UNWRAP result: " + result.getStatus()); + } + } + } + + private SSLEngineResult wrap(ByteBuffer[] buffers) throws SSLException { + assert hasFlushPending() == false : "Should never called with pending writes"; + + networkWriteBuffer.clear(); + while (true) { + SSLEngineResult result; + try { + if (buffers.length == 1) { + result = engine.wrap(buffers[0], networkWriteBuffer); + } else { + result = engine.wrap(buffers, networkWriteBuffer); + } + } catch (SSLException e) { + networkWriteBuffer.position(networkWriteBuffer.limit()); + throw e; + } + + switch (result.getStatus()) { + case OK: + networkWriteBuffer.flip(); + return result; + case BUFFER_UNDERFLOW: + throw new IllegalStateException("Should not receive BUFFER_UNDERFLOW on WRAP"); + case BUFFER_OVERFLOW: + // There is not enough space in the network buffer for an entire SSL packet. Expand the + // buffer if it's smaller than the current session packet size. Otherwise return and wait + // for existing data to be flushed. + int currentCapacity = networkWriteBuffer.capacity(); + ensureNetworkWriteBufferSize(); + if (currentCapacity == networkWriteBuffer.capacity()) { + return result; + } + break; + case CLOSED: + if (result.bytesProduced() > 0) { + networkWriteBuffer.flip(); + } else { + assert false : "WRAP during close processing should produce close message."; + } + return result; + default: + throw new IllegalStateException("Unexpected WRAP result: " + result.getStatus()); + } + } + } + + private void closingInternal() { + // This check prevents us from attempting to send close_notify twice + if (currentMode.isClose() == false) { + currentMode = new CloseMode(currentMode.isHandshake()); + } + } + + private void ensureApplicationBufferSize(InboundChannelBuffer applicationBuffer) { + int applicationBufferSize = engine.getSession().getApplicationBufferSize(); + if (applicationBuffer.getRemaining() < applicationBufferSize) { + applicationBuffer.ensureCapacity(applicationBuffer.getIndex() + engine.getSession().getApplicationBufferSize()); + } + } + + private void ensureNetworkWriteBufferSize() { + networkWriteBuffer = ensureNetBufferSize(networkWriteBuffer); + } + + private void ensureNetworkReadBufferSize() { + networkReadBuffer = ensureNetBufferSize(networkReadBuffer); + } + + private ByteBuffer ensureNetBufferSize(ByteBuffer current) { + int networkPacketSize = engine.getSession().getPacketBufferSize(); + if (current.capacity() < networkPacketSize) { + ByteBuffer newBuffer = ByteBuffer.allocate(networkPacketSize); + current.flip(); + newBuffer.put(current); + return newBuffer; + } else { + return current; + } + } + + // There are three potential modes for the driver to be in - HANDSHAKE, APPLICATION, or CLOSE. HANDSHAKE + // is the initial mode. During this mode data that is read and written will be related to the TLS + // handshake process. Application related data cannot be encrypted until the handshake is complete. From + // HANDSHAKE mode the driver can transition to APPLICATION (if the handshake is successful) or CLOSE (if + // an error occurs or we initiate a close). In APPLICATION mode data read from the channel will be + // decrypted and placed into the buffer passed as an argument to the read call. 
Additionally, application + // writes will be accepted and encrypted into the outbound write buffer. APPLICATION mode will proceed + // until we receive a request for renegotiation (currently unsupported) or the CLOSE mode begins. CLOSE + // mode can begin if we receive a CLOSE_NOTIFY message from the peer or if initiateClose is called. In + // CLOSE mode we attempt to both send and receive an SSL CLOSE_NOTIFY message. The exception to this is + // when we enter CLOSE mode from HANDSHAKE mode. In this scenario we only need to send the alert to the + // peer and then close the channel. Some SSL/TLS implementations do not properly adhere to the full + // two-direction close_notify process. Additionally, in newer TLS specifications it is not required to + // wait to receive close_notify. However, we will make our best attempt to both send and receive as it is + // expected by the java SSLEngine (it throws an exception if close_notify has not been received when + // inbound is closed). + + private interface Mode { + + void read(InboundChannelBuffer buffer) throws SSLException; + + int write(ByteBuffer[] buffers) throws SSLException; + + boolean needsNonApplicationWrite(); + + boolean isHandshake(); + + boolean isApplication(); + + boolean isClose(); + + String modeName(); + + } + + private class HandshakeMode implements Mode { + + private SSLEngineResult.HandshakeStatus handshakeStatus; + + private void startHandshake() throws SSLException { + handshakeStatus = engine.getHandshakeStatus(); + if (handshakeStatus != SSLEngineResult.HandshakeStatus.NEED_UNWRAP && + handshakeStatus != SSLEngineResult.HandshakeStatus.NEED_WRAP) { + try { + handshake(); + } catch (SSLException e) { + closingInternal(); + throw e; + } + } + } + + private void handshake() throws SSLException { + boolean continueHandshaking = true; + while (continueHandshaking) { + switch (handshakeStatus) { + case NEED_UNWRAP: + // We UNWRAP as much as possible immediately after a read. Do not need to do it here. 
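+ // read() flips the network buffer, unwraps, and re-enters handshake(), so this branch simply waits for more data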
+ continueHandshaking = false; + break; + case NEED_WRAP: + if (hasFlushPending() == false) { + handshakeStatus = wrap(EMPTY_BUFFER_ARRAY).getHandshakeStatus(); + } + continueHandshaking = false; + break; + case NEED_TASK: + runTasks(); + handshakeStatus = engine.getHandshakeStatus(); + break; + case NOT_HANDSHAKING: + maybeFinishHandshake(); + continueHandshaking = false; + break; + case FINISHED: + maybeFinishHandshake(); + continueHandshaking = false; + break; + } + } + } + + @Override + public void read(InboundChannelBuffer buffer) throws SSLException { + boolean continueUnwrap = true; + while (continueUnwrap && networkReadBuffer.position() > 0) { + networkReadBuffer.flip(); + try { + SSLEngineResult result = unwrap(buffer); + handshakeStatus = result.getHandshakeStatus(); + continueUnwrap = result.bytesConsumed() > 0; + handshake(); + } catch (SSLException e) { + closingInternal(); + throw e; + } + } + } + + @Override + public int write(ByteBuffer[] buffers) throws SSLException { + try { + handshake(); + } catch (SSLException e) { + closingInternal(); + throw e; + } + return 0; + } + + @Override + public boolean needsNonApplicationWrite() { + return handshakeStatus == SSLEngineResult.HandshakeStatus.NEED_WRAP + || handshakeStatus == SSLEngineResult.HandshakeStatus.NOT_HANDSHAKING + || handshakeStatus == SSLEngineResult.HandshakeStatus.FINISHED; + } + + @Override + public boolean isHandshake() { + return true; + } + + @Override + public boolean isApplication() { + return false; + } + + @Override + public boolean isClose() { + return false; + } + + @Override + public String modeName() { + return "HANDSHAKE"; + } + + private void runTasks() { + Runnable delegatedTask; + while ((delegatedTask = engine.getDelegatedTask()) != null) { + delegatedTask.run(); + } + } + + private void maybeFinishHandshake() { + // We only acknowledge that we are done handshaking if there are no bytes that need to be written + if (hasFlushPending() == false) { + if (currentMode.isHandshake()) { + currentMode = new ApplicationMode(); + } else { + String message = "Attempted to transition to application mode from non-handshaking mode: " + currentMode; + throw new AssertionError(message); + } + } + } + } + + private class ApplicationMode implements Mode { + + @Override + public void read(InboundChannelBuffer buffer) throws SSLException { + ensureApplicationBufferSize(buffer); + boolean continueUnwrap = true; + while (continueUnwrap && networkReadBuffer.position() > 0) { + networkReadBuffer.flip(); + SSLEngineResult result = unwrap(buffer); + boolean renegotiationRequested = result.getStatus() != SSLEngineResult.Status.CLOSED + && maybeRenegotiation(result.getHandshakeStatus()); + continueUnwrap = result.bytesProduced() > 0 && renegotiationRequested == false; + } + } + + @Override + public int write(ByteBuffer[] buffers) throws SSLException { + SSLEngineResult result = wrap(buffers); + maybeRenegotiation(result.getHandshakeStatus()); + return result.bytesConsumed(); + } + + private boolean maybeRenegotiation(SSLEngineResult.HandshakeStatus newStatus) throws SSLException { + if (newStatus != SSLEngineResult.HandshakeStatus.NOT_HANDSHAKING && newStatus != SSLEngineResult.HandshakeStatus.FINISHED) { + renegotiate(); + return true; + } else { + return false; + } + } + + @Override + public boolean needsNonApplicationWrite() { + return false; + } + + @Override + public boolean isHandshake() { + return false; + } + + @Override + public boolean isApplication() { + return true; + } + + @Override + public boolean isClose() { 
+ return false; + } + + @Override + public String modeName() { + return "APPLICATION"; + } + } + + private class CloseMode implements Mode { + + private boolean needToSendClose = true; + private boolean needToReceiveClose = true; + + private CloseMode(boolean isHandshaking) { + if (isHandshaking && engine.isInboundDone() == false) { + // If we attempt to close during a handshake either we are sending an alert and inbound + // should already be closed or we are sending a close_notify. If we send a close_notify + // the peer will send an handshake error alert. If we attempt to receive the handshake alert, + // the engine will throw an IllegalStateException as it is not in a proper state to receive + // handshake message. Closing inbound immediately after close_notify is the cleanest option. + needToReceiveClose = false; + } else if (engine.isInboundDone()) { + needToReceiveClose = false; + } + if (engine.isOutboundDone()) { + needToSendClose = false; + } else { + engine.closeOutbound(); + } + + } + + @Override + public void read(InboundChannelBuffer buffer) throws SSLException { + ensureApplicationBufferSize(buffer); + boolean continueUnwrap = true; + while (continueUnwrap && networkReadBuffer.position() > 0) { + networkReadBuffer.flip(); + SSLEngineResult result = unwrap(buffer); + continueUnwrap = result.bytesProduced() > 0; + } + if (engine.isInboundDone()) { + needToReceiveClose = false; + } + } + + @Override + public int write(ByteBuffer[] buffers) throws SSLException { + if (hasFlushPending() == false && engine.isOutboundDone()) { + needToSendClose = false; + // Close inbound if it is still open and we have decided not to wait for response. + if (needToReceiveClose == false && engine.isInboundDone() == false) { + closeInboundAndSwallowPeerDidNotCloseException(); + } + } else { + wrap(EMPTY_BUFFER_ARRAY); + assert hasFlushPending() : "Should have produced close message"; + } + return 0; + } + + @Override + public boolean needsNonApplicationWrite() { + return needToSendClose; + } + + @Override + public boolean isHandshake() { + return false; + } + + @Override + public boolean isApplication() { + return false; + } + + @Override + public boolean isClose() { + return true; + } + + @Override + public String modeName() { + return "CLOSE"; + } + + private boolean isCloseDone() { + return needToSendClose == false && needToReceiveClose == false; + } + + private void closeInboundAndSwallowPeerDidNotCloseException() throws SSLException { + try { + engine.closeInbound(); + } catch (SSLException e) { + if (e.getMessage().startsWith("Inbound closed before receiving peer's close_notify") == false) { + throw e; + } + } + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java new file mode 100644 index 0000000000000..7773404762eb1 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java @@ -0,0 +1,150 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.transport.nio; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.recycler.Recycler; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.nio.AcceptingSelector; +import org.elasticsearch.nio.InboundChannelBuffer; +import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.nio.ServerChannelContext; +import org.elasticsearch.nio.SocketChannelContext; +import org.elasticsearch.nio.SocketSelector; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TcpTransport; +import org.elasticsearch.transport.nio.NioTransport; +import org.elasticsearch.transport.nio.TcpNioServerSocketChannel; +import org.elasticsearch.transport.nio.TcpNioSocketChannel; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.transport.netty4.SecurityNetty4Transport; +import org.elasticsearch.xpack.core.ssl.SSLConfiguration; +import org.elasticsearch.xpack.core.ssl.SSLService; + +import javax.net.ssl.SSLEngine; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.core.security.SecurityField.setting; + +/** + * This transport provides a transport based on nio that is secured by SSL/TLS. SSL/TLS is a communications + * protocol that allows two channels to go through a handshake process prior to application data being + * exchanged. The handshake process enables the channels to exchange parameters that will allow them to + * encrypt the application data they exchange. + * + * The specific SSL/TLS parameters and configurations are setup in the {@link SSLService} class. The actual + * implementation of the SSL/TLS layer is in the {@link SSLChannelContext} and {@link SSLDriver} classes. 
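The javadoc above describes the handshake at a high level; the state machine that actually drives it lives in `SSLDriver`'s `HandshakeMode`, added earlier in this patch. As a point of reference, the sketch below is a minimal, simplified rendition of the wrap/unwrap/delegated-task loop that any `SSLEngine`-based handshake has to run. It is not code from this change: the buffer names, the single blocking-style loop, and the omission of buffer resizing and flush handling are assumptions made for illustration.

```java
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLEngineResult;
import javax.net.ssl.SSLEngineResult.HandshakeStatus;
import java.nio.ByteBuffer;

// Illustrative only: a stripped-down SSLEngine handshake loop. SSLDriver performs the same
// three operations, but in a non-blocking style where wraps wait for pending flushes and
// unwraps are driven by read() as network bytes arrive.
final class HandshakeLoopSketch {

    static void drive(SSLEngine engine, ByteBuffer netIn, ByteBuffer netOut, ByteBuffer app) throws Exception {
        engine.beginHandshake();
        HandshakeStatus status = engine.getHandshakeStatus();
        while (status != HandshakeStatus.FINISHED && status != HandshakeStatus.NOT_HANDSHAKING) {
            switch (status) {
                case NEED_WRAP:
                    // Produce outbound handshake bytes for the peer (no application data yet).
                    status = engine.wrap(ByteBuffer.allocate(0), netOut).getHandshakeStatus();
                    break;
                case NEED_UNWRAP: {
                    // Consume handshake bytes already read from the peer; bail out until more
                    // network data arrives if the engine reports an underflow.
                    SSLEngineResult result = engine.unwrap(netIn, app);
                    if (result.getStatus() == SSLEngineResult.Status.BUFFER_UNDERFLOW) {
                        return;
                    }
                    status = result.getHandshakeStatus();
                    break;
                }
                case NEED_TASK: {
                    // Certificate validation and similar long-running work is delegated to tasks.
                    Runnable task;
                    while ((task = engine.getDelegatedTask()) != null) {
                        task.run();
                    }
                    status = engine.getHandshakeStatus();
                    break;
                }
                default:
                    return; // FINISHED / NOT_HANDSHAKING are handled by the loop condition.
            }
        }
    }
}
```

In the real `HandshakeMode.handshake()` above, the same three cases appear, except that a `NEED_WRAP` is skipped while a flush is still pending and `NEED_UNWRAP` is satisfied by `read(InboundChannelBuffer)` as data comes off the wire rather than inside the loop itself.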
+ */ +public class SecurityNioTransport extends NioTransport { + + private final SSLConfiguration sslConfiguration; + private final SSLService sslService; + private final Map profileConfiguration; + private final boolean sslEnabled; + + SecurityNioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, + CircuitBreakerService circuitBreakerService, SSLService sslService) { + super(settings, threadPool, networkService, bigArrays, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService); + this.sslService = sslService; + this.sslEnabled = XPackSettings.TRANSPORT_SSL_ENABLED.get(settings); + final Settings transportSSLSettings = settings.getByPrefix(setting("transport.ssl.")); + if (sslEnabled) { + this.sslConfiguration = sslService.sslConfiguration(transportSSLSettings, Settings.EMPTY); + Map profileSettingsMap = settings.getGroups("transport.profiles.", true); + Map profileConfiguration = new HashMap<>(profileSettingsMap.size() + 1); + for (Map.Entry entry : profileSettingsMap.entrySet()) { + Settings profileSettings = entry.getValue(); + final Settings profileSslSettings = SecurityNetty4Transport.profileSslSettings(profileSettings); + SSLConfiguration configuration = sslService.sslConfiguration(profileSslSettings, transportSSLSettings); + profileConfiguration.put(entry.getKey(), configuration); + } + + if (profileConfiguration.containsKey(TcpTransport.DEFAULT_PROFILE) == false) { + profileConfiguration.put(TcpTransport.DEFAULT_PROFILE, sslConfiguration); + } + + this.profileConfiguration = Collections.unmodifiableMap(profileConfiguration); + } else { + throw new IllegalArgumentException("Currently only support SSL enabled."); + } + } + + @Override + protected TcpChannelFactory channelFactory(ProfileSettings profileSettings, boolean isClient) { + return new SecurityTcpChannelFactory(profileSettings, isClient); + } + + @Override + protected void acceptChannel(NioSocketChannel channel) { + super.acceptChannel(channel); + } + + @Override + protected void exceptionCaught(NioSocketChannel channel, Exception exception) { + super.exceptionCaught(channel, exception); + } + + private class SecurityTcpChannelFactory extends TcpChannelFactory { + + private final String profileName; + private final boolean isClient; + + private SecurityTcpChannelFactory(ProfileSettings profileSettings, boolean isClient) { + super(new RawChannelFactory(profileSettings.tcpNoDelay, + profileSettings.tcpKeepAlive, + profileSettings.reuseAddress, + Math.toIntExact(profileSettings.sendBufferSize.getBytes()), + Math.toIntExact(profileSettings.receiveBufferSize.getBytes()))); + this.profileName = profileSettings.profileName; + this.isClient = isClient; + } + + @Override + public TcpNioSocketChannel createChannel(SocketSelector selector, SocketChannel channel) throws IOException { + SSLConfiguration defaultConfig = profileConfiguration.get(TcpTransport.DEFAULT_PROFILE); + SSLEngine sslEngine = sslService.createSSLEngine(profileConfiguration.getOrDefault(profileName, defaultConfig), null, -1); + SSLDriver sslDriver = new SSLDriver(sslEngine, isClient); + TcpNioSocketChannel nioChannel = new TcpNioSocketChannel(profileName, channel); + Supplier pageSupplier = () -> { + Recycler.V bytes = pageCacheRecycler.bytePage(false); + return new InboundChannelBuffer.Page(ByteBuffer.wrap(bytes.v()), bytes::close); + }; + + SocketChannelContext.ReadConsumer nioReadConsumer = channelBuffer -> + 
consumeNetworkReads(nioChannel, BytesReference.fromByteBuffers(channelBuffer.sliceBuffersTo(channelBuffer.getIndex()))); + InboundChannelBuffer buffer = new InboundChannelBuffer(pageSupplier); + Consumer exceptionHandler = (e) -> exceptionCaught(nioChannel, e); + SSLChannelContext context = new SSLChannelContext(nioChannel, selector, exceptionHandler, sslDriver, nioReadConsumer, + buffer); + nioChannel.setContext(context); + return nioChannel; + } + + @Override + public TcpNioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { + TcpNioServerSocketChannel nioChannel = new TcpNioServerSocketChannel(profileName, channel); + ServerChannelContext context = new ServerChannelContext(nioChannel, this, selector, SecurityNioTransport.this::acceptChannel, + (e) -> {}); + nioChannel.setContext(context); + return nioChannel; + } + } +} diff --git a/x-pack/plugin/security/src/main/plugin-metadata/plugin-security.policy b/x-pack/plugin/security/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..857c2f6e472d5 --- /dev/null +++ b/x-pack/plugin/security/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,42 @@ +grant { + permission java.lang.RuntimePermission "setFactory"; + + // needed because of problems in unbound LDAP library + permission java.util.PropertyPermission "*", "read,write"; + + // needed because of SAML (cf. o.e.x.s.s.RestorableContextClassLoader) + permission java.lang.RuntimePermission "getClassLoader"; + permission java.lang.RuntimePermission "setContextClassLoader"; + + // needed for multiple server implementations used in tests + permission java.net.SocketPermission "*", "accept,connect"; +}; + +grant codeBase "${codebase.xmlsec-2.0.8.jar}" { + // needed during initialization of OpenSAML library where xml security algorithms are registered + // see https://github.com/apache/santuario-java/blob/e79f1fe4192de73a975bc7246aee58ed0703343d/src/main/java/org/apache/xml/security/utils/JavaUtils.java#L205-L220 + // and https://git.shibboleth.net/view/?p=java-opensaml.git;a=blob;f=opensaml-xmlsec-impl/src/main/java/org/opensaml/xmlsec/signature/impl/SignatureMarshaller.java;hb=db0eaa64210f0e32d359cd6c57bedd57902bf811#l52 + // which uses it in the opensaml-xmlsec-impl + permission java.security.SecurityPermission "org.apache.xml.security.register"; +}; + +grant codeBase "${codebase.netty-common}" { + // for reading the system-wide configuration for the backlog of established sockets + permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read"; +}; + +grant codeBase "${codebase.netty-transport}" { + // Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854 + // the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely! 
+ permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write"; +}; + +grant codeBase "${codebase.elasticsearch-rest-client}" { + // rest client uses system properties which gets the default proxy + permission java.net.NetPermission "getProxySelector"; +}; + +grant codeBase "${codebase.httpasyncclient}" { + // rest client uses system properties which gets the default proxy + permission java.net.NetPermission "getProxySelector"; +}; diff --git a/x-pack/plugin/security/src/main/resources/meta-plugin-descriptor.properties b/x-pack/plugin/security/src/main/resources/meta-plugin-descriptor.properties new file mode 100644 index 0000000000000..2e878c207acfc --- /dev/null +++ b/x-pack/plugin/security/src/main/resources/meta-plugin-descriptor.properties @@ -0,0 +1,21 @@ +# Elasticsearch meta plugin descriptor file +# This file must exist as 'meta-plugin-descriptor.properties' in a folder named `elasticsearch`. +# +### example meta plugin for "meta-foo" +# +# meta-foo.zip <-- zip file for the meta plugin, with this structure: +#|____elasticsearch/ +#| |____ <-- The plugin files for bundled_plugin_1 (the content of the elastisearch directory) +#| |____ <-- The plugin files for bundled_plugin_2 +#| |____ meta-plugin-descriptor.properties <-- example contents below: +# +# description=My meta plugin +# name=meta-foo +# +### mandatory elements for all meta plugins: +# +# 'description': simple summary of the meta plugin +description=Elasticsearch Expanded Pack Plugin +# +# 'name': the meta plugin name +name=x-pack \ No newline at end of file diff --git a/x-pack/plugin/security/src/main/resources/org/elasticsearch/xpack/security/authc/saml/saml-schema-assertion-2.0.xsd b/x-pack/plugin/security/src/main/resources/org/elasticsearch/xpack/security/authc/saml/saml-schema-assertion-2.0.xsd new file mode 100644 index 0000000000000..759baf8993b3e --- /dev/null +++ b/x-pack/plugin/security/src/main/resources/org/elasticsearch/xpack/security/authc/saml/saml-schema-assertion-2.0.xsd @@ -0,0 +1,283 @@ + + + + + + + Document identifier: saml-schema-assertion-2.0 + Location: http://docs.oasis-open.org/security/saml/v2.0/ + Revision history: + V1.0 (November, 2002): + Initial Standard Schema. + V1.1 (September, 2003): + Updates within the same V1.0 namespace. + V2.0 (March, 2005): + New assertion schema for SAML V2.0 namespace. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/x-pack/plugin/security/src/main/resources/org/elasticsearch/xpack/security/authc/saml/saml-schema-metadata-2.0.xsd b/x-pack/plugin/security/src/main/resources/org/elasticsearch/xpack/security/authc/saml/saml-schema-metadata-2.0.xsd new file mode 100644 index 0000000000000..9d5e4832a1ada --- /dev/null +++ b/x-pack/plugin/security/src/main/resources/org/elasticsearch/xpack/security/authc/saml/saml-schema-metadata-2.0.xsd @@ -0,0 +1,337 @@ + + + + + + + + + Document identifier: saml-schema-metadata-2.0 + Location: http://docs.oasis-open.org/security/saml/v2.0/ + Revision history: + V2.0 (March, 2005): + Schema for SAML metadata, first published in SAML 2.0. 
+ [Remaining saml-schema-metadata-2.0.xsd element definitions omitted: the XSD markup did not survive extraction; only the annotation text above remains.]
diff --git a/x-pack/plugin/security/src/main/resources/org/elasticsearch/xpack/security/authc/saml/saml-schema-protocol-2.0.xsd b/x-pack/plugin/security/src/main/resources/org/elasticsearch/xpack/security/authc/saml/saml-schema-protocol-2.0.xsd new file mode 100644 index 0000000000000..48ec69cbc967d --- /dev/null +++ b/x-pack/plugin/security/src/main/resources/org/elasticsearch/xpack/security/authc/saml/saml-schema-protocol-2.0.xsd @@ -0,0 +1,302 @@
+ Document identifier: saml-schema-protocol-2.0
+ Location: http://docs.oasis-open.org/security/saml/v2.0/
+ Revision history:
+   V1.0 (November, 2002): Initial Standard Schema.
+   V1.1 (September, 2003): Updates within the same V1.0 namespace.
+   V2.0 (March, 2005): New protocol schema based in a SAML V2.0 namespace.
+ [Schema element definitions omitted: the XSD markup did not survive extraction.]
diff --git a/x-pack/plugin/security/src/main/resources/org/elasticsearch/xpack/security/authc/saml/xenc-schema.xsd b/x-pack/plugin/security/src/main/resources/org/elasticsearch/xpack/security/authc/saml/xenc-schema.xsd new file mode 100644 index 0000000000000..c902d4fc60772 --- /dev/null +++ b/x-pack/plugin/security/src/main/resources/org/elasticsearch/xpack/security/authc/saml/xenc-schema.xsd @@ -0,0 +1,135 @@
+ [XML Encryption (xenc) schema content omitted: the XSD markup did not survive extraction.]
diff --git a/x-pack/plugin/security/src/main/resources/org/elasticsearch/xpack/security/authc/saml/xml.xsd b/x-pack/plugin/security/src/main/resources/org/elasticsearch/xpack/security/authc/saml/xml.xsd new file mode 100644 index 0000000000000..5a282019b6afe --- /dev/null +++ b/x-pack/plugin/security/src/main/resources/org/elasticsearch/xpack/security/authc/saml/xml.xsd @@ -0,0 +1,286 @@
+ [W3C XML namespace schema. The surviving annotations describe: the xml:lang, xml:space, xml:base and xml:id attributes; the xml:specialAttrs attribute group that importing schemas can reference; and the W3C versioning policy, under which the dated document at http://www.w3.org/2009/01/xml.xsd is fixed while http://www.w3.org/2001/xml.xsd tracks the latest revision. The XSD markup itself did not survive extraction.]
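These schema files are added as plain classpath resources; the code that consumes them is not part of this portion of the diff. As a hedged illustration only, bundled XSDs like these are typically compiled into a `javax.xml.validation.Schema` and used to check incoming SAML XML before it is processed. The resource path, the omitted resource resolver, and the `validate` helper below are assumptions for the sketch, not the plugin's actual API.

```java
import javax.xml.XMLConstants;
import javax.xml.transform.stream.StreamSource;
import javax.xml.validation.Schema;
import javax.xml.validation.SchemaFactory;
import javax.xml.validation.Validator;
import java.io.InputStream;

// Sketch: compile a bundled XSD into a Schema and validate a SAML document against it.
final class SamlSchemaValidationSketch {

    static void validate(InputStream samlXml) throws Exception {
        SchemaFactory factory = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI);
        // The protocol schema imports the assertion, xmldsig, xenc and xml schemas; a real
        // implementation would install an LSResourceResolver so those imports resolve to the
        // bundled copies instead of being fetched over the network.
        try (InputStream xsd = SamlSchemaValidationSketch.class.getResourceAsStream(
                "/org/elasticsearch/xpack/security/authc/saml/saml-schema-protocol-2.0.xsd")) {
            Schema schema = factory.newSchema(new StreamSource(xsd));
            Validator validator = schema.newValidator();
            validator.validate(new StreamSource(samlXml)); // throws SAXException if the document is invalid
        }
    }
}
```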
+ diff --git a/x-pack/plugin/security/src/main/resources/org/elasticsearch/xpack/security/authc/saml/xmldsig-core-schema.xsd b/x-pack/plugin/security/src/main/resources/org/elasticsearch/xpack/security/authc/saml/xmldsig-core-schema.xsd new file mode 100644 index 0000000000000..8422fdfaaf9d2 --- /dev/null +++ b/x-pack/plugin/security/src/main/resources/org/elasticsearch/xpack/security/authc/saml/xmldsig-core-schema.xsd @@ -0,0 +1,308 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/AbstractPrivilegeTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/AbstractPrivilegeTestCase.java new file mode 100644 index 0000000000000..31a1b1eaabfb0 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/AbstractPrivilegeTestCase.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.integration; + +import org.apache.http.HttpEntity; +import org.apache.http.StatusLine; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.message.BasicHeader; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.test.SecuritySingleNodeTestCase; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; + +/** + * a helper class that contains a couple of HTTP helper methods + */ +public abstract class AbstractPrivilegeTestCase extends SecuritySingleNodeTestCase { + + protected static final String USERS_PASSWD_HASHED = new String(Hasher.BCRYPT.hash(new SecureString("passwd".toCharArray()))); + + protected void assertAccessIsAllowed(String user, String method, String uri, String body, + Map params) throws IOException { + Response response = getRestClient().performRequest(method, uri, params, entityOrNull(body), + new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + UsernamePasswordToken.basicAuthHeaderValue(user, new SecureString("passwd".toCharArray())))); + StatusLine statusLine = response.getStatusLine(); + String message = String.format(Locale.ROOT, "%s %s: Expected no error got %s %s with body %s", method, uri, + statusLine.getStatusCode(), statusLine.getReasonPhrase(), 
EntityUtils.toString(response.getEntity())); + assertThat(message, statusLine.getStatusCode(), is(not(greaterThanOrEqualTo(400)))); + } + + protected void assertAccessIsAllowed(String user, String method, String uri, String body) throws IOException { + assertAccessIsAllowed(user, method, uri, body, new HashMap<>()); + } + + protected void assertAccessIsAllowed(String user, String method, String uri) throws IOException { + assertAccessIsAllowed(user, method, uri, null, new HashMap<>()); + } + + protected void assertAccessIsDenied(String user, String method, String uri, String body) throws IOException { + assertAccessIsDenied(user, method, uri, body, new HashMap<>()); + } + + protected void assertAccessIsDenied(String user, String method, String uri) throws IOException { + assertAccessIsDenied(user, method, uri, null, new HashMap<>()); + } + + protected void assertAccessIsDenied(String user, String method, String uri, String body, + Map params) throws IOException { + ResponseException responseException = expectThrows(ResponseException.class, + () -> getRestClient().performRequest(method, uri, params, entityOrNull(body), + new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + UsernamePasswordToken.basicAuthHeaderValue(user, new SecureString("passwd".toCharArray()))))); + StatusLine statusLine = responseException.getResponse().getStatusLine(); + String message = String.format(Locale.ROOT, "%s %s body %s: Expected 403, got %s %s with body %s", method, uri, body, + statusLine.getStatusCode(), statusLine.getReasonPhrase(), + EntityUtils.toString(responseException.getResponse().getEntity())); + assertThat(message, statusLine.getStatusCode(), is(403)); + } + + + protected void assertBodyHasAccessIsDenied(String user, String method, String uri, String body) throws IOException { + assertBodyHasAccessIsDenied(user, method, uri, body, new HashMap<>()); + } + + /** + * Like {@code assertAcessIsDenied}, but for _bulk requests since the entire + * request will not be failed, just the individual ones + */ + protected void assertBodyHasAccessIsDenied(String user, String method, String uri, String body, + Map params) throws IOException { + Response resp = getRestClient().performRequest(method, uri, params, entityOrNull(body), + new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + UsernamePasswordToken.basicAuthHeaderValue(user, new SecureString("passwd".toCharArray())))); + StatusLine statusLine = resp.getStatusLine(); + assertThat(statusLine.getStatusCode(), is(200)); + HttpEntity bodyEntity = resp.getEntity(); + String bodyStr = EntityUtils.toString(bodyEntity); + assertThat(bodyStr, containsString("unauthorized for user [" + user + "]")); + } + + private static HttpEntity entityOrNull(String body) { + HttpEntity entity = null; + if (body != null) { + entity = new StringEntity(body, ContentType.APPLICATION_JSON); + } + return entity; + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/BulkUpdateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/BulkUpdateTests.java new file mode 100644 index 0000000000000..ce7e58972b8e8 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/BulkUpdateTests.java @@ -0,0 +1,119 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.integration; + +import org.apache.http.Header; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.message.BasicHeader; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; + +import java.io.IOException; +import java.util.Collections; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class BulkUpdateTests extends SecurityIntegTestCase { + + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .put(XPackSettings.DLS_FLS_ENABLED.getKey(), randomBoolean()) + .build(); + } + + public void testThatBulkUpdateDoesNotLoseFields() { + assertEquals(DocWriteResponse.Result.CREATED, + client().prepareIndex("index1", "type").setSource("{\"test\": \"test\"}", XContentType.JSON).setId("1").get().getResult()); + GetResponse getResponse = internalCluster().transportClient().prepareGet("index1", "type", "1").get(); + assertEquals("test", getResponse.getSource().get("test")); + + if (randomBoolean()) { + flushAndRefresh(); + } + + // update with a new field + assertEquals(DocWriteResponse.Result.UPDATED, internalCluster().transportClient().prepareUpdate("index1", "type", "1") + .setDoc("{\"not test\": \"not test\"}", XContentType.JSON).get().getResult()); + getResponse = internalCluster().transportClient().prepareGet("index1", "type", "1").get(); + assertEquals("test", getResponse.getSource().get("test")); + assertEquals("not test", getResponse.getSource().get("not test")); + + // this part is important. 
Without this, the document may be read from the translog which would bypass the bug where + // FLS kicks in because the request can't be found and only returns meta fields + flushAndRefresh(); + + // do it in a bulk + BulkResponse response = internalCluster().transportClient().prepareBulk().add(client().prepareUpdate("index1", "type", "1") + .setDoc("{\"bulk updated\": \"bulk updated\"}", XContentType.JSON)).get(); + assertEquals(DocWriteResponse.Result.UPDATED, response.getItems()[0].getResponse().getResult()); + getResponse = internalCluster().transportClient().prepareGet("index1", "type", "1").get(); + assertEquals("test", getResponse.getSource().get("test")); + assertEquals("not test", getResponse.getSource().get("not test")); + assertEquals("bulk updated", getResponse.getSource().get("bulk updated")); + } + + public void testThatBulkUpdateDoesNotLoseFieldsHttp() throws IOException { + final String path = "/index1/type/1"; + final Header basicAuthHeader = new BasicHeader("Authorization", + UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_USER_NAME, + new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()))); + + StringEntity body = new StringEntity("{\"test\":\"test\"}", ContentType.APPLICATION_JSON); + Response response = getRestClient().performRequest("PUT", path, Collections.emptyMap(), body, basicAuthHeader); + assertThat(response.getStatusLine().getStatusCode(), equalTo(201)); + + response = getRestClient().performRequest("GET", path, basicAuthHeader); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + assertThat(EntityUtils.toString(response.getEntity()), containsString("\"test\":\"test\"")); + + if (randomBoolean()) { + flushAndRefresh(); + } + + //update with new field + body = new StringEntity("{\"doc\": {\"not test\": \"not test\"}}", ContentType.APPLICATION_JSON); + response = getRestClient().performRequest("POST", path + "/_update", Collections.emptyMap(), body, basicAuthHeader); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + + response = getRestClient().performRequest("GET", path, basicAuthHeader); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + String responseBody = EntityUtils.toString(response.getEntity()); + assertThat(responseBody, containsString("\"test\":\"test\"")); + assertThat(responseBody, containsString("\"not test\":\"not test\"")); + + // this part is important. 
Without this, the document may be read from the translog which would bypass the bug where + // FLS kicks in because the request can't be found and only returns meta fields + flushAndRefresh(); + + body = new StringEntity("{\"update\": {\"_index\": \"index1\", \"_type\": \"type\", \"_id\": \"1\"}}\n" + + "{\"doc\": {\"bulk updated\":\"bulk updated\"}}\n", ContentType.APPLICATION_JSON); + response = getRestClient().performRequest("POST", "/_bulk", Collections.emptyMap(), body, basicAuthHeader); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + + response = getRestClient().performRequest("GET", path, basicAuthHeader); + responseBody = EntityUtils.toString(response.getEntity()); + assertThat(responseBody, containsString("\"test\":\"test\"")); + assertThat(responseBody, containsString("\"not test\":\"not test\"")); + assertThat(responseBody, containsString("\"bulk updated\":\"bulk updated\"")); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java new file mode 100644 index 0000000000000..fcab6f0d73240 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java @@ -0,0 +1,289 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.integration; + +import org.apache.http.message.BasicHeader; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheRequest; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheResponse; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.authc.Realms; +import org.junit.BeforeClass; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.sameInstance; + +public class ClearRealmsCacheTests extends SecurityIntegTestCase { + private static final String USERS_PASSWD_HASHED = new String(Hasher.BCRYPT.hash(new 
SecureString("passwd".toCharArray()))); + + private static String[] usernames; + + @BeforeClass + public static void init() throws Exception { + usernames = new String[randomIntBetween(5, 10)]; + for (int i = 0; i < usernames.length; i++) { + usernames[i] = randomAlphaOfLength(6) + "_" + i; + } + } + + enum Scenario { + + EVICT_ALL() { + + @Override + public void assertEviction(User prevUser, User newUser) { + assertThat(prevUser, not(sameInstance(newUser))); + } + + @Override + public void executeRequest() throws Exception { + executeTransportRequest(new ClearRealmCacheRequest()); + } + }, + + EVICT_SOME() { + + private final String[] evicted_usernames = randomSelection(usernames); + { + Arrays.sort(evicted_usernames); + } + + @Override + public void assertEviction(User prevUser, User newUser) { + if (Arrays.stream(evicted_usernames).anyMatch(prevUser.principal()::equals)) { + assertThat(prevUser, not(sameInstance(newUser))); + } else { + assertThat(prevUser, sameInstance(newUser)); + } + } + + @Override + public void executeRequest() throws Exception { + executeTransportRequest(new ClearRealmCacheRequest().usernames(evicted_usernames)); + } + }, + + EVICT_ALL_HTTP() { + + @Override + public void assertEviction(User prevUser, User newUser) { + assertThat(prevUser, not(sameInstance(newUser))); + } + + @Override + public void executeRequest() throws Exception { + executeHttpRequest("/_xpack/security/realm/" + (randomBoolean() ? "*" : "_all") + "/_clear_cache", + Collections.emptyMap()); + } + }, + + EVICT_SOME_HTTP() { + + private final String[] evicted_usernames = randomSelection(usernames); + { + Arrays.sort(evicted_usernames); + } + + @Override + public void assertEviction(User prevUser, User newUser) { + if (Arrays.stream(evicted_usernames).anyMatch(prevUser.principal()::equals)) { + assertThat(prevUser, not(sameInstance(newUser))); + } else { + assertThat(prevUser, sameInstance(newUser)); + } + } + + @Override + public void executeRequest() throws Exception { + String path = "/_xpack/security/realm/" + (randomBoolean() ? 
"*" : "_all") + "/_clear_cache"; + Map params = Collections.singletonMap("usernames", String.join(",", evicted_usernames)); + executeHttpRequest(path, params); + } + }; + + public abstract void assertEviction(User prevUser, User newUser); + + public abstract void executeRequest() throws Exception; + + static void executeTransportRequest(ClearRealmCacheRequest request) throws Exception { + SecurityClient securityClient = securityClient(client()); + + final CountDownLatch latch = new CountDownLatch(1); + final AtomicReference error = new AtomicReference<>(); + securityClient.clearRealmCache(request, new ActionListener() { + @Override + public void onResponse(ClearRealmCacheResponse response) { + assertThat(response.getNodes().size(), equalTo(internalCluster().getNodeNames().length)); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + error.set(e); + latch.countDown(); + } + }); + + if (!latch.await(5, TimeUnit.SECONDS)) { + fail("waiting for clear realms cache request too long"); + } + + if (error.get() != null) { + fail("failed to clear realm caches" + error.get().getMessage()); + } + } + + static void executeHttpRequest(String path, Map params) throws Exception { + Response response = getRestClient().performRequest("POST", path, params, + new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_USER_NAME, + new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())))); + assertNotNull(response.getEntity()); + assertTrue(EntityUtils.toString(response.getEntity()).contains("cluster_name")); + } + } + + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .build(); + } + + @Override + protected String configRoles() { + return SecuritySettingsSource.CONFIG_ROLE_ALLOW_ALL + "\n" + + "r1:\n" + + " cluster: all\n"; + } + + @Override + protected String configUsers() { + StringBuilder builder = new StringBuilder(SecuritySettingsSource.CONFIG_STANDARD_USER); + for (String username : usernames) { + builder.append(username).append(":").append(USERS_PASSWD_HASHED).append("\n"); + } + return builder.toString(); + } + + @Override + protected String configUsersRoles() { + return SecuritySettingsSource.CONFIG_STANDARD_USER_ROLES + + "r1:" + Strings.arrayToCommaDelimitedString(usernames); + } + + public void testEvictAll() throws Exception { + testScenario(Scenario.EVICT_ALL); + } + + public void testEvictSome() throws Exception { + testScenario(Scenario.EVICT_SOME); + } + + public void testEvictAllHttp() throws Exception { + testScenario(Scenario.EVICT_ALL_HTTP); + } + + public void testEvictSomeHttp() throws Exception { + testScenario(Scenario.EVICT_SOME_HTTP); + } + + private void testScenario(Scenario scenario) throws Exception { + Map tokens = new HashMap<>(); + for (String user : usernames) { + tokens.put(user, new UsernamePasswordToken(user, new SecureString("passwd"))); + } + + List realms = new ArrayList<>(); + for (Realms nodeRealms : internalCluster().getInstances(Realms.class)) { + realms.add(nodeRealms.realm("file")); + } + + // we authenticate each user on each of the realms to make sure they're all cached + Map> users = new HashMap<>(); + for (Realm realm : realms) { + for (String username : usernames) { + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(tokens.get(username), future); + User user = future.actionGet().getUser(); 
+ assertThat(user, notNullValue()); + Map realmToUser = users.get(username); + if (realmToUser == null) { + realmToUser = new HashMap<>(); + users.put(username, realmToUser); + } + realmToUser.put(realm, user); + } + } + + // all users should be cached now on all realms, lets verify + + for (String username : usernames) { + for (Realm realm : realms) { + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(tokens.get(username), future); + User user = future.actionGet().getUser(); + assertThat(user, sameInstance(users.get(username).get(realm))); + } + } + + // now, lets run the scenario + scenario.executeRequest(); + + // now, user_a should have been evicted, but user_b should still be cached + for (String username : usernames) { + for (Realm realm : realms) { + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(tokens.get(username), future); + User user = future.actionGet().getUser(); + assertThat(user, notNullValue()); + scenario.assertEviction(users.get(username).get(realm), user); + } + } + } + + // selects a random sub-set of the give values + private static String[] randomSelection(String[] values) { + List list = new ArrayList<>(); + while (list.isEmpty()) { + double base = randomDouble(); + for (String value : values) { + if (randomDouble() < base) { + list.add(value); + } + } + } + return list.toArray(new String[list.size()]); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java new file mode 100644 index 0000000000000..eadbe3738b63f --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java @@ -0,0 +1,127 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.integration; + +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.NativeRealmIntegTestCase; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleResponse; +import org.elasticsearch.xpack.core.security.action.role.GetRolesResponse; +import org.elasticsearch.xpack.core.security.action.role.PutRoleResponse; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; + +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.NONE; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + + +/** + * Test for the clear roles API + */ +public class ClearRolesCacheTests extends NativeRealmIntegTestCase { + + private static String[] roles; + + @BeforeClass + public static void init() throws Exception { + roles = new String[randomIntBetween(5, 10)]; + for (int i = 0; i < roles.length; i++) { + roles[i] = randomAlphaOfLength(6) + "_" + i; + } + } + + @Before + public void setupForTests() { + SecurityClient c = securityClient(); + // create roles + for (String role : roles) { + c.preparePutRole(role) + .cluster("none") + .addIndices(new String[] { "*" }, new String[] { "ALL" }, null, null, null) + .get(); + logger.debug("--> created role [{}]", role); + } + + ensureGreen(SecurityLifecycleService.SECURITY_INDEX_NAME); + + // warm up the caches on every node + for (NativeRolesStore rolesStore : internalCluster().getInstances(NativeRolesStore.class)) { + PlainActionFuture> future = new PlainActionFuture<>(); + rolesStore.getRoleDescriptors(roles, future); + assertThat(future.actionGet(), notNullValue()); + } + } + + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .build(); + } + + public void testModifyingViaApiClearsCache() throws Exception { + Client client = internalCluster().transportClient(); + SecurityClient securityClient = securityClient(client); + + int modifiedRolesCount = randomIntBetween(1, roles.length); + List toModify = randomSubsetOf(modifiedRolesCount, roles); + logger.debug("--> modifying roles {} to have run_as", toModify); + for (String role : toModify) { + PutRoleResponse response = securityClient.preparePutRole(role) + .cluster("none") + .addIndices(new String[] { "*" }, new String[] { "ALL" }, null, null, null) + .runAs(role) + .setRefreshPolicy(randomBoolean() ? 
IMMEDIATE : NONE) + .get(); + assertThat(response.isCreated(), is(false)); + logger.debug("--> updated role [{}] with run_as", role); + } + + assertRolesAreCorrect(securityClient, toModify); + } + + public void testDeletingViaApiClearsCache() throws Exception { + final int rolesToDelete = randomIntBetween(1, roles.length - 1); + List toDelete = randomSubsetOf(rolesToDelete, roles); + for (String role : toDelete) { + DeleteRoleResponse response = securityClient().prepareDeleteRole(role).get(); + assertTrue(response.found()); + } + + GetRolesResponse roleResponse = securityClient().prepareGetRoles().names(roles).get(); + assertTrue(roleResponse.hasRoles()); + assertThat(roleResponse.roles().length, is(roles.length - rolesToDelete)); + } + + private void assertRolesAreCorrect(SecurityClient securityClient, List toModify) { + for (String role : roles) { + logger.debug("--> getting role [{}]", role); + GetRolesResponse roleResponse = securityClient.prepareGetRoles().names(role).get(); + assertThat(roleResponse.hasRoles(), is(true)); + final String[] runAs = roleResponse.roles()[0].getRunAs(); + if (toModify.contains(role)) { + assertThat("role [" + role + "] should be modified and have run as", runAs == null || runAs.length == 0, is(false)); + assertThat(Arrays.asList(runAs).contains(role), is(true)); + } else { + assertThat("role [" + role + "] should be cached and not have run as set but does!", runAs == null || runAs.length == 0, + is(true)); + } + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java new file mode 100644 index 0000000000000..19d61ed77c5f4 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java @@ -0,0 +1,173 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.integration; + +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; +import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.nio.file.Path; +import java.util.Map; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.is; + +public class ClusterPrivilegeTests extends AbstractPrivilegeTestCase { + + private static final String ROLES = + "role_a:\n" + + " cluster: [ all ]\n" + + "\n" + + "role_b:\n" + + " cluster: [ monitor ]\n" + + "\n" + + "role_c:\n" + + " indices:\n" + + " - names: 'someindex'\n" + + " privileges: [ all ]\n"; + + private static final String USERS = + "user_a:" + USERS_PASSWD_HASHED + "\n" + + "user_b:" + USERS_PASSWD_HASHED + "\n" + + "user_c:" + USERS_PASSWD_HASHED + "\n"; + + private static final String USERS_ROLES = + "role_a:user_a\n" + + "role_b:user_b\n" + + "role_c:user_c\n"; + + private static Path repositoryLocation; + + @BeforeClass + public static void setupRepositoryPath() { + repositoryLocation = createTempDir(); + } + + @AfterClass + public static void cleanupRepositoryPath() { + repositoryLocation = null; + } + + @Override + protected Settings nodeSettings() { + return Settings.builder().put(super.nodeSettings()) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .put("path.repo", repositoryLocation) + .build(); + } + + @Override + protected String configRoles() { + return super.configRoles() + "\n" + ROLES; + } + + @Override + protected String configUsers() { + return super.configUsers() + USERS; + } + + @Override + protected String configUsersRoles() { + return super.configUsersRoles() + USERS_ROLES; + } + + public void testThatClusterPrivilegesWorkAsExpectedViaHttp() throws Exception { + // user_a can do all the things + assertAccessIsAllowed("user_a", "GET", "/_cluster/state"); + assertAccessIsAllowed("user_a", "GET", "/_cluster/health"); + assertAccessIsAllowed("user_a", "GET", "/_cluster/settings"); + assertAccessIsAllowed("user_a", "GET", "/_cluster/stats"); + assertAccessIsAllowed("user_a", "GET", "/_cluster/pending_tasks"); + assertAccessIsAllowed("user_a", "GET", "/_nodes/stats"); + assertAccessIsAllowed("user_a", "GET", "/_nodes/hot_threads"); + assertAccessIsAllowed("user_a", "GET", "/_nodes/infos"); + assertAccessIsAllowed("user_a", "POST", "/_cluster/reroute"); + assertAccessIsAllowed("user_a", "PUT", "/_cluster/settings", "{ \"transient\" : { \"search.default_search_timeout\": \"1m\" } }"); + assertAccessIsAllowed("user_a", "PUT", "/_cluster/settings", "{ \"transient\" : { \"search.default_search_timeout\": null } }"); + + // user_b can do monitoring + assertAccessIsAllowed("user_b", "GET", "/_cluster/state"); + assertAccessIsAllowed("user_b", "GET", "/_cluster/health"); + assertAccessIsAllowed("user_b", "GET", "/_cluster/settings"); + assertAccessIsAllowed("user_b", "GET", "/_cluster/stats"); + assertAccessIsAllowed("user_b", "GET", "/_cluster/pending_tasks"); + assertAccessIsAllowed("user_b", "GET", "/_nodes/stats"); + assertAccessIsAllowed("user_b", "GET", "/_nodes/hot_threads"); + assertAccessIsAllowed("user_b", "GET", "/_nodes/infos"); + // but no admin stuff + assertAccessIsDenied("user_b", "POST", "/_cluster/reroute"); + 
assertAccessIsDenied("user_b", "PUT", "/_cluster/settings", "{ \"transient\" : { \"search.default_search_timeout\": \"1m\" } }"); + + // sorry user_c, you are not allowed anything + assertAccessIsDenied("user_c", "GET", "/_cluster/state"); + assertAccessIsDenied("user_c", "GET", "/_cluster/health"); + assertAccessIsDenied("user_c", "GET", "/_cluster/settings"); + assertAccessIsDenied("user_c", "GET", "/_cluster/stats"); + assertAccessIsDenied("user_c", "GET", "/_cluster/pending_tasks"); + assertAccessIsDenied("user_c", "GET", "/_nodes/stats"); + assertAccessIsDenied("user_c", "GET", "/_nodes/hot_threads"); + assertAccessIsDenied("user_c", "GET", "/_nodes/infos"); + assertAccessIsDenied("user_c", "POST", "/_cluster/reroute"); + assertAccessIsDenied("user_c", "PUT", "/_cluster/settings", "{ \"transient\" : { \"search.default_search_timeout\": \"1m\" } }"); + } + + public void testThatSnapshotAndRestore() throws Exception { + String repoJson = Strings.toString(jsonBuilder().startObject().field("type", "fs").startObject("settings").field("location", + repositoryLocation.toString()).endObject().endObject()); + assertAccessIsDenied("user_b", "PUT", "/_snapshot/my-repo", repoJson); + assertAccessIsDenied("user_c", "PUT", "/_snapshot/my-repo", repoJson); + assertAccessIsAllowed("user_a", "PUT", "/_snapshot/my-repo", repoJson); + + Map params = singletonMap("refresh", "true"); + assertAccessIsDenied("user_a", "PUT", "/someindex/bar/1", "{ \"name\" : \"elasticsearch\" }", params); + assertAccessIsDenied("user_b", "PUT", "/someindex/bar/1", "{ \"name\" : \"elasticsearch\" }", params); + assertAccessIsAllowed("user_c", "PUT", "/someindex/bar/1", "{ \"name\" : \"elasticsearch\" }", params); + + assertAccessIsDenied("user_b", "PUT", "/_snapshot/my-repo/my-snapshot", "{ \"indices\": \"someindex\" }"); + assertAccessIsDenied("user_c", "PUT", "/_snapshot/my-repo/my-snapshot", "{ \"indices\": \"someindex\" }"); + assertAccessIsAllowed("user_a", "PUT", "/_snapshot/my-repo/my-snapshot", "{ \"indices\": \"someindex\" }"); + + assertAccessIsDenied("user_b", "GET", "/_snapshot/my-repo/my-snapshot/_status"); + assertAccessIsDenied("user_c", "GET", "/_snapshot/my-repo/my-snapshot/_status"); + assertAccessIsAllowed("user_a", "GET", "/_snapshot/my-repo/my-snapshot/_status"); + + // This snapshot needs to be finished in order to be restored + waitForSnapshotToFinish("my-repo", "my-snapshot"); + + assertAccessIsDenied("user_a", "DELETE", "/someindex"); + assertAccessIsDenied("user_b", "DELETE", "/someindex"); + assertAccessIsAllowed("user_c", "DELETE", "/someindex"); + + params = singletonMap("wait_for_completion", "true"); + assertAccessIsDenied("user_b", "POST", "/_snapshot/my-repo/my-snapshot/_restore", null, params); + assertAccessIsDenied("user_c", "POST", "/_snapshot/my-repo/my-snapshot/_restore", null, params); + assertAccessIsAllowed("user_a", "POST", "/_snapshot/my-repo/my-snapshot/_restore", null, params); + + assertAccessIsDenied("user_a", "GET", "/someindex/bar/1"); + assertAccessIsDenied("user_b", "GET", "/someindex/bar/1"); + assertAccessIsAllowed("user_c", "GET", "/someindex/bar/1"); + + assertAccessIsDenied("user_b", "DELETE", "/_snapshot/my-repo/my-snapshot"); + assertAccessIsDenied("user_c", "DELETE", "/_snapshot/my-repo/my-snapshot"); + assertAccessIsAllowed("user_a", "DELETE", "/_snapshot/my-repo/my-snapshot"); + + assertAccessIsDenied("user_b", "DELETE", "/_snapshot/my-repo"); + assertAccessIsDenied("user_c", "DELETE", "/_snapshot/my-repo"); + assertAccessIsAllowed("user_a", "DELETE", 
"/_snapshot/my-repo"); + } + + private void waitForSnapshotToFinish(String repo, String snapshot) throws Exception { + assertBusy(() -> { + SnapshotsStatusResponse response = client().admin().cluster().prepareSnapshotStatus(repo).setSnapshots(snapshot).get(); + assertThat(response.getSnapshots().get(0).getState(), is(SnapshotsInProgress.State.SUCCESS)); + }); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java new file mode 100644 index 0000000000000..016ccab87eb15 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DateMathExpressionIntegTests.java @@ -0,0 +1,118 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.integration; + +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.MultiSearchResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.Requests; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; + +import java.util.Collections; + +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.NONE; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +public class DateMathExpressionIntegTests extends SecurityIntegTestCase { + + protected static final SecureString USERS_PASSWD = new SecureString("change_me".toCharArray()); + protected static final String USERS_PASSWD_HASHED = new String(Hasher.BCRYPT.hash(USERS_PASSWD)); + + @Override + protected String configUsers() { + return super.configUsers() + + "user1:" + USERS_PASSWD_HASHED + "\n"; + } + + @Override + protected String configUsersRoles() { + return super.configUsersRoles() + + "role1:user1\n"; + } + + @Override + protected String configRoles() { + return super.configRoles() + + "\nrole1:\n" + + " cluster: [ none ]\n" + + " indices:\n" + + " - names: 'datemath-*'\n" + + " privileges: [ ALL ]\n"; + } + + public void testDateMathExpressionsCanBeAuthorized() throws Exception { + final String expression = ""; + final String expectedIndexName = new IndexNameExpressionResolver(Settings.EMPTY).resolveDateMathExpression(expression); + final boolean refeshOnOperation = randomBoolean(); + Client client = client().filterWithHeader(Collections.singletonMap("Authorization", 
basicAuthHeaderValue("user1", USERS_PASSWD))); + + if (randomBoolean()) { + CreateIndexResponse response = client.admin().indices().prepareCreate(expression).get(); + assertThat(response.isAcknowledged(), is(true)); + } + IndexResponse response = client.prepareIndex(expression, "type").setSource("foo", "bar") + .setRefreshPolicy(refeshOnOperation ? IMMEDIATE : NONE).get(); + + assertEquals(DocWriteResponse.Result.CREATED, response.getResult()); + assertThat(response.getIndex(), containsString(expectedIndexName)); + + if (refeshOnOperation == false) { + client.admin().indices().prepareRefresh(expression).get(); + } + SearchResponse searchResponse = client.prepareSearch(expression) + .setQuery(QueryBuilders.matchAllQuery()) + .get(); + assertThat(searchResponse.getHits().getTotalHits(), is(1L)); + + MultiSearchResponse multiSearchResponse = client.prepareMultiSearch() + .add(client.prepareSearch(expression).setQuery(QueryBuilders.matchAllQuery()).request()) + .get(); + assertThat(multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits(), is(1L)); + + UpdateResponse updateResponse = client.prepareUpdate(expression, "type", response.getId()) + .setDoc(Requests.INDEX_CONTENT_TYPE, "new", "field") + .setRefreshPolicy(refeshOnOperation ? IMMEDIATE : NONE) + .get(); + assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); + + if (refeshOnOperation == false) { + client.admin().indices().prepareRefresh(expression).get(); + } + GetResponse getResponse = client.prepareGet(expression, "type", response.getId()).setFetchSource(true).get(); + assertThat(getResponse.isExists(), is(true)); + assertEquals(expectedIndexName, getResponse.getIndex()); + assertThat(getResponse.getSourceAsMap().get("foo").toString(), is("bar")); + assertThat(getResponse.getSourceAsMap().get("new").toString(), is("field")); + + // multi get doesn't support expressions - this is probably a bug + MultiGetResponse multiGetResponse = client.prepareMultiGet() + .add(expression, "type", response.getId()) + .get(); + assertFalse(multiGetResponse.getResponses()[0].isFailed()); + assertTrue(multiGetResponse.getResponses()[0].getResponse().isExists()); + assertEquals(expectedIndexName, multiGetResponse.getResponses()[0].getResponse().getIndex()); + + + DeleteIndexResponse deleteIndexResponse = client.admin().indices().prepareDelete(expression).get(); + assertThat(deleteIndexResponse.isAcknowledged(), is(true)); + } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentAndFieldLevelSecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentAndFieldLevelSecurityTests.java new file mode 100644 index 0000000000000..5326954635e4c --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentAndFieldLevelSecurityTests.java @@ -0,0 +1,441 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.integration; + +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; +import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.equalTo; + +public class DocumentAndFieldLevelSecurityTests extends SecurityIntegTestCase { + + protected static final SecureString USERS_PASSWD = new SecureString("change_me".toCharArray()); + protected static final String USERS_PASSWD_HASHED = new String(Hasher.BCRYPT.hash(USERS_PASSWD)); + + @Override + protected String configUsers() { + return super.configUsers() + + "user1:" + USERS_PASSWD_HASHED + "\n" + + "user2:" + USERS_PASSWD_HASHED + "\n" + + "user3:" + USERS_PASSWD_HASHED + "\n" + + "user4:" + USERS_PASSWD_HASHED + "\n" + + "user5:" + USERS_PASSWD_HASHED + "\n"; + } + + @Override + protected String configUsersRoles() { + return super.configUsersRoles() + + "role1:user1\n" + + "role2:user1,user4\n" + + "role3:user2,user4\n" + + "role4:user3,user4,user5\n"; + } + + @Override + protected String configRoles() { + return super.configRoles() + + "\nrole1:\n" + + " cluster: [ none ]\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ none ]\n" + + "role2:\n" + + " cluster:\n" + + " - all\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ ALL ]\n" + + " field_security:\n" + + " grant: [ field1 ]\n" + + " query: '{\"term\" : {\"field1\" : \"value1\"}}'\n" + + "role3:\n" + + " cluster: [ all ]\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ ALL ]\n" + + " field_security:\n" + + " grant: [ field2 ]\n" + + " query: '{\"term\" : {\"field2\" : \"value2\"}}'\n" + + "role4:\n" + + " cluster: [ all ]\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ ALL ]\n" + + " field_security:\n" + + " grant: [ field1 ]\n" + + " query: '{\"term\" : {\"field2\" : \"value2\"}}'\n"; + } + + @Override + public Settings nodeSettings(int nodeOrdinal) { + 
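+        // explicitly enable document and field level security so the role queries and field grants defined above are enforced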
return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(XPackSettings.DLS_FLS_ENABLED.getKey(), true) + .build(); + } + + public void testSimpleQuery() { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text", "field2", "type=text") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1") + .setRefreshPolicy(IMMEDIATE) + .get(); + client().prepareIndex("test", "type1", "2").setSource("field2", "value2") + .setRefreshPolicy(IMMEDIATE) + .get(); + + SearchResponse response = client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .get(); + assertHitCount(response, 1); + assertSearchHits(response, "1"); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .get(); + assertHitCount(response, 1); + assertSearchHits(response, "2"); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .prepareSearch("test") + .addSort("_id", SortOrder.ASC) + .get(); + assertHitCount(response, 2); + assertSearchHits(response, "1", "2"); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); + assertThat(response.getHits().getAt(1).getSourceAsMap().get("field2").toString(), equalTo("value2")); + } + + public void testDLSIsAppliedBeforeFLS() { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text", "field2", "type=text") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value1") + .setRefreshPolicy(IMMEDIATE) + .get(); + client().prepareIndex("test", "type1", "2").setSource("field1", "value2", "field2", "value2") + .setRefreshPolicy(IMMEDIATE) + .get(); + + SearchResponse response = client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) + .prepareSearch("test").setQuery(QueryBuilders.termQuery("field1", "value2")) + .get(); + assertHitCount(response, 1); + assertSearchHits(response, "2"); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value2")); + + response = client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) + .prepareSearch("test").setQuery(QueryBuilders.termQuery("field1", "value1")) + .get(); + assertHitCount(response, 0); + } + + public void testQueryCache() { + assertAcked(client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder().put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true)) + .addMapping("type1", "field1", "type=text", "field2", "type=text") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1") + .setRefreshPolicy(IMMEDIATE) + .get(); + client().prepareIndex("test", "type1", "2").setSource("field2", "value2") + 
.setRefreshPolicy(IMMEDIATE) + .get(); + + // Both users have the same role query, but user3 has access to field2 and not field1, which should result in zero hits: + int max = scaledRandomIntBetween(4, 32); + for (int i = 0; i < max; i++) { + SearchResponse response = client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .get(); + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value1")); + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .get(); + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2"), equalTo("value2")); + + // this is a bit weird the document level permission (all docs with field2:value2) don't match with the field level + // permissions (field1), + // this results in document 2 being returned but no fields are visible: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test") + .get(); + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getId(), equalTo("2")); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(0)); + + // user4 has all roles + response = client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .prepareSearch("test") + .addSort("_id", SortOrder.ASC) + .get(); + assertHitCount(response, 2); + assertThat(response.getHits().getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value1")); + assertThat(response.getHits().getAt(1).getId(), equalTo("2")); + assertThat(response.getHits().getAt(1).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(1).getSourceAsMap().get("field2"), equalTo("value2")); + } + } + + public void testGetMappingsIsFiltered() { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text", "field2", "type=text") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1") + .setRefreshPolicy(IMMEDIATE) + .get(); + client().prepareIndex("test", "type1", "2").setSource("field2", "value2") + .setRefreshPolicy(IMMEDIATE) + .get(); + + { + GetMappingsResponse getMappingsResponse = client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .admin().indices().prepareGetMappings("test").get(); + assertExpectedFields(getMappingsResponse.getMappings(), "field1"); + } + + { + GetMappingsResponse getMappingsResponse = client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .admin().indices().prepareGetMappings("test").get(); + assertExpectedFields(getMappingsResponse.getMappings(), "field2"); + } + + { + GetMappingsResponse getMappingsResponse = client().filterWithHeader( + 
Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .admin().indices().prepareGetMappings("test").get(); + assertExpectedFields(getMappingsResponse.getMappings(), "field1"); + } + + { + GetMappingsResponse getMappingsResponse = client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .admin().indices().prepareGetMappings("test").get(); + assertExpectedFields(getMappingsResponse.getMappings(), "field1", "field2"); + } + } + + public void testGetIndexMappingsIsFiltered() { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text", "field2", "type=text") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1") + .setRefreshPolicy(IMMEDIATE) + .get(); + client().prepareIndex("test", "type1", "2").setSource("field2", "value2") + .setRefreshPolicy(IMMEDIATE) + .get(); + + { + GetIndexResponse getIndexResponse = client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .admin().indices().prepareGetIndex().setIndices("test").get(); + assertExpectedFields(getIndexResponse.getMappings(), "field1"); + } + { + GetIndexResponse getIndexResponse = client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .admin().indices().prepareGetIndex().setIndices("test").get(); + assertExpectedFields(getIndexResponse.getMappings(), "field2"); + } + { + GetIndexResponse getIndexResponse = client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .admin().indices().prepareGetIndex().setIndices("test").get(); + assertExpectedFields(getIndexResponse.getMappings(), "field1"); + } + { + GetIndexResponse getIndexResponse = client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .admin().indices().prepareGetIndex().setIndices("test").get(); + assertExpectedFields(getIndexResponse.getMappings(), "field1", "field2"); + } + } + + public void testGetFieldMappingsIsFiltered() { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text", "field2", "type=text") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1") + .setRefreshPolicy(IMMEDIATE) + .get(); + client().prepareIndex("test", "type1", "2").setSource("field2", "value2") + .setRefreshPolicy(IMMEDIATE) + .get(); + + { + GetFieldMappingsResponse getFieldMappingsResponse = client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .admin().indices().prepareGetFieldMappings("test").setFields("*").get(); + + Map>> mappings = + getFieldMappingsResponse.mappings(); + assertEquals(1, mappings.size()); + assertExpectedFields(mappings.get("test"), "field1"); + } + { + GetFieldMappingsResponse getFieldMappingsResponse = client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .admin().indices().prepareGetFieldMappings("test").setFields("*").get(); + + Map>> mappings = + getFieldMappingsResponse.mappings(); + assertEquals(1, mappings.size()); + assertExpectedFields(mappings.get("test"), "field2"); + } + { + GetFieldMappingsResponse getFieldMappingsResponse = client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", 
USERS_PASSWD))) + .admin().indices().prepareGetFieldMappings("test").setFields("*").get(); + + Map>> mappings = + getFieldMappingsResponse.mappings(); + assertEquals(1, mappings.size()); + assertExpectedFields(mappings.get("test"), "field1"); + } + { + GetFieldMappingsResponse getFieldMappingsResponse = client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .admin().indices().prepareGetFieldMappings("test").setFields("*").get(); + + Map>> mappings = + getFieldMappingsResponse.mappings(); + assertEquals(1, mappings.size()); + assertExpectedFields(mappings.get("test"), "field1", "field2"); + } + } + + public void testFieldCapabilitiesIsFiltered() { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text", "field2", "type=text") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1") + .setRefreshPolicy(IMMEDIATE) + .get(); + client().prepareIndex("test", "type1", "2").setSource("field2", "value2") + .setRefreshPolicy(IMMEDIATE) + .get(); + + { + FieldCapabilitiesRequest fieldCapabilitiesRequest = new FieldCapabilitiesRequest().fields("*").indices("test"); + FieldCapabilitiesResponse response = client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .fieldCaps(fieldCapabilitiesRequest).actionGet(); + assertExpectedFields(response, "field1"); + } + { + FieldCapabilitiesRequest fieldCapabilitiesRequest = new FieldCapabilitiesRequest().fields("*").indices("test"); + FieldCapabilitiesResponse response = client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .fieldCaps(fieldCapabilitiesRequest).actionGet(); + assertExpectedFields(response, "field2"); + } + { + FieldCapabilitiesRequest fieldCapabilitiesRequest = new FieldCapabilitiesRequest().fields("*").indices("test"); + FieldCapabilitiesResponse response = client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .fieldCaps(fieldCapabilitiesRequest).actionGet(); + assertExpectedFields(response, "field1"); + } + { + FieldCapabilitiesRequest fieldCapabilitiesRequest = new FieldCapabilitiesRequest().fields("*").indices("test"); + FieldCapabilitiesResponse response = client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .fieldCaps(fieldCapabilitiesRequest).actionGet(); + assertExpectedFields(response, "field1", "field2"); + } + } + + @SuppressWarnings("unchecked") + private static void assertExpectedFields(ImmutableOpenMap> mappings, + String... fields) { + Map sourceAsMap = mappings.get("test").get("type1").getSourceAsMap(); + assertEquals(1, sourceAsMap.size()); + Map properties = (Map)sourceAsMap.get("properties"); + assertEquals(fields.length, properties.size()); + for (String field : fields) { + assertNotNull(properties.get(field)); + } + } + + private static void assertExpectedFields(FieldCapabilitiesResponse fieldCapabilitiesResponse, String... 
expectedFields) { + Map> responseMap = fieldCapabilitiesResponse.get(); + Set builtInMetaDataFields = IndicesModule.getBuiltInMetaDataFields(); + for (String field : builtInMetaDataFields) { + Map remove = responseMap.remove(field); + assertNotNull(" expected field [" + field + "] not found", remove); + } + for (String field : expectedFields) { + Map remove = responseMap.remove(field); + assertNotNull(" expected field [" + field + "] not found", remove); + } + assertEquals("Some unexpected fields were returned: " + responseMap.keySet(), 0, responseMap.size()); + } + + private static void assertExpectedFields(Map> mappings, + String... expectedFields) { + assertEquals(1, mappings.size()); + Map fields = new HashMap<>(mappings.get("type1")); + Set builtInMetaDataFields = IndicesModule.getBuiltInMetaDataFields(); + for (String field : builtInMetaDataFields) { + GetFieldMappingsResponse.FieldMappingMetaData fieldMappingMetaData = fields.remove(field); + assertNotNull(" expected field [" + field + "] not found", fieldMappingMetaData); + } + for (String field : expectedFields) { + GetFieldMappingsResponse.FieldMappingMetaData fieldMappingMetaData = fields.remove(field); + assertNotNull("expected field [" + field + "] not found", fieldMappingMetaData); + } + assertEquals("Some unexpected fields were returned: " + fields.keySet(), 0, fields.size()); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityRandomTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityRandomTests.java new file mode 100644 index 0000000000000..f4d1f429d8c6b --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityRandomTests.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.integration; + +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.test.SecurityIntegTestCase; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; + +public class DocumentLevelSecurityRandomTests extends SecurityIntegTestCase { + + protected static final SecureString USERS_PASSWD = new SecureString("change_me".toCharArray()); + protected static final String USERS_PASSWD_HASHED = new String(Hasher.BCRYPT.hash(new SecureString("change_me".toCharArray()))); + + // can't add a second test method, because each test run creates a new instance of this class and that will will result + // in a new random value: + private final int numberOfRoles = scaledRandomIntBetween(3, 99); + + @Override + protected String configUsers() { + StringBuilder builder = new StringBuilder(super.configUsers()); + for (int i = 1; i <= numberOfRoles; i++) { + builder.append("user").append(i).append(':').append(USERS_PASSWD_HASHED).append('\n'); + } + return builder.toString(); + } + + @Override + protected String configUsersRoles() { + StringBuilder builder = new StringBuilder(super.configUsersRoles()); + builder.append("role0:"); + for (int i = 1; i <= numberOfRoles; i++) { + builder.append("user").append(i); + if (i != numberOfRoles) { + builder.append(","); + } + } + builder.append("\n"); + for (int i = 1; i <= numberOfRoles; i++) { + builder.append("role").append(i).append(":user").append(i).append('\n'); + } + return builder.toString(); + } + + @Override + protected String configRoles() { + StringBuilder builder = new StringBuilder(super.configRoles()); + builder.append("\nrole0:\n"); + builder.append(" cluster: [ none ]\n"); + builder.append(" indices:\n"); + builder.append(" - names: '*'\n"); + builder.append(" privileges: [ none ]\n"); + for (int i = 1; i <= numberOfRoles; i++) { + builder.append("role").append(i).append(":\n"); + builder.append(" cluster: [ all ]\n"); + builder.append(" indices:\n"); + builder.append(" - names: '*'\n"); + builder.append(" privileges:\n"); + builder.append(" - all\n"); + builder.append(" query: \n"); + builder.append(" term: \n"); + builder.append(" field1: value").append(i).append('\n'); + } + return builder.toString(); + } + + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(XPackSettings.DLS_FLS_ENABLED.getKey(), true) + .build(); + } + + public void testDuelWithAliasFilters() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text", "field2", "type=text") + ); + + List requests = new ArrayList<>(numberOfRoles); + IndicesAliasesRequestBuilder builder = 
client().admin().indices().prepareAliases(); + for (int i = 1; i <= numberOfRoles; i++) { + String value = "value" + i; + requests.add(client().prepareIndex("test", "type1", value).setSource("field1", value)); + builder.addAlias("test", "alias" + i, QueryBuilders.termQuery("field1", value)); + } + indexRandom(true, requests); + builder.get(); + + for (int roleI = 1; roleI <= numberOfRoles; roleI++) { + SearchResponse searchResponse1 = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user" + roleI, USERS_PASSWD))) + .prepareSearch("test") + .get(); + SearchResponse searchResponse2 = client().prepareSearch("alias" + roleI).get(); + assertThat(searchResponse1.getHits().getTotalHits(), equalTo(searchResponse2.getHits().getTotalHits())); + for (int hitI = 0; hitI < searchResponse1.getHits().getHits().length; hitI++) { + assertThat(searchResponse1.getHits().getAt(hitI).getId(), equalTo(searchResponse2.getHits().getAt(hitI).getId())); + } + } + } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java new file mode 100644 index 0000000000000..aa74b71963712 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java @@ -0,0 +1,1034 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.integration; + +import org.apache.lucene.search.join.ScoreMode; +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.Version; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.search.MultiSearchResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; +import org.elasticsearch.action.termvectors.TermVectorsRequest; +import org.elasticsearch.action.termvectors.TermVectorsResponse; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.analysis.common.CommonAnalysisPlugin; +import org.elasticsearch.client.Requests; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.FuzzyQueryBuilder; +import org.elasticsearch.index.query.InnerHitBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.indices.IndicesRequestCache; +import org.elasticsearch.join.ParentJoinPlugin; +import org.elasticsearch.join.aggregations.Children; +import org.elasticsearch.join.aggregations.JoinAggregationBuilders; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.bucket.global.Global; +import 
org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.profile.ProfileShardResult;
+import org.elasticsearch.search.profile.query.QueryProfileShardResult;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortMode;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.search.suggest.SuggestBuilder;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestion;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder;
+import org.elasticsearch.search.suggest.phrase.PhraseSuggestion;
+import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder;
+import org.elasticsearch.search.suggest.term.TermSuggestion;
+import org.elasticsearch.search.suggest.term.TermSuggestionBuilder;
+import org.elasticsearch.test.InternalSettingsPlugin;
+import org.elasticsearch.test.SecurityIntegTestCase;
+import org.elasticsearch.xpack.core.XPackSettings;
+import org.elasticsearch.xpack.security.LocalStateSecurity;
+import org.elasticsearch.xpack.core.security.authc.support.Hasher;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.join.query.JoinQueryBuilders.hasChildQuery;
+import static org.elasticsearch.join.query.JoinQueryBuilders.hasParentQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
+import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER;
+import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+
+@LuceneTestCase.SuppressCodecs("*") // suppress test codecs, otherwise the test using the completion suggester fails
+public class DocumentLevelSecurityTests extends SecurityIntegTestCase {
+
+    protected static final SecureString USERS_PASSWD = new SecureString("change_me".toCharArray());
+    protected static final String USERS_PASSWD_HASHED = new String(Hasher.BCRYPT.hash(USERS_PASSWD));
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return Arrays.asList(LocalStateSecurity.class, CommonAnalysisPlugin.class, ParentJoinPlugin.class, InternalSettingsPlugin.class);
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> transportClientPlugins() {
+        return nodePlugins();
+    }
+
+    @Override
+    protected String configUsers() {
+        return super.configUsers() +
+                "user1:" + USERS_PASSWD_HASHED + "\n" +
+                "user2:" + USERS_PASSWD_HASHED + "\n" +
+                "user3:" + USERS_PASSWD_HASHED + "\n";
+    }
+
+    @Override
+    protected String configUsersRoles() {
+        return super.configUsersRoles() +
+                "role1:user1,user2,user3\n" +
+                "role2:user1,user3\n" +
+                "role3:user2,user3\n";
+    }
+
+    @Override
+    protected
String configRoles() { + return super.configRoles() + + "\nrole1:\n" + + " cluster: [ none ]\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ none ]\n" + + "\nrole2:\n" + + " cluster:\n" + + " - all\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges:\n" + + " - all\n" + + " query: \n" + + " term: \n" + + " field1: value1\n" + + "role3:\n" + + " cluster: [ all ]\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ ALL ]\n" + + " query: '{\"term\" : {\"field2\" : \"value2\"}}'"; // <-- query defined as json in a string + } + + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(XPackSettings.DLS_FLS_ENABLED.getKey(), true) + .put(XPackSettings.AUDIT_ENABLED.getKey(), false) // Just to make logs less noisy + .build(); + } + + public void testSimpleQuery() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text", "field2", "type=text", "field3", "type=text") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1") + .setRefreshPolicy(IMMEDIATE) + .get(); + client().prepareIndex("test", "type1", "2").setSource("field2", "value2") + .setRefreshPolicy(IMMEDIATE) + .get(); + client().prepareIndex("test", "type1", "3").setSource("field3", "value3") + .setRefreshPolicy(IMMEDIATE) + .get(); + + SearchResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(randomBoolean() ? QueryBuilders.termQuery("field1", "value1") : QueryBuilders.matchAllQuery()) + .get(); + assertHitCount(response, 1); + assertSearchHits(response, "1"); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(randomBoolean() ? QueryBuilders.termQuery("field2", "value2") : QueryBuilders.matchAllQuery()) + .get(); + assertHitCount(response, 1); + assertSearchHits(response, "2"); + + QueryBuilder combined = QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("field2", "value2")) + .should(QueryBuilders.termQuery("field1", "value1")) + .minimumShouldMatch(1); + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(randomBoolean() ? 
combined : QueryBuilders.matchAllQuery()) + .get(); + assertHitCount(response, 2); + assertSearchHits(response, "1", "2"); + } + + public void testGetApi() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text", "field2", "type=text", "field3", "type=text") + ); + + client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get(); + client().prepareIndex("test", "type1", "2").setSource("field2", "value2").get(); + client().prepareIndex("test", "type1", "3").setSource("field3", "value3").get(); + + // test documents users can see + boolean realtime = randomBoolean(); + GetResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareGet("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.isExists(), is(true)); + assertThat(response.getId(), equalTo("1")); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareGet("test", "type1", "2") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.isExists(), is(true)); + assertThat(response.getId(), equalTo("2")); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareGet("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.isExists(), is(true)); + assertThat(response.getId(), equalTo("1")); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareGet("test", "type1", "2") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.isExists(), is(true)); + assertThat(response.getId(), equalTo("2")); + + // test documents user cannot see + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareGet("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.isExists(), is(false)); + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareGet("test", "type1", "2") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.isExists(), is(false)); + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareGet("test", "type1", "3") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.isExists(), is(false)); + } + + public void testMGetApi() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text", "field2", "type=text", "field3", "type=text") + ); + + client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get(); + client().prepareIndex("test", "type1", "2").setSource("field2", "value2").get(); + client().prepareIndex("test", "type1", "3").setSource("field3", "value3").get(); + + boolean realtime = randomBoolean(); + MultiGetResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareMultiGet() + .add("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + 
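// user1's role query only matches documents with field1:value1, so only document 1 is visible to its multi-get +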
assertThat(response.getResponses()[0].isFailed(), is(false)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getId(), equalTo("1")); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareMultiGet() + .add("test", "type1", "2") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.getResponses()[0].isFailed(), is(false)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getId(), equalTo("2")); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareMultiGet() + .add("test", "type1", "1") + .add("test", "type1", "2") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.getResponses()[0].isFailed(), is(false)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getId(), equalTo("1")); + assertThat(response.getResponses()[1].isFailed(), is(false)); + assertThat(response.getResponses()[1].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[1].getResponse().getId(), equalTo("2")); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareMultiGet() + .add("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.getResponses()[0].isFailed(), is(false)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(false)); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareMultiGet() + .add("test", "type1", "2") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.getResponses()[0].isFailed(), is(false)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(false)); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareMultiGet() + .add("test", "type1", "3") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.getResponses()[0].isFailed(), is(false)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(false)); + } + + public void testMSearch() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test1") + .addMapping("type1", "field1", "type=text", "field2", "type=text", "field3", "type=text", "id", "type=integer") + ); + assertAcked(client().admin().indices().prepareCreate("test2") + .addMapping("type1", "field1", "type=text", "field2", "type=text", "field3", "type=text", "id", "type=integer") + ); + + client().prepareIndex("test1", "type1", "1").setSource("field1", "value1", "id", 1).get(); + client().prepareIndex("test1", "type1", "2").setSource("field2", "value2", "id", 2).get(); + client().prepareIndex("test1", "type1", "3").setSource("field3", "value3", "id", 3).get(); + client().prepareIndex("test2", "type1", "1").setSource("field1", "value1", "id", 1).get(); + client().prepareIndex("test2", "type1", "2").setSource("field2", "value2", "id", 2).get(); + client().prepareIndex("test2", "type1", "3").setSource("field3", "value3", "id", 3).get(); + client().admin().indices().prepareRefresh("test1", 
"test2").get(); + + MultiSearchResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareMultiSearch() + .add(client().prepareSearch("test1").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .get(); + assertFalse(response.getResponses()[0].isFailure()); + assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits(), is(1L)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("id"), is(1)); + + assertFalse(response.getResponses()[1].isFailure()); + assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits(), is(1L)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("id"), is(1)); + + response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareMultiSearch() + .add(client().prepareSearch("test1").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .get(); + assertFalse(response.getResponses()[0].isFailure()); + assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits(), is(1L)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("id"), is(2)); + + assertFalse(response.getResponses()[1].isFailure()); + assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits(), is(1L)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("id"), is(2)); + + response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareMultiSearch() + .add(client().prepareSearch("test1").setTypes("type1").addSort(SortBuilders.fieldSort("id").sortMode(SortMode.MIN)) + .setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setTypes("type1").addSort(SortBuilders.fieldSort("id").sortMode(SortMode.MIN)) + .setQuery(QueryBuilders.matchAllQuery())) + .get(); + assertFalse(response.getResponses()[0].isFailure()); + assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits(), is(2L)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + 
assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("id"), is(1)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(1).getSourceAsMap().size(), is(2)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(1).getSourceAsMap().get("field2"), is("value2")); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(1).getSourceAsMap().get("id"), is(2)); + + assertFalse(response.getResponses()[1].isFailure()); + assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits(), is(2L)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("id"), is(1)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(1).getSourceAsMap().size(), is(2)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(1).getSourceAsMap().get("field2"), is("value2")); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(1).getSourceAsMap().get("id"), is(2)); + } + + public void testTVApi() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text,term_vector=with_positions_offsets_payloads", + "field2", "type=text,term_vector=with_positions_offsets_payloads", + "field3", "type=text,term_vector=with_positions_offsets_payloads") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1") + .setRefreshPolicy(IMMEDIATE) + .get(); + client().prepareIndex("test", "type1", "2").setSource("field2", "value2") + .setRefreshPolicy(IMMEDIATE) + .get(); + client().prepareIndex("test", "type1", "3").setSource("field3", "value3") + .setRefreshPolicy(IMMEDIATE) + .get(); + + boolean realtime = randomBoolean(); + TermVectorsResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareTermVectors("test", "type1", "1") + .setRealtime(realtime) + .get(); + assertThat(response.isExists(), is(true)); + assertThat(response.getId(), is("1")); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareTermVectors("test", "type1", "2") + .setRealtime(realtime) + .get(); + assertThat(response.isExists(), is(true)); + assertThat(response.getId(), is("2")); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareTermVectors("test", "type1", "1") + .setRealtime(realtime) + .get(); + assertThat(response.isExists(), is(true)); + assertThat(response.getId(), is("1")); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareTermVectors("test", "type1", "2") + .setRealtime(realtime) + .get(); + assertThat(response.isExists(), is(true)); + assertThat(response.getId(), is("2")); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareTermVectors("test", "type1", "1") + .setRealtime(realtime) + .get(); + assertThat(response.isExists(), is(false)); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, 
basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareTermVectors("test", "type1", "2") + .setRealtime(realtime) + .get(); + assertThat(response.isExists(), is(false)); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareTermVectors("test", "type1", "3") + .setRealtime(realtime) + .get(); + assertThat(response.isExists(), is(false)); + } + + public void testMTVApi() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text,term_vector=with_positions_offsets_payloads", + "field2", "type=text,term_vector=with_positions_offsets_payloads", + "field3", "type=text,term_vector=with_positions_offsets_payloads") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1") + .setRefreshPolicy(IMMEDIATE) + .get(); + client().prepareIndex("test", "type1", "2").setSource("field2", "value2") + .setRefreshPolicy(IMMEDIATE) + .get(); + client().prepareIndex("test", "type1", "3").setSource("field3", "value3") + .setRefreshPolicy(IMMEDIATE) + .get(); + + boolean realtime = randomBoolean(); + MultiTermVectorsResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareMultiTermVectors() + .add(new TermVectorsRequest("test", "type1", "1").realtime(realtime)) + .get(); + assertThat(response.getResponses().length, equalTo(1)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getId(), is("1")); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareMultiTermVectors() + .add(new TermVectorsRequest("test", "type1", "2").realtime(realtime)) + .get(); + assertThat(response.getResponses().length, equalTo(1)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getId(), is("2")); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareMultiTermVectors() + .add(new TermVectorsRequest("test", "type1", "1").realtime(realtime)) + .add(new TermVectorsRequest("test", "type1", "2").realtime(realtime)) + .get(); + assertThat(response.getResponses().length, equalTo(2)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getId(), is("1")); + assertThat(response.getResponses()[1].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[1].getResponse().getId(), is("2")); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareMultiTermVectors() + .add(new TermVectorsRequest("test", "type1", "1").realtime(realtime)) + .get(); + assertThat(response.getResponses().length, equalTo(1)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(false)); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareMultiTermVectors() + .add(new TermVectorsRequest("test", "type1", "2").realtime(realtime)) + .get(); + assertThat(response.getResponses().length, equalTo(1)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(false)); + + response = 
client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareMultiTermVectors() + .add(new TermVectorsRequest("test", "type1", "3").realtime(realtime)) + .get(); + assertThat(response.getResponses().length, equalTo(1)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(false)); + } + + public void testGlobalAggregation() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text", "field2", "type=text,fielddata=true", "field3", "type=text") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1") + .setRefreshPolicy(IMMEDIATE) + .get(); + client().prepareIndex("test", "type1", "2").setSource("field2", "value2") + .setRefreshPolicy(IMMEDIATE) + .get(); + client().prepareIndex("test", "type1", "3").setSource("field3", "value3") + .setRefreshPolicy(IMMEDIATE) + .get(); + + SearchResponse response = client().prepareSearch("test") + .addAggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("field2").field("field2"))) + .get(); + assertHitCount(response, 3); + assertSearchHits(response, "1", "2", "3"); + + Global globalAgg = response.getAggregations().get("global"); + assertThat(globalAgg.getDocCount(), equalTo(3L)); + Terms termsAgg = globalAgg.getAggregations().get("field2"); + assertThat(termsAgg.getBuckets().get(0).getKeyAsString(), equalTo("value2")); + assertThat(termsAgg.getBuckets().get(0).getDocCount(), equalTo(1L)); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .addAggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("field2").field("field2"))) + .get(); + assertHitCount(response, 1); + assertSearchHits(response, "1"); + + globalAgg = response.getAggregations().get("global"); + assertThat(globalAgg.getDocCount(), equalTo(1L)); + termsAgg = globalAgg.getAggregations().get("field2"); + assertThat(termsAgg.getBuckets().size(), equalTo(0)); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .addAggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("field2").field("field2"))) + .get(); + assertHitCount(response, 1); + assertSearchHits(response, "2"); + + globalAgg = response.getAggregations().get("global"); + assertThat(globalAgg.getDocCount(), equalTo(1L)); + termsAgg = globalAgg.getAggregations().get("field2"); + assertThat(termsAgg.getBuckets().size(), equalTo(1)); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test") + .addAggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("field2").field("field2"))) + .get(); + assertHitCount(response, 2); + assertSearchHits(response, "1", "2"); + + globalAgg = response.getAggregations().get("global"); + assertThat(globalAgg.getDocCount(), equalTo(2L)); + termsAgg = globalAgg.getAggregations().get("field2"); + assertThat(termsAgg.getBuckets().size(), equalTo(1)); + } + + public void testParentChild() throws Exception { + XContentBuilder mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("join_field") + .field("type", "join") + .startObject("relations") + .field("parent", "child") + 
.endObject() + .endObject() + .startObject("field1") + .field("type", "text") + .endObject() + .startObject("field2") + .field("type", "text") + .endObject() + .startObject("field3") + .field("type", "text") + .endObject() + .endObject() + .endObject(); + assertAcked(prepareCreate("test") + .addMapping("doc", mapping)); + ensureGreen(); + + // index simple data + client().prepareIndex("test", "doc", "p1").setSource("join_field", "parent", "field1", "value1").get(); + + Map source = new HashMap<>(); + source.put("field2", "value2"); + Map joinField = new HashMap<>(); + joinField.put("name", "child"); + joinField.put("parent", "p1"); + source.put("join_field", joinField); + client().prepareIndex("test", "doc", "c1").setSource(source).setRouting("p1").get(); + client().prepareIndex("test", "doc", "c2").setSource(source).setRouting("p1").get(); + source = new HashMap<>(); + source.put("field3", "value3"); + source.put("join_field", joinField); + client().prepareIndex("test", "doc", "c3").setSource(source).setRouting("p1").get(); + refresh(); + verifyParentChild(); + } + + private void verifyParentChild() { + SearchResponse searchResponse = client().prepareSearch("test") + .setQuery(hasChildQuery("child", matchAllQuery(), ScoreMode.None)) + .get(); + assertHitCount(searchResponse, 1L); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("p1")); + + searchResponse = client().prepareSearch("test") + .setQuery(hasParentQuery("parent", matchAllQuery(), false)) + .addSort("_id", SortOrder.ASC) + .get(); + assertHitCount(searchResponse, 3L); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("c1")); + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("c2")); + assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("c3")); + + // Both user1 and user2 can't see field1 and field2, no parent/child query should yield results: + searchResponse = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(hasChildQuery("child", matchAllQuery(), ScoreMode.None)) + .get(); + assertHitCount(searchResponse, 0L); + + searchResponse = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(hasChildQuery("child", matchAllQuery(), ScoreMode.None)) + .get(); + assertHitCount(searchResponse, 0L); + + searchResponse = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(hasParentQuery("parent", matchAllQuery(), false)) + .get(); + assertHitCount(searchResponse, 0L); + + searchResponse = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(hasParentQuery("parent", matchAllQuery(), false)) + .get(); + assertHitCount(searchResponse, 0L); + + // user 3 can see them but not c3 + searchResponse = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(hasChildQuery("child", matchAllQuery(), ScoreMode.None)) + .get(); + assertHitCount(searchResponse, 1L); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("p1")); + + searchResponse = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test") + 
.setQuery(hasParentQuery("parent", matchAllQuery(), false)) + .get(); + assertHitCount(searchResponse, 2L); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("c1")); + assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("c2")); + } + + public void testScroll() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)) + .addMapping("type1", "field1", "type=text", "field2", "type=text", "field3", "type=text") + ); + final int numVisible = scaledRandomIntBetween(2, 10); + final int numInVisible = scaledRandomIntBetween(2, 10); + int id = 1; + for (int i = 0; i < numVisible; i++) { + client().prepareIndex("test", "type1", String.valueOf(id++)).setSource("field1", "value1").get(); + } + + for (int i = 0; i < numInVisible; i++) { + client().prepareIndex("test", "type1", String.valueOf(id++)).setSource("field2", "value2").get(); + client().prepareIndex("test", "type1", String.valueOf(id++)).setSource("field3", "value3").get(); + } + refresh(); + + SearchResponse response = null; + try { + response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setSize(1) + .setScroll(TimeValue.timeValueMinutes(1L)) + .setQuery(termQuery("field1", "value1")) + .get(); + do { + assertNoFailures(response); + assertThat(response.getHits().getTotalHits(), is((long) numVisible)); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), is(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + + if (response.getScrollId() == null) { + break; + } + + response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearchScroll(response.getScrollId()) + .setScroll(TimeValue.timeValueMinutes(1L)) + .get(); + } while (response.getHits().getHits().length > 0); + } finally { + if (response != null) { + String scrollId = response.getScrollId(); + if (scrollId != null) { + client().prepareClearScroll().addScrollId(scrollId).get(); + } + } + } + } + + public void testRequestCache() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)) + .addMapping("type1", "field1", "type=text", "field2", "type=text", "field3", "type=text") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1") + .get(); + client().prepareIndex("test", "type1", "2").setSource("field2", "value2") + .get(); + client().prepareIndex("test", "type1", "3").setSource("field3", "value3") + .get(); + refresh(); + + int max = scaledRandomIntBetween(4, 32); + for (int i = 0; i < max; i++) { + Boolean requestCache = randomFrom(true, null); + SearchResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setSize(0) + .setQuery(termQuery("field1", "value1")) + .setRequestCache(requestCache) + .get(); + assertNoFailures(response); + assertHitCount(response, 1); + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .setSize(0) + .setQuery(termQuery("field1", "value1")) + .setRequestCache(requestCache) + .get(); + 
assertNoFailures(response); + assertHitCount(response, 0); + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test") + .setSize(0) + .setQuery(termQuery("field1", "value1")) + .setRequestCache(requestCache) + .get(); + assertNoFailures(response); + assertHitCount(response, 1); + } + } + + public void testUpdateApiIsBlocked() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type", "field1", "type=text", "field2", "type=text") + ); + client().prepareIndex("test", "type", "1").setSource("field1", "value1") + .setRefreshPolicy(IMMEDIATE) + .get(); + + // With document level security enabled the update is not allowed: + try { + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareUpdate("test", "type", "1").setDoc(Requests.INDEX_CONTENT_TYPE, "field1", "value2") + .get(); + fail("failed, because update request shouldn't be allowed if document level security is enabled"); + } catch (ElasticsearchSecurityException e) { + assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(e.getMessage(), equalTo("Can't execute an update request if field or document level security is enabled")); + } + assertThat(client().prepareGet("test", "type", "1").get().getSource().get("field1").toString(), equalTo("value1")); + + // With no document level security enabled the update is allowed: + client().prepareUpdate("test", "type", "1").setDoc(Requests.INDEX_CONTENT_TYPE, "field1", "value2") + .get(); + assertThat(client().prepareGet("test", "type", "1").get().getSource().get("field1").toString(), equalTo("value2")); + + // With document level security enabled the update in bulk is not allowed: + BulkResponse bulkResponse = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue + ("user1", USERS_PASSWD))) + .prepareBulk() + .add(new UpdateRequest("test", "type", "1").doc(Requests.INDEX_CONTENT_TYPE, "field1", "value3")) + .get(); + assertEquals(1, bulkResponse.getItems().length); + BulkItemResponse bulkItem = bulkResponse.getItems()[0]; + assertTrue(bulkItem.isFailed()); + assertThat(bulkItem.getFailure().getCause(), instanceOf(ElasticsearchSecurityException.class)); + ElasticsearchSecurityException securityException = (ElasticsearchSecurityException) bulkItem.getFailure().getCause(); + assertThat(securityException.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(securityException.getMessage(), + equalTo("Can't execute a bulk request with update requests embedded if field or document level security is enabled")); + + assertThat(client().prepareGet("test", "type", "1").get().getSource().get("field1").toString(), equalTo("value2")); + + client().prepareBulk() + .add(new UpdateRequest("test", "type", "1").doc(Requests.INDEX_CONTENT_TYPE, "field1", "value3")) + .get(); + assertThat(client().prepareGet("test", "type", "1").get().getSource().get("field1").toString(), equalTo("value3")); + } + + public void testNestedInnerHits() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text", "nested_field", "type=nested") + ); + client().prepareIndex("test", "type1", "1") + .setSource(jsonBuilder().startObject() + .field("field1", "value1") + .startArray("nested_field") + .startObject() + .field("field2", "value2") + .endObject() + .endArray() + .endObject()) + .get(); + 
client().prepareIndex("test", "type1", "2") + .setSource(jsonBuilder().startObject() + .field("field1", "value2") + .startArray("nested_field") + .startObject() + .field("field2", "value2") + .endObject() + .endArray() + .endObject()) + .get(); + refresh("test"); + + SearchResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(QueryBuilders.nestedQuery("nested_field", QueryBuilders.termQuery("nested_field.field2", "value2"), + ScoreMode.None).innerHit(new InnerHitBuilder())) + .get(); + assertHitCount(response, 1); + assertSearchHits(response, "1"); + assertThat(response.getHits().getAt(0).getInnerHits().get("nested_field").getAt(0).getId(), equalTo("1")); + assertThat(response.getHits().getAt(0).getInnerHits().get("nested_field").getAt(0).getNestedIdentity().getOffset(), equalTo(0)); + assertThat(response.getHits().getAt(0).getInnerHits().get("nested_field").getAt(0).getSourceAsString(), + equalTo("{\"field2\":\"value2\"}")); + } + + public void testSuggesters() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + ) + .addMapping("type1", "field1", "type=text", "suggest_field1", "type=text", "suggest_field2", "type=completion") + ); + + client().prepareIndex("test", "type1", "1") + .setSource(jsonBuilder().startObject() + .field("field1", "value1") + .field("suggest_field1", "value") + .startObject("suggest_field2") + .field("input", "value") + .endObject() + .endObject()).get(); + // A document that is always included by role query of both roles: + client().prepareIndex("test", "type1", "2") + .setSource(jsonBuilder().startObject() + .field("field1", "value1") + .field("field2", "value2") + .endObject()).get(); + refresh("test"); + + // Term suggester: + SearchResponse response = client() + .prepareSearch("test") + .suggest(new SuggestBuilder() + .setGlobalText("valeu") + .addSuggestion("_name1", new TermSuggestionBuilder("suggest_field1")) + ).get(); + assertNoFailures(response); + + TermSuggestion termSuggestion = response.getSuggest().getSuggestion("_name1"); + assertThat(termSuggestion, notNullValue()); + assertThat(termSuggestion.getEntries().size(), equalTo(1)); + assertThat(termSuggestion.getEntries().get(0).getOptions().size(), equalTo(1)); + assertThat(termSuggestion.getEntries().get(0).getOptions().get(0).getText().string(), equalTo("value")); + + Exception e = expectThrows(ElasticsearchSecurityException.class, () -> client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .suggest(new SuggestBuilder() + .setGlobalText("valeu") + .addSuggestion("_name1", new TermSuggestionBuilder("suggest_field1")) + ).get()); + assertThat(e.getMessage(), equalTo("Suggest isn't supported if document level security is enabled")); + + // Phrase suggester: + response = client() + .prepareSearch("test") + .suggest(new SuggestBuilder() + .setGlobalText("valeu") + .addSuggestion("_name1", new PhraseSuggestionBuilder("suggest_field1")) + ).get(); + assertNoFailures(response); + + PhraseSuggestion phraseSuggestion = response.getSuggest().getSuggestion("_name1"); + assertThat(phraseSuggestion, notNullValue()); + assertThat(phraseSuggestion.getEntries().size(), equalTo(1)); + assertThat(phraseSuggestion.getEntries().get(0).getOptions().size(), equalTo(1)); + 
assertThat(phraseSuggestion.getEntries().get(0).getOptions().get(0).getText().string(), equalTo("value")); + + e = expectThrows(ElasticsearchSecurityException.class, () -> client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .suggest(new SuggestBuilder() + .setGlobalText("valeu") + .addSuggestion("_name1", new PhraseSuggestionBuilder("suggest_field1")) + ).get()); + assertThat(e.getMessage(), equalTo("Suggest isn't supported if document level security is enabled")); + + // Completion suggester: + response = client() + .prepareSearch("test") + .suggest(new SuggestBuilder() + .setGlobalText("valu") + .addSuggestion("_name1", new CompletionSuggestionBuilder("suggest_field2")) + ).get(); + assertNoFailures(response); + + CompletionSuggestion completionSuggestion = response.getSuggest().getSuggestion("_name1"); + assertThat(completionSuggestion, notNullValue()); + assertThat(completionSuggestion.getEntries().size(), equalTo(1)); + assertThat(completionSuggestion.getEntries().get(0).getOptions().size(), equalTo(1)); + assertThat(completionSuggestion.getEntries().get(0).getOptions().get(0).getText().string(), equalTo("value")); + + e = expectThrows(ElasticsearchSecurityException.class, () -> client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .suggest(new SuggestBuilder() + .setGlobalText("valeu") + .addSuggestion("_name1", new CompletionSuggestionBuilder("suggest_field2")) + ).get()); + assertThat(e.getMessage(), equalTo("Suggest isn't supported if document level security is enabled")); + } + + public void testProfile() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + ) + .addMapping("type1", "field1", "type=text", "other_field", "type=text") + ); + + client().prepareIndex("test", "type1", "1") + .setSource(jsonBuilder().startObject() + .field("field1", "value1") + .field("other_field", "value") + .endObject()).get(); + // A document that is always included by role query of both roles: + client().prepareIndex("test", "type1", "2") + .setSource(jsonBuilder().startObject() + .field("field1", "value1") + .field("field2", "value2") + .endObject()).get(); + refresh("test"); + + SearchResponse response = client() + .prepareSearch("test") + .setProfile(true) + .setQuery(new FuzzyQueryBuilder("other_field", "valeu")) + .get(); + assertNoFailures(response); + + assertThat(response.getProfileResults().size(), equalTo(1)); + ProfileShardResult shardResult = response.getProfileResults().get(response.getProfileResults().keySet().toArray()[0]); + assertThat(shardResult.getQueryProfileResults().size(), equalTo(1)); + QueryProfileShardResult queryProfileShardResult = shardResult.getQueryProfileResults().get(0); + assertThat(queryProfileShardResult.getQueryResults().size(), equalTo(1)); + logger.info("queryProfileShardResult=" + Strings.toString(queryProfileShardResult)); +// ProfileResult profileResult = queryProfileShardResult.getQueryResults().get(0); +// assertThat(profileResult.getLuceneDescription(), equalTo("(other_field:value)^0.8")); + + Exception e = expectThrows(ElasticsearchSecurityException.class, () -> client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .setProfile(true) + 
.setQuery(new FuzzyQueryBuilder("other_field", "valeu")) + .get()); + assertThat(e.getMessage(), equalTo("A search request cannot be profiled if document level security is enabled")); + } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityRandomTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityRandomTests.java new file mode 100644 index 0000000000000..995b91d3628db --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityRandomTests.java @@ -0,0 +1,246 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.integration; + +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.test.SecurityIntegTestCase; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.index.query.QueryBuilders.matchQuery; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; + +public class FieldLevelSecurityRandomTests extends SecurityIntegTestCase { + + protected static final SecureString USERS_PASSWD = new SecureString("change_me".toCharArray()); + protected static final String USERS_PASSWD_HASHED = new String(Hasher.BCRYPT.hash(new SecureString("change_me".toCharArray()))); + + private static Set allowedFields; + private static Set disAllowedFields; + + @Override + protected String configUsers() { + return super.configUsers() + + "user1:" + USERS_PASSWD_HASHED + "\n" + + "user2:" + USERS_PASSWD_HASHED + "\n" + + "user3:" + USERS_PASSWD_HASHED + "\n" + + "user4:" + USERS_PASSWD_HASHED + "\n" ; + } + + @Override + protected String configUsersRoles() { + return super.configUsersRoles() + + "role1:user1,user2,user3,user4\n" + + "role2:user1\n" + + "role3:user2\n" + + "role4:user3\n" + + "role5:user4\n"; + } + + @Override + protected String configRoles() { + if (allowedFields == null) { + allowedFields = new HashSet<>(); + disAllowedFields = new HashSet<>(); + int numFields = scaledRandomIntBetween(5, 50); + for (int i = 0; i < numFields; i++) { + String field = "field" + i; + if (i % 2 == 0) { + allowedFields.add(field); + } else { + disAllowedFields.add(field); + } + } + } + + StringBuilder roleFields = new StringBuilder(); + for (String field : allowedFields) { + roleFields.append(" - ").append(field).append('\n'); + } + + 
return super.configRoles() + + "\nrole1:\n" + + " cluster: [ none ]\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ none ]\n" + + "\nrole2:\n" + + " cluster: [ all ]\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ ALL ]\n" + + " field_security:\n" + + " grant:\n" + roleFields.toString() + + "role3:\n" + + " cluster:\n" + + " - all\n" + + " indices:\n" + + " - names: test\n" + + " privileges:\n" + + " - all\n" + + " field_security:\n" + + " grant: [ field1 ]\n" + + "role4:\n" + + " cluster: [ all ]\n" + + " indices:\n" + + " - names: test\n" + + " privileges: [ ALL ]\n" + + " field_security:\n" + + " grant: [ field2 ]\n" + + "role5:\n" + + " cluster: [ all ]\n" + + " indices:\n" + + " - names: test\n" + + " privileges: [ ALL ]\n" + + " field_security:\n" + + " grant: [ field3 ]\n"; + } + + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(XPackSettings.DLS_FLS_ENABLED.getKey(), true) + .build(); + } + + public void testRandom() throws Exception { + int j = 0; + Map doc = new HashMap<>(); + String[] fieldMappers = new String[(allowedFields.size() + disAllowedFields.size()) * 2]; + for (String field : allowedFields) { + fieldMappers[j++] = field; + fieldMappers[j++] = "type=text"; + doc.put(field, "value"); + } + for (String field : disAllowedFields) { + fieldMappers[j++] = field; + fieldMappers[j++] = "type=text"; + doc.put(field, "value"); + } + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", (Object[])fieldMappers) + ); + client().prepareIndex("test", "type1", "1").setSource(doc).setRefreshPolicy(IMMEDIATE).get(); + + for (String allowedField : allowedFields) { + logger.info("Checking allowed field [{}]", allowedField); + SearchResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery(allowedField, "value")) + .get(); + assertHitCount(response, 1); + } + for (String disallowedField : disAllowedFields) { + logger.info("Checking disallowed field [{}]", disallowedField); + SearchResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery(disallowedField, "value")) + .get(); + assertHitCount(response, 0); + } + } + + public void testDuel() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text", "field2", "type=text", "field3", "type=text") + ); + + int numDocs = scaledRandomIntBetween(32, 128); + List requests = new ArrayList<>(numDocs); + for (int i = 1; i <= numDocs; i++) { + String field = randomFrom("field1", "field2", "field3"); + String value = "value"; + requests.add(client().prepareIndex("test", "type1", value).setSource(field, value)); + } + indexRandom(true, requests); + + SearchResponse actual = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .addSort("_id", SortOrder.ASC) + .setQuery(QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("field1", "value")) + .should(QueryBuilders.termQuery("field2", "value")) + .should(QueryBuilders.termQuery("field3", "value")) + ) + .get(); + SearchResponse expected = client().prepareSearch("test") + .addSort("_id", SortOrder.ASC) + .setQuery(QueryBuilders.boolQuery() + 
.should(QueryBuilders.termQuery("field1", "value")) + ) + .get(); + assertThat(actual.getHits().getTotalHits(), equalTo(expected.getHits().getTotalHits())); + assertThat(actual.getHits().getHits().length, equalTo(expected.getHits().getHits().length)); + for (int i = 0; i < actual.getHits().getHits().length; i++) { + assertThat(actual.getHits().getAt(i).getId(), equalTo(expected.getHits().getAt(i).getId())); + } + + actual = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test") + .addSort("_id", SortOrder.ASC) + .setQuery(QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("field1", "value")) + .should(QueryBuilders.termQuery("field2", "value")) + .should(QueryBuilders.termQuery("field3", "value")) + ) + .get(); + expected = client().prepareSearch("test") + .addSort("_id", SortOrder.ASC) + .setQuery(QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("field2", "value")) + ) + .get(); + assertThat(actual.getHits().getTotalHits(), equalTo(expected.getHits().getTotalHits())); + assertThat(actual.getHits().getHits().length, equalTo(expected.getHits().getHits().length)); + for (int i = 0; i < actual.getHits().getHits().length; i++) { + assertThat(actual.getHits().getAt(i).getId(), equalTo(expected.getHits().getAt(i).getId())); + } + + actual = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .prepareSearch("test") + .addSort("_id", SortOrder.ASC) + .setQuery(QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("field1", "value")) + .should(QueryBuilders.termQuery("field2", "value")) + .should(QueryBuilders.termQuery("field3", "value")) + ) + .get(); + expected = client().prepareSearch("test") + .addSort("_id", SortOrder.ASC) + .setQuery(QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery("field3", "value")) + ) + .get(); + assertThat(actual.getHits().getTotalHits(), equalTo(expected.getHits().getTotalHits())); + assertThat(actual.getHits().getHits().length, equalTo(expected.getHits().getHits().length)); + for (int i = 0; i < actual.getHits().getHits().length; i++) { + assertThat(actual.getHits().getAt(i).getId(), equalTo(expected.getHits().getAt(i).getId())); + } + } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java new file mode 100644 index 0000000000000..7d31edb37493b --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java @@ -0,0 +1,1410 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.integration; + +import org.apache.lucene.search.join.ScoreMode; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.Version; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.search.MultiSearchResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; +import org.elasticsearch.action.termvectors.TermVectorsRequest; +import org.elasticsearch.action.termvectors.TermVectorsResponse; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.analysis.common.CommonAnalysisPlugin; +import org.elasticsearch.client.Requests; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.indices.IndicesRequestCache; +import org.elasticsearch.join.ParentJoinPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.security.LocalStateSecurity; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery; +import static org.elasticsearch.index.query.QueryBuilders.existsQuery; +import static org.elasticsearch.index.query.QueryBuilders.matchQuery; +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.join.query.JoinQueryBuilders.hasChildQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +// The random usage of meta fields such as _timestamp add noise to the test, so disable random index templates: +@ESIntegTestCase.ClusterScope +public class FieldLevelSecurityTests extends SecurityIntegTestCase { + + protected static final SecureString USERS_PASSWD = new 
SecureString("change_me".toCharArray()); + protected static final String USERS_PASSWD_HASHED = new String(Hasher.BCRYPT.hash(new SecureString("change_me".toCharArray()))); + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(LocalStateSecurity.class, CommonAnalysisPlugin.class, ParentJoinPlugin.class, + InternalSettingsPlugin.class); + } + + @Override + protected Collection> transportClientPlugins() { + return nodePlugins(); + } + + @Override + protected String configUsers() { + return super.configUsers() + + "user1:" + USERS_PASSWD_HASHED + "\n" + + "user2:" + USERS_PASSWD_HASHED + "\n" + + "user3:" + USERS_PASSWD_HASHED + "\n" + + "user4:" + USERS_PASSWD_HASHED + "\n" + + "user5:" + USERS_PASSWD_HASHED + "\n" + + "user6:" + USERS_PASSWD_HASHED + "\n" + + "user7:" + USERS_PASSWD_HASHED + "\n" + + "user8:" + USERS_PASSWD_HASHED + "\n"; + } + + @Override + protected String configUsersRoles() { + return super.configUsersRoles() + + "role1:user1\n" + + "role2:user1,user7,user8\n" + + "role3:user2,user7,user8\n" + + "role4:user3,user7\n" + + "role5:user4,user7\n" + + "role6:user5,user7\n" + + "role7:user6"; + } + @Override + protected String configRoles() { + return super.configRoles() + + "\nrole1:\n" + + " cluster: [ none ]\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ none ]\n" + + "role2:\n" + + " cluster: [ all ]\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ ALL ]\n" + + " field_security:\n" + + " grant: [ field1, join_field* ]\n" + + "role3:\n" + + " cluster: [ all ]\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ ALL ]\n" + + " field_security:\n" + + " grant: [ field2, query* ]\n" + + "role4:\n" + + " cluster: [ all ]\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ ALL ]\n" + + " field_security:\n" + + " grant: [ field1, field2]\n" + + "role5:\n" + + " cluster: [ all ]\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ ALL ]\n" + + " field_security:\n" + + " grant: [ ]\n" + + "role6:\n" + + " cluster: [ all ]\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ALL]\n" + + "role7:\n" + + " cluster: [ all ]\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ ALL ]\n" + + " field_security:\n" + + " grant: [ 'field*' ]\n"; + } + + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(XPackSettings.DLS_FLS_ENABLED.getKey(), true) + .build(); + } + + public void testQuery() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text", "field2", "type=text", "field3", "type=text") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2", "field3", "value3") + .setRefreshPolicy(IMMEDIATE) + .get(); + + // user1 has access to field1, so the query should match with the document: + SearchResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field1", "value1")) + .get(); + assertHitCount(response, 1); + // user2 has no access to field1, so the query should not match with the document: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field1", "value1")) + .get(); + assertHitCount(response, 0); + // user3 has access to field1 and field2, 
so the query should match with the document: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field1", "value1")) + .get(); + assertHitCount(response, 1); + // user4 has access to no fields, so the query should not match with the document: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field1", "value1")) + .get(); + assertHitCount(response, 0); + // user5 has no field level security configured, so the query should match with the document: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field1", "value1")) + .get(); + assertHitCount(response, 1); + // user7 has roles with field level security configured and without field level security + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user7", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field1", "value1")) + .get(); + assertHitCount(response, 1); + // user8 has roles with field level security configured for field1 and field2 + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user8", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field1", "value1")) + .get(); + assertHitCount(response, 1); + + // user1 has no access to field2, so the query should not match with the document: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field2", "value2")) + .get(); + assertHitCount(response, 0); + // user2 has access to field2, so the query should match with the document: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field2", "value2")) + .get(); + assertHitCount(response, 1); + // user3 has access to field1 and field2, so the query should match with the document: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field2", "value2")) + .get(); + assertHitCount(response, 1); + // user4 has access to no fields, so the query should not match with the document: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field2", "value2")) + .get(); + assertHitCount(response, 0); + // user5 has no field level security configured, so the query should match with the document: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field2", "value2")) + .get(); + assertHitCount(response, 1); + // user7 has roles with field level security and without field level security + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field2", "value2")) + .get(); + assertHitCount(response, 1); + // user8 has 
roles with field level security configured for field1 and field2 + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user8", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field2", "value2")) + .get(); + assertHitCount(response, 1); + + // user1 has no access to field3, so the query should not match with the document: + response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field3", "value3")) + .get(); + assertHitCount(response, 0); + // user2 has no access to field3, so the query should not match with the document: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field3", "value3")) + .get(); + assertHitCount(response, 0); + // user3 has access to field1 and field2 but not field3, so the query should not match with the document: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field3", "value3")) + .get(); + assertHitCount(response, 0); + // user4 has access to no fields, so the query should not match with the document: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field3", "value3")) + .get(); + assertHitCount(response, 0); + // user5 has no field level security configured, so the query should match with the document: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field3", "value3")) + .get(); + assertHitCount(response, 1); + // user7 has roles with field level security and without field level security + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field3", "value3")) + .get(); + assertHitCount(response, 1); + // user8 has roles with field level security configured for field1 and field2 + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user8", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field3", "value3")) + .get(); + assertHitCount(response, 0); + } + + public void testGetApi() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text", "field2", "type=text", "field3", "type=text") + ); + + client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2", "field3", "value3") + .get(); + + boolean realtime = randomBoolean(); + // user1 is granted access to field1 only: + GetResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareGet("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.isExists(), is(true)); + assertThat(response.getSource().size(), equalTo(1)); + assertThat(response.getSource().get("field1").toString(), equalTo("value1")); + + // user2 is granted access to field2 only: + response = 
client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareGet("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.isExists(), is(true)); + assertThat(response.getSource().size(), equalTo(1)); + assertThat(response.getSource().get("field2").toString(), equalTo("value2")); + + // user3 is granted access to field1 and field2: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareGet("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.isExists(), is(true)); + assertThat(response.getSource().size(), equalTo(2)); + assertThat(response.getSource().get("field1").toString(), equalTo("value1")); + assertThat(response.getSource().get("field2").toString(), equalTo("value2")); + + // user4 is granted access to no fields, so the get response does say the doc exist, but no fields are returned: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .prepareGet("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.isExists(), is(true)); + assertThat(response.getSource().size(), equalTo(0)); + + // user5 has no field level security configured, so all fields are returned: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) + .prepareGet("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.isExists(), is(true)); + assertThat(response.getSource().size(), equalTo(3)); + assertThat(response.getSource().get("field1").toString(), equalTo("value1")); + assertThat(response.getSource().get("field2").toString(), equalTo("value2")); + assertThat(response.getSource().get("field3").toString(), equalTo("value3")); + + // user6 has access to field* + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))) + .prepareGet("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.isExists(), is(true)); + assertThat(response.getSource().size(), equalTo(3)); + assertThat(response.getSource().get("field1").toString(), equalTo("value1")); + assertThat(response.getSource().get("field2").toString(), equalTo("value2")); + assertThat(response.getSource().get("field3").toString(), equalTo("value3")); + + // user7 has roles with field level security and without field level security + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user7", USERS_PASSWD))) + .prepareGet("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.isExists(), is(true)); + assertThat(response.getSource().size(), equalTo(3)); + assertThat(response.getSource().get("field1").toString(), equalTo("value1")); + assertThat(response.getSource().get("field2").toString(), equalTo("value2")); + assertThat(response.getSource().get("field3").toString(), equalTo("value3")); + + // user8 has roles with field level security with access to field1 and field2 + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user8", USERS_PASSWD))) + .prepareGet("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + 
assertThat(response.isExists(), is(true)); + assertThat(response.getSource().size(), equalTo(2)); + assertThat(response.getSource().get("field1").toString(), equalTo("value1")); + assertThat(response.getSource().get("field2").toString(), equalTo("value2")); + } + + public void testMGetApi() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text", "field2", "type=text", "field3", "type=text") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2", "field3", "value3").get(); + + boolean realtime = randomBoolean(); + // user1 is granted access to field1 only: + MultiGetResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareMultiGet() + .add("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.getResponses()[0].isFailed(), is(false)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getSource().size(), equalTo(1)); + assertThat(response.getResponses()[0].getResponse().getSource().get("field1").toString(), equalTo("value1")); + + // user2 is granted access to field2 only: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareMultiGet() + .add("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.getResponses()[0].isFailed(), is(false)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getSource().size(), equalTo(1)); + assertThat(response.getResponses()[0].getResponse().getSource().get("field2").toString(), equalTo("value2")); + + // user3 is granted access to field1 and field2: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareMultiGet() + .add("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.getResponses()[0].isFailed(), is(false)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getSource().size(), equalTo(2)); + assertThat(response.getResponses()[0].getResponse().getSource().get("field1").toString(), equalTo("value1")); + assertThat(response.getResponses()[0].getResponse().getSource().get("field2").toString(), equalTo("value2")); + + // user4 is granted access to no fields, so the get response does say the doc exist, but no fields are returned: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .prepareMultiGet() + .add("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.getResponses()[0].isFailed(), is(false)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getSource().size(), equalTo(0)); + + // user5 has no field level security configured, so all fields are returned: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) + .prepareMultiGet() + .add("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + 
assertThat(response.getResponses()[0].isFailed(), is(false)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getSource().size(), equalTo(3)); + assertThat(response.getResponses()[0].getResponse().getSource().get("field1").toString(), equalTo("value1")); + assertThat(response.getResponses()[0].getResponse().getSource().get("field2").toString(), equalTo("value2")); + assertThat(response.getResponses()[0].getResponse().getSource().get("field3").toString(), equalTo("value3")); + + // user6 has access to field* + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))) + .prepareMultiGet() + .add("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.getResponses()[0].isFailed(), is(false)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getSource().size(), equalTo(3)); + assertThat(response.getResponses()[0].getResponse().getSource().get("field1").toString(), equalTo("value1")); + assertThat(response.getResponses()[0].getResponse().getSource().get("field2").toString(), equalTo("value2")); + assertThat(response.getResponses()[0].getResponse().getSource().get("field3").toString(), equalTo("value3")); + + // user7 has roles with field level security and without field level security + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user7", USERS_PASSWD))) + .prepareMultiGet() + .add("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.getResponses()[0].isFailed(), is(false)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getSource().size(), equalTo(3)); + assertThat(response.getResponses()[0].getResponse().getSource().get("field1").toString(), equalTo("value1")); + assertThat(response.getResponses()[0].getResponse().getSource().get("field2").toString(), equalTo("value2")); + assertThat(response.getResponses()[0].getResponse().getSource().get("field3").toString(), equalTo("value3")); + + // user8 has roles with field level security with access to field1 and field2 + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user8", USERS_PASSWD))) + .prepareMultiGet() + .add("test", "type1", "1") + .setRealtime(realtime) + .setRefresh(true) + .get(); + assertThat(response.getResponses()[0].isFailed(), is(false)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getSource().size(), equalTo(2)); + assertThat(response.getResponses()[0].getResponse().getSource().get("field1").toString(), equalTo("value1")); + assertThat(response.getResponses()[0].getResponse().getSource().get("field2").toString(), equalTo("value2")); + } + + public void testMSearchApi() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test1") + .addMapping("type1", "field1", "type=text", "field2", "type=text", "field3", "type=text") + ); + assertAcked(client().admin().indices().prepareCreate("test2") + .addMapping("type1", "field1", "type=text", "field2", "type=text", "field3", "type=text") + ); + + client().prepareIndex("test1", "type1", "1") + .setSource("field1", "value1", "field2", "value2", "field3", "value3").get(); + 
client().prepareIndex("test2", "type1", "1") + .setSource("field1", "value1", "field2", "value2", "field3", "value3").get(); + client().admin().indices().prepareRefresh("test1", "test2").get(); + + // user1 is granted access to field1 only + MultiSearchResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareMultiSearch() + .add(client().prepareSearch("test1").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .get(); + assertFalse(response.getResponses()[0].isFailure()); + assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits(), is(1L)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(1)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits(), is(1L)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(1)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + + // user2 is granted access to field2 only + response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareMultiSearch() + .add(client().prepareSearch("test1").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .get(); + assertFalse(response.getResponses()[0].isFailure()); + assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits(), is(1L)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(1)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); + assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits(), is(1L)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(1)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); + + // user3 is granted access to field1 and field2 + response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareMultiSearch() + .add(client().prepareSearch("test1").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .get(); + assertFalse(response.getResponses()[0].isFailure()); + assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits(), is(1L)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); + assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits(), is(1L)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); + 
assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); + + // user4 is granted access to no fields, so the search response does say the doc exist, but no fields are returned + response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .prepareMultiSearch() + .add(client().prepareSearch("test1").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .get(); + assertFalse(response.getResponses()[0].isFailure()); + assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits(), is(1L)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(0)); + assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits(), is(1L)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(0)); + + // user5 has no field level security configured, so all fields are returned + response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) + .prepareMultiSearch() + .add(client().prepareSearch("test1").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .get(); + assertFalse(response.getResponses()[0].isFailure()); + assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits(), is(1L)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(3)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field3"), is("value3")); + assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits(), is(1L)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(3)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field3"), is("value3")); + + // user6 has access to field* + response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))) + .prepareMultiSearch() + .add(client().prepareSearch("test1").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .get(); + assertFalse(response.getResponses()[0].isFailure()); + assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits(), is(1L)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(3)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + 
assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field3"), is("value3")); + assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits(), is(1L)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(3)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field3"), is("value3")); + + // user7 has roles with field level security and without field level security + response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user7", USERS_PASSWD))) + .prepareMultiSearch() + .add(client().prepareSearch("test1").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .get(); + assertFalse(response.getResponses()[0].isFailure()); + assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits(), is(1L)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(3)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field3"), is("value3")); + assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits(), is(1L)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(3)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field3"), is("value3")); + + // user8 has roles with field level security with access to field1 and field2 + response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user8", USERS_PASSWD))) + .prepareMultiSearch() + .add(client().prepareSearch("test1").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .add(client().prepareSearch("test2").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())) + .get(); + assertFalse(response.getResponses()[0].isFailure()); + assertThat(response.getResponses()[0].getResponse().getHits().getTotalHits(), is(1L)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + assertThat(response.getResponses()[0].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); + assertThat(response.getResponses()[1].getResponse().getHits().getTotalHits(), is(1L)); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().size(), is(2)); + 
assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + assertThat(response.getResponses()[1].getResponse().getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); + } + + public void testScroll() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder().put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true)) + .addMapping("type1", "field1", "type=text", "field2", "type=text", "field3", "type=text") + ); + + final int numDocs = scaledRandomIntBetween(2, 10); + for (int i = 0; i < numDocs; i++) { + client().prepareIndex("test", "type1", String.valueOf(i)) + .setSource("field1", "value1", "field2", "value2", "field3", "value3") + .get(); + } + refresh("test"); + + SearchResponse response = null; + try { + response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setScroll(TimeValue.timeValueMinutes(1L)) + .setSize(1) + .setQuery(constantScoreQuery(termQuery("field1", "value1"))) + .setFetchSource(true) + .get(); + + do { + assertThat(response.getHits().getTotalHits(), is((long) numDocs)); + assertThat(response.getHits().getHits().length, is(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), is(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + + if (response.getScrollId() == null) { + break; + } + + response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearchScroll(response.getScrollId()) + .setScroll(TimeValue.timeValueMinutes(1L)) + .get(); + } while (response.getHits().getHits().length > 0); + + } finally { + if (response != null) { + String scrollId = response.getScrollId(); + if (scrollId != null) { + client().prepareClearScroll().addScrollId(scrollId).get(); + } + } + } + } + + public void testQueryCache() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder().put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true)) + .addMapping("type1", "field1", "type=text", "field2", "type=text", "field3", "type=text") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2", "field3", "value3") + .setRefreshPolicy(IMMEDIATE) + .get(); + + int max = scaledRandomIntBetween(4, 32); + for (int i = 0; i < max; i++) { + SearchResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(constantScoreQuery(termQuery("field1", "value1"))) + .get(); + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), is(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(constantScoreQuery(termQuery("field1", "value1"))) + .get(); + assertHitCount(response, 0); + String multipleFieldsUser = randomFrom("user5", "user6", "user7"); + response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue(multipleFieldsUser, USERS_PASSWD))) + .prepareSearch("test") + .setQuery(constantScoreQuery(termQuery("field1", "value1"))) + 
.get(); + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), is(3)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2"), is("value2")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field3"), is("value3")); + } + } + + public void testRequestCache() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)) + .addMapping("type1", "field1", "type=text", "field2", "type=text", "field3", "type=text") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2") + .setRefreshPolicy(IMMEDIATE) + .get(); + + int max = scaledRandomIntBetween(4, 32); + for (int i = 0; i < max; i++) { + Boolean requestCache = randomFrom(true, null); + SearchResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setSize(0) + .setQuery(termQuery("field1", "value1")) + .setRequestCache(requestCache) + .get(); + assertNoFailures(response); + assertHitCount(response, 1); + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .setSize(0) + .setQuery(termQuery("field1", "value1")) + .setRequestCache(requestCache) + .get(); + assertNoFailures(response); + assertHitCount(response, 0); + String multipleFieldsUser = randomFrom("user5", "user6", "user7"); + response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue(multipleFieldsUser, USERS_PASSWD))) + .prepareSearch("test") + .setSize(0) + .setQuery(termQuery("field1", "value1")) + .setRequestCache(requestCache) + .get(); + assertNoFailures(response); + assertHitCount(response, 1); + } + } + + public void testFields() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text,store=true", "field2", "type=text,store=true", + "field3", "type=text,store=true") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2", "field3", "value3") + .setRefreshPolicy(IMMEDIATE) + .get(); + + // user1 is granted access to field1 only: + SearchResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .addStoredField("field1") + .addStoredField("field2") + .addStoredField("field3") + .get(); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue(), equalTo("value1")); + + // user2 is granted access to field2 only: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .addStoredField("field1") + .addStoredField("field2") + .addStoredField("field3") + .get(); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getFields().get("field2").getValue(), equalTo("value2")); + + // user3 is granted access to field1 and field2: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", 
USERS_PASSWD))) + .prepareSearch("test") + .addStoredField("field1") + .addStoredField("field2") + .addStoredField("field3") + .get(); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getFields().get("field2").getValue(), equalTo("value2")); + + // user4 is granted access to no fields: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .prepareSearch("test") + .addStoredField("field1") + .addStoredField("field2") + .addStoredField("field3") + .get(); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(0)); + + // user5 has no field level security configured: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) + .prepareSearch("test") + .addStoredField("field1") + .addStoredField("field2") + .addStoredField("field3") + .get(); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(3)); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getFields().get("field2").getValue(), equalTo("value2")); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue(), equalTo("value3")); + + // user6 has field level security configured with access to field*: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))) + .prepareSearch("test") + .addStoredField("field1") + .addStoredField("field2") + .addStoredField("field3") + .get(); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(3)); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getFields().get("field2").getValue(), equalTo("value2")); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue(), equalTo("value3")); + + // user7 has access to all fields due to a mix of roles without field level security and with: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user7", USERS_PASSWD))) + .prepareSearch("test") + .addStoredField("field1") + .addStoredField("field2") + .addStoredField("field3") + .get(); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(3)); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getFields().get("field2").getValue(), equalTo("value2")); + assertThat(response.getHits().getAt(0).getFields().get("field3").getValue(), equalTo("value3")); + + // user8 has field level security configured with access to field1 and field2: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user8", USERS_PASSWD))) + .prepareSearch("test") + .addStoredField("field1") + .addStoredField("field2") + .addStoredField("field3") + .get(); + assertThat(response.getHits().getAt(0).getFields().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getFields().get("field1").getValue(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getFields().get("field2").getValue(), equalTo("value2")); + } + + public void testSource() throws Exception { + 
assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text", "field2", "type=text", "field3", "type=text") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2", "field3", "value3") + .setRefreshPolicy(IMMEDIATE) + .get(); + + // user1 is granted access to field1 only: + SearchResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .get(); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); + + // user2 is granted access to field2 only: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .get(); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + + // user3 is granted access to field1 and field2: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test") + .get(); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + + // user4 is granted access to no fields: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .prepareSearch("test") + .get(); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(0)); + + // user5 has no field level security configured: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) + .prepareSearch("test") + .get(); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(3)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field3").toString(), equalTo("value3")); + + // user6 has field level security configured with access to field*: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))) + .prepareSearch("test") + .get(); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(3)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field3").toString(), equalTo("value3")); + + // user7 has access to all fields + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user7", USERS_PASSWD))) + .prepareSearch("test") + .get(); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(3)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); + 
assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2"));
+ assertThat(response.getHits().getAt(0).getSourceAsMap().get("field3").toString(), equalTo("value3"));
+
+ // user8 has field level security configured with access to field1 and field2:
+ response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user8", USERS_PASSWD)))
+ .prepareSearch("test")
+ .get();
+ assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2));
+ assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1"));
+ assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2"));
+ }
+
+ public void testSort() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=long", "field2", "type=long")
+ );
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", 1d, "field2", 2d)
+ .setRefreshPolicy(IMMEDIATE)
+ .get();
+
+ // user1 is granted access to field1, so it is included in the sort_values
+ SearchResponse response = client()
+ .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)))
+ .prepareSearch("test")
+ .addSort("field1", SortOrder.ASC)
+ .get();
+ assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(1L));
+
+ // user2 is not granted access to field1, so the default missing sort value is included
+ response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD)))
+ .prepareSearch("test")
+ .addSort("field1", SortOrder.ASC)
+ .get();
+ assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(Long.MAX_VALUE));
+
+ // user1 is not granted access to field2, so the default missing sort value is included
+ response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)))
+ .prepareSearch("test")
+ .addSort("field2", SortOrder.ASC)
+ .get();
+ assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(Long.MAX_VALUE));
+
+ // user2 is granted access to field2, so it is included in the sort_values
+ response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD)))
+ .prepareSearch("test")
+ .addSort("field2", SortOrder.ASC)
+ .get();
+ assertThat(response.getHits().getAt(0).getSortValues()[0], equalTo(2L));
+ }
+
+ public void testAggs() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=text,fielddata=true", "field2", "type=text,fielddata=true")
+ );
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2")
+ .setRefreshPolicy(IMMEDIATE)
+ .get();
+
+ // user1 is authorized to use field1, so buckets are included for a term agg on field1
+ SearchResponse response = client()
+ .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)))
+ .prepareSearch("test")
+ .addAggregation(AggregationBuilders.terms("_name").field("field1"))
+ .get();
+ assertThat(((Terms) response.getAggregations().get("_name")).getBucketByKey("value1").getDocCount(), equalTo(1L));
+
+ // user2 is not authorized to use field1, so no buckets are included for a term agg on field1
+ response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD)))
+ .prepareSearch("test")
+ .addAggregation(AggregationBuilders.terms("_name").field("field1"))
+ .get();
+ assertThat(((Terms) response.getAggregations().get("_name")).getBucketByKey("value1"), nullValue());
+
+ // user1 is not authorized to use field2, so no buckets are included for a term agg on field2
+ response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)))
+ .prepareSearch("test")
+ .addAggregation(AggregationBuilders.terms("_name").field("field2"))
+ .get();
+ assertThat(((Terms) response.getAggregations().get("_name")).getBucketByKey("value2"), nullValue());
+
+ // user2 is authorized to use field2, so buckets are included for a term agg on field2
+ response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD)))
+ .prepareSearch("test")
+ .addAggregation(AggregationBuilders.terms("_name").field("field2"))
+ .get();
+ assertThat(((Terms) response.getAggregations().get("_name")).getBucketByKey("value2").getDocCount(), equalTo(1L));
+ }
+
+ public void testTVApi() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=text,term_vector=with_positions_offsets_payloads",
+ "field2", "type=text,term_vector=with_positions_offsets_payloads",
+ "field3", "type=text,term_vector=with_positions_offsets_payloads")
+ );
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2", "field3", "value3")
+ .setRefreshPolicy(IMMEDIATE)
+ .get();
+
+ boolean realtime = randomBoolean();
+ TermVectorsResponse response = client()
+ .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD)))
+ .prepareTermVectors("test", "type1", "1")
+ .setRealtime(realtime)
+ .get();
+ assertThat(response.isExists(), is(true));
+ assertThat(response.getFields().size(), equalTo(1));
+ assertThat(response.getFields().terms("field1").size(), equalTo(1L));
+
+ response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD)))
+ .prepareTermVectors("test", "type1", "1")
+ .setRealtime(realtime)
+ .get();
+ assertThat(response.isExists(), is(true));
+ assertThat(response.getFields().size(), equalTo(1));
+ assertThat(response.getFields().terms("field2").size(), equalTo(1L));
+
+ response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD)))
+ .prepareTermVectors("test", "type1", "1")
+ .setRealtime(realtime)
+ .get();
+ assertThat(response.isExists(), is(true));
+ assertThat(response.getFields().size(), equalTo(2));
+ assertThat(response.getFields().terms("field1").size(), equalTo(1L));
+ assertThat(response.getFields().terms("field2").size(), equalTo(1L));
+
+ response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD)))
+ .prepareTermVectors("test", "type1", "1")
+ .setRealtime(realtime)
+ .get();
+ assertThat(response.isExists(), is(true));
+ assertThat(response.getFields().size(), equalTo(0));
+
+ response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD)))
+ .prepareTermVectors("test", "type1", "1")
+ .setRealtime(realtime)
+ .get();
+ assertThat(response.isExists(), is(true));
+ assertThat(response.getFields().size(), equalTo(3));
+
assertThat(response.getFields().terms("field1").size(), equalTo(1L)); + assertThat(response.getFields().terms("field2").size(), equalTo(1L)); + assertThat(response.getFields().terms("field3").size(), equalTo(1L)); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))) + .prepareTermVectors("test", "type1", "1") + .setRealtime(realtime) + .get(); + assertThat(response.isExists(), is(true)); + assertThat(response.getFields().size(), equalTo(3)); + assertThat(response.getFields().terms("field1").size(), equalTo(1L)); + assertThat(response.getFields().terms("field2").size(), equalTo(1L)); + assertThat(response.getFields().terms("field3").size(), equalTo(1L)); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user7", USERS_PASSWD))) + .prepareTermVectors("test", "type1", "1") + .setRealtime(realtime) + .get(); + assertThat(response.isExists(), is(true)); + assertThat(response.getFields().size(), equalTo(3)); + assertThat(response.getFields().terms("field1").size(), equalTo(1L)); + assertThat(response.getFields().terms("field2").size(), equalTo(1L)); + assertThat(response.getFields().terms("field3").size(), equalTo(1L)); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user8", USERS_PASSWD))) + .prepareTermVectors("test", "type1", "1") + .setRealtime(realtime) + .get(); + assertThat(response.isExists(), is(true)); + assertThat(response.getFields().size(), equalTo(2)); + assertThat(response.getFields().terms("field1").size(), equalTo(1L)); + assertThat(response.getFields().terms("field2").size(), equalTo(1L)); + } + + public void testMTVApi() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text,term_vector=with_positions_offsets_payloads", + "field2", "type=text,term_vector=with_positions_offsets_payloads", + "field3", "type=text,term_vector=with_positions_offsets_payloads") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2", "field3", "value3") + .setRefreshPolicy(IMMEDIATE) + .get(); + + boolean realtime = randomBoolean(); + MultiTermVectorsResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareMultiTermVectors() + .add(new TermVectorsRequest("test", "type1", "1").realtime(realtime)) + .get(); + assertThat(response.getResponses().length, equalTo(1)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getFields().size(), equalTo(1)); + assertThat(response.getResponses()[0].getResponse().getFields().terms("field1").size(), equalTo(1L)); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareMultiTermVectors() + .add(new TermVectorsRequest("test", "type1", "1").realtime(realtime)) + .get(); + assertThat(response.getResponses().length, equalTo(1)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getFields().size(), equalTo(1)); + assertThat(response.getResponses()[0].getResponse().getFields().terms("field2").size(), equalTo(1L)); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", 
USERS_PASSWD))) + .prepareMultiTermVectors() + .add(new TermVectorsRequest("test", "type1", "1").realtime(realtime)) + .get(); + assertThat(response.getResponses().length, equalTo(1)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getFields().size(), equalTo(2)); + assertThat(response.getResponses()[0].getResponse().getFields().terms("field1").size(), equalTo(1L)); + assertThat(response.getResponses()[0].getResponse().getFields().terms("field2").size(), equalTo(1L)); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .prepareMultiTermVectors() + .add(new TermVectorsRequest("test", "type1", "1").realtime(realtime)) + .get(); + assertThat(response.getResponses().length, equalTo(1)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getFields().size(), equalTo(0)); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user5", USERS_PASSWD))) + .prepareMultiTermVectors() + .add(new TermVectorsRequest("test", "type1", "1").realtime(realtime)) + .get(); + assertThat(response.getResponses().length, equalTo(1)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getFields().size(), equalTo(3)); + assertThat(response.getResponses()[0].getResponse().getFields().terms("field1").size(), equalTo(1L)); + assertThat(response.getResponses()[0].getResponse().getFields().terms("field2").size(), equalTo(1L)); + assertThat(response.getResponses()[0].getResponse().getFields().terms("field3").size(), equalTo(1L)); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))) + .prepareMultiTermVectors() + .add(new TermVectorsRequest("test", "type1", "1").realtime(realtime)) + .get(); + assertThat(response.getResponses().length, equalTo(1)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getFields().size(), equalTo(3)); + assertThat(response.getResponses()[0].getResponse().getFields().terms("field1").size(), equalTo(1L)); + assertThat(response.getResponses()[0].getResponse().getFields().terms("field2").size(), equalTo(1L)); + assertThat(response.getResponses()[0].getResponse().getFields().terms("field3").size(), equalTo(1L)); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user7", USERS_PASSWD))) + .prepareMultiTermVectors() + .add(new TermVectorsRequest("test", "type1", "1").realtime(realtime)) + .get(); + assertThat(response.getResponses().length, equalTo(1)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getFields().size(), equalTo(3)); + assertThat(response.getResponses()[0].getResponse().getFields().terms("field1").size(), equalTo(1L)); + assertThat(response.getResponses()[0].getResponse().getFields().terms("field2").size(), equalTo(1L)); + assertThat(response.getResponses()[0].getResponse().getFields().terms("field3").size(), equalTo(1L)); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user8", USERS_PASSWD))) + .prepareMultiTermVectors() + .add(new TermVectorsRequest("test", 
"type1", "1").realtime(realtime)) + .get(); + assertThat(response.getResponses().length, equalTo(1)); + assertThat(response.getResponses()[0].getResponse().isExists(), is(true)); + assertThat(response.getResponses()[0].getResponse().getFields().size(), equalTo(2)); + assertThat(response.getResponses()[0].getResponse().getFields().terms("field1").size(), equalTo(1L)); + assertThat(response.getResponses()[0].getResponse().getFields().terms("field2").size(), equalTo(1L)); + } + + public void testParentChild() throws Exception { + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject() + .startObject("properties") + .startObject("join_field") + .field("type", "join") + .startObject("relations") + .field("parent", "child") + .endObject() + .endObject() + .endObject() + .endObject(); + assertAcked(prepareCreate("test") + .addMapping("doc", mapping)); + ensureGreen(); + + // index simple data + client().prepareIndex("test", "doc", "p1").setSource("join_field", "parent").get(); + Map source = new HashMap<>(); + source.put("field1", "red"); + Map joinField = new HashMap<>(); + joinField.put("name", "child"); + joinField.put("parent", "p1"); + source.put("join_field", joinField); + client().prepareIndex("test", "doc", "c1").setSource(source).setRouting("p1").get(); + source = new HashMap<>(); + source.put("field1", "yellow"); + source.put("join_field", joinField); + client().prepareIndex("test", "doc", "c2").setSource(source).setRouting("p1").get(); + refresh(); + verifyParentChild(); + } + + private void verifyParentChild() { + SearchResponse searchResponse = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(hasChildQuery("child", termQuery("field1", "yellow"), ScoreMode.None)) + .get(); + assertHitCount(searchResponse, 1L); + assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); + assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("p1")); + + searchResponse = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(hasChildQuery("child", termQuery("field1", "yellow"), ScoreMode.None)) + .get(); + assertHitCount(searchResponse, 0L); + } + + public void testUpdateApiIsBlocked() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type", "field1", "type=text", "field2", "type=text") + ); + client().prepareIndex("test", "type", "1") + .setSource("field1", "value1", "field2", "value1") + .setRefreshPolicy(IMMEDIATE) + .get(); + + // With field level security enabled the update is not allowed: + try { + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareUpdate("test", "type", "1").setDoc(Requests.INDEX_CONTENT_TYPE, "field2", "value2") + .get(); + fail("failed, because update request shouldn't be allowed if field level security is enabled"); + } catch (ElasticsearchSecurityException e) { + assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(e.getMessage(), equalTo("Can't execute an update request if field or document level security is enabled")); + } + assertThat(client().prepareGet("test", "type", "1").get().getSource().get("field2").toString(), equalTo("value1")); + + // With no field level security enabled the update is allowed: + client().prepareUpdate("test", "type", "1").setDoc(Requests.INDEX_CONTENT_TYPE, "field2", "value2") 
+ .get(); + assertThat(client().prepareGet("test", "type", "1").get().getSource().get("field2").toString(), equalTo("value2")); + + // With field level security enabled the update in bulk is not allowed: + BulkResponse bulkResponse = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue + ("user1", USERS_PASSWD))) + .prepareBulk() + .add(new UpdateRequest("test", "type", "1").doc(Requests.INDEX_CONTENT_TYPE, "field2", "value3")) + .get(); + assertEquals(1, bulkResponse.getItems().length); + BulkItemResponse bulkItem = bulkResponse.getItems()[0]; + assertTrue(bulkItem.isFailed()); + assertThat(bulkItem.getFailure().getCause(), instanceOf(ElasticsearchSecurityException.class)); + ElasticsearchSecurityException securityException = (ElasticsearchSecurityException) bulkItem.getFailure().getCause(); + assertThat(securityException.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(securityException.getMessage(), + equalTo("Can't execute a bulk request with update requests embedded if field or document level security is enabled")); + + assertThat(client().prepareGet("test", "type", "1").get().getSource().get("field2").toString(), equalTo("value2")); + + client().prepareBulk() + .add(new UpdateRequest("test", "type", "1").doc(Requests.INDEX_CONTENT_TYPE, "field2", "value3")) + .get(); + assertThat(client().prepareGet("test", "type", "1").get().getSource().get("field2").toString(), equalTo("value3")); + } + + public void testQuery_withRoleWithFieldWildcards() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text", "field2", "type=text") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2") + .setRefreshPolicy(IMMEDIATE) + .get(); + + // user6 has access to all fields, so the query should match with the document: + SearchResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field1", "value1")) + .get(); + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(matchQuery("field2", "value2")) + .get(); + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(2)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1").toString(), equalTo("value1")); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field2").toString(), equalTo("value2")); + } + + public void testExistQuery() { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text", "field2", "type=text", "field3", "type=text") + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2", "field3", "value3") + .setRefreshPolicy(IMMEDIATE) + .get(); + + // user1 has access to field1, so the query should match with the document: + SearchResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + 
.prepareSearch("test") + .setQuery(existsQuery("field1")) + .get(); + assertHitCount(response, 1); + // user1 has no access to field2, so the query should not match with the document: + response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(existsQuery("field2")) + .get(); + assertHitCount(response, 0); + // user2 has no access to field1, so the query should not match with the document: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(existsQuery("field1")) + .get(); + assertHitCount(response, 0); + // user2 has access to field2, so the query should match with the document: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(existsQuery("field2")) + .get(); + assertHitCount(response, 1); + // user3 has access to field1 and field2, so the query should match with the document: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(existsQuery("field1")) + .get(); + assertHitCount(response, 1); + // user3 has access to field1 and field2, so the query should match with the document: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(existsQuery("field2")) + .get(); + assertHitCount(response, 1); + // user4 has access to no fields, so the query should not match with the document: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(existsQuery("field1")) + .get(); + assertHitCount(response, 0); + // user4 has access to no fields, so the query should not match with the document: + response = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user4", USERS_PASSWD))) + .prepareSearch("test") + .setQuery(existsQuery("field2")) + .get(); + assertHitCount(response, 0); + } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java new file mode 100644 index 0000000000000..b1428040080d4 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java @@ -0,0 +1,568 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.integration; + +import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.junit.Before; + +import java.util.Collections; +import java.util.Locale; +import java.util.Map; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; +import static org.hamcrest.Matchers.is; + +public class IndexPrivilegeTests extends AbstractPrivilegeTestCase { + + private String jsonDoc = "{ \"name\" : \"elasticsearch\", \"body\": \"foo bar\" }"; + + private static final String ROLES = + "all_cluster_role:\n" + + " cluster: [ all ]\n" + + "all_indices_role:\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ all ]\n" + + "all_a_role:\n" + + " indices:\n" + + " - names: 'a'\n" + + " privileges: [ all ]\n" + + "read_a_role:\n" + + " indices:\n" + + " - names: 'a'\n" + + " privileges: [ read ]\n" + + "read_b_role:\n" + + " indices:\n" + + " - names: 'b'\n" + + " privileges: [ read ]\n" + + "write_a_role:\n" + + " indices:\n" + + " - names: 'a'\n" + + " privileges: [ write ]\n" + + "read_ab_role:\n" + + " indices:\n" + + " - names: [ 'a', 'b' ]\n" + + " privileges: [ read ]\n" + + "all_regex_ab_role:\n" + + " indices:\n" + + " - names: '/a|b/'\n" + + " privileges: [ all ]\n" + + "manage_starts_with_a_role:\n" + + " indices:\n" + + " - names: 'a*'\n" + + " privileges: [ manage ]\n" + + "read_write_all_role:\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ read, write ]\n" + + "create_c_role:\n" + + " indices:\n" + + " - names: 'c'\n" + + " privileges: [ create_index ]\n" + + "monitor_b_role:\n" + + " indices:\n" + + " - names: 'b'\n" + + " privileges: [ monitor ]\n" + + "read_write_a_role:\n" + + " indices:\n" + + " - names: 'a'\n" + + " privileges: [ read, write ]\n" + + "delete_b_role:\n" + + " indices:\n" + + " - names: 'b'\n" + + " privileges: [ delete ]\n" + + "index_a_role:\n" + + " indices:\n" + + " - names: 'a'\n" + + " privileges: [ index ]\n" + + "\n"; + + private static final String USERS = + "admin:" + USERS_PASSWD_HASHED + "\n" + + "u1:" + USERS_PASSWD_HASHED + "\n" + + "u2:" + USERS_PASSWD_HASHED + "\n" + + "u3:" + USERS_PASSWD_HASHED + "\n" + + "u4:" + USERS_PASSWD_HASHED + "\n" + + "u5:" + USERS_PASSWD_HASHED + "\n" + + "u6:" + USERS_PASSWD_HASHED + "\n" + + "u7:" + USERS_PASSWD_HASHED + "\n"+ + "u8:" + USERS_PASSWD_HASHED + "\n"+ + "u9:" + USERS_PASSWD_HASHED + "\n" + + "u11:" + USERS_PASSWD_HASHED + "\n" + + "u12:" + USERS_PASSWD_HASHED + "\n" + + "u13:" + USERS_PASSWD_HASHED + "\n" + + "u14:" + USERS_PASSWD_HASHED + "\n"; + + private static final String USERS_ROLES = + "all_indices_role:admin,u8\n" + + "all_cluster_role:admin\n" + + "all_a_role:u1,u2,u6\n" + + "read_a_role:u1,u5,u14\n" + + "read_b_role:u3,u5,u6,u8,u13\n" + + "write_a_role:u9\n" + + "read_ab_role:u2,u4,u9\n" + + "all_regex_ab_role:u3\n" + + "manage_starts_with_a_role:u4\n" + + "read_write_all_role:u12\n" + + "create_c_role:u11\n" + + "monitor_b_role:u14\n" + + "read_write_a_role:u12\n" + + "delete_b_role:u11\n" + + "index_a_role:u13\n"; + + @Override + protected Settings nodeSettings() { + return Settings.builder().put(super.nodeSettings()) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .build(); + 
}
+
+ @Override
+ protected String configRoles() {
+ return super.configRoles() + "\n" + ROLES;
+ }
+
+ @Override
+ protected String configUsers() {
+ return super.configUsers() + USERS;
+ }
+
+ @Override
+ protected String configUsersRoles() {
+ return super.configUsersRoles() + USERS_ROLES;
+ }
+
+ @Before
+ public void insertBaseDocumentsAsAdmin() throws Exception {
+ // indices: a,b,c,abc
+ Map params = singletonMap("refresh", "true");
+ assertAccessIsAllowed("admin", "PUT", "/a/foo/1", jsonDoc, params);
+ assertAccessIsAllowed("admin", "PUT", "/b/foo/1", jsonDoc, params);
+ assertAccessIsAllowed("admin", "PUT", "/c/foo/1", jsonDoc, params);
+ assertAccessIsAllowed("admin", "PUT", "/abc/foo/1", jsonDoc, params);
+ }
+
+ private static String randomIndex() {
+ return randomFrom("a", "b", "c", "abc");
+ }
+
+ public void testUserU1() throws Exception {
+ // u1 has all_a_role and read_a_role
+ assertUserIsAllowed("u1", "all", "a");
+ assertUserIsDenied("u1", "all", "b");
+ assertUserIsDenied("u1", "all", "c");
+ assertAccessIsAllowed("u1",
+ "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n");
+ assertAccessIsAllowed("u1", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } ");
+ assertAccessIsAllowed("u1", "PUT",
+ "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n");
+ assertAccessIsAllowed("u1",
+ "GET", "/" + randomIndex() + "/foo/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }");
+ }
+
+ public void testUserU2() throws Exception {
+ // u2 has all_a_role and read_ab_role
+ assertUserIsAllowed("u2", "all", "a");
+ assertUserIsAllowed("u2", "read", "b");
+ assertUserIsDenied("u2", "write", "b");
+ assertUserIsDenied("u2", "monitor", "b");
+ assertUserIsDenied("u2", "create_index", "b");
+ assertUserIsDenied("u2", "all", "c");
+ assertAccessIsAllowed("u2",
+ "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n");
+ assertAccessIsAllowed("u2", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } ");
+ assertAccessIsAllowed("u2", "PUT",
+ "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n");
+ assertAccessIsAllowed("u2",
+ "GET", "/" + randomIndex() + "/foo/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }");
+ }
+
+ public void testUserU3() throws Exception {
+ // u3 has read_b_role, but all access to a and b via the regex role
+ assertUserIsAllowed("u3", "all", "a");
+ assertUserIsAllowed("u3", "all", "b");
+ assertUserIsDenied("u3", "all", "c");
+ assertAccessIsAllowed("u3",
+ "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n");
+ assertAccessIsAllowed("u3", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } ");
+ assertAccessIsAllowed("u3", "PUT",
+ "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n");
+ assertAccessIsAllowed("u3",
+ "GET", "/" + randomIndex() + "/foo/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }");
+ }
+
+ public void testUserU4() throws Exception {
+ // u4 has read access to a/b and manage access to a*
+ assertUserIsAllowed("u4", "read", "a");
+ assertUserIsAllowed("u4", "manage", "a");
+ assertUserIsDenied("u4", "index", "a");
+
+ assertUserIsAllowed("u4", "read", "b");
+ assertUserIsDenied("u4", "index", "b");
+ assertUserIsDenied("u4", "manage", "b");
+
+
assertUserIsDenied("u4", "all", "c"); + + assertUserIsAllowed("u4", "create_index", "an_index"); + assertUserIsAllowed("u4", "manage", "an_index"); + + assertAccessIsAllowed("u4", + "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsAllowed("u4", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + assertAccessIsDenied("u4", "PUT", + "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + assertAccessIsAllowed("u4", + "GET", "/" + randomIndex() + "/foo/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); + } + + public void testUserU5() throws Exception { + // u5 may read a and read b + assertUserIsAllowed("u5", "read", "a"); + assertUserIsDenied("u5", "manage", "a"); + assertUserIsDenied("u5", "write", "a"); + + assertUserIsAllowed("u5", "read", "b"); + assertUserIsDenied("u5", "manage", "b"); + assertUserIsDenied("u5", "write", "b"); + + assertAccessIsAllowed("u5", + "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsAllowed("u5", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + assertAccessIsDenied("u5", "PUT", + "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + assertAccessIsAllowed("u5", + "GET", "/" + randomIndex() + "/foo/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); + } + + public void testUserU6() throws Exception { + // u6 has all access on a and read access on b + assertUserIsAllowed("u6", "all", "a"); + assertUserIsAllowed("u6", "read", "b"); + assertUserIsDenied("u6", "manage", "b"); + assertUserIsDenied("u6", "write", "b"); + assertUserIsDenied("u6", "all", "c"); + assertAccessIsAllowed("u6", + "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsAllowed("u6", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + assertAccessIsAllowed("u6", "PUT", + "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + assertAccessIsAllowed("u6", + "GET", "/" + randomIndex() + "/foo/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); + } + + public void testUserU7() throws Exception { + // no access at all + assertUserIsDenied("u7", "all", "a"); + assertUserIsDenied("u7", "all", "b"); + assertUserIsDenied("u7", "all", "c"); + assertAccessIsDenied("u7", + "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsDenied("u7", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + assertAccessIsDenied("u7", "PUT", + "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + assertAccessIsDenied("u7", + "GET", "/" + randomIndex() + "/foo/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); + } + + public void testUserU8() throws Exception { + // u8 has admin access and read access on b + assertUserIsAllowed("u8", "all", "a"); + assertUserIsAllowed("u8", "all", "b"); + assertUserIsAllowed("u8", "all", "c"); + assertAccessIsAllowed("u8", + "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsAllowed("u8", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + 
assertAccessIsAllowed("u8", "PUT", + "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + assertAccessIsAllowed("u8", + "GET", "/" + randomIndex() + "/foo/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); + } + + public void testUserU9() throws Exception { + // u9 has write access to a and read access to a/b + assertUserIsAllowed("u9", "crud", "a"); + assertUserIsDenied("u9", "manage", "a"); + assertUserIsAllowed("u9", "read", "b"); + assertUserIsDenied("u9", "manage", "b"); + assertUserIsDenied("u9", "write", "b"); + assertUserIsDenied("u9", "all", "c"); + assertAccessIsAllowed("u9", + "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsAllowed("u9", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + assertAccessIsAllowed("u9", "PUT", + "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + assertAccessIsAllowed("u9", + "GET", "/" + randomIndex() + "/foo/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); + } + + public void testUserU11() throws Exception { + // u11 has access to create c and delete b + assertUserIsDenied("u11", "all", "a"); + + assertUserIsDenied("u11", "manage", "b"); + assertUserIsDenied("u11", "index", "b"); + assertUserIsDenied("u11", "search", "b"); + assertUserIsAllowed("u11", "delete", "b"); + + assertAccessIsAllowed("admin", "DELETE", "/c"); + assertUserIsAllowed("u11", "create_index", "c"); + assertUserIsDenied("u11", "data_access", "c"); + assertUserIsDenied("u11", "monitor", "c"); + + assertAccessIsDenied("u11", + "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsDenied("u11", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + assertBodyHasAccessIsDenied("u11", "PUT", + "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + assertAccessIsDenied("u11", + "GET", "/" + randomIndex() + "/foo/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); + } + + public void testUserU12() throws Exception { + // u12 has data_access to all indices+ crud access to a + assertUserIsDenied("u12", "manage", "a"); + assertUserIsAllowed("u12", "data_access", "a"); + assertUserIsDenied("u12", "manage", "b"); + assertUserIsAllowed("u12", "data_access", "b"); + assertUserIsDenied("u12", "manage", "c"); + assertUserIsAllowed("u12", "data_access", "c"); + assertAccessIsAllowed("u12", + "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsAllowed("u12", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + assertAccessIsAllowed("u12", "PUT", + "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + assertAccessIsAllowed("u12", + "GET", "/" + randomIndex() + "/foo/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); + } + + public void testUserU13() throws Exception { + // u13 has read access on b and index access on a + assertUserIsDenied("u13", "manage", "a"); + assertUserIsAllowed("u13", "index", "a"); + assertUserIsDenied("u13", "delete", "a"); + assertUserIsDenied("u13", "read", "a"); + + assertUserIsDenied("u13", "manage", "b"); + assertUserIsDenied("u13", "write", "b"); + assertUserIsAllowed("u13", "read", "b"); + + 
assertUserIsDenied("u13", "all", "c"); + + assertAccessIsAllowed("u13", + "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsAllowed("u13", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + assertAccessIsAllowed("u13", "PUT", "/a/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + assertBodyHasAccessIsDenied("u13", "PUT", "/b/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + assertAccessIsAllowed("u13", + "GET", "/" + randomIndex() + "/foo/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); + } + + public void testUserU14() throws Exception { + // u14 has access to read a and monitor b + assertUserIsDenied("u14", "manage", "a"); + assertUserIsDenied("u14", "write", "a"); + assertUserIsAllowed("u14", "read", "a"); + + // FIXME, indices:admin/get authorization missing here for _settings call + assertUserIsAllowed("u14", "monitor", "b"); + assertUserIsDenied("u14", "create_index", "b"); + assertUserIsDenied("u14", "data_access", "b"); + + assertUserIsDenied("u14", "all", "c"); + + assertAccessIsAllowed("u14", + "GET", "/" + randomIndex() + "/foo/_msearch", "{}\n{ \"query\" : { \"match_all\" : {} } }\n"); + assertAccessIsAllowed("u14", "POST", "/" + randomIndex() + "/foo/_mget", "{ \"ids\" : [ \"1\", \"2\" ] } "); + assertAccessIsDenied("u14", "PUT", + "/" + randomIndex() + "/foo/_bulk", "{ \"index\" : { \"_id\" : \"123\" } }\n{ \"foo\" : \"bar\" }\n"); + assertAccessIsAllowed("u14", + "GET", "/" + randomIndex() + "/foo/_mtermvectors", "{ \"docs\" : [ { \"_id\": \"1\" }, { \"_id\": \"2\" } ] }"); + } + + public void testThatUnknownUserIsRejectedProperly() throws Exception { + try { + getRestClient().performRequest("GET", "/", + new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + UsernamePasswordToken.basicAuthHeaderValue("idonotexist", new SecureString("passwd".toCharArray())))); + fail("request should have failed"); + } catch(ResponseException e) { + assertThat(e.getResponse().getStatusLine().getStatusCode(), is(401)); + } + } + + private void assertUserExecutes(String user, String action, String index, boolean userIsAllowed) throws Exception { + Map refreshParams = Collections.emptyMap();//singletonMap("refresh", "true"); + + switch (action) { + case "all" : + if (userIsAllowed) { + assertUserIsAllowed(user, "crud", index); + assertUserIsAllowed(user, "manage", index); + } else { + assertUserIsDenied(user, "crud", index); + assertUserIsDenied(user, "manage", index); + } + break; + + case "create_index" : + if (userIsAllowed) { + assertAccessIsAllowed(user, "PUT", "/" + index); + } else { + assertAccessIsDenied(user, "PUT", "/" + index); + } + break; + + case "manage" : + if (userIsAllowed) { + assertAccessIsAllowed(user, "DELETE", "/" + index); + assertUserIsAllowed(user, "create_index", index); + // wait until index ready, but as admin + assertNoTimeout(client().admin().cluster().prepareHealth(index).setWaitForGreenStatus().get()); + assertAccessIsAllowed(user, "POST", "/" + index + "/_refresh"); + assertAccessIsAllowed(user, "GET", "/" + index + "/_analyze", "{ \"text\" : \"test\" }"); + assertAccessIsAllowed(user, "POST", "/" + index + "/_flush"); + assertAccessIsAllowed(user, "POST", "/" + index + "/_forcemerge"); + assertAccessIsAllowed(user, "POST", "/" + index + "/_upgrade", null); + assertAccessIsAllowed(user, "POST", "/" + index + "/_close"); + assertAccessIsAllowed(user, "POST", "/" + index + 
"/_open"); + assertAccessIsAllowed(user, "POST", "/" + index + "/_cache/clear"); + // indexing a document to have the mapping available, and wait for green state to make sure index is created + assertAccessIsAllowed("admin", "PUT", "/" + index + "/foo/1", jsonDoc, refreshParams); + assertNoTimeout(client().admin().cluster().prepareHealth(index).setWaitForGreenStatus().get()); + assertAccessIsAllowed(user, "GET", "/" + index + "/_mapping/foo/field/name"); + assertAccessIsAllowed(user, "GET", "/" + index + "/_settings"); + } else { + assertAccessIsDenied(user, "DELETE", "/" + index); + assertUserIsDenied(user, "create_index", index); + assertAccessIsDenied(user, "POST", "/" + index + "/_refresh"); + assertAccessIsDenied(user, "GET", "/" + index + "/_analyze", "{ \"text\" : \"test\" }"); + assertAccessIsDenied(user, "POST", "/" + index + "/_flush"); + assertAccessIsDenied(user, "POST", "/" + index + "/_forcemerge"); + assertAccessIsDenied(user, "POST", "/" + index + "/_upgrade", null); + assertAccessIsDenied(user, "POST", "/" + index + "/_close"); + assertAccessIsDenied(user, "POST", "/" + index + "/_open"); + assertAccessIsDenied(user, "POST", "/" + index + "/_cache/clear"); + assertAccessIsDenied(user, "GET", "/" + index + "/_mapping/foo/field/name"); + assertAccessIsDenied(user, "GET", "/" + index + "/_settings"); + } + break; + + case "monitor" : + if (userIsAllowed) { + assertAccessIsAllowed(user, "GET", "/" + index + "/_stats"); + assertAccessIsAllowed(user, "GET", "/" + index + "/_segments"); + assertAccessIsAllowed(user, "GET", "/" + index + "/_recovery"); + } else { + assertAccessIsDenied(user, "GET", "/" + index + "/_stats"); + assertAccessIsDenied(user, "GET", "/" + index + "/_segments"); + assertAccessIsDenied(user, "GET", "/" + index + "/_recovery"); + } + break; + + case "data_access" : + if (userIsAllowed) { + assertUserIsAllowed(user, "crud", index); + } else { + assertUserIsDenied(user, "crud", index); + } + break; + + case "crud" : + if (userIsAllowed) { + assertUserIsAllowed(user, "read", index); + assertUserIsAllowed(user, "index", index); + } else { + assertUserIsDenied(user, "read", index); + assertUserIsDenied(user, "index", index); + } + break; + + case "read" : + if (userIsAllowed) { + // admin refresh before executing + assertAccessIsAllowed("admin", "GET", "/" + index + "/_refresh"); + assertAccessIsAllowed(user, "GET", "/" + index + "/_count"); + assertAccessIsAllowed("admin", "GET", "/" + index + "/_search"); + assertAccessIsAllowed("admin", "GET", "/" + index + "/foo/1"); + assertAccessIsAllowed(user, "GET", "/" + index + "/foo/1/_explain", "{ \"query\" : { \"match_all\" : {} } }"); + assertAccessIsAllowed(user, "GET", "/" + index + "/foo/1/_termvector"); + assertUserIsAllowed(user, "search", index); + } else { + assertAccessIsDenied(user, "GET", "/" + index + "/_count"); + assertAccessIsDenied(user, "GET", "/" + index + "/_search"); + assertAccessIsDenied(user, "GET", "/" + index + "/foo/1/_explain", "{ \"query\" : { \"match_all\" : {} } }"); + assertAccessIsDenied(user, "GET", "/" + index + "/foo/1/_termvector"); + assertUserIsDenied(user, "search", index); + } + break; + + case "search" : + if (userIsAllowed) { + assertAccessIsAllowed(user, "GET", "/" + index + "/_search"); + } else { + assertAccessIsDenied(user, "GET", "/" + index + "/_search"); + } + break; + + case "get" : + if (userIsAllowed) { + assertAccessIsAllowed(user, "GET", "/" + index + "/foo/1"); + } else { + assertAccessIsDenied(user, "GET", "/" + index + "/foo/1"); + } + break; + + case "index" 
: + if (userIsAllowed) { + assertAccessIsAllowed(user, "PUT", "/" + index + "/foo/321", "{ \"foo\" : \"bar\" }"); + assertAccessIsAllowed(user, "POST", "/" + index + "/foo/321/_update", "{ \"doc\" : { \"foo\" : \"baz\" } }"); + } else { + assertAccessIsDenied(user, "PUT", "/" + index + "/foo/321", "{ \"foo\" : \"bar\" }"); + assertAccessIsDenied(user, "POST", "/" + index + "/foo/321/_update", "{ \"doc\" : { \"foo\" : \"baz\" } }"); + } + break; + + case "delete" : + String jsonDoc = "{ \"name\" : \"docToDelete\"}"; + assertAccessIsAllowed("admin", "PUT", "/" + index + "/foo/docToDelete", jsonDoc, refreshParams); + assertAccessIsAllowed("admin", "PUT", "/" + index + "/foo/docToDelete2", jsonDoc, refreshParams); + if (userIsAllowed) { + assertAccessIsAllowed(user, "DELETE", "/" + index + "/foo/docToDelete"); + } else { + assertAccessIsDenied(user, "DELETE", "/" + index + "/foo/docToDelete"); + } + break; + + case "write" : + if (userIsAllowed) { + assertUserIsAllowed(user, "index", index); + assertUserIsAllowed(user, "delete", index); + } else { + assertUserIsDenied(user, "index", index); + assertUserIsDenied(user, "delete", index); + } + break; + + default: + fail(String.format(Locale.ROOT, "Unknown action %s to execute", action)); + } + + } + + private void assertUserIsAllowed(String user, String action, String index) throws Exception { + assertUserExecutes(user, action, index, true); + } + + private void assertUserIsDenied(String user, String action, String index) throws Exception { + assertUserExecutes(user, action, index, false); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndicesPermissionsWithAliasesWildcardsAndRegexsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndicesPermissionsWithAliasesWildcardsAndRegexsTests.java new file mode 100644 index 0000000000000..9982b42b859f1 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndicesPermissionsWithAliasesWildcardsAndRegexsTests.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.integration; + +import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.test.SecurityIntegTestCase; + +import java.util.Collections; + +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; + +public class IndicesPermissionsWithAliasesWildcardsAndRegexsTests extends SecurityIntegTestCase { + + protected static final SecureString USERS_PASSWD = new SecureString("change_me".toCharArray()); + protected static final String USERS_PASSWD_HASHED = new String(Hasher.BCRYPT.hash(new SecureString("change_me".toCharArray()))); + + @Override + protected String configUsers() { + return super.configUsers() + + "user1:" + USERS_PASSWD_HASHED + "\n"; + } + + @Override + protected String configUsersRoles() { + return super.configUsersRoles() + + "role1:user1\n"; + } + + @Override + protected String configRoles() { + return super.configRoles() + + "\nrole1:\n" + + " cluster: [ all ]\n" + + " indices:\n" + + " - names: 't*'\n" + + " privileges: [ALL]\n" + + " field_security:\n" + + " grant: [ field1 ]\n" + + " - names: 'my_alias'\n" + + " privileges: [ALL]\n" + + " field_security:\n" + + " grant: [ field2 ]\n" + + " - names: '/an_.*/'\n" + + " privileges: [ALL]\n" + + " field_security:\n" + + " grant: [ field3 ]\n"; + } + + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(XPackSettings.DLS_FLS_ENABLED.getKey(), true) + .build(); + } + + public void testResolveWildcardsRegexs() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .addMapping("type1", "field1", "type=text", "field2", "type=text") + .addAlias(new Alias("my_alias")) + .addAlias(new Alias("an_alias")) + ); + client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2", "field3", "value3") + .setRefreshPolicy(IMMEDIATE) + .get(); + + GetResponse getResponse = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareGet("test", "type1", "1") + .get(); + assertThat(getResponse.getSource().size(), equalTo(1)); + assertThat((String) getResponse.getSource().get("field1"), equalTo("value1")); + + getResponse = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareGet("my_alias", "type1", "1") + .get(); + assertThat(getResponse.getSource().size(), equalTo(1)); + assertThat((String) getResponse.getSource().get("field2"), equalTo("value2")); + + getResponse = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareGet("an_alias", "type1", "1") + .get(); + assertThat(getResponse.getSource().size(), equalTo(1)); + assertThat((String) getResponse.getSource().get("field3"), equalTo("value3")); 
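+ // taken together, each grant in role1 (index wildcard 't*', alias name 'my_alias', alias regex '/an_.*/') exposes exactly one field to user1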
+ } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java new file mode 100644 index 0000000000000..a3174a02e99dd --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java @@ -0,0 +1,205 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.integration; + +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; +import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse; +import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.MultiSearchResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; + +import java.util.Locale; +import java.util.Map; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class KibanaUserRoleIntegTests extends SecurityIntegTestCase { + + protected static final SecureString USERS_PASSWD = new SecureString("change_me".toCharArray()); + protected static final String USERS_PASSWD_HASHED = new String(Hasher.BCRYPT.hash(new SecureString("change_me".toCharArray()))); + + @Override + public String configRoles() { + return super.configRoles() + "\n" + + "my_kibana_user:\n" + + " indices:\n" + + " - names: 'logstash-*'\n" + + " privileges:\n" + + " - view_index_metadata\n" + + " - read\n"; + } + + @Override + public String configUsers() { + return super.configUsers() + + "kibana_user:" + USERS_PASSWD_HASHED; + } + + @Override + public String configUsersRoles() { + return super.configUsersRoles() + + "my_kibana_user:kibana_user\n" + + "kibana_user:kibana_user"; + } + + public void testFieldMappings() throws Exception { + final String index = "logstash-20-12-2015"; + final String type = "event"; + final String field = "foo"; + indexRandom(true, client().prepareIndex().setIndex(index).setType(type).setSource(field, "bar")); + + GetFieldMappingsResponse response = 
client().admin().indices().prepareGetFieldMappings().addIndices("logstash-*").setFields("*") + .includeDefaults(true).get(); + FieldMappingMetaData fieldMappingMetaData = response.fieldMappings(index, type, field); + assertThat(fieldMappingMetaData, notNullValue()); + assertThat(fieldMappingMetaData.isNull(), is(false)); + + response = client() + .filterWithHeader(singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD))) + .admin().indices().prepareGetFieldMappings().addIndices("logstash-*") + .setFields("*") + .includeDefaults(true).get(); + FieldMappingMetaData fieldMappingMetaData1 = response.fieldMappings(index, type, field); + assertThat(fieldMappingMetaData1, notNullValue()); + assertThat(fieldMappingMetaData1.isNull(), is(false)); + assertThat(fieldMappingMetaData1.fullName(), equalTo(fieldMappingMetaData.fullName())); + } + + public void testValidateQuery() throws Exception { + final String index = "logstash-20-12-2015"; + final String type = "event"; + final String field = "foo"; + indexRandom(true, client().prepareIndex().setIndex(index).setType(type).setSource(field, "bar")); + + ValidateQueryResponse response = client().admin().indices() + .prepareValidateQuery(index).setQuery(QueryBuilders.termQuery(field, "bar")).get(); + assertThat(response.isValid(), is(true)); + + response = client() + .filterWithHeader(singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD))) + .admin().indices() + .prepareValidateQuery(index) + .setQuery(QueryBuilders.termQuery(field, "bar")).get(); + assertThat(response.isValid(), is(true)); + } + + public void testSearchAndMSearch() throws Exception { + final String index = "logstash-20-12-2015"; + final String type = "event"; + final String field = "foo"; + indexRandom(true, client().prepareIndex().setIndex(index).setType(type).setSource(field, "bar")); + + SearchResponse response = client().prepareSearch(index).setQuery(QueryBuilders.matchAllQuery()).get(); + final long hits = response.getHits().getTotalHits(); + assertThat(hits, greaterThan(0L)); + response = client() + .filterWithHeader(singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD))) + .prepareSearch(index) + .setQuery(QueryBuilders.matchAllQuery()).get(); + assertEquals(response.getHits().getTotalHits(), hits); + + + MultiSearchResponse multiSearchResponse = client().prepareMultiSearch() + .add(client().prepareSearch(index).setQuery(QueryBuilders.matchAllQuery())).get(); + final long multiHits = multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits(); + assertThat(hits, greaterThan(0L)); + multiSearchResponse = client() + .filterWithHeader(singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD))) + .prepareMultiSearch() + .add(client().prepareSearch(index).setQuery(QueryBuilders.matchAllQuery())).get(); + assertEquals(multiSearchResponse.getResponses()[0].getResponse().getHits().getTotalHits(), multiHits); + } + + public void testGetIndex() throws Exception { + final String index = "logstash-20-12-2015"; + final String type = "event"; + final String field = "foo"; + indexRandom(true, client().prepareIndex().setIndex(index).setType(type).setSource(field, "bar")); + + GetIndexResponse response = client().admin().indices().prepareGetIndex().setIndices(index).get(); + assertThat(response.getIndices(), arrayContaining(index)); + + response = client() + 
.filterWithHeader(singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD))) + .admin().indices().prepareGetIndex() + .setIndices(index).get(); + assertThat(response.getIndices(), arrayContaining(index)); + } + + public void testCreateIndexDeleteInKibanaIndex() throws Exception { + final String index = randomBoolean() ? ".kibana" : ".kibana-" + randomAlphaOfLengthBetween(1, 10).toLowerCase(Locale.ENGLISH); + + if (randomBoolean()) { + CreateIndexResponse createIndexResponse = client().filterWithHeader(singletonMap("Authorization", + UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD))) + .admin().indices().prepareCreate(index).get(); + assertThat(createIndexResponse.isAcknowledged(), is(true)); + } + + IndexResponse response = client() + .filterWithHeader(singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD))) + .prepareIndex() + .setIndex(index) + .setType("dashboard") + .setSource("foo", "bar") + .setRefreshPolicy(IMMEDIATE) + .get(); + assertEquals(DocWriteResponse.Result.CREATED, response.getResult()); + + DeleteResponse deleteResponse = client() + .filterWithHeader(singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD))) + .prepareDelete(index, "dashboard", response.getId()) + .get(); + assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); + } + + public void testGetMappings() throws Exception { + final String index = "logstash-20-12-2015"; + final String type = "event"; + final String field = "foo"; + indexRandom(true, client().prepareIndex().setIndex(index).setType(type).setSource(field, "bar")); + + GetMappingsResponse response = client() + .filterWithHeader(singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD))) + .admin() + .indices() + .prepareGetMappings("logstash-*") + .get(); + ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappingsMap = response.getMappings(); + assertNotNull(mappingsMap); + assertNotNull(mappingsMap.get(index)); + assertNotNull(mappingsMap.get(index).get(type)); + MappingMetaData mappingMetaData = mappingsMap.get(index).get(type); + assertThat(mappingMetaData.getSourceAsMap(), hasKey("properties")); + assertThat(mappingMetaData.getSourceAsMap().get("properties"), instanceOf(Map.class)); + Map<String, Object> propertiesMap = (Map<String, Object>) mappingMetaData.getSourceAsMap().get("properties"); + assertThat(propertiesMap, hasKey(field)); + } + + // TODO: When we have an XPackIntegTestCase, this should test that we can send MonitoringBulkActions + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java new file mode 100644 index 0000000000000..a8750d5b80232 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java @@ -0,0 +1,201 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.integration; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.MultiSearchResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; + +import java.util.Collections; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.is; + +public class MultipleIndicesPermissionsTests extends SecurityIntegTestCase { + protected static final SecureString PASSWD = new SecureString("passwd".toCharArray()); + protected static final String USERS_PASSWD_HASHED = new String(Hasher.BCRYPT.hash(PASSWD)); + + @Override + protected String configRoles() { + return SecuritySettingsSource.TEST_ROLE + ":\n" + + " cluster: [ all ]\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [manage]\n" + + " - names: '/.*/'\n" + + " privileges: [write]\n" + + " - names: 'test'\n" + + " privileges: [read]\n" + + " - names: 'test1'\n" + + " privileges: [read]\n" + + "\n" + + "role_a:\n" + + " indices:\n" + + " - names: 'a'\n" + + " privileges: [all]\n" + + "\n" + + "role_b:\n" + + " indices:\n" + + " - names: 'b'\n" + + " privileges: [all]\n"; + } + + @Override + protected String configUsers() { + return SecuritySettingsSource.CONFIG_STANDARD_USER + + "user_a:" + USERS_PASSWD_HASHED + "\n" + + "user_ab:" + USERS_PASSWD_HASHED + "\n"; + } + + @Override + protected String configUsersRoles() { + return SecuritySettingsSource.CONFIG_STANDARD_USER_ROLES + + "role_a:user_a,user_ab\n" + + "role_b:user_ab\n"; + } + + public void testSingleRole() throws Exception { + IndexResponse indexResponse = index("test", "type", jsonBuilder() + .startObject() + .field("name", "value") + .endObject()); + assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); + + + indexResponse = index("test1", "type", jsonBuilder() + .startObject() + .field("name", "value1") + .endObject()); + assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); + + refresh(); + + Client client = internalCluster().transportClient(); + + // no specifying an index, should replace indices with the permitted ones (test & test1) + SearchResponse searchResponse = client.prepareSearch().setQuery(matchAllQuery()).get(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 2); + + // _all should expand to all the permitted indices + searchResponse = client.prepareSearch("_all").setQuery(matchAllQuery()).get(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 2); + + // wildcards should expand to all the permitted indices + searchResponse = 
client.prepareSearch("test*").setQuery(matchAllQuery()).get(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 2); + + try { + client.prepareSearch("test", "test2").setQuery(matchAllQuery()).get(); + fail("expected an authorization exception when one of mulitple indices is forbidden"); + } catch (ElasticsearchSecurityException e) { + // expected + assertThat(e.status(), is(RestStatus.FORBIDDEN)); + } + + MultiSearchResponse msearchResponse = client.prepareMultiSearch() + .add(client.prepareSearch("test")) + .add(client.prepareSearch("test1")) + .get(); + MultiSearchResponse.Item[] items = msearchResponse.getResponses(); + assertThat(items.length, is(2)); + assertThat(items[0].isFailure(), is(false)); + searchResponse = items[0].getResponse(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 1); + assertThat(items[1].isFailure(), is(false)); + searchResponse = items[1].getResponse(); + assertNoFailures(searchResponse); + assertHitCount(searchResponse, 1); + } + + public void testMultipleRoles() throws Exception { + IndexResponse indexResponse = index("a", "type", jsonBuilder() + .startObject() + .field("name", "value_a") + .endObject()); + assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); + + indexResponse = index("b", "type", jsonBuilder() + .startObject() + .field("name", "value_b") + .endObject()); + assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); + + refresh(); + + Client client = internalCluster().transportClient(); + + SearchResponse response = client + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_a", PASSWD))) + .prepareSearch("a") + .get(); + assertNoFailures(response); + assertHitCount(response, 1); + + String[] indices = randomDouble() < 0.3 ? + new String[] { "_all"} : randomBoolean() ? + new String[] { "*" } : + new String[] {}; + response = client + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_a", PASSWD))) + .prepareSearch(indices) + .get(); + assertNoFailures(response); + assertHitCount(response, 1); + + try { + indices = randomBoolean() ? new String[] { "a", "b" } : new String[] { "b", "a" }; + client + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_a", PASSWD))) + .prepareSearch(indices) + .get(); + fail("expected an authorization excpetion when trying to search on multiple indices where there are no search permissions on " + + "one/some of them"); + } catch (ElasticsearchSecurityException e) { + // expected + assertThat(e.status(), is(RestStatus.FORBIDDEN)); + } + + response = client.filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_ab", PASSWD))) + .prepareSearch("b") + .get(); + assertNoFailures(response); + assertHitCount(response, 1); + + indices = randomBoolean() ? new String[] { "a", "b" } : new String[] { "b", "a" }; + response = client.filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_ab", PASSWD))) + .prepareSearch(indices) + .get(); + assertNoFailures(response); + assertHitCount(response, 2); + + indices = randomDouble() < 0.3 ? + new String[] { "_all"} : randomBoolean() ? 
+ new String[] { "*" } : + new String[] {}; + response = client.filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_ab", PASSWD))) + .prepareSearch(indices) + .get(); + assertNoFailures(response); + assertHitCount(response, 2); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/PermissionPrecedenceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/PermissionPrecedenceTests.java new file mode 100644 index 0000000000000..41b5787cb26f3 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/PermissionPrecedenceTests.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.integration; + +import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction; +import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; + +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.test.SecurityTestsUtils.assertThrowsAuthorizationException; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.hasSize; + +/** + * This test makes sure that if an action is a cluster action (according to our + * internal categorization in security), then we apply the cluster priv checks and don't + * fallback on the indices privs at all. In particular, this is useful when we want to treat + * actions that are normally categorized as index actions as cluster actions - for example, + * index template actions. 
+ */ +public class PermissionPrecedenceTests extends SecurityIntegTestCase { + protected static final String USERS_PASSWD_HASHED = new String(Hasher.BCRYPT.hash(new SecureString("test123".toCharArray()))); + + @Override + protected String configRoles() { + return "admin:\n" + + " cluster: [ all ] \n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ all ]" + + "\n" + + "user:\n" + + " indices:\n" + + " - names: 'test_*'\n" + + " privileges: [ all ]"; + } + + @Override + protected String configUsers() { + return "admin:" + USERS_PASSWD_HASHED + "\n" + + "client:" + USERS_PASSWD_HASHED + "\n" + + "user:" + USERS_PASSWD_HASHED + "\n"; + } + + @Override + protected String configUsersRoles() { + return "admin:admin\n" + + "transport_client:client\n" + + "user:user\n"; + } + + @Override + protected String nodeClientUsername() { + return "admin"; + } + + @Override + protected SecureString nodeClientPassword() { + return new SecureString("test123".toCharArray()); + } + + @Override + protected String transportClientUsername() { + return "admin"; + } + + @Override + protected SecureString transportClientPassword() { + return new SecureString("test123".toCharArray()); + } + + public void testDifferentCombinationsOfIndices() throws Exception { + Client client = internalCluster().transportClient(); + + // first let's try with "admin"... all should work + + PutIndexTemplateResponse putResponse = client + .filterWithHeader(Collections.singletonMap(UsernamePasswordToken.BASIC_AUTH_HEADER, + basicAuthHeaderValue(transportClientUsername(), transportClientPassword()))) + .admin().indices().preparePutTemplate("template1") + .setTemplate("test_*") + .get(); + assertAcked(putResponse); + + GetIndexTemplatesResponse getResponse = client.admin().indices().prepareGetTemplates("template1") + .get(); + List<IndexTemplateMetaData> templates = getResponse.getIndexTemplates(); + assertThat(templates, hasSize(1)); + + // now let's try with "user" + + Map<String, String> auth = Collections.singletonMap(UsernamePasswordToken.BASIC_AUTH_HEADER, basicAuthHeaderValue("user", + transportClientPassword())); + assertThrowsAuthorizationException(client.filterWithHeader(auth).admin().indices().preparePutTemplate("template1") + .setTemplate("test_*")::get, PutIndexTemplateAction.NAME, "user"); + + Map<String, String> headers = Collections.singletonMap(UsernamePasswordToken.BASIC_AUTH_HEADER, basicAuthHeaderValue("user", + new SecureString("test123".toCharArray()))); + assertThrowsAuthorizationException(client.filterWithHeader(headers).admin().indices().prepareGetTemplates("template1")::get, + GetIndexTemplatesAction.NAME, "user"); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityCachePermissionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityCachePermissionTests.java new file mode 100644 index 0000000000000..8a570aac28663 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityCachePermissionTests.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.integration; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.indices.TermsLookup; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.junit.Before; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +public class SecurityCachePermissionTests extends SecurityIntegTestCase { + + private final String READ_ONE_IDX_USER = "read_user"; + + @Override + public String configUsers() { + return super.configUsers() + + READ_ONE_IDX_USER + ":" + SecuritySettingsSource.TEST_PASSWORD_HASHED + "\n"; + } + + @Override + public String configRoles() { + return super.configRoles() + + "\nread_one_idx:\n" + + " indices:\n" + + " 'data':\n" + + " - read\n"; + } + + @Override + public String configUsersRoles() { + return super.configUsersRoles() + + "read_one_idx:" + READ_ONE_IDX_USER + "\n"; + } + + @Before + public void loadData() { + index("data", "a", "1", "{ \"name\": \"John\", \"token\": \"token1\" }"); + index("tokens", "tokens", "1", "{ \"group\": \"1\", \"tokens\": [\"token1\", \"token2\"] }"); + refresh(); + } + + public void testThatTermsFilterQueryDoesntLeakData() { + SearchResponse response = client().prepareSearch("data").setTypes("a").setQuery(QueryBuilders.constantScoreQuery( + QueryBuilders.termsLookupQuery("token", new TermsLookup("tokens", "tokens", "1", "tokens")))) + .execute().actionGet(); + assertThat(response.isTimedOut(), is(false)); + assertThat(response.getHits().getHits().length, is(1)); + + // Repeat with unauthorized user!!!! + try { + response = client().filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(READ_ONE_IDX_USER, + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))) + .prepareSearch("data").setTypes("a").setQuery(QueryBuilders.constantScoreQuery( + QueryBuilders.termsLookupQuery("token", new TermsLookup("tokens", "tokens", "1", "tokens")))) + .execute().actionGet(); + fail("search phase exception should have been thrown! response was:\n" + response.toString()); + } catch (ElasticsearchSecurityException e) { + assertThat(e.toString(), containsString("ElasticsearchSecurityException[action")); + assertThat(e.toString(), containsString("unauthorized")); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityClearScrollTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityClearScrollTests.java new file mode 100644 index 0000000000000..2ec899dbafe0d --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/SecurityClearScrollTests.java @@ -0,0 +1,139 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.integration; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.search.ClearScrollResponse; +import org.elasticsearch.action.search.MultiSearchRequestBuilder; +import org.elasticsearch.action.search.MultiSearchResponse; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.junit.After; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +public class SecurityClearScrollTests extends SecurityIntegTestCase { + protected static final String USERS_PASSWD_HASHED = new String(Hasher.BCRYPT.hash(new SecureString("change_me".toCharArray()))); + + private List<String> scrollIds; + + @Override + protected String configUsers() { + return super.configUsers() + + "allowed_user:" + USERS_PASSWD_HASHED + "\n" + + "denied_user:" + USERS_PASSWD_HASHED + "\n" ; + } + + @Override + protected String configUsersRoles() { + return super.configUsersRoles() + + "allowed_role:allowed_user\n" + + "denied_role:denied_user\n"; + } + + @Override + protected String configRoles() { + return super.configRoles() + + "\nallowed_role:\n" + + " cluster:\n" + + " - cluster:admin/indices/scroll/clear_all \n" + + "denied_role:\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ALL]\n"; + } + + @Before + public void indexRandomDocuments() { + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk().setRefreshPolicy(IMMEDIATE); + for (int i = 0; i < randomIntBetween(10, 50); i++) { + bulkRequestBuilder.add(client().prepareIndex("index", "type", + String.valueOf(i)).setSource("{ \"foo\" : \"bar\" }", XContentType.JSON)); + } + BulkResponse bulkItemResponses = bulkRequestBuilder.get(); + assertThat(bulkItemResponses.hasFailures(), is(false)); + + MultiSearchRequestBuilder multiSearchRequestBuilder = client().prepareMultiSearch(); + int count = randomIntBetween(5, 15); + for (int i = 0; i < count; i++) { + multiSearchRequestBuilder.add(client().prepareSearch("index").setTypes("type").setScroll("10m").setSize(1)); + } + MultiSearchResponse multiSearchResponse = multiSearchRequestBuilder.get(); + scrollIds = getScrollIds(multiSearchResponse); + } + + @After + public void clearScrolls() { + // clear all scroll ids as the default admin user, just in case any of the tests fails + client().prepareClearScroll().addScrollId("_all").get(); + } + + public void testThatClearingAllScrollIdsWorks() throws Exception { + String user = "allowed_user:change_me"; + String basicAuth = basicAuthHeaderValue("allowed_user", new SecureString("change_me".toCharArray())); + Map<String, String> headers = new
HashMap<>(); + headers.put(SecurityField.USER_SETTING.getKey(), user); + headers.put(BASIC_AUTH_HEADER, basicAuth); + ClearScrollResponse clearScrollResponse = internalCluster().transportClient().filterWithHeader(headers) + .prepareClearScroll() + .addScrollId("_all").get(); + assertThat(clearScrollResponse.isSucceeded(), is(true)); + + assertThatScrollIdsDoNotExist(scrollIds); + } + + public void testThatClearingAllScrollIdsRequirePermissions() throws Exception { + String user = "denied_user:change_me"; + String basicAuth = basicAuthHeaderValue("denied_user", new SecureString("change_me".toCharArray())); + Map<String, String> headers = new HashMap<>(); + headers.put(SecurityField.USER_SETTING.getKey(), user); + headers.put(BASIC_AUTH_HEADER, basicAuth); + assertThrows(internalCluster().transportClient().filterWithHeader(headers) + .prepareClearScroll() + .addScrollId("_all"), ElasticsearchSecurityException.class, + "action [cluster:admin/indices/scroll/clear_all] is unauthorized for user [denied_user]"); + + // deletion of scroll ids should work + ClearScrollResponse clearByIdScrollResponse = client().prepareClearScroll().setScrollIds(scrollIds).get(); + assertThat(clearByIdScrollResponse.isSucceeded(), is(true)); + + // test with each id, that they do not exist + assertThatScrollIdsDoNotExist(scrollIds); + } + + private void assertThatScrollIdsDoNotExist(List<String> scrollIds) { + for (String scrollId : scrollIds) { + SearchPhaseExecutionException expectedException = + expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearchScroll(scrollId).get()); + assertThat(expectedException.toString(), containsString("SearchContextMissingException")); + } + } + + private List<String> getScrollIds(MultiSearchResponse multiSearchResponse) { + List<String> ids = new ArrayList<>(); + for (MultiSearchResponse.Item item : multiSearchResponse) { + ids.add(item.getResponse().getScrollId()); + } + return ids; + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java new file mode 100644 index 0000000000000..87db72bcf0285 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.integration; + +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.test.ESIntegTestCase.ClusterScope; +import org.elasticsearch.test.SecurityIntegTestCase; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; + +/** + * Integration test that uses multiple data nodes to test that the shrink index api works with security.
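+ *
+ * The test below follows what is assumed to be the usual shrink preparation: relocate a copy of every shard onto a
+ * single data node ({@code index.routing.allocation.require._name}), block writes ({@code index.blocks.write}), wait
+ * for green, then resize into a single-shard index and verify that all documents are still found.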
+ */ +@ClusterScope(minNumDataNodes = 2) +public class ShrinkIndexWithSecurityTests extends SecurityIntegTestCase { + + @Override + protected final boolean ignoreExternalCluster() { + return true; + } + + @Override + protected int minimumNumberOfShards() { + return 2; + } + + public void testShrinkIndex() throws Exception { + final int randomNumberOfDocs = scaledRandomIntBetween(2, 12); + for (int i = 0; i < randomNumberOfDocs; i++) { + client().prepareIndex("bigindex", "type").setSource("foo", "bar").get(); + } + + ImmutableOpenMap dataNodes = client().admin().cluster().prepareState().get().getState().nodes() + .getDataNodes(); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class); + final String mergeNode = discoveryNodes[0].getName(); + ensureGreen(); + // relocate all shards to one node such that we can merge it. + client().admin().indices().prepareUpdateSettings("bigindex") + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", mergeNode) + .put("index.blocks.write", true)).get(); + + // wait for green and then shrink + ensureGreen(); + assertAcked(client().admin().indices().prepareResizeIndex("bigindex", "shrunk_bigindex") + .setSettings(Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", 1) + .build())); + + // verify all docs + ensureGreen(); + assertHitCount(client().prepareSearch("shrunk_bigindex").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), + randomNumberOfDocs); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java new file mode 100644 index 0000000000000..351cf91bf9428 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java @@ -0,0 +1,280 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.license; + +import org.apache.http.message.BasicHeader; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.elasticsearch.action.admin.cluster.stats.ClusterStatsIndices; +import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.transport.Netty4Plugin; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.xpack.core.TestXPackTransportClient; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.core.security.action.user.GetUsersResponse; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.LocalStateSecurity; +import org.junit.After; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Collection; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +@TestLogging("org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE") +public class LicensingTests extends SecurityIntegTestCase { + public static final String ROLES = + SecuritySettingsSource.TEST_ROLE + ":\n" + + " cluster: [ all ]\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [manage]\n" + + " - names: '/.*/'\n" + + " privileges: [write]\n" + + " - names: 'test'\n" + + " privileges: [read]\n" + + " - names: 'test1'\n" + + " privileges: [read]\n" + + "\n" + + "role_a:\n" + + " indices:\n" + + " - names: 'a'\n" + + " privileges: [all]\n" + + "\n" + + "role_b:\n" + + " indices:\n" + + " - names: 'b'\n" + + " privileges: [all]\n"; + + public static final String USERS = + SecuritySettingsSource.CONFIG_STANDARD_USER + + "user_a:{plain}passwd\n" + + "user_b:{plain}passwd\n"; + + public static final String USERS_ROLES = + SecuritySettingsSource.CONFIG_STANDARD_USER_ROLES + + "role_a:user_a,user_b\n" + + 
"role_b:user_b\n"; + + @Override + protected String configRoles() { + return ROLES; + } + + @Override + protected String configUsers() { + return USERS; + } + + @Override + protected String configUsersRoles() { + return USERS_ROLES; + } + + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .build(); + } + + @Override + protected Collection> nodePlugins() { + ArrayList> plugins = new ArrayList<>(super.nodePlugins()); + plugins.add(Netty4Plugin.class); // for http + return plugins; + } + + @Before + public void resetLicensing() { + enableLicensing(); + } + + @After + public void cleanupSecurityIndex() { + deleteSecurityIndex(); + } + + public void testEnableDisableBehaviour() throws Exception { + IndexResponse indexResponse = index("test", "type", jsonBuilder() + .startObject() + .field("name", "value") + .endObject()); + assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); + + + indexResponse = index("test1", "type", jsonBuilder() + .startObject() + .field("name", "value1") + .endObject()); + assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); + + refresh(); + // wait for all replicas to be started (to make sure that there are no more cluster state updates when we disable licensing) + assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState().routingTable() + .shardsWithState(ShardRoutingState.INITIALIZING).isEmpty())); + + Client client = internalCluster().transportClient(); + + disableLicensing(); + + assertElasticsearchSecurityException(() -> client.admin().indices().prepareStats().get()); + assertElasticsearchSecurityException(() -> client.admin().cluster().prepareClusterStats().get()); + assertElasticsearchSecurityException(() -> client.admin().cluster().prepareHealth().get()); + assertElasticsearchSecurityException(() -> client.admin().cluster().prepareNodesStats().get()); + + enableLicensing(randomFrom(License.OperationMode.values())); + + IndicesStatsResponse indicesStatsResponse = client.admin().indices().prepareStats().get(); + assertNoFailures(indicesStatsResponse); + + ClusterStatsResponse clusterStatsNodeResponse = client.admin().cluster().prepareClusterStats().get(); + assertThat(clusterStatsNodeResponse, notNullValue()); + ClusterStatsIndices indices = clusterStatsNodeResponse.getIndicesStats(); + assertThat(indices, notNullValue()); + assertThat(indices.getIndexCount(), greaterThanOrEqualTo(2)); + + ClusterHealthResponse clusterIndexHealth = client.admin().cluster().prepareHealth().get(); + assertThat(clusterIndexHealth, notNullValue()); + + NodesStatsResponse nodeStats = client.admin().cluster().prepareNodesStats().get(); + assertThat(nodeStats, notNullValue()); + } + + public void testRestAuthenticationByLicenseType() throws Exception { + Response response = getRestClient().performRequest("GET", "/"); + // the default of the licensing tests is basic + assertThat(response.getStatusLine().getStatusCode(), is(200)); + ResponseException e = expectThrows(ResponseException.class, + () -> getRestClient().performRequest("GET", "/_xpack/security/_authenticate")); + assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); + + // generate a new license with a mode that enables auth + License.OperationMode mode = randomFrom(License.OperationMode.GOLD, License.OperationMode.TRIAL, + License.OperationMode.PLATINUM, License.OperationMode.STANDARD); + enableLicensing(mode); + e = 
expectThrows(ResponseException.class, () -> getRestClient().performRequest("GET", "/")); + assertThat(e.getResponse().getStatusLine().getStatusCode(), is(401)); + e = expectThrows(ResponseException.class, + () -> getRestClient().performRequest("GET", "/_xpack/security/_authenticate")); + assertThat(e.getResponse().getStatusLine().getStatusCode(), is(401)); + + final String basicAuthValue = UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_USER_NAME, + new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())); + response = getRestClient().performRequest("GET", "/", new BasicHeader("Authorization", basicAuthValue)); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + response = getRestClient().performRequest("GET", "/_xpack/security/_authenticate", + new BasicHeader("Authorization", basicAuthValue)); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + + } + + public void testSecurityActionsByLicenseType() throws Exception { + // security actions should not work! + Settings settings = internalCluster().transportClient().settings(); + try (TransportClient client = new TestXPackTransportClient(settings, LocalStateSecurity.class)) { + client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); + new SecurityClient(client).prepareGetUsers().get(); + fail("security actions should not be enabled!"); + } catch (ElasticsearchSecurityException e) { + assertThat(e.status(), is(RestStatus.FORBIDDEN)); + assertThat(e.getMessage(), containsString("non-compliant")); + } + + // enable a license that enables security + License.OperationMode mode = randomFrom(License.OperationMode.GOLD, License.OperationMode.TRIAL, + License.OperationMode.PLATINUM, License.OperationMode.STANDARD); + enableLicensing(mode); + // security actions should not work! + try (TransportClient client = new TestXPackTransportClient(settings, LocalStateSecurity.class)) { + client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); + GetUsersResponse response = new SecurityClient(client).prepareGetUsers().get(); + assertNotNull(response); + } + } + + public void testTransportClientAuthenticationByLicenseType() throws Exception { + Settings.Builder builder = Settings.builder() + .put(internalCluster().transportClient().settings()); + // remove user info + builder.remove(SecurityField.USER_SETTING.getKey()); + builder.remove(ThreadContext.PREFIX + "." 
+ UsernamePasswordToken.BASIC_AUTH_HEADER); + + // basic has no auth + try (TransportClient client = new TestXPackTransportClient(builder.build(), LocalStateSecurity.class)) { + client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); + assertGreenClusterState(client); + } + + // enable a license that enables security + License.OperationMode mode = randomFrom(License.OperationMode.GOLD, License.OperationMode.TRIAL, + License.OperationMode.PLATINUM, License.OperationMode.STANDARD); + enableLicensing(mode); + + try (TransportClient client = new TestXPackTransportClient(builder.build(), LocalStateSecurity.class)) { + client.addTransportAddress(internalCluster().getDataNodeInstance(Transport.class).boundAddress().publishAddress()); + client.admin().cluster().prepareHealth().get(); + fail("should not have been able to connect to a node!"); + } catch (NoNodeAvailableException e) { + // expected + } + } + + private static void assertElasticsearchSecurityException(ThrowingRunnable runnable) { + ElasticsearchSecurityException ee = expectThrows(ElasticsearchSecurityException.class, runnable); + assertThat(ee.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), hasItem(XPackField.SECURITY)); + assertThat(ee.status(), is(RestStatus.FORBIDDEN)); + } + + public static void disableLicensing() { + disableLicensing(License.OperationMode.BASIC); + } + + public static void disableLicensing(License.OperationMode operationMode) { + for (XPackLicenseState licenseState : internalCluster().getInstances(XPackLicenseState.class)) { + licenseState.update(operationMode, false); + } + } + + public static void enableLicensing() { + enableLicensing(License.OperationMode.BASIC); + } + + public static void enableLicensing(License.OperationMode operationMode) { + for (XPackLicenseState licenseState : internalCluster().getInstances(XPackLicenseState.class)) { + licenseState.update(operationMode, true); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java new file mode 100644 index 0000000000000..2727353b36f0b --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.test; + + +import org.apache.http.HttpEntity; +import org.apache.http.entity.ContentType; +import org.apache.http.message.BasicHeader; +import org.apache.http.nio.entity.NStringEntity; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.xpack.core.security.SecurityLifecycleServiceField; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; +import org.elasticsearch.xpack.core.security.user.ElasticUser; +import org.elasticsearch.xpack.core.security.user.KibanaUser; +import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.Set; + +/** + * Test case with method to handle the starting and stopping the stores for native users and roles + */ +public abstract class NativeRealmIntegTestCase extends SecurityIntegTestCase { + + @Before + public void ensureNativeStoresStarted() throws Exception { + assertSecurityIndexActive(); + if (shouldSetReservedUserPasswords()) { + setupReservedPasswords(); + } + } + + @After + public void stopESNativeStores() throws Exception { + deleteSecurityIndex(); + + if (getCurrentClusterScope() == Scope.SUITE) { + // Clear the realm cache for all realms since we use a SUITE scoped cluster + SecurityClient client = securityClient(internalCluster().transportClient()); + client.prepareClearRealmCache().get(); + } + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .build(); + } + + @Override + public Set excludeTemplates() { + Set templates = Sets.newHashSet(super.excludeTemplates()); + templates.add(SecurityLifecycleServiceField.SECURITY_TEMPLATE_NAME); // don't remove the security index template + return templates; + } + + private SecureString reservedPassword = SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING; + + protected SecureString getReservedPassword() { + return reservedPassword; + } + + protected boolean shouldSetReservedUserPasswords() { + return true; + } + + public void setupReservedPasswords() throws IOException { + setupReservedPasswords(getRestClient()); + } + + public void setupReservedPasswords(RestClient restClient) throws IOException { + logger.info("setting up reserved passwords for test"); + { + String payload = "{\"password\": \"" + new String(reservedPassword.getChars()) + "\"}"; + HttpEntity entity = new NStringEntity(payload, ContentType.APPLICATION_JSON); + BasicHeader authHeader = new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + UsernamePasswordToken.basicAuthHeaderValue(ElasticUser.NAME, BOOTSTRAP_PASSWORD)); + String route = "/_xpack/security/user/elastic/_password"; + Response response = restClient.performRequest("PUT", route, Collections.emptyMap(), entity, authHeader); + assertEquals(response.getStatusLine().getReasonPhrase(), 200, response.getStatusLine().getStatusCode()); + } + + for (String username : Arrays.asList(KibanaUser.NAME, 
LogstashSystemUser.NAME, BeatsSystemUser.NAME)) { + String payload = "{\"password\": \"" + new String(reservedPassword.getChars()) + "\"}"; + HttpEntity entity = new NStringEntity(payload, ContentType.APPLICATION_JSON); + BasicHeader authHeader = new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + UsernamePasswordToken.basicAuthHeaderValue(ElasticUser.NAME, reservedPassword)); + String route = "/_xpack/security/user/" + username + "/_password"; + Response response = restClient.performRequest("PUT", route, Collections.emptyMap(), entity, authHeader); + assertEquals(response.getStatusLine().getReasonPhrase(), 200, response.getStatusLine().getStatusCode()); + } + logger.info("setting up reserved passwords finished"); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java new file mode 100644 index 0000000000000..7859b95fece48 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java @@ -0,0 +1,511 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.test; + +import io.netty.util.ThreadDeathWatcher; +import io.netty.util.concurrent.GlobalEventExecutor; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.get.GetIndexRequest; +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.metadata.AliasOrIndex; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.index.Index; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.xpack.core.XPackClient; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.LocalStateSecurity; + +import org.junit.AfterClass; +import org.junit.Before; 
+import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.rules.ExternalResource; + +import java.net.InetSocketAddress; +import java.nio.file.Path; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.elasticsearch.test.SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.elasticsearch.xpack.security.SecurityLifecycleService.securityIndexMappingSufficientToRead; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.core.IsCollectionContaining.hasItem; + +/** + * Base class to run tests against a cluster with X-Pack installed and security enabled. + * The default {@link org.elasticsearch.test.ESIntegTestCase.Scope} is {@link org.elasticsearch.test.ESIntegTestCase.Scope#SUITE} + * + * @see SecuritySettingsSource + */ +public abstract class SecurityIntegTestCase extends ESIntegTestCase { + + private static SecuritySettingsSource SECURITY_DEFAULT_SETTINGS; + protected static SecureString BOOTSTRAP_PASSWORD = null; + + /** + * Settings used when the {@link org.elasticsearch.test.ESIntegTestCase.ClusterScope} is set to + * {@link org.elasticsearch.test.ESIntegTestCase.Scope#SUITE} or {@link org.elasticsearch.test.ESIntegTestCase.Scope#TEST} + * so that some of the configuration parameters can be overridden through test instance methods, similarly + * to how {@link #nodeSettings(int)} and {@link #transportClientSettings()} work. + */ + private static CustomSecuritySettingsSource customSecuritySettingsSource = null; + + + @BeforeClass + public static void generateBootstrapPassword() { + BOOTSTRAP_PASSWORD = TEST_PASSWORD_SECURE_STRING.clone(); + } + + //UnicastZen requires the number of nodes in a cluster to generate the unicast configuration. 
+ //The number of nodes is randomized though, but we can predict what the maximum number of nodes will be + //and configure them all in unicast.hosts + protected static int defaultMaxNumberOfNodes() { + ClusterScope clusterScope = SecurityIntegTestCase.class.getAnnotation(ClusterScope.class); + if (clusterScope == null) { + return InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES + + InternalTestCluster.DEFAULT_MAX_NUM_DATA_NODES + + InternalTestCluster.DEFAULT_MAX_NUM_CLIENT_NODES; + } else { + int clientNodes = clusterScope.numClientNodes(); + if (clientNodes < 0) { + clientNodes = InternalTestCluster.DEFAULT_MAX_NUM_CLIENT_NODES; + } + int masterNodes = 0; + if (clusterScope.supportsDedicatedMasters()) { + masterNodes = InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES; + } + + int dataNodes = 0; + if (clusterScope.numDataNodes() < 0) { + if (clusterScope.maxNumDataNodes() < 0) { + dataNodes = InternalTestCluster.DEFAULT_MAX_NUM_DATA_NODES; + } else { + dataNodes = clusterScope.maxNumDataNodes(); + } + } else { + dataNodes = clusterScope.numDataNodes(); + } + return masterNodes + dataNodes + clientNodes; + } + } + + private static ClusterScope getAnnotation(Class clazz) { + if (clazz == Object.class || clazz == SecurityIntegTestCase.class) { + return null; + } + ClusterScope annotation = clazz.getAnnotation(ClusterScope.class); + if (annotation != null) { + return annotation; + } + return getAnnotation(clazz.getSuperclass()); + } + + Scope getCurrentClusterScope() { + return getCurrentClusterScope(this.getClass()); + } + + private static Scope getCurrentClusterScope(Class clazz) { + ClusterScope annotation = getAnnotation(clazz); + return annotation == null ? Scope.SUITE : annotation.scope(); + } + + @BeforeClass + public static void initDefaultSettings() { + if (SECURITY_DEFAULT_SETTINGS == null) { + SECURITY_DEFAULT_SETTINGS = + new SecuritySettingsSource(defaultMaxNumberOfNodes(), randomBoolean(), createTempDir(), Scope.SUITE); + } + } + + /** + * Set the static default settings to null to prevent a memory leak. The test framework also checks for memory leaks + * and computes the size, this can cause issues when running with the security manager as it tries to do reflection + * into protected sun packages. 
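+ * <p>
+ * Descriptive note (derived from the method body below): besides clearing the static settings, this method also
+ * waits briefly for netty's {@code GlobalEventExecutor} and {@code ThreadDeathWatcher} to become inactive, so that
+ * lingering network threads do not trip the test framework's thread-leak detector.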
+ */ + @AfterClass + public static void destroyDefaultSettings() { + SECURITY_DEFAULT_SETTINGS = null; + customSecuritySettingsSource = null; + // Wait for the network threads to finish otherwise there is the possibility that one of + // the threads lingers and trips the thread leak detector + try { + GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (IllegalStateException e) { + if (e.getMessage().equals("thread was not started") == false) { + throw e; + } + // ignore since the thread was never started + } + + try { + ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + + @Rule + //Rules are the only way to have something run before the before (final) method inherited from ESIntegTestCase + public ExternalResource externalResource = new ExternalResource() { + @Override + protected void before() throws Throwable { + Scope currentClusterScope = getCurrentClusterScope(); + switch (currentClusterScope) { + case SUITE: + if (customSecuritySettingsSource == null) { + customSecuritySettingsSource = + new CustomSecuritySettingsSource(transportSSLEnabled(), createTempDir(), currentClusterScope); + } + break; + case TEST: + customSecuritySettingsSource = + new CustomSecuritySettingsSource(transportSSLEnabled(), createTempDir(), currentClusterScope); + break; + } + } + }; + + @Before + //before methods from the superclass are run before this, which means that the current cluster is ready to go + public void assertXPackIsInstalled() { + doAssertXPackIsInstalled(); + } + + protected void doAssertXPackIsInstalled() { + NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().clear().setPlugins(true).get(); + for (NodeInfo nodeInfo : nodeInfos.getNodes()) { + // TODO: disable this assertion for now, due to random runs with mock plugins. perhaps run without mock plugins? 
+// assertThat(nodeInfo.getPlugins().getInfos(), hasSize(2)); + Collection pluginNames = + nodeInfo.getPlugins().getPluginInfos().stream().map(p -> p.getClassname()).collect(Collectors.toList()); + assertThat("plugin [" + LocalStateSecurity.class.getName() + "] not found in [" + pluginNames + "]", pluginNames, + hasItem(LocalStateSecurity.class.getName())); + } + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)); + // Disable native ML autodetect_process as the c++ controller won't be available +// builder.put(MachineLearningField.AUTODETECT_PROCESS.getKey(), false); + Settings customSettings = customSecuritySettingsSource.nodeSettings(nodeOrdinal); + builder.put(customSettings, false); // handle secure settings separately + builder.put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial"); + Settings.Builder customBuilder = Settings.builder().put(customSettings); + if (customBuilder.getSecureSettings() != null) { + SecuritySettingsSource.addSecureSettings(builder, secureSettings -> + secureSettings.merge((MockSecureSettings) customBuilder.getSecureSettings())); + } + if (builder.getSecureSettings() == null) { + builder.setSecureSettings(new MockSecureSettings()); + } + ((MockSecureSettings) builder.getSecureSettings()).setString("bootstrap.password", BOOTSTRAP_PASSWORD.toString()); + return builder.build(); + } + + @Override + protected Path nodeConfigPath(int nodeOrdinal) { + return customSecuritySettingsSource.nodeConfigPath(nodeOrdinal); + } + + @Override + protected Settings transportClientSettings() { + return Settings.builder().put(super.transportClientSettings()) + .put(customSecuritySettingsSource.transportClientSettings()) + .build(); + } + + @Override + protected boolean addMockTransportService() { + return false; // security has its own transport service + } + + @Override + protected Collection> nodePlugins() { + return customSecuritySettingsSource.nodePlugins(); + } + + @Override + protected Collection> transportClientPlugins() { + return customSecuritySettingsSource.transportClientPlugins(); + } + + @Override + protected Settings externalClusterClientSettings() { + return Settings.builder() + .put(SecurityField.USER_SETTING.getKey(), SecuritySettingsSource.TEST_USER_NAME + ":" + + SecuritySettingsSourceField.TEST_PASSWORD) + .build(); + } + + /** + * Allows to override the users config file when the {@link org.elasticsearch.test.ESIntegTestCase.ClusterScope} is set to + * {@link org.elasticsearch.test.ESIntegTestCase.Scope#SUITE} or {@link org.elasticsearch.test.ESIntegTestCase.Scope#TEST} + */ + protected String configUsers() { + return SECURITY_DEFAULT_SETTINGS.configUsers(); + } + + /** + * Allows to override the users_roles config file when the {@link org.elasticsearch.test.ESIntegTestCase.ClusterScope} is set to + * {@link org.elasticsearch.test.ESIntegTestCase.Scope#SUITE} or {@link org.elasticsearch.test.ESIntegTestCase.Scope#TEST} + */ + protected String configUsersRoles() { + return SECURITY_DEFAULT_SETTINGS.configUsersRoles(); + } + + /** + * Allows to override the roles config file when the {@link org.elasticsearch.test.ESIntegTestCase.ClusterScope} is set to + * {@link org.elasticsearch.test.ESIntegTestCase.Scope#SUITE} or {@link org.elasticsearch.test.ESIntegTestCase.Scope#TEST} + */ + protected String configRoles() { + return SECURITY_DEFAULT_SETTINGS.configRoles(); + } + + /** + * Allows to override the node client username (used while sending 
requests to the test cluster) when the + * {@link org.elasticsearch.test.ESIntegTestCase.ClusterScope} is set to + * {@link org.elasticsearch.test.ESIntegTestCase.Scope#SUITE} or {@link org.elasticsearch.test.ESIntegTestCase.Scope#TEST} + */ + protected String nodeClientUsername() { + return SECURITY_DEFAULT_SETTINGS.nodeClientUsername(); + } + + /** + * Allows to override the node client password (used while sending requests to the test cluster) when the + * {@link org.elasticsearch.test.ESIntegTestCase.ClusterScope} is set to + * {@link org.elasticsearch.test.ESIntegTestCase.Scope#SUITE} or {@link org.elasticsearch.test.ESIntegTestCase.Scope#TEST} + */ + protected SecureString nodeClientPassword() { + return SECURITY_DEFAULT_SETTINGS.nodeClientPassword(); + } + + /** + * Allows to override the transport client username (used while sending requests to the test cluster) when the + * {@link org.elasticsearch.test.ESIntegTestCase.ClusterScope} is set to + * {@link org.elasticsearch.test.ESIntegTestCase.Scope#SUITE} or {@link org.elasticsearch.test.ESIntegTestCase.Scope#TEST} + */ + protected String transportClientUsername() { + return SECURITY_DEFAULT_SETTINGS.transportClientUsername(); + } + + /** + * Allows to override the transport client password (used while sending requests to the test cluster) when the + * {@link org.elasticsearch.test.ESIntegTestCase.ClusterScope} is set to + * {@link org.elasticsearch.test.ESIntegTestCase.Scope#SUITE} or {@link org.elasticsearch.test.ESIntegTestCase.Scope#TEST} + */ + protected SecureString transportClientPassword() { + return SECURITY_DEFAULT_SETTINGS.transportClientPassword(); + } + + /** + * Allows to control whether ssl key information is auto generated or not on the transport layer + */ + protected boolean transportSSLEnabled() { + return randomBoolean(); + } + + protected int maxNumberOfNodes() { + return defaultMaxNumberOfNodes(); + } + + private class CustomSecuritySettingsSource extends SecuritySettingsSource { + + private CustomSecuritySettingsSource(boolean sslEnabled, Path configDir, Scope scope) { + super(maxNumberOfNodes(), sslEnabled, configDir, scope); + } + + @Override + protected String configUsers() { + return SecurityIntegTestCase.this.configUsers(); + } + + @Override + protected String configUsersRoles() { + return SecurityIntegTestCase.this.configUsersRoles(); + } + + @Override + protected String configRoles() { + return SecurityIntegTestCase.this.configRoles(); + } + + @Override + protected String nodeClientUsername() { + return SecurityIntegTestCase.this.nodeClientUsername(); + } + + @Override + protected SecureString nodeClientPassword() { + return SecurityIntegTestCase.this.nodeClientPassword(); + } + + @Override + protected String transportClientUsername() { + return SecurityIntegTestCase.this.transportClientUsername(); + } + + @Override + protected SecureString transportClientPassword() { + return SecurityIntegTestCase.this.transportClientPassword(); + } + } + + protected static void assertGreenClusterState(Client client) { + ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().get(); + assertNoTimeout(clusterHealthResponse); + assertThat(clusterHealthResponse.getStatus(), is(ClusterHealthStatus.GREEN)); + } + + /** + * Creates the indices provided as argument, randomly associating them with aliases, indexes one dummy document per index + * and refreshes the new indices + */ + protected void createIndicesWithRandomAliases(String... 
indices) { + createIndex(indices); + + if (frequently()) { + IndicesAliasesRequestBuilder builder = client().admin().indices().prepareAliases(); + for (String index : indices) { + if (frequently()) { + //one alias per index with prefix "alias-" + builder.addAlias(index, "alias-" + index); + } + } + if (randomBoolean()) { + //one alias pointing to all indices + for (String index : indices) { + builder.addAlias(index, "alias"); + } + } + assertAcked(builder); + } + + for (String index : indices) { + client().prepareIndex(index, "type").setSource("field", "value").get(); + } + refresh(indices); + } + + @Override + protected Function getClientWrapper() { + Map headers = Collections.singletonMap("Authorization", + basicAuthHeaderValue(nodeClientUsername(), nodeClientPassword())); + // we need to wrap node clients because we do not specify a user for nodes and all requests will use the system + // user. This is ok for internal n2n stuff but the test framework does other things like wiping indices, repositories, etc + // that the system user cannot do. so we wrap the node client with a user that can do these things since the client() calls + // are randomized to return both node clients and transport clients + // transport clients do not need to be wrapped since we specify the xpack.security.user setting that sets the default user to be + // used for the transport client. If we did not set a default user then the transport client would not even be allowed + // to connect + return client -> (client instanceof NodeClient) ? client.filterWithHeader(headers) : client; + } + + protected SecurityClient securityClient() { + return securityClient(client()); + } + + public static SecurityClient securityClient(Client client) { + return randomBoolean() ? new XPackClient(client).security() : new SecurityClient(client); + } + + protected String getHttpURL() { + final NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get(); + final List nodes = nodeInfos.getNodes(); + assertTrue("there is at least one node", nodes.size() > 0); + NodeInfo ni = randomFrom(nodes); + boolean useSSL = XPackSettings.HTTP_SSL_ENABLED.get(ni.getSettings()); + TransportAddress publishAddress = ni.getHttp().address().publishAddress(); + InetSocketAddress address = publishAddress.address(); + return (useSSL ? 
"https://" : "http://") + NetworkAddress.format(address.getAddress()) + ":" + address.getPort(); + } + + public void assertSecurityIndexActive() throws Exception { + assertSecurityIndexActive(cluster()); + } + + public void assertSecurityIndexActive(TestCluster testCluster) throws Exception { + for (Client client : testCluster.getClients()) { + assertBusy(() -> { + ClusterState clusterState = client.admin().cluster().prepareState().setLocal(true).get().getState(); + assertFalse(clusterState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)); + XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint().startObject(); + assertTrue("security index mapping not sufficient to read:\n" + + Strings.toString(clusterState.toXContent(builder, ToXContent.EMPTY_PARAMS).endObject()), + securityIndexMappingSufficientToRead(clusterState, logger)); + Index securityIndex = resolveSecurityIndex(clusterState.metaData()); + if (securityIndex != null) { + IndexRoutingTable indexRoutingTable = clusterState.routingTable().index(securityIndex); + if (indexRoutingTable != null) { + assertTrue(indexRoutingTable.allPrimaryShardsActive()); + } + } + }, 30L, TimeUnit.SECONDS); + } + } + + protected void deleteSecurityIndex() { + final Client client = client().filterWithHeader(Collections.singletonMap("Authorization", + UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); + GetIndexRequest getIndexRequest = new GetIndexRequest(); + getIndexRequest.indices(SECURITY_INDEX_NAME); + getIndexRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); + GetIndexResponse getIndexResponse = client.admin().indices().getIndex(getIndexRequest).actionGet(); + if (getIndexResponse.getIndices().length > 0) { + // this is a hack to clean up the .security index since only a superuser can delete it + DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(getIndexResponse.getIndices()); + client.admin().indices().delete(deleteIndexRequest).actionGet(); + } + } + + private static Index resolveSecurityIndex(MetaData metaData) { + final AliasOrIndex aliasOrIndex = metaData.getAliasAndIndexLookup().get(SECURITY_INDEX_NAME); + if (aliasOrIndex != null) { + return aliasOrIndex.getIndices().get(0).getIndex(); + } + return null; + } + + protected boolean isTransportSSLEnabled() { + return customSecuritySettingsSource.isSslEnabled(); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java new file mode 100644 index 0000000000000..2f1123a9461de --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java @@ -0,0 +1,350 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.test; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.analysis.common.CommonAnalysisPlugin; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.reindex.ReindexPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.test.discovery.ClusterDiscoveryConfiguration; +import org.elasticsearch.transport.Netty4Plugin; +import org.elasticsearch.xpack.core.XPackClientPlugin; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.security.LocalStateSecurity; +import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail; +import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings; +import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; + +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.function.Consumer; + +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomBoolean; +import static org.apache.lucene.util.LuceneTestCase.createTempFile; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.elasticsearch.xpack.security.test.SecurityTestUtils.writeFile; + +/** + * {@link org.elasticsearch.test.NodeConfigurationSource} subclass that allows to set all needed settings for x-pack security. + * Unicast discovery is configured through {@link org.elasticsearch.test.discovery.ClusterDiscoveryConfiguration.UnicastZen}, + * also x-pack is installed with all the needed configuration and files. + * To avoid conflicts, every cluster should have its own instance of this class as some configuration files need to be created. 
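+ * <p>
+ * The {@code configUsers()}, {@code configUsersRoles()} and {@code configRoles()} hooks define the contents of the
+ * generated {@code users}, {@code users_roles} and {@code roles.yml} files. As an illustration only (the
+ * {@code extra_user} and {@code extra_role} names are made up for this example), a test based on
+ * {@link SecurityIntegTestCase} could extend the defaults along these lines, with {@code configRoles()} then
+ * defining {@code extra_role}:
+ * <pre>
+ * protected String configUsers() {
+ *     return super.configUsers() + "extra_user:" + SecuritySettingsSource.TEST_PASSWORD_HASHED + "\n";
+ * }
+ * protected String configUsersRoles() {
+ *     return super.configUsersRoles() + "extra_role:extra_user\n";
+ * }
+ * </pre>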
+ */ +public class SecuritySettingsSource extends ClusterDiscoveryConfiguration.UnicastZen { + + public static final Settings DEFAULT_SETTINGS = Settings.EMPTY; + + public static final String TEST_USER_NAME = "test_user"; + public static final String TEST_PASSWORD_HASHED = + new String(Hasher.BCRYPT.hash(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()))); + public static final String TEST_ROLE = "user"; + public static final String TEST_SUPERUSER = "test_superuser"; + + public static final String DEFAULT_TRANSPORT_CLIENT_ROLE = "transport_client"; + public static final String DEFAULT_TRANSPORT_CLIENT_USER_NAME = "test_trans_client_user"; + + public static final String CONFIG_STANDARD_USER = + TEST_USER_NAME + ":" + TEST_PASSWORD_HASHED + "\n" + + DEFAULT_TRANSPORT_CLIENT_USER_NAME + ":" + TEST_PASSWORD_HASHED + "\n" + + TEST_SUPERUSER + ":" + TEST_PASSWORD_HASHED + "\n"; + + public static final String CONFIG_STANDARD_USER_ROLES = + TEST_ROLE + ":" + TEST_USER_NAME + "," + DEFAULT_TRANSPORT_CLIENT_USER_NAME + "\n" + + DEFAULT_TRANSPORT_CLIENT_ROLE + ":" + DEFAULT_TRANSPORT_CLIENT_USER_NAME + "\n" + + "superuser:" + TEST_SUPERUSER + "\n"; + + public static final String CONFIG_ROLE_ALLOW_ALL = + TEST_ROLE + ":\n" + + " cluster: [ ALL ]\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ ALL ]\n"; + + private final Path parentFolder; + private final String subfolderPrefix; + private final boolean sslEnabled; + private final boolean hostnameVerificationEnabled; + private final boolean usePEM; + + /** + * Creates a new {@link org.elasticsearch.test.NodeConfigurationSource} for the security configuration. + * + * @param numOfNodes the number of nodes for proper unicast configuration (can be more than actually available) + * @param sslEnabled whether ssl is enabled + * @param parentFolder the parent folder that will contain all of the configuration files that need to be created + * @param scope the scope of the test that is requiring an instance of SecuritySettingsSource + */ + public SecuritySettingsSource(int numOfNodes, boolean sslEnabled, Path parentFolder, Scope scope) { + super(numOfNodes, DEFAULT_SETTINGS); + this.parentFolder = parentFolder; + this.subfolderPrefix = scope.name(); + this.sslEnabled = sslEnabled; + this.hostnameVerificationEnabled = randomBoolean(); + this.usePEM = randomBoolean(); + } + + Path nodePath(final int nodeOrdinal) { + return parentFolder.resolve(subfolderPrefix + "-" + nodeOrdinal); + } + + @Override + public Settings nodeSettings(int nodeOrdinal) { + final Path home = nodePath(nodeOrdinal); + final Path xpackConf = home.resolve("config"); + try { + Files.createDirectories(xpackConf); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + writeFile(xpackConf, "roles.yml", configRoles()); + writeFile(xpackConf, "users", configUsers()); + writeFile(xpackConf, "users_roles", configUsersRoles()); + + Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)) + .put(XPackSettings.SECURITY_ENABLED.getKey(), true) + //TODO: for now isolate security tests from watcher & monitoring (randomize this later) + .put(XPackSettings.WATCHER_ENABLED.getKey(), false) + .put(XPackSettings.MONITORING_ENABLED.getKey(), false) + .put(XPackSettings.AUDIT_ENABLED.getKey(), randomBoolean()) + .put(LoggingAuditTrail.HOST_ADDRESS_SETTING.getKey(), randomBoolean()) + .put(LoggingAuditTrail.HOST_NAME_SETTING.getKey(), randomBoolean()) + .put(LoggingAuditTrail.NODE_NAME_SETTING.getKey(), randomBoolean()) + 
.put("xpack.security.authc.realms.file.type", FileRealmSettings.TYPE) + .put("xpack.security.authc.realms.file.order", 0) + .put("xpack.security.authc.realms.index.type", NativeRealmSettings.TYPE) + .put("xpack.security.authc.realms.index.order", "1"); + addNodeSSLSettings(builder); + return builder.build(); + } + + @Override + public Path nodeConfigPath(int nodeOrdinal) { + return nodePath(nodeOrdinal).resolve("config"); + } + + @Override + public Settings transportClientSettings() { + Settings superSettings = super.transportClientSettings(); + Settings.Builder builder = Settings.builder().put(superSettings); + addClientSSLSettings(builder, ""); + addDefaultSecurityTransportType(builder, superSettings); + + if (randomBoolean()) { + builder.put(SecurityField.USER_SETTING.getKey(), + transportClientUsername() + ":" + new String(transportClientPassword().getChars())); + } else { + builder.put(ThreadContext.PREFIX + ".Authorization", basicAuthHeaderValue(transportClientUsername(), + transportClientPassword())); + } + return builder.build(); + } + + protected void addDefaultSecurityTransportType(Settings.Builder builder, Settings settings) { + if (NetworkModule.TRANSPORT_TYPE_SETTING.exists(settings) == false) { + builder.put(NetworkModule.TRANSPORT_TYPE_SETTING.getKey(), SecurityField.NAME4); + } + } + + + @Override + public Collection> nodePlugins() { + return Arrays.asList(LocalStateSecurity.class, Netty4Plugin.class, ReindexPlugin.class, CommonAnalysisPlugin.class); + } + + @Override + public Collection> transportClientPlugins() { + return Arrays.asList(XPackClientPlugin.class, Netty4Plugin.class, ReindexPlugin.class, CommonAnalysisPlugin.class); + } + + protected String configUsers() { + return CONFIG_STANDARD_USER; + } + + protected String configUsersRoles() { + return CONFIG_STANDARD_USER_ROLES; + } + + protected String configRoles() { + return CONFIG_ROLE_ALLOW_ALL; + } + + protected String nodeClientUsername() { + return TEST_USER_NAME; + } + + protected SecureString nodeClientPassword() { + return new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()); + } + + protected String transportClientUsername() { + return DEFAULT_TRANSPORT_CLIENT_USER_NAME; + } + + protected SecureString transportClientPassword() { + return new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()); + } + + private void addNodeSSLSettings(Settings.Builder builder) { + if (sslEnabled) { + if (usePEM) { + addSSLSettingsForPEMFiles(builder, "", + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem", "testnode", + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", + Arrays.asList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.crt", + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/active-directory-ca.crt", + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt", + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/openldap.crt", + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"), + sslEnabled, hostnameVerificationEnabled, false); + + } else { + addSSLSettingsForStore(builder, "", "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks", + "testnode", sslEnabled, hostnameVerificationEnabled, false); + } + } else if (randomBoolean()) { + builder.put(XPackSettings.TRANSPORT_SSL_ENABLED.getKey(), false); + } + } + + public void addClientSSLSettings(Settings.Builder builder, String prefix) { + if 
(usePEM) { + addSSLSettingsForPEMFiles(builder, prefix, + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.pem", "testclient", + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt", + Arrays.asList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt"), + sslEnabled, hostnameVerificationEnabled, true); + } else { + addSSLSettingsForStore(builder, prefix, "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.jks", + "testclient", sslEnabled, hostnameVerificationEnabled, true); + } + } + + /** + * Returns the configuration settings given the location of a certificate and its password + * + * @param resourcePathToStore the location of the keystore or truststore + * @param password the password + */ + public static void addSSLSettingsForStore(Settings.Builder builder, String resourcePathToStore, String password) { + addSSLSettingsForStore(builder, "", resourcePathToStore, password, true, true, true); + } + + private static void addSSLSettingsForStore(Settings.Builder builder, String prefix, String resourcePathToStore, String password, + boolean sslEnabled, boolean hostnameVerificationEnabled, + boolean transportClient) { + Path store = resolveResourcePath(resourcePathToStore); + + if (transportClient == false) { + builder.put(prefix + "xpack.security.http.ssl.enabled", false); + } + builder.put(XPackSettings.TRANSPORT_SSL_ENABLED.getKey(), sslEnabled); + + builder.put(prefix + "xpack.ssl.verification_mode", hostnameVerificationEnabled ? "full" : "certificate"); + builder.put(prefix + "xpack.ssl.keystore.path", store); + if (transportClient) { + // continue using insecure settings for clients until we figure out what to do there... + builder.put(prefix + "xpack.ssl.keystore.password", password); + } else { + addSecureSettings(builder, secureSettings -> + secureSettings.setString(prefix + "xpack.ssl.keystore.secure_password", password)); + } + + if (randomBoolean()) { + builder.put(prefix + "xpack.ssl.truststore.path", store); + if (transportClient) { + // continue using insecure settings for clients until we figure out what to do there... + builder.put(prefix + "xpack.ssl.truststore.password", password); + } else { + addSecureSettings(builder, secureSettings -> + secureSettings.setString(prefix + "xpack.ssl.truststore.secure_password", password)); + } + } + } + + private static void addSSLSettingsForPEMFiles(Settings.Builder builder, String prefix, String keyPath, String password, + String certificatePath, List trustedCertificates, boolean sslEnabled, + boolean hostnameVerificationEnabled, boolean transportClient) { + + if (transportClient == false) { + builder.put(prefix + "xpack.security.http.ssl.enabled", false); + } + builder.put(XPackSettings.TRANSPORT_SSL_ENABLED.getKey(), sslEnabled); + + builder.put(prefix + "xpack.ssl.verification_mode", hostnameVerificationEnabled ? "full" : "certificate"); + builder.put(prefix + "xpack.ssl.key", resolveResourcePath(keyPath)) + .put(prefix + "xpack.ssl.certificate", resolveResourcePath(certificatePath)); + if (transportClient) { + // continue using insecure settings for clients until we figure out what to do there... 
+ builder.put(prefix + "xpack.ssl.key_passphrase", password); + } else { + addSecureSettings(builder, secureSettings -> + secureSettings.setString(prefix + "xpack.ssl.secure_key_passphrase", password)); + } + + if (trustedCertificates.isEmpty() == false) { + builder.put(prefix + "xpack.ssl.certificate_authorities", + Strings.arrayToCommaDelimitedString(resolvePathsToString(trustedCertificates))); + } + } + + public static void addSecureSettings(Settings.Builder builder, Consumer settingsSetter) { + SecureSettings secureSettings = builder.getSecureSettings(); + if (secureSettings instanceof MockSecureSettings) { + settingsSetter.accept((MockSecureSettings) secureSettings); + } else if (secureSettings == null) { + MockSecureSettings mockSecureSettings = new MockSecureSettings(); + settingsSetter.accept(mockSecureSettings); + builder.setSecureSettings(mockSecureSettings); + } else { + throw new AssertionError("Test settings builder must contain MockSecureSettings, " + + "but has [" + secureSettings.getClass().getName() + "]"); + } + } + + private static String[] resolvePathsToString(List resourcePaths) { + List resolvedPaths = new ArrayList<>(resourcePaths.size()); + for (String resource : resourcePaths) { + resolvedPaths.add(resolveResourcePath(resource).toString()); + } + return resolvedPaths.toArray(new String[resolvedPaths.size()]); + } + + private static Path resolveResourcePath(String resourcePathToStore) { + try { + Path path = createTempFile(); + try (InputStream resourceInput = SecuritySettingsSource.class.getResourceAsStream(resourcePathToStore)) { + Files.copy(resourceInput, path, StandardCopyOption.REPLACE_EXISTING); + } + return path; + } catch (IOException e) { + throw new ElasticsearchException("Failed to resolve resource (Path=[{}])", e, resourcePathToStore); + } + } + + public boolean isSslEnabled() { + return sslEnabled; + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java new file mode 100644 index 0000000000000..1ee654c0baffc --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java @@ -0,0 +1,308 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.test; + +import io.netty.util.ThreadDeathWatcher; +import io.netty.util.concurrent.GlobalEventExecutor; +import org.apache.http.HttpHost; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestClientBuilder; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginInfo; +import org.elasticsearch.xpack.security.LocalStateSecurity; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.rules.ExternalResource; + +import java.net.InetSocketAddress; +import java.nio.file.Path; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static org.elasticsearch.test.SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.core.IsCollectionContaining.hasItem; + +/** + * A test that starts a single node with security enabled. This test case allows for customization + * of users and roles that the cluster starts with. If an integration test is needed to test but + * multiple nodes are not needed, then this class should be favored over + * {@link SecurityIntegTestCase} due to simplicity and improved speed from not needing to start + * multiple nodes and wait for the cluster to form. + */ +public abstract class SecuritySingleNodeTestCase extends ESSingleNodeTestCase { + + private static SecuritySettingsSource SECURITY_DEFAULT_SETTINGS = null; + private static CustomSecuritySettingsSource customSecuritySettingsSource = null; + private static RestClient restClient = null; + private static SecureString BOOTSTRAP_PASSWORD = null; + + @BeforeClass + public static void generateBootstrapPassword() { + BOOTSTRAP_PASSWORD = TEST_PASSWORD_SECURE_STRING.clone(); + } + + @BeforeClass + public static void initDefaultSettings() { + if (SECURITY_DEFAULT_SETTINGS == null) { + SECURITY_DEFAULT_SETTINGS = + new SecuritySettingsSource(1, randomBoolean(), createTempDir(), ESIntegTestCase.Scope.SUITE); + } + } + + /** + * Set the static default settings to null to prevent a memory leak. The test framework also checks for memory leaks + * and computes the size, this can cause issues when running with the security manager as it tries to do reflection + * into protected sun packages. 
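+ * <p>
+ * Descriptive note (derived from the method body below): in addition to clearing the static settings, this method
+ * closes the bootstrap password and tears down the shared {@link RestClient}, waiting for netty's worker threads to
+ * finish so they do not trip the thread-leak detector.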
+ */ + @AfterClass + public static void destroyDefaultSettings() { + SECURITY_DEFAULT_SETTINGS = null; + customSecuritySettingsSource = null; + if (BOOTSTRAP_PASSWORD != null) { + BOOTSTRAP_PASSWORD.close(); + BOOTSTRAP_PASSWORD = null; + } + tearDownRestClient(); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + if (resetNodeAfterTest()) { + tearDownRestClient(); + } + } + + private static void tearDownRestClient() { + if (restClient != null) { + IOUtils.closeWhileHandlingException(restClient); + restClient = null; + } + + // Wait for the network threads to finish otherwise there is the possibility that one of + // the threads lingers and trips the thread leak detector + try { + GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (IllegalStateException e) { + if (e.getMessage().equals("thread was not started") == false) { + throw e; + } + // ignore since the thread was never started + } + + try { + ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + + @Rule + //Rules are the only way to have something run before the before (final) method inherited from ESSingleNodeTestCase + public ExternalResource externalResource = new ExternalResource() { + @Override + protected void before() { + if (customSecuritySettingsSource == null) { + customSecuritySettingsSource = + new CustomSecuritySettingsSource(transportSSLEnabled(), createTempDir(), ESIntegTestCase.Scope.SUITE); + } + } + }; + + @Before + //before methods from the superclass are run before this, which means that the current cluster is ready to go + public void assertXPackIsInstalled() { + doAssertXPackIsInstalled(); + } + + private void doAssertXPackIsInstalled() { + NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().clear().setPlugins(true).get(); + for (NodeInfo nodeInfo : nodeInfos.getNodes()) { + // TODO: disable this assertion for now, due to random runs with mock plugins. perhaps run without mock plugins? 
+ // assertThat(nodeInfo.getPlugins().getInfos(), hasSize(2)); + Collection pluginNames = + nodeInfo.getPlugins().getPluginInfos().stream().map(PluginInfo::getClassname).collect(Collectors.toList()); + assertThat("plugin [" + LocalStateSecurity.class.getName() + "] not found in [" + pluginNames + "]", pluginNames, + hasItem(LocalStateSecurity.class.getName())); + } + } + + @Override + protected Settings nodeSettings() { + Settings.Builder builder = Settings.builder().put(super.nodeSettings()); + Settings customSettings = customSecuritySettingsSource.nodeSettings(0); + builder.put(customSettings, false); // handle secure settings separately + builder.put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial"); + builder.put("transport.type", "security4"); + builder.put("path.home", customSecuritySettingsSource.nodePath(0)); + Settings.Builder customBuilder = Settings.builder().put(customSettings); + if (customBuilder.getSecureSettings() != null) { + SecuritySettingsSource.addSecureSettings(builder, secureSettings -> + secureSettings.merge((MockSecureSettings) customBuilder.getSecureSettings())); + } + if (builder.getSecureSettings() == null) { + builder.setSecureSettings(new MockSecureSettings()); + } + ((MockSecureSettings) builder.getSecureSettings()).setString("bootstrap.password", BOOTSTRAP_PASSWORD.toString()); + return builder.build(); + } + + protected Settings transportClientSettings() { + return Settings.builder() + .put(customSecuritySettingsSource.transportClientSettings()) + .build(); + } + + @Override + protected Collection> getPlugins() { + return customSecuritySettingsSource.nodePlugins(); + } + + /** + * Allows to override the users config file + */ + protected String configUsers() { + return SECURITY_DEFAULT_SETTINGS.configUsers(); + } + + /** + * Allows to override the users_roles config file + */ + protected String configUsersRoles() { + return SECURITY_DEFAULT_SETTINGS.configUsersRoles(); + } + + /** + * Allows to override the roles config file + */ + protected String configRoles() { + return SECURITY_DEFAULT_SETTINGS.configRoles(); + } + + /** + * Allows to override the node client username + */ + protected String nodeClientUsername() { + return SECURITY_DEFAULT_SETTINGS.nodeClientUsername(); + } + + /** + * Allows to override the node client password (used while sending requests to the test node) + */ + protected SecureString nodeClientPassword() { + return SECURITY_DEFAULT_SETTINGS.nodeClientPassword(); + } + + /** + * Allows to control whether ssl key information is auto generated or not on the transport layer + */ + protected boolean transportSSLEnabled() { + return randomBoolean(); + } + + private class CustomSecuritySettingsSource extends SecuritySettingsSource { + + private CustomSecuritySettingsSource(boolean sslEnabled, Path configDir, ESIntegTestCase.Scope scope) { + super(1, sslEnabled, configDir, scope); + } + + @Override + protected String configUsers() { + return SecuritySingleNodeTestCase.this.configUsers(); + } + + @Override + protected String configUsersRoles() { + return SecuritySingleNodeTestCase.this.configUsersRoles(); + } + + @Override + protected String configRoles() { + return SecuritySingleNodeTestCase.this.configRoles(); + } + + @Override + protected String nodeClientUsername() { + return SecuritySingleNodeTestCase.this.nodeClientUsername(); + } + + @Override + protected SecureString nodeClientPassword() { + return SecuritySingleNodeTestCase.this.nodeClientPassword(); + } + } + + @Override + public Client client() { + Map headers = 
Collections.singletonMap("Authorization", + basicAuthHeaderValue(nodeClientUsername(), nodeClientPassword())); + // we need to wrap node clients because we do not specify a user for nodes and all requests will use the system + // user. This is ok for internal n2n stuff but the test framework does other things like wiping indices, repositories, etc + // that the system user cannot do. so we wrap the node client with a user that can do these things since the client() calls + // are all using a node client + return super.client().filterWithHeader(headers); + } + + protected boolean isTransportSSLEnabled() { + return customSecuritySettingsSource.isSslEnabled(); + } + + /** + * Returns an instance of {@link RestClient} pointing to the current node. + * Creates a new client if the method is invoked for the first time in the context of the current test scope. + * The returned client gets automatically closed when needed, it shouldn't be closed as part of tests otherwise + * it cannot be reused by other tests anymore. + */ + protected RestClient getRestClient() { + return getRestClient(client()); + } + + protected RestClient createRestClient(RestClientBuilder.HttpClientConfigCallback httpClientConfigCallback, String protocol) { + return createRestClient(client(), httpClientConfigCallback, protocol); + } + + private static synchronized RestClient getRestClient(Client client) { + if (restClient == null) { + restClient = createRestClient(client, null, "http"); + } + return restClient; + } + + private static RestClient createRestClient(Client client, RestClientBuilder.HttpClientConfigCallback httpClientConfigCallback, + String protocol) { + NodesInfoResponse nodesInfoResponse = client.admin().cluster().prepareNodesInfo().get(); + assertFalse(nodesInfoResponse.hasFailures()); + assertEquals(nodesInfoResponse.getNodes().size(), 1); + NodeInfo node = nodesInfoResponse.getNodes().get(0); + assertNotNull(node.getHttp()); + TransportAddress publishAddress = node.getHttp().address().publishAddress(); + InetSocketAddress address = publishAddress.address(); + final HttpHost host = new HttpHost(NetworkAddress.format(address.getAddress()), address.getPort(), protocol); + RestClientBuilder builder = RestClient.builder(host); + if (httpClientConfigCallback != null) { + builder.setHttpClientConfigCallback(httpClientConfigCallback); + } + return builder.build(); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityTestsUtils.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityTestsUtils.java new file mode 100644 index 0000000000000..36ec016170afe --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityTestsUtils.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.test; + +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.rest.RestStatus; +import org.hamcrest.Matcher; + +import static org.apache.lucene.util.LuceneTestCase.expectThrows; +import static org.elasticsearch.xpack.core.security.test.SecurityAssertions.assertContainsWWWAuthenticateHeader; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.either; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertThat; + +public class SecurityTestsUtils { + + private SecurityTestsUtils() { + } + + public static void assertAuthenticationException(ElasticsearchSecurityException e) { + assertThat(e.status(), is(RestStatus.UNAUTHORIZED)); + // making sure it's not a license expired exception + assertThat(e.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), nullValue()); + assertContainsWWWAuthenticateHeader(e); + } + + public static void assertAuthenticationException(ElasticsearchSecurityException e, Matcher messageMatcher) { + assertAuthenticationException(e); + assertThat(e.getMessage(), messageMatcher); + } + + public static void assertThrowsAuthorizationException(LuceneTestCase.ThrowingRunnable throwingRunnable, String action, String user) { + assertThrowsAuthorizationException(throwingRunnable, + containsString("[" + action + "] is unauthorized for user [" + user + "]")); + } + + public static void assertThrowsAuthorizationExceptionRunAs(LuceneTestCase.ThrowingRunnable throwingRunnable, + String action, String user, String runAs) { + assertThrowsAuthorizationException(throwingRunnable, + containsString("[" + action + "] is unauthorized for user [" + user + "] run as [" + runAs + "]")); + } + + public static void assertThrowsAuthorizationExceptionDefaultUsers(LuceneTestCase.ThrowingRunnable throwingRunnable, String action) { + ElasticsearchSecurityException exception = expectThrows(ElasticsearchSecurityException.class, throwingRunnable); + assertAuthorizationExceptionDefaultUsers(exception, action); + } + + public static void assertAuthorizationExceptionDefaultUsers(Throwable throwable, String action) { + assertAuthorizationException(throwable, either(containsString("[" + action + "] is unauthorized for user [" + + SecuritySettingsSource.TEST_USER_NAME + "]")).or(containsString("[" + action + "] is unauthorized for user [" + + SecuritySettingsSource.DEFAULT_TRANSPORT_CLIENT_USER_NAME + "]"))); + } + + public static void assertThrowsAuthorizationException(LuceneTestCase.ThrowingRunnable throwingRunnable, + Matcher messageMatcher) { + ElasticsearchSecurityException securityException = expectThrows(ElasticsearchSecurityException.class, throwingRunnable); + assertAuthorizationException(securityException, messageMatcher); + } + + private static void assertAuthorizationException(Throwable throwable, Matcher messageMatcher) { + assertThat(throwable, instanceOf(ElasticsearchSecurityException.class)); + ElasticsearchSecurityException securityException = (ElasticsearchSecurityException) throwable; + assertThat(securityException.status(), is(RestStatus.FORBIDDEN)); + assertThat(throwable.getMessage(), messageMatcher); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SettingsFilterTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SettingsFilterTests.java new file mode 100644 index 0000000000000..6447be7e69cbe --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SettingsFilterTests.java @@ -0,0 +1,151 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.test; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.inject.Guice; +import org.elasticsearch.common.inject.Injector; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.settings.SettingsModule; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings; +import org.elasticsearch.xpack.security.LocalStateSecurity; +import org.hamcrest.Matcher; + +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.TrustManagerFactory; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.Matchers.is; + +public class SettingsFilterTests extends ESTestCase { + + private Settings.Builder configuredSettingsBuilder = Settings.builder(); + private Map settingsMatcherMap = new HashMap<>(); + private MockSecureSettings mockSecureSettings = new MockSecureSettings(); + + public void testFiltering() throws Exception { + final boolean useLegacyLdapBindPassword = randomBoolean(); + + configureUnfilteredSetting("xpack.security.authc.realms.file.type", "file"); + + // ldap realm filtering + configureUnfilteredSetting("xpack.security.authc.realms.ldap1.type", "ldap"); + configureUnfilteredSetting("xpack.security.authc.realms.ldap1.enabled", "false"); + configureUnfilteredSetting("xpack.security.authc.realms.ldap1.url", "ldap://host.domain"); + configureFilteredSetting("xpack.security.authc.realms.ldap1.hostname_verification", Boolean.toString(randomBoolean())); + configureFilteredSetting("xpack.security.authc.realms.ldap1.bind_dn", randomAlphaOfLength(5)); + if (useLegacyLdapBindPassword) { + configureFilteredSetting("xpack.security.authc.realms.ldap1.bind_password", randomAlphaOfLength(5)); + } else { + configureSecureSetting("xpack.security.authc.realms.ldap1.secure_bind_password", randomAlphaOfLengthBetween(3, 8)); + } + + // active directory filtering + configureUnfilteredSetting("xpack.security.authc.realms.ad1.type", "active_directory"); + configureUnfilteredSetting("xpack.security.authc.realms.ad1.enabled", "false"); + configureUnfilteredSetting("xpack.security.authc.realms.ad1.url", "ldap://host.domain"); + configureFilteredSetting("xpack.security.authc.realms.ad1.hostname_verification", Boolean.toString(randomBoolean())); + + // pki filtering + configureUnfilteredSetting("xpack.security.authc.realms.pki1.type", "pki"); + configureUnfilteredSetting("xpack.security.authc.realms.pki1.order", "0"); + configureFilteredSetting("xpack.security.authc.realms.pki1.truststore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/truststore-testnode-only.jks").toString()); + 
configureSecureSetting("xpack.security.authc.realms.pki1.truststore.secure_password", "truststore-testnode-only"); + configureFilteredSetting("xpack.security.authc.realms.pki1.truststore.algorithm", "SunX509"); + + configureFilteredSetting("xpack.ssl.keystore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks").toString()); + configureFilteredSetting("xpack.ssl.cipher_suites", + Strings.arrayToCommaDelimitedString(XPackSettings.DEFAULT_CIPHERS.toArray())); + configureFilteredSetting("xpack.ssl.supported_protocols", randomFrom("TLSv1", "TLSv1.1", "TLSv1.2")); + configureSecureSetting("xpack.ssl.keystore.secure_password", "testnode"); + configureFilteredSetting("xpack.ssl.keystore.algorithm", KeyManagerFactory.getDefaultAlgorithm()); + configureSecureSetting("xpack.ssl.keystore.secure_key_password", "testnode"); + configureSecureSetting("xpack.ssl.truststore.secure_password", randomAlphaOfLength(5)); + configureFilteredSetting("xpack.ssl.truststore.algorithm", TrustManagerFactory.getDefaultAlgorithm()); + + // client profile + configureUnfilteredSetting("transport.profiles.client.port", "9500-9600"); + configureFilteredSetting("transport.profiles.client.xpack.security.ssl.keystore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks").toString()); + configureFilteredSetting("transport.profiles.client.xpack.security.ssl.cipher_suites", + Strings.arrayToCommaDelimitedString(XPackSettings.DEFAULT_CIPHERS.toArray())); + configureFilteredSetting("transport.profiles.client.xpack.security.ssl.supported_protocols", + randomFrom("TLSv1", "TLSv1.1", "TLSv1.2")); + configureSecureSetting("transport.profiles.client.xpack.security.ssl.keystore.secure_password", "testnode"); + configureFilteredSetting("transport.profiles.client.xpack.security.ssl.keystore.algorithm", + KeyManagerFactory.getDefaultAlgorithm()); + configureSecureSetting("transport.profiles.client.xpack.security.ssl.keystore.secure_key_password", "testnode"); + configureSecureSetting("transport.profiles.client.xpack.security.ssl.truststore.secure_password", randomAlphaOfLength(5)); + configureFilteredSetting("transport.profiles.client.xpack.security.ssl.truststore.algorithm", + TrustManagerFactory.getDefaultAlgorithm()); + + // custom settings, potentially added by a plugin + configureFilteredSetting("foo.bar", "_secret"); + configureFilteredSetting("foo.baz", "_secret"); + configureFilteredSetting("bar.baz", "_secret"); + configureUnfilteredSetting("baz.foo", "_not_a_secret"); + configureFilteredSetting("xpack.security.hide_settings", "foo.*,bar.baz"); + + Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put(configuredSettingsBuilder.build()) + .setSecureSettings(mockSecureSettings) + .build(); + + LocalStateSecurity securityPlugin = new LocalStateSecurity(settings, null); + + List> settingList = new ArrayList<>(); + settingList.add(Setting.simpleString("foo.bar", Setting.Property.NodeScope)); + settingList.add(Setting.simpleString("foo.baz", Setting.Property.NodeScope)); + settingList.add(Setting.simpleString("bar.baz", Setting.Property.NodeScope)); + settingList.add(Setting.simpleString("baz.foo", Setting.Property.NodeScope)); + settingList.addAll(securityPlugin.getSettings()); + List settingsFilterList = new ArrayList<>(); + settingsFilterList.addAll(securityPlugin.getSettingsFilter()); + // custom settings, potentially added by a plugin + SettingsModule settingsModule = new SettingsModule(settings, settingList, 
settingsFilterList); + + Injector injector = Guice.createInjector(settingsModule); + SettingsFilter settingsFilter = injector.getInstance(SettingsFilter.class); + + Settings filteredSettings = settingsFilter.filter(settings); + for (Map.Entry entry : settingsMatcherMap.entrySet()) { + assertThat(filteredSettings.get(entry.getKey()), entry.getValue()); + } + + if (useLegacyLdapBindPassword) { + assertSettingDeprecationsAndWarnings(new Setting[]{PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD}); + } + } + + private void configureUnfilteredSetting(String settingName, String value) { + configureSetting(settingName, value, is(value)); + } + + private void configureFilteredSetting(String settingName, String value) { + configureSetting(settingName, value, is(nullValue())); + } + + private void configureSecureSetting(String settingName, String value) { + mockSecureSettings.setString(settingName, value); + settingsMatcherMap.put(settingName, is(nullValue())); + } + + private void configureSetting(String settingName, String value, Matcher expectedMatcher) { + configuredSettingsBuilder.put(settingName, value); + settingsMatcherMap.put(settingName, expectedMatcher); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/transport/SecurityServerTransportServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/transport/SecurityServerTransportServiceTests.java new file mode 100644 index 0000000000000..675c438b87fa2 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/transport/SecurityServerTransportServiceTests.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.transport; + +import java.util.Map; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.security.transport.SecurityServerTransportInterceptor; + +// this class sits in org.elasticsearch.transport so that TransportService.requestHandlers is visible +public class SecurityServerTransportServiceTests extends SecurityIntegTestCase { + @Override + protected Settings transportClientSettings() { + return Settings.builder() + .put(super.transportClientSettings()) + .put(XPackSettings.SECURITY_ENABLED.getKey(), true) + .build(); + } + + public void testSecurityServerTransportServiceWrapsAllHandlers() { + for (TransportService transportService : internalCluster().getInstances(TransportService.class)) { + for (Map.Entry entry : transportService.requestHandlers.entrySet()) { + RequestHandlerRegistry handler = entry.getValue(); + assertEquals( + "handler not wrapped by " + SecurityServerTransportInterceptor.ProfileSecuredRequestHandler.class + + "; do all the handler registration methods have overrides?", + handler.toString(), + "ProfileSecuredRequestHandler{action='" + handler.getAction() + "', executorName='" + handler.getExecutor() + + "', forceExecution=" + handler.isForceExecution() + "}"); + } + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalStateSecurity.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalStateSecurity.java new file mode 100644 index 0000000000000..686c4ae12f1fc --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalStateSecurity.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.monitoring.Monitoring; + +import java.nio.file.Path; + +public class LocalStateSecurity extends LocalStateCompositeXPackPlugin { + + public LocalStateSecurity(final Settings settings, final Path configPath) throws Exception { + super(settings, configPath); + LocalStateSecurity thisVar = this; + plugins.add(new Monitoring(settings) { + @Override + protected SSLService getSslService() { + return thisVar.getSslService(); + } + + @Override + protected LicenseService getLicenseService() { + return thisVar.getLicenseService(); + } + + @Override + protected XPackLicenseState getLicenseState() { + return thisVar.getLicenseState(); + } + }); + plugins.add(new Security(settings, configPath) { + @Override + protected SSLService getSslService() { return thisVar.getSslService(); } + + @Override + protected XPackLicenseState getLicenseState() { return thisVar.getLicenseState(); } + }); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheckTests.java new file mode 100644 index 0000000000000..5610da6f75c6b --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheckTests.java @@ -0,0 +1,119 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security; + +import org.elasticsearch.bootstrap.BootstrapCheck; +import org.elasticsearch.bootstrap.BootstrapContext; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.hamcrest.Matchers; + +public class PkiRealmBootstrapCheckTests extends ESTestCase { + + public void testPkiRealmBootstrapDefault() throws Exception { + final Settings settings = Settings.EMPTY; + final Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); + assertFalse(runCheck(settings, env).isFailure()); + } + + public void testBootstrapCheckWithPkiRealm() throws Exception { + Settings settings = Settings.builder() + .put("xpack.security.authc.realms.test_pki.type", PkiRealmSettings.TYPE) + .put("path.home", createTempDir()) + .build(); + Environment env = TestEnvironment.newEnvironment(settings); + assertTrue(runCheck(settings, env).isFailure()); + + // enable transport tls + settings = Settings.builder().put(settings) + .put("xpack.security.transport.ssl.enabled", true) + .build(); + assertFalse(runCheck(settings, env).isFailure()); + + // disable client auth default + settings = Settings.builder().put(settings) + .put("xpack.ssl.client_authentication", "none") + .build(); + env = TestEnvironment.newEnvironment(settings); + assertTrue(runCheck(settings, env).isFailure()); + + // enable ssl for http + settings = Settings.builder().put(settings) + .put("xpack.security.http.ssl.enabled", true) + .build(); + env = TestEnvironment.newEnvironment(settings); + assertTrue(runCheck(settings, env).isFailure()); + + // enable client auth for http + settings = Settings.builder().put(settings) + .put("xpack.security.http.ssl.client_authentication", randomFrom("required", "optional")) + .build(); + env = TestEnvironment.newEnvironment(settings); + assertFalse(runCheck(settings, env).isFailure()); + + // disable http ssl + settings = Settings.builder().put(settings) + .put("xpack.security.http.ssl.enabled", false) + .build(); + env = TestEnvironment.newEnvironment(settings); + assertTrue(runCheck(settings, env).isFailure()); + + // set transport client auth + settings = Settings.builder().put(settings) + .put("xpack.security.transport.client_authentication", randomFrom("required", "optional")) + .build(); + env = TestEnvironment.newEnvironment(settings); + assertTrue(runCheck(settings, env).isFailure()); + + // test with transport profile + settings = Settings.builder().put(settings) + .put("xpack.security.transport.client_authentication", "none") + .put("transport.profiles.foo.xpack.security.ssl.client_authentication", randomFrom("required", "optional")) + .build(); + env = TestEnvironment.newEnvironment(settings); + assertFalse(runCheck(settings, env).isFailure()); + } + + private BootstrapCheck.BootstrapCheckResult runCheck(Settings settings, Environment env) throws Exception { + return new PkiRealmBootstrapCheck(settings, new SSLService(settings, env)).check(new BootstrapContext(settings, null)); + } + + public void testBootstrapCheckWithDisabledRealm() throws Exception { + Settings settings = Settings.builder() + .put("xpack.security.authc.realms.test_pki.type", PkiRealmSettings.TYPE) + 
.put("xpack.security.authc.realms.test_pki.enabled", false) + .put("xpack.ssl.client_authentication", "none") + .put("path.home", createTempDir()) + .build(); + Environment env = TestEnvironment.newEnvironment(settings); + assertFalse(runCheck(settings, env).isFailure()); + } + + public void testBootstrapCheckWithClosedSecuredSetting() throws Exception { + final boolean expectFail = randomBoolean(); + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("xpack.security.http.ssl.keystore.secure_password", "testnode"); + Settings settings = Settings.builder() + .put("xpack.security.authc.realms.test_pki.type", PkiRealmSettings.TYPE) + .put("xpack.security.http.ssl.enabled", true) + .put("xpack.security.http.ssl.client_authentication", expectFail ? "none" : "optional") + .put("xpack.security.http.ssl.keystore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks")) + .put("path.home", createTempDir()) + .setSecureSettings(secureSettings) + .build(); + + Environment env = TestEnvironment.newEnvironment(settings); + final PkiRealmBootstrapCheck check = new PkiRealmBootstrapCheck(settings, new SSLService(settings, env)); + secureSettings.close(); + assertThat(check.check(new BootstrapContext(settings, null)).isFailure(), Matchers.equalTo(expectFail)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java new file mode 100644 index 0000000000000..7ab26b0c33fef --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java @@ -0,0 +1,118 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security; + +import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.ScrollHelper; +import org.mockito.stubbing.Answer; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; + +import static org.mockito.Matchers.anyObject; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + + +public class ScrollHelperIntegTests extends ESSingleNodeTestCase { + + public void testFetchAllEntities() throws ExecutionException, InterruptedException { + Client client = client(); + int numDocs = randomIntBetween(5, 30); + for (int i = 0; i < numDocs; i++) { + client.prepareIndex("foo", "bar").setSource(Collections.singletonMap("number", i)).get(); + } + client.admin().indices().prepareRefresh("foo").get(); + SearchRequest request = client.prepareSearch() + .setScroll(TimeValue.timeValueHours(10L)) + .setQuery(QueryBuilders.matchAllQuery()) + .setSize(randomIntBetween(1, 10)) + .setFetchSource(true) + .request(); + request.indicesOptions().ignoreUnavailable(); + PlainActionFuture<Collection<Integer>> future = new PlainActionFuture<>(); + ScrollHelper.fetchAllByEntity(client(), request, future, + (hit) -> Integer.parseInt(hit.getSourceAsMap().get("number").toString())); + Collection<Integer> integers = future.actionGet(); + ArrayList<Integer> list = new ArrayList<>(integers); + CollectionUtil.timSort(list); + assertEquals(numDocs, list.size()); + for (int i = 0; i < numDocs; i++) { + assertEquals(list.get(i).intValue(), i); + } + } + + /** + * Tests that + * {@link ScrollHelper#fetchAllByEntity(Client, SearchRequest, ActionListener, Function)} + * defends against scrolls broken in such a way that the remote Elasticsearch returns infinite results. While Elasticsearch + * shouldn't do this, it has in the past, and it is very bad when it does. It takes out the whole node. So + * this makes sure we defend against it properly.
+ */ + public void testFetchAllByEntityWithBrokenScroll() { + Client client = mock(Client.class); + ThreadPool threadPool = mock(ThreadPool.class); + when(client.threadPool()).thenReturn(threadPool); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + SearchRequest request = new SearchRequest(); + + String scrollId = randomAlphaOfLength(5); + SearchHit[] hits = new SearchHit[] {new SearchHit(1), new SearchHit(2)}; + InternalSearchResponse internalResponse = new InternalSearchResponse(new SearchHits(hits, 3, 1), null, null, null, false, false, 1); + SearchResponse response = new SearchResponse(internalResponse, scrollId, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY); + + Answer returnResponse = invocation -> { + @SuppressWarnings("unchecked") + ActionListener<SearchResponse> listener = (ActionListener<SearchResponse>) invocation.getArguments()[1]; + listener.onResponse(response); + return null; + }; + doAnswer(returnResponse).when(client).search(eq(request), anyObject()); + /* The line below simulates the evil cluster. A working cluster would return + * a response with 0 hits. Our simulated broken cluster returns the same + * response over and over again. */ + doAnswer(returnResponse).when(client).searchScroll(anyObject(), anyObject()); + + AtomicReference<Exception> failure = new AtomicReference<>(); + ScrollHelper.fetchAllByEntity(client, request, new ActionListener<Collection<SearchHit>>() { + @Override + public void onResponse(Collection<SearchHit> response) { + fail("This shouldn't succeed."); + } + + @Override + public void onFailure(Exception e) { + failure.set(e); + } + }, Function.identity()); + + assertNotNull("onFailure wasn't called", failure.get()); + assertEquals("scrolling returned more hits [4] than expected [3] so bailing out to prevent unbounded memory consumption.", + failure.get().getMessage()); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java new file mode 100644 index 0000000000000..e3b1cd31246a2 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.security; + +import org.elasticsearch.Version; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.concurrent.ThreadContext.StoredContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.junit.Before; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicReference; + +public class SecurityContextTests extends ESTestCase { + + private Settings settings; + private ThreadContext threadContext; + private SecurityContext securityContext; + + @Before + public void buildSecurityContext() throws IOException { + settings = Settings.builder() + .put("path.home", createTempDir()) + .build(); + threadContext = new ThreadContext(settings); + securityContext = new SecurityContext(settings, threadContext); + } + + public void testGetAuthenticationAndUserInEmptyContext() throws IOException { + assertNull(securityContext.getAuthentication()); + assertNull(securityContext.getUser()); + } + + public void testGetAuthenticationAndUser() throws IOException { + final User user = new User("test"); + final Authentication authentication = new Authentication(user, new RealmRef("ldap", "foo", "node1"), null); + authentication.writeToContext(threadContext); + + assertEquals(authentication, securityContext.getAuthentication()); + assertEquals(user, securityContext.getUser()); + } + + public void testSetUser() { + final User user = new User("test"); + assertNull(securityContext.getAuthentication()); + assertNull(securityContext.getUser()); + securityContext.setUser(user, Version.CURRENT); + assertEquals(user, securityContext.getUser()); + + IllegalStateException e = expectThrows(IllegalStateException.class, + () -> securityContext.setUser(randomFrom(user, SystemUser.INSTANCE), Version.CURRENT)); + assertEquals("authentication is already present in the context", e.getMessage()); + } + + public void testExecuteAsUser() throws IOException { + final User original; + if (randomBoolean()) { + original = new User("test"); + final Authentication authentication = new Authentication(original, new RealmRef("ldap", "foo", "node1"), null); + authentication.writeToContext(threadContext); + } else { + original = null; + } + + final User executionUser = new User("executor"); + final AtomicReference contextAtomicReference = new AtomicReference<>(); + securityContext.executeAsUser(executionUser, (originalCtx) -> { + assertEquals(executionUser, securityContext.getUser()); + contextAtomicReference.set(originalCtx); + }, Version.CURRENT); + + final User userAfterExecution = securityContext.getUser(); + assertEquals(original, userAfterExecution); + StoredContext originalContext = contextAtomicReference.get(); + assertNotNull(originalContext); + originalContext.restore(); + assertEquals(original, securityContext.getUser()); + } + + public void testExecuteAfterRewritingAuthentication() throws IOException { + User user = new User("test", null, new User("authUser")); + RealmRef authBy = new RealmRef("ldap", "foo", "node1"); + final Authentication original = new Authentication(user, authBy, authBy); + 
original.writeToContext(threadContext); + + final AtomicReference contextAtomicReference = new AtomicReference<>(); + securityContext.executeAfterRewritingAuthentication(originalCtx -> { + Authentication authentication = securityContext.getAuthentication(); + assertEquals(original.getUser(), authentication.getUser()); + assertEquals(original.getAuthenticatedBy(), authentication.getAuthenticatedBy()); + assertEquals(original.getLookedUpBy(), authentication.getLookedUpBy()); + assertEquals(VersionUtils.getPreviousVersion(), authentication.getVersion()); + contextAtomicReference.set(originalCtx); + }, VersionUtils.getPreviousVersion()); + + final Authentication authAfterExecution = securityContext.getAuthentication(); + assertEquals(original, authAfterExecution); + StoredContext originalContext = contextAtomicReference.get(); + assertNotNull(originalContext); + originalContext.restore(); + assertEquals(original, securityContext.getAuthentication()); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java new file mode 100644 index 0000000000000..c169d62c6b17b --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java @@ -0,0 +1,226 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.SecurityFeatureSetUsage; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.security.authc.Realms; +import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; +import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; +import org.elasticsearch.xpack.security.transport.filter.IPFilter; +import org.junit.Before; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.emptyIterable; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.core.Is.is; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SecurityFeatureSetTests extends ESTestCase { + + private Settings settings; + private XPackLicenseState licenseState; + private 
Realms realms; + private IPFilter ipFilter; + private CompositeRolesStore rolesStore; + private NativeRoleMappingStore roleMappingStore; + + @Before + public void init() throws Exception { + settings = Settings.builder().put("path.home", createTempDir()).build(); + licenseState = mock(XPackLicenseState.class); + when(licenseState.isSecurityEnabled()).thenReturn(true); + realms = mock(Realms.class); + ipFilter = mock(IPFilter.class); + rolesStore = mock(CompositeRolesStore.class); + roleMappingStore = mock(NativeRoleMappingStore.class); + } + + public void testAvailable() { + SecurityFeatureSet featureSet = new SecurityFeatureSet(settings, licenseState, realms, + rolesStore, roleMappingStore, ipFilter); + when(licenseState.isSecurityAvailable()).thenReturn(true); + assertThat(featureSet.available(), is(true)); + + when(licenseState.isSecurityAvailable()).thenReturn(false); + assertThat(featureSet.available(), is(false)); + } + + public void testEnabled() { + SecurityFeatureSet featureSet = new SecurityFeatureSet(settings, licenseState, realms, + rolesStore, roleMappingStore, ipFilter); + assertThat(featureSet.enabled(), is(true)); + + when(licenseState.isSecurityEnabled()).thenReturn(false); + featureSet = new SecurityFeatureSet(settings, licenseState, realms, + rolesStore, roleMappingStore, ipFilter); + assertThat(featureSet.enabled(), is(false)); + } + + public void testUsage() throws Exception { + final boolean authcAuthzAvailable = randomBoolean(); + when(licenseState.isSecurityAvailable()).thenReturn(authcAuthzAvailable); + + Settings.Builder settings = Settings.builder().put(this.settings); + + boolean enabled = randomBoolean(); + when(licenseState.isSecurityEnabled()).thenReturn(enabled); + + final boolean httpSSLEnabled = randomBoolean(); + settings.put("xpack.security.http.ssl.enabled", httpSSLEnabled); + final boolean transportSSLEnabled = randomBoolean(); + settings.put("xpack.security.transport.ssl.enabled", transportSSLEnabled); + final boolean auditingEnabled = randomBoolean(); + settings.put(XPackSettings.AUDIT_ENABLED.getKey(), auditingEnabled); + final String[] auditOutputs = randomFrom( + new String[] { "logfile" }, + new String[] { "index" }, + new String[] { "logfile", "index" } + ); + settings.putList(Security.AUDIT_OUTPUTS_SETTING.getKey(), auditOutputs); + final boolean httpIpFilterEnabled = randomBoolean(); + final boolean transportIPFilterEnabled = randomBoolean(); + when(ipFilter.usageStats()) + .thenReturn(MapBuilder.<String, Object>newMapBuilder() + .put("http", Collections.singletonMap("enabled", httpIpFilterEnabled)) + .put("transport", Collections.singletonMap("enabled", transportIPFilterEnabled)) + .map()); + + + final boolean rolesStoreEnabled = randomBoolean(); + doAnswer(invocationOnMock -> { + ActionListener<Map<String, Object>> listener = (ActionListener<Map<String, Object>>) invocationOnMock.getArguments()[0]; + if (rolesStoreEnabled) { + listener.onResponse(Collections.singletonMap("count", 1)); + } else { + listener.onResponse(Collections.emptyMap()); + } + return Void.TYPE; + }).when(rolesStore).usageStats(any(ActionListener.class)); + + final boolean roleMappingStoreEnabled = randomBoolean(); + doAnswer(invocationOnMock -> { + ActionListener<Map<String, Object>> listener = (ActionListener<Map<String, Object>>) invocationOnMock.getArguments()[0]; + if (roleMappingStoreEnabled) { + final Map<String, Object> map = new HashMap<>(); + map.put("size", 12L); + map.put("enabled", 10L); + listener.onResponse(map); + } else { + listener.onResponse(Collections.emptyMap()); + } + return Void.TYPE; + }).when(roleMappingStore).usageStats(any(ActionListener.class)); + + Map<String, Object>
realmsUsageStats = new HashMap<>(); + for (int i = 0; i < 5; i++) { + Map realmUsage = new HashMap<>(); + realmsUsageStats.put("type" + i, realmUsage); + realmUsage.put("key1", Arrays.asList("value" + i)); + realmUsage.put("key2", Arrays.asList(i)); + realmUsage.put("key3", Arrays.asList(i % 2 == 0)); + } + when(realms.usageStats()).thenReturn(realmsUsageStats); + + final boolean anonymousEnabled = randomBoolean(); + if (anonymousEnabled) { + settings.put(AnonymousUser.ROLES_SETTING.getKey(), "foo"); + } + + SecurityFeatureSet featureSet = new SecurityFeatureSet(settings.build(), licenseState, + realms, rolesStore, roleMappingStore, ipFilter); + PlainActionFuture future = new PlainActionFuture<>(); + featureSet.usage(future); + XPackFeatureSet.Usage securityUsage = future.get(); + BytesStreamOutput out = new BytesStreamOutput(); + securityUsage.writeTo(out); + XPackFeatureSet.Usage serializedUsage = new SecurityFeatureSetUsage(out.bytes().streamInput()); + for (XPackFeatureSet.Usage usage : Arrays.asList(securityUsage, serializedUsage)) { + assertThat(usage, is(notNullValue())); + assertThat(usage.name(), is(XPackField.SECURITY)); + assertThat(usage.enabled(), is(enabled)); + assertThat(usage.available(), is(authcAuthzAvailable)); + XContentSource source; + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + usage.toXContent(builder, ToXContent.EMPTY_PARAMS); + source = new XContentSource(builder); + } + + if (enabled) { + if (authcAuthzAvailable) { + for (int i = 0; i < 5; i++) { + assertThat(source.getValue("realms.type" + i + ".key1"), contains("value" + i)); + assertThat(source.getValue("realms.type" + i + ".key2"), contains(i)); + assertThat(source.getValue("realms.type" + i + ".key3"), contains(i % 2 == 0)); + } + } else { + assertThat(source.getValue("realms"), is(notNullValue())); + } + + // check SSL + assertThat(source.getValue("ssl.http.enabled"), is(httpSSLEnabled)); + assertThat(source.getValue("ssl.transport.enabled"), is(transportSSLEnabled)); + + // auditing + assertThat(source.getValue("audit.enabled"), is(auditingEnabled)); + assertThat(source.getValue("audit.outputs"), contains(auditOutputs)); + + // ip filter + assertThat(source.getValue("ipfilter.http.enabled"), is(httpIpFilterEnabled)); + assertThat(source.getValue("ipfilter.transport.enabled"), is(transportIPFilterEnabled)); + + // roles + if (rolesStoreEnabled) { + assertThat(source.getValue("roles.count"), is(1)); + } else { + assertThat(((Map) source.getValue("roles")).isEmpty(), is(true)); + } + + // role-mapping + if (roleMappingStoreEnabled) { + assertThat(source.getValue("role_mapping.native.size"), is(12)); + assertThat(source.getValue("role_mapping.native.enabled"), is(10)); + } else { + final Map roleMapping = source.getValue("role_mapping.native"); + assertThat(roleMapping.entrySet(), emptyIterable()); + } + + // anonymous + assertThat(source.getValue("anonymous.enabled"), is(anonymousEnabled)); + } else { + assertThat(source.getValue("realms"), is(nullValue())); + assertThat(source.getValue("ssl"), is(nullValue())); + assertThat(source.getValue("audit"), is(nullValue())); + assertThat(source.getValue("anonymous"), is(nullValue())); + assertThat(source.getValue("ipfilter"), is(nullValue())); + assertThat(source.getValue("roles"), is(nullValue())); + } + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityLifecycleServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityLifecycleServiceTests.java new file 
mode 100644 index 0000000000000..af00d4ac616e0 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityLifecycleServiceTests.java @@ -0,0 +1,243 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Map; +import java.util.concurrent.CopyOnWriteArrayList; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.FilterClient; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.MockTransportClient; +import org.elasticsearch.xpack.core.security.SecurityLifecycleServiceField; +import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail; +import org.elasticsearch.xpack.security.support.IndexLifecycleManager; +import org.elasticsearch.xpack.security.test.SecurityTestUtils; +import org.elasticsearch.xpack.core.template.TemplateUtils; +import org.junit.After; +import org.junit.Before; + +import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.elasticsearch.xpack.core.security.SecurityLifecycleServiceField.SECURITY_TEMPLATE_NAME; +import static org.elasticsearch.xpack.security.SecurityLifecycleService.securityIndexMappingUpToDate; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SecurityLifecycleServiceTests extends ESTestCase { + private TransportClient transportClient; + private ThreadPool threadPool; + private SecurityLifecycleService securityLifecycleService; + private static final ClusterState EMPTY_CLUSTER_STATE = + new ClusterState.Builder(new ClusterName("test-cluster")).build(); + private CopyOnWriteArrayList listeners; + + @Before + public void setup() { + DiscoveryNode localNode = mock(DiscoveryNode.class); + when(localNode.getHostAddress()).thenReturn(buildNewFakeTransportAddress().toString()); + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.localNode()).thenReturn(localNode); + + threadPool = new TestThreadPool("security template service tests"); + transportClient = new 
MockTransportClient(Settings.EMPTY); + Client client = new FilterClient(transportClient) { + @Override + protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> + void doExecute(Action<Request, Response, RequestBuilder> action, Request request, + ActionListener<Response> listener) { + listeners.add(listener); + } + }; + securityLifecycleService = new SecurityLifecycleService(Settings.EMPTY, clusterService, + threadPool, client, mock(IndexAuditTrail.class)); + listeners = new CopyOnWriteArrayList<>(); + } + + @After + public void stop() throws InterruptedException { + if (transportClient != null) { + transportClient.close(); + } + terminate(threadPool); + } + + public void testIndexTemplateIsIdentifiedAsUpToDate() throws IOException { + ClusterState.Builder clusterStateBuilder = createClusterStateWithTemplate( + "/" + SECURITY_TEMPLATE_NAME + ".json" + ); + securityLifecycleService.clusterChanged(new ClusterChangedEvent("test-event", + clusterStateBuilder.build(), EMPTY_CLUSTER_STATE)); + // No upgrade actions run + assertThat(listeners.size(), equalTo(0)); + } + + public void testIndexTemplateVersionMatching() throws Exception { + String templateString = "/" + SECURITY_TEMPLATE_NAME + ".json"; + ClusterState.Builder clusterStateBuilder = createClusterStateWithTemplate(templateString); + final ClusterState clusterState = clusterStateBuilder.build(); + + assertTrue(IndexLifecycleManager.checkTemplateExistsAndVersionMatches( + SecurityLifecycleServiceField.SECURITY_TEMPLATE_NAME, clusterState, logger, + Version.V_5_0_0::before)); + assertFalse(IndexLifecycleManager.checkTemplateExistsAndVersionMatches( + SecurityLifecycleServiceField.SECURITY_TEMPLATE_NAME, clusterState, logger, + Version.V_5_0_0::after)); + } + + public void testUpToDateMappingsAreIdentifiedAsUpToDate() throws IOException { + String securityTemplateString = "/" + SECURITY_TEMPLATE_NAME + ".json"; + ClusterState.Builder clusterStateBuilder = createClusterStateWithMappingAndTemplate(securityTemplateString); + securityLifecycleService.clusterChanged(new ClusterChangedEvent("test-event", + clusterStateBuilder.build(), EMPTY_CLUSTER_STATE)); + assertThat(listeners.size(), equalTo(0)); + } + + public void testMappingVersionMatching() throws IOException { + String templateString = "/" + SECURITY_TEMPLATE_NAME + ".json"; + ClusterState.Builder clusterStateBuilder = createClusterStateWithMappingAndTemplate(templateString); + securityLifecycleService.clusterChanged(new ClusterChangedEvent("test-event", + clusterStateBuilder.build(), EMPTY_CLUSTER_STATE)); + final IndexLifecycleManager securityIndex = securityLifecycleService.securityIndex(); + assertTrue(securityIndex.checkMappingVersion(Version.V_5_0_0::before)); + assertFalse(securityIndex.checkMappingVersion(Version.V_5_0_0::after)); + } + + public void testMissingVersionMappingThrowsError() throws IOException { + String templateString = "/missing-version-" + SECURITY_TEMPLATE_NAME + ".json"; + ClusterState.Builder clusterStateBuilder = createClusterStateWithMappingAndTemplate(templateString); + final ClusterState clusterState = clusterStateBuilder.build(); + IllegalStateException exception = expectThrows(IllegalStateException.class, + () -> securityIndexMappingUpToDate(clusterState, logger)); + assertEquals("Cannot read security-version string in index " + SECURITY_INDEX_NAME, + exception.getMessage()); + } + + public void testMissingIndexIsIdentifiedAsUpToDate() throws IOException { + final ClusterName clusterName = new ClusterName("test-cluster"); + final ClusterState.Builder clusterStateBuilder = ClusterState.builder(clusterName); + String mappingString = "/" +
SECURITY_TEMPLATE_NAME + ".json"; + IndexTemplateMetaData.Builder templateMeta = getIndexTemplateMetaData(SECURITY_TEMPLATE_NAME, mappingString); + MetaData.Builder builder = new MetaData.Builder(clusterStateBuilder.build().getMetaData()); + builder.put(templateMeta); + clusterStateBuilder.metaData(builder); + securityLifecycleService.clusterChanged(new ClusterChangedEvent("test-event", clusterStateBuilder.build() + , EMPTY_CLUSTER_STATE)); + assertThat(listeners.size(), equalTo(0)); + } + + private ClusterState.Builder createClusterStateWithMapping(String securityTemplateString) throws IOException { + final ClusterState clusterState = createClusterStateWithIndex(securityTemplateString).build(); + final String indexName = clusterState.metaData().getAliasAndIndexLookup() + .get(SECURITY_INDEX_NAME).getIndices().get(0).getIndex().getName(); + return ClusterState.builder(clusterState).routingTable(SecurityTestUtils.buildIndexRoutingTable(indexName)); + } + + private ClusterState.Builder createClusterStateWithMappingAndTemplate(String securityTemplateString) throws IOException { + ClusterState.Builder clusterStateBuilder = createClusterStateWithMapping(securityTemplateString); + MetaData.Builder metaDataBuilder = new MetaData.Builder(clusterStateBuilder.build().metaData()); + String securityMappingString = "/" + SECURITY_TEMPLATE_NAME + ".json"; + IndexTemplateMetaData.Builder securityTemplateMeta = getIndexTemplateMetaData(SECURITY_TEMPLATE_NAME, securityMappingString); + metaDataBuilder.put(securityTemplateMeta); + return clusterStateBuilder.metaData(metaDataBuilder); + } + + private static IndexMetaData.Builder createIndexMetadata(String indexName, String templateString) throws IOException { + String template = TemplateUtils.loadTemplate(templateString, Version.CURRENT.toString(), + IndexLifecycleManager.TEMPLATE_VERSION_PATTERN); + PutIndexTemplateRequest request = new PutIndexTemplateRequest(); + request.source(template, XContentType.JSON); + IndexMetaData.Builder indexMetaData = IndexMetaData.builder(indexName); + indexMetaData.settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .build()); + + for (Map.Entry entry : request.mappings().entrySet()) { + indexMetaData.putMapping(entry.getKey(), entry.getValue()); + } + return indexMetaData; + } + + public ClusterState.Builder createClusterStateWithTemplate(String securityTemplateString) throws IOException { + // add the correct mapping no matter what the template + ClusterState clusterState = createClusterStateWithIndex("/" + SECURITY_TEMPLATE_NAME + ".json").build(); + final MetaData.Builder metaDataBuilder = new MetaData.Builder(clusterState.metaData()); + metaDataBuilder.put(getIndexTemplateMetaData(SECURITY_TEMPLATE_NAME, securityTemplateString)); + return ClusterState.builder(clusterState).metaData(metaDataBuilder); + } + + private ClusterState.Builder createClusterStateWithIndex(String securityTemplate) throws IOException { + final MetaData.Builder metaDataBuilder = new MetaData.Builder(); + final boolean withAlias = randomBoolean(); + final String securityIndexName = SECURITY_INDEX_NAME + (withAlias ? 
"-" + randomAlphaOfLength(5) : ""); + metaDataBuilder.put(createIndexMetadata(securityIndexName, securityTemplate)); + + ClusterState.Builder clusterStateBuilder = ClusterState.builder(state()); + if (withAlias) { + // try with .security index as an alias + clusterStateBuilder.metaData(SecurityTestUtils.addAliasToMetaData(metaDataBuilder.build(), securityIndexName)); + } else { + // try with .security index as a concrete index + clusterStateBuilder.metaData(metaDataBuilder); + } + + clusterStateBuilder.routingTable(SecurityTestUtils.buildIndexRoutingTable(securityIndexName)); + return clusterStateBuilder; + } + + private static IndexTemplateMetaData.Builder getIndexTemplateMetaData( + String templateName, String templateString) throws IOException { + + String template = TemplateUtils.loadTemplate(templateString, Version.CURRENT.toString(), + IndexLifecycleManager.TEMPLATE_VERSION_PATTERN); + PutIndexTemplateRequest request = new PutIndexTemplateRequest(); + request.source(template, XContentType.JSON); + IndexTemplateMetaData.Builder templateBuilder = IndexTemplateMetaData.builder(templateName) + .patterns(Arrays.asList(generateRandomStringArray(10, 100, false, false))); + for (Map.Entry entry : request.mappings().entrySet()) { + templateBuilder.putMapping(entry.getKey(), entry.getValue()); + } + return templateBuilder; + } + + // cluster state where local node is master + private static ClusterState state() { + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + discoBuilder.masterNodeId("1"); + discoBuilder.localNodeId("1"); + ClusterState.Builder state = ClusterState.builder(new ClusterName("test-cluster")); + state.nodes(discoBuilder); + state.metaData(MetaData.builder().generateClusterUuidIfNeeded()); + return state.build(); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityPluginTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityPluginTests.java new file mode 100644 index 0000000000000..3f74437032980 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityPluginTests.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security; + +import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestStatus.OK; +import static org.elasticsearch.rest.RestStatus.UNAUTHORIZED; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.is; + +public class SecurityPluginTests extends SecurityIntegTestCase { + + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put("http.enabled", true) //This test requires HTTP + .build(); + } + + public void testThatPluginIsLoaded() throws IOException { + try { + logger.info("executing unauthorized request to /_xpack info"); + getRestClient().performRequest("GET", "/_xpack"); + fail("request should have failed"); + } catch(ResponseException e) { + assertThat(e.getResponse().getStatusLine().getStatusCode(), is(UNAUTHORIZED.getStatus())); + } + + logger.info("executing authorized request to /_xpack infos"); + Response response = getRestClient().performRequest("GET", "/_xpack", + new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + basicAuthHeaderValue(SecuritySettingsSource.TEST_USER_NAME, + new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())))); + assertThat(response.getStatusLine().getStatusCode(), is(OK.getStatus())); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecuritySettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecuritySettingsTests.java new file mode 100644 index 0000000000000..54ab9c1abb2ff --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecuritySettingsTests.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField; + +import java.util.Collections; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class SecuritySettingsTests extends ESTestCase { + + public void testValidAutoCreateIndex() { + Security.validateAutoCreateIndex(Settings.EMPTY); + Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", true).build()); + Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", false).build()); + Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".security,.security-6").build()); + Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".security*").build()); + Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", "*s*").build()); + Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".s*").build()); + Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", "foo").build()); + Security.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".security_audit_log*").build()); + + Security.validateAutoCreateIndex(Settings.builder() + .put("action.auto_create_index", ".security,.security-6") + .put(XPackSettings.AUDIT_ENABLED.getKey(), true) + .build()); + + try { + Security.validateAutoCreateIndex(Settings.builder() + .put("action.auto_create_index", ".security,.security-6") + .put(XPackSettings.AUDIT_ENABLED.getKey(), true) + .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), randomFrom("index", "logfile,index")) + .build()); + fail("IllegalArgumentException expected"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString(IndexAuditTrailField.INDEX_NAME_PREFIX)); + } + + Security.validateAutoCreateIndex(Settings.builder() + .put("action.auto_create_index", ".security_audit_log*,.security,.security-6") + .put(XPackSettings.AUDIT_ENABLED.getKey(), true) + .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), randomFrom("index", "logfile,index")) + .build()); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java new file mode 100644 index 0000000000000..7d751a802463e --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -0,0 +1,392 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security; + +import org.elasticsearch.Version; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.license.License; +import org.elasticsearch.license.TestUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.plugins.MapperPlugin; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.SecurityExtension; +import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; +import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.audit.AuditTrailService; +import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail; +import org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail; +import org.elasticsearch.xpack.security.authc.Realms; +import org.junit.Before; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.BiConsumer; +import java.util.function.Function; +import java.util.function.Predicate; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING; +import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.elasticsearch.xpack.security.support.IndexLifecycleManager.INTERNAL_INDEX_FORMAT; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SecurityTests extends ESTestCase { + + private Security security = null; + private ThreadContext threadContext = null; + private TestUtils.UpdatableLicenseState licenseState; + + public static class DummyExtension implements SecurityExtension { + private 
String realmType; + DummyExtension(String realmType) { + this.realmType = realmType; + } + @Override + public Map<String, Realm.Factory> getRealms(ResourceWatcherService resourceWatcherService) { + return Collections.singletonMap(realmType, config -> null); + } + } + + private Collection<Object> createComponents(Settings testSettings, SecurityExtension... extensions) throws Exception { + if (security != null) { + throw new IllegalStateException("Security object already exists (" + security + ")"); + } + Settings settings = Settings.builder() + .put("xpack.security.enabled", true) + .put(testSettings) + .put("path.home", createTempDir()).build(); + Environment env = TestEnvironment.newEnvironment(settings); + licenseState = new TestUtils.UpdatableLicenseState(settings); + SSLService sslService = new SSLService(settings, env); + security = new Security(settings, null, Arrays.asList(extensions)) { + @Override + protected XPackLicenseState getLicenseState() { + return licenseState; + } + + @Override + protected SSLService getSslService() { + return sslService; + } + }; + ThreadPool threadPool = mock(ThreadPool.class); + ClusterService clusterService = mock(ClusterService.class); + settings = Security.additionalSettings(settings, true, false); + Set<Setting<?>> allowedSettings = new HashSet<>(Security.getSettings(false, null)); + allowedSettings.addAll(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + ClusterSettings clusterSettings = new ClusterSettings(settings, allowedSettings); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + when(threadPool.relativeTimeInMillis()).thenReturn(1L); + threadContext = new ThreadContext(Settings.EMPTY); + when(threadPool.getThreadContext()).thenReturn(threadContext); + Client client = mock(Client.class); + when(client.threadPool()).thenReturn(threadPool); + when(client.settings()).thenReturn(settings); + return security.createComponents(client, threadPool, clusterService, mock(ResourceWatcherService.class)); + } + + private static <T> T findComponent(Class<T> type, Collection<Object> components) { + for (Object obj : components) { + if (type.isInstance(obj)) { + return type.cast(obj); + } + } + return null; + } + + @Before + public void cleanup() throws IOException { + if (threadContext != null) { + threadContext.stashContext(); + threadContext.close(); + threadContext = null; + } + } + + public void testCustomRealmExtension() throws Exception { + Collection<Object> components = createComponents(Settings.EMPTY, new DummyExtension("myrealm")); + Realms realms = findComponent(Realms.class, components); + assertNotNull(realms); + assertNotNull(realms.realmFactory("myrealm")); + } + + public void testCustomRealmExtensionConflict() throws Exception { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> createComponents(Settings.EMPTY, new DummyExtension(FileRealmSettings.TYPE))); + assertEquals("Realm type [" + FileRealmSettings.TYPE + "] is already registered", e.getMessage()); + } + + + public void testAuditEnabled() throws Exception { + Settings settings = Settings.builder().put(XPackSettings.AUDIT_ENABLED.getKey(), true).build(); + Collection<Object> components = createComponents(settings); + AuditTrailService service = findComponent(AuditTrailService.class, components); + assertNotNull(service); + assertEquals(1, service.getAuditTrails().size()); + assertEquals(LoggingAuditTrail.NAME, service.getAuditTrails().get(0).name()); + } + + public void testDisabledByDefault() throws Exception { + Collection<Object> components = createComponents(Settings.EMPTY); + AuditTrailService
auditTrailService = findComponent(AuditTrailService.class, components); + assertEquals(0, auditTrailService.getAuditTrails().size()); + } + + public void testIndexAuditTrail() throws Exception { + Settings settings = Settings.builder() + .put(XPackSettings.AUDIT_ENABLED.getKey(), true) + .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), "index").build(); + Collection components = createComponents(settings); + AuditTrailService service = findComponent(AuditTrailService.class, components); + assertNotNull(service); + assertEquals(1, service.getAuditTrails().size()); + assertEquals(IndexAuditTrail.NAME, service.getAuditTrails().get(0).name()); + } + + public void testIndexAndLoggingAuditTrail() throws Exception { + Settings settings = Settings.builder() + .put(XPackSettings.AUDIT_ENABLED.getKey(), true) + .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), "index,logfile").build(); + Collection components = createComponents(settings); + AuditTrailService service = findComponent(AuditTrailService.class, components); + assertNotNull(service); + assertEquals(2, service.getAuditTrails().size()); + assertEquals(IndexAuditTrail.NAME, service.getAuditTrails().get(0).name()); + assertEquals(LoggingAuditTrail.NAME, service.getAuditTrails().get(1).name()); + } + + public void testUnknownOutput() { + Settings settings = Settings.builder() + .put(XPackSettings.AUDIT_ENABLED.getKey(), true) + .put(Security.AUDIT_OUTPUTS_SETTING.getKey(), "foo").build(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> createComponents(settings)); + assertEquals("Unknown audit trail output [foo]", e.getMessage()); + } + + public void testHttpSettingDefaults() throws Exception { + final Settings defaultSettings = Security.additionalSettings(Settings.EMPTY, true, false); + assertThat(SecurityField.NAME4, equalTo(NetworkModule.TRANSPORT_TYPE_SETTING.get(defaultSettings))); + assertThat(SecurityField.NAME4, equalTo(NetworkModule.HTTP_TYPE_SETTING.get(defaultSettings))); + } + + public void testTransportSettingNetty4Both() { + Settings both4 = Security.additionalSettings(Settings.builder() + .put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4) + .put(NetworkModule.HTTP_TYPE_KEY, SecurityField.NAME4) + .build(), true, false); + assertFalse(NetworkModule.TRANSPORT_TYPE_SETTING.exists(both4)); + assertFalse(NetworkModule.HTTP_TYPE_SETTING.exists(both4)); + } + + public void testTransportSettingValidation() { + final String badType = randomFrom("netty4", "other", "security1"); + Settings settingsTransport = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, badType).build(); + IllegalArgumentException badTransport = expectThrows(IllegalArgumentException.class, + () -> Security.additionalSettings(settingsTransport, true, false)); + assertThat(badTransport.getMessage(), containsString(SecurityField.NAME4)); + assertThat(badTransport.getMessage(), containsString(NetworkModule.TRANSPORT_TYPE_KEY)); + + Settings settingsHttp = Settings.builder().put(NetworkModule.HTTP_TYPE_KEY, badType).build(); + IllegalArgumentException badHttp = expectThrows(IllegalArgumentException.class, + () -> Security.additionalSettings(settingsHttp, true, false)); + assertThat(badHttp.getMessage(), containsString(SecurityField.NAME4)); + assertThat(badHttp.getMessage(), containsString(NetworkModule.HTTP_TYPE_KEY)); + } + + public void testSettingFilter() throws Exception { + createComponents(Settings.EMPTY); + final List filter = security.getSettingsFilter(); + assertThat(filter, 
hasItem(SecurityField.setting("authc.realms.*.bind_dn"))); + assertThat(filter, hasItem(SecurityField.setting("authc.realms.*.bind_password"))); + assertThat(filter, hasItem(SecurityField.setting("authc.realms.*." + SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING))); + assertThat(filter, hasItem(SecurityField.setting("authc.realms.*.ssl.truststore.password"))); + assertThat(filter, hasItem(SecurityField.setting("authc.realms.*.ssl.truststore.path"))); + assertThat(filter, hasItem(SecurityField.setting("authc.realms.*.ssl.truststore.algorithm"))); + } + + public void testJoinValidatorOnDisabledSecurity() throws Exception { + Settings disabledSettings = Settings.builder().put("xpack.security.enabled", false).build(); + createComponents(disabledSettings); + BiConsumer joinValidator = security.getJoinValidator(); + assertNull(joinValidator); + } + + public void testTLSJoinValidator() throws Exception { + createComponents(Settings.EMPTY); + BiConsumer joinValidator = security.getJoinValidator(); + assertNotNull(joinValidator); + DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); + joinValidator.accept(node, ClusterState.builder(ClusterName.DEFAULT).build()); + int numIters = randomIntBetween(1,10); + for (int i = 0; i < numIters; i++) { + boolean tlsOn = randomBoolean(); + String discoveryType = randomFrom("single-node", "zen", randomAlphaOfLength(4)); + Security.ValidateTLSOnJoin validator = new Security.ValidateTLSOnJoin(tlsOn, discoveryType); + MetaData.Builder builder = MetaData.builder(); + License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); + TestUtils.putLicense(builder, license); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metaData(builder.build()).build(); + EnumSet productionModes = EnumSet.of(License.OperationMode.GOLD, License.OperationMode.PLATINUM, + License.OperationMode.STANDARD); + if (productionModes.contains(license.operationMode()) && tlsOn == false && "single-node".equals(discoveryType) == false) { + IllegalStateException ise = expectThrows(IllegalStateException.class, () -> validator.accept(node, state)); + assertEquals("TLS setup is required for license type [" + license.operationMode().name() + "]", ise.getMessage()); + } else { + validator.accept(node, state); + } + validator.accept(node, ClusterState.builder(ClusterName.DEFAULT).metaData(MetaData.builder().build()).build()); + } + } + + public void testIndexJoinValidator_Old_And_Rolling() throws Exception { + createComponents(Settings.EMPTY); + BiConsumer joinValidator = security.getJoinValidator(); + assertNotNull(joinValidator); + DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); + IndexMetaData indexMetaData = IndexMetaData.builder(SECURITY_INDEX_NAME) + .settings(settings(Version.V_6_1_0).put(INDEX_FORMAT_SETTING.getKey(), INTERNAL_INDEX_FORMAT - 1)) + .numberOfShards(1).numberOfReplicas(0) + .build(); + DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), Version.V_6_1_0); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(existingOtherNode).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(discoveryNodes) + .metaData(MetaData.builder().put(indexMetaData, true).build()).build(); + IllegalStateException e = expectThrows(IllegalStateException.class, + () -> joinValidator.accept(node, clusterState)); + assertThat(e.getMessage(), equalTo("Security index is not on the current version [6] 
- " + + "The Upgrade API must be run for 7.x nodes to join the cluster")); + } + + public void testIndexJoinValidator_FullyCurrentCluster() throws Exception { + createComponents(Settings.EMPTY); + BiConsumer joinValidator = security.getJoinValidator(); + assertNotNull(joinValidator); + DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); + int indexFormat = randomBoolean() ? INTERNAL_INDEX_FORMAT : INTERNAL_INDEX_FORMAT - 1; + IndexMetaData indexMetaData = IndexMetaData.builder(SECURITY_INDEX_NAME) + .settings(settings(Version.V_6_1_0).put(INDEX_FORMAT_SETTING.getKey(), indexFormat)) + .numberOfShards(1).numberOfReplicas(0) + .build(); + DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(existingOtherNode).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(discoveryNodes) + .metaData(MetaData.builder().put(indexMetaData, true).build()).build(); + joinValidator.accept(node, clusterState); + } + + public void testIndexUpgradeValidatorWithUpToDateIndex() throws Exception { + createComponents(Settings.EMPTY); + BiConsumer joinValidator = security.getJoinValidator(); + assertNotNull(joinValidator); + Version version = randomBoolean() ? Version.CURRENT : Version.V_6_1_0; + DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); + IndexMetaData indexMetaData = IndexMetaData.builder(SECURITY_INDEX_NAME) + .settings(settings(version).put(INDEX_FORMAT_SETTING.getKey(), INTERNAL_INDEX_FORMAT)) + .numberOfShards(1).numberOfReplicas(0) + .build(); + DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), version); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(existingOtherNode).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(discoveryNodes) + .metaData(MetaData.builder().put(indexMetaData, true).build()).build(); + joinValidator.accept(node, clusterState); + } + + public void testIndexUpgradeValidatorWithMissingIndex() throws Exception { + createComponents(Settings.EMPTY); + BiConsumer joinValidator = security.getJoinValidator(); + assertNotNull(joinValidator); + DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), Version.V_6_1_0); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(existingOtherNode).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(discoveryNodes).build(); + joinValidator.accept(node, clusterState); + } + + public void testGetFieldFilterSecurityEnabled() throws Exception { + createComponents(Settings.EMPTY); + Function> fieldFilter = security.getFieldFilter(); + assertNotSame(MapperPlugin.NOOP_FIELD_FILTER, fieldFilter); + Map permissionsMap = new HashMap<>(); + + FieldPermissions permissions = new FieldPermissions( + new FieldPermissionsDefinition(new String[]{"field_granted"}, Strings.EMPTY_ARRAY)); + IndicesAccessControl.IndexAccessControl indexGrantedAccessControl = new IndicesAccessControl.IndexAccessControl(true, permissions, + Collections.emptySet()); + permissionsMap.put("index_granted", indexGrantedAccessControl); + IndicesAccessControl.IndexAccessControl indexAccessControl = new IndicesAccessControl.IndexAccessControl(false, + FieldPermissions.DEFAULT, 
Collections.emptySet()); + permissionsMap.put("index_not_granted", indexAccessControl); + IndicesAccessControl.IndexAccessControl nullFieldPermissions = + new IndicesAccessControl.IndexAccessControl(true, null, Collections.emptySet()); + permissionsMap.put("index_null", nullFieldPermissions); + IndicesAccessControl index = new IndicesAccessControl(true, permissionsMap); + threadContext.putTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, index); + + assertTrue(fieldFilter.apply("index_granted").test("field_granted")); + assertFalse(fieldFilter.apply("index_granted").test(randomAlphaOfLengthBetween(3, 10))); + assertTrue(fieldFilter.apply(randomAlphaOfLengthBetween(3, 6)).test("field_granted")); + assertTrue(fieldFilter.apply(randomAlphaOfLengthBetween(3, 6)).test(randomAlphaOfLengthBetween(3, 10))); + assertEquals(MapperPlugin.NOOP_FIELD_PREDICATE, fieldFilter.apply(randomAlphaOfLengthBetween(3, 10))); + expectThrows(IllegalStateException.class, () -> fieldFilter.apply("index_not_granted")); + assertTrue(fieldFilter.apply("index_null").test(randomAlphaOfLengthBetween(3, 6))); + assertEquals(MapperPlugin.NOOP_FIELD_PREDICATE, fieldFilter.apply("index_null")); + } + + public void testGetFieldFilterSecurityDisabled() throws Exception { + createComponents(Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), false).build()); + assertSame(MapperPlugin.NOOP_FIELD_FILTER, security.getFieldFilter()); + } + + public void testGetFieldFilterSecurityEnabledLicenseNoFLS() throws Exception { + createComponents(Settings.EMPTY); + Function> fieldFilter = security.getFieldFilter(); + assertNotSame(MapperPlugin.NOOP_FIELD_FILTER, fieldFilter); + licenseState.update(randomFrom(License.OperationMode.BASIC, License.OperationMode.STANDARD, License.OperationMode.GOLD), true); + assertNotSame(MapperPlugin.NOOP_FIELD_FILTER, fieldFilter); + assertSame(MapperPlugin.NOOP_FIELD_PREDICATE, fieldFilter.apply(randomAlphaOfLengthBetween(3, 6))); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java new file mode 100644 index 0000000000000..bfa7e8ce1e821 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TemplateUpgraderTests.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.security;
+
+import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
+import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
+import org.elasticsearch.cluster.metadata.TemplateUpgradeService;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
+import org.elasticsearch.test.ESIntegTestCase.Scope;
+import org.elasticsearch.test.SecurityIntegTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.function.UnaryOperator;
+import java.util.stream.Collectors;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.hasItem;
+import static org.hamcrest.Matchers.not;
+
+/**
+ * This test ensures that the plugin template upgrader can add and remove
+ * templates when run with security enabled, as this requires certain
+ * system privileges.
+ */
+@ClusterScope(maxNumDataNodes = 1, scope = Scope.SUITE, numClientNodes = 0)
+public class TemplateUpgraderTests extends SecurityIntegTestCase {
+
+    public void testTemplatesWorkAsExpected() throws Exception {
+        ClusterService clusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName());
+        ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, internalCluster().getMasterName());
+        Client client = internalCluster().getInstance(Client.class, internalCluster().getMasterName());
+        UnaryOperator<Map<String, IndexTemplateMetaData>> indexTemplateMetaDataUpgraders = map -> {
+            map.remove("removed-template");
+            map.put("added-template", IndexTemplateMetaData.builder("added-template")
+                    .order(1)
+                    .patterns(Collections.singletonList(randomAlphaOfLength(10))).build());
+            return map;
+        };
+
+        PutIndexTemplateResponse putIndexTemplateResponse = client().admin().indices().preparePutTemplate("removed-template")
+                .setOrder(1)
+                .setPatterns(Collections.singletonList(randomAlphaOfLength(10)))
+                .get();
+        assertAcked(putIndexTemplateResponse);
+        assertTemplates("removed-template", "added-template");
+
+        TemplateUpgradeService templateUpgradeService = new TemplateUpgradeService(Settings.EMPTY, client, clusterService, threadPool,
+                Collections.singleton(indexTemplateMetaDataUpgraders));
+
+        // ensure the cluster listener gets triggered
+        ClusterChangedEvent event = new ClusterChangedEvent("testing", clusterService.state(), clusterService.state());
+        templateUpgradeService.clusterChanged(event);
+
+        assertBusy(() -> assertTemplates("added-template", "removed-template"));
+    }
+
+    private void assertTemplates(String existingTemplate, String deletedTemplate) {
+        GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates().get();
+        List<String> templateNames = response.getIndexTemplates().stream().map(IndexTemplateMetaData::name).collect(Collectors.toList());
+        assertThat(templateNames, hasItem(existingTemplate));
+        assertThat(templateNames, not(hasItem(deletedTemplate)));
+    }
+}
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TokenSSLBootsrapCheckTests.java
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TokenSSLBootsrapCheckTests.java
new file mode 100644
index 0000000000000..c66b882ac0a3f
--- /dev/null
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/TokenSSLBootsrapCheckTests.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.security;
+
+import org.elasticsearch.bootstrap.BootstrapContext;
+import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.XPackSettings;
+
+public class TokenSSLBootsrapCheckTests extends ESTestCase {
+
+    public void testTokenSSLBootstrapCheck() {
+        Settings settings = Settings.EMPTY;
+
+        assertFalse(new TokenSSLBootstrapCheck().check(new BootstrapContext(settings, null)).isFailure());
+
+        settings = Settings.builder()
+                .put(NetworkModule.HTTP_ENABLED.getKey(), false)
+                .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true).build();
+        assertFalse(new TokenSSLBootstrapCheck().check(new BootstrapContext(settings, null)).isFailure());
+
+        settings = Settings.builder().put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true).build();
+        assertFalse(new TokenSSLBootstrapCheck().check(new BootstrapContext(settings, null)).isFailure());
+
+        // XPackSettings.HTTP_SSL_ENABLED default false
+        settings = Settings.builder().put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true).build();
+        assertTrue(new TokenSSLBootstrapCheck().check(new BootstrapContext(settings, null)).isFailure());
+
+        settings = Settings.builder()
+                .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), false)
+                .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true).build();
+        assertTrue(new TokenSSLBootstrapCheck().check(new BootstrapContext(settings, null)).isFailure());
+
+        settings = Settings.builder()
+                .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), false)
+                .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true)
+                .put(NetworkModule.HTTP_ENABLED.getKey(), false).build();
+        assertFalse(new TokenSSLBootstrapCheck().check(new BootstrapContext(settings, null)).isFailure());
+
+        assertSettingDeprecationsAndWarnings(new Setting[] { NetworkModule.HTTP_ENABLED });
+    }
+}
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/SecurityActionMapperTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/SecurityActionMapperTests.java
new file mode 100644
index 0000000000000..6efb293f7b201
--- /dev/null
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/SecurityActionMapperTests.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.security.action; + +import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; +import org.elasticsearch.action.search.ClearScrollAction; +import org.elasticsearch.action.search.ClearScrollRequest; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; + +import static org.hamcrest.CoreMatchers.equalTo; + +public class SecurityActionMapperTests extends ESTestCase { + + public void testThatAllOrdinaryActionsRemainTheSame() { + SecurityActionMapper securityActionMapper = new SecurityActionMapper(); + StringBuilder actionNameBuilder = new StringBuilder(); + if (randomBoolean()) { + actionNameBuilder.append("indices:"); + if (randomBoolean()) { + actionNameBuilder.append("data/"); + actionNameBuilder.append(randomBoolean() ? "read" : "write"); + actionNameBuilder.append("/"); + actionNameBuilder.append(randomAlphaOfLengthBetween(2, 12)); + } else { + actionNameBuilder.append(randomBoolean() ? "admin" : "monitor"); + actionNameBuilder.append("/"); + actionNameBuilder.append(randomAlphaOfLengthBetween(2, 12)); + } + } else { + actionNameBuilder.append("cluster:"); + actionNameBuilder.append(randomBoolean() ? "admin" : "monitor"); + actionNameBuilder.append("/"); + actionNameBuilder.append(randomAlphaOfLengthBetween(2, 12)); + } + String randomAction = actionNameBuilder.toString(); + assumeFalse("Random action is one of the known mapped values: " + randomAction, randomAction.equals(ClearScrollAction.NAME) || + randomAction.equals(AnalyzeAction.NAME) || + randomAction.equals(AnalyzeAction.NAME + "[s]")); + + assertThat(securityActionMapper.action(randomAction, null), equalTo(randomAction)); + } + + public void testClearScroll() { + SecurityActionMapper securityActionMapper = new SecurityActionMapper(); + ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + int scrollIds = randomIntBetween(1, 10); + for (int i = 0; i < scrollIds; i++) { + clearScrollRequest.addScrollId(randomAlphaOfLength(randomIntBetween(1, 30))); + } + assertThat(securityActionMapper.action(ClearScrollAction.NAME, clearScrollRequest), equalTo(ClearScrollAction.NAME)); + } + + public void testClearScrollAll() { + SecurityActionMapper securityActionMapper = new SecurityActionMapper(); + ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + int scrollIds = randomIntBetween(0, 10); + for (int i = 0; i < scrollIds; i++) { + clearScrollRequest.addScrollId(randomAlphaOfLength(randomIntBetween(1, 30))); + } + clearScrollRequest.addScrollId("_all"); + //make sure that wherever the _all is among the scroll ids the action name gets translated + Collections.shuffle(clearScrollRequest.getScrollIds(), random()); + + assertThat(securityActionMapper.action(ClearScrollAction.NAME, clearScrollRequest), + equalTo(SecurityActionMapper.CLUSTER_PERMISSION_SCROLL_CLEAR_ALL_NAME)); + } + + public void testIndicesAnalyze() { + SecurityActionMapper securityActionMapper = new SecurityActionMapper(); + AnalyzeRequest analyzeRequest; + if (randomBoolean()) { + analyzeRequest = new AnalyzeRequest(randomAlphaOfLength(randomIntBetween(1, 30))).text("text"); + } else { + analyzeRequest = new AnalyzeRequest(null).text("text"); + analyzeRequest.index(randomAlphaOfLength(randomIntBetween(1, 30))); + } + assertThat(securityActionMapper.action(AnalyzeAction.NAME, analyzeRequest), equalTo(AnalyzeAction.NAME)); + } + + public void testClusterAnalyze() { + SecurityActionMapper securityActionMapper = new 
SecurityActionMapper(); + AnalyzeRequest analyzeRequest = new AnalyzeRequest(null).text("text"); + assertThat(securityActionMapper.action(AnalyzeAction.NAME, analyzeRequest), + equalTo(SecurityActionMapper.CLUSTER_PERMISSION_ANALYZE)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/DestructiveOperationsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/DestructiveOperationsTests.java new file mode 100644 index 0000000000000..c0b83f9fc6ea7 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/DestructiveOperationsTests.java @@ -0,0 +1,121 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.filter; + +import org.elasticsearch.action.support.DestructiveOperations; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.junit.After; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +public class DestructiveOperationsTests extends SecurityIntegTestCase { + + @After + public void afterTest() { + Settings settings = Settings.builder().put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), (String)null).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + } + + public void testDeleteIndexDestructiveOperationsRequireName() { + createIndex("index1"); + Settings settings = Settings.builder().put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + { + IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, + () -> client().admin().indices().prepareDelete("*").get()); + assertEquals("Wildcard expressions or all indices are not allowed", illegalArgumentException.getMessage()); + String[] indices = client().admin().indices().prepareGetIndex().setIndices("index1").get().getIndices(); + assertEquals(1, indices.length); + assertEquals("index1", indices[0]); + } + { + IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, + () -> client().admin().indices().prepareDelete("*", "-index1").get()); + assertEquals("Wildcard expressions or all indices are not allowed", illegalArgumentException.getMessage()); + String[] indices = client().admin().indices().prepareGetIndex().setIndices("index1").get().getIndices(); + assertEquals(1, indices.length); + assertEquals("index1", indices[0]); + } + { + IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, + () -> client().admin().indices().prepareDelete("_all").get()); + assertEquals("Wildcard expressions or all indices are not allowed", illegalArgumentException.getMessage()); + String[] indices = client().admin().indices().prepareGetIndex().setIndices("index1").get().getIndices(); + assertEquals(1, indices.length); + assertEquals("index1", indices[0]); + } + + assertAcked(client().admin().indices().prepareDelete("index1")); + } + + public void testDestructiveOperationsDefaultBehaviour() { + if (randomBoolean()) { + Settings settings = 
Settings.builder().put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + } + createIndex("index1", "index2"); + + switch(randomIntBetween(0, 2)) { + case 0: + assertAcked(client().admin().indices().prepareClose("*")); + assertAcked(client().admin().indices().prepareOpen("*")); + assertAcked(client().admin().indices().prepareDelete("*")); + break; + case 1: + assertAcked(client().admin().indices().prepareClose("_all")); + assertAcked(client().admin().indices().prepareOpen("_all")); + assertAcked(client().admin().indices().prepareDelete("_all")); + break; + case 2: + assertAcked(client().admin().indices().prepareClose("*", "-index1")); + assertAcked(client().admin().indices().prepareOpen("*", "-index1")); + assertAcked(client().admin().indices().prepareDelete("*", "-index1")); + break; + default: + throw new UnsupportedOperationException(); + } + } + + public void testOpenCloseIndexDestructiveOperationsRequireName() { + Settings settings = Settings.builder().put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + { + IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, + () -> client().admin().indices().prepareClose("*").get()); + assertEquals("Wildcard expressions or all indices are not allowed", illegalArgumentException.getMessage()); + } + { + IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, + () -> client().admin().indices().prepareClose("*", "-index1").get()); + assertEquals("Wildcard expressions or all indices are not allowed", illegalArgumentException.getMessage()); + } + { + IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, + () -> client().admin().indices().prepareClose("_all").get()); + assertEquals("Wildcard expressions or all indices are not allowed", illegalArgumentException.getMessage()); + } + { + IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, + () -> client().admin().indices().prepareOpen("*").get()); + assertEquals("Wildcard expressions or all indices are not allowed", illegalArgumentException.getMessage()); + } + { + IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, + () -> client().admin().indices().prepareOpen("*", "-index1").get()); + assertEquals("Wildcard expressions or all indices are not allowed", illegalArgumentException.getMessage()); + } + { + IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, + () -> client().admin().indices().prepareOpen("_all").get()); + assertEquals("Wildcard expressions or all indices are not allowed", illegalArgumentException.getMessage()); + } + + createIndex("index1"); + assertAcked(client().admin().indices().prepareClose("index1")); + assertAcked(client().admin().indices().prepareOpen("index1")); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java new file mode 100644 index 0000000000000..019901afa28be --- /dev/null +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java @@ -0,0 +1,257 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.filter; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.MockIndicesRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexAction; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; +import org.elasticsearch.action.admin.indices.open.OpenIndexAction; +import org.elasticsearch.action.support.ActionFilterChain; +import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.action.support.DestructiveOperations; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import org.elasticsearch.xpack.core.security.authc.AuthenticationField; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.authc.AuthenticationService; +import org.elasticsearch.xpack.security.authz.AuthorizationService; +import org.junit.Before; + +import java.util.Collections; +import java.util.HashSet; + +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Matchers.isA; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +public class SecurityActionFilterTests extends ESTestCase { + private AuthenticationService authcService; + private AuthorizationService authzService; + private XPackLicenseState licenseState; + private SecurityActionFilter filter; + private ThreadContext threadContext; + private boolean failDestructiveOperations; + + @Before + public void init() throws Exception { + authcService = mock(AuthenticationService.class); + authzService = mock(AuthorizationService.class); + licenseState = mock(XPackLicenseState.class); + when(licenseState.isAuthAllowed()).thenReturn(true); + when(licenseState.isStatsAndHealthAllowed()).thenReturn(true); + when(licenseState.isSecurityEnabled()).thenReturn(true); + ThreadPool threadPool = mock(ThreadPool.class); + 
threadContext = new ThreadContext(Settings.EMPTY); + when(threadPool.getThreadContext()).thenReturn(threadContext); + failDestructiveOperations = randomBoolean(); + Settings settings = Settings.builder() + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), failDestructiveOperations).build(); + DestructiveOperations destructiveOperations = new DestructiveOperations(settings, + new ClusterSettings(settings, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING))); + ClusterState state = mock(ClusterState.class); + DiscoveryNodes nodes = DiscoveryNodes.builder() + .add(new DiscoveryNode("id1", buildNewFakeTransportAddress(), Version.CURRENT)) + .add(new DiscoveryNode("id2", buildNewFakeTransportAddress(), Version.V_5_4_0)) + .build(); + when(state.nodes()).thenReturn(nodes); + + SecurityContext securityContext = new SecurityContext(settings, threadContext); + filter = new SecurityActionFilter(Settings.EMPTY, authcService, authzService, + licenseState, new HashSet<>(), threadPool, securityContext, destructiveOperations); + } + + public void testApply() throws Exception { + ActionRequest request = mock(ActionRequest.class); + ActionListener listener = mock(ActionListener.class); + ActionFilterChain chain = mock(ActionFilterChain.class); + Task task = mock(Task.class); + User user = new User("username", "r1", "r2"); + Authentication authentication = new Authentication(user, new RealmRef("test", "test", "foo"), null); + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[3]; + callback.onResponse(authentication); + return Void.TYPE; + }).when(authcService).authenticate(eq("_action"), eq(request), eq(SystemUser.INSTANCE), any(ActionListener.class)); + final Role empty = Role.EMPTY; + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[1]; + callback.onResponse(empty); + return Void.TYPE; + }).when(authzService).roles(any(User.class), any(ActionListener.class)); + filter.apply(task, "_action", request, listener, chain); + verify(authzService).authorize(authentication, "_action", request, empty, null); + verify(chain).proceed(eq(task), eq("_action"), eq(request), isA(ContextPreservingActionListener.class)); + } + + public void testApplyRestoresThreadContext() throws Exception { + ActionRequest request = mock(ActionRequest.class); + ActionListener listener = mock(ActionListener.class); + ActionFilterChain chain = mock(ActionFilterChain.class); + Task task = mock(Task.class); + User user = new User("username", "r1", "r2"); + Authentication authentication = new Authentication(user, new RealmRef("test", "test", "foo"), null); + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[3]; + assertNull(threadContext.getTransient(AuthenticationField.AUTHENTICATION_KEY)); + threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, authentication); + callback.onResponse(authentication); + return Void.TYPE; + }).when(authcService).authenticate(eq("_action"), eq(request), eq(SystemUser.INSTANCE), any(ActionListener.class)); + final Role empty = Role.EMPTY; + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[1]; + assertEquals(authentication, threadContext.getTransient(AuthenticationField.AUTHENTICATION_KEY)); + callback.onResponse(empty); + return Void.TYPE; + }).when(authzService).roles(any(User.class), any(ActionListener.class)); + assertNull(threadContext.getTransient(AuthenticationField.AUTHENTICATION_KEY)); + + filter.apply(task, "_action", request, listener, 
chain); + + assertNull(threadContext.getTransient(AuthenticationField.AUTHENTICATION_KEY)); + verify(authzService).authorize(authentication, "_action", request, empty, null); + verify(chain).proceed(eq(task), eq("_action"), eq(request), isA(ContextPreservingActionListener.class)); + } + + public void testApplyAsSystemUser() throws Exception { + ActionRequest request = mock(ActionRequest.class); + ActionListener listener = mock(ActionListener.class); + User user = new User("username", "r1", "r2"); + Authentication authentication = new Authentication(user, new RealmRef("test", "test", "foo"), null); + SetOnce authenticationSetOnce = new SetOnce<>(); + ActionFilterChain chain = (task, action, request1, listener1) -> { + authenticationSetOnce.set(threadContext.getTransient(AuthenticationField.AUTHENTICATION_KEY)); + }; + Task task = mock(Task.class); + final boolean hasExistingAuthentication = randomBoolean(); + final String action = "internal:foo"; + if (hasExistingAuthentication) { + threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, authentication); + threadContext.putHeader(AuthenticationField.AUTHENTICATION_KEY, "foo"); + threadContext.putTransient(AuthorizationService.ORIGINATING_ACTION_KEY, "indices:foo"); + } else { + assertNull(threadContext.getTransient(AuthenticationField.AUTHENTICATION_KEY)); + } + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[3]; + callback.onResponse(threadContext.getTransient(AuthenticationField.AUTHENTICATION_KEY)); + return Void.TYPE; + }).when(authcService).authenticate(eq(action), eq(request), eq(SystemUser.INSTANCE), any(ActionListener.class)); + + filter.apply(task, action, request, listener, chain); + + if (hasExistingAuthentication) { + assertEquals(authentication, threadContext.getTransient(AuthenticationField.AUTHENTICATION_KEY)); + } else { + assertNull(threadContext.getTransient(AuthenticationField.AUTHENTICATION_KEY)); + } + assertNotNull(authenticationSetOnce.get()); + assertNotEquals(authentication, authenticationSetOnce.get()); + assertEquals(SystemUser.INSTANCE, authenticationSetOnce.get().getUser()); + } + + public void testApplyDestructiveOperations() throws Exception { + ActionRequest request = new MockIndicesRequest( + IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()), + randomFrom("*", "_all", "test*")); + String action = randomFrom(CloseIndexAction.NAME, OpenIndexAction.NAME, DeleteIndexAction.NAME); + ActionListener listener = mock(ActionListener.class); + ActionFilterChain chain = mock(ActionFilterChain.class); + Task task = mock(Task.class); + User user = new User("username", "r1", "r2"); + Authentication authentication = new Authentication(user, new RealmRef("test", "test", "foo"), null); + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[3]; + callback.onResponse(authentication); + return Void.TYPE; + }).when(authcService).authenticate(eq(action), eq(request), eq(SystemUser.INSTANCE), any(ActionListener.class)); + final Role empty = Role.EMPTY; + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[1]; + callback.onResponse(empty); + return Void.TYPE; + }).when(authzService).roles(any(User.class), any(ActionListener.class)); + filter.apply(task, action, request, listener, chain); + if (failDestructiveOperations) { + verify(listener).onFailure(isA(IllegalArgumentException.class)); + verifyNoMoreInteractions(authzService, chain); + } else { + 
verify(authzService).authorize(authentication, action, request, empty, null); + verify(chain).proceed(eq(task), eq(action), eq(request), isA(ContextPreservingActionListener.class)); + } + } + + public void testActionProcessException() throws Exception { + ActionRequest request = mock(ActionRequest.class); + ActionListener listener = mock(ActionListener.class); + ActionFilterChain chain = mock(ActionFilterChain.class); + RuntimeException exception = new RuntimeException("process-error"); + Task task = mock(Task.class); + User user = new User("username", "r1", "r2"); + Authentication authentication = new Authentication(user, new RealmRef("test", "test", "foo"), null); + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[3]; + callback.onResponse(authentication); + return Void.TYPE; + }).when(authcService).authenticate(eq("_action"), eq(request), eq(SystemUser.INSTANCE), any(ActionListener.class)); + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[1]; + callback.onResponse(Role.EMPTY); + return Void.TYPE; + }).when(authzService).roles(any(User.class), any(ActionListener.class)); + doThrow(exception).when(authzService).authorize(eq(authentication), eq("_action"), eq(request), any(Role.class), + any(Role.class)); + filter.apply(task, "_action", request, listener, chain); + verify(listener).onFailure(exception); + verifyNoMoreInteractions(chain); + } + + public void testApplyUnlicensed() throws Exception { + ActionRequest request = mock(ActionRequest.class); + ActionListener listener = mock(ActionListener.class); + ActionFilterChain chain = mock(ActionFilterChain.class); + Task task = mock(Task.class); + when(licenseState.isAuthAllowed()).thenReturn(false); + filter.apply(task, "_action", request, listener, chain); + verifyZeroInteractions(authcService); + verifyZeroInteractions(authzService); + verify(chain).proceed(eq(task), eq("_action"), eq(request), eq(listener)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java new file mode 100644 index 0000000000000..7c951c0014e89 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java @@ -0,0 +1,126 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.interceptor; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.audit.AuditTrailService; + +import java.util.Collections; +import java.util.Set; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class IndicesAliasesRequestInterceptorTests extends ESTestCase { + + public void testInterceptorThrowsWhenFLSDLSEnabled() { + XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isSecurityEnabled()).thenReturn(true); + when(licenseState.isAuditingAllowed()).thenReturn(true); + when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + AuditTrailService auditTrailService = new AuditTrailService(Settings.EMPTY, Collections.emptyList(), licenseState); + Authentication authentication = new Authentication(new User("john", "role"), new RealmRef(null, null, null), + new RealmRef(null, null, null)); + final FieldPermissions fieldPermissions; + final boolean useFls = randomBoolean(); + if (useFls) { + fieldPermissions = new FieldPermissions(new FieldPermissionsDefinition(new String[] { "foo" }, null)); + } else { + fieldPermissions = new FieldPermissions(); + } + final boolean useDls = (useFls == false) || randomBoolean(); + final Set queries; + if (useDls) { + queries = Collections.singleton(new BytesArray(randomAlphaOfLengthBetween(2, 8))); + } else { + queries = null; + } + Role role = Role.builder().add(fieldPermissions, queries, IndexPrivilege.ALL, "foo").build(); + final String action = IndicesAliasesAction.NAME; + IndicesAccessControl accessControl = new IndicesAccessControl(true, Collections.singletonMap("foo", + new IndicesAccessControl.IndexAccessControl(true, fieldPermissions, queries))); + threadContext.putTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, accessControl); + + IndicesAliasesRequestInterceptor interceptor = + new IndicesAliasesRequestInterceptor(threadContext, licenseState, auditTrailService); + + IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); + if (randomBoolean()) { + indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.remove().index("bar").alias(randomAlphaOfLength(4))); + } + 
indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("foo").alias(randomAlphaOfLength(4))); + if (randomBoolean()) { + indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.removeIndex().index("foofoo")); + } + ElasticsearchSecurityException securityException = expectThrows(ElasticsearchSecurityException.class, + () -> interceptor.intercept(indicesAliasesRequest, authentication, role, action)); + assertEquals("Alias requests are not allowed for users who have field or document level security enabled on one of the indices", + securityException.getMessage()); + } + + public void testInterceptorThrowsWhenTargetHasGreaterPermissions() throws Exception { + XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isSecurityEnabled()).thenReturn(true); + when(licenseState.isAuditingAllowed()).thenReturn(true); + when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + AuditTrailService auditTrailService = new AuditTrailService(Settings.EMPTY, Collections.emptyList(), licenseState); + Authentication authentication = new Authentication(new User("john", "role"), new RealmRef(null, null, null), + new RealmRef(null, null, null)); + Role role = Role.builder() + .add(IndexPrivilege.ALL, "alias") + .add(IndexPrivilege.READ, "index") + .build(); + final String action = IndicesAliasesAction.NAME; + IndicesAccessControl accessControl = new IndicesAccessControl(true, Collections.emptyMap()); + threadContext.putTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, accessControl); + IndicesAliasesRequestInterceptor interceptor = + new IndicesAliasesRequestInterceptor(threadContext, licenseState, auditTrailService); + + final IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); + if (randomBoolean()) { + indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.remove().index("bar").alias(randomAlphaOfLength(4))); + } + indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("index").alias("alias")); + if (randomBoolean()) { + indicesAliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.removeIndex().index("foofoo")); + } + + ElasticsearchSecurityException securityException = expectThrows(ElasticsearchSecurityException.class, + () -> interceptor.intercept(indicesAliasesRequest, authentication, role, action)); + assertEquals("Adding an alias is not allowed when the alias has more permissions than any of the indices", + securityException.getMessage()); + + // swap target and source for success + final IndicesAliasesRequest successRequest = new IndicesAliasesRequest(); + if (randomBoolean()) { + successRequest.addAliasAction(IndicesAliasesRequest.AliasActions.remove().index("bar").alias(randomAlphaOfLength(4))); + } + successRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("alias").alias("index")); + if (randomBoolean()) { + successRequest.addAliasAction(IndicesAliasesRequest.AliasActions.removeIndex().index("foofoo")); + } + interceptor.intercept(successRequest, authentication, role, action); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java new file mode 100644 index 0000000000000..f1363214b0706 --- /dev/null +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.interceptor; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.admin.indices.shrink.ResizeAction; +import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; +import org.elasticsearch.action.admin.indices.shrink.ShrinkAction; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.audit.AuditTrailService; + +import java.util.Collections; +import java.util.Set; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ResizeRequestInterceptorTests extends ESTestCase { + + public void testResizeRequestInterceptorThrowsWhenFLSDLSEnabled() { + XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isSecurityEnabled()).thenReturn(true); + when(licenseState.isAuditingAllowed()).thenReturn(true); + when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true); + ThreadPool threadPool = mock(ThreadPool.class); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + when(threadPool.getThreadContext()).thenReturn(threadContext); + AuditTrailService auditTrailService = new AuditTrailService(Settings.EMPTY, Collections.emptyList(), licenseState); + final Authentication authentication = new Authentication(new User("john", "role"), new RealmRef(null, null, null), null); + final FieldPermissions fieldPermissions; + final boolean useFls = randomBoolean(); + if (useFls) { + fieldPermissions = new FieldPermissions(new FieldPermissionsDefinition(new String[] { "foo" }, null)); + } else { + fieldPermissions = new FieldPermissions(); + } + final boolean useDls = (useFls == false) || randomBoolean(); + final Set queries; + if (useDls) { + queries = Collections.singleton(new BytesArray(randomAlphaOfLengthBetween(2, 8))); + } else { + queries = null; + } + Role role = Role.builder().add(fieldPermissions, queries, IndexPrivilege.ALL, "foo").build(); + final String action = randomFrom(ShrinkAction.NAME, ResizeAction.NAME); + IndicesAccessControl accessControl = new IndicesAccessControl(true, 
Collections.singletonMap("foo", + new IndicesAccessControl.IndexAccessControl(true, fieldPermissions, queries))); + threadContext.putTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, accessControl); + + ResizeRequestInterceptor resizeRequestInterceptor = + new ResizeRequestInterceptor(Settings.EMPTY, threadPool, licenseState, auditTrailService); + + ElasticsearchSecurityException securityException = expectThrows(ElasticsearchSecurityException.class, + () -> resizeRequestInterceptor.intercept(new ResizeRequest("bar", "foo"), authentication, role, action)); + assertEquals("Resize requests are not allowed for users when field or document level security is enabled on the source index", + securityException.getMessage()); + } + + public void testResizeRequestInterceptorThrowsWhenTargetHasGreaterPermissions() throws Exception { + XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isSecurityEnabled()).thenReturn(true); + when(licenseState.isAuditingAllowed()).thenReturn(true); + when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true); + ThreadPool threadPool = mock(ThreadPool.class); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + when(threadPool.getThreadContext()).thenReturn(threadContext); + AuditTrailService auditTrailService = new AuditTrailService(Settings.EMPTY, Collections.emptyList(), licenseState); + final Authentication authentication = new Authentication(new User("john", "role"), new RealmRef(null, null, null), null); + Role role = Role.builder() + .add(IndexPrivilege.ALL, "target") + .add(IndexPrivilege.READ, "source") + .build(); + final String action = randomFrom(ShrinkAction.NAME, ResizeAction.NAME); + IndicesAccessControl accessControl = new IndicesAccessControl(true, Collections.emptyMap()); + threadContext.putTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, accessControl); + ResizeRequestInterceptor resizeRequestInterceptor = + new ResizeRequestInterceptor(Settings.EMPTY, threadPool, licenseState, auditTrailService); + ElasticsearchSecurityException securityException = expectThrows(ElasticsearchSecurityException.class, + () -> resizeRequestInterceptor.intercept(new ResizeRequest("target", "source"), authentication, role, action)); + assertEquals("Resizing an index is not allowed when the target index has more permissions than the source index", + securityException.getMessage()); + + // swap target and source for success + resizeRequestInterceptor.intercept(new ResizeRequest("source", "target"), authentication, role, action); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/PutRoleBuilderTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/PutRoleBuilderTests.java new file mode 100644 index 0000000000000..ba305e15ed768 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/PutRoleBuilderTests.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.role; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.xpack.core.security.action.role.PutRoleRequestBuilder; + +import java.nio.charset.Charset; +import java.nio.file.Files; +import java.nio.file.Path; + +import static org.hamcrest.Matchers.containsString; + +public class PutRoleBuilderTests extends ESTestCase { + // test that we reject a role where field permissions are stored in 2.x format (fields:...) + public void testBWCFieldPermissions() throws Exception { + Path path = getDataPath("roles2xformat.json"); + byte[] bytes = Files.readAllBytes(path); + String roleString = new String(bytes, Charset.defaultCharset()); + try (Client client = new NoOpClient("testBWCFieldPermissions")) { + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, + () -> new PutRoleRequestBuilder(client).source("role1", new BytesArray(roleString), XContentType.JSON)); + assertThat(e.getDetailedMessage(), containsString("\"fields\": [...]] format has changed for field permissions in role " + + "[role1], use [\"field_security\": {\"grant\":[...],\"except\":[...]}] instead")); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportDeleteRoleActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportDeleteRoleActionTests.java new file mode 100644 index 0000000000000..0f901830bf183 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportDeleteRoleActionTests.java @@ -0,0 +1,160 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.role; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequest; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleResponse; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; +import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; + +public class TransportDeleteRoleActionTests extends ESTestCase { + + public void testReservedRole() { + final String roleName = randomFrom(new ArrayList<>(ReservedRolesStore.names())); + NativeRolesStore rolesStore = mock(NativeRolesStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + (x) -> null, null, Collections.emptySet()); + TransportDeleteRoleAction action = new TransportDeleteRoleAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), rolesStore, transportService); + + DeleteRoleRequest request = new DeleteRoleRequest(); + request.name(roleName); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(DeleteRoleResponse deleteRoleResponse) { + responseRef.set(deleteRoleResponse); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), is(nullValue())); + assertThat(throwableRef.get(), is(instanceOf(IllegalArgumentException.class))); + assertThat(throwableRef.get().getMessage(), containsString("is reserved and cannot be deleted")); + verifyZeroInteractions(rolesStore); + } + + public void testValidRole() { + final String roleName = randomFrom("admin", "dept_a", "restricted"); + NativeRolesStore rolesStore = mock(NativeRolesStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + (x) -> null, null, Collections.emptySet()); + TransportDeleteRoleAction action = new TransportDeleteRoleAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), rolesStore, transportService); + + 
DeleteRoleRequest request = new DeleteRoleRequest(); + request.name(roleName); + + final boolean found = randomBoolean(); + doAnswer(new Answer() { + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + Object[] args = invocation.getArguments(); + assert args.length == 2; + ActionListener listener = (ActionListener) args[1]; + listener.onResponse(found); + return null; + } + }).when(rolesStore).deleteRole(eq(request), any(ActionListener.class)); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(DeleteRoleResponse deleteRoleResponse) { + responseRef.set(deleteRoleResponse); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), is(notNullValue())); + assertThat(responseRef.get().found(), is(found)); + assertThat(throwableRef.get(), is(nullValue())); + verify(rolesStore, times(1)).deleteRole(eq(request), any(ActionListener.class)); + } + + public void testException() { + final Exception e = randomFrom(new ElasticsearchSecurityException(""), new IllegalStateException()); + final String roleName = randomFrom("admin", "dept_a", "restricted"); + NativeRolesStore rolesStore = mock(NativeRolesStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + (x) -> null, null, Collections.emptySet()); + TransportDeleteRoleAction action = new TransportDeleteRoleAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), rolesStore, transportService); + + DeleteRoleRequest request = new DeleteRoleRequest(); + request.name(roleName); + + final boolean found = randomBoolean(); + doAnswer(new Answer() { + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + Object[] args = invocation.getArguments(); + assert args.length == 2; + ActionListener listener = (ActionListener) args[1]; + listener.onFailure(e); + return null; + } + }).when(rolesStore).deleteRole(eq(request), any(ActionListener.class)); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(DeleteRoleResponse deleteRoleResponse) { + responseRef.set(deleteRoleResponse); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), is(nullValue())); + assertThat(throwableRef.get(), is(notNullValue())); + assertThat(throwableRef.get(), is(sameInstance(e))); + verify(rolesStore, times(1)).deleteRole(eq(request), any(ActionListener.class)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesActionTests.java new file mode 100644 index 0000000000000..431d6cc613c16 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesActionTests.java @@ -0,0 +1,251 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.role; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.role.GetRolesRequest; +import org.elasticsearch.xpack.core.security.action.role.GetRolesResponse; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; +import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.AdditionalMatchers.aryEq; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; + +public class TransportGetRolesActionTests extends ESTestCase { + + public void testReservedRoles() { + NativeRolesStore rolesStore = mock(NativeRolesStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportGetRolesAction action = new TransportGetRolesAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), rolesStore, transportService, new ReservedRolesStore()); + + final int size = randomIntBetween(1, ReservedRolesStore.names().size()); + final List names = randomSubsetOf(size, ReservedRolesStore.names()); + + final List expectedNames = new ArrayList<>(names); + + doAnswer(invocation -> { + Object[] args = invocation.getArguments(); + assert args.length == 2; + ActionListener> listener = (ActionListener>) args[1]; + listener.onResponse(Collections.emptyList()); + return null; + }).when(rolesStore).getRoleDescriptors(aryEq(Strings.EMPTY_ARRAY), any(ActionListener.class)); + + GetRolesRequest request = new GetRolesRequest(); + request.names(names.toArray(Strings.EMPTY_ARRAY)); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(GetRolesResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(throwableRef.get(), is(nullValue())); + assertThat(responseRef.get(), is(notNullValue())); + List retrievedRoleNames = + Arrays.asList(responseRef.get().roles()).stream().map(RoleDescriptor::getName).collect(Collectors.toList()); + 
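+        // All requested names are reserved, so they resolve from ReservedRolesStore and the native store is never queried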
assertThat(retrievedRoleNames, containsInAnyOrder(expectedNames.toArray(Strings.EMPTY_ARRAY))); + verifyZeroInteractions(rolesStore); + } + + public void testStoreRoles() { + final List storeRoleDescriptors = randomRoleDescriptors(); + NativeRolesStore rolesStore = mock(NativeRolesStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportGetRolesAction action = new TransportGetRolesAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), rolesStore, transportService, new ReservedRolesStore()); + + GetRolesRequest request = new GetRolesRequest(); + request.names(storeRoleDescriptors.stream().map(RoleDescriptor::getName).collect(Collectors.toList()).toArray(Strings.EMPTY_ARRAY)); + + doAnswer(invocation -> { + Object[] args = invocation.getArguments(); + assert args.length == 2; + ActionListener> listener = (ActionListener>) args[1]; + listener.onResponse(storeRoleDescriptors); + return null; + }).when(rolesStore).getRoleDescriptors(aryEq(request.names()), any(ActionListener.class)); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(GetRolesResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(throwableRef.get(), is(nullValue())); + assertThat(responseRef.get(), is(notNullValue())); + List retrievedRoleNames = + Arrays.asList(responseRef.get().roles()).stream().map(RoleDescriptor::getName).collect(Collectors.toList()); + assertThat(retrievedRoleNames, containsInAnyOrder(request.names())); + } + + public void testGetAllOrMix() { + final boolean all = randomBoolean(); + final List storeRoleDescriptors = randomRoleDescriptors(); + final List storeNames = storeRoleDescriptors.stream().map(RoleDescriptor::getName).collect(Collectors.toList()); + final List reservedRoleNames = new ArrayList<>(ReservedRolesStore.names()); + + final List requestedNames = new ArrayList<>(); + List specificStoreNames = new ArrayList<>(); + if (all == false) { + requestedNames.addAll(randomSubsetOf(randomIntBetween(1, ReservedRolesStore.names().size()), ReservedRolesStore.names())); + specificStoreNames.addAll(randomSubsetOf(randomIntBetween(1, storeNames.size()), storeNames)); + requestedNames.addAll(specificStoreNames); + } + + NativeRolesStore rolesStore = mock(NativeRolesStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportGetRolesAction action = new TransportGetRolesAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), rolesStore, transportService, new ReservedRolesStore()); + + final List expectedNames = new ArrayList<>(); + if (all) { + expectedNames.addAll(reservedRoleNames); + expectedNames.addAll(storeNames); + } else { + expectedNames.addAll(requestedNames); + } + + GetRolesRequest request = new GetRolesRequest(); + request.names(requestedNames.toArray(Strings.EMPTY_ARRAY)); + + doAnswer(invocation -> { + Object[] args = invocation.getArguments(); + assert args.length == 2; + String[] requestedNames1 = (String[]) args[0]; + ActionListener> 
listener = (ActionListener>) args[1]; + if (requestedNames1.length == 0) { + listener.onResponse(storeRoleDescriptors); + } else { + List requestedNamesList = Arrays.asList(requestedNames1); + listener.onResponse(storeRoleDescriptors.stream() + .filter(r -> requestedNamesList.contains(r.getName())) + .collect(Collectors.toList())); + } + return null; + }).when(rolesStore).getRoleDescriptors(aryEq(specificStoreNames.toArray(Strings.EMPTY_ARRAY)), any(ActionListener.class)); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(GetRolesResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(throwableRef.get(), is(nullValue())); + assertThat(responseRef.get(), is(notNullValue())); + List retrievedRoleNames = + Arrays.asList(responseRef.get().roles()).stream().map(RoleDescriptor::getName).collect(Collectors.toList()); + assertThat(retrievedRoleNames, containsInAnyOrder(expectedNames.toArray(Strings.EMPTY_ARRAY))); + + if (all) { + verify(rolesStore, times(1)).getRoleDescriptors(aryEq(Strings.EMPTY_ARRAY), any(ActionListener.class)); + } else { + verify(rolesStore, times(1)) + .getRoleDescriptors(aryEq(specificStoreNames.toArray(Strings.EMPTY_ARRAY)), any(ActionListener.class)); + } + } + + public void testException() { + final Exception e = randomFrom(new ElasticsearchSecurityException(""), new IllegalStateException()); + final List storeRoleDescriptors = randomRoleDescriptors(); + NativeRolesStore rolesStore = mock(NativeRolesStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportGetRolesAction action = new TransportGetRolesAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), rolesStore, transportService, new ReservedRolesStore()); + + GetRolesRequest request = new GetRolesRequest(); + request.names(storeRoleDescriptors.stream().map(RoleDescriptor::getName).collect(Collectors.toList()).toArray(Strings.EMPTY_ARRAY)); + + doAnswer(invocation -> { + Object[] args = invocation.getArguments(); + assert args.length == 2; + ActionListener> listener = (ActionListener>) args[1]; + listener.onFailure(e); + return null; + }).when(rolesStore).getRoleDescriptors(aryEq(request.names()), any(ActionListener.class)); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(GetRolesResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(throwableRef.get(), is(notNullValue())); + assertThat(throwableRef.get(), is(e)); + assertThat(responseRef.get(), is(nullValue())); + } + + private List randomRoleDescriptors() { + int size = scaledRandomIntBetween(1, 10); + List list = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + list.add(new RoleDescriptor("role_" + i, null, null, null)); + } + return list; + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleActionTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleActionTests.java new file mode 100644 index 0000000000000..0ae2477ba0310 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleActionTests.java @@ -0,0 +1,160 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.role; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.role.PutRoleRequest; +import org.elasticsearch.xpack.core.security.action.role.PutRoleResponse; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; +import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; + +public class TransportPutRoleActionTests extends ESTestCase { + + public void testReservedRole() { + final String roleName = randomFrom(new ArrayList<>(ReservedRolesStore.names())); + NativeRolesStore rolesStore = mock(NativeRolesStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportPutRoleAction action = new TransportPutRoleAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), rolesStore, transportService); + + PutRoleRequest request = new PutRoleRequest(); + request.name(roleName); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(PutRoleResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), is(nullValue())); + assertThat(throwableRef.get(), is(instanceOf(IllegalArgumentException.class))); + assertThat(throwableRef.get().getMessage(), containsString("is reserved and cannot be modified")); + 
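+        // Reserved role names are rejected up front, so the native roles store must see no interactions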
verifyZeroInteractions(rolesStore); + } + + public void testValidRole() { + final String roleName = randomFrom("admin", "dept_a", "restricted"); + NativeRolesStore rolesStore = mock(NativeRolesStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportPutRoleAction action = new TransportPutRoleAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), rolesStore, transportService); + + final boolean created = randomBoolean(); + PutRoleRequest request = new PutRoleRequest(); + request.name(roleName); + + doAnswer(new Answer() { + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + Object[] args = invocation.getArguments(); + assert args.length == 3; + ActionListener listener = (ActionListener) args[2]; + listener.onResponse(created); + return null; + } + }).when(rolesStore).putRole(eq(request), any(RoleDescriptor.class), any(ActionListener.class)); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(PutRoleResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), is(notNullValue())); + assertThat(responseRef.get().isCreated(), is(created)); + assertThat(throwableRef.get(), is(nullValue())); + verify(rolesStore, times(1)).putRole(eq(request), any(RoleDescriptor.class), any(ActionListener.class)); + } + + public void testException() { + final Exception e = randomFrom(new ElasticsearchSecurityException(""), new IllegalStateException()); + final String roleName = randomFrom("admin", "dept_a", "restricted"); + NativeRolesStore rolesStore = mock(NativeRolesStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportPutRoleAction action = new TransportPutRoleAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), rolesStore, transportService); + + PutRoleRequest request = new PutRoleRequest(); + request.name(roleName); + + doAnswer(new Answer() { + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + Object[] args = invocation.getArguments(); + assert args.length == 3; + ActionListener listener = (ActionListener) args[2]; + listener.onFailure(e); + return null; + } + }).when(rolesStore).putRole(eq(request), any(RoleDescriptor.class), any(ActionListener.class)); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(PutRoleResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), is(nullValue())); + assertThat(throwableRef.get(), is(notNullValue())); + assertThat(throwableRef.get(), is(sameInstance(e))); + verify(rolesStore, times(1)).putRole(eq(request), any(RoleDescriptor.class), any(ActionListener.class)); + } +} diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/PutRoleMappingRequestTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/PutRoleMappingRequestTests.java new file mode 100644 index 0000000000000..3079a17b77424 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/PutRoleMappingRequestTests.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.rolemapping; + +import java.util.Collections; + +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequestBuilder; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; +import org.junit.Before; +import org.mockito.Mockito; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.notNullValue; + +public class PutRoleMappingRequestTests extends ESTestCase { + + private PutRoleMappingRequestBuilder builder; + + @Before + public void setupBuilder() { + final ElasticsearchClient client = Mockito.mock(ElasticsearchClient.class); + builder = new PutRoleMappingRequestBuilder(client, PutRoleMappingAction.INSTANCE); + } + + public void testValidateMissingName() throws Exception { + final PutRoleMappingRequest request = builder + .roles("superuser") + .expression(Mockito.mock(RoleMapperExpression.class)) + .request(); + assertValidationFailure(request, "name"); + } + + public void testValidateMissingRoles() throws Exception { + final PutRoleMappingRequest request = builder + .name("test") + .expression(Mockito.mock(RoleMapperExpression.class)) + .request(); + assertValidationFailure(request, "roles"); + } + + public void testValidateMissingRules() throws Exception { + final PutRoleMappingRequest request = builder + .name("test") + .roles("superuser") + .request(); + assertValidationFailure(request, "rules"); + } + + public void testValidateMetadataKeys() throws Exception { + final PutRoleMappingRequest request = builder + .name("test") + .roles("superuser") + .expression(Mockito.mock(RoleMapperExpression.class)) + .metadata(Collections.singletonMap("_secret", false)) + .request(); + assertValidationFailure(request, "metadata key"); + } + + private void assertValidationFailure(PutRoleMappingRequest request, String expectedMessage) { + final ValidationException ve = request.validate(); + assertThat(ve, notNullValue()); + assertThat(ve.getMessage(), containsString(expectedMessage)); + } + +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java new file mode 100644 index 0000000000000..219ce1d1f79dd --- /dev/null +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java @@ -0,0 +1,117 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.rolemapping; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsRequest; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsResponse; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; +import org.hamcrest.Matchers; +import org.junit.Before; + +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; + +public class TransportGetRoleMappingsActionTests extends ESTestCase { + + private NativeRoleMappingStore store; + private TransportGetRoleMappingsAction action; + private AtomicReference> namesRef; + private List result; + + @Before + public void setupMocks() { + store = mock(NativeRoleMappingStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); + action = new TransportGetRoleMappingsAction(Settings.EMPTY, mock(ThreadPool.class), + mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), + transportService, store); + + namesRef = new AtomicReference<>(null); + result = Collections.emptyList(); + + doAnswer(invocation -> { + Object[] args = invocation.getArguments(); + assert args.length == 2; + namesRef.set((Set) args[0]); + ActionListener> listener = (ActionListener) args[1]; + listener.onResponse(result); + return null; + }).when(store).getRoleMappings(any(Set.class), any(ActionListener.class)); + } + + public void testGetSingleRole() throws Exception { + final PlainActionFuture future = new PlainActionFuture<>(); + final GetRoleMappingsRequest request = new GetRoleMappingsRequest(); + request.setNames("everyone"); + + final ExpressionRoleMapping mapping = mock(ExpressionRoleMapping.class); + result = Collections.singletonList(mapping); + action.doExecute(request, future); + assertThat(future.get(), notNullValue()); + assertThat(future.get().mappings(), arrayContaining(mapping)); + assertThat(namesRef.get(), containsInAnyOrder("everyone")); + } + + public void 
testGetMultipleNamedRoles() throws Exception { + final PlainActionFuture future = new PlainActionFuture<>(); + final GetRoleMappingsRequest request = new GetRoleMappingsRequest(); + request.setNames("admin", "engineering", "sales", "finance"); + + final ExpressionRoleMapping mapping1 = mock(ExpressionRoleMapping.class); + final ExpressionRoleMapping mapping2 = mock(ExpressionRoleMapping.class); + final ExpressionRoleMapping mapping3 = mock(ExpressionRoleMapping.class); + result = Arrays.asList(mapping1, mapping2, mapping3); + + action.doExecute(request, future); + + final GetRoleMappingsResponse response = future.get(); + assertThat(response, notNullValue()); + assertThat(response.mappings(), arrayContainingInAnyOrder(mapping1, mapping2, mapping3)); + assertThat(namesRef.get(), containsInAnyOrder("admin", "engineering", "sales", "finance")); + } + + public void testGetAllRoles() throws Exception { + final PlainActionFuture future = new PlainActionFuture<>(); + final GetRoleMappingsRequest request = new GetRoleMappingsRequest(); + request.setNames(Strings.EMPTY_ARRAY); + + final ExpressionRoleMapping mapping1 = mock(ExpressionRoleMapping.class); + final ExpressionRoleMapping mapping2 = mock(ExpressionRoleMapping.class); + final ExpressionRoleMapping mapping3 = mock(ExpressionRoleMapping.class); + result = Arrays.asList(mapping1, mapping2, mapping3); + + action.doExecute(request, future); + + final GetRoleMappingsResponse response = future.get(); + assertThat(response, notNullValue()); + assertThat(response.mappings(), arrayContainingInAnyOrder(mapping1, mapping2, mapping3)); + assertThat(namesRef.get(), Matchers.nullValue(Set.class)); + } + +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java new file mode 100644 index 0000000000000..da9eca7a9b61a --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.rolemapping; + +import java.util.Arrays; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; +import org.junit.Before; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; + +public class TransportPutRoleMappingActionTests extends ESTestCase { + + private NativeRoleMappingStore store; + private TransportPutRoleMappingAction action; + private AtomicReference requestRef; + + @Before + public void setupMocks() { + store = mock(NativeRoleMappingStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); + action = new TransportPutRoleMappingAction(Settings.EMPTY, mock(ThreadPool.class), + mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), + transportService, store); + + requestRef = new AtomicReference<>(null); + + doAnswer(invocation -> { + Object[] args = invocation.getArguments(); + assert args.length == 2; + requestRef.set((PutRoleMappingRequest) args[0]); + ActionListener listener = (ActionListener) args[1]; + listener.onResponse(true); + return null; + }).when(store).putRoleMapping(any(PutRoleMappingRequest.class), any(ActionListener.class) + ); + } + + public void testPutValidMapping() throws Exception { + final FieldExpression expression = new FieldExpression( + "username", + Collections.singletonList(new FieldExpression.FieldValue("*")) + ); + final PutRoleMappingResponse response = put("anarchy", expression, "superuser", + Collections.singletonMap("dumb", true)); + + assertThat(response.isCreated(), equalTo(true)); + + final ExpressionRoleMapping mapping = requestRef.get().getMapping(); + assertThat(mapping.getExpression(), is(expression)); + assertThat(mapping.isEnabled(), equalTo(true)); + assertThat(mapping.getName(), equalTo("anarchy")); + assertThat(mapping.getRoles(), containsInAnyOrder("superuser")); + assertThat(mapping.getMetadata().size(), equalTo(1)); + assertThat(mapping.getMetadata().get("dumb"), equalTo(true)); + } + + private PutRoleMappingResponse put(String name, FieldExpression expression, String role, + Map metadata) throws Exception { + final PutRoleMappingRequest request = new PutRoleMappingRequest(); + request.setName(name); + request.setRoles(Arrays.asList(role)); + request.setRules(expression); + 
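+        // Finish populating the mapping request, execute the action, and wait for the stubbed store to answer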
request.setMetadata(metadata); + request.setEnabled(true); + final PlainActionFuture future = new PlainActionFuture<>(); + action.doExecute(request, future); + return future.get(); + } +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/SamlPrepareAuthenticationRequestTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/SamlPrepareAuthenticationRequestTests.java new file mode 100644 index 0000000000000..e257c37cbd377 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/SamlPrepareAuthenticationRequestTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.saml; + +import java.io.IOException; + +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.xpack.core.security.action.saml.SamlPrepareAuthenticationRequest; +import org.elasticsearch.xpack.security.authc.saml.SamlTestCase; +import org.hamcrest.Matchers; + +public class SamlPrepareAuthenticationRequestTests extends SamlTestCase { + + public void testSerialiseNonNullCriteria() throws IOException { + final SamlPrepareAuthenticationRequest req = new SamlPrepareAuthenticationRequest(); + req.setRealmName("saml1"); + req.setAssertionConsumerServiceURL("https://sp.example.com/sso/saml2/post"); + serialiseAndValidate(req); + } + + public void testSerialiseNullCriteria() throws IOException { + final SamlPrepareAuthenticationRequest req = new SamlPrepareAuthenticationRequest(); + req.setRealmName(null); + req.setAssertionConsumerServiceURL(null); + serialiseAndValidate(req); + } + + private void serialiseAndValidate(SamlPrepareAuthenticationRequest req1) throws IOException { + final BytesStreamOutput out = new BytesStreamOutput(); + req1.writeTo(out); + + final SamlPrepareAuthenticationRequest req2 = new SamlPrepareAuthenticationRequest(); + req2.readFrom(out.bytes().streamInput()); + + assertThat(req2.getRealmName(), Matchers.equalTo(req1.getRealmName())); + assertThat(req2.getAssertionConsumerServiceURL(), Matchers.equalTo(req1.getAssertionConsumerServiceURL())); + assertThat(req2.getParentTask(), Matchers.equalTo(req1.getParentTask())); + } + +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java new file mode 100644 index 0000000000000..a0755f9cd0682 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java @@ -0,0 +1,326 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.saml; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.index.IndexAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.ClearScrollAction; +import org.elasticsearch.action.search.ClearScrollRequest; +import org.elasticsearch.action.search.ClearScrollResponse; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchResponseSections; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.update.UpdateAction; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.action.saml.SamlInvalidateSessionRequest; +import org.elasticsearch.xpack.core.security.action.saml.SamlInvalidateSessionResponse; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings; +import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.elasticsearch.xpack.security.authc.Realms; +import org.elasticsearch.xpack.security.authc.TokenService; +import org.elasticsearch.xpack.security.authc.UserToken; +import org.elasticsearch.xpack.security.authc.saml.SamlLogoutRequestHandler; +import org.elasticsearch.xpack.security.authc.saml.SamlNameId; +import org.elasticsearch.xpack.security.authc.saml.SamlRealm; +import org.elasticsearch.xpack.security.authc.saml.SamlRealmTestHelper; +import 
org.elasticsearch.xpack.security.authc.saml.SamlRealmTests; +import org.elasticsearch.xpack.security.authc.saml.SamlTestCase; +import org.junit.After; +import org.junit.Before; +import org.opensaml.saml.saml2.core.NameID; + +import java.io.IOException; +import java.nio.file.Path; +import java.time.Clock; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.iterableWithSize; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.startsWith; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportSamlInvalidateSessionActionTests extends SamlTestCase { + + private SamlRealm samlRealm; + private TokenService tokenService; + private List indexRequests; + private List updateRequests; + private List searchRequests; + private TransportSamlInvalidateSessionAction action; + private SamlLogoutRequestHandler.Result logoutRequest; + private Function searchFunction = ignore -> new SearchHit[0]; + + @Before + public void setup() throws Exception { + final Settings settings = Settings.builder() + .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true) + .put("path.home", createTempDir()) + .build(); + + final ThreadContext threadContext = new ThreadContext(settings); + final ThreadPool threadPool = mock(ThreadPool.class); + when(threadPool.getThreadContext()).thenReturn(threadContext); + new Authentication(new User("kibana"), new RealmRef("realm", "type", "node"), null).writeToContext(threadContext); + + indexRequests = new ArrayList<>(); + updateRequests = new ArrayList<>(); + searchRequests = new ArrayList<>(); + final Client client = new NoOpClient(threadPool) { + @Override + protected > + void doExecute(Action action, Request request, ActionListener listener) { + if (IndexAction.NAME.equals(action.name())) { + assertThat(request, instanceOf(IndexRequest.class)); + IndexRequest indexRequest = (IndexRequest) request; + indexRequests.add(indexRequest); + final IndexResponse response = new IndexResponse( + indexRequest.shardId(), indexRequest.type(), indexRequest.id(), 1, 1, 1, true); + listener.onResponse((Response) response); + } else if (UpdateAction.NAME.equals(action.name())) { + assertThat(request, instanceOf(UpdateRequest.class)); + updateRequests.add((UpdateRequest) request); + listener.onResponse((Response) new UpdateResponse()); + } else if (SearchAction.NAME.equals(action.name())) { + assertThat(request, instanceOf(SearchRequest.class)); + SearchRequest searchRequest = (SearchRequest) request; + searchRequests.add(searchRequest); + final SearchHit[] hits = searchFunction.apply(searchRequest); + final SearchResponse response = new SearchResponse( + new SearchResponseSections(new SearchHits(hits, hits.length, 0f), + null, null, false, false, null, 1), "_scrollId1", 1, 1, 0, 1, null, null); + listener.onResponse((Response) response); + } else if (ClearScrollAction.NAME.equals(action.name())) { + assertThat(request, 
instanceOf(ClearScrollRequest.class)); + ClearScrollRequest scrollRequest = (ClearScrollRequest) request; + assertEquals("_scrollId1", scrollRequest.getScrollIds().get(0)); + ClearScrollResponse response = new ClearScrollResponse(true, 1); + listener.onResponse((Response) response); + } else { + super.doExecute(action, request, listener); + } + } + }; + + final SecurityLifecycleService lifecycleService = mock(SecurityLifecycleService.class); + doAnswer(inv -> { + ((Runnable) inv.getArguments()[1]).run(); + return null; + }).when(lifecycleService).prepareIndexIfNeededThenExecute(any(Consumer.class), any(Runnable.class)); + + final ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + tokenService = new TokenService(settings, Clock.systemUTC(), client, lifecycleService, clusterService); + + final TransportService transportService = new TransportService(Settings.EMPTY, null, null, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); + final Realms realms = mock(Realms.class); + action = new TransportSamlInvalidateSessionAction(settings, threadPool, transportService, + mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), tokenService, realms); + + final Path metadata = PathUtils.get(SamlRealm.class.getResource("idp1.xml").toURI()); + final Environment env = TestEnvironment.newEnvironment(settings); + final Settings realmSettings = Settings.builder() + .put(SamlRealmSettings.IDP_METADATA_PATH.getKey(), metadata.toString()) + .put(SamlRealmSettings.IDP_ENTITY_ID.getKey(), SamlRealmTests.TEST_IDP_ENTITY_ID) + .put(SamlRealmSettings.SP_ENTITY_ID.getKey(), SamlRealmTestHelper.SP_ENTITY_ID) + .put(SamlRealmSettings.SP_ACS.getKey(), SamlRealmTestHelper.SP_ACS_URL) + .put(SamlRealmSettings.SP_LOGOUT.getKey(), SamlRealmTestHelper.SP_LOGOUT_URL) + .put("attributes.principal", "uid") + .build(); + + final RealmConfig realmConfig = new RealmConfig("saml1", realmSettings, settings, env, threadContext); + samlRealm = SamlRealmTestHelper.buildRealm(realmConfig, null); + when(realms.realm(realmConfig.name())).thenReturn(samlRealm); + when(realms.stream()).thenAnswer(i -> Stream.of(samlRealm)); + + logoutRequest = new SamlLogoutRequestHandler.Result( + randomAlphaOfLengthBetween(8, 24), + new SamlNameId(NameID.TRANSIENT, randomAlphaOfLengthBetween(8, 24), null, null, null), + randomAlphaOfLengthBetween(12, 16), + null + ); + when(samlRealm.getLogoutHandler().parseFromQueryString(anyString())).thenReturn(logoutRequest); + } + + private SearchHit tokenHit(int idx, BytesReference source) { + try { + final Map sourceMap = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, source.streamInput()).map(); + final Map accessToken = (Map) sourceMap.get("access_token"); + final Map userToken = (Map) accessToken.get("user_token"); + final SearchHit hit = new SearchHit(idx, "token_" + userToken.get("id"), null, null); + hit.sourceRef(source); + return hit; + } catch (IOException e) { + throw ExceptionsHelper.convertToRuntime(e); + } + } + + @After + public void cleanup() { + samlRealm.close(); + } + + public void testInvalidateCorrectTokensFromLogoutRequest() throws Exception { + storeToken(logoutRequest.getNameId(), randomAlphaOfLength(10)); + final Tuple tokenToInvalidate1 = storeToken(logoutRequest.getNameId(), logoutRequest.getSession()); + final Tuple tokenToInvalidate2 = storeToken(logoutRequest.getNameId(), logoutRequest.getSession()); + storeToken(new 
SamlNameId(NameID.PERSISTENT, randomAlphaOfLength(16), null, null, null), logoutRequest.getSession()); + + assertThat(indexRequests.size(), equalTo(4)); + + final AtomicInteger counter = new AtomicInteger(); + final SearchHit[] searchHits = indexRequests.stream() + .filter(r -> r.id().startsWith("token")) + .map(r -> tokenHit(counter.incrementAndGet(), r.source())) + .collect(Collectors.toList()) + .toArray(new SearchHit[0]); + assertThat(searchHits.length, equalTo(4)); + searchFunction = req1 -> { + searchFunction = findTokenByRefreshToken(searchHits); + return searchHits; + }; + + indexRequests.clear(); + + final SamlInvalidateSessionRequest request = new SamlInvalidateSessionRequest(); + request.setRealmName(samlRealm.name()); + request.setQueryString("SAMLRequest=foo"); + final PlainActionFuture future = new PlainActionFuture<>(); + action.doExecute(request, future); + final SamlInvalidateSessionResponse response = future.get(); + assertThat(response, notNullValue()); + assertThat(response.getCount(), equalTo(2)); + assertThat(response.getRealmName(), equalTo(samlRealm.name())); + assertThat(response.getRedirectUrl(), notNullValue()); + assertThat(response.getRedirectUrl(), startsWith(SamlRealmTestHelper.IDP_LOGOUT_URL)); + assertThat(response.getRedirectUrl(), containsString("SAMLResponse=")); + + // 1 to find the tokens for the realm + // 2 more to find the UserTokens from the 2 matching refresh tokens + assertThat(searchRequests.size(), equalTo(3)); + + assertThat(searchRequests.get(0).source().query(), instanceOf(BoolQueryBuilder.class)); + final List filter0 = ((BoolQueryBuilder) searchRequests.get(0).source().query()).filter(); + assertThat(filter0, iterableWithSize(3)); + + assertThat(filter0.get(0), instanceOf(TermQueryBuilder.class)); + assertThat(((TermQueryBuilder) filter0.get(0)).fieldName(), equalTo("doc_type")); + assertThat(((TermQueryBuilder) filter0.get(0)).value(), equalTo("token")); + + assertThat(filter0.get(1), instanceOf(TermQueryBuilder.class)); + assertThat(((TermQueryBuilder) filter0.get(1)).fieldName(), equalTo("access_token.realm")); + assertThat(((TermQueryBuilder) filter0.get(1)).value(), equalTo(samlRealm.name())); + + assertThat(filter0.get(2), instanceOf(BoolQueryBuilder.class)); + assertThat(((BoolQueryBuilder) filter0.get(2)).should(), iterableWithSize(2)); + + assertThat(searchRequests.get(1).source().query(), instanceOf(BoolQueryBuilder.class)); + final List filter1 = ((BoolQueryBuilder) searchRequests.get(1).source().query()).filter(); + assertThat(filter1, iterableWithSize(2)); + + assertThat(filter1.get(0), instanceOf(TermQueryBuilder.class)); + assertThat(((TermQueryBuilder) filter1.get(0)).fieldName(), equalTo("doc_type")); + assertThat(((TermQueryBuilder) filter1.get(0)).value(), equalTo("token")); + + assertThat(filter1.get(1), instanceOf(TermQueryBuilder.class)); + assertThat(((TermQueryBuilder) filter1.get(1)).fieldName(), equalTo("refresh_token.token")); + assertThat(((TermQueryBuilder) filter1.get(1)).value(), equalTo(tokenToInvalidate1.v2())); + + assertThat(updateRequests.size(), equalTo(4)); // (refresh-token + access-token) * 2 + assertThat(updateRequests.get(0).id(), equalTo("token_" + tokenToInvalidate1.v1().getId())); + assertThat(updateRequests.get(1).id(), equalTo(updateRequests.get(0).id())); + assertThat(updateRequests.get(2).id(), equalTo("token_" + tokenToInvalidate2.v1().getId())); + assertThat(updateRequests.get(3).id(), equalTo(updateRequests.get(2).id())); + + assertThat(indexRequests.size(), equalTo(2)); // bwc-invalidate 
* 2 + assertThat(indexRequests.get(0).id(), startsWith("invalidated-token_")); + assertThat(indexRequests.get(1).id(), startsWith("invalidated-token_")); + } + + private Function findTokenByRefreshToken(SearchHit[] searchHits) { + return request -> { + assertThat(request.source().query(), instanceOf(BoolQueryBuilder.class)); + final List filters = ((BoolQueryBuilder) request.source().query()).filter(); + assertThat(filters, iterableWithSize(2)); + assertThat(filters.get(1), instanceOf(TermQueryBuilder.class)); + final TermQueryBuilder termQuery = (TermQueryBuilder) filters.get(1); + assertThat(termQuery.fieldName(), equalTo("refresh_token.token")); + for (SearchHit hit : searchHits) { + final Map refreshToken = (Map) hit.getSourceAsMap().get("refresh_token"); + if (termQuery.value().equals(refreshToken.get("token"))) { + return new SearchHit[]{hit}; + } + } + return new SearchHit[0]; + }; + } + + private Tuple storeToken(SamlNameId nameId, String session) throws IOException { + Authentication authentication = new Authentication(new User("bob"), + new RealmRef("native", NativeRealmSettings.TYPE, "node01"), null); + final Map metadata = samlRealm.createTokenMetadata(nameId, session); + final PlainActionFuture> future = new PlainActionFuture<>(); + tokenService.createUserToken(authentication, authentication, future, metadata); + return future.actionGet(); + } + +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java new file mode 100644 index 0000000000000..50a7a35b7a682 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java @@ -0,0 +1,247 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.saml; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.get.GetAction; +import org.elasticsearch.action.get.GetRequestBuilder; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.get.MultiGetAction; +import org.elasticsearch.action.get.MultiGetItemResponse; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.get.MultiGetRequestBuilder; +import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.index.IndexAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.update.UpdateAction; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.action.update.UpdateRequestBuilder; +import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.action.saml.SamlLogoutRequest; +import org.elasticsearch.xpack.core.security.action.saml.SamlLogoutResponse; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.elasticsearch.xpack.security.authc.Realms; +import org.elasticsearch.xpack.security.authc.TokenService; +import org.elasticsearch.xpack.security.authc.UserToken; +import org.elasticsearch.xpack.security.authc.saml.SamlNameId; +import org.elasticsearch.xpack.security.authc.saml.SamlRealm; +import org.elasticsearch.xpack.security.authc.saml.SamlRealmTests; +import org.elasticsearch.xpack.security.authc.saml.SamlTestCase; +import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.junit.After; +import org.junit.Before; +import org.opensaml.saml.saml2.core.NameID; + +import java.nio.file.Path; +import java.time.Clock; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; + +import static org.elasticsearch.xpack.security.authc.TokenServiceTests.mockGetTokenFromId; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.startsWith; +import static org.mockito.Matchers.any; +import static 
org.mockito.Matchers.anyString; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportSamlLogoutActionTests extends SamlTestCase { + + private static final String SP_URL = "https://sp.example.net/saml"; + + private SamlRealm samlRealm; + private TokenService tokenService; + private List indexRequests; + private List updateRequests; + private TransportSamlLogoutAction action; + private Client client; + + @Before + public void setup() throws Exception { + final Settings settings = Settings.builder() + .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true) + .put("path.home", createTempDir()) + .build(); + + final ThreadContext threadContext = new ThreadContext(settings); + final ThreadPool threadPool = mock(ThreadPool.class); + when(threadPool.getThreadContext()).thenReturn(threadContext); + new Authentication(new User("kibana"), new Authentication.RealmRef("realm", "type", "node"), null).writeToContext(threadContext); + + indexRequests = new ArrayList<>(); + updateRequests = new ArrayList<>(); + client = mock(Client.class); + when(client.threadPool()).thenReturn(threadPool); + when(client.settings()).thenReturn(settings); + doAnswer(invocationOnMock -> { + GetRequestBuilder builder = new GetRequestBuilder(client, GetAction.INSTANCE); + builder.setIndex((String) invocationOnMock.getArguments()[0]) + .setType((String) invocationOnMock.getArguments()[1]) + .setId((String) invocationOnMock.getArguments()[2]); + return builder; + }).when(client).prepareGet(anyString(), anyString(), anyString()); + doAnswer(invocationOnMock -> { + IndexRequestBuilder builder = new IndexRequestBuilder(client, IndexAction.INSTANCE); + builder.setIndex((String) invocationOnMock.getArguments()[0]) + .setType((String) invocationOnMock.getArguments()[1]) + .setId((String) invocationOnMock.getArguments()[2]); + return builder; + }).when(client).prepareIndex(anyString(), anyString(), anyString()); + doAnswer(invocationOnMock -> { + UpdateRequestBuilder builder = new UpdateRequestBuilder(client, UpdateAction.INSTANCE); + builder.setIndex((String) invocationOnMock.getArguments()[0]) + .setType((String) invocationOnMock.getArguments()[1]) + .setId((String) invocationOnMock.getArguments()[2]); + return builder; + }).when(client).prepareUpdate(anyString(), anyString(), anyString()); + when(client.prepareMultiGet()).thenReturn(new MultiGetRequestBuilder(client, MultiGetAction.INSTANCE)); + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + MultiGetResponse response = mock(MultiGetResponse.class); + MultiGetItemResponse[] responses = new MultiGetItemResponse[2]; + when(response.getResponses()).thenReturn(responses); + + GetResponse oldGetResponse = mock(GetResponse.class); + when(oldGetResponse.isExists()).thenReturn(false); + responses[0] = new MultiGetItemResponse(oldGetResponse, null); + + GetResponse getResponse = mock(GetResponse.class); + responses[1] = new MultiGetItemResponse(getResponse, null); + when(getResponse.isExists()).thenReturn(false); + listener.onResponse(response); + return Void.TYPE; + }).when(client).multiGet(any(MultiGetRequest.class), any(ActionListener.class)); + doAnswer(invocationOnMock -> { + UpdateRequest updateRequest = (UpdateRequest) invocationOnMock.getArguments()[0]; + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + updateRequests.add(updateRequest); + 
final UpdateResponse response = new UpdateResponse( + updateRequest.getShardId(), updateRequest.type(), updateRequest.id(), 1, DocWriteResponse.Result.UPDATED); + listener.onResponse(response); + return Void.TYPE; + }).when(client).update(any(UpdateRequest.class), any(ActionListener.class)); + doAnswer(invocationOnMock -> { + IndexRequest indexRequest = (IndexRequest) invocationOnMock.getArguments()[0]; + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + indexRequests.add(indexRequest); + final IndexResponse response = new IndexResponse( + indexRequest.shardId(), indexRequest.type(), indexRequest.id(), 1, 1, 1, true); + listener.onResponse(response); + return Void.TYPE; + }).when(client).index(any(IndexRequest.class), any(ActionListener.class)); + doAnswer(invocationOnMock -> { + IndexRequest indexRequest = (IndexRequest) invocationOnMock.getArguments()[1]; + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + indexRequests.add(indexRequest); + final IndexResponse response = new IndexResponse( + indexRequest.shardId(), indexRequest.type(), indexRequest.id(), 1, 1, 1, true); + listener.onResponse(response); + return Void.TYPE; + }).when(client).execute(eq(IndexAction.INSTANCE), any(IndexRequest.class), any(ActionListener.class)); + + final SecurityLifecycleService lifecycleService = mock(SecurityLifecycleService.class); + doAnswer(inv -> { + ((Runnable) inv.getArguments()[1]).run(); + return null; + }).when(lifecycleService).prepareIndexIfNeededThenExecute(any(Consumer.class), any(Runnable.class)); + + final ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + tokenService = new TokenService(settings, Clock.systemUTC(), client, lifecycleService, clusterService); + + final TransportService transportService = new TransportService(Settings.EMPTY, null, null, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); + final Realms realms = mock(Realms.class); + action = new TransportSamlLogoutAction(settings, threadPool, transportService, + mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), realms, tokenService); + + final Path metadata = PathUtils.get(SamlRealm.class.getResource("idp1.xml").toURI()); + final Environment env = TestEnvironment.newEnvironment(settings); + final Settings realmSettings = Settings.builder() + .put(SamlRealmSettings.IDP_METADATA_PATH.getKey(), metadata.toString()) + .put(SamlRealmSettings.IDP_ENTITY_ID.getKey(), SamlRealmTests.TEST_IDP_ENTITY_ID) + .put(SamlRealmSettings.SP_ENTITY_ID.getKey(), SP_URL) + .put(SamlRealmSettings.SP_ACS.getKey(), SP_URL) + .put("attributes.principal", "uid") + .build(); + + final RealmConfig realmConfig = new RealmConfig("saml1", realmSettings, settings, env, threadContext); + samlRealm = SamlRealm.create(realmConfig, mock(SSLService.class), mock(ResourceWatcherService.class), mock(UserRoleMapper.class)); + when(realms.realm(realmConfig.name())).thenReturn(samlRealm); + } + + @After + public void cleanup() { + samlRealm.close(); + } + + public void testLogoutInvalidatesToken() throws Exception { + final String session = randomAlphaOfLengthBetween(12, 18); + final String nameId = randomAlphaOfLengthBetween(6, 16); + final Map userMetaData = MapBuilder.newMapBuilder() + .put(SamlRealm.USER_METADATA_NAMEID_FORMAT, NameID.TRANSIENT) + .put(SamlRealm.USER_METADATA_NAMEID_VALUE, nameId) + .map(); + final User user = new User("punisher", new String[] { "superuser" }, null, null, userMetaData, true); + 
final Authentication.RealmRef realmRef = new Authentication.RealmRef(samlRealm.name(), SamlRealmSettings.TYPE, "node01"); + final Authentication authentication = new Authentication(user, realmRef, null); + + final Map<String, Object> tokenMetaData = samlRealm.createTokenMetadata( + new SamlNameId(NameID.TRANSIENT, nameId, null, null, null), session); + + final PlainActionFuture<Tuple<UserToken, String>> future = new PlainActionFuture<>(); + tokenService.createUserToken(authentication, authentication, future, tokenMetaData); + final UserToken userToken = future.actionGet().v1(); + mockGetTokenFromId(userToken, client); + final String tokenString = tokenService.getUserTokenString(userToken); + + final SamlLogoutRequest request = new SamlLogoutRequest(); + request.setToken(tokenString); + final PlainActionFuture<SamlLogoutResponse> listener = new PlainActionFuture<>(); + action.doExecute(request, listener); + final SamlLogoutResponse response = listener.get(); + assertThat(response, notNullValue()); + assertThat(response.getRedirectUrl(), notNullValue()); + + final IndexRequest indexRequest1 = indexRequests.get(0); + assertThat(indexRequest1, notNullValue()); + assertThat(indexRequest1.id(), startsWith("token")); + + final IndexRequest indexRequest2 = indexRequests.get(1); + assertThat(indexRequest2, notNullValue()); + assertThat(indexRequest2.id(), startsWith("invalidated-token")); + } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/CreateTokenRequestTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/CreateTokenRequestTests.java new file mode 100644 index 0000000000000..4404526328448 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/CreateTokenRequestTests.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.token; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenRequest; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasItem; + +public class CreateTokenRequestTests extends ESTestCase { + + public void testRequestValidation() { + CreateTokenRequest request = new CreateTokenRequest(); + ActionRequestValidationException ve = request.validate(); + assertNotNull(ve); + assertEquals(1, ve.validationErrors().size()); + assertThat(ve.validationErrors().get(0), containsString("[password, refresh_token]")); + assertThat(ve.validationErrors().get(0), containsString("grant_type")); + + request.setGrantType("password"); + ve = request.validate(); + assertNotNull(ve); + assertEquals(2, ve.validationErrors().size()); + assertThat(ve.validationErrors(), hasItem("username is missing")); + assertThat(ve.validationErrors(), hasItem("password is missing")); + + request.setUsername(randomBoolean() ? null : ""); + request.setPassword(randomBoolean() ? 
null : new SecureString(new char[] {})); + + ve = request.validate(); + assertNotNull(ve); + assertEquals(2, ve.validationErrors().size()); + assertThat(ve.validationErrors(), hasItem("username is missing")); + assertThat(ve.validationErrors(), hasItem("password is missing")); + + request.setUsername(randomAlphaOfLengthBetween(1, 256)); + ve = request.validate(); + assertNotNull(ve); + assertEquals(1, ve.validationErrors().size()); + assertThat(ve.validationErrors(), hasItem("password is missing")); + + request.setPassword(new SecureString(randomAlphaOfLengthBetween(1, 256).toCharArray())); + ve = request.validate(); + assertNull(ve); + + request.setRefreshToken(randomAlphaOfLengthBetween(1, 10)); + ve = request.validate(); + assertNotNull(ve); + assertEquals(1, ve.validationErrors().size()); + assertThat(ve.validationErrors().get(0), containsString("refresh_token is not supported")); + + request.setGrantType("refresh_token"); + ve = request.validate(); + assertNotNull(ve); + assertEquals(2, ve.validationErrors().size()); + assertThat(ve.validationErrors(), hasItem(containsString("username is not supported"))); + assertThat(ve.validationErrors(), hasItem(containsString("password is not supported"))); + + request.setUsername(null); + request.setPassword(null); + ve = request.validate(); + assertNull(ve); + + request.setRefreshToken(null); + ve = request.validate(); + assertNotNull(ve); + assertEquals(1, ve.validationErrors().size()); + assertThat(ve.validationErrors(), hasItem("refresh_token is missing")); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/HasPrivilegesRequestBuilderTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/HasPrivilegesRequestBuilderTests.java new file mode 100644 index 0000000000000..2d53a3e6e8615 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/HasPrivilegesRequestBuilderTests.java @@ -0,0 +1,119 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.user; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; +import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequest; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequestBuilder; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.nio.charset.StandardCharsets; + +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; + +public class HasPrivilegesRequestBuilderTests extends ESTestCase { + + public void testParseValidJsonWithClusterAndIndexPrivileges() throws Exception { + String json = "{ " + + " \"cluster\":[ \"all\"]," + + " \"index\":[ " + + " { \"names\": [ \".kibana\", \".reporting\" ], " + + " \"privileges\" : [ \"read\", \"write\" ] }, " + + " { \"names\": [ \".security\" ], " + + " \"privileges\" : [ \"manage\" ] } " + + " ]" + + "}"; + + final HasPrivilegesRequestBuilder builder = new HasPrivilegesRequestBuilder(mock(Client.class)); + builder.source("elastic", new BytesArray(json.getBytes(StandardCharsets.UTF_8)), XContentType.JSON); + + final HasPrivilegesRequest request = builder.request(); + assertThat(request.clusterPrivileges().length, equalTo(1)); + assertThat(request.clusterPrivileges()[0], equalTo("all")); + + assertThat(request.indexPrivileges().length, equalTo(2)); + + final RoleDescriptor.IndicesPrivileges privileges0 = request.indexPrivileges()[0]; + assertThat(privileges0.getIndices(), arrayContaining(".kibana", ".reporting")); + assertThat(privileges0.getPrivileges(), arrayContaining("read", "write")); + + final RoleDescriptor.IndicesPrivileges privileges1 = request.indexPrivileges()[1]; + assertThat(privileges1.getIndices(), arrayContaining(".security")); + assertThat(privileges1.getPrivileges(), arrayContaining("manage")); + } + + public void testParseValidJsonWithJustIndexPrivileges() throws Exception { + String json = "{ \"index\":[ " + + "{ \"names\": [ \".kibana\", \".reporting\" ], " + + " \"privileges\" : [ \"read\", \"write\" ] }, " + + "{ \"names\": [ \".security\" ], " + + " \"privileges\" : [ \"manage\" ] } " + + "] }"; + + final HasPrivilegesRequestBuilder builder = new HasPrivilegesRequestBuilder(mock(Client.class)); + builder.source("elastic", new BytesArray(json.getBytes(StandardCharsets.UTF_8)), XContentType.JSON); + + final HasPrivilegesRequest request = builder.request(); + assertThat(request.clusterPrivileges().length, equalTo(0)); + assertThat(request.indexPrivileges().length, equalTo(2)); + + final RoleDescriptor.IndicesPrivileges privileges0 = request.indexPrivileges()[0]; + assertThat(privileges0.getIndices(), arrayContaining(".kibana", ".reporting")); + assertThat(privileges0.getPrivileges(), arrayContaining("read", "write")); + + final RoleDescriptor.IndicesPrivileges privileges1 = request.indexPrivileges()[1]; + assertThat(privileges1.getIndices(), arrayContaining(".security")); + assertThat(privileges1.getPrivileges(), arrayContaining("manage")); + } + + public void testParseValidJsonWithJustClusterPrivileges() throws Exception { + String 
json = "{ \"cluster\":[ " + + "\"manage\"," + + "\"" + ClusterHealthAction.NAME + "\"," + + "\"" + ClusterStatsAction.NAME + "\"" + + "] }"; + + final HasPrivilegesRequestBuilder builder = new HasPrivilegesRequestBuilder(mock(Client.class)); + builder.source("elastic", new BytesArray(json.getBytes(StandardCharsets.UTF_8)), XContentType.JSON); + + final HasPrivilegesRequest request = builder.request(); + assertThat(request.indexPrivileges().length, equalTo(0)); + assertThat(request.clusterPrivileges(), arrayContaining("manage", ClusterHealthAction.NAME, ClusterStatsAction.NAME)); + } + + public void testUseOfFieldLevelSecurityThrowsException() throws Exception { + String json = "{ \"index\":[ " + + "{" + + " \"names\": [ \"employees\" ], " + + " \"privileges\" : [ \"read\", \"write\" ] ," + + " \"field_security\": { \"grant\": [ \"name\", \"department\", \"title\" ] }" + + "} ] }"; + + final HasPrivilegesRequestBuilder builder = new HasPrivilegesRequestBuilder(mock(Client.class)); + final ElasticsearchParseException parseException = expectThrows(ElasticsearchParseException.class, + () -> builder.source("elastic", new BytesArray(json.getBytes(StandardCharsets.UTF_8)), XContentType.JSON) + ); + assertThat(parseException.getMessage(), containsString("[field_security]")); + } + + public void testMissingPrivilegesThrowsException() throws Exception { + String json = "{ }"; + final HasPrivilegesRequestBuilder builder = new HasPrivilegesRequestBuilder(mock(Client.class)); + final ElasticsearchParseException parseException = expectThrows(ElasticsearchParseException.class, + () -> builder.source("elastic", new BytesArray(json.getBytes(StandardCharsets.UTF_8)), XContentType.JSON) + ); + assertThat(parseException.getMessage(), containsString("[index] and [cluster] are both missing")); + } +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/PutUserRequestBuilderTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/PutUserRequestBuilderTests.java new file mode 100644 index 0000000000000..ac3308e2cc35d --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/PutUserRequestBuilderTests.java @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.user; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.action.user.PutUserRequest; +import org.elasticsearch.xpack.core.security.action.user.PutUserRequestBuilder; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; + +public class PutUserRequestBuilderTests extends ESTestCase { + + public void testNullValuesForEmailAndFullName() throws IOException { + final String json = "{\n" + + " \"roles\": [\n" + + " \"kibana4\"\n" + + " ],\n" + + " \"full_name\": null,\n" + + " \"email\": null,\n" + + " \"metadata\": {}\n" + + "}"; + + PutUserRequestBuilder builder = new PutUserRequestBuilder(mock(Client.class)); + builder.source("kibana4", new BytesArray(json.getBytes(StandardCharsets.UTF_8)), XContentType.JSON); + + PutUserRequest request = builder.request(); + assertThat(request.username(), is("kibana4")); + assertThat(request.roles(), arrayContaining("kibana4")); + assertThat(request.fullName(), nullValue()); + assertThat(request.email(), nullValue()); + assertThat(request.metadata().isEmpty(), is(true)); + assertTrue(request.enabled()); + } + + public void testMissingEmailFullName() throws Exception { + final String json = "{\n" + + " \"roles\": [\n" + + " \"kibana4\"\n" + + " ],\n" + + " \"metadata\": {}\n" + + "}"; + + PutUserRequestBuilder builder = new PutUserRequestBuilder(mock(Client.class)); + builder.source("kibana4", new BytesArray(json.getBytes(StandardCharsets.UTF_8)), XContentType.JSON); + + PutUserRequest request = builder.request(); + assertThat(request.username(), is("kibana4")); + assertThat(request.roles(), arrayContaining("kibana4")); + assertThat(request.fullName(), nullValue()); + assertThat(request.email(), nullValue()); + assertThat(request.metadata().isEmpty(), is(true)); + } + + public void testWithFullNameAndEmail() throws IOException { + final String json = "{\n" + + " \"roles\": [\n" + + " \"kibana4\"\n" + + " ],\n" + + " \"full_name\": \"Kibana User\",\n" + + " \"email\": \"kibana@elastic.co\",\n" + + " \"metadata\": {}\n" + + "}"; + + PutUserRequestBuilder builder = new PutUserRequestBuilder(mock(Client.class)); + builder.source("kibana4", new BytesArray(json.getBytes(StandardCharsets.UTF_8)), XContentType.JSON); + + PutUserRequest request = builder.request(); + assertThat(request.username(), is("kibana4")); + assertThat(request.roles(), arrayContaining("kibana4")); + assertThat(request.fullName(), is("Kibana User")); + assertThat(request.email(), is("kibana@elastic.co")); + assertThat(request.metadata().isEmpty(), is(true)); + } + + public void testInvalidFullname() throws IOException { + final String json = "{\n" + + " \"roles\": [\n" + + " \"kibana4\"\n" + + " ],\n" + + " \"full_name\": [ \"Kibana User\" ],\n" + + " \"email\": \"kibana@elastic.co\",\n" + + " \"metadata\": {}\n" + + "}"; + + PutUserRequestBuilder builder = new PutUserRequestBuilder(mock(Client.class)); + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, + () -> builder.source("kibana4", new 
BytesArray(json.getBytes(StandardCharsets.UTF_8)), XContentType.JSON)); + assertThat(e.getMessage(), containsString("expected field [full_name] to be of type string")); + } + + public void testInvalidEmail() throws IOException { + final String json = "{\n" + + " \"roles\": [\n" + + " \"kibana4\"\n" + + " ],\n" + + " \"full_name\": \"Kibana User\",\n" + + " \"email\": [ \"kibana@elastic.co\" ],\n" + + " \"metadata\": {}\n" + + "}"; + + PutUserRequestBuilder builder = new PutUserRequestBuilder(mock(Client.class)); + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, + () -> builder.source("kibana4", new BytesArray(json.getBytes(StandardCharsets.UTF_8)), XContentType.JSON)); + assertThat(e.getMessage(), containsString("expected field [email] to be of type string")); + } + + public void testWithEnabled() throws IOException { + final String json = "{\n" + + " \"roles\": [\n" + + " \"kibana4\"\n" + + " ],\n" + + " \"full_name\": \"Kibana User\",\n" + + " \"email\": \"kibana@elastic.co\",\n" + + " \"metadata\": {}\n," + + " \"enabled\": false\n" + + "}"; + + PutUserRequestBuilder builder = new PutUserRequestBuilder(mock(Client.class)); + PutUserRequest request = + builder.source("kibana4", new BytesArray(json.getBytes(StandardCharsets.UTF_8)), XContentType.JSON).request(); + assertFalse(request.enabled()); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/PutUserRequestTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/PutUserRequestTests.java new file mode 100644 index 0000000000000..af3a89c77b6f0 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/PutUserRequestTests.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.user; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.action.user.PutUserRequest; + +import java.util.Collections; +import java.util.Date; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class PutUserRequestTests extends ESTestCase { + + public void testValidateReturnsNullForCorrectData() throws Exception { + final PutUserRequest request = new PutUserRequest(); + request.username("foo"); + request.roles("bar"); + request.metadata(Collections.singletonMap("created", new Date())); + final ActionRequestValidationException validation = request.validate(); + assertThat(validation, is(nullValue())); + } + + public void testValidateRejectsNullUserName() throws Exception { + final PutUserRequest request = new PutUserRequest(); + request.username(null); + request.roles("bar"); + final ActionRequestValidationException validation = request.validate(); + assertThat(validation, is(notNullValue())); + assertThat(validation.validationErrors(), contains(is("user is missing"))); + assertThat(validation.validationErrors().size(), is(1)); + } + + public void testValidateRejectsUserNameThatHasInvalidCharacters() throws Exception { + final PutUserRequest request = new PutUserRequest(); + request.username("fóóbár"); + request.roles("bar"); + final ActionRequestValidationException validation = request.validate(); + assertThat(validation, is(notNullValue())); + assertThat(validation.validationErrors(), contains(containsString("must be"))); + assertThat(validation.validationErrors().size(), is(1)); + } + + public void testValidateRejectsMetaDataWithLeadingUnderscore() throws Exception { + final PutUserRequest request = new PutUserRequest(); + request.username("foo"); + request.roles("bar"); + request.metadata(Collections.singletonMap("_created", new Date())); + final ActionRequestValidationException validation = request.validate(); + assertThat(validation, is(notNullValue())); + assertThat(validation.validationErrors(), contains(containsString("metadata keys"))); + assertThat(validation.validationErrors().size(), is(1)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java new file mode 100644 index 0000000000000..56e714d7a7067 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java @@ -0,0 +1,118 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.user; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse; +import org.elasticsearch.xpack.core.security.user.ElasticUser; +import org.elasticsearch.xpack.core.security.user.KibanaUser; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.user.XPackUser; + +import java.util.Collections; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportAuthenticateActionTests extends ESTestCase { + + public void testInternalUser() { + SecurityContext securityContext = mock(SecurityContext.class); + when(securityContext.getUser()).thenReturn(randomFrom(SystemUser.INSTANCE, XPackUser.INSTANCE)); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportAuthenticateAction action = new TransportAuthenticateAction(Settings.EMPTY, mock(ThreadPool.class), transportService, + mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), securityContext); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(new AuthenticateRequest(), new ActionListener() { + @Override + public void onResponse(AuthenticateResponse authenticateResponse) { + responseRef.set(authenticateResponse); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), nullValue()); + assertThat(throwableRef.get(), instanceOf(IllegalArgumentException.class)); + assertThat(throwableRef.get().getMessage(), containsString("is internal")); + } + + public void testNullUser() { + SecurityContext securityContext = mock(SecurityContext.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportAuthenticateAction action = new TransportAuthenticateAction(Settings.EMPTY, mock(ThreadPool.class), transportService, + mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), securityContext); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(new AuthenticateRequest(), new ActionListener() { + @Override + public void onResponse(AuthenticateResponse authenticateResponse) { + responseRef.set(authenticateResponse); + } + + 
@Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), nullValue()); + assertThat(throwableRef.get(), instanceOf(ElasticsearchSecurityException.class)); + assertThat(throwableRef.get().getMessage(), containsString("did not find an authenticated user")); + } + + public void testValidUser() { + final User user = randomFrom(new ElasticUser(true), new KibanaUser(true), new User("joe")); + SecurityContext securityContext = mock(SecurityContext.class); + when(securityContext.getUser()).thenReturn(user); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportAuthenticateAction action = new TransportAuthenticateAction(Settings.EMPTY, mock(ThreadPool.class), transportService, + mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), securityContext); + + final AtomicReference<Throwable> throwableRef = new AtomicReference<>(); + final AtomicReference<AuthenticateResponse> responseRef = new AtomicReference<>(); + action.doExecute(new AuthenticateRequest(), new ActionListener<AuthenticateResponse>() { + @Override + public void onResponse(AuthenticateResponse authenticateResponse) { + responseRef.set(authenticateResponse); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), notNullValue()); + assertThat(responseRef.get().user(), sameInstance(user)); + assertThat(throwableRef.get(), nullValue()); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java new file mode 100644 index 0000000000000..78f6fd26e93ea --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java @@ -0,0 +1,191 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.user; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordRequest; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordResponse; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.core.security.user.ElasticUser; +import org.elasticsearch.xpack.core.security.user.KibanaUser; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.util.Collections; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; + +public class TransportChangePasswordActionTests extends ESTestCase { + + public void testAnonymousUser() { + Settings settings = Settings.builder().put(AnonymousUser.ROLES_SETTING.getKey(), "superuser").build(); + AnonymousUser anonymousUser = new AnonymousUser(settings); + NativeUsersStore usersStore = mock(NativeUsersStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportChangePasswordAction action = new TransportChangePasswordAction(settings, mock(ThreadPool.class), transportService, + mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), usersStore); + + ChangePasswordRequest request = new ChangePasswordRequest(); + request.username(anonymousUser.principal()); + request.passwordHash(Hasher.BCRYPT.hash(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(ChangePasswordResponse changePasswordResponse) { + responseRef.set(changePasswordResponse); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), is(nullValue())); + assertThat(throwableRef.get(), instanceOf(IllegalArgumentException.class)); + assertThat(throwableRef.get().getMessage(), containsString("is anonymous 
and cannot be modified")); + verifyZeroInteractions(usersStore); + } + + public void testInternalUsers() { + NativeUsersStore usersStore = mock(NativeUsersStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportChangePasswordAction action = new TransportChangePasswordAction(Settings.EMPTY, mock(ThreadPool.class), transportService, + mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), usersStore); + + ChangePasswordRequest request = new ChangePasswordRequest(); + request.username(randomFrom(SystemUser.INSTANCE.principal(), XPackUser.INSTANCE.principal())); + request.passwordHash(Hasher.BCRYPT.hash(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(ChangePasswordResponse changePasswordResponse) { + responseRef.set(changePasswordResponse); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), is(nullValue())); + assertThat(throwableRef.get(), instanceOf(IllegalArgumentException.class)); + assertThat(throwableRef.get().getMessage(), containsString("is internal")); + verifyZeroInteractions(usersStore); + } + + public void testValidUser() { + final User user = randomFrom(new ElasticUser(true), new KibanaUser(true), new User("joe")); + NativeUsersStore usersStore = mock(NativeUsersStore.class); + ChangePasswordRequest request = new ChangePasswordRequest(); + request.username(user.principal()); + request.passwordHash(Hasher.BCRYPT.hash(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)); + doAnswer(invocation -> { + Object[] args = invocation.getArguments(); + assert args.length == 2; + ActionListener listener = (ActionListener) args[1]; + listener.onResponse(null); + return null; + }).when(usersStore).changePassword(eq(request), any(ActionListener.class)); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportChangePasswordAction action = new TransportChangePasswordAction(Settings.EMPTY, mock(ThreadPool.class), transportService, + mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), usersStore); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(ChangePasswordResponse changePasswordResponse) { + responseRef.set(changePasswordResponse); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), is(notNullValue())); + assertThat(responseRef.get(), instanceOf(ChangePasswordResponse.class)); + assertThat(throwableRef.get(), is(nullValue())); + verify(usersStore, times(1)).changePassword(eq(request), any(ActionListener.class)); + } + + public void testException() { + final User user = randomFrom(new ElasticUser(true), new KibanaUser(true), new User("joe")); + NativeUsersStore usersStore = mock(NativeUsersStore.class); + ChangePasswordRequest request = new ChangePasswordRequest(); + request.username(user.principal()); + 
request.passwordHash(Hasher.BCRYPT.hash(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)); + final Exception e = randomFrom(new ElasticsearchSecurityException(""), new IllegalStateException(), new RuntimeException()); + doAnswer(new Answer<Void>() { + public Void answer(InvocationOnMock invocation) { + Object[] args = invocation.getArguments(); + assert args.length == 2; + ActionListener<Void> listener = (ActionListener<Void>) args[1]; + listener.onFailure(e); + return null; + } + }).when(usersStore).changePassword(eq(request), any(ActionListener.class)); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportChangePasswordAction action = new TransportChangePasswordAction(Settings.EMPTY, mock(ThreadPool.class), transportService, + mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), usersStore); + + final AtomicReference<Throwable> throwableRef = new AtomicReference<>(); + final AtomicReference<ChangePasswordResponse> responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener<ChangePasswordResponse>() { + @Override + public void onResponse(ChangePasswordResponse changePasswordResponse) { + responseRef.set(changePasswordResponse); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), is(nullValue())); + assertThat(throwableRef.get(), is(notNullValue())); + assertThat(throwableRef.get(), sameInstance(e)); + verify(usersStore, times(1)).changePassword(eq(request), any(ActionListener.class)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java new file mode 100644 index 0000000000000..a60a82e87d71a --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java @@ -0,0 +1,215 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.user; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.user.DeleteUserRequest; +import org.elasticsearch.xpack.core.security.action.user.DeleteUserResponse; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.core.security.user.ElasticUser; +import org.elasticsearch.xpack.core.security.user.KibanaUser; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.util.Collections; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; + +public class TransportDeleteUserActionTests extends ESTestCase { + + public void testAnonymousUser() { + Settings settings = Settings.builder().put(AnonymousUser.ROLES_SETTING.getKey(), "superuser").build(); + NativeUsersStore usersStore = mock(NativeUsersStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportDeleteUserAction action = new TransportDeleteUserAction(settings, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), usersStore, transportService); + + DeleteUserRequest request = new DeleteUserRequest(new AnonymousUser(settings).principal()); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(DeleteUserResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), is(nullValue())); + assertThat(throwableRef.get(), instanceOf(IllegalArgumentException.class)); + assertThat(throwableRef.get().getMessage(), containsString("is anonymous and cannot be deleted")); + verifyZeroInteractions(usersStore); + } + + public void testInternalUser() { + NativeUsersStore usersStore = mock(NativeUsersStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + 
TransportDeleteUserAction action = new TransportDeleteUserAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), usersStore, transportService); + + DeleteUserRequest request = new DeleteUserRequest(randomFrom(SystemUser.INSTANCE.principal(), XPackUser.INSTANCE.principal())); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(DeleteUserResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), is(nullValue())); + assertThat(throwableRef.get(), instanceOf(IllegalArgumentException.class)); + assertThat(throwableRef.get().getMessage(), containsString("is internal")); + verifyZeroInteractions(usersStore); + } + + public void testReservedUser() { + final User reserved = randomFrom(new ElasticUser(true), new KibanaUser(true)); + NativeUsersStore usersStore = mock(NativeUsersStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportDeleteUserAction action = new TransportDeleteUserAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), usersStore, transportService); + + DeleteUserRequest request = new DeleteUserRequest(reserved.principal()); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(DeleteUserResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), is(nullValue())); + assertThat(throwableRef.get(), instanceOf(IllegalArgumentException.class)); + assertThat(throwableRef.get().getMessage(), containsString("is reserved and cannot be deleted")); + verifyZeroInteractions(usersStore); + } + + public void testValidUser() { + final User user = new User("joe"); + NativeUsersStore usersStore = mock(NativeUsersStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportDeleteUserAction action = new TransportDeleteUserAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), usersStore, transportService); + + final boolean found = randomBoolean(); + final DeleteUserRequest request = new DeleteUserRequest(user.principal()); + doAnswer(new Answer() { + public Void answer(InvocationOnMock invocation) { + Object[] args = invocation.getArguments(); + assert args.length == 2; + ActionListener listener = (ActionListener) args[1]; + listener.onResponse(found); + return null; + } + }).when(usersStore).deleteUser(eq(request), any(ActionListener.class)); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(DeleteUserResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + 
throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), is(notNullValue())); + assertThat(responseRef.get().found(), is(found)); + assertThat(throwableRef.get(), is(nullValue())); + verify(usersStore, times(1)).deleteUser(eq(request), any(ActionListener.class)); + } + + public void testException() { + final Exception e = randomFrom(new ElasticsearchSecurityException(""), new IllegalStateException(), new RuntimeException()); + final User user = new User("joe"); + NativeUsersStore usersStore = mock(NativeUsersStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportDeleteUserAction action = new TransportDeleteUserAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), usersStore, transportService); + + final DeleteUserRequest request = new DeleteUserRequest(user.principal()); + doAnswer(new Answer<Void>() { + public Void answer(InvocationOnMock invocation) { + Object[] args = invocation.getArguments(); + assert args.length == 2; + ActionListener<Boolean> listener = (ActionListener<Boolean>) args[1]; + listener.onFailure(e); + return null; + } + }).when(usersStore).deleteUser(eq(request), any(ActionListener.class)); + + final AtomicReference<Throwable> throwableRef = new AtomicReference<>(); + final AtomicReference<DeleteUserResponse> responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener<DeleteUserResponse>() { + @Override + public void onResponse(DeleteUserResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), is(nullValue())); + assertThat(throwableRef.get(), is(notNullValue())); + assertThat(throwableRef.get(), sameInstance(e)); + verify(usersStore, times(1)).deleteUser(eq(request), any(ActionListener.class)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java new file mode 100644 index 0000000000000..b23fccec018dd --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java @@ -0,0 +1,339 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.user; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.user.GetUsersRequest; +import org.elasticsearch.xpack.core.security.action.user.GetUsersResponse; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; +import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; +import org.elasticsearch.xpack.security.authc.esnative.ReservedRealmTests; +import org.junit.Before; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.emptyArray; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.AdditionalMatchers.aryEq; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +public class TransportGetUsersActionTests extends ESTestCase { + + private boolean anonymousEnabled; + private Settings settings; + + @Before + public void maybeEnableAnonymous() { + anonymousEnabled = randomBoolean(); + if (anonymousEnabled) { + settings = Settings.builder().put(AnonymousUser.ROLES_SETTING.getKey(), "superuser").build(); + } else { + settings = Settings.EMPTY; + } + } + + public void testAnonymousUser() { + NativeUsersStore usersStore = mock(NativeUsersStore.class); + SecurityLifecycleService securityLifecycleService = mock(SecurityLifecycleService.class); + when(securityLifecycleService.isSecurityIndexAvailable()).thenReturn(true); + AnonymousUser anonymousUser = new AnonymousUser(settings); + ReservedRealm reservedRealm = + new ReservedRealm(mock(Environment.class), settings, usersStore, anonymousUser, securityLifecycleService, new ThreadContext(Settings.EMPTY)); + TransportService transportService = new 
TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), usersStore, transportService, reservedRealm); + + GetUsersRequest request = new GetUsersRequest(); + request.usernames(anonymousUser.principal()); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(GetUsersResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(throwableRef.get(), is(nullValue())); + assertThat(responseRef.get(), is(notNullValue())); + final User[] users = responseRef.get().users(); + if (anonymousEnabled) { + assertThat("expected array with anonymous but got: " + Arrays.toString(users), users, arrayContaining(anonymousUser)); + } else { + assertThat("expected an empty array but got: " + Arrays.toString(users), users, emptyArray()); + } + verifyZeroInteractions(usersStore); + } + + public void testInternalUser() { + NativeUsersStore usersStore = mock(NativeUsersStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), usersStore, transportService, mock(ReservedRealm.class)); + + GetUsersRequest request = new GetUsersRequest(); + request.usernames(randomFrom(SystemUser.INSTANCE.principal(), XPackUser.INSTANCE.principal())); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(GetUsersResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(throwableRef.get(), instanceOf(IllegalArgumentException.class)); + assertThat(throwableRef.get().getMessage(), containsString("is internal")); + assertThat(responseRef.get(), is(nullValue())); + verifyZeroInteractions(usersStore); + } + + public void testReservedUsersOnly() { + NativeUsersStore usersStore = mock(NativeUsersStore.class); + SecurityLifecycleService securityLifecycleService = mock(SecurityLifecycleService.class); + when(securityLifecycleService.isSecurityIndexAvailable()).thenReturn(true); + when(securityLifecycleService.checkSecurityMappingVersion(any())).thenReturn(true); + + ReservedRealmTests.mockGetAllReservedUserInfo(usersStore, Collections.emptyMap()); + ReservedRealm reservedRealm = + new ReservedRealm(mock(Environment.class), settings, usersStore, new AnonymousUser(settings), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + PlainActionFuture> userFuture = new PlainActionFuture<>(); + reservedRealm.users(userFuture); + final Collection allReservedUsers = userFuture.actionGet(); + final int size = randomIntBetween(1, allReservedUsers.size()); + final List reservedUsers = randomSubsetOf(size, allReservedUsers); + final List names = 
reservedUsers.stream().map(User::principal).collect(Collectors.toList()); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), usersStore, transportService, reservedRealm); + + logger.error("names {}", names); + GetUsersRequest request = new GetUsersRequest(); + request.usernames(names.toArray(new String[names.size()])); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(GetUsersResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + logger.warn("Request failed", e); + throwableRef.set(e); + } + }); + + User[] users = responseRef.get().users(); + + assertThat(throwableRef.get(), is(nullValue())); + assertThat(responseRef.get(), is(notNullValue())); + assertThat(users, arrayContaining(reservedUsers.toArray(new User[reservedUsers.size()]))); + } + + public void testGetAllUsers() { + final List storeUsers = randomFrom(Collections.emptyList(), Collections.singletonList(new User("joe")), + Arrays.asList(new User("jane"), new User("fred")), randomUsers()); + NativeUsersStore usersStore = mock(NativeUsersStore.class); + SecurityLifecycleService securityLifecycleService = mock(SecurityLifecycleService.class); + when(securityLifecycleService.isSecurityIndexAvailable()).thenReturn(true); + ReservedRealmTests.mockGetAllReservedUserInfo(usersStore, Collections.emptyMap()); + ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, new AnonymousUser(settings), + securityLifecycleService, new ThreadContext(Settings.EMPTY)); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), usersStore, transportService, reservedRealm); + + GetUsersRequest request = new GetUsersRequest(); + doAnswer(new Answer() { + public Void answer(InvocationOnMock invocation) { + Object[] args = invocation.getArguments(); + assert args.length == 2; + ActionListener> listener = (ActionListener>) args[1]; + listener.onResponse(storeUsers); + return null; + } + }).when(usersStore).getUsers(eq(Strings.EMPTY_ARRAY), any(ActionListener.class)); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(GetUsersResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + final List expectedList = new ArrayList<>(); + PlainActionFuture> userFuture = new PlainActionFuture<>(); + reservedRealm.users(userFuture); + expectedList.addAll(userFuture.actionGet()); + expectedList.addAll(storeUsers); + + assertThat(throwableRef.get(), is(nullValue())); + assertThat(responseRef.get(), is(notNullValue())); + assertThat(responseRef.get().users(), 
arrayContaining(expectedList.toArray(new User[expectedList.size()]))); + verify(usersStore, times(1)).getUsers(aryEq(Strings.EMPTY_ARRAY), any(ActionListener.class)); + } + + public void testGetStoreOnlyUsers() { + final List storeUsers = + randomFrom(Collections.singletonList(new User("joe")), Arrays.asList(new User("jane"), new User("fred")), randomUsers()); + final String[] storeUsernames = storeUsers.stream().map(User::principal).collect(Collectors.toList()).toArray(Strings.EMPTY_ARRAY); + NativeUsersStore usersStore = mock(NativeUsersStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), usersStore, transportService, mock(ReservedRealm.class)); + + GetUsersRequest request = new GetUsersRequest(); + request.usernames(storeUsernames); + doAnswer(invocation -> { + Object[] args = invocation.getArguments(); + assert args.length == 2; + ActionListener> listener = (ActionListener>) args[1]; + listener.onResponse(storeUsers); + return null; + }).when(usersStore).getUsers(aryEq(storeUsernames), any(ActionListener.class)); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(GetUsersResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + final List expectedList = new ArrayList<>(); + expectedList.addAll(storeUsers); + + assertThat(throwableRef.get(), is(nullValue())); + assertThat(responseRef.get(), is(notNullValue())); + assertThat(responseRef.get().users(), arrayContaining(expectedList.toArray(new User[expectedList.size()]))); + if (storeUsers.size() > 1) { + verify(usersStore, times(1)).getUsers(aryEq(storeUsernames), any(ActionListener.class)); + } else { + verify(usersStore, times(1)).getUsers(aryEq(new String[] {storeUsernames[0]}), any(ActionListener.class)); + } + } + + public void testException() { + final Exception e = randomFrom(new ElasticsearchSecurityException(""), new IllegalStateException(), new ValidationException()); + final List storeUsers = + randomFrom(Collections.singletonList(new User("joe")), Arrays.asList(new User("jane"), new User("fred")), randomUsers()); + final String[] storeUsernames = storeUsers.stream().map(User::principal).collect(Collectors.toList()).toArray(Strings.EMPTY_ARRAY); + NativeUsersStore usersStore = mock(NativeUsersStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), usersStore, transportService, mock(ReservedRealm.class)); + + GetUsersRequest request = new GetUsersRequest(); + request.usernames(storeUsernames); + doAnswer(invocation -> { + Object[] args = invocation.getArguments(); + assert args.length == 2; + ActionListener> listener = (ActionListener>) args[1]; + listener.onFailure(e); + return null; + }).when(usersStore).getUsers(aryEq(storeUsernames), any(ActionListener.class)); + + final 
AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(GetUsersResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(throwableRef.get(), is(notNullValue())); + assertThat(throwableRef.get(), is(sameInstance(e))); + assertThat(responseRef.get(), is(nullValue())); + verify(usersStore, times(1)).getUsers(aryEq(storeUsernames), any(ActionListener.class)); + } + + private List randomUsers() { + int size = scaledRandomIntBetween(3, 16); + List users = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + users.add(new User("user_" + i, randomAlphaOfLengthBetween(4, 12))); + } + return users; + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java new file mode 100644 index 0000000000000..d4a256b8a0ca8 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java @@ -0,0 +1,313 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.user; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; +import org.elasticsearch.action.delete.DeleteAction; +import org.elasticsearch.action.index.IndexAction; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.mock.orig.Mockito; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequest; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse.IndexPrivileges; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.AuthenticationField; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.authz.AuthorizationService; +import org.hamcrest.Matchers; +import org.junit.Before; + +import java.util.Collections; +import java.util.LinkedHashMap; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import 
static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportHasPrivilegesActionTests extends ESTestCase { + + private User user; + private Role role; + private TransportHasPrivilegesAction action; + + @Before + public void setup() { + final Settings settings = Settings.builder().build(); + user = new User(randomAlphaOfLengthBetween(4, 12)); + final ThreadPool threadPool = mock(ThreadPool.class); + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService + .NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + + final Authentication authentication = mock(Authentication.class); + threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, authentication); + when(threadPool.getThreadContext()).thenReturn(threadContext); + + when(authentication.getUser()).thenReturn(user); + + AuthorizationService authorizationService = mock(AuthorizationService.class); + Mockito.doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onResponse(role); + return null; + }).when(authorizationService).roles(eq(user), any(ActionListener.class)); + + action = new TransportHasPrivilegesAction(settings, threadPool, transportService, + mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), authorizationService); + } + + /** + * This tests that action names in the request are considered "matched" by the relevant named privilege + * (in this case that {@link DeleteAction} and {@link IndexAction} are satisfied by {@link IndexPrivilege#WRITE}). + */ + public void testNamedIndexPrivilegesMatchApplicableActions() throws Exception { + role = Role.builder("test1").cluster(ClusterPrivilege.ALL).add(IndexPrivilege.WRITE, "academy").build(); + + final HasPrivilegesRequest request = new HasPrivilegesRequest(); + request.username(user.principal()); + request.clusterPrivileges(ClusterHealthAction.NAME); + request.indexPrivileges(RoleDescriptor.IndicesPrivileges.builder() + .indices("academy") + .privileges(DeleteAction.NAME, IndexAction.NAME) + .build()); + final PlainActionFuture future = new PlainActionFuture(); + action.doExecute(request, future); + + final HasPrivilegesResponse response = future.get(); + assertThat(response, notNullValue()); + assertThat(response.isCompleteMatch(), is(true)); + + assertThat(response.getClusterPrivileges().size(), equalTo(1)); + assertThat(response.getClusterPrivileges().get(ClusterHealthAction.NAME), equalTo(true)); + + assertThat(response.getIndexPrivileges(), Matchers.iterableWithSize(1)); + final IndexPrivileges result = response.getIndexPrivileges().get(0); + assertThat(result.getIndex(), equalTo("academy")); + assertThat(result.getPrivileges().size(), equalTo(2)); + assertThat(result.getPrivileges().get(DeleteAction.NAME), equalTo(true)); + assertThat(result.getPrivileges().get(IndexAction.NAME), equalTo(true)); + } + + /** + * This tests that the action responds correctly when the user/role has some, but not all + * of the privileges being checked. 
+     */
+    public void testMatchSubsetOfPrivileges() throws Exception {
+        role = Role.builder("test2")
+                .cluster(ClusterPrivilege.MONITOR)
+                .add(IndexPrivilege.INDEX, "academy")
+                .add(IndexPrivilege.WRITE, "initiative")
+                .build();
+
+        final HasPrivilegesRequest request = new HasPrivilegesRequest();
+        request.username(user.principal());
+        request.clusterPrivileges("monitor", "manage");
+        request.indexPrivileges(RoleDescriptor.IndicesPrivileges.builder()
+                .indices("academy", "initiative", "school")
+                .privileges("delete", "index", "manage")
+                .build());
+        final PlainActionFuture<HasPrivilegesResponse> future = new PlainActionFuture<>();
+        action.doExecute(request, future);
+
+        final HasPrivilegesResponse response = future.get();
+        assertThat(response, notNullValue());
+        assertThat(response.isCompleteMatch(), is(false));
+        assertThat(response.getClusterPrivileges().size(), equalTo(2));
+        assertThat(response.getClusterPrivileges().get("monitor"), equalTo(true));
+        assertThat(response.getClusterPrivileges().get("manage"), equalTo(false));
+        assertThat(response.getIndexPrivileges(), Matchers.iterableWithSize(3));
+
+        final IndexPrivileges academy = response.getIndexPrivileges().get(0);
+        final IndexPrivileges initiative = response.getIndexPrivileges().get(1);
+        final IndexPrivileges school = response.getIndexPrivileges().get(2);
+
+        assertThat(academy.getIndex(), equalTo("academy"));
+        assertThat(academy.getPrivileges().size(), equalTo(3));
+        assertThat(academy.getPrivileges().get("index"), equalTo(true)); // explicit
+        assertThat(academy.getPrivileges().get("delete"), equalTo(false));
+        assertThat(academy.getPrivileges().get("manage"), equalTo(false));
+
+        assertThat(initiative.getIndex(), equalTo("initiative"));
+        assertThat(initiative.getPrivileges().size(), equalTo(3));
+        assertThat(initiative.getPrivileges().get("index"), equalTo(true)); // implied by write
+        assertThat(initiative.getPrivileges().get("delete"), equalTo(true)); // implied by write
+        assertThat(initiative.getPrivileges().get("manage"), equalTo(false));
+
+        assertThat(school.getIndex(), equalTo("school"));
+        assertThat(school.getPrivileges().size(), equalTo(3));
+        assertThat(school.getPrivileges().get("index"), equalTo(false));
+        assertThat(school.getPrivileges().get("delete"), equalTo(false));
+        assertThat(school.getPrivileges().get("manage"), equalTo(false));
+    }
+
+    /**
+     * This tests that the action responds correctly when the user/role has none
+     * of the privileges being checked.
+     */
+    public void testMatchNothing() throws Exception {
+        role = Role.builder("test3")
+                .cluster(ClusterPrivilege.MONITOR)
+                .build();
+
+        final HasPrivilegesResponse response = hasPrivileges(RoleDescriptor.IndicesPrivileges.builder()
+                .indices("academy")
+                .privileges("read", "write")
+                .build(), Strings.EMPTY_ARRAY);
+        assertThat(response.isCompleteMatch(), is(false));
+        assertThat(response.getIndexPrivileges(), Matchers.iterableWithSize(1));
+        final IndexPrivileges result = response.getIndexPrivileges().get(0);
+        assertThat(result.getIndex(), equalTo("academy"));
+        assertThat(result.getPrivileges().size(), equalTo(2));
+        assertThat(result.getPrivileges().get("read"), equalTo(false));
+        assertThat(result.getPrivileges().get("write"), equalTo(false));
+    }
+
+    /**
+     * Wildcards in the request are treated as
+     * does the user have ___ privilege on every possible index that matches this pattern?
+     * Or, expressed differently,
+     * does the user have ___ privilege on a wildcard that covers (is a superset of) this pattern?
+ */ + public void testWildcardHandling() throws Exception { + role = Role.builder("test3") + .add(IndexPrivilege.ALL, "logstash-*", "foo?") + .add(IndexPrivilege.READ, "abc*") + .add(IndexPrivilege.WRITE, "*xyz") + .build(); + + final HasPrivilegesRequest request = new HasPrivilegesRequest(); + request.username(user.principal()); + request.clusterPrivileges(Strings.EMPTY_ARRAY); + request.indexPrivileges( + RoleDescriptor.IndicesPrivileges.builder() + .indices("logstash-2016-*") + .privileges("write") // Yes, because (ALL,"logstash-*") + .build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices("logstash-*") + .privileges("read") // Yes, because (ALL,"logstash-*") + .build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices("log*") + .privileges("manage") // No, because "log*" includes indices that "logstash-*" does not + .build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices("foo*", "foo?") + .privileges("read") // Yes, "foo?", but not "foo*", because "foo*" > "foo?" + .build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices("abcd*") + .privileges("read", "write") // read = Yes, because (READ, "abc*"), write = No + .build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices("abc*xyz") + .privileges("read", "write", "manage") // read = Yes ( READ "abc*"), write = Yes (WRITE, "*xyz"), manage = No + .build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices("a*xyz") + .privileges("read", "write", "manage") // read = No, write = Yes (WRITE, "*xyz"), manage = No + .build() + ); + final PlainActionFuture future = new PlainActionFuture(); + action.doExecute(request, future); + + final HasPrivilegesResponse response = future.get(); + assertThat(response, notNullValue()); + assertThat(response.isCompleteMatch(), is(false)); + assertThat(response.getIndexPrivileges(), Matchers.iterableWithSize(8)); + assertThat(response.getIndexPrivileges(), containsInAnyOrder( + new IndexPrivileges("logstash-2016-*", Collections.singletonMap("write", true)), + new IndexPrivileges("logstash-*", Collections.singletonMap("read", true)), + new IndexPrivileges("log*", Collections.singletonMap("manage", false)), + new IndexPrivileges("foo?", Collections.singletonMap("read", true)), + new IndexPrivileges("foo*", Collections.singletonMap("read", false)), + new IndexPrivileges("abcd*", mapBuilder().put("read", true).put("write", false).map()), + new IndexPrivileges("abc*xyz", mapBuilder().put("read", true).put("write", true).put("manage", false).map()), + new IndexPrivileges("a*xyz", mapBuilder().put("read", false).put("write", true).put("manage", false).map()) + )); + } + + public void testCheckingIndexPermissionsDefinedOnDifferentPatterns() throws Exception { + role = Role.builder("test-write") + .add(IndexPrivilege.INDEX, "apache-*") + .add(IndexPrivilege.DELETE, "apache-2016-*") + .build(); + + final HasPrivilegesResponse response = hasPrivileges(RoleDescriptor.IndicesPrivileges.builder() + .indices("apache-2016-12", "apache-2017-01") + .privileges("index", "delete") + .build(), Strings.EMPTY_ARRAY); + assertThat(response.isCompleteMatch(), is(false)); + assertThat(response.getIndexPrivileges(), Matchers.iterableWithSize(2)); + assertThat(response.getIndexPrivileges(), containsInAnyOrder( + new IndexPrivileges("apache-2016-12", + MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", true).put("delete", true).map()), + new IndexPrivileges("apache-2017-01", + MapBuilder.newMapBuilder(new LinkedHashMap()) + .put("index", true).put("delete", false).map() + ) + )); + 
} + + public void testIsCompleteMatch() throws Exception { + role = Role.builder("test-write") + .cluster(ClusterPrivilege.MONITOR) + .add(IndexPrivilege.READ, "read-*") + .add(IndexPrivilege.ALL, "all-*") + .build(); + + assertThat(hasPrivileges(indexPrivileges("read", "read-123", "read-456", "all-999"), "monitor").isCompleteMatch(), is(true)); + assertThat(hasPrivileges(indexPrivileges("read", "read-123", "read-456", "all-999"), "manage").isCompleteMatch(), is(false)); + assertThat(hasPrivileges(indexPrivileges("write", "read-123", "read-456", "all-999"), "monitor").isCompleteMatch(), is(false)); + assertThat(hasPrivileges(indexPrivileges("write", "read-123", "read-456", "all-999"), "manage").isCompleteMatch(), is(false)); + } + + private RoleDescriptor.IndicesPrivileges indexPrivileges(String priv, String... indices) { + return RoleDescriptor.IndicesPrivileges.builder() + .indices(indices) + .privileges(priv) + .build(); + } + + private HasPrivilegesResponse hasPrivileges(RoleDescriptor.IndicesPrivileges indicesPrivileges, String... clusterPrivileges) + throws Exception { + final HasPrivilegesRequest request = new HasPrivilegesRequest(); + request.username(user.principal()); + request.clusterPrivileges(clusterPrivileges); + request.indexPrivileges(indicesPrivileges); + final PlainActionFuture future = new PlainActionFuture(); + action.doExecute(request, future); + final HasPrivilegesResponse response = future.get(); + assertThat(response, notNullValue()); + return response; + } + + private static MapBuilder mapBuilder() { + return MapBuilder.newMapBuilder(); + } + +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java new file mode 100644 index 0000000000000..bab047951e50a --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java @@ -0,0 +1,241 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.action.user; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.user.PutUserRequest; +import org.elasticsearch.xpack.core.security.action.user.PutUserResponse; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; +import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; +import org.elasticsearch.xpack.security.authc.esnative.ReservedRealmTests; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +public class TransportPutUserActionTests extends ESTestCase { + + public void testAnonymousUser() { + Settings settings = Settings.builder().put(AnonymousUser.ROLES_SETTING.getKey(), "superuser").build(); + final AnonymousUser anonymousUser = new AnonymousUser(settings); + NativeUsersStore usersStore = mock(NativeUsersStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportPutUserAction action = new TransportPutUserAction(settings, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), usersStore, transportService); + + PutUserRequest request = new PutUserRequest(); + request.username(anonymousUser.principal()); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(PutUserResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + 
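+        // the request targets the anonymous user, so the action is expected to fail fast
+        // and never reach the native users store (verified below)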
+        assertThat(responseRef.get(), is(nullValue()));
+        assertThat(throwableRef.get(), instanceOf(IllegalArgumentException.class));
+        assertThat(throwableRef.get().getMessage(), containsString("is anonymous and cannot be modified"));
+        verifyZeroInteractions(usersStore);
+    }
+
+    public void testSystemUser() {
+        NativeUsersStore usersStore = mock(NativeUsersStore.class);
+        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
+            x -> null, null, Collections.emptySet());
+        TransportPutUserAction action = new TransportPutUserAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
+            mock(IndexNameExpressionResolver.class), usersStore, transportService);
+
+        PutUserRequest request = new PutUserRequest();
+        request.username(randomFrom(SystemUser.INSTANCE.principal(), XPackUser.INSTANCE.principal()));
+
+        final AtomicReference<Throwable> throwableRef = new AtomicReference<>();
+        final AtomicReference<PutUserResponse> responseRef = new AtomicReference<>();
+        action.doExecute(request, new ActionListener<PutUserResponse>() {
+            @Override
+            public void onResponse(PutUserResponse response) {
+                responseRef.set(response);
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                throwableRef.set(e);
+            }
+        });
+
+        assertThat(responseRef.get(), is(nullValue()));
+        assertThat(throwableRef.get(), instanceOf(IllegalArgumentException.class));
+        assertThat(throwableRef.get().getMessage(), containsString("is internal"));
+        verifyZeroInteractions(usersStore);
+    }
+
+    public void testReservedUser() {
+        NativeUsersStore usersStore = mock(NativeUsersStore.class);
+        SecurityLifecycleService securityLifecycleService = mock(SecurityLifecycleService.class);
+        when(securityLifecycleService.isSecurityIndexAvailable()).thenReturn(true);
+        ReservedRealmTests.mockGetAllReservedUserInfo(usersStore, Collections.emptyMap());
+        Settings settings = Settings.builder().put("path.home", createTempDir()).build();
+        ReservedRealm reservedRealm = new ReservedRealm(TestEnvironment.newEnvironment(settings), settings, usersStore,
+            new AnonymousUser(settings), securityLifecycleService, new ThreadContext(settings));
+        PlainActionFuture<Collection<User>> userFuture = new PlainActionFuture<>();
+        reservedRealm.users(userFuture);
+        final User reserved = randomFrom(userFuture.actionGet().toArray(new User[0]));
+        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
+            x -> null, null, Collections.emptySet());
+        TransportPutUserAction action = new TransportPutUserAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class),
+            mock(IndexNameExpressionResolver.class), usersStore, transportService);
+
+        PutUserRequest request = new PutUserRequest();
+        request.username(reserved.principal());
+
+        final AtomicReference<Throwable> throwableRef = new AtomicReference<>();
+        final AtomicReference<PutUserResponse> responseRef = new AtomicReference<>();
+        action.doExecute(request, new ActionListener<PutUserResponse>() {
+            @Override
+            public void onResponse(PutUserResponse response) {
+                responseRef.set(response);
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                throwableRef.set(e);
+            }
+        });
+
+        assertThat(responseRef.get(), is(nullValue()));
+        assertThat(throwableRef.get(), instanceOf(IllegalArgumentException.class));
+        assertThat(throwableRef.get().getMessage(), containsString("is reserved and only the password"));
+    }
+
+    public void testValidUser() {
+        final User user = new User("joe");
+        NativeUsersStore usersStore = mock(NativeUsersStore.class);
+        TransportService
transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportPutUserAction action = new TransportPutUserAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), usersStore, transportService); + + final boolean isCreate = randomBoolean(); + final PutUserRequest request = new PutUserRequest(); + request.username(user.principal()); + if (isCreate) { + request.passwordHash(Hasher.BCRYPT.hash(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)); + } + final boolean created = isCreate ? randomBoolean() : false; // updates should always return false for create + doAnswer(new Answer() { + public Void answer(InvocationOnMock invocation) { + Object[] args = invocation.getArguments(); + assert args.length == 2; + ActionListener listener = (ActionListener) args[1]; + listener.onResponse(created); + return null; + } + }).when(usersStore).putUser(eq(request), any(ActionListener.class)); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(PutUserResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), is(notNullValue())); + assertThat(responseRef.get().created(), is(created)); + assertThat(throwableRef.get(), is(nullValue())); + verify(usersStore, times(1)).putUser(eq(request), any(ActionListener.class)); + } + + public void testException() { + final Exception e = randomFrom(new ElasticsearchSecurityException(""), new IllegalStateException(), new ValidationException()); + final User user = new User("joe"); + NativeUsersStore usersStore = mock(NativeUsersStore.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportPutUserAction action = new TransportPutUserAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), + mock(IndexNameExpressionResolver.class), usersStore, transportService); + + final PutUserRequest request = new PutUserRequest(); + request.username(user.principal()); + doAnswer(new Answer() { + public Void answer(InvocationOnMock invocation) { + Object[] args = invocation.getArguments(); + assert args.length == 2; + ActionListener listener = (ActionListener) args[1]; + listener.onFailure(e); + return null; + } + }).when(usersStore).putUser(eq(request), any(ActionListener.class)); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(PutUserResponse response) { + responseRef.set(response); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), is(nullValue())); + assertThat(throwableRef.get(), is(notNullValue())); + assertThat(throwableRef.get(), sameInstance(e)); + verify(usersStore, times(1)).putUser(eq(request), any(ActionListener.class)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java new file mode 100644 index 0000000000000..09fd90437523c --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java @@ -0,0 +1,273 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.action.user; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.user.SetEnabledRequest; +import org.elasticsearch.xpack.core.security.action.user.SetEnabledResponse; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.AuthenticationField; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.core.security.user.ElasticUser; +import org.elasticsearch.xpack.core.security.user.KibanaUser; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.util.Collections; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +/** + * Unit tests for the {@link TransportSetEnabledAction} + */ +public class TransportSetEnabledActionTests extends ESTestCase { + + public void testAnonymousUser() { + Settings settings = Settings.builder().put(AnonymousUser.ROLES_SETTING.getKey(), "superuser").build(); + final User user = randomFrom(new ElasticUser(true), new KibanaUser(true), new User("joe")); + ThreadPool threadPool = mock(ThreadPool.class); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + Authentication authentication = mock(Authentication.class); + when(threadPool.getThreadContext()).thenReturn(threadContext); + threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, authentication); + when(authentication.getUser()).thenReturn(user); + NativeUsersStore usersStore = 
mock(NativeUsersStore.class);
+        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
+            x -> null, null, Collections.emptySet());
+        TransportSetEnabledAction action = new TransportSetEnabledAction(settings, threadPool, transportService, mock(ActionFilters.class),
+            mock(IndexNameExpressionResolver.class), usersStore);
+
+        SetEnabledRequest request = new SetEnabledRequest();
+        request.username(new AnonymousUser(settings).principal());
+        request.enabled(randomBoolean());
+
+        final AtomicReference<Throwable> throwableRef = new AtomicReference<>();
+        final AtomicReference<SetEnabledResponse> responseRef = new AtomicReference<>();
+        action.doExecute(request, new ActionListener<SetEnabledResponse>() {
+            @Override
+            public void onResponse(SetEnabledResponse setEnabledResponse) {
+                responseRef.set(setEnabledResponse);
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                throwableRef.set(e);
+            }
+        });
+
+        assertThat(responseRef.get(), is(nullValue()));
+        assertThat(throwableRef.get(), instanceOf(IllegalArgumentException.class));
+        assertThat(throwableRef.get().getMessage(), containsString("is anonymous"));
+        verifyZeroInteractions(usersStore);
+    }
+
+    public void testInternalUser() {
+        final User user = randomFrom(new ElasticUser(true), new KibanaUser(true), new User("joe"));
+        ThreadPool threadPool = mock(ThreadPool.class);
+        ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
+        Authentication authentication = mock(Authentication.class);
+        when(threadPool.getThreadContext()).thenReturn(threadContext);
+        threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, authentication);
+        when(authentication.getUser()).thenReturn(user);
+        NativeUsersStore usersStore = mock(NativeUsersStore.class);
+        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
+            x -> null, null, Collections.emptySet());
+        TransportSetEnabledAction action = new TransportSetEnabledAction(Settings.EMPTY, threadPool, transportService,
+            mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), usersStore);
+
+        SetEnabledRequest request = new SetEnabledRequest();
+        request.username(randomFrom(SystemUser.INSTANCE.principal(), XPackUser.INSTANCE.principal()));
+        request.enabled(randomBoolean());
+
+        final AtomicReference<Throwable> throwableRef = new AtomicReference<>();
+        final AtomicReference<SetEnabledResponse> responseRef = new AtomicReference<>();
+        action.doExecute(request, new ActionListener<SetEnabledResponse>() {
+            @Override
+            public void onResponse(SetEnabledResponse setEnabledResponse) {
+                responseRef.set(setEnabledResponse);
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                throwableRef.set(e);
+            }
+        });
+
+        assertThat(responseRef.get(), is(nullValue()));
+        assertThat(throwableRef.get(), instanceOf(IllegalArgumentException.class));
+        assertThat(throwableRef.get().getMessage(), containsString("is internal"));
+        verifyZeroInteractions(usersStore);
+    }
+
+    public void testValidUser() {
+        ThreadPool threadPool = mock(ThreadPool.class);
+        ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
+        Authentication authentication = mock(Authentication.class);
+        when(threadPool.getThreadContext()).thenReturn(threadContext);
+        threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, authentication);
+        when(authentication.getUser()).thenReturn(new User("the runner"));
+
+        final User user = randomFrom(new ElasticUser(true), new KibanaUser(true), new User("joe"));
+        NativeUsersStore usersStore = mock(NativeUsersStore.class);
+
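+        // an enable/disable request for the chosen user with a randomized refresh policy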
SetEnabledRequest request = new SetEnabledRequest(); + request.username(user.principal()); + request.enabled(randomBoolean()); + request.setRefreshPolicy(randomFrom(RefreshPolicy.values())); + // mock the setEnabled call on the native users store so that it will invoke the action listener with a response + doAnswer(new Answer() { + public Void answer(InvocationOnMock invocation) { + Object[] args = invocation.getArguments(); + assert args.length == 4; + ActionListener listener = (ActionListener) args[3]; + listener.onResponse(null); + return null; + } + }).when(usersStore) + .setEnabled(eq(user.principal()), eq(request.enabled()), eq(request.getRefreshPolicy()), any(ActionListener.class)); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportSetEnabledAction action = new TransportSetEnabledAction(Settings.EMPTY, threadPool, transportService, + mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), usersStore); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(SetEnabledResponse setEnabledResponse) { + responseRef.set(setEnabledResponse); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), is(notNullValue())); + assertThat(responseRef.get(), instanceOf(SetEnabledResponse.class)); + assertThat(throwableRef.get(), is(nullValue())); + verify(usersStore, times(1)) + .setEnabled(eq(user.principal()), eq(request.enabled()), eq(request.getRefreshPolicy()), any(ActionListener.class)); + } + + public void testException() { + ThreadPool threadPool = mock(ThreadPool.class); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + Authentication authentication = mock(Authentication.class); + when(threadPool.getThreadContext()).thenReturn(threadContext); + threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, authentication); + when(authentication.getUser()).thenReturn(new User("the runner")); + + final User user = randomFrom(new ElasticUser(true), new KibanaUser(true), new User("joe")); + NativeUsersStore usersStore = mock(NativeUsersStore.class); + SetEnabledRequest request = new SetEnabledRequest(); + request.username(user.principal()); + request.enabled(randomBoolean()); + request.setRefreshPolicy(randomFrom(RefreshPolicy.values())); + final Exception e = randomFrom(new ElasticsearchSecurityException(""), new IllegalStateException(), new RuntimeException()); + // we're mocking the setEnabled call on the native users store so that it will invoke the action listener with an exception + doAnswer(new Answer() { + public Void answer(InvocationOnMock invocation) { + Object[] args = invocation.getArguments(); + assert args.length == 4; + ActionListener listener = (ActionListener) args[3]; + listener.onFailure(e); + return null; + } + }).when(usersStore) + .setEnabled(eq(user.principal()), eq(request.enabled()), eq(request.getRefreshPolicy()), any(ActionListener.class)); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportSetEnabledAction action = new TransportSetEnabledAction(Settings.EMPTY, threadPool, transportService, + mock(ActionFilters.class), 
mock(IndexNameExpressionResolver.class), usersStore); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(SetEnabledResponse setEnabledResponse) { + responseRef.set(setEnabledResponse); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), is(nullValue())); + assertThat(throwableRef.get(), is(notNullValue())); + assertThat(throwableRef.get(), sameInstance(e)); + verify(usersStore, times(1)) + .setEnabled(eq(user.principal()), eq(request.enabled()), eq(request.getRefreshPolicy()), any(ActionListener.class)); + } + + public void testUserModifyingThemselves() { + final User user = randomFrom(new ElasticUser(true), new KibanaUser(true), new User("joe")); + ThreadPool threadPool = mock(ThreadPool.class); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + Authentication authentication = mock(Authentication.class); + when(threadPool.getThreadContext()).thenReturn(threadContext); + threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, authentication); + when(authentication.getUser()).thenReturn(user); + + NativeUsersStore usersStore = mock(NativeUsersStore.class); + SetEnabledRequest request = new SetEnabledRequest(); + request.username(user.principal()); + request.enabled(randomBoolean()); + request.setRefreshPolicy(randomFrom(RefreshPolicy.values())); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()); + TransportSetEnabledAction action = new TransportSetEnabledAction(Settings.EMPTY, threadPool, transportService, + mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), usersStore); + + final AtomicReference throwableRef = new AtomicReference<>(); + final AtomicReference responseRef = new AtomicReference<>(); + action.doExecute(request, new ActionListener() { + @Override + public void onResponse(SetEnabledResponse setEnabledResponse) { + responseRef.set(setEnabledResponse); + } + + @Override + public void onFailure(Exception e) { + throwableRef.set(e); + } + }); + + assertThat(responseRef.get(), is(nullValue())); + assertThat(throwableRef.get(), instanceOf(IllegalArgumentException.class)); + assertThat(throwableRef.get().getMessage(), containsString("own account")); + verifyZeroInteractions(usersStore); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditLevelTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditLevelTests.java new file mode 100644 index 0000000000000..9655e1f09d88b --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditLevelTests.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.security.audit;
+
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.security.audit.AuditLevel;
+
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Locale;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+public class AuditLevelTests extends ESTestCase {
+    public void testAllIndexAuditLevel() {
+        EnumSet<AuditLevel> enumSet = AuditLevel.parse(Collections.singletonList("_all"));
+        AuditLevel[] levels = AuditLevel.values();
+        assertThat(enumSet.size(), is(levels.length));
+        for (AuditLevel level : levels) {
+            assertThat(enumSet.contains(level), is(true));
+        }
+    }
+
+    public void testExcludeHasPreference() {
+        EnumSet<AuditLevel> enumSet = AuditLevel.parse(Collections.singletonList("_all"), Collections.singletonList("_all"));
+        assertThat(enumSet.size(), is(0));
+    }
+
+    public void testExcludeHasPreferenceSingle() {
+        String excluded = randomFrom(AuditLevel.values()).toString().toLowerCase(Locale.ROOT);
+        EnumSet<AuditLevel> enumSet = AuditLevel.parse(Collections.singletonList("_all"), Collections.singletonList(excluded));
+        EnumSet<AuditLevel> expected = EnumSet.allOf(AuditLevel.class);
+        expected.remove(AuditLevel.valueOf(excluded.toUpperCase(Locale.ROOT)));
+        assertThat(enumSet, equalTo(expected));
+    }
+}
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java
new file mode 100644
index 0000000000000..b346fc6857e7e
--- /dev/null
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java
@@ -0,0 +1,240 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.security.audit; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.TransportMessage; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.transport.filter.IPFilter; +import org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule; +import org.junit.Before; + +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.List; + +import static java.util.Collections.unmodifiableList; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +public class AuditTrailServiceTests extends ESTestCase { + private List auditTrails; + private AuditTrailService service; + + private AuthenticationToken token; + private TransportMessage message; + private RestRequest restRequest; + private XPackLicenseState licenseState; + private boolean isAuditingAllowed; + + @Before + public void init() throws Exception { + List auditTrailsBuilder = new ArrayList<>(); + for (int i = 0; i < randomIntBetween(1, 4); i++) { + auditTrailsBuilder.add(mock(AuditTrail.class)); + } + auditTrails = unmodifiableList(auditTrailsBuilder); + licenseState = mock(XPackLicenseState.class); + service = new AuditTrailService(Settings.EMPTY, auditTrails, licenseState); + isAuditingAllowed = randomBoolean(); + when(licenseState.isSecurityEnabled()).thenReturn(true); + when(licenseState.isAuditingAllowed()).thenReturn(isAuditingAllowed); + token = mock(AuthenticationToken.class); + message = mock(TransportMessage.class); + restRequest = mock(RestRequest.class); + } + + public void testAuthenticationFailed() throws Exception { + service.authenticationFailed(token, "_action", message); + verify(licenseState).isAuditingAllowed(); + verify(licenseState).isSecurityEnabled(); + if (isAuditingAllowed) { + for (AuditTrail auditTrail : auditTrails) { + verify(auditTrail).authenticationFailed(token, "_action", message); + } + } else { + verifyZeroInteractions(auditTrails.toArray((Object[]) new AuditTrail[auditTrails.size()])); + } + } + + public void testAuthenticationFailedNoToken() throws Exception { + service.authenticationFailed("_action", message); + verify(licenseState).isAuditingAllowed(); + verify(licenseState).isSecurityEnabled(); + if (isAuditingAllowed) { + for (AuditTrail auditTrail : auditTrails) { + verify(auditTrail).authenticationFailed("_action", message); + } + } else { + verifyZeroInteractions(auditTrails.toArray((Object[]) new AuditTrail[auditTrails.size()])); + } + } + + public void testAuthenticationFailedRestNoToken() throws Exception { + service.authenticationFailed(restRequest); + verify(licenseState).isAuditingAllowed(); + verify(licenseState).isSecurityEnabled(); + if (isAuditingAllowed) { + for (AuditTrail auditTrail : auditTrails) { + verify(auditTrail).authenticationFailed(restRequest); + } + } else { + verifyZeroInteractions(auditTrails.toArray((Object[]) new AuditTrail[auditTrails.size()])); + } + } + + public void testAuthenticationFailedRest() throws Exception { + 
service.authenticationFailed(token, restRequest); + verify(licenseState).isAuditingAllowed(); + verify(licenseState).isSecurityEnabled(); + if (isAuditingAllowed) { + for (AuditTrail auditTrail : auditTrails) { + verify(auditTrail).authenticationFailed(token, restRequest); + } + } else { + verifyZeroInteractions(auditTrails.toArray((Object[]) new AuditTrail[auditTrails.size()])); + } + } + + public void testAuthenticationFailedRealm() throws Exception { + service.authenticationFailed("_realm", token, "_action", message); + verify(licenseState).isAuditingAllowed(); + verify(licenseState).isSecurityEnabled(); + if (isAuditingAllowed) { + for (AuditTrail auditTrail : auditTrails) { + verify(auditTrail).authenticationFailed("_realm", token, "_action", message); + } + } else { + verifyZeroInteractions(auditTrails.toArray((Object[]) new AuditTrail[auditTrails.size()])); + } + } + + public void testAuthenticationFailedRestRealm() throws Exception { + service.authenticationFailed("_realm", token, restRequest); + verify(licenseState).isAuditingAllowed(); + verify(licenseState).isSecurityEnabled(); + if (isAuditingAllowed) { + for (AuditTrail auditTrail : auditTrails) { + verify(auditTrail).authenticationFailed("_realm", token, restRequest); + } + } else { + verifyZeroInteractions(auditTrails.toArray((Object[]) new AuditTrail[auditTrails.size()])); + } + } + + public void testAnonymousAccess() throws Exception { + service.anonymousAccessDenied("_action", message); + verify(licenseState).isAuditingAllowed(); + verify(licenseState).isSecurityEnabled(); + if (isAuditingAllowed) { + for (AuditTrail auditTrail : auditTrails) { + verify(auditTrail).anonymousAccessDenied("_action", message); + } + } else { + verifyZeroInteractions(auditTrails.toArray((Object[]) new AuditTrail[auditTrails.size()])); + } + } + + public void testAccessGranted() throws Exception { + Authentication authentication =new Authentication(new User("_username", "r1"), new RealmRef(null, null, null), + new RealmRef(null, null, null)); + String[] roles = new String[] { randomAlphaOfLengthBetween(1, 6) }; + service.accessGranted(authentication, "_action", message, roles); + verify(licenseState).isAuditingAllowed(); + verify(licenseState).isSecurityEnabled(); + if (isAuditingAllowed) { + for (AuditTrail auditTrail : auditTrails) { + verify(auditTrail).accessGranted(authentication, "_action", message, roles); + } + } else { + verifyZeroInteractions(auditTrails.toArray((Object[]) new AuditTrail[auditTrails.size()])); + } + } + + public void testAccessDenied() throws Exception { + Authentication authentication = new Authentication(new User("_username", "r1"), new RealmRef(null, null, null), + new RealmRef(null, null, null)); + String[] roles = new String[] { randomAlphaOfLengthBetween(1, 6) }; + service.accessDenied(authentication, "_action", message, roles); + verify(licenseState).isAuditingAllowed(); + verify(licenseState).isSecurityEnabled(); + if (isAuditingAllowed) { + for (AuditTrail auditTrail : auditTrails) { + verify(auditTrail).accessDenied(authentication, "_action", message, roles); + } + } else { + verifyZeroInteractions(auditTrails.toArray((Object[]) new AuditTrail[auditTrails.size()])); + } + } + + public void testConnectionGranted() throws Exception { + InetAddress inetAddress = InetAddress.getLoopbackAddress(); + SecurityIpFilterRule rule = randomBoolean() ? 
SecurityIpFilterRule.ACCEPT_ALL : IPFilter.DEFAULT_PROFILE_ACCEPT_ALL; + service.connectionGranted(inetAddress, "client", rule); + verify(licenseState).isAuditingAllowed(); + verify(licenseState).isSecurityEnabled(); + if (isAuditingAllowed) { + for (AuditTrail auditTrail : auditTrails) { + verify(auditTrail).connectionGranted(inetAddress, "client", rule); + } + } else { + verifyZeroInteractions(auditTrails.toArray((Object[]) new AuditTrail[auditTrails.size()])); + } + } + + public void testConnectionDenied() throws Exception { + InetAddress inetAddress = InetAddress.getLoopbackAddress(); + SecurityIpFilterRule rule = new SecurityIpFilterRule(false, "_all"); + service.connectionDenied(inetAddress, "client", rule); + verify(licenseState).isAuditingAllowed(); + verify(licenseState).isSecurityEnabled(); + if (isAuditingAllowed) { + for (AuditTrail auditTrail : auditTrails) { + verify(auditTrail).connectionDenied(inetAddress, "client", rule); + } + } else { + verifyZeroInteractions(auditTrails.toArray((Object[]) new AuditTrail[auditTrails.size()])); + } + } + + public void testAuthenticationSuccessRest() throws Exception { + User user = new User("_username", "r1"); + String realm = "_realm"; + service.authenticationSuccess(realm, user, restRequest); + verify(licenseState).isAuditingAllowed(); + verify(licenseState).isSecurityEnabled(); + if (isAuditingAllowed) { + for (AuditTrail auditTrail : auditTrails) { + verify(auditTrail).authenticationSuccess(realm, user, restRequest); + } + } else { + verifyZeroInteractions(auditTrails.toArray((Object[]) new AuditTrail[auditTrails.size()])); + } + } + + public void testAuthenticationSuccessTransport() throws Exception { + User user = new User("_username", "r1"); + String realm = "_realm"; + service.authenticationSuccess(realm, user, "_action", message); + verify(licenseState).isAuditingAllowed(); + verify(licenseState).isSecurityEnabled(); + if (isAuditingAllowed) { + for (AuditTrail auditTrail : auditTrails) { + verify(auditTrail).authenticationSuccess(realm, user, "_action", message); + } + } else { + verifyZeroInteractions(auditTrails.toArray((Object[]) new AuditTrail[auditTrails.size()])); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditUtilTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditUtilTests.java new file mode 100644 index 0000000000000..63896e9679204 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditUtilTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.security.audit;
+
+import org.elasticsearch.action.MockIndicesRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.hasItems;
+
+/**
+ * Unit tests for the audit utils class
+ */
+public class AuditUtilTests extends ESTestCase {
+
+    public void testIndicesRequest() {
+        assertNull(AuditUtil.indices(new MockIndicesRequest(null, (String[]) null)));
+        final int numberOfIndices = randomIntBetween(1, 100);
+        List<String> expectedIndices = new ArrayList<>();
+        final boolean includeDuplicates = randomBoolean();
+        for (int i = 0; i < numberOfIndices; i++) {
+            String name = randomAlphaOfLengthBetween(1, 30);
+            expectedIndices.add(name);
+            if (includeDuplicates) {
+                expectedIndices.add(name);
+            }
+        }
+        final Set<String> uniqueExpectedIndices = new HashSet<>(expectedIndices);
+        final Set<String> result = AuditUtil.indices(new MockIndicesRequest(null,
+                expectedIndices.toArray(new String[expectedIndices.size()])));
+        assertNotNull(result);
+        assertEquals(uniqueExpectedIndices.size(), result.size());
+        assertThat(result, hasItems(uniqueExpectedIndices.toArray(Strings.EMPTY_ARRAY)));
+    }
+}
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/AuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/AuditTrailTests.java
new file mode 100644
index 0000000000000..e64d9bb7e4447
--- /dev/null
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/AuditTrailTests.java
@@ -0,0 +1,185 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.security.audit.index; + +import org.apache.http.message.BasicHeader; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.Requests; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.xpack.core.security.ScrollHelper; +import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField; +import org.elasticsearch.xpack.security.audit.AuditTrail; +import org.elasticsearch.xpack.security.audit.AuditTrailService; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.test.SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.iterableWithSize; + +public class AuditTrailTests extends SecurityIntegTestCase { + + private static final String AUTHENTICATE_USER = "http_user"; + private static final String EXECUTE_USER = "exec_user"; + private static final String ROLE_CAN_RUN_AS = "can_run_as"; + private static final String ROLES = ROLE_CAN_RUN_AS + ":\n" + " run_as: [ '" + EXECUTE_USER + "' ]\n"; + + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .put("xpack.security.audit.enabled", true) + .put("xpack.security.audit.outputs", "index") + .putList("xpack.security.audit.index.events.include", "access_denied", "authentication_failed", "run_as_denied") + .build(); + } + + @Override + public String configRoles() { + return ROLES + super.configRoles(); + } + + @Override + public String configUsers() { + return super.configUsers() + + AUTHENTICATE_USER + ":" + SecuritySettingsSource.TEST_PASSWORD_HASHED + "\n" + + EXECUTE_USER + ":xx_no_password_xx\n"; + } + + @Override + public String configUsersRoles() { + return super.configUsersRoles() + + ROLE_CAN_RUN_AS + ":" + AUTHENTICATE_USER + "\n" + + "kibana_user:" + EXECUTE_USER; + } + + @Override + public boolean transportSSLEnabled() { + return true; + } + + public void testAuditAccessDeniedWithRunAsUser() throws Exception { + try { + getRestClient().performRequest("GET", "/.security/_search", + new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + UsernamePasswordToken.basicAuthHeaderValue(AUTHENTICATE_USER, TEST_PASSWORD_SECURE_STRING)), + new BasicHeader(AuthenticationServiceField.RUN_AS_USER_HEADER, EXECUTE_USER)); + fail("request 
should have failed"); + } catch (final ResponseException e) { + assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); + } + + final Collection> events = waitForAuditEvents(); + + assertThat(events, iterableWithSize(1)); + final Map event = events.iterator().next(); + assertThat(event.get(IndexAuditTrail.Field.TYPE), equalTo("access_denied")); + assertThat((List) event.get(IndexAuditTrail.Field.INDICES), containsInAnyOrder(".security")); + assertThat(event.get(IndexAuditTrail.Field.PRINCIPAL), equalTo(EXECUTE_USER)); + assertThat(event.get(IndexAuditTrail.Field.RUN_BY_PRINCIPAL), equalTo(AUTHENTICATE_USER)); + } + + + public void testAuditRunAsDeniedEmptyUser() throws Exception { + try { + getRestClient().performRequest("GET", "/.security/_search", + new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + UsernamePasswordToken.basicAuthHeaderValue(AUTHENTICATE_USER, TEST_PASSWORD_SECURE_STRING)), + new BasicHeader(AuthenticationServiceField.RUN_AS_USER_HEADER, "")); + fail("request should have failed"); + } catch (final ResponseException e) { + assertThat(e.getResponse().getStatusLine().getStatusCode(), is(401)); + } + + final Collection> events = waitForAuditEvents(); + + assertThat(events, iterableWithSize(1)); + final Map event = events.iterator().next(); + assertThat(event.get(IndexAuditTrail.Field.TYPE), equalTo("run_as_denied")); + assertThat(event.get(IndexAuditTrail.Field.PRINCIPAL), equalTo(AUTHENTICATE_USER)); + assertThat(event.get(IndexAuditTrail.Field.RUN_AS_PRINCIPAL), equalTo("")); + assertThat(event.get(IndexAuditTrail.Field.REALM), equalTo("file")); + assertThat(event.get(IndexAuditTrail.Field.RUN_AS_REALM), nullValue()); + } + + private Collection> waitForAuditEvents() throws InterruptedException { + waitForAuditTrailToBeWritten(); + final AtomicReference>> eventsRef = new AtomicReference<>(); + awaitBusy(() -> { + try { + final Collection> events = getAuditEvents(); + eventsRef.set(events); + return events.size() > 0; + } catch (final Exception e) { + throw new RuntimeException(e); + } + }); + + return eventsRef.get(); + } + private Collection> getAuditEvents() throws Exception { + final Client client = client(); + final DateTime now = new DateTime(DateTimeZone.UTC); + final String indexName = IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, now, IndexNameResolver.Rollover.DAILY); + + assertTrue(awaitBusy(() -> indexExists(client, indexName), 5, TimeUnit.SECONDS)); + + client.admin().indices().refresh(Requests.refreshRequest(indexName)).get(); + + final SearchRequest request = client.prepareSearch(indexName) + .setTypes(IndexAuditTrail.DOC_TYPE) + .setQuery(QueryBuilders.matchAllQuery()) + .setSize(1000) + .setFetchSource(true) + .request(); + request.indicesOptions().ignoreUnavailable(); + + final PlainActionFuture>> listener = new PlainActionFuture(); + ScrollHelper.fetchAllByEntity(client, request, listener, SearchHit::getSourceAsMap); + + return listener.get(); + } + + private boolean indexExists(Client client, String indexName) { + try { + final ActionFuture future = client.admin().indices().exists(Requests.indicesExistsRequest(indexName)); + return future.get().isExists(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException("Failed to check if " + indexName + " exists", e); + } + } + + private void waitForAuditTrailToBeWritten() throws InterruptedException { + final AuditTrailService auditTrailService = (AuditTrailService) internalCluster().getInstance(AuditTrail.class); + 
assertThat(auditTrailService.getAuditTrails(), iterableWithSize(1)); + + final IndexAuditTrail indexAuditTrail = (IndexAuditTrail) auditTrailService.getAuditTrails().get(0); + assertTrue(awaitBusy(() -> indexAuditTrail.peek() == null, 5, TimeUnit.SECONDS)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailMutedTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailMutedTests.java new file mode 100644 index 0000000000000..33ba5741e087e --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailMutedTests.java @@ -0,0 +1,322 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.audit.index; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.FilterClient; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.MockTransportClient; +import org.elasticsearch.transport.TransportMessage; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail.State; +import org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule; +import org.junit.After; +import org.junit.Before; + +import java.net.InetAddress; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +public class IndexAuditTrailMutedTests extends ESTestCase { + + private Client client; + private TransportClient transportClient; + private ThreadPool threadPool; + private ClusterService clusterService; + private IndexAuditTrail auditTrail; + + private AtomicBoolean messageEnqueued; + private AtomicBoolean clientCalled; + + @Before + public void setup() { + DiscoveryNode localNode = mock(DiscoveryNode.class); + when(localNode.getHostAddress()).thenReturn(buildNewFakeTransportAddress().toString()); + clusterService = mock(ClusterService.class); + when(clusterService.localNode()).thenReturn(localNode); + + threadPool = new TestThreadPool("index audit trail tests"); + transportClient = new 
MockTransportClient(Settings.EMPTY); + clientCalled = new AtomicBoolean(false); + class IClient extends FilterClient { + IClient(Client transportClient){ + super(Settings.EMPTY, threadPool, transportClient); + } + + @Override + protected > void doExecute( + Action action, Request request, ActionListener listener) { + clientCalled.set(true); + } + } + client = new IClient(transportClient); + messageEnqueued = new AtomicBoolean(false); + } + + @After + public void stop() { + if (auditTrail != null) { + auditTrail.stop(); + } + if (transportClient != null) { + transportClient.close(); + } + threadPool.shutdown(); + } + + public void testAnonymousAccessDeniedMutedTransport() { + createAuditTrail(new String[] { "anonymous_access_denied" }); + TransportMessage message = mock(TransportMessage.class); + auditTrail.anonymousAccessDenied("_action", message); + assertThat(messageEnqueued.get(), is(false)); + assertThat(clientCalled.get(), is(false)); + + verifyZeroInteractions(message); + } + + public void testAnonymousAccessDeniedMutedRest() { + createAuditTrail(new String[] { "anonymous_access_denied" }); + RestRequest restRequest = mock(RestRequest.class); + auditTrail.anonymousAccessDenied(restRequest); + assertThat(messageEnqueued.get(), is(false)); + assertThat(clientCalled.get(), is(false)); + + verifyZeroInteractions(restRequest); + } + + public void testAuthenticationFailedMutedTransport() { + createAuditTrail(new String[] { "authentication_failed" }); + TransportMessage message = mock(TransportMessage.class); + AuthenticationToken token = mock(AuthenticationToken.class); + + // without realm + auditTrail.authenticationFailed(token, "_action", message); + assertThat(messageEnqueued.get(), is(false)); + assertThat(clientCalled.get(), is(false)); + + // without the token + auditTrail.authenticationFailed("_action", message); + assertThat(messageEnqueued.get(), is(false)); + assertThat(clientCalled.get(), is(false)); + + verifyZeroInteractions(token, message); + } + + public void testAuthenticationFailedMutedRest() { + createAuditTrail(new String[] { "authentication_failed" }); + RestRequest restRequest = mock(RestRequest.class); + AuthenticationToken token = mock(AuthenticationToken.class); + + // without the realm + auditTrail.authenticationFailed(token, restRequest); + assertThat(messageEnqueued.get(), is(false)); + assertThat(clientCalled.get(), is(false)); + + // without the token + auditTrail.authenticationFailed(restRequest); + assertThat(messageEnqueued.get(), is(false)); + assertThat(clientCalled.get(), is(false)); + + verifyZeroInteractions(token, restRequest); + } + + public void testAuthenticationFailedRealmMutedTransport() { + createAuditTrail(new String[] { "realm_authentication_failed" }); + TransportMessage message = mock(TransportMessage.class); + AuthenticationToken token = mock(AuthenticationToken.class); + + // with realm + auditTrail.authenticationFailed(randomAlphaOfLengthBetween(2, 10), token, "_action", message); + assertThat(messageEnqueued.get(), is(false)); + assertThat(clientCalled.get(), is(false)); + + verifyZeroInteractions(token, message); + } + + public void testAuthenticationFailedRealmMutedRest() { + createAuditTrail(new String[]{"realm_authentication_failed"}); + RestRequest restRequest = mock(RestRequest.class); + AuthenticationToken token = mock(AuthenticationToken.class); + + // with realm + auditTrail.authenticationFailed(randomAlphaOfLengthBetween(2, 10), token, restRequest); + assertThat(messageEnqueued.get(), is(false)); + 
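+        // descriptive comment (added): because "realm_authentication_failed" is in the excludes list, nothing may be enqueued and the client must never be called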
assertThat(clientCalled.get(), is(false)); + verifyZeroInteractions(token, restRequest); + } + + public void testAccessGrantedMuted() { + createAuditTrail(new String[] { "access_granted" }); + final TransportMessage message = mock(TransportMessage.class); + final Authentication authentication = mock(Authentication.class); + auditTrail.accessGranted(authentication, randomAlphaOfLengthBetween(6, 40), message, new String[] { "role" }); + assertThat(messageEnqueued.get(), is(false)); + assertThat(clientCalled.get(), is(false)); + verifyZeroInteractions(message); + } + + public void testSystemAccessGrantedMuted() { + createAuditTrail(randomFrom(new String[] { "access_granted" }, null)); + final TransportMessage message = mock(TransportMessage.class); + final Authentication authentication = new Authentication(SystemUser.INSTANCE, new RealmRef(null, null, null), null); + auditTrail.accessGranted(authentication, "internal:foo", message, new String[] { "role" }); + assertThat(messageEnqueued.get(), is(false)); + assertThat(clientCalled.get(), is(false)); + + verifyZeroInteractions(message); + } + + public void testAccessDeniedMuted() { + createAuditTrail(new String[] { "access_denied" }); + final TransportMessage message = mock(TransportMessage.class); + final Authentication authentication = mock(Authentication.class); + auditTrail.accessDenied(authentication, randomAlphaOfLengthBetween(6, 40), message, new String[] { "role" }); + assertThat(messageEnqueued.get(), is(false)); + assertThat(clientCalled.get(), is(false)); + + verifyZeroInteractions(message, authentication); + } + + public void testTamperedRequestMuted() { + createAuditTrail(new String[] { "tampered_request" }); + TransportMessage message = mock(TransportMessage.class); + User user = mock(User.class); + + // with user + auditTrail.tamperedRequest(user, randomAlphaOfLengthBetween(6, 40), message); + assertThat(messageEnqueued.get(), is(false)); + assertThat(clientCalled.get(), is(false)); + + // without user + auditTrail.tamperedRequest(randomAlphaOfLengthBetween(6, 40), message); + assertThat(messageEnqueued.get(), is(false)); + assertThat(clientCalled.get(), is(false)); + + verifyZeroInteractions(message, user); + } + + public void testConnectionGrantedMuted() { + createAuditTrail(new String[] { "connection_granted" }); + InetAddress address = mock(InetAddress.class); + SecurityIpFilterRule rule = mock(SecurityIpFilterRule.class); + + auditTrail.connectionGranted(address, randomAlphaOfLengthBetween(1, 12), rule); + assertThat(messageEnqueued.get(), is(false)); + assertThat(clientCalled.get(), is(false)); + + verifyZeroInteractions(address, rule); + } + + public void testConnectionDeniedMuted() { + createAuditTrail(new String[] { "connection_denied" }); + InetAddress address = mock(InetAddress.class); + SecurityIpFilterRule rule = mock(SecurityIpFilterRule.class); + + auditTrail.connectionDenied(address, randomAlphaOfLengthBetween(1, 12), rule); + assertThat(messageEnqueued.get(), is(false)); + assertThat(clientCalled.get(), is(false)); + + verifyZeroInteractions(address, rule); + } + + public void testRunAsGrantedMuted() { + createAuditTrail(new String[] { "run_as_granted" }); + TransportMessage message = mock(TransportMessage.class); + Authentication authentication = mock(Authentication.class); + + auditTrail.runAsGranted(authentication, randomAlphaOfLengthBetween(6, 40), message, new String[] { "role" }); + assertThat(messageEnqueued.get(), is(false)); + assertThat(clientCalled.get(), is(false)); + + verifyZeroInteractions(message, 
authentication); + } + + public void testRunAsDeniedMuted() { + createAuditTrail(new String[] { "run_as_denied" }); + TransportMessage message = mock(TransportMessage.class); + Authentication authentication = mock(Authentication.class); + + auditTrail.runAsDenied(authentication, randomAlphaOfLengthBetween(6, 40), message, new String[] { "role" }); + assertThat(messageEnqueued.get(), is(false)); + assertThat(clientCalled.get(), is(false)); + + verifyZeroInteractions(message, authentication); + } + + public void testAuthenticationSuccessRest() { + createAuditTrail(new String[] { "authentication_success" }); + RestRequest restRequest = mock(RestRequest.class); + User user = mock(User.class); + String realm = "_realm"; + + auditTrail.authenticationSuccess(realm, user, restRequest); + assertThat(messageEnqueued.get(), is(false)); + assertThat(clientCalled.get(), is(false)); + + verifyZeroInteractions(restRequest); + } + + public void testAuthenticationSuccessTransport() { + createAuditTrail(new String[] { "authentication_success" }); + TransportMessage message = mock(TransportMessage.class); + User user = mock(User.class); + String realm = "_realm"; + auditTrail.authenticationSuccess(realm, user, randomAlphaOfLengthBetween(6, 40), message); + assertThat(messageEnqueued.get(), is(false)); + assertThat(clientCalled.get(), is(false)); + + verifyZeroInteractions(message, user); + } + + IndexAuditTrail createAuditTrail(String[] excludes) { + Settings settings = IndexAuditTrailTests.levelSettings(null, excludes); + auditTrail = new IndexAuditTrail(settings, client, threadPool, clusterService) { + @Override + void updateCurrentIndexMappingsIfNecessary(ClusterState state) { + // skip stuff so we don't have to stub out unnecessary client activities and cluster state + innerStart(); + } + + @Override + BlockingQueue createQueue(int maxQueueSize) { + return new LinkedBlockingQueue(maxQueueSize) { + @Override + public boolean offer(Message message) { + messageEnqueued.set(true); + return super.offer(message); + } + }; + } + }; + auditTrail.start(); + assertThat(auditTrail.state(), is(State.STARTED)); + return auditTrail; + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java new file mode 100644 index 0000000000000..a1e8cc3c4e993 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java @@ -0,0 +1,961 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.audit.index; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.Requests; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportInfo; +import org.elasticsearch.transport.TransportMessage; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.SecurityLifecycleServiceField; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.LocalStateSecurity; +import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail.Message; +import org.elasticsearch.xpack.security.transport.filter.IPFilter; +import org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.joda.time.format.ISODateTimeFormat; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; + +import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; +import static org.elasticsearch.test.InternalTestCluster.clusterName; +import static 
org.elasticsearch.xpack.security.audit.index.IndexNameResolver.Rollover.DAILY; +import static org.elasticsearch.xpack.security.audit.index.IndexNameResolver.Rollover.HOURLY; +import static org.elasticsearch.xpack.security.audit.index.IndexNameResolver.Rollover.MONTHLY; +import static org.elasticsearch.xpack.security.audit.index.IndexNameResolver.Rollover.WEEKLY; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + + +@ESIntegTestCase.ClusterScope(scope = SUITE, supportsDedicatedMasters = false, numDataNodes = 1) +public class IndexAuditTrailTests extends SecurityIntegTestCase { + public static final String SECOND_CLUSTER_NODE_PREFIX = "remote_" + SUITE_CLUSTER_NODE_PREFIX; + + private static boolean remoteIndexing; + private static boolean useSSL; + private static InternalTestCluster remoteCluster; + private static Settings remoteSettings; + private static int numShards = -1; + private static int numReplicas = -1; + + private TransportAddress remoteAddress = buildNewFakeTransportAddress(); + private TransportAddress localAddress = new TransportAddress(InetAddress.getLoopbackAddress(), 0); + private IndexNameResolver.Rollover rollover; + private IndexAuditTrail auditor; + private SetOnce enqueuedMessage; + private ThreadPool threadPool; + private boolean includeRequestBody; + + @BeforeClass + public static void configureBeforeClass() { + useSSL = randomBoolean(); + remoteIndexing = randomBoolean(); + if (remoteIndexing == false) { + remoteSettings = Settings.EMPTY; + } + } + + @AfterClass + public static void cleanupAfterTest() { + if (remoteCluster != null) { + remoteCluster.close(); + remoteCluster = null; + + } + remoteSettings = null; + } + + @Override + protected boolean transportSSLEnabled() { + return useSSL; + } + + @Override + public Settings nodeSettings(int nodeOrdinal) { + if (numShards == -1) { + numShards = numberOfShards(); + } + if (numReplicas == -1) { + numReplicas = numberOfReplicas(); + } + + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put("xpack.security.audit.index.settings.index.number_of_shards", numShards) + .put("xpack.security.audit.index.settings.index.number_of_replicas", numReplicas) + .build(); + } + + @Before + public void initializeRemoteClusterIfNecessary() throws Exception { + if (remoteIndexing == false) { + logger.info("--> remote indexing disabled."); + return; + } + + if (remoteCluster != null) { + return; + } + + // create another cluster + String cluster2Name = clusterName(Scope.SUITE.name(), randomLong()); + + // Setup a second test cluster with randomization for number of nodes, security enabled, and SSL + final int numNodes = randomIntBetween(1, 2); + final boolean useSecurity = randomBoolean(); + final boolean remoteUseSSL = useSecurity && useSSL; + logger.info("--> remote indexing enabled. 
security enabled: [{}], SSL enabled: [{}], nodes: [{}]", useSecurity, useSSL, + numNodes); + SecuritySettingsSource cluster2SettingsSource = + new SecuritySettingsSource(numNodes, useSSL, createTempDir(), Scope.SUITE) { + @Override + public Settings nodeSettings(int nodeOrdinal) { + Settings.Builder builder = Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put("xpack.security.audit.index.settings.index.number_of_shards", numShards) + .put("xpack.security.audit.index.settings.index.number_of_replicas", numReplicas) + // Disable native ML autodetect_process as the c++ controller won't be available +// .put(MachineLearningField.AUTODETECT_PROCESS.getKey(), false) + .put(XPackSettings.SECURITY_ENABLED.getKey(), useSecurity); + if (useSecurity == false && builder.get(NetworkModule.TRANSPORT_TYPE_KEY) == null) { + builder.put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()); + } + return builder.build(); + } + + @Override + public Settings transportClientSettings() { + if (useSecurity) { + return super.transportClientSettings(); + } else { + Settings.Builder builder = Settings.builder() + .put(XPackSettings.SECURITY_ENABLED.getKey(), false) + .put(super.transportClientSettings()); + if (builder.get(NetworkModule.TRANSPORT_TYPE_KEY) == null) { + builder.put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()); + } + return builder.build(); + } + } + + @Override + protected void addDefaultSecurityTransportType(Settings.Builder builder, Settings settings) { + if (useSecurity) { + super.addDefaultSecurityTransportType(builder, settings); + } + } + }; + + + Set> mockPlugins = new HashSet<>(getMockPlugins()); + if (useSecurity == false) { + mockPlugins.add(getTestTransportPlugin()); + } + remoteCluster = new InternalTestCluster(randomLong(), createTempDir(), false, true, numNodes, numNodes, cluster2Name, + cluster2SettingsSource, 0, false, SECOND_CLUSTER_NODE_PREFIX, mockPlugins, + useSecurity ? getClientWrapper() : Function.identity()); + remoteCluster.beforeTest(random(), 0.5); + + NodesInfoResponse response = remoteCluster.client().admin().cluster().prepareNodesInfo().execute().actionGet(); + TransportInfo info = response.getNodes().get(0).getTransport(); + TransportAddress inet = info.address().publishAddress(); + + Settings.Builder builder = Settings.builder() + .put("xpack.security.audit.index.client." + XPackSettings.SECURITY_ENABLED.getKey(), useSecurity) + .put(remoteSettings(NetworkAddress.format(inet.address().getAddress()), inet.address().getPort(), cluster2Name)) + .put("xpack.security.audit.index.client.xpack.security.user", SecuritySettingsSource.TEST_USER_NAME + ":" + + SecuritySettingsSourceField.TEST_PASSWORD); + + if (remoteUseSSL) { + cluster2SettingsSource.addClientSSLSettings(builder, "xpack.security.audit.index.client."); + builder.put("xpack.security.audit.index.client.xpack.security.transport.ssl.enabled", true); + } + if (useSecurity == false && builder.get(NetworkModule.TRANSPORT_TYPE_KEY) == null) { + builder.put("xpack.security.audit.index.client." 
+ NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()); + } + remoteSettings = builder.build(); + } + + @After + public void afterTest() { + if (threadPool != null) { + threadPool.shutdown(); + } + if (auditor != null) { + auditor.stop(); + } + + if (remoteCluster != null) { + remoteCluster.wipe(excludeTemplates()); + } + } + + @Override + protected Set excludeTemplates() { + return Sets.newHashSet(SecurityLifecycleServiceField.SECURITY_TEMPLATE_NAME, IndexAuditTrail.INDEX_TEMPLATE_NAME); + } + + @Override + protected int maximumNumberOfShards() { + return 3; + } + + private Settings commonSettings(IndexNameResolver.Rollover rollover) { + return Settings.builder() + .put("xpack.security.audit.enabled", true) + .put("xpack.security.audit.outputs", "index, logfile") + .put("xpack.security.audit.index.bulk_size", 1) + .put("xpack.security.audit.index.flush_interval", "1ms") + .put("xpack.security.audit.index.rollover", rollover.name().toLowerCase(Locale.ENGLISH)) + .put("xpack.security.audit.index.settings.index.number_of_shards", numShards) + .put("xpack.security.audit.index.settings.index.number_of_replicas", numReplicas) + .build(); + } + + static Settings remoteSettings(String address, int port, String clusterName) { + return Settings.builder() + .put("xpack.security.audit.index.client.hosts", address + ":" + port) + .put("xpack.security.audit.index.client.cluster.name", clusterName) + .build(); + } + + static Settings levelSettings(String[] includes, String[] excludes) { + Settings.Builder builder = Settings.builder(); + if (includes != null) { + builder.putList("xpack.security.audit.index.events.include", includes); + } + if (excludes != null) { + builder.putList("xpack.security.audit.index.events.exclude", excludes); + } + return builder.build(); + } + + private Settings settings(IndexNameResolver.Rollover rollover, String[] includes, String[] excludes) { + Settings.Builder builder = Settings.builder(); + builder.put(levelSettings(includes, excludes)); + builder.put(commonSettings(rollover)); + builder.put("xpack.security.audit.index.events.emit_request_body", includeRequestBody); + return builder.build(); + } + + private Client getClient() { + return remoteIndexing ? 
remoteCluster.client() : client(); + } + + private void initialize() throws Exception { + initialize(null, null); + } + + private void initialize(String[] includes, String[] excludes) throws Exception { + initialize(includes, excludes, Settings.EMPTY); + } + + private void initialize(final String[] includes, final String[] excludes, final Settings additionalSettings) throws Exception { + rollover = randomFrom(HOURLY, DAILY, WEEKLY, MONTHLY); + includeRequestBody = randomBoolean(); + Settings.Builder builder = Settings.builder(); + if (remoteIndexing) { + builder.put(remoteSettings); + } + builder.put(settings(rollover, includes, excludes)).put(additionalSettings).build(); + // IndexAuditTrail should ignore secure settings + // they are merged on the master node creating the audit index + if (randomBoolean()) { + MockSecureSettings ignored = new MockSecureSettings(); + if (randomBoolean()) { + ignored.setString(KeyStoreWrapper.SEED_SETTING.getKey(), "non-empty-secure-settings"); + } + builder.setSecureSettings(ignored); + } + Settings settings = builder.build(); + + logger.info("--> settings: [{}]", settings); + DiscoveryNode localNode = mock(DiscoveryNode.class); + when(localNode.getHostAddress()).thenReturn(remoteAddress.getAddress()); + when(localNode.getHostName()).thenReturn(remoteAddress.getAddress()); + ClusterService clusterService = mock(ClusterService.class); + ClusterState state = mock(ClusterState.class); + DiscoveryNodes nodes = mock(DiscoveryNodes.class); + when(clusterService.localNode()).thenReturn(localNode); + when(clusterService.state()).thenReturn(client().admin().cluster().prepareState().get().getState()); + when(state.getNodes()).thenReturn(nodes); + when(nodes.isLocalNodeElectedMaster()).thenReturn(true); + threadPool = new TestThreadPool("index audit trail tests"); + enqueuedMessage = new SetOnce<>(); + auditor = new IndexAuditTrail(settings, client(), threadPool, clusterService) { + + @Override + void enqueue(Message message, String type) { + enqueuedMessage.set(message); + super.enqueue(message, type); + } + + @Override + List> remoteTransportClientPlugins() { + return Arrays.asList(LocalStateSecurity.class, getTestTransportPlugin()); + } + }; + auditor.start(); + } + + public void testProcessorsSetting() { + final boolean explicitProcessors = randomBoolean(); + final int processors; + if (explicitProcessors) { + processors = randomIntBetween(1, 16); + } else { + processors = EsExecutors.PROCESSORS_SETTING.get(Settings.EMPTY); + } + final boolean explicitClientProcessors = randomBoolean(); + final int clientProcessors; + if (explicitClientProcessors) { + clientProcessors = randomIntBetween(1, 16); + } else { + clientProcessors = EsExecutors.PROCESSORS_SETTING.get(Settings.EMPTY); + } + + final Settings.Builder additionalSettingsBuilder = + Settings.builder() + .put("xpack.security.audit.index.client.cluster.name", "remote") + .put("xpack.security.audit.index.client.hosts", "localhost:9300"); + + if (explicitProcessors) { + additionalSettingsBuilder.put(EsExecutors.PROCESSORS_SETTING.getKey(), processors); + } + if (explicitClientProcessors) { + additionalSettingsBuilder.put("xpack.security.audit.index.client.processors", clientProcessors); + } + + final ThrowingRunnable runnable = () -> initialize(null, null, additionalSettingsBuilder.build()); + if (processors == clientProcessors || explicitClientProcessors == false) { + // okay, the client initialized which is all we care about but no nodes are available because we never set up the remote cluster + 
expectThrows(NoNodeAvailableException.class, runnable); + } else { + final IllegalStateException e = expectThrows(IllegalStateException.class, runnable); + assertThat( + e, + hasToString(containsString( + "explicit processor setting [" + clientProcessors + "]" + + " for audit trail remote client does not match inherited processor setting [" + processors + "]"))); + } + } + + public void testAnonymousAccessDeniedTransport() throws Exception { + initialize(); + TransportMessage message = randomFrom(new RemoteHostMockMessage(), new LocalHostMockMessage(), new MockIndicesTransportMessage()); + auditor.anonymousAccessDenied("_action", message); + + SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); + assertAuditMessage(hit, "transport", "anonymous_access_denied"); + Map sourceMap = hit.getSourceAsMap(); + if (message instanceof RemoteHostMockMessage) { + assertEquals(remoteAddress.getAddress(), sourceMap.get("origin_address")); + } else { + assertEquals(localAddress.getAddress(), sourceMap.get("origin_address")); + } + + assertEquals("_action", sourceMap.get("action")); + assertEquals("transport", sourceMap.get("origin_type")); + if (message instanceof IndicesRequest) { + List indices = (List) sourceMap.get("indices"); + assertThat(indices, containsInAnyOrder((Object[]) ((IndicesRequest) message).indices())); + } + assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); + } + + public void testAnonymousAccessDeniedRest() throws Exception { + initialize(); + RestRequest request = mockRestRequest(); + auditor.anonymousAccessDenied(request); + SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); + + assertAuditMessage(hit, "rest", "anonymous_access_denied"); + Map sourceMap = hit.getSourceAsMap(); + assertThat(NetworkAddress.format(InetAddress.getLoopbackAddress()), equalTo(sourceMap.get("origin_address"))); + assertThat("_uri", equalTo(sourceMap.get("uri"))); + assertThat(sourceMap.get("origin_type"), is("rest")); + assertRequestBody(sourceMap); + } + + public void testAuthenticationFailedTransport() throws Exception { + initialize(); + TransportMessage message = randomBoolean() ? 
new RemoteHostMockMessage() : new LocalHostMockMessage(); + auditor.authenticationFailed(new MockToken(), "_action", message); + SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); + Map sourceMap = hit.getSourceAsMap(); + assertAuditMessage(hit, "transport", "authentication_failed"); + + if (message instanceof RemoteHostMockMessage) { + assertEquals(remoteAddress.getAddress(), sourceMap.get("origin_address")); + } else { + assertEquals(localAddress.getAddress(), sourceMap.get("origin_address")); + } + + assertEquals("_principal", sourceMap.get("principal")); + assertEquals("_action", sourceMap.get("action")); + assertEquals("transport", sourceMap.get("origin_type")); + assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); + } + + public void testAuthenticationFailedTransportNoToken() throws Exception { + initialize(); + TransportMessage message = randomFrom(new RemoteHostMockMessage(), new LocalHostMockMessage(), new MockIndicesTransportMessage()); + auditor.authenticationFailed("_action", message); + SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); + + assertAuditMessage(hit, "transport", "authentication_failed"); + Map sourceMap = hit.getSourceAsMap(); + if (message instanceof RemoteHostMockMessage) { + assertEquals(remoteAddress.getAddress(), sourceMap.get("origin_address")); + } else { + assertEquals(localAddress.getAddress(), sourceMap.get("origin_address")); + } + + assertThat(sourceMap.get("principal"), nullValue()); + assertEquals("_action", sourceMap.get("action")); + assertEquals("transport", sourceMap.get("origin_type")); + if (message instanceof IndicesRequest) { + List indices = (List) sourceMap.get("indices"); + assertThat(indices, containsInAnyOrder((Object[]) ((IndicesRequest) message).indices())); + } + assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); + } + + public void testAuthenticationFailedRest() throws Exception { + initialize(); + RestRequest request = mockRestRequest(); + auditor.authenticationFailed(new MockToken(), request); + SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); + + assertAuditMessage(hit, "rest", "authentication_failed"); + Map sourceMap = hit.getSourceAsMap(); + assertThat(sourceMap.get("principal"), is((Object) "_principal")); + assertThat("127.0.0.1", equalTo(sourceMap.get("origin_address"))); + assertThat("_uri", equalTo(sourceMap.get("uri"))); + assertThat(sourceMap.get("origin_type"), is("rest")); + assertRequestBody(sourceMap); + } + + public void testAuthenticationFailedRestNoToken() throws Exception { + initialize(); + RestRequest request = mockRestRequest(); + auditor.authenticationFailed(request); + SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); + + assertAuditMessage(hit, "rest", "authentication_failed"); + Map sourceMap = hit.getSourceAsMap(); + assertThat(sourceMap.get("principal"), nullValue()); + assertThat("127.0.0.1", equalTo(sourceMap.get("origin_address"))); + assertThat("_uri", equalTo(sourceMap.get("uri"))); + assertThat(sourceMap.get("origin_type"), is("rest")); + assertRequestBody(sourceMap); + } + + public void testAuthenticationFailedTransportRealm() throws Exception { + initialize(); + TransportMessage message = randomFrom(new RemoteHostMockMessage(), new LocalHostMockMessage(), new MockIndicesTransportMessage()); + auditor.authenticationFailed("_realm", new MockToken(), "_action", message); + SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); + + assertAuditMessage(hit, "transport", 
"realm_authentication_failed"); + Map sourceMap = hit.getSourceAsMap(); + + if (message instanceof RemoteHostMockMessage) { + assertEquals(remoteAddress.getAddress(), sourceMap.get("origin_address")); + } else { + assertEquals(localAddress.getAddress(), sourceMap.get("origin_address")); + } + + assertEquals("transport", sourceMap.get("origin_type")); + assertEquals("_principal", sourceMap.get("principal")); + assertEquals("_action", sourceMap.get("action")); + assertEquals("_realm", sourceMap.get("realm")); + if (message instanceof IndicesRequest) { + List indices = (List) sourceMap.get("indices"); + assertThat(indices, containsInAnyOrder((Object[]) ((IndicesRequest) message).indices())); + } + assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); + } + + public void testAuthenticationFailedRestRealm() throws Exception { + initialize(); + RestRequest request = mockRestRequest(); + auditor.authenticationFailed("_realm", new MockToken(), request); + SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); + + assertAuditMessage(hit, "rest", "realm_authentication_failed"); + Map sourceMap = hit.getSourceAsMap(); + assertThat("127.0.0.1", equalTo(sourceMap.get("origin_address"))); + assertThat("_uri", equalTo(sourceMap.get("uri"))); + assertEquals("_realm", sourceMap.get("realm")); + assertThat(sourceMap.get("origin_type"), is("rest")); + assertRequestBody(sourceMap); + } + + public void testAccessGranted() throws Exception { + initialize(); + TransportMessage message = randomFrom(new RemoteHostMockMessage(), new LocalHostMockMessage(), new MockIndicesTransportMessage()); + final boolean runAs = randomBoolean(); + User user; + if (runAs) { + user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); + } else { + user = new User("_username", new String[]{"r1"}); + } + String role = randomAlphaOfLengthBetween(1, 6); + auditor.accessGranted(createAuthentication(user), "_action", message, new String[] { role }); + + SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); + assertAuditMessage(hit, "transport", "access_granted"); + Map sourceMap = hit.getSourceAsMap(); + assertEquals("transport", sourceMap.get("origin_type")); + if (runAs) { + assertThat(sourceMap.get("principal"), is("running as")); + assertThat(sourceMap.get("realm"), is("lookRealm")); + assertThat(sourceMap.get("run_by_principal"), is("_username")); + assertThat(sourceMap.get("run_by_realm"), is("authRealm")); + } else { + assertThat(sourceMap.get("principal"), is("_username")); + assertThat(sourceMap.get("realm"), is("authRealm")); + } + assertEquals("_action", sourceMap.get("action")); + assertThat((Iterable) sourceMap.get(IndexAuditTrail.Field.ROLE_NAMES), containsInAnyOrder(role)); + if (message instanceof IndicesRequest) { + List indices = (List) sourceMap.get("indices"); + assertThat(indices, containsInAnyOrder((Object[]) ((IndicesRequest) message).indices())); + } + assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); + } + + public void testSystemAccessGranted() throws Exception { + initialize(new String[] { "system_access_granted" }, null); + TransportMessage message = randomBoolean() ? 
new RemoteHostMockMessage() : new LocalHostMockMessage(); + String role = randomAlphaOfLengthBetween(1, 6); + auditor.accessGranted(createAuthentication(SystemUser.INSTANCE), "internal:_action", message, new String[] { role }); + + SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); + assertAuditMessage(hit, "transport", "access_granted"); + Map sourceMap = hit.getSourceAsMap(); + assertEquals("transport", sourceMap.get("origin_type")); + assertEquals(SystemUser.INSTANCE.principal(), sourceMap.get("principal")); + assertThat(sourceMap.get("realm"), is("authRealm")); + assertEquals("internal:_action", sourceMap.get("action")); + assertThat((Iterable) sourceMap.get(IndexAuditTrail.Field.ROLE_NAMES), containsInAnyOrder(role)); + assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); + } + + public void testAccessDenied() throws Exception { + initialize(); + TransportMessage message = randomFrom(new RemoteHostMockMessage(), new LocalHostMockMessage(), new MockIndicesTransportMessage()); + final boolean runAs = randomBoolean(); + User user; + if (runAs) { + user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); + } else { + user = new User("_username", new String[]{"r1"}); + } + String role = randomAlphaOfLengthBetween(1, 6); + auditor.accessDenied(createAuthentication(user), "_action", message, new String[] { role }); + + SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); + Map sourceMap = hit.getSourceAsMap(); + assertAuditMessage(hit, "transport", "access_denied"); + assertEquals("transport", sourceMap.get("origin_type")); + if (runAs) { + assertThat(sourceMap.get("principal"), is("running as")); + assertThat(sourceMap.get("realm"), is("lookRealm")); + assertThat(sourceMap.get("run_by_principal"), is("_username")); + assertThat(sourceMap.get("run_by_realm"), is("authRealm")); + } else { + assertThat(sourceMap.get("principal"), is("_username")); + assertThat(sourceMap.get("realm"), is("authRealm")); + } + assertEquals("_action", sourceMap.get("action")); + if (message instanceof IndicesRequest) { + List indices = (List) sourceMap.get("indices"); + assertThat(indices, containsInAnyOrder((Object[]) ((IndicesRequest) message).indices())); + } + assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); + assertThat((Iterable) sourceMap.get(IndexAuditTrail.Field.ROLE_NAMES), containsInAnyOrder(role)); + } + + public void testTamperedRequestRest() throws Exception { + initialize(); + RestRequest request = mockRestRequest(); + auditor.tamperedRequest(request); + + SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); + assertAuditMessage(hit, "rest", "tampered_request"); + Map sourceMap = hit.getSourceAsMap(); + assertThat(sourceMap.get("principal"), nullValue()); + assertThat("127.0.0.1", equalTo(sourceMap.get("origin_address"))); + assertThat("_uri", equalTo(sourceMap.get("uri"))); + assertThat(sourceMap.get("origin_type"), is("rest")); + assertRequestBody(sourceMap); + } + + public void testTamperedRequest() throws Exception { + initialize(); + TransportRequest message = new RemoteHostMockTransportRequest(); + auditor.tamperedRequest("_action", message); + + SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); + Map sourceMap = hit.getSourceAsMap(); + assertAuditMessage(hit, "transport", "tampered_request"); + assertEquals("transport", sourceMap.get("origin_type")); + assertThat(sourceMap.get("principal"), is(nullValue())); + assertEquals("_action", sourceMap.get("action")); + 
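+        // descriptive comment (added): the indexed audit document records the simple class name of the transport request in its "request" field, as asserted next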
assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); + } + + public void testTamperedRequestWithUser() throws Exception { + initialize(); + TransportRequest message = new RemoteHostMockTransportRequest(); + final boolean runAs = randomBoolean(); + User user; + if (runAs) { + user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); + } else { + user = new User("_username", new String[]{"r1"}); + } + auditor.tamperedRequest(user, "_action", message); + + SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); + + assertAuditMessage(hit, "transport", "tampered_request"); + Map sourceMap = hit.getSourceAsMap(); + assertEquals("transport", sourceMap.get("origin_type")); + if (runAs) { + assertThat(sourceMap.get("principal"), is("running as")); + assertThat(sourceMap.get("run_by_principal"), is("_username")); + } else { + assertEquals("_username", sourceMap.get("principal")); + } + assertEquals("_action", sourceMap.get("action")); + assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); + } + + public void testConnectionGranted() throws Exception { + initialize(); + InetAddress inetAddress = InetAddress.getLoopbackAddress(); + SecurityIpFilterRule rule = IPFilter.DEFAULT_PROFILE_ACCEPT_ALL; + auditor.connectionGranted(inetAddress, "default", rule); + + SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); + + assertAuditMessage(hit, "ip_filter", "connection_granted"); + Map sourceMap = hit.getSourceAsMap(); + assertEquals("allow default:accept_all", sourceMap.get("rule")); + assertEquals("default", sourceMap.get("transport_profile")); + } + + public void testConnectionDenied() throws Exception { + initialize(); + InetAddress inetAddress = InetAddress.getLoopbackAddress(); + SecurityIpFilterRule rule = new SecurityIpFilterRule(false, "_all"); + auditor.connectionDenied(inetAddress, "default", rule); + + SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); + + assertAuditMessage(hit, "ip_filter", "connection_denied"); + Map sourceMap = hit.getSourceAsMap(); + assertEquals("deny _all", sourceMap.get("rule")); + assertEquals("default", sourceMap.get("transport_profile")); + } + + public void testRunAsGranted() throws Exception { + initialize(); + TransportMessage message = randomFrom(new RemoteHostMockMessage(), new LocalHostMockMessage(), new MockIndicesTransportMessage()); + User user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); + String role = randomAlphaOfLengthBetween(1, 6); + auditor.runAsGranted(createAuthentication(user), "_action", message, new String[] { role }); + + SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); + assertAuditMessage(hit, "transport", "run_as_granted"); + Map sourceMap = hit.getSourceAsMap(); + assertEquals("transport", sourceMap.get("origin_type")); + assertThat(sourceMap.get("principal"), is("_username")); + assertThat(sourceMap.get("realm"), is("authRealm")); + assertThat(sourceMap.get("run_as_principal"), is("running as")); + assertThat(sourceMap.get("run_as_realm"), is("lookRealm")); + assertThat((Iterable) sourceMap.get(IndexAuditTrail.Field.ROLE_NAMES), containsInAnyOrder(role)); + assertEquals("_action", sourceMap.get("action")); + assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); + } + + public void testRunAsDenied() throws Exception { + initialize(); + TransportMessage message = randomFrom(new RemoteHostMockMessage(), new LocalHostMockMessage(), new 
MockIndicesTransportMessage()); + User user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); + auditor.runAsDenied(createAuthentication(user), "_action", message, new String[] { "r1" }); + + SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); + assertAuditMessage(hit, "transport", "run_as_denied"); + Map sourceMap = hit.getSourceAsMap(); + assertEquals("transport", sourceMap.get("origin_type")); + assertThat(sourceMap.get("principal"), is("_username")); + assertThat(sourceMap.get("realm"), is("authRealm")); + assertThat(sourceMap.get("run_as_principal"), is("running as")); + assertThat(sourceMap.get("run_as_realm"), is("lookRealm")); + assertEquals("_action", sourceMap.get("action")); + assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); + } + + public void testAuthenticationSuccessRest() throws Exception { + initialize(); + RestRequest request = mockRestRequest(); + final boolean runAs = randomBoolean(); + User user; + if (runAs) { + user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); + } else { + user = new User("_username", new String[] { "r1" }); + } + String realm = "_realm"; + auditor.authenticationSuccess(realm, user, request); + SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); + + assertAuditMessage(hit, "rest", "authentication_success"); + Map sourceMap = hit.getSourceAsMap(); + assertThat("_uri", equalTo(sourceMap.get("uri"))); + assertRequestBody(sourceMap); + if (runAs) { + assertThat(sourceMap.get("principal"), is("running as")); + assertThat(sourceMap.get("run_by_principal"), is("_username")); + } else { + assertEquals("_username", sourceMap.get("principal")); + } + assertEquals("_realm", sourceMap.get("realm")); + } + + public void testAuthenticationSuccessTransport() throws Exception { + initialize(); + TransportMessage message = randomFrom(new RemoteHostMockMessage(), new LocalHostMockMessage(), new MockIndicesTransportMessage()); + final boolean runAs = randomBoolean(); + User user; + if (runAs) { + user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); + } else { + user = new User("_username", new String[] { "r1" }); + } + String realm = "_realm"; + auditor.authenticationSuccess(realm, user, "_action", message); + + SearchHit hit = getIndexedAuditMessage(enqueuedMessage.get()); + Map sourceMap = hit.getSourceAsMap(); + assertAuditMessage(hit, "transport", "authentication_success"); + assertEquals("transport", sourceMap.get("origin_type")); + if (runAs) { + assertThat(sourceMap.get("principal"), is("running as")); + assertThat(sourceMap.get("run_by_principal"), is("_username")); + } else { + assertEquals("_username", sourceMap.get("principal")); + } + assertEquals("_action", sourceMap.get("action")); + assertEquals("_realm", sourceMap.get("realm")); + assertEquals(sourceMap.get("request"), message.getClass().getSimpleName()); + } + + private void assertAuditMessage(SearchHit hit, String layer, String type) { + Map sourceMap = hit.getSourceAsMap(); + assertThat(sourceMap.get("@timestamp"), notNullValue()); + DateTime dateTime = ISODateTimeFormat.dateTimeParser().withZoneUTC().parseDateTime((String) sourceMap.get("@timestamp")); + final DateTime now = DateTime.now(DateTimeZone.UTC); + assertThat(dateTime + " should be on/before " + now, dateTime.isAfter(now), equalTo(false)); + + assertThat(remoteAddress.getAddress(), equalTo(sourceMap.get("node_host_name"))); + assertThat(remoteAddress.getAddress(), 
equalTo(sourceMap.get("node_host_address"))); + + assertEquals(layer, sourceMap.get("layer")); + assertEquals(type, sourceMap.get("event_type")); + } + + private void assertRequestBody(Map sourceMap) { + if (includeRequestBody) { + assertThat(sourceMap.get("request_body"), notNullValue()); + } else { + assertThat(sourceMap.get("request_body"), nullValue()); + } + } + private class LocalHostMockMessage extends TransportMessage { + LocalHostMockMessage() { + remoteAddress(localAddress); + } + } + + private class RemoteHostMockMessage extends TransportMessage { + RemoteHostMockMessage() throws Exception { + remoteAddress(remoteAddress); + } + } + + private class RemoteHostMockTransportRequest extends TransportRequest { + RemoteHostMockTransportRequest() throws Exception { + remoteAddress(remoteAddress); + } + } + + private class MockIndicesTransportMessage extends RemoteHostMockMessage implements IndicesRequest { + MockIndicesTransportMessage() throws Exception { + super(); + } + + @Override + public String[] indices() { + return new String[] { "foo", "bar", "baz" }; + } + + @Override + public IndicesOptions indicesOptions() { + return null; + } + } + + private static class MockToken implements AuthenticationToken { + @Override + public String principal() { + return "_principal"; + } + + @Override + public Object credentials() { + fail("it's not allowed to print the credentials of the auth token"); + return null; + } + + @Override + public void clearCredentials() { + } + } + + private RestRequest mockRestRequest() { + RestRequest request = mock(RestRequest.class); + when(request.getRemoteAddress()).thenReturn(new InetSocketAddress(InetAddress.getLoopbackAddress(), 9200)); + when(request.uri()).thenReturn("_uri"); + return request; + } + + private SearchHit getIndexedAuditMessage(Message message) throws InterruptedException { + assertNotNull("no audit message was enqueued", message); + final String indexName = IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, message.timestamp, rollover); + ensureYellowAndNoInitializingShards(indexName); + GetSettingsResponse settingsResponse = getClient().admin().indices().prepareGetSettings(indexName).get(); + assertThat(settingsResponse.getSetting(indexName, "index.number_of_shards"), is(Integer.toString(numShards))); + assertThat(settingsResponse.getSetting(indexName, "index.number_of_replicas"), is(Integer.toString(numReplicas))); + + final SetOnce searchResponseSetOnce = new SetOnce<>(); + final boolean found = awaitBusy(() -> { + try { + SearchResponse searchResponse = getClient() + .prepareSearch(indexName) + .setTypes(IndexAuditTrail.DOC_TYPE) + .get(); + if (searchResponse.getHits().getTotalHits() > 0L) { + searchResponseSetOnce.set(searchResponse); + return true; + } + } catch (Exception e) { + logger.debug("caught exception while executing search", e); + } + return false; + }); + assertThat("no audit document exists!", found, is(true)); + SearchResponse response = searchResponseSetOnce.get(); + assertNotNull(response); + + assertEquals(1, response.getHits().getTotalHits()); + return response.getHits().getHits()[0]; + } + + @Override + public ClusterHealthStatus ensureYellowAndNoInitializingShards(String... 
indices) { + if (remoteIndexing == false) { + return super.ensureYellowAndNoInitializingShards(indices); + } + + // pretty ugly but just a rip of ensureYellowAndNoInitializingShards that uses a different client + ClusterHealthResponse actionGet = getClient().admin().cluster().health(Requests.clusterHealthRequest(indices) + .waitForNoRelocatingShards(true) + .waitForYellowStatus() + .waitForEvents(Priority.LANGUID) + .waitForNoInitializingShards(true)) + .actionGet(); + if (actionGet.isTimedOut()) { + logger.info("ensureYellow timed out, cluster state:\n{}\n{}", + getClient().admin().cluster().prepareState().get().getState(), + getClient().admin().cluster().preparePendingClusterTasks().get()); + assertThat("timed out waiting for yellow", actionGet.isTimedOut(), equalTo(false)); + } + + logger.debug("indices {} are yellow", indices.length == 0 ? "[_all]" : indices); + return actionGet.getStatus(); + } + + private static Authentication createAuthentication(User user) { + final RealmRef lookedUpBy = user.authenticatedUser() == user ? null : new RealmRef("lookRealm", "up", "by"); + return new Authentication(user, new RealmRef("authRealm", "test", "foo"), lookedUpBy); + } +} + diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/RemoteIndexAuditTrailStartingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/RemoteIndexAuditTrailStartingTests.java new file mode 100644 index 0000000000000..5b90b2e1e4609 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/RemoteIndexAuditTrailStartingTests.java @@ -0,0 +1,169 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.audit.index; + +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.test.ESIntegTestCase.ClusterScope; +import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.xpack.core.security.SecurityLifecycleServiceField; +import org.elasticsearch.xpack.security.audit.AuditTrail; +import org.elasticsearch.xpack.security.audit.AuditTrailService; +import org.junit.After; +import org.junit.Before; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.stream.StreamSupport; + +import static org.elasticsearch.test.InternalTestCluster.clusterName; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; + +/** + * This test checks to ensure that the IndexAuditTrail starts properly when indexing to a remote cluster. 
The cluster + * started by the integration tests is indexed into by the remote cluster started before the test. + * + * The cluster started by the integrations tests may also index into itself... + */ +@ClusterScope(scope = Scope.TEST, numDataNodes = 1, numClientNodes = 0, transportClientRatio = 0.0, supportsDedicatedMasters = false) +@TestLogging("org.elasticsearch.xpack.security.audit.index:TRACE") +public class RemoteIndexAuditTrailStartingTests extends SecurityIntegTestCase { + + public static final String SECOND_CLUSTER_NODE_PREFIX = "remote_" + TEST_CLUSTER_NODE_PREFIX; + + private InternalTestCluster remoteCluster; + + private final boolean sslEnabled = randomBoolean(); + private final boolean localAudit = randomBoolean(); + private final String outputs = randomFrom("index", "logfile", "index,logfile"); + + @Override + public boolean transportSSLEnabled() { + return sslEnabled; + } + + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put("xpack.security.audit.enabled", localAudit) + .put("xpack.security.audit.outputs", outputs) + .build(); + } + + @Override + protected Set excludeTemplates() { + return Sets.newHashSet(SecurityLifecycleServiceField.SECURITY_TEMPLATE_NAME, IndexAuditTrail.INDEX_TEMPLATE_NAME); + } + + @Override + protected int numberOfShards() { + return 1; // limit ourselves to a single shard in order to avoid timeout issues with large numbers of shards in tests + } + + @Before + public void startRemoteCluster() throws IOException, InterruptedException { + final List addresses = new ArrayList<>(); + // get addresses for current cluster + NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().execute().actionGet(); + final String clusterName = response.getClusterName().value(); + for (NodeInfo nodeInfo : response.getNodes()) { + TransportAddress address = nodeInfo.getTransport().address().publishAddress(); + addresses.add(address.address().getHostString() + ":" + address.address().getPort()); + } + + // create another cluster + String cluster2Name = clusterName(Scope.TEST.name(), randomLong()); + + // Setup a second test cluster with a single node, security enabled, and SSL + final int numNodes = 1; + SecuritySettingsSource cluster2SettingsSource = + new SecuritySettingsSource(numNodes, sslEnabled, createTempDir(), Scope.TEST) { + @Override + public Settings nodeSettings(int nodeOrdinal) { + Settings.Builder builder = Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + // Disable native ML autodetect_process as the c++ controller won't be available +// .put(MachineLearningField.AUTODETECT_PROCESS.getKey(), false) + .put("xpack.security.audit.enabled", true) + .put("xpack.security.audit.outputs", randomFrom("index", "index,logfile")) + .putList("xpack.security.audit.index.client.hosts", addresses.toArray(new String[addresses.size()])) + .put("xpack.security.audit.index.client.cluster.name", clusterName) + .put("xpack.security.audit.index.client.xpack.security.user", + TEST_USER_NAME + ":" + SecuritySettingsSourceField.TEST_PASSWORD) + .put("xpack.security.audit.index.settings.index.number_of_shards", 1) + .put("xpack.security.audit.index.settings.index.number_of_replicas", 0); + + addClientSSLSettings(builder, "xpack.security.audit.index.client."); + builder.put("xpack.security.audit.index.client.xpack.security.transport.ssl.enabled", sslEnabled); + return builder.build(); + } + }; + remoteCluster = new InternalTestCluster(randomLong(), createTempDir(), false, 
true, numNodes, numNodes, + cluster2Name, cluster2SettingsSource, 0, false, SECOND_CLUSTER_NODE_PREFIX, getMockPlugins(), getClientWrapper()); + remoteCluster.beforeTest(random(), 0.0); + assertNoTimeout(remoteCluster.client().admin().cluster().prepareHealth().setWaitForGreenStatus().get()); + } + + @After + public void stopRemoteCluster() throws Exception { + List toStop = new ArrayList<>(); + // stop the index audit trail so that the shards aren't locked causing the test to fail + toStop.add(() -> StreamSupport.stream(internalCluster().getInstances(AuditTrailService.class).spliterator(), false) + .map(s -> s.getAuditTrails()).flatMap(List::stream) + .filter(t -> t.name().equals(IndexAuditTrail.NAME)) + .forEach((auditTrail) -> ((IndexAuditTrail) auditTrail).stop())); + // first stop both audit trails otherwise we keep on indexing + if (remoteCluster != null) { + toStop.add(() -> StreamSupport.stream(remoteCluster.getInstances(AuditTrailService.class).spliterator(), false) + .map(s -> s.getAuditTrails()).flatMap(List::stream) + .filter(t -> t.name().equals(IndexAuditTrail.NAME)) + .forEach((auditTrail) -> ((IndexAuditTrail) auditTrail).stop())); + toStop.add(() -> remoteCluster.wipe(excludeTemplates())); + toStop.add(remoteCluster::afterTest); + toStop.add(remoteCluster); + } + + + IOUtils.close(toStop); + } + + public void testThatRemoteAuditInstancesAreStarted() throws Exception { + logger.info("Test configuration: ssl=[{}] localAudit=[{}][{}]", sslEnabled, localAudit, outputs); + // we ensure that all instances present are started otherwise we will have issues + // and race with the shutdown logic + for (InternalTestCluster cluster : Arrays.asList(remoteCluster, internalCluster())) { + for (AuditTrailService auditTrailService : cluster.getInstances(AuditTrailService.class)) { + Optional auditTrail = auditTrailService.getAuditTrails().stream() + .filter(t -> t.name().equals(IndexAuditTrail.NAME)).findAny(); + if (cluster == remoteCluster || (localAudit && outputs.contains("index"))) { + // remote cluster must be present and only if we do local audit and output to an index we are good on the local one + // as well. + assertTrue(auditTrail.isPresent()); + } + if (auditTrail.isPresent()) { + IndexAuditTrail indexAuditTrail = (IndexAuditTrail) auditTrail.get(); + assertBusy(() -> assertSame("trail not started remoteCluster: " + (remoteCluster == cluster), + indexAuditTrail.state(), IndexAuditTrail.State.STARTED)); + } + } + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java new file mode 100644 index 0000000000000..2f32ba9c537f1 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/AuditTrailSettingsUpdateTests.java @@ -0,0 +1,222 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
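For orientation, the remote-audit wiring exercised by `startRemoteCluster()` above comes down to a handful of `xpack.security.audit.index.client.*` settings that point the second cluster's index audit output at the first cluster over the transport client. A minimal, hedged sketch follows; the host, cluster name, and credentials are placeholders, not values taken from this change.

```java
import org.elasticsearch.common.settings.Settings;

// Editorial sketch, not part of the diff: settings a node might use to ship
// its index audit trail to a different cluster via the audit index client.
Settings remoteAuditSettings = Settings.builder()
        .put("xpack.security.audit.enabled", true)
        .put("xpack.security.audit.outputs", "index")
        // address(es) of the cluster that should receive the audit documents
        .putList("xpack.security.audit.index.client.hosts", "127.0.0.1:9300")
        .put("xpack.security.audit.index.client.cluster.name", "receiving-cluster")
        // user the audit client authenticates as on the receiving cluster
        .put("xpack.security.audit.index.client.xpack.security.user", "audit_user:audit_password")
        .put("xpack.security.audit.index.settings.index.number_of_shards", 1)
        .put("xpack.security.audit.index.settings.index.number_of_replicas", 0)
        .build();
```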
+ */ +package org.elasticsearch.xpack.security.audit.logfile; + +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.xpack.security.audit.AuditLevel; +import org.elasticsearch.xpack.security.audit.AuditTrailService; +import org.elasticsearch.test.ESIntegTestCase.ClusterScope; +import org.junit.BeforeClass; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.regex.Pattern; + +import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +@ClusterScope(scope = TEST, numDataNodes = 1) +public class AuditTrailSettingsUpdateTests extends SecurityIntegTestCase { + + private static Settings startupFilterSettings; + private static Settings updateFilterSettings; + + @BeforeClass + public static void startupFilterSettings() { + final Settings.Builder settingsBuilder = Settings.builder(); + // generate random filter policies + for (int i = 0; i < randomIntBetween(0, 4); i++) { + settingsBuilder.put(randomFilterPolicySettings("startupPolicy" + i)); + } + startupFilterSettings = settingsBuilder.build(); + } + + @BeforeClass + public static void updateFilterSettings() { + final Settings.Builder settingsBuilder = Settings.builder(); + // generate random filter policies + for (int i = 0; i < randomIntBetween(1, 4); i++) { + settingsBuilder.put(randomFilterPolicySettings("updatePolicy" + i)); + } + updateFilterSettings = settingsBuilder.build(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + final Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(super.nodeSettings(nodeOrdinal)); + + // enable auditing + settingsBuilder.put("xpack.security.audit.enabled", "true"); + settingsBuilder.put("xpack.security.audit.outputs", "logfile"); + // add only startup filter policies + settingsBuilder.put(startupFilterSettings); + return settingsBuilder.build(); + } + + public void testDynamicFilterSettings() throws Exception { + final ClusterService clusterService = mock(ClusterService.class); + final ClusterSettings clusterSettings = mockClusterSettings(); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(startupFilterSettings); + settingsBuilder.put(updateFilterSettings); + // reference audit trail containing all filters + final LoggingAuditTrail auditTrail = new LoggingAuditTrail(settingsBuilder.build(), clusterService, logger, threadContext); + final String expected = auditTrail.eventFilterPolicyRegistry.toString(); + // update settings on internal cluster + updateSettings(updateFilterSettings, randomBoolean()); + final String actual = ((LoggingAuditTrail) internalCluster().getInstances(AuditTrailService.class) + .iterator() + .next() + .getAuditTrails() + .iterator() + .next()).eventFilterPolicyRegistry.toString(); + assertEquals(expected, actual); + 
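As a side note on the dynamic path verified by `testDynamicFilterSettings()`: the ignore-filter policies are ordinary dynamic cluster settings, so they can be changed on a live cluster with a routine update-settings call, which is all `updateSettings(...)` does in these tests. A small illustrative sketch, assuming a test-style `client()` handle; the policy name and patterns below are made up for the example.

```java
import org.elasticsearch.common.settings.Settings;

// Editorial sketch, not part of the diff: applying an audit ignore-filter
// policy at runtime. "examplePolicy" and the patterns are illustrative only.
Settings filterPolicy = Settings.builder()
        .putList("xpack.security.audit.logfile.events.ignore_filters.examplePolicy.users", "kibana_*")
        .putList("xpack.security.audit.logfile.events.ignore_filters.examplePolicy.indices", "logs-*")
        .build();
client().admin().cluster().prepareUpdateSettings()
        .setTransientSettings(filterPolicy)   // or setPersistentSettings(filterPolicy)
        .get();
```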
} + + public void testInvalidFilterSettings() throws Exception { + final String invalidLuceneRegex = "/invalid"; + final Settings.Builder settingsBuilder = Settings.builder(); + final String[] allSettingsKeys = new String[] { "xpack.security.audit.logfile.events.ignore_filters.invalid.users", + "xpack.security.audit.logfile.events.ignore_filters.invalid.realms", + "xpack.security.audit.logfile.events.ignore_filters.invalid.roles", + "xpack.security.audit.logfile.events.ignore_filters.invalid.indices" }; + settingsBuilder.put(randomFrom(allSettingsKeys), invalidLuceneRegex); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder.build()).get()); + assertThat(e.getMessage(), containsString("illegal value can't update")); + } + + public void testDynamicHostSettings() { + final boolean persistent = randomBoolean(); + final Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(LoggingAuditTrail.HOST_ADDRESS_SETTING.getKey(), true); + settingsBuilder.put(LoggingAuditTrail.HOST_NAME_SETTING.getKey(), true); + settingsBuilder.put(LoggingAuditTrail.NODE_NAME_SETTING.getKey(), true); + updateSettings(settingsBuilder.build(), persistent); + final LoggingAuditTrail loggingAuditTrail = ((LoggingAuditTrail) internalCluster().getInstances(AuditTrailService.class) + .iterator() + .next() + .getAuditTrails() + .iterator() + .next()); + assertTrue(Pattern.matches("\\[127\\.0\\.0\\.1\\] \\[127\\.0\\.0\\.1\\] \\[node_.*\\] ", loggingAuditTrail.localNodeInfo.prefix)); + settingsBuilder.put(LoggingAuditTrail.HOST_ADDRESS_SETTING.getKey(), false); + updateSettings(settingsBuilder.build(), persistent); + assertTrue(Pattern.matches("\\[127\\.0\\.0\\.1\\] \\[node_.*\\] ", loggingAuditTrail.localNodeInfo.prefix)); + settingsBuilder.put(LoggingAuditTrail.HOST_ADDRESS_SETTING.getKey(), true); + settingsBuilder.put(LoggingAuditTrail.HOST_NAME_SETTING.getKey(), false); + updateSettings(settingsBuilder.build(), persistent); + assertTrue(Pattern.matches("\\[127\\.0\\.0\\.1\\] \\[node_.*\\] ", loggingAuditTrail.localNodeInfo.prefix)); + settingsBuilder.put(LoggingAuditTrail.HOST_NAME_SETTING.getKey(), true); + settingsBuilder.put(LoggingAuditTrail.NODE_NAME_SETTING.getKey(), false); + updateSettings(settingsBuilder.build(), persistent); + assertTrue(Pattern.matches("\\[127\\.0\\.0\\.1\\] \\[127\\.0\\.0\\.1\\] ", loggingAuditTrail.localNodeInfo.prefix)); + } + + public void testDynamicRequestBodySettings() { + final boolean persistent = randomBoolean(); + final boolean enableRequestBody = randomBoolean(); + final Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(LoggingAuditTrail.INCLUDE_REQUEST_BODY.getKey(), enableRequestBody); + updateSettings(settingsBuilder.build(), persistent); + final LoggingAuditTrail loggingAuditTrail = ((LoggingAuditTrail) internalCluster().getInstances(AuditTrailService.class) + .iterator() + .next() + .getAuditTrails() + .iterator() + .next()); + assertEquals(enableRequestBody, loggingAuditTrail.includeRequestBody); + settingsBuilder.put(LoggingAuditTrail.INCLUDE_REQUEST_BODY.getKey(), !enableRequestBody); + updateSettings(settingsBuilder.build(), persistent); + assertEquals(!enableRequestBody, loggingAuditTrail.includeRequestBody); + } + + public void testDynamicEventsSettings() { + final List allEventTypes = Arrays.asList("anonymous_access_denied", "authentication_failed", "realm_authentication_failed", + "access_granted", 
"access_denied", "tampered_request", "connection_granted", "connection_denied", "system_access_granted", + "authentication_success", "run_as_granted", "run_as_denied"); + final List includedEvents = randomSubsetOf(allEventTypes); + final List excludedEvents = randomSubsetOf(allEventTypes); + final Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.putList(LoggingAuditTrail.INCLUDE_EVENT_SETTINGS.getKey(), includedEvents); + settingsBuilder.putList(LoggingAuditTrail.EXCLUDE_EVENT_SETTINGS.getKey(), excludedEvents); + updateSettings(settingsBuilder.build(), randomBoolean()); + final LoggingAuditTrail loggingAuditTrail = ((LoggingAuditTrail) internalCluster().getInstances(AuditTrailService.class) + .iterator() + .next() + .getAuditTrails() + .iterator() + .next()); + assertEquals(AuditLevel.parse(includedEvents, excludedEvents), loggingAuditTrail.events); + } + + private void updateSettings(Settings settings, boolean persistent) { + if (persistent) { + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings)); + } else { + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + } + } + + private static List randomNonEmptyListOfFilteredNames(String... namePrefix) { + final List filtered = new ArrayList<>(4); + for (int i = 0; i < randomIntBetween(1, 4); i++) { + filtered.add(Strings.arrayToCommaDelimitedString(namePrefix) + randomAlphaOfLengthBetween(1, 4)); + } + return filtered; + } + + private static Settings randomFilterPolicySettings(String policyName) { + final Settings.Builder settingsBuilder = Settings.builder(); + do { + if (randomBoolean()) { + // filter by username + final List filteredUsernames = randomNonEmptyListOfFilteredNames(); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters." + policyName + ".users", filteredUsernames); + } + if (randomBoolean()) { + // filter by realms + final List filteredRealms = randomNonEmptyListOfFilteredNames(); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters." + policyName + ".realms", filteredRealms); + } + if (randomBoolean()) { + // filter by roles + final List filteredRoles = randomNonEmptyListOfFilteredNames(); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters." + policyName + ".roles", filteredRoles); + } + if (randomBoolean()) { + // filter by indices + final List filteredIndices = randomNonEmptyListOfFilteredNames(); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters." 
+ policyName + ".indices", filteredIndices); + } + } while (settingsBuilder.build().isEmpty()); + + assertFalse(settingsBuilder.build().isEmpty()); + + return settingsBuilder.build(); + } + + private ClusterSettings mockClusterSettings() { + final List> settingsList = new ArrayList<>(); + LoggingAuditTrail.registerSettings(settingsList); + settingsList.addAll(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + return new ClusterSettings(Settings.EMPTY, new HashSet<>(settingsList)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailFilterTests.java new file mode 100644 index 0000000000000..4c9df8fd9d382 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailFilterTests.java @@ -0,0 +1,1652 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.audit.logfile; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.mock.orig.Mockito; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.test.rest.FakeRestRequest.Builder; +import org.elasticsearch.transport.TransportMessage; +import org.elasticsearch.xpack.core.security.audit.logfile.CapturingLogger; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail.AuditEventMetaInfo; +import org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrailTests.MockMessage; +import org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrailTests.RestContent; +import org.elasticsearch.xpack.security.rest.RemoteHostHeader; +import org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule; +import org.junit.Before; +import org.mockito.stubbing.Answer; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class LoggingAuditTrailFilterTests extends 
ESTestCase { + + private static final String FILTER_MARKER = "filterMarker_"; + private static final String UNFILTER_MARKER = "nofilter_"; + + private Settings settings; + private DiscoveryNode localNode; + private ClusterService clusterService; + private ThreadContext threadContext; + private Logger logger; + List logOutput; + + @Before + public void init() throws Exception { + settings = Settings.builder() + .put("xpack.security.audit.logfile.prefix.emit_node_host_address", randomBoolean()) + .put("xpack.security.audit.logfile.prefix.emit_node_host_name", randomBoolean()) + .put("xpack.security.audit.logfile.prefix.emit_node_name", randomBoolean()) + .put("xpack.security.audit.logfile.events.emit_request_body", randomBoolean()) + .put("xpack.security.audit.logfile.events.include", "_all") + .build(); + localNode = mock(DiscoveryNode.class); + when(localNode.getHostAddress()).thenReturn(buildNewFakeTransportAddress().toString()); + clusterService = mock(ClusterService.class); + when(clusterService.localNode()).thenReturn(localNode); + final ClusterSettings clusterSettings = mockClusterSettings(); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + Mockito.doAnswer((Answer) invocation -> { + final LoggingAuditTrail arg0 = (LoggingAuditTrail) invocation.getArguments()[0]; + arg0.updateLocalNodeInfo(localNode); + return null; + }).when(clusterService).addListener(Mockito.isA(LoggingAuditTrail.class)); + threadContext = new ThreadContext(Settings.EMPTY); + logger = CapturingLogger.newCapturingLogger(Level.INFO); + logOutput = CapturingLogger.output(logger.getName(), Level.INFO); + } + + public void testSingleCompletePolicyPredicate() throws Exception { + // create complete filter policy + final Settings.Builder settingsBuilder = Settings.builder().put(settings); + // filter by username + final List filteredUsernames = randomNonEmptyListOfFilteredNames(); + final List filteredUsers = filteredUsernames.stream().map(u -> { + if (randomBoolean()) { + return new User(u); + } else { + return new User(new User(u), new User(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 4))); + } + }).collect(Collectors.toList()); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.completeFilterPolicy.users", filteredUsernames); + // filter by realms + final List filteredRealms = randomNonEmptyListOfFilteredNames(); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.completeFilterPolicy.realms", filteredRealms); + // filter by roles + final List filteredRoles = randomNonEmptyListOfFilteredNames(); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.completeFilterPolicy.roles", filteredRoles); + // filter by indices + final List filteredIndices = randomNonEmptyListOfFilteredNames(); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.completeFilterPolicy.indices", filteredIndices); + + final LoggingAuditTrail auditTrail = new LoggingAuditTrail(settingsBuilder.build(), clusterService, logger, threadContext); + + // all fields match + assertTrue("Matches the filter predicate.", auditTrail.eventFilterPolicyRegistry.ignorePredicate() + .test(new AuditEventMetaInfo( + Optional.of(randomFrom(filteredUsers)), Optional.of(randomFrom(filteredRealms)), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0])), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0]))))); + final User 
unfilteredUser; + if (randomBoolean()) { + unfilteredUser = new User(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8)); + } else { + unfilteredUser = new User(new User(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8)), + new User(randomFrom(filteredUsers).principal())); + } + // one field does not match or is empty + assertFalse("Does not match the filter predicate because of the user.", + auditTrail.eventFilterPolicyRegistry.ignorePredicate().test(new AuditEventMetaInfo(Optional.of(unfilteredUser), + Optional.of(randomFrom(filteredRealms)), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0])), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0]))))); + assertFalse("Does not match the filter predicate because of the empty user.", + auditTrail.eventFilterPolicyRegistry.ignorePredicate().test(new AuditEventMetaInfo(Optional.empty(), + Optional.of(randomFrom(filteredRealms)), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0])), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0]))))); + assertFalse("Does not match the filter predicate because of the realm.", + auditTrail.eventFilterPolicyRegistry.ignorePredicate().test(new AuditEventMetaInfo(Optional.of(randomFrom(filteredUsers)), + Optional.of(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8)), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0])), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0]))))); + assertFalse("Does not match the filter predicate because of the empty realm.", + auditTrail.eventFilterPolicyRegistry.ignorePredicate().test(new AuditEventMetaInfo(Optional.of(randomFrom(filteredUsers)), + Optional.empty(), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0])), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0]))))); + final List someRolesDoNotMatch = new ArrayList<>(randomSubsetOf(randomIntBetween(0, filteredRoles.size()), filteredRoles)); + for (int i = 0; i < randomIntBetween(1, 8); i++) { + someRolesDoNotMatch.add(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8)); + } + assertFalse("Does not match the filter predicate because of some of the roles.", + auditTrail.eventFilterPolicyRegistry.ignorePredicate().test(new AuditEventMetaInfo(Optional.of(randomFrom(filteredUsers)), + Optional.of(randomFrom(filteredRealms)), Optional.of(someRolesDoNotMatch.toArray(new String[0])), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0]))))); + final Optional emptyRoles = randomBoolean() ? 
Optional.empty() : Optional.of(new String[0]); + assertFalse("Does not match the filter predicate because of the empty roles.", + auditTrail.eventFilterPolicyRegistry.ignorePredicate().test(new AuditEventMetaInfo(Optional.of(randomFrom(filteredUsers)), + Optional.of(randomFrom(filteredRealms)), emptyRoles, + Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0]))))); + final List someIndicesDoNotMatch = new ArrayList<>( + randomSubsetOf(randomIntBetween(0, filteredIndices.size()), filteredIndices)); + for (int i = 0; i < randomIntBetween(1, 8); i++) { + someIndicesDoNotMatch.add(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8)); + } + assertFalse("Does not match the filter predicate because of some of the indices.", + auditTrail.eventFilterPolicyRegistry.ignorePredicate() + .test(new AuditEventMetaInfo(Optional.of(randomFrom(filteredUsers)), Optional.of(randomFrom(filteredRealms)), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0])), + Optional.of(someIndicesDoNotMatch.toArray(new String[0]))))); + final Optional emptyIndices = randomBoolean() ? Optional.empty() : Optional.of(new String[0]); + assertFalse("Does not match the filter predicate because of the empty indices.", + auditTrail.eventFilterPolicyRegistry.ignorePredicate() + .test(new AuditEventMetaInfo(Optional.of(randomFrom(filteredUsers)), Optional.of(randomFrom(filteredRealms)), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0])), + emptyIndices))); + } + + public void testSingleCompleteWithEmptyFieldPolicyPredicate() throws Exception { + // create complete filter policy + final Settings.Builder settingsBuilder = Settings.builder().put(settings); + // filter by username + final List filteredUsernames = randomNonEmptyListOfFilteredNames(); + final List filteredUsers = filteredUsernames.stream().map(u -> { + if (randomBoolean()) { + return new User(u); + } else { + return new User(new User(u), new User(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 4))); + } + }).collect(Collectors.toList()); + filteredUsernames.add(""); // filter by missing user name + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.completeFilterPolicy.users", filteredUsernames); + // filter by realms + final List filteredRealms = randomNonEmptyListOfFilteredNames(); + filteredRealms.add(""); // filter by missing realm name + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.completeFilterPolicy.realms", filteredRealms); + filteredRealms.remove(""); + // filter by roles + final List filteredRoles = randomNonEmptyListOfFilteredNames(); + filteredRoles.add(""); // filter by missing role name + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.completeFilterPolicy.roles", filteredRoles); + filteredRoles.remove(""); + // filter by indices + final List filteredIndices = randomNonEmptyListOfFilteredNames(); + filteredIndices.add(""); // filter by missing index name + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.completeFilterPolicy.indices", filteredIndices); + filteredIndices.remove(""); + + final LoggingAuditTrail auditTrail = new LoggingAuditTrail(settingsBuilder.build(), clusterService, logger, threadContext); + + // all fields match + assertTrue("Matches the filter predicate.", + auditTrail.eventFilterPolicyRegistry.ignorePredicate().test(new 
AuditEventMetaInfo(Optional.of(randomFrom(filteredUsers)), + Optional.of(randomFrom(filteredRealms)), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0])), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0]))))); + final User unfilteredUser; + if (randomBoolean()) { + unfilteredUser = new User(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8)); + } else { + unfilteredUser = new User(new User(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8)), + new User(randomFrom(filteredUsers).principal())); + } + // one field does not match or is empty + assertFalse("Does not match the filter predicate because of the user.", + auditTrail.eventFilterPolicyRegistry.ignorePredicate().test(new AuditEventMetaInfo(Optional.of(unfilteredUser), + Optional.of(randomFrom(filteredRealms)), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0])), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0]))))); + assertTrue("Matches the filter predicate because of the empty user.", + auditTrail.eventFilterPolicyRegistry.ignorePredicate().test(new AuditEventMetaInfo(Optional.empty(), + Optional.of(randomFrom(filteredRealms)), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0])), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0]))))); + assertFalse("Does not match the filter predicate because of the realm.", + auditTrail.eventFilterPolicyRegistry.ignorePredicate().test(new AuditEventMetaInfo(Optional.of(randomFrom(filteredUsers)), + Optional.of(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8)), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0])), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0]))))); + assertTrue("Matches the filter predicate because of the empty realm.", + auditTrail.eventFilterPolicyRegistry.ignorePredicate().test(new AuditEventMetaInfo(Optional.of(randomFrom(filteredUsers)), + Optional.empty(), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0])), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0]))))); + final List someRolesDoNotMatch = new ArrayList<>(randomSubsetOf(randomIntBetween(0, filteredRoles.size()), filteredRoles)); + for (int i = 0; i < randomIntBetween(1, 8); i++) { + someRolesDoNotMatch.add(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8)); + } + assertFalse("Does not match the filter predicate because of some of the roles.", + auditTrail.eventFilterPolicyRegistry.ignorePredicate().test(new AuditEventMetaInfo(Optional.of(randomFrom(filteredUsers)), + Optional.of(randomFrom(filteredRealms)), Optional.of(someRolesDoNotMatch.toArray(new String[0])), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0]))))); + final Optional emptyRoles = randomBoolean() ? 
Optional.empty() : Optional.of(new String[0]); + assertTrue("Matches the filter predicate because of the empty roles.", + auditTrail.eventFilterPolicyRegistry.ignorePredicate().test(new AuditEventMetaInfo(Optional.of(randomFrom(filteredUsers)), + Optional.of(randomFrom(filteredRealms)), emptyRoles, + Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0]))))); + final List someIndicesDoNotMatch = new ArrayList<>( + randomSubsetOf(randomIntBetween(0, filteredIndices.size()), filteredIndices)); + for (int i = 0; i < randomIntBetween(1, 8); i++) { + someIndicesDoNotMatch.add(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8)); + } + assertFalse("Does not match the filter predicate because of some of the indices.", + auditTrail.eventFilterPolicyRegistry.ignorePredicate() + .test(new AuditEventMetaInfo(Optional.of(randomFrom(filteredUsers)), Optional.of(randomFrom(filteredRealms)), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0])), + Optional.of(someIndicesDoNotMatch.toArray(new String[0]))))); + final Optional emptyIndices = randomBoolean() ? Optional.empty() : Optional.of(new String[0]); + assertTrue("Matches the filter predicate because of the empty indices.", auditTrail.eventFilterPolicyRegistry.ignorePredicate() + .test(new AuditEventMetaInfo(Optional.of(randomFrom(filteredUsers)), Optional.of(randomFrom(filteredRealms)), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0])), + emptyIndices))); + } + + public void testTwoPolicyPredicatesWithMissingFields() throws Exception { + final Settings.Builder settingsBuilder = Settings.builder().put(settings); + // first policy: realms and roles filters + final List filteredRealms = randomNonEmptyListOfFilteredNames(); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.firstPolicy.realms", filteredRealms); + final List filteredRoles = randomNonEmptyListOfFilteredNames(); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.firstPolicy.roles", filteredRoles); + // second policy: users and indices filters + final List filteredUsernames = randomNonEmptyListOfFilteredNames(); + final List filteredUsers = filteredUsernames.stream().map(u -> { + if (randomBoolean()) { + return new User(u); + } else { + return new User(new User(u), new User(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 4))); + } + }).collect(Collectors.toList()); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.secondPolicy.users", filteredUsernames); + // filter by indices + final List filteredIndices = randomNonEmptyListOfFilteredNames(); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.secondPolicy.indices", filteredIndices); + + final LoggingAuditTrail auditTrail = new LoggingAuditTrail(settingsBuilder.build(), clusterService, logger, threadContext); + + final User unfilteredUser; + if (randomBoolean()) { + unfilteredUser = new User(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8)); + } else { + unfilteredUser = new User(new User(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8)), + new User(randomFrom(filteredUsers).principal())); + } + final List someRolesDoNotMatch = new ArrayList<>(randomSubsetOf(randomIntBetween(0, filteredRoles.size()), filteredRoles)); + for (int i = 0; i < randomIntBetween(1, 8); i++) { + someRolesDoNotMatch.add(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8)); + } + final 
List someIndicesDoNotMatch = new ArrayList<>( + randomSubsetOf(randomIntBetween(0, filteredIndices.size()), filteredIndices)); + for (int i = 0; i < randomIntBetween(1, 8); i++) { + someIndicesDoNotMatch.add(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8)); + } + // matches both the first and the second policies + assertTrue("Matches both the first and the second filter predicates.", + auditTrail.eventFilterPolicyRegistry.ignorePredicate().test(new AuditEventMetaInfo(Optional.of(randomFrom(filteredUsers)), + Optional.of(randomFrom(filteredRealms)), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0])), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0]))))); + // matches first policy but not the second + assertTrue("Matches the first filter predicate but not the second.", + auditTrail.eventFilterPolicyRegistry.ignorePredicate().test(new AuditEventMetaInfo(Optional.of(unfilteredUser), + Optional.of(randomFrom(filteredRealms)), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredRoles.size()), filteredRoles).toArray(new String[0])), + Optional.of(someIndicesDoNotMatch.toArray(new String[0]))))); + // matches the second policy but not the first + assertTrue("Matches the second filter predicate but not the first.", + auditTrail.eventFilterPolicyRegistry.ignorePredicate().test(new AuditEventMetaInfo(Optional.of(randomFrom(filteredUsers)), + Optional.of(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8)), + Optional.of(someRolesDoNotMatch.toArray(new String[0])), + Optional.of(randomSubsetOf(randomIntBetween(1, filteredIndices.size()), filteredIndices).toArray(new String[0]))))); + // matches neither the first nor the second policies + assertFalse("Matches neither the first nor the second filter predicates.", + auditTrail.eventFilterPolicyRegistry.ignorePredicate() + .test(new AuditEventMetaInfo(Optional.of(unfilteredUser), + Optional.of(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 8)), + Optional.of(someRolesDoNotMatch.toArray(new String[0])), + Optional.of(someIndicesDoNotMatch.toArray(new String[0]))))); + } + + public void testUsersFilter() throws Exception { + final List allFilteredUsers = new ArrayList<>(); + final Settings.Builder settingsBuilder = Settings.builder().put(settings); + for (int i = 0; i < randomIntBetween(1, 4); i++) { + final List filteredUsers = randomNonEmptyListOfFilteredNames(); + allFilteredUsers.addAll(filteredUsers); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.policy" + i + ".users", filteredUsers); + } + // a filter for a field consisting of an empty string ("") or an empty list([]) + // will match events that lack that field + final boolean filterMissingUser = randomBoolean(); + if (filterMissingUser) { + if (randomBoolean()) { + final List filteredUsers = randomNonEmptyListOfFilteredNames(); + // possibly renders list empty + filteredUsers.remove(0); + allFilteredUsers.addAll(filteredUsers); + filteredUsers.add(""); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.missingPolicy.users", filteredUsers); + } else { + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.missingPolicy.users", + Collections.emptyList()); + } + } + final Authentication filteredAuthentication; + if (randomBoolean()) { + filteredAuthentication = createAuthentication( + new User(randomFrom(allFilteredUsers), new String[] { "r1" }, new User("authUsername", new String[] { "r2" 
})), + "effectiveRealmName"); + } else { + filteredAuthentication = createAuthentication(new User(randomFrom(allFilteredUsers), new String[] { "r1" }), + "effectiveRealmName"); + } + final Authentication unfilteredAuthentication; + if (randomBoolean()) { + unfilteredAuthentication = createAuthentication(new User(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 4), + new String[] { "r1" }, new User("authUsername", new String[] { "r2" })), "effectiveRealmName"); + } else { + unfilteredAuthentication = createAuthentication( + new User(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 4), new String[] { "r1" }), "effectiveRealmName"); + } + final TransportMessage message = randomBoolean() ? new MockMessage(threadContext) + : new MockIndicesRequest(threadContext, new String[] { "idx1", "idx2" }); + final MockToken filteredToken = new MockToken(randomFrom(allFilteredUsers)); + final MockToken unfilteredToken = new MockToken(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 4)); + + final LoggingAuditTrail auditTrail = new LoggingAuditTrail(settingsBuilder.build(), clusterService, logger, threadContext); + // anonymous accessDenied + auditTrail.anonymousAccessDenied("_action", message); + if (filterMissingUser) { + assertThat("Anonymous message: not filtered out by the missing user filter", logOutput.size(), is(0)); + } else { + assertThat("Anonymous message: filtered out by the user filters", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.anonymousAccessDenied(getRestRequest()); + if (filterMissingUser) { + assertThat("Anonymous rest request: not filtered out by the missing user filter", logOutput.size(), is(0)); + } else { + assertThat("Anonymous rest request: filtered out by user filters", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + // authenticationFailed + auditTrail.authenticationFailed(getRestRequest()); + if (filterMissingUser) { + assertThat("AuthenticationFailed no token rest request: not filtered out by the missing user filter", logOutput.size(), is(0)); + } else { + assertThat("AuthenticationFailed no token rest request: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed(unfilteredToken, "_action", message); + assertThat("AuthenticationFailed token request: unfiltered user is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed(filteredToken, "_action", message); + assertThat("AuthenticationFailed token request: filtered user is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed("_action", message); + if (filterMissingUser) { + assertThat("AuthenticationFailed no token message: not filtered out by the missing user filter", logOutput.size(), is(0)); + } else { + assertThat("AuthenticationFailed no token message: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed(unfilteredToken, getRestRequest()); + assertThat("AuthenticationFailed rest request: unfiltered user is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed(filteredToken, getRestRequest()); + assertThat("AuthenticationFailed rest request: filtered user is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + 
threadContext.stashContext(); + + auditTrail.authenticationFailed("_realm", unfilteredToken, "_action", message); + assertThat("AuthenticationFailed realm message: unfiltered user is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed("_realm", filteredToken, "_action", message); + assertThat("AuthenticationFailed realm message: filtered user is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed("_realm", unfilteredToken, getRestRequest()); + assertThat("AuthenticationFailed realm rest request: unfiltered user is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed("_realm", filteredToken, getRestRequest()); + assertThat("AuthenticationFailed realm rest request: filtered user is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + // accessGranted + auditTrail.accessGranted(unfilteredAuthentication, "_action", message, new String[] { "role1" }); + assertThat("AccessGranted message: unfiltered user is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessGranted(filteredAuthentication, "_action", message, new String[] { "role1" }); + assertThat("AccessGranted message: filtered user is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessGranted(createAuthentication(SystemUser.INSTANCE, "effectiveRealmName"), "internal:_action", message, + new String[] { "role1" }); + assertThat("AccessGranted internal message: system user is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessGranted(unfilteredAuthentication, "internal:_action", message, new String[] { "role1" }); + assertThat("AccessGranted internal message: unfiltered user is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessGranted(filteredAuthentication, "internal:_action", message, new String[] { "role1" }); + assertThat("AccessGranted internal message: filtered user is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + // accessDenied + auditTrail.accessDenied(unfilteredAuthentication, "_action", message, new String[] { "role1" }); + assertThat("AccessDenied message: unfiltered user is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessDenied(filteredAuthentication, "_action", message, new String[] { "role1" }); + assertThat("AccessDenied message: filtered user is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessDenied(createAuthentication(SystemUser.INSTANCE, "effectiveRealmName"), "internal:_action", message, + new String[] { "role1" }); + assertThat("AccessDenied internal message: system user is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessDenied(unfilteredAuthentication, "internal:_action", message, new String[] { "role1" }); + assertThat("AccessDenied internal message: unfiltered user is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessDenied(filteredAuthentication, 
"internal:_action", message, new String[] { "role1" }); + assertThat("AccessDenied internal message: filtered user is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + // tamperedRequest + auditTrail.tamperedRequest(getRestRequest()); + if (filterMissingUser) { + assertThat("Tampered rest: is not filtered out by the missing user filter", logOutput.size(), is(0)); + } else { + assertThat("Tampered rest: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.tamperedRequest("_action", message); + if (filterMissingUser) { + assertThat("Tampered message: is not filtered out by the missing user filter", logOutput.size(), is(0)); + } else { + assertThat("Tampered message: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.tamperedRequest(unfilteredAuthentication.getUser(), "_action", message); + assertThat("Tampered message: unfiltered user is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.tamperedRequest(filteredAuthentication.getUser(), "_action", message); + assertThat("Tampered message: filtered user is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + // connection denied + auditTrail.connectionDenied(InetAddress.getLoopbackAddress(), "default", new SecurityIpFilterRule(false, "_all")); + if (filterMissingUser) { + assertThat("Connection denied: is not filtered out by the missing user filter", logOutput.size(), is(0)); + } else { + assertThat("Connection denied: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + // connection granted + auditTrail.connectionGranted(InetAddress.getLoopbackAddress(), "default", new SecurityIpFilterRule(false, "_all")); + if (filterMissingUser) { + assertThat("Connection granted: is not filtered out by the missing user filter", logOutput.size(), is(0)); + } else { + assertThat("Connection granted: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + // runAsGranted + auditTrail.runAsGranted(unfilteredAuthentication, "_action", new MockMessage(threadContext), new String[] { "role1" }); + assertThat("RunAsGranted message: unfiltered user is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.runAsGranted(filteredAuthentication, "_action", new MockMessage(threadContext), new String[] { "role1" }); + assertThat("RunAsGranted message: filtered user is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + // runAsDenied + auditTrail.runAsDenied(unfilteredAuthentication, "_action", new MockMessage(threadContext), new String[] { "role1" }); + assertThat("RunAsDenied message: unfiltered user is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.runAsDenied(filteredAuthentication, "_action", new MockMessage(threadContext), new String[] { "role1" }); + assertThat("RunAsDenied message: filtered user is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.runAsDenied(unfilteredAuthentication, getRestRequest(), new String[] { "role1" }); + assertThat("RunAsDenied rest request: unfiltered user is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + 
threadContext.stashContext(); + + auditTrail.runAsDenied(filteredAuthentication, getRestRequest(), new String[] { "role1" }); + assertThat("RunAsDenied rest request: filtered user is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + // authentication Success + auditTrail.authenticationSuccess("_realm", unfilteredAuthentication.getUser(), getRestRequest()); + assertThat("AuthenticationSuccess rest request: unfiltered user is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationSuccess("_realm", filteredAuthentication.getUser(), getRestRequest()); + assertThat("AuthenticationSuccess rest request: filtered user is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationSuccess("_realm", unfilteredAuthentication.getUser(), "_action", message); + assertThat("AuthenticationSuccess message: unfiltered user is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationSuccess("_realm", filteredAuthentication.getUser(), "_action", message); + assertThat("AuthenticationSuccess message: filtered user is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + } + + public void testRealmsFilter() throws Exception { + final List allFilteredRealms = new ArrayList<>(); + final Settings.Builder settingsBuilder = Settings.builder().put(settings); + for (int i = 0; i < randomIntBetween(1, 4); i++) { + final List filteredRealms = randomNonEmptyListOfFilteredNames(); + allFilteredRealms.addAll(filteredRealms); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.policy" + i + ".realms", filteredRealms); + } + // a filter for a field consisting of an empty string ("") or an empty list([]) + // will match events that lack that field + final boolean filterMissingRealm = randomBoolean(); + if (filterMissingRealm) { + if (randomBoolean()) { + final List filteredRealms = randomNonEmptyListOfFilteredNames(); + // possibly renders list empty + filteredRealms.remove(0); + allFilteredRealms.addAll(filteredRealms); + filteredRealms.add(""); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.missingPolicy.realms", filteredRealms); + } else { + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.missingPolicy.realms", + Collections.emptyList()); + } + } + final String filteredRealm = randomFrom(allFilteredRealms); + final String unfilteredRealm = UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 4); + User user; + if (randomBoolean()) { + user = new User("user1", new String[] { "r1" }, new User("authUsername", new String[] { "r2" })); + } else { + user = new User("user1", new String[] { "r1" }); + } + final TransportMessage message = randomBoolean() ? 
new MockMessage(threadContext) + : new MockIndicesRequest(threadContext, new String[] { "idx1", "idx2" }); + final MockToken authToken = new MockToken("token1"); + + final LoggingAuditTrail auditTrail = new LoggingAuditTrail(settingsBuilder.build(), clusterService, logger, threadContext); + // anonymous accessDenied + auditTrail.anonymousAccessDenied("_action", message); + if (filterMissingRealm) { + assertThat("Anonymous message: not filtered out by the missing realm filter", logOutput.size(), is(0)); + } else { + assertThat("Anonymous message: filtered out by the realm filters", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.anonymousAccessDenied(getRestRequest()); + if (filterMissingRealm) { + assertThat("Anonymous rest request: not filtered out by the missing realm filter", logOutput.size(), is(0)); + } else { + assertThat("Anonymous rest request: filtered out by realm filters", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + // authenticationFailed + auditTrail.authenticationFailed(getRestRequest()); + if (filterMissingRealm) { + assertThat("AuthenticationFailed no token rest request: not filtered out by the missing realm filter", logOutput.size(), is(0)); + } else { + assertThat("AuthenticationFailed no token rest request: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed(authToken, "_action", message); + if (filterMissingRealm) { + assertThat("AuthenticationFailed token request: not filtered out by the missing realm filter", logOutput.size(), is(0)); + } else { + assertThat("AuthenticationFailed token request: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed("_action", message); + if (filterMissingRealm) { + assertThat("AuthenticationFailed no token message: not filtered out by the missing realm filter", logOutput.size(), is(0)); + } else { + assertThat("AuthenticationFailed no token message: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed(authToken, getRestRequest()); + if (filterMissingRealm) { + assertThat("AuthenticationFailed rest request: not filtered out by the missing realm filter", logOutput.size(), is(0)); + } else { + assertThat("AuthenticationFailed rest request: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed(unfilteredRealm, authToken, "_action", message); + assertThat("AuthenticationFailed realm message: unfiltered realm is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed(filteredRealm, authToken, "_action", message); + assertThat("AuthenticationFailed realm message: filtered realm is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed(unfilteredRealm, authToken, getRestRequest()); + assertThat("AuthenticationFailed realm rest request: unfiltered realm is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed(filteredRealm, authToken, getRestRequest()); + assertThat("AuthenticationFailed realm rest request: filtered realm is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + 
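// Sketch of the "missing field" convention noted in the comment at the top of this test: adding
// "" to a realms list (or configuring an empty list) also matches audit events that carry no realm.
// The policy name "missingRealmExample" and the realm name "ldap1" are illustrative assumptions:
final Settings missingRealmExample = Settings.builder()
        .putList("xpack.security.audit.logfile.events.ignore_filters.missingRealmExample.realms", "ldap1", "")
        .build();
// With such a policy, events from realm "ldap1" and events that carry no realm field are both ignored.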
threadContext.stashContext(); + + // accessGranted + auditTrail.accessGranted(createAuthentication(user, filteredRealm), "_action", message, new String[] { "role1" }); + assertThat("AccessGranted message: filtered realm is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessGranted(createAuthentication(user, unfilteredRealm), "_action", message, new String[] { "role1" }); + assertThat("AccessGranted message: unfiltered realm is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessGranted(createAuthentication(SystemUser.INSTANCE, filteredRealm), "internal:_action", message, + new String[] { "role1" }); + assertThat("AccessGranted internal message system user: filtered realm is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessGranted(createAuthentication(SystemUser.INSTANCE, unfilteredRealm), "internal:_action", message, + new String[] { "role1" }); + assertThat("AccessGranted internal message system user: unfiltered realm is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessGranted(createAuthentication(user, filteredRealm), "internal:_action", message, new String[] { "role1" }); + assertThat("AccessGranted internal message: filtered realm is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessGranted(createAuthentication(user, unfilteredRealm), "internal:_action", message, new String[] { "role1" }); + assertThat("AccessGranted internal message: unfiltered realm is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + // accessDenied + auditTrail.accessDenied(createAuthentication(user, filteredRealm), "_action", message, new String[] { "role1" }); + assertThat("AccessDenied message: filtered realm is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessDenied(createAuthentication(user, unfilteredRealm), "_action", message, new String[] { "role1" }); + assertThat("AccessDenied message: unfiltered realm is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessDenied(createAuthentication(SystemUser.INSTANCE, filteredRealm), "internal:_action", message, + new String[] { "role1" }); + assertThat("AccessDenied internal message system user: filtered realm is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessDenied(createAuthentication(SystemUser.INSTANCE, unfilteredRealm), "internal:_action", message, + new String[] { "role1" }); + assertThat("AccessDenied internal message system user: unfiltered realm is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessDenied(createAuthentication(user, filteredRealm), "internal:_action", message, new String[] { "role1" }); + assertThat("AccessGranted internal message: filtered realm is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessDenied(createAuthentication(user, unfilteredRealm), "internal:_action", message, new String[] { "role1" }); + assertThat("AccessGranted internal message: unfiltered realm is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + 
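// These tests vary one attribute at a time, but a single ignore policy can name several attributes;
// as far as the filter semantics go, an event is ignored only if it matches every attribute of at
// least one policy. A sketch combining two attributes (all names here are illustrative assumptions):
final Settings combinedPolicyExample = Settings.builder()
        .putList("xpack.security.audit.logfile.events.ignore_filters.combinedExample.users", "kibana")
        .putList("xpack.security.audit.logfile.events.ignore_filters.combinedExample.realms", "native")
        .build();
// Only events for user "kibana" authenticated through the "native" realm would be suppressed by it.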
threadContext.stashContext(); + + // tamperedRequest + auditTrail.tamperedRequest(getRestRequest()); + if (filterMissingRealm) { + assertThat("Tampered rest: is not filtered out by the missing realm filter", logOutput.size(), is(0)); + } else { + assertThat("Tampered rest: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.tamperedRequest("_action", message); + if (filterMissingRealm) { + assertThat("Tampered message: is not filtered out by the missing realm filter", logOutput.size(), is(0)); + } else { + assertThat("Tampered message: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.tamperedRequest(user, "_action", message); + if (filterMissingRealm) { + assertThat("Tampered message: is not filtered out by the missing realm filter", logOutput.size(), is(0)); + } else { + assertThat("Tampered message: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + // connection denied + auditTrail.connectionDenied(InetAddress.getLoopbackAddress(), "default", new SecurityIpFilterRule(false, "_all")); + if (filterMissingRealm) { + assertThat("Connection denied: is not filtered out by the missing realm filter", logOutput.size(), is(0)); + } else { + assertThat("Connection denied: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + // connection granted + auditTrail.connectionGranted(InetAddress.getLoopbackAddress(), "default", new SecurityIpFilterRule(false, "_all")); + if (filterMissingRealm) { + assertThat("Connection granted: is not filtered out by the missing realm filter", logOutput.size(), is(0)); + } else { + assertThat("Connection granted: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + // runAsGranted + auditTrail.runAsGranted(createAuthentication(user, filteredRealm), "_action", new MockMessage(threadContext), + new String[] { "role1" }); + assertThat("RunAsGranted message: filtered realm is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.runAsGranted(createAuthentication(user, unfilteredRealm), "_action", new MockMessage(threadContext), + new String[] { "role1" }); + assertThat("RunAsGranted message: unfiltered realm is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + // runAsDenied + auditTrail.runAsDenied(createAuthentication(user, filteredRealm), "_action", new MockMessage(threadContext), + new String[] { "role1" }); + assertThat("RunAsDenied message: filtered realm is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.runAsDenied(createAuthentication(user, unfilteredRealm), "_action", new MockMessage(threadContext), + new String[] { "role1" }); + assertThat("RunAsDenied message: unfiltered realm is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.runAsDenied(createAuthentication(user, filteredRealm), getRestRequest(), new String[] { "role1" }); + assertThat("RunAsDenied rest request: filtered realm is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.runAsDenied(createAuthentication(user, unfilteredRealm), getRestRequest(), new String[] { "role1" }); + assertThat("RunAsDenied rest request: unfiltered realm 
is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + // authentication Success + auditTrail.authenticationSuccess(unfilteredRealm, user, getRestRequest()); + assertThat("AuthenticationSuccess rest request: unfiltered realm is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationSuccess(filteredRealm, user, getRestRequest()); + assertThat("AuthenticationSuccess rest request: filtered realm is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationSuccess(unfilteredRealm, user, "_action", message); + assertThat("AuthenticationSuccess message: unfiltered realm is filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationSuccess(filteredRealm, user, "_action", message); + assertThat("AuthenticationSuccess message: filtered realm is not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + } + + public void testRolesFilter() throws Exception { + final List> allFilteredRoles = new ArrayList<>(); + final Settings.Builder settingsBuilder = Settings.builder().put(settings); + for (int i = 0; i < randomIntBetween(1, 4); i++) { + final List filteredRoles = randomNonEmptyListOfFilteredNames(); + allFilteredRoles.add(new ArrayList<>(filteredRoles)); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.policy" + i + ".roles", filteredRoles); + } + // a filter for a field consisting of an empty string ("") or an empty list([]) + // will match events that lack that field + final boolean filterMissingRoles = randomBoolean(); + if (filterMissingRoles) { + if (randomBoolean()) { + final List filteredRoles = randomNonEmptyListOfFilteredNames(); + // possibly renders list empty + filteredRoles.remove(0); + if (filteredRoles.isEmpty() == false) { + allFilteredRoles.add(new ArrayList<>(filteredRoles)); + } + filteredRoles.add(""); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.missingPolicy.roles", filteredRoles); + } else { + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.missingPolicy.roles", + Collections.emptyList()); + } + } + // filtered roles are a subset of the roles of any policy + final List filterPolicy = randomFrom(allFilteredRoles); + final String[] filteredRoles = randomListFromLengthBetween(filterPolicy, 1, filterPolicy.size()).toArray(new String[0]); + // unfiltered role sets either have roles distinct from any other policy or are + // a mix of roles from 2 or more policies + final List unfilteredPolicy = randomFrom(allFilteredRoles); + List _unfilteredRoles; + if (randomBoolean()) { + _unfilteredRoles = randomListFromLengthBetween(unfilteredPolicy, 0, unfilteredPolicy.size()); + // add roles distinct from any role in any filter policy + for (int i = 0; i < randomIntBetween(1, 4); i++) { + _unfilteredRoles.add(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 4)); + } + } else { + _unfilteredRoles = randomListFromLengthBetween(unfilteredPolicy, 1, unfilteredPolicy.size()); + // add roles from other filter policies + final List otherRoles = randomNonEmptyListOfFilteredNames("other"); + _unfilteredRoles.addAll(randomListFromLengthBetween(otherRoles, 1, otherRoles.size())); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.otherPolicy.roles", otherRoles); + } + final String[] unfilteredRoles = 
_unfilteredRoles.toArray(new String[0]); + final Authentication authentication; + if (randomBoolean()) { + authentication = createAuthentication(new User("user1", new String[] { "r1" }, new User("authUsername", new String[] { "r2" })), + "effectiveRealmName"); + } else { + authentication = createAuthentication(new User("user1", new String[] { "r1" }), "effectiveRealmName"); + } + final TransportMessage message = randomBoolean() ? new MockMessage(threadContext) + : new MockIndicesRequest(threadContext, new String[] { "idx1", "idx2" }); + final MockToken authToken = new MockToken("token1"); + + final LoggingAuditTrail auditTrail = new LoggingAuditTrail(settingsBuilder.build(), clusterService, logger, threadContext); + // anonymous accessDenied + auditTrail.anonymousAccessDenied("_action", message); + if (filterMissingRoles) { + assertThat("Anonymous message: not filtered out by the missing roles filter", logOutput.size(), is(0)); + } else { + assertThat("Anonymous message: filtered out by the roles filters", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.anonymousAccessDenied(getRestRequest()); + if (filterMissingRoles) { + assertThat("Anonymous rest request: not filtered out by the missing roles filter", logOutput.size(), is(0)); + } else { + assertThat("Anonymous rest request: filtered out by roles filters", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + // authenticationFailed + auditTrail.authenticationFailed(getRestRequest()); + if (filterMissingRoles) { + assertThat("AuthenticationFailed no token rest request: not filtered out by the missing roles filter", logOutput.size(), is(0)); + } else { + assertThat("AuthenticationFailed no token rest request: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed(authToken, "_action", message); + if (filterMissingRoles) { + assertThat("AuthenticationFailed token request: not filtered out by the missing roles filter", logOutput.size(), is(0)); + } else { + assertThat("AuthenticationFailed token request: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed("_action", message); + if (filterMissingRoles) { + assertThat("AuthenticationFailed no token message: not filtered out by the missing roles filter", logOutput.size(), is(0)); + } else { + assertThat("AuthenticationFailed no token message: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed(authToken, getRestRequest()); + if (filterMissingRoles) { + assertThat("AuthenticationFailed rest request: not filtered out by the missing roles filter", logOutput.size(), is(0)); + } else { + assertThat("AuthenticationFailed rest request: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed("_realm", authToken, "_action", message); + if (filterMissingRoles) { + assertThat("AuthenticationFailed realm message: not filtered out by the missing roles filter", logOutput.size(), is(0)); + } else { + assertThat("AuthenticationFailed realm message: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed("_realm", authToken, getRestRequest()); + if (filterMissingRoles) { + assertThat("AuthenticationFailed realm rest 
request: not filtered out by the missing roles filter", logOutput.size(), is(0)); + } else { + assertThat("AuthenticationFailed realm rest request: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + // accessGranted + auditTrail.accessGranted(authentication, "_action", message, unfilteredRoles); + assertThat("AccessGranted message: unfiltered roles filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessGranted(authentication, "_action", message, filteredRoles); + assertThat("AccessGranted message: filtered roles not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessGranted(createAuthentication(SystemUser.INSTANCE, "effectiveRealmName"), "internal:_action", message, + unfilteredRoles); + assertThat("AccessGranted internal message system user: unfiltered roles filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessGranted(createAuthentication(SystemUser.INSTANCE, "effectiveRealmName"), "internal:_action", message, + filteredRoles); + assertThat("AccessGranted internal message system user: filtered roles not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessGranted(authentication, "internal:_action", message, unfilteredRoles); + assertThat("AccessGranted internal message: unfiltered roles filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessGranted(authentication, "internal:_action", message, filteredRoles); + assertThat("AccessGranted internal message: filtered roles not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + // accessDenied + auditTrail.accessDenied(authentication, "_action", message, unfilteredRoles); + assertThat("AccessDenied message: unfiltered roles filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessDenied(authentication, "_action", message, filteredRoles); + assertThat("AccessDenied message: filtered roles not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessDenied(createAuthentication(SystemUser.INSTANCE, "effectiveRealmName"), "internal:_action", message, + unfilteredRoles); + assertThat("AccessDenied internal message system user: unfiltered roles filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessDenied(createAuthentication(SystemUser.INSTANCE, "effectiveRealmName"), "internal:_action", message, + filteredRoles); + assertThat("AccessDenied internal message system user: filtered roles not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessDenied(authentication, "internal:_action", message, unfilteredRoles); + assertThat("AccessDenied internal message: unfiltered roles filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessDenied(authentication, "internal:_action", message, filteredRoles); + assertThat("AccessDenied internal message: filtered roles not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + // connection denied + auditTrail.connectionDenied(InetAddress.getLoopbackAddress(), "default", new 
SecurityIpFilterRule(false, "_all")); + if (filterMissingRoles) { + assertThat("Connection denied: is not filtered out by the missing roles filter", logOutput.size(), is(0)); + } else { + assertThat("Connection denied: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + // connection granted + auditTrail.connectionGranted(InetAddress.getLoopbackAddress(), "default", new SecurityIpFilterRule(false, "_all")); + if (filterMissingRoles) { + assertThat("Connection granted: is not filtered out by the missing roles filter", logOutput.size(), is(0)); + } else { + assertThat("Connection granted: is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + // runAsGranted + auditTrail.runAsGranted(authentication, "_action", new MockMessage(threadContext), unfilteredRoles); + assertThat("RunAsGranted message: unfiltered roles filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.runAsGranted(authentication, "_action", new MockMessage(threadContext), filteredRoles); + assertThat("RunAsGranted message: filtered roles not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + // runAsDenied + auditTrail.runAsDenied(authentication, "_action", new MockMessage(threadContext), unfilteredRoles); + assertThat("RunAsDenied message: unfiltered roles filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.runAsDenied(authentication, "_action", new MockMessage(threadContext), filteredRoles); + assertThat("RunAsDenied message: filtered roles not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.runAsDenied(authentication, getRestRequest(), unfilteredRoles); + assertThat("RunAsDenied rest request: unfiltered roles filtered out", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.runAsDenied(authentication, getRestRequest(), filteredRoles); + assertThat("RunAsDenied rest request: filtered roles not filtered out", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + // authentication Success + auditTrail.authenticationSuccess("_realm", authentication.getUser(), getRestRequest()); + if (filterMissingRoles) { + assertThat("AuthenticationSuccess rest request: is not filtered out by the missing roles filter", logOutput.size(), is(0)); + } else { + assertThat("AuthenticationSuccess rest request: unfiltered realm is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationSuccess("_realm", authentication.getUser(), "_action", message); + if (filterMissingRoles) { + assertThat("AuthenticationSuccess message: is not filtered out by the missing roles filter", logOutput.size(), is(0)); + } else { + assertThat("AuthenticationSuccess message: unfiltered realm is filtered out", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + } + + public void testIndicesFilter() throws Exception { + final List> allFilteredIndices = new ArrayList<>(); + final Settings.Builder settingsBuilder = Settings.builder().put(settings); + for (int i = 0; i < randomIntBetween(1, 3); i++) { + final List filteredIndices = randomNonEmptyListOfFilteredNames(); + allFilteredIndices.add(new ArrayList<>(filteredIndices)); + 
settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.policy" + i + ".indices", filteredIndices); + } + // a filter for a field consisting of an empty string ("") or an empty list([]) + // will match events that lack that field + final boolean filterMissingIndices = randomBoolean(); + if (filterMissingIndices) { + if (randomBoolean()) { + final List filteredIndices = randomNonEmptyListOfFilteredNames(); + // possibly renders list empty + filteredIndices.remove(0); + if (filteredIndices.isEmpty() == false) { + allFilteredIndices.add(new ArrayList<>(filteredIndices)); + } + filteredIndices.add(""); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.missingPolicy.indices", filteredIndices); + } else { + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.missingPolicy.indices", + Collections.emptyList()); + } + } + // filtered indices are a subset of the indices of any policy + final List filterPolicy = randomFrom(allFilteredIndices); + final String[] filteredIndices = randomListFromLengthBetween(filterPolicy, 1, filterPolicy.size()).toArray(new String[0]); + // unfiltered index sets either have indices distinct from any other in any + // policy or are a mix of indices from 2 or more policies + final List unfilteredPolicy = randomFrom(allFilteredIndices); + List _unfilteredIndices; + if (randomBoolean()) { + _unfilteredIndices = randomListFromLengthBetween(unfilteredPolicy, 0, unfilteredPolicy.size()); + // add indices distinct from any index in any filter policy + for (int i = 0; i < randomIntBetween(1, 4); i++) { + _unfilteredIndices.add(UNFILTER_MARKER + randomAlphaOfLengthBetween(1, 4)); + } + } else { + _unfilteredIndices = randomListFromLengthBetween(unfilteredPolicy, 1, unfilteredPolicy.size()); + // add indices from other filter policies + final List otherIndices = randomNonEmptyListOfFilteredNames("other"); + _unfilteredIndices.addAll(randomListFromLengthBetween(otherIndices, 1, otherIndices.size())); + settingsBuilder.putList("xpack.security.audit.logfile.events.ignore_filters.otherPolicy.indices", otherIndices); + } + final String[] unfilteredIndices = _unfilteredIndices.toArray(new String[0]); + final Authentication authentication; + if (randomBoolean()) { + authentication = createAuthentication(new User("user1", new String[] { "r1" }, new User("authUsername", new String[] { "r2" })), + "effectiveRealmName"); + } else { + authentication = createAuthentication(new User("user1", new String[] { "r1" }), "effectiveRealmName"); + } + final MockToken authToken = new MockToken("token1"); + final TransportMessage noIndexMessage = new MockMessage(threadContext); + + final LoggingAuditTrail auditTrail = new LoggingAuditTrail(settingsBuilder.build(), clusterService, logger, threadContext); + // anonymous accessDenied + auditTrail.anonymousAccessDenied("_action", noIndexMessage); + if (filterMissingIndices) { + assertThat("Anonymous message no index: not filtered out by the missing indices filter", logOutput.size(), is(0)); + } else { + assertThat("Anonymous message no index: filtered out by indices filters", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.anonymousAccessDenied("_action", new MockIndicesRequest(threadContext, unfilteredIndices)); + assertThat("Anonymous message unfiltered indices: filtered out by indices filters", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.anonymousAccessDenied("_action", new 
MockIndicesRequest(threadContext, filteredIndices)); + assertThat("Anonymous message filtered indices: not filtered out by indices filters", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.anonymousAccessDenied(getRestRequest()); + if (filterMissingIndices) { + assertThat("Anonymous rest request: not filtered out by the missing indices filter", logOutput.size(), is(0)); + } else { + assertThat("Anonymous rest request: filtered out by indices filters", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + // authenticationFailed + auditTrail.authenticationFailed(getRestRequest()); + if (filterMissingIndices) { + assertThat("AuthenticationFailed no token rest request: not filtered out by the missing indices filter", logOutput.size(), + is(0)); + } else { + assertThat("AuthenticationFailed no token rest request: filtered out by indices filters", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed(authToken, "_action", noIndexMessage); + if (filterMissingIndices) { + assertThat("AuthenticationFailed token request no index: not filtered out by the missing indices filter", logOutput.size(), + is(0)); + } else { + assertThat("AuthenticationFailed token request no index: filtered out by indices filter", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed(authToken, "_action", new MockIndicesRequest(threadContext, unfilteredIndices)); + assertThat("AuthenticationFailed token request unfiltered indices: filtered out by indices filter", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed(authToken, "_action", new MockIndicesRequest(threadContext, filteredIndices)); + assertThat("AuthenticationFailed token request filtered indices: not filtered out by indices filter", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed("_action", noIndexMessage); + if (filterMissingIndices) { + assertThat("AuthenticationFailed no token message no index: not filtered out by the missing indices filter", logOutput.size(), + is(0)); + } else { + assertThat("AuthenticationFailed no token message: filtered out by indices filter", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed("_action", new MockIndicesRequest(threadContext, unfilteredIndices)); + assertThat("AuthenticationFailed no token request unfiltered indices: filtered out by indices filter", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed("_action", new MockIndicesRequest(threadContext, filteredIndices)); + assertThat("AuthenticationFailed no token request filtered indices: not filtered out by indices filter", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed(authToken, getRestRequest()); + if (filterMissingIndices) { + assertThat("AuthenticationFailed rest request: not filtered out by the missing indices filter", logOutput.size(), is(0)); + } else { + assertThat("AuthenticationFailed rest request: filtered out by indices filter", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed("_realm", authToken, "_action", noIndexMessage); + if (filterMissingIndices) { + 
assertThat("AuthenticationFailed realm message no index: not filtered out by the missing indices filter", logOutput.size(), + is(0)); + } else { + assertThat("AuthenticationFailed realm message no index: filtered out by indices filter", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed("_realm", authToken, "_action", new MockIndicesRequest(threadContext, unfilteredIndices)); + assertThat("AuthenticationFailed realm message unfiltered indices: filtered out by indices filter", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed("_realm", authToken, "_action", new MockIndicesRequest(threadContext, filteredIndices)); + assertThat("AuthenticationFailed realm message filtered indices: not filtered out by indices filter", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationFailed("_realm", authToken, getRestRequest()); + if (filterMissingIndices) { + assertThat("AuthenticationFailed realm rest request: not filtered out by the missing indices filter", logOutput.size(), is(0)); + } else { + assertThat("AuthenticationFailed realm rest request: filtered out by indices filter", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + // accessGranted + auditTrail.accessGranted(authentication, "_action", noIndexMessage, new String[] { "role1" }); + if (filterMissingIndices) { + assertThat("AccessGranted message no index: not filtered out by the missing indices filter", logOutput.size(), is(0)); + } else { + assertThat("AccessGranted message no index: filtered out by indices filter", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessGranted(authentication, "_action", new MockIndicesRequest(threadContext, unfilteredIndices), + new String[] { "role1" }); + assertThat("AccessGranted message unfiltered indices: filtered out by indices filter", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessGranted(authentication, "_action", new MockIndicesRequest(threadContext, filteredIndices), + new String[] { "role1" }); + assertThat("AccessGranted message filtered indices: not filtered out by indices filter", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessGranted(createAuthentication(SystemUser.INSTANCE, "effectiveRealmName"), "internal:_action", noIndexMessage, + new String[] { "role1" }); + if (filterMissingIndices) { + assertThat("AccessGranted message system user no index: not filtered out by the missing indices filter", logOutput.size(), + is(0)); + } else { + assertThat("AccessGranted message system user no index: filtered out by indices filter", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessGranted(createAuthentication(SystemUser.INSTANCE, "effectiveRealmName"), "internal:_action", + new MockIndicesRequest(threadContext, unfilteredIndices), + new String[] { "role1" }); + assertThat("AccessGranted message system user unfiltered indices: filtered out by indices filter", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessGranted(createAuthentication(SystemUser.INSTANCE, "effectiveRealmName"), "internal:_action", + new MockIndicesRequest(threadContext, filteredIndices), + new String[] { "role1" }); + assertThat("AccessGranted message system 
user filtered indices: not filtered out by indices filter", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + // accessDenied + auditTrail.accessDenied(authentication, "_action", noIndexMessage, new String[] { "role1" }); + if (filterMissingIndices) { + assertThat("AccessDenied message no index: not filtered out by the missing indices filter", logOutput.size(), is(0)); + } else { + assertThat("AccessDenied message no index: filtered out by indices filter", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessDenied(authentication, "_action", new MockIndicesRequest(threadContext, unfilteredIndices), + new String[] { "role1" }); + assertThat("AccessDenied message unfiltered indices: filtered out by indices filter", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessDenied(authentication, "_action", new MockIndicesRequest(threadContext, filteredIndices), + new String[] { "role1" }); + assertThat("AccessDenied message filtered indices: not filtered out by indices filter", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessDenied(createAuthentication(SystemUser.INSTANCE, "effectiveRealmName"), "internal:_action", noIndexMessage, + new String[] { "role1" }); + if (filterMissingIndices) { + assertThat("AccessDenied message system user no index: not filtered out by the missing indices filter", logOutput.size(), + is(0)); + } else { + assertThat("AccessDenied message system user no index: filtered out by indices filter", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessDenied(createAuthentication(SystemUser.INSTANCE, "effectiveRealmName"), "internal:_action", + new MockIndicesRequest(threadContext, unfilteredIndices), + new String[] { "role1" }); + assertThat("AccessDenied message system user unfiltered indices: filtered out by indices filter", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.accessDenied(createAuthentication(SystemUser.INSTANCE, "effectiveRealmName"), "internal:_action", + new MockIndicesRequest(threadContext, filteredIndices), + new String[] { "role1" }); + assertThat("AccessGranted message system user filtered indices: not filtered out by indices filter", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + // connection denied + auditTrail.connectionDenied(InetAddress.getLoopbackAddress(), "default", new SecurityIpFilterRule(false, "_all")); + if (filterMissingIndices) { + assertThat("Connection denied: not filtered out by missing indices filter", logOutput.size(), is(0)); + } else { + assertThat("Connection denied: filtered out by indices filter", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + // connection granted + auditTrail.connectionGranted(InetAddress.getLoopbackAddress(), "default", new SecurityIpFilterRule(false, "_all")); + if (filterMissingIndices) { + assertThat("Connection granted: not filtered out by missing indices filter", logOutput.size(), is(0)); + } else { + assertThat("Connection granted: filtered out by indices filter", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + // runAsGranted + auditTrail.runAsGranted(authentication, "_action", noIndexMessage, new String[] { "role1" }); + if (filterMissingIndices) { + assertThat("RunAsGranted message no index: not filtered out by 
missing indices filter", logOutput.size(), is(0)); + } else { + assertThat("RunAsGranted message no index: filtered out by indices filter", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.runAsGranted(authentication, "_action", new MockIndicesRequest(threadContext, unfilteredIndices), + new String[] { "role1" }); + assertThat("RunAsGranted message unfiltered indices: filtered out by indices filter", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.runAsGranted(authentication, "_action", new MockIndicesRequest(threadContext, filteredIndices), + new String[] { "role1" }); + assertThat("RunAsGranted message filtered indices: not filtered out by indices filter", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + // runAsDenied + auditTrail.runAsDenied(authentication, "_action", noIndexMessage, new String[] { "role1" }); + if (filterMissingIndices) { + assertThat("RunAsDenied message no index: not filtered out by missing indices filter", logOutput.size(), is(0)); + } else { + assertThat("RunAsDenied message no index: filtered out by indices filter", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.runAsDenied(authentication, "_action", new MockIndicesRequest(threadContext, unfilteredIndices), + new String[] { "role1" }); + assertThat("RunAsDenied message unfiltered indices: filtered out by indices filter", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.runAsDenied(authentication, "_action", new MockIndicesRequest(threadContext, filteredIndices), + new String[] { "role1" }); + assertThat("RunAsDenied message filtered indices: not filtered out by indices filter", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.runAsDenied(authentication, getRestRequest(), new String[] { "role1" }); + if (filterMissingIndices) { + assertThat("RunAsDenied rest request: not filtered out by missing indices filter", logOutput.size(), is(0)); + } else { + assertThat("RunAsDenied rest request: filtered out by indices filter", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + // authentication Success + auditTrail.authenticationSuccess("_realm", authentication.getUser(), getRestRequest()); + if (filterMissingIndices) { + assertThat("AuthenticationSuccess rest request: is not filtered out by the missing indices filter", logOutput.size(), is(0)); + } else { + assertThat("AuthenticationSuccess rest request: filtered out by indices filter", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationSuccess("_realm", authentication.getUser(), "_action", noIndexMessage); + if (filterMissingIndices) { + assertThat("AuthenticationSuccess message no index: not filtered out by missing indices filter", logOutput.size(), is(0)); + } else { + assertThat("AuthenticationSuccess message no index: filtered out by indices filter", logOutput.size(), is(1)); + } + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationSuccess("_realm", authentication.getUser(), "_action", + new MockIndicesRequest(threadContext, unfilteredIndices)); + assertThat("AuthenticationSuccess message unfiltered indices: filtered out by indices filter", logOutput.size(), is(1)); + logOutput.clear(); + threadContext.stashContext(); + + auditTrail.authenticationSuccess("_realm", 
authentication.getUser(), "_action", + new MockIndicesRequest(threadContext, filteredIndices)); + assertThat("AuthenticationSuccess message filtered indices: not filtered out by indices filter", logOutput.size(), is(0)); + logOutput.clear(); + threadContext.stashContext(); + } + + private List randomListFromLengthBetween(List l, int min, int max) { + assert (min >= 0) && (min <= max) && (max <= l.size()); + final int len = randomIntBetween(min, max); + final List ans = new ArrayList<>(len); + while (ans.size() < len) { + ans.add(randomFrom(l)); + } + return ans; + } + + private static Authentication createAuthentication(User user, String effectiveRealmName) { + if (user.isRunAs()) { + return new Authentication(user, + new RealmRef(UNFILTER_MARKER + randomAlphaOfLength(4), "test", "foo"), new RealmRef(effectiveRealmName, "up", "by")); + } else { + return new Authentication(user, new RealmRef(effectiveRealmName, "test", "foo"), null); + } + } + + private ClusterSettings mockClusterSettings() { + final List> settingsList = new ArrayList<>(); + LoggingAuditTrail.registerSettings(settingsList); + settingsList.addAll(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + return new ClusterSettings(settings, new HashSet<>(settingsList)); + } + + private List randomNonEmptyListOfFilteredNames(String... namePrefix) { + final List filtered = new ArrayList<>(4); + for (int i = 0; i < randomIntBetween(1, 4); i++) { + filtered.add(FILTER_MARKER + Strings.arrayToCommaDelimitedString(namePrefix) + randomAlphaOfLengthBetween(1, 4)); + } + return filtered; + } + + private RestRequest getRestRequest() throws IOException { + final RestContent content = randomFrom(RestContent.values()); + final FakeRestRequest.Builder builder = new Builder(NamedXContentRegistry.EMPTY); + if (content.hasContent()) { + builder.withContent(content.content(), XContentType.JSON); + } + builder.withPath("_uri"); + final byte address[] = InetAddress.getByName(randomBoolean() ? "127.0.0.1" : "::1").getAddress(); + builder.withRemoteAddress(new InetSocketAddress(InetAddress.getByAddress("_hostname", address), 9200)); + builder.withParams(Collections.emptyMap()); + return builder.build(); + } + + private static class MockToken implements AuthenticationToken { + private final String principal; + + MockToken(String principal) { + this.principal = principal; + } + + @Override + public String principal() { + return this.principal; + } + + @Override + public Object credentials() { + fail("it's not allowed to print the credentials of the auth token"); + return null; + } + + @Override + public void clearCredentials() { + + } + } + + static class MockIndicesRequest extends org.elasticsearch.action.MockIndicesRequest { + + MockIndicesRequest(ThreadContext threadContext, String... indices) throws IOException { + super(IndicesOptions.strictExpandOpenAndForbidClosed(), indices); + if (randomBoolean()) { + remoteAddress(buildNewFakeTransportAddress()); + } + if (randomBoolean()) { + RemoteHostHeader.putRestRemoteAddress(threadContext, new InetSocketAddress(forge("localhost", "127.0.0.1"), 1234)); + } + } + + /** creates address without any lookups. 
hostname can be null, for missing */ + private InetAddress forge(String hostname, String address) throws IOException { + final byte bytes[] = InetAddress.getByName(address).getAddress(); + return InetAddress.getByAddress(hostname, bytes); + } + + @Override + public String toString() { + return "mock-message"; + } + } + + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java new file mode 100644 index 0000000000000..408d3e797e5a3 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java @@ -0,0 +1,874 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.audit.logfile; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.mock.orig.Mockito; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.test.rest.FakeRestRequest.Builder; +import org.elasticsearch.transport.TransportMessage; +import org.elasticsearch.xpack.core.security.audit.logfile.CapturingLogger; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.rest.RemoteHostHeader; +import org.elasticsearch.xpack.security.transport.filter.IPFilter; +import org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule; +import org.junit.Before; +import org.mockito.stubbing.Answer; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.containsString; +import static org.mockito.Mockito.mock; +import static 
org.mockito.Mockito.when; + +public class LoggingAuditTrailTests extends ESTestCase { + + enum RestContent { + VALID() { + @Override + protected boolean hasContent() { + return true; + } + + @Override + protected BytesReference content() { + return new BytesArray("{ \"key\": \"value\" }"); + } + + @Override + protected String expectedMessage() { + return "{ \"key\": \"value\" }"; + } + }, + INVALID() { + @Override + protected boolean hasContent() { + return true; + } + + @Override + protected BytesReference content() { + return new BytesArray("{ \"key\": \"value\" "); + } + + @Override + protected String expectedMessage() { + return "{ \"key\": \"value\" "; + } + }, + EMPTY() { + @Override + protected boolean hasContent() { + return false; + } + + @Override + protected BytesReference content() { + throw new RuntimeException("should never be called"); + } + + @Override + protected String expectedMessage() { + return ""; + } + }; + + protected abstract boolean hasContent(); + protected abstract BytesReference content(); + protected abstract String expectedMessage(); + } + + private String prefix; + private Settings settings; + private DiscoveryNode localNode; + private ClusterService clusterService; + private ThreadContext threadContext; + private boolean includeRequestBody; + + @Before + public void init() throws Exception { + includeRequestBody = randomBoolean(); + settings = Settings.builder() + .put("xpack.security.audit.logfile.prefix.emit_node_host_address", randomBoolean()) + .put("xpack.security.audit.logfile.prefix.emit_node_host_name", randomBoolean()) + .put("xpack.security.audit.logfile.prefix.emit_node_name", randomBoolean()) + .put("xpack.security.audit.logfile.events.emit_request_body", includeRequestBody) + .build(); + localNode = mock(DiscoveryNode.class); + when(localNode.getHostAddress()).thenReturn(buildNewFakeTransportAddress().toString()); + clusterService = mock(ClusterService.class); + when(clusterService.localNode()).thenReturn(localNode); + Mockito.doAnswer((Answer) invocation -> { + final LoggingAuditTrail arg0 = (LoggingAuditTrail) invocation.getArguments()[0]; + arg0.updateLocalNodeInfo(localNode); + return null; + }).when(clusterService).addListener(Mockito.isA(LoggingAuditTrail.class)); + final ClusterSettings clusterSettings = mockClusterSettings(); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + prefix = LoggingAuditTrail.LocalNodeInfo.resolvePrefix(settings, localNode); + threadContext = new ThreadContext(Settings.EMPTY); + } + + public void testAnonymousAccessDeniedTransport() throws Exception { + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + final TransportMessage message = randomBoolean() ? 
new MockMessage(threadContext) : new MockIndicesRequest(threadContext); + final String origins = LoggingAuditTrail.originAttributes(threadContext, message, auditTrail.localNodeInfo); + auditTrail.anonymousAccessDenied("_action", message); + if (message instanceof IndicesRequest) { + assertMsg(logger, Level.INFO, prefix + "[transport] [anonymous_access_denied]\t" + origins + + ", action=[_action], indices=[" + indices(message) + "], request=[MockIndicesRequest]"); + } else { + assertMsg(logger, Level.INFO, prefix + "[transport] [anonymous_access_denied]\t" + origins + + ", action=[_action], request=[MockMessage]"); + } + + // test disabled + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + settings = Settings.builder().put(settings).put("xpack.security.audit.logfile.events.exclude", "anonymous_access_denied").build(); + auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.anonymousAccessDenied("_action", message); + assertEmptyLog(logger); + } + + public void testAnonymousAccessDeniedRest() throws Exception { + final InetAddress address = forge("_hostname", randomBoolean() ? "127.0.0.1" : "::1"); + final Tuple tuple = prepareRestContent("_uri", new InetSocketAddress(address, 9200)); + final String expectedMessage = tuple.v1().expectedMessage(); + final RestRequest request = tuple.v2(); + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.anonymousAccessDenied(request); + if (includeRequestBody) { + assertMsg(logger, Level.INFO, prefix + "[rest] [anonymous_access_denied]\torigin_address=[" + + NetworkAddress.format(address) + "], uri=[_uri], request_body=[" + expectedMessage + "]"); + } else { + assertMsg(logger, Level.INFO, prefix + "[rest] [anonymous_access_denied]\torigin_address=[" + + NetworkAddress.format(address) + "], uri=[_uri]"); + } + + // test disabled + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + settings = Settings.builder().put(settings).put("xpack.security.audit.logfile.events.exclude", "anonymous_access_denied").build(); + auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.anonymousAccessDenied(request); + assertEmptyLog(logger); + } + + public void testAuthenticationFailed() throws Exception { + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + final TransportMessage message = randomBoolean() ? 
new MockMessage(threadContext) : new MockIndicesRequest(threadContext); + final String origins = LoggingAuditTrail.originAttributes(threadContext, message, auditTrail.localNodeInfo); + auditTrail.authenticationFailed(new MockToken(), "_action", message); + if (message instanceof IndicesRequest) { + assertMsg(logger, Level.INFO, prefix + "[transport] [authentication_failed]\t" + origins + + ", principal=[_principal], action=[_action], indices=[" + indices(message) + + "], request=[MockIndicesRequest]"); + } else { + assertMsg(logger, Level.INFO, prefix + "[transport] [authentication_failed]\t" + origins + + ", principal=[_principal], action=[_action], request=[MockMessage]"); + } + + // test disabled + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + settings = Settings.builder().put(settings).put("xpack.security.audit.logfile.events.exclude", "authentication_failed").build(); + auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.authenticationFailed(new MockToken(), "_action", message); + assertEmptyLog(logger); + } + + public void testAuthenticationFailedNoToken() throws Exception { + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + final TransportMessage message = randomBoolean() ? new MockMessage(threadContext) : new MockIndicesRequest(threadContext); + final String origins = LoggingAuditTrail.originAttributes(threadContext, message, auditTrail.localNodeInfo); + auditTrail.authenticationFailed("_action", message); + if (message instanceof IndicesRequest) { + assertMsg(logger, Level.INFO, prefix + "[transport] [authentication_failed]\t" + origins + + ", action=[_action], indices=[" + indices(message) + "], request=[MockIndicesRequest]"); + } else { + assertMsg(logger, Level.INFO, prefix + "[transport] [authentication_failed]\t" + origins + + ", action=[_action], request=[MockMessage]"); + } + + // test disabled + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + settings = Settings.builder().put(settings).put("xpack.security.audit.logfile.events.exclude", "authentication_failed").build(); + auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.authenticationFailed("_action", message); + assertEmptyLog(logger); + } + + public void testAuthenticationFailedRest() throws Exception { + final InetAddress address = forge("_hostname", randomBoolean() ? 
"127.0.0.1" : "::1"); + final Tuple tuple = prepareRestContent("_uri", new InetSocketAddress(address, 9200)); + final String expectedMessage = tuple.v1().expectedMessage(); + final RestRequest request = tuple.v2(); + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.authenticationFailed(new MockToken(), request); + if (includeRequestBody) { + assertMsg(logger, Level.INFO, prefix + "[rest] [authentication_failed]\torigin_address=[" + + NetworkAddress.format(address) + "], principal=[_principal], uri=[_uri], request_body=[" + + expectedMessage + "]"); + } else { + assertMsg(logger, Level.INFO, prefix + "[rest] [authentication_failed]\torigin_address=[" + + NetworkAddress.format(address) + "], principal=[_principal], uri=[_uri]"); + } + + // test disabled + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + settings = Settings.builder().put(settings).put("xpack.security.audit.logfile.events.exclude", "authentication_failed").build(); + auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.authenticationFailed(new MockToken(), request); + assertEmptyLog(logger); + } + + public void testAuthenticationFailedRestNoToken() throws Exception { + final InetAddress address = forge("_hostname", randomBoolean() ? "127.0.0.1" : "::1"); + final Tuple tuple = prepareRestContent("_uri", new InetSocketAddress(address, 9200)); + final String expectedMessage = tuple.v1().expectedMessage(); + final RestRequest request = tuple.v2(); + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.authenticationFailed(request); + if (includeRequestBody) { + assertMsg(logger, Level.INFO, prefix + "[rest] [authentication_failed]\torigin_address=[" + + NetworkAddress.format(address) + "], uri=[_uri], request_body=[" + expectedMessage + "]"); + } else { + assertMsg(logger, Level.INFO, prefix + "[rest] [authentication_failed]\torigin_address=[" + + NetworkAddress.format(address) + "], uri=[_uri]"); + } + + // test disabled + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + settings = Settings.builder().put(settings).put("xpack.security.audit.logfile.events.exclude", "authentication_failed").build(); + auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.authenticationFailed(request); + assertEmptyLog(logger); + } + + public void testAuthenticationFailedRealm() throws Exception { + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + final TransportMessage message = randomBoolean() ? 
new MockMessage(threadContext) : new MockIndicesRequest(threadContext); + auditTrail.authenticationFailed("_realm", new MockToken(), "_action", message); + assertEmptyLog(logger); + + // test enabled + settings = + Settings.builder().put(settings).put("xpack.security.audit.logfile.events.include", "realm_authentication_failed").build(); + auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + final String origins = LoggingAuditTrail.originAttributes(threadContext, message, auditTrail.localNodeInfo); + auditTrail.authenticationFailed("_realm", new MockToken(), "_action", message); + if (message instanceof IndicesRequest) { + assertMsg(logger, Level.INFO, prefix + "[transport] [realm_authentication_failed]\trealm=[_realm], " + origins + + ", principal=[_principal], action=[_action], indices=[" + indices(message) + "], " + + "request=[MockIndicesRequest]"); + } else { + assertMsg(logger, Level.INFO, prefix + "[transport] [realm_authentication_failed]\trealm=[_realm], " + origins + + ", principal=[_principal], action=[_action], request=[MockMessage]"); + } + } + + public void testAuthenticationFailedRealmRest() throws Exception { + final InetAddress address = forge("_hostname", randomBoolean() ? "127.0.0.1" : "::1"); + final Tuple tuple = prepareRestContent("_uri", new InetSocketAddress(address, 9200)); + final String expectedMessage = tuple.v1().expectedMessage(); + final RestRequest request = tuple.v2(); + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.authenticationFailed("_realm", new MockToken(), request); + assertEmptyLog(logger); + + // test enabled + settings = + Settings.builder().put(settings).put("xpack.security.audit.logfile.events.include", "realm_authentication_failed").build(); + auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.authenticationFailed("_realm", new MockToken(), request); + if (includeRequestBody) { + assertMsg(logger, Level.INFO, prefix + "[rest] [realm_authentication_failed]\trealm=[_realm], origin_address=[" + + NetworkAddress.format(address) + "], principal=[_principal], uri=[_uri], request_body=[" + + expectedMessage + "]"); + } else { + assertMsg(logger, Level.INFO, prefix + "[rest] [realm_authentication_failed]\trealm=[_realm], origin_address=[" + + NetworkAddress.format(address) + "], principal=[_principal], uri=[_uri]"); + } + } + + public void testAccessGranted() throws Exception { + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + final TransportMessage message = randomBoolean() ? new MockMessage(threadContext) : new MockIndicesRequest(threadContext); + final String origins = LoggingAuditTrail.originAttributes(threadContext, message, auditTrail.localNodeInfo); + final boolean runAs = randomBoolean(); + User user; + if (runAs) { + user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); + } else { + user = new User("_username", new String[]{"r1"}); + } + final String role = randomAlphaOfLengthBetween(1, 6); + auditTrail.accessGranted(createAuthentication(user), "_action", message, new String[] { role }); + final String userInfo = (runAs ? 
"principal=[running as], realm=[lookRealm], run_by_principal=[_username], run_by_realm=[authRealm]" + : "principal=[_username], realm=[authRealm]") + ", roles=[" + role + "]"; + if (message instanceof IndicesRequest) { + assertMsg(logger, Level.INFO, prefix + "[transport] [access_granted]\t" + origins + ", " + userInfo + + ", action=[_action], indices=[" + indices(message) + "], request=[MockIndicesRequest]"); + } else { + assertMsg(logger, Level.INFO, prefix + "[transport] [access_granted]\t" + origins + ", " + userInfo + + ", action=[_action], request=[MockMessage]"); + } + + // test disabled + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + settings = Settings.builder().put(settings).put("xpack.security.audit.logfile.events.exclude", "access_granted").build(); + auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.accessGranted(createAuthentication(user), "_action", message, new String[] { role }); + assertEmptyLog(logger); + } + + public void testAccessGrantedInternalSystemAction() throws Exception { + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + final TransportMessage message = randomBoolean() ? new MockMessage(threadContext) : new MockIndicesRequest(threadContext); + final String role = randomAlphaOfLengthBetween(1, 6); + auditTrail.accessGranted(createAuthentication(SystemUser.INSTANCE), "internal:_action", message, new String[] { role }); + assertEmptyLog(logger); + + // test enabled + settings = Settings.builder().put(settings).put("xpack.security.audit.logfile.events.include", "system_access_granted").build(); + auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + final String origins = LoggingAuditTrail.originAttributes(threadContext, message, auditTrail.localNodeInfo); + auditTrail.accessGranted(createAuthentication(SystemUser.INSTANCE), "internal:_action", message, new String[] { role }); + if (message instanceof IndicesRequest) { + assertMsg(logger, Level.INFO, prefix + "[transport] [access_granted]\t" + origins + ", principal=[" + + SystemUser.INSTANCE.principal() + + "], realm=[authRealm], roles=[" + role + "], action=[internal:_action], indices=[" + indices(message) + + "], request=[MockIndicesRequest]"); + } else { + assertMsg(logger, Level.INFO, prefix + "[transport] [access_granted]\t" + origins + ", principal=[" + + SystemUser.INSTANCE.principal() + "], realm=[authRealm], roles=[" + role + + "], action=[internal:_action], request=[MockMessage]"); + } + } + + public void testAccessGrantedInternalSystemActionNonSystemUser() throws Exception { + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + final TransportMessage message = randomBoolean() ? 
new MockMessage(threadContext) : new MockIndicesRequest(threadContext); + final String origins = LoggingAuditTrail.originAttributes(threadContext, message, auditTrail.localNodeInfo); + final boolean runAs = randomBoolean(); + User user; + if (runAs) { + user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); + } else { + user = new User("_username", new String[]{"r1"}); + } + final String role = randomAlphaOfLengthBetween(1, 6); + auditTrail.accessGranted(createAuthentication(user), "internal:_action", message, new String[] { role }); + final String userInfo = (runAs ? "principal=[running as], realm=[lookRealm], run_by_principal=[_username], run_by_realm=[authRealm]" + : "principal=[_username], realm=[authRealm]") + ", roles=[" + role + "]"; + if (message instanceof IndicesRequest) { + assertMsg(logger, Level.INFO, prefix + "[transport] [access_granted]\t" + origins + ", " + userInfo + + ", action=[internal:_action], indices=[" + indices(message) + "], request=[MockIndicesRequest]"); + } else { + assertMsg(logger, Level.INFO, prefix + "[transport] [access_granted]\t" + origins + ", " + userInfo + + ", action=[internal:_action], request=[MockMessage]"); + } + + // test disabled + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + settings = Settings.builder().put(settings).put("xpack.security.audit.logfile.events.exclude", "access_granted").build(); + auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.accessGranted(createAuthentication(user), "internal:_action", message, new String[] { role }); + assertEmptyLog(logger); + } + + public void testAccessDenied() throws Exception { + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + final TransportMessage message = randomBoolean() ? new MockMessage(threadContext) : new MockIndicesRequest(threadContext); + final String origins = LoggingAuditTrail.originAttributes(threadContext, message, auditTrail.localNodeInfo); + final boolean runAs = randomBoolean(); + User user; + if (runAs) { + user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); + } else { + user = new User("_username", new String[]{"r1"}); + } + final String role = randomAlphaOfLengthBetween(1, 6); + auditTrail.accessDenied(createAuthentication(user), "_action", message, new String[] { role }); + final String userInfo = (runAs ? 
"principal=[running as], realm=[lookRealm], run_by_principal=[_username], run_by_realm=[authRealm]" + : "principal=[_username], realm=[authRealm]") + ", roles=[" + role + "]"; + if (message instanceof IndicesRequest) { + assertMsg(logger, Level.INFO, prefix + "[transport] [access_denied]\t" + origins + ", " + userInfo + + ", action=[_action], indices=[" + indices(message) + "], request=[MockIndicesRequest]"); + } else { + assertMsg(logger, Level.INFO, prefix + "[transport] [access_denied]\t" + origins + ", " + userInfo + + ", action=[_action], request=[MockMessage]"); + } + + // test disabled + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + settings = Settings.builder().put(settings).put("xpack.security.audit.logfile.events.exclude", "access_denied").build(); + auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.accessDenied(createAuthentication(user), "_action", message, new String[] { role }); + assertEmptyLog(logger); + } + + public void testTamperedRequestRest() throws Exception { + final InetAddress address = forge("_hostname", randomBoolean() ? "127.0.0.1" : "::1"); + final Tuple tuple = prepareRestContent("_uri", new InetSocketAddress(address, 9200)); + final String expectedMessage = tuple.v1().expectedMessage(); + final RestRequest request = tuple.v2(); + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.tamperedRequest(request); + if (includeRequestBody) { + assertMsg(logger, Level.INFO, prefix + "[rest] [tampered_request]\torigin_address=[" + + NetworkAddress.format(address) + "], uri=[_uri], request_body=[" + expectedMessage + "]"); + } else { + assertMsg(logger, Level.INFO, prefix + "[rest] [tampered_request]\torigin_address=[" + + NetworkAddress.format(address) + "], uri=[_uri]"); + } + + // test disabled + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + settings = Settings.builder().put(settings).put("xpack.security.audit.logfile.events.exclude", "tampered_request").build(); + auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.tamperedRequest(request); + assertEmptyLog(logger); + } + + public void testTamperedRequest() throws Exception { + final String action = "_action"; + final TransportMessage message = randomBoolean() ? 
new MockMessage(threadContext) : new MockIndicesRequest(threadContext); + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + final LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + final String origins = LoggingAuditTrail.originAttributes(threadContext, message, auditTrail.localNodeInfo); + auditTrail.tamperedRequest(action, message); + if (message instanceof IndicesRequest) { + assertMsg(logger, Level.INFO, prefix + "[transport] [tampered_request]\t" + origins + + ", action=[_action], indices=[" + indices(message) + "], request=[MockIndicesRequest]"); + } else { + assertMsg(logger, Level.INFO, prefix + "[transport] [tampered_request]\t" + origins + + ", action=[_action], request=[MockMessage]"); + } + + // test disabled + + } + + public void testTamperedRequestWithUser() throws Exception { + final String action = "_action"; + final boolean runAs = randomBoolean(); + User user; + if (runAs) { + user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); + } else { + user = new User("_username", new String[]{"r1"}); + } + final String userInfo = runAs ? "principal=[running as], run_by_principal=[_username]" : "principal=[_username]"; + final TransportMessage message = randomBoolean() ? new MockMessage(threadContext) : new MockIndicesRequest(threadContext); + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + final String origins = LoggingAuditTrail.originAttributes(threadContext, message, auditTrail.localNodeInfo); + auditTrail.tamperedRequest(user, action, message); + if (message instanceof IndicesRequest) { + assertMsg(logger, Level.INFO, prefix + "[transport] [tampered_request]\t" + origins + ", " + userInfo + + ", action=[_action], indices=[" + indices(message) + "], request=[MockIndicesRequest]"); + } else { + assertMsg(logger, Level.INFO, prefix + "[transport] [tampered_request]\t" + origins + ", " + userInfo + + ", action=[_action], request=[MockMessage]"); + } + + // test disabled + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + settings = Settings.builder().put(settings).put("xpack.security.audit.logfile.events.exclude", "tampered_request").build(); + auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.tamperedRequest(user, action, message); + assertEmptyLog(logger); + } + + public void testConnectionDenied() throws Exception { + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + final InetAddress inetAddress = InetAddress.getLoopbackAddress(); + final SecurityIpFilterRule rule = new SecurityIpFilterRule(false, "_all"); + auditTrail.connectionDenied(inetAddress, "default", rule); + assertMsg(logger, Level.INFO, String.format(Locale.ROOT, prefix + + "[ip_filter] [connection_denied]\torigin_address=[%s], transport_profile=[%s], rule=[deny %s]", + NetworkAddress.format(inetAddress), "default", "_all")); + + // test disabled + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + settings = Settings.builder().put(settings).put("xpack.security.audit.logfile.events.exclude", "connection_denied").build(); + auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.connectionDenied(inetAddress, "default", rule); + 
assertEmptyLog(logger); + } + + public void testConnectionGranted() throws Exception { + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + final InetAddress inetAddress = InetAddress.getLoopbackAddress(); + final SecurityIpFilterRule rule = IPFilter.DEFAULT_PROFILE_ACCEPT_ALL; + auditTrail.connectionGranted(inetAddress, "default", rule); + assertEmptyLog(logger); + + // test enabled + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + settings = Settings.builder().put(settings).put("xpack.security.audit.logfile.events.include", "connection_granted").build(); + auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.connectionGranted(inetAddress, "default", rule); + assertMsg(logger, Level.INFO, String.format(Locale.ROOT, prefix + "[ip_filter] [connection_granted]\torigin_address=[%s], " + + "transport_profile=[default], rule=[allow default:accept_all]", NetworkAddress.format(inetAddress))); + } + + public void testRunAsGranted() throws Exception { + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + final TransportMessage message = randomBoolean() ? new MockMessage(threadContext) : new MockIndicesRequest(threadContext); + final String origins = LoggingAuditTrail.originAttributes(threadContext, message, auditTrail.localNodeInfo); + final User user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); + final String role = randomAlphaOfLengthBetween(1, 6); + auditTrail.runAsGranted(createAuthentication(user), "_action", message, new String[] { role }); + if (message instanceof IndicesRequest) { + assertMsg(logger, Level.INFO, + prefix + "[transport] [run_as_granted]\t" + origins + + ", principal=[_username], realm=[authRealm], run_as_principal=[running as], run_as_realm=[lookRealm], roles=[" + + role + "], action=[_action], indices=[" + indices(message) + "], request=[MockIndicesRequest]"); + } else { + assertMsg(logger, Level.INFO, + prefix + "[transport] [run_as_granted]\t" + origins + + ", principal=[_username], realm=[authRealm], run_as_principal=[running as], run_as_realm=[lookRealm], roles=[" + + role + "], action=[_action], request=[MockMessage]"); + } + + // test disabled + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + settings = Settings.builder().put(settings).put("xpack.security.audit.logfile.events.exclude", "run_as_granted").build(); + auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.runAsGranted(createAuthentication(user), "_action", message, new String[] { role }); + assertEmptyLog(logger); + } + + public void testRunAsDenied() throws Exception { + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + final TransportMessage message = randomBoolean() ? 
new MockMessage(threadContext) : new MockIndicesRequest(threadContext); + final String origins = LoggingAuditTrail.originAttributes(threadContext, message, auditTrail.localNodeInfo); + final User user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); + final String role = randomAlphaOfLengthBetween(1, 6); + auditTrail.runAsDenied(createAuthentication(user), "_action", message, new String[] { role }); + if (message instanceof IndicesRequest) { + assertMsg(logger, Level.INFO, + prefix + "[transport] [run_as_denied]\t" + origins + + ", principal=[_username], realm=[authRealm], run_as_principal=[running as], run_as_realm=[lookRealm], roles=[" + + role + "], action=[_action], indices=[" + indices(message) + "], request=[MockIndicesRequest]"); + } else { + assertMsg(logger, Level.INFO, + prefix + "[transport] [run_as_denied]\t" + origins + + ", principal=[_username], realm=[authRealm], run_as_principal=[running as], run_as_realm=[lookRealm], roles=[" + + role + "], action=[_action], request=[MockMessage]"); + } + + // test disabled + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + settings = Settings.builder().put(settings).put("xpack.security.audit.logfile.events.exclude", "run_as_denied").build(); + auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.runAsDenied(createAuthentication(user), "_action", message, new String[] { role }); + assertEmptyLog(logger); + } + + public void testOriginAttributes() throws Exception { + final MockMessage message = new MockMessage(threadContext); + final LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + final String text = LoggingAuditTrail.originAttributes(threadContext, message, auditTrail.localNodeInfo); + final InetSocketAddress restAddress = RemoteHostHeader.restRemoteAddress(threadContext); + if (restAddress != null) { + assertThat(text, equalTo("origin_type=[rest], origin_address=[" + + NetworkAddress.format(restAddress.getAddress()) + "]")); + return; + } + final TransportAddress address = message.remoteAddress(); + if (address == null) { + assertThat(text, equalTo("origin_type=[local_node], origin_address=[" + localNode.getHostAddress() + "]")); + return; + } + + assertThat(text, equalTo("origin_type=[transport], origin_address=[" + + NetworkAddress.format(address.address().getAddress()) + "]")); + } + + public void testAuthenticationSuccessRest() throws Exception { + final Map params = new HashMap<>(); + params.put("foo", "bar"); + final InetAddress address = forge("_hostname", randomBoolean() ? "127.0.0.1" : "::1"); + final Tuple tuple = prepareRestContent("_uri", new InetSocketAddress(address, 9200), params); + final String expectedMessage = tuple.v1().expectedMessage(); + final RestRequest request = tuple.v2(); + final boolean runAs = randomBoolean(); + User user; + if (runAs) { + user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); + } else { + user = new User("_username", new String[] { "r1" }); + } + final String userInfo = runAs ? 
"principal=[running as], run_by_principal=[_username]" : "principal=[_username]"; + final String realm = "_realm"; + + Settings settings = Settings.builder().put(this.settings) + .put("xpack.security.audit.logfile.events.include", "authentication_success") + .build(); + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.authenticationSuccess(realm, user, request); + if (includeRequestBody) { + assertMsg(logger, Level.INFO, + prefix + "[rest] [authentication_success]\t" + userInfo + ", realm=[_realm], uri=[_uri], params=[" + params + + "], request_body=[" + expectedMessage + "]"); + } else { + assertMsg(logger, Level.INFO, + prefix + "[rest] [authentication_success]\t" + userInfo + ", realm=[_realm], uri=[_uri], params=[" + params + "]"); + } + + // test disabled + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + settings = Settings.builder().put(this.settings).put("xpack.security.audit.logfile.events.exclude", "authentication_success") + .build(); + auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.authenticationSuccess(realm, user, request); + assertEmptyLog(logger); + } + + public void testAuthenticationSuccessTransport() throws Exception { + Settings settings = Settings.builder().put(this.settings) + .put("xpack.security.audit.logfile.events.include", "authentication_success").build(); + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + LoggingAuditTrail auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + final TransportMessage message = randomBoolean() ? new MockMessage(threadContext) : new MockIndicesRequest(threadContext); + final String origins = LoggingAuditTrail.originAttributes(threadContext, message, auditTrail.localNodeInfo); + final boolean runAs = randomBoolean(); + User user; + if (runAs) { + user = new User("running as", new String[]{"r2"}, new User("_username", new String[] {"r1"})); + } else { + user = new User("_username", new String[] { "r1" }); + } + final String userInfo = runAs ? 
"principal=[running as], run_by_principal=[_username]" : "principal=[_username]"; + final String realm = "_realm"; + auditTrail.authenticationSuccess(realm, user, "_action", message); + if (message instanceof IndicesRequest) { + assertMsg(logger, Level.INFO, prefix + "[transport] [authentication_success]\t" + origins + ", " + userInfo + + ", realm=[_realm], action=[_action], indices=[" + indices(message) + "], request=[MockIndicesRequest]"); + } else { + assertMsg(logger, Level.INFO, prefix + "[transport] [authentication_success]\t" + origins + ", " + userInfo + + ", realm=[_realm], action=[_action], request=[MockMessage]"); + } + + // test disabled + CapturingLogger.output(logger.getName(), Level.INFO).clear(); + settings = Settings.builder() + .put(this.settings) + .put("xpack.security.audit.logfile.events.exclude", "authentication_success") + .build(); + auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext); + auditTrail.authenticationSuccess(realm, user, "_action", message); + assertEmptyLog(logger); + } + + public void testRequestsWithoutIndices() throws Exception { + final Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + final Settings allEventsSettings = Settings.builder() + .put(settings) + .put("xpack.security.audit.logfile.events.include", "_all") + .build(); + final LoggingAuditTrail auditTrail = new LoggingAuditTrail(allEventsSettings, clusterService, logger, threadContext); + final User user = new User("_username", new String[] { "r1" }); + final String role = randomAlphaOfLengthBetween(1, 6); + final String realm = randomAlphaOfLengthBetween(1, 6); + // transport messages without indices + final TransportMessage[] messages = new TransportMessage[] { new MockMessage(threadContext), + new org.elasticsearch.action.MockIndicesRequest(IndicesOptions.strictExpandOpenAndForbidClosed(), new String[0]), + new org.elasticsearch.action.MockIndicesRequest(IndicesOptions.strictExpandOpenAndForbidClosed(), (String[]) null) }; + final List output = CapturingLogger.output(logger.getName(), Level.INFO); + int logEntriesCount = 1; + for (final TransportMessage message : messages) { + auditTrail.anonymousAccessDenied("_action", message); + assertThat(output.size(), is(logEntriesCount++)); + assertThat(output.get(logEntriesCount - 2), not(containsString("indices=["))); + auditTrail.authenticationFailed(new MockToken(), "_action", message); + assertThat(output.size(), is(logEntriesCount++)); + assertThat(output.get(logEntriesCount - 2), not(containsString("indices=["))); + auditTrail.authenticationFailed("_action", message); + assertThat(output.size(), is(logEntriesCount++)); + assertThat(output.get(logEntriesCount - 2), not(containsString("indices=["))); + auditTrail.authenticationFailed(realm, new MockToken(), "_action", message); + assertThat(output.size(), is(logEntriesCount++)); + assertThat(output.get(logEntriesCount - 2), not(containsString("indices=["))); + auditTrail.accessGranted(createAuthentication(user), "_action", message, new String[] { role }); + assertThat(output.size(), is(logEntriesCount++)); + assertThat(output.get(logEntriesCount - 2), not(containsString("indices=["))); + auditTrail.accessDenied(createAuthentication(user), "_action", message, new String[] { role }); + assertThat(output.size(), is(logEntriesCount++)); + assertThat(output.get(logEntriesCount - 2), not(containsString("indices=["))); + auditTrail.tamperedRequest("_action", message); + assertThat(output.size(), is(logEntriesCount++)); + 
assertThat(output.get(logEntriesCount - 2), not(containsString("indices=["))); + auditTrail.tamperedRequest(user, "_action", message); + assertThat(output.size(), is(logEntriesCount++)); + assertThat(output.get(logEntriesCount - 2), not(containsString("indices=["))); + auditTrail.runAsGranted(createAuthentication(user), "_action", message, new String[] { role }); + assertThat(output.size(), is(logEntriesCount++)); + assertThat(output.get(logEntriesCount - 2), not(containsString("indices=["))); + auditTrail.runAsDenied(createAuthentication(user), "_action", message, new String[] { role }); + assertThat(output.size(), is(logEntriesCount++)); + assertThat(output.get(logEntriesCount - 2), not(containsString("indices=["))); + auditTrail.authenticationSuccess(realm, user, "_action", message); + assertThat(output.size(), is(logEntriesCount++)); + assertThat(output.get(logEntriesCount - 2), not(containsString("indices=["))); + } + } + + private void assertMsg(Logger logger, Level level, String message) { + final List output = CapturingLogger.output(logger.getName(), level); + assertThat(output.size(), is(1)); + assertThat(output.get(0), equalTo(message)); + } + + private void assertEmptyLog(Logger logger) { + assertThat(CapturingLogger.isEmpty(logger.getName()), is(true)); + } + + protected Tuple prepareRestContent(String uri, InetSocketAddress remoteAddress) { + return prepareRestContent(uri, remoteAddress, Collections.emptyMap()); + } + + private Tuple prepareRestContent(String uri, InetSocketAddress remoteAddress, Map params) { + final RestContent content = randomFrom(RestContent.values()); + final FakeRestRequest.Builder builder = new Builder(NamedXContentRegistry.EMPTY); + if (content.hasContent()) { + builder.withContent(content.content(), XContentType.JSON); + } + builder.withPath(uri); + builder.withRemoteAddress(remoteAddress); + builder.withParams(params); + return new Tuple<>(content, builder.build()); + } + + /** creates address without any lookups. hostname can be null, for missing */ + protected static InetAddress forge(String hostname, String address) throws IOException { + final byte bytes[] = InetAddress.getByName(address).getAddress(); + return InetAddress.getByAddress(hostname, bytes); + } + + private static String indices(TransportMessage message) { + return Strings.arrayToCommaDelimitedString(((IndicesRequest) message).indices()); + } + + private static Authentication createAuthentication(User user) { + final RealmRef lookedUpBy = user.authenticatedUser() == user ? 
null : new RealmRef("lookRealm", "up", "by"); + return new Authentication(user, new RealmRef("authRealm", "test", "foo"), lookedUpBy); + } + + private ClusterSettings mockClusterSettings() { + final List> settingsList = new ArrayList<>(); + LoggingAuditTrail.registerSettings(settingsList); + settingsList.addAll(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + return new ClusterSettings(settings, new HashSet<>(settingsList)); + } + + static class MockMessage extends TransportMessage { + + MockMessage(ThreadContext threadContext) throws IOException { + if (randomBoolean()) { + if (randomBoolean()) { + remoteAddress(buildNewFakeTransportAddress()); + } else { + remoteAddress(new TransportAddress(InetAddress.getLoopbackAddress(), 1234)); + } + } + if (randomBoolean()) { + RemoteHostHeader.putRestRemoteAddress(threadContext, new InetSocketAddress(forge("localhost", "127.0.0.1"), 1234)); + } + } + } + + static class MockIndicesRequest extends org.elasticsearch.action.MockIndicesRequest { + + MockIndicesRequest(ThreadContext threadContext) throws IOException { + super(IndicesOptions.strictExpandOpenAndForbidClosed(), "idx1", "idx2"); + if (randomBoolean()) { + remoteAddress(buildNewFakeTransportAddress()); + } + if (randomBoolean()) { + RemoteHostHeader.putRestRemoteAddress(threadContext, new InetSocketAddress(forge("localhost", "127.0.0.1"), 1234)); + } + } + + @Override + public String toString() { + return "mock-message"; + } + } + + private static class MockToken implements AuthenticationToken { + @Override + public String principal() { + return "_principal"; + } + + @Override + public Object credentials() { + fail("it's not allowed to print the credentials of the auth token"); + return null; + } + + @Override + public void clearCredentials() { + + } + } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java new file mode 100644 index 0000000000000..0c75e36fa6c04 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -0,0 +1,1038 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc; + +import org.apache.lucene.util.ArrayUtil; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.get.GetAction; +import org.elasticsearch.action.get.GetRequestBuilder; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.get.MultiGetAction; +import org.elasticsearch.action.get.MultiGetItemResponse; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.get.MultiGetRequestBuilder; +import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.index.IndexAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.update.UpdateAction; +import org.elasticsearch.action.update.UpdateRequestBuilder; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.threadpool.FixedExecutorBuilder; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportMessage; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import org.elasticsearch.xpack.core.security.authc.AuthenticationField; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.authc.DefaultAuthenticationFailureHandler; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.Realm.Factory; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import 
org.elasticsearch.xpack.security.audit.AuditTrailService; +import org.elasticsearch.xpack.security.authc.AuthenticationService.Authenticator; +import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.time.Clock; +import java.util.Arrays; +import java.util.Base64; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; + +import static org.elasticsearch.test.SecurityTestsUtils.assertAuthenticationException; +import static org.elasticsearch.xpack.core.security.support.Exceptions.authenticationError; +import static org.elasticsearch.xpack.security.authc.TokenServiceTests.mockGetTokenFromId; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + + +/** + * Unit tests for the {@link AuthenticationService} + */ +public class AuthenticationServiceTests extends ESTestCase { + + private AuthenticationService service; + private TransportMessage message; + private RestRequest restRequest; + private Realms realms; + private Realm firstRealm; + private Realm secondRealm; + private AuditTrailService auditTrail; + private AuthenticationToken token; + private ThreadPool threadPool; + private ThreadContext threadContext; + private TokenService tokenService; + private SecurityLifecycleService lifecycleService; + private Client client; + private InetSocketAddress remoteAddress; + + @Before + @SuppressForbidden(reason = "Allow accessing localhost") + public void init() throws Exception { + token = mock(AuthenticationToken.class); + message = new InternalMessage(); + remoteAddress = new InetSocketAddress(InetAddress.getLocalHost(), 100); + message.remoteAddress(new TransportAddress(remoteAddress)); + restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withRemoteAddress(remoteAddress).build(); + threadContext = new ThreadContext(Settings.EMPTY); + + firstRealm = mock(Realm.class); + when(firstRealm.type()).thenReturn("file"); + when(firstRealm.name()).thenReturn("file_realm"); + secondRealm = mock(Realm.class); + when(secondRealm.type()).thenReturn("second"); + when(secondRealm.name()).thenReturn("second_realm"); + Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put("node.name", "authc_test") + .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true) + .build(); + XPackLicenseState licenseState = mock(XPackLicenseState.class); + 
when(licenseState.allowedRealmType()).thenReturn(XPackLicenseState.AllowedRealmType.ALL); + when(licenseState.isAuthAllowed()).thenReturn(true); + when(licenseState.isSecurityEnabled()).thenReturn(true); + realms = new TestRealms(Settings.EMPTY, TestEnvironment.newEnvironment(settings), Collections.emptyMap(), + licenseState, threadContext, mock(ReservedRealm.class), Arrays.asList(firstRealm, secondRealm), + Collections.singletonList(firstRealm)); + + auditTrail = mock(AuditTrailService.class); + client = mock(Client.class); + threadPool = new ThreadPool(settings, + new FixedExecutorBuilder(settings, TokenService.THREAD_POOL_NAME, 1, 1000, "xpack.security.authc.token.thread_pool")); + threadContext = threadPool.getThreadContext(); + when(client.threadPool()).thenReturn(threadPool); + when(client.settings()).thenReturn(settings); + when(client.prepareIndex(any(String.class), any(String.class), any(String.class))) + .thenReturn(new IndexRequestBuilder(client, IndexAction.INSTANCE)); + when(client.prepareUpdate(any(String.class), any(String.class), any(String.class))) + .thenReturn(new UpdateRequestBuilder(client, UpdateAction.INSTANCE)); + doAnswer(invocationOnMock -> { + ActionListener responseActionListener = (ActionListener) invocationOnMock.getArguments()[2]; + responseActionListener.onResponse(new IndexResponse()); + return null; + }).when(client).execute(eq(IndexAction.INSTANCE), any(IndexRequest.class), any(ActionListener.class)); + doAnswer(invocationOnMock -> { + GetRequestBuilder builder = new GetRequestBuilder(client, GetAction.INSTANCE); + builder.setIndex((String) invocationOnMock.getArguments()[0]) + .setType((String) invocationOnMock.getArguments()[1]) + .setId((String) invocationOnMock.getArguments()[2]); + return builder; + }).when(client).prepareGet(anyString(), anyString(), anyString()); + lifecycleService = mock(SecurityLifecycleService.class); + doAnswer(invocationOnMock -> { + Runnable runnable = (Runnable) invocationOnMock.getArguments()[1]; + runnable.run(); + return null; + }).when(lifecycleService).prepareIndexIfNeededThenExecute(any(Consumer.class), any(Runnable.class)); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + tokenService = new TokenService(settings, Clock.systemUTC(), client, lifecycleService, clusterService); + service = new AuthenticationService(settings, realms, auditTrail, + new DefaultAuthenticationFailureHandler(), threadPool, new AnonymousUser(settings), tokenService); + } + + @After + public void shutdownThreadpool() throws InterruptedException { + if (threadPool != null) { + terminate(threadPool); + } + } + + @SuppressWarnings("unchecked") + public void testTokenFirstMissingSecondFound() throws Exception { + when(firstRealm.token(threadContext)).thenReturn(null); + when(secondRealm.token(threadContext)).thenReturn(token); + + PlainActionFuture future = new PlainActionFuture<>(); + Authenticator authenticator = service.createAuthenticator("_action", message, null, future); + authenticator.extractToken((result) -> { + assertThat(result, notNullValue()); + assertThat(result, is(token)); + verifyZeroInteractions(auditTrail); + }); + } + + public void testTokenMissing() throws Exception { + PlainActionFuture future = new PlainActionFuture<>(); + Authenticator authenticator = service.createAuthenticator("_action", message, null, future); + authenticator.extractToken((token) -> { + assertThat(token, nullValue()); + authenticator.handleNullToken(); + }); + + ElasticsearchSecurityException e = 
expectThrows(ElasticsearchSecurityException.class, () -> future.actionGet()); + assertThat(e.getMessage(), containsString("missing authentication token")); + verify(auditTrail).anonymousAccessDenied("_action", message); + verifyNoMoreInteractions(auditTrail); + } + + @SuppressWarnings("unchecked") + public void testAuthenticateBothSupportSecondSucceeds() throws Exception { + User user = new User("_username", "r1"); + when(firstRealm.supports(token)).thenReturn(true); + mockAuthenticate(firstRealm, token, null); + when(secondRealm.supports(token)).thenReturn(true); + mockAuthenticate(secondRealm, token, user); + if (randomBoolean()) { + when(firstRealm.token(threadContext)).thenReturn(token); + } else { + when(secondRealm.token(threadContext)).thenReturn(token); + } + + final AtomicBoolean completed = new AtomicBoolean(false); + service.authenticate("_action", message, (User)null, ActionListener.wrap(result -> { + assertThat(result, notNullValue()); + assertThat(result.getUser(), is(user)); + assertThat(result.getLookedUpBy(), is(nullValue())); + assertThat(result.getAuthenticatedBy(), is(notNullValue())); // TODO implement equals + assertThreadContextContainsAuthentication(result); + setCompletedToTrue(completed); + }, this::logAndFail)); + assertTrue(completed.get()); + verify(auditTrail).authenticationFailed(firstRealm.name(), token, "_action", message); + } + + public void testAuthenticateFirstNotSupportingSecondSucceeds() throws Exception { + User user = new User("_username", "r1"); + when(firstRealm.supports(token)).thenReturn(false); + when(secondRealm.supports(token)).thenReturn(true); + mockAuthenticate(secondRealm, token, user); + when(secondRealm.token(threadContext)).thenReturn(token); + + final AtomicBoolean completed = new AtomicBoolean(false); + service.authenticate("_action", message, (User)null, ActionListener.wrap(result -> { + assertThat(result, notNullValue()); + assertThat(result.getUser(), is(user)); + assertThreadContextContainsAuthentication(result); + setCompletedToTrue(completed); + }, this::logAndFail)); + verify(auditTrail).authenticationSuccess(secondRealm.name(), user, "_action", message); + verifyNoMoreInteractions(auditTrail); + verify(firstRealm, never()).authenticate(eq(token), any(ActionListener.class)); + assertTrue(completed.get()); + } + + public void testAuthenticateCached() throws Exception { + final Authentication authentication = new Authentication(new User("_username", "r1"), new RealmRef("test", "cached", "foo"), null); + authentication.writeToContext(threadContext); + + Authentication result = authenticateBlocking("_action", message, null); + + assertThat(result, notNullValue()); + assertThat(result, is(authentication)); + verifyZeroInteractions(auditTrail); + verifyZeroInteractions(firstRealm); + verifyZeroInteractions(secondRealm); + } + + public void testAuthenticateNonExistentRestRequestUserThrowsAuthenticationException() throws Exception { + when(firstRealm.token(threadContext)).thenReturn(new UsernamePasswordToken("idonotexist", + new SecureString("passwd".toCharArray()))); + try { + authenticateBlocking(restRequest); + fail("Authentication was successful but should not"); + } catch (ElasticsearchSecurityException e) { + assertAuthenticationException(e, containsString("unable to authenticate user [idonotexist] for REST request [/]")); + } + } + + public void testTokenRestMissing() throws Exception { + when(firstRealm.token(threadContext)).thenReturn(null); + when(secondRealm.token(threadContext)).thenReturn(null); + + Authenticator 
authenticator = service.createAuthenticator(restRequest, mock(ActionListener.class)); + authenticator.extractToken((token) -> { + assertThat(token, nullValue()); + }); + } + + public void authenticationInContextAndHeader() throws Exception { + User user = new User("_username", "r1"); + when(firstRealm.token(threadContext)).thenReturn(token); + when(firstRealm.supports(token)).thenReturn(true); + mockAuthenticate(firstRealm, token, user); + + Authentication result = authenticateBlocking("_action", message, null); + + assertThat(result, notNullValue()); + assertThat(result.getUser(), is(user)); + + String userStr = threadContext.getHeader(AuthenticationField.AUTHENTICATION_KEY); + assertThat(userStr, notNullValue()); + assertThat(userStr, equalTo("_signed_auth")); + + Authentication ctxAuth = threadContext.getTransient(AuthenticationField.AUTHENTICATION_KEY); + assertThat(ctxAuth, is(result)); + } + + public void testAuthenticateTransportAnonymous() throws Exception { + when(firstRealm.token(threadContext)).thenReturn(null); + when(secondRealm.token(threadContext)).thenReturn(null); + try { + authenticateBlocking("_action", message, null); + fail("expected an authentication exception when trying to authenticate an anonymous message"); + } catch (ElasticsearchSecurityException e) { + // expected + assertAuthenticationException(e); + } + verify(auditTrail).anonymousAccessDenied("_action", message); + } + + public void testAuthenticateRestAnonymous() throws Exception { + when(firstRealm.token(threadContext)).thenReturn(null); + when(secondRealm.token(threadContext)).thenReturn(null); + try { + authenticateBlocking(restRequest); + fail("expected an authentication exception when trying to authenticate an anonymous message"); + } catch (ElasticsearchSecurityException e) { + // expected + assertAuthenticationException(e); + } + verify(auditTrail).anonymousAccessDenied(restRequest); + } + + public void testAuthenticateTransportFallback() throws Exception { + when(firstRealm.token(threadContext)).thenReturn(null); + when(secondRealm.token(threadContext)).thenReturn(null); + User user1 = new User("username", "r1", "r2"); + + Authentication result = authenticateBlocking("_action", message, user1); + assertThat(result, notNullValue()); + assertThat(result.getUser(), sameInstance(user1)); + assertThreadContextContainsAuthentication(result); + } + + public void testAuthenticateTransportDisabledUser() throws Exception { + User user = new User("username", new String[] { "r1", "r2" }, null, null, null, false); + User fallback = randomBoolean() ? 
SystemUser.INSTANCE : null; + when(firstRealm.token(threadContext)).thenReturn(token); + when(firstRealm.supports(token)).thenReturn(true); + mockAuthenticate(firstRealm, token, user); + + ElasticsearchSecurityException e = + expectThrows(ElasticsearchSecurityException.class, () -> authenticateBlocking("_action", message, fallback)); + verify(auditTrail).authenticationFailed(token, "_action", message); + verifyNoMoreInteractions(auditTrail); + assertAuthenticationException(e); + } + + public void testAuthenticateRestDisabledUser() throws Exception { + User user = new User("username", new String[] { "r1", "r2" }, null, null, null, false); + when(firstRealm.token(threadContext)).thenReturn(token); + when(firstRealm.supports(token)).thenReturn(true); + mockAuthenticate(firstRealm, token, user); + + ElasticsearchSecurityException e = + expectThrows(ElasticsearchSecurityException.class, () -> authenticateBlocking(restRequest)); + verify(auditTrail).authenticationFailed(token, restRequest); + verifyNoMoreInteractions(auditTrail); + assertAuthenticationException(e); + } + + public void testAuthenticateTransportSuccess() throws Exception { + User user = new User("username", "r1", "r2"); + User fallback = randomBoolean() ? SystemUser.INSTANCE : null; + when(firstRealm.token(threadContext)).thenReturn(token); + when(firstRealm.supports(token)).thenReturn(true); + mockAuthenticate(firstRealm, token, user); + + final AtomicBoolean completed = new AtomicBoolean(false); + service.authenticate("_action", message, fallback, ActionListener.wrap(result -> { + assertThat(result, notNullValue()); + assertThat(result.getUser(), sameInstance(user)); + assertThreadContextContainsAuthentication(result); + setCompletedToTrue(completed); + }, this::logAndFail)); + + verify(auditTrail).authenticationSuccess(firstRealm.name(), user, "_action", message); + verifyNoMoreInteractions(auditTrail); + assertTrue(completed.get()); + } + + public void testAuthenticateRestSuccess() throws Exception { + User user1 = new User("username", "r1", "r2"); + when(firstRealm.token(threadContext)).thenReturn(token); + when(firstRealm.supports(token)).thenReturn(true); + mockAuthenticate(firstRealm, token, user1); + // this call does not actually go async + final AtomicBoolean completed = new AtomicBoolean(false); + service.authenticate(restRequest, ActionListener.wrap(authentication -> { + assertThat(authentication, notNullValue()); + assertThat(authentication.getUser(), sameInstance(user1)); + assertThreadContextContainsAuthentication(authentication); + setCompletedToTrue(completed); + }, this::logAndFail)); + verify(auditTrail).authenticationSuccess(firstRealm.name(), user1, restRequest); + verifyNoMoreInteractions(auditTrail); + assertTrue(completed.get()); + } + + public void testAutheticateTransportContextAndHeader() throws Exception { + User user1 = new User("username", "r1", "r2"); + when(firstRealm.token(threadContext)).thenReturn(token); + when(firstRealm.supports(token)).thenReturn(true); + mockAuthenticate(firstRealm, token, user1); + final AtomicBoolean completed = new AtomicBoolean(false); + final SetOnce authRef = new SetOnce<>(); + final SetOnce authHeaderRef = new SetOnce<>(); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + service.authenticate("_action", message, SystemUser.INSTANCE, ActionListener.wrap(authentication -> { + + assertThat(authentication, notNullValue()); + assertThat(authentication.getUser(), sameInstance(user1)); + assertThreadContextContainsAuthentication(authentication); 
+ authRef.set(authentication); + authHeaderRef.set(threadContext.getHeader(AuthenticationField.AUTHENTICATION_KEY)); + setCompletedToTrue(completed); + }, this::logAndFail)); + } + assertTrue(completed.compareAndSet(true, false)); + reset(firstRealm); + + // checking authentication from the context + InternalMessage message1 = new InternalMessage(); + ThreadPool threadPool1 = new TestThreadPool("testAutheticateTransportContextAndHeader1"); + try { + ThreadContext threadContext1 = threadPool1.getThreadContext(); + service = new AuthenticationService(Settings.EMPTY, realms, auditTrail, + new DefaultAuthenticationFailureHandler(), threadPool1, new AnonymousUser(Settings.EMPTY), tokenService); + + + threadContext1.putTransient(AuthenticationField.AUTHENTICATION_KEY, authRef.get()); + threadContext1.putHeader(AuthenticationField.AUTHENTICATION_KEY, authHeaderRef.get()); + service.authenticate("_action", message1, SystemUser.INSTANCE, ActionListener.wrap(ctxAuth -> { + assertThat(ctxAuth, sameInstance(authRef.get())); + assertThat(threadContext1.getHeader(AuthenticationField.AUTHENTICATION_KEY), sameInstance(authHeaderRef.get())); + setCompletedToTrue(completed); + }, this::logAndFail)); + assertTrue(completed.compareAndSet(true, false)); + verifyZeroInteractions(firstRealm); + reset(firstRealm); + } finally { + terminate(threadPool1); + } + + // checking authentication from the user header + ThreadPool threadPool2 = new TestThreadPool("testAutheticateTransportContextAndHeader2"); + try { + ThreadContext threadContext2 = threadPool2.getThreadContext(); + final String header; + try (ThreadContext.StoredContext ignore = threadContext2.stashContext()) { + service = new AuthenticationService(Settings.EMPTY, realms, auditTrail, + new DefaultAuthenticationFailureHandler(), threadPool2, new AnonymousUser(Settings.EMPTY), tokenService); + threadContext2.putHeader(AuthenticationField.AUTHENTICATION_KEY, authHeaderRef.get()); + + BytesStreamOutput output = new BytesStreamOutput(); + threadContext2.writeTo(output); + StreamInput input = output.bytes().streamInput(); + threadContext2 = new ThreadContext(Settings.EMPTY); + threadContext2.readHeaders(input); + header = threadContext2.getHeader(AuthenticationField.AUTHENTICATION_KEY); + } + + threadPool2.getThreadContext().putHeader(AuthenticationField.AUTHENTICATION_KEY, header); + service = new AuthenticationService(Settings.EMPTY, realms, auditTrail, + new DefaultAuthenticationFailureHandler(), threadPool2, new AnonymousUser(Settings.EMPTY), tokenService); + service.authenticate("_action", new InternalMessage(), SystemUser.INSTANCE, ActionListener.wrap(result -> { + assertThat(result, notNullValue()); + assertThat(result.getUser(), equalTo(user1)); + setCompletedToTrue(completed); + }, this::logAndFail)); + assertTrue(completed.get()); + verifyZeroInteractions(firstRealm); + } finally { + terminate(threadPool2); + } + } + + public void testAuthenticateTamperedUser() throws Exception { + InternalMessage message = new InternalMessage(); + threadContext.putHeader(AuthenticationField.AUTHENTICATION_KEY, "_signed_auth"); + + try { + authenticateBlocking("_action", message, randomBoolean() ? SystemUser.INSTANCE : null); + } catch (Exception e) { + //expected + verify(auditTrail).tamperedRequest("_action", message); + verifyNoMoreInteractions(auditTrail); + } + } + + public void testAnonymousUserRest() throws Exception { + String username = randomBoolean() ? 
AnonymousUser.DEFAULT_ANONYMOUS_USERNAME : "user1"; + Settings.Builder builder = Settings.builder() + .putList(AnonymousUser.ROLES_SETTING.getKey(), "r1", "r2", "r3"); + if (username.equals(AnonymousUser.DEFAULT_ANONYMOUS_USERNAME) == false) { + builder.put(AnonymousUser.USERNAME_SETTING.getKey(), username); + } + Settings settings = builder.build(); + final AnonymousUser anonymousUser = new AnonymousUser(settings); + service = new AuthenticationService(settings, realms, auditTrail, new DefaultAuthenticationFailureHandler(), + threadPool, anonymousUser, tokenService); + RestRequest request = new FakeRestRequest(); + + Authentication result = authenticateBlocking(request); + + assertThat(result, notNullValue()); + assertThat(result.getUser(), sameInstance((Object) anonymousUser)); + assertThreadContextContainsAuthentication(result); + verify(auditTrail).authenticationSuccess("__anonymous", new AnonymousUser(settings), request); + verifyNoMoreInteractions(auditTrail); + } + + public void testAnonymousUserTransportNoDefaultUser() throws Exception { + Settings settings = Settings.builder() + .putList(AnonymousUser.ROLES_SETTING.getKey(), "r1", "r2", "r3") + .build(); + final AnonymousUser anonymousUser = new AnonymousUser(settings); + service = new AuthenticationService(settings, realms, auditTrail, + new DefaultAuthenticationFailureHandler(), threadPool, anonymousUser, tokenService); + InternalMessage message = new InternalMessage(); + + Authentication result = authenticateBlocking("_action", message, null); + assertThat(result, notNullValue()); + assertThat(result.getUser(), sameInstance(anonymousUser)); + assertThreadContextContainsAuthentication(result); + } + + public void testAnonymousUserTransportWithDefaultUser() throws Exception { + Settings settings = Settings.builder() + .putList(AnonymousUser.ROLES_SETTING.getKey(), "r1", "r2", "r3") + .build(); + final AnonymousUser anonymousUser = new AnonymousUser(settings); + service = new AuthenticationService(settings, realms, auditTrail, + new DefaultAuthenticationFailureHandler(), threadPool, anonymousUser, tokenService); + + InternalMessage message = new InternalMessage(); + + Authentication result = authenticateBlocking("_action", message, SystemUser.INSTANCE); + assertThat(result, notNullValue()); + assertThat(result.getUser(), sameInstance(SystemUser.INSTANCE)); + assertThreadContextContainsAuthentication(result); + } + + public void testRealmTokenThrowingException() throws Exception { + when(firstRealm.token(threadContext)).thenThrow(authenticationError("realm doesn't like tokens")); + try { + authenticateBlocking("_action", message, null); + fail("exception should bubble out"); + } catch (ElasticsearchException e) { + assertThat(e.getMessage(), is("realm doesn't like tokens")); + verify(auditTrail).authenticationFailed("_action", message); + } + } + + public void testRealmTokenThrowingExceptionRest() throws Exception { + when(firstRealm.token(threadContext)).thenThrow(authenticationError("realm doesn't like tokens")); + try { + authenticateBlocking(restRequest); + fail("exception should bubble out"); + } catch (ElasticsearchException e) { + assertThat(e.getMessage(), is("realm doesn't like tokens")); + verify(auditTrail).authenticationFailed(restRequest); + } + } + + public void testRealmSupportsMethodThrowingException() throws Exception { + AuthenticationToken token = mock(AuthenticationToken.class); + when(secondRealm.token(threadContext)).thenReturn(token); + when(secondRealm.supports(token)).thenThrow(authenticationError("realm 
doesn't like supports")); + try { + authenticateBlocking("_action", message, null); + fail("exception should bubble out"); + } catch (ElasticsearchException e) { + assertThat(e.getMessage(), is("realm doesn't like supports")); + verify(auditTrail).authenticationFailed(token, "_action", message); + } + } + + public void testRealmSupportsMethodThrowingExceptionRest() throws Exception { + AuthenticationToken token = mock(AuthenticationToken.class); + when(secondRealm.token(threadContext)).thenReturn(token); + when(secondRealm.supports(token)).thenThrow(authenticationError("realm doesn't like supports")); + try { + authenticateBlocking(restRequest); + fail("exception should bubble out"); + } catch (ElasticsearchException e) { + assertThat(e.getMessage(), is("realm doesn't like supports")); + verify(auditTrail).authenticationFailed(token, restRequest); + } + } + + public void testRealmAuthenticateThrowingException() throws Exception { + AuthenticationToken token = mock(AuthenticationToken.class); + when(secondRealm.token(threadContext)).thenReturn(token); + when(secondRealm.supports(token)).thenReturn(true); + doThrow(authenticationError("realm doesn't like authenticate")) + .when(secondRealm).authenticate(eq(token), any(ActionListener.class)); + try { + authenticateBlocking("_action", message, null); + fail("exception should bubble out"); + } catch (ElasticsearchException e) { + assertThat(e.getMessage(), is("realm doesn't like authenticate")); + verify(auditTrail).authenticationFailed(token, "_action", message); + } + } + + public void testRealmAuthenticateThrowingExceptionRest() throws Exception { + AuthenticationToken token = mock(AuthenticationToken.class); + when(secondRealm.token(threadContext)).thenReturn(token); + when(secondRealm.supports(token)).thenReturn(true); + doThrow(authenticationError("realm doesn't like authenticate")) + .when(secondRealm).authenticate(eq(token), any(ActionListener.class)); + try { + authenticateBlocking(restRequest); + fail("exception should bubble out"); + } catch (ElasticsearchSecurityException e) { + assertThat(e.getMessage(), is("realm doesn't like authenticate")); + verify(auditTrail).authenticationFailed(token, restRequest); + } + } + + public void testRealmLookupThrowingException() throws Exception { + AuthenticationToken token = mock(AuthenticationToken.class); + threadContext.putHeader(AuthenticationServiceField.RUN_AS_USER_HEADER, "run_as"); + when(secondRealm.token(threadContext)).thenReturn(token); + when(secondRealm.supports(token)).thenReturn(true); + mockAuthenticate(secondRealm, token, new User("lookup user", new String[]{"user"})); + mockRealmLookupReturnsNull(firstRealm, "run_as"); + doThrow(authenticationError("realm doesn't want to lookup")) + .when(secondRealm).lookupUser(eq("run_as"), any(ActionListener.class)); + + try { + authenticateBlocking("_action", message, null); + fail("exception should bubble out"); + } catch (ElasticsearchException e) { + assertThat(e.getMessage(), is("realm doesn't want to lookup")); + verify(auditTrail).authenticationFailed(token, "_action", message); + } + } + + public void testRealmLookupThrowingExceptionRest() throws Exception { + AuthenticationToken token = mock(AuthenticationToken.class); + threadContext.putHeader(AuthenticationServiceField.RUN_AS_USER_HEADER, "run_as"); + when(secondRealm.token(threadContext)).thenReturn(token); + when(secondRealm.supports(token)).thenReturn(true); + mockAuthenticate(secondRealm, token, new User("lookup user", new String[]{"user"})); + 
mockRealmLookupReturnsNull(firstRealm, "run_as"); + doThrow(authenticationError("realm doesn't want to " + "lookup")) + .when(secondRealm).lookupUser(eq("run_as"), any(ActionListener.class)); + + try { + authenticateBlocking(restRequest); + fail("exception should bubble out"); + } catch (ElasticsearchException e) { + assertThat(e.getMessage(), is("realm doesn't want to lookup")); + verify(auditTrail).authenticationFailed(token, restRequest); + } + } + + public void testRunAsLookupSameRealm() throws Exception { + AuthenticationToken token = mock(AuthenticationToken.class); + threadContext.putHeader(AuthenticationServiceField.RUN_AS_USER_HEADER, "run_as"); + when(secondRealm.token(threadContext)).thenReturn(token); + when(secondRealm.supports(token)).thenReturn(true); + final User user = new User("lookup user", new String[]{"user"}, "lookup user", "lookup@foo.foo", + Collections.singletonMap("foo", "bar"), true); + mockAuthenticate(secondRealm, token, user); + mockRealmLookupReturnsNull(firstRealm, "run_as"); + doAnswer((i) -> { + ActionListener listener = (ActionListener) i.getArguments()[1]; + listener.onResponse(new User("looked up user", new String[]{"some role"})); + return null; + }).when(secondRealm).lookupUser(eq("run_as"), any(ActionListener.class)); + + final AtomicBoolean completed = new AtomicBoolean(false); + ActionListener listener = ActionListener.wrap(result -> { + assertThat(result, notNullValue()); + User authenticated = result.getUser(); + + assertThat(authenticated.principal(), is("looked up user")); + assertThat(authenticated.roles(), arrayContaining("some role")); + assertThreadContextContainsAuthentication(result); + + assertThat(SystemUser.is(authenticated), is(false)); + assertThat(authenticated.isRunAs(), is(true)); + User authUser = authenticated.authenticatedUser(); + assertThat(authUser.principal(), is("lookup user")); + assertThat(authUser.roles(), arrayContaining("user")); + assertEquals(user.metadata(), authUser.metadata()); + assertEquals(user.email(), authUser.email()); + assertEquals(user.enabled(), authUser.enabled()); + assertEquals(user.fullName(), authUser.fullName()); + + + setCompletedToTrue(completed); + }, this::logAndFail); + + // we do not actually go async + if (randomBoolean()) { + service.authenticate("_action", message, (User)null, listener); + } else { + service.authenticate(restRequest, listener); + } + assertTrue(completed.get()); + } + + public void testRunAsLookupDifferentRealm() throws Exception { + AuthenticationToken token = mock(AuthenticationToken.class); + threadContext.putHeader(AuthenticationServiceField.RUN_AS_USER_HEADER, "run_as"); + when(secondRealm.token(threadContext)).thenReturn(token); + when(secondRealm.supports(token)).thenReturn(true); + mockAuthenticate(secondRealm, token, new User("lookup user", new String[]{"user"})); + doAnswer((i) -> { + ActionListener listener = (ActionListener) i.getArguments()[1]; + listener.onResponse(new User("looked up user", new String[]{"some role"})); + return null; + }).when(firstRealm).lookupUser(eq("run_as"), any(ActionListener.class)); + + final AtomicBoolean completed = new AtomicBoolean(false); + ActionListener listener = ActionListener.wrap(result -> { + assertThat(result, notNullValue()); + User authenticated = result.getUser(); + + assertThat(SystemUser.is(authenticated), is(false)); + assertThat(authenticated.isRunAs(), is(true)); + assertThat(authenticated.authenticatedUser().principal(), is("lookup user")); + assertThat(authenticated.authenticatedUser().roles(), 
arrayContaining("user")); + assertThat(authenticated.principal(), is("looked up user")); + assertThat(authenticated.roles(), arrayContaining("some role")); + assertThreadContextContainsAuthentication(result); + setCompletedToTrue(completed); + }, this::logAndFail); + + // call service asynchronously but it doesn't actually go async + if (randomBoolean()) { + service.authenticate("_action", message, (User)null, listener); + } else { + service.authenticate(restRequest, listener); + } + assertTrue(completed.get()); + } + + public void testRunAsWithEmptyRunAsUsernameRest() throws Exception { + AuthenticationToken token = mock(AuthenticationToken.class); + User user = new User("lookup user", new String[]{"user"}); + threadContext.putHeader(AuthenticationServiceField.RUN_AS_USER_HEADER, ""); + when(secondRealm.token(threadContext)).thenReturn(token); + when(secondRealm.supports(token)).thenReturn(true); + mockAuthenticate(secondRealm, token, user); + + try { + authenticateBlocking(restRequest); + fail("exception should be thrown"); + } catch (ElasticsearchException e) { + verify(auditTrail).runAsDenied(any(Authentication.class), eq(restRequest), eq(Role.EMPTY.names())); + verifyNoMoreInteractions(auditTrail); + } + } + + public void testRunAsWithEmptyRunAsUsername() throws Exception { + AuthenticationToken token = mock(AuthenticationToken.class); + User user = new User("lookup user", new String[]{"user"}); + threadContext.putHeader(AuthenticationServiceField.RUN_AS_USER_HEADER, ""); + when(secondRealm.token(threadContext)).thenReturn(token); + when(secondRealm.supports(token)).thenReturn(true); + mockAuthenticate(secondRealm, token, user); + + try { + authenticateBlocking("_action", message, null); + fail("exception should be thrown"); + } catch (ElasticsearchException e) { + verify(auditTrail).runAsDenied(any(Authentication.class), eq("_action"), eq(message), eq(Role.EMPTY.names())); + verifyNoMoreInteractions(auditTrail); + } + } + + public void testAuthenticateTransportDisabledRunAsUser() throws Exception { + AuthenticationToken token = mock(AuthenticationToken.class); + threadContext.putHeader(AuthenticationServiceField.RUN_AS_USER_HEADER, "run_as"); + when(secondRealm.token(threadContext)).thenReturn(token); + when(secondRealm.supports(token)).thenReturn(true); + mockAuthenticate(secondRealm, token, new User("lookup user", new String[]{"user"})); + mockRealmLookupReturnsNull(firstRealm, "run_as"); + doAnswer((i) -> { + ActionListener listener = (ActionListener) i.getArguments()[1]; + listener.onResponse(new User("looked up user", new String[]{"some role"}, null, null, null, false)); + return null; + }).when(secondRealm).lookupUser(eq("run_as"), any(ActionListener.class)); + User fallback = randomBoolean() ? 
SystemUser.INSTANCE : null; + ElasticsearchSecurityException e = + expectThrows(ElasticsearchSecurityException.class, () -> authenticateBlocking("_action", message, fallback)); + verify(auditTrail).authenticationFailed(token, "_action", message); + verifyNoMoreInteractions(auditTrail); + assertAuthenticationException(e); + } + + public void testAuthenticateRestDisabledRunAsUser() throws Exception { + AuthenticationToken token = mock(AuthenticationToken.class); + threadContext.putHeader(AuthenticationServiceField.RUN_AS_USER_HEADER, "run_as"); + when(secondRealm.token(threadContext)).thenReturn(token); + when(secondRealm.supports(token)).thenReturn(true); + mockAuthenticate(secondRealm, token, new User("lookup user", new String[]{"user"})); + mockRealmLookupReturnsNull(firstRealm, "run_as"); + doAnswer((i) -> { + ActionListener listener = (ActionListener) i.getArguments()[1]; + listener.onResponse(new User("looked up user", new String[]{"some role"}, null, null, null, false)); + return null; + }).when(secondRealm).lookupUser(eq("run_as"), any(ActionListener.class)); + + ElasticsearchSecurityException e = + expectThrows(ElasticsearchSecurityException.class, () -> authenticateBlocking(restRequest)); + verify(auditTrail).authenticationFailed(token, restRequest); + verifyNoMoreInteractions(auditTrail); + assertAuthenticationException(e); + } + + public void testAuthenticateWithToken() throws Exception { + User user = new User("_username", "r1"); + final AtomicBoolean completed = new AtomicBoolean(false); + final Authentication expected = new Authentication(user, new RealmRef("realm", "custom", "node"), null); + PlainActionFuture<Tuple<UserToken, String>> tokenFuture = new PlainActionFuture<>(); + try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { + Authentication originatingAuth = new Authentication(new User("creator"), new RealmRef("test", "test", "test"), null); + tokenService.createUserToken(expected, originatingAuth, tokenFuture, Collections.emptyMap()); + } + String token = tokenService.getUserTokenString(tokenFuture.get().v1()); + mockGetTokenFromId(tokenFuture.get().v1(), client); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + threadContext.putHeader("Authorization", "Bearer " + token); + service.authenticate("_action", message, (User)null, ActionListener.wrap(result -> { + assertThat(result, notNullValue()); + assertThat(result.getUser(), is(user)); + assertThat(result.getLookedUpBy(), is(nullValue())); + assertThat(result.getAuthenticatedBy(), is(notNullValue())); + assertEquals(expected, result); + setCompletedToTrue(completed); + }, this::logAndFail)); + } + assertTrue(completed.get()); + verify(auditTrail).authenticationSuccess("realm", user, "_action", message); + verifyNoMoreInteractions(auditTrail); + } + + public void testInvalidToken() throws Exception { + final User user = new User("_username", "r1"); + when(firstRealm.token(threadContext)).thenReturn(token); + when(firstRealm.supports(token)).thenReturn(true); + mockAuthenticate(firstRealm, token, user); + final int numBytes = randomIntBetween(TokenService.MINIMUM_BYTES, TokenService.MINIMUM_BYTES + 32); + final byte[] randomBytes = new byte[numBytes]; + random().nextBytes(randomBytes); + final CountDownLatch latch = new CountDownLatch(1); + final Authentication expected = new Authentication(user, new RealmRef(firstRealm.name(), firstRealm.type(), "authc_test"), null); + AtomicBoolean success = new AtomicBoolean(false); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { +
threadContext.putHeader("Authorization", "Bearer " + Base64.getEncoder().encodeToString(randomBytes)); + service.authenticate("_action", message, (User)null, ActionListener.wrap(result -> { + assertThat(result, notNullValue()); + assertThat(result.getUser(), is(user)); + assertThat(result.getLookedUpBy(), is(nullValue())); + assertThat(result.getAuthenticatedBy(), is(notNullValue())); + assertThreadContextContainsAuthentication(result); + assertEquals(expected, result); + success.set(true); + latch.countDown(); + }, e -> { + if (e instanceof IllegalStateException) { + assertThat(e.getMessage(), containsString("array length must be <= to " + ArrayUtil.MAX_ARRAY_LENGTH + " but was: ")); + latch.countDown(); + } else if (e instanceof NegativeArraySizeException) { + assertThat(e.getMessage(), containsString("array size must be positive but was: ")); + latch.countDown(); + } else { + logger.error("unexpected exception", e); + latch.countDown(); + fail("unexpected exception: " + e.getMessage()); + } + })); + } catch (IllegalStateException ex) { + assertThat(ex.getMessage(), containsString("array length must be <= to " + ArrayUtil.MAX_ARRAY_LENGTH + " but was: ")); + latch.countDown(); + } catch (NegativeArraySizeException ex) { + assertThat(ex.getMessage(), containsString("array size must be positive but was: ")); + latch.countDown(); + } + + // we need to use a latch here because the key computation goes async on another thread! + latch.await(); + if (success.get()) { + verify(auditTrail).authenticationSuccess(firstRealm.name(), user, "_action", message); + } + verifyNoMoreInteractions(auditTrail); + } + + public void testExpiredToken() throws Exception { + when(lifecycleService.isSecurityIndexAvailable()).thenReturn(true); + when(lifecycleService.isSecurityIndexExisting()).thenReturn(true); + User user = new User("_username", "r1"); + final Authentication expected = new Authentication(user, new RealmRef("realm", "custom", "node"), null); + PlainActionFuture<Tuple<UserToken, String>> tokenFuture = new PlainActionFuture<>(); + try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { + Authentication originatingAuth = new Authentication(new User("creator"), new RealmRef("test", "test", "test"), null); + tokenService.createUserToken(expected, originatingAuth, tokenFuture, Collections.emptyMap()); + } + String token = tokenService.getUserTokenString(tokenFuture.get().v1()); + mockGetTokenFromId(tokenFuture.get().v1(), client); + when(client.prepareMultiGet()).thenReturn(new MultiGetRequestBuilder(client, MultiGetAction.INSTANCE)); + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + MultiGetResponse response = mock(MultiGetResponse.class); + MultiGetItemResponse[] responses = new MultiGetItemResponse[2]; + when(response.getResponses()).thenReturn(responses); + + final boolean newExpired = randomBoolean(); + GetResponse oldGetResponse = mock(GetResponse.class); + when(oldGetResponse.isExists()).thenReturn(newExpired == false); + responses[0] = new MultiGetItemResponse(oldGetResponse, null); + + GetResponse getResponse = mock(GetResponse.class); + responses[1] = new MultiGetItemResponse(getResponse, null); + when(getResponse.isExists()).thenReturn(newExpired); + if (newExpired) { + Map source = MapBuilder.newMapBuilder() + .put("access_token", Collections.singletonMap("invalidated", true)) + .immutableMap(); + when(getResponse.getSource()).thenReturn(source); + } + listener.onResponse(response); + return Void.TYPE; +
}).when(client).multiGet(any(MultiGetRequest.class), any(ActionListener.class)); + + doAnswer(invocationOnMock -> { + ((Runnable) invocationOnMock.getArguments()[1]).run(); + return null; + }).when(lifecycleService).prepareIndexIfNeededThenExecute(any(Consumer.class), any(Runnable.class)); + + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + threadContext.putHeader("Authorization", "Bearer " + token); + ElasticsearchSecurityException e = + expectThrows(ElasticsearchSecurityException.class, () -> authenticateBlocking("_action", message, null)); + assertEquals(RestStatus.UNAUTHORIZED, e.status()); + assertEquals("token expired", e.getMessage()); + } + } + + private static class InternalMessage extends TransportMessage { + } + + void assertThreadContextContainsAuthentication(Authentication authentication) throws IOException { + Authentication contextAuth = threadContext.getTransient(AuthenticationField.AUTHENTICATION_KEY); + assertThat(contextAuth, notNullValue()); + assertThat(contextAuth, is(authentication)); + assertThat(threadContext.getHeader(AuthenticationField.AUTHENTICATION_KEY), equalTo((Object) authentication.encode())); + } + + private void mockAuthenticate(Realm realm, AuthenticationToken token, User user) { + doAnswer((i) -> { + ActionListener listener = (ActionListener) i.getArguments()[1]; + if (user == null) { + listener.onResponse(AuthenticationResult.notHandled()); + } else { + listener.onResponse(AuthenticationResult.success(user)); + } + return null; + }).when(realm).authenticate(eq(token), any(ActionListener.class)); + } + + private Authentication authenticateBlocking(RestRequest restRequest) { + PlainActionFuture future = new PlainActionFuture<>(); + service.authenticate(restRequest, future); + return future.actionGet(); + } + + private Authentication authenticateBlocking(String action, TransportMessage message, User fallbackUser) { + PlainActionFuture future = new PlainActionFuture<>(); + service.authenticate(action, message, fallbackUser, future); + return future.actionGet(); + } + + private static void mockRealmLookupReturnsNull(Realm realm, String username) { + doAnswer((i) -> { + ActionListener listener = (ActionListener) i.getArguments()[1]; + listener.onResponse(null); + return null; + }).when(realm).lookupUser(eq(username), any(ActionListener.class)); + } + + static class TestRealms extends Realms { + + TestRealms(Settings settings, Environment env, Map factories, XPackLicenseState licenseState, + ThreadContext threadContext, ReservedRealm reservedRealm, List realms, List internalRealms) + throws Exception { + super(settings, env, factories, licenseState, threadContext, reservedRealm); + this.realms = realms; + this.standardRealmsOnly = internalRealms; + } + } + + private void logAndFail(Exception e) { + logger.error("unexpected exception", e); + fail("unexpected exception " + e.getMessage()); + } + + private void setCompletedToTrue(AtomicBoolean completed) { + assertTrue(completed.compareAndSet(false, true)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/InternalRealmsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/InternalRealmsTests.java new file mode 100644 index 0000000000000..91e8111b54c8d --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/InternalRealmsTests.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; +import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; + +import java.util.Map; +import java.util.function.BiConsumer; + +import static org.elasticsearch.mock.orig.Mockito.times; +import static org.hamcrest.Matchers.any; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static org.mockito.Matchers.isA; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; + +public class InternalRealmsTests extends ESTestCase { + + public void testNativeRealmRegistersIndexHealthChangeListener() throws Exception { + SecurityLifecycleService lifecycleService = mock(SecurityLifecycleService.class); + Map factories = InternalRealms.getFactories(mock(ThreadPool.class), mock(ResourceWatcherService.class), + mock(SSLService.class), mock(NativeUsersStore.class), mock(NativeRoleMappingStore.class), lifecycleService); + assertThat(factories, hasEntry(is(NativeRealmSettings.TYPE), any(Realm.Factory.class))); + verifyZeroInteractions(lifecycleService); + + Settings settings = Settings.builder().put("path.home", createTempDir()).build(); + factories.get(NativeRealmSettings.TYPE).create(new RealmConfig("test", Settings.EMPTY, settings, + TestEnvironment.newEnvironment(settings), new ThreadContext(settings))); + verify(lifecycleService).addSecurityIndexHealthChangeListener(isA(BiConsumer.class)); + + factories.get(NativeRealmSettings.TYPE).create(new RealmConfig("test", Settings.EMPTY, settings, + TestEnvironment.newEnvironment(settings), new ThreadContext(settings))); + verify(lifecycleService, times(2)).addSecurityIndexHealthChangeListener(isA(BiConsumer.class)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java new file mode 100644 index 0000000000000..3871574b76c51 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java @@ -0,0 +1,333 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc; + +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.SecurityExtension; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.notNullValue; + +public class RealmSettingsTests extends ESTestCase { + + private static final List HASH_ALGOS = Arrays.stream(Hasher.values()).map(Hasher::name).collect(Collectors.toList()); + + public void testRealmWithoutTypeDoesNotValidate() throws Exception { + final Settings.Builder builder = baseSettings("x", false); + builder.remove("type"); + assertErrorWithMessage("empty1", "missing realm type", realm("empty1", builder).build()); + } + + public void testRealmWithBlankTypeDoesNotValidate() throws Exception { + final Settings.Builder builder = baseSettings("", false); + assertErrorWithMessage("empty2", "missing realm type", realm("empty2", builder).build()); + } + + /** + * This test exists because (in 5.x), we want to be backwards compatible and accept custom realms that + * have not been updated to explicitly declare their settings. + * + * @see org.elasticsearch.xpack.core.security.SecurityExtension#getRealmSettings() + */ + public void testRealmWithUnknownTypeAcceptsAllSettings() throws Exception { + final Settings.Builder settings = baseSettings("tam", true) + .put("ip", "8.6.75.309") + .put(randomAlphaOfLengthBetween(4, 8), randomTimeValue()); + assertSuccess(realm("tam", settings)); + } + + public void testFileRealmWithAllSettingsValidatesSuccessfully() throws Exception { + assertSuccess(fileRealm("file1")); + } + + public void testFileRealmWithUnknownConfigurationDoesNotValidate() throws Exception { + final Settings.Builder builder = realm("file2", fileSettings().put("not-valid", randomInt())); + assertErrorWithCause("file2", "unknown setting [not-valid]", builder.build()); + } + + public void testNativeRealmWithAllSettingsValidatesSuccessfully() throws Exception { + assertSuccess(nativeRealm("native1")); + } + + public void testNativeRealmWithUnknownConfigurationDoesNotValidate() throws Exception { + final Settings.Builder builder = realm("native2", nativeSettings().put("not-valid", randomAlphaOfLength(10))); + assertErrorWithCause("native2", "unknown setting [not-valid]", builder.build()); + } + + public void testLdapRealmWithUserTemplatesAndGroupAttributesValidatesSuccessfully() throws Exception { + assertSuccess(ldapRealm("ldap1", false, false)); + } + + public void testLdapRealmWithUserSearchAndGroupSearchValidatesSuccessfully() throws Exception { + assertSuccess(ldapRealm("ldap2", true, true)); + } + + public void testActiveDirectoryRealmWithAllSettingsValidatesSuccessfully() throws Exception { + assertSuccess(activeDirectoryRealm("ad1", true)); + } + + public void testPkiRealmWithCertificateAuthoritiesValidatesSuccessfully() throws Exception { + 
assertSuccess(pkiRealm("pki1", false)); + } + + public void testPkiRealmWithTrustStoreValidatesSuccessfully() throws Exception { + assertSuccess(pkiRealm("pki2", true)); + } + + public void testPkiRealmWithFullSslSettingsDoesNotValidate() throws Exception { + final Settings.Builder realm = realm("pki3", configureSsl("", pkiSettings(true), true, true)); + assertError("pki3", realm.build()); + } + + public void testPkiRealmWithClosedSecurePasswordValidatesSuccessfully() throws Exception { + final Settings.Builder builder = pkiRealm("pki4", true); + builder.getSecureSettings().close(); + final Settings settings = builder.build(); + assertSuccess(settings); + } + + public void testSettingsWithMultipleRealmsValidatesSuccessfully() throws Exception { + final Settings settings = Settings.builder() + .put(fileRealm("file1").build()) + .put(nativeRealm("native2").build()) + .put(ldapRealm("ldap3", true, false).build()) + .put(activeDirectoryRealm("ad4", false).build()) // don't load SSL twice + .put(pkiRealm("pki5", false).build()) + .build(); + assertSuccess(settings); + } + + private Settings.Builder nativeRealm(String name) { + return realm(name, nativeSettings()); + } + + private Settings.Builder nativeSettings() { + return baseSettings("native", true); + } + + private Settings.Builder fileRealm(String name) { + return realm(name, fileSettings()); + } + + private Settings.Builder fileSettings() { + return baseSettings("file", true); + } + + private Settings.Builder ldapRealm(String name, boolean userSearch, boolean groupSearch) { + return realm(name, ldapSettings(userSearch, groupSearch)); + } + + private Settings.Builder ldapSettings(boolean userSearch, boolean groupSearch) { + final Settings.Builder builder = commonLdapSettings("ldap", true) + .put("bind_dn", "elasticsearch") + .put("follow_referrals", randomBoolean()); + + SecuritySettingsSource.addSecureSettings(builder, secureSettings -> { + secureSettings.setString("bind_password", "t0p_s3cr3t"); + }); + + if (userSearch) { + builder.put("user_search.base_dn", "o=people, dc=example, dc=com"); + builder.put("user_search.scope", "sub_tree"); + builder.put("user_search.filter", "(" + randomAlphaOfLengthBetween(2, 5) + "={0})"); + builder.put("user_search.pool.enabled", randomBoolean()); + builder.put("user_search.pool.size", randomIntBetween(10, 100)); + builder.put("user_search.pool.initial_size", randomIntBetween(1, 10)); + builder.put("user_search.pool.health_check.enabled", randomBoolean()); + builder.put("user_search.pool.health_check.dn", randomAlphaOfLength(32)); + builder.put("user_search.pool.health_check.interval", randomPositiveTimeValue()); + } else { + builder.putList("user_dn_templates", + "cn={0}, ou=staff, o=people, dc=example, dc=com", + "cn={0}, ou=visitors, o=people, dc=example, dc=com"); + } + + if (groupSearch) { + builder.put("group_search.base_dn", "o=groups, dc=example, dc=com"); + builder.put("group_search.scope", "one_level"); + builder.put("group_search.filter", "userGroup"); + builder.put("group_search.user_attribute", "uid"); + } else { + builder.put("user_group_attribute", randomAlphaOfLength(8)); + } + return builder; + } + + private Settings.Builder activeDirectoryRealm(String name, boolean configureSSL) { + return realm(name, activeDirectorySettings(configureSSL)); + } + + private Settings.Builder activeDirectorySettings(boolean configureSSL) { + final Settings.Builder builder = commonLdapSettings("active_directory", configureSSL) + .put("domain_name", "MEGACORP"); + builder.put("user_search.base_dn", 
"o=people, dc.example, dc.com"); + builder.put("user_search.scope", "sub_tree"); + builder.put("user_search.filter", randomAlphaOfLength(5) + "={0}"); + builder.put("group_search.base_dn", "o=groups, dc=example, dc=com"); + builder.put("group_search.scope", "one_level"); + return builder; + } + + private Settings.Builder commonLdapSettings(String type, boolean configureSSL) { + final Settings.Builder builder = baseSettings(type, true) + .putList("url", "ldap://dir1.internal:9876", "ldap://dir2.internal:9876", "ldap://dir3.internal:9876") + .put("load_balance.type", "round_robin") + .put("load_balance.cache_ttl", randomTimeValue()) + .put("unmapped_groups_as_roles", randomBoolean()) + .put("files.role_mapping", "x-pack/" + randomAlphaOfLength(8) + ".yml") + .put("timeout.tcp_connect", randomPositiveTimeValue()) + .put("timeout.tcp_read", randomPositiveTimeValue()) + .put("timeout.ldap_search", randomPositiveTimeValue()); + if (configureSSL) { + configureSsl("ssl.", builder, randomBoolean(), randomBoolean()); + } + return builder; + } + + private Settings.Builder pkiRealm(String name, boolean useTrustStore) { + return realm(name, pkiSettings(useTrustStore)); + } + + private Settings.Builder pkiSettings(boolean useTrustStore) { + final Settings.Builder builder = baseSettings("pki", false) + .put("username_pattern", "CN=\\D(\\d+)(?:,\\|$)") + .put("files.role_mapping", "x-pack/" + randomAlphaOfLength(8) + ".yml"); + + if (useTrustStore) { + builder.put("truststore.path", randomAlphaOfLengthBetween(8, 32)); + SecuritySettingsSource.addSecureSettings(builder, secureSettings -> { + secureSettings.setString("truststore.secure_password", randomAlphaOfLength(8)); + }); + builder.put("truststore.algorithm", randomAlphaOfLengthBetween(6, 10)); + } else { + builder.putList("certificate_authorities", generateRandomStringArray(5, 32, false, false)); + } + return builder; + } + + private Settings.Builder configureSsl(String prefix, Settings.Builder builder, boolean useKeyStore, boolean useTrustStore) { + if (useKeyStore) { + builder.put(prefix + "keystore.path", "ssl/" + randomAlphaOfLength(5) + ".jks"); + SecuritySettingsSource.addSecureSettings(builder, secureSettings -> { + secureSettings.setString(prefix + "keystore.secure_password", randomAlphaOfLength(8)); + secureSettings.setString(prefix + "keystore.secure_key_password", randomAlphaOfLength(8)); + }); + } else { + builder.put(prefix + "key", "x-pack/ssl/" + randomAlphaOfLength(5) + ".key"); + SecuritySettingsSource.addSecureSettings(builder, secureSettings -> + secureSettings.setString(prefix + "secure_key_passphrase", randomAlphaOfLength(32))); + + builder.put(prefix + "certificate", "ssl/" + randomAlphaOfLength(5) + ".cert"); + } + + if (useTrustStore) { + builder.put(prefix + "truststore.path", "x-pack/ssl/" + randomAlphaOfLength(5) + ".jts"); + SecuritySettingsSource.addSecureSettings(builder, secureSettings -> + secureSettings.setString(prefix + "truststore.secure_password", randomAlphaOfLength(8))); + } else { + builder.put(prefix + "certificate_authorities", "ssl/" + randomAlphaOfLength(8) + ".ca"); + } + + builder.put(prefix + "verification_mode", "full"); + builder.putList(prefix + "supported_protocols", randomSubsetOf(XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS)); + builder.putList(prefix + "cipher_suites", randomSubsetOf(XPackSettings.DEFAULT_CIPHERS)); + + return builder; + } + + private Settings.Builder baseSettings(String type, boolean withCacheSettings) { + final Settings.Builder builder = Settings.builder() + .put("type", type) + 
.put("order", randomInt()) + .put("enabled", true); + if (withCacheSettings) { + builder.put("cache.ttl", randomPositiveTimeValue()) + .put("cache.max_users", randomIntBetween(1_000, 1_000_000)) + .put("cache.hash_algo", randomFrom(HASH_ALGOS)); + } + return builder; + } + + private Settings.Builder realm(String name, Settings.Builder settings) { + final String prefix = realmPrefix(name); + final MockSecureSettings secureSettings = normaliseSecureSettingPrefix(prefix, settings.getSecureSettings()); + final Settings.Builder builder = Settings.builder().put(settings.normalizePrefix(prefix).build(), false); + if (secureSettings != null) { + builder.setSecureSettings(secureSettings); + } + return builder; + } + + private MockSecureSettings normaliseSecureSettingPrefix(String prefix, SecureSettings settings) { + if (settings == null) { + return null; + } + if (settings instanceof MockSecureSettings) { + final MockSecureSettings source = (MockSecureSettings) settings; + final MockSecureSettings target = new MockSecureSettings(); + for (String key : settings.getSettingNames()) { + target.setString(prefix + key, source.getString(key).toString()); + } + return target; + } else { + throw new IllegalArgumentException("Source settings " + settings.getClass() + " is not a " + MockSecureSettings.class); + } + } + + private String realmPrefix(String name) { + return RealmSettings.PREFIX + name + "."; + } + + private void assertSuccess(Settings.Builder builder) { + assertSuccess(builder.build()); + } + + private void assertSuccess(Settings settings) { + assertThat(group().get(settings), notNullValue()); + } + + private void assertErrorWithCause(String realmName, String message, Settings settings) { + final IllegalArgumentException exception = assertError(realmName, settings); + assertThat(exception.getCause(), notNullValue()); + assertThat(exception.getCause().getMessage(), containsString(message)); + } + + private void assertErrorWithMessage(String realmName, String message, Settings settings) { + final IllegalArgumentException exception = assertError(realmName, settings); + assertThat(exception.getMessage(), containsString(message)); + } + + private IllegalArgumentException assertError(String realmName, Settings settings) { + final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> group().get(settings) + ); + assertThat(exception.getMessage(), containsString(realmPrefix(realmName))); + return exception; + } + + private Setting group() { + final List<Setting<?>> list = new ArrayList<>(); + final List noExtensions = Collections.emptyList(); + RealmSettings.addSettings(list, noExtensions); + assertThat(list, hasSize(1)); + return list.get(0); + } +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java new file mode 100644 index 0000000000000..2bc3d58471b15 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java @@ -0,0 +1,540 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.security.authc; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.license.XPackLicenseState.AllowedRealmType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings; +import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings; +import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.TreeMap; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class RealmsTests extends ESTestCase { + private Map factories; + private XPackLicenseState licenseState; + private ThreadContext threadContext; + private ReservedRealm reservedRealm; + + @Before + public void init() throws Exception { + factories = new HashMap<>(); + factories.put(FileRealmSettings.TYPE, config -> new DummyRealm(FileRealmSettings.TYPE, config)); + factories.put(NativeRealmSettings.TYPE, config -> new DummyRealm(NativeRealmSettings.TYPE, config)); + for (int i = 0; i < randomIntBetween(1, 5); i++) { + String name = "type_" + i; + factories.put(name, config -> new DummyRealm(name, config)); + } + licenseState = mock(XPackLicenseState.class); + threadContext = new ThreadContext(Settings.EMPTY); + reservedRealm = mock(ReservedRealm.class); + when(licenseState.isAuthAllowed()).thenReturn(true); + when(licenseState.isSecurityEnabled()).thenReturn(true); + when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.ALL); + when(reservedRealm.type()).thenReturn(ReservedRealm.TYPE); + } + + public void testWithSettings() throws Exception { + Settings.Builder builder = Settings.builder() + .put("path.home", createTempDir()); + List orders = new ArrayList<>(factories.size() - 2); + for (int i = 0; i < factories.size() - 2; i++) { + orders.add(i); + } + Collections.shuffle(orders, random()); + Map orderToIndex = new HashMap<>(); + for (int i = 0; i < factories.size() - 2; i++) { + builder.put("xpack.security.authc.realms.realm_" + i + ".type", "type_" + i); + builder.put("xpack.security.authc.realms.realm_" + i + ".order", orders.get(i)); + orderToIndex.put(orders.get(i), i); + } + Settings settings = builder.build(); + Environment env = 
TestEnvironment.newEnvironment(settings); + Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm); + + Iterator iterator = realms.iterator(); + assertThat(iterator.hasNext(), is(true)); + Realm realm = iterator.next(); + assertThat(realm, is(reservedRealm)); + + int i = 0; + while (iterator.hasNext()) { + realm = iterator.next(); + assertThat(realm.order(), equalTo(i)); + int index = orderToIndex.get(i); + assertThat(realm.type(), equalTo("type_" + index)); + assertThat(realm.name(), equalTo("realm_" + index)); + i++; + } + } + + public void testWithSettingsWhereDifferentRealmsHaveSameOrder() throws Exception { + Settings.Builder builder = Settings.builder() + .put("path.home", createTempDir()); + List randomSeq = new ArrayList<>(factories.size() - 2); + for (int i = 0; i < factories.size() - 2; i++) { + randomSeq.add(i); + } + Collections.shuffle(randomSeq, random()); + + TreeMap nameToRealmId = new TreeMap<>(); + for (int i = 0; i < factories.size() - 2; i++) { + int randomizedRealmId = randomSeq.get(i); + String randomizedRealmName = randomAlphaOfLengthBetween(12,32); + nameToRealmId.put("realm_" + randomizedRealmName, randomizedRealmId); + builder.put("xpack.security.authc.realms.realm_" + randomizedRealmName + ".type", "type_" + randomizedRealmId); + // set same order for all realms + builder.put("xpack.security.authc.realms.realm_" + randomizedRealmName + ".order", 1); + } + Settings settings = builder.build(); + Environment env = TestEnvironment.newEnvironment(settings); + Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm); + + Iterator iterator = realms.iterator(); + assertThat(iterator.hasNext(), is(true)); + Realm realm = iterator.next(); + assertThat(realm, is(reservedRealm)); + + // As order is same for all realms, it should fall back secondary comparison on name + // Verify that realms are iterated in order based on name + Iterator expectedSortedOrderNames = nameToRealmId.keySet().iterator(); + while (iterator.hasNext()) { + realm = iterator.next(); + String expectedRealmName = expectedSortedOrderNames.next(); + assertThat(realm.order(), equalTo(1)); + assertThat(realm.type(), equalTo("type_" + nameToRealmId.get(expectedRealmName))); + assertThat(realm.name(), equalTo(expectedRealmName)); + } + } + + public void testWithSettingsWithMultipleInternalRealmsOfSameType() throws Exception { + Settings settings = Settings.builder() + .put("xpack.security.authc.realms.realm_1.type", FileRealmSettings.TYPE) + .put("xpack.security.authc.realms.realm_1.order", 0) + .put("xpack.security.authc.realms.realm_2.type", FileRealmSettings.TYPE) + .put("xpack.security.authc.realms.realm_2.order", 1) + .put("path.home", createTempDir()) + .build(); + Environment env = TestEnvironment.newEnvironment(settings); + try { + new Realms(settings, env, factories, licenseState, threadContext, reservedRealm); + fail("Expected IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("multiple [file] realms are configured")); + } + } + + public void testWithEmptySettings() throws Exception { + Realms realms = new Realms(Settings.EMPTY, TestEnvironment.newEnvironment(Settings.builder().put("path.home", + createTempDir()).build()), factories, licenseState, threadContext, reservedRealm); + Iterator iter = realms.iterator(); + assertThat(iter.hasNext(), is(true)); + Realm realm = iter.next(); + assertThat(realm, is(reservedRealm)); + assertThat(iter.hasNext(), is(true)); 
+ realm = iter.next(); + assertThat(realm.type(), equalTo(FileRealmSettings.TYPE)); + assertThat(realm.name(), equalTo("default_" + FileRealmSettings.TYPE)); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm.type(), equalTo(NativeRealmSettings.TYPE)); + assertThat(realm.name(), equalTo("default_" + NativeRealmSettings.TYPE)); + assertThat(iter.hasNext(), is(false)); + } + + public void testUnlicensedWithOnlyCustomRealms() throws Exception { + Settings.Builder builder = Settings.builder() + .put("path.home", createTempDir()); + List orders = new ArrayList<>(factories.size() - 2); + for (int i = 0; i < factories.size() - 2; i++) { + orders.add(i); + } + Collections.shuffle(orders, random()); + Map orderToIndex = new HashMap<>(); + for (int i = 0; i < factories.size() - 2; i++) { + builder.put("xpack.security.authc.realms.realm_" + i + ".type", "type_" + i); + builder.put("xpack.security.authc.realms.realm_" + i + ".order", orders.get(i)); + orderToIndex.put(orders.get(i), i); + } + Settings settings = builder.build(); + Environment env = TestEnvironment.newEnvironment(settings); + Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm); + + // this is the iterator when licensed + Iterator iter = realms.iterator(); + assertThat(iter.hasNext(), is(true)); + Realm realm = iter.next(); + assertThat(realm, is(reservedRealm)); + int i = 0; + while (iter.hasNext()) { + realm = iter.next(); + assertThat(realm.order(), equalTo(i)); + int index = orderToIndex.get(i); + assertThat(realm.type(), equalTo("type_" + index)); + assertThat(realm.name(), equalTo("realm_" + index)); + i++; + } + + when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.DEFAULT); + + iter = realms.iterator(); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm, is(reservedRealm)); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm.type(), equalTo(FileRealmSettings.TYPE)); + assertThat(realm.name(), equalTo("default_" + FileRealmSettings.TYPE)); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm.type(), equalTo(NativeRealmSettings.TYPE)); + assertThat(realm.name(), equalTo("default_" + NativeRealmSettings.TYPE)); + assertThat(iter.hasNext(), is(false)); + + when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.NATIVE); + + iter = realms.iterator(); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm, is(reservedRealm)); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm.type(), equalTo(FileRealmSettings.TYPE)); + assertThat(realm.name(), equalTo("default_" + FileRealmSettings.TYPE)); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm.type(), equalTo(NativeRealmSettings.TYPE)); + assertThat(realm.name(), equalTo("default_" + NativeRealmSettings.TYPE)); + assertThat(iter.hasNext(), is(false)); + } + + public void testUnlicensedWithInternalRealms() throws Exception { + factories.put(LdapRealmSettings.LDAP_TYPE, config -> new DummyRealm(LdapRealmSettings.LDAP_TYPE, config)); + assertThat(factories.get("type_0"), notNullValue()); + Settings.Builder builder = Settings.builder() + .put("path.home", createTempDir()) + .put("xpack.security.authc.realms.foo.type", "ldap") + .put("xpack.security.authc.realms.foo.order", "0") + .put("xpack.security.authc.realms.custom.type", "type_0") + .put("xpack.security.authc.realms.custom.order", "1"); + Settings settings = 
builder.build(); + Environment env = TestEnvironment.newEnvironment(settings); + Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm ); + Iterator iter = realms.iterator(); + assertThat(iter.hasNext(), is(true)); + Realm realm = iter.next(); + assertThat(realm, is(reservedRealm)); + + int i = 0; + // this is the iterator when licensed + List types = new ArrayList<>(); + while (iter.hasNext()) { + realm = iter.next(); + i++; + types.add(realm.type()); + } + assertThat(types, contains("ldap", "type_0")); + + when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.DEFAULT); + iter = realms.iterator(); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm, is(reservedRealm)); + i = 0; + while (iter.hasNext()) { + realm = iter.next(); + assertThat(realm.getType(), is("ldap")); + i++; + } + assertThat(i, is(1)); + + when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.NATIVE); + iter = realms.iterator(); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm, is(reservedRealm)); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm.type(), equalTo(FileRealmSettings.TYPE)); + assertThat(realm.name(), equalTo("default_" + FileRealmSettings.TYPE)); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm.type(), equalTo(NativeRealmSettings.TYPE)); + assertThat(realm.name(), equalTo("default_" + NativeRealmSettings.TYPE)); + assertThat(iter.hasNext(), is(false)); + } + + public void testUnlicensedWithNativeRealmSettingss() throws Exception { + factories.put(LdapRealmSettings.LDAP_TYPE, config -> new DummyRealm(LdapRealmSettings.LDAP_TYPE, config)); + final String type = randomFrom(FileRealmSettings.TYPE, NativeRealmSettings.TYPE); + Settings.Builder builder = Settings.builder() + .put("path.home", createTempDir()) + .put("xpack.security.authc.realms.foo.type", "ldap") + .put("xpack.security.authc.realms.foo.order", "0") + .put("xpack.security.authc.realms.native.type", type) + .put("xpack.security.authc.realms.native.order", "1"); + Settings settings = builder.build(); + Environment env = TestEnvironment.newEnvironment(settings); + Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm); + Iterator iter = realms.iterator(); + assertThat(iter.hasNext(), is(true)); + Realm realm = iter.next(); + assertThat(realm, is(reservedRealm)); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm.type(), is("ldap")); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm.type(), is(type)); + assertThat(iter.hasNext(), is(false)); + + when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.NATIVE); + iter = realms.iterator(); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm, is(reservedRealm)); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm.type(), is(type)); + assertThat(iter.hasNext(), is(false)); + } + + public void testUnlicensedWithNonStandardRealms() throws Exception { + factories.put(SamlRealmSettings.TYPE, config -> new DummyRealm(SamlRealmSettings.TYPE, config)); + Settings.Builder builder = Settings.builder() + .put("path.home", createTempDir()) + .put("xpack.security.authc.realms.foo.type", SamlRealmSettings.TYPE) + .put("xpack.security.authc.realms.foo.order", "0"); + Settings settings = builder.build(); + Environment env = 
TestEnvironment.newEnvironment(settings); + Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm); + Iterator iter = realms.iterator(); + assertThat(iter.hasNext(), is(true)); + Realm realm = iter.next(); + assertThat(realm, is(reservedRealm)); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm.type(), is(SamlRealmSettings.TYPE)); + assertThat(iter.hasNext(), is(false)); + + when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.DEFAULT); + iter = realms.iterator(); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm, is(reservedRealm)); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm.type(), is(FileRealmSettings.TYPE)); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm.type(), is(NativeRealmSettings.TYPE)); + assertThat(iter.hasNext(), is(false)); + + when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.NATIVE); + iter = realms.iterator(); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm, is(reservedRealm)); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm.type(), is(FileRealmSettings.TYPE)); + assertThat(iter.hasNext(), is(true)); + realm = iter.next(); + assertThat(realm.type(), is(NativeRealmSettings.TYPE)); + assertThat(iter.hasNext(), is(false)); + } + + public void testDisabledRealmsAreNotAdded() throws Exception { + Settings.Builder builder = Settings.builder() + .put("path.home", createTempDir()); + List orders = new ArrayList<>(factories.size() - 2); + for (int i = 0; i < factories.size() - 2; i++) { + orders.add(i); + } + Collections.shuffle(orders, random()); + Map orderToIndex = new HashMap<>(); + for (int i = 0; i < factories.size() - 2; i++) { + builder.put("xpack.security.authc.realms.realm_" + i + ".type", "type_" + i); + builder.put("xpack.security.authc.realms.realm_" + i + ".order", orders.get(i)); + boolean enabled = randomBoolean(); + builder.put("xpack.security.authc.realms.realm_" + i + ".enabled", enabled); + if (enabled) { + orderToIndex.put(orders.get(i), i); + logger.error("put [{}] -> [{}]", orders.get(i), i); + } + } + Settings settings = builder.build(); + Environment env = TestEnvironment.newEnvironment(settings); + Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm ); + Iterator iterator = realms.iterator(); + Realm realm = iterator.next(); + assertThat(realm, is(reservedRealm)); + assertThat(iterator.hasNext(), is(true)); + + int count = 0; + while (iterator.hasNext()) { + realm = iterator.next(); + Integer index = orderToIndex.get(realm.order()); + if (index == null) { + // Default realms are inserted when factories size is 1 and enabled is false + assertThat(realm.type(), equalTo(FileRealmSettings.TYPE)); + assertThat(realm.name(), equalTo("default_" + FileRealmSettings.TYPE)); + assertThat(iterator.hasNext(), is(true)); + realm = iterator.next(); + assertThat(realm.type(), equalTo(NativeRealmSettings.TYPE)); + assertThat(realm.name(), equalTo("default_" + NativeRealmSettings.TYPE)); + assertThat(iterator.hasNext(), is(false)); + } else { + assertThat(realm.type(), equalTo("type_" + index)); + assertThat(realm.name(), equalTo("realm_" + index)); + assertThat(settings.getAsBoolean("xpack.security.authc.realms.realm_" + index + ".enabled", true), equalTo(Boolean.TRUE)); + count++; + } + } + + assertThat(count, equalTo(orderToIndex.size())); + } + + public 
void testAuthcAuthzDisabled() throws Exception { + Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put("xpack.security.authc.realms.realm_1.type", FileRealmSettings.TYPE) + .put("xpack.security.authc.realms.realm_1.order", 0) + .build(); + Environment env = TestEnvironment.newEnvironment(settings); + Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm ); + + assertThat(realms.iterator().hasNext(), is(true)); + + when(licenseState.isAuthAllowed()).thenReturn(false); + assertThat(realms.iterator().hasNext(), is(false)); + } + + public void testUsageStats() throws Exception { + // test realms with duplicate values + Settings.Builder builder = Settings.builder() + .put("path.home", createTempDir()) + .put("xpack.security.authc.realms.foo.type", "type_0") + .put("xpack.security.authc.realms.foo.order", "0") + .put("xpack.security.authc.realms.bar.type", "type_0") + .put("xpack.security.authc.realms.bar.order", "1"); + Settings settings = builder.build(); + Environment env = TestEnvironment.newEnvironment(settings); + Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm ); + + Map usageStats = realms.usageStats(); + assertThat(usageStats.size(), is(factories.size())); + + // first check type_0 + assertThat(usageStats.get("type_0"), instanceOf(Map.class)); + Map type0Map = (Map) usageStats.get("type_0"); + assertThat(type0Map, hasEntry("enabled", true)); + assertThat(type0Map, hasEntry("available", true)); + assertThat((Iterable) type0Map.get("name"), contains("foo", "bar")); + assertThat((Iterable) type0Map.get("order"), contains(0, 1)); + + for (Entry entry : usageStats.entrySet()) { + String type = entry.getKey(); + if ("type_0".equals(type)) { + continue; + } + + Map typeMap = (Map) entry.getValue(); + assertThat(typeMap, hasEntry("enabled", false)); + assertThat(typeMap, hasEntry("available", true)); + assertThat(typeMap.size(), is(2)); + } + + // disable ALL using license + when(licenseState.isAuthAllowed()).thenReturn(false); + when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.NONE); + usageStats = realms.usageStats(); + assertThat(usageStats.size(), is(factories.size())); + for (Entry entry : usageStats.entrySet()) { + Map typeMap = (Map) entry.getValue(); + assertThat(typeMap, hasEntry("enabled", false)); + assertThat(typeMap, hasEntry("available", false)); + assertThat(typeMap.size(), is(2)); + } + + // check native or internal realms enabled only + when(licenseState.isAuthAllowed()).thenReturn(true); + when(licenseState.allowedRealmType()).thenReturn(randomFrom(AllowedRealmType.NATIVE, AllowedRealmType.DEFAULT)); + usageStats = realms.usageStats(); + assertThat(usageStats.size(), is(factories.size())); + for (Entry entry : usageStats.entrySet()) { + final String type = entry.getKey(); + Map typeMap = (Map) entry.getValue(); + if (FileRealmSettings.TYPE.equals(type) || NativeRealmSettings.TYPE.equals(type)) { + assertThat(typeMap, hasEntry("enabled", true)); + assertThat(typeMap, hasEntry("available", true)); + assertThat((Iterable) typeMap.get("name"), contains("default_" + type)); + } else { + assertThat(typeMap, hasEntry("enabled", false)); + assertThat(typeMap, hasEntry("available", false)); + assertThat(typeMap.size(), is(2)); + } + } + } + + static class DummyRealm extends Realm { + + DummyRealm(String type, RealmConfig config) { + super(type, config); + } + + @Override + public boolean supports(AuthenticationToken token) { + return false; + } + + 
@Override + public AuthenticationToken token(ThreadContext threadContext) { + return null; + } + + @Override + public void authenticate(AuthenticationToken token, ActionListener listener) { + listener.onResponse(AuthenticationResult.notHandled()); + } + + @Override + public void lookupUser(String username, ActionListener listener) { + listener.onResponse(null); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RunAsIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RunAsIntegTests.java new file mode 100644 index 0000000000000..ce67b84134f56 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RunAsIntegTests.java @@ -0,0 +1,253 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc; + +import org.apache.http.message.BasicHeader; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.xpack.core.TestXPackTransportClient; +import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; +import org.elasticsearch.xpack.security.LocalStateSecurity; +import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.junit.BeforeClass; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.test.SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; + +public class RunAsIntegTests extends SecurityIntegTestCase { + + private static final String RUN_AS_USER = "run_as_user"; + private static final String TRANSPORT_CLIENT_USER = "transport_user"; + private static final String ROLES = + "run_as_role:\n" + + " run_as: [ '" + SecuritySettingsSource.TEST_USER_NAME + "', 'idontexist' ]\n"; + + // indicates whether the RUN_AS_USER that is being authenticated is also a superuser + private static boolean runAsHasSuperUserRole; + + @BeforeClass + public static void configureRunAsHasSuperUserRole() { + runAsHasSuperUserRole = randomBoolean(); + } + + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .build(); + } + + @Override + public String configRoles() { + return ROLES + super.configRoles(); + } + 
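+ // register the run_as and transport client test users with the standard hashed test password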
+ @Override + public String configUsers() { + return super.configUsers() + + RUN_AS_USER + ":" + SecuritySettingsSource.TEST_PASSWORD_HASHED + "\n" + + TRANSPORT_CLIENT_USER + ":" + SecuritySettingsSource.TEST_PASSWORD_HASHED + "\n"; + } + + @Override + public String configUsersRoles() { + String roles = super.configUsersRoles() + + "run_as_role:" + RUN_AS_USER + "\n" + + "transport_client:" + TRANSPORT_CLIENT_USER; + if (runAsHasSuperUserRole) { + roles = roles + "\n" + + "superuser:" + RUN_AS_USER; + } + return roles; + } + + @Override + protected boolean transportSSLEnabled() { + return false; + } + + public void testUserImpersonation() throws Exception { + try (TransportClient client = getTransportClient(Settings.builder() + .put(SecurityField.USER_SETTING.getKey(), TRANSPORT_CLIENT_USER + ":" + + SecuritySettingsSourceField.TEST_PASSWORD).build())) { + //ensure the client can connect + assertBusy(() -> assertThat(client.connectedNodes().size(), greaterThan(0))); + + // make sure the client can't get health + try { + client.admin().cluster().prepareHealth().get(); + fail("the client user should not have privileges to get the health"); + } catch (ElasticsearchSecurityException e) { + assertThat(e.getMessage(), containsString("unauthorized")); + } + + // let's run as without authorization + try { + Map headers = Collections.singletonMap(AuthenticationServiceField.RUN_AS_USER_HEADER, + SecuritySettingsSource.TEST_USER_NAME); + client.filterWithHeader(headers) + .admin().cluster().prepareHealth().get(); + fail("run as should be unauthorized for the transport client user"); + } catch (ElasticsearchSecurityException e) { + assertThat(e.getMessage(), containsString("unauthorized")); + assertThat(e.getMessage(), containsString("run as")); + } + + Map headers = new HashMap<>(); + headers.put("Authorization", UsernamePasswordToken.basicAuthHeaderValue(RUN_AS_USER, + new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()))); + headers.put(AuthenticationServiceField.RUN_AS_USER_HEADER, SecuritySettingsSource.TEST_USER_NAME); + // lets set the user + ClusterHealthResponse response = client.filterWithHeader(headers).admin().cluster().prepareHealth().get(); + assertThat(response.isTimedOut(), is(false)); + } + } + + public void testUserImpersonationUsingHttp() throws Exception { + // use the transport client user and try to run as + try { + getRestClient().performRequest("GET", "/_nodes", + new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + UsernamePasswordToken.basicAuthHeaderValue(TRANSPORT_CLIENT_USER, + TEST_PASSWORD_SECURE_STRING)), + new BasicHeader(AuthenticationServiceField.RUN_AS_USER_HEADER, SecuritySettingsSource.TEST_USER_NAME)); + fail("request should have failed"); + } catch(ResponseException e) { + assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); + } + + if (runAsHasSuperUserRole == false) { + try { + //the run as user shouldn't have access to the nodes api + getRestClient().performRequest("GET", "/_nodes", + new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + UsernamePasswordToken.basicAuthHeaderValue(RUN_AS_USER, + TEST_PASSWORD_SECURE_STRING))); + fail("request should have failed"); + } catch (ResponseException e) { + assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); + } + } + + // but when running as a different user it should work + Response response = getRestClient().performRequest("GET", "/_nodes", + new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + 
UsernamePasswordToken.basicAuthHeaderValue(RUN_AS_USER, + TEST_PASSWORD_SECURE_STRING)), + new BasicHeader(AuthenticationServiceField.RUN_AS_USER_HEADER, SecuritySettingsSource.TEST_USER_NAME)); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + } + + public void testEmptyUserImpersonationHeader() throws Exception { + try (TransportClient client = getTransportClient(Settings.builder() + .put(SecurityField.USER_SETTING.getKey(), TRANSPORT_CLIENT_USER + ":" + + SecuritySettingsSourceField.TEST_PASSWORD).build())) { + //ensure the client can connect + awaitBusy(() -> { + return client.connectedNodes().size() > 0; + }); + + try { + Map headers = new HashMap<>(); + headers.put("Authorization", UsernamePasswordToken.basicAuthHeaderValue(RUN_AS_USER, + new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()))); + headers.put(AuthenticationServiceField.RUN_AS_USER_HEADER, ""); + + client.filterWithHeader(headers).admin().cluster().prepareHealth().get(); + fail("run as header should not be allowed to be empty"); + } catch (ElasticsearchSecurityException e) { + assertThat(e.getMessage(), containsString("unable to authenticate")); + } + } + } + + public void testEmptyHeaderUsingHttp() throws Exception { + try { + getRestClient().performRequest("GET", "/_nodes", + new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + UsernamePasswordToken.basicAuthHeaderValue(RUN_AS_USER, + TEST_PASSWORD_SECURE_STRING)), + new BasicHeader(AuthenticationServiceField.RUN_AS_USER_HEADER, "")); + fail("request should have failed"); + } catch(ResponseException e) { + assertThat(e.getResponse().getStatusLine().getStatusCode(), is(401)); + } + } + + public void testNonExistentRunAsUser() throws Exception { + try (TransportClient client = getTransportClient(Settings.builder() + .put(SecurityField.USER_SETTING.getKey(), TRANSPORT_CLIENT_USER + ":" + + SecuritySettingsSourceField.TEST_PASSWORD).build())) { + //ensure the client can connect + awaitBusy(() -> { + return client.connectedNodes().size() > 0; + }); + + try { + Map headers = new HashMap<>(); + headers.put("Authorization", UsernamePasswordToken.basicAuthHeaderValue(RUN_AS_USER, + new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()))); + headers.put(AuthenticationServiceField.RUN_AS_USER_HEADER, "idontexist"); + + client.filterWithHeader(headers).admin().cluster().prepareHealth().get(); + fail("run as header should not accept non-existent users"); + } catch (ElasticsearchSecurityException e) { + assertThat(e.getMessage(), containsString("unauthorized")); + } + } + } + + public void testNonExistentRunAsUserUsingHttp() throws Exception { + try { + getRestClient().performRequest("GET", "/_nodes", + new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + UsernamePasswordToken.basicAuthHeaderValue(RUN_AS_USER, + TEST_PASSWORD_SECURE_STRING)), + new BasicHeader(AuthenticationServiceField.RUN_AS_USER_HEADER, "idontexist")); + fail("request should have failed"); + } catch (ResponseException e) { + assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); + } + } + + // build our own here to better mimic an actual client... 
+ TransportClient getTransportClient(Settings extraSettings) { + NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get(); + List nodes = nodeInfos.getNodes(); + assertTrue(nodes.isEmpty() == false); + TransportAddress publishAddress = randomFrom(nodes).getTransport().address().publishAddress(); + String clusterName = nodeInfos.getClusterName().value(); + + Settings settings = Settings.builder() + .put(extraSettings) + .put("cluster.name", clusterName) + .build(); + + return new TestXPackTransportClient(settings, LocalStateSecurity.class) + .addTransportAddress(publishAddress); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java new file mode 100644 index 0000000000000..a8a0f858d9c03 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java @@ -0,0 +1,363 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenResponse; +import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenRequest; +import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenResponse; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse; +import org.elasticsearch.xpack.core.security.authc.TokenMetaData; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.junit.After; +import org.junit.Before; + +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.Collections; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; +import static org.hamcrest.Matchers.equalTo; + 
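+// Integration tests for the token service: bootstrap on node join, key rotation, expired-token cleanup, and refresh-token semantics.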
+@TestLogging("org.elasticsearch.xpack.security.authz.store.FileRolesStore:DEBUG") +public class TokenAuthIntegTests extends SecurityIntegTestCase { + + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + // crank up the deletion interval and set timeout for delete requests + .put(TokenService.DELETE_INTERVAL.getKey(), TimeValue.timeValueSeconds(1L)) + .put(TokenService.DELETE_TIMEOUT.getKey(), TimeValue.timeValueSeconds(5L)) + .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true) + .build(); + } + + @Override + protected int maxNumberOfNodes() { + // we start one more node so we need to make sure if we hit max randomization we can still start one + return defaultMaxNumberOfNodes() + 1; + } + + public void testTokenServiceBootstrapOnNodeJoin() throws Exception { + final Client client = client(); + SecurityClient securityClient = new SecurityClient(client); + CreateTokenResponse response = securityClient.prepareCreateToken() + .setGrantType("password") + .setUsername(SecuritySettingsSource.TEST_USER_NAME) + .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) + .get(); + for (TokenService tokenService : internalCluster().getInstances(TokenService.class)) { + PlainActionFuture userTokenFuture = new PlainActionFuture<>(); + tokenService.decodeToken(response.getTokenString(), userTokenFuture); + assertNotNull(userTokenFuture.actionGet()); + } + // start a new node and see if it can decrypt the token + String nodeName = internalCluster().startNode(); + for (TokenService tokenService : internalCluster().getInstances(TokenService.class)) { + PlainActionFuture userTokenFuture = new PlainActionFuture<>(); + tokenService.decodeToken(response.getTokenString(), userTokenFuture); + assertNotNull(userTokenFuture.actionGet()); + } + + TokenService tokenService = internalCluster().getInstance(TokenService.class, nodeName); + PlainActionFuture userTokenFuture = new PlainActionFuture<>(); + tokenService.decodeToken(response.getTokenString(), userTokenFuture); + assertNotNull(userTokenFuture.actionGet()); + } + + + public void testTokenServiceCanRotateKeys() throws Exception { + final Client client = client(); + SecurityClient securityClient = new SecurityClient(client); + CreateTokenResponse response = securityClient.prepareCreateToken() + .setGrantType("password") + .setUsername(SecuritySettingsSource.TEST_USER_NAME) + .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) + .get(); + String masterName = internalCluster().getMasterName(); + TokenService masterTokenService = internalCluster().getInstance(TokenService.class, masterName); + String activeKeyHash = masterTokenService.getActiveKeyHash(); + for (TokenService tokenService : internalCluster().getInstances(TokenService.class)) { + PlainActionFuture userTokenFuture = new PlainActionFuture<>(); + tokenService.decodeToken(response.getTokenString(), userTokenFuture); + assertNotNull(userTokenFuture.actionGet()); + assertEquals(activeKeyHash, tokenService.getActiveKeyHash()); + } + client().admin().cluster().prepareHealth().execute().get(); + PlainActionFuture rotateActionFuture = new PlainActionFuture<>(); + logger.info("rotate on master: {}", masterName); + masterTokenService.rotateKeysOnMaster(rotateActionFuture); + assertTrue(rotateActionFuture.actionGet().isAcknowledged()); + assertNotEquals(activeKeyHash, masterTokenService.getActiveKeyHash()); + + for (TokenService tokenService : 
internalCluster().getInstances(TokenService.class)) { + PlainActionFuture userTokenFuture = new PlainActionFuture<>(); + tokenService.decodeToken(response.getTokenString(), userTokenFuture); + assertNotNull(userTokenFuture.actionGet()); + assertNotEquals(activeKeyHash, tokenService.getActiveKeyHash()); + } + } + + @TestLogging("org.elasticsearch.xpack.security.authc:DEBUG") + public void testExpiredTokensDeletedAfterExpiration() throws Exception { + final Client client = client().filterWithHeader(Collections.singletonMap("Authorization", + UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); + SecurityClient securityClient = new SecurityClient(client); + CreateTokenResponse response = securityClient.prepareCreateToken() + .setGrantType("password") + .setUsername(SecuritySettingsSource.TEST_USER_NAME) + .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) + .get(); + + Instant created = Instant.now(); + + InvalidateTokenResponse invalidateResponse = securityClient + .prepareInvalidateToken(response.getTokenString()) + .setType(InvalidateTokenRequest.Type.ACCESS_TOKEN) + .get(); + assertTrue(invalidateResponse.isCreated()); + AtomicReference docId = new AtomicReference<>(); + assertBusy(() -> { + SearchResponse searchResponse = client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME) + .setSource(SearchSourceBuilder.searchSource() + .query(QueryBuilders.termQuery("doc_type", TokenService.INVALIDATED_TOKEN_DOC_TYPE))) + .setSize(1) + .setTerminateAfter(1) + .get(); + assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); + docId.set(searchResponse.getHits().getAt(0).getId()); + }); + + // hack doc to modify the time to the day before + Instant dayBefore = created.minus(1L, ChronoUnit.DAYS); + assertTrue(Instant.now().isAfter(dayBefore)); + client.prepareUpdate(SecurityLifecycleService.SECURITY_INDEX_NAME, "doc", docId.get()) + .setDoc("expiration_time", dayBefore.toEpochMilli()) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + + AtomicBoolean deleteTriggered = new AtomicBoolean(false); + assertBusy(() -> { + if (deleteTriggered.compareAndSet(false, true)) { + // invalidate a invalid token... doesn't matter that it is bad... 
we just want this action to trigger the deletion + try { + securityClient.prepareInvalidateToken("fooobar") + .setType(randomFrom(InvalidateTokenRequest.Type.values())) + .execute() + .actionGet(); + } catch (ElasticsearchSecurityException e) { + assertEquals("token malformed", e.getMessage()); + } + } + client.admin().indices().prepareRefresh(SecurityLifecycleService.SECURITY_INDEX_NAME).get(); + SearchResponse searchResponse = client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME) + .setSource(SearchSourceBuilder.searchSource() + .query(QueryBuilders.termQuery("doc_type", TokenService.INVALIDATED_TOKEN_DOC_TYPE))) + .setSize(0) + .setTerminateAfter(1) + .get(); + assertThat(searchResponse.getHits().getTotalHits(), equalTo(0L)); + }, 30, TimeUnit.SECONDS); + } + + public void testExpireMultipleTimes() { + CreateTokenResponse response = securityClient().prepareCreateToken() + .setGrantType("password") + .setUsername(SecuritySettingsSource.TEST_USER_NAME) + .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) + .get(); + + InvalidateTokenResponse invalidateResponse = securityClient() + .prepareInvalidateToken(response.getTokenString()) + .setType(InvalidateTokenRequest.Type.ACCESS_TOKEN) + .get(); + assertTrue(invalidateResponse.isCreated()); + assertFalse(securityClient() + .prepareInvalidateToken(response.getTokenString()) + .setType(InvalidateTokenRequest.Type.ACCESS_TOKEN) + .get() + .isCreated()); + } + + public void testRefreshingToken() { + Client client = client().filterWithHeader(Collections.singletonMap("Authorization", + UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_USER_NAME, + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); + SecurityClient securityClient = new SecurityClient(client); + CreateTokenResponse createTokenResponse = securityClient.prepareCreateToken() + .setGrantType("password") + .setUsername(SecuritySettingsSource.TEST_USER_NAME) + .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) + .get(); + assertNotNull(createTokenResponse.getRefreshToken()); + // get cluster health with token + assertNoTimeout(client() + .filterWithHeader(Collections.singletonMap("Authorization", "Bearer " + createTokenResponse.getTokenString())) + .admin().cluster().prepareHealth().get()); + + CreateTokenResponse refreshResponse = securityClient.prepareRefreshToken(createTokenResponse.getRefreshToken()).get(); + assertNotNull(refreshResponse.getRefreshToken()); + assertNotEquals(refreshResponse.getRefreshToken(), createTokenResponse.getRefreshToken()); + assertNotEquals(refreshResponse.getTokenString(), createTokenResponse.getTokenString()); + + assertNoTimeout(client().filterWithHeader(Collections.singletonMap("Authorization", "Bearer " + refreshResponse.getTokenString())) + .admin().cluster().prepareHealth().get()); + } + + public void testRefreshingInvalidatedToken() { + Client client = client().filterWithHeader(Collections.singletonMap("Authorization", + UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_USER_NAME, + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); + SecurityClient securityClient = new SecurityClient(client); + CreateTokenResponse createTokenResponse = securityClient.prepareCreateToken() + .setGrantType("password") + .setUsername(SecuritySettingsSource.TEST_USER_NAME) + .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) + .get(); + assertNotNull(createTokenResponse.getRefreshToken()); 
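+ // invalidate the refresh token, then verify that using it to refresh is rejected as invalid_grant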
+ InvalidateTokenResponse invalidateResponse = securityClient + .prepareInvalidateToken(createTokenResponse.getRefreshToken()) + .setType(InvalidateTokenRequest.Type.REFRESH_TOKEN) + .get(); + assertTrue(invalidateResponse.isCreated()); + + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, + () -> securityClient.prepareRefreshToken(createTokenResponse.getRefreshToken()).get()); + assertEquals("invalid_grant", e.getMessage()); + assertEquals(RestStatus.BAD_REQUEST, e.status()); + assertEquals("token has been invalidated", e.getHeader("error_description").get(0)); + } + + public void testRefreshingMultipleTimes() { + Client client = client().filterWithHeader(Collections.singletonMap("Authorization", + UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_USER_NAME, + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); + SecurityClient securityClient = new SecurityClient(client); + CreateTokenResponse createTokenResponse = securityClient.prepareCreateToken() + .setGrantType("password") + .setUsername(SecuritySettingsSource.TEST_USER_NAME) + .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) + .get(); + assertNotNull(createTokenResponse.getRefreshToken()); + CreateTokenResponse refreshResponse = securityClient.prepareRefreshToken(createTokenResponse.getRefreshToken()).get(); + assertNotNull(refreshResponse); + + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, + () -> securityClient.prepareRefreshToken(createTokenResponse.getRefreshToken()).get()); + assertEquals("invalid_grant", e.getMessage()); + assertEquals(RestStatus.BAD_REQUEST, e.status()); + assertEquals("token has already been refreshed", e.getHeader("error_description").get(0)); + } + + public void testRefreshAsDifferentUser() { + Client client = client().filterWithHeader(Collections.singletonMap("Authorization", + UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_USER_NAME, + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); + SecurityClient securityClient = new SecurityClient(client); + CreateTokenResponse createTokenResponse = securityClient.prepareCreateToken() + .setGrantType("password") + .setUsername(SecuritySettingsSource.TEST_USER_NAME) + .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) + .get(); + assertNotNull(createTokenResponse.getRefreshToken()); + + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, + () -> new SecurityClient(client() + .filterWithHeader(Collections.singletonMap("Authorization", + UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)))) + .prepareRefreshToken(createTokenResponse.getRefreshToken()).get()); + assertEquals("invalid_grant", e.getMessage()); + assertEquals(RestStatus.BAD_REQUEST, e.status()); + assertEquals("tokens must be refreshed by the creating client", e.getHeader("error_description").get(0)); + } + + public void testCreateThenRefreshAsDifferentUser() { + Client client = client().filterWithHeader(Collections.singletonMap("Authorization", + UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER, + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))); + SecurityClient securityClient = new SecurityClient(client); + CreateTokenResponse createTokenResponse = securityClient.prepareCreateToken() + .setGrantType("password") + 
.setUsername(SecuritySettingsSource.TEST_USER_NAME) + .setPassword(new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())) + .get(); + assertNotNull(createTokenResponse.getRefreshToken()); + + CreateTokenResponse refreshResponse = securityClient.prepareRefreshToken(createTokenResponse.getRefreshToken()).get(); + assertNotEquals(refreshResponse.getTokenString(), createTokenResponse.getTokenString()); + assertNotEquals(refreshResponse.getRefreshToken(), createTokenResponse.getRefreshToken()); + + PlainActionFuture authFuture = new PlainActionFuture<>(); + AuthenticateRequest request = new AuthenticateRequest(); + request.username(SecuritySettingsSource.TEST_SUPERUSER); + client.execute(AuthenticateAction.INSTANCE, request, authFuture); + AuthenticateResponse response = authFuture.actionGet(); + assertEquals(SecuritySettingsSource.TEST_SUPERUSER, response.user().principal()); + + authFuture = new PlainActionFuture<>(); + request = new AuthenticateRequest(); + request.username(SecuritySettingsSource.TEST_USER_NAME); + client.filterWithHeader(Collections.singletonMap("Authorization", "Bearer " + createTokenResponse.getTokenString())) + .execute(AuthenticateAction.INSTANCE, request, authFuture); + response = authFuture.actionGet(); + assertEquals(SecuritySettingsSource.TEST_USER_NAME, response.user().principal()); + + authFuture = new PlainActionFuture<>(); + request = new AuthenticateRequest(); + request.username(SecuritySettingsSource.TEST_USER_NAME); + client.filterWithHeader(Collections.singletonMap("Authorization", "Bearer " + refreshResponse.getTokenString())) + .execute(AuthenticateAction.INSTANCE, request, authFuture); + response = authFuture.actionGet(); + assertEquals(SecuritySettingsSource.TEST_USER_NAME, response.user().principal()); + } + + @Before + public void waitForSecurityIndexWritable() throws Exception { + assertSecurityIndexActive(); + } + + @After + public void wipeSecurityIndex() throws InterruptedException { + // get the token service and wait until token expiration is not in progress! + for (TokenService tokenService : internalCluster().getInstances(TokenService.class)) { + final boolean done = awaitBusy(() -> tokenService.isExpirationInProgress() == false); + assertTrue(done); + } + super.deleteSecurityIndex(); + } + + public void testMetadataIsNotSentToClient() { + ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().setCustoms(true).get(); + assertFalse(clusterStateResponse.getState().customs().containsKey(TokenMetaData.TYPE)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java new file mode 100644 index 0000000000000..9b40187394122 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -0,0 +1,611 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.NoShardAvailableActionException; +import org.elasticsearch.action.get.GetAction; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetRequestBuilder; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.get.MultiGetAction; +import org.elasticsearch.action.get.MultiGetItemResponse; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.get.MultiGetRequestBuilder; +import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.index.IndexAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.update.UpdateAction; +import org.elasticsearch.action.update.UpdateRequestBuilder; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.elasticsearch.threadpool.FixedExecutorBuilder; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import org.elasticsearch.xpack.core.security.authc.TokenMetaData; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.watcher.watch.ClockMock; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import javax.crypto.SecretKey; + +import java.io.IOException; +import java.time.Clock; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.Base64; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Consumer; + +import static java.time.Clock.systemUTC; +import static org.elasticsearch.repositories.ESBlobStoreTestCase.randomBytes; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TokenServiceTests extends ESTestCase { + + private static ThreadPool threadPool; + private 
static final Settings settings = Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "TokenServiceTests") + .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true).build(); + + private Client client; + private SecurityLifecycleService lifecycleService; + private ClusterService clusterService; + private Settings tokenServiceEnabledSettings = Settings.builder() + .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true).build(); + + @Before + public void setupClient() { + client = mock(Client.class); + when(client.threadPool()).thenReturn(threadPool); + when(client.settings()).thenReturn(settings); + doAnswer(invocationOnMock -> { + GetRequestBuilder builder = new GetRequestBuilder(client, GetAction.INSTANCE); + builder.setIndex((String) invocationOnMock.getArguments()[0]) + .setType((String) invocationOnMock.getArguments()[1]) + .setId((String) invocationOnMock.getArguments()[2]); + return builder; + }).when(client).prepareGet(anyString(), anyString(), anyString()); + when(client.prepareMultiGet()).thenReturn(new MultiGetRequestBuilder(client, MultiGetAction.INSTANCE)); + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + MultiGetResponse response = mock(MultiGetResponse.class); + MultiGetItemResponse[] responses = new MultiGetItemResponse[2]; + when(response.getResponses()).thenReturn(responses); + + GetResponse oldGetResponse = mock(GetResponse.class); + when(oldGetResponse.isExists()).thenReturn(false); + responses[0] = new MultiGetItemResponse(oldGetResponse, null); + + GetResponse getResponse = mock(GetResponse.class); + responses[1] = new MultiGetItemResponse(getResponse, null); + when(getResponse.isExists()).thenReturn(false); + listener.onResponse(response); + return Void.TYPE; + }).when(client).multiGet(any(MultiGetRequest.class), any(ActionListener.class)); + when(client.prepareIndex(any(String.class), any(String.class), any(String.class))) + .thenReturn(new IndexRequestBuilder(client, IndexAction.INSTANCE)); + when(client.prepareUpdate(any(String.class), any(String.class), any(String.class))) + .thenReturn(new UpdateRequestBuilder(client, UpdateAction.INSTANCE)); + doAnswer(invocationOnMock -> { + ActionListener responseActionListener = (ActionListener) invocationOnMock.getArguments()[2]; + responseActionListener.onResponse(new IndexResponse()); + return null; + }).when(client).execute(eq(IndexAction.INSTANCE), any(IndexRequest.class), any(ActionListener.class)); + + // setup lifecycle service + lifecycleService = mock(SecurityLifecycleService.class); + doAnswer(invocationOnMock -> { + Runnable runnable = (Runnable) invocationOnMock.getArguments()[1]; + runnable.run(); + return null; + }).when(lifecycleService).prepareIndexIfNeededThenExecute(any(Consumer.class), any(Runnable.class)); + this.clusterService = ClusterServiceUtils.createClusterService(threadPool); + } + + @BeforeClass + public static void startThreadPool() throws IOException { + threadPool = new ThreadPool(settings, + new FixedExecutorBuilder(settings, TokenService.THREAD_POOL_NAME, 1, 1000, "xpack.security.authc.token.thread_pool")); + new Authentication(new User("foo"), new RealmRef("realm", "type", "node"), null).writeToContext(threadPool.getThreadContext()); + } + + @AfterClass + public static void shutdownThreadpool() throws InterruptedException { + terminate(threadPool); + threadPool = null; + } + + public void testAttachAndGetToken() throws Exception { + TokenService tokenService = new TokenService(tokenServiceEnabledSettings, 
systemUTC(), client, lifecycleService, clusterService); + Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + final UserToken token = tokenFuture.get().v1(); + assertNotNull(token); + mockGetTokenFromId(token); + + ThreadContext requestContext = new ThreadContext(Settings.EMPTY); + requestContext.putHeader("Authorization", "Bearer " + tokenService.getUserTokenString(token)); + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { + PlainActionFuture future = new PlainActionFuture<>(); + tokenService.getAndValidateToken(requestContext, future); + UserToken serialized = future.get(); + assertEquals(authentication, serialized.getAuthentication()); + } + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { + // verify a second separate token service with its own salt can also verify + TokenService anotherService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, lifecycleService + , clusterService); + anotherService.refreshMetaData(tokenService.getTokenMetaData()); + PlainActionFuture future = new PlainActionFuture<>(); + anotherService.getAndValidateToken(requestContext, future); + UserToken fromOtherService = future.get(); + assertEquals(authentication, fromOtherService.getAuthentication()); + } + } + + public void testRotateKey() throws Exception { + TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, lifecycleService, clusterService); + Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + final UserToken token = tokenFuture.get().v1(); + assertNotNull(token); + mockGetTokenFromId(token); + + ThreadContext requestContext = new ThreadContext(Settings.EMPTY); + requestContext.putHeader("Authorization", "Bearer " + tokenService.getUserTokenString(token)); + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { + PlainActionFuture future = new PlainActionFuture<>(); + tokenService.getAndValidateToken(requestContext, future); + UserToken serialized = future.get(); + assertEquals(authentication, serialized.getAuthentication()); + } + rotateKeys(tokenService); + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { + PlainActionFuture future = new PlainActionFuture<>(); + tokenService.getAndValidateToken(requestContext, future); + UserToken serialized = future.get(); + assertEquals(authentication, serialized.getAuthentication()); + } + + PlainActionFuture> newTokenFuture = new PlainActionFuture<>(); + tokenService.createUserToken(authentication, authentication, newTokenFuture, Collections.emptyMap()); + final UserToken newToken = newTokenFuture.get().v1(); + assertNotNull(newToken); + assertNotEquals(tokenService.getUserTokenString(newToken), tokenService.getUserTokenString(token)); + + requestContext = new ThreadContext(Settings.EMPTY); + requestContext.putHeader("Authorization", "Bearer " + tokenService.getUserTokenString(newToken)); + mockGetTokenFromId(newToken); + + try (ThreadContext.StoredContext ignore = 
requestContext.newStoredContext(true)) { + PlainActionFuture future = new PlainActionFuture<>(); + tokenService.getAndValidateToken(requestContext, future); + UserToken serialized = future.get(); + assertEquals(authentication, serialized.getAuthentication()); + } + } + + private void rotateKeys(TokenService tokenService) { + TokenMetaData tokenMetaData = tokenService.generateSpareKey(); + tokenService.refreshMetaData(tokenMetaData); + tokenMetaData = tokenService.rotateToSpareKey(); + tokenService.refreshMetaData(tokenMetaData); + } + + public void testKeyExchange() throws Exception { + TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, lifecycleService, clusterService); + int numRotations = randomIntBetween(1, 5); + for (int i = 0; i < numRotations; i++) { + rotateKeys(tokenService); + } + TokenService otherTokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, lifecycleService, + clusterService); + otherTokenService.refreshMetaData(tokenService.getTokenMetaData()); + Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + final UserToken token = tokenFuture.get().v1(); + assertNotNull(token); + mockGetTokenFromId(token); + + ThreadContext requestContext = new ThreadContext(Settings.EMPTY); + requestContext.putHeader("Authorization", "Bearer " + tokenService.getUserTokenString(token)); + try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { + PlainActionFuture future = new PlainActionFuture<>(); + otherTokenService.getAndValidateToken(requestContext, future); + UserToken serialized = future.get(); + assertEquals(authentication, serialized.getAuthentication()); + } + + rotateKeys(tokenService); + + otherTokenService.refreshMetaData(tokenService.getTokenMetaData()); + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { + PlainActionFuture future = new PlainActionFuture<>(); + otherTokenService.getAndValidateToken(requestContext, future); + UserToken serialized = future.get(); + assertEquals(authentication, serialized.getAuthentication()); + } + } + + public void testPruneKeys() throws Exception { + TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, lifecycleService, clusterService); + Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + final UserToken token = tokenFuture.get().v1(); + assertNotNull(token); + mockGetTokenFromId(token); + + ThreadContext requestContext = new ThreadContext(Settings.EMPTY); + requestContext.putHeader("Authorization", "Bearer " + tokenService.getUserTokenString(token)); + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { + PlainActionFuture future = new PlainActionFuture<>(); + tokenService.getAndValidateToken(requestContext, future); + UserToken serialized = future.get(); + assertEquals(authentication, serialized.getAuthentication()); + } + TokenMetaData metaData = tokenService.pruneKeys(randomIntBetween(0, 100)); + tokenService.refreshMetaData(metaData); + + int
numIterations = scaledRandomIntBetween(1, 5); + for (int i = 0; i < numIterations; i++) { + rotateKeys(tokenService); + } + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { + PlainActionFuture future = new PlainActionFuture<>(); + tokenService.getAndValidateToken(requestContext, future); + UserToken serialized = future.get(); + assertEquals(authentication, serialized.getAuthentication()); + } + + PlainActionFuture> newTokenFuture = new PlainActionFuture<>(); + tokenService.createUserToken(authentication, authentication, newTokenFuture, Collections.emptyMap()); + final UserToken newToken = newTokenFuture.get().v1(); + assertNotNull(newToken); + assertNotEquals(tokenService.getUserTokenString(newToken), tokenService.getUserTokenString(token)); + + metaData = tokenService.pruneKeys(1); + tokenService.refreshMetaData(metaData); + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { + PlainActionFuture future = new PlainActionFuture<>(); + tokenService.getAndValidateToken(requestContext, future); + UserToken serialized = future.get(); + assertNull(serialized); + } + + requestContext = new ThreadContext(Settings.EMPTY); + requestContext.putHeader("Authorization", "Bearer " + tokenService.getUserTokenString(newToken)); + mockGetTokenFromId(newToken); + try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { + PlainActionFuture future = new PlainActionFuture<>(); + tokenService.getAndValidateToken(requestContext, future); + UserToken serialized = future.get(); + assertEquals(authentication, serialized.getAuthentication()); + } + + } + + public void testPassphraseWorks() throws Exception { + TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, lifecycleService, clusterService); + Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + final UserToken token = tokenFuture.get().v1(); + assertNotNull(token); + mockGetTokenFromId(token); + + ThreadContext requestContext = new ThreadContext(Settings.EMPTY); + requestContext.putHeader("Authorization", "Bearer " + tokenService.getUserTokenString(token)); + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { + PlainActionFuture future = new PlainActionFuture<>(); + tokenService.getAndValidateToken(requestContext, future); + UserToken serialized = future.get(); + assertEquals(authentication, serialized.getAuthentication()); + } + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { + // verify a second separate token service with its own passphrase cannot verify + TokenService anotherService = new TokenService(Settings.EMPTY, systemUTC(), client, lifecycleService, + clusterService); + PlainActionFuture future = new PlainActionFuture<>(); + anotherService.getAndValidateToken(requestContext, future); + assertNull(future.get()); + } + } + + public void testGetTokenWhenKeyCacheHasExpired() throws Exception { + TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, lifecycleService, clusterService); + Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); + + PlainActionFuture> tokenFuture = new 
PlainActionFuture<>(); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + UserToken token = tokenFuture.get().v1(); + assertThat(tokenService.getUserTokenString(token), notNullValue()); + + tokenService.clearActiveKeyCache(); + assertThat(tokenService.getUserTokenString(token), notNullValue()); + } + + public void testInvalidatedToken() throws Exception { + when(lifecycleService.isSecurityIndexExisting()).thenReturn(true); + TokenService tokenService = + new TokenService(tokenServiceEnabledSettings, systemUTC(), client, lifecycleService, clusterService); + Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + final UserToken token = tokenFuture.get().v1(); + assertNotNull(token); + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + MultiGetResponse response = mock(MultiGetResponse.class); + MultiGetItemResponse[] responses = new MultiGetItemResponse[2]; + when(response.getResponses()).thenReturn(responses); + + final boolean newExpired = randomBoolean(); + GetResponse oldGetResponse = mock(GetResponse.class); + when(oldGetResponse.isExists()).thenReturn(newExpired == false); + responses[0] = new MultiGetItemResponse(oldGetResponse, null); + + GetResponse getResponse = mock(GetResponse.class); + responses[1] = new MultiGetItemResponse(getResponse, null); + when(getResponse.isExists()).thenReturn(newExpired); + if (newExpired) { + Map source = MapBuilder.newMapBuilder() + .put("access_token", Collections.singletonMap("invalidated", true)) + .immutableMap(); + when(getResponse.getSource()).thenReturn(source); + } + listener.onResponse(response); + return Void.TYPE; + }).when(client).multiGet(any(MultiGetRequest.class), any(ActionListener.class)); + + ThreadContext requestContext = new ThreadContext(Settings.EMPTY); + requestContext.putHeader("Authorization", "Bearer " + tokenService.getUserTokenString(token)); + mockGetTokenFromId(token); + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { + PlainActionFuture future = new PlainActionFuture<>(); + tokenService.getAndValidateToken(requestContext, future); + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, future::actionGet); + final String headerValue = e.getHeader("WWW-Authenticate").get(0); + assertThat(headerValue, containsString("Bearer realm=")); + assertThat(headerValue, containsString("expired")); + } + } + + public void testComputeSecretKeyIsConsistent() throws Exception { + byte[] saltArr = new byte[32]; + random().nextBytes(saltArr); + SecretKey key = TokenService.computeSecretKey("some random passphrase".toCharArray(), saltArr); + SecretKey key2 = TokenService.computeSecretKey("some random passphrase".toCharArray(), saltArr); + assertArrayEquals(key.getEncoded(), key2.getEncoded()); + } + + public void testTokenExpiry() throws Exception { + ClockMock clock = ClockMock.frozen(); + TokenService tokenService = new TokenService(tokenServiceEnabledSettings, clock, client, lifecycleService, clusterService); + Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + 
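+ // the clock is frozen, so the token's creation time stays fixed for the expiry checks below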
tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + final UserToken token = tokenFuture.get().v1(); + mockGetTokenFromId(token); + + ThreadContext requestContext = new ThreadContext(Settings.EMPTY); + requestContext.putHeader("Authorization", "Bearer " + tokenService.getUserTokenString(token)); + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { + // the clock is still frozen, so the cookie should be valid + PlainActionFuture future = new PlainActionFuture<>(); + tokenService.getAndValidateToken(requestContext, future); + assertEquals(authentication, future.get().getAuthentication()); + } + + final TimeValue defaultExpiration = TokenService.TOKEN_EXPIRATION.get(Settings.EMPTY); + final int fastForwardAmount = randomIntBetween(1, Math.toIntExact(defaultExpiration.getSeconds())); + try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { + // move the clock forward but don't go to expiry + clock.fastForwardSeconds(fastForwardAmount); + PlainActionFuture future = new PlainActionFuture<>(); + tokenService.getAndValidateToken(requestContext, future); + assertEquals(authentication, future.get().getAuthentication()); + } + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { + // move to expiry + clock.fastForwardSeconds(Math.toIntExact(defaultExpiration.getSeconds()) - fastForwardAmount); + clock.rewind(TimeValue.timeValueNanos(clock.instant().getNano())); // trim off nanoseconds since don't store them in the index + PlainActionFuture future = new PlainActionFuture<>(); + tokenService.getAndValidateToken(requestContext, future); + assertEquals(authentication, future.get().getAuthentication()); + } + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { + // move one second past expiry + clock.fastForwardSeconds(1); + PlainActionFuture future = new PlainActionFuture<>(); + tokenService.getAndValidateToken(requestContext, future); + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, future::actionGet); + final String headerValue = e.getHeader("WWW-Authenticate").get(0); + assertThat(headerValue, containsString("Bearer realm=")); + assertThat(headerValue, containsString("expired")); + } + } + + public void testTokenServiceDisabled() throws Exception { + TokenService tokenService = new TokenService(Settings.builder() + .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), false) + .build(), + Clock.systemUTC(), client, lifecycleService, clusterService); + IllegalStateException e = expectThrows(IllegalStateException.class, () -> tokenService.createUserToken(null, null, null, null)); + assertEquals("tokens are not enabled", e.getMessage()); + + PlainActionFuture future = new PlainActionFuture<>(); + tokenService.getAndValidateToken(null, future); + assertNull(future.get()); + + e = expectThrows(IllegalStateException.class, () -> { + PlainActionFuture invalidateFuture = new PlainActionFuture<>(); + tokenService.invalidateAccessToken((String) null, invalidateFuture); + invalidateFuture.actionGet(); + }); + assertEquals("tokens are not enabled", e.getMessage()); + } + + public void testBytesKeyEqualsHashCode() { + final int dataLength = randomIntBetween(2, 32); + final byte[] data = randomBytes(dataLength); + BytesKey bytesKey = new BytesKey(data); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(bytesKey, (b) -> new BytesKey(b.bytes.clone()), (b) -> { + final byte[] copy = b.bytes.clone(); + 
final int randomlyChangedValue = randomIntBetween(0, copy.length - 1); + final byte original = copy[randomlyChangedValue]; + boolean loop; + do { + byte value = randomByte(); + if (value == original) { + loop = true; + } else { + loop = false; + copy[randomlyChangedValue] = value; + } + } while (loop); + return new BytesKey(copy); + }); + } + + public void testMalformedToken() throws Exception { + final int numBytes = randomIntBetween(1, TokenService.MINIMUM_BYTES + 32); + final byte[] randomBytes = new byte[numBytes]; + random().nextBytes(randomBytes); + TokenService tokenService = new TokenService(Settings.EMPTY, systemUTC(), client, lifecycleService, clusterService); + + ThreadContext requestContext = new ThreadContext(Settings.EMPTY); + requestContext.putHeader("Authorization", "Bearer " + Base64.getEncoder().encodeToString(randomBytes)); + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { + PlainActionFuture future = new PlainActionFuture<>(); + tokenService.getAndValidateToken(requestContext, future); + assertNull(future.get()); + } + } + + public void testIndexNotAvailable() throws Exception { + TokenService tokenService = + new TokenService(tokenServiceEnabledSettings, systemUTC(), client, lifecycleService, clusterService); + Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); + final UserToken token = tokenFuture.get().v1(); + assertNotNull(token); + mockGetTokenFromId(token); + + ThreadContext requestContext = new ThreadContext(Settings.EMPTY); + requestContext.putHeader("Authorization", "Bearer " + tokenService.getUserTokenString(token)); + + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onFailure(new NoShardAvailableActionException(new ShardId(new Index("foo", "uuid"), 0), "shard oh shard")); + return Void.TYPE; + }).when(client).multiGet(any(MultiGetRequest.class), any(ActionListener.class)); + + try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { + PlainActionFuture future = new PlainActionFuture<>(); + tokenService.getAndValidateToken(requestContext, future); + UserToken serialized = future.get(); + assertEquals(authentication, serialized.getAuthentication()); + + when(lifecycleService.isSecurityIndexAvailable()).thenReturn(false); + when(lifecycleService.isSecurityIndexExisting()).thenReturn(true); + future = new PlainActionFuture<>(); + tokenService.getAndValidateToken(requestContext, future); + assertNull(future.get()); + } + } + + public void testGetAuthenticationWorksWithExpiredToken() throws Exception { + TokenService tokenService = + new TokenService(tokenServiceEnabledSettings, Clock.systemUTC(), client, lifecycleService, clusterService); + Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); + UserToken expired = new UserToken(authentication, Instant.now().minus(3L, ChronoUnit.DAYS)); + mockGetTokenFromId(expired); + String userTokenString = tokenService.getUserTokenString(expired); + PlainActionFuture>> authFuture = new PlainActionFuture<>(); + tokenService.getAuthenticationAndMetaData(userTokenString, authFuture); + Authentication retrievedAuth = authFuture.actionGet().v1(); + assertEquals(authentication, 
retrievedAuth); + } + + private void mockGetTokenFromId(UserToken userToken) { + mockGetTokenFromId(userToken, client); + } + + public static void mockGetTokenFromId(UserToken userToken, Client client) { + doAnswer(invocationOnMock -> { + GetRequest getRequest = (GetRequest) invocationOnMock.getArguments()[0]; + ActionListener getResponseListener = (ActionListener) invocationOnMock.getArguments()[1]; + GetResponse getResponse = mock(GetResponse.class); + if (userToken.getId().equals(getRequest.id().replace("token_", ""))) { + when(getResponse.isExists()).thenReturn(true); + Map sourceMap = new HashMap<>(); + try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + userToken.toXContent(builder, ToXContent.EMPTY_PARAMS); + sourceMap.put("access_token", + Collections.singletonMap("user_token", + XContentHelper.convertToMap(XContentType.JSON.xContent(), Strings.toString(builder), false))); + } + when(getResponse.getSource()).thenReturn(sourceMap); + } + getResponseListener.onResponse(getResponse); + return Void.TYPE; + }).when(client).get(any(GetRequest.class), any(ActionListener.class)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/UserTokenTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/UserTokenTests.java new file mode 100644 index 0000000000000..1a8f8dc3b5d2d --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/UserTokenTests.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc; + +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import org.elasticsearch.xpack.core.security.user.User; + +import java.io.IOException; +import java.time.Clock; +import java.time.Instant; + +public class UserTokenTests extends ESTestCase { + + public void testSerialization() throws IOException { + final Authentication authentication = new Authentication(new User("joe", "a role"), new RealmRef("realm", "native", "node1"), null); + final int seconds = randomIntBetween(0, Math.toIntExact(TimeValue.timeValueMinutes(30L).getSeconds())); + final Instant expirationTime = Clock.systemUTC().instant().plusSeconds(seconds); + final UserToken userToken = new UserToken(authentication, expirationTime); + + BytesStreamOutput output = new BytesStreamOutput(); + userToken.writeTo(output); + + final UserToken serialized = new UserToken(output.bytes().streamInput()); + assertEquals(authentication, serialized.getAuthentication()); + assertEquals(expirationTime, serialized.getExpirationTime()); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java new file mode 100644 index 0000000000000..ebe6b6abf1860 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java @@ -0,0 +1,147 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.esnative; + +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.NativeRealmIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.junit.BeforeClass; + +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.util.HashSet; +import java.util.Set; + +import static org.hamcrest.Matchers.is; + +/** + * Integration tests for the {@code ESNativeMigrateTool} + */ +public class ESNativeMigrateToolTests extends NativeRealmIntegTestCase { + + // Randomly use SSL (or not) + private static boolean useSSL; + + @BeforeClass + public static void setSSL() { + useSSL = randomBoolean(); + } + + @Override + public Settings nodeSettings(int nodeOrdinal) { + logger.info("--> use SSL? 
{}", useSSL); + Settings s = Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .put("xpack.security.http.ssl.enabled", useSSL) + .build(); + return s; + } + + @Override + protected boolean transportSSLEnabled() { + return useSSL; + } + + @Override + protected boolean shouldSetReservedUserPasswords() { + return false; + } + + private Environment nodeEnvironment() throws Exception { + return internalCluster().getInstances(Environment.class).iterator().next(); + } + + public void testRetrieveUsers() throws Exception { + final Environment nodeEnvironment = nodeEnvironment(); + String home = Environment.PATH_HOME_SETTING.get(nodeEnvironment.settings()); + Path conf = nodeEnvironment.configFile(); + SecurityClient c = new SecurityClient(client()); + logger.error("--> creating users"); + int numToAdd = randomIntBetween(1,10); + Set addedUsers = new HashSet(numToAdd); + for (int i = 0; i < numToAdd; i++) { + String uname = randomAlphaOfLength(5); + c.preparePutUser(uname, "s3kirt".toCharArray(), "role1", "user").get(); + addedUsers.add(uname); + } + logger.error("--> waiting for .security index"); + ensureGreen(SecurityLifecycleService.SECURITY_INDEX_NAME); + + MockTerminal t = new MockTerminal(); + String username = nodeClientUsername(); + String password = new String(CharArrays.toUtf8Bytes(nodeClientPassword().getChars()), StandardCharsets.UTF_8); + String url = getHttpURL(); + ESNativeRealmMigrateTool.MigrateUserOrRoles muor = new ESNativeRealmMigrateTool.MigrateUserOrRoles(); + + Settings.Builder builder = Settings.builder() + .put("path.home", home) + .put("path.conf", conf.toString()); + SecuritySettingsSource.addSSLSettingsForStore(builder, + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks", "testnode"); + Settings settings = builder.build(); + logger.error("--> retrieving users using URL: {}, home: {}", url, home); + + OptionParser parser = muor.getParser(); + OptionSet options = parser.parse("-u", username, "-p", password, "-U", url); + logger.info("--> options: {}", options.asMap()); + Set users = muor.getUsersThatExist(t, settings, new Environment(settings, conf), options); + logger.info("--> output: \n{}", t.getOutput()); + for (String u : addedUsers) { + assertThat("expected list to contain: " + u + ", real list: " + users, users.contains(u), is(true)); + } + } + + public void testRetrieveRoles() throws Exception { + final Environment nodeEnvironment = nodeEnvironment(); + String home = Environment.PATH_HOME_SETTING.get(nodeEnvironment.settings()); + Path conf = nodeEnvironment.configFile(); + SecurityClient c = new SecurityClient(client()); + logger.error("--> creating roles"); + int numToAdd = randomIntBetween(1,10); + Set addedRoles = new HashSet<>(numToAdd); + for (int i = 0; i < numToAdd; i++) { + String rname = randomAlphaOfLength(5); + c.preparePutRole(rname) + .cluster("all", "none") + .runAs("root", "nobody") + .addIndices(new String[]{"index"}, new String[]{"read"}, + new String[]{"body", "title"}, null, new BytesArray("{\"query\": {\"match_all\": {}}}")) + .get(); + addedRoles.add(rname); + } + logger.error("--> waiting for .security index"); + ensureGreen(SecurityLifecycleService.SECURITY_INDEX_NAME); + + MockTerminal t = new MockTerminal(); + String username = nodeClientUsername(); + String password = new String(CharArrays.toUtf8Bytes(nodeClientPassword().getChars()), StandardCharsets.UTF_8); + String url = getHttpURL(); + ESNativeRealmMigrateTool.MigrateUserOrRoles muor = new 
ESNativeRealmMigrateTool.MigrateUserOrRoles(); + Settings.Builder builder = Settings.builder().put("path.home", home); + SecuritySettingsSource.addSSLSettingsForStore(builder, + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.jks", "testclient"); + Settings settings = builder.build(); + logger.error("--> retrieving roles using URL: {}, home: {}", url, home); + + OptionParser parser = muor.getParser(); + OptionSet options = parser.parse("-u", username, "-p", password, "-U", url); + Set roles = muor.getRolesThatExist(t, settings, new Environment(settings, conf), options); + logger.info("--> output: \n{}", t.getOutput());; + for (String r : addedRoles) { + assertThat("expected list to contain: " + r, roles.contains(r), is(true)); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateToolTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateToolTests.java new file mode 100644 index 0000000000000..c42353ee75232 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateToolTests.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.esnative; + +import joptsimple.OptionSet; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.CommandTestCase; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.cli.Terminal.Verbosity; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.io.FileNotFoundException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.isEmptyString; + +/** + * Unit tests for the {@code ESNativeRealmMigrateTool} + */ +public class ESNativeRealmMigrateToolTests extends CommandTestCase { + + @Override + protected Command newCommand() { + return new ESNativeRealmMigrateTool() { + @Override + protected MigrateUserOrRoles newMigrateUserOrRoles() { + return new MigrateUserOrRoles() { + + @Override + protected Environment createEnv(Map settings) throws UserException { + Settings.Builder builder = Settings.builder(); + settings.forEach((k,v) -> builder.put(k, v)); + return TestEnvironment.newEnvironment(builder.build()); + } + + }; + } + }; + } + + public void testUserJson() throws Exception { + assertThat(ESNativeRealmMigrateTool.MigrateUserOrRoles.createUserJson(Strings.EMPTY_ARRAY, "hash".toCharArray()), + equalTo("{\"password_hash\":\"hash\",\"roles\":[]}")); + assertThat(ESNativeRealmMigrateTool.MigrateUserOrRoles.createUserJson(new String[]{"role1", "role2"}, "hash".toCharArray()), + 
equalTo("{\"password_hash\":\"hash\",\"roles\":[\"role1\",\"role2\"]}")); + } + + public void testRoleJson() throws Exception { + RoleDescriptor.IndicesPrivileges ip = RoleDescriptor.IndicesPrivileges.builder() + .indices(new String[]{"i1", "i2", "i3"}) + .privileges(new String[]{"all"}) + .grantedFields("body") + .build(); + RoleDescriptor.IndicesPrivileges[] ips = new RoleDescriptor.IndicesPrivileges[1]; + ips[0] = ip; + String[] cluster = Strings.EMPTY_ARRAY; + String[] runAs = Strings.EMPTY_ARRAY; + RoleDescriptor rd = new RoleDescriptor("rolename", cluster, ips, runAs); + assertThat(ESNativeRealmMigrateTool.MigrateUserOrRoles.createRoleJson(rd), + equalTo("{\"cluster\":[],\"indices\":[{\"names\":[\"i1\",\"i2\",\"i3\"]," + + "\"privileges\":[\"all\"],\"field_security\":{\"grant\":[\"body\"]}}]," + + "\"run_as\":[],\"metadata\":{},\"type\":\"role\"}")); + } + + public void testTerminalLogger() throws Exception { + Logger terminalLogger = ESNativeRealmMigrateTool.getTerminalLogger(terminal); + assertThat(terminal.getOutput(), isEmptyString()); + + // only error and fatal gets logged at normal verbosity + terminal.setVerbosity(Verbosity.NORMAL); + List nonLoggingLevels = new ArrayList<>(Arrays.asList(Level.values())); + nonLoggingLevels.removeAll(Arrays.asList(Level.ERROR, Level.FATAL)); + for (Level level : nonLoggingLevels) { + terminalLogger.log(level, "this level should not log " + level.name()); + assertThat(terminal.getOutput(), isEmptyString()); + } + + terminalLogger.log(Level.ERROR, "logging an error"); + assertEquals("logging an error\n", terminal.getOutput()); + terminal.reset(); + assertThat(terminal.getOutput(), isEmptyString()); + + terminalLogger.log(Level.FATAL, "logging a fatal message"); + assertEquals("logging a fatal message\n", terminal.getOutput()); + terminal.reset(); + assertThat(terminal.getOutput(), isEmptyString()); + + // everything will get logged at verbose! 
+ terminal.setVerbosity(Verbosity.VERBOSE); + List loggingLevels = new ArrayList<>(Arrays.asList(Level.values())); + loggingLevels.remove(Level.OFF); + for (Level level : loggingLevels) { + terminalLogger.log(level, "this level should log " + level.name()); + assertEquals("this level should log " + level.name() + "\n", terminal.getOutput()); + terminal.reset(); + assertThat(terminal.getOutput(), isEmptyString()); + } + } + + public void testMissingFiles() throws Exception { + Path homeDir = createTempDir(); + Path confDir = homeDir.resolve("config"); + Path xpackConfDir = confDir; + Files.createDirectories(xpackConfDir); + + ESNativeRealmMigrateTool.MigrateUserOrRoles muor = new ESNativeRealmMigrateTool.MigrateUserOrRoles(); + + OptionSet options = muor.getParser().parse("-u", "elastic", "-p", SecuritySettingsSourceField.TEST_PASSWORD, + "-U", "http://localhost:9200"); + Settings settings = Settings.builder().put("path.home", homeDir).build(); + Environment environment = new Environment(settings, confDir); + + MockTerminal mockTerminal = new MockTerminal(); + + FileNotFoundException fnfe = expectThrows(FileNotFoundException.class, + () -> muor.importUsers(mockTerminal, environment, options)); + assertThat(fnfe.getMessage(), containsString("users file")); + + Files.createFile(xpackConfDir.resolve("users")); + fnfe = expectThrows(FileNotFoundException.class, + () -> muor.importUsers(mockTerminal, environment, options)); + assertThat(fnfe.getMessage(), containsString("users_roles file")); + + fnfe = expectThrows(FileNotFoundException.class, + () -> muor.importRoles(mockTerminal, environment, options)); + assertThat(fnfe.getMessage(), containsString("roles.yml file")); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java new file mode 100644 index 0000000000000..36a49653645e8 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java @@ -0,0 +1,759 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.esnative; + +import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.NativeRealmIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleResponse; +import org.elasticsearch.xpack.core.security.action.role.GetRolesResponse; +import org.elasticsearch.xpack.core.security.action.role.PutRoleResponse; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordResponse; +import org.elasticsearch.xpack.core.security.action.user.DeleteUserResponse; +import org.elasticsearch.xpack.core.security.action.user.GetUsersResponse; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.core.security.user.ElasticUser; +import org.elasticsearch.xpack.core.security.user.KibanaUser; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; + +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.elasticsearch.xpack.security.support.IndexLifecycleManager.INTERNAL_SECURITY_INDEX; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +/** + * Tests for the NativeUsersStore and NativeRolesStore + */ +public class NativeRealmIntegTests extends NativeRealmIntegTestCase { + + private static boolean anonymousEnabled; + + private boolean roleExists; + + @BeforeClass + public static void init() { 
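+ // anonymous access is toggled once per test run so both the anonymous-enabled and anonymous-disabled paths get exercised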
+ anonymousEnabled = randomBoolean(); + } + + @Override + public Settings nodeSettings(int nodeOrdinal) { + if (anonymousEnabled) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)) + .put(AnonymousUser.ROLES_SETTING.getKey(), "native_anonymous") + .build(); + } + return super.nodeSettings(nodeOrdinal); + } + + @Before + public void setupAnonymousRoleIfNecessary() throws Exception { + roleExists = anonymousEnabled && randomBoolean(); + if (anonymousEnabled) { + if (roleExists) { + logger.info("anonymous is enabled. creating [native_anonymous] role"); + PutRoleResponse response = securityClient() + .preparePutRole("native_anonymous") + .cluster("ALL") + .addIndices(new String[]{"*"}, new String[]{"ALL"}, null, null, null) + .get(); + assertTrue(response.isCreated()); + } else { + logger.info("anonymous is enabled, but configured with a missing role"); + } + } + } + + public void testDeletingNonexistingUserAndRole() throws Exception { + SecurityClient c = securityClient(); + // first create the index so it exists + c.preparePutUser("joe", "s3kirt".toCharArray(), "role1", "user").get(); + DeleteUserResponse resp = c.prepareDeleteUser("missing").get(); + assertFalse("user shouldn't be found", resp.found()); + DeleteRoleResponse resp2 = c.prepareDeleteRole("role").get(); + assertFalse("role shouldn't be found", resp2.found()); + } + + public void testGettingUserThatDoesntExist() throws Exception { + SecurityClient c = securityClient(); + GetUsersResponse resp = c.prepareGetUsers("joe").get(); + assertFalse("user should not exist", resp.hasUsers()); + GetRolesResponse resp2 = c.prepareGetRoles().names("role").get(); + assertFalse("role should not exist", resp2.hasRoles()); + } + + public void testAddAndGetUser() throws Exception { + SecurityClient c = securityClient(); + final List existingUsers = Arrays.asList(c.prepareGetUsers().get().users()); + final int existing = existingUsers.size(); + logger.error("--> creating user"); + c.preparePutUser("joe", "s3kirt".toCharArray(), "role1", "user").get(); + logger.error("--> waiting for .security index"); + ensureGreen(SECURITY_INDEX_NAME); + logger.info("--> retrieving user"); + GetUsersResponse resp = c.prepareGetUsers("joe").get(); + assertTrue("user should exist", resp.hasUsers()); + User joe = resp.users()[0]; + assertEquals("joe", joe.principal()); + assertArrayEquals(joe.roles(), new String[]{"role1", "user"}); + + logger.info("--> adding two more users"); + c.preparePutUser("joe2", "s3kirt2".toCharArray(), "role2", "user").get(); + c.preparePutUser("joe3", "s3kirt3".toCharArray(), "role3", "user").get(); + GetUsersResponse allUsersResp = c.prepareGetUsers().get(); + assertTrue("users should exist", allUsersResp.hasUsers()); + assertEquals("should be " + (3 + existing) + " users total", 3 + existing, allUsersResp.users().length); + List names = new ArrayList<>(3); + for (User u : allUsersResp.users()) { + if (existingUsers.contains(u) == false) { + names.add(u.principal()); + } + } + CollectionUtil.timSort(names); + assertArrayEquals(new String[] { "joe", "joe2", "joe3" }, names.toArray(Strings.EMPTY_ARRAY)); + + GetUsersResponse someUsersResp = c.prepareGetUsers("joe", "joe3").get(); + assertTrue("users should exist", someUsersResp.hasUsers()); + assertEquals("should be 2 users returned", 2, someUsersResp.users().length); + names = new ArrayList<>(2); + for (User u : someUsersResp.users()) { + names.add(u.principal()); + } + CollectionUtil.timSort(names); + assertArrayEquals(new String[]{"joe", "joe3"}, 
names.toArray(Strings.EMPTY_ARRAY)); + + logger.info("--> deleting user"); + DeleteUserResponse delResp = c.prepareDeleteUser("joe").get(); + assertTrue(delResp.found()); + logger.info("--> retrieving user"); + resp = c.prepareGetUsers("joe").get(); + assertFalse("user should not exist after being deleted", resp.hasUsers()); + } + + public void testAddAndGetRole() throws Exception { + SecurityClient c = securityClient(); + final List existingRoles = Arrays.asList(c.prepareGetRoles().get().roles()); + final int existing = existingRoles.size(); + final Map metadata = Collections.singletonMap("key", randomAlphaOfLengthBetween(1, 10)); + logger.error("--> creating role"); + c.preparePutRole("test_role") + .cluster("all", "none") + .runAs("root", "nobody") + .addIndices(new String[]{"index"}, new String[]{"read"}, new String[]{"body", "title"}, null, + new BytesArray("{\"query\": {\"match_all\": {}}}")) + .metadata(metadata) + .get(); + logger.error("--> waiting for .security index"); + ensureGreen(SECURITY_INDEX_NAME); + logger.info("--> retrieving role"); + GetRolesResponse resp = c.prepareGetRoles().names("test_role").get(); + assertTrue("role should exist", resp.hasRoles()); + RoleDescriptor testRole = resp.roles()[0]; + assertNotNull(testRole); + assertThat(testRole.getMetadata().size(), is(1)); + assertThat(testRole.getMetadata().get("key"), is(metadata.get("key"))); + + c.preparePutRole("test_role2") + .cluster("all", "none") + .runAs("root", "nobody") + .addIndices(new String[]{"index"}, new String[]{"read"}, new String[]{"body", "title"}, null, + new BytesArray("{\"query\": {\"match_all\": {}}}")) + .get(); + c.preparePutRole("test_role3") + .cluster("all", "none") + .runAs("root", "nobody") + .addIndices(new String[]{"index"}, new String[]{"read"}, new String[]{"body", "title"}, null, + new BytesArray("{\"query\": {\"match_all\": {}}}")) + .get(); + + logger.info("--> retrieving all roles"); + GetRolesResponse allRolesResp = c.prepareGetRoles().get(); + assertTrue("roles should exist", allRolesResp.hasRoles()); + assertEquals("should be " + (3 + existing) + " roles total", 3 + existing, allRolesResp.roles().length); + + logger.info("--> retrieving test_role and test_role3"); + GetRolesResponse someRolesResp = c.prepareGetRoles().names("test_role", "test_role3").get(); + assertTrue("roles should exist", someRolesResp.hasRoles()); + assertEquals("should be 2 roles total", 2, someRolesResp.roles().length); + + logger.info("--> deleting role"); + DeleteRoleResponse delResp = c.prepareDeleteRole("test_role").get(); + assertTrue(delResp.found()); + logger.info("--> retrieving role"); + GetRolesResponse resp2 = c.prepareGetRoles().names("test_role").get(); + assertFalse("role should not exist after being deleted", resp2.hasRoles()); + } + + public void testAddUserAndRoleThenAuth() throws Exception { + SecurityClient c = securityClient(); + logger.error("--> creating role"); + c.preparePutRole("test_role") + .cluster("all") + .addIndices(new String[] { "*" }, new String[] { "read" }, new String[]{"body", "title"}, null, + new BytesArray("{\"match_all\": {}}")) + .get(); + logger.error("--> creating user"); + c.preparePutUser("joe", "s3krit".toCharArray(), "test_role").get(); + logger.error("--> waiting for .security index"); + ensureGreen(SECURITY_INDEX_NAME); + logger.info("--> retrieving user"); + GetUsersResponse resp = c.prepareGetUsers("joe").get(); + assertTrue("user should exist", resp.hasUsers()); + + createIndex("idx"); + ensureGreen("idx"); + // Index a document with the default 
test user + client().prepareIndex("idx", "doc", "1").setSource("body", "foo").setRefreshPolicy(IMMEDIATE).get(); + + String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); + SearchResponse searchResp = client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx").get(); + + assertEquals(searchResp.getHits().getTotalHits(), 1L); + } + + public void testUpdatingUserAndAuthentication() throws Exception { + SecurityClient c = securityClient(); + logger.error("--> creating user"); + c.preparePutUser("joe", "s3krit".toCharArray(), SecuritySettingsSource.TEST_ROLE).get(); + logger.error("--> waiting for .security index"); + ensureGreen(SECURITY_INDEX_NAME); + logger.info("--> retrieving user"); + GetUsersResponse resp = c.prepareGetUsers("joe").get(); + assertTrue("user should exist", resp.hasUsers()); + assertThat(resp.users()[0].roles(), arrayContaining(SecuritySettingsSource.TEST_ROLE)); + + createIndex("idx"); + ensureGreen("idx"); + // Index a document with the default test user + client().prepareIndex("idx", "doc", "1").setSource("body", "foo").setRefreshPolicy(IMMEDIATE).get(); + String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); + SearchResponse searchResp = client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx").get(); + + assertEquals(searchResp.getHits().getTotalHits(), 1L); + + c.preparePutUser("joe", "s3krit2".toCharArray(), SecuritySettingsSource.TEST_ROLE).get(); + + try { + client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx").get(); + fail("authentication with old credentials after an update to the user should fail!"); + } catch (ElasticsearchSecurityException e) { + // expected + assertThat(e.status(), is(RestStatus.UNAUTHORIZED)); + } + + token = basicAuthHeaderValue("joe", new SecureString("s3krit2".toCharArray())); + searchResp = client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx").get(); + assertEquals(searchResp.getHits().getTotalHits(), 1L); + } + + public void testCreateDeleteAuthenticate() { + SecurityClient c = securityClient(); + logger.error("--> creating user"); + c.preparePutUser("joe", "s3krit".toCharArray(), SecuritySettingsSource.TEST_ROLE).get(); + logger.error("--> waiting for .security index"); + ensureGreen(SECURITY_INDEX_NAME); + logger.info("--> retrieving user"); + GetUsersResponse resp = c.prepareGetUsers("joe").get(); + assertTrue("user should exist", resp.hasUsers()); + assertThat(resp.users()[0].roles(), arrayContaining(SecuritySettingsSource.TEST_ROLE)); + + createIndex("idx"); + ensureGreen("idx"); + // Index a document with the default test user + client().prepareIndex("idx", "doc", "1").setSource("body", "foo").setRefreshPolicy(IMMEDIATE).get(); + String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); + SearchResponse searchResp = client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx").get(); + + assertEquals(searchResp.getHits().getTotalHits(), 1L); + + DeleteUserResponse response = c.prepareDeleteUser("joe").get(); + assertThat(response.found(), is(true)); + try { + client().filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("idx").get(); + fail("authentication with a deleted user should fail!"); + } catch (ElasticsearchSecurityException e) { + // expected + assertThat(e.status(), is(RestStatus.UNAUTHORIZED)); + } + } + + 
public void testCreateAndUpdateRole() { + final boolean authenticate = randomBoolean(); + SecurityClient c = securityClient(); + logger.error("--> creating role"); + c.preparePutRole("test_role") + .cluster("all") + .addIndices(new String[]{"*"}, new String[]{"read"}, new String[]{"body", "title"}, null, + new BytesArray("{\"match_all\": {}}")) + .get(); + logger.error("--> creating user"); + c.preparePutUser("joe", "s3krit".toCharArray(), "test_role").get(); + logger.error("--> waiting for .security index"); + ensureGreen(SECURITY_INDEX_NAME); + + if (authenticate) { + final String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); + ClusterHealthResponse response = client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster() + .prepareHealth().get(); + assertFalse(response.isTimedOut()); + c.preparePutRole("test_role") + .cluster("none") + .addIndices(new String[]{"*"}, new String[]{"read"}, new String[]{"body", "title"}, null, + new BytesArray("{\"match_all\": {}}")) + .get(); + if (anonymousEnabled && roleExists) { + assertNoTimeout(client() + .filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster().prepareHealth().get()); + } else { + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> client() + .filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster().prepareHealth().get()); + assertThat(e.status(), is(RestStatus.FORBIDDEN)); + } + } else { + GetRolesResponse getRolesResponse = c.prepareGetRoles().names("test_role").get(); + assertTrue("test_role does not exist!", getRolesResponse.hasRoles()); + assertTrue("any cluster permission should be authorized", + Role.builder(getRolesResponse.roles()[0], null).build().cluster().check("cluster:admin/foo")); + + c.preparePutRole("test_role") + .cluster("none") + .addIndices(new String[]{"*"}, new String[]{"read"}, new String[]{"body", "title"}, null, + new BytesArray("{\"match_all\": {}}")) + .get(); + getRolesResponse = c.prepareGetRoles().names("test_role").get(); + assertTrue("test_role does not exist!", getRolesResponse.hasRoles()); + + assertFalse("no cluster permission should be authorized", + Role.builder(getRolesResponse.roles()[0], null).build().cluster().check("cluster:admin/bar")); + } + } + + public void testAuthenticateWithDeletedRole() { + SecurityClient c = securityClient(); + logger.error("--> creating role"); + c.preparePutRole("test_role") + .cluster("all") + .addIndices(new String[]{"*"}, new String[]{"read"}, new String[]{"body", "title"}, null, + new BytesArray("{\"match_all\": {}}")) + .get(); + c.preparePutUser("joe", "s3krit".toCharArray(), "test_role").get(); + logger.error("--> waiting for .security index"); + ensureGreen(SECURITY_INDEX_NAME); + + final String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); + ClusterHealthResponse response = client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster() + .prepareHealth().get(); + assertFalse(response.isTimedOut()); + c.prepareDeleteRole("test_role").get(); + if (anonymousEnabled && roleExists) { + assertNoTimeout( + client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster().prepareHealth().get()); + } else { + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> + client().filterWithHeader(Collections.singletonMap("Authorization", 
token)).admin().cluster().prepareHealth().get()); + assertThat(e.status(), is(RestStatus.FORBIDDEN)); + } + } + + public void testPutUserWithoutPassword() { + SecurityClient client = securityClient(); + // create some roles + client.preparePutRole("admin_role") + .cluster("all") + .addIndices(new String[]{"*"}, new String[]{"all"}, null, null, null) + .get(); + client.preparePutRole("read_role") + .cluster("none") + .addIndices(new String[]{"*"}, new String[]{"read"}, null, null, null) + .get(); + + assertThat(client.prepareGetUsers("joes").get().hasUsers(), is(false)); + // check that putting a user without a password fails if the user doesn't exist + try { + client.preparePutUser("joe", null, "admin_role").get(); + fail("cannot create a user without a password"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("password must be specified")); + } + + assertThat(client.prepareGetUsers("joes").get().hasUsers(), is(false)); + + // create joe with a password and verify the user works + client.preparePutUser("joe", SecuritySettingsSourceField.TEST_PASSWORD.toCharArray(), "admin_role").get(); + assertThat(client.prepareGetUsers("joe").get().hasUsers(), is(true)); + final String token = basicAuthHeaderValue("joe", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); + ClusterHealthResponse response = client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster() + .prepareHealth().get(); + assertFalse(response.isTimedOut()); + + // modify joe without sending the password + client.preparePutUser("joe", null, "read_role").fullName("Joe Smith").get(); + GetUsersResponse getUsersResponse = client.prepareGetUsers("joe").get(); + assertThat(getUsersResponse.hasUsers(), is(true)); + assertThat(getUsersResponse.users().length, is(1)); + User joe = getUsersResponse.users()[0]; + assertThat(joe.roles(), arrayContaining("read_role")); + assertThat(joe.fullName(), is("Joe Smith")); + + // test that role change took effect if anonymous is disabled as anonymous grants monitoring permissions... 
+ if (anonymousEnabled && roleExists) { + assertNoTimeout( + client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster().prepareHealth().get()); + } else { + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> + client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster().prepareHealth().get()); + assertThat(e.status(), is(RestStatus.FORBIDDEN)); + assertThat(e.getMessage(), containsString("authorized")); + } + + // update the user with password and admin role again + String secondPassword = SecuritySettingsSourceField.TEST_PASSWORD + "2"; + client.preparePutUser("joe", secondPassword.toCharArray(), "admin_role").fullName("Joe Smith").get(); + getUsersResponse = client.prepareGetUsers("joe").get(); + assertThat(getUsersResponse.hasUsers(), is(true)); + assertThat(getUsersResponse.users().length, is(1)); + joe = getUsersResponse.users()[0]; + assertThat(joe.roles(), arrayContaining("admin_role")); + assertThat(joe.fullName(), is("Joe Smith")); + + // validate that joe cannot auth with the old token + try { + client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster().prepareHealth().get(); + fail("should not authenticate with old password"); + } catch (ElasticsearchSecurityException e) { + assertThat(e.getMessage(), containsString("authenticate")); + } + + // test with new password and role + response = client() + .filterWithHeader( + Collections.singletonMap("Authorization", + basicAuthHeaderValue("joe", new SecureString(secondPassword.toCharArray())))) + .admin().cluster().prepareHealth().get(); + assertFalse(response.isTimedOut()); + } + + public void testCannotCreateUserWithShortPassword() throws Exception { + SecurityClient client = securityClient(); + try { + client.preparePutUser("joe", randomAlphaOfLengthBetween(0, 5).toCharArray(), "admin_role").get(); + fail("cannot create a user without a password < 6 characters"); + } catch (ValidationException v) { + assertThat(v.getMessage().contains("password"), is(true)); + } + } + + public void testCannotCreateUserWithInvalidCharactersInName() throws Exception { + SecurityClient client = securityClient(); + ValidationException v = expectThrows(ValidationException.class, + () -> client.preparePutUser("fóóbár", "my-am@zing-password".toCharArray(), "admin_role").get() + ); + assertThat(v.getMessage(), containsString("names must be")); + } + + public void testUsersAndRolesDoNotInterfereWithIndicesStats() throws Exception { + client().prepareIndex("foo", "bar").setSource("ignore", "me").get(); + + SecurityClient client = securityClient(); + if (randomBoolean()) { + client.preparePutUser("joe", "s3krit".toCharArray(), SecuritySettingsSource.TEST_ROLE).get(); + } else { + client.preparePutRole("read_role") + .cluster("none") + .addIndices(new String[]{"*"}, new String[]{"read"}, null, null, null) + .get(); + } + + IndicesStatsResponse response = client().admin().indices().prepareStats("foo", SECURITY_INDEX_NAME).get(); + assertThat(response.getFailedShards(), is(0)); + assertThat(response.getIndices().size(), is(2)); + assertThat(response.getIndices().get(INTERNAL_SECURITY_INDEX), notNullValue()); + assertThat(response.getIndices().get(INTERNAL_SECURITY_INDEX).getIndex(), + is(INTERNAL_SECURITY_INDEX)); + } + + public void testOperationsOnReservedUsers() throws Exception { + final String username = randomFrom(ElasticUser.NAME, KibanaUser.NAME); + IllegalArgumentException exception = 
expectThrows(IllegalArgumentException.class, + () -> securityClient().preparePutUser(username, randomBoolean() ? SecuritySettingsSourceField.TEST_PASSWORD.toCharArray() + : null, "admin").get()); + assertThat(exception.getMessage(), containsString("Username [" + username + "] is reserved")); + + exception = expectThrows(IllegalArgumentException.class, + () -> securityClient().prepareDeleteUser(username).get()); + assertThat(exception.getMessage(), containsString("user [" + username + "] is reserved")); + + exception = expectThrows(IllegalArgumentException.class, + () -> securityClient().prepareDeleteUser(AnonymousUser.DEFAULT_ANONYMOUS_USERNAME).get()); + assertThat(exception.getMessage(), containsString("user [" + AnonymousUser.DEFAULT_ANONYMOUS_USERNAME + "] is anonymous")); + + exception = expectThrows(IllegalArgumentException.class, + () -> securityClient().prepareChangePassword(AnonymousUser.DEFAULT_ANONYMOUS_USERNAME, "foobar".toCharArray()).get()); + assertThat(exception.getMessage(), containsString("user [" + AnonymousUser.DEFAULT_ANONYMOUS_USERNAME + "] is anonymous")); + + exception = expectThrows(IllegalArgumentException.class, + () -> securityClient().preparePutUser(AnonymousUser.DEFAULT_ANONYMOUS_USERNAME, "foobar".toCharArray()).get()); + assertThat(exception.getMessage(), containsString("Username [" + AnonymousUser.DEFAULT_ANONYMOUS_USERNAME + "] is reserved")); + + exception = expectThrows(IllegalArgumentException.class, + () -> securityClient().preparePutUser(SystemUser.NAME, "foobar".toCharArray()).get()); + assertThat(exception.getMessage(), containsString("user [" + SystemUser.NAME + "] is internal")); + + exception = expectThrows(IllegalArgumentException.class, + () -> securityClient().prepareChangePassword(SystemUser.NAME, "foobar".toCharArray()).get()); + assertThat(exception.getMessage(), containsString("user [" + SystemUser.NAME + "] is internal")); + + exception = expectThrows(IllegalArgumentException.class, + () -> securityClient().prepareDeleteUser(SystemUser.NAME).get()); + assertThat(exception.getMessage(), containsString("user [" + SystemUser.NAME + "] is internal")); + + // get should work + GetUsersResponse response = securityClient().prepareGetUsers(username).get(); + assertThat(response.hasUsers(), is(true)); + assertThat(response.users()[0].principal(), is(username)); + + // authenticate should work + AuthenticateResponse authenticateResponse = client() + .filterWithHeader(Collections.singletonMap("Authorization", + basicAuthHeaderValue(username, getReservedPassword()))) + .execute(AuthenticateAction.INSTANCE, new AuthenticateRequest(username)) + .get(); + assertThat(authenticateResponse.user().principal(), is(username)); + } + + public void testOperationsOnReservedRoles() throws Exception { + final String name = randomFrom(ReservedRolesStore.names()); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> securityClient().preparePutRole(name).cluster("monitor").get()); + assertThat(exception.getMessage(), containsString("role [" + name + "] is reserved")); + + exception = expectThrows(IllegalArgumentException.class, + () -> securityClient().prepareDeleteRole(name).get()); + assertThat(exception.getMessage(), containsString("role [" + name + "] is reserved")); + + // get role is allowed + GetRolesResponse response = securityClient().prepareGetRoles(name).get(); + assertThat(response.hasRoles(), is(true)); + assertThat(response.roles()[0].getName(), is(name)); + } + + public void testCreateAndChangePassword() throws 
Exception { + securityClient().preparePutUser("joe", "s3krit".toCharArray(), SecuritySettingsSource.TEST_ROLE).get(); + final String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); + ClusterHealthResponse response = client().filterWithHeader(Collections.singletonMap("Authorization", token)) + .admin().cluster().prepareHealth().get(); + assertThat(response.isTimedOut(), is(false)); + + ChangePasswordResponse passwordResponse = securityClient( + client().filterWithHeader(Collections.singletonMap("Authorization", token))) + .prepareChangePassword("joe", SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()) + .get(); + assertThat(passwordResponse, notNullValue()); + + + ElasticsearchSecurityException expected = expectThrows(ElasticsearchSecurityException.class, + () -> client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster().prepareHealth().get()); + assertThat(expected.status(), is(RestStatus.UNAUTHORIZED)); + + response = client() + .filterWithHeader( + Collections.singletonMap("Authorization", + basicAuthHeaderValue("joe", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))) + .admin().cluster().prepareHealth().get(); + assertThat(response.isTimedOut(), is(false)); + } + + public void testRolesUsageStats() throws Exception { + NativeRolesStore rolesStore = internalCluster().getInstance(NativeRolesStore.class); + long roles = anonymousEnabled && roleExists ? 1L: 0L; + logger.info("--> running testRolesUsageStats with anonymousEnabled=[{}], roleExists=[{}]", + anonymousEnabled, roleExists); + PlainActionFuture> future = new PlainActionFuture<>(); + rolesStore.usageStats(future); + Map usage = future.get(); + assertEquals(roles, usage.get("size")); + assertThat(usage.get("fls"), is(false)); + assertThat(usage.get("dls"), is(false)); + + final boolean fls = randomBoolean(); + final boolean dls = randomBoolean(); + SecurityClient client = new SecurityClient(client()); + PutRoleResponse putRoleResponse = client.preparePutRole("admin_role") + .cluster("all") + .addIndices(new String[]{"*"}, new String[]{"all"}, null, null, null) + .get(); + assertThat(putRoleResponse.isCreated(), is(true)); + roles++; + if (fls) { + PutRoleResponse roleResponse; + String[] fields = new String[]{"foo"}; + final String[] grantedFields; + final String[] deniedFields; + if (randomBoolean()) { + grantedFields = fields; + deniedFields = null; + } else { + grantedFields = null; + deniedFields = fields; + } + roleResponse = client.preparePutRole("admin_role_fls") + .cluster("all") + .addIndices(new String[]{"*"}, new String[]{"all"}, grantedFields, deniedFields, null) + .get(); + assertThat(roleResponse.isCreated(), is(true)); + roles++; + } + + if (dls) { + PutRoleResponse roleResponse = client.preparePutRole("admin_role_dls") + .cluster("all") + .addIndices(new String[]{"*"}, new String[]{"all"}, null, null, new BytesArray("{ \"match_all\": {} }")) + .get(); + assertThat(roleResponse.isCreated(), is(true)); + roles++; + } + + client.prepareClearRolesCache().get(); + + future = new PlainActionFuture<>(); + rolesStore.usageStats(future); + usage = future.get(); + assertThat(usage.get("size"), is(roles)); + assertThat(usage.get("fls"), is(fls)); + assertThat(usage.get("dls"), is(dls)); + } + + public void testSetEnabled() throws Exception { + securityClient().preparePutUser("joe", "s3krit".toCharArray(), SecuritySettingsSource.TEST_ROLE).get(); + final String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); + 
ClusterHealthResponse response = client().filterWithHeader(Collections.singletonMap("Authorization", token)) + .admin().cluster().prepareHealth().get(); + assertThat(response.isTimedOut(), is(false)); + + securityClient(client()).prepareSetEnabled("joe", false).get(); + + ElasticsearchSecurityException expected = expectThrows(ElasticsearchSecurityException.class, + () -> client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster().prepareHealth().get()); + assertThat(expected.status(), is(RestStatus.UNAUTHORIZED)); + + securityClient(client()).prepareSetEnabled("joe", true).get(); + + response = client().filterWithHeader(Collections.singletonMap("Authorization", token)).admin().cluster().prepareHealth().get(); + assertThat(response.isTimedOut(), is(false)); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> securityClient(client()).prepareSetEnabled("not_a_real_user", false).get()); + assertThat(e.getMessage(), containsString("only existing users can be disabled")); + } + + public void testNegativeLookupsThenCreateRole() throws Exception { + SecurityClient securityClient = new SecurityClient(client()); + securityClient.preparePutUser("joe", "s3krit".toCharArray(), "unknown_role").get(); + + final int negativeLookups = scaledRandomIntBetween(1, 10); + for (int i = 0; i < negativeLookups; i++) { + if (anonymousEnabled && roleExists) { + ClusterHealthResponse response = client() + .filterWithHeader(Collections.singletonMap("Authorization", + basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())))) + .admin().cluster().prepareHealth().get(); + assertNoTimeout(response); + } else { + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> client() + .filterWithHeader(Collections.singletonMap("Authorization", + basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())))) + .admin().cluster().prepareHealth().get()); + assertThat(e.status(), is(RestStatus.FORBIDDEN)); + } + } + + securityClient.preparePutRole("unknown_role").cluster("all").get(); + ClusterHealthResponse response = client() + .filterWithHeader(Collections.singletonMap("Authorization", + basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())))) + .admin().cluster().prepareHealth().get(); + assertNoTimeout(response); + } + + /** + * Tests that multiple concurrent run as requests can be authenticated successfully. There was a bug in the Cache implementation used + * for our internal realms that caused some run as requests to fail even when the authentication was valid and the run as user existed. + * + * The issue was that when iterating the realms there would be failed lookups and under heavy concurrency, requests will wait for an + * existing load attempt in the cache. 
The original caller was thrown an ExecutionException with a nested NullPointerException since + * the loader returned a null value, while the other caller(s) would get a null value unexpectedly + */ + public void testConcurrentRunAs() throws Exception { + securityClient().preparePutUser("joe", "s3krit".toCharArray(), SecuritySettingsSource.TEST_ROLE).get(); + securityClient().preparePutUser("executor", "s3krit".toCharArray(), "superuser").get(); + final String token = basicAuthHeaderValue("executor", new SecureString("s3krit".toCharArray())); + final Client client = client().filterWithHeader(MapBuilder.newMapBuilder() + .put("Authorization", token) + .put("es-security-runas-user", "joe") + .immutableMap()); + final CountDownLatch latch = new CountDownLatch(1); + final int numberOfProcessors = Runtime.getRuntime().availableProcessors(); + final int numberOfThreads = scaledRandomIntBetween(numberOfProcessors, numberOfProcessors * 3); + final int numberOfIterations = scaledRandomIntBetween(20, 100); + List threads = new ArrayList<>(); + for (int i = 0; i < numberOfThreads; i++) { + threads.add(new Thread(() -> { + try { + latch.await(); + for (int j = 0; j < numberOfIterations; j++) { + ClusterHealthResponse response = client.admin().cluster().prepareHealth().get(); + assertNoTimeout(response); + } + } catch (InterruptedException e) { + } + })); + } + + for (Thread thread : threads) { + thread.start(); + } + latch.countDown(); + for (Thread thread : threads) { + thread.join(); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java new file mode 100644 index 0000000000000..8b64ad4b1ec56 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.esnative; + +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.health.ClusterIndexHealth; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; + +import java.util.concurrent.atomic.AtomicInteger; + +import static org.elasticsearch.xpack.security.test.SecurityTestUtils.getClusterIndexHealth; +import static org.mockito.Mockito.mock; + +public class NativeRealmTests extends ESTestCase { + + public void testCacheClearOnIndexHealthChange() { + final AtomicInteger numInvalidation = new AtomicInteger(0); + int expectedInvalidation = 0; + Settings settings = Settings.builder().put("path.home", createTempDir()).build(); + RealmConfig config = new RealmConfig("native", Settings.EMPTY, settings, TestEnvironment.newEnvironment(settings), + new ThreadContext(settings)); + final NativeRealm nativeRealm = new NativeRealm(config, mock(NativeUsersStore.class)) { + @Override + void clearCache() { + numInvalidation.incrementAndGet(); + } + }; + + // existing to no longer present + ClusterIndexHealth previousHealth = getClusterIndexHealth(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW)); + ClusterIndexHealth currentHealth = null; + nativeRealm.onSecurityIndexHealthChange(previousHealth, currentHealth); + assertEquals(++expectedInvalidation, numInvalidation.get()); + + // doesn't exist to exists + previousHealth = null; + currentHealth = getClusterIndexHealth(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW)); + nativeRealm.onSecurityIndexHealthChange(previousHealth, currentHealth); + assertEquals(++expectedInvalidation, numInvalidation.get()); + + // green or yellow to red + previousHealth = getClusterIndexHealth(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW)); + currentHealth = getClusterIndexHealth(ClusterHealthStatus.RED); + nativeRealm.onSecurityIndexHealthChange(previousHealth, currentHealth); + assertEquals(expectedInvalidation, numInvalidation.get()); + + // red to non red + previousHealth = getClusterIndexHealth(ClusterHealthStatus.RED); + currentHealth = getClusterIndexHealth(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW)); + nativeRealm.onSecurityIndexHealthChange(previousHealth, currentHealth); + assertEquals(++expectedInvalidation, numInvalidation.get()); + + // green to yellow or yellow to green + previousHealth = getClusterIndexHealth(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW)); + currentHealth = getClusterIndexHealth( + previousHealth.getStatus() == ClusterHealthStatus.GREEN ? ClusterHealthStatus.YELLOW : ClusterHealthStatus.GREEN); + nativeRealm.onSecurityIndexHealthChange(previousHealth, currentHealth); + assertEquals(expectedInvalidation, numInvalidation.get()); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java new file mode 100644 index 0000000000000..51314c6437575 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java @@ -0,0 +1,252 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.esnative; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.FilterClient; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; +import org.elasticsearch.xpack.core.security.user.ElasticUser; +import org.elasticsearch.xpack.core.security.user.KibanaUser; +import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.Consumer; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class NativeUsersStoreTests extends ESTestCase { + + private static final String ENABLED_FIELD = User.Fields.ENABLED.getPreferredName(); + private static final String PASSWORD_FIELD = User.Fields.PASSWORD.getPreferredName(); + private static final String BLANK_PASSWORD = ""; + + private Client client; + private final List>> requests = new CopyOnWriteArrayList<>(); + + @Before + public void setupMocks() { + Client mockClient = mock(Client.class); + when(mockClient.settings()).thenReturn(Settings.EMPTY); + ThreadPool threadPool = mock(ThreadPool.class); + when(mockClient.threadPool()).thenReturn(threadPool); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + client = new FilterClient(mockClient) { + + @Override + protected < + Request extends ActionRequest, + Response extends ActionResponse, + RequestBuilder extends ActionRequestBuilder + > void doExecute( + Action action, + Request request, + ActionListener listener) { + requests.add(new Tuple<>(request, listener)); + } + }; + } + + public void 
testPasswordUpsertWhenSetEnabledOnReservedUser() throws Exception { + final NativeUsersStore nativeUsersStore = startNativeUsersStore(); + + final String user = randomFrom(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME); + + final PlainActionFuture future = new PlainActionFuture<>(); + nativeUsersStore.setEnabled(user, true, WriteRequest.RefreshPolicy.IMMEDIATE, future); + final UpdateRequest update = actionRespond(UpdateRequest.class, null); + + final Map docMap = update.doc().sourceAsMap(); + assertThat(docMap.get(ENABLED_FIELD), equalTo(Boolean.TRUE)); + assertThat(docMap.get(PASSWORD_FIELD), nullValue()); + + final Map upsertMap = update.upsertRequest().sourceAsMap(); + assertThat(upsertMap.get(User.Fields.ENABLED.getPreferredName()), equalTo(Boolean.TRUE)); + assertThat(upsertMap.get(User.Fields.PASSWORD.getPreferredName()), equalTo(BLANK_PASSWORD)); + } + + public void testBlankPasswordInIndexImpliesDefaultPassword() throws Exception { + final NativeUsersStore nativeUsersStore = startNativeUsersStore(); + + final String user = randomFrom(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME); + final Map values = new HashMap<>(); + values.put(ENABLED_FIELD, Boolean.TRUE); + values.put(PASSWORD_FIELD, BLANK_PASSWORD); + + final GetResult result = new GetResult( + SecurityLifecycleService.SECURITY_INDEX_NAME, + NativeUsersStore.INDEX_TYPE, + NativeUsersStore.getIdForUser(NativeUsersStore.RESERVED_USER_TYPE, randomAlphaOfLength(12)), + 1L, + true, + BytesReference.bytes(jsonBuilder().map(values)), + Collections.emptyMap()); + + final PlainActionFuture future = new PlainActionFuture<>(); + nativeUsersStore.getReservedUserInfo(user, future); + + actionRespond(GetRequest.class, new GetResponse(result)); + + final NativeUsersStore.ReservedUserInfo userInfo = future.get(); + assertThat(userInfo.hasEmptyPassword, equalTo(true)); + assertThat(userInfo.enabled, equalTo(true)); + assertThat(userInfo.passwordHash, equalTo(ReservedRealm.EMPTY_PASSWORD_HASH)); + } + + public void testVerifyUserWithCorrectPassword() throws Exception { + final NativeUsersStore nativeUsersStore = startNativeUsersStore(); + final String username = randomAlphaOfLengthBetween(4, 12); + final SecureString password = new SecureString(randomAlphaOfLengthBetween(8, 16).toCharArray()); + final String[] roles = generateRandomStringArray(4, 12, false, false); + + final PlainActionFuture future = new PlainActionFuture<>(); + nativeUsersStore.verifyPassword(username, password, future); + + respondToGetUserRequest(username, password, roles); + + final AuthenticationResult result = future.get(); + assertThat(result, notNullValue()); + assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.SUCCESS)); + final User user = result.getUser(); + assertThat(user, notNullValue()); + assertThat(user.enabled(), equalTo(true)); + assertThat(user.principal(), equalTo(username)); + assertThat(user.roles(), equalTo(roles)); + assertThat(user.authenticatedUser(), equalTo(user)); + } + + public void testVerifyUserWithIncorrectPassword() throws Exception { + final NativeUsersStore nativeUsersStore = startNativeUsersStore(); + final String username = randomAlphaOfLengthBetween(4, 12); + final SecureString correctPassword = new SecureString(randomAlphaOfLengthBetween(12, 16).toCharArray()); + final SecureString incorrectPassword = new SecureString(randomAlphaOfLengthBetween(8, 10).toCharArray()); + final String[] roles = generateRandomStringArray(4, 12, false, false); + + final 
PlainActionFuture future = new PlainActionFuture<>(); + nativeUsersStore.verifyPassword(username, incorrectPassword, future); + + respondToGetUserRequest(username, correctPassword, roles); + + final AuthenticationResult result = future.get(); + assertThat(result, notNullValue()); + assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.CONTINUE)); + assertThat(result.getUser(), nullValue()); + assertThat(result.getMessage(), containsString("authentication failed")); + } + + public void testVerifyNonExistentUser() throws Exception { + final NativeUsersStore nativeUsersStore = startNativeUsersStore(); + final String username = randomAlphaOfLengthBetween(4, 12); + final SecureString password = new SecureString(randomAlphaOfLengthBetween(8, 16).toCharArray()); + + final PlainActionFuture future = new PlainActionFuture<>(); + nativeUsersStore.verifyPassword(username, password, future); + + final GetResult getResult = new GetResult( + SecurityLifecycleService.SECURITY_INDEX_NAME, + NativeUsersStore.INDEX_TYPE, + NativeUsersStore.getIdForUser(NativeUsersStore.USER_DOC_TYPE, username), + 1L, + false, + null, + Collections.emptyMap()); + + actionRespond(GetRequest.class, new GetResponse(getResult)); + + final AuthenticationResult result = future.get(); + assertThat(result, notNullValue()); + assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.CONTINUE)); + assertThat(result.getUser(), nullValue()); + assertThat(result.getMessage(), nullValue()); + } + + private ARequest actionRespond(Class requestClass, + AResponse response) { + Tuple> tuple = findRequest(requestClass); + ((ActionListener) tuple.v2()).onResponse(response); + return tuple.v1(); + } + + private Tuple> findRequest( + Class requestClass) { + return this.requests.stream() + .filter(t -> requestClass.isInstance(t.v1())) + .map(t -> new Tuple>(requestClass.cast(t.v1()), t.v2())) + .findFirst().orElseThrow(() -> new RuntimeException("Cannot find request of type " + requestClass)); + } + + private void respondToGetUserRequest(String username, SecureString password, String[] roles) throws IOException { + final Map values = new HashMap<>(); + values.put(User.Fields.USERNAME.getPreferredName(), username); + values.put(User.Fields.PASSWORD.getPreferredName(), String.valueOf(Hasher.BCRYPT.hash(password))); + values.put(User.Fields.ROLES.getPreferredName(), roles); + values.put(User.Fields.ENABLED.getPreferredName(), Boolean.TRUE); + values.put(User.Fields.TYPE.getPreferredName(), NativeUsersStore.USER_DOC_TYPE); + final BytesReference source = BytesReference.bytes(jsonBuilder().map(values)); + final GetResult getResult = new GetResult( + SecurityLifecycleService.SECURITY_INDEX_NAME, + NativeUsersStore.INDEX_TYPE, + NativeUsersStore.getIdForUser(NativeUsersStore.USER_DOC_TYPE, username), + 1L, + true, + source, + Collections.emptyMap()); + + + actionRespond(GetRequest.class, new GetResponse(getResult)); + } + + private NativeUsersStore startNativeUsersStore() { + SecurityLifecycleService securityLifecycleService = mock(SecurityLifecycleService.class); + when(securityLifecycleService.isSecurityIndexAvailable()).thenReturn(true); + when(securityLifecycleService.isSecurityIndexExisting()).thenReturn(true); + when(securityLifecycleService.isSecurityIndexMappingUpToDate()).thenReturn(true); + when(securityLifecycleService.isSecurityIndexOutOfDate()).thenReturn(false); + when(securityLifecycleService.isSecurityIndexUpToDate()).thenReturn(true); + doAnswer((i) -> { + Runnable action = (Runnable) i.getArguments()[1]; + 
action.run(); + return null; + }).when(securityLifecycleService).prepareIndexIfNeededThenExecute(any(Consumer.class), any(Runnable.class)); + return new NativeUsersStore(Settings.EMPTY, client, securityLifecycleService); + } + +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java new file mode 100644 index 0000000000000..2ec7ed7ea2204 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmIntegTests.java @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.esnative; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.test.NativeRealmIntegTestCase; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordResponse; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; +import org.elasticsearch.xpack.core.security.user.ElasticUser; +import org.elasticsearch.xpack.core.security.user.KibanaUser; +import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; + +import java.util.Arrays; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +/** + * Integration tests for the built in realm + */ +public class ReservedRealmIntegTests extends NativeRealmIntegTestCase { + + public void testAuthenticate() { + for (String username : Arrays.asList(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME)) { + ClusterHealthResponse response = client() + .filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(username, getReservedPassword()))) + .admin() + .cluster() + .prepareHealth() + .get(); + + assertThat(response.getClusterName(), is(cluster().getClusterName())); + } + } + + /** + * Enabling a user forces a doc to be written to the security index, and "user doc with empty password" has a special case code in + * the reserved realm. 
+ */ + public void testAuthenticateAfterEnablingUser() { + final SecurityClient c = securityClient(); + for (String username : Arrays.asList(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME)) { + c.prepareSetEnabled(username, true).get(); + ClusterHealthResponse response = client() + .filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(username, getReservedPassword()))) + .admin() + .cluster() + .prepareHealth() + .get(); + + assertThat(response.getClusterName(), is(cluster().getClusterName())); + } + } + + public void testChangingPassword() { + String username = randomFrom(ElasticUser.NAME, KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME); + final char[] newPassword = "supersecretvalue".toCharArray(); + + if (randomBoolean()) { + ClusterHealthResponse response = client() + .filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(username, getReservedPassword()))) + .admin() + .cluster() + .prepareHealth() + .get(); + assertThat(response.getClusterName(), is(cluster().getClusterName())); + } + + ChangePasswordResponse response = securityClient() + .prepareChangePassword(username, Arrays.copyOf(newPassword, newPassword.length)) + .get(); + assertThat(response, notNullValue()); + + ElasticsearchSecurityException elasticsearchSecurityException = expectThrows(ElasticsearchSecurityException.class, () -> client() + .filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(username, getReservedPassword()))) + .admin() + .cluster() + .prepareHealth() + .get()); + assertThat(elasticsearchSecurityException.getMessage(), containsString("authenticate")); + + ClusterHealthResponse healthResponse = client() + .filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(username, new SecureString(newPassword)))) + .admin() + .cluster() + .prepareHealth() + .get(); + assertThat(healthResponse.getClusterName(), is(cluster().getClusterName())); + } + + public void testDisablingUser() throws Exception { + // validate the user works + ClusterHealthResponse response = client() + .filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(ElasticUser.NAME, getReservedPassword()))) + .admin() + .cluster() + .prepareHealth() + .get(); + assertThat(response.getClusterName(), is(cluster().getClusterName())); + + // disable user + securityClient().prepareSetEnabled(ElasticUser.NAME, false).get(); + ElasticsearchSecurityException elasticsearchSecurityException = expectThrows(ElasticsearchSecurityException.class, () -> client() + .filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(ElasticUser.NAME, getReservedPassword()))) + .admin() + .cluster() + .prepareHealth() + .get()); + assertThat(elasticsearchSecurityException.getMessage(), containsString("authenticate")); + + //enable + securityClient().prepareSetEnabled(ElasticUser.NAME, true).get(); + response = client() + .filterWithHeader(singletonMap("Authorization", basicAuthHeaderValue(ElasticUser.NAME, getReservedPassword()))) + .admin() + .cluster() + .prepareHealth() + .get(); + assertThat(response.getClusterName(), is(cluster().getClusterName())); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java new file mode 100644 index 0000000000000..272af679d13ea --- /dev/null +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java @@ -0,0 +1,450 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.esnative; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.esnative.ClientReservedRealm; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.core.security.user.BeatsSystemUser; +import org.elasticsearch.xpack.core.security.user.ElasticUser; +import org.elasticsearch.xpack.core.security.user.KibanaUser; +import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.user.UsernamesField; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore.ReservedUserInfo; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.ExecutionException; +import java.util.function.Predicate; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +/** + * Unit tests for the {@link ReservedRealm} + */ +public class ReservedRealmTests extends ESTestCase { + + private static final SecureString EMPTY_PASSWORD = new SecureString("".toCharArray()); + private NativeUsersStore usersStore; + private SecurityLifecycleService securityLifecycleService; + + @Before + public void setupMocks() throws Exception { + usersStore = mock(NativeUsersStore.class); + securityLifecycleService = mock(SecurityLifecycleService.class); + when(securityLifecycleService.isSecurityIndexAvailable()).thenReturn(true); + when(securityLifecycleService.checkSecurityMappingVersion(any())).thenReturn(true); + 
mockGetAllReservedUserInfo(usersStore, Collections.emptyMap()); + } + + public void testReservedUserEmptyPasswordAuthenticationFails() throws Throwable { + final String principal = randomFrom(UsernamesField.ELASTIC_NAME, UsernamesField.KIBANA_NAME, UsernamesField.LOGSTASH_NAME, + UsernamesField.BEATS_NAME); + + final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), Settings.EMPTY, usersStore, + new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + + PlainActionFuture listener = new PlainActionFuture<>(); + + reservedRealm.doAuthenticate(new UsernamePasswordToken(principal, EMPTY_PASSWORD), listener); + assertFailedAuthentication(listener, principal); + } + + public void testAuthenticationDisabled() throws Throwable { + Settings settings = Settings.builder().put(XPackSettings.RESERVED_REALM_ENABLED_SETTING.getKey(), false).build(); + final boolean securityIndexExists = randomBoolean(); + if (securityIndexExists) { + when(securityLifecycleService.isSecurityIndexExisting()).thenReturn(true); + } + final ReservedRealm reservedRealm = + new ReservedRealm(mock(Environment.class), settings, usersStore, + new AnonymousUser(settings), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + final User expected = randomReservedUser(true); + final String principal = expected.principal(); + + PlainActionFuture listener = new PlainActionFuture<>(); + reservedRealm.doAuthenticate(new UsernamePasswordToken(principal, EMPTY_PASSWORD), listener); + final AuthenticationResult result = listener.actionGet(); + assertThat(result.getStatus(), is(AuthenticationResult.Status.CONTINUE)); + assertNull(result.getUser()); + verifyZeroInteractions(usersStore); + } + + public void testAuthenticationEnabledUserWithStoredPassword() throws Throwable { + verifySuccessfulAuthentication(true); + } + + public void testAuthenticationDisabledUserWithStoredPassword() throws Throwable { + verifySuccessfulAuthentication(false); + } + + private void verifySuccessfulAuthentication(boolean enabled) throws Exception { + final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), Settings.EMPTY, usersStore, + new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + final User expectedUser = randomReservedUser(enabled); + final String principal = expectedUser.principal(); + final SecureString newPassword = new SecureString("foobar".toCharArray()); + when(securityLifecycleService.isSecurityIndexExisting()).thenReturn(true); + doAnswer((i) -> { + ActionListener callback = (ActionListener) i.getArguments()[1]; + callback.onResponse(new ReservedUserInfo(Hasher.BCRYPT.hash(newPassword), enabled, false)); + return null; + }).when(usersStore).getReservedUserInfo(eq(principal), any(ActionListener.class)); + + // test empty password + final PlainActionFuture listener = new PlainActionFuture<>(); + reservedRealm.doAuthenticate(new UsernamePasswordToken(principal, EMPTY_PASSWORD), listener); + assertFailedAuthentication(listener, expectedUser.principal()); + + // the realm assumes it owns the hashed password so it fills it with 0's + doAnswer((i) -> { + ActionListener callback = (ActionListener) i.getArguments()[1]; + callback.onResponse(new ReservedUserInfo(Hasher.BCRYPT.hash(newPassword), true, false)); + return null; + }).when(usersStore).getReservedUserInfo(eq(principal), any(ActionListener.class)); + + // test new password + final PlainActionFuture authListener = new PlainActionFuture<>(); + 
reservedRealm.doAuthenticate(new UsernamePasswordToken(principal, newPassword), authListener); + final User authenticated = authListener.actionGet().getUser(); + assertEquals(expectedUser, authenticated); + assertThat(expectedUser.enabled(), is(enabled)); + + verify(securityLifecycleService, times(2)).isSecurityIndexExisting(); + verify(usersStore, times(2)).getReservedUserInfo(eq(principal), any(ActionListener.class)); + final ArgumentCaptor predicateCaptor = ArgumentCaptor.forClass(Predicate.class); + verify(securityLifecycleService, times(2)).checkSecurityMappingVersion(predicateCaptor.capture()); + verifyVersionPredicate(principal, predicateCaptor.getValue()); + verifyNoMoreInteractions(usersStore); + } + + public void testLookup() throws Exception { + final ReservedRealm reservedRealm = + new ReservedRealm(mock(Environment.class), Settings.EMPTY, usersStore, + new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + final User expectedUser = randomReservedUser(true); + final String principal = expectedUser.principal(); + + PlainActionFuture listener = new PlainActionFuture<>(); + reservedRealm.doLookupUser(principal, listener); + final User user = listener.actionGet(); + assertEquals(expectedUser, user); + verify(securityLifecycleService).isSecurityIndexExisting(); + + final ArgumentCaptor predicateCaptor = ArgumentCaptor.forClass(Predicate.class); + verify(securityLifecycleService).checkSecurityMappingVersion(predicateCaptor.capture()); + verifyVersionPredicate(principal, predicateCaptor.getValue()); + + PlainActionFuture future = new PlainActionFuture<>(); + reservedRealm.doLookupUser("foobar", future); + final User doesntExist = future.actionGet(); + assertThat(doesntExist, nullValue()); + verifyNoMoreInteractions(usersStore); + } + + public void testLookupDisabled() throws Exception { + Settings settings = Settings.builder().put(XPackSettings.RESERVED_REALM_ENABLED_SETTING.getKey(), false).build(); + final ReservedRealm reservedRealm = + new ReservedRealm(mock(Environment.class), settings, usersStore, new AnonymousUser(settings), + securityLifecycleService, new ThreadContext(Settings.EMPTY)); + final User expectedUser = randomReservedUser(true); + final String principal = expectedUser.principal(); + + PlainActionFuture listener = new PlainActionFuture<>(); + reservedRealm.doLookupUser(principal, listener); + final User user = listener.actionGet(); + assertNull(user); + verifyZeroInteractions(usersStore); + } + + public void testLookupThrows() throws Exception { + final ReservedRealm reservedRealm = + new ReservedRealm(mock(Environment.class), Settings.EMPTY, usersStore, + new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + final User expectedUser = randomReservedUser(true); + final String principal = expectedUser.principal(); + when(securityLifecycleService.isSecurityIndexExisting()).thenReturn(true); + final RuntimeException e = new RuntimeException("store threw"); + doAnswer((i) -> { + ActionListener callback = (ActionListener) i.getArguments()[1]; + callback.onFailure(e); + return null; + }).when(usersStore).getReservedUserInfo(eq(principal), any(ActionListener.class)); + + PlainActionFuture future = new PlainActionFuture<>(); + reservedRealm.lookupUser(principal, future); + ElasticsearchSecurityException securityException = expectThrows(ElasticsearchSecurityException.class, future::actionGet); + assertThat(securityException.getMessage(), containsString("failed to lookup")); + + 
verify(securityLifecycleService).isSecurityIndexExisting(); + verify(usersStore).getReservedUserInfo(eq(principal), any(ActionListener.class)); + + final ArgumentCaptor predicateCaptor = ArgumentCaptor.forClass(Predicate.class); + verify(securityLifecycleService).checkSecurityMappingVersion(predicateCaptor.capture()); + verifyVersionPredicate(principal, predicateCaptor.getValue()); + + verifyNoMoreInteractions(usersStore); + } + + public void testIsReserved() { + final User expectedUser = randomReservedUser(true); + final String principal = expectedUser.principal(); + assertThat(ClientReservedRealm.isReserved(principal, Settings.EMPTY), is(true)); + + final String notExpected = randomFrom("foobar", "", randomAlphaOfLengthBetween(1, 30)); + assertThat(ClientReservedRealm.isReserved(notExpected, Settings.EMPTY), is(false)); + } + + public void testIsReservedDisabled() { + Settings settings = Settings.builder().put(XPackSettings.RESERVED_REALM_ENABLED_SETTING.getKey(), false).build(); + final User expectedUser = randomReservedUser(true); + final String principal = expectedUser.principal(); + assertThat(ClientReservedRealm.isReserved(principal, settings), is(false)); + + final String notExpected = randomFrom("foobar", "", randomAlphaOfLengthBetween(1, 30)); + assertThat(ClientReservedRealm.isReserved(notExpected, settings), is(false)); + } + + public void testGetUsers() { + final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), Settings.EMPTY, usersStore, + new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + PlainActionFuture> userFuture = new PlainActionFuture<>(); + reservedRealm.users(userFuture); + assertThat(userFuture.actionGet(), + containsInAnyOrder(new ElasticUser(true), new KibanaUser(true), new LogstashSystemUser(true), new BeatsSystemUser(true))); + } + + public void testGetUsersDisabled() { + final boolean anonymousEnabled = randomBoolean(); + Settings settings = Settings.builder() + .put(XPackSettings.RESERVED_REALM_ENABLED_SETTING.getKey(), false) + .put(AnonymousUser.ROLES_SETTING.getKey(), anonymousEnabled ? 
"user" : "") + .build(); + final AnonymousUser anonymousUser = new AnonymousUser(settings); + final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, anonymousUser, + securityLifecycleService, new ThreadContext(Settings.EMPTY)); + PlainActionFuture> userFuture = new PlainActionFuture<>(); + reservedRealm.users(userFuture); + if (anonymousEnabled) { + assertThat(userFuture.actionGet(), contains(anonymousUser)); + } else { + assertThat(userFuture.actionGet(), empty()); + } + } + + public void testFailedAuthentication() throws Exception { + when(securityLifecycleService.isSecurityIndexExisting()).thenReturn(true); + SecureString password = new SecureString("password".toCharArray()); + char[] hash = Hasher.BCRYPT.hash(password); + ReservedUserInfo userInfo = new ReservedUserInfo(hash, true, false); + mockGetAllReservedUserInfo(usersStore, Collections.singletonMap("elastic", userInfo)); + final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), Settings.EMPTY, usersStore, + new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + + if (randomBoolean()) { + PlainActionFuture future = new PlainActionFuture<>(); + + reservedRealm.authenticate(new UsernamePasswordToken(ElasticUser.NAME, password), future); + User user = future.actionGet().getUser(); + assertEquals(new ElasticUser(true), user); + } + + PlainActionFuture future = new PlainActionFuture<>(); + reservedRealm.authenticate(new UsernamePasswordToken(ElasticUser.NAME, new SecureString("foobar".toCharArray())), future); + assertFailedAuthentication(future, ElasticUser.NAME); + } + + private void assertFailedAuthentication(PlainActionFuture future, String principal) throws Exception { + final AuthenticationResult result = future.get(); + assertThat(result.getStatus(), is(AuthenticationResult.Status.TERMINATE)); + assertThat(result.getMessage(), containsString("failed to authenticate")); + assertThat(result.getMessage(), containsString(principal)); + } + + @SuppressWarnings("unchecked") + public void testBootstrapElasticPasswordWorksOnceSecurityIndexExists() throws Exception { + MockSecureSettings mockSecureSettings = new MockSecureSettings(); + mockSecureSettings.setString("bootstrap.password", "foobar"); + Settings settings = Settings.builder().setSecureSettings(mockSecureSettings).build(); + when(securityLifecycleService.isSecurityIndexExisting()).thenReturn(true); + + final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, + new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + PlainActionFuture listener = new PlainActionFuture<>(); + + doAnswer((i) -> { + ActionListener callback = (ActionListener) i.getArguments()[1]; + callback.onResponse(null); + return null; + }).when(usersStore).getReservedUserInfo(eq("elastic"), any(ActionListener.class)); + reservedRealm.doAuthenticate(new UsernamePasswordToken(new ElasticUser(true).principal(), + mockSecureSettings.getString("bootstrap.password")), + listener); + final AuthenticationResult result = listener.get(); + assertThat(result.getStatus(), is(AuthenticationResult.Status.SUCCESS)); + } + + public void testBootstrapElasticPasswordFailsOnceElasticUserExists() throws Exception { + MockSecureSettings mockSecureSettings = new MockSecureSettings(); + mockSecureSettings.setString("bootstrap.password", "foobar"); + Settings settings = Settings.builder().setSecureSettings(mockSecureSettings).build(); + 
when(securityLifecycleService.isSecurityIndexExisting()).thenReturn(true); + + final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, + new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + PlainActionFuture listener = new PlainActionFuture<>(); + SecureString password = new SecureString("password".toCharArray()); + doAnswer((i) -> { + ActionListener callback = (ActionListener) i.getArguments()[1]; + char[] hash = Hasher.BCRYPT.hash(password); + ReservedUserInfo userInfo = new ReservedUserInfo(hash, true, false); + callback.onResponse(userInfo); + return null; + }).when(usersStore).getReservedUserInfo(eq("elastic"), any(ActionListener.class)); + reservedRealm.doAuthenticate(new UsernamePasswordToken(new ElasticUser(true).principal(), + mockSecureSettings.getString("bootstrap.password")), listener); + assertFailedAuthentication(listener, "elastic"); + // now try with the real password + listener = new PlainActionFuture<>(); + reservedRealm.doAuthenticate(new UsernamePasswordToken(new ElasticUser(true).principal(), password), listener); + final AuthenticationResult result = listener.get(); + assertThat(result.getStatus(), is(AuthenticationResult.Status.SUCCESS)); + } + + public void testBootstrapElasticPasswordWorksBeforeSecurityIndexExists() throws ExecutionException, InterruptedException { + MockSecureSettings mockSecureSettings = new MockSecureSettings(); + mockSecureSettings.setString("bootstrap.password", "foobar"); + Settings settings = Settings.builder().setSecureSettings(mockSecureSettings).build(); + when(securityLifecycleService.isSecurityIndexExisting()).thenReturn(false); + + final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, + new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + PlainActionFuture listener = new PlainActionFuture<>(); + + reservedRealm.doAuthenticate(new UsernamePasswordToken(new ElasticUser(true).principal(), + mockSecureSettings.getString("bootstrap.password")), + listener); + final AuthenticationResult result = listener.get(); + assertThat(result.getStatus(), is(AuthenticationResult.Status.SUCCESS)); + } + + public void testNonElasticUsersCannotUseBootstrapPasswordWhenSecurityIndexExists() throws Exception { + final MockSecureSettings mockSecureSettings = new MockSecureSettings(); + final String password = randomAlphaOfLengthBetween(8, 24); + mockSecureSettings.setString("bootstrap.password", password); + Settings settings = Settings.builder().setSecureSettings(mockSecureSettings).build(); + when(securityLifecycleService.isSecurityIndexExisting()).thenReturn(true); + + final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, + new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + PlainActionFuture listener = new PlainActionFuture<>(); + + final String principal = randomFrom(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME); + doAnswer((i) -> { + ActionListener callback = (ActionListener) i.getArguments()[1]; + callback.onResponse(null); + return null; + }).when(usersStore).getReservedUserInfo(eq(principal), any(ActionListener.class)); + reservedRealm.doAuthenticate(new UsernamePasswordToken(principal, mockSecureSettings.getString("bootstrap.password")), listener); + final AuthenticationResult result = listener.get(); + assertThat(result.getStatus(), 
is(AuthenticationResult.Status.TERMINATE)); + } + + public void testNonElasticUsersCannotUseBootstrapPasswordWhenSecurityIndexDoesNotExists() throws Exception { + final MockSecureSettings mockSecureSettings = new MockSecureSettings(); + final String password = randomAlphaOfLengthBetween(8, 24); + mockSecureSettings.setString("bootstrap.password", password); + Settings settings = Settings.builder().setSecureSettings(mockSecureSettings).build(); + when(securityLifecycleService.isSecurityIndexExisting()).thenReturn(false); + + final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, + new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + PlainActionFuture listener = new PlainActionFuture<>(); + + final String principal = randomFrom(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME); + reservedRealm.doAuthenticate(new UsernamePasswordToken(principal, mockSecureSettings.getString("bootstrap.password")), listener); + final AuthenticationResult result = listener.get(); + assertThat(result.getStatus(), is(AuthenticationResult.Status.TERMINATE)); + } + + private User randomReservedUser(boolean enabled) { + return randomFrom(new ElasticUser(enabled), new KibanaUser(enabled), new LogstashSystemUser(enabled), new BeatsSystemUser(enabled)); + } + + /* + * NativeUserStore#getAllReservedUserInfo is pkg private we can't mock it otherwise + */ + public static void mockGetAllReservedUserInfo(NativeUsersStore usersStore, Map collection) { + doAnswer((i) -> { + ((ActionListener) i.getArguments()[0]).onResponse(collection); + return null; + }).when(usersStore).getAllReservedUserInfo(any(ActionListener.class)); + + for (Entry entry : collection.entrySet()) { + doAnswer((i) -> { + ((ActionListener) i.getArguments()[1]).onResponse(entry.getValue()); + return null; + }).when(usersStore).getReservedUserInfo(eq(entry.getKey()), any(ActionListener.class)); + } + } + + private void verifyVersionPredicate(String principal, Predicate versionPredicate) { + assertThat(versionPredicate.test(Version.V_5_0_0_rc1), is(false)); + switch (principal) { + case LogstashSystemUser.NAME: + assertThat(versionPredicate.test(Version.V_5_0_0), is(false)); + assertThat(versionPredicate.test(Version.V_5_1_1), is(false)); + assertThat(versionPredicate.test(Version.V_5_2_0), is(true)); + assertThat(versionPredicate.test(Version.V_6_3_0), is(true)); + break; + case BeatsSystemUser.NAME: + assertThat(versionPredicate.test(Version.V_5_6_9), is(false)); + assertThat(versionPredicate.test(Version.V_6_2_3), is(false)); + assertThat(versionPredicate.test(Version.V_6_3_0), is(true)); + break; + default: + assertThat(versionPredicate.test(Version.V_5_0_0), is(true)); + assertThat(versionPredicate.test(Version.V_5_1_1), is(true)); + assertThat(versionPredicate.test(Version.V_5_2_0), is(true)); + assertThat(versionPredicate.test(Version.V_6_3_0), is(true)); + break; + } + assertThat(versionPredicate.test(Version.V_7_0_0_alpha1), is(true)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/CommandLineHttpClientTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/CommandLineHttpClientTests.java new file mode 100644 index 0000000000000..d127a45d53249 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/CommandLineHttpClientTests.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.esnative.tool; + +import org.bouncycastle.operator.OperatorCreationException; +import org.bouncycastle.util.io.Streams; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettingsTests; +import org.elasticsearch.xpack.core.ssl.TestsSSLService; +import org.elasticsearch.xpack.core.ssl.VerificationMode; +import org.elasticsearch.xpack.security.authc.esnative.tool.HttpResponse.HttpResponseBuilder; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.io.InputStream; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.UnrecoverableKeyException; +import java.security.cert.CertificateException; + +import javax.security.auth.DestroyFailedException; + +/** + * This class tests {@link CommandLineHttpClient}. Extensive tests related to + * SSL settings can be found in {@link SSLConfigurationSettingsTests}. + */ +public class CommandLineHttpClientTests extends ESTestCase { + + private MockWebServer webServer; + private Environment environment = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); + + @Before + public void setup() throws Exception { + webServer = createMockWebServer(); + webServer.enqueue(new MockResponse().setResponseCode(200).setBody("{\"test\": \"complete\"}")); + webServer.start(); + } + + @After + public void shutdown() throws Exception { + webServer.close(); + } + + public void testCommandLineHttpClientCanExecuteAndReturnCorrectResultUsingSSLSettings() throws Exception { + Path resource = getDataPath("/org/elasticsearch/xpack/security/keystore/testnode.jks"); + MockSecureSettings secureSettings = new MockSecureSettings(); + Settings settings; + if (randomBoolean()) { + // with http ssl settings + secureSettings.setString("xpack.security.http.ssl.truststore.secure_password", "testnode"); + settings = Settings.builder().put("xpack.security.http.ssl.truststore.path", resource.toString()) + .put("xpack.security.http.ssl.verification_mode", VerificationMode.CERTIFICATE).setSecureSettings(secureSettings) + .build(); + } else { + // with global settings + secureSettings.setString("xpack.ssl.truststore.secure_password", "testnode"); + settings = Settings.builder().put("xpack.ssl.truststore.path", resource.toString()) + .put("xpack.ssl.verification_mode", VerificationMode.CERTIFICATE).setSecureSettings(secureSettings).build(); + } + CommandLineHttpClient client = new CommandLineHttpClient(settings, environment); + HttpResponse httpResponse = client.execute("GET", new URL("https://localhost:" + webServer.getPort() + "/test"), "u1", + new SecureString(new char[] { 'p' }), () -> null, is -> responseBuilder(is)); + + assertNotNull("Should have http response", httpResponse); + assertEquals("Http status code 
does not match", 200, httpResponse.getHttpStatus()); + assertEquals("Http response body does not match", "complete", httpResponse.getResponseBody().get("test")); + } + + private MockWebServer createMockWebServer() throws IOException, UnrecoverableKeyException, CertificateException, + NoSuchAlgorithmException, KeyStoreException, OperatorCreationException, DestroyFailedException { + Path resource = getDataPath("/org/elasticsearch/xpack/security/keystore/testnode.jks"); + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("xpack.ssl.keystore.secure_password", "testnode"); + Settings settings = + Settings.builder().put("xpack.ssl.keystore.path", resource.toString()).setSecureSettings(secureSettings).build(); + TestsSSLService sslService = new TestsSSLService(settings, environment); + return new MockWebServer(sslService.sslContext(), false); + } + + private HttpResponseBuilder responseBuilder(final InputStream is) throws IOException { + final HttpResponseBuilder httpResponseBuilder = new HttpResponseBuilder(); + if (is != null) { + byte[] bytes = Streams.readAll(is); + httpResponseBuilder.withResponseBody(new String(bytes, StandardCharsets.UTF_8)); + } + return httpResponseBuilder; + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolTests.java new file mode 100644 index 0000000000000..d614afc0aeb81 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolTests.java @@ -0,0 +1,484 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.esnative.tool; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.CommandTestCase; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.license.XPackInfoResponse; +import org.elasticsearch.license.XPackInfoResponse.FeatureSetsInfo; +import org.elasticsearch.license.XPackInfoResponse.FeatureSetsInfo.FeatureSet; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.security.support.Validation; +import org.elasticsearch.xpack.core.security.user.ElasticUser; +import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; +import org.elasticsearch.xpack.security.authc.esnative.tool.HttpResponse.HttpResponseBuilder; +import org.hamcrest.CoreMatchers; +import org.hamcrest.Matchers; +import org.junit.Before; +import org.junit.Rule; +import org.junit.rules.ExpectedException; +import org.mockito.ArgumentCaptor; +import org.mockito.InOrder; +import org.mockito.Mockito; + +import javax.net.ssl.SSLException; +import java.io.IOException; +import java.net.HttpURLConnection; +import java.net.MalformedURLException; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class SetupPasswordToolTests extends CommandTestCase { + + private final String pathHomeParameter = "-Epath.home=" + createTempDir(); + private SecureString bootstrapPassword; + private CommandLineHttpClient httpClient; + private KeyStoreWrapper keyStore; + private List usersInSetOrder; + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Before + public void setSecretsAndKeyStore() throws Exception { + // sometimes we fall back to the keystore seed as this is the default when a new node starts + boolean useFallback = randomBoolean(); + bootstrapPassword = useFallback ? 
new SecureString("0xCAFEBABE".toCharArray()) : + new SecureString("bootstrap-password".toCharArray()); + this.keyStore = mock(KeyStoreWrapper.class); + this.httpClient = mock(CommandLineHttpClient.class); + + when(keyStore.isLoaded()).thenReturn(true); + if (useFallback) { + when(keyStore.getSettingNames()).thenReturn(new HashSet<>(Arrays.asList(ReservedRealm.BOOTSTRAP_ELASTIC_PASSWORD.getKey(), + KeyStoreWrapper.SEED_SETTING.getKey()))); + when(keyStore.getString(ReservedRealm.BOOTSTRAP_ELASTIC_PASSWORD.getKey())).thenReturn(bootstrapPassword); + } else { + when(keyStore.getSettingNames()).thenReturn(Collections.singleton(KeyStoreWrapper.SEED_SETTING.getKey())); + when(keyStore.getString(KeyStoreWrapper.SEED_SETTING.getKey())).thenReturn(bootstrapPassword); + } + + when(httpClient.getDefaultURL()).thenReturn("http://localhost:9200"); + + HttpResponse httpResponse = new HttpResponse(HttpURLConnection.HTTP_OK, new HashMap()); + when(httpClient.execute(anyString(), any(URL.class), anyString(), any(SecureString.class), any(CheckedSupplier.class), + any(CheckedFunction.class))).thenReturn(httpResponse); + + URL url = new URL(httpClient.getDefaultURL()); + httpResponse = new HttpResponse(HttpURLConnection.HTTP_OK, Collections.singletonMap("status", randomFrom("yellow", "green"))); + when(httpClient.execute(anyString(), eq(clusterHealthUrl(url)), anyString(), any(SecureString.class), any(CheckedSupplier.class), + any(CheckedFunction.class))).thenReturn(httpResponse); + + URL xpackSecurityPluginQueryURL = queryXPackSecurityFeatureConfigURL(url); + HttpResponse queryXPackSecurityConfigHttpResponse = new HttpResponse(HttpURLConnection.HTTP_OK, new HashMap()); + when(httpClient.execute(eq("GET"), eq(xpackSecurityPluginQueryURL), anyString(), any(SecureString.class), + any(CheckedSupplier.class), any(CheckedFunction.class))).thenReturn(queryXPackSecurityConfigHttpResponse); + + // elastic user is updated last + usersInSetOrder = new ArrayList<>(SetupPasswordTool.USERS); + for (int i = 0; i < usersInSetOrder.size() - 1; i++) { + if (ElasticUser.NAME.equals(usersInSetOrder.get(i))) { + Collections.swap(usersInSetOrder, i, i + 1); + } + } + + for (String user : SetupPasswordTool.USERS) { + terminal.addSecretInput(user + "-password"); + terminal.addSecretInput(user + "-password"); + } + } + + @Override + protected Command newCommand() { + return new SetupPasswordTool((e, s) -> httpClient, (e) -> keyStore) { + + @Override + protected AutoSetup newAutoSetup() { + return new AutoSetup() { + @Override + protected Environment createEnv(Map settings) throws UserException { + Settings.Builder builder = Settings.builder(); + settings.forEach((k, v) -> builder.put(k, v)); + return TestEnvironment.newEnvironment(builder.build()); + } + }; + } + + @Override + protected InteractiveSetup newInteractiveSetup() { + return new InteractiveSetup() { + @Override + protected Environment createEnv(Map settings) throws UserException { + Settings.Builder builder = Settings.builder(); + settings.forEach((k, v) -> builder.put(k, v)); + return TestEnvironment.newEnvironment(builder.build()); + } + }; + } + + }; + } + + public void testAutoSetup() throws Exception { + URL url = new URL(httpClient.getDefaultURL()); + if (randomBoolean()) { + execute("auto", pathHomeParameter, "-b", "true"); + } else { + terminal.addTextInput("Y"); + execute("auto", pathHomeParameter); + } + + verify(keyStore).decrypt(new char[0]); + + InOrder inOrder = Mockito.inOrder(httpClient); + + URL checkUrl = authenticateUrl(url); + 
inOrder.verify(httpClient).execute(eq("GET"), eq(checkUrl), eq(ElasticUser.NAME), eq(bootstrapPassword), any(CheckedSupplier.class), + any(CheckedFunction.class)); + for (String user : usersInSetOrder) { + URL urlWithRoute = passwordUrl(url, user); + inOrder.verify(httpClient).execute(eq("PUT"), eq(urlWithRoute), eq(ElasticUser.NAME), eq(bootstrapPassword), + any(CheckedSupplier.class), any(CheckedFunction.class)); + } + } + + public void testAuthnFail() throws Exception { + URL url = new URL(httpClient.getDefaultURL()); + URL authnURL = authenticateUrl(url); + + HttpResponse httpResponse = new HttpResponse(HttpURLConnection.HTTP_UNAUTHORIZED, new HashMap()); + + when(httpClient.execute(eq("GET"), eq(authnURL), eq(ElasticUser.NAME), any(SecureString.class), any(CheckedSupplier.class), + any(CheckedFunction.class))).thenReturn(httpResponse); + + try { + execute(randomBoolean() ? "auto" : "interactive", pathHomeParameter); + fail("Should have thrown exception"); + } catch (UserException e) { + assertEquals(ExitCodes.CONFIG, e.exitCode); + } + } + + public void testErrorMessagesWhenXPackIsNotAvailableOnNode() throws Exception { + URL url = new URL(httpClient.getDefaultURL()); + URL authnURL = authenticateUrl(url); + + HttpResponse httpResponse = new HttpResponse(HttpURLConnection.HTTP_NOT_FOUND, new HashMap()); + when(httpClient.execute(eq("GET"), eq(authnURL), eq(ElasticUser.NAME), any(SecureString.class), any(CheckedSupplier.class), + any(CheckedFunction.class))).thenReturn(httpResponse); + + URL xpackSecurityPluginQueryURL = queryXPackSecurityFeatureConfigURL(url); + String securityPluginQueryResponseBody = null; + final IllegalArgumentException illegalArgException = + new IllegalArgumentException("request [/_xpack] contains unrecognized parameter: [categories]"); + try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + builder.startObject(); + ElasticsearchException.generateFailureXContent(builder, ToXContent.EMPTY_PARAMS, illegalArgException, true); + builder.field("status", RestStatus.BAD_REQUEST.getStatus()); + builder.endObject(); + securityPluginQueryResponseBody = Strings.toString(builder); + } + when(httpClient.execute(eq("GET"), eq(xpackSecurityPluginQueryURL), eq(ElasticUser.NAME), any(SecureString.class), + any(CheckedSupplier.class), any(CheckedFunction.class))) + .thenReturn(createHttpResponse(HttpURLConnection.HTTP_BAD_REQUEST, securityPluginQueryResponseBody)); + + thrown.expect(UserException.class); + thrown.expectMessage("X-Pack is not available on this Elasticsearch node."); + execute(randomBoolean() ? 
"auto" : "interactive", pathHomeParameter); + } + + public void testErrorMessagesWhenXPackIsAvailableWithCorrectLicenseAndIsEnabledButStillFailedForUnknown() throws Exception { + URL url = new URL(httpClient.getDefaultURL()); + URL authnURL = authenticateUrl(url); + + HttpResponse httpResponse = new HttpResponse(HttpURLConnection.HTTP_NOT_FOUND, new HashMap()); + when(httpClient.execute(eq("GET"), eq(authnURL), eq(ElasticUser.NAME), any(SecureString.class), any(CheckedSupplier.class), + any(CheckedFunction.class))).thenReturn(httpResponse); + + URL xpackSecurityPluginQueryURL = queryXPackSecurityFeatureConfigURL(url); + + Set featureSets = new HashSet<>(); + featureSets.add(new FeatureSet("logstash", null, true, true, null)); + featureSets.add(new FeatureSet("security", null, true, true, null)); + FeatureSetsInfo featureInfos = new FeatureSetsInfo(featureSets); + XPackInfoResponse xpackInfo = new XPackInfoResponse(null, null, featureInfos); + String securityPluginQueryResponseBody = null; + try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + builder.startObject(); + builder.field("features", xpackInfo.getFeatureSetsInfo()); + builder.endObject(); + securityPluginQueryResponseBody = Strings.toString(builder); + } + when(httpClient.execute(eq("GET"), eq(xpackSecurityPluginQueryURL), eq(ElasticUser.NAME), any(SecureString.class), + any(CheckedSupplier.class), any(CheckedFunction.class))) + .thenReturn(createHttpResponse(HttpURLConnection.HTTP_OK, securityPluginQueryResponseBody)); + + thrown.expect(UserException.class); + thrown.expectMessage("Unknown error"); + execute(randomBoolean() ? "auto" : "interactive", pathHomeParameter); + + } + + public void testErrorMessagesWhenXPackPluginIsAvailableButNoSecurityLicense() throws Exception { + URL url = new URL(httpClient.getDefaultURL()); + URL authnURL = authenticateUrl(url); + URL xpackSecurityPluginQueryURL = queryXPackSecurityFeatureConfigURL(url); + + HttpResponse httpResponse = new HttpResponse(HttpURLConnection.HTTP_NOT_FOUND, new HashMap()); + when(httpClient.execute(eq("GET"), eq(authnURL), eq(ElasticUser.NAME), any(SecureString.class), any(CheckedSupplier.class), + any(CheckedFunction.class))).thenReturn(httpResponse); + + Set featureSets = new HashSet<>(); + featureSets.add(new FeatureSet("logstash", null, true, true, null)); + featureSets.add(new FeatureSet("security", null, false, false, null)); + FeatureSetsInfo featureInfos = new FeatureSetsInfo(featureSets); + XPackInfoResponse xpackInfo = new XPackInfoResponse(null, null, featureInfos); + String securityPluginQueryResponseBody = null; + try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + builder.startObject(); + builder.field("features", xpackInfo.getFeatureSetsInfo()); + builder.endObject(); + securityPluginQueryResponseBody = Strings.toString(builder); + } + when(httpClient.execute(eq("GET"), eq(xpackSecurityPluginQueryURL), eq(ElasticUser.NAME), any(SecureString.class), + any(CheckedSupplier.class), any(CheckedFunction.class))) + .thenReturn(createHttpResponse(HttpURLConnection.HTTP_OK, securityPluginQueryResponseBody)); + + thrown.expect(UserException.class); + thrown.expectMessage("X-Pack Security is not available."); + execute(randomBoolean() ? 
"auto" : "interactive", pathHomeParameter); + + } + + public void testErrorMessagesWhenXPackPluginIsAvailableWithValidLicenseButDisabledSecurity() throws Exception { + URL url = new URL(httpClient.getDefaultURL()); + URL authnURL = authenticateUrl(url); + URL xpackSecurityPluginQueryURL = queryXPackSecurityFeatureConfigURL(url); + + HttpResponse httpResponse = new HttpResponse(HttpURLConnection.HTTP_NOT_FOUND, new HashMap()); + when(httpClient.execute(eq("GET"), eq(authnURL), eq(ElasticUser.NAME), any(SecureString.class), any(CheckedSupplier.class), + any(CheckedFunction.class))).thenReturn(httpResponse); + + Set featureSets = new HashSet<>(); + featureSets.add(new FeatureSet("logstash", null, true, true, null)); + featureSets.add(new FeatureSet("security", null, true, false, null)); + FeatureSetsInfo featureInfos = new FeatureSetsInfo(featureSets); + XPackInfoResponse xpackInfo = new XPackInfoResponse(null, null, featureInfos); + String securityPluginQueryResponseBody = null; + try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + builder.startObject(); + builder.field("features", xpackInfo.getFeatureSetsInfo()); + builder.endObject(); + securityPluginQueryResponseBody = Strings.toString(builder); + } + when(httpClient.execute(eq("GET"), eq(xpackSecurityPluginQueryURL), eq(ElasticUser.NAME), any(SecureString.class), + any(CheckedSupplier.class), any(CheckedFunction.class))) + .thenReturn(createHttpResponse(HttpURLConnection.HTTP_OK, securityPluginQueryResponseBody)); + + thrown.expect(UserException.class); + thrown.expectMessage("X-Pack Security is disabled by configuration."); + execute(randomBoolean() ? "auto" : "interactive", pathHomeParameter); + } + + public void testWrongServer() throws Exception { + URL url = new URL(httpClient.getDefaultURL()); + URL authnURL = authenticateUrl(url); + doThrow(randomFrom(new IOException(), new SSLException(""))).when(httpClient).execute(eq("GET"), eq(authnURL), eq(ElasticUser.NAME), + any(SecureString.class), any(CheckedSupplier.class), any(CheckedFunction.class)); + + try { + execute(randomBoolean() ? "auto" : "interactive", pathHomeParameter); + fail("Should have thrown exception"); + } catch (UserException e) { + assertEquals(ExitCodes.CONFIG, e.exitCode); + } + } + + public void testRedCluster() throws Exception { + URL url = new URL(httpClient.getDefaultURL()); + + HttpResponse httpResponse = new HttpResponse(HttpURLConnection.HTTP_OK, new HashMap<>()); + when(httpClient.execute(eq("GET"), eq(authenticateUrl(url)), eq(ElasticUser.NAME), any(SecureString.class), + any(CheckedSupplier.class), any(CheckedFunction.class))).thenReturn(httpResponse); + + httpResponse = new HttpResponse(HttpURLConnection.HTTP_OK, MapBuilder.newMapBuilder() + .put("cluster_name", "elasticsearch").put("status", "red").put("number_of_nodes", 1).map()); + when(httpClient.execute(eq("GET"), eq(clusterHealthUrl(url)), eq(ElasticUser.NAME), any(SecureString.class), + any(CheckedSupplier.class), any(CheckedFunction.class))).thenReturn(httpResponse); + + terminal.addTextInput("n"); + try { + execute(randomBoolean() ? 
"auto" : "interactive", pathHomeParameter); + fail("Should have thrown exception"); + } catch (UserException e) { + assertEquals(ExitCodes.OK, e.exitCode); + assertThat(terminal.getOutput(), Matchers.containsString("Your cluster health is currently RED.")); + } + } + + public void testUrlOption() throws Exception { + URL url = new URL("http://localhost:9202" + randomFrom("", "/", "//", "/smth", "//smth/", "//x//x/")); + execute("auto", pathHomeParameter, "-u", url.toString(), "-b"); + + InOrder inOrder = Mockito.inOrder(httpClient); + + URL checkUrl = authenticateUrl(url); + inOrder.verify(httpClient).execute(eq("GET"), eq(checkUrl), eq(ElasticUser.NAME), eq(bootstrapPassword), any(CheckedSupplier.class), + any(CheckedFunction.class)); + for (String user : usersInSetOrder) { + URL urlWithRoute = passwordUrl(url, user); + inOrder.verify(httpClient).execute(eq("PUT"), eq(urlWithRoute), eq(ElasticUser.NAME), eq(bootstrapPassword), + any(CheckedSupplier.class), any(CheckedFunction.class)); + } + } + + public void testSetUserPassFail() throws Exception { + URL url = new URL(httpClient.getDefaultURL()); + String userToFail = randomFrom(SetupPasswordTool.USERS); + URL userToFailURL = passwordUrl(url, userToFail); + + doThrow(new IOException()).when(httpClient).execute(eq("PUT"), eq(userToFailURL), anyString(), any(SecureString.class), + any(CheckedSupplier.class), any(CheckedFunction.class)); + try { + execute(randomBoolean() ? "auto" : "interactive", pathHomeParameter, "-b"); + fail("Should have thrown exception"); + } catch (UserException e) { + assertEquals(ExitCodes.TEMP_FAILURE, e.exitCode); + } + } + + public void testInteractiveSetup() throws Exception { + URL url = new URL(httpClient.getDefaultURL()); + + terminal.addTextInput("Y"); + execute("interactive", pathHomeParameter); + + InOrder inOrder = Mockito.inOrder(httpClient); + + URL checkUrl = authenticateUrl(url); + inOrder.verify(httpClient).execute(eq("GET"), eq(checkUrl), eq(ElasticUser.NAME), eq(bootstrapPassword), any(CheckedSupplier.class), + any(CheckedFunction.class)); + for (String user : usersInSetOrder) { + URL urlWithRoute = passwordUrl(url, user); + ArgumentCaptor> passwordCaptor = ArgumentCaptor.forClass((Class) CheckedSupplier.class); + inOrder.verify(httpClient).execute(eq("PUT"), eq(urlWithRoute), eq(ElasticUser.NAME), eq(bootstrapPassword), + passwordCaptor.capture(), any(CheckedFunction.class)); + assertThat(passwordCaptor.getValue().get(), CoreMatchers.containsString(user + "-password")); + } + } + + public void testInteractivePasswordsFatFingers() throws Exception { + URL url = new URL(httpClient.getDefaultURL()); + + terminal.reset(); + terminal.addTextInput("Y"); + for (String user : SetupPasswordTool.USERS) { + // fail in strength and match + int failCount = randomIntBetween(3, 10); + while (failCount-- > 0) { + String password1 = randomAlphaOfLength(randomIntBetween(3, 10)); + terminal.addSecretInput(password1); + Validation.Error err = Validation.Users.validatePassword(password1.toCharArray()); + if (err == null) { + // passes strength validation, fail by mismatch + terminal.addSecretInput(password1 + "typo"); + } + } + // two good passwords + terminal.addSecretInput(user + "-password"); + terminal.addSecretInput(user + "-password"); + } + + execute("interactive", pathHomeParameter); + + InOrder inOrder = Mockito.inOrder(httpClient); + + URL checkUrl = authenticateUrl(url); + inOrder.verify(httpClient).execute(eq("GET"), eq(checkUrl), eq(ElasticUser.NAME), eq(bootstrapPassword), any(CheckedSupplier.class), + 
any(CheckedFunction.class)); + for (String user : usersInSetOrder) { + URL urlWithRoute = passwordUrl(url, user); + ArgumentCaptor<CheckedSupplier<String, Exception>> passwordCaptor = ArgumentCaptor.forClass((Class) CheckedSupplier.class); + inOrder.verify(httpClient).execute(eq("PUT"), eq(urlWithRoute), eq(ElasticUser.NAME), eq(bootstrapPassword), + passwordCaptor.capture(), any(CheckedFunction.class)); + assertThat(passwordCaptor.getValue().get(), CoreMatchers.containsString(user + "-password")); + } + } + + private String parsePassword(String value) throws IOException { + try (XContentParser parser = JsonXContent.jsonXContent + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, value)) { + XContentParser.Token token = parser.nextToken(); + if (token == XContentParser.Token.START_OBJECT) { + if (parser.nextToken() == XContentParser.Token.FIELD_NAME) { + if (parser.nextToken() == XContentParser.Token.VALUE_STRING) { + return parser.text(); + } + } + } + } + throw new RuntimeException("Did not properly parse password."); + } + + private URL authenticateUrl(URL url) throws MalformedURLException, URISyntaxException { + return new URL(url, (url.toURI().getPath() + "/_xpack/security/_authenticate").replaceAll("/+", "/") + "?pretty"); + } + + private URL passwordUrl(URL url, String user) throws MalformedURLException, URISyntaxException { + return new URL(url, (url.toURI().getPath() + "/_xpack/security/user/" + user + "/_password").replaceAll("/+", "/") + "?pretty"); + } + + private URL clusterHealthUrl(URL url) throws MalformedURLException, URISyntaxException { + return new URL(url, (url.toURI().getPath() + "/_cluster/health").replaceAll("/+", "/") + "?pretty"); + } + + private URL queryXPackSecurityFeatureConfigURL(URL url) throws MalformedURLException, URISyntaxException { + return new URL(url, + (url.toURI().getPath() + "/_xpack").replaceAll("/+", "/") + "?categories=features&human=false&pretty"); + } + + private HttpResponse createHttpResponse(final int httpStatus, final String responseJson) throws IOException { + HttpResponseBuilder builder = new HttpResponseBuilder(); + builder.withHttpStatus(httpStatus); + builder.withResponseBody(responseJson); + return builder.build(); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java new file mode 100644 index 0000000000000..b1500cc75208c --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java @@ -0,0 +1,255 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.security.authc.file; + +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.user.User; +import org.junit.Before; +import org.mockito.stubbing.Answer; + +import java.util.Locale; +import java.util.Map; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class FileRealmTests extends ESTestCase { + + private static final Answer VERIFY_PASSWORD_ANSWER = inv -> { + assertThat(inv.getArguments().length, is(3)); + Supplier supplier = (Supplier) inv.getArguments()[2]; + return AuthenticationResult.success(supplier.get()); + }; + + private FileUserPasswdStore userPasswdStore; + private FileUserRolesStore userRolesStore; + private Settings globalSettings; + + @Before + public void init() throws Exception { + userPasswdStore = mock(FileUserPasswdStore.class); + userRolesStore = mock(FileUserRolesStore.class); + globalSettings = Settings.builder().put("path.home", createTempDir()).build(); + } + + public void testAuthenticate() throws Exception { + when(userPasswdStore.verifyPassword(eq("user1"), eq(new SecureString("test123")), any(Supplier.class))) + .thenAnswer(VERIFY_PASSWORD_ANSWER); + when(userRolesStore.roles("user1")).thenReturn(new String[] { "role1", "role2" }); + RealmConfig config = new RealmConfig("file-test", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + FileRealm realm = new FileRealm(config, userPasswdStore, userRolesStore); + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("user1", new SecureString("test123")), future); + final AuthenticationResult result = future.actionGet(); + assertThat(result.getStatus(), is(AuthenticationResult.Status.SUCCESS)); + User user = result.getUser(); + assertThat(user, notNullValue()); + assertThat(user.principal(), equalTo("user1")); + assertThat(user.roles(), notNullValue()); + assertThat(user.roles().length, equalTo(2)); + assertThat(user.roles(), arrayContaining("role1", "role2")); + } + + public void testAuthenticateCaching() throws Exception { + Settings settings = Settings.builder() + .put("cache.hash_algo", Hasher.values()[randomIntBetween(0, Hasher.values().length - 1)].name().toLowerCase(Locale.ROOT)) + .build(); + 
RealmConfig config = new RealmConfig("file-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + when(userPasswdStore.verifyPassword(eq("user1"), eq(new SecureString("test123")), any(Supplier.class))) + .thenAnswer(VERIFY_PASSWORD_ANSWER); + when(userRolesStore.roles("user1")).thenReturn(new String[]{"role1", "role2"}); + FileRealm realm = new FileRealm(config, userPasswdStore, userRolesStore); + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("user1", new SecureString("test123")), future); + User user1 = future.actionGet().getUser(); + future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("user1", new SecureString("test123")), future); + User user2 = future.actionGet().getUser(); + assertThat(user1, sameInstance(user2)); + } + + public void testAuthenticateCachingRefresh() throws Exception { + RealmConfig config = new RealmConfig("file-test", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + userPasswdStore = spy(new UserPasswdStore(config)); + userRolesStore = spy(new UserRolesStore(config)); + when(userPasswdStore.verifyPassword(eq("user1"), eq(new SecureString("test123")), any(Supplier.class))) + .thenAnswer(VERIFY_PASSWORD_ANSWER); + doReturn(new String[] { "role1", "role2" }).when(userRolesStore).roles("user1"); + FileRealm realm = new FileRealm(config, userPasswdStore, userRolesStore); + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("user1", new SecureString("test123")), future); + User user1 = future.actionGet().getUser(); + future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("user1", new SecureString("test123")), future); + User user2 = future.actionGet().getUser(); + assertThat(user1, sameInstance(user2)); + + userPasswdStore.notifyRefresh(); + + future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("user1", new SecureString("test123")), future); + User user3 = future.actionGet().getUser(); + assertThat(user2, not(sameInstance(user3))); + future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("user1", new SecureString("test123")), future); + User user4 = future.actionGet().getUser(); + assertThat(user3, sameInstance(user4)); + + userRolesStore.notifyRefresh(); + + future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("user1", new SecureString("test123")), future); + User user5 = future.actionGet().getUser(); + assertThat(user4, not(sameInstance(user5))); + future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("user1", new SecureString("test123")), future); + User user6 = future.actionGet().getUser(); + assertThat(user5, sameInstance(user6)); + } + + public void testToken() throws Exception { + RealmConfig config = new RealmConfig("file-test", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + when(userPasswdStore.verifyPassword(eq("user1"), eq(new SecureString("test123")), any(Supplier.class))) + .thenAnswer(VERIFY_PASSWORD_ANSWER); + when(userRolesStore.roles("user1")).thenReturn(new String[]{"role1", "role2"}); + FileRealm realm = new FileRealm(config, userPasswdStore, userRolesStore); + + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + 
UsernamePasswordToken.putTokenHeader(threadContext, new UsernamePasswordToken("user1", new SecureString("test123"))); + + UsernamePasswordToken token = realm.token(threadContext); + assertThat(token, notNullValue()); + assertThat(token.principal(), equalTo("user1")); + assertThat(token.credentials(), notNullValue()); + assertThat(new String(token.credentials().getChars()), equalTo("test123")); + } + + public void testLookup() throws Exception { + when(userPasswdStore.userExists("user1")).thenReturn(true); + when(userRolesStore.roles("user1")).thenReturn(new String[] { "role1", "role2" }); + RealmConfig config = new RealmConfig("file-test", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + FileRealm realm = new FileRealm(config, userPasswdStore, userRolesStore); + + PlainActionFuture future = new PlainActionFuture<>(); + realm.lookupUser("user1", future); + User user = future.actionGet(); + + assertThat(user, notNullValue()); + assertThat(user.principal(), equalTo("user1")); + assertThat(user.roles(), notNullValue()); + assertThat(user.roles().length, equalTo(2)); + assertThat(user.roles(), arrayContaining("role1", "role2")); + } + + public void testLookupCaching() throws Exception { + when(userPasswdStore.userExists("user1")).thenReturn(true); + when(userRolesStore.roles("user1")).thenReturn(new String[] { "role1", "role2" }); + RealmConfig config = new RealmConfig("file-test", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + FileRealm realm = new FileRealm(config, userPasswdStore, userRolesStore); + + PlainActionFuture future = new PlainActionFuture<>(); + realm.lookupUser("user1", future); + User user = future.actionGet(); + future = new PlainActionFuture<>(); + realm.lookupUser("user1", future); + User user1 = future.actionGet(); + assertThat(user, sameInstance(user1)); + verify(userPasswdStore).userExists("user1"); + verify(userRolesStore).roles("user1"); + } + + public void testLookupCachingWithRefresh() throws Exception { + RealmConfig config = new RealmConfig("file-test", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + userPasswdStore = spy(new UserPasswdStore(config)); + userRolesStore = spy(new UserRolesStore(config)); + doReturn(true).when(userPasswdStore).userExists("user1"); + doReturn(new String[] { "role1", "role2" }).when(userRolesStore).roles("user1"); + FileRealm realm = new FileRealm(config, userPasswdStore, userRolesStore); + PlainActionFuture future = new PlainActionFuture<>(); + realm.lookupUser("user1", future); + User user1 = future.actionGet(); + future = new PlainActionFuture<>(); + realm.lookupUser("user1", future); + User user2 = future.actionGet(); + assertThat(user1, sameInstance(user2)); + + userPasswdStore.notifyRefresh(); + + future = new PlainActionFuture<>(); + realm.lookupUser("user1", future); + User user3 = future.actionGet(); + assertThat(user2, not(sameInstance(user3))); + future = new PlainActionFuture<>(); + realm.lookupUser("user1", future); + User user4 = future.actionGet(); + assertThat(user3, sameInstance(user4)); + + userRolesStore.notifyRefresh(); + + future = new PlainActionFuture<>(); + realm.lookupUser("user1", future); + User user5 = future.actionGet(); + assertThat(user4, not(sameInstance(user5))); + future = new PlainActionFuture<>(); + realm.lookupUser("user1", future); + User user6 = future.actionGet(); + assertThat(user5, 
sameInstance(user6)); + } + + public void testUsageStats() throws Exception { + int userCount = randomIntBetween(0, 1000); + when(userPasswdStore.usersCount()).thenReturn(userCount); + + Settings.Builder settings = Settings.builder(); + + int order = randomIntBetween(0, 10); + settings.put("order", order); + + RealmConfig config = new RealmConfig("file-realm", settings.build(), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + FileRealm realm = new FileRealm(config, userPasswdStore, userRolesStore); + + Map usage = realm.usageStats(); + assertThat(usage, is(notNullValue())); + assertThat(usage, hasEntry("name", "file-realm")); + assertThat(usage, hasEntry("order", order)); + assertThat(usage, hasEntry("size", userCount)); + } + + static class UserPasswdStore extends FileUserPasswdStore { + UserPasswdStore(RealmConfig config) { + super(config, mock(ResourceWatcherService.class)); + } + } + + static class UserRolesStore extends FileUserRolesStore { + UserRolesStore(RealmConfig config) { + super(config, mock(ResourceWatcherService.class)); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStoreTests.java new file mode 100644 index 0000000000000..367313c58a6cb --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStoreTests.java @@ -0,0 +1,235 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.file; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.security.audit.logfile.CapturingLogger; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.user.User; +import org.junit.After; +import org.junit.Before; + +import java.io.BufferedWriter; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.nio.file.StandardOpenOption; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.core.IsNull.nullValue; + +public class FileUserPasswdStoreTests extends ESTestCase { + + private Settings settings; + private Environment env; + private ThreadPool threadPool; + + @Before + public void init() { + settings = Settings.builder() + .put("resource.reload.interval.high", "2s") + .put("path.home", createTempDir()) + .build(); + env = TestEnvironment.newEnvironment(settings); + threadPool = new TestThreadPool("test"); + } + + @After + public void shutdown() throws InterruptedException { + terminate(threadPool); + } + + public void testStore_ConfiguredWithUnreadableFile() throws Exception { + Path xpackConf = env.configFile(); + Files.createDirectories(xpackConf); + Path file = xpackConf.resolve("users"); + + // writing in utf_16 should cause a parsing error as we try to read the file in utf_8 + Files.write(file, Collections.singletonList("aldlfkjldjdflkjd"), StandardCharsets.UTF_16); + + Settings fileSettings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("files.users", file.toAbsolutePath()).build(); + RealmConfig config = new RealmConfig("file-test", fileSettings, settings, env, threadPool.getThreadContext()); + ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool); + FileUserPasswdStore store = new FileUserPasswdStore(config, watcherService); + assertThat(store.usersCount(), is(0)); + } + + public void testStore_AutoReload() throws Exception { + Path users = getDataPath("users"); + Path xpackConf = env.configFile(); + Files.createDirectories(xpackConf); + Path file = xpackConf.resolve("users"); + Files.copy(users, file, StandardCopyOption.REPLACE_EXISTING); + + Settings fileSettings = randomBoolean() ? 
Settings.EMPTY : Settings.builder().put("files.users", file.toAbsolutePath()).build(); + RealmConfig config = new RealmConfig("file-test", fileSettings, settings, env, threadPool.getThreadContext()); + ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool); + final CountDownLatch latch = new CountDownLatch(1); + + FileUserPasswdStore store = new FileUserPasswdStore(config, watcherService, latch::countDown); + + User user = new User("bcrypt"); + assertThat(store.userExists("bcrypt"), is(true)); + AuthenticationResult result = store.verifyPassword("bcrypt", new SecureString("test123"), () -> user); + assertThat(result.getStatus(), is(AuthenticationResult.Status.SUCCESS)); + assertThat(result.getUser(), is(user)); + + watcherService.start(); + + try (BufferedWriter writer = Files.newBufferedWriter(file, StandardCharsets.UTF_8, StandardOpenOption.APPEND)) { + writer.newLine(); + writer.append("foobar:").append(new String(Hasher.BCRYPT.hash(new SecureString("barfoo")))); + } + + if (!latch.await(5, TimeUnit.SECONDS)) { + fail("Waited too long for the updated file to be picked up"); + } + + assertThat(store.userExists("foobar"), is(true)); + result = store.verifyPassword("foobar", new SecureString("barfoo"), () -> user); + assertThat(result.getStatus(), is(AuthenticationResult.Status.SUCCESS)); + assertThat(result.getUser(), is(user)); + } + + public void testStore_AutoReload_WithParseFailures() throws Exception { + Path users = getDataPath("users"); + Path xpackConf = env.configFile(); + Files.createDirectories(xpackConf); + Path testUsers = xpackConf.resolve("users"); + Files.copy(users, testUsers, StandardCopyOption.REPLACE_EXISTING); + + Settings fileSettings = Settings.builder() + .put("files.users", testUsers.toAbsolutePath()) + .build(); + + RealmConfig config = new RealmConfig("file-test", fileSettings, settings, env, threadPool.getThreadContext()); + ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool); + final CountDownLatch latch = new CountDownLatch(1); + + FileUserPasswdStore store = new FileUserPasswdStore(config, watcherService, latch::countDown); + + User user = new User("bcrypt"); + final AuthenticationResult result = store.verifyPassword("bcrypt", new SecureString("test123"), () -> user); + assertThat(result.getStatus(), is(AuthenticationResult.Status.SUCCESS)); + assertThat(result.getUser(), is(user)); + + watcherService.start(); + + // now replacing the content of the users file with something that cannot be read + Files.write(testUsers, Collections.singletonList("aldlfkjldjdflkjd"), StandardCharsets.UTF_16); + + if (!latch.await(5, TimeUnit.SECONDS)) { + fail("Waited too long for the updated file to be picked up"); + } + + assertThat(store.usersCount(), is(0)); + } + + public void testParseFile() throws Exception { + Path path = getDataPath("users"); + Map users = FileUserPasswdStore.parseFile(path, null, Settings.EMPTY); + assertThat(users, notNullValue()); + assertThat(users.size(), is(6)); + assertThat(users.get("bcrypt"), notNullValue()); + assertThat(new String(users.get("bcrypt")), equalTo("$2a$05$zxnP0vdREMxnEpkLCDI2OuSaSk/QEKA2.A42iOpI6U2u.RLLOWm1e")); + assertThat(users.get("bcrypt10"), notNullValue()); + assertThat(new String(users.get("bcrypt10")), equalTo("$2y$10$FMhmFjwU5.qxQ/BsEciS9OqcJVkFMgXMo4uH5CelOR1j4N9zIv67e")); + assertThat(users.get("md5"), notNullValue()); + assertThat(new String(users.get("md5")), equalTo("$apr1$R3DdqiAZ$aljIkaIVPSarmDMlJUBBP.")); + assertThat(users.get("crypt"), 
notNullValue()); + assertThat(new String(users.get("crypt")), equalTo("hsP1PYSLsEEvs")); + assertThat(users.get("plain"), notNullValue()); + assertThat(new String(users.get("plain")), equalTo("{plain}test123")); + assertThat(users.get("sha"), notNullValue()); + assertThat(new String(users.get("sha")), equalTo("{SHA}cojt0Pw//L6ToM8G41aOKFIWh7w=")); + } + + public void testParseFile_Empty() throws Exception { + Path empty = createTempFile(); + Logger logger = CapturingLogger.newCapturingLogger(Level.DEBUG); + Map users = FileUserPasswdStore.parseFile(empty, logger, Settings.EMPTY); + assertThat(users.isEmpty(), is(true)); + List events = CapturingLogger.output(logger.getName(), Level.DEBUG); + assertThat(events.size(), is(1)); + assertThat(events.get(0), containsString("parsed [0] users")); + } + + public void testParseFile_WhenFileDoesNotExist() throws Exception { + Path file = createTempDir().resolve(randomAlphaOfLength(10)); + Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + Map users = FileUserPasswdStore.parseFile(file, logger, Settings.EMPTY); + assertThat(users, nullValue()); + users = FileUserPasswdStore.parseFileLenient(file, logger, Settings.EMPTY); + assertThat(users, notNullValue()); + assertThat(users.isEmpty(), is(true)); + } + + public void testParseFile_WhenCannotReadFile() throws Exception { + Path file = createTempFile(); + // writing in utf_16 should cause a parsing error as we try to read the file in utf_8 + Files.write(file, Collections.singletonList("aldlfkjldjdflkjd"), StandardCharsets.UTF_16); + Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + try { + FileUserPasswdStore.parseFile(file, logger, Settings.EMPTY); + fail("expected a parse failure"); + } catch (IllegalStateException se) { + this.logger.info("expected", se); + } + } + + public void testParseFile_InvalidLineDoesNotResultInLoggerNPE() throws Exception { + Path file = createTempFile(); + Files.write(file, Arrays.asList("NotValidUsername=Password", "user:pass"), StandardCharsets.UTF_8); + Map users = FileUserPasswdStore.parseFile(file, null, Settings.EMPTY); + assertThat(users, notNullValue()); + assertThat(users.keySet(), hasSize(1)); + } + + public void testParseFileLenient_WhenCannotReadFile() throws Exception { + Path file = createTempFile(); + // writing in utf_16 should cause a parsing error as we try to read the file in utf_8 + Files.write(file, Collections.singletonList("aldlfkjldjdflkjd"), StandardCharsets.UTF_16); + Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + Map users = FileUserPasswdStore.parseFileLenient(file, logger, Settings.EMPTY); + assertThat(users, notNullValue()); + assertThat(users.isEmpty(), is(true)); + List events = CapturingLogger.output(logger.getName(), Level.ERROR); + assertThat(events.size(), is(1)); + assertThat(events.get(0), containsString("failed to parse users file")); + } + + public void testParseFileWithLineWithEmptyPasswordAndWhitespace() throws Exception { + Path file = createTempFile(); + Files.write(file, Collections.singletonList("user: "), StandardCharsets.UTF_8); + Map users = FileUserPasswdStore.parseFile(file, null, Settings.EMPTY); + assertThat(users, notNullValue()); + assertThat(users.keySet(), is(empty())); + } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserRolesStoreTests.java new file mode 100644 index 0000000000000..b2560da88e805 --- 
/dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserRolesStoreTests.java @@ -0,0 +1,289 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.file; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.audit.logfile.CapturingLogger; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.junit.After; +import org.junit.Before; + +import java.io.BufferedWriter; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.nio.file.StandardOpenOption; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.core.IsNull.nullValue; + +public class FileUserRolesStoreTests extends ESTestCase { + + private Settings settings; + private Environment env; + private ThreadPool threadPool; + + @Before + public void init() { + settings = Settings.builder() + .put("resource.reload.interval.high", "2s") + .put("path.home", createTempDir()) + .build(); + env = TestEnvironment.newEnvironment(settings); + threadPool = new TestThreadPool("test"); + } + + @After + public void shutdown() throws InterruptedException { + terminate(threadPool); + } + + public void testStore_ConfiguredWithUnreadableFile() throws Exception { + Path file = getUsersRolesPath(); + List lines = new ArrayList<>(); + lines.add("aldlfkjldjdflkjd"); + + // writing in utf_16 should cause a parsing error as we try to read the file in utf_8 + Files.write(file, lines, StandardCharsets.UTF_16); + + Settings fileSettings = randomBoolean() ? 
Settings.EMPTY : Settings.builder() + .put("files.users_roles", file.toAbsolutePath()) + .build(); + + RealmConfig config = new RealmConfig("file-test", fileSettings, settings, env, new ThreadContext(Settings.EMPTY)); + ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool); + FileUserRolesStore store = new FileUserRolesStore(config, watcherService); + assertThat(store.entriesCount(), is(0)); + } + + public void testStoreAutoReload() throws Exception { + Path users = getDataPath("users_roles"); + Path tmp = getUsersRolesPath(); + Files.copy(users, tmp, StandardCopyOption.REPLACE_EXISTING); + + Settings fileSettings = randomBoolean() ? Settings.EMPTY : Settings.builder() + .put("files.users_roles", tmp.toAbsolutePath()) + .build(); + + RealmConfig config = new RealmConfig("file-test", fileSettings, settings, env, new ThreadContext(Settings.EMPTY)); + ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool); + final CountDownLatch latch = new CountDownLatch(1); + + FileUserRolesStore store = new FileUserRolesStore(config, watcherService, latch::countDown); + + String[] roles = store.roles("user1"); + assertThat(roles, notNullValue()); + assertThat(roles.length, is(3)); + assertThat(roles, arrayContaining("role1", "role2", "role3")); + assertThat(store.roles("user4"), equalTo(Strings.EMPTY_ARRAY)); + + watcherService.start(); + + try (BufferedWriter writer = Files.newBufferedWriter(tmp, StandardCharsets.UTF_8, StandardOpenOption.APPEND)) { + writer.newLine(); + writer.append("role4:user4\nrole5:user4\n"); + } + + if (!latch.await(5, TimeUnit.SECONDS)) { + fail("Waited too long for the updated file to be picked up"); + } + + roles = store.roles("user4"); + assertThat(roles, notNullValue()); + assertThat(roles.length, is(2)); + assertThat(roles, arrayContaining("role4", "role5")); + } + + public void testStoreAutoReloadWithParseFailure() throws Exception { + Path users = getDataPath("users_roles"); + Path tmp = getUsersRolesPath(); + Files.copy(users, tmp, StandardCopyOption.REPLACE_EXISTING); + + Settings fileSettings = randomBoolean() ? 
Settings.EMPTY : Settings.builder() + .put("files.users_roles", tmp.toAbsolutePath()) + .build(); + + RealmConfig config = new RealmConfig("file-test", fileSettings, settings, env, new ThreadContext(Settings.EMPTY)); + ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool); + final CountDownLatch latch = new CountDownLatch(1); + + FileUserRolesStore store = new FileUserRolesStore(config, watcherService, latch::countDown); + + String[] roles = store.roles("user1"); + assertThat(roles, notNullValue()); + assertThat(roles.length, is(3)); + assertThat(roles, arrayContaining("role1", "role2", "role3")); + assertThat(store.roles("user4"), equalTo(Strings.EMPTY_ARRAY)); + + watcherService.start(); + + // now replacing the content of the users file with something that cannot be read + Files.write(tmp, Collections.singletonList("aldlfkjldjdflkjd"), StandardCharsets.UTF_16); + + if (!latch.await(5, TimeUnit.SECONDS)) { + fail("Waited too long for the updated file to be picked up"); + } + + assertThat(store.entriesCount(), is(0)); + } + + public void testParseFile() throws Exception { + Path path = getDataPath("users_roles"); + Map usersRoles = FileUserRolesStore.parseFile(path, null); + assertThat(usersRoles, notNullValue()); + assertThat(usersRoles.size(), is(4)); + assertThat(usersRoles.get("user1"), notNullValue()); + assertThat(usersRoles.get("user1").length, is(3)); + assertThat(usersRoles.get("user1"), arrayContaining("role1", "role2", "role3")); + assertThat(usersRoles.get("user2"), notNullValue()); + assertThat(usersRoles.get("user2").length, is(2)); + assertThat(usersRoles.get("user2"), arrayContaining("role2", "role3")); + assertThat(usersRoles.get("user3"), notNullValue()); + assertThat(usersRoles.get("user3").length, is(1)); + assertThat(usersRoles.get("user3"), arrayContaining("role3")); + assertThat(usersRoles.get("period.user").length, is(1)); + assertThat(usersRoles.get("period.user"), arrayContaining("role4")); + } + + public void testParseFileEmpty() throws Exception { + Path empty = createTempFile(); + Logger log = CapturingLogger.newCapturingLogger(Level.DEBUG); + FileUserRolesStore.parseFile(empty, log); + List events = CapturingLogger.output(log.getName(), Level.DEBUG); + assertThat(events.size(), is(1)); + assertThat(events.get(0), containsString("parsed [0] user to role mappings")); + } + + public void testParseFileWhenFileDoesNotExist() throws Exception { + Path file = createTempDir().resolve(randomAlphaOfLength(10)); + Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + Map usersRoles = FileUserRolesStore.parseFile(file, logger); + assertThat(usersRoles, nullValue()); + usersRoles = FileUserRolesStore.parseFileLenient(file, logger); + assertThat(usersRoles, notNullValue()); + assertThat(usersRoles.isEmpty(), is(true)); + } + + public void testParseFileWhenCannotReadFile() throws Exception { + Path file = createTempFile(); + List lines = new ArrayList<>(); + lines.add("aldlfkjldjdflkjd"); + + // writing in utf_16 should cause a parsing error as we try to read the file in utf_8 + Files.write(file, lines, StandardCharsets.UTF_16); + Logger logger = CapturingLogger.newCapturingLogger(Level.DEBUG); + try { + FileUserRolesStore.parseFile(file, logger); + fail("expected a parse failure"); + } catch (Exception e) { + this.logger.info("expected", e); + } + } + + public void testParseFileEmptyRolesDoesNotCauseNPE() throws Exception { + ThreadPool threadPool = null; + try { + threadPool = new TestThreadPool("test"); + Path usersRoles = 
writeUsersRoles("role1:admin"); + + Settings settings = Settings.builder() + .put(XPackSettings.WATCHER_ENABLED.getKey(), "false") + .put("path.home", createTempDir()) + .build(); + + Settings fileSettings = randomBoolean() ? Settings.EMPTY : Settings.builder() + .put("files.users_roles", usersRoles.toAbsolutePath()) + .build(); + + Environment env = TestEnvironment.newEnvironment(settings); + RealmConfig config = new RealmConfig("file-test", fileSettings, settings, env, new ThreadContext(Settings.EMPTY)); + ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool); + FileUserRolesStore store = new FileUserRolesStore(config, watcherService); + assertThat(store.roles("user"), equalTo(Strings.EMPTY_ARRAY)); + } finally { + terminate(threadPool); + } + } + + public void testParseFileEmptyFileIsParsed() throws Exception { + assertInvalidInputIsSilentlyIgnored(""); + assertInvalidInputIsSilentlyIgnored("#"); + } + + public void testParseFileEmptyRoleNameDoesNotThrowException() throws Exception { + assertInvalidInputIsSilentlyIgnored(":user1,user2"); + assertInvalidInputIsSilentlyIgnored(" :user1,user2"); + } + + public void testParseFileEmptyRoleDoesNotThrowException() throws Exception { + assertInvalidInputIsSilentlyIgnored("role:"); + assertInvalidInputIsSilentlyIgnored("role: "); + assertInvalidInputIsSilentlyIgnored("role: , "); + } + + public void testParseFileLenientWhenCannotReadFile() throws Exception { + Path file = createTempFile(); + List lines = new ArrayList<>(); + lines.add("aldlfkjldjdflkjd"); + + // writing in utf_16 should cause a parsing error as we try to read the file in utf_8 + Files.write(file, lines, StandardCharsets.UTF_16); + Logger logger = CapturingLogger.newCapturingLogger(Level.DEBUG); + Map usersRoles = FileUserRolesStore.parseFileLenient(file, logger); + assertThat(usersRoles, notNullValue()); + assertThat(usersRoles.isEmpty(), is(true)); + List events = CapturingLogger.output(logger.getName(), Level.ERROR); + assertThat(events.size(), is(1)); + assertThat(events.get(0), containsString("failed to parse users_roles file")); + } + + private Path writeUsersRoles(String input) throws Exception { + Path file = getUsersRolesPath(); + Files.write(file, input.getBytes(StandardCharsets.UTF_8)); + return file; + } + + private Path getUsersRolesPath() throws IOException { + Path xpackConf = env.configFile(); + Files.createDirectories(xpackConf); + return xpackConf.resolve("users_roles"); + } + + private void assertInvalidInputIsSilentlyIgnored(String input) throws Exception { + Path file = createTempFile(); + Files.write(file, input.getBytes(StandardCharsets.UTF_8)); + Map usersRoles = FileUserRolesStore.parseFile(file, null); + String reason = String.format(Locale.ROOT, "Expected userRoles to be empty, but was %s", usersRoles.keySet()); + assertThat(reason, usersRoles.keySet(), hasSize(0)); + } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java new file mode 100644 index 0000000000000..52026cc8af5d9 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java @@ -0,0 +1,373 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.ldap; + +import com.unboundid.ldap.listener.InMemoryDirectoryServer; +import com.unboundid.ldap.listener.InMemoryDirectoryServerConfig; +import com.unboundid.ldap.sdk.Attribute; +import com.unboundid.ldap.sdk.LDAPException; +import com.unboundid.ldap.sdk.LDAPURL; +import com.unboundid.ldap.sdk.schema.Schema; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.ldap.ActiveDirectorySessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; +import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.core.ssl.VerificationMode; +import org.elasticsearch.xpack.security.authc.ldap.ActiveDirectorySessionFactory.DownLevelADAuthenticator; +import org.elasticsearch.xpack.security.authc.ldap.ActiveDirectorySessionFactory.UpnADAuthenticator; +import org.elasticsearch.xpack.security.authc.support.DnRoleMapper; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.security.AccessController; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING; +import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.URLS_SETTING; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +/** + * Active Directory Realm tests that use the UnboundID In Memory Directory Server + *
<p>
+ * AD is not LDAPv3 compliant so a workaround is needed + * AD realm binds with userPrincipalName but this is not a valid DN, so we have to add a second userPrincipalName to the + * users in the ldif in the form of CN=user@domain.com or a set the sAMAccountName to CN=user when testing authentication + * with the sAMAccountName field. + *
<p>
+ * The username used to authenticate then has to be in the form of CN=user. Finally the username needs to be added as an + * additional bind DN with a password in the test setup since it really is not a DN in the ldif file + */ +public class ActiveDirectoryRealmTests extends ESTestCase { + + private static final String PASSWORD = "password"; + private static final String ROLE_MAPPING_FILE_SETTING = DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING.getKey(); + + static int numberOfLdapServers; + InMemoryDirectoryServer[] directoryServers; + + private ResourceWatcherService resourceWatcherService; + private ThreadPool threadPool; + private Settings globalSettings; + private SSLService sslService; + + @BeforeClass + public static void setNumberOfLdapServers() { + numberOfLdapServers = randomIntBetween(1, 4); + } + + @Before + public void start() throws Exception { + InMemoryDirectoryServerConfig config = new InMemoryDirectoryServerConfig("dc=ad,dc=test,dc=elasticsearch,dc=com"); + // Get the default schema and overlay with the AD changes + config.setSchema(Schema.mergeSchemas(Schema.getDefaultStandardSchema(), + Schema.getSchema(getDataPath("ad-schema.ldif").toString()))); + + // Add the bind users here since AD is not LDAPv3 compliant + config.addAdditionalBindCredentials("CN=ironman@ad.test.elasticsearch.com", PASSWORD); + config.addAdditionalBindCredentials("CN=Thor@ad.test.elasticsearch.com", PASSWORD); + + directoryServers = new InMemoryDirectoryServer[numberOfLdapServers]; + for (int i = 0; i < numberOfLdapServers; i++) { + InMemoryDirectoryServer directoryServer = new InMemoryDirectoryServer(config); + directoryServer.add("dc=ad,dc=test,dc=elasticsearch,dc=com", new Attribute("dc", "UnboundID"), + new Attribute("objectClass", "top", "domain", "extensibleObject")); + directoryServer.importFromLDIF(false, getDataPath("ad.ldif").toString()); + // Must have privileged access because underlying server will accept socket connections + AccessController.doPrivileged((PrivilegedExceptionAction) () -> { + directoryServer.startListening(); + return null; + }); + directoryServers[i] = directoryServer; + } + threadPool = new TestThreadPool("active directory realm tests"); + resourceWatcherService = new ResourceWatcherService(Settings.EMPTY, threadPool); + globalSettings = Settings.builder().put("path.home", createTempDir()).build(); + sslService = new SSLService(globalSettings, TestEnvironment.newEnvironment(globalSettings)); + } + + @After + public void stop() throws InterruptedException { + resourceWatcherService.stop(); + terminate(threadPool); + for (int i = 0; i < numberOfLdapServers; i++) { + directoryServers[i].shutDown(true); + } + } + + @Override + public boolean enableWarningsCheck() { + return false; + } + + public void testAuthenticateUserPrincipleName() throws Exception { + Settings settings = settings(); + RealmConfig config = new RealmConfig("testAuthenticateUserPrincipleName", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); + DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); + LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("CN=ironman", new SecureString(PASSWORD)), future); + final AuthenticationResult result = 
future.actionGet(); + assertThat(result.getStatus(), is(AuthenticationResult.Status.SUCCESS)); + final User user = result.getUser(); + assertThat(user, is(notNullValue())); + assertThat(user.roles(), arrayContaining(containsString("Avengers"))); + } + + public void testAuthenticateSAMAccountName() throws Exception { + Settings settings = settings(); + RealmConfig config = new RealmConfig("testAuthenticateSAMAccountName", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); + DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); + LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + + // Thor does not have a UPN of form CN=Thor@ad.test.elasticsearch.com + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("CN=Thor", new SecureString(PASSWORD)), future); + User user = future.actionGet().getUser(); + assertThat(user, is(notNullValue())); + assertThat(user.roles(), arrayContaining(containsString("Avengers"))); + } + + protected String[] ldapUrls() throws LDAPException { + List urls = new ArrayList<>(numberOfLdapServers); + for (int i = 0; i < numberOfLdapServers; i++) { + LDAPURL url = new LDAPURL("ldap", "localhost", directoryServers[i].getListenPort(), null, null, null, null); + urls.add(url.toString()); + } + return urls.toArray(Strings.EMPTY_ARRAY); + } + + public void testAuthenticateCachesSuccessfulAuthentications() throws Exception { + Settings settings = settings(); + RealmConfig config = new RealmConfig("testAuthenticateCachesSuccesfulAuthentications", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + ActiveDirectorySessionFactory sessionFactory = spy(new ActiveDirectorySessionFactory(config, sslService, threadPool)); + DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); + LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + + int count = randomIntBetween(2, 10); + for (int i = 0; i < count; i++) { + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("CN=ironman", new SecureString(PASSWORD)), future); + future.actionGet(); + } + + // verify one and only one session as further attempts should be returned from cache + verify(sessionFactory, times(1)).session(eq("CN=ironman"), any(SecureString.class), any(ActionListener.class)); + } + + public void testAuthenticateCachingCanBeDisabled() throws Exception { + Settings settings = settings(Settings.builder().put(CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.getKey(), -1).build()); + RealmConfig config = new RealmConfig("testAuthenticateCachingCanBeDisabled", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + ActiveDirectorySessionFactory sessionFactory = spy(new ActiveDirectorySessionFactory(config, sslService, threadPool)); + DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); + LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + + int count = randomIntBetween(2, 10); + for (int i = 0; i < count; i++) { + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(new 
UsernamePasswordToken("CN=ironman", new SecureString(PASSWORD)), future); + future.actionGet(); + } + + // verify one and only one session as second attempt should be returned from cache + verify(sessionFactory, times(count)).session(eq("CN=ironman"), any(SecureString.class), any(ActionListener.class)); + } + + public void testAuthenticateCachingClearsCacheOnRoleMapperRefresh() throws Exception { + Settings settings = settings(); + RealmConfig config = new RealmConfig("testAuthenticateCachingClearsCacheOnRoleMapperRefresh", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + ActiveDirectorySessionFactory sessionFactory = spy(new ActiveDirectorySessionFactory(config, sslService, threadPool)); + DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); + LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + + int count = randomIntBetween(2, 10); + for (int i = 0; i < count; i++) { + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("CN=ironman", new SecureString(PASSWORD)), future); + future.actionGet(); + } + + // verify one and only one session as further attempts should be returned from cache + verify(sessionFactory, times(1)).session(eq("CN=ironman"), any(SecureString.class), any(ActionListener.class)); + + // Refresh the role mappings + roleMapper.notifyRefresh(); + + for (int i = 0; i < count; i++) { + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("CN=ironman", new SecureString(PASSWORD)), future); + future.actionGet(); + } + + verify(sessionFactory, times(2)).session(eq("CN=ironman"), any(SecureString.class), any(ActionListener.class)); + } + + public void testUnauthenticatedLookupWithConnectionPool() throws Exception { + doUnauthenticatedLookup(true); + } + + public void testUnauthenticatedLookupWithoutConnectionPool() throws Exception { + doUnauthenticatedLookup(false); + } + + private void doUnauthenticatedLookup(boolean pooled) throws Exception { + final Settings.Builder builder = Settings.builder() + .put(ActiveDirectorySessionFactorySettings.POOL_ENABLED.getKey(), pooled) + .put(PoolingSessionFactorySettings.BIND_DN.getKey(), "CN=ironman@ad.test.elasticsearch.com"); + final boolean useLegacyBindPassword = randomBoolean(); + if (useLegacyBindPassword) { + builder.put(PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD.getKey(), PASSWORD); + } else { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(PoolingSessionFactorySettings.SECURE_BIND_PASSWORD.getKey(), PASSWORD); + builder.setSecureSettings(secureSettings); + } + Settings settings = settings(builder.build()); + RealmConfig config = new RealmConfig("testUnauthenticatedLookupWithConnectionPool", settings, globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + try (ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool)) { + DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); + LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + + PlainActionFuture future = new PlainActionFuture<>(); + realm.lookupUser("CN=Thor", future); + final User user = future.actionGet(); + assertThat(user, notNullValue()); + assertThat(user.principal(), equalTo("CN=Thor")); + } + } + + 
public void testRealmMapsGroupsToRoles() throws Exception { + Settings settings = settings(Settings.builder() + .put(ROLE_MAPPING_FILE_SETTING, getDataPath("role_mapping.yml")) + .build()); + RealmConfig config = new RealmConfig("testRealmMapsGroupsToRoles", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); + DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); + LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("CN=ironman", new SecureString(PASSWORD)), future); + User user = future.actionGet().getUser(); + assertThat(user, is(notNullValue())); + assertThat(user.roles(), arrayContaining(equalTo("group_role"))); + } + + public void testRealmMapsUsersToRoles() throws Exception { + Settings settings = settings(Settings.builder() + .put(ROLE_MAPPING_FILE_SETTING, getDataPath("role_mapping.yml")) + .build()); + RealmConfig config = new RealmConfig("testRealmMapsGroupsToRoles", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); + DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); + LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("CN=Thor", new SecureString(PASSWORD)), future); + User user = future.actionGet().getUser(); + assertThat(user, is(notNullValue())); + assertThat(user.roles(), arrayContainingInAnyOrder(equalTo("group_role"), equalTo("user_role"))); + } + + public void testRealmUsageStats() throws Exception { + String loadBalanceType = randomFrom("failover", "round_robin"); + Settings settings = settings(Settings.builder() + .put(ROLE_MAPPING_FILE_SETTING, getDataPath("role_mapping.yml")) + .put("load_balance.type", loadBalanceType) + .build()); + RealmConfig config = new RealmConfig("testRealmUsageStats", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)); + ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); + DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); + LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); + + Map stats = realm.usageStats(); + assertThat(stats, is(notNullValue())); + assertThat(stats, hasEntry("name", realm.name())); + assertThat(stats, hasEntry("order", realm.order())); + assertThat(stats, hasEntry("size", 0)); + assertThat(stats, hasEntry("ssl", false)); + assertThat(stats, hasEntry("load_balance_type", loadBalanceType)); + } + + public void testDefaultSearchFilters() throws Exception { + Settings settings = settings(); + RealmConfig config = new RealmConfig("testDefaultSearchFilters", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)); + ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); + 
assertEquals("(&(objectClass=user)(|(sAMAccountName={0})(userPrincipalName={0}@ad.test.elasticsearch.com)))", + sessionFactory.defaultADAuthenticator.getUserSearchFilter()); + assertEquals(UpnADAuthenticator.UPN_USER_FILTER, sessionFactory.upnADAuthenticator.getUserSearchFilter()); + assertEquals(DownLevelADAuthenticator.DOWN_LEVEL_FILTER, sessionFactory.downLevelADAuthenticator.getUserSearchFilter()); + } + + public void testCustomSearchFilters() throws Exception { + Settings settings = settings(Settings.builder() + .put(ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_FILTER_SETTING, "(objectClass=default)") + .put(ActiveDirectorySessionFactorySettings.AD_UPN_USER_SEARCH_FILTER_SETTING, "(objectClass=upn)") + .put(ActiveDirectorySessionFactorySettings.AD_DOWN_LEVEL_USER_SEARCH_FILTER_SETTING, "(objectClass=down level)") + .build()); + RealmConfig config = new RealmConfig("testDefaultSearchFilters", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)); + ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); + assertEquals("(objectClass=default)", sessionFactory.defaultADAuthenticator.getUserSearchFilter()); + assertEquals("(objectClass=upn)", sessionFactory.upnADAuthenticator.getUserSearchFilter()); + assertEquals("(objectClass=down level)", sessionFactory.downLevelADAuthenticator.getUserSearchFilter()); + } + + private Settings settings() throws Exception { + return settings(Settings.EMPTY); + } + + private Settings settings(Settings extraSettings) throws Exception { + Settings.Builder builder = Settings.builder() + .putList(URLS_SETTING, ldapUrls()) + .put(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING, "ad.test.elasticsearch.com") + .put(DnRoleMapperSettings.USE_UNMAPPED_GROUPS_AS_ROLES_SETTING.getKey(), true); + if (randomBoolean()) { + builder.put("ssl.verification_mode", VerificationMode.CERTIFICATE); + } else { + builder.put(HOSTNAME_VERIFICATION_SETTING, false); + } + return builder.put(extraSettings).build(); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySIDUtilTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySIDUtilTests.java new file mode 100644 index 0000000000000..a3bda896bd3d9 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySIDUtilTests.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.ldap; + +import org.apache.commons.codec.binary.Hex; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class ActiveDirectorySIDUtilTests extends ESTestCase { + + private static final String USER_SID_HEX ="01050000000000051500000050bd51b583ef8ebc4c75521ae9030000"; + private static final String USER_STRING_SID = "S-1-5-21-3042032976-3163484035-441611596-1001"; + + public void testSidConversion() throws Exception { + assertThat(USER_STRING_SID, equalTo(ActiveDirectorySIDUtil.convertToString(Hex.decodeHex(USER_SID_HEX.toCharArray())))); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/CancellableLdapRunnableTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/CancellableLdapRunnableTests.java new file mode 100644 index 0000000000000..18b84df6d6185 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/CancellableLdapRunnableTests.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.ldap; + +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.authc.ldap.LdapRealm.CancellableLdapRunnable; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.sameInstance; + +public class CancellableLdapRunnableTests extends ESTestCase { + + public void testTimingOutARunnable() { + AtomicReference exceptionAtomicReference = new AtomicReference<>(); + final CancellableLdapRunnable runnable = + new CancellableLdapRunnable(ActionListener.wrap(user -> { + throw new AssertionError("onResponse should not be called"); + }, exceptionAtomicReference::set), e -> null, () -> { + throw new AssertionError("runnable should not be executed"); + }, logger); + + runnable.maybeTimeout(); + runnable.run(); + assertNotNull(exceptionAtomicReference.get()); + assertThat(exceptionAtomicReference.get(), instanceOf(ElasticsearchTimeoutException.class)); + assertThat(exceptionAtomicReference.get().getMessage(), + containsString("timed out waiting for execution")); + } + + public void testCallTimeOutAfterRunning() { + final AtomicBoolean ran = new AtomicBoolean(false); + final AtomicBoolean listenerCalled = new AtomicBoolean(false); + final CancellableLdapRunnable runnable = + new CancellableLdapRunnable(ActionListener.wrap(user -> { + listenerCalled.set(true); + throw new AssertionError("onResponse should not be called"); + }, e -> { + listenerCalled.set(true); + throw new AssertionError("onFailure should not be called"); + }), e -> null, () -> ran.set(ran.get() == false), logger); + + runnable.run(); + assertTrue(ran.get()); + runnable.maybeTimeout(); + assertTrue(ran.get()); + // the listener shouldn't have ever been called. 
If it was, then either something called + // onResponse or onFailure was called as part of the timeout + assertFalse(listenerCalled.get()); + } + + public void testRejectingExecution() { + AtomicReference exceptionAtomicReference = new AtomicReference<>(); + final CancellableLdapRunnable runnable = + new CancellableLdapRunnable(ActionListener.wrap(user -> { + throw new AssertionError("onResponse should not be called"); + }, exceptionAtomicReference::set), e -> null, () -> { + throw new AssertionError("runnable should not be executed"); + }, logger); + + final Exception e = new RuntimeException("foo"); + runnable.onRejection(e); + + assertNotNull(exceptionAtomicReference.get()); + assertThat(exceptionAtomicReference.get(), sameInstance(e)); + } + + public void testTimeoutDuringExecution() throws InterruptedException { + final CountDownLatch listenerCalledLatch = new CountDownLatch(1); + final CountDownLatch timeoutCalledLatch = new CountDownLatch(1); + final CountDownLatch runningLatch = new CountDownLatch(1); + final ActionListener listener = ActionListener.wrap(user -> { + listenerCalledLatch.countDown(); + }, e -> { + throw new AssertionError("onFailure should not be executed"); + }); + final CancellableLdapRunnable runnable = new CancellableLdapRunnable(listener, e -> null, () -> { + runningLatch.countDown(); + try { + timeoutCalledLatch.await(); + listener.onResponse(null); + } catch (InterruptedException e) { + throw new AssertionError("don't interrupt me", e); + } + }, logger); + + Thread t = new Thread(runnable); + t.start(); + runningLatch.await(); + runnable.maybeTimeout(); + timeoutCalledLatch.countDown(); + listenerCalledLatch.await(); + t.join(); + } + + public void testExceptionInRunnable() { + AtomicReference resultRef = new AtomicReference<>(); + final ActionListener listener = ActionListener.wrap(resultRef::set, e -> { + throw new AssertionError("onFailure should not be executed"); + }); + String defaultValue = randomAlphaOfLengthBetween(2, 10); + final CancellableLdapRunnable runnable = new CancellableLdapRunnable<>(listener, e -> defaultValue, + () -> { + throw new RuntimeException("runnable intentionally failed"); + }, logger); + + runnable.run(); + assertThat(resultRef.get(), equalTo(defaultValue)); + } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/GroupsResolverTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/GroupsResolverTestCase.java new file mode 100644 index 0000000000000..26cd513ec78e5 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/GroupsResolverTestCase.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.ldap; + +import com.unboundid.ldap.sdk.Attribute; +import com.unboundid.ldap.sdk.LDAPConnection; +import com.unboundid.ldap.sdk.LDAPInterface; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession.GroupsResolver; +import org.elasticsearch.test.ESTestCase; +import org.junit.After; +import org.junit.Before; + +import java.nio.file.Path; +import java.util.Collection; +import java.util.List; + +public abstract class GroupsResolverTestCase extends ESTestCase { + + LDAPConnection ldapConnection; + + protected abstract String ldapUrl(); + + protected abstract String bindDN(); + + protected abstract String bindPassword(); + + protected abstract String trustPath(); + + @Before + public void setUpLdapConnection() throws Exception { + Path truststore = getDataPath(trustPath()); + this.ldapConnection = LdapTestUtils.openConnection(ldapUrl(), bindDN(), bindPassword(), truststore); + } + + @After + public void tearDownLdapConnection() throws Exception { + if (ldapConnection != null) { + ldapConnection.close(); + } + } + + protected static List resolveBlocking(GroupsResolver resolver, LDAPInterface ldapConnection, String dn, TimeValue timeLimit, + Logger logger, Collection attributes) { + PlainActionFuture> future = new PlainActionFuture<>(); + resolver.resolve(ldapConnection, dn, timeLimit, logger, attributes, future); + return future.actionGet(); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java new file mode 100644 index 0000000000000..042664fa6707d --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java @@ -0,0 +1,371 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.ldap; + +import com.unboundid.ldap.sdk.LDAPURL; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.LdapSessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; +import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; +import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.core.ssl.VerificationMode; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapTestCase; +import org.elasticsearch.xpack.security.authc.ldap.support.SessionFactory; +import org.elasticsearch.xpack.security.authc.support.DnRoleMapper; +import org.junit.After; +import org.junit.Before; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.URLS_SETTING; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +public class LdapRealmTests extends LdapTestCase { + + public static final String VALID_USER_TEMPLATE = "cn={0},ou=people,o=sevenSeas"; + public static final String VALID_USERNAME = "Thomas Masterman Hardy"; + public static final String PASSWORD = "pass"; + + private static final String USER_DN_TEMPLATES_SETTING_KEY = LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING.getKey(); + + private ThreadPool threadPool; + private ResourceWatcherService resourceWatcherService; + private Settings globalSettings; + private SSLService sslService; + + @Before + public void init() throws Exception { + threadPool = new TestThreadPool("ldap realm tests"); + resourceWatcherService = new ResourceWatcherService(Settings.EMPTY, threadPool); + globalSettings = Settings.builder().put("path.home", createTempDir()).build(); + sslService = new SSLService(globalSettings, 
TestEnvironment.newEnvironment(globalSettings)); + } + + @After + public void shutdown() throws InterruptedException { + resourceWatcherService.stop(); + terminate(threadPool); + } + + public void testAuthenticateSubTreeGroupSearch() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String userTemplate = VALID_USER_TEMPLATE; + Settings settings = buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE); + RealmConfig config = new RealmConfig("test-ldap-realm", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool); + LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService), + threadPool); + + PlainActionFuture future = new PlainActionFuture<>(); + ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future); + final AuthenticationResult result = future.actionGet(); + assertThat(result.getStatus(), is(AuthenticationResult.Status.SUCCESS)); + User user = result.getUser(); + assertThat(user, notNullValue()); + assertThat(user.roles(), arrayContaining("HMS Victory")); + assertThat(user.metadata(), notNullValue()); + assertThat(user.metadata().get("ldap_dn"), equalTo("cn=" + VALID_USERNAME + ",ou=people,o=sevenSeas")); + assertThat(user.metadata().get("ldap_groups"), instanceOf(List.class)); + assertThat((List) user.metadata().get("ldap_groups"), contains("cn=HMS Victory,ou=crews,ou=groups,o=sevenSeas")); + } + + public void testAuthenticateOneLevelGroupSearch() throws Exception { + String groupSearchBase = "ou=crews,ou=groups,o=sevenSeas"; + String userTemplate = VALID_USER_TEMPLATE; + Settings settings = Settings.builder() + .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.ONE_LEVEL)) + .build(); + RealmConfig config = new RealmConfig("test-ldap-realm", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + + LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool); + LdapRealm ldap = + new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService), threadPool); + + PlainActionFuture future = new PlainActionFuture<>(); + ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future); + final AuthenticationResult result = future.actionGet(); + assertThat(result.getStatus(), is(AuthenticationResult.Status.SUCCESS)); + User user = result.getUser(); + assertThat(user, notNullValue()); + assertThat("For roles " + Arrays.toString(user.roles()), user.roles(), arrayContaining("HMS Victory")); + assertThat(user.metadata(), notNullValue()); + assertThat(user.metadata().get("ldap_dn"), equalTo("cn=" + VALID_USERNAME + ",ou=people,o=sevenSeas")); + assertThat(user.metadata().get("ldap_groups"), instanceOf(List.class)); + assertThat((List) user.metadata().get("ldap_groups"), contains("cn=HMS Victory,ou=crews,ou=groups,o=sevenSeas")); + } + + public void testAuthenticateCaching() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String userTemplate = VALID_USER_TEMPLATE; + Settings settings = Settings.builder() + .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE)) + .build(); + RealmConfig config = new RealmConfig("test-ldap-realm", settings, globalSettings, 
TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + + LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool); + ldapFactory = spy(ldapFactory); + LdapRealm ldap = + new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService), threadPool); + + PlainActionFuture future = new PlainActionFuture<>(); + ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future); + assertThat(future.actionGet().getStatus(), is(AuthenticationResult.Status.SUCCESS)); + + future = new PlainActionFuture<>(); + ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future); + assertThat(future.actionGet().getStatus(), is(AuthenticationResult.Status.SUCCESS)); + + //verify one and only one session -> caching is working + verify(ldapFactory, times(1)).session(anyString(), any(SecureString.class), any(ActionListener.class)); + } + + public void testAuthenticateCachingRefresh() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String userTemplate = VALID_USER_TEMPLATE; + Settings settings = Settings.builder() + .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE)) + .build(); + RealmConfig config = new RealmConfig("test-ldap-realm", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + + LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool); + DnRoleMapper roleMapper = buildGroupAsRoleMapper(resourceWatcherService); + ldapFactory = spy(ldapFactory); + LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, roleMapper, threadPool); + PlainActionFuture future = new PlainActionFuture<>(); + ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future); + future.actionGet(); + future = new PlainActionFuture<>(); + ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future); + future.actionGet(); + + //verify one and only one session -> caching is working + verify(ldapFactory, times(1)).session(anyString(), any(SecureString.class), any(ActionListener.class)); + + roleMapper.notifyRefresh(); + + future = new PlainActionFuture<>(); + ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future); + future.actionGet(); + + //we need to session again + verify(ldapFactory, times(2)).session(anyString(), any(SecureString.class), any(ActionListener.class)); + } + + public void testAuthenticateNoncaching() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String userTemplate = VALID_USER_TEMPLATE; + Settings settings = Settings.builder() + .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE)) + .put(CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.getKey(), -1) + .build(); + RealmConfig config = new RealmConfig("test-ldap-realm", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + + LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool); + ldapFactory = spy(ldapFactory); + LdapRealm ldap = + new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService), threadPool); + PlainActionFuture future = new PlainActionFuture<>(); + ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, 
new SecureString(PASSWORD)), future); + future.actionGet(); + future = new PlainActionFuture<>(); + ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future); + future.actionGet(); + + //verify two and only two binds -> caching is disabled + verify(ldapFactory, times(2)).session(anyString(), any(SecureString.class), any(ActionListener.class)); + } + + public void testLdapRealmSelectsLdapSessionFactory() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String userTemplate = VALID_USER_TEMPLATE; + Settings settings = Settings.builder() + .putList(URLS_SETTING, ldapUrls()) + .putList(USER_DN_TEMPLATES_SETTING_KEY, userTemplate) + .put("group_search.base_dn", groupSearchBase) + .put("group_search.scope", LdapSearchScope.SUB_TREE) + .put("ssl.verification_mode", VerificationMode.CERTIFICATE) + .build(); + RealmConfig config = new RealmConfig("test-ldap-realm", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + SessionFactory sessionFactory = LdapRealm.sessionFactory(config, sslService, threadPool, LdapRealmSettings.LDAP_TYPE); + assertThat(sessionFactory, is(instanceOf(LdapSessionFactory.class))); + } + + public void testLdapRealmSelectsLdapUserSearchSessionFactory() throws Exception { + String groupSearchBase = "o=sevenSeas"; + Settings settings = Settings.builder() + .putList(URLS_SETTING, ldapUrls()) + .put("user_search.base_dn", "") + .put("bind_dn", "cn=Thomas Masterman Hardy,ou=people,o=sevenSeas") + .setSecureSettings(secureSettings("secure_bind_password", PASSWORD)) + .put("group_search.base_dn", groupSearchBase) + .put("group_search.scope", LdapSearchScope.SUB_TREE) + .put("ssl.verification_mode", VerificationMode.CERTIFICATE) + .build(); + RealmConfig config = new RealmConfig("test-ldap-realm-user-search", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + SessionFactory sessionFactory = LdapRealm.sessionFactory(config, sslService, threadPool, LdapRealmSettings.LDAP_TYPE); + try { + assertThat(sessionFactory, is(instanceOf(LdapUserSearchSessionFactory.class))); + } finally { + ((LdapUserSearchSessionFactory)sessionFactory).close(); + } + } + + private MockSecureSettings secureSettings(String key, String value) { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(key, value); + return secureSettings; + } + + public void testLdapRealmThrowsExceptionForUserTemplateAndSearchSettings() throws Exception { + Settings settings = Settings.builder() + .putList(URLS_SETTING, ldapUrls()) + .putList(USER_DN_TEMPLATES_SETTING_KEY, "cn=foo") + .put("user_search.base_dn", "cn=bar") + .put("group_search.base_dn", "") + .put("group_search.scope", LdapSearchScope.SUB_TREE) + .put("ssl.verification_mode", VerificationMode.CERTIFICATE) + .build(); + RealmConfig config = new RealmConfig("test-ldap-realm-user-search", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> LdapRealm.sessionFactory(config, null, threadPool, LdapRealmSettings.LDAP_TYPE)); + assertThat(e.getMessage(), + containsString("settings were found for both" + + " user search [xpack.security.authc.realms.test-ldap-realm-user-search.user_search.] 
and" + + " user template [xpack.security.authc.realms.test-ldap-realm-user-search.user_dn_templates]")); + } + + public void testLdapRealmThrowsExceptionWhenNeitherUserTemplateNorSearchSettingsProvided() throws Exception { + Settings settings = Settings.builder() + .putList(URLS_SETTING, ldapUrls()) + .put("group_search.base_dn", "") + .put("group_search.scope", LdapSearchScope.SUB_TREE) + .put("ssl.verification_mode", VerificationMode.CERTIFICATE) + .build(); + RealmConfig config = new RealmConfig("test-ldap-realm-user-search", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> LdapRealm.sessionFactory(config, null, threadPool, LdapRealmSettings.LDAP_TYPE)); + assertThat(e.getMessage(), + containsString("settings were not found for either" + + " user search [xpack.security.authc.realms.test-ldap-realm-user-search.user_search.] or" + + " user template [xpack.security.authc.realms.test-ldap-realm-user-search.user_dn_templates]")); + } + + public void testLdapRealmMapsUserDNToRole() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String userTemplate = VALID_USER_TEMPLATE; + Settings settings = Settings.builder() + .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE)) + .put(DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING.getKey(), + getDataPath("/org/elasticsearch/xpack/security/authc/support/role_mapping.yml")) + .build(); + RealmConfig config = new RealmConfig("test-ldap-realm-userdn", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + + LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool); + LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, + new DnRoleMapper(config, resourceWatcherService), threadPool); + + PlainActionFuture future = new PlainActionFuture<>(); + ldap.authenticate(new UsernamePasswordToken("Horatio Hornblower", new SecureString(PASSWORD)), future); + final AuthenticationResult result = future.actionGet(); + assertThat(result.getStatus(), is(AuthenticationResult.Status.SUCCESS)); + User user = result.getUser(); + assertThat(user, notNullValue()); + assertThat(user.roles(), arrayContaining("avenger")); + } + + /** + * The contract for {@link Realm} implementations is that they should log-and-return-null (and + * not call {@link ActionListener#onFailure(Exception)}) if there is an internal exception that prevented them from performing an + * authentication. + * This method tests that when an LDAP server is unavailable (invalid hostname), there is a null result + * rather than an exception. 
+ */ + public void testLdapConnectionFailureIsTreatedAsAuthenticationFailure() throws Exception { + LDAPURL url = new LDAPURL("ldap", "..", 12345, null, null, null, null); + String groupSearchBase = "o=sevenSeas"; + String userTemplate = VALID_USER_TEMPLATE; + Settings settings = buildLdapSettings(new String[] { url.toString() }, userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE); + RealmConfig config = new RealmConfig("test-ldap-realm", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool); + LdapRealm ldap = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, buildGroupAsRoleMapper(resourceWatcherService), + threadPool); + + PlainActionFuture future = new PlainActionFuture<>(); + ldap.authenticate(new UsernamePasswordToken(VALID_USERNAME, new SecureString(PASSWORD)), future); + final AuthenticationResult result = future.actionGet(); + assertThat(result.getStatus(), is(AuthenticationResult.Status.CONTINUE)); + assertThat(result.getUser(), nullValue()); + assertThat(result.getMessage(), is("authenticate failed")); + assertThat(result.getException(), notNullValue()); + assertThat(result.getException().getMessage(), containsString("UnknownHostException")); + } + + public void testUsageStats() throws Exception { + String groupSearchBase = "o=sevenSeas"; + Settings.Builder settings = Settings.builder() + .putList(URLS_SETTING, ldapUrls()) + .put("bind_dn", "cn=Thomas Masterman Hardy,ou=people,o=sevenSeas") + .put("bind_password", PASSWORD) + .put("group_search.base_dn", groupSearchBase) + .put("group_search.scope", LdapSearchScope.SUB_TREE) + .put(LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING.getKey(), "--") + .put("ssl.verification_mode", VerificationMode.CERTIFICATE); + + int order = randomIntBetween(0, 10); + settings.put("order", order); + + boolean userSearch = randomBoolean(); + if (userSearch) { + settings.put("user_search.base_dn", ""); + } + + RealmConfig config = new RealmConfig("ldap-realm", settings.build(), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + + LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool); + LdapRealm realm = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, + new DnRoleMapper(config, resourceWatcherService), threadPool); + + Map stats = realm.usageStats(); + assertThat(stats, is(notNullValue())); + assertThat(stats, hasEntry("name", "ldap-realm")); + assertThat(stats, hasEntry("order", realm.order())); + assertThat(stats, hasEntry("size", 0)); + assertThat(stats, hasEntry("ssl", false)); + assertThat(stats, hasEntry("user_search", userSearch)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java new file mode 100644 index 0000000000000..b96da799e48b5 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactoryTests.java @@ -0,0 +1,185 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.ldap; + +import com.unboundid.ldap.listener.InMemoryDirectoryServer; +import com.unboundid.ldap.sdk.BindRequest; +import com.unboundid.ldap.sdk.LDAPException; +import com.unboundid.ldap.sdk.LDAPURL; +import com.unboundid.ldap.sdk.SimpleBindRequest; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; +import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapTestCase; +import org.junit.After; +import org.junit.Before; + +import java.util.List; +import java.util.concurrent.ExecutionException; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +public class LdapSessionFactoryTests extends LdapTestCase { + private Settings globalSettings; + private SSLService sslService; + private ThreadPool threadPool; + + @Before + public void setup() throws Exception { + globalSettings = Settings.builder().put("path.home", createTempDir()).build(); + sslService = new SSLService(globalSettings, TestEnvironment.newEnvironment(globalSettings)); + threadPool = new TestThreadPool("LdapSessionFactoryTests thread pool"); + } + + @After + public void shutdown() throws InterruptedException { + terminate(threadPool); + } + + public void testBindWithReadTimeout() throws Exception { + InMemoryDirectoryServer ldapServer = randomFrom(ldapServers); + String ldapUrl = new LDAPURL("ldap", "localhost", ldapServer.getListenPort(), null, null, null, null).toString(); + String groupSearchBase = "o=sevenSeas"; + String userTemplates = "cn={0},ou=people,o=sevenSeas"; + + Settings settings = Settings.builder() + .put(buildLdapSettings(ldapUrl, userTemplates, groupSearchBase, LdapSearchScope.SUB_TREE)) + .put(SessionFactorySettings.TIMEOUT_TCP_READ_SETTING, "1ms") //1 millisecond + .put("path.home", createTempDir()) + .build(); + + RealmConfig config = new RealmConfig("ldap_realm", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool); + String user = "Horatio Hornblower"; + SecureString userPass = new SecureString("pass"); + + ldapServer.setProcessingDelayMillis(500L); + try { + UncategorizedExecutionException e = + expectThrows(UncategorizedExecutionException.class, () -> session(sessionFactory, user, userPass)); + assertThat(e.getCause(), instanceOf(ExecutionException.class)); + assertThat(e.getCause().getCause(), instanceOf(LDAPException.class)); + assertThat(e.getCause().getCause().getMessage(), containsString("A client-side timeout was encountered while waiting ")); + } finally { + ldapServer.setProcessingDelayMillis(0L); + } + } + + public void testBindWithTemplates() throws 
Exception { + String groupSearchBase = "o=sevenSeas"; + String[] userTemplates = new String[] { + "cn={0},ou=something,ou=obviously,ou=incorrect,o=sevenSeas", + "wrongname={0},ou=people,o=sevenSeas", + "cn={0},ou=people,o=sevenSeas", //this last one should work + }; + RealmConfig config = new RealmConfig("ldap_realm", buildLdapSettings(ldapUrls(), userTemplates, groupSearchBase, + LdapSearchScope.SUB_TREE), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + + LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool); + + String user = "Horatio Hornblower"; + SecureString userPass = new SecureString("pass"); + final SimpleBindRequest bindRequest = new SimpleBindRequest("cn=Horatio Hornblower,ou=people,o=sevenSeas", "pass"); + + try (LdapSession ldap = session(sessionFactory, user, userPass)) { + assertConnectionValid(ldap.getConnection(), bindRequest); + String dn = ldap.userDn(); + assertThat(dn, containsString(user)); + } + } + + public void testBindWithBogusTemplates() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String[] userTemplates = new String[] { + "cn={0},ou=something,ou=obviously,ou=incorrect,o=sevenSeas", + "wrongname={0},ou=people,o=sevenSeas", + "asdf={0},ou=people,o=sevenSeas", //none of these should work + }; + RealmConfig config = new RealmConfig("ldap_realm", buildLdapSettings(ldapUrls(), userTemplates, groupSearchBase, + LdapSearchScope.SUB_TREE), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + + LdapSessionFactory ldapFac = new LdapSessionFactory(config, sslService, threadPool); + + String user = "Horatio Hornblower"; + SecureString userPass = new SecureString("pass"); + UncategorizedExecutionException e = expectThrows(UncategorizedExecutionException.class, () -> session(ldapFac, user, userPass)); + assertThat(e.getCause(), instanceOf(ExecutionException.class)); + assertThat(e.getCause().getCause(), instanceOf(LDAPException.class)); + assertThat(e.getCause().getCause().getMessage(), containsString("Unable to bind as user")); + Throwable[] suppressed = e.getCause().getCause().getSuppressed(); + assertThat(suppressed.length, is(2)); + } + + public void testGroupLookupSubtree() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String userTemplate = "cn={0},ou=people,o=sevenSeas"; + RealmConfig config = new RealmConfig("ldap_realm", buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, + LdapSearchScope.SUB_TREE), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + + LdapSessionFactory ldapFac = new LdapSessionFactory(config, sslService, threadPool); + + String user = "Horatio Hornblower"; + SecureString userPass = new SecureString("pass"); + final SimpleBindRequest bindRequest = new SimpleBindRequest("cn=Horatio Hornblower,ou=people,o=sevenSeas", "pass"); + + try (LdapSession ldap = session(ldapFac, user, userPass)) { + assertConnectionValid(ldap.getConnection(), bindRequest); + List groups = groups(ldap); + assertThat(groups, contains("cn=HMS Lydia,ou=crews,ou=groups,o=sevenSeas")); + } + } + + public void testGroupLookupOneLevel() throws Exception { + String groupSearchBase = "ou=crews,ou=groups,o=sevenSeas"; + String userTemplate = "cn={0},ou=people,o=sevenSeas"; + RealmConfig config = new RealmConfig("ldap_realm", buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, + LdapSearchScope.ONE_LEVEL), globalSettings, 
TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + + LdapSessionFactory ldapFac = new LdapSessionFactory(config, sslService, threadPool); + + String user = "Horatio Hornblower"; + final SimpleBindRequest bindRequest = new SimpleBindRequest("cn=Horatio Hornblower,ou=people,o=sevenSeas", "pass"); + + try (LdapSession ldap = session(ldapFac, user, new SecureString("pass"))) { + assertConnectionValid(ldap.getConnection(), bindRequest); + List groups = groups(ldap); + assertThat(groups, contains("cn=HMS Lydia,ou=crews,ou=groups,o=sevenSeas")); + } + } + + public void testGroupLookupBase() throws Exception { + String groupSearchBase = "cn=HMS Lydia,ou=crews,ou=groups,o=sevenSeas"; + String userTemplate = "cn={0},ou=people,o=sevenSeas"; + RealmConfig config = new RealmConfig("ldap_realm", buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, + LdapSearchScope.BASE), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + + LdapSessionFactory ldapFac = new LdapSessionFactory(config, sslService, threadPool); + + String user = "Horatio Hornblower"; + SecureString userPass = new SecureString("pass"); + final SimpleBindRequest bindRequest = new SimpleBindRequest("cn=Horatio Hornblower,ou=people,o=sevenSeas", "pass"); + + try (LdapSession ldap = session(ldapFac, user, userPass)) { + assertConnectionValid(ldap.getConnection(), bindRequest); + List groups = groups(ldap); + assertThat(groups.size(), is(1)); + assertThat(groups, contains("cn=HMS Lydia,ou=crews,ou=groups,o=sevenSeas")); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapTestUtils.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapTestUtils.java new file mode 100644 index 0000000000000..dced24544aa98 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapTestUtils.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.ldap; + +import com.unboundid.ldap.sdk.LDAPConnection; +import com.unboundid.ldap.sdk.LDAPConnectionOptions; +import com.unboundid.ldap.sdk.LDAPURL; +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.core.ssl.VerificationMode; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils; + +import java.nio.file.Path; + +public class LdapTestUtils { + + private LdapTestUtils() { + // Utility class + } + + public static LDAPConnection openConnection(String url, String bindDN, String bindPassword, Path truststore) throws Exception { + boolean useGlobalSSL = ESTestCase.randomBoolean(); + Settings.Builder builder = Settings.builder().put("path.home", LuceneTestCase.createTempDir()); + MockSecureSettings secureSettings = new MockSecureSettings(); + builder.setSecureSettings(secureSettings); + if (useGlobalSSL) { + builder.put("xpack.ssl.truststore.path", truststore); + // fake realm to load config with certificate verification mode + builder.put("xpack.security.authc.realms.bar.ssl.truststore.path", truststore); + builder.put("xpack.security.authc.realms.bar.ssl.verification_mode", VerificationMode.CERTIFICATE); + secureSettings.setString("xpack.ssl.truststore.secure_password", "changeit"); + secureSettings.setString("xpack.security.authc.realms.bar.ssl.truststore.secure_password", "changeit"); + } else { + // fake realms so ssl will get loaded + builder.put("xpack.security.authc.realms.foo.ssl.truststore.path", truststore); + builder.put("xpack.security.authc.realms.foo.ssl.verification_mode", VerificationMode.FULL); + builder.put("xpack.security.authc.realms.bar.ssl.truststore.path", truststore); + builder.put("xpack.security.authc.realms.bar.ssl.verification_mode", VerificationMode.CERTIFICATE); + secureSettings.setString("xpack.security.authc.realms.foo.ssl.truststore.secure_password", "changeit"); + secureSettings.setString("xpack.security.authc.realms.bar.ssl.truststore.secure_password", "changeit"); + } + Settings settings = builder.build(); + Environment env = TestEnvironment.newEnvironment(settings); + SSLService sslService = new SSLService(settings, env); + + LDAPURL ldapurl = new LDAPURL(url); + LDAPConnectionOptions options = new LDAPConnectionOptions(); + options.setFollowReferrals(true); + options.setAllowConcurrentSocketFactoryUse(true); + options.setConnectTimeoutMillis(Math.toIntExact(SessionFactorySettings.TIMEOUT_DEFAULT.millis())); + options.setResponseTimeoutMillis(SessionFactorySettings.TIMEOUT_DEFAULT.millis()); + + Settings connectionSettings; + if (useGlobalSSL) { + connectionSettings = Settings.EMPTY; + } else { + MockSecureSettings connSecureSettings = new MockSecureSettings(); + connSecureSettings.setString("truststore.secure_password", "changeit"); + connectionSettings = Settings.builder().put("truststore.path", truststore) + .setSecureSettings(connSecureSettings).build(); + } + return LdapUtils.privilegedConnect(() -> new LDAPConnection(sslService.sslSocketFactory(connectionSettings), options, + ldapurl.getHost(), ldapurl.getPort(), bindDN, bindPassword)); + } +} diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java new file mode 100644 index 0000000000000..9d8fd1544f5a6 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java @@ -0,0 +1,596 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.ldap; + +import com.unboundid.ldap.listener.InMemoryDirectoryServer; +import com.unboundid.ldap.sdk.GetEntryLDAPConnectionPoolHealthCheck; +import com.unboundid.ldap.sdk.LDAPConnectionPool; +import com.unboundid.ldap.sdk.LDAPConnectionPoolHealthCheck; +import com.unboundid.ldap.sdk.LDAPException; +import com.unboundid.ldap.sdk.LDAPURL; +import com.unboundid.ldap.sdk.SimpleBindRequest; +import com.unboundid.ldap.sdk.SingleServerSet; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.ldap.LdapUserSearchSessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; +import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; +import org.elasticsearch.xpack.core.security.support.NoOpLogger; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapTestCase; +import org.junit.After; +import org.junit.Before; + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.isEmptyString; +import static org.hamcrest.Matchers.notNullValue; + +public class LdapUserSearchSessionFactoryTests extends LdapTestCase { + + private SSLService sslService; + private Settings globalSettings; + private ThreadPool threadPool; + + @Before + public void init() throws Exception { + Path keystore = getDataPath("support/ADtrust.jks"); + Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); + /* + * Prior to each test we reinitialize the socket factory with a new SSLService so that we get a new SSLContext. + * If we re-use a SSLContext, previously connected sessions can get re-established which breaks hostname + * verification tests since a re-established connection does not perform hostname verification. 
+ */ + + globalSettings = Settings.builder() + .put("path.home", createTempDir()) + .put("xpack.ssl.truststore.path", keystore) + .setSecureSettings(newSecureSettings("xpack.ssl.truststore.secure_password", "changeit")) + .build(); + sslService = new SSLService(globalSettings, env); + threadPool = new TestThreadPool("LdapUserSearchSessionFactoryTests"); + } + + @After + public void shutdown() throws InterruptedException { + terminate(threadPool); + } + + private MockSecureSettings newSecureSettings(String key, String value) { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(key, value); + return secureSettings; + } + + public void testSupportsUnauthenticatedSessions() throws Exception { + final boolean useAttribute = randomBoolean(); + Settings.Builder builder = Settings.builder() + .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, "", LdapSearchScope.SUB_TREE)) + .put("user_search.base_dn", "") + .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas") + .put("user_search.pool.enabled", randomBoolean()); + final boolean useLegacyBindPassword = configureBindPassword(builder); + if (useAttribute) { + builder.put("user_search.attribute", "cn"); + } else { + builder.put("user_search.filter", "(cn={0})"); + } + + RealmConfig config = new RealmConfig("ldap_realm", builder.build(), globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)); + + LdapUserSearchSessionFactory sessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool); + try { + assertThat(sessionFactory.supportsUnauthenticatedSession(), is(true)); + } finally { + sessionFactory.close(); + } + + assertDeprecationWarnings(useAttribute, useLegacyBindPassword); + } + + public void testUserSearchSubTree() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String userSearchBase = "o=sevenSeas"; + + final boolean useAttribute = randomBoolean(); + Settings.Builder builder = Settings.builder() + .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE)) + .put("user_search.base_dn", userSearchBase) + .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas") + .put("user_search.pool.enabled", randomBoolean()); + final boolean useLegacyBindPassword = configureBindPassword(builder); + if (useAttribute) { + builder.put("user_search.attribute", "cn"); + } else { + builder.put("user_search.filter", "(cn={0})"); + } + RealmConfig config = new RealmConfig("ldap_realm", builder.build(), globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)); + + LdapUserSearchSessionFactory sessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool); + + String user = "William Bush"; + SecureString userPass = new SecureString("pass"); + + try { + // auth + try (LdapSession ldap = session(sessionFactory, user, userPass)) { + assertConnectionValid(ldap.getConnection(), sessionFactory.bindCredentials); + String dn = ldap.userDn(); + assertThat(dn, containsString(user)); + } + + //lookup + try (LdapSession ldap = unauthenticatedSession(sessionFactory, user)) { + assertConnectionValid(ldap.getConnection(), sessionFactory.bindCredentials); + String dn = ldap.userDn(); + assertThat(dn, containsString(user)); + } + } finally { + sessionFactory.close(); + } + + assertDeprecationWarnings(useAttribute, useLegacyBindPassword); + } + + public void testUserSearchBaseScopeFailsWithWrongBaseDN() throws Exception { + String groupSearchBase = 
"o=sevenSeas"; + String userSearchBase = "o=sevenSeas"; + + final boolean useAttribute = randomBoolean(); + Settings.Builder builder = Settings.builder() + .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE)) + .put("user_search.base_dn", userSearchBase) + .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas") + .put("user_search.scope", LdapSearchScope.BASE) + .put("user_search.pool.enabled", randomBoolean()); + final boolean useLegacyBindPassword = configureBindPassword(builder); + if (useAttribute) { + builder.put("user_search.attribute", "cn"); + } else { + builder.put("user_search.filter", "(cn={0})"); + } + RealmConfig config = new RealmConfig("ldap_realm", builder.build(), globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)); + + LdapUserSearchSessionFactory sessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool); + + String user = "William Bush"; + SecureString userPass = new SecureString("pass"); + + try { + assertNull(session(sessionFactory, user, userPass)); + assertNull(unauthenticatedSession(sessionFactory, user)); + } finally { + sessionFactory.close(); + } + + assertDeprecationWarnings(useAttribute, useLegacyBindPassword); + } + + public void testUserSearchBaseScopePassesWithCorrectBaseDN() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String userSearchBase = "cn=William Bush,ou=people,o=sevenSeas"; + + Settings.Builder builder = Settings.builder() + .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE)) + .put("user_search.base_dn", userSearchBase) + .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas") + .put("user_search.scope", LdapSearchScope.BASE) + .put("user_search.pool.enabled", randomBoolean()); + final boolean useLegacyBindPassword = configureBindPassword(builder); + final boolean useAttribute = randomBoolean(); + if (useAttribute) { + builder.put("user_search.attribute", "cn"); + } else { + builder.put("user_search.filter", "(cn={0})"); + } + RealmConfig config = new RealmConfig("ldap_realm", builder.build(), globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)); + + LdapUserSearchSessionFactory sessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool); + + String user = "William Bush"; + SecureString userPass = new SecureString("pass"); + + try { + // auth + try (LdapSession ldap = session(sessionFactory, user, userPass)) { + assertConnectionValid(ldap.getConnection(), sessionFactory.bindCredentials); + String dn = ldap.userDn(); + assertThat(dn, containsString(user)); + } + + //lookup + try (LdapSession ldap = unauthenticatedSession(sessionFactory, user)) { + assertConnectionValid(ldap.getConnection(), sessionFactory.bindCredentials); + String dn = ldap.userDn(); + assertThat(dn, containsString(user)); + } + } finally { + sessionFactory.close(); + } + + assertDeprecationWarnings(useAttribute, useLegacyBindPassword); + } + + public void testUserSearchOneLevelScopeFailsWithWrongBaseDN() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String userSearchBase = "o=sevenSeas"; + + Settings.Builder builder = Settings.builder() + .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE)) + .put("user_search.base_dn", userSearchBase) + .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas") + .put("user_search.scope", LdapSearchScope.ONE_LEVEL) 
+ .put("user_search.pool.enabled", randomBoolean()); + final boolean useLegacyBindPassword = configureBindPassword(builder); + final boolean useAttribute = randomBoolean(); + if (useAttribute) { + builder.put("user_search.attribute", "cn"); + } else { + builder.put("user_search.filter", "(cn={0})"); + } + RealmConfig config = new RealmConfig("ldap_realm", builder.build(), globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)); + + LdapUserSearchSessionFactory sessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool); + + String user = "William Bush"; + SecureString userPass = new SecureString("pass"); + + try { + assertNull(session(sessionFactory, user, userPass)); + assertNull(unauthenticatedSession(sessionFactory, user)); + } finally { + sessionFactory.close(); + } + + assertDeprecationWarnings(useAttribute, useLegacyBindPassword); + } + + public void testUserSearchOneLevelScopePassesWithCorrectBaseDN() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String userSearchBase = "ou=people,o=sevenSeas"; + + Settings.Builder builder = Settings.builder() + .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE)) + .put("user_search.base_dn", userSearchBase) + .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas") + .put("user_search.scope", LdapSearchScope.ONE_LEVEL) + .put("user_search.pool.enabled", randomBoolean()); + final boolean useLegacyBindPassword = configureBindPassword(builder); + final boolean useAttribute = randomBoolean(); + if (useAttribute) { + builder.put("user_search.attribute", "cn"); + } else { + builder.put("user_search.filter", "(cn={0})"); + } + RealmConfig config = new RealmConfig("ldap_realm", builder.build(), globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)); + + LdapUserSearchSessionFactory sessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool); + + String user = "William Bush"; + SecureString userPass = new SecureString("pass"); + + try { + //auth + try (LdapSession ldap = session(sessionFactory, user, userPass)) { + assertConnectionValid(ldap.getConnection(), sessionFactory.bindCredentials); + String dn = ldap.userDn(); + assertThat(dn, containsString(user)); + } + + //lookup + try (LdapSession ldap = unauthenticatedSession(sessionFactory, user)) { + assertConnectionValid(ldap.getConnection(), sessionFactory.bindCredentials); + String dn = ldap.userDn(); + assertThat(dn, containsString(user)); + } + } finally { + sessionFactory.close(); + } + + assertDeprecationWarnings(useAttribute, useLegacyBindPassword); + } + + public void testUserSearchWithBadAttributeFails() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String userSearchBase = "o=sevenSeas"; + + Settings.Builder builder = Settings.builder() + .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE)) + .put("user_search.base_dn", userSearchBase) + .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas") + .put("user_search.pool.enabled", randomBoolean()); + final boolean useLegacyBindPassword = configureBindPassword(builder); + final boolean useAttribute = randomBoolean(); + if (useAttribute) { + builder.put("user_search.attribute", "uid1"); + } else { + builder.put("user_search.filter", "(uid1={0})"); + } + RealmConfig config = new RealmConfig("ldap_realm", builder.build(), globalSettings, TestEnvironment.newEnvironment(globalSettings), + 
new ThreadContext(globalSettings)); + + LdapUserSearchSessionFactory sessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool); + + String user = "William Bush"; + SecureString userPass = new SecureString("pass"); + + try { + assertNull(session(sessionFactory, user, userPass)); + assertNull(unauthenticatedSession(sessionFactory, user)); + } finally { + sessionFactory.close(); + } + + assertDeprecationWarnings(useAttribute, useLegacyBindPassword); + } + + public void testUserSearchWithoutAttributePasses() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String userSearchBase = "o=sevenSeas"; + + final Settings.Builder realmSettings = Settings.builder() + .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE)) + .put("user_search.base_dn", userSearchBase) + .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas") + .put("user_search.pool.enabled", randomBoolean()); + final boolean useLegacyBindPassword = configureBindPassword(realmSettings); + RealmConfig config = new RealmConfig("ldap_realm", realmSettings.build(), globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + + LdapUserSearchSessionFactory sessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool); + + String user = "wbush"; + SecureString userPass = new SecureString("pass"); + + try { + //auth + try (LdapSession ldap = session(sessionFactory, user, userPass)) { + assertConnectionValid(ldap.getConnection(), sessionFactory.bindCredentials); + String dn = ldap.userDn(); + assertThat(dn, containsString("William Bush")); + } + + //lookup + try (LdapSession ldap = unauthenticatedSession(sessionFactory, user)) { + assertConnectionValid(ldap.getConnection(), sessionFactory.bindCredentials); + String dn = ldap.userDn(); + assertThat(dn, containsString("William Bush")); + } + } finally { + sessionFactory.close(); + } + + assertDeprecationWarnings(false, useLegacyBindPassword); + } + + public void testConnectionPoolDefaultSettings() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String userSearchBase = "o=sevenSeas"; + final Settings.Builder realmSettings = Settings.builder() + .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE)) + .put("user_search.base_dn", userSearchBase) + .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas"); + configureBindPassword(realmSettings); + RealmConfig config = new RealmConfig("ldap_realm", realmSettings.build(), globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + + LDAPConnectionPool connectionPool = LdapUserSearchSessionFactory.createConnectionPool(config, new SingleServerSet("localhost", + randomFrom(ldapServers).getListenPort()), TimeValue.timeValueSeconds(5), NoOpLogger.INSTANCE, + new SimpleBindRequest("cn=Horatio Hornblower,ou=people,o=sevenSeas", "pass"), + () -> "cn=Horatio Hornblower,ou=people,o=sevenSeas"); + try { + assertThat(connectionPool.getCurrentAvailableConnections(), + is(PoolingSessionFactorySettings.DEFAULT_CONNECTION_POOL_INITIAL_SIZE)); + assertThat(connectionPool.getMaximumAvailableConnections(), + is(PoolingSessionFactorySettings.DEFAULT_CONNECTION_POOL_SIZE)); + assertEquals(connectionPool.getHealthCheck().getClass(), GetEntryLDAPConnectionPoolHealthCheck.class); + GetEntryLDAPConnectionPoolHealthCheck healthCheck = (GetEntryLDAPConnectionPoolHealthCheck) connectionPool.getHealthCheck(); + 
assertThat(healthCheck.getEntryDN(), is("cn=Horatio Hornblower,ou=people,o=sevenSeas")); + assertThat(healthCheck.getMaxResponseTimeMillis(), is(SessionFactorySettings.TIMEOUT_DEFAULT.millis())); + } finally { + connectionPool.close(); + } + } + + public void testConnectionPoolSettings() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String userSearchBase = "o=sevenSeas"; + final Settings.Builder realmSettings = Settings.builder() + .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE)) + .put("user_search.base_dn", userSearchBase) + .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas") + .put("user_search.pool.initial_size", 10) + .put("user_search.pool.size", 12) + .put("user_search.pool.health_check.enabled", false); + configureBindPassword(realmSettings); + RealmConfig config = new RealmConfig("ldap_realm", realmSettings.build(), globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + + LDAPConnectionPool connectionPool = LdapUserSearchSessionFactory.createConnectionPool(config, new SingleServerSet("localhost", + randomFrom(ldapServers).getListenPort()), TimeValue.timeValueSeconds(5), NoOpLogger.INSTANCE, + new SimpleBindRequest("cn=Horatio Hornblower,ou=people,o=sevenSeas", "pass"), + () -> "cn=Horatio Hornblower,ou=people,o=sevenSeas"); + try { + assertThat(connectionPool.getCurrentAvailableConnections(), is(10)); + assertThat(connectionPool.getMaximumAvailableConnections(), is(12)); + assertThat(connectionPool.retryFailedOperationsDueToInvalidConnections(), is(true)); + assertEquals(connectionPool.getHealthCheck().getClass(), LDAPConnectionPoolHealthCheck.class); + } finally { + connectionPool.close(); + } + } + + public void testThatEmptyBindDNWithHealthCheckEnabledDoesNotThrow() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String userSearchBase = "o=sevenSeas"; + RealmConfig config = new RealmConfig("ldap_realm", Settings.builder() + .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE)) + .put("user_search.base_dn", userSearchBase) + .put("bind_password", "pass") + .build(), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + + LdapUserSearchSessionFactory searchSessionFactory = null; + try { + searchSessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool); + } finally { + if (searchSessionFactory != null) { + searchSessionFactory.close(); + } + } + + assertDeprecationWarnings(false, true); + } + + public void testThatEmptyBindDNAndDisabledPoolingDoesNotThrow() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String userSearchBase = "o=sevenSeas"; + RealmConfig config = new RealmConfig("ldap_realm", Settings.builder() + .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE)) + .put("user_search.base_dn", userSearchBase) + .put("user_search.pool.enabled", false) + .put("bind_password", "pass") + .build(), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + + LdapUserSearchSessionFactory searchSessionFactory = null; + try { + searchSessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool); + final PlainActionFuture future = new PlainActionFuture<>(); + searchSessionFactory.session("cn=ironman", new SecureString("password".toCharArray()), future); + future.get(); + } finally { + if (searchSessionFactory 
!= null) { + searchSessionFactory.close(); + } + } + + assertDeprecationWarnings(false, true); + } + + public void testEmptyBindDNReturnsAnonymousBindRequest() throws LDAPException { + String groupSearchBase = "o=sevenSeas"; + String userSearchBase = "o=sevenSeas"; + final Settings.Builder realmSettings = Settings.builder() + .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE)) + .put("user_search.base_dn", userSearchBase); + final boolean useLegacyBindPassword = configureBindPassword(realmSettings); + RealmConfig config = new RealmConfig("ldap_realm", realmSettings.build(), globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + try (LdapUserSearchSessionFactory searchSessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool)) { + assertThat(searchSessionFactory.bindCredentials, notNullValue()); + assertThat(searchSessionFactory.bindCredentials.getBindDN(), isEmptyString()); + } + assertDeprecationWarnings(false, useLegacyBindPassword); + } + + public void testThatBindRequestReturnsSimpleBindRequest() throws LDAPException { + String groupSearchBase = "o=sevenSeas"; + String userSearchBase = "o=sevenSeas"; + final Settings.Builder realmSettings = Settings.builder() + .put(buildLdapSettings(ldapUrls(), Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE)) + .put("bind_dn", "cn=ironman") + .put("user_search.base_dn", userSearchBase); + final boolean useLegacyBindPassword = configureBindPassword(realmSettings); + RealmConfig config = new RealmConfig("ldap_realm", realmSettings.build(), globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + try (LdapUserSearchSessionFactory searchSessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool)) { + assertThat(searchSessionFactory.bindCredentials, notNullValue()); + assertThat(searchSessionFactory.bindCredentials.getBindDN(), is("cn=ironman")); + } + assertDeprecationWarnings(false, useLegacyBindPassword); + } + + public void testThatConnectErrorIsNotThrownOnConstruction() throws Exception { + String groupSearchBase = "DC=ad,DC=test,DC=elasticsearch,DC=com"; + String userSearchBase = "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; + + // pick a random ldap server and stop it + InMemoryDirectoryServer inMemoryDirectoryServer = randomFrom(ldapServers); + String ldapUrl = new LDAPURL("ldap", "localhost", inMemoryDirectoryServer.getListenPort(), null, null, null, null).toString(); + inMemoryDirectoryServer.shutDown(true); + + final Settings.Builder ldapSettingsBuilder = Settings.builder() + .put(LdapTestCase.buildLdapSettings(new String[]{ldapUrl}, Strings.EMPTY_ARRAY, + groupSearchBase, LdapSearchScope.SUB_TREE)) + .put("user_search.base_dn", userSearchBase) + .put("bind_dn", "ironman@ad.test.elasticsearch.com") + .put("user_search.attribute", "cn") + .put("timeout.tcp_connect", "500ms") + .put("type", "ldap") + .put("user_search.pool.health_check.enabled", false) + .put("user_search.pool.enabled", randomBoolean()); + + final boolean useLegacyBindPassword = configureBindPassword(ldapSettingsBuilder); + RealmConfig config = new RealmConfig("ldap_realm", ldapSettingsBuilder.build(), globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + LdapUserSearchSessionFactory searchSessionFactory = null; + try { + searchSessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool); + } finally { + if 
(searchSessionFactory != null) { + searchSessionFactory.close(); + } + } + + assertDeprecationWarnings(true, useLegacyBindPassword); + } + + private void assertDeprecationWarnings(boolean useAttribute, boolean legacyBindPassword) { + List<Setting<?>> deprecatedSettings = new ArrayList<>(); + if (useAttribute) { + deprecatedSettings.add(LdapUserSearchSessionFactorySettings.SEARCH_ATTRIBUTE); + } + if (legacyBindPassword) { + deprecatedSettings.add(PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD); + } + if (deprecatedSettings.size() > 0) { + assertSettingDeprecationsAndWarnings(deprecatedSettings.toArray(new Setting[deprecatedSettings.size()])); + } + } + + private boolean configureBindPassword(Settings.Builder builder) { + final boolean useLegacyBindPassword = randomBoolean(); + if (useLegacyBindPassword) { + builder.put("bind_password", "pass"); + } else { + builder.setSecureSettings(newSecureSettings("secure_bind_password", "pass")); + } + return useLegacyBindPassword; + } + + static LdapUserSearchSessionFactory getLdapUserSearchSessionFactory(RealmConfig config, SSLService sslService, ThreadPool threadPool) + throws LDAPException { + LdapUserSearchSessionFactory sessionFactory = new LdapUserSearchSessionFactory(config, sslService, threadPool); + if (sessionFactory.getConnectionPool() != null) { + // don't use this in production + // used here to catch bugs that might get masked by an automatic retry + sessionFactory.getConnectionPool().setRetryFailedOperationsDueToInvalidConnections(false); + } + return sessionFactory; + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverInMemoryTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverInMemoryTests.java new file mode 100644 index 0000000000000..fb20c08da6135 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverInMemoryTests.java @@ -0,0 +1,159 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.security.authc.ldap; + +import java.util.List; +import java.util.concurrent.ExecutionException; + +import com.unboundid.ldap.sdk.LDAPConnection; +import com.unboundid.ldap.sdk.LDAPConnectionOptions; +import com.unboundid.ldap.sdk.LDAPConnectionPool; +import com.unboundid.ldap.sdk.LDAPException; +import com.unboundid.ldap.sdk.LDAPURL; +import com.unboundid.ldap.sdk.ResultCode; +import com.unboundid.ldap.sdk.SimpleBindRequest; +import com.unboundid.ldap.sdk.SingleServerSet; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapTestCase; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils; +import org.junit.After; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.iterableWithSize; + +public class SearchGroupsResolverInMemoryTests extends LdapTestCase { + + private static final String WILLIAM_BUSH = "cn=William Bush,ou=people,o=sevenSeas"; + private LDAPConnection connection; + + @After + public void closeConnection() { + if (connection != null) { + connection.close(); + } + } + + /** + * Tests that a client-side timeout in the asynchronous LDAP SDK is treated as a failure, rather + * than simply returning no results. + */ + public void testSearchTimeoutIsFailure() throws Exception { + ldapServers[0].setProcessingDelayMillis(100); + + final LDAPConnectionOptions options = new LDAPConnectionOptions(); + options.setConnectTimeoutMillis(500); + options.setResponseTimeoutMillis(5); + connect(options); + + final Settings settings = Settings.builder() + .put("group_search.base_dn", "ou=groups,o=sevenSeas") + .put("group_search.scope", LdapSearchScope.SUB_TREE) + .build(); + final SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + final PlainActionFuture<List<String>> future = new PlainActionFuture<>(); + resolver.resolve(connection, WILLIAM_BUSH, TimeValue.timeValueSeconds(30), logger, null, future); + + final ExecutionException exception = expectThrows(ExecutionException.class, future::get); + final Throwable cause = exception.getCause(); + assertThat(cause, instanceOf(LDAPException.class)); + assertThat(((LDAPException) cause).getResultCode(), is(ResultCode.TIMEOUT)); + } + + /** + * Tests searching for groups when the "user_attribute" field is not set + */ + public void testResolveWithDefaultUserAttribute() throws Exception { + connect(new LDAPConnectionOptions()); + + Settings settings = Settings.builder() + .put("group_search.base_dn", "ou=groups,o=sevenSeas") + .put("group_search.scope", LdapSearchScope.SUB_TREE) + .build(); + + final List<String> groups = resolveGroups(settings, WILLIAM_BUSH); + assertThat(groups, iterableWithSize(1)); + assertThat(groups.get(0), containsString("HMS Lydia")); + } + + /** + * Tests searching for groups when the "user_attribute" field is set to "dn" (which is special) + */ + public void testResolveWithExplicitDnAttribute() throws Exception { + connect(new LDAPConnectionOptions()); + + Settings settings = Settings.builder() + .put("group_search.base_dn", "ou=groups,o=sevenSeas") + .put("group_search.user_attribute", "dn") + .build(); + + final List<String> groups = resolveGroups(settings, WILLIAM_BUSH); + assertThat(groups, 
iterableWithSize(1)); + assertThat(groups.get(0), containsString("HMS Lydia")); + } + + /** + * Tests searching for groups when the "user_attribute" field is set to a missing value + */ + public void testResolveWithMissingAttribute() throws Exception { + connect(new LDAPConnectionOptions()); + + Settings settings = Settings.builder() + .put("group_search.base_dn", "ou=groups,o=sevenSeas") + .put("group_search.user_attribute", "no-such-attribute") + .build(); + + final List<String> groups = resolveGroups(settings, WILLIAM_BUSH); + assertThat(groups, iterableWithSize(0)); + } + + public void testSearchWithConnectionPoolForOneResult() throws Exception { + final LDAPURL ldapurl = new LDAPURL(ldapUrls()[0]); + + try (LDAPConnectionPool pool = + LdapUtils.privilegedConnect(() -> new LDAPConnectionPool(new SingleServerSet(ldapurl.getHost(), ldapurl.getPort()), + new SimpleBindRequest("cn=Horatio Hornblower,ou=people,o=sevenSeas", "pass"), 0, 20))) { + + final Settings settings = Settings.builder() + .put("bind_dn", "cn=Horatio Hornblower,ou=people,o=sevenSeas") + .put("bind_password", "pass") + .put("user_search.base_dn", "ou=groups,o=sevenSeas") + .put("group_search.base_dn", "ou=groups,o=sevenSeas") + .put("group_search.scope", LdapSearchScope.SUB_TREE) + .build(); + final SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + final PlainActionFuture<List<String>> future = new PlainActionFuture<>(); + resolver.resolve(pool, + "cn=Moultrie Crystal,ou=people,o=sevenSeas", + TimeValue.timeValueSeconds(30), + logger, + null, future); + List<String> resolvedDNs = future.actionGet(); + assertEquals(1, resolvedDNs.size()); + } + } + + private void connect(LDAPConnectionOptions options) throws LDAPException { + if (connection != null) { + throw new IllegalStateException("Already connected (" + connection.getConnectionName() + ' ' + + connection.getConnectedAddress() + ')'); + } + final LDAPURL ldapurl = new LDAPURL(ldapUrls()[0]); + this.connection = LdapUtils.privilegedConnect(() -> new LDAPConnection(options, ldapurl.getHost(), ldapurl.getPort())); + } + + private List<String> resolveGroups(Settings settings, String userDn) { + final SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + final PlainActionFuture<List<String>> future = new PlainActionFuture<>(); + resolver.resolve(connection, userDn, TimeValue.timeValueSeconds(30), logger, null, future); + return future.actionGet(); + } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LDAPServersTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LDAPServersTests.java new file mode 100644 index 0000000000000..ec68a22fb4d0d --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LDAPServersTests.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.security.authc.ldap.support; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class LDAPServersTests extends ESTestCase { + + public void testConfigure1ldaps() { + String[] urls = new String[] { "ldaps://example.com:636" }; + + SessionFactory.LDAPServers servers = new SessionFactory.LDAPServers(urls); + assertThat(servers.addresses().length, is(equalTo(1))); + assertThat(servers.addresses()[0], is(equalTo("example.com"))); + assertThat(servers.ports().length, is(equalTo(1))); + assertThat(servers.ports()[0], is(equalTo(636))); + assertThat(servers.ssl(), is(equalTo(true))); + } + + public void testConfigure2ldaps() { + String[] urls = new String[] { "ldaps://primary.example.com:636", "LDAPS://secondary.example.com:10636" }; + + SessionFactory.LDAPServers servers = new SessionFactory.LDAPServers(urls); + assertThat(servers.addresses().length, is(equalTo(2))); + assertThat(servers.addresses()[0], is(equalTo("primary.example.com"))); + assertThat(servers.addresses()[1], is(equalTo("secondary.example.com"))); + assertThat(servers.ports().length, is(equalTo(2))); + assertThat(servers.ports()[0], is(equalTo(636))); + assertThat(servers.ports()[1], is(equalTo(10636))); + assertThat(servers.ssl(), is(equalTo(true))); + } + + public void testConfigure2ldap() { + String[] urls = new String[] { "ldap://primary.example.com:392", "LDAP://secondary.example.com:10392" }; + + SessionFactory.LDAPServers servers = new SessionFactory.LDAPServers(urls); + assertThat(servers.addresses().length, is(equalTo(2))); + assertThat(servers.addresses()[0], is(equalTo("primary.example.com"))); + assertThat(servers.addresses()[1], is(equalTo("secondary.example.com"))); + assertThat(servers.ports().length, is(equalTo(2))); + assertThat(servers.ports()[0], is(equalTo(392))); + assertThat(servers.ports()[1], is(equalTo(10392))); + assertThat(servers.ssl(), is(equalTo(false))); + } + + public void testConfigure1ldaps1ldap() { + String[] urls = new String[] { "LDAPS://primary.example.com:636", "ldap://secondary.example.com:392" }; + + try { + new SessionFactory.LDAPServers(urls); + fail("Expected IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("configured LDAP protocols are not all equal")); + } + } + + public void testConfigure1ldap1ldaps() { + String[] urls = new String[] { "ldap://primary.example.com:392", "ldaps://secondary.example.com:636" }; + + try { + new SessionFactory.LDAPServers(urls); + fail("Expected IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("configured LDAP protocols are not all equal")); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapLoadBalancingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapLoadBalancingTests.java new file mode 100644 index 0000000000000..00e111fa9d8f6 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapLoadBalancingTests.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.ldap.support; + +import com.unboundid.ldap.sdk.FailoverServerSet; +import com.unboundid.ldap.sdk.RoundRobinDNSServerSet; +import com.unboundid.ldap.sdk.RoundRobinServerSet; +import com.unboundid.ldap.sdk.ServerSet; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapLoadBalancingSettings; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +public class LdapLoadBalancingTests extends ESTestCase { + + public void testBadTypeThrowsException() { + String badType = randomAlphaOfLengthBetween(3, 12); + Settings settings = Settings.builder().put(LdapLoadBalancingSettings.LOAD_BALANCE_SETTINGS + "." + + LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING, badType).build(); + try { + LdapLoadBalancing.serverSet(null, null, settings, null, null); + fail("using type [" + badType + "] should have thrown an exception"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("unknown load balance type")); + } + } + + public void testFailoverServerSet() { + Settings settings = Settings.builder().put(LdapLoadBalancingSettings.LOAD_BALANCE_SETTINGS + "." + + LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING, "failover").build(); + String[] address = new String[] { "localhost" }; + int[] ports = new int[] { 26000 }; + ServerSet serverSet = LdapLoadBalancing.serverSet(address, ports, settings, null, null); + assertThat(serverSet, instanceOf(FailoverServerSet.class)); + assertThat(((FailoverServerSet)serverSet).reOrderOnFailover(), is(true)); + } + + public void testDnsFailover() { + Settings settings = Settings.builder().put(LdapLoadBalancingSettings.LOAD_BALANCE_SETTINGS + "." + + LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING, "dns_failover").build(); + String[] address = new String[] { "foo.bar" }; + int[] ports = new int[] { 26000 }; + ServerSet serverSet = LdapLoadBalancing.serverSet(address, ports, settings, null, null); + assertThat(serverSet, instanceOf(RoundRobinDNSServerSet.class)); + assertThat(((RoundRobinDNSServerSet)serverSet).getAddressSelectionMode(), is(RoundRobinDNSServerSet.AddressSelectionMode.FAILOVER)); + } + + public void testDnsFailoverBadArgs() { + Settings settings = Settings.builder().put(LdapLoadBalancingSettings.LOAD_BALANCE_SETTINGS + "." + + LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING, "dns_failover").build(); + String[] addresses = new String[] { "foo.bar", "localhost" }; + int[] ports = new int[] { 26000, 389 }; + try { + LdapLoadBalancing.serverSet(addresses, ports, settings, null, null); + fail("dns server sets only support a single URL"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("single url")); + } + + try { + LdapLoadBalancing.serverSet(new String[] { "127.0.0.1" }, new int[] { 389 }, settings, null, null); + fail("dns server sets only support DNS names"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("DNS name")); + } + } + + public void testRoundRobin() { + Settings settings = Settings.builder().put(LdapLoadBalancingSettings.LOAD_BALANCE_SETTINGS + "." 
+ + LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING, "round_robin").build(); + String[] address = new String[] { "localhost", "foo.bar" }; + int[] ports = new int[] { 389, 389 }; + ServerSet serverSet = LdapLoadBalancing.serverSet(address, ports, settings, null, null); + assertThat(serverSet, instanceOf(RoundRobinServerSet.class)); + } + + public void testDnsRoundRobin() { + Settings settings = Settings.builder().put(LdapLoadBalancingSettings.LOAD_BALANCE_SETTINGS + "." + + LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING, "dns_round_robin").build(); + String[] address = new String[] { "foo.bar" }; + int[] ports = new int[] { 26000 }; + ServerSet serverSet = LdapLoadBalancing.serverSet(address, ports, settings, null, null); + assertThat(serverSet, instanceOf(RoundRobinDNSServerSet.class)); + assertThat(((RoundRobinDNSServerSet)serverSet).getAddressSelectionMode(), + is(RoundRobinDNSServerSet.AddressSelectionMode.ROUND_ROBIN)); + } + + public void testDnsRoundRobinBadArgs() { + Settings settings = Settings.builder().put(LdapLoadBalancingSettings.LOAD_BALANCE_SETTINGS + "." + + LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING, "dns_round_robin").build(); + String[] addresses = new String[] { "foo.bar", "localhost" }; + int[] ports = new int[] { 26000, 389 }; + try { + LdapLoadBalancing.serverSet(addresses, ports, settings, null, null); + fail("dns server sets only support a single URL"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("single url")); + } + + try { + LdapLoadBalancing.serverSet(new String[] { "127.0.0.1" }, new int[] { 389 }, settings, null, null); + fail("dns server sets only support DNS names"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("DNS name")); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapMetaDataResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapMetaDataResolverTests.java new file mode 100644 index 0000000000000..bddfd3f4bcfca --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapMetaDataResolverTests.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.ldap.support; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import com.unboundid.ldap.sdk.Attribute; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.nullValue; + +public class LdapMetaDataResolverTests extends ESTestCase { + + private static final String HAWKEYE_DN = "uid=hawkeye,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + + private LdapMetaDataResolver resolver; + + public void testParseSettings() throws Exception { + resolver = new LdapMetaDataResolver(Settings.builder().putList("metadata", "cn", "uid").build(), false); + assertThat(resolver.attributeNames(), arrayContaining("cn", "uid")); + } + + public void testResolveSingleValuedAttributeFromCachedAttributes() throws Exception { + resolver = new LdapMetaDataResolver(Arrays.asList("cn", "uid"), true); + final Collection<Attribute> attributes = Arrays.asList( + new Attribute("cn", "Clint Barton"), + new Attribute("uid", "hawkeye"), + new Attribute("email", "clint.barton@shield.gov"), + new Attribute("memberOf", "cn=staff,ou=groups,dc=exmaple,dc=com", "cn=admin,ou=groups,dc=exmaple,dc=com") + ); + final Map<String, Object> map = resolve(attributes); + assertThat(map.size(), equalTo(2)); + assertThat(map.get("cn"), equalTo("Clint Barton")); + assertThat(map.get("uid"), equalTo("hawkeye")); + } + + public void testResolveMultiValuedAttributeFromCachedAttributes() throws Exception { + resolver = new LdapMetaDataResolver(Arrays.asList("cn", "uid"), true); + final Collection<Attribute> attributes = Arrays.asList( + new Attribute("cn", "Clint Barton", "hawkeye"), + new Attribute("uid", "hawkeye") + ); + final Map<String, Object> map = resolve(attributes); + assertThat(map.size(), equalTo(2)); + assertThat(map.get("cn"), instanceOf(List.class)); + assertThat((List) map.get("cn"), contains("Clint Barton", "hawkeye")); + assertThat(map.get("uid"), equalTo("hawkeye")); + } + + public void testResolveMissingAttributeFromCachedAttributes() throws Exception { + resolver = new LdapMetaDataResolver(Arrays.asList("cn", "uid"), true); + final Collection<Attribute> attributes = Collections.singletonList(new Attribute("uid", "hawkeye")); + final Map<String, Object> map = resolve(attributes); + assertThat(map.size(), equalTo(1)); + assertThat(map.get("cn"), nullValue()); + assertThat(map.get("uid"), equalTo("hawkeye")); + } + + private Map<String, Object> resolve(Collection<Attribute> attributes) throws Exception { + final PlainActionFuture<Map<String, Object>> future = new PlainActionFuture<>(); + resolver.resolve(null, HAWKEYE_DN, TimeValue.timeValueSeconds(1), logger, attributes, future); + return future.get(); + } +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapTestCase.java new file mode 100644 index 0000000000000..c802812038c32 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapTestCase.java @@ -0,0 +1,200 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.ldap.support; + +import com.unboundid.ldap.listener.InMemoryDirectoryServer; +import com.unboundid.ldap.sdk.Attribute; +import com.unboundid.ldap.sdk.BindRequest; +import com.unboundid.ldap.sdk.LDAPConnection; +import com.unboundid.ldap.sdk.LDAPConnectionPool; +import com.unboundid.ldap.sdk.LDAPException; +import com.unboundid.ldap.sdk.LDAPInterface; +import com.unboundid.ldap.sdk.LDAPURL; +import com.unboundid.ldap.sdk.SimpleBindRequest; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.ldap.LdapSessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapLoadBalancingSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; +import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings; +import org.elasticsearch.xpack.core.ssl.VerificationMode; +import org.elasticsearch.xpack.security.authc.support.DnRoleMapper; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING; +import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.URLS_SETTING; + +public abstract class LdapTestCase extends ESTestCase { + + private static final String USER_DN_TEMPLATES_SETTING_KEY = LdapSessionFactorySettings.USER_DN_TEMPLATES_SETTING.getKey(); + + static int numberOfLdapServers; + protected InMemoryDirectoryServer[] ldapServers; + + @BeforeClass + public static void setNumberOfLdapServers() { + numberOfLdapServers = randomIntBetween(1, 4); + } + + @Before + public void startLdap() throws Exception { + ldapServers = new InMemoryDirectoryServer[numberOfLdapServers]; + for (int i = 0; i < numberOfLdapServers; i++) { + InMemoryDirectoryServer ldapServer = new InMemoryDirectoryServer("o=sevenSeas"); + ldapServer.add("o=sevenSeas", new Attribute("dc", "UnboundID"), + new Attribute("objectClass", "top", "domain", "extensibleObject")); + ldapServer.importFromLDIF(false, + getDataPath("/org/elasticsearch/xpack/security/authc/ldap/support/seven-seas.ldif").toString()); + // Must have privileged access because underlying server will accept socket connections + AccessController.doPrivileged((PrivilegedExceptionAction) () -> { + ldapServer.startListening(); + return null; + }); + ldapServers[i] = ldapServer; + } + } + + @After + public void stopLdap() throws Exception { + 
for (int i = 0; i < numberOfLdapServers; i++) { + ldapServers[i].shutDown(true); + } + } + + protected String[] ldapUrls() throws LDAPException { + List urls = new ArrayList<>(numberOfLdapServers); + for (int i = 0; i < numberOfLdapServers; i++) { + LDAPURL url = new LDAPURL("ldap", "localhost", ldapServers[i].getListenPort(), null, null, null, null); + urls.add(url.toString()); + } + return urls.toArray(Strings.EMPTY_ARRAY); + } + + public static Settings buildLdapSettings(String ldapUrl, String userTemplate, String groupSearchBase, LdapSearchScope scope) { + return buildLdapSettings(new String[] { ldapUrl }, new String[] { userTemplate }, groupSearchBase, scope); + } + + public static Settings buildLdapSettings(String[] ldapUrl, String userTemplate, String groupSearchBase, LdapSearchScope scope) { + return buildLdapSettings(ldapUrl, new String[] { userTemplate }, groupSearchBase, scope); + } + + public static Settings buildLdapSettings(String[] ldapUrl, String[] userTemplate, String groupSearchBase, LdapSearchScope scope) { + return buildLdapSettings(ldapUrl, userTemplate, groupSearchBase, scope, null); + } + + public static Settings buildLdapSettings(String[] ldapUrl, String[] userTemplate, + String groupSearchBase, LdapSearchScope scope, + LdapLoadBalancing serverSetType) { + return buildLdapSettings(ldapUrl, userTemplate, groupSearchBase, scope, + serverSetType, false); + } + + public static Settings buildLdapSettings(String[] ldapUrl, String[] userTemplate, + String groupSearchBase, LdapSearchScope scope, + LdapLoadBalancing serverSetType, + boolean ignoreReferralErrors) { + Settings.Builder builder = Settings.builder() + .putList(URLS_SETTING, ldapUrl) + .putList(USER_DN_TEMPLATES_SETTING_KEY, userTemplate) + .put(SessionFactorySettings.TIMEOUT_TCP_CONNECTION_SETTING, TimeValue.timeValueSeconds(1L)) + .put(SessionFactorySettings.IGNORE_REFERRAL_ERRORS_SETTING.getKey(), ignoreReferralErrors) + .put("group_search.base_dn", groupSearchBase) + .put("group_search.scope", scope); + if (serverSetType != null) { + builder.put(LdapLoadBalancingSettings.LOAD_BALANCE_SETTINGS + "." + + LdapLoadBalancingSettings.LOAD_BALANCE_TYPE_SETTING, serverSetType.toString()); + } + return builder.build(); + } + + public static Settings buildLdapSettings(String[] ldapUrl, String userTemplate, boolean hostnameVerification) { + Settings.Builder builder = Settings.builder() + .putList(URLS_SETTING, ldapUrl) + .putList(USER_DN_TEMPLATES_SETTING_KEY, userTemplate); + if (randomBoolean()) { + builder.put("ssl.verification_mode", hostnameVerification ? 
VerificationMode.FULL : VerificationMode.CERTIFICATE); + } else { + builder.put(HOSTNAME_VERIFICATION_SETTING, hostnameVerification); + } + return builder.build(); + } + + protected DnRoleMapper buildGroupAsRoleMapper(ResourceWatcherService resourceWatcherService) { + Settings settings = Settings.builder() + .put(DnRoleMapperSettings.USE_UNMAPPED_GROUPS_AS_ROLES_SETTING.getKey(), true) + .build(); + Settings global = Settings.builder().put("path.home", createTempDir()).build(); + RealmConfig config = new RealmConfig("ldap1", settings, global, TestEnvironment.newEnvironment(global), + new ThreadContext(Settings.EMPTY)); + + return new DnRoleMapper(config, resourceWatcherService); + } + + protected LdapSession session(SessionFactory factory, String username, SecureString password) { + PlainActionFuture<LdapSession> future = new PlainActionFuture<>(); + factory.session(username, password, future); + return future.actionGet(); + } + + protected List<String> groups(LdapSession ldapSession) { + Objects.requireNonNull(ldapSession); + PlainActionFuture<List<String>> future = new PlainActionFuture<>(); + ldapSession.groups(future); + return future.actionGet(); + } + + protected LdapSession unauthenticatedSession(SessionFactory factory, String username) { + PlainActionFuture<LdapSession> future = new PlainActionFuture<>(); + factory.unauthenticatedSession(username, future); + return future.actionGet(); + } + + protected static void assertConnectionValid(LDAPInterface conn, SimpleBindRequest bindRequest) { + AccessController.doPrivileged(new PrivilegedAction<Void>() { + @Override + public Void run() { + try { + if (conn instanceof LDAPConnection) { + assertTrue(((LDAPConnection) conn).isConnected()); + assertEquals(bindRequest.getBindDN(), + ((SimpleBindRequest)((LDAPConnection) conn).getLastBindRequest()).getBindDN()); + ((LDAPConnection) conn).reconnect(); + } else if (conn instanceof LDAPConnectionPool) { + try (LDAPConnection c = ((LDAPConnectionPool) conn).getConnection()) { + assertTrue(c.isConnected()); + assertEquals(bindRequest.getBindDN(), ((SimpleBindRequest)c.getLastBindRequest()).getBindDN()); + c.reconnect(); + } + } + } catch (LDAPException e) { + fail("Connection is not valid. It will not work on follow referral flow." + + System.lineSeparator() + ExceptionsHelper.stackTrace(e)); + } + return null; + } + }); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java new file mode 100644 index 0000000000000..1a9a5b4d56e63 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryLoadBalancingTests.java @@ -0,0 +1,244 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.security.authc.ldap.support; + +import com.unboundid.ldap.listener.InMemoryDirectoryServer; +import com.unboundid.ldap.sdk.LDAPConnection; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.mocksocket.MockServerSocket; +import org.elasticsearch.mocksocket.MockSocket; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.Socket; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CountDownLatch; + +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; + +/** + * Tests that the server sets properly load balance connections without throwing exceptions + */ +@TestLogging("org.elasticsearch.xpack.security.authc.ldap.support:DEBUG") +public class SessionFactoryLoadBalancingTests extends LdapTestCase { + + private ThreadPool threadPool; + + @Before + public void init() throws Exception { + threadPool = new TestThreadPool("SessionFactoryLoadBalancingTests thread pool"); + } + + @After + public void shutdown() throws InterruptedException { + terminate(threadPool); + } + + public void testRoundRobin() throws Exception { + TestSessionFactory testSessionFactory = createSessionFactory(LdapLoadBalancing.ROUND_ROBIN); + + final int numberOfIterations = randomIntBetween(1, 5); + for (int iteration = 0; iteration < numberOfIterations; iteration++) { + for (int i = 0; i < numberOfLdapServers; i++) { + LDAPConnection connection = null; + try { + connection = LdapUtils.privilegedConnect(testSessionFactory.getServerSet()::getConnection); + assertThat(connection.getConnectedPort(), is(ldapServers[i].getListenPort())); + } finally { + if (connection != null) { + connection.close(); + } + } + } + } + } + + public void testRoundRobinWithFailures() throws Exception { + assumeTrue("at least one ldap server should be present for this test", ldapServers.length > 1); + logger.debug("using [{}] ldap servers, urls {}", ldapServers.length, ldapUrls()); + TestSessionFactory testSessionFactory = createSessionFactory(LdapLoadBalancing.ROUND_ROBIN); + + // create a list of ports + List ports = new ArrayList<>(numberOfLdapServers); + for (int i = 0; i < ldapServers.length; i++) { + ports.add(ldapServers[i].getListenPort()); + } + logger.debug("list of all ports {}", ports); + + final int numberToKill = randomIntBetween(1, numberOfLdapServers - 1); + logger.debug("killing [{}] servers", numberToKill); + + // get a subset to kill + final List ldapServersToKill = randomSubsetOf(numberToKill, ldapServers); + final List ldapServersList = Arrays.asList(ldapServers); + final InetAddress local = InetAddress.getByName("localhost"); + final MockServerSocket mockServerSocket = new MockServerSocket(0, 0, local); + final List listenThreads = new ArrayList<>(); + final CountDownLatch latch = 
new CountDownLatch(ldapServersToKill.size()); + final CountDownLatch closeLatch = new CountDownLatch(1); + try { + for (InMemoryDirectoryServer ldapServerToKill : ldapServersToKill) { + final int index = ldapServersList.indexOf(ldapServerToKill); + assertThat(index, greaterThanOrEqualTo(0)); + final Integer port = Integer.valueOf(ldapServers[index].getListenPort()); + logger.debug("shutting down server index [{}] listening on [{}]", index, port); + assertTrue(ports.remove(port)); + ldapServers[index].shutDown(true); + + // when running multiple test jvms, there is a chance that something else could + // start listening on this port so we try to avoid this by creating a local socket + // that will be bound to the port the ldap server was running on and connecting to + // a mock server socket. + // NOTE: this is not perfect as there is a small amount of time between the shutdown + // of the ldap server and the opening of the socket + logger.debug("opening mock server socket listening on [{}]", port); + Runnable runnable = () -> { + try (Socket socket = new MockSocket(InetAddress.getByName("localhost"), mockServerSocket.getLocalPort(), local, port)) { + logger.debug("opened socket [{}]", socket); + latch.countDown(); + closeLatch.await(); + logger.debug("closing socket [{}]", socket); + } catch (IOException | InterruptedException e) { + logger.debug("caught exception", e); + } + }; + Thread thread = new Thread(runnable); + thread.start(); + listenThreads.add(thread); + + assertThat(ldapServers[index].getListenPort(), is(-1)); + } + + latch.await(); + final int numberOfIterations = randomIntBetween(1, 5); + // go one iteration through and attempt a bind + for (int iteration = 0; iteration < numberOfIterations; iteration++) { + logger.debug("iteration [{}]", iteration); + for (Integer port : ports) { + logger.debug("attempting connection with expected port [{}]", port); + try (LDAPConnection connection = LdapUtils.privilegedConnect(testSessionFactory.getServerSet()::getConnection)) { + assertThat(connection.getConnectedPort(), is(port)); + } + } + } + } finally { + closeLatch.countDown(); + mockServerSocket.close(); + for (Thread t : listenThreads) { + t.join(); + } + } + } + + public void testFailover() throws Exception { + assumeTrue("at least one ldap server should be present for this test", ldapServers.length > 1); + logger.debug("using [{}] ldap servers, urls {}", ldapServers.length, ldapUrls()); + TestSessionFactory testSessionFactory = createSessionFactory(LdapLoadBalancing.FAILOVER); + + // first test that there is no round robin stuff going on + final int firstPort = ldapServers[0].getListenPort(); + for (int i = 0; i < numberOfLdapServers; i++) { + LDAPConnection connection = null; + try { + connection = LdapUtils.privilegedConnect(testSessionFactory.getServerSet()::getConnection); + assertThat(connection.getConnectedPort(), is(firstPort)); + } finally { + if (connection != null) { + connection.close(); + } + } + } + + logger.debug("shutting down server index [0] listening on [{}]", ldapServers[0].getListenPort()); + // always kill the first one + ldapServers[0].shutDown(true); + assertThat(ldapServers[0].getListenPort(), is(-1)); + + // now randomly shutdown some others + if (ldapServers.length > 2) { + // kill at least one other server, but we need at least one good one. Hence the upper bound is number - 2 since we need at least + // one server to use! 
+ final int numberToKill = randomIntBetween(1, numberOfLdapServers - 2); + InMemoryDirectoryServer[] allButFirstServer = Arrays.copyOfRange(ldapServers, 1, ldapServers.length); + // get a subset to kil + final List ldapServersToKill = randomSubsetOf(numberToKill, allButFirstServer); + final List ldapServersList = Arrays.asList(ldapServers); + for (InMemoryDirectoryServer ldapServerToKill : ldapServersToKill) { + final int index = ldapServersList.indexOf(ldapServerToKill); + assertThat(index, greaterThanOrEqualTo(1)); + final Integer port = Integer.valueOf(ldapServers[index].getListenPort()); + logger.debug("shutting down server index [{}] listening on [{}]", index, port); + ldapServers[index].shutDown(true); + assertThat(ldapServers[index].getListenPort(), is(-1)); + } + } + + int firstNonStoppedPort = -1; + // now we find the first that isn't stopped + for (int i = 0; i < numberOfLdapServers; i++) { + if (ldapServers[i].getListenPort() != -1) { + firstNonStoppedPort = ldapServers[i].getListenPort(); + break; + } + } + logger.debug("first non stopped port [{}]", firstNonStoppedPort); + + assertThat(firstNonStoppedPort, not(-1)); + final int numberOfIterations = randomIntBetween(1, 5); + for (int iteration = 0; iteration < numberOfIterations; iteration++) { + LDAPConnection connection = null; + try { + logger.debug("attempting connection with expected port [{}] iteration [{}]", firstNonStoppedPort, iteration); + connection = LdapUtils.privilegedConnect(testSessionFactory.getServerSet()::getConnection); + assertThat(connection.getConnectedPort(), is(firstNonStoppedPort)); + } finally { + if (connection != null) { + connection.close(); + } + } + } + } + + private TestSessionFactory createSessionFactory(LdapLoadBalancing loadBalancing) throws Exception { + String groupSearchBase = "cn=HMS Lydia,ou=crews,ou=groups,o=sevenSeas"; + String userTemplate = "cn={0},ou=people,o=sevenSeas"; + Settings settings = buildLdapSettings(ldapUrls(), new String[] { userTemplate }, groupSearchBase, + LdapSearchScope.SUB_TREE, loadBalancing); + Settings globalSettings = Settings.builder().put("path.home", createTempDir()).build(); + RealmConfig config = new RealmConfig("test-session-factory", settings, globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); + return new TestSessionFactory(config, new SSLService(Settings.EMPTY, TestEnvironment.newEnvironment(config.globalSettings())), + threadPool); + } + + static class TestSessionFactory extends SessionFactory { + + protected TestSessionFactory(RealmConfig config, SSLService sslService, ThreadPool threadPool) { + super(config, sslService, threadPool); + } + + @Override + public void session(String user, SecureString password, ActionListener listener) { + listener.onResponse(null); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryTests.java new file mode 100644 index 0000000000000..1a5fa6af5f8a3 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactoryTests.java @@ -0,0 +1,121 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.ldap.support; + +import com.unboundid.ldap.sdk.LDAPConnectionOptions; +import com.unboundid.util.ssl.HostNameSSLSocketVerifier; +import com.unboundid.util.ssl.TrustAllSSLSocketVerifier; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.core.ssl.VerificationMode; +import org.junit.After; +import org.junit.Before; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +public class SessionFactoryTests extends ESTestCase { + + private ThreadPool threadPool; + + @Before + public void init() throws Exception { + threadPool = new TestThreadPool("SessionFactoryTests thread pool"); + } + + @After + public void shutdown() throws InterruptedException { + terminate(threadPool); + } + + public void testConnectionFactoryReturnsCorrectLDAPConnectionOptionsWithDefaultSettings() throws Exception { + final Environment environment = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); + RealmConfig realmConfig = new RealmConfig("conn settings", Settings.EMPTY, environment.settings(), environment, + new ThreadContext(Settings.EMPTY)); + LDAPConnectionOptions options = SessionFactory.connectionOptions(realmConfig, new SSLService(environment.settings(), environment), + logger); + assertThat(options.followReferrals(), is(equalTo(true))); + assertThat(options.allowConcurrentSocketFactoryUse(), is(equalTo(true))); + assertThat(options.getConnectTimeoutMillis(), is(equalTo(5000))); + assertThat(options.getResponseTimeoutMillis(), is(equalTo(5000L))); + assertThat(options.getSSLSocketVerifier(), is(instanceOf(HostNameSSLSocketVerifier.class))); + } + + public void testConnectionFactoryReturnsCorrectLDAPConnectionOptions() throws Exception { + Settings settings = Settings.builder() + .put(SessionFactorySettings.TIMEOUT_TCP_CONNECTION_SETTING, "10ms") + .put(SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING, "false") + .put(SessionFactorySettings.TIMEOUT_TCP_READ_SETTING, "20ms") + .put(SessionFactorySettings.FOLLOW_REFERRALS_SETTING, "false") + .build(); + + final Environment environment = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); + RealmConfig realmConfig = new RealmConfig("conn settings", settings, environment.settings(), environment, new ThreadContext(Settings.EMPTY)); + LDAPConnectionOptions options = SessionFactory.connectionOptions(realmConfig, new SSLService(environment.settings(), environment), + logger); + assertThat(options.followReferrals(), is(equalTo(false))); + assertThat(options.allowConcurrentSocketFactoryUse(), is(equalTo(true))); + assertThat(options.getConnectTimeoutMillis(), is(equalTo(10))); + 
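+        // the response timeout asserted below mirrors TIMEOUT_TCP_READ_SETTING ("20ms") from the builder above,
+        // just as the connect timeout asserted above mirrors TIMEOUT_TCP_CONNECTION_SETTING ("10ms")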
assertThat(options.getResponseTimeoutMillis(), is(equalTo(20L))); + assertThat(options.getSSLSocketVerifier(), is(instanceOf(TrustAllSSLSocketVerifier.class))); + assertWarnings("the setting [xpack.security.authc.realms.conn settings.hostname_verification] has been deprecated and will be " + + "removed in a future version. use [xpack.security.authc.realms.conn settings.ssl.verification_mode] instead"); + + settings = Settings.builder().put("ssl.verification_mode", VerificationMode.CERTIFICATE).build(); + realmConfig = new RealmConfig("conn settings", settings, environment.settings(), environment, new ThreadContext(Settings.EMPTY)); + options = SessionFactory.connectionOptions(realmConfig, new SSLService(environment.settings(), environment), + logger); + assertThat(options.getSSLSocketVerifier(), is(instanceOf(TrustAllSSLSocketVerifier.class))); + + settings = Settings.builder().put("ssl.verification_mode", VerificationMode.NONE).build(); + realmConfig = new RealmConfig("conn settings", settings, environment.settings(), environment, new ThreadContext(Settings.EMPTY)); + options = SessionFactory.connectionOptions(realmConfig, new SSLService(environment.settings(), environment), + logger); + assertThat(options.getSSLSocketVerifier(), is(instanceOf(TrustAllSSLSocketVerifier.class))); + + settings = Settings.builder().put("ssl.verification_mode", VerificationMode.FULL).build(); + realmConfig = new RealmConfig("conn settings", settings, environment.settings(), environment, new ThreadContext(Settings.EMPTY)); + options = SessionFactory.connectionOptions(realmConfig, new SSLService(environment.settings(), environment), + logger); + assertThat(options.getSSLSocketVerifier(), is(instanceOf(HostNameSSLSocketVerifier.class))); + } + + public void testSessionFactoryDoesNotSupportUnauthenticated() { + assertThat(createSessionFactory().supportsUnauthenticatedSession(), is(false)); + } + + public void testUnauthenticatedSessionThrowsUnsupportedOperationException() throws Exception { + UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class, + () -> createSessionFactory().unauthenticatedSession(randomAlphaOfLength(5), new PlainActionFuture<>())); + assertThat(e.getMessage(), containsString("unauthenticated sessions")); + } + + private SessionFactory createSessionFactory() { + Settings global = Settings.builder().put("path.home", createTempDir()).build(); + final RealmConfig realmConfig = new RealmConfig("_name", Settings.builder().put("url", "ldap://localhost:389").build(), + global, TestEnvironment.newEnvironment(global), new ThreadContext(Settings.EMPTY)); + return new SessionFactory(realmConfig, null, threadPool) { + + @Override + public void session(String user, SecureString password, ActionListener listener) { + listener.onResponse(null); + } + }; + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java new file mode 100644 index 0000000000000..e64a06d435fc1 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java @@ -0,0 +1,173 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.pki; + +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpPut; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.test.SecuritySingleNodeTestCase; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.xpack.core.TestXPackTransportClient; +import org.elasticsearch.xpack.core.common.socket.SocketAccess; +import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; +import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings; +import org.elasticsearch.xpack.core.ssl.SSLClientAuth; +import org.elasticsearch.xpack.security.LocalStateSecurity; + +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManagerFactory; + +import java.io.InputStream; +import java.net.InetSocketAddress; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.KeyStore; +import java.security.SecureRandom; +import java.util.Locale; + +import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForStore; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +/** + * Test authentication via PKI on both REST and Transport layers + */ +public class PkiAuthenticationTests extends SecuritySingleNodeTestCase { + + @Override + protected Settings nodeSettings() { + SSLClientAuth sslClientAuth = randomBoolean() ? 
SSLClientAuth.REQUIRED : SSLClientAuth.OPTIONAL; + + Settings.Builder builder = Settings.builder() + .put(super.nodeSettings()) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .put("xpack.security.http.ssl.enabled", true) + .put("xpack.security.http.ssl.client_authentication", sslClientAuth) + .put("xpack.security.authc.realms.file.type", FileRealmSettings.TYPE) + .put("xpack.security.authc.realms.file.order", "0") + .put("xpack.security.authc.realms.pki1.type", PkiRealmSettings.TYPE) + .put("xpack.security.authc.realms.pki1.order", "1") + .put("xpack.security.authc.realms.pki1.truststore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/truststore-testnode-only.jks")) + .put("xpack.security.authc.realms.pki1.files.role_mapping", getDataPath("role_mapping.yml")); + + SecuritySettingsSource.addSecureSettings(builder, secureSettings -> + secureSettings.setString("xpack.security.authc.realms.pki1.truststore.secure_password", "truststore-testnode-only")); + return builder.build(); + } + + @Override + protected boolean transportSSLEnabled() { + return true; + } + + @Override + protected boolean enableWarningsCheck() { + // the transport client uses deprecated SSL settings since we do not know what to do about + // secure settings for the transport client + return false; + } + + public void testTransportClientCanAuthenticateViaPki() { + Settings.Builder builder = Settings.builder(); + addSSLSettingsForStore(builder, "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks", "testnode"); + try (TransportClient client = createTransportClient(builder.build())) { + client.addTransportAddress(randomFrom(node().injector().getInstance(Transport.class).boundAddress().boundAddresses())); + IndexResponse response = client.prepareIndex("foo", "bar").setSource("pki", "auth").get(); + assertEquals(DocWriteResponse.Result.CREATED, response.getResult()); + } + } + + /** + * Test uses the testclient cert which is trusted by the SSL layer BUT it is not trusted by the PKI authentication + * realm + */ + public void testTransportClientAuthenticationFailure() { + try (TransportClient client = createTransportClient(Settings.EMPTY)) { + client.addTransportAddress(randomFrom(node().injector().getInstance(Transport.class).boundAddress().boundAddresses())); + client.prepareIndex("foo", "bar").setSource("pki", "auth").get(); + fail("transport client should not have been able to authenticate"); + } catch (NoNodeAvailableException e) { + assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#")); + } + } + + public void testRestAuthenticationViaPki() throws Exception { + SSLContext context = getRestSSLContext("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks", "testnode"); + try (CloseableHttpClient client = HttpClients.custom().setSSLContext(context).build()) { + HttpPut put = new HttpPut(getNodeUrl() + "foo"); + try (CloseableHttpResponse response = SocketAccess.doPrivileged(() -> client.execute(put))) { + String body = EntityUtils.toString(response.getEntity()); + assertThat(body, containsString("\"acknowledged\":true")); + } + } + } + + public void testRestAuthenticationFailure() throws Exception { + SSLContext context = getRestSSLContext("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.jks", "testclient"); + try (CloseableHttpClient client = HttpClients.custom().setSSLContext(context).build()) { + HttpPut put = new HttpPut(getNodeUrl() + "foo"); + try 
(CloseableHttpResponse response = SocketAccess.doPrivileged(() -> client.execute(put))) { + assertThat(response.getStatusLine().getStatusCode(), is(401)); + String body = EntityUtils.toString(response.getEntity()); + assertThat(body, containsString("unable to authenticate user [Elasticsearch Test Client]")); + } + } + } + + private SSLContext getRestSSLContext(String keystoreResourcePath, String password) throws Exception { + SSLContext context = SSLContext.getInstance("TLS"); + KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + Path store = getDataPath(keystoreResourcePath); + KeyStore ks; + try (InputStream in = Files.newInputStream(store)) { + ks = KeyStore.getInstance("jks"); + ks.load(in, password.toCharArray()); + } + + kmf.init(ks, password.toCharArray()); + TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(ks); + context.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom()); + + return context; + } + + private TransportClient createTransportClient(Settings additionalSettings) { + Settings clientSettings = transportClientSettings(); + if (additionalSettings.getByPrefix("xpack.ssl.").isEmpty() == false) { + clientSettings = clientSettings.filter(k -> k.startsWith("xpack.ssl.") == false); + } + + Settings.Builder builder = Settings.builder().put(clientSettings, false) + .put(additionalSettings) + .put("cluster.name", node().settings().get("cluster.name")); + builder.remove(SecurityField.USER_SETTING.getKey()); + builder.remove("request.headers.Authorization"); + return new TestXPackTransportClient(builder.build(), LocalStateSecurity.class); + } + + private String getNodeUrl() { + TransportAddress transportAddress = randomFrom(node().injector().getInstance(HttpServerTransport.class) + .boundAddress().boundAddresses()); + final InetSocketAddress inetSocketAddress = transportAddress.address(); + return String.format(Locale.ROOT, "https://%s/", NetworkAddress.format(inetSocketAddress)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java new file mode 100644 index 0000000000000..720ab17aedb45 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java @@ -0,0 +1,100 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.pki; + +import org.apache.http.message.BasicHeader; +import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.test.SecuritySingleNodeTestCase; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.ssl.SSLClientAuth; +import org.junit.BeforeClass; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManagerFactory; + +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.KeyStore; +import java.security.SecureRandom; + +import static org.hamcrest.Matchers.is; + +public class PkiOptionalClientAuthTests extends SecuritySingleNodeTestCase { + + private static int randomClientPort; + + @BeforeClass + public static void initPort() { + randomClientPort = randomIntBetween(49000, 65500); + } + + @Override + protected Settings nodeSettings() { + String randomClientPortRange = randomClientPort + "-" + (randomClientPort+100); + + Settings.Builder builder = Settings.builder() + .put(super.nodeSettings()) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .put("xpack.security.http.ssl.enabled", true) + .put("xpack.security.http.ssl.client_authentication", SSLClientAuth.OPTIONAL) + .put("xpack.security.authc.realms.file.type", "file") + .put("xpack.security.authc.realms.file.order", "0") + .put("xpack.security.authc.realms.pki1.type", "pki") + .put("xpack.security.authc.realms.pki1.order", "1") + .put("xpack.security.authc.realms.pki1.truststore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/truststore-testnode-only.jks")) + .put("xpack.security.authc.realms.pki1.files.role_mapping", getDataPath("role_mapping.yml")) + .put("transport.profiles.want_client_auth.port", randomClientPortRange) + .put("transport.profiles.want_client_auth.bind_host", "localhost") + .put("transport.profiles.want_client_auth.xpack.security.ssl.client_authentication", SSLClientAuth.OPTIONAL); + + SecuritySettingsSource.addSecureSettings(builder, secureSettings -> + secureSettings.setString("xpack.security.authc.realms.pki1.truststore.secure_password", "truststore-testnode-only")); + return builder.build(); + + } + + @Override + protected boolean transportSSLEnabled() { + return true; + } + + public void testRestClientWithoutClientCertificate() throws Exception { + SSLIOSessionStrategy sessionStrategy = new SSLIOSessionStrategy(getSSLContext()); + try (RestClient restClient = createRestClient(httpClientBuilder -> httpClientBuilder.setSSLStrategy(sessionStrategy), "https")) { + ResponseException e = expectThrows(ResponseException.class, () -> restClient.performRequest("GET", "_nodes")); + assertThat(e.getResponse().getStatusLine().getStatusCode(), is(401)); + + Response response = restClient.performRequest("GET", "_nodes", + new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_USER_NAME, + new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())))); + 
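+            // with client_authentication set to OPTIONAL, the same client that was rejected above
+            // succeeds once basic auth credentials are supplied instead of a client certificate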
assertThat(response.getStatusLine().getStatusCode(), is(200)); + } + } + + private SSLContext getSSLContext() throws Exception { + SSLContext sc = SSLContext.getInstance("TLSv1.2"); + Path truststore = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/truststore-testnode-only.jks"); + KeyStore keyStore = KeyStore.getInstance("JKS"); + try (InputStream stream = Files.newInputStream(truststore)) { + keyStore.load(stream, "truststore-testnode-only".toCharArray()); + } + TrustManagerFactory factory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + factory.init(keyStore); + sc.init(null, factory.getTrustManagers(), new SecureRandom()); + return sc; + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java new file mode 100644 index 0000000000000..74f6598f8dd1c --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java @@ -0,0 +1,313 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.pki; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.support.NoOpLogger; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; +import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.junit.Before; +import org.mockito.Mockito; + +import javax.security.auth.x500.X500Principal; + +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.cert.CertificateFactory; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.regex.Pattern; + +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class PkiRealmTests extends ESTestCase 
{ + + private Settings globalSettings; + + @Before + public void setup() throws Exception { + globalSettings = Settings.builder() + .put("path.home", createTempDir()) + .build(); + } + + public void testTokenSupport() { + RealmConfig config = new RealmConfig("", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + PkiRealm realm = new PkiRealm(config, mock(UserRoleMapper.class)); + + assertThat(realm.supports(null), is(false)); + assertThat(realm.supports(new UsernamePasswordToken("", new SecureString(new char[0]))), is(false)); + assertThat(realm.supports(new X509AuthenticationToken(new X509Certificate[0], "", "")), is(true)); + } + + public void testExtractToken() throws Exception { + X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, new X509Certificate[] { certificate }); + PkiRealm realm = new PkiRealm(new RealmConfig("", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)), mock(UserRoleMapper.class)); + + X509AuthenticationToken token = realm.token(threadContext); + assertThat(token, is(notNullValue())); + assertThat(token.dn(), is("CN=Elasticsearch Test Node, OU=elasticsearch, O=org")); + assertThat(token.principal(), is("Elasticsearch Test Node")); + } + + public void testAuthenticateBasedOnCertToken() throws Exception { + assertSuccessfulAuthentication(Collections.emptySet()); + } + + public void testAuthenticateWithRoleMapping() throws Exception { + final Set roles = new HashSet<>(); + roles.add("admin"); + roles.add("kibana_user"); + assertSuccessfulAuthentication(roles); + } + + private void assertSuccessfulAuthentication(Set roles) throws Exception { + String dn = "CN=Elasticsearch Test Node,"; + final String expectedUsername = "Elasticsearch Test Node"; + X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); + X509AuthenticationToken token = new X509AuthenticationToken(new X509Certificate[] { certificate }, "Elasticsearch Test Node", dn); + UserRoleMapper roleMapper = mock(UserRoleMapper.class); + PkiRealm realm = new PkiRealm(new RealmConfig("", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)), roleMapper); + Mockito.doAnswer(invocation -> { + final UserRoleMapper.UserData userData = (UserRoleMapper.UserData) invocation.getArguments()[0]; + final ActionListener> listener = (ActionListener>) invocation.getArguments()[1]; + if (userData.getDn().equals(dn)) { + listener.onResponse(roles); + } else { + listener.onFailure(new IllegalArgumentException("Expected DN '" + dn + "' but was '" + userData + "'")); + } + return null; + }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class)); + + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(token, future); + final AuthenticationResult result = future.actionGet(); + assertThat(result.getStatus(), is(AuthenticationResult.Status.SUCCESS)); + User user = result.getUser(); + assertThat(user, is(notNullValue())); + assertThat(user.principal(), is(expectedUsername)); + assertThat(user.roles(), is(notNullValue())); + assertThat(user.roles().length, is(roles.size())); + assertThat(user.roles(), 
arrayContainingInAnyOrder(roles.toArray())); + + final boolean testCaching = randomBoolean(); + final boolean invalidate = testCaching && randomBoolean(); + if (testCaching) { + if (invalidate) { + if (randomBoolean()) { + realm.expireAll(); + } else { + realm.expire(expectedUsername); + } + } + future = new PlainActionFuture<>(); + realm.authenticate(token, future); + assertEquals(AuthenticationResult.Status.SUCCESS, future.actionGet().getStatus()); + assertEquals(user, future.actionGet().getUser()); + } + + final int numTimes = invalidate ? 2 : 1; + verify(roleMapper, times(numTimes)).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class)); + } + + public void testCustomUsernamePattern() throws Exception { + X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); + UserRoleMapper roleMapper = mock(UserRoleMapper.class); + PkiRealm realm = new PkiRealm(new RealmConfig("", Settings.builder().put("username_pattern", "OU=(.*?),").build(), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)), + roleMapper); + Mockito.doAnswer(invocation -> { + ActionListener> listener = (ActionListener>) invocation.getArguments()[1]; + listener.onResponse(Collections.emptySet()); + return null; + }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class)); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, new X509Certificate[] { certificate }); + + X509AuthenticationToken token = realm.token(threadContext); + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(token, future); + User user = future.actionGet().getUser(); + assertThat(user, is(notNullValue())); + assertThat(user.principal(), is("elasticsearch")); + assertThat(user.roles(), is(notNullValue())); + assertThat(user.roles().length, is(0)); + } + + public void testVerificationUsingATruststore() throws Exception { + X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); + + UserRoleMapper roleMapper = mock(UserRoleMapper.class); + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("truststore.secure_password", "testnode"); + Settings settings = Settings.builder() + .put("truststore.path", getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks")) + .setSecureSettings(secureSettings) + .build(); + PkiRealm realm = new PkiRealm(new RealmConfig("", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)), roleMapper); + Mockito.doAnswer(invocation -> { + ActionListener> listener = (ActionListener>) invocation.getArguments()[1]; + listener.onResponse(Collections.emptySet()); + return null; + }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class)); + + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, new X509Certificate[] { certificate }); + + X509AuthenticationToken token = realm.token(threadContext); + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(token, future); + User user = future.actionGet().getUser(); + assertThat(user, is(notNullValue())); + assertThat(user.principal(), is("Elasticsearch Test Node")); + assertThat(user.roles(), 
is(notNullValue())); + assertThat(user.roles().length, is(0)); + } + + public void testVerificationFailsUsingADifferentTruststore() throws Exception { + X509Certificate certificate = readCert(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); + UserRoleMapper roleMapper = mock(UserRoleMapper.class); + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("truststore.secure_password", "testnode-client-profile"); + Settings settings = Settings.builder() + .put("truststore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.jks")) + .setSecureSettings(secureSettings) + .build(); + PkiRealm realm = new PkiRealm(new RealmConfig("", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)), roleMapper); + Mockito.doAnswer(invocation -> { + ActionListener> listener = (ActionListener>) invocation.getArguments()[1]; + listener.onResponse(Collections.emptySet()); + return null; + }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class)); + + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + threadContext.putTransient(PkiRealm.PKI_CERT_HEADER_NAME, new X509Certificate[] { certificate }); + + X509AuthenticationToken token = realm.token(threadContext); + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(token, future); + User user = future.actionGet().getUser(); + assertThat(user, is(nullValue())); + } + + public void testTruststorePathWithoutPasswordThrowsException() throws Exception { + Settings settings = Settings.builder() + .put("truststore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.jks")) + .build(); + try { + new PkiRealm(new RealmConfig("mypki", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)), mock(UserRoleMapper.class)); + fail("exception should have been thrown"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("Neither [xpack.security.authc.realms.mypki.truststore.secure_password] or [" + + "xpack.security.authc.realms.mypki.truststore.password] is configured")); + } + } + + public void testTruststorePathWithLegacyPasswordDoesNotThrow() throws Exception { + Settings settings = Settings.builder() + .put("truststore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.jks")) + .put("truststore.password", "testnode-client-profile") + .build(); + new PkiRealm(new RealmConfig("mypki", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)), mock(UserRoleMapper.class)); + assertSettingDeprecationsAndWarnings(new Setting[] { SSLConfigurationSettings.withoutPrefix().legacyTruststorePassword }); + } + + public void testCertificateWithOnlyCnExtractsProperly() throws Exception { + X509Certificate certificate = mock(X509Certificate.class); + X500Principal principal = new X500Principal("CN=PKI Client"); + when(certificate.getSubjectX500Principal()).thenReturn(principal); + + X509AuthenticationToken token = PkiRealm.token(new X509Certificate[] { certificate }, + Pattern.compile(PkiRealmSettings.DEFAULT_USERNAME_PATTERN), NoOpLogger.INSTANCE); + assertThat(token, notNullValue()); + assertThat(token.principal(), is("PKI Client")); + assertThat(token.dn(), is("CN=PKI Client")); + } 
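+    // the next two tests exercise the same default username pattern when the CN is accompanied by,
+    // or preceded by, other RDNs; in every case only the CN value is extracted as the principal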
+ + public void testCertificateWithCnAndOuExtractsProperly() throws Exception { + X509Certificate certificate = mock(X509Certificate.class); + X500Principal principal = new X500Principal("CN=PKI Client, OU=Security"); + when(certificate.getSubjectX500Principal()).thenReturn(principal); + + X509AuthenticationToken token = PkiRealm.token(new X509Certificate[] { certificate }, + Pattern.compile(PkiRealmSettings.DEFAULT_USERNAME_PATTERN), NoOpLogger.INSTANCE); + assertThat(token, notNullValue()); + assertThat(token.principal(), is("PKI Client")); + assertThat(token.dn(), is("CN=PKI Client, OU=Security")); + } + + public void testCertificateWithCnInMiddle() throws Exception { + X509Certificate certificate = mock(X509Certificate.class); + X500Principal principal = new X500Principal("EMAILADDRESS=pki@elastic.co, CN=PKI Client, OU=Security"); + when(certificate.getSubjectX500Principal()).thenReturn(principal); + + X509AuthenticationToken token = PkiRealm.token(new X509Certificate[] { certificate }, + Pattern.compile(PkiRealmSettings.DEFAULT_USERNAME_PATTERN), NoOpLogger.INSTANCE); + assertThat(token, notNullValue()); + assertThat(token.principal(), is("PKI Client")); + assertThat(token.dn(), is("EMAILADDRESS=pki@elastic.co, CN=PKI Client, OU=Security")); + } + + public void testPKIRealmSettingsPassValidation() throws Exception { + Settings settings = Settings.builder() + .put("xpack.security.authc.realms.pki1.type", "pki") + .put("xpack.security.authc.realms.pki1.truststore.path", "/foo/bar") + .put("xpack.security.authc.realms.pki1.truststore.password", "supersecret") + .build(); + List> settingList = new ArrayList<>(); + RealmSettings.addSettings(settingList, Collections.emptyList()); + ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(settingList)); + clusterSettings.validate(settings, false); + + assertSettingDeprecationsAndWarnings(new Setting[] { SSLConfigurationSettings.withoutPrefix().legacyTruststorePassword }); + } + + static X509Certificate readCert(Path path) throws Exception { + try (InputStream in = Files.newInputStream(path)) { + CertificateFactory factory = CertificateFactory.getInstance("X.509"); + return (X509Certificate) factory.generateCertificate(in); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java new file mode 100644 index 0000000000000..477bfdebb59d2 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticatorTests.java @@ -0,0 +1,1944 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.saml; + +import org.apache.xml.security.Init; +import org.apache.xml.security.encryption.EncryptedData; +import org.apache.xml.security.encryption.EncryptedKey; +import org.apache.xml.security.encryption.EncryptionMethod; +import org.apache.xml.security.encryption.XMLCipher; +import org.apache.xml.security.exceptions.XMLSecurityException; +import org.apache.xml.security.keys.content.X509Data; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.xpack.core.watcher.watch.ClockMock; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.hamcrest.Matchers; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.opensaml.saml.saml2.core.Assertion; +import org.opensaml.saml.saml2.core.AttributeStatement; +import org.opensaml.saml.saml2.core.NameID; +import org.opensaml.saml.saml2.core.Response; +import org.opensaml.saml.saml2.core.StatusCode; +import org.opensaml.security.credential.Credential; +import org.opensaml.security.x509.X509Credential; +import org.opensaml.xmlsec.encryption.support.DecryptionException; +import org.w3c.dom.Document; +import org.w3c.dom.Element; +import org.w3c.dom.NamedNodeMap; +import org.w3c.dom.Node; +import org.w3c.dom.NodeList; +import org.xml.sax.InputSource; +import org.xml.sax.SAXException; + +import javax.crypto.Cipher; +import javax.crypto.KeyGenerator; +import javax.xml.crypto.dsig.CanonicalizationMethod; +import javax.xml.crypto.dsig.DigestMethod; +import javax.xml.crypto.dsig.Reference; +import javax.xml.crypto.dsig.SignatureMethod; +import javax.xml.crypto.dsig.SignedInfo; +import javax.xml.crypto.dsig.Transform; +import javax.xml.crypto.dsig.XMLSignature; +import javax.xml.crypto.dsig.XMLSignatureFactory; +import javax.xml.crypto.dsig.dom.DOMSignContext; +import javax.xml.crypto.dsig.keyinfo.KeyInfo; +import javax.xml.crypto.dsig.keyinfo.KeyInfoFactory; +import javax.xml.crypto.dsig.spec.C14NMethodParameterSpec; +import javax.xml.crypto.dsig.spec.TransformParameterSpec; +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; + +import java.io.IOException; +import java.io.StringReader; +import java.nio.charset.StandardCharsets; +import java.security.Key; +import java.security.KeyException; +import java.security.NoSuchAlgorithmException; +import java.security.PrivateKey; +import java.security.cert.X509Certificate; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static javax.xml.crypto.dsig.CanonicalizationMethod.EXCLUSIVE; +import static javax.xml.crypto.dsig.CanonicalizationMethod.EXCLUSIVE_WITH_COMMENTS; +import 
static javax.xml.crypto.dsig.Transform.ENVELOPED; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.iterableWithSize; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.opensaml.saml.common.xml.SAMLConstants.SAML20P_NS; +import static org.opensaml.saml.common.xml.SAMLConstants.SAML20_NS; +import static org.opensaml.saml.saml2.core.AuthnContext.PASSWORD_AUTHN_CTX; +import static org.opensaml.saml.saml2.core.NameIDType.TRANSIENT; +import static org.opensaml.saml.saml2.core.SubjectConfirmation.METHOD_ATTRIB_NAME; +import static org.opensaml.saml.saml2.core.SubjectConfirmation.METHOD_BEARER; + +public class SamlAuthenticatorTests extends SamlTestCase { + + private static final String SP_ENTITY_ID = "https://sp.saml.elastic.test/"; + private static final String IDP_ENTITY_ID = "https://idp.saml.elastic.test/"; + private static final String SP_ACS_URL = SP_ENTITY_ID + "sso/post"; + + private static Tuple idpSigningCertificatePair; + private static Tuple spSigningCertificatePair; + private static List> spEncryptionCertificatePairs; + + private static List supportedAesKeyLengths; + private static List supportedAesTransformations; + + private ClockMock clock; + private SamlAuthenticator authenticator; + private String requestId; + private TimeValue maxSkew; + + @BeforeClass + public static void init() throws Exception { + SamlUtils.initialize(Loggers.getLogger(SamlAuthenticatorTests.class)); + // Initialise Apache XML security so that the signDoc methods work correctly. + Init.init(); + } + + @BeforeClass + public static void calculateAesLength() throws NoSuchAlgorithmException { + supportedAesKeyLengths = new ArrayList<>(); + supportedAesTransformations = new ArrayList<>(); + supportedAesKeyLengths.add(128); + supportedAesTransformations.add(XMLCipher.AES_128); + supportedAesTransformations.add(XMLCipher.AES_128_GCM); + if (Cipher.getMaxAllowedKeyLength("AES") > 128) { + supportedAesKeyLengths.add(192); + supportedAesKeyLengths.add(256); + supportedAesTransformations.add(XMLCipher.AES_192); + supportedAesTransformations.add(XMLCipher.AES_192_GCM); + supportedAesTransformations.add(XMLCipher.AES_256); + supportedAesTransformations.add(XMLCipher.AES_256_GCM); + } + } + + /** + * Generating X.509 credentials can be CPU intensive and slow, so we only want to do it once per class. 
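+     * Tests that need a different key pair (for example the wrong-key and key-reload tests) still create one inline.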
+ */ + @BeforeClass + public static void initCredentials() throws Exception { + idpSigningCertificatePair = createKeyPair(randomSigningAlgorithm()); + spSigningCertificatePair = createKeyPair(randomSigningAlgorithm()); + spEncryptionCertificatePairs = Arrays.asList(createKeyPair("RSA"), createKeyPair("RSA")); + } + + private static String randomSigningAlgorithm() { + return randomFrom("RSA", "DSA", "EC"); + } + + @AfterClass + public static void cleanup() { + idpSigningCertificatePair = null; + spSigningCertificatePair = null; + spEncryptionCertificatePairs = null; + supportedAesKeyLengths = null; + supportedAesTransformations = null; + } + + @Before + public void setupAuthenticator() throws Exception { + this.clock = new ClockMock(); + this.maxSkew = TimeValue.timeValueMinutes(1); + this.authenticator = buildAuthenticator(() -> buildOpenSamlCredential(idpSigningCertificatePair)); + this.requestId = randomId(); + } + + private SamlAuthenticator buildAuthenticator(Supplier> credentials) throws Exception { + final Settings globalSettings = Settings.builder().put("path.home", createTempDir()).build(); + final Settings realmSettings = Settings.EMPTY; + final IdpConfiguration idp = new IdpConfiguration(IDP_ENTITY_ID, credentials); + + final SigningConfiguration signingConfiguration = new SigningConfiguration(Collections.singleton("*"), + (X509Credential) buildOpenSamlCredential(spSigningCertificatePair).get(0)); + final List spEncryptionCredentials = buildOpenSamlCredential(spEncryptionCertificatePairs).stream() + .map((cred) -> (X509Credential) cred).collect(Collectors.toList()); + final SpConfiguration sp = new SpConfiguration(SP_ENTITY_ID, SP_ACS_URL, null, signingConfiguration, spEncryptionCredentials); + final Environment env = TestEnvironment.newEnvironment(globalSettings); + return new SamlAuthenticator( + new RealmConfig("saml_test", realmSettings, globalSettings, env, new ThreadContext(globalSettings)), + clock, + idp, + sp, + maxSkew + ); + } + + public void testParseEmptyContentIsRejected() throws Exception { + SamlToken token = token(""); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + assertThat(exception.getMessage(), containsString("Failed to parse")); + assertThat(exception.getCause(), Matchers.instanceOf(SAXException.class)); + } + + public void testParseContentWithNoAssertionsIsRejected() throws Exception { + Instant now = clock.instant(); + SamlToken token = token("\n" + + "" + + "" + + IDP_ENTITY_ID + "" + + "" + + ""); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + assertThat(exception.getMessage(), containsString("No assertions found in SAML response")); + assertThat(exception.getCause(), nullValue()); + assertThat(SamlUtils.isSamlException(exception), is(true)); + } + + public void testSuccessfullyParseContentWithASingleValidAssertion() throws Exception { + Instant now = clock.instant(); + Instant validUntil = now.plusSeconds(30); + final String nameId = randomAlphaOfLengthBetween(12, 24); + final String sessionindex = randomId(); + final String xml = "\n" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "" + nameId + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + PASSWORD_AUTHN_CTX + "" + + "" + + "" + + "" + + "daredevil" + + "" + + "" + + ""; + SamlToken token = token(signDoc(xml)); + final SamlAttributes attributes = authenticator.authenticate(token); + assertThat(attributes, 
notNullValue()); + assertThat(attributes.attributes(), iterableWithSize(1)); + final List uid = attributes.getAttributeValues("urn:oid:0.9.2342.19200300.100.1.1"); + assertThat(uid, contains("daredevil")); + assertThat(uid, iterableWithSize(1)); + assertThat(attributes.name(), notNullValue()); + assertThat(attributes.name().format, equalTo(TRANSIENT)); + assertThat(attributes.name().value, equalTo(nameId)); + } + + public void testSuccessfullyParseContentWithMultipleValidAttributes() throws Exception { + final String nameId = randomAlphaOfLengthBetween(4, 8) + "-" + randomAlphaOfLengthBetween(8, 12); + final String session = randomId(); + + final String xml = getSimpleResponse(clock.instant(), nameId, session); + SamlToken token = token(signDoc(xml)); + final SamlAttributes attributes = authenticator.authenticate(token); + assertThat(attributes, notNullValue()); + assertThat(attributes.attributes(), iterableWithSize(2)); + final List uid = attributes.getAttributeValues("urn:oid:0.9.2342.19200300.100.1.1"); + assertThat(uid, contains("daredevil")); + + final List groups = attributes.getAttributeValues("urn:oid:1.3.6.1.4.1.5923.1.5.1.1"); + assertThat(groups, containsInAnyOrder("defenders", "netflix")); + + assertThat(attributes.name(), notNullValue()); + assertThat(attributes.name().format, equalTo(NameID.TRANSIENT)); + assertThat(attributes.name().value, equalTo(nameId)); + assertThat(attributes.name().idpNameQualifier, equalTo(IDP_ENTITY_ID)); + assertThat(attributes.name().spNameQualifier, equalTo(SP_ENTITY_ID)); + + assertThat(attributes.session(), equalTo(session)); + } + + public void testSuccessfullyParseContentFromEncryptedAssertion() throws Exception { + final Instant now = clock.instant(); + final String xml = getSimpleResponse(now); + + final String encrypted = encryptAssertions(xml, randomFrom(spEncryptionCertificatePairs)); + assertThat(encrypted, not(equalTo(xml))); + + final String signed = signDoc(encrypted); + assertThat(signed, not(equalTo(encrypted))); + + final SamlToken token = token(signed); + final SamlAttributes attributes = authenticator.authenticate(token); + assertThat(attributes, notNullValue()); + assertThat(attributes.attributes(), iterableWithSize(2)); + final List uid = attributes.getAttributeValues("urn:oid:0.9.2342.19200300.100.1.1"); + assertThat(uid, contains("daredevil")); + + final List groups = attributes.getAttributeValues("urn:oid:1.3.6.1.4.1.5923.1.5.1.1"); + assertThat(groups, containsInAnyOrder("defenders", "netflix")); + } + + public void testSuccessfullyParseContentFromEncryptedAndSignedAssertion() throws Exception { + final Instant now = clock.instant(); + final String xml = getSimpleResponse(now); + + final String signed = processAssertions(parseDocument(xml), element -> { + // For the signature to validate, it needs to be made against a fragment assertion, so we: + // - convert the assertion to an xml-string + // - parse it into a new doc + // - sign it there + // - then replace it in the original doc + // This is the most reliable way to get a valid signature + final String str = SamlUtils.toString(element); + final Element clone = parseDocument(str).getDocumentElement(); + signElement(clone, idpSigningCertificatePair); + element.getOwnerDocument().adoptNode(clone); + element.getParentNode().replaceChild(clone, element); + }); + + assertThat(signed, not(equalTo(xml))); + final String encrypted = encryptAssertions(signed, randomFrom(spEncryptionCertificatePairs)); + assertThat(encrypted, not(equalTo(signed))); + + final SamlToken token = 
token(encrypted); + final SamlAttributes attributes = authenticator.authenticate(token); + assertThat(attributes, notNullValue()); + assertThat(attributes.attributes(), iterableWithSize(2)); + final List uid = attributes.getAttributeValues("urn:oid:0.9.2342.19200300.100.1.1"); + assertThat(uid, contains("daredevil")); + + final List groups = attributes.getAttributeValues("urn:oid:1.3.6.1.4.1.5923.1.5.1.1"); + assertThat(groups, containsInAnyOrder("defenders", "netflix")); + } + + public void testSuccessfullyParseContentFromEncryptedAttribute() throws Exception { + final CryptoTransform signer = randomBoolean() ? this::signDoc : this::signAssertions; + final Instant now = clock.instant(); + final String xml = getSimpleResponse(now); + + final String encrypted = encryptAttributes(xml, randomFrom(spEncryptionCertificatePairs)); + assertThat(encrypted, not(equalTo(xml))); + + final String signed = signer.transform(encrypted, idpSigningCertificatePair); + assertThat(signed, not(equalTo(encrypted))); + + final SamlToken token = token(signed); + final SamlAttributes attributes = authenticator.authenticate(token); + assertThat(attributes, notNullValue()); + assertThat(attributes.attributes(), iterableWithSize(2)); + final List uid = attributes.getAttributeValues("urn:oid:0.9.2342.19200300.100.1.1"); + assertThat(uid, contains("daredevil")); + + final List groups = attributes.getAttributeValues("urn:oid:1.3.6.1.4.1.5923.1.5.1.1"); + assertThat(groups, containsInAnyOrder("defenders", "netflix")); + } + + public void testFailWhenAssertionsCannotBeDecrypted() throws Exception { + final Instant now = clock.instant(); + final String xml = getSimpleResponse(now); + + // Encrypting with different cert instead of sp cert will mean that the SP cannot decrypt + final String encrypted = encryptAssertions(xml, createKeyPair("RSA")); + assertThat(encrypted, not(equalTo(xml))); + + final String signed = signDoc(encrypted); + assertThat(signed, not(equalTo(encrypted))); + + final SamlToken token = token(signed); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + assertThat(exception.getMessage(), containsString("Failed to decrypt")); + assertThat(exception.getCause(), instanceOf(DecryptionException.class)); + } + + public void testNoAttributesReturnedWhenTheyCannotBeDecrypted() throws Exception { + final Instant now = clock.instant(); + final String xml = getSimpleResponse(now); + + // Encrypting with different cert instead of sp cert will mean that the SP cannot decrypt + final String encrypted = encryptAttributes(xml, createKeyPair("RSA")); + assertThat(encrypted, not(equalTo(xml))); + + final String signed = signDoc(encrypted); + assertThat(signed, not(equalTo(encrypted))); + + final SamlToken token = token(signed); + // Because an assertion can theoretically contains encrypted and unencrypted attributes + // we don't treat a decryption as a hard failure. 
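+        // instead, the attributes that cannot be decrypted are skipped and authentication succeeds with an empty attribute list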
+ final SamlAttributes attributes = authenticator.authenticate(token); + assertThat(attributes.attributes(), iterableWithSize(0)); + } + + public void testIncorrectResponseIssuerIsRejected() throws Exception { + Instant now = clock.instant(); + Instant validUntil = now.plusSeconds(30); + final String sessionindex = randomId(); + final String xml = "\n" + + "" + + "" + IDP_ENTITY_ID + "xxx" + "" + + "" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "randomopaquestring" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + PASSWORD_AUTHN_CTX + "" + + "" + + "" + + "" + + "daredevil" + + "" + + "" + + ""; + SamlToken token = token(signDoc(xml)); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + assertThat(exception.getMessage(), containsString("Issuer")); + assertThat(exception.getCause(), nullValue()); + assertThat(SamlUtils.isSamlException(exception), is(true)); + } + + public void testIncorrectAssertionIssuerIsRejected() throws Exception { + Instant now = clock.instant(); + Instant validUntil = now.plusSeconds(30); + final String sessionindex = randomId(); + final String xml = "\n" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "" + + "" + IDP_ENTITY_ID + "_" + "" + + "" + + "randomopaquestring" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + PASSWORD_AUTHN_CTX + "" + + "" + + "" + + "" + + "daredevil" + + "" + + "" + + ""; + SamlToken token = token(signDoc(xml)); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + assertThat(exception.getMessage(), containsString("Issuer")); + assertThat(exception.getCause(), nullValue()); + assertThat(SamlUtils.isSamlException(exception), is(true)); + } + + public void testIncorrectDestinationIsRejected() throws Exception { + Instant now = clock.instant(); + Instant validUntil = now.plusSeconds(30); + String sessionindex = randomId(); + final String xml = "\n" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "randomopaquestring" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + PASSWORD_AUTHN_CTX + "" + + "" + + "" + + "" + + "daredevil" + + "" + + "" + + ""; + SamlToken token = token(signDoc(xml)); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + assertThat(exception.getMessage(), containsString("destination")); + assertThat(exception.getCause(), nullValue()); + assertThat(SamlUtils.isSamlException(exception), is(true)); + } + + public void testIncorrectRequestIdIsRejected() throws Exception { + Instant now = clock.instant(); + Instant validUntil = now.plusSeconds(30); + final String sessionindex = randomId(); + final String incorrectId = "_012345"; + final String xml = "\n" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "randomopaquestring" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + PASSWORD_AUTHN_CTX + "" + + "" + + "" + + "" + + "daredevil" + + "" + + "" + + ""; + SamlToken token = token(signDoc(xml)); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + assertThat(exception.getMessage(), containsString("in-response-to")); + assertThat(exception.getMessage(), containsString(requestId)); + assertThat(exception.getMessage(), containsString(incorrectId)); + assertThat(exception.getCause(), nullValue()); + assertThat(SamlUtils.isSamlException(exception), is(true)); + } + + public void 
testIncorrectRecipientIsRejected() throws Exception { + Instant now = clock.instant(); + Instant validUntil = now.plusSeconds(30); + String sessionindex = randomId(); + final String xml = "\n" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "randomopaquestring" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + PASSWORD_AUTHN_CTX + "" + + "" + + "" + + "" + + "daredevil" + + "" + + "" + + ""; + SamlToken token = token(signDoc(xml)); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + assertThat(exception.getMessage(), containsString("SAML Assertion SubjectConfirmationData Recipient")); + assertThat(exception.getMessage(), containsString(SP_ACS_URL + "/fake")); + assertThat(exception.getCause(), nullValue()); + assertThat(SamlUtils.isSamlException(exception), is(true)); + } + + public void testAssertionWithoutSubjectIsRejected() throws Exception { + Instant now = clock.instant(); + final String xml = "\n" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "daredevil" + + "" + + "" + + ""; + SamlToken token = token(signDoc(xml)); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + assertThat(exception.getMessage(), containsString("has no Subject")); + assertThat(exception.getCause(), nullValue()); + assertThat(SamlUtils.isSamlException(exception), is(true)); + + } + + public void testAssertionWithoutSubjectConfirmationIsRejected() throws Exception { + Instant now = clock.instant(); + final String xml = "\n" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "randomopaquestring" + + "" + + "" + + "daredevil" + + "" + + "" + + ""; + SamlToken token = token(signDoc(xml)); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + assertThat(exception.getMessage(), containsString("SAML Assertion subject contains 0 bearer SubjectConfirmation")); + assertThat(exception.getCause(), nullValue()); + assertThat(SamlUtils.isSamlException(exception), is(true)); + } + + public void testAssertionWithoutSubjectConfirmationDataIsRejected() throws Exception { + Instant now = clock.instant(); + final String xml = "\n" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "randomopaquestring" + + "" + + "" + + "" + + "daredevil" + + "" + + "" + + ""; + SamlToken token = token(signDoc(xml)); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + assertThat(exception.getMessage(), containsString("bearer SubjectConfirmation, while exactly one was expected.")); + assertThat(exception.getCause(), nullValue()); + assertThat(SamlUtils.isSamlException(exception), is(true)); + } + + public void testAssetionWithoutBearerSubjectConfirmationMethodIsRejected() throws Exception { + Instant now = clock.instant(); + Instant validUntil = now.plusSeconds(30); + final String sessionindex = randomId(); + final String xml = "\n" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "randomopaquestring" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + PASSWORD_AUTHN_CTX + "" + + "" + + "" + + "" + + "daredevil" + + "" + + "" + + ""; + SamlToken token = token(signDoc(xml)); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + 
assertThat(exception.getMessage(), containsString("bearer SubjectConfirmation, while exactly one was expected.")); + assertThat(exception.getCause(), nullValue()); + assertThat(SamlUtils.isSamlException(exception), is(true)); + } + + public void testIncorrectSubjectConfirmationDataInResponseToIsRejected() throws Exception { + Instant now = clock.instant(); + Instant validUntil = now.plusSeconds(30); + final String incorrectId = "_123456"; + final String sessionindex = randomId(); + final String xml = "\n" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "randomopaquestring" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + PASSWORD_AUTHN_CTX + "" + + "" + + "" + + "" + + "daredevil" + + "" + + "" + + ""; + SamlToken token = token(signDoc(xml)); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + assertThat(exception.getMessage(), containsString("SAML Assertion SubjectConfirmationData is in-response-to")); + assertThat(exception.getMessage(), containsString(requestId)); + assertThat(exception.getMessage(), containsString(incorrectId)); + assertThat(exception.getCause(), nullValue()); + assertThat(SamlUtils.isSamlException(exception), is(true)); + } + + public void testExpiredSubjectConfirmationDataIsRejected() throws Exception { + Instant now = clock.instant(); + Instant validUntil = now.plusSeconds(120); + final String sessionindex = randomId(); + final String xml = "\n" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "randomopaquestring" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + PASSWORD_AUTHN_CTX + "" + + "" + + "" + + "" + + "daredevil" + + "" + + "" + + ""; + + // check that the content is valid "now" + final SamlToken token = token(signDoc(xml)); + assertThat(authenticator.authenticate(token), notNullValue()); + + // and still valid if we advance partway through the expiry time + clock.fastForwardSeconds(90); + assertThat(authenticator.authenticate(token), notNullValue()); + + // and still valid if we advance past the expiry time, but allow for clock skew + clock.fastForwardSeconds((int) (30 + maxSkew.seconds() / 2)); + assertThat(authenticator.authenticate(token), notNullValue()); + + // but fails once we get past the clock skew allowance + clock.fastForwardSeconds((int) (1 + maxSkew.seconds() / 2)); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + assertThat(exception.getMessage(), containsString("on/after")); + assertThat(exception.getCause(), nullValue()); + assertThat(SamlUtils.isSamlException(exception), is(true)); + } + + public void testIdpInitiatedLoginIsAllowed() throws Exception { + /* An IdP initiated login has no "in response to" + * This might happen if: + * - The IDP has a list of services to pick from (like the Okta dashboard) + * - The IDP had to do some housework (like a forced password change) during the login flow, and switch from an in-response-to + * login to an IDP initiated login. 
+ */ + Instant now = clock.instant(); + Instant validUntil = now.plusSeconds(30); + final String sessionindex = randomId(); + final String xml = "\n" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "randomopaquestring" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + PASSWORD_AUTHN_CTX + "" + + "" + + "" + + "" + + "daredevil" + + "" + + "" + + ""; + final SamlToken token = token(signDoc(xml)); + final SamlAttributes attributes = authenticator.authenticate(token); + assertThat(attributes, notNullValue()); + assertThat(attributes.attributes(), iterableWithSize(1)); + } + + public void testIncorrectSigningKeyIsRejected() throws Exception { + final CryptoTransform signer = randomBoolean() ? this::signDoc : this::signAssertions; + Instant now = clock.instant(); + Instant validUntil = now.plusSeconds(30); + final String sessionindex = randomId(); + final String xml = "\n" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "randomopaquestring" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + PASSWORD_AUTHN_CTX + "" + + "" + + "" + + "" + + "daredevil" + + "" + + "" + + ""; + + // check that the content is valid when signed by the correct key-pair + assertThat(authenticator.authenticate(token(signer.transform(xml, idpSigningCertificatePair))), notNullValue()); + + // check is rejected when signed by a different key-pair + final Tuple wrongKey = createKeyPair(randomSigningAlgorithm()); + final ElasticsearchSecurityException exception = expectThrows(ElasticsearchSecurityException.class, + () -> authenticator.authenticate(token(signer.transform(xml, wrongKey)))); + assertThat(exception.getMessage(), containsString("SAML Signature")); + assertThat(exception.getMessage(), containsString("could not be validated")); + assertThat(exception.getCause(), nullValue()); + assertThat(SamlUtils.isSamlException(exception), is(true)); + } + + public void testSigningKeyIsReloadedForEachRequest() throws Exception { + final CryptoTransform signer = randomBoolean() ? this::signDoc : this::signAssertions; + final String xml = getSimpleResponse(Instant.now()); + + assertThat(authenticator.authenticate(token(signer.transform(xml, idpSigningCertificatePair))), notNullValue()); + + final Tuple oldKeyPair = idpSigningCertificatePair; + idpSigningCertificatePair = createKeyPair(randomSigningAlgorithm()); + assertThat(idpSigningCertificatePair.v2(), not(equalTo(oldKeyPair.v2()))); + assertThat(authenticator.authenticate(token(signer.transform(xml, idpSigningCertificatePair))), notNullValue()); + } + + public void testParsingRejectsTamperedContent() throws Exception { + CryptoTransform signer = randomBoolean() ? 
this::signDoc : this::signAssertions; + Instant now = clock.instant(); + Instant validUntil = now.plusSeconds(30); + final String sessionindex = randomId(); + final String xml = "\n" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "randomopaquestring" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + PASSWORD_AUTHN_CTX + "" + + "" + + "" + + "" + + "daredevil" + + "" + + "" + + ""; + + // check that the original signed content is valid + final String signed = signer.transform(xml, idpSigningCertificatePair); + assertThat(authenticator.authenticate(token(signed)), notNullValue()); + + // but altered content is rejected + final String altered = signed.replace("daredevil", "iron fist"); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token(altered))); + assertThat(exception.getMessage(), containsString("SAML Signature")); + assertThat(exception.getMessage(), containsString("could not be validated")); + assertThat(exception.getCause(), nullValue()); + assertThat(SamlUtils.isSamlException(exception), is(true)); + } + + public void testSigningWhenIdpHasMultipleKeys() throws Exception { + final int numberOfKeys = scaledRandomIntBetween(2, 6); + final List> keys = new ArrayList<>(numberOfKeys); + final List credentials = new ArrayList<>(numberOfKeys); + for (int i = 0; i < numberOfKeys; i++) { + final Tuple key = createKeyPair(randomSigningAlgorithm()); + keys.add(key); + credentials.addAll(buildOpenSamlCredential(key)); + } + this.authenticator = buildAuthenticator(() -> credentials); + final CryptoTransform signer = randomBoolean() ? this::signDoc : this::signAssertions; + Instant now = clock.instant(); + Instant validUntil = now.plusSeconds(30); + final String sessionindex = randomId(); + final String xml = "\n" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "randomopaquestring" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + PASSWORD_AUTHN_CTX + "" + + "" + + "" + + "" + + "daredevil" + + "" + + "" + + ""; + + // check that the content is valid when signed by the each of the key-pairs + for (Tuple key : keys) { + assertThat(authenticator.authenticate(token(signer.transform(xml, key))), notNullValue()); + } + } + + /** + * This is testing a test, but the real signing tests are useless if our signing is incorrectly implemented + */ + public void testThatTheTestSignersInteractCorrectlyWithOpenSaml() throws Exception { + final String xml = getSimpleResponse(clock.instant()); + final Response unsigned = toResponse(xml); + assertThat(unsigned.isSigned(), equalTo(false)); + assertThat(unsigned.getAssertions().get(0).isSigned(), equalTo(false)); + + final Response signedDoc = toResponse(signDoc(xml, idpSigningCertificatePair)); + assertThat(signedDoc.isSigned(), equalTo(true)); + assertThat(signedDoc.getAssertions().get(0).isSigned(), equalTo(false)); + + final Response signedAssertions = toResponse(signAssertions(xml, idpSigningCertificatePair)); + assertThat(signedAssertions.isSigned(), equalTo(false)); + assertThat(signedAssertions.getAssertions().get(0).isSigned(), equalTo(true)); + } + + /** + * This is testing a test, but the real encryption tests are useless if our encryption routines don't do anything + */ + public void testThatTheTestEncryptionInteractsCorrectlyWithOpenSaml() throws Exception { + final String xml = getSimpleResponse(clock.instant()); + + final Response unencrypted = toResponse(xml); + // Expect Assertion > AttributeStatement (x2) > 
Attribute + assertThat(unencrypted.getAssertions(), iterableWithSize(1)); + assertThat(unencrypted.getEncryptedAssertions(), iterableWithSize(0)); + for (Assertion assertion : unencrypted.getAssertions()) { + assertThat(assertion.getAttributeStatements(), iterableWithSize(2)); + for (AttributeStatement statement : assertion.getAttributeStatements()) { + assertThat(statement.getAttributes(), iterableWithSize(1)); + assertThat(statement.getEncryptedAttributes(), iterableWithSize(0)); + } + } + + final Tuple spEncryptionCertificatePair = randomFrom(spEncryptionCertificatePairs); + final Response encryptedAssertion = toResponse(encryptAssertions(xml, spEncryptionCertificatePair)); + // Expect EncryptedAssertion + assertThat(encryptedAssertion.getAssertions(), iterableWithSize(0)); + assertThat(encryptedAssertion.getEncryptedAssertions(), iterableWithSize(1)); + + final Response encryptedAttributes = toResponse(encryptAttributes(xml, spEncryptionCertificatePair)); + // Expect Assertion > AttributeStatement (x2) > EncryptedAttribute + assertThat(encryptedAttributes.getAssertions(), iterableWithSize(1)); + assertThat(encryptedAttributes.getEncryptedAssertions(), iterableWithSize(0)); + for (Assertion assertion : encryptedAttributes.getAssertions()) { + assertThat(assertion.getAttributeStatements(), iterableWithSize(2)); + for (AttributeStatement statement : assertion.getAttributeStatements()) { + assertThat(statement.getAttributes(), iterableWithSize(0)); + assertThat(statement.getEncryptedAttributes(), iterableWithSize(1)); + } + } + } + + public void testExpiredContentIsRejected() throws Exception { + Instant now = clock.instant(); + Instant validUntil = now.plusSeconds(120); + final String sessionindex = randomId(); + final String xml = "\n" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "randomopaquestring" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + PASSWORD_AUTHN_CTX + "" + + "" + + "" + + "" + + "daredevil" + + "" + + "" + + ""; + + // check that the content is valid "now" + final SamlToken token = token(signDoc(xml)); + assertThat(authenticator.authenticate(token), notNullValue()); + + // and still valid if we advance partway through the expiry time + clock.fastForwardSeconds(90); + assertThat(authenticator.authenticate(token), notNullValue()); + + // and still valid if we advance past the expiry time, but allow for clock skew + clock.fastForwardSeconds((int) (30 + maxSkew.seconds() / 2)); + assertThat(authenticator.authenticate(token), notNullValue()); + + // but fails once we get past the clock skew allowance + clock.fastForwardSeconds((int) (1 + maxSkew.seconds() / 2)); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + assertThat(exception.getMessage(), containsString("on/after")); + assertThat(exception.getCause(), nullValue()); + assertThat(SamlUtils.isSamlException(exception), is(true)); + } + + public void testContentIsRejectedIfRestrictedToADifferentAudience() throws Exception { + final String audience = "https://some.other.sp/SAML2"; + final String xml = getResponseWithAudienceRestriction(audience); + final SamlToken token = token(signDoc(xml)); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + assertThat(exception.getMessage(), containsString("required audience")); + assertThat(exception.getMessage(), containsString(audience)); + assertThat(exception.getMessage(), containsString(SP_ENTITY_ID)); + 
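// A minimal sketch of the clock arithmetic exercised by testExpiredSubjectConfirmationDataIsRejected
// and testExpiredContentIsRejected above, assuming maxSkew is a TimeValue and the assertion expires
// 120 seconds after "now". The token is only rejected once the test clock moves past the expiry time
// plus the configured skew allowance (s = maxSkew.seconds()):
//
//   start:                            clock = now                              -> accepted
//   fastForwardSeconds(90)            clock = now + 90s     (before expiry)    -> accepted
//   fastForwardSeconds(30 + s / 2)    clock = now + 120s + s/2  (inside skew)  -> accepted
//   fastForwardSeconds(1 + s / 2)     clock = now + 121s + s    (past skew)    -> rejected
//
// Splitting the skew across the last two steps ensures only the final advance crosses expiry + maxSkew.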
assertThat(exception.getCause(), nullValue()); + assertThat(SamlUtils.isSamlException(exception), is(true)); + } + + public void testContentIsAcceptedIfRestrictedToOurAudience() throws Exception { + final String xml = getResponseWithAudienceRestriction(SP_ENTITY_ID); + final SamlToken token = token(signDoc(xml)); + final SamlAttributes attributes = authenticator.authenticate(token); + assertThat(attributes, notNullValue()); + assertThat(attributes.attributes(), not(empty())); + } + + public void testContentIsRejectedIfNotMarkedAsSuccess() throws Exception { + final String xml = getSimpleResponse(clock.instant()).replace(StatusCode.SUCCESS, StatusCode.REQUESTER); + final SamlToken token = token(signDoc(xml)); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + assertThat(exception.getMessage(), containsString("not a 'success' response")); + assertThat(exception.getMessage(), containsString(StatusCode.REQUESTER)); + assertThat(exception.getCause(), nullValue()); + assertThat(SamlUtils.isSamlException(exception), is(true)); + } + + /* + * Implement most of the attacks described in https://www.usenix.org/system/files/conference/usenixsecurity12/sec12-final91-8-23-12.pdf + * as tests + */ + + public void testSignatureWrappingAttackOne() throws Exception { + final Instant now = clock.instant(); + final String xml = getSimpleResponse(now); + final Document legitimateDocument = parseDocument(signDoc(xml, idpSigningCertificatePair)); + // First verify that the correct SAML Response can be consumed + final SamlToken legitimateToken = token(SamlUtils.toString(legitimateDocument.getDocumentElement())); + final SamlAttributes attributes = authenticator.authenticate(legitimateToken); + assertThat(attributes, notNullValue()); + assertThat(attributes.attributes(), iterableWithSize(2)); + final List uid = attributes.getAttributeValues("urn:oid:0.9.2342.19200300.100.1.1"); + assertThat(uid, contains("daredevil")); + /* + Permutation 1 - Mangle the contents of the response to be + + + + + + + */ + final Element response = (Element) legitimateDocument. + getElementsByTagNameNS(SAML20P_NS, "Response").item(0); + final Element clonedResponse = (Element) response.cloneNode(true); + final Element clonedSignature = (Element) clonedResponse. + getElementsByTagNameNS("http://www.w3.org/2000/09/xmldsig#", "Signature").item(0); + clonedResponse.removeChild(clonedSignature); + final Element legitimateSignature = (Element) response. 
+ getElementsByTagNameNS("http://www.w3.org/2000/09/xmldsig#", "Signature").item(0); + legitimateSignature.appendChild(clonedResponse); + response.setAttribute("ID", "_forged_ID"); + final SamlToken forgedToken = token(SamlUtils.toString((legitimateDocument.getDocumentElement()))); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(forgedToken)); + assertThat(exception.getMessage(), containsString("Failed to parse SAML")); + assertThat(exception.getCause(), instanceOf(SAXException.class)); + } + + public void testSignatureWrappingAttackTwo() throws Exception { + final Instant now = clock.instant(); + final String xml = getSimpleResponse(now); + final Document legitimateDocument = parseDocument(signDoc(xml, idpSigningCertificatePair)); + // First verify that the correct SAML Response can be consumed + final SamlToken legitimateToken = token(SamlUtils.toString(legitimateDocument.getDocumentElement())); + final SamlAttributes attributes = authenticator.authenticate(legitimateToken); + assertThat(attributes, notNullValue()); + assertThat(attributes.attributes(), iterableWithSize(2)); + final List uid = attributes.getAttributeValues("urn:oid:0.9.2342.19200300.100.1.1"); + assertThat(uid, contains("daredevil")); + /* + Permutation 2 - Mangle the contents of the response to be + + + + + + */ + final Element response = (Element) legitimateDocument. + getElementsByTagNameNS(SAML20P_NS, "Response").item(0); + final Element clonedResponse = (Element) response.cloneNode(true); + final Element clonedSignature = (Element) clonedResponse. + getElementsByTagNameNS("http://www.w3.org/2000/09/xmldsig#", "Signature").item(0); + clonedResponse.removeChild(clonedSignature); + final Element legitimateSignature = (Element) response. + getElementsByTagNameNS("http://www.w3.org/2000/09/xmldsig#", "Signature").item(0); + response.insertBefore(clonedResponse, legitimateSignature); + response.setAttribute("ID", "_forged_ID"); + final SamlToken forgedToken = token(SamlUtils.toString((legitimateDocument.getDocumentElement()))); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(forgedToken)); + assertThat(exception.getMessage(), containsString("Failed to parse SAML")); + assertThat(exception.getCause(), instanceOf(SAXException.class)); + } + + /* + * Most commonly successful XSW attack + */ + public void testSignatureWrappingAttackThree() throws Exception { + final Instant now = clock.instant(); + final String xml = getSimpleResponse(now); + final Document legitimateDocument = parseDocument(signAssertions(xml, idpSigningCertificatePair)); + // First verify that the correct SAML Response can be consumed + final SamlToken legitimateToken = token(SamlUtils.toString(legitimateDocument.getDocumentElement())); + final SamlAttributes attributes = authenticator.authenticate(legitimateToken); + assertThat(attributes, notNullValue()); + assertThat(attributes.attributes(), iterableWithSize(2)); + final List uid = attributes.getAttributeValues("urn:oid:0.9.2342.19200300.100.1.1"); + assertThat(uid, contains("daredevil")); + /* + Permutation 3 - Mangle the contents of the response to be + + + + + + + */ + final Element response = (Element) legitimateDocument. + getElementsByTagNameNS(SAML20P_NS, "Response").item(0); + final Element assertion = (Element) legitimateDocument. 
+ getElementsByTagNameNS(SAML20_NS, "Assertion").item(0); + final Element forgedAssertion = (Element) assertion.cloneNode(true); + forgedAssertion.setAttribute("ID", "_forged_assertion_id"); + final Element clonedSignature = (Element) forgedAssertion. + getElementsByTagNameNS("http://www.w3.org/2000/09/xmldsig#", "Signature").item(0); + forgedAssertion.removeChild(clonedSignature); + response.insertBefore(forgedAssertion, assertion); + final SamlToken forgedToken = token(SamlUtils.toString((legitimateDocument.getDocumentElement()))); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(forgedToken)); + assertThat(exception.getMessage(), containsString("Expecting only 1 assertion, but response contains multiple")); + assertThat(exception.getCause(), nullValue()); + assertThat(SamlUtils.isSamlException(exception), is(true)); + + } + + + public void testSignatureWrappingAttackFour() throws Exception { + final Instant now = clock.instant(); + final String xml = getSimpleResponse(now); + final Document legitimateDocument = parseDocument(signAssertions(xml, idpSigningCertificatePair)); + // First verify that the correct SAML Response can be consumed + final SamlToken legitimateToken = token(SamlUtils.toString(legitimateDocument.getDocumentElement())); + final SamlAttributes attributes = authenticator.authenticate(legitimateToken); + assertThat(attributes, notNullValue()); + assertThat(attributes.attributes(), iterableWithSize(2)); + final List uid = attributes.getAttributeValues("urn:oid:0.9.2342.19200300.100.1.1"); + assertThat(uid, contains("daredevil")); + /* + Permutation 4 - Mangle the contents of the response to be + + + + + + + + */ + final Element response = (Element) legitimateDocument. + getElementsByTagNameNS(SAML20P_NS, "Response").item(0); + final Element assertion = (Element) legitimateDocument. + getElementsByTagNameNS(SAML20_NS, "Assertion").item(0); + final Element forgedAssertion = (Element) assertion.cloneNode(true); + forgedAssertion.setAttribute("ID", "_forged_assertion_id"); + final Element clonedSignature = (Element) forgedAssertion. 
+ getElementsByTagNameNS("http://www.w3.org/2000/09/xmldsig#", "Signature").item(0); + forgedAssertion.removeChild(clonedSignature); + response.appendChild(forgedAssertion); + forgedAssertion.appendChild(assertion); + final SamlToken forgedToken = token(SamlUtils.toString((legitimateDocument.getDocumentElement()))); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(forgedToken)); + assertThat(exception.getMessage(), containsString("Failed to parse SAML")); + assertThat(exception.getCause(), instanceOf(SAXException.class)); + } + + public void testSignatureWrappingAttackFive() throws Exception { + final Instant now = clock.instant(); + final String xml = getSimpleResponse(now); + final Document legitimateDocument = parseDocument(signAssertions(xml, idpSigningCertificatePair)); + // First verify that the correct SAML Response can be consumed + final SamlToken legitimateToken = token(SamlUtils.toString(legitimateDocument.getDocumentElement())); + final SamlAttributes attributes = authenticator.authenticate(legitimateToken); + assertThat(attributes, notNullValue()); + assertThat(attributes.attributes(), iterableWithSize(2)); + final List uid = attributes.getAttributeValues("urn:oid:0.9.2342.19200300.100.1.1"); + assertThat(uid, contains("daredevil")); + /* + Permutation 5 - Mangle the contents of the response to be + + + + + + + */ + final Element response = (Element) legitimateDocument. + getElementsByTagNameNS(SAML20P_NS, "Response").item(0); + final Element assertion = (Element) legitimateDocument. + getElementsByTagNameNS(SAML20_NS, "Assertion").item(0); + final Element signature = (Element) assertion. + getElementsByTagNameNS("http://www.w3.org/2000/09/xmldsig#", "Signature").item(0); + assertion.removeChild(signature); + final Element forgedAssertion = (Element) assertion.cloneNode(true); + forgedAssertion.setAttribute("ID", "_forged_assertion_id"); + final Element issuer = (Element) forgedAssertion. + getElementsByTagNameNS(SAML20_NS, "Issuer").item(0); + forgedAssertion.insertBefore(signature, issuer.getNextSibling()); + response.insertBefore(forgedAssertion, assertion); + final SamlToken forgedToken = token(SamlUtils.toString((legitimateDocument.getDocumentElement()))); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(forgedToken)); + assertThat(exception.getMessage(), containsString("Expecting only 1 assertion, but response contains multiple")); + } + + public void testSignatureWrappingAttackSix() throws Exception { + final Instant now = clock.instant(); + final String xml = getSimpleResponse(now); + final Document legitimateDocument = parseDocument(signAssertions(xml, idpSigningCertificatePair)); + // First verify that the correct SAML Response can be consumed + final SamlToken legitimateToken = token(SamlUtils.toString(legitimateDocument.getDocumentElement())); + final SamlAttributes attributes = authenticator.authenticate(legitimateToken); + assertThat(attributes, notNullValue()); + assertThat(attributes.attributes(), iterableWithSize(2)); + final List uid = attributes.getAttributeValues("urn:oid:0.9.2342.19200300.100.1.1"); + assertThat(uid, contains("daredevil")); + /* + Permutation 6 - Mangle the contents of the response to be + + + + + + + + */ + final Element response = (Element) legitimateDocument. + getElementsByTagNameNS(SAML20P_NS, "Response").item(0); + final Element assertion = (Element) legitimateDocument. 
+ getElementsByTagNameNS(SAML20_NS, "Assertion").item(0); + final Element forgedAssertion = (Element) assertion.cloneNode(true); + forgedAssertion.setAttribute("ID", "_forged_assertion_id"); + final Element signature = (Element) assertion. + getElementsByTagNameNS("http://www.w3.org/2000/09/xmldsig#", "Signature").item(0); + final Element forgedSignature = (Element) forgedAssertion. + getElementsByTagNameNS("http://www.w3.org/2000/09/xmldsig#", "Signature").item(0); + forgedAssertion.removeChild(forgedSignature); + assertion.removeChild(signature); + final Element issuer = (Element) forgedAssertion. + getElementsByTagNameNS(SAML20_NS, "Issuer").item(0); + forgedAssertion.insertBefore(signature, issuer.getNextSibling()); + signature.appendChild(assertion); + response.appendChild(forgedAssertion); + final SamlToken forgedToken = token(SamlUtils.toString((legitimateDocument.getDocumentElement()))); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(forgedToken)); + assertThat(exception.getMessage(), containsString("Failed to parse SAML")); + assertThat(exception.getCause(), instanceOf(SAXException.class)); + } + + public void testSignatureWrappingAttackSeven() throws Exception { + final Instant now = clock.instant(); + final String xml = getSimpleResponse(now); + final Document legitimateDocument = parseDocument(signAssertions(xml, idpSigningCertificatePair)); + // First verify that the correct SAML Response can be consumed + final SamlToken legitimateToken = token(SamlUtils.toString(legitimateDocument.getDocumentElement())); + final SamlAttributes attributes = authenticator.authenticate(legitimateToken); + assertThat(attributes, notNullValue()); + assertThat(attributes.attributes(), iterableWithSize(2)); + final List uid = attributes.getAttributeValues("urn:oid:0.9.2342.19200300.100.1.1"); + assertThat(uid, contains("daredevil")); + /* + Permutation 7 - Mangle the contents of the response to be + + + + + + + + */ + final Element response = (Element) legitimateDocument. + getElementsByTagNameNS(SAML20P_NS, "Response").item(0); + final Element extentions = legitimateDocument.createElement("Extensions"); + final Element assertion = (Element) legitimateDocument. + getElementsByTagNameNS(SAML20_NS, "Assertion").item(0); + response.insertBefore(extentions, assertion); + final Element forgedAssertion = (Element) assertion.cloneNode(true); + forgedAssertion.setAttribute("ID", "_forged_assertion_id"); + final Element forgedSignature = (Element) forgedAssertion. 
+ getElementsByTagNameNS("http://www.w3.org/2000/09/xmldsig#", "Signature").item(0); + forgedAssertion.removeChild(forgedSignature); + extentions.appendChild(forgedAssertion); + final SamlToken forgedToken = token(SamlUtils.toString((legitimateDocument.getDocumentElement()))); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(forgedToken)); + assertThat(exception.getMessage(), containsString("Failed to parse SAML")); + assertThat(exception.getCause(), instanceOf(SAXException.class)); + } + + public void testSignatureWrappingAttackEight() throws Exception { + final Instant now = clock.instant(); + final String xml = getSimpleResponse(now); + final Document legitimateDocument = parseDocument(signAssertions(xml, idpSigningCertificatePair)); + // First verify that the correct SAML Response can be consumed + final SamlToken legitimateToken = token(SamlUtils.toString(legitimateDocument.getDocumentElement())); + final SamlAttributes attributes = authenticator.authenticate(legitimateToken); + assertThat(attributes, notNullValue()); + assertThat(attributes.attributes(), iterableWithSize(2)); + final List uid = attributes.getAttributeValues("urn:oid:0.9.2342.19200300.100.1.1"); + assertThat(uid, contains("daredevil")); + /* + Permutation 8 - Mangle the contents of the response to be + + + + + + + + + + */ + final Element response = (Element) legitimateDocument. + getElementsByTagNameNS(SAML20P_NS, "Response").item(0); + final Element assertion = (Element) legitimateDocument. + getElementsByTagNameNS(SAML20_NS, "Assertion").item(0); + final Element forgedAssertion = (Element) assertion.cloneNode(true); + forgedAssertion.setAttribute("ID", "_forged_assertion_id"); + final Element signature = (Element) assertion. + getElementsByTagNameNS("http://www.w3.org/2000/09/xmldsig#", "Signature").item(0); + final Element forgedSignature = (Element) forgedAssertion. + getElementsByTagNameNS("http://www.w3.org/2000/09/xmldsig#", "Signature").item(0); + forgedAssertion.removeChild(forgedSignature); + assertion.removeChild(signature); + final Element issuer = (Element) forgedAssertion. 
+ getElementsByTagNameNS(SAML20_NS, "Issuer").item(0); + forgedAssertion.insertBefore(signature, issuer.getNextSibling()); + Element object = legitimateDocument.createElement("Object"); + object.appendChild(assertion); + signature.appendChild(object); + response.appendChild(forgedAssertion); + final SamlToken forgedToken = token(SamlUtils.toString((legitimateDocument.getDocumentElement()))); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(forgedToken)); + assertThat(exception.getMessage(), containsString("Failed to parse SAML")); + assertThat(exception.getCause(), instanceOf(SAXException.class)); + } + + public void testXXE() throws Exception { + String xml = "\n" + + " ]>" + + "&xxe;"; + final SamlToken token = token(xml); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + assertThat(exception.getCause(), instanceOf(SAXException.class)); + assertThat(exception.getCause().getMessage(), containsString("DOCTYPE")); + } + + public void testBillionLaughsAttack() throws Exception { + // There is no need to go up to N iterations + String xml = "\n" + + " \n" + + "]>\n" + + "&lol1;"; + final SamlToken token = token(xml); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + assertThat(exception.getCause(), instanceOf(SAXException.class)); + assertThat(exception.getCause().getMessage(), containsString("DOCTYPE")); + } + + public void testIgnoredCommentsInForgedResponses() throws Exception { + final String legitimateNameId = "useradmin@example.com"; + final String forgedNameId = "useradmin@example.com"; + final String signedXml = signDoc(getSimpleResponse(clock.instant(), legitimateNameId, randomId())); + final String forgedXml = signedXml.replace(legitimateNameId, forgedNameId); + final SamlToken forgedToken = token(forgedXml); + final SamlAttributes attributes = authenticator.authenticate(forgedToken); + assertThat(attributes.name(), notNullValue()); + assertThat(attributes.name().format, equalTo(TRANSIENT)); + assertThat(attributes.name().value, equalTo(legitimateNameId)); + } + + public void testIgnoredCommentsInLegitimateResponses() throws Exception { + final String nameId = "useradmin@example.com"; + final String sanitizedNameId = "useradmin@example.com"; + final String xml = getSimpleResponse(clock.instant(), nameId, randomId()); + final SamlToken token = token(signDoc(xml)); + final SamlAttributes attributes = authenticator.authenticate(token); + assertThat(attributes.name(), notNullValue()); + assertThat(attributes.name().format, equalTo(TRANSIENT)); + assertThat(attributes.name().value, equalTo(sanitizedNameId)); + } + + public void testIgnoredCommentsInResponseUsingCanonicalizationWithComments() throws Exception { + final String nameId = "useradmin@example.com"; + final String sanitizedNameId = "useradmin@example.com"; + final String xml = getSimpleResponse(clock.instant(), nameId, randomId()); + final SamlToken token = token(signDoc(xml, EXCLUSIVE_WITH_COMMENTS)); + final SamlAttributes attributes = authenticator.authenticate(token); + assertThat(attributes.name(), notNullValue()); + assertThat(attributes.name().format, equalTo(TRANSIENT)); + assertThat(attributes.name().value, equalTo(sanitizedNameId)); + } + + public void testFailureWhenIdPCredentialsAreEmpty() throws Exception { + authenticator = buildAuthenticator(() -> emptyList()); + final String xml = getSimpleResponse(clock.instant()); + final 
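// A minimal sketch of the DTD-based payloads fed to the parser by testXXE and
// testBillionLaughsAttack above; the exact entity definitions shown here are illustrative
// assumptions. testXXE relies on an external entity declaration along the lines of
//
//   <!DOCTYPE foo [ <!ENTITY xxe SYSTEM "file:///etc/passwd"> ]>
//   <foo>&xxe;</foo>
//
// while testBillionLaughsAttack uses a short entity-expansion chain such as
//
//   <!DOCTYPE lolz [ <!ENTITY lol "lol"> <!ENTITY lol1 "&lol;&lol;&lol;&lol;"> ]>
//   <lolz>&lol1;</lolz>
//
// In both cases the hardened parser is expected to refuse any DOCTYPE declaration outright, which
// is why both tests assert a SAXException whose message mentions DOCTYPE.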
SamlToken token = token(signDoc(xml)); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + assertThat(exception.getCause(), nullValue()); + assertThat(exception.getMessage(), containsString("SAML Signature")); + assertThat(exception.getMessage(), containsString("could not be validated")); + //Restore the authenticator with credentials for the rest of the test cases + authenticator = buildAuthenticator(() -> buildOpenSamlCredential(idpSigningCertificatePair)); + } + + public void testFailureWhenIdPCredentialsAreNull() throws Exception { + authenticator = buildAuthenticator(() -> singletonList(null)); + final String xml = getSimpleResponse(clock.instant()); + final SamlToken token = token(signDoc(xml)); + final ElasticsearchSecurityException exception = expectSamlException(() -> authenticator.authenticate(token)); + assertThat(exception.getCause(), nullValue()); + assertThat(exception.getMessage(), containsString("SAML Signature")); + assertThat(exception.getMessage(), containsString("could not be validated")); + //Restore the authenticator with credentials for the rest of the test cases + authenticator = buildAuthenticator(() -> buildOpenSamlCredential(idpSigningCertificatePair)); + } + + private interface CryptoTransform { + String transform(String xml, Tuple keyPair) throws Exception; + } + + private String signDoc(String xml) throws Exception { + return signDoc(xml, EXCLUSIVE, SamlAuthenticatorTests.idpSigningCertificatePair); + } + + private String signDoc(String xml, Tuple keyPair) throws Exception { + return signDoc(xml, EXCLUSIVE, keyPair); + } + + private String signDoc(String xml, String c14nMethod) throws Exception { + return signDoc(xml, c14nMethod, SamlAuthenticatorTests.idpSigningCertificatePair); + } + + private String signDoc(String xml, String c14nMethod, Tuple keyPair) throws Exception { + final Document doc = parseDocument(xml); + signElement(doc.getDocumentElement(), keyPair, c14nMethod); + return SamlUtils.toString(doc.getDocumentElement()); + } + + private String signAssertions(String xml, Tuple keyPair) throws Exception { + final Document doc = parseDocument(xml); + return processAssertions(doc, node -> signElement(node, keyPair)); + } + + private String encryptAssertions(String xml, Tuple keyPair) throws Exception { + final X509Certificate certificate = keyPair.v1(); + final Document doc = parseDocument(xml); + // Some Identity Providers (e.g. Shibboleth) include the AES directly within the element + // And some (e.g. 
Okta) include a that links to an that is a sibling of the + final boolean withRetrievalMethod = randomBoolean(); + return processAssertions(doc, node -> wrapAndEncrypt(node, "Assertion", certificate, withRetrievalMethod)); + } + + private String encryptAttributes(String xml, Tuple keyPair) throws Exception { + final X509Certificate certificate = keyPair.v1(); + final Document doc = parseDocument(xml); + return processAttributes(doc, node -> wrapAndEncrypt(node, "Attribute", certificate, false)); + } + + private String processAssertions(Document doc, CheckedConsumer consumer) throws Exception { + return processNodes(doc, SAML20_NS, "Assertion", consumer); + } + + private String processAttributes(Document doc, CheckedConsumer consumer) throws Exception { + return processNodes(doc, SAML20_NS, "Attribute", consumer); + } + + private String processNodes(Document doc, String namespaceURI, String localName, CheckedConsumer consumer) + throws Exception { + final NodeList nodes = doc.getElementsByTagNameNS(namespaceURI, localName); + // Because the consumer changes the nodes (and removes them from the document) we need to clone the list + List list = new ArrayList<>(); + for (int i = 0; i < nodes.getLength(); i++) { + list.add(nodes.item(i)); + } + for (Node node : list) { + consumer.accept((Element) node); + } + return SamlUtils.toString(doc.getDocumentElement()); + } + + private Document parseDocument(String xml) throws ParserConfigurationException, SAXException, IOException { + final DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); + dbf.setNamespaceAware(true); + final DocumentBuilder documentBuilder = dbf.newDocumentBuilder(); + return documentBuilder.parse(new InputSource(new StringReader(xml))); + } + + private void signElement(Element parent, Tuple keyPair) throws Exception { + signElement(parent, keyPair, EXCLUSIVE); + } + + /** + * Randomly selects digital signature algorithm URI for given private key + * algorithm ({@link PrivateKey#getAlgorithm()}). 
+ * + * @param key + * {@link PrivateKey} + * @return algorithm URI + */ + private String getSignatureAlgorithmURI(PrivateKey key) { + String algoUri = null; + switch (key.getAlgorithm()) { + case "RSA": + algoUri = randomFrom("http://www.w3.org/2001/04/xmldsig-more#rsa-sha256", + "http://www.w3.org/2001/04/xmldsig-more#rsa-sha512"); + break; + case "DSA": + algoUri = "http://www.w3.org/2009/xmldsig11#dsa-sha256"; + break; + case "EC": + algoUri = randomFrom("http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha256", + "http://www.w3.org/2001/04/xmldsig-more#ecdsa-sha512"); + break; + default: + throw new IllegalArgumentException("Unsupported algorithm : " + key.getAlgorithm() + + " for signature, allowed values for private key algorithm are [RSA, DSA, EC]"); + } + return algoUri; + } + + private void signElement(Element parent, Tuple keyPair, String c14nMethod) throws Exception { + //We need to explicitly set the Id attribute, "ID" is just our convention + parent.setIdAttribute("ID", true); + final String refID = "#" + parent.getAttribute("ID"); + final X509Certificate certificate = keyPair.v1(); + final PrivateKey privateKey = keyPair.v2(); + final XMLSignatureFactory fac = XMLSignatureFactory.getInstance("DOM"); + final DigestMethod digestMethod = fac.newDigestMethod(randomFrom(DigestMethod.SHA256, DigestMethod.SHA512), null); + final Transform transform = fac.newTransform(ENVELOPED, (TransformParameterSpec) null); + // We don't "have to" set the reference explicitly since we're using enveloped signatures, but it helps with + // creating the XSW test cases + final Reference reference = fac.newReference(refID, digestMethod, singletonList(transform), null, null); + final SignatureMethod signatureMethod = fac.newSignatureMethod(getSignatureAlgorithmURI(privateKey), null); + final CanonicalizationMethod canonicalizationMethod = fac.newCanonicalizationMethod(c14nMethod, (C14NMethodParameterSpec) null); + + final SignedInfo signedInfo = fac.newSignedInfo(canonicalizationMethod, signatureMethod, singletonList(reference)); + + final KeyInfo keyInfo = getKeyInfo(fac, certificate); + + final DOMSignContext dsc = new DOMSignContext(privateKey, parent); + dsc.setDefaultNamespacePrefix("ds"); + // According to the schema, the signature needs to be placed after the if there is one in the document + // If there are more than one we are dealing with a so we sign the Response and add the + // Signature after the Response + NodeList issuersList = parent.getElementsByTagNameNS(SAML20_NS, "Issuer"); + if (issuersList.getLength() > 0) { + dsc.setNextSibling(issuersList.item(0).getNextSibling()); + } + + final XMLSignature signature = fac.newXMLSignature(signedInfo, keyInfo); + signature.sign(dsc); + } + + private static KeyInfo getKeyInfo(XMLSignatureFactory factory, X509Certificate certificate) throws KeyException { + KeyInfoFactory kif = factory.getKeyInfoFactory(); + javax.xml.crypto.dsig.keyinfo.X509Data data = kif.newX509Data(Collections.singletonList(certificate)); + return kif.newKeyInfo(singletonList(data)); + } + + private void wrapAndEncrypt(Node node, String tagName, X509Certificate certificate, boolean withRetrievalMethod) throws Exception { + assertThat(node, instanceOf(Element.class)); + final Element element = (Element) node; + assertThat(element.getLocalName(), equalTo(tagName)); + + // Wrap the assertion in an "EncryptedXXX" element and then replace it with the encrypted content + final Node parent = element.getParentNode(); + final Element encryptedWrapper = 
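// For orientation, a rough sketch of the enveloped signature that signElement above produces
// (prefixes and nesting are illustrative, not exact output). The Reference URI is "#" plus the ID
// attribute, which is why setIdAttribute("ID", true) must be called first, and the Signature is
// inserted directly after the Issuer element when one is present:
//
//   <Response ID="...">
//     <saml:Issuer>...</saml:Issuer>
//     <ds:Signature>
//       <ds:SignedInfo>
//         <ds:CanonicalizationMethod Algorithm="<c14nMethod>"/>
//         <ds:SignatureMethod Algorithm="<from getSignatureAlgorithmURI>"/>
//         <ds:Reference URI="#<ID>"> (enveloped-signature transform, SHA-256 or SHA-512 digest) </ds:Reference>
//       </ds:SignedInfo>
//       ...
//     </ds:Signature>
//     ...
//   </Response>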
parent.getOwnerDocument().createElementNS(element.getNamespaceURI(), "Encrypted" + tagName); + parent.replaceChild(encryptedWrapper, element); + encryptedWrapper.appendChild(element); + + // The node, once encrypted needs to be "standalone", so it needs to have all the namespaces defined locally. + // There might be a more standard way to do this, but this works... + defineRequiredNamespaces(element); + encryptElement(element, certificate, withRetrievalMethod); + } + + private void defineRequiredNamespaces(Element element) { + defineRequiredNamespaces(element, Collections.emptySet()); + } + + private void defineRequiredNamespaces(Element element, Set> parentProcessed) { + Set> processed = new HashSet<>(parentProcessed); + final Map namespaces = getNamespaces(element); + for (String prefix : namespaces.keySet()) { + final String uri = namespaces.get(prefix); + Tuple t = new Tuple<>(prefix, uri); + if (processed.contains(t) == false) { + processed.add(t); + if (Strings.isNullOrEmpty(element.getAttribute("xmlns:" + prefix))) { + element.setAttribute("xmlns:" + prefix, uri); + } + } + } + final NodeList children = element.getChildNodes(); + for (int i = 0; i < children.getLength(); i++) { + final Node child = children.item(i); + if (child instanceof Element) { + defineRequiredNamespaces((Element) child, processed); + } + } + } + + private Map getNamespaces(Node node) { + Map namespaces = new HashMap<>(); + final String prefix = node.getPrefix(); + if (Strings.hasText(prefix) && "xmlns".equals(prefix) == false) { + namespaces.put(prefix, node.getNamespaceURI()); + } + final NamedNodeMap attributes = node.getAttributes(); + if (attributes != null) { + for (int i = 0; i < attributes.getLength(); i++) { + namespaces.putAll(getNamespaces(attributes.item(i))); + } + } + return namespaces; + } + + private void encryptElement(Element element, X509Certificate certificate, boolean storeKeyWithRetrievalMethod) throws Exception { + // Save the parent node now, because it will change when we encrypt + final Node parentNode = element.getParentNode(); + final Document document = element.getOwnerDocument(); + + // Generate an AES key for the actual encryption + final KeyGenerator aesGenerator = KeyGenerator.getInstance("AES"); + aesGenerator.init(randomFrom(supportedAesKeyLengths)); + final Key aesKey = aesGenerator.generateKey(); + + // Encrypt the AES key with the public key of the recipient + final XMLCipher keyCipher = XMLCipher.getInstance(randomFrom(XMLCipher.RSA_OAEP, XMLCipher.RSA_OAEP_11)); + keyCipher.init(XMLCipher.WRAP_MODE, certificate.getPublicKey()); + final EncryptedKey encryptedKey = keyCipher.encryptKey(document, aesKey); + + // Encryption context for actual content + final XMLCipher xmlCipher = XMLCipher.getInstance(randomFrom(supportedAesTransformations)); + xmlCipher.init(XMLCipher.ENCRYPT_MODE, aesKey); + + final String keyElementId = randomId(); + + // Include the key info for passing the AES key + org.apache.xml.security.keys.KeyInfo keyInfo = new org.apache.xml.security.keys.KeyInfo(document); + if (storeKeyWithRetrievalMethod) { + keyInfo.addRetrievalMethod("#" + keyElementId, null, "http://www.w3.org/2001/04/xmlenc#EncryptedKey"); + } else { + keyInfo.add(encryptedKey); + } + EncryptedData encryptedData = xmlCipher.getEncryptedData(); + encryptedData.setKeyInfo(keyInfo); + + // Include the content element itself + // - The 3rd argument indicates whether to only encrypt the content (true) or the element itself (false) + xmlCipher.doFinal(document, element, false); + + if 
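// The branch below produces one of the two KeyInfo layouts mentioned in encryptAssertions above;
// a rough sketch of each follows (namespace prefixes and nesting are illustrative). With the key
// embedded directly (the Shibboleth-style layout):
//
//   <EncryptedAssertion>
//     <xenc:EncryptedData>
//       <ds:KeyInfo><xenc:EncryptedKey>...</xenc:EncryptedKey></ds:KeyInfo>
//       <xenc:CipherData>...</xenc:CipherData>
//     </xenc:EncryptedData>
//   </EncryptedAssertion>
//
// With a RetrievalMethod (the Okta-style layout), KeyInfo only carries a pointer and the
// EncryptedKey is appended as a sibling of the EncryptedData, carrying the Id the pointer names:
//
//   <EncryptedAssertion>
//     <xenc:EncryptedData>
//       <ds:KeyInfo><ds:RetrievalMethod URI="#<keyElementId>"/></ds:KeyInfo>
//       ...
//     </xenc:EncryptedData>
//     <xenc:EncryptedKey Id="<keyElementId>">...</xenc:EncryptedKey>
//   </EncryptedAssertion>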
(storeKeyWithRetrievalMethod) { + final Element keyElement = buildEncryptedKeyElement(document, encryptedKey, certificate); + keyElement.setAttribute("Id", keyElementId); + keyElement.setIdAttribute("Id", true); + parentNode.appendChild(keyElement); + } + } + + private Element buildEncryptedKeyElement(Document document, EncryptedKey encryptedKey, X509Certificate certificate) + throws XMLSecurityException { + final XMLCipher cipher = XMLCipher.getInstance(); + final org.apache.xml.security.keys.KeyInfo keyInfo = new org.apache.xml.security.keys.KeyInfo(document); + final X509Data x509Data = new X509Data(document); + x509Data.addCertificate(certificate); + keyInfo.add(x509Data); + encryptedKey.setKeyInfo(keyInfo); + final EncryptionMethod method = cipher.createEncryptionMethod(XMLCipher.RSA_OAEP); + method.setDigestAlgorithm(XMLCipher.SHA1); + encryptedKey.setEncryptionMethod(method); + return cipher.martial(document, encryptedKey); + } + + private Response toResponse(String xml) throws SAXException, IOException, ParserConfigurationException { + final DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); + dbf.setNamespaceAware(true); + final Document doc = dbf.newDocumentBuilder().parse(new InputSource(new StringReader(xml))); + return authenticator.buildXmlObject(doc.getDocumentElement(), Response.class); + } + + private String getSimpleResponse(Instant now) { + return getSimpleResponse(now, randomAlphaOfLengthBetween(12, 18), randomId()); + } + + private String getSimpleResponse(Instant now, String nameId, String sessionindex) { + + Instant validUntil = now.plusSeconds(30); + return "\n" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "" + + "" + IDP_ENTITY_ID + "" + + "" + + "" + nameId + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + PASSWORD_AUTHN_CTX + "" + + "" + + "" + + "" + + "daredevil" + + "" + + "" + + "defenders" + + "netflix" + + "" + + "" + + ""; + } + + private String getResponseWithAudienceRestriction(String requiredAudience) { + return getSimpleResponse(clock.instant()).replaceFirst("" + + "" + + requiredAudience + + "" + + "" + + "$0"); + } + + private String randomId() { + return SamlUtils.generateSecureNCName(randomIntBetween(12, 36)); + } + + private SamlToken token(String content) { + return token(content.getBytes(StandardCharsets.UTF_8)); + } + + private SamlToken token(byte[] content) { + return new SamlToken(content, singletonList(requestId)); + } + +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthnRequestBuilderTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthnRequestBuilderTests.java new file mode 100644 index 0000000000000..b1b1b3098f063 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthnRequestBuilderTests.java @@ -0,0 +1,117 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.saml; + +import java.time.Clock; + +import org.joda.time.Instant; +import org.junit.Before; +import org.opensaml.saml.common.xml.SAMLConstants; +import org.opensaml.saml.saml2.core.AuthnRequest; +import org.opensaml.saml.saml2.core.NameID; +import org.opensaml.saml.saml2.metadata.EntityDescriptor; +import org.opensaml.saml.saml2.metadata.IDPSSODescriptor; +import org.opensaml.saml.saml2.metadata.SingleSignOnService; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class SamlAuthnRequestBuilderTests extends SamlTestCase { + + private static final String SP_ENTITY_ID = "https://sp.example.com/"; + private static final String IDP_ENTITY_ID = "https://idp.example.net/"; + + private static final String ACS_URL = "https://sp.example.com/saml/acs"; + private static final String IDP_URL = "https://idp.example.net/saml/sso/redirect"; + private EntityDescriptor idpDescriptor; + + @Before + public void init() throws Exception { + SamlUtils.initialize(logger); + + final SingleSignOnService sso = SamlUtils.buildObject(SingleSignOnService.class, SingleSignOnService.DEFAULT_ELEMENT_NAME); + sso.setLocation(IDP_URL); + sso.setBinding(SAMLConstants.SAML2_REDIRECT_BINDING_URI); + + final IDPSSODescriptor idpRole = SamlUtils.buildObject(IDPSSODescriptor.class, IDPSSODescriptor.DEFAULT_ELEMENT_NAME); + idpRole.getSingleSignOnServices().add(sso); + + idpDescriptor = SamlUtils.buildObject(EntityDescriptor.class, EntityDescriptor.DEFAULT_ELEMENT_NAME); + idpDescriptor.setEntityID(IDP_ENTITY_ID); + idpDescriptor.getRoleDescriptors().add(idpRole); + } + + public void testBuildRequestWithPersistentNameAndNoForceAuth() throws Exception { + SpConfiguration sp = new SpConfiguration(SP_ENTITY_ID, ACS_URL, null, null, null); + final SamlAuthnRequestBuilder builder = new SamlAuthnRequestBuilder( + sp, SAMLConstants.SAML2_POST_BINDING_URI, + idpDescriptor, SAMLConstants.SAML2_REDIRECT_BINDING_URI, + Clock.systemUTC()); + builder.nameIDPolicy(new SamlAuthnRequestBuilder.NameIDPolicySettings(NameID.PERSISTENT, false, SP_ENTITY_ID)); + builder.forceAuthn(null); + + final AuthnRequest request = buildAndValidateAuthnRequest(builder); + + assertThat(request.getIssuer().getValue(), equalTo(SP_ENTITY_ID)); + assertThat(request.getProtocolBinding(), equalTo(SAMLConstants.SAML2_POST_BINDING_URI)); + + assertThat(request.getAssertionConsumerServiceURL(), equalTo(ACS_URL)); + + assertThat(request.getNameIDPolicy(), notNullValue()); + assertThat(request.getNameIDPolicy().getFormat(), equalTo(NameID.PERSISTENT)); + assertThat(request.getNameIDPolicy().getSPNameQualifier(), equalTo(SP_ENTITY_ID)); + assertThat(request.getNameIDPolicy().getAllowCreate(), equalTo(Boolean.FALSE)); + + assertThat(request.isForceAuthn(), equalTo(Boolean.FALSE)); + } + + public void testBuildRequestWithTransientNameAndForceAuthTrue() throws Exception { + SpConfiguration sp = new SpConfiguration(SP_ENTITY_ID, ACS_URL, null, null, null); + final SamlAuthnRequestBuilder builder = new SamlAuthnRequestBuilder( + sp, SAMLConstants.SAML2_POST_BINDING_URI, + idpDescriptor, SAMLConstants.SAML2_REDIRECT_BINDING_URI, + Clock.systemUTC()); + + final String noSpNameQualifier = randomBoolean() ? 
"" : null; + builder.nameIDPolicy(new SamlAuthnRequestBuilder.NameIDPolicySettings(NameID.TRANSIENT, true, noSpNameQualifier)); + builder.forceAuthn(Boolean.TRUE); + + final AuthnRequest request = buildAndValidateAuthnRequest(builder); + + assertThat(request.getIssuer().getValue(), equalTo(SP_ENTITY_ID)); + assertThat(request.getProtocolBinding(), equalTo(SAMLConstants.SAML2_POST_BINDING_URI)); + + assertThat(request.getAssertionConsumerServiceURL(), equalTo(ACS_URL)); + + assertThat(request.getNameIDPolicy(), notNullValue()); + assertThat(request.getNameIDPolicy().getFormat(), equalTo(NameID.TRANSIENT)); + assertThat(request.getNameIDPolicy().getSPNameQualifier(), nullValue()); + assertThat(request.getNameIDPolicy().getAllowCreate(), equalTo(Boolean.TRUE)); + + assertThat(request.isForceAuthn(), equalTo(Boolean.TRUE)); + } + + private AuthnRequest buildAndValidateAuthnRequest(SamlAuthnRequestBuilder builder) { + Instant before = Instant.now(); + final AuthnRequest request = builder.build(); + Instant after = Instant.now(); + assertThat(request, notNullValue()); + + assertThat(request.getID(), notNullValue()); + assertThat(request.getID().length(), greaterThan(20)); + + assertThat(request.getIssuer(), notNullValue()); + + assertThat(request.getIssueInstant(), notNullValue()); + assertThat(request.getIssueInstant().isBefore(before), equalTo(false)); + assertThat(request.getIssueInstant().isAfter(after), equalTo(false)); + assertThat(request.getDestination(), equalTo(IDP_URL)); + return request; + } + +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutRequestHandlerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutRequestHandlerTests.java new file mode 100644 index 0000000000000..06d0ce4a179c0 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutRequestHandlerTests.java @@ -0,0 +1,227 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.saml; + +import java.io.UnsupportedEncodingException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.time.Clock; +import java.util.Arrays; +import java.util.Collections; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.joda.time.DateTime; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.opensaml.saml.common.SAMLObject; +import org.opensaml.saml.saml2.core.Issuer; +import org.opensaml.saml.saml2.core.LogoutRequest; +import org.opensaml.saml.saml2.core.NameID; +import org.opensaml.security.x509.X509Credential; +import org.opensaml.xmlsec.signature.support.SignatureConstants; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class SamlLogoutRequestHandlerTests extends SamlTestCase { + + private static final String IDP_ENTITY_ID = "https://idp.test/"; + private static final String LOGOUT_URL = "https://sp.test/saml/logout"; + + private static X509Credential credential; + private Clock clock; + + @BeforeClass + public static void setupCredential() throws Exception { + credential = (X509Credential)buildOpenSamlCredential(createKeyPair()).get(0); + } + + @AfterClass + public static void clearCredential() throws Exception { + credential = null; + } + + @Before + public void setupClock() throws Exception { + clock = Clock.systemUTC(); + } + + public void testLogoutWithValidSignatureIsParsedSuccessfully() throws Exception { + final LogoutRequest logoutRequest = buildLogoutRequest(); + + final String query = buildSignedQueryString(logoutRequest); + + final SamlLogoutRequestHandler.Result result = buildHandler().parseFromQueryString(query); + assertResultMatchesRequest(result, logoutRequest); + } + + private void assertResultMatchesRequest(SamlLogoutRequestHandler.Result result, LogoutRequest logoutRequest) { + assertThat(result, notNullValue()); + assertThat(result.getNameId(), notNullValue()); + assertThat(result.getNameId().idpNameQualifier, equalTo(logoutRequest.getNameID().getNameQualifier())); + assertThat(result.getNameId().spNameQualifier, equalTo(logoutRequest.getNameID().getSPNameQualifier())); + assertThat(result.getNameId().value, equalTo(logoutRequest.getNameID().getValue())); + assertThat(result.getNameId().spProvidedId, nullValue()); + assertThat(result.getSession(), nullValue()); + assertThat(result.getRequestId(), equalTo(logoutRequest.getID())); + } + + public void testLogoutWithIncorrectIssuerIsRejected() throws Exception { + final LogoutRequest logoutRequest = buildLogoutRequest(); + logoutRequest.getIssuer().setValue("https://attacker.bot.net/"); + + final String query = buildSignedQueryString(logoutRequest); + + final SamlLogoutRequestHandler handler = buildHandler(); + final ElasticsearchSecurityException exception = expectSamlException(() -> handler.parseFromQueryString(query)); + assertThat(exception.getMessage(), containsString("Issuer")); 
+ assertThat(exception.getMessage(), containsString(IDP_ENTITY_ID)); + assertThat(exception.getMessage(), containsString(logoutRequest.getIssuer().getValue())); + } + + public void testLogoutWithIncorrectDestinationIsRejected() throws Exception { + final LogoutRequest logoutRequest = buildLogoutRequest(); + logoutRequest.setDestination("https://attacker.bot.net/"); + + final String query = buildSignedQueryString(logoutRequest); + + final SamlLogoutRequestHandler handler = buildHandler(); + final ElasticsearchSecurityException exception = expectSamlException(() -> handler.parseFromQueryString(query)); + assertThat(exception.getMessage(), containsString("destination")); + assertThat(exception.getMessage(), containsString(LOGOUT_URL)); + assertThat(exception.getMessage(), containsString(logoutRequest.getDestination())); + } + + public void testLogoutWithSwitchedSignatureFailsValidation() throws Exception { + final LogoutRequest fakeLogoutRequest = buildLogoutRequest(); + final LogoutRequest realLogoutRequest = buildLogoutRequest(); + final String fakeQuery = buildSignedQueryString(fakeLogoutRequest); + final String realQuery = buildSignedQueryString(realLogoutRequest); + + final String tamperedQuery = fakeQuery.replaceFirst("&Signature=.*$", "") + + "&Signature=" + + realQuery.replaceFirst("^.*&Signature=", ""); + + final SamlLogoutRequestHandler handler = buildHandler(); + final ElasticsearchSecurityException exception = expectSamlException(() -> handler.parseFromQueryString(tamperedQuery)); + assertThat(exception.getMessage(), containsString("SAML Signature")); + assertThat(exception.getMessage(), containsString("could not be validated")); + } + + public void testLogoutWithSwitchedAlgorithmFailsValidation() throws Exception { + final LogoutRequest logoutRequest = buildLogoutRequest(); + final String realQuery = buildSignedQueryString(logoutRequest); + + final String tamperedQuery = realQuery.replaceFirst( + urlEncode(SignatureConstants.ALGO_ID_SIGNATURE_RSA_SHA256), + urlEncode(SignatureConstants.ALGO_ID_SIGNATURE_RSA_SHA1)); + + final SamlLogoutRequestHandler handler = buildHandler(); + assertThat(handler.parseFromQueryString(realQuery), notNullValue()); + + final ElasticsearchSecurityException exception = expectSamlException(() -> handler.parseFromQueryString(tamperedQuery)); + assertThat(exception.getMessage(), containsString("SAML Signature")); + assertThat(exception.getMessage(), containsString("could not be validated")); + } + + public void testLogoutWithoutSignatureFails() throws Exception { + final LogoutRequest logoutRequest = buildLogoutRequest(); + final String query = buildQueryString(logoutRequest, new String[0]); + final SamlLogoutRequestHandler handler = buildHandler(); + final ElasticsearchSecurityException exception = expectSamlException(() -> handler.parseFromQueryString(query)); + assertThat(exception.getMessage(), containsString("is not signed")); + } + + /** + * The spec states that this should never happen (SAML bindings spec, v2.0 WD6, section 3.4.4.1, point 3). + * OneLogin includes newlines anyway. 
+ */ + public void testWhiteSpaceInSamlRequestIsIgnored() throws Exception { + final LogoutRequest logoutRequest = buildLogoutRequest(); + final SigningConfiguration signingConfiguration = new SigningConfiguration(Sets.newHashSet("*"), credential); + final String url = new SamlRedirect(logoutRequest, signingConfiguration) { + @Override + protected String deflateAndBase64Encode(SAMLObject message) throws Exception { + return super.deflateAndBase64Encode(message).replaceAll(".{48}", "$0\n"); + } + }.getRedirectUrl(); + final String query = new URI(url).getRawQuery(); + final SamlLogoutRequestHandler handler = buildHandler(); + final SamlLogoutRequestHandler.Result result = handler.parseFromQueryString(query); + assertResultMatchesRequest(result, logoutRequest); + } + + public void testRelayStateIsReturnedInRedirect() throws Exception { + final LogoutRequest logoutRequest = buildLogoutRequest(); + final SigningConfiguration signingConfiguration = new SigningConfiguration(Sets.newHashSet("*"), credential); + final String url = new SamlRedirect(logoutRequest, signingConfiguration).getRedirectUrl("Hail Hydra"); + final String query = new URI(url).getRawQuery(); + final SamlLogoutRequestHandler handler = buildHandler(); + final SamlLogoutRequestHandler.Result result = handler.parseFromQueryString(query); + assertResultMatchesRequest(result, logoutRequest); + assertThat(result.getRelayState(), equalTo("Hail Hydra")); + } + + private String urlEncode(String str) throws UnsupportedEncodingException { + return URLEncoder.encode(str, StandardCharsets.US_ASCII.name()); + } + + private String buildSignedQueryString(LogoutRequest logoutRequest) throws URISyntaxException { + return buildQueryString(logoutRequest, "*"); + } + + private String buildQueryString(LogoutRequest logoutRequest, String... 
signTypes) throws URISyntaxException { + final SigningConfiguration signingConfiguration = new SigningConfiguration(Sets.newHashSet(signTypes), credential); + final String url = new SamlRedirect(logoutRequest, signingConfiguration).getRedirectUrl(); + return new URI(url).getRawQuery(); + } + + private LogoutRequest buildLogoutRequest() { + final LogoutRequest logoutRequest = SamlUtils.buildObject(LogoutRequest.class, LogoutRequest.DEFAULT_ELEMENT_NAME); + logoutRequest.setDestination(LOGOUT_URL); + logoutRequest.setIssueInstant(new DateTime(clock.millis())); + logoutRequest.setID(SamlUtils.generateSecureNCName(randomIntBetween(8, 30))); + final Issuer issuer = SamlUtils.buildObject(Issuer.class, Issuer.DEFAULT_ELEMENT_NAME); + issuer.setValue(IDP_ENTITY_ID); + logoutRequest.setIssuer(issuer); + final NameID nameId = SamlUtils.buildObject(NameID.class, NameID.DEFAULT_ELEMENT_NAME); + nameId.setValue(randomAlphaOfLengthBetween(12, 36)); + logoutRequest.setNameID(nameId); + return logoutRequest; + } + + private SamlLogoutRequestHandler buildHandler() throws Exception { + final Settings globalSettings = Settings.builder().put("path.home", createTempDir()).build(); + final Settings realmSettings = Settings.EMPTY; + final IdpConfiguration idp = new IdpConfiguration(IDP_ENTITY_ID, () -> Collections.singletonList(credential)); + + final X509Credential spCredential = (X509Credential)buildOpenSamlCredential(createKeyPair()).get(0); + final SigningConfiguration signingConfiguration = new SigningConfiguration(Collections.singleton("*"), spCredential); + final SpConfiguration sp = new SpConfiguration("https://sp.test/", "https://sp.test/saml/asc", LOGOUT_URL, + signingConfiguration, Arrays.asList(spCredential)); + final Environment env = TestEnvironment.newEnvironment(globalSettings); + return new SamlLogoutRequestHandler( + new RealmConfig("saml_test", realmSettings, globalSettings, env, new ThreadContext(globalSettings)), + clock, + idp, + sp, + TimeValue.timeValueSeconds(1) + ); + } + +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutRequestMessageBuilderTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutRequestMessageBuilderTests.java new file mode 100644 index 0000000000000..252cf2f0d1c4c --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutRequestMessageBuilderTests.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.saml; + +import java.time.Clock; +import java.time.Instant; +import java.time.ZoneOffset; + +import org.hamcrest.Matchers; +import org.junit.Before; +import org.opensaml.saml.common.xml.SAMLConstants; +import org.opensaml.saml.saml2.core.LogoutRequest; +import org.opensaml.saml.saml2.core.NameID; +import org.opensaml.saml.saml2.metadata.EntityDescriptor; +import org.opensaml.saml.saml2.metadata.IDPSSODescriptor; +import org.opensaml.saml.saml2.metadata.SingleLogoutService; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.iterableWithSize; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class SamlLogoutRequestMessageBuilderTests extends SamlTestCase { + + private static final String SP_ENTITY_ID = "http://sp.saml/"; + private static final String IDP_ENTITY_ID = "http://sp.saml/"; + + private SpConfiguration sp; + private EntityDescriptor idp; + private IDPSSODescriptor idpRole; + private NameID nameId; + private String session; + + @Before + public void init() throws Exception { + SamlUtils.initialize(logger); + sp = new SpConfiguration(SP_ENTITY_ID, "http://sp.example.com/saml/acs", null, null, null); + idpRole = SamlUtils.buildObject(IDPSSODescriptor.class, IDPSSODescriptor.DEFAULT_ELEMENT_NAME); + idp = SamlUtils.buildObject(EntityDescriptor.class, EntityDescriptor.DEFAULT_ELEMENT_NAME); + idp.setEntityID(IDP_ENTITY_ID); + idp.getRoleDescriptors().add(idpRole); + nameId = new SamlNameId(NameID.PERSISTENT, randomAlphaOfLengthBetween(12, 24), IDP_ENTITY_ID, SP_ENTITY_ID, null).asXml(); + session = randomAlphaOfLengthBetween(8, 16); + } + + public void testBuildNullRequestWhenLogoutNotSupportedByIdp() throws Exception { + idpRole.getSingleLogoutServices().clear(); + final SamlLogoutRequestMessageBuilder builder = new SamlLogoutRequestMessageBuilder(Clock.systemUTC(), sp, idp, nameId, session); + assertThat(builder.build(), Matchers.nullValue()); + } + + public void testBuildValidRequest() throws Exception { + final SingleLogoutService sloPost = logoutService(SAMLConstants.SAML2_POST_BINDING_URI, + "http://idp.example.com/saml/logout/post"); + idpRole.getSingleLogoutServices().add(sloPost); + + final SingleLogoutService sloRedirect = logoutService(SAMLConstants.SAML2_REDIRECT_BINDING_URI, + "http://idp.example.com/saml/logout/redirect"); + idpRole.getSingleLogoutServices().add(sloRedirect); + + final SingleLogoutService sloArtifact = logoutService(SAMLConstants.SAML2_ARTIFACT_BINDING_URI, + "http://idp.example.com/saml/logout/artifact"); + idpRole.getSingleLogoutServices().add(sloArtifact); + + Clock fixedClock = Clock.fixed(Instant.now(), ZoneOffset.UTC); + final SamlLogoutRequestMessageBuilder builder = new SamlLogoutRequestMessageBuilder(fixedClock, sp, idp, nameId, session); + final LogoutRequest logoutRequest = builder.build(); + assertThat(logoutRequest, notNullValue()); + assertThat(logoutRequest.getReason(), nullValue()); + assertThat(logoutRequest.getBaseID(), nullValue()); + assertThat(logoutRequest.getEncryptedID(), nullValue()); + assertThat(logoutRequest.getNameID(), notNullValue()); + assertThat(logoutRequest.getNameID().getFormat(), equalTo(NameID.PERSISTENT)); + assertThat(logoutRequest.getNameID().getValue(), equalTo(nameId.getValue())); + assertThat(logoutRequest.getNameID().getNameQualifier(), equalTo(IDP_ENTITY_ID)); + 
assertThat(logoutRequest.getNameID().getSPNameQualifier(), equalTo(SP_ENTITY_ID)); + assertThat(logoutRequest.getConsent(), nullValue()); + assertThat(logoutRequest.getNotOnOrAfter(), nullValue()); + assertThat(logoutRequest.getIssueInstant(), notNullValue()); + assertThat(logoutRequest.getIssueInstant().getMillis(), equalTo(fixedClock.millis())); + assertThat(logoutRequest.getSessionIndexes(), iterableWithSize(1)); + assertThat(logoutRequest.getSessionIndexes().get(0).getSessionIndex(), equalTo(session)); + assertThat(logoutRequest.getDestination(), equalTo("http://idp.example.com/saml/logout/redirect")); + assertThat(logoutRequest.getID(), notNullValue()); + assertThat(logoutRequest.getID().length(), greaterThan(20)); + assertThat(logoutRequest.getIssuer(), notNullValue()); + assertThat(logoutRequest.getIssuer().getValue(), equalTo(sp.getEntityId())); + } + + private SingleLogoutService logoutService(String binding, String location) { + final SingleLogoutService sls = SamlUtils.buildObject(SingleLogoutService.class, SingleLogoutService.DEFAULT_ELEMENT_NAME); + sls.setBinding(binding); + sls.setLocation(location); + return sls; + } + +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommandTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommandTests.java new file mode 100644 index 0000000000000..3fdf01bf135f2 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommandTests.java @@ -0,0 +1,698 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.saml; + +import joptsimple.OptionSet; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.ssl.CertUtils; +import org.junit.Before; +import org.opensaml.saml.common.xml.SAMLConstants; +import org.opensaml.saml.saml2.metadata.EntityDescriptor; +import org.opensaml.saml.saml2.metadata.RequestedAttribute; +import org.opensaml.saml.saml2.metadata.SPSSODescriptor; +import org.opensaml.saml.security.impl.SAMLSignatureProfileValidator; +import org.opensaml.security.credential.Credential; +import org.opensaml.security.credential.UsageType; +import org.opensaml.security.x509.BasicX509Credential; +import org.opensaml.xmlsec.keyinfo.KeyInfoSupport; +import org.opensaml.xmlsec.signature.Signature; +import org.opensaml.xmlsec.signature.X509Certificate; +import org.opensaml.xmlsec.signature.X509Data; +import org.opensaml.xmlsec.signature.support.SignatureValidator; +import org.w3c.dom.Element; + +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.KeyStore; +import java.security.PrivateKey; +import java.security.cert.Certificate; +import java.security.cert.CertificateException; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Set; + +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.iterableWithSize; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.startsWith; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SamlMetadataCommandTests extends SamlTestCase { + + private KeyStoreWrapper keyStore; + + @Before + public void setup() throws Exception { + SamlUtils.initialize(logger); + this.keyStore = mock(KeyStoreWrapper.class); + when(keyStore.isLoaded()).thenReturn(true); + } + + public void testDefaultOptions() throws Exception { + final Path certPath = getDataPath("saml.crt"); + final Path keyPath = getDataPath("saml.key"); + + final SamlMetadataCommand command = new SamlMetadataCommand((e) -> randomFrom(keyStore, null)); + final OptionSet options = command.getParser().parse(new String[0]); + + final boolean useSigningCredentials = randomBoolean(); + final Settings.Builder settingsBuilder = Settings.builder() + .put("path.home", createTempDir()) + .put(RealmSettings.PREFIX + "my_saml.type", "saml") + .put(RealmSettings.PREFIX + "my_saml.order", 1) + .put(RealmSettings.PREFIX + "my_saml.idp.entity_id", "https://okta.my.corp/") + .put(RealmSettings.PREFIX + "my_saml.sp.entity_id", "https://kibana.my.corp/") + .put(RealmSettings.PREFIX + "my_saml.sp.acs", "https://kibana.my.corp/saml/login") + .put(RealmSettings.PREFIX + "my_saml.sp.logout", "https://kibana.my.corp/saml/logout") 
+ .put(RealmSettings.PREFIX + "my_saml.attributes.principal", "urn:oid:0.9.2342.19200300.100.1.1"); + if (useSigningCredentials) { + settingsBuilder.put(RealmSettings.PREFIX + "my_saml.signing.certificate", certPath.toString()) + .put(RealmSettings.PREFIX + "my_saml.signing.key", keyPath.toString()); + } + final Settings settings = settingsBuilder.build(); + final Environment env = TestEnvironment.newEnvironment(settings); + + final MockTerminal terminal = new MockTerminal(); + + // What is the friendly name for "principal" attribute "urn:oid:0.9.2342.19200300.100.1.1" [default: principal] + terminal.addTextInput(""); + + final EntityDescriptor descriptor = command.buildEntityDescriptor(terminal, options, env); + + assertThat(descriptor, notNullValue()); + assertThat(descriptor.getEntityID(), equalTo("https://kibana.my.corp/")); + + assertThat(descriptor.getRoleDescriptors(), iterableWithSize(1)); + assertThat(descriptor.getRoleDescriptors().get(0), instanceOf(SPSSODescriptor.class)); + final SPSSODescriptor spDescriptor = (SPSSODescriptor) descriptor.getRoleDescriptors().get(0); + + assertThat(spDescriptor.getAssertionConsumerServices(), iterableWithSize(1)); + assertThat(spDescriptor.getAssertionConsumerServices().get(0).getLocation(), equalTo("https://kibana.my.corp/saml/login")); + assertThat(spDescriptor.getAssertionConsumerServices().get(0).isDefault(), equalTo(true)); + assertThat(spDescriptor.getAssertionConsumerServices().get(0).getIndex(), equalTo(1)); + assertThat(spDescriptor.getAssertionConsumerServices().get(0).getBinding(), equalTo(SAMLConstants.SAML2_POST_BINDING_URI)); + + assertThat(spDescriptor.getAttributeConsumingServices(), iterableWithSize(1)); + assertThat(spDescriptor.getAttributeConsumingServices().get(0).isDefault(), equalTo(true)); + assertThat(spDescriptor.getAttributeConsumingServices().get(0).getIndex(), equalTo(1)); + assertThat(spDescriptor.getAttributeConsumingServices().get(0).getRequestAttributes(), iterableWithSize(1)); + final RequestedAttribute uidAttribute = spDescriptor.getAttributeConsumingServices().get(0).getRequestAttributes().get(0); + assertThat(uidAttribute.getName(), equalTo("urn:oid:0.9.2342.19200300.100.1.1")); + assertThat(uidAttribute.getFriendlyName(), equalTo("principal")); + + assertThat(spDescriptor.getSingleLogoutServices(), iterableWithSize(1)); + assertThat(spDescriptor.getSingleLogoutServices().get(0).getLocation(), equalTo("https://kibana.my.corp/saml/logout")); + assertThat(spDescriptor.getSingleLogoutServices().get(0).getBinding(), equalTo(SAMLConstants.SAML2_REDIRECT_BINDING_URI)); + + assertThat(spDescriptor.isAuthnRequestsSigned(), equalTo(useSigningCredentials)); + assertThat(spDescriptor.getWantAssertionsSigned(), equalTo(true)); + + if (useSigningCredentials) { + assertThat(spDescriptor.getKeyDescriptors(), iterableWithSize(1)); + assertThat(spDescriptor.getKeyDescriptors().get(0).getUse(), equalTo(UsageType.SIGNING)); + assertThat(spDescriptor.getKeyDescriptors().get(0).getKeyInfo().getPGPDatas(), iterableWithSize(0)); + assertThat(spDescriptor.getKeyDescriptors().get(0).getKeyInfo().getMgmtDatas(), iterableWithSize(0)); + assertThat(spDescriptor.getKeyDescriptors().get(0).getKeyInfo().getSPKIDatas(), iterableWithSize(0)); + final List x509 = spDescriptor.getKeyDescriptors().get(0).getKeyInfo().getX509Datas(); + assertThat(x509, iterableWithSize(1)); + assertThat(x509.get(0).getX509Certificates(), iterableWithSize(1)); + final X509Certificate xmlCert = x509.get(0).getX509Certificates().get(0); + 
assertThat(xmlCert.getValue(), startsWith("MIIDWDCCAkCgAwIBAgIVANRTZaFrK+Pz19O8TZsb3HSJmAWpMA0GCSqGSIb3DQEB")); + + // Verify that OpenSAML things the XML representation is the same as our input + final java.security.cert.X509Certificate javaCert = KeyInfoSupport.getCertificate(xmlCert); + assertThat(CertUtils.readCertificates(Collections.singletonList(certPath)), arrayContaining(javaCert)); + } else { + assertThat(spDescriptor.getKeyDescriptors(), iterableWithSize(0)); + } + } + + public void testFailIfMultipleRealmsExist() throws Exception { + final Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put(RealmSettings.PREFIX + "saml_a.type", "saml") + .put(RealmSettings.PREFIX + "saml_a.sp.entity_id", "https://saml.a/") + .put(RealmSettings.PREFIX + "saml_a.sp.acs", "https://saml.a/") + .put(RealmSettings.PREFIX + "saml_b.type", "saml") + .put(RealmSettings.PREFIX + "saml_b.sp.entity_id", "https://saml.b/") + .put(RealmSettings.PREFIX + "saml_b.sp.acs", "https://saml.b/") + .build(); + final Environment env = TestEnvironment.newEnvironment(settings); + + final SamlMetadataCommand command = new SamlMetadataCommand((e) -> randomFrom(keyStore, null)); + final OptionSet options = command.getParser().parse(new String[0]); + + final MockTerminal terminal = new MockTerminal(); + + final UserException userException = expectThrows(UserException.class, () -> command.buildEntityDescriptor(terminal, options, env)); + assertThat(userException.getMessage(), containsString("multiple SAML realms")); + assertThat(terminal.getOutput(), containsString("saml_a")); + assertThat(terminal.getOutput(), containsString("saml_b")); + assertThat(terminal.getOutput(), containsString("Use the -realm option")); + } + + public void testSpecifyRealmNameAsParameter() throws Exception { + final Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put(RealmSettings.PREFIX + "saml_a.type", "saml") + .put(RealmSettings.PREFIX + "saml_a.sp.entity_id", "https://saml.a/") + .put(RealmSettings.PREFIX + "saml_a.sp.acs", "https://saml.a/acs") + .put(RealmSettings.PREFIX + "saml_b.type", "saml") + .put(RealmSettings.PREFIX + "saml_b.sp.entity_id", "https://saml.b/") + .put(RealmSettings.PREFIX + "saml_b.sp.acs", "https://saml.b/acs") + .build(); + final Environment env = TestEnvironment.newEnvironment(settings); + + final SamlMetadataCommand command = new SamlMetadataCommand((e) -> keyStore); + final OptionSet options = command.getParser().parse(new String[] { + "-realm", "saml_b" + }); + + final MockTerminal terminal = new MockTerminal(); + final EntityDescriptor descriptor = command.buildEntityDescriptor(terminal, options, env); + + assertThat(descriptor, notNullValue()); + assertThat(descriptor.getEntityID(), equalTo("https://saml.b/")); + + assertThat(descriptor.getRoleDescriptors(), iterableWithSize(1)); + assertThat(descriptor.getRoleDescriptors().get(0), instanceOf(SPSSODescriptor.class)); + final SPSSODescriptor spDescriptor = (SPSSODescriptor) descriptor.getRoleDescriptors().get(0); + + assertThat(spDescriptor.getAssertionConsumerServices(), iterableWithSize(1)); + assertThat(spDescriptor.getAssertionConsumerServices().get(0).getLocation(), equalTo("https://saml.b/acs")); + } + + public void testHandleAttributes() throws Exception { + final Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put(RealmSettings.PREFIX + "saml1.type", "saml") + .put(RealmSettings.PREFIX + "saml1.sp.entity_id", "https://saml.example.com/") + 
.put(RealmSettings.PREFIX + "saml1.sp.acs", "https://saml.example.com/") + .put(RealmSettings.PREFIX + "saml1.attributes.principal", "urn:oid:0.9.2342.19200300.100.1.1") + .put(RealmSettings.PREFIX + "saml1.attributes.name", "displayName") + .build(); + final Environment env = TestEnvironment.newEnvironment(settings); + + final SamlMetadataCommand command = new SamlMetadataCommand((e) -> randomFrom(keyStore, null)); + final OptionSet options = command.getParser().parse(new String[] { + "-attribute", "urn:oid:0.9.2342.19200300.100.1.3", + "-attribute", "groups" + }); + + final MockTerminal terminal = new MockTerminal(); + + // What is the friendly name for command line attribute "urn:oid:0.9.2342.19200300.100.1.3" [default: none] + terminal.addTextInput("mail"); + // What is the standard (urn) name for attribute "groups" (required) + terminal.addTextInput("urn:oid:1.3.6.1.4.1.5923.1.5.1.1"); + // What is the standard (urn) name for "name" attribute "displayName" (required) + terminal.addTextInput("urn:oid:2.16.840.1.113730.3.1.241"); + // What is the friendly name for "principal" "urn:oid:0.9.2342.19200300.100.1.1" [default: principal] + terminal.addTextInput("uid"); + + final EntityDescriptor descriptor = command.buildEntityDescriptor(terminal, options, env); + + assertThat(descriptor, notNullValue()); + assertThat(descriptor.getEntityID(), equalTo("https://saml.example.com/")); + + assertThat(descriptor.getRoleDescriptors(), iterableWithSize(1)); + assertThat(descriptor.getRoleDescriptors().get(0), instanceOf(SPSSODescriptor.class)); + final SPSSODescriptor spDescriptor = (SPSSODescriptor) descriptor.getRoleDescriptors().get(0); + + assertThat(spDescriptor.getAttributeConsumingServices(), iterableWithSize(1)); + final List attributes = spDescriptor.getAttributeConsumingServices().get(0).getRequestAttributes(); + assertThat(attributes, iterableWithSize(4)); + + assertThat(attributes.get(0).getFriendlyName(), equalTo("mail")); + assertThat(attributes.get(0).getName(), equalTo("urn:oid:0.9.2342.19200300.100.1.3")); + + assertThat(attributes.get(1).getFriendlyName(), equalTo("groups")); + assertThat(attributes.get(1).getName(), equalTo("urn:oid:1.3.6.1.4.1.5923.1.5.1.1")); + + assertThat(attributes.get(2).getFriendlyName(), equalTo("displayName")); + assertThat(attributes.get(2).getName(), equalTo("urn:oid:2.16.840.1.113730.3.1.241")); + + assertThat(attributes.get(3).getFriendlyName(), equalTo("uid")); + assertThat(attributes.get(3).getName(), equalTo("urn:oid:0.9.2342.19200300.100.1.1")); + } + + public void testHandleAttributesInBatchMode() throws Exception { + final Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put(RealmSettings.PREFIX + "saml1.type", "saml") + .put(RealmSettings.PREFIX + "saml1.sp.entity_id", "https://saml.example.com/") + .put(RealmSettings.PREFIX + "saml1.sp.acs", "https://saml.example.com/") + .put(RealmSettings.PREFIX + "saml1.attributes.principal", "urn:oid:0.9.2342.19200300.100.1.1") + .build(); + final Environment env = TestEnvironment.newEnvironment(settings); + + final SamlMetadataCommand command = new SamlMetadataCommand((e) -> randomFrom(keyStore, null)); + final OptionSet options = command.getParser().parse(new String[] { + "-attribute", "urn:oid:0.9.2342.19200300.100.1.3", + "-batch" + }); + + final MockTerminal terminal = new MockTerminal(); + final EntityDescriptor descriptor = command.buildEntityDescriptor(terminal, options, env); + + assertThat(descriptor, notNullValue()); + assertThat(descriptor.getEntityID(), 
equalTo("https://saml.example.com/")); + + assertThat(descriptor.getRoleDescriptors(), iterableWithSize(1)); + assertThat(descriptor.getRoleDescriptors().get(0), instanceOf(SPSSODescriptor.class)); + final SPSSODescriptor spDescriptor = (SPSSODescriptor) descriptor.getRoleDescriptors().get(0); + + assertThat(spDescriptor.getAttributeConsumingServices(), iterableWithSize(1)); + final List attributes = spDescriptor.getAttributeConsumingServices().get(0).getRequestAttributes(); + assertThat(attributes, iterableWithSize(2)); + + assertThat(attributes.get(0).getFriendlyName(), nullValue()); + assertThat(attributes.get(0).getName(), equalTo("urn:oid:0.9.2342.19200300.100.1.3")); + + assertThat(attributes.get(1).getFriendlyName(), equalTo("principal")); + assertThat(attributes.get(1).getName(), equalTo("urn:oid:0.9.2342.19200300.100.1.1")); + } + + public void testSigningMetadataWithPfx() throws Exception { + final Path certPath = getDataPath("saml.crt"); + final Path keyPath = getDataPath("saml.key"); + final Path p12Path = getDataPath("saml.p12"); + final SamlMetadataCommand command = new SamlMetadataCommand((e) -> randomFrom(keyStore, null)); + final OptionSet options = command.getParser().parse(new String[]{ + "-signing-bundle", p12Path.toString() + }); + + final boolean useSigningCredentials = randomBoolean(); + final Settings.Builder settingsBuilder = Settings.builder() + .put("path.home", createTempDir()) + .put(RealmSettings.PREFIX + "my_saml.type", "saml") + .put(RealmSettings.PREFIX + "my_saml.order", 1) + .put(RealmSettings.PREFIX + "my_saml.idp.entity_id", "https://okta.my.corp/") + .put(RealmSettings.PREFIX + "my_saml.sp.entity_id", "https://kibana.my.corp/") + .put(RealmSettings.PREFIX + "my_saml.sp.acs", "https://kibana.my.corp/saml/login") + .put(RealmSettings.PREFIX + "my_saml.sp.logout", "https://kibana.my.corp/saml/logout") + .put(RealmSettings.PREFIX + "my_saml.attributes.principal", "urn:oid:0.9.2342.19200300.100.1.1"); + if (useSigningCredentials) { + settingsBuilder.put(RealmSettings.PREFIX + "my_saml.signing.certificate", certPath.toString()) + .put(RealmSettings.PREFIX + "my_saml.signing.key", keyPath.toString()); + } + final Settings settings = settingsBuilder.build(); + final Environment env = TestEnvironment.newEnvironment(settings); + + final MockTerminal terminal = new MockTerminal(); + + // What is the friendly name for "principal" attribute "urn:oid:0.9.2342.19200300.100.1.1" [default: principal] + terminal.addTextInput(""); + terminal.addSecretInput(""); + + final EntityDescriptor descriptor = command.buildEntityDescriptor(terminal, options, env); + command.possiblySignDescriptor(terminal, options, descriptor, env); + assertThat(descriptor, notNullValue()); + // Verify generated signature + assertThat(descriptor.getSignature(), notNullValue()); + assertThat(validateSignature(descriptor.getSignature()), equalTo(true)); + // Make sure that Signing didn't mangle the XML at all and we can still read metadata + assertThat(descriptor.getEntityID(), equalTo("https://kibana.my.corp/")); + + assertThat(descriptor.getRoleDescriptors(), iterableWithSize(1)); + assertThat(descriptor.getRoleDescriptors().get(0), instanceOf(SPSSODescriptor.class)); + final SPSSODescriptor spDescriptor = (SPSSODescriptor) descriptor.getRoleDescriptors().get(0); + + assertThat(spDescriptor.getAssertionConsumerServices(), iterableWithSize(1)); + assertThat(spDescriptor.getAssertionConsumerServices().get(0).getLocation(), equalTo("https://kibana.my.corp/saml/login")); + 
assertThat(spDescriptor.getAssertionConsumerServices().get(0).isDefault(), equalTo(true)); + assertThat(spDescriptor.getAssertionConsumerServices().get(0).getIndex(), equalTo(1)); + assertThat(spDescriptor.getAssertionConsumerServices().get(0).getBinding(), equalTo(SAMLConstants.SAML2_POST_BINDING_URI)); + + final RequestedAttribute uidAttribute = spDescriptor.getAttributeConsumingServices().get(0).getRequestAttributes().get(0); + assertThat(uidAttribute.getName(), equalTo("urn:oid:0.9.2342.19200300.100.1.1")); + assertThat(uidAttribute.getFriendlyName(), equalTo("principal")); + + assertThat(spDescriptor.isAuthnRequestsSigned(), equalTo(useSigningCredentials)); + assertThat(spDescriptor.getWantAssertionsSigned(), equalTo(true)); + } + + public void testSigningMetadataWithPasswordProtectedPfx() throws Exception { + final Path certPath = getDataPath("saml.crt"); + final Path keyPath = getDataPath("saml.key"); + final Path p12Path = getDataPath("saml_with_password.p12"); + final SamlMetadataCommand command = new SamlMetadataCommand((e) -> randomFrom(keyStore, null)); + final OptionSet options = command.getParser().parse(new String[]{ + "-signing-bundle", p12Path.toString(), + "-signing-key-password", "saml" + }); + + final boolean useSigningCredentials = randomBoolean(); + final Settings.Builder settingsBuilder = Settings.builder() + .put("path.home", createTempDir()) + .put(RealmSettings.PREFIX + "my_saml.type", "saml") + .put(RealmSettings.PREFIX + "my_saml.order", 1) + .put(RealmSettings.PREFIX + "my_saml.idp.entity_id", "https://okta.my.corp/") + .put(RealmSettings.PREFIX + "my_saml.sp.entity_id", "https://kibana.my.corp/") + .put(RealmSettings.PREFIX + "my_saml.sp.acs", "https://kibana.my.corp/saml/login") + .put(RealmSettings.PREFIX + "my_saml.sp.logout", "https://kibana.my.corp/saml/logout"); + if (useSigningCredentials) { + settingsBuilder.put(RealmSettings.PREFIX + "my_saml.signing.certificate", certPath.toString()) + .put(RealmSettings.PREFIX + "my_saml.signing.key", keyPath.toString()); + } + final Settings settings = settingsBuilder.build(); + final Environment env = TestEnvironment.newEnvironment(settings); + + final MockTerminal terminal = new MockTerminal(); + + final EntityDescriptor descriptor = command.buildEntityDescriptor(terminal, options, env); + Element e = command.possiblySignDescriptor(terminal, options, descriptor, env); + String a = SamlUtils.toString(e); + assertThat(descriptor, notNullValue()); + // Verify generated signature + assertThat(descriptor.getSignature(), notNullValue()); + assertThat(validateSignature(descriptor.getSignature()), equalTo(true)); + } + + public void testErrorSigningMetadataWithWrongPassword() throws Exception { + final Path certPath = getDataPath("saml.crt"); + final Path keyPath = getDataPath("saml.key"); + final Path p12Path = getDataPath("saml_with_password.p12"); + final SamlMetadataCommand command = new SamlMetadataCommand((e) -> randomFrom(keyStore, null)); + final OptionSet options = command.getParser().parse(new String[]{ + "-signing-bundle", p12Path.toString(), + "-signing-key-password", "wrong_password" + }); + + final boolean useSigningCredentials = randomBoolean(); + final Settings.Builder settingsBuilder = Settings.builder() + .put("path.home", createTempDir()) + .put(RealmSettings.PREFIX + "my_saml.type", "saml") + .put(RealmSettings.PREFIX + "my_saml.order", 1) + .put(RealmSettings.PREFIX + "my_saml.idp.entity_id", "https://okta.my.corp/") + .put(RealmSettings.PREFIX + "my_saml.sp.entity_id", "https://kibana.my.corp/") + 
.put(RealmSettings.PREFIX + "my_saml.sp.acs", "https://kibana.my.corp/saml/login") + .put(RealmSettings.PREFIX + "my_saml.sp.logout", "https://kibana.my.corp/saml/logout"); + if (useSigningCredentials) { + settingsBuilder.put(RealmSettings.PREFIX + "my_saml.signing.certificate", certPath.toString()) + .put(RealmSettings.PREFIX + "my_saml.signing.key", keyPath.toString()); + } + final Settings settings = settingsBuilder.build(); + final Environment env = TestEnvironment.newEnvironment(settings); + + final MockTerminal terminal = new MockTerminal(); + + final EntityDescriptor descriptor = command.buildEntityDescriptor(terminal, options, env); + final UserException userException = expectThrows(UserException.class, () -> command.possiblySignDescriptor(terminal, options, + descriptor, env)); + assertThat(userException.getMessage(), containsString("Unable to create metadata document")); + assertThat(terminal.getOutput(), containsString("keystore password was incorrect")); + } + + public void testSigningMetadataWithPem() throws Exception { + //Use this keypair for signing the metadata also + final Path certPath = getDataPath("saml.crt"); + final Path keyPath = getDataPath("saml.key"); + + final SamlMetadataCommand command = new SamlMetadataCommand((e) -> randomFrom(keyStore, null)); + final OptionSet options = command.getParser().parse(new String[]{ + "-signing-cert", certPath.toString(), + "-signing-key", keyPath.toString() + }); + + final boolean useSigningCredentials = randomBoolean(); + final Settings.Builder settingsBuilder = Settings.builder() + .put("path.home", createTempDir()) + .put(RealmSettings.PREFIX + "my_saml.type", "saml") + .put(RealmSettings.PREFIX + "my_saml.order", 1) + .put(RealmSettings.PREFIX + "my_saml.idp.entity_id", "https://okta.my.corp/") + .put(RealmSettings.PREFIX + "my_saml.sp.entity_id", "https://kibana.my.corp/") + .put(RealmSettings.PREFIX + "my_saml.sp.acs", "https://kibana.my.corp/saml/login") + .put(RealmSettings.PREFIX + "my_saml.sp.logout", "https://kibana.my.corp/saml/logout"); + if (useSigningCredentials) { + settingsBuilder.put(RealmSettings.PREFIX + "my_saml.signing.certificate", certPath.toString()) + .put(RealmSettings.PREFIX + "my_saml.signing.key", keyPath.toString()); + } + final Settings settings = settingsBuilder.build(); + final Environment env = TestEnvironment.newEnvironment(settings); + + final MockTerminal terminal = new MockTerminal(); + + final EntityDescriptor descriptor = command.buildEntityDescriptor(terminal, options, env); + command.possiblySignDescriptor(terminal, options, descriptor, env); + assertThat(descriptor, notNullValue()); + // Verify generated signature + assertThat(descriptor.getSignature(), notNullValue()); + assertThat(validateSignature(descriptor.getSignature()), equalTo(true)); + } + + public void testSigningMetadataWithPasswordProtectedPem() throws Exception { + //Use same keypair for signing the metadata + final Path signingKeyPath = getDataPath("saml_with_password.key"); + + final Path certPath = getDataPath("saml.crt"); + final Path keyPath = getDataPath("saml.key"); + + final SamlMetadataCommand command = new SamlMetadataCommand((e) -> keyStore); + final OptionSet options = command.getParser().parse(new String[]{ + "-signing-cert", certPath.toString(), + "-signing-key", signingKeyPath.toString(), + "-signing-key-password", "saml" + + }); + + final boolean useSigningCredentials = randomBoolean(); + final Settings.Builder settingsBuilder = Settings.builder() + .put("path.home", createTempDir()) + 
.put(RealmSettings.PREFIX + "my_saml.type", "saml") + .put(RealmSettings.PREFIX + "my_saml.order", 1) + .put(RealmSettings.PREFIX + "my_saml.idp.entity_id", "https://okta.my.corp/") + .put(RealmSettings.PREFIX + "my_saml.sp.entity_id", "https://kibana.my.corp/") + .put(RealmSettings.PREFIX + "my_saml.sp.acs", "https://kibana.my.corp/saml/login") + .put(RealmSettings.PREFIX + "my_saml.sp.logout", "https://kibana.my.corp/saml/logout"); + if (useSigningCredentials) { + settingsBuilder.put(RealmSettings.PREFIX + "my_saml.signing.certificate", certPath.toString()) + .put(RealmSettings.PREFIX + "my_saml.signing.key", keyPath.toString()); + } + final Settings settings = settingsBuilder.build(); + final Environment env = TestEnvironment.newEnvironment(settings); + + final MockTerminal terminal = new MockTerminal(); + + final EntityDescriptor descriptor = command.buildEntityDescriptor(terminal, options, env); + command.possiblySignDescriptor(terminal, options, descriptor, env); + assertThat(descriptor, notNullValue()); + // Verify generated signature + assertThat(descriptor.getSignature(), notNullValue()); + assertThat(validateSignature(descriptor.getSignature()), equalTo(true)); + } + + public void testSigningMetadataWithPasswordProtectedPemInTerminal() throws Exception { + //Use same keypair for signing the metadata + final Path signingKeyPath = getDataPath("saml_with_password.key"); + + final Path certPath = getDataPath("saml.crt"); + final Path keyPath = getDataPath("saml.key"); + + final SamlMetadataCommand command = new SamlMetadataCommand((e) -> randomFrom(keyStore, null)); + final OptionSet options = command.getParser().parse(new String[]{ + "-signing-cert", certPath.toString(), + "-signing-key", signingKeyPath.toString() + + }); + + final boolean useSigningCredentials = randomBoolean(); + final Settings.Builder settingsBuilder = Settings.builder() + .put("path.home", createTempDir()) + .put(RealmSettings.PREFIX + "my_saml.type", "saml") + .put(RealmSettings.PREFIX + "my_saml.order", 1) + .put(RealmSettings.PREFIX + "my_saml.idp.entity_id", "https://okta.my.corp/") + .put(RealmSettings.PREFIX + "my_saml.sp.entity_id", "https://kibana.my.corp/") + .put(RealmSettings.PREFIX + "my_saml.sp.acs", "https://kibana.my.corp/saml/login") + .put(RealmSettings.PREFIX + "my_saml.sp.logout", "https://kibana.my.corp/saml/logout"); + if (useSigningCredentials) { + settingsBuilder.put(RealmSettings.PREFIX + "my_saml.signing.certificate", certPath.toString()) + .put(RealmSettings.PREFIX + "my_saml.signing.key", keyPath.toString()); + } + final Settings settings = settingsBuilder.build(); + final Environment env = TestEnvironment.newEnvironment(settings); + + final MockTerminal terminal = new MockTerminal(); + + terminal.addSecretInput("saml"); + + final EntityDescriptor descriptor = command.buildEntityDescriptor(terminal, options, env); + command.possiblySignDescriptor(terminal, options, descriptor, env); + assertThat(descriptor, notNullValue()); + // Verify generated signature + assertThat(descriptor.getSignature(), notNullValue()); + assertThat(validateSignature(descriptor.getSignature()), equalTo(true)); + } + + public void testDefaultOptionsWithSigningAndMultipleEncryptionKeys() throws Exception { + final Path dir = createTempDir(); + + final Path ksEncryptionFile = dir.resolve("saml-encryption.p12"); + final Tuple certEncKeyPair1 = createKeyPair("RSA"); + final Tuple certEncKeyPair2 = createKeyPair("RSA"); + final KeyStore ksEncrypt = KeyStore.getInstance("PKCS12"); + ksEncrypt.load(null); + 
ksEncrypt.setKeyEntry(getAliasName(certEncKeyPair1), certEncKeyPair1.v2(), "key-password".toCharArray(), + new Certificate[] { certEncKeyPair1.v1() }); + ksEncrypt.setKeyEntry(getAliasName(certEncKeyPair2), certEncKeyPair2.v2(), "key-password".toCharArray(), + new Certificate[] { certEncKeyPair2.v1() }); + try (OutputStream out = Files.newOutputStream(ksEncryptionFile)) { + ksEncrypt.store(out, "ks-password".toCharArray()); + } + + final Path ksSigningFile = dir.resolve("saml-signing.p12"); + final Tuple<java.security.cert.X509Certificate, PrivateKey> certKeyPairSign = createKeyPair("RSA"); + final KeyStore ksSign = KeyStore.getInstance("PKCS12"); + ksSign.load(null); + ksSign.setKeyEntry(getAliasName(certKeyPairSign), certKeyPairSign.v2(), "key-password".toCharArray(), + new Certificate[] { certKeyPairSign.v1() }); + try (OutputStream out = Files.newOutputStream(ksSigningFile)) { + ksSign.store(out, "ks-password".toCharArray()); + } + + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(RealmSettings.PREFIX + "my_saml.signing.keystore.secure_password", "ks-password"); + secureSettings.setString(RealmSettings.PREFIX + "my_saml.signing.keystore.secure_key_password", "key-password"); + secureSettings.setString(RealmSettings.PREFIX + "my_saml.encryption.keystore.secure_password", "ks-password"); + secureSettings.setString(RealmSettings.PREFIX + "my_saml.encryption.keystore.secure_key_password", "key-password"); + + final SamlMetadataCommand command = new SamlMetadataCommand((e) -> keyStore); + final OptionSet options = command.getParser().parse(new String[0]); + + final boolean useSigningCredentials = randomBoolean(); + final boolean useEncryptionCredentials = randomBoolean(); + final Settings.Builder settingsBuilder = Settings.builder().put("path.home", dir).put(RealmSettings.PREFIX + "my_saml.type", "saml") + .put(RealmSettings.PREFIX + "my_saml.order", 1).put(RealmSettings.PREFIX + "my_saml.idp.entity_id", "https://okta.my.corp/") + .put(RealmSettings.PREFIX + "my_saml.sp.entity_id", "https://kibana.my.corp/") + .put(RealmSettings.PREFIX + "my_saml.sp.acs", "https://kibana.my.corp/saml/login") + .put(RealmSettings.PREFIX + "my_saml.sp.logout", "https://kibana.my.corp/saml/logout") + .put(RealmSettings.PREFIX + "my_saml.attributes.principal", "urn:oid:0.9.2342.19200300.100.1.1"); + settingsBuilder.setSecureSettings(secureSettings); + if (useSigningCredentials) { + settingsBuilder.put(RealmSettings.PREFIX + "my_saml.signing.keystore.path", ksSigningFile.toString()); + settingsBuilder.put(RealmSettings.PREFIX + "my_saml.signing.keystore.type", "PKCS12"); + } + if (useEncryptionCredentials) { + settingsBuilder.put(RealmSettings.PREFIX + "my_saml.encryption.keystore.path", ksEncryptionFile.toString()); + settingsBuilder.put(RealmSettings.PREFIX + "my_saml.encryption.keystore.type", "PKCS12"); + } + final Settings settings = settingsBuilder.build(); + final Environment env = TestEnvironment.newEnvironment(settings); + + final MockTerminal terminal = new MockTerminal(); + + // What is the friendly name for "principal" attribute + // "urn:oid:0.9.2342.19200300.100.1.1" [default: principal] + terminal.addTextInput(""); + + final EntityDescriptor descriptor = command.buildEntityDescriptor(terminal, options, env); + + assertThat(descriptor, notNullValue()); + assertThat(descriptor.getEntityID(), equalTo("https://kibana.my.corp/")); + + assertThat(descriptor.getRoleDescriptors(), iterableWithSize(1)); + assertThat(descriptor.getRoleDescriptors().get(0), instanceOf(SPSSODescriptor.class)); + final 
SPSSODescriptor spDescriptor = (SPSSODescriptor) descriptor.getRoleDescriptors().get(0); + + assertThat(spDescriptor.getAssertionConsumerServices(), iterableWithSize(1)); + assertThat(spDescriptor.getAssertionConsumerServices().get(0).getLocation(), equalTo("https://kibana.my.corp/saml/login")); + assertThat(spDescriptor.getAssertionConsumerServices().get(0).isDefault(), equalTo(true)); + assertThat(spDescriptor.getAssertionConsumerServices().get(0).getIndex(), equalTo(1)); + assertThat(spDescriptor.getAssertionConsumerServices().get(0).getBinding(), equalTo(SAMLConstants.SAML2_POST_BINDING_URI)); + + assertThat(spDescriptor.getAttributeConsumingServices(), iterableWithSize(1)); + assertThat(spDescriptor.getAttributeConsumingServices().get(0).isDefault(), equalTo(true)); + assertThat(spDescriptor.getAttributeConsumingServices().get(0).getIndex(), equalTo(1)); + assertThat(spDescriptor.getAttributeConsumingServices().get(0).getRequestAttributes(), iterableWithSize(1)); + final RequestedAttribute uidAttribute = spDescriptor.getAttributeConsumingServices().get(0).getRequestAttributes().get(0); + assertThat(uidAttribute.getName(), equalTo("urn:oid:0.9.2342.19200300.100.1.1")); + assertThat(uidAttribute.getFriendlyName(), equalTo("principal")); + + assertThat(spDescriptor.getSingleLogoutServices(), iterableWithSize(1)); + assertThat(spDescriptor.getSingleLogoutServices().get(0).getLocation(), equalTo("https://kibana.my.corp/saml/logout")); + assertThat(spDescriptor.getSingleLogoutServices().get(0).getBinding(), equalTo(SAMLConstants.SAML2_REDIRECT_BINDING_URI)); + + assertThat(spDescriptor.isAuthnRequestsSigned(), equalTo(useSigningCredentials)); + assertThat(spDescriptor.getWantAssertionsSigned(), equalTo(true)); + + int expectedKeyDescriptorSize = (useSigningCredentials) ? 1 : 0; + expectedKeyDescriptorSize = (useEncryptionCredentials) ? 
expectedKeyDescriptorSize + 2 : expectedKeyDescriptorSize; + + assertThat(spDescriptor.getKeyDescriptors(), iterableWithSize(expectedKeyDescriptorSize)); + if (expectedKeyDescriptorSize > 0) { + final Set<java.security.cert.X509Certificate> encryptionCertificatesToMatch = new HashSet<>(); + if (useEncryptionCredentials) { + encryptionCertificatesToMatch.add(certEncKeyPair1.v1()); + encryptionCertificatesToMatch.add(certEncKeyPair2.v1()); + } + spDescriptor.getKeyDescriptors().stream().forEach((keyDesc) -> { + UsageType usageType = keyDesc.getUse(); + final List<X509Data> x509 = keyDesc.getKeyInfo().getX509Datas(); + assertThat(x509, iterableWithSize(1)); + assertThat(x509.get(0).getX509Certificates(), iterableWithSize(1)); + final X509Certificate xmlCert = x509.get(0).getX509Certificates().get(0); + final java.security.cert.X509Certificate javaCert; + try { + // Verify that OpenSAML things the XML representation is the same as our input + javaCert = KeyInfoSupport.getCertificate(xmlCert); + } catch (CertificateException ce) { + throw ExceptionsHelper.convertToRuntime(ce); + } + if (usageType == UsageType.SIGNING) { + assertTrue("Found UsageType as SIGNING in SP metadata when not testing for signing credentials", useSigningCredentials); + assertEquals("Signing Certificate from SP metadata does not match", certKeyPairSign.v1(), javaCert); + } else if (usageType == UsageType.ENCRYPTION) { + assertTrue(useEncryptionCredentials); + assertTrue("Encryption Certificate was not found in encryption certificates", + encryptionCertificatesToMatch.remove(javaCert)); + } else { + fail("Usage type should have been either SIGNING or ENCRYPTION"); + } + }); + if (useEncryptionCredentials) { + assertTrue("Did not find all encryption certificates in exported SP metadata", encryptionCertificatesToMatch.isEmpty()); + } + } + } + + private String getAliasName(final Tuple<java.security.cert.X509Certificate, PrivateKey> certKeyPair) { + return certKeyPair.v1().getSubjectX500Principal().getName().toLowerCase(Locale.US) + "-alias"; + } + + private boolean validateSignature(Signature signature) { + try { + Certificate[] certificates = CertUtils.readCertificates(Collections.singletonList(getDataPath("saml.crt").toString()), null); + PrivateKey key = CertUtils.readPrivateKey(Files.newBufferedReader(getDataPath("saml.key"), StandardCharsets.UTF_8), + ""::toCharArray); + Credential verificationCredential = new BasicX509Credential((java.security.cert.X509Certificate) certificates[0], key); + SAMLSignatureProfileValidator profileValidator = new SAMLSignatureProfileValidator(); + profileValidator.validate(signature); + SignatureValidator.validate(signature, verificationCredential); + return true; + } catch (Exception e) { + return false; + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTestHelper.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTestHelper.java new file mode 100644 index 0000000000000..beacb491cf0ed --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTestHelper.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.saml; + +import java.util.Arrays; +import java.util.Collections; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.opensaml.saml.common.xml.SAMLConstants; +import org.opensaml.saml.saml2.metadata.EntityDescriptor; +import org.opensaml.saml.saml2.metadata.IDPSSODescriptor; +import org.opensaml.saml.saml2.metadata.SingleLogoutService; +import org.opensaml.security.x509.X509Credential; + +import static org.mockito.Mockito.mock; + +public class SamlRealmTestHelper { + + public static final String SP_ENTITY_ID = "https://sp.example.net/"; + public static final String SP_ACS_URL = SP_ENTITY_ID + "saml/acs"; + public static final String SP_LOGOUT_URL = SP_ENTITY_ID + "saml/logout"; + + public static final String IDP_ENTITY_ID = "https://idp.example.org/"; + public static final String IDP_LOGOUT_URL = IDP_ENTITY_ID + "saml/logout"; + + public static SamlRealm buildRealm(RealmConfig realmConfig, @Nullable X509Credential credential) throws Exception { + EntityDescriptor idpDescriptor = SamlUtils.buildObject(EntityDescriptor.class, EntityDescriptor.DEFAULT_ELEMENT_NAME); + final IDPSSODescriptor role = SamlUtils.buildObject(IDPSSODescriptor.class, IDPSSODescriptor.DEFAULT_ELEMENT_NAME); + final SingleLogoutService slo = SamlUtils.buildObject(SingleLogoutService.class, SingleLogoutService.DEFAULT_ELEMENT_NAME); + idpDescriptor.getRoleDescriptors().add(role); + role.getSingleLogoutServices().add(slo); + slo.setBinding(SAMLConstants.SAML2_REDIRECT_BINDING_URI); + slo.setLocation(IDP_LOGOUT_URL); + + final SpConfiguration spConfiguration = new SpConfiguration(SP_ENTITY_ID, SP_ACS_URL, SP_LOGOUT_URL, + new SigningConfiguration(Collections.singleton("*"), credential), Arrays.asList(credential)); + return new SamlRealm(realmConfig, mock(UserRoleMapper.class), mock(SamlAuthenticator.class), + mock(SamlLogoutRequestHandler.class), () -> idpDescriptor, spConfiguration); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java new file mode 100644 index 0000000000000..fdcf720bf2606 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java @@ -0,0 +1,617 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.saml; + +import org.bouncycastle.openssl.jcajce.JcaPEMWriter; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; +import org.elasticsearch.xpack.core.ssl.CertUtils; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.core.ssl.TestsSSLService; +import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.junit.Before; +import org.mockito.Mockito; +import org.opensaml.saml.common.xml.SAMLConstants; +import org.opensaml.saml.metadata.resolver.impl.AbstractReloadingMetadataResolver; +import org.opensaml.saml.saml2.core.LogoutRequest; +import org.opensaml.saml.saml2.core.NameID; +import org.opensaml.saml.saml2.core.NameIDType; +import org.opensaml.saml.saml2.metadata.EntityDescriptor; +import org.opensaml.saml.saml2.metadata.IDPSSODescriptor; +import org.opensaml.saml.saml2.metadata.SingleLogoutService; +import org.opensaml.saml.saml2.metadata.SingleSignOnService; +import org.opensaml.security.credential.Credential; +import org.opensaml.security.x509.X509Credential; + +import javax.security.auth.x500.X500Principal; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.KeyStore; +import java.security.NoSuchAlgorithmException; +import java.security.PrivateKey; +import java.security.PrivilegedActionException; +import java.security.PublicKey; +import java.security.cert.Certificate; +import java.security.cert.X509Certificate; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.iterableWithSize; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; + +/** + * Basic unit tests for the SAMLRealm + */ +public class SamlRealmTests extends SamlTestCase { + + public static final String 
TEST_IDP_ENTITY_ID = "http://demo_josso_1.josso.dev.docker:8081/IDBUS/JOSSO-TUTORIAL/IDP1/SAML2/MD"; + private static final int METADATA_REFRESH = 3000; + + private static final String REALM_NAME = "my-saml"; + private static final String REALM_SETTINGS_PREFIX = "xpack.security.authc.realms." + REALM_NAME; + + @Before + public void initRealm() throws PrivilegedActionException { + SamlUtils.initialize(logger); + } + + public void testReadIdpMetadataFromFile() throws Exception { + final Path path = getDataPath("idp1.xml"); + Tuple config = buildConfig(path.toString()); + final ResourceWatcherService watcherService = mock(ResourceWatcherService.class); + Tuple> tuple + = SamlRealm.initializeResolver(logger, config.v1(), config.v2(), watcherService); + try { + assertIdp1MetadataParsedCorrectly(tuple.v2().get()); + } finally { + tuple.v1().destroy(); + } + } + + public void testReadIdpMetadataFromHttps() throws Exception { + final Path path = getDataPath("idp1.xml"); + final String body = new String(Files.readAllBytes(path), StandardCharsets.UTF_8); + final MockSecureSettings mockSecureSettings = new MockSecureSettings(); + mockSecureSettings.setString("xpack.ssl.keystore.secure_password", "testnode"); + final Settings settings = Settings.builder() + .put("xpack.ssl.keystore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks")) + .put("path.home", createTempDir()) + .setSecureSettings(mockSecureSettings) + .build(); + TestsSSLService sslService = new TestsSSLService(settings, TestEnvironment.newEnvironment(settings)); + try (MockWebServer proxyServer = new MockWebServer(sslService.sslContext(Settings.EMPTY), false)) { + proxyServer.start(); + proxyServer.enqueue(new MockResponse().setResponseCode(200).setBody(body).addHeader("Content-Type", "application/xml")); + proxyServer.enqueue(new MockResponse().setResponseCode(200).setBody(body).addHeader("Content-Type", "application/xml")); + assertEquals(0, proxyServer.requests().size()); + + Tuple config = buildConfig("https://localhost:" + proxyServer.getPort()); + final ResourceWatcherService watcherService = mock(ResourceWatcherService.class); + Tuple> tuple + = SamlRealm.initializeResolver(logger, config.v1(), config.v2(), watcherService); + + try { + final int firstRequestCount = proxyServer.requests().size(); + assertThat(firstRequestCount, greaterThanOrEqualTo(1)); + assertIdp1MetadataParsedCorrectly(tuple.v2().get()); + assertBusy(() -> assertThat(proxyServer.requests().size(), greaterThan(firstRequestCount))); + } finally { + tuple.v1().destroy(); + } + } + } + + public void testAuthenticateWithRoleMapping() throws Exception { + final boolean useNameId = randomBoolean(); + final boolean principalIsEmailAddress = randomBoolean(); + final Boolean populateUserMetadata = randomFrom(Boolean.TRUE, Boolean.FALSE, null); + final UserRoleMapper roleMapper = mock(UserRoleMapper.class); + final EntityDescriptor idp = mockIdp(); + final SpConfiguration sp = new SpConfiguration("", "https://saml/", null, null, null); + final SamlAuthenticator authenticator = mock(SamlAuthenticator.class); + final SamlLogoutRequestHandler logoutHandler = mock(SamlLogoutRequestHandler.class); + + final Settings.Builder settingsBuilder = Settings.builder() + .put(SamlRealmSettings.PRINCIPAL_ATTRIBUTE.name(), useNameId ? 
"nameid" : "uid") + .put(SamlRealmSettings.GROUPS_ATTRIBUTE.name(), "groups") + .put(SamlRealmSettings.MAIL_ATTRIBUTE.name(), "mail"); + if (principalIsEmailAddress) { + final boolean anchoredMatch = randomBoolean(); + settingsBuilder.put(SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getPattern().getKey(), + anchoredMatch ? "^([^@]+)@shield.gov$" : "^([^@]+)@"); + } + if (populateUserMetadata != null) { + settingsBuilder.put(SamlRealmSettings.POPULATE_USER_METADATA.getKey(), populateUserMetadata.booleanValue()); + } + final Settings realmSettings = settingsBuilder.build(); + + final RealmConfig config = realmConfigFromRealmSettings(realmSettings); + + final SamlRealm realm = new SamlRealm(config, roleMapper, authenticator, logoutHandler, () -> idp, sp); + final SamlToken token = new SamlToken(new byte[0], Collections.singletonList("")); + + final String nameIdValue = principalIsEmailAddress ? "clint.barton@shield.gov" : "clint.barton"; + final String uidValue = principalIsEmailAddress ? "cbarton@shield.gov" : "cbarton"; + final SamlAttributes attributes = new SamlAttributes( + new SamlNameId(NameIDType.PERSISTENT, nameIdValue, idp.getEntityID(), sp.getEntityId(), null), + randomAlphaOfLength(16), + Arrays.asList( + new SamlAttributes.SamlAttribute("urn:oid:0.9.2342.19200300.100.1.1", "uid", Collections.singletonList(uidValue)), + new SamlAttributes.SamlAttribute("urn:oid:1.3.6.1.4.1.5923.1.5.1.1", "groups", Arrays.asList("avengers", "shield")), + new SamlAttributes.SamlAttribute("urn:oid:0.9.2342.19200300.100.1.3", "mail", Arrays.asList("cbarton@shield.gov")) + )); + Mockito.when(authenticator.authenticate(token)).thenReturn(attributes); + + AtomicReference userData = new AtomicReference<>(); + Mockito.doAnswer(invocation -> { + assert invocation.getArguments().length == 2; + userData.set((UserRoleMapper.UserData) invocation.getArguments()[0]); + ActionListener> listener = (ActionListener>) invocation.getArguments()[1]; + listener.onResponse(Collections.singleton("superuser")); + return null; + }).when(roleMapper).resolveRoles(any(UserRoleMapper.UserData.class), any(ActionListener.class)); + + final PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(token, future); + final AuthenticationResult result = future.get(); + assertThat(result, notNullValue()); + assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.SUCCESS)); + assertThat(result.getUser().principal(), equalTo(useNameId ? "clint.barton" : "cbarton")); + assertThat(result.getUser().email(), equalTo("cbarton@shield.gov")); + assertThat(result.getUser().roles(), arrayContainingInAnyOrder("superuser")); + if (populateUserMetadata == Boolean.FALSE) { + // TODO : "saml_nameid" should be null too, but the logout code requires it for now. + assertThat(result.getUser().metadata().get("saml_uid"), nullValue()); + } else { + assertThat(result.getUser().metadata().get("saml_nameid"), equalTo(nameIdValue)); + assertThat(result.getUser().metadata().get("saml_uid"), instanceOf(Iterable.class)); + assertThat((Iterable) result.getUser().metadata().get("saml_uid"), contains(uidValue)); + } + + assertThat(userData.get().getUsername(), equalTo(useNameId ? "clint.barton" : "cbarton")); + assertThat(userData.get().getGroups(), containsInAnyOrder("avengers", "shield")); + } + + public void testAttributeSelectionWithRegex() throws Exception { + final boolean useFriendlyName = randomBoolean(); + final Settings settings = Settings.builder() + .put("attributes.principal", useFriendlyName ? 
"mail" : "urn:oid:0.9.2342.19200300.100.1.3") + .put("attribute_patterns.principal", "^(.+)@\\w+.example.com$") + .build(); + + final RealmConfig config = realmConfigFromRealmSettings(settings); + + final SamlRealmSettings.AttributeSetting principalSetting = new SamlRealmSettings.AttributeSetting("principal"); + final SamlRealm.AttributeParser parser = SamlRealm.AttributeParser.forSetting(logger, principalSetting, config, false); + + final SamlAttributes attributes = new SamlAttributes( + new SamlNameId(NameIDType.TRANSIENT, randomAlphaOfLength(24), null, null, null), + randomAlphaOfLength(16), + Collections.singletonList(new SamlAttributes.SamlAttribute("urn:oid:0.9.2342.19200300.100.1.3", "mail", + Arrays.asList("john.smith@personal.example.net", "john.smith@corporate.example.com", "jsmith@corporate.example.com") + ))); + + final List strings = parser.getAttribute(attributes); + assertThat(strings, contains("john.smith", "jsmith")); + } + + public void testSettingPatternWithoutAttributeThrowsSettingsException() throws Exception { + final Settings realmSettings = Settings.builder() + .put(SamlRealmSettings.PRINCIPAL_ATTRIBUTE.name(), "nameid") + .put(SamlRealmSettings.NAME_ATTRIBUTE.getPattern().getKey(), "^\\s*(\\S.*\\S)\\s*$") + .build(); + final RealmConfig config = realmConfigFromRealmSettings(realmSettings); + + final UserRoleMapper roleMapper = mock(UserRoleMapper.class); + final SamlAuthenticator authenticator = mock(SamlAuthenticator.class); + final SamlLogoutRequestHandler logoutHandler = mock(SamlLogoutRequestHandler.class); + final EntityDescriptor idp = mockIdp(); + final SpConfiguration sp = new SpConfiguration("", "https://saml/", null, null, null); + + final SettingsException settingsException = expectThrows(SettingsException.class, + () -> new SamlRealm(config, roleMapper, authenticator, logoutHandler, () -> idp, sp)); + assertThat(settingsException.getMessage(), containsString(REALM_SETTINGS_PREFIX + ".attribute_patterns.name")); + assertThat(settingsException.getMessage(), containsString(REALM_SETTINGS_PREFIX + ".attributes.name")); + } + + public void testMissingPrincipalSettingThrowsSettingsException() throws Exception { + final Settings realmSettings = Settings.EMPTY; + final RealmConfig config = realmConfigFromRealmSettings(realmSettings); + + final UserRoleMapper roleMapper = mock(UserRoleMapper.class); + final SamlAuthenticator authenticator = mock(SamlAuthenticator.class); + final SamlLogoutRequestHandler logoutHandler = mock(SamlLogoutRequestHandler.class); + final EntityDescriptor idp = mockIdp(); + final SpConfiguration sp = new SpConfiguration("", "https://saml/", null, null, null); + + final SettingsException settingsException = expectThrows(SettingsException.class, + () -> new SamlRealm(config, roleMapper, authenticator, logoutHandler, () -> idp, sp)); + assertThat(settingsException.getMessage(), containsString(REALM_SETTINGS_PREFIX + ".attributes.principal")); + } + + public void testNonMatchingPrincipalPatternThrowsSamlException() throws Exception { + final UserRoleMapper roleMapper = mock(UserRoleMapper.class); + final EntityDescriptor idp = mockIdp(); + final SpConfiguration sp = new SpConfiguration("", "https://saml/", null, null, null); + final SamlAuthenticator authenticator = mock(SamlAuthenticator.class); + final SamlLogoutRequestHandler logoutHandler = mock(SamlLogoutRequestHandler.class); + + final Settings realmSettings = Settings.builder() + .put(SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getAttribute().getKey(), "mail") + 
.put(SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getPattern().getKey(), "^([^@]+)@mycorp\\.example\\.com$") + .build(); + + final RealmConfig config = realmConfigFromRealmSettings(realmSettings); + + final SamlRealm realm = new SamlRealm(config, roleMapper, authenticator, logoutHandler, () -> idp, sp); + final SamlToken token = new SamlToken(new byte[0], Collections.singletonList("")); + + for (String mail : Arrays.asList("john@your-corp.example.com", "john@mycorp.example.com.example.net", "john")) { + final SamlAttributes attributes = new SamlAttributes( + new SamlNameId(NameIDType.TRANSIENT, randomAlphaOfLength(12), null, null, null), + randomAlphaOfLength(16), + Collections.singletonList( + new SamlAttributes.SamlAttribute("urn:oid:0.9.2342.19200300.100.1.3", "mail", Collections.singletonList(mail)) + )); + Mockito.when(authenticator.authenticate(token)).thenReturn(attributes); + + final PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(token, future); + final AuthenticationResult result = future.actionGet(); + assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.CONTINUE)); + assertThat(result.getMessage(), containsString("attributes.principal")); + assertThat(result.getMessage(), containsString("mail")); + assertThat(result.getMessage(), containsString("@mycorp\\.example\\.com")); + } + } + + public void testCreateCredentialFromPemFiles() throws Exception { + final Settings.Builder builder = buildSettings("http://example.com"); + final Path dir = createTempDir("encryption"); + final KeyPair pair = buildKeyPair(); + final X509Certificate cert = buildCertificate(pair); + builder.put(REALM_SETTINGS_PREFIX + ".encryption.key", writePemObject(dir, "enc.key", pair.getPrivate()).toString()); + builder.put(REALM_SETTINGS_PREFIX + ".encryption.certificate", writePemObject(dir, "enc.crt", cert).toString()); + final Settings settings = builder.build(); + final RealmConfig realmConfig = realmConfigFromGlobalSettings(settings); + final Credential credential = SamlRealm.buildEncryptionCredential(realmConfig).get(0); + + assertThat(credential, notNullValue()); + assertThat(credential.getPrivateKey(), equalTo(pair.getPrivate())); + assertThat(credential.getPublicKey(), equalTo(pair.getPublic())); + } + + public void testCreateEncryptionCredentialFromKeyStore() throws Exception { + final Path dir = createTempDir(); + final Settings.Builder builder = Settings.builder() + .put(REALM_SETTINGS_PREFIX + ".type", "saml") + .put("path.home", dir); + final Path ksFile = dir.resolve("cred.p12"); + final boolean testMultipleEncryptionKeyPair = randomBoolean(); + final Tuple certKeyPair1 = createKeyPair("RSA"); + final Tuple certKeyPair2 = createKeyPair("RSA"); + + final KeyStore ks = KeyStore.getInstance("PKCS12"); + ks.load(null); + ks.setKeyEntry(getAliasName(certKeyPair1), certKeyPair1.v2(), "key-password".toCharArray(), + new Certificate[] { certKeyPair1.v1() }); + if (testMultipleEncryptionKeyPair) { + ks.setKeyEntry(getAliasName(certKeyPair2), certKeyPair2.v2(), "key-password".toCharArray(), + new Certificate[] { certKeyPair2.v1() }); + } + try (OutputStream out = Files.newOutputStream(ksFile)) { + ks.store(out, "ks-password".toCharArray()); + } + builder.put(REALM_SETTINGS_PREFIX + ".encryption.keystore.path", ksFile.toString()); + builder.put(REALM_SETTINGS_PREFIX + ".encryption.keystore.type", "PKCS12"); + final boolean isEncryptionKeyStoreAliasSet = randomBoolean(); + if (isEncryptionKeyStoreAliasSet) { + builder.put(REALM_SETTINGS_PREFIX + 
".encryption.keystore.alias", getAliasName(certKeyPair1)); + } + + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(REALM_SETTINGS_PREFIX + ".encryption.keystore.secure_password", "ks-password"); + secureSettings.setString(REALM_SETTINGS_PREFIX + ".encryption.keystore.secure_key_password", "key-password"); + builder.setSecureSettings(secureSettings); + + final Settings settings = builder.build(); + final RealmConfig realmConfig = realmConfigFromGlobalSettings(settings); + final List credentials = SamlRealm.buildEncryptionCredential(realmConfig); + + assertThat("Encryption Credentials should not be null", credentials, notNullValue()); + final int expectedCredentials = (isEncryptionKeyStoreAliasSet) ? 1 : (testMultipleEncryptionKeyPair) ? 2 : 1; + assertEquals("Expected encryption credentials size does not match", expectedCredentials, credentials.size()); + credentials.stream().forEach((credential) -> { + assertTrue("Unexpected private key in the list of encryption credentials", + Arrays.asList(new PrivateKey[] { certKeyPair1.v2(), certKeyPair2.v2() }).contains(credential.getPrivateKey())); + assertTrue("Unexpected public key in the list of encryption credentials", + Arrays.asList(new PublicKey[] { (certKeyPair1.v1()).getPublicKey(), certKeyPair2.v1().getPublicKey() }) + .contains(credential.getPublicKey())); + }); + } + + public void testCreateSigningCredentialFromKeyStoreSuccessScenarios() throws Exception { + final Path dir = createTempDir(); + final Settings.Builder builder = Settings.builder().put(REALM_SETTINGS_PREFIX + ".type", "saml").put("path.home", dir); + final Path ksFile = dir.resolve("cred.p12"); + final Tuple certKeyPair1 = createKeyPair("RSA"); + final Tuple certKeyPair2 = createKeyPair("EC"); + + final KeyStore ks = KeyStore.getInstance("PKCS12"); + ks.load(null); + ks.setKeyEntry(getAliasName(certKeyPair1), certKeyPair1.v2(), "key-password".toCharArray(), + new Certificate[] { certKeyPair1.v1() }); + ks.setKeyEntry(getAliasName(certKeyPair2), certKeyPair2.v2(), "key-password".toCharArray(), + new Certificate[] { certKeyPair2.v1() }); + try (OutputStream out = Files.newOutputStream(ksFile)) { + ks.store(out, "ks-password".toCharArray()); + } + builder.put(REALM_SETTINGS_PREFIX + ".signing.keystore.path", ksFile.toString()); + builder.put(REALM_SETTINGS_PREFIX + ".signing.keystore.type", "PKCS12"); + final boolean isSigningKeyStoreAliasSet = randomBoolean(); + if (isSigningKeyStoreAliasSet) { + builder.put(REALM_SETTINGS_PREFIX + ".signing.keystore.alias", getAliasName(certKeyPair1)); + } + + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(REALM_SETTINGS_PREFIX + ".signing.keystore.secure_password", "ks-password"); + secureSettings.setString(REALM_SETTINGS_PREFIX + ".signing.keystore.secure_key_password", "key-password"); + builder.setSecureSettings(secureSettings); + + final Settings settings = builder.build(); + final RealmConfig realmConfig = realmConfigFromGlobalSettings(settings); + + // Should build signing credential and use the key from KS. 
+ final SigningConfiguration signingConfig = SamlRealm.buildSigningConfiguration(realmConfig); + final Credential credential = signingConfig.getCredential(); + assertThat(credential, notNullValue()); + assertThat(credential.getPrivateKey(), equalTo(certKeyPair1.v2())); + assertThat(credential.getPublicKey(), equalTo(certKeyPair1.v1().getPublicKey())); + } + + public void testCreateSigningCredentialFromKeyStoreFailureScenarios() throws Exception { + final Path dir = createTempDir(); + final Settings.Builder builder = Settings.builder().put(REALM_SETTINGS_PREFIX + ".type", "saml").put("path.home", dir); + final Path ksFile = dir.resolve("cred.p12"); + final Tuple certKeyPair1 = createKeyPair("RSA"); + final Tuple certKeyPair2 = createKeyPair("RSA"); + final Tuple certKeyPair3 = createKeyPair("EC"); + + final KeyStore ks = KeyStore.getInstance("PKCS12"); + ks.load(null); + final boolean noRSAKeysInKS = randomBoolean(); + if (noRSAKeysInKS == false) { + ks.setKeyEntry(getAliasName(certKeyPair1), certKeyPair1.v2(), "key-password".toCharArray(), + new Certificate[] { certKeyPair1.v1() }); + ks.setKeyEntry(getAliasName(certKeyPair2), certKeyPair2.v2(), "key-password".toCharArray(), + new Certificate[] { certKeyPair2.v1() }); + } + ks.setKeyEntry(getAliasName(certKeyPair3), certKeyPair3.v2(), "key-password".toCharArray(), + new Certificate[] { certKeyPair3.v1() }); + try (OutputStream out = Files.newOutputStream(ksFile)) { + ks.store(out, "ks-password".toCharArray()); + } + + builder.put(REALM_SETTINGS_PREFIX + ".signing.keystore.path", ksFile.toString()); + builder.put(REALM_SETTINGS_PREFIX + ".signing.keystore.type", "PKCS12"); + final boolean isSigningKeyStoreAliasSet = randomBoolean(); + final Tuple chosenAliasCertKeyPair; + final String unknownAlias = randomAlphaOfLength(5); + if (isSigningKeyStoreAliasSet) { + chosenAliasCertKeyPair = randomFrom(Arrays.asList(certKeyPair3, null)); + if (chosenAliasCertKeyPair == null) { + // Unknown alias + builder.put(REALM_SETTINGS_PREFIX + ".signing.keystore.alias", unknownAlias); + } else { + builder.put(REALM_SETTINGS_PREFIX + ".signing.keystore.alias", getAliasName(chosenAliasCertKeyPair)); + } + } else { + chosenAliasCertKeyPair = null; + } + + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(REALM_SETTINGS_PREFIX + ".signing.keystore.secure_password", "ks-password"); + secureSettings.setString(REALM_SETTINGS_PREFIX + ".signing.keystore.secure_key_password", "key-password"); + builder.setSecureSettings(secureSettings); + + final Settings settings = builder.build(); + final RealmConfig realmConfig = realmConfigFromGlobalSettings(settings); + + if (isSigningKeyStoreAliasSet) { + if (chosenAliasCertKeyPair == null) { + // Unknown alias, this must throw exception + final IllegalArgumentException illegalArgumentException = + expectThrows(IllegalArgumentException.class, () -> SamlRealm.buildSigningConfiguration(realmConfig)); + final String expectedErrorMessage = "The configured key store for " + + RealmSettings.getFullSettingKey(realmConfig, SamlRealmSettings.SIGNING_SETTINGS.getPrefix()) + + " does not have a key associated with alias [" + unknownAlias + "] " + "(from setting " + + RealmSettings.getFullSettingKey(realmConfig, SamlRealmSettings.SIGNING_KEY_ALIAS) + ")"; + assertEquals(expectedErrorMessage, illegalArgumentException.getLocalizedMessage()); + } else { + final String chosenAliasName = getAliasName(chosenAliasCertKeyPair); + // Since this is unsupported key type, this must throw exception + final 
IllegalArgumentException illegalArgumentException = + expectThrows(IllegalArgumentException.class, () -> SamlRealm.buildSigningConfiguration(realmConfig)); + final String expectedErrorMessage = "The key associated with alias [" + chosenAliasName + "] " + "(from setting " + + RealmSettings.getFullSettingKey(realmConfig, SamlRealmSettings.SIGNING_KEY_ALIAS) + + ") uses unsupported key algorithm type [" + chosenAliasCertKeyPair.v2().getAlgorithm() + + "], only RSA is supported"; + assertEquals(expectedErrorMessage, illegalArgumentException.getLocalizedMessage()); + } + } else { + if (noRSAKeysInKS) { + // Should throw exception as no RSA keys in the keystore + final IllegalArgumentException illegalArgumentException = + expectThrows(IllegalArgumentException.class, () -> SamlRealm.buildSigningConfiguration(realmConfig)); + final String expectedErrorMessage = "The configured key store for " + + RealmSettings.getFullSettingKey(realmConfig, SamlRealmSettings.SIGNING_SETTINGS.getPrefix()) + + " does not contain any RSA key pairs"; + assertEquals(expectedErrorMessage, illegalArgumentException.getLocalizedMessage()); + } else { + // Should throw exception when multiple signing keys found and alias not set + final IllegalArgumentException illegalArgumentException = + expectThrows(IllegalArgumentException.class, () -> SamlRealm.buildSigningConfiguration(realmConfig)); + final String expectedErrorMessage = "The configured key store for " + + RealmSettings.getFullSettingKey(realmConfig, SamlRealmSettings.SIGNING_SETTINGS.getPrefix()) + + " has multiple keys but no alias has been specified (from setting " + + RealmSettings.getFullSettingKey(realmConfig, SamlRealmSettings.SIGNING_KEY_ALIAS) + ")"; + assertEquals(expectedErrorMessage, illegalArgumentException.getLocalizedMessage()); + } + } + } + + private String getAliasName(final Tuple certKeyPair) { + return certKeyPair.v1().getSubjectX500Principal().getName().toLowerCase(Locale.US) + "-alias"; + } + + public void testBuildLogoutRequest() throws Exception { + final Boolean useSingleLogout = randomFrom(true, false, null); + final UserRoleMapper roleMapper = mock(UserRoleMapper.class); + final EntityDescriptor idp = mockIdp(); + final IDPSSODescriptor role = mock(IDPSSODescriptor.class); + final SingleLogoutService slo = SamlUtils.buildObject(SingleLogoutService.class, SingleLogoutService.DEFAULT_ELEMENT_NAME); + Mockito.when(idp.getRoleDescriptors(IDPSSODescriptor.DEFAULT_ELEMENT_NAME)).thenReturn(Collections.singletonList(role)); + Mockito.when(role.getSingleLogoutServices()).thenReturn(Collections.singletonList(slo)); + slo.setBinding(SAMLConstants.SAML2_REDIRECT_BINDING_URI); + slo.setLocation("https://logout.saml/"); + + final SpConfiguration sp = new SpConfiguration("", "https://saml/", null, null, null); + final SamlAuthenticator authenticator = mock(SamlAuthenticator.class); + final SamlLogoutRequestHandler logoutHandler = mock(SamlLogoutRequestHandler.class); + + final Settings.Builder realmSettings = Settings.builder() + .put(SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getAttribute().getKey(), "uid"); + if (useSingleLogout != null) { + realmSettings.put(SamlRealmSettings.IDP_SINGLE_LOGOUT.getKey(), useSingleLogout.booleanValue()); + } + + final RealmConfig config = realmConfigFromRealmSettings(realmSettings.build()); + + final SamlRealm realm = new SamlRealm(config, roleMapper, authenticator, logoutHandler, () -> idp, sp); + + final NameID nameId = SamlUtils.buildObject(NameID.class, NameID.DEFAULT_ELEMENT_NAME); + nameId.setFormat(NameID.TRANSIENT); + 
nameId.setValue(SamlUtils.generateSecureNCName(18)); + final String session = SamlUtils.generateSecureNCName(12); + + final LogoutRequest request = realm.buildLogoutRequest(nameId, session); + if (Boolean.FALSE.equals(useSingleLogout)) { + assertThat(request, nullValue()); + } else { + assertThat(request, notNullValue()); + assertThat(request.getDestination(), equalTo("https://logout.saml/")); + assertThat(request.getNameID(), equalTo(nameId)); + assertThat(request.getSessionIndexes(), iterableWithSize(1)); + assertThat(request.getSessionIndexes().get(0).getSessionIndex(), equalTo(session)); + } + } + + private EntityDescriptor mockIdp() { + final EntityDescriptor descriptor = mock(EntityDescriptor.class); + Mockito.when(descriptor.getEntityID()).thenReturn("https://idp.saml/"); + return descriptor; + } + + private X509Certificate buildCertificate(KeyPair pair) throws Exception { + return CertUtils.generateSignedCertificate(new X500Principal("CN=idp"), null, pair, null, null, 30); + } + + private KeyPair buildKeyPair() throws NoSuchAlgorithmException { + KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA"); + keyPairGenerator.initialize(2048); + return keyPairGenerator.generateKeyPair(); + } + + + private Path writePemObject(Path dir, String name, Object object) throws IOException { + final Path path = dir.resolve(name); + final JcaPEMWriter writer = new JcaPEMWriter(Files.newBufferedWriter(path, StandardCharsets.US_ASCII)); + writer.writeObject(object); + writer.close(); + return path; + } + + private Tuple buildConfig(String path) throws Exception { + Settings globalSettings = buildSettings(path).build(); + final Environment env = TestEnvironment.newEnvironment(globalSettings); + final RealmConfig config = realmConfigFromGlobalSettings(globalSettings); + final SSLService sslService = new SSLService(globalSettings, env); + return new Tuple<>(config, sslService); + } + + private Settings.Builder buildSettings(String idpMetaDataPath) { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(REALM_SETTINGS_PREFIX + ".ssl.keystore.secure_password", "testnode"); + return Settings.builder() + .put(REALM_SETTINGS_PREFIX + ".ssl.verification_mode", "certificate") + .put(REALM_SETTINGS_PREFIX + ".ssl.keystore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks")) + .put(REALM_SETTINGS_PREFIX + ".type", "saml") + .put(REALM_SETTINGS_PREFIX + "." + SamlRealmSettings.IDP_METADATA_PATH.getKey(), idpMetaDataPath) + .put(REALM_SETTINGS_PREFIX + "." + SamlRealmSettings.IDP_ENTITY_ID.getKey(), TEST_IDP_ENTITY_ID) + .put(REALM_SETTINGS_PREFIX + "." 
+ SamlRealmSettings.IDP_METADATA_HTTP_REFRESH.getKey(), METADATA_REFRESH + "ms") + .put("path.home", createTempDir()) + .setSecureSettings(secureSettings); + } + + private RealmConfig realmConfigFromRealmSettings(Settings realmSettings) { + final Settings globalSettings = Settings.builder().put("path.home", createTempDir()).build(); + final Environment env = TestEnvironment.newEnvironment(globalSettings); + return new RealmConfig(REALM_NAME, realmSettings, globalSettings, env, new ThreadContext(globalSettings)); + } + + private RealmConfig realmConfigFromGlobalSettings(Settings globalSettings) { + final Settings realmSettings = globalSettings.getByPrefix(REALM_SETTINGS_PREFIX + "."); + final Environment env = TestEnvironment.newEnvironment(globalSettings); + return new RealmConfig(REALM_NAME, realmSettings, globalSettings, env, new ThreadContext(globalSettings)); + } + + private void assertIdp1MetadataParsedCorrectly(EntityDescriptor descriptor) { + IDPSSODescriptor idpssoDescriptor = descriptor.getIDPSSODescriptor(SAMLConstants.SAML20P_NS); + assertNotNull(idpssoDescriptor); + List ssoServices = idpssoDescriptor.getSingleSignOnServices(); + assertEquals(2, ssoServices.size()); + assertEquals(SAMLConstants.SAML2_POST_BINDING_URI, ssoServices.get(0).getBinding()); + assertEquals(SAMLConstants.SAML2_REDIRECT_BINDING_URI, ssoServices.get(1).getBinding()); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRedirectTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRedirectTests.java new file mode 100644 index 0000000000000..d7b2249d14eef --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRedirectTests.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.saml; + +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.opensaml.saml.saml2.core.Issuer; +import org.opensaml.saml.saml2.core.LogoutRequest; +import org.opensaml.saml.saml2.core.NameID; +import org.opensaml.security.x509.X509Credential; + +import static java.util.Collections.emptySet; +import static java.util.Collections.singleton; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.startsWith; + +public class SamlRedirectTests extends SamlTestCase { + + private static final String IDP_ENTITY_ID = "https://idp.test/"; + private static final String LOGOUT_URL = "https://idp.test/saml/logout"; + + private static final SigningConfiguration NO_SIGNING = new SigningConfiguration(emptySet(), null); + + public void testRedirectUrlWithoutRelayStateOrSigning() { + final SamlRedirect redirect = new SamlRedirect(buildLogoutRequest(LOGOUT_URL), NO_SIGNING); + final String url = redirect.getRedirectUrl(); + assertThat(url, equalTo(LOGOUT_URL + "?SAMLRequest=nZFBa4QwFIT%2FSnh3Naa2ax%2FqsiAFYdtDu91DLyVo2AY0cX2x9Oc36gpLCz30mAwz3" + + "wwv2351LftUA2lrcohDDkyZ2jbanHJ4PTwEKWyLjGTXih739mRH96zOoyLHvNMQLlIO42DQStKERnaK0NX4snvcowg59oN1trYtsNIbtZFupn04" + + "1xNGkW760HkhmrKidoYAq8oc3nUTi5vk9m6T3vsfolFVhpw0LgfB4zTgcRAnByEw2SDnIef8DdhxnePZcCmPs3m4Lv13Z0mkhqknFL96ZtF15kp" + + "48hlV%2BS%2FCJAbL0sBP5StgiSwuzx8HKL4B")); + } + + public void testRedirectUrlWithRelayStateAndSigning() throws Exception { + final SigningConfiguration signing = + new SigningConfiguration(singleton("*"), (X509Credential) buildOpenSamlCredential(createKeyPair()).get(0)); + final SamlRedirect redirect = new SamlRedirect(buildLogoutRequest(LOGOUT_URL), signing); + final String url = redirect.getRedirectUrl("hello"); + assertThat(url, startsWith(LOGOUT_URL + "?SAMLRequest=nZFBa4QwFIT%2FSnh3Naa2ax%2FqsiAFYdtDu91DLyVo2AY0cX2x9Oc36gpLC" + + "z30mAwz3wwv2351LftUA2lrcohDDkyZ2jbanHJ4PTwEKWyLjGTXih739mRH96zOoyLHvNMQLlIO42DQStKERnaK0NX4snvcowg59oN1trY" + + "tsNIbtZFupn041xNGkW760HkhmrKidoYAq8oc3nUTi5vk9m6T3vsfolFVhpw0LgfB4zTgcRAnByEw2SDnIef8DdhxnePZcCmPs3m4Lv13Z" + + "0mkhqknFL96ZtF15kp48hlV%2BS%2FCJAbL0sBP5StgiSwuzx8HKL4B" + + "&RelayState=hello" + + "&SigAlg=http%3A%2F%2Fwww.w3.org%2F2001%2F04%2Fxmldsig-more%23rsa-sha256" + + "&Signature=")); + } + + public void testRedirectUrlWithExistingParameters() { + final SamlRedirect redirect = new SamlRedirect(buildLogoutRequest(LOGOUT_URL + "?a=xyz"), NO_SIGNING); + final String url = redirect.getRedirectUrl("foo"); + assertThat(url, equalTo(LOGOUT_URL + "?a=xyz" + + "&SAMLRequest=nZFBS8QwFIT%2FSnn3tmmsbn00LUIRCqsHXT14kdCGNdAmtS%2BV1V9v2u7CouDBYzLMzDe8vDz0XfChRtLWCE" + + "giBoEyjW212Qt42t2GGZRFTrLv%2BIBbu7eTe1DvkyIXeKchXCUB02jQStKERvaK0DX4eHO3RR4xHEbrbGM7CCpv1Ea6pe3NuYE" + + "wjnU7RM4L8ZwVd0tJKcXh8wuCuhLwqtuEX6SXV5vs2v8QTao25KRxAjhLspAlYZLuOMd0g4xFjLEXCJ5PozwBHCfgYh7P0f8ml0" + + "RqnGmh%2BEWbx%2BeZp4Z7n1FX%2F2qYxXBdGvqp7FSwRhbH548zFN8%3D" + + "&RelayState=foo")); + } + + public void testRedirectUrlWithTrailingQuestionMark() { + final SamlRedirect redirect = new SamlRedirect(buildLogoutRequest(LOGOUT_URL + "?"), NO_SIGNING); + final String url = redirect.getRedirectUrl(); + assertThat(url, equalTo(LOGOUT_URL + "?SAMLRequest=nZFPS8QwFMS%2FSnj3tmmsbn30D0IRCqsHXffgRUIb1kCb1L5U%2FPim7R" + + "YWBQ8ek2HmN8PLyq%2B%2BY59qJG1NDnHIgSnT2FabUw4vh%2FsghbLISPadGHBvT3ZyT%2BpjUuSYdxrCVcphGg1aSZrQyF4Rug" + + "af7x72KEKOw2idbWwHrPJGbaRbaO%2FODYRRpNshdF6I5qyoWyAlsLrK4U23sbhKrm926a3%2FIZpUbchJ43IQPE4DHgdxchACkx" + + 
"1yHnLOX4Edtz0eDuf2uJjHy9Z%2Fl5ZEapyLQvGraBZdZm6ER59RV%2F8izGKwLg38VL4B1sji%2FPxxgeIb")); + } + + private LogoutRequest buildLogoutRequest(String logoutUrl) { + final LogoutRequest logoutRequest = SamlUtils.buildObject(LogoutRequest.class, LogoutRequest.DEFAULT_ELEMENT_NAME); + logoutRequest.setDestination(logoutUrl); + logoutRequest.setIssueInstant(new DateTime(2018, 1, 14, 22, 47, DateTimeZone.UTC)); + logoutRequest.setID("_id123456789"); + final Issuer issuer = SamlUtils.buildObject(Issuer.class, Issuer.DEFAULT_ELEMENT_NAME); + issuer.setValue(IDP_ENTITY_ID); + logoutRequest.setIssuer(issuer); + final NameID nameId = SamlUtils.buildObject(NameID.class, NameID.DEFAULT_ELEMENT_NAME); + nameId.setValue("name-123456-7890"); + logoutRequest.setNameID(nameId); + return logoutRequest; + } + +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlSpMetadataBuilderTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlSpMetadataBuilderTests.java new file mode 100644 index 0000000000000..67db2be74f135 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlSpMetadataBuilderTests.java @@ -0,0 +1,310 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.saml; + +import org.elasticsearch.xpack.core.ssl.CertUtils; +import org.hamcrest.Matchers; +import org.junit.Before; +import org.opensaml.saml.saml2.core.NameID; +import org.opensaml.saml.saml2.metadata.EntityDescriptor; +import org.opensaml.saml.saml2.metadata.impl.EntityDescriptorMarshaller; +import org.w3c.dom.Element; + +import java.io.ByteArrayInputStream; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.security.cert.Certificate; +import java.security.cert.X509Certificate; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.stream.Collectors; + +public class SamlSpMetadataBuilderTests extends SamlTestCase { + + private X509Certificate certificate; + + // 1st is for signing, followed by 2 for encryption + private X509Certificate[] threeCertificates; + + @Before + public void setup() throws Exception { + SamlUtils.initialize(logger); + final Path certPath = getDataPath("saml.crt"); + final Certificate[] certs = CertUtils.readCertificates(Collections.singletonList(certPath)); + if (certs.length != 1) { + fail("Expected exactly 1 certificate in " + certPath); + } + if (certs[0] instanceof X509Certificate) { + this.certificate = (X509Certificate) certs[0]; + } else { + fail("Expected exactly X509Certificate, but was " + certs[0].getClass()); + } + + final Path threeCertsPath = getDataPath("saml-three-certs.crt"); + final Certificate[] threeCerts = CertUtils.readCertificates(Collections.singletonList(threeCertsPath)); + if (threeCerts.length != 3) { + fail("Expected exactly 3 certificate in " + certPath); + } + List notX509Certificates = Arrays.stream(threeCerts).filter((cert) -> { + return !(cert instanceof X509Certificate); + }).map(cert -> cert.getClass()).collect(Collectors.toList()); + if (notX509Certificates.isEmpty() == false) { + fail("Expected exactly X509Certificates, but found " + notX509Certificates); + } else { + 
this.threeCertificates = Arrays.asList(threeCerts).toArray(new X509Certificate[0]); + } + } + + public void testBuildMinimalistMetadata() throws Exception { + final EntityDescriptor descriptor = new SamlSpMetadataBuilder(Locale.getDefault(), "https://my.sp.example.net/") + .assertionConsumerServiceUrl("https://my.sp.example.net/saml/acs/post") + .build(); + + final Element element = new EntityDescriptorMarshaller().marshall(descriptor); + final String xml = SamlUtils.toString(element); + assertThat(xml, Matchers.equalTo("" + + "" + + "" + + "urn:oasis:names:tc:SAML:2.0:nameid-format:transient" + + "" + + "" + + "" + )); + assertValidXml(xml); + } + + public void testBuildFullMetadata() throws Exception { + final EntityDescriptor descriptor = new SamlSpMetadataBuilder(Locale.US, "https://kibana.apps.hydra/") + .serviceName("Hydra Kibana") + .nameIdFormat(NameID.PERSISTENT) + .withAttribute("uid", "urn:oid:0.9.2342.19200300.100.1.1") + .withAttribute("mail", "urn:oid:0.9.2342.19200300.100.1.3") + .withAttribute("groups", "urn:oid:1.3.6.1.4.1.5923.1.5.1.1") + .withAttribute(null, "urn:oid:2.16.840.1.113730.3.1.241") + .withAttribute(null, "urn:oid:1.3.6.1.4.1.5923.1.1.1.6") + .assertionConsumerServiceUrl("https://kibana.apps.hydra/saml/acs") + .singleLogoutServiceUrl("https://kibana.apps.hydra/saml/logout") + .authnRequestsSigned(true) + .signingCertificate(certificate) + .encryptionCertificates(Arrays.asList(certificate)) + .organization("Hydra", "Hydra", "https://hail.hydra/") + .withContact("administrative", "Wolfgang", "von Strucker", "baron.strucker@supreme.hydra") + .withContact("technical", "Paul", "Ebersol", "pne@tech.hydra") + .build(); + + final Element element = new EntityDescriptorMarshaller().marshall(descriptor); + final String xml = SamlUtils.toString(element); + assertThat(xml, Matchers.equalTo("" + + "" + + "" + + "" + + "" + + "MIIDWDCCAkCgAwIBAgIVANRTZaFrK+Pz19O8TZsb3HSJmAWpMA0GCSqGSIb3DQEBCwUAMB0xGzAZ" + System.lineSeparator() + + "BgNVBAMTEkVsYXN0aWNzZWFyY2gtU0FNTDAeFw0xNzExMjkwMjQ3MjZaFw0yMDExMjgwMjQ3MjZa" + System.lineSeparator() + + "MB0xGzAZBgNVBAMTEkVsYXN0aWNzZWFyY2gtU0FNTDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC" + System.lineSeparator() + + "AQoCggEBALHTuPGOieCbD2mZUdYrdH4ofo7qFze6rQUROCLKqf69uBuwvraNWOcwxHUTKVlLMV3d" + System.lineSeparator() + + "dKzYo+yfC44AMXrrV+79xVWsTCNHu9sxQzcDwiEx2OtOOX9MAk6tJQ3svNrMPNXWh8ftwmmY9XdF" + System.lineSeparator() + + "ZwMYUdo6FPjSQj5uQTDmGWRgF08f7VRlk6N92d/fzn9DlDm+TFuaOr17OTSR4B6RTrNwKC29AmXQ" + System.lineSeparator() + + "TwCijCObjLqyMEqP20dZCQeVf2qw8JKUHhW4r6mCLzqmeR+kRTqiHMSWxJddzxDGw6X7fOS7iuzB" + System.lineSeparator() + + "0+TnsKwgu8nYrEXds9MkGf1Yco7WsM43g+Es+LhNHP+es70CAwEAAaOBjjCBizAdBgNVHQ4EFgQU" + System.lineSeparator() + + "ILqVKGhIi8p5Xffsow/IKFLhRbIwWQYDVR0jBFIwUIAUILqVKGhIi8p5Xffsow/IKFLhRbKhIaQf" + System.lineSeparator() + + "MB0xGzAZBgNVBAMTEkVsYXN0aWNzZWFyY2gtU0FNTIIVANRTZaFrK+Pz19O8TZsb3HSJmAWpMA8G" + System.lineSeparator() + + "A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAGhl4V9mp4SWSV2E3HAJ1PX+Vmp6k27K" + System.lineSeparator() + + "d0tkOk1B9fyA13QB30teyiL7RR0vSHRyWFY8rQH1mHD366GKRWLITRG/QPULamGdYXX4h0pFj5ld" + System.lineSeparator() + + "aubLxM/O9vEAxOgmo/lsdkeIq9tLBqY06r/5A/Mcgo63KGi00AFYBoyvqfOu6nRLPnQr+rKVfdNO" + System.lineSeparator() + + "pWeIiFY1i2XTNZ3CZjNPSTwiQMUzrCxKXB9lL0vF6QL2Gj2iBhzNfXi88wf7xaR6XKY1wNuv3HLP" + System.lineSeparator() + + "sL7n+PWby7LRX188dyS1dmKfQcrKL65OssBA5NC8CAYyBiygBmWN+5kVJM5fSb0SwPSoVWrNyz+8" + System.lineSeparator() + + "IUldQE8=" + + "" + + "" + + "" + + 
"urn:oasis:names:tc:SAML:2.0:nameid-format:persistent" + + "" + + "" + + "Hydra Kibana" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "Hydra" + + "Hydra" + + "https://hail.hydra/" + + "" + + "" + + "Wolfgang" + + "von Strucker" + + "baron.strucker@supreme.hydra" + + "" + + "" + + "Paul" + + "Ebersol" + + "pne@tech.hydra" + + "" + + "" + )); + assertValidXml(xml); + } + + public void testBuildFullMetadataWithSigningAndTwoEncryptionCerts() throws Exception { + final EntityDescriptor descriptor = new SamlSpMetadataBuilder(Locale.US, "https://kibana.apps.hydra/") + .serviceName("Hydra Kibana") + .nameIdFormat(NameID.PERSISTENT) + .withAttribute("uid", "urn:oid:0.9.2342.19200300.100.1.1") + .withAttribute("mail", "urn:oid:0.9.2342.19200300.100.1.3") + .withAttribute("groups", "urn:oid:1.3.6.1.4.1.5923.1.5.1.1") + .withAttribute(null, "urn:oid:2.16.840.1.113730.3.1.241") + .withAttribute(null, "urn:oid:1.3.6.1.4.1.5923.1.1.1.6") + .assertionConsumerServiceUrl("https://kibana.apps.hydra/saml/acs") + .singleLogoutServiceUrl("https://kibana.apps.hydra/saml/logout") + .authnRequestsSigned(true) + .signingCertificate(threeCertificates[0]) + .encryptionCertificates(Arrays.asList(threeCertificates[1], threeCertificates[2])) + .organization("Hydra", "Hydra", "https://hail.hydra/") + .withContact("administrative", "Wolfgang", "von Strucker", "baron.strucker@supreme.hydra") + .withContact("technical", "Paul", "Ebersol", "pne@tech.hydra") + .build(); + + final Element element = new EntityDescriptorMarshaller().marshall(descriptor); + final String xml = SamlUtils.toString(element); + assertThat(xml, Matchers.equalTo("" + + "" + + "" + + "" + + "" + + "MIIDWDCCAkCgAwIBAgIVANRTZaFrK+Pz19O8TZsb3HSJmAWpMA0GCSqGSIb3DQEBCwUAMB0xGzAZ" + System.lineSeparator() + + "BgNVBAMTEkVsYXN0aWNzZWFyY2gtU0FNTDAeFw0xNzExMjkwMjQ3MjZaFw0yMDExMjgwMjQ3MjZa" + System.lineSeparator() + + "MB0xGzAZBgNVBAMTEkVsYXN0aWNzZWFyY2gtU0FNTDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC" + System.lineSeparator() + + "AQoCggEBALHTuPGOieCbD2mZUdYrdH4ofo7qFze6rQUROCLKqf69uBuwvraNWOcwxHUTKVlLMV3d" + System.lineSeparator() + + "dKzYo+yfC44AMXrrV+79xVWsTCNHu9sxQzcDwiEx2OtOOX9MAk6tJQ3svNrMPNXWh8ftwmmY9XdF" + System.lineSeparator() + + "ZwMYUdo6FPjSQj5uQTDmGWRgF08f7VRlk6N92d/fzn9DlDm+TFuaOr17OTSR4B6RTrNwKC29AmXQ" + System.lineSeparator() + + "TwCijCObjLqyMEqP20dZCQeVf2qw8JKUHhW4r6mCLzqmeR+kRTqiHMSWxJddzxDGw6X7fOS7iuzB" + System.lineSeparator() + + "0+TnsKwgu8nYrEXds9MkGf1Yco7WsM43g+Es+LhNHP+es70CAwEAAaOBjjCBizAdBgNVHQ4EFgQU" + System.lineSeparator() + + "ILqVKGhIi8p5Xffsow/IKFLhRbIwWQYDVR0jBFIwUIAUILqVKGhIi8p5Xffsow/IKFLhRbKhIaQf" + System.lineSeparator() + + "MB0xGzAZBgNVBAMTEkVsYXN0aWNzZWFyY2gtU0FNTIIVANRTZaFrK+Pz19O8TZsb3HSJmAWpMA8G" + System.lineSeparator() + + "A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAGhl4V9mp4SWSV2E3HAJ1PX+Vmp6k27K" + System.lineSeparator() + + "d0tkOk1B9fyA13QB30teyiL7RR0vSHRyWFY8rQH1mHD366GKRWLITRG/QPULamGdYXX4h0pFj5ld" + System.lineSeparator() + + "aubLxM/O9vEAxOgmo/lsdkeIq9tLBqY06r/5A/Mcgo63KGi00AFYBoyvqfOu6nRLPnQr+rKVfdNO" + System.lineSeparator() + + "pWeIiFY1i2XTNZ3CZjNPSTwiQMUzrCxKXB9lL0vF6QL2Gj2iBhzNfXi88wf7xaR6XKY1wNuv3HLP" + System.lineSeparator() + + "sL7n+PWby7LRX188dyS1dmKfQcrKL65OssBA5NC8CAYyBiygBmWN+5kVJM5fSb0SwPSoVWrNyz+8" + System.lineSeparator() + + "IUldQE8=" + + "" + + "" + + "" + + "" + + "MIID0zCCArugAwIBAgIJALi5bDfjMszLMA0GCSqGSIb3DQEBCwUAMEgxDDAKBgNVBAoTA29yZzEW" + System.lineSeparator() + + "MBQGA1UECxMNZWxhc3RpY3NlYXJjaDEgMB4GA1UEAxMXRWxhc3RpY3NlYXJjaCBUZXN0IE5vZGUw" + System.lineSeparator() + + 
"HhcNMTUwOTIzMTg1MjU3WhcNMTkwOTIyMTg1MjU3WjBIMQwwCgYDVQQKEwNvcmcxFjAUBgNVBAsT" + System.lineSeparator() + + "DWVsYXN0aWNzZWFyY2gxIDAeBgNVBAMTF0VsYXN0aWNzZWFyY2ggVGVzdCBOb2RlMIIBIjANBgkq" + System.lineSeparator() + + "hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3rGZ1QbsW0+MuyrSLmMfDFKtLBkIFW8V0gRuurFg1PUK" + System.lineSeparator() + + "KNR1Mq2tMVwjjYETAU/UY0iKZOzjgvYPKhDTYBTte/WHR1ZK4CYVv7TQX/gtFQG/ge/c7u0sLch9" + System.lineSeparator() + + "p7fbd+/HZiLS/rBEZDIohvgUvzvnA8+OIYnw4kuxKo/5iboAIS41klMg/lATm8V71LMY68inht71" + System.lineSeparator() + + "/ZkQoAHKgcR9z4yNYvQ1WqKG8DG8KROXltll3sTrKbl5zJhn660es/1ZnR6nvwt6xnSTl/mNHMjk" + System.lineSeparator() + + "fv1bs4rJ/py3qPxicdoSIn/KyojUcgHVF38fuAy2CQTdjVG5fWj9iz+mQvLm3+qsIYQdFwIDAQAB" + System.lineSeparator() + + "o4G/MIG8MAkGA1UdEwQCMAAwHQYDVR0OBBYEFEMMWLWQi/g83PzlHYqAVnty5L7HMIGPBgNVHREE" + System.lineSeparator() + + "gYcwgYSCCWxvY2FsaG9zdIIVbG9jYWxob3N0LmxvY2FsZG9tYWluggpsb2NhbGhvc3Q0ghdsb2Nh" + System.lineSeparator() + + "bGhvc3Q0LmxvY2FsZG9tYWluNIIKbG9jYWxob3N0NoIXbG9jYWxob3N0Ni5sb2NhbGRvbWFpbjaH" + System.lineSeparator() + + "BH8AAAGHEAAAAAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQELBQADggEBAMjGGXT8Nt1tbl2GkiKt" + System.lineSeparator() + + "miuGE2Ej66YuZ37WSJViaRNDVHLlg87TCcHek2rdO+6sFqQbbzEfwQ05T7xGmVu7tm54HwKMRugo" + System.lineSeparator() + + "Q3wct0bQC5wEWYN+oMDvSyO6M28mZwWb4VtR2IRyWP+ve5DHwTM9mxWa6rBlGzsQqH6YkJpZojzq" + System.lineSeparator() + + "k/mQTug+Y8aEmVoqRIPMHq9ob+S9qd5lp09+MtYpwPfTPx/NN+xMEooXWW/ARfpGhWPkg/FuCu4z" + System.lineSeparator() + + "1tFmCqHgNcWirzMm3dQpF78muE9ng6OB2MXQwL4VgnVkxmlZNHbkR2v/t8MyZJxCy4g6cTMM3S/U" + System.lineSeparator() + + "Mt5/+aIB2JAuMKyuD+A=" + + "" + + "" + + "" + + "" + + "MIID1zCCAr+gAwIBAgIJALnUl/KSS74pMA0GCSqGSIb3DQEBCwUAMEoxDDAKBgNVBAoTA29yZzEW" + System.lineSeparator() + + "MBQGA1UECxMNZWxhc3RpY3NlYXJjaDEiMCAGA1UEAxMZRWxhc3RpY3NlYXJjaCBUZXN0IENsaWVu" + System.lineSeparator() + + "dDAeFw0xNTA5MjMxODUyNTVaFw0xOTA5MjIxODUyNTVaMEoxDDAKBgNVBAoTA29yZzEWMBQGA1UE" + System.lineSeparator() + + "CxMNZWxhc3RpY3NlYXJjaDEiMCAGA1UEAxMZRWxhc3RpY3NlYXJjaCBUZXN0IENsaWVudDCCASIw" + System.lineSeparator() + + "DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMKm+P6vDAff0c6BWKGdhnYoNl9HijLIgfU3d9CQ" + System.lineSeparator() + + "cqKtwT+yUW3DPSVjIfaLmDIGj6Hl8jTHWPB7ZP4fzhrPi6m4qlRGclJMECBuNASZFiPDtEDv3mso" + System.lineSeparator() + + "eqOKQet6n7PZvgpWM7hxYZO4P1aMKJtRsFAdvBAdZUnv0spR5G4UZTHzSKmMeanIKFkLaD0XVKiL" + System.lineSeparator() + + "Qu9/z9M6roDQeAEoCJ/8JsanG8ih2ymfPHIZuNyYIOrVekHN2zU6bnVn8/PCeZSjS6h5xYw+Jl5g" + System.lineSeparator() + + "zGI/n+F5CZ+THoH8pM4pGp6xRVzpiH12gvERGwgSIDXdn/+uZZj+4lE7n2ENRSOt5KcOGG99r60C" + System.lineSeparator() + + "AwEAAaOBvzCBvDAJBgNVHRMEAjAAMB0GA1UdDgQWBBSSFhBXNp7AaNrHdlgCV0mCEzt7ajCBjwYD" + System.lineSeparator() + + "VR0RBIGHMIGEgglsb2NhbGhvc3SCFWxvY2FsaG9zdC5sb2NhbGRvbWFpboIKbG9jYWxob3N0NIIX" + System.lineSeparator() + + "bG9jYWxob3N0NC5sb2NhbGRvbWFpbjSCCmxvY2FsaG9zdDaCF2xvY2FsaG9zdDYubG9jYWxkb21h" + System.lineSeparator() + + "aW42hwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMA0GCSqGSIb3DQEBCwUAA4IBAQANvAkddfLxn4/B" + System.lineSeparator() + + "CY4LY/1ET3d7ZRldjFTyjjHRYJ3CYBXWVahMskLxIcFNca8YjKfXoX8mcK+NQK/dAbGHXqk76yMl" + System.lineSeparator() + + "krKjh1OQiZ1YAX5ryYerGrZ99N3E9wnbn72bW3iumoLlqmTWlHEpMI0Ql6J75BQLTgKHxCPupVA5" + System.lineSeparator() + + "sTbWkKwGjXXAi84rUlzhDJOR8jk3/7ct0iZO8Hk6AWMcNix5Wka3IDGUXuEVevYRlxgVyCxcnZWC" + System.lineSeparator() + + "7JWREpar5aIPQFkY6VCEglxwUyXbHZw5T/u6XaKKnS7gz8RiwRh68ddSQJeEHi5e4onUD7bOCJgf" + System.lineSeparator() + + "siUwdiCkDbfN9Yum8OIpmBRs" + + "" + + "" + 
+ "" + + "urn:oasis:names:tc:SAML:2.0:nameid-format:persistent" + + "" + + "" + + "Hydra Kibana" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "Hydra" + + "Hydra" + + "https://hail.hydra/" + + "" + + "" + + "Wolfgang" + + "von Strucker" + + "baron.strucker@supreme.hydra" + + "" + + "" + + "Paul" + + "Ebersol" + + "pne@tech.hydra" + + "" + + "" + )); + assertValidXml(xml); + } + + public void testAssertionConsumerServiceIsRequired() { + final SamlSpMetadataBuilder builder = new SamlSpMetadataBuilder(Locale.US, "https://kibana.apps.hydra/"); + final IllegalStateException exception = expectThrows(IllegalStateException.class, builder::build); + assertThat(exception.getMessage(), Matchers.containsString("AssertionConsumerService URL")); + } + + public void testAttributeNameIsRequired() { + final SamlSpMetadataBuilder builder = new SamlSpMetadataBuilder(Locale.US, "https://kibana.example.net/"); + final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> builder.withAttribute("uid", "")); + assertThat(exception.getMessage(), Matchers.containsString("Attribute name")); + } + + private void assertValidXml(String xml) throws Exception { + SamlUtils.validate(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)), SamlMetadataCommand.METADATA_SCHEMA); + } +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlTestCase.java new file mode 100644 index 0000000000000..616dfab8fe0be --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlTestCase.java @@ -0,0 +1,136 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.security.authc.saml;
+
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.ElasticsearchSecurityException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.ssl.CertUtils;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.opensaml.security.credential.Credential;
+import org.opensaml.security.x509.X509Credential;
+import org.opensaml.security.x509.impl.X509KeyManagerX509CredentialAdapter;
+
+import javax.security.auth.x500.X500Principal;
+
+import java.io.IOException;
+import java.security.KeyPair;
+import java.security.KeyPairGenerator;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.security.PrivateKey;
+import java.security.UnrecoverableKeyException;
+import java.security.cert.Certificate;
+import java.security.cert.CertificateException;
+import java.security.cert.X509Certificate;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Locale;
+import java.util.stream.Collectors;
+
+import static org.hamcrest.Matchers.is;
+
+public abstract class SamlTestCase extends ESTestCase {
+
+    private static Locale restoreLocale;
+
+    @BeforeClass
+    public static void setupSaml() throws Exception {
+        Logger logger = Loggers.getLogger(SamlTestCase.class);
+        if (isTurkishLocale()) {
+            // See: https://github.com/elastic/x-pack-elasticsearch/issues/2815
+            logger.warn("Attempting to run SAML test on turkish-like locale, but that breaks OpenSAML. Switching to English.");
+            restoreLocale = Locale.getDefault();
+            Locale.setDefault(Locale.ENGLISH);
+        }
+        SamlUtils.initialize(logger);
+    }
+
+    private static boolean isTurkishLocale() {
+        return Locale.getDefault().getLanguage().equals(new Locale("tr").getLanguage())
+                || Locale.getDefault().getLanguage().equals(new Locale("az").getLanguage());
+    }
+
+    @AfterClass
+    public static void restoreLocale() throws Exception {
+        if (restoreLocale != null) {
+            Locale.setDefault(restoreLocale);
+            restoreLocale = null;
+        }
+    }
+
+    /**
+     * Generates a signed certificate together with the RSA key pair it belongs to.
+     * @see #createKeyPair(String)
+     * @return a {@link Tuple} of the signed {@link X509Certificate} and its {@link PrivateKey}
+     * @throws Exception
+     */
+    protected static Tuple<X509Certificate, PrivateKey> createKeyPair() throws Exception {
+        return createKeyPair("RSA");
+    }
+
+    /**
+     * Generates a key pair for the given algorithm and associates it with a signed certificate.
+     * For testing, the "EC" algorithm uses a 256 bit key size; other algorithms default to 2048 bits.
+     * @param algorithm the key algorithm, e.g. "RSA", "EC" or "DSA"
+     * @return a {@link Tuple} of the signed {@link X509Certificate} and its {@link PrivateKey}
+     * @throws Exception
+     */
+    protected static Tuple<X509Certificate, PrivateKey> createKeyPair(String algorithm) throws Exception {
+        KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance(algorithm);
+        final boolean useBigKeySizes = rarely();
+        switch (algorithm) {
+            case "EC":
+                keyPairGenerator.initialize(randomFrom(256, 384));
+                break;
+            case "RSA":
+                keyPairGenerator.initialize(randomFrom(Arrays.stream(new int[] { 1024, 2048, 4096 }).boxed()
+                        .filter((ksize) -> (ksize <= 2048 || useBigKeySizes)).collect(Collectors.toList())));
+                break;
+            case "DSA":
+                keyPairGenerator.initialize(randomFrom(Arrays.stream(new int[] { 1024, 2048, 3072 }).boxed()
+                        .filter((ksize) -> (ksize <= 2048 || useBigKeySizes)).collect(Collectors.toList())));
+                break;
+            default:
+                keyPairGenerator.initialize(randomFrom(1024, 2048));
+        }
+        final KeyPair pair = keyPairGenerator.generateKeyPair();
+        final String name = randomAlphaOfLength(8);
+        final X509Certificate cert = CertUtils.generateSignedCertificate(new X500Principal("CN=test-" + name), null, pair, null, null, 30);
+        return new Tuple<>(cert, pair.getPrivate());
+    }
+
+    protected static List<Credential> buildOpenSamlCredential(final Tuple<X509Certificate, PrivateKey> keyPair) {
+        try {
+            return Arrays.asList(new X509KeyManagerX509CredentialAdapter(
+                    CertUtils.keyManager(new Certificate[] { keyPair.v1() }, keyPair.v2(), new char[0]), "key"));
+        } catch (Exception e) {
+            throw ExceptionsHelper.convertToRuntime(e);
+        }
+    }
+
+    protected static List<Credential> buildOpenSamlCredential(final List<Tuple<X509Certificate, PrivateKey>> keyPairs) {
+        final List<Credential> credentials = keyPairs.stream().map((keyPair) -> {
+            try {
+                return new X509KeyManagerX509CredentialAdapter(
+                        CertUtils.keyManager(new Certificate[] { keyPair.v1() }, keyPair.v2(), new char[0]), "key");
+            } catch (Exception e) {
+                throw ExceptionsHelper.convertToRuntime(e);
+            }
+        }).collect(Collectors.toList());
+        return credentials;
+    }
+
+    protected ElasticsearchSecurityException expectSamlException(ThrowingRunnable runnable) {
+        final ElasticsearchSecurityException exception = expectThrows(ElasticsearchSecurityException.class, runnable);
+        assertThat("Exception " + exception + " should be a SAML exception", SamlUtils.isSamlException(exception), is(true));
+        return exception;
+    }
+}
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlUtilsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlUtilsTests.java
new file mode 100644
index 0000000000000..bce77654aa4be
--- /dev/null
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlUtilsTests.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.security.authc.saml; + +import static org.elasticsearch.test.TestMatchers.matchesPattern; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +public class SamlUtilsTests extends SamlTestCase { + + public void testGenerateSecureNCName() { + int previousLength = 0; + for (int bytes = randomIntBetween(1, 10); bytes <= 30; bytes += 5) { + final String name = SamlUtils.generateSecureNCName(bytes); + // See: http://www.datypic.com/sc/xsd/t-xsd_NCName.html + assertThat(name, matchesPattern("^[a-zA-Z_][a-zA-Z0-9_.-]*$")); + assertThat(name.length(), greaterThanOrEqualTo(bytes)); + assertThat(name.length(), greaterThan(previousLength)); + previousLength = name.length(); + } + } + +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SigningConfigurationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SigningConfigurationTests.java new file mode 100644 index 0000000000000..01d13601ea0bb --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SigningConfigurationTests.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.saml; + +import java.util.Arrays; + +import org.elasticsearch.common.util.set.Sets; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.opensaml.saml.common.SAMLObject; +import org.opensaml.saml.saml2.core.AuthnRequest; +import org.opensaml.saml.saml2.core.LogoutRequest; +import org.opensaml.security.x509.X509Credential; + +public class SigningConfigurationTests extends SamlTestCase { + + private static X509Credential credential; + + @BeforeClass + public static void setupCredential() throws Exception { + credential = (X509Credential)buildOpenSamlCredential(createKeyPair()).get(0); + } + + @AfterClass + public static void clearCredential() throws Exception { + credential = null; + } + + public void testShouldSignObject() throws Exception { + final AuthnRequest authnRequest = SamlUtils.buildObject(AuthnRequest.class, AuthnRequest.DEFAULT_ELEMENT_NAME); + final LogoutRequest logoutRequest = SamlUtils.buildObject(LogoutRequest.class, LogoutRequest.DEFAULT_ELEMENT_NAME); + + assertShouldSign(authnRequest, "AuthnRequest"); + assertShouldSign(logoutRequest, "LogoutRequest"); + assertShouldSign(authnRequest, "*"); + assertShouldSign(logoutRequest, "*"); + assertShouldSign(authnRequest, "AuthnRequest", "LogoutRequest"); + assertShouldSign(logoutRequest, "AuthnRequest", "LogoutRequest"); + + assertShouldNotSign(authnRequest, "LogoutRequest"); + assertShouldNotSign(logoutRequest, "AuthnRequest"); + assertShouldNotSign(authnRequest, new String[0]); + assertShouldNotSign(logoutRequest, new String[0]); + assertShouldNotSign(authnRequest, "foo", "bar", "baz"); + assertShouldNotSign(logoutRequest, "foo", "bar", "baz"); + } + + private void assertShouldSign(SAMLObject object, String... 
types) { + final SigningConfiguration signingConfiguration = getSigningConfiguration(types); + assertTrue("Configuration types " + Arrays.toString(types) + " should sign " + object, signingConfiguration.shouldSign(object)); + } + + private void assertShouldNotSign(SAMLObject object, String... types) { + final SigningConfiguration signingConfiguration = getSigningConfiguration(types); + assertFalse("Configuration types " + Arrays.toString(types) + " shouldn't sign " + object, signingConfiguration.shouldSign(object)); + } + + private SigningConfiguration getSigningConfiguration(String[] types) { + return new SigningConfiguration(Sets.newHashSet(types), credential); + } + +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/BCryptTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/BCryptTests.java new file mode 100644 index 0000000000000..40c4a8ed81646 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/BCryptTests.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.support; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.support.BCrypt; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.Matchers.is; + +/** + * Tests for the Bcrypt implementation specifically around modifications we have made + */ +public class BCryptTests extends ESTestCase { + /* + * This test checks that the BCrypt implementation can verify passwords correctly when being invoked from multiple + * threads all the time. This attempts to simulate authentication of many clients at once (without a cache). + * + * This test can be used to reproduce the issue in https://github.com/elastic/x-plugins/issues/589, but it is not + * 100% reliable unless memory parameters are changed such as lowering the heap size to something really small like + * 16M and the test is really slow since the issue depends on garbage collection and object finalization. 
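+     * Concretely, 100 threads each call BCrypt.checkpw against the same pre-computed hash in a tight loop, so any
+     * corruption of shared state surfaces as a failed password verification.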
+ */ + @AwaitsFix(bugUrl = "need a better way to test this") + public void testUnderLoad() throws Exception { + final String password = randomAlphaOfLengthBetween(10, 32); + final String bcrypt = BCrypt.hashpw(new SecureString(password), BCrypt.gensalt()); + + ExecutorService threadPoolExecutor = Executors.newFixedThreadPool(100); + try { + List> callables = new ArrayList<>(100); + + final AtomicBoolean failed = new AtomicBoolean(false); + for (int i = 0; i < 100; i++) { + callables.add(new Callable() { + @Override + public Boolean call() throws Exception { + for (int i = 0; i < 10000 && !failed.get(); i++) { + if (BCrypt.checkpw(new SecureString(password), bcrypt) == false) { + failed.set(true); + return false; + } + } + return true; + } + }); + } + + List> futures = threadPoolExecutor.invokeAll(callables); + for (Future future : futures) { + assertThat(future.get(), is(true)); + } + } finally { + threadPoolExecutor.shutdownNow(); + } + + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java new file mode 100644 index 0000000000000..87f62cd97a198 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java @@ -0,0 +1,547 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.support; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.support.BCrypt; +import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.user.User; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; + +import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; + +public class CachingUsernamePasswordRealmTests extends ESTestCase { + + private Settings globalSettings; + + @Before + public void setup() { + 
globalSettings = Settings.builder().put("path.home", createTempDir()).build(); + } + + public void testSettings() throws Exception { + String hashAlgo = randomFrom("bcrypt", "bcrypt4", "bcrypt5", "bcrypt6", "bcrypt7", "bcrypt8", "bcrypt9", + "sha1", "ssha256", "md5", "clear_text", "noop"); + int maxUsers = randomIntBetween(10, 100); + TimeValue ttl = TimeValue.timeValueMinutes(randomIntBetween(10, 20)); + Settings settings = Settings.builder() + .put(CachingUsernamePasswordRealmSettings.CACHE_HASH_ALGO_SETTING.getKey(), hashAlgo) + .put(CachingUsernamePasswordRealmSettings.CACHE_MAX_USERS_SETTING.getKey(), maxUsers) + .put(CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.getKey(), ttl) + .build(); + + RealmConfig config = new RealmConfig("test_realm", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(Settings.EMPTY)); + CachingUsernamePasswordRealm realm = new CachingUsernamePasswordRealm("test", config) { + @Override + protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { + listener.onResponse(AuthenticationResult.success(new User("username", new String[]{"r1", "r2", "r3"}))); + } + + @Override + protected void doLookupUser(String username, ActionListener listener) { + listener.onFailure(new UnsupportedOperationException("this method should not be called")); + } + }; + + assertThat(realm.hasher, sameInstance(Hasher.resolve(hashAlgo))); + } + + public void testAuthCache() { + AlwaysAuthenticateCachingRealm realm = new AlwaysAuthenticateCachingRealm(globalSettings); + SecureString pass = new SecureString("pass"); + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("a", pass), future); + future.actionGet(); + future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("b", pass), future); + future.actionGet(); + future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("c", pass), future); + future.actionGet(); + + assertThat(realm.authInvocationCounter.intValue(), is(3)); + + future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("a", pass), future); + future.actionGet(); + future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("b", pass), future); + future.actionGet(); + future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("c", pass), future); + future.actionGet(); + + assertThat(realm.authInvocationCounter.intValue(), is(3)); + assertThat(realm.lookupInvocationCounter.intValue(), is(0)); + } + + public void testLookupCache() { + AlwaysAuthenticateCachingRealm realm = new AlwaysAuthenticateCachingRealm(globalSettings); + PlainActionFuture future = new PlainActionFuture<>(); + realm.lookupUser("a", future); + future.actionGet(); + future = new PlainActionFuture<>(); + realm.lookupUser("b", future); + future.actionGet(); + future = new PlainActionFuture<>(); + realm.lookupUser("c", future); + future.actionGet(); + + assertThat(realm.lookupInvocationCounter.intValue(), is(3)); + future = new PlainActionFuture<>(); + realm.lookupUser("a", future); + future.actionGet(); + future = new PlainActionFuture<>(); + realm.lookupUser("b", future); + future.actionGet(); + future = new PlainActionFuture<>(); + realm.lookupUser("c", future); + future.actionGet(); + + assertThat(realm.authInvocationCounter.intValue(), is(0)); + assertThat(realm.lookupInvocationCounter.intValue(), is(3)); + } + + public void testLookupAndAuthCache() { + 
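+        // Exercises the interplay between the lookup and authentication caches: a prior lookup does not satisfy a
+        // later authenticate call, but a successful authentication does populate the cache used by subsequent lookups.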
AlwaysAuthenticateCachingRealm realm = new AlwaysAuthenticateCachingRealm(globalSettings); + // lookup first + PlainActionFuture<User> lookupFuture = new PlainActionFuture<>(); + realm.lookupUser("a", lookupFuture); + User lookedUp = lookupFuture.actionGet(); + assertThat(realm.lookupInvocationCounter.intValue(), is(1)); + assertThat(realm.authInvocationCounter.intValue(), is(0)); + assertThat(lookedUp.roles(), arrayContaining("lookupRole1", "lookupRole2")); + + // now authenticate + PlainActionFuture<AuthenticationResult> authFuture = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("a", new SecureString("pass")), authFuture); + AuthenticationResult authResult = authFuture.actionGet(); + assertThat(authResult.getStatus(), is(AuthenticationResult.Status.SUCCESS)); + User user = authResult.getUser(); + assertThat(realm.lookupInvocationCounter.intValue(), is(1)); + assertThat(realm.authInvocationCounter.intValue(), is(1)); + assertThat(user.roles(), arrayContaining("testRole1", "testRole2")); + assertThat(user, not(sameInstance(lookedUp))); + + // authenticate a different user first + authFuture = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("b", new SecureString("pass")), authFuture); + authResult = authFuture.actionGet(); + assertThat(authResult.getStatus(), is(AuthenticationResult.Status.SUCCESS)); + user = authResult.getUser(); + assertThat(realm.lookupInvocationCounter.intValue(), is(1)); + assertThat(realm.authInvocationCounter.intValue(), is(2)); + assertThat(user.roles(), arrayContaining("testRole1", "testRole2")); + //now lookup b + lookupFuture = new PlainActionFuture<>(); + realm.lookupUser("b", lookupFuture); + lookedUp = lookupFuture.actionGet(); + assertThat(realm.lookupInvocationCounter.intValue(), is(1)); + assertThat(realm.authInvocationCounter.intValue(), is(2)); + assertThat(user, sameInstance(lookedUp)); + } + + public void testCacheChangePassword() { + AlwaysAuthenticateCachingRealm realm = new AlwaysAuthenticateCachingRealm(globalSettings); + + String user = "testUser"; + SecureString pass1 = new SecureString("pass"); + SecureString pass2 = new SecureString("password"); + + PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken(user, pass1), future); + future.actionGet(); + future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken(user, pass1), future); + future.actionGet(); + + assertThat(realm.authInvocationCounter.intValue(), is(1)); + + future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken(user, pass2), future); + future.actionGet(); + future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken(user, pass2), future); + future.actionGet(); + + assertThat(realm.authInvocationCounter.intValue(), is(2)); + } + + public void testCacheDisabledUser() { + AlwaysAuthenticateCachingRealm realm = new AlwaysAuthenticateCachingRealm(globalSettings); + realm.setUsersEnabled(false); + + String user = "testUser"; + SecureString password = new SecureString("password"); + + PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken(user, password), future); + assertThat(future.actionGet().getUser().enabled(), equalTo(false)); + + assertThat(realm.authInvocationCounter.intValue(), is(1)); + + realm.setUsersEnabled(true); + future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken(user, password), future); + future.actionGet(); + 
assertThat(future.actionGet().getUser().enabled(), equalTo(true)); + + assertThat(realm.authInvocationCounter.intValue(), is(2)); + + future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken(user, password), future); + future.actionGet(); + assertThat(future.actionGet().getUser().enabled(), equalTo(true)); + + assertThat(realm.authInvocationCounter.intValue(), is(2)); + } + + public void testCacheWithVeryLowTtlExpiresBetweenAuthenticateCalls() throws InterruptedException { + TimeValue ttl = TimeValue.timeValueNanos(randomIntBetween(10, 100)); + Settings settings = Settings.builder() + .put(CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.getKey(), ttl) + .build(); + RealmConfig config = new RealmConfig("test_cache_ttl", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(Settings.EMPTY)); + AlwaysAuthenticateCachingRealm realm = new AlwaysAuthenticateCachingRealm(config); + + final UsernamePasswordToken authToken = new UsernamePasswordToken("the-user", new SecureString("the-password")); + + // authenticate + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(authToken, future); + final User user1 = future.actionGet().getUser(); + assertThat(user1.roles(), arrayContaining("testRole1", "testRole2")); + assertThat(realm.authInvocationCounter.intValue(), is(1)); + + Thread.sleep(2); + + // authenticate + future = new PlainActionFuture<>(); + realm.authenticate(authToken, future); + final User user2 = future.actionGet().getUser(); + assertThat(user2.roles(), arrayContaining("testRole1", "testRole2")); + assertThat(user2, not(sameInstance(user1))); + assertThat(realm.authInvocationCounter.intValue(), is(2)); + } + + public void testReadsDoNotPreventCacheExpiry() throws InterruptedException { + TimeValue ttl = TimeValue.timeValueMillis(250); + Settings settings = Settings.builder() + .put(CachingUsernamePasswordRealmSettings.CACHE_TTL_SETTING.getKey(), ttl) + .build(); + RealmConfig config = new RealmConfig("test_cache_ttl", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(Settings.EMPTY)); + AlwaysAuthenticateCachingRealm realm = new AlwaysAuthenticateCachingRealm(config); + + final UsernamePasswordToken authToken = new UsernamePasswordToken("the-user", new SecureString("the-password")); + PlainActionFuture future = new PlainActionFuture<>(); + + // authenticate + realm.authenticate(authToken, future); + final long start = System.currentTimeMillis(); + final User user1 = future.actionGet().getUser(); + assertThat(realm.authInvocationCounter.intValue(), is(1)); + + // After 100 ms (from the original start time), authenticate (read from cache). We don't care about the result + sleepUntil(start + 100); + future = new PlainActionFuture<>(); + realm.authenticate(authToken, future); + future.actionGet(); + + // After 200 ms (from the original start time), authenticate (read from cache). We don't care about the result + sleepUntil(start + 200); + future = new PlainActionFuture<>(); + realm.authenticate(authToken, future); + future.actionGet(); + + // After 300 ms (from the original start time), authenticate again. 
The cache entry should have expired (despite the previous reads) + sleepUntil(start + 300); + future = new PlainActionFuture<>(); + realm.authenticate(authToken, future); + final User user2 = future.actionGet().getUser(); + assertThat(user2, not(sameInstance(user1))); + // Due to slow VMs etc, the cache might have expired more than once during the test, but we can accept that. + // We have other tests that verify caching works - this test just checks that it expires even when there are repeated reads. + assertThat(realm.authInvocationCounter.intValue(), greaterThan(1)); + } + + private void sleepUntil(long until) throws InterruptedException { + final long sleep = until - System.currentTimeMillis(); + if (sleep > 0) { + Thread.sleep(sleep); + } + } + + public void testAuthenticateContract() throws Exception { + Realm realm = new FailingAuthenticationRealm(Settings.EMPTY, globalSettings); + PlainActionFuture future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("user", new SecureString("pass")), future); + User user = future.actionGet().getUser(); + assertThat(user, nullValue()); + + realm = new ThrowingAuthenticationRealm(Settings.EMPTY, globalSettings); + future = new PlainActionFuture<>(); + realm.authenticate(new UsernamePasswordToken("user", new SecureString("pass")), future); + RuntimeException e = expectThrows(RuntimeException.class, future::actionGet); + assertThat(e.getMessage(), containsString("whatever exception")); + } + + public void testLookupContract() throws Exception { + Realm realm = new FailingAuthenticationRealm(Settings.EMPTY, globalSettings); + PlainActionFuture future = new PlainActionFuture<>(); + realm.lookupUser("user", future); + User user = future.actionGet(); + assertThat(user, nullValue()); + + realm = new ThrowingAuthenticationRealm(Settings.EMPTY, globalSettings); + future = new PlainActionFuture<>(); + realm.lookupUser("user", future); + RuntimeException e = expectThrows(RuntimeException.class, future::actionGet); + assertThat(e.getMessage(), containsString("lookup exception")); + } + + public void testCacheConcurrency() throws Exception { + final String username = "username"; + final SecureString password = SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING; + final SecureString randomPassword = new SecureString(randomAlphaOfLength(password.length()).toCharArray()); + + final String passwordHash = new String(Hasher.BCRYPT.hash(password)); + RealmConfig config = new RealmConfig("test_realm", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(Settings.EMPTY)); + final CachingUsernamePasswordRealm realm = new CachingUsernamePasswordRealm("test", config) { + @Override + protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { + // do something slow + if (BCrypt.checkpw(token.credentials(), passwordHash)) { + listener.onResponse(AuthenticationResult.success(new User(username, new String[]{"r1", "r2", "r3"}))); + } else { + listener.onResponse(AuthenticationResult.unsuccessful("Incorrect password", null)); + } + } + + @Override + protected void doLookupUser(String username, ActionListener listener) { + listener.onFailure(new UnsupportedOperationException("this method should not be called")); + } + }; + + final CountDownLatch latch = new CountDownLatch(1); + final int numberOfProcessors = Runtime.getRuntime().availableProcessors(); + final int numberOfThreads = scaledRandomIntBetween((numberOfProcessors + 1) / 2, numberOfProcessors * 3); + final int 
numberOfIterations = scaledRandomIntBetween(20, 100); + List threads = new ArrayList<>(); + for (int i = 0; i < numberOfThreads; i++) { + final boolean invalidPassword = randomBoolean(); + threads.add(new Thread() { + @Override + public void run() { + try { + latch.await(); + for (int i = 0; i < numberOfIterations; i++) { + UsernamePasswordToken token = new UsernamePasswordToken(username, invalidPassword ? randomPassword : password); + + realm.authenticate(token, ActionListener.wrap((result) -> { + if (invalidPassword && result.isAuthenticated()) { + throw new RuntimeException("invalid password led to an authenticated user: " + result); + } else if (invalidPassword == false && result.isAuthenticated() == false) { + throw new RuntimeException("proper password led to an unauthenticated result: " + result); + } + }, (e) -> { + logger.error("caught exception", e); + fail("unexpected exception - " + e); + })); + } + + } catch (InterruptedException e) { + } + } + }); + } + + for (Thread thread : threads) { + thread.start(); + } + latch.countDown(); + for (Thread thread : threads) { + thread.join(); + } + } + + public void testUserLookupConcurrency() throws Exception { + final String username = "username"; + + RealmConfig config = new RealmConfig("test_realm", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(Settings.EMPTY)); + final CachingUsernamePasswordRealm realm = new CachingUsernamePasswordRealm("test", config) { + @Override + protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { + listener.onFailure(new UnsupportedOperationException("authenticate should not be called!")); + } + + @Override + protected void doLookupUser(String username, ActionListener listener) { + listener.onResponse(new User(username, new String[]{"r1", "r2", "r3"})); + } + }; + + final CountDownLatch latch = new CountDownLatch(1); + final int numberOfProcessors = Runtime.getRuntime().availableProcessors(); + final int numberOfThreads = scaledRandomIntBetween(numberOfProcessors, numberOfProcessors * 3); + final int numberOfIterations = scaledRandomIntBetween(10000, 100000); + List threads = new ArrayList<>(); + for (int i = 0; i < numberOfThreads; i++) { + threads.add(new Thread() { + @Override + public void run() { + try { + latch.await(); + for (int i = 0; i < numberOfIterations; i++) { + realm.lookupUser(username, ActionListener.wrap((user) -> { + if (user == null) { + throw new RuntimeException("failed to lookup user"); + } + }, (e) -> { + logger.error("caught exception", e); + fail("unexpected exception"); + })); + } + + } catch (InterruptedException e) { + } + } + }); + } + + for (Thread thread : threads) { + thread.start(); + } + latch.countDown(); + for (Thread thread : threads) { + thread.join(); + } + } + + static class FailingAuthenticationRealm extends CachingUsernamePasswordRealm { + + FailingAuthenticationRealm(Settings settings, Settings global) { + super("failing", new RealmConfig("failing-test", settings, global, TestEnvironment.newEnvironment(global), + new ThreadContext(Settings.EMPTY))); + } + + @Override + protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { + listener.onResponse(AuthenticationResult.notHandled()); + } + + @Override + protected void doLookupUser(String username, ActionListener listener) { + listener.onResponse(null); + } + } + + static class ThrowingAuthenticationRealm extends CachingUsernamePasswordRealm { + + ThrowingAuthenticationRealm(Settings settings, Settings 
globalSettings) { + super("throwing", new RealmConfig("throwing-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(Settings.EMPTY))); + } + + @Override + protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { + listener.onFailure(new RuntimeException("whatever exception")); + } + + @Override + protected void doLookupUser(String username, ActionListener listener) { + listener.onFailure(new RuntimeException("lookup exception")); + } + } + + static class AlwaysAuthenticateCachingRealm extends CachingUsernamePasswordRealm { + + public final AtomicInteger authInvocationCounter = new AtomicInteger(0); + public final AtomicInteger lookupInvocationCounter = new AtomicInteger(0); + + private boolean usersEnabled = true; + + AlwaysAuthenticateCachingRealm(Settings globalSettings) { + this(new RealmConfig("always-test", Settings.EMPTY, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(Settings.EMPTY))); + } + + AlwaysAuthenticateCachingRealm(RealmConfig config) { + super("always", config); + } + + void setUsersEnabled(boolean usersEnabled) { + this.usersEnabled = usersEnabled; + } + + @Override + protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { + authInvocationCounter.incrementAndGet(); + final User user = new User(token.principal(), new String[]{"testRole1", "testRole2"}, null, null, emptyMap(), usersEnabled); + listener.onResponse(AuthenticationResult.success(user)); + } + + @Override + protected void doLookupUser(String username, ActionListener listener) { + lookupInvocationCounter.incrementAndGet(); + listener.onResponse(new User(username, new String[]{"lookupRole1", "lookupRole2"})); + } + } + + static class LookupNotSupportedRealm extends CachingUsernamePasswordRealm { + + public final AtomicInteger authInvocationCounter = new AtomicInteger(0); + public final AtomicInteger lookupInvocationCounter = new AtomicInteger(0); + + LookupNotSupportedRealm(Settings globalSettings) { + super("lookup", new RealmConfig("lookup-notsupported-test", Settings.EMPTY, globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY))); + } + + @Override + protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { + authInvocationCounter.incrementAndGet(); + listener.onResponse(AuthenticationResult.success(new User(token.principal(), new String[]{"testRole1", "testRole2"}))); + } + + @Override + protected void doLookupUser(String username, ActionListener listener) { + lookupInvocationCounter.incrementAndGet(); + listener.onFailure(new UnsupportedOperationException("don't call lookup if lookup isn't supported!!!")); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DistinguishedNamePredicateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DistinguishedNamePredicateTests.java new file mode 100644 index 0000000000000..d04f0ad7f9383 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DistinguishedNamePredicateTests.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.support; + +import com.unboundid.ldap.sdk.DN; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression.FieldValue; + +import java.util.Locale; +import java.util.function.Predicate; + +import static org.hamcrest.Matchers.equalTo; + +public class DistinguishedNamePredicateTests extends ESTestCase { + + public void testMatching() throws Exception { + String randomDn = "CN=" + randomAlphaOfLengthBetween(3, 12) + + ",OU=" + randomAlphaOfLength(4) + + ", O=" + randomAlphaOfLengthBetween(2, 6); + + // Randomly enter the DN in mixed case, lower case or upper case; + final String inputDn; + if (randomBoolean()) { + inputDn = randomBoolean() ? randomDn.toLowerCase(Locale.ENGLISH) : randomDn.toUpperCase(Locale.ENGLISH); + } else { + inputDn = randomDn; + } + final Predicate predicate = new UserRoleMapper.DistinguishedNamePredicate(inputDn); + + assertPredicate(predicate, randomDn, true); + assertPredicate(predicate, randomDn.toLowerCase(Locale.ROOT), true); + assertPredicate(predicate, randomDn.toUpperCase(Locale.ROOT), true); + assertPredicate(predicate, "/" + inputDn + "/", true); + assertPredicate(predicate, new DN(randomDn).toNormalizedString(), true); + assertPredicate(predicate, "*," + new DN(randomDn).getParent().toNormalizedString(), true); + assertPredicate(predicate, "*," + new DN(inputDn).getParent().getParent().toNormalizedString(), true); + assertPredicate(predicate, randomDn.replaceFirst(".*,", "*,"), true); + assertPredicate(predicate, randomDn.replaceFirst("[^,]*,", "*, "), true); + + assertPredicate(predicate, randomDn + ",CN=AU", false); + assertPredicate(predicate, "X" + randomDn, false); + assertPredicate(predicate, "", false); + assertPredicate(predicate, 1.23, false); + assertPredicate(predicate, true, false); + assertPredicate(predicate, null, false); + } + + public void testParsingMalformedInput() { + Predicate predicate = new UserRoleMapper.DistinguishedNamePredicate(null); + assertPredicate(predicate, null, true); + assertPredicate(predicate, "", false); + assertPredicate(predicate, randomAlphaOfLengthBetween(1, 8), false); + + predicate = new UserRoleMapper.DistinguishedNamePredicate(""); + assertPredicate(predicate, null, false); + assertPredicate(predicate, "", true); + assertPredicate(predicate, randomAlphaOfLengthBetween(1, 8), false); + + predicate = new UserRoleMapper.DistinguishedNamePredicate("foo="); + assertPredicate(predicate, null, false); + assertPredicate(predicate, "foo", false); + assertPredicate(predicate, "foo=", true); + assertPredicate(predicate, randomAlphaOfLengthBetween(5, 12), false); + + predicate = new UserRoleMapper.DistinguishedNamePredicate("=bar"); + assertPredicate(predicate, null, false); + assertPredicate(predicate, "bar", false); + assertPredicate(predicate, "=bar", true); + assertPredicate(predicate, randomAlphaOfLengthBetween(5, 12), false); + } + + private void assertPredicate(Predicate predicate, Object value, boolean expected) { + assertThat("Predicate [" + predicate + "] match [" + value + "]", predicate.test(new FieldValue(value)), equalTo(expected)); + } +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DnRoleMapperTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DnRoleMapperTests.java new file mode 100644 index 0000000000000..f6d18b7cfc106 --- /dev/null +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/DnRoleMapperTests.java @@ -0,0 +1,333 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.support; + +import com.unboundid.ldap.sdk.DN; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.security.audit.logfile.CapturingLogger; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings; +import org.junit.After; +import org.junit.Before; + +import java.io.BufferedWriter; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.nio.file.StandardOpenOption; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasItems; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class DnRoleMapperTests extends ESTestCase { + + private static final String ROLE_MAPPING_FILE_SETTING = DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING.getKey(); + private static final String USE_UNMAPPED_GROUPS_AS_ROLES_SETTING_KEY = + DnRoleMapperSettings.USE_UNMAPPED_GROUPS_AS_ROLES_SETTING.getKey(); + + private static final String[] STARK_GROUP_DNS = new String[] { + //groups can be named by different attributes, depending on the directory, + //we don't care what it is named by + "cn=shield,ou=marvel,o=superheros", + "cn=avengers,ou=marvel,o=superheros", + "group=genius, dc=mit, dc=edu", + "groupName = billionaire , ou = acme", + "gid = playboy , dc = example , dc = com", + "groupid=philanthropist,ou=groups,dc=unitedway,dc=org" + }; + + protected Settings settings; + protected Environment env; + protected ThreadPool threadPool; + + @Before + public void init() throws IOException { + settings = Settings.builder() + .put("resource.reload.interval.high", "100ms") + .put("path.home", createTempDir()) + .build(); + env = TestEnvironment.newEnvironment(settings); + if (Files.exists(env.configFile()) == false) { + Files.createDirectory(env.configFile()); + } + threadPool = new TestThreadPool("test"); + } + + @After + public void shutdown() throws InterruptedException { + terminate(threadPool); + } + + public void 
testMapper_ConfiguredWithUnreadableFile() throws Exception { + Path file = createTempFile("", ".yml"); + // writing in utf_16 should cause a parsing error as we try to read the file in utf_8 + Files.write(file, Collections.singletonList("aldlfkjldjdflkjd"), StandardCharsets.UTF_16); + + ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool); + DnRoleMapper mapper = createMapper(file, watcherService); + assertThat(mapper.mappingsCount(), is(0)); + } + + public void testMapper_AutoReload() throws Exception { + Path roleMappingFile = getDataPath("role_mapping.yml"); + Path file = env.configFile().resolve("test_role_mapping.yml"); + Files.copy(roleMappingFile, file, StandardCopyOption.REPLACE_EXISTING); + + final CountDownLatch latch = new CountDownLatch(1); + + ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool); + DnRoleMapper mapper = createMapper(file, watcherService); + mapper.addListener(latch::countDown); + + Set roles = mapper.resolveRoles("", Collections.singletonList("cn=shield,ou=marvel,o=superheros")); + assertThat(roles, notNullValue()); + assertThat(roles.size(), is(1)); + assertThat(roles, contains("security")); + + watcherService.start(); + + try (BufferedWriter writer = Files.newBufferedWriter(file, StandardCharsets.UTF_8, StandardOpenOption.APPEND)) { + writer.newLine(); + writer.append("fantastic_four:\n") + .append(" - \"cn=fantastic_four,ou=marvel,o=superheros\""); + } + + if (!latch.await(5, TimeUnit.SECONDS)) { + fail("Waited too long for the updated file to be picked up"); + } + + roles = mapper.resolveRoles("", Collections.singletonList("cn=fantastic_four,ou=marvel,o=superheros")); + assertThat(roles, notNullValue()); + assertThat(roles.size(), is(1)); + assertThat(roles, contains("fantastic_four")); + } + + public void testMapper_AutoReload_WithParseFailures() throws Exception { + Path roleMappingFile = getDataPath("role_mapping.yml"); + Path file = env.configFile().resolve("test_role_mapping.yml"); + Files.copy(roleMappingFile, file, StandardCopyOption.REPLACE_EXISTING); + + final CountDownLatch latch = new CountDownLatch(1); + + ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool); + DnRoleMapper mapper = createMapper(file, watcherService); + mapper.addListener(latch::countDown); + + Set roles = mapper.resolveRoles("", Collections.singletonList("cn=shield,ou=marvel,o=superheros")); + assertThat(roles, notNullValue()); + assertThat(roles.size(), is(1)); + assertThat(roles, contains("security")); + + watcherService.start(); + + // now replacing the content of the users file with something that cannot be read + Files.write(file, Collections.singletonList("aldlfkjldjdflkjd"), StandardCharsets.UTF_16); + + if (!latch.await(5, TimeUnit.SECONDS)) { + fail("Waited too long for the updated file to be picked up"); + } + + assertThat(mapper.mappingsCount(), is(0)); + } + + public void testMapperAutoReloadWithoutListener() throws Exception { + Path roleMappingFile = getDataPath("role_mapping.yml"); + Path file = env.configFile().resolve("test_role_mapping.yml"); + Files.copy(roleMappingFile, file, StandardCopyOption.REPLACE_EXISTING); + + ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool); + DnRoleMapper mapper = createMapper(file, watcherService); + Set roles = mapper.resolveRoles("", Collections.singletonList("cn=shield,ou=marvel,o=superheros")); + assertThat(roles, notNullValue()); + assertThat(roles.size(), is(1)); + 
assertThat(roles, contains("security")); + + watcherService.start(); + + try (BufferedWriter writer = Files.newBufferedWriter(file, StandardCharsets.UTF_8, StandardOpenOption.APPEND)) { + writer.newLine(); + writer.append("fantastic_four:\n") + .append(" - \"cn=fantastic_four,ou=marvel,o=superheros\""); + } + + assertBusy(() -> { + Set resolvedRoles = mapper.resolveRoles("", + Collections.singletonList("cn=fantastic_four,ou=marvel,o=superheros")); + assertThat(resolvedRoles, notNullValue()); + assertThat(resolvedRoles.size(), is(1)); + assertThat(resolvedRoles, contains("fantastic_four")); + }, 2L, TimeUnit.SECONDS); + } + + public void testAddNullListener() throws Exception { + Path file = env.configFile().resolve("test_role_mapping.yml"); + Files.write(file, Collections.singleton("")); + ResourceWatcherService watcherService = new ResourceWatcherService(settings, threadPool); + DnRoleMapper mapper = createMapper(file, watcherService); + NullPointerException e = expectThrows(NullPointerException.class, () -> mapper.addListener(null)); + assertEquals("listener cannot be null", e.getMessage()); + } + + public void testParseFile() throws Exception { + Path file = getDataPath("role_mapping.yml"); + Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + Map> mappings = DnRoleMapper.parseFile(file, logger, "_type", "_name", false); + assertThat(mappings, notNullValue()); + assertThat(mappings.size(), is(3)); + + DN dn = new DN("cn=avengers,ou=marvel,o=superheros"); + assertThat(mappings, hasKey(dn)); + Set roles = mappings.get(dn); + assertThat(roles, notNullValue()); + assertThat(roles, hasSize(2)); + assertThat(roles, containsInAnyOrder("security", "avenger")); + + dn = new DN("cn=shield,ou=marvel,o=superheros"); + assertThat(mappings, hasKey(dn)); + roles = mappings.get(dn); + assertThat(roles, notNullValue()); + assertThat(roles, hasSize(1)); + assertThat(roles, contains("security")); + + dn = new DN("cn=Horatio Hornblower,ou=people,o=sevenSeas"); + assertThat(mappings, hasKey(dn)); + roles = mappings.get(dn); + assertThat(roles, notNullValue()); + assertThat(roles, hasSize(1)); + assertThat(roles, contains("avenger")); + } + + public void testParseFile_Empty() throws Exception { + Path file = createTempDir().resolve("foo.yaml"); + Files.createFile(file); + Logger logger = CapturingLogger.newCapturingLogger(Level.DEBUG); + Map> mappings = DnRoleMapper.parseFile(file, logger, "_type", "_name", false); + assertThat(mappings, notNullValue()); + assertThat(mappings.isEmpty(), is(true)); + List events = CapturingLogger.output(logger.getName(), Level.DEBUG); + assertThat(events.size(), is(1)); + assertThat(events.get(0), containsString("[0] role mappings found")); + } + + public void testParseFile_WhenFileDoesNotExist() throws Exception { + Path file = createTempDir().resolve(randomAlphaOfLength(10)); + Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + Map> mappings = DnRoleMapper.parseFile(file, logger, "_type", "_name", false); + assertThat(mappings, notNullValue()); + assertThat(mappings.isEmpty(), is(true)); + + final ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> { + DnRoleMapper.parseFile(file, logger, "_type", "_name", true); + }); + assertThat(exception.getMessage(), containsString(file.toString())); + assertThat(exception.getMessage(), containsString("does not exist")); + assertThat(exception.getMessage(), containsString("_name")); + } + + public void testParseFile_WhenCannotReadFile() throws Exception { + Path file = 
createTempFile("", ".yml"); + // writing in utf_16 should cause a parsing error as we try to read the file in utf_8 + Files.write(file, Collections.singletonList("aldlfkjldjdflkjd"), StandardCharsets.UTF_16); + Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + try { + DnRoleMapper.parseFile(file, logger, "_type", "_name", false); + fail("expected a parse failure"); + } catch (Exception e) { + this.logger.info("expected", e); + } + } + + public void testParseFileLenient_WhenCannotReadFile() throws Exception { + Path file = createTempFile("", ".yml"); + // writing in utf_16 should cause a parsing error as we try to read the file in utf_8 + Files.write(file, Collections.singletonList("aldlfkjldjdflkjd"), StandardCharsets.UTF_16); + Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + Map> mappings = DnRoleMapper.parseFileLenient(file, logger, "_type", "_name"); + assertThat(mappings, notNullValue()); + assertThat(mappings.isEmpty(), is(true)); + List events = CapturingLogger.output(logger.getName(), Level.ERROR); + assertThat(events.size(), is(1)); + assertThat(events.get(0), containsString("failed to parse role mappings file")); + } + + public void testYaml() throws Exception { + Path file = getDataPath("role_mapping.yml"); + Settings ldapSettings = Settings.builder() + .put(ROLE_MAPPING_FILE_SETTING, file.toAbsolutePath()) + .build(); + RealmConfig config = new RealmConfig("ldap1", ldapSettings, settings, TestEnvironment.newEnvironment(settings), + new ThreadContext(Settings.EMPTY)); + + DnRoleMapper mapper = new DnRoleMapper(config, new ResourceWatcherService(settings, threadPool)); + + Set roles = mapper.resolveRoles("", Arrays.asList(STARK_GROUP_DNS)); + + //verify + assertThat(roles, hasItems("security", "avenger")); + } + + public void testRelativeDN() { + Settings ldapSettings = Settings.builder() + .put(USE_UNMAPPED_GROUPS_AS_ROLES_SETTING_KEY, true) + .build(); + RealmConfig config = new RealmConfig("ldap1", ldapSettings, settings, TestEnvironment.newEnvironment(settings), + new ThreadContext(Settings.EMPTY)); + + DnRoleMapper mapper = new DnRoleMapper(config, new ResourceWatcherService(settings, threadPool)); + + Set roles = mapper.resolveRoles("", Arrays.asList(STARK_GROUP_DNS)); + assertThat(roles, hasItems("genius", "billionaire", "playboy", "philanthropist", "shield", "avengers")); + } + + public void testUserDNMapping() throws Exception { + Path file = getDataPath("role_mapping.yml"); + Settings ldapSettings = Settings.builder() + .put(ROLE_MAPPING_FILE_SETTING, file.toAbsolutePath()) + .put(USE_UNMAPPED_GROUPS_AS_ROLES_SETTING_KEY, false) + .build(); + RealmConfig config = new RealmConfig("ldap-userdn-role", ldapSettings, settings, TestEnvironment.newEnvironment(settings), + new ThreadContext(Settings.EMPTY)); + + DnRoleMapper mapper = new DnRoleMapper(config, new ResourceWatcherService(settings, threadPool)); + + Set roles = mapper.resolveRoles("cn=Horatio Hornblower,ou=people,o=sevenSeas", Collections.emptyList()); + assertThat(roles, hasItem("avenger")); + } + + protected DnRoleMapper createMapper(Path file, ResourceWatcherService watcherService) { + Settings realmSettings = Settings.builder() + .put("files.role_mapping", file.toAbsolutePath()) + .build(); + RealmConfig config = new RealmConfig("ad-group-mapper-test", realmSettings, settings, env, new ThreadContext(Settings.EMPTY)); + return new DnRoleMapper(config, watcherService); + } +} diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/HasherTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/HasherTests.java new file mode 100644 index 0000000000000..0a8e8e9ac3936 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/HasherTests.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.support; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; + +import static org.hamcrest.Matchers.sameInstance; + +public class HasherTests extends ESTestCase { + public void testBcryptFamilySelfGenerated() throws Exception { + testHasherSelfGenerated(Hasher.BCRYPT); + testHasherSelfGenerated(Hasher.BCRYPT4); + testHasherSelfGenerated(Hasher.BCRYPT5); + testHasherSelfGenerated(Hasher.BCRYPT6); + testHasherSelfGenerated(Hasher.BCRYPT7); + testHasherSelfGenerated(Hasher.BCRYPT8); + testHasherSelfGenerated(Hasher.BCRYPT9); + } + + public void testMd5SelfGenerated() throws Exception { + testHasherSelfGenerated(Hasher.MD5); + } + + public void testSha1SelfGenerated() throws Exception { + testHasherSelfGenerated(Hasher.SHA1); + } + + public void testSSHA256SelfGenerated() throws Exception { + testHasherSelfGenerated(Hasher.SSHA256); + } + + public void testNoopSelfGenerated() throws Exception { + testHasherSelfGenerated(Hasher.NOOP); + } + + public void testResolve() throws Exception { + assertThat(Hasher.resolve("bcrypt"), sameInstance(Hasher.BCRYPT)); + assertThat(Hasher.resolve("bcrypt4"), sameInstance(Hasher.BCRYPT4)); + assertThat(Hasher.resolve("bcrypt5"), sameInstance(Hasher.BCRYPT5)); + assertThat(Hasher.resolve("bcrypt6"), sameInstance(Hasher.BCRYPT6)); + assertThat(Hasher.resolve("bcrypt7"), sameInstance(Hasher.BCRYPT7)); + assertThat(Hasher.resolve("bcrypt8"), sameInstance(Hasher.BCRYPT8)); + assertThat(Hasher.resolve("bcrypt9"), sameInstance(Hasher.BCRYPT9)); + assertThat(Hasher.resolve("sha1"), sameInstance(Hasher.SHA1)); + assertThat(Hasher.resolve("md5"), sameInstance(Hasher.MD5)); + assertThat(Hasher.resolve("ssha256"), sameInstance(Hasher.SSHA256)); + assertThat(Hasher.resolve("noop"), sameInstance(Hasher.NOOP)); + assertThat(Hasher.resolve("clear_text"), sameInstance(Hasher.NOOP)); + try { + Hasher.resolve("unknown_hasher"); + fail("expected a settings error when trying to resolve an unknown hasher"); + } catch (IllegalArgumentException e) { + // expected + } + Hasher hasher = randomFrom(Hasher.values()); + assertThat(Hasher.resolve("unknown_hasher", hasher), sameInstance(hasher)); + } + + private static void testHasherSelfGenerated(Hasher hasher) throws Exception { + SecureString passwd = new SecureString(randomAlphaOfLength(10)); + char[] hash = hasher.hash(passwd); + assertTrue(hasher.verify(passwd, hash)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/RoleMappingFileBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/RoleMappingFileBootstrapCheckTests.java new file mode 100644 index 0000000000000..e0f71c40607ee --- /dev/null +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/RoleMappingFileBootstrapCheckTests.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.support; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; + +import org.elasticsearch.bootstrap.BootstrapCheck; +import org.elasticsearch.bootstrap.BootstrapContext; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings; +import org.junit.Before; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + +public class RoleMappingFileBootstrapCheckTests extends ESTestCase { + + private static final String ROLE_MAPPING_FILE_SETTING = DnRoleMapperSettings.ROLE_MAPPING_FILE_SETTING.getKey(); + + protected Settings settings; + + @Before + public void init() throws IOException { + settings = Settings.builder() + .put("resource.reload.interval.high", "100ms") + .put("path.home", createTempDir()) + .build(); + } + + public void testBootstrapCheckOfValidFile() { + Path file = getDataPath("role_mapping.yml"); + Settings ldapSettings = Settings.builder() + .put(ROLE_MAPPING_FILE_SETTING, file.toAbsolutePath()) + .build(); + RealmConfig config = new RealmConfig("ldap1", ldapSettings, settings, TestEnvironment.newEnvironment(settings), + new ThreadContext(Settings.EMPTY)); + final BootstrapCheck check = RoleMappingFileBootstrapCheck.create(config); + assertThat(check, notNullValue()); + assertThat(check.alwaysEnforce(), equalTo(true)); + assertFalse(check.check(new BootstrapContext(settings, null)).isFailure()); + } + + public void testBootstrapCheckOfMissingFile() { + final String fileName = randomAlphaOfLength(10); + Path file = createTempDir().resolve(fileName); + Settings ldapSettings = Settings.builder() + .put(ROLE_MAPPING_FILE_SETTING, file.toAbsolutePath()) + .build(); + RealmConfig config = new RealmConfig("the-realm-name", ldapSettings, settings, TestEnvironment.newEnvironment(settings), + new ThreadContext(Settings.EMPTY)); + final BootstrapCheck check = RoleMappingFileBootstrapCheck.create(config); + assertThat(check, notNullValue()); + assertThat(check.alwaysEnforce(), equalTo(true)); + final BootstrapCheck.BootstrapCheckResult result = check.check(new BootstrapContext(settings, null)); + assertTrue(result.isFailure()); + assertThat(result.getMessage(), containsString("the-realm-name")); + assertThat(result.getMessage(), containsString(fileName)); + assertThat(result.getMessage(), containsString("does not exist")); + } + + public void testBootstrapCheckWithInvalidYaml() throws IOException { + Path file = createTempFile("", ".yml"); + // writing in utf_16 should cause a parsing error as we try to read the file in utf_8 + Files.write(file, Collections.singletonList("junk"), StandardCharsets.UTF_16); + + Settings ldapSettings = Settings.builder() + 
.put(ROLE_MAPPING_FILE_SETTING, file.toAbsolutePath()) + .build(); + RealmConfig config = new RealmConfig("the-realm-name", ldapSettings, settings, TestEnvironment.newEnvironment(settings), + new ThreadContext(Settings.EMPTY)); + final BootstrapCheck check = RoleMappingFileBootstrapCheck.create(config); + assertThat(check, notNullValue()); + assertThat(check.alwaysEnforce(), equalTo(true)); + final BootstrapCheck.BootstrapCheckResult result = check.check(new BootstrapContext(settings, null)); + assertTrue(result.isFailure()); + assertThat(result.getMessage(), containsString("the-realm-name")); + assertThat(result.getMessage(), containsString(file.toString())); + assertThat(result.getMessage(), containsString("could not read")); + } + + public void testBootstrapCheckWithInvalidDn() throws IOException { + Path file = createTempFile("", ".yml"); + // A DN must have at least 1 '=' symbol + Files.write(file, Collections.singletonList("role: not-a-dn")); + + Settings ldapSettings = Settings.builder() + .put(ROLE_MAPPING_FILE_SETTING, file.toAbsolutePath()) + .build(); + RealmConfig config = new RealmConfig("the-realm-name", ldapSettings, settings, TestEnvironment.newEnvironment(settings), + new ThreadContext(Settings.EMPTY)); + final BootstrapCheck check = RoleMappingFileBootstrapCheck.create(config); + assertThat(check, notNullValue()); + assertThat(check.alwaysEnforce(), equalTo(true)); + final BootstrapCheck.BootstrapCheckResult result = check.check(new BootstrapContext(settings, null)); + assertTrue(result.isFailure()); + assertThat(result.getMessage(), containsString("the-realm-name")); + assertThat(result.getMessage(), containsString(file.toString())); + assertThat(result.getMessage(), containsString("invalid DN")); + assertThat(result.getMessage(), containsString("not-a-dn")); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/UsernamePasswordTokenTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/UsernamePasswordTokenTests.java new file mode 100644 index 0000000000000..57c452798844c --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/UsernamePasswordTokenTests.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.support; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.junit.Rule; +import org.junit.rules.ExpectedException; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; + +import static org.elasticsearch.test.SecurityTestsUtils.assertAuthenticationException; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class UsernamePasswordTokenTests extends ESTestCase { + @Rule + public ExpectedException thrown = ExpectedException.none(); + + public void testPutToken() throws Exception { + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + UsernamePasswordToken.putTokenHeader(threadContext, new UsernamePasswordToken("user1", new SecureString("test123"))); + String header = threadContext.getHeader(UsernamePasswordToken.BASIC_AUTH_HEADER); + assertThat(header, notNullValue()); + assertTrue(header.startsWith("Basic ")); + String token = header.substring("Basic ".length()); + token = new String(Base64.getDecoder().decode(token), StandardCharsets.UTF_8); + int i = token.indexOf(":"); + assertTrue(i > 0); + String username = token.substring(0, i); + String password = token.substring(i + 1); + assertThat(username, equalTo("user1")); + assertThat(password, equalTo("test123")); + } + + public void testExtractToken() throws Exception { + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + String header = "Basic " + Base64.getEncoder().encodeToString("user1:test123".getBytes(StandardCharsets.UTF_8)); + threadContext.putHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, header); + UsernamePasswordToken token = UsernamePasswordToken.extractToken(threadContext); + assertThat(token, notNullValue()); + assertThat(token.principal(), equalTo("user1")); + assertThat(new String(token.credentials().getChars()), equalTo("test123")); + } + + public void testExtractTokenInvalid() throws Exception { + String[] invalidValues = { "Basic ", "Basic f" }; + for (String value : invalidValues) { + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + threadContext.putHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, value); + try { + UsernamePasswordToken.extractToken(threadContext); + fail("Expected an authentication exception for invalid basic auth token [" + value + "]"); + } catch (ElasticsearchSecurityException e) { + // expected + assertAuthenticationException(e); + } + } + } + + public void testHeaderNotMatchingReturnsNull() { + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + String header = randomFrom("BasicBroken", "invalid", "Basic"); + threadContext.putHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, header); + UsernamePasswordToken extracted = UsernamePasswordToken.extractToken(threadContext); + assertThat(extracted, nullValue()); + } + + public void testExtractTokenMissing() throws Exception { + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + assertThat(UsernamePasswordToken.extractToken(threadContext), nullValue()); + } + + public void testEqualsWithDifferentPasswords() { + UsernamePasswordToken token1 = new 
UsernamePasswordToken("username", new SecureString("password".toCharArray())); + UsernamePasswordToken token2 = new UsernamePasswordToken("username", new SecureString("new password".toCharArray())); + assertThat(token1, not(equalTo(token2))); + } + + public void testEqualsWithDifferentUsernames() { + UsernamePasswordToken token1 = new UsernamePasswordToken("username", new SecureString("password".toCharArray())); + UsernamePasswordToken token2 = new UsernamePasswordToken("username1", new SecureString("password".toCharArray())); + assertThat(token1, not(equalTo(token2))); + } + + public void testEquals() { + UsernamePasswordToken token1 = new UsernamePasswordToken("username", new SecureString("password".toCharArray())); + UsernamePasswordToken token2 = new UsernamePasswordToken("username", new SecureString("password".toCharArray())); + assertThat(token1, equalTo(token2)); + } + + public static String basicAuthHeaderValue(String username, String passwd) { + return UsernamePasswordToken.basicAuthHeaderValue(username, new SecureString(passwd.toCharArray())); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java new file mode 100644 index 0000000000000..2ac82c8d7c063 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java @@ -0,0 +1,144 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.support.mapper; + +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.AllExpression; +import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.hamcrest.Matchers; +import org.junit.Before; +import org.mockito.Mockito; + +import java.io.IOException; +import java.util.Collections; +import java.util.Locale; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.notNullValue; + +public class ExpressionRoleMappingTests extends ESTestCase { + + private RealmConfig realm; + + @Before + public void setupMapping() throws Exception { + realm = new RealmConfig("ldap1", Settings.EMPTY, Settings.EMPTY, Mockito.mock(Environment.class), + new ThreadContext(Settings.EMPTY)); + } + + public void testParseValidJson() throws Exception { + String json = "{" + + "\"roles\": [ \"kibana_user\", \"sales\" ], " + + "\"enabled\": true, " + + "\"rules\": { " + + " \"all\": [ " + + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } }, " + + " { \"except\": { \"field\": { \"metadata.active\" : false } } }" + + " ]}" + + "}"; + final ExpressionRoleMapping mapping = parse(json, "ldap_sales"); + assertThat(mapping.getRoles(), Matchers.containsInAnyOrder("kibana_user", "sales")); + assertThat(mapping.getExpression(), instanceOf(AllExpression.class)); + + final UserRoleMapper.UserData user1a = new UserRoleMapper.UserData( + "john.smith", "cn=john.smith,ou=sales,dc=example,dc=com", + Collections.emptyList(), Collections.singletonMap("active", true), realm + ); + final UserRoleMapper.UserData user1b = new UserRoleMapper.UserData( + user1a.getUsername(), user1a.getDn().toUpperCase(Locale.US), user1a.getGroups(), user1a.getMetadata(), user1a.getRealm() + ); + final UserRoleMapper.UserData user1c = new UserRoleMapper.UserData( + user1a.getUsername(), user1a.getDn().replaceAll(",", ", "), user1a.getGroups(), user1a.getMetadata(), user1a.getRealm() + ); + final UserRoleMapper.UserData user1d = new UserRoleMapper.UserData( + user1a.getUsername(), user1a.getDn().replaceAll("dc=", "DC="), user1a.getGroups(), user1a.getMetadata(), user1a.getRealm() + ); + final UserRoleMapper.UserData user2 = new UserRoleMapper.UserData( + "jamie.perez", "cn=jamie.perez,ou=sales,dc=example,dc=com", + Collections.emptyList(), Collections.singletonMap("active", false), realm + ); + + final UserRoleMapper.UserData user3 = new UserRoleMapper.UserData( + "simone.ng", "cn=simone.ng,ou=finance,dc=example,dc=com", + Collections.emptyList(), Collections.singletonMap("active", true), realm + ); + + assertThat(mapping.getExpression().match(user1a.asModel()), equalTo(true)); + 
assertThat(mapping.getExpression().match(user1b.asModel()), equalTo(true)); + assertThat(mapping.getExpression().match(user1c.asModel()), equalTo(true)); + assertThat(mapping.getExpression().match(user1d.asModel()), equalTo(true)); + assertThat(mapping.getExpression().match(user2.asModel()), equalTo(false)); + assertThat(mapping.getExpression().match(user3.asModel()), equalTo(false)); + } + + public void testParsingFailsIfRulesAreMissing() throws Exception { + String json = "{" + + "\"roles\": [ \"kibana_user\", \"sales\" ], " + + "\"enabled\": true " + + "}"; + ParsingException ex = expectThrows(ParsingException.class, () -> parse(json, "bad_json")); + assertThat(ex.getMessage(), containsString("rules")); + } + + public void testParsingFailsIfRolesMissing() throws Exception { + String json = "{" + + "\"enabled\": true, " + + "\"rules\": " + + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } } " + + "}"; + ParsingException ex = expectThrows(ParsingException.class, () -> parse(json, "bad_json")); + assertThat(ex.getMessage(), containsString("role")); + } + + public void testParsingFailsIfThereAreUnrecognisedFields() throws Exception { + String json = "{" + + "\"disabled\": false, " + + "\"roles\": [ \"kibana_user\", \"sales\" ], " + + "\"rules\": " + + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } } " + + "}"; + ParsingException ex = expectThrows(ParsingException.class, () -> parse(json, "bad_json")); + assertThat(ex.getMessage(), containsString("disabled")); + } + + public void testParsingIgnoresTypeFields() throws Exception { + String json = "{" + + "\"enabled\": true, " + + "\"roles\": [ \"kibana_user\", \"sales\" ], " + + "\"rules\": " + + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } }, " + + "\"doc_type\": \"role-mapping\", " + + "\"type\": \"doc\"" + + "}"; + final ExpressionRoleMapping mapping = parse(json, "from_index"); + assertThat(mapping.isEnabled(), equalTo(true)); + assertThat(mapping.getRoles(), containsInAnyOrder("kibana_user", "sales")); + } + + private ExpressionRoleMapping parse(String json, String name) throws IOException { + final NamedXContentRegistry registry = NamedXContentRegistry.EMPTY; + final XContentParser parser = XContentType.JSON.xContent() + .createParser(registry, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json); + final ExpressionRoleMapping mapping = ExpressionRoleMapping.parse(name, parser); + assertThat(mapping, notNullValue()); + assertThat(mapping.getName(), equalTo(name)); + return mapping; + } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java new file mode 100644 index 0000000000000..41fe340d05f41 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -0,0 +1,211 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.support.mapper; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.health.ClusterIndexHealth; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheRequest; +import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheResponse; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression.FieldValue; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; +import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; +import org.hamcrest.Matchers; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.security.test.SecurityTestUtils.getClusterIndexHealth; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class NativeRoleMappingStoreTests extends ESTestCase { + + public void testResolveRoles() throws Exception { + // Does match DN + final ExpressionRoleMapping mapping1 = new ExpressionRoleMapping("dept_h", + new FieldExpression("dn", Collections.singletonList(new FieldValue("*,ou=dept_h,o=forces,dc=gc,dc=ca"))), + Arrays.asList("dept_h", "defence"), Collections.emptyMap(), true); + // Does not match - user is not in this group + final ExpressionRoleMapping mapping2 = new ExpressionRoleMapping("admin", + new FieldExpression("groups", Collections.singletonList( + new FieldValue(randomiseDn("cn=esadmin,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")))), + Arrays.asList("admin"), Collections.emptyMap(), true); + // Does match - user is one of these groups + final ExpressionRoleMapping mapping3 = new ExpressionRoleMapping("flight", + new FieldExpression("groups", Arrays.asList( + new FieldValue(randomiseDn("cn=alphaflight,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")), + new FieldValue(randomiseDn("cn=betaflight,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")), + new FieldValue(randomiseDn("cn=gammaflight,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")) + )), + Arrays.asList("flight"), Collections.emptyMap(), true); + // Does not match - mapping is not 
enabled + final ExpressionRoleMapping mapping4 = new ExpressionRoleMapping("mutants", + new FieldExpression("groups", Collections.singletonList( + new FieldValue(randomiseDn("cn=mutants,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")))), + Arrays.asList("mutants"), Collections.emptyMap(), false); + + final Client client = mock(Client.class); + final SecurityLifecycleService lifecycleService = mock(SecurityLifecycleService.class); + when(lifecycleService.isSecurityIndexAvailable()).thenReturn(true); + + final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, lifecycleService) { + @Override + protected void loadMappings(ActionListener<List<ExpressionRoleMapping>> listener) { + final List<ExpressionRoleMapping> mappings = Arrays.asList(mapping1, mapping2, mapping3, mapping4); + logger.info("Role mappings are: [{}]", mappings); + listener.onResponse(mappings); + } + }; + + final RealmConfig realm = new RealmConfig("ldap1", Settings.EMPTY, Settings.EMPTY, mock(Environment.class), + new ThreadContext(Settings.EMPTY)); + + final PlainActionFuture<Set<String>> future = new PlainActionFuture<>(); + final UserRoleMapper.UserData user = new UserRoleMapper.UserData("sasquatch", + randomiseDn("cn=walter.langowski,ou=people,ou=dept_h,o=forces,dc=gc,dc=ca"), + Arrays.asList( + randomiseDn("cn=alphaflight,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca"), + randomiseDn("cn=mutants,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca") + ), Collections.emptyMap(), realm); + + logger.info("UserData is [{}]", user); + store.resolveRoles(user, future); + final Set<String> roles = future.get(); + assertThat(roles, Matchers.containsInAnyOrder("dept_h", "defence", "flight")); + } + + private String randomiseDn(String dn) { + // Randomly transform the dn into another valid form that is logically identical, + // but (potentially) textually different + switch (randomIntBetween(0, 3)) { + case 0: + // do nothing + return dn; + case 1: + return dn.toUpperCase(Locale.ROOT); + case 2: + // Upper case just the attribute name for each RDN + return Arrays.stream(dn.split(",")).map(s -> { + final String[] arr = s.split("="); + arr[0] = arr[0].toUpperCase(Locale.ROOT); + return String.join("=", arr); + }).collect(Collectors.joining(",")); + case 3: + return dn.replaceAll(",", ", "); + } + return dn; + } + + + public void testCacheClearOnIndexHealthChange() { + final AtomicInteger numInvalidation = new AtomicInteger(0); + final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation); + + int expectedInvalidation = 0; + // existing to no longer present + ClusterIndexHealth previousHealth = getClusterIndexHealth(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW)); + ClusterIndexHealth currentHealth = null; + store.onSecurityIndexHealthChange(previousHealth, currentHealth); + assertEquals(++expectedInvalidation, numInvalidation.get()); + + // doesn't exist to exists + previousHealth = null; + currentHealth = getClusterIndexHealth(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW)); + store.onSecurityIndexHealthChange(previousHealth, currentHealth); + assertEquals(++expectedInvalidation, numInvalidation.get()); + + // green or yellow to red + previousHealth = getClusterIndexHealth(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW)); + currentHealth = getClusterIndexHealth(ClusterHealthStatus.RED); + store.onSecurityIndexHealthChange(previousHealth, currentHealth); + assertEquals(expectedInvalidation, numInvalidation.get()); + + // red to non red + previousHealth = getClusterIndexHealth(ClusterHealthStatus.RED);
+ currentHealth = getClusterIndexHealth(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW)); + store.onSecurityIndexHealthChange(previousHealth, currentHealth); + assertEquals(++expectedInvalidation, numInvalidation.get()); + + // green to yellow or yellow to green + previousHealth = getClusterIndexHealth(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW)); + currentHealth = getClusterIndexHealth( + previousHealth.getStatus() == ClusterHealthStatus.GREEN ? ClusterHealthStatus.YELLOW : ClusterHealthStatus.GREEN); + store.onSecurityIndexHealthChange(previousHealth, currentHealth); + assertEquals(expectedInvalidation, numInvalidation.get()); + } + + public void testCacheClearOnIndexOutOfDateChange() { + final AtomicInteger numInvalidation = new AtomicInteger(0); + final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation); + + store.onSecurityIndexOutOfDateChange(false, true); + assertEquals(1, numInvalidation.get()); + + store.onSecurityIndexOutOfDateChange(true, false); + assertEquals(2, numInvalidation.get()); + } + + private NativeRoleMappingStore buildRoleMappingStoreForInvalidationTesting(AtomicInteger invalidationCounter) { + final Settings settings = Settings.builder().put("path.home", createTempDir()).build(); + + final ThreadPool threadPool = mock(ThreadPool.class); + final ThreadContext threadContext = new ThreadContext(settings); + when(threadPool.getThreadContext()).thenReturn(threadContext); + + final Client client = mock(Client.class); + when(client.threadPool()).thenReturn(threadPool); + when(client.settings()).thenReturn(settings); + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + invalidationCounter.incrementAndGet(); + listener.onResponse(new ClearRealmCacheResponse(new ClusterName("cluster"), Collections.emptyList(), Collections.emptyList())); + return null; + }).when(client).execute(eq(ClearRealmCacheAction.INSTANCE), any(ClearRealmCacheRequest.class), any(ActionListener.class)); + + final Environment env = TestEnvironment.newEnvironment(settings); + final RealmConfig realmConfig = new RealmConfig(getTestName(), Settings.EMPTY, settings, env, threadContext); + final CachingUsernamePasswordRealm mockRealm = new CachingUsernamePasswordRealm("test", realmConfig) { + @Override + protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { + listener.onResponse(AuthenticationResult.notHandled()); + } + + @Override + protected void doLookupUser(String username, ActionListener listener) { + listener.onResponse(null); + } + }; + final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, mock(SecurityLifecycleService.class)); + store.refreshRealmOnChange(mockRealm); + return store; + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AnalyzeTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AnalyzeTests.java new file mode 100644 index 0000000000000..b8634f2e9f358 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AnalyzeTests.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authz; + +import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; + +import java.util.Collections; + +import static org.elasticsearch.test.SecurityTestsUtils.assertThrowsAuthorizationException; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; + +public class AnalyzeTests extends SecurityIntegTestCase { + protected static final String USERS_PASSWD_HASHED = new String(Hasher.BCRYPT.hash(new SecureString("test123".toCharArray()))); + + @Override + protected String configUsers() { + return super.configUsers() + + "analyze_indices:" + USERS_PASSWD_HASHED + "\n" + + "analyze_cluster:" + USERS_PASSWD_HASHED + "\n"; + } + + @Override + protected String configUsersRoles() { + return super.configUsersRoles() + + "analyze_indices:analyze_indices\n" + + "analyze_cluster:analyze_cluster\n"; + } + + @Override + protected String configRoles() { + return super.configRoles()+ "\n" + + //role that has analyze indices privileges only + "analyze_indices:\n" + + " indices:\n" + + " - names: 'test_*'\n" + + " privileges: [ 'indices:admin/analyze' ]\n" + + "analyze_cluster:\n" + + " cluster:\n" + + " - cluster:admin/analyze\n"; + } + + public void testAnalyzeWithIndices() { + // this test tries to execute different analyze api variants from a user that has analyze privileges only on a specific index + // namespace + + createIndex("test_1"); + ensureGreen(); + + //ok: user has permissions for analyze on test_* + SecureString passwd = new SecureString("test123".toCharArray()); + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("analyze_indices", passwd))) + .admin().indices().prepareAnalyze("this is my text").setIndex("test_1").setAnalyzer("standard").get(); + + //fails: user doesn't have permissions for analyze on index non_authorized + assertThrowsAuthorizationException(client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("analyze_indices", passwd))) + .admin().indices().prepareAnalyze("this is my text").setIndex("non_authorized").setAnalyzer("standard")::get, + AnalyzeAction.NAME, "analyze_indices"); + + //fails: user doesn't have permissions for cluster level analyze + assertThrowsAuthorizationException(client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("analyze_indices", passwd))) + .admin().indices().prepareAnalyze("this is my text").setAnalyzer("standard")::get, + "cluster:admin/analyze", "analyze_indices"); + } + + public void testAnalyzeWithoutIndices() { + //this test tries to execute different analyze api variants from a user that has analyze privileges only at cluster level + + SecureString passwd = new SecureString("test123".toCharArray()); + //fails: user doesn't have permissions for analyze on index test_1 + assertThrowsAuthorizationException(client().filterWithHeader( + Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("analyze_cluster", passwd))) + .admin().indices().prepareAnalyze("this is my text").setIndex("test_1").setAnalyzer("standard")::get, + AnalyzeAction.NAME, "analyze_cluster"); + + 
client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("analyze_cluster", passwd))) + .admin().indices().prepareAnalyze("this is my text").setAnalyzer("standard").get(); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java new file mode 100644 index 0000000000000..3013a7c41c2ac --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -0,0 +1,1323 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authz; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.CompositeIndicesRequest; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.MockIndicesRequest; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction; +import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; +import org.elasticsearch.action.admin.indices.create.CreateIndexAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsAction; +import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest; +import org.elasticsearch.action.admin.indices.get.GetIndexAction; +import org.elasticsearch.action.admin.indices.get.GetIndexRequest; +import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; +import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest; +import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsAction; +import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; +import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusAction; +import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequest; +import 
org.elasticsearch.action.bulk.BulkAction; +import org.elasticsearch.action.bulk.BulkItemRequest; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkShardRequest; +import org.elasticsearch.action.delete.DeleteAction; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.get.GetAction; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.MultiGetAction; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.index.IndexAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.ClearScrollAction; +import org.elasticsearch.action.search.ClearScrollRequest; +import org.elasticsearch.action.search.MultiSearchAction; +import org.elasticsearch.action.search.MultiSearchRequest; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchScrollAction; +import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.action.search.SearchTransportService; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.termvectors.MultiTermVectorsAction; +import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; +import org.elasticsearch.action.termvectors.TermVectorsAction; +import org.elasticsearch.action.termvectors.TermVectorsRequest; +import org.elasticsearch.action.update.UpdateAction; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.TriFunction; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.license.GetLicenseAction; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportActionProxy; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequestBuilder; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordAction; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordRequest; +import org.elasticsearch.xpack.core.security.action.user.ChangePasswordRequestBuilder; +import org.elasticsearch.xpack.core.security.action.user.DeleteUserAction; +import org.elasticsearch.xpack.core.security.action.user.PutUserAction; +import org.elasticsearch.xpack.core.security.action.user.UserRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import 
org.elasticsearch.xpack.core.security.authc.DefaultAuthenticationFailureHandler; +import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings; +import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings; +import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings; +import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; +import org.elasticsearch.xpack.core.security.authz.IndicesAndAliasesResolverField; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.core.security.user.ElasticUser; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.security.audit.AuditTrailService; +import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; +import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; +import org.elasticsearch.xpack.sql.plugin.SqlQueryAction; +import org.elasticsearch.xpack.sql.plugin.SqlQueryRequest; +import org.junit.Before; +import org.mockito.Mockito; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; + +import static java.util.Arrays.asList; +import static org.elasticsearch.test.SecurityTestsUtils.assertAuthenticationException; +import static org.elasticsearch.test.SecurityTestsUtils.assertThrowsAuthorizationException; +import static org.elasticsearch.test.SecurityTestsUtils.assertThrowsAuthorizationExceptionRunAs; +import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.endsWith; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.startsWith; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +public class AuthorizationServiceTests extends ESTestCase { + private AuditTrailService auditTrail; + private ClusterService clusterService; + private AuthorizationService authorizationService; + private ThreadContext threadContext; + private ThreadPool threadPool; + private Map roleMap = new HashMap<>(); + private CompositeRolesStore rolesStore; + + @Before + public void setup() { + rolesStore 
= mock(CompositeRolesStore.class); + clusterService = mock(ClusterService.class); + final Settings settings = Settings.builder() + .put("search.remote.other_cluster.seeds", "localhost:9999") + .build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + auditTrail = mock(AuditTrailService.class); + threadContext = new ThreadContext(settings); + threadPool = mock(ThreadPool.class); + when(threadPool.getThreadContext()).thenReturn(threadContext); + final FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(settings); + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[2]; + Set names = (Set) i.getArguments()[0]; + assertNotNull(names); + Set roleDescriptors = new HashSet<>(); + for (String name : names) { + RoleDescriptor descriptor = roleMap.get(name); + if (descriptor != null) { + roleDescriptors.add(descriptor); + } + } + + if (roleDescriptors.isEmpty()) { + callback.onResponse(Role.EMPTY); + } else { + callback.onResponse( + CompositeRolesStore.buildRoleFromDescriptors(roleDescriptors, fieldPermissionsCache)); + } + return Void.TYPE; + }).when(rolesStore).roles(any(Set.class), any(FieldPermissionsCache.class), any(ActionListener.class)); + authorizationService = new AuthorizationService(settings, rolesStore, clusterService, + auditTrail, new DefaultAuthenticationFailureHandler(), threadPool, new AnonymousUser(settings)); + } + + private void authorize(Authentication authentication, String action, TransportRequest request) { + PlainActionFuture future = new PlainActionFuture(); + AuthorizationUtils.AsyncAuthorizer authorizer = new AuthorizationUtils.AsyncAuthorizer(authentication, future, + (userRoles, runAsRoles) -> { + authorizationService.authorize(authentication, action, request, userRoles, runAsRoles); + future.onResponse(null); + }); + authorizer.authorize(authorizationService); + future.actionGet(); + } + + public void testActionsSystemUserIsAuthorized() { + TransportRequest request = mock(TransportRequest.class); + + // A failure would throw an exception + Authentication authentication = createAuthentication(SystemUser.INSTANCE); + authorize(authentication, "indices:monitor/whatever", request); + verify(auditTrail).accessGranted(authentication, "indices:monitor/whatever", request, + new String[] { SystemUser.ROLE_NAME }); + + authentication = createAuthentication(SystemUser.INSTANCE); + authorize(authentication, "internal:whatever", request); + verify(auditTrail).accessGranted(authentication, "internal:whatever", request, new String[] { SystemUser.ROLE_NAME }); + verifyNoMoreInteractions(auditTrail); + } + + public void testIndicesActionsAreNotAuthorized() { + final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = createAuthentication(SystemUser.INSTANCE); + assertThrowsAuthorizationException( + () -> authorize(authentication, "indices:", request), + "indices:", SystemUser.INSTANCE.principal()); + verify(auditTrail).accessDenied(authentication, "indices:", request, new String[] { SystemUser.ROLE_NAME }); + verifyNoMoreInteractions(auditTrail); + } + + public void testClusterAdminActionsAreNotAuthorized() { + final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = createAuthentication(SystemUser.INSTANCE); + assertThrowsAuthorizationException( + () -> authorize(authentication, 
"cluster:admin/whatever", request), + "cluster:admin/whatever", SystemUser.INSTANCE.principal()); + verify(auditTrail).accessDenied(authentication, "cluster:admin/whatever", request, + new String[] { SystemUser.ROLE_NAME }); + verifyNoMoreInteractions(auditTrail); + } + + public void testClusterAdminSnapshotStatusActionIsNotAuthorized() { + final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = createAuthentication(SystemUser.INSTANCE); + assertThrowsAuthorizationException( + () -> authorize(authentication, "cluster:admin/snapshot/status", request), + "cluster:admin/snapshot/status", SystemUser.INSTANCE.principal()); + verify(auditTrail).accessDenied(authentication, "cluster:admin/snapshot/status", request, + new String[] { SystemUser.ROLE_NAME }); + verifyNoMoreInteractions(auditTrail); + } + + public void testNoRolesCausesDenial() { + final TransportRequest request = new SearchRequest(); + final Authentication authentication = createAuthentication(new User("test user")); + mockEmptyMetaData(); + assertThrowsAuthorizationException( + () -> authorize(authentication, "indices:a", request), + "indices:a", "test user"); + verify(auditTrail).accessDenied(authentication, "indices:a", request, Role.EMPTY.names()); + verifyNoMoreInteractions(auditTrail); + } + + public void testUserWithNoRolesCanPerformRemoteSearch() { + SearchRequest request = new SearchRequest(); + request.indices("other_cluster:index1", "*_cluster:index2", "other_cluster:other_*"); + final Authentication authentication = createAuthentication(new User("test user")); + mockEmptyMetaData(); + authorize(authentication, SearchAction.NAME, request); + verify(auditTrail).accessGranted(authentication, SearchAction.NAME, request, Role.EMPTY.names()); + verifyNoMoreInteractions(auditTrail); + } + + /** + * This test mimics {@link #testUserWithNoRolesCanPerformRemoteSearch()} except that + * while the referenced index _looks_ like a remote index, the remote cluster name has not + * been defined, so it is actually a local index and access should be denied + */ + public void testUserWithNoRolesCannotPerformLocalSearch() { + SearchRequest request = new SearchRequest(); + request.indices("no_such_cluster:index"); + final Authentication authentication = createAuthentication(new User("test user")); + mockEmptyMetaData(); + assertThrowsAuthorizationException( + () -> authorize(authentication, SearchAction.NAME, request), + SearchAction.NAME, "test user"); + verify(auditTrail).accessDenied(authentication, SearchAction.NAME, request, Role.EMPTY.names()); + verifyNoMoreInteractions(auditTrail); + } + + /** + * This test mimics {@link #testUserWithNoRolesCannotPerformLocalSearch()} but includes + * both local and remote indices, including wildcards + */ + public void testUserWithNoRolesCanPerformMultiClusterSearch() { + SearchRequest request = new SearchRequest(); + request.indices("local_index", "wildcard_*", "other_cluster:remote_index", "*:foo?"); + final Authentication authentication = createAuthentication(new User("test user")); + mockEmptyMetaData(); + assertThrowsAuthorizationException( + () -> authorize(authentication, SearchAction.NAME, request), + SearchAction.NAME, "test user"); + verify(auditTrail).accessDenied(authentication, SearchAction.NAME, request, Role.EMPTY.names()); + verifyNoMoreInteractions(auditTrail); + } + + public void testUserWithNoRolesCannotSql() { + TransportRequest request = new SqlQueryRequest(); + Authentication authentication = createAuthentication(new User("test 
user")); + mockEmptyMetaData(); + assertThrowsAuthorizationException( + () -> authorize(authentication, SqlQueryAction.NAME, request), + SqlQueryAction.NAME, "test user"); + verify(auditTrail).accessDenied(authentication, SqlQueryAction.NAME, request, Role.EMPTY.names()); + verifyNoMoreInteractions(auditTrail); + } + + /** + * Verifies that the behaviour tested in {@link #testUserWithNoRolesCanPerformRemoteSearch} + * does not work for requests that are not remote-index-capable. + */ + public void testRemoteIndicesOnlyWorkWithApplicableRequestTypes() { + DeleteIndexRequest request = new DeleteIndexRequest(); + request.indices("other_cluster:index1", "other_cluster:index2"); + final Authentication authentication = createAuthentication(new User("test user")); + mockEmptyMetaData(); + assertThrowsAuthorizationException( + () -> authorize(authentication, DeleteIndexAction.NAME, request), + DeleteIndexAction.NAME, "test user"); + verify(auditTrail).accessDenied(authentication, DeleteIndexAction.NAME, request, Role.EMPTY.names()); + verifyNoMoreInteractions(auditTrail); + } + + public void testUnknownRoleCausesDenial() { + Tuple tuple = randomFrom(asList( + new Tuple<>(SearchAction.NAME, new SearchRequest()), + new Tuple<>(IndicesExistsAction.NAME, new IndicesExistsRequest()), + new Tuple<>(SqlQueryAction.NAME, new SqlQueryRequest()))); + String action = tuple.v1(); + TransportRequest request = tuple.v2(); + final Authentication authentication = createAuthentication(new User("test user", "non-existent-role")); + mockEmptyMetaData(); + assertThrowsAuthorizationException( + () -> authorize(authentication, action, request), + action, "test user"); + verify(auditTrail).accessDenied(authentication, action, request, Role.EMPTY.names()); + verifyNoMoreInteractions(auditTrail); + } + + public void testThatNonIndicesAndNonClusterActionIsDenied() { + final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = createAuthentication(new User("test user", "a_all")); + final RoleDescriptor role = new RoleDescriptor("a_role", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a").privileges("all").build() }, null); + roleMap.put("a_all", role); + + assertThrowsAuthorizationException( + () -> authorize(authentication, "whatever", request), + "whatever", "test user"); + verify(auditTrail).accessDenied(authentication, "whatever", request, new String[] { role.getName() }); + verifyNoMoreInteractions(auditTrail); + } + + public void testThatRoleWithNoIndicesIsDenied() { + @SuppressWarnings("unchecked") + Tuple tuple = randomFrom( + new Tuple<>(SearchAction.NAME, new SearchRequest()), + new Tuple<>(IndicesExistsAction.NAME, new IndicesExistsRequest()), + new Tuple<>(SqlQueryAction.NAME, new SqlQueryRequest())); + String action = tuple.v1(); + TransportRequest request = tuple.v2(); + final Authentication authentication = createAuthentication(new User("test user", "no_indices")); + RoleDescriptor role = new RoleDescriptor("a_role", null, null, null); + roleMap.put("no_indices", role); + mockEmptyMetaData(); + + assertThrowsAuthorizationException( + () -> authorize(authentication, action, request), + action, "test user"); + verify(auditTrail).accessDenied(authentication, action, request, new String[] { role.getName() }); + verifyNoMoreInteractions(auditTrail); + } + + public void testElasticUserAuthorizedForNonChangePasswordRequestsWhenNotInSetupMode() { + final Authentication authentication = createAuthentication(new ElasticUser(true)); + final Tuple 
request = randomCompositeRequest(); + authorize(authentication, request.v1(), request.v2()); + + verify(auditTrail).accessGranted(authentication, request.v1(), request.v2(), new String[] { ElasticUser.ROLE_NAME }); + } + + public void testSearchAgainstEmptyCluster() { + RoleDescriptor role = new RoleDescriptor("a_role", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a").privileges("all").build() }, null); + final Authentication authentication = createAuthentication(new User("test user", "a_all")); + roleMap.put("a_all", role); + mockEmptyMetaData(); + + { + //ignore_unavailable set to false, user is not authorized for this index nor does it exist + SearchRequest searchRequest = new SearchRequest("does_not_exist") + .indicesOptions(IndicesOptions.fromOptions(false, true, + true, false)); + + assertThrowsAuthorizationException( + () -> authorize(authentication, SearchAction.NAME, searchRequest), + SearchAction.NAME, "test user"); + verify(auditTrail).accessDenied(authentication, SearchAction.NAME, searchRequest, new String[] { role.getName() }); + verifyNoMoreInteractions(auditTrail); + } + + { + //ignore_unavailable and allow_no_indices both set to true, user is not authorized for this index nor does it exist + SearchRequest searchRequest = new SearchRequest("does_not_exist") + .indicesOptions(IndicesOptions.fromOptions(true, true, true, false)); + authorize(authentication, SearchAction.NAME, searchRequest); + verify(auditTrail).accessGranted(authentication, SearchAction.NAME, searchRequest, new String[] { role.getName() }); + final IndicesAccessControl indicesAccessControl = threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); + final IndicesAccessControl.IndexAccessControl indexAccessControl = + indicesAccessControl.getIndexPermissions(IndicesAndAliasesResolverField.NO_INDEX_PLACEHOLDER); + assertFalse(indexAccessControl.getFieldPermissions().hasFieldLevelSecurity()); + assertNull(indexAccessControl.getQueries()); + } + } + + public void testScrollRelatedRequestsAllowed() { + RoleDescriptor role = new RoleDescriptor("a_role", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a").privileges("all").build() }, null); + final Authentication authentication = createAuthentication(new User("test user", "a_all")); + roleMap.put("a_all", role); + mockEmptyMetaData(); + + final ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + authorize(authentication, ClearScrollAction.NAME, clearScrollRequest); + verify(auditTrail).accessGranted(authentication, ClearScrollAction.NAME, clearScrollRequest, new String[] { role.getName() }); + + final SearchScrollRequest searchScrollRequest = new SearchScrollRequest(); + authorize(authentication, SearchScrollAction.NAME, searchScrollRequest); + verify(auditTrail).accessGranted(authentication, SearchScrollAction.NAME, searchScrollRequest, new String[] { role.getName() }); + + // We have to use a mock request for other Scroll actions as the actual requests are package private to SearchTransportService + final TransportRequest request = mock(TransportRequest.class); + authorize(authentication, SearchTransportService.CLEAR_SCROLL_CONTEXTS_ACTION_NAME, request); + verify(auditTrail).accessGranted(authentication, SearchTransportService.CLEAR_SCROLL_CONTEXTS_ACTION_NAME, request, + new String[] { role.getName() }); + + authorize(authentication, SearchTransportService.FETCH_ID_SCROLL_ACTION_NAME, request); + verify(auditTrail).accessGranted(authentication, 
SearchTransportService.FETCH_ID_SCROLL_ACTION_NAME, request, + new String[] { role.getName() }); + + authorize(authentication, SearchTransportService.QUERY_FETCH_SCROLL_ACTION_NAME, request); + verify(auditTrail).accessGranted(authentication, SearchTransportService.QUERY_FETCH_SCROLL_ACTION_NAME, request, + new String[] { role.getName() }); + + authorize(authentication, SearchTransportService.QUERY_SCROLL_ACTION_NAME, request); + verify(auditTrail).accessGranted(authentication, SearchTransportService.QUERY_SCROLL_ACTION_NAME, request, + new String[] { role.getName() }); + + authorize(authentication, SearchTransportService.FREE_CONTEXT_SCROLL_ACTION_NAME, request); + verify(auditTrail).accessGranted(authentication, SearchTransportService.FREE_CONTEXT_SCROLL_ACTION_NAME, request, + new String[] { role.getName() }); + verifyNoMoreInteractions(auditTrail); + } + + public void testAuthorizeIndicesFailures() { + TransportRequest request = new GetIndexRequest().indices("b"); + ClusterState state = mockEmptyMetaData(); + RoleDescriptor role = new RoleDescriptor("a_role", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a").privileges("all").build() }, null); + final Authentication authentication = createAuthentication(new User("test user", "a_all")); + roleMap.put("a_all", role); + + assertThrowsAuthorizationException( + () -> authorize(authentication, "indices:a", request), + "indices:a", "test user"); + verify(auditTrail).accessDenied(authentication, "indices:a", request, new String[] { role.getName() }); + verifyNoMoreInteractions(auditTrail); + verify(clusterService, times(1)).state(); + verify(state, times(1)).metaData(); + } + + public void testCreateIndexWithAliasWithoutPermissions() { + CreateIndexRequest request = new CreateIndexRequest("a"); + request.alias(new Alias("a2")); + ClusterState state = mockEmptyMetaData(); + RoleDescriptor role = new RoleDescriptor("a_role", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a").privileges("all").build() }, null); + final Authentication authentication = createAuthentication(new User("test user", "a_all")); + roleMap.put("a_all", role); + + assertThrowsAuthorizationException( + () -> authorize(authentication, CreateIndexAction.NAME, request), + IndicesAliasesAction.NAME, "test user"); + verify(auditTrail).accessDenied(authentication, IndicesAliasesAction.NAME, request, new String[] { role.getName() }); + verifyNoMoreInteractions(auditTrail); + verify(clusterService).state(); + verify(state, times(1)).metaData(); + } + + public void testCreateIndexWithAlias() { + CreateIndexRequest request = new CreateIndexRequest("a"); + request.alias(new Alias("a2")); + ClusterState state = mockEmptyMetaData(); + RoleDescriptor role = new RoleDescriptor("a_all", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a", "a2").privileges("all").build() }, null); + final Authentication authentication = createAuthentication(new User("test user", "a_all")); + roleMap.put("a_all", role); + + authorize(authentication, CreateIndexAction.NAME, request); + + verify(auditTrail).accessGranted(authentication, CreateIndexAction.NAME, request, new String[] { role.getName() }); + verifyNoMoreInteractions(auditTrail); + verify(clusterService).state(); + verify(state, times(1)).metaData(); + } + + public void testDenialForAnonymousUser() { + TransportRequest request = new GetIndexRequest().indices("b"); + ClusterState state = mockEmptyMetaData(); + Settings settings = 
Settings.builder().put(AnonymousUser.ROLES_SETTING.getKey(), "a_all").build(); + final AnonymousUser anonymousUser = new AnonymousUser(settings); + authorizationService = new AuthorizationService(settings, rolesStore, clusterService, auditTrail, + new DefaultAuthenticationFailureHandler(), threadPool, anonymousUser); + + RoleDescriptor role = new RoleDescriptor("a_all", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a").privileges("all").build() }, null); + roleMap.put("a_all", role); + + final Authentication authentication = createAuthentication(anonymousUser); + assertThrowsAuthorizationException( + () -> authorize(authentication, "indices:a", request), + "indices:a", anonymousUser.principal()); + verify(auditTrail).accessDenied(authentication, "indices:a", request, new String[] { role.getName() }); + verifyNoMoreInteractions(auditTrail); + verify(clusterService, times(1)).state(); + verify(state, times(1)).metaData(); + } + + public void testDenialForAnonymousUserAuthorizationExceptionDisabled() { + TransportRequest request = new GetIndexRequest().indices("b"); + ClusterState state = mockEmptyMetaData(); + Settings settings = Settings.builder() + .put(AnonymousUser.ROLES_SETTING.getKey(), "a_all") + .put(AuthorizationService.ANONYMOUS_AUTHORIZATION_EXCEPTION_SETTING.getKey(), false) + .build(); + final Authentication authentication = createAuthentication(new AnonymousUser(settings)); + authorizationService = new AuthorizationService(settings, rolesStore, clusterService, auditTrail, + new DefaultAuthenticationFailureHandler(), threadPool, new AnonymousUser(settings)); + + RoleDescriptor role = new RoleDescriptor("a_all", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a").privileges("all").build() }, null); + roleMap.put("a_all", role); + + final ElasticsearchSecurityException securityException = expectThrows(ElasticsearchSecurityException.class, + () -> authorize(authentication, "indices:a", request)); + assertAuthenticationException(securityException, containsString("action [indices:a] requires authentication")); + verify(auditTrail).accessDenied(authentication, "indices:a", request, new String[] { role.getName() }); + verifyNoMoreInteractions(auditTrail); + verify(clusterService, times(1)).state(); + verify(state, times(1)).metaData(); + } + + public void testAuditTrailIsRecordedWhenIndexWildcardThrowsError() { + IndicesOptions options = IndicesOptions.fromOptions(false, false, true, true); + TransportRequest request = new GetIndexRequest().indices("not-an-index-*").indicesOptions(options); + ClusterState state = mockEmptyMetaData(); + RoleDescriptor role = new RoleDescriptor("a_all", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a").privileges("all").build() }, null); + final Authentication authentication = createAuthentication(new User("test user", "a_all")); + roleMap.put("a_all", role); + + final IndexNotFoundException nfe = expectThrows( + IndexNotFoundException.class, + () -> authorize(authentication, GetIndexAction.NAME, request)); + assertThat(nfe.getIndex(), is(notNullValue())); + assertThat(nfe.getIndex().getName(), is("not-an-index-*")); + verify(auditTrail).accessDenied(authentication, GetIndexAction.NAME, request, new String[] { role.getName() }); + verifyNoMoreInteractions(auditTrail); + verify(clusterService).state(); + verify(state, times(1)).metaData(); + } + + public void testRunAsRequestWithNoRolesUser() { + final TransportRequest request = mock(TransportRequest.class); + final Authentication 
authentication = createAuthentication(new User("run as me", null, new User("test user", "admin"))); + final User user = new User("run as me", null, new User("test user", "admin")); + assertNotEquals(authentication.getUser().authenticatedUser(), authentication); + assertThrowsAuthorizationExceptionRunAs( + () -> authorize(authentication, "indices:a", request), + "indices:a", "test user", "run as me"); // run as [run as me] + verify(auditTrail).runAsDenied(authentication, "indices:a", request, Role.EMPTY.names()); + verifyNoMoreInteractions(auditTrail); + } + + public void testRunAsRequestWithoutLookedUpBy() { + AuthenticateRequest request = new AuthenticateRequest("run as me"); + roleMap.put("can run as", ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR); + User user = new User("run as me", Strings.EMPTY_ARRAY, new User("test user", new String[] { "can run as" })); + Authentication authentication = new Authentication(user, new RealmRef("foo", "bar", "baz"), null); + assertNotEquals(user.authenticatedUser(), user); + assertThrowsAuthorizationExceptionRunAs( + () -> authorize(authentication, AuthenticateAction.NAME, request), + AuthenticateAction.NAME, "test user", "run as me"); // run as [run as me] + verify(auditTrail).runAsDenied(authentication, AuthenticateAction.NAME, request, + new String[] { ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR.getName() }); + verifyNoMoreInteractions(auditTrail); + } + + public void testRunAsRequestRunningAsUnAllowedUser() { + TransportRequest request = mock(TransportRequest.class); + User user = new User("run as me", new String[] { "doesn't exist" }, new User("test user", "can run as")); + assertNotEquals(user.authenticatedUser(), user); + final Authentication authentication = createAuthentication(user); + final RoleDescriptor role = new RoleDescriptor("can run as", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a").privileges("all").build() }, + new String[] { "not the right user" }); + roleMap.put("can run as", role); + + assertThrowsAuthorizationExceptionRunAs( + () -> authorize(authentication, "indices:a", request), + "indices:a", "test user", "run as me"); + verify(auditTrail).runAsDenied(authentication, "indices:a", request, new String[] { role.getName() }); + verifyNoMoreInteractions(auditTrail); + } + + public void testRunAsRequestWithRunAsUserWithoutPermission() { + TransportRequest request = new GetIndexRequest().indices("a"); + User authenticatedUser = new User("test user", "can run as"); + User user = new User("run as me", new String[] { "b" }, authenticatedUser); + assertNotEquals(user.authenticatedUser(), user); + final Authentication authentication = createAuthentication(user); + final RoleDescriptor runAsRole = new RoleDescriptor("can run as", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a").privileges("all").build() }, + new String[] { "run as me" }); + roleMap.put("can run as", runAsRole); + + RoleDescriptor bRole = new RoleDescriptor("b", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("b").privileges("all").build() }, null); + boolean indexExists = randomBoolean(); + if (indexExists) { + ClusterState state = mock(ClusterState.class); + when(clusterService.state()).thenReturn(state); + when(state.metaData()).thenReturn(MetaData.builder() + .put(new IndexMetaData.Builder("a") + .settings(Settings.builder().put("index.version.created", Version.CURRENT).build()) + .numberOfShards(1).numberOfReplicas(0).build(), true) + .build()); + roleMap.put("b", bRole); + } else { + 
mockEmptyMetaData(); + } + + assertThrowsAuthorizationExceptionRunAs( + () -> authorize(authentication, "indices:a", request), + "indices:a", "test user", "run as me"); + verify(auditTrail).runAsGranted(authentication, "indices:a", request, new String[] { runAsRole.getName() }); + if (indexExists) { + verify(auditTrail).accessDenied(authentication, "indices:a", request, new String[] { bRole.getName() }); + } else { + verify(auditTrail).accessDenied(authentication, "indices:a", request, Role.EMPTY.names()); + } + verifyNoMoreInteractions(auditTrail); + } + + public void testRunAsRequestWithValidPermissions() { + TransportRequest request = new GetIndexRequest().indices("b"); + User authenticatedUser = new User("test user", new String[] { "can run as" }); + User user = new User("run as me", new String[] { "b" }, authenticatedUser); + assertNotEquals(user.authenticatedUser(), user); + final Authentication authentication = createAuthentication(user); + final RoleDescriptor runAsRole = new RoleDescriptor("can run as", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a").privileges("all").build() }, + new String[] { "run as me" }); + roleMap.put("can run as", runAsRole); + ClusterState state = mock(ClusterState.class); + when(clusterService.state()).thenReturn(state); + when(state.metaData()).thenReturn(MetaData.builder() + .put(new IndexMetaData.Builder("b") + .settings(Settings.builder().put("index.version.created", Version.CURRENT).build()) + .numberOfShards(1).numberOfReplicas(0).build(), true) + .build()); + RoleDescriptor bRole = new RoleDescriptor("b", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("b").privileges("all").build() }, null); + roleMap.put("b", bRole); + + authorize(authentication, "indices:a", request); + verify(auditTrail).runAsGranted(authentication, "indices:a", request, new String[] { runAsRole.getName() }); + verify(auditTrail).accessGranted(authentication, "indices:a", request, new String[] { bRole.getName() }); + verifyNoMoreInteractions(auditTrail); + } + + public void testNonXPackUserCannotExecuteOperationAgainstSecurityIndex() { + RoleDescriptor role = new RoleDescriptor("all access", new String[] { "all" }, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("*").privileges("all").build() }, null); + final Authentication authentication = createAuthentication(new User("all_access_user", "all_access")); + roleMap.put("all_access", role); + ClusterState state = mock(ClusterState.class); + when(clusterService.state()).thenReturn(state); + when(state.metaData()).thenReturn(MetaData.builder() + .put(new IndexMetaData.Builder(SECURITY_INDEX_NAME) + .settings(Settings.builder().put("index.version.created", Version.CURRENT).build()) + .numberOfShards(1).numberOfReplicas(0).build(), true) + .build()); + + List> requests = new ArrayList<>(); + requests.add(new Tuple<>(BulkAction.NAME + "[s]", + new DeleteRequest(SECURITY_INDEX_NAME, "type", "id"))); + requests.add(new Tuple<>(UpdateAction.NAME, + new UpdateRequest(SECURITY_INDEX_NAME, "type", "id"))); + requests.add(new Tuple<>(BulkAction.NAME + "[s]", + new IndexRequest(SECURITY_INDEX_NAME, "type", "id"))); + requests.add(new Tuple<>(SearchAction.NAME, new SearchRequest(SECURITY_INDEX_NAME))); + requests.add(new Tuple<>(TermVectorsAction.NAME, + new TermVectorsRequest(SECURITY_INDEX_NAME, "type", "id"))); + requests.add(new Tuple<>(GetAction.NAME, new GetRequest(SECURITY_INDEX_NAME, "type", "id"))); + requests.add(new Tuple<>(TermVectorsAction.NAME, + new 
TermVectorsRequest(SECURITY_INDEX_NAME, "type", "id"))); + requests.add(new Tuple<>(IndicesAliasesAction.NAME, new IndicesAliasesRequest() + .addAliasAction(AliasActions.add().alias("security_alias").index(SECURITY_INDEX_NAME)))); + requests.add( + new Tuple<>(UpdateSettingsAction.NAME, new UpdateSettingsRequest().indices(SECURITY_INDEX_NAME))); + + for (Tuple requestTuple : requests) { + String action = requestTuple.v1(); + TransportRequest request = requestTuple.v2(); + assertThrowsAuthorizationException( + () -> authorize(authentication, action, request), + action, "all_access_user"); + verify(auditTrail).accessDenied(authentication, action, request, new String[] { role.getName() }); + verifyNoMoreInteractions(auditTrail); + } + + // we should allow waiting for the health of the index or any index if the user has this permission + ClusterHealthRequest request = new ClusterHealthRequest(SECURITY_INDEX_NAME); + authorize(authentication, ClusterHealthAction.NAME, request); + verify(auditTrail).accessGranted(authentication, ClusterHealthAction.NAME, request, new String[] { role.getName() }); + + // multiple indices + request = new ClusterHealthRequest(SECURITY_INDEX_NAME, "foo", "bar"); + authorize(authentication, ClusterHealthAction.NAME, request); + verify(auditTrail).accessGranted(authentication, ClusterHealthAction.NAME, request, new String[] { role.getName() }); + verifyNoMoreInteractions(auditTrail); + + final SearchRequest searchRequest = new SearchRequest("_all"); + authorize(authentication, SearchAction.NAME, searchRequest); + assertEquals(2, searchRequest.indices().length); + assertEquals(IndicesAndAliasesResolver.NO_INDICES_LIST, Arrays.asList(searchRequest.indices())); + } + + public void testGrantedNonXPackUserCanExecuteMonitoringOperationsAgainstSecurityIndex() { + RoleDescriptor role = new RoleDescriptor("all access", new String[] { "all" }, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("*").privileges("all").build() }, null); + final Authentication authentication = createAuthentication(new User("all_access_user", "all_access")); + roleMap.put("all_access", role); + ClusterState state = mock(ClusterState.class); + when(clusterService.state()).thenReturn(state); + when(state.metaData()).thenReturn(MetaData.builder() + .put(new IndexMetaData.Builder(SECURITY_INDEX_NAME) + .settings(Settings.builder().put("index.version.created", Version.CURRENT).build()) + .numberOfShards(1).numberOfReplicas(0).build(), true) + .build()); + + List> requests = new ArrayList<>(); + requests.add(new Tuple<>(IndicesStatsAction.NAME, new IndicesStatsRequest().indices(SECURITY_INDEX_NAME))); + requests.add(new Tuple<>(RecoveryAction.NAME, new RecoveryRequest().indices(SECURITY_INDEX_NAME))); + requests.add(new Tuple<>(IndicesSegmentsAction.NAME, new IndicesSegmentsRequest().indices(SECURITY_INDEX_NAME))); + requests.add(new Tuple<>(GetSettingsAction.NAME, new GetSettingsRequest().indices(SECURITY_INDEX_NAME))); + requests.add(new Tuple<>(IndicesShardStoresAction.NAME, + new IndicesShardStoresRequest().indices(SECURITY_INDEX_NAME))); + requests.add(new Tuple<>(UpgradeStatusAction.NAME, + new UpgradeStatusRequest().indices(SECURITY_INDEX_NAME))); + + for (final Tuple requestTuple : requests) { + final String action = requestTuple.v1(); + final TransportRequest request = requestTuple.v2(); + authorize(authentication, action, request); + verify(auditTrail).accessGranted(authentication, action, request, new String[] { role.getName() }); + } + } + + public void 
testSuperusersCanExecuteOperationAgainstSecurityIndex() { + final User superuser = new User("custom_admin", ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR.getName()); + roleMap.put(ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR.getName(), ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR); + ClusterState state = mock(ClusterState.class); + when(clusterService.state()).thenReturn(state); + when(state.metaData()).thenReturn(MetaData.builder() + .put(new IndexMetaData.Builder(SECURITY_INDEX_NAME) + .settings(Settings.builder().put("index.version.created", Version.CURRENT).build()) + .numberOfShards(1).numberOfReplicas(0).build(), true) + .build()); + + List> requests = new ArrayList<>(); + requests.add(new Tuple<>(DeleteAction.NAME, + new DeleteRequest(SECURITY_INDEX_NAME, "type", "id"))); + requests.add(new Tuple<>(BulkAction.NAME + "[s]", + createBulkShardRequest(SECURITY_INDEX_NAME, DeleteRequest::new))); + requests.add(new Tuple<>(UpdateAction.NAME, + new UpdateRequest(SECURITY_INDEX_NAME, "type", "id"))); + requests.add(new Tuple<>(IndexAction.NAME, + new IndexRequest(SECURITY_INDEX_NAME, "type", "id"))); + requests.add(new Tuple<>(BulkAction.NAME + "[s]", + createBulkShardRequest(SECURITY_INDEX_NAME, IndexRequest::new))); + requests.add(new Tuple<>(SearchAction.NAME, new SearchRequest(SECURITY_INDEX_NAME))); + requests.add(new Tuple<>(TermVectorsAction.NAME, + new TermVectorsRequest(SECURITY_INDEX_NAME, "type", "id"))); + requests.add(new Tuple<>(GetAction.NAME, new GetRequest(SECURITY_INDEX_NAME, "type", "id"))); + requests.add(new Tuple<>(TermVectorsAction.NAME, + new TermVectorsRequest(SECURITY_INDEX_NAME, "type", "id"))); + requests.add(new Tuple<>(IndicesAliasesAction.NAME, new IndicesAliasesRequest() + .addAliasAction(AliasActions.add().alias("security_alias").index(SECURITY_INDEX_NAME)))); + requests.add(new Tuple<>(ClusterHealthAction.NAME, new ClusterHealthRequest(SECURITY_INDEX_NAME))); + requests.add(new Tuple<>(ClusterHealthAction.NAME, + new ClusterHealthRequest(SECURITY_INDEX_NAME, "foo", "bar"))); + + for (final Tuple requestTuple : requests) { + final String action = requestTuple.v1(); + final TransportRequest request = requestTuple.v2(); + final Authentication authentication = createAuthentication(superuser); + authorize(authentication, action, request); + verify(auditTrail).accessGranted(authentication, action, request, superuser.roles()); + } + } + + public void testSuperusersCanExecuteOperationAgainstSecurityIndexWithWildcard() { + final User superuser = new User("custom_admin", ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR.getName()); + final Authentication authentication = createAuthentication(superuser); + roleMap.put(ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR.getName(), ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR); + ClusterState state = mock(ClusterState.class); + when(clusterService.state()).thenReturn(state); + when(state.metaData()).thenReturn(MetaData.builder() + .put(new IndexMetaData.Builder(SECURITY_INDEX_NAME) + .settings(Settings.builder().put("index.version.created", Version.CURRENT).build()) + .numberOfShards(1).numberOfReplicas(0).build(), true) + .build()); + + String action = SearchAction.NAME; + SearchRequest request = new SearchRequest("_all"); + authorize(createAuthentication(superuser), action, request); + verify(auditTrail).accessGranted(authentication, action, request, superuser.roles()); + assertThat(request.indices(), arrayContaining(".security")); + } + + public void testAnonymousRolesAreAppliedToOtherUsers() { + TransportRequest request = new 
ClusterHealthRequest(); + Settings settings = Settings.builder().put(AnonymousUser.ROLES_SETTING.getKey(), "anonymous_user_role").build(); + final AnonymousUser anonymousUser = new AnonymousUser(settings); + authorizationService = new AuthorizationService(settings, rolesStore, clusterService, auditTrail, + new DefaultAuthenticationFailureHandler(), threadPool, anonymousUser); + roleMap.put("anonymous_user_role", new RoleDescriptor("anonymous_user_role", new String[] { "all" }, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a").privileges("all").build() }, null)); + mockEmptyMetaData(); + + // sanity check the anonymous user + authorize(createAuthentication(anonymousUser), ClusterHealthAction.NAME, request); + authorize(createAuthentication(anonymousUser), IndicesExistsAction.NAME, new IndicesExistsRequest("a")); + + // test the no role user + final User userWithNoRoles = new User("no role user"); + authorize(createAuthentication(userWithNoRoles), ClusterHealthAction.NAME, request); + authorize(createAuthentication(userWithNoRoles), IndicesExistsAction.NAME, new IndicesExistsRequest("a")); + } + + public void testDefaultRoleUserWithoutRoles() { + PlainActionFuture rolesFuture = new PlainActionFuture<>(); + authorizationService.roles(new User("no role user"), rolesFuture); + final Role roles = rolesFuture.actionGet(); + assertEquals(Role.EMPTY, roles); + } + + public void testAnonymousUserEnabledRoleAdded() { + Settings settings = Settings.builder().put(AnonymousUser.ROLES_SETTING.getKey(), "anonymous_user_role").build(); + final AnonymousUser anonymousUser = new AnonymousUser(settings); + authorizationService = new AuthorizationService(settings, rolesStore, clusterService, auditTrail, + new DefaultAuthenticationFailureHandler(), threadPool, anonymousUser); + roleMap.put("anonymous_user_role", new RoleDescriptor("anonymous_user_role", new String[] { "all" }, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a").privileges("all").build() }, null)); + mockEmptyMetaData(); + PlainActionFuture rolesFuture = new PlainActionFuture<>(); + authorizationService.roles(new User("no role user"), rolesFuture); + final Role roles = rolesFuture.actionGet(); + assertThat(Arrays.asList(roles.names()), hasItem("anonymous_user_role")); + } + + public void testCompositeActionsAreImmediatelyRejected() { + //if the user has no permission for composite actions against any index, the request fails straight-away in the main action + final Tuple compositeRequest = randomCompositeRequest(); + final String action = compositeRequest.v1(); + final TransportRequest request = compositeRequest.v2(); + final Authentication authentication = createAuthentication(new User("test user", "no_indices")); + final RoleDescriptor role = new RoleDescriptor("no_indices", null, null, null); + roleMap.put("no_indices", role); + assertThrowsAuthorizationException( + () -> authorize(authentication, action, request), action, "test user"); + verify(auditTrail).accessDenied(authentication, action, request, new String[] { role.getName() }); + verifyNoMoreInteractions(auditTrail); + } + + public void testCompositeActionsIndicesAreNotChecked() { + //if the user has permission for some index, the request goes through without looking at the indices, they will be checked later + final Tuple compositeRequest = randomCompositeRequest(); + final String action = compositeRequest.v1(); + final TransportRequest request = compositeRequest.v2(); + final Authentication authentication = createAuthentication(new User("test 
user", "role")); + final RoleDescriptor role = new RoleDescriptor("role", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices(randomBoolean() ? "a" : "index").privileges("all").build() }, + null); + roleMap.put("role", role); + authorize(authentication, action, request); + verify(auditTrail).accessGranted(authentication, action, request, new String[] { role.getName() }); + verifyNoMoreInteractions(auditTrail); + } + + public void testCompositeActionsMustImplementCompositeIndicesRequest() { + String action = randomCompositeRequest().v1(); + TransportRequest request = mock(TransportRequest.class); + User user = new User("test user", "role"); + roleMap.put("role", new RoleDescriptor("role", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices(randomBoolean() ? "a" : "index").privileges("all").build() }, + null)); + IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, + () -> authorize(createAuthentication(user), action, request)); + assertThat(illegalStateException.getMessage(), containsString("Composite actions must implement CompositeIndicesRequest")); + } + + public void testCompositeActionsIndicesAreCheckedAtTheShardLevel() { + final MockIndicesRequest mockRequest = new MockIndicesRequest(IndicesOptions.strictExpandOpen(), "index"); + final TransportRequest request; + final String action; + switch (randomIntBetween(0, 4)) { + case 0: + action = MultiGetAction.NAME + "[shard]"; + request = mockRequest; + break; + case 1: + //reindex, msearch, search template, and multi search template delegate to search + action = SearchAction.NAME; + request = mockRequest; + break; + case 2: + action = MultiTermVectorsAction.NAME + "[shard]"; + request = mockRequest; + break; + case 3: + action = BulkAction.NAME + "[s]"; + request = createBulkShardRequest("index", IndexRequest::new); + break; + case 4: + action = "indices:data/read/mpercolate[s]"; + request = mockRequest; + break; + default: + throw new UnsupportedOperationException(); + } + logger.info("--> action: {}", action); + + User userAllowed = new User("userAllowed", "roleAllowed"); + roleMap.put("roleAllowed", new RoleDescriptor("roleAllowed", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("index").privileges("all").build() }, null)); + User userDenied = new User("userDenied", "roleDenied"); + roleMap.put("roleDenied", new RoleDescriptor("roleDenied", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a").privileges("all").build() }, null)); + mockEmptyMetaData(); + authorize(createAuthentication(userAllowed), action, request); + assertThrowsAuthorizationException( + () -> authorize(createAuthentication(userDenied), action, request), action, "userDenied"); + } + + public void testAuthorizationOfIndividualBulkItems() { + final String action = BulkAction.NAME + "[s]"; + final BulkItemRequest[] items = { + new BulkItemRequest(1, new DeleteRequest("concrete-index", "doc", "c1")), + new BulkItemRequest(2, new IndexRequest("concrete-index", "doc", "c2")), + new BulkItemRequest(3, new DeleteRequest("alias-1", "doc", "a1a")), + new BulkItemRequest(4, new IndexRequest("alias-1", "doc", "a1b")), + new BulkItemRequest(5, new DeleteRequest("alias-2", "doc", "a2a")), + new BulkItemRequest(6, new IndexRequest("alias-2", "doc", "a2b")) + }; + final ShardId shardId = new ShardId("concrete-index", UUID.randomUUID().toString(), 1); + final TransportRequest request = new BulkShardRequest(shardId, WriteRequest.RefreshPolicy.IMMEDIATE, items); + + final 
Authentication authentication = createAuthentication(new User("user", "my-role")); + RoleDescriptor role = new RoleDescriptor("my-role", null, new IndicesPrivileges[] { + IndicesPrivileges.builder().indices("concrete-index").privileges("all").build(), + IndicesPrivileges.builder().indices("alias-1").privileges("index").build(), + IndicesPrivileges.builder().indices("alias-2").privileges("delete").build() + }, null); + roleMap.put("my-role", role); + + mockEmptyMetaData(); + authorize(authentication, action, request); + + verify(auditTrail).accessDenied(authentication, DeleteAction.NAME, request, new String[] { role.getName() }); // alias-1 delete + verify(auditTrail).accessDenied(authentication, IndexAction.NAME, request, new String[] { role.getName() }); // alias-2 index + verify(auditTrail).accessGranted(authentication, action, request, new String[] { role.getName() }); // bulk request is allowed + verifyNoMoreInteractions(auditTrail); + } + + public void testAuthorizationOfIndividualBulkItemsWithDateMath() { + final String action = BulkAction.NAME + "[s]"; + final BulkItemRequest[] items = { + new BulkItemRequest(1, new IndexRequest("<datemath-{now/M{YYYY}}>", "doc", "dy1")), + new BulkItemRequest(2, + new DeleteRequest("<datemath-{now/d{YYYY}}>", "doc", "dy2")), // resolves to same as above + new BulkItemRequest(3, new IndexRequest("<datemath-{now/M{YYYY.MM}}>", "doc", "dm1")), + new BulkItemRequest(4, + new DeleteRequest("<datemath-{now/d{YYYY.MM}}>", "doc", "dm2")), // resolves to same as above + }; + final ShardId shardId = new ShardId("concrete-index", UUID.randomUUID().toString(), 1); + final TransportRequest request = new BulkShardRequest(shardId, WriteRequest.RefreshPolicy.IMMEDIATE, items); + + final Authentication authentication = createAuthentication(new User("user", "my-role")); + final RoleDescriptor role = new RoleDescriptor("my-role", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("datemath-*").privileges("index").build() }, null); + roleMap.put("my-role", role); + + mockEmptyMetaData(); + authorize(authentication, action, request); + + // both deletes should fail + verify(auditTrail, Mockito.times(2)).accessDenied(authentication, DeleteAction.NAME, request, + new String[] { role.getName() }); + // bulk request is allowed + verify(auditTrail).accessGranted(authentication, action, request, new String[] { role.getName() }); + verifyNoMoreInteractions(auditTrail); + } + + private BulkShardRequest createBulkShardRequest(String indexName, TriFunction<String, String, String, DocWriteRequest<?>> req) { + final BulkItemRequest[] items = { new BulkItemRequest(1, req.apply(indexName, "type", "id")) }; + return new BulkShardRequest(new ShardId(indexName, UUID.randomUUID().toString(), 1), + WriteRequest.RefreshPolicy.IMMEDIATE, items); + } + + public void testSameUserPermission() { + final User user = new User("joe"); + final boolean changePasswordRequest = randomBoolean(); + final TransportRequest request = changePasswordRequest ? + new ChangePasswordRequestBuilder(mock(Client.class)).username(user.principal()).request() : + new AuthenticateRequestBuilder(mock(Client.class)).username(user.principal()).request(); + final String action = changePasswordRequest ? ChangePasswordAction.NAME : AuthenticateAction.NAME; + final Authentication authentication = mock(Authentication.class); + final RealmRef authenticatedBy = mock(RealmRef.class); + when(authentication.getUser()).thenReturn(user); + when(authentication.getAuthenticatedBy()).thenReturn(authenticatedBy); + when(authenticatedBy.getType()) + .thenReturn(changePasswordRequest ?
randomFrom(ReservedRealm.TYPE, NativeRealmSettings.TYPE) : + randomAlphaOfLengthBetween(4, 12)); + + assertThat(request, instanceOf(UserRequest.class)); + assertTrue(AuthorizationService.checkSameUserPermissions(action, request, authentication)); + } + + public void testSameUserPermissionDoesNotAllowNonMatchingUsername() { + final User authUser = new User("admin", new String[] { "bar" }); + final User user = new User("joe", null, authUser); + final boolean changePasswordRequest = randomBoolean(); + final String username = randomFrom("", "joe" + randomAlphaOfLengthBetween(1, 5), randomAlphaOfLengthBetween(3, 10)); + final TransportRequest request = changePasswordRequest ? + new ChangePasswordRequestBuilder(mock(Client.class)).username(username).request() : + new AuthenticateRequestBuilder(mock(Client.class)).username(username).request(); + final String action = changePasswordRequest ? ChangePasswordAction.NAME : AuthenticateAction.NAME; + final Authentication authentication = mock(Authentication.class); + final RealmRef authenticatedBy = mock(RealmRef.class); + when(authentication.getUser()).thenReturn(user); + when(authentication.getAuthenticatedBy()).thenReturn(authenticatedBy); + when(authenticatedBy.getType()) + .thenReturn(changePasswordRequest ? randomFrom(ReservedRealm.TYPE, NativeRealmSettings.TYPE) : + randomAlphaOfLengthBetween(4, 12)); + + assertThat(request, instanceOf(UserRequest.class)); + assertFalse(AuthorizationService.checkSameUserPermissions(action, request, authentication)); + + when(authentication.getUser()).thenReturn(user); + final RealmRef lookedUpBy = mock(RealmRef.class); + when(authentication.getLookedUpBy()).thenReturn(lookedUpBy); + when(lookedUpBy.getType()) + .thenReturn(changePasswordRequest ? randomFrom(ReservedRealm.TYPE, NativeRealmSettings.TYPE) : + randomAlphaOfLengthBetween(4, 12)); + // this should still fail since the username is still different + assertFalse(AuthorizationService.checkSameUserPermissions(action, request, authentication)); + + if (request instanceof ChangePasswordRequest) { + ((ChangePasswordRequest) request).username("joe"); + } else { + ((AuthenticateRequest) request).username("joe"); + } + assertTrue(AuthorizationService.checkSameUserPermissions(action, request, authentication)); + } + + public void testSameUserPermissionDoesNotAllowOtherActions() { + final User user = mock(User.class); + final TransportRequest request = mock(TransportRequest.class); + final String action = randomFrom(PutUserAction.NAME, DeleteUserAction.NAME, ClusterHealthAction.NAME, ClusterStateAction.NAME, + ClusterStatsAction.NAME, GetLicenseAction.NAME); + final Authentication authentication = mock(Authentication.class); + final RealmRef authenticatedBy = mock(RealmRef.class); + final boolean runAs = randomBoolean(); + when(authentication.getUser()).thenReturn(user); + when(user.authenticatedUser()).thenReturn(runAs ? 
new User("authUser") : user); + when(user.isRunAs()).thenReturn(runAs); + when(authentication.getAuthenticatedBy()).thenReturn(authenticatedBy); + when(authenticatedBy.getType()) + .thenReturn(randomAlphaOfLengthBetween(4, 12)); + + assertFalse(AuthorizationService.checkSameUserPermissions(action, request, authentication)); + verifyZeroInteractions(user, request, authentication); + } + + public void testSameUserPermissionRunAsChecksAuthenticatedBy() { + final User authUser = new User("admin", new String[] { "bar" }); + final String username = "joe"; + final User user = new User(username, null, authUser); + final boolean changePasswordRequest = randomBoolean(); + final TransportRequest request = changePasswordRequest ? + new ChangePasswordRequestBuilder(mock(Client.class)).username(username).request() : + new AuthenticateRequestBuilder(mock(Client.class)).username(username).request(); + final String action = changePasswordRequest ? ChangePasswordAction.NAME : AuthenticateAction.NAME; + final Authentication authentication = mock(Authentication.class); + final RealmRef authenticatedBy = mock(RealmRef.class); + final RealmRef lookedUpBy = mock(RealmRef.class); + when(authentication.getUser()).thenReturn(user); + when(authentication.getAuthenticatedBy()).thenReturn(authenticatedBy); + when(authentication.getLookedUpBy()).thenReturn(lookedUpBy); + when(lookedUpBy.getType()) + .thenReturn(changePasswordRequest ? randomFrom(ReservedRealm.TYPE, NativeRealmSettings.TYPE) : + randomAlphaOfLengthBetween(4, 12)); + assertTrue(AuthorizationService.checkSameUserPermissions(action, request, authentication)); + + when(authentication.getUser()).thenReturn(authUser); + assertFalse(AuthorizationService.checkSameUserPermissions(action, request, authentication)); + } + + public void testSameUserPermissionDoesNotAllowChangePasswordForOtherRealms() { + final User user = new User("joe"); + final ChangePasswordRequest request = new ChangePasswordRequestBuilder(mock(Client.class)).username(user.principal()).request(); + final String action = ChangePasswordAction.NAME; + final Authentication authentication = mock(Authentication.class); + final RealmRef authenticatedBy = mock(RealmRef.class); + when(authentication.getUser()).thenReturn(user); + when(authentication.getAuthenticatedBy()).thenReturn(authenticatedBy); + when(authenticatedBy.getType()).thenReturn(randomFrom(LdapRealmSettings.LDAP_TYPE, FileRealmSettings.TYPE, + LdapRealmSettings.AD_TYPE, PkiRealmSettings.TYPE, + randomAlphaOfLengthBetween(4, 12))); + + assertThat(request, instanceOf(UserRequest.class)); + assertFalse(AuthorizationService.checkSameUserPermissions(action, request, authentication)); + verify(authenticatedBy).getType(); + verify(authentication).getAuthenticatedBy(); + verify(authentication, times(2)).getUser(); + verifyNoMoreInteractions(authenticatedBy, authentication); + } + + public void testSameUserPermissionDoesNotAllowChangePasswordForLookedUpByOtherRealms() { + final User authUser = new User("admin", new String[] { "bar" }); + final User user = new User("joe", null, authUser); + final ChangePasswordRequest request = new ChangePasswordRequestBuilder(mock(Client.class)).username(user.principal()).request(); + final String action = ChangePasswordAction.NAME; + final Authentication authentication = mock(Authentication.class); + final RealmRef authenticatedBy = mock(RealmRef.class); + final RealmRef lookedUpBy = mock(RealmRef.class); + when(authentication.getUser()).thenReturn(user); + 
when(authentication.getAuthenticatedBy()).thenReturn(authenticatedBy); + when(authentication.getLookedUpBy()).thenReturn(lookedUpBy); + when(lookedUpBy.getType()).thenReturn(randomFrom(LdapRealmSettings.LDAP_TYPE, FileRealmSettings.TYPE, + LdapRealmSettings.AD_TYPE, PkiRealmSettings.TYPE, + randomAlphaOfLengthBetween(4, 12))); + + assertThat(request, instanceOf(UserRequest.class)); + assertFalse(AuthorizationService.checkSameUserPermissions(action, request, authentication)); + verify(authentication).getLookedUpBy(); + verify(authentication, times(2)).getUser(); + verify(lookedUpBy).getType(); + verifyNoMoreInteractions(authentication, lookedUpBy, authenticatedBy); + } + + private static Tuple<String, TransportRequest> randomCompositeRequest() { + switch (randomIntBetween(0, 7)) { + case 0: + return Tuple.tuple(MultiGetAction.NAME, new MultiGetRequest().add("index", "type", "id")); + case 1: + return Tuple.tuple(MultiSearchAction.NAME, new MultiSearchRequest().add(new SearchRequest())); + case 2: + return Tuple.tuple(MultiTermVectorsAction.NAME, new MultiTermVectorsRequest().add("index", "type", "id")); + case 3: + return Tuple.tuple(BulkAction.NAME, new BulkRequest().add(new DeleteRequest("index", "type", "id"))); + case 4: + return Tuple.tuple("indices:data/read/mpercolate", new MockCompositeIndicesRequest()); + case 5: + return Tuple.tuple("indices:data/read/msearch/template", new MockCompositeIndicesRequest()); + case 6: + return Tuple.tuple("indices:data/read/search/template", new MockCompositeIndicesRequest()); + case 7: + return Tuple.tuple("indices:data/write/reindex", new MockCompositeIndicesRequest()); + default: + throw new UnsupportedOperationException(); + } + } + + private static class MockCompositeIndicesRequest extends TransportRequest implements CompositeIndicesRequest { + } + + public void testDoesNotUseRolesStoreForXPackUser() { + PlainActionFuture<Role> rolesFuture = new PlainActionFuture<>(); + authorizationService.roles(XPackUser.INSTANCE, rolesFuture); + final Role roles = rolesFuture.actionGet(); + assertThat(roles, equalTo(XPackUser.ROLE)); + verifyZeroInteractions(rolesStore); + } + + public void testGetRolesForSystemUserThrowsException() { + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> authorizationService.roles(SystemUser.INSTANCE, + null)); + assertEquals("the user [_system] is the system user and we should never try to get its roles", iae.getMessage()); + } + + private static Authentication createAuthentication(User user) { + RealmRef lookedUpBy = user.authenticatedUser() == user ?
null : new RealmRef("looked", "up", "by"); + return new Authentication(user, new RealmRef("test", "test", "foo"), lookedUpBy); + } + + private ClusterState mockEmptyMetaData() { + ClusterState state = mock(ClusterState.class); + when(clusterService.state()).thenReturn(state); + when(state.metaData()).thenReturn(MetaData.EMPTY_META_DATA); + return state; + } + + public void testProxyRequestFailsOnNonProxyAction() { + TransportRequest request = TransportRequest.Empty.INSTANCE; + DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); + TransportRequest transportRequest = TransportActionProxy.wrapRequest(node, request); + User user = new User("test user", "role"); + IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, + () -> authorize(createAuthentication(user), "indices:some/action", transportRequest)); + assertThat(illegalStateException.getMessage(), + startsWith("originalRequest is a proxy request for: [org.elasticsearch.transport.TransportRequest$")); + assertThat(illegalStateException.getMessage(), endsWith("] but action: [indices:some/action] isn't")); + } + + public void testProxyRequestFailsOnNonProxyRequest() { + TransportRequest request = TransportRequest.Empty.INSTANCE; + User user = new User("test user", "role"); + IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, + () -> authorize(createAuthentication(user), TransportActionProxy.getProxyAction("indices:some/action"), request)); + assertThat(illegalStateException.getMessage(), + startsWith("originalRequest is not a proxy request: [org.elasticsearch.transport.TransportRequest$")); + assertThat(illegalStateException.getMessage(), + endsWith("] but action: [internal:transport/proxy/indices:some/action] is a proxy action")); + } + + public void testProxyRequestAuthenticationDenied() { + final TransportRequest proxiedRequest = new SearchRequest(); + final DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); + final TransportRequest transportRequest = TransportActionProxy.wrapRequest(node, proxiedRequest); + final String action = TransportActionProxy.getProxyAction(SearchTransportService.QUERY_ACTION_NAME); + final Authentication authentication = createAuthentication(new User("test user", "no_indices")); + final RoleDescriptor role = new RoleDescriptor("no_indices", null, null, null); + roleMap.put("no_indices", role); + assertThrowsAuthorizationException( + () -> authorize(authentication, action, transportRequest), action, "test user"); + verify(auditTrail).accessDenied(authentication, action, proxiedRequest, new String[] { role.getName() }); + verifyNoMoreInteractions(auditTrail); + } + + public void testProxyRequestAuthenticationGrantedWithAllPrivileges() { + RoleDescriptor role = new RoleDescriptor("a_role", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a").privileges("all").build() }, null); + final Authentication authentication = createAuthentication(new User("test user", "a_all")); + roleMap.put("a_all", role); + mockEmptyMetaData(); + DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); + + final ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + final TransportRequest transportRequest = TransportActionProxy.wrapRequest(node, clearScrollRequest); + final String action = TransportActionProxy.getProxyAction(SearchTransportService.CLEAR_SCROLL_CONTEXTS_ACTION_NAME); + authorize(authentication, 
action, transportRequest); + verify(auditTrail).accessGranted(authentication, action, clearScrollRequest, new String[] { role.getName() }); + } + + public void testProxyRequestAuthenticationGranted() { + RoleDescriptor role = new RoleDescriptor("a_role", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a").privileges("read_cross_cluster").build() }, null); + final Authentication authentication = createAuthentication(new User("test user", "a_all")); + roleMap.put("a_all", role); + mockEmptyMetaData(); + DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); + + final ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + final TransportRequest transportRequest = TransportActionProxy.wrapRequest(node, clearScrollRequest); + final String action = TransportActionProxy.getProxyAction(SearchTransportService.CLEAR_SCROLL_CONTEXTS_ACTION_NAME); + authorize(authentication, action, transportRequest); + verify(auditTrail).accessGranted(authentication, action, clearScrollRequest, new String[] { role.getName() }); + } + + public void testProxyRequestAuthenticationDeniedWithReadPrivileges() { + final Authentication authentication = createAuthentication(new User("test user", "a_all")); + final RoleDescriptor role = new RoleDescriptor("a_role", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a").privileges("read").build() }, null); + roleMap.put("a_all", role); + mockEmptyMetaData(); + DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); + ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + TransportRequest transportRequest = TransportActionProxy.wrapRequest(node, clearScrollRequest); + String action = TransportActionProxy.getProxyAction(SearchTransportService.CLEAR_SCROLL_CONTEXTS_ACTION_NAME); + assertThrowsAuthorizationException( + () -> authorize(authentication, action, transportRequest), action, "test user"); + verify(auditTrail).accessDenied(authentication, action, clearScrollRequest, new String[] { role.getName() }); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java new file mode 100644 index 0000000000000..9c9f2b1b1a42a --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authz; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import org.elasticsearch.xpack.core.security.authc.AuthenticationField; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.user.XPackSecurityUser; +import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.junit.Before; + +import java.util.concurrent.CountDownLatch; +import java.util.function.Consumer; + +import static org.hamcrest.Matchers.is; + +/** + * Unit tests for the AuthorizationUtils class + */ +public class AuthorizationUtilsTests extends ESTestCase { + + private ThreadContext threadContext; + + @Before + public void setupContext() { + threadContext = new ThreadContext(Settings.EMPTY); + } + + public void testSystemUserSwitchNonInternalAction() { + assertThat(AuthorizationUtils.shouldReplaceUserWithSystem(threadContext, randomFrom("indices:foo", "cluster:bar")), is(false)); + } + + public void testSystemUserSwitchWithSystemUser() { + threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, + new Authentication(SystemUser.INSTANCE, new RealmRef("test", "test", "foo"), null)); + assertThat(AuthorizationUtils.shouldReplaceUserWithSystem(threadContext, "internal:something"), is(false)); + } + + public void testSystemUserSwitchWithNullUser() { + assertThat(AuthorizationUtils.shouldReplaceUserWithSystem(threadContext, "internal:something"), is(true)); + } + + public void testSystemUserSwitchWithNonSystemUser() { + User user = new User(randomAlphaOfLength(6), new String[] {}); + Authentication authentication = new Authentication(user, new RealmRef("test", "test", "foo"), null); + threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, authentication); + threadContext.putTransient(AuthorizationService.ORIGINATING_ACTION_KEY, randomFrom("indices:foo", "cluster:bar")); + assertThat(AuthorizationUtils.shouldReplaceUserWithSystem(threadContext, "internal:something"), is(true)); + } + + public void testSystemUserSwitchWithNonSystemUserAndInternalAction() { + User user = new User(randomAlphaOfLength(6), new String[] {}); + Authentication authentication = new Authentication(user, new RealmRef("test", "test", "foo"), null); + threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, authentication); + threadContext.putTransient(AuthorizationService.ORIGINATING_ACTION_KEY, randomFrom("internal:foo/bar")); + assertThat(AuthorizationUtils.shouldReplaceUserWithSystem(threadContext, "internal:something"), is(false)); + } + + public void testShouldSetUser() { + assertFalse(AuthorizationUtils.shouldSetUserBasedOnActionOrigin(threadContext)); + + // put origin in context + threadContext.putTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME, randomAlphaOfLength(4)); + assertTrue(AuthorizationUtils.shouldSetUserBasedOnActionOrigin(threadContext)); + + // set authentication + User user = new User(randomAlphaOfLength(6), new String[] {}); + Authentication authentication = new Authentication(user, new RealmRef("test", "test", "foo"), 
null); + threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, authentication); + assertFalse(AuthorizationUtils.shouldSetUserBasedOnActionOrigin(threadContext)); + + threadContext = new ThreadContext(Settings.EMPTY); + threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, authentication); + assertFalse(AuthorizationUtils.shouldSetUserBasedOnActionOrigin(threadContext)); + + threadContext = new ThreadContext(Settings.EMPTY); + threadContext.putHeader(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME, randomAlphaOfLength(4)); + assertFalse(AuthorizationUtils.shouldSetUserBasedOnActionOrigin(threadContext)); + } + + public void testSwitchAndExecuteXpackSecurityUser() throws Exception { + SecurityContext securityContext = new SecurityContext(Settings.EMPTY, threadContext); + final String headerName = randomAlphaOfLengthBetween(4, 16); + final String headerValue = randomAlphaOfLengthBetween(4, 16); + final CountDownLatch latch = new CountDownLatch(2); + + final ActionListener listener = ActionListener.wrap(v -> { + assertNull(threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME)); + assertNull(threadContext.getHeader(headerName)); + assertEquals(XPackSecurityUser.INSTANCE, securityContext.getAuthentication().getUser()); + latch.countDown(); + }, e -> fail(e.getMessage())); + + final Consumer consumer = original -> { + assertNull(threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME)); + assertNull(threadContext.getHeader(headerName)); + assertEquals(XPackSecurityUser.INSTANCE, securityContext.getAuthentication().getUser()); + latch.countDown(); + listener.onResponse(null); + }; + threadContext.putHeader(headerName, headerValue); + threadContext.putTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME, ClientHelper.SECURITY_ORIGIN); + + AuthorizationUtils.switchUserBasedOnActionOriginAndExecute(threadContext, securityContext, consumer); + + latch.await(); + } + + public void testSwitchAndExecuteXpackUser() throws Exception { + SecurityContext securityContext = new SecurityContext(Settings.EMPTY, threadContext); + final String headerName = randomAlphaOfLengthBetween(4, 16); + final String headerValue = randomAlphaOfLengthBetween(4, 16); + final CountDownLatch latch = new CountDownLatch(2); + + final ActionListener listener = ActionListener.wrap(v -> { + assertNull(threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME)); + assertNull(threadContext.getHeader(headerName)); + assertEquals(XPackUser.INSTANCE, securityContext.getAuthentication().getUser()); + latch.countDown(); + }, e -> fail(e.getMessage())); + + final Consumer consumer = original -> { + assertNull(threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME)); + assertNull(threadContext.getHeader(headerName)); + assertEquals(XPackUser.INSTANCE, securityContext.getAuthentication().getUser()); + latch.countDown(); + listener.onResponse(null); + }; + threadContext.putHeader(headerName, headerValue); + threadContext.putTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME, + randomFrom(ClientHelper.ML_ORIGIN, ClientHelper.WATCHER_ORIGIN, ClientHelper.DEPRECATION_ORIGIN, + ClientHelper.MONITORING_ORIGIN, ClientHelper.PERSISTENT_TASK_ORIGIN)); + + AuthorizationUtils.switchUserBasedOnActionOriginAndExecute(threadContext, securityContext, consumer); + + latch.await(); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java new file mode 100644 index 0000000000000..4bb8af96ca8c8 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authz; + +import org.elasticsearch.Version; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; + +import java.util.List; + +import static org.hamcrest.Matchers.containsInAnyOrder; + +public class AuthorizedIndicesTests extends ESTestCase { + + public void testAuthorizedIndicesUserWithoutRoles() { + User user = new User("test user"); + AuthorizedIndices authorizedIndices = new AuthorizedIndices(user, Role.EMPTY, "", + MetaData.EMPTY_META_DATA); + List list = authorizedIndices.get(); + assertTrue(list.isEmpty()); + } + + public void testAuthorizedIndicesUserWithSomeRoles() { + User user = new User("test user", "a_star", "b"); + RoleDescriptor aStarRole = new RoleDescriptor("a_star", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("a*").privileges("all").build() }, null); + RoleDescriptor bRole = new RoleDescriptor("b", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("b").privileges("READ").build() }, null); + Settings indexSettings = Settings.builder().put("index.version.created", Version.CURRENT).build(); + MetaData metaData = MetaData.builder() + .put(new IndexMetaData.Builder("a1").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) + .put(new IndexMetaData.Builder("a2").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) + .put(new IndexMetaData.Builder("aaaaaa").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) + .put(new IndexMetaData.Builder("bbbbb").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) + .put(new IndexMetaData.Builder("b") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .putAlias(new AliasMetaData.Builder("ab").build()) + .putAlias(new AliasMetaData.Builder("ba").build()) + .build(), true) + .build(); + Role roles = CompositeRolesStore.buildRoleFromDescriptors(Sets.newHashSet(aStarRole, bRole), + new FieldPermissionsCache(Settings.EMPTY)); + AuthorizedIndices 
authorizedIndices = new AuthorizedIndices(user, roles, SearchAction.NAME, metaData); + List list = authorizedIndices.get(); + assertThat(list, containsInAnyOrder("a1", "a2", "aaaaaa", "b", "ab")); + assertFalse(list.contains("bbbbb")); + assertFalse(list.contains("ba")); + } + + public void testAuthorizedIndicesUserWithSomeRolesEmptyMetaData() { + User user = new User("test user", "role"); + Role role = Role.builder("role").add(IndexPrivilege.ALL, "*").build(); + AuthorizedIndices authorizedIndices = new AuthorizedIndices(user, role, SearchAction.NAME, MetaData.EMPTY_META_DATA); + List list = authorizedIndices.get(); + assertTrue(list.isEmpty()); + } + + public void testSecurityIndicesAreRemovedFromRegularUser() { + User user = new User("test user", "user_role"); + Role role = Role.builder("user_role").add(IndexPrivilege.ALL, "*").cluster(ClusterPrivilege.ALL).build(); + Settings indexSettings = Settings.builder().put("index.version.created", Version.CURRENT).build(); + MetaData metaData = MetaData.builder() + .put(new IndexMetaData.Builder("an-index").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) + .put(new IndexMetaData.Builder("another-index").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) + .put(new IndexMetaData.Builder(SecurityLifecycleService.SECURITY_INDEX_NAME).settings(indexSettings) + .numberOfShards(1).numberOfReplicas(0).build(), true) + .build(); + + AuthorizedIndices authorizedIndices = new AuthorizedIndices(user, role, SearchAction.NAME, metaData); + List list = authorizedIndices.get(); + assertThat(list, containsInAnyOrder("an-index", "another-index")); + } + + public void testSecurityIndicesAreNotRemovedFromSuperUsers() { + User user = new User("admin", "kibana_user", "superuser"); + Role role = Role.builder("kibana_user+superuser").add(IndexPrivilege.ALL, "*").cluster(ClusterPrivilege.ALL).build(); + Settings indexSettings = Settings.builder().put("index.version.created", Version.CURRENT).build(); + MetaData metaData = MetaData.builder() + .put(new IndexMetaData.Builder("an-index").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) + .put(new IndexMetaData.Builder("another-index").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) + .put(new IndexMetaData.Builder(SecurityLifecycleService.SECURITY_INDEX_NAME).settings(indexSettings) + .numberOfShards(1).numberOfReplicas(0).build(), true) + .build(); + + AuthorizedIndices authorizedIndices = new AuthorizedIndices(user, role, SearchAction.NAME, metaData); + List list = authorizedIndices.get(); + assertThat(list, containsInAnyOrder("an-index", "another-index", SecurityLifecycleService.SECURITY_INDEX_NAME)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndexAliasesTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndexAliasesTests.java new file mode 100644 index 0000000000000..d273d61959e2a --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndexAliasesTests.java @@ -0,0 +1,547 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authz; + +import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexAction; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.junit.Before; + +import java.util.Collections; +import java.util.Map; + +import static org.elasticsearch.test.SecurityTestsUtils.assertThrowsAuthorizationException; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; + +public class IndexAliasesTests extends SecurityIntegTestCase { + + protected static final String USERS_PASSWD_HASHED = new String(Hasher.BCRYPT.hash(new SecureString("test123".toCharArray()))); + + @Override + protected String configUsers() { + return super.configUsers() + + "create_only:" + USERS_PASSWD_HASHED + "\n" + + "create_test_aliases_test:" + USERS_PASSWD_HASHED + "\n" + + "create_test_aliases_alias:" + USERS_PASSWD_HASHED + "\n" + + "create_test_aliases_test_alias:" + USERS_PASSWD_HASHED + "\n" + + "aliases_only:" + USERS_PASSWD_HASHED + "\n"; + } + + @Override + protected String configUsersRoles() { + return super.configUsersRoles() + + "create_only:create_only\n" + + "create_test_aliases_test:create_test_aliases_test\n" + + "create_test_aliases_alias:create_test_aliases_alias\n" + + "create_test_aliases_test_alias:create_test_aliases_test_alias\n" + + "aliases_only:aliases_only\n"; + } + + @Override + protected String configRoles() { + return super.configRoles() + "\n" + + //role that has create index only privileges + "create_only:\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ create_index ]\n" + + //role that has create index and manage_aliases on test_*, not enough to manage_aliases aliases outside of test_* namespace + "create_test_aliases_test:\n" + + " indices:\n" + + " - names: 'test_*'\n" + + " privileges: [ create_index, 'indices:admin/aliases*' ]\n" + + //role that has create index on test_* and manage_aliases on alias_*, can't create aliases pointing to test_* though + "create_test_aliases_alias:\n" + + " indices:\n" + + " - names: 'test_*'\n" + + " privileges: [ create_index ]\n" + + " - names: 'alias_*'\n" + + " privileges: [ 'indices:admin/aliases*' ]\n" + + //role that has create index on test_* and manage_aliases on both alias_* and test_* + "create_test_aliases_test_alias:\n" + + " indices:\n" + + " - names: 'test_*'\n" + + " privileges: [ create_index ]\n" + + " - names: [ 'alias_*', 'test_*' ]\n" + + " privileges: [ 'indices:admin/aliases*' ]\n" + + //role that has manage_aliases only on both test_* and alias_* + "aliases_only:\n" + + " indices:\n" 
+ + " - names: [ 'alias_*', 'test_*']\n" + + " privileges: [ 'indices:admin/aliases*' ]\n"; + } + + @Before + public void createBogusIndex() { + if (randomBoolean()) { + //randomly create an index with two aliases from user admin, to make sure it doesn't affect any of the test results + assertAcked(client().admin().indices().prepareCreate("index1").addAlias(new Alias("alias1")).addAlias(new Alias("alias2"))); + } + } + + public void testCreateIndexThenAliasesCreateOnlyPermission() { + //user has create permission only: allows to create indices, manage_aliases is required to add/remove aliases + Map headers = Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("create_only", + new SecureString("test123".toCharArray()))); + assertAcked(client().filterWithHeader(headers).admin().indices().prepareCreate("test_1").get()); + + assertThrowsAuthorizationException( + client().filterWithHeader(headers).admin().indices().prepareAliases().addAlias("test_1", "test_alias")::get, + IndicesAliasesAction.NAME, "create_only"); + + assertThrowsAuthorizationException(client().filterWithHeader(headers).admin().indices().prepareAliases() + .addAlias("test_*", "test_alias")::get, IndicesAliasesAction.NAME, "create_only"); + } + + public void testCreateIndexAndAliasesCreateOnlyPermission() { + //user has create permission only: allows to create indices, manage_aliases is required to add aliases although they are part of + // the same create index request + Map headers = Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("create_only", + new SecureString("test123".toCharArray()))); + + assertThrowsAuthorizationException( + client().filterWithHeader(headers).admin().indices().prepareCreate("test_1").addAlias(new Alias("test_2"))::get, + IndicesAliasesAction.NAME, "create_only"); + } + + public void testDeleteAliasesCreateOnlyPermission() { + //user has create permission only: allows to create indices, manage_aliases is required to add/remove aliases + Map headers = Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("create_only", + new SecureString("test123".toCharArray()))); + + assertThrowsAuthorizationException( + client().filterWithHeader(headers).admin().indices().prepareAliases().removeAlias("test_1", "alias_1")::get, + IndicesAliasesAction.NAME, "create_only"); + + assertThrowsAuthorizationException(client().filterWithHeader(headers).admin().indices().prepareAliases() + .removeAlias("test_1", "alias_*")::get, IndicesAliasesAction.NAME, "create_only"); + + assertThrowsAuthorizationException(client().filterWithHeader(headers).admin().indices().prepareAliases() + .removeAlias("test_1", "_all")::get, IndicesAliasesAction.NAME, "create_only"); + } + + public void testGetAliasesCreateOnlyPermissionStrict() { + //user has create permission only: allows to create indices, manage_aliases is required to retrieve aliases though + Map headers = Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("create_only", + new SecureString("test123".toCharArray()))); + + assertThrowsAuthorizationException(client().filterWithHeader(headers).admin().indices().prepareGetAliases("test_1") + .setIndices("test_1").setIndicesOptions(IndicesOptions.strictExpand())::get, GetAliasesAction.NAME, "create_only"); + + assertThrowsAuthorizationException(client().filterWithHeader(headers) + .admin().indices().prepareGetAliases("_all") + .setIndices("test_1").setIndicesOptions(IndicesOptions.strictExpand())::get, GetAliasesAction.NAME, "create_only"); + + 
assertThrowsAuthorizationException(client().filterWithHeader(headers).admin().indices() + .prepareGetAliases().setIndices("test_1").setIndicesOptions(IndicesOptions.strictExpand())::get, + GetAliasesAction.NAME, "create_only"); + + assertThrowsAuthorizationException(client().filterWithHeader(headers).admin().indices().prepareGetAliases("test_alias") + .setIndices("test_*").setIndicesOptions(IndicesOptions.strictExpand())::get, GetAliasesAction.NAME, "create_only"); + + //this throws exception no matter what the indices options are because the aliases part cannot be resolved to any alias + //and there is no way to "allow_no_aliases" like we can do with indices. + assertThrowsAuthorizationException(client().filterWithHeader(headers).admin().indices().prepareGetAliases()::get, + GetAliasesAction.NAME, "create_only"); + } + + public void testGetAliasesCreateOnlyPermissionIgnoreUnavailable() { + //user has create permission only: allows to create indices, manage_aliases is required to retrieve aliases though + Map headers = Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("create_only", + new SecureString("test123".toCharArray()))); + + assertThrowsAuthorizationException(client().filterWithHeader(headers).admin().indices().prepareGetAliases("test_1") + .setIndices("test_1").setIndicesOptions(IndicesOptions.lenientExpandOpen())::get, GetAliasesAction.NAME, "create_only"); + + assertThrowsAuthorizationException(client().filterWithHeader(headers).admin().indices().prepareGetAliases("_all") + .setIndices("test_1").setIndicesOptions(IndicesOptions.lenientExpandOpen())::get, GetAliasesAction.NAME, "create_only"); + + assertThrowsAuthorizationException(client().filterWithHeader(headers).admin().indices().prepareGetAliases().setIndices("test_1") + .setIndicesOptions(IndicesOptions.lenientExpandOpen())::get, GetAliasesAction.NAME, "create_only"); + + assertThrowsAuthorizationException( + client().filterWithHeader(headers).admin().indices().prepareGetAliases("test_alias") + .setIndices("test_*").setIndicesOptions(IndicesOptions.lenientExpandOpen())::get, GetAliasesAction.NAME, "create_only"); + + //this throws exception no matter what the indices options are because the aliases part cannot be resolved to any alias + //and there is no way to "allow_no_aliases" like we can do with indices. + assertThrowsAuthorizationException(client().filterWithHeader(headers).admin().indices() + .prepareGetAliases().setIndicesOptions(IndicesOptions.lenientExpandOpen())::get, GetAliasesAction.NAME, "create_only"); + } + + public void testCreateIndexThenAliasesCreateAndAliasesPermission() { + //user has create and manage_aliases permission on test_*. 
manage_aliases is required to add/remove aliases on both aliases and + // indices + Map headers = Collections.singletonMap(BASIC_AUTH_HEADER, + basicAuthHeaderValue("create_test_aliases_test", new SecureString("test123".toCharArray()))); + + assertAcked(client().filterWithHeader(headers).admin().indices().prepareCreate("test_1").get()); + + //ok: user has manage_aliases on test_* + assertAcked(client().filterWithHeader(headers).admin().indices().prepareAliases().addAlias("test_1", "test_alias").get()); + + //ok: user has manage_aliases on test_* + assertAcked(client().filterWithHeader(headers).admin().indices().prepareAliases().addAlias("test_*", "test_alias_2").get()); + + //fails: user doesn't have manage_aliases on alias_1 + assertThrowsAuthorizationException(client().filterWithHeader(headers).admin().indices().prepareAliases() + .addAlias("test_1", "alias_1").addAlias("test_1", "test_alias")::get, + IndicesAliasesAction.NAME, "create_test_aliases_test"); + } + + public void testCreateIndexAndAliasesCreateAndAliasesPermission() { + //user has create and manage_aliases permission on test_*. manage_aliases is required to add/remove aliases on both aliases and + // indices + //ok: user has manage_aliases on test_* + Map headers = Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("create_test_aliases_test", + new SecureString("test123".toCharArray()))); + assertAcked(client().filterWithHeader(headers).admin().indices().prepareCreate("test_1").addAlias(new Alias("test_alias")).get()); + + //fails: user doesn't have manage_aliases on alias_1 + assertThrowsAuthorizationException(client().filterWithHeader(headers).admin().indices().prepareCreate("test_2") + .addAlias(new Alias("test_alias")).addAlias(new Alias("alias_2"))::get, + IndicesAliasesAction.NAME, "create_test_aliases_test"); + } + + public void testDeleteAliasesCreateAndAliasesPermission() { + //user has create and manage_aliases permission on test_*. 
manage_aliases is required to add/remove aliases on both aliases and + // indices + //ok: user has manage_aliases on test_* + Map headers = Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("create_test_aliases_test", + new SecureString("test123".toCharArray()))); + + assertAcked(client().filterWithHeader(headers).admin().indices().prepareCreate("test_1").addAlias(new Alias("test_alias_1")) + .addAlias(new Alias("test_alias_2")) + .addAlias(new Alias("test_alias_3")).addAlias(new Alias("test_alias_4")).get()); + //ok: user has manage_aliases on test_* + assertAcked(client().filterWithHeader(headers).admin().indices().prepareAliases().removeAlias("test_1", "test_alias_1").get()); + //ok: user has manage_aliases on test_* + assertAcked(client().filterWithHeader(headers).admin().indices().prepareAliases().removeAlias("test_*", "test_alias_2").get()); + //ok: user has manage_aliases on test_* + assertAcked(client().filterWithHeader(headers).admin().indices().prepareAliases().removeAlias("test_1", "test_alias_*").get()); + + //fails: all aliases have been deleted, no existing aliases match test_alias_* + IndexNotFoundException indexNotFoundException = expectThrows(IndexNotFoundException.class, + client().filterWithHeader(headers).admin().indices().prepareAliases().removeAlias("test_1", "test_alias_*")::get); + assertThat(indexNotFoundException.toString(), containsString("[test_alias_*]")); + + //fails: all aliases have been deleted, no existing aliases match _all + indexNotFoundException = expectThrows(IndexNotFoundException.class, + client().filterWithHeader(headers).admin().indices().prepareAliases().removeAlias("test_1", "_all")::get); + assertThat(indexNotFoundException.toString(), containsString("[_all]")); + + //fails: user doesn't have manage_aliases on alias_1 + assertThrowsAuthorizationException(client().filterWithHeader(headers).admin().indices().prepareAliases() + .removeAlias("test_1", "alias_1")::get, IndicesAliasesAction.NAME, "create_test_aliases_test"); + + //fails: user doesn't have manage_aliases on alias_1 + assertThrowsAuthorizationException(client().filterWithHeader(headers).admin().indices().prepareAliases() + .removeAlias("test_1", new String[]{"_all", "alias_1"})::get, IndicesAliasesAction.NAME, "create_test_aliases_test"); + } + + public void testGetAliasesCreateAndAliasesPermission() { + //user has create and manage_aliases permission on test_*. 
manage_aliases is required to retrieve aliases on both aliases and + // indices + Map headers = Collections.singletonMap(BASIC_AUTH_HEADER, + basicAuthHeaderValue("create_test_aliases_test", new SecureString("test123".toCharArray()))); + final Client client = client().filterWithHeader(headers); + assertAcked(client.admin().indices().prepareCreate("test_1").addAlias(new Alias("test_alias")).get()); + + //ok: user has manage_aliases on test_* + assertAliases(client.admin().indices().prepareGetAliases().setAliases("test_alias").setIndices("test_1"), + "test_1", "test_alias"); + + //ok: user has manage_aliases on test_*, test_* gets resolved to test_1 + assertAliases(client.admin().indices().prepareGetAliases().setAliases("test_alias").setIndices("test_*"), + "test_1", "test_alias"); + + //ok: user has manage_aliases on test_*, empty indices gets resolved to _all indices (thus test_1) + assertAliases(client.admin().indices().prepareGetAliases().setAliases("test_alias"), + "test_1", "test_alias"); + + //ok: user has manage_aliases on test_*, _all aliases gets resolved to test_alias and empty indices gets resolved to _all + // indices (thus test_1) + assertAliases(client.admin().indices().prepareGetAliases().setAliases("_all").setIndices("test_1"), + "test_1", "test_alias"); + + //ok: user has manage_aliases on test_*, empty aliases gets resolved to test_alias and empty indices gets resolved to _all + // indices (thus test_1) + assertAliases(client.admin().indices().prepareGetAliases().setIndices("test_1"), + "test_1", "test_alias"); + + //ok: user has manage_aliases on test_*, test_* aliases gets resolved to test_alias and empty indices gets resolved to _all + // indices (thus test_1) + assertAliases(client.admin().indices().prepareGetAliases().setAliases("test_*").setIndices("test_1"), + "test_1", "test_alias"); + + //ok: user has manage_aliases on test_*, _all aliases gets resolved to test_alias and _all indices becomes test_1 + assertAliases(client.admin().indices().prepareGetAliases().setAliases("_all").setIndices("_all"), + "test_1", "test_alias"); + + //ok: user has manage_aliases on test_*, empty aliases gets resolved to test_alias and empty indices becomes test_1 + assertAliases(client.admin().indices().prepareGetAliases(), + "test_1", "test_alias"); + + //fails: user has manage_aliases on test_*, although _all aliases and empty indices can be resolved, the explicit non + // authorized alias (alias_1) causes the request to fail + assertThrowsAuthorizationException(client.admin().indices().prepareGetAliases().setAliases("_all", "alias_1")::get, + GetAliasesAction.NAME, "create_test_aliases_test"); + + //fails: user doesn't have manage_aliases on alias_1 + assertThrowsAuthorizationException(client.admin().indices().prepareGetAliases().setAliases("alias_1")::get, + GetAliasesAction.NAME, "create_test_aliases_test"); + } + + public void testCreateIndexThenAliasesCreateAndAliasesPermission2() { + Map headers = Collections.singletonMap(BASIC_AUTH_HEADER, + basicAuthHeaderValue("create_test_aliases_alias", new SecureString("test123".toCharArray()))); + final Client client = client().filterWithHeader(headers); + + //user has create permission on test_* and manage_aliases permission on alias_*. 
manage_aliases is required to add/remove aliases + // on both aliases and indices + assertAcked(client.admin().indices().prepareCreate("test_1")); + + //fails: user doesn't have manage_aliases aliases on test_1 + assertThrowsAuthorizationException(client.admin().indices().prepareAliases().addAlias("test_1", "test_alias")::get, + IndicesAliasesAction.NAME, "create_test_aliases_alias"); + + //fails: user doesn't have manage_aliases aliases on test_1 + assertThrowsAuthorizationException(client.admin().indices().prepareAliases().addAlias("test_1", "alias_1")::get, + IndicesAliasesAction.NAME, "create_test_aliases_alias"); + + //fails: user doesn't have manage_aliases aliases on test_*, no matching indices to replace wildcards + IndexNotFoundException indexNotFoundException = expectThrows(IndexNotFoundException.class, + client.admin().indices().prepareAliases().addAlias("test_*", "alias_1")::get); + assertThat(indexNotFoundException.toString(), containsString("[test_*]")); + } + + public void testCreateIndexAndAliasesCreateAndAliasesPermission2() { + Map headers = Collections.singletonMap(BASIC_AUTH_HEADER, + basicAuthHeaderValue("create_test_aliases_alias", new + SecureString("test123".toCharArray()))); + final Client client = client().filterWithHeader(headers); + + //user has create permission on test_* and manage_aliases permission on alias_*. manage_aliases is required to add/remove aliases + // on both aliases and indices + + //fails: user doesn't have manage_aliases on test_1, create index is rejected as a whole + assertThrowsAuthorizationException(client.admin().indices().prepareCreate("test_1").addAlias(new Alias("test_alias"))::get, + IndicesAliasesAction.NAME, "create_test_aliases_alias"); + } + + public void testDeleteAliasesCreateAndAliasesPermission2() { + Map headers = Collections.singletonMap(BASIC_AUTH_HEADER, + basicAuthHeaderValue("create_test_aliases_alias", new SecureString("test123".toCharArray()))); + final Client client = client().filterWithHeader(headers); + + //user has create permission on test_* and manage_aliases permission on alias_*. manage_aliases is required to add/remove aliases + // on both aliases and indices + + //fails: user doesn't have manage_aliases on test_1 + assertThrowsAuthorizationException(client.admin().indices().prepareAliases().removeAlias("test_1", "test_alias")::get, + IndicesAliasesAction.NAME, "create_test_aliases_alias"); + + //fails: user doesn't have manage_aliases on test_*, wildcards can't get replaced + IndexNotFoundException indexNotFoundException = expectThrows(IndexNotFoundException.class, + client.admin().indices().prepareAliases().removeAlias("test_*", "alias_1")::get); + } + + public void testGetAliasesCreateAndAliasesPermission2() { + Map headers = Collections.singletonMap(BASIC_AUTH_HEADER, + basicAuthHeaderValue("create_test_aliases_alias", new SecureString("test123".toCharArray()))); + final Client client = client().filterWithHeader(headers); + + //user has create permission on test_* and manage_aliases permission on alias_*. 
manage_aliases is required to retrieve aliases + // on both aliases and indices + assertAcked(client.admin().indices().prepareCreate("test_1")); + + //fails: user doesn't have manage_aliases aliases on test_1, nor test_alias + assertThrowsAuthorizationException(client.admin().indices().prepareGetAliases().setAliases("test_alias").setIndices("test_1")::get, + GetAliasesAction.NAME, "create_test_aliases_alias"); + + //user doesn't have manage_aliases aliases on test_*, no matching indices to replace wildcards + GetAliasesResponse getAliasesResponse = client.admin().indices().prepareGetAliases() + .setIndices("test_*").setAliases("test_alias").get(); + assertEquals(0, getAliasesResponse.getAliases().size()); + + //no existing indices to replace empty indices (thus _all) + getAliasesResponse = client.admin().indices().prepareGetAliases().setAliases("test_alias").get(); + assertEquals(0, getAliasesResponse.getAliases().size()); + + //fails: no existing aliases to replace wildcards + IndexNotFoundException indexNotFoundException = expectThrows(IndexNotFoundException.class, + client.admin().indices().prepareGetAliases().setIndices("test_1").setAliases("test_*")::get); + assertThat(indexNotFoundException.toString(), containsString("[test_*]")); + + //fails: no existing aliases to replace _all + indexNotFoundException = expectThrows(IndexNotFoundException.class, + client.admin().indices().prepareGetAliases().setIndices("test_1").setAliases("_all")::get); + assertThat(indexNotFoundException.toString(), containsString("[_all]")); + + //fails: no existing aliases to replace empty aliases + indexNotFoundException = expectThrows(IndexNotFoundException.class, + client.admin().indices().prepareGetAliases().setIndices("test_1")::get); + assertThat(indexNotFoundException.toString(), containsString("[_all]")); + + //fails: no existing aliases to replace empty aliases + indexNotFoundException = expectThrows(IndexNotFoundException.class, client.admin().indices().prepareGetAliases()::get); + assertThat(indexNotFoundException.toString(), containsString("[_all]")); + } + + public void testCreateIndexThenAliasesCreateAndAliasesPermission3() { + Map headers = Collections.singletonMap(BASIC_AUTH_HEADER, + basicAuthHeaderValue("create_test_aliases_test_alias", new SecureString("test123".toCharArray()))); + final Client client = client().filterWithHeader(headers); + + //user has create permission on test_* and manage_aliases permission on test_*,alias_*. All good. + assertAcked(client.admin().indices().prepareCreate("test_1")); + + assertAcked(client.admin().indices().prepareAliases().addAlias("test_1", "test_alias")); + + assertAcked(client.admin().indices().prepareAliases().addAlias("test_1", "alias_1")); + + assertAcked(client.admin().indices().prepareAliases().addAlias("test_*", "alias_2")); + } + + public void testCreateIndexAndAliasesCreateAndAliasesPermission3() { + Map headers = Collections.singletonMap(BASIC_AUTH_HEADER, + basicAuthHeaderValue("create_test_aliases_test_alias", new SecureString("test123".toCharArray()))); + final Client client = client().filterWithHeader(headers); + + //user has create permission on test_* and manage_aliases permission on test_*,alias_*. All good. 
+ assertAcked(client.admin().indices().prepareCreate("test_1").addAlias(new Alias("test_alias"))); + + assertAcked(client.admin().indices().prepareCreate("test_2").addAlias(new Alias("test_alias_2")).addAlias(new Alias("alias_2"))); + } + + public void testDeleteAliasesCreateAndAliasesPermission3() { + Map headers = Collections.singletonMap(BASIC_AUTH_HEADER, + basicAuthHeaderValue("create_test_aliases_test_alias", new SecureString("test123".toCharArray()))); + final Client client = client().filterWithHeader(headers); + + //user has create permission on test_* and manage_aliases permission on test_*,alias_*. All good. + assertAcked(client.admin().indices().prepareCreate("test_1").addAlias(new Alias("test_alias")).addAlias(new Alias("alias_1")) + .addAlias(new Alias("alias_2")).addAlias(new Alias("alias_3"))); + + //fails: user doesn't have manage_aliases privilege on non_authorized + assertThrowsAuthorizationException(client.admin().indices().prepareAliases().removeAlias("test_1", "non_authorized") + .removeAlias("test_1", "test_alias")::get, IndicesAliasesAction.NAME, "create_test_aliases_test_alias"); + + assertAcked(client.admin().indices().prepareAliases().removeAlias("test_1", "alias_1")); + + assertAcked(client.admin().indices().prepareAliases().removeAlias("test_*", "_all")); + + //fails: all aliases have been deleted, _all can't be resolved to any existing authorized aliases + IndexNotFoundException indexNotFoundException = expectThrows(IndexNotFoundException.class, + client.admin().indices().prepareAliases().removeAlias("test_1", "_all")::get); + assertThat(indexNotFoundException.toString(), containsString("[_all]")); + } + + public void testGetAliasesCreateAndAliasesPermission3() { + Map headers = Collections.singletonMap(BASIC_AUTH_HEADER, + basicAuthHeaderValue("create_test_aliases_test_alias", new SecureString("test123".toCharArray()))); + final Client client = client().filterWithHeader(headers); + + //user has create permission on test_* and manage_aliases permission on test_*,alias_*. All good. 
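The `*Permission2` and `*Permission3` test groups around here assume two users: `create_test_aliases_alias`, whose role grants create on `test_*` and `manage_aliases` on `alias_*` only, and `create_test_aliases_test_alias`, whose role additionally grants `manage_aliases` on `test_*`. Those roles live in the test's role configuration rather than in this hunk; as a rough sketch only, equivalent privileges could be expressed with the `RoleDescriptor`/`IndicesPrivileges` builders that `IndicesAndAliasesResolverTests` later in this diff uses in its setup. The privilege names `create_index` and `manage_aliases` are standard index privilege names, but treating them as exactly what the role files contain is an assumption.

```java
import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;
import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges;

// Illustrative sketch only: approximates the roles these tests rely on using the
// RoleDescriptor/IndicesPrivileges builders seen later in this diff. The real role
// definitions are in the test's role configuration files, outside this hunk.
class AliasTestRolesSketch {

    // create on test_*, manage_aliases on alias_* only (mirrors user "create_test_aliases_alias")
    static RoleDescriptor createTestAliasesAlias() {
        return new RoleDescriptor("create_test_aliases_alias", null,
                new IndicesPrivileges[] {
                        IndicesPrivileges.builder().indices("test_*").privileges("create_index").build(),
                        IndicesPrivileges.builder().indices("alias_*").privileges("manage_aliases").build() },
                null);
    }

    // create on test_*, manage_aliases on both test_* and alias_* (mirrors user "create_test_aliases_test_alias")
    static RoleDescriptor createTestAliasesTestAlias() {
        return new RoleDescriptor("create_test_aliases_test_alias", null,
                new IndicesPrivileges[] {
                        IndicesPrivileges.builder().indices("test_*").privileges("create_index").build(),
                        IndicesPrivileges.builder().indices("test_*", "alias_*").privileges("manage_aliases").build() },
                null);
    }
}
```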
+ assertAcked(client.admin().indices().prepareCreate("test_1").addAlias(new Alias("test_alias")).addAlias(new Alias("alias_1"))); + + assertAliases(client.admin().indices().prepareGetAliases().setAliases("test_alias").setIndices("test_1"), + "test_1", "test_alias"); + + assertAliases(client.admin().indices().prepareGetAliases().setAliases("alias_1").setIndices("test_1"), + "test_1", "alias_1"); + + assertAliases(client.admin().indices().prepareGetAliases().setAliases("alias_1").setIndices("test_*"), + "test_1", "alias_1"); + + assertAliases(client.admin().indices().prepareGetAliases().setAliases("test_*").setIndices("test_1"), + "test_1", "test_alias"); + + assertAliases(client.admin().indices().prepareGetAliases().setAliases("_all").setIndices("test_1"), + "test_1", "alias_1", "test_alias"); + + assertAliases(client.admin().indices().prepareGetAliases().setAliases("_all"), + "test_1", "alias_1", "test_alias"); + + assertAliases(client.admin().indices().prepareGetAliases().setIndices("test_1"), + "test_1", "alias_1", "test_alias"); + + assertAliases(client.admin().indices().prepareGetAliases(), "test_1", "alias_1", "test_alias"); + + assertAliases(client.admin().indices().prepareGetAliases().setAliases("alias_*").setIndices("test_*"), + "test_1", "alias_1"); + } + + public void testCreateIndexAliasesOnlyPermission() { + assertThrowsAuthorizationException(client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, + basicAuthHeaderValue("aliases_only", new SecureString("test123".toCharArray())))) + .admin().indices().prepareCreate("test_1")::get, CreateIndexAction.NAME, "aliases_only"); + } + + public void testGetAliasesAliasesOnlyPermissionStrict() { + Map headers = Collections.singletonMap(BASIC_AUTH_HEADER, + basicAuthHeaderValue("aliases_only", new SecureString("test123".toCharArray()))); + final Client client = client().filterWithHeader(headers); + //user has manage_aliases only permissions on both alias_* and test_* + + //security plugin lets it through, but es core intercepts it due to strict indices options and throws index not found + IndexNotFoundException indexNotFoundException = expectThrows(IndexNotFoundException.class, client.admin().indices() + .prepareGetAliases("alias_1").addIndices("test_1").setIndicesOptions(IndicesOptions.strictExpandOpen())::get); + assertEquals("no such index", indexNotFoundException.getMessage()); + + //fails: no manage_aliases privilege on non_authorized alias + assertThrowsAuthorizationException(client.admin().indices().prepareGetAliases("non_authorized").addIndices("test_1") + .setIndicesOptions(IndicesOptions.strictExpandOpen())::get, GetAliasesAction.NAME, "aliases_only"); + + //fails: no manage_aliases privilege on non_authorized index + assertThrowsAuthorizationException(client.admin().indices().prepareGetAliases("alias_1").addIndices("non_authorized") + .setIndicesOptions(IndicesOptions.strictExpandOpen())::get, GetAliasesAction.NAME, "aliases_only"); + } + + public void testGetAliasesAliasesOnlyPermissionIgnoreUnavailable() { + Map headers = Collections.singletonMap(BASIC_AUTH_HEADER, + basicAuthHeaderValue("aliases_only", new SecureString("test123".toCharArray()))); + final Client client = client().filterWithHeader(headers); + //user has manage_aliases only permissions on both alias_* and test_* + + //ok: manage_aliases on both test_* and alias_* + GetAliasesResponse getAliasesResponse = client.admin().indices().prepareGetAliases("alias_1") + .addIndices("test_1").setIndicesOptions(IndicesOptions.lenientExpandOpen()).get(); + 
assertEquals(0, getAliasesResponse.getAliases().size()); + + //no manage_aliases privilege on non_authorized alias + getAliasesResponse = client.admin().indices().prepareGetAliases("non_authorized").addIndices("test_1") + .setIndicesOptions(IndicesOptions.lenientExpandOpen()).get(); + assertEquals(0, getAliasesResponse.getAliases().size()); + + //no manage_aliases privilege on non_authorized index + getAliasesResponse = client.admin().indices().prepareGetAliases("alias_1").addIndices("non_authorized") + .setIndicesOptions(IndicesOptions.lenientExpandOpen()).get(); + assertEquals(0, getAliasesResponse.getAliases().size()); + } + + private static void assertAliases(GetAliasesRequestBuilder getAliasesRequestBuilder, String index, String... aliases) { + GetAliasesResponse getAliasesResponse = getAliasesRequestBuilder.get(); + assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); + assertThat(getAliasesResponse.getAliases().get(index).size(), equalTo(aliases.length)); + for (int i = 0; i < aliases.length; i++) { + assertThat(getAliasesResponse.getAliases().get(index).get(i).alias(), equalTo(aliases[i])); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java new file mode 100644 index 0000000000000..17d8c754e1642 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java @@ -0,0 +1,1351 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authz; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexAction; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsAction; +import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.search.MultiSearchAction; +import org.elasticsearch.action.search.MultiSearchRequest; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.search.internal.ShardSearchTransportRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.graph.action.GraphExploreAction; +import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest; +import org.elasticsearch.xpack.core.security.authc.DefaultAuthenticationFailureHandler; +import org.elasticsearch.xpack.core.security.authz.IndicesAndAliasesResolverField; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import 
org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.user.XPackSecurityUser; +import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.elasticsearch.xpack.security.audit.AuditTrailService; +import org.elasticsearch.xpack.security.authz.IndicesAndAliasesResolver.ResolvedIndices; +import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; +import org.elasticsearch.xpack.security.test.SecurityTestUtils; +import org.junit.Before; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.emptyIterable; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasItems; +import static org.hamcrest.Matchers.not; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class IndicesAndAliasesResolverTests extends ESTestCase { + + private User user; + private User userDashIndices; + private User userNoIndices; + private CompositeRolesStore rolesStore; + private MetaData metaData; + private AuthorizationService authzService; + private IndicesAndAliasesResolver defaultIndicesResolver; + private IndexNameExpressionResolver indexNameExpressionResolver; + private Map roleMap; + + @Before + public void setup() { + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 2)) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(0, 1)) + .put("search.remote.remote.seeds", "127.0.0.1:" + randomIntBetween(9301, 9350)) + .put("search.remote.other_remote.seeds", "127.0.0.1:" + randomIntBetween(9351, 9399)) + .build(); + + indexNameExpressionResolver = new IndexNameExpressionResolver(Settings.EMPTY); + + final boolean withAlias = randomBoolean(); + final String securityIndexName = SECURITY_INDEX_NAME + (withAlias ? 
"-" + randomAlphaOfLength(5) : ""); + MetaData metaData = MetaData.builder() + .put(indexBuilder("foo").putAlias(AliasMetaData.builder("foofoobar")).settings(settings)) + .put(indexBuilder("foobar").putAlias(AliasMetaData.builder("foofoobar")).settings(settings)) + .put(indexBuilder("closed").state(IndexMetaData.State.CLOSE) + .putAlias(AliasMetaData.builder("foofoobar")).settings(settings)) + .put(indexBuilder("foofoo-closed").state(IndexMetaData.State.CLOSE).settings(settings)) + .put(indexBuilder("foobar-closed").state(IndexMetaData.State.CLOSE).settings(settings)) + .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz")).settings(settings)) + .put(indexBuilder("bar").settings(settings)) + .put(indexBuilder("bar-closed").state(IndexMetaData.State.CLOSE).settings(settings)) + .put(indexBuilder("bar2").settings(settings)) + .put(indexBuilder(indexNameExpressionResolver.resolveDateMathExpression("")).settings(settings)) + .put(indexBuilder("-index10").settings(settings)) + .put(indexBuilder("-index11").settings(settings)) + .put(indexBuilder("-index20").settings(settings)) + .put(indexBuilder("-index21").settings(settings)) + .put(indexBuilder(securityIndexName).settings(settings)).build(); + + if (withAlias) { + metaData = SecurityTestUtils.addAliasToMetaData(metaData, securityIndexName); + } + this.metaData = metaData; + + user = new User("user", "role"); + userDashIndices = new User("dash", "dash"); + userNoIndices = new User("test", "test"); + rolesStore = mock(CompositeRolesStore.class); + String[] authorizedIndices = new String[] { "bar", "bar-closed", "foofoobar", "foofoo", "missing", "foofoo-closed"}; + String[] dashIndices = new String[]{"-index10", "-index11", "-index20", "-index21"}; + roleMap = new HashMap<>(); + roleMap.put("role", new RoleDescriptor("role", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices(authorizedIndices).privileges("all").build() }, null)); + roleMap.put("dash", new RoleDescriptor("dash", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices(dashIndices).privileges("all").build() }, null)); + roleMap.put("test", new RoleDescriptor("role", new String[] { "monitor" }, null, null)); + roleMap.put(ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR.getName(), ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR); + final FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY); + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[2]; + Set names = (Set) i.getArguments()[0]; + assertNotNull(names); + Set roleDescriptors = new HashSet<>(); + for (String name : names) { + RoleDescriptor descriptor = roleMap.get(name); + if (descriptor != null) { + roleDescriptors.add(descriptor); + } + } + + if (roleDescriptors.isEmpty()) { + callback.onResponse(Role.EMPTY); + } else { + callback.onResponse( + CompositeRolesStore.buildRoleFromDescriptors(roleDescriptors, fieldPermissionsCache)); + } + return Void.TYPE; + }).when(rolesStore).roles(any(Set.class), any(FieldPermissionsCache.class), any(ActionListener.class)); + + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.getClusterSettings()).thenReturn(new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); + authzService = new AuthorizationService(settings, rolesStore, clusterService, + mock(AuditTrailService.class), new DefaultAuthenticationFailureHandler(), mock(ThreadPool.class), + new AnonymousUser(settings)); + defaultIndicesResolver = new 
IndicesAndAliasesResolver(settings, clusterService); + } + + public void testDashIndicesAreAllowedInShardLevelRequests() { + //indices with names starting with '-' or '+' can be created up to version 2.x and can be around in 5.x + //aliases with names starting with '-' or '+' can be created up to version 5.x and can be around in 6.x + ShardSearchTransportRequest request = mock(ShardSearchTransportRequest.class); + when(request.indices()).thenReturn(new String[]{"-index10", "-index20", "+index30"}); + List indices = resolveIndices(request, buildAuthorizedIndices(userDashIndices, SearchAction.NAME)) + .getLocal(); + String[] expectedIndices = new String[]{"-index10", "-index20", "+index30"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + } + + public void testWildcardsAreNotAllowedInShardLevelRequests() { + ShardSearchTransportRequest request = mock(ShardSearchTransportRequest.class); + when(request.indices()).thenReturn(new String[]{"index*"}); + IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, + () -> resolveIndices(request, buildAuthorizedIndices(userDashIndices, SearchAction.NAME)) + .getLocal()); + assertEquals("There are no external requests known to support wildcards that don't support replacing their indices", + illegalStateException.getMessage()); + } + + public void testAllIsNotAllowedInShardLevelRequests() { + ShardSearchTransportRequest request = mock(ShardSearchTransportRequest.class); + if (randomBoolean()) { + when(request.indices()).thenReturn(new String[]{"_all"}); + } else { + if (randomBoolean()) { + when(request.indices()).thenReturn(Strings.EMPTY_ARRAY); + } else { + when(request.indices()).thenReturn(null); + } + } + IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, + () -> resolveIndices(request, buildAuthorizedIndices(userDashIndices, SearchAction.NAME)) + .getLocal()); + assertEquals("There are no external requests known to support wildcards that don't support replacing their indices", + illegalStateException.getMessage()); + } + + public void testExplicitDashIndices() { + SearchRequest request = new SearchRequest("-index10", "-index20"); + List indices = + resolveIndices(request, buildAuthorizedIndices(userDashIndices, SearchAction.NAME)).getLocal(); + String[] expectedIndices = new String[]{"-index10", "-index20"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(request.indices().length, equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(expectedIndices)); + } + + public void testWildcardDashIndices() { + SearchRequest request; + if (randomBoolean()) { + request = new SearchRequest("-index*", "--index20"); + } else { + request = new SearchRequest("*", "--index20"); + } + List indices = + resolveIndices(request, buildAuthorizedIndices(userDashIndices, SearchAction.NAME)).getLocal(); + String[] expectedIndices = new String[]{"-index10", "-index11", "-index21"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(request.indices().length, equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(expectedIndices)); + } + + public void testExplicitMixedWildcardDashIndices() { + SearchRequest request = new SearchRequest("-index21", "-does_not_exist", "-index1*", "--index11"); + List indices = + 
resolveIndices(request, buildAuthorizedIndices(userDashIndices, SearchAction.NAME)).getLocal(); + String[] expectedIndices = new String[]{"-index10", "-index21", "-does_not_exist"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(request.indices().length, equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(expectedIndices)); + } + + public void testDashIndicesNoExpandWildcard() { + SearchRequest request = new SearchRequest("-index1*", "--index11"); + request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), false, false)); + List indices = + resolveIndices(request, buildAuthorizedIndices(userDashIndices, SearchAction.NAME)).getLocal(); + String[] expectedIndices = new String[]{"-index1*", "--index11"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(request.indices().length, equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(expectedIndices)); + } + + public void testDashIndicesMinus() { + SearchRequest request = new SearchRequest("-index10", "-index11", "--index11", "-index20"); + request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), randomBoolean(), randomBoolean())); + List indices = + resolveIndices(request, buildAuthorizedIndices(userDashIndices, SearchAction.NAME)).getLocal(); + String[] expectedIndices = new String[]{"-index10", "-index11", "--index11", "-index20"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(request.indices().length, equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(expectedIndices)); + } + + public void testDashIndicesPlus() { + SearchRequest request = new SearchRequest("+bar"); + request.indicesOptions(IndicesOptions.fromOptions(true, false, randomBoolean(), randomBoolean())); + expectThrows(IndexNotFoundException.class, + () -> resolveIndices(request, buildAuthorizedIndices(userDashIndices, SearchAction.NAME))); + } + + public void testDashNotExistingIndex() { + SearchRequest request = new SearchRequest("-does_not_exist"); + request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), randomBoolean(), randomBoolean())); + List indices = resolveIndices(request, buildAuthorizedIndices(userDashIndices, SearchAction.NAME)).getLocal(); + String[] expectedIndices = new String[]{"-does_not_exist"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(request.indices().length, equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(expectedIndices)); + } + + public void testResolveEmptyIndicesExpandWilcardsOpenAndClosed() { + SearchRequest request = new SearchRequest(); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, true)); + List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + String[] replacedIndices = new String[]{"bar", "bar-closed", "foofoobar", "foofoo", "foofoo-closed"}; + assertThat(indices.size(), equalTo(replacedIndices.length)); + assertThat(request.indices().length, equalTo(replacedIndices.length)); + assertThat(indices, hasItems(replacedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); + } + + public void 
testResolveEmptyIndicesExpandWilcardsOpen() { + SearchRequest request = new SearchRequest(); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, false)); + List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + String[] replacedIndices = new String[]{"bar", "foofoobar", "foofoo"}; + assertThat(indices.size(), equalTo(replacedIndices.length)); + assertThat(request.indices().length, equalTo(replacedIndices.length)); + assertThat(indices, hasItems(replacedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); + } + + public void testResolveAllExpandWilcardsOpenAndClosed() { + SearchRequest request = new SearchRequest("_all"); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, true)); + List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + String[] replacedIndices = new String[]{"bar", "bar-closed", "foofoobar", "foofoo", "foofoo-closed"}; + assertThat(indices.size(), equalTo(replacedIndices.length)); + assertThat(request.indices().length, equalTo(replacedIndices.length)); + assertThat(indices, hasItems(replacedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); + } + + public void testResolveAllExpandWilcardsOpen() { + SearchRequest request = new SearchRequest("_all"); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, false)); + List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + String[] replacedIndices = new String[]{"bar", "foofoobar", "foofoo"}; + assertThat(indices.size(), equalTo(replacedIndices.length)); + assertThat(request.indices().length, equalTo(replacedIndices.length)); + assertThat(indices, hasItems(replacedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); + } + + public void testResolveWildcardsStrictExpand() { + SearchRequest request = new SearchRequest("barbaz", "foofoo*"); + request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), true, true)); + List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + String[] replacedIndices = new String[]{"barbaz", "foofoobar", "foofoo", "foofoo-closed"}; + assertThat(indices.size(), equalTo(replacedIndices.length)); + assertThat(request.indices().length, equalTo(replacedIndices.length)); + assertThat(indices, hasItems(replacedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); + } + + public void testResolveWildcardsExpandOpenAndClosedIgnoreUnavailable() { + SearchRequest request = new SearchRequest("barbaz", "foofoo*"); + request.indicesOptions(IndicesOptions.fromOptions(true, randomBoolean(), true, true)); + List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + String[] replacedIndices = new String[]{"foofoobar", "foofoo", "foofoo-closed"}; + assertThat(indices.size(), equalTo(replacedIndices.length)); + assertThat(request.indices().length, equalTo(replacedIndices.length)); + assertThat(indices, hasItems(replacedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); + } + + public void testResolveWildcardsStrictExpandOpen() { + SearchRequest request = new SearchRequest("barbaz", "foofoo*"); + request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), true, false)); + List 
indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + String[] replacedIndices = new String[]{"barbaz", "foofoobar", "foofoo"}; + assertThat(indices.size(), equalTo(replacedIndices.length)); + assertThat(request.indices().length, equalTo(replacedIndices.length)); + assertThat(indices, hasItems(replacedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); + } + + public void testResolveWildcardsLenientExpandOpen() { + SearchRequest request = new SearchRequest("barbaz", "foofoo*"); + request.indicesOptions(IndicesOptions.fromOptions(true, randomBoolean(), true, false)); + List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + String[] replacedIndices = new String[]{"foofoobar", "foofoo"}; + assertThat(indices.size(), equalTo(replacedIndices.length)); + assertThat(request.indices().length, equalTo(replacedIndices.length)); + assertThat(indices, hasItems(replacedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); + } + + public void testResolveWildcardsMinusExpandWilcardsOpen() { + SearchRequest request = new SearchRequest("*", "-foofoo*"); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, false)); + List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + String[] replacedIndices = new String[]{"bar"}; + assertThat(indices.size(), equalTo(replacedIndices.length)); + assertThat(request.indices().length, equalTo(replacedIndices.length)); + assertThat(indices, hasItems(replacedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); + } + + public void testResolveWildcardsMinusExpandWilcardsOpenAndClosed() { + SearchRequest request = new SearchRequest("*", "-foofoo*"); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, true)); + List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + String[] replacedIndices = new String[]{"bar", "bar-closed"}; + assertThat(indices.size(), equalTo(replacedIndices.length)); + assertThat(request.indices().length, equalTo(replacedIndices.length)); + assertThat(indices, hasItems(replacedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); + } + + public void testResolveWildcardsExclusionsExpandWilcardsOpenStrict() { + SearchRequest request = new SearchRequest("*", "-foofoo*", "barbaz", "foob*"); + request.indicesOptions(IndicesOptions.fromOptions(false, true, true, false)); + List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + String[] replacedIndices = new String[]{"bar", "barbaz"}; + assertThat(indices.size(), equalTo(replacedIndices.length)); + assertThat(request.indices().length, equalTo(replacedIndices.length)); + assertThat(indices, hasItems(replacedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); + } + + public void testResolveWildcardsPlusAndMinusExpandWilcardsOpenIgnoreUnavailable() { + SearchRequest request = new SearchRequest("*", "-foofoo*", "+barbaz", "+foob*"); + request.indicesOptions(IndicesOptions.fromOptions(true, true, true, false)); + List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + String[] replacedIndices = new String[]{"bar"}; + assertThat(indices.size(), equalTo(replacedIndices.length)); + 
assertThat(request.indices().length, equalTo(replacedIndices.length)); + assertThat(indices, hasItems(replacedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); + } + + public void testResolveWildcardsExclusionExpandWilcardsOpenAndClosedStrict() { + SearchRequest request = new SearchRequest("*", "-foofoo*", "barbaz"); + request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), true, true)); + List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + String[] replacedIndices = new String[]{"bar", "bar-closed", "barbaz"}; + assertThat(indices.size(), equalTo(replacedIndices.length)); + assertThat(request.indices().length, equalTo(replacedIndices.length)); + assertThat(indices, hasItems(replacedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); + } + + public void testResolveWildcardsExclusionExpandWilcardsOpenAndClosedIgnoreUnavailable() { + SearchRequest request = new SearchRequest("*", "-foofoo*", "barbaz"); + request.indicesOptions(IndicesOptions.fromOptions(true, randomBoolean(), true, true)); + List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + String[] replacedIndices = new String[]{"bar", "bar-closed"}; + assertThat(indices.size(), equalTo(replacedIndices.length)); + assertThat(request.indices().length, equalTo(replacedIndices.length)); + assertThat(indices, hasItems(replacedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); + } + + public void testResolveNonMatchingIndicesAllowNoIndices() { + SearchRequest request = new SearchRequest("missing*"); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), true, true, randomBoolean())); + assertNoIndices(request, resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME))); + } + + public void testResolveNonMatchingIndicesDisallowNoIndices() { + SearchRequest request = new SearchRequest("missing*"); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean())); + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, + () -> resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME))); + assertEquals("no such index", e.getMessage()); + } + + public void testResolveExplicitIndicesStrict() { + SearchRequest request = new SearchRequest("missing", "bar", "barbaz"); + request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), randomBoolean(), randomBoolean())); + List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + String[] replacedIndices = new String[]{"missing", "bar", "barbaz"}; + assertThat(indices.size(), equalTo(replacedIndices.length)); + assertThat(request.indices().length, equalTo(replacedIndices.length)); + assertThat(indices, hasItems(replacedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); + } + + public void testResolveExplicitIndicesIgnoreUnavailable() { + SearchRequest request = new SearchRequest("missing", "bar", "barbaz"); + request.indicesOptions(IndicesOptions.fromOptions(true, randomBoolean(), randomBoolean(), randomBoolean())); + List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + String[] replacedIndices = new String[]{"bar"}; + assertThat(indices.size(), equalTo(replacedIndices.length)); + assertThat(request.indices().length, equalTo(replacedIndices.length)); + 
assertThat(indices, hasItems(replacedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); + } + + public void testResolveNoAuthorizedIndicesAllowNoIndices() { + SearchRequest request = new SearchRequest(); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), true, true, randomBoolean())); + assertNoIndices(request, resolveIndices(request, + buildAuthorizedIndices(userNoIndices, SearchAction.NAME))); + } + + public void testResolveNoAuthorizedIndicesDisallowNoIndices() { + SearchRequest request = new SearchRequest(); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean())); + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, + () -> resolveIndices(request, buildAuthorizedIndices(userNoIndices, SearchAction.NAME))); + assertEquals("no such index", e.getMessage()); + } + + public void testResolveMissingIndexStrict() { + SearchRequest request = new SearchRequest("bar*", "missing"); + request.indicesOptions(IndicesOptions.fromOptions(false, true, true, false)); + List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + String[] expectedIndices = new String[]{"bar", "missing"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(request.indices().length, equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + assertThat(request.indices(), equalTo(expectedIndices)); + } + + public void testResolveMissingIndexIgnoreUnavailable() { + SearchRequest request = new SearchRequest("bar*", "missing"); + request.indicesOptions(IndicesOptions.fromOptions(true, randomBoolean(), true, false)); + List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + String[] expectedIndices = new String[]{"bar"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(request.indices().length, equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + assertThat(request.indices(), equalTo(expectedIndices)); + } + + public void testResolveNonMatchingIndicesAndExplicit() { + SearchRequest request = new SearchRequest("missing*", "bar"); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), true, true, randomBoolean())); + List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + String[] expectedIndices = new String[]{"bar"}; + assertThat(indices.toArray(new String[indices.size()]), equalTo(expectedIndices)); + assertThat(request.indices(), equalTo(expectedIndices)); + } + + public void testResolveNoExpandStrict() { + SearchRequest request = new SearchRequest("missing*"); + request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), false, false)); + List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + String[] expectedIndices = new String[]{"missing*"}; + assertThat(indices.toArray(new String[indices.size()]), equalTo(expectedIndices)); + assertThat(request.indices(), equalTo(expectedIndices)); + } + + public void testResolveNoExpandIgnoreUnavailable() { + SearchRequest request = new SearchRequest("missing*"); + request.indicesOptions(IndicesOptions.fromOptions(true, true, false, false)); + assertNoIndices(request, resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME))); + } + + public void testSearchWithRemoteIndex() { + SearchRequest request = new SearchRequest("remote:indexName"); + 
request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + final ResolvedIndices resolved = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)); + assertThat(resolved.getLocal(), emptyIterable()); + assertThat(resolved.getRemote(), containsInAnyOrder("remote:indexName")); + assertThat(request.indices(), arrayContaining("remote:indexName")); + } + + public void testSearchWithRemoteAndLocalIndices() { + SearchRequest request = new SearchRequest("remote:indexName", "bar", "bar2"); + request.indicesOptions(IndicesOptions.fromOptions(true, randomBoolean(), randomBoolean(), randomBoolean())); + final ResolvedIndices resolved = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)); + assertThat(resolved.getLocal(), containsInAnyOrder("bar")); + assertThat(resolved.getRemote(), containsInAnyOrder("remote:indexName")); + assertThat(request.indices(), arrayContainingInAnyOrder("remote:indexName", "bar")); + } + + public void testSearchWithRemoteAndLocalWildcards() { + SearchRequest request = new SearchRequest("*:foo", "r*:bar*", "remote:baz*", "bar*", "foofoo"); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, false)); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, SearchAction.NAME); + final ResolvedIndices resolved = resolveIndices(request, authorizedIndices); + assertThat(resolved.getRemote(), containsInAnyOrder("remote:foo", "other_remote:foo", "remote:bar*", "remote:baz*")); + assertThat(resolved.getLocal(), containsInAnyOrder("bar", "foofoo")); + assertThat(request.indices(), + arrayContainingInAnyOrder("remote:foo", "other_remote:foo", "remote:bar*", "remote:baz*", "bar", "foofoo")); + } + + public void testResolveIndicesAliasesRequest() { + IndicesAliasesRequest request = new IndicesAliasesRequest(); + request.addAliasAction(AliasActions.add().alias("alias1").indices("foo", "foofoo")); + request.addAliasAction(AliasActions.add().alias("alias2").indices("foo", "foobar")); + List indices = + resolveIndices(request, buildAuthorizedIndices(user, IndicesAliasesAction.NAME)).getLocal(); + //the union of all indices and aliases gets returned + String[] expectedIndices = new String[]{"alias1", "alias2", "foo", "foofoo", "foobar"}; + assertSameValues(indices, expectedIndices); + assertThat(request.getAliasActions().get(0).indices(), arrayContainingInAnyOrder("foo", "foofoo")); + assertThat(request.getAliasActions().get(0).aliases(), arrayContainingInAnyOrder("alias1")); + assertThat(request.getAliasActions().get(1).indices(), arrayContainingInAnyOrder("foo", "foobar")); + assertThat(request.getAliasActions().get(1).aliases(), arrayContainingInAnyOrder("alias2")); + } + + public void testResolveIndicesAliasesRequestExistingAlias() { + IndicesAliasesRequest request = new IndicesAliasesRequest(); + request.addAliasAction(AliasActions.add().alias("alias1").indices("foo", "foofoo")); + request.addAliasAction(AliasActions.add().alias("foofoobar").indices("foo", "foobar")); + List indices = + resolveIndices(request, buildAuthorizedIndices(user, IndicesAliasesAction.NAME)).getLocal(); + //the union of all indices and aliases gets returned, foofoobar is an existing alias but that doesn't make any difference + String[] expectedIndices = new String[]{"alias1", "foofoobar", "foo", "foofoo", "foobar"}; + assertSameValues(indices, expectedIndices); + assertThat(request.getAliasActions().get(0).indices(), arrayContainingInAnyOrder("foo", 
"foofoo")); + assertThat(request.getAliasActions().get(0).aliases(), arrayContainingInAnyOrder("alias1")); + assertThat(request.getAliasActions().get(1).indices(), arrayContainingInAnyOrder("foo", "foobar")); + assertThat(request.getAliasActions().get(1).aliases(), arrayContainingInAnyOrder("foofoobar")); + } + + public void testResolveIndicesAliasesRequestMissingIndex() { + IndicesAliasesRequest request = new IndicesAliasesRequest(); + request.addAliasAction(AliasActions.add().alias("alias1").indices("foo", "foofoo")); + request.addAliasAction(AliasActions.add().alias("alias2").index("missing")); + List indices = + resolveIndices(request, buildAuthorizedIndices(user, IndicesAliasesAction.NAME)).getLocal(); + //the union of all indices and aliases gets returned, missing is not an existing index/alias but that doesn't make any difference + String[] expectedIndices = new String[]{"alias1", "alias2", "foo", "foofoo", "missing"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + assertThat(request.getAliasActions().get(0).indices(), arrayContainingInAnyOrder("foo", "foofoo")); + assertThat(request.getAliasActions().get(0).aliases(), arrayContainingInAnyOrder("alias1")); + assertThat(request.getAliasActions().get(1).indices(), arrayContainingInAnyOrder("missing")); + assertThat(request.getAliasActions().get(1).aliases(), arrayContainingInAnyOrder("alias2")); + } + + public void testResolveWildcardsIndicesAliasesRequest() { + IndicesAliasesRequest request = new IndicesAliasesRequest(); + request.addAliasAction(AliasActions.add().alias("foo-alias").index("foo*")); + request.addAliasAction(AliasActions.add().alias("alias2").index("bar*")); + List indices = + resolveIndices(request, buildAuthorizedIndices(user, IndicesAliasesAction.NAME)).getLocal(); + //the union of all resolved indices and aliases gets returned, based on indices and aliases that user is authorized for + String[] expectedIndices = new String[]{"foo-alias", "alias2", "foofoo", "bar"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + //wildcards get replaced on each single action + assertThat(request.getAliasActions().get(0).indices(), arrayContainingInAnyOrder("foofoo")); + assertThat(request.getAliasActions().get(0).aliases(), arrayContainingInAnyOrder("foo-alias")); + assertThat(request.getAliasActions().get(1).indices(), arrayContainingInAnyOrder("bar")); + assertThat(request.getAliasActions().get(1).aliases(), arrayContainingInAnyOrder("alias2")); + } + + public void testResolveWildcardsIndicesAliasesRequestNoMatchingIndices() { + IndicesAliasesRequest request = new IndicesAliasesRequest(); + request.addAliasAction(AliasActions.add().alias("alias1").index("foo*")); + request.addAliasAction(AliasActions.add().alias("alias2").index("bar*")); + request.addAliasAction(AliasActions.add().alias("alias3").index("non_matching_*")); + //if a single operation contains wildcards and ends up being resolved to no indices, it makes the whole request fail + expectThrows(IndexNotFoundException.class, + () -> resolveIndices(request, buildAuthorizedIndices(user, IndicesAliasesAction.NAME))); + } + + public void testResolveAllIndicesAliasesRequest() { + IndicesAliasesRequest request = new IndicesAliasesRequest(); + request.addAliasAction(AliasActions.add().alias("alias1").index("_all")); + request.addAliasAction(AliasActions.add().alias("alias2").index("_all")); + List indices = + resolveIndices(request, 
buildAuthorizedIndices(user, IndicesAliasesAction.NAME)).getLocal(); + //the union of all resolved indices and aliases gets returned + String[] expectedIndices = new String[]{"bar", "foofoo", "alias1", "alias2"}; + assertSameValues(indices, expectedIndices); + String[] replacedIndices = new String[]{"bar", "foofoo"}; + //_all gets replaced with all indices that user is authorized for, on each single action + assertThat(request.getAliasActions().get(0).indices(), arrayContainingInAnyOrder(replacedIndices)); + assertThat(request.getAliasActions().get(0).aliases(), arrayContainingInAnyOrder("alias1")); + assertThat(request.getAliasActions().get(1).indices(), arrayContainingInAnyOrder(replacedIndices)); + assertThat(request.getAliasActions().get(1).aliases(), arrayContainingInAnyOrder("alias2")); + } + + public void testResolveAllIndicesAliasesRequestNoAuthorizedIndices() { + IndicesAliasesRequest request = new IndicesAliasesRequest(); + request.addAliasAction(AliasActions.add().alias("alias1").index("_all")); + //current user is not authorized for any index, _all resolves to no indices, the request fails + expectThrows(IndexNotFoundException.class, () -> + resolveIndices(request, buildAuthorizedIndices(userNoIndices, IndicesAliasesAction.NAME))); + } + + public void testResolveWildcardsIndicesAliasesRequestNoAuthorizedIndices() { + IndicesAliasesRequest request = new IndicesAliasesRequest(); + request.addAliasAction(AliasActions.add().alias("alias1").index("foo*")); + //current user is not authorized for any index, foo* resolves to no indices, the request fails + expectThrows(IndexNotFoundException.class, () -> resolveIndices( + request, buildAuthorizedIndices(userNoIndices, IndicesAliasesAction.NAME))); + } + + public void testResolveIndicesAliasesRequestDeleteActions() { + IndicesAliasesRequest request = new IndicesAliasesRequest(); + request.addAliasAction(AliasActions.remove().index("foo").alias("foofoobar")); + request.addAliasAction(AliasActions.remove().index("foofoo").alias("barbaz")); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, IndicesAliasesAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + //the union of all indices and aliases gets returned + String[] expectedIndices = new String[]{"foo", "foofoobar", "foofoo", "barbaz"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + assertThat(request.getAliasActions().get(0).indices(), arrayContainingInAnyOrder("foo")); + assertThat(request.getAliasActions().get(0).aliases(), arrayContainingInAnyOrder("foofoobar")); + assertThat(request.getAliasActions().get(1).indices(), arrayContainingInAnyOrder("foofoo")); + assertThat(request.getAliasActions().get(1).aliases(), arrayContainingInAnyOrder("barbaz")); + } + + public void testResolveIndicesAliasesRequestDeleteActionsMissingIndex() { + IndicesAliasesRequest request = new IndicesAliasesRequest(); + request.addAliasAction(AliasActions.remove().index("foo").alias("foofoobar")); + request.addAliasAction(AliasActions.remove().index("missing_index").alias("missing_alias")); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, IndicesAliasesAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + //the union of all indices and aliases gets returned, doesn't matter is some of them don't exist + String[] expectedIndices = new String[]{"foo", "foofoobar", "missing_index", "missing_alias"}; + assertThat(indices.size(), 
equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + assertThat(request.getAliasActions().get(0).indices(), arrayContainingInAnyOrder("foo")); + assertThat(request.getAliasActions().get(0).aliases(), arrayContainingInAnyOrder("foofoobar")); + assertThat(request.getAliasActions().get(1).indices(), arrayContainingInAnyOrder("missing_index")); + assertThat(request.getAliasActions().get(1).aliases(), arrayContainingInAnyOrder("missing_alias")); + } + + public void testResolveWildcardsIndicesAliasesRequestDeleteActions() { + IndicesAliasesRequest request = new IndicesAliasesRequest(); + request.addAliasAction(AliasActions.remove().index("foo*").alias("foofoobar")); + request.addAliasAction(AliasActions.remove().index("bar*").alias("barbaz")); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, IndicesAliasesAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + //union of all resolved indices and aliases gets returned, based on what user is authorized for + String[] expectedIndices = new String[]{"foofoobar", "foofoo", "bar", "barbaz"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + //wildcards get replaced within each single action + assertThat(request.getAliasActions().get(0).indices(), arrayContainingInAnyOrder("foofoo")); + assertThat(request.getAliasActions().get(0).aliases(), arrayContainingInAnyOrder("foofoobar")); + assertThat(request.getAliasActions().get(1).indices(), arrayContainingInAnyOrder("bar")); + assertThat(request.getAliasActions().get(1).aliases(), arrayContainingInAnyOrder("barbaz")); + } + + public void testResolveAliasesWildcardsIndicesAliasesRequestDeleteActions() { + IndicesAliasesRequest request = new IndicesAliasesRequest(); + request.addAliasAction(AliasActions.remove().index("*").alias("foo*")); + request.addAliasAction(AliasActions.remove().index("*bar").alias("foo*")); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, IndicesAliasesAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + //union of all resolved indices and aliases gets returned, based on what user is authorized for + //note that the index side will end up containing matching aliases too, which is fine, as es core would do + //the same and resolve those aliases to their corresponding concrete indices (which we let core do) + String[] expectedIndices = new String[]{"bar", "foofoobar", "foofoo"}; + assertSameValues(indices, expectedIndices); + //alias foofoobar on both sides, that's fine, es core would do the same, same as above + assertThat(request.getAliasActions().get(0).indices(), arrayContainingInAnyOrder("bar", "foofoo")); + assertThat(request.getAliasActions().get(0).aliases(), arrayContainingInAnyOrder("foofoobar")); + assertThat(request.getAliasActions().get(1).indices(), arrayContainingInAnyOrder("bar")); + assertThat(request.getAliasActions().get(1).aliases(), arrayContainingInAnyOrder("foofoobar")); + } + + public void testResolveAllAliasesWildcardsIndicesAliasesRequestDeleteActions() { + IndicesAliasesRequest request = new IndicesAliasesRequest(); + request.addAliasAction(AliasActions.remove().index("*").alias("_all")); + request.addAliasAction(AliasActions.remove().index("_all").aliases("_all", "explicit")); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, IndicesAliasesAction.NAME); + List indices = resolveIndices(request, 
authorizedIndices).getLocal(); + //union of all resolved indices and aliases gets returned, based on what user is authorized for + //note that the index side will end up containing matching aliases too, which is fine, as es core would do + //the same and resolve those aliases to their corresponding concrete indices (which we let core do) + String[] expectedIndices = new String[]{"bar", "foofoobar", "foofoo", "explicit"}; + assertSameValues(indices, expectedIndices); + //alias foofoobar on both sides, that's fine, es core would do the same, same as above + assertThat(request.getAliasActions().get(0).indices(), arrayContainingInAnyOrder("bar", "foofoo")); + assertThat(request.getAliasActions().get(0).aliases(), arrayContainingInAnyOrder("foofoobar")); + assertThat(request.getAliasActions().get(1).indices(), arrayContainingInAnyOrder("bar", "foofoo")); + assertThat(request.getAliasActions().get(1).aliases(), arrayContainingInAnyOrder("foofoobar", "explicit")); + } + + public void testResolveAliasesWildcardsIndicesAliasesRequestDeleteActionsNoAuthorizedIndices() { + IndicesAliasesRequest request = new IndicesAliasesRequest(); + request.addAliasAction(AliasActions.remove().index("foo*").alias("foo*")); + //no authorized aliases match bar*, hence this action fails and makes the whole request fail + request.addAliasAction(AliasActions.remove().index("*bar").alias("bar*")); + expectThrows(IndexNotFoundException.class, () -> resolveIndices( + request, buildAuthorizedIndices(user, IndicesAliasesAction.NAME))); + } + + public void testResolveWildcardsIndicesAliasesRequestAddAndDeleteActions() { + IndicesAliasesRequest request = new IndicesAliasesRequest(); + request.addAliasAction(AliasActions.remove().index("foo*").alias("foofoobar")); + request.addAliasAction(AliasActions.add().index("bar*").alias("foofoobar")); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, IndicesAliasesAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + //union of all resolved indices and aliases gets returned, based on what user is authorized for + String[] expectedIndices = new String[]{"foofoobar", "foofoo", "bar"}; + assertSameValues(indices, expectedIndices); + //every single action has its indices replaced with matching (authorized) ones + assertThat(request.getAliasActions().get(0).indices(), arrayContainingInAnyOrder("foofoo")); + assertThat(request.getAliasActions().get(0).aliases(), arrayContainingInAnyOrder("foofoobar")); + assertThat(request.getAliasActions().get(1).indices(), arrayContainingInAnyOrder("bar")); + assertThat(request.getAliasActions().get(1).aliases(), arrayContainingInAnyOrder("foofoobar")); + } + + public void testResolveGetAliasesRequestStrict() { + GetAliasesRequest request = new GetAliasesRequest("alias1").indices("foo", "foofoo"); + request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), randomBoolean(), randomBoolean())); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, GetAliasesAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + //the union of all indices and aliases gets returned + String[] expectedIndices = new String[]{"alias1", "foo", "foofoo"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder("foo", "foofoo")); + assertThat(request.aliases(), arrayContainingInAnyOrder("alias1")); + } + + public void 
testResolveGetAliasesRequestIgnoreUnavailable() { + GetAliasesRequest request = new GetAliasesRequest("alias1").indices("foo", "foofoo"); + request.indicesOptions(IndicesOptions.fromOptions(true, randomBoolean(), randomBoolean(), randomBoolean())); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, GetAliasesAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + String[] expectedIndices = new String[]{"alias1", "foofoo"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder("foofoo")); + assertThat(request.aliases(), arrayContainingInAnyOrder("alias1")); + } + + public void testResolveGetAliasesRequestMissingIndexStrict() { + GetAliasesRequest request = new GetAliasesRequest(); + request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), true, randomBoolean())); + request.indices("missing"); + request.aliases("alias2"); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, GetAliasesAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + //the union of all indices and aliases gets returned, missing is not an existing index/alias but that doesn't make any difference + String[] expectedIndices = new String[]{"alias2", "missing"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder("missing")); + assertThat(request.aliases(), arrayContainingInAnyOrder("alias2")); + } + + public void testGetAliasesRequestMissingIndexIgnoreUnavailableDisallowNoIndices() { + GetAliasesRequest request = new GetAliasesRequest(); + request.indicesOptions(IndicesOptions.fromOptions(true, false, randomBoolean(), randomBoolean())); + request.indices("missing"); + request.aliases("alias2"); + IndexNotFoundException exception = expectThrows(IndexNotFoundException.class, + () -> resolveIndices(request, buildAuthorizedIndices(user, GetAliasesAction.NAME)).getLocal()); + assertEquals("no such index", exception.getMessage()); + } + + public void testGetAliasesRequestMissingIndexIgnoreUnavailableAllowNoIndices() { + GetAliasesRequest request = new GetAliasesRequest(); + request.indicesOptions(IndicesOptions.fromOptions(true, true, randomBoolean(), randomBoolean())); + request.indices("missing"); + request.aliases("alias2"); + assertNoIndices(request, resolveIndices(request, buildAuthorizedIndices(user, GetAliasesAction.NAME))); + } + + public void testGetAliasesRequestMissingIndexStrict() { + GetAliasesRequest request = new GetAliasesRequest(); + request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), randomBoolean(), randomBoolean())); + request.indices("missing"); + request.aliases("alias2"); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, GetAliasesAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + String[] expectedIndices = new String[]{"alias2", "missing"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder("missing")); + assertThat(request.aliases(), arrayContainingInAnyOrder("alias2")); + } + + public void testResolveWildcardsGetAliasesRequestStrictExpand() { + GetAliasesRequest request = new GetAliasesRequest(); + 
request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), true, true)); + request.aliases("alias1"); + request.indices("foo*"); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, GetAliasesAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + //the union of all resolved indices and aliases gets returned, based on indices and aliases that user is authorized for + String[] expectedIndices = new String[]{"alias1", "foofoo", "foofoo-closed", "foofoobar"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + //wildcards get replaced on each single action + assertThat(request.indices(), arrayContainingInAnyOrder("foofoobar", "foofoo", "foofoo-closed")); + assertThat(request.aliases(), arrayContainingInAnyOrder("alias1")); + } + + public void testResolveWildcardsGetAliasesRequestStrictExpandOpen() { + GetAliasesRequest request = new GetAliasesRequest(); + request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), true, false)); + request.aliases("alias1"); + request.indices("foo*"); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, GetAliasesAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + //the union of all resolved indices and aliases gets returned, based on indices and aliases that user is authorized for + String[] expectedIndices = new String[]{"alias1", "foofoo", "foofoobar"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + //wildcards get replaced on each single action + assertThat(request.indices(), arrayContainingInAnyOrder("foofoobar", "foofoo")); + assertThat(request.aliases(), arrayContainingInAnyOrder("alias1")); + } + + public void testResolveWildcardsGetAliasesRequestLenientExpandOpen() { + GetAliasesRequest request = new GetAliasesRequest(); + request.indicesOptions(IndicesOptions.fromOptions(true, randomBoolean(), true, false)); + request.aliases("alias1"); + request.indices("foo*", "bar", "missing"); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, GetAliasesAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + //the union of all resolved indices and aliases gets returned, based on indices and aliases that user is authorized for + String[] expectedIndices = new String[]{"alias1", "foofoo", "foofoobar", "bar"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + //wildcards get replaced on each single action + assertThat(request.indices(), arrayContainingInAnyOrder("foofoobar", "foofoo", "bar")); + assertThat(request.aliases(), arrayContainingInAnyOrder("alias1")); + } + + public void testWildcardsGetAliasesRequestNoMatchingIndicesDisallowNoIndices() { + GetAliasesRequest request = new GetAliasesRequest(); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean())); + request.aliases("alias3"); + request.indices("non_matching_*"); + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, + () -> resolveIndices(request, buildAuthorizedIndices(user, GetAliasesAction.NAME)).getLocal()); + assertEquals("no such index", e.getMessage()); + } + + public void testWildcardsGetAliasesRequestNoMatchingIndicesAllowNoIndices() { + GetAliasesRequest request = new GetAliasesRequest(); + 
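+ //same non-matching wildcard as the test above, but with allow_no_indices set to true the request resolves to the no-index placeholder instead of failing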
request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), true, true, randomBoolean())); + request.aliases("alias3"); + request.indices("non_matching_*"); + assertNoIndices(request, resolveIndices(request, buildAuthorizedIndices(user, GetAliasesAction.NAME))); + } + + public void testResolveAllGetAliasesRequest() { + GetAliasesRequest request = new GetAliasesRequest(); + //even if not set, empty means _all + if (randomBoolean()) { + request.indices("_all"); + } + request.aliases("alias1"); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, GetAliasesAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + //the union of all resolved indices and aliases gets returned + String[] expectedIndices = new String[]{"bar", "bar-closed", "foofoobar", "foofoo", "foofoo-closed", "alias1"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + String[] replacedIndices = new String[]{"bar", "bar-closed", "foofoobar", "foofoo", "foofoo-closed"}; + //_all gets replaced with all indices that user is authorized for + assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); + assertThat(request.aliases(), arrayContainingInAnyOrder("alias1")); + } + + public void testResolveAllGetAliasesRequestExpandWildcardsOpenOnly() { + GetAliasesRequest request = new GetAliasesRequest(); + //set indices options to have wildcards resolved to open indices only (default is open and closed) + request.indicesOptions(IndicesOptions.fromOptions(true, false, true, false)); + //even if not set, empty means _all + if (randomBoolean()) { + request.indices("_all"); + } + request.aliases("alias1"); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, GetAliasesAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + //the union of all resolved indices and aliases gets returned + String[] expectedIndices = new String[]{"bar", "foofoobar", "foofoo", "alias1"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + String[] replacedIndices = new String[]{"bar", "foofoobar", "foofoo"}; + //_all gets replaced with all indices that user is authorized for + assertThat(request.indices(), arrayContainingInAnyOrder(replacedIndices)); + assertThat(request.aliases(), arrayContainingInAnyOrder("alias1")); + } + + public void testAllGetAliasesRequestNoAuthorizedIndicesAllowNoIndices() { + GetAliasesRequest request = new GetAliasesRequest(); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), true, true, randomBoolean())); + request.aliases("alias1"); + request.indices("_all"); + assertNoIndices(request, resolveIndices(request, + buildAuthorizedIndices(userNoIndices, GetAliasesAction.NAME))); + } + + public void testAllGetAliasesRequestNoAuthorizedIndicesDisallowNoIndices() { + GetAliasesRequest request = new GetAliasesRequest(); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean())); + request.aliases("alias1"); + request.indices("_all"); + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, + () -> resolveIndices(request, buildAuthorizedIndices(userNoIndices, GetAliasesAction.NAME))); + assertEquals("no such index", e.getMessage()); + } + + public void testWildcardsGetAliasesRequestNoAuthorizedIndicesAllowNoIndices() { + GetAliasesRequest request = new GetAliasesRequest(); + request.aliases("alias1"); + 
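+ //current user is not authorized for any index, so foo* matches nothing; allow_no_indices set to true means the request still succeeds with the no-index placeholder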
request.indices("foo*"); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), true, true, randomBoolean())); + assertNoIndices(request, resolveIndices(request, + buildAuthorizedIndices(userNoIndices, GetAliasesAction.NAME))); + } + + public void testWildcardsGetAliasesRequestNoAuthorizedIndicesDisallowNoIndices() { + GetAliasesRequest request = new GetAliasesRequest(); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean())); + request.aliases("alias1"); + request.indices("foo*"); + //current user is not authorized for any index, foo* resolves to no indices, the request fails + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, + () -> resolveIndices(request, buildAuthorizedIndices(userNoIndices, GetAliasesAction.NAME))); + assertEquals("no such index", e.getMessage()); + } + + public void testResolveAllAliasesGetAliasesRequest() { + GetAliasesRequest request = new GetAliasesRequest(); + if (randomBoolean()) { + request.aliases("_all"); + } + if (randomBoolean()) { + request.indices("_all"); + } + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, GetAliasesAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + //the union of all resolved indices and aliases gets returned + String[] expectedIndices = new String[]{"bar", "bar-closed", "foofoobar", "foofoo", "foofoo-closed"}; + assertSameValues(indices, expectedIndices); + //_all gets replaced with all indices that user is authorized for + assertThat(request.indices(), arrayContainingInAnyOrder(expectedIndices)); + assertThat(request.aliases(), arrayContainingInAnyOrder("foofoobar")); + } + + public void testResolveAllAndExplicitAliasesGetAliasesRequest() { + GetAliasesRequest request = new GetAliasesRequest(new String[]{"_all", "explicit"}); + if (randomBoolean()) { + request.indices("_all"); + } + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, GetAliasesAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + //the union of all resolved indices and aliases gets returned + String[] expectedIndices = new String[]{"bar", "bar-closed", "foofoobar", "foofoo", "foofoo-closed", "explicit"}; + assertSameValues(indices, expectedIndices); + //_all gets replaced with all indices that user is authorized for + assertThat(request.indices(), arrayContainingInAnyOrder("bar", "bar-closed", "foofoobar", "foofoo", "foofoo-closed")); + assertThat(request.aliases(), arrayContainingInAnyOrder("foofoobar", "explicit")); + } + + public void testResolveAllAndWildcardsAliasesGetAliasesRequest() { + GetAliasesRequest request = new GetAliasesRequest(new String[]{"_all", "foo*", "non_matching_*"}); + if (randomBoolean()) { + request.indices("_all"); + } + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, GetAliasesAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + //the union of all resolved indices and aliases gets returned + String[] expectedIndices = new String[]{"bar", "bar-closed", "foofoobar", "foofoo", "foofoo-closed"}; + assertSameValues(indices, expectedIndices); + //_all gets replaced with all indices that user is authorized for + assertThat(request.indices(), arrayContainingInAnyOrder(expectedIndices)); + assertThat(request.aliases(), arrayContainingInAnyOrder("foofoobar", "foofoobar")); + } + + public void testResolveAliasesWildcardsGetAliasesRequest() { + GetAliasesRequest request = new GetAliasesRequest(); + 
request.indices("*bar"); + request.aliases("foo*"); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, GetAliasesAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + //union of all resolved indices and aliases gets returned, based on what user is authorized for + //note that the index side will end up containing matching aliases too, which is fine, as es core would do + //the same and resolve those aliases to their corresponding concrete indices (which we let core do) + String[] expectedIndices = new String[]{"bar", "foofoobar"}; + assertSameValues(indices, expectedIndices); + //alias foofoobar on both sides, that's fine, es core would do the same, same as above + assertThat(request.indices(), arrayContainingInAnyOrder("bar", "foofoobar")); + assertThat(request.aliases(), arrayContainingInAnyOrder("foofoobar")); + } + + public void testResolveAliasesWildcardsGetAliasesRequestNoAuthorizedIndices() { + GetAliasesRequest request = new GetAliasesRequest(); + //no authorized aliases match bar*, hence the request fails + request.aliases("bar*"); + request.indices("*bar"); + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, + () -> resolveIndices(request, buildAuthorizedIndices(user, GetAliasesAction.NAME))); + assertEquals("no such index", e.getMessage()); + } + + public void testResolveAliasesAllGetAliasesRequestNoAuthorizedIndices() { + GetAliasesRequest request = new GetAliasesRequest(); + if (randomBoolean()) { + request.aliases("_all"); + } + request.indices("non_existing"); + //current user is not authorized for any index, foo* resolves to no indices, the request fails + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, + () -> resolveIndices(request, buildAuthorizedIndices(userNoIndices, GetAliasesAction.NAME))); + assertEquals("no such index", e.getMessage()); + } + + /** + * Tests that all the request types that are known to support remote indices successfully pass them through + * the resolver + */ + public void testRemotableRequestsAllowRemoteIndices() { + IndicesOptions options = IndicesOptions.fromOptions(true, false, false, false); + Tuple tuple = randomFrom( + new Tuple<>(new SearchRequest("remote:foo").indicesOptions(options), SearchAction.NAME), + new Tuple<>(new FieldCapabilitiesRequest().indices("remote:foo").indicesOptions(options), FieldCapabilitiesAction.NAME), + new Tuple<>(new GraphExploreRequest().indices("remote:foo").indicesOptions(options), GraphExploreAction.NAME) + ); + final TransportRequest request = tuple.v1(); + ResolvedIndices resolved = resolveIndices(request, buildAuthorizedIndices(user, tuple.v2())); + assertThat(resolved.getRemote(), containsInAnyOrder("remote:foo")); + assertThat(resolved.getLocal(), emptyIterable()); + assertThat(((IndicesRequest) request).indices(), arrayContaining("remote:foo")); + } + + /** + * Tests that request types that do not support remote indices will be resolved as if all index names are local. 
+ */ + public void testNonRemotableRequestDoesNotAllowRemoteIndices() { + IndicesOptions options = IndicesOptions.fromOptions(true, false, false, false); + Tuple tuple = randomFrom( + new Tuple<>(new CloseIndexRequest("remote:foo").indicesOptions(options), CloseIndexAction.NAME), + new Tuple<>(new DeleteIndexRequest("remote:foo").indicesOptions(options), DeleteIndexAction.NAME), + new Tuple<>(new PutMappingRequest("remote:foo").indicesOptions(options), PutMappingAction.NAME) + ); + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, + () -> resolveIndices(tuple.v1(), buildAuthorizedIndices(user, tuple.v2())).getLocal()); + assertEquals("no such index", e.getMessage()); + } + + public void testNonRemotableRequestDoesNotAllowRemoteWildcardIndices() { + IndicesOptions options = IndicesOptions.fromOptions(randomBoolean(), true, true, true); + Tuple tuple = randomFrom( + new Tuple<>(new CloseIndexRequest("*:*").indicesOptions(options), CloseIndexAction.NAME), + new Tuple<>(new DeleteIndexRequest("*:*").indicesOptions(options), DeleteIndexAction.NAME), + new Tuple<>(new PutMappingRequest("*:*").indicesOptions(options), PutMappingAction.NAME) + ); + final ResolvedIndices resolved = resolveIndices(tuple.v1(), buildAuthorizedIndices(user, tuple.v2())); + assertNoIndices((IndicesRequest.Replaceable) tuple.v1(), resolved); + } + + public void testCompositeIndicesRequestIsNotSupported() { + TransportRequest request = randomFrom(new MultiSearchRequest(), new MultiGetRequest(), + new MultiTermVectorsRequest(), new BulkRequest()); + expectThrows(IllegalStateException.class, () -> resolveIndices(request, + buildAuthorizedIndices(user, MultiSearchAction.NAME))); + } + + public void testResolveAdminAction() { + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(user, DeleteIndexAction.NAME); + { + RefreshRequest request = new RefreshRequest("*"); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + String[] expectedIndices = new String[]{"bar", "foofoobar", "foofoo"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(expectedIndices)); + } + { + DeleteIndexRequest request = new DeleteIndexRequest("*"); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + String[] expectedIndices = new String[]{"bar", "bar-closed", "foofoo", "foofoo-closed"}; + assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder(expectedIndices)); + } + } + + public void testIndicesExists() { + //verify that the ignore_unavailable and allow_no_indices get replaced like es core does, to make sure that + //indices exists api never throws exception due to missing indices, but only returns false instead. 
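+ //the three blocks below cover a user with no authorized indices, an explicitly named missing index and a non-matching wildcard; all of them resolve to the no-index placeholder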
+ { + IndicesExistsRequest request = new IndicesExistsRequest(); + assertNoIndices(request, resolveIndices(request, + buildAuthorizedIndices(userNoIndices, IndicesExistsAction.NAME))); + } + + { + IndicesExistsRequest request = new IndicesExistsRequest("does_not_exist"); + + assertNoIndices(request, resolveIndices(request, + buildAuthorizedIndices(user, IndicesExistsAction.NAME))); + } + { + IndicesExistsRequest request = new IndicesExistsRequest("does_not_exist_*"); + assertNoIndices(request, resolveIndices(request, + buildAuthorizedIndices(user, IndicesExistsAction.NAME))); + } + } + + public void testXPackSecurityUserHasAccessToSecurityIndex() { + SearchRequest request = new SearchRequest(); + { + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(XPackSecurityUser.INSTANCE, SearchAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + assertThat(indices, hasItem(SecurityLifecycleService.SECURITY_INDEX_NAME)); + } + { + IndicesAliasesRequest aliasesRequest = new IndicesAliasesRequest(); + aliasesRequest.addAliasAction(AliasActions.add().alias("security_alias").index(SECURITY_INDEX_NAME)); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(XPackSecurityUser.INSTANCE, IndicesAliasesAction.NAME); + List indices = resolveIndices(aliasesRequest, authorizedIndices).getLocal(); + assertThat(indices, hasItem(SecurityLifecycleService.SECURITY_INDEX_NAME)); + } + } + + public void testXPackUserDoesNotHaveAccessToSecurityIndex() { + SearchRequest request = new SearchRequest(); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(XPackUser.INSTANCE, SearchAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + assertThat(indices, not(hasItem(SecurityLifecycleService.SECURITY_INDEX_NAME))); + } + + public void testNonXPackUserAccessingSecurityIndex() { + User allAccessUser = new User("all_access", "all_access"); + roleMap.put("all_access", new RoleDescriptor("all_access", new String[] { "all" }, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("*").privileges("all").build() }, null)); + + { + SearchRequest request = new SearchRequest(); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(allAccessUser, SearchAction.NAME); + List indices = resolveIndices(request, authorizedIndices).getLocal(); + assertThat(indices, not(hasItem(SecurityLifecycleService.SECURITY_INDEX_NAME))); + } + + { + IndicesAliasesRequest aliasesRequest = new IndicesAliasesRequest(); + aliasesRequest.addAliasAction(AliasActions.add().alias("security_alias1").index("*")); + final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(allAccessUser, IndicesAliasesAction.NAME); + List indices = resolveIndices(aliasesRequest, authorizedIndices).getLocal(); + assertThat(indices, not(hasItem(SecurityLifecycleService.SECURITY_INDEX_NAME))); + } + } + + public void testUnauthorizedDateMathExpressionIgnoreUnavailable() { + SearchRequest request = new SearchRequest(""); + request.indicesOptions(IndicesOptions.fromOptions(true, true, randomBoolean(), randomBoolean())); + assertNoIndices(request, resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME))); + } + + public void testUnauthorizedDateMathExpressionIgnoreUnavailableDisallowNoIndices() { + SearchRequest request = new SearchRequest(""); + request.indicesOptions(IndicesOptions.fromOptions(true, false, randomBoolean(), randomBoolean())); + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, + () -> 
resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME))); + assertEquals("no such index" , e.getMessage()); + } + + public void testUnauthorizedDateMathExpressionStrict() { + SearchRequest request = new SearchRequest(""); + request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), randomBoolean(), randomBoolean())); + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, + () -> resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME))); + assertEquals("no such index" , e.getMessage()); + } + + public void testResolveDateMathExpression() { + // make the user authorized + final String pattern = randomBoolean() ? "" : ""; + String dateTimeIndex = indexNameExpressionResolver.resolveDateMathExpression(""); + String[] authorizedIndices = new String[] { "bar", "bar-closed", "foofoobar", "foofoo", "missing", "foofoo-closed", dateTimeIndex}; + roleMap.put("role", new RoleDescriptor("role", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices(authorizedIndices).privileges("all").build() }, null)); + + SearchRequest request = new SearchRequest(pattern); + if (randomBoolean()) { + final boolean expandIndicesOpen = Regex.isSimpleMatchPattern(pattern) ? true : randomBoolean(); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), expandIndicesOpen, randomBoolean())); + } + List indices = resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME)).getLocal(); + assertThat(indices.size(), equalTo(1)); + assertThat(request.indices()[0], equalTo(dateTimeIndex)); + } + + public void testMissingDateMathExpressionIgnoreUnavailable() { + SearchRequest request = new SearchRequest(""); + request.indicesOptions(IndicesOptions.fromOptions(true, true, randomBoolean(), randomBoolean())); + assertNoIndices(request, resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME))); + } + + public void testMissingDateMathExpressionIgnoreUnavailableDisallowNoIndices() { + SearchRequest request = new SearchRequest(""); + request.indicesOptions(IndicesOptions.fromOptions(true, false, randomBoolean(), randomBoolean())); + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, + () -> resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME))); + assertEquals("no such index" , e.getMessage()); + } + + public void testMissingDateMathExpressionStrict() { + SearchRequest request = new SearchRequest(""); + request.indicesOptions(IndicesOptions.fromOptions(false, randomBoolean(), randomBoolean(), randomBoolean())); + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, + () -> resolveIndices(request, buildAuthorizedIndices(user, SearchAction.NAME))); + assertEquals("no such index" , e.getMessage()); + } + + public void testAliasDateMathExpressionNotSupported() { + // make the user authorized + String[] authorizedIndices = new String[] { "bar", "bar-closed", "foofoobar", "foofoo", "missing", "foofoo-closed", + indexNameExpressionResolver.resolveDateMathExpression("")}; + roleMap.put("role", new RoleDescriptor("role", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices(authorizedIndices).privileges("all").build() }, null)); + GetAliasesRequest request = new GetAliasesRequest("").indices("foo", "foofoo"); + List indices = + resolveIndices(request, buildAuthorizedIndices(user, GetAliasesAction.NAME)).getLocal(); + //the union of all indices and aliases gets returned + String[] expectedIndices = new String[]{"", "foo", "foofoo"}; + 
assertThat(indices.size(), equalTo(expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + assertThat(request.indices(), arrayContainingInAnyOrder("foo", "foofoo")); + assertThat(request.aliases(), arrayContainingInAnyOrder("")); + } + + // TODO with the removal of DeleteByQuery is there another way to test resolving a write action? + + + private AuthorizedIndices buildAuthorizedIndices(User user, String action) { + PlainActionFuture rolesListener = new PlainActionFuture<>(); + authzService.roles(user, rolesListener); + return new AuthorizedIndices(user, rolesListener.actionGet(), action, metaData); + } + + public static IndexMetaData.Builder indexBuilder(String index) { + return IndexMetaData.builder(index).settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + } + + private ResolvedIndices resolveIndices(TransportRequest request, AuthorizedIndices authorizedIndices) { + return defaultIndicesResolver.resolve(request, this.metaData, authorizedIndices); + } + + private static void assertNoIndices(IndicesRequest.Replaceable request, ResolvedIndices resolvedIndices) { + final List localIndices = resolvedIndices.getLocal(); + assertEquals(1, localIndices.size()); + assertEquals(IndicesAndAliasesResolverField.NO_INDEX_PLACEHOLDER, localIndices.iterator().next()); + assertEquals(IndicesAndAliasesResolver.NO_INDICES_LIST, Arrays.asList(request.indices())); + assertEquals(0, resolvedIndices.getRemote().size()); + } + + private void assertSameValues(List indices, String[] expectedIndices) { + assertThat(indices.stream().distinct().count(), equalTo((long)expectedIndices.length)); + assertThat(indices, hasItems(expectedIndices)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java new file mode 100644 index 0000000000000..76568d3d48b5a --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/ReadActionsTests.java @@ -0,0 +1,421 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authz; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.get.GetAction; +import org.elasticsearch.action.get.MultiGetAction; +import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.search.MultiSearchResponse; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.termvectors.MultiTermVectorsAction; +import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; +import org.elasticsearch.action.termvectors.TermVectorsAction; +import org.elasticsearch.client.Requests; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; + +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.test.SecurityTestsUtils.assertAuthorizationExceptionDefaultUsers; +import static org.elasticsearch.test.SecurityTestsUtils.assertThrowsAuthorizationExceptionDefaultUsers; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoSearchHits; +import static org.hamcrest.core.IsCollectionContaining.hasItems; +import static org.hamcrest.core.IsEqual.equalTo; +import static org.hamcrest.core.IsInstanceOf.instanceOf; +import static org.hamcrest.number.OrderingComparison.greaterThan; + +public class ReadActionsTests extends SecurityIntegTestCase { + + @Override + protected String configRoles() { + return SecuritySettingsSource.TEST_ROLE + ":\n" + + " cluster: [ ALL ]\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ manage, write ]\n" + + " - names: ['/test.*/', '/-alias.*/']\n" + + " privileges: [ read ]\n"; + } + + public void testSearchForAll() { + //index1 is not authorized and referred to through wildcard + createIndicesWithRandomAliases("test1", "test2", "test3", "index1"); + + SearchResponse searchResponse = client().prepareSearch().get(); + assertReturnedIndices(searchResponse, "test1", "test2", "test3"); + } + + public void testSearchForWildcard() { + //index1 is not authorized and referred to through wildcard + createIndicesWithRandomAliases("test1", "test2", "test3", "index1"); + + SearchResponse searchResponse = client().prepareSearch("*").get(); + assertReturnedIndices(searchResponse, "test1", "test2", "test3"); + } + + public void testSearchNonAuthorizedWildcard() { + //wildcard doesn't match any authorized index + createIndicesWithRandomAliases("test1", "test2", "index1", "index2"); + assertNoSearchHits(client().prepareSearch("index*").get()); + } + + public void testSearchNonAuthorizedWildcardDisallowNoIndices() { + //wildcard doesn't match any authorized index + createIndicesWithRandomAliases("test1", "test2", "index1", "index2"); + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> client().prepareSearch("index*") + .setIndicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean())).get()); + assertEquals("no such index", e.getMessage()); + } + + public void testEmptyClusterSearchForAll() { + assertNoSearchHits(client().prepareSearch().get()); + } + + public void testEmptyClusterSearchForAllDisallowNoIndices() { + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> client().prepareSearch() + 
.setIndicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean())).get()); + assertEquals("no such index", e.getMessage()); + } + + public void testEmptyClusterSearchForWildcard() { + SearchResponse searchResponse = client().prepareSearch("*").get(); + assertNoSearchHits(searchResponse); + } + + public void testEmptyClusterSearchForWildcardDisallowNoIndices() { + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> client().prepareSearch("*") + .setIndicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean())).get()); + assertEquals("no such index", e.getMessage()); + } + + public void testEmptyAuthorizedIndicesSearchForAll() { + createIndicesWithRandomAliases("index1", "index2"); + assertNoSearchHits(client().prepareSearch().get()); + } + + public void testEmptyAuthorizedIndicesSearchForAllDisallowNoIndices() { + createIndicesWithRandomAliases("index1", "index2"); + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> client().prepareSearch() + .setIndicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean())).get()); + assertEquals("no such index", e.getMessage()); + } + + public void testEmptyAuthorizedIndicesSearchForWildcard() { + createIndicesWithRandomAliases("index1", "index2"); + assertNoSearchHits(client().prepareSearch("*").get()); + } + + public void testEmptyAuthorizedIndicesSearchForWildcardDisallowNoIndices() { + createIndicesWithRandomAliases("index1", "index2"); + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> client().prepareSearch("*") + .setIndicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean())).get()); + assertEquals("no such index", e.getMessage()); + } + + public void testExplicitNonAuthorizedIndex() { + createIndicesWithRandomAliases("test1", "test2", "index1"); + assertThrowsAuthorizationExceptionDefaultUsers(client().prepareSearch("test*", "index1")::get, SearchAction.NAME); + } + + public void testIndexNotFound() { + createIndicesWithRandomAliases("test1", "test2", "index1"); + assertThrowsAuthorizationExceptionDefaultUsers(client().prepareSearch("missing")::get, SearchAction.NAME); + } + + public void testIndexNotFoundIgnoreUnavailable() { + IndicesOptions indicesOptions = IndicesOptions.lenientExpandOpen(); + createIndicesWithRandomAliases("test1", "test2", "index1"); + + String index = randomFrom("test1", "test2"); + assertReturnedIndices(client().prepareSearch("missing", index).setIndicesOptions(indicesOptions).get(), index); + + assertReturnedIndices(client().prepareSearch("missing", "test*").setIndicesOptions(indicesOptions).get(), "test1", "test2"); + + assertReturnedIndices(client().prepareSearch("missing_*", "test*").setIndicesOptions(indicesOptions).get(), "test1", "test2"); + + //an unauthorized index is the same as a missing one + assertNoSearchHits(client().prepareSearch("missing").setIndicesOptions(indicesOptions).get()); + + assertNoSearchHits(client().prepareSearch("index1").setIndicesOptions(indicesOptions).get()); + + assertNoSearchHits(client().prepareSearch("missing", "index1").setIndicesOptions(indicesOptions).get()); + + assertNoSearchHits(client().prepareSearch("does_not_match_any_*").setIndicesOptions(indicesOptions).get()); + + assertNoSearchHits(client().prepareSearch("does_not_match_any_*", "index1").setIndicesOptions(indicesOptions).get()); + + assertNoSearchHits(client().prepareSearch("index*").setIndicesOptions(indicesOptions).get()); + + 
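+ //combining an unauthorized wildcard with a missing index name behaves the same way: no error and no hits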
assertNoSearchHits(client().prepareSearch("index*", "missing").setIndicesOptions(indicesOptions).get()); + } + + public void testExplicitExclusion() { + //index1 is not authorized and referred to through wildcard, test2 is excluded + createIndicesWithRandomAliases("test1", "test2", "test3", "index1"); + + SearchResponse searchResponse = client().prepareSearch("*", "-test2").get(); + assertReturnedIndices(searchResponse, "test1", "test3"); + } + + public void testWildcardExclusion() { + //index1 is not authorized and referred to through wildcard, test2 is excluded + createIndicesWithRandomAliases("test1", "test2", "test21", "test3", "index1"); + + SearchResponse searchResponse = client().prepareSearch("*", "-test2*").get(); + assertReturnedIndices(searchResponse, "test1", "test3"); + } + + public void testInclusionAndWildcardsExclusion() { + //index1 is not authorized and referred to through wildcard, test111 and test112 are excluded + createIndicesWithRandomAliases("test1", "test10", "test111", "test112", "test2", "index1"); + + SearchResponse searchResponse = client().prepareSearch("test1*", "index*", "-test11*").get(); + assertReturnedIndices(searchResponse, "test1", "test10"); + } + + public void testExplicitAndWildcardsInclusionAndWildcardExclusion() { + //index1 is not authorized and referred to through wildcard, test111 and test112 are excluded + createIndicesWithRandomAliases("test1", "test10", "test111", "test112", "test2", "index1"); + + SearchResponse searchResponse = client().prepareSearch("test2", "test11*", "index*", "-test2*").get(); + assertReturnedIndices(searchResponse, "test111", "test112"); + } + + public void testExplicitAndWildcardInclusionAndExplicitExclusions() { + //index1 is not authorized and referred to through wildcard, test111 and test112 are excluded + createIndicesWithRandomAliases("test1", "test10", "test111", "test112", "test2", "index1"); + + SearchResponse searchResponse = client().prepareSearch("test10", "test11*", "index*", "-test111", "-test112").get(); + assertReturnedIndices(searchResponse, "test10"); + } + + public void testMissingDateMath() { + expectThrows(IndexNotFoundException.class, () -> client().prepareSearch("").get()); + } + + public void testMultiSearchUnauthorizedIndex() { + //index1 is not authorized, only that specific item fails + createIndicesWithRandomAliases("test1", "test2", "test3", "index1"); + { + MultiSearchResponse multiSearchResponse = client().prepareMultiSearch() + .add(Requests.searchRequest()) + .add(Requests.searchRequest("index1")).get(); + assertEquals(2, multiSearchResponse.getResponses().length); + assertFalse(multiSearchResponse.getResponses()[0].isFailure()); + SearchResponse searchResponse = multiSearchResponse.getResponses()[0].getResponse(); + assertThat(searchResponse.getHits().getTotalHits(), greaterThan(0L)); + assertReturnedIndices(searchResponse, "test1", "test2", "test3"); + assertTrue(multiSearchResponse.getResponses()[1].isFailure()); + Exception exception = multiSearchResponse.getResponses()[1].getFailure(); + assertThat(exception, instanceOf(ElasticsearchSecurityException.class)); + assertAuthorizationExceptionDefaultUsers(exception, SearchAction.NAME); + } + { + MultiSearchResponse multiSearchResponse = client().prepareMultiSearch() + .add(Requests.searchRequest()) + .add(Requests.searchRequest("index1") + .indicesOptions(IndicesOptions.fromOptions(true, true, true, randomBoolean()))).get(); + assertEquals(2, multiSearchResponse.getResponses().length); + 
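+ //first item (implicit _all over the authorized indices) succeeds, second item (explicit unauthorized index1) fails with an authorization error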
assertFalse(multiSearchResponse.getResponses()[0].isFailure()); + SearchResponse searchResponse = multiSearchResponse.getResponses()[0].getResponse(); + assertThat(searchResponse.getHits().getTotalHits(), greaterThan(0L)); + assertReturnedIndices(searchResponse, "test1", "test2", "test3"); + assertFalse(multiSearchResponse.getResponses()[1].isFailure()); + assertNoSearchHits(multiSearchResponse.getResponses()[1].getResponse()); + } + } + + public void testMultiSearchMissingUnauthorizedIndex() { + createIndicesWithRandomAliases("test1", "test2", "test3", "index1"); + { + MultiSearchResponse multiSearchResponse = client().prepareMultiSearch() + .add(Requests.searchRequest()) + .add(Requests.searchRequest("missing")).get(); + assertEquals(2, multiSearchResponse.getResponses().length); + assertFalse(multiSearchResponse.getResponses()[0].isFailure()); + SearchResponse searchResponse = multiSearchResponse.getResponses()[0].getResponse(); + assertThat(searchResponse.getHits().getTotalHits(), greaterThan(0L)); + assertReturnedIndices(searchResponse, "test1", "test2", "test3"); + assertTrue(multiSearchResponse.getResponses()[1].isFailure()); + Exception exception = multiSearchResponse.getResponses()[1].getFailure(); + assertThat(exception, instanceOf(ElasticsearchSecurityException.class)); + assertAuthorizationExceptionDefaultUsers(exception, SearchAction.NAME); + } + { + MultiSearchResponse multiSearchResponse = client().prepareMultiSearch() + .add(Requests.searchRequest()) + .add(Requests.searchRequest("missing") + .indicesOptions(IndicesOptions.fromOptions(true, true, true, randomBoolean()))).get(); + assertEquals(2, multiSearchResponse.getResponses().length); + assertFalse(multiSearchResponse.getResponses()[0].isFailure()); + SearchResponse searchResponse = multiSearchResponse.getResponses()[0].getResponse(); + assertThat(searchResponse.getHits().getTotalHits(), greaterThan(0L)); + assertReturnedIndices(searchResponse, "test1", "test2", "test3"); + assertFalse(multiSearchResponse.getResponses()[1].isFailure()); + assertNoSearchHits(multiSearchResponse.getResponses()[1].getResponse()); + } + } + + public void testMultiSearchMissingAuthorizedIndex() { + //test4 is missing but authorized, only that specific item fails + createIndicesWithRandomAliases("test1", "test2", "test3", "index1"); + { + //default indices options for search request don't ignore unavailable indices, only individual items fail. + MultiSearchResponse multiSearchResponse = client().prepareMultiSearch() + .add(Requests.searchRequest()) + .add(Requests.searchRequest("test4")).get(); + assertFalse(multiSearchResponse.getResponses()[0].isFailure()); + assertReturnedIndices(multiSearchResponse.getResponses()[0].getResponse(), "test1", "test2", "test3"); + assertTrue(multiSearchResponse.getResponses()[1].isFailure()); + assertThat(multiSearchResponse.getResponses()[1].getFailure().toString(), + equalTo("[test4] IndexNotFoundException[no such index]")); + } + { + //we set ignore_unavailable and allow_no_indices to true, no errors returned, second item doesn't have hits. 
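+ //lenient counterpart of the block above, where the default (strict) options made the missing but authorized test4 item fail on its own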
+ MultiSearchResponse multiSearchResponse = client().prepareMultiSearch() + .add(Requests.searchRequest()) + .add(Requests.searchRequest("test4") + .indicesOptions(IndicesOptions.fromOptions(true, true, true, randomBoolean()))).get(); + assertReturnedIndices(multiSearchResponse.getResponses()[0].getResponse(), "test1", "test2", "test3"); + assertNoSearchHits(multiSearchResponse.getResponses()[1].getResponse()); + } + } + + public void testMultiSearchWildcard() { + createIndicesWithRandomAliases("test1", "test2", "test3", "index1"); + { + MultiSearchResponse multiSearchResponse = client().prepareMultiSearch().add(Requests.searchRequest()) + .add(Requests.searchRequest("index*")).get(); + assertEquals(2, multiSearchResponse.getResponses().length); + assertFalse(multiSearchResponse.getResponses()[0].isFailure()); + SearchResponse searchResponse = multiSearchResponse.getResponses()[0].getResponse(); + assertThat(searchResponse.getHits().getTotalHits(), greaterThan(0L)); + assertReturnedIndices(searchResponse, "test1", "test2", "test3"); + assertNoSearchHits(multiSearchResponse.getResponses()[1].getResponse()); + } + { + MultiSearchResponse multiSearchResponse = client().prepareMultiSearch().add(Requests.searchRequest()) + .add(Requests.searchRequest("index*") + .indicesOptions(IndicesOptions.fromOptions(randomBoolean(), false, true, randomBoolean()))).get(); + assertEquals(2, multiSearchResponse.getResponses().length); + assertFalse(multiSearchResponse.getResponses()[0].isFailure()); + SearchResponse searchResponse = multiSearchResponse.getResponses()[0].getResponse(); + assertThat(searchResponse.getHits().getTotalHits(), greaterThan(0L)); + assertReturnedIndices(searchResponse, "test1", "test2", "test3"); + assertTrue(multiSearchResponse.getResponses()[1].isFailure()); + Exception exception = multiSearchResponse.getResponses()[1].getFailure(); + assertThat(exception, instanceOf(IndexNotFoundException.class)); + } + } + + public void testIndicesExists() { + createIndicesWithRandomAliases("test1", "test2", "test3"); + + assertEquals(true, client().admin().indices().prepareExists("*").get().isExists()); + + assertEquals(true, client().admin().indices().prepareExists("_all").get().isExists()); + + assertEquals(true, client().admin().indices().prepareExists("test1", "test2").get().isExists()); + + assertEquals(true, client().admin().indices().prepareExists("test*").get().isExists()); + + assertEquals(false, client().admin().indices().prepareExists("does_not_exist").get().isExists()); + + assertEquals(false, client().admin().indices().prepareExists("does_not_exist*").get().isExists()); + } + + public void testGet() { + createIndicesWithRandomAliases("test1", "index1"); + + client().prepareGet("test1", "type", "id").get(); + + assertThrowsAuthorizationExceptionDefaultUsers(client().prepareGet("index1", "type", "id")::get, GetAction.NAME); + + assertThrowsAuthorizationExceptionDefaultUsers(client().prepareGet("missing", "type", "id")::get, GetAction.NAME); + + expectThrows(IndexNotFoundException.class, () -> client().prepareGet("test5", "type", "id").get()); + } + + public void testMultiGet() { + createIndicesWithRandomAliases("test1", "test2", "test3", "index1"); + MultiGetResponse multiGetResponse = client().prepareMultiGet() + .add("test1", "type", "id") + .add("index1", "type", "id") + .add("test3", "type", "id") + .add("missing", "type", "id") + .add("test5", "type", "id").get(); + assertEquals(5, multiGetResponse.getResponses().length); + 
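+ //item by item: test1 and test3 succeed, index1 is rejected by shard-level authorization, missing (unauthorized, non-existing) and test5 (authorized but non-existing) both fail with IndexNotFoundException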
assertFalse(multiGetResponse.getResponses()[0].isFailed()); + assertEquals("test1", multiGetResponse.getResponses()[0].getResponse().getIndex()); + assertTrue(multiGetResponse.getResponses()[1].isFailed()); + assertEquals("index1", multiGetResponse.getResponses()[1].getFailure().getIndex()); + assertAuthorizationExceptionDefaultUsers(multiGetResponse.getResponses()[1].getFailure().getFailure(), + MultiGetAction.NAME + "[shard]"); + assertFalse(multiGetResponse.getResponses()[2].isFailed()); + assertEquals("test3", multiGetResponse.getResponses()[2].getResponse().getIndex()); + assertTrue(multiGetResponse.getResponses()[3].isFailed()); + assertEquals("missing", multiGetResponse.getResponses()[3].getFailure().getIndex()); + //different behaviour compared to get api: we leak information about a non existing index that the current user is not + //authorized for. Should rather be an authorization exception but we only authorize at the shard level in mget. If we + //authorized globally, we would fail the whole mget request which is not desirable. + assertThat(multiGetResponse.getResponses()[3].getFailure().getFailure(), instanceOf(IndexNotFoundException.class)); + assertTrue(multiGetResponse.getResponses()[4].isFailed()); + assertThat(multiGetResponse.getResponses()[4].getFailure().getFailure(), instanceOf(IndexNotFoundException.class)); + } + + public void testTermVectors() { + createIndicesWithRandomAliases("test1", "index1"); + client().prepareTermVectors("test1", "type", "id").get(); + + assertThrowsAuthorizationExceptionDefaultUsers(client().prepareTermVectors("index1", "type", "id")::get, TermVectorsAction.NAME); + + assertThrowsAuthorizationExceptionDefaultUsers(client().prepareTermVectors("missing", "type", "id")::get, TermVectorsAction.NAME); + + expectThrows(IndexNotFoundException.class, () -> client().prepareTermVectors("test5", "type", "id").get()); + } + + public void testMultiTermVectors() { + createIndicesWithRandomAliases("test1", "test2", "test3", "index1"); + MultiTermVectorsResponse response = client().prepareMultiTermVectors() + .add("test1", "type", "id") + .add("index1", "type", "id") + .add("test3", "type", "id") + .add("missing", "type", "id") + .add("test5", "type", "id").get(); + assertEquals(5, response.getResponses().length); + assertFalse(response.getResponses()[0].isFailed()); + assertEquals("test1", response.getResponses()[0].getResponse().getIndex()); + assertTrue(response.getResponses()[1].isFailed()); + assertEquals("index1", response.getResponses()[1].getFailure().getIndex()); + assertAuthorizationExceptionDefaultUsers(response.getResponses()[1].getFailure().getCause(), + MultiTermVectorsAction.NAME + "[shard]"); + assertFalse(response.getResponses()[2].isFailed()); + assertEquals("test3", response.getResponses()[2].getResponse().getIndex()); + assertTrue(response.getResponses()[3].isFailed()); + assertEquals("missing", response.getResponses()[3].getFailure().getIndex()); + //different behaviour compared to term_vector api: we leak information about a non existing index that the current user is not + //authorized for. Should rather be an authorization exception but we only authorize at the shard level in mget. If we + //authorized globally, we would fail the whole mget request which is not desirable. 
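+ //(the shard-level authorization model is the same one used for mget above, hence the identical behaviour here)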
+ assertThat(response.getResponses()[3].getFailure().getCause(), instanceOf(IndexNotFoundException.class)); + assertTrue(response.getResponses()[4].isFailed()); + assertThat(response.getResponses()[4].getFailure().getCause(), instanceOf(IndexNotFoundException.class)); + } + + private static void assertReturnedIndices(SearchResponse searchResponse, String... indices) { + List foundIndices = new ArrayList<>(); + for (SearchHit searchHit : searchResponse.getHits().getHits()) { + foundIndices.add(searchHit.getIndex()); + } + assertThat(foundIndices.size(), equalTo(indices.length)); + assertThat(foundIndices, hasItems(indices)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java new file mode 100644 index 0000000000000..9d34382d566fb --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RoleDescriptorTests.java @@ -0,0 +1,171 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authz; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.ByteBufferStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.support.MetadataUtils; + +import java.util.Collections; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.core.Is.is; + +public class RoleDescriptorTests extends ESTestCase { + + public void testIndexGroup() throws Exception { + RoleDescriptor.IndicesPrivileges privs = RoleDescriptor.IndicesPrivileges.builder() + .indices("idx") + .privileges("priv") + .build(); + XContentBuilder b = jsonBuilder(); + privs.toXContent(b, ToXContent.EMPTY_PARAMS); + assertEquals("{\"names\":[\"idx\"],\"privileges\":[\"priv\"]}", Strings.toString(b)); + } + + public void testToString() throws Exception { + RoleDescriptor.IndicesPrivileges[] groups = new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder() + .indices("i1", "i2") + .privileges("read") + .grantedFields("body", "title") + .query("{\"query\": {\"match_all\": {}}}") + .build() + }; + RoleDescriptor descriptor = new RoleDescriptor("test", new String[] { "all", "none" }, groups, new String[] { "sudo" }); + assertThat(descriptor.toString(), is("Role[name=test, cluster=[all,none], indicesPrivileges=[IndicesPrivileges[indices=[i1,i2], " + + "privileges=[read], field_security=[grant=[body,title], except=null], query={\"query\": {\"match_all\": {}}}],]" + + ", runAs=[sudo], metadata=[{}]]")); + } + + public void testToXContent() throws Exception { + RoleDescriptor.IndicesPrivileges[] groups = new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder() + 
.indices("i1", "i2") + .privileges("read") + .grantedFields("body", "title") + .query("{\"query\": {\"match_all\": {}}}") + .build() + }; + Map metadata = randomBoolean() ? MetadataUtils.DEFAULT_RESERVED_METADATA : null; + RoleDescriptor descriptor = new RoleDescriptor("test", new String[] { "all", "none" }, groups, new String[] { "sudo" }, metadata); + XContentBuilder builder = descriptor.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS); + RoleDescriptor parsed = RoleDescriptor.parse("test", BytesReference.bytes(builder), false, XContentType.JSON); + assertEquals(parsed, descriptor); + } + + public void testParse() throws Exception { + + String q = "{\"cluster\":[\"a\", \"b\"]}"; + RoleDescriptor rd = RoleDescriptor.parse("test", new BytesArray(q), false, XContentType.JSON); + assertEquals("test", rd.getName()); + assertArrayEquals(new String[] { "a", "b" }, rd.getClusterPrivileges()); + assertEquals(0, rd.getIndicesPrivileges().length); + assertArrayEquals(Strings.EMPTY_ARRAY, rd.getRunAs()); + + q = "{\"cluster\":[\"a\", \"b\"], \"run_as\": [\"m\", \"n\"]}"; + rd = RoleDescriptor.parse("test", new BytesArray(q), false, XContentType.JSON); + assertEquals("test", rd.getName()); + assertArrayEquals(new String[] { "a", "b" }, rd.getClusterPrivileges()); + assertEquals(0, rd.getIndicesPrivileges().length); + assertArrayEquals(new String[] { "m", "n" }, rd.getRunAs()); + + q = "{\"cluster\":[\"a\", \"b\"], \"run_as\": [\"m\", \"n\"], \"index\": [{\"names\": \"idx1\", \"privileges\": [\"p1\", " + + "\"p2\"]}, {\"names\": \"idx2\", \"privileges\": [\"p3\"], \"field_security\": " + + "{\"grant\": [\"f1\", \"f2\"]}}, {\"names\": " + + "\"idx2\", " + + "\"privileges\": [\"p3\"], \"field_security\": {\"grant\": [\"f1\", \"f2\"]}, \"query\": \"{\\\"match_all\\\": {}}\"}]}"; + rd = RoleDescriptor.parse("test", new BytesArray(q), false, XContentType.JSON); + assertEquals("test", rd.getName()); + assertArrayEquals(new String[] { "a", "b" }, rd.getClusterPrivileges()); + assertEquals(3, rd.getIndicesPrivileges().length); + assertArrayEquals(new String[] { "m", "n" }, rd.getRunAs()); + + q = "{\"cluster\":[\"a\", \"b\"], \"run_as\": [\"m\", \"n\"], \"index\": [{\"names\": [\"idx1\",\"idx2\"], \"privileges\": " + + "[\"p1\", \"p2\"]}]}"; + rd = RoleDescriptor.parse("test", new BytesArray(q), false, XContentType.JSON); + assertEquals("test", rd.getName()); + assertArrayEquals(new String[] { "a", "b" }, rd.getClusterPrivileges()); + assertEquals(1, rd.getIndicesPrivileges().length); + assertArrayEquals(new String[] { "idx1", "idx2" }, rd.getIndicesPrivileges()[0].getIndices()); + assertArrayEquals(new String[] { "m", "n" }, rd.getRunAs()); + assertNull(rd.getIndicesPrivileges()[0].getQuery()); + + q = "{\"cluster\":[\"a\", \"b\"], \"metadata\":{\"foo\":\"bar\"}}"; + rd = RoleDescriptor.parse("test", new BytesArray(q), false, XContentType.JSON); + assertEquals("test", rd.getName()); + assertArrayEquals(new String[] { "a", "b" }, rd.getClusterPrivileges()); + assertEquals(0, rd.getIndicesPrivileges().length); + assertArrayEquals(Strings.EMPTY_ARRAY, rd.getRunAs()); + assertNotNull(rd.getMetadata()); + assertThat(rd.getMetadata().size(), is(1)); + assertThat(rd.getMetadata().get("foo"), is("bar")); + } + + public void testSerialization() throws Exception { + BytesStreamOutput output = new BytesStreamOutput(); + RoleDescriptor.IndicesPrivileges[] groups = new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder() + .indices("i1", "i2") + .privileges("read") + .grantedFields("body", 
"title") + .query("{\"query\": {\"match_all\": {}}}") + .build() + }; + Map metadata = randomBoolean() ? MetadataUtils.DEFAULT_RESERVED_METADATA : null; + final RoleDescriptor descriptor = + new RoleDescriptor("test", new String[] { "all", "none" }, groups, new String[] { "sudo" }, metadata); + RoleDescriptor.writeTo(descriptor, output); + StreamInput streamInput = ByteBufferStreamInput.wrap(BytesReference.toBytes(output.bytes())); + final RoleDescriptor serialized = RoleDescriptor.readFrom(streamInput); + assertEquals(descriptor, serialized); + } + + public void testParseEmptyQuery() throws Exception { + String json = "{\"cluster\":[\"a\", \"b\"], \"run_as\": [\"m\", \"n\"], \"index\": [{\"names\": [\"idx1\",\"idx2\"], " + + "\"privileges\": [\"p1\", \"p2\"], \"query\": \"\"}]}"; + RoleDescriptor rd = RoleDescriptor.parse("test", new BytesArray(json), false, XContentType.JSON); + assertEquals("test", rd.getName()); + assertArrayEquals(new String[] { "a", "b" }, rd.getClusterPrivileges()); + assertEquals(1, rd.getIndicesPrivileges().length); + assertArrayEquals(new String[] { "idx1", "idx2" }, rd.getIndicesPrivileges()[0].getIndices()); + assertArrayEquals(new String[] { "m", "n" }, rd.getRunAs()); + assertNull(rd.getIndicesPrivileges()[0].getQuery()); + } + + public void testParseEmptyQueryUsingDeprecatedIndicesField() throws Exception { + String json = "{\"cluster\":[\"a\", \"b\"], \"run_as\": [\"m\", \"n\"], \"indices\": [{\"names\": [\"idx1\",\"idx2\"], " + + "\"privileges\": [\"p1\", \"p2\"], \"query\": \"\"}]}"; + RoleDescriptor rd = RoleDescriptor.parse("test", new BytesArray(json), false, XContentType.JSON); + assertEquals("test", rd.getName()); + assertArrayEquals(new String[] { "a", "b" }, rd.getClusterPrivileges()); + assertEquals(1, rd.getIndicesPrivileges().length); + assertArrayEquals(new String[] { "idx1", "idx2" }, rd.getIndicesPrivileges()[0].getIndices()); + assertArrayEquals(new String[] { "m", "n" }, rd.getRunAs()); + assertNull(rd.getIndicesPrivileges()[0].getQuery()); + } + + public void testParseIgnoresTransientMetadata() throws Exception { + final RoleDescriptor descriptor = new RoleDescriptor("test", new String[] { "all" }, null, null, + Collections.singletonMap("_unlicensed_feature", true), Collections.singletonMap("foo", "bar")); + XContentBuilder b = jsonBuilder(); + descriptor.toXContent(b, ToXContent.EMPTY_PARAMS); + RoleDescriptor parsed = RoleDescriptor.parse("test", BytesReference.bytes(b), false, XContentType.JSON); + assertNotNull(parsed); + assertEquals(1, parsed.getTransientMetadata().size()); + assertEquals(true, parsed.getTransientMetadata().get("enabled")); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java new file mode 100644 index 0000000000000..c3d24e1adc7a4 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecurityScrollTests.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authz; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.search.SearchContextMissingException; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.junit.After; + +import java.util.Collections; + +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class SecurityScrollTests extends SecurityIntegTestCase { + + public void testScrollIsPerUser() throws Exception { + assertSecurityIndexActive(); + securityClient().preparePutRole("scrollable") + .addIndices(new String[] { randomAlphaOfLengthBetween(4, 12) }, new String[] { "read" }, null, null, null) + .get(); + securityClient().preparePutUser("other", SecuritySettingsSourceField.TEST_PASSWORD.toCharArray(), "scrollable").get(); + + final int numDocs = randomIntBetween(4, 16); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < docs.length; i++) { + docs[i] = client().prepareIndex("foo", "bar").setSource("doc", i); + } + indexRandom(true, docs); + + SearchResponse response = client().prepareSearch("foo") + .setScroll(TimeValue.timeValueSeconds(5L)) + .setQuery(matchAllQuery()) + .setSize(1) + .get(); + assertEquals(numDocs, response.getHits().getTotalHits()); + assertEquals(1, response.getHits().getHits().length); + + if (randomBoolean()) { + response = client().prepareSearchScroll(response.getScrollId()).setScroll(TimeValue.timeValueSeconds(5L)).get(); + assertEquals(numDocs, response.getHits().getTotalHits()); + assertEquals(1, response.getHits().getHits().length); + } + + final String scrollId = response.getScrollId(); + SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> + client() + .filterWithHeader(Collections.singletonMap("Authorization", + UsernamePasswordToken.basicAuthHeaderValue("other", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING))) + .prepareSearchScroll(scrollId) + .get()); + for (ShardSearchFailure failure : e.shardFailures()) { + assertThat(ExceptionsHelper.unwrapCause(failure.getCause()), instanceOf(SearchContextMissingException.class)); + } + } + + public void testSearchAndClearScroll() throws Exception { + IndexRequestBuilder[] docs = new IndexRequestBuilder[randomIntBetween(20, 100)]; + for (int i = 0; i < docs.length; i++) { + docs[i] = client().prepareIndex("idx", "type").setSource("field", "value"); + } + indexRandom(true, docs); + SearchResponse response = client().prepareSearch() + .setQuery(matchAllQuery()) + .setScroll(TimeValue.timeValueSeconds(5L)) + .setSize(randomIntBetween(1, 10)).get(); + + int hits = 0; + try { + do { + assertHitCount(response, docs.length); + hits += response.getHits().getHits().length; + response = client().prepareSearchScroll(response.getScrollId()) + .setScroll(TimeValue.timeValueSeconds(5L)).get(); + } while 
(response.getHits().getHits().length != 0); + + assertThat(hits, equalTo(docs.length)); + } finally { + clearScroll(response.getScrollId()); + } + } + + @After + public void cleanupSecurityIndex() throws Exception { + super.deleteSecurityIndex(); + } + + @Override + public String transportClientUsername() { + return this.nodeClientUsername(); + } + + @Override + public SecureString transportClientPassword() { + return this.nodeClientPassword(); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java new file mode 100644 index 0000000000000..fac88e8af09b0 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java @@ -0,0 +1,248 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authz; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.concurrent.ThreadContext.StoredContext; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.search.Scroll; +import org.elasticsearch.search.SearchContextMissingException; +import org.elasticsearch.search.internal.InternalScrollSearchRequest; +import org.elasticsearch.search.internal.ScrollContext; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.TestSearchContext; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequest.Empty; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import org.elasticsearch.xpack.core.security.authc.AuthenticationField; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.audit.AuditTrailService; + +import static org.elasticsearch.mock.orig.Mockito.verifyNoMoreInteractions; +import static org.elasticsearch.xpack.security.authz.AuthorizationService.ORIGINATING_ACTION_KEY; +import static org.elasticsearch.xpack.security.authz.AuthorizationService.ROLE_NAMES_KEY; +import static org.elasticsearch.xpack.security.authz.SecuritySearchOperationListener.ensureAuthenticatedUserIsSame; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +public class SecuritySearchOperationListenerTests extends ESTestCase { + + public void testUnlicensed() { + XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isSecurityEnabled()).thenReturn(true); + when(licenseState.isAuthAllowed()).thenReturn(false); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + AuditTrailService auditTrailService = mock(AuditTrailService.class); + SearchContext searchContext = mock(SearchContext.class); + when(searchContext.scrollContext()).thenReturn(new ScrollContext()); + + 
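+ // auth is disallowed by the license, so both listener callbacks should return early without touching the audit trail or the search context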
SecuritySearchOperationListener listener = new SecuritySearchOperationListener(threadContext, licenseState, auditTrailService); + listener.onNewScrollContext(searchContext); + listener.validateSearchContext(searchContext, Empty.INSTANCE); + verify(licenseState, times(2)).isSecurityEnabled(); + verify(licenseState, times(2)).isAuthAllowed(); + verifyZeroInteractions(auditTrailService, searchContext); + } + + public void testOnNewContextSetsAuthentication() throws Exception { + TestScrollSearchContext testSearchContext = new TestScrollSearchContext(); + testSearchContext.scrollContext(new ScrollContext()); + final Scroll scroll = new Scroll(TimeValue.timeValueSeconds(2L)); + testSearchContext.scrollContext().scroll = scroll; + XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isSecurityEnabled()).thenReturn(true); + when(licenseState.isAuthAllowed()).thenReturn(true); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + AuditTrailService auditTrailService = mock(AuditTrailService.class); + Authentication authentication = new Authentication(new User("test", "role"), new RealmRef("realm", "file", "node"), null); + authentication.writeToContext(threadContext); + + SecuritySearchOperationListener listener = new SecuritySearchOperationListener(threadContext, licenseState, auditTrailService); + listener.onNewScrollContext(testSearchContext); + + Authentication contextAuth = testSearchContext.scrollContext().getFromContext(AuthenticationField.AUTHENTICATION_KEY); + assertEquals(authentication, contextAuth); + assertEquals(scroll, testSearchContext.scrollContext().scroll); + + verify(licenseState).isAuthAllowed(); + verify(licenseState).isSecurityEnabled(); + verifyZeroInteractions(auditTrailService); + } + + public void testValidateSearchContext() throws Exception { + TestScrollSearchContext testSearchContext = new TestScrollSearchContext(); + testSearchContext.scrollContext(new ScrollContext()); + testSearchContext.scrollContext().putInContext(AuthenticationField.AUTHENTICATION_KEY, + new Authentication(new User("test", "role"), new RealmRef("realm", "file", "node"), null)); + testSearchContext.scrollContext().scroll = new Scroll(TimeValue.timeValueSeconds(2L)); + XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isSecurityEnabled()).thenReturn(true); + when(licenseState.isAuthAllowed()).thenReturn(true); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + AuditTrailService auditTrailService = mock(AuditTrailService.class); + + SecuritySearchOperationListener listener = new SecuritySearchOperationListener(threadContext, licenseState, auditTrailService); + try (StoredContext ignore = threadContext.newStoredContext(false)) { + Authentication authentication = new Authentication(new User("test", "role"), new RealmRef("realm", "file", "node"), null); + authentication.writeToContext(threadContext); + listener.validateSearchContext(testSearchContext, Empty.INSTANCE); + verify(licenseState).isAuthAllowed(); + verify(licenseState).isSecurityEnabled(); + verifyZeroInteractions(auditTrailService); + } + + try (StoredContext ignore = threadContext.newStoredContext(false)) { + final String nodeName = randomAlphaOfLengthBetween(1, 8); + final String realmName = randomAlphaOfLengthBetween(1, 16); + Authentication authentication = new Authentication(new User("test", "role"), new RealmRef(realmName, "file", nodeName), null); + authentication.writeToContext(threadContext); + 
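+ // same principal and realm type as the authentication stored in the scroll context, so validation passes even though the realm name and node differ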
listener.validateSearchContext(testSearchContext, Empty.INSTANCE); + verify(licenseState, times(2)).isAuthAllowed(); + verify(licenseState, times(2)).isSecurityEnabled(); + verifyZeroInteractions(auditTrailService); + } + + try (StoredContext ignore = threadContext.newStoredContext(false)) { + final String nodeName = randomBoolean() ? "node" : randomAlphaOfLengthBetween(1, 8); + final String realmName = randomBoolean() ? "realm" : randomAlphaOfLengthBetween(1, 16); + final String type = randomAlphaOfLengthBetween(5, 16); + Authentication authentication = new Authentication(new User("test", "role"), new RealmRef(realmName, type, nodeName), null); + authentication.writeToContext(threadContext); + threadContext.putTransient(ORIGINATING_ACTION_KEY, "action"); + threadContext.putTransient(ROLE_NAMES_KEY, authentication.getUser().roles()); + final InternalScrollSearchRequest request = new InternalScrollSearchRequest(); + SearchContextMissingException expected = + expectThrows(SearchContextMissingException.class, () -> listener.validateSearchContext(testSearchContext, request)); + assertEquals(testSearchContext.id(), expected.id()); + verify(licenseState, times(3)).isAuthAllowed(); + verify(licenseState, times(3)).isSecurityEnabled(); + verify(auditTrailService).accessDenied(authentication, "action", request, authentication.getUser().roles()); + } + + // another user running as the original user + try (StoredContext ignore = threadContext.newStoredContext(false)) { + final String nodeName = randomBoolean() ? "node" : randomAlphaOfLengthBetween(1, 8); + final String realmName = randomBoolean() ? "realm" : randomAlphaOfLengthBetween(1, 16); + final String type = randomAlphaOfLengthBetween(5, 16); + User user = new User(new User("test", "role"), new User("authenticated", "runas")); + Authentication authentication = new Authentication(user, new RealmRef(realmName, type, nodeName), + new RealmRef(randomAlphaOfLengthBetween(1, 16), "file", nodeName)); + authentication.writeToContext(threadContext); + threadContext.putTransient(ORIGINATING_ACTION_KEY, "action"); + final InternalScrollSearchRequest request = new InternalScrollSearchRequest(); + listener.validateSearchContext(testSearchContext, request); + verify(licenseState, times(4)).isAuthAllowed(); + verify(licenseState, times(4)).isSecurityEnabled(); + verifyNoMoreInteractions(auditTrailService); + } + + // the user that authenticated for the run as request + try (StoredContext ignore = threadContext.newStoredContext(false)) { + final String nodeName = randomBoolean() ? "node" : randomAlphaOfLengthBetween(1, 8); + final String realmName = randomBoolean() ? 
"realm" : randomAlphaOfLengthBetween(1, 16); + final String type = randomAlphaOfLengthBetween(5, 16); + Authentication authentication = + new Authentication(new User("authenticated", "runas"), new RealmRef(realmName, type, nodeName), null); + authentication.writeToContext(threadContext); + threadContext.putTransient(ORIGINATING_ACTION_KEY, "action"); + threadContext.putTransient(ROLE_NAMES_KEY, authentication.getUser().roles()); + final InternalScrollSearchRequest request = new InternalScrollSearchRequest(); + SearchContextMissingException expected = + expectThrows(SearchContextMissingException.class, () -> listener.validateSearchContext(testSearchContext, request)); + assertEquals(testSearchContext.id(), expected.id()); + verify(licenseState, times(5)).isAuthAllowed(); + verify(licenseState, times(5)).isSecurityEnabled(); + verify(auditTrailService).accessDenied(authentication, "action", request, authentication.getUser().roles()); + } + } + + public void testEnsuredAuthenticatedUserIsSame() { + Authentication original = new Authentication(new User("test", "role"), new RealmRef("realm", "file", "node"), null); + Authentication current = + randomBoolean() ? original : new Authentication(new User("test", "role"), new RealmRef("realm", "file", "node"), null); + long id = randomLong(); + final String action = randomAlphaOfLength(4); + TransportRequest request = Empty.INSTANCE; + AuditTrailService auditTrail = mock(AuditTrailService.class); + + ensureAuthenticatedUserIsSame(original, current, auditTrail, id, action, request, original.getUser().roles()); + verifyZeroInteractions(auditTrail); + + // original user being run as + User user = new User(new User("test", "role"), new User("authenticated", "runas")); + current = new Authentication(user, new RealmRef("realm", "file", "node"), + new RealmRef(randomAlphaOfLengthBetween(1, 16), "file", "node")); + ensureAuthenticatedUserIsSame(original, current, auditTrail, id, action, request, original.getUser().roles()); + verifyZeroInteractions(auditTrail); + + // both user are run as + current = new Authentication(user, new RealmRef("realm", "file", "node"), + new RealmRef(randomAlphaOfLengthBetween(1, 16), "file", "node")); + Authentication runAs = current; + ensureAuthenticatedUserIsSame(runAs, current, auditTrail, id, action, request, original.getUser().roles()); + verifyZeroInteractions(auditTrail); + + // different authenticated by type + Authentication differentRealmType = + new Authentication(new User("test", "role"), new RealmRef("realm", randomAlphaOfLength(5), "node"), null); + SearchContextMissingException e = expectThrows(SearchContextMissingException.class, + () -> ensureAuthenticatedUserIsSame(original, differentRealmType, auditTrail, id, action, request, + original.getUser().roles())); + assertEquals(id, e.id()); + verify(auditTrail).accessDenied(differentRealmType, action, request, original.getUser().roles()); + + // wrong user + Authentication differentUser = + new Authentication(new User("test2", "role"), new RealmRef("realm", "realm", "node"), null); + e = expectThrows(SearchContextMissingException.class, + () -> ensureAuthenticatedUserIsSame(original, differentUser, auditTrail, id, action, request, original.getUser().roles())); + assertEquals(id, e.id()); + verify(auditTrail).accessDenied(differentUser, action, request, original.getUser().roles()); + + // run as different user + Authentication diffRunAs = new Authentication(new User(new User("test2", "role"), new User("authenticated", "runas")), + new RealmRef("realm", "file", 
"node1"), new RealmRef("realm", "file", "node1")); + e = expectThrows(SearchContextMissingException.class, + () -> ensureAuthenticatedUserIsSame(original, diffRunAs, auditTrail, id, action, request, original.getUser().roles())); + assertEquals(id, e.id()); + verify(auditTrail).accessDenied(diffRunAs, action, request, original.getUser().roles()); + + // run as different looked up by type + Authentication runAsDiffType = new Authentication(user, new RealmRef("realm", "file", "node"), + new RealmRef(randomAlphaOfLengthBetween(1, 16), randomAlphaOfLengthBetween(5, 12), "node")); + e = expectThrows(SearchContextMissingException.class, + () -> ensureAuthenticatedUserIsSame(runAs, runAsDiffType, auditTrail, id, action, request, original.getUser().roles())); + assertEquals(id, e.id()); + verify(auditTrail).accessDenied(runAsDiffType, action, request, original.getUser().roles()); + } + + static class TestScrollSearchContext extends TestSearchContext { + + private ScrollContext scrollContext; + + TestScrollSearchContext() { + super(null); + } + + @Override + public ScrollContext scrollContext() { + return scrollContext; + } + + @Override + public SearchContext scrollContext(ScrollContext scrollContext) { + this.scrollContext = scrollContext; + return this; + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/WriteActionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/WriteActionsTests.java new file mode 100644 index 0000000000000..1f40f1c480f6b --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/WriteActionsTests.java @@ -0,0 +1,164 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authz; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.bulk.BulkAction; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.update.UpdateAction; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.Requests; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.engine.DocumentMissingException; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; + +import static org.elasticsearch.test.SecurityTestsUtils.assertAuthorizationExceptionDefaultUsers; +import static org.elasticsearch.test.SecurityTestsUtils.assertThrowsAuthorizationExceptionDefaultUsers; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.core.IsInstanceOf.instanceOf; + +public class WriteActionsTests extends SecurityIntegTestCase { + + @Override + protected String configRoles() { + return SecuritySettingsSource.TEST_ROLE + ":\n" + + " cluster: [ ALL ]\n" + + " indices:\n" + + " - names: 'missing'\n" + + " privileges: [ 'indices:admin/create', 'indices:admin/delete' ]\n" + + " - names: ['/index.*/']\n" + + " privileges: [ manage ]\n" + + " - names: ['/test.*/']\n" + + " privileges: [ manage, write ]\n" + + " - names: '/test.*/'\n" + + " privileges: [ read ]\n"; + } + + public void testIndex() { + createIndex("test1", "index1"); + client().prepareIndex("test1", "type", "id").setSource("field", "value").get(); + + assertThrowsAuthorizationExceptionDefaultUsers(client().prepareIndex("index1", "type", "id").setSource("field", "value")::get, + BulkAction.NAME + "[s]"); + + client().prepareIndex("test4", "type", "id").setSource("field", "value").get(); + //the missing index gets automatically created (user has permissions for that), but indexing fails due to missing authorization + assertThrowsAuthorizationExceptionDefaultUsers(client().prepareIndex("missing", "type", "id").setSource("field", "value")::get, + BulkAction.NAME + "[s]"); + } + + public void testDelete() { + createIndex("test1", "index1"); + client().prepareIndex("test1", "type", "id").setSource("field", "value").get(); + assertEquals(RestStatus.OK, client().prepareDelete("test1", "type", "id").get().status()); + + assertThrowsAuthorizationExceptionDefaultUsers(client().prepareDelete("index1", "type", "id")::get, BulkAction.NAME + "[s]"); + + expectThrows(IndexNotFoundException.class, () -> client().prepareDelete("test4", "type", "id").get()); + } + + public void testUpdate() { + createIndex("test1", "index1"); + client().prepareIndex("test1", "type", "id").setSource("field", "value").get(); + assertEquals(RestStatus.OK, client().prepareUpdate("test1", "type", "id") + .setDoc(Requests.INDEX_CONTENT_TYPE, "field2", "value2").get().status()); + + assertThrowsAuthorizationExceptionDefaultUsers(client().prepareUpdate("index1", "type", "id") + .setDoc(Requests.INDEX_CONTENT_TYPE, "field2", "value2")::get, UpdateAction.NAME); + + expectThrows(DocumentMissingException.class, () -> client().prepareUpdate("test4", "type", "id") + .setDoc(Requests.INDEX_CONTENT_TYPE, "field2", "value2").get()); + + assertThrowsAuthorizationExceptionDefaultUsers(client().prepareUpdate("missing", "type", "id") + 
.setDoc(Requests.INDEX_CONTENT_TYPE, "field2", "value2")::get, UpdateAction.NAME); + } + + public void testBulk() { + createIndex("test1", "test2", "test3", "index1"); + BulkResponse bulkResponse = client().prepareBulk() + .add(new IndexRequest("test1", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value")) + .add(new IndexRequest("index1", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value")) + .add(new IndexRequest("test4", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value")) + .add(new IndexRequest("missing", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value")) + .add(new DeleteRequest("test1", "type", "id")) + .add(new DeleteRequest("index1", "type", "id")) + .add(new DeleteRequest("test4", "type", "id")) + .add(new DeleteRequest("missing", "type", "id")) + .add(new IndexRequest("test1", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value")) + .add(new UpdateRequest("test1", "type", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value")) + .add(new UpdateRequest("index1", "type", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value")) + .add(new UpdateRequest("test4", "type", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value")) + .add(new UpdateRequest("missing", "type", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value")).get(); + assertTrue(bulkResponse.hasFailures()); + assertEquals(13, bulkResponse.getItems().length); + assertFalse(bulkResponse.getItems()[0].isFailed()); + assertEquals(DocWriteRequest.OpType.INDEX, bulkResponse.getItems()[0].getOpType()); + assertEquals("test1", bulkResponse.getItems()[0].getIndex()); + assertTrue(bulkResponse.getItems()[1].isFailed()); + assertEquals(DocWriteRequest.OpType.INDEX, bulkResponse.getItems()[1].getOpType()); + assertEquals("index1", bulkResponse.getItems()[1].getFailure().getIndex()); + assertAuthorizationExceptionDefaultUsers(bulkResponse.getItems()[1].getFailure().getCause(), BulkAction.NAME + "[s]"); + assertThat(bulkResponse.getItems()[1].getFailure().getCause().getMessage(), + containsString("[indices:data/write/bulk[s]] is unauthorized")); + assertFalse(bulkResponse.getItems()[2].isFailed()); + assertEquals(DocWriteRequest.OpType.INDEX, bulkResponse.getItems()[2].getOpType()); + assertEquals("test4", bulkResponse.getItems()[2].getResponse().getIndex()); + assertTrue(bulkResponse.getItems()[3].isFailed()); + assertEquals(DocWriteRequest.OpType.INDEX, bulkResponse.getItems()[3].getOpType()); + //the missing index gets automatically created (user has permissions for that), but indexing fails due to missing authorization + assertEquals("missing", bulkResponse.getItems()[3].getFailure().getIndex()); + assertThat(bulkResponse.getItems()[3].getFailure().getCause(), instanceOf(ElasticsearchSecurityException.class)); + assertAuthorizationExceptionDefaultUsers(bulkResponse.getItems()[3].getFailure().getCause(), BulkAction.NAME + "[s]"); + assertThat(bulkResponse.getItems()[3].getFailure().getCause().getMessage(), + containsString("[indices:data/write/bulk[s]] is unauthorized")); + assertFalse(bulkResponse.getItems()[4].isFailed()); + assertEquals(DocWriteRequest.OpType.DELETE, bulkResponse.getItems()[4].getOpType()); + assertEquals("test1", bulkResponse.getItems()[4].getIndex()); + assertTrue(bulkResponse.getItems()[5].isFailed()); + assertEquals(DocWriteRequest.OpType.DELETE, bulkResponse.getItems()[5].getOpType()); + assertEquals("index1", bulkResponse.getItems()[5].getFailure().getIndex()); + 
assertAuthorizationExceptionDefaultUsers(bulkResponse.getItems()[5].getFailure().getCause(), BulkAction.NAME + "[s]"); + assertThat(bulkResponse.getItems()[5].getFailure().getCause().getMessage(), + containsString("[indices:data/write/bulk[s]] is unauthorized")); + assertFalse(bulkResponse.getItems()[6].isFailed()); + assertEquals(DocWriteRequest.OpType.DELETE, bulkResponse.getItems()[6].getOpType()); + assertEquals("test4", bulkResponse.getItems()[6].getIndex()); + assertTrue(bulkResponse.getItems()[7].isFailed()); + assertEquals(DocWriteRequest.OpType.DELETE, bulkResponse.getItems()[7].getOpType()); + assertEquals("missing", bulkResponse.getItems()[7].getFailure().getIndex()); + assertAuthorizationExceptionDefaultUsers(bulkResponse.getItems()[7].getFailure().getCause(), BulkAction.NAME + "[s]"); + assertThat(bulkResponse.getItems()[7].getFailure().getCause().getMessage(), + containsString("[indices:data/write/bulk[s]] is unauthorized")); + assertFalse(bulkResponse.getItems()[8].isFailed()); + assertEquals(DocWriteRequest.OpType.INDEX, bulkResponse.getItems()[8].getOpType()); + assertEquals("test1", bulkResponse.getItems()[8].getIndex()); + assertFalse(bulkResponse.getItems()[9].isFailed()); + assertEquals(DocWriteRequest.OpType.UPDATE, bulkResponse.getItems()[9].getOpType()); + assertEquals("test1", bulkResponse.getItems()[9].getIndex()); + assertTrue(bulkResponse.getItems()[10].isFailed()); + assertEquals(DocWriteRequest.OpType.UPDATE, bulkResponse.getItems()[10].getOpType()); + assertEquals("index1", bulkResponse.getItems()[10].getFailure().getIndex()); + assertAuthorizationExceptionDefaultUsers(bulkResponse.getItems()[10].getFailure().getCause(), BulkAction.NAME + "[s]"); + assertThat(bulkResponse.getItems()[10].getFailure().getCause().getMessage(), + containsString("[indices:data/write/bulk[s]] is unauthorized")); + assertTrue(bulkResponse.getItems()[11].isFailed()); + assertEquals(DocWriteRequest.OpType.UPDATE, bulkResponse.getItems()[11].getOpType()); + assertEquals("test4", bulkResponse.getItems()[11].getIndex()); + assertThat(bulkResponse.getItems()[11].getFailure().getCause(), instanceOf(DocumentMissingException.class)); + assertTrue(bulkResponse.getItems()[12].isFailed()); + assertEquals(DocWriteRequest.OpType.UPDATE, bulkResponse.getItems()[12].getOpType()); + assertEquals("missing", bulkResponse.getItems()[12].getFailure().getIndex()); + assertThat(bulkResponse.getItems()[12].getFailure().getCause(), instanceOf(ElasticsearchSecurityException.class)); + assertAuthorizationExceptionDefaultUsers(bulkResponse.getItems()[12].getFailure().getCause(), BulkAction.NAME + "[s]"); + assertThat(bulkResponse.getItems()[12].getFailure().getCause().getMessage(), + containsString("[indices:data/write/bulk[s]] is unauthorized")); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/FieldDataCacheWithFieldSubsetReaderTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/FieldDataCacheWithFieldSubsetReaderTests.java new file mode 100644 index 0000000000000..6c18e473a0ca1 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/FieldDataCacheWithFieldSubsetReaderTests.java @@ -0,0 +1,194 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authz.accesscontrol; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.automaton.Automata; +import org.apache.lucene.util.automaton.CharacterRunAutomaton; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.fielddata.AtomicFieldData; +import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexFieldDataCache; +import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; +import org.elasticsearch.index.fielddata.plain.AbstractAtomicOrdinalsFieldData; +import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData; +import org.elasticsearch.index.fielddata.plain.SortedSetDVOrdinalsIndexFieldData; +import org.elasticsearch.index.mapper.TextFieldMapper; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.FieldSubsetReader; +import org.junit.After; +import org.junit.Before; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +public class FieldDataCacheWithFieldSubsetReaderTests extends ESTestCase { + + private SortedSetDVOrdinalsIndexFieldData sortedSetDVOrdinalsIndexFieldData; + private PagedBytesIndexFieldData pagedBytesIndexFieldData; + + private DirectoryReader ir; + + private long numDocs; + private Directory dir; + private DummyAccountingFieldDataCache indexFieldDataCache; + + @Before + public void setup() throws Exception { + IndexSettings indexSettings = createIndexSettings(); + CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); + String name = "_field"; + indexFieldDataCache = new DummyAccountingFieldDataCache(); + sortedSetDVOrdinalsIndexFieldData = new SortedSetDVOrdinalsIndexFieldData(indexSettings,indexFieldDataCache, name, + circuitBreakerService, AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION); + pagedBytesIndexFieldData = new PagedBytesIndexFieldData(indexSettings, name, indexFieldDataCache, + circuitBreakerService, TextFieldMapper.Defaults.FIELDDATA_MIN_FREQUENCY, + TextFieldMapper.Defaults.FIELDDATA_MAX_FREQUENCY, + TextFieldMapper.Defaults.FIELDDATA_MIN_SEGMENT_SIZE); + + dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + iwc.setMergePolicy(NoMergePolicy.INSTANCE); + IndexWriter iw = new IndexWriter(dir, iwc); + numDocs = scaledRandomIntBetween(32, 128); + + for (int i = 1; i <= numDocs; i++) { + Document doc = new Document(); + doc.add(new StringField("_field", String.valueOf(i), Field.Store.NO)); + doc.add(new 
SortedSetDocValuesField("_field", new BytesRef(String.valueOf(i)))); + iw.addDocument(doc); + if (i % 24 == 0) { + iw.commit(); + } + } + iw.close(); + ir = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(dir), new ShardId(indexSettings.getIndex(), 0)); + } + + @After + public void destroy() throws Exception { + ir.close(); + dir.close(); + } + + public void testSortedSetDVOrdinalsIndexFieldData_global() throws Exception { + assertThat(indexFieldDataCache.topLevelBuilds, equalTo(0)); + IndexOrdinalsFieldData global = sortedSetDVOrdinalsIndexFieldData.loadGlobal(ir); + AtomicOrdinalsFieldData atomic = global.load(ir.leaves().get(0)); + assertThat(atomic.getOrdinalsValues().getValueCount(), equalTo(numDocs)); + assertThat(indexFieldDataCache.topLevelBuilds, equalTo(1)); + + DirectoryReader ir = FieldSubsetReader.wrap(this.ir, new CharacterRunAutomaton(Automata.makeEmpty())); + global = sortedSetDVOrdinalsIndexFieldData.loadGlobal(ir); + atomic = global.load(ir.leaves().get(0)); + assertThat(atomic.getOrdinalsValues().getValueCount(), equalTo(0L)); + assertThat(indexFieldDataCache.topLevelBuilds, equalTo(1)); + } + + public void testSortedSetDVOrdinalsIndexFieldData_segment() throws Exception { + for (LeafReaderContext context : ir.leaves()) { + AtomicOrdinalsFieldData atomic = sortedSetDVOrdinalsIndexFieldData.load(context); + assertThat(atomic.getOrdinalsValues().getValueCount(), greaterThanOrEqualTo(1L)); + } + + DirectoryReader ir = FieldSubsetReader.wrap(this.ir, new CharacterRunAutomaton(Automata.makeEmpty())); + for (LeafReaderContext context : ir.leaves()) { + AtomicOrdinalsFieldData atomic = sortedSetDVOrdinalsIndexFieldData.load(context); + assertThat(atomic.getOrdinalsValues().getValueCount(), equalTo(0L)); + } + // dv based field data doesn't use index field data cache, so in the end noting should have been added + assertThat(indexFieldDataCache.leafLevelBuilds, equalTo(0)); + } + + public void testPagedBytesIndexFieldData_global() throws Exception { + assertThat(indexFieldDataCache.topLevelBuilds, equalTo(0)); + IndexOrdinalsFieldData global = pagedBytesIndexFieldData.loadGlobal(ir); + AtomicOrdinalsFieldData atomic = global.load(ir.leaves().get(0)); + assertThat(atomic.getOrdinalsValues().getValueCount(), equalTo(numDocs)); + assertThat(indexFieldDataCache.topLevelBuilds, equalTo(1)); + + DirectoryReader ir = FieldSubsetReader.wrap(this.ir, new CharacterRunAutomaton(Automata.makeEmpty())); + global = pagedBytesIndexFieldData.loadGlobal(ir); + atomic = global.load(ir.leaves().get(0)); + assertThat(atomic.getOrdinalsValues().getValueCount(), equalTo(0L)); + assertThat(indexFieldDataCache.topLevelBuilds, equalTo(1)); + } + + public void testPagedBytesIndexFieldData_segment() throws Exception { + assertThat(indexFieldDataCache.leafLevelBuilds, equalTo(0)); + for (LeafReaderContext context : ir.leaves()) { + AtomicOrdinalsFieldData atomic = pagedBytesIndexFieldData.load(context); + assertThat(atomic.getOrdinalsValues().getValueCount(), greaterThanOrEqualTo(1L)); + } + assertThat(indexFieldDataCache.leafLevelBuilds, equalTo(ir.leaves().size())); + + DirectoryReader ir = FieldSubsetReader.wrap(this.ir, new CharacterRunAutomaton(Automata.makeEmpty())); + for (LeafReaderContext context : ir.leaves()) { + AtomicOrdinalsFieldData atomic = pagedBytesIndexFieldData.load(context); + assertThat(atomic.getOrdinalsValues().getValueCount(), equalTo(0L)); + } + assertThat(indexFieldDataCache.leafLevelBuilds, equalTo(ir.leaves().size())); + } + + private IndexSettings createIndexSettings() { 
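+ // minimal single-shard, zero-replica index metadata on the current version; this is all the field data implementations under test need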
+ Settings settings = Settings.EMPTY; + IndexMetaData indexMetaData = IndexMetaData.builder("_name") + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0) + .creationDate(System.currentTimeMillis()) + .build(); + return new IndexSettings(indexMetaData, settings); + } + + private static class DummyAccountingFieldDataCache implements IndexFieldDataCache { + + private int leafLevelBuilds = 0; + private int topLevelBuilds = 0; + + @Override + public > FD load(LeafReaderContext context, IFD indexFieldData) + throws Exception { + leafLevelBuilds++; + return indexFieldData.loadDirect(context); + } + + @Override + public > IFD load(DirectoryReader indexReader, + IFD indexFieldData) throws Exception { + topLevelBuilds++; + return (IFD) indexFieldData.localGlobalDirect(indexReader); + } + + @Override + public void clear() { + } + + @Override + public void clear(String fieldName) { + } + + } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/FieldExtractorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/FieldExtractorTests.java new file mode 100644 index 0000000000000..4f8a54f867817 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/FieldExtractorTests.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authz.accesscontrol; + +import org.apache.lucene.document.IntPoint; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.AssertingQuery; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.DisjunctionMaxQuery; +import org.apache.lucene.search.DocValuesFieldExistsQuery; +import org.apache.lucene.search.DocValuesNumbersQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.MultiPhraseQuery; +import org.apache.lucene.search.PhraseQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.SynonymQuery; +import org.apache.lucene.search.TermInSetQuery; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.spans.SpanTermQuery; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +/** Simple tests for query field extraction */ +public class FieldExtractorTests extends ESTestCase { + + public void testBoolean() { + Set fields = new HashSet<>(); + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + builder.add(new TermQuery(new Term("foo", "bar")), BooleanClause.Occur.MUST); + builder.add(new TermQuery(new Term("no", "baz")), BooleanClause.Occur.MUST_NOT); + FieldExtractor.extractFields(builder.build(), fields); + assertEquals(asSet("foo", "no"), fields); + } + + public void testDisjunctionMax() { + Set fields = new HashSet<>(); + DisjunctionMaxQuery query = new DisjunctionMaxQuery(Arrays.asList( + new TermQuery(new Term("one", 
"bar")), + new TermQuery(new Term("two", "baz")) + ), 1.0F); + FieldExtractor.extractFields(query, fields); + assertEquals(asSet("one", "two"), fields); + } + + public void testSpanTerm() { + Set fields = new HashSet<>(); + FieldExtractor.extractFields(new SpanTermQuery(new Term("foo", "bar")), fields); + assertEquals(asSet("foo"), fields); + } + + public void testTerm() { + Set fields = new HashSet<>(); + FieldExtractor.extractFields(new TermQuery(new Term("foo", "bar")), fields); + assertEquals(asSet("foo"), fields); + } + + public void testSynonym() { + Set fields = new HashSet<>(); + SynonymQuery query = new SynonymQuery(new Term("foo", "bar"), new Term("foo", "baz")); + FieldExtractor.extractFields(query, fields); + assertEquals(asSet("foo"), fields); + } + + public void testPhrase() { + Set fields = new HashSet<>(); + PhraseQuery.Builder builder = new PhraseQuery.Builder(); + builder.add(new Term("foo", "bar")); + builder.add(new Term("foo", "baz")); + FieldExtractor.extractFields(builder.build(), fields); + assertEquals(asSet("foo"), fields); + } + + public void testMultiPhrase() { + Set fields = new HashSet<>(); + MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder(); + builder.add(new Term("foo", "bar")); + builder.add(new Term[] { new Term("foo", "baz"), new Term("foo", "baz2") }); + FieldExtractor.extractFields(builder.build(), fields); + assertEquals(asSet("foo"), fields); + } + + public void testPointRange() { + Set fields = new HashSet<>(); + FieldExtractor.extractFields(IntPoint.newRangeQuery("foo", 3, 4), fields); + assertEquals(asSet("foo"), fields); + } + + public void testPointSet() { + Set fields = new HashSet<>(); + FieldExtractor.extractFields(IntPoint.newSetQuery("foo", 3, 4, 5), fields); + assertEquals(asSet("foo"), fields); + } + + public void testFieldValue() { + Set fields = new HashSet<>(); + FieldExtractor.extractFields(new DocValuesFieldExistsQuery("foo"), fields); + assertEquals(asSet("foo"), fields); + } + + public void testDocValuesNumbers() { + Set fields = new HashSet<>(); + FieldExtractor.extractFields(new DocValuesNumbersQuery("foo", 5L), fields); + assertEquals(asSet("foo"), fields); + } + + public void testTermInSet() { + Set fields = new HashSet<>(); + FieldExtractor.extractFields(new TermInSetQuery("foo", new BytesRef("baz"), new BytesRef("baz2")), fields); + assertEquals(asSet("foo"), fields); + } + + public void testMatchAllDocs() { + Set fields = new HashSet<>(); + FieldExtractor.extractFields(new MatchAllDocsQuery(), fields); + assertEquals(Collections.emptySet(), fields); + } + + public void testMatchNoDocs() { + Set fields = new HashSet<>(); + FieldExtractor.extractFields(new MatchNoDocsQuery(), fields); + assertEquals(Collections.emptySet(), fields); + } + + public void testUnsupported() { + Set fields = new HashSet<>(); + expectThrows(UnsupportedOperationException.class, () -> { + FieldExtractor.extractFields(new AssertingQuery(random(), new MatchAllDocsQuery()), fields); + }); + } + + public void testIndexOrDocValuesQuery() { + Set fields = new HashSet<>(); + Query supported = IntPoint.newExactQuery("foo", 42); + Query unsupported = NumericDocValuesField.newSlowExactQuery("bar", 3); + + IndexOrDocValuesQuery query = new IndexOrDocValuesQuery(supported, supported); + FieldExtractor.extractFields(query, fields); + assertEquals(asSet("foo"), fields); + + IndexOrDocValuesQuery query2 = new IndexOrDocValuesQuery(unsupported, unsupported); + expectThrows(UnsupportedOperationException.class, () -> FieldExtractor.extractFields(query2, 
new HashSet<>())); + + fields = new HashSet<>(); + IndexOrDocValuesQuery query3 = new IndexOrDocValuesQuery(supported, unsupported); + FieldExtractor.extractFields(query3, fields); + assertEquals(asSet("foo"), fields); + + fields = new HashSet<>(); + IndexOrDocValuesQuery query4 = new IndexOrDocValuesQuery(unsupported, supported); + FieldExtractor.extractFields(query4, fields); + assertEquals(asSet("foo"), fields); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesAccessControlTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesAccessControlTests.java new file mode 100644 index 0000000000000..40d3abb899b14 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesAccessControlTests.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authz.accesscontrol; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; + +import java.util.Collections; + +/** + * Unit tests for {@link IndicesAccessControl} + */ +public class IndicesAccessControlTests extends ESTestCase { + + public void testEmptyIndicesAccessControl() { + IndicesAccessControl indicesAccessControl = new IndicesAccessControl(true, Collections.emptyMap()); + assertTrue(indicesAccessControl.isGranted()); + assertNull(indicesAccessControl.getIndexPermissions(randomAlphaOfLengthBetween(3,20))); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java new file mode 100644 index 0000000000000..cacdbc5fa1016 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/IndicesPermissionTests.java @@ -0,0 +1,274 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authz.accesscontrol; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.Version; +import org.elasticsearch.action.get.GetAction; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; +import org.elasticsearch.xpack.core.security.authz.permission.IndicesPermission; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class IndicesPermissionTests extends ESTestCase { + + public void testAuthorize() { + IndexMetaData.Builder imbBuilder = IndexMetaData.builder("_index") + .settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + ) + .putAlias(AliasMetaData.builder("_alias")); + MetaData md = MetaData.builder().put(imbBuilder).build(); + FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY); + + // basics: + Set query = Collections.singleton(new BytesArray("{}")); + String[] fields = new String[]{"_field"}; + Role role = Role.builder("_role") + .add(new FieldPermissions(fieldPermissionDef(fields, null)), query, IndexPrivilege.ALL, "_index").build(); + IndicesAccessControl permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_index"), md, fieldPermissionsCache); + assertThat(permissions.getIndexPermissions("_index"), notNullValue()); + assertTrue(permissions.getIndexPermissions("_index").getFieldPermissions().grantsAccessTo("_field")); + assertTrue(permissions.getIndexPermissions("_index").getFieldPermissions().hasFieldLevelSecurity()); + assertThat(permissions.getIndexPermissions("_index").getQueries().size(), equalTo(1)); + assertThat(permissions.getIndexPermissions("_index").getQueries(), equalTo(query)); + + // no document level security: + role = Role.builder("_role") + .add(new FieldPermissions(fieldPermissionDef(fields, null)), null, IndexPrivilege.ALL, "_index").build(); + permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_index"), md, fieldPermissionsCache); + 
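+ // field level security is still enforced, but with no query defined on the group getQueries() should be null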
assertThat(permissions.getIndexPermissions("_index"), notNullValue()); + assertTrue(permissions.getIndexPermissions("_index").getFieldPermissions().grantsAccessTo("_field")); + assertTrue(permissions.getIndexPermissions("_index").getFieldPermissions().hasFieldLevelSecurity()); + assertThat(permissions.getIndexPermissions("_index").getQueries(), nullValue()); + + // no field level security: + role = Role.builder("_role").add(new FieldPermissions(), query, IndexPrivilege.ALL, "_index").build(); + permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_index"), md, fieldPermissionsCache); + assertThat(permissions.getIndexPermissions("_index"), notNullValue()); + assertFalse(permissions.getIndexPermissions("_index").getFieldPermissions().hasFieldLevelSecurity()); + assertThat(permissions.getIndexPermissions("_index").getQueries().size(), equalTo(1)); + assertThat(permissions.getIndexPermissions("_index").getQueries(), equalTo(query)); + + // index group associated with an alias: + role = Role.builder("_role") + .add(new FieldPermissions(fieldPermissionDef(fields, null)), query, IndexPrivilege.ALL, "_alias") + .build(); + permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_alias"), md, fieldPermissionsCache); + assertThat(permissions.getIndexPermissions("_index"), notNullValue()); + assertTrue(permissions.getIndexPermissions("_index").getFieldPermissions().grantsAccessTo("_field")); + assertTrue(permissions.getIndexPermissions("_index").getFieldPermissions().hasFieldLevelSecurity()); + assertThat(permissions.getIndexPermissions("_index").getQueries().size(), equalTo(1)); + assertThat(permissions.getIndexPermissions("_index").getQueries(), equalTo(query)); + + assertThat(permissions.getIndexPermissions("_alias"), notNullValue()); + assertTrue(permissions.getIndexPermissions("_alias").getFieldPermissions().grantsAccessTo("_field")); + assertTrue(permissions.getIndexPermissions("_alias").getFieldPermissions().hasFieldLevelSecurity()); + assertThat(permissions.getIndexPermissions("_alias").getQueries().size(), equalTo(1)); + assertThat(permissions.getIndexPermissions("_alias").getQueries(), equalTo(query)); + + // match all fields + String[] allFields = randomFrom(new String[]{"*"}, new String[]{"foo", "*"}, + new String[]{randomAlphaOfLengthBetween(1, 10), "*"}); + role = Role.builder("_role") + .add(new FieldPermissions(fieldPermissionDef(allFields, null)), query, IndexPrivilege.ALL, "_alias").build(); + permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_alias"), md, fieldPermissionsCache); + assertThat(permissions.getIndexPermissions("_index"), notNullValue()); + assertFalse(permissions.getIndexPermissions("_index").getFieldPermissions().hasFieldLevelSecurity()); + assertThat(permissions.getIndexPermissions("_index").getQueries().size(), equalTo(1)); + assertThat(permissions.getIndexPermissions("_index").getQueries(), equalTo(query)); + + assertThat(permissions.getIndexPermissions("_alias"), notNullValue()); + assertFalse(permissions.getIndexPermissions("_alias").getFieldPermissions().hasFieldLevelSecurity()); + assertThat(permissions.getIndexPermissions("_alias").getQueries().size(), equalTo(1)); + assertThat(permissions.getIndexPermissions("_alias").getQueries(), equalTo(query)); + + IndexMetaData.Builder imbBuilder1 = IndexMetaData.builder("_index_1") + .settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + ) + 
.putAlias(AliasMetaData.builder("_alias")); + md = MetaData.builder(md).put(imbBuilder1).build(); + + + // match all fields with more than one permission + Set fooQuery = Collections.singleton(new BytesArray("{foo}")); + allFields = randomFrom(new String[]{"*"}, new String[]{"foo", "*"}, + new String[]{randomAlphaOfLengthBetween(1, 10), "*"}); + role = Role.builder("_role") + .add(new FieldPermissions(fieldPermissionDef(allFields, null)), fooQuery, IndexPrivilege.ALL, "_alias") + .add(new FieldPermissions(fieldPermissionDef(allFields, null)), query, IndexPrivilege.ALL, "_alias").build(); + permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_alias"), md, fieldPermissionsCache); + Set bothQueries = Sets.union(fooQuery, query); + assertThat(permissions.getIndexPermissions("_index"), notNullValue()); + assertFalse(permissions.getIndexPermissions("_index").getFieldPermissions().hasFieldLevelSecurity()); + assertThat(permissions.getIndexPermissions("_index").getQueries().size(), equalTo(2)); + assertThat(permissions.getIndexPermissions("_index").getQueries(), equalTo(bothQueries)); + + assertThat(permissions.getIndexPermissions("_index_1"), notNullValue()); + assertFalse(permissions.getIndexPermissions("_index_1").getFieldPermissions().hasFieldLevelSecurity()); + assertThat(permissions.getIndexPermissions("_index_1").getQueries().size(), equalTo(2)); + assertThat(permissions.getIndexPermissions("_index_1").getQueries(), equalTo(bothQueries)); + + assertThat(permissions.getIndexPermissions("_alias"), notNullValue()); + assertFalse(permissions.getIndexPermissions("_alias").getFieldPermissions().hasFieldLevelSecurity()); + assertThat(permissions.getIndexPermissions("_alias").getQueries().size(), equalTo(2)); + assertThat(permissions.getIndexPermissions("_alias").getQueries(), equalTo(bothQueries)); + + } + + public void testAuthorizeMultipleGroupsMixedDls() { + IndexMetaData.Builder imbBuilder = IndexMetaData.builder("_index") + .settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + ) + .putAlias(AliasMetaData.builder("_alias")); + MetaData md = MetaData.builder().put(imbBuilder).build(); + FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY); + + Set query = Collections.singleton(new BytesArray("{}")); + String[] fields = new String[]{"_field"}; + Role role = Role.builder("_role") + .add(new FieldPermissions(fieldPermissionDef(fields, null)), query, IndexPrivilege.ALL, "_index") + .add(new FieldPermissions(fieldPermissionDef(null, null)), null, IndexPrivilege.ALL, "*") + .build(); + IndicesAccessControl permissions = role.authorize(SearchAction.NAME, Sets.newHashSet("_index"), md, fieldPermissionsCache); + assertThat(permissions.getIndexPermissions("_index"), notNullValue()); + assertTrue(permissions.getIndexPermissions("_index").getFieldPermissions().grantsAccessTo("_field")); + assertFalse(permissions.getIndexPermissions("_index").getFieldPermissions().hasFieldLevelSecurity()); + // null implies that there is no DLS. 
Currently a index permissions only has queries defined + // on it and not a true document level permission object like fields + assertNull(permissions.getIndexPermissions("_index").getQueries()); + } + + public void testIndicesPrivilegesStreaming() throws IOException { + BytesStreamOutput out = new BytesStreamOutput(); + String[] allowed = new String[]{randomAlphaOfLength(5) + "*", randomAlphaOfLength(5) + "*", randomAlphaOfLength(5) + "*"}; + String[] denied = new String[]{allowed[0] + randomAlphaOfLength(5), allowed[1] + randomAlphaOfLength(5), + allowed[2] + randomAlphaOfLength(5)}; + RoleDescriptor.IndicesPrivileges.Builder indicesPrivileges = RoleDescriptor.IndicesPrivileges.builder(); + indicesPrivileges.grantedFields(allowed); + indicesPrivileges.deniedFields(denied); + indicesPrivileges.query("{match_all:{}}"); + indicesPrivileges.indices(randomAlphaOfLength(5), randomAlphaOfLength(5), randomAlphaOfLength(5)); + indicesPrivileges.privileges("all", "read", "priv"); + indicesPrivileges.build().writeTo(out); + out.close(); + StreamInput in = out.bytes().streamInput(); + RoleDescriptor.IndicesPrivileges readIndicesPrivileges = RoleDescriptor.IndicesPrivileges.createFrom(in); + assertEquals(readIndicesPrivileges, indicesPrivileges.build()); + + out = new BytesStreamOutput(); + out.setVersion(Version.V_5_0_0); + indicesPrivileges = RoleDescriptor.IndicesPrivileges.builder(); + indicesPrivileges.grantedFields(allowed); + indicesPrivileges.deniedFields(denied); + indicesPrivileges.query("{match_all:{}}"); + indicesPrivileges.indices(readIndicesPrivileges.getIndices()); + indicesPrivileges.privileges("all", "read", "priv"); + indicesPrivileges.build().writeTo(out); + out.close(); + in = out.bytes().streamInput(); + in.setVersion(Version.V_5_0_0); + RoleDescriptor.IndicesPrivileges readIndicesPrivileges2 = RoleDescriptor.IndicesPrivileges.createFrom(in); + assertEquals(readIndicesPrivileges, readIndicesPrivileges2); + } + + // tests that field permissions are merged correctly when we authorize with several groups and don't crash when an index has no group + public void testCorePermissionAuthorize() { + final Settings indexSettings = Settings.builder().put("index.version.created", Version.CURRENT).build(); + final MetaData metaData = new MetaData.Builder() + .put(new IndexMetaData.Builder("a1").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) + .put(new IndexMetaData.Builder("a2").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) + .build(); + + FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY); + IndicesPermission.Group group1 = new IndicesPermission.Group(IndexPrivilege.ALL, new FieldPermissions(), null, "a1"); + IndicesPermission.Group group2 = new IndicesPermission.Group(IndexPrivilege.ALL, + new FieldPermissions(fieldPermissionDef(null, new String[]{"denied_field"})), null, "a1"); + IndicesPermission core = new IndicesPermission(group1, group2); + Map authzMap = + core.authorize(SearchAction.NAME, Sets.newHashSet("a1", "ba"), metaData, fieldPermissionsCache); + assertTrue(authzMap.get("a1").getFieldPermissions().grantsAccessTo("denied_field")); + assertTrue(authzMap.get("a1").getFieldPermissions().grantsAccessTo(randomAlphaOfLength(5))); + // did not define anything for ba so we allow all + assertFalse(authzMap.get("ba").getFieldPermissions().hasFieldLevelSecurity()); + + assertTrue(core.check(SearchAction.NAME)); + assertFalse(core.check("unknown")); + + // test with two indices + group1 = new 
IndicesPermission.Group(IndexPrivilege.ALL, new FieldPermissions(), null, "a1"); + group2 = new IndicesPermission.Group(IndexPrivilege.ALL, + new FieldPermissions(fieldPermissionDef(null, new String[]{"denied_field"})), null, "a1"); + IndicesPermission.Group group3 = new IndicesPermission.Group(IndexPrivilege.ALL, + new FieldPermissions(fieldPermissionDef(new String[]{"*_field"}, new String[]{"denied_field"})), null, "a2"); + IndicesPermission.Group group4 = new IndicesPermission.Group(IndexPrivilege.ALL, + new FieldPermissions(fieldPermissionDef(new String[]{"*_field2"}, new String[]{"denied_field2"})), null, "a2"); + core = new IndicesPermission(group1, group2, group3, group4); + authzMap = core.authorize(SearchAction.NAME, Sets.newHashSet("a1", "a2"), metaData, fieldPermissionsCache); + assertFalse(authzMap.get("a1").getFieldPermissions().hasFieldLevelSecurity()); + assertFalse(authzMap.get("a2").getFieldPermissions().grantsAccessTo("denied_field2")); + assertFalse(authzMap.get("a2").getFieldPermissions().grantsAccessTo("denied_field")); + assertTrue(authzMap.get("a2").getFieldPermissions().grantsAccessTo(randomAlphaOfLength(5) + "_field")); + assertTrue(authzMap.get("a2").getFieldPermissions().grantsAccessTo(randomAlphaOfLength(5) + "_field2")); + assertTrue(authzMap.get("a2").getFieldPermissions().hasFieldLevelSecurity()); + + assertTrue(core.check(SearchAction.NAME)); + assertFalse(core.check("unknown")); + } + + public void testErrorMessageIfIndexPatternIsTooComplex() { + List indices = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + String prefix = randomAlphaOfLengthBetween(4, 12); + String suffixBegin = randomAlphaOfLengthBetween(12, 36); + indices.add("*" + prefix + "*" + suffixBegin + "*"); + } + final ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, + () -> new IndicesPermission.Group(IndexPrivilege.ALL, new FieldPermissions(), null, indices.toArray(Strings.EMPTY_ARRAY))); + assertThat(e.getMessage(), containsString(indices.get(0))); + assertThat(e.getMessage(), containsString("too complex to evaluate")); + } + + private static FieldPermissionsDefinition fieldPermissionDef(String[] granted, String[] denied) { + return new FieldPermissionsDefinition(granted, denied); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java new file mode 100644 index 0000000000000..fe180c9c5ccee --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authz.accesscontrol; + +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.Weight; +import org.apache.lucene.store.Directory; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.HashSet; + +/** Simple tests for opt out query cache*/ +public class OptOutQueryCacheTests extends ESTestCase { + IndexSearcher searcher; + Directory dir; + RandomIndexWriter w; + DirectoryReader reader; + + @Before + void initLuceneStuff() throws IOException { + dir = newDirectory(); + w = new RandomIndexWriter(random(), dir); + reader = w.getReader(); + searcher = newSearcher(reader); + } + + @After + void closeLuceneStuff() throws IOException { + w.close(); + dir.close(); + reader.close(); + } + public void testOptOutQueryCacheSafetyCheck() throws IOException { + + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + builder.add(new TermQuery(new Term("foo", "bar")), BooleanClause.Occur.MUST); + builder.add(new TermQuery(new Term("no", "baz")), BooleanClause.Occur.MUST_NOT); + Weight weight = builder.build().createWeight(searcher, false, 1f); + + // whenever the allowed fields match the fields in the query and we do not deny access to any fields we allow caching. 
+ IndicesAccessControl.IndexAccessControl permissions = new IndicesAccessControl.IndexAccessControl(true, + new FieldPermissions(fieldPermissionDef(new String[]{"foo", "no"}, null)), new HashSet<>()); + assertTrue(OptOutQueryCache.cachingIsSafe(weight, permissions)); + + permissions = new IndicesAccessControl.IndexAccessControl(true, + new FieldPermissions(fieldPermissionDef(new String[]{"foo", "no"}, new String[]{})), new HashSet<>()); + assertTrue(OptOutQueryCache.cachingIsSafe(weight, permissions)); + + permissions = new IndicesAccessControl.IndexAccessControl(true, + new FieldPermissions(fieldPermissionDef(new String[]{"*"}, new String[]{})), new HashSet<>()); + assertTrue(OptOutQueryCache.cachingIsSafe(weight, permissions)); + + permissions = new IndicesAccessControl.IndexAccessControl(true, + new FieldPermissions(fieldPermissionDef(new String[]{"*"}, null)), new HashSet<>()); + assertTrue(OptOutQueryCache.cachingIsSafe(weight, permissions)); + + permissions = new IndicesAccessControl.IndexAccessControl(true, + new FieldPermissions(fieldPermissionDef(new String[]{"*"}, new String[]{"oof"})), new HashSet<>()); + assertTrue(OptOutQueryCache.cachingIsSafe(weight, permissions)); + + permissions = new IndicesAccessControl.IndexAccessControl(true, + new FieldPermissions(fieldPermissionDef(new String[]{"f*", "n*"}, new String[]{})), new HashSet<>()); + assertTrue(OptOutQueryCache.cachingIsSafe(weight, permissions)); + + // check we don't cache if a field is not allowed + permissions = new IndicesAccessControl.IndexAccessControl(true, + new FieldPermissions(fieldPermissionDef(new String[]{"foo"}, null)), new HashSet<>()); + assertFalse(OptOutQueryCache.cachingIsSafe(weight, permissions)); + + permissions = new IndicesAccessControl.IndexAccessControl(true, + new FieldPermissions(fieldPermissionDef(new String[]{"a*"}, new String[]{"aa"})), new HashSet<>()); + assertFalse(OptOutQueryCache.cachingIsSafe(weight, permissions)); + + permissions = new IndicesAccessControl.IndexAccessControl(true, + new FieldPermissions(fieldPermissionDef(null, new String[]{"no"})), new HashSet<>()); + assertFalse(OptOutQueryCache.cachingIsSafe(weight, permissions)); + + permissions = new IndicesAccessControl.IndexAccessControl(true, + new FieldPermissions(fieldPermissionDef(null, new String[]{"*"})), new HashSet<>()); + assertFalse(OptOutQueryCache.cachingIsSafe(weight, permissions)); + + permissions = new IndicesAccessControl.IndexAccessControl(true, + new FieldPermissions(fieldPermissionDef(new String[]{"foo", "no"}, new String[]{"no"})), new HashSet<>()); + assertFalse(OptOutQueryCache.cachingIsSafe(weight, permissions)); + + permissions = new IndicesAccessControl.IndexAccessControl(true, + new FieldPermissions(fieldPermissionDef(new String[]{}, new String[]{})), new HashSet<>()); + assertFalse(OptOutQueryCache.cachingIsSafe(weight, permissions)); + + permissions = new IndicesAccessControl.IndexAccessControl(true, + new FieldPermissions(fieldPermissionDef(new String[]{}, null)), new HashSet<>()); + assertFalse(OptOutQueryCache.cachingIsSafe(weight, permissions)); + } + + private static FieldPermissionsDefinition fieldPermissionDef(String[] granted, String[] denied) { + return new FieldPermissionsDefinition(granted, denied); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/SetSecurityUserProcessorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/SetSecurityUserProcessorTests.java new file mode 100644 
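OptOutQueryCacheTests above pins down the rule that a query may only go through the shared query cache when field-level security cannot change its outcome: every field the query touches must be granted, and nothing it touches may be excluded. The sketch below restates that decision in plain Java; the real check inspects the Lucene `Weight` to find the fields, whereas here the field set is passed in directly and all names are illustrative.

```java
import java.util.Set;
import java.util.function.Predicate;

// Conceptual sketch: caching is only safe when every field the query uses is
// visible under the effective field-level security. The real OptOutQueryCache
// works from the Lucene Weight; here the field set is supplied directly.
final class QueryCacheOptOutSketch {

    static boolean cachingIsSafe(Set<String> queryFields, Predicate<String> fieldIsGranted) {
        // If any field used by the query is hidden, cached entries could differ
        // between users, so opt out of caching.
        return queryFields.stream().allMatch(fieldIsGranted);
    }

    public static void main(String[] args) {
        Set<String> fields = Set.of("foo", "no");

        // All query fields granted -> safe to cache (mirrors the grant ["foo","no"] cases above).
        System.out.println(cachingIsSafe(fields, f -> true));            // true
        // "no" is not granted -> unsafe (mirrors the grant ["foo"] case above).
        System.out.println(cachingIsSafe(fields, f -> f.equals("foo"))); // false
    }
}
```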
index 0000000000000..c9ef169a375a5 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/SetSecurityUserProcessorTests.java @@ -0,0 +1,152 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authz.accesscontrol; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.AuthenticationField; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.SetSecurityUserProcessor; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.SetSecurityUserProcessor.Property; +import org.elasticsearch.xpack.core.security.user.User; + +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class SetSecurityUserProcessorTests extends ESTestCase { + + public void testProcessor() throws Exception { + User user = new User("_username", new String[]{"role1", "role2"}, "firstname lastname", "_email", + Collections.singletonMap("key", "value"), true); + Authentication.RealmRef realmRef = new Authentication.RealmRef("_name", "_type", "_node_name"); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, new Authentication(user, realmRef, null)); + + IngestDocument ingestDocument = new IngestDocument(new HashMap<>(), new HashMap<>()); + SetSecurityUserProcessor processor = new SetSecurityUserProcessor("_tag", threadContext, "_field", EnumSet.allOf(Property.class)); + processor.execute(ingestDocument); + + Map result = ingestDocument.getFieldValue("_field", Map.class); + assertThat(result.size(), equalTo(5)); + assertThat(result.get("username"), equalTo("_username")); + assertThat(((List) result.get("roles")).size(), equalTo(2)); + assertThat(((List) result.get("roles")).get(0), equalTo("role1")); + assertThat(((List) result.get("roles")).get(1), equalTo("role2")); + assertThat(result.get("full_name"), equalTo("firstname lastname")); + assertThat(result.get("email"), equalTo("_email")); + assertThat(((Map) result.get("metadata")).size(), equalTo(1)); + assertThat(((Map) result.get("metadata")).get("key"), equalTo("value")); + + // test when user holds no data: + threadContext = new ThreadContext(Settings.EMPTY); + user = new User(null, null, null); + threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, new Authentication(user, realmRef, null)); + ingestDocument = new IngestDocument(new HashMap<>(), new HashMap<>()); + processor = new SetSecurityUserProcessor("_tag", threadContext, "_field", EnumSet.allOf(Property.class)); + processor.execute(ingestDocument); + result = ingestDocument.getFieldValue("_field", Map.class); + assertThat(result.size(), equalTo(0)); + } + + public void testNoCurrentUser() throws Exception { + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + IngestDocument ingestDocument = new IngestDocument(new HashMap<>(), new HashMap<>()); + SetSecurityUserProcessor processor = new 
SetSecurityUserProcessor("_tag", threadContext, "_field", EnumSet.allOf(Property.class)); + IllegalStateException e = expectThrows(IllegalStateException.class, () -> processor.execute(ingestDocument)); + assertThat(e.getMessage(), equalTo("No user authenticated, only use this processor via authenticated user")); + } + + public void testUsernameProperties() throws Exception { + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + User user = new User("_username", null, null); + Authentication.RealmRef realmRef = new Authentication.RealmRef("_name", "_type", "_node_name"); + threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, new Authentication(user, realmRef, null)); + + IngestDocument ingestDocument = new IngestDocument(new HashMap<>(), new HashMap<>()); + SetSecurityUserProcessor processor = new SetSecurityUserProcessor("_tag", threadContext, "_field", EnumSet.of(Property.USERNAME)); + processor.execute(ingestDocument); + + @SuppressWarnings("unchecked") + Map result = ingestDocument.getFieldValue("_field", Map.class); + assertThat(result.size(), equalTo(1)); + assertThat(result.get("username"), equalTo("_username")); + } + + public void testRolesProperties() throws Exception { + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + User user = new User(null, "role1", "role2"); + Authentication.RealmRef realmRef = new Authentication.RealmRef("_name", "_type", "_node_name"); + threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, new Authentication(user, realmRef, null)); + + IngestDocument ingestDocument = new IngestDocument(new HashMap<>(), new HashMap<>()); + SetSecurityUserProcessor processor = new SetSecurityUserProcessor("_tag", threadContext, "_field", EnumSet.of(Property.ROLES)); + processor.execute(ingestDocument); + + @SuppressWarnings("unchecked") + Map result = ingestDocument.getFieldValue("_field", Map.class); + assertThat(result.size(), equalTo(1)); + assertThat(((List) result.get("roles")).size(), equalTo(2)); + assertThat(((List) result.get("roles")).get(0), equalTo("role1")); + assertThat(((List) result.get("roles")).get(1), equalTo("role2")); + } + + public void testFullNameProperties() throws Exception { + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + User user = new User(null, null, "_full_name", null, null, true); + Authentication.RealmRef realmRef = new Authentication.RealmRef("_name", "_type", "_node_name"); + threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, new Authentication(user, realmRef, null)); + + IngestDocument ingestDocument = new IngestDocument(new HashMap<>(), new HashMap<>()); + SetSecurityUserProcessor processor = new SetSecurityUserProcessor("_tag", threadContext, "_field", EnumSet.of(Property.FULL_NAME)); + processor.execute(ingestDocument); + + @SuppressWarnings("unchecked") + Map result = ingestDocument.getFieldValue("_field", Map.class); + assertThat(result.size(), equalTo(1)); + assertThat(result.get("full_name"), equalTo("_full_name")); + } + + public void testEmailProperties() throws Exception { + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + User user = new User(null, null, null, "_email", null, true); + Authentication.RealmRef realmRef = new Authentication.RealmRef("_name", "_type", "_node_name"); + threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, new Authentication(user, realmRef, null)); + + IngestDocument ingestDocument = new IngestDocument(new HashMap<>(), new HashMap<>()); + SetSecurityUserProcessor processor 
= new SetSecurityUserProcessor("_tag", threadContext, "_field", EnumSet.of(Property.EMAIL)); + processor.execute(ingestDocument); + + @SuppressWarnings("unchecked") + Map result = ingestDocument.getFieldValue("_field", Map.class); + assertThat(result.size(), equalTo(1)); + assertThat(result.get("email"), equalTo("_email")); + } + + public void testMetadataProperties() throws Exception { + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + User user = new User(null, null, null, null, Collections.singletonMap("key", "value"), true); + Authentication.RealmRef realmRef = new Authentication.RealmRef("_name", "_type", "_node_name"); + threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, new Authentication(user, realmRef, null)); + + IngestDocument ingestDocument = new IngestDocument(new HashMap<>(), new HashMap<>()); + SetSecurityUserProcessor processor = new SetSecurityUserProcessor("_tag", threadContext, "_field", EnumSet.of(Property.METADATA)); + processor.execute(ingestDocument); + + @SuppressWarnings("unchecked") + Map result = ingestDocument.getFieldValue("_field", Map.class); + assertThat(result.size(), equalTo(1)); + assertThat(((Map) result.get("metadata")).size(), equalTo(1)); + assertThat(((Map) result.get("metadata")).get("key"), equalTo("value")); + } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/permission/FieldPermissionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/permission/FieldPermissionsTests.java new file mode 100644 index 0000000000000..b21740ee96ce6 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/permission/FieldPermissionsTests.java @@ -0,0 +1,227 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authz.permission; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReferenceArray; + +import static org.hamcrest.Matchers.containsString; + +public class FieldPermissionsTests extends ESTestCase { + + public void testParseFieldPermissions() throws Exception { + String q = "{\"indices\": [ {\"names\": \"idx2\", \"privileges\": [\"p3\"], " + + "\"field_security\": {" + + "\"grant\": [\"f1\", \"f2\", \"f3\", \"f4\"]," + + "\"except\": [\"f3\",\"f4\"]" + + "}}]}"; + RoleDescriptor rd = + RoleDescriptor.parse("test", new BytesArray(q), false, XContentType.JSON); + assertArrayEquals(rd.getIndicesPrivileges()[0].getGrantedFields(), + new String[] { "f1", "f2", "f3", "f4" }); + assertArrayEquals(rd.getIndicesPrivileges()[0].getDeniedFields(), + new String[] { "f3", "f4" }); + + q = "{\"indices\": [ {\"names\": \"idx2\", \"privileges\": [\"p3\"], " + + "\"field_security\": {" + + "\"except\": [\"f3\",\"f4\"]," + + "\"grant\": [\"f1\", \"f2\", \"f3\", \"f4\"]" + + "}}]}"; + rd = RoleDescriptor.parse("test", new BytesArray(q), false, XContentType.JSON); + assertArrayEquals(rd.getIndicesPrivileges()[0].getGrantedFields(), + new String[] { "f1", "f2", "f3", "f4" }); + assertArrayEquals(rd.getIndicesPrivileges()[0].getDeniedFields(), + new String[] { "f3", "f4" }); + + q = "{\"indices\": [ {\"names\": \"idx2\", \"privileges\": [\"p3\"], " + + "\"field_security\": {" + + "\"grant\": [\"f1\", \"f2\"]" + + "}}]}"; + rd = RoleDescriptor.parse("test", new BytesArray(q), false, XContentType.JSON); + assertArrayEquals(rd.getIndicesPrivileges()[0].getGrantedFields(), + new String[] { "f1", "f2" }); + assertNull(rd.getIndicesPrivileges()[0].getDeniedFields()); + + q = "{\"indices\": [ {\"names\": \"idx2\", \"privileges\": [\"p3\"], " + + "\"field_security\": {" + + "\"grant\": []" + + "}}]}"; + rd = RoleDescriptor.parse("test", new BytesArray(q), false, XContentType.JSON); + assertArrayEquals(rd.getIndicesPrivileges()[0].getGrantedFields(), new String[] {}); + assertNull(rd.getIndicesPrivileges()[0].getDeniedFields()); + + q = "{\"indices\": [ {\"names\": \"idx2\", \"privileges\": [\"p3\"], " + + "\"field_security\": {" + + "\"except\": []," + + "\"grant\": []" + + "}}]}"; + rd = RoleDescriptor.parse("test", new BytesArray(q), false, XContentType.JSON); + assertArrayEquals(rd.getIndicesPrivileges()[0].getGrantedFields(), new String[] {}); + assertArrayEquals(rd.getIndicesPrivileges()[0].getDeniedFields(), new String[] {}); + + final String exceptWithoutGrant = "{\"indices\": [ {\"names\": \"idx2\", \"privileges\":" + + " [\"p3\"], \"field_security\": {" + + "\"except\": [\"f1\"]" + + "}}]}"; + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, + () -> RoleDescriptor.parse("test", new BytesArray(exceptWithoutGrant), false, + XContentType.JSON)); + assertThat(e.getDetailedMessage(), + containsString("failed to parse indices privileges for role [test]. 
field_security" + + " requires grant if except is given")); + + final String grantNull = "{\"indices\": [ {\"names\": \"idx2\", \"privileges\": [\"p3\"]," + + " \"field_security\": {" + + "\"grant\": null" + + "}}]}"; + e = expectThrows(ElasticsearchParseException.class, + () -> RoleDescriptor.parse("test", new BytesArray(grantNull), false, + XContentType.JSON)); + assertThat(e.getDetailedMessage(), containsString("failed to parse indices privileges for" + + " role [test]. grant must not be null.")); + + final String exceptNull = "{\"indices\": [ {\"names\": \"idx2\", \"privileges\": " + + "[\"p3\"], \"field_security\": {" + + "\"grant\": [\"*\"]," + + "\"except\": null" + + "}}]}"; + e = expectThrows(ElasticsearchParseException.class, + () -> RoleDescriptor.parse("test", new BytesArray(exceptNull), false, + XContentType.JSON)); + assertThat(e.getDetailedMessage(), + containsString("failed to parse indices privileges for role [test]. except must" + + " not be null.")); + + final String exceptGrantNull = "{\"indices\": [ {\"names\": \"idx2\", \"privileges\": " + + "[\"p3\"], \"field_security\": {" + + "\"grant\": null," + + "\"except\": null" + + "}}]}"; + e = expectThrows(ElasticsearchParseException.class, + () -> RoleDescriptor.parse("test", new BytesArray(exceptGrantNull), false, + XContentType.JSON)); + assertThat(e.getDetailedMessage(), containsString("failed to parse indices privileges " + + "for role [test]. grant must not be null.")); + + final String bothFieldsMissing = "{\"indices\": [ {\"names\": \"idx2\", \"privileges\": " + + "[\"p3\"], \"field_security\": {" + + "}}]}"; + e = expectThrows(ElasticsearchParseException.class, + () -> RoleDescriptor.parse("test", new BytesArray(bothFieldsMissing), false, + XContentType.JSON)); + assertThat(e.getDetailedMessage(), containsString("failed to parse indices privileges " + + "for role [test]. 
\"field_security\" must not be empty.")); + + // try with two indices and mix order a little + q = "{\"indices\": [ {\"names\": \"idx2\", \"privileges\": [\"p3\"], " + + "\"field_security\": {" + + "\"grant\": []" + + "}}," + + "{\"names\": \"idx3\",\n" + + " \"field_security\": {\n" + + " \"grant\": [\"*\"], \n" + + " \"except\": [\"f2\"]}," + + "\"privileges\": [\"p3\"]}]}"; + rd = RoleDescriptor.parse("test", new BytesArray(q), false, XContentType.JSON); + assertArrayEquals(rd.getIndicesPrivileges()[0].getGrantedFields(), new String[] {}); + assertNull(rd.getIndicesPrivileges()[0].getDeniedFields()); + assertArrayEquals(rd.getIndicesPrivileges()[1].getGrantedFields(), new String[] {"*"}); + assertArrayEquals(rd.getIndicesPrivileges()[1].getDeniedFields(), new String[] {"f2"}); + } + + // test old syntax for field permissions + public void testBWCFieldPermissions() throws Exception { + String q = "{\"indices\": [ {\"names\": \"idx2\", \"privileges\": [\"p3\"], " + + "\"fields\": [\"f1\", \"f2\"]" + + "}]}"; + RoleDescriptor rd = RoleDescriptor.parse("test", new BytesArray(q), true, + XContentType.JSON); + assertArrayEquals(rd.getIndicesPrivileges()[0].getGrantedFields(), + new String[]{"f1", "f2"}); + assertNull(rd.getIndicesPrivileges()[0].getDeniedFields()); + + final String failingQuery = q; + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, + () -> RoleDescriptor.parse("test", new BytesArray(failingQuery), false, + XContentType.JSON)); + assertThat(e.getDetailedMessage(), containsString("[\"fields\": [...]] format has " + + "changed for field permissions in role [test]" + + ", use [\"field_security\": {\"grant\":[...],\"except\":[...]}] instead")); + + q = "{\"indices\": [ {\"names\": \"idx2\", \"privileges\": [\"p3\"], " + + "\"fields\": []" + + "}]}"; + rd = RoleDescriptor.parse("test", new BytesArray(q), true, XContentType.JSON); + assertArrayEquals(rd.getIndicesPrivileges()[0].getGrantedFields(), new String[]{}); + assertNull(rd.getIndicesPrivileges()[0].getDeniedFields()); + final String failingQuery2 = q; + e = expectThrows(ElasticsearchParseException.class, + () -> RoleDescriptor.parse("test", new BytesArray(failingQuery2), false, + XContentType.JSON)); + assertThat(e.getDetailedMessage(), containsString("[\"fields\": [...]] format has " + + "changed for field permissions in role [test]" + + ", use [\"field_security\": {\"grant\":[...],\"except\":[...]}] instead")); + + q = "{\"indices\": [ {\"names\": \"idx2\", \"privileges\": [\"p3\"], " + + "\"fields\": null" + + "}]}"; + rd = RoleDescriptor.parse("test", new BytesArray(q), true, XContentType.JSON); + assertNull(rd.getIndicesPrivileges()[0].getGrantedFields()); + assertNull(rd.getIndicesPrivileges()[0].getDeniedFields()); + final String failingQuery3 = q; + e = expectThrows(ElasticsearchParseException.class, + () -> RoleDescriptor.parse("test", new BytesArray(failingQuery3), false, + XContentType.JSON)); + assertThat(e.getDetailedMessage(), containsString("[\"fields\": [...]] format has " + + "changed for field permissions in role [test]" + + ", use [\"field_security\": {\"grant\":[...],\"except\":[...]}] instead")); + } + + public void testFieldPermissionsHashCodeThreadSafe() throws Exception { + final int numThreads = scaledRandomIntBetween(4, 16); + final FieldPermissions fieldPermissions =new FieldPermissions( + new FieldPermissionsDefinition(new String[] { "*" }, new String[] { "foo" })); + final CountDownLatch latch = new CountDownLatch(numThreads + 1); + final AtomicReferenceArray 
hashCodes = new AtomicReferenceArray<>(numThreads); + List threads = new ArrayList<>(numThreads); + for (int i = 0; i < numThreads; i++) { + final int threadNum = i; + threads.add(new Thread(() -> { + latch.countDown(); + try { + latch.await(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + final int hashCode = fieldPermissions.hashCode(); + hashCodes.set(threadNum, hashCode); + })); + } + + for (Thread thread : threads) { + thread.start(); + } + latch.countDown(); + for (Thread thread : threads) { + thread.join(); + } + + final int hashCode = fieldPermissions.hashCode(); + for (int i = 0; i < numThreads; i++) { + assertEquals((Integer) hashCode, hashCodes.get(i)); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/permission/PermissionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/permission/PermissionTests.java new file mode 100644 index 0000000000000..7dfd567b46033 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/permission/PermissionTests.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authz.permission; + +import org.elasticsearch.action.get.GetAction; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.authz.privilege.Privilege; +import org.junit.Before; + +import java.util.function.Predicate; + +import static org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege.MONITOR; +import static org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege.READ; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class PermissionTests extends ESTestCase { + private Role permission; + + @Before + public void init() { + Role.Builder builder = Role.builder("test"); + builder.add(MONITOR, "test_*", "/foo.*/"); + builder.add(READ, "baz_*foo", "/fool.*bar/"); + builder.add(MONITOR, "/bar.*/"); + permission = builder.build(); + } + + public void testAllowedIndicesMatcherAction() throws Exception { + testAllowedIndicesMatcher(permission.indices().allowedIndicesMatcher(GetAction.NAME)); + } + + public void testAllowedIndicesMatcherActionCaching() throws Exception { + Predicate matcher1 = permission.indices().allowedIndicesMatcher(GetAction.NAME); + Predicate matcher2 = permission.indices().allowedIndicesMatcher(GetAction.NAME); + assertThat(matcher1, is(matcher2)); + } + + public void testBuildEmptyRole() { + Role.Builder permission = Role.builder(new String[] { "some_role" }); + Role role = permission.build(); + assertThat(role, notNullValue()); + assertThat(role.cluster(), notNullValue()); + assertThat(role.indices(), notNullValue()); + assertThat(role.runAs(), notNullValue()); + } + + public void testRunAs() { + Role permission = Role.builder("some_role") + .runAs(new Privilege("name", "user1", "run*")) + .build(); + assertThat(permission.runAs().check("user1"), is(true)); + assertThat(permission.runAs().check("user"), is(false)); + assertThat(permission.runAs().check("run" + randomAlphaOfLengthBetween(1, 10)), is(true)); + } + + // "baz_*foo", "/fool.*bar/" + private void 
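FieldPermissionsTests closes with a concurrency check on `hashCode()`: a shared CountDownLatch gate releases all worker threads at once so the value is computed from many threads simultaneously and then compared for consistency. The same gate pattern is sketched below in plain Java with a placeholder object under test; it mirrors the structure of the test rather than any X-Pack API.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReferenceArray;

// Sketch of the "latch gate" pattern used by testFieldPermissionsHashCodeThreadSafe:
// every worker counts down and then awaits, so all threads hit the object under
// test at roughly the same instant once the main thread releases the gate.
final class LatchGateSketch {
    public static void main(String[] args) throws InterruptedException {
        final int numThreads = 8;
        final Object underTest = new Object(); // placeholder for the shared object under test
        final CountDownLatch gate = new CountDownLatch(numThreads + 1);
        final AtomicReferenceArray<Integer> results = new AtomicReferenceArray<>(numThreads);

        List<Thread> threads = new ArrayList<>();
        for (int i = 0; i < numThreads; i++) {
            final int slot = i;
            threads.add(new Thread(() -> {
                gate.countDown();
                try {
                    gate.await(); // wait until every worker and the main thread are ready
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
                results.set(slot, underTest.hashCode()); // concurrent access under test
            }));
        }
        threads.forEach(Thread::start);
        gate.countDown(); // release all workers at once
        for (Thread t : threads) {
            t.join();
        }
        for (int i = 0; i < numThreads; i++) {
            assert results.get(i) == underTest.hashCode();
        }
        System.out.println("all threads saw the same hash code");
    }
}
```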
testAllowedIndicesMatcher(Predicate indicesMatcher) { + assertThat(indicesMatcher.test("foobar"), is(false)); + assertThat(indicesMatcher.test("fool"), is(false)); + assertThat(indicesMatcher.test("fool2bar"), is(true)); + assertThat(indicesMatcher.test("baz_foo"), is(true)); + assertThat(indicesMatcher.test("barbapapa"), is(false)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java new file mode 100644 index 0000000000000..7c732cd7c52f9 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java @@ -0,0 +1,530 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authz.store; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.health.ClusterIndexHealth; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.license.License.OperationMode; +import org.elasticsearch.license.TestUtils.UpdatableLicenseState; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; +import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiConsumer; +import java.util.function.Function; + +import static org.elasticsearch.mock.orig.Mockito.times; +import static org.elasticsearch.mock.orig.Mockito.verifyNoMoreInteractions; +import static org.elasticsearch.xpack.security.test.SecurityTestUtils.getClusterIndexHealth; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anySetOf; +import static org.mockito.Matchers.eq; +import static org.mockito.Matchers.isA; +import static org.mockito.Mockito.doAnswer; 
+import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class CompositeRolesStoreTests extends ESTestCase { + + private static final Settings SECURITY_ENABLED_SETTINGS = Settings.builder() + .put(XPackSettings.SECURITY_ENABLED.getKey(), true) + .build(); + + public void testRolesWhenDlsFlsUnlicensed() throws IOException { + XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(false); + RoleDescriptor flsRole = new RoleDescriptor("fls", null, new IndicesPrivileges[] { + IndicesPrivileges.builder() + .grantedFields("*") + .deniedFields("foo") + .indices("*") + .privileges("read") + .build() + }, null); + BytesReference matchAllBytes = XContentHelper.toXContent(QueryBuilders.matchAllQuery(), XContentType.JSON, false); + RoleDescriptor dlsRole = new RoleDescriptor("dls", null, new IndicesPrivileges[] { + IndicesPrivileges.builder() + .indices("*") + .privileges("read") + .query(matchAllBytes) + .build() + }, null); + RoleDescriptor flsDlsRole = new RoleDescriptor("fls_dls", null, new IndicesPrivileges[] { + IndicesPrivileges.builder() + .indices("*") + .privileges("read") + .grantedFields("*") + .deniedFields("foo") + .query(matchAllBytes) + .build() + }, null); + RoleDescriptor noFlsDlsRole = new RoleDescriptor("no_fls_dls", null, new IndicesPrivileges[] { + IndicesPrivileges.builder() + .indices("*") + .privileges("read") + .build() + }, null); + FileRolesStore fileRolesStore = mock(FileRolesStore.class); + when(fileRolesStore.roleDescriptors(Collections.singleton("fls"))).thenReturn(Collections.singleton(flsRole)); + when(fileRolesStore.roleDescriptors(Collections.singleton("dls"))).thenReturn(Collections.singleton(dlsRole)); + when(fileRolesStore.roleDescriptors(Collections.singleton("fls_dls"))).thenReturn(Collections.singleton(flsDlsRole)); + when(fileRolesStore.roleDescriptors(Collections.singleton("no_fls_dls"))).thenReturn(Collections.singleton(noFlsDlsRole)); + CompositeRolesStore compositeRolesStore = new CompositeRolesStore(Settings.EMPTY, fileRolesStore, mock(NativeRolesStore.class), + mock(ReservedRolesStore.class), Collections.emptyList(), new ThreadContext(Settings.EMPTY), licenseState); + + FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY); + PlainActionFuture roleFuture = new PlainActionFuture<>(); + compositeRolesStore.roles(Collections.singleton("fls"), fieldPermissionsCache, roleFuture); + assertEquals(Role.EMPTY, roleFuture.actionGet()); + + roleFuture = new PlainActionFuture<>(); + compositeRolesStore.roles(Collections.singleton("dls"), fieldPermissionsCache, roleFuture); + assertEquals(Role.EMPTY, roleFuture.actionGet()); + + roleFuture = new PlainActionFuture<>(); + compositeRolesStore.roles(Collections.singleton("fls_dls"), fieldPermissionsCache, roleFuture); + assertEquals(Role.EMPTY, roleFuture.actionGet()); + + roleFuture = new PlainActionFuture<>(); + compositeRolesStore.roles(Collections.singleton("no_fls_dls"), fieldPermissionsCache, roleFuture); + assertNotEquals(Role.EMPTY, roleFuture.actionGet()); + } + + public void testRolesWhenDlsFlsLicensed() throws IOException { + XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true); + RoleDescriptor flsRole = new RoleDescriptor("fls", null, new IndicesPrivileges[] { + IndicesPrivileges.builder() 
+ .grantedFields("*") + .deniedFields("foo") + .indices("*") + .privileges("read") + .build() + }, null); + BytesReference matchAllBytes = XContentHelper.toXContent(QueryBuilders.matchAllQuery(), XContentType.JSON, false); + RoleDescriptor dlsRole = new RoleDescriptor("dls", null, new IndicesPrivileges[] { + IndicesPrivileges.builder() + .indices("*") + .privileges("read") + .query(matchAllBytes) + .build() + }, null); + RoleDescriptor flsDlsRole = new RoleDescriptor("fls_dls", null, new IndicesPrivileges[] { + IndicesPrivileges.builder() + .indices("*") + .privileges("read") + .grantedFields("*") + .deniedFields("foo") + .query(matchAllBytes) + .build() + }, null); + RoleDescriptor noFlsDlsRole = new RoleDescriptor("no_fls_dls", null, new IndicesPrivileges[] { + IndicesPrivileges.builder() + .indices("*") + .privileges("read") + .build() + }, null); + FileRolesStore fileRolesStore = mock(FileRolesStore.class); + when(fileRolesStore.roleDescriptors(Collections.singleton("fls"))).thenReturn(Collections.singleton(flsRole)); + when(fileRolesStore.roleDescriptors(Collections.singleton("dls"))).thenReturn(Collections.singleton(dlsRole)); + when(fileRolesStore.roleDescriptors(Collections.singleton("fls_dls"))).thenReturn(Collections.singleton(flsDlsRole)); + when(fileRolesStore.roleDescriptors(Collections.singleton("no_fls_dls"))).thenReturn(Collections.singleton(noFlsDlsRole)); + CompositeRolesStore compositeRolesStore = new CompositeRolesStore(Settings.EMPTY, fileRolesStore, mock(NativeRolesStore.class), + mock(ReservedRolesStore.class), Collections.emptyList(), new ThreadContext(Settings.EMPTY), licenseState); + + FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY); + PlainActionFuture roleFuture = new PlainActionFuture<>(); + compositeRolesStore.roles(Collections.singleton("fls"), fieldPermissionsCache, roleFuture); + assertNotEquals(Role.EMPTY, roleFuture.actionGet()); + + roleFuture = new PlainActionFuture<>(); + compositeRolesStore.roles(Collections.singleton("dls"), fieldPermissionsCache, roleFuture); + assertNotEquals(Role.EMPTY, roleFuture.actionGet()); + + roleFuture = new PlainActionFuture<>(); + compositeRolesStore.roles(Collections.singleton("fls_dls"), fieldPermissionsCache, roleFuture); + assertNotEquals(Role.EMPTY, roleFuture.actionGet()); + + roleFuture = new PlainActionFuture<>(); + compositeRolesStore.roles(Collections.singleton("no_fls_dls"), fieldPermissionsCache, roleFuture); + assertNotEquals(Role.EMPTY, roleFuture.actionGet()); + } + + public void testNegativeLookupsAreCached() { + final FileRolesStore fileRolesStore = mock(FileRolesStore.class); + when(fileRolesStore.roleDescriptors(anySetOf(String.class))).thenReturn(Collections.emptySet()); + final NativeRolesStore nativeRolesStore = mock(NativeRolesStore.class); + doAnswer((invocationOnMock) -> { + ActionListener> callback = (ActionListener>) invocationOnMock.getArguments()[1]; + callback.onResponse(Collections.emptySet()); + return null; + }).when(nativeRolesStore).getRoleDescriptors(isA(String[].class), any(ActionListener.class)); + final ReservedRolesStore reservedRolesStore = spy(new ReservedRolesStore()); + + final CompositeRolesStore compositeRolesStore = + new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore, + Collections.emptyList(), new ThreadContext(SECURITY_ENABLED_SETTINGS), + new XPackLicenseState(SECURITY_ENABLED_SETTINGS)); + verify(fileRolesStore).addListener(any(Runnable.class)); // adds a listener in ctor + + 
final String roleName = randomAlphaOfLengthBetween(1, 10); + PlainActionFuture future = new PlainActionFuture<>(); + final FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY); + compositeRolesStore.roles(Collections.singleton(roleName), fieldPermissionsCache, future); + final Role role = future.actionGet(); + assertEquals(Role.EMPTY, role); + verify(reservedRolesStore).roleDescriptors(); + verify(fileRolesStore).roleDescriptors(eq(Collections.singleton(roleName))); + verify(nativeRolesStore).getRoleDescriptors(isA(String[].class), any(ActionListener.class)); + + final int numberOfTimesToCall = scaledRandomIntBetween(0, 32); + final boolean getSuperuserRole = randomBoolean() + && roleName.equals(ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR.getName()) == false; + final Set names = getSuperuserRole ? Sets.newHashSet(roleName, ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR.getName()) + : Collections.singleton(roleName); + for (int i = 0; i < numberOfTimesToCall; i++) { + future = new PlainActionFuture<>(); + compositeRolesStore.roles(names, fieldPermissionsCache, future); + future.actionGet(); + } + + if (getSuperuserRole && numberOfTimesToCall > 0) { + // the superuser role was requested so we get the role descriptors again + verify(reservedRolesStore, times(2)).roleDescriptors(); + } + verifyNoMoreInteractions(fileRolesStore, reservedRolesStore, nativeRolesStore); + + // force a cache clear + + } + + public void testCustomRolesProviders() { + final FileRolesStore fileRolesStore = mock(FileRolesStore.class); + when(fileRolesStore.roleDescriptors(anySetOf(String.class))).thenReturn(Collections.emptySet()); + final NativeRolesStore nativeRolesStore = mock(NativeRolesStore.class); + doAnswer((invocationOnMock) -> { + ActionListener> callback = (ActionListener>) invocationOnMock.getArguments()[1]; + callback.onResponse(Collections.emptySet()); + return null; + }).when(nativeRolesStore).getRoleDescriptors(isA(String[].class), any(ActionListener.class)); + final ReservedRolesStore reservedRolesStore = spy(new ReservedRolesStore()); + + final InMemoryRolesProvider inMemoryProvider1 = spy(new InMemoryRolesProvider((roles) -> { + Set descriptors = new HashSet<>(); + if (roles.contains("roleA")) { + descriptors.add(new RoleDescriptor("roleA", null, + new IndicesPrivileges[] { + IndicesPrivileges.builder().privileges("READ").indices("foo").grantedFields("*").build() + }, null)); + } + return descriptors; + })); + + final InMemoryRolesProvider inMemoryProvider2 = spy(new InMemoryRolesProvider((roles) -> { + Set descriptors = new HashSet<>(); + if (roles.contains("roleA")) { + // both role providers can resolve role A, this makes sure that if the first + // role provider in order resolves a role, the second provider does not override it + descriptors.add(new RoleDescriptor("roleA", null, + new IndicesPrivileges[] { + IndicesPrivileges.builder().privileges("WRITE").indices("*").grantedFields("*").build() + }, null)); + } + if (roles.contains("roleB")) { + descriptors.add(new RoleDescriptor("roleB", null, + new IndicesPrivileges[] { + IndicesPrivileges.builder().privileges("READ").indices("bar").grantedFields("*").build() + }, null)); + } + return descriptors; + })); + + final CompositeRolesStore compositeRolesStore = + new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore, + Arrays.asList(inMemoryProvider1, inMemoryProvider2), new ThreadContext(SECURITY_ENABLED_SETTINGS), + new XPackLicenseState(SECURITY_ENABLED_SETTINGS)); 
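testCustomRolesProviders exercises the extension point where a custom roles provider is simply a BiConsumer of the requested role names and a result callback. The sketch below models that contract in plain Java; `Listener` is a stand-in for Elasticsearch's ActionListener, role descriptors are reduced to plain strings, and all names are illustrative rather than the real API.

```java
import java.util.Set;
import java.util.function.BiConsumer;

// Sketch of the custom-roles-provider contract exercised above: a provider receives
// the requested role names plus a callback and reports whatever roles it can resolve.
final class RolesProviderSketch {

    interface Listener<T> {
        void onResponse(T value);
        void onFailure(Exception e);
    }

    /** Resolves "roleA" and nothing else, mirroring the in-memory provider used by the test. */
    static BiConsumer<Set<String>, Listener<Set<String>>> inMemoryProvider() {
        return (requested, listener) -> {
            if (requested.contains("roleA")) {
                listener.onResponse(Set.of("roleA"));
            } else {
                listener.onResponse(Set.of()); // unknown roles resolve to nothing (and can be cached as misses)
            }
        };
    }

    public static void main(String[] args) {
        inMemoryProvider().accept(Set.of("roleA", "unknown"), new Listener<>() {
            @Override public void onResponse(Set<String> roles) { System.out.println("resolved: " + roles); }
            @Override public void onFailure(Exception e) { System.out.println("failed: " + e.getMessage()); }
        });
    }
}
```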
+ + final Set roleNames = Sets.newHashSet("roleA", "roleB", "unknown"); + PlainActionFuture future = new PlainActionFuture<>(); + final FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY); + compositeRolesStore.roles(roleNames, fieldPermissionsCache, future); + final Role role = future.actionGet(); + + // make sure custom roles providers populate roles correctly + assertEquals(2, role.indices().groups().length); + assertEquals(IndexPrivilege.READ, role.indices().groups()[0].privilege()); + assertThat(role.indices().groups()[0].indices()[0], anyOf(equalTo("foo"), equalTo("bar"))); + assertEquals(IndexPrivilege.READ, role.indices().groups()[1].privilege()); + assertThat(role.indices().groups()[1].indices()[0], anyOf(equalTo("foo"), equalTo("bar"))); + + // make sure negative lookups are cached + verify(inMemoryProvider1).accept(anySetOf(String.class), any(ActionListener.class)); + verify(inMemoryProvider2).accept(anySetOf(String.class), any(ActionListener.class)); + + final int numberOfTimesToCall = scaledRandomIntBetween(1, 8); + for (int i = 0; i < numberOfTimesToCall; i++) { + future = new PlainActionFuture<>(); + compositeRolesStore.roles(Collections.singleton("unknown"), fieldPermissionsCache, future); + future.actionGet(); + } + + verifyNoMoreInteractions(inMemoryProvider1, inMemoryProvider2); + } + + /** + * This test is a direct result of a issue where field level security permissions were not + * being merged correctly. The improper merging resulted in an allow all result when merging + * permissions from different roles instead of properly creating a union of their languages + */ + public void testMergingRolesWithFls() { + RoleDescriptor flsRole = new RoleDescriptor("fls", null, new IndicesPrivileges[] { + IndicesPrivileges.builder() + .grantedFields("*") + .deniedFields("L1.*", "L2.*") + .indices("*") + .privileges("read") + .query("{ \"match\": {\"eventType.typeCode\": \"foo\"} }") + .build() + }, null); + RoleDescriptor addsL1Fields = new RoleDescriptor("dls", null, new IndicesPrivileges[] { + IndicesPrivileges.builder() + .indices("*") + .grantedFields("L1.*") + .privileges("read") + .query("{ \"match\": {\"eventType.typeCode\": \"foo\"} }") + .build() + }, null); + FieldPermissionsCache cache = new FieldPermissionsCache(Settings.EMPTY); + Role role = CompositeRolesStore.buildRoleFromDescriptors(Sets.newHashSet(flsRole, addsL1Fields), cache); + + MetaData metaData = MetaData.builder() + .put(new IndexMetaData.Builder("test") + .settings(Settings.builder().put("index.version.created", Version.CURRENT).build()) + .numberOfShards(1).numberOfReplicas(0).build(), true) + .build(); + Map acls = + role.indices().authorize("indices:data/read/search", Collections.singleton("test"), metaData, cache); + assertFalse(acls.isEmpty()); + assertTrue(acls.get("test").getFieldPermissions().grantsAccessTo("L1.foo")); + assertFalse(acls.get("test").getFieldPermissions().grantsAccessTo("L2.foo")); + assertTrue(acls.get("test").getFieldPermissions().grantsAccessTo("L3.foo")); + } + + public void testCustomRolesProviderFailures() throws Exception { + final FileRolesStore fileRolesStore = mock(FileRolesStore.class); + when(fileRolesStore.roleDescriptors(anySetOf(String.class))).thenReturn(Collections.emptySet()); + final NativeRolesStore nativeRolesStore = mock(NativeRolesStore.class); + doAnswer((invocationOnMock) -> { + ActionListener> callback = (ActionListener>) invocationOnMock.getArguments()[1]; + callback.onResponse(Collections.emptySet()); + return null; + 
}).when(nativeRolesStore).getRoleDescriptors(isA(String[].class), any(ActionListener.class)); + final ReservedRolesStore reservedRolesStore = new ReservedRolesStore(); + + final InMemoryRolesProvider inMemoryProvider1 = new InMemoryRolesProvider((roles) -> { + Set descriptors = new HashSet<>(); + if (roles.contains("roleA")) { + descriptors.add(new RoleDescriptor("roleA", null, + new IndicesPrivileges[] { + IndicesPrivileges.builder().privileges("READ").indices("foo").grantedFields("*").build() + }, null)); + } + return descriptors; + }); + + final BiConsumer, ActionListener>> failingProvider = + (roles, listener) -> listener.onFailure(new Exception("fake failure")); + + final CompositeRolesStore compositeRolesStore = + new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore, + Arrays.asList(inMemoryProvider1, failingProvider), new ThreadContext(SECURITY_ENABLED_SETTINGS), + new XPackLicenseState(SECURITY_ENABLED_SETTINGS)); + + final Set roleNames = Sets.newHashSet("roleA", "roleB", "unknown"); + PlainActionFuture future = new PlainActionFuture<>(); + final FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY); + compositeRolesStore.roles(roleNames, fieldPermissionsCache, future); + try { + future.get(); + fail("provider should have thrown a failure"); + } catch (ExecutionException e) { + assertEquals("fake failure", e.getCause().getMessage()); + } + } + + public void testCustomRolesProvidersLicensing() { + final FileRolesStore fileRolesStore = mock(FileRolesStore.class); + when(fileRolesStore.roleDescriptors(anySetOf(String.class))).thenReturn(Collections.emptySet()); + final NativeRolesStore nativeRolesStore = mock(NativeRolesStore.class); + doAnswer((invocationOnMock) -> { + ActionListener> callback = (ActionListener>) invocationOnMock.getArguments()[1]; + callback.onResponse(Collections.emptySet()); + return null; + }).when(nativeRolesStore).getRoleDescriptors(isA(String[].class), any(ActionListener.class)); + final ReservedRolesStore reservedRolesStore = new ReservedRolesStore(); + + final InMemoryRolesProvider inMemoryProvider = new InMemoryRolesProvider((roles) -> { + Set descriptors = new HashSet<>(); + if (roles.contains("roleA")) { + descriptors.add(new RoleDescriptor("roleA", null, + new IndicesPrivileges[] { + IndicesPrivileges.builder().privileges("READ").indices("foo").grantedFields("*").build() + }, null)); + } + return descriptors; + }); + + UpdatableLicenseState xPackLicenseState = new UpdatableLicenseState(SECURITY_ENABLED_SETTINGS); + // these licenses don't allow custom role providers + xPackLicenseState.update(randomFrom(OperationMode.BASIC, OperationMode.GOLD, OperationMode.STANDARD), true); + CompositeRolesStore compositeRolesStore = new CompositeRolesStore( + Settings.EMPTY, fileRolesStore, nativeRolesStore, reservedRolesStore, + Arrays.asList(inMemoryProvider), new ThreadContext(Settings.EMPTY), xPackLicenseState); + + Set roleNames = Sets.newHashSet("roleA"); + PlainActionFuture future = new PlainActionFuture<>(); + FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY); + compositeRolesStore.roles(roleNames, fieldPermissionsCache, future); + Role role = future.actionGet(); + + // no roles should've been populated, as the license doesn't permit custom role providers + assertEquals(0, role.indices().groups().length); + + compositeRolesStore = new CompositeRolesStore( + Settings.EMPTY, fileRolesStore, nativeRolesStore, reservedRolesStore, + 
Arrays.asList(inMemoryProvider), new ThreadContext(Settings.EMPTY), xPackLicenseState); + // these licenses allow custom role providers + xPackLicenseState.update(randomFrom(OperationMode.PLATINUM, OperationMode.TRIAL), true); + roleNames = Sets.newHashSet("roleA"); + future = new PlainActionFuture<>(); + fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY); + compositeRolesStore.roles(roleNames, fieldPermissionsCache, future); + role = future.actionGet(); + + // roleA should've been populated by the custom role provider, because the license allows it + assertEquals(1, role.indices().groups().length); + + // license expired, don't allow custom role providers + compositeRolesStore = new CompositeRolesStore( + Settings.EMPTY, fileRolesStore, nativeRolesStore, reservedRolesStore, + Arrays.asList(inMemoryProvider), new ThreadContext(Settings.EMPTY), xPackLicenseState); + xPackLicenseState.update(randomFrom(OperationMode.PLATINUM, OperationMode.TRIAL), false); + roleNames = Sets.newHashSet("roleA"); + future = new PlainActionFuture<>(); + fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY); + compositeRolesStore.roles(roleNames, fieldPermissionsCache, future); + role = future.actionGet(); + assertEquals(0, role.indices().groups().length); + } + + public void testCacheClearOnIndexHealthChange() { + final AtomicInteger numInvalidation = new AtomicInteger(0); + + CompositeRolesStore compositeRolesStore = new CompositeRolesStore( + Settings.EMPTY, mock(FileRolesStore.class), mock(NativeRolesStore.class), mock(ReservedRolesStore.class), + Collections.emptyList(), new ThreadContext(Settings.EMPTY), new XPackLicenseState(SECURITY_ENABLED_SETTINGS)) { + @Override + public void invalidateAll() { + numInvalidation.incrementAndGet(); + } + }; + + int expectedInvalidation = 0; + // existing to no longer present + ClusterIndexHealth previousHealth = getClusterIndexHealth(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW)); + ClusterIndexHealth currentHealth = null; + compositeRolesStore.onSecurityIndexHealthChange(previousHealth, currentHealth); + assertEquals(++expectedInvalidation, numInvalidation.get()); + + // doesn't exist to exists + previousHealth = null; + currentHealth = getClusterIndexHealth(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW)); + compositeRolesStore.onSecurityIndexHealthChange(previousHealth, currentHealth); + assertEquals(++expectedInvalidation, numInvalidation.get()); + + // green or yellow to red + previousHealth = getClusterIndexHealth(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW)); + currentHealth = getClusterIndexHealth(ClusterHealthStatus.RED); + compositeRolesStore.onSecurityIndexHealthChange(previousHealth, currentHealth); + assertEquals(expectedInvalidation, numInvalidation.get()); + + // red to non red + previousHealth = getClusterIndexHealth(ClusterHealthStatus.RED); + currentHealth = getClusterIndexHealth(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW)); + compositeRolesStore.onSecurityIndexHealthChange(previousHealth, currentHealth); + assertEquals(++expectedInvalidation, numInvalidation.get()); + + // green to yellow or yellow to green + previousHealth = getClusterIndexHealth(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW)); + currentHealth = getClusterIndexHealth( + previousHealth.getStatus() == ClusterHealthStatus.GREEN ? 
ClusterHealthStatus.YELLOW : ClusterHealthStatus.GREEN); + compositeRolesStore.onSecurityIndexHealthChange(previousHealth, currentHealth); + assertEquals(expectedInvalidation, numInvalidation.get()); + } + + public void testCacheClearOnIndexOutOfDateChange() { + final AtomicInteger numInvalidation = new AtomicInteger(0); + + CompositeRolesStore compositeRolesStore = new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, mock(FileRolesStore.class), + mock(NativeRolesStore.class), mock(ReservedRolesStore.class), + Collections.emptyList(), new ThreadContext(SECURITY_ENABLED_SETTINGS), new XPackLicenseState(SECURITY_ENABLED_SETTINGS)) { + @Override + public void invalidateAll() { + numInvalidation.incrementAndGet(); + } + }; + + compositeRolesStore.onSecurityIndexOutOfDateChange(false, true); + assertEquals(1, numInvalidation.get()); + + compositeRolesStore.onSecurityIndexOutOfDateChange(true, false); + assertEquals(2, numInvalidation.get()); + } + + private static class InMemoryRolesProvider implements BiConsumer, ActionListener>> { + private final Function, Set> roleDescriptorsFunc; + + InMemoryRolesProvider(Function, Set> roleDescriptorsFunc) { + this.roleDescriptorsFunc = roleDescriptorsFunc; + } + + @Override + public void accept(Set roles, ActionListener> listener) { + listener.onResponse(roleDescriptorsFunc.apply(roles)); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java new file mode 100644 index 0000000000000..9ad9927dd9154 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java @@ -0,0 +1,465 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
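testCacheClearOnIndexHealthChange asserts exactly when the role cache must be invalidated as the security index's health changes: when the index appears or disappears, and when it recovers from RED, but not on GREEN/YELLOW transitions or on a drop to RED. A compact plain-Java sketch of that decision rule follows; `Health` is a stand-in enum, not Elasticsearch's ClusterHealthStatus, and the method name is illustrative.

```java
// Sketch of the invalidation rule the health-change test asserts.
enum Health { GREEN, YELLOW, RED }

final class CacheInvalidationSketch {

    static boolean shouldInvalidate(Health previous, Health current) {
        if (previous == null || current == null) {
            return previous != current; // index newly created or no longer present
        }
        // Only recovery from RED forces a reload; GREEN <-> YELLOW and a drop to RED do not.
        return previous == Health.RED && current != Health.RED;
    }

    public static void main(String[] args) {
        System.out.println(shouldInvalidate(Health.GREEN, null));           // true: index no longer present
        System.out.println(shouldInvalidate(null, Health.YELLOW));          // true: index newly present
        System.out.println(shouldInvalidate(Health.RED, Health.YELLOW));    // true: recovered from RED
        System.out.println(shouldInvalidate(Health.GREEN, Health.YELLOW));  // false
        System.out.println(shouldInvalidate(Health.GREEN, Health.RED));     // false: dropping to RED does not clear
    }
}
```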
+ */ +package org.elasticsearch.xpack.security.authz.store; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.automaton.MinimizationOperations; +import org.apache.lucene.util.automaton.Operations; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.audit.logfile.CapturingLogger; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; +import org.elasticsearch.xpack.core.security.authz.permission.IndicesPermission; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.authz.permission.RunAsPermission; +import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; + +import java.io.BufferedWriter; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.startsWith; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class FileRolesStoreTests extends ESTestCase { + + public void testParseFile() throws Exception { + Path path = getDataPath("roles.yml"); + Map roles = FileRolesStore.parseFile(path, logger, Settings.builder() + .put(XPackSettings.DLS_FLS_ENABLED.getKey(), true) + .build(), new XPackLicenseState(Settings.EMPTY)); + assertThat(roles, notNullValue()); + assertThat(roles.size(), is(9)); + + RoleDescriptor descriptor = roles.get("role1"); + assertNotNull(descriptor); + Role role = Role.builder(descriptor, null).build(); + assertThat(role, notNullValue()); + assertThat(role.names(), equalTo(new String[] { "role1" })); + assertThat(role.cluster(), notNullValue()); + assertThat(role.cluster().privilege(), is(ClusterPrivilege.ALL)); + assertThat(role.indices(), notNullValue()); + assertThat(role.indices().groups(), notNullValue()); + assertThat(role.indices().groups().length, is(2)); + assertThat(role.runAs(), is(RunAsPermission.NONE)); + + IndicesPermission.Group group = role.indices().groups()[0]; + assertThat(group.indices(), notNullValue()); + assertThat(group.indices().length, 
is(2)); + assertThat(group.indices()[0], equalTo("idx1")); + assertThat(group.indices()[1], equalTo("idx2")); + assertThat(group.privilege(), notNullValue()); + assertThat(group.privilege(), is(IndexPrivilege.READ)); + + group = role.indices().groups()[1]; + assertThat(group.indices(), notNullValue()); + assertThat(group.indices().length, is(1)); + assertThat(group.indices()[0], equalTo("idx3")); + assertThat(group.privilege(), notNullValue()); + assertTrue(Operations.subsetOf(IndexPrivilege.READ.getAutomaton(), group.privilege().getAutomaton())); + assertTrue(Operations.subsetOf(IndexPrivilege.WRITE.getAutomaton(), group.privilege().getAutomaton())); + + descriptor = roles.get("role1.ab"); + assertNotNull(descriptor); + role = Role.builder(descriptor, null).build(); + assertThat(role, notNullValue()); + assertThat(role.names(), equalTo(new String[] { "role1.ab" })); + assertThat(role.cluster(), notNullValue()); + assertThat(role.cluster().privilege(), is(ClusterPrivilege.ALL)); + assertThat(role.indices(), notNullValue()); + assertThat(role.indices().groups(), notNullValue()); + assertThat(role.indices().groups().length, is(0)); + assertThat(role.runAs(), is(RunAsPermission.NONE)); + + descriptor = roles.get("role2"); + assertNotNull(descriptor); + role = Role.builder(descriptor, null).build(); + assertThat(role, notNullValue()); + assertThat(role.names(), equalTo(new String[] { "role2" })); + assertThat(role.cluster(), notNullValue()); + assertTrue(Operations.sameLanguage(role.cluster().privilege().getAutomaton(), ClusterPrivilege.ALL.getAutomaton())); + assertThat(role.indices(), notNullValue()); + assertThat(role.indices(), is(IndicesPermission.NONE)); + assertThat(role.runAs(), is(RunAsPermission.NONE)); + + descriptor = roles.get("role3"); + assertNotNull(descriptor); + role = Role.builder(descriptor, null).build(); + assertThat(role, notNullValue()); + assertThat(role.names(), equalTo(new String[] { "role3" })); + assertThat(role.cluster(), notNullValue()); + assertThat(role.cluster(), is(ClusterPermission.NONE)); + assertThat(role.indices(), notNullValue()); + assertThat(role.indices().groups(), notNullValue()); + assertThat(role.indices().groups().length, is(1)); + assertThat(role.runAs(), is(RunAsPermission.NONE)); + + group = role.indices().groups()[0]; + assertThat(group.indices(), notNullValue()); + assertThat(group.indices().length, is(1)); + assertThat(group.indices()[0], equalTo("/.*_.*/")); + assertThat(group.privilege(), notNullValue()); + assertTrue(Operations.sameLanguage(group.privilege().getAutomaton(), + MinimizationOperations.minimize(Operations.union(IndexPrivilege.READ.getAutomaton(), IndexPrivilege.WRITE.getAutomaton()), + Operations.DEFAULT_MAX_DETERMINIZED_STATES))); + + descriptor = roles.get("role4"); + assertNull(descriptor); + + descriptor = roles.get("role_run_as"); + assertNotNull(descriptor); + role = Role.builder(descriptor, null).build(); + assertThat(role, notNullValue()); + assertThat(role.names(), equalTo(new String[] { "role_run_as" })); + assertThat(role.cluster(), notNullValue()); + assertThat(role.cluster(), is(ClusterPermission.NONE)); + assertThat(role.indices(), is(IndicesPermission.NONE)); + assertThat(role.runAs(), notNullValue()); + assertThat(role.runAs().check("user1"), is(true)); + assertThat(role.runAs().check("user2"), is(true)); + assertThat(role.runAs().check("user" + randomIntBetween(3, 9)), is(false)); + + descriptor = roles.get("role_run_as1"); + assertNotNull(descriptor); + role = Role.builder(descriptor, null).build(); + 
assertThat(role, notNullValue()); + assertThat(role.names(), equalTo(new String[] { "role_run_as1" })); + assertThat(role.cluster(), notNullValue()); + assertThat(role.cluster(), is(ClusterPermission.NONE)); + assertThat(role.indices(), is(IndicesPermission.NONE)); + assertThat(role.runAs(), notNullValue()); + assertThat(role.runAs().check("user1"), is(true)); + assertThat(role.runAs().check("user2"), is(true)); + assertThat(role.runAs().check("user" + randomIntBetween(3, 9)), is(false)); + + descriptor = roles.get("role_fields"); + assertNotNull(descriptor); + role = Role.builder(descriptor, null).build(); + assertThat(role, notNullValue()); + assertThat(role.names(), equalTo(new String[] { "role_fields" })); + assertThat(role.cluster(), notNullValue()); + assertThat(role.cluster(), is(ClusterPermission.NONE)); + assertThat(role.runAs(), is(RunAsPermission.NONE)); + assertThat(role.indices(), notNullValue()); + assertThat(role.indices().groups(), notNullValue()); + assertThat(role.indices().groups().length, is(1)); + + group = role.indices().groups()[0]; + assertThat(group.indices(), notNullValue()); + assertThat(group.indices().length, is(1)); + assertThat(group.indices()[0], equalTo("field_idx")); + assertThat(group.privilege(), notNullValue()); + assertTrue(Operations.sameLanguage(group.privilege().getAutomaton(), IndexPrivilege.READ.getAutomaton())); + assertTrue(group.getFieldPermissions().grantsAccessTo("foo")); + assertTrue(group.getFieldPermissions().grantsAccessTo("boo")); + assertTrue(group.getFieldPermissions().hasFieldLevelSecurity()); + + descriptor = roles.get("role_query"); + assertNotNull(descriptor); + role = Role.builder(descriptor, null).build(); + assertThat(role, notNullValue()); + assertThat(role.names(), equalTo(new String[] { "role_query" })); + assertThat(role.cluster(), notNullValue()); + assertThat(role.cluster(), is(ClusterPermission.NONE)); + assertThat(role.runAs(), is(RunAsPermission.NONE)); + assertThat(role.indices(), notNullValue()); + assertThat(role.indices().groups(), notNullValue()); + assertThat(role.indices().groups().length, is(1)); + + group = role.indices().groups()[0]; + assertThat(group.indices(), notNullValue()); + assertThat(group.indices().length, is(1)); + assertThat(group.indices()[0], equalTo("query_idx")); + assertThat(group.privilege(), notNullValue()); + assertTrue(Operations.sameLanguage(group.privilege().getAutomaton(), IndexPrivilege.READ.getAutomaton())); + assertFalse(group.getFieldPermissions().hasFieldLevelSecurity()); + assertThat(group.getQuery(), notNullValue()); + + descriptor = roles.get("role_query_fields"); + assertNotNull(descriptor); + role = Role.builder(descriptor, null).build(); + assertThat(role, notNullValue()); + assertThat(role.names(), equalTo(new String[] { "role_query_fields" })); + assertThat(role.cluster(), notNullValue()); + assertThat(role.cluster(), is(ClusterPermission.NONE)); + assertThat(role.runAs(), is(RunAsPermission.NONE)); + assertThat(role.indices(), notNullValue()); + assertThat(role.indices().groups(), notNullValue()); + assertThat(role.indices().groups().length, is(1)); + + group = role.indices().groups()[0]; + assertThat(group.indices(), notNullValue()); + assertThat(group.indices().length, is(1)); + assertThat(group.indices()[0], equalTo("query_fields_idx")); + assertThat(group.privilege(), notNullValue()); + assertTrue(Operations.sameLanguage(group.privilege().getAutomaton(), IndexPrivilege.READ.getAutomaton())); + assertTrue(group.getFieldPermissions().grantsAccessTo("foo")); + 
assertTrue(group.getFieldPermissions().grantsAccessTo("boo")); + assertTrue(group.getFieldPermissions().hasFieldLevelSecurity()); + assertThat(group.getQuery(), notNullValue()); + } + + public void testParseFileWithFLSAndDLSDisabled() throws Exception { + Path path = getDataPath("roles.yml"); + Logger logger = CapturingLogger.newCapturingLogger(Level.ERROR); + Map roles = FileRolesStore.parseFile(path, logger, Settings.builder() + .put(XPackSettings.DLS_FLS_ENABLED.getKey(), false) + .build(), new XPackLicenseState(Settings.EMPTY)); + assertThat(roles, notNullValue()); + assertThat(roles.size(), is(6)); + assertThat(roles.get("role_fields"), nullValue()); + assertThat(roles.get("role_query"), nullValue()); + assertThat(roles.get("role_query_fields"), nullValue()); + + List events = CapturingLogger.output(logger.getName(), Level.ERROR); + assertThat(events, hasSize(3)); + assertThat( + events.get(0), + startsWith("invalid role definition [role_fields] in roles file [" + path.toAbsolutePath() + + "]. document and field level security is not enabled.")); + assertThat(events.get(1), + startsWith("invalid role definition [role_query] in roles file [" + path.toAbsolutePath() + + "]. document and field level security is not enabled.")); + assertThat(events.get(2), + startsWith("invalid role definition [role_query_fields] in roles file [" + path.toAbsolutePath() + + "]. document and field level security is not enabled.")); + } + + public void testParseFileWithFLSAndDLSUnlicensed() throws Exception { + Path path = getDataPath("roles.yml"); + Logger logger = CapturingLogger.newCapturingLogger(Level.WARN); + XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(false); + Map roles = FileRolesStore.parseFile(path, logger, Settings.EMPTY, licenseState); + assertThat(roles, notNullValue()); + assertThat(roles.size(), is(9)); + assertNotNull(roles.get("role_fields")); + assertNotNull(roles.get("role_query")); + assertNotNull(roles.get("role_query_fields")); + + List events = CapturingLogger.output(logger.getName(), Level.WARN); + assertThat(events, hasSize(3)); + assertThat( + events.get(0), + startsWith("role [role_fields] uses document and/or field level security, which is not enabled by the current license")); + assertThat(events.get(1), + startsWith("role [role_query] uses document and/or field level security, which is not enabled by the current license")); + assertThat(events.get(2), + startsWith("role [role_query_fields] uses document and/or field level security, which is not enabled by the current " + + "license")); + } + + /** + * This test is mainly to make sure we can read the default roles.yml config + */ + public void testDefaultRolesFile() throws Exception { + // TODO we should add the config dir to the resources so we don't copy this stuff around... 
+ Path path = getDataPath("default_roles.yml"); + Map roles = FileRolesStore.parseFile(path, logger, Settings.EMPTY, new XPackLicenseState(Settings.EMPTY)); + assertThat(roles, notNullValue()); + assertThat(roles.size(), is(0)); + } + + public void testAutoReload() throws Exception { + ThreadPool threadPool = null; + ResourceWatcherService watcherService = null; + try { + Path roles = getDataPath("roles.yml"); + Path home = createTempDir(); + Path xpackConf = home.resolve("config"); + Files.createDirectories(xpackConf); + Path tmp = xpackConf.resolve("roles.yml"); + try (OutputStream stream = Files.newOutputStream(tmp)) { + Files.copy(roles, stream); + } + + Settings.Builder builder = Settings.builder() + .put("resource.reload.interval.high", "500ms") + .put("path.home", home); + Settings settings = builder.build(); + Environment env = TestEnvironment.newEnvironment(settings); + threadPool = new TestThreadPool("test"); + watcherService = new ResourceWatcherService(settings, threadPool); + final CountDownLatch latch = new CountDownLatch(1); + FileRolesStore store = new FileRolesStore(settings, env, watcherService, latch::countDown, + new XPackLicenseState(Settings.EMPTY)); + + Set descriptors = store.roleDescriptors(Collections.singleton("role1")); + assertThat(descriptors, notNullValue()); + assertEquals(1, descriptors.size()); + descriptors = store.roleDescriptors(Collections.singleton("role5")); + assertThat(descriptors, notNullValue()); + assertTrue(descriptors.isEmpty()); + + watcherService.start(); + + try (BufferedWriter writer = Files.newBufferedWriter(tmp, StandardCharsets.UTF_8, StandardOpenOption.APPEND)) { + writer.newLine(); + writer.newLine(); + writer.newLine(); + writer.append("role5:").append(System.lineSeparator()); + writer.append(" cluster:").append(System.lineSeparator()); + writer.append(" - 'MONITOR'"); + } + + if (!latch.await(5, TimeUnit.SECONDS)) { + fail("Waited too long for the updated file to be picked up"); + } + + descriptors = store.roleDescriptors(Collections.singleton("role5")); + assertThat(descriptors, notNullValue()); + assertEquals(1, descriptors.size()); + Role role = Role.builder(descriptors.iterator().next(), null).build(); + assertThat(role, notNullValue()); + assertThat(role.names(), equalTo(new String[] { "role5" })); + assertThat(role.cluster().check("cluster:monitor/foo/bar"), is(true)); + assertThat(role.cluster().check("cluster:admin/foo/bar"), is(false)); + + } finally { + if (watcherService != null) { + watcherService.stop(); + } + terminate(threadPool); + } + } + + public void testThatEmptyFileDoesNotResultInLoop() throws Exception { + Path file = createTempFile(); + Files.write(file, Collections.singletonList("#"), StandardCharsets.UTF_8); + Map roles = FileRolesStore.parseFile(file, logger, Settings.EMPTY, new XPackLicenseState(Settings.EMPTY)); + assertThat(roles.keySet(), is(empty())); + } + + public void testThatInvalidRoleDefinitions() throws Exception { + Path path = getDataPath("invalid_roles.yml"); + Logger logger = CapturingLogger.newCapturingLogger(Level.ERROR); + Map roles = FileRolesStore.parseFile(path, logger, Settings.EMPTY, new XPackLicenseState(Settings.EMPTY)); + assertThat(roles.size(), is(1)); + assertThat(roles, hasKey("valid_role")); + RoleDescriptor descriptor = roles.get("valid_role"); + assertNotNull(descriptor); + Role role = Role.builder(descriptor, null).build(); + assertThat(role, notNullValue()); + assertThat(role.names(), equalTo(new String[] { "valid_role" })); + + List entries = 
CapturingLogger.output(logger.getName(), Level.ERROR); + assertThat(entries, hasSize(6)); + assertThat( + entries.get(0), + startsWith("invalid role definition [fóóbár] in roles file [" + path.toAbsolutePath() + "]. invalid role name")); + assertThat( + entries.get(1), + startsWith("invalid role definition [role1] in roles file [" + path.toAbsolutePath() + "]")); + assertThat(entries.get(2), startsWith("failed to parse role [role2]")); + assertThat(entries.get(3), startsWith("failed to parse role [role3]")); + assertThat(entries.get(4), startsWith("failed to parse role [role4]")); + assertThat(entries.get(5), startsWith("failed to parse indices privileges for role [role5]")); + } + + public void testThatRoleNamesDoesNotResolvePermissions() throws Exception { + Path path = getDataPath("invalid_roles.yml"); + Logger logger = CapturingLogger.newCapturingLogger(Level.ERROR); + Set roleNames = FileRolesStore.parseFileForRoleNames(path, logger); + assertThat(roleNames.size(), is(6)); + assertThat(roleNames, containsInAnyOrder("valid_role", "role1", "role2", "role3", "role4", "role5")); + + List events = CapturingLogger.output(logger.getName(), Level.ERROR); + assertThat(events, hasSize(1)); + assertThat( + events.get(0), + startsWith("invalid role definition [fóóbár] in roles file [" + path.toAbsolutePath() + "]. invalid role name")); + } + + public void testReservedRoles() throws Exception { + + Logger logger = CapturingLogger.newCapturingLogger(Level.INFO); + + Path path = getDataPath("reserved_roles.yml"); + Map roles = FileRolesStore.parseFile(path, logger, Settings.EMPTY, new XPackLicenseState(Settings.EMPTY)); + assertThat(roles, notNullValue()); + assertThat(roles.size(), is(1)); + + assertThat(roles, hasKey("admin")); + + List events = CapturingLogger.output(logger.getName(), Level.ERROR); + assertThat(events, notNullValue()); + assertThat(events, hasSize(4)); + // the system role will always be checked first + assertThat(events.get(0), containsString("Role [_system] is reserved")); + assertThat(events.get(1), containsString("Role [superuser] is reserved")); + assertThat(events.get(2), containsString("Role [kibana_system] is reserved")); + assertThat(events.get(3), containsString("Role [transport_client] is reserved")); + } + + public void testUsageStats() throws Exception { + Path roles = getDataPath("roles.yml"); + Path home = createTempDir(); + Path tmp = home.resolve("config/roles.yml"); + Files.createDirectories(tmp.getParent()); + try (OutputStream stream = Files.newOutputStream(tmp)) { + Files.copy(roles, stream); + } + + final boolean flsDlsEnabled = randomBoolean(); + Settings settings = Settings.builder() + .put("resource.reload.interval.high", "500ms") + .put("path.home", home) + .put(XPackSettings.DLS_FLS_ENABLED.getKey(), flsDlsEnabled) + .build(); + Environment env = TestEnvironment.newEnvironment(settings); + FileRolesStore store = new FileRolesStore(settings, env, mock(ResourceWatcherService.class), new XPackLicenseState(Settings.EMPTY)); + + Map usageStats = store.usageStats(); + + assertThat(usageStats.get("size"), is(flsDlsEnabled ? 9 : 6)); + assertThat(usageStats.get("fls"), is(flsDlsEnabled)); + assertThat(usageStats.get("dls"), is(flsDlsEnabled)); + } + + // test that we can read a role where field permissions are stored in 2.x format (fields:...) 
+ public void testBWCFieldPermissions() throws IOException { + Path path = getDataPath("roles2xformat.yml"); + byte[] bytes = Files.readAllBytes(path); + String roleString = new String(bytes, Charset.defaultCharset()); + RoleDescriptor role = FileRolesStore.parseRoleDescriptor(roleString, path, logger, true, Settings.EMPTY); + RoleDescriptor.IndicesPrivileges indicesPrivileges = role.getIndicesPrivileges()[0]; + assertThat(indicesPrivileges.getGrantedFields(), arrayContaining("foo", "boo")); + assertNull(indicesPrivileges.getDeniedFields()); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java new file mode 100644 index 0000000000000..8df2b14e3ba8e --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java @@ -0,0 +1,296 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authz.store; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.UnassignedInfo.Reason; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.SecurityLifecycleServiceField; +import org.elasticsearch.xpack.core.security.action.role.PutRoleRequest; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail; +import 
org.elasticsearch.xpack.security.test.SecurityTestUtils; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE; +import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class NativeRolesStoreTests extends ESTestCase { + + private ThreadPool threadPool; + + @Before + public void createThreadPool() { + threadPool = new TestThreadPool("index audit trail update mapping tests"); + } + + @After + public void terminateThreadPool() throws Exception { + terminate(threadPool); + } + + // test that we can read a role where field permissions are stored in 2.x format (fields:...) + public void testBWCFieldPermissions() throws IOException { + Path path = getDataPath("roles2xformat.json"); + byte[] bytes = Files.readAllBytes(path); + String roleString = new String(bytes, Charset.defaultCharset()); + RoleDescriptor role = NativeRolesStore.transformRole(RoleDescriptor.ROLE_TYPE + "role1", + new BytesArray(roleString), logger, new XPackLicenseState(Settings.EMPTY)); + assertNotNull(role); + assertNotNull(role.getIndicesPrivileges()); + RoleDescriptor.IndicesPrivileges indicesPrivileges = role.getIndicesPrivileges()[0]; + assertThat(indicesPrivileges.getGrantedFields(), arrayContaining("foo", "boo")); + assertNull(indicesPrivileges.getDeniedFields()); + } + + public void testRoleDescriptorWithFlsDlsLicensing() throws IOException { + XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(false); + RoleDescriptor flsRole = new RoleDescriptor("fls", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().privileges("READ").indices("*") + .grantedFields("*") + .deniedFields("foo") + .build() }, + null); + assertFalse(flsRole.getTransientMetadata().containsKey("unlicensed_features")); + + BytesReference matchAllBytes = XContentHelper.toXContent(QueryBuilders.matchAllQuery(), XContentType.JSON, false); + + RoleDescriptor dlsRole = new RoleDescriptor("dls", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("*").privileges("READ") + .query(matchAllBytes) + .build() }, + null); + assertFalse(dlsRole.getTransientMetadata().containsKey("unlicensed_features")); + + RoleDescriptor flsDlsRole = new RoleDescriptor("fls_ dls", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("*").privileges("READ") + .grantedFields("*") + .deniedFields("foo") + .query(matchAllBytes) + .build() }, + null); + assertFalse(flsDlsRole.getTransientMetadata().containsKey("unlicensed_features")); + + RoleDescriptor noFlsDlsRole = new RoleDescriptor("no_fls_dls", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("*").privileges("READ").build() }, + null); + assertFalse(noFlsDlsRole.getTransientMetadata().containsKey("unlicensed_features")); + + XContentBuilder builder = 
flsRole.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS); + BytesReference bytes = BytesReference.bytes(builder); + RoleDescriptor role = NativeRolesStore.transformRole(RoleDescriptor.ROLE_TYPE + "-fls", bytes, logger, licenseState); + assertNotNull(role); + assertTrue(role.getTransientMetadata().containsKey("unlicensed_features")); + assertThat(role.getTransientMetadata().get("unlicensed_features"), instanceOf(List.class)); + assertThat((List) role.getTransientMetadata().get("unlicensed_features"), contains("fls")); + + builder = dlsRole.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS); + bytes = BytesReference.bytes(builder); + role = NativeRolesStore.transformRole(RoleDescriptor.ROLE_TYPE + "dls", bytes, logger, licenseState); + assertNotNull(role); + assertTrue(role.getTransientMetadata().containsKey("unlicensed_features")); + assertThat(role.getTransientMetadata().get("unlicensed_features"), instanceOf(List.class)); + assertThat((List) role.getTransientMetadata().get("unlicensed_features"), contains("dls")); + + builder = flsDlsRole.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS); + bytes = BytesReference.bytes(builder); + role = NativeRolesStore.transformRole(RoleDescriptor.ROLE_TYPE + "fls_dls", bytes, logger, licenseState); + assertNotNull(role); + assertTrue(role.getTransientMetadata().containsKey("unlicensed_features")); + assertThat(role.getTransientMetadata().get("unlicensed_features"), instanceOf(List.class)); + assertThat((List) role.getTransientMetadata().get("unlicensed_features"), contains("fls", "dls")); + + builder = noFlsDlsRole.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS); + bytes = BytesReference.bytes(builder); + role = NativeRolesStore.transformRole(RoleDescriptor.ROLE_TYPE + "no_fls_dls", bytes, logger, licenseState); + assertNotNull(role); + assertFalse(role.getTransientMetadata().containsKey("unlicensed_features")); + + when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true); + builder = flsRole.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS); + bytes = BytesReference.bytes(builder); + role = NativeRolesStore.transformRole(RoleDescriptor.ROLE_TYPE + "fls", bytes, logger, licenseState); + assertNotNull(role); + assertFalse(role.getTransientMetadata().containsKey("unlicensed_features")); + + builder = dlsRole.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS); + bytes = BytesReference.bytes(builder); + role = NativeRolesStore.transformRole(RoleDescriptor.ROLE_TYPE + "dls", bytes, logger, licenseState); + assertNotNull(role); + assertFalse(role.getTransientMetadata().containsKey("unlicensed_features")); + + builder = flsDlsRole.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS); + bytes = BytesReference.bytes(builder); + role = NativeRolesStore.transformRole(RoleDescriptor.ROLE_TYPE + "fls_dls", bytes, logger, licenseState); + assertNotNull(role); + assertFalse(role.getTransientMetadata().containsKey("unlicensed_features")); + + builder = noFlsDlsRole.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS); + bytes = BytesReference.bytes(builder); + role = NativeRolesStore.transformRole(RoleDescriptor.ROLE_TYPE + "no_fls_dls", bytes, logger, licenseState); + assertNotNull(role); + 
assertFalse(role.getTransientMetadata().containsKey("unlicensed_features")); + } + + public void testPutOfRoleWithFlsDlsUnlicensed() throws IOException { + final Client client = mock(Client.class); + final ClusterService clusterService = mock(ClusterService.class); + final XPackLicenseState licenseState = mock(XPackLicenseState.class); + final AtomicBoolean methodCalled = new AtomicBoolean(false); + final SecurityLifecycleService securityLifecycleService = + new SecurityLifecycleService(Settings.EMPTY, clusterService, threadPool, client, + mock(IndexAuditTrail.class)); + final NativeRolesStore rolesStore = new NativeRolesStore(Settings.EMPTY, client, licenseState, securityLifecycleService) { + @Override + void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final ActionListener listener) { + if (methodCalled.compareAndSet(false, true)) { + listener.onResponse(true); + } else { + fail("method called more than once!"); + } + } + }; + // setup the roles store so the security index exists + securityLifecycleService.clusterChanged(new ClusterChangedEvent( + "fls_dls_license", getClusterStateWithSecurityIndex(), getEmptyClusterState())); + + PutRoleRequest putRoleRequest = new PutRoleRequest(); + RoleDescriptor flsRole = new RoleDescriptor("fls", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().privileges("READ").indices("*") + .grantedFields("*") + .deniedFields("foo") + .build() }, + null); + PlainActionFuture future = new PlainActionFuture<>(); + rolesStore.putRole(putRoleRequest, flsRole, future); + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, future::actionGet); + assertThat(e.getMessage(), containsString("field and document level security")); + BytesReference matchAllBytes = XContentHelper.toXContent(QueryBuilders.matchAllQuery(), XContentType.JSON, false); + + RoleDescriptor dlsRole = new RoleDescriptor("dls", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("*").privileges("READ") + .query(matchAllBytes) + .build() }, + null); + future = new PlainActionFuture<>(); + rolesStore.putRole(putRoleRequest, dlsRole, future); + e = expectThrows(ElasticsearchSecurityException.class, future::actionGet); + assertThat(e.getMessage(), containsString("field and document level security")); + + RoleDescriptor flsDlsRole = new RoleDescriptor("fls_ dls", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("*").privileges("READ") + .grantedFields("*") + .deniedFields("foo") + .query(matchAllBytes) + .build() }, + null); + future = new PlainActionFuture<>(); + rolesStore.putRole(putRoleRequest, flsDlsRole, future); + e = expectThrows(ElasticsearchSecurityException.class, future::actionGet); + assertThat(e.getMessage(), containsString("field and document level security")); + + RoleDescriptor noFlsDlsRole = new RoleDescriptor("no_fls_dls", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("*").privileges("READ").build() }, + null); + future = new PlainActionFuture<>(); + rolesStore.putRole(putRoleRequest, noFlsDlsRole, future); + assertTrue(future.actionGet()); + } + + private ClusterState getClusterStateWithSecurityIndex() { + final boolean withAlias = randomBoolean(); + final String securityIndexName = SECURITY_INDEX_NAME + (withAlias ? 
"-" + randomAlphaOfLength(5) : ""); + + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + MetaData metaData = MetaData.builder() + .put(IndexMetaData.builder(securityIndexName).settings(settings)) + .put(new IndexTemplateMetaData(SecurityLifecycleServiceField.SECURITY_TEMPLATE_NAME, 0, 0, + Collections.singletonList(securityIndexName), Settings.EMPTY, ImmutableOpenMap.of(), + ImmutableOpenMap.of(), ImmutableOpenMap.of())) + .build(); + + if (withAlias) { + metaData = SecurityTestUtils.addAliasToMetaData(metaData, securityIndexName); + } + + Index index = new Index(securityIndexName, UUID.randomUUID().toString()); + ShardRouting shardRouting = ShardRouting.newUnassigned(new ShardId(index, 0), true, EXISTING_STORE_INSTANCE, + new UnassignedInfo(Reason.INDEX_CREATED, "")); + IndexShardRoutingTable table = new IndexShardRoutingTable.Builder(new ShardId(index, 0)) + .addShard(shardRouting.initialize(randomAlphaOfLength(8), null, shardRouting.getExpectedShardSize()).moveToStarted()) + .build(); + RoutingTable routingTable = RoutingTable.builder() + .add(IndexRoutingTable + .builder(index) + .addIndexShard(table) + .build()) + .build(); + + ClusterState clusterState = ClusterState.builder(new ClusterName(NativeRolesStoreTests.class.getName())) + .metaData(metaData) + .routingTable(routingTable) + .build(); + + return clusterState; + } + + private ClusterState getEmptyClusterState() { + return ClusterState.builder(new ClusterName(NativeRolesStoreTests.class.getName())).build(); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/RestRequestFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/RestRequestFilterTests.java new file mode 100644 index 0000000000000..335673f1c0cbb --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/RestRequestFilterTests.java @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.rest; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.xpack.core.security.rest.RestRequestFilter; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.Collections; +import java.util.Map; + +public class RestRequestFilterTests extends ESTestCase { + + public void testFilteringItemsInSubLevels() throws IOException { + BytesReference content = new BytesArray("{\"root\": {\"second\": {\"third\": \"password\", \"foo\": \"bar\"}}}"); + RestRequestFilter filter = () -> Collections.singleton("root.second.third"); + FakeRestRequest restRequest = + new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent(content, XContentType.JSON).build(); + RestRequest filtered = filter.getFilteredRequest(restRequest); + assertNotEquals(content, filtered.content()); + + Map map = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, filtered.content().streamInput()).map(); + Map root = (Map) map.get("root"); + assertNotNull(root); + Map second = (Map) root.get("second"); + assertNotNull(second); + assertEquals("bar", second.get("foo")); + assertNull(second.get("third")); + } + + public void testFilteringItemsInSubLevelsWithWildCard() throws IOException { + BytesReference content = new BytesArray("{\"root\": {\"second\": {\"third\": \"password\", \"foo\": \"bar\"}}}"); + RestRequestFilter filter = () -> Collections.singleton("root.*.third"); + FakeRestRequest restRequest = + new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent(content, XContentType.JSON).build(); + RestRequest filtered = filter.getFilteredRequest(restRequest); + assertNotEquals(content, filtered.content()); + + Map map = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, filtered.content().streamInput()).map(); + Map root = (Map) map.get("root"); + assertNotNull(root); + Map second = (Map) root.get("second"); + assertNotNull(second); + assertEquals("bar", second.get("foo")); + assertNull(second.get("third")); + } + + public void testFilteringItemsInSubLevelsWithLeadingWildCard() throws IOException { + BytesReference content = new BytesArray("{\"root\": {\"second\": {\"third\": \"password\", \"foo\": \"bar\"}}}"); + RestRequestFilter filter = () -> Collections.singleton("*.third"); + FakeRestRequest restRequest = + new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent(content, XContentType.JSON).build(); + RestRequest filtered = filter.getFilteredRequest(restRequest); + assertNotEquals(content, filtered.content()); + + Map map = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, filtered.content().streamInput()).map(); + Map root = (Map) map.get("root"); + assertNotNull(root); + Map second = (Map) root.get("second"); + assertNotNull(second); + assertEquals("bar", second.get("foo")); + assertNull(second.get("third")); + } + + public void testRemoteAddressWorks() throws IOException { + 
BytesReference content = new BytesArray("{\"root\": {\"second\": {\"third\": \"password\", \"foo\": \"bar\"}}}"); + RestRequestFilter filter = () -> Collections.singleton("*.third"); + InetSocketAddress address = new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 32768); + FakeRestRequest restRequest = + new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent(content, XContentType.JSON) + .withRemoteAddress(address).build(); + RestRequest filtered = filter.getFilteredRequest(restRequest); + assertEquals(address, filtered.getRemoteAddress()); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java new file mode 100644 index 0000000000000..2857aee9b61ad --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java @@ -0,0 +1,166 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.rest; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import org.elasticsearch.xpack.core.security.rest.RestRequestFilter; +import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.security.authc.AuthenticationService; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.util.Collections; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.xpack.core.security.support.Exceptions.authenticationError; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +public class SecurityRestFilterTests extends ESTestCase { + + private AuthenticationService authcService; + private RestChannel channel; + private SecurityRestFilter filter; + private XPackLicenseState licenseState; + private RestHandler restHandler; + + @Before + public void init() throws Exception { + authcService = mock(AuthenticationService.class); + channel = 
mock(RestChannel.class); + licenseState = mock(XPackLicenseState.class); + when(licenseState.isAuthAllowed()).thenReturn(true); + when(licenseState.isSecurityEnabled()).thenReturn(true); + restHandler = mock(RestHandler.class); + filter = new SecurityRestFilter(licenseState, + new ThreadContext(Settings.EMPTY), authcService, restHandler, false); + } + + public void testProcess() throws Exception { + RestRequest request = mock(RestRequest.class); + Authentication authentication = mock(Authentication.class); + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[1]; + callback.onResponse(authentication); + return Void.TYPE; + }).when(authcService).authenticate(eq(request), any(ActionListener.class)); + filter.handleRequest(request, channel, null); + verify(restHandler).handleRequest(request, channel, null); + verifyZeroInteractions(channel); + } + + public void testProcessBasicLicense() throws Exception { + RestRequest request = mock(RestRequest.class); + when(licenseState.isAuthAllowed()).thenReturn(false); + filter.handleRequest(request, channel, null); + verify(restHandler).handleRequest(request, channel, null); + verifyZeroInteractions(channel, authcService); + } + + public void testProcessAuthenticationError() throws Exception { + RestRequest request = mock(RestRequest.class); + Exception exception = authenticationError("failed authc"); + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[1]; + callback.onFailure(exception); + return Void.TYPE; + }).when(authcService).authenticate(eq(request), any(ActionListener.class)); + when(channel.request()).thenReturn(request); + when(channel.newErrorBuilder()).thenReturn(JsonXContent.contentBuilder()); + filter.handleRequest(request, channel, null); + ArgumentCaptor response = ArgumentCaptor.forClass(BytesRestResponse.class); + verify(channel).sendResponse(response.capture()); + assertEquals(RestStatus.UNAUTHORIZED, response.getValue().status()); + verifyZeroInteractions(restHandler); + } + + public void testProcessOptionsMethod() throws Exception { + RestRequest request = mock(RestRequest.class); + when(request.method()).thenReturn(RestRequest.Method.OPTIONS); + filter.handleRequest(request, channel, null); + verify(restHandler).handleRequest(request, channel, null); + verifyZeroInteractions(channel); + verifyZeroInteractions(authcService); + } + + public void testProcessFiltersBodyCorrectly() throws Exception { + FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY) + .withContent(new BytesArray("{\"password\": \"" + SecuritySettingsSourceField.TEST_PASSWORD + "\", \"foo\": \"bar\"}"), + XContentType.JSON).build(); + when(channel.request()).thenReturn(restRequest); + SetOnce handlerRequest = new SetOnce<>(); + restHandler = new FilteredRestHandler() { + @Override + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { + handlerRequest.set(request); + } + + @Override + public Set getFilteredFields() { + return Collections.singleton("password"); + } + }; + SetOnce authcServiceRequest = new SetOnce<>(); + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[1]; + authcServiceRequest.set((RestRequest)i.getArguments()[0]); + callback.onResponse(new Authentication(XPackUser.INSTANCE, new RealmRef("test", "test", "t"), null)); + return Void.TYPE; + }).when(authcService).authenticate(any(RestRequest.class), any(ActionListener.class)); + filter = new SecurityRestFilter(licenseState, 
new ThreadContext(Settings.EMPTY), authcService, restHandler, false); + + filter.handleRequest(restRequest, channel, null); + + assertEquals(restRequest, handlerRequest.get()); + assertEquals(restRequest.content(), handlerRequest.get().content()); + Map original = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, handlerRequest.get().content().streamInput()).map(); + assertEquals(2, original.size()); + assertEquals(SecuritySettingsSourceField.TEST_PASSWORD, original.get("password")); + assertEquals("bar", original.get("foo")); + + assertNotEquals(restRequest, authcServiceRequest.get()); + assertNotEquals(restRequest.content(), authcServiceRequest.get().content()); + + Map map = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + authcServiceRequest.get().content().streamInput()).map(); + assertEquals(1, map.size()); + assertEquals("bar", map.get("foo")); + } + + private interface FilteredRestHandler extends RestHandler, RestRequestFilter {} +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateActionTests.java new file mode 100644 index 0000000000000..a35af3c749fa7 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateActionTests.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.rest.action; + +import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.test.rest.yaml.ObjectPath; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.security.authz.AuthorizationService; +import org.junit.BeforeClass; + +import java.util.List; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class RestAuthenticateActionTests extends SecurityIntegTestCase { + + private static boolean anonymousEnabled; + + @BeforeClass + public static void maybeEnableAnonymous() { + anonymousEnabled = randomBoolean(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder builder = Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(NetworkModule.HTTP_ENABLED.getKey(), true); + + if (anonymousEnabled) { + builder.put(AnonymousUser.USERNAME_SETTING.getKey(), "anon") + .putList(AnonymousUser.ROLES_SETTING.getKey(), SecuritySettingsSource.TEST_ROLE, "foo") + .put(AuthorizationService.ANONYMOUS_AUTHORIZATION_EXCEPTION_SETTING.getKey(), false); + } + return builder.build(); + } + + public void testAuthenticateApi() throws Exception { + Response response = getRestClient().performRequest("GET", "/_xpack/security/_authenticate", + new BasicHeader("Authorization", basicAuthHeaderValue(SecuritySettingsSource.TEST_USER_NAME, + new SecureString(SecuritySettingsSourceField.TEST_PASSWORD.toCharArray())))); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertThat(objectPath.evaluate("username").toString(), equalTo(SecuritySettingsSource.TEST_USER_NAME)); + @SuppressWarnings("unchecked") + List roles = objectPath.evaluate("roles"); + assertThat(roles.size(), is(1)); + assertThat(roles, contains(SecuritySettingsSource.TEST_ROLE)); + } + + public void testAuthenticateApiWithoutAuthentication() throws Exception { + try { + Response response = getRestClient().performRequest("GET", "/_xpack/security/_authenticate"); + if (anonymousEnabled) { + assertThat(response.getStatusLine().getStatusCode(), is(200)); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + assertThat(objectPath.evaluate("username").toString(), equalTo("anon")); + @SuppressWarnings("unchecked") + List roles = (List) objectPath.evaluate("roles"); + assertThat(roles.size(), is(2)); + assertThat(roles, contains(SecuritySettingsSource.TEST_ROLE, "foo")); + } else { + fail("request should have failed"); + } + } catch(ResponseException e) { + if (anonymousEnabled) { + fail("request should have succeeded"); + } else { + assertThat(e.getResponse().getStatusLine().getStatusCode(), is(401)); + } + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java new file mode 100644 index 0000000000000..c78d0a64745ee --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.rest.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestChannel; +import org.elasticsearch.test.rest.FakeRestRequest; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +public class SecurityBaseRestHandlerTests extends ESTestCase { + + public void testSecurityBaseRestHandlerChecksLicenseState() throws Exception { + final boolean securityEnabled = randomBoolean(); + final AtomicBoolean consumerCalled = new AtomicBoolean(false); + final XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isSecurityAvailable()).thenReturn(true); + when(licenseState.isSecurityEnabled()).thenReturn(securityEnabled); + SecurityBaseRestHandler handler = new SecurityBaseRestHandler(Settings.EMPTY, licenseState) { + + @Override + public String getName() { + return "test_xpack_security_base_action"; + } + + @Override + protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { + return channel -> { + if (consumerCalled.compareAndSet(false, true) == false) { + fail("consumerCalled was not false"); + } + }; + } + }; + FakeRestRequest fakeRestRequest = new FakeRestRequest(); + FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, randomBoolean(), securityEnabled ? 0 : 1); + NodeClient client = mock(NodeClient.class); + + assertFalse(consumerCalled.get()); + verifyZeroInteractions(licenseState); + handler.handleRequest(fakeRestRequest, fakeRestChannel, client); + + verify(licenseState).isSecurityAvailable(); + verify(licenseState).isSecurityEnabled(); + if (securityEnabled) { + assertTrue(consumerCalled.get()); + assertEquals(0, fakeRestChannel.responses().get()); + assertEquals(0, fakeRestChannel.errors().get()); + } else { + assertFalse(consumerCalled.get()); + assertEquals(0, fakeRestChannel.responses().get()); + assertEquals(1, fakeRestChannel.errors().get()); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenActionTests.java new file mode 100644 index 0000000000000..9e99d0cf1563c --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenActionTests.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.rest.action.oauth2; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.rest.AbstractRestChannel; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenRequest; +import org.elasticsearch.xpack.core.security.action.token.CreateTokenResponse; +import org.elasticsearch.xpack.core.security.support.NoOpLogger; +import org.elasticsearch.xpack.security.rest.action.oauth2.RestGetTokenAction.CreateTokenResponseActionListener; + +import java.util.Map; + +import static org.hamcrest.Matchers.hasEntry; + +public class RestGetTokenActionTests extends ESTestCase { + + public void testListenerHandlesExceptionProperly() { + FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY) + .build(); + final SetOnce responseSetOnce = new SetOnce<>(); + RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + @Override + public void sendResponse(RestResponse restResponse) { + responseSetOnce.set(restResponse); + } + }; + CreateTokenResponseActionListener listener = new CreateTokenResponseActionListener(restChannel, restRequest, NoOpLogger.INSTANCE); + + ActionRequestValidationException ve = new CreateTokenRequest(null, null, null, null, null).validate(); + listener.onFailure(ve); + RestResponse response = responseSetOnce.get(); + assertNotNull(response); + + Map map = XContentHelper.convertToMap(response.content(), false, + XContentType.fromMediaType(response.contentType())).v2(); + assertThat(map, hasEntry("error", "unsupported_grant_type")); + assertThat(map, hasEntry("error_description", ve.getMessage())); + assertEquals(2, map.size()); + assertEquals(RestStatus.BAD_REQUEST, response.status()); + } + + public void testSendResponse() { + FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).build(); + final SetOnce responseSetOnce = new SetOnce<>(); + RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + @Override + public void sendResponse(RestResponse restResponse) { + responseSetOnce.set(restResponse); + } + }; + CreateTokenResponseActionListener listener = new CreateTokenResponseActionListener(restChannel, restRequest, NoOpLogger.INSTANCE); + CreateTokenResponse createTokenResponse = + new CreateTokenResponse(randomAlphaOfLengthBetween(1, 256), TimeValue.timeValueHours(1L), null, randomAlphaOfLength(4)); + listener.onResponse(createTokenResponse); + + RestResponse response = responseSetOnce.get(); + assertNotNull(response); + + Map map = XContentHelper.convertToMap(response.content(), false, + XContentType.fromMediaType(response.contentType())).v2(); + assertEquals(RestStatus.OK, response.status()); + 
assertThat(map, hasEntry("type", "Bearer")); + assertThat(map, hasEntry("access_token", createTokenResponse.getTokenString())); + assertThat(map, hasEntry("expires_in", Math.toIntExact(createTokenResponse.getExpiresIn().seconds()))); + assertThat(map, hasEntry("refresh_token", createTokenResponse.getRefreshToken())); + assertEquals(4, map.size()); + } + + public void testParser() throws Exception { + final String request = "{" + + "\"grant_type\": \"password\"," + + "\"username\": \"user1\"," + + "\"password\": \"" + SecuritySettingsSourceField.TEST_PASSWORD + "\"," + + "\"scope\": \"FULL\"" + + "}"; + try (XContentParser parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, request)) { + CreateTokenRequest createTokenRequest = RestGetTokenAction.PARSER.parse(parser, null); + assertEquals("password", createTokenRequest.getGrantType()); + assertEquals("user1", createTokenRequest.getUsername()); + assertEquals("FULL", createTokenRequest.getScope()); + assertTrue(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING.equals(createTokenRequest.getPassword())); + } + } + + public void testParserRefreshRequest() throws Exception { + final String token = randomAlphaOfLengthBetween(4, 32); + final String request = "{" + + "\"grant_type\": \"refresh_token\"," + + "\"refresh_token\": \"" + token + "\"," + + "\"scope\": \"FULL\"" + + "}"; + try (XContentParser parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, request)) { + CreateTokenRequest createTokenRequest = RestGetTokenAction.PARSER.parse(parser, null); + assertEquals("refresh_token", createTokenRequest.getGrantType()); + assertEquals(token, createTokenRequest.getRefreshToken()); + assertEquals("FULL", createTokenRequest.getScope()); + assertNull(createTokenRequest.getUsername()); + assertNull(createTokenRequest.getPassword()); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/saml/SamlBaseRestHandlerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/saml/SamlBaseRestHandlerTests.java new file mode 100644 index 0000000000000..5942c206cac98 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/saml/SamlBaseRestHandlerTests.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.rest.action.saml; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.TestUtils; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.xpack.core.XPackSettings; +import org.hamcrest.Matchers; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.instanceOf; + +public class SamlBaseRestHandlerTests extends ESTestCase { + + public void testSamlAvailableOnTrialAndPlatinum() { + final SamlBaseRestHandler handler = buildHandler(randomFrom(License.OperationMode.TRIAL, License.OperationMode.PLATINUM)); + assertThat(handler.checkFeatureAvailable(new FakeRestRequest()), Matchers.nullValue()); + } + + public void testSecurityNotAvailableOnBasic() { + final SamlBaseRestHandler handler = buildHandler(License.OperationMode.BASIC); + Exception e = handler.checkFeatureAvailable(new FakeRestRequest()); + assertThat(e, instanceOf(ElasticsearchException.class)); + ElasticsearchException elasticsearchException = (ElasticsearchException) e; + assertThat(elasticsearchException.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), contains("security")); + } + + public void testSamlNotAvailableOnStandardOrGold() { + final SamlBaseRestHandler handler = buildHandler(randomFrom(License.OperationMode.STANDARD, License.OperationMode.GOLD)); + Exception e = handler.checkFeatureAvailable(new FakeRestRequest()); + assertThat(e, instanceOf(ElasticsearchException.class)); + ElasticsearchException elasticsearchException = (ElasticsearchException) e; + assertThat(elasticsearchException.getMetadata(LicenseUtils.EXPIRED_FEATURE_METADATA), contains("saml")); + } + + private SamlBaseRestHandler buildHandler(License.OperationMode licenseMode) { + final Settings settings = Settings.builder() + .put(XPackSettings.SECURITY_ENABLED.getKey(), true) + .build(); + final TestUtils.UpdatableLicenseState licenseState = new TestUtils.UpdatableLicenseState(settings); + licenseState.update(licenseMode, true); + + return new SamlBaseRestHandler(settings, licenseState) { + + @Override + public String getName() { + return "saml_test"; + } + + @Override + protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) { + return null; + } + }; + } + +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/HasPrivilegesRestResponseTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/HasPrivilegesRestResponseTests.java new file mode 100644 index 0000000000000..601cabf4f846a --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/HasPrivilegesRestResponseTests.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.rest.action.user; + +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedHashMap; + +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse; +import org.elasticsearch.xpack.security.rest.action.user.RestHasPrivilegesAction.HasPrivilegesRestResponseBuilder; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; + +public class HasPrivilegesRestResponseTests extends ESTestCase { + + public void testBuildValidJsonResponse() throws Exception { + final HasPrivilegesRestResponseBuilder response = new HasPrivilegesRestResponseBuilder("daredevil", mock(RestChannel.class)); + final HasPrivilegesResponse actionResponse = new HasPrivilegesResponse(false, + Collections.singletonMap("manage", true), + Arrays.asList( + new HasPrivilegesResponse.IndexPrivileges("staff", + MapBuilder.newMapBuilder(new LinkedHashMap<>()) + .put("read", true).put("index", true).put("delete", false).put("manage", false).map()), + new HasPrivilegesResponse.IndexPrivileges("customers", + MapBuilder.newMapBuilder(new LinkedHashMap<>()) + .put("read", true).put("index", true).put("delete", true).put("manage", false).map()) + )); + final XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent()); + final RestResponse rest = response.buildResponse(actionResponse, builder); + + assertThat(rest, instanceOf(BytesRestResponse.class)); + + final String json = rest.content().utf8ToString(); + assertThat(json, equalTo("{" + + "\"username\":\"daredevil\"," + + "\"has_all_requested\":false," + + "\"cluster\":{\"manage\":true}," + + "\"index\":{" + + "\"staff\":{\"read\":true,\"index\":true,\"delete\":false,\"manage\":false}," + + "\"customers\":{\"read\":true,\"index\":true,\"delete\":true,\"manage\":false}" + + "}}")); + } +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/IndexLifecycleManagerIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/IndexLifecycleManagerIntegTests.java new file mode 100644 index 0000000000000..4934bcd93036f --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/IndexLifecycleManagerIntegTests.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.support; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.xpack.core.security.action.user.PutUserRequest; +import org.elasticsearch.xpack.core.security.action.user.PutUserResponse; +import org.hamcrest.Matchers; +import org.junit.After; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +public class IndexLifecycleManagerIntegTests extends SecurityIntegTestCase { + + public void testConcurrentOperationsTryingToCreateSecurityIndexAndAlias() throws Exception { + assertSecurityIndexActive(); + final int processors = Runtime.getRuntime().availableProcessors(); + final int numThreads = scaledRandomIntBetween((processors + 1) / 2, 4 * processors); + final int maxNumRequests = 100 / numThreads; // bound to a maximum of 100 requests + final int numRequests = scaledRandomIntBetween(Math.min(4, maxNumRequests), maxNumRequests); + + final List<ActionFuture<PutUserResponse>> futures = new CopyOnWriteArrayList<>(); + final List<Exception> exceptions = new CopyOnWriteArrayList<>(); + final Thread[] threads = new Thread[numThreads]; + final CyclicBarrier barrier = new CyclicBarrier(threads.length); + final AtomicInteger userNumber = new AtomicInteger(0); + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + exceptions.add(e); + } + + @Override + protected void doRun() throws Exception { + final List<PutUserRequest> requests = new ArrayList<>(numRequests); + for (int i = 0; i < numRequests; i++) { + requests.add(securityClient() + .preparePutUser("user" + userNumber.getAndIncrement(), "password".toCharArray(), + randomAlphaOfLengthBetween(1, 16)) + .request()); + } + + barrier.await(10L, TimeUnit.SECONDS); + + for (PutUserRequest request : requests) { + PlainActionFuture<PutUserResponse> responsePlainActionFuture = new PlainActionFuture<>(); + securityClient().putUser(request, responsePlainActionFuture); + futures.add(responsePlainActionFuture); + } + } + }, "create_users_thread" + i); + threads[i].start(); + } + + for (Thread thread : threads) { + thread.join(); + } + + assertThat(exceptions, Matchers.empty()); + assertEquals(futures.size(), numRequests * numThreads); + for (ActionFuture<PutUserResponse> future : futures) { + assertTrue(future.actionGet().created()); + } + } + + @After + public void cleanupSecurityIndex() throws Exception { + super.deleteSecurityIndex(); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/IndexLifecycleManagerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/IndexLifecycleManagerTests.java new file mode 100644 index 0000000000000..9411042e36317 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/IndexLifecycleManagerTests.java @@ -0,0 +1,327 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.support; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.FilterClient; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.health.ClusterIndexHealth; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.security.test.SecurityTestUtils; +import org.elasticsearch.xpack.core.template.TemplateUtils; +import org.hamcrest.Matchers; +import org.junit.Before; + +import static org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE; +import static org.elasticsearch.xpack.security.support.IndexLifecycleManager.TEMPLATE_VERSION_PATTERN; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class IndexLifecycleManagerTests extends ESTestCase { + + private static final ClusterName CLUSTER_NAME = new ClusterName("index-lifecycle-manager-tests"); + private static final ClusterState EMPTY_CLUSTER_STATE = new ClusterState.Builder(CLUSTER_NAME).build(); + public static final String INDEX_NAME = "IndexLifecycleManagerTests"; + private static final String TEMPLATE_NAME = "IndexLifecycleManagerTests-template"; + private IndexLifecycleManager manager; + private Map<Action<?, ?, ?>, Map<ActionRequest, ActionListener<?>>> actions; + + @Before + public void setUpManager() { + final Client mockClient = mock(Client.class); + final ThreadPool threadPool = mock(ThreadPool.class); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + when(mockClient.threadPool()).thenReturn(threadPool); + when(mockClient.settings()).thenReturn(Settings.EMPTY); + + actions = new LinkedHashMap<>(); + final Client client = new FilterClient(mockClient) { + @Override + protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> + void doExecute(Action<Request, Response, RequestBuilder> action, Request request, + ActionListener<Response> listener) { + final Map<ActionRequest, ActionListener<?>> map = 
actions.getOrDefault(action, new HashMap<>()); + map.put(request, listener); + actions.put(action, map); + } + }; + manager = new IndexLifecycleManager(Settings.EMPTY, client, INDEX_NAME); + } + + public void testIndexWithUpToDateMappingAndTemplate() throws IOException { + assertInitialState(); + + final ClusterState.Builder clusterStateBuilder = createClusterState(INDEX_NAME, TEMPLATE_NAME); + markShardsAvailable(clusterStateBuilder); + manager.clusterChanged(event(clusterStateBuilder)); + + assertThat(manager.indexExists(), Matchers.equalTo(true)); + assertThat(manager.isAvailable(), Matchers.equalTo(true)); + assertThat(manager.isMappingUpToDate(), Matchers.equalTo(true)); + } + + public void testIndexWithoutPrimaryShards() throws IOException { + assertInitialState(); + + final ClusterState.Builder clusterStateBuilder = createClusterState(INDEX_NAME, TEMPLATE_NAME); + Index index = new Index(INDEX_NAME, UUID.randomUUID().toString()); + ShardRouting shardRouting = ShardRouting.newUnassigned(new ShardId(index, 0), true, EXISTING_STORE_INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")); + String nodeId = ESTestCase.randomAlphaOfLength(8); + IndexShardRoutingTable table = new IndexShardRoutingTable.Builder(new ShardId(index, 0)) + .addShard(shardRouting.initialize(nodeId, null, shardRouting.getExpectedShardSize()) + .moveToUnassigned(new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, ""))) + .build(); + clusterStateBuilder.routingTable(RoutingTable.builder() + .add(IndexRoutingTable.builder(index).addIndexShard(table).build()) + .build()); + manager.clusterChanged(event(clusterStateBuilder)); + + assertIndexUpToDateButNotAvailable(); + } + + private ClusterChangedEvent event(ClusterState.Builder clusterStateBuilder) { + return new ClusterChangedEvent("test-event", clusterStateBuilder.build(), EMPTY_CLUSTER_STATE); + } + + public void testIndexHealthChangeListeners() throws Exception { + final AtomicBoolean listenerCalled = new AtomicBoolean(false); + final AtomicReference previousHealth = new AtomicReference<>(); + final AtomicReference currentHealth = new AtomicReference<>(); + final BiConsumer listener = (prevState, state) -> { + previousHealth.set(prevState); + currentHealth.set(state); + listenerCalled.set(true); + }; + + if (randomBoolean()) { + if (randomBoolean()) { + manager.addIndexHealthChangeListener(listener); + manager.addIndexHealthChangeListener((prevState, state) -> { + throw new RuntimeException("throw after listener"); + }); + } else { + manager.addIndexHealthChangeListener((prevState, state) -> { + throw new RuntimeException("throw before listener"); + }); + manager.addIndexHealthChangeListener(listener); + } + } else { + manager.addIndexHealthChangeListener(listener); + } + + // index doesn't exist and now exists + final ClusterState.Builder clusterStateBuilder = createClusterState(INDEX_NAME, TEMPLATE_NAME); + markShardsAvailable(clusterStateBuilder); + manager.clusterChanged(event(clusterStateBuilder)); + + assertTrue(listenerCalled.get()); + assertNull(previousHealth.get()); + assertEquals(ClusterHealthStatus.GREEN, currentHealth.get().getStatus()); + + // reset and call with no change to the index + listenerCalled.set(false); + previousHealth.set(null); + currentHealth.set(null); + ClusterChangedEvent event = new ClusterChangedEvent("same index health", clusterStateBuilder.build(), clusterStateBuilder.build()); + manager.clusterChanged(event); + + assertFalse(listenerCalled.get()); + assertNull(previousHealth.get()); + 
assertNull(currentHealth.get()); + + // index with different health + listenerCalled.set(false); + previousHealth.set(null); + currentHealth.set(null); + ClusterState previousState = clusterStateBuilder.build(); + Index prevIndex = previousState.getRoutingTable().index(INDEX_NAME).getIndex(); + clusterStateBuilder.routingTable(RoutingTable.builder() + .add(IndexRoutingTable.builder(prevIndex) + .addIndexShard(new IndexShardRoutingTable.Builder(new ShardId(prevIndex, 0)) + .addShard(ShardRouting.newUnassigned(new ShardId(prevIndex, 0), true, EXISTING_STORE_INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")) + .initialize(UUIDs.randomBase64UUID(random()), null, 0L) + .moveToUnassigned(new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, ""))) + .build())) + .build()); + + + + event = new ClusterChangedEvent("different index health", clusterStateBuilder.build(), previousState); + manager.clusterChanged(event); + assertTrue(listenerCalled.get()); + assertEquals(ClusterHealthStatus.GREEN, previousHealth.get().getStatus()); + assertEquals(ClusterHealthStatus.RED, currentHealth.get().getStatus()); + + // swap prev and current + listenerCalled.set(false); + previousHealth.set(null); + currentHealth.set(null); + event = new ClusterChangedEvent("different index health swapped", previousState, clusterStateBuilder.build()); + manager.clusterChanged(event); + assertTrue(listenerCalled.get()); + assertEquals(ClusterHealthStatus.RED, previousHealth.get().getStatus()); + assertEquals(ClusterHealthStatus.GREEN, currentHealth.get().getStatus()); + } + + public void testIndexOutOfDateListeners() throws Exception { + final AtomicBoolean listenerCalled = new AtomicBoolean(false); + manager.clusterChanged(event(new ClusterState.Builder(CLUSTER_NAME))); + manager.addIndexOutOfDateListener((prev, current) -> { + listenerCalled.set(true); + assertNotEquals(prev, current); + }); + assertTrue(manager.isIndexUpToDate()); + + manager.clusterChanged(event(new ClusterState.Builder(CLUSTER_NAME))); + assertFalse(listenerCalled.get()); + assertTrue(manager.isIndexUpToDate()); + + // index doesn't exist and now exists with wrong format + ClusterState.Builder clusterStateBuilder = createClusterState(INDEX_NAME, TEMPLATE_NAME, + IndexLifecycleManager.INTERNAL_INDEX_FORMAT - 1); + markShardsAvailable(clusterStateBuilder); + manager.clusterChanged(event(clusterStateBuilder)); + assertTrue(listenerCalled.get()); + assertFalse(manager.isIndexUpToDate()); + + listenerCalled.set(false); + assertFalse(listenerCalled.get()); + manager.clusterChanged(event(new ClusterState.Builder(CLUSTER_NAME))); + assertTrue(listenerCalled.get()); + assertTrue(manager.isIndexUpToDate()); + + listenerCalled.set(false); + // index doesn't exist and now exists with correct format + clusterStateBuilder = createClusterState(INDEX_NAME, TEMPLATE_NAME, IndexLifecycleManager.INTERNAL_INDEX_FORMAT); + markShardsAvailable(clusterStateBuilder); + manager.clusterChanged(event(clusterStateBuilder)); + assertFalse(listenerCalled.get()); + assertTrue(manager.isIndexUpToDate()); + } + + private void assertInitialState() { + assertThat(manager.indexExists(), Matchers.equalTo(false)); + assertThat(manager.isAvailable(), Matchers.equalTo(false)); + assertThat(manager.isMappingUpToDate(), Matchers.equalTo(false)); + } + + private void assertIndexUpToDateButNotAvailable() { + assertThat(manager.indexExists(), Matchers.equalTo(true)); + assertThat(manager.isAvailable(), Matchers.equalTo(false)); + assertThat(manager.isMappingUpToDate(), 
Matchers.equalTo(true)); + } + + public static ClusterState.Builder createClusterState(String indexName, String templateName) throws IOException { + return createClusterState(indexName, templateName, templateName, IndexLifecycleManager.INTERNAL_INDEX_FORMAT); + } + + public static ClusterState.Builder createClusterState(String indexName, String templateName, int format) throws IOException { + return createClusterState(indexName, templateName, templateName, format); + } + + private static ClusterState.Builder createClusterState(String indexName, String templateName, String buildMappingFrom, int format) + throws IOException { + IndexTemplateMetaData.Builder templateBuilder = getIndexTemplateMetaData(templateName); + IndexMetaData.Builder indexMeta = getIndexMetadata(indexName, buildMappingFrom, format); + + MetaData.Builder metaDataBuilder = new MetaData.Builder(); + metaDataBuilder.put(templateBuilder); + metaDataBuilder.put(indexMeta); + + return ClusterState.builder(state()).metaData(metaDataBuilder.build()); + } + + private void markShardsAvailable(ClusterState.Builder clusterStateBuilder) { + clusterStateBuilder.routingTable(SecurityTestUtils.buildIndexRoutingTable(INDEX_NAME)); + } + + private static ClusterState state() { + final DiscoveryNodes nodes = DiscoveryNodes.builder().masterNodeId("1").localNodeId("1").build(); + return ClusterState.builder(CLUSTER_NAME) + .nodes(nodes) + .metaData(MetaData.builder().generateClusterUuidIfNeeded()) + .build(); + } + + private static IndexMetaData.Builder getIndexMetadata(String indexName, String templateName, int format) throws IOException { + IndexMetaData.Builder indexMetaData = IndexMetaData.builder(indexName); + indexMetaData.settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), format) + .build()); + + final Map mappings = getTemplateMappings(templateName); + for (Map.Entry entry : mappings.entrySet()) { + indexMetaData.putMapping(entry.getKey(), entry.getValue()); + } + + return indexMetaData; + } + + private static IndexTemplateMetaData.Builder getIndexTemplateMetaData(String templateName) throws IOException { + final Map mappings = getTemplateMappings(templateName); + IndexTemplateMetaData.Builder templateBuilder = IndexTemplateMetaData.builder(TEMPLATE_NAME) + .patterns(Arrays.asList(generateRandomStringArray(10, 100, false, false))); + for (Map.Entry entry : mappings.entrySet()) { + templateBuilder.putMapping(entry.getKey(), entry.getValue()); + } + return templateBuilder; + } + + private static Map getTemplateMappings(String templateName) { + String template = loadTemplate(templateName); + PutIndexTemplateRequest request = new PutIndexTemplateRequest(); + request.source(template, XContentType.JSON); + return request.mappings(); + } + + private static String loadTemplate(String templateName) { + final String resource = "/" + templateName + ".json"; + return TemplateUtils.loadTemplate(resource, Version.CURRENT.toString(), TEMPLATE_VERSION_PATTERN); + } +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityTestUtils.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityTestUtils.java new file mode 100644 index 0000000000000..63c267eb816fc --- /dev/null +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityTestUtils.java @@ -0,0 +1,154 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.test; + +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.health.ClusterIndexHealth; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.UncheckedIOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.AtomicMoveNotSupportedException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Locale; +import java.util.UUID; + +import static java.nio.file.StandardCopyOption.ATOMIC_MOVE; +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; +import static java.nio.file.StandardOpenOption.CREATE; +import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING; +import static java.nio.file.StandardOpenOption.WRITE; +import static org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE; +import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.junit.Assert.assertEquals; + +public class SecurityTestUtils { + + public static String writeFile(Path folder, String name, byte[] content) { + final Path path = folder.resolve(name); + Path tempFile = null; + try { + tempFile = Files.createTempFile(path.getParent(), path.getFileName().toString(), "tmp"); + try (OutputStream os = Files.newOutputStream(tempFile, CREATE, TRUNCATE_EXISTING, WRITE)) { + Streams.copy(content, os); + } + + try { + Files.move(tempFile, path, REPLACE_EXISTING, ATOMIC_MOVE); + } catch (final AtomicMoveNotSupportedException e) { + Files.move(tempFile, path, REPLACE_EXISTING); + } + } catch (final IOException e) { + throw new UncheckedIOException(String.format(Locale.ROOT, "could not write file [%s]", path.toAbsolutePath()), e); + } finally { + // we are ignoring exceptions here, so we do not need handle whether or not tempFile was initialized nor if the file exists + IOUtils.deleteFilesIgnoringExceptions(tempFile); + } + return path.toAbsolutePath().toString(); + } + + public static String writeFile(Path folder, String name, String content) { + return writeFile(folder, name, content.getBytes(StandardCharsets.UTF_8)); + } + + public static RoutingTable buildIndexRoutingTable(String indexName) { + Index index = new Index(indexName, UUID.randomUUID().toString()); + ShardRouting shardRouting = 
ShardRouting.newUnassigned(new ShardId(index, 0), true, EXISTING_STORE_INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")); + String nodeId = ESTestCase.randomAlphaOfLength(8); + IndexShardRoutingTable table = new IndexShardRoutingTable.Builder(new ShardId(index, 0)) + .addShard(shardRouting.initialize(nodeId, null, shardRouting.getExpectedShardSize()).moveToStarted()) + .build(); + return RoutingTable.builder() + .add(IndexRoutingTable.builder(index).addIndexShard(table).build()) + .build(); + } + + /** + * Adds the index alias {@code .security} to the underlying concrete index. + */ + public static MetaData addAliasToMetaData(MetaData metaData, String indexName) { + AliasMetaData aliasMetaData = AliasMetaData.newAliasMetaDataBuilder(SECURITY_INDEX_NAME).build(); + MetaData.Builder metaDataBuilder = new MetaData.Builder(metaData); + IndexMetaData indexMetaData = metaData.index(indexName); + metaDataBuilder.put(IndexMetaData.builder(indexMetaData).putAlias(aliasMetaData)); + return metaDataBuilder.build(); + } + + public static ClusterIndexHealth getClusterIndexHealth(ClusterHealthStatus status) { + IndexMetaData metaData = IndexMetaData.builder("foo").settings(Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .build()) + .build(); + final IndexRoutingTable routingTable; + switch (status) { + case RED: + routingTable = IndexRoutingTable.builder(metaData.getIndex()) + .addIndexShard(new IndexShardRoutingTable.Builder(new ShardId(metaData.getIndex(), 0)) + .addShard(ShardRouting.newUnassigned(new ShardId(metaData.getIndex(), 0), true, EXISTING_STORE_INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")) + .initialize(ESTestCase.randomAlphaOfLength(8), null, 0L)) + .addShard(ShardRouting.newUnassigned(new ShardId(metaData.getIndex(), 0), false, + RecoverySource.PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")) + .initialize(ESTestCase.randomAlphaOfLength(8), null, 0L)) + .build()) + .build(); + break; + case YELLOW: + routingTable = IndexRoutingTable.builder(metaData.getIndex()) + .addIndexShard(new IndexShardRoutingTable.Builder(new ShardId(metaData.getIndex(), 0)) + .addShard(ShardRouting.newUnassigned(new ShardId(metaData.getIndex(), 0), true, EXISTING_STORE_INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")) + .initialize(ESTestCase.randomAlphaOfLength(8), null, 0L).moveToStarted()) + .addShard(ShardRouting.newUnassigned(new ShardId(metaData.getIndex(), 0), false, + RecoverySource.PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")) + .initialize(ESTestCase.randomAlphaOfLength(8), null, 0L)) + .build()) + .build(); + break; + case GREEN: + routingTable = IndexRoutingTable.builder(metaData.getIndex()) + .addIndexShard(new IndexShardRoutingTable.Builder(new ShardId(metaData.getIndex(), 0)) + .addShard(ShardRouting.newUnassigned(new ShardId(metaData.getIndex(), 0), true, EXISTING_STORE_INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")) + .initialize(ESTestCase.randomAlphaOfLength(8), null, 0L).moveToStarted()) + .addShard(ShardRouting.newUnassigned(new ShardId(metaData.getIndex(), 0), false, + RecoverySource.PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "")) + .initialize(ESTestCase.randomAlphaOfLength(8), null, 0L).moveToStarted()) + .build()) + .build(); + 
break; + default: + throw new IllegalStateException("unknown status: " + status); + } + ClusterIndexHealth health = new ClusterIndexHealth(metaData, routingTable); + assertEquals(status, health.getStatus()); + return health; + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java new file mode 100644 index 0000000000000..0bc7c527df346 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java @@ -0,0 +1,368 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.action.support.DestructiveOperations; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.Transport.Connection; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportInterceptor.AsyncSender; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponse.Empty; +import org.elasticsearch.transport.TransportResponseHandler; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.authc.AuthenticationService; +import org.elasticsearch.xpack.security.authz.AuthorizationService; + +import java.util.Collections; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +public class SecurityServerTransportInterceptorTests extends ESTestCase { + + private Settings settings; + private ThreadPool threadPool; + private ThreadContext threadContext; + private XPackLicenseState xPackLicenseState; + private SecurityContext securityContext; + + @Override + public void setUp() throws Exception { + super.setUp(); + settings = Settings.builder().put("path.home", createTempDir()).build(); + threadPool = 
mock(ThreadPool.class); + threadContext = new ThreadContext(settings); + when(threadPool.getThreadContext()).thenReturn(threadContext); + securityContext = spy(new SecurityContext(settings, threadPool.getThreadContext())); + xPackLicenseState = mock(XPackLicenseState.class); + when(xPackLicenseState.isAuthAllowed()).thenReturn(true); + when(xPackLicenseState.isSecurityEnabled()).thenReturn(true); + } + + public void testSendAsyncUnlicensed() { + SecurityServerTransportInterceptor interceptor = new SecurityServerTransportInterceptor(settings, threadPool, + mock(AuthenticationService.class), mock(AuthorizationService.class), xPackLicenseState, mock(SSLService.class), + securityContext, new DestructiveOperations(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, + Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)))); + when(xPackLicenseState.isAuthAllowed()).thenReturn(false); + AtomicBoolean calledWrappedSender = new AtomicBoolean(false); + AsyncSender sender = interceptor.interceptSender(new AsyncSender() { + @Override + public void sendRequest(Transport.Connection connection, String action, TransportRequest request, + TransportRequestOptions options, TransportResponseHandler handler) { + if (calledWrappedSender.compareAndSet(false, true) == false) { + fail("sender called more than once!"); + } + } + }); + sender.sendRequest(null, null, null, null, null); + assertTrue(calledWrappedSender.get()); + verify(xPackLicenseState).isAuthAllowed(); + verify(xPackLicenseState).isSecurityEnabled(); + verifyNoMoreInteractions(xPackLicenseState); + verifyZeroInteractions(securityContext); + } + + public void testSendAsync() throws Exception { + final User authUser = randomBoolean() ? new User("authenticator") : null; + final User user = new User("test", randomRoles(), authUser); + final Authentication authentication = new Authentication(user, new RealmRef("ldap", "foo", "node1"), null); + authentication.writeToContext(threadContext); + SecurityServerTransportInterceptor interceptor = new SecurityServerTransportInterceptor(settings, threadPool, + mock(AuthenticationService.class), mock(AuthorizationService.class), xPackLicenseState, mock(SSLService.class), + securityContext, new DestructiveOperations(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, + Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)))); + + AtomicBoolean calledWrappedSender = new AtomicBoolean(false); + AtomicReference sendingUser = new AtomicReference<>(); + AsyncSender sender = interceptor.interceptSender(new AsyncSender() { + @Override + public void sendRequest(Transport.Connection connection, String action, TransportRequest request, + TransportRequestOptions options, TransportResponseHandler handler) { + if (calledWrappedSender.compareAndSet(false, true) == false) { + fail("sender called more than once!"); + } + sendingUser.set(securityContext.getUser()); + } + }); + Transport.Connection connection = mock(Transport.Connection.class); + when(connection.getVersion()).thenReturn(Version.CURRENT); + sender.sendRequest(connection, "indices:foo", null, null, null); + assertTrue(calledWrappedSender.get()); + assertEquals(user, sendingUser.get()); + assertEquals(user, securityContext.getUser()); + verify(xPackLicenseState).isAuthAllowed(); + verify(xPackLicenseState).isSecurityEnabled(); + verify(securityContext, never()).executeAsUser(any(User.class), any(Consumer.class), any(Version.class)); + verifyNoMoreInteractions(xPackLicenseState); + } + + public void testSendAsyncSwitchToSystem() throws 
Exception { + final User authUser = randomBoolean() ? new User("authenticator") : null; + final User user = new User("test", randomRoles(), authUser); + final Authentication authentication = new Authentication(user, new RealmRef("ldap", "foo", "node1"), null); + authentication.writeToContext(threadContext); + threadContext.putTransient(AuthorizationService.ORIGINATING_ACTION_KEY, "indices:foo"); + + SecurityServerTransportInterceptor interceptor = new SecurityServerTransportInterceptor(settings, threadPool, + mock(AuthenticationService.class), mock(AuthorizationService.class), xPackLicenseState, mock(SSLService.class), + securityContext, new DestructiveOperations(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, + Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)))); + + AtomicBoolean calledWrappedSender = new AtomicBoolean(false); + AtomicReference sendingUser = new AtomicReference<>(); + AsyncSender sender = interceptor.interceptSender(new AsyncSender() { + @Override + public void sendRequest(Transport.Connection connection, String action, TransportRequest request, + TransportRequestOptions options, TransportResponseHandler handler) { + if (calledWrappedSender.compareAndSet(false, true) == false) { + fail("sender called more than once!"); + } + sendingUser.set(securityContext.getUser()); + } + }); + Connection connection = mock(Connection.class); + when(connection.getVersion()).thenReturn(Version.CURRENT); + sender.sendRequest(connection, "internal:foo", null, null, null); + assertTrue(calledWrappedSender.get()); + assertNotEquals(user, sendingUser.get()); + assertEquals(SystemUser.INSTANCE, sendingUser.get()); + assertEquals(user, securityContext.getUser()); + verify(xPackLicenseState).isAuthAllowed(); + verify(xPackLicenseState).isSecurityEnabled(); + verify(securityContext).executeAsUser(any(User.class), any(Consumer.class), eq(Version.CURRENT)); + verifyNoMoreInteractions(xPackLicenseState); + } + + public void testSendWithoutUser() throws Exception { + SecurityServerTransportInterceptor interceptor = new SecurityServerTransportInterceptor(settings, threadPool, + mock(AuthenticationService.class), mock(AuthorizationService.class), xPackLicenseState, mock(SSLService.class), + securityContext, new DestructiveOperations(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, + Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)))) { + @Override + void assertNoAuthentication(String action) { + } + }; + + assertNull(securityContext.getUser()); + AsyncSender sender = interceptor.interceptSender(new AsyncSender() { + @Override + public void sendRequest(Transport.Connection connection, String action, TransportRequest request, + TransportRequestOptions options, TransportResponseHandler handler) { + fail("sender should not be called!"); + } + }); + Transport.Connection connection = mock(Transport.Connection.class); + when(connection.getVersion()).thenReturn(Version.CURRENT); + IllegalStateException e = + expectThrows(IllegalStateException.class, () -> sender.sendRequest(connection, "indices:foo", null, null, null)); + assertEquals("there should always be a user when sending a message for action [indices:foo]", e.getMessage()); + assertNull(securityContext.getUser()); + verify(xPackLicenseState).isAuthAllowed(); + verify(xPackLicenseState).isSecurityEnabled(); + verify(securityContext, never()).executeAsUser(any(User.class), any(Consumer.class), any(Version.class)); + verifyNoMoreInteractions(xPackLicenseState); + } + + public void 
testSendToNewerVersionSetsCorrectVersion() throws Exception { + final User authUser = randomBoolean() ? new User("authenticator") : null; + final User user = new User("joe", randomRoles(), authUser); + final Authentication authentication = new Authentication(user, new RealmRef("file", "file", "node1"), null); + authentication.writeToContext(threadContext); + threadContext.putTransient(AuthorizationService.ORIGINATING_ACTION_KEY, "indices:foo"); + + SecurityServerTransportInterceptor interceptor = new SecurityServerTransportInterceptor(settings, threadPool, + mock(AuthenticationService.class), mock(AuthorizationService.class), xPackLicenseState, mock(SSLService.class), + securityContext, new DestructiveOperations(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, + Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)))); + + AtomicBoolean calledWrappedSender = new AtomicBoolean(false); + AtomicReference sendingUser = new AtomicReference<>(); + AtomicReference authRef = new AtomicReference<>(); + AsyncSender intercepted = new AsyncSender() { + @Override + public void sendRequest(Transport.Connection connection, String action, TransportRequest request, + TransportRequestOptions options, TransportResponseHandler handler) { + if (calledWrappedSender.compareAndSet(false, true) == false) { + fail("sender called more than once!"); + } + sendingUser.set(securityContext.getUser()); + authRef.set(securityContext.getAuthentication()); + } + }; + AsyncSender sender = interceptor.interceptSender(intercepted); + final Version connectionVersion = Version.fromId(Version.CURRENT.id + randomIntBetween(100, 100000)); + assertEquals(Version.CURRENT, Version.min(connectionVersion, Version.CURRENT)); + + Transport.Connection connection = mock(Transport.Connection.class); + when(connection.getVersion()).thenReturn(connectionVersion); + sender.sendRequest(connection, "indices:foo[s]", null, null, null); + assertTrue(calledWrappedSender.get()); + assertEquals(user, sendingUser.get()); + assertEquals(user, securityContext.getUser()); + assertEquals(Version.CURRENT, authRef.get().getVersion()); + assertEquals(Version.CURRENT, authentication.getVersion()); + } + + public void testSendToOlderVersionSetsCorrectVersion() throws Exception { + final User authUser = randomBoolean() ? 
new User("authenticator") : null; + final User user = new User("joe", randomRoles(), authUser); + final Authentication authentication = new Authentication(user, new RealmRef("file", "file", "node1"), null); + authentication.writeToContext(threadContext); + threadContext.putTransient(AuthorizationService.ORIGINATING_ACTION_KEY, "indices:foo"); + + SecurityServerTransportInterceptor interceptor = new SecurityServerTransportInterceptor(settings, threadPool, + mock(AuthenticationService.class), mock(AuthorizationService.class), xPackLicenseState, mock(SSLService.class), + securityContext, new DestructiveOperations(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, + Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)))); + + AtomicBoolean calledWrappedSender = new AtomicBoolean(false); + AtomicReference sendingUser = new AtomicReference<>(); + AtomicReference authRef = new AtomicReference<>(); + AsyncSender intercepted = new AsyncSender() { + @Override + public void sendRequest(Transport.Connection connection, String action, TransportRequest request, + TransportRequestOptions options, TransportResponseHandler handler) { + if (calledWrappedSender.compareAndSet(false, true) == false) { + fail("sender called more than once!"); + } + sendingUser.set(securityContext.getUser()); + authRef.set(securityContext.getAuthentication()); + } + }; + AsyncSender sender = interceptor.interceptSender(intercepted); + final Version connectionVersion = Version.fromId(Version.CURRENT.id - randomIntBetween(100, 100000)); + assertEquals(connectionVersion, Version.min(connectionVersion, Version.CURRENT)); + + Transport.Connection connection = mock(Transport.Connection.class); + when(connection.getVersion()).thenReturn(connectionVersion); + sender.sendRequest(connection, "indices:foo[s]", null, null, null); + assertTrue(calledWrappedSender.get()); + assertEquals(user, sendingUser.get()); + assertEquals(user, securityContext.getUser()); + assertEquals(connectionVersion, authRef.get().getVersion()); + assertEquals(Version.CURRENT, authentication.getVersion()); + } + + public void testContextRestoreResponseHandler() throws Exception { + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + + threadContext.putTransient("foo", "bar"); + threadContext.putHeader("key", "value"); + try (ThreadContext.StoredContext storedContext = threadContext.stashContext()) { + threadContext.putTransient("foo", "different_bar"); + threadContext.putHeader("key", "value2"); + TransportResponseHandler handler = new TransportService.ContextRestoreResponseHandler<>( + threadContext.wrapRestorable(storedContext), new TransportResponseHandler() { + + @Override + public Empty newInstance() { + return Empty.INSTANCE; + } + + @Override + public void handleResponse(Empty response) { + assertEquals("bar", threadContext.getTransient("foo")); + assertEquals("value", threadContext.getHeader("key")); + } + + @Override + public void handleException(TransportException exp) { + assertEquals("bar", threadContext.getTransient("foo")); + assertEquals("value", threadContext.getHeader("key")); + } + + @Override + public String executor() { + return null; + } + }); + + handler.handleResponse(null); + handler.handleException(null); + } + } + + public void testContextRestoreResponseHandlerRestoreOriginalContext() throws Exception { + try (ThreadContext threadContext = new ThreadContext(Settings.EMPTY)) { + threadContext.putTransient("foo", "bar"); + threadContext.putHeader("key", "value"); + TransportResponseHandler handler; + try 
(ThreadContext.StoredContext ignore = threadContext.stashContext()) { + threadContext.putTransient("foo", "different_bar"); + threadContext.putHeader("key", "value2"); + handler = new TransportService.ContextRestoreResponseHandler<>(threadContext.newRestorableContext(true), + new TransportResponseHandler() { + + @Override + public Empty newInstance() { + return Empty.INSTANCE; + } + + @Override + public void handleResponse(Empty response) { + assertEquals("different_bar", threadContext.getTransient("foo")); + assertEquals("value2", threadContext.getHeader("key")); + } + + @Override + public void handleException(TransportException exp) { + assertEquals("different_bar", threadContext.getTransient("foo")); + assertEquals("value2", threadContext.getHeader("key")); + } + + @Override + public String executor() { + return null; + } + }); + } + + assertEquals("bar", threadContext.getTransient("foo")); + assertEquals("value", threadContext.getHeader("key")); + handler.handleResponse(null); + + assertEquals("bar", threadContext.getTransient("foo")); + assertEquals("value", threadContext.getHeader("key")); + handler.handleException(null); + + assertEquals("bar", threadContext.getTransient("foo")); + assertEquals("value", threadContext.getHeader("key")); + } + } + + private String[] randomRoles() { + return generateRandomStringArray(3, 10, false, true); + } + + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterIntegrationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterIntegrationTests.java new file mode 100644 index 0000000000000..0964bc5a45df7 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterIntegrationTests.java @@ -0,0 +1,209 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.transport; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.node.MockNode; +import org.elasticsearch.node.Node; +import org.elasticsearch.node.NodeValidationException; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.test.discovery.TestZenDiscovery; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.ConnectionProfile; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportResponseHandler; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; +import org.elasticsearch.xpack.core.ssl.SSLClientAuth; +import org.elasticsearch.xpack.security.LocalStateSecurity; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.concurrent.CountDownLatch; + +import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForStore; +import static org.elasticsearch.xpack.security.test.SecurityTestUtils.writeFile; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.is; + +public class ServerTransportFilterIntegrationTests extends SecurityIntegTestCase { + private static int randomClientPort; + + @BeforeClass + public static void getRandomPort() { + randomClientPort = randomIntBetween(49000, 65500); // ephemeral port + } + + @Override + public boolean transportSSLEnabled() { + return true; + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder settingsBuilder = Settings.builder(); + String randomClientPortRange = randomClientPort + "-" + (randomClientPort+100); + + Path store; + try { + store = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks"); + assertThat(Files.exists(store), is(true)); + } catch (Exception e) { + throw new RuntimeException(e); + } + + settingsBuilder.put(super.nodeSettings(nodeOrdinal)) + .put("transport.profiles.client.xpack.security.ssl.truststore.path", store) // settings for client truststore + .put("xpack.ssl.client_authentication", SSLClientAuth.REQUIRED) + .put("transport.profiles.client.xpack.security.type", "client") + .put("transport.profiles.client.port", randomClientPortRange) + // make sure this is "localhost", no matter if ipv4 or ipv6, but be consistent + .put("transport.profiles.client.bind_host", "localhost") + .put("xpack.security.audit.enabled", false) + .put(XPackSettings.WATCHER_ENABLED.getKey(), false) + 
.put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false); + if (randomBoolean()) { + settingsBuilder.put("transport.profiles.default.xpack.security.type", "node"); // this is default lets set it randomly + } + + SecuritySettingsSource.addSecureSettings(settingsBuilder, secureSettings -> + secureSettings.setString("transport.profiles.client.xpack.security.ssl.truststore.secure_password", "testnode")); + return settingsBuilder.build(); + } + + public void testThatConnectionToServerTypeConnectionWorks() throws IOException, NodeValidationException { + Path home = createTempDir(); + Path xpackConf = home.resolve("config"); + Files.createDirectories(xpackConf); + + Transport transport = internalCluster().getDataNodeInstance(Transport.class); + TransportAddress transportAddress = transport.boundAddress().publishAddress(); + String unicastHost = NetworkAddress.format(transportAddress.address()); + + // test that starting up a node works + Settings.Builder nodeSettings = Settings.builder() + .put("node.name", "my-test-node") + .put("network.host", "localhost") + .put("cluster.name", internalCluster().getClusterName()) + .put("discovery.zen.ping.unicast.hosts", unicastHost) + .put("discovery.zen.minimum_master_nodes", + internalCluster().getInstance(Settings.class).get("discovery.zen.minimum_master_nodes")) + .put("xpack.security.enabled", true) + .put("xpack.security.audit.enabled", false) + .put(XPackSettings.WATCHER_ENABLED.getKey(), false) + .put("path.home", home) + .put(NetworkModule.HTTP_ENABLED.getKey(), false) + .put(Node.NODE_MASTER_SETTING.getKey(), false) + .put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false); + //.put("xpack.ml.autodetect_process", false); + addSSLSettingsForStore(nodeSettings, "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks", "testnode"); + try (Node node = new MockNode(nodeSettings.build(), Arrays.asList(LocalStateSecurity.class, TestZenDiscovery.TestPlugin.class))) { + node.start(); + ensureStableCluster(cluster().size() + 1); + } + } + + public void testThatConnectionToClientTypeConnectionIsRejected() throws IOException, NodeValidationException, InterruptedException { + Path home = createTempDir(); + Path xpackConf = home.resolve("config"); + Files.createDirectories(xpackConf); + writeFile(xpackConf, "users", configUsers()); + writeFile(xpackConf, "users_roles", configUsersRoles()); + writeFile(xpackConf, "roles.yml", configRoles()); + + Transport transport = internalCluster().getDataNodeInstance(Transport.class); + TransportAddress transportAddress = transport.profileBoundAddresses().get("client").publishAddress(); + String unicastHost = NetworkAddress.format(transportAddress.address()); + + // test that starting up a node works + Settings.Builder nodeSettings = Settings.builder() + .put("xpack.security.authc.realms.file.type", FileRealmSettings.TYPE) + .put("xpack.security.authc.realms.file.order", 0) + .put("node.name", "my-test-node") + .put(SecurityField.USER_SETTING.getKey(), "test_user:" + SecuritySettingsSourceField.TEST_PASSWORD) + .put("cluster.name", internalCluster().getClusterName()) + .put("discovery.zen.ping.unicast.hosts", unicastHost) + .put("discovery.zen.minimum_master_nodes", + internalCluster().getInstance(Settings.class).get("discovery.zen.minimum_master_nodes")) + .put("xpack.security.enabled", true) + .put("xpack.security.audit.enabled", false) + .put(XPackSettings.WATCHER_ENABLED.getKey(), false) + .put(NetworkModule.HTTP_ENABLED.getKey(), false) + .put("discovery.initial_state_timeout", "0s") + .put("path.home", 
home) + .put(Node.NODE_MASTER_SETTING.getKey(), false) + .put(TestZenDiscovery.USE_MOCK_PINGS.getKey(), false); + //.put("xpack.ml.autodetect_process", false); + addSSLSettingsForStore(nodeSettings, "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks", "testnode"); + try (Node node = new MockNode(nodeSettings.build(), Arrays.asList(LocalStateSecurity.class, TestZenDiscovery.TestPlugin.class))) { + node.start(); + TransportService instance = node.injector().getInstance(TransportService.class); + try (Transport.Connection connection = instance.openConnection(new DiscoveryNode("theNode", transportAddress, Version.CURRENT), + ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, null, null))) { + // handshake should be ok + final DiscoveryNode handshake = instance.handshake(connection, 10000); + assertEquals(transport.boundAddress().publishAddress(), handshake.getAddress()); + CountDownLatch latch = new CountDownLatch(1); + instance.sendRequest(connection, NodeMappingRefreshAction.ACTION_NAME, + new NodeMappingRefreshAction.NodeMappingRefreshRequest("foo", "bar", "baz"), + TransportRequestOptions.EMPTY, + new TransportResponseHandler() { + @Override + public TransportResponse newInstance() { + fail("never get that far"); + return null; + } + + @Override + public void handleResponse(TransportResponse response) { + try { + fail("never get that far"); + } finally { + latch.countDown(); + } + } + + @Override + public void handleException(TransportException exp) { + try { + assertThat(exp.getCause(), instanceOf(ElasticsearchSecurityException.class)); + assertThat(exp.getCause().getMessage(), + equalTo("executing internal/shard actions is considered malicious and forbidden")); + } finally { + latch.countDown(); + } + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + }); + latch.await(); + } + } + } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java new file mode 100644 index 0000000000000..38b9a029d9bd1 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java @@ -0,0 +1,281 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.transport; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.MockIndicesRequest; +import org.elasticsearch.action.admin.indices.close.CloseIndexAction; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; +import org.elasticsearch.action.admin.indices.open.OpenIndexAction; +import org.elasticsearch.action.support.DestructiveOperations; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.TcpTransport; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.core.security.SecurityContext; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef; +import org.elasticsearch.xpack.core.security.authz.permission.Role; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; +import org.elasticsearch.xpack.core.security.user.KibanaUser; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.security.authc.AuthenticationService; +import org.elasticsearch.xpack.security.authz.AuthorizationService; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collections; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.mock.orig.Mockito.times; +import static org.elasticsearch.xpack.core.security.support.Exceptions.authenticationError; +import static org.elasticsearch.xpack.core.security.support.Exceptions.authorizationError; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Matchers.isA; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +public class ServerTransportFilterTests extends ESTestCase { + + private AuthenticationService authcService; + private AuthorizationService authzService; + private TransportChannel channel; + private boolean failDestructiveOperations; + private DestructiveOperations destructiveOperations; + + @Before + public void init() throws Exception { + authcService = mock(AuthenticationService.class); + authzService = mock(AuthorizationService.class); + channel = mock(TransportChannel.class); + when(channel.getProfileName()).thenReturn(TcpTransport.DEFAULT_PROFILE); + when(channel.getVersion()).thenReturn(Version.CURRENT); + failDestructiveOperations = randomBoolean(); + Settings settings = Settings.builder() + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), failDestructiveOperations).build(); + destructiveOperations = new 
DestructiveOperations(settings, + new ClusterSettings(settings, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING))); + } + + public void testInbound() throws Exception { + TransportRequest request = mock(TransportRequest.class); + Authentication authentication = mock(Authentication.class); + when(authentication.getVersion()).thenReturn(Version.CURRENT); + when(authentication.getUser()).thenReturn(SystemUser.INSTANCE); + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[3]; + callback.onResponse(authentication); + return Void.TYPE; + }).when(authcService).authenticate(eq("_action"), eq(request), eq((User)null), any(ActionListener.class)); + ServerTransportFilter filter = getClientOrNodeFilter(); + PlainActionFuture future = new PlainActionFuture<>(); + filter.inbound("_action", request, channel, future); + //future.get(); // don't block it's not called really just mocked + verify(authzService).authorize(authentication, "_action", request, null, null); + } + + public void testInboundDestructiveOperations() throws Exception { + String action = randomFrom(CloseIndexAction.NAME, OpenIndexAction.NAME, DeleteIndexAction.NAME); + TransportRequest request = new MockIndicesRequest( + IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()), + randomFrom("*", "_all", "test*")); + Authentication authentication = mock(Authentication.class); + when(authentication.getVersion()).thenReturn(Version.CURRENT); + when(authentication.getUser()).thenReturn(SystemUser.INSTANCE); + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[3]; + callback.onResponse(authentication); + return Void.TYPE; + }).when(authcService).authenticate(eq(action), eq(request), eq((User)null), any(ActionListener.class)); + ServerTransportFilter filter = getClientOrNodeFilter(); + PlainActionFuture listener = mock(PlainActionFuture.class); + filter.inbound(action, request, channel, listener); + if (failDestructiveOperations) { + verify(listener).onFailure(isA(IllegalArgumentException.class)); + verifyNoMoreInteractions(authzService); + } else { + verify(authzService).authorize(authentication, action, request, null, null); + } + } + + public void testInboundAuthenticationException() throws Exception { + TransportRequest request = mock(TransportRequest.class); + Exception authE = authenticationError("authc failed"); + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[3]; + callback.onFailure(authE); + return Void.TYPE; + }).when(authcService).authenticate(eq("_action"), eq(request), eq((User)null), any(ActionListener.class)); + ServerTransportFilter filter = getClientOrNodeFilter(); + try { + PlainActionFuture future = new PlainActionFuture<>(); + filter.inbound("_action", request, channel, future); + future.actionGet(); + fail("expected filter inbound to throw an authentication exception on authentication error"); + } catch (ElasticsearchSecurityException e) { + assertThat(e.getMessage(), equalTo("authc failed")); + } + verifyZeroInteractions(authzService); + } + + public void testInboundAuthorizationException() throws Exception { + ServerTransportFilter filter = getClientOrNodeFilter(); + TransportRequest request = mock(TransportRequest.class); + Authentication authentication = mock(Authentication.class); + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[3]; + callback.onResponse(authentication); + return Void.TYPE; + 
}).when(authcService).authenticate(eq("_action"), eq(request), eq((User)null), any(ActionListener.class)); + final Role empty = Role.EMPTY; + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[1]; + callback.onResponse(empty); + return Void.TYPE; + }).when(authzService).roles(any(User.class), any(ActionListener.class)); + when(authentication.getVersion()).thenReturn(Version.CURRENT); + when(authentication.getUser()).thenReturn(XPackUser.INSTANCE); + PlainActionFuture future = new PlainActionFuture<>(); + doThrow(authorizationError("authz failed")).when(authzService).authorize(authentication, "_action", request, + empty, null); + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> { + filter.inbound("_action", request, channel, future); + future.actionGet(); + }); + assertThat(e.getMessage(), equalTo("authz failed")); + } + + public void testClientProfileRejectsNodeActions() throws Exception { + TransportRequest request = mock(TransportRequest.class); + ServerTransportFilter filter = getClientFilter(true); + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, + () -> filter.inbound("internal:foo/bar", request, channel, new PlainActionFuture<>())); + assertEquals("executing internal/shard actions is considered malicious and forbidden", e.getMessage()); + e = expectThrows(ElasticsearchSecurityException.class, + () -> filter.inbound("indices:action" + randomFrom("[s]", "[p]", "[r]", "[n]", "[s][p]", "[s][r]", "[f]"), + request, channel, new PlainActionFuture<>())); + assertEquals("executing internal/shard actions is considered malicious and forbidden", e.getMessage()); + verifyZeroInteractions(authcService); + } + + public void testNodeProfileAllowsNodeActions() throws Exception { + final String internalAction = "internal:foo/bar"; + final String nodeOrShardAction = "indices:action" + randomFrom("[s]", "[p]", "[r]", "[n]", "[s][p]", "[s][r]", "[f]"); + ServerTransportFilter filter = getNodeFilter(true); + TransportRequest request = mock(TransportRequest.class); + Authentication authentication = new Authentication(new User("test", "superuser"), new RealmRef("test", "test", "node1"), null); + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[1]; + callback.onResponse(authentication.getUser().equals(i.getArguments()[0]) ? 
ReservedRolesStore.SUPERUSER_ROLE : null); + return Void.TYPE; + }).when(authzService).roles(any(User.class), any(ActionListener.class)); + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[3]; + callback.onResponse(authentication); + return Void.TYPE; + }).when(authcService).authenticate(eq(internalAction), eq(request), eq((User)null), any(ActionListener.class)); + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[3]; + callback.onResponse(authentication); + return Void.TYPE; + }).when(authcService).authenticate(eq(nodeOrShardAction), eq(request), eq((User)null), any(ActionListener.class)); + + filter.inbound(internalAction, request, channel, new PlainActionFuture<>()); + verify(authcService).authenticate(eq(internalAction), eq(request), eq((User)null), any(ActionListener.class)); + verify(authzService).roles(eq(authentication.getUser()), any(ActionListener.class)); + verify(authzService).authorize(authentication, internalAction, request, ReservedRolesStore.SUPERUSER_ROLE, null); + + filter.inbound(nodeOrShardAction, request, channel, new PlainActionFuture<>()); + verify(authcService).authenticate(eq(nodeOrShardAction), eq(request), eq((User)null), any(ActionListener.class)); + verify(authzService, times(2)).roles(eq(authentication.getUser()), any(ActionListener.class)); + verify(authzService).authorize(authentication, nodeOrShardAction, request, ReservedRolesStore.SUPERUSER_ROLE, null); + verifyNoMoreInteractions(authcService, authzService); + } + + public void testHandlesKibanaUserCompatibility() throws Exception { + TransportRequest request = mock(TransportRequest.class); + User user = new User("kibana", "kibana"); + Authentication authentication = mock(Authentication.class); + final Version version = Version.fromId(randomIntBetween(Version.V_5_0_0_ID, Version.V_5_2_0_ID - 100)); + when(authentication.getVersion()).thenReturn(version); + when(authentication.getUser()).thenReturn(user); + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[3]; + callback.onResponse(authentication); + return Void.TYPE; + }).when(authcService).authenticate(eq("_action"), eq(request), eq((User)null), any(ActionListener.class)); + AtomicReference rolesRef = new AtomicReference<>(); + final Role empty = Role.EMPTY; + doAnswer((i) -> { + ActionListener callback = + (ActionListener) i.getArguments()[1]; + rolesRef.set(((User) i.getArguments()[0]).roles()); + callback.onResponse(empty); + return Void.TYPE; + }).when(authzService).roles(any(User.class), any(ActionListener.class)); + ServerTransportFilter filter = getClientOrNodeFilter(); + PlainActionFuture future = new PlainActionFuture<>(); + when(channel.getVersion()).thenReturn(version); + filter.inbound("_action", request, channel, future); + assertNotNull(rolesRef.get()); + assertThat(rolesRef.get(), arrayContaining("kibana_system")); + + // test with a version that doesn't need changing + filter = getClientOrNodeFilter(); + rolesRef.set(null); + user = new KibanaUser(true); + when(authentication.getUser()).thenReturn(user); + when(authentication.getVersion()).thenReturn(Version.V_5_2_0); + future = new PlainActionFuture<>(); + filter.inbound("_action", request, channel, future); + assertNotNull(rolesRef.get()); + assertThat(rolesRef.get(), arrayContaining("kibana_system")); + } + + private ServerTransportFilter getClientOrNodeFilter() throws IOException { + return randomBoolean() ? 
getNodeFilter(true) : getClientFilter(true); + } + + private ServerTransportFilter.ClientProfile getClientFilter(boolean reservedRealmEnabled) throws IOException { + Settings settings = Settings.builder().put("path.home", createTempDir()).build(); + ThreadContext threadContext = new ThreadContext(settings); + return new ServerTransportFilter.ClientProfile(authcService, authzService, threadContext, false, destructiveOperations, + reservedRealmEnabled, new SecurityContext(settings, threadContext)); + } + + private ServerTransportFilter.NodeProfile getNodeFilter(boolean reservedRealmEnabled) throws IOException { + Settings settings = Settings.builder().put("path.home", createTempDir()).build(); + ThreadContext threadContext = new ThreadContext(settings); + return new ServerTransportFilter.NodeProfile(authcService, authzService, threadContext, false, destructiveOperations, + reservedRealmEnabled, new SecurityContext(settings, threadContext)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IPFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IPFilterTests.java new file mode 100644 index 0000000000000..0ff313ceb25e1 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IPFilterTests.java @@ -0,0 +1,301 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.transport.filter; + +import org.elasticsearch.common.component.Lifecycle; +import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.node.MockNode; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.junit.annotations.Network; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.xpack.security.LocalStateSecurity; +import org.elasticsearch.xpack.security.audit.AuditTrailService; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.hamcrest.Matchers.is; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +public class IPFilterTests extends ESTestCase { + private IPFilter ipFilter; + private XPackLicenseState licenseState; + private AuditTrailService auditTrail; + private Transport transport; + private HttpServerTransport httpTransport; + private ClusterSettings clusterSettings; + + @Before + public void init() { + licenseState = mock(XPackLicenseState.class); + 
when(licenseState.isIpFilteringAllowed()).thenReturn(true); + when(licenseState.isSecurityEnabled()).thenReturn(true); + auditTrail = mock(AuditTrailService.class); + clusterSettings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList( + IPFilter.HTTP_FILTER_ALLOW_SETTING, + IPFilter.HTTP_FILTER_DENY_SETTING, + IPFilter.IP_FILTER_ENABLED_HTTP_SETTING, + IPFilter.IP_FILTER_ENABLED_SETTING, + IPFilter.TRANSPORT_FILTER_ALLOW_SETTING, + IPFilter.TRANSPORT_FILTER_DENY_SETTING, + IPFilter.PROFILE_FILTER_ALLOW_SETTING, + IPFilter.PROFILE_FILTER_DENY_SETTING))); + + httpTransport = mock(HttpServerTransport.class); + TransportAddress httpAddress = new TransportAddress(InetAddress.getLoopbackAddress(), 9200); + when(httpTransport.boundAddress()).thenReturn(new BoundTransportAddress(new TransportAddress[] { httpAddress }, httpAddress)); + when(httpTransport.lifecycleState()).thenReturn(Lifecycle.State.STARTED); + + transport = mock(Transport.class); + TransportAddress address = new TransportAddress(InetAddress.getLoopbackAddress(), 9300); + when(transport.boundAddress()).thenReturn(new BoundTransportAddress(new TransportAddress[]{ address }, address)); + when(transport.lifecycleState()).thenReturn(Lifecycle.State.STARTED); + + Map profileBoundAddresses = Collections.singletonMap("client", + new BoundTransportAddress(new TransportAddress[]{ new TransportAddress(InetAddress.getLoopbackAddress(), 9500) }, + address)); + when(transport.profileBoundAddresses()).thenReturn(profileBoundAddresses); + } + + public void testThatIpV4AddressesCanBeProcessed() throws Exception { + Settings settings = Settings.builder() + .put("xpack.security.transport.filter.allow", "127.0.0.1") + .put("xpack.security.transport.filter.deny", "10.0.0.0/8") + .build(); + ipFilter = new IPFilter(settings, auditTrail, clusterSettings, licenseState); + ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses()); + assertAddressIsAllowed("127.0.0.1"); + assertAddressIsDenied("10.2.3.4"); + } + + public void testThatIpV6AddressesCanBeProcessed() throws Exception { + // you have to use the shortest possible notation in order to match, so + // 1234:0db8:85a3:0000:0000:8a2e:0370:7334 becomes 1234:db8:85a3:0:0:8a2e:370:7334 + Settings settings = Settings.builder() + .put("xpack.security.transport.filter.allow", "2001:0db8:1234::/48") + .putList("xpack.security.transport.filter.deny", "1234:db8:85a3:0:0:8a2e:370:7334", "4321:db8:1234::/48") + .build(); + ipFilter = new IPFilter(settings, auditTrail, clusterSettings, licenseState); + ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses()); + + assertAddressIsAllowed("2001:0db8:1234:0000:0000:8a2e:0370:7334"); + assertAddressIsDenied("1234:0db8:85a3:0000:0000:8a2e:0370:7334"); + assertAddressIsDenied("4321:0db8:1234:0000:0000:8a2e:0370:7334"); + } + + @Network // requires network for name resolution + public void testThatHostnamesCanBeProcessed() throws Exception { + Settings settings = Settings.builder() + .put("xpack.ml.autodetect_process", false) + .put("xpack.security.transport.filter.allow", "127.0.0.1") + .put("xpack.security.transport.filter.deny", "*.google.com") + .build(); + ipFilter = new IPFilter(settings, auditTrail, clusterSettings, licenseState); + ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses()); + + assertAddressIsAllowed("127.0.0.1"); + assertAddressIsDenied("8.8.8.8"); + } + + public void testThatAnAllowAllAuthenticatorWorks() throws Exception 
{ + Settings settings = Settings.builder() + .put("xpack.security.transport.filter.allow", "_all") + .build(); + ipFilter = new IPFilter(settings, auditTrail, clusterSettings, licenseState); + ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses()); + assertAddressIsAllowed("127.0.0.1"); + assertAddressIsAllowed("173.194.70.100"); + } + + public void testThatProfilesAreSupported() throws Exception { + Settings settings = Settings.builder() + .put("xpack.security.transport.filter.allow", "localhost") + .put("xpack.security.transport.filter.deny", "_all") + .put("transport.profiles.client.xpack.security.filter.allow", "192.168.0.1") + .put("transport.profiles.client.xpack.security.filter.deny", "_all") + .build(); + ipFilter = new IPFilter(settings, auditTrail, clusterSettings, licenseState); + ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses()); + assertAddressIsAllowed("127.0.0.1"); + assertAddressIsDenied("192.168.0.1"); + assertAddressIsAllowedForProfile("client", "192.168.0.1"); + assertAddressIsDeniedForProfile("client", "192.168.0.2"); + } + + public void testThatProfilesAreUpdateable() throws Exception { + Settings settings = Settings.builder() + .put("xpack.security.transport.filter.allow", "localhost") + .put("xpack.security.transport.filter.deny", "_all") + .put("transport.profiles.client.xpack.security.filter.allow", "192.168.0.1") + .put("transport.profiles.client.xpack.security.filter.deny", "_all") + .build(); + ipFilter = new IPFilter(settings, auditTrail, clusterSettings, licenseState); + ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses()); + Settings newSettings = Settings.builder().putList("transport.profiles.client.xpack.security.filter.allow", "192.168.0.1", + "192.168.0.2") + .put("transport.profiles.client.xpack.security.filter.deny", "192.168.0.3").build(); + Settings.Builder updatedSettingsBuilder = Settings.builder(); + clusterSettings.updateDynamicSettings(newSettings, updatedSettingsBuilder, Settings.builder(), "test"); + clusterSettings.applySettings(updatedSettingsBuilder.build()); + assertAddressIsAllowed("127.0.0.1"); + assertAddressIsDenied("192.168.0.1"); + assertAddressIsAllowedForProfile("client", "192.168.0.1", "192.168.0.2"); + assertAddressIsDeniedForProfile("client", "192.168.0.3"); + } + + public void testThatAllowWinsOverDeny() throws Exception { + Settings settings = Settings.builder() + .put("xpack.security.transport.filter.allow", "10.0.0.1") + .put("xpack.security.transport.filter.deny", "10.0.0.0/8") + .build(); + ipFilter = new IPFilter(settings, auditTrail, clusterSettings, licenseState); + ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses()); + assertAddressIsAllowed("10.0.0.1"); + assertAddressIsDenied("10.0.0.2"); + } + + public void testDefaultAllow() throws Exception { + Settings settings = Settings.builder().build(); + ipFilter = new IPFilter(settings, auditTrail, clusterSettings, licenseState); + ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses()); + assertAddressIsAllowed("10.0.0.1"); + assertAddressIsAllowed("10.0.0.2"); + } + + public void testThatHttpWorks() throws Exception { + Settings settings = Settings.builder() + .put("xpack.security.transport.filter.allow", "127.0.0.1") + .put("xpack.security.transport.filter.deny", "10.0.0.0/8") + .put("xpack.security.http.filter.allow", "10.0.0.0/8") + 
.put("xpack.security.http.filter.deny", "192.168.0.1") + .build(); + ipFilter = new IPFilter(settings, auditTrail, clusterSettings, licenseState); + ipFilter.setBoundHttpTransportAddress(httpTransport.boundAddress()); + ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses()); + assertAddressIsAllowedForProfile(IPFilter.HTTP_PROFILE_NAME, "10.2.3.4"); + assertAddressIsDeniedForProfile(IPFilter.HTTP_PROFILE_NAME, "192.168.0.1"); + } + + public void testThatHttpFallsbackToDefault() throws Exception { + Settings settings = Settings.builder() + .put("xpack.security.transport.filter.allow", "127.0.0.1") + .put("xpack.security.transport.filter.deny", "10.0.0.0/8") + .build(); + ipFilter = new IPFilter(settings, auditTrail, clusterSettings, licenseState); + ipFilter.setBoundHttpTransportAddress(httpTransport.boundAddress()); + ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses()); + + assertAddressIsAllowedForProfile(IPFilter.HTTP_PROFILE_NAME, "127.0.0.1"); + assertAddressIsDeniedForProfile(IPFilter.HTTP_PROFILE_NAME, "10.2.3.4"); + } + + public void testThatBoundAddressIsNeverRejected() throws Exception { + List addressStrings = new ArrayList<>(); + for (TransportAddress address : transport.boundAddress().boundAddresses()) { + addressStrings.add(NetworkAddress.format(address.address().getAddress())); + } + + Settings settings; + if (randomBoolean()) { + settings = Settings.builder().putList("xpack.security.transport.filter.deny", + addressStrings.toArray(new String[addressStrings.size()])).build(); + } else { + settings = Settings.builder().put("xpack.security.transport.filter.deny", "_all").build(); + } + ipFilter = new IPFilter(settings, auditTrail, clusterSettings, licenseState); + ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses()); + ipFilter.setBoundHttpTransportAddress(httpTransport.boundAddress()); + + for (String addressString : addressStrings) { + assertAddressIsAllowedForProfile(IPFilter.HTTP_PROFILE_NAME, addressString); + assertAddressIsAllowedForProfile("default", addressString); + } + } + + public void testThatAllAddressesAreAllowedWhenLicenseDisablesSecurity() { + Settings settings = Settings.builder() + .put("xpack.security.transport.filter.deny", "_all") + .build(); + when(licenseState.isIpFilteringAllowed()).thenReturn(false); + ipFilter = new IPFilter(settings, auditTrail, clusterSettings, licenseState); + ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses()); + + // don't use the assert helper because we don't want the audit trail to be invoked here + String message = String.format(Locale.ROOT, "Expected address %s to be allowed", "8.8.8.8"); + InetAddress address = InetAddresses.forString("8.8.8.8"); + assertThat(message, ipFilter.accept("default", new InetSocketAddress(address, 0)), is(true)); + verifyZeroInteractions(auditTrail); + + // for sanity enable license and check that it is denied + when(licenseState.isIpFilteringAllowed()).thenReturn(true); + ipFilter = new IPFilter(settings, auditTrail, clusterSettings, licenseState); + ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses()); + + assertAddressIsDeniedForProfile("default", "8.8.8.8"); + } + + public void testThatNodeStartsWithIPFilterDisabled() throws Exception { + Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put("xpack.security.transport.filter.enabled", randomBoolean()) + 
.put("xpack.security.http.filter.enabled", randomBoolean()) + .build(); + try (Node node = new MockNode(settings, Arrays.asList(LocalStateSecurity.class))) { + assertNotNull(node); + } + } + + private void assertAddressIsAllowedForProfile(String profile, String ... inetAddresses) { + for (String inetAddress : inetAddresses) { + String message = String.format(Locale.ROOT, "Expected address %s to be allowed", inetAddress); + InetAddress address = InetAddresses.forString(inetAddress); + assertTrue(message, ipFilter.accept(profile, new InetSocketAddress(address, 0))); + ArgumentCaptor ruleCaptor = ArgumentCaptor.forClass(SecurityIpFilterRule.class); + verify(auditTrail).connectionGranted(eq(address), eq(profile), ruleCaptor.capture()); + assertNotNull(ruleCaptor.getValue()); + } + } + + private void assertAddressIsAllowed(String ... inetAddresses) { + assertAddressIsAllowedForProfile("default", inetAddresses); + } + + private void assertAddressIsDeniedForProfile(String profile, String ... inetAddresses) { + for (String inetAddress : inetAddresses) { + String message = String.format(Locale.ROOT, "Expected address %s to be denied", inetAddress); + InetAddress address = InetAddresses.forString(inetAddress); + assertFalse(message, ipFilter.accept(profile, new InetSocketAddress(address, 0))); + ArgumentCaptor ruleCaptor = ArgumentCaptor.forClass(SecurityIpFilterRule.class); + verify(auditTrail).connectionDenied(eq(address), eq(profile), ruleCaptor.capture()); + assertNotNull(ruleCaptor.getValue()); + } + } + + private void assertAddressIsDenied(String ... inetAddresses) { + assertAddressIsDeniedForProfile("default", inetAddresses); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java new file mode 100644 index 0000000000000..ef1eeace73b31 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.transport.filter; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.test.ESIntegTestCase.ClusterScope; +import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.xpack.core.common.socket.SocketAccess; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.io.OutputStream; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.nio.charset.StandardCharsets; + +import static org.hamcrest.Matchers.is; + +// no client nodes, no transport clients, as they all get rejected on network connections +@ClusterScope(scope = Scope.SUITE, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0) +public class IpFilteringIntegrationTests extends SecurityIntegTestCase { + private static int randomClientPort; + + @BeforeClass + public static void getRandomPort() { + randomClientPort = randomIntBetween(49000, 65500); // ephemeral port + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + String randomClientPortRange = randomClientPort + "-" + (randomClientPort+100); + return Settings.builder().put(super.nodeSettings(nodeOrdinal)) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .put("transport.profiles.client.port", randomClientPortRange) + // make sure this is "localhost", no matter if ipv4 or ipv6, but be consistent + .put("transport.profiles.client.bind_host", "localhost") + .put("transport.profiles.client.xpack.security.filter.deny", "_all") + .put(IPFilter.TRANSPORT_FILTER_DENY_SETTING.getKey(), "_all") + .build(); + } + + public void testThatIpFilteringIsIntegratedIntoNettyPipelineViaHttp() throws Exception { + TransportAddress transportAddress = + randomFrom(internalCluster().getDataNodeInstance(HttpServerTransport.class).boundAddress().boundAddresses()); + try (Socket socket = new Socket()){ + trySocketConnection(socket, transportAddress.address()); + assertThat(socket.isClosed(), is(true)); + } + } + + public void testThatIpFilteringIsNotAppliedForDefaultTransport() throws Exception { + Client client = internalCluster().transportClient(); + assertGreenClusterState(client); + } + + public void testThatIpFilteringIsAppliedForProfile() throws Exception { + try (Socket socket = new Socket()){ + trySocketConnection(socket, new InetSocketAddress(InetAddress.getLoopbackAddress(), getProfilePort("client"))); + assertThat(socket.isClosed(), is(true)); + } + } + + @SuppressForbidden(reason = "Allow opening socket for test") + private void trySocketConnection(Socket socket, InetSocketAddress address) throws IOException { + logger.info("connecting to {}", address); + SocketAccess.doPrivileged(() -> socket.connect(address, 500)); + + assertThat(socket.isConnected(), is(true)); + try (OutputStream os = socket.getOutputStream()) { + os.write("fooooo".getBytes(StandardCharsets.UTF_8)); + os.flush(); + } + } + + private static int getProfilePort(String profile) { + TransportAddress transportAddress = + randomFrom(internalCluster().getInstance(Transport.class).profileBoundAddresses().get(profile).boundAddresses()); + return transportAddress.address().getPort(); + } +} 
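The two IP filtering test classes above converge on the same flow: build an `IPFilter` from the `xpack.security.transport.filter.*` settings, hand it the locally bound transport addresses, and then ask `accept(profile, remoteAddress)` for each incoming connection. The sketch below condenses that flow into one place; the constructor, setting keys, and mock setup are copied from `IPFilterTests` and `IpFilterRemoteAddressFilterTests` in this change, while the class name `IpFilterFlowSketchTests` is only illustrative and is not part of the PR.

```java
// Condensed sketch of the allow/deny flow exercised by IPFilterTests above; not part of this PR.
package org.elasticsearch.xpack.security.transport.filter;

import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.network.InetAddresses;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.xpack.security.audit.AuditTrailService;

import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.HashSet;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class IpFilterFlowSketchTests extends ESTestCase {

    public void testAllowAndDenyRanges() throws Exception {
        // 1. Filtering rules come straight from node settings.
        Settings settings = Settings.builder()
                .put("xpack.security.transport.filter.allow", "127.0.0.1")
                .put("xpack.security.transport.filter.deny", "10.0.0.0/8")
                .build();

        // 2. IPFilter registers for dynamic updates of these settings.
        ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(
                IPFilter.HTTP_FILTER_ALLOW_SETTING, IPFilter.HTTP_FILTER_DENY_SETTING,
                IPFilter.IP_FILTER_ENABLED_HTTP_SETTING, IPFilter.IP_FILTER_ENABLED_SETTING,
                IPFilter.TRANSPORT_FILTER_ALLOW_SETTING, IPFilter.TRANSPORT_FILTER_DENY_SETTING,
                IPFilter.PROFILE_FILTER_ALLOW_SETTING, IPFilter.PROFILE_FILTER_DENY_SETTING)));

        // 3. IP filtering is a licensed feature, so the license state gates it.
        XPackLicenseState licenseState = mock(XPackLicenseState.class);
        when(licenseState.isIpFilteringAllowed()).thenReturn(true);
        when(licenseState.isSecurityEnabled()).thenReturn(true);

        // 4. The filter needs the locally bound addresses; bound addresses are never rejected.
        Transport transport = mock(Transport.class);
        TransportAddress address = new TransportAddress(InetAddress.getLoopbackAddress(), 9300);
        when(transport.boundAddress()).thenReturn(new BoundTransportAddress(new TransportAddress[]{ address }, address));
        when(transport.lifecycleState()).thenReturn(Lifecycle.State.STARTED);

        IPFilter ipFilter = new IPFilter(settings, mock(AuditTrailService.class), clusterSettings, licenseState);
        ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses());

        // 5. Accept/deny decisions are made per profile and remote address.
        assertTrue(ipFilter.accept("default", new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 0)));
        assertFalse(ipFilter.accept("default", new InetSocketAddress(InetAddresses.forString("10.2.3.4"), 0)));
    }
}
```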
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java new file mode 100644 index 0000000000000..9c3572b767606 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringUpdateTests.java @@ -0,0 +1,180 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.transport.filter; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESIntegTestCase.ClusterScope; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.junit.BeforeClass; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.Arrays; +import java.util.Locale; + +import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.is; + +@ClusterScope(scope = TEST, supportsDedicatedMasters = false, numDataNodes = 1) +public class IpFilteringUpdateTests extends SecurityIntegTestCase { + + private static int randomClientPort; + + private final boolean httpEnabled = randomBoolean(); + + @BeforeClass + public static void getRandomPort() { + randomClientPort = randomIntBetween(49000, 65500); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + String randomClientPortRange = randomClientPort + "-" + (randomClientPort+100); + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(NetworkModule.HTTP_ENABLED.getKey(), httpEnabled) + .put("xpack.security.transport.filter.deny", "127.0.0.200") + .put("transport.profiles.client.port", randomClientPortRange) + .build(); + } + + public void testThatIpFilterConfigurationCanBeChangedDynamically() throws Exception { + // ensure this did not get overwritten by the listener during startup + assertConnectionRejected("default", "127.0.0.200"); + + // allow all by default + assertConnectionAccepted("default", "127.0.0.8"); + assertConnectionAccepted(".http", "127.0.0.8"); + assertConnectionAccepted("client", "127.0.0.8"); + + Settings settings = Settings.builder() + .put("xpack.security.transport.filter.allow", "127.0.0.1") + .put("xpack.security.transport.filter.deny", "127.0.0.8") + .build(); + updateSettings(settings); + assertConnectionRejected("default", "127.0.0.8"); + + settings = Settings.builder() + .putList("xpack.security.http.filter.allow", "127.0.0.1") + .putList("xpack.security.http.filter.deny", "127.0.0.8") + .build(); + updateSettings(settings); + assertConnectionRejected("default", "127.0.0.8"); + assertConnectionRejected(".http", "127.0.0.8"); + + settings = Settings.builder() + .put("transport.profiles.client.xpack.security.filter.allow", "127.0.0.1") + .put("transport.profiles.client.xpack.security.filter.deny", "127.0.0.8") + .build(); + updateSettings(settings); + assertConnectionRejected("default", "127.0.0.8"); + assertConnectionRejected(".http", "127.0.0.8"); + assertConnectionRejected("client", "127.0.0.8"); + + // check that all is in 
cluster state + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + assertThat(clusterState.metaData().settings().get("xpack.security.transport.filter.allow"), is("127.0.0.1")); + assertThat(clusterState.metaData().settings().get("xpack.security.transport.filter.deny"), is("127.0.0.8")); + assertEquals(Arrays.asList("127.0.0.1"), clusterState.metaData().settings().getAsList("xpack.security.http.filter.allow")); + assertEquals(Arrays.asList("127.0.0.8"), clusterState.metaData().settings().getAsList("xpack.security.http.filter.deny")); + assertThat(clusterState.metaData().settings().get("transport.profiles.client.xpack.security.filter.allow"), is("127.0.0.1")); + assertThat(clusterState.metaData().settings().get("transport.profiles.client.xpack.security.filter.deny"), is("127.0.0.8")); + + // now disable ip filtering dynamically and make sure nothing is rejected + settings = Settings.builder() + .put(IPFilter.IP_FILTER_ENABLED_SETTING.getKey(), false) + .put(IPFilter.IP_FILTER_ENABLED_HTTP_SETTING.getKey(), true) + .build(); + updateSettings(settings); + assertConnectionAccepted("default", "127.0.0.8"); + assertConnectionAccepted("client", "127.0.0.8"); + + // disabling should not have any effect on the cluster state settings + clusterState = client().admin().cluster().prepareState().get().getState(); + assertThat(clusterState.metaData().settings().get("xpack.security.transport.filter.allow"), is("127.0.0.1")); + assertThat(clusterState.metaData().settings().get("xpack.security.transport.filter.deny"), is("127.0.0.8")); + assertEquals(Arrays.asList("127.0.0.1"), clusterState.metaData().settings().getAsList("xpack.security.http.filter.allow")); + assertEquals(Arrays.asList("127.0.0.8"), clusterState.metaData().settings().getAsList("xpack.security.http.filter.deny")); + assertThat(clusterState.metaData().settings().get("transport.profiles.client.xpack.security.filter.allow"), is("127.0.0.1")); + assertThat(clusterState.metaData().settings().get("transport.profiles.client.xpack.security.filter.deny"), is("127.0.0.8")); + + // now also disable for HTTP + if (httpEnabled) { + assertConnectionRejected(".http", "127.0.0.8"); + settings = Settings.builder() + .put(IPFilter.IP_FILTER_ENABLED_HTTP_SETTING.getKey(), false) + .build(); + // as we permanently switch between persistent and transient settings, just set both here to make sure we overwrite + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings)); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + assertConnectionAccepted(".http", "127.0.0.8"); + } + } + + // issue #762, occured because in the above test we use HTTP and transport + public void testThatDisablingIpFilterWorksAsExpected() throws Exception { + Settings settings = Settings.builder() + .put("xpack.security.transport.filter.deny", "127.0.0.8") + .build(); + updateSettings(settings); + assertConnectionRejected("default", "127.0.0.8"); + + settings = Settings.builder() + .put(IPFilter.IP_FILTER_ENABLED_SETTING.getKey(), false) + .build(); + updateSettings(settings); + assertConnectionAccepted("default", "127.0.0.8"); + } + + public void testThatDisablingIpFilterForProfilesWorksAsExpected() throws Exception { + Settings settings = Settings.builder() + .put("transport.profiles.client.xpack.security.filter.deny", "127.0.0.8") + .build(); + updateSettings(settings); + assertConnectionRejected("client", "127.0.0.8"); + + settings = Settings.builder() + 
.put(IPFilter.IP_FILTER_ENABLED_SETTING.getKey(), false) + .build(); + updateSettings(settings); + assertConnectionAccepted("client", "127.0.0.8"); + } + + + private void updateSettings(Settings settings) { + if (randomBoolean()) { + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings)); + } else { + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); + } + } + + private void assertConnectionAccepted(String profile, String host) throws UnknownHostException { + // HTTP is not applied if disabled + if (!httpEnabled && IPFilter.HTTP_PROFILE_NAME.equals(profile)) { + return; + } + + IPFilter ipFilter = internalCluster().getDataNodeInstance(IPFilter.class); + String message = String.format(Locale.ROOT, "Expected allowed connection for profile %s against host %s", profile, host); + assertThat(message, ipFilter.accept(profile, new InetSocketAddress(InetAddress.getByName(host), 0)), is(true)); + } + + private void assertConnectionRejected(String profile, String host) throws UnknownHostException { + // HTTP is not applied if disabled + if (!httpEnabled && IPFilter.HTTP_PROFILE_NAME.equals(profile)) { + return; + } + + IPFilter ipFilter = internalCluster().getDataNodeInstance(IPFilter.class); + String message = String.format(Locale.ROOT, "Expected rejection for profile %s against host %s", profile, host); + assertThat(message, ipFilter.accept(profile, new InetSocketAddress(InetAddress.getByName(host), 0)), is(false)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/PatternRuleTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/PatternRuleTests.java new file mode 100644 index 0000000000000..6fc4e45ce0518 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/PatternRuleTests.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.transport.filter; + +import io.netty.handler.ipfilter.IpFilterRuleType; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.test.ESTestCase; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; + +public class PatternRuleTests extends ESTestCase { + + public void testSingleIpRule() throws UnknownHostException { + PatternRule rule = new PatternRule(IpFilterRuleType.REJECT, "i:127.0.0.1"); + assertFalse(rule.isLocalhost()); + assertTrue(rule.matches(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 0))); + assertEquals(IpFilterRuleType.REJECT, rule.ruleType()); + + rule = new PatternRule(IpFilterRuleType.REJECT, "i:192.168.*"); + assertFalse(rule.isLocalhost()); + assertFalse(rule.matches(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 0))); + assertTrue(rule.matches(new InetSocketAddress(InetAddress.getByName("192.168.2.1"), 0))); + assertEquals(IpFilterRuleType.REJECT, rule.ruleType()); + } + + public void testSingleLocalHostRule() throws UnknownHostException { + PatternRule rule = new PatternRule(IpFilterRuleType.ACCEPT, "n:localhost"); + assertTrue(rule.isLocalhost()); + assertTrue(rule.matches(new InetSocketAddress(getLocalHost(), 0))); + assertEquals(IpFilterRuleType.ACCEPT, rule.ruleType()); + } + + public void testMultiRules() throws UnknownHostException { + PatternRule rule = new PatternRule(IpFilterRuleType.ACCEPT, "n:localhost,i:127.0.0.1,i:192.168.9.*"); + assertTrue(rule.isLocalhost()); + assertTrue(rule.matches(new InetSocketAddress(getLocalHost(), 0))); + assertTrue(rule.matches(new InetSocketAddress(InetAddress.getByName("192.168.9.1"), 0))); + assertTrue(rule.matches(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 0))); + assertFalse(rule.matches(new InetSocketAddress(InetAddress.getByName("192.168.11.1"), 0))); + assertEquals(IpFilterRuleType.ACCEPT, rule.ruleType()); + } + + public void testAll() throws UnknownHostException { + PatternRule rule = new PatternRule(IpFilterRuleType.ACCEPT, "n:*"); + assertFalse(rule.isLocalhost()); + assertTrue(rule.matches(new InetSocketAddress(getLocalHost(), 0))); + assertTrue(rule.matches(new InetSocketAddress(InetAddress.getByName("192.168.9.1"), 0))); + assertTrue(rule.matches(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 0))); + assertTrue(rule.matches(new InetSocketAddress(InetAddress.getByName("192.168.11.1"), 0))); + assertEquals(IpFilterRuleType.ACCEPT, rule.ruleType()); + } + + @SuppressForbidden(reason = "just for this test") + private static InetAddress getLocalHost() throws UnknownHostException { + return InetAddress.getLocalHost(); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/SecurityIpFilterRuleTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/SecurityIpFilterRuleTests.java new file mode 100644 index 0000000000000..82ed092acd44a --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/SecurityIpFilterRuleTests.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.transport.filter; + +import io.netty.handler.ipfilter.IpFilterRule; +import io.netty.handler.ipfilter.IpFilterRuleType; +import io.netty.handler.ipfilter.IpSubnetFilterRule; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.test.ESTestCase; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; + +import static org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule.ACCEPT_ALL; +import static org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule.DENY_ALL; +import static org.elasticsearch.xpack.security.transport.filter.SecurityIpFilterRule.getRule; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.sameInstance; + +/** + * Unit tests for the {@link SecurityIpFilterRule} + */ +public class SecurityIpFilterRuleTests extends ESTestCase { + public void testParseAllRules() { + IpFilterRule rule = getRule(true, "_all"); + assertThat(rule, sameInstance(ACCEPT_ALL)); + + rule = getRule(false, "_all"); + assertThat(rule, sameInstance(DENY_ALL)); + } + + public void testParseAllRuleWithOtherValues() { + String ruleValue = "_all," + randomFrom("name", "127.0.0.1", "127.0.0.0/24"); + try { + getRule(randomBoolean(), ruleValue); + fail("an illegal argument exception should have been thrown!"); + } catch (IllegalArgumentException e) { + // expected + } + } + + public void testParseIpSubnetFilterRule() throws Exception { + final boolean allow = randomBoolean(); + IpFilterRule rule = getRule(allow, "127.0.0.0/24"); + assertThat(rule, instanceOf(IpSubnetFilterRule.class)); + if (allow) { + assertEquals(rule.ruleType(), IpFilterRuleType.ACCEPT); + } else { + assertEquals(rule.ruleType(), IpFilterRuleType.REJECT); + } + IpSubnetFilterRule ipSubnetFilterRule = (IpSubnetFilterRule) rule; + assertTrue(ipSubnetFilterRule.matches(new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 0))); + } + + public void testParseIpSubnetFilterRuleWithOtherValues() throws Exception { + try { + getRule(randomBoolean(), "127.0.0.0/24," + randomFrom("name", "127.0.0.1", "192.0.0.0/24")); + fail("expected an exception to be thrown because only one subnet can be specified at a time"); + } catch (IllegalArgumentException e) { + //expected + } + } + + public void testParsePatternRules() { + final boolean allow = randomBoolean(); + String ruleSpec = "127.0.0.1,::1,192.168.0.*,name*,specific_name"; + IpFilterRule rule = getRule(allow, ruleSpec); + assertThat(rule, instanceOf(PatternRule.class)); + if (allow) { + assertEquals(rule.ruleType(), IpFilterRuleType.ACCEPT); + } else { + assertEquals(rule.ruleType(), IpFilterRuleType.REJECT); + } + } + + public void testParseSubnetMask() throws UnknownHostException { + Tuple result = SecurityIpFilterRule.parseSubnetMask("2001:0db8:85a3:0000:0000:8a2e:0370:7334/24"); + assertEquals(NetworkAddress.format(result.v1()), "2001:db8:85a3::8a2e:370:7334"); + assertEquals(24, result.v2().intValue()); + + result = SecurityIpFilterRule.parseSubnetMask("127.0.0.0/24"); + assertEquals(NetworkAddress.format(result.v1()), "127.0.0.0"); + assertEquals(24, result.v2().intValue()); + + result = SecurityIpFilterRule.parseSubnetMask("127.0.0.1/255.255.255.0"); + assertEquals(NetworkAddress.format(result.v1()), "127.0.0.1"); + assertEquals(24, result.v2().intValue()); + + expectThrows(UnknownHostException.class, () -> 
SecurityIpFilterRule.parseSubnetMask("127.0.0.1")); + expectThrows(IllegalArgumentException.class, () -> SecurityIpFilterRule.parseSubnetMask("127.0.0.1/")); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/DNSOnlyHostnameVerificationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/DNSOnlyHostnameVerificationTests.java new file mode 100644 index 0000000000000..54e313a9e4797 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/DNSOnlyHostnameVerificationTests.java @@ -0,0 +1,156 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.transport.netty4; + +import org.bouncycastle.asn1.x509.GeneralName; +import org.bouncycastle.asn1.x509.GeneralNames; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.xpack.core.ssl.CertUtils; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import javax.security.auth.x500.X500Principal; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.UncheckedIOException; +import java.net.InetAddress; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.cert.Certificate; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + +/** + * Tests the use of DNS only certificates with SSL and verifies hostname verification works. The test itself is simple since we just need + * to check the cluster is formed and green. The setup is a bit more complicated as we do our best to ensure no issues will be encountered + * with DNS. 
+ */ +public class DNSOnlyHostnameVerificationTests extends SecurityIntegTestCase { + + private static KeyStore keystore = null; + private static String hostName = null; + + @BeforeClass + public static void resolveNameForMachine() throws Exception { + assert keystore == null : "keystore is only set by this method and it should only be called once"; + NetworkService networkService = new NetworkService(Collections.emptyList()); + InetAddress inetAddress = networkService.resolvePublishHostAddresses(null); + hostName = getHostName(inetAddress); + String hostAddress = NetworkAddress.format(inetAddress); + assumeFalse("need a local address that can be reverse resolved", hostName.equals(hostAddress)); + // looks good so far, verify forward resolve is ok and proceed + Optional matchingForwardResolvedAddress = Arrays.stream(InetAddress.getAllByName(hostName)) + .filter((i) -> Arrays.equals(i.getAddress(), inetAddress.getAddress())) + .findFirst(); + assumeTrue("could not forward resolve hostname: " + hostName, matchingForwardResolvedAddress.isPresent()); + KeyPair keyPair = KeyPairGenerator.getInstance("RSA").generateKeyPair(); + + // randomize between CN and SAN + final X509Certificate cert; + if (randomBoolean()) { + cert = CertUtils.generateSignedCertificate(new X500Principal("CN=" + hostName), null, keyPair, null, null, 365); + } else { + GeneralName dnsSan = new GeneralName(GeneralName.dNSName, hostName); + GeneralNames names = new GeneralNames(dnsSan); + cert = CertUtils.generateSignedCertificate(new X500Principal("CN=esnode"), names, keyPair, null, null, 365); + } + + keystore = KeyStore.getInstance("JKS"); + keystore.load(null, null); + keystore.setKeyEntry("private key", keyPair.getPrivate(), SecuritySettingsSourceField.TEST_PASSWORD.toCharArray(), + new Certificate[]{cert}); + } + + @AfterClass + public static void cleanupKeystore() { + keystore = null; + hostName = null; + } + + @Override + public boolean transportSSLEnabled() { + return true; + } + + @Override + public Settings nodeSettings(int nodeOrdinal) { + Settings defaultSettings = super.nodeSettings(nodeOrdinal); + Settings.Builder builder = Settings.builder() + .put(defaultSettings.filter((s) -> s.startsWith("xpack.ssl.") == false), false) + .put("transport.host", hostName); + Path keystorePath = nodeConfigPath(nodeOrdinal).resolve("keystore.jks"); + try (OutputStream os = Files.newOutputStream(keystorePath)) { + keystore.store(os, SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } catch (CertificateException | NoSuchAlgorithmException | KeyStoreException e) { + throw new ElasticsearchException("unable to write keystore for node", e); + } + SecuritySettingsSource.addSecureSettings(builder, secureSettings -> { + secureSettings.setString("xpack.ssl.keystore.secure_password", SecuritySettingsSourceField.TEST_PASSWORD); + secureSettings.setString("xpack.ssl.truststore.secure_password", SecuritySettingsSourceField.TEST_PASSWORD); + }); + builder.put("xpack.ssl.keystore.path", keystorePath.toAbsolutePath()) + .put("xpack.ssl.truststore.path", keystorePath.toAbsolutePath()); + List unicastHosts = defaultSettings.getAsList("discovery.zen.ping.unicast.hosts").stream() + .map((s) -> { + String port = s.substring(s.lastIndexOf(':'), s.length()); + return hostName + port; + }) + .collect(Collectors.toList()); + builder.putList("discovery.zen.ping.unicast.hosts", unicastHosts); + return builder.build(); + } + + @Override + public Settings transportClientSettings() { 
+ Settings defaultSettings = super.transportClientSettings(); + Settings.Builder builder = Settings.builder() + .put(defaultSettings.filter((s) -> s.startsWith("xpack.ssl.") == false)); + Path path = createTempDir().resolve("keystore.jks"); + try (OutputStream os = Files.newOutputStream(path)) { + keystore.store(os, SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } catch (CertificateException | NoSuchAlgorithmException | KeyStoreException e) { + throw new ElasticsearchException("unable to write keystore for node", e); + } + SecuritySettingsSource.addSecureSettings(builder, secureSettings -> { + secureSettings.setString("xpack.ssl.keystore.secure_password", SecuritySettingsSourceField.TEST_PASSWORD); + secureSettings.setString("xpack.ssl.truststore.secure_password", SecuritySettingsSourceField.TEST_PASSWORD); + }); + builder.put("xpack.ssl.keystore.path", path.toAbsolutePath()) + .put("xpack.ssl.truststore.path", path.toAbsolutePath()); + return builder.build(); + } + + public void testThatClusterIsFormed() { + ensureGreen(); + } + + @SuppressForbidden(reason = "need to get the hostname to set as host in test") + private static String getHostName(InetAddress inetAddress) { + return inetAddress.getHostName(); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IPHostnameVerificationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IPHostnameVerificationTests.java new file mode 100644 index 0000000000000..f03a4255b7fe7 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IPHostnameVerificationTests.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.transport.netty4; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.transport.TcpTransport; +import org.elasticsearch.xpack.core.ssl.SSLClientAuth; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.CoreMatchers.is; + +// TODO delete this test? +public class IPHostnameVerificationTests extends SecurityIntegTestCase { + Path keystore; + + @Override + protected boolean transportSSLEnabled() { + return true; + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings settings = super.nodeSettings(nodeOrdinal); + Settings.Builder builder = Settings.builder() + .put(settings.filter((s) -> s.startsWith("xpack.ssl.") == false), false); + settings = builder.build(); + + // The default Unicast test behavior is to use 'localhost' with the port number. 
For this test we need to use IP + List newUnicastAddresses = new ArrayList<>(); + for (String address : settings.getAsList("discovery.zen.ping.unicast.hosts")) { + newUnicastAddresses.add(address.replace("localhost", "127.0.0.1")); + } + + Settings.Builder settingsBuilder = Settings.builder() + .put(settings) + .putList("discovery.zen.ping.unicast.hosts", newUnicastAddresses); + + try { + //This keystore uses a cert with a CN of "Elasticsearch Test Node" and IPv4+IPv6 ip addresses as SubjectAlternativeNames + keystore = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-ip-only.jks"); + assertThat(Files.exists(keystore), is(true)); + } catch (Exception e) { + throw new RuntimeException(e); + } + + SecuritySettingsSource.addSecureSettings(settingsBuilder, secureSettings -> { + secureSettings.setString("xpack.ssl.keystore.secure_password", "testnode-ip-only"); + secureSettings.setString("xpack.ssl.truststore.secure_password", "testnode-ip-only"); + }); + return settingsBuilder.put("xpack.ssl.keystore.path", keystore.toAbsolutePath()) // settings for client truststore + .put("xpack.ssl.truststore.path", keystore.toAbsolutePath()) // settings for client truststore + .put(TcpTransport.BIND_HOST.getKey(), "127.0.0.1") + .put("network.host", "127.0.0.1") + .put("xpack.ssl.client_authentication", SSLClientAuth.NONE) + .put("xpack.ssl.verification_mode", "full") + .build(); + } + + @Override + protected Settings transportClientSettings() { + Settings clientSettings = super.transportClientSettings(); + return Settings.builder().put(clientSettings.filter(k -> k.startsWith("xpack.ssl.") == false)) + .put("xpack.ssl.verification_mode", "certificate") + .put("xpack.ssl.keystore.path", keystore.toAbsolutePath()) + .put("xpack.ssl.keystore.password", "testnode-ip-only") + .put("xpack.ssl.truststore.path", keystore.toAbsolutePath()) + .put("xpack.ssl.truststore.password", "testnode-ip-only") + .build(); + } + + public void testTransportClientConnectionWorksWithIPOnlyHostnameVerification() throws Exception { + Client client = internalCluster().transportClient(); + assertGreenClusterState(client); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilterTests.java new file mode 100644 index 0000000000000..1b45fad898913 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilterTests.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.transport.netty4; + +import io.netty.channel.ChannelHandlerContext; +import org.elasticsearch.common.component.Lifecycle; +import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.xpack.security.audit.AuditTrailService; +import org.elasticsearch.xpack.security.transport.filter.IPFilter; +import org.junit.Before; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; + +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class IpFilterRemoteAddressFilterTests extends ESTestCase { + private IpFilterRemoteAddressFilter handler; + + @Before + public void init() throws Exception { + Settings settings = Settings.builder() + .put("xpack.security.transport.filter.allow", "127.0.0.1") + .put("xpack.security.transport.filter.deny", "10.0.0.0/8") + .build(); + + boolean isHttpEnabled = randomBoolean(); + + Transport transport = mock(Transport.class); + TransportAddress address = new TransportAddress(InetAddress.getLoopbackAddress(), 9300); + when(transport.boundAddress()).thenReturn(new BoundTransportAddress(new TransportAddress[] { address }, address)); + when(transport.lifecycleState()).thenReturn(Lifecycle.State.STARTED); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList( + IPFilter.HTTP_FILTER_ALLOW_SETTING, + IPFilter.HTTP_FILTER_DENY_SETTING, + IPFilter.IP_FILTER_ENABLED_HTTP_SETTING, + IPFilter.IP_FILTER_ENABLED_SETTING, + IPFilter.TRANSPORT_FILTER_ALLOW_SETTING, + IPFilter.TRANSPORT_FILTER_DENY_SETTING, + IPFilter.PROFILE_FILTER_ALLOW_SETTING, + IPFilter.PROFILE_FILTER_DENY_SETTING))); + XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isIpFilteringAllowed()).thenReturn(true); + when(licenseState.isSecurityEnabled()).thenReturn(true); + AuditTrailService auditTrailService = new AuditTrailService(settings, Collections.emptyList(), licenseState); + IPFilter ipFilter = new IPFilter(settings, auditTrailService, clusterSettings, licenseState); + ipFilter.setBoundTransportAddress(transport.boundAddress(), transport.profileBoundAddresses()); + if (isHttpEnabled) { + HttpServerTransport httpTransport = mock(HttpServerTransport.class); + TransportAddress httpAddress = new TransportAddress(InetAddress.getLoopbackAddress(), 9200); + when(httpTransport.boundAddress()).thenReturn(new BoundTransportAddress(new TransportAddress[] { httpAddress }, httpAddress)); + when(httpTransport.lifecycleState()).thenReturn(Lifecycle.State.STARTED); + ipFilter.setBoundHttpTransportAddress(httpTransport.boundAddress()); + } + + if (isHttpEnabled) { + handler = new IpFilterRemoteAddressFilter(ipFilter, IPFilter.HTTP_PROFILE_NAME); + } else { + handler = new IpFilterRemoteAddressFilter(ipFilter, "default"); + } + } + + public void testThatFilteringWorksByIp() throws Exception { + InetSocketAddress localhostAddr = new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 
12345); + assertThat(handler.accept(mock(ChannelHandlerContext.class), localhostAddr), is(true)); + + InetSocketAddress remoteAddr = new InetSocketAddress(InetAddresses.forString("10.0.0.8"), 12345); + assertThat(handler.accept(mock(ChannelHandlerContext.class), remoteAddr), is(false)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportTests.java new file mode 100644 index 0000000000000..3ef298f3f232d --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportTests.java @@ -0,0 +1,217 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.transport.netty4; + +import io.netty.channel.ChannelHandler; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.ssl.SslHandler; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.http.HttpTransportSettings; +import org.elasticsearch.http.NullDispatcher; +import org.elasticsearch.http.netty4.Netty4HttpMockUtil; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.ssl.SSLClientAuth; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.transport.filter.IPFilter; +import org.junit.Before; + +import javax.net.ssl.SSLEngine; + +import java.nio.file.Path; +import java.util.Collections; +import java.util.Locale; + +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.mockito.Mockito.mock; + +public class SecurityNetty4HttpServerTransportTests extends ESTestCase { + + private SSLService sslService; + private Environment env; + + @Before + public void createSSLService() throws Exception { + Path testNodeStore = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks"); + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("xpack.ssl.keystore.secure_password", "testnode"); + Settings settings = Settings.builder() + .put("xpack.ssl.keystore.path", testNodeStore) + .put("path.home", createTempDir()) + .setSecureSettings(secureSettings) + .build(); + env = TestEnvironment.newEnvironment(settings); + sslService = new SSLService(settings, env); + } + + public void testDefaultClientAuth() throws Exception { + Settings settings = Settings.builder() + .put(env.settings()) + .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true).build(); + sslService = new SSLService(settings, env); + SecurityNetty4HttpServerTransport transport = new SecurityNetty4HttpServerTransport(settings, + new 
NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(IPFilter.class), sslService, + mock(ThreadPool.class), xContentRegistry(), new NullDispatcher()); + Netty4HttpMockUtil.setOpenChannelsHandlerToMock(transport); + ChannelHandler handler = transport.configureServerChannelHandler(); + final EmbeddedChannel ch = new EmbeddedChannel(handler); + assertThat(ch.pipeline().get(SslHandler.class).engine().getNeedClientAuth(), is(false)); + assertThat(ch.pipeline().get(SslHandler.class).engine().getWantClientAuth(), is(false)); + } + + public void testOptionalClientAuth() throws Exception { + String value = randomFrom(SSLClientAuth.OPTIONAL.name(), SSLClientAuth.OPTIONAL.name().toLowerCase(Locale.ROOT)); + Settings settings = Settings.builder() + .put(env.settings()) + .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true) + .put("xpack.security.http.ssl.client_authentication", value).build(); + sslService = new SSLService(settings, env); + SecurityNetty4HttpServerTransport transport = new SecurityNetty4HttpServerTransport(settings, + new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(IPFilter.class), sslService, + mock(ThreadPool.class), xContentRegistry(), new NullDispatcher()); + Netty4HttpMockUtil.setOpenChannelsHandlerToMock(transport); + ChannelHandler handler = transport.configureServerChannelHandler(); + final EmbeddedChannel ch = new EmbeddedChannel(handler); + assertThat(ch.pipeline().get(SslHandler.class).engine().getNeedClientAuth(), is(false)); + assertThat(ch.pipeline().get(SslHandler.class).engine().getWantClientAuth(), is(true)); + } + + public void testRequiredClientAuth() throws Exception { + String value = randomFrom(SSLClientAuth.REQUIRED.name(), SSLClientAuth.REQUIRED.name().toLowerCase(Locale.ROOT)); + Settings settings = Settings.builder() + .put(env.settings()) + .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true) + .put("xpack.security.http.ssl.client_authentication", value).build(); + sslService = new SSLService(settings, env); + SecurityNetty4HttpServerTransport transport = new SecurityNetty4HttpServerTransport(settings, + new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(IPFilter.class), sslService, + mock(ThreadPool.class), xContentRegistry(), new NullDispatcher()); + Netty4HttpMockUtil.setOpenChannelsHandlerToMock(transport); + ChannelHandler handler = transport.configureServerChannelHandler(); + final EmbeddedChannel ch = new EmbeddedChannel(handler); + assertThat(ch.pipeline().get(SslHandler.class).engine().getNeedClientAuth(), is(true)); + assertThat(ch.pipeline().get(SslHandler.class).engine().getWantClientAuth(), is(false)); + } + + public void testNoClientAuth() throws Exception { + String value = randomFrom(SSLClientAuth.NONE.name(), SSLClientAuth.NONE.name().toLowerCase(Locale.ROOT)); + Settings settings = Settings.builder() + .put(env.settings()) + .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true) + .put("xpack.security.http.ssl.client_authentication", value).build(); + sslService = new SSLService(settings, env); + SecurityNetty4HttpServerTransport transport = new SecurityNetty4HttpServerTransport(settings, + new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(IPFilter.class), sslService, + mock(ThreadPool.class), xContentRegistry(), new NullDispatcher()); + Netty4HttpMockUtil.setOpenChannelsHandlerToMock(transport); + ChannelHandler handler = transport.configureServerChannelHandler(); + final EmbeddedChannel ch = new EmbeddedChannel(handler); + 
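+        // with client authentication explicitly set to NONE, the engine should neither need nor want a client certificate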
assertThat(ch.pipeline().get(SslHandler.class).engine().getNeedClientAuth(), is(false)); + assertThat(ch.pipeline().get(SslHandler.class).engine().getWantClientAuth(), is(false)); + } + + public void testCustomSSLConfiguration() throws Exception { + Settings settings = Settings.builder() + .put(env.settings()) + .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true).build(); + sslService = new SSLService(settings, env); + SecurityNetty4HttpServerTransport transport = new SecurityNetty4HttpServerTransport(settings, + new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(IPFilter.class), sslService, + mock(ThreadPool.class), xContentRegistry(), new NullDispatcher()); + Netty4HttpMockUtil.setOpenChannelsHandlerToMock(transport); + ChannelHandler handler = transport.configureServerChannelHandler(); + EmbeddedChannel ch = new EmbeddedChannel(handler); + SSLEngine defaultEngine = ch.pipeline().get(SslHandler.class).engine(); + + settings = Settings.builder() + .put(env.settings()) + .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true) + .put("xpack.security.http.ssl.supported_protocols", "TLSv1.2") + .build(); + sslService = new SSLService(settings, TestEnvironment.newEnvironment(settings)); + transport = new SecurityNetty4HttpServerTransport(settings, new NetworkService(Collections.emptyList()), + mock(BigArrays.class), mock(IPFilter.class), sslService, mock(ThreadPool.class), xContentRegistry(), new NullDispatcher()); + Netty4HttpMockUtil.setOpenChannelsHandlerToMock(transport); + handler = transport.configureServerChannelHandler(); + ch = new EmbeddedChannel(handler); + SSLEngine customEngine = ch.pipeline().get(SslHandler.class).engine(); + assertThat(customEngine.getEnabledProtocols(), arrayContaining("TLSv1.2")); + assertThat(customEngine.getEnabledProtocols(), not(equalTo(defaultEngine.getEnabledProtocols()))); + } + + public void testDisablesCompressionByDefaultForSsl() throws Exception { + Settings settings = Settings.builder() + .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true).build(); + + Settings.Builder pluginSettingsBuilder = Settings.builder(); + SecurityNetty4HttpServerTransport.overrideSettings(pluginSettingsBuilder, settings); + assertThat(HttpTransportSettings.SETTING_HTTP_COMPRESSION.get(pluginSettingsBuilder.build()), is(false)); + } + + public void testLeavesCompressionOnIfNotSsl() throws Exception { + Settings settings = Settings.builder() + .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), false).build(); + Settings.Builder pluginSettingsBuilder = Settings.builder(); + SecurityNetty4HttpServerTransport.overrideSettings(pluginSettingsBuilder, settings); + assertThat(pluginSettingsBuilder.build().isEmpty(), is(true)); + } + + public void testDoesNotChangeExplicitlySetCompression() throws Exception { + Settings settings = Settings.builder() + .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true) + .put(HttpTransportSettings.SETTING_HTTP_COMPRESSION.getKey(), true) + .build(); + + Settings.Builder pluginSettingsBuilder = Settings.builder(); + SecurityNetty4HttpServerTransport.overrideSettings(pluginSettingsBuilder, settings); + assertThat(pluginSettingsBuilder.build().isEmpty(), is(true)); + } + + public void testThatExceptionIsThrownWhenConfiguredWithoutSslKey() throws Exception { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("xpack.ssl.truststore.secure_password", "testnode"); + Settings settings = Settings.builder() + .put("xpack.ssl.truststore.path", + 
getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks")) + .setSecureSettings(secureSettings) + .put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true) + .put("path.home", createTempDir()) + .build(); + env = TestEnvironment.newEnvironment(settings); + sslService = new SSLService(settings, env); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new SecurityNetty4HttpServerTransport(settings, new NetworkService(Collections.emptyList()), mock(BigArrays.class), + mock(IPFilter.class), sslService, mock(ThreadPool.class), xContentRegistry(), new NullDispatcher())); + assertThat(e.getMessage(), containsString("key must be provided")); + } + + public void testNoExceptionWhenConfiguredWithoutSslKeySSLDisabled() throws Exception { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("xpack.ssl.truststore.secure_password", "testnode"); + Settings settings = Settings.builder() + .put("xpack.ssl.truststore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks")) + .setSecureSettings(secureSettings) + .put("path.home", createTempDir()) + .build(); + env = TestEnvironment.newEnvironment(settings); + sslService = new SSLService(settings, env); + SecurityNetty4HttpServerTransport transport = new SecurityNetty4HttpServerTransport(settings, + new NetworkService(Collections.emptyList()), mock(BigArrays.class), mock(IPFilter.class), sslService, + mock(ThreadPool.class), xContentRegistry(), new NullDispatcher()); + assertNotNull(transport.configureServerChannelHandler()); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4ServerTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4ServerTransportTests.java new file mode 100644 index 0000000000000..3d9227319a870 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4ServerTransportTests.java @@ -0,0 +1,207 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.transport.netty4; + +import io.netty.channel.ChannelHandler; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.ssl.SslHandler; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.transport.netty4.SecurityNetty4Transport; +import org.elasticsearch.xpack.core.ssl.SSLClientAuth; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.junit.Before; + +import javax.net.ssl.SSLEngine; + +import java.nio.file.Path; +import java.util.Collections; +import java.util.Locale; + +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Mockito.mock; + +public class SecurityNetty4ServerTransportTests extends ESTestCase { + + private Environment env; + private SSLService sslService; + + @Before + public void createSSLService() throws Exception { + Path testnodeStore = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks"); + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("xpack.ssl.keystore.secure_password", "testnode"); + Settings settings = Settings.builder() + .put("xpack.security.transport.ssl.enabled", true) + .put("xpack.ssl.keystore.path", testnodeStore) + .setSecureSettings(secureSettings) + .put("path.home", createTempDir()) + .build(); + env = TestEnvironment.newEnvironment(settings); + sslService = new SSLService(settings, env); + } + + private SecurityNetty4Transport createTransport() { + return createTransport(Settings.builder().put("xpack.security.transport.ssl.enabled", true).build()); + } + + private SecurityNetty4Transport createTransport(Settings additionalSettings) { + final Settings settings = + Settings.builder() + .put("xpack.security.transport.ssl.enabled", true) + .put(additionalSettings) + .build(); + return new SecurityNetty4ServerTransport( + settings, + mock(ThreadPool.class), + new NetworkService(Collections.emptyList()), + mock(BigArrays.class), + mock(NamedWriteableRegistry.class), + mock(CircuitBreakerService.class), + null, + sslService); + } + + public void testThatProfileTakesDefaultSSLSetting() throws Exception { + SecurityNetty4Transport transport = createTransport(); + ChannelHandler handler = transport.getServerChannelInitializer("default"); + final EmbeddedChannel ch = new EmbeddedChannel(handler); + assertThat(ch.pipeline().get(SslHandler.class).engine(), notNullValue()); + } + + public void testDefaultClientAuth() throws Exception { + SecurityNetty4Transport transport = createTransport(); + ChannelHandler handler = transport.getServerChannelInitializer("default"); + final EmbeddedChannel ch = new EmbeddedChannel(handler); + assertThat(ch.pipeline().get(SslHandler.class).engine().getNeedClientAuth(), is(true)); + assertThat(ch.pipeline().get(SslHandler.class).engine().getWantClientAuth(), is(false)); + } + + public void testRequiredClientAuth() throws Exception { + String value = randomFrom(SSLClientAuth.REQUIRED.name(), 
SSLClientAuth.REQUIRED.name().toLowerCase(Locale.ROOT)); + Settings settings = Settings.builder() + .put(env.settings()) + .put("xpack.ssl.client_authentication", value) + .build(); + sslService = new SSLService(settings, env); + SecurityNetty4Transport transport = createTransport(settings); + ChannelHandler handler = transport.getServerChannelInitializer("default"); + final EmbeddedChannel ch = new EmbeddedChannel(handler); + assertThat(ch.pipeline().get(SslHandler.class).engine().getNeedClientAuth(), is(true)); + assertThat(ch.pipeline().get(SslHandler.class).engine().getWantClientAuth(), is(false)); + } + + public void testNoClientAuth() throws Exception { + String value = randomFrom(SSLClientAuth.NONE.name(), SSLClientAuth.NONE.name().toLowerCase(Locale.ROOT)); + Settings settings = Settings.builder() + .put(env.settings()) + .put("xpack.ssl.client_authentication", value) + .build(); + sslService = new SSLService(settings, env); + SecurityNetty4Transport transport = createTransport(settings); + ChannelHandler handler = transport.getServerChannelInitializer("default"); + final EmbeddedChannel ch = new EmbeddedChannel(handler); + assertThat(ch.pipeline().get(SslHandler.class).engine().getNeedClientAuth(), is(false)); + assertThat(ch.pipeline().get(SslHandler.class).engine().getWantClientAuth(), is(false)); + } + + public void testOptionalClientAuth() throws Exception { + String value = randomFrom(SSLClientAuth.OPTIONAL.name(), SSLClientAuth.OPTIONAL.name().toLowerCase(Locale.ROOT)); + Settings settings = Settings.builder() + .put(env.settings()) + .put("xpack.ssl.client_authentication", value) + .build(); + sslService = new SSLService(settings, env); + SecurityNetty4Transport transport = createTransport(settings); + ChannelHandler handler = transport.getServerChannelInitializer("default"); + final EmbeddedChannel ch = new EmbeddedChannel(handler); + assertThat(ch.pipeline().get(SslHandler.class).engine().getNeedClientAuth(), is(false)); + assertThat(ch.pipeline().get(SslHandler.class).engine().getWantClientAuth(), is(true)); + } + + public void testProfileRequiredClientAuth() throws Exception { + String value = randomFrom(SSLClientAuth.REQUIRED.name(), SSLClientAuth.REQUIRED.name().toLowerCase(Locale.ROOT)); + Settings settings = Settings.builder() + .put(env.settings()) + .put("transport.profiles.client.port", "8000-9000") + .put("transport.profiles.client.xpack.security.ssl.client_authentication", value) + .build(); + sslService = new SSLService(settings, env); + SecurityNetty4Transport transport = createTransport(settings); + ChannelHandler handler = transport.getServerChannelInitializer("client"); + final EmbeddedChannel ch = new EmbeddedChannel(handler); + assertThat(ch.pipeline().get(SslHandler.class).engine().getNeedClientAuth(), is(true)); + assertThat(ch.pipeline().get(SslHandler.class).engine().getWantClientAuth(), is(false)); + } + + public void testProfileNoClientAuth() throws Exception { + String value = randomFrom(SSLClientAuth.NONE.name(), SSLClientAuth.NONE.name().toLowerCase(Locale.ROOT)); + Settings settings = Settings.builder() + .put(env.settings()) + .put("transport.profiles.client.port", "8000-9000") + .put("transport.profiles.client.xpack.security.ssl.client_authentication", value) + .build(); + sslService = new SSLService(settings, env); + SecurityNetty4Transport transport = createTransport(settings); + ChannelHandler handler = transport.getServerChannelInitializer("client"); + final EmbeddedChannel ch = new EmbeddedChannel(handler); + 
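+        // the "client" profile overrides the default transport setting and disables client authentication entirely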
assertThat(ch.pipeline().get(SslHandler.class).engine().getNeedClientAuth(), is(false)); + assertThat(ch.pipeline().get(SslHandler.class).engine().getWantClientAuth(), is(false)); + } + + public void testProfileOptionalClientAuth() throws Exception { + String value = randomFrom(SSLClientAuth.OPTIONAL.name(), SSLClientAuth.OPTIONAL.name().toLowerCase(Locale.ROOT)); + Settings settings = Settings.builder() + .put(env.settings()) + .put("transport.profiles.client.port", "8000-9000") + .put("transport.profiles.client.xpack.security.ssl.client_authentication", value) + .build(); + sslService = new SSLService(settings, env); + SecurityNetty4Transport transport = createTransport(settings); + final ChannelHandler handler = transport.getServerChannelInitializer("client"); + final EmbeddedChannel ch = new EmbeddedChannel(handler); + assertThat(ch.pipeline().get(SslHandler.class).engine().getNeedClientAuth(), is(false)); + assertThat(ch.pipeline().get(SslHandler.class).engine().getWantClientAuth(), is(true)); + } + + public void testTransportSSLOverridesGlobalSSL() throws Exception { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("xpack.security.transport.ssl.keystore.secure_password", "testnode"); + secureSettings.setString("xpack.ssl.truststore.secure_password", "truststore-testnode-only"); + Settings.Builder builder = Settings.builder() + .put("xpack.security.transport.ssl.enabled", true) + .put("xpack.security.transport.ssl.keystore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks")) + .put("xpack.security.transport.ssl.client_authentication", "none") + .put("xpack.ssl.truststore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/truststore-testnode-only.jks")) + .setSecureSettings(secureSettings) + .put("path.home", createTempDir()); + Settings settings = builder.build(); + env = TestEnvironment.newEnvironment(settings); + sslService = new SSLService(settings, env); + SecurityNetty4Transport transport = createTransport(settings); + final ChannelHandler handler = transport.getServerChannelInitializer("default"); + final EmbeddedChannel ch = new EmbeddedChannel(handler); + final SSLEngine engine = ch.pipeline().get(SslHandler.class).engine(); + assertFalse(engine.getNeedClientAuth()); + assertFalse(engine.getWantClientAuth()); + + // get the global and verify that it is different in that it requires client auth + final SSLEngine globalEngine = sslService.createSSLEngine(Settings.EMPTY, Settings.EMPTY); + assertTrue(globalEngine.getNeedClientAuth()); + assertFalse(globalEngine.getWantClientAuth()); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SslHostnameVerificationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SslHostnameVerificationTests.java new file mode 100644 index 0000000000000..148453b5f84b0 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SslHostnameVerificationTests.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.transport.netty4; + +import org.elasticsearch.client.Client; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.xpack.core.TestXPackTransportClient; +import org.elasticsearch.xpack.security.LocalStateSecurity; + +import java.net.InetSocketAddress; +import java.nio.file.Files; +import java.nio.file.Path; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.containsString; + +public class SslHostnameVerificationTests extends SecurityIntegTestCase { + + @Override + protected boolean transportSSLEnabled() { + return true; + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings settings = super.nodeSettings(nodeOrdinal); + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(settings.filter(k -> k.startsWith("xpack.ssl.") == false), false); + Path keystore; + try { + /* + * This keystore uses a cert without any subject alternative names and a CN of "Elasticsearch Test Node No SAN" + * that will not resolve to a DNS name and will always cause hostname verification failures + */ + keystore = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.jks"); + assert keystore != null; + assertThat(Files.exists(keystore), is(true)); + } catch (Exception e) { + throw new RuntimeException(e); + } + + SecuritySettingsSource.addSecureSettings(settingsBuilder, secureSettings -> { + secureSettings.setString("xpack.ssl.keystore.secure_password", "testnode-no-subjaltname"); + secureSettings.setString("xpack.ssl.truststore.secure_password", "testnode-no-subjaltname"); + }); + return settingsBuilder.put("xpack.ssl.keystore.path", keystore.toAbsolutePath()) + .put("xpack.ssl.truststore.path", keystore.toAbsolutePath()) + // disable hostname verification as this test uses certs without a valid SAN or DNS in the CN + .put("xpack.ssl.verification_mode", "certificate") + .build(); + } + + @Override + protected Settings transportClientSettings() { + Path keystore = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.jks"); + assert keystore != null; + Settings settings = super.transportClientSettings(); + // remove all ssl settings + Settings.Builder builder = Settings.builder(); + builder.put(settings.filter( k -> k.startsWith("xpack.ssl.") == false), false); + + builder.put("xpack.ssl.verification_mode", "certificate") + .put("xpack.ssl.keystore.path", keystore.toAbsolutePath()) // settings for client keystore + .put("xpack.ssl.keystore.password", "testnode-no-subjaltname"); + + if (randomBoolean()) { + // randomly set the truststore, if not set the keystore should be used + builder.put("xpack.ssl.truststore.path", keystore.toAbsolutePath()) + .put("xpack.ssl.truststore.password", "testnode-no-subjaltname"); + } + return builder.build(); + } + + public void testThatHostnameMismatchDeniesTransportClientConnection() throws Exception { + Transport transport = internalCluster().getDataNodeInstance(Transport.class); + TransportAddress transportAddress = transport.boundAddress().publishAddress(); + InetSocketAddress inetSocketAddress = 
transportAddress.address(); + + Settings settings = Settings.builder().put(transportClientSettings()) + .put("xpack.ssl.verification_mode", "full") + .build(); + + try (TransportClient client = new TestXPackTransportClient(settings, LocalStateSecurity.class)) { + client.addTransportAddress(new TransportAddress(inetSocketAddress.getAddress(), inetSocketAddress.getPort())); + client.admin().cluster().prepareHealth().get(); + fail("Expected a NoNodeAvailableException due to hostname verification failures"); + } catch (NoNodeAvailableException e) { + assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#")); + } + } + + public void testTransportClientConnectionIgnoringHostnameVerification() throws Exception { + Client client = internalCluster().transportClient(); + assertGreenClusterState(client); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContextTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContextTests.java new file mode 100644 index 0000000000000..884b348721fc5 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContextTests.java @@ -0,0 +1,478 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.transport.nio; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.nio.BytesWriteOperation; +import org.elasticsearch.nio.InboundChannelBuffer; +import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.nio.SocketChannelContext; +import org.elasticsearch.nio.SocketSelector; +import org.elasticsearch.nio.WriteOperation; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; +import org.mockito.ArgumentCaptor; +import org.mockito.stubbing.Answer; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.Selector; +import java.nio.channels.SocketChannel; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.isNull; +import static org.mockito.Matchers.same; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class SSLChannelContextTests extends ESTestCase { + + private SocketChannelContext.ReadConsumer readConsumer; + private NioSocketChannel channel; + private SocketChannel rawChannel; + private SSLChannelContext context; + private InboundChannelBuffer channelBuffer; + private SocketSelector selector; + private BiConsumer listener; + private Consumer exceptionHandler; + private SSLDriver sslDriver; + private ByteBuffer readBuffer = ByteBuffer.allocate(1 << 14); + private ByteBuffer writeBuffer = ByteBuffer.allocate(1 << 14); + private int messageLength; + + @Before + @SuppressWarnings("unchecked") + public void init() { + readConsumer = mock(SocketChannelContext.ReadConsumer.class); + + messageLength = randomInt(96) + 20; + selector = 
mock(SocketSelector.class); + listener = mock(BiConsumer.class); + channel = mock(NioSocketChannel.class); + rawChannel = mock(SocketChannel.class); + sslDriver = mock(SSLDriver.class); + channelBuffer = InboundChannelBuffer.allocatingInstance(); + when(channel.getRawChannel()).thenReturn(rawChannel); + exceptionHandler = mock(Consumer.class); + context = new SSLChannelContext(channel, selector, exceptionHandler, sslDriver, readConsumer, channelBuffer); + + when(selector.isOnCurrentThread()).thenReturn(true); + when(sslDriver.getNetworkReadBuffer()).thenReturn(readBuffer); + when(sslDriver.getNetworkWriteBuffer()).thenReturn(writeBuffer); + } + + public void testSuccessfulRead() throws IOException { + byte[] bytes = createMessage(messageLength); + + when(rawChannel.read(same(readBuffer))).thenReturn(bytes.length); + doAnswer(getAnswerForBytes(bytes)).when(sslDriver).read(channelBuffer); + + when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, 0); + + assertEquals(messageLength, context.read()); + + assertEquals(0, channelBuffer.getIndex()); + assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity()); + verify(readConsumer, times(1)).consumeReads(channelBuffer); + } + + public void testMultipleReadsConsumed() throws IOException { + byte[] bytes = createMessage(messageLength * 2); + + when(rawChannel.read(same(readBuffer))).thenReturn(bytes.length); + doAnswer(getAnswerForBytes(bytes)).when(sslDriver).read(channelBuffer); + + when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, messageLength, 0); + + assertEquals(bytes.length, context.read()); + + assertEquals(0, channelBuffer.getIndex()); + assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity()); + verify(readConsumer, times(2)).consumeReads(channelBuffer); + } + + public void testPartialRead() throws IOException { + byte[] bytes = createMessage(messageLength); + + when(rawChannel.read(same(readBuffer))).thenReturn(bytes.length); + doAnswer(getAnswerForBytes(bytes)).when(sslDriver).read(channelBuffer); + + + when(readConsumer.consumeReads(channelBuffer)).thenReturn(0); + + assertEquals(messageLength, context.read()); + + assertEquals(bytes.length, channelBuffer.getIndex()); + verify(readConsumer, times(1)).consumeReads(channelBuffer); + + when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength * 2, 0); + + assertEquals(messageLength, context.read()); + + assertEquals(0, channelBuffer.getIndex()); + assertEquals(BigArrays.BYTE_PAGE_SIZE - (bytes.length * 2), channelBuffer.getCapacity()); + verify(readConsumer, times(2)).consumeReads(channelBuffer); + } + + public void testReadThrowsIOException() throws IOException { + IOException ioException = new IOException(); + when(rawChannel.read(any(ByteBuffer.class))).thenThrow(ioException); + + IOException ex = expectThrows(IOException.class, () -> context.read()); + assertSame(ioException, ex); + } + + public void testReadThrowsIOExceptionMeansReadyForClose() throws IOException { + when(rawChannel.read(any(ByteBuffer.class))).thenThrow(new IOException()); + + assertFalse(context.selectorShouldClose()); + expectThrows(IOException.class, () -> context.read()); + assertTrue(context.selectorShouldClose()); + } + + public void testReadLessThanZeroMeansReadyForClose() throws IOException { + when(rawChannel.read(any(ByteBuffer.class))).thenReturn(-1); + + assertEquals(0, context.read()); + + assertTrue(context.selectorShouldClose()); + } + + @SuppressWarnings("unchecked") + public void 
testCloseClosesChannelBuffer() throws IOException { + try (SocketChannel realChannel = SocketChannel.open()) { + when(channel.getRawChannel()).thenReturn(realChannel); + + AtomicInteger closeCount = new AtomicInteger(0); + Supplier pageSupplier = () -> new InboundChannelBuffer.Page(ByteBuffer.allocate(1 << 14), + closeCount::incrementAndGet); + InboundChannelBuffer buffer = new InboundChannelBuffer(pageSupplier); + buffer.ensureCapacity(1); + SSLChannelContext context = new SSLChannelContext(channel, selector, exceptionHandler, sslDriver, readConsumer, buffer); + when(channel.isOpen()).thenReturn(true); + context.closeFromSelector(); + assertEquals(1, closeCount.get()); + } + } + + @SuppressWarnings("unchecked") + public void testWriteOpsClearedOnClose() throws IOException { + try (SocketChannel realChannel = SocketChannel.open()) { + when(channel.getRawChannel()).thenReturn(realChannel); + context = new SSLChannelContext(channel, selector, exceptionHandler, sslDriver, readConsumer, channelBuffer); + assertFalse(context.hasQueuedWriteOps()); + + ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; + context.queueWriteOperation(new BytesWriteOperation(context, buffer, listener)); + + when(sslDriver.readyForApplicationWrites()).thenReturn(true); + assertTrue(context.hasQueuedWriteOps()); + + when(channel.isOpen()).thenReturn(true); + context.closeFromSelector(); + + verify(selector).executeFailedListener(same(listener), any(ClosedChannelException.class)); + + assertFalse(context.hasQueuedWriteOps()); + } + } + + @SuppressWarnings("unchecked") + public void testSSLDriverClosedOnClose() throws IOException { + try (SocketChannel realChannel = SocketChannel.open()) { + when(channel.getRawChannel()).thenReturn(realChannel); + context = new SSLChannelContext(channel, selector, exceptionHandler, sslDriver, readConsumer, channelBuffer); + when(channel.isOpen()).thenReturn(true); + context.closeFromSelector(); + + verify(sslDriver).close(); + } + } + + public void testWriteFailsIfClosing() { + context.closeChannel(); + + ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; + context.sendMessage(buffers, listener); + + verify(listener).accept(isNull(Void.class), any(ClosedChannelException.class)); + } + + public void testSendMessageFromDifferentThreadIsQueuedWithSelector() throws Exception { + ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(BytesWriteOperation.class); + + when(selector.isOnCurrentThread()).thenReturn(false); + + ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; + context.sendMessage(buffers, listener); + + verify(selector).queueWrite(writeOpCaptor.capture()); + BytesWriteOperation writeOp = writeOpCaptor.getValue(); + + assertSame(listener, writeOp.getListener()); + assertSame(context, writeOp.getChannel()); + assertEquals(buffers[0], writeOp.getBuffersToWrite()[0]); + } + + public void testSendMessageFromSameThreadIsQueuedInChannel() { + ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(BytesWriteOperation.class); + + ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; + context.sendMessage(buffers, listener); + + verify(selector).queueWriteInChannelBuffer(writeOpCaptor.capture()); + BytesWriteOperation writeOp = writeOpCaptor.getValue(); + + assertSame(listener, writeOp.getListener()); + assertSame(context, writeOp.getChannel()); + assertEquals(buffers[0], writeOp.getBuffersToWrite()[0]); + } + + public void testWriteIsQueuedInChannel() { + when(sslDriver.readyForApplicationWrites()).thenReturn(true); + 
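+        // no pending flush and no handshake writes, so the queued application write is the only outstanding work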
when(sslDriver.hasFlushPending()).thenReturn(false); + when(sslDriver.needsNonApplicationWrite()).thenReturn(false); + assertFalse(context.hasQueuedWriteOps()); + + ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; + context.queueWriteOperation(new BytesWriteOperation(context, buffer, listener)); + + assertTrue(context.hasQueuedWriteOps()); + } + + public void testQueuedWritesAreIgnoredWhenNotReadyForAppWrites() { + when(sslDriver.readyForApplicationWrites()).thenReturn(false); + when(sslDriver.hasFlushPending()).thenReturn(false); + when(sslDriver.needsNonApplicationWrite()).thenReturn(false); + + ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; + context.queueWriteOperation(new BytesWriteOperation(context, buffer, listener)); + + assertFalse(context.hasQueuedWriteOps()); + } + + public void testPendingFlushMeansWriteInterested() { + when(sslDriver.readyForApplicationWrites()).thenReturn(randomBoolean()); + when(sslDriver.hasFlushPending()).thenReturn(true); + when(sslDriver.needsNonApplicationWrite()).thenReturn(false); + + assertTrue(context.hasQueuedWriteOps()); + } + + public void testNeedsNonAppWritesMeansWriteInterested() { + when(sslDriver.readyForApplicationWrites()).thenReturn(false); + when(sslDriver.hasFlushPending()).thenReturn(false); + when(sslDriver.needsNonApplicationWrite()).thenReturn(true); + + assertTrue(context.hasQueuedWriteOps()); + } + + public void testNotWritesInterestInAppMode() { + when(sslDriver.readyForApplicationWrites()).thenReturn(true); + when(sslDriver.hasFlushPending()).thenReturn(false); + + assertFalse(context.hasQueuedWriteOps()); + + verify(sslDriver, times(0)).needsNonApplicationWrite(); + } + + public void testFirstFlushMustFinishForWriteToContinue() throws Exception { + when(sslDriver.hasFlushPending()).thenReturn(true, true); + when(sslDriver.readyForApplicationWrites()).thenReturn(false); + + context.flushChannel(); + + verify(sslDriver, times(0)).nonApplicationWrite(); + } + + public void testNonAppWrites() throws Exception { + when(sslDriver.hasFlushPending()).thenReturn(false, false, true, false, true); + when(sslDriver.needsNonApplicationWrite()).thenReturn(true, true, false); + when(sslDriver.readyForApplicationWrites()).thenReturn(false); + + context.flushChannel(); + + verify(sslDriver, times(2)).nonApplicationWrite(); + verify(rawChannel, times(2)).write(sslDriver.getNetworkWriteBuffer()); + } + + public void testNonAppWritesStopIfBufferNotFullyFlushed() throws Exception { + when(sslDriver.hasFlushPending()).thenReturn(false, false, true, true); + when(sslDriver.needsNonApplicationWrite()).thenReturn(true, true, true, true); + when(sslDriver.readyForApplicationWrites()).thenReturn(false); + + context.flushChannel(); + + verify(sslDriver, times(1)).nonApplicationWrite(); + verify(rawChannel, times(1)).write(sslDriver.getNetworkWriteBuffer()); + } + + public void testQueuedWriteIsFlushedInFlushCall() throws Exception { + ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; + BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); + context.queueWriteOperation(writeOperation); + + when(writeOperation.getBuffersToWrite()).thenReturn(buffers); + when(writeOperation.getListener()).thenReturn(listener); + when(sslDriver.hasFlushPending()).thenReturn(false, false, false, false); + when(sslDriver.readyForApplicationWrites()).thenReturn(true); + when(sslDriver.applicationWrite(buffers)).thenReturn(10); + when(writeOperation.isFullyFlushed()).thenReturn(false,true); + context.flushChannel(); + + verify(writeOperation).incrementIndex(10); 
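+        // a single flush writes the encrypted bytes to the raw channel once, completes the listener, and leaves no queued writes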
+ verify(rawChannel, times(1)).write(sslDriver.getNetworkWriteBuffer()); + verify(selector).executeListener(listener, null); + assertFalse(context.hasQueuedWriteOps()); + } + + public void testPartialFlush() throws IOException { + ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; + BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); + context.queueWriteOperation(writeOperation); + + when(writeOperation.getBuffersToWrite()).thenReturn(buffers); + when(writeOperation.getListener()).thenReturn(listener); + when(sslDriver.hasFlushPending()).thenReturn(false, false, true); + when(sslDriver.readyForApplicationWrites()).thenReturn(true); + when(sslDriver.applicationWrite(buffers)).thenReturn(5); + when(writeOperation.isFullyFlushed()).thenReturn(false, false); + context.flushChannel(); + + verify(writeOperation).incrementIndex(5); + verify(rawChannel, times(1)).write(sslDriver.getNetworkWriteBuffer()); + verify(selector, times(0)).executeListener(listener, null); + assertTrue(context.hasQueuedWriteOps()); + } + + @SuppressWarnings("unchecked") + public void testMultipleWritesPartialFlushes() throws IOException { + BiConsumer listener2 = mock(BiConsumer.class); + ByteBuffer[] buffers1 = {ByteBuffer.allocate(10)}; + ByteBuffer[] buffers2 = {ByteBuffer.allocate(5)}; + BytesWriteOperation writeOperation1 = mock(BytesWriteOperation.class); + BytesWriteOperation writeOperation2 = mock(BytesWriteOperation.class); + when(writeOperation1.getBuffersToWrite()).thenReturn(buffers1); + when(writeOperation2.getBuffersToWrite()).thenReturn(buffers2); + when(writeOperation1.getListener()).thenReturn(listener); + when(writeOperation2.getListener()).thenReturn(listener2); + context.queueWriteOperation(writeOperation1); + context.queueWriteOperation(writeOperation2); + + when(sslDriver.hasFlushPending()).thenReturn(false, false, false, false, false, true); + when(sslDriver.readyForApplicationWrites()).thenReturn(true); + when(sslDriver.applicationWrite(buffers1)).thenReturn(5, 5); + when(sslDriver.applicationWrite(buffers2)).thenReturn(3); + when(writeOperation1.isFullyFlushed()).thenReturn(false, false, true); + when(writeOperation2.isFullyFlushed()).thenReturn(false); + context.flushChannel(); + + verify(writeOperation1, times(2)).incrementIndex(5); + verify(rawChannel, times(3)).write(sslDriver.getNetworkWriteBuffer()); + verify(selector).executeListener(listener, null); + verify(selector, times(0)).executeListener(listener2, null); + assertTrue(context.hasQueuedWriteOps()); + } + + public void testWhenIOExceptionThrownListenerIsCalled() throws IOException { + ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; + BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); + context.queueWriteOperation(writeOperation); + + IOException exception = new IOException(); + when(writeOperation.getBuffersToWrite()).thenReturn(buffers); + when(writeOperation.getListener()).thenReturn(listener); + when(sslDriver.hasFlushPending()).thenReturn(false, false); + when(sslDriver.readyForApplicationWrites()).thenReturn(true); + when(sslDriver.applicationWrite(buffers)).thenReturn(5); + when(rawChannel.write(sslDriver.getNetworkWriteBuffer())).thenThrow(exception); + when(writeOperation.isFullyFlushed()).thenReturn(false); + expectThrows(IOException.class, () -> context.flushChannel()); + + verify(writeOperation).incrementIndex(5); + verify(selector).executeFailedListener(listener, exception); + assertFalse(context.hasQueuedWriteOps()); + } + + public void testWriteIOExceptionMeansChannelReadyToClose() 
throws Exception { + when(sslDriver.hasFlushPending()).thenReturn(true); + when(sslDriver.needsNonApplicationWrite()).thenReturn(true); + when(sslDriver.readyForApplicationWrites()).thenReturn(false); + when(rawChannel.write(sslDriver.getNetworkWriteBuffer())).thenThrow(new IOException()); + + assertFalse(context.selectorShouldClose()); + expectThrows(IOException.class, () -> context.flushChannel()); + assertTrue(context.selectorShouldClose()); + } + + public void testReadyToCloseIfDriverIndicateClosed() { + when(sslDriver.isClosed()).thenReturn(false, true); + assertFalse(context.selectorShouldClose()); + assertTrue(context.selectorShouldClose()); + } + + public void testInitiateCloseFromDifferentThreadSchedulesCloseNotify() { + when(selector.isOnCurrentThread()).thenReturn(false, true); + context.closeChannel(); + + ArgumentCaptor captor = ArgumentCaptor.forClass(WriteOperation.class); + verify(selector).queueWrite(captor.capture()); + + context.queueWriteOperation(captor.getValue()); + verify(sslDriver).initiateClose(); + } + + public void testInitiateCloseFromSameThreadSchedulesCloseNotify() { + context.closeChannel(); + + ArgumentCaptor captor = ArgumentCaptor.forClass(WriteOperation.class); + verify(selector).queueWriteInChannelBuffer(captor.capture()); + + context.queueWriteOperation(captor.getValue()); + verify(sslDriver).initiateClose(); + } + + @SuppressWarnings("unchecked") + public void testRegisterInitiatesDriver() throws IOException { + try (Selector realSelector = Selector.open(); + SocketChannel realSocket = SocketChannel.open()) { + realSocket.configureBlocking(false); + when(selector.rawSelector()).thenReturn(realSelector); + when(channel.getRawChannel()).thenReturn(realSocket); + context = new SSLChannelContext(channel, selector, exceptionHandler, sslDriver, readConsumer, channelBuffer); + context.register(); + verify(sslDriver).init(); + } + } + + private Answer getAnswerForBytes(byte[] bytes) { + return invocationOnMock -> { + InboundChannelBuffer buffer = (InboundChannelBuffer) invocationOnMock.getArguments()[0]; + buffer.ensureCapacity(buffer.getIndex() + bytes.length); + ByteBuffer[] buffers = buffer.sliceBuffersFrom(buffer.getIndex()); + assert buffers[0].remaining() > bytes.length; + buffers[0].put(bytes); + buffer.incrementIndex(bytes.length); + return bytes.length; + }; + } + + private static byte[] createMessage(int length) { + byte[] bytes = new byte[length]; + for (int i = 0; i < length; ++i) { + bytes[i] = randomByte(); + } + return bytes; + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLDriverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLDriverTests.java new file mode 100644 index 0000000000000..b98e4e0ce5735 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLDriverTests.java @@ -0,0 +1,348 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.transport.nio; + +import org.elasticsearch.nio.InboundChannelBuffer; +import org.elasticsearch.test.ESTestCase; + +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLException; +import javax.net.ssl.TrustManagerFactory; +import java.io.IOException; +import java.io.InputStream; +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.security.KeyStore; +import java.security.SecureRandom; +import java.util.Arrays; +import java.util.function.Supplier; + +public class SSLDriverTests extends ESTestCase { + + private final Supplier pageSupplier = + () -> new InboundChannelBuffer.Page(ByteBuffer.allocate(1 << 14), () -> {}); + private InboundChannelBuffer serverBuffer = new InboundChannelBuffer(pageSupplier); + private InboundChannelBuffer clientBuffer = new InboundChannelBuffer(pageSupplier); + private InboundChannelBuffer genericBuffer = new InboundChannelBuffer(pageSupplier); + + public void testPingPongAndClose() throws Exception { + SSLContext sslContext = getSSLContext(); + + SSLDriver clientDriver = getDriver(sslContext.createSSLEngine(), true); + SSLDriver serverDriver = getDriver(sslContext.createSSLEngine(), false); + + handshake(clientDriver, serverDriver); + + ByteBuffer[] buffers = {ByteBuffer.wrap("ping".getBytes(StandardCharsets.UTF_8))}; + sendAppData(clientDriver, serverDriver, buffers); + serverDriver.read(serverBuffer); + assertEquals(ByteBuffer.wrap("ping".getBytes(StandardCharsets.UTF_8)), serverBuffer.sliceBuffersTo(4)[0]); + + ByteBuffer[] buffers2 = {ByteBuffer.wrap("pong".getBytes(StandardCharsets.UTF_8))}; + sendAppData(serverDriver, clientDriver, buffers2); + clientDriver.read(clientBuffer); + assertEquals(ByteBuffer.wrap("pong".getBytes(StandardCharsets.UTF_8)), clientBuffer.sliceBuffersTo(4)[0]); + + assertFalse(clientDriver.needsNonApplicationWrite()); + normalClose(clientDriver, serverDriver); + } + + public void testRenegotiate() throws Exception { + SSLContext sslContext = getSSLContext(); + + SSLDriver clientDriver = getDriver(sslContext.createSSLEngine(), true); + SSLDriver serverDriver = getDriver(sslContext.createSSLEngine(), false); + + handshake(clientDriver, serverDriver); + + ByteBuffer[] buffers = {ByteBuffer.wrap("ping".getBytes(StandardCharsets.UTF_8))}; + sendAppData(clientDriver, serverDriver, buffers); + serverDriver.read(serverBuffer); + assertEquals(ByteBuffer.wrap("ping".getBytes(StandardCharsets.UTF_8)), serverBuffer.sliceBuffersTo(4)[0]); + + clientDriver.renegotiate(); + assertTrue(clientDriver.isHandshaking()); + assertFalse(clientDriver.readyForApplicationWrites()); + + // This tests that the client driver can still receive data based on the prior handshake + ByteBuffer[] buffers2 = {ByteBuffer.wrap("pong".getBytes(StandardCharsets.UTF_8))}; + sendAppData(serverDriver, clientDriver, buffers2); + clientDriver.read(clientBuffer); + assertEquals(ByteBuffer.wrap("pong".getBytes(StandardCharsets.UTF_8)), clientBuffer.sliceBuffersTo(4)[0]); + + handshake(clientDriver, serverDriver, true); + sendAppData(clientDriver, serverDriver, buffers); + serverDriver.read(serverBuffer); + assertEquals(ByteBuffer.wrap("ping".getBytes(StandardCharsets.UTF_8)), serverBuffer.sliceBuffersTo(4)[0]); + sendAppData(serverDriver, clientDriver, buffers2); + clientDriver.read(clientBuffer); + assertEquals(ByteBuffer.wrap("pong".getBytes(StandardCharsets.UTF_8)), 
clientBuffer.sliceBuffersTo(4)[0]); + + normalClose(clientDriver, serverDriver); + } + + public void testBigAppData() throws Exception { + SSLContext sslContext = getSSLContext(); + + SSLDriver clientDriver = getDriver(sslContext.createSSLEngine(), true); + SSLDriver serverDriver = getDriver(sslContext.createSSLEngine(), false); + + handshake(clientDriver, serverDriver); + + ByteBuffer buffer = ByteBuffer.allocate(1 << 15); + for (int i = 0; i < (1 << 15); ++i) { + buffer.put((byte) i); + } + ByteBuffer[] buffers = {buffer}; + sendAppData(clientDriver, serverDriver, buffers); + serverDriver.read(serverBuffer); + assertEquals(16384, serverBuffer.sliceBuffersFrom(0)[0].limit()); + assertEquals(16384, serverBuffer.sliceBuffersFrom(0)[1].limit()); + + ByteBuffer[] buffers2 = {ByteBuffer.wrap("pong".getBytes(StandardCharsets.UTF_8))}; + sendAppData(serverDriver, clientDriver, buffers2); + clientDriver.read(clientBuffer); + assertEquals(ByteBuffer.wrap("pong".getBytes(StandardCharsets.UTF_8)), clientBuffer.sliceBuffersTo(4)[0]); + + assertFalse(clientDriver.needsNonApplicationWrite()); + normalClose(clientDriver, serverDriver); + } + + public void testHandshakeFailureBecauseProtocolMismatch() throws Exception { + SSLContext sslContext = getSSLContext(); + SSLEngine clientEngine = sslContext.createSSLEngine(); + SSLEngine serverEngine = sslContext.createSSLEngine(); + String[] serverProtocols = {"TLSv1.1", "TLSv1.2"}; + serverEngine.setEnabledProtocols(serverProtocols); + String[] clientProtocols = {"TLSv1"}; + clientEngine.setEnabledProtocols(clientProtocols); + SSLDriver clientDriver = getDriver(clientEngine, true); + SSLDriver serverDriver = getDriver(serverEngine, false); + + SSLException sslException = expectThrows(SSLException.class, () -> handshake(clientDriver, serverDriver)); + assertEquals("Client requested protocol TLSv1 not enabled or not supported", sslException.getMessage()); + failedCloseAlert(serverDriver, clientDriver); + } + + public void testHandshakeFailureBecauseNoCiphers() throws Exception { + SSLContext sslContext = getSSLContext(); + SSLEngine clientEngine = sslContext.createSSLEngine(); + SSLEngine serverEngine = sslContext.createSSLEngine(); + String[] enabledCipherSuites = clientEngine.getEnabledCipherSuites(); + int midpoint = enabledCipherSuites.length / 2; + String[] serverCiphers = Arrays.copyOfRange(enabledCipherSuites, 0, midpoint); + serverEngine.setEnabledCipherSuites(serverCiphers); + String[] clientCiphers = Arrays.copyOfRange(enabledCipherSuites, midpoint, enabledCipherSuites.length - 1); + clientEngine.setEnabledCipherSuites(clientCiphers); + SSLDriver clientDriver = getDriver(clientEngine, true); + SSLDriver serverDriver = getDriver(serverEngine, false); + + SSLException sslException = expectThrows(SSLException.class, () -> handshake(clientDriver, serverDriver)); + assertEquals("no cipher suites in common", sslException.getMessage()); + failedCloseAlert(serverDriver, clientDriver); + } + + public void testCloseDuringHandshake() throws Exception { + SSLContext sslContext = getSSLContext(); + SSLDriver clientDriver = getDriver(sslContext.createSSLEngine(), true); + SSLDriver serverDriver = getDriver(sslContext.createSSLEngine(), false); + + clientDriver.init(); + serverDriver.init(); + + assertTrue(clientDriver.needsNonApplicationWrite()); + assertFalse(serverDriver.needsNonApplicationWrite()); + sendHandshakeMessages(clientDriver, serverDriver); + sendHandshakeMessages(serverDriver, clientDriver); + + sendData(clientDriver, serverDriver); + + 
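+ // Both drivers should still be mid-handshake at this point (asserted below); the server then
+ // initiates a close so the close_notify-during-handshake path is exercised.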
assertTrue(clientDriver.isHandshaking()); + assertTrue(serverDriver.isHandshaking()); + + assertFalse(serverDriver.needsNonApplicationWrite()); + serverDriver.initiateClose(); + assertTrue(serverDriver.needsNonApplicationWrite()); + assertFalse(serverDriver.isClosed()); + sendNeededWrites(serverDriver, clientDriver); + // We are immediately fully closed due to SSLEngine inconsistency + assertTrue(serverDriver.isClosed()); + // This should not throw exception yet as the SSLEngine will not UNWRAP data while attempting to WRAP + clientDriver.read(clientBuffer); + sendNeededWrites(clientDriver, serverDriver); + SSLException sslException = expectThrows(SSLException.class, () -> clientDriver.read(clientBuffer)); + assertEquals("Received close_notify during handshake", sslException.getMessage()); + assertTrue(clientDriver.needsNonApplicationWrite()); + sendNeededWrites(clientDriver, serverDriver); + serverDriver.read(serverBuffer); + assertTrue(clientDriver.isClosed()); + } + + private void failedCloseAlert(SSLDriver sendDriver, SSLDriver receiveDriver) throws SSLException { + assertTrue(sendDriver.needsNonApplicationWrite()); + assertFalse(sendDriver.isClosed()); + + sendNeededWrites(sendDriver, receiveDriver); + assertTrue(sendDriver.isClosed()); + sendDriver.close(); + + SSLException sslException = expectThrows(SSLException.class, () -> receiveDriver.read(genericBuffer)); + assertEquals("Received fatal alert: handshake_failure", sslException.getMessage()); + if (receiveDriver.needsNonApplicationWrite() == false) { + assertTrue(receiveDriver.isClosed()); + receiveDriver.close(); + } else { + assertFalse(receiveDriver.isClosed()); + expectThrows(SSLException.class, receiveDriver::close); + } + } + + private SSLContext getSSLContext() throws Exception { + String relativePath = "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.jks"; + SSLContext sslContext; + try (InputStream in = Files.newInputStream(getDataPath(relativePath))) { + KeyStore keyStore = KeyStore.getInstance("jks"); + keyStore.load(in, "testclient".toCharArray()); + TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(keyStore); + KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + kmf.init(keyStore, "testclient".toCharArray()); + sslContext = SSLContext.getInstance("TLSv1.2"); + sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom()); + return sslContext; + } + } + + private void normalClose(SSLDriver sendDriver, SSLDriver receiveDriver) throws IOException { + sendDriver.initiateClose(); + assertFalse(sendDriver.readyForApplicationWrites()); + assertTrue(sendDriver.needsNonApplicationWrite()); + sendNeededWrites(sendDriver, receiveDriver); + assertFalse(sendDriver.isClosed()); + + receiveDriver.read(genericBuffer); + assertFalse(receiveDriver.isClosed()); + + assertFalse(receiveDriver.readyForApplicationWrites()); + assertTrue(receiveDriver.needsNonApplicationWrite()); + sendNeededWrites(receiveDriver, sendDriver); + assertTrue(receiveDriver.isClosed()); + + sendDriver.read(genericBuffer); + assertTrue(sendDriver.isClosed()); + + sendDriver.close(); + receiveDriver.close(); + } + + private void sendNeededWrites(SSLDriver sendDriver, SSLDriver receiveDriver) throws SSLException { + while (sendDriver.needsNonApplicationWrite() || sendDriver.hasFlushPending()) { + if (sendDriver.hasFlushPending() == false) { + sendDriver.nonApplicationWrite(); + } + if (sendDriver.hasFlushPending()) 
{ + sendData(sendDriver, receiveDriver, true); + } + } + } + + private void handshake(SSLDriver clientDriver, SSLDriver serverDriver) throws IOException { + handshake(clientDriver, serverDriver, false); + } + + private void handshake(SSLDriver clientDriver, SSLDriver serverDriver, boolean isRenegotiation) throws IOException { + if (isRenegotiation == false) { + clientDriver.init(); + serverDriver.init(); + } + + assertTrue(clientDriver.needsNonApplicationWrite() || clientDriver.hasFlushPending()); + assertFalse(serverDriver.needsNonApplicationWrite()); + sendHandshakeMessages(clientDriver, serverDriver); + + assertTrue(clientDriver.isHandshaking()); + assertTrue(serverDriver.isHandshaking()); + + sendHandshakeMessages(serverDriver, clientDriver); + + assertTrue(clientDriver.isHandshaking()); + assertTrue(serverDriver.isHandshaking()); + + sendHandshakeMessages(clientDriver, serverDriver); + + assertTrue(clientDriver.isHandshaking()); + assertTrue(serverDriver.isHandshaking()); + + sendHandshakeMessages(serverDriver, clientDriver); + + assertFalse(clientDriver.isHandshaking()); + assertFalse(serverDriver.isHandshaking()); + } + + private void sendHandshakeMessages(SSLDriver sendDriver, SSLDriver receiveDriver) throws IOException { + assertTrue(sendDriver.needsNonApplicationWrite() || sendDriver.hasFlushPending()); + + while (sendDriver.needsNonApplicationWrite() || sendDriver.hasFlushPending()) { + assertFalse(receiveDriver.needsNonApplicationWrite()); + if (sendDriver.hasFlushPending() == false) { + sendDriver.nonApplicationWrite(); + } + if (sendDriver.isHandshaking()) { + assertTrue(sendDriver.hasFlushPending()); + sendData(sendDriver, receiveDriver); + assertFalse(sendDriver.hasFlushPending()); + receiveDriver.read(genericBuffer); + } + } + if (receiveDriver.isHandshaking()) { + assertTrue(receiveDriver.needsNonApplicationWrite() || receiveDriver.hasFlushPending()); + } + } + + private void sendAppData(SSLDriver sendDriver, SSLDriver receiveDriver, ByteBuffer[] message) throws IOException { + + assertFalse(sendDriver.needsNonApplicationWrite()); + + int bytesToEncrypt = Arrays.stream(message).mapToInt(Buffer::remaining).sum(); + + int bytesEncrypted = 0; + while (bytesToEncrypt > bytesEncrypted) { + bytesEncrypted += sendDriver.applicationWrite(message); + sendData(sendDriver, receiveDriver); + } + } + + private void sendData(SSLDriver sendDriver, SSLDriver receiveDriver) { + sendData(sendDriver, receiveDriver, randomBoolean()); + } + + private void sendData(SSLDriver sendDriver, SSLDriver receiveDriver, boolean partial) { + ByteBuffer writeBuffer = sendDriver.getNetworkWriteBuffer(); + ByteBuffer readBuffer = receiveDriver.getNetworkReadBuffer(); + if (partial) { + int initialLimit = writeBuffer.limit(); + int bytesToWrite = writeBuffer.remaining() / (randomInt(2) + 2); + writeBuffer.limit(writeBuffer.position() + bytesToWrite); + readBuffer.put(writeBuffer); + writeBuffer.limit(initialLimit); + assertTrue(sendDriver.hasFlushPending()); + readBuffer.put(writeBuffer); + assertFalse(sendDriver.hasFlushPending()); + + } else { + readBuffer.put(writeBuffer); + assertFalse(sendDriver.hasFlushPending()); + } + } + + private SSLDriver getDriver(SSLEngine engine, boolean isClient) { + return new SSLDriver(engine, isClient); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java new file 
mode 100644 index 0000000000000..0a7ee13b9e296 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SimpleSecurityNioTransportTests.java @@ -0,0 +1,218 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.transport.nio; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.AbstractSimpleTransportTestCase; +import org.elasticsearch.transport.BindTransportException; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.TcpChannel; +import org.elasticsearch.transport.TcpTransport; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.common.socket.SocketAccess; +import org.elasticsearch.xpack.core.ssl.SSLService; + +import javax.net.SocketFactory; +import javax.net.ssl.HandshakeCompletedListener; +import javax.net.ssl.SSLSocket; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.SocketTimeoutException; +import java.net.UnknownHostException; +import java.nio.file.Path; +import java.util.Collections; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; + +public class SimpleSecurityNioTransportTests extends AbstractSimpleTransportTestCase { + + private SSLService createSSLService() { + Path testnodeStore = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks"); + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("xpack.ssl.keystore.secure_password", "testnode"); + Settings settings = Settings.builder() + .put("xpack.security.transport.ssl.enabled", true) + .put("xpack.ssl.keystore.path", testnodeStore) + .setSecureSettings(secureSettings) + .put("path.home", createTempDir()) + .build(); + try { + return new SSLService(settings, TestEnvironment.newEnvironment(settings)); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public MockTransportService nioFromThreadPool(Settings settings, ThreadPool threadPool, final Version version, + ClusterSettings clusterSettings, boolean 
doHandshake) { + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); + NetworkService networkService = new NetworkService(Collections.emptyList()); + Settings settings1 = Settings.builder() + .put(settings) + .put("xpack.security.transport.ssl.enabled", true).build(); + Transport transport = new SecurityNioTransport(settings1, threadPool, + networkService, BigArrays.NON_RECYCLING_INSTANCE, new MockPageCacheRecycler(settings), namedWriteableRegistry, + new NoneCircuitBreakerService(), createSSLService()) { + + @Override + protected Version executeHandshake(DiscoveryNode node, TcpChannel channel, TimeValue timeout) throws IOException, + InterruptedException { + if (doHandshake) { + return super.executeHandshake(node, channel, timeout); + } else { + return version.minimumCompatibilityVersion(); + } + } + + @Override + protected Version getCurrentVersion() { + return version; + } + + }; + MockTransportService mockTransportService = + MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings, + Collections.emptySet()); + mockTransportService.start(); + return mockTransportService; + } + + @Override + protected MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake) { + settings = Settings.builder().put(settings) + .put(TcpTransport.PORT.getKey(), "0") + .build(); + MockTransportService transportService = nioFromThreadPool(settings, threadPool, version, clusterSettings, doHandshake); + transportService.start(); + return transportService; + } + + @Override + protected void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException { + @SuppressWarnings("unchecked") + TcpTransport.NodeChannels channels = (TcpTransport.NodeChannels) connection; + TcpChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true); + } + + public void testConnectException() throws UnknownHostException { + try { + serviceA.connectToNode(new DiscoveryNode("C", new TransportAddress(InetAddress.getByName("localhost"), 9876), + emptyMap(), emptySet(), Version.CURRENT)); + fail("Expected ConnectTransportException"); + } catch (ConnectTransportException e) { + assertThat(e.getMessage(), containsString("connect_exception")); + assertThat(e.getMessage(), containsString("[127.0.0.1:9876]")); + Throwable cause = e.getCause(); + assertThat(cause, instanceOf(IOException.class)); + } + } + + public void testBindUnavailableAddress() { + // this is on a lower level since it needs access to the TransportService before it's started + int port = serviceA.boundAddress().publishAddress().getPort(); + Settings settings = Settings.builder() + .put(Node.NODE_NAME_SETTING.getKey(), "foobar") + .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "") + .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") + .put("transport.tcp.port", port) + .build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + BindTransportException bindTransportException = expectThrows(BindTransportException.class, () -> { + MockTransportService transportService = nioFromThreadPool(settings, threadPool, Version.CURRENT, clusterSettings, true); + try { + transportService.start(); + } finally { + transportService.stop(); + transportService.close(); + } + }); + assertEquals("Failed to bind to [" + port + "]", bindTransportException.getMessage()); + } + 
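For illustration only, outside this patch: the bind-failure assertion above relies on the operating system refusing a second bind to an address and port that are already in use. A tiny sketch of that underlying behaviour with plain NIO, independent of any Elasticsearch classes (the class name is made up):

```java
import java.net.BindException;
import java.net.InetSocketAddress;
import java.nio.channels.ServerSocketChannel;

public class DoubleBindSketch {
    public static void main(String[] args) throws Exception {
        try (ServerSocketChannel first = ServerSocketChannel.open()) {
            first.bind(new InetSocketAddress("127.0.0.1", 0)); // grab a free ephemeral port
            int port = ((InetSocketAddress) first.getLocalAddress()).getPort();
            try (ServerSocketChannel second = ServerSocketChannel.open()) {
                second.bind(new InetSocketAddress("127.0.0.1", port)); // same port while it is still held
                System.out.println("unexpected: second bind succeeded");
            } catch (BindException expected) {
                System.out.println("second bind refused, as the test expects: " + expected.getMessage());
            }
        }
    }
}
```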
+ @SuppressForbidden(reason = "Need to open socket connection") + public void testRenegotiation() throws Exception { + SSLService sslService = createSSLService(); + SocketFactory factory = sslService.sslSocketFactory(Settings.EMPTY); + try (SSLSocket socket = (SSLSocket) factory.createSocket()) { + SocketAccess.doPrivileged(() -> socket.connect(serviceA.boundAddress().publishAddress().address())); + + CountDownLatch handshakeLatch = new CountDownLatch(1); + HandshakeCompletedListener firstListener = event -> handshakeLatch.countDown(); + socket.addHandshakeCompletedListener(firstListener); + socket.startHandshake(); + handshakeLatch.await(); + socket.removeHandshakeCompletedListener(firstListener); + + OutputStreamStreamOutput stream = new OutputStreamStreamOutput(socket.getOutputStream()); + stream.writeByte((byte) 'E'); + stream.writeByte((byte) 'S'); + stream.writeInt(-1); + stream.flush(); + + socket.startHandshake(); + CountDownLatch renegotiationLatch = new CountDownLatch(1); + HandshakeCompletedListener secondListener = event -> renegotiationLatch.countDown(); + socket.addHandshakeCompletedListener(secondListener); + + AtomicReference error = new AtomicReference<>(); + CountDownLatch catchReadErrorsLatch = new CountDownLatch(1); + Thread renegotiationThread = new Thread(() -> { + try { + socket.setSoTimeout(50); + socket.getInputStream().read(); + } catch (SocketTimeoutException e) { + // Ignore. We expect a timeout. + } catch (IOException e) { + error.set(e); + } finally { + catchReadErrorsLatch.countDown(); + } + }); + renegotiationThread.start(); + renegotiationLatch.await(); + socket.removeHandshakeCompletedListener(secondListener); + catchReadErrorsLatch.await(); + + assertNull(error.get()); + + stream.writeByte((byte) 'E'); + stream.writeByte((byte)'S'); + stream.writeInt(-1); + stream.flush(); + } + } + + // TODO: These tests currently rely on plaintext transports + + @Override + @AwaitsFix(bugUrl = "") + public void testTcpHandshake() throws IOException, InterruptedException { + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/EllipticCurveSSLTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/EllipticCurveSSLTests.java new file mode 100644 index 0000000000000..2fa376ec85408 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/EllipticCurveSSLTests.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
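Illustrative aside (not part of this patch): the renegotiation test above uses the standard JSSE pattern of registering a HandshakeCompletedListener, calling startHandshake() again on a socket whose first handshake has already completed, and waiting on a latch. A hedged sketch of just that pattern; the helper name is made up and the socket is assumed to be connected and already handshaked:

```java
import javax.net.ssl.HandshakeCompletedListener;
import javax.net.ssl.SSLSocket;

import java.util.concurrent.CountDownLatch;

final class RenegotiationSketch {

    /** Triggers a renegotiation and blocks until the new handshake has completed. */
    static void renegotiate(SSLSocket socket) throws Exception {
        CountDownLatch done = new CountDownLatch(1);
        HandshakeCompletedListener listener = event -> done.countDown();
        socket.addHandshakeCompletedListener(listener);
        socket.startHandshake(); // on an already-handshaked socket this starts a new handshake
        done.await();
        socket.removeHandshakeCompletedListener(listener);
    }
}
```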
+ */ +package org.elasticsearch.xpack.security.transport.ssl; + +import com.unboundid.util.ssl.TrustAllTrustManager; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.xpack.core.ssl.CertUtils; +import org.junit.BeforeClass; + +import javax.net.ssl.HandshakeCompletedEvent; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLSession; +import javax.net.ssl.SSLSocket; +import javax.net.ssl.SSLSocketFactory; +import javax.net.ssl.TrustManager; +import javax.net.ssl.X509ExtendedKeyManager; + +import java.io.Reader; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.AccessController; +import java.security.PrivateKey; +import java.security.PrivilegedExceptionAction; +import java.security.SecureRandom; +import java.security.cert.Certificate; +import java.util.Arrays; +import java.util.Collections; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.containsString; + +public class EllipticCurveSSLTests extends SecurityIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + final Path keyPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/prime256v1-key.pem"); + final Path certPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/prime256v1-cert.pem"); + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal).filter(s -> s.startsWith("xpack.ssl") == false)) + .put("xpack.ssl.key", keyPath) + .put("xpack.ssl.certificate", certPath) + .put("xpack.ssl.certificate_authorities", certPath) + .put("xpack.ssl.verification_mode", "certificate") // disable hostname verificate since these certs aren't setup for that + .build(); + } + + @Override + protected Settings transportClientSettings() { + final Path keyPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/prime256v1-key.pem"); + final Path certPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/prime256v1-cert.pem"); + return Settings.builder() + .put(super.transportClientSettings().filter(s -> s.startsWith("xpack.ssl") == false)) + .put("xpack.ssl.key", keyPath) + .put("xpack.ssl.certificate", certPath) + .put("xpack.ssl.certificate_authorities", certPath) + .put("xpack.ssl.verification_mode", "certificate") // disable hostname verification since these certs aren't setup for that + .build(); + } + + @Override + protected boolean transportSSLEnabled() { + return true; + } + + public void testConnection() throws Exception { + final Path keyPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/prime256v1-key.pem"); + final Path certPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/prime256v1-cert.pem"); + PrivateKey privateKey; + try (Reader reader = Files.newBufferedReader(keyPath)) { + privateKey = CertUtils.readPrivateKey(reader, () -> null); + } + Certificate[] certs = CertUtils.readCertificates(Collections.singletonList(certPath.toString()), null); + X509ExtendedKeyManager x509ExtendedKeyManager = CertUtils.keyManager(certs, privateKey, new char[0]); + SSLContext sslContext = SSLContext.getInstance("TLS"); + sslContext.init(new X509ExtendedKeyManager[] { x509ExtendedKeyManager }, + 
new TrustManager[] { new TrustAllTrustManager(false) }, new SecureRandom()); + SSLSocketFactory socketFactory = sslContext.getSocketFactory(); + NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().setTransport(true).get(); + TransportAddress address = randomFrom(response.getNodes()).getTransport().getAddress().publishAddress(); + + final CountDownLatch latch = new CountDownLatch(1); + try (SSLSocket sslSocket = AccessController.doPrivileged(new PrivilegedExceptionAction() { + @Override + public SSLSocket run() throws Exception { + return (SSLSocket) socketFactory.createSocket(address.address().getAddress(), address.address().getPort()); + }})) { + final AtomicReference reference = new AtomicReference<>(); + sslSocket.addHandshakeCompletedListener((event) -> { + reference.set(event); + latch.countDown(); + }); + sslSocket.startHandshake(); + latch.await(); + + HandshakeCompletedEvent event = reference.get(); + assertNotNull(event); + SSLSession session = event.getSession(); + Certificate[] peerChain = session.getPeerCertificates(); + assertEquals(1, peerChain.length); + assertEquals(certs[0], peerChain[0]); + assertThat(session.getCipherSuite(), containsString("ECDSA")); + } + } + + @BeforeClass + public static void assumeECDSACiphersSupported() throws Exception { + SSLContext sslContext = SSLContext.getInstance("TLSv1.2"); + sslContext.init(null, null, null); + SSLEngine sslEngine = sslContext.createSSLEngine(); + assumeTrue("ECDSA ciphers must be supported for this test to run. Enabled ciphers: " + + Arrays.toString(sslEngine.getEnabledCipherSuites()) + ", supported ciphers: " + + Arrays.toString(sslEngine.getSupportedCipherSuites()), + Arrays.stream(sslEngine.getEnabledCipherSuites()).anyMatch(s -> s.contains("ECDSA"))); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslIntegrationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslIntegrationTests.java new file mode 100644 index 0000000000000..37f13806c2388 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslIntegrationTests.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
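For illustration only, outside this patch: the @BeforeClass assumption above skips the suite when no ECDSA cipher suite is enabled on a default SSLEngine. The same probe as a standalone snippet runnable outside the test framework (the class name is made up):

```java
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;

import java.util.Arrays;

public class EcdsaSupportCheck {
    public static void main(String[] args) throws Exception {
        SSLContext ctx = SSLContext.getInstance("TLSv1.2");
        ctx.init(null, null, null); // default key managers, trust managers and randomness
        SSLEngine engine = ctx.createSSLEngine();
        boolean ecdsaEnabled = Arrays.stream(engine.getEnabledCipherSuites())
                .anyMatch(suite -> suite.contains("ECDSA"));
        System.out.println("ECDSA cipher suites enabled by default: " + ecdsaEnabled);
    }
}
```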
+ */ +package org.elasticsearch.xpack.security.transport.ssl; + +import org.apache.http.auth.AuthScope; +import org.apache.http.auth.UsernamePasswordCredentials; +import org.apache.http.client.CredentialsProvider; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.SSLConnectionSocketFactory; +import org.apache.http.impl.client.BasicCredentialsProvider; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.xpack.core.TestXPackTransportClient; +import org.elasticsearch.xpack.core.common.socket.SocketAccess; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.LocalStateSecurity; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLHandshakeException; +import javax.net.ssl.TrustManagerFactory; + +import java.io.InputStreamReader; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.security.KeyStore; +import java.security.SecureRandom; +import java.util.Locale; + +import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForStore; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; + +public class SslIntegrationTests extends SecurityIntegTestCase { + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .put("xpack.security.http.ssl.enabled", true).build(); + } + + @Override + protected boolean transportSSLEnabled() { + return true; + } + + // no SSL exception as this is the exception is returned when connecting + public void testThatUnconfiguredCiphersAreRejected() { + try (TransportClient transportClient = new TestXPackTransportClient(Settings.builder() + .put(transportClientSettings()) + .put("node.name", "programmatic_transport_client") + .put("cluster.name", internalCluster().getClusterName()) + .putList("xpack.ssl.cipher_suites", "TLS_ECDH_anon_WITH_RC4_128_SHA", "SSL_RSA_WITH_3DES_EDE_CBC_SHA") + .build(), LocalStateSecurity.class)) { + + TransportAddress transportAddress = randomFrom(internalCluster().getInstance(Transport.class).boundAddress().boundAddresses()); + transportClient.addTransportAddress(transportAddress); + + transportClient.admin().cluster().prepareHealth().get(); + fail("Expected NoNodeAvailableException"); + } catch (NoNodeAvailableException e) { + assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#")); + } + } + + // no SSL exception as this is the exception is returned when connecting + public void testThatTransportClientUsingSSLv3ProtocolIsRejected() { + try (TransportClient transportClient = new 
TestXPackTransportClient(Settings.builder() + .put(transportClientSettings()) + .put("node.name", "programmatic_transport_client") + .put("cluster.name", internalCluster().getClusterName()) + .putList("xpack.ssl.supported_protocols", new String[]{"SSLv3"}) + .build(), LocalStateSecurity.class)) { + + TransportAddress transportAddress = randomFrom(internalCluster().getInstance(Transport.class).boundAddress().boundAddresses()); + transportClient.addTransportAddress(transportAddress); + + transportClient.admin().cluster().prepareHealth().get(); + fail("Expected NoNodeAvailableException"); + } catch (NoNodeAvailableException e) { + assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#")); + } + } + + public void testThatConnectionToHTTPWorks() throws Exception { + Settings.Builder builder = Settings.builder(); + addSSLSettingsForStore(builder, "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.jks", "testclient"); + SSLService service = new SSLService(builder.build(), null); + + CredentialsProvider provider = new BasicCredentialsProvider(); + provider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials(nodeClientUsername(), + new String(nodeClientPassword().getChars()))); + try (CloseableHttpClient client = HttpClients.custom() + .setSSLSocketFactory(new SSLConnectionSocketFactory(service.sslSocketFactory(Settings.EMPTY), + SSLConnectionSocketFactory.getDefaultHostnameVerifier())) + .setDefaultCredentialsProvider(provider).build(); + CloseableHttpResponse response = SocketAccess.doPrivileged(() -> client.execute(new HttpGet(getNodeUrl())))) { + assertThat(response.getStatusLine().getStatusCode(), is(200)); + String data = Streams.copyToString(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8)); + assertThat(data, containsString("You Know, for Search")); + } + } + + public void testThatHttpUsingSSLv3IsRejected() throws Exception { + SSLContext sslContext = SSLContext.getInstance("SSL"); + TrustManagerFactory factory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + factory.init((KeyStore) null); + + sslContext.init(null, factory.getTrustManagers(), new SecureRandom()); + SSLConnectionSocketFactory sf = new SSLConnectionSocketFactory(sslContext, new String[]{ "SSLv3" }, null, + NoopHostnameVerifier.INSTANCE); + try (CloseableHttpClient client = HttpClients.custom().setSSLSocketFactory(sf).build()) { + CloseableHttpResponse result = SocketAccess.doPrivileged(() -> client.execute(new HttpGet(getNodeUrl()))); + fail("Expected a connection error due to SSLv3 not being supported by default"); + } catch (Exception e) { + assertThat(e, is(instanceOf(SSLHandshakeException.class))); + } + } + + private String getNodeUrl() { + TransportAddress transportAddress = + randomFrom(internalCluster().getInstance(HttpServerTransport.class).boundAddress().boundAddresses()); + final InetSocketAddress inetSocketAddress = transportAddress.address(); + return String.format(Locale.ROOT, "https://%s/", NetworkAddress.format(inetSocketAddress)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslMultiPortTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslMultiPortTests.java new file mode 100644 index 0000000000000..1d7ec67762ba0 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslMultiPortTests.java @@ -0,0 +1,400 @@ +/* + * 
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.transport.ssl; + +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.xpack.core.TestXPackTransportClient; +import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.core.ssl.SSLClientAuth; +import org.elasticsearch.xpack.security.LocalStateSecurity; +import org.junit.BeforeClass; + +import java.net.InetAddress; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; + +import static org.elasticsearch.test.SecuritySettingsSource.TEST_USER_NAME; +import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForStore; +import static org.elasticsearch.test.SecuritySettingsSourceField.TEST_PASSWORD; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.containsString; + +public class SslMultiPortTests extends SecurityIntegTestCase { + + private static int randomClientPort; + private static int randomNoClientAuthPort; + + @BeforeClass + public static void getRandomPort() { + randomClientPort = randomIntBetween(49000, 65500); // ephemeral port + randomNoClientAuthPort = randomIntBetween(49000, 65500); + } + + /** + * On each node sets up the following profiles: + *
* <ul>
+ *     <li>default: testnode keystore. Requires client auth</li>
+ *     <li>client: testnode-client-profile keystore that only trusts the testclient cert. Requires client auth</li>
+ *     <li>no_client_auth: testnode keystore. Does not require client auth</li>
+ * </ul>
+ */ + @Override + protected Settings nodeSettings(int nodeOrdinal) { + String randomClientPortRange = randomClientPort + "-" + (randomClientPort+100); + String randomNoClientAuthPortRange = randomNoClientAuthPort + "-" + (randomNoClientAuthPort+100); + + Path store; + try { + store = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.jks"); + assertThat(Files.exists(store), is(true)); + } catch (Exception e) { + throw new RuntimeException(e); + } + + Settings settings = Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + // client set up here + .put("transport.profiles.client.port", randomClientPortRange) + // make sure this is "localhost", no matter if ipv4 or ipv6, but be consistent + .put("transport.profiles.client.bind_host", "localhost") + .put("transport.profiles.client.xpack.security.ssl.truststore.path", store.toAbsolutePath()) + .put("transport.profiles.client.xpack.security.ssl.truststore.password", "testnode-client-profile") + .put("transport.profiles.no_client_auth.port", randomNoClientAuthPortRange) + .put("transport.profiles.no_client_auth.bind_host", "localhost") + .put("transport.profiles.no_client_auth.xpack.security.ssl.client_authentication", SSLClientAuth.NONE) + .build(); + logger.info("node {} settings:\n{}", nodeOrdinal, settings); + return settings; + } + + @Override + protected boolean transportSSLEnabled() { + return true; + } + + private TransportClient createTransportClient(Settings additionalSettings) { + Settings settings = Settings.builder() + .put(transportClientSettings().filter(s -> s.startsWith("xpack.ssl") == false)) + .put("node.name", "programmatic_transport_client") + .put("cluster.name", internalCluster().getClusterName()) + .put("xpack.security.transport.ssl.enabled", true) + .put(additionalSettings) + .build(); + //return new TestXPackTransportClient(settings, LocalStateSecurity.class); + logger.info("transport client settings:\n{}", settings); + return new TestXPackTransportClient(settings, LocalStateSecurity.class); + } + + /** + * Uses the internal cluster's transport client to test connection to the default profile. The internal transport + * client uses the same SSL settings as the default profile so a connection should always succeed + */ + public void testThatStandardTransportClientCanConnectToDefaultProfile() throws Exception { + assertGreenClusterState(internalCluster().transportClient()); + } + + /** + * Uses a transport client with the same settings as the internal cluster transport client to test connection to the + * no_client_auth profile. The internal transport client is not used here since we are connecting to a different + * profile. 
Since the no_client_auth profile does not require client authentication, the standard transport client + * connection should always succeed as the settings are the same as the default profile except for the port and + * disabling the client auth requirement + */ + public void testThatStandardTransportClientCanConnectToNoClientAuthProfile() throws Exception { + try(TransportClient transportClient = new TestXPackTransportClient(Settings.builder() + .put(transportClientSettings()) + .put("xpack.security.transport.ssl.enabled", true) + .put("node.name", "programmatic_transport_client") + .put("cluster.name", internalCluster().getClusterName()) + .build(), LocalStateSecurity.class)) { + transportClient.addTransportAddress(new TransportAddress(InetAddress.getLoopbackAddress(), + getProfilePort("no_client_auth"))); + assertGreenClusterState(transportClient); + } + } + + /** + * Uses a transport client with the same settings as the internal cluster transport client to test connection to the + * client profile. The internal transport client is not used here since we are connecting to a different + * profile. The client profile requires client auth and only trusts the certificate in the testclient-client-profile + * keystore so this connection will fail as the certificate presented by the standard transport client is not trusted + * by this profile + */ + public void testThatStandardTransportClientCannotConnectToClientProfile() throws Exception { + try (TransportClient transportClient = createTransportClient(Settings.EMPTY)) { + transportClient.addTransportAddress(new TransportAddress(InetAddress.getLoopbackAddress(), getProfilePort("client"))); + transportClient.admin().cluster().prepareHealth().get(); + fail("Expected NoNodeAvailableException"); + } catch (NoNodeAvailableException e) { + assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); + } + } + + /** + * Uses a transport client with a custom keystore; this keystore testclient-client-profile.jks trusts the testnode + * certificate and had its own self signed certificate. This test connects to the client profile, which is only + * set to trust the testclient-client-profile certificate so the connection should always succeed + */ + public void testThatProfileTransportClientCanConnectToClientProfile() throws Exception { + Settings.Builder builder = Settings.builder(); + addSSLSettingsForStore(builder, + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.jks", + "testclient-client-profile"); + try (TransportClient transportClient = createTransportClient(builder.build())) { + transportClient.addTransportAddress(new TransportAddress(InetAddress.getLoopbackAddress(), getProfilePort("client"))); + assertGreenClusterState(transportClient); + } + } + + /** + * Uses a transport client with a custom keystore; this keystore testclient-client-profile.jks trusts the testnode + * certificate and had its own self signed certificate. 
This test connects to the no_client_auth profile, which + * uses a truststore that does not trust the testclient-client-profile certificate but does not require client + * authentication + */ + public void testThatProfileTransportClientCanConnectToNoClientAuthProfile() throws Exception { + Settings.Builder builder = Settings.builder(); + addSSLSettingsForStore(builder, + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.jks", + "testclient-client-profile"); + try (TransportClient transportClient = createTransportClient(builder.build())) { + transportClient.addTransportAddress(new TransportAddress(InetAddress.getLoopbackAddress(), + getProfilePort("no_client_auth"))); + assertGreenClusterState(transportClient); + } + } + + /** + * Uses a transport client with a custom keystore; this keystore testclient-client-profile.jks trusts the testnode + * certificate and had its own self signed certificate. This test connects to the default profile, which + * uses a truststore that does not trust the testclient-client-profile certificate and requires client authentication + * so the connection should always fail + */ + public void testThatProfileTransportClientCannotConnectToDefaultProfile() throws Exception { + Settings.Builder builder = Settings.builder(); + addSSLSettingsForStore(builder, + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.jks", + "testclient-client-profile"); + try (TransportClient transportClient = createTransportClient(builder.build())) { + TransportAddress transportAddress = randomFrom(internalCluster().getInstance(Transport.class).boundAddress().boundAddresses()); + transportClient.addTransportAddress(transportAddress); + transportClient.admin().cluster().prepareHealth().get(); + fail("Expected NoNodeAvailableException"); + } catch (NoNodeAvailableException e) { + assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); + } + } + + /** + * Uses a transport client with SSL disabled. This test connects to the default profile, which should always fail + * as a non-ssl transport client cannot connect to a ssl profile + */ + public void testThatTransportClientCannotConnectToDefaultProfile() throws Exception { + Settings settings = Settings.builder() + .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) + .put("cluster.name", internalCluster().getClusterName()) + .build(); + try (TransportClient transportClient = new TestXPackTransportClient(settings, + Collections.singletonList(LocalStateSecurity.class))) { + transportClient.addTransportAddress(randomFrom(internalCluster().getInstance(Transport.class).boundAddress().boundAddresses())); + assertGreenClusterState(transportClient); + fail("Expected NoNodeAvailableException"); + } catch (NoNodeAvailableException e) { + assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); + } + } + + /** + * Uses a transport client with SSL disabled. 
This test connects to the client profile, which should always fail + * as a non-ssl transport client cannot connect to a ssl profile + */ + public void testThatTransportClientCannotConnectToClientProfile() throws Exception { + Settings settings = Settings.builder() + .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) + .put("cluster.name", internalCluster().getClusterName()) + .build(); + try (TransportClient transportClient = new TestXPackTransportClient(settings, + Collections.singletonList(LocalStateSecurity.class))) { + transportClient.addTransportAddress(new TransportAddress(InetAddress.getLoopbackAddress(), getProfilePort("client"))); + assertGreenClusterState(transportClient); + fail("Expected NoNodeAvailableException"); + } catch (NoNodeAvailableException e) { + assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); + } + } + + /** + * Uses a transport client with SSL disabled. This test connects to the no_client_auth profile, which should always fail + * as a non-ssl transport client cannot connect to a ssl profile + */ + public void testThatTransportClientCannotConnectToNoClientAuthProfile() throws Exception { + Settings settings = Settings.builder() + .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) + .put("cluster.name", internalCluster().getClusterName()) + .build(); + try (TransportClient transportClient = new TestXPackTransportClient(settings, + Collections.singletonList(LocalStateSecurity.class))) { + transportClient.addTransportAddress(new TransportAddress(InetAddress.getLoopbackAddress(), + getProfilePort("no_client_auth"))); + assertGreenClusterState(transportClient); + fail("Expected NoNodeAvailableException"); + } catch (NoNodeAvailableException e) { + assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); + } + } + + /** + * Uses a transport client with a custom truststore; this truststore truststore-testnode-only only trusts the testnode + * certificate and contains no other certification. This test connects to the no_client_auth profile, which uses + * the testnode certificate and does not require to present a certificate, so this connection should always succeed + */ + public void testThatTransportClientWithOnlyTruststoreCanConnectToNoClientAuthProfile() throws Exception { + Settings settings = Settings.builder() + .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) + .put("cluster.name", internalCluster().getClusterName()) + .put("xpack.security.transport.ssl.enabled", true) + .put("xpack.ssl.truststore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/truststore-testnode-only.jks")) + .put("xpack.ssl.truststore.password", "truststore-testnode-only") + .build(); + try (TransportClient transportClient = new TestXPackTransportClient(settings, + Collections.singletonList(LocalStateSecurity.class))) { + transportClient.addTransportAddress(new TransportAddress(InetAddress.getLoopbackAddress(), + getProfilePort("no_client_auth"))); + } + } + + /** + * Uses a transport client with a custom truststore; this truststore truststore-testnode-only only trusts the testnode + * certificate and contains no other certification. 
This test connects to the client profile, which uses + * the testnode certificate and requires the client to present a certificate, so this connection will never work as + * the client has no certificate to present + */ + public void testThatTransportClientWithOnlyTruststoreCannotConnectToClientProfile() throws Exception { + Settings settings = Settings.builder() + .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) + .put("cluster.name", internalCluster().getClusterName()) + .put("xpack.security.transport.ssl.enabled", true) + .put("xpack.ssl.client_authentication", SSLClientAuth.REQUIRED) + .put("xpack.ssl.truststore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/truststore-testnode-only.jks")) + .put("xpack.ssl.truststore.password", "truststore-testnode-only") + .build(); + try (TransportClient transportClient = new TestXPackTransportClient(settings, + Collections.singletonList(LocalStateSecurity.class))) { + transportClient.addTransportAddress(new TransportAddress(InetAddress.getLoopbackAddress(), getProfilePort("client"))); + assertGreenClusterState(transportClient); + fail("Expected NoNodeAvailableException"); + } catch (NoNodeAvailableException e) { + assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); + } + } + + /** + * Uses a transport client with a custom truststore; this truststore truststore-testnode-only only trusts the testnode + * certificate and contains no other certification. This test connects to the default profile, which uses + * the testnode certificate and requires the client to present a certificate, so this connection will never work as + * the client has no certificate to present + */ + public void testThatTransportClientWithOnlyTruststoreCannotConnectToDefaultProfile() throws Exception { + Settings settings = Settings.builder() + .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) + .put("cluster.name", internalCluster().getClusterName()) + .put("xpack.security.transport.ssl.enabled", true) + .put("xpack.ssl.client_authentication", SSLClientAuth.REQUIRED) + .put("xpack.ssl.truststore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/truststore-testnode-only.jks")) + .put("xpack.ssl.truststore.password", "truststore-testnode-only") + .build(); + try (TransportClient transportClient = new TestXPackTransportClient(settings, + Collections.singletonList(LocalStateSecurity.class))) { + transportClient.addTransportAddress(randomFrom(internalCluster().getInstance(Transport.class).boundAddress().boundAddresses())); + assertGreenClusterState(transportClient); + fail("Expected NoNodeAvailableException"); + } catch (NoNodeAvailableException e) { + assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); + } + } + + /** + * Uses a transport client with the default JDK truststore; this truststore only trusts the known good public + * certificate authorities. 
This test connects to the default profile, which uses a self-signed certificate that + * will never be trusted by the default truststore so the connection should always fail + */ + public void testThatSSLTransportClientWithNoTruststoreCannotConnectToDefaultProfile() throws Exception { + Settings settings = Settings.builder() + .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) + .put("cluster.name", internalCluster().getClusterName()) + .put("xpack.ssl.client_authentication", SSLClientAuth.REQUIRED) + .put("xpack.security.transport.ssl.enabled", true) + .build(); + try (TransportClient transportClient = new TestXPackTransportClient(settings, + Collections.singletonList(LocalStateSecurity.class))) { + transportClient.addTransportAddress(randomFrom(internalCluster().getInstance(Transport.class).boundAddress().boundAddresses())); + assertGreenClusterState(transportClient); + fail("Expected NoNodeAvailableException"); + } catch (NoNodeAvailableException e) { + assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); + } + } + + /** + * Uses a transport client with the default JDK truststore; this truststore only trusts the known good public + * certificate authorities. This test connects to the client profile, which uses a self-signed certificate that + * will never be trusted by the default truststore so the connection should always fail + */ + public void testThatSSLTransportClientWithNoTruststoreCannotConnectToClientProfile() throws Exception { + Settings settings = Settings.builder() + .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) + .put("cluster.name", internalCluster().getClusterName()) + .put("xpack.ssl.client_authentication", SSLClientAuth.REQUIRED) + .put("xpack.security.transport.ssl.enabled", true) + .build(); + try (TransportClient transportClient = new TestXPackTransportClient(settings, + Collections.singletonList(LocalStateSecurity.class))) { + transportClient.addTransportAddress(new TransportAddress(InetAddress.getLoopbackAddress(), getProfilePort("client"))); + assertGreenClusterState(transportClient); + fail("Expected NoNodeAvailableException"); + } catch (NoNodeAvailableException e) { + assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); + } + } + + /** + * Uses a transport client with the default JDK truststore; this truststore only trusts the known good public + * certificate authorities. 
This test connects to the no_client_auth profile, which uses a self-signed certificate that + * will never be trusted by the default truststore so the connection should always fail + */ + public void testThatSSLTransportClientWithNoTruststoreCannotConnectToNoClientAuthProfile() throws Exception { + Settings settings = Settings.builder() + .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) + .put("cluster.name", internalCluster().getClusterName()) + .put("xpack.ssl.client_authentication", SSLClientAuth.REQUIRED) + .put("xpack.security.transport.ssl.enabled", true) + .build(); + try (TransportClient transportClient = new TestXPackTransportClient(settings, + Collections.singletonList(LocalStateSecurity.class))) { + transportClient.addTransportAddress(new TransportAddress(InetAddress.getLoopbackAddress(), + getProfilePort("no_client_auth"))); + assertGreenClusterState(transportClient); + fail("Expected NoNodeAvailableException"); + } catch (NoNodeAvailableException e) { + assertThat(e.getMessage(), containsString("None of the configured nodes are available: [{#transport#-")); + } + } + + private static int getProfilePort(String profile) { + TransportAddress transportAddress = + randomFrom(internalCluster().getInstance(Transport.class).profileBoundAddresses().get(profile).boundAddresses()); + return transportAddress.address().getPort(); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslNullCipherTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslNullCipherTests.java new file mode 100644 index 0000000000000..fb5d567bb3627 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslNullCipherTests.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
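Illustrative aside (not part of this patch): across the profile tests above, whether a connection succeeds comes down to two things on the server side of each profile, which truststore its engine was built from and whether it demands a client certificate. A minimal sketch of the JSSE switch behind the client-auth half; the helper is hypothetical, and in the real code the per-profile settings are presumably resolved via SSLService:

```java
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;

final class ClientAuthSketch {

    /**
     * Returns a server-mode engine that either requires or skips client certificate
     * authentication, mirroring the "required" vs "none" profile settings above.
     */
    static SSLEngine serverEngine(SSLContext context, boolean requireClientAuth) {
        SSLEngine engine = context.createSSLEngine();
        engine.setUseClientMode(false);               // act as the accepting side of the handshake
        engine.setNeedClientAuth(requireClientAuth);  // true: handshake fails without a trusted client cert
        return engine;
    }
}
```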
+ */ +package org.elasticsearch.xpack.security.transport.ssl; + +import org.elasticsearch.action.DocWriteResponse.Result; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.SecurityIntegTestCase; + +/** + * An extremely simple test that shows SSL will work with a cipher that does not perform encryption + */ +public class SslNullCipherTests extends SecurityIntegTestCase { + + @Override + public boolean transportSSLEnabled() { + return true; + } + + @Override + public Settings nodeSettings(int nodeOrdinal) { + Settings settings = super.nodeSettings(nodeOrdinal); + Settings.Builder builder = Settings.builder() + .put(settings); + builder.put("xpack.security.transport.ssl.cipher_suites", "TLS_RSA_WITH_NULL_SHA256"); + return builder.build(); + } + + @Override + public Settings transportClientSettings() { + Settings settings = super.transportClientSettings(); + Settings.Builder builder = Settings.builder() + .put(settings); + + builder.put("xpack.security.transport.ssl.cipher_suites", "TLS_RSA_WITH_NULL_SHA256"); + return builder.build(); + } + + public void testClusterIsFormed() { + ensureGreen(); + Client client = internalCluster().transportClient(); + IndexResponse response = client.prepareIndex("index", "type").setSource("foo", "bar").get(); + assertEquals(Result.CREATED, response.getResult()); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/AnonymousUserIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/AnonymousUserIntegTests.java new file mode 100644 index 0000000000000..738cf68763b85 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/AnonymousUserIntegTests.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
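For illustration only, outside this patch: TLS_RSA_WITH_NULL_SHA256 authenticates the peer and integrity-protects records but encrypts nothing, and JDKs generally ship with it supported yet disabled, which is why the test above has to opt in through the cipher_suites setting. A small probe of the local JVM's defaults (the class name is made up):

```java
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;

import java.util.Arrays;

public class NullCipherCheck {
    public static void main(String[] args) throws Exception {
        SSLContext ctx = SSLContext.getInstance("TLSv1.2");
        ctx.init(null, null, null);
        SSLEngine engine = ctx.createSSLEngine();
        String suite = "TLS_RSA_WITH_NULL_SHA256";
        boolean supported = Arrays.asList(engine.getSupportedCipherSuites()).contains(suite);
        boolean enabledByDefault = Arrays.asList(engine.getEnabledCipherSuites()).contains(suite);
        System.out.println(suite + ": supported=" + supported + ", enabled by default=" + enabledByDefault);
    }
}
```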
+ */ +package org.elasticsearch.xpack.security.user; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.security.authz.AuthorizationService; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class AnonymousUserIntegTests extends SecurityIntegTestCase { + private boolean authorizationExceptionsEnabled = randomBoolean(); + + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .put(AnonymousUser.ROLES_SETTING.getKey(), "anonymous") + .put(AuthorizationService.ANONYMOUS_AUTHORIZATION_EXCEPTION_SETTING.getKey(), authorizationExceptionsEnabled) + .build(); + } + + @Override + public String configRoles() { + return super.configRoles() + "\n" + + "anonymous:\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [ READ ]\n"; + } + + public void testAnonymousViaHttp() throws Exception { + try { + getRestClient().performRequest("GET", "/_nodes"); + fail("request should have failed"); + } catch(ResponseException e) { + int statusCode = e.getResponse().getStatusLine().getStatusCode(); + Response response = e.getResponse(); + if (authorizationExceptionsEnabled) { + assertThat(statusCode, is(403)); + assertThat(response.getHeader("WWW-Authenticate"), nullValue()); + assertThat(EntityUtils.toString(response.getEntity()), containsString("security_exception")); + } else { + assertThat(statusCode, is(401)); + assertThat(response.getHeader("WWW-Authenticate"), notNullValue()); + assertThat(response.getHeader("WWW-Authenticate"), containsString("Basic")); + assertThat(EntityUtils.toString(response.getEntity()), containsString("security_exception")); + } + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/AnonymousUserTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/AnonymousUserTests.java new file mode 100644 index 0000000000000..4c72afeb5cee7 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/AnonymousUserTests.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.user; + +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.user.AnonymousUser; +import org.elasticsearch.xpack.core.security.user.User; + +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class AnonymousUserTests extends ESTestCase { + + public void testResolveAnonymousUser() throws Exception { + Settings settings = Settings.builder() + .put(AnonymousUser.USERNAME_SETTING.getKey(), "anonym1") + .putList(AnonymousUser.ROLES_SETTING.getKey(), "r1", "r2", "r3") + .build(); + AnonymousUser user = new AnonymousUser(settings); + assertThat(user.principal(), equalTo("anonym1")); + assertThat(user.roles(), arrayContainingInAnyOrder("r1", "r2", "r3")); + + settings = Settings.builder() + .putList(AnonymousUser.ROLES_SETTING.getKey(), "r1", "r2", "r3") + .build(); + user = new AnonymousUser(settings); + assertThat(user.principal(), equalTo(AnonymousUser.DEFAULT_ANONYMOUS_USERNAME)); + assertThat(user.roles(), arrayContainingInAnyOrder("r1", "r2", "r3")); + } + + public void testResolveAnonymousUser_NoSettings() throws Exception { + Settings settings = randomBoolean() ? + Settings.EMPTY : + Settings.builder().put(AnonymousUser.USERNAME_SETTING.getKey(), "user1").build(); + assertThat(AnonymousUser.isAnonymousEnabled(settings), is(false)); + } + + public void testAnonymous() throws Exception { + Settings settings = Settings.builder().putList(AnonymousUser.ROLES_SETTING.getKey(), "r1", "r2", "r3").build(); + if (randomBoolean()) { + settings = Settings.builder().put(settings).put(AnonymousUser.USERNAME_SETTING.getKey(), "anon").build(); + } + + AnonymousUser user = new AnonymousUser(settings); + assertEquals(user, new AnonymousUser(settings)); + assertThat(AnonymousUser.isAnonymousUsername(user.principal(), settings), is(true)); + // make sure check works with serialization + BytesStreamOutput output = new BytesStreamOutput(); + User.writeTo(user, output); + + User anonymousSerialized = User.readFrom(output.bytes().streamInput()); + assertEquals(user, anonymousSerialized); + + // test with anonymous disabled + if (user.principal().equals(AnonymousUser.DEFAULT_ANONYMOUS_USERNAME)) { + assertThat(AnonymousUser.isAnonymousUsername(user.principal(), Settings.EMPTY), is(true)); + } else { + assertThat(AnonymousUser.isAnonymousUsername(user.principal(), Settings.EMPTY), is(false)); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/SystemUserTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/SystemUserTests.java new file mode 100644 index 0000000000000..da1064e27a927 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/SystemUserTests.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.user; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.user.SystemUser; + +import static org.hamcrest.Matchers.is; + +public class SystemUserTests extends ESTestCase { + + public void testIsAuthorized() throws Exception { + assertThat(SystemUser.isAuthorized("indices:monitor/whatever"), is(true)); + assertThat(SystemUser.isAuthorized("cluster:monitor/whatever"), is(true)); + assertThat(SystemUser.isAuthorized("internal:whatever"), is(true)); + assertThat(SystemUser.isAuthorized("cluster:admin/reroute"), is(true)); + assertThat(SystemUser.isAuthorized("cluster:admin/whatever"), is(false)); + assertThat(SystemUser.isAuthorized("indices:whatever"), is(false)); + assertThat(SystemUser.isAuthorized("cluster:whatever"), is(false)); + assertThat(SystemUser.isAuthorized("whatever"), is(false)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/UserTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/UserTests.java new file mode 100644 index 0000000000000..5be4b1c0eca2e --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/UserTests.java @@ -0,0 +1,170 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.user; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.user.ElasticUser; +import org.elasticsearch.xpack.core.security.user.InternalUserSerializationHelper; +import org.elasticsearch.xpack.core.security.user.KibanaUser; +import org.elasticsearch.xpack.core.security.user.SystemUser; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.core.security.user.XPackUser; + +import java.util.Arrays; +import java.util.Collections; + +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.sameInstance; + +public class UserTests extends ESTestCase { + + public void testWriteToAndReadFrom() throws Exception { + User user = new User(randomAlphaOfLengthBetween(4, 30), + generateRandomStringArray(20, 30, false)); + BytesStreamOutput output = new BytesStreamOutput(); + + User.writeTo(user, output); + User readFrom = User.readFrom(output.bytes().streamInput()); + + assertThat(readFrom, not(sameInstance(user))); + assertThat(readFrom.principal(), is(user.principal())); + assertThat(Arrays.equals(readFrom.roles(), user.roles()), is(true)); + assertThat(readFrom.authenticatedUser(), is(user)); + } + + public void testWriteToAndReadFromWithRunAs() throws Exception { + User authUser = new User(randomAlphaOfLengthBetween(4, 30), generateRandomStringArray(20, 30, false)); + User user = new User(randomAlphaOfLengthBetween(4, 30), + randomBoolean() ? 
generateRandomStringArray(20, 30, false) : null, + authUser); + + BytesStreamOutput output = new BytesStreamOutput(); + + User.writeTo(user, output); + User readFrom = User.readFrom(output.bytes().streamInput()); + + assertThat(readFrom, not(sameInstance(user))); + assertThat(readFrom.principal(), is(user.principal())); + assertThat(Arrays.equals(readFrom.roles(), user.roles()), is(true)); + User readFromAuthUser = readFrom.authenticatedUser(); + assertThat(authUser, is(notNullValue())); + assertThat(readFromAuthUser.principal(), is(authUser.principal())); + assertThat(Arrays.equals(readFromAuthUser.roles(), authUser.roles()), is(true)); + assertThat(readFromAuthUser.authenticatedUser(), is(authUser)); + } + + public void testRunAsBackcompatRead() throws Exception { + User user = new User(randomAlphaOfLengthBetween(4, 30), + randomBoolean() ? generateRandomStringArray(20, 30, false) : null); + // store the runAs user as the "authenticationUser" here to mimic old format for writing + User authUser = new User(randomAlphaOfLengthBetween(4, 30), generateRandomStringArray(20, 30, false), user); + + BytesStreamOutput output = new BytesStreamOutput(); + User.writeTo(authUser, output); + StreamInput input = output.bytes().streamInput(); + input.setVersion(randomFrom(Version.V_5_0_0, Version.V_5_4_0)); + User readFrom = User.readFrom(input); + + assertThat(readFrom.principal(), is(user.principal())); + assertThat(Arrays.equals(readFrom.roles(), user.roles()), is(true)); + User readFromAuthUser = readFrom.authenticatedUser(); + assertThat(authUser, is(notNullValue())); + assertThat(readFromAuthUser.principal(), is(authUser.principal())); + assertThat(Arrays.equals(readFromAuthUser.roles(), authUser.roles()), is(true)); + } + + public void testRunAsBackcompatWrite() throws Exception { + User user = new User(randomAlphaOfLengthBetween(4, 30), + randomBoolean() ? generateRandomStringArray(20, 30, false) : null); + // store the runAs user as the "authenticationUser" here to mimic old format for writing + User authUser = new User(randomAlphaOfLengthBetween(4, 30), generateRandomStringArray(20, 30, false), user); + + BytesStreamOutput output = new BytesStreamOutput(); + output.setVersion(randomFrom(Version.V_5_0_0, Version.V_5_4_0)); + User.writeTo(authUser, output); + StreamInput input = output.bytes().streamInput(); + User readFrom = User.readFrom(input); + + assertThat(readFrom.principal(), is(user.principal())); + assertThat(Arrays.equals(readFrom.roles(), user.roles()), is(true)); + User readFromAuthUser = readFrom.authenticatedUser(); + assertThat(authUser, is(notNullValue())); + assertThat(readFromAuthUser.principal(), is(authUser.principal())); + assertThat(Arrays.equals(readFromAuthUser.roles(), authUser.roles()), is(true)); + } + + public void testSystemUserReadAndWrite() throws Exception { + BytesStreamOutput output = new BytesStreamOutput(); + + InternalUserSerializationHelper.writeTo(SystemUser.INSTANCE, output); + User readFrom = InternalUserSerializationHelper.readFrom(output.bytes().streamInput()); + + assertThat(readFrom, is(sameInstance(SystemUser.INSTANCE))); + assertThat(readFrom.authenticatedUser(), is(SystemUser.INSTANCE)); + } + + public void testSystemUserFailsRead() throws Exception { + BytesStreamOutput output = new BytesStreamOutput(); + + InternalUserSerializationHelper.writeTo(SystemUser.INSTANCE, output); + AssertionError e = expectThrows(AssertionError.class, () -> User.readFrom(output.bytes().streamInput())); + + assertThat(e.getMessage(), is("should always return false. 
Internal users should use the InternalUserSerializationHelper")); + } + + public void testXPackUserReadAndWrite() throws Exception { + BytesStreamOutput output = new BytesStreamOutput(); + + InternalUserSerializationHelper.writeTo(XPackUser.INSTANCE, output); + User readFrom = InternalUserSerializationHelper.readFrom(output.bytes().streamInput()); + + assertThat(readFrom, is(sameInstance(XPackUser.INSTANCE))); + assertThat(readFrom.authenticatedUser(), is(XPackUser.INSTANCE)); + } + + public void testFakeInternalUserSerialization() throws Exception { + BytesStreamOutput output = new BytesStreamOutput(); + output.writeBoolean(true); + output.writeString(randomAlphaOfLengthBetween(4, 30)); + try { + InternalUserSerializationHelper.readFrom(output.bytes().streamInput()); + fail("system user had wrong name"); + } catch (IllegalStateException e) { + // expected + } + } + + public void testUserToString() throws Exception { + User user = new User("u1", "r1"); + assertThat(user.toString(), is("User[username=u1,roles=[r1],fullName=null,email=null,metadata={}]")); + user = new User("u1", new String[] { "r1", "r2" }, "user1", "user1@domain.com", Collections.singletonMap("key", "val"), true); + assertThat(user.toString(), is("User[username=u1,roles=[r1,r2],fullName=user1,email=user1@domain.com,metadata={key=val}]")); + user = new User("u1", new String[] {"r1"}, new User("u2", "r2", "r3")); + assertThat(user.toString(), is("User[username=u1,roles=[r1],fullName=null,email=null,metadata={}," + + "authenticatedUser=[User[username=u2,roles=[r2,r3],fullName=null,email=null,metadata={}]]]")); + } + + public void testReservedUserSerialization() throws Exception { + BytesStreamOutput output = new BytesStreamOutput(); + final ElasticUser elasticUser = new ElasticUser(true); + User.writeTo(elasticUser, output); + User readFrom = User.readFrom(output.bytes().streamInput()); + + assertEquals(elasticUser, readFrom); + + final KibanaUser kibanaUser = new KibanaUser(true); + output = new BytesStreamOutput(); + User.writeTo(kibanaUser, output); + readFrom = User.readFrom(output.bytes().streamInput()); + + assertEquals(kibanaUser, readFrom); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/XPackUserTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/XPackUserTests.java new file mode 100644 index 0000000000000..99c2ae635f6a6 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/XPackUserTests.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.user; + +import org.elasticsearch.action.get.GetAction; +import org.elasticsearch.action.index.IndexAction; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.update.UpdateAction; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField; +import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.elasticsearch.xpack.security.audit.index.IndexNameResolver; +import org.hamcrest.Matchers; +import org.joda.time.DateTime; + +import java.util.function.Predicate; + +public class XPackUserTests extends ESTestCase { + + public void testXPackUserCanAccessNonSecurityIndices() { + final String action = randomFrom(GetAction.NAME, SearchAction.NAME, IndexAction.NAME); + final Predicate predicate = XPackUser.ROLE.indices().allowedIndicesMatcher(action); + final String index = randomBoolean() ? randomAlphaOfLengthBetween(3, 12) : "." + randomAlphaOfLength(8); + assertThat(predicate.test(index), Matchers.is(true)); + } + + public void testXPackUserCannotAccessSecurityIndex() { + final String action = randomFrom(GetAction.NAME, SearchAction.NAME, IndexAction.NAME); + final Predicate predicate = XPackUser.ROLE.indices().allowedIndicesMatcher(action); + assertThat(predicate.test(SecurityLifecycleService.SECURITY_INDEX_NAME), Matchers.is(false)); + assertThat(predicate.test(SecurityLifecycleService.INTERNAL_SECURITY_INDEX), Matchers.is(false)); + } + + public void testXPackUserCanReadAuditTrail() { + final String action = randomFrom(GetAction.NAME, SearchAction.NAME); + final Predicate predicate = XPackUser.ROLE.indices().allowedIndicesMatcher(action); + assertThat(predicate.test(getAuditLogName()), Matchers.is(true)); + } + + public void testXPackUserCannotWriteToAuditTrail() { + final String action = randomFrom(IndexAction.NAME, UpdateAction.NAME); + final Predicate predicate = XPackUser.ROLE.indices().allowedIndicesMatcher(action); + assertThat(predicate.test(getAuditLogName()), Matchers.is(false)); + } + + private String getAuditLogName() { + final DateTime date = new DateTime().plusDays(randomIntBetween(1, 360)); + final IndexNameResolver.Rollover rollover = randomFrom(IndexNameResolver.Rollover.values()); + return IndexNameResolver.resolve(IndexAuditTrailField.INDEX_NAME_PREFIX, date, rollover); + } +} \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java new file mode 100644 index 0000000000000..0c885840a1734 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java @@ -0,0 +1,132 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ssl; + +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.message.BasicHeader; +import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; +import org.apache.http.ssl.SSLContexts; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.xpack.core.TestXPackTransportClient; +import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.core.ssl.SSLClientAuth; +import org.elasticsearch.xpack.security.LocalStateSecurity; + +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLHandshakeException; +import javax.net.ssl.TrustManagerFactory; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.KeyStore; +import java.security.SecureRandom; +import java.security.cert.CertPathBuilderException; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class SSLClientAuthTests extends SecurityIntegTestCase { + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + // invert the require auth settings + .put("xpack.ssl.client_authentication", SSLClientAuth.REQUIRED) + .put("xpack.security.http.ssl.enabled", true) + .put("xpack.security.http.ssl.client_authentication", SSLClientAuth.REQUIRED) + .put("transport.profiles.default.xpack.security.ssl.client_authentication", SSLClientAuth.NONE) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .build(); + } + + @Override + protected boolean transportSSLEnabled() { + return true; + } + + public void testThatHttpFailsWithoutSslClientAuth() throws IOException { + SSLIOSessionStrategy sessionStrategy = new SSLIOSessionStrategy(SSLContexts.createDefault(), NoopHostnameVerifier.INSTANCE); + try (RestClient restClient = createRestClient(httpClientBuilder -> httpClientBuilder.setSSLStrategy(sessionStrategy), "https")) { + restClient.performRequest("GET", "/"); + fail("Expected SSLHandshakeException"); + } catch (IOException e) { + Throwable t = ExceptionsHelper.unwrap(e, CertPathBuilderException.class); + assertThat(t, instanceOf(CertPathBuilderException.class)); + assertThat(t.getMessage(), containsString("unable to find valid certification path to requested target")); + } + } + + public void testThatHttpWorksWithSslClientAuth() throws IOException { + SSLIOSessionStrategy sessionStrategy = new SSLIOSessionStrategy(getSSLContext(), NoopHostnameVerifier.INSTANCE); + try (RestClient restClient = createRestClient(httpClientBuilder -> httpClientBuilder.setSSLStrategy(sessionStrategy), "https")) { + Response response = restClient.performRequest("GET", "/", + new 
BasicHeader("Authorization", basicAuthHeaderValue(transportClientUsername(), transportClientPassword()))); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + assertThat(EntityUtils.toString(response.getEntity()), containsString("You Know, for Search")); + } + } + + public void testThatTransportWorksWithoutSslClientAuth() throws IOException { + // specify an arbitrary keystore, that does not include the certs needed to connect to the transport protocol + Path store = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.jks"); + + if (Files.notExists(store)) { + throw new ElasticsearchException("store path doesn't exist"); + } + + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("xpack.ssl.keystore.secure_password", "testclient-client-profile"); + Settings settings = Settings.builder() + .put("xpack.security.transport.ssl.enabled", true) + .put("xpack.ssl.client_authentication", SSLClientAuth.NONE) + .put("xpack.ssl.keystore.path", store) + .setSecureSettings(secureSettings) + .put("cluster.name", internalCluster().getClusterName()) + .put(SecurityField.USER_SETTING.getKey(), + transportClientUsername() + ":" + new String(transportClientPassword().getChars())) + .build(); + try (TransportClient client = new TestXPackTransportClient(settings, LocalStateSecurity.class)) { + Transport transport = internalCluster().getDataNodeInstance(Transport.class); + TransportAddress transportAddress = transport.boundAddress().publishAddress(); + client.addTransportAddress(transportAddress); + + assertGreenClusterState(client); + } + } + + private SSLContext getSSLContext() { + try (InputStream in = + Files.newInputStream(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.jks"))) { + KeyStore keyStore = KeyStore.getInstance("jks"); + keyStore.load(in, "testclient".toCharArray()); + TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(keyStore); + KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + kmf.init(keyStore, "testclient".toCharArray()); + SSLContext context = SSLContext.getInstance("TLSv1.2"); + context.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom()); + return context; + } catch (Exception e) { + throw new ElasticsearchException("failed to initialize a TrustManagerFactory", e); + } + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLReloadIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLReloadIntegTests.java new file mode 100644 index 0000000000000..3e05c88953aed --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLReloadIntegTests.java @@ -0,0 +1,184 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ssl; + +import org.bouncycastle.asn1.x500.X500Name; +import org.bouncycastle.asn1.x509.Extension; +import org.bouncycastle.asn1.x509.Time; +import org.bouncycastle.cert.X509CertificateHolder; +import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; +import org.bouncycastle.cert.jcajce.JcaX509ExtensionUtils; +import org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder; +import org.bouncycastle.operator.ContentSigner; +import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.network.InetAddressHelper; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.xpack.core.ssl.CertUtils; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import javax.net.ssl.SSLHandshakeException; +import javax.net.ssl.SSLSocket; +import javax.net.ssl.SSLSocketFactory; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.SocketException; +import java.nio.file.AtomicMoveNotSupportedException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.security.KeyPair; +import java.security.KeyStore; +import java.security.cert.Certificate; +import java.security.cert.X509Certificate; +import java.util.Locale; +import java.util.concurrent.CountDownLatch; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +/** + * Integration tests for SSL reloading support + */ +public class SSLReloadIntegTests extends SecurityIntegTestCase { + + private Path nodeStorePath; + + @Override + public Settings nodeSettings(int nodeOrdinal) { + if (nodeStorePath == null) { + Path origPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks"); + Path tempDir = createTempDir(); + nodeStorePath = tempDir.resolve("testnode.jks"); + try { + Files.copy(origPath, nodeStorePath); + } catch (IOException e) { + throw new ElasticsearchException("failed to copy keystore"); + } + } + Settings settings = super.nodeSettings(nodeOrdinal); + Settings.Builder builder = Settings.builder() + .put(settings.filter((s) -> s.startsWith("xpack.ssl.") == false)); + + + SecuritySettingsSource.addSSLSettingsForStore(builder, + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks", "testnode"); + builder.put("resource.reload.interval.high", "1s") + .put("xpack.ssl.keystore.path", nodeStorePath); + + if (builder.get("xpack.ssl.truststore.path") != null) { + builder.put("xpack.ssl.truststore.path", nodeStorePath); + } + + return builder.build(); + } + + @Override + protected boolean transportSSLEnabled() { + return true; + } + + public void testThatSSLConfigurationReloadsOnModification() throws Exception { + KeyPair keyPair = CertUtils.generateKeyPair(randomFrom(1024, 2048)); + X509Certificate certificate = getCertificate(keyPair); + KeyStore keyStore = KeyStore.getInstance("jks"); + keyStore.load(null, 
null); + keyStore.setKeyEntry("key", keyPair.getPrivate(), SecuritySettingsSourceField.TEST_PASSWORD.toCharArray(), + new Certificate[] { certificate }); + Path keystorePath = createTempDir().resolve("newcert.jks"); + try (OutputStream out = Files.newOutputStream(keystorePath)) { + keyStore.store(out, SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()); + } + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("xpack.ssl.keystore.secure_password", SecuritySettingsSourceField.TEST_PASSWORD); + secureSettings.setString("xpack.ssl.truststore.secure_password", "testnode"); + Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put("xpack.ssl.keystore.path", keystorePath) + .put("xpack.ssl.truststore.path", nodeStorePath) + .setSecureSettings(secureSettings) + .build(); + String node = randomFrom(internalCluster().getNodeNames()); + SSLService sslService = new SSLService(settings, TestEnvironment.newEnvironment(settings)); + SSLSocketFactory sslSocketFactory = sslService.sslSocketFactory(settings); + TransportAddress address = internalCluster() + .getInstance(Transport.class, node).boundAddress().publishAddress(); + try (SSLSocket socket = (SSLSocket) sslSocketFactory.createSocket(address.getAddress(), address.getPort())) { + assertThat(socket.isConnected(), is(true)); + socket.startHandshake(); + fail("handshake should not have been successful!"); + } catch (SSLHandshakeException | SocketException expected) { + logger.trace("expected exception", expected); + } + + KeyStore nodeStore = KeyStore.getInstance("jks"); + try (InputStream in = Files.newInputStream(nodeStorePath)) { + nodeStore.load(in, "testnode".toCharArray()); + } + nodeStore.setCertificateEntry("newcert", certificate); + Path path = nodeStorePath.getParent().resolve("updated.jks"); + try (OutputStream out = Files.newOutputStream(path)) { + nodeStore.store(out, "testnode".toCharArray()); + } + try { + Files.move(path, nodeStorePath, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE); + } catch (AtomicMoveNotSupportedException e) { + Files.move(path, nodeStorePath, StandardCopyOption.REPLACE_EXISTING); + } + + CountDownLatch latch = new CountDownLatch(1); + assertBusy(() -> { + try (SSLSocket socket = (SSLSocket) sslSocketFactory.createSocket(address.getAddress(), address.getPort())) { + logger.info("opened socket for reloading [{}]", socket); + socket.addHandshakeCompletedListener(event -> { + try { + assertThat(event.getPeerPrincipal().getName(), containsString("Test Node")); + logger.info("ssl handshake completed on port [{}]", event.getSocket().getLocalPort()); + latch.countDown(); + } catch (Exception e) { + fail("caught exception in listener " + e.getMessage()); + } + }); + socket.startHandshake(); + + } catch (Exception e) { + fail("caught exception " + e.getMessage()); + } + }); + latch.await(); + } + + private X509Certificate getCertificate(KeyPair keyPair) throws Exception { + final DateTime notBefore = new DateTime(DateTimeZone.UTC); + final DateTime notAfter = notBefore.plusYears(1); + X500Name subject = new X500Name("CN=random cert"); + JcaX509v3CertificateBuilder builder = + new JcaX509v3CertificateBuilder(subject, CertUtils.getSerial(), + new Time(notBefore.toDate(), Locale.ROOT), new Time(notAfter.toDate(), Locale.ROOT), subject, keyPair.getPublic()); + + JcaX509ExtensionUtils extUtils = new JcaX509ExtensionUtils(); + builder.addExtension(Extension.subjectKeyIdentifier, false, 
extUtils.createSubjectKeyIdentifier(keyPair.getPublic())); + builder.addExtension(Extension.authorityKeyIdentifier, false, extUtils.createAuthorityKeyIdentifier(keyPair.getPublic())); + builder.addExtension(Extension.subjectAlternativeName, false, + CertUtils.getSubjectAlternativeNames(true, Sets.newHashSet(InetAddressHelper.getAllAddresses()))); + + ContentSigner signer = new JcaContentSignerBuilder("SHA256withRSA").build(keyPair.getPrivate()); + X509CertificateHolder certificateHolder = builder.build(signer); + return new JcaX509CertificateConverter().getCertificate(certificateHolder); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java new file mode 100644 index 0000000000000..4e76e59e5962f --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java @@ -0,0 +1,261 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ssl; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.bouncycastle.asn1.x509.GeneralNames; +import org.bouncycastle.openssl.jcajce.JcaPEMWriter; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.xpack.core.ssl.CertUtils; +import org.elasticsearch.xpack.core.ssl.RestrictedTrustManager; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import javax.net.ssl.SSLHandshakeException; +import javax.net.ssl.SSLSocket; +import javax.net.ssl.SSLSocketFactory; +import javax.security.auth.x500.X500Principal; +import java.io.BufferedWriter; +import java.io.IOException; +import java.net.SocketException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.KeyPair; +import java.security.PrivateKey; +import java.security.cert.X509Certificate; +import java.util.Collections; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.core.ssl.CertUtils.generateSignedCertificate; +import static org.hamcrest.Matchers.is; + +/** + * Integration tests for SSL trust restrictions + * + * @see RestrictedTrustManager + */ +@ESIntegTestCase.ClusterScope(numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false) +@TestLogging("org.elasticsearch.xpack.ssl.RestrictedTrustManager:DEBUG") +public class SSLTrustRestrictionsTests extends SecurityIntegTestCase { + + /** + * Use a small keysize for performance, since the keys are only used in this test, but a large enough keysize + * to get past the SSL algorithm checker + */ + private static final int KEYSIZE = 1024; + + private static final int RESOURCE_RELOAD_MILLIS = 3; + private static final TimeValue MAX_WAIT_RELOAD = TimeValue.timeValueSeconds(1); + + private static Path configPath; + private static Settings nodeSSL; + + 
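+    // Generated once in setupCertificates(): a throw-away CA, one certificate whose SAN ("node.trusted")
+    // matches the "*.trusted" restriction written by the tests, and one ("someone.else") that intentionally does not.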
private static CertificateInfo ca; + private static CertificateInfo trustedCert; + private static CertificateInfo untrustedCert; + private static Path restrictionsPath; + + @Override + protected int maxNumberOfNodes() { + // We are trying to test the SSL configuration for which clients/nodes may join a cluster + // We prefer the cluster to only have 1 node, so that the SSL checking doesn't happen until the test methods run + // (That's not _quite_ true, because the base setup code checks the cluster using transport client, but it's the best we can do) + return 1; + } + + @BeforeClass + public static void setupCertificates() throws Exception { + configPath = createTempDir(); + + final KeyPair caPair = CertUtils.generateKeyPair(KEYSIZE); + final X509Certificate caCert = CertUtils.generateCACertificate(new X500Principal("cn=CertAuth"), caPair, 30); + ca = writeCertificates("ca", caPair.getPrivate(), caCert); + + trustedCert = generateCertificate("trusted", "node.trusted"); + untrustedCert = generateCertificate("untrusted", "someone.else"); + + nodeSSL = Settings.builder() + .put("xpack.security.transport.ssl.enabled", true) + .put("xpack.security.transport.ssl.verification_mode", "certificate") + .putList("xpack.ssl.certificate_authorities", ca.getCertPath().toString()) + .put("xpack.ssl.key", trustedCert.getKeyPath()) + .put("xpack.ssl.certificate", trustedCert.getCertPath()) + .build(); + } + + @AfterClass + public static void cleanup() { + configPath = null; + nodeSSL = null; + ca = null; + trustedCert = null; + untrustedCert = null; + } + + @Override + public Settings nodeSettings(int nodeOrdinal) { + + Settings parentSettings = super.nodeSettings(nodeOrdinal); + Settings.Builder builder = Settings.builder() + .put(parentSettings.filter((s) -> s.startsWith("xpack.ssl.") == false)) + .put(nodeSSL); + + restrictionsPath = configPath.resolve("trust_restrictions.yml"); + writeRestrictions("*.trusted"); + builder.put("xpack.ssl.trust_restrictions.path", restrictionsPath); + builder.put("resource.reload.interval.high", RESOURCE_RELOAD_MILLIS + "ms"); + + return builder.build(); + } + + private void writeRestrictions(String trustedPattern) { + try { + Files.write(restrictionsPath, Collections.singleton("trust.subject_name: \"" + trustedPattern + "\"")); + } catch (IOException e) { + throw new ElasticsearchException("failed to write restrictions", e); + } + } + + @Override + protected Settings transportClientSettings() { + Settings parentSettings = super.transportClientSettings(); + Settings.Builder builder = Settings.builder() + .put(parentSettings.filter((s) -> s.startsWith("xpack.ssl.") == false)) + .put(nodeSSL); + return builder.build(); + } + + @Override + protected boolean transportSSLEnabled() { + return true; + } + + public void testCertificateWithTrustedNameIsAccepted() throws Exception { + writeRestrictions("*.trusted"); + try { + tryConnect(trustedCert); + } catch (SSLHandshakeException | SocketException ex) { + logger.warn(new ParameterizedMessage("unexpected handshake failure with certificate [{}] [{}]", + trustedCert.certificate.getSubjectDN(), trustedCert.certificate.getSubjectAlternativeNames()), ex); + fail("handshake should have been successful, but failed with " + ex); + } + } + + public void testCertificateWithUntrustedNameFails() throws Exception { + writeRestrictions("*.trusted"); + try { + tryConnect(untrustedCert); + fail("handshake should have failed, but was successful"); + } catch (SSLHandshakeException | SocketException ex) { + // expected + } + } + + public void 
testRestrictionsAreReloaded() throws Exception { + writeRestrictions("*"); + assertBusy(() -> { + try { + tryConnect(untrustedCert); + } catch (SSLHandshakeException | SocketException ex) { + fail("handshake should have been successful, but failed with " + ex); + } + }, MAX_WAIT_RELOAD.millis(), TimeUnit.MILLISECONDS); + + writeRestrictions("*.trusted"); + assertBusy(() -> { + try { + tryConnect(untrustedCert); + fail("handshake should have failed, but was successful"); + } catch (SSLHandshakeException | SocketException ex) { + // expected + } + }, MAX_WAIT_RELOAD.millis(), TimeUnit.MILLISECONDS); + } + + private void tryConnect(CertificateInfo certificate) throws Exception { + Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put("xpack.ssl.key", certificate.getKeyPath()) + .put("xpack.ssl.certificate", certificate.getCertPath()) + .putList("xpack.ssl.certificate_authorities", ca.getCertPath().toString()) + .put("xpack.ssl.verification_mode", "certificate") + .build(); + + String node = randomFrom(internalCluster().getNodeNames()); + SSLService sslService = new SSLService(settings, TestEnvironment.newEnvironment(settings)); + SSLSocketFactory sslSocketFactory = sslService.sslSocketFactory(settings); + TransportAddress address = internalCluster().getInstance(Transport.class, node).boundAddress().publishAddress(); + try (SSLSocket socket = (SSLSocket) sslSocketFactory.createSocket(address.getAddress(), address.getPort())) { + assertThat(socket.isConnected(), is(true)); + // The test simply relies on this (synchronously) connecting (or not), so we don't need a handshake handler + socket.startHandshake(); + } + } + + + private static CertificateInfo generateCertificate(String name, String san) throws Exception { + final KeyPair keyPair = CertUtils.generateKeyPair(KEYSIZE); + final X500Principal principal = new X500Principal("cn=" + name); + final GeneralNames altNames = new GeneralNames(CertUtils.createCommonName(san)); + final X509Certificate cert = generateSignedCertificate(principal, altNames, keyPair, ca.getCertificate(), ca.getKey(), 30); + return writeCertificates(name, keyPair.getPrivate(), cert); + } + + private static CertificateInfo writeCertificates(String name, PrivateKey key, X509Certificate cert) throws IOException { + final Path keyPath = writePem(key, name + ".key"); + final Path certPath = writePem(cert, name + ".crt"); + return new CertificateInfo(key, keyPath, cert, certPath); + } + + private static Path writePem(Object obj, String filename) throws IOException { + Path path = configPath.resolve(filename); + Files.deleteIfExists(path); + try (BufferedWriter out = Files.newBufferedWriter(path); + JcaPEMWriter pemWriter = new JcaPEMWriter(out)) { + pemWriter.writeObject(obj); + } + return path; + } + + private static class CertificateInfo { + private final PrivateKey key; + private final Path keyPath; + private final X509Certificate certificate; + private final Path certPath; + + private CertificateInfo(PrivateKey key, Path keyPath, X509Certificate certificate, Path certPath) { + this.key = key; + this.keyPath = keyPath; + this.certificate = certificate; + this.certPath = certPath; + } + + private PrivateKey getKey() { + return key; + } + + private Path getKeyPath() { + return keyPath; + } + + private X509Certificate getCertificate() { + return certificate; + } + + private Path getCertPath() { + return certPath; + } + } +} diff --git a/x-pack/plugin/security/src/test/resources/IndexLifecycleManagerTests-template.json 
b/x-pack/plugin/security/src/test/resources/IndexLifecycleManagerTests-template.json new file mode 100644 index 0000000000000..d9a53e4622f5f --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/IndexLifecycleManagerTests-template.json @@ -0,0 +1,15 @@ +{ + "index_patterns": "IndexLifeCycleManagerTests", + "mappings": { + "doc": { + "_meta": { + "security-version": "${security.template.version}" + }, + "properties": { + "test": { + "type": "keyword" + } + } + } + } +} diff --git a/x-pack/plugin/security/src/test/resources/missing-version-security-index-template.json b/x-pack/plugin/security/src/test/resources/missing-version-security-index-template.json new file mode 100644 index 0000000000000..c4c74f190ddb1 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/missing-version-security-index-template.json @@ -0,0 +1,107 @@ +{ + "index_patterns" : ".security", + "order" : 1000, + "settings" : { + "number_of_shards" : 1, + "number_of_replicas" : 0, + "auto_expand_replicas" : "0-all", + "analysis" : { + "filter" : { + "email" : { + "type" : "pattern_capture", + "preserve_original" : true, + "patterns" : [ + "([^@]+)", + "(\\p{L}+)", + "(\\d+)", + "@(.+)" + ] + } + }, + "analyzer" : { + "email" : { + "tokenizer" : "uax_url_email", + "filter" : [ + "email", + "lowercase", + "unique" + ] + } + } + } + }, + "mappings" : { + "user" : { + "dynamic" : "strict", + "properties" : { + "username" : { + "type" : "keyword" + }, + "roles" : { + "type" : "keyword" + }, + "password" : { + "type" : "keyword", + "index" : false, + "doc_values": false + }, + "full_name" : { + "type" : "text" + }, + "email" : { + "type" : "text", + "analyzer" : "email" + }, + "metadata" : { + "type" : "object", + "dynamic" : true + } + } + }, + "role" : { + "dynamic" : "strict", + "properties" : { + "cluster" : { + "type" : "keyword" + }, + "indices" : { + "type" : "object", + "properties" : { + "fields" : { + "type" : "keyword" + }, + "names" : { + "type" : "keyword" + }, + "privileges" : { + "type" : "keyword" + }, + "query" : { + "type" : "keyword" + } + } + }, + "name" : { + "type" : "keyword" + }, + "run_as" : { + "type" : "keyword" + }, + "metadata" : { + "type" : "object", + "dynamic" : true + } + } + }, + "reserved-user" : { + "dynamic" : "strict", + "properties" : { + "password": { + "type" : "keyword", + "index" : false, + "doc_values" : false + } + } + } + } +} diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/role/roles2xformat.json b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/role/roles2xformat.json new file mode 100644 index 0000000000000..0e356ba10d5fe --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/action/role/roles2xformat.json @@ -0,0 +1,19 @@ +{ + "indices": [ + { + "names": [ + "test" + ], + "privileges": [ + "READ" + ], + "query": { + "match_all": {} + }, + "fields": [ + "foo", + "boo" + ] + } + ] +} diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/file/users b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/file/users new file mode 100644 index 0000000000000..997c839c2695b --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/file/users @@ -0,0 +1,8 @@ +bcrypt:$2a$05$zxnP0vdREMxnEpkLCDI2OuSaSk/QEKA2.A42iOpI6U2u.RLLOWm1e +md5:$apr1$R3DdqiAZ$aljIkaIVPSarmDMlJUBBP. 
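+# format is <username>:<hashed password>; each username in this fixture is named after the hash algorithm its entry uses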
+crypt:hsP1PYSLsEEvs +plain:{plain}test123 +sha:{SHA}cojt0Pw//L6ToM8G41aOKFIWh7w= +# this is a comment line +# another comment line +bcrypt10:$2y$10$FMhmFjwU5.qxQ/BsEciS9OqcJVkFMgXMo4uH5CelOR1j4N9zIv67e \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/file/users_roles b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/file/users_roles new file mode 100644 index 0000000000000..390128c14e889 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/file/users_roles @@ -0,0 +1,7 @@ +role1:user1 +role2: user1,user2 +# this is a comment line +role3: user1, user2 , user3 +role4: period.user +# another comment line +# and another one \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/ad-schema.ldif b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/ad-schema.ldif new file mode 100644 index 0000000000000..0996376d66d36 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/ad-schema.ldif @@ -0,0 +1,61 @@ +# AD specific schema changes that will override the defaults found in the UnboundID standard schema +dn: cn=schema +objectClass: top +objectClass: ldapSubEntry +objectClass: subschema +cn: schema +attributeTypes: ( 0.9.2342.19200300.100.1.25 + NAME 'dc' + EQUALITY caseIgnoreIA5Match + SUBSTR caseIgnoreIA5SubstringsMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 + X-ORIGIN 'RFC 4519' ) +attributeTypes: ( 1.3.114.7.4.2.0.33 + NAME 'memberOf' + SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 + X-ORIGIN 'Active Directory' ) +attributeTypes: ( 1.2.840.113556.1.4.656 + NAME 'userPrincipalName' + DESC 'userPrincipalName as per Active Directory' + EQUALITY caseIgnoreMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 + X-ORIGIN 'Active Directory' ) +attributeTypes: ( 1.2.840.113556.1.4.221 + NAME 'sAMAccountName' + SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 + X-ORIGIN 'Active Directory' ) +attributeTypes: ( 1.2.840.113556.1.4.1301 + NAME 'tokenGroups' + SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 + X-ORIGIN 'Active Directory' ) +attributeTypes: ( 1.2.840.113556.1.4.146 + NAME 'objectSid' + SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 + X-ORIGIN 'Active Directory' ) +objectClasses: ( 1.2.840.113556.1.5.9 + NAME 'user' + DESC 'a user' + SUP inetOrgPerson STRUCTURAL + MUST ( cn $ + userPrincipalName $ + sAMAccountName ) + MAY ( userPassword $ + memberOf $ + tokenGroups ) + X-ORIGIN 'Active Directory' ) +objectClasses: ( 1.2.840.113556.1.5.8 + NAME 'group' + DESC 'a group of users' + SUP top STRUCTURAL + MUST ( cn $ + objectSid ) + MAY ( member ) ) +objectClasses: ( 1.2.840.113556.1.5.4 + NAME 'builtinDomain' + SUP top STRUCTURAL + MUST ( cn ) ) +objectClasses: ( 1.2.840.113556.1.3.23 + NAME 'container' + SUP top STRUCTURAL + MUST ( cn ) ) + diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/ad.ldif b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/ad.ldif new file mode 100644 index 0000000000000..dc4bd119892da --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/ad.ldif @@ -0,0 +1,44 @@ +# Active Directory test data + +dn: CN=Builtin,DC=ad,DC=test,DC=elasticsearch,DC=com +objectclass: top +objectclass: builtinDomain +cn: Builtin + +dn: CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com +objectclass: container 
+objectclass: top +cn: Users + +dn: CN=Avengers,CN=users,DC=ad,DC=test,DC=elasticsearch,DC=com +objectclass: group +objectclass: top +cn: Avengers +objectSid: S-1-5-21-3510024162-210737641-214529065-1105 + +dn: CN=Tony Stark,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com +objectclass: user +objectclass: top +objectclass: inetOrgPerson +objectclass: organizationalPerson +objectclass: person +cn: Tony Stark +sAMAccountName: ironman +tokenGroups:: AQUAAAAAAAUVAAAA4rc20emZjwwpdMkMUQQAAA== +userPrincipalName: ironman@ad.test.elasticsearch.com +userPrincipalName: CN=ironman@ad.test.elasticsearch.com +userPassword: password +sn: Stark + +dn: CN=Thor,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com +objectclass: user +objectclass: top +objectclass: inetOrgPerson +objectclass: organizationalPerson +objectclass: person +cn: Thor +sAMAccountName: CN=Thor +tokenGroups:: AQUAAAAAAAUVAAAA4rc20emZjwwpdMkMUQQAAA== +userPrincipalName: Thor@ad.test.elasticsearch.com +userPassword: password +sn: Stark diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/role_mapping.yml b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/role_mapping.yml new file mode 100644 index 0000000000000..20876fc96e31c --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/role_mapping.yml @@ -0,0 +1,5 @@ +# AD Realm Role Mapping +group_role: + - "CN=Avengers,CN=users,DC=ad,DC=test,DC=elasticsearch,DC=com" +user_role: + - "CN=Thor,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com" diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/support/ADtrust.jks b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/support/ADtrust.jks new file mode 100644 index 0000000000000..5ad7981b83ed7 Binary files /dev/null and b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/support/ADtrust.jks differ diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/support/ldapWithGroupSearch.yml b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/support/ldapWithGroupSearch.yml new file mode 100644 index 0000000000000..db70f28232abf --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/support/ldapWithGroupSearch.yml @@ -0,0 +1,11 @@ +# This LDAP connection does group lookup by a subtree search, no role mapping +ldap: + urls: + - ldap://ldap.example.com:1389 + userDnTemplate: "cn={0},ou=people,o=superheros" + groupSearch: + isSubtreeSearch: "true" + baseDn: "ou=marvel,o=superheros" + # no dnToRoleMapping mapping implies that the group name will be used directly as the ES role name + + diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/support/ldapWithRoleMapping.yml b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/support/ldapWithRoleMapping.yml new file mode 100644 index 0000000000000..d98f48e90bc22 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/support/ldapWithRoleMapping.yml @@ -0,0 +1,10 @@ +# This LDAP connection does group lookup by attribute with group to role mapping +ldap: + urls: # these connections are not round-robin, but primary, secondary, etc. 
When the first fails the second is attempted + - ldap://ldap.example.com:1389 + userDnTemplate: "cn={0},ou=people,o=superheros" + + # dnToRoleMapping as true means that group will be mapped to + # local ES roles using + dnToRoleMapping: true + #roleMappingFile: /etc/es/role_mapping.yaml diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/support/seven-seas.ldif b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/support/seven-seas.ldif new file mode 100644 index 0000000000000..527b5f5efae66 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/ldap/support/seven-seas.ldif @@ -0,0 +1,219 @@ +# Sample LDIF data for the ApacheDS v1.0 Basic User's Guide +# +# Some sailors and their ships +# userpassword for all persons is "pass" +# +version: 1 + +dn: ou=people,o=sevenSeas +objectclass: organizationalUnit +objectclass: top +description: Contains entries which describe persons (seamen) +ou: people + +dn: ou=groups,o=sevenSeas +objectclass: organizationalUnit +objectclass: top +description: Contains entries which describe groups (crews, for instance) +ou: groups + +dn: ou=crews,ou=groups,o=sevenSeas +objectclass: organizationalUnit +objectclass: top +description: Contains entries which describe ship crews +ou: crews + +dn: ou=ranks,ou=groups,o=sevenSeas +objectclass: organizationalUnit +objectclass: top +description: Contains entries which describe naval ranks (e.g. captain) +ou: ranks + +# HMS Lydia Crew +# -------------- + +dn: cn=Horatio Hornblower,ou=people,o=sevenSeas +objectclass: person +objectclass: organizationalPerson +objectclass: inetOrgPerson +objectclass: top +cn: Horatio Hornblower +description: Capt. Horatio Hornblower, R.N +givenname: Horatio +sn: Hornblower +uid: hhornblo +mail: hhornblo@royalnavy.mod.uk +userpassword: pass + +dn: cn=William Bush,ou=people,o=sevenSeas +objectclass: person +objectclass: organizationalPerson +objectclass: inetOrgPerson +objectclass: top +cn: William Bush +description: Lt. William Bush +givenname: William +manager: cn=Horatio Hornblower,ou=people,o=sevenSeas +sn: Bush +uid: wbush +mail: wbush@royalnavy.mod.uk +userpassword: pass + +dn: cn=Thomas Quist,ou=people,o=sevenSeas +objectclass: person +objectclass: organizationalPerson +objectclass: inetOrgPerson +objectclass: top +cn: Thomas Quist +description: Seaman Quist +givenname: Thomas +manager: cn=Horatio Hornblower,ou=people,o=sevenSeas +sn: Quist +uid: tquist +mail: tquist@royalnavy.mod.uk +userpassword: pass + +dn: cn=Moultrie Crystal,ou=people,o=sevenSeas +objectclass: person +objectclass: organizationalPerson +objectclass: inetOrgPerson +objectclass: top +cn: Moultrie Crystal +description: Lt. 
Crystal +givenname: Moultrie +manager: cn=Horatio Hornblower,ou=people,o=sevenSeas +sn: Crystal +uid: mchrysta +mail: mchrysta@royalnavy.mod.uk +userpassword: pass + +dn: cn=HMS Lydia,ou=crews,ou=groups,o=sevenSeas +objectclass: groupOfUniqueNames +objectclass: top +cn: HMS Lydia +uniquemember: cn=Horatio Hornblower,ou=people,o=sevenSeas +uniquemember: cn=William Bush,ou=people,o=sevenSeas +uniquemember: cn=Thomas Quist,ou=people,o=sevenSeas +uniquemember: cn=Moultrie Crystal,ou=people,o=sevenSeas + +# HMS Victory Crew +# ---------------- + +dn: cn=Horatio Nelson,ou=people,o=sevenSeas +objectclass: person +objectclass: organizationalPerson +objectclass: inetOrgPerson +objectclass: top +cn: Horatio Nelson +description: Lord Horatio Nelson +givenname: Horatio +sn: Nelson +uid: hnelson +mail: hnelson@royalnavy.mod.uk +userpassword: pass + +dn: cn=Thomas Masterman Hardy,ou=people,o=sevenSeas +objectclass: person +objectclass: organizationalPerson +objectclass: inetOrgPerson +objectclass: top +cn: Thomas Masterman Hardy +description: Sir Thomas Masterman Hardy +givenname: Thomas +manager: cn=Horatio Nelson,ou=people,o=sevenSeas +sn: Hardy +uid: thardy +mail: thardy@royalnavy.mod.uk +userpassword: pass + +dn: cn=Cornelius Buckley,ou=people,o=sevenSeas +objectclass: person +objectclass: organizationalPerson +objectclass: inetOrgPerson +objectclass: top +cn: Cornelius Buckley +description: LM Cornelius Buckley +givenname: Cornelius +manager: cn=Horatio Nelson,ou=people,o=sevenSeas +sn: Buckley +uid: cbuckley +mail: cbuckley@royalnavy.mod.uk +userpassword: pass + +dn: cn=HMS Victory,ou=crews,ou=groups,o=sevenSeas +objectclass: groupOfUniqueNames +objectclass: top +cn: HMS Victory +uniquemember: cn=Horatio Nelson,ou=people,o=sevenSeas +uniquemember: cn=Thomas Masterman Hardy,ou=people,o=sevenSeas +uniquemember: cn=Cornelius Buckley,ou=people,o=sevenSeas + +# HMS Bounty Crew +# --------------- + +dn: cn=William Bligh,ou=people,o=sevenSeas +objectclass: person +objectclass: organizationalPerson +objectclass: inetOrgPerson +objectclass: top +cn: William Bligh +description: Captain William Bligh +givenname: William +sn: Bligh +uid: wbligh +mail: wbligh@royalnavy.mod.uk +userpassword: pass + +dn: cn=Fletcher Christian,ou=people,o=sevenSeas +objectclass: person +objectclass: organizationalPerson +objectclass: inetOrgPerson +objectclass: top +cn: Fletcher Christian +description: Lieutenant Fletcher Christian +givenname: Fletcher +manager: cn=William Bligh,ou=people,o=sevenSeas +sn: Christian +uid: fchristi +mail: fchristi@royalnavy.mod.uk +userpassword: pass + +dn: cn=John Fryer,ou=people,o=sevenSeas +objectclass: person +objectclass: organizationalPerson +objectclass: inetOrgPerson +objectclass: top +cn: John Fryer +description: Master John Fryer +givenname: John +manager: cn=William Bligh,ou=people,o=sevenSeas +sn: Fryer +uid: jfryer +mail: jfryer@royalnavy.mod.uk +userpassword: pass + +dn: cn=John Hallett,ou=people,o=sevenSeas +objectclass: person +objectclass: organizationalPerson +objectclass: inetOrgPerson +objectclass: top +cn: John Hallett +description: Midshipman John Hallett +givenname: John +manager: cn=William Bligh,ou=people,o=sevenSeas +sn: Hallett +uid: jhallett +mail: jhallett@royalnavy.mod.uk +userpassword: pass + +dn: cn=HMS Bounty,ou=crews,ou=groups,o=sevenSeas +objectclass: groupOfUniqueNames +objectclass: top +cn: HMS Bounty +uniquemember: cn=William Bligh,ou=people,o=sevenSeas +uniquemember: cn=Fletcher Christian,ou=people,o=sevenSeas +uniquemember: cn=John Fryer,ou=people,o=sevenSeas 
+uniquemember: cn=John Hallett,ou=people,o=sevenSeas + + + diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/pki/role_mapping.yml b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/pki/role_mapping.yml new file mode 100644 index 0000000000000..60865c002d13e --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/pki/role_mapping.yml @@ -0,0 +1,4 @@ +# Role mappings for PKI tests + +user: + - "CN=Elasticsearch Test Node, OU=elasticsearch, O=org" diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/idp1.xml b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/idp1.xml new file mode 100644 index 0000000000000..a7f1b674fb53b --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/idp1.xml @@ -0,0 +1,79 @@ + + + + + + + MIIDojCCAooCCQCVTd3p5WnWmjANBgkqhkiG9w0BAQsFADCBkjELMAkGA1UEBhMC + VVMxCzAJBgNVBAgMAkNBMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMREwDwYDVQQK + DAhhdHJpY29yZTENMAsGA1UECwwEZGVtbzEXMBUGA1UEAwwOam9zc28tcHJvdmlk + ZXIxIzAhBgkqhkiG9w0BCQEWFHN1cHBvcnRAYXRyaWNvcmUuY29tMB4XDTE2MDIw + MjE3MDIwM1oXDTI2MDEzMDE3MDIwM1owgZIxCzAJBgNVBAYTAlVTMQswCQYDVQQI + DAJDQTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzERMA8GA1UECgwIYXRyaWNvcmUx + DTALBgNVBAsMBGRlbW8xFzAVBgNVBAMMDmpvc3NvLXByb3ZpZGVyMSMwIQYJKoZI + hvcNAQkBFhRzdXBwb3J0QGF0cmljb3JlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQAD + ggEPADCCAQoCggEBAKCBJiMEjYh2Id50qMGGuZzivqFy7t3IwsJgjbS+xV3Jf5Mm + PyXh1AsYpk8eKSYDb+H8+hROeqxbSneXjAi5msrD+oCJnMwz0/uMUPsmntjlrbWS + e2P2vGfLWLp708YLh2RyAA3Iz2Vx5fdbN+14zPfdMF/uNuD4e8XTU7PJcX4cIPna + 58P1ko3mCMVoPFI2KLess/EafBvc5OBBmTo3KeQ59hGRdNtCe5oeuLHapfLWnl36 + MHHkV/sdV+xVV/NsO5lVJ4al/n7snOsqBvUm++Zbey1OI3CWp9+q1CnnqFxzRiJy + SahYF5FoSiWJKpw7tXHkyU93FCVeBV5c5zxqVykCAwEAATANBgkqhkiG9w0BAQsF + AAOCAQEAU27Ag+jrg+xVbRZc3Dqk40PitlvLiT619U8eyt0LHAhX+ZGy/Ao+pJAx + SWHLP6YofG+EO3Fl4sgJ5S9py+PZwDgRQR1xfUsZ5a8tk6c0NPHpcHBU2pMuYQA+ + OoE7g5EIeAhPsmMeM2IH4Yz6qmzhvYBAvbDvGJYHi+Udxp8JHlKYjkieVw+9kI58 + 0YKeUIKXng4XXSuFHspYRLS2iDRfmeJsveOUYr9y7L4XrbLJIG/kVcpFiLkzsWJp + 1j6hwqPe748wekASae/+96l3NjT1AyNnD7rzyskUiNI6wb28OZeJoPczgzIedKXY + dmFqLRuLeSLDJK2EiUATRUqE3ys7Fw== + + + + + + + MIIDojCCAooCCQCVTd3p5WnWmjANBgkqhkiG9w0BAQsFADCBkjELMAkGA1UEBhMC + VVMxCzAJBgNVBAgMAkNBMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMREwDwYDVQQK + DAhhdHJpY29yZTENMAsGA1UECwwEZGVtbzEXMBUGA1UEAwwOam9zc28tcHJvdmlk + ZXIxIzAhBgkqhkiG9w0BCQEWFHN1cHBvcnRAYXRyaWNvcmUuY29tMB4XDTE2MDIw + MjE3MDIwM1oXDTI2MDEzMDE3MDIwM1owgZIxCzAJBgNVBAYTAlVTMQswCQYDVQQI + DAJDQTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzERMA8GA1UECgwIYXRyaWNvcmUx + DTALBgNVBAsMBGRlbW8xFzAVBgNVBAMMDmpvc3NvLXByb3ZpZGVyMSMwIQYJKoZI + hvcNAQkBFhRzdXBwb3J0QGF0cmljb3JlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQAD + ggEPADCCAQoCggEBAKCBJiMEjYh2Id50qMGGuZzivqFy7t3IwsJgjbS+xV3Jf5Mm + PyXh1AsYpk8eKSYDb+H8+hROeqxbSneXjAi5msrD+oCJnMwz0/uMUPsmntjlrbWS + e2P2vGfLWLp708YLh2RyAA3Iz2Vx5fdbN+14zPfdMF/uNuD4e8XTU7PJcX4cIPna + 58P1ko3mCMVoPFI2KLess/EafBvc5OBBmTo3KeQ59hGRdNtCe5oeuLHapfLWnl36 + MHHkV/sdV+xVV/NsO5lVJ4al/n7snOsqBvUm++Zbey1OI3CWp9+q1CnnqFxzRiJy + SahYF5FoSiWJKpw7tXHkyU93FCVeBV5c5zxqVykCAwEAATANBgkqhkiG9w0BAQsF + AAOCAQEAU27Ag+jrg+xVbRZc3Dqk40PitlvLiT619U8eyt0LHAhX+ZGy/Ao+pJAx + SWHLP6YofG+EO3Fl4sgJ5S9py+PZwDgRQR1xfUsZ5a8tk6c0NPHpcHBU2pMuYQA+ + OoE7g5EIeAhPsmMeM2IH4Yz6qmzhvYBAvbDvGJYHi+Udxp8JHlKYjkieVw+9kI58 + 0YKeUIKXng4XXSuFHspYRLS2iDRfmeJsveOUYr9y7L4XrbLJIG/kVcpFiLkzsWJp + 
1j6hwqPe748wekASae/+96l3NjT1AyNnD7rzyskUiNI6wb28OZeJoPczgzIedKXY + dmFqLRuLeSLDJK2EiUATRUqE3ys7Fw== + + + + 128 + + + + + + + + + + + + + urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified + + + + + Atricore JOSSO 2 IDP + Atricore, Inc. + http://www.atricore.org + + + diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/saml-three-certs.crt b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/saml-three-certs.crt new file mode 100644 index 0000000000000..149b55e2bcfe6 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/saml-three-certs.crt @@ -0,0 +1,66 @@ +-----BEGIN CERTIFICATE----- +MIIDWDCCAkCgAwIBAgIVANRTZaFrK+Pz19O8TZsb3HSJmAWpMA0GCSqGSIb3DQEB +CwUAMB0xGzAZBgNVBAMTEkVsYXN0aWNzZWFyY2gtU0FNTDAeFw0xNzExMjkwMjQ3 +MjZaFw0yMDExMjgwMjQ3MjZaMB0xGzAZBgNVBAMTEkVsYXN0aWNzZWFyY2gtU0FN +TDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALHTuPGOieCbD2mZUdYr +dH4ofo7qFze6rQUROCLKqf69uBuwvraNWOcwxHUTKVlLMV3ddKzYo+yfC44AMXrr +V+79xVWsTCNHu9sxQzcDwiEx2OtOOX9MAk6tJQ3svNrMPNXWh8ftwmmY9XdFZwMY +Udo6FPjSQj5uQTDmGWRgF08f7VRlk6N92d/fzn9DlDm+TFuaOr17OTSR4B6RTrNw +KC29AmXQTwCijCObjLqyMEqP20dZCQeVf2qw8JKUHhW4r6mCLzqmeR+kRTqiHMSW +xJddzxDGw6X7fOS7iuzB0+TnsKwgu8nYrEXds9MkGf1Yco7WsM43g+Es+LhNHP+e +s70CAwEAAaOBjjCBizAdBgNVHQ4EFgQUILqVKGhIi8p5Xffsow/IKFLhRbIwWQYD +VR0jBFIwUIAUILqVKGhIi8p5Xffsow/IKFLhRbKhIaQfMB0xGzAZBgNVBAMTEkVs +YXN0aWNzZWFyY2gtU0FNTIIVANRTZaFrK+Pz19O8TZsb3HSJmAWpMA8GA1UdEwEB +/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAGhl4V9mp4SWSV2E3HAJ1PX+Vmp6 +k27Kd0tkOk1B9fyA13QB30teyiL7RR0vSHRyWFY8rQH1mHD366GKRWLITRG/QPUL +amGdYXX4h0pFj5ldaubLxM/O9vEAxOgmo/lsdkeIq9tLBqY06r/5A/Mcgo63KGi0 +0AFYBoyvqfOu6nRLPnQr+rKVfdNOpWeIiFY1i2XTNZ3CZjNPSTwiQMUzrCxKXB9l +L0vF6QL2Gj2iBhzNfXi88wf7xaR6XKY1wNuv3HLPsL7n+PWby7LRX188dyS1dmKf +QcrKL65OssBA5NC8CAYyBiygBmWN+5kVJM5fSb0SwPSoVWrNyz+8IUldQE8= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID0zCCArugAwIBAgIJALi5bDfjMszLMA0GCSqGSIb3DQEBCwUAMEgxDDAKBgNV +BAoTA29yZzEWMBQGA1UECxMNZWxhc3RpY3NlYXJjaDEgMB4GA1UEAxMXRWxhc3Rp +Y3NlYXJjaCBUZXN0IE5vZGUwHhcNMTUwOTIzMTg1MjU3WhcNMTkwOTIyMTg1MjU3 +WjBIMQwwCgYDVQQKEwNvcmcxFjAUBgNVBAsTDWVsYXN0aWNzZWFyY2gxIDAeBgNV +BAMTF0VsYXN0aWNzZWFyY2ggVGVzdCBOb2RlMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA3rGZ1QbsW0+MuyrSLmMfDFKtLBkIFW8V0gRuurFg1PUKKNR1 +Mq2tMVwjjYETAU/UY0iKZOzjgvYPKhDTYBTte/WHR1ZK4CYVv7TQX/gtFQG/ge/c +7u0sLch9p7fbd+/HZiLS/rBEZDIohvgUvzvnA8+OIYnw4kuxKo/5iboAIS41klMg +/lATm8V71LMY68inht71/ZkQoAHKgcR9z4yNYvQ1WqKG8DG8KROXltll3sTrKbl5 +zJhn660es/1ZnR6nvwt6xnSTl/mNHMjkfv1bs4rJ/py3qPxicdoSIn/KyojUcgHV +F38fuAy2CQTdjVG5fWj9iz+mQvLm3+qsIYQdFwIDAQABo4G/MIG8MAkGA1UdEwQC +MAAwHQYDVR0OBBYEFEMMWLWQi/g83PzlHYqAVnty5L7HMIGPBgNVHREEgYcwgYSC +CWxvY2FsaG9zdIIVbG9jYWxob3N0LmxvY2FsZG9tYWluggpsb2NhbGhvc3Q0ghds +b2NhbGhvc3Q0LmxvY2FsZG9tYWluNIIKbG9jYWxob3N0NoIXbG9jYWxob3N0Ni5s +b2NhbGRvbWFpbjaHBH8AAAGHEAAAAAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQEL +BQADggEBAMjGGXT8Nt1tbl2GkiKtmiuGE2Ej66YuZ37WSJViaRNDVHLlg87TCcHe +k2rdO+6sFqQbbzEfwQ05T7xGmVu7tm54HwKMRugoQ3wct0bQC5wEWYN+oMDvSyO6 +M28mZwWb4VtR2IRyWP+ve5DHwTM9mxWa6rBlGzsQqH6YkJpZojzqk/mQTug+Y8aE +mVoqRIPMHq9ob+S9qd5lp09+MtYpwPfTPx/NN+xMEooXWW/ARfpGhWPkg/FuCu4z +1tFmCqHgNcWirzMm3dQpF78muE9ng6OB2MXQwL4VgnVkxmlZNHbkR2v/t8MyZJxC +y4g6cTMM3S/UMt5/+aIB2JAuMKyuD+A= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID1zCCAr+gAwIBAgIJALnUl/KSS74pMA0GCSqGSIb3DQEBCwUAMEoxDDAKBgNV +BAoTA29yZzEWMBQGA1UECxMNZWxhc3RpY3NlYXJjaDEiMCAGA1UEAxMZRWxhc3Rp 
+Y3NlYXJjaCBUZXN0IENsaWVudDAeFw0xNTA5MjMxODUyNTVaFw0xOTA5MjIxODUy +NTVaMEoxDDAKBgNVBAoTA29yZzEWMBQGA1UECxMNZWxhc3RpY3NlYXJjaDEiMCAG +A1UEAxMZRWxhc3RpY3NlYXJjaCBUZXN0IENsaWVudDCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAMKm+P6vDAff0c6BWKGdhnYoNl9HijLIgfU3d9CQcqKt +wT+yUW3DPSVjIfaLmDIGj6Hl8jTHWPB7ZP4fzhrPi6m4qlRGclJMECBuNASZFiPD +tEDv3msoeqOKQet6n7PZvgpWM7hxYZO4P1aMKJtRsFAdvBAdZUnv0spR5G4UZTHz +SKmMeanIKFkLaD0XVKiLQu9/z9M6roDQeAEoCJ/8JsanG8ih2ymfPHIZuNyYIOrV +ekHN2zU6bnVn8/PCeZSjS6h5xYw+Jl5gzGI/n+F5CZ+THoH8pM4pGp6xRVzpiH12 +gvERGwgSIDXdn/+uZZj+4lE7n2ENRSOt5KcOGG99r60CAwEAAaOBvzCBvDAJBgNV +HRMEAjAAMB0GA1UdDgQWBBSSFhBXNp7AaNrHdlgCV0mCEzt7ajCBjwYDVR0RBIGH +MIGEgglsb2NhbGhvc3SCFWxvY2FsaG9zdC5sb2NhbGRvbWFpboIKbG9jYWxob3N0 +NIIXbG9jYWxob3N0NC5sb2NhbGRvbWFpbjSCCmxvY2FsaG9zdDaCF2xvY2FsaG9z +dDYubG9jYWxkb21haW42hwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMA0GCSqGSIb3 +DQEBCwUAA4IBAQANvAkddfLxn4/BCY4LY/1ET3d7ZRldjFTyjjHRYJ3CYBXWVahM +skLxIcFNca8YjKfXoX8mcK+NQK/dAbGHXqk76yMlkrKjh1OQiZ1YAX5ryYerGrZ9 +9N3E9wnbn72bW3iumoLlqmTWlHEpMI0Ql6J75BQLTgKHxCPupVA5sTbWkKwGjXXA +i84rUlzhDJOR8jk3/7ct0iZO8Hk6AWMcNix5Wka3IDGUXuEVevYRlxgVyCxcnZWC +7JWREpar5aIPQFkY6VCEglxwUyXbHZw5T/u6XaKKnS7gz8RiwRh68ddSQJeEHi5e +4onUD7bOCJgfsiUwdiCkDbfN9Yum8OIpmBRs +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/saml.crt b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/saml.crt new file mode 100644 index 0000000000000..b28dcba201a1c --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/saml.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDWDCCAkCgAwIBAgIVANRTZaFrK+Pz19O8TZsb3HSJmAWpMA0GCSqGSIb3DQEB +CwUAMB0xGzAZBgNVBAMTEkVsYXN0aWNzZWFyY2gtU0FNTDAeFw0xNzExMjkwMjQ3 +MjZaFw0yMDExMjgwMjQ3MjZaMB0xGzAZBgNVBAMTEkVsYXN0aWNzZWFyY2gtU0FN +TDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALHTuPGOieCbD2mZUdYr +dH4ofo7qFze6rQUROCLKqf69uBuwvraNWOcwxHUTKVlLMV3ddKzYo+yfC44AMXrr +V+79xVWsTCNHu9sxQzcDwiEx2OtOOX9MAk6tJQ3svNrMPNXWh8ftwmmY9XdFZwMY +Udo6FPjSQj5uQTDmGWRgF08f7VRlk6N92d/fzn9DlDm+TFuaOr17OTSR4B6RTrNw +KC29AmXQTwCijCObjLqyMEqP20dZCQeVf2qw8JKUHhW4r6mCLzqmeR+kRTqiHMSW +xJddzxDGw6X7fOS7iuzB0+TnsKwgu8nYrEXds9MkGf1Yco7WsM43g+Es+LhNHP+e +s70CAwEAAaOBjjCBizAdBgNVHQ4EFgQUILqVKGhIi8p5Xffsow/IKFLhRbIwWQYD +VR0jBFIwUIAUILqVKGhIi8p5Xffsow/IKFLhRbKhIaQfMB0xGzAZBgNVBAMTEkVs +YXN0aWNzZWFyY2gtU0FNTIIVANRTZaFrK+Pz19O8TZsb3HSJmAWpMA8GA1UdEwEB +/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAGhl4V9mp4SWSV2E3HAJ1PX+Vmp6 +k27Kd0tkOk1B9fyA13QB30teyiL7RR0vSHRyWFY8rQH1mHD366GKRWLITRG/QPUL +amGdYXX4h0pFj5ldaubLxM/O9vEAxOgmo/lsdkeIq9tLBqY06r/5A/Mcgo63KGi0 +0AFYBoyvqfOu6nRLPnQr+rKVfdNOpWeIiFY1i2XTNZ3CZjNPSTwiQMUzrCxKXB9l +L0vF6QL2Gj2iBhzNfXi88wf7xaR6XKY1wNuv3HLPsL7n+PWby7LRX188dyS1dmKf +QcrKL65OssBA5NC8CAYyBiygBmWN+5kVJM5fSb0SwPSoVWrNyz+8IUldQE8= +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/saml.key b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/saml.key new file mode 100644 index 0000000000000..b3f4dcd7af789 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/saml.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAsdO48Y6J4JsPaZlR1it0fih+juoXN7qtBRE4Isqp/r24G7C+ +to1Y5zDEdRMpWUsxXd10rNij7J8LjgAxeutX7v3FVaxMI0e72zFDNwPCITHY6045 +f0wCTq0lDey82sw81daHx+3CaZj1d0VnAxhR2joU+NJCPm5BMOYZZGAXTx/tVGWT 
+o33Z39/Of0OUOb5MW5o6vXs5NJHgHpFOs3AoLb0CZdBPAKKMI5uMurIwSo/bR1kJ +B5V/arDwkpQeFbivqYIvOqZ5H6RFOqIcxJbEl13PEMbDpft85LuK7MHT5OewrCC7 +ydisRd2z0yQZ/VhyjtawzjeD4Sz4uE0c/56zvQIDAQABAoIBACor4bI3cW84lqEe +YQ//YjMA+3J9BWCL/rgy6zMlCMeGyoCWm8PDJZA+OnjhQG8Sq/RsrlzHS6iz1xOn +7BQQd9wPhprYr322Z1CGEECdvY4NujmjZXNQ3UM3QJBa0ja5NLwpki3RrsdIJP5S +ESnkvJSaLZsMkfUoRIGKL/9j4mM6889pPqmfJ1jCfC5hVkURKJCdLm11x6Z0diwB +bKwmlkHeUwp2j0cWn1kaktU5TJM7WblRsHhKvM1RBB6uqBF+8UFeUV9o4L1GAz30 +VmzW+5alOVCcWxw5SveAK1xKGFaZSdA/UjJd6FVVIR4CQfWMQOgxn92toC1BItr5 +kEL10mkCgYEA3izrMMQrbWP1FMuLeDA9L5umYNYbJDfTMne8tVghNB+FXKY1nhP3 +gpsmhrAKM6/jnFVQLU+daj/tuMS9o+jP1vlDZFVFxiMV7RDyLUh/jamCrkcsL9Nm +z9fTvnQsHTw//utabzqN9ZNmHPpu6fwaCel8xRGaJwh9xRw0M+CFGDcCgYEAzOZd +huXQlFi0H5SYkobDVXLN4jpShizwR7fmzivZiazUC7ob/yMPAxSp7lvkNbf125QO +0w4P6lcbWYrq9JbN/EtyXnwvxr/2I6b58794BueEfRi0kvTeJvWYlu5db8EJ/UyI +dS7/bWlXsWmqz53qyr4mCN6vpf+RhKXO/8cTMasCgYEAoSFeqQ18mWdHFX/RdKc3 +mNF/xvdA0/GQwCdKuxJhclXx+JGqjSzZ8uTm+gOuQnIr1vOgpFv7RFV86OX2y2tL +YROCJsAZTGuhuT7zBJhxuaHYXuDLURw2kkJrFNNYMxPBLfYurkB/OD1gb1/JFPV7 +RPkLPjlX3TsxS8T6ry0EGd0CgYBgPtUvyENxazkwnFwOd77JJGQRH9QjsngrN/by +YSaNxODVoVYFQjKK/+nSTNelZAPYYHOWL8TKDqCKLvaubSfUmpveRfcrJIA5vvUp +yYZXt+hD2pb3cI3pPvOt8clx9Pr8QDbY1HqmqhTHARt66nKfSTIUeIWJM8Lmy/uv +qZMagQKBgQCeghwcAsECYKsEpOgvZ0ZyaZkzBovanWlWAQcOi2apsMOurWL5i5KZ +QFZwp0YXrKk0xDvuk2wc1WKcWstLJVCP7wGn2K2fdBnOK+EypyZ+phRuMXgBrvVI +x2rQe7lcPwm8KSrg/079flm5i+wgYgNYkxU8Ljk2LRECeLuffHgzxA== +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/saml.p12 b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/saml.p12 new file mode 100644 index 0000000000000..be2e45d1074aa Binary files /dev/null and b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/saml.p12 differ diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/saml_with_password.key b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/saml_with_password.key new file mode 100644 index 0000000000000..cc9a4fba42602 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/saml_with_password.key @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,E5CF40A599E7BBB7 + +TnxQWgs1sKEW5OIFMWQS/Iyqz25nIf7321MiEa/Z8lakv2bh8MBJCIp7r3pN4viJ +K5bMJMIsjqUdolcvbuPXgtA6uuhnf/X4o7sU/fZhsgOiBkoj3r2pWOpHHx+am3G7 +uzrVlDgc36X5lVRUZLdnIeT3aLAk8+ObXF624TxISCUPZluGC0NwJlNZ5yxIeSTv +47hd1OVfdKH+IYoa8Illt10njcl398z5AUWiX+j6ozlJnnufi0kRdQJ3Da0q5rUg +IlUpqNmPwG2tl8Ys1tNaU8fpf22rLDo7S2P4S+SxzMvDYXFr/VDy4nFtWDuP6hTQ +VA51txLKOR0+FhZNOUOZH6YjFv4LOUS3/doonPrp/7z4C6cnf3MECtnhOG2zVcBX +FKCg7iKBmml92tOsCIAXCjUlpTHQniNqxNtOiWySlt/83XPLPQSpeNuSismWSHlN +lVUGkcysjEaZu86DVc6NN91s7oG0x/R0UKU17NUZFtCaYCXYtdtRlpgpmMOD8KFp +3NdZUFPQ0zetXqS/skdg1tKd49amKO7Qj+V9nWMzFnwPTM+LD3hV4Ehb2D41nTxz +b0UFNb/vcYUZzP6+OvgrSyhH9f7pYmyt+Ky80wJ1eOB1ZRReo0iXYPZZo4G8spJy +SHc18HswxU+ICMB77tHHDIJXnGQr9yTDYph3ZEs4m/NIP91f0XjeexsRcSDrKImD ++UY6cY8a76HOT70Wl+Na0ZCKE9BkpWLgbkZbH9arhIbW12wSdvO6oPGFU9FOVL2V +L0RMGSYQPowXyktBer+b1ZPrijOYWLqkn/S5prOjCjD/qxnWts9DGeNixrw3F7HY +yEUbl3amc1/Zxi60CsNHmV6wvQIzoz3Pz+U6foIeJxLj5glFQiA1Yhivd1YGWeFk +t3uqNGRe6C6sNzpF4LCKpxnEbZdAM6QzcmHIAcfINuc00zNjV6B3o1L91wXO/bd/ +VJ4zCcO0UxipLhaiXvMpjMOW3uJri5nlm1cWnGWH3M20l7hLQ4sUw0IUh9wA412K +6muTqlvwYQtYwwG0nz99bnKwHIO01vbAQACYk13j37wUsG57DFygxs+fcITV0AfB 
+PKwAKISmlr45PztXybEXg92mzkBsdXFMhpHeDhYDeC3g7DGXI5cEqbiATu8Gvsy2 +F4ylZMl2gHG4/2GSjEdHLv2uXPmZYFFeoJY+9GdNS6yz0ncn9yahG57OawbXR/fS +NPfLyDg8C+fRC2O0TI4/a51sXy2bEE0NBRdrY1VZp9sc4nsRvUdvxvdd++R+PeNt +oAqRHqTTksRJqBwy3+KMerGk0z9RLPzzLQkBVeH5bQtqhZq43bI6Zf8jDqKlb1H+ +YhkgU4DsO0iPKvtIMEa31U9Qc2nQiIWjSk4v8gr5tcqoekynV4rKosTV1+GSRlkn +l245GuOOE2PKVYn0jrUIn/IGzcMfORRH4/Sl/gy9ikYS70tykyJVoMplIeb1awMa ++FNAD17iNhaLOvuEfL5zCQtjXHyCRzReGxxmO9F2lN26Lr0MzM8k3bIrFTBVL/+n +5pg6I6i8CXFWfpi08fP5KDU447AaBvdozm9L2JWIKaxjHev+NIy2Og2qtR34nBPH +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/saml_with_password.p12 b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/saml_with_password.p12 new file mode 100644 index 0000000000000..a764c67fc2a4c Binary files /dev/null and b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/saml/saml_with_password.p12 differ diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/support/role_mapping.yml b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/support/role_mapping.yml new file mode 100644 index 0000000000000..4cb60ff8997e9 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authc/support/role_mapping.yml @@ -0,0 +1,12 @@ +# roleA this is an elasticsearch role +# - groupA-DN this is any group, ldap DN, or unix group +# - groupB-DN +# - user1-DN + +#This is an example of ldap mapping configuration +security: + - "cn=avengers,ou=marvel,o=superheros" + - "cn=shield,ou=marvel,o=superheros" +avenger: + - "cn=avengers,ou=marvel,o=superheros" + - "cn=Horatio Hornblower,ou=people,o=sevenSeas" diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/default_roles.yml b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/default_roles.yml new file mode 100644 index 0000000000000..68e003b8cdedd --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/default_roles.yml @@ -0,0 +1,3 @@ +# The default roles file is empty as the preferred method of defining roles is +# through the API/UI. File based roles are useful in error scenarios when the +# API based roles may not be available. diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/invalid_roles.yml b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/invalid_roles.yml new file mode 100644 index 0000000000000..11657750c5104 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/invalid_roles.yml @@ -0,0 +1,47 @@ +valid_role: + cluster: + - ALL + indices: + - names: idx + privileges: + - ALL + +"fóóbár": + cluster: all + +# invalid role deifnition +role1: cluster: ALL indices: '*': ALL + +# invalid role cluster privilege +role2: + cluster: blkjdlkd + indices: + '*': ALL + +# invalid role indices deifnition +role3: + cluster: ALL + indices: '*': ALL + +# invalid role indices privilegs +role4: + cluster: ALL + indices: + '*': al;kjdlkj;lkj + +#dadfad +# role won't be available since empty privileges... 
+role5: + cluster: + indices: + - names: + #adfldkkd + - idx2 + privileges: + - names: + - '' + privileges: + - READ + - names: + - 'idx1' + privileges: [] diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/reserved_roles.yml b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/reserved_roles.yml new file mode 100644 index 0000000000000..5dfae635e5b97 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/reserved_roles.yml @@ -0,0 +1,34 @@ +# All cluster rights +# All operations on all indices +admin: + cluster: + - all + indices: + - names: '*' + privileges: [ all ] + +_system: + cluster: + - all + indices: + - names: '*' + privileges: + - all + +superuser: + cluster: + - all + indices: + - names: '*' + privileges: + - all + run_as: + - '*' + +kibana_system: + cluster: + - all + +transport_client: + cluster: + - all diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/roles.yml b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/roles.yml new file mode 100644 index 0000000000000..99459c5f5ec26 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/roles.yml @@ -0,0 +1,72 @@ +role1: + cluster: + - ALL + indices: + - names: + - idx1 + - idx2 + privileges: + - READ + - names: idx3 + privileges: + - READ + - WRITE + +role1.ab: + cluster: + - ALL + +role2: + cluster: + - ALL + - MONITOR + +role3: + indices: + - names: '/.*_.*/' + privileges: + - READ + - WRITE + +# role with run_as permissions only +role_run_as: + run_as: + - user1 + - user2 + +# role with more than run_as +role_run_as1: + run_as: [user1, user2] + +role_fields: + indices: + - names: + #23456789ohbh + - 'field_idx' + privileges: + - READ + field_security: + grant: + - foo + - boo + +role_query: + indices: + - names: + - 'query_idx' + privileges: + - READ + query: '{ "match_all": {} }' + +role_query_fields: + indices: + - names: + - 'query_fields_idx' + privileges: + - READ + query: + match_all: + field_security: + grant: + - foo + - boo \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/roles2xformat.json b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/roles2xformat.json new file mode 100644 index 0000000000000..0e356ba10d5fe --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/roles2xformat.json @@ -0,0 +1,19 @@ +{ + "indices": [ + { + "names": [ + "test" + ], + "privileges": [ + "READ" + ], + "query": { + "match_all": {} + }, + "fields": [ + "foo", + "boo" + ] + } + ] +} diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/roles2xformat.yml b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/roles2xformat.yml new file mode 100644 index 0000000000000..ebfdce617a013 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/authz/store/roles2xformat.yml @@ -0,0 +1,11 @@ +role1: + indices: + - names: + - 'test' + privileges: + - READ + query: + match_all: + fields: + - foo + - boo \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/keystore/testnode-different-passwords.jks 
b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/keystore/testnode-different-passwords.jks new file mode 100644 index 0000000000000..bbe83e2633f71 Binary files /dev/null and b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/keystore/testnode-different-passwords.jks differ diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/keystore/testnode.cert b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/keystore/testnode.cert new file mode 100644 index 0000000000000..df83644be0f8f --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/keystore/testnode.cert @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIJAJ+K5mGS3n/AMA0GCSqGSIb3DQEBCwUAMEgxDDAKBgNV +BAoTA29yZzEWMBQGA1UECxMNZWxhc3RpY3NlYXJjaDEgMB4GA1UEAxMXRWxhc3Rp +Y3NlYXJjaCBUZXN0IE5vZGUwHhcNMTQxMjE2MTcwNDQ1WhcNMTgxMjE1MTcwNDQ1 +WjBIMQwwCgYDVQQKEwNvcmcxFjAUBgNVBAsTDWVsYXN0aWNzZWFyY2gxIDAeBgNV +BAMTF0VsYXN0aWNzZWFyY2ggVGVzdCBOb2RlMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAzhpW7iwkm+Og+HP7U00nbmh0Hy9Z2Ldp5i8tJSlSQwTxCCvO +rse6jwJQN98Dk1ApaSzimZrlKOotFyPV1L3fnOzJbTp1Yq/VsYP4zJkjWtID0qUf +8Rg8bLhjKAG+ZlLuai5XZqnLkdmqvQeR61VhpXWFm0Om153tWmAiHL18ywY71gXN +EnkeFo9OW4fDqkz6h7NJziYvU6URSKErZDEixk5GIPv9K9hiIfi0KQM6xaHp0d2w +VCyFVC0OUdugz6untURzJVx4U3X1bQcv/o2BoUotWh/5h8o5eeiiv2OGZ1XlO+33 +1tweYI4wFjDwnAyHHRr/rk2ZIBiBYGaSzHnuhQIDAQABo1owWDAJBgNVHRMEAjAA +MB0GA1UdDgQWBBTwGg2LF8+mzsvBBWxJKv6VXv3dMTAsBgNVHREEJTAjgglsb2Nh +bGhvc3SHBH8AAAGHEAAAAAAAAAAAAAAAAAAAAAIwDQYJKoZIhvcNAQELBQADggEB +ABP4ufLToJhcUselVxV9LPD5VGPEHGLdIFqsUEix7DMsiNpR76X6a8qNQbZpdbd6 ++qPKqoaMgC7znX7qZtCqRbIXTWbudZPxFkcHdiWx3SiALMQYabeUGetClX3sCndU +SUoV8f34i8dJxfNcqhLcsh4zpgxtmwsvs5OLMTBvm0Xo2zUFUjlmrt41pBrWEuq9 +nkObc/cr6Syiz3sy4pYVJO1/YwHaZgE/URqjVlari70DR3ES4YnIUnLQajKx2Q0/ +gXVgzjbe68KPOUGCz6GYiWq+d4tcWdHzLv1GsaqQ1MD9P21ArfrX4DpzgPDrO6MP +9Ppq5DQGa2q4mz3kipd5RIs= +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/keystore/testnode.jks b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/keystore/testnode.jks new file mode 100644 index 0000000000000..4df11f34c3650 Binary files /dev/null and b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/keystore/testnode.jks differ diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/keystore/truststore-testnode-only.jks b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/keystore/truststore-testnode-only.jks new file mode 100644 index 0000000000000..d75109b2a6854 Binary files /dev/null and b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/keystore/truststore-testnode-only.jks differ diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/plugin/roles.yml b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/plugin/roles.yml new file mode 100644 index 0000000000000..b4feb8357bd1c --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/plugin/roles.yml @@ -0,0 +1,4 @@ +user: + cluster: ALL + indices: + '*': ALL \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/plugin/users b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/plugin/users new file mode 100644 index 0000000000000..75d09e55f1ee7 --- 
/dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/plugin/users @@ -0,0 +1 @@ +test_user:{plain}x-pack-test-password \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/plugin/users_roles b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/plugin/users_roles new file mode 100644 index 0000000000000..f859fb3bb0d20 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/plugin/users_roles @@ -0,0 +1 @@ +test_user:user \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/README.asciidoc b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/README.asciidoc new file mode 100644 index 0000000000000..5b2a6b737d779 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/README.asciidoc @@ -0,0 +1,36 @@ += Keystore Details +This document details the steps used to create the certificate and keystore files in this directory. + +== Instructions on generating self-signed certificates +The certificates in this directory have been generated using the following openssl configuration and commands. + +The OpenSSL configuration file is located in this directory as `openssl_config.cnf`. + +NOTE: The `alt_names` section provides the Subject Alternative Names for each certificate. This is necessary for testing +with hostname verification enabled. + +[source,shell] +----------------------------------------------------------------------------------------------------------- +openssl req -new -x509 -extensions v3_req -out <NAME>.cert -keyout <NAME>.pem -days 1460 -config config.cnf +----------------------------------------------------------------------------------------------------------- + +When prompted, the password is always set to the value of `<PASSWORD>`. + +Because we intend to import these certificates into a Java Keystore file, the certificate and private key must be combined +into a PKCS12 file. + +[source,shell] +----------------------------------------------------------------------------------------------------------- +openssl pkcs12 -export -name <NAME> -in <NAME>.cert -inkey <NAME>.pem -out <NAME>.p12 +----------------------------------------------------------------------------------------------------------- + +== Creating the Keystore +We need to create a keystore from the PKCS12 file created above. + +[source,shell] +----------------------------------------------------------------------------------------------------------- +keytool -importkeystore -destkeystore <NAME>.jks -srckeystore <NAME>.p12 -srcstoretype pkcs12 -alias <NAME> +----------------------------------------------------------------------------------------------------------- + +The keystore is now created and has the private/public key pair. You can import additional trusted certificates using +`keytool -importcert`. When doing so, make sure to specify an alias so that others can recreate the keystore if necessary.
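The README above refers to `keytool -importcert` without showing the invocation. A minimal sketch follows, assuming an illustrative CA certificate file `some-ca.crt` and alias `some-ca` (both placeholders, not resources in this directory), with `<NAME>.jks` standing for the keystore generated by the steps above:

[source,shell]
-----------------------------------------------------------------------------------------------------------
# Import an additional trusted CA certificate into the generated keystore,
# giving it an explicit alias so the keystore can be recreated later if needed.
# keytool prompts for the keystore password interactively.
keytool -importcert -keystore <NAME>.jks -file some-ca.crt -alias some-ca
-----------------------------------------------------------------------------------------------------------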
diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/active-directory-ca.crt b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/active-directory-ca.crt new file mode 100644 index 0000000000000..453d1361ce434 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/active-directory-ca.crt @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID1zCCAr+gAwIBAgIQWA24rVK7FopAgOHfEio/VjANBgkqhkiG9w0BAQsFADB+ +MRMwEQYKCZImiZPyLGQBGRYDY29tMR0wGwYKCZImiZPyLGQBGRYNZWxhc3RpY3Nl +YXJjaDEUMBIGCgmSJomT8ixkARkWBHRlc3QxEjAQBgoJkiaJk/IsZAEZFgJhZDEe +MBwGA1UEAxMVYWQtRUxBU1RJQ1NFQVJDSEFELUNBMB4XDTE0MDgyNzE2MjI0MloX +DTI5MDgyNzE2MzI0MlowfjETMBEGCgmSJomT8ixkARkWA2NvbTEdMBsGCgmSJomT +8ixkARkWDWVsYXN0aWNzZWFyY2gxFDASBgoJkiaJk/IsZAEZFgR0ZXN0MRIwEAYK +CZImiZPyLGQBGRYCYWQxHjAcBgNVBAMTFWFkLUVMQVNUSUNTRUFSQ0hBRC1DQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALNNZsDJ+lhsE/pCIkNlq6/F +xwv3PU2M+E1/SbWrLEtfbb1ATnn98DwxjpCj00wS0bt26/7zrhHKyX5LaxyS27ER +8bKpLSO4qcVWzDIQnVNk2XfBrYS/Og+6Pi/Lw/ylt/vE++kHWIJBc4O6i+pPByOM +oypM6bh71kTkpK8OTPqf+HiPp0qKhRah6XVtqTc+kOCOku2+wkELbCz8RNzF9ca6 +Uu3YxLi73pNdk0wDTmg6JVaUyVRpSkjJH4BAp9SVma6Rxy6tbh4e5P+8K8lY9ptM +TBzTsDS1EhNK/92xULfQbGT814Z294pF3ARMEJ89N+aegS++kz7CqjciZ1+bA6EC +AwEAAaNRME8wCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FIEKG0KdSVNknKcMZkbTlKo7N8MjMBAGCSsGAQQBgjcVAQQDAgEAMA0GCSqGSIb3 +DQEBCwUAA4IBAQBgbWBXPbEMTEsiVWzoxmTw1wJASBdPahx6CggutjGq3ASjby4p +nVCTwE4xdDEVyFGmeslSp9+23XjBuaiqVPtYw8P8hnG269J0q4cOF/VXOccRLeOw +HVDBv2a7xzgBSwc1KB50TLv07stcBmBYNu8anN6EwGksdgjb8IjRV6U3U+IvFNrI +rGifuIc/iRZD4Clhnpxw8tCsgcrcmz9CU7CN5RxKVEpZ6ou6ZjHO8l8H0t9zWrSI +PL+33iBGHNWlyU63N93XgJtxV1em1hHryLtTTtaVZJJ3R0OrLrUpG8SQ7zCUy62f +YtImFPClUMXY03yH+4DAhflueRvY/D1AKL12 +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/openldap.crt b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/openldap.crt new file mode 100644 index 0000000000000..bcabf51acb658 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/openldap.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEBzCCAu+gAwIBAgIBAjANBgkqhkiG9w0BAQUFADBGMQswCQYDVQQGEwJVUzEW +MBQGA1UEChMNZWxhc3RpY3NlYXJjaDENMAsGA1UECxMEdGVzdDEQMA4GA1UEAxMH +cm9vdC1jYTAeFw0xNDA4MjcxNTMyNTNaFw0xNTA4MjcxNTMyNTNaME4xCzAJBgNV +BAYTAlVTMRYwFAYDVQQKEw1lbGFzdGljc2VhcmNoMQ0wCwYDVQQLEwR0ZXN0MRgw +FgYDVQQDEw9pcC0xNzItMzAtMC0xNTQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQC5iQ7DiQwT1YVoLycCGsm3p4Lp8V3uo/+sV9dA4fdpJ7i84Fq0V8g6 +0VjKMIhL8mrzbjCF8hDREO6fnuAwOM8IqladwFzEqFPnvLo/Uv5J7/BgqRN2H25x +REQrOOMRsvWZWslV4aAj3ivmYu3HU6gA0I1/OsI0MCU/kotdVRkAEcu3HL16AEGK +4CkQhw3JKt7iWm2vODQ7BZadrNvjKS6C7pqV/c4+r4T1aEQNbzvM7Br70NLqA9M4 +HeOJjyqEbbr9rHtIP9Cy0M6Lp52ho7sTA2D5b/bWCSog6DvClmj2NCySR2lR1QYF +S7qrsyE6ePq8TnyoReOAxqJdjm+tFvzzAgMBAAGjgfcwgfQwCQYDVR0TBAIwADAw +BglghkgBhvhCAQ0EIxYhWWFTVCBHZW5lcmF0ZWQgU2VydmVyIENlcnRpZmljYXRl +MBEGCWCGSAGG+EIBAQQEAwIGQDALBgNVHQ8EBAMCBaAwHQYDVR0OBBYEFDH+zO6n +PQHyeqiKfo5kVYUQpSJDMHYGA1UdIwRvMG2AFO/jQiwne6+hZEs7afKnyoddejH8 +oUqkSDBGMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNZWxhc3RpY3NlYXJjaDENMAsG +A1UECxMEdGVzdDEQMA4GA1UEAxMHcm9vdC1jYYIJAOwOXHgWfvKEMA0GCSqGSIb3 +DQEBBQUAA4IBAQCk6wXIRaIQrhjD1p9wkv2tjEmnktoic8I0Z3CaHe80aLLcX5n/ 
+7Vlo4ccp3mWAhON7dCGSj9Qc7wj28h3CkDIIntQpErhEhEpSRU70P3SD7jn3tcrF +fu+SUwtP1YLLjU9S10Jx0eImufMj0AgwIL+axjxgCfQ+nxeLWTt35zZzQqizkSSy +PVGkY/YZy1tc4JdJl9TbwGsxWgLTHs7bD1WPAYovreblRuHNEwabwwgDK+F6G7Lh +BVYCygiuyG/MehQZSgb2LmX4O1QyVe2bZJUZQNMdZLORRdGQXf6grakdolqPIASZ +SpzwTZU5InUGvQG0HuOn//+wHZ0rih/qWV+3 +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/openldap.der b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/openldap.der new file mode 100644 index 0000000000000..8cdc214f0eb01 Binary files /dev/null and b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/openldap.der differ diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/openssl_config.cnf b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/openssl_config.cnf new file mode 100644 index 0000000000000..38adbbc776ef8 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/openssl_config.cnf @@ -0,0 +1,35 @@ +[ req ] +default_bits = 2048 # Size of keys +default_keyfile = key.pem # name of generated keys +default_md = sha256 # message digest algorithm +string_mask = nombstr # permitted characters +distinguished_name = req_distinguished_name +req_extensions = v3_req + +[ req_distinguished_name ] +0.organizationName = Organization Name (company) +organizationalUnitName = Organizational Unit Name (department, division) +emailAddress = Email Address +emailAddress_max = 40 +localityName = Locality Name (city, district) +stateOrProvinceName = State or Province Name (full name) +countryName = Country Name (2 letter code) +countryName_min = 2 +countryName_max = 2 +commonName = Common Name (hostname, IP, or your name) +commonName_max = 64 + +[ v3_req ] +basicConstraints = CA:FALSE +subjectKeyIdentifier = hash +subjectAltName = @alt_names + +[ alt_names ] +DNS.1 = localhost +DNS.2 = localhost.localdomain +DNS.3 = localhost4 +DNS.4 = localhost4.localdomain4 +DNS.5 = localhost6 +DNS.6 = localhost6.localdomain6 +IP.1 = 127.0.0.1 +IP.2 = ::1 diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/prime256v1-cert.pem b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/prime256v1-cert.pem new file mode 100644 index 0000000000000..b85edb30b02df --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/prime256v1-cert.pem @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE----- +MIICYjCCAgmgAwIBAgIJAPBTfsMrh6VTMAkGByqGSM49BAEwWDELMAkGA1UEBhMC +VVMxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0IFdpZGdp +dHMgUHR5IEx0ZDERMA8GA1UEAxMISmF5cy1NQlAwHhcNMTcwNDExMTM1MDQ3WhcN +MTkwNDExMTM1MDQ3WjBYMQswCQYDVQQGEwJVUzETMBEGA1UECBMKU29tZS1TdGF0 +ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMREwDwYDVQQDEwhK +YXlzLU1CUDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABMMRTMV1HqtZPH7dwtWI +q/kLw8cYV8vK7bN7Mi09q9JQogbvwRkVir8b1/3DgUEvLv+8u8zgcIcx2iaWtaLz +rfmjgbwwgbkwHQYDVR0OBBYEFGgv2RGxkfH8fDYUMiAcLgSZ2el2MIGJBgNVHSME +gYEwf4AUaC/ZEbGR8fx8NhQyIBwuBJnZ6XahXKRaMFgxCzAJBgNVBAYTAlVTMRMw +EQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0 +eSBMdGQxETAPBgNVBAMTCEpheXMtTUJQggkA8FN+wyuHpVMwDAYDVR0TBAUwAwEB 
+/zAJBgcqhkjOPQQBA0gAMEUCIBI2zkYo8aZImnlXxIS+7cILdx8AKo6VNvGykn3X +k/n1AiEAp5O/xswzb35GZbAnNCbXDYi2Ny2mv1S9WypHC6Y5/qk= +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/prime256v1-key-noparam.pem b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/prime256v1-key-noparam.pem new file mode 100644 index 0000000000000..2cb86113d41fc --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/prime256v1-key-noparam.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIN74E4fO1Pq89hh7NYjUBFu7akoHC36ZvlnHfcCASq5ToAoGCCqGSM49 +AwEHoUQDQgAEwxFMxXUeq1k8ft3C1Yir+QvDxxhXy8rts3syLT2r0lCiBu/BGRWK +vxvX/cOBQS8u/7y7zOBwhzHaJpa1ovOt+Q== +-----END EC PRIVATE KEY----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/prime256v1-key.pem b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/prime256v1-key.pem new file mode 100644 index 0000000000000..51a3f018558fe --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/prime256v1-key.pem @@ -0,0 +1,8 @@ +-----BEGIN EC PARAMETERS----- +BggqhkjOPQMBBw== +-----END EC PARAMETERS----- +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIN74E4fO1Pq89hh7NYjUBFu7akoHC36ZvlnHfcCASq5ToAoGCCqGSM49 +AwEHoUQDQgAEwxFMxXUeq1k8ft3C1Yir+QvDxxhXy8rts3syLT2r0lCiBu/BGRWK +vxvX/cOBQS8u/7y7zOBwhzHaJpa1ovOt+Q== +-----END EC PRIVATE KEY----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.crt b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.crt new file mode 100644 index 0000000000000..d14805ddc89d4 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDizCCAnOgAwIBAgIJANif+/9AeRBYMA0GCSqGSIb3DQEBCwUAMCQxIjAgBgNV +BAMTGXRlc3RjbGllbnQtY2xpZW50LXByb2ZpbGUwHhcNMTUwOTIzMTg1MjU0WhcN +MTkwOTIyMTg1MjU0WjAkMSIwIAYDVQQDExl0ZXN0Y2xpZW50LWNsaWVudC1wcm9m +aWxlMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2g9jTDeAZMjXygm4 +Cb9CkBhSGN738ZOBiTGDH0q/3inE/qKJj/SF6RjHrVx5v9QUlqVTBX/NPd0VSFy7 +jbYtab8mlaYEAhatYQhHlEjcre1hjK3MW2oHhk+T9ScnIBeArBjUSUzwPAUM8FOj +0qGSK6QifrCf/5dQyq9D1WynwhGwUqp3WcZEvvaEl66QnBugViY7JRsuWOfIiJ7j +xkFv74Qm3yfCpQR+5TPAWd2ipRZZinwn9M0u0txiffZo7jemdpK5aiNAgJCGn/Sw +jlOzFU5pKxIIHeaR3rLWZ4P0WgMu+kqVPoy66UKBA6kXK4eMk1Wla7dG1gtp+OmQ +TQGqhwIDAQABo4G/MIG8MAkGA1UdEwQCMAAwHQYDVR0OBBYEFGgAgQUFrcvEOeed +swWf+D5DRoj9MIGPBgNVHREEgYcwgYSCCWxvY2FsaG9zdIIVbG9jYWxob3N0Lmxv +Y2FsZG9tYWluggpsb2NhbGhvc3Q0ghdsb2NhbGhvc3Q0LmxvY2FsZG9tYWluNIIK +bG9jYWxob3N0NoIXbG9jYWxob3N0Ni5sb2NhbGRvbWFpbjaHBH8AAAGHEAAAAAAA +AAAAAAAAAAAAAAEwDQYJKoZIhvcNAQELBQADggEBAKrKuUTmZ3WymiG6jRhLA5DA +36k7Hey8rvHZ6wbdnUseO+JJbItGEoEd0Js2syjghOnMf+M686prv0tWpaqA3ySh +uEcKSwy063eMmmTPXR9HfEK/QkdfC6otOvR/elgs1t0R45JFbHWSS1tJLOU/HU2i +3jwqy8Gwp+XJG9yrwC4OsJ1pAp7vCIoXrcq6glueiq9vjdAMYCtlrNIlDhmB+rYD +K8mf4S1AygSsZNUmh04oj8kBRacH4vdoKYkxvixPvo1F5VaoJcr+hh2xL4AyuqLc +gdoJmOv6Fk2UCUI2aI4nn0pXwkyOlx0t8B3Hp/TZY2Yn0x+uIIkKGJRm0CuKzNQ= +-----END CERTIFICATE----- diff --git 
a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.jks b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.jks new file mode 100644 index 0000000000000..2e0923de67a37 Binary files /dev/null and b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.jks differ diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.p12 b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.p12 new file mode 100644 index 0000000000000..cece3d1243208 Binary files /dev/null and b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.p12 differ diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.pem b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.pem new file mode 100644 index 0000000000000..f5ea25da6967f --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.pem @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,02BA9C3E433F0711 + +M3Ds1K8/cp5v9BKj9SstoXWlyj+CmasLfvPlz6O5Ootdai/wTirhxyAcYcsgjuQA +FIMuQiAaKC+W/DOP52+ilY3FZKw09DptdkaU7IFvSQvJ5SeiJIGLZ9REUWIPwRKV +qCHpKFCS4//mitnykjSu2HhTM+BjbXuytLJtxFJuodr8YhJGgtwfETAmbVlwWG3M +mhEsfdr0kOJGwT/z3dZbCxnGkjOY1FZCdMU/ggeYx93NP0c4yzNU6/A0idZRMlHE +BuSIkDhn3BXxgxYxTOgHzCkxaZ0T7kNv0MvTXrrcQf2WrMaG1FfTzCzF78YDBn7+ +kg5HI8wfxjGjDTnHH9KSuN5gi14DiEEUN45Ba3ChH75SjGwBbUjnHsGIp6Ch8mh8 +5ZgldgUOff0atEBaMpvoZAzxGbY5PhC5/Pfiq+enzvmICxszFNIRMhhLxWWBWuH9 +v04OOKZJ4v+ygZHvPJEFNc/XznGb39ELn4Y6P26fqNfPmv5/yHcenQPNZgGkKQEJ +SYTPxct+yDi3lul1WBPrvZwCOAjUtcCf4tBC6uOqlRjd7VREk5hKUlXTtxJLbaoB +FH1VKn9xZITBqk3g292KGbpZsGP5HtYAbB/gJUcOXZZbr8blMmpeIXXFWfPbvUcv +d1fokqHSrlclNt+h2pENqjJTc2iKKKlkNY0ol+cDej7fOgNH0sNOjuESrofdaXfa +4CPuvMxeQFKnjQFBLM14CFUyseZsAqnLjneUnk5qTPE5twCN0pkJx0y095pEGdrF +2UQ4HfdghnKxWQK50iGUhCgBfqeI3O3cfi76ZdeJYM+7CluGNb/0sybjF6kNiAME +m5PfaSXk7A+a/FteFThW52E8W8Sekf2/lOMdbdQa26/oUfcMWzBj+aMcJBwcOaq4 +lIY6IPHySQIakLGCLLOkYHAG+Kp8AldjcwfpOZ3Jk1HrjOmx9CfonXvdVQmS2BQZ +8+V2ECd0uw9EbD+i4VIIHy6H1a57W9gikshXsv4K5vQsl0s6GjbioN1JFFC78XKS +XohDMDmR2Ty3LU9TLoS9K2fLcgtoJmnEtJ1cs5Cw9+T1s+ILFgoBTXzePnDKK78T +4uNw7a8CRAKW++dHtwHEnHOIDJ0rx+4Y+V7e5Cr0nT4Idv5P9xe9knTghb34vwBw +SBfEn0sCQoly4DGsKlPz8xhLRCX7euXsbPcTy3owpTFywJsfjR5e/jtxa6C5NmNI +LmGJb6TpsAl9mULfSRlGjynqU9T0EzOdcbPs8z1cLFQl7QRi63tlZ9jWh+7eV+dR +NaN6f0jY0me0D7OzvoD19e3ESFARV7MGQ5U0OaZOAcLdx3CYjs777AQ1S2q6ra+p +gr4oqenWqYkbY8/z8AALiWPO59Rl00GcUJqwYXlJANCtK4ikwQPG2+YIYrclBW/d +ZQplHShy0AayYPguLI9ts0XLulIrcsJ0zlEDPf3b8+Gr6yONiYpOtA0ZtwE3w+Q5 +5byhoYoBQPtLXyLff31ozRs/MhJd8+gIIReKSE5sGMwbVX2ZfjWOXzDHp6+CQIKD +OhIfgnyG6tPQ2N4DhSrE/cEITPlSOv55NUnLQUCgfAHvAslgWwmVDk1ikznij/Zj +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt new file mode 100644 
index 0000000000000..18221208c162e --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID1zCCAr+gAwIBAgIJALnUl/KSS74pMA0GCSqGSIb3DQEBCwUAMEoxDDAKBgNV +BAoTA29yZzEWMBQGA1UECxMNZWxhc3RpY3NlYXJjaDEiMCAGA1UEAxMZRWxhc3Rp +Y3NlYXJjaCBUZXN0IENsaWVudDAeFw0xNTA5MjMxODUyNTVaFw0xOTA5MjIxODUy +NTVaMEoxDDAKBgNVBAoTA29yZzEWMBQGA1UECxMNZWxhc3RpY3NlYXJjaDEiMCAG +A1UEAxMZRWxhc3RpY3NlYXJjaCBUZXN0IENsaWVudDCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAMKm+P6vDAff0c6BWKGdhnYoNl9HijLIgfU3d9CQcqKt +wT+yUW3DPSVjIfaLmDIGj6Hl8jTHWPB7ZP4fzhrPi6m4qlRGclJMECBuNASZFiPD +tEDv3msoeqOKQet6n7PZvgpWM7hxYZO4P1aMKJtRsFAdvBAdZUnv0spR5G4UZTHz +SKmMeanIKFkLaD0XVKiLQu9/z9M6roDQeAEoCJ/8JsanG8ih2ymfPHIZuNyYIOrV +ekHN2zU6bnVn8/PCeZSjS6h5xYw+Jl5gzGI/n+F5CZ+THoH8pM4pGp6xRVzpiH12 +gvERGwgSIDXdn/+uZZj+4lE7n2ENRSOt5KcOGG99r60CAwEAAaOBvzCBvDAJBgNV +HRMEAjAAMB0GA1UdDgQWBBSSFhBXNp7AaNrHdlgCV0mCEzt7ajCBjwYDVR0RBIGH +MIGEgglsb2NhbGhvc3SCFWxvY2FsaG9zdC5sb2NhbGRvbWFpboIKbG9jYWxob3N0 +NIIXbG9jYWxob3N0NC5sb2NhbGRvbWFpbjSCCmxvY2FsaG9zdDaCF2xvY2FsaG9z +dDYubG9jYWxkb21haW42hwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMA0GCSqGSIb3 +DQEBCwUAA4IBAQANvAkddfLxn4/BCY4LY/1ET3d7ZRldjFTyjjHRYJ3CYBXWVahM +skLxIcFNca8YjKfXoX8mcK+NQK/dAbGHXqk76yMlkrKjh1OQiZ1YAX5ryYerGrZ9 +9N3E9wnbn72bW3iumoLlqmTWlHEpMI0Ql6J75BQLTgKHxCPupVA5sTbWkKwGjXXA +i84rUlzhDJOR8jk3/7ct0iZO8Hk6AWMcNix5Wka3IDGUXuEVevYRlxgVyCxcnZWC +7JWREpar5aIPQFkY6VCEglxwUyXbHZw5T/u6XaKKnS7gz8RiwRh68ddSQJeEHi5e +4onUD7bOCJgfsiUwdiCkDbfN9Yum8OIpmBRs +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.jks b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.jks new file mode 100644 index 0000000000000..d6dc21c1bd5ff Binary files /dev/null and b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.jks differ diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.p12 b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.p12 new file mode 100644 index 0000000000000..cbe7195f285b1 Binary files /dev/null and b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.p12 differ diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.pem b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.pem new file mode 100644 index 0000000000000..7268c55dba977 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.pem @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,C98A45E4AFC263C2 + +wLuUEXldYc54r4ryWd6jw6UMGYwn6+ibGKHp4sD92l42lmI2UrCT/Mb/E0O+KMMy +pHgc5/dBWkXgMiqDyLIhHk4kgT40rdw5W5lZkAA4Qt/Yzd+rbscTvzp09zrF6Fll +czgoE7FrvhOKiEOakerTit4pIPYosdX606cpVQE2lq9oZs9HVMcLzdAZj8A/P/4g +fo4X3+zqVYC/LH4n00bhNoeeej2o1lEJ+l9u9hptT2ATXle6pANa83Ldg4OxJyj8 +dkR9ahnAMCvYTSjEU7nwmGNPeFX0PIUjJKQivr410cYG104DC30Yy+XrIUfjTVUi +agwlMpHoBq79/ZRUJR3xPLkIGgw4g+RPt45D9eKsEsV4vqy8SFlgaoJ2mKUKleZy +i7D9ouzMKQ3sYE4eQVQ5o3K8ZPn5eozCwCVIp7jGSsuvDpLA9peZSwWPfc5y8JFD 
+/64usCt1J8Mv/e9NVllC8ZA+ZmDitTiwLZysczpMOaFqqeUbk9EJst38n4nBzRV2 +quxvg9W/iveQIydFyftCtNfRkpbp0NCsLz293dBYwZacHsPcY27IBCwXHiICjiAW +q7bnisXsgSaQMhMNRGW9YElZGb7ZWxoIzcyNBisGI8zxn48ObERVOmkOFxY/gs9T +YmpVMliWtmRG6hb6iCh9b7z8THRquxgTGE9ZFBwtLUKg33aubtgAfnUh/Xq2Ue5K +l+ZCqDGEi/FSIjVENUNNntAx/vXeNPbkoGLb/HSJwAh+sjpaLGQ54xixCtE9l3NY +o2QAiZ804KLPaGtbbOv7wPumxQ+8mxG5FN0hTRrsMW9t8pBXw47iMy/T2H21TD5D +E5XbM6kFeBrnsWnZJ2/ieXqDE4SX0tm3WEvZlDg7N7jV8QDM/D3Xdkb/sqJRabMG +tQRgwkLiB+mZ5MAfGLogI2/lOEayrBVz4qYdXojewxY4LtaZ5HiUIlyA9CJelMvD +nS52I6+FpaFhvuZC10qaM9Ph9TNyx+XKRUsPILuDiBRnYiHUKs1qASl5tjn2yyjM +71WSo7A7btOckzhDZdMVf1T472f0LGsRYoQebMhotqCuR7yArZHzTeWB0CjL3tOz +j3QlhKt2E1jx43bSK5tBasd9Bpmn2onvdwu1RRP8cyQBsXJSDy4/8t/g63+C3wod +8VPrlKhK+TenK9EoEqJ2mNuNq+duOjTXfK/7GM5s0BFKv+i2ckpDi1NPckd2gXjF +yUFZhmK6k0WC4jjWloMt+WQpi1rXMEXwCypgTrqWbvD0p6+X3uQmP57L4yHQcZoW +Qcs5GnihJ0DIhw9vYDhBhNo0WY1oBO20nVCN3R/JIpp3uDtg64WvfvMSXzJIPBCY +s+/GM5TtuD6mERDu3+qXxWwiy4PMQRcgjRTMEZ3A4Iv77YfQRkcd6S9qjUUuR/5D +xs+J4ryb1biz9ofW7I+Dbz4SArWSgwcuh14AV9RBv6Rh9m83rjT2K0yvbe/+7hHW +R8nzRMqJcGNGCHmRjA/cwoiv6+k2J/RbCJqnR3RmNex/85XaXBfZwRfHXVbzZQfa +SrFaaNLf1hMwGLAJjIcQRxa3yZbjFXVx1Bp4hh8rKNWaOItjavNtNg== +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.crt b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.crt new file mode 100644 index 0000000000000..47e5b37c28b35 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhzCCAm+gAwIBAgIJAMDqQhbo/w/YMA0GCSqGSIb3DQEBCwUAMCIxIDAeBgNV +BAMTF3Rlc3Rub2RlLWNsaWVudC1wcm9maWxlMB4XDTE1MDkyMzE4NTI1NloXDTE5 +MDkyMjE4NTI1NlowIjEgMB4GA1UEAxMXdGVzdG5vZGUtY2xpZW50LXByb2ZpbGUw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDh9VWS5kI7Sf1tJSCP3F+l +F1Ve2w9GgoRYjOz7jTWm8h8WPoMSGoCIEQqxm+Eo+AGCd58tacKO4ANxZbjh33hP +7bh7jYmj3qDZlwnTaM4RIvzPSgYzlgx7iBWXJZoxWi9jlEBehg/bB6+2tK0/4CJJ +27Xi2cjJDTcctyLEhuQEH6oLf0pBfK2UXv9LkoOkrnRzY+x4YBOlrPmho+5jU2iw +5DnFNKw4Cxdd9XnDpAprU2E02pCXN3mpcvDk2MdnzWvDh14j1eisxmAl7wbU5fdF +P50v5m8cOPreWUlS2WYIJ+nHLnbnmVE8F2FpXDupF5WK5BU/QHVnE6i5kwvMITsJ +AgMBAAGjgb8wgbwwCQYDVR0TBAIwADAdBgNVHQ4EFgQU08G5lM8hL/8f61uAkl3n +vJzyEv4wgY8GA1UdEQSBhzCBhIIJbG9jYWxob3N0ghVsb2NhbGhvc3QubG9jYWxk +b21haW6CCmxvY2FsaG9zdDSCF2xvY2FsaG9zdDQubG9jYWxkb21haW40ggpsb2Nh +bGhvc3Q2ghdsb2NhbGhvc3Q2LmxvY2FsZG9tYWluNocEfwAAAYcQAAAAAAAAAAAA +AAAAAAAAATANBgkqhkiG9w0BAQsFAAOCAQEAq84Eku8yLOcaxfBb7pHWaHHsaxXZ +k1lK3SZm49VTuwtMrGCYY7TBt4Kz1mnpQZd9KVyE4BzhpvUmD2ybSsyK/w1nYGVw +VyFEnutlIsYWs4rWwrvYoX1/B86WvNMBa+XFBnlO0HR5yTc/m5LKkNsZ3p2CGPfN +PCaok0NwS7cKIsWIgBaFlIYMYjZM+cL5qAWeOOgQoxPKi5uzZ/YqJbWf0jwpA/8s +M9QuqJzJMSGvwn0i74ulbubjnit2dMbrUUm6W65snw5qLDSqZzDH02EYYG4yMkmP +/S/SZFvw7fuOHFeJBoO3oej+ilEuYPkjkysr3Ys6xmprqExdDHUMElEycA== +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.jks b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.jks new file mode 100644 index 0000000000000..aed185d975567 Binary files /dev/null and b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.jks differ diff --git 
a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.p12 b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.p12 new file mode 100644 index 0000000000000..111d6a79f9b0e Binary files /dev/null and b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.p12 differ diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.pem b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.pem new file mode 100644 index 0000000000000..4f5b0e56782ee --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-client-profile.pem @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,AA1E05F81F24B5E3 + +Axn5ritl59GJQ4PLJDIk/IRvYSPsk+5JygqjZc4oeewhpbWqOjzVYROMD65DqN4k +sZw9vhDSm8TjhmuMWP+n1oJWZtAEKBcXbAhoG8lErsJPYx5xq9SYrbK6LWXYlVN2 +fbf5o77vQDqLiZf2QeXyufgLbEZ7yslYPBSMiOQfp2jwpHOQrlWIg7awheTpd8Dv +PQ/Jy45mGJ2gl5n1Yn+nZbIO/S+3Wy0AX0MmS7nBU4ogOkIMq4ZBFALnfmcdGZHJ +pfi2riNF6lTeMgHrGtCRY3yp9Hyu0Ve/2fYdfECcG2BUQ0TweZynMqE3kJokwwC6 +ua3T3WBnQws1MYxvYjy8WaSriuAfNvnu80rdiJRRcp8wBjbSYrxHq2BtYMrVuekt +lFdDCBAnmbTzYVFCez4K5l1Tbd0ilc7D8dFhHP07dkkcm57P2cm3u76O0pIqjzEx +DvYs33vdxiFq4Z4QPDbFh6Xz9uaDhy1gxlm8/giHd+JBPJlcoHD5C2Wtd0pI0tz/ +5n1bdFeejqItmfy45IFFrRn/jb7Q59olVNSbWH9RojLx9HQct4w6MScmv8n8J/r9 +PUrapSwwLEV6lYDBMsO5RXXDDUTJHcXVbr1nuU6BTCTTe1nMIK1+8pS1m4Cizjcg +9J3FWLktCqiCx8NI9O/QzIsbyA6R/NjmvYPOx0/nqtMCPNYJHuZyExMiVMMaPNly +Ic0HfnZyiwJPj52l67xzm4KwJH57piwldRGjVCj7IkVbHs2MRVsT0+j/QKGSU0uY +DTHIOSToCrzorh0j01fmG4zpMFkU/rtCmT3STkSFdsEsy9EORGx2C9t4mVth3V6e +Rc0y9TUcKGdtW13OiZN4o12aToC7yhg0aIpseWm8I0nnRg/TVtzoltOv85I0Lelr +vcQ8/jZ5Go8JZbTrMFmCdIPYvx+m7lL7kGx9Zt3hDpj+EBm0g5Gp44KUkxtI7Bs8 +1sUvSTyNKW8mixkiS4Bq4dGDqHIuvwW1x9PVoJODbHzTip5aAzJWmXA5rOxVp2kz +keA73TUpJo2UZkME8SiDGVlcnZAGx0odxe1tMBkc2GPpQVmz8Xmi75aEuvzBlAEh +2LWGnBhgN1T6cwvkJKgdbVHUlt3fUIPHzZhItMKrPbATMl5Rtf8Fkj4TYGvoxJZh +QPLuD7ngTORv84W0xnxlt2KO8hEkpKjTImu20n4FIb/YPlVxbYULnt9xRl/QaEol +7jjpZJ7zmuZBAEevOlQk/Jc05TbdttPoWU6zVsSswjuAn3lVRIK9Ad0iXzPDWkWU +Du41QXpkCXCoIdky1WZjmZPqLd/nnSdKSBt52ZtMimFix7uer/u9f+933lETSK7U +ImUFAP/pnlmCzll1Rmublb+WDyFV2lIZ2rZURIQUuQMCCcAFo9XmWgdTjJAOUdwX +qLvYUNIVb2B5uAUgm8Wn/vxUW04Jn1aQrPSPGBCo9KSy16DbkslSWpJeCHwS+FBP +Z8CedsnajbUnAa3HebykjlhOkob7wTCogzLjaPj07XYa/O2Dwmiv1GOnXrdie9Wi +ul0/CAm53ZtLOrgHmi1cPq9FtYAYJLboLu7eW2jZz2MjcugGG+0eGdzSoKSfWfOX +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-different-passwords.jks b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-different-passwords.jks new file mode 100644 index 0000000000000..cec32ed75a930 Binary files /dev/null and b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-different-passwords.jks differ diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-ip-only.crt b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-ip-only.crt new file mode 
100644 index 0000000000000..6ff84ba6674c5 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-ip-only.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDYjCCAkqgAwIBAgIJAKP3WbDN1WxEMA0GCSqGSIb3DQEBCwUAMEgxDDAKBgNV +BAoTA29yZzEWMBQGA1UECxMNZWxhc3RpY3NlYXJjaDEgMB4GA1UEAxMXRWxhc3Rp +Y3NlYXJjaCBUZXN0IE5vZGUwHhcNMTUwMTE5MTQwNDE3WhcNMTkwMTE4MTQwNDE3 +WjBIMQwwCgYDVQQKEwNvcmcxFjAUBgNVBAsTDWVsYXN0aWNzZWFyY2gxIDAeBgNV +BAMTF0VsYXN0aWNzZWFyY2ggVGVzdCBOb2RlMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA3a5+yBl2rVEGwlOjw6Ji43+iqvaAbmVhnCk6laEa3GpzothX +7HhtGGDfjdhaLzWF5PWP8SvMM8g4f1PLN0hGSR7vrWjlnpvUDXIHsoIRqWfYdwDA +RNiUvOI4FBjN4pZ4sXyUYsTpw80l6W0r3zopyycE4+4HJv55U1Yy2/3qzv1IITqD +LwRt6VpbPGVyzDSBMQXEgfT7sfaJB9Ru+A/onIpEicrWhgCPHrBnSUkKCKNj9AX/ +RV6/yQYnS/KhLx/eQTP7NVcbrC2J4fFOLX9oZAj6dir/tYQ6rDAMqBTnbhGygDqP +0RgCVf82n6mA23n7l5DaZ4RZl+ssN3fNqDyDpQIDAQABo08wTTAJBgNVHRMEAjAA +MB0GA1UdDgQWBBSDFYaN/Od9ad7Kztv6cGjd2X4w1TAhBgNVHREEGjAYhwR/AAAB +hxAAAAAAAAAAAAAAAAAAAAACMA0GCSqGSIb3DQEBCwUAA4IBAQCbxk4VHMcdD2yU +VpLSBxHBdWY/Gn3f7k0WWhQAmRPR+S6vSr89hVO8UIkqEFzc+D19s9h0XvAmo2QO +G80OLTcHIjxiVqAVWoGUPH4D7FdG7sSBbrJbweIBMW8Ba4kefWGcI0KlUWTssFyE +YLJQIUCIdKtVf/qmcItvrEXw8ucSYaExvizMClTvf2fZxRws8Omo3dxn+ifUH5nn +evQW8Tawlx2ql5P6wHTSUclGLv1CXtMAnuOlyaYY/UbslNhSXigxYOZCI3ole3qL +Z0dBah+eCspOit14mi7jHPoS1Yji/CSh33KZD6ZESuv/V1B7oy6zZWei/b4vllW5 +RwZx1Y/r +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-ip-only.jks b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-ip-only.jks new file mode 100644 index 0000000000000..7e757a3704939 Binary files /dev/null and b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-ip-only.jks differ diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.cert b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.cert new file mode 100644 index 0000000000000..ced9d81d96fa6 --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.cert @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDTTCCAjWgAwIBAgIJALL7dwEsWamvMA0GCSqGSIb3DQEBCwUAME8xDDAKBgNV +BAoTA29yZzEWMBQGA1UECxMNZWxhc3RpY3NlYXJjaDEnMCUGA1UEAxMeRWxhc3Rp +Y3NlYXJjaCBUZXN0IE5vZGUgTm8gU0FOMB4XDTE0MTIxNjE5NTcyNloXDTE4MTIx +NTE5NTcyNlowTzEMMAoGA1UEChMDb3JnMRYwFAYDVQQLEw1lbGFzdGljc2VhcmNo +MScwJQYDVQQDEx5FbGFzdGljc2VhcmNoIFRlc3QgTm9kZSBObyBTQU4wggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCkIGS7A/V6TesR34ajMyNYL3tB1OjW +Raq4KtF8FfW1H6nHGrWa/qXjZWPirczy1k2n6ZL7YOCcv/YeY8xAqC9mGQxvEuqo +EaqXq2cjRdAs/7zqzRkdPPi3Jw/p/RHrDfOAzOsMnBGc0G2Hrsj//aP44vp85pek +fM3t2kNAYZWYCzXUqWAIUoxBDK4DcQdsN8H4KTMIwQEEiRtcKnL/b8QGKsyGLfLq +36ZABHZ4kY2SmcP3bWxZtbFN4hamdwoAtYe+lS0/ee8/fOTLyZ3Ey+X6EEmGO1lk +WR4XLli15k1L2HBzWGG7zwxVEC5r2h3Sx1njYh/Jq3khIdSvDbiMmM+VAgMBAAGj +LDAqMAkGA1UdEwQCMAAwHQYDVR0OBBYEFGm8wrYF9mJweJ1vloDw19e0PUuIMA0G +CSqGSIb3DQEBCwUAA4IBAQBbEZ73weDphNIcmvN25v6NIfjBebqgm0/2grDFwmZe +Z1DibzRoVfoQ7WeUqbPS7SHUQ+KzIN1GdfHXhW9r6mmLbtzPv90Q/8zBcNv5HNZZ +YK+T2r9hoAWEY6nB1fiOJ4udkFMYfAi6LiSxave4IPWp/WIqd0IWtPtkPl+MmG41 +TfRom8TnO+o+VsjgDkY5Q1JDsNQKy1BrtxzIZyz7d1zYKTQ+HXZ4yeYJoVoc3k4y 
+6w9eX2zAUZ6Z3d4an6CLr6Hew9Dj2VX1vqCj1a5/VvHZVyVxyh4hg8sHYm7tZOJX +wN3B5GcKwbbFjaMVBLaMlP62OdGg7tCh61evWm+l06S0 +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.jks b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.jks new file mode 100644 index 0000000000000..ec482775bd055 Binary files /dev/null and b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.jks differ diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt new file mode 100644 index 0000000000000..08c160bcea5ff --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID0zCCArugAwIBAgIJALi5bDfjMszLMA0GCSqGSIb3DQEBCwUAMEgxDDAKBgNV +BAoTA29yZzEWMBQGA1UECxMNZWxhc3RpY3NlYXJjaDEgMB4GA1UEAxMXRWxhc3Rp +Y3NlYXJjaCBUZXN0IE5vZGUwHhcNMTUwOTIzMTg1MjU3WhcNMTkwOTIyMTg1MjU3 +WjBIMQwwCgYDVQQKEwNvcmcxFjAUBgNVBAsTDWVsYXN0aWNzZWFyY2gxIDAeBgNV +BAMTF0VsYXN0aWNzZWFyY2ggVGVzdCBOb2RlMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA3rGZ1QbsW0+MuyrSLmMfDFKtLBkIFW8V0gRuurFg1PUKKNR1 +Mq2tMVwjjYETAU/UY0iKZOzjgvYPKhDTYBTte/WHR1ZK4CYVv7TQX/gtFQG/ge/c +7u0sLch9p7fbd+/HZiLS/rBEZDIohvgUvzvnA8+OIYnw4kuxKo/5iboAIS41klMg +/lATm8V71LMY68inht71/ZkQoAHKgcR9z4yNYvQ1WqKG8DG8KROXltll3sTrKbl5 +zJhn660es/1ZnR6nvwt6xnSTl/mNHMjkfv1bs4rJ/py3qPxicdoSIn/KyojUcgHV +F38fuAy2CQTdjVG5fWj9iz+mQvLm3+qsIYQdFwIDAQABo4G/MIG8MAkGA1UdEwQC +MAAwHQYDVR0OBBYEFEMMWLWQi/g83PzlHYqAVnty5L7HMIGPBgNVHREEgYcwgYSC +CWxvY2FsaG9zdIIVbG9jYWxob3N0LmxvY2FsZG9tYWluggpsb2NhbGhvc3Q0ghds +b2NhbGhvc3Q0LmxvY2FsZG9tYWluNIIKbG9jYWxob3N0NoIXbG9jYWxob3N0Ni5s +b2NhbGRvbWFpbjaHBH8AAAGHEAAAAAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQEL +BQADggEBAMjGGXT8Nt1tbl2GkiKtmiuGE2Ej66YuZ37WSJViaRNDVHLlg87TCcHe +k2rdO+6sFqQbbzEfwQ05T7xGmVu7tm54HwKMRugoQ3wct0bQC5wEWYN+oMDvSyO6 +M28mZwWb4VtR2IRyWP+ve5DHwTM9mxWa6rBlGzsQqH6YkJpZojzqk/mQTug+Y8aE +mVoqRIPMHq9ob+S9qd5lp09+MtYpwPfTPx/NN+xMEooXWW/ARfpGhWPkg/FuCu4z +1tFmCqHgNcWirzMm3dQpF78muE9ng6OB2MXQwL4VgnVkxmlZNHbkR2v/t8MyZJxC +y4g6cTMM3S/UMt5/+aIB2JAuMKyuD+A= +-----END CERTIFICATE----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks new file mode 100644 index 0000000000000..832a23d2dbf09 Binary files /dev/null and b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks differ diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.p12 b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.p12 new file mode 100644 index 0000000000000..2ac8125b58d31 Binary files /dev/null and b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.p12 differ diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem 
b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem new file mode 100644 index 0000000000000..5a67e1033440d --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,9D867F7E0C94D013 + +dVoVCjPeg1wgS7rVtOvGfQcrZyLkx393aWRnFq45tbjKBVuITtJ9vI7o4QXOV/15 +Gnb6WhXGIdWrzsxEAd46K6hIuNSISd4Emsx6c2Q5hTqWXXfexbOZBNfTtXtdJPnJ +1jAaikhtztLo3JSLTKNY5sNxd+XbaQyYVUWvueK6zOaIIMETvB+VPVFd9i1ROibk +Sgdtyj01KjkoalifqK/tA0CIYNKL0S6/eoK3UhAlpIprlpV+cnXa940C6bjLeJPt +PMAGGp5RrplxSgrSerw3I9DOWkHGtpqzIka3XneNUXJP8k4HUJ+aZkGH2ZILKS8d +4KMIb+KZSpHEGn+6uGccWLtZZmAjWJrDw56JbQtSHdRYLBRSOjLbTvQoPu/2Hpli +7HOxbotlvjptMunncq5aqK57SHA1dh0cwF7J3LUmGFJ67eoz+VV3b5qMn4MopSeI +mS16Ydd3nGpjSrln/elM0CQxqWfcOAXRZpDpFUQoXcBrLVzvz2DBl/0CrTRLhgzi +CO+5/IVcBWRlYpRNGgjjP7q0j6URID3jk5J06fYQXmBiwQT5j+GZqqzpMCJ9mIy2 +1O9SN1hebJnIcEU+E0njn/MGjlYdPywhaCy8pqElp6Q8TUEJpwLRFO/owCoBet/n +ZmCXUjfCGhc1pWHufFcDEQ6xMgEWWY/tdwCZeSU7EhErTjCbfupg+55A5fpDml0m +3wH4CFcuRjlqyx6Ywixm1ATeitDtJl5HQTw6b8OtEXwSgRmZ0eSqSRVk9QbVS7gu +IpQe09/Zimb5HzjZqZ3fdqHlcW4xax8hyJeyIvF5ZJ57eY8CBvu/wP2GDn26QnvF +xQqdfDbq1H4JmpwUHpbFwBoQK4Q6WFd1z4EA9bRQeo3H9PoqoOwMDjzajwLRF7b7 +q6tYH/n9PyHwdf1c4fFwgSmL1toXGfKlA9hjIaLsRSDD6srT5EdUk78bsnddwI51 +tu7C7P4JG+h1VdRNMNTlqtileWsIE7Nn2A1OkcUxZdF5mamENpDpJcHePLto6c8q +FKiwyFMsxhgsj6HK2HqO+UA4sX5Ni4oHwiPmb//EZLn045M5i1AN26KosJmb8++D +sgR5reWRy+UqJCTYblVg+7Dx++ggUnfxVyQEsWmw5r5f4KU5wXBkvoVMGtPNa9DE +n/uLtObD1qkNL38pRsr2OGRchYCgEoKGqEISBP4knfGXLOlWiW/246j9QzI97r1u +tvy7fKg28G7AUz9l6bpewsPHefBUeRQeieP9eJINaEpxkF/w2RpKDLpQjWxwDDOM +s+D0mrBMJve17AmJ8rMw6dIQPZYNZ88/jz1uQuUwQ2YlbmtZbCG81k9YMFGEU9XS +cyhJxj8hvYnt2PR5Z9/cJPyWOs0m/ufOeeQQ8SnU/lzmrQnpzUd2Z6p5i/B7LdRP +n1kX+l1qynuPnjvBz4nJQE0p6nzW8RyCDSniC9mtYtZmhgC8icqxgbvS7uEOBIYJ +NbK+0bEETTO34iY/JVTIqLOw3iQZYMeUpxpj6Phgx/oooxMTquMecPKNgeVtaBst +qjTNPX0ti1/HYpZqzYi8SV8YjHSJWCVMsZjKPr3W/HIcCKqYoIfgzi83Ha2KMQx6 +-----END RSA PRIVATE KEY----- diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/truststore-testnode-only.jks b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/truststore-testnode-only.jks new file mode 100644 index 0000000000000..f18b9288b10b8 Binary files /dev/null and b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/truststore-testnode-only.jks differ diff --git a/x-pack/plugin/sql/build.gradle b/x-pack/plugin/sql/build.gradle new file mode 100644 index 0000000000000..c52413aa4e1d6 --- /dev/null +++ b/x-pack/plugin/sql/build.gradle @@ -0,0 +1,116 @@ +evaluationDependsOn(xpackModule('core')) + +apply plugin: 'elasticsearch.esplugin' +esplugin { + name 'x-pack-sql' + description 'The Elasticsearch plugin that powers SQL for Elasticsearch' + classname 'org.elasticsearch.xpack.sql.plugin.SqlPlugin' + extendedPlugins = ['x-pack-core'] +} + +configurations { + // Bundles the sql-cli.jar into the distribution + bin +} + +archivesBaseName = 'x-pack-sql' + +// All integration tests live in qa modules +integTest.enabled = false + +dependencies { + compileOnly "org.elasticsearch.plugin:x-pack-core:${version}" + compile project('sql-proto') + compile "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}" + compile "org.antlr:antlr4-runtime:4.5.3" + testCompile 
"org.elasticsearch.test:framework:${version}" + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') + testCompile project(path: ':modules:reindex', configuration: 'runtime') + testCompile project(path: ':modules:parent-join', configuration: 'runtime') + testCompile project(path: ':modules:analysis-common', configuration: 'runtime') + + bin(project(path: xpackModule('sql:sql-cli'))) { + // sql-cli bundles all of its dependencies into a single executable jar + transitive = false + } +} + +/* Bundle the sql-cli into the binary files. It should end up + * in $ES_HOME/bin/x-pack/. This is useful because it is an + * executable jar that can be moved wherever it is needed. + */ +bundlePlugin { + from (configurations.bin) { + into 'bin' + } +} + +/********************************************** + * SQL Parser regeneration * + **********************************************/ + +configurations { + regenerate +} + +dependencies { + regenerate 'org.antlr:antlr4:4.5.3' +} + +String grammarPath = 'src/main/antlr' +String outputPath = 'src/main/java/org/elasticsearch/xpack/sql/parser' + +task cleanGenerated(type: Delete) { + delete fileTree(grammarPath) { + include '*.tokens' + } + delete fileTree(outputPath) { + include 'SqlBase*.java' + } +} + +task regenParser(type: JavaExec) { + dependsOn cleanGenerated + main = 'org.antlr.v4.Tool' + classpath = configurations.regenerate + systemProperty 'file.encoding', 'UTF-8' + systemProperty 'user.language', 'en' + systemProperty 'user.country', 'US' + systemProperty 'user.variant', '' + args '-Werror', + '-package', 'org.elasticsearch.xpack.sql.parser', + '-listener', + '-visitor', + '-o', outputPath, + "${file(grammarPath)}/SqlBase.g4" +} + +task regen { + dependsOn regenParser + doLast { + // moves token files to grammar directory for use with IDE's + ant.move(file: "${outputPath}/SqlBase.tokens", toDir: grammarPath) + ant.move(file: "${outputPath}/SqlBaseLexer.tokens", toDir: grammarPath) + // make the generated classes package private + ant.replaceregexp(match: 'public ((interface|class) \\QSqlBase\\E\\w+)', + replace: '\\1', + encoding: 'UTF-8') { + fileset(dir: outputPath, includes: 'SqlBase*.java') + } + // nuke timestamps/filenames in generated files + ant.replaceregexp(match: '\\Q// Generated from \\E.*', + replace: '\\/\\/ ANTLR GENERATED CODE: DO NOT EDIT', + encoding: 'UTF-8') { + fileset(dir: outputPath, includes: 'SqlBase*.java') + } + // remove tabs in antlr generated files + ant.replaceregexp(match: '\t', flags: 'g', replace: ' ', encoding: 'UTF-8') { + fileset(dir: outputPath, includes: 'SqlBase*.java') + } + // fix line endings + ant.fixcrlf(srcdir: outputPath, eol: 'lf') { + patternset(includes: 'SqlBase*.java') + } + } +} diff --git a/x-pack/plugin/sql/jdbc/build.gradle b/x-pack/plugin/sql/jdbc/build.gradle new file mode 100644 index 0000000000000..2cc7946d9b9c2 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/build.gradle @@ -0,0 +1,219 @@ +apply plugin: 'elasticsearch.build' +apply plugin: 'nebula.maven-base-publish' +apply plugin: 'nebula.maven-scm' + +description = 'JDBC driver for Elasticsearch' + +forbiddenApisMain { + // does not depend on core, so only jdk and http signatures should be checked + signaturesURLs = [this.class.getResource('/forbidden/jdk-signatures.txt')] +} + +/* + * Bundle as many of our dependencies as we can get away with into the jar. 
+ * We can't currently bundle *all* dependencies into the jar, but we'd like + * to avoid publishing the sql shared libraries if possible. This allows that. + * + * It is possible to configure this bundling in a bunch of different ways + * but this particular way generates a pom that doesn't declare the bundled + * dependencies as dependencies. Which is a good thing because we don't publish + * them and we don't want consumers to get two copies of them. + * + * We'd *like* to shade these dependencies, at least ones like jackson which we + * know that we can't remove entirely. But for now something like this is + * simpler. + */ +configurations { + bundled +} +sourceSets { + main { + compileClasspath += configurations.bundled + } + test { + compileClasspath += configurations.bundled + } +} +javadoc { + classpath += configurations.bundled +} +jar { + from({configurations.bundled.collect { it.isDirectory() ? it : zipTree(it) }}) { + // We don't need the META-INF from the things we bundle. For now. + exclude 'META-INF/*' + } +} + +dependencies { + + // Eclipse doesn't know how to deal with these bundled dependencies so make them compile + // dependencies if we are running in Eclipse + if (isEclipse) { + compile (xpackProject('plugin:sql:sql-shared-client')) { + transitive = false + } + compile (xpackProject('plugin:sql:sql-proto')) { + transitive = false + } + } else { + bundled (xpackProject('plugin:sql:sql-shared-client')) { + transitive = false + } + bundled (xpackProject('plugin:sql:sql-proto')) { + transitive = false + } + } + compile (project(':server')) { + transitive = false + } + compile (project(':libs:x-content')) { + transitive = false + } + compile "org.apache.lucene:lucene-core:${versions.lucene}" + compile 'joda-time:joda-time:2.9.9' + compile project(':libs:elasticsearch-core') + runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + runtime "org.apache.logging.log4j:log4j-api:${versions.log4j}" + runtime "org.apache.logging.log4j:log4j-core:${versions.log4j}" + + testCompile "org.elasticsearch.test:framework:${version}" +} + +dependencyLicenses { + mapping from: /sql-proto.*/, to: 'elasticsearch' + mapping from: /sql-shared-client.*/, to: 'elasticsearch' + mapping from: /jackson-.*/, to: 'jackson' + mapping from: /lucene-.*/, to: 'lucene' + mapping from: /elasticsearch-core.*/, to: 'elasticsearch' + ignoreSha 'sql-proto' + ignoreSha 'sql-shared-client' + ignoreSha 'elasticsearch' + ignoreSha 'elasticsearch-core' +} + +/* + * Temporary zip file to make the jdbc driver more usable during the 6.3 + * release. We'd like to remove this in future releases when the jdbc driver + * bundles or shades all of its dependencies. But for now this should help + * non-maven jdbc users, specifically those folks using BI tools.
+ */ +task zipWithDependencies(type: Zip) { + from configurations.runtime + from configurations.runtime.artifacts.files + baseName 'elasticsearch-jdbc-with-dependencies' + into "elasticsearch-jdbc-with-dependencies-$version" +} +assemble.dependsOn zipWithDependencies + +// Use the jar for testing so the tests are more "real" +test { + classpath -= compileJava.outputs.files + classpath += jar.outputs.files + dependsOn jar +} + +thirdPartyAudit.excludes = [ + 'com.fasterxml.jackson.dataformat.yaml.YAMLFactory', + 'com.fasterxml.jackson.dataformat.yaml.YAMLMapper', + + // from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml) + 'com.fasterxml.jackson.databind.ObjectMapper', + 'org.fusesource.jansi.Ansi', + 'org.fusesource.jansi.AnsiRenderer$Code', + + // from log4j + 'com.conversantmedia.util.concurrent.DisruptorBlockingQueue', + 'com.conversantmedia.util.concurrent.SpinPolicy', + 'com.fasterxml.jackson.annotation.JsonInclude$Include', + 'com.fasterxml.jackson.databind.DeserializationContext', + 'com.fasterxml.jackson.databind.DeserializationFeature', + 'com.fasterxml.jackson.databind.JsonMappingException', + 'com.fasterxml.jackson.databind.JsonNode', + 'com.fasterxml.jackson.databind.Module$SetupContext', + 'com.fasterxml.jackson.databind.ObjectReader', + 'com.fasterxml.jackson.databind.ObjectWriter', + 'com.fasterxml.jackson.databind.SerializerProvider', + 'com.fasterxml.jackson.databind.deser.std.StdDeserializer', + 'com.fasterxml.jackson.databind.deser.std.StdScalarDeserializer', + 'com.fasterxml.jackson.databind.module.SimpleModule', + 'com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter', + 'com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider', + 'com.fasterxml.jackson.databind.ser.std.StdScalarSerializer', + 'com.fasterxml.jackson.databind.ser.std.StdSerializer', + 'com.fasterxml.jackson.dataformat.xml.JacksonXmlModule', + 'com.fasterxml.jackson.dataformat.xml.XmlMapper', + 'com.fasterxml.jackson.dataformat.xml.util.DefaultXmlPrettyPrinter', + 'com.fasterxml.jackson.databind.node.JsonNodeFactory', + 'com.fasterxml.jackson.databind.node.ObjectNode', + 'com.lmax.disruptor.BlockingWaitStrategy', + 'com.lmax.disruptor.BusySpinWaitStrategy', + 'com.lmax.disruptor.EventFactory', + 'com.lmax.disruptor.EventTranslator', + 'com.lmax.disruptor.EventTranslatorTwoArg', + 'com.lmax.disruptor.EventTranslatorVararg', + 'com.lmax.disruptor.ExceptionHandler', + 'com.lmax.disruptor.LifecycleAware', + 'com.lmax.disruptor.RingBuffer', + 'com.lmax.disruptor.Sequence', + 'com.lmax.disruptor.SequenceReportingEventHandler', + 'com.lmax.disruptor.SleepingWaitStrategy', + 'com.lmax.disruptor.TimeoutBlockingWaitStrategy', + 'com.lmax.disruptor.WaitStrategy', + 'com.lmax.disruptor.YieldingWaitStrategy', + 'com.lmax.disruptor.dsl.Disruptor', + 'com.lmax.disruptor.dsl.ProducerType', + 'javax.jms.Connection', + 'javax.jms.ConnectionFactory', + 'javax.jms.Destination', + 'javax.jms.JMSException', + 'javax.jms.MapMessage', + 'javax.jms.Message', + 'javax.jms.MessageConsumer', + 'javax.jms.MessageProducer', + 'javax.jms.Session', + 'javax.mail.Authenticator', + 'javax.mail.Message$RecipientType', + 'javax.mail.PasswordAuthentication', + 'javax.mail.Session', + 'javax.mail.Transport', + 'javax.mail.internet.InternetAddress', + 'javax.mail.internet.InternetHeaders', + 'javax.mail.internet.MimeBodyPart', + 'javax.mail.internet.MimeMessage', + 'javax.mail.internet.MimeMultipart', + 'javax.mail.internet.MimeUtility', + 'javax.mail.util.ByteArrayDataSource', + 
'javax.persistence.AttributeConverter', + 'javax.persistence.EntityManager', + 'javax.persistence.EntityManagerFactory', + 'javax.persistence.EntityTransaction', + 'javax.persistence.Persistence', + 'javax.persistence.PersistenceException', + 'org.apache.commons.compress.compressors.CompressorStreamFactory', + 'org.apache.commons.compress.utils.IOUtils', + 'org.apache.commons.csv.CSVFormat', + 'org.apache.commons.csv.QuoteMode', + 'org.apache.kafka.clients.producer.Callback', + 'org.apache.kafka.clients.producer.KafkaProducer', + 'org.apache.kafka.clients.producer.Producer', + 'org.apache.kafka.clients.producer.ProducerRecord', + 'org.apache.kafka.clients.producer.RecordMetadata', + 'org.codehaus.stax2.XMLStreamWriter2', + 'org.jctools.queues.MessagePassingQueue$Consumer', + 'org.jctools.queues.MpscArrayQueue', + 'org.osgi.framework.AdaptPermission', + 'org.osgi.framework.AdminPermission', + 'org.osgi.framework.Bundle', + 'org.osgi.framework.BundleActivator', + 'org.osgi.framework.BundleContext', + 'org.osgi.framework.BundleEvent', + 'org.osgi.framework.BundleReference', + 'org.osgi.framework.FrameworkUtil', + 'org.osgi.framework.ServiceRegistration', + 'org.osgi.framework.SynchronousBundleListener', + 'org.osgi.framework.wiring.BundleWire', + 'org.osgi.framework.wiring.BundleWiring', + 'org.zeromq.ZMQ$Context', + 'org.zeromq.ZMQ$Socket', + 'org.zeromq.ZMQ' +] diff --git a/x-pack/plugin/sql/jdbc/licenses/jackson-LICENSE b/x-pack/plugin/sql/jdbc/licenses/jackson-LICENSE new file mode 100644 index 0000000000000..f5f45d26a49d6 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/licenses/jackson-LICENSE @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/x-pack/plugin/sql/jdbc/licenses/jackson-NOTICE b/x-pack/plugin/sql/jdbc/licenses/jackson-NOTICE new file mode 100644 index 0000000000000..4c976b7b4cc58 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/licenses/jackson-NOTICE @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. 
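The JDBC driver produced by the build file above (and repackaged by the `zipWithDependencies` task for non-Maven users) is a standard JDBC 4 driver, so once its jar is on the classpath it can be exercised through the plain `java.sql` API. The sketch below is illustrative only, not part of this change: the `jdbc:es://` URL prefix, the host/port, and the `SELECT 1` query are assumptions about how the driver is addressed, not something defined by this build file.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class JdbcSmokeTest {
    public static void main(String[] args) throws Exception {
        // Assumed URL format for the Elasticsearch SQL JDBC driver; adjust host and port as needed.
        String url = "jdbc:es://localhost:9200";
        // JDBC 4 drivers register themselves via META-INF/services, so no explicit Class.forName is required.
        try (Connection con = DriverManager.getConnection(url);
             Statement st = con.createStatement();
             ResultSet rs = st.executeQuery("SELECT 1")) {
            while (rs.next()) {
                System.out.println(rs.getInt(1));
            }
        }
    }
}
```

Because the driver's transitive dependencies are not declared in its pom, tools that cannot resolve Maven dependencies would add every jar from the `elasticsearch-jdbc-with-dependencies` archive to the classpath before running something like the sketch above.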
diff --git a/x-pack/plugin/sql/jdbc/licenses/jackson-core-2.8.10.jar.sha1 b/x-pack/plugin/sql/jdbc/licenses/jackson-core-2.8.10.jar.sha1 new file mode 100644 index 0000000000000..a322d371e265e --- /dev/null +++ b/x-pack/plugin/sql/jdbc/licenses/jackson-core-2.8.10.jar.sha1 @@ -0,0 +1 @@ +eb21a035c66ad307e66ec8fce37f5d50fd62d039 \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/licenses/joda-time-2.9.9.jar.sha1 b/x-pack/plugin/sql/jdbc/licenses/joda-time-2.9.9.jar.sha1 new file mode 100644 index 0000000000000..4009932ea3beb --- /dev/null +++ b/x-pack/plugin/sql/jdbc/licenses/joda-time-2.9.9.jar.sha1 @@ -0,0 +1 @@ +f7b520c458572890807d143670c9b24f4de90897 \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/licenses/joda-time-LICENSE.txt b/x-pack/plugin/sql/jdbc/licenses/joda-time-LICENSE.txt new file mode 100644 index 0000000000000..75b52484ea471 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/licenses/joda-time-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/sql/jdbc/licenses/joda-time-NOTICE.txt b/x-pack/plugin/sql/jdbc/licenses/joda-time-NOTICE.txt new file mode 100644 index 0000000000000..dffbcf31cacf6 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/licenses/joda-time-NOTICE.txt @@ -0,0 +1,5 @@ +============================================================================= += NOTICE file corresponding to section 4d of the Apache License Version 2.0 = +============================================================================= +This product includes software developed by +Joda.org (http://www.joda.org/). diff --git a/x-pack/plugin/sql/jdbc/licenses/log4j-api-2.9.1.jar.sha1 b/x-pack/plugin/sql/jdbc/licenses/log4j-api-2.9.1.jar.sha1 new file mode 100644 index 0000000000000..e1a89fadfed95 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/licenses/log4j-api-2.9.1.jar.sha1 @@ -0,0 +1 @@ +7a2999229464e7a324aa503c0a52ec0f05efe7bd \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/licenses/log4j-api-LICENSE.txt b/x-pack/plugin/sql/jdbc/licenses/log4j-api-LICENSE.txt new file mode 100644 index 0000000000000..6279e5206de13 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/licenses/log4j-api-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 1999-2005 The Apache Software Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/x-pack/plugin/sql/jdbc/licenses/log4j-api-NOTICE.txt b/x-pack/plugin/sql/jdbc/licenses/log4j-api-NOTICE.txt new file mode 100644 index 0000000000000..0375732360047 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/licenses/log4j-api-NOTICE.txt @@ -0,0 +1,5 @@ +Apache log4j +Copyright 2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/licenses/log4j-core-2.9.1.jar.sha1 b/x-pack/plugin/sql/jdbc/licenses/log4j-core-2.9.1.jar.sha1 new file mode 100644 index 0000000000000..990ea322a7613 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/licenses/log4j-core-2.9.1.jar.sha1 @@ -0,0 +1 @@ +c041978c686866ee8534f538c6220238db3bb6be \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/licenses/log4j-core-LICENSE.txt b/x-pack/plugin/sql/jdbc/licenses/log4j-core-LICENSE.txt new file mode 100644 index 0000000000000..6279e5206de13 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/licenses/log4j-core-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 1999-2005 The Apache Software Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/sql/jdbc/licenses/log4j-core-NOTICE.txt b/x-pack/plugin/sql/jdbc/licenses/log4j-core-NOTICE.txt new file mode 100644 index 0000000000000..0375732360047 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/licenses/log4j-core-NOTICE.txt @@ -0,0 +1,5 @@ +Apache log4j +Copyright 2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/licenses/lucene-LICENSE.txt b/x-pack/plugin/sql/jdbc/licenses/lucene-LICENSE.txt new file mode 100644 index 0000000000000..28b134f5f8e4d --- /dev/null +++ b/x-pack/plugin/sql/jdbc/licenses/lucene-LICENSE.txt @@ -0,0 +1,475 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + + +Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was +derived from unicode conversion examples available at +http://www.unicode.org/Public/PROGRAMS/CVTUTF. Here is the copyright +from those sources: + +/* + * Copyright 2001-2004 Unicode, Inc. + * + * Disclaimer + * + * This source code is provided as is by Unicode, Inc. No claims are + * made as to fitness for any particular purpose. No warranties of any + * kind are expressed or implied. The recipient agrees to determine + * applicability of information provided. If this file has been + * purchased on magnetic or optical media from Unicode, Inc., the + * sole remedy for any claim will be exchange of defective media + * within 90 days of receipt. + * + * Limitations on Rights to Redistribute This Code + * + * Unicode, Inc. hereby grants the right to freely use the information + * supplied in this file in the creation of products supporting the + * Unicode Standard, and to make copies of this file in any form + * for internal or external distribution as long as this notice + * remains attached. + */ + + +Some code in core/src/java/org/apache/lucene/util/ArrayUtil.java was +derived from Python 2.4.2 sources available at +http://www.python.org. Full license is here: + + http://www.python.org/download/releases/2.4.2/license/ + +Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was +derived from Python 3.1.2 sources available at +http://www.python.org. Full license is here: + + http://www.python.org/download/releases/3.1.2/license/ + +Some code in core/src/java/org/apache/lucene/util/automaton was +derived from Brics automaton sources available at +www.brics.dk/automaton/. Here is the copyright from those sources: + +/* + * Copyright (c) 2001-2009 Anders Moeller + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +The levenshtein automata tables in core/src/java/org/apache/lucene/util/automaton +were automatically generated with the moman/finenight FSA package. 
+Here is the copyright for those sources: + +# Copyright (c) 2010, Jean-Philippe Barrette-LaPierre, +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. + +Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was +derived from ICU (http://www.icu-project.org) +The full license is available here: + http://source.icu-project.org/repos/icu/icu/trunk/license.html + +/* + * Copyright (C) 1999-2010, International Business Machines + * Corporation and others. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, and/or sell copies of the + * Software, and to permit persons to whom the Software is furnished to do so, + * provided that the above copyright notice(s) and this permission notice appear + * in all copies of the Software and that both the above copyright notice(s) and + * this permission notice appear in supporting documentation. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE + * LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR + * ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER + * IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * Except as contained in this notice, the name of a copyright holder shall not + * be used in advertising or otherwise to promote the sale, use or other + * dealings in this Software without prior written authorization of the + * copyright holder. + */ + +The following license applies to the Snowball stemmers: + +Copyright (c) 2001, Dr Martin Porter +Copyright (c) 2002, Richard Boulton +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * Neither the name of the copyright holders nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The following license applies to the KStemmer: + +Copyright © 2003, +Center for Intelligent Information Retrieval, +University of Massachusetts, Amherst. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +3. The names "Center for Intelligent Information Retrieval" and +"University of Massachusetts" must not be used to endorse or promote products +derived from this software without prior written permission. To obtain +permission, contact info@ciir.cs.umass.edu. + +THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF MASSACHUSETTS AND OTHER CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +The following license applies to the Morfologik project: + +Copyright (c) 2006 Dawid Weiss +Copyright (c) 2007-2011 Dawid Weiss, Marcin Miłkowski +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Morfologik nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +--- + +The dictionary comes from Morfologik project. Morfologik uses data from +Polish ispell/myspell dictionary hosted at http://www.sjp.pl/slownik/en/ and +is licenced on the terms of (inter alia) LGPL and Creative Commons +ShareAlike. The part-of-speech tags were added in Morfologik project and +are not found in the data from sjp.pl. The tagset is similar to IPI PAN +tagset. + +--- + +The following license applies to the Morfeusz project, +used by org.apache.lucene.analysis.morfologik. + +BSD-licensed dictionary of Polish (SGJP) +http://sgjp.pl/morfeusz/ + +Copyright © 2011 Zygmunt Saloni, Włodzimierz Gruszczyński, + Marcin Woliński, Robert Wołosz + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the + distribution. + +THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS “AS IS” AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/x-pack/plugin/sql/jdbc/licenses/lucene-NOTICE.txt b/x-pack/plugin/sql/jdbc/licenses/lucene-NOTICE.txt new file mode 100644 index 0000000000000..1a1d51572432a --- /dev/null +++ b/x-pack/plugin/sql/jdbc/licenses/lucene-NOTICE.txt @@ -0,0 +1,192 @@ +Apache Lucene +Copyright 2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). 
+ +Includes software from other Apache Software Foundation projects, +including, but not limited to: + - Apache Ant + - Apache Jakarta Regexp + - Apache Commons + - Apache Xerces + +ICU4J, (under analysis/icu) is licensed under an MIT styles license +and Copyright (c) 1995-2008 International Business Machines Corporation and others + +Some data files (under analysis/icu/src/data) are derived from Unicode data such +as the Unicode Character Database. See http://unicode.org/copyright.html for more +details. + +Brics Automaton (under core/src/java/org/apache/lucene/util/automaton) is +BSD-licensed, created by Anders Møller. See http://www.brics.dk/automaton/ + +The levenshtein automata tables (under core/src/java/org/apache/lucene/util/automaton) were +automatically generated with the moman/finenight FSA library, created by +Jean-Philippe Barrette-LaPierre. This library is available under an MIT license, +see http://sites.google.com/site/rrettesite/moman and +http://bitbucket.org/jpbarrette/moman/overview/ + +The class org.apache.lucene.util.WeakIdentityMap was derived from +the Apache CXF project and is Apache License 2.0. + +The Google Code Prettify is Apache License 2.0. +See http://code.google.com/p/google-code-prettify/ + +JUnit (junit-4.10) is licensed under the Common Public License v. 1.0 +See http://junit.sourceforge.net/cpl-v10.html + +This product includes code (JaspellTernarySearchTrie) from Java Spelling Checkin +g Package (jaspell): http://jaspell.sourceforge.net/ +License: The BSD License (http://www.opensource.org/licenses/bsd-license.php) + +The snowball stemmers in + analysis/common/src/java/net/sf/snowball +were developed by Martin Porter and Richard Boulton. +The snowball stopword lists in + analysis/common/src/resources/org/apache/lucene/analysis/snowball +were developed by Martin Porter and Richard Boulton. +The full snowball package is available from + http://snowball.tartarus.org/ + +The KStem stemmer in + analysis/common/src/org/apache/lucene/analysis/en +was developed by Bob Krovetz and Sergio Guzman-Lara (CIIR-UMass Amherst) +under the BSD-license. + +The Arabic,Persian,Romanian,Bulgarian, Hindi and Bengali analyzers (common) come with a default +stopword list that is BSD-licensed created by Jacques Savoy. These files reside in: +analysis/common/src/resources/org/apache/lucene/analysis/ar/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/fa/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/ro/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/bg/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/hi/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/bn/stopwords.txt +See http://members.unine.ch/jacques.savoy/clef/index.html. + +The German,Spanish,Finnish,French,Hungarian,Italian,Portuguese,Russian and Swedish light stemmers +(common) are based on BSD-licensed reference implementations created by Jacques Savoy and +Ljiljana Dolamic. 
These files reside in: +analysis/common/src/java/org/apache/lucene/analysis/de/GermanLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/de/GermanMinimalStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/es/SpanishLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchMinimalStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/it/ItalianLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishLightStemmer.java + +The Stempel analyzer (stempel) includes BSD-licensed software developed +by the Egothor project http://egothor.sf.net/, created by Leo Galambos, Martin Kvapil, +and Edmond Nolan. + +The Polish analyzer (stempel) comes with a default +stopword list that is BSD-licensed created by the Carrot2 project. The file resides +in stempel/src/resources/org/apache/lucene/analysis/pl/stopwords.txt. +See http://project.carrot2.org/license.html. + +The SmartChineseAnalyzer source code (smartcn) was +provided by Xiaoping Gao and copyright 2009 by www.imdict.net. + +WordBreakTestUnicode_*.java (under modules/analysis/common/src/test/) +is derived from Unicode data such as the Unicode Character Database. +See http://unicode.org/copyright.html for more details. + +The Morfologik analyzer (morfologik) includes BSD-licensed software +developed by Dawid Weiss and Marcin Miłkowski (http://morfologik.blogspot.com/). + +Morfologik uses data from Polish ispell/myspell dictionary +(http://www.sjp.pl/slownik/en/) licenced on the terms of (inter alia) +LGPL and Creative Commons ShareAlike. + +Morfologic includes data from BSD-licensed dictionary of Polish (SGJP) +(http://sgjp.pl/morfeusz/) + +Servlet-api.jar and javax.servlet-*.jar are under the CDDL license, the original +source code for this can be found at http://www.eclipse.org/jetty/downloads.php + +=========================================================================== +Kuromoji Japanese Morphological Analyzer - Apache Lucene Integration +=========================================================================== + +This software includes a binary and/or source version of data from + + mecab-ipadic-2.7.0-20070801 + +which can be obtained from + + http://atilika.com/releases/mecab-ipadic/mecab-ipadic-2.7.0-20070801.tar.gz + +or + + http://jaist.dl.sourceforge.net/project/mecab/mecab-ipadic/2.7.0-20070801/mecab-ipadic-2.7.0-20070801.tar.gz + +=========================================================================== +mecab-ipadic-2.7.0-20070801 Notice +=========================================================================== + +Nara Institute of Science and Technology (NAIST), +the copyright holders, disclaims all warranties with regard to this +software, including all implied warranties of merchantability and +fitness, in no event shall NAIST be liable for +any special, indirect or consequential damages or any damages +whatsoever resulting from loss of use, data or profits, whether in an +action of contract, negligence or other tortuous action, arising out +of or in connection with the use or performance of this software. 
+ +A large portion of the dictionary entries +originate from ICOT Free Software. The following conditions for ICOT +Free Software applies to the current dictionary as well. + +Each User may also freely distribute the Program, whether in its +original form or modified, to any third party or parties, PROVIDED +that the provisions of Section 3 ("NO WARRANTY") will ALWAYS appear +on, or be attached to, the Program, which is distributed substantially +in the same form as set out herein and that such intended +distribution, if actually made, will neither violate or otherwise +contravene any of the laws and regulations of the countries having +jurisdiction over the User or the intended distribution itself. + +NO WARRANTY + +The program was produced on an experimental basis in the course of the +research and development conducted during the project and is provided +to users as so produced on an experimental basis. Accordingly, the +program is provided without any warranty whatsoever, whether express, +implied, statutory or otherwise. The term "warranty" used herein +includes, but is not limited to, any warranty of the quality, +performance, merchantability and fitness for a particular purpose of +the program and the nonexistence of any infringement or violation of +any right of any third party. + +Each user of the program will agree and understand, and be deemed to +have agreed and understood, that there is no warranty whatsoever for +the program and, accordingly, the entire risk arising from or +otherwise connected with the program is assumed by the user. + +Therefore, neither ICOT, the copyright holder, or any other +organization that participated in or was otherwise related to the +development of the program and their respective officials, directors, +officers and other employees shall be held liable for any and all +damages, including, without limitation, general, special, incidental +and consequential damages, arising out of or otherwise in connection +with the use or inability to use the program or any product, material +or result produced or otherwise obtained by using the program, +regardless of whether they have been advised of, or otherwise had +knowledge of, the possibility of such damages at any time during the +project or thereafter. Each user will be deemed to have agreed to the +foregoing by his or her commencement of use of the program. The term +"use" as used herein includes, but is not limited to, the use, +modification, copying and distribution of the program and the +production of secondary products from the program. + +In the case where the program, whether in its original form or +modified, was distributed or delivered to or received by a user from +any person, organization or entity other than ICOT, unless it makes or +grants independently of ICOT any specific warranty to the user in +writing, such person, organization or entity, will also be exempted +from and not be held liable to the user for any such damages as noted +above as far as the program is concerned. 
diff --git a/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.3.0.jar.sha1 b/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..e12c932b38dd0 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.3.0.jar.sha1 @@ -0,0 +1 @@ +040e2de30c5e6bad868b144e371730200719ceb3 \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcException.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcException.java new file mode 100644 index 0000000000000..412c374e8a25b --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcException.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc; + +public class JdbcException extends RuntimeException { + + public JdbcException(String message) { + super(message); + } + + public JdbcException(Throwable cause, String message) { + super(message, cause); + } +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcSQLException.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcSQLException.java new file mode 100644 index 0000000000000..352e075a570b3 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcSQLException.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc; + +import java.sql.SQLException; + +public class JdbcSQLException extends SQLException { + + public JdbcSQLException(String message) { + super(message); + } + + public JdbcSQLException(Throwable cause, String message) { + super(message, cause); + } +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ConnectionProxy.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ConnectionProxy.java new file mode 100644 index 0000000000000..016fcc87d24ca --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ConnectionProxy.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.debug; + +import java.sql.DatabaseMetaData; +import java.sql.Statement; + +final class ConnectionProxy extends DebuggingInvoker { + + ConnectionProxy(DebugLog log, Object target) { + super(log, target, null); + } + + @Override + protected Object postProcess(Object result, Object proxy) { + if (result instanceof Statement) { + return Debug.proxy(result, new StatementProxy(log, result, proxy)); + } + if (result instanceof DatabaseMetaData) { + return Debug.proxy(new DatabaseMetadataProxy(log, result, proxy)); + } + + return result; + } +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DatabaseMetadataProxy.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DatabaseMetadataProxy.java new file mode 100644 index 0000000000000..0c7cc0dcb4fbc --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DatabaseMetadataProxy.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.debug; + +import java.sql.ResultSet; + +final class DatabaseMetadataProxy extends DebuggingInvoker { + + DatabaseMetadataProxy(DebugLog log, Object result, Object parent) { + super(log, result, parent); + } + + @Override + protected Object postProcess(Object result, Object proxy) { + if (result instanceof ResultSet) { + return Debug.proxy(new ResultSetProxy(log, result, null)); + } + return result; + } +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/Debug.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/Debug.java new file mode 100644 index 0000000000000..41bc80c104052 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/Debug.java @@ -0,0 +1,238 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.debug; + +import org.elasticsearch.xpack.sql.client.shared.SuppressForbidden; +import org.elasticsearch.xpack.sql.jdbc.JdbcException; +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; + +import java.io.OutputStreamWriter; +import java.io.PrintStream; +import java.io.PrintWriter; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.Proxy; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.sql.CallableStatement; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.DriverManager; +import java.sql.ParameterMetaData; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.Statement; +import java.util.HashMap; +import java.util.Map; + +import javax.sql.DataSource; + +/** + * Class handling debug logging. Typically disabled (hence why it's called debug). + * JDBC carries a lot of legacy conventions, logging being one of them - in JDBC logging was expected to + * be to System.Err/Out since there were no logging frameworks at the time. 
+ * This didn't work so the API was changed through {@link DriverManager#getLogStream()} however that also had issues + * being global and not working well with encoding (hence why {@link DriverManager#getLogWriter()} was introduced) + * and was changed again through {@link DataSource#getLogWriter()}. + * However by then the damage was done and most drivers don't use either and have their own logging implementation. + * + * This class tries to cater to both audiences - use the legacy, Writer way if needed though strive to use the + * proper typical approach, that of specifying intention and output (file) in the URL. + * + * For this reason the {@link System#out} and {@link System#err} are being refered in this class though are used only + * when needed. + */ +public final class Debug { + + // cache for streams created by ourselves + private static final Map OUTPUT_CACHE = new HashMap<>(); + // reference counter for a given output + private static final Map OUTPUT_REFS = new HashMap<>(); + // cache of loggers that rely on external/managed printers + private static final Map OUTPUT_MANAGED = new HashMap<>(); + + private static volatile DebugLog ERR = null, OUT = null; + private static volatile PrintStream SYS_ERR = null, SYS_OUT = null; + + /** + * Create a proxied Connection which performs logging of all methods being invoked. + * Typically Debug will read its configuration from the configuration and act accordingly however + * there are two cases where the output is specified programmatically, namely through + * {@link DriverManager#setLogWriter(PrintWriter)} and {@link DataSource#setLogWriter(PrintWriter)}. + * The former is the 'legacy' way, having a global impact on all drivers while the latter allows per + * instance configuration. + * + * As both approaches are not widely used, Debug will take the principle of least surprise and pick its + * own configuration first; if that does not exist it will fallback to the managed approaches (assuming they + * are specified, otherwise logging is simply disabled). + */ + public static Connection proxy(JdbcConfiguration info, Connection connection, PrintWriter managedPrinter) { + return createProxy(Connection.class, new ConnectionProxy(logger(info, managedPrinter), connection)); + } + + static DatabaseMetaData proxy(DatabaseMetadataProxy handler) { + return createProxy(DatabaseMetaData.class, handler); + } + + static ParameterMetaData proxy(ParameterMetaDataProxy handler) { + return createProxy(ParameterMetaData.class, handler); + } + + static ResultSet proxy(ResultSetProxy handler) { + return createProxy(ResultSet.class, handler); + } + + static ResultSetMetaData proxy(ResultSetMetaDataProxy handler) { + return createProxy(ResultSetMetaData.class, handler); + } + + static Statement proxy(Object statement, StatementProxy handler) { + Class i = Statement.class; + + if (statement instanceof PreparedStatement) { + i = PreparedStatement.class; + } + else if (statement instanceof CallableStatement) { + i = CallableStatement.class; + } + + return createProxy(i, handler); + } + + @SuppressWarnings("unchecked") + private static

<P> P createProxy(Class<P>
proxy, InvocationHandler handler) { + return (P) Proxy.newProxyInstance(Debug.class.getClassLoader(), new Class[] { DebugProxy.class, proxy }, handler); + } + + private static DebugLog logger(JdbcConfiguration info, PrintWriter managedPrinter) { + DebugLog log = null; + + if (managedPrinter != null) { + synchronized (Debug.class) { + log = OUTPUT_MANAGED.get(managedPrinter); + if (log == null) { + log = new DebugLog(managedPrinter); + OUTPUT_MANAGED.put(managedPrinter, log); + } + return log; + } + } + + String out = info.debugOut(); + + // System.out/err can be changed so do some checks + if ("err".equals(out)) { + PrintStream sys = stderr(); + + if (SYS_ERR == null) { + SYS_ERR = sys; + } + if (SYS_ERR != sys) { + SYS_ERR.flush(); + SYS_ERR = sys; + ERR = null; + } + if (ERR == null) { + ERR = new DebugLog(new PrintWriter(new OutputStreamWriter(sys, StandardCharsets.UTF_8))); + } + return ERR; + } + + if ("out".equals(out)) { + PrintStream sys = stdout(); + + if (SYS_OUT == null) { + SYS_OUT = sys; + } + + if (SYS_OUT != sys) { + SYS_OUT.flush(); + SYS_OUT = sys; + OUT = null; + } + + if (OUT == null) { + OUT = new DebugLog(new PrintWriter(new OutputStreamWriter(sys, StandardCharsets.UTF_8))); + } + return OUT; + } + + synchronized (Debug.class) { + log = OUTPUT_CACHE.get(out); + if (log == null) { + // must be local file + try { + PrintWriter print = new PrintWriter(Files.newBufferedWriter(Paths.get("").resolve(out), StandardCharsets.UTF_8)); + log = new DebugLog(print); + OUTPUT_CACHE.put(out, log); + OUTPUT_REFS.put(out, Integer.valueOf(0)); + } catch (Exception ex) { + throw new JdbcException(ex, "Cannot open debug output [" + out + "]"); + } + } + OUTPUT_REFS.put(out, Integer.valueOf(OUTPUT_REFS.get(out).intValue() + 1)); + } + + return log; + } + + public static void release(JdbcConfiguration info) { + if (!info.debug()) { + return; + } + + String out = info.debugOut(); + synchronized (Debug.class) { + Integer ref = OUTPUT_REFS.get(out); + if (ref != null) { + int r = ref.intValue(); + if (r < 2) { + OUTPUT_REFS.remove(out); + DebugLog d = OUTPUT_CACHE.remove(out); + if (d != null) { + if (d.print != null) { + d.print.close(); + } + } + } + else { + OUTPUT_REFS.put(out, Integer.valueOf(r - 1)); + } + } + } + } + + public static synchronized void close() { + // clear the ref + OUTPUT_REFS.clear(); + + // clear the streams + for (DebugLog d : OUTPUT_CACHE.values()) { + if (d.print != null) { + d.print.close(); + } + } + OUTPUT_CACHE.clear(); + + // flush the managed ones + for (DebugLog d : OUTPUT_MANAGED.values()) { + d.print.flush(); + } + + OUTPUT_MANAGED.clear(); + } + + @SuppressForbidden(reason = "JDBC drivers allows logging to Sys.out") + private static PrintStream stdout() { + return System.out; + } + + @SuppressForbidden(reason = "JDBC drivers allows logging to Sys.err") + private static PrintStream stderr() { + return System.err; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DebugLog.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DebugLog.java new file mode 100644 index 0000000000000..28444c1f8ee0c --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DebugLog.java @@ -0,0 +1,100 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.debug; + +import org.elasticsearch.xpack.sql.client.shared.StringUtils; + +import java.io.PrintWriter; +import java.lang.reflect.Array; +import java.lang.reflect.Method; +import java.util.Locale; + +// Logging is done through PrintWriter (not PrintStream which maps to System.err/out) to plug into the JDBC API +final class DebugLog { + private static final String HEADER = "%tF/%tT.%tL - "; + + final PrintWriter print; + + DebugLog(PrintWriter print) { + this.print = print; + } + + void logMethod(Method m, Object[] args) { + long time = System.currentTimeMillis(); + print.printf(Locale.ROOT, HEADER + "Invoke %s#%s(%s)%n", + time, time, time, + //m.getReturnType().getSimpleName(), + m.getDeclaringClass().getSimpleName(), + m.getName(), + //array(m.getParameterTypes()), + array(args)); + } + + + void logResult(Method m, Object[] args, Object r) { + long time = System.currentTimeMillis(); + print.printf(Locale.ROOT, HEADER + "%s#%s(%s) returned %s%n", + time, time, time, + //m.getReturnType().getSimpleName(), + m.getDeclaringClass().getSimpleName(), + m.getName(), + //array(m.getParameterTypes()), + array(args), + r); + } + + void logException(Method m, Object[] args, Throwable t) { + long time = System.currentTimeMillis(); + print.printf(Locale.ROOT, HEADER + "%s#%s(%s) threw ", + time, time, time, + m.getDeclaringClass().getSimpleName(), + m.getName(), + array(args)); + t.printStackTrace(print); + print.flush(); + } + + + private static String array(Object[] a) { + if (a == null || a.length == 0) { + return StringUtils.EMPTY; + } + if (a.length == 1) { + return handleArray(a[0]); + } + + StringBuilder b = new StringBuilder(); + int iMax = a.length - 1; + for (int i = 0; ; i++) { + b.append(handleArray(a[i])); + if (i == iMax) { + return b.toString(); + } + b.append(", "); + } + } + + private static String handleArray(Object o) { + if (o != null && o.getClass().isArray()) { + StringBuilder b = new StringBuilder(); + int l = Array.getLength(o); + int iMax = l - 1; + + if (iMax == -1) + return "[]"; + + b.append('['); + for (int i = 0; i < l; i++) { + b.append(handleArray(Array.get(o, i))); + if (i == iMax) { + return b.append("]").toString(); + } + b.append(", "); + } + } + return String.valueOf(o); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DebugProxy.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DebugProxy.java new file mode 100644 index 0000000000000..90d775f31a79d --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DebugProxy.java @@ -0,0 +1,13 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.debug; + +/** + * Debug marker interface for compatible proxy. + */ +interface DebugProxy { + +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DebuggingInvoker.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DebuggingInvoker.java new file mode 100644 index 0000000000000..4320841471cd7 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/DebuggingInvoker.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.debug; + +import org.elasticsearch.xpack.sql.jdbc.JdbcSQLException; + +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; + +abstract class DebuggingInvoker implements InvocationHandler { + + private final Object target; + // used by subclasses to indicate the parent instance that creates the object + // for example a PreparedStatement has a Connection as parent + // the instance is kept around instead of reproxying to preserve the semantics (instead of creating a new proxy) + protected final Object parent; + + final DebugLog log; + + DebuggingInvoker(DebugLog log, Object target, Object parent) { + this.log = log; + this.target = target; + this.parent = parent; + } + + @Override + public final Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + String name = method.getName(); + Class[] params = method.getParameterTypes(); + + if ("equals".equals(name) && params.length == 1 && params[0] == Object.class) { + Object o = args[0]; + if (o == null || !(o instanceof DebugProxy)) { + return Boolean.FALSE; + } + InvocationHandler ih = Proxy.getInvocationHandler(o); + return (ih instanceof DebuggingInvoker && target.equals(((DebuggingInvoker) ih).target)); + } + + else if ("hashCode".equals(name) && params.length == 0) { + return System.identityHashCode(proxy); + } + + else if ("toString".equals(name) && params.length == 0) { + return "Debug proxy for " + target; + } + + try { + Object result = method.invoke(target, args); + log.logResult(method, args, result); + return result == null || result instanceof DebugProxy ? result : postProcess(result, proxy); + } catch (InvocationTargetException ex) { + log.logException(method, args, ex.getCause()); + throw ex.getCause(); + } catch (Exception ex) { + // should not occur + log.logException(method, args, ex); + throw new JdbcSQLException(ex, "Debugging failed for [" + method + "]"); + } + } + + protected Object postProcess(Object result, Object proxy) { + return result; + } + + Object target() { + return target; + } +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ParameterMetaDataProxy.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ParameterMetaDataProxy.java new file mode 100644 index 0000000000000..22d0cea3cc32c --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ParameterMetaDataProxy.java @@ -0,0 +1,13 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.debug; + +final class ParameterMetaDataProxy extends DebuggingInvoker { + + ParameterMetaDataProxy(DebugLog log, Object target) { + super(log, target, null); + } +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ResultSetMetaDataProxy.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ResultSetMetaDataProxy.java new file mode 100644 index 0000000000000..18b2e583e8644 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ResultSetMetaDataProxy.java @@ -0,0 +1,13 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.debug; + +final class ResultSetMetaDataProxy extends DebuggingInvoker { + + ResultSetMetaDataProxy(DebugLog log, Object target) { + super(log, target, null); + } +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ResultSetProxy.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ResultSetProxy.java new file mode 100644 index 0000000000000..417adbb0f2ec6 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/ResultSetProxy.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.debug; + +import java.sql.ResultSetMetaData; +import java.sql.Statement; + +class ResultSetProxy extends DebuggingInvoker { + + ResultSetProxy(DebugLog log, Object target, Object parent) { + super(log, target, parent); + } + + @Override + protected Object postProcess(Object result, Object proxy) { + if (result instanceof ResultSetMetaData) { + return Debug.proxy(new ResultSetMetaDataProxy(log, result)); + } + if (result instanceof Statement) { + return parent; + } + return result; + } +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/StatementProxy.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/StatementProxy.java new file mode 100644 index 0000000000000..7c5d19553fc9e --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/debug/StatementProxy.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.debug; + +import java.sql.Connection; +import java.sql.ParameterMetaData; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; + +// handles Statement, PreparedStatement and CallableStatement +final class StatementProxy extends DebuggingInvoker { + + StatementProxy(DebugLog log, Object target, Object con) { + super(log, target, con); + } + + @Override + protected Object postProcess(Object result, Object proxy) { + if (result instanceof Connection) { + return parent; + } + if (result instanceof ResultSet) { + return Debug.proxy(new ResultSetProxy(log, result, proxy)); + } + if (result instanceof ParameterMetaData) { + return Debug.proxy(new ParameterMetaDataProxy(log, result)); + } + if (result instanceof ResultSetMetaData) { + return Debug.proxy(new ResultSetMetaDataProxy(log, result)); + } + + return result; + } +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcConfiguration.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcConfiguration.java new file mode 100644 index 0000000000000..1d90c8c08ca89 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcConfiguration.java @@ -0,0 +1,193 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration; +import org.elasticsearch.xpack.sql.client.shared.StringUtils; +import org.elasticsearch.xpack.sql.client.shared.Version; +import org.elasticsearch.xpack.sql.jdbc.JdbcSQLException; + +import java.net.URI; +import java.sql.DriverPropertyInfo; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Properties; +import java.util.Set; +import java.util.TimeZone; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.sql.client.shared.UriUtils.parseURI; +import static org.elasticsearch.xpack.sql.client.shared.UriUtils.removeQuery; + +/** + / Supports the following syntax + / + / jdbc:es://[host|ip] + / jdbc:es://[host|ip]:port/(prefix) + / jdbc:es://[host|ip]:port/(prefix)(?options=value&) + / + / Additional properties can be specified either through the Properties object or in the URL. In case of duplicates, the URL wins. + */ +//TODO: beef this up for Security/SSL +public class JdbcConfiguration extends ConnectionConfiguration { + static final String URL_PREFIX = "jdbc:es://"; + public static URI DEFAULT_URI = URI.create("http://localhost:9200/"); + + + static final String DEBUG = "debug"; + static final String DEBUG_DEFAULT = "false"; + + static final String DEBUG_OUTPUT = "debug.output"; + // can be out/err/url + static final String DEBUG_OUTPUT_DEFAULT = "err"; + + public static final String TIME_ZONE = "timezone"; + // follow the JDBC spec and use the JVM default... 
+ // to avoid inconsistency, the default is picked up once at startup and reused across connections + // to cater to the principle of least surprise + // really, the way to move forward is to specify a calendar or the timezone manually + static final String TIME_ZONE_DEFAULT = TimeZone.getDefault().getID(); + + // options that don't change at runtime + private static final Set OPTION_NAMES = new LinkedHashSet<>(Arrays.asList(TIME_ZONE, DEBUG, DEBUG_OUTPUT)); + + static { + // trigger version initialization + // typically this should have already happened but in case the + // JdbcDriver/JdbcDataSource are not used and the impl. classes used directly + // this covers that case + Version.CURRENT.toString(); + } + + // immutable properties + private final String originalUrl; + private final boolean debug; + private final String debugOut; + + // mutable ones + private TimeZone timeZone; + + public static JdbcConfiguration create(String u, Properties props, int loginTimeoutSeconds) throws JdbcSQLException { + URI uri = parseUrl(u); + Properties urlProps = parseProperties(uri, u); + uri = removeQuery(uri, u, DEFAULT_URI); + + // override properties set in the URL with the ones specified programmatically + if (props != null) { + urlProps.putAll(props); + } + + if (loginTimeoutSeconds > 0) { + urlProps.setProperty(CONNECT_TIMEOUT, Long.toString(TimeUnit.SECONDS.toMillis(loginTimeoutSeconds))); + } + + try { + return new JdbcConfiguration(uri, u, urlProps); + } catch (JdbcSQLException e) { + throw e; + } catch (Exception ex) { + throw new JdbcSQLException(ex, ex.getMessage()); + } + } + + private static URI parseUrl(String u) throws JdbcSQLException { + String url = u; + String format = "jdbc:es://[http|https]?[host[:port]]*/[prefix]*[?[option=value]&]*"; + if (!canAccept(u)) { + throw new JdbcSQLException("Expected [" + URL_PREFIX + "] url, received [" + u + "]"); + } + + try { + return parseURI(removeJdbcPrefix(u), DEFAULT_URI); + } catch (IllegalArgumentException ex) { + throw new JdbcSQLException(ex, "Invalid URL [" + url + "], format should be [" + format + "]"); + } + } + + private static String removeJdbcPrefix(String connectionString) throws JdbcSQLException { + if (connectionString.startsWith(URL_PREFIX)) { + return connectionString.substring(URL_PREFIX.length()); + } else { + throw new JdbcSQLException("Expected [" + URL_PREFIX + "] url, received [" + connectionString + "]"); + } + } + + private static Properties parseProperties(URI uri, String u) throws JdbcSQLException { + Properties props = new Properties(); + try { + if (uri.getRawQuery() != null) { + // parse properties + List prms = StringUtils.tokenize(uri.getRawQuery(), "&"); + for (String param : prms) { + List args = StringUtils.tokenize(param, "="); + if (args.size() != 2) { + throw new JdbcSQLException("Invalid parameter [" + param + "], format needs to be key=value"); + } + // further validation happens in the constructor (since extra properties might be specified either way) + props.setProperty(args.get(0).trim(), args.get(1).trim()); + } + } + } catch (JdbcSQLException e) { + throw e; + } catch (Exception e) { + // Add the url to unexpected exceptions + throw new IllegalArgumentException("Failed to parse acceptable jdbc url [" + u + "]", e); + } + return props; + } + + // constructor is private to force the use of a factory in order to catch and convert any validation exception + // and also do input processing as oppose to handling this from the constructor (which is tricky or impossible) + private JdbcConfiguration(URI 
baseURI, String u, Properties props) throws JdbcSQLException { + super(baseURI, u, props); + + this.originalUrl = u; + + this.debug = parseValue(DEBUG, props.getProperty(DEBUG, DEBUG_DEFAULT), Boolean::parseBoolean); + this.debugOut = props.getProperty(DEBUG_OUTPUT, DEBUG_OUTPUT_DEFAULT); + + this.timeZone = parseValue(TIME_ZONE, props.getProperty(TIME_ZONE, TIME_ZONE_DEFAULT), TimeZone::getTimeZone); + } + + @Override + protected Collection extraOptions() { + return OPTION_NAMES; + } + + public boolean debug() { + return debug; + } + + public String debugOut() { + return debugOut; + } + + public TimeZone timeZone() { + return timeZone; + } + + public void timeZone(TimeZone timeZone) { + this.timeZone = timeZone; + } + + public static boolean canAccept(String url) { + return (StringUtils.hasText(url) && url.trim().startsWith(JdbcConfiguration.URL_PREFIX)); + } + + public DriverPropertyInfo[] driverPropertyInfo() { + List info = new ArrayList<>(); + for (String option : OPTION_NAMES) { + String value = null; + DriverPropertyInfo prop = new DriverPropertyInfo(option, value); + info.add(prop); + } + + return info.toArray(new DriverPropertyInfo[info.size()]); + } +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcConnection.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcConnection.java new file mode 100644 index 0000000000000..17f8973cea386 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcConnection.java @@ -0,0 +1,431 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.xpack.sql.jdbc.debug.Debug; +import org.elasticsearch.xpack.sql.jdbc.net.client.JdbcHttpClient; + +import java.sql.Array; +import java.sql.Blob; +import java.sql.CallableStatement; +import java.sql.Clob; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.NClob; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLClientInfoException; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLWarning; +import java.sql.SQLXML; +import java.sql.Savepoint; +import java.sql.Statement; +import java.sql.Struct; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; + +/** + * Implementation of {@link Connection} for Elasticsearch. 
+ */
+public class JdbcConnection implements Connection, JdbcWrapper {
+
+    private final String url, userName;
+    final JdbcConfiguration cfg;
+    final JdbcHttpClient client;
+
+    private boolean closed = false;
+    private String catalog;
+    private String schema;
+
+    public JdbcConnection(JdbcConfiguration connectionInfo) throws SQLException {
+        cfg = connectionInfo;
+        client = new JdbcHttpClient(connectionInfo);
+
+        url = connectionInfo.connectionString();
+        userName = connectionInfo.authUser();
+    }
+
+    private void checkOpen() throws SQLException {
+        if (isClosed()) {
+            throw new SQLException("Connection is closed");
+        }
+    }
+
+    @Override
+    public Statement createStatement() throws SQLException {
+        checkOpen();
+        return new JdbcStatement(this, cfg);
+    }
+
+    @Override
+    public PreparedStatement prepareStatement(String sql) throws SQLException {
+        checkOpen();
+        return new JdbcPreparedStatement(this, cfg, sql);
+    }
+
+    @Override
+    public CallableStatement prepareCall(String sql) throws SQLException {
+        throw new SQLFeatureNotSupportedException("Stored procedures not supported yet");
+    }
+
+    @Override
+    public String nativeSQL(String sql) throws SQLException {
+        checkOpen();
+        return sql;
+    }
+
+    @Override
+    public void setAutoCommit(boolean autoCommit) throws SQLException {
+        checkOpen();
+        if (!autoCommit) {
+            throw new SQLFeatureNotSupportedException("Non auto-commit is not supported");
+        }
+    }
+
+    @Override
+    public boolean getAutoCommit() throws SQLException {
+        checkOpen();
+        return true;
+    }
+
+    @Override
+    public void commit() throws SQLException {
+        checkOpen();
+        if (getAutoCommit()) {
+            throw new SQLException("Auto-commit is enabled");
+        }
+        throw new SQLFeatureNotSupportedException("Commit/Rollback not supported");
+    }
+
+    @Override
+    public void rollback() throws SQLException {
+        checkOpen();
+        if (getAutoCommit()) {
+            throw new SQLException("Auto-commit is enabled");
+        }
+        throw new SQLFeatureNotSupportedException("Commit/Rollback not supported");
+    }
+
+    @Override
+    public void close() throws SQLException {
+        if (!isClosed()) {
+            closed = true;
+            Debug.release(cfg);
+        }
+    }
+
+    @Override
+    public boolean isClosed() {
+        return closed;
+    }
+
+    @Override
+    public DatabaseMetaData getMetaData() throws SQLException {
+        return new JdbcDatabaseMetaData(this);
+    }
+
+    @Override
+    public void setReadOnly(boolean readOnly) throws SQLException {
+        if (!readOnly) {
+            throw new SQLFeatureNotSupportedException("Only read-only mode is supported");
+        }
+    }
+
+    @Override
+    public boolean isReadOnly() throws SQLException {
+        checkOpen();
+        return true;
+    }
+
+    @Override
+    public void setCatalog(String catalog) throws SQLException {
+        checkOpen();
+        this.catalog = catalog;
+    }
+
+    @Override
+    public String getCatalog() throws SQLException {
+        checkOpen();
+        return catalog;
+    }
+
+    @Override
+    public void setTransactionIsolation(int level) throws SQLException {
+        checkOpen();
+        if (TRANSACTION_NONE != level) {
+            throw new SQLFeatureNotSupportedException("Transactions not supported");
+        }
+    }
+
+    @Override
+    public int getTransactionIsolation() throws SQLException {
+        checkOpen();
+        return TRANSACTION_NONE;
+    }
+
+    @Override
+    public SQLWarning getWarnings() throws SQLException {
+        checkOpen();
+        return null;
+    }
+
+    @Override
+    public void clearWarnings() throws SQLException {
+        checkOpen();
+        // no-op
+    }
+
+    @Override
+    public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
+        checkResultSet(resultSetType, resultSetConcurrency);
+        return createStatement();
+    }
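+
+    // A hypothetical caller-side sketch of the contract enforced by the methods above: only auto-commit,
+    // read-only access, TRANSACTION_NONE and forward-only/read-only result sets are accepted, anything
+    // else throws.
+    //
+    //     con.setAutoCommit(true);                                   // the only supported mode
+    //     con.setTransactionIsolation(Connection.TRANSACTION_NONE);  // other levels throw
+    //     Statement st = con.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);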
+ + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { + checkResultSet(resultSetType, resultSetConcurrency); + return prepareStatement(sql); + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { + checkResultSet(resultSetType, resultSetConcurrency); + return prepareCall(sql); + } + + @Override + public Map> getTypeMap() throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("typeMap not supported"); + } + + @Override + public void setTypeMap(Map> map) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("typeMap not supported"); + } + + @Override + public void setHoldability(int holdability) throws SQLException { + checkOpen(); + checkHoldability(holdability); + } + + @Override + public int getHoldability() throws SQLException { + checkOpen(); + return ResultSet.HOLD_CURSORS_OVER_COMMIT; + } + + @Override + public Savepoint setSavepoint() throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Savepoints not supported"); + } + + @Override + public Savepoint setSavepoint(String name) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Savepoints not supported"); + } + + @Override + public void rollback(Savepoint savepoint) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Savepoints not supported"); + } + + @Override + public void releaseSavepoint(Savepoint savepoint) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Savepoints not supported"); + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { + checkOpen(); + checkHoldability(resultSetHoldability); + return createStatement(resultSetType, resultSetConcurrency); + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) + throws SQLException { + checkOpen(); + checkHoldability(resultSetHoldability); + return prepareStatement(sql, resultSetType, resultSetConcurrency); + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) + throws SQLException { + checkOpen(); + checkHoldability(resultSetHoldability); + return prepareCall(sql, resultSetType, resultSetConcurrency); + } + + @Override + public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { + checkOpen(); + if (autoGeneratedKeys != Statement.NO_GENERATED_KEYS) { + throw new SQLFeatureNotSupportedException("Auto generated keys must be NO_GENERATED_KEYS"); + } + return prepareStatement(sql); + } + + @Override + public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Autogenerated key not supported"); + } + + @Override + public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Autogenerated key not supported"); + } + + @Override + public Clob createClob() throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Clob not supported yet"); + } + + @Override + public Blob createBlob() throws SQLException { + checkOpen(); + throw new 
SQLFeatureNotSupportedException("Blob not supported yet"); + } + + @Override + public NClob createNClob() throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("NClob not supported yet"); + } + + @Override + public SQLXML createSQLXML() throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("SQLXML not supported yet"); + } + + @Override + public boolean isValid(int timeout) throws SQLException { + if (timeout < 0) { + throw new SQLException("Negative timeout"); + } + return !isClosed() && client.ping(TimeUnit.SECONDS.toMillis(timeout)); + } + + private void checkOpenClientInfo() throws SQLClientInfoException { + if (isClosed()) { + throw new SQLClientInfoException("Connection closed", null); + } + } + + @Override + public void setClientInfo(String name, String value) throws SQLClientInfoException { + checkOpenClientInfo(); + throw new SQLClientInfoException("Unsupported operation", null); + } + + @Override + public void setClientInfo(Properties properties) throws SQLClientInfoException { + checkOpenClientInfo(); + throw new SQLClientInfoException("Unsupported operation", null); + } + + @Override + public String getClientInfo(String name) throws SQLException { + checkOpenClientInfo(); + // we don't support client info - the docs indicate we should return null if properties are not supported + return null; + } + + @Override + public Properties getClientInfo() throws SQLException { + checkOpenClientInfo(); + // similar to getClientInfo - return an empty object instead of an exception + return new Properties(); + } + + @Override + public Array createArrayOf(String typeName, Object[] elements) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Array not supported yet"); + } + + @Override + public Struct createStruct(String typeName, Object[] attributes) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Struct not supported yet"); + } + + @Override + public void setSchema(String schema) throws SQLException { + checkOpen(); + this.schema = schema; + } + + @Override + public String getSchema() throws SQLException { + checkOpen(); + return schema; + } + + @Override + public void abort(Executor executor) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public int getNetworkTimeout() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + private void checkResultSet(int resultSetType, int resultSetConcurrency) throws SQLException { + if (ResultSet.TYPE_FORWARD_ONLY != resultSetType) { + throw new SQLFeatureNotSupportedException("ResultSet type can only be TYPE_FORWARD_ONLY"); + } + if (ResultSet.CONCUR_READ_ONLY != resultSetConcurrency) { + throw new SQLFeatureNotSupportedException("ResultSet concurrency can only be CONCUR_READ_ONLY"); + } + } + + private void checkHoldability(int resultSetHoldability) throws SQLException { + if (ResultSet.HOLD_CURSORS_OVER_COMMIT != resultSetHoldability) { + throw new SQLFeatureNotSupportedException("Holdability can only be HOLD_CURSORS_OVER_COMMIT"); + } + } + + String getURL() { + return url; + } + + String getUserName() { + return userName; + } + + // There's no checkOpen on these methods since they are used by + // DatabaseMetadata that can work on a closed connection as well + // in fact, this information is cached by the underlying 
client + // once retrieved + int esInfoMajorVersion() throws SQLException { + return client.serverInfo().majorVersion; + } + + int esInfoMinorVersion() throws SQLException { + return client.serverInfo().minorVersion; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcDatabaseMetaData.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcDatabaseMetaData.java new file mode 100644 index 0000000000000..fbbb030bc4973 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcDatabaseMetaData.java @@ -0,0 +1,1170 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.xpack.sql.client.shared.ObjectUtils; +import org.elasticsearch.xpack.sql.client.shared.Version; +import org.elasticsearch.xpack.sql.jdbc.JdbcSQLException; +import org.elasticsearch.xpack.sql.jdbc.net.client.Cursor; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo; + +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.JDBCType; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.RowIdLifetime; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.util.ArrayList; +import java.util.List; + +import static java.sql.JDBCType.INTEGER; +import static java.sql.JDBCType.SMALLINT; + +/** + * Implementation of {@link DatabaseMetaData} for Elasticsearch. Draws inspiration + * from + * PostgreSQL. Virtual/synthetic tables are not supported so the client returns + * empty data instead of creating a query. + */ +class JdbcDatabaseMetaData implements DatabaseMetaData, JdbcWrapper { + + private final JdbcConnection con; + + JdbcDatabaseMetaData(JdbcConnection con) { + this.con = con; + } + + @Override + public boolean allProceduresAreCallable() throws SQLException { + return true; + } + + @Override + public boolean allTablesAreSelectable() throws SQLException { + return true; + } + + @Override + public String getURL() throws SQLException { + return con.getURL(); + } + + @Override + public String getUserName() throws SQLException { + return con.getUserName(); + } + + @Override + public boolean isReadOnly() throws SQLException { + return true; + } + + @Override + public boolean nullsAreSortedHigh() throws SQLException { + return false; + } + + @Override + public boolean nullsAreSortedLow() throws SQLException { + return false; + } + + @Override + public boolean nullsAreSortedAtStart() throws SQLException { + return false; + } + + @Override + public boolean nullsAreSortedAtEnd() throws SQLException { + // missing/null values are sorted (by default) last + return true; + } + + @Override + public String getDatabaseProductName() throws SQLException { + return "Elasticsearch"; + } + + @Override + public String getDatabaseProductVersion() throws SQLException { + return Version.CURRENT.toString(); + } + + @Override + public String getDriverName() throws SQLException { + return "Elasticsearch JDBC Driver"; + } + + @Override + public String getDriverVersion() throws SQLException { + return Version.CURRENT.major + "." 
+ Version.CURRENT.minor; + } + + @Override + public int getDriverMajorVersion() { + return Version.CURRENT.major; + } + + @Override + public int getDriverMinorVersion() { + return Version.CURRENT.minor; + } + + @Override + public boolean usesLocalFiles() throws SQLException { + return true; + } + + @Override + public boolean usesLocalFilePerTable() throws SQLException { + return true; + } + + @Override + public boolean supportsMixedCaseIdentifiers() throws SQLException { + return true; + } + + @Override + public boolean storesUpperCaseIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesLowerCaseIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesMixedCaseIdentifiers() throws SQLException { + //TODO: is the javadoc accurate + return false; + } + + @Override + public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException { + return true; + } + + @Override + public boolean storesUpperCaseQuotedIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesLowerCaseQuotedIdentifiers() throws SQLException { + return false; + } + + @Override + public boolean storesMixedCaseQuotedIdentifiers() throws SQLException { + return false; + } + + @Override + public String getIdentifierQuoteString() throws SQLException { + return "\""; + } + + @Override + public String getSQLKeywords() throws SQLException { + // TODO: sync this with the grammar + return ""; + } + + @Override + public String getNumericFunctions() throws SQLException { + // TODO: sync this with the grammar + return ""; + } + + @Override + public String getStringFunctions() throws SQLException { + // TODO: sync this with the grammar + return ""; + } + + @Override + public String getSystemFunctions() throws SQLException { + // TODO: sync this with the grammar + return ""; + } + + @Override + public String getTimeDateFunctions() throws SQLException { + return ""; + } + + @Override + public String getSearchStringEscape() throws SQLException { + return "\\"; + } + + @Override + public String getExtraNameCharacters() throws SQLException { + return ""; + } + + @Override + public boolean supportsAlterTableWithAddColumn() throws SQLException { + return false; + } + + @Override + public boolean supportsAlterTableWithDropColumn() throws SQLException { + return false; + } + + @Override + public boolean supportsColumnAliasing() throws SQLException { + return true; + } + + @Override + public boolean nullPlusNonNullIsNull() throws SQLException { + return true; + } + + @Override + public boolean supportsConvert() throws SQLException { + //TODO: add Convert + return false; + } + + @Override + public boolean supportsConvert(int fromType, int toType) throws SQLException { + return false; + } + + @Override + public boolean supportsTableCorrelationNames() throws SQLException { + return true; + } + + @Override + public boolean supportsDifferentTableCorrelationNames() throws SQLException { + return false; + } + + @Override + public boolean supportsExpressionsInOrderBy() throws SQLException { + return false; + } + + @Override + public boolean supportsOrderByUnrelated() throws SQLException { + return true; + } + + @Override + public boolean supportsGroupBy() throws SQLException { + return true; + } + + @Override + public boolean supportsGroupByUnrelated() throws SQLException { + return true; + } + + @Override + public boolean supportsGroupByBeyondSelect() throws SQLException { + return true; + } + + @Override + public boolean 
supportsLikeEscapeClause() throws SQLException { + return true; + } + + @Override + public boolean supportsMultipleResultSets() throws SQLException { + return false; + } + + @Override + public boolean supportsMultipleTransactions() throws SQLException { + return false; + } + + @Override + public boolean supportsNonNullableColumns() throws SQLException { + return true; + } + + @Override + public boolean supportsMinimumSQLGrammar() throws SQLException { + return true; + } + + @Override + public boolean supportsCoreSQLGrammar() throws SQLException { + return false; + } + + @Override + public boolean supportsExtendedSQLGrammar() throws SQLException { + return false; + } + + @Override + public boolean supportsANSI92EntryLevelSQL() throws SQLException { + return true; + } + + @Override + public boolean supportsANSI92IntermediateSQL() throws SQLException { + return false; + } + + @Override + public boolean supportsANSI92FullSQL() throws SQLException { + return false; + } + + @Override + public boolean supportsIntegrityEnhancementFacility() throws SQLException { + return false; + } + + @Override + public boolean supportsOuterJoins() throws SQLException { + return true; + } + + @Override + public boolean supportsFullOuterJoins() throws SQLException { + return false; + } + + @Override + public boolean supportsLimitedOuterJoins() throws SQLException { + return true; + } + + @Override + public String getSchemaTerm() throws SQLException { + return "schema"; + } + + @Override + public String getProcedureTerm() throws SQLException { + return "procedure"; + } + + @Override + public String getCatalogTerm() throws SQLException { + return "clusterName"; + } + + @Override + public boolean isCatalogAtStart() throws SQLException { + return true; + } + + @Override + public String getCatalogSeparator() throws SQLException { + return "."; + } + + @Override + public boolean supportsSchemasInDataManipulation() throws SQLException { + return true; + } + + @Override + public boolean supportsSchemasInProcedureCalls() throws SQLException { + return true; + } + + @Override + public boolean supportsSchemasInTableDefinitions() throws SQLException { + return true; + } + + @Override + public boolean supportsSchemasInIndexDefinitions() throws SQLException { + return true; + } + + @Override + public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException { + return true; + } + + @Override + public boolean supportsCatalogsInDataManipulation() throws SQLException { + return true; + } + + @Override + public boolean supportsCatalogsInProcedureCalls() throws SQLException { + return true; + } + + @Override + public boolean supportsCatalogsInTableDefinitions() throws SQLException { + return true; + } + + @Override + public boolean supportsCatalogsInIndexDefinitions() throws SQLException { + return true; + } + + @Override + public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException { + return true; + } + + @Override + public boolean supportsPositionedDelete() throws SQLException { + return false; + } + + @Override + public boolean supportsPositionedUpdate() throws SQLException { + return false; + } + + @Override + public boolean supportsSelectForUpdate() throws SQLException { + return false; + } + + @Override + public boolean supportsStoredProcedures() throws SQLException { + return false; + } + + @Override + public boolean supportsSubqueriesInComparisons() throws SQLException { + return false; + } + + @Override + public boolean supportsSubqueriesInExists() throws SQLException { + return false; + } + + 
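+    // Client tools typically probe these capability flags before generating SQL; a hypothetical check
+    // against the values returned above:
+    //
+    //     DatabaseMetaData md = con.getMetaData();
+    //     if (md.supportsOuterJoins() && !md.supportsFullOuterJoins()) {
+    //         // emit only LEFT/RIGHT OUTER JOINs
+    //     }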
@Override + public boolean supportsSubqueriesInIns() throws SQLException { + return false; + } + + @Override + public boolean supportsSubqueriesInQuantifieds() throws SQLException { + return false; + } + + @Override + public boolean supportsCorrelatedSubqueries() throws SQLException { + return false; + } + + @Override + public boolean supportsUnion() throws SQLException { + return false; + } + + @Override + public boolean supportsUnionAll() throws SQLException { + return true; + } + + @Override + public boolean supportsOpenCursorsAcrossCommit() throws SQLException { + return false; + } + + @Override + public boolean supportsOpenCursorsAcrossRollback() throws SQLException { + return false; + } + + @Override + public boolean supportsOpenStatementsAcrossCommit() throws SQLException { + return false; + } + + @Override + public boolean supportsOpenStatementsAcrossRollback() throws SQLException { + return false; + } + + @Override + public int getMaxBinaryLiteralLength() throws SQLException { + return 0; + } + + @Override + public int getMaxCharLiteralLength() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInGroupBy() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInIndex() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInOrderBy() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInSelect() throws SQLException { + return 0; + } + + @Override + public int getMaxColumnsInTable() throws SQLException { + return 0; + } + + @Override + public int getMaxConnections() throws SQLException { + return 0; + } + + @Override + public int getMaxCursorNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxIndexLength() throws SQLException { + return 0; + } + + @Override + public int getMaxSchemaNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxProcedureNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxCatalogNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxRowSize() throws SQLException { + return 0; + } + + @Override + public boolean doesMaxRowSizeIncludeBlobs() throws SQLException { + return true; + } + + @Override + public int getMaxStatementLength() throws SQLException { + return 0; + } + + @Override + public int getMaxStatements() throws SQLException { + return 0; + } + + @Override + public int getMaxTableNameLength() throws SQLException { + return 0; + } + + @Override + public int getMaxTablesInSelect() throws SQLException { + return 0; + } + + @Override + public int getMaxUserNameLength() throws SQLException { + return 0; + } + + @Override + public int getDefaultTransactionIsolation() throws SQLException { + return Connection.TRANSACTION_NONE; + } + + @Override + public boolean supportsTransactions() throws SQLException { + return false; + } + + @Override + public boolean supportsTransactionIsolationLevel(int level) throws SQLException { + return Connection.TRANSACTION_NONE == level; + } + + @Override + public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException { + return false; + } + + @Override + public boolean supportsDataManipulationTransactionsOnly() throws SQLException { + return false; + } + + @Override + public boolean dataDefinitionCausesTransactionCommit() throws SQLException { + return false; + } + + @Override + public boolean 
dataDefinitionIgnoredInTransactions() throws SQLException { + return false; + } + + // https://www.postgresql.org/docs/9.0/static/infoschema-routines.html + @Override + public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) throws SQLException { + return emptySet(con.cfg, + "ROUTINES", + "PROCEDURE_CAT", + "PROCEDURE_SCHEM", + "PROCEDURE_NAME", + "NUM_INPUT_PARAMS", INTEGER, + "NUM_OUTPUT_PARAMS", INTEGER, + "NUM_RESULT_SETS", INTEGER, + "REMARKS", + "PROCEDURE_TYPE", SMALLINT, + "SPECIFIC_NAME"); + } + + @Override + public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern) + throws SQLException { + return emptySet(con.cfg, + "PARAMETERS", + "PROCEDURE_CAT", + "PROCEDURE_SCHEM", + "PROCEDURE_NAME", + "COLUMN_NAME", + "COLUMN_TYPE", SMALLINT, + "DATA_TYPE", INTEGER, + "TYPE_NAME", + "PRECISION", INTEGER, + "LENGTH", INTEGER, + "SCALE", SMALLINT, + "RADIX", SMALLINT, + "NULLABLE", SMALLINT, + "REMARKS", + "COLUMN_DEF", + "SQL_DATA_TYPE", INTEGER, + "SQL_DATETIME_SUB", INTEGER, + "CHAR_OCTET_LENGTH", INTEGER, + "ORDINAL_POSITION", INTEGER, + "IS_NULLABLE", + "SPECIFIC_NAME"); + } + + // return the cluster name as the catalog (database) + // helps with the various UIs + private String defaultCatalog() throws SQLException { + return con.client.serverInfo().cluster; + } + + private boolean isDefaultCatalog(String catalog) throws SQLException { + // null means catalog info is irrelevant + // % means return all catalogs + // "" means return those without a catalog + return catalog == null || catalog.equals("") || catalog.equals("%") || catalog.equals(defaultCatalog()); + } + + private boolean isDefaultSchema(String schema) { + // null means schema info is irrelevant + // % means return all schemas` + // "" means return those without a schema + return schema == null || schema.equals("") || schema.equals("%"); + } + + @Override + public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException { + String statement = "SYS TABLES CATALOG LIKE ? LIKE ?"; + + if (types != null && types.length > 0) { + statement += " TYPE ?"; + + if (types.length > 1) { + for (int i = 1; i < types.length; i++) { + statement += ", ?"; + } + } + } + + PreparedStatement ps = con.prepareStatement(statement); + ps.setString(1, catalog != null ? catalog.trim() : "%"); + ps.setString(2, tableNamePattern != null ? 
tableNamePattern.trim() : "%"); + + if (types != null && types.length > 0) { + for (int i = 0; i < types.length; i++) { + ps.setString(3 + i, types[i]); + } + } + + return ps.executeQuery(); + } + + @Override + public ResultSet getSchemas() throws SQLException { + Object[][] data = { { "", defaultCatalog() } }; + return memorySet(con.cfg, columnInfo("SCHEMATA", + "TABLE_SCHEM", + "TABLE_CATALOG"), data); + } + + @Override + public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException { + List info = columnInfo("SCHEMATA", + "TABLE_SCHEM", + "TABLE_CATALOG"); + if (!isDefaultCatalog(catalog) || !isDefaultSchema(schemaPattern)) { + return emptySet(con.cfg, info); + } + Object[][] data = { { "", defaultCatalog() } }; + return memorySet(con.cfg, info, data); + } + + @Override + public ResultSet getCatalogs() throws SQLException { + return con.createStatement().executeQuery("SYS CATALOGS"); + } + + @Override + public ResultSet getTableTypes() throws SQLException { + return con.createStatement().executeQuery("SYS TABLE TYPES"); + } + + @Override + public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) + throws SQLException { + PreparedStatement ps = con.prepareStatement("SYS COLUMNS CATALOG ? TABLE LIKE ? LIKE ?"); + // TODO: until passing null works, pass an empty string + ps.setString(1, catalog != null ? catalog.trim() : ""); + ps.setString(2, tableNamePattern != null ? tableNamePattern.trim() : "%"); + ps.setString(3, columnNamePattern != null ? columnNamePattern.trim() : "%"); + return ps.executeQuery(); + } + + @Override + public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) throws SQLException { + throw new SQLFeatureNotSupportedException("Privileges not supported"); + } + + @Override + public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { + throw new SQLFeatureNotSupportedException("Privileges not supported"); + } + + @Override + public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable) throws SQLException { + throw new SQLFeatureNotSupportedException("Row identifiers not supported"); + } + + @Override + public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException { + throw new SQLFeatureNotSupportedException("Version column not supported yet"); + } + + @Override + public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException { + throw new SQLFeatureNotSupportedException("Primary keys not supported"); + } + + @Override + public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException { + throw new SQLFeatureNotSupportedException("Imported keys not supported"); + } + + @Override + public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException { + throw new SQLFeatureNotSupportedException("Exported keys not supported"); + } + + @Override + public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable, String foreignCatalog, + String foreignSchema, String foreignTable) throws SQLException { + throw new SQLFeatureNotSupportedException("Cross reference not supported"); + } + + @Override + public ResultSet getTypeInfo() throws SQLException { + return con.createStatement().executeQuery("SYS TYPES"); + } + + @Override + public ResultSet getIndexInfo(String 
catalog, String schema, String table, boolean unique, boolean approximate) throws SQLException { + throw new SQLFeatureNotSupportedException("Indicies not supported"); + } + + @Override + public boolean supportsResultSetType(int type) throws SQLException { + return ResultSet.TYPE_FORWARD_ONLY == type; + } + + @Override + public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException { + return ResultSet.TYPE_FORWARD_ONLY == type && ResultSet.CONCUR_READ_ONLY == concurrency; + } + + @Override + public boolean ownUpdatesAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean ownDeletesAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean ownInsertsAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean othersUpdatesAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean othersDeletesAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean othersInsertsAreVisible(int type) throws SQLException { + return false; + } + + @Override + public boolean updatesAreDetected(int type) throws SQLException { + return false; + } + + @Override + public boolean deletesAreDetected(int type) throws SQLException { + return false; + } + + @Override + public boolean insertsAreDetected(int type) throws SQLException { + return false; + } + + @Override + public boolean supportsBatchUpdates() throws SQLException { + return false; + } + + @Override + public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) throws SQLException { + return emptySet(con.cfg, + "USER_DEFINED_TYPES", + "TYPE_CAT", + "TYPE_SCHEM", + "TYPE_NAME", + "CLASS_NAME", + "DATA_TYPE", INTEGER, + "REMARKS", + "BASE_TYPE", SMALLINT); + } + + @Override + public Connection getConnection() throws SQLException { + return con; + } + + @Override + public boolean supportsSavepoints() throws SQLException { + return false; + } + + @Override + public boolean supportsNamedParameters() throws SQLException { + return true; + } + + @Override + public boolean supportsMultipleOpenResults() throws SQLException { + return false; + } + + @Override + public boolean supportsGetGeneratedKeys() throws SQLException { + return false; + } + + @Override + public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException { + return emptySet(con.cfg, + "SUPER_TYPES", + "TYPE_CAT", + "TYPE_SCHEM", + "TYPE_NAME", + "SUPERTYPE_CAT", + "SUPERTYPE_SCHEM", + "SUPERTYPE_NAME", + "BASE_TYPE"); + } + + @Override + public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { + return emptySet(con.cfg, "SUPER_TABLES", + "TABLE_CAT", + "TABLE_SCHEM", + "TABLE_NAME", + "SUPERTABLE_NAME"); + } + + @Override + public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, String attributeNamePattern) + throws SQLException { + return emptySet(con.cfg, + "ATTRIBUTES", + "TYPE_CAT", + "TYPE_SCHEM", + "TYPE_NAME", + "ATTR_NAME", + "DATA_TYPE", INTEGER, + "ATTR_TYPE_NAME", + "ATTR_SIZE", INTEGER, + "DECIMAL_DIGITS", INTEGER, + "NUM_PREC_RADIX", INTEGER, + "NULLABLE", INTEGER, + "REMARKS", + "ATTR_DEF", + "SQL_DATA_TYPE", INTEGER, + "SQL_DATETIME_SUB", INTEGER, + "CHAR_OCTET_LENGTH", INTEGER, + "ORDINAL_POSITION", INTEGER, + "IS_NULLABLE", + "SCOPE_CATALOG", + "SCOPE_SCHEMA", + "SCOPE_TABLE", + "SOURCE_DATA_TYPE", 
SMALLINT); + } + + @Override + public boolean supportsResultSetHoldability(int holdability) throws SQLException { + return ResultSet.HOLD_CURSORS_OVER_COMMIT == holdability; + } + + @Override + public int getResultSetHoldability() throws SQLException { + return ResultSet.HOLD_CURSORS_OVER_COMMIT; + } + + @Override + public int getDatabaseMajorVersion() throws SQLException { + return con.esInfoMajorVersion(); + } + + @Override + public int getDatabaseMinorVersion() throws SQLException { + return con.esInfoMinorVersion(); + } + + @Override + public int getJDBCMajorVersion() throws SQLException { + return Version.jdbcMajorVersion(); + } + + @Override + public int getJDBCMinorVersion() throws SQLException { + return Version.jdbcMinorVersion(); + } + + @Override + public int getSQLStateType() throws SQLException { + return DatabaseMetaData.sqlStateSQL; + } + + @Override + public boolean locatorsUpdateCopy() throws SQLException { + return true; + } + + @Override + public boolean supportsStatementPooling() throws SQLException { + return false; + } + + @Override + public RowIdLifetime getRowIdLifetime() throws SQLException { + return RowIdLifetime.ROWID_UNSUPPORTED; + } + + @Override + public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException { + return false; + } + + @Override + public boolean autoCommitFailureClosesAllResultSets() throws SQLException { + return false; + } + + @Override + public ResultSet getClientInfoProperties() throws SQLException { + throw new SQLException("Client info not implemented yet"); + } + + @Override + public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) throws SQLException { + return emptySet(con.cfg, + "FUNCTIONS", + "FUNCTION_CAT", + "FUNCTION_SCHEM", + "FUNCTION_NAME", + "REMARKS", + "FUNCTION_TYPE", SMALLINT, + "SPECIFIC_NAME"); + } + + @Override + public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, String columnNamePattern) + throws SQLException { + return emptySet(con.cfg, + "FUNCTION_COLUMNS", + "FUNCTION_CAT", + "FUNCTION_SCHEM", + "FUNCTION_NAME", + "COLUMN_NAME", + "DATA_TYPE", INTEGER, + "TYPE_NAME", + "PRECISION", INTEGER, + "LENGTH", INTEGER, + "SCALE", SMALLINT, + "RADIX", SMALLINT, + "NULLABLE", SMALLINT, + "REMARKS", + "CHAR_OCTET_LENGTH", INTEGER, + "ORDINAL_POSITION", INTEGER, + "IS_NULLABLE", + "SPECIFIC_NAME"); + } + + @Override + public ResultSet getPseudoColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) + throws SQLException { + return emptySet(con.cfg, + "PSEUDO_COLUMNS", + "TABLE_CAT", + "TABLE_SCHEM", + "TABLE_NAME", + "COLUMN_NAME", + "DATA_TYPE", INTEGER, + "COLUMN_SIZE", INTEGER, + "DECIMAL_DIGITS", INTEGER, + "NUM_PREC_RADIX", INTEGER, + "REMARKS", + "COLUMN_USAGE", + "IS_NULLABLE"); + } + + @Override + public boolean generatedKeyAlwaysReturned() throws SQLException { + return false; + } + + private static List columnInfo(String tableName, Object... 
cols) throws JdbcSQLException { + List columns = new ArrayList<>(); + + for (int i = 0; i < cols.length; i++) { + Object obj = cols[i]; + if (obj instanceof String) { + String name = obj.toString(); + JDBCType type = JDBCType.VARCHAR; + if (i + 1 < cols.length) { + // check if the next item it's a type + if (cols[i + 1] instanceof JDBCType) { + type = (JDBCType) cols[i + 1]; + i++; + } + // it's not, use the default and move on + } + columns.add(new ColumnInfo(name, type, tableName, "INFORMATION_SCHEMA", "", "", 0)); + } + else { + throw new JdbcSQLException("Invalid metadata schema definition"); + } + } + return columns; + } + + private static ResultSet emptySet(JdbcConfiguration cfg, String tableName, Object... cols) throws JdbcSQLException { + return new JdbcResultSet(cfg, null, new InMemoryCursor(columnInfo(tableName, cols), null)); + } + + private static ResultSet emptySet(JdbcConfiguration cfg, List columns) { + return memorySet(cfg, columns, null); + } + + private static ResultSet memorySet(JdbcConfiguration cfg, List columns, Object[][] data) { + return new JdbcResultSet(cfg, null, new InMemoryCursor(columns, data)); + } + + static class InMemoryCursor implements Cursor { + + private final List columns; + private final Object[][] data; + + private int row = -1; + + InMemoryCursor(List info, Object[][] data) { + this.columns = info; + this.data = data; + } + + @Override + public List columns() { + return columns; + } + + @Override + public boolean next() { + if (!ObjectUtils.isEmpty(data) && row < data.length - 1) { + row++; + return true; + } + return false; + } + + @Override + public Object column(int column) { + return data[row][column]; + } + + @Override + public int batchSize() { + return data.length; + } + + @Override + public void close() throws SQLException { + // this cursor doesn't hold any resource - no need to clean up + } + } +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcDriver.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcDriver.java new file mode 100644 index 0000000000000..3fdb002a0aa94 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcDriver.java @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.xpack.sql.client.shared.Version; +import org.elasticsearch.xpack.sql.jdbc.JdbcSQLException; +import org.elasticsearch.xpack.sql.jdbc.debug.Debug; + +import java.io.PrintWriter; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.DriverPropertyInfo; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.util.Properties; +import java.util.logging.Logger; + +public class JdbcDriver implements java.sql.Driver { + + private static final JdbcDriver INSTANCE = new JdbcDriver(); + + static { + // invoke Version to perform classpath/jar sanity checks + Version.CURRENT.toString(); + + try { + register(); + } catch (SQLException ex) { + // the SQLException is bogus as there's no source for it + // but we handle it just in case + PrintWriter writer = DriverManager.getLogWriter(); + if (writer != null) { + ex.printStackTrace(writer); + writer.flush(); + } + throw new ExceptionInInitializerError(ex); + } + } + + public static JdbcDriver register() throws SQLException { + // no closing callback + DriverManager.registerDriver(INSTANCE, INSTANCE::close); + return INSTANCE; + } + + public static void deregister() throws SQLException { + try { + DriverManager.deregisterDriver(INSTANCE); + } catch (SQLException ex) { + // the SQLException is bogus as there's no source for it + // but we handle it just in case + PrintWriter writer = DriverManager.getLogWriter(); + if (writer != null) { + ex.printStackTrace(writer); + writer.flush(); + } + throw ex; + } + } + + // + // Jdbc 4.0 + // + @Override + public Connection connect(String url, Properties props) throws SQLException { + if (url == null) { + throw new JdbcSQLException("Non-null url required"); + } + if (!acceptsURL(url)) { + return null; + } + + JdbcConfiguration cfg = initCfg(url, props); + JdbcConnection con = new JdbcConnection(cfg); + return cfg.debug() ? Debug.proxy(cfg, con, DriverManager.getLogWriter()) : con; + } + + private static JdbcConfiguration initCfg(String url, Properties props) throws JdbcSQLException { + return JdbcConfiguration.create(url, props, DriverManager.getLoginTimeout()); + } + + @Override + public boolean acceptsURL(String url) throws SQLException { + return JdbcConfiguration.canAccept(url); + } + + @Override + public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { + if (!acceptsURL(url)) { + return new DriverPropertyInfo[0]; + } + return JdbcConfiguration.create(url, info, DriverManager.getLoginTimeout()).driverPropertyInfo(); + } + + @Override + public int getMajorVersion() { + return Version.CURRENT.major; + } + + @Override + public int getMinorVersion() { + return Version.CURRENT.minor; + } + + @Override + public boolean jdbcCompliant() { + return false; + } + + // + // Jdbc 4.1 + // + + @Override + public Logger getParentLogger() throws SQLFeatureNotSupportedException { + throw new SQLFeatureNotSupportedException(); + } + + /** + * Cleanup method invoked by the DriverManager when unregistering the driver. + * Since this happens typically when the JDBC driver gets unloaded (from the classloader) + * cleaning all debug information is a good safety check. 
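+ * <p>
+ * For example (illustrative), an application container about to unload the driver jar would call
+ * {@code JdbcDriver.deregister()}, which makes the {@code DriverManager} invoke this cleanup callback.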
+ */ + private void close() { + Debug.close(); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcParameterMetaData.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcParameterMetaData.java new file mode 100644 index 0000000000000..ca464813dc2b5 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcParameterMetaData.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.xpack.sql.jdbc.jdbc.PreparedQuery.ParamInfo; + +import java.sql.ParameterMetaData; +import java.sql.SQLException; + +class JdbcParameterMetaData implements ParameterMetaData, JdbcWrapper { + + private final JdbcPreparedStatement ps; + + JdbcParameterMetaData(JdbcPreparedStatement ps) { + this.ps = ps; + } + + @Override + public int getParameterCount() throws SQLException { + ps.checkOpen(); + return ps.query.paramCount(); + } + + @Override + public int isNullable(int param) throws SQLException { + ps.checkOpen(); + return parameterNullableUnknown; + } + + @Override + public boolean isSigned(int param) throws SQLException { + return TypeConverter.isSigned(paramInfo(param).type); + } + + @Override + public int getPrecision(int param) throws SQLException { + ps.checkOpen(); + return 0; + } + + @Override + public int getScale(int param) throws SQLException { + ps.checkOpen(); + return 0; + } + + @Override + public int getParameterType(int param) throws SQLException { + return paramInfo(param).type.getVendorTypeNumber(); + } + + @Override + public String getParameterTypeName(int param) throws SQLException { + return paramInfo(param).type.name(); + } + + @Override + public String getParameterClassName(int param) throws SQLException { + return TypeConverter.classNameOf(paramInfo(param).type); + } + + @Override + public int getParameterMode(int param) throws SQLException { + ps.checkOpen(); + return parameterModeUnknown; + } + + private ParamInfo paramInfo(int param) throws SQLException { + ps.checkOpen(); + return ps.query.getParam(param); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatement.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatement.java new file mode 100644 index 0000000000000..b0af977c4ecfb --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcPreparedStatement.java @@ -0,0 +1,382 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.Array; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Date; +import java.sql.JDBCType; +import java.sql.NClob; +import java.sql.ParameterMetaData; +import java.sql.PreparedStatement; +import java.sql.Ref; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.RowId; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLXML; +import java.sql.Time; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.Calendar; +import java.util.Collections; + +class JdbcPreparedStatement extends JdbcStatement implements PreparedStatement { + final PreparedQuery query; + + JdbcPreparedStatement(JdbcConnection con, JdbcConfiguration info, String sql) throws SQLException { + super(con, info); + this.query = PreparedQuery.prepare(sql); + } + + @Override + public boolean execute() throws SQLException { + checkOpen(); + executeQuery(); + return true; + } + + @Override + public ResultSet executeQuery() throws SQLException { + checkOpen(); + initResultSet(query.sql(), query.params()); + return rs; + } + + @Override + public int executeUpdate() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + private void setParam(int parameterIndex, Object value, int type) throws SQLException { + checkOpen(); + + if (parameterIndex < 0 || parameterIndex > query.paramCount()) { + throw new SQLException("Invalid parameter index [ " + parameterIndex + "; needs to be between 1 and [" + query.paramCount() + + "]"); + } + + query.setParam(parameterIndex, value, JDBCType.valueOf(type)); + } + + @Override + public void setNull(int parameterIndex, int sqlType) throws SQLException { + setParam(parameterIndex, null, sqlType); + } + + @Override + public void setBoolean(int parameterIndex, boolean x) throws SQLException { + setParam(parameterIndex, x, Types.BOOLEAN); + } + + @Override + public void setByte(int parameterIndex, byte x) throws SQLException { + setParam(parameterIndex, x, Types.TINYINT); + } + + @Override + public void setShort(int parameterIndex, short x) throws SQLException { + setParam(parameterIndex, x, Types.SMALLINT); + } + + @Override + public void setInt(int parameterIndex, int x) throws SQLException { + setParam(parameterIndex, x, Types.INTEGER); + } + + @Override + public void setLong(int parameterIndex, long x) throws SQLException { + setParam(parameterIndex, x, Types.BIGINT); + } + + @Override + public void setFloat(int parameterIndex, float x) throws SQLException { + setParam(parameterIndex, x, Types.REAL); + } + + @Override + public void setDouble(int parameterIndex, double x) throws SQLException { + setParam(parameterIndex, x, Types.DOUBLE); + } + + @Override + public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { + throw new SQLFeatureNotSupportedException("BigDecimal not supported"); + } + + @Override + public void setString(int parameterIndex, String x) throws SQLException { + setParam(parameterIndex, x, Types.VARCHAR); + } + + @Override + public void setBytes(int parameterIndex, byte[] x) throws SQLException { + throw new UnsupportedOperationException("Bytes not implemented yet"); + } + + @Override + public void setDate(int parameterIndex, Date x) throws SQLException { + throw new UnsupportedOperationException("Date/Time not implemented yet"); + } + + 
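+    // Parameter indices are 1-based and each setter registers the value with its matching JDBC type;
+    // a minimal, purely illustrative round trip using the setters implemented above:
+    //
+    //     PreparedStatement ps = con.prepareStatement("SELECT name FROM library WHERE page_count > ?");
+    //     ps.setInt(1, 100);
+    //     ResultSet rs = ps.executeQuery();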
@Override + public void setTime(int parameterIndex, Time x) throws SQLException { + throw new UnsupportedOperationException("Date/Time not implemented yet"); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { + throw new UnsupportedOperationException("Date/Time not implemented yet"); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException("AsciiStream not supported"); + } + + @Override + @Deprecated + public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException("UnicodeStream not supported"); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException("BinaryStream not supported"); + } + + @Override + public void clearParameters() throws SQLException { + checkOpen(); + query.clearParams(); + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { + throw new UnsupportedOperationException("Object not implemented yet"); + } + + @Override + public void setObject(int parameterIndex, Object x) throws SQLException { + throw new SQLFeatureNotSupportedException("CharacterStream not supported"); + } + + @Override + public void addBatch() throws SQLException { + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException { + throw new SQLFeatureNotSupportedException("CharacterStream not supported"); + } + + @Override + public void setRef(int parameterIndex, Ref x) throws SQLException { + throw new SQLFeatureNotSupportedException("Ref not supported"); + } + + @Override + public void setBlob(int parameterIndex, Blob x) throws SQLException { + throw new SQLFeatureNotSupportedException("Blob not supported"); + } + + @Override + public void setClob(int parameterIndex, Clob x) throws SQLException { + throw new SQLFeatureNotSupportedException("Clob not supported"); + } + + @Override + public void setArray(int parameterIndex, Array x) throws SQLException { + throw new SQLFeatureNotSupportedException("Array not supported"); + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + return rs != null ? 
rs.getMetaData() : null; + } + + @Override + public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException { + throw new UnsupportedOperationException("Dates not implemented yet"); + } + + @Override + public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException { + throw new UnsupportedOperationException("Dates not implemented yet"); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { + throw new UnsupportedOperationException("Dates not implemented yet"); + } + + @Override + public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { + setNull(parameterIndex, sqlType); + } + + @Override + public void setURL(int parameterIndex, URL x) throws SQLException { + throw new SQLFeatureNotSupportedException("Datalink not supported"); + } + + @Override + public ParameterMetaData getParameterMetaData() throws SQLException { + return new JdbcParameterMetaData(this); + } + + @Override + public void setRowId(int parameterIndex, RowId x) throws SQLException { + throw new SQLFeatureNotSupportedException("RowId not supported"); + } + + @Override + public void setNString(int parameterIndex, String value) throws SQLException { + throw new SQLFeatureNotSupportedException("NString not supported"); + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("NCharacterStream not supported"); + } + + @Override + public void setNClob(int parameterIndex, NClob value) throws SQLException { + throw new SQLFeatureNotSupportedException("NClob not supported"); + } + + @Override + public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Clob not supported"); + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Blob not supported"); + } + + @Override + public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("NClob not supported"); + } + + @Override + public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { + throw new SQLFeatureNotSupportedException("SQLXML not supported"); + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { + throw new UnsupportedOperationException("Object not implemented yet"); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("AsciiStream not supported"); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("BinaryStream not supported"); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("CharacterStream not supported"); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { + throw new SQLFeatureNotSupportedException("AsciiStream not supported"); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { + throw new SQLFeatureNotSupportedException("BinaryStream not supported"); 
+ } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException("CharacterStream not supported"); + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { + throw new SQLFeatureNotSupportedException("NCharacterStream not supported"); + } + + @Override + public void setClob(int parameterIndex, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException("Clob not supported"); + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { + throw new SQLFeatureNotSupportedException("Blob not supported"); + } + + @Override + public void setNClob(int parameterIndex, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException("NClob not supported"); + } + + @Override + public ResultSet executeQuery(String sql) throws SQLException { + throw new SQLException("Forbidden method on PreparedStatement"); + } + + @Override + public int executeUpdate(String sql) throws SQLException { + throw new SQLException("Forbidden method on PreparedStatement"); + } + + @Override + public boolean execute(String sql) throws SQLException { + throw new SQLException("Forbidden method on PreparedStatement"); + } + + @Override + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + throw new SQLException("Forbidden method on PreparedStatement"); + } + + @Override + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + throw new SQLException("Forbidden method on PreparedStatement"); + } + + @Override + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + throw new SQLException("Forbidden method on PreparedStatement"); + } + + @Override + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + throw new SQLException("Forbidden method on PreparedStatement"); + } + + @Override + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + throw new SQLException("Forbidden method on PreparedStatement"); + } + + @Override + public boolean execute(String sql, String[] columnNames) throws SQLException { + throw new SQLException("Forbidden method on PreparedStatement"); + } + + @Override + public long executeLargeUpdate() throws SQLException { + throw new SQLFeatureNotSupportedException("Batching not supported"); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java new file mode 100644 index 0000000000000..c92ac9c5ac91c --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java @@ -0,0 +1,1175 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.xpack.sql.jdbc.net.client.Cursor; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.Nullable; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.Array; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Date; +import java.sql.JDBCType; +import java.sql.NClob; +import java.sql.Ref; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.RowId; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLWarning; +import java.sql.SQLXML; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.Calendar; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static java.lang.String.format; + +class JdbcResultSet implements ResultSet, JdbcWrapper { + + // temporary calendar instance (per connection) used for normalizing the date and time + // even though the cfg is already in UTC format, JDBC 3.0 requires java.sql.Time to have its date + // removed (set to Jan 01 1970) and java.sql.Date to have its HH:mm:ss component removed + // instead of dealing with longs, a Calendar object is used instead + private final Calendar defaultCalendar; + + private final JdbcStatement statement; + private final Cursor cursor; + private final Map nameToIndex = new LinkedHashMap<>(); + + private boolean closed = false; + private boolean wasNull = false; + + private int rowNumber; + + JdbcResultSet(JdbcConfiguration cfg, @Nullable JdbcStatement statement, Cursor cursor) { + this.statement = statement; + this.cursor = cursor; + // statement can be null so we have to extract the timeZone from the non-nullable cfg + // TODO: should we consider the locale as well? 
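        // Illustrative sketch of how the calendar below ends up being used: getDate/getTime/getTimestamp
        // funnel the epoch-millisecond column value through TypeConverter, e.g.
        //     Date d = TypeConverter.convertDate(millis, defaultCalendar); // HH:mm:ss.SSS zeroed out
        //     Time t = TypeConverter.convertTime(millis, defaultCalendar); // date pinned to Jan 01 1970
        // so a single reusable Calendar avoids allocating a fresh instance for every value read.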
+ this.defaultCalendar = Calendar.getInstance(cfg.timeZone(), Locale.ROOT); + + List columns = cursor.columns(); + for (int i = 0; i < columns.size(); i++) { + nameToIndex.put(columns.get(i).name, Integer.valueOf(i + 1)); + } + } + + private Object column(int columnIndex) throws SQLException { + checkOpen(); + if (columnIndex < 1 || columnIndex > cursor.columnSize()) { + throw new SQLException("Invalid column index [" + columnIndex + "]"); + } + Object object = null; + try { + object = cursor.column(columnIndex - 1); + } catch (IllegalArgumentException iae) { + throw new SQLException(iae.getMessage()); + } + wasNull = (object == null); + return object; + } + + private int column(String columnName) throws SQLException { + checkOpen(); + Integer index = nameToIndex.get(columnName); + if (index == null) { + throw new SQLException("Invalid column label [" + columnName + "]"); + } + return index.intValue(); + } + + void checkOpen() throws SQLException { + if (isClosed()) { + throw new SQLException("Closed result set"); + } + } + + @Override + public boolean next() throws SQLException { + checkOpen(); + if (cursor.next()) { + rowNumber++; + return true; + } + return false; + } + + @Override + public void close() throws SQLException { + if (!closed) { + closed = true; + if (statement != null) { + statement.resultSetWasClosed(); + } + cursor.close(); + } + } + + @Override + public boolean wasNull() throws SQLException { + checkOpen(); + return wasNull; + } + + @Override + public String getString(int columnIndex) throws SQLException { + Object val = column(columnIndex); + return val != null ? val.toString() : null; + } + + @Override + public boolean getBoolean(int columnIndex) throws SQLException { + Object val = column(columnIndex); + try { + return val != null ? (Boolean) val : false; + } catch (ClassCastException cce) { + throw new SQLException("unable to convert column " + columnIndex + " to a boolean", cce); + } + } + + @Override + public byte getByte(int columnIndex) throws SQLException { + Object val = column(columnIndex); + try { + return val != null ? ((Number) val).byteValue() : 0; + } catch (ClassCastException cce) { + throw new SQLException("unable to convert column " + columnIndex + " to a byte", cce); + } + } + + @Override + public short getShort(int columnIndex) throws SQLException { + Object val = column(columnIndex); + try { + return val != null ? ((Number) val).shortValue() : 0; + } catch (ClassCastException cce) { + throw new SQLException("unable to convert column " + columnIndex + " to a short", cce); + } + } + + @Override + public int getInt(int columnIndex) throws SQLException { + Object val = column(columnIndex); + try { + return val != null ? ((Number) val).intValue() : 0; + } catch (ClassCastException cce) { + throw new SQLException("unable to convert column " + columnIndex + " to an int", cce); + } + } + + @Override + public long getLong(int columnIndex) throws SQLException { + Object val = column(columnIndex); + try { + return val != null ? ((Number) val).longValue() : 0; + } catch (ClassCastException cce) { + throw new SQLException("unable to convert column " + columnIndex + " to a long", cce); + } + } + + @Override + public float getFloat(int columnIndex) throws SQLException { + Object val = column(columnIndex); + try { + return val != null ? 
((Number) val).floatValue() : 0; + } catch (ClassCastException cce) { + throw new SQLException("unable to convert column " + columnIndex + " to a float", cce); + } + } + + @Override + public double getDouble(int columnIndex) throws SQLException { + Object val = column(columnIndex); + try { + return val != null ? ((Number) val).doubleValue() : 0; + } catch (ClassCastException cce) { + throw new SQLException("unable to convert column " + columnIndex + " to a double", cce); + } + } + + @Override + public byte[] getBytes(int columnIndex) throws SQLException { + try { + return (byte[]) column(columnIndex); + } catch (ClassCastException cce) { + throw new SQLException("unable to convert column " + columnIndex + " to a byte array", cce); + } + } + + @Override + public Date getDate(int columnIndex) throws SQLException { + return getDate(columnIndex, null); + } + + @Override + public Time getTime(int columnIndex) throws SQLException { + return getTime(columnIndex, null); + } + + @Override + public Timestamp getTimestamp(int columnIndex) throws SQLException { + return getTimestamp(columnIndex, null); + } + + @Override + public String getString(String columnLabel) throws SQLException { + return getString(column(columnLabel)); + } + + @Override + public boolean getBoolean(String columnLabel) throws SQLException { + return getBoolean(column(columnLabel)); + } + + @Override + public byte getByte(String columnLabel) throws SQLException { + return getByte(column(columnLabel)); + } + + @Override + public short getShort(String columnLabel) throws SQLException { + return getShort(column(columnLabel)); + } + + @Override + public int getInt(String columnLabel) throws SQLException { + return getInt(column(columnLabel)); + } + + @Override + public long getLong(String columnLabel) throws SQLException { + return getLong(column(columnLabel)); + } + + @Override + public float getFloat(String columnLabel) throws SQLException { + return getFloat(column(columnLabel)); + } + + @Override + public double getDouble(String columnLabel) throws SQLException { + return getDouble(column(columnLabel)); + } + + @Override + public byte[] getBytes(String columnLabel) throws SQLException { + return getBytes(column(columnLabel)); + } + + @Override + public Date getDate(String columnLabel) throws SQLException { + return getDate(column(columnLabel)); + } + + private Long dateTime(int columnIndex) throws SQLException { + Object val = column(columnIndex); + try { + return val == null ? null : (Long) val; + } catch (ClassCastException cce) { + throw new SQLException("unable to convert column " + columnIndex + " to a long", cce); + } + } + + private Calendar safeCalendar(Calendar calendar) { + return calendar == null ? 
defaultCalendar : calendar; + } + + @Override + public Date getDate(int columnIndex, Calendar cal) throws SQLException { + return TypeConverter.convertDate(dateTime(columnIndex), safeCalendar(cal)); + } + + @Override + public Date getDate(String columnLabel, Calendar cal) throws SQLException { + return getDate(column(columnLabel), cal); + } + + @Override + public Time getTime(int columnIndex, Calendar cal) throws SQLException { + return TypeConverter.convertTime(dateTime(columnIndex), safeCalendar(cal)); + } + + @Override + public Time getTime(String columnLabel) throws SQLException { + return getTime(column(columnLabel)); + } + + @Override + public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { + return TypeConverter.convertTimestamp(dateTime(columnIndex), safeCalendar(cal)); + } + + @Override + public Timestamp getTimestamp(String columnLabel) throws SQLException { + return getTimestamp(column(columnLabel)); + } + + @Override + public Time getTime(String columnLabel, Calendar cal) throws SQLException { + return getTime(column(columnLabel), cal); + } + + @Override + public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { + return getTimestamp(column(columnLabel), cal); + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + return new JdbcResultSetMetaData(this, cursor.columns()); + } + + @Override + public Object getObject(int columnIndex) throws SQLException { + return convert(columnIndex, null); + } + + @Override + public T getObject(int columnIndex, Class type) throws SQLException { + if (type == null) { + throw new SQLException("type is null"); + } + + return getObject(columnIndex, type); + } + + private T convert(int columnIndex, Class type) throws SQLException { + checkOpen(); + if (columnIndex < 1 || columnIndex > cursor.columnSize()) { + throw new SQLException("Invalid column index [" + columnIndex + "]"); + } + + Object val = column(columnIndex); + + if (val == null) { + return null; + } + + if (type != null && type.isInstance(val)) { + try { + return type.cast(val); + } catch (ClassCastException cce) { + throw new SQLException("unable to convert column " + columnIndex + " to " + type, cce); + } + } + + JDBCType columnType = cursor.columns().get(columnIndex - 1).type; + + return TypeConverter.convert(val, columnType, type); + } + + @Override + public Object getObject(int columnIndex, Map> map) throws SQLException { + if (map == null || map.isEmpty()) { + return getObject(columnIndex); + } + throw new SQLFeatureNotSupportedException("getObject with non-empty Map not supported"); + } + + @Override + public Object getObject(String columnLabel) throws SQLException { + return getObject(column(columnLabel)); + } + + @Override + public T getObject(String columnLabel, Class type) throws SQLException { + return getObject(column(columnLabel), type); + } + + @Override + public Object getObject(String columnLabel, Map> map) throws SQLException { + return getObject(column(columnLabel), map); + } + + @Override + public int findColumn(String columnLabel) throws SQLException { + return column(columnLabel); + } + + @Override + public boolean isBeforeFirst() throws SQLException { + return rowNumber == 0; + } + + @Override + public boolean isAfterLast() throws SQLException { + throw new SQLFeatureNotSupportedException("isAfterLast not supported"); + } + + @Override + public boolean isFirst() throws SQLException { + return rowNumber == 1; + } + + @Override + public boolean isLast() throws SQLException { + throw 
new SQLFeatureNotSupportedException("isLast not supported"); + } + + @Override + public int getRow() throws SQLException { + return rowNumber; + } + + @Override + public void setFetchSize(int rows) throws SQLException { + checkOpen(); + if (rows < 0) { + throw new SQLException("Rows is negative"); + } + if (rows != getFetchSize()) { + throw new SQLException("Fetch size cannot be changed"); + } + // ignore fetch size since scrolls cannot be changed in flight + } + + @Override + public int getFetchSize() throws SQLException { + /* + * Instead of returning the fetch size the user requested we make a + * stab at returning the fetch size that we actually used, returning + * the batch size of the current row. This allows us to assert these + * batch sizes in testing and lets us point users to something that + * they can use for debugging. + */ + checkOpen(); + return cursor.batchSize(); + } + + @Override + public Statement getStatement() throws SQLException { + checkOpen(); + return statement; + } + + @Override + public boolean isClosed() { + return closed; + } + + @Override + @Deprecated + public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { + throw new SQLFeatureNotSupportedException("BigDecimal not supported"); + } + + @Override + public InputStream getAsciiStream(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("AsciiStream not supported"); + } + + @Override + @Deprecated + public InputStream getUnicodeStream(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("UnicodeStream not supported"); + } + + @Override + public InputStream getBinaryStream(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("BinaryStream not supported"); + } + + @Override + @Deprecated + public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { + throw new SQLFeatureNotSupportedException("BigDecimal not supported"); + } + + @Override + public InputStream getAsciiStream(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("AsciiStream not supported"); + } + + @Override + @Deprecated + public InputStream getUnicodeStream(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("UnicodeStream not supported"); + } + + @Override + public InputStream getBinaryStream(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("BinaryStream not supported"); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + checkOpen(); + return null; + } + + @Override + public void clearWarnings() throws SQLException { + checkOpen(); + } + + @Override + public String getCursorName() throws SQLException { + throw new SQLFeatureNotSupportedException("Cursor name not supported"); + } + + @Override + public Reader getCharacterStream(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("CharacterStream not supported"); + } + + @Override + public Reader getCharacterStream(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("CharacterStream not supported"); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("BigDecimal not supported"); + } + + @Override + public BigDecimal getBigDecimal(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("BigDecimal not supported"); + } + + @Override + public void beforeFirst() 
throws SQLException { + throw new SQLException("ResultSet is forward-only"); + } + + @Override + public void afterLast() throws SQLException { + throw new SQLException("ResultSet is forward-only"); + } + + @Override + public boolean first() throws SQLException { + throw new SQLException("ResultSet is forward-only"); + } + + @Override + public boolean last() throws SQLException { + throw new SQLException("ResultSet is forward-only"); + } + + @Override + public boolean absolute(int row) throws SQLException { + throw new SQLException("ResultSet is forward-only"); + } + + @Override + public boolean relative(int rows) throws SQLException { + throw new SQLException("ResultSet is forward-only"); + } + + @Override + public boolean previous() throws SQLException { + throw new SQLException("ResultSet is forward-only"); + } + + @Override + public int getType() throws SQLException { + checkOpen(); + return TYPE_FORWARD_ONLY; + } + + @Override + public int getConcurrency() throws SQLException { + checkOpen(); + return CONCUR_READ_ONLY; + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + checkOpen(); + if (direction != FETCH_FORWARD) { + throw new SQLException("Fetch direction must be FETCH_FORWARD"); + } + } + + @Override + public int getFetchDirection() throws SQLException { + checkOpen(); + return FETCH_FORWARD; + } + + @Override + public boolean rowUpdated() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public boolean rowInserted() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public boolean rowDeleted() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNull(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBoolean(int columnIndex, boolean x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateByte(int columnIndex, byte x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateShort(int columnIndex, short x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateInt(int columnIndex, int x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateLong(int columnIndex, long x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateFloat(int columnIndex, float x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateDouble(int columnIndex, double x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateString(int columnIndex, String x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBytes(int columnIndex, byte[] x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } 
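    // The update*/insertRow/deleteRow methods in this section all reject writes: the result set is strictly
    // read-only and forward-only. A minimal, hypothetical consumption pattern (table and columns made up):
    //     try (ResultSet results = statement.executeQuery("SELECT name, age FROM emp")) {
    //         while (results.next()) {
    //             String name = results.getString("name");
    //             int age = results.getInt("age");
    //         }
    //     }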
+ + @Override + public void updateDate(int columnIndex, Date x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateTime(int columnIndex, Time x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateObject(int columnIndex, Object x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNull(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBoolean(String columnLabel, boolean x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateByte(String columnLabel, byte x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateShort(String columnLabel, short x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateInt(String columnLabel, int x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateLong(String columnLabel, long x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateFloat(String columnLabel, float x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateDouble(String columnLabel, double x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateString(String columnLabel, String x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBytes(String columnLabel, byte[] x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateDate(String columnLabel, Date x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateTime(String columnLabel, Time x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void 
updateTimestamp(String columnLabel, Timestamp x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, int length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateObject(String columnLabel, Object x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void insertRow() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateRow() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void deleteRow() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void cancelRowUpdates() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void moveToInsertRow() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void refreshRow() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void moveToCurrentRow() throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public Ref getRef(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("Ref not supported"); + } + + @Override + public Blob getBlob(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("Blob not supported"); + } + + @Override + public Clob getClob(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("Clob not supported"); + } + + @Override + public Array getArray(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("Array not supported"); + } + + @Override + public Ref getRef(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("Ref not supported"); + } + + @Override + public Blob getBlob(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("Blob not supported"); + } + + @Override + public Clob getClob(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("Clob not supported"); + } + + @Override + public Array getArray(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("Array not supported"); + } + + @Override + public URL getURL(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("URL not supported"); + } + + @Override + public URL getURL(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("URL not supported"); + } + + @Override + public void updateRef(int columnIndex, 
Ref x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateRef(String columnLabel, Ref x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBlob(int columnIndex, Blob x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBlob(String columnLabel, Blob x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateClob(int columnIndex, Clob x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateClob(String columnLabel, Clob x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateArray(int columnIndex, Array x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateArray(String columnLabel, Array x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public RowId getRowId(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("RowId not supported"); + } + + @Override + public RowId getRowId(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("RowId not supported"); + } + + @Override + public void updateRowId(int columnIndex, RowId x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateRowId(String columnLabel, RowId x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public int getHoldability() throws SQLException { + checkOpen(); + return HOLD_CURSORS_OVER_COMMIT; + } + + @Override + public void updateNString(int columnIndex, String nString) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNString(String columnLabel, String nString) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNClob(int columnIndex, NClob nClob) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNClob(String columnLabel, NClob nClob) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public NClob getNClob(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("NClob not supported"); + } + + @Override + public NClob getNClob(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("NClob not supported"); + } + + @Override + public SQLXML getSQLXML(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("SQLXML not supported"); + } + + @Override + public SQLXML getSQLXML(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("SQLXML not supported"); + } + + @Override + public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { + throw new 
SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public String getNString(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("NString not supported"); + } + + @Override + public String getNString(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("NString not supported"); + } + + @Override + public Reader getNCharacterStream(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException("NCharacterStream not supported"); + } + + @Override + public Reader getNCharacterStream(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException("NCharacterStream not supported"); + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void 
updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateClob(int columnIndex, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateClob(String columnLabel, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNClob(int columnIndex, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public void updateNClob(String columnLabel, Reader reader) throws SQLException { + throw new SQLFeatureNotSupportedException("Writes not supported"); + } + + @Override + public String toString() { + return format(Locale.ROOT, "%s:row %d", getClass().getSimpleName(), rowNumber); + } +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSetMetaData.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSetMetaData.java new file mode 100644 index 0000000000000..574cdeb62b4b5 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSetMetaData.java @@ -0,0 +1,161 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo; + +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.util.List; +import java.util.Locale; + +import static java.lang.String.format; + +class JdbcResultSetMetaData implements ResultSetMetaData, JdbcWrapper { + + private final JdbcResultSet rs; + private final List columns; + + JdbcResultSetMetaData(JdbcResultSet rs, List columns) { + this.rs = rs; + this.columns = columns; + } + + @Override + public int getColumnCount() throws SQLException { + checkOpen(); + return columns.size(); + } + + @Override + public boolean isAutoIncrement(int column) throws SQLException { + column(column); + return false; + } + + @Override + public boolean isCaseSensitive(int column) throws SQLException { + column(column); + return true; + } + + @Override + public boolean isSearchable(int column) throws SQLException { + column(column); + return true; + } + + @Override + public boolean isCurrency(int column) throws SQLException { + column(column); + return false; + } + + @Override + public int isNullable(int column) throws SQLException { + column(column); + return columnNullableUnknown; + } + + @Override + public boolean isSigned(int column) throws SQLException { + return TypeConverter.isSigned(column(column).type); + } + + @Override + public int getColumnDisplaySize(int column) throws SQLException { + return column(column).displaySize(); + } + + @Override + public String getColumnLabel(int column) throws SQLException { + return column(column).label; + } + + @Override + public String getColumnName(int column) throws SQLException { + return column(column).name; + } + + @Override + public String getSchemaName(int column) throws SQLException { + return column(column).schema; + } + + @Override + public int getPrecision(int column) throws SQLException { + column(column); + return 0; + } + + @Override + public int getScale(int column) throws SQLException { + column(column); + return 0; + } + + @Override + public String getTableName(int column) throws SQLException { + return column(column).table; + } + + @Override + public String getCatalogName(int column) throws SQLException { + return column(column).catalog; + } + + @Override + public int getColumnType(int column) throws SQLException { + return column(column).type.getVendorTypeNumber(); + } + + @Override + public String getColumnTypeName(int column) throws SQLException { + return column(column).type.name(); + } + + @Override + public boolean isReadOnly(int column) throws SQLException { + column(column); + return true; + } + + @Override + public boolean isWritable(int column) throws SQLException { + column(column); + return false; + } + + @Override + public boolean isDefinitelyWritable(int column) throws SQLException { + column(column); + return false; + } + + @Override + public String getColumnClassName(int column) throws SQLException { + return TypeConverter.classNameOf(column(column).type); + } + + private void checkOpen() throws SQLException { + if (rs != null) { + rs.checkOpen(); + } + } + + private ColumnInfo column(int column) throws SQLException { + checkOpen(); + if (column < 1 || column > columns.size()) { + throw new SQLException("Invalid column index [" + column + "]"); + } + return columns.get(column - 1); + } + + @Override + public String toString() { + return format(Locale.ROOT, "%s(%s)", getClass().getSimpleName(), columns); + } +} \ No newline at end of file diff --git 
a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcStatement.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcStatement.java new file mode 100644 index 0000000000000..fab21c541799e --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcStatement.java @@ -0,0 +1,405 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.xpack.sql.jdbc.net.client.Cursor; +import org.elasticsearch.xpack.sql.jdbc.net.client.RequestMeta; +import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLWarning; +import java.sql.Statement; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; + +class JdbcStatement implements Statement, JdbcWrapper { + + final JdbcConnection con; + final JdbcConfiguration cfg; + + private boolean closed = false; + private boolean closeOnCompletion = false; + private boolean ignoreResultSetClose = false; + + protected JdbcResultSet rs; + final RequestMeta requestMeta; + + JdbcStatement(JdbcConnection jdbcConnection, JdbcConfiguration info) { + this.con = jdbcConnection; + this.cfg = info; + this.requestMeta = new RequestMeta(info.pageSize(), info.pageTimeout(), info.queryTimeout()); + } + + @Override + public ResultSet executeQuery(String sql) throws SQLException { + if (!execute(sql)) { + throw new SQLException("Invalid sql query [" + sql + "]"); + } + return rs; + } + + @Override + public int executeUpdate(String sql) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Update not supported"); + } + + @Override + public void close() throws SQLException { + if (!closed) { + closed = true; + closeResultSet(); + } + } + + @Override + public int getMaxFieldSize() throws SQLException { + checkOpen(); + return 0; + } + + @Override + public void setMaxFieldSize(int max) throws SQLException { + checkOpen(); + if (max < 0) { + throw new SQLException("Field size must be positive"); + } + } + + @Override + public int getMaxRows() throws SQLException { + long result = getLargeMaxRows(); + if (result > Integer.MAX_VALUE) { + throw new SQLException("Max rows exceeds limit of " + Integer.MAX_VALUE); + } + return Math.toIntExact(result); + } + + + @Override + public long getLargeMaxRows() throws SQLException { + checkOpen(); + return 0; + } + + @Override + public void setMaxRows(int max) throws SQLException { + setLargeMaxRows(max); + } + + @Override + public void setLargeMaxRows(long max) throws SQLException { + checkOpen(); + if (max < 0) { + throw new SQLException("Field size must be positive"); + } + // ignore + } + + @Override + public void setEscapeProcessing(boolean enable) throws SQLException { + checkOpen(); + // no-op - always escape + } + + @Override + public int getQueryTimeout() throws SQLException { + checkOpen(); + return (int) TimeUnit.MILLISECONDS.toSeconds(requestMeta.queryTimeoutInMs()); + } + + @Override + public void setQueryTimeout(int seconds) throws SQLException { + checkOpen(); + if (seconds < 0) { + throw new SQLException("Query timeout must be positive"); + } + 
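        // JDBC expresses the timeout in seconds while the request metadata tracks milliseconds, hence the
        // conversion below; getQueryTimeout() above applies the inverse conversion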
requestMeta.queryTimeout(TimeUnit.SECONDS.toMillis(seconds)); + } + + @Override + public void cancel() throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Cancel not supported"); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + checkOpen(); + return null; + } + + @Override + public void clearWarnings() throws SQLException { + checkOpen(); + } + + @Override + public void setCursorName(String name) throws SQLException { + checkOpen(); + // no-op (doc is confusing - says no-op but also to throw an exception) + } + + @Override + public boolean execute(String sql) throws SQLException { + checkOpen(); + initResultSet(sql, Collections.emptyList()); + return true; + } + + // execute the query and handle the rs closing and initialization + protected void initResultSet(String sql, List params) throws SQLException { + // close previous result set + closeResultSet(); + + Cursor cursor = con.client.query(sql, params, requestMeta); + rs = new JdbcResultSet(cfg, this, cursor); + } + + @Override + public ResultSet getResultSet() throws SQLException { + checkOpen(); + return rs; + } + + @Override + public int getUpdateCount() throws SQLException { + long count = getLargeUpdateCount(); + return count > Integer.MAX_VALUE ? Integer.MAX_VALUE : count < Integer.MIN_VALUE ? Integer.MIN_VALUE : (int) count; + } + + @Override + public long getLargeUpdateCount() throws SQLException { + checkOpen(); + return -1; + } + + @Override + public boolean getMoreResults() throws SQLException { + checkOpen(); + closeResultSet(); + return false; + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + checkOpen(); + if (ResultSet.FETCH_REVERSE != direction + || ResultSet.FETCH_FORWARD != direction + || ResultSet.FETCH_UNKNOWN != direction) { + throw new SQLException("Invalid direction specified"); + } + } + + @Override + public int getFetchDirection() throws SQLException { + checkOpen(); + return ResultSet.FETCH_FORWARD; + } + + @Override + public void setFetchSize(int rows) throws SQLException { + checkOpen(); + if (rows < 0) { + throw new SQLException("Fetch size must be positive"); + } + requestMeta.fetchSize(rows); + } + + @Override + public int getFetchSize() throws SQLException { + checkOpen(); + // the spec is somewhat unclear. 
It looks like there are 3 states: + // unset (in this case -1 which the user cannot set) - in this case, the default fetch size is returned + // 0 meaning the hint is disabled (the user has called setFetch) + // >0 means actual hint + + // tl;dr - unless the user set it, returning the default is fine + return requestMeta.fetchSize(); + } + + @Override + public int getResultSetConcurrency() throws SQLException { + checkOpen(); + return ResultSet.CONCUR_READ_ONLY; + } + + @Override + public int getResultSetType() throws SQLException { + checkOpen(); + return ResultSet.TYPE_FORWARD_ONLY; + } + + @Override + public void addBatch(String sql) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public void clearBatch() throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public int[] executeBatch() throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public Connection getConnection() throws SQLException { + checkOpen(); + return con; + } + + @Override + public boolean getMoreResults(int current) throws SQLException { + checkOpen(); + if (CLOSE_CURRENT_RESULT == current) { + closeResultSet(); + return false; + } + if (KEEP_CURRENT_RESULT == current || CLOSE_ALL_RESULTS == current) { + throw new SQLException("Invalid current parameter"); + } + + throw new SQLFeatureNotSupportedException("Multiple ResultSets not supported"); + } + + @Override + public ResultSet getGeneratedKeys() throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Generated keys not supported"); + } + + @Override + public long[] executeLargeBatch() throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public long executeLargeUpdate(String sql) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public long executeLargeUpdate(String sql, int[] columnIndexes) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public long executeLargeUpdate(String sql, String[] columnNames) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + checkOpen(); + throw new SQLFeatureNotSupportedException("Batching not supported"); + } + + @Override + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + return execute(sql); + } + + @Override + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + return execute(sql); + } + + @Override + public boolean execute(String sql, String[] columnNames) throws SQLException { + 
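        // this driver never produces auto-generated keys (see getGeneratedKeys() above), so this overload and
        // the two above it simply delegate to execute(String)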
return execute(sql); + } + + @Override + public int getResultSetHoldability() throws SQLException { + checkOpen(); + return ResultSet.CLOSE_CURSORS_AT_COMMIT; + } + + @Override + public boolean isClosed() { + return closed; + } + + @Override + public void setPoolable(boolean poolable) throws SQLException { + checkOpen(); + // no-op + } + + @Override + public boolean isPoolable() throws SQLException { + checkOpen(); + return false; + } + + @Override + public void closeOnCompletion() throws SQLException { + checkOpen(); + closeOnCompletion = true; + } + + @Override + public boolean isCloseOnCompletion() throws SQLException { + checkOpen(); + return closeOnCompletion; + } + + protected final void checkOpen() throws SQLException { + if (isClosed()) { + throw new SQLException("Statement is closed"); + } + } + + protected final void closeResultSet() throws SQLException { + if (rs != null) { + ignoreResultSetClose = true; + try { + rs.close(); + } finally { + rs = null; + ignoreResultSetClose = false; + } + } + } + + final void resultSetWasClosed() throws SQLException { + if (closeOnCompletion && !ignoreResultSetClose) { + close(); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcWrapper.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcWrapper.java new file mode 100644 index 0000000000000..646a9593c782d --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcWrapper.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import java.sql.SQLException; +import java.sql.Wrapper; + +interface JdbcWrapper extends Wrapper { + + @Override + default boolean isWrapperFor(Class iface) throws SQLException { + return iface != null && iface.isAssignableFrom(getClass()); + } + + @SuppressWarnings("unchecked") + @Override + default T unwrap(Class iface) throws SQLException { + if (isWrapperFor(iface)) { + return (T) this; + } + throw new SQLException(); + } +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/PreparedQuery.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/PreparedQuery.java new file mode 100644 index 0000000000000..4aaf337f2b772 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/PreparedQuery.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.xpack.sql.jdbc.JdbcSQLException; +import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.sql.JDBCType; +import java.sql.SQLException; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +class PreparedQuery { + + static class ParamInfo { + JDBCType type; + Object value; + + ParamInfo(Object value, JDBCType type) { + this.value = value; + this.type = type; + } + } + + private final String sql; + private final ParamInfo[] params; + + private PreparedQuery(String sql, int paramCount) { + this.sql = sql; + this.params = new ParamInfo[paramCount]; + clearParams(); + } + + ParamInfo getParam(int param) throws JdbcSQLException { + if (param < 1 || param > params.length) { + throw new JdbcSQLException("Invalid parameter index [" + param + "]"); + } + return params[param - 1]; + } + + void setParam(int param, Object value, JDBCType type) throws JdbcSQLException { + if (param < 1 || param > params.length) { + throw new JdbcSQLException("Invalid parameter index [" + param + "]"); + } + params[param - 1].value = value; + params[param - 1].type = type; + } + + int paramCount() { + return params.length; + } + + void clearParams() { + for (int i = 0; i < params.length; i++) { + params[i] = new ParamInfo(null, JDBCType.VARCHAR); + } + } + + /** + * Returns the sql statement + */ + String sql() { + return sql; + } + + /** + * Returns the parameters if the SQL statement is parametrized + */ + List params() { + return Arrays.stream(this.params).map( + paramInfo -> new SqlTypedParamValue(paramInfo.value, DataType.fromJdbcType(paramInfo.type)) + ).collect(Collectors.toList()); + } + + @Override + public String toString() { + return sql() + " " + params(); + } + + // Creates a PreparedQuery + static PreparedQuery prepare(String sql) throws SQLException { + return new PreparedQuery(sql, SqlQueryParameterAnalyzer.parametersCount(sql)); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/SqlQueryParameterAnalyzer.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/SqlQueryParameterAnalyzer.java new file mode 100644 index 0000000000000..8278857bcfa75 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/SqlQueryParameterAnalyzer.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import java.sql.SQLException; + +/** + * Simplistic parser that can find parameters and escape sequences in a query and counts the number of parameters + *
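 * For example, a made-up query such as {@code "SELECT name FROM emp WHERE age > ? AND dept = ?"} is reported
 * as having two parameters, while {@code ?} characters inside string literals or SQL comments are not counted.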
<p>
+ * It is used by JDBC client to approximate the number of parameters that JDBC client should accept. The parser is simplistic + * in a sense that it can accept ? in the places where it's not allowed by the server (for example as a index name), but that's + * a reasonable compromise to avoid sending the prepared statement server to the server for extra validation. + */ +public final class SqlQueryParameterAnalyzer { + + private SqlQueryParameterAnalyzer() { + + } + + /** + * Returns number of parameters in the specified SQL query + */ + public static int parametersCount(String sql) throws SQLException { + + int l = sql.length(); + int params = 0; + for (int i = 0; i < l; i++) { + char c = sql.charAt(i); + + switch (c) { + case '{': + i = skipJdbcEscape(i, sql); + break; + case '\'': + i = skipString(i, sql, c); + break; + case '"': + i = skipString(i, sql, c); + break; + case '?': + params ++; + break; + case '-': + if (i + 1 < l && sql.charAt(i + 1) == '-') { + i = skipLineComment(i, sql); + } + break; + case '/': + if (i + 1 < l && sql.charAt(i + 1) == '*') { + i = skipMultiLineComment(i, sql); + } + break; + } + } + return params; + } + + /** + * Skips jdbc escape sequence starting at the current position i, returns the length of the sequence + */ + private static int skipJdbcEscape(int i, String sql) throws SQLException { + // TODO: JDBC escape syntax + // https://db.apache.org/derby/docs/10.5/ref/rrefjdbc1020262.html + throw new SQLException("Jdbc escape sequences are not supported yet"); + } + + + /** + * Skips a line comment starting at the current position i, returns the length of the comment + */ + private static int skipLineComment(int i, String sql) { + for (; i < sql.length(); i++) { + char c = sql.charAt(i); + if (c == '\n' || c == '\r') { + return i; + } + } + return i; + } + + /** + * Skips a multi-line comment starting at the current position i, returns the length of the comment + */ + private static int skipMultiLineComment(int i, String sql) throws SQLException { + int block = 0; + + for (; i < sql.length() - 1; i++) { + char c = sql.charAt(i); + if (c == '/' && sql.charAt(i + 1) == '*') { + i++; + block++; + } else if (c == '*' && sql.charAt(i + 1) == '/') { + i++; + block--; + } + if (block == 0) { + return i; + } + } + throw new SQLException("Cannot parse given sql; unclosed /* comment"); + } + + /** + * Skips a string starting at the current position i, returns the length of the string + */ + private static int skipString(int i, String sql, char q) throws SQLException { + for (i = i + 1; i < sql.length(); i++) { + char c = sql.charAt(i); + if (c == q) { + // double quotes mean escaping + if (i + 1 < sql.length() && sql.charAt(i + 1) == q) { + i++; + } else { + return i; + } + } + } + throw new SQLException("Cannot parse given sql; unclosed string"); + } +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java new file mode 100644 index 0000000000000..a1fa04ef1afd0 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java @@ -0,0 +1,482 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.xpack.sql.jdbc.JdbcSQLException; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.sql.Date; +import java.sql.JDBCType; +import java.sql.SQLException; +import java.sql.Time; +import java.sql.Timestamp; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.util.Calendar; +import java.util.GregorianCalendar; +import java.util.Locale; +import java.util.function.Function; + +import static java.lang.String.format; +import static java.util.Calendar.DAY_OF_MONTH; +import static java.util.Calendar.ERA; +import static java.util.Calendar.HOUR_OF_DAY; +import static java.util.Calendar.MILLISECOND; +import static java.util.Calendar.MINUTE; +import static java.util.Calendar.MONTH; +import static java.util.Calendar.SECOND; +import static java.util.Calendar.YEAR; + +/** + * Conversion utilities for conversion of JDBC types to Java type and back + *
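+ * <p>
+ * A minimal sketch (illustrative only) of the two conversion entry points: the raw JSON value is first
+ * normalized for the column type and can then be converted to a caller-requested Java class.
+ * <pre>{@code
+ * Object columnValue = TypeConverter.convert(42, JDBCType.INTEGER);               // Integer 42
+ * Long asLong = TypeConverter.convert(columnValue, JDBCType.INTEGER, Long.class); // Long 42L
+ * }</pre>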
+ * Only the following JDBC types are supported as part of the Elasticsearch response and as parameter values. + * See org.elasticsearch.xpack.sql.type.DataType for details. + *
+ * NULL, BOOLEAN, TINYINT, SMALLINT, INTEGER, BIGINT, DOUBLE, REAL, FLOAT, VARCHAR, VARBINARY and TIMESTAMP + */ +final class TypeConverter { + + private TypeConverter() { + + } + + private static final long DAY_IN_MILLIS = 60 * 60 * 24; + + /** + * Converts millisecond after epoc to date + */ + static Date convertDate(Long millis, Calendar cal) { + return dateTimeConvert(millis, cal, c -> { + c.set(HOUR_OF_DAY, 0); + c.set(MINUTE, 0); + c.set(SECOND, 0); + c.set(MILLISECOND, 0); + return new Date(c.getTimeInMillis()); + }); + } + + /** + * Converts millisecond after epoc to time + */ + static Time convertTime(Long millis, Calendar cal) { + return dateTimeConvert(millis, cal, c -> { + c.set(ERA, GregorianCalendar.AD); + c.set(YEAR, 1970); + c.set(MONTH, 0); + c.set(DAY_OF_MONTH, 1); + return new Time(c.getTimeInMillis()); + }); + } + + /** + * Converts millisecond after epoc to timestamp + */ + static Timestamp convertTimestamp(Long millis, Calendar cal) { + return dateTimeConvert(millis, cal, c -> new Timestamp(c.getTimeInMillis())); + } + + private static T dateTimeConvert(Long millis, Calendar c, Function creator) { + if (millis == null) { + return null; + } + long initial = c.getTimeInMillis(); + try { + c.setTimeInMillis(millis); + return creator.apply(c); + } finally { + c.setTimeInMillis(initial); + } + } + + /** + * Converts object val from columnType to type + */ + @SuppressWarnings("unchecked") + static T convert(Object val, JDBCType columnType, Class type) throws SQLException { + if (type == null) { + return (T) convert(val, columnType); + } + if (type == String.class) { + return (T) asString(convert(val, columnType)); + } + if (type == Boolean.class) { + return (T) asBoolean(val, columnType); + } + if (type == Byte.class) { + return (T) asByte(val, columnType); + } + if (type == Short.class) { + return (T) asShort(val, columnType); + } + if (type == Integer.class) { + return (T) asInteger(val, columnType); + } + if (type == Long.class) { + return (T) asLong(val, columnType); + } + if (type == Float.class) { + return (T) asFloat(val, columnType); + } + if (type == Double.class) { + return (T) asDouble(val, columnType); + } + if (type == Date.class) { + return (T) asDate(val, columnType); + } + if (type == Time.class) { + return (T) asTime(val, columnType); + } + if (type == Timestamp.class) { + return (T) asTimestamp(val, columnType); + } + if (type == byte[].class) { + return (T) asByteArray(val, columnType); + } + // + // JDK 8 types + // + if (type == LocalDate.class) { + return (T) asLocalDate(val, columnType); + } + if (type == LocalTime.class) { + return (T) asLocalTime(val, columnType); + } + if (type == LocalDateTime.class) { + return (T) asLocalDateTime(val, columnType); + } + if (type == OffsetTime.class) { + return (T) asOffsetTime(val, columnType); + } + if (type == OffsetDateTime.class) { + return (T) asOffsetDateTime(val, columnType); + } + throw new SQLException("Conversion from type [" + columnType + "] to [" + type.getName() + "] not supported"); + } + + /** + * Translates numeric JDBC type into corresponding Java class + *
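+ * <p>
+ * Illustrative example; the exact string is taken from the {@code DataType} mapping used below:
+ * <pre>{@code
+ * String className = TypeConverter.classNameOf(JDBCType.BIGINT); // e.g. "java.lang.Long"
+ * }</pre>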
+ * See {@link javax.sql.rowset.RowSetMetaDataImpl#getColumnClassName} and + * https://db.apache.org/derby/docs/10.5/ref/rrefjdbc20377.html + */ + public static String classNameOf(JDBCType jdbcType) throws JdbcSQLException { + final DataType dataType; + try { + dataType = DataType.fromJdbcType(jdbcType); + } catch (IllegalArgumentException ex) { + // Convert unsupported exception to JdbcSQLException + throw new JdbcSQLException(ex, ex.getMessage()); + } + if (dataType.javaName == null) { + throw new JdbcSQLException("Unsupported JDBC type [" + jdbcType + "]"); + } + return dataType.javaName; + } + + /** + * Converts the object from JSON representation to the specified JDBCType + *
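+ * <p>
+ * A short sketch (illustrative) of the widening/narrowing this method performs on parsed JSON values:
+ * <pre>{@code
+ * TypeConverter.convert(5, JDBCType.SMALLINT);   // Short 5 (the parser may hand back an Integer)
+ * TypeConverter.convert("NaN", JDBCType.DOUBLE); // Double.NaN (special values arrive as strings)
+ * }</pre>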
+ * The returned types need to correspond to the ES portion of the classes returned by {@link TypeConverter#classNameOf} + */ + static Object convert(Object v, JDBCType columnType) throws SQLException { + switch (columnType) { + case NULL: + return null; + case BOOLEAN: + case VARCHAR: + return v; // These types are already represented correctly in JSON + case TINYINT: + return ((Number) v).byteValue(); // Parser might return it as integer or long - need to update to the correct type + case SMALLINT: + return ((Number) v).shortValue(); // Parser might return it as integer or long - need to update to the correct type + case INTEGER: + return ((Number) v).intValue(); + case BIGINT: + return ((Number) v).longValue(); + case FLOAT: + case DOUBLE: + return doubleValue(v); // Double might be represented as string for infinity and NaN values + case REAL: + return floatValue(v); // Float might be represented as string for infinity and NaN values + case TIMESTAMP: + return ((Number) v).longValue(); + default: + throw new SQLException("Unexpected column type [" + columnType.getName() + "]"); + + } + } + + /** + * Returns true if the type represents a signed number, false otherwise + *
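+ * <p>
+ * For example (illustrative): numeric types report {@code true}, non-numeric types {@code false}.
+ * <pre>{@code
+ * TypeConverter.isSigned(JDBCType.INTEGER); // true
+ * TypeConverter.isSigned(JDBCType.VARCHAR); // false
+ * }</pre>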
+ * It needs to support both params and column types + */ + static boolean isSigned(JDBCType jdbcType) throws SQLException { + final DataType dataType; + try { + dataType = DataType.fromJdbcType(jdbcType); + } catch (IllegalArgumentException ex) { + // Convert unsupported exception to JdbcSQLException + throw new JdbcSQLException(ex, ex.getMessage()); + } + return dataType.isSigned(); + } + + private static Double doubleValue(Object v) { + if (v instanceof String) { + switch ((String) v) { + case "NaN": + return Double.NaN; + case "Infinity": + return Double.POSITIVE_INFINITY; + case "-Infinity": + return Double.NEGATIVE_INFINITY; + default: + return Double.parseDouble((String) v); + } + } + return ((Number) v).doubleValue(); + } + + private static Float floatValue(Object v) { + if (v instanceof String) { + switch ((String) v) { + case "NaN": + return Float.NaN; + case "Infinity": + return Float.POSITIVE_INFINITY; + case "-Infinity": + return Float.NEGATIVE_INFINITY; + default: + return Float.parseFloat((String) v); + } + } + return ((Number) v).floatValue(); + } + + private static String asString(Object nativeValue) { + return nativeValue == null ? null : String.valueOf(nativeValue); + } + + private static Boolean asBoolean(Object val, JDBCType columnType) throws SQLException { + switch (columnType) { + case BOOLEAN: + case TINYINT: + case SMALLINT: + case INTEGER: + case BIGINT: + case REAL: + case FLOAT: + case DOUBLE: + return Boolean.valueOf(Integer.signum(((Number) val).intValue()) == 0); + default: + throw new SQLException("Conversion from type [" + columnType + "] to [Boolean] not supported"); + + } + } + + private static Byte asByte(Object val, JDBCType columnType) throws SQLException { + switch (columnType) { + case BOOLEAN: + return Byte.valueOf(((Boolean) val).booleanValue() ? (byte) 1 : (byte) 0); + case TINYINT: + case SMALLINT: + case INTEGER: + case BIGINT: + return safeToByte(((Number) val).longValue()); + case REAL: + case FLOAT: + case DOUBLE: + return safeToByte(safeToLong(((Number) val).doubleValue())); + default: + } + + throw new SQLException("Conversion from type [" + columnType + "] to [Byte] not supported"); + } + + private static Short asShort(Object val, JDBCType columnType) throws SQLException { + switch (columnType) { + case BOOLEAN: + return Short.valueOf(((Boolean) val).booleanValue() ? (short) 1 : (short) 0); + case TINYINT: + case SMALLINT: + case INTEGER: + case BIGINT: + return safeToShort(((Number) val).longValue()); + case REAL: + case FLOAT: + case DOUBLE: + return safeToShort(safeToLong(((Number) val).doubleValue())); + default: + } + + throw new SQLException("Conversion from type [" + columnType + "] to [Short] not supported"); + } + + private static Integer asInteger(Object val, JDBCType columnType) throws SQLException { + switch (columnType) { + case BOOLEAN: + return Integer.valueOf(((Boolean) val).booleanValue() ? 1 : 0); + case TINYINT: + case SMALLINT: + case INTEGER: + case BIGINT: + return safeToInt(((Number) val).longValue()); + case REAL: + case FLOAT: + case DOUBLE: + return safeToInt(safeToLong(((Number) val).doubleValue())); + default: + } + + throw new SQLException("Conversion from type [" + columnType + "] to [Integer] not supported"); + } + + private static Long asLong(Object val, JDBCType columnType) throws SQLException { + switch (columnType) { + case BOOLEAN: + return Long.valueOf(((Boolean) val).booleanValue() ? 
1 : 0); + case TINYINT: + case SMALLINT: + case INTEGER: + case BIGINT: + return Long.valueOf(((Number) val).longValue()); + case REAL: + case FLOAT: + case DOUBLE: + return safeToLong(((Number) val).doubleValue()); + case TIMESTAMP: + return ((Number) val).longValue(); + default: + } + + throw new SQLException("Conversion from type [" + columnType + "] to [Long] not supported"); + } + + private static Float asFloat(Object val, JDBCType columnType) throws SQLException { + switch (columnType) { + case BOOLEAN: + return Float.valueOf(((Boolean) val).booleanValue() ? 1 : 0); + case TINYINT: + case SMALLINT: + case INTEGER: + case BIGINT: + return Float.valueOf((float) ((Number) val).longValue()); + case REAL: + case FLOAT: + case DOUBLE: + return new Float(((Number) val).doubleValue()); + default: + } + + throw new SQLException("Conversion from type [" + columnType + "] to [Float] not supported"); + } + + private static Double asDouble(Object val, JDBCType columnType) throws SQLException { + switch (columnType) { + case BOOLEAN: + return Double.valueOf(((Boolean) val).booleanValue() ? 1 : 0); + case TINYINT: + case SMALLINT: + case INTEGER: + case BIGINT: + return Double.valueOf((double) ((Number) val).longValue()); + case REAL: + case FLOAT: + case DOUBLE: + return new Double(((Number) val).doubleValue()); + default: + } + + throw new SQLException("Conversion from type [" + columnType + "] to [Double] not supported"); + } + + private static Date asDate(Object val, JDBCType columnType) throws SQLException { + if (columnType == JDBCType.TIMESTAMP) { + return new Date(utcMillisRemoveTime(((Number) val).longValue())); + } + throw new SQLException("Conversion from type [" + columnType + "] to [Date] not supported"); + } + + private static Time asTime(Object val, JDBCType columnType) throws SQLException { + if (columnType == JDBCType.TIMESTAMP) { + return new Time(utcMillisRemoveDate(((Number) val).longValue())); + } + throw new SQLException("Conversion from type [" + columnType + "] to [Time] not supported"); + } + + private static Timestamp asTimestamp(Object val, JDBCType columnType) throws SQLException { + if (columnType == JDBCType.TIMESTAMP) { + return new Timestamp(((Number) val).longValue()); + } + throw new SQLException("Conversion from type [" + columnType + "] to [Timestamp] not supported"); + } + + private static byte[] asByteArray(Object val, JDBCType columnType) { + throw new UnsupportedOperationException(); + } + + private static LocalDate asLocalDate(Object val, JDBCType columnType) { + throw new UnsupportedOperationException(); + } + + private static LocalTime asLocalTime(Object val, JDBCType columnType) { + throw new UnsupportedOperationException(); + } + + private static LocalDateTime asLocalDateTime(Object val, JDBCType columnType) { + throw new UnsupportedOperationException(); + } + + private static OffsetTime asOffsetTime(Object val, JDBCType columnType) { + throw new UnsupportedOperationException(); + } + + private static OffsetDateTime asOffsetDateTime(Object val, JDBCType columnType) { + throw new UnsupportedOperationException(); + } + + + private static long utcMillisRemoveTime(long l) { + return l - (l % DAY_IN_MILLIS); + } + + private static long utcMillisRemoveDate(long l) { + return l % DAY_IN_MILLIS; + } + + private static byte safeToByte(long x) throws SQLException { + if (x > Byte.MAX_VALUE || x < Byte.MIN_VALUE) { + throw new SQLException(format(Locale.ROOT, "Numeric %d out of range", Long.toString(x))); + } + return (byte) x; + } + + private static short 
safeToShort(long x) throws SQLException { + if (x > Short.MAX_VALUE || x < Short.MIN_VALUE) { + throw new SQLException(format(Locale.ROOT, "Numeric %d out of range", Long.toString(x))); + } + return (short) x; + } + + private static int safeToInt(long x) throws SQLException { + if (x > Integer.MAX_VALUE || x < Integer.MIN_VALUE) { + throw new SQLException(format(Locale.ROOT, "Numeric %d out of range", Long.toString(x))); + } + return (int) x; + } + + private static long safeToLong(double x) throws SQLException { + if (x > Long.MAX_VALUE || x < Long.MIN_VALUE) { + throw new SQLException(format(Locale.ROOT, "Numeric %d out of range", Double.toString(x))); + } + return Math.round(x); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbcx/JdbcDataSource.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbcx/JdbcDataSource.java new file mode 100644 index 0000000000000..ded8e8893feef --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbcx/JdbcDataSource.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.jdbcx; + +import org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration; +import org.elasticsearch.xpack.sql.jdbc.debug.Debug; +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConnection; +import org.elasticsearch.xpack.sql.client.shared.Version; + +import java.io.PrintWriter; +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.Wrapper; +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; + +import javax.sql.DataSource; + +import static org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration.CONNECT_TIMEOUT; + +public class JdbcDataSource implements DataSource, Wrapper { + + static { + Version.CURRENT.toString(); + } + + private String url; + private PrintWriter writer; + private int loginTimeout; + private Properties props; + + public JdbcDataSource() {} + + @Override + public PrintWriter getLogWriter() throws SQLException { + return writer; + } + + @Override + public void setLogWriter(PrintWriter out) throws SQLException { + this.writer = out; + } + + @Override + public void setLoginTimeout(int seconds) throws SQLException { + if (seconds < 0) { + throw new SQLException("Negative timeout specified " + seconds); + } + loginTimeout = seconds; + } + + @Override + public int getLoginTimeout() throws SQLException { + return loginTimeout; + } + + @Override + public Logger getParentLogger() throws SQLFeatureNotSupportedException { + throw new SQLFeatureNotSupportedException(); + } + + public String getUrl() { + return url; + } + + public void setUrl(String url) { + this.url = url; + } + + public Properties getProperties() { + Properties copy = new Properties(); + if (props != null) { + copy.putAll(props); + } + return copy; + } + + public void setProperties(Properties props) { + this.props = new Properties(); + this.props.putAll(props); + } + + @Override + public Connection getConnection() throws SQLException { + return doGetConnection(getProperties()); + } + + @Override + public Connection 
getConnection(String username, String password) throws SQLException { + Properties p = getProperties(); + p.setProperty(ConnectionConfiguration.AUTH_USER, username); + p.setProperty(ConnectionConfiguration.AUTH_PASS, password); + return doGetConnection(p); + } + + private Connection doGetConnection(Properties p) throws SQLException { + JdbcConfiguration cfg = JdbcConfiguration.create(url, p, loginTimeout); + JdbcConnection con = new JdbcConnection(cfg); + // enable logging if needed + return cfg.debug() ? Debug.proxy(cfg, con, writer) : con; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return iface != null && iface.isAssignableFrom(getClass()); + } + + @SuppressWarnings("unchecked") + @Override + public T unwrap(Class iface) throws SQLException { + if (isWrapperFor(iface)) { + return (T) this; + } + throw new SQLException(); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/Cursor.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/Cursor.java new file mode 100644 index 0000000000000..5549bc57d63e2 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/Cursor.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.net.client; + +import java.sql.SQLException; +import java.util.List; + +import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo; + +public interface Cursor { + + List columns(); + + default int columnSize() { + return columns().size(); + } + + boolean next() throws SQLException; + + Object column(int column); + + /** + * Number of rows that this cursor has pulled back from the + * server in the current batch. + */ + int batchSize(); + + void close() throws SQLException; +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/DefaultCursor.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/DefaultCursor.java new file mode 100644 index 0000000000000..ebc025d79694d --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/DefaultCursor.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.client; + +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo; + +import java.sql.SQLException; +import java.util.List; + +class DefaultCursor implements Cursor { + + private final JdbcHttpClient client; + private final RequestMeta meta; + + private final List columnInfos; + private List> rows; + private int row = -1; + private String cursor; + + DefaultCursor(JdbcHttpClient client, String cursor, List columnInfos, List> rows, RequestMeta meta) { + this.client = client; + this.meta = meta; + this.cursor = cursor; + this.columnInfos = columnInfos; + this.rows = rows; + } + + @Override + public List columns() { + return columnInfos; + } + + @Override + public boolean next() throws SQLException { + if (row < rows.size() - 1) { + row++; + return true; + } + else { + if (cursor.isEmpty() == false) { + Tuple>> nextPage = client.nextPage(cursor, meta); + cursor = nextPage.v1(); + rows = nextPage.v2(); + row = -1; + return next(); + } + return false; + } + } + + @Override + public Object column(int column) { + return rows.get(row).get(column); + } + + @Override + public int batchSize() { + return rows.size(); + } + + @Override + public void close() throws SQLException { + if (cursor.isEmpty() == false) { + client.queryClose(cursor); + } + } +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/JdbcHttpClient.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/JdbcHttpClient.java new file mode 100644 index 0000000000000..ab4cdff985863 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/JdbcHttpClient.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.net.client; + +import org.elasticsearch.action.main.MainResponse; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.sql.client.HttpClient; +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo; +import org.elasticsearch.xpack.sql.jdbc.net.protocol.InfoResponse; +import org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest; +import org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest; +import org.elasticsearch.xpack.sql.plugin.SqlQueryRequest; +import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; +import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; + +import java.sql.SQLException; +import java.util.List; +import java.util.TimeZone; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.sql.client.shared.StringUtils.EMPTY; + +/** + * JDBC specific HTTP client. + * Since JDBC is not thread-safe, neither is this class. 
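+ * <p>
+ * Minimal usage sketch (illustrative only; it assumes an Elasticsearch node reachable at the given URL
+ * and relies on the configuration, request and cursor classes introduced alongside this client):
+ * <pre>{@code
+ * JdbcConfiguration cfg = JdbcConfiguration.create("jdbc:es://localhost:9200/", null, 0);
+ * JdbcHttpClient client = new JdbcHttpClient(cfg);
+ * RequestMeta meta = new RequestMeta(1000, 0, 0);
+ * Cursor cursor = client.query("SELECT 1", Collections.emptyList(), meta);
+ * while (cursor.next()) {
+ *     Object value = cursor.column(0);
+ * }
+ * cursor.close();
+ * }</pre>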
+ */ +public class JdbcHttpClient { + private final HttpClient httpClient; + private final JdbcConfiguration conCfg; + private InfoResponse serverInfo; + + public JdbcHttpClient(JdbcConfiguration conCfg) throws SQLException { + httpClient = new HttpClient(conCfg); + this.conCfg = conCfg; + } + + public boolean ping(long timeoutInMs) throws SQLException { + return httpClient.ping(timeoutInMs); + } + + public Cursor query(String sql, List params, RequestMeta meta) throws SQLException { + int fetch = meta.fetchSize() > 0 ? meta.fetchSize() : conCfg.pageSize(); + SqlQueryRequest sqlRequest = new SqlQueryRequest(AbstractSqlRequest.Mode.JDBC, sql, params, null, + AbstractSqlQueryRequest.DEFAULT_TIME_ZONE, + fetch, TimeValue.timeValueMillis(meta.timeoutInMs()), TimeValue.timeValueMillis(meta.queryTimeoutInMs()), ""); + SqlQueryResponse response = httpClient.query(sqlRequest); + return new DefaultCursor(this, response.cursor(), toJdbcColumnInfo(response.columns()), response.rows(), meta); + } + + /** + * Read the next page of results and returning + * the scroll id to use to fetch the next page. + */ + public Tuple>> nextPage(String cursor, RequestMeta meta) throws SQLException { + SqlQueryRequest sqlRequest = new SqlQueryRequest().cursor(cursor); + sqlRequest.mode(AbstractSqlRequest.Mode.JDBC); + sqlRequest.requestTimeout(TimeValue.timeValueMillis(meta.timeoutInMs())); + sqlRequest.pageTimeout(TimeValue.timeValueMillis(meta.queryTimeoutInMs())); + SqlQueryResponse response = httpClient.query(sqlRequest); + return new Tuple<>(response.cursor(), response.rows()); + } + + public boolean queryClose(String cursor) throws SQLException { + return httpClient.queryClose(cursor); + } + + public InfoResponse serverInfo() throws SQLException { + if (serverInfo == null) { + serverInfo = fetchServerInfo(); + } + return serverInfo; + } + + private InfoResponse fetchServerInfo() throws SQLException { + MainResponse mainResponse = httpClient.serverInfo(); + return new InfoResponse(mainResponse.getClusterName().value(), mainResponse.getVersion().major, mainResponse.getVersion().minor); + } + + /** + * Converts REST column metadata into JDBC column metadata + */ + private List toJdbcColumnInfo(List columns) { + return columns.stream().map(columnInfo -> + new ColumnInfo(columnInfo.name(), columnInfo.jdbcType(), EMPTY, EMPTY, EMPTY, EMPTY, columnInfo.displaySize()) + ).collect(Collectors.toList()); + } +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/RequestMeta.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/RequestMeta.java new file mode 100644 index 0000000000000..c8f41bf6b73cc --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/RequestMeta.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.client; + +public class RequestMeta { + + private int fetchSize; + private long timeoutInMs; + private long queryTimeoutInMs; + + public RequestMeta(int fetchSize, long timeout, long queryTimeoutInMs) { + this.fetchSize = fetchSize; + this.timeoutInMs = timeout; + this.queryTimeoutInMs = queryTimeoutInMs; + } + + public RequestMeta queryTimeout(long timeout) { + this.queryTimeoutInMs = timeout; + return this; + } + + public RequestMeta timeout(long timeout) { + this.timeoutInMs = timeout; + return this; + } + + public RequestMeta fetchSize(int size) { + this.fetchSize = size; + return this; + } + + public int fetchSize() { + return fetchSize; + } + + public long timeoutInMs() { + return timeoutInMs; + } + + public long queryTimeoutInMs() { + return queryTimeoutInMs; + } +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/ColumnInfo.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/ColumnInfo.java new file mode 100644 index 0000000000000..6e61d65ff532b --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/ColumnInfo.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import java.sql.JDBCType; +import java.util.Objects; + +public class ColumnInfo { + public final String catalog; + public final String schema; + public final String table; + public final String label; + public final String name; + public final int displaySize; + public final JDBCType type; + + public ColumnInfo(String name, JDBCType type, String table, String catalog, String schema, String label, int displaySize) { + if (name == null) { + throw new IllegalArgumentException("[name] must not be null"); + } + if (type == null) { + throw new IllegalArgumentException("[type] must not be null"); + } + if (table == null) { + throw new IllegalArgumentException("[table] must not be null"); + } + if (catalog == null) { + throw new IllegalArgumentException("[catalog] must not be null"); + } + if (schema == null) { + throw new IllegalArgumentException("[schema] must not be null"); + } + if (label == null) { + throw new IllegalArgumentException("[label] must not be null"); + } + this.name = name; + this.type = type; + this.table = table; + this.catalog = catalog; + this.schema = schema; + this.label = label; + this.displaySize = displaySize; + } + + public int displaySize() { + // 0 - means unknown + return displaySize; + } + + @Override + public String toString() { + StringBuilder b = new StringBuilder(); + if (false == "".equals(table)) { + b.append(table).append('.'); + } + b.append(name).append("').toString(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + ColumnInfo other = (ColumnInfo) obj; + return name.equals(other.name) + && type.equals(other.type) + && table.equals(other.table) + && catalog.equals(other.catalog) + && schema.equals(other.schema) + && label.equals(other.label) + && displaySize == other.displaySize; + } + + @Override + public int hashCode() { + return Objects.hash(name, type, table, catalog, schema, label, displaySize); + } +} diff --git 
a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/InfoResponse.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/InfoResponse.java new file mode 100644 index 0000000000000..58a442ca74bf2 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/InfoResponse.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +/** + * General information about the server. + */ +public class InfoResponse { + public final String cluster; + public final int majorVersion; + public final int minorVersion; + + public InfoResponse(String clusterName, byte versionMajor, byte versionMinor) { + this.cluster = clusterName; + this.majorVersion = versionMajor; + this.minorVersion = versionMinor; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/Nullable.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/Nullable.java new file mode 100644 index 0000000000000..47818ff7acb01 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/Nullable.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * The presence of this annotation on a method parameter indicates that + * {@code null} is an acceptable value for that parameter. It should not be + * used for parameters of primitive types. + */ +@Documented +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.PARAMETER, ElementType.FIELD, ElementType.METHOD}) +public @interface Nullable { +} diff --git a/x-pack/plugin/sql/jdbc/src/main/resources/META-INF/services/java.sql.Driver b/x-pack/plugin/sql/jdbc/src/main/resources/META-INF/services/java.sql.Driver new file mode 100644 index 0000000000000..672e7aafcf7e5 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/main/resources/META-INF/services/java.sql.Driver @@ -0,0 +1 @@ +org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcDriver \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/DriverManagerRegistrationTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/DriverManagerRegistrationTests.java new file mode 100644 index 0000000000000..9fee037229220 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/DriverManagerRegistrationTests.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc; + +import org.elasticsearch.Version; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcDriver; + +import java.security.AccessController; +import java.security.PrivilegedExceptionAction; +import java.sql.Driver; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.function.Consumer; + +public class DriverManagerRegistrationTests extends ESTestCase { + + public void testRegistration() throws Exception { + driverManagerTemplate(d -> assertNotNull(d)); + } + + public void testVersioning() throws Exception { + driverManagerTemplate(d -> { + /* This test will only work properly in gradle because in gradle we run the tests + * using the jar. */ + + assertNotEquals(String.valueOf(Version.CURRENT.major), d.getMajorVersion()); + assertNotEquals(String.valueOf(Version.CURRENT.minor), d.getMinorVersion()); + }); + } + + private static void driverManagerTemplate(Consumer c) throws Exception { + String url = "jdbc:es:localhost:9200/"; + Driver driver = null; + try { + // can happen (if the driver jar was not loaded) + driver = DriverManager.getDriver(url); + } catch (SQLException ex) { + assertEquals("No suitable driver", ex.getMessage()); + } + boolean set = driver != null; + + try { + JdbcDriver d = JdbcDriver.register(); + if (driver != null) { + assertEquals(driver, d); + } + + c.accept(d); + + AccessController.doPrivileged((PrivilegedExceptionAction) () -> { + // mimic DriverManager and unregister the driver + JdbcDriver.deregister(); + return null; + }); + + SQLException ex = expectThrows(SQLException.class, () -> DriverManager.getDriver(url)); + assertEquals("No suitable driver", ex.getMessage()); + } finally { + if (set) { + JdbcDriver.register(); + } + } + } +} diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java new file mode 100644 index 0000000000000..e15d011e387e3 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/JdbcConfigurationTests.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; + +import java.sql.SQLException; +import java.util.Properties; + +import static org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration.CONNECT_TIMEOUT; +import static org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration.PAGE_TIMEOUT; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class JdbcConfigurationTests extends ESTestCase { + + private JdbcConfiguration ci(String url) throws SQLException { + return JdbcConfiguration.create(url, null, 0); + } + + public void testJustThePrefix() throws Exception { + Exception e = expectThrows(JdbcSQLException.class, () -> ci("jdbc:es:")); + assertEquals("Expected [jdbc:es://] url, received [jdbc:es:]", e.getMessage()); + } + + public void testJustTheHost() throws Exception { + assertThat(ci("jdbc:es://localhost").baseUri().toString(), is("http://localhost:9200/")); + } + + public void testHostAndPort() throws Exception { + assertThat(ci("jdbc:es://localhost:1234").baseUri().toString(), is("http://localhost:1234/")); + } + + public void testTrailingSlashForHost() throws Exception { + assertThat(ci("jdbc:es://localhost:1234/").baseUri().toString(), is("http://localhost:1234/")); + } + + public void testMultiPathSuffix() throws Exception { + assertThat(ci("jdbc:es://a:1/foo/bar/tar").baseUri().toString(), is("http://a:1/foo/bar/tar")); + } + + public void testV6Localhost() throws Exception { + assertThat(ci("jdbc:es://[::1]:54161/foo/bar").baseUri().toString(), is("http://[::1]:54161/foo/bar")); + } + + public void testDebug() throws Exception { + JdbcConfiguration ci = ci("jdbc:es://a:1/?debug=true"); + assertThat(ci.baseUri().toString(), is("http://a:1/")); + assertThat(ci.debug(), is(true)); + assertThat(ci.debugOut(), is("err")); + } + + public void testDebugOut() throws Exception { + JdbcConfiguration ci = ci("jdbc:es://a:1/?debug=true&debug.output=jdbc.out"); + assertThat(ci.baseUri().toString(), is("http://a:1/")); + assertThat(ci.debug(), is(true)); + assertThat(ci.debugOut(), is("jdbc.out")); + } + + public void testTypeInParam() throws Exception { + Exception e = expectThrows(JdbcSQLException.class, () -> ci("jdbc:es://a:1/foo/bar/tar?debug=true&debug.out=jdbc.out")); + assertEquals("Unknown parameter [debug.out] ; did you mean [debug.output]", e.getMessage()); + } + + public void testDebugOutWithSuffix() throws Exception { + JdbcConfiguration ci = ci("jdbc:es://a:1/foo/bar/tar?debug=true&debug.output=jdbc.out"); + assertThat(ci.baseUri().toString(), is("http://a:1/foo/bar/tar")); + assertThat(ci.debug(), is(true)); + assertThat(ci.debugOut(), is("jdbc.out")); + } + + public void testHttpWithSSLEnabled() throws Exception { + JdbcConfiguration ci = ci("jdbc:es://test?ssl=true"); + assertThat(ci.baseUri().toString(), is("https://test:9200/")); + } + + public void testHttpWithSSLDisabled() throws Exception { + JdbcConfiguration ci = ci("jdbc:es://test?ssl=false"); + assertThat(ci.baseUri().toString(), is("http://test:9200/")); + } + + public void testTimoutOverride() throws Exception { + Properties properties = new Properties(); + properties.setProperty(CONNECT_TIMEOUT, "3"); // Should be overridden + properties.setProperty(PAGE_TIMEOUT, "4"); + + String url = "jdbc:es://test?connect.timeout=1&page.timeout=2"; + + // No properties + JdbcConfiguration ci = JdbcConfiguration.create(url, null, 0); + 
assertThat(ci.connectTimeout(), equalTo(1L)); + assertThat(ci.pageTimeout(), equalTo(2L)); + + // Properties override + ci = JdbcConfiguration.create(url, properties, 0); + assertThat(ci.connectTimeout(), equalTo(3L)); + assertThat(ci.pageTimeout(), equalTo(4L)); + + // Driver default override for connection timeout + ci = JdbcConfiguration.create(url, properties, 5); + assertThat(ci.connectTimeout(), equalTo(5000L)); + assertThat(ci.pageTimeout(), equalTo(4L)); + } + + +} diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/VersionTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/VersionTests.java new file mode 100644 index 0000000000000..82cd623f11599 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/VersionTests.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.client.shared.Version; + +public class VersionTests extends ESTestCase { + public void testVersionIsCurrent() { + /* This test will only work properly in gradle because in gradle we run the tests + * using the jar. */ + assertEquals(org.elasticsearch.Version.CURRENT.toString(), Version.CURRENT.version); + assertNotNull(Version.CURRENT.hash); + assertEquals(org.elasticsearch.Version.CURRENT.major, Version.CURRENT.major); + assertEquals(org.elasticsearch.Version.CURRENT.minor, Version.CURRENT.minor); + } +} diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/SqlQueryParameterAnalyzerTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/SqlQueryParameterAnalyzerTests.java new file mode 100644 index 0000000000000..c9676a49e87bd --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/SqlQueryParameterAnalyzerTests.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.test.ESTestCase; + +import java.sql.SQLException; + +public class SqlQueryParameterAnalyzerTests extends ESTestCase { + + public void testNoParameters() throws Exception { + assertEquals(0, SqlQueryParameterAnalyzer.parametersCount("SELECT * FROM table")); + assertEquals(0, SqlQueryParameterAnalyzer.parametersCount("SELECT * FROM 'table'")); + assertEquals(0, SqlQueryParameterAnalyzer.parametersCount("SELECT * FROM \"table\"")); + assertEquals(0, SqlQueryParameterAnalyzer.parametersCount("SELECT * FROM \"table\" WHERE i = 0")); + assertEquals(0, SqlQueryParameterAnalyzer.parametersCount("SELECT * FROM 'table' WHERE s = '?'")); + assertEquals(0, SqlQueryParameterAnalyzer.parametersCount("SELECT * FROM 'table' WHERE s = 'foo''bar''?'")); + assertEquals(0, SqlQueryParameterAnalyzer.parametersCount("SELECT * FROM `table` where b = 'fo\"o\\\"b{ar\\}?b\"az?}\\-?\"?\\?{'")); + + } + + + public void testSingleParameter() throws Exception { + assertEquals(1, SqlQueryParameterAnalyzer.parametersCount("SELECT * FROM 'table' WHERE s = '?' 
AND b = ?")); + assertEquals(1, SqlQueryParameterAnalyzer.parametersCount("SELECT * FROM 'table' WHERE b = ? AND s = '?'")); + assertEquals(1, SqlQueryParameterAnalyzer.parametersCount("SELECT ?/10 /* multiline \n" + + " * query \n" + + " * more ? /* lines */ ? here \n" + + " */ FROM foo")); + assertEquals(1, SqlQueryParameterAnalyzer.parametersCount("SELECT ?")); + + } + + public void testMultipleParameters() throws Exception { + assertEquals(4, SqlQueryParameterAnalyzer.parametersCount("SELECT ?, ?, ? , ?")); + assertEquals(3, SqlQueryParameterAnalyzer.parametersCount("SELECT ?, ?, '?' , ?")); + assertEquals(3, SqlQueryParameterAnalyzer.parametersCount("SELECT ?, ?\n, '?' , ?")); + assertEquals(3, SqlQueryParameterAnalyzer.parametersCount("SELECT ? - 10 -- first parameter with ????\n" + + ", ? -- second parameter with random \" and ' \n" + + ", ? -- last parameter without new line")); + } + + public void testUnclosedJdbcEscape() { + SQLException exception = expectThrows(SQLException.class, () -> SqlQueryParameterAnalyzer.parametersCount("SELECT {foobar")); + assertEquals("Jdbc escape sequences are not supported yet", exception.getMessage()); + } + + public void testUnclosedMultilineComment() { + SQLException exception = expectThrows(SQLException.class, () -> SqlQueryParameterAnalyzer.parametersCount("SELECT /* * * * ")); + assertEquals("Cannot parse given sql; unclosed /* comment", exception.getMessage()); + } + + public void testUnclosedSingleQuoteStrign() { + SQLException exception = expectThrows(SQLException.class, () -> SqlQueryParameterAnalyzer.parametersCount("SELECT ' '' '' ")); + assertEquals("Cannot parse given sql; unclosed string", exception.getMessage()); + } +} diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverterTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverterTests.java new file mode 100644 index 0000000000000..612c46fbe56ef --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverterTests.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.jdbc; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest; +import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; +import org.joda.time.DateTime; + +import java.sql.JDBCType; + +import static org.hamcrest.Matchers.instanceOf; + + +public class TypeConverterTests extends ESTestCase { + + + public void testFloatAsNative() throws Exception { + assertThat(convertAsNative(42.0f, JDBCType.REAL), instanceOf(Float.class)); + assertThat(convertAsNative(42.0, JDBCType.REAL), instanceOf(Float.class)); + assertEquals(42.0f, (float) convertAsNative(42.0, JDBCType.REAL), 0.001f); + assertEquals(Float.NaN, convertAsNative(Float.NaN, JDBCType.REAL)); + assertEquals(Float.NEGATIVE_INFINITY, convertAsNative(Float.NEGATIVE_INFINITY, JDBCType.REAL)); + assertEquals(Float.POSITIVE_INFINITY, convertAsNative(Float.POSITIVE_INFINITY, JDBCType.REAL)); + } + + public void testDoubleAsNative() throws Exception { + JDBCType type = randomFrom(JDBCType.FLOAT, JDBCType.DOUBLE); + assertThat(convertAsNative(42.0, type), instanceOf(Double.class)); + assertEquals(42.0f, (double) convertAsNative(42.0, type), 0.001f); + assertEquals(Double.NaN, convertAsNative(Double.NaN, type)); + assertEquals(Double.NEGATIVE_INFINITY, convertAsNative(Double.NEGATIVE_INFINITY, type)); + assertEquals(Double.POSITIVE_INFINITY, convertAsNative(Double.POSITIVE_INFINITY, type)); + } + + public void testTimestampAsNative() throws Exception { + DateTime now = DateTime.now(); + assertThat(convertAsNative(now, JDBCType.TIMESTAMP), instanceOf(Long.class)); + assertEquals(now.getMillis(), convertAsNative(now, JDBCType.TIMESTAMP)); + } + + private Object convertAsNative(Object value, JDBCType type) throws Exception { + // Simulate sending over XContent + XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + builder.field("value"); + SqlQueryResponse.value(builder, AbstractSqlRequest.Mode.JDBC, value); + builder.endObject(); + builder.close(); + Object copy = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2().get("value"); + return TypeConverter.convert(copy, type); + } + +} diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/ColumnInfoTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/ColumnInfoTests.java new file mode 100644 index 0000000000000..680284fae4f89 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/net/protocol/ColumnInfoTests.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.jdbc.net.protocol; + +import org.elasticsearch.test.ESTestCase; + +import java.sql.JDBCType; + +public class ColumnInfoTests extends ESTestCase { + static ColumnInfo varcharInfo(String name) { + return new ColumnInfo(name, JDBCType.VARCHAR, "", "", "", "", 0); + } + + static ColumnInfo intInfo(String name) { + return new ColumnInfo(name, JDBCType.INTEGER, "", "", "", "", 11); + } + + static ColumnInfo doubleInfo(String name) { + return new ColumnInfo(name, JDBCType.DOUBLE, "", "", "", "", 25); + } + + public void testToString() { + assertEquals("test.doc.a", + new ColumnInfo("a", JDBCType.VARCHAR, "test.doc", "as", "ads", "lab", 0).toString()); + assertEquals("test.doc.a", + new ColumnInfo("a", JDBCType.VARCHAR, "test.doc", "", "", "", 0).toString()); + assertEquals("string", varcharInfo("string").toString()); + assertEquals("int", intInfo("int").toString()); + assertEquals("d", doubleInfo("d").toString()); + } +} diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/package-info.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/package-info.java new file mode 100644 index 0000000000000..4c45324bb08f7 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/package-info.java @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Unit tests for the Elasticsearch JDBC client. + */ +package org.elasticsearch.xpack.sql.jdbc; diff --git a/x-pack/plugin/sql/jdbc/src/test/resources/plugin-security.policy b/x-pack/plugin/sql/jdbc/src/test/resources/plugin-security.policy new file mode 100644 index 0000000000000..5f16c1579b0be --- /dev/null +++ b/x-pack/plugin/sql/jdbc/src/test/resources/plugin-security.policy @@ -0,0 +1,4 @@ +grant { + // Required for testing the Driver registration + permission java.sql.SQLPermission "deregisterDriver"; +}; diff --git a/x-pack/plugin/sql/licenses/antlr4-runtime-4.5.3.jar.sha1 b/x-pack/plugin/sql/licenses/antlr4-runtime-4.5.3.jar.sha1 new file mode 100644 index 0000000000000..535955b7d6826 --- /dev/null +++ b/x-pack/plugin/sql/licenses/antlr4-runtime-4.5.3.jar.sha1 @@ -0,0 +1 @@ +2609e36f18f7e8d593cc1cddfb2ac776dc96b8e0 \ No newline at end of file diff --git a/x-pack/plugin/sql/licenses/antlr4-runtime-LICENSE.txt b/x-pack/plugin/sql/licenses/antlr4-runtime-LICENSE.txt new file mode 100644 index 0000000000000..95d0a2554f686 --- /dev/null +++ b/x-pack/plugin/sql/licenses/antlr4-runtime-LICENSE.txt @@ -0,0 +1,26 @@ +[The "BSD license"] +Copyright (c) 2015 Terence Parr, Sam Harwell +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/x-pack/plugin/sql/licenses/antlr4-runtime-NOTICE.txt b/x-pack/plugin/sql/licenses/antlr4-runtime-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/sql/sql-cli/build.gradle b/x-pack/plugin/sql/sql-cli/build.gradle new file mode 100644 index 0000000000000..06eb24c743ad8 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/build.gradle @@ -0,0 +1,142 @@ + +/* + * This project is named sql-cli because it is in the "org.elasticsearch.plugin" + * group and it'd be super confusing for it to just be called "cli" there. + * Also, the jar we ultimately want to ship is sql-cli-VERSION.jar which is + * exactly what gradle makes by default when the project is named sql-cli. + */ + +apply plugin: 'elasticsearch.build' +/* We don't use the 'application' plugin because it builds a zip and tgz which + * we don't want. */ + +archivesBaseName = 'elasticsearch-sql-cli' + +description = 'Command line interface to Elasticsearch that speaks SQL' + +dependencies { + compile "org.jline:jline:3.6.0" + compile xpackProject('plugin:sql:sql-shared-client') + compile xpackProject('plugin:sql:sql-proto') + compile "org.elasticsearch:elasticsearch-cli:${version}" + + runtime "org.fusesource.jansi:jansi:1.16" + runtime "org.elasticsearch:jna:${versions.jna}" + + testCompile "org.elasticsearch.test:framework:${version}" +} + +dependencyLicenses { + mapping from: /elasticsearch-cli.*/, to: 'elasticsearch' + mapping from: /elasticsearch-core.*/, to: 'elasticsearch' + mapping from: /jackson-.*/, to: 'jackson' + mapping from: /lucene-.*/, to: 'lucene' + mapping from: /sql-proto.*/, to: 'elasticsearch' + mapping from: /sql-shared-client.*/, to: 'elasticsearch' + ignoreSha 'elasticsearch-cli' + ignoreSha 'elasticsearch-core' + ignoreSha 'elasticsearch' + ignoreSha 'sql-proto' + ignoreSha 'sql-shared-client' +} + +/* + * Bundle all dependencies into the main jar and mark it as executable it + * can be easilly shipped around and used. + */ +jar { + from({ + configurations.compile.collect { it.isDirectory() ? it : zipTree(it) } + configurations.runtime.collect { it.isDirectory() ? it : zipTree(it) } + }) { + // We don't need the META-INF from the things we bundle. For now. + exclude 'META-INF/*' + } + manifest { + attributes 'Main-Class': 'org.elasticsearch.xpack.sql.cli.Cli' + } +} + +/* + * Build a jar that doesn't include the dependencies bundled that we can + * include with QA tests along side Elasticsearch without breaking + * jarhell. 
+ */ +configurations { + nodeps +} +task nodepsJar(type: Jar) { + appendix 'nodeps' + from sourceSets.main.output +} +artifacts { + nodeps nodepsJar +} + + +forbiddenApisMain { + signaturesURLs += file('src/forbidden/cli-signatures.txt').toURI().toURL() +} +forbiddenApisTest { + signaturesURLs += file('src/forbidden/cli-signatures.txt').toURI().toURL() +} + +thirdPartyAudit.excludes = [ + // jLine's optional dependencies + 'org.apache.sshd.client.SshClient', + 'org.apache.sshd.client.auth.keyboard.UserInteraction', + 'org.apache.sshd.client.channel.ChannelShell', + 'org.apache.sshd.client.channel.ClientChannel', + 'org.apache.sshd.client.channel.ClientChannelEvent', + 'org.apache.sshd.client.future.AuthFuture', + 'org.apache.sshd.client.future.ConnectFuture', + 'org.apache.sshd.client.future.OpenFuture', + 'org.apache.sshd.client.session.ClientSession', + 'org.apache.sshd.common.Factory', + 'org.apache.sshd.common.channel.PtyMode', + 'org.apache.sshd.common.config.keys.FilePasswordProvider', + 'org.apache.sshd.common.util.io.NoCloseInputStream', + 'org.apache.sshd.common.util.io.NoCloseOutputStream', + 'org.apache.sshd.server.Command', + 'org.apache.sshd.server.Environment', + 'org.apache.sshd.server.ExitCallback', + 'org.apache.sshd.server.SessionAware', + 'org.apache.sshd.server.Signal', + 'org.apache.sshd.server.SshServer', + 'org.apache.sshd.server.keyprovider.SimpleGeneratorHostKeyProvider', + 'org.apache.sshd.server.scp.ScpCommandFactory$Builder', + 'org.apache.sshd.server.session.ServerSession', + 'org.apache.sshd.server.subsystem.sftp.SftpSubsystemFactory$Builder', + 'org.mozilla.universalchardet.UniversalDetector', + 'org.fusesource.jansi.internal.Kernel32$FOCUS_EVENT_RECORD', + 'org.fusesource.jansi.internal.Kernel32$MOUSE_EVENT_RECORD', +] + +task runcli { + description = 'Run the CLI and connect to elasticsearch running on 9200' + dependsOn 'assemble' + doLast { + List command = [new File(project.runtimeJavaHome, 'bin/java').absolutePath] + if ('true'.equals(System.getProperty('debug', 'false'))) { + command += '-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000' + } + command += ['-jar', jar.archivePath.absolutePath] + logger.info("running the cli with: ${command}") + + new ProcessBuilder(command) + .redirectOutput(ProcessBuilder.Redirect.INHERIT) + .redirectInput(ProcessBuilder.Redirect.INHERIT) + .redirectError(ProcessBuilder.Redirect.INHERIT) + .start() + .waitFor() + } +} + +// Use the jar for testing so we can get the proper version information +test { + classpath -= compileJava.outputs.files + classpath -= configurations.compile + classpath -= configurations.runtime + classpath += jar.outputs.files + dependsOn jar +} diff --git a/x-pack/plugin/sql/sql-cli/licenses/jansi-1.16.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jansi-1.16.jar.sha1 new file mode 100644 index 0000000000000..8adc5c7977cf8 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/licenses/jansi-1.16.jar.sha1 @@ -0,0 +1 @@ +b1aaf0028852164ab6b4057192ccd0ba7dedd3a5 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/licenses/jansi-LICENSE.txt b/x-pack/plugin/sql/sql-cli/licenses/jansi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/licenses/jansi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
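Stepping back from the bundled third-party license texts: the `runcli` task defined in the sql-cli build file earlier in this change assembles a `java [-agentlib:jdwp=...] -jar` command line for the executable, dependency-bundled jar and runs it with inherited stdio. The following is a minimal Java sketch of what that Gradle task effectively does; the jar path and the use of `java.home` are illustrative assumptions, not values taken from the build (the task itself uses `project.runtimeJavaHome` and `jar.archivePath`).

```java
import java.io.File;
import java.util.ArrayList;
import java.util.List;

/**
 * Hedged sketch of the command line the "runcli" Gradle task assembles.
 * The jar location below is a hypothetical placeholder.
 */
public class RunCliSketch {
    public static void main(String[] args) throws Exception {
        // The build uses project.runtimeJavaHome; java.home is a stand-in here.
        String javaHome = System.getProperty("java.home");
        String cliJar = "build/libs/elasticsearch-sql-cli.jar"; // placeholder path

        List<String> command = new ArrayList<>();
        command.add(new File(javaHome, "bin/java").getAbsolutePath());
        if (Boolean.parseBoolean(System.getProperty("debug", "false"))) {
            // Same agent string the task adds; suspends until a debugger attaches on port 8000.
            command.add("-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000");
        }
        command.add("-jar");
        command.add(cliJar);

        // Equivalent to the three Redirect.INHERIT calls in the Gradle task.
        int exit = new ProcessBuilder(command).inheritIO().start().waitFor();
        System.exit(exit);
    }
}
```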
diff --git a/x-pack/plugin/sql/sql-cli/licenses/jansi-NOTICE.txt b/x-pack/plugin/sql/sql-cli/licenses/jansi-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/sql/sql-cli/licenses/jline-3.6.0.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jline-3.6.0.jar.sha1 new file mode 100644 index 0000000000000..d6938e6d80b0d --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/licenses/jline-3.6.0.jar.sha1 @@ -0,0 +1 @@ +c8ecc302d6b7d19da41c66be7d428c17cd6b12b2 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/licenses/jline-LICENSE.txt b/x-pack/plugin/sql/sql-cli/licenses/jline-LICENSE.txt new file mode 100644 index 0000000000000..d4defddda4cef --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/licenses/jline-LICENSE.txt @@ -0,0 +1,35 @@ +Copyright (c) 2002-2016, the original author or authors. +All rights reserved. + +http://www.opensource.org/licenses/bsd-license.php + +Redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following +conditions are met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with +the distribution. + +Neither the name of JLine nor the names of its contributors +may be used to endorse or promote products derived from this +software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, +BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, +OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED +AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/x-pack/plugin/sql/sql-cli/licenses/jline-NOTICE.txt b/x-pack/plugin/sql/sql-cli/licenses/jline-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/sql/sql-cli/src/forbidden/cli-signatures.txt b/x-pack/plugin/sql/sql-cli/src/forbidden/cli-signatures.txt new file mode 100644 index 0000000000000..3d90ba67b0323 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/forbidden/cli-signatures.txt @@ -0,0 +1,3 @@ +@defaultMessage println doesn't pay attention to terminal type so it breaks tests on Windows. Just use \n. +java.io.PrintWriter#println() +java.io.PrintWriter#println(java.lang.String) diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java new file mode 100644 index 0000000000000..d1f59c97e5ab1 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java @@ -0,0 +1,150 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli; + +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.LoggingAwareCommand; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.xpack.sql.cli.command.ClearScreenCliCommand; +import org.elasticsearch.xpack.sql.cli.command.CliCommand; +import org.elasticsearch.xpack.sql.cli.command.CliCommands; +import org.elasticsearch.xpack.sql.cli.command.CliSession; +import org.elasticsearch.xpack.sql.cli.command.FetchSeparatorCliCommand; +import org.elasticsearch.xpack.sql.cli.command.FetchSizeCliCommand; +import org.elasticsearch.xpack.sql.cli.command.PrintLogoCommand; +import org.elasticsearch.xpack.sql.cli.command.ServerInfoCliCommand; +import org.elasticsearch.xpack.sql.cli.command.ServerQueryCliCommand; +import org.elasticsearch.xpack.sql.client.HttpClient; +import org.elasticsearch.xpack.sql.client.shared.ClientException; +import org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration; +import org.elasticsearch.xpack.sql.client.shared.Version; +import org.jline.terminal.TerminalBuilder; +import java.io.IOException; +import java.net.ConnectException; +import java.util.Arrays; +import java.util.List; +import java.util.logging.LogManager; + +public class Cli extends LoggingAwareCommand { + private final OptionSpec keystoreLocation; + private final OptionSpec checkOption; + private final OptionSpec connectionString; + + /** + * Use this VM Options to run in IntelliJ or Eclipse: + * -Dorg.jline.terminal.type=xterm-256color + * -Dorg.jline.terminal.jna=false + * -Dorg.jline.terminal.jansi=false + * -Dorg.jline.terminal.exec=false + * -Dorg.jline.terminal.dumb=true + */ + public static void main(String[] args) throws Exception { + final Cli cli = new Cli(new JLineTerminal(TerminalBuilder.builder().build(), true)); + configureJLineLogging(); + int status = cli.main(args, Terminal.DEFAULT); + if (status != ExitCodes.OK) { + exit(status); + } + } + + private static void configureJLineLogging() { + try { + /* Initialize the logger from the a properties file we bundle. This makes sure + * we get useful error messages from jLine. */ + LogManager.getLogManager().readConfiguration(Cli.class.getResourceAsStream("/logging.properties")); + } catch (IOException ex) { + throw new RuntimeException("cannot setup logging", ex); + } + } + + private final CliTerminal cliTerminal; + + /** + * Build the CLI. + */ + public Cli(CliTerminal cliTerminal) { + super("Elasticsearch SQL CLI"); + this.cliTerminal = cliTerminal; + parser.acceptsAll(Arrays.asList("d", "debug"), "Enable debug logging"); + this.keystoreLocation = parser.acceptsAll( + Arrays.asList("k", "keystore_location"), + "Location of a keystore to use when setting up SSL. " + + "If specified then the CLI will prompt for a keystore password. 
" + + "If specified when the uri isn't https then an error is thrown.") + .withRequiredArg().ofType(String.class); + this.checkOption = parser.acceptsAll(Arrays.asList("c", "check"), + "Enable initial connection check on startup") + .withRequiredArg().ofType(Boolean.class) + .defaultsTo(Boolean.parseBoolean(System.getProperty("cli.check", "true"))); + this.connectionString = parser.nonOptions("uri"); + } + + @Override + protected void execute(org.elasticsearch.cli.Terminal terminal, OptionSet options) throws Exception { + boolean debug = options.has("d") || options.has("debug"); + boolean checkConnection = checkOption.value(options); + List args = connectionString.values(options); + if (args.size() > 1) { + throw new UserException(ExitCodes.USAGE, "expecting a single uri"); + } + String uri = args.size() == 1 ? args.get(0) : null; + args = keystoreLocation.values(options); + if (args.size() > 1) { + throw new UserException(ExitCodes.USAGE, "expecting a single keystore file"); + } + String keystoreLocationValue = args.size() == 1 ? args.get(0) : null; + execute(uri, debug, keystoreLocationValue, checkConnection); + } + + private void execute(String uri, boolean debug, String keystoreLocation, boolean checkConnection) throws Exception { + CliCommand cliCommand = new CliCommands( + new PrintLogoCommand(), + new ClearScreenCliCommand(), + new FetchSizeCliCommand(), + new FetchSeparatorCliCommand(), + new ServerInfoCliCommand(), + new ServerQueryCliCommand() + ); + try { + ConnectionBuilder connectionBuilder = new ConnectionBuilder(cliTerminal); + ConnectionConfiguration con = connectionBuilder.buildConnection(uri, keystoreLocation); + CliSession cliSession = new CliSession(new HttpClient(con)); + cliSession.setDebug(debug); + if (checkConnection) { + checkConnection(cliSession, cliTerminal, con); + } + new CliRepl(cliTerminal, cliSession, cliCommand).execute(); + } finally { + cliTerminal.close(); + } + } + + private void checkConnection(CliSession cliSession, CliTerminal cliTerminal, ConnectionConfiguration con) throws UserException { + try { + cliSession.checkConnection(); + } catch (ClientException ex) { + if (cliSession.isDebug()) { + cliTerminal.error("Client Exception", ex.getMessage()); + cliTerminal.println(); + cliTerminal.printStackTrace(ex); + cliTerminal.flush(); + } + if (ex.getCause() != null && ex.getCause() instanceof ConnectException) { + // Most likely Elasticsearch is not running + throw new UserException(ExitCodes.IO_ERROR, + "Cannot connect to the server " + con.connectionString() + " - " + ex.getCause().getMessage()); + } else { + // Most likely we connected to something other than Elasticsearch + throw new UserException(ExitCodes.DATA_ERROR, + "Cannot communicate with the server " + con.connectionString() + + ". This version of CLI only works with Elasticsearch version " + Version.CURRENT.toString()); + } + } + } +} diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliRepl.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliRepl.java new file mode 100644 index 0000000000000..f591ced77ec2d --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliRepl.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli; + +import org.elasticsearch.xpack.sql.cli.command.CliCommand; +import org.elasticsearch.xpack.sql.cli.command.CliSession; + +import java.util.Locale; + +public class CliRepl { + + private CliTerminal cliTerminal; + private CliCommand cliCommand; + private CliSession cliSession; + + public CliRepl(CliTerminal cliTerminal, CliSession cliSession, CliCommand cliCommand) { + this.cliTerminal = cliTerminal; + this.cliCommand = cliCommand; + this.cliSession = cliSession; + } + + public void execute() { + String DEFAULT_PROMPT = "sql> "; + String MULTI_LINE_PROMPT = " | "; + + StringBuilder multiLine = new StringBuilder(); + String prompt = DEFAULT_PROMPT; + + cliTerminal.flush(); + cliCommand.handle(cliTerminal, cliSession, "logo"); + + while (true) { + String line = cliTerminal.readLine(prompt); + if (line == null) { + return; + } + line = line.trim(); + + if (!line.endsWith(";")) { + multiLine.append(line); + multiLine.append(" "); + prompt = MULTI_LINE_PROMPT; + continue; + } + + line = line.substring(0, line.length() - 1); + + prompt = DEFAULT_PROMPT; + if (multiLine.length() > 0) { + // append the line without trailing ; + multiLine.append(line); + line = multiLine.toString().trim(); + multiLine.setLength(0); + } + + // special case to handle exit + if (isExit(line)) { + cliTerminal.line().em("Bye!").ln(); + cliTerminal.flush(); + return; + } + if (cliCommand.handle(cliTerminal, cliSession, line) == false) { + cliTerminal.error("Unrecognized command", line); + } + cliTerminal.println(); + } + } + + private static boolean isExit(String line) { + line = line.toLowerCase(Locale.ROOT); + return line.equals("exit") || line.equals("quit"); + } + +} diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliTerminal.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliTerminal.java new file mode 100644 index 0000000000000..0441e6effca1a --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/CliTerminal.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli; + +import java.io.IOException; +import org.elasticsearch.cli.UserException; + +/** + * Represents a terminal endpoint + */ +public interface CliTerminal extends AutoCloseable { + + /** + * Prints line with plain text + */ + void print(String text); + + /** + * Prints line with plain text followed by a new line + */ + void println(String text); + + /** + * Prints a formatted error message + */ + void error(String type, String message); + + /** + * Prints a new line + */ + void println(); + + /** + * Clears the terminal + */ + void clear(); + + /** + * Flushes the terminal + */ + void flush(); + + /** + * Prints the stacktrace of the exception + */ + void printStackTrace(Exception ex); + + /** + * Prompts the user to enter the password and returns it. + * + * @throws UserException if there is a problem reading the password, + * for instance, the user {@code ctrl-c}s while we're waiting + * or they send an EOF + * @return the password the user typed, never null + */ + String readPassword(String prompt) throws UserException; + + /** + * Reads the line from the terminal. 
+ * + * @return {@code null} if the user closes the terminal while we're + * waiting for the line, {@code ""} if the use {@code ctrl-c}s while + * we're waiting, the line they typed otherwise + */ + String readLine(String prompt); + + /** + * Creates a new line builder, which allows building a formatted lines. + * + * The line is not displayed until it is closed with ln() or end(). + */ + LineBuilder line(); + + interface LineBuilder { + /** + * Adds a plain text to the line + */ + LineBuilder text(String text); + + /** + * Adds a text with emphasis to the line + */ + LineBuilder em(String text); + + /** + * Adds a text representing the error message + */ + LineBuilder error(String text); + + /** + * Adds a text representing a parameter of the error message + */ + LineBuilder param(String text); + + /** + * Adds '\n' to the line and send it to the screen. + */ + void ln(); + + /** + * Sends the line to the screen. + */ + void end(); + } + + @Override + void close() throws IOException; +} diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Completers.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Completers.java new file mode 100644 index 0000000000000..591ef56fd3301 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Completers.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli; + +import org.jline.reader.Completer; +import org.jline.reader.impl.completer.AggregateCompleter; +import org.jline.reader.impl.completer.ArgumentCompleter; +import org.jline.reader.impl.completer.StringsCompleter; + +class Completers { + //TODO: need tree structure + static final Completer INSTANCE = new AggregateCompleter( + new ArgumentCompleter(new StringsCompleter("", "EXPLAIN", "SHOW", "SELECT", "SET")), + new ArgumentCompleter(new StringsCompleter("SHOW", "TABLE", "COLUMNS", "FUNCTIONS"))); + +} diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/ConnectionBuilder.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/ConnectionBuilder.java new file mode 100644 index 0000000000000..dbd0e230043ce --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/ConnectionBuilder.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli; + +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.SuppressForbidden; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration; + +import java.net.URI; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Properties; + +import static org.elasticsearch.xpack.sql.client.shared.UriUtils.parseURI; +import static org.elasticsearch.xpack.sql.client.shared.UriUtils.removeQuery; + +/** + * Connection Builder. 
Can interactively ask users for the password if it is not provided + */ +public class ConnectionBuilder { + public static String DEFAULT_CONNECTION_STRING = "http://localhost:9200/"; + public static URI DEFAULT_URI = URI.create(DEFAULT_CONNECTION_STRING); + + private CliTerminal cliTerminal; + + public ConnectionBuilder(CliTerminal cliTerminal) { + this.cliTerminal = cliTerminal; + } + + /** + * Build the connection. + * + * @param connectionStringArg the connection string to connect to + * @param keystoreLocation the location of the keystore to configure. If null then use the system keystore. + * @throws UserException if there is a problem with the information provided by the user + */ + public ConnectionConfiguration buildConnection(String connectionStringArg, String keystoreLocation) throws UserException { + final URI uri; + final String connectionString; + Properties properties = new Properties(); + String user = null; + String password = null; + if (connectionStringArg != null) { + connectionString = connectionStringArg; + uri = removeQuery(parseURI(connectionString, DEFAULT_URI), connectionString, DEFAULT_URI); + user = uri.getUserInfo(); + if (user != null) { + int colonIndex = user.indexOf(':'); + if (colonIndex >= 0) { + password = user.substring(colonIndex + 1); + user = user.substring(0, colonIndex); + } + } + } else { + uri = DEFAULT_URI; + connectionString = DEFAULT_CONNECTION_STRING; + } + + if (keystoreLocation != null) { + if (false == "https".equals(uri.getScheme())) { + throw new UserException(ExitCodes.USAGE, "keystore file specified without https"); + } + Path p = getKeystorePath(keystoreLocation); + checkIfExists("keystore file", p); + String keystorePassword = cliTerminal.readPassword("keystore password: "); + + /* + * Set both the keystore and truststore settings which is required + * to everything work smoothly. I'm not totally sure why we have + * two settings but that is a problem for another day. 
+ */ + properties.put("ssl.keystore.location", keystoreLocation); + properties.put("ssl.keystore.pass", keystorePassword); + properties.put("ssl.truststore.location", keystoreLocation); + properties.put("ssl.truststore.pass", keystorePassword); + } + + if ("https".equals(uri.getScheme())) { + properties.put("ssl", "true"); + } + + if (user != null) { + if (password == null) { + password = cliTerminal.readPassword("password: "); + } + properties.setProperty(ConnectionConfiguration.AUTH_USER, user); + properties.setProperty(ConnectionConfiguration.AUTH_PASS, password); + } + + return newConnectionConfiguration(uri, connectionString, properties); + } + + @SuppressForbidden(reason = "cli application shouldn't depend on ES") + private Path getKeystorePath(String keystoreLocation) { + return Paths.get(keystoreLocation); + } + + protected ConnectionConfiguration newConnectionConfiguration(URI uri, String connectionString, Properties properties) { + return new ConnectionConfiguration(uri, connectionString, properties); + } + + protected void checkIfExists(String name, Path p) throws UserException { + if (false == Files.exists(p)) { + throw new UserException(ExitCodes.USAGE, name + " [" + p + "] doesn't exist"); + } + if (false == Files.isRegularFile(p)) { + throw new UserException(ExitCodes.USAGE, name + " [" + p + "] isn't a regular file"); + } + } + +} diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/FatalCliException.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/FatalCliException.java new file mode 100644 index 0000000000000..c314ac1009ea0 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/FatalCliException.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli; + +/** + * Throwing this except will cause the CLI to terminate + */ +public class FatalCliException extends RuntimeException { + public FatalCliException(String message, Throwable cause) { + super(message, cause); + } + + public FatalCliException(String message) { + super(message); + } +} diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/JLineTerminal.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/JLineTerminal.java new file mode 100644 index 0000000000000..528e44afb51f2 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/JLineTerminal.java @@ -0,0 +1,181 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli; + +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.UserException; +import org.jline.reader.EndOfFileException; +import org.jline.reader.LineReader; +import org.jline.reader.LineReaderBuilder; +import org.jline.reader.UserInterruptException; +import org.jline.terminal.Terminal; +import org.jline.utils.AttributedString; +import org.jline.utils.AttributedStringBuilder; +import org.jline.utils.InfoCmp; + +import java.io.IOException; + +import static org.jline.utils.AttributedStyle.BOLD; +import static org.jline.utils.AttributedStyle.BRIGHT; +import static org.jline.utils.AttributedStyle.DEFAULT; +import static org.jline.utils.AttributedStyle.RED; +import static org.jline.utils.AttributedStyle.YELLOW; + +/** + * jline-based implementation of the terminal + */ +public class JLineTerminal implements CliTerminal { + + private Terminal terminal; + private LineReader reader; + + /** + * Build the terminal. + * @param terminal the jLine terminal to work with + * @param enableMatchBracket should jLine bounce the cursor to matching brackets? + * this is disabled in tests because it very difficult to predict and + * enabled in production because it is fairly nice. + */ + public JLineTerminal(Terminal terminal, boolean enableMatchBracket) { + this(terminal, + LineReaderBuilder.builder() + .terminal(terminal) + .completer(Completers.INSTANCE) + .build(), + enableMatchBracket); + } + + /** + * Constructor for tests. + */ + JLineTerminal(Terminal terminal, LineReader reader, boolean enableMatchBracket) { + this.terminal = terminal; + this.reader = reader; + if (false == enableMatchBracket) { + reader.setVariable(LineReader.BLINK_MATCHING_PAREN, 0L); + } + } + + @Override + public LineBuilder line() { + return new LineBuilder(); + } + + @Override + public void print(String text) { + terminal.writer().print(text); + } + + @Override + public void println(String text) { + print(text); + print("\n"); + } + + @Override + public void error(String type, String message) { + AttributedStringBuilder sb = new AttributedStringBuilder(); + sb.append(type + " [", BOLD.foreground(RED)); + sb.append(message, DEFAULT.boldOff().italic().foreground(YELLOW)); + sb.append("]", BOLD.underlineOff().foreground(RED)); + terminal.writer().print(sb.toAnsi(terminal)); + terminal.flush(); + } + + @Override + public void println() { + print("\n"); + } + + @Override + public void clear() { + terminal.puts(InfoCmp.Capability.clear_screen); + } + + @Override + public void flush() { + terminal.flush(); + } + + @Override + public void printStackTrace(Exception ex) { + ex.printStackTrace(terminal.writer()); + } + + @Override + public String readPassword(String prompt) throws UserException { + try { + String password = reader.readLine(prompt, (char) 0); + if (password == null) { + /* + * The docs say this can't return null but they lie. Lies, I tell you! + * This returns null when you pipe an empty file into the process. + * Since that is a lot like an EOF we throw the same exception. 
+ */ + throw new UserException(ExitCodes.NOPERM, "password required"); + } + return password; + } catch (UserInterruptException | EndOfFileException ex) { + throw new UserException(ExitCodes.NOPERM, "password required"); + } + } + + @Override + public String readLine(String prompt) { + String attributedString = new AttributedString(prompt, DEFAULT.foreground(YELLOW)).toAnsi(terminal); + try { + return reader.readLine(attributedString); + } catch (UserInterruptException ex) { + return ""; + } catch (EndOfFileException ex) { + return null; + } + } + + @Override + public void close() throws IOException { + terminal.close(); + } + + public final class LineBuilder implements CliTerminal.LineBuilder { + AttributedStringBuilder line; + + private LineBuilder() { + line = new AttributedStringBuilder(); + } + + public LineBuilder text(String text) { + line.append(text, DEFAULT); + return this; + } + + public LineBuilder em(String text) { + line.append(text, DEFAULT.foreground(BRIGHT)); + return this; + } + + + public LineBuilder error(String text) { + line.append(text, BOLD.foreground(RED)); + return this; + } + + public LineBuilder param(String text) { + line.append(text, DEFAULT.italic().foreground(YELLOW)); + return this; + } + + public void ln() { + println(line.toAnsi(terminal)); + } + + public void end() { + terminal.writer().print(line.toAnsi(terminal)); + terminal.writer().flush(); + } + } + +} diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/AbstractCliCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/AbstractCliCommand.java new file mode 100644 index 0000000000000..f7efc0888d35f --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/AbstractCliCommand.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli.command; + +import org.elasticsearch.xpack.sql.cli.CliTerminal; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * The base class for simple commands that match the pattern + */ +public abstract class AbstractCliCommand implements CliCommand { + + protected final Pattern pattern; + + AbstractCliCommand(Pattern pattern) { + this.pattern = pattern; + } + + @Override + public boolean handle(CliTerminal terminal, CliSession cliSession, String line) { + Matcher matcher = pattern.matcher(line); + if (matcher.matches()) { + return doHandle(terminal, cliSession, matcher, line); + } + return false; + } + + /** + * the perform the command + * returns true if the command handled the line and false otherwise + */ + protected abstract boolean doHandle(CliTerminal terminal, CliSession cliSession, Matcher m, String line); +} diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/AbstractServerCliCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/AbstractServerCliCommand.java new file mode 100644 index 0000000000000..346592df5d85f --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/AbstractServerCliCommand.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli.command; + +import org.elasticsearch.xpack.sql.cli.CliTerminal; + +public abstract class AbstractServerCliCommand implements CliCommand { + + public AbstractServerCliCommand() { + } + + @Override + public final boolean handle(CliTerminal terminal, CliSession cliSession, String line) { + try { + return doHandle(terminal, cliSession, line); + } catch (RuntimeException e) { + handleExceptionWhileCommunicatingWithServer(terminal, cliSession, e); + } + return true; + } + + protected abstract boolean doHandle(CliTerminal cliTerminal, CliSession cliSession, String line); + + /** + * Handle an exception while communication with the server. Extracted + * into a method so that tests can bubble the failure. + */ + protected void handleExceptionWhileCommunicatingWithServer(CliTerminal terminal, CliSession cliSession, RuntimeException e) { + terminal.line().error("Communication error [").param(e.getMessage() == null ? e.getClass().getName() : e.getMessage()).error("]") + .ln(); + if (cliSession.isDebug()) { + terminal.printStackTrace(e); + } + } + + +} diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ClearScreenCliCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ClearScreenCliCommand.java new file mode 100644 index 0000000000000..ffde1ec556a0d --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ClearScreenCliCommand.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli.command; + +import org.elasticsearch.xpack.sql.cli.CliTerminal; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * cls command that cleans the screen + */ +public class ClearScreenCliCommand extends AbstractCliCommand { + + public ClearScreenCliCommand() { + super(Pattern.compile("cls", Pattern.CASE_INSENSITIVE)); + } + + @Override + protected boolean doHandle(CliTerminal terminal, CliSession cliSession, Matcher m, String line) { + terminal.clear(); + return true; + } +} diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliCommand.java new file mode 100644 index 0000000000000..b87b06b380381 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliCommand.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.command; + +import org.elasticsearch.xpack.sql.cli.CliTerminal; + +public interface CliCommand { + + /** + * Handle the command, return true if the command is handled, false otherwise + */ + boolean handle(CliTerminal terminal, CliSession cliSession, String line); + +} diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliCommands.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliCommands.java new file mode 100644 index 0000000000000..192195c5b229a --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliCommands.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli.command; + +import org.elasticsearch.xpack.sql.cli.CliTerminal; + +import java.util.Arrays; +import java.util.List; + +/** + * Wrapper for several commands + */ +public class CliCommands implements CliCommand { + + private final List commands; + + public CliCommands(CliCommand... commands) { + this.commands = Arrays.asList(commands); + } + + @Override + public boolean handle(CliTerminal terminal, CliSession cliSession, String line) { + for (CliCommand cliCommand : commands) { + if (cliCommand.handle(terminal, cliSession, line)) { + return true; + } + } + return false; + } +} diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java new file mode 100644 index 0000000000000..64f38c2254c5f --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.command; + +import org.elasticsearch.action.main.MainResponse; +import org.elasticsearch.xpack.sql.client.HttpClient; +import org.elasticsearch.xpack.sql.client.shared.ClientException; +import org.elasticsearch.xpack.sql.client.shared.Version; +import org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest; + +import java.sql.SQLException; + +/** + * Stores information about the current session + */ +public class CliSession { + private final HttpClient httpClient; + private int fetchSize = AbstractSqlQueryRequest.DEFAULT_FETCH_SIZE; + private String fetchSeparator = ""; + private boolean debug; + + public CliSession(HttpClient httpClient) { + this.httpClient = httpClient; + } + + public HttpClient getClient() { + return httpClient; + } + + public void setFetchSize(int fetchSize) { + if (fetchSize <= 0) { + throw new IllegalArgumentException("Must be > 0."); + } + this.fetchSize = fetchSize; + } + + public int getFetchSize() { + return fetchSize; + } + + public void setFetchSeparator(String fetchSeparator) { + this.fetchSeparator = fetchSeparator; + } + + public String getFetchSeparator() { + return fetchSeparator; + } + + public void setDebug(boolean debug) { + this.debug = debug; + } + + public boolean isDebug() { + return debug; + } + + public void checkConnection() throws ClientException { + MainResponse response; + try { + response = httpClient.serverInfo(); + } catch (SQLException ex) { + throw new ClientException(ex); + } + // TODO: We can relax compatibility requirement later when we have a better idea about protocol compatibility guarantees + if (response.getVersion().major != Version.CURRENT.major || response.getVersion().minor != Version.CURRENT.minor) { + throw new ClientException("This alpha version of CLI is only compatible with Elasticsearch version " + + Version.CURRENT.toString()); + } + } +} diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/FetchSeparatorCliCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/FetchSeparatorCliCommand.java new file mode 100644 index 0000000000000..786f31cb0104d --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/FetchSeparatorCliCommand.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.command; + +import org.elasticsearch.xpack.sql.cli.CliTerminal; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * fetch_separator command that allows to change the separator string between fetches + */ +public class FetchSeparatorCliCommand extends AbstractCliCommand { + + public FetchSeparatorCliCommand() { + super(Pattern.compile("fetch(?: |_)separator *= *\"(.+)\"", Pattern.CASE_INSENSITIVE)); + } + + @Override + protected boolean doHandle(CliTerminal terminal, CliSession cliSession, Matcher m, String line) { + cliSession.setFetchSeparator(m.group(1)); + terminal.line().text("fetch separator set to \"").em(cliSession.getFetchSeparator()).text("\"").end(); + return true; + } +} diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/FetchSizeCliCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/FetchSizeCliCommand.java new file mode 100644 index 0000000000000..8ccef47e19d32 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/FetchSizeCliCommand.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli.command; + +import org.elasticsearch.xpack.sql.cli.CliTerminal; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * fetch_size command that allows to change the size of fetches + */ +public class FetchSizeCliCommand extends AbstractCliCommand { + + public FetchSizeCliCommand() { + super(Pattern.compile("fetch(?: |_)size *= *(.+)", Pattern.CASE_INSENSITIVE)); + } + + @Override + protected boolean doHandle(CliTerminal terminal, CliSession cliSession, Matcher m, String line) { + try { + cliSession.setFetchSize(Integer.parseInt(m.group(1))); + } catch (NumberFormatException e) { + terminal.line().error("Invalid fetch size [").param(m.group(1)).error("]").end(); + return true; + } catch (IllegalArgumentException e) { + terminal.line().error("Invalid fetch size [").param(m.group(1)).error("]. " + e.getMessage()).end(); + return true; + } + terminal.line().text("fetch size set to ").em(Integer.toString(cliSession.getFetchSize())).end(); + return true; + } +} diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/PrintLogoCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/PrintLogoCommand.java new file mode 100644 index 0000000000000..306189b535a03 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/PrintLogoCommand.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.command; + +import org.elasticsearch.xpack.sql.cli.Cli; +import org.elasticsearch.xpack.sql.cli.CliTerminal; +import org.elasticsearch.xpack.sql.cli.FatalCliException; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * logo command that cleans the screen and prints the logo + */ +public class PrintLogoCommand extends AbstractCliCommand { + + public PrintLogoCommand() { + super(Pattern.compile("logo", Pattern.CASE_INSENSITIVE)); + } + + @Override + protected boolean doHandle(CliTerminal terminal, CliSession cliSession, Matcher m, String line) { + printLogo(terminal); + return true; + } + + public void printLogo(CliTerminal terminal) { + terminal.clear(); + try (InputStream in = Cli.class.getResourceAsStream("/logo.txt")) { + if (in == null) { + throw new FatalCliException("Could not find logo!"); + } + try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) { + String line; + while ((line = reader.readLine()) != null) { + terminal.println(line); + } + } + } catch (IOException e) { + throw new FatalCliException("Could not load logo!", e); + } + + terminal.println(); + } + +} \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommand.java new file mode 100644 index 0000000000000..635c041da7ae6 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommand.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli.command; + +import org.elasticsearch.action.main.MainResponse; +import org.elasticsearch.xpack.sql.cli.CliTerminal; + +import java.sql.SQLException; +import java.util.Locale; + +public class ServerInfoCliCommand extends AbstractServerCliCommand { + + public ServerInfoCliCommand() { + } + + @Override + public boolean doHandle(CliTerminal terminal, CliSession cliSession, String line) { + if (false == "info".equals(line.toLowerCase(Locale.ROOT))) { + return false; + } + MainResponse info; + try { + info = cliSession.getClient().serverInfo(); + } catch (SQLException e) { + terminal.error("Error fetching server info", e.getMessage()); + return true; + } + terminal.line() + .text("Node:").em(info.getNodeName()) + .text(" Cluster:").em(info.getClusterName().value()) + .text(" Version:").em(info.getVersion().toString()) + .ln(); + return true; + } +} diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java new file mode 100644 index 0000000000000..c1fc609c50b8f --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli.command; + +import org.elasticsearch.xpack.sql.cli.CliTerminal; +import org.elasticsearch.xpack.sql.client.HttpClient; +import org.elasticsearch.xpack.sql.client.shared.JreHttpUrlConnection; +import org.elasticsearch.xpack.sql.plugin.CliFormatter; +import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; + +import java.sql.SQLException; + +public class ServerQueryCliCommand extends AbstractServerCliCommand { + + @Override + protected boolean doHandle(CliTerminal terminal, CliSession cliSession, String line) { + SqlQueryResponse response = null; + HttpClient cliClient = cliSession.getClient(); + CliFormatter cliFormatter; + String data; + try { + response = cliClient.queryInit(line, cliSession.getFetchSize()); + cliFormatter = new CliFormatter(response); + data = cliFormatter.formatWithHeader(response); + while (true) { + handleText(terminal, data); + if (response.cursor().isEmpty()) { + // Successfully finished the entire query! + terminal.flush(); + return true; + } + if (false == cliSession.getFetchSeparator().equals("")) { + terminal.println(cliSession.getFetchSeparator()); + } + response = cliSession.getClient().nextPage(response.cursor()); + data = cliFormatter.formatWithoutHeader(response); + } + } catch (SQLException e) { + if (JreHttpUrlConnection.SQL_STATE_BAD_SERVER.equals(e.getSQLState())) { + terminal.error("Server error", e.getMessage()); + } else { + terminal.error("Bad request", e.getMessage()); + } + if (response != null) { + try { + cliClient.queryClose(response.cursor()); + } catch (SQLException ex) { + terminal.error("Could not close cursor", ex.getMessage()); + } + } + } + return true; + } + + private void handleText(CliTerminal terminal, String str) { + terminal.print(str); + } +} diff --git a/x-pack/plugin/sql/sql-cli/src/main/resources/logging.properties b/x-pack/plugin/sql/sql-cli/src/main/resources/logging.properties new file mode 100644 index 0000000000000..cfba3c2935f48 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/main/resources/logging.properties @@ -0,0 +1,6 @@ +handlers=java.util.logging.ConsoleHandler +.level = WARNING + +# Let jline log information about any failure to setup the terminal properly. +# Without this we have no way of knowing *why* you lose terminal features. +org.jline.level = FINE diff --git a/x-pack/plugin/sql/sql-cli/src/main/resources/logo.txt b/x-pack/plugin/sql/sql-cli/src/main/resources/logo.txt new file mode 100644 index 0000000000000..0229fa9b335b7 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/main/resources/logo.txt @@ -0,0 +1,25 @@ + .sssssss.` .sssssss. + .:sXXXXXXXXXXo` `ohXXXXXXXXXho. + .yXXXXXXXXXXXXXXo` `oXXXXXXXXXXXXXXX- +.XXXXXXXXXXXXXXXXXXo` `oXXXXXXXXXXXXXXXXXX. +.XXXXXXXXXXXXXXXXXXXXo. .oXXXXXXXXXXXXXXXXXXXXh +.XXXXXXXXXXXXXXXXXXXXXXo``oXXXXXXXXXXXXXXXXXXXXXXy +`yXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX. + `oXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXo` + `oXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXo` + `oXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXo` + `oXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXo` + `oXXXXXXXXXXXXXXXXXXXXXXXXXXXXo` + .XXXXXXXXXXXXXXXXXXXXXXXXXo` + .oXXXXXXXXXXXXXXXXXXXXXXXXo` + `oXXXXXXXXXXXXXXXXXXXXXXXXo` `odo` + `oXXXXXXXXXXXXXXXXXXXXXXXXo` `oXXXXXo` + `oXXXXXXXXXXXXXXXXXXXXXXXXo` `oXXXXXXXXXo` + `oXXXXXXXXXXXXXXXXXXXXXXXXo` `oXXXXXXXXXXXXXo` +`yXXXXXXXXXXXXXXXXXXXXXXXo` oXXXXXXXXXXXXXXXXX. 
+.XXXXXXXXXXXXXXXXXXXXXXo` `oXXXXXXXXXXXXXXXXXXXy +.XXXXXXXXXXXXXXXXXXXXo` /XXXXXXXXXXXXXXXXXXXXX +.XXXXXXXXXXXXXXXXXXo` `oXXXXXXXXXXXXXXXXXX- + -XXXXXXXXXXXXXXXo` `oXXXXXXXXXXXXXXXo` + .oXXXXXXXXXXXo` `oXXXXXXXXXXXo. + `.sshXXyso` SQL `.sshXhss.` \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliReplTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliReplTests.java new file mode 100644 index 0000000000000..2397418256ae9 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliReplTests.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.cli.command.CliCommand; +import org.elasticsearch.xpack.sql.cli.command.CliSession; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class CliReplTests extends ESTestCase { + + public void testBasicCliFunctionality() throws Exception { + CliTerminal cliTerminal = new TestTerminal( + "test;", + "notest;", + "exit;" + ); + CliSession mockSession = mock(CliSession.class); + CliCommand mockCommand = mock(CliCommand.class); + when(mockCommand.handle(cliTerminal, mockSession, "logo")).thenReturn(true); + when(mockCommand.handle(cliTerminal, mockSession, "test")).thenReturn(true); + when(mockCommand.handle(cliTerminal, mockSession, "notest")).thenReturn(false); + + CliRepl cli = new CliRepl(cliTerminal, mockSession, mockCommand); + cli.execute(); + + verify(mockCommand, times(1)).handle(cliTerminal, mockSession, "test"); + verify(mockCommand, times(1)).handle(cliTerminal, mockSession, "logo"); + verify(mockCommand, times(1)).handle(cliTerminal, mockSession, "notest"); + verifyNoMoreInteractions(mockCommand, mockSession); + } + + + public void testFatalCliExceptionHandling() throws Exception { + CliTerminal cliTerminal = new TestTerminal( + "test;", + "fail;" + ); + + CliSession mockSession = mock(CliSession.class); + CliCommand mockCommand = mock(CliCommand.class); + when(mockCommand.handle(cliTerminal, mockSession, "logo")).thenReturn(true); + when(mockCommand.handle(cliTerminal, mockSession, "test")).thenReturn(true); + when(mockCommand.handle(cliTerminal, mockSession, "fail")).thenThrow(new FatalCliException("die")); + + CliRepl cli = new CliRepl(cliTerminal, mockSession, mockCommand); + expectThrows(FatalCliException.class, cli::execute); + + verify(mockCommand, times(1)).handle(cliTerminal, mockSession, "logo"); + verify(mockCommand, times(1)).handle(cliTerminal, mockSession, "test"); + verify(mockCommand, times(1)).handle(cliTerminal, mockSession, "fail"); + verifyNoMoreInteractions(mockCommand, mockSession); + } + +} diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java new file mode 100644 index 0000000000000..befcddf9e7d25 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java @@ -0,0 +1,67 @@ 
+/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli; + +import org.elasticsearch.Build; +import org.elasticsearch.action.main.MainResponse; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.cli.command.CliSession; +import org.elasticsearch.xpack.sql.client.HttpClient; +import org.elasticsearch.xpack.sql.client.shared.ClientException; +import org.elasticsearch.xpack.sql.client.shared.Version; + +import java.sql.SQLException; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class CliSessionTests extends ESTestCase { + + public void testProperConnection() throws Exception { + HttpClient httpClient = mock(HttpClient.class); + when(httpClient.serverInfo()).thenReturn(new MainResponse(randomAlphaOfLength(5), org.elasticsearch.Version.CURRENT, + ClusterName.DEFAULT, UUIDs.randomBase64UUID(), Build.CURRENT)); + CliSession cliSession = new CliSession(httpClient); + cliSession.checkConnection(); + verify(httpClient, times(1)).serverInfo(); + verifyNoMoreInteractions(httpClient); + } + + public void testConnection() throws Exception { + HttpClient httpClient = mock(HttpClient.class); + when(httpClient.serverInfo()).thenThrow(new SQLException("Cannot connect")); + CliSession cliSession = new CliSession(httpClient); + expectThrows(ClientException.class, cliSession::checkConnection); + verify(httpClient, times(1)).serverInfo(); + verifyNoMoreInteractions(httpClient); + } + + public void testWrongServerVersion() throws Exception { + HttpClient httpClient = mock(HttpClient.class); + byte minor; + byte major; + if (randomBoolean()) { + minor = Version.CURRENT.minor; + major = (byte) (Version.CURRENT.major + 1); + } else { + minor = (byte) (Version.CURRENT.minor + 1); + major = Version.CURRENT.major; + + } + when(httpClient.serverInfo()).thenReturn(new MainResponse(randomAlphaOfLength(5), + org.elasticsearch.Version.fromString(major + "." + minor + ".23"), + ClusterName.DEFAULT, UUIDs.randomBase64UUID(), Build.CURRENT)); + CliSession cliSession = new CliSession(httpClient); + expectThrows(ClientException.class, cliSession::checkConnection); + verify(httpClient, times(1)).serverInfo(); + verifyNoMoreInteractions(httpClient); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/ConnectionBuilderTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/ConnectionBuilderTests.java new file mode 100644 index 0000000000000..69b77931ff0d8 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/ConnectionBuilderTests.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli; + +import org.elasticsearch.cli.UserException; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration; +import org.elasticsearch.xpack.sql.client.shared.SslConfig; +import java.net.URI; +import java.nio.file.Path; +import java.util.Properties; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class ConnectionBuilderTests extends ESTestCase { + + public void testDefaultConnection() throws Exception { + CliTerminal testTerminal = mock(CliTerminal.class); + ConnectionBuilder connectionBuilder = new ConnectionBuilder(testTerminal); + ConnectionConfiguration con = connectionBuilder.buildConnection(null, null); + assertNull(con.authUser()); + assertNull(con.authPass()); + assertEquals("http://localhost:9200/", con.connectionString()); + assertEquals(URI.create("http://localhost:9200/"), con.baseUri()); + assertEquals(30000, con.connectTimeout()); + assertEquals(60000, con.networkTimeout()); + assertEquals(45000, con.pageTimeout()); + assertEquals(90000, con.queryTimeout()); + assertEquals(1000, con.pageSize()); + verifyNoMoreInteractions(testTerminal); + } + + public void testBasicConnection() throws Exception { + CliTerminal testTerminal = mock(CliTerminal.class); + ConnectionBuilder connectionBuilder = new ConnectionBuilder(testTerminal); + ConnectionConfiguration con = connectionBuilder.buildConnection("http://foobar:9242/", null); + assertNull(con.authUser()); + assertNull(con.authPass()); + assertEquals("http://foobar:9242/", con.connectionString()); + assertEquals(URI.create("http://foobar:9242/"), con.baseUri()); + verifyNoMoreInteractions(testTerminal); + } + + public void testUserAndPasswordConnection() throws Exception { + CliTerminal testTerminal = mock(CliTerminal.class); + ConnectionBuilder connectionBuilder = new ConnectionBuilder(testTerminal); + ConnectionConfiguration con = connectionBuilder.buildConnection("http://user:pass@foobar:9242/", null); + assertEquals("user", con.authUser()); + assertEquals("pass", con.authPass()); + assertEquals("http://user:pass@foobar:9242/", con.connectionString()); + assertEquals(URI.create("http://foobar:9242/"), con.baseUri()); + verifyNoMoreInteractions(testTerminal); + } + + public void testAskUserForPassword() throws Exception { + CliTerminal testTerminal = mock(CliTerminal.class); + when(testTerminal.readPassword("password: ")).thenReturn("password"); + ConnectionBuilder connectionBuilder = new ConnectionBuilder(testTerminal); + ConnectionConfiguration con = connectionBuilder.buildConnection("http://user@foobar:9242/", null); + assertEquals("user", con.authUser()); + assertEquals("password", con.authPass()); + assertEquals("http://user@foobar:9242/", con.connectionString()); + assertEquals(URI.create("http://foobar:9242/"), con.baseUri()); + verify(testTerminal, times(1)).readPassword(any()); + verifyNoMoreInteractions(testTerminal); + } + + public void testAskUserForPasswordAndKeystorePassword() throws Exception { + CliTerminal testTerminal = mock(CliTerminal.class); + when(testTerminal.readPassword("keystore password: ")).thenReturn("keystore password"); + when(testTerminal.readPassword("password: ")).thenReturn("password"); + AtomicBoolean called = new 
AtomicBoolean(false); + ConnectionBuilder connectionBuilder = new ConnectionBuilder(testTerminal) { + @Override + protected void checkIfExists(String name, Path p) { + // Stubbed so we don't need permission to read the file + } + + @Override + protected ConnectionConfiguration newConnectionConfiguration(URI uri, String connectionString, + Properties properties) { + // Stub building the actual configuration because we don't have permission to read the keystore. + assertEquals("true", properties.get(SslConfig.SSL)); + assertEquals("keystore_location", properties.get(SslConfig.SSL_KEYSTORE_LOCATION)); + assertEquals("keystore password", properties.get(SslConfig.SSL_KEYSTORE_PASS)); + assertEquals("keystore_location", properties.get(SslConfig.SSL_TRUSTSTORE_LOCATION)); + assertEquals("keystore password", properties.get(SslConfig.SSL_TRUSTSTORE_PASS)); + + called.set(true); + return null; + } + }; + assertNull(connectionBuilder.buildConnection("https://user@foobar:9242/", "keystore_location")); + assertTrue(called.get()); + verify(testTerminal, times(2)).readPassword(any()); + verifyNoMoreInteractions(testTerminal); + } + + public void testUserGaveUpOnPassword() throws Exception { + CliTerminal testTerminal = mock(CliTerminal.class); + UserException ue = new UserException(randomInt(), randomAlphaOfLength(5)); + when(testTerminal.readPassword("password: ")).thenThrow(ue); + ConnectionBuilder connectionBuilder = new ConnectionBuilder(testTerminal); + UserException actual = expectThrows(UserException.class, () -> + connectionBuilder.buildConnection("http://user@foobar:9242/", null)); + assertSame(actual, ue); + } + + public void testUserGaveUpOnKeystorePassword() throws Exception { + CliTerminal testTerminal = mock(CliTerminal.class); + UserException ue = new UserException(randomInt(), randomAlphaOfLength(5)); + when(testTerminal.readPassword("keystore password: ")).thenThrow(ue); + when(testTerminal.readPassword("password: ")).thenReturn("password"); + ConnectionBuilder connectionBuilder = new ConnectionBuilder(testTerminal) { + @Override + protected void checkIfExists(String name, Path p) { + // Stubbed so we don't need permission to read the file + } + }; + UserException actual = expectThrows(UserException.class, () -> + connectionBuilder.buildConnection("https://user@foobar:9242/", "keystore_location")); + assertSame(actual, ue); + } +} diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/JLineTerminalTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/JLineTerminalTests.java new file mode 100644 index 0000000000000..d301fc69db980 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/JLineTerminalTests.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli; + +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.test.ESTestCase; +import org.jline.reader.EndOfFileException; +import org.jline.reader.LineReader; +import org.jline.reader.UserInterruptException; +import org.jline.terminal.Terminal; +import java.io.IOException; + +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.isNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.verify; + +public class JLineTerminalTests extends ESTestCase { + private final Terminal wrapped = mock(Terminal.class); + private final LineReader reader = mock(LineReader.class); + + public void testDisableMatchBracket() throws IOException { + new JLineTerminal(wrapped, reader, false).close(); + verify(reader).setVariable(LineReader.BLINK_MATCHING_PAREN, 0L); + } + + public void testReadPasswordSuccess() throws IOException, UserException { + String prompt = randomAlphaOfLength(5); + String expected = randomAlphaOfLength(5); + when(reader.readLine(prompt, (char) 0)).thenReturn(expected); + + try (JLineTerminal terminal = new JLineTerminal(wrapped, reader, randomBoolean())) { + String actual = terminal.readPassword(prompt); + + assertEquals(expected, actual); + } + } + + public void testReadPasswordNull() throws IOException { + String prompt = randomAlphaOfLength(5); + /* + * jLine documents readLine as not being able to return null but + * LineReader totally does sometimes. We should interpret that as + * "user hit ctrl-d on the password prompt" because that is similar + * to the situations where this comes up. + */ + when(reader.readLine(prompt, (char) 0)).thenReturn(null); + + try (JLineTerminal terminal = new JLineTerminal(wrapped, reader, randomBoolean())) { + UserException e = expectThrows(UserException.class, () -> terminal.readPassword(prompt)); + assertEquals(ExitCodes.NOPERM, e.exitCode); + assertEquals("password required", e.getMessage()); + } + } + + public void testReadPasswordInterrupted() throws IOException { + String prompt = randomAlphaOfLength(5); + when(reader.readLine(prompt, (char) 0)).thenThrow(new UserInterruptException("")); + + try (JLineTerminal terminal = new JLineTerminal(wrapped, reader, randomBoolean())) { + UserException e = expectThrows(UserException.class, () -> terminal.readPassword(prompt)); + assertEquals(ExitCodes.NOPERM, e.exitCode); + assertEquals("password required", e.getMessage()); + } + } + + public void testReadPasswordClosed() throws IOException { + String prompt = randomAlphaOfLength(5); + when(reader.readLine(prompt, (char) 0)).thenThrow(new EndOfFileException("")); + + try (JLineTerminal terminal = new JLineTerminal(wrapped, reader, randomBoolean())) { + UserException e = expectThrows(UserException.class, () -> terminal.readPassword(prompt)); + assertEquals(ExitCodes.NOPERM, e.exitCode); + assertEquals("password required", e.getMessage()); + } + } + + public void testReadLineSuccess() throws IOException { + String prompt = randomAlphaOfLength(5); + String expected = randomAlphaOfLength(5); + when(reader.readLine(any(String.class))).thenReturn(expected); + + try (JLineTerminal terminal = new JLineTerminal(wrapped, reader, randomBoolean())) { + String actual = terminal.readLine(prompt); + + assertEquals(expected, actual); + } + } + + public void testReadLineInterrupted() throws IOException { + String prompt = randomAlphaOfLength(5); + 
when(reader.readLine(any(String.class))).thenThrow(new UserInterruptException("")); + + try (JLineTerminal terminal = new JLineTerminal(wrapped, reader, randomBoolean())) { + assertEquals("", terminal.readLine(prompt)); + } + } + + public void testReadLineClosed() throws IOException { + String prompt = randomAlphaOfLength(5); + when(reader.readLine(any(String.class))).thenThrow(new EndOfFileException("")); + + try (JLineTerminal terminal = new JLineTerminal(wrapped, reader, randomBoolean())) { + assertEquals(null, terminal.readLine(prompt)); + } + } +} diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/TestTerminal.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/TestTerminal.java new file mode 100644 index 0000000000000..697b62fefbb9e --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/TestTerminal.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Iterator; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class TestTerminal implements CliTerminal { + + private StringBuilder stringBuilder = new StringBuilder(); + private boolean closed = false; + private Iterator<String> inputLines; + + public TestTerminal(String ... inputLines) { + this.inputLines = Arrays.asList(inputLines).iterator(); + } + + @Override + public LineBuilder line() { + return new LineBuilder() { + + @Override + public LineBuilder text(String text) { + stringBuilder.append(text); + return this; + } + + @Override + public LineBuilder em(String text) { + stringBuilder.append("").append(text).append(""); + return this; + } + + @Override + public LineBuilder error(String text) { + stringBuilder.append("").append(text).append(""); + return this; + } + + @Override + public LineBuilder param(String text) { + stringBuilder.append("").append(text).append(""); + return this; + } + + @Override + public void ln() { + stringBuilder.append("\n"); + } + + @Override + public void end() { + stringBuilder.append(""); + } + }; + } + + @Override + public void print(String text) { + stringBuilder.append(text); + } + + @Override + public void println(String text) { + stringBuilder.append(text); + stringBuilder.append("\n"); + } + + @Override + public void error(String type, String message) { + stringBuilder.append("").append(type).append(" ["); + stringBuilder.append("").append(message).append(""); + stringBuilder.append("]\n"); + } + + @Override + public void println() { + stringBuilder.append("\n"); + } + + @Override + public void clear() { + stringBuilder = new StringBuilder(); + } + + @Override + public void flush() { + stringBuilder.append(""); + } + + @Override + public void printStackTrace(Exception ex) { + stringBuilder.append(""); + } + + @Override + public String readPassword(String prompt) { + return "password"; + } + + @Override + public String readLine(String prompt) { + assertTrue(inputLines.hasNext()); + return inputLines.next(); + } + + @Override + public void close() throws IOException { + assertFalse(closed); + closed = true; + } + + @Override + public String toString() { + return stringBuilder.toString(); + } +} diff --git
a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/VersionTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/VersionTests.java new file mode 100644 index 0000000000000..0f1effc446389 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/VersionTests.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.client.shared.Version; + +public class VersionTests extends ESTestCase { + public void testVersionIsCurrent() { + /* This test will only work properly in gradle because in gradle we run the tests + * using the jar. */ + assertEquals(org.elasticsearch.Version.CURRENT.toString(), Version.CURRENT.version); + assertNotNull(Version.CURRENT.hash); + assertEquals(org.elasticsearch.Version.CURRENT.major, Version.CURRENT.major); + assertEquals(org.elasticsearch.Version.CURRENT.minor, Version.CURRENT.minor); + } + +} diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/BuiltinCommandTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/BuiltinCommandTests.java new file mode 100644 index 0000000000000..b6a54a3a8cf5d --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/BuiltinCommandTests.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.command; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.cli.TestTerminal; +import org.elasticsearch.xpack.sql.client.HttpClient; + +import static org.hamcrest.Matchers.containsString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verifyNoMoreInteractions; + + +public class BuiltinCommandTests extends ESTestCase { + + public void testInvalidCommand() throws Exception { + TestTerminal testTerminal = new TestTerminal(); + HttpClient httpClient = mock(HttpClient.class); + CliSession cliSession = new CliSession(httpClient); + assertFalse(new ClearScreenCliCommand().handle(testTerminal, cliSession, "something")); + assertFalse(new FetchSeparatorCliCommand().handle(testTerminal, cliSession, "something")); + assertFalse(new FetchSizeCliCommand().handle(testTerminal, cliSession, "something")); + assertFalse(new PrintLogoCommand().handle(testTerminal, cliSession, "something")); + verifyNoMoreInteractions(httpClient); + } + + public void testClearScreen() throws Exception { + TestTerminal testTerminal = new TestTerminal(); + HttpClient httpClient = mock(HttpClient.class); + CliSession cliSession = new CliSession(httpClient); + testTerminal.print("not clean"); + assertTrue(new ClearScreenCliCommand().handle(testTerminal, cliSession, "cls")); + assertEquals("", testTerminal.toString()); + verifyNoMoreInteractions(httpClient); + } + + public void testFetchSeparator() throws Exception { + TestTerminal testTerminal = new TestTerminal(); + HttpClient httpClient = mock(HttpClient.class); + CliSession cliSession = new CliSession(httpClient); + FetchSeparatorCliCommand cliCommand = new FetchSeparatorCliCommand(); + assertFalse(cliCommand.handle(testTerminal, cliSession, "fetch")); + assertEquals("", cliSession.getFetchSeparator()); + + assertTrue(cliCommand.handle(testTerminal, cliSession, "fetch_separator = \"foo\"")); + assertEquals("foo", cliSession.getFetchSeparator()); + assertEquals("fetch separator set to \"foo\"", testTerminal.toString()); + testTerminal.clear(); + + assertTrue(cliCommand.handle(testTerminal, cliSession, "fetch_separator=\"bar\"")); + assertEquals("bar", cliSession.getFetchSeparator()); + assertEquals("fetch separator set to \"bar\"", testTerminal.toString()); + testTerminal.clear(); + + assertTrue(cliCommand.handle(testTerminal, cliSession, "fetch separator=\"baz\"")); + assertEquals("baz", cliSession.getFetchSeparator()); + assertEquals("fetch separator set to \"baz\"", testTerminal.toString()); + verifyNoMoreInteractions(httpClient); + } + + public void testFetchSize() throws Exception { + TestTerminal testTerminal = new TestTerminal(); + HttpClient httpClient = mock(HttpClient.class); + CliSession cliSession = new CliSession(httpClient); + FetchSizeCliCommand cliCommand = new FetchSizeCliCommand(); + assertFalse(cliCommand.handle(testTerminal, cliSession, "fetch")); + assertEquals(1000L, cliSession.getFetchSize()); + + assertTrue(cliCommand.handle(testTerminal, cliSession, "fetch_size = \"foo\"")); + assertEquals(1000L, cliSession.getFetchSize()); + assertEquals("Invalid fetch size [\"foo\"]", testTerminal.toString()); + testTerminal.clear(); + + assertTrue(cliCommand.handle(testTerminal, cliSession, "fetch_size = 10")); + assertEquals(10L, cliSession.getFetchSize()); + assertEquals("fetch size set to 10", testTerminal.toString()); + + testTerminal.clear(); + + assertTrue(cliCommand.handle(testTerminal, cliSession, "fetch_size = -10")); + assertEquals(10L, 
cliSession.getFetchSize()); + assertEquals("Invalid fetch size [-10]. Must be > 0.", testTerminal.toString()); + verifyNoMoreInteractions(httpClient); + } + + public void testPrintLogo() throws Exception { + TestTerminal testTerminal = new TestTerminal(); + HttpClient httpClient = mock(HttpClient.class); + CliSession cliSession = new CliSession(httpClient); + testTerminal.print("not clean"); + assertTrue(new PrintLogoCommand().handle(testTerminal, cliSession, "logo")); + assertThat(testTerminal.toString(), containsString("SQL")); + verifyNoMoreInteractions(httpClient); + } + +} diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/CliCommandsTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/CliCommandsTests.java new file mode 100644 index 0000000000000..5d1988d5c8dcd --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/CliCommandsTests.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.cli.command; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.cli.TestTerminal; +import org.elasticsearch.xpack.sql.client.HttpClient; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verifyNoMoreInteractions; + +public class CliCommandsTests extends ESTestCase { + + public void testCliCommands() { + TestTerminal testTerminal = new TestTerminal(); + HttpClient httpClient = mock(HttpClient.class); + CliSession cliSession = new CliSession(httpClient); + CliCommands cliCommands = new CliCommands( + (terminal, session, line) -> line.equals("foo"), + (terminal, session, line) -> line.equals("bar"), + (terminal, session, line) -> line.equals("baz") + ); + + assertTrue(cliCommands.handle(testTerminal, cliSession, "foo")); + assertTrue(cliCommands.handle(testTerminal, cliSession, "bar")); + assertTrue(cliCommands.handle(testTerminal, cliSession, "baz")); + assertFalse(cliCommands.handle(testTerminal, cliSession, "")); + assertFalse(cliCommands.handle(testTerminal, cliSession, "something")); + verifyNoMoreInteractions(httpClient); + } +} diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java new file mode 100644 index 0000000000000..567cd10531d71 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.command; + +import org.elasticsearch.Build; +import org.elasticsearch.action.main.MainResponse; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.cli.TestTerminal; +import org.elasticsearch.xpack.sql.client.HttpClient; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class ServerInfoCliCommandTests extends ESTestCase { + + public void testInvalidCommand() throws Exception { + TestTerminal testTerminal = new TestTerminal(); + HttpClient client = mock(HttpClient.class); + CliSession cliSession = new CliSession(client); + ServerInfoCliCommand cliCommand = new ServerInfoCliCommand(); + assertFalse(cliCommand.handle(testTerminal, cliSession, "blah")); + assertEquals(testTerminal.toString(), ""); + verifyNoMoreInteractions(client); + } + + public void testShowInfo() throws Exception { + TestTerminal testTerminal = new TestTerminal(); + HttpClient client = mock(HttpClient.class); + CliSession cliSession = new CliSession(client); + when(client.serverInfo()).thenReturn(new MainResponse("my_node", org.elasticsearch.Version.fromString("1.2.3"), + new ClusterName("my_cluster"), UUIDs.randomBase64UUID(), Build.CURRENT)); + ServerInfoCliCommand cliCommand = new ServerInfoCliCommand(); + assertTrue(cliCommand.handle(testTerminal, cliSession, "info")); + assertEquals(testTerminal.toString(), "Node:my_node Cluster:my_cluster Version:1.2.3\n"); + verify(client, times(1)).serverInfo(); + verifyNoMoreInteractions(client); + } + +} \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java new file mode 100644 index 0000000000000..4385731313aaf --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.cli.command; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.cli.TestTerminal; +import org.elasticsearch.xpack.sql.client.HttpClient; +import org.elasticsearch.xpack.sql.plugin.ColumnInfo; +import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; + +import java.sql.JDBCType; +import java.sql.SQLException; +import java.util.Collections; +import java.util.List; + +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class ServerQueryCliCommandTests extends ESTestCase { + + public void testExceptionHandling() throws Exception { + TestTerminal testTerminal = new TestTerminal(); + HttpClient client = mock(HttpClient.class); + CliSession cliSession = new CliSession(client); + when(client.queryInit("blah", 1000)).thenThrow(new SQLException("test exception")); + ServerQueryCliCommand cliCommand = new ServerQueryCliCommand(); + assertTrue(cliCommand.handle(testTerminal, cliSession, "blah")); + assertEquals("Bad request [test exception]\n", testTerminal.toString()); + verify(client, times(1)).queryInit(eq("blah"), eq(1000)); + verifyNoMoreInteractions(client); + } + + public void testOnePageQuery() throws Exception { + TestTerminal testTerminal = new TestTerminal(); + HttpClient client = mock(HttpClient.class); + CliSession cliSession = new CliSession(client); + cliSession.setFetchSize(10); + when(client.queryInit("test query", 10)).thenReturn(fakeResponse("", true, "foo")); + ServerQueryCliCommand cliCommand = new ServerQueryCliCommand(); + assertTrue(cliCommand.handle(testTerminal, cliSession, "test query")); + assertEquals(" field \n---------------\nfoo \n", testTerminal.toString()); + verify(client, times(1)).queryInit(eq("test query"), eq(10)); + verifyNoMoreInteractions(client); + } + + public void testThreePageQuery() throws Exception { + TestTerminal testTerminal = new TestTerminal(); + HttpClient client = mock(HttpClient.class); + CliSession cliSession = new CliSession(client); + cliSession.setFetchSize(10); + when(client.queryInit("test query", 10)).thenReturn(fakeResponse("my_cursor1", true, "first")); + when(client.nextPage("my_cursor1")).thenReturn(fakeResponse("my_cursor2", false, "second")); + when(client.nextPage("my_cursor2")).thenReturn(fakeResponse("", false, "third")); + ServerQueryCliCommand cliCommand = new ServerQueryCliCommand(); + assertTrue(cliCommand.handle(testTerminal, cliSession, "test query")); + assertEquals(" field \n---------------\nfirst \nsecond \nthird \n", + testTerminal.toString()); + verify(client, times(1)).queryInit(eq("test query"), eq(10)); + verify(client, times(2)).nextPage(any()); + verifyNoMoreInteractions(client); + } + + public void testTwoPageQueryWithSeparator() throws Exception { + TestTerminal testTerminal = new TestTerminal(); + HttpClient client = mock(HttpClient.class); + CliSession cliSession = new CliSession(client); + cliSession.setFetchSize(15); + // Set a separator + cliSession.setFetchSeparator("-----"); + when(client.queryInit("test query", 15)).thenReturn(fakeResponse("my_cursor1", true, "first")); + when(client.nextPage("my_cursor1")).thenReturn(fakeResponse("", false, "second")); + ServerQueryCliCommand cliCommand = new ServerQueryCliCommand(); + assertTrue(cliCommand.handle(testTerminal, 
cliSession, "test query")); + assertEquals(" field \n---------------\nfirst \n-----\nsecond \n", + testTerminal.toString()); + verify(client, times(1)).queryInit(eq("test query"), eq(15)); + verify(client, times(1)).nextPage(any()); + verifyNoMoreInteractions(client); + } + + public void testCursorCleanupOnError() throws Exception { + TestTerminal testTerminal = new TestTerminal(); + HttpClient client = mock(HttpClient.class); + CliSession cliSession = new CliSession(client); + cliSession.setFetchSize(15); + when(client.queryInit("test query", 15)).thenReturn(fakeResponse("my_cursor1", true, "first")); + when(client.nextPage("my_cursor1")).thenThrow(new SQLException("test exception")); + when(client.queryClose("my_cursor1")).thenReturn(true); + ServerQueryCliCommand cliCommand = new ServerQueryCliCommand(); + assertTrue(cliCommand.handle(testTerminal, cliSession, "test query")); + assertEquals(" field \n---------------\nfirst \n" + + "Bad request [test exception]\n", testTerminal.toString()); + verify(client, times(1)).queryInit(eq("test query"), eq(15)); + verify(client, times(1)).nextPage(any()); + verify(client, times(1)).queryClose(eq("my_cursor1")); + verifyNoMoreInteractions(client); + } + + private SqlQueryResponse fakeResponse(String cursor, boolean includeColumns, String val) { + List<List<Object>> rows; + List<ColumnInfo> columns; + if (includeColumns) { + columns = Collections.singletonList(new ColumnInfo("", "field", "string", JDBCType.VARCHAR, 0)); + } else { + columns = null; + } + if (val != null) { + rows = Collections.singletonList(Collections.singletonList(val)); + } else { + rows = Collections.singletonList(Collections.emptyList()); + } + return new SqlQueryResponse(cursor, columns, rows); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/package-info.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/package-info.java new file mode 100644 index 0000000000000..9df8484bdc59b --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/package-info.java @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Unit tests for the Elasticsearch CLI client. + */ +package org.elasticsearch.xpack.sql.cli; diff --git a/x-pack/plugin/sql/sql-proto/build.gradle b/x-pack/plugin/sql/sql-proto/build.gradle new file mode 100644 index 0000000000000..4c58baaaadb63 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/build.gradle @@ -0,0 +1,153 @@ + +/* + * This project is named sql-proto because it is in the + * "org.elasticsearch.plugin" group and it'd be super confusing for it to just + * be called "proto" there. + */ + +import org.elasticsearch.gradle.precommit.PrecommitTasks + +apply plugin: 'elasticsearch.build' + +description = 'Request and response objects shared by the cli, jdbc ' + + 'and the Elasticsearch plugin' + +dependencies { + /* We'd like to just depend on xcontent but there are some bits of + * :server that we rely on....
*/ + compile (project(':server')) { + transitive = false + } + compile (project(':libs:elasticsearch-core')) { + transitive = false + } + compile (project(':libs:x-content')) { + transitive = false + } + compile "org.apache.lucene:lucene-core:${versions.lucene}" + compile 'joda-time:joda-time:2.9.9' + runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + runtime "org.apache.logging.log4j:log4j-api:${versions.log4j}" + runtime "org.apache.logging.log4j:log4j-core:${versions.log4j}" + + testCompile "org.elasticsearch.test:framework:${version}" +} + +forbiddenApisMain { + //sql does not depend on server, so only jdk signatures should be checked + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +} + +dependencyLicenses { + mapping from: /elasticsearch-core.*/, to: 'elasticsearch' + mapping from: /jackson-.*/, to: 'jackson' + mapping from: /lucene-.*/, to: 'lucene' + ignoreSha 'elasticsearch' + ignoreSha 'elasticsearch-core' +} + +thirdPartyAudit.excludes = [ + 'com.fasterxml.jackson.dataformat.yaml.YAMLFactory', + 'com.fasterxml.jackson.dataformat.yaml.YAMLMapper', + + // from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml) + 'com.fasterxml.jackson.databind.ObjectMapper', + 'org.fusesource.jansi.Ansi', + 'org.fusesource.jansi.AnsiRenderer$Code', + + // from log4j + 'com.conversantmedia.util.concurrent.DisruptorBlockingQueue', + 'com.conversantmedia.util.concurrent.SpinPolicy', + 'com.fasterxml.jackson.annotation.JsonInclude$Include', + 'com.fasterxml.jackson.databind.DeserializationContext', + 'com.fasterxml.jackson.databind.DeserializationFeature', + 'com.fasterxml.jackson.databind.JsonMappingException', + 'com.fasterxml.jackson.databind.JsonNode', + 'com.fasterxml.jackson.databind.Module$SetupContext', + 'com.fasterxml.jackson.databind.ObjectReader', + 'com.fasterxml.jackson.databind.ObjectWriter', + 'com.fasterxml.jackson.databind.SerializerProvider', + 'com.fasterxml.jackson.databind.deser.std.StdDeserializer', + 'com.fasterxml.jackson.databind.deser.std.StdScalarDeserializer', + 'com.fasterxml.jackson.databind.module.SimpleModule', + 'com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter', + 'com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider', + 'com.fasterxml.jackson.databind.ser.std.StdScalarSerializer', + 'com.fasterxml.jackson.databind.ser.std.StdSerializer', + 'com.fasterxml.jackson.dataformat.xml.JacksonXmlModule', + 'com.fasterxml.jackson.dataformat.xml.XmlMapper', + 'com.fasterxml.jackson.dataformat.xml.util.DefaultXmlPrettyPrinter', + 'com.fasterxml.jackson.databind.node.JsonNodeFactory', + 'com.fasterxml.jackson.databind.node.ObjectNode', + 'com.lmax.disruptor.BlockingWaitStrategy', + 'com.lmax.disruptor.BusySpinWaitStrategy', + 'com.lmax.disruptor.EventFactory', + 'com.lmax.disruptor.EventTranslator', + 'com.lmax.disruptor.EventTranslatorTwoArg', + 'com.lmax.disruptor.EventTranslatorVararg', + 'com.lmax.disruptor.ExceptionHandler', + 'com.lmax.disruptor.LifecycleAware', + 'com.lmax.disruptor.RingBuffer', + 'com.lmax.disruptor.Sequence', + 'com.lmax.disruptor.SequenceReportingEventHandler', + 'com.lmax.disruptor.SleepingWaitStrategy', + 'com.lmax.disruptor.TimeoutBlockingWaitStrategy', + 'com.lmax.disruptor.WaitStrategy', + 'com.lmax.disruptor.YieldingWaitStrategy', + 'com.lmax.disruptor.dsl.Disruptor', + 'com.lmax.disruptor.dsl.ProducerType', + 'javax.jms.Connection', + 'javax.jms.ConnectionFactory', + 'javax.jms.Destination', + 'javax.jms.JMSException', + 'javax.jms.MapMessage', + 
'javax.jms.Message', + 'javax.jms.MessageConsumer', + 'javax.jms.MessageProducer', + 'javax.jms.Session', + 'javax.mail.Authenticator', + 'javax.mail.Message$RecipientType', + 'javax.mail.PasswordAuthentication', + 'javax.mail.Session', + 'javax.mail.Transport', + 'javax.mail.internet.InternetAddress', + 'javax.mail.internet.InternetHeaders', + 'javax.mail.internet.MimeBodyPart', + 'javax.mail.internet.MimeMessage', + 'javax.mail.internet.MimeMultipart', + 'javax.mail.internet.MimeUtility', + 'javax.mail.util.ByteArrayDataSource', + 'javax.persistence.AttributeConverter', + 'javax.persistence.EntityManager', + 'javax.persistence.EntityManagerFactory', + 'javax.persistence.EntityTransaction', + 'javax.persistence.Persistence', + 'javax.persistence.PersistenceException', + 'org.apache.commons.compress.compressors.CompressorStreamFactory', + 'org.apache.commons.compress.utils.IOUtils', + 'org.apache.commons.csv.CSVFormat', + 'org.apache.commons.csv.QuoteMode', + 'org.apache.kafka.clients.producer.Callback', + 'org.apache.kafka.clients.producer.KafkaProducer', + 'org.apache.kafka.clients.producer.Producer', + 'org.apache.kafka.clients.producer.ProducerRecord', + 'org.apache.kafka.clients.producer.RecordMetadata', + 'org.codehaus.stax2.XMLStreamWriter2', + 'org.jctools.queues.MessagePassingQueue$Consumer', + 'org.jctools.queues.MpscArrayQueue', + 'org.osgi.framework.AdaptPermission', + 'org.osgi.framework.AdminPermission', + 'org.osgi.framework.Bundle', + 'org.osgi.framework.BundleActivator', + 'org.osgi.framework.BundleContext', + 'org.osgi.framework.BundleEvent', + 'org.osgi.framework.BundleReference', + 'org.osgi.framework.FrameworkUtil', + 'org.osgi.framework.ServiceRegistration', + 'org.osgi.framework.SynchronousBundleListener', + 'org.osgi.framework.wiring.BundleWire', + 'org.osgi.framework.wiring.BundleWiring', + 'org.zeromq.ZMQ$Context', + 'org.zeromq.ZMQ$Socket', + 'org.zeromq.ZMQ' +] diff --git a/x-pack/plugin/sql/sql-proto/licenses/jackson-LICENSE b/x-pack/plugin/sql/sql-proto/licenses/jackson-LICENSE new file mode 100644 index 0000000000000..f5f45d26a49d6 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/jackson-LICENSE @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/x-pack/plugin/sql/sql-proto/licenses/jackson-NOTICE b/x-pack/plugin/sql/sql-proto/licenses/jackson-NOTICE new file mode 100644 index 0000000000000..4c976b7b4cc58 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/jackson-NOTICE @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). 
+ +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. diff --git a/x-pack/plugin/sql/sql-proto/licenses/jackson-core-2.8.10.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/jackson-core-2.8.10.jar.sha1 new file mode 100644 index 0000000000000..a322d371e265e --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/jackson-core-2.8.10.jar.sha1 @@ -0,0 +1 @@ +eb21a035c66ad307e66ec8fce37f5d50fd62d039 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/joda-time-2.9.9.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/joda-time-2.9.9.jar.sha1 new file mode 100644 index 0000000000000..4009932ea3beb --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/joda-time-2.9.9.jar.sha1 @@ -0,0 +1 @@ +f7b520c458572890807d143670c9b24f4de90897 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/joda-time-LICENSE.txt b/x-pack/plugin/sql/sql-proto/licenses/joda-time-LICENSE.txt new file mode 100644 index 0000000000000..75b52484ea471 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/joda-time-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/sql/sql-proto/licenses/joda-time-NOTICE.txt b/x-pack/plugin/sql/sql-proto/licenses/joda-time-NOTICE.txt new file mode 100644 index 0000000000000..dffbcf31cacf6 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/joda-time-NOTICE.txt @@ -0,0 +1,5 @@ +============================================================================= += NOTICE file corresponding to section 4d of the Apache License Version 2.0 = +============================================================================= +This product includes software developed by +Joda.org (http://www.joda.org/). 
diff --git a/x-pack/plugin/sql/sql-proto/licenses/log4j-api-2.9.1.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/log4j-api-2.9.1.jar.sha1 new file mode 100644 index 0000000000000..e1a89fadfed95 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/log4j-api-2.9.1.jar.sha1 @@ -0,0 +1 @@ +7a2999229464e7a324aa503c0a52ec0f05efe7bd \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/log4j-api-LICENSE.txt b/x-pack/plugin/sql/sql-proto/licenses/log4j-api-LICENSE.txt new file mode 100644 index 0000000000000..6279e5206de13 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/log4j-api-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 1999-2005 The Apache Software Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/sql/sql-proto/licenses/log4j-api-NOTICE.txt b/x-pack/plugin/sql/sql-proto/licenses/log4j-api-NOTICE.txt new file mode 100644 index 0000000000000..0375732360047 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/log4j-api-NOTICE.txt @@ -0,0 +1,5 @@ +Apache log4j +Copyright 2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/log4j-core-2.9.1.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/log4j-core-2.9.1.jar.sha1 new file mode 100644 index 0000000000000..990ea322a7613 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/log4j-core-2.9.1.jar.sha1 @@ -0,0 +1 @@ +c041978c686866ee8534f538c6220238db3bb6be \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/log4j-core-LICENSE.txt b/x-pack/plugin/sql/sql-proto/licenses/log4j-core-LICENSE.txt new file mode 100644 index 0000000000000..6279e5206de13 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/log4j-core-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 1999-2005 The Apache Software Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/sql/sql-proto/licenses/log4j-core-NOTICE.txt b/x-pack/plugin/sql/sql-proto/licenses/log4j-core-NOTICE.txt new file mode 100644 index 0000000000000..0375732360047 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/log4j-core-NOTICE.txt @@ -0,0 +1,5 @@ +Apache log4j +Copyright 2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-LICENSE.txt b/x-pack/plugin/sql/sql-proto/licenses/lucene-LICENSE.txt new file mode 100644 index 0000000000000..28b134f5f8e4d --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/lucene-LICENSE.txt @@ -0,0 +1,475 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + + +Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was +derived from unicode conversion examples available at +http://www.unicode.org/Public/PROGRAMS/CVTUTF. Here is the copyright +from those sources: + +/* + * Copyright 2001-2004 Unicode, Inc. + * + * Disclaimer + * + * This source code is provided as is by Unicode, Inc. No claims are + * made as to fitness for any particular purpose. No warranties of any + * kind are expressed or implied. The recipient agrees to determine + * applicability of information provided. If this file has been + * purchased on magnetic or optical media from Unicode, Inc., the + * sole remedy for any claim will be exchange of defective media + * within 90 days of receipt. + * + * Limitations on Rights to Redistribute This Code + * + * Unicode, Inc. hereby grants the right to freely use the information + * supplied in this file in the creation of products supporting the + * Unicode Standard, and to make copies of this file in any form + * for internal or external distribution as long as this notice + * remains attached. + */ + + +Some code in core/src/java/org/apache/lucene/util/ArrayUtil.java was +derived from Python 2.4.2 sources available at +http://www.python.org. Full license is here: + + http://www.python.org/download/releases/2.4.2/license/ + +Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was +derived from Python 3.1.2 sources available at +http://www.python.org. Full license is here: + + http://www.python.org/download/releases/3.1.2/license/ + +Some code in core/src/java/org/apache/lucene/util/automaton was +derived from Brics automaton sources available at +www.brics.dk/automaton/. Here is the copyright from those sources: + +/* + * Copyright (c) 2001-2009 Anders Moeller + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +The levenshtein automata tables in core/src/java/org/apache/lucene/util/automaton +were automatically generated with the moman/finenight FSA package. 
+Here is the copyright for those sources: + +# Copyright (c) 2010, Jean-Philippe Barrette-LaPierre, +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. + +Some code in core/src/java/org/apache/lucene/util/UnicodeUtil.java was +derived from ICU (http://www.icu-project.org) +The full license is available here: + http://source.icu-project.org/repos/icu/icu/trunk/license.html + +/* + * Copyright (C) 1999-2010, International Business Machines + * Corporation and others. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, and/or sell copies of the + * Software, and to permit persons to whom the Software is furnished to do so, + * provided that the above copyright notice(s) and this permission notice appear + * in all copies of the Software and that both the above copyright notice(s) and + * this permission notice appear in supporting documentation. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE + * LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR + * ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER + * IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * Except as contained in this notice, the name of a copyright holder shall not + * be used in advertising or otherwise to promote the sale, use or other + * dealings in this Software without prior written authorization of the + * copyright holder. + */ + +The following license applies to the Snowball stemmers: + +Copyright (c) 2001, Dr Martin Porter +Copyright (c) 2002, Richard Boulton +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * Neither the name of the copyright holders nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The following license applies to the KStemmer: + +Copyright © 2003, +Center for Intelligent Information Retrieval, +University of Massachusetts, Amherst. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +3. The names "Center for Intelligent Information Retrieval" and +"University of Massachusetts" must not be used to endorse or promote products +derived from this software without prior written permission. To obtain +permission, contact info@ciir.cs.umass.edu. + +THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF MASSACHUSETTS AND OTHER CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +The following license applies to the Morfologik project: + +Copyright (c) 2006 Dawid Weiss +Copyright (c) 2007-2011 Dawid Weiss, Marcin Miłkowski +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Morfologik nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +--- + +The dictionary comes from Morfologik project. Morfologik uses data from +Polish ispell/myspell dictionary hosted at http://www.sjp.pl/slownik/en/ and +is licenced on the terms of (inter alia) LGPL and Creative Commons +ShareAlike. The part-of-speech tags were added in Morfologik project and +are not found in the data from sjp.pl. The tagset is similar to IPI PAN +tagset. + +--- + +The following license applies to the Morfeusz project, +used by org.apache.lucene.analysis.morfologik. + +BSD-licensed dictionary of Polish (SGJP) +http://sgjp.pl/morfeusz/ + +Copyright © 2011 Zygmunt Saloni, Włodzimierz Gruszczyński, + Marcin Woliński, Robert Wołosz + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the + distribution. + +THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS “AS IS” AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-NOTICE.txt b/x-pack/plugin/sql/sql-proto/licenses/lucene-NOTICE.txt new file mode 100644 index 0000000000000..1a1d51572432a --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/lucene-NOTICE.txt @@ -0,0 +1,192 @@ +Apache Lucene +Copyright 2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). 
+ +Includes software from other Apache Software Foundation projects, +including, but not limited to: + - Apache Ant + - Apache Jakarta Regexp + - Apache Commons + - Apache Xerces + +ICU4J, (under analysis/icu) is licensed under an MIT styles license +and Copyright (c) 1995-2008 International Business Machines Corporation and others + +Some data files (under analysis/icu/src/data) are derived from Unicode data such +as the Unicode Character Database. See http://unicode.org/copyright.html for more +details. + +Brics Automaton (under core/src/java/org/apache/lucene/util/automaton) is +BSD-licensed, created by Anders Møller. See http://www.brics.dk/automaton/ + +The levenshtein automata tables (under core/src/java/org/apache/lucene/util/automaton) were +automatically generated with the moman/finenight FSA library, created by +Jean-Philippe Barrette-LaPierre. This library is available under an MIT license, +see http://sites.google.com/site/rrettesite/moman and +http://bitbucket.org/jpbarrette/moman/overview/ + +The class org.apache.lucene.util.WeakIdentityMap was derived from +the Apache CXF project and is Apache License 2.0. + +The Google Code Prettify is Apache License 2.0. +See http://code.google.com/p/google-code-prettify/ + +JUnit (junit-4.10) is licensed under the Common Public License v. 1.0 +See http://junit.sourceforge.net/cpl-v10.html + +This product includes code (JaspellTernarySearchTrie) from Java Spelling Checkin +g Package (jaspell): http://jaspell.sourceforge.net/ +License: The BSD License (http://www.opensource.org/licenses/bsd-license.php) + +The snowball stemmers in + analysis/common/src/java/net/sf/snowball +were developed by Martin Porter and Richard Boulton. +The snowball stopword lists in + analysis/common/src/resources/org/apache/lucene/analysis/snowball +were developed by Martin Porter and Richard Boulton. +The full snowball package is available from + http://snowball.tartarus.org/ + +The KStem stemmer in + analysis/common/src/org/apache/lucene/analysis/en +was developed by Bob Krovetz and Sergio Guzman-Lara (CIIR-UMass Amherst) +under the BSD-license. + +The Arabic,Persian,Romanian,Bulgarian, Hindi and Bengali analyzers (common) come with a default +stopword list that is BSD-licensed created by Jacques Savoy. These files reside in: +analysis/common/src/resources/org/apache/lucene/analysis/ar/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/fa/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/ro/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/bg/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/hi/stopwords.txt, +analysis/common/src/resources/org/apache/lucene/analysis/bn/stopwords.txt +See http://members.unine.ch/jacques.savoy/clef/index.html. + +The German,Spanish,Finnish,French,Hungarian,Italian,Portuguese,Russian and Swedish light stemmers +(common) are based on BSD-licensed reference implementations created by Jacques Savoy and +Ljiljana Dolamic. 
These files reside in: +analysis/common/src/java/org/apache/lucene/analysis/de/GermanLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/de/GermanMinimalStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/es/SpanishLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchMinimalStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/it/ItalianLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLightStemmer.java +analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishLightStemmer.java + +The Stempel analyzer (stempel) includes BSD-licensed software developed +by the Egothor project http://egothor.sf.net/, created by Leo Galambos, Martin Kvapil, +and Edmond Nolan. + +The Polish analyzer (stempel) comes with a default +stopword list that is BSD-licensed created by the Carrot2 project. The file resides +in stempel/src/resources/org/apache/lucene/analysis/pl/stopwords.txt. +See http://project.carrot2.org/license.html. + +The SmartChineseAnalyzer source code (smartcn) was +provided by Xiaoping Gao and copyright 2009 by www.imdict.net. + +WordBreakTestUnicode_*.java (under modules/analysis/common/src/test/) +is derived from Unicode data such as the Unicode Character Database. +See http://unicode.org/copyright.html for more details. + +The Morfologik analyzer (morfologik) includes BSD-licensed software +developed by Dawid Weiss and Marcin Miłkowski (http://morfologik.blogspot.com/). + +Morfologik uses data from Polish ispell/myspell dictionary +(http://www.sjp.pl/slownik/en/) licenced on the terms of (inter alia) +LGPL and Creative Commons ShareAlike. + +Morfologic includes data from BSD-licensed dictionary of Polish (SGJP) +(http://sgjp.pl/morfeusz/) + +Servlet-api.jar and javax.servlet-*.jar are under the CDDL license, the original +source code for this can be found at http://www.eclipse.org/jetty/downloads.php + +=========================================================================== +Kuromoji Japanese Morphological Analyzer - Apache Lucene Integration +=========================================================================== + +This software includes a binary and/or source version of data from + + mecab-ipadic-2.7.0-20070801 + +which can be obtained from + + http://atilika.com/releases/mecab-ipadic/mecab-ipadic-2.7.0-20070801.tar.gz + +or + + http://jaist.dl.sourceforge.net/project/mecab/mecab-ipadic/2.7.0-20070801/mecab-ipadic-2.7.0-20070801.tar.gz + +=========================================================================== +mecab-ipadic-2.7.0-20070801 Notice +=========================================================================== + +Nara Institute of Science and Technology (NAIST), +the copyright holders, disclaims all warranties with regard to this +software, including all implied warranties of merchantability and +fitness, in no event shall NAIST be liable for +any special, indirect or consequential damages or any damages +whatsoever resulting from loss of use, data or profits, whether in an +action of contract, negligence or other tortuous action, arising out +of or in connection with the use or performance of this software. 
+ +A large portion of the dictionary entries +originate from ICOT Free Software. The following conditions for ICOT +Free Software applies to the current dictionary as well. + +Each User may also freely distribute the Program, whether in its +original form or modified, to any third party or parties, PROVIDED +that the provisions of Section 3 ("NO WARRANTY") will ALWAYS appear +on, or be attached to, the Program, which is distributed substantially +in the same form as set out herein and that such intended +distribution, if actually made, will neither violate or otherwise +contravene any of the laws and regulations of the countries having +jurisdiction over the User or the intended distribution itself. + +NO WARRANTY + +The program was produced on an experimental basis in the course of the +research and development conducted during the project and is provided +to users as so produced on an experimental basis. Accordingly, the +program is provided without any warranty whatsoever, whether express, +implied, statutory or otherwise. The term "warranty" used herein +includes, but is not limited to, any warranty of the quality, +performance, merchantability and fitness for a particular purpose of +the program and the nonexistence of any infringement or violation of +any right of any third party. + +Each user of the program will agree and understand, and be deemed to +have agreed and understood, that there is no warranty whatsoever for +the program and, accordingly, the entire risk arising from or +otherwise connected with the program is assumed by the user. + +Therefore, neither ICOT, the copyright holder, or any other +organization that participated in or was otherwise related to the +development of the program and their respective officials, directors, +officers and other employees shall be held liable for any and all +damages, including, without limitation, general, special, incidental +and consequential damages, arising out of or otherwise in connection +with the use or inability to use the program or any product, material +or result produced or otherwise obtained by using the program, +regardless of whether they have been advised of, or otherwise had +knowledge of, the possibility of such damages at any time during the +project or thereafter. Each user will be deemed to have agreed to the +foregoing by his or her commencement of use of the program. The term +"use" as used herein includes, but is not limited to, the use, +modification, copying and distribution of the program and the +production of secondary products from the program. + +In the case where the program, whether in its original form or +modified, was distributed or delivered to or received by a user from +any person, organization or entity other than ICOT, unless it makes or +grants independently of ICOT any specific warranty to the user in +writing, such person, organization or entity, will also be exempted +from and not be held liable to the user for any such damages as noted +above as far as the program is concerned. 
diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.3.0.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..e12c932b38dd0 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.3.0.jar.sha1 @@ -0,0 +1 @@ +040e2de30c5e6bad868b144e371730200719ceb3 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlQueryRequest.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlQueryRequest.java new file mode 100644 index 0000000000000..8969b88161935 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlQueryRequest.java @@ -0,0 +1,259 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.CompositeIndicesRequest; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.TimeZone; +import java.util.function.Supplier; + +/** + * Base class for requests that contain sql queries (Query and Translate) + */ +public abstract class AbstractSqlQueryRequest extends AbstractSqlRequest implements CompositeIndicesRequest, ToXContentFragment { + public static final TimeZone DEFAULT_TIME_ZONE = TimeZone.getTimeZone("UTC"); + + /** + * Global choice for the default fetch size. 
+ */ + public static final int DEFAULT_FETCH_SIZE = 1000; + public static final TimeValue DEFAULT_REQUEST_TIMEOUT = TimeValue.timeValueSeconds(90); + public static final TimeValue DEFAULT_PAGE_TIMEOUT = TimeValue.timeValueSeconds(45); + + private String query = ""; + private TimeZone timeZone = DEFAULT_TIME_ZONE; + private int fetchSize = DEFAULT_FETCH_SIZE; + private TimeValue requestTimeout = DEFAULT_REQUEST_TIMEOUT; + private TimeValue pageTimeout = DEFAULT_PAGE_TIMEOUT; + @Nullable + private QueryBuilder filter = null; + private List<SqlTypedParamValue> params = Collections.emptyList(); + + public AbstractSqlQueryRequest() { + super(); + } + + public AbstractSqlQueryRequest(Mode mode, String query, List<SqlTypedParamValue> params, QueryBuilder filter, TimeZone timeZone, + int fetchSize, TimeValue requestTimeout, TimeValue pageTimeout) { + super(mode); + this.query = query; + this.params = params; + this.timeZone = timeZone; + this.fetchSize = fetchSize; + this.requestTimeout = requestTimeout; + this.pageTimeout = pageTimeout; + this.filter = filter; + } + + protected static <R extends AbstractSqlQueryRequest> ObjectParser<R, Void> objectParser(Supplier<R> supplier) { + // TODO: convert this into ConstructingObjectParser + ObjectParser<R, Void> parser = new ObjectParser<>("sql/query", true, supplier); + parser.declareString(AbstractSqlQueryRequest::query, new ParseField("query")); + parser.declareObjectArray(AbstractSqlQueryRequest::params, (p, c) -> SqlTypedParamValue.fromXContent(p), new ParseField("params")); + parser.declareString((request, zoneId) -> request.timeZone(TimeZone.getTimeZone(zoneId)), new ParseField("time_zone")); + parser.declareInt(AbstractSqlQueryRequest::fetchSize, new ParseField("fetch_size")); + parser.declareString( + (request, timeout) -> request.requestTimeout(TimeValue.parseTimeValue(timeout, DEFAULT_REQUEST_TIMEOUT, "request_timeout")), + new ParseField("request_timeout")); + parser.declareString( + (request, timeout) -> request.pageTimeout(TimeValue.parseTimeValue(timeout, DEFAULT_PAGE_TIMEOUT, "page_timeout")), + new ParseField("page_timeout")); + parser.declareObject(AbstractSqlQueryRequest::filter, + (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), new ParseField("filter")); + return parser; + } + + /** + * Text of SQL query + */ + public String query() { + return query; + } + + public AbstractSqlQueryRequest query(String query) { + if (query == null) { + throw new IllegalArgumentException("query may not be null."); + } + this.query = query; + return this; + } + + /** + * An optional list of parameters if the SQL query is parametrized + */ + public List<SqlTypedParamValue> params() { + return params; + } + + public AbstractSqlQueryRequest params(List<SqlTypedParamValue> params) { + if (params == null) { + throw new IllegalArgumentException("params may not be null."); + } + this.params = params; + return this; + } + + /** + * The client's time zone + */ + public TimeZone timeZone() { + return timeZone; + } + + public AbstractSqlQueryRequest timeZone(TimeZone timeZone) { + if (timeZone == null) { + throw new IllegalArgumentException("time zone may not be null."); + } + this.timeZone = timeZone; + return this; + } + + /** + * Hint about how many results to fetch at once. + */ + public int fetchSize() { + return fetchSize; + } + + /** + * Hint about how many results to fetch at once.
+ */ + public AbstractSqlQueryRequest fetchSize(int fetchSize) { + if (fetchSize <= 0) { + throw new IllegalArgumentException("fetch_size must be more than 0."); + } + this.fetchSize = fetchSize; + return this; + } + + /** + * The timeout specified on the search request + */ + public TimeValue requestTimeout() { + return requestTimeout; + } + + public AbstractSqlQueryRequest requestTimeout(TimeValue requestTimeout) { + this.requestTimeout = requestTimeout; + return this; + } + + /** + * The scroll timeout + */ + public TimeValue pageTimeout() { + return pageTimeout; + } + + public AbstractSqlQueryRequest pageTimeout(TimeValue pageTimeout) { + this.pageTimeout = pageTimeout; + return this; + } + + /** + * An optional Query DSL defined query that can added as a filter on the top of the SQL query + */ + public AbstractSqlQueryRequest filter(QueryBuilder filter) { + this.filter = filter; + return this; + } + + /** + * An optional Query DSL defined query that can added as a filter on the top of the SQL query + */ + public QueryBuilder filter() { + return filter; + } + + public AbstractSqlQueryRequest(StreamInput in) throws IOException { + super(in); + query = in.readString(); + params = in.readList(SqlTypedParamValue::new); + timeZone = TimeZone.getTimeZone(in.readString()); + fetchSize = in.readVInt(); + requestTimeout = in.readTimeValue(); + pageTimeout = in.readTimeValue(); + filter = in.readOptionalNamedWriteable(QueryBuilder.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(query); + out.writeList(params); + out.writeString(timeZone.getID()); + out.writeVInt(fetchSize); + out.writeTimeValue(requestTimeout); + out.writeTimeValue(pageTimeout); + out.writeOptionalNamedWriteable(filter); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + AbstractSqlQueryRequest that = (AbstractSqlQueryRequest) o; + return fetchSize == that.fetchSize && + Objects.equals(query, that.query) && + Objects.equals(params, that.params) && + Objects.equals(timeZone, that.timeZone) && + Objects.equals(requestTimeout, that.requestTimeout) && + Objects.equals(pageTimeout, that.pageTimeout) && + Objects.equals(filter, that.filter); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), query, timeZone, fetchSize, requestTimeout, pageTimeout, filter); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (query != null) { + builder.field("query", query); + } + if (this.params.isEmpty() == false) { + builder.startArray("params"); + for (SqlTypedParamValue val : this.params) { + val.toXContent(builder, params); + } + builder.endArray(); + } + if (timeZone != null) { + builder.field("time_zone", timeZone.getID()); + } + if (fetchSize != DEFAULT_FETCH_SIZE) { + builder.field("fetch_size", fetchSize); + } + if (requestTimeout != DEFAULT_REQUEST_TIMEOUT) { + builder.field("request_timeout", requestTimeout.getStringRep()); + } + if (pageTimeout != DEFAULT_PAGE_TIMEOUT) { + builder.field("page_timeout", pageTimeout.getStringRep()); + } + if (filter != null) { + builder.field("filter"); + filter.toXContent(builder, params); + } + return builder; + } + +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlRequest.java 
b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlRequest.java new file mode 100644 index 0000000000000..bc4b1e81e44b3 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlRequest.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContent; + +import java.io.IOException; +import java.util.Locale; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * Base request for all SQL-related requests. + *

+ * Contains information about the client mode that can be used to generate different responses based on the caller type. + */ +public abstract class AbstractSqlRequest extends ActionRequest implements ToXContent { + + public enum Mode { + PLAIN, + JDBC; + + public static Mode fromString(String mode) { + if (mode == null) { + return PLAIN; + } + return Mode.valueOf(mode.toUpperCase(Locale.ROOT)); + } + + + @Override + public String toString() { + return this.name().toLowerCase(Locale.ROOT); + } + } + + private Mode mode = Mode.PLAIN; + + protected AbstractSqlRequest() { + + } + + protected AbstractSqlRequest(Mode mode) { + this.mode = mode; + } + + protected AbstractSqlRequest(StreamInput in) throws IOException { + super(in); + mode = in.readEnum(Mode.class); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (mode == null) { + validationException = addValidationError("[mode] is required", validationException); + } + return validationException; + } + + @Override + public final void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeEnum(mode); + } + + public Mode mode() { + return mode; + } + + public void mode(Mode mode) { + this.mode = mode; + } + + public void mode(String mode) { + this.mode = Mode.fromString(mode); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AbstractSqlRequest that = (AbstractSqlRequest) o; + return mode == that.mode; + } + + @Override + public int hashCode() { + return Objects.hash(mode); + } + +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatter.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatter.java new file mode 100644 index 0000000000000..9d9a9ea04a487 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatter.java @@ -0,0 +1,166 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +/** + * Formats {@link SqlQueryResponse} for the CLI. {@linkplain Writeable} so + * that its state can be saved between pages of results. + */ +public class CliFormatter implements Writeable { + /** + * The minimum width for any column in the formatted results. + */ + private static final int MIN_COLUMN_WIDTH = 15; + + private int[] width; + + /** + * Create a new {@linkplain CliFormatter} for formatting responses similar + * to the provided {@link SqlQueryResponse}. + */ + public CliFormatter(SqlQueryResponse response) { + // Figure out the column widths: + // 1. Start with the widths of the column names + width = new int[response.columns().size()]; + for (int i = 0; i < width.length; i++) { + // TODO read the width from the data type? 
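+ // For now, start from the header length, padded to at least MIN_COLUMN_WIDTH.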
+ width[i] = Math.max(MIN_COLUMN_WIDTH, response.columns().get(i).name().length()); + } + + // 2. Expand columns to fit the largest value + for (List row : response.rows()) { + for (int i = 0; i < width.length; i++) { + // TODO are we sure toString is correct here? What about dates that come back as longs. + // Tracked by https://github.com/elastic/x-pack-elasticsearch/issues/3081 + width[i] = Math.max(width[i], Objects.toString(row.get(i)).length()); + } + } + } + + public CliFormatter(StreamInput in) throws IOException { + width = in.readIntArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeIntArray(width); + } + + /** + * Format the provided {@linkplain SqlQueryResponse} for the CLI + * including the header lines. + */ + public String formatWithHeader(SqlQueryResponse response) { + // The header lines + StringBuilder sb = new StringBuilder(estimateSize(response.rows().size() + 2)); + for (int i = 0; i < width.length; i++) { + if (i > 0) { + sb.append('|'); + } + + String name = response.columns().get(i).name(); + // left padding + int leftPadding = (width[i] - name.length()) / 2; + for (int j = 0; j < leftPadding; j++) { + sb.append(' '); + } + sb.append(name); + // right padding + for (int j = 0; j < width[i] - name.length() - leftPadding; j++) { + sb.append(' '); + } + } + sb.append('\n'); + + for (int i = 0; i < width.length; i++) { + if (i > 0) { + sb.append('+'); + } + for (int j = 0; j < width[i]; j++) { + sb.append('-'); // emdash creates issues + } + } + sb.append('\n'); + + + /* Now format the results. Sadly, this means that column + * widths are entirely determined by the first batch of + * results. */ + return formatWithoutHeader(sb, response); + } + + /** + * Format the provided {@linkplain SqlQueryResponse} for the CLI + * without the header lines. + */ + public String formatWithoutHeader(SqlQueryResponse response) { + return formatWithoutHeader(new StringBuilder(estimateSize(response.rows().size())), response); + } + + private String formatWithoutHeader(StringBuilder sb, SqlQueryResponse response) { + for (List row : response.rows()) { + for (int i = 0; i < width.length; i++) { + if (i > 0) { + sb.append('|'); + } + + // TODO are we sure toString is correct here? What about dates that come back as longs. + // Tracked by https://github.com/elastic/x-pack-elasticsearch/issues/3081 + String string = Objects.toString(row.get(i)); + if (string.length() <= width[i]) { + // Pad + sb.append(string); + int padding = width[i] - string.length(); + for (int p = 0; p < padding; p++) { + sb.append(' '); + } + } else { + // Trim + sb.append(string.substring(0, width[i] - 1)); + sb.append('~'); + } + } + sb.append('\n'); + } + return sb.toString(); + } + + /** + * Pick a good estimate of the buffer size needed to contain the rows. + */ + int estimateSize(int rows) { + /* Each column has either a '|' or a '\n' after it + * so initialize size to number of columns then add + * up the actual widths of each column. 
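+ * The per-row estimate is then multiplied by the requested number of rows to size the buffer.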
*/ + int rowWidthEstimate = width.length; + for (int w : width) { + rowWidthEstimate += w; + } + return rowWidthEstimate * rows; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CliFormatter that = (CliFormatter) o; + return Arrays.equals(width, that.width); + } + + @Override + public int hashCode() { + return Arrays.hashCode(width); + } +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/ColumnInfo.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/ColumnInfo.java new file mode 100644 index 0000000000000..5c12c776dd198 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/ColumnInfo.java @@ -0,0 +1,181 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.sql.JDBCType; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Information about a column returned with first query response + */ +public final class ColumnInfo implements Writeable, ToXContentObject { + + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("column_info", true, objects -> + new ColumnInfo( + objects[0] == null ? "" : (String) objects[0], + (String) objects[1], + (String) objects[2], + objects[3] == null ? null : JDBCType.valueOf((int) objects[3]), + objects[4] == null ? 
0 : (int) objects[4])); + + private static final ParseField TABLE = new ParseField("table"); + private static final ParseField NAME = new ParseField("name"); + private static final ParseField ES_TYPE = new ParseField("type"); + private static final ParseField JDBC_TYPE = new ParseField("jdbc_type"); + private static final ParseField DISPLAY_SIZE = new ParseField("display_size"); + + static { + PARSER.declareString(optionalConstructorArg(), TABLE); + PARSER.declareString(constructorArg(), NAME); + PARSER.declareString(constructorArg(), ES_TYPE); + PARSER.declareInt(optionalConstructorArg(), JDBC_TYPE); + PARSER.declareInt(optionalConstructorArg(), DISPLAY_SIZE); + } + + private final String table; + private final String name; + private final String esType; + @Nullable + private final JDBCType jdbcType; + private final int displaySize; + + public ColumnInfo(String table, String name, String esType, JDBCType jdbcType, int displaySize) { + this.table = table; + this.name = name; + this.esType = esType; + this.jdbcType = jdbcType; + this.displaySize = displaySize; + } + + public ColumnInfo(String table, String name, String esType) { + this.table = table; + this.name = name; + this.esType = esType; + this.jdbcType = null; + this.displaySize = 0; + } + + ColumnInfo(StreamInput in) throws IOException { + table = in.readString(); + name = in.readString(); + esType = in.readString(); + if (in.readBoolean()) { + jdbcType = JDBCType.valueOf(in.readVInt()); + displaySize = in.readVInt(); + } else { + jdbcType = null; + displaySize = 0; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(table); + out.writeString(name); + out.writeString(esType); + if (jdbcType != null) { + out.writeBoolean(true); + out.writeVInt(jdbcType.getVendorTypeNumber()); + out.writeVInt(displaySize); + } else { + out.writeBoolean(false); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (Strings.hasText(table)) { + builder.field("table", table); + } + builder.field("name", name); + builder.field("type", esType); + if (jdbcType != null) { + builder.field("jdbc_type", jdbcType.getVendorTypeNumber()); + builder.field("display_size", displaySize); + } + return builder.endObject(); + } + + + public static ColumnInfo fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + /** + * Name of the table. + */ + public String table() { + return table; + } + + /** + * Name of the column. + */ + public String name() { + return name; + } + + /** + * The type of the column in Elasticsearch. + */ + public String esType() { + return esType; + } + + /** + * The type of the column as it would be returned by a JDBC driver. 
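+ * May be {@code null} when the response did not include JDBC metadata for the column.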
+ */ + public JDBCType jdbcType() { + return jdbcType; + } + + /** + * Used by JDBC + */ + public int displaySize() { + return displaySize; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ColumnInfo that = (ColumnInfo) o; + return displaySize == that.displaySize && + Objects.equals(table, that.table) && + Objects.equals(name, that.name) && + Objects.equals(esType, that.esType) && + jdbcType == that.jdbcType; + } + + @Override + public int hashCode() { + + return Objects.hash(table, name, esType, jdbcType, displaySize); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/MetaColumnInfo.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/MetaColumnInfo.java new file mode 100644 index 0000000000000..72d5932f51137 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/MetaColumnInfo.java @@ -0,0 +1,191 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.sql.JDBCType; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Information about a column returned by the listColumns response + */ +public class MetaColumnInfo implements Writeable, ToXContentObject { + + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("column_info", true, objects -> + new MetaColumnInfo( + (String) objects[0], + (String) objects[1], + (String) objects[2], + objects[3] == null ? null : JDBCType.valueOf((int) objects[3]), + objects[4] == null ? 
0 : (int) objects[4], + (int) objects[5])); + + private static final ParseField TABLE = new ParseField("table"); + private static final ParseField NAME = new ParseField("name"); + private static final ParseField ES_TYPE = new ParseField("type"); + private static final ParseField JDBC_TYPE = new ParseField("jdbc_type"); + private static final ParseField SIZE = new ParseField("size"); + private static final ParseField POSITION = new ParseField("position"); + + static { + PARSER.declareString(constructorArg(), TABLE); + PARSER.declareString(constructorArg(), NAME); + PARSER.declareString(constructorArg(), ES_TYPE); + PARSER.declareInt(optionalConstructorArg(), JDBC_TYPE); + PARSER.declareInt(optionalConstructorArg(), SIZE); + PARSER.declareInt(constructorArg(), POSITION); + } + + private final String table; + private final String name; + private final String esType; + @Nullable + private final JDBCType jdbcType; + private final int size; + private final int position; + + public MetaColumnInfo(String table, String name, String esType, JDBCType jdbcType, int size, int position) { + this.table = table; + this.name = name; + this.esType = esType; + this.jdbcType = jdbcType; + this.size = size; + this.position = position; + } + + public MetaColumnInfo(String table, String name, String esType, int position) { + this(table, name, esType, null, 0, position); + } + + MetaColumnInfo(StreamInput in) throws IOException { + table = in.readString(); + name = in.readString(); + esType = in.readString(); + if (in.readBoolean()) { + jdbcType = JDBCType.valueOf(in.readVInt()); + size = in.readVInt(); + } else { + jdbcType = null; + size = 0; + } + position = in.readVInt(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(table); + out.writeString(name); + out.writeString(esType); + if (jdbcType != null) { + out.writeBoolean(true); + out.writeVInt(jdbcType.getVendorTypeNumber()); + out.writeVInt(size); + } else { + out.writeBoolean(false); + } + out.writeVInt(position); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("table", table); + builder.field("name", name); + builder.field("type", esType); + if (jdbcType != null) { + builder.field("jdbc_type", jdbcType.getVendorTypeNumber()); + builder.field("size", size); + } + builder.field("position", position); + return builder.endObject(); + } + + + public static MetaColumnInfo fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + /** + * Name of the table. + */ + public String table() { + return table; + } + + /** + * Name of the column. + */ + public String name() { + return name; + } + + /** + * The type of the column in Elasticsearch. + */ + public String esType() { + return esType; + } + + /** + * The type of the column as it would be returned by a JDBC driver. 
+ */ + public JDBCType jdbcType() { + return jdbcType; + } + + /** + * Precision + */ + public int size() { + return size; + } + + /** + * Column position with in the tables + */ + public int position() { + return position; + } + + @Override + public String toString() { + return Strings.toString(this); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + MetaColumnInfo that = (MetaColumnInfo) o; + return size == that.size && + position == that.position && + Objects.equals(table, that.table) && + Objects.equals(name, that.name) && + Objects.equals(esType, that.esType) && + jdbcType == that.jdbcType; + } + + @Override + public int hashCode() { + return Objects.hash(table, name, esType, jdbcType, size, position); + } + +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorAction.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorAction.java new file mode 100644 index 0000000000000..ed64fa2a41e57 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorAction.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public class SqlClearCursorAction + extends Action { + + public static final SqlClearCursorAction INSTANCE = new SqlClearCursorAction(); + public static final String NAME = "indices:data/read/sql/close_cursor"; + public static final String REST_ENDPOINT = "/_xpack/sql/close"; + + private SqlClearCursorAction() { + super(NAME); + } + + @Override + public SqlClearCursorRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new SqlClearCursorRequestBuilder(client, this); + } + + @Override + public SqlClearCursorResponse newResponse() { + return new SqlClearCursorResponse(); + } +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequest.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequest.java new file mode 100644 index 0000000000000..0dfb9f71e38f1 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequest.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Request to clean all SQL resources associated with the cursor + */ +public class SqlClearCursorRequest extends AbstractSqlRequest implements ToXContentObject { + + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>(SqlClearCursorAction.NAME, true, (objects, mode) -> new SqlClearCursorRequest( + mode, + (String) objects[0] + )); + + static { + PARSER.declareString(constructorArg(), new ParseField("cursor")); + } + + private String cursor; + + public SqlClearCursorRequest() { + + } + + public SqlClearCursorRequest(Mode mode, String cursor) { + super(mode); + this.cursor = cursor; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = super.validate(); + if (getCursor() == null) { + validationException = addValidationError("cursor is required", validationException); + } + return validationException; + } + + public String getCursor() { + return cursor; + } + + public SqlClearCursorRequest setCursor(String cursor) { + this.cursor = cursor; + return this; + } + + @Override + public String getDescription() { + return "SQL Clean cursor [" + getCursor() + "]"; + } + + public SqlClearCursorRequest(StreamInput in) throws IOException { + super(in); + cursor = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(cursor); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + SqlClearCursorRequest that = (SqlClearCursorRequest) o; + return Objects.equals(cursor, that.cursor); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), cursor); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("cursor", cursor); + builder.endObject(); + return builder; + } + + public static SqlClearCursorRequest fromXContent(XContentParser parser, Mode mode) { + return PARSER.apply(parser, mode); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequestBuilder.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequestBuilder.java new file mode 100644 index 0000000000000..0767f02587c31 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequestBuilder.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +public class SqlClearCursorRequestBuilder extends + ActionRequestBuilder { + + public SqlClearCursorRequestBuilder(ElasticsearchClient client, SqlClearCursorAction action) { + super(client, action, new SqlClearCursorRequest()); + } + + public SqlClearCursorRequestBuilder cursor(String cursor) { + request.setCursor(cursor); + return this; + } +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponse.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponse.java new file mode 100644 index 0000000000000..b157d65dfff84 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponse.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.StatusToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.RestStatus; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.rest.RestStatus.NOT_FOUND; +import static org.elasticsearch.rest.RestStatus.OK; + +/** + * Response to the request to clean all SQL resources associated with the cursor + */ +public class SqlClearCursorResponse extends ActionResponse implements StatusToXContentObject { + + private static final ParseField SUCCEEDED = new ParseField("succeeded"); + public static final ObjectParser PARSER = + new ObjectParser<>(SqlClearCursorAction.NAME, true, SqlClearCursorResponse::new); + static { + PARSER.declareBoolean(SqlClearCursorResponse::setSucceeded, SUCCEEDED); + } + + + private boolean succeeded; + + public SqlClearCursorResponse(boolean succeeded) { + this.succeeded = succeeded; + } + + SqlClearCursorResponse() { + } + + /** + * @return Whether the attempt to clear a cursor was successful. + */ + public boolean isSucceeded() { + return succeeded; + } + + public SqlClearCursorResponse setSucceeded(boolean succeeded) { + this.succeeded = succeeded; + return this; + } + + @Override + public RestStatus status() { + return succeeded ? 
OK : NOT_FOUND; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(SUCCEEDED.getPreferredName(), succeeded); + builder.endObject(); + return builder; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + succeeded = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(succeeded); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SqlClearCursorResponse response = (SqlClearCursorResponse) o; + return succeeded == response.succeeded; + } + + @Override + public int hashCode() { + return Objects.hash(succeeded); + } + + public static SqlClearCursorResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryAction.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryAction.java new file mode 100644 index 0000000000000..fd46799608c73 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryAction.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public class SqlQueryAction extends Action { + + public static final SqlQueryAction INSTANCE = new SqlQueryAction(); + public static final String NAME = "indices:data/read/sql"; + public static final String REST_ENDPOINT = "/_xpack/sql"; + + private SqlQueryAction() { + super(NAME); + } + + @Override + public SqlQueryRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new SqlQueryRequestBuilder(client, this); + } + + @Override + public SqlQueryResponse newResponse() { + return new SqlQueryResponse(); + } +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequest.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequest.java new file mode 100644 index 0000000000000..5d2215e2a4e13 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequest.java @@ -0,0 +1,131 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; +import java.util.TimeZone; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * Request to perform an sql query + */ +public class SqlQueryRequest extends AbstractSqlQueryRequest implements ToXContentObject { + private static final ObjectParser PARSER = objectParser(SqlQueryRequest::new); + + public static final ParseField CURSOR = new ParseField("cursor"); + public static final ParseField FILTER = new ParseField("filter"); + + static { + PARSER.declareString(SqlQueryRequest::cursor, CURSOR); + PARSER.declareObject(SqlQueryRequest::filter, + (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), FILTER); + } + + private String cursor = ""; + + public SqlQueryRequest() { + } + + public SqlQueryRequest(Mode mode, String query, List params, QueryBuilder filter, TimeZone timeZone, + int fetchSize, TimeValue requestTimeout, TimeValue pageTimeout, String cursor) { + super(mode, query, params, filter, timeZone, fetchSize, requestTimeout, pageTimeout); + this.cursor = cursor; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if ((false == Strings.hasText(query())) && Strings.hasText(cursor) == false) { + validationException = addValidationError("one of [query] or [cursor] is required", validationException); + } + return validationException; + } + + /** + * The key that must be sent back to SQL to access the next page of + * results. + */ + public String cursor() { + return cursor; + } + + /** + * The key that must be sent back to SQL to access the next page of + * results. 
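+ * An empty string (the default) means the request starts a new query rather than fetching the next page of an existing one.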
+ */ + public SqlQueryRequest cursor(String cursor) { + if (cursor == null) { + throw new IllegalArgumentException("cursor may not be null."); + } + this.cursor = cursor; + return this; + } + + public SqlQueryRequest(StreamInput in) throws IOException { + super(in); + cursor = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(cursor); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), cursor); + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) && Objects.equals(cursor, ((SqlQueryRequest) obj).cursor); + } + + @Override + public String getDescription() { + return "SQL [" + query() + "][" + filter() + "]"; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + super.toXContent(builder, params); + if (cursor != null) { + builder.field("cursor", cursor); + } + builder.endObject(); + return builder; + } + + @Override + public boolean isFragment() { + return false; + } + + public static SqlQueryRequest fromXContent(XContentParser parser, Mode mode) { + SqlQueryRequest request = PARSER.apply(parser, null); + request.mode(mode); + return request; + } + +} \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestBuilder.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestBuilder.java new file mode 100644 index 0000000000000..a08af6f8ce4b0 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestBuilder.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest.Mode; + +import java.util.Collections; +import java.util.List; +import java.util.TimeZone; + +import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_FETCH_SIZE; +import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_PAGE_TIMEOUT; +import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_REQUEST_TIMEOUT; +import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_TIME_ZONE; + +/** + * The builder to build sql request + */ +public class SqlQueryRequestBuilder extends ActionRequestBuilder { + + public SqlQueryRequestBuilder(ElasticsearchClient client, SqlQueryAction action) { + this(client, action, "", Collections.emptyList(), null, DEFAULT_TIME_ZONE, DEFAULT_FETCH_SIZE, DEFAULT_REQUEST_TIMEOUT, + DEFAULT_PAGE_TIMEOUT, "", Mode.PLAIN); + } + + public SqlQueryRequestBuilder(ElasticsearchClient client, SqlQueryAction action, String query, List params, + QueryBuilder filter, TimeZone timeZone, int fetchSize, TimeValue requestTimeout, + TimeValue pageTimeout, String nextPageInfo, Mode mode) { + super(client, action, new SqlQueryRequest(mode, query, params, filter, timeZone, fetchSize, requestTimeout, pageTimeout, + nextPageInfo)); + } + + public SqlQueryRequestBuilder query(String query) { + request.query(query); + return this; + } + + public SqlQueryRequestBuilder mode(String mode) { + request.mode(mode); + return this; + } + + public SqlQueryRequestBuilder mode(Mode mode) { + request.mode(mode); + return this; + } + + public SqlQueryRequestBuilder cursor(String cursor) { + request.cursor(cursor); + return this; + } + + public SqlQueryRequestBuilder filter(QueryBuilder filter) { + request.filter(filter); + return this; + } + + public SqlQueryRequestBuilder timeZone(TimeZone timeZone) { + request.timeZone(timeZone); + return this; + } + + public SqlQueryRequestBuilder requestTimeout(TimeValue timeout) { + request.requestTimeout(timeout); + return this; + } + + public SqlQueryRequestBuilder pageTimeout(TimeValue timeout) { + request.pageTimeout(timeout); + return this; + } + + public SqlQueryRequestBuilder fetchSize(int fetchSize) { + request.fetchSize(fetchSize); + return this; + } +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponse.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponse.java new file mode 100644 index 0000000000000..e0de05cd77438 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponse.java @@ -0,0 +1,249 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.joda.time.ReadableDateTime; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +import static java.util.Collections.unmodifiableList; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.common.xcontent.XContentParserUtils.parseFieldsValue; + +/** + * Response to perform an sql query + */ +public class SqlQueryResponse extends ActionResponse implements ToXContentObject { + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("sql", true, + objects -> new SqlQueryResponse( + objects[0] == null ? "" : (String) objects[0], + (List) objects[1], + (List>) objects[2])); + + public static final ParseField CURSOR = new ParseField("cursor"); + public static final ParseField COLUMNS = new ParseField("columns"); + public static final ParseField ROWS = new ParseField("rows"); + + static { + PARSER.declareString(optionalConstructorArg(), CURSOR); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ColumnInfo.fromXContent(p), COLUMNS); + PARSER.declareField(constructorArg(), (p, c) -> parseRows(p), ROWS, ValueType.OBJECT_ARRAY); + } + + // TODO: Simplify cursor handling + private String cursor; + private List columns; + // TODO investigate reusing Page here - it probably is much more efficient + private List> rows; + + public SqlQueryResponse() { + } + + public SqlQueryResponse(String cursor, @Nullable List columns, List> rows) { + this.cursor = cursor; + this.columns = columns; + this.rows = rows; + } + + /** + * The key that must be sent back to SQL to access the next page of + * results. If equal to "" then there is no next page. 
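+ * Clients should send this value back unchanged in the follow-up request to retrieve the next page.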
+ */ + public String cursor() { + return cursor; + } + + public long size() { + return rows.size(); + } + + public List columns() { + return columns; + } + + public List> rows() { + return rows; + } + + public SqlQueryResponse cursor(String cursor) { + this.cursor = cursor; + return this; + } + + public SqlQueryResponse columns(List columns) { + this.columns = columns; + return this; + } + + public SqlQueryResponse rows(List> rows) { + this.rows = rows; + return this; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + cursor = in.readString(); + if (in.readBoolean()) { + // We might have rows without columns and we might have columns without rows + // So we send the column size twice, just to keep the protocol simple + int columnCount = in.readVInt(); + List columns = new ArrayList<>(columnCount); + for (int c = 0; c < columnCount; c++) { + columns.add(new ColumnInfo(in)); + } + this.columns = unmodifiableList(columns); + } else { + this.columns = null; + } + int rowCount = in.readVInt(); + List> rows = new ArrayList<>(rowCount); + if (rowCount > 0) { + int columnCount = in.readVInt(); + for (int r = 0; r < rowCount; r++) { + List row = new ArrayList<>(columnCount); + for (int c = 0; c < columnCount; c++) { + row.add(in.readGenericValue()); + } + rows.add(unmodifiableList(row)); + } + } + this.rows = unmodifiableList(rows); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(cursor); + if (columns == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeVInt(columns.size()); + for (ColumnInfo column : columns) { + column.writeTo(out); + } + } + out.writeVInt(rows.size()); + if (rows.size() > 0) { + out.writeVInt(rows.get(0).size()); + for (List row : rows) { + for (Object value : row) { + out.writeGenericValue(value); + } + } + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + AbstractSqlRequest.Mode mode = AbstractSqlRequest.Mode.fromString(params.param("mode")); + builder.startObject(); + { + if (columns != null) { + builder.startArray("columns"); + { + for (ColumnInfo column : columns) { + column.toXContent(builder, params); + } + } + builder.endArray(); + } + builder.startArray("rows"); + for (List row : rows()) { + builder.startArray(); + for (Object value : row) { + value(builder, mode, value); + } + builder.endArray(); + } + builder.endArray(); + + if (cursor.equals("") == false) { + builder.field(SqlQueryRequest.CURSOR.getPreferredName(), cursor); + } + } + return builder.endObject(); + } + + /** + * Serializes the provided value in SQL-compatible way based on the client mode + */ + public static XContentBuilder value(XContentBuilder builder, AbstractSqlRequest.Mode mode, Object value) throws IOException { + if (mode == AbstractSqlRequest.Mode.JDBC && value instanceof ReadableDateTime) { + // JDBC cannot parse dates in string format + builder.value(((ReadableDateTime) value).getMillis()); + } else { + builder.value(value); + } + return builder; + } + + public static SqlQueryResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public static List> parseRows(XContentParser parser) throws IOException { + List> list = new ArrayList<>(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + if (parser.currentToken() == XContentParser.Token.START_ARRAY) { + list.add(parseRow(parser)); + } else { + throw new IllegalStateException("expected start array but got [" + 
parser.currentToken() + "]"); + } + } + return list; + } + + public static List parseRow(XContentParser parser) throws IOException { + List list = new ArrayList<>(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + if (parser.currentToken().isValue()) { + list.add(parseFieldsValue(parser)); + } else if (parser.currentToken() == XContentParser.Token.VALUE_NULL) { + list.add(null); + } else { + throw new IllegalStateException("expected value but got [" + parser.currentToken() + "]"); + } + } + return list; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SqlQueryResponse that = (SqlQueryResponse) o; + return Objects.equals(cursor, that.cursor) && + Objects.equals(columns, that.columns) && + Objects.equals(rows, that.rows); + } + + @Override + public int hashCode() { + return Objects.hash(cursor, columns, rows); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateAction.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateAction.java new file mode 100644 index 0000000000000..9bfee2d1c3433 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateAction.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Sql action for translating SQL queries into ES requests + */ +public class SqlTranslateAction extends Action { + + public static final SqlTranslateAction INSTANCE = new SqlTranslateAction(); + public static final String NAME = "indices:data/read/sql/translate"; + + private SqlTranslateAction() { + super(NAME); + } + + @Override + public SqlTranslateRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new SqlTranslateRequestBuilder(client, this); + } + + @Override + public SqlTranslateResponse newResponse() { + return new SqlTranslateResponse(); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequest.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequest.java new file mode 100644 index 0000000000000..93e0630745100 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequest.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.TimeZone; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * Request for the sql action for translating SQL queries into ES requests + */ +public class SqlTranslateRequest extends AbstractSqlQueryRequest { + private static final ObjectParser PARSER = objectParser(SqlTranslateRequest::new); + + public SqlTranslateRequest() { + } + + public SqlTranslateRequest(Mode mode, String query, List params, QueryBuilder filter, TimeZone timeZone, + int fetchSize, TimeValue requestTimeout, TimeValue pageTimeout) { + super(mode, query, params, filter, timeZone, fetchSize, requestTimeout, pageTimeout); + } + + public SqlTranslateRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if ((false == Strings.hasText(query()))) { + validationException = addValidationError("query is required", validationException); + } + return validationException; + } + + @Override + public String getDescription() { + return "SQL Translate [" + query() + "][" + filter() + "]"; + } + + public static SqlTranslateRequest fromXContent(XContentParser parser, Mode mode) { + SqlTranslateRequest request = PARSER.apply(parser, null); + request.mode(mode); + return request; + } +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestBuilder.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestBuilder.java new file mode 100644 index 0000000000000..11adc975014ca --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestBuilder.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.query.QueryBuilder; + +import java.util.Collections; +import java.util.List; +import java.util.TimeZone; + +import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_FETCH_SIZE; +import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_PAGE_TIMEOUT; +import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_REQUEST_TIMEOUT; +import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_TIME_ZONE; + +/** + * Builder for the request for the sql action for translating SQL queries into ES requests + */ +public class SqlTranslateRequestBuilder extends ActionRequestBuilder { + public SqlTranslateRequestBuilder(ElasticsearchClient client, SqlTranslateAction action) { + this(client, action, AbstractSqlRequest.Mode.PLAIN, null, null, Collections.emptyList(), DEFAULT_TIME_ZONE, DEFAULT_FETCH_SIZE, + DEFAULT_REQUEST_TIMEOUT, DEFAULT_PAGE_TIMEOUT); + } + + public SqlTranslateRequestBuilder(ElasticsearchClient client, SqlTranslateAction action, AbstractSqlRequest.Mode mode, String query, + QueryBuilder filter, List params, TimeZone timeZone, int fetchSize, + TimeValue requestTimeout, TimeValue pageTimeout) { + super(client, action, new SqlTranslateRequest(mode, query, params, filter, timeZone, fetchSize, requestTimeout, pageTimeout)); + } + + public SqlTranslateRequestBuilder query(String query) { + request.query(query); + return this; + } + + public SqlTranslateRequestBuilder timeZone(TimeZone timeZone) { + request.timeZone(timeZone); + return this; + } +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateResponse.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateResponse.java new file mode 100644 index 0000000000000..a19f8d1fc3cba --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateResponse.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Response for the sql action for translating SQL queries into ES requests + */ +public class SqlTranslateResponse extends ActionResponse implements ToXContentObject { + private SearchSourceBuilder source; + + public SqlTranslateResponse() { + } + + public SqlTranslateResponse(SearchSourceBuilder source) { + this.source = source; + } + + public SearchSourceBuilder source() { + return source; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + source = new SearchSourceBuilder(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + source.writeTo(out); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + SqlTranslateResponse other = (SqlTranslateResponse) obj; + return Objects.equals(source, other.source); + } + + @Override + public int hashCode() { + return Objects.hash(source); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return source.toXContent(builder, params); + } +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTypedParamValue.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTypedParamValue.java new file mode 100644 index 0000000000000..ffde82fab3491 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTypedParamValue.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Represent a strongly typed parameter value + */ +public class SqlTypedParamValue implements ToXContentObject, Writeable { + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("params", true, objects -> + new SqlTypedParamValue( + objects[0], + DataType.fromEsType((String) objects[1]))); + + private static final ParseField VALUE = new ParseField("value"); + private static final ParseField TYPE = new ParseField("type"); + + static { + PARSER.declareField(constructorArg(), (p, c) -> XContentParserUtils.parseFieldsValue(p), VALUE, ObjectParser.ValueType.VALUE); + PARSER.declareString(constructorArg(), TYPE); + } + + public final Object value; + public final DataType dataType; + + public SqlTypedParamValue(Object value, DataType dataType) { + this.value = value; + this.dataType = dataType; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("type", dataType.esType); + builder.field("value", value); + builder.endObject(); + return builder; + } + + public static SqlTypedParamValue fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(dataType); + out.writeGenericValue(value); + } + + public SqlTypedParamValue(StreamInput in) throws IOException { + dataType = in.readEnum(DataType.class); + value = in.readGenericValue(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + SqlTypedParamValue that = (SqlTypedParamValue) o; + return Objects.equals(value, that.value) && + dataType == that.dataType; + } + + @Override + public int hashCode() { + return Objects.hash(value, dataType); + } + + @Override + public String toString() { + return String.valueOf(value) + "[" + dataType + "]"; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java new file mode 100644 index 0000000000000..95c9ade5e295d --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java @@ -0,0 +1,161 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.type; + +import java.sql.JDBCType; +import java.sql.Timestamp; +import java.util.Arrays; +import java.util.Locale; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Elasticsearch data types that supported by SQL interface + */ +public enum DataType { + // @formatter:off + // jdbc type, Java Class size, defPrecision, dispSize, int, rat, docvals + NULL( JDBCType.NULL, null, 0, 0, 0), + UNSUPPORTED( JDBCType.OTHER, null, 0, 0, 0), + BOOLEAN( JDBCType.BOOLEAN, Boolean.class, 1, 1, 1), + BYTE( JDBCType.TINYINT, Byte.class, Byte.BYTES, 3, 5, true, false, true), + SHORT( JDBCType.SMALLINT, Short.class, Short.BYTES, 5, 6, true, false, true), + INTEGER( JDBCType.INTEGER, Integer.class, Integer.BYTES, 10, 11, true, false, true), + LONG( JDBCType.BIGINT, Long.class, Long.BYTES, 19, 20, true, false, true), + // 53 bits defaultPrecision ~ 16(15.95) decimal digits (53log10(2)), + DOUBLE( JDBCType.DOUBLE, Double.class, Double.BYTES, 16, 25, false, true, true), + // 24 bits defaultPrecision - 24*log10(2) =~ 7 (7.22) + FLOAT( JDBCType.REAL, Float.class, Float.BYTES, 7, 15, false, true, true), + HALF_FLOAT( JDBCType.FLOAT, Double.class, Double.BYTES, 16, 25, false, true, true), + // precision is based on long + SCALED_FLOAT(JDBCType.FLOAT, Double.class, Double.BYTES, 19, 25, false, true, true), + KEYWORD( JDBCType.VARCHAR, String.class, Integer.MAX_VALUE, 256, 0), + TEXT( JDBCType.VARCHAR, String.class, Integer.MAX_VALUE, Integer.MAX_VALUE, 0, false, false, false), + OBJECT( JDBCType.STRUCT, null, -1, 0, 0), + NESTED( JDBCType.STRUCT, null, -1, 0, 0), + BINARY( JDBCType.VARBINARY, byte[].class, -1, Integer.MAX_VALUE, 0), + DATE( JDBCType.TIMESTAMP, Timestamp.class, Long.BYTES, 19, 20); + // @formatter:on + + private static final Map jdbcToEs; + + static { + jdbcToEs = Arrays.stream(DataType.values()) + .filter(dataType -> dataType != TEXT && dataType != NESTED && dataType != SCALED_FLOAT) // Remove duplicates + .collect(Collectors.toMap(dataType -> dataType.jdbcType, dataType -> dataType)); + } + + /** + * Elasticsearch type name + */ + public final String esType; + + /** + * Compatible JDBC type + */ + public final JDBCType jdbcType; + + /** + * Name of corresponding java class + */ + public final String javaName; + + /** + * Size of the type in bytes + *
+ * -1 if the size can vary + */ + public final int size; + + /** + * Precision + *
+ * Specified column size. For numeric data, this is the maximum precision. For character + * data, this is the length in characters. For datetime datatypes, this is the length in characters of the + * String representation (assuming the maximum allowed defaultPrecision of the fractional seconds component). + */ + public final int defaultPrecision; + + + /** + * Display Size + *
+ * Normal maximum width in characters. + */ + public final int displaySize; + + /** + * True if the type represents an integer number + */ + public final boolean isInteger; + + /** + * True if the type represents a rational number + */ + public final boolean isRational; + + /** + * True if the type supports doc values by default + */ + public final boolean defaultDocValues; + + DataType(JDBCType jdbcType, Class javaClass, int size, int defaultPrecision, int displaySize, boolean isInteger, boolean isRational, + boolean defaultDocValues) { + this.esType = name().toLowerCase(Locale.ROOT); + this.javaName = javaClass == null ? null : javaClass.getName(); + this.jdbcType = jdbcType; + this.size = size; + this.defaultPrecision = defaultPrecision; + this.displaySize = displaySize; + this.isInteger = isInteger; + this.isRational = isRational; + this.defaultDocValues = defaultDocValues; + } + + DataType(JDBCType jdbcType, Class javaClass, int size, int defaultPrecision, int displaySize) { + this(jdbcType, javaClass, size, defaultPrecision, displaySize, false, false, true); + } + + public String sqlName() { + return jdbcType.getName(); + } + + public boolean isNumeric() { + return isInteger || isRational; + } + + /** + * Returns true if value is signed, false otherwise (including if the type is not numeric) + */ + public boolean isSigned() { + // For now all numeric values that es supports are signed + return isNumeric(); + } + + public boolean isString() { + return this == KEYWORD || this == TEXT; + } + + public boolean isPrimitive() { + return this != OBJECT && this != NESTED; + } + + public static DataType fromJdbcType(JDBCType jdbcType) { + if (jdbcToEs.containsKey(jdbcType) == false) { + throw new IllegalArgumentException("Unsupported JDBC type [" + jdbcType + "]"); + } + return jdbcToEs.get(jdbcType); + } + + /** + * Creates returns DataType enum coresponding to the specified es type + *
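+ * For illustration, {@code fromEsType("keyword")} returns {@link #KEYWORD}.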
+ * For any dataType DataType.fromEsType(dataType.esType) == dataType + */ + public static DataType fromEsType(String esType) { + return DataType.valueOf(esType.toUpperCase(Locale.ROOT)); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequestTests.java b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequestTests.java new file mode 100644 index 0000000000000..83546924a38f8 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequestTests.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.function.Consumer; + +public class SqlClearCursorRequestTests extends AbstractSerializingTestCase { + public AbstractSqlRequest.Mode testMode; + + @Before + public void setup() { + testMode = randomFrom(AbstractSqlRequest.Mode.values()); + } + + @Override + protected SqlClearCursorRequest createTestInstance() { + return new SqlClearCursorRequest(testMode, randomAlphaOfLength(100)); + } + + @Override + protected Writeable.Reader instanceReader() { + return SqlClearCursorRequest::new; + } + + @Override + protected SqlClearCursorRequest doParseInstance(XContentParser parser) { + return SqlClearCursorRequest.fromXContent(parser, testMode); + } + + @Override + protected SqlClearCursorRequest mutateInstance(SqlClearCursorRequest instance) throws IOException { + @SuppressWarnings("unchecked") + Consumer mutator = randomFrom( + request -> request.mode(randomValueOtherThan(request.mode(), () -> randomFrom(AbstractSqlRequest.Mode.values()))), + request -> request.setCursor(randomValueOtherThan(request.getCursor(), SqlQueryResponseTests::randomStringCursor)) + ); + SqlClearCursorRequest newRequest = new SqlClearCursorRequest(instance.mode(), instance.getCursor()); + mutator.accept(newRequest); + return newRequest; + + } +} + diff --git a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponseTests.java b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponseTests.java new file mode 100644 index 0000000000000..0ef2875d8e7dd --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponseTests.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +public class SqlClearCursorResponseTests extends AbstractStreamableXContentTestCase { + + @Override + protected SqlClearCursorResponse createTestInstance() { + return new SqlClearCursorResponse(randomBoolean()); + } + + @Override + protected SqlClearCursorResponse createBlankInstance() { + return new SqlClearCursorResponse(); + } + + @Override + protected SqlClearCursorResponse mutateInstance(SqlClearCursorResponse instance) { + return new SqlClearCursorResponse(instance.isSucceeded() == false); + } + + @Override + protected SqlClearCursorResponse doParseInstance(XContentParser parser) { + return SqlClearCursorResponse.fromXContent(parser); + } +} diff --git a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestTests.java b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestTests.java new file mode 100644 index 0000000000000..5fbe4e42d48f2 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestTests.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.type.DataType; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.sql.plugin.SqlTestUtils.randomFilter; +import static org.elasticsearch.xpack.sql.plugin.SqlTestUtils.randomFilterOrNull; + +public class SqlQueryRequestTests extends AbstractSerializingTestCase { + + public AbstractSqlRequest.Mode testMode; + + @Before + public void setup() { + testMode = randomFrom(AbstractSqlRequest.Mode.values()); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedWriteableRegistry(searchModule.getNamedWriteables()); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + + @Override + protected SqlQueryRequest createTestInstance() { + return new SqlQueryRequest(testMode, randomAlphaOfLength(10), randomParameters(), + SqlTestUtils.randomFilterOrNull(random()), randomTimeZone(), + between(1, Integer.MAX_VALUE), randomTV(), randomTV(), randomAlphaOfLength(10) + ); + } + + public List randomParameters() { + if (randomBoolean()) { + return Collections.emptyList(); + } 
else { + int len = randomIntBetween(1, 10); + List arr = new ArrayList<>(len); + for (int i = 0; i < len; i++) { + @SuppressWarnings("unchecked") Supplier supplier = randomFrom( + () -> new SqlTypedParamValue(randomBoolean(), DataType.BOOLEAN), + () -> new SqlTypedParamValue(randomLong(), DataType.LONG), + () -> new SqlTypedParamValue(randomDouble(), DataType.DOUBLE), + () -> new SqlTypedParamValue(null, DataType.NULL), + () -> new SqlTypedParamValue(randomAlphaOfLength(10), DataType.KEYWORD) + ); + arr.add(supplier.get()); + } + return Collections.unmodifiableList(arr); + } + } + + @Override + protected Writeable.Reader instanceReader() { + return SqlQueryRequest::new; + } + + private TimeValue randomTV() { + return TimeValue.parseTimeValue(randomTimeValue(), null, "test"); + } + + @Override + protected SqlQueryRequest doParseInstance(XContentParser parser) { + return SqlQueryRequest.fromXContent(parser, testMode); + } + + @Override + protected SqlQueryRequest mutateInstance(SqlQueryRequest instance) { + @SuppressWarnings("unchecked") + Consumer mutator = randomFrom( + request -> request.mode(randomValueOtherThan(request.mode(), () -> randomFrom(AbstractSqlRequest.Mode.values()))), + request -> request.query(randomValueOtherThan(request.query(), () -> randomAlphaOfLength(5))), + request -> request.params(randomValueOtherThan(request.params(), this::randomParameters)), + request -> request.timeZone(randomValueOtherThan(request.timeZone(), ESTestCase::randomTimeZone)), + request -> request.fetchSize(randomValueOtherThan(request.fetchSize(), () -> between(1, Integer.MAX_VALUE))), + request -> request.requestTimeout(randomValueOtherThan(request.requestTimeout(), this::randomTV)), + request -> request.filter(randomValueOtherThan(request.filter(), + () -> request.filter() == null ? randomFilter(random()) : randomFilterOrNull(random()))), + request -> request.cursor(randomValueOtherThan(request.cursor(), SqlQueryResponseTests::randomStringCursor)) + ); + SqlQueryRequest newRequest = new SqlQueryRequest(instance.mode(), instance.query(), instance.params(), instance.filter(), + instance.timeZone(), instance.fetchSize(), instance.requestTimeout(), instance.pageTimeout(), instance.cursor()); + mutator.accept(newRequest); + return newRequest; + } +} diff --git a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponseTests.java b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponseTests.java new file mode 100644 index 0000000000000..42c08bb09142f --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponseTests.java @@ -0,0 +1,119 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.sql.JDBCType; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; +import static org.hamcrest.Matchers.hasSize; + +public class SqlQueryResponseTests extends AbstractStreamableXContentTestCase { + + static String randomStringCursor() { + return randomBoolean() ? "" : randomAlphaOfLength(10); + } + + @Override + protected SqlQueryResponse createTestInstance() { + return createRandomInstance(randomStringCursor()); + } + + public static SqlQueryResponse createRandomInstance(String cursor) { + int columnCount = between(1, 10); + + List columns = null; + if (randomBoolean()) { + columns = new ArrayList<>(columnCount); + for (int i = 0; i < columnCount; i++) { + columns.add(new ColumnInfo(randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10), + randomFrom(JDBCType.values()), randomInt(25))); + } + } + + List> rows; + if (randomBoolean()) { + rows = Collections.emptyList(); + } else { + int rowCount = between(1, 10); + rows = new ArrayList<>(rowCount); + for (int r = 0; r < rowCount; r++) { + List row = new ArrayList<>(rowCount); + for (int c = 0; c < columnCount; c++) { + Supplier value = randomFrom(Arrays.asList( + () -> randomAlphaOfLength(10), + ESTestCase::randomLong, + ESTestCase::randomDouble, + () -> null)); + row.add(value.get()); + + } + rows.add(row); + } + } + return new SqlQueryResponse(cursor, columns, rows); + } + + @Override + protected SqlQueryResponse createBlankInstance() { + return new SqlQueryResponse(); + } + + public void testToXContent() throws IOException { + SqlQueryResponse testInstance = createTestInstance(); + + XContentBuilder builder = testInstance.toXContent(XContentFactory.jsonBuilder(), EMPTY_PARAMS); + Map rootMap = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); + + logger.info(Strings.toString(builder)); + + if (testInstance.columns() != null) { + List columns = (List) rootMap.get("columns"); + assertThat(columns, hasSize(testInstance.columns().size())); + for (int i = 0; i < columns.size(); i++) { + Map columnMap = (Map) columns.get(i); + ColumnInfo columnInfo = testInstance.columns().get(i); + assertEquals(columnInfo.name(), columnMap.get("name")); + assertEquals(columnInfo.esType(), columnMap.get("type")); + assertEquals(columnInfo.displaySize(), columnMap.get("display_size")); + assertEquals(columnInfo.jdbcType().getVendorTypeNumber(), columnMap.get("jdbc_type")); + } + } else { + assertNull(rootMap.get("columns")); + } + + List rows = ((List) rootMap.get("rows")); + assertThat(rows, hasSize(testInstance.rows().size())); + for (int i = 0; i < rows.size(); i++) { + List row = (List) rows.get(i); + assertEquals(row, testInstance.rows().get(i)); + } + + if (testInstance.cursor().equals("") == false) { + assertEquals(rootMap.get(SqlQueryRequest.CURSOR.getPreferredName()), 
testInstance.cursor()); + } + } + + @Override + protected SqlQueryResponse doParseInstance(XContentParser parser) { + return SqlQueryResponse.fromXContent(parser); + } +} diff --git a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTestUtils.java b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTestUtils.java new file mode 100644 index 0000000000000..05a164c1c94b7 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTestUtils.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.RangeQueryBuilder; + +import java.util.Random; + +public final class SqlTestUtils { + + private SqlTestUtils() { + + } + + /** + * Returns a random QueryBuilder or null + */ + public static QueryBuilder randomFilterOrNull(Random random) { + final QueryBuilder randomFilter; + if (random.nextBoolean()) { + randomFilter = randomFilter(random); + } else { + randomFilter = null; + } + return randomFilter; + } + + /** + * Returns a random QueryBuilder + */ + public static QueryBuilder randomFilter(Random random) { + return new RangeQueryBuilder(RandomStrings.randomAsciiLettersOfLength(random, 10)) + .gt(random.nextInt()); + } + +} diff --git a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestTests.java b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestTests.java new file mode 100644 index 0000000000000..21b002293768f --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestTests.java @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collections; +import java.util.function.Consumer; + +import static org.elasticsearch.xpack.sql.plugin.SqlTestUtils.randomFilter; +import static org.elasticsearch.xpack.sql.plugin.SqlTestUtils.randomFilterOrNull; + +public class SqlTranslateRequestTests extends AbstractSerializingTestCase { + + public AbstractSqlRequest.Mode testMode; + + @Before + public void setup() { + testMode = randomFrom(AbstractSqlRequest.Mode.values()); + } + + @Override + protected SqlTranslateRequest createTestInstance() { + return new SqlTranslateRequest(testMode, randomAlphaOfLength(10), Collections.emptyList(), randomFilterOrNull(random()), + randomTimeZone(), between(1, Integer.MAX_VALUE), randomTV(), randomTV()); + } + + @Override + protected Writeable.Reader instanceReader() { + return SqlTranslateRequest::new; + } + + private TimeValue randomTV() { + return TimeValue.parseTimeValue(randomTimeValue(), null, "test"); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedWriteableRegistry(searchModule.getNamedWriteables()); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + + @Override + protected SqlTranslateRequest doParseInstance(XContentParser parser) { + return SqlTranslateRequest.fromXContent(parser, testMode); + } + + @Override + protected SqlTranslateRequest mutateInstance(SqlTranslateRequest instance) throws IOException { + @SuppressWarnings("unchecked") + Consumer mutator = randomFrom( + request -> request.query(randomValueOtherThan(request.query(), () -> randomAlphaOfLength(5))), + request -> request.timeZone(randomValueOtherThan(request.timeZone(), ESTestCase::randomTimeZone)), + request -> request.fetchSize(randomValueOtherThan(request.fetchSize(), () -> between(1, Integer.MAX_VALUE))), + request -> request.requestTimeout(randomValueOtherThan(request.requestTimeout(), () -> randomTV())), + request -> request.filter(randomValueOtherThan(request.filter(), + () -> request.filter() == null ? 
randomFilter(random()) : randomFilterOrNull(random()))) + ); + SqlTranslateRequest newRequest = new SqlTranslateRequest(instance.mode(), instance.query(), instance.params(), instance.filter(), + instance.timeZone(), instance.fetchSize(), instance.requestTimeout(), instance.pageTimeout()); + mutator.accept(newRequest); + return newRequest; + } +} diff --git a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateResponseTests.java b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateResponseTests.java new file mode 100644 index 0000000000000..061b70a55d975 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateResponseTests.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.AbstractStreamableTestCase; + +import java.io.IOException; + +public class SqlTranslateResponseTests extends AbstractStreamableTestCase { + + @Override + protected SqlTranslateResponse createTestInstance() { + SearchSourceBuilder s = new SearchSourceBuilder(); + if (randomBoolean()) { + long docValues = iterations(5, 10); + for (int i = 0; i < docValues; i++) { + s.docValueField(randomAlphaOfLength(10)); + } + } + + if (randomBoolean()) { + long sourceFields = iterations(5, 10); + for (int i = 0; i < sourceFields; i++) { + s.storedField(randomAlphaOfLength(10)); + } + } + + s.fetchSource(randomBoolean()).from(randomInt(256)).explain(randomBoolean()).size(randomInt(256)); + + return new SqlTranslateResponse(s); + } + + @Override + protected SqlTranslateResponse createBlankInstance() { + return new SqlTranslateResponse(); + } + + @Override + protected SqlTranslateResponse mutateInstance(SqlTranslateResponse instance) throws IOException { + SqlTranslateResponse sqlTranslateResponse = copyInstance(instance); + SearchSourceBuilder source = sqlTranslateResponse.source(); + source.size(randomValueOtherThan(source.size(), () -> between(0, Integer.MAX_VALUE))); + return new SqlTranslateResponse(source); + } +} diff --git a/x-pack/plugin/sql/sql-shared-client/build.gradle b/x-pack/plugin/sql/sql-shared-client/build.gradle new file mode 100644 index 0000000000000..896cccb8aa37a --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/build.gradle @@ -0,0 +1,54 @@ + +/* + * This project is named sql-shared-client because it is in the + * "org.elasticsearch.plugin" group and it'd be super confusing for it to just + * be called "shared-client" there. 
+ */ + +apply plugin: 'elasticsearch.build' + +description = 'Code shared between jdbc and cli' + +dependencies { + compile xpackProject('plugin:sql:sql-proto') + compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + testCompile "org.elasticsearch.test:framework:${version}" +} + +dependencyLicenses { + mapping from: /jackson-.*/, to: 'jackson' + mapping from: /sql-proto.*/, to: 'elasticsearch' + mapping from: /elasticsearch-cli.*/, to: 'elasticsearch' + mapping from: /elasticsearch-core.*/, to: 'elasticsearch' + mapping from: /lucene-.*/, to: 'lucene' + ignoreSha 'sql-proto' + ignoreSha 'elasticsearch' + ignoreSha 'elasticsearch-core' +} + +forbiddenApisMain { + // does not depend on core, so only jdk and http signatures should be checked + signaturesURLs = [this.class.getResource('/forbidden/jdk-signatures.txt')] +} + +forbiddenApisTest { + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' +} + +forbiddenPatterns { + exclude '**/*.keystore' +} + +// Allow for com.sun.net.httpserver.* usage for testing +eclipse { + classpath.file { + whenMerged { cp -> + def con = entries.find { e -> + e.kind == "con" && e.toString().contains("org.eclipse.jdt.launching.JRE_CONTAINER") + } + con.accessRules.add(new org.gradle.plugins.ide.eclipse.model.AccessRule( + "accessible", "com/sun/net/httpserver/*")) + } + } +} diff --git a/x-pack/plugin/sql/sql-shared-client/licenses/jackson-LICENSE b/x-pack/plugin/sql/sql-shared-client/licenses/jackson-LICENSE new file mode 100644 index 0000000000000..f5f45d26a49d6 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/licenses/jackson-LICENSE @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/x-pack/plugin/sql/sql-shared-client/licenses/jackson-NOTICE b/x-pack/plugin/sql/sql-shared-client/licenses/jackson-NOTICE new file mode 100644 index 0000000000000..4c976b7b4cc58 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/licenses/jackson-NOTICE @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. 
diff --git a/x-pack/plugin/sql/sql-shared-client/licenses/jackson-core-2.8.10.jar.sha1 b/x-pack/plugin/sql/sql-shared-client/licenses/jackson-core-2.8.10.jar.sha1 new file mode 100644 index 0000000000000..a322d371e265e --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/licenses/jackson-core-2.8.10.jar.sha1 @@ -0,0 +1 @@ +eb21a035c66ad307e66ec8fce37f5d50fd62d039 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java new file mode 100644 index 0000000000000..bf7c245b24cbe --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java @@ -0,0 +1,170 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client; + +import org.elasticsearch.action.main.MainResponse; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.Streams; +import org.elasticsearch.xpack.sql.client.shared.CheckedFunction; +import org.elasticsearch.xpack.sql.client.shared.ClientException; +import org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration; +import org.elasticsearch.xpack.sql.client.shared.JreHttpUrlConnection; +import org.elasticsearch.xpack.sql.client.shared.JreHttpUrlConnection.ResponseOrException; +import org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest; +import org.elasticsearch.xpack.sql.plugin.SqlClearCursorAction; +import org.elasticsearch.xpack.sql.plugin.SqlClearCursorRequest; +import org.elasticsearch.xpack.sql.plugin.SqlClearCursorResponse; +import org.elasticsearch.xpack.sql.plugin.SqlQueryAction; +import org.elasticsearch.xpack.sql.plugin.SqlQueryRequest; +import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; + +import java.io.IOException; +import java.io.InputStream; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.sql.SQLException; +import java.util.Collections; +import java.util.TimeZone; +import java.util.function.Function; + +/** + * A specialized high-level REST client with support for SQL-related functions. + * Similar to JDBC and the underlying HTTP connection, this class is not thread-safe + * and follows a request-response flow. 
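+ * <p>
+ * A minimal usage sketch, for illustration only (the endpoint URI, query and fetch size below are
+ * hypothetical examples and error handling is omitted):
+ * <pre>{@code
+ * ConnectionConfiguration cfg = new ConnectionConfiguration(
+ *     URI.create("http://localhost:9200"), "http://localhost:9200", new Properties());
+ * HttpClient client = new HttpClient(cfg);
+ * SqlQueryResponse response = client.queryInit("SELECT 1", 1000);
+ * while ("".equals(response.cursor()) == false) {
+ *     response = client.nextPage(response.cursor());
+ * }
+ * }</pre>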
+ */ +public class HttpClient { + + private static final XContentType REQUEST_BODY_CONTENT_TYPE = XContentType.JSON; + + private final ConnectionConfiguration cfg; + + public HttpClient(ConnectionConfiguration cfg) throws SQLException { + this.cfg = cfg; + } + + private NamedXContentRegistry registry = NamedXContentRegistry.EMPTY; + + public boolean ping(long timeoutInMs) throws SQLException { + return head("/", timeoutInMs); + } + + public MainResponse serverInfo() throws SQLException { + return get("/", MainResponse::fromXContent); + } + + public SqlQueryResponse queryInit(String query, int fetchSize) throws SQLException { + // TODO allow customizing the time zone - this is what session set/reset/get should be about + SqlQueryRequest sqlRequest = new SqlQueryRequest(AbstractSqlRequest.Mode.PLAIN, query, Collections.emptyList(), null, + TimeZone.getTimeZone("UTC"), fetchSize, TimeValue.timeValueMillis(cfg.queryTimeout()), + TimeValue.timeValueMillis(cfg.pageTimeout()), "" + ); + return query(sqlRequest); + } + + public SqlQueryResponse query(SqlQueryRequest sqlRequest) throws SQLException { + return post(SqlQueryAction.REST_ENDPOINT, sqlRequest, SqlQueryResponse::fromXContent); + } + + public SqlQueryResponse nextPage(String cursor) throws SQLException { + SqlQueryRequest sqlRequest = new SqlQueryRequest(); + sqlRequest.cursor(cursor); + return post(SqlQueryAction.REST_ENDPOINT, sqlRequest, SqlQueryResponse::fromXContent); + } + + public boolean queryClose(String cursor) throws SQLException { + SqlClearCursorResponse response = post(SqlClearCursorAction.REST_ENDPOINT, + new SqlClearCursorRequest(AbstractSqlRequest.Mode.PLAIN, cursor), + SqlClearCursorResponse::fromXContent); + return response.isSucceeded(); + } + + private Response post(String path, Request request, + CheckedFunction responseParser) + throws SQLException { + BytesReference requestBytes = toXContent(request); + String query = "error_trace&mode=" + request.mode(); + Tuple response = + AccessController.doPrivileged((PrivilegedAction>>) () -> + JreHttpUrlConnection.http(path, query, cfg, con -> + con.request( + requestBytes::writeTo, + this::readFrom, + "POST" + ) + )).getResponseOrThrowException(); + return fromXContent(response.v1(), response.v2(), responseParser); + } + + private boolean head(String path, long timeoutInMs) throws SQLException { + ConnectionConfiguration pingCfg = new ConnectionConfiguration(cfg.baseUri(), cfg.connectionString(), + cfg.connectTimeout(), timeoutInMs, cfg.queryTimeout(), cfg.pageTimeout(), cfg.pageSize(), + cfg.authUser(), cfg.authPass(), cfg.sslConfig(), cfg.proxyConfig()); + try { + return AccessController.doPrivileged((PrivilegedAction) () -> + JreHttpUrlConnection.http(path, "error_trace", pingCfg, JreHttpUrlConnection::head)); + } catch (ClientException ex) { + throw new SQLException("Cannot ping server", ex); + } + } + + private Response get(String path, CheckedFunction responseParser) + throws SQLException { + Tuple response = + AccessController.doPrivileged((PrivilegedAction>>) () -> + JreHttpUrlConnection.http(path, "error_trace", cfg, con -> + con.request( + null, + this::readFrom, + "GET" + ) + )).getResponseOrThrowException(); + return fromXContent(response.v1(), response.v2(), responseParser); + } + + private static BytesReference toXContent(Request xContent) { + try { + return XContentHelper.toXContent(xContent, REQUEST_BODY_CONTENT_TYPE, false); + } catch (IOException ex) { + throw new ClientException("Cannot serialize request", ex); + } + } + + private Tuple 
readFrom(InputStream inputStream, Function headers) { + String contentType = headers.apply("Content-Type"); + XContentType xContentType = XContentType.fromMediaTypeOrFormat(contentType); + if (xContentType == null) { + throw new IllegalStateException("Unsupported Content-Type: " + contentType); + } + BytesStreamOutput out = new BytesStreamOutput(); + try { + Streams.copy(inputStream, out); + } catch (IOException ex) { + throw new ClientException("Cannot deserialize response", ex); + } + return new Tuple<>(xContentType, out.bytes()); + + } + + private Response fromXContent(XContentType xContentType, BytesReference bytesReference, + CheckedFunction responseParser) { + try (InputStream stream = bytesReference.streamInput(); + XContentParser parser = xContentType.xContent().createParser(registry, + LoggingDeprecationHandler.INSTANCE, stream)) { + return responseParser.apply(parser); + } catch (IOException ex) { + throw new ClientException("Cannot parse response", ex); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/Bytes.java b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/Bytes.java new file mode 100644 index 0000000000000..0e0a3af56339f --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/Bytes.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + +public class Bytes { + + private final byte[] buf; + private final int size; + + public Bytes(byte[] buf, int size) { + this.buf = buf; + this.size = size; + } + + public byte[] bytes() { + return buf; + } + + public int size() { + return size; + } + + public byte[] copy() { + return Arrays.copyOf(buf, size); + } + + public String toString() { + return new String(buf, 0, size, StandardCharsets.UTF_8); + } +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/CheckedBiFunction.java b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/CheckedBiFunction.java new file mode 100644 index 0000000000000..ba16da3bfd313 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/CheckedBiFunction.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.util.function.BiFunction; + +/** + * A {@link BiFunction}-like interface which allows throwing checked exceptions. + * Elasticsearch has one of these but we don't depend on Elasticsearch. 
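+ * It is used, for instance, for the response parser handed to {@code JreHttpUrlConnection#request},
+ * which consumes the response {@code InputStream} together with a header lookup function and is
+ * allowed to throw {@code IOException}.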
+ */ +@FunctionalInterface +public interface CheckedBiFunction { + R apply(T t, U u) throws E; +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/CheckedConsumer.java b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/CheckedConsumer.java new file mode 100644 index 0000000000000..62258eebefaaa --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/CheckedConsumer.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.util.function.Consumer; + +/** + * A {@link Consumer}-like interface which allows throwing checked exceptions. + * Elasticsearch has one of these but we don't depend on Elasticsearch. + */ +@FunctionalInterface +public interface CheckedConsumer { + void accept(T t) throws E; +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/CheckedFunction.java b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/CheckedFunction.java new file mode 100644 index 0000000000000..67e174ffd7075 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/CheckedFunction.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.util.function.Function; + +/** + * A {@link Function}-like interface which allows throwing checked exceptions. + * Elasticsearch has one of these but we don't depend on Elasticsearch. + */ +@FunctionalInterface +public interface CheckedFunction { + R apply(T t) throws E; +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ClientException.java b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ClientException.java new file mode 100644 index 0000000000000..5eb18b86091c1 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ClientException.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +/** + * A general-purpose exception to be used on the client-side code. Does not support var-args formatting. 
+ */ +public class ClientException extends RuntimeException { + + public ClientException(String message, Throwable cause) { + super(message, cause); + } + + public ClientException(String message) { + super(message); + } + + public ClientException(Throwable cause) { + super(cause); + } +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ConnectionConfiguration.java b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ConnectionConfiguration.java new file mode 100644 index 0000000000000..aca262e172647 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ConnectionConfiguration.java @@ -0,0 +1,236 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Enumeration; +import java.util.LinkedHashSet; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import static java.util.Collections.emptyList; + +/** + * Common configuration class used for client. + * Uses a Properties object to be created (as clients would use strings to configure it). + * While this is convenient, it makes validation tricky (of both the names and values) and thus + * it's available only during construction. + * Some values might be updated later on in a typed fashion (dedicated method) in order + * to move away from the loose Strings... 
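+ * <p>
+ * For illustration, a configuration could be assembled from string properties as below; the URI,
+ * credentials and values are hypothetical examples (timeout values are parsed as milliseconds):
+ * <pre>{@code
+ * Properties props = new Properties();
+ * props.setProperty("connect.timeout", "10000");
+ * props.setProperty("page.size", "500");
+ * props.setProperty("user", "elastic");
+ * props.setProperty("password", "changeme");
+ * ConnectionConfiguration cfg = new ConnectionConfiguration(
+ *     URI.create("http://localhost:9200"), "http://localhost:9200", props);
+ * }</pre>
+ * Unknown property names are rejected with a {@link ClientException} at construction time.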
+ */ +public class ConnectionConfiguration { + + // Timeouts + + // 30s + public static final String CONNECT_TIMEOUT = "connect.timeout"; + private static final String CONNECT_TIMEOUT_DEFAULT = String.valueOf(TimeUnit.SECONDS.toMillis(30)); + + // 1m + public static final String NETWORK_TIMEOUT = "network.timeout"; + private static final String NETWORK_TIMEOUT_DEFAULT = String.valueOf(TimeUnit.MINUTES.toMillis(1)); + + // 90s + public static final String QUERY_TIMEOUT = "query.timeout"; + private static final String QUERY_TIMEOUT_DEFAULT = String.valueOf(TimeUnit.SECONDS.toMillis(90)); + + // 45s + public static final String PAGE_TIMEOUT = "page.timeout"; + private static final String PAGE_TIMEOUT_DEFAULT = String.valueOf(TimeUnit.SECONDS.toMillis(45)); + + public static final String PAGE_SIZE = "page.size"; + private static final String PAGE_SIZE_DEFAULT = "1000"; + + // Auth + + public static final String AUTH_USER = "user"; + // NB: this is password instead of pass since that's what JDBC DriverManager/tools use + public static final String AUTH_PASS = "password"; + + protected static final Set OPTION_NAMES = new LinkedHashSet<>( + Arrays.asList(CONNECT_TIMEOUT, NETWORK_TIMEOUT, QUERY_TIMEOUT, PAGE_TIMEOUT, PAGE_SIZE, AUTH_USER, AUTH_PASS)); + + static { + OPTION_NAMES.addAll(SslConfig.OPTION_NAMES); + OPTION_NAMES.addAll(ProxyConfig.OPTION_NAMES); + } + + // Base URI for all request + private final URI baseURI; + private final String connectionString; + // Proxy + + private final long connectTimeout; + private final long networkTimeout; + private final long queryTimeout; + + private final long pageTimeout; + private final int pageSize; + + private final String user, pass; + + private final SslConfig sslConfig; + private final ProxyConfig proxyConfig; + + public ConnectionConfiguration(URI baseURI, String connectionString, Properties props) throws ClientException { + this.connectionString = connectionString; + Properties settings = props != null ? 
props : new Properties(); + + checkPropertyNames(settings, optionNames()); + + connectTimeout = parseValue(CONNECT_TIMEOUT, settings.getProperty(CONNECT_TIMEOUT, CONNECT_TIMEOUT_DEFAULT), Long::parseLong); + networkTimeout = parseValue(NETWORK_TIMEOUT, settings.getProperty(NETWORK_TIMEOUT, NETWORK_TIMEOUT_DEFAULT), Long::parseLong); + queryTimeout = parseValue(QUERY_TIMEOUT, settings.getProperty(QUERY_TIMEOUT, QUERY_TIMEOUT_DEFAULT), Long::parseLong); + // page + pageTimeout = parseValue(PAGE_TIMEOUT, settings.getProperty(PAGE_TIMEOUT, PAGE_TIMEOUT_DEFAULT), Long::parseLong); + pageSize = parseValue(PAGE_SIZE, settings.getProperty(PAGE_SIZE, PAGE_SIZE_DEFAULT), Integer::parseInt); + + // auth + user = settings.getProperty(AUTH_USER); + pass = settings.getProperty(AUTH_PASS); + + sslConfig = new SslConfig(settings); + proxyConfig = new ProxyConfig(settings); + + this.baseURI = normalizeSchema(baseURI, connectionString, sslConfig.isEnabled()); + } + + public ConnectionConfiguration(URI baseURI, String connectionString, long connectTimeout, long networkTimeout, long queryTimeout, + long pageTimeout, int pageSize, String user, String pass, SslConfig sslConfig, + ProxyConfig proxyConfig) throws ClientException { + this.connectionString = connectionString; + this.connectTimeout = connectTimeout; + this.networkTimeout = networkTimeout; + this.queryTimeout = queryTimeout; + // page + this.pageTimeout = pageTimeout; + this.pageSize = pageSize; + + // auth + this.user = user; + this.pass = pass; + + this.sslConfig = sslConfig; + this.proxyConfig = proxyConfig; + this.baseURI = baseURI; + } + + + private static URI normalizeSchema(URI uri, String connectionString, boolean isSSLEnabled) { + // Make sure the protocol is correct + final String scheme; + if (isSSLEnabled) { + // It's ok to upgrade from http to https + scheme = "https"; + } else { + // Silently downgrading from https to http can cause security issues + if ("https".equals(uri.getScheme())) { + throw new ClientException("SSL is disabled"); + } + scheme = "http"; + } + try { + return new URI(scheme, null, uri.getHost(), uri.getPort(), uri.getPath(), uri.getQuery(), uri.getFragment()); + } catch (URISyntaxException ex) { + throw new ClientException("Cannot parse process baseURI [" + connectionString + "] " + ex.getMessage()); + } + } + + private Collection optionNames() { + Collection options = new ArrayList<>(OPTION_NAMES); + options.addAll(extraOptions()); + return options; + } + + protected Collection extraOptions() { + return emptyList(); + } + + private static void checkPropertyNames(Properties settings, Collection knownNames) throws ClientException { + // validate specified properties to pick up typos and such + Enumeration pNames = settings.propertyNames(); + while (pNames.hasMoreElements()) { + String message = isKnownProperty(pNames.nextElement().toString(), knownNames); + if (message != null) { + throw new ClientException(message); + } + } + } + + private static String isKnownProperty(String propertyName, Collection knownOptions) { + if (knownOptions.contains(propertyName)) { + return null; + } + return "Unknown parameter [" + propertyName + "] ; did you mean " + StringUtils.findSimiliar(propertyName, knownOptions); + } + + protected T parseValue(String key, String value, Function parser) { + try { + return parser.apply(value); + } catch (Exception ex) { + throw new ClientException("Cannot parse property [" + key + "] with value [" + value + "]; " + ex.getMessage()); + } + } + + protected boolean isSSLEnabled() { + return 
sslConfig.isEnabled(); + } + + public SslConfig sslConfig() { + return sslConfig; + } + + public ProxyConfig proxyConfig() { + return proxyConfig; + } + + public long connectTimeout() { + return connectTimeout; + } + + public long networkTimeout() { + return networkTimeout; + } + + public long queryTimeout() { + return queryTimeout; + } + + public long pageTimeout() { + return pageTimeout; + } + + public int pageSize() { + return pageSize; + } + + // auth + public String authUser() { + return user; + } + + public String authPass() { + return pass; + } + + public URI baseUri() { + return baseURI; + } + + /** + * Returns the original connections string + */ + public String connectionString() { + return connectionString; + } + +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/JreHttpUrlConnection.java b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/JreHttpUrlConnection.java new file mode 100644 index 0000000000000..2f6289ee39507 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/JreHttpUrlConnection.java @@ -0,0 +1,309 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.io.BufferedInputStream; +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.HttpURLConnection; +import java.net.MalformedURLException; +import java.net.Proxy; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.sql.SQLClientInfoException; +import java.sql.SQLDataException; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLInvalidAuthorizationSpecException; +import java.sql.SQLRecoverableException; +import java.sql.SQLSyntaxErrorException; +import java.sql.SQLTimeoutException; +import java.util.Base64; +import java.util.function.Function; +import java.util.zip.GZIPInputStream; + +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.SSLSocketFactory; +import javax.sql.rowset.serial.SerialException; + +import static java.util.Collections.emptyMap; + +/** + * Low-level http client using the built-in {@link HttpURLConnection}. + * As such, it has a stateless, on-demand, request-response flow without + * any connection pooling or sharing. + */ +public class JreHttpUrlConnection implements Closeable { + /** + * State added to {@link SQLException}s when the server encounters an + * error. + */ + public static final String SQL_STATE_BAD_SERVER = "bad_server"; + + public static R http(String path, String query, ConnectionConfiguration cfg, Function handler) { + final URI uriPath = cfg.baseUri().resolve(path); // update path if needed + final String uriQuery = query == null ? 
uriPath.getQuery() : query; // update query if needed + final URL url; + try { + url = new URI(uriPath.getScheme(), null, uriPath.getHost(), uriPath.getPort(), uriPath.getPath(), uriQuery, + uriPath.getFragment()).toURL(); + } catch (URISyntaxException | MalformedURLException ex) { + throw new ClientException("Cannot build url using base: [" + uriPath + "] query: [" + query + "] path: [" + path + "]", ex); + } + try (JreHttpUrlConnection con = new JreHttpUrlConnection(url, cfg)) { + return handler.apply(con); + } + } + + private boolean closed = false; + final HttpURLConnection con; + private final URL url; + private static final String GZIP = "gzip"; + + public JreHttpUrlConnection(URL url, ConnectionConfiguration cfg) throws ClientException { + this.url = url; + try { + // due to the way the URL API is designed, the proxy needs to be passed in first + Proxy p = cfg.proxyConfig().proxy(); + con = (HttpURLConnection) (p != null ? url.openConnection(p) : url.openConnection()); + } catch (IOException ex) { + throw new ClientException("Cannot setup connection to " + url + " (" + ex.getMessage() + ")", ex); + } + + // the rest of the connection setup + setupConnection(cfg); + } + + private void setupConnection(ConnectionConfiguration cfg) { + // setup basic stuff first + + // timeouts + con.setConnectTimeout((int) cfg.connectTimeout()); + con.setReadTimeout((int) cfg.networkTimeout()); + + // disable content caching + con.setAllowUserInteraction(false); + con.setUseCaches(false); + + // HTTP params + // HttpURL adds this header by default, HttpS does not + // adding it here to be consistent + con.setRequestProperty("Accept-Charset", "UTF-8"); + //con.setRequestProperty("Accept-Encoding", GZIP); + + setupSSL(cfg); + setupBasicAuth(cfg); + } + + private void setupSSL(ConnectionConfiguration cfg) { + if (cfg.sslConfig().isEnabled()) { + HttpsURLConnection https = (HttpsURLConnection) con; + SSLSocketFactory factory = cfg.sslConfig().sslSocketFactory(); + AccessController.doPrivileged((PrivilegedAction) () -> { + https.setSSLSocketFactory(factory); + return null; + }); + } + } + + private void setupBasicAuth(ConnectionConfiguration cfg) { + if (StringUtils.hasText(cfg.authUser())) { + String basicValue = cfg.authUser() + ":" + cfg.authPass(); + String encoded = StringUtils.asUTFString(Base64.getEncoder().encode(StringUtils.toUTF(basicValue))); + con.setRequestProperty("Authorization", "Basic " + encoded); + } + } + + public boolean head() throws ClientException { + try { + con.setRequestMethod("HEAD"); + int responseCode = con.getResponseCode(); + return responseCode == HttpURLConnection.HTTP_OK; + } catch (IOException ex) { + throw new ClientException("Cannot HEAD address " + url + " (" + ex.getMessage() + ")", ex); + } + } + + public ResponseOrException request( + CheckedConsumer doc, + CheckedBiFunction, R, IOException> parser, + String requestMethod + ) throws ClientException { + try { + con.setRequestMethod(requestMethod); + con.setDoOutput(true); + con.setRequestProperty("Content-Type", "application/json"); + con.setRequestProperty("Accept", "application/json"); + if (doc != null) { + try (OutputStream out = con.getOutputStream()) { + doc.accept(out); + } + } + if (shouldParseBody(con.getResponseCode())) { + try (InputStream stream = getStream(con, con.getInputStream())) { + return new ResponseOrException<>(parser.apply( + new BufferedInputStream(stream), + con::getHeaderField + )); + } + } + return parserError(); + } catch (IOException ex) { + throw new ClientException("Cannot POST address 
" + url + " (" + ex.getMessage() + ")", ex); + } + } + + private boolean shouldParseBody(int responseCode) { + return responseCode == 200 || responseCode == 201 || responseCode == 202; + } + + private ResponseOrException parserError() throws IOException { + RemoteFailure failure; + try (InputStream stream = getStream(con, con.getErrorStream())) { + failure = RemoteFailure.parseFromResponse(stream); + } + if (con.getResponseCode() >= 500) { + return new ResponseOrException<>(new SQLException("Server encountered an error [" + + failure.reason() + "]. [" + failure.remoteTrace() + "]", SQL_STATE_BAD_SERVER)); + } + SqlExceptionType type = SqlExceptionType.fromRemoteFailureType(failure.type()); + if (type == null) { + return new ResponseOrException<>(new SQLException("Server sent bad type [" + + failure.type() + "]. Original type was [" + failure.reason() + "]. [" + + failure.remoteTrace() + "]", SQL_STATE_BAD_SERVER)); + } + return new ResponseOrException<>(type.asException(failure.reason())); + } + + public static class ResponseOrException { + private final R response; + private final SQLException exception; + + private ResponseOrException(R response) { + this.response = response; + this.exception = null; + } + + private ResponseOrException(SQLException exception) { + this.response = null; + this.exception = exception; + } + + public R getResponseOrThrowException() throws SQLException { + if (exception != null) { + throw exception; + } + assert response != null; + return response; + } + } + + private static InputStream getStream(HttpURLConnection con, InputStream stream) throws IOException { + if (GZIP.equals(con.getContentEncoding())) { + return new GZIPInputStream(stream); + } + return stream; + } + + public void connect() { + if (closed) { + throw new ClientException("Connection cannot be reused"); + } + try { + con.connect(); + } catch (IOException ex) { + throw new ClientException("Cannot open connection to " + url + " (" + ex.getMessage() + ")", ex); + } + } + + @Override + public void close() { + if (!closed) { + closed = true; + + // consume streams + consumeStreams(); + } + } + + public void disconnect() { + try { + connect(); + } finally { + con.disconnect(); + } + } + + // http://docs.oracle.com/javase/7/docs/technotes/guides/net/http-keepalive.html + private void consumeStreams() { + try (InputStream in = con.getInputStream()) { + while (in != null && in.read() > -1) { + } + } catch (IOException ex) { + // ignore + } finally { + try (InputStream ein = con.getErrorStream()) { + while (ein != null && ein.read() > -1) { + } + } catch (IOException ex) { + // keep on ignoring + } + } + } + + /** + * Exception type. 
+ */ + public enum SqlExceptionType { + UNKNOWN(SQLException::new), + SERIAL(SerialException::new), + CLIENT_INFO(message -> new SQLClientInfoException(message, emptyMap())), + DATA(SQLDataException::new), + SYNTAX(SQLSyntaxErrorException::new), + RECOVERABLE(SQLRecoverableException::new), + TIMEOUT(SQLTimeoutException::new), + SECURITY(SQLInvalidAuthorizationSpecException::new), + NOT_SUPPORTED(SQLFeatureNotSupportedException::new); + + public static SqlExceptionType fromRemoteFailureType(String type) { + switch (type) { + case "analysis_exception": + case "resource_not_found_exception": + case "verification_exception": + return DATA; + case "planning_exception": + case "mapping_exception": + return NOT_SUPPORTED; + case "parsing_exception": + return SYNTAX; + case "security_exception": + return SECURITY; + case "timeout_exception": + return TIMEOUT; + default: + return null; + } + } + + private final Function toException; + + SqlExceptionType(Function toException) { + this.toException = toException; + } + + SQLException asException(String message) { + if (message == null) { + throw new IllegalArgumentException("[message] cannot be null"); + } + return toException.apply(message); + } + } +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ObjectUtils.java b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ObjectUtils.java new file mode 100644 index 0000000000000..df924cdb37513 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ObjectUtils.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.util.Arrays; +import java.util.Map; +import java.util.function.Function; + +import static java.util.stream.Collectors.toMap; + +public abstract class ObjectUtils { + + public static boolean isEmpty(int[] array) { + return (array == null || array.length == 0); + } + + public static boolean isEmpty(byte[] array) { + return (array == null || array.length == 0); + } + + public static boolean isEmpty(Object[] array) { + return (array == null || array.length == 0); + } + + public static > Map mapEnum(Class clazz, Function mapper) { + return Arrays.stream(clazz.getEnumConstants()).collect(toMap(mapper, Function.identity())); + } +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ProxyConfig.java b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ProxyConfig.java new file mode 100644 index 0000000000000..ca311e5292d26 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/ProxyConfig.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.net.InetSocketAddress; +import java.net.Proxy; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Arrays; +import java.util.LinkedHashSet; +import java.util.Properties; +import java.util.Set; + +class ProxyConfig { + + private static final String HTTP_PROXY = "proxy.http"; + private static final String HTTP_PROXY_DEFAULT = StringUtils.EMPTY; + private static final String SOCKS_PROXY = "proxy.socks"; + private static final String SOCKS_PROXY_DEFAULT = StringUtils.EMPTY; + + static final Set OPTION_NAMES = new LinkedHashSet<>(Arrays.asList(HTTP_PROXY, SOCKS_PROXY)); + + private final Proxy proxy; + + ProxyConfig(Properties settings) { + Proxy.Type type = null; + // try http first + Object[] address = host(settings.getProperty(HTTP_PROXY, HTTP_PROXY_DEFAULT), 80); + type = Proxy.Type.HTTP; + // nope, check socks + if (address == null) { + address = host(settings.getProperty(SOCKS_PROXY, SOCKS_PROXY_DEFAULT), 1080); + type = Proxy.Type.SOCKS; + } + proxy = address != null ? createProxy(type, address) : null; + } + + @SuppressForbidden(reason = "create the actual proxy") + private Proxy createProxy(Proxy.Type type, Object[] address) { + return new Proxy(type, new InetSocketAddress((String) address[0], (int) address[1])); + } + + boolean enabled() { + return proxy != null; + } + + Proxy proxy() { + return proxy; + } + + // returns hostname (string), port (int) + private static Object[] host(String address, int defaultPort) { + if (!StringUtils.hasText(address)) { + return null; + } + try { + URI uri = new URI(address); + Object[] results = { uri.getHost(), uri.getPort() > 0 ? uri.getPort() : defaultPort }; + return results; + } catch (URISyntaxException ex) { + throw new ClientException("Unrecognized address format " + address, ex); + } + } +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/RemoteFailure.java b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/RemoteFailure.java new file mode 100644 index 0000000000000..f9eccb4f157ef --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/RemoteFailure.java @@ -0,0 +1,337 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParseException; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonToken; + +import java.io.BufferedInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.Reader; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static java.util.Collections.unmodifiableList; +import static java.util.Collections.unmodifiableMap; + +/** + * A failure that happened on the remote server. 
+ */ +public class RemoteFailure { + /** + * The maximum number of bytes before we no longer include the raw response if + * there is a catastrophic error parsing the remote failure. The actual value + * was chosen because it is ten times larger than a "normal" elasticsearch + * failure but not so big that we'll consume a ton of memory on huge errors. + * It will produce huge error messages but the user might + * want all that because it is probably being thrown by + * their proxy. + */ + static final int MAX_RAW_RESPONSE = 512 * 1024; + + private static final JsonFactory JSON_FACTORY = new JsonFactory(); + static { + // Set up the factory similarly to how XContent does + JSON_FACTORY.configure(JsonGenerator.Feature.QUOTE_FIELD_NAMES, true); + JSON_FACTORY.configure(JsonParser.Feature.ALLOW_COMMENTS, true); + JSON_FACTORY.configure(JsonFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, true); + // Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.core.json.UTF8JsonGenerator#close() method + JSON_FACTORY.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); + JSON_FACTORY.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, false); + // Don't close the stream because we might need to reset and replay it if there is an error. The caller closes the stream. + JSON_FACTORY.configure(JsonParser.Feature.AUTO_CLOSE_SOURCE, false); + } + + /** + * Parse a failure from the response. The stream is not closed when the parsing is complete. + * The caller must close it. + * @throws IOException if there is a catastrophic failure parsing the remote failure + */ + public static RemoteFailure parseFromResponse(InputStream stream) throws IOException { + // Mark so we can rewind to get the entire response in case we have to render an error. + stream = new BufferedInputStream(stream); + stream.mark(MAX_RAW_RESPONSE); + JsonParser parser = null; + try { + parser = JSON_FACTORY.createParser(stream); + return parseResponseTopLevel(parser); + } catch (JsonParseException e) { + throw new IOException(parseErrorMessage(e.getOriginalMessage(), stream, parser), e); + } catch (IOException e) { + throw new IOException(parseErrorMessage(e.getMessage(), stream, parser), e); + } finally { + if (parser != null) { + parser.close(); + } + } + } + + private final String type; + private final String reason; + private final String remoteTrace; + private final Map headers; + private final Map> metadata; + private final RemoteFailure cause; + + RemoteFailure(String type, + String reason, + String remoteTrace, + Map headers, + Map> metadata, + RemoteFailure cause) { + this.type = type; + this.reason = reason; + this.remoteTrace = remoteTrace; + this.headers = headers; + this.metadata = metadata; + this.cause = cause; + } + + public String type() { + return type; + } + + public String reason() { + return reason; + } + + /** + * Stack trace from Elasticsearch for the remote failure. Mostly just useful for debugging + * errors that happen to be bugs. + */ + public String remoteTrace() { + return remoteTrace; + } + + /** + * Headers sent by the remote failure. + */ + public Map headers() { + return headers; + } + + /** + * Metadata sent by the remote failure. + */ + public Map> metadata() { + return metadata; + } + + /** + * Cause of the remote failure. Mostly just useful for debugging errors that happen to be bugs.
+ */ + public RemoteFailure cause() { + return cause; + } + + private static RemoteFailure parseResponseTopLevel(JsonParser parser) throws IOException { + RemoteFailure exception = null; + + /* It'd be lovely to use the high level constructs that we have in core like ObjectParser + * but, alas, we aren't going to modularize those out any time soon. */ + JsonToken token = parser.nextToken(); + if (token != JsonToken.START_OBJECT) { + throw new IllegalArgumentException("Expected error to start with [START_OBJECT] but started with [" + token + + "][" + parser.getText() + "]"); + } + String fieldName = null; + while ((token = parser.nextToken()) != JsonToken.END_OBJECT) { + if (token == JsonToken.FIELD_NAME) { + fieldName = parser.getCurrentName(); + } else { + switch (fieldName) { + case "error": + if (token != JsonToken.START_OBJECT) { + throw new IOException("Expected [error] to be an object but was [" + token + "][" + parser.getText() + "]"); + } + exception = parseFailure(parser); + continue; + case "status": + if (token != JsonToken.VALUE_NUMBER_INT) { + throw new IOException("Expected [status] to be a string but was [" + token + "][" + parser.getText() + "]"); + } + // Intentionally ignored + continue; + default: + throw new IOException("Expected one of [error, status] but got [" + fieldName + "][" + parser.getText() + "]"); + } + } + } + if (exception == null) { + throw new IOException("Expected [error] but didn't see it."); + } + return exception; + } + + private static RemoteFailure parseFailure(JsonParser parser) throws IOException { + String type = null; + String reason = null; + String remoteTrace = null; + Map headers = emptyMap(); + RemoteFailure cause = null; + final Map> metadata = new LinkedHashMap<>(); + + JsonToken token; + String fieldName = null; + while ((token = parser.nextToken()) != JsonToken.END_OBJECT) { + if (token == JsonToken.FIELD_NAME) { + fieldName = parser.getCurrentName(); + } else { + switch (fieldName) { + case "caused_by": + if (token != JsonToken.START_OBJECT) { + throw new IOException("Expected [caused_by] to be an object but was [" + token + "][" + parser.getText() + "]"); + } + cause = parseFailure(parser); + break; + case "header": + if (token != JsonToken.START_OBJECT) { + throw new IOException("Expected [header] to be an object but was [" + token + "][" + parser.getText() + "]"); + } + headers = parseHeaders(parser); + break; + case "reason": + switch (token) { + case VALUE_STRING: + reason = parser.getText(); + break; + case VALUE_NULL: + break; + default: + throw new IOException("Expected [reason] to be a string but was [" + token + "][" + parser.getText() + "]"); + } + break; + case "root_cause": + if (token != JsonToken.START_ARRAY) { + throw new IOException("Expected [root_cause] to be an array but was [" + token + "][" + parser.getText() + "]"); + } + parser.skipChildren(); // Intentionally ignored + break; + case "stack_trace": + if (token != JsonToken.VALUE_STRING) { + throw new IOException("Expected [stack_trace] to be a string but was [" + token + "][" + parser.getText() + "]"); + } + remoteTrace = parser.getText(); + break; + case "type": + if (token != JsonToken.VALUE_STRING) { + throw new IOException("Expected [type] to be a string but was [" + token + "][" + parser.getText() + "]"); + } + type = parser.getText(); + break; + default: + metadata.putAll(parseMetadata(parser)); + } + } + } + if (type == null) { + throw new IOException("expected [type] but didn't see it"); + } + if (remoteTrace == null) { + throw new IOException("expected 
[stack_trace] cannot but didn't see it"); + } + return new RemoteFailure(type, reason, remoteTrace, headers, metadata, cause); + } + + private static Map parseHeaders(JsonParser parser) throws IOException { + Map headers = new HashMap<>(); + + JsonToken token; + while ((token = parser.nextToken()) != JsonToken.END_OBJECT) { + if (token != JsonToken.FIELD_NAME) { + throw new IOException("expected header name but was [" + token + "][" + parser.getText() + "]"); + } + String name = parser.getText(); + token = parser.nextToken(); + if (token != JsonToken.VALUE_STRING) { + throw new IOException("expected header value but was [" + token + "][" + parser.getText() + "]"); + } + String value = parser.getText(); + headers.put(name, value); + } + + return headers; + } + + private static Map> parseMetadata(final JsonParser parser) throws IOException { + final Map> metadata = new HashMap<>(); + final String currentFieldName = parser.getCurrentName(); + + JsonToken token = parser.currentToken(); + if (token == JsonToken.VALUE_STRING) { + metadata.put(currentFieldName, singletonList(parser.getText())); + + } else if (token == JsonToken.START_ARRAY) { + // Parse the array and add each item to the corresponding list of metadata. + // Arrays of objects are not supported yet and just ignored and skipped. + final List values = new ArrayList<>(); + while ((token = parser.nextToken()) != JsonToken.END_ARRAY) { + if (token ==JsonToken.VALUE_STRING) { + values.add(parser.getText()); + } else { + parser.skipChildren(); + } + } + if (values.size() > 0) { + if (metadata.containsKey(currentFieldName)) { + values.addAll(metadata.get(currentFieldName)); + } + metadata.put(currentFieldName, unmodifiableList(values)); + } + + } else { + // Any additional metadata object added by the metadataToXContent method is ignored + // and skipped, so that the parser does not fail on unknown fields. The parser only + // support metadata key-pairs and metadata arrays of values. + parser.skipChildren(); + } + return unmodifiableMap(metadata); + } + + /** + * Build an error message from a parse failure. + */ + private static String parseErrorMessage(String message, InputStream stream, JsonParser parser) { + String responseMessage; + try { + try { + stream.reset(); + } catch (IOException e) { + // So far as I know, this is always caused by the response being too large + throw new IOException("Response too large", e); + } + try (Reader reader = new InputStreamReader(stream, StandardCharsets.UTF_8)) { + StringBuilder builder = new StringBuilder(); + builder.append("Response:\n"); + char[] buf = new char[512]; + int read; + while ((read = reader.read(buf)) != -1) { + builder.append(buf, 0, read); + } + responseMessage = builder.toString(); + } + } catch (IOException replayException) { + responseMessage = "Attempted to include response but failed because [" + replayException.getMessage() + "]."; + } + String parserLocation = ""; + if (parser != null) { + parserLocation = " at [line " + parser.getTokenLocation().getLineNr() + + " col " + parser.getTokenLocation().getColumnNr() + "]"; + } + return "Can't parse error from Elasticsearch [" + message + "]" + parserLocation + ". 
" + responseMessage; + } +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/SslConfig.java b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/SslConfig.java new file mode 100644 index 0000000000000..35a1ebe3b96c6 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/SslConfig.java @@ -0,0 +1,168 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; +import java.security.GeneralSecurityException; +import java.security.KeyStore; +import java.util.Arrays; +import java.util.LinkedHashSet; +import java.util.Objects; +import java.util.Properties; +import java.util.Set; + +import javax.net.ssl.KeyManager; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLSocketFactory; +import javax.net.ssl.TrustManager; +import javax.net.ssl.TrustManagerFactory; + +public class SslConfig { + + public static final String SSL = "ssl"; + private static final String SSL_DEFAULT = "false"; + + public static final String SSL_PROTOCOL = "ssl.protocol"; + private static final String SSL_PROTOCOL_DEFAULT = "TLS"; // SSL alternative + + public static final String SSL_KEYSTORE_LOCATION = "ssl.keystore.location"; + private static final String SSL_KEYSTORE_LOCATION_DEFAULT = ""; + + public static final String SSL_KEYSTORE_PASS = "ssl.keystore.pass"; + private static final String SSL_KEYSTORE_PASS_DEFAULT = ""; + + public static final String SSL_KEYSTORE_TYPE = "ssl.keystore.type"; + private static final String SSL_KEYSTORE_TYPE_DEFAULT = "JKS"; // PCKS12 + + public static final String SSL_TRUSTSTORE_LOCATION = "ssl.truststore.location"; + private static final String SSL_TRUSTSTORE_LOCATION_DEFAULT = ""; + + public static final String SSL_TRUSTSTORE_PASS = "ssl.truststore.pass"; + private static final String SSL_TRUSTSTORE_PASS_DEFAULT = ""; + + public static final String SSL_TRUSTSTORE_TYPE = "ssl.truststore.type"; + private static final String SSL_TRUSTSTORE_TYPE_DEFAULT = "JKS"; + + static final Set OPTION_NAMES = new LinkedHashSet<>(Arrays.asList(SSL, SSL_PROTOCOL, + SSL_KEYSTORE_LOCATION, SSL_KEYSTORE_PASS, SSL_KEYSTORE_TYPE, + SSL_TRUSTSTORE_LOCATION, SSL_TRUSTSTORE_PASS, SSL_TRUSTSTORE_TYPE)); + + private final boolean enabled; + private final String protocol, keystoreLocation, keystorePass, keystoreType; + private final String truststoreLocation, truststorePass, truststoreType; + + private final SSLContext sslContext; + + SslConfig(Properties settings) { + enabled = StringUtils.parseBoolean(settings.getProperty(SSL, SSL_DEFAULT)); + protocol = settings.getProperty(SSL_PROTOCOL, SSL_PROTOCOL_DEFAULT); + keystoreLocation = settings.getProperty(SSL_KEYSTORE_LOCATION, SSL_KEYSTORE_LOCATION_DEFAULT); + keystorePass = settings.getProperty(SSL_KEYSTORE_PASS, SSL_KEYSTORE_PASS_DEFAULT); + keystoreType = settings.getProperty(SSL_KEYSTORE_TYPE, SSL_KEYSTORE_TYPE_DEFAULT); + truststoreLocation = settings.getProperty(SSL_TRUSTSTORE_LOCATION, SSL_TRUSTSTORE_LOCATION_DEFAULT); + truststorePass = 
settings.getProperty(SSL_TRUSTSTORE_PASS, SSL_TRUSTSTORE_PASS_DEFAULT); + truststoreType = settings.getProperty(SSL_TRUSTSTORE_TYPE, SSL_TRUSTSTORE_TYPE_DEFAULT); + + sslContext = enabled ? createSSLContext() : null; + } + + // ssl + boolean isEnabled() { + return enabled; + } + + SSLSocketFactory sslSocketFactory() { + return sslContext.getSocketFactory(); + } + + private SSLContext createSSLContext() { + SSLContext ctx; + try { + ctx = SSLContext.getInstance(protocol); + ctx.init(loadKeyManagers(), loadTrustManagers(), null); + } catch (Exception ex) { + throw new ClientException("Failed to initialize SSL - " + ex.getMessage(), ex); + } + + return ctx; + } + + private KeyManager[] loadKeyManagers() throws GeneralSecurityException, IOException { + if (!StringUtils.hasText(keystoreLocation)) { + return null; + } + + char[] pass = (StringUtils.hasText(keystorePass) ? keystorePass.trim().toCharArray() : null); + KeyStore keyStore = loadKeyStore(keystoreLocation, pass, keystoreType); + KeyManagerFactory kmFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + kmFactory.init(keyStore, pass); + return kmFactory.getKeyManagers(); + } + + + private KeyStore loadKeyStore(String location, char[] pass, String keyStoreType) throws GeneralSecurityException, IOException { + KeyStore keyStore = KeyStore.getInstance(keyStoreType); + Path path = Paths.get(location); + + if (!Files.exists(path)) { + throw new ClientException( + "Expected to find keystore file at [" + location + "] but was unable to. Make sure you have specified a valid URI."); + } + + try (InputStream in = Files.newInputStream(Paths.get(location), StandardOpenOption.READ)) { + keyStore.load(in, pass); + } catch (Exception ex) { + throw new ClientException("Cannot open keystore [" + location + "] - " + ex.getMessage(), ex); + } finally { + + } + return keyStore; + } + + private TrustManager[] loadTrustManagers() throws GeneralSecurityException, IOException { + KeyStore keyStore = null; + + if (StringUtils.hasText(truststoreLocation)) { + char[] pass = (StringUtils.hasText(truststorePass) ? 
truststorePass.trim().toCharArray() : null); + keyStore = loadKeyStore(truststoreLocation, pass, truststoreType); + } + + TrustManagerFactory tmFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmFactory.init(keyStore); + return tmFactory.getTrustManagers(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + SslConfig other = (SslConfig) obj; + return Objects.equals(enabled, other.enabled) + && Objects.equals(protocol, other.protocol) + && Objects.equals(keystoreLocation, other.keystoreLocation) + && Objects.equals(keystorePass, other.keystorePass) + && Objects.equals(keystoreType, other.keystoreType) + && Objects.equals(truststoreLocation, other.truststoreLocation) + && Objects.equals(truststorePass, other.truststorePass) + && Objects.equals(truststoreType, other.truststoreType); + } + + public int hashCode() { + return getClass().hashCode(); + } +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/StringUtils.java b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/StringUtils.java new file mode 100644 index 0000000000000..192c217be513c --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/StringUtils.java @@ -0,0 +1,308 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.StringTokenizer; + +public abstract class StringUtils { + public static final String EMPTY = ""; + public static final String SLASH = "/"; + public static final String PATH_TOP = ".."; + public static final String PATH_CURRENT = "."; + public static final String DEFAULT_DELIMITER = ","; + + public static String nullAsEmpty(String string) { + return string == null ? 
EMPTY : string; + } + + public static boolean hasText(CharSequence sequence) { + if (!hasLength(sequence)) { + return false; + } + int length = sequence.length(); + for (int i = 0; i < length; i++) { + if (!Character.isWhitespace(sequence.charAt(i))) { + return true; + } + } + return false; + } + + public static boolean hasLength(CharSequence sequence) { + return (sequence != null && sequence.length() > 0); + } + + public static boolean isUpperCase(CharSequence sequence) { + for (int i = 0; i < sequence.length(); i++) { + if (Character.isLetter(sequence.charAt(i)) && !Character.isUpperCase(sequence.charAt(i))) { + return false; + } + } + return true; + } + + public static String[] splitToIndexAndType(String pattern) { + List tokens = tokenize(pattern, "."); + + String[] results = new String[2]; + if (tokens.size() == 2) { + results[0] = tokens.get(0); + results[1] = tokens.get(1); + } + else { + results[0] = nullAsEmpty(pattern); + results[1] = EMPTY; + } + + return results; + } + + public static List tokenize(String string) { + return tokenize(string, DEFAULT_DELIMITER); + } + + public static List tokenize(String string, String delimiters) { + return tokenize(string, delimiters, true, true); + } + + public static List tokenize(String string, String delimiters, boolean trimTokens, boolean ignoreEmptyTokens) { + if (!hasText(string)) { + return Collections.emptyList(); + } + StringTokenizer st = new StringTokenizer(string, delimiters); + List tokens = new ArrayList(); + while (st.hasMoreTokens()) { + String token = st.nextToken(); + if (trimTokens) { + token = token.trim(); + } + if (!ignoreEmptyTokens || token.length() > 0) { + tokens.add(token); + } + } + return tokens; + } + + public static String concatenate(Collection list) { + return concatenate(list, DEFAULT_DELIMITER); + } + + public static String concatenate(Collection list, String delimiter) { + if (list == null || list.isEmpty()) { + return EMPTY; + } + if (delimiter == null) { + delimiter = EMPTY; + } + StringBuilder sb = new StringBuilder(); + + for (Object object : list) { + sb.append(object.toString()); + sb.append(delimiter); + } + + sb.setLength(sb.length() - delimiter.length()); + return sb.toString(); + } + + public static String normalize(String path) { + if (path == null) { + return null; + } + String pathToUse = path.replace("\\", SLASH); + + int prefixIndex = pathToUse.indexOf(":"); + String prefix = ""; + if (prefixIndex != -1) { + prefix = pathToUse.substring(0, prefixIndex + 1); + if (prefix.contains(SLASH)) { + prefix = ""; + } + else { + pathToUse = pathToUse.substring(prefixIndex + 1); + } + } + if (pathToUse.startsWith(SLASH)) { + prefix = prefix + SLASH; + pathToUse = pathToUse.substring(1); + } + + List pathList = tokenize(pathToUse, SLASH); + List pathTokens = new LinkedList(); + int tops = 0; + + for (int i = pathList.size() - 1; i >= 0; i--) { + String element = pathList.get(i); + if (PATH_CURRENT.equals(element)) { + // current folder, ignore it + } + else if (PATH_TOP.equals(element)) { + // top folder, skip previous element + tops++; + } + else { + if (tops > 0) { + // should it be skipped? 
+ tops--; + } + else { + pathTokens.add(0, element); + } + } + } + + for (int i = 0; i < tops; i++) { + pathTokens.add(0, PATH_TOP); + } + + return prefix + concatenate(pathTokens, SLASH); + } + + public static String asUTFString(byte[] content) { + return asUTFString(content, 0, content.length); + } + + public static String asUTFString(byte[] content, int offset, int length) { + return (content == null || length == 0 ? EMPTY : new String(content, offset, length, StandardCharsets.UTF_8)); + } + + public static byte[] toUTF(String string) { + return string.getBytes(StandardCharsets.UTF_8); + } + + // Based on "Algorithms on Strings, Trees and Sequences by Dan Gusfield". + // returns the edit distance if the two strings are within the given threshold of each other, -1 otherwise + private static int levenshteinDistance(CharSequence one, CharSequence another, int threshold) { + int n = one.length(); + int m = another.length(); + + // if one string is empty, the edit distance is necessarily the length of the other + if (n == 0) { + return m <= threshold ? m : -1; + } + else if (m == 0) { + return n <= threshold ? n : -1; + } + + if (n > m) { + // swap the two strings to consume less memory + final CharSequence tmp = one; + one = another; + another = tmp; + n = m; + m = another.length(); + } + + int p[] = new int[n + 1]; // 'previous' cost array, horizontally + int d[] = new int[n + 1]; // cost array, horizontally + int _d[]; // placeholder to assist in swapping p and d + + // fill in starting table values + final int boundary = Math.min(n, threshold) + 1; + for (int i = 0; i < boundary; i++) { + p[i] = i; + } + + // these fills ensure that the value above the rightmost entry of our + // stripe will be ignored in following loop iterations + Arrays.fill(p, boundary, p.length, Integer.MAX_VALUE); + Arrays.fill(d, Integer.MAX_VALUE); + + for (int j = 1; j <= m; j++) { + final char t_j = another.charAt(j - 1); + d[0] = j; + + // compute stripe indices, constrain to array size + final int min = Math.max(1, j - threshold); + final int max = (j > Integer.MAX_VALUE - threshold) ?
n : Math.min(n, j + threshold); + + // the stripe may lead off of the table if s and t are of different sizes + if (min > max) { + return -1; + } + + // ignore entry left of leftmost + if (min > 1) { + d[min - 1] = Integer.MAX_VALUE; + } + + // iterates through [min, max] in s + for (int i = min; i <= max; i++) { + if (one.charAt(i - 1) == t_j) { + // diagonally left and up + d[i] = p[i - 1]; + } + else { + // 1 + minimum of cell to the left, to the top, diagonally left and up + d[i] = 1 + Math.min(Math.min(d[i - 1], p[i]), p[i - 1]); + } + } + + // copy current distance counts to 'previous row' distance counts + _d = p; + p = d; + d = _d; + } + + // if p[n] is greater than the threshold, there's no guarantee on it being the correct + // distance + if (p[n] <= threshold) { + return p[n]; + } + return -1; + } + + public static List findSimiliar(CharSequence match, Collection potential) { + List list = new ArrayList(3); + + // 1 switches or 1 extra char + int maxDistance = 5; + + for (String string : potential) { + int dist = levenshteinDistance(match, string, maxDistance); + if (dist >= 0) { + if (dist < maxDistance) { + maxDistance = dist; + list.clear(); + list.add(string); + } + else if (dist == maxDistance) { + list.add(string); + } + } + } + + return list; + } + + public static boolean parseBoolean(String input) { + switch(input) { + case "true": return true; + case "false": return false; + default: throw new IllegalArgumentException("must be [true] or [false]"); + } + } + + public static String asHexString(byte[] content, int offset, int length) { + StringBuilder buf = new StringBuilder(); + for (int i = offset; i < length; i++) { + String hex = Integer.toHexString(0xFF & content[i]); + if (hex.length() == 1) { + buf.append('0'); + } + buf.append(hex); + } + return buf.toString(); + } + +} \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/SuppressForbidden.java b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/SuppressForbidden.java new file mode 100644 index 0000000000000..52b864edff4bc --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/SuppressForbidden.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Annotation to suppress forbidden-apis errors inside a whole class, a method, or a field. + */ +@Retention(RetentionPolicy.CLASS) +@Target({ ElementType.CONSTRUCTOR, ElementType.FIELD, ElementType.METHOD, ElementType.TYPE }) +public @interface SuppressForbidden { + String reason(); +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/UriUtils.java b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/UriUtils.java new file mode 100644 index 0000000000000..f8c2e73e6a0c0 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/UriUtils.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.net.URI; +import java.net.URISyntaxException; + +public final class UriUtils { + private UriUtils() { + + } + + /** + * Parses the URL provided by the user, completing any missing parts (port, path, query and fragment) from the default URI. + */ + public static URI parseURI(String connectionString, URI defaultURI) { + final URI uri = parseWithNoScheme(connectionString); + final String path = "".equals(uri.getPath()) ? defaultURI.getPath() : uri.getPath(); + final String query = uri.getQuery() == null ? defaultURI.getQuery() : uri.getQuery(); + final int port = uri.getPort() < 0 ? defaultURI.getPort() : uri.getPort(); + try { + return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), port, path, query, defaultURI.getFragment()); + } catch (URISyntaxException e) { + throw new IllegalArgumentException("Invalid connection configuration [" + connectionString + "]: " + e.getMessage(), e); + } + } + + private static URI parseWithNoScheme(String connectionString) { + URI uri; + // check if URI can be parsed correctly without adding scheme + // if the connection string is in format host:port or just host, the host is going to be null + // if the connection string contains IPv6 localhost [::1] the parsing will fail + URISyntaxException firstException = null; + try { + uri = new URI(connectionString); + if (uri.getHost() == null || uri.getScheme() == null) { + uri = null; + } + } catch (URISyntaxException e) { + firstException = e; + uri = null; + } + + if (uri == null) { + // We couldn't parse URI without adding scheme, let's try again with scheme this time + try { + return new URI("http://" + connectionString); + } catch (URISyntaxException e) { + IllegalArgumentException ie = + new IllegalArgumentException("Invalid connection configuration [" + connectionString + "]: " + e.getMessage(), e); + if (firstException != null) { + ie.addSuppressed(firstException); + } + throw ie; + } + } else { + // We managed to parse URI and all necessary pieces are present, let's make sure the scheme is correct + if ("http".equals(uri.getScheme()) == false && "https".equals(uri.getScheme()) == false) { + throw new IllegalArgumentException( + "Invalid connection configuration [" + connectionString + "]: Only http and https protocols are supported"); + } + return uri; + } + } + + /** + * Removes the query part of the URI + */ + public static URI removeQuery(URI uri, String connectionString, URI defaultURI) { + try { + return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(), uri.getPort(), uri.getPath(), null, defaultURI.getFragment()); + } catch (URISyntaxException e) { + throw new IllegalArgumentException("Invalid connection configuration [" + connectionString + "]: " + e.getMessage(), e); + } + } +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/Version.java b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/Version.java new file mode 100644 index 0000000000000..bfa9e0a3cb492 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/shared/Version.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements.
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import java.io.IOException; +import java.net.URL; +import java.util.Collections; +import java.util.Enumeration; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; +import java.util.jar.JarInputStream; +import java.util.jar.Manifest; + +public class Version { + + public static final Version CURRENT; + public final String version; + public final String hash; + public final byte major; + public final byte minor; + public final byte revision; + + private Version(String version, String hash, byte... parts) { + this.version = version; + this.hash = hash; + this.major = parts[0]; + this.minor = parts[1]; + this.revision = parts[2]; + } + + public static Version fromString(String version) { + return new Version(version, "Unknown", from(version)); + } + + static byte[] from(String ver) { + String[] parts = ver.split("[.-]"); + if (parts.length == 3 || parts.length == 4) { + return new byte[] { Byte.parseByte(parts[0]), Byte.parseByte(parts[1]), Byte.parseByte(parts[2]) }; + } + else { + throw new IllegalArgumentException("Invalid version " + ver); + } + } + + static { + // check classpath + String target = Version.class.getName().replace(".", "/").concat(".class"); + Enumeration res; + + try { + res = Version.class.getClassLoader().getResources(target); + } catch (IOException ex) { + throw new IllegalArgumentException("Cannot detect Elasticsearch JDBC jar; it typically indicates a deployment issue..."); + } + + if (res != null) { + List urls = Collections.list(res); + Set normalized = new LinkedHashSet<>(); + + for (URL url : urls) { + normalized.add(StringUtils.normalize(url.toString())); + } + + int foundJars = 0; + if (normalized.size() > 1) { + StringBuilder sb = new StringBuilder( + "Multiple Elasticsearch JDBC versions detected in the classpath; please use only one\n"); + for (String s : normalized) { + if (s.contains("jar:")) { + foundJars++; + sb.append(s.replace("!/" + target, "")); + sb.append("\n"); + } + } + if (foundJars > 1) { + throw new IllegalArgumentException(sb.toString()); + } + } + } + + // This is similar to how Elasticsearch's Build class digs up its build information. 
+ // Since version info is not critical, the parsing is lenient + URL url = Version.class.getProtectionDomain().getCodeSource().getLocation(); + String urlStr = url.toString(); + + byte maj = 0, min = 0, rev = 0; + String ver = "Unknown"; + String hash = ver; + + if (urlStr.endsWith(".jar")) { + try (JarInputStream jar = new JarInputStream(url.openStream())) { + Manifest manifest = jar.getManifest(); + hash = manifest.getMainAttributes().getValue("Change"); + ver = manifest.getMainAttributes().getValue("X-Compile-Elasticsearch-Version"); + byte[] vers = from(ver); + maj = vers[0]; + min = vers[1]; + rev = vers[2]; + } catch (Exception ex) { + throw new IllegalArgumentException("Detected Elasticsearch JDBC jar but cannot retrieve its version", ex); + } + } + CURRENT = new Version(ver, hash, maj, min, rev); + } + + @Override + public String toString() { + return "v" + version + " [" + hash + "]"; + } + + public static int jdbcMajorVersion() { + return 4; + } + + public static int jdbcMinorVersion() { + return 2; + } +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/RemoteFailureTests.java b/x-pack/plugin/sql/sql-shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/RemoteFailureTests.java new file mode 100644 index 0000000000000..ab0bc40f3ba72 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/RemoteFailureTests.java @@ -0,0 +1,220 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.util.Arrays; +import java.util.Locale; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.startsWith; + +public class RemoteFailureTests extends ESTestCase { + public void testParseBasic() throws IOException { + RemoteFailure failure = parse("basic.json"); + assertEquals("illegal_argument_exception", failure.type()); + assertEquals("[sql/query] unknown field [test], parser not found", failure.reason()); + assertThat(failure.remoteTrace(), + containsString("at org.elasticsearch.common.xcontent.ObjectParser.getParser(ObjectParser.java:346)")); + assertNull(failure.cause()); + assertEquals(emptyMap(), failure.headers()); + } + + public void testParseNested() throws IOException { + RemoteFailure failure = parse("nested.json"); + assertEquals("parsing_exception", failure.type()); + assertEquals("line 1:1: no viable alternative at input 'test'", failure.reason()); + assertThat(failure.remoteTrace(), + containsString("at org.elasticsearch.xpack.sql.parser.SqlParser$1.syntaxError(SqlParser.java:151)")); + assertNotNull(failure.cause()); + + failure = failure.cause(); + assertEquals("no_viable_alt_exception", failure.type()); + assertEquals(null, failure.reason()); + assertThat(failure.remoteTrace(), + containsString("at org.antlr.v4.runtime.atn.ParserATNSimulator.noViableAlt(ParserATNSimulator.java:1886)")); + assertNull(failure.cause()); + 
assertEquals(emptyMap(), failure.headers()); + } + + public void testParseMissingAuth() throws IOException { + RemoteFailure failure = parse("missing_auth.json"); + assertEquals("security_exception", failure.type()); + assertEquals("missing authentication token for REST request [/?pretty&error_trace]", failure.reason()); + assertThat(failure.remoteTrace(), + containsString("DefaultAuthenticationFailureHandler.missingToken")); + assertNull(failure.cause()); + assertEquals(singletonMap("WWW-Authenticate", "Basic realm=\"security\" charset=\"UTF-8\""), + failure.headers()); + } + + public void testNoError() { + IOException e = expectThrows(IOException.class, () -> parse("no_error.json")); + assertEquals( + "Can't parse error from Elasticsearch [Expected [error] but didn't see it.] at [line 1 col 2]. Response:\n{}", + e.getMessage()); + } + + public void testBogusError() { + IOException e = expectThrows(IOException.class, () -> parse("bogus_error.json")); + assertEquals( + "Can't parse error from Elasticsearch [Expected [error] to be an object but was [VALUE_STRING][bogus]] " + + "at [line 1 col 12]. Response:\n" + + "{ \"error\": \"bogus\" }", + e.getMessage()); + } + + public void testNoStack() { + IOException e = expectThrows(IOException.class, () -> parse("no_stack.json")); + assertThat(e.getMessage(), + startsWith("Can't parse error from Elasticsearch [expected [stack_trace] cannot but " + + "didn't see it] at [line 5 col 3]. Response:\n{")); + } + + public void testNoType() { + IOException e = expectThrows(IOException.class, () -> parse("no_type.json")); + assertThat(e.getMessage(), + startsWith("Can't parse error from Elasticsearch [expected [type] but didn't see it] at [line 5 col 3]. Response:\n{")); + } + + public void testInvalidJson() { + IOException e = expectThrows(IOException.class, () -> parse("invalid_json.txt")); + assertEquals( + "Can't parse error from Elasticsearch [Unrecognized token 'I': was expecting 'null', 'true', 'false' or NaN] " + + "at [line 1 col 1]. Response:\n" + + "I'm not json at all", + e.getMessage()); + } + + public void testExceptionBuildingParser() { + IOException e = expectThrows(IOException.class, () -> RemoteFailure.parseFromResponse(new InputStream() { + @Override + public int read() throws IOException { + throw new IOException("Testing error"); + } + })); + assertEquals( + "Can't parse error from Elasticsearch [Testing error]. Attempted to include response but failed because [Testing error].", + e.getMessage()); + } + + public void testTotalGarbage() { + IOException e = expectThrows(IOException.class, () -> + RemoteFailure.parseFromResponse(new BytesArray(new byte[] { + (byte) 0xEF, (byte) 0xBB, (byte) 0xBF, // The UTF-8 BOM + (byte) 0xFF // An invalid UTF-8 character + }).streamInput())); + assertThat(e.getMessage(), + startsWith("Can't parse error from Elasticsearch [Unrecognized token 'ÿ': " + + "was expecting ('true', 'false' or 'null')] at [line 1 col 1]. 
Response:\n")); + } + + public void testTooBig() { + StringBuilder tooBig = new StringBuilder(RemoteFailure.MAX_RAW_RESPONSE); + tooBig.append("{\n"); + tooBig.append("\"error\" : {\n"); + tooBig.append(" \"type\" : \"illegal_argument_exception\",\n"); + tooBig.append(" \"reason\" : \"something\",\n"); + tooBig.append(" \"header\" : {\n"); + int i = 0; + while (tooBig.length() < RemoteFailure.MAX_RAW_RESPONSE) { + tooBig.append(" \"").append(String.format(Locale.ROOT, "%04d", i++)) + .append("\" : \"lots and lots and lots and lots and lots of words\",\n"); + } + tooBig.append(" \"end\" : \"lots and lots and lots and lots and lots of words\"\n"); + tooBig.append(" }\n"); + tooBig.append("}\n"); + IOException e = expectThrows(IOException.class, () -> + RemoteFailure.parseFromResponse(new BytesArray(tooBig.toString()).streamInput())); + assertEquals( + "Can't parse error from Elasticsearch [expected [stack_trace] cannot but didn't see it] " + + "at [line 7951 col 1]. Attempted to include response but failed because [Response too large].", + e.getMessage()); + } + + public void testFailureWithMetadata() throws IOException { + final StringBuilder json = new StringBuilder(); + json.append("{"); + json.append("\"error\":{"); + json.append(" \"root_cause\":[],"); + json.append(" \"type\":\"search_phase_execution_exception\","); + json.append(" \"reason\":\"all shards failed\","); + json.append(" \"phase\":\"query\","); + json.append(" \"grouped\":true,"); + json.append(" \"failed_shards\":[],"); + json.append(" \"stack_trace\":\"Failed to execute phase [query], all shards failed at...\""); + json.append(" },"); + json.append(" \"status\":503"); + json.append("}"); + + RemoteFailure failure = RemoteFailure.parseFromResponse(new BytesArray(json.toString()).streamInput()); + assertEquals("search_phase_execution_exception", failure.type()); + assertEquals("all shards failed", failure.reason()); + assertThat(failure.remoteTrace(), containsString("Failed to execute phase [query], all shards failed")); + assertNull(failure.cause()); + assertEquals(emptyMap(), failure.headers()); + assertNotNull(failure.metadata()); + assertEquals(1, failure.metadata().size()); + assertEquals(singletonList("query"), failure.metadata().get("phase")); + } + + public void testFailureWithMetadataAndRootCause() throws IOException { + final StringBuilder json = new StringBuilder(); + json.append("{"); + json.append("\"error\":{"); + json.append(" \"caused_by\":{"); + json.append(" \"type\":\"invalid_index_name_exception\","); + json.append(" \"reason\":\"Invalid index name [_root], must not start with '_'.\","); + json.append(" \"index_uuid\":\"_na_root\","); + json.append(" \"index\":[\"_root\",\"_foo\"],"); + json.append(" \"stack_trace\":\"[_root] InvalidIndexNameException[Invalid index name [_root], must not start with '_'.]\""); + json.append(" },"); + json.append(" \"type\":\"invalid_index_name_exception\","); + json.append(" \"reason\":\"Invalid index name [_foo], must not start with '_'.\","); + json.append(" \"index_uuid\":\"_na_foo\","); + json.append(" \"index\":[\"_foo\"],"); + json.append(" \"stack_trace\":\"[_foo] InvalidIndexNameException[Invalid index name [_foo], must not start with '_'.]\""); + json.append(" },"); + json.append(" \"status\":400"); + json.append("}"); + + RemoteFailure failure = RemoteFailure.parseFromResponse(new BytesArray(json.toString()).streamInput()); + assertEquals("invalid_index_name_exception", failure.type()); + assertEquals("Invalid index name [_foo], must not start with '_'.", 
failure.reason()); + assertThat(failure.remoteTrace(), + containsString("[_foo] InvalidIndexNameException[Invalid index name [_foo], must not start with '_'.]")); + assertEquals(emptyMap(), failure.headers()); + assertNotNull(failure.metadata()); + assertEquals(2, failure.metadata().size()); + assertEquals(singletonList("_na_foo"), failure.metadata().get("index_uuid")); + assertEquals(singletonList("_foo"), failure.metadata().get("index")); + + RemoteFailure cause = failure.cause(); + assertEquals("invalid_index_name_exception", cause.type()); + assertEquals("Invalid index name [_root], must not start with '_'.", cause.reason()); + assertThat(cause.remoteTrace(), + containsString("[_root] InvalidIndexNameException[Invalid index name [_root], must not start with '_'.]")); + assertEquals(emptyMap(), failure.headers()); + assertNotNull(cause.metadata()); + assertEquals(2, cause.metadata().size()); + assertEquals(singletonList("_na_root"), cause.metadata().get("index_uuid")); + assertEquals(Arrays.asList("_root", "_foo"), cause.metadata().get("index")); + } + + private RemoteFailure parse(String fileName) throws IOException { + try (InputStream in = Files.newInputStream(getDataPath("/remote_failure/" + fileName))) { + return RemoteFailure.parseFromResponse(in); + } + } +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/StringUtilsTests.java b/x-pack/plugin/sql/sql-shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/StringUtilsTests.java new file mode 100644 index 0000000000000..b758d361ab978 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/StringUtilsTests.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.xpack.sql.client.shared.StringUtils.nullAsEmpty; + +public class StringUtilsTests extends ESTestCase { + public void testNullAsEmpty() { + assertEquals("", nullAsEmpty(null)); + assertEquals("", nullAsEmpty("")); + String rando = randomRealisticUnicodeOfCodepointLength(5); + assertEquals(rando, nullAsEmpty(rando)); + } +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/UriUtilsTests.java b/x-pack/plugin/sql/sql-shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/UriUtilsTests.java new file mode 100644 index 0000000000000..f75b20d0f0d3f --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/UriUtilsTests.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.client.shared; + +import org.elasticsearch.test.ESTestCase; + +import java.net.URI; + +import static org.elasticsearch.xpack.sql.client.shared.UriUtils.parseURI; +import static org.elasticsearch.xpack.sql.client.shared.UriUtils.removeQuery; + +public class UriUtilsTests extends ESTestCase { + + public static URI DEFAULT_URI = URI.create("http://localhost:9200/"); + + public void testHostAndPort() throws Exception { + assertEquals(URI.create("http://server:9200/"), parseURI("server:9200", DEFAULT_URI)); + } + + public void testJustHost() throws Exception { + assertEquals(URI.create("http://server:9200/"), parseURI("server", DEFAULT_URI)); + } + + public void testHttpWithPort() throws Exception { + assertEquals(URI.create("http://server:9201/"), parseURI("http://server:9201", DEFAULT_URI)); + } + + public void testHttpsWithPort() throws Exception { + assertEquals(URI.create("https://server:9201/"), parseURI("https://server:9201", DEFAULT_URI)); + } + + public void testHttpNoPort() throws Exception { + assertEquals(URI.create("https://server:9200/"), parseURI("https://server", DEFAULT_URI)); + } + + public void testLocalhostV6() throws Exception { + assertEquals(URI.create("http://[::1]:51082/"), parseURI("[::1]:51082", DEFAULT_URI)); + } + + public void testHttpsWithUser() throws Exception { + assertEquals(URI.create("https://user@server:9200/"), parseURI("https://user@server", DEFAULT_URI)); + } + + public void testUserPassHost() throws Exception { + assertEquals(URI.create("http://user:password@server:9200/"), parseURI("user:password@server", DEFAULT_URI)); + } + + public void testHttpPath() throws Exception { + assertEquals(URI.create("https://server:9201/some_path"), parseURI("https://server:9201/some_path", DEFAULT_URI)); + } + + public void testHttpQuery() throws Exception { + assertEquals(URI.create("https://server:9201/?query"), parseURI("https://server:9201/?query", DEFAULT_URI)); + } + + public void testUnsupportedProtocol() throws Exception { + assertEquals( + "Invalid connection configuration [ftp://server:9201/]: Only http and https protocols are supported", + expectThrows(IllegalArgumentException.class, () -> parseURI("ftp://server:9201/", DEFAULT_URI)).getMessage() + ); + } + + public void testMalformed() throws Exception { + assertEquals( + "Invalid connection configuration []: Expected authority at index 7: http://", + expectThrows(IllegalArgumentException.class, () -> parseURI("", DEFAULT_URI)).getMessage() + ); + } + + public void testRemoveQuery() throws Exception { + assertEquals(URI.create("http://server:9100"), + removeQuery(URI.create("http://server:9100?query"), "http://server:9100?query", DEFAULT_URI)); + } + + public void testRemoveQueryTrailingSlash() throws Exception { + assertEquals(URI.create("http://server:9100/"), + removeQuery(URI.create("http://server:9100/?query"), "http://server:9100/?query", DEFAULT_URI)); + } + + public void testRemoveQueryNoQuery() throws Exception { + assertEquals(URI.create("http://server:9100"), + removeQuery(URI.create("http://server:9100"), "http://server:9100", DEFAULT_URI)); + } +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/VersionTests.java b/x-pack/plugin/sql/sql-shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/VersionTests.java new file mode 100644 index 0000000000000..9eabed07931a0 --- /dev/null +++ 
b/x-pack/plugin/sql/sql-shared-client/src/test/java/org/elasticsearch/xpack/sql/client/shared/VersionTests.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.client.shared; + +import org.elasticsearch.test.ESTestCase; + +public class VersionTests extends ESTestCase { + public void test70Version() { + byte[] ver = Version.from("7.0.0-alpha"); + assertEquals(7, ver[0]); + assertEquals(0, ver[1]); + assertEquals(0, ver[2]); + } + + public void test712Version() { + byte[] ver = Version.from("7.1.2"); + assertEquals(7, ver[0]); + assertEquals(1, ver[1]); + assertEquals(2, ver[2]); + } + + public void testCurrent() { + Version ver = Version.fromString(org.elasticsearch.Version.CURRENT.toString()); + assertEquals(org.elasticsearch.Version.CURRENT.major, ver.major); + assertEquals(org.elasticsearch.Version.CURRENT.minor, ver.minor); + assertEquals(org.elasticsearch.Version.CURRENT.revision, ver.revision); + } + + public void testFromString() { + Version ver = Version.fromString("1.2.3"); + assertEquals(1, ver.major); + assertEquals(2, ver.minor); + assertEquals(3, ver.revision); + assertEquals("Unknown", ver.hash); + assertEquals("1.2.3", ver.version); + } + + public void testInvalidVersion() { + IllegalArgumentException err = expectThrows(IllegalArgumentException.class, () -> Version.from("7.1")); + assertEquals("Invalid version 7.1", err.getMessage()); + } +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/basic.json b/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/basic.json new file mode 100644 index 0000000000000..2e2e157fa0b15 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/basic.json @@ -0,0 +1,15 @@ +{ + "error" : { + "root_cause" : [ + { + "type" : "illegal_argument_exception", + "reason" : "[sql/query] unknown field [test], parser not found", + "stack_trace" : "[[sql/query] unknown field [test], parser not found]; nested: IllegalArgumentException[[sql/query] unknown field [test], parser not found];\n\tat org.elasticsearch.ElasticsearchException.guessRootCauses(ElasticsearchException.java:618)\n\tat org.elasticsearch.ElasticsearchException.generateFailureXContent(ElasticsearchException.java:563)\n\tat org.elasticsearch.rest.BytesRestResponse.build(BytesRestResponse.java:138)\n\tat org.elasticsearch.rest.BytesRestResponse.(BytesRestResponse.java:96)\n\tat org.elasticsearch.rest.BytesRestResponse.(BytesRestResponse.java:91)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:243)\n\tat org.elasticsearch.rest.RestController.tryAllHandlers(RestController.java:336)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:174)\n\tat org.elasticsearch.http.netty4.Netty4HttpServerTransport.dispatchRequest(Netty4HttpServerTransport.java:469)\n\tat org.elasticsearch.http.netty4.Netty4HttpRequestHandler.channelRead0(Netty4HttpRequestHandler.java:72)\n\tat io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat 
io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler.channelRead(HttpPipeliningHandler.java:68)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)\n\tat io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:86)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926)\n\tat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644)\n\tat 
io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:544)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:498)\n\tat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458)\n\tat io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: java.lang.IllegalArgumentException: [sql/query] unknown field [test], parser not found\n\tat org.elasticsearch.common.xcontent.ObjectParser.getParser(ObjectParser.java:346)\n\tat org.elasticsearch.common.xcontent.ObjectParser.parse(ObjectParser.java:159)\n\tat org.elasticsearch.common.xcontent.ObjectParser.apply(ObjectParser.java:183)\n\tat org.elasticsearch.xpack.sql.plugin.sql.rest.RestSqlAction.prepareRequest(RestSqlAction.java:47)\n\tat org.elasticsearch.rest.BaseRestHandler.handleRequest(BaseRestHandler.java:80)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:240)\n\t... 45 more\n" + } + ], + "type" : "illegal_argument_exception", + "reason" : "[sql/query] unknown field [test], parser not found", + "stack_trace" : "java.lang.IllegalArgumentException: [sql/query] unknown field [test], parser not found\n\tat org.elasticsearch.common.xcontent.ObjectParser.getParser(ObjectParser.java:346)\n\tat org.elasticsearch.common.xcontent.ObjectParser.parse(ObjectParser.java:159)\n\tat org.elasticsearch.common.xcontent.ObjectParser.apply(ObjectParser.java:183)\n\tat org.elasticsearch.xpack.sql.plugin.sql.rest.RestSqlAction.prepareRequest(RestSqlAction.java:47)\n\tat org.elasticsearch.rest.BaseRestHandler.handleRequest(BaseRestHandler.java:80)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:240)\n\tat org.elasticsearch.rest.RestController.tryAllHandlers(RestController.java:336)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:174)\n\tat org.elasticsearch.http.netty4.Netty4HttpServerTransport.dispatchRequest(Netty4HttpServerTransport.java:469)\n\tat org.elasticsearch.http.netty4.Netty4HttpRequestHandler.channelRead0(Netty4HttpRequestHandler.java:72)\n\tat io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler.channelRead(HttpPipeliningHandler.java:68)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat 
io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)\n\tat io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:86)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926)\n\tat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:544)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:498)\n\tat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458)\n\tat io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858)\n\tat java.lang.Thread.run(Thread.java:748)\n" + }, + "status" : 400 +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/bogus_error.json b/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/bogus_error.json new file mode 100644 index 0000000000000..f79361cec1c95 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/bogus_error.json @@ -0,0 +1 @@ +{ "error": "bogus" } \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/invalid_json.txt b/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/invalid_json.txt new file mode 100644 index 0000000000000..e7da6bcf1ab0b --- /dev/null +++ 
b/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/invalid_json.txt @@ -0,0 +1 @@ +I'm not json at all \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/missing_auth.json b/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/missing_auth.json new file mode 100644 index 0000000000000..3d2927f85d6b1 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/missing_auth.json @@ -0,0 +1,21 @@ +{ + "error" : { + "root_cause" : [ + { + "type" : "security_exception", + "reason" : "missing authentication token for REST request [/?pretty&error_trace]", + "header" : { + "WWW-Authenticate" : "Basic realm=\"security\" charset=\"UTF-8\"" + }, + "stack_trace" : "ElasticsearchSecurityException[missing authentication token for REST request [/?pretty&error_trace]]\n\tat org.elasticsearch.xpack.security.support.Exceptions.authenticationError(Exceptions.java:36)\n\tat org.elasticsearch.xpack.security.authc.DefaultAuthenticationFailureHandler.missingToken(DefaultAuthenticationFailureHandler.java:69)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$AuditableRestRequest.anonymousAccessDenied(AuthenticationService.java:603)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$handleNullToken$17(AuthenticationService.java:357)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.handleNullToken(AuthenticationService.java:362)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.consumeToken(AuthenticationService.java:277)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$extractToken$7(AuthenticationService.java:249)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.extractToken(AuthenticationService.java:266)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$null$0(AuthenticationService.java:201)\n\tat org.elasticsearch.action.ActionListener$1.onResponse(ActionListener.java:59)\n\tat org.elasticsearch.xpack.security.authc.TokenService.getAndValidateToken(TokenService.java:230)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$authenticateAsync$2(AuthenticationService.java:197)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$lookForExistingAuthentication$4(AuthenticationService.java:228)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lookForExistingAuthentication(AuthenticationService.java:239)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.authenticateAsync(AuthenticationService.java:193)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.access$000(AuthenticationService.java:147)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService.authenticate(AuthenticationService.java:99)\n\tat org.elasticsearch.xpack.security.rest.SecurityRestFilter.handleRequest(SecurityRestFilter.java:69)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:240)\n\tat org.elasticsearch.rest.RestController.tryAllHandlers(RestController.java:336)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:174)\n\tat org.elasticsearch.http.netty4.Netty4HttpServerTransport.dispatchRequest(Netty4HttpServerTransport.java:469)\n\tat 
org.elasticsearch.http.netty4.Netty4HttpRequestHandler.channelRead0(Netty4HttpRequestHandler.java:80)\n\tat io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler.channelRead(HttpPipeliningHandler.java:68)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)\n\tat io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:86)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat 
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926)\n\tat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:544)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:498)\n\tat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458)\n\tat io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858)\n\tat java.lang.Thread.run(Thread.java:748)\n" + } + ], + "type" : "security_exception", + "reason" : "missing authentication token for REST request [/?pretty&error_trace]", + "header" : { + "WWW-Authenticate" : "Basic realm=\"security\" charset=\"UTF-8\"" + }, + "stack_trace" : "ElasticsearchSecurityException[missing authentication token for REST request [/?pretty&error_trace]]\n\tat org.elasticsearch.xpack.security.support.Exceptions.authenticationError(Exceptions.java:36)\n\tat org.elasticsearch.xpack.security.authc.DefaultAuthenticationFailureHandler.missingToken(DefaultAuthenticationFailureHandler.java:69)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$AuditableRestRequest.anonymousAccessDenied(AuthenticationService.java:603)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$handleNullToken$17(AuthenticationService.java:357)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.handleNullToken(AuthenticationService.java:362)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.consumeToken(AuthenticationService.java:277)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$extractToken$7(AuthenticationService.java:249)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.extractToken(AuthenticationService.java:266)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$null$0(AuthenticationService.java:201)\n\tat org.elasticsearch.action.ActionListener$1.onResponse(ActionListener.java:59)\n\tat org.elasticsearch.xpack.security.authc.TokenService.getAndValidateToken(TokenService.java:230)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$authenticateAsync$2(AuthenticationService.java:197)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lambda$lookForExistingAuthentication$4(AuthenticationService.java:228)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.lookForExistingAuthentication(AuthenticationService.java:239)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.authenticateAsync(AuthenticationService.java:193)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService$Authenticator.access$000(AuthenticationService.java:147)\n\tat org.elasticsearch.xpack.security.authc.AuthenticationService.authenticate(AuthenticationService.java:99)\n\tat org.elasticsearch.xpack.security.rest.SecurityRestFilter.handleRequest(SecurityRestFilter.java:69)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:240)\n\tat org.elasticsearch.rest.RestController.tryAllHandlers(RestController.java:336)\n\tat 
org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:174)\n\tat org.elasticsearch.http.netty4.Netty4HttpServerTransport.dispatchRequest(Netty4HttpServerTransport.java:469)\n\tat org.elasticsearch.http.netty4.Netty4HttpRequestHandler.channelRead0(Netty4HttpRequestHandler.java:80)\n\tat io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler.channelRead(HttpPipeliningHandler.java:68)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)\n\tat io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:86)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat 
io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926)\n\tat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:544)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:498)\n\tat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458)\n\tat io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858)\n\tat java.lang.Thread.run(Thread.java:748)\n" + }, + "status" : 401 +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/nested.json b/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/nested.json new file mode 100644 index 0000000000000..1b8d0cd02c7e2 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/nested.json @@ -0,0 +1,21 @@ +{ + "error" : { + "root_cause" : [ + { + "type" : "parsing_exception", + "reason" : "line 1:1: no viable alternative at input 'test'", + "stack_trace" : "ParsingException[line 1:1: no viable alternative at input 'test']; nested: NoViableAltException;\n\tat org.elasticsearch.xpack.sql.parser.SqlParser$1.syntaxError(SqlParser.java:151)\n\tat org.antlr.v4.runtime.ProxyErrorListener.syntaxError(ProxyErrorListener.java:65)\n\tat org.antlr.v4.runtime.Parser.notifyErrorListeners(Parser.java:566)\n\tat org.antlr.v4.runtime.DefaultErrorStrategy.reportNoViableAlternative(DefaultErrorStrategy.java:308)\n\tat org.antlr.v4.runtime.DefaultErrorStrategy.reportError(DefaultErrorStrategy.java:145)\n\tat org.elasticsearch.xpack.sql.parser.SqlBaseParser.statement(SqlBaseParser.java:726)\n\tat org.elasticsearch.xpack.sql.parser.SqlBaseParser.singleStatement(SqlBaseParser.java:173)\n\tat org.elasticsearch.xpack.sql.parser.SqlParser.invokeParser(SqlParser.java:81)\n\tat org.elasticsearch.xpack.sql.parser.SqlParser.createStatement(SqlParser.java:43)\n\tat org.elasticsearch.xpack.sql.session.SqlSession.parse(SqlSession.java:121)\n\tat org.elasticsearch.xpack.sql.session.SqlSession.executable(SqlSession.java:144)\n\tat org.elasticsearch.xpack.sql.execution.PlanExecutor.sql(PlanExecutor.java:89)\n\tat org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction.operation(TransportSqlAction.java:71)\n\tat org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction.doExecute(TransportSqlAction.java:60)\n\tat org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction.doExecute(TransportSqlAction.java:41)\n\tat org.elasticsearch.action.support.TransportAction.doExecute(TransportAction.java:143)\n\tat org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:167)\n\tat org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:139)\n\tat org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:81)\n\tat org.elasticsearch.client.node.NodeClient.executeLocally(NodeClient.java:83)\n\tat 
org.elasticsearch.xpack.sql.plugin.sql.rest.RestSqlAction.lambda$prepareRequest$0(RestSqlAction.java:50)\n\tat org.elasticsearch.rest.BaseRestHandler.handleRequest(BaseRestHandler.java:97)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:240)\n\tat org.elasticsearch.rest.RestController.tryAllHandlers(RestController.java:336)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:174)\n\tat org.elasticsearch.http.netty4.Netty4HttpServerTransport.dispatchRequest(Netty4HttpServerTransport.java:469)\n\tat org.elasticsearch.http.netty4.Netty4HttpRequestHandler.channelRead0(Netty4HttpRequestHandler.java:72)\n\tat io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler.channelRead(HttpPipeliningHandler.java:68)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)\n\tat io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:86)\n\tat 
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926)\n\tat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:544)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:498)\n\tat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458)\n\tat io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: org.antlr.v4.runtime.NoViableAltException\n\tat org.antlr.v4.runtime.atn.ParserATNSimulator.noViableAlt(ParserATNSimulator.java:1886)\n\tat org.antlr.v4.runtime.atn.ParserATNSimulator.execATN(ParserATNSimulator.java:486)\n\tat org.antlr.v4.runtime.atn.ParserATNSimulator.adaptivePredict(ParserATNSimulator.java:412)\n\tat org.elasticsearch.xpack.sql.parser.SqlBaseParser.statement(SqlBaseParser.java:477)\n\t... 62 more\n" + } + ], + "type" : "parsing_exception", + "reason" : "line 1:1: no viable alternative at input 'test'", + "caused_by" : { + "type" : "no_viable_alt_exception", + "reason" : null, + "stack_trace" : "org.antlr.v4.runtime.NoViableAltException\n\tat org.antlr.v4.runtime.atn.ParserATNSimulator.noViableAlt(ParserATNSimulator.java:1886)\n\tat org.antlr.v4.runtime.atn.ParserATNSimulator.execATN(ParserATNSimulator.java:486)\n\tat org.antlr.v4.runtime.atn.ParserATNSimulator.adaptivePredict(ParserATNSimulator.java:412)\n\tat org.elasticsearch.xpack.sql.parser.SqlBaseParser.statement(SqlBaseParser.java:477)\n\tat org.elasticsearch.xpack.sql.parser.SqlBaseParser.singleStatement(SqlBaseParser.java:173)\n\tat org.elasticsearch.xpack.sql.parser.SqlParser.invokeParser(SqlParser.java:81)\n\tat org.elasticsearch.xpack.sql.parser.SqlParser.createStatement(SqlParser.java:43)\n\tat org.elasticsearch.xpack.sql.session.SqlSession.parse(SqlSession.java:121)\n\tat org.elasticsearch.xpack.sql.session.SqlSession.executable(SqlSession.java:144)\n\tat org.elasticsearch.xpack.sql.execution.PlanExecutor.sql(PlanExecutor.java:89)\n\tat org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction.operation(TransportSqlAction.java:71)\n\tat org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction.doExecute(TransportSqlAction.java:60)\n\tat org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction.doExecute(TransportSqlAction.java:41)\n\tat org.elasticsearch.action.support.TransportAction.doExecute(TransportAction.java:143)\n\tat org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:167)\n\tat org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:139)\n\tat 
org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:81)\n\tat org.elasticsearch.client.node.NodeClient.executeLocally(NodeClient.java:83)\n\tat org.elasticsearch.xpack.sql.plugin.sql.rest.RestSqlAction.lambda$prepareRequest$0(RestSqlAction.java:50)\n\tat org.elasticsearch.rest.BaseRestHandler.handleRequest(BaseRestHandler.java:97)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:240)\n\tat org.elasticsearch.rest.RestController.tryAllHandlers(RestController.java:336)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:174)\n\tat org.elasticsearch.http.netty4.Netty4HttpServerTransport.dispatchRequest(Netty4HttpServerTransport.java:469)\n\tat org.elasticsearch.http.netty4.Netty4HttpRequestHandler.channelRead0(Netty4HttpRequestHandler.java:72)\n\tat io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler.channelRead(HttpPipeliningHandler.java:68)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)\n\tat io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat 
io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:86)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926)\n\tat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:544)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:498)\n\tat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458)\n\tat io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858)\n\tat java.lang.Thread.run(Thread.java:748)\n" + }, + "stack_trace" : "ParsingException[line 1:1: no viable alternative at input 'test']; nested: NoViableAltException;\n\tat org.elasticsearch.xpack.sql.parser.SqlParser$1.syntaxError(SqlParser.java:151)\n\tat org.antlr.v4.runtime.ProxyErrorListener.syntaxError(ProxyErrorListener.java:65)\n\tat org.antlr.v4.runtime.Parser.notifyErrorListeners(Parser.java:566)\n\tat org.antlr.v4.runtime.DefaultErrorStrategy.reportNoViableAlternative(DefaultErrorStrategy.java:308)\n\tat org.antlr.v4.runtime.DefaultErrorStrategy.reportError(DefaultErrorStrategy.java:145)\n\tat org.elasticsearch.xpack.sql.parser.SqlBaseParser.statement(SqlBaseParser.java:726)\n\tat org.elasticsearch.xpack.sql.parser.SqlBaseParser.singleStatement(SqlBaseParser.java:173)\n\tat org.elasticsearch.xpack.sql.parser.SqlParser.invokeParser(SqlParser.java:81)\n\tat org.elasticsearch.xpack.sql.parser.SqlParser.createStatement(SqlParser.java:43)\n\tat org.elasticsearch.xpack.sql.session.SqlSession.parse(SqlSession.java:121)\n\tat org.elasticsearch.xpack.sql.session.SqlSession.executable(SqlSession.java:144)\n\tat org.elasticsearch.xpack.sql.execution.PlanExecutor.sql(PlanExecutor.java:89)\n\tat org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction.operation(TransportSqlAction.java:71)\n\tat org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction.doExecute(TransportSqlAction.java:60)\n\tat org.elasticsearch.xpack.sql.plugin.sql.action.TransportSqlAction.doExecute(TransportSqlAction.java:41)\n\tat org.elasticsearch.action.support.TransportAction.doExecute(TransportAction.java:143)\n\tat org.elasticsearch.action.support.TransportAction$RequestFilterChain.proceed(TransportAction.java:167)\n\tat org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:139)\n\tat org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:81)\n\tat org.elasticsearch.client.node.NodeClient.executeLocally(NodeClient.java:83)\n\tat 
org.elasticsearch.xpack.sql.plugin.sql.rest.RestSqlAction.lambda$prepareRequest$0(RestSqlAction.java:50)\n\tat org.elasticsearch.rest.BaseRestHandler.handleRequest(BaseRestHandler.java:97)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:240)\n\tat org.elasticsearch.rest.RestController.tryAllHandlers(RestController.java:336)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:174)\n\tat org.elasticsearch.http.netty4.Netty4HttpServerTransport.dispatchRequest(Netty4HttpServerTransport.java:469)\n\tat org.elasticsearch.http.netty4.Netty4HttpRequestHandler.channelRead0(Netty4HttpRequestHandler.java:72)\n\tat io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler.channelRead(HttpPipeliningHandler.java:68)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)\n\tat io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:86)\n\tat 
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926)\n\tat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:544)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:498)\n\tat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458)\n\tat io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: org.antlr.v4.runtime.NoViableAltException\n\tat org.antlr.v4.runtime.atn.ParserATNSimulator.noViableAlt(ParserATNSimulator.java:1886)\n\tat org.antlr.v4.runtime.atn.ParserATNSimulator.execATN(ParserATNSimulator.java:486)\n\tat org.antlr.v4.runtime.atn.ParserATNSimulator.adaptivePredict(ParserATNSimulator.java:412)\n\tat org.elasticsearch.xpack.sql.parser.SqlBaseParser.statement(SqlBaseParser.java:477)\n\t... 62 more\n" + }, + "status" : 500 +} + \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/no_error.json b/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/no_error.json new file mode 100644 index 0000000000000..9e26dfeeb6e64 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/no_error.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/no_stack.json b/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/no_stack.json new file mode 100644 index 0000000000000..2302729a9886c --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/no_stack.json @@ -0,0 +1,6 @@ +{ + "error" : { + "type" : "illegal_argument_exception", + "reason" : "[sql/query] unknown field [test], parser not found" + } +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/no_type.json b/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/no_type.json new file mode 100644 index 0000000000000..fe453fa3d86f9 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/test/resources/remote_failure/no_type.json @@ -0,0 +1,6 @@ +{ + "error" : { + "reason" : "[sql/query] unknown field [test], parser not found", + "stack_trace" : "java.lang.IllegalArgumentException: [sql/query] unknown field [test], parser not found\n\tat org.elasticsearch.common.xcontent.ObjectParser.getParser(ObjectParser.java:346)\n\tat org.elasticsearch.common.xcontent.ObjectParser.parse(ObjectParser.java:159)\n\tat org.elasticsearch.common.xcontent.ObjectParser.apply(ObjectParser.java:183)\n\tat 
org.elasticsearch.xpack.sql.plugin.sql.rest.RestSqlAction.prepareRequest(RestSqlAction.java:47)\n\tat org.elasticsearch.rest.BaseRestHandler.handleRequest(BaseRestHandler.java:80)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:240)\n\tat org.elasticsearch.rest.RestController.tryAllHandlers(RestController.java:336)\n\tat org.elasticsearch.rest.RestController.dispatchRequest(RestController.java:174)\n\tat org.elasticsearch.http.netty4.Netty4HttpServerTransport.dispatchRequest(Netty4HttpServerTransport.java:469)\n\tat org.elasticsearch.http.netty4.Netty4HttpRequestHandler.channelRead0(Netty4HttpRequestHandler.java:72)\n\tat io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler.channelRead(HttpPipeliningHandler.java:68)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.handler.codec.MessageToMessageCodec.channelRead(MessageToMessageCodec.java:111)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:310)\n\tat io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:284)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.ChannelInboundHandlerAdapter.channelRead(ChannelInboundHandlerAdapter.java:86)\n\tat 
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:340)\n\tat io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1334)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:362)\n\tat io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:348)\n\tat io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:926)\n\tat io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:134)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:644)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain(NioEventLoop.java:544)\n\tat io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:498)\n\tat io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:458)\n\tat io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:858)\n\tat java.lang.Thread.run(Thread.java:748)\n" + } +} diff --git a/x-pack/plugin/sql/sql-shared-client/src/test/resources/ssl/client.keystore b/x-pack/plugin/sql/sql-shared-client/src/test/resources/ssl/client.keystore new file mode 100644 index 0000000000000..07d4463623440 Binary files /dev/null and b/x-pack/plugin/sql/sql-shared-client/src/test/resources/ssl/client.keystore differ diff --git a/x-pack/plugin/sql/sql-shared-client/src/test/resources/ssl/readme.txt b/x-pack/plugin/sql/sql-shared-client/src/test/resources/ssl/readme.txt new file mode 100644 index 0000000000000..769aa43abf0a2 --- /dev/null +++ b/x-pack/plugin/sql/sql-shared-client/src/test/resources/ssl/readme.txt @@ -0,0 +1,13 @@ +# setup of the SSL files + +# generate keys for server and client +$ keytool -v -genkey -keyalg rsa -alias server -keypass password -keystore server.keystore -storepass password -validity 99999 -ext SAN=dns:localhost,ip:127.0.0.1 +$ keytool -v -genkey -keyalg rsa -alias client -keypass password -keystore client.keystore -storepass password -validity 99999 -ext SAN=dns:localhost,ip:127.0.0.1 + +# generate certificates +$ keytool -v -export -alias server -file server.crt -keystore server.keystore -storepass password +$ keytool -v -export -alias client -file client.crt -keystore client.keystore -storepass password + +# import the client cert into the server keystore and vice-versa +$ keytool -v -importcert -alias client -file client.crt -keystore server.keystore -storepass password +$ keytool -v -importcert -alias server -file server.crt -keystore client.keystore -storepass password \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-shared-client/src/test/resources/ssl/server.keystore b/x-pack/plugin/sql/sql-shared-client/src/test/resources/ssl/server.keystore new file mode 100644 index 0000000000000..3a2e80d77bb5b Binary files /dev/null and b/x-pack/plugin/sql/sql-shared-client/src/test/resources/ssl/server.keystore differ diff --git a/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 b/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 new file mode 100644 index 0000000000000..ea0b7da161c1e --- /dev/null +++ b/x-pack/plugin/sql/src/main/antlr/SqlBase.g4 @@ -0,0 +1,434 @@ +/* + * [2017] Elasticsearch Incorporated. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Fork of Facebook Presto Parser - significantly trimmed down and adjusted for ES */ +/** presto-parser/src/main/antlr4/com/facebook/presto/sql/parser/SqlBase.g4 grammar */ + +grammar SqlBase; + +tokens { + DELIMITER +} + +singleStatement + : statement EOF + ; + +singleExpression + : expression EOF + ; + +statement + : query #statementDefault + | EXPLAIN + ('(' + ( + PLAN type=(PARSED | ANALYZED | OPTIMIZED | MAPPED | EXECUTABLE | ALL) + | FORMAT format=(TEXT | GRAPHVIZ) + | VERIFY verify=booleanValue + )* + ')')? + statement #explain + | DEBUG + ('(' + ( + PLAN type=(ANALYZED | OPTIMIZED) + | FORMAT format=(TEXT | GRAPHVIZ) + )* + ')')? + statement #debug + | SHOW TABLES (LIKE? pattern)? #showTables + | SHOW COLUMNS (FROM | IN) tableIdentifier #showColumns + | (DESCRIBE | DESC) tableIdentifier #showColumns + | SHOW FUNCTIONS (LIKE? pattern)? #showFunctions + | SHOW SCHEMAS #showSchemas + | SYS CATALOGS #sysCatalogs + | SYS TABLES (CATALOG LIKE? clusterPattern=pattern)? + (LIKE? tablePattern=pattern)? + (TYPE string (',' string)* )? #sysTables + | SYS COLUMNS (CATALOG cluster=string)? + (TABLE LIKE? indexPattern=pattern)? + (LIKE? columnPattern=pattern)? #sysColumns + | SYS TYPES #sysTypes + | SYS TABLE TYPES #sysTableTypes + ; + +query + : (WITH namedQuery (',' namedQuery)*)? queryNoWith + ; + +queryNoWith + : queryTerm + /** we could add sort by - sort per partition */ + (ORDER BY orderBy (',' orderBy)*)? + (LIMIT limit=(INTEGER_VALUE | ALL))? + ; + +queryTerm + : querySpecification #queryPrimaryDefault + | '(' queryNoWith ')' #subquery + ; + +orderBy + : expression ordering=(ASC | DESC)? + ; + +querySpecification + : SELECT setQuantifier? selectItem (',' selectItem)* + fromClause? + (WHERE where=booleanExpression)? + (GROUP BY groupBy)? + (HAVING having=booleanExpression)? + ; + +fromClause + : FROM relation (',' relation)* + ; + +groupBy + : setQuantifier? groupingElement (',' groupingElement)* + ; + +groupingElement + : groupingExpressions #singleGroupingSet + ; + +groupingExpressions + : '(' (expression (',' expression)*)? ')' + | expression + ; + +namedQuery + : name=identifier AS '(' queryNoWith ')' + ; + +setQuantifier + : DISTINCT + | ALL + ; + +selectItem + : expression (AS? identifier)? #selectExpression + ; + +relation + : relationPrimary joinRelation* + ; + +joinRelation + : (joinType) JOIN right=relationPrimary joinCriteria? + | NATURAL joinType JOIN right=relationPrimary + ; + +joinType + : INNER? + | LEFT OUTER? + | RIGHT OUTER? + | FULL OUTER? + ; + +joinCriteria + : ON booleanExpression + | USING '(' identifier (',' identifier)* ')' + ; + +relationPrimary + : tableIdentifier (AS? qualifiedName)? #tableName + | '(' queryNoWith ')' (AS? qualifiedName)? #aliasedQuery + | '(' relation ')' (AS? qualifiedName)? 
#aliasedRelation + ; + +expression + : booleanExpression + ; + +booleanExpression + : NOT booleanExpression #logicalNot + | EXISTS '(' query ')' #exists + | QUERY '(' queryString=string (',' options=string)* ')' #stringQuery + | MATCH '(' singleField=qualifiedName ',' queryString=string (',' options=string)* ')' #matchQuery + | MATCH '(' multiFields=string ',' queryString=string (',' options=string)* ')' #multiMatchQuery + | predicated #booleanDefault + | left=booleanExpression operator=AND right=booleanExpression #logicalBinary + | left=booleanExpression operator=OR right=booleanExpression #logicalBinary + ; + +// workaround for: +// https://github.com/antlr/antlr4/issues/780 +// https://github.com/antlr/antlr4/issues/781 +predicated + : valueExpression predicate? + ; + +// dedicated calls for each branch are not used to reuse the NOT handling across them +// instead the property kind is used to differentiate +predicate + : NOT? kind=BETWEEN lower=valueExpression AND upper=valueExpression + | NOT? kind=IN '(' expression (',' expression)* ')' + | NOT? kind=IN '(' query ')' + | NOT? kind=LIKE pattern + | NOT? kind=RLIKE regex=string + | IS NOT? kind=NULL + ; + +pattern + : value=string (ESCAPE escape=string)? + ; + +valueExpression + : primaryExpression #valueExpressionDefault + | operator=(MINUS | PLUS) valueExpression #arithmeticUnary + | left=valueExpression operator=(ASTERISK | SLASH | PERCENT) right=valueExpression #arithmeticBinary + | left=valueExpression operator=(PLUS | MINUS) right=valueExpression #arithmeticBinary + | left=valueExpression comparisonOperator right=valueExpression #comparison + ; + +primaryExpression + : CAST '(' expression AS dataType ')' #cast + | EXTRACT '(' field=identifier FROM valueExpression ')' #extract + | constant #constantDefault + | ASTERISK #star + | (qualifiedName DOT)? ASTERISK #star + | identifier '(' (setQuantifier? expression (',' expression)*)? ')' #functionCall + | '(' query ')' #subqueryExpression + | identifier #columnReference + | qualifiedName #dereference + | '(' expression ')' #parenthesizedExpression + ; + + +constant + : NULL #nullLiteral + | number #numericLiteral + | booleanValue #booleanLiteral + | STRING+ #stringLiteral + | PARAM #paramLiteral + ; + +comparisonOperator + : EQ | NEQ | LT | LTE | GT | GTE + ; + +booleanValue + : TRUE | FALSE + ; + +dataType + : identifier #primitiveDataType + ; + +qualifiedName + : (identifier DOT)* identifier + ; + +identifier + : quoteIdentifier + | unquoteIdentifier + ; + +tableIdentifier + : (catalog=identifier ':')? TABLE_IDENTIFIER + | (catalog=identifier ':')? 
name=identifier + ; + +quoteIdentifier + : QUOTED_IDENTIFIER #quotedIdentifier + | BACKQUOTED_IDENTIFIER #backQuotedIdentifier + ; + +unquoteIdentifier + : IDENTIFIER #unquotedIdentifier + | nonReserved #unquotedIdentifier + | DIGIT_IDENTIFIER #digitIdentifier + ; + +number + : DECIMAL_VALUE #decimalLiteral + | INTEGER_VALUE #integerLiteral + ; + +string + : PARAM + | STRING + ; + +// http://developer.mimer.se/validator/sql-reserved-words.tml +nonReserved + : ANALYZE | ANALYZED + | CATALOGS | COLUMNS + | DEBUG + | EXECUTABLE | EXPLAIN + | FORMAT | FUNCTIONS + | GRAPHVIZ + | MAPPED + | OPTIMIZED + | PARSED | PHYSICAL | PLAN + | QUERY + | RLIKE + | SCHEMAS | SHOW | SYS + | TABLES | TEXT | TYPE | TYPES + | VERIFY + ; + +ALL: 'ALL'; +ANALYZE: 'ANALYZE'; +ANALYZED: 'ANALYZED'; +AND: 'AND'; +ANY: 'ANY'; +AS: 'AS'; +ASC: 'ASC'; +BETWEEN: 'BETWEEN'; +BY: 'BY'; +CAST: 'CAST'; +CATALOG: 'CATALOG'; +CATALOGS: 'CATALOGS'; +COLUMNS: 'COLUMNS'; +DEBUG: 'DEBUG'; +DESC: 'DESC'; +DESCRIBE: 'DESCRIBE'; +DISTINCT: 'DISTINCT'; +ESCAPE: 'ESCAPE'; +EXECUTABLE: 'EXECUTABLE'; +EXISTS: 'EXISTS'; +EXPLAIN: 'EXPLAIN'; +EXTRACT: 'EXTRACT'; +FALSE: 'FALSE'; +FORMAT: 'FORMAT'; +FROM: 'FROM'; +FULL: 'FULL'; +FUNCTIONS: 'FUNCTIONS'; +GRAPHVIZ: 'GRAPHVIZ'; +GROUP: 'GROUP'; +HAVING: 'HAVING'; +IN: 'IN'; +INNER: 'INNER'; +IS: 'IS'; +JOIN: 'JOIN'; +LEFT: 'LEFT'; +LIKE: 'LIKE'; +LIMIT: 'LIMIT'; +MAPPED: 'MAPPED'; +MATCH: 'MATCH'; +NATURAL: 'NATURAL'; +NOT: 'NOT'; +NULL: 'NULL'; +ON: 'ON'; +OPTIMIZED: 'OPTIMIZED'; +OR: 'OR'; +ORDER: 'ORDER'; +OUTER: 'OUTER'; +PARSED: 'PARSED'; +PHYSICAL: 'PHYSICAL'; +PLAN: 'PLAN'; +RIGHT: 'RIGHT'; +RLIKE: 'RLIKE'; +QUERY: 'QUERY'; +SCHEMAS: 'SCHEMAS'; +SELECT: 'SELECT'; +SHOW: 'SHOW'; +SYS: 'SYS'; +TABLE: 'TABLE'; +TABLES: 'TABLES'; +TEXT: 'TEXT'; +TRUE: 'TRUE'; +TYPE: 'TYPE'; +TYPES: 'TYPES'; +USING: 'USING'; +VERIFY: 'VERIFY'; +WHERE: 'WHERE'; +WITH: 'WITH'; + +EQ : '='; +NEQ : '<>' | '!=' | '<=>'; +LT : '<'; +LTE : '<='; +GT : '>'; +GTE : '>='; + +PLUS: '+'; +MINUS: '-'; +ASTERISK: '*'; +SLASH: '/'; +PERCENT: '%'; +CONCAT: '||'; +DOT: '.'; +PARAM: '?'; + +STRING + : '\'' ( ~'\'' | '\'\'' )* '\'' + ; + +INTEGER_VALUE + : DIGIT+ + ; + +DECIMAL_VALUE + : DIGIT+ DOT DIGIT* + | DOT DIGIT+ + | DIGIT+ (DOT DIGIT*)? EXPONENT + | DOT DIGIT+ EXPONENT + ; + +IDENTIFIER + : (LETTER | '_') (LETTER | DIGIT | '_' | '@' )* + ; + +DIGIT_IDENTIFIER + : DIGIT (LETTER | DIGIT | '_' | '@' | ':')+ + ; + +TABLE_IDENTIFIER + : (LETTER | DIGIT | '_' | '@' | ASTERISK)+ + ; + +QUOTED_IDENTIFIER + : '"' ( ~'"' | '""' )* '"' + ; + +BACKQUOTED_IDENTIFIER + : '`' ( ~'`' | '``' )* '`' + ; + +fragment EXPONENT + : 'E' [+-]? DIGIT+ + ; + +fragment DIGIT + : [0-9] + ; + +fragment LETTER + : [A-Z] + ; + +SIMPLE_COMMENT + : '--' ~[\r\n]* '\r'? '\n'? -> channel(HIDDEN) + ; + +BRACKETED_COMMENT + : '/*' (BRACKETED_COMMENT|.)*? '*/' -> channel(HIDDEN) + ; + +WS + : [ \r\n\t]+ -> channel(HIDDEN) + ; + +// Catch-all for anything we can't recognize. +// We use this to be able to ignore and recover all the text +// when splitting statements with DelimiterLexer +UNRECOGNIZED + : . 
+ ; diff --git a/x-pack/plugin/sql/src/main/antlr/SqlBase.tokens b/x-pack/plugin/sql/src/main/antlr/SqlBase.tokens new file mode 100644 index 0000000000000..87cf9a4809d4a --- /dev/null +++ b/x-pack/plugin/sql/src/main/antlr/SqlBase.tokens @@ -0,0 +1,182 @@ +T__0=1 +T__1=2 +T__2=3 +T__3=4 +ALL=5 +ANALYZE=6 +ANALYZED=7 +AND=8 +ANY=9 +AS=10 +ASC=11 +BETWEEN=12 +BY=13 +CAST=14 +CATALOG=15 +CATALOGS=16 +COLUMNS=17 +DEBUG=18 +DESC=19 +DESCRIBE=20 +DISTINCT=21 +ESCAPE=22 +EXECUTABLE=23 +EXISTS=24 +EXPLAIN=25 +EXTRACT=26 +FALSE=27 +FORMAT=28 +FROM=29 +FULL=30 +FUNCTIONS=31 +GRAPHVIZ=32 +GROUP=33 +HAVING=34 +IN=35 +INNER=36 +IS=37 +JOIN=38 +LEFT=39 +LIKE=40 +LIMIT=41 +MAPPED=42 +MATCH=43 +NATURAL=44 +NOT=45 +NULL=46 +ON=47 +OPTIMIZED=48 +OR=49 +ORDER=50 +OUTER=51 +PARSED=52 +PHYSICAL=53 +PLAN=54 +RIGHT=55 +RLIKE=56 +QUERY=57 +SCHEMAS=58 +SELECT=59 +SHOW=60 +SYS=61 +TABLE=62 +TABLES=63 +TEXT=64 +TRUE=65 +TYPE=66 +TYPES=67 +USING=68 +VERIFY=69 +WHERE=70 +WITH=71 +EQ=72 +NEQ=73 +LT=74 +LTE=75 +GT=76 +GTE=77 +PLUS=78 +MINUS=79 +ASTERISK=80 +SLASH=81 +PERCENT=82 +CONCAT=83 +DOT=84 +PARAM=85 +STRING=86 +INTEGER_VALUE=87 +DECIMAL_VALUE=88 +IDENTIFIER=89 +DIGIT_IDENTIFIER=90 +TABLE_IDENTIFIER=91 +QUOTED_IDENTIFIER=92 +BACKQUOTED_IDENTIFIER=93 +SIMPLE_COMMENT=94 +BRACKETED_COMMENT=95 +WS=96 +UNRECOGNIZED=97 +DELIMITER=98 +'('=1 +')'=2 +','=3 +':'=4 +'ALL'=5 +'ANALYZE'=6 +'ANALYZED'=7 +'AND'=8 +'ANY'=9 +'AS'=10 +'ASC'=11 +'BETWEEN'=12 +'BY'=13 +'CAST'=14 +'CATALOG'=15 +'CATALOGS'=16 +'COLUMNS'=17 +'DEBUG'=18 +'DESC'=19 +'DESCRIBE'=20 +'DISTINCT'=21 +'ESCAPE'=22 +'EXECUTABLE'=23 +'EXISTS'=24 +'EXPLAIN'=25 +'EXTRACT'=26 +'FALSE'=27 +'FORMAT'=28 +'FROM'=29 +'FULL'=30 +'FUNCTIONS'=31 +'GRAPHVIZ'=32 +'GROUP'=33 +'HAVING'=34 +'IN'=35 +'INNER'=36 +'IS'=37 +'JOIN'=38 +'LEFT'=39 +'LIKE'=40 +'LIMIT'=41 +'MAPPED'=42 +'MATCH'=43 +'NATURAL'=44 +'NOT'=45 +'NULL'=46 +'ON'=47 +'OPTIMIZED'=48 +'OR'=49 +'ORDER'=50 +'OUTER'=51 +'PARSED'=52 +'PHYSICAL'=53 +'PLAN'=54 +'RIGHT'=55 +'RLIKE'=56 +'QUERY'=57 +'SCHEMAS'=58 +'SELECT'=59 +'SHOW'=60 +'SYS'=61 +'TABLE'=62 +'TABLES'=63 +'TEXT'=64 +'TRUE'=65 +'TYPE'=66 +'TYPES'=67 +'USING'=68 +'VERIFY'=69 +'WHERE'=70 +'WITH'=71 +'='=72 +'<'=74 +'<='=75 +'>'=76 +'>='=77 +'+'=78 +'-'=79 +'*'=80 +'/'=81 +'%'=82 +'||'=83 +'.'=84 +'?'=85 diff --git a/x-pack/plugin/sql/src/main/antlr/SqlBaseLexer.tokens b/x-pack/plugin/sql/src/main/antlr/SqlBaseLexer.tokens new file mode 100644 index 0000000000000..a687a9215ecb5 --- /dev/null +++ b/x-pack/plugin/sql/src/main/antlr/SqlBaseLexer.tokens @@ -0,0 +1,181 @@ +T__0=1 +T__1=2 +T__2=3 +T__3=4 +ALL=5 +ANALYZE=6 +ANALYZED=7 +AND=8 +ANY=9 +AS=10 +ASC=11 +BETWEEN=12 +BY=13 +CAST=14 +CATALOG=15 +CATALOGS=16 +COLUMNS=17 +DEBUG=18 +DESC=19 +DESCRIBE=20 +DISTINCT=21 +ESCAPE=22 +EXECUTABLE=23 +EXISTS=24 +EXPLAIN=25 +EXTRACT=26 +FALSE=27 +FORMAT=28 +FROM=29 +FULL=30 +FUNCTIONS=31 +GRAPHVIZ=32 +GROUP=33 +HAVING=34 +IN=35 +INNER=36 +IS=37 +JOIN=38 +LEFT=39 +LIKE=40 +LIMIT=41 +MAPPED=42 +MATCH=43 +NATURAL=44 +NOT=45 +NULL=46 +ON=47 +OPTIMIZED=48 +OR=49 +ORDER=50 +OUTER=51 +PARSED=52 +PHYSICAL=53 +PLAN=54 +RIGHT=55 +RLIKE=56 +QUERY=57 +SCHEMAS=58 +SELECT=59 +SHOW=60 +SYS=61 +TABLE=62 +TABLES=63 +TEXT=64 +TRUE=65 +TYPE=66 +TYPES=67 +USING=68 +VERIFY=69 +WHERE=70 +WITH=71 +EQ=72 +NEQ=73 +LT=74 +LTE=75 +GT=76 +GTE=77 +PLUS=78 +MINUS=79 +ASTERISK=80 +SLASH=81 +PERCENT=82 +CONCAT=83 +DOT=84 +PARAM=85 +STRING=86 +INTEGER_VALUE=87 +DECIMAL_VALUE=88 +IDENTIFIER=89 +DIGIT_IDENTIFIER=90 +TABLE_IDENTIFIER=91 +QUOTED_IDENTIFIER=92 +BACKQUOTED_IDENTIFIER=93 +SIMPLE_COMMENT=94 
+BRACKETED_COMMENT=95 +WS=96 +UNRECOGNIZED=97 +'('=1 +')'=2 +','=3 +':'=4 +'ALL'=5 +'ANALYZE'=6 +'ANALYZED'=7 +'AND'=8 +'ANY'=9 +'AS'=10 +'ASC'=11 +'BETWEEN'=12 +'BY'=13 +'CAST'=14 +'CATALOG'=15 +'CATALOGS'=16 +'COLUMNS'=17 +'DEBUG'=18 +'DESC'=19 +'DESCRIBE'=20 +'DISTINCT'=21 +'ESCAPE'=22 +'EXECUTABLE'=23 +'EXISTS'=24 +'EXPLAIN'=25 +'EXTRACT'=26 +'FALSE'=27 +'FORMAT'=28 +'FROM'=29 +'FULL'=30 +'FUNCTIONS'=31 +'GRAPHVIZ'=32 +'GROUP'=33 +'HAVING'=34 +'IN'=35 +'INNER'=36 +'IS'=37 +'JOIN'=38 +'LEFT'=39 +'LIKE'=40 +'LIMIT'=41 +'MAPPED'=42 +'MATCH'=43 +'NATURAL'=44 +'NOT'=45 +'NULL'=46 +'ON'=47 +'OPTIMIZED'=48 +'OR'=49 +'ORDER'=50 +'OUTER'=51 +'PARSED'=52 +'PHYSICAL'=53 +'PLAN'=54 +'RIGHT'=55 +'RLIKE'=56 +'QUERY'=57 +'SCHEMAS'=58 +'SELECT'=59 +'SHOW'=60 +'SYS'=61 +'TABLE'=62 +'TABLES'=63 +'TEXT'=64 +'TRUE'=65 +'TYPE'=66 +'TYPES'=67 +'USING'=68 +'VERIFY'=69 +'WHERE'=70 +'WITH'=71 +'='=72 +'<'=74 +'<='=75 +'>'=76 +'>='=77 +'+'=78 +'-'=79 +'*'=80 +'/'=81 +'%'=82 +'||'=83 +'.'=84 +'?'=85 diff --git a/x-pack/plugin/sql/src/main/bin/elasticsearch-sql-cli b/x-pack/plugin/sql/src/main/bin/elasticsearch-sql-cli new file mode 100755 index 0000000000000..676b842beedaa --- /dev/null +++ b/x-pack/plugin/sql/src/main/bin/elasticsearch-sql-cli @@ -0,0 +1,16 @@ +#!/bin/bash + +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License; +# you may not use this file except in compliance with the Elastic License. + +source "`dirname "$0"`"/elasticsearch-env + +source "`dirname "$0"`"/x-pack-env + +CLI_JAR=$(ls $ES_HOME/bin/elasticsearch-sql-cli-*.jar) + +exec \ + "$JAVA" \ + -jar "$CLI_JAR" \ + "$@" diff --git a/x-pack/plugin/sql/src/main/bin/elasticsearch-sql-cli.bat b/x-pack/plugin/sql/src/main/bin/elasticsearch-sql-cli.bat new file mode 100644 index 0000000000000..cf159f0322363 --- /dev/null +++ b/x-pack/plugin/sql/src/main/bin/elasticsearch-sql-cli.bat @@ -0,0 +1,24 @@ +@echo off + +rem Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +rem or more contributor license agreements. Licensed under the Elastic License; +rem you may not use this file except in compliance with the Elastic License. + +setlocal enabledelayedexpansion +setlocal enableextensions + +call "%~dp0elasticsearch-env.bat" || exit /b 1 + +call "%~dp0x-pack-env.bat" || exit /b 1 + +set CLI_JAR=%ES_HOME%/bin/* + +%JAVA% ^ + -cp "%CLI_JAR%" ^ + -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ + -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ + org.elasticsearch.xpack.sql.cli.Cli ^ + %* + +endlocal +endlocal diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/ClientSqlException.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/ClientSqlException.java new file mode 100644 index 0000000000000..accca2a6a45b6 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/ClientSqlException.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql; + +public abstract class ClientSqlException extends SqlException { + + protected ClientSqlException(String message, Object... 
args) { + super(message, args); + } + + protected ClientSqlException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { + super(message, cause, enableSuppression, writableStackTrace); + } + + protected ClientSqlException(String message, Throwable cause) { + super(message, cause); + } + + protected ClientSqlException(Throwable cause, String message, Object... args) { + super(cause, message, args); + } + + protected ClientSqlException(Throwable cause) { + super(cause); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/ServerSqlException.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/ServerSqlException.java new file mode 100644 index 0000000000000..e8548ab7f93ce --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/ServerSqlException.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql; + +public abstract class ServerSqlException extends SqlException { + + protected ServerSqlException(String message, Object... args) { + super(message, args); + } + + protected ServerSqlException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { + super(message, cause, enableSuppression, writableStackTrace); + } + + protected ServerSqlException(String message, Throwable cause) { + super(message, cause); + } + + protected ServerSqlException(Throwable cause, String message, Object... args) { + super(cause, message, args); + } + + protected ServerSqlException(Throwable cause) { + super(cause); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/SqlException.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/SqlException.java new file mode 100644 index 0000000000000..84b40b221d9ea --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/SqlException.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql; + +import org.elasticsearch.ElasticsearchException; + +public abstract class SqlException extends ElasticsearchException { + public SqlException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { + super(message, cause, enableSuppression, writableStackTrace); + } + + public SqlException(String message, Throwable cause) { + super(message, cause); + } + + public SqlException(String message, Object... args) { + super(message, args); + } + + public SqlException(Throwable cause, String message, Object... args) { + super(message, cause, args); + } + + public SqlException(Throwable cause) { + super(cause); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/SqlIllegalArgumentException.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/SqlIllegalArgumentException.java new file mode 100644 index 0000000000000..be121a0ace4df --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/SqlIllegalArgumentException.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql; + +public class SqlIllegalArgumentException extends ServerSqlException { + public SqlIllegalArgumentException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { + super(message, cause, enableSuppression, writableStackTrace); + } + + public SqlIllegalArgumentException(String message, Throwable cause) { + super(message, cause); + } + + public SqlIllegalArgumentException(String message, Object... args) { + super(message, args); + } + + public SqlIllegalArgumentException(Throwable cause, String message, Object... args) { + super(cause, message, args); + } + + public SqlIllegalArgumentException(String message) { + super(message); + } + + public SqlIllegalArgumentException(Throwable cause) { + super(cause); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/AnalysisException.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/AnalysisException.java new file mode 100644 index 0000000000000..5e9224979f831 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/AnalysisException.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.analysis; + +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.sql.ClientSqlException; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.Node; + +import java.util.Locale; + +public class AnalysisException extends ClientSqlException { + + private final int line; + private final int column; + + public AnalysisException(Node source, String message, Object... args) { + super(message, args); + + Location loc = Location.EMPTY; + if (source != null && source.location() != null) { + loc = source.location(); + } + this.line = loc.getLineNumber(); + this.column = loc.getColumnNumber(); + } + + public AnalysisException(Node source, String message, Throwable cause) { + super(message, cause); + + Location loc = Location.EMPTY; + if (source != null && source.location() != null) { + loc = source.location(); + } + this.line = loc.getLineNumber(); + this.column = loc.getColumnNumber(); + } + + public int getLineNumber() { + return line; + } + + public int getColumnNumber() { + return column; + } + + @Override + public RestStatus status() { + return RestStatus.BAD_REQUEST; + } + + @Override + public String getMessage() { + return String.format(Locale.ROOT, "line %s:%s: %s", getLineNumber(), getColumnNumber(), super.getMessage()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java new file mode 100644 index 0000000000000..f5a0b72a2bd38 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java @@ -0,0 +1,1055 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.analysis.analyzer; + +import org.elasticsearch.xpack.sql.analysis.AnalysisException; +import org.elasticsearch.xpack.sql.analysis.analyzer.Verifier.Failure; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; +import org.elasticsearch.xpack.sql.capabilities.Resolvables; +import org.elasticsearch.xpack.sql.expression.Alias; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.AttributeMap; +import org.elasticsearch.xpack.sql.expression.AttributeSet; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.expression.Order; +import org.elasticsearch.xpack.sql.expression.SubQueryExpression; +import org.elasticsearch.xpack.sql.expression.UnresolvedAlias; +import org.elasticsearch.xpack.sql.expression.UnresolvedAttribute; +import org.elasticsearch.xpack.sql.expression.UnresolvedStar; +import org.elasticsearch.xpack.sql.expression.function.Function; +import org.elasticsearch.xpack.sql.expression.function.FunctionDefinition; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.expression.function.Functions; +import org.elasticsearch.xpack.sql.expression.function.UnresolvedFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.Cast; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.ArithmeticFunction; +import org.elasticsearch.xpack.sql.plan.TableIdentifier; +import org.elasticsearch.xpack.sql.plan.logical.Aggregate; +import org.elasticsearch.xpack.sql.plan.logical.EsRelation; +import org.elasticsearch.xpack.sql.plan.logical.Filter; +import org.elasticsearch.xpack.sql.plan.logical.Join; +import org.elasticsearch.xpack.sql.plan.logical.LocalRelation; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.logical.OrderBy; +import org.elasticsearch.xpack.sql.plan.logical.Project; +import org.elasticsearch.xpack.sql.plan.logical.SubQueryAlias; +import org.elasticsearch.xpack.sql.plan.logical.UnaryPlan; +import org.elasticsearch.xpack.sql.plan.logical.UnresolvedRelation; +import org.elasticsearch.xpack.sql.plan.logical.With; +import org.elasticsearch.xpack.sql.rule.Rule; +import org.elasticsearch.xpack.sql.rule.RuleExecutor; +import org.elasticsearch.xpack.sql.tree.Node; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypeConversion; +import org.elasticsearch.xpack.sql.type.DataTypes; +import org.elasticsearch.xpack.sql.type.UnsupportedEsField; +import org.joda.time.DateTimeZone; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.TimeZone; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static java.util.stream.Collectors.toList; +import static java.util.stream.Collectors.toMap; +import static 
org.elasticsearch.xpack.sql.util.CollectionUtils.combine; + +public class Analyzer extends RuleExecutor { + /** + * Verify a plan. + */ + public static Map, String> verifyFailures(LogicalPlan plan) { + Collection failures = Verifier.verify(plan); + return failures.stream().collect(toMap(Failure::source, Failure::message)); + } + + /** + * Valid functions. + */ + private final FunctionRegistry functionRegistry; + /** + * Information about the index against which the SQL is being analyzed. + */ + private final IndexResolution indexResolution; + /** + * Time zone in which we're executing this SQL. It is attached to functions + * that deal with date and time. + */ + private final TimeZone timeZone; + + public Analyzer(FunctionRegistry functionRegistry, IndexResolution results, TimeZone timeZone) { + this.functionRegistry = functionRegistry; + this.indexResolution = results; + this.timeZone = timeZone; + } + + @Override + protected Iterable.Batch> batches() { + Batch substitution = new Batch("Substitution", + new CTESubstitution()); + Batch resolution = new Batch("Resolution", + new ResolveTable(), + new ResolveRefs(), + new ResolveOrdinalInOrderByAndGroupBy(), + new ResolveMissingRefs(), + new ResolveFunctions(), + new ResolveAliases(), + new ProjectedAggregations(), + new ResolveAggsInHaving() + //new ImplicitCasting() + ); + // TODO: this might be removed since the deduplication happens already in ResolveFunctions + Batch deduplication = new Batch("Deduplication", + new PruneDuplicateFunctions()); + + return Arrays.asList(substitution, resolution); + } + + public LogicalPlan analyze(LogicalPlan plan) { + return analyze(plan, true); + } + + public LogicalPlan analyze(LogicalPlan plan, boolean verify) { + if (plan.analyzed()) { + return plan; + } + return verify ? verify(execute(plan)) : execute(plan); + } + + public ExecutionInfo debugAnalyze(LogicalPlan plan) { + return plan.analyzed() ? null : executeWithInfo(plan); + } + + public LogicalPlan verify(LogicalPlan plan) { + Collection failures = Verifier.verify(plan); + if (!failures.isEmpty()) { + throw new VerificationException(failures); + } + return plan; + } + + @SuppressWarnings("unchecked") + private static E resolveExpression(E expression, LogicalPlan plan) { + return (E) expression.transformUp(e -> { + if (e instanceof UnresolvedAttribute) { + UnresolvedAttribute ua = (UnresolvedAttribute) e; + Attribute a = resolveAgainstList(ua, plan.output()); + return a != null ? a : e; + } + return e; + }); + } + + // + // Shared methods around the analyzer rules + // + + private static Attribute resolveAgainstList(UnresolvedAttribute u, Collection attrList) { + List matches = new ArrayList<>(); + + // first take into account the qualified version + boolean qualified = u.qualifier() != null; + + for (Attribute attribute : attrList) { + if (!attribute.synthetic()) { + boolean match = qualified ? 
+ Objects.equals(u.qualifiedName(), attribute.qualifiedName()) : + // if the field is unqualified + // first check the names directly + (Objects.equals(u.name(), attribute.name()) + // but also if the qualifier might not be quoted and if there's any ambiguity with nested fields + || Objects.equals(u.name(), attribute.qualifiedName())); + if (match) { + matches.add(attribute.withLocation(u.location())); + } + } + } + + // none found + if (matches.isEmpty()) { + return null; + } + + if (matches.size() == 1) { + return matches.get(0); + } + + return u.withUnresolvedMessage("Reference [" + u.qualifiedName() + + "] is ambiguous (to disambiguate use quotes or qualifiers); matches any of " + + matches.stream() + .map(a -> "\"" + a.qualifier() + "\".\"" + a.name() + "\"") + .sorted() + .collect(toList()) + ); + } + + private static boolean hasStar(List exprs) { + for (Expression expression : exprs) { + if (expression instanceof UnresolvedStar) { + return true; + } + } + return false; + } + + private static boolean containsAggregate(List list) { + return Expressions.anyMatch(list, Functions::isAggregate); + } + + private static boolean containsAggregate(Expression exp) { + return containsAggregate(singletonList(exp)); + } + + + private static class CTESubstitution extends AnalyzeRule { + + @Override + protected LogicalPlan rule(With plan) { + return substituteCTE(plan.child(), plan.subQueries()); + } + + private LogicalPlan substituteCTE(LogicalPlan p, Map subQueries) { + if (p instanceof UnresolvedRelation) { + UnresolvedRelation ur = (UnresolvedRelation) p; + SubQueryAlias subQueryAlias = subQueries.get(ur.table().index()); + if (subQueryAlias != null) { + if (ur.alias() != null) { + return new SubQueryAlias(ur.location(), subQueryAlias, ur.alias()); + } + return subQueryAlias; + } + return ur; + } + // inlined queries (SELECT 1 + 2) are already resolved + else if (p instanceof LocalRelation) { + return p; + } + + return p.transformExpressionsDown(e -> { + if (e instanceof SubQueryExpression) { + SubQueryExpression sq = (SubQueryExpression) e; + return sq.withQuery(substituteCTE(sq.query(), subQueries)); + } + return e; + }); + } + + @Override + protected boolean skipResolved() { + return false; + } + } + + private class ResolveTable extends AnalyzeRule { + @Override + protected LogicalPlan rule(UnresolvedRelation plan) { + TableIdentifier table = plan.table(); + if (indexResolution.isValid() == false) { + return plan.unresolvedMessage().equals(indexResolution.toString()) ? 
plan : new UnresolvedRelation(plan.location(), + plan.table(), plan.alias(), indexResolution.toString()); + } + assert indexResolution.matches(table.index()); + LogicalPlan logicalPlan = new EsRelation(plan.location(), indexResolution.get()); + SubQueryAlias sa = new SubQueryAlias(plan.location(), logicalPlan, table.index()); + + if (plan.alias() != null) { + sa = new SubQueryAlias(plan.location(), sa, plan.alias()); + } + + return sa; + } + } + + private static class ResolveRefs extends AnalyzeRule { + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + // if the children are not resolved, there's no way the node can be resolved + if (!plan.childrenResolved()) { + return plan; + } + + // okay, there's a chance so let's get started + + if (plan instanceof Project) { + Project p = (Project) plan; + if (hasStar(p.projections())) { + return new Project(p.location(), p.child(), expandProjections(p.projections(), p.child())); + } + } + else if (plan instanceof Aggregate) { + Aggregate a = (Aggregate) plan; + if (hasStar(a.aggregates())) { + return new Aggregate(a.location(), a.child(), a.groupings(), + expandProjections(a.aggregates(), a.child())); + } + // if the grouping is unresolved but the aggs are, use the latter to resolve the former + // solves the case of queries declaring an alias in SELECT and referring to it in GROUP BY + if (!a.expressionsResolved() && Resolvables.resolved(a.aggregates())) { + List groupings = a.groupings(); + List newGroupings = new ArrayList<>(); + AttributeMap resolved = Expressions.asAttributeMap(a.aggregates()); + boolean changed = false; + for (Expression grouping : groupings) { + if (grouping instanceof UnresolvedAttribute) { + Attribute maybeResolved = resolveAgainstList((UnresolvedAttribute) grouping, resolved.keySet()); + if (maybeResolved != null) { + changed = true; + // use the matched expression (not its attribute) + grouping = resolved.get(maybeResolved); + } + } + newGroupings.add(grouping); + } + + return changed ? 
new Aggregate(a.location(), a.child(), newGroupings, a.aggregates()) : a; + } + } + + else if (plan instanceof Join) { + Join j = (Join) plan; + if (!j.duplicatesResolved()) { + LogicalPlan deduped = dedupRight(j.left(), j.right()); + return new Join(j.location(), j.left(), deduped, j.type(), j.condition()); + } + } + // try resolving the order expression (the children are resolved as this point) + else if (plan instanceof OrderBy) { + OrderBy o = (OrderBy) plan; + if (!o.resolved()) { + List resolvedOrder = o.order().stream() + .map(or -> resolveExpression(or, o.child())) + .collect(toList()); + return new OrderBy(o.location(), o.child(), resolvedOrder); + } + } + + if (log.isTraceEnabled()) { + log.trace("Attempting to resolve {}", plan.nodeString()); + } + + return plan.transformExpressionsUp(e -> { + if (e instanceof UnresolvedAttribute) { + UnresolvedAttribute u = (UnresolvedAttribute) e; + List childrenOutput = new ArrayList<>(); + for (LogicalPlan child : plan.children()) { + childrenOutput.addAll(child.output()); + } + NamedExpression named = resolveAgainstList(u, childrenOutput); + // if resolved, return it; otherwise keep it in place to be resolved later + if (named != null) { + // if it's a object/compound type, keep it unresolved with a nice error message + if (named instanceof FieldAttribute) { + FieldAttribute fa = (FieldAttribute) named; + if (DataTypes.isUnsupported(fa.dataType())) { + UnsupportedEsField unsupportedField = (UnsupportedEsField) fa.field(); + named = u.withUnresolvedMessage( + "Cannot use field [" + fa.name() + "] type [" + unsupportedField.getOriginalType() + + "] as is unsupported"); + } + else if (!fa.dataType().isPrimitive()) { + named = u.withUnresolvedMessage( + "Cannot use field [" + fa.name() + "] type [" + fa.dataType().esType + "] only its subfields"); + } + } + + if (log.isTraceEnabled()) { + log.trace("Resolved {} to {}", u, named); + } + return named; + } + } + //TODO: likely have to expand * inside functions as well + return e; + }); + } + + private List expandProjections(List projections, LogicalPlan child) { + List result = new ArrayList<>(); + + List output = child.output(); + for (NamedExpression ne : projections) { + if (ne instanceof UnresolvedStar) { + result.addAll(expandStar((UnresolvedStar) ne, output)); + } else if (ne instanceof UnresolvedAlias) { + UnresolvedAlias ua = (UnresolvedAlias) ne; + if (ua.child() instanceof UnresolvedStar) { + result.addAll(expandStar((UnresolvedStar) ua.child(), output)); + } + } else { + result.add(ne); + } + } + + return result; + } + + private List expandStar(UnresolvedStar us, List output) { + List expanded = new ArrayList<>(); + + // a qualifier is specified - since this is a star, it should be a CompoundDataType + if (us.qualifier() != null) { + // resolve the so-called qualifier first + // since this is an unresolved start we don't know whether it's a path or an actual qualifier + Attribute q = resolveAgainstList(us.qualifier(), output); + + // now use the resolved 'qualifier' to match + for (Attribute attr : output) { + // filter the attributes that match based on their path + if (attr instanceof FieldAttribute) { + FieldAttribute fa = (FieldAttribute) attr; + if (DataTypes.isUnsupported(fa.dataType())) { + continue; + } + if (q.qualifier() != null) { + if (Objects.equals(q.qualifiedName(), fa.qualifiedPath())) { + expanded.add(fa.withLocation(attr.location())); + } + } else { + // use the path only to match non-compound types + if (Objects.equals(q.name(), fa.path())) { + 
expanded.add(fa.withLocation(attr.location())); + } + } + } + } + } else { + // add only primitives + // but filter out multi fields (allow only the top-level value) + Set seenMultiFields = new LinkedHashSet<>(); + + for (Attribute a : output) { + if (!DataTypes.isUnsupported(a.dataType()) && a.dataType().isPrimitive()) { + if (a instanceof FieldAttribute) { + FieldAttribute fa = (FieldAttribute) a; + // skip nested fields and seen multi-fields + if (!fa.isNested() && !seenMultiFields.contains(fa.parent())) { + expanded.add(a); + seenMultiFields.add(a); + } + } else { + expanded.add(a); + } + } + } + } + + return expanded; + } + + // generate a new (right) logical plan with different IDs for all conflicting attributes + private LogicalPlan dedupRight(LogicalPlan left, LogicalPlan right) { + AttributeSet conflicting = left.outputSet().intersect(right.outputSet()); + + if (log.isTraceEnabled()) { + log.trace("Trying to resolve conflicts " + conflicting + " between left " + left.nodeString() + + " and right " + right.nodeString()); + } + + throw new UnsupportedOperationException("don't know how to resolve conficting IDs yet"); + } + } + + // Allow ordinal positioning in order/sort by (quite useful when dealing with aggs) + // Note that ordering starts at 1 + private static class ResolveOrdinalInOrderByAndGroupBy extends AnalyzeRule { + + @Override + protected boolean skipResolved() { + return false; + } + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + if (!plan.childrenResolved()) { + return plan; + } + if (plan instanceof OrderBy) { + OrderBy orderBy = (OrderBy) plan; + boolean changed = false; + + List newOrder = new ArrayList<>(orderBy.order().size()); + List ordinalReference = orderBy.child().output(); + int max = ordinalReference.size(); + + for (Order order : orderBy.order()) { + Integer ordinal = findOrdinal(order.child()); + if (ordinal != null) { + changed = true; + if (ordinal > 0 && ordinal <= max) { + newOrder.add(new Order(order.location(), orderBy.child().output().get(ordinal - 1), order.direction())); + } + else { + throw new AnalysisException(order, "Invalid %d specified in OrderBy (valid range is [1, %d])", ordinal, max); + } + } + else { + newOrder.add(order); + } + } + + return changed ? new OrderBy(orderBy.location(), orderBy.child(), newOrder) : orderBy; + } + + if (plan instanceof Aggregate) { + Aggregate agg = (Aggregate) plan; + + if (!Resolvables.resolved(agg.aggregates())) { + return agg; + } + + boolean changed = false; + List newGroupings = new ArrayList<>(agg.groupings().size()); + List aggregates = agg.aggregates(); + int max = aggregates.size(); + + for (Expression exp : agg.groupings()) { + Integer ordinal = findOrdinal(exp); + if (ordinal != null) { + changed = true; + if (ordinal > 0 && ordinal <= max) { + NamedExpression reference = aggregates.get(ordinal - 1); + if (containsAggregate(reference)) { + throw new AnalysisException(exp, "Group ordinal " + ordinal + " refers to an aggregate function " + + reference.nodeName() + " which is not compatible/allowed with GROUP BY"); + } + newGroupings.add(reference); + } + else { + throw new AnalysisException(exp, "Invalid ordinal " + ordinal + + " specified in Aggregate (valid range is [1, " + max + "])"); + } + } + else { + newGroupings.add(exp); + } + } + + return changed ? 
new Aggregate(agg.location(), agg.child(), newGroupings, aggregates) : agg; + } + + return plan; + } + + private Integer findOrdinal(Expression expression) { + if (expression instanceof Literal) { + Literal l = (Literal) expression; + if (l.dataType().isInteger) { + Object v = l.value(); + if (v instanceof Number) { + return Integer.valueOf(((Number) v).intValue()); + } + } + } + return null; + } + } + + // It is valid to filter (including HAVING) or sort by attributes not present in the SELECT clause. + // This rule pushed down the attributes for them to be resolved then projects them away. + // As such this rule is an extended version of ResolveRefs + private static class ResolveMissingRefs extends AnalyzeRule { + + private static class AggGroupingFailure { + final List expectedGrouping; + + private AggGroupingFailure(List expectedGrouping) { + this.expectedGrouping = expectedGrouping; + } + } + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + + if (plan instanceof OrderBy && !plan.resolved() && plan.childrenResolved()) { + OrderBy o = (OrderBy) plan; + List maybeResolved = o.order().stream() + .map(or -> tryResolveExpression(or, o.child())) + .collect(toList()); + + AttributeSet resolvedRefs = Expressions.references(maybeResolved.stream() + .filter(Expression::resolved) + .collect(toList())); + + + AttributeSet missing = resolvedRefs.substract(o.child().outputSet()); + + if (!missing.isEmpty()) { + // Add missing attributes but project them away afterwards + List failedAttrs = new ArrayList<>(); + LogicalPlan newChild = propagateMissing(o.child(), missing, failedAttrs); + + // resolution failed and the failed expressions might contain resolution information so copy it over + if (!failedAttrs.isEmpty()) { + List newOrders = new ArrayList<>(); + // transform the orders with the failed information + for (Order order : o.order()) { + Order transformed = (Order) order.transformUp(ua -> resolveMetadataToMessage(ua, failedAttrs, "order"), + UnresolvedAttribute.class); + newOrders.add(order.equals(transformed) ? order : transformed); + } + + return o.order().equals(newOrders) ? o : new OrderBy(o.location(), o.child(), newOrders); + } + + // everything worked + return new Project(o.location(), new OrderBy(o.location(), newChild, maybeResolved), o.child().output()); + } + + if (!maybeResolved.equals(o.order())) { + return new OrderBy(o.location(), o.child(), maybeResolved); + } + } + + if (plan instanceof Filter && !plan.resolved() && plan.childrenResolved()) { + Filter f = (Filter) plan; + Expression maybeResolved = tryResolveExpression(f.condition(), f.child()); + AttributeSet resolvedRefs = new AttributeSet(maybeResolved.references().stream() + .filter(Expression::resolved) + .collect(toList())); + + AttributeSet missing = resolvedRefs.substract(f.child().outputSet()); + + if (!missing.isEmpty()) { + // Again, add missing attributes and project them away + List failedAttrs = new ArrayList<>(); + LogicalPlan newChild = propagateMissing(f.child(), missing, failedAttrs); + + // resolution failed and the failed expressions might contain resolution information so copy it over + if (!failedAttrs.isEmpty()) { + // transform the orders with the failed information + Expression transformed = f.condition().transformUp(ua -> resolveMetadataToMessage(ua, failedAttrs, "filter"), + UnresolvedAttribute.class); + + return f.condition().equals(transformed) ? 
f : new Filter(f.location(), f.child(), transformed); + } + + return new Project(f.location(), new Filter(f.location(), newChild, maybeResolved), f.child().output()); + } + + if (!maybeResolved.equals(f.condition())) { + return new Filter(f.location(), f.child(), maybeResolved); + } + } + + return plan; + } + + static E tryResolveExpression(E exp, LogicalPlan plan) { + E resolved = resolveExpression(exp, plan); + if (!resolved.resolved()) { + // look at unary trees but ignore subqueries + if (plan.children().size() == 1 && !(plan instanceof SubQueryAlias)) { + return tryResolveExpression(resolved, plan.children().get(0)); + } + } + return resolved; + } + + + private static LogicalPlan propagateMissing(LogicalPlan plan, AttributeSet missing, List failed) { + // no more attributes, bail out + if (missing.isEmpty()) { + return plan; + } + + if (plan instanceof Project) { + Project p = (Project) plan; + AttributeSet diff = missing.substract(p.child().outputSet()); + return new Project(p.location(), propagateMissing(p.child(), diff, failed), combine(p.projections(), missing)); + } + + if (plan instanceof Aggregate) { + Aggregate a = (Aggregate) plan; + // missing attributes can only be grouping expressions + for (Attribute m : missing) { + // but we don't can't add an agg if the group is missing + if (!Expressions.anyMatch(a.groupings(), m::semanticEquals)) { + if (m instanceof Attribute) { + // pass failure information to help the verifier + m = new UnresolvedAttribute(m.location(), m.name(), m.qualifier(), null, null, + new AggGroupingFailure(Expressions.names(a.groupings()))); + } + failed.add(m); + } + } + // propagation failed, return original plan + if (!failed.isEmpty()) { + return plan; + } + return new Aggregate(a.location(), a.child(), a.groupings(), combine(a.aggregates(), missing)); + } + + // LeafPlans are tables and BinaryPlans are joins so pushing can only happen on unary + if (plan instanceof UnaryPlan) { + return plan.replaceChildren(singletonList(propagateMissing(((UnaryPlan) plan).child(), missing, failed))); + } + + failed.addAll(missing); + return plan; + } + + private static UnresolvedAttribute resolveMetadataToMessage(UnresolvedAttribute ua, List attrs, String actionName) { + for (Attribute attr : attrs) { + if (ua.resolutionMetadata() == null && attr.name().equals(ua.name())) { + if (attr instanceof UnresolvedAttribute) { + UnresolvedAttribute fua = (UnresolvedAttribute) attr; + Object metadata = fua.resolutionMetadata(); + if (metadata instanceof AggGroupingFailure) { + List names = ((AggGroupingFailure) metadata).expectedGrouping; + return ua.withUnresolvedMessage( + "Cannot " + actionName + " by non-grouped column [" + ua.qualifiedName() + "], expected " + names); + } + } + } + } + return ua; + }; + } + + // to avoid creating duplicate functions + // this rule does two iterations + // 1. collect all functions + // 2. 
search unresolved functions and first try resolving them from already 'seen' functions + private class ResolveFunctions extends AnalyzeRule { + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + Map> seen = new LinkedHashMap<>(); + // collect (and replace duplicates) + LogicalPlan p = plan.transformExpressionsUp(e -> collectResolvedAndReplace(e, seen)); + // resolve based on seen + return resolve(p, seen); + } + + private Expression collectResolvedAndReplace(Expression e, Map> seen) { + if (e instanceof Function && e.resolved()) { + Function f = (Function) e; + String fName = f.functionName(); + // the function is resolved and its name normalized already + List list = getList(seen, fName); + for (Function seenFunction : list) { + if (seenFunction != f && f.arguments().equals(seenFunction.arguments())) { + return seenFunction; + } + } + list.add(f); + } + + return e; + } + + protected LogicalPlan resolve(LogicalPlan plan, Map> seen) { + return plan.transformExpressionsUp(e -> { + if (e instanceof UnresolvedFunction) { + UnresolvedFunction uf = (UnresolvedFunction) e; + + if (uf.analyzed()) { + return uf; + } + + String name = uf.name(); + + if (hasStar(uf.arguments())) { + uf = uf.preprocessStar(); + if (uf.analyzed()) { + return uf; + } + } + + if (!uf.childrenResolved()) { + return uf; + } + + String normalizedName = functionRegistry.concreteFunctionName(name); + + List list = getList(seen, normalizedName); + // first try to resolve from seen functions + if (!list.isEmpty()) { + for (Function seenFunction : list) { + if (uf.arguments().equals(seenFunction.arguments())) { + return seenFunction; + } + } + } + + // not seen before, use the registry + if (!functionRegistry.functionExists(name)) { + return uf.missing(normalizedName, functionRegistry.listFunctions()); + } + // TODO: look into Generator for significant terms, etc.. 
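+ // not found among the already 'seen' functions: resolve the definition through the registry,
+ // build the concrete function (binding the session time zone) and cache it in 'seen' for reuse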
+ FunctionDefinition def = functionRegistry.resolveFunction(normalizedName); + Function f = uf.buildResolved(timeZone, def); + + list.add(f); + return f; + } + return e; + }); + } + + private List getList(Map> seen, String name) { + List list = seen.get(name); + if (list == null) { + list = new ArrayList<>(); + seen.put(name, list); + } + return list; + } + } + + private static class ResolveAliases extends AnalyzeRule { + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + if (plan instanceof Project) { + Project p = (Project) plan; + if (p.childrenResolved() && hasUnresolvedAliases(p.projections())) { + return new Project(p.location(), p.child(), assignAliases(p.projections())); + } + return p; + } + if (plan instanceof Aggregate) { + Aggregate a = (Aggregate) plan; + if (a.childrenResolved() && hasUnresolvedAliases(a.aggregates())) { + return new Aggregate(a.location(), a.child(), a.groupings(), assignAliases(a.aggregates())); + } + return a; + } + + return plan; + } + + private boolean hasUnresolvedAliases(List expressions) { + return expressions != null && expressions.stream().anyMatch(e -> e instanceof UnresolvedAlias); + } + + private List assignAliases(List exprs) { + List newExpr = new ArrayList<>(exprs.size()); + for (int i = 0; i < exprs.size(); i++) { + NamedExpression expr = exprs.get(i); + NamedExpression transformed = (NamedExpression) expr.transformUp(ua -> { + Expression child = ua.child(); + if (child instanceof NamedExpression) { + return child; + } + if (!child.resolved()) { + return ua; + } + if (child instanceof Cast) { + Cast c = (Cast) child; + if (c.field() instanceof NamedExpression) { + return new Alias(c.location(), ((NamedExpression) c.field()).name(), c); + } + } + //TODO: maybe add something closer to SQL + return new Alias(child.location(), child.toString(), child); + }, UnresolvedAlias.class); + newExpr.add(expr.equals(transformed) ? expr : transformed); + } + return newExpr; + } + } + + + // + // Replace a project with aggregation into an aggregation + // + private static class ProjectedAggregations extends AnalyzeRule { + + @Override + protected LogicalPlan rule(Project p) { + if (containsAggregate(p.projections())) { + return new Aggregate(p.location(), p.child(), emptyList(), p.projections()); + } + return p; + } + }; + + // + // Handle aggs in HAVING. To help folding any aggs not found in Aggregation + // will be pushed down to the Aggregate and then projected. This also simplifies the Verifier's job. 
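+ // For example (illustrative): SELECT g FROM t GROUP BY g HAVING MAX(s) > 10 - MAX(s) is missing from
+ // the SELECT list, so it is added to the underlying Aggregate and then projected away again.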
+ // + private class ResolveAggsInHaving extends AnalyzeRule { + + @Override + protected boolean skipResolved() { + return false; + } + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + // HAVING = Filter followed by an Agg + if (plan instanceof Filter) { + Filter f = (Filter) plan; + if (f.child() instanceof Aggregate && f.child().resolved()) { + Aggregate agg = (Aggregate) f.child(); + + Set missing = null; + Expression condition = f.condition(); + + // the condition might contain an agg (AVG(salary)) that could have been resolved + // (salary cannot be pushed down to Aggregate since there's no grouping and thus the function wasn't resolved either) + + // so try resolving the condition in one go through a 'dummy' aggregate + if (!condition.resolved()) { + // that's why try to resolve the condition + Aggregate tryResolvingCondition = new Aggregate(agg.location(), agg.child(), agg.groupings(), + singletonList(new Alias(f.location(), ".having", condition))); + + LogicalPlan conditionResolved = analyze(tryResolvingCondition, false); + + // if it got resolved + if (conditionResolved.resolved()) { + // replace the condition with the resolved one + condition = ((Alias) ((Aggregate) conditionResolved).aggregates().get(0)).child(); + } else { + // else bail out + return plan; + } + } + + missing = findMissingAggregate(agg, condition); + + if (!missing.isEmpty()) { + Aggregate newAgg = new Aggregate(agg.location(), agg.child(), agg.groupings(), + combine(agg.aggregates(), missing)); + Filter newFilter = new Filter(f.location(), newAgg, condition); + // preserve old output + return new Project(f.location(), newFilter, f.output()); + } + } + return plan; + } + + return plan; + } + + private Set findMissingAggregate(Aggregate target, Expression from) { + Set missing = new LinkedHashSet<>(); + + for (Expression filterAgg : from.collect(Functions::isAggregate)) { + if (!Expressions.anyMatch(target.aggregates(), + a -> { + Attribute attr = Expressions.attribute(a); + return attr != null && attr.semanticEquals(Expressions.attribute(filterAgg)); + })) { + missing.add(Expressions.wrapAsNamed(filterAgg)); + } + } + + return missing; + } + } + + private class PruneDuplicateFunctions extends AnalyzeRule { + + @Override + protected boolean skipResolved() { + return false; + } + + @Override + public LogicalPlan rule(LogicalPlan plan) { + List seen = new ArrayList<>(); + LogicalPlan p = plan.transformExpressionsUp(e -> rule(e, seen)); + return p; + } + + private Expression rule(Expression e, List seen) { + if (e instanceof Function) { + Function f = (Function) e; + for (Function seenFunction : seen) { + if (seenFunction != f && functionsEquals(f, seenFunction)) { + return seenFunction; + } + } + seen.add(f); + } + + return e; + } + + private boolean functionsEquals(Function f, Function seenFunction) { + return f.name().equals(seenFunction.name()) && f.arguments().equals(seenFunction.arguments()); + } + } + + private class ImplicitCasting extends AnalyzeRule { + + @Override + protected boolean skipResolved() { + return false; + } + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + return plan.transformExpressionsDown(this::implicitCast); + } + + private Expression implicitCast(Expression e) { + if (!e.childrenResolved()) { + return e; + } + + Expression left = null, right = null; + + // BinaryOperations are ignored as they are pushed down to ES + // and casting (and thus Aliasing when folding) gets in the way + + if (e instanceof ArithmeticFunction) { + ArithmeticFunction f = 
(ArithmeticFunction) e; + left = f.left(); + right = f.right(); + } + + if (left != null) { + DataType l = left.dataType(); + DataType r = right.dataType(); + if (l != r) { + DataType common = DataTypeConversion.commonType(l, r); + if (common == null) { + return e; + } + left = l == common ? left : new Cast(left.location(), left, common); + right = r == common ? right : new Cast(right.location(), right, common); + return e.replaceChildren(Arrays.asList(left, right)); + } + } + + return e; + } + } + + abstract static class AnalyzeRule extends Rule { + + // transformUp (post-order) - that is first children and then the node + // but with a twist; only if the tree is not resolved or analyzed + @Override + public final LogicalPlan apply(LogicalPlan plan) { + return plan.transformUp(t -> t.analyzed() || skipResolved() && t.resolved() ? t : rule(t), typeToken()); + } + + @Override + protected abstract LogicalPlan rule(SubPlan plan); + + protected boolean skipResolved() { + return true; + } + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/PreAnalyzer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/PreAnalyzer.java new file mode 100644 index 0000000000000..68b73cf3a019f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/PreAnalyzer.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.analysis.analyzer; + +import org.elasticsearch.xpack.sql.plan.TableIdentifier; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.logical.UnresolvedRelation; + +import java.util.ArrayList; +import java.util.List; + +import static java.util.Collections.emptyList; + +// Since the pre-analyzer only inspect (and does NOT transform) the tree +// it is not built as a rule executor. +// Further more it applies 'the rules' only once and needs to return some +// state back. +public class PreAnalyzer { + + public static class PreAnalysis { + public static final PreAnalysis EMPTY = new PreAnalysis(emptyList()); + + public final List indices; + + PreAnalysis(List indices) { + this.indices = indices; + } + } + + public PreAnalysis preAnalyze(LogicalPlan plan) { + if (plan.analyzed()) { + return PreAnalysis.EMPTY; + } + + return doPreAnalyze(plan); + } + + private PreAnalysis doPreAnalyze(LogicalPlan plan) { + List indices = new ArrayList<>(); + + plan.forEachUp(p -> indices.add(p.table()), UnresolvedRelation.class); + + // mark plan as preAnalyzed (if it were marked, there would be no analysis) + plan.forEachUp(LogicalPlan::setPreAnalyzed); + + return new PreAnalysis(indices); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerificationException.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerificationException.java new file mode 100644 index 0000000000000..86af4815894b8 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerificationException.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.analysis.analyzer; + +import java.util.Collection; +import java.util.stream.Collectors; + +import org.elasticsearch.xpack.sql.analysis.AnalysisException; +import org.elasticsearch.xpack.sql.analysis.analyzer.Verifier.Failure; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.util.StringUtils; + + +public class VerificationException extends AnalysisException { + + private final Collection failures; + + protected VerificationException(Collection sources) { + super(null, StringUtils.EMPTY); + failures = sources; + } + + @Override + public String getMessage() { + return failures.stream() + .map(f -> { + Location l = f.source().location(); + return "line " + l.getLineNumber() + ":" + l.getColumnNumber() + ": " + f.message(); + }) + .collect(Collectors.joining(StringUtils.NEW_LINE, "Found " + failures.size() + " problem(s)\n", StringUtils.EMPTY)); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java new file mode 100644 index 0000000000000..f5147b84468b7 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java @@ -0,0 +1,421 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.analysis.analyzer; + +import org.elasticsearch.xpack.sql.capabilities.Unresolvable; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.AttributeSet; +import org.elasticsearch.xpack.sql.expression.Exists; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.UnresolvedAttribute; +import org.elasticsearch.xpack.sql.expression.function.Function; +import org.elasticsearch.xpack.sql.expression.function.FunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.Functions; +import org.elasticsearch.xpack.sql.expression.function.Score; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.sql.plan.logical.Aggregate; +import org.elasticsearch.xpack.sql.plan.logical.Distinct; +import org.elasticsearch.xpack.sql.plan.logical.Filter; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.logical.OrderBy; +import org.elasticsearch.xpack.sql.tree.Node; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.function.Consumer; + +import static java.lang.String.format; + +abstract class Verifier { + + static class Failure { + private final Node source; + private final String message; + + Failure(Node source, String message) { + this.source = source; + 
this.message = message; + } + + Node source() { + return source; + } + + String message() { + return message; + } + + @Override + public int hashCode() { + return source.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Verifier.Failure other = (Verifier.Failure) obj; + return Objects.equals(source, other.source); + } + + @Override + public String toString() { + return message; + } + } + + private static Failure fail(Node source, String message, Object... args) { + return new Failure(source, format(Locale.ROOT, message, args)); + } + + static Collection verify(LogicalPlan plan) { + Set failures = new LinkedHashSet<>(); + + // start bottom-up + plan.forEachUp(p -> { + + if (p.analyzed()) { + return; + } + + // if the children are unresolved, this node will also so counting it will only add noise + if (!p.childrenResolved()) { + return; + } + + Set localFailures = new LinkedHashSet<>(); + + // + // First handle usual suspects + // + + if (p instanceof Unresolvable) { + localFailures.add(fail(p, ((Unresolvable) p).unresolvedMessage())); + } else if (p instanceof Distinct) { + localFailures.add(fail(p, "SELECT DISTINCT is not yet supported")); + } else { + // then take a look at the expressions + p.forEachExpressions(e -> { + // everything is fine, skip expression + if (e.resolved()) { + return; + } + + e.forEachUp(ae -> { + // we're only interested in the children + if (!ae.childrenResolved()) { + return; + } + // again the usual suspects + if (ae instanceof Unresolvable) { + // handle Attributes different to provide more context + if (ae instanceof UnresolvedAttribute) { + UnresolvedAttribute ua = (UnresolvedAttribute) ae; + // only work out the synonyms for raw unresolved attributes + if (!ua.customMessage()) { + boolean useQualifier = ua.qualifier() != null; + List potentialMatches = new ArrayList<>(); + for (Attribute a : p.intputSet()) { + String nameCandidate = useQualifier ? 
a.qualifiedName() : a.name(); + // add only primitives (object types would only result in another error) + if (!(a.dataType() == DataType.UNSUPPORTED) && a.dataType().isPrimitive()) { + potentialMatches.add(nameCandidate); + } + } + + List matches = StringUtils.findSimilar(ua.qualifiedName(), potentialMatches); + if (!matches.isEmpty()) { + ae = ua.withUnresolvedMessage(UnresolvedAttribute.errorMessage(ua.qualifiedName(), matches)); + } + } + } + + localFailures.add(fail(ae, ((Unresolvable) ae).unresolvedMessage())); + return; + } + // type resolution + if (ae.typeResolved().unresolved()) { + localFailures.add(fail(ae, ae.typeResolved().message())); + } else if (ae instanceof Exists) { + localFailures.add(fail(ae, "EXISTS is not yet supported")); + } + }); + }); + } + failures.addAll(localFailures); + }); + + // Concrete verifications + + // if there are no (major) unresolved failures, do more in-depth analysis + + if (failures.isEmpty()) { + // collect Function to better reason about encountered attributes + Map resolvedFunctions = Functions.collectFunctions(plan); + + // for filtering out duplicated errors + final Set groupingFailures = new LinkedHashSet<>(); + + plan.forEachDown(p -> { + if (p.analyzed()) { + return; + } + + // if the children are unresolved, so will this node; counting it will only add noise + if (!p.childrenResolved()) { + return; + } + + Set localFailures = new LinkedHashSet<>(); + + if (!groupingFailures.contains(p)) { + checkGroupBy(p, localFailures, resolvedFunctions, groupingFailures); + } + + checkForScoreInsideFunctions(p, localFailures); + + checkNestedUsedInGroupByOrHaving(p, localFailures); + + // everything checks out + // mark the plan as analyzed + if (localFailures.isEmpty()) { + p.setAnalyzed(); + } + + failures.addAll(localFailures); + }); + } + + return failures; + } + + /** + * Check validity of Aggregate/GroupBy. + * This rule is needed for two reasons: + * 1. a user might specify an invalid aggregate (SELECT foo GROUP BY bar) + * 2. the order/having might contain a non-grouped attribute. This is typically + * caught by the Analyzer however if wrapped in a function (ABS()) it gets resolved + * (because the expression gets resolved little by little without being pushed down, + * without the Analyzer modifying anything. + */ + private static boolean checkGroupBy(LogicalPlan p, Set localFailures, + Map resolvedFunctions, Set groupingFailures) { + return checkGroupByAgg(p, localFailures, groupingFailures, resolvedFunctions) + && checkGroupByOrder(p, localFailures, groupingFailures, resolvedFunctions) + && checkGroupByHaving(p, localFailures, groupingFailures, resolvedFunctions); + } + + // check whether an orderBy failed + private static boolean checkGroupByOrder(LogicalPlan p, Set localFailures, + Set groupingFailures, Map functions) { + if (p instanceof OrderBy) { + OrderBy o = (OrderBy) p; + if (o.child() instanceof Aggregate) { + Aggregate a = (Aggregate) o.child(); + + Map> missing = new LinkedHashMap<>(); + o.order().forEach(oe -> oe.collectFirstChildren(c -> checkGroupMatch(c, oe, a.groupings(), missing, functions))); + + if (!missing.isEmpty()) { + String plural = missing.size() > 1 ? 
"s" : StringUtils.EMPTY; + // get the location of the first missing expression as the order by might be on a different line + localFailures.add( + fail(missing.values().iterator().next(), "Cannot order by non-grouped column" + plural + " %s, expected %s", + Expressions.names(missing.keySet()), + Expressions.names(a.groupings()))); + groupingFailures.add(a); + return false; + } + } + } + return true; + } + + + private static boolean checkGroupByHaving(LogicalPlan p, Set localFailures, + Set groupingFailures, Map functions) { + if (p instanceof Filter) { + Filter f = (Filter) p; + if (f.child() instanceof Aggregate) { + Aggregate a = (Aggregate) f.child(); + + Map> missing = new LinkedHashMap<>(); + Expression condition = f.condition(); + condition.collectFirstChildren(c -> checkGroupMatch(c, condition, a.groupings(), missing, functions)); + + if (!missing.isEmpty()) { + String plural = missing.size() > 1 ? "s" : StringUtils.EMPTY; + localFailures.add(fail(condition, "Cannot filter by non-grouped column" + plural + " %s, expected %s", + Expressions.names(missing.keySet()), + Expressions.names(a.groupings()))); + groupingFailures.add(a); + return false; + } + } + } + return true; + } + + + // check whether plain columns specified in an agg are mentioned in the group-by + private static boolean checkGroupByAgg(LogicalPlan p, Set localFailures, + Set groupingFailures, Map functions) { + if (p instanceof Aggregate) { + Aggregate a = (Aggregate) p; + + // The grouping can not be an aggregate function + a.groupings().forEach(e -> e.forEachUp(c -> { + if (Functions.isAggregate(c)) { + localFailures.add(fail(c, "Cannot use an aggregate [" + c.nodeName().toUpperCase(Locale.ROOT) + "] for grouping")); + } + if (c instanceof Score) { + localFailures.add(fail(c, "Cannot use [SCORE()] for grouping")); + } + })); + + if (!localFailures.isEmpty()) { + return false; + } + + // The agg can be: + // 1. plain column - in which case, there should be an equivalent in groupings + // 2. aggregate over non-grouped column + // 3. scalar function on top of 1 and/or 2. the function needs unfolding to make sure + // the 'source' is valid. + + // Note that grouping can be done by a function (GROUP BY YEAR(date)) which means date + // cannot be used as a plain column, only YEAR(date) or aggs(?) on top of it + + Map> missing = new LinkedHashMap<>(); + a.aggregates().forEach(ne -> + ne.collectFirstChildren(c -> checkGroupMatch(c, ne, a.groupings(), missing, functions))); + + if (!missing.isEmpty()) { + String plural = missing.size() > 1 ? 
"s" : StringUtils.EMPTY; + localFailures.add(fail(missing.values().iterator().next(), "Cannot use non-grouped column" + plural + " %s, expected %s", + Expressions.names(missing.keySet()), + Expressions.names(a.groupings()))); + return false; + } + } + + return true; + } + + private static boolean checkGroupMatch(Expression e, Node source, List groupings, + Map> missing, Map functions) { + // resolve FunctionAttribute to backing functions + if (e instanceof FunctionAttribute) { + FunctionAttribute fa = (FunctionAttribute) e; + Function function = functions.get(fa.functionId()); + // TODO: this should be handled by a different rule + if (function == null) { + return false; + } + e = function; + } + + // scalar functions can be a binary tree + // first test the function against the grouping + // and if that fails, start unpacking hoping to find matches + if (e instanceof ScalarFunction) { + ScalarFunction sf = (ScalarFunction) e; + + // found group for the expression + if (Expressions.anyMatch(groupings, e::semanticEquals)) { + return true; + } + + // unwrap function to find the base + for (Expression arg : sf.arguments()) { + arg.collectFirstChildren(c -> checkGroupMatch(c, source, groupings, missing, functions)); + } + + return true; + } else if (e instanceof Score) { + // Score can't be an aggregate function + missing.put(e, source); + return true; + } + + // skip literals / foldable + if (e.foldable()) { + return true; + } + // skip aggs (allowed to refer to non-group columns) + // TODO: need to check whether it's possible to agg on a field used inside a scalar for grouping + if (Functions.isAggregate(e)) { + return true; + } + // left without leaves which have to match; if not there's a failure + + final Expression exp = e; + if (e.children().isEmpty()) { + if (!Expressions.anyMatch(groupings, c -> exp.semanticEquals(exp instanceof Attribute ? 
Expressions.attribute(c) : c))) { + missing.put(e, source); + } + return true; + } + return false; + } + + private static void checkForScoreInsideFunctions(LogicalPlan p, Set localFailures) { + // Make sure that SCORE is only used in "top level" functions + p.forEachExpressions(e -> + e.forEachUp((Function f) -> + f.arguments().stream() + .filter(exp -> exp.anyMatch(Score.class::isInstance)) + .forEach(exp -> localFailures.add(fail(exp, "[SCORE()] cannot be an argument to a function"))), + Function.class)); + } + + private static void checkNestedUsedInGroupByOrHaving(LogicalPlan p, Set localFailures) { + List nested = new ArrayList<>(); + Consumer match = fa -> { + if (fa.isNested()) { + nested.add(fa); + } + }; + + // nested fields shouldn't be used in aggregates or having (yet) + p.forEachDown(a -> a.groupings().forEach(agg -> agg.forEachUp(match, FieldAttribute.class)), Aggregate.class); + + if (!nested.isEmpty()) { + localFailures.add( + fail(nested.get(0), "Grouping isn't (yet) compatible with nested fields " + new AttributeSet(nested).names())); + nested.clear(); + } + + // check in having + p.forEachDown(f -> { + if (f.child() instanceof Aggregate) { + f.condition().forEachUp(match, FieldAttribute.class); + } + }, Filter.class); + + if (!nested.isEmpty()) { + localFailures.add( + fail(nested.get(0), "HAVING isn't (yet) compatible with nested fields " + new AttributeSet(nested).names())); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/EsIndex.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/EsIndex.java new file mode 100644 index 0000000000000..790bb67c22cf6 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/EsIndex.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.analysis.index; + +import org.elasticsearch.xpack.sql.type.EsField; + +import java.util.Map; + +public class EsIndex { + + private final String name; + private final Map mapping; + + public EsIndex(String name, Map mapping) { + assert name != null; + assert mapping != null; + this.name = name; + this.mapping = mapping; + } + + public String name() { + return name; + } + + public Map mapping() { + return mapping; + } + + @Override + public String toString() { + return name; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolution.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolution.java new file mode 100644 index 0000000000000..4e796ecd8957a --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolution.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
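The group-by verification above reduces to a leaf check: any plain column referenced outside an aggregate must match one of the grouping expressions, otherwise it is reported as a non-grouped column. A standalone sketch of that check, with strings standing in for attributes (all names are illustrative):

```java
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

// Toy version of the "Cannot use non-grouped column" check: collect select-list
// columns that are neither grouped nor only used inside an aggregate function.
public class GroupByCheckSketch {

    static Set<String> nonGrouped(List<String> selectColumns, Set<String> groupings,
                                  Set<String> aggregatedColumns) {
        Set<String> missing = new LinkedHashSet<>();
        for (String column : selectColumns) {
            if (!groupings.contains(column) && !aggregatedColumns.contains(column)) {
                missing.add(column);
            }
        }
        return missing;
    }

    public static void main(String[] args) {
        // SELECT gender, first_name, MAX(salary) FROM emp GROUP BY gender
        Set<String> missing = nonGrouped(List.of("gender", "first_name", "salary"),
                Set.of("gender"), Set.of("salary"));
        System.out.println("Cannot use non-grouped column(s) " + missing + ", expected [gender]");
    }
}
```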
+ */ +package org.elasticsearch.xpack.sql.analysis.index; + +import org.elasticsearch.common.Nullable; + +import java.util.Objects; + +public final class IndexResolution { + public static IndexResolution valid(EsIndex index) { + Objects.requireNonNull(index, "index must not be null if it was found"); + return new IndexResolution(index, null); + } + public static IndexResolution invalid(String invalid) { + Objects.requireNonNull(invalid, "invalid must not be null to signal that the index is invalid"); + return new IndexResolution(null, invalid); + } + public static IndexResolution notFound(String name) { + Objects.requireNonNull(name, "name must not be null"); + return invalid("Unknown index [" + name + "]"); + } + + private final EsIndex index; + @Nullable + private final String invalid; + + private IndexResolution(EsIndex index, @Nullable String invalid) { + this.index = index; + this.invalid = invalid; + } + + public boolean matches(String index) { + return isValid() && this.index.name().equals(index); + } + + /** + * Get the {@linkplain EsIndex} + * @throws MappingException if the index is invalid for use with sql + */ + public EsIndex get() { + if (invalid != null) { + throw new MappingException(invalid); + } + return index; + } + + /** + * Is the index valid for use with sql? Returns {@code false} if the + * index wasn't found. + */ + public boolean isValid() { + return invalid == null; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + IndexResolution other = (IndexResolution) obj; + return Objects.equals(index, other.index) + && Objects.equals(invalid, other.invalid); + } + + @Override + public int hashCode() { + return Objects.hash(index, invalid); + } + + @Override + public String toString() { + return invalid != null ? invalid : index.name(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java new file mode 100644 index 0000000000000..1800c170b7cff --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java @@ -0,0 +1,347 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
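IndexResolution, shown above, is a small result-or-error holder: valid(...) wraps a resolved EsIndex, invalid(...) and notFound(...) carry a message, and get() throws MappingException when the resolution is not valid. A hedged usage sketch, assuming it lives alongside these classes in the same package; the empty mapping passed to EsIndex is only for illustration.

```java
import static java.util.Collections.emptyMap;

// Illustrative only: how a caller might branch on an IndexResolution
// (assumes it compiles inside org.elasticsearch.xpack.sql.analysis.index).
class IndexResolutionUsageSketch {

    static String describe(IndexResolution resolution) {
        if (resolution.isValid()) {
            // safe: get() only throws MappingException when the resolution is invalid
            return "resolved index [" + resolution.get().name() + "]";
        }
        return "failed: " + resolution;   // toString() prints the invalid message
    }

    public static void main(String[] args) {
        System.out.println(describe(IndexResolution.valid(new EsIndex("emp", emptyMap()))));
        System.out.println(describe(IndexResolution.notFound("missing_index")));
    }
}
```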
+ */ +package org.elasticsearch.xpack.sql.analysis.index; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; +import org.elasticsearch.action.admin.indices.get.GetIndexRequest; +import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.xpack.sql.type.EsField; +import org.elasticsearch.xpack.sql.type.Types; +import org.elasticsearch.xpack.sql.util.CollectionUtils; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.EnumSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.TreeSet; +import java.util.regex.Pattern; + +import static java.util.Collections.emptyList; + +public class IndexResolver { + + public enum IndexType { + + INDEX("BASE TABLE"), + ALIAS("ALIAS"), + // value for user types unrecognized + UNKNOWN("UKNOWN"); + + public static final EnumSet VALID = EnumSet.of(INDEX, ALIAS); + + private final String toSql; + + IndexType(String sql) { + this.toSql = sql; + } + + public String toSql() { + return toSql; + } + + public static IndexType from(String name) { + if (name != null) { + name = name.toUpperCase(Locale.ROOT); + for (IndexType type : IndexType.VALID) { + if (type.toSql.equals(name)) { + return type; + } + } + } + return IndexType.UNKNOWN; + } + } + + public static class IndexInfo { + private final String name; + private final IndexType type; + + public IndexInfo(String name, IndexType type) { + this.name = name; + this.type = type; + } + + public String name() { + return name; + } + + public IndexType type() { + return type; + } + + @Override + public String toString() { + return name; + } + + @Override + public int hashCode() { + return Objects.hash(name, type); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + IndexResolver.IndexInfo other = (IndexResolver.IndexInfo) obj; + return Objects.equals(name, other.name) + && Objects.equals(type, other.type); + } + } + + private final Client client; + private final String clusterName; + + + public IndexResolver(Client client, String clusterName) { + this.client = client; + this.clusterName = clusterName; + } + + public String clusterName() { + return clusterName; + } + + /** + * Resolves only the names, differentiating between indices and aliases. + * This method is required since the other methods rely on mapping which is tied to an index (not an alias). 
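The IndexType enum above maps the resolver's notion of index vs. alias to and from SQL catalog table-type strings; from(String) is case-insensitive and falls back to UNKNOWN for anything it does not recognize. A small usage sketch, assuming the plugin classes are on the classpath:

```java
import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexType;

// Illustrative only: how SQL table-type strings map back to IndexType.
class IndexTypeSketch {
    public static void main(String[] args) {
        System.out.println(IndexType.from("base table")); // INDEX (comparison is case-insensitive)
        System.out.println(IndexType.from("ALIAS"));      // ALIAS
        System.out.println(IndexType.from("view"));       // UNKNOWN (not a recognized type)
        System.out.println(IndexType.from(null));         // UNKNOWN
    }
}
```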
+ */ + public void resolveNames(String indexWildcard, String javaRegex, EnumSet types, ActionListener> listener) { + + // first get aliases (if specified) + boolean retrieveAliases = CollectionUtils.isEmpty(types) || types.contains(IndexType.ALIAS); + boolean retrieveIndices = CollectionUtils.isEmpty(types) || types.contains(IndexType.INDEX); + + if (retrieveAliases) { + GetAliasesRequest aliasRequest = new GetAliasesRequest() + .local(true) + .aliases(indexWildcard) + .indicesOptions(IndicesOptions.lenientExpandOpen()); + + client.admin().indices().getAliases(aliasRequest, ActionListener.wrap(aliases -> + resolveIndices(indexWildcard, javaRegex, aliases, retrieveIndices, listener), + ex -> { + // with security, two exception can be thrown: + // INFE - if no alias matches + // security exception is the user cannot access aliases + + // in both cases, that is allowed and we continue with the indices request + if (ex instanceof IndexNotFoundException || ex instanceof ElasticsearchSecurityException) { + resolveIndices(indexWildcard, javaRegex, null, retrieveIndices, listener); + } else { + listener.onFailure(ex); + } + })); + } else { + resolveIndices(indexWildcard, javaRegex, null, retrieveIndices, listener); + } + } + + private void resolveIndices(String indexWildcard, String javaRegex, GetAliasesResponse aliases, + boolean retrieveIndices, ActionListener> listener) { + + if (retrieveIndices) { + GetIndexRequest indexRequest = new GetIndexRequest() + .local(true) + .indices(indexWildcard) + .indicesOptions(IndicesOptions.lenientExpandOpen()); + + client.admin().indices().getIndex(indexRequest, + ActionListener.wrap(indices -> filterResults(indexWildcard, javaRegex, aliases, indices, listener), + listener::onFailure)); + } else { + filterResults(indexWildcard, javaRegex, aliases, null, listener); + } + } + + private void filterResults(String indexWildcard, String javaRegex, GetAliasesResponse aliases, GetIndexResponse indices, + ActionListener> listener) { + + // since the index name does not support ?, filter the results manually + Pattern pattern = javaRegex != null ? Pattern.compile(javaRegex) : null; + + Set result = new TreeSet<>(Comparator.comparing(IndexInfo::name)); + // filter aliases (if present) + if (aliases != null) { + for (ObjectCursor> cursor : aliases.getAliases().values()) { + for (AliasMetaData amd : cursor.value) { + String alias = amd.alias(); + if (alias != null && (pattern == null || pattern.matcher(alias).matches())) { + result.add(new IndexInfo(alias, IndexType.ALIAS)); + } + } + } + } + // filter indices (if present) + String[] indicesNames = indices != null ? indices.indices() : null; + if (indicesNames != null) { + for (String indexName : indicesNames) { + if (pattern == null || pattern.matcher(indexName).matches()) { + result.add(new IndexInfo(indexName, IndexType.INDEX)); + } + } + } + + listener.onResponse(result); + } + + + /** + * Resolves a pattern to one (potentially compound meaning that spawns multiple indices) mapping. + */ + public void resolveWithSameMapping(String indexWildcard, String javaRegex, ActionListener listener) { + GetIndexRequest getIndexRequest = createGetIndexRequest(indexWildcard); + client.admin().indices().getIndex(getIndexRequest, ActionListener.wrap(response -> { + ImmutableOpenMap> mappings = response.getMappings(); + + List resolutions; + if (mappings.size() > 0) { + resolutions = new ArrayList<>(mappings.size()); + Pattern pattern = javaRegex != null ? 
Pattern.compile(javaRegex) : null; + for (ObjectObjectCursor> indexMappings : mappings) { + String concreteIndex = indexMappings.key; + if (pattern == null || pattern.matcher(concreteIndex).matches()) { + resolutions.add(buildGetIndexResult(concreteIndex, concreteIndex, indexMappings.value)); + } + } + } else { + resolutions = emptyList(); + } + + listener.onResponse(merge(resolutions, indexWildcard)); + }, listener::onFailure)); + } + + static IndexResolution merge(List resolutions, String indexWildcard) { + IndexResolution merged = null; + for (IndexResolution resolution : resolutions) { + // everything that follows gets compared + if (!resolution.isValid()) { + return resolution; + } + // initialize resolution on first run + if (merged == null) { + merged = resolution; + } + // need the same mapping across all resolutions + if (!merged.get().mapping().equals(resolution.get().mapping())) { + return IndexResolution.invalid( + "[" + indexWildcard + "] points to indices [" + resolution.get().name() + "] " + + "and [" + resolution.get().name() + "] which have different mappings. " + + "When using multiple indices, the mappings must be identical."); + } + } + if (merged != null) { + // at this point, we are sure there's the same mapping across all (if that's the case) indices + // to keep things simple, use the given pattern as index name + merged = IndexResolution.valid(new EsIndex(indexWildcard, merged.get().mapping())); + } else { + merged = IndexResolution.notFound(indexWildcard); + } + return merged; + } + + /** + * Resolves a pattern to multiple, separate indices. + */ + public void resolveAsSeparateMappings(String indexWildcard, String javaRegex, ActionListener> listener) { + GetIndexRequest getIndexRequest = createGetIndexRequest(indexWildcard); + client.admin().indices().getIndex(getIndexRequest, ActionListener.wrap(getIndexResponse -> { + ImmutableOpenMap> mappings = getIndexResponse.getMappings(); + List results = new ArrayList<>(mappings.size()); + Pattern pattern = javaRegex != null ? Pattern.compile(javaRegex) : null; + for (ObjectObjectCursor> indexMappings : mappings) { + /* + * We support wildcard expressions here, and it's only for commands that only perform the get index call. + * We can and simply have to use the concrete index name and show that to users. + * Get index against an alias with security enabled, where the user has only access to get mappings for the alias + * and not the concrete index: there is a well known information leak of the concrete index name in the response. + */ + String concreteIndex = indexMappings.key; + if (pattern == null || pattern.matcher(concreteIndex).matches()) { + IndexResolution getIndexResult = buildGetIndexResult(concreteIndex, concreteIndex, indexMappings.value); + if (getIndexResult.isValid()) { + results.add(getIndexResult.get()); + } + } + } + results.sort(Comparator.comparing(EsIndex::name)); + listener.onResponse(results); + }, listener::onFailure)); + } + + private static GetIndexRequest createGetIndexRequest(String index) { + return new GetIndexRequest() + .local(true) + .indices(index) + .features(Feature.MAPPINGS) + //lenient because we throw our own errors looking at the response e.g. 
if something was not resolved + //also because this way security doesn't throw authorization exceptions but rather honours ignore_unavailable + .indicesOptions(IndicesOptions.lenientExpandOpen()); + } + + private static IndexResolution buildGetIndexResult(String concreteIndex, String indexOrAlias, + ImmutableOpenMap mappings) { + + // Make sure that the index contains only a single type + MappingMetaData singleType = null; + List typeNames = null; + for (ObjectObjectCursor type : mappings) { + //Default mappings are ignored as they are applied to each type. Each type alone holds all of its fields. + if ("_default_".equals(type.key)) { + continue; + } + if (singleType != null) { + // There are more than one types + if (typeNames == null) { + typeNames = new ArrayList<>(); + typeNames.add(singleType.type()); + } + typeNames.add(type.key); + } + singleType = type.value; + } + + if (singleType == null) { + return IndexResolution.invalid("[" + indexOrAlias + "] doesn't have any types so it is incompatible with sql"); + } else if (typeNames != null) { + Collections.sort(typeNames); + return IndexResolution.invalid( + "[" + indexOrAlias + "] contains more than one type " + typeNames + " so it is incompatible with sql"); + } else { + try { + Map mapping = Types.fromEs(singleType.sourceAsMap()); + return IndexResolution.valid(new EsIndex(indexOrAlias, mapping)); + } catch (MappingException ex) { + return IndexResolution.invalid(ex.getMessage()); + } + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/MappingException.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/MappingException.java new file mode 100644 index 0000000000000..b510aa0483093 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/MappingException.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.analysis.index; + +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.sql.ClientSqlException; + +public class MappingException extends ClientSqlException { + + public MappingException(String message, Object... args) { + super(message, args); + } + + public MappingException(String message, Throwable ex) { + super(message, ex); + } + + @Override + public RestStatus status() { + return RestStatus.BAD_REQUEST; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/capabilities/Resolvable.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/capabilities/Resolvable.java new file mode 100644 index 0000000000000..a627f06165573 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/capabilities/Resolvable.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
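buildGetIndexResult above only accepts indices that still carry a single mapping type (ignoring _default_), since SQL needs one flat mapping per table. The same rule, isolated as a standalone sketch over a plain map; the names used here are illustrative.

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Toy version of the "single mapping type" rule: drop _default_, then require
// exactly one remaining type, otherwise report all type names.
public class SingleTypeCheckSketch {

    static String check(String index, Map<String, Object> typesToMappings) {
        List<String> types = new ArrayList<>();
        for (String type : typesToMappings.keySet()) {
            if (!"_default_".equals(type)) {
                types.add(type);
            }
        }
        if (types.isEmpty()) {
            return "[" + index + "] doesn't have any types so it is incompatible with sql";
        }
        if (types.size() > 1) {
            Collections.sort(types);
            return "[" + index + "] contains more than one type " + types;
        }
        return "ok: single type [" + types.get(0) + "]";
    }

    public static void main(String[] args) {
        Map<String, Object> mappings = new LinkedHashMap<>();
        mappings.put("_default_", new Object());
        mappings.put("doc", new Object());
        System.out.println(check("emp", mappings)); // ok: single type [doc]
    }
}
```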
+ */ +package org.elasticsearch.xpack.sql.capabilities; + +public interface Resolvable { + + boolean resolved(); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/capabilities/Resolvables.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/capabilities/Resolvables.java new file mode 100644 index 0000000000000..222ba7a87c068 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/capabilities/Resolvables.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.capabilities; + +public abstract class Resolvables { + + public static boolean resolved(Iterable resolvables) { + for (Resolvable resolvable : resolvables) { + if (!resolvable.resolved()) { + return false; + } + } + return true; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/capabilities/Unresolvable.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/capabilities/Unresolvable.java new file mode 100644 index 0000000000000..9cd2e6416974c --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/capabilities/Unresolvable.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.capabilities; + + +public interface Unresolvable extends Resolvable { + + String UNRESOLVED_PREFIX = "?"; + + @Override + default boolean resolved() { + return false; + } + + String unresolvedMessage(); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/capabilities/UnresolvedException.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/capabilities/UnresolvedException.java new file mode 100644 index 0000000000000..d6c7543f6afa2 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/capabilities/UnresolvedException.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.capabilities; + +import org.elasticsearch.xpack.sql.ServerSqlException; + +import java.util.Locale; + +import static java.lang.String.format; + +/** + * Thrown when we accidentally attempt to resolve something on on an unresolved entity. Throwing this + * is always a bug. + */ +public class UnresolvedException extends ServerSqlException { + public UnresolvedException(String action, Object target) { + super(format(Locale.ROOT, "Invalid call to %s on an unresolved object %s", action, target)); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/PlanExecutor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/PlanExecutor.java new file mode 100644 index 0000000000000..8c58769b75962 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/PlanExecutor.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.sql.analysis.analyzer.PreAnalyzer; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver; +import org.elasticsearch.xpack.sql.execution.search.SourceGenerator; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.optimizer.Optimizer; +import org.elasticsearch.xpack.sql.plan.physical.EsQueryExec; +import org.elasticsearch.xpack.sql.planner.Planner; +import org.elasticsearch.xpack.sql.planner.PlanningException; +import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.session.Configuration; +import org.elasticsearch.xpack.sql.session.Cursor; +import org.elasticsearch.xpack.sql.session.RowSet; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; + +import java.util.List; + +public class PlanExecutor { + private final Client client; + private final NamedWriteableRegistry writableRegistry; + + private final FunctionRegistry functionRegistry; + + private final IndexResolver indexResolver; + private final PreAnalyzer preAnalyzer; + private final Optimizer optimizer; + private final Planner planner; + + public PlanExecutor(Client client, IndexResolver indexResolver, NamedWriteableRegistry writeableRegistry) { + this.client = client; + this.writableRegistry = writeableRegistry; + + this.indexResolver = indexResolver; + this.functionRegistry = new FunctionRegistry(); + + this.preAnalyzer = new PreAnalyzer(); + this.optimizer = new Optimizer(); + this.planner = new Planner(); + } + + public NamedWriteableRegistry writableRegistry() { + return writableRegistry; + } + + private SqlSession newSession(Configuration cfg) { + return new SqlSession(cfg, client, functionRegistry, indexResolver, preAnalyzer, optimizer, planner); + } + + public void searchSource(Configuration cfg, String sql, List params, ActionListener listener) { + newSession(cfg).sqlExecutable(sql, params, ActionListener.wrap(exec -> { + if (exec instanceof EsQueryExec) { + EsQueryExec e = (EsQueryExec) exec; + listener.onResponse(SourceGenerator.sourceBuilder(e.queryContainer(), cfg.filter(), cfg.pageSize())); + } else { + listener.onFailure(new PlanningException("Cannot generate a query DSL for {}", sql)); + } + }, listener::onFailure)); + } + + public void sql(Configuration cfg, String sql, List params, ActionListener listener) { + newSession(cfg).sql(sql, params, listener); + } + + public void nextPage(Configuration cfg, Cursor cursor, ActionListener listener) { + cursor.nextPage(cfg, client, writableRegistry, listener); + } + + public void cleanCursor(Configuration cfg, Cursor cursor, ActionListener listener) { + cursor.clear(cfg, client, listener); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/AggRef.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/AggRef.java new file mode 100644 index 0000000000000..5c9ec6dc623e5 --- /dev/null +++ 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/AggRef.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search; + +/** + * Reference to a ES aggregation (which can be either a GROUP BY or Metric agg). + */ +public abstract class AggRef implements FieldExtraction { + + @Override + public void collectFields(SqlSourceBuilder sourceBuilder) { + // Aggregations do not need any special fields + } + + @Override + public boolean supportedByAggsOnlyQuery() { + return true; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java new file mode 100644 index 0000000000000..fe9479f3c1aa4 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursor.java @@ -0,0 +1,208 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; +import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.execution.search.extractor.BucketExtractor; +import org.elasticsearch.xpack.sql.querydsl.agg.Aggs; +import org.elasticsearch.xpack.sql.session.Configuration; +import org.elasticsearch.xpack.sql.session.Cursor; +import org.elasticsearch.xpack.sql.session.RowSet; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Cursor for composite aggregation (GROUP BY). + * Stores the query that gets updated/slides across requests. 
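The cursor introduced just above carries everything needed to resume a GROUP BY query on the next request: the target indices, the serialized search source already positioned after the last bucket, the bucket extractors, and the remaining row limit. A purely conceptual, framework-free sketch of that state object (this is not the plugin's wire format):

```java
import java.util.Arrays;

// Conceptual sketch of the state a composite-agg cursor keeps between requests.
// The real cursor additionally (de)serializes this state through the transport layer.
final class CursorStateSketch {
    final String[] indices;       // which indices the query targets
    final byte[] nextQuery;       // serialized search source, already positioned "after" the last bucket
    final int remainingLimit;     // how many rows the client is still allowed to receive

    CursorStateSketch(String[] indices, byte[] nextQuery, int remainingLimit) {
        this.indices = indices;
        this.nextQuery = nextQuery;
        this.remainingLimit = remainingLimit;
    }

    @Override
    public String toString() {
        return "cursor on " + Arrays.toString(indices)
                + " with " + remainingLimit + " row(s) left and "
                + nextQuery.length + " byte(s) of query state";
    }

    public static void main(String[] args) {
        System.out.println(new CursorStateSketch(new String[] { "emp" }, new byte[] { 1, 2, 3 }, 97));
    }
}
```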
+ */ +public class CompositeAggregationCursor implements Cursor { + + private final Logger log = Loggers.getLogger(getClass()); + + public static final String NAME = "c"; + + private final String[] indices; + private final byte[] nextQuery; + private final List extractors; + private final int limit; + + CompositeAggregationCursor(byte[] next, List exts, int remainingLimit, String... indices) { + this.indices = indices; + this.nextQuery = next; + this.extractors = exts; + this.limit = remainingLimit; + } + + public CompositeAggregationCursor(StreamInput in) throws IOException { + indices = in.readStringArray(); + nextQuery = in.readByteArray(); + limit = in.readVInt(); + + extractors = in.readNamedWriteableList(BucketExtractor.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeStringArray(indices); + out.writeByteArray(nextQuery); + out.writeVInt(limit); + + out.writeNamedWriteableList(extractors); + } + + @Override + public String getWriteableName() { + return NAME; + } + + String[] indices() { + return indices; + } + + byte[] next() { + return nextQuery; + } + + List extractors() { + return extractors; + } + + int limit() { + return limit; + } + + @Override + public void nextPage(Configuration cfg, Client client, NamedWriteableRegistry registry, ActionListener listener) { + SearchSourceBuilder q; + try { + q = deserializeQuery(registry, nextQuery); + } catch (Exception ex) { + listener.onFailure(ex); + return; + } + + SearchSourceBuilder query = q; + if (log.isTraceEnabled()) { + log.trace("About to execute composite query {} on {}", StringUtils.toString(query), indices); + } + + SearchRequest search = Querier.prepareRequest(client, query, cfg.pageTimeout(), indices); + + client.search(search, ActionListener.wrap(r -> { + updateCompositeAfterKey(r, query); + CompositeAggsRowSet rowSet = new CompositeAggsRowSet(extractors, r, limit, + serializeQuery(query), indices); + listener.onResponse(rowSet); + }, listener::onFailure)); + } + + static CompositeAggregation getComposite(SearchResponse response) { + Aggregation agg = response.getAggregations().get(Aggs.ROOT_GROUP_NAME); + if (agg == null) { + return null; + } + + if (agg instanceof CompositeAggregation) { + return (CompositeAggregation) agg; + } + + throw new SqlIllegalArgumentException("Unrecognized root group found; {}", agg.getClass()); + } + + static void updateCompositeAfterKey(SearchResponse r, SearchSourceBuilder next) { + CompositeAggregation composite = getComposite(r); + + if (composite == null) { + throw new SqlIllegalArgumentException("Invalid server response; no group-by detected"); + } + + Map afterKey = composite.afterKey(); + // a null after-key means done + if (afterKey != null) { + AggregationBuilder aggBuilder = next.aggregations().getAggregatorFactories().get(0); + // update after-key with the new value + if (aggBuilder instanceof CompositeAggregationBuilder) { + CompositeAggregationBuilder comp = (CompositeAggregationBuilder) aggBuilder; + comp.aggregateAfter(afterKey); + } else { + throw new SqlIllegalArgumentException("Invalid client request; expected a group-by but instead got {}", aggBuilder); + } + } + } + + /** + * Deserializes the search source from a byte array. 
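updateCompositeAfterKey above is what makes the cursor progress: each response's after_key is written back into the composite aggregation builder so the next request resumes where the previous one stopped, and a null after-key means the groups are exhausted. The same paging idea replayed over an in-memory key list, purely as illustration and without any Elasticsearch calls:

```java
import java.util.ArrayList;
import java.util.List;

// Toy after-key pagination: fetch pageSize keys strictly after the previous page's
// last key, the same way a composite aggregation is resumed from its after_key.
public class AfterKeyPagingSketch {

    static List<String> page(List<String> sortedKeys, String afterKey, int pageSize) {
        List<String> page = new ArrayList<>();
        for (String key : sortedKeys) {
            if ((afterKey == null || key.compareTo(afterKey) > 0) && page.size() < pageSize) {
                page.add(key);
            }
        }
        return page;
    }

    public static void main(String[] args) {
        List<String> groups = List.of("F", "M", "X");
        String after = null;
        List<String> current;
        while (!(current = page(groups, after, 2)).isEmpty()) {
            System.out.println(current);
            after = current.get(current.size() - 1);   // becomes the next request's after-key
        }
        // prints [F, M] then [X]
    }
}
```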
+ */ + static SearchSourceBuilder deserializeQuery(NamedWriteableRegistry registry, byte[] source) throws IOException { + try (NamedWriteableAwareStreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(source), registry)) { + return new SearchSourceBuilder(in); + } + } + + /** + * Serializes the search source to a byte array. + */ + static byte[] serializeQuery(SearchSourceBuilder source) throws IOException { + if (source == null) { + return new byte[0]; + } + + try (BytesStreamOutput out = new BytesStreamOutput()) { + source.writeTo(out); + return BytesReference.toBytes(out.bytes()); + } + } + + + @Override + public void clear(Configuration cfg, Client client, ActionListener listener) { + listener.onResponse(true); + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(indices), Arrays.hashCode(nextQuery), extractors, limit); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + CompositeAggregationCursor other = (CompositeAggregationCursor) obj; + return Arrays.equals(indices, other.indices) + && Arrays.equals(nextQuery, other.nextQuery) + && Objects.equals(extractors, other.extractors) + && Objects.equals(limit, other.limit); + } + + @Override + public String toString() { + return "cursor for composite on index [" + Arrays.toString(indices) + "]"; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggsRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggsRowSet.java new file mode 100644 index 0000000000000..a9ca179147ea9 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggsRowSet.java @@ -0,0 +1,100 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; +import org.elasticsearch.xpack.sql.execution.search.extractor.BucketExtractor; +import org.elasticsearch.xpack.sql.session.AbstractRowSet; +import org.elasticsearch.xpack.sql.session.Cursor; +import org.elasticsearch.xpack.sql.session.RowSet; + +import java.util.List; + +import static java.util.Collections.emptyList; + +/** + * {@link RowSet} specific to (GROUP BY) aggregation. + */ +class CompositeAggsRowSet extends AbstractRowSet { + private final List exts; + + private final List buckets; + + private final Cursor cursor; + + private final int size; + private int row = 0; + + CompositeAggsRowSet(List exts, SearchResponse response, int limit, byte[] next, String... indices) { + this.exts = exts; + + CompositeAggregation composite = CompositeAggregationCursor.getComposite(response); + if (composite != null) { + buckets = composite.getBuckets(); + } else { + buckets = emptyList(); + } + + // page size + size = limit < 0 ? 
buckets.size() : Math.min(buckets.size(), limit); + + if (next == null) { + cursor = Cursor.EMPTY; + } else { + // compute remaining limit + int remainingLimit = limit - size; + // if the computed limit is zero, or the size is zero it means either there's nothing left or the limit has been reached + // note that a composite agg might be valid but return zero groups (since these can be filtered with HAVING/bucket selector) + // however the Querier takes care of that and keeps making requests until either the query is invalid or at least one response + // is returned + if (next == null || size == 0 || remainingLimit == 0) { + cursor = Cursor.EMPTY; + } else { + cursor = new CompositeAggregationCursor(next, exts, remainingLimit, indices); + } + } + } + + @Override + protected Object getColumn(int column) { + return exts.get(column).extract(buckets.get(row)); + } + + @Override + public int columnCount() { + return exts.size(); + } + + @Override + protected boolean doHasCurrent() { + return row < size; + } + + @Override + protected boolean doNext() { + if (row < size - 1) { + row++; + return true; + } + return false; + } + + @Override + protected void doReset() { + row = 0; + } + + @Override + public int size() { + return size; + } + + @Override + public Cursor nextPageCursor() { + return cursor; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/FieldExtraction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/FieldExtraction.java new file mode 100644 index 0000000000000..71e8db18d5094 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/FieldExtraction.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search; + +import org.elasticsearch.search.builder.SearchSourceBuilder; + +/** + * An interface for something that needs to extract field(s) from a result. + */ +public interface FieldExtraction { + + /** + * Add whatever is necessary to the {@link SearchSourceBuilder} + * in order to fetch the field. This can include tracking the score, + * {@code _source} fields, doc values fields, and script fields. + */ + void collectFields(SqlSourceBuilder sourceBuilder); + + /** + * Is this aggregation supported in an "aggregation only" query + * ({@code true}) or should it force a scroll query ({@code false})? + */ + boolean supportedByAggsOnlyQuery(); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java new file mode 100644 index 0000000000000..3c10f08c53a8d --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java @@ -0,0 +1,437 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
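The constructor above trims the returned buckets to the remaining LIMIT and only creates a follow-up cursor when there is both a next query and rows left to serve. That arithmetic, isolated as a tiny sketch with the same convention as the code above (a negative limit means no limit):

```java
// Toy version of the paging decision in CompositeAggsRowSet: how many of the returned
// buckets to expose now, and whether a follow-up cursor is needed at all.
public class RemainingLimitSketch {

    static String decide(int bucketsReturned, int limit, boolean hasNextQuery) {
        int size = limit < 0 ? bucketsReturned : Math.min(bucketsReturned, limit);
        int remaining = limit - size;
        boolean needCursor = hasNextQuery && size > 0 && remaining != 0;
        return "serve " + size + " row(s), " + (needCursor ? remaining + " still allowed" : "no further pages");
    }

    public static void main(String[] args) {
        System.out.println(decide(10, 25, true));  // serve 10 row(s), 15 still allowed
        System.out.println(decide(10, 10, true));  // serve 10 row(s), no further pages (limit reached)
        System.out.println(decide(0, 25, true));   // serve 0 row(s), no further pages
    }
}
```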
+ */ +package org.elasticsearch.xpack.sql.execution.search; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.search.aggregations.bucket.filter.Filters; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.execution.search.extractor.BucketExtractor; +import org.elasticsearch.xpack.sql.execution.search.extractor.CompositeKeyExtractor; +import org.elasticsearch.xpack.sql.execution.search.extractor.ComputingExtractor; +import org.elasticsearch.xpack.sql.execution.search.extractor.ConstantExtractor; +import org.elasticsearch.xpack.sql.execution.search.extractor.FieldHitExtractor; +import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor; +import org.elasticsearch.xpack.sql.execution.search.extractor.MetricAggExtractor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.AggExtractorInput; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.AggPathInput; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.HitExtractorInput; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ReferenceInput; +import org.elasticsearch.xpack.sql.querydsl.agg.Aggs; +import org.elasticsearch.xpack.sql.querydsl.container.ComputedRef; +import org.elasticsearch.xpack.sql.querydsl.container.GlobalCountRef; +import org.elasticsearch.xpack.sql.querydsl.container.GroupByRef; +import org.elasticsearch.xpack.sql.querydsl.container.MetricAggRef; +import org.elasticsearch.xpack.sql.querydsl.container.QueryContainer; +import org.elasticsearch.xpack.sql.querydsl.container.ScriptFieldRef; +import org.elasticsearch.xpack.sql.querydsl.container.SearchHitFieldRef; +import org.elasticsearch.xpack.sql.session.Configuration; +import org.elasticsearch.xpack.sql.session.Rows; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.type.Schema; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; + +import static java.util.Collections.singletonList; +// TODO: add retry/back-off +public class Querier { + + private final Logger log = Loggers.getLogger(getClass()); + + private final TimeValue keepAlive, timeout; + private final int size; + private final Client client; + @Nullable + private final QueryBuilder filter; + + public Querier(Client client, Configuration cfg) { + this(client, 
cfg.requestTimeout(), cfg.pageTimeout(), cfg.filter(), cfg.pageSize()); + } + + public Querier(Client client, TimeValue keepAlive, TimeValue timeout, QueryBuilder filter, int size) { + this.client = client; + this.keepAlive = keepAlive; + this.timeout = timeout; + this.filter = filter; + this.size = size; + } + + public void query(Schema schema, QueryContainer query, String index, ActionListener listener) { + // prepare the request + SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(query, filter, size); + // set query timeout + if (timeout.getSeconds() > 0) { + sourceBuilder.timeout(timeout); + } + + if (log.isTraceEnabled()) { + log.trace("About to execute query {} on {}", StringUtils.toString(sourceBuilder), index); + } + + SearchRequest search = prepareRequest(client, sourceBuilder, timeout, index); + + ActionListener l; + if (query.isAggsOnly()) { + if (query.aggs().useImplicitGroupBy()) { + l = new ImplicitGroupActionListener(listener, client, timeout, schema, query, search); + } else { + l = new CompositeActionListener(listener, client, timeout, schema, query, search); + } + } else { + search.scroll(keepAlive); + l = new ScrollActionListener(listener, client, timeout, schema, query); + } + + client.search(search, l); + } + + public static SearchRequest prepareRequest(Client client, SearchSourceBuilder source, TimeValue timeout, String... indices) { + SearchRequest search = client.prepareSearch(indices).setSource(source).setTimeout(timeout).request(); + search.allowPartialSearchResults(false); + return search; + } + + /** + * Dedicated listener for implicit/default group-by queries that return only _one_ result. + */ + static class ImplicitGroupActionListener extends BaseAggActionListener { + + private static List EMPTY_BUCKET = singletonList(new Bucket() { + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + throw new SqlIllegalArgumentException("No group-by/aggs defined"); + } + + @Override + public Object getKey() { + throw new SqlIllegalArgumentException("No group-by/aggs defined"); + } + + @Override + public String getKeyAsString() { + throw new SqlIllegalArgumentException("No group-by/aggs defined"); + } + + @Override + public long getDocCount() { + throw new SqlIllegalArgumentException("No group-by/aggs defined"); + } + + @Override + public Aggregations getAggregations() { + throw new SqlIllegalArgumentException("No group-by/aggs defined"); + } + }); + + ImplicitGroupActionListener(ActionListener listener, Client client, TimeValue keepAlive, Schema schema, + QueryContainer query, SearchRequest request) { + super(listener, client, keepAlive, schema, query, request); + } + + @Override + protected void handleResponse(SearchResponse response, ActionListener listener) { + Aggregations aggs = response.getAggregations(); + if (aggs != null) { + Aggregation agg = aggs.get(Aggs.ROOT_GROUP_NAME); + if (agg instanceof Filters) { + handleBuckets(((Filters) agg).getBuckets(), response); + } else { + throw new SqlIllegalArgumentException("Unrecognized root group found; {}", agg.getClass()); + } + } + // can happen when only a count is requested which is derived from the response + else { + handleBuckets(EMPTY_BUCKET, response); + } + } + + private void handleBuckets(List buckets, SearchResponse response) { + if (buckets.size() == 1) { + Bucket implicitGroup = buckets.get(0); + List extractors = initBucketExtractors(response); + Object[] values = new Object[extractors.size()]; + for (int i = 0; i < values.length; 
i++) { + values[i] = extractors.get(i).extract(implicitGroup); + } + listener.onResponse(Rows.singleton(schema, values)); + + } else if (buckets.isEmpty()) { + listener.onResponse(Rows.empty(schema)); + + } else { + throw new SqlIllegalArgumentException("Too many groups returned by the implicit group; expected 1, received {}", + buckets.size()); + } + } + } + + + /** + * Dedicated listener for composite aggs/group-by results. + */ + static class CompositeActionListener extends BaseAggActionListener { + + CompositeActionListener(ActionListener<SchemaRowSet> listener, Client client, TimeValue keepAlive, + Schema schema, QueryContainer query, SearchRequest request) { + super(listener, client, keepAlive, schema, query, request); + } + + + @Override + protected void handleResponse(SearchResponse response, ActionListener<SchemaRowSet> listener) { + // there are some results + if (response.getAggregations().asList().size() > 0) { + CompositeAggregationCursor.updateCompositeAfterKey(response, request.source()); + + byte[] nextSearch = null; + try { + nextSearch = CompositeAggregationCursor.serializeQuery(request.source()); + } catch (Exception ex) { + listener.onFailure(ex); + return; + } + + listener.onResponse( + new SchemaCompositeAggsRowSet(schema, initBucketExtractors(response), response, query.limit(), + nextSearch, + request.indices())); + } + // no results + else { + listener.onResponse(Rows.empty(schema)); + } + } + } + + abstract static class BaseAggActionListener extends BaseActionListener { + final QueryContainer query; + final SearchRequest request; + + BaseAggActionListener(ActionListener<SchemaRowSet> listener, Client client, TimeValue keepAlive, Schema schema, + QueryContainer query, SearchRequest request) { + super(listener, client, keepAlive, schema); + + this.query = query; + this.request = request; + } + + protected List<BucketExtractor> initBucketExtractors(SearchResponse response) { + // create response extractors for the first time + List<FieldExtraction> refs = query.columns(); + + List<BucketExtractor> exts = new ArrayList<>(refs.size()); + for (FieldExtraction ref : refs) { + exts.add(createExtractor(ref, new ConstantExtractor(response.getHits().getTotalHits()))); + } + return exts; + } + + private BucketExtractor createExtractor(FieldExtraction ref, BucketExtractor totalCount) { + if (ref instanceof GroupByRef) { + GroupByRef r = (GroupByRef) ref; + return new CompositeKeyExtractor(r.key(), r.property(), r.timeZone()); + } + + if (ref instanceof MetricAggRef) { + MetricAggRef r = (MetricAggRef) ref; + return new MetricAggExtractor(r.name(), r.property(), r.innerKey()); + } + + if (ref == GlobalCountRef.INSTANCE) { + return totalCount; + } + + if (ref instanceof ComputedRef) { + ProcessorDefinition proc = ((ComputedRef) ref).processor(); + + // wrap only agg inputs + proc = proc.transformDown(l -> { + BucketExtractor be = createExtractor(l.context(), totalCount); + return new AggExtractorInput(l.location(), l.expression(), l.action(), be); + }, AggPathInput.class); + + return new ComputingExtractor(proc.asProcessor()); + } + + throw new SqlIllegalArgumentException("Unexpected value reference {}", ref.getClass()); + } + } + + /** + * Dedicated listener for column retrieval/non-grouped queries (scrolls).
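 + * Sets up one {@link HitExtractor} per projected column and, based on the returned scroll id and the query limit, either keeps the scroll open for the next page or clears it right away.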
+ */ + static class ScrollActionListener extends BaseActionListener { + private final QueryContainer query; + + ScrollActionListener(ActionListener<SchemaRowSet> listener, Client client, TimeValue keepAlive, + Schema schema, QueryContainer query) { + super(listener, client, keepAlive, schema); + this.query = query; + } + + @Override + protected void handleResponse(SearchResponse response, ActionListener<SchemaRowSet> listener) { + SearchHit[] hits = response.getHits().getHits(); + + // create response extractors for the first time + List<FieldExtraction> refs = query.columns(); + + List<HitExtractor> exts = new ArrayList<>(refs.size()); + for (FieldExtraction ref : refs) { + exts.add(createExtractor(ref)); + } + + // there are some results + if (hits.length > 0) { + String scrollId = response.getScrollId(); + + // if there's an id, try to setup next scroll + if (scrollId != null && + // is all the content already retrieved? + (Boolean.TRUE.equals(response.isTerminatedEarly()) || response.getHits().getTotalHits() == hits.length + // or maybe the limit has been reached + || (hits.length >= query.limit() && query.limit() > -1))) { + // if so, clear the scroll + clear(response.getScrollId(), ActionListener.wrap( + succeeded -> listener.onResponse(new SchemaSearchHitRowSet(schema, exts, hits, query.limit(), null)), + listener::onFailure)); + } else { + listener.onResponse(new SchemaSearchHitRowSet(schema, exts, hits, query.limit(), scrollId)); + } + } + // no hits + else { + clear(response.getScrollId(), ActionListener.wrap(succeeded -> listener.onResponse(Rows.empty(schema)), + listener::onFailure)); + } + } + + private HitExtractor createExtractor(FieldExtraction ref) { + if (ref instanceof SearchHitFieldRef) { + SearchHitFieldRef f = (SearchHitFieldRef) ref; + return new FieldHitExtractor(f.name(), f.useDocValue(), f.hitName()); + } + + if (ref instanceof ScriptFieldRef) { + ScriptFieldRef f = (ScriptFieldRef) ref; + return new FieldHitExtractor(f.name(), true); + } + + if (ref instanceof ComputedRef) { + ProcessorDefinition proc = ((ComputedRef) ref).processor(); + // collect hitNames + Set<String> hitNames = new LinkedHashSet<>(); + proc = proc.transformDown(l -> { + HitExtractor he = createExtractor(l.context()); + hitNames.add(he.hitName()); + + if (hitNames.size() > 1) { + throw new SqlIllegalArgumentException("Multi-level nested fields [{}] not supported yet", hitNames); + } + + return new HitExtractorInput(l.location(), l.expression(), he); + }, ReferenceInput.class); + String hitName = null; + if (hitNames.size() == 1) { + hitName = hitNames.iterator().next(); + } + return new ComputingExtractor(proc.asProcessor(), hitName); + } + + throw new SqlIllegalArgumentException("Unexpected value reference {}", ref.getClass()); + } + } + + /** + * Base listener class providing clean-up and exception handling. + * Handles both scroll queries (scan/scroll) and regular/composite-aggs queries. + */ + abstract static class BaseActionListener implements ActionListener<SearchResponse> { + + final ActionListener<SchemaRowSet> listener; + + final Client client; + final TimeValue keepAlive; + final Schema schema; + + BaseActionListener(ActionListener<SchemaRowSet> listener, Client client, TimeValue keepAlive, Schema schema) { + this.listener = listener; + + this.client = client; + this.keepAlive = keepAlive; + this.schema = schema; + } + + // TODO: need to handle rejections plus check failures (shard size, etc...)
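 + // note: any shard failure fails the whole request up-front; otherwise the response is handed to the subclass, with a scroll clean-up hooked in should that handling fail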
+ @Override + public void onResponse(final SearchResponse response) { + try { + ShardSearchFailure[] failure = response.getShardFailures(); + if (!CollectionUtils.isEmpty(failure)) { + cleanup(response, new SqlIllegalArgumentException(failure[0].reason(), failure[0].getCause())); + } else { + handleResponse(response, ActionListener.wrap(listener::onResponse, e -> cleanup(response, e))); + } + } catch (Exception ex) { + cleanup(response, ex); + } + } + + protected abstract void handleResponse(SearchResponse response, ActionListener<SchemaRowSet> listener); + + // clean-up the scroll in case of exception + protected final void cleanup(SearchResponse response, Exception ex) { + if (response != null && response.getScrollId() != null) { + client.prepareClearScroll().addScrollId(response.getScrollId()) + // in case of failure, report the initial exception instead of the one resulting from cleaning the scroll + .execute(ActionListener.wrap(r -> listener.onFailure(ex), e -> { + ex.addSuppressed(e); + listener.onFailure(ex); + })); + } else { + listener.onFailure(ex); + } + } + + protected final void clear(String scrollId, ActionListener<Boolean> listener) { + if (scrollId != null) { + client.prepareClearScroll().addScrollId(scrollId).execute( + ActionListener.wrap( + clearScrollResponse -> listener.onResponse(clearScrollResponse.isSucceeded()), + listener::onFailure)); + } else { + listener.onResponse(false); + } + } + + @Override + public final void onFailure(Exception ex) { + listener.onFailure(ex); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaCompositeAggsRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaCompositeAggsRowSet.java new file mode 100644 index 0000000000000..7c646fbb0b7f1 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaCompositeAggsRowSet.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.xpack.sql.execution.search.extractor.BucketExtractor; +import org.elasticsearch.xpack.sql.session.RowSet; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.type.Schema; + +import java.util.List; + +/** + * Extension of the {@link RowSet} over a composite agg, extending it to provide its schema. + * Used for the initial response. + */ +class SchemaCompositeAggsRowSet extends CompositeAggsRowSet implements SchemaRowSet { + + private final Schema schema; + + SchemaCompositeAggsRowSet(Schema schema, List<BucketExtractor> exts, SearchResponse response, int limitAggs, byte[] next, + String...
indices) { + super(exts, response, limitAggs, next, indices); + this.schema = schema; + } + + @Override + public Schema schema() { + return schema; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaSearchHitRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaSearchHitRowSet.java new file mode 100644 index 0000000000000..7ec20a93c0945 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaSearchHitRowSet.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search; + +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.type.Schema; + +import java.util.List; + +/** + * Initial results from a scroll search. Distinct from the following pages + * because it has a {@link Schema} available. See {@link SearchHitRowSet} + * for the next pages. + */ +class SchemaSearchHitRowSet extends SearchHitRowSet implements SchemaRowSet { + private final Schema schema; + + SchemaSearchHitRowSet(Schema schema, List exts, SearchHit[] hits, int limitHits, String scrollId) { + super(exts, hits, limitHits, scrollId); + this.schema = schema; + } + + @Override + public Schema schema() { + return schema; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursor.java new file mode 100644 index 0000000000000..0fa20022f1cd3 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursor.java @@ -0,0 +1,127 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.execution.search; + + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.ClearScrollRequest; +import org.elasticsearch.action.search.ClearScrollResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor; +import org.elasticsearch.xpack.sql.session.Configuration; +import org.elasticsearch.xpack.sql.session.Cursor; +import org.elasticsearch.xpack.sql.session.RowSet; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +public class ScrollCursor implements Cursor { + + private final Logger log = Loggers.getLogger(getClass()); + + public static final String NAME = "s"; + + private final String scrollId; + private final List extractors; + private final int limit; + + public ScrollCursor(String scrollId, List extractors, int limit) { + this.scrollId = scrollId; + this.extractors = extractors; + this.limit = limit; + } + + public ScrollCursor(StreamInput in) throws IOException { + scrollId = in.readString(); + limit = in.readVInt(); + + extractors = in.readNamedWriteableList(HitExtractor.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(scrollId); + out.writeVInt(limit); + + out.writeNamedWriteableList(extractors); + } + + @Override + public String getWriteableName() { + return NAME; + } + + String scrollId() { + return scrollId; + } + + List extractors() { + return extractors; + } + + int limit() { + return limit; + } + @Override + public void nextPage(Configuration cfg, Client client, NamedWriteableRegistry registry, ActionListener listener) { + log.trace("About to execute scroll query {}", scrollId); + + SearchScrollRequest request = new SearchScrollRequest(scrollId).scroll(cfg.pageTimeout()); + client.searchScroll(request, ActionListener.wrap((SearchResponse response) -> { + SearchHitRowSet rowSet = new SearchHitRowSet(extractors, response.getHits().getHits(), + limit, response.getScrollId()); + if (rowSet.nextPageCursor() == Cursor.EMPTY ) { + // we are finished with this cursor, let's clean it before continuing + clear(cfg, client, ActionListener.wrap(success -> listener.onResponse(rowSet), listener::onFailure)); + } else { + listener.onResponse(rowSet); + } + }, listener::onFailure)); + } + + @Override + public void clear(Configuration cfg, Client client, ActionListener listener) { + cleanCursor(client, scrollId, + ActionListener.wrap( + clearScrollResponse -> listener.onResponse(clearScrollResponse.isSucceeded()), + listener::onFailure)); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + ScrollCursor other = (ScrollCursor) obj; + return Objects.equals(scrollId, other.scrollId) + && Objects.equals(extractors, other.extractors) + && Objects.equals(limit, other.limit); + } + + @Override + public int hashCode() { + return Objects.hash(scrollId, extractors, limit); + } + + @Override + public String toString() { + return "cursor for scroll [" + scrollId + "]"; + } + + public static void cleanCursor(Client 
client, String scrollId, ActionListener listener) { + ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + clearScrollRequest.addScrollId(scrollId); + client.clearScroll(clearScrollRequest, listener); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitRowSet.java new file mode 100644 index 0000000000000..417a3cf3f4916 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitRowSet.java @@ -0,0 +1,175 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search; + +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor; +import org.elasticsearch.xpack.sql.session.AbstractRowSet; +import org.elasticsearch.xpack.sql.session.Cursor; + +import java.util.Arrays; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; + +/** + * Extracts rows from an array of {@link SearchHit}. + */ +class SearchHitRowSet extends AbstractRowSet { + private final SearchHit[] hits; + private final Cursor cursor; + private final String scrollId; + private final List extractors; + private final Set innerHits = new LinkedHashSet<>(); + private final String innerHit; + + private final int size; + private final int[] indexPerLevel; + private int row = 0; + + SearchHitRowSet(List exts, SearchHit[] hits, int limit, String scrollId) { + + this.hits = hits; + this.scrollId = scrollId; + this.extractors = exts; + + // Since the results might contain nested docs, the iteration is similar to that of Aggregation + // namely it discovers the nested docs and then, for iteration, increments the deepest level first + // and eventually carries that over to the top level + + String innerHit = null; + for (HitExtractor ex : exts) { + innerHit = ex.hitName(); + if (innerHit != null) { + innerHits.add(innerHit); + } + } + + int sz = hits.length; + + int maxDepth = 0; + if (!innerHits.isEmpty()) { + if (innerHits.size() > 1) { + throw new SqlIllegalArgumentException("Multi-nested docs not yet supported {}", innerHits); + } + maxDepth = 1; + + sz = 0; + for (SearchHit hit : hits) { + for (String ih : innerHits) { + SearchHits sh = hit.getInnerHits().get(ih); + if (sh != null) { + sz += sh.getHits().length; + } + } + } + } + // page size + size = limit < 0 ? sz : Math.min(sz, limit); + indexPerLevel = new int[maxDepth + 1]; + this.innerHit = innerHit; + + if (scrollId == null) { + /* SearchResponse can contain a null scroll when you start a + * scroll but all results fit in the first page. */ + cursor = Cursor.EMPTY; + } else { + // compute remaining limit (only if the limit is specified - that is, positive). + int remainingLimit = limit < 0 ? 
limit : limit - size; + // if the computed limit is zero, or the size is zero it means either there's nothing left or the limit has been reached + if (size == 0 || remainingLimit == 0) { + cursor = Cursor.EMPTY; + } else { + cursor = new ScrollCursor(scrollId, extractors, remainingLimit); + } + } + } + + @Override + public int columnCount() { + return extractors.size(); + } + + @Override + protected Object getColumn(int column) { + HitExtractor e = extractors.get(column); + int extractorLevel = e.hitName() == null ? 0 : 1; + + SearchHit hit = null; + SearchHit[] sh = hits; + for (int lvl = 0; lvl <= extractorLevel ; lvl++) { + // TODO: add support for multi-nested doc + if (hit != null) { + SearchHits innerHits = hit.getInnerHits().get(innerHit); + sh = innerHits == null ? SearchHits.EMPTY : innerHits.getHits(); + } + hit = sh[indexPerLevel[lvl]]; + } + + return e.extract(hit); + } + + @Override + protected boolean doHasCurrent() { + return row < size; + } + + @Override + protected boolean doNext() { + if (row < size - 1) { + row++; + // increment last row + indexPerLevel[indexPerLevel.length - 1]++; + // then check size + SearchHit[] sh = hits; + for (int lvl = 0; lvl < indexPerLevel.length; lvl++) { + if (indexPerLevel[lvl] == sh.length) { + // reset the current branch + indexPerLevel[lvl] = 0; + // bump the parent - if it's too big it, the loop will restart again from that position + indexPerLevel[lvl - 1]++; + // restart the loop + lvl = 0; + sh = hits; + } + else { + SearchHit h = sh[indexPerLevel[lvl]]; + // TODO: improve this for multi-nested responses + String path = lvl == 0 ? innerHit : null; + if (path != null) { + SearchHits innerHits = h.getInnerHits().get(path); + sh = innerHits == null ? SearchHits.EMPTY : innerHits.getHits(); + } + } + } + + return true; + } + return false; + } + + @Override + protected void doReset() { + row = 0; + Arrays.fill(indexPerLevel, 0); + } + + @Override + public int size() { + return size; + } + + public String scrollId() { + return scrollId; + } + + @Override + public Cursor nextPageCursor() { + return cursor; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java new file mode 100644 index 0000000000000..f4f98a4e4ffd7 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java @@ -0,0 +1,170 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.execution.search; + +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.ConstantScoreQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.fetch.StoredFieldsContext; +import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.search.sort.FieldSortBuilder; +import org.elasticsearch.search.sort.NestedSortBuilder; +import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType; +import org.elasticsearch.search.sort.SortBuilder; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.querydsl.container.AttributeSort; +import org.elasticsearch.xpack.sql.querydsl.container.QueryContainer; +import org.elasticsearch.xpack.sql.querydsl.container.ScoreSort; +import org.elasticsearch.xpack.sql.querydsl.container.ScriptSort; +import org.elasticsearch.xpack.sql.querydsl.container.Sort; + +import java.util.List; + +import static java.util.Collections.singletonList; +import static org.elasticsearch.search.sort.SortBuilders.fieldSort; +import static org.elasticsearch.search.sort.SortBuilders.scoreSort; +import static org.elasticsearch.search.sort.SortBuilders.scriptSort; + +public abstract class SourceGenerator { + + private static final List NO_STORED_FIELD = singletonList(StoredFieldsContext._NONE_); + + public static SearchSourceBuilder sourceBuilder(QueryContainer container, QueryBuilder filter, Integer size) { + final SearchSourceBuilder source = new SearchSourceBuilder(); + // add the source + if (container.query() == null) { + if (filter != null) { + source.query(new ConstantScoreQueryBuilder(filter)); + } + } else { + if (filter != null) { + source.query(new BoolQueryBuilder().must(container.query().asBuilder()).filter(filter)); + } else { + source.query(container.query().asBuilder()); + } + } + + SqlSourceBuilder sortBuilder = new SqlSourceBuilder(); + // Iterate through all the columns requested, collecting the fields that + // need to be retrieved from the result documents + + // NB: the sortBuilder takes care of eliminating duplicates + container.columns().forEach(cr -> cr.collectFields(sortBuilder)); + sortBuilder.build(source); + optimize(sortBuilder, source); + + // add the aggs (if present) + AggregationBuilder aggBuilder = container.aggs().asAggBuilder(); + + if (aggBuilder != null) { + source.aggregation(aggBuilder); + } + + sorting(container, source); + + // set page size + if (size != null) { + int sz = container.limit() > 0 ? Math.min(container.limit(), size) : size; + + if (source.size() == -1) { + source.size(sz); + } + if (aggBuilder instanceof CompositeAggregationBuilder) { + ((CompositeAggregationBuilder) aggBuilder).size(sz); + } + } + + optimize(container, source); + + return source; + } + + private static void sorting(QueryContainer container, SearchSourceBuilder source) { + if (source.aggregations() != null && source.aggregations().count() > 0) { + // Aggs can't be sorted using search sorting. That sorting is handled elsewhere. 
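 + // (for grouped queries the row order comes out of the aggregation itself, so a search-level sort would have no effect on the returned rows)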
+ return; + } + if (container.sort() == null || container.sort().isEmpty()) { + // if no sorting is specified, use the _doc one + source.sort("_doc"); + return; + } + for (Sort sortable : container.sort()) { + SortBuilder sortBuilder = null; + + if (sortable instanceof AttributeSort) { + AttributeSort as = (AttributeSort) sortable; + Attribute attr = as.attribute(); + + // sorting only works on not-analyzed fields - look for a multi-field replacement + if (attr instanceof FieldAttribute) { + FieldAttribute fa = (FieldAttribute) attr; + fa = fa.isInexact() ? fa.exactAttribute() : fa; + + sortBuilder = fieldSort(fa.name()); + if (fa.isNested()) { + FieldSortBuilder fieldSort = fieldSort(fa.name()); + NestedSortBuilder newSort = new NestedSortBuilder(fa.nestedParent().name()); + NestedSortBuilder nestedSort = fieldSort.getNestedSort(); + + if (nestedSort == null) { + fieldSort.setNestedSort(newSort); + } else { + for (; nestedSort.getNestedSort() != null; nestedSort = nestedSort.getNestedSort()) { + } + nestedSort.setNestedSort(newSort); + } + + nestedSort = newSort; + + if (container.query() != null) { + container.query().enrichNestedSort(nestedSort); + } + sortBuilder = fieldSort; + } + } + } else if (sortable instanceof ScriptSort) { + ScriptSort ss = (ScriptSort) sortable; + sortBuilder = scriptSort(ss.script().toPainless(), + ss.script().outputType().isNumeric() ? ScriptSortType.NUMBER : ScriptSortType.STRING); + } else if (sortable instanceof ScoreSort) { + sortBuilder = scoreSort(); + } + + if (sortBuilder != null) { + sortBuilder.order(sortable.direction().asOrder()); + source.sort(sortBuilder); + } + } + } + + private static void optimize(SqlSourceBuilder sqlSource, SearchSourceBuilder builder) { + if (sqlSource.sourceFields.isEmpty()) { + disableSource(builder); + } + } + + private static void optimize(QueryContainer query, SearchSourceBuilder builder) { + // if only aggs are needed, don't retrieve any docs and remove scoring + if (query.isAggsOnly()) { + builder.size(0); + builder.trackScores(false); + // disable source fetching (only doc values are used) + disableSource(builder); + } + } + + private static void disableSource(SearchSourceBuilder builder) { + builder.fetchSource(FetchSourceContext.DO_NOT_FETCH_SOURCE); + if (builder.storedFields() == null) { + builder.storedFields(NO_STORED_FIELD); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilder.java new file mode 100644 index 0000000000000..1022b062c4351 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilder.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.execution.search; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.script.Script; +import org.elasticsearch.search.builder.SearchSourceBuilder; + +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Set; + +/** + * A {@code SqlSourceBuilder} is a builder object passed to objects implementing + * {@link FieldExtraction} that can "build" whatever needs to be extracted from + * the resulting ES document as a field. + */ +public class SqlSourceBuilder { + // The LinkedHashMaps preserve the order of the fields in the response + final Set sourceFields = new LinkedHashSet<>(); + final Set docFields = new LinkedHashSet<>(); + final Map scriptFields = new LinkedHashMap<>(); + + boolean trackScores = false; + + public SqlSourceBuilder() { + } + + /** + * Turns on returning the {@code _score} for documents. + */ + public void trackScores() { + this.trackScores = true; + } + + /** + * Retrieve the requested field from the {@code _source} of the document + */ + public void addSourceField(String field) { + sourceFields.add(field); + } + + /** + * Retrieve the requested field from doc values (or fielddata) of the document + */ + public void addDocField(String field) { + docFields.add(field); + } + + /** + * Return the given field as a script field with the supplied script + */ + public void addScriptField(String name, Script script) { + scriptFields.put(name, script); + } + + /** + * Collect the necessary fields, modifying the {@code SearchSourceBuilder} + * to retrieve them from the document. + */ + public void build(SearchSourceBuilder sourceBuilder) { + sourceBuilder.trackScores(this.trackScores); + if (!sourceFields.isEmpty()) { + sourceBuilder.fetchSource(sourceFields.toArray(Strings.EMPTY_ARRAY), null); + } + docFields.forEach(sourceBuilder::docValueField); + scriptFields.forEach(sourceBuilder::scriptField); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/BucketExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/BucketExtractor.java new file mode 100644 index 0000000000000..230ad88eea59c --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/BucketExtractor.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; + +/** + * Extracts an aggregation value from a {@link Bucket}. + */ +public interface BucketExtractor extends NamedWriteable { + + Object extract(Bucket bucket); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/BucketExtractors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/BucketExtractors.java new file mode 100644 index 0000000000000..bae0590935fd7 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/BucketExtractors.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; + +import java.util.ArrayList; +import java.util.List; + +public final class BucketExtractors { + + private BucketExtractors() {} + + /** + * All of the named writeables needed to deserialize the instances of + * {@linkplain BucketExtractor}s. + */ + public static List getNamedWriteables() { + List entries = new ArrayList<>(); + entries.add(new Entry(BucketExtractor.class, CompositeKeyExtractor.NAME, CompositeKeyExtractor::new)); + entries.add(new Entry(BucketExtractor.class, ComputingExtractor.NAME, ComputingExtractor::new)); + entries.add(new Entry(BucketExtractor.class, MetricAggExtractor.NAME, MetricAggExtractor::new)); + entries.add(new Entry(BucketExtractor.class, ConstantExtractor.NAME, ConstantExtractor::new)); + return entries; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractor.java new file mode 100644 index 0000000000000..e6a2f4adfe217 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractor.java @@ -0,0 +1,145 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.querydsl.container.GroupByRef.Property; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; +import java.util.TimeZone; + +public class CompositeKeyExtractor implements BucketExtractor { + + /** + * Key or Komposite extractor. + */ + static final String NAME = "k"; + + private final String key; + private final Property property; + private final TimeZone timeZone; + + /** + * Constructs a new CompositeKeyExtractor instance. + * The time-zone parameter is used to indicate a date key. 
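 + * A non-null time-zone means the key is a date: the extracted epoch-millis value is converted to a {@code DateTime} in that zone.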
+ */ + public CompositeKeyExtractor(String key, Property property, TimeZone timeZone) { + this.key = key; + this.property = property; + this.timeZone = timeZone; + } + + CompositeKeyExtractor(StreamInput in) throws IOException { + key = in.readString(); + property = in.readEnum(Property.class); + if (in.getVersion().onOrAfter(Version.V_6_3_0)) { + if (in.readBoolean()) { + timeZone = TimeZone.getTimeZone(in.readString()); + } else { + timeZone = null; + } + } else { + DateTimeZone dtz = in.readOptionalTimeZone(); + if (dtz == null) { + timeZone = null; + } else { + timeZone = dtz.toTimeZone(); + } + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(key); + out.writeEnum(property); + if (out.getVersion().onOrAfter(Version.V_6_3_0)) { + if (timeZone == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeString(timeZone.getID()); + } + } else { + out.writeOptionalTimeZone(timeZone == null ? null : DateTimeZone.forTimeZone(timeZone)); + } + } + + String key() { + return key; + } + + Property property() { + return property; + } + + TimeZone timeZone() { + return timeZone; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public Object extract(Bucket bucket) { + if (property == Property.COUNT) { + return bucket.getDocCount(); + } + // get the composite value + Object m = bucket.getKey(); + + if (!(m instanceof Map)) { + throw new SqlIllegalArgumentException("Unexpected bucket returned: {}", m); + } + + Object object = ((Map) m).get(key); + + if (timeZone != null) { + if (object instanceof Long) { + object = new DateTime(((Long) object).longValue(), DateTimeZone.forTimeZone(timeZone)); + } else { + throw new SqlIllegalArgumentException("Invalid date key returned: {}", object); + } + } + + return object; + } + + @Override + public int hashCode() { + return Objects.hash(key, property, timeZone); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + CompositeKeyExtractor other = (CompositeKeyExtractor) obj; + return Objects.equals(key, other.key) + && Objects.equals(property, other.property) + && Objects.equals(timeZone, other.timeZone); + } + + @Override + public String toString() { + return "|" + key + "|"; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractor.java new file mode 100644 index 0000000000000..dded5adfcb84f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractor.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.HitExtractorProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; + +import java.io.IOException; +import java.util.Objects; + +/** + * Hit/BucketExtractor that delegates to a processor. The difference between this class + * and {@link HitExtractorProcessor} is that the latter is used inside a + * {@link Processor} tree as a leaf (and thus can effectively parse the + * {@link SearchHit} while this class is used when scrolling and passing down + * the results. + * + * In the future, the processor might be used across the board for all columns + * to reduce API complexity (and keep the {@link HitExtractor} only as an + * internal implementation detail). + */ +public class ComputingExtractor implements HitExtractor, BucketExtractor { + /** + * Stands for {@code comPuting}. We try to use short names for {@link HitExtractor}s + * to save a few bytes when when we send them back to the user. + */ + static final String NAME = "p"; + private final Processor processor; + private final String hitName; + + public ComputingExtractor(Processor processor) { + this(processor, null); + } + + public ComputingExtractor(Processor processor, String hitName) { + this.processor = processor; + this.hitName = hitName; + } + + ComputingExtractor(StreamInput in) throws IOException { + processor = in.readNamedWriteable(Processor.class); + hitName = in.readOptionalString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeNamedWriteable(processor); + out.writeOptionalString(hitName); + } + + @Override + public String getWriteableName() { + return NAME; + } + + public Processor processor() { + return processor; + } + + public Object extract(Object input) { + return processor.process(input); + } + + @Override + public Object extract(Bucket bucket) { + return processor.process(bucket); + } + + @Override + public Object extract(SearchHit hit) { + return processor.process(hit); + } + + @Override + public String hitName() { + return hitName; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + ComputingExtractor other = (ComputingExtractor) obj; + return Objects.equals(processor, other.processor) + && Objects.equals(hitName, other.hitName); + } + + @Override + public int hashCode() { + return Objects.hash(processor, hitName); + } + + @Override + public String toString() { + return processor.toString(); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/ConstantExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/ConstantExtractor.java new file mode 100644 index 0000000000000..e9bd3b2a6768e --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/ConstantExtractor.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; + +import java.io.IOException; +import java.util.Objects; + +/** + * Returns the a constant for every search hit against which it is run. + */ +public class ConstantExtractor implements HitExtractor, BucketExtractor { + /** + * Stands for {@code constant}. We try to use short names for {@link HitExtractor}s + * to save a few bytes when when we send them back to the user. + */ + static final String NAME = "c"; + private final Object constant; + + public ConstantExtractor(Object constant) { + this.constant = constant; + } + + ConstantExtractor(StreamInput in) throws IOException { + constant = in.readGenericValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeGenericValue(constant); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public Object extract(SearchHit hit) { + return constant; + } + + @Override + public Object extract(Bucket bucket) { + return constant; + } + + @Override + public String hitName() { + return null; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + ConstantExtractor other = (ConstantExtractor) obj; + return Objects.equals(constant, other.constant); + } + + @Override + public int hashCode() { + return Objects.hashCode(constant); + } + + @Override + public String toString() { + return "^" + constant; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java new file mode 100644 index 0000000000000..159127fb24cb2 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractor.java @@ -0,0 +1,174 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.document.DocumentField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.joda.time.ReadableDateTime; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Extractor for ES fields. Works for both 'normal' fields but also nested ones (which require hitName to be set). + * The latter is used as metadata in assembling the results in the tabular response. + */ +public class FieldHitExtractor implements HitExtractor { + + private static final boolean ARRAYS_LENIENCY = false; + + /** + * Stands for {@code field}. 
We try to use short names for {@link HitExtractor}s + * to save a few bytes when when we send them back to the user. + */ + static final String NAME = "f"; + + /** + * Source extraction requires only the (relative) field name, without its parent path. + */ + private static String[] sourcePath(String name, boolean useDocValue, String hitName) { + return useDocValue ? Strings.EMPTY_ARRAY : Strings + .tokenizeToStringArray(hitName == null ? name : name.substring(hitName.length() + 1), "."); + } + + private final String fieldName, hitName; + private final boolean useDocValue; + private final String[] path; + + public FieldHitExtractor(String name, boolean useDocValue) { + this(name, useDocValue, null); + } + + public FieldHitExtractor(String name, boolean useDocValue, String hitName) { + this.fieldName = name; + this.useDocValue = useDocValue; + this.hitName = hitName; + + if (hitName != null) { + if (!name.contains(hitName)) { + throw new SqlIllegalArgumentException("Hitname [{}] specified but not part of the name [{}]", hitName, name); + } + } + + this.path = sourcePath(fieldName, useDocValue, hitName); + } + + FieldHitExtractor(StreamInput in) throws IOException { + fieldName = in.readString(); + useDocValue = in.readBoolean(); + hitName = in.readOptionalString(); + path = sourcePath(fieldName, useDocValue, hitName); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(fieldName); + out.writeBoolean(useDocValue); + out.writeOptionalString(hitName); + } + + @Override + public Object extract(SearchHit hit) { + Object value = null; + if (useDocValue) { + DocumentField field = hit.field(fieldName); + if (field != null) { + value = unwrapMultiValue(field.getValues()); + } + } else { + Map source = hit.getSourceAsMap(); + if (source != null) { + value = extractFromSource(source); + } + } + return value; + } + + private Object unwrapMultiValue(Object values) { + if (values == null) { + return null; + } + if (values instanceof List) { + List list = (List) values; + if (list.isEmpty()) { + return null; + } else { + if (ARRAYS_LENIENCY || list.size() == 1) { + return unwrapMultiValue(list.get(0)); + } else { + throw new SqlIllegalArgumentException("Arrays (returned by [{}]) are not supported", fieldName); + } + } + } + if (values instanceof Map) { + throw new SqlIllegalArgumentException("Objects (returned by [{}]) are not supported", fieldName); + } + if (values instanceof Long || values instanceof Double || values instanceof String || values instanceof Boolean + || values instanceof ReadableDateTime) { + return values; + } + throw new SqlIllegalArgumentException("Type {} (returned by [{}]) is not supported", values.getClass().getSimpleName(), fieldName); + } + + @SuppressWarnings("unchecked") + Object extractFromSource(Map map) { + Object value = map; + boolean first = true; + // each node is a key inside the map + for (String node : path) { + if (value == null) { + return null; + } else if (first || value instanceof Map) { + first = false; + value = ((Map) value).get(node); + } else { + throw new SqlIllegalArgumentException("Cannot extract value [{}] from source", fieldName); + } + } + return unwrapMultiValue(value); + } + + @Override + public String hitName() { + return hitName; + } + + public String fieldName() { + return fieldName; + } + + @Override + public String toString() { + return fieldName + "@" + hitName; + } + + @Override + public boolean equals(Object obj) { + if (obj == 
null || obj.getClass() != getClass()) { + return false; + } + FieldHitExtractor other = (FieldHitExtractor) obj; + return fieldName.equals(other.fieldName) + && hitName.equals(other.hitName) + && useDocValue == other.useDocValue; + } + + @Override + public int hashCode() { + return Objects.hash(fieldName, useDocValue, hitName); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/HitExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/HitExtractor.java new file mode 100644 index 0000000000000..c0de33c812819 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/HitExtractor.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.search.SearchHit; + +/** + * Extracts a column value from a {@link SearchHit}. + */ +public interface HitExtractor extends NamedWriteable { + /** + * Extract the value from a hit. + */ + Object extract(SearchHit hit); + + /** + * Name of the inner hit needed by this extractor if it needs one, {@code null} otherwise. + */ + @Nullable + String hitName(); +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/HitExtractors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/HitExtractors.java new file mode 100644 index 0000000000000..1036829902e50 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/HitExtractors.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; + +import java.util.ArrayList; +import java.util.List; + +public final class HitExtractors { + + private HitExtractors() {} + + /** + * All of the named writeables needed to deserialize the instances of + * {@linkplain HitExtractor}. 
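 + * These entries are meant to be registered with the {@link NamedWriteableRegistry} used to read cursors (and their extractors) back from their serialized form.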
+ */ + public static List getNamedWriteables() { + List entries = new ArrayList<>(); + entries.add(new Entry(HitExtractor.class, ConstantExtractor.NAME, ConstantExtractor::new)); + entries.add(new Entry(HitExtractor.class, FieldHitExtractor.NAME, FieldHitExtractor::new)); + entries.add(new Entry(HitExtractor.class, ComputingExtractor.NAME, ComputingExtractor::new)); + entries.add(new Entry(HitExtractor.class, ScoreExtractor.NAME, in -> ScoreExtractor.INSTANCE)); + return entries; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractor.java new file mode 100644 index 0000000000000..307b6a7cab681 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractor.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.querydsl.agg.Aggs; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +public class MetricAggExtractor implements BucketExtractor { + + static final String NAME = "m"; + + private final String name; + private final String property; + private final String innerKey; + + public MetricAggExtractor(String name, String property, String innerKey) { + this.name = name; + this.property = property; + this.innerKey = innerKey; + } + + MetricAggExtractor(StreamInput in) throws IOException { + name = in.readString(); + property = in.readString(); + innerKey = in.readOptionalString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeString(property); + out.writeOptionalString(innerKey); + } + + String name() { + return name; + } + + String property() { + return property; + } + + String innerKey() { + return innerKey; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public Object extract(Bucket bucket) { + InternalAggregation agg = bucket.getAggregations().get(name); + if (agg == null) { + throw new SqlIllegalArgumentException("Cannot find an aggregation named {}", name); + } + if (agg instanceof InternalNumericMetricsAggregation.MultiValue) { + //TODO: need to investigate when this can be not-null + //if (innerKey == null) { + // throw new SqlIllegalArgumentException("Invalid innerKey {} specified for aggregation {}", innerKey, name); + //} + return ((InternalNumericMetricsAggregation.MultiValue) agg).value(property); + } + + Object v = agg.getProperty(property); + return innerKey != null && v instanceof Map ? 
((Map) v).get(innerKey) : v; + } + + @Override + public int hashCode() { + return Objects.hash(name, property, innerKey); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + MetricAggExtractor other = (MetricAggExtractor) obj; + return Objects.equals(name, other.name) + && Objects.equals(property, other.property) + && Objects.equals(innerKey, other.innerKey); + } + + @Override + public String toString() { + String i = innerKey != null ? "[" + innerKey + "]" : ""; + return Aggs.ROOT_GROUP_NAME + ">" + name + "." + property + i; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/ScoreExtractor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/ScoreExtractor.java new file mode 100644 index 0000000000000..d46c543037f35 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/extractor/ScoreExtractor.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.SearchHit; + +import java.io.IOException; + +/** + * Returns the a constant for every search hit against which it is run. + */ +public class ScoreExtractor implements HitExtractor { + public static final HitExtractor INSTANCE = new ScoreExtractor(); + /** + * Stands for {@code score}. We try to use short names for {@link HitExtractor}s + * to save a few bytes when when we send them back to the user. + */ + static final String NAME = "sc"; + + private ScoreExtractor() {} + + @Override + public void writeTo(StreamOutput out) throws IOException { + // Nothing to write + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public Object extract(SearchHit hit) { + return hit.getScore(); + } + + @Override + public String hitName() { + return null; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + return true; + } + + @Override + public int hashCode() { + return 31; + } + + @Override + public String toString() { + return "SCORE"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Alias.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Alias.java new file mode 100644 index 0000000000000..6f3ea405fbee9 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Alias.java @@ -0,0 +1,117 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.EsField; + +import static java.util.Collections.singletonList; + +import java.util.Collections; +import java.util.List; + +/** + * An {@code Alias} is a {@code NamedExpression} that gets renamed to something else through the Alias. + * + * For example, in the statement {@code 5 + 2 AS x}, {@code x} is an alias which is points to {@code ADD(5, 2)}. + * + * And in {@code SELECT col AS x} "col" is a named expression that gets renamed to "x" through an alias. + * + */ +public class Alias extends NamedExpression { + + private final Expression child; + private final String qualifier; + + /** + * Postpone attribute creation until it is actually created. + * Being immutable, create only one instance. + */ + private Attribute lazyAttribute; + + public Alias(Location location, String name, Expression child) { + this(location, name, null, child, null); + } + + public Alias(Location location, String name, String qualifier, Expression child) { + this(location, name, qualifier, child, null); + } + + public Alias(Location location, String name, String qualifier, Expression child, ExpressionId id) { + this(location, name, qualifier, child, id, false); + } + + public Alias(Location location, String name, String qualifier, Expression child, ExpressionId id, boolean synthetic) { + super(location, name, singletonList(child), id, synthetic); + this.child = child; + this.qualifier = qualifier; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Alias::new, name(), qualifier, child, id(), synthetic()); + } + + @Override + public Expression replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return new Alias(location(), name(), qualifier, newChildren.get(0), id(), synthetic()); + } + + public Expression child() { + return child; + } + + public String qualifier() { + return qualifier; + } + + @Override + public boolean nullable() { + return child.nullable(); + } + + @Override + public DataType dataType() { + return child.dataType(); + } + + @Override + public Attribute toAttribute() { + if (lazyAttribute == null) { + lazyAttribute = createAttribute(); + } + return lazyAttribute; + } + + private Attribute createAttribute() { + if (resolved()) { + Expression c = child(); + + Attribute attr = Expressions.attribute(c); + if (attr != null) { + return attr.clone(location(), name(), qualifier, child.nullable(), id(), synthetic()); + } + else { + // TODO: WE need to fix this fake Field + return new FieldAttribute(location(), null, name(), + new EsField(name(), child.dataType(), Collections.emptyMap(), true), + qualifier, child.nullable(), id(), synthetic()); + } + } + + return new UnresolvedAttribute(location(), name(), qualifier); + } + + @Override + public String toString() { + return child + " AS " + name() + "#" + id(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Attribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Attribute.java new file mode 100644 index 0000000000000..c8f38113bcf9a --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Attribute.java @@ -0,0 +1,136 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.Objects; + +import static java.util.Collections.emptyList; + +import java.util.List; + +/** + * {@link Expression}s that can be converted into Elasticsearch + * sorts, aggregations, or queries. They can also be extracted + * from the result of a search. + * + * In the statement {@code SELECT ABS(foo), A, B+C FROM ...} the three named + * expressions (ABS(foo), A, B+C) get converted to attributes and the user can + * only see Attributes. + * + * In the statement {@code SELECT foo FROM TABLE WHERE foo > 10 + 1} 10+1 is an + * expression. It's not named - meaning there's no alias for it (defined by the + * user) and as such there's no attribute - no column to be returned to the user. + * It's an expression used for filtering so it doesn't appear in the result set + * (derived table). "foo" on the other hand is an expression, a named expression + * (it has a name) and also an attribute - it's a column in the result set. + * + * Another example {@code SELECT foo FROM ... WHERE bar > 10 +1} "foo" gets + * converted into an Attribute, bar does not. That's because bar is used for + * filtering alone but it's not part of the projection meaning the user doesn't + * need it in the derived table. + */ +public abstract class Attribute extends NamedExpression { + + // empty - such as a top level attribute in SELECT cause + // present - table name or a table name alias + private final String qualifier; + + // can the attr be null - typically used in JOINs + private final boolean nullable; + + public Attribute(Location location, String name, String qualifier, ExpressionId id) { + this(location, name, qualifier, true, id); + } + + public Attribute(Location location, String name, String qualifier, boolean nullable, ExpressionId id) { + this(location, name, qualifier, nullable, id, false); + } + + public Attribute(Location location, String name, String qualifier, boolean nullable, ExpressionId id, boolean synthetic) { + super(location, name, emptyList(), id, synthetic); + this.qualifier = qualifier; + this.nullable = nullable; + } + + @Override + public final Expression replaceChildren(List newChildren) { + throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); + } + + public String qualifier() { + return qualifier; + } + + public String qualifiedName() { + return qualifier == null ? name() : qualifier + "." + name(); + } + + @Override + public boolean nullable() { + return nullable; + } + + @Override + public AttributeSet references() { + return new AttributeSet(this); + } + + public Attribute withLocation(Location location) { + return Objects.equals(location(), location) ? this : clone(location, name(), qualifier(), nullable(), id(), synthetic()); + } + + public Attribute withQualifier(String qualifier) { + return Objects.equals(qualifier(), qualifier) ? this : clone(location(), name(), qualifier, nullable(), id(), synthetic()); + } + + public Attribute withNullability(boolean nullable) { + return Objects.equals(nullable(), nullable) ? 
this : clone(location(), name(), qualifier(), nullable, id(), synthetic()); + } + + protected abstract Attribute clone(Location location, String name, String qualifier, boolean nullable, ExpressionId id, + boolean synthetic); + + @Override + public Attribute toAttribute() { + return this; + } + + @Override + public int semanticHash() { + return id().hashCode(); + } + + @Override + public boolean semanticEquals(Expression other) { + return other instanceof Attribute ? id().equals(((Attribute) other).id()) : false; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), qualifier, nullable); + } + + @Override + public boolean equals(Object obj) { + if (super.equals(obj)) { + Attribute other = (Attribute) obj; + return Objects.equals(qualifier, other.qualifier) + && Objects.equals(nullable, other.nullable); + } + + return false; + } + + @Override + public String toString() { + return name() + "{" + label() + "}" + "#" + id(); + } + + protected abstract String label(); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/AttributeMap.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/AttributeMap.java new file mode 100644 index 0000000000000..57dc8f6152e99 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/AttributeMap.java @@ -0,0 +1,325 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import java.util.AbstractSet; +import java.util.Collection; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.function.BiConsumer; +import java.util.stream.Stream; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static java.util.Collections.unmodifiableCollection; +import static java.util.Collections.unmodifiableSet; + +public class AttributeMap { + + static class AttributeWrapper { + + private final Attribute attr; + + AttributeWrapper(Attribute attr) { + this.attr = attr; + } + + @Override + public int hashCode() { + return attr.semanticHash(); + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof AttributeWrapper) { + AttributeWrapper aw = (AttributeWrapper) obj; + return attr.semanticEquals(aw.attr); + } + + return false; + } + + @Override + public String toString() { + return attr.toString(); + } + } + + /** + * Set that does unwrapping of keys inside the keySet and iterator. 
+ */ + private abstract static class UnwrappingSet extends AbstractSet { + private final Set set; + + UnwrappingSet(Set originalSet) { + set = unmodifiableSet(originalSet); + } + + @Override + public Iterator iterator() { + return new Iterator() { + final Iterator i = set.iterator(); + + @Override + public boolean hasNext() { + return i.hasNext(); + } + + @Override + public U next() { + return unwrap(i.next()); + } + }; + } + + protected abstract U unwrap(W next); + + + @Override + public Stream stream() { + return set.stream().map(this::unwrap); + } + + @Override + public Stream parallelStream() { + return set.parallelStream().map(this::unwrap); + } + + @Override + public int size() { + return set.size(); + } + + @Override + public boolean equals(Object o) { + return set.equals(o); + } + + @Override + public int hashCode() { + return set.hashCode(); + } + + @Override + public Object[] toArray() { + Object[] array = set.toArray(); + for (int i = 0; i < array.length; i++) { + array[i] = ((AttributeWrapper) array[i]).attr; + } + return array; + } + + @Override + @SuppressWarnings("unchecked") + public A[] toArray(A[] a) { + // collection is immutable so use that to our advantage + if (a.length < size()) + a = (A[]) java.lang.reflect.Array.newInstance(a.getClass().getComponentType(), size()); + int i = 0; + Object[] result = a; + for (U u : this) { + result[i++] = u; + } + // array larger than size, mark the ending element as null + if (a.length > size()) { + a[size()] = null; + } + return a; + } + + @Override + public String toString() { + return set.toString(); + } + }; + + private final Map delegate; + private Set keySet = null; + private Collection values = null; + private Set> entrySet = null; + + public AttributeMap() { + delegate = new LinkedHashMap<>(); + } + + public AttributeMap(Map attr) { + if (attr.isEmpty()) { + delegate = emptyMap(); + } + else { + delegate = new LinkedHashMap<>(attr.size()); + + for (Entry entry : attr.entrySet()) { + delegate.put(new AttributeWrapper(entry.getKey()), entry.getValue()); + } + } + } + + public AttributeMap(Attribute key, E value) { + delegate = singletonMap(new AttributeWrapper(key), value); + } + + void add(Attribute key, E value) { + delegate.put(new AttributeWrapper(key), value); + } + + // a set from a collection of sets without (too much) copying + void addAll(AttributeMap other) { + delegate.putAll(other.delegate); + } + + public AttributeMap substract(AttributeMap other) { + AttributeMap diff = new AttributeMap<>(); + for (Entry entry : this.delegate.entrySet()) { + if (!other.delegate.containsKey(entry.getKey())) { + diff.delegate.put(entry.getKey(), entry.getValue()); + } + } + + return diff; + } + + public AttributeMap intersect(AttributeMap other) { + AttributeMap smaller = (other.size() > size() ? this : other); + AttributeMap larger = (smaller == this ? 
other : this); + + AttributeMap intersect = new AttributeMap<>(); + for (Entry entry : smaller.delegate.entrySet()) { + if (larger.delegate.containsKey(entry.getKey())) { + intersect.delegate.put(entry.getKey(), entry.getValue()); + } + } + + return intersect; + } + + public boolean subsetOf(AttributeMap other) { + if (this.size() > other.size()) { + return false; + } + for (AttributeWrapper aw : delegate.keySet()) { + if (!other.delegate.containsKey(aw)) { + return false; + } + } + + return true; + } + + public Set attributeNames() { + Set s = new LinkedHashSet<>(size()); + + for (AttributeWrapper aw : delegate.keySet()) { + s.add(aw.attr.name()); + } + return s; + } + + public int size() { + return delegate.size(); + } + + public boolean isEmpty() { + return delegate.isEmpty(); + } + + public boolean containsKey(Object key) { + if (key instanceof NamedExpression) { + return delegate.keySet().contains(new AttributeWrapper(((NamedExpression) key).toAttribute())); + } + return false; + } + + public boolean containsValue(Object value) { + return delegate.values().contains(value); + } + + public E get(Object key) { + if (key instanceof NamedExpression) { + return delegate.get(new AttributeWrapper(((NamedExpression) key).toAttribute())); + } + return null; + } + + public E getOrDefault(Object key, E defaultValue) { + E e; + return (((e = get(key)) != null) || containsKey(key)) + ? e + : defaultValue; + } + + public Set keySet() { + if (keySet == null) { + keySet = new UnwrappingSet(delegate.keySet()) { + @Override + protected Attribute unwrap(AttributeWrapper next) { + return next.attr; + } + }; + } + return keySet; + } + + public Collection values() { + if (values == null) { + values = unmodifiableCollection(delegate.values()); + } + return values; + } + + public Set> entrySet() { + if (entrySet == null) { + entrySet = new UnwrappingSet, Entry>(delegate.entrySet()) { + @Override + protected Entry unwrap(final Entry next) { + return new Entry() { + @Override + public Attribute getKey() { + return next.getKey().attr; + } + + @Override + public E getValue() { + return next.getValue(); + } + + @Override + public E setValue(E value) { + throw new UnsupportedOperationException(); + } + }; + } + }; + } + return entrySet; + } + + public void forEach(BiConsumer action) { + delegate.forEach((k, v) -> action.accept(k.attr, v)); + } + + @Override + public int hashCode() { + return delegate.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof AttributeMap) { + obj = ((AttributeMap) obj).delegate; + } + return delegate.equals(obj); + } + + @Override + public String toString() { + return delegate.toString(); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/AttributeSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/AttributeSet.java new file mode 100644 index 0000000000000..5d4065e5f3654 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/AttributeSet.java @@ -0,0 +1,184 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
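AttributeMap above keys entries by semantic identity rather than by plain Attribute equality: every key is wrapped in an AttributeWrapper whose hashCode and equals delegate to semanticHash and semanticEquals. A minimal, self-contained sketch of that wrapper-key technique, using hypothetical Attr/Key types rather than the classes from this patch:

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch only: keying a LinkedHashMap through a wrapper that redefines
// equality, the same technique AttributeMap uses with AttributeWrapper.
public class WrapperKeyMap {

    static final class Attr {
        final long id;        // the "semantic" identity
        final String name;    // display-only, ignored for lookups
        Attr(long id, String name) { this.id = id; this.name = name; }
    }

    static final class Key {
        final Attr attr;
        Key(Attr attr) { this.attr = attr; }
        @Override public int hashCode() { return Long.hashCode(attr.id); }
        @Override public boolean equals(Object o) { return o instanceof Key && ((Key) o).attr.id == attr.id; }
    }

    public static void main(String[] args) {
        Map<Key, String> map = new LinkedHashMap<>();
        map.put(new Key(new Attr(1, "a")), "first");
        // Same id but a different display name still hits the same entry.
        System.out.println(map.get(new Key(new Attr(1, "a-renamed")))); // first
    }
}
```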
+ */ +package org.elasticsearch.xpack.sql.expression; + +import java.util.Collection; +import java.util.Iterator; +import java.util.Set; +import java.util.Spliterator; +import java.util.function.Consumer; +import java.util.function.Predicate; +import java.util.stream.Stream; + +import static java.util.Collections.emptyMap; + +public class AttributeSet implements Set { + + private static final AttributeMap EMPTY_DELEGATE = new AttributeMap<>(emptyMap()); + + public static final AttributeSet EMPTY = new AttributeSet(EMPTY_DELEGATE); + + // use the same name as in HashSet + private static final Object PRESENT = new Object(); + + private final AttributeMap delegate; + + public AttributeSet() { + delegate = new AttributeMap<>(); + } + + public AttributeSet(Attribute attr) { + delegate = new AttributeMap<>(attr, PRESENT); + } + + public AttributeSet(Collection attr) { + if (attr.isEmpty()) { + delegate = EMPTY_DELEGATE; + } + else { + delegate = new AttributeMap<>(); + + for (Attribute a : attr) { + delegate.add(a, PRESENT); + } + } + } + + private AttributeSet(AttributeMap delegate) { + this.delegate = delegate; + } + + // package protected - should be called through Expressions to cheaply create + // a set from a collection of sets without too much copying + void addAll(AttributeSet other) { + delegate.addAll(other.delegate); + } + + public AttributeSet substract(AttributeSet other) { + return new AttributeSet(delegate.substract(other.delegate)); + } + + public AttributeSet intersect(AttributeSet other) { + return new AttributeSet(delegate.intersect(other.delegate)); + } + + public boolean subsetOf(AttributeSet other) { + return delegate.subsetOf(other.delegate); + } + + public Set names() { + return delegate.attributeNames(); + } + + @Override + public void forEach(Consumer action) { + delegate.forEach((k, v) -> action.accept(k)); + } + + @Override + public int size() { + return delegate.size(); + } + + @Override + public boolean isEmpty() { + return delegate.isEmpty(); + } + + @Override + public boolean contains(Object o) { + return delegate.containsKey(o); + } + + @Override + public boolean containsAll(Collection c) { + for (Object o : c) { + if (!delegate.containsKey(o)) { + return false; + } + } + return true; + } + + @Override + public Iterator iterator() { + return delegate.keySet().iterator(); + } + + @Override + public Object[] toArray() { + return delegate.keySet().toArray(); + } + + @Override + public T[] toArray(T[] a) { + return delegate.keySet().toArray(a); + } + + @Override + public boolean add(Attribute e) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean addAll(Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public void clear() { + throw new UnsupportedOperationException(); + } + + @Override + public Spliterator spliterator() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean removeIf(Predicate filter) { + throw new UnsupportedOperationException(); + } + + @Override + public Stream stream() { + return delegate.keySet().stream(); + } + + @Override + public Stream parallelStream() { + return delegate.keySet().parallelStream(); + } + + @Override + public boolean equals(Object o) { 
+ return delegate.equals(o); + } + + @Override + public int hashCode() { + return delegate.hashCode(); + } + @Override + public String toString() { + return delegate.keySet().toString(); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/BinaryExpression.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/BinaryExpression.java new file mode 100644 index 0000000000000..fd6b8632f8e3e --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/BinaryExpression.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +public abstract class BinaryExpression extends Expression { + + private final Expression left, right; + + protected BinaryExpression(Location location, Expression left, Expression right) { + super(location, Arrays.asList(left, right)); + this.left = left; + this.right = right; + } + + @Override + public final BinaryExpression replaceChildren(List newChildren) { + if (newChildren.size() != 2) { + throw new IllegalArgumentException("expected [2] children but received [" + newChildren.size() + "]"); + } + return replaceChildren(newChildren.get(0), newChildren.get(1)); + } + protected abstract BinaryExpression replaceChildren(Expression newLeft, Expression newRight); + + public Expression left() { + return left; + } + + public Expression right() { + return right; + } + + @Override + public boolean foldable() { + return left.foldable() && right.foldable(); + } + + @Override + public boolean nullable() { + return left.nullable() || right.nullable(); + } + + @Override + public int hashCode() { + return Objects.hash(left, right); + } + + @Override + public boolean equals(Object obj) { + if (!super.equals(obj)) { + return false; + } + + BinaryExpression other = (BinaryExpression) obj; + return Objects.equals(left, other.left) + && Objects.equals(right, other.right); + } + + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(left()); + sb.append(" "); + sb.append(symbol()); + sb.append(" "); + sb.append(right()); + return sb.toString(); + } + + public abstract String symbol(); + + public abstract BinaryExpression swapLeftAndRight(); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/BinaryLogic.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/BinaryLogic.java new file mode 100644 index 0000000000000..8cb51a6ea0ca9 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/BinaryLogic.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; + +public abstract class BinaryLogic extends BinaryOperator { + + protected BinaryLogic(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + public DataType dataType() { + return DataType.BOOLEAN; + } + + @Override + protected TypeResolution resolveInputType(DataType inputType) { + return DataType.BOOLEAN == inputType ? TypeResolution.TYPE_RESOLVED : new TypeResolution( + "'%s' requires type %s not %s", symbol(), DataType.BOOLEAN.sqlName(), inputType.sqlName()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/BinaryOperator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/BinaryOperator.java new file mode 100644 index 0000000000000..bfa4358d240ac --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/BinaryOperator.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +//Binary expression that requires both input expressions to have the same type +//Compatible types should be handled by the analyzer (by using the narrowest type) +public abstract class BinaryOperator extends BinaryExpression { + + public interface Negateable { + BinaryExpression negate(); + } + + protected BinaryOperator(Location location, Expression left, Expression right) { + super(location, left, right); + } + + protected abstract TypeResolution resolveInputType(DataType inputType); + + @Override + protected TypeResolution resolveType() { + if (!childrenResolved()) { + return new TypeResolution("Unresolved children"); + } + DataType l = left().dataType(); + DataType r = right().dataType(); + + TypeResolution resolution = resolveInputType(l); + + if (resolution == TypeResolution.TYPE_RESOLVED) { + return resolveInputType(r); + } + return resolution; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Exists.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Exists.java new file mode 100644 index 0000000000000..32c2ee253f1ab --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Exists.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
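BinaryOperator.resolveType above validates the left input's type first and only checks the right input if the left one passed, so the first failure is the one reported. A standalone sketch of that short-circuiting check, using a hypothetical Resolution enum rather than the patch's TypeResolution class:

```java
// Illustrative sketch only: the left-then-right type check used by BinaryOperator.resolveType().
public class TwoStepTypeCheck {

    enum Resolution { RESOLVED, FAILED }

    // Stand-in for resolveInputType(DataType): here only BOOLEAN inputs are accepted.
    static Resolution checkInput(String type) {
        return "BOOLEAN".equals(type) ? Resolution.RESOLVED : Resolution.FAILED;
    }

    static Resolution resolveType(String leftType, String rightType) {
        Resolution left = checkInput(leftType);
        return left == Resolution.RESOLVED ? checkInput(rightType) : left;
    }

    public static void main(String[] args) {
        System.out.println(resolveType("BOOLEAN", "BOOLEAN")); // RESOLVED
        System.out.println(resolveType("BOOLEAN", "KEYWORD")); // FAILED - right side rejected
        System.out.println(resolveType("LONG", "KEYWORD"));    // FAILED - left side reported first
    }
}
```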
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +public class Exists extends SubQueryExpression { + + public Exists(Location location, LogicalPlan query) { + this(location, query, null); + } + + public Exists(Location location, LogicalPlan query, ExpressionId id) { + super(location, query, id); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Exists::new, query(), id()); + } + + @Override + protected SubQueryExpression clone(LogicalPlan newQuery) { + return new Exists(location(), newQuery); + } + + @Override + public DataType dataType() { + return DataType.BOOLEAN; + } + + @Override + public boolean nullable() { + return false; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expression.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expression.java new file mode 100644 index 0000000000000..846c06feb09f6 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expression.java @@ -0,0 +1,135 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.capabilities.Resolvable; +import org.elasticsearch.xpack.sql.capabilities.Resolvables; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.Node; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.List; +import java.util.Locale; + +import static java.lang.String.format; + +/** + * In a SQL statement, an Expression is whatever a user specifies inside an + * action, so for instance: + * + * {@code SELECT a, b, MAX(c, d) FROM i} + * + * a, b, ABS(c), and i are all Expressions, with ABS(c) being a Function + * (which is a type of expression) with a single child, c. + */ +public abstract class Expression extends Node implements Resolvable { + + public static class TypeResolution { + private final boolean failed; + private final String message; + + public static final TypeResolution TYPE_RESOLVED = new TypeResolution(false, StringUtils.EMPTY); + + public TypeResolution(String message, Object... 
args) { + this(true, format(Locale.ROOT, message, args)); + } + + private TypeResolution(boolean unresolved, String message) { + this.failed = unresolved; + this.message = message; + } + + public boolean unresolved() { + return failed; + } + + public boolean resolved() { + return !failed; + } + + public String message() { + return message; + } + } + + private TypeResolution lazyTypeResolution = null; + private Boolean lazyChildrenResolved = null; + private Expression lazyCanonical = null; + + public Expression(Location location, List children) { + super(location, children); + } + + // whether the expression can be evaluated statically (folded) or not + public boolean foldable() { + return false; + } + + public Object fold() { + throw new SqlIllegalArgumentException("Should not fold expression"); + } + + public abstract boolean nullable(); + + // the references/inputs/leaves of the expression tree + public AttributeSet references() { + return Expressions.references(children()); + } + + public boolean childrenResolved() { + if (lazyChildrenResolved == null) { + lazyChildrenResolved = Boolean.valueOf(Resolvables.resolved(children())); + } + return lazyChildrenResolved; + } + + public final TypeResolution typeResolved() { + if (lazyTypeResolution == null) { + lazyTypeResolution = resolveType(); + } + return lazyTypeResolution; + } + + protected TypeResolution resolveType() { + return TypeResolution.TYPE_RESOLVED; + } + + public final Expression canonical() { + if (lazyCanonical == null) { + lazyCanonical = canonicalize(); + } + return lazyCanonical; + } + + protected Expression canonicalize() { + return this; + } + + public boolean semanticEquals(Expression other) { + return canonical().equals(other.canonical()); + } + + public int semanticHash() { + return canonical().hashCode(); + } + + @Override + public boolean resolved() { + return childrenResolved() && typeResolved().resolved(); + } + + public abstract DataType dataType(); + + @Override + public abstract int hashCode(); + + @Override + public String toString() { + return nodeName() + "[" + propertiesToString(false) + "]"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/ExpressionId.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/ExpressionId.java new file mode 100644 index 0000000000000..55f947a20ac7f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/ExpressionId.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import java.util.Objects; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Unique identifier for an expression. + *

+ * We use an {@link AtomicLong} to guarantee that they are unique + * and that they produce reproduceable values when run in subsequent + * tests. They don't produce reproduceable values in production, but + * you rarely debug with them in production and commonly do so in + * tests. + */ +public class ExpressionId { + private static final AtomicLong COUNTER = new AtomicLong(); + private final long id; + + public ExpressionId() { + this.id = COUNTER.incrementAndGet(); + } + + @Override + public int hashCode() { + return Objects.hash(id); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (obj == null || obj.getClass() != getClass()) { + return false; + } + ExpressionId other = (ExpressionId) obj; + return id == other.id; + } + + @Override + public String toString() { + return Long.toString(id); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/ExpressionSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/ExpressionSet.java new file mode 100644 index 0000000000000..3adea47c6b853 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/ExpressionSet.java @@ -0,0 +1,153 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import java.util.Collection; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Set; + +import static java.util.Collections.emptyList; + +/** + * @param expression type + */ +public class ExpressionSet implements Set { + + @SuppressWarnings("rawtypes") + public static final ExpressionSet EMPTY = new ExpressionSet<>(emptyList()); + + @SuppressWarnings("unchecked") + public static ExpressionSet emptySet() { + return (ExpressionSet) EMPTY; + } + + // canonical to actual/original association + private final Map map = new LinkedHashMap<>(); + + public ExpressionSet() { + super(); + } + + public ExpressionSet(Collection c) { + addAll(c); + } + + // Returns the equivalent expression (if already exists in the set) or null if none is found + public E get(Expression e) { + return map.get(e.canonical()); + } + + @Override + public int size() { + return map.size(); + } + + @Override + public boolean isEmpty() { + return map.isEmpty(); + } + + @Override + public boolean contains(Object o) { + if (o instanceof Expression) { + return map.containsKey(((Expression) o).canonical()); + } + return false; + } + + @Override + public boolean containsAll(Collection c) { + for (Object o : c) { + if (!contains(o)) { + return false; + } + } + return true; + } + + @Override + public Iterator iterator() { + return map.values().iterator(); + } + + @Override + public boolean add(E e) { + return map.putIfAbsent(e.canonical(), e) == null; + } + + @Override + public boolean addAll(Collection c) { + boolean result = true; + for (E o : c) { + result &= add(o); + } + return result; + } + + @Override + public boolean retainAll(Collection c) { + boolean modified = false; + + Iterator keys = map.keySet().iterator(); + + while (keys.hasNext()) { + Expression key = keys.next(); + boolean found = false; + for (Object o : c) { + if (o instanceof Expression) { + o = ((Expression) o).canonical(); + } + if (key.equals(o)) { + found = true; + break; + } + } + if (!found) { + 
keys.remove(); + } + } + return modified; + } + + @Override + public boolean remove(Object o) { + if (o instanceof Expression) { + return map.remove(((Expression) o).canonical()) != null; + } + return false; + } + + @Override + public boolean removeAll(Collection c) { + boolean modified = false; + for (Object o : c) { + modified |= remove(o); + } + return modified; + } + + @Override + public void clear() { + map.clear(); + } + + @Override + public Object[] toArray() { + return map.values().toArray(); + } + + @Override + public T[] toArray(T[] a) { + return map.values().toArray(a); + } + + @Override + public String toString() { + return map.toString(); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java new file mode 100644 index 0000000000000..f3106cfc3c479 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Expressions.java @@ -0,0 +1,115 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.expression.Expression.TypeResolution; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.function.Predicate; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.stream.Collectors.toList; + +public abstract class Expressions { + + public static List asNamed(List exp) { + return exp.stream() + .map(NamedExpression.class::cast) + .collect(toList()); + } + + public static NamedExpression wrapAsNamed(Expression exp) { + return exp instanceof NamedExpression ? (NamedExpression) exp : new Alias(exp.location(), exp.nodeName(), exp); + } + + public static List asAttributes(List named) { + if (named.isEmpty()) { + return emptyList(); + } + List list = new ArrayList<>(named.size()); + for (NamedExpression exp : named) { + list.add(exp.toAttribute()); + } + return list; + } + + public static AttributeMap asAttributeMap(List named) { + if (named.isEmpty()) { + return new AttributeMap<>(emptyMap()); + } + + AttributeMap map = new AttributeMap<>(); + for (NamedExpression exp : named) { + map.add(exp.toAttribute(), exp); + } + return map; + } + + public static boolean anyMatch(List exps, Predicate predicate) { + for (Expression exp : exps) { + if (exp.anyMatch(predicate)) { + return true; + } + } + return false; + } + + public static boolean nullable(List exps) { + for (Expression exp : exps) { + if (!exp.nullable()) { + return false; + } + } + return true; + } + + public static AttributeSet references(List exps) { + if (exps.isEmpty()) { + return AttributeSet.EMPTY; + } + + AttributeSet set = new AttributeSet(); + for (Expression exp : exps) { + set.addAll(exp.references()); + } + return set; + } + + public static String name(Expression e) { + return e instanceof NamedExpression ? 
((NamedExpression) e).name() : e.nodeName(); + } + + public static List names(Collection e) { + List names = new ArrayList<>(e.size()); + for (Expression ex : e) { + names.add(name(ex)); + } + + return names; + } + + public static Attribute attribute(Expression e) { + if (e instanceof NamedExpression) { + return ((NamedExpression) e).toAttribute(); + } + if (e != null && e.foldable()) { + return new LiteralAttribute(Literal.of(e)); + } + return null; + } + + public static TypeResolution typeMustBe(Expression e, Predicate predicate, String message) { + return predicate.test(e) ? TypeResolution.TYPE_RESOLVED : new TypeResolution(message); + } + + public static TypeResolution typeMustBeNumeric(Expression e) { + return e.dataType().isNumeric()? TypeResolution.TYPE_RESOLVED : new TypeResolution( + "Argument required to be numeric ('" + Expressions.name(e) + "' of type '" + e.dataType().esType + "')"); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/FieldAttribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/FieldAttribute.java new file mode 100644 index 0000000000000..d5b472c2fab03 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/FieldAttribute.java @@ -0,0 +1,128 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.EsField; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.Objects; + +/** + * Attribute for an ES field. + * To differentiate between the different type of fields this class offers: + * - name - the fully qualified name (foo.bar.tar) + * - path - the path pointing to the field name (foo.bar) + * - parent - the immediate parent of the field; useful for figuring out the type of field (nested vs object) + * - nestedParent - if nested, what's the parent (which might not be the immediate one) + */ +public class FieldAttribute extends TypedAttribute { + + private final FieldAttribute parent; + private final FieldAttribute nestedParent; + private final String path; + private final EsField field; + + public FieldAttribute(Location location, String name, EsField field) { + this(location, null, name, field); + } + + public FieldAttribute(Location location, FieldAttribute parent, String name, EsField field) { + this(location, parent, name, field, null, true, null, false); + } + + public FieldAttribute(Location location, FieldAttribute parent, String name, EsField field, String qualifier, + boolean nullable, ExpressionId id, boolean synthetic) { + super(location, name, field.getDataType(), qualifier, nullable, id, synthetic); + this.path = parent != null ? 
parent.name() : StringUtils.EMPTY; + this.parent = parent; + this.field = field; + + // figure out the last nested parent + FieldAttribute nestedPar = null; + if (parent != null) { + nestedPar = parent.nestedParent; + if (parent.dataType() == DataType.NESTED) { + nestedPar = parent; + } + } + this.nestedParent = nestedPar; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, FieldAttribute::new, parent, name(), field, qualifier(), nullable(), id(), synthetic()); + } + + public FieldAttribute parent() { + return parent; + } + + public String path() { + return path; + } + + public String qualifiedPath() { + // return only the qualifier is there's no path + return qualifier() != null ? qualifier() + (Strings.hasText(path) ? "." + path : StringUtils.EMPTY) : path; + } + + public boolean isNested() { + return nestedParent != null; + } + + public FieldAttribute nestedParent() { + return nestedParent; + } + + public boolean isInexact() { + return field.isExact() == false; + } + + public FieldAttribute exactAttribute() { + if (field.isExact() == false) { + return innerField(field.getExactField()); + } + return this; + } + + private FieldAttribute innerField(EsField type) { + return new FieldAttribute(location(), this, name() + "." + type.getName(), type, qualifier(), nullable(), id(), synthetic()); + } + + @Override + protected Expression canonicalize() { + return new FieldAttribute(location(), null, "", field, null, true, id(), false); + } + + @Override + protected Attribute clone(Location location, String name, String qualifier, boolean nullable, ExpressionId id, boolean synthetic) { + FieldAttribute qualifiedParent = parent != null ? (FieldAttribute) parent.withQualifier(qualifier) : null; + return new FieldAttribute(location, qualifiedParent, name, field, qualifier, nullable, id, synthetic); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), path); + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) && Objects.equals(path, ((FieldAttribute) obj).path); + } + + @Override + protected String label() { + return "f"; + } + + public EsField field() { + return field; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Foldables.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Foldables.java new file mode 100644 index 0000000000000..1f8d658c380fd --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Foldables.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
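FieldAttribute above derives its path from the parent attribute's name, so for a field named foo.bar.tar the path is foo.bar, and qualifiedPath prepends the table qualifier when one is present. A standalone sketch of that naming convention, using a hypothetical helper rather than the patch's classes:

```java
// Illustrative sketch only: the fully-qualified-name vs. path split that
// FieldAttribute's javadoc describes for fields such as "foo.bar.tar".
public class DottedFieldNames {

    // Everything before the last dot, or the empty string for a top-level field.
    static String pathOf(String qualifiedName) {
        int dot = qualifiedName.lastIndexOf('.');
        return dot < 0 ? "" : qualifiedName.substring(0, dot);
    }

    public static void main(String[] args) {
        System.out.println(pathOf("foo.bar.tar")); // foo.bar
        System.out.println(pathOf("foo"));         // "" - no parent
    }
}
```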
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypeConversion; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import java.util.ArrayList; +import java.util.List; + +public abstract class Foldables { + + @SuppressWarnings("unchecked") + public static T valueOf(Expression e, DataType to) { + if (e.foldable()) { + return (T) DataTypeConversion.conversionFor(e.dataType(), to).convert(e.fold()); + } + throw new SqlIllegalArgumentException("Cannot determine value for {}", e); + } + + public static Object valueOf(Expression e) { + if (e.foldable()) { + return e.fold(); + } + throw new SqlIllegalArgumentException("Cannot determine value for {}", e); + } + + public static String stringValueOf(Expression e) { + return valueOf(e, DataType.KEYWORD); + } + + public static Integer intValueOf(Expression e) { + return valueOf(e, DataType.INTEGER); + } + + public static Long longValueOf(Expression e) { + return valueOf(e, DataType.LONG); + } + + public static double doubleValueOf(Expression e) { + return valueOf(e, DataType.DOUBLE); + } + + public static List valuesOf(List list, DataType to) { + List l = new ArrayList<>(list.size()); + for (Expression e : list) { + l.add(valueOf(e, to)); + } + return l; + } + + public static List doubleValuesOf(List list) { + return valuesOf(list, DataType.DOUBLE); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LeafExpression.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LeafExpression.java new file mode 100644 index 0000000000000..49325ce1c8175 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LeafExpression.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.List; + +import static java.util.Collections.emptyList; + +public abstract class LeafExpression extends Expression { + + protected LeafExpression(Location location) { + super(location, emptyList()); + } + + @Override + public final Expression replaceChildren(List newChildren) { + throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); + } + + public AttributeSet references() { + return AttributeSet.EMPTY; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java new file mode 100644 index 0000000000000..9a4ffce929592 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
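Foldables above only evaluates expressions that report themselves as foldable, converting the folded value to the requested data type and failing fast otherwise. A self-contained sketch of that guard, with a hypothetical Expr interface standing in for the patch's Expression and conversion types:

```java
// Illustrative sketch only: evaluate an expression eagerly only when it is foldable,
// mirroring the guard in Foldables.valueOf.
public class FoldGuard {

    interface Expr {
        boolean foldable();
        Object fold();
    }

    static Object valueOf(Expr e) {
        if (e.foldable()) {
            return e.fold();
        }
        throw new IllegalArgumentException("Cannot determine value for " + e);
    }

    public static void main(String[] args) {
        Expr constant = new Expr() {
            @Override public boolean foldable() { return true; }
            @Override public Object fold() { return 40 + 2; } // a constant-foldable computation
        };
        System.out.println(valueOf(constant)); // 42
    }
}
```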
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypeConversion; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import java.util.Objects; + +public class Literal extends LeafExpression { + public static final Literal TRUE = Literal.of(Location.EMPTY, Boolean.TRUE); + public static final Literal FALSE = Literal.of(Location.EMPTY, Boolean.FALSE); + + private final Object value; + private final DataType dataType; + + public Literal(Location location, Object value, DataType dataType) { + super(location); + this.dataType = dataType; + this.value = DataTypeConversion.convert(value, dataType); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Literal::new, value, dataType); + } + + public Object value() { + return value; + } + + @Override + public boolean foldable() { + return true; + } + + @Override + public boolean nullable() { + return value == null; + } + + @Override + public DataType dataType() { + return dataType; + } + + @Override + public boolean resolved() { + return true; + } + + @Override + public Object fold() { + return value; + } + + + @Override + public int hashCode() { + return Objects.hash(value, dataType); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Literal other = (Literal) obj; + return Objects.equals(value, other.value) + && Objects.equals(dataType, other.dataType); + } + + @Override + public String toString() { + return Objects.toString(value); + } + + public static Literal of(Location loc, Object value) { + if (value instanceof Literal) { + return (Literal) value; + } + return new Literal(loc, value, DataTypes.fromJava(value)); + } + + public static Literal of(Expression foldable) { + if (foldable instanceof Literal) { + return (Literal) foldable; + } + + if (!foldable.foldable()) { + throw new SqlIllegalArgumentException("Foldable expression required for Literal creation; received unfoldable " + foldable); + } + + return new Literal(foldable.location(), foldable.fold(), foldable.dataType()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LiteralAttribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LiteralAttribute.java new file mode 100644 index 0000000000000..ff07731b82ebd --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LiteralAttribute.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ConstantInput; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; + +public class LiteralAttribute extends TypedAttribute { + + private final Literal literal; + + public LiteralAttribute(Literal literal) { + this(literal.location(), String.valueOf(literal.fold()), null, false, null, false, literal.dataType(), literal); + } + + public LiteralAttribute(Location location, String name, String qualifier, boolean nullable, ExpressionId id, boolean synthetic, + DataType dataType, Literal literal) { + super(location, name, dataType, qualifier, nullable, id, synthetic); + this.literal = literal; + } + + public Literal literal() { + return literal; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, LiteralAttribute::new, + name(), qualifier(), nullable(), id(), synthetic(), dataType(), literal); + } + + @Override + protected LiteralAttribute clone(Location location, String name, String qualifier, boolean nullable, + ExpressionId id, boolean synthetic) { + return new LiteralAttribute(location, name, qualifier, nullable, id, synthetic, dataType(), literal); + } + + public ProcessorDefinition asProcessorDefinition() { + return new ConstantInput(location(), literal, literal.value()); + } + + @Override + protected String label() { + return "c"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/NamedExpression.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/NamedExpression.java new file mode 100644 index 0000000000000..cf06ddcc09ce9 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/NamedExpression.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.List; +import java.util.Objects; + +public abstract class NamedExpression extends Expression { + + private final String name; + private final ExpressionId id; + private final boolean synthetic; + + public NamedExpression(Location location, String name, List children, ExpressionId id) { + this(location, name, children, id, false); + } + + public NamedExpression(Location location, String name, List children, ExpressionId id, boolean synthetic) { + super(location, children); + this.name = name; + this.id = id == null ? 
new ExpressionId() : id; + this.synthetic = synthetic; + } + + public String name() { + return name; + } + + public ExpressionId id() { + return id; + } + + public boolean synthetic() { + return synthetic; + } + + public abstract Attribute toAttribute(); + + @Override + public int hashCode() { + return Objects.hash(id, name, synthetic); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + NamedExpression other = (NamedExpression) obj; + return Objects.equals(synthetic, other.synthetic) + && Objects.equals(id, other.id) + /* + * It is important that the line below be `name` + * and not `name()` because subclasses might override + * `name()` in ways that are not compatible with + * equality. Specifically the `Unresolved` subclasses. + */ + && Objects.equals(name, other.name) + && Objects.equals(children(), other.children()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/NullIntolerant.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/NullIntolerant.java new file mode 100644 index 0000000000000..d3e62db81ad43 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/NullIntolerant.java @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +// marker interface for expressions that are do not support null +// and thus are eliminated by it +public interface NullIntolerant { + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Order.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Order.java new file mode 100644 index 0000000000000..70e537527c867 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Order.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
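NamedExpression above mints a fresh ExpressionId when the caller does not supply one, and ExpressionId itself draws from a shared AtomicLong so ids stay unique within a run. A standalone sketch of that id-assignment pattern, with hypothetical names rather than the patch's classes:

```java
import java.util.concurrent.atomic.AtomicLong;

// Illustrative sketch only: hand out a fresh id from a shared counter when none is supplied,
// as NamedExpression does with ExpressionId.
public class IdAssignment {

    private static final AtomicLong COUNTER = new AtomicLong();

    static long idOrNew(Long supplied) {
        return supplied == null ? COUNTER.incrementAndGet() : supplied;
    }

    public static void main(String[] args) {
        long a = idOrNew(null);
        long b = idOrNew(null);
        System.out.println(a != b);      // true - each call without an id gets its own
        System.out.println(idOrNew(7L)); // 7 - explicitly supplied ids are kept
    }
}
```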
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.Objects; + +public class Order extends UnaryExpression { + + public enum OrderDirection { + ASC, DESC + } + + private final OrderDirection direction; + + public Order(Location location, Expression child, OrderDirection direction) { + super(location, child); + this.direction = direction; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Order::new, child(), direction); + } + + @Override + protected UnaryExpression replaceChild(Expression newChild) { + return new Order(location(), newChild, direction); + } + + public OrderDirection direction() { + return direction; + } + + @Override + public boolean foldable() { + return false; + } + + @Override + public int hashCode() { + return Objects.hash(child(), direction); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Order other = (Order) obj; + return Objects.equals(direction, other.direction) + && Objects.equals(child(), other.child()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/ScalarSubquery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/ScalarSubquery.java new file mode 100644 index 0000000000000..76906a714d0cb --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/ScalarSubquery.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; + +public class ScalarSubquery extends SubQueryExpression { + + public ScalarSubquery(Location location, LogicalPlan query) { + this(location, query, null); + } + + public ScalarSubquery(Location location, LogicalPlan query, ExpressionId id) { + super(location, query, id); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ScalarSubquery::new, query(), id()); + } + + @Override + protected ScalarSubquery clone(LogicalPlan newQuery) { + return new ScalarSubquery(location(), newQuery); + } + + @Override + public DataType dataType() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean nullable() { + return true; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/SubQueryExpression.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/SubQueryExpression.java new file mode 100644 index 0000000000000..33f2f3d0b07a0 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/SubQueryExpression.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.tree.Location; + +public abstract class SubQueryExpression extends Expression { + + private final LogicalPlan query; + private final ExpressionId id; + + public SubQueryExpression(Location location, LogicalPlan query) { + this(location, query, null); + } + + public SubQueryExpression(Location location, LogicalPlan query, ExpressionId id) { + super(location, Collections.emptyList()); + this.query = query; + this.id = id == null ? new ExpressionId() : id; + } + + @Override + public final Expression replaceChildren(List newChildren) { + throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); + } + + public LogicalPlan query() { + return query; + } + + public ExpressionId id() { + return id; + } + + @Override + public boolean resolved() { + return false; + } + + public SubQueryExpression withQuery(LogicalPlan newQuery) { + return (Objects.equals(query, newQuery) ? this : clone(newQuery)); + } + + protected abstract SubQueryExpression clone(LogicalPlan newQuery); + + @Override + public int hashCode() { + return Objects.hash(query()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + SubQueryExpression other = (SubQueryExpression) obj; + return Objects.equals(query(), other.query()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypedAttribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypedAttribute.java new file mode 100644 index 0000000000000..1c8034f059a81 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypedAttribute.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
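`SubQueryExpression#withQuery` above uses the same immutability convention as the rest of these expression nodes: return `this` when the update is a no-op, otherwise build a copy via `clone(newQuery)`. A minimal standalone sketch of that idiom (hypothetical `Node`/`withValue` names):

```java
import java.util.Objects;

// Standalone sketch of the "return this if unchanged, otherwise clone" idiom.
public final class Node {
    private final String value;

    public Node(String value) { this.value = value; }

    // Same spirit as SubQueryExpression#withQuery: a no-op update keeps the same instance,
    // which keeps object identity stable for rules that check whether a transform changed anything.
    public Node withValue(String newValue) {
        return Objects.equals(value, newValue) ? this : new Node(newValue);
    }

    public static void main(String[] args) {
        Node n = new Node("a");
        System.out.println(n.withValue("a") == n);   // true - unchanged, same instance
        System.out.println(n.withValue("b") == n);   // false - changed, new instance
    }
}
```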
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.Objects; + +public abstract class TypedAttribute extends Attribute { + + private final DataType dataType; + + protected TypedAttribute(Location location, String name, DataType dataType) { + this(location, name, dataType, null, true, null, false); + } + + protected TypedAttribute(Location location, String name, DataType dataType, String qualifier, boolean nullable, + ExpressionId id, boolean synthetic) { + super(location, name, qualifier, nullable, id, synthetic); + this.dataType = dataType; + } + + @Override + public DataType dataType() { + return dataType; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), dataType); + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) && Objects.equals(dataType, ((TypedAttribute) obj).dataType); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnaryExpression.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnaryExpression.java new file mode 100644 index 0000000000000..710ee760328cf --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnaryExpression.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.Objects; + +import static java.util.Collections.singletonList; + +import java.util.List; + +public abstract class UnaryExpression extends Expression { + + private final Expression child; + + protected UnaryExpression(Location location, Expression child) { + super(location, singletonList(child)); + this.child = child; + } + + @Override + public final UnaryExpression replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return replaceChild(newChildren.get(0)); + } + protected abstract UnaryExpression replaceChild(Expression newChild); + + public Expression child() { + return child; + } + + @Override + public boolean foldable() { + return child.foldable(); + } + + @Override + public boolean nullable() { + return child.nullable(); + } + + @Override + public boolean resolved() { + return child.resolved(); + } + + @Override + public DataType dataType() { + return child.dataType(); + } + + @Override + public int hashCode() { + return Objects.hash(child); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + UnaryExpression other = (UnaryExpression) obj; + return Objects.equals(child, other.child); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedAlias.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedAlias.java new file mode 100644 index 0000000000000..eaa6aeb5afa13 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedAlias.java @@ -0,0 +1,75 @@ +/* + * Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.capabilities.UnresolvedException; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import java.util.Objects; + +import static java.util.Collections.singletonList; + +import java.util.List; + +public class UnresolvedAlias extends UnresolvedNamedExpression { + + private final Expression child; + + public UnresolvedAlias(Location location, Expression child) { + super(location, singletonList(child)); + this.child = child; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, UnresolvedAlias::new, child); + } + + @Override + public Expression replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return new UnresolvedAlias(location(), newChildren.get(0)); + } + + public Expression child() { + return child; + } + + @Override + public String unresolvedMessage() { + return "Unknown alias [" + name() + "]"; + } + + @Override + public boolean nullable() { + throw new UnresolvedException("nullable", this); + } + + @Override + public int hashCode() { + return Objects.hash(child); + } + + @Override + public boolean equals(Object obj) { + /* + * Intentionally not calling the superclass + * equals because it uses id which we always + * mutate when we make a clone. + */ + if (obj == null || obj.getClass() != getClass()) { + return false; + } + return Objects.equals(child, ((UnresolvedAlias) obj).child); + } + + @Override + public String toString() { + return child + " AS ?"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedAttribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedAttribute.java new file mode 100644 index 0000000000000..0a31b74d636ee --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedAttribute.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.capabilities.Unresolvable; +import org.elasticsearch.xpack.sql.capabilities.UnresolvedException; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.util.CollectionUtils; + +import java.util.List; +import java.util.Locale; +import java.util.Objects; + +import static java.lang.String.format; + +// unfortunately we can't use UnresolvedNamedExpression +public class UnresolvedAttribute extends Attribute implements Unresolvable { + + private final String unresolvedMsg; + private final boolean customMessage; + private final Object resolutionMetadata; + + public UnresolvedAttribute(Location location, String name) { + this(location, name, null); + } + + public UnresolvedAttribute(Location location, String name, String qualifier) { + this(location, name, qualifier, null); + } + + public UnresolvedAttribute(Location location, String name, String qualifier, String unresolvedMessage) { + this(location, name, qualifier, null, unresolvedMessage, null); + } + + public UnresolvedAttribute(Location location, String name, String qualifier, ExpressionId id, String unresolvedMessage, + Object resolutionMetadata) { + super(location, name, qualifier, id); + this.customMessage = unresolvedMessage != null; + this.unresolvedMsg = unresolvedMessage == null ? errorMessage(qualifiedName(), null) : unresolvedMessage; + this.resolutionMetadata = resolutionMetadata; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, UnresolvedAttribute::new, + name(), qualifier(), id(), unresolvedMsg, resolutionMetadata); + } + + public Object resolutionMetadata() { + return resolutionMetadata; + } + + public boolean customMessage() { + return customMessage; + } + + @Override + public boolean resolved() { + return false; + } + + @Override + protected Attribute clone(Location location, String name, String qualifier, boolean nullable, ExpressionId id, boolean synthetic) { + return this; + } + + public UnresolvedAttribute withUnresolvedMessage(String unresolvedMsg) { + return new UnresolvedAttribute(location(), name(), qualifier(), id(), unresolvedMsg, resolutionMetadata()); + } + + @Override + public DataType dataType() { + throw new UnresolvedException("dataType", this); + } + + @Override + public String nodeString() { + return format(Locale.ROOT, "unknown column '%s'", name()); + } + + @Override + public String toString() { + return UNRESOLVED_PREFIX + qualifiedName(); + } + + @Override + protected String label() { + return UNRESOLVED_PREFIX; + } + + @Override + public String unresolvedMessage() { + return unresolvedMsg; + } + + public static String errorMessage(String name, List potentialMatches) { + String msg = "Unknown column [" + name + "]"; + if (!CollectionUtils.isEmpty(potentialMatches)) { + msg += ", did you mean " + (potentialMatches.size() == 1 ? 
"[" + potentialMatches.get(0) + + "]": "any of " + potentialMatches.toString()) + "?"; + } + return msg; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), resolutionMetadata, unresolvedMsg); + } + + @Override + public boolean equals(Object obj) { + if (super.equals(obj)) { + UnresolvedAttribute ua = (UnresolvedAttribute) obj; + return Objects.equals(resolutionMetadata, ua.resolutionMetadata) && Objects.equals(unresolvedMsg, ua.unresolvedMsg); + } + return false; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedNamedExpression.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedNamedExpression.java new file mode 100644 index 0000000000000..ea35c38275039 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedNamedExpression.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.capabilities.Unresolvable; +import org.elasticsearch.xpack.sql.capabilities.UnresolvedException; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.List; + +abstract class UnresolvedNamedExpression extends NamedExpression implements Unresolvable { + + UnresolvedNamedExpression(Location location, List children) { + super(location, "", children, new ExpressionId()); + } + + @Override + public boolean resolved() { + return false; + } + + @Override + public String name() { + throw new UnresolvedException("name", this); + } + + @Override + public ExpressionId id() { + throw new UnresolvedException("id", this); + } + + @Override + public DataType dataType() { + throw new UnresolvedException("data type", this); + } + + @Override + public Attribute toAttribute() { + throw new UnresolvedException("attribute", this); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedStar.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedStar.java new file mode 100644 index 0000000000000..948a5465efa80 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/UnresolvedStar.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.capabilities.UnresolvedException; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import java.util.Objects; + +import static java.util.Collections.emptyList; + +import java.util.List; + +public class UnresolvedStar extends UnresolvedNamedExpression { + + // typically used for nested fields or inner/dotted fields + private final UnresolvedAttribute qualifier; + + public UnresolvedStar(Location location, UnresolvedAttribute qualifier) { + super(location, emptyList()); + this.qualifier = qualifier; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, UnresolvedStar::new, qualifier); + } + + @Override + public Expression replaceChildren(List newChildren) { + throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); + } + + @Override + public boolean nullable() { + throw new UnresolvedException("nullable", this); + } + + public UnresolvedAttribute qualifier() { + return qualifier; + } + + @Override + public int hashCode() { + return Objects.hash(qualifier); + } + + @Override + public boolean equals(Object obj) { + /* + * Intentionally not calling the superclass + * equals because it uses id which we always + * mutate when we make a clone. So we need + * to ignore it in equals for the transform + * tests to pass. + */ + if (obj == null || obj.getClass() != getClass()) { + return false; + } + + UnresolvedStar other = (UnresolvedStar) obj; + return Objects.equals(qualifier, other.qualifier); + } + + private String message() { + return (qualifier() != null ? qualifier() + "." : "") + "*"; + } + + @Override + public String unresolvedMessage() { + return "Cannot determine columns for " + message(); + } + + @Override + public String toString() { + return UNRESOLVED_PREFIX + message(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/Function.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/Function.java new file mode 100644 index 0000000000000..6c6f1a2633ac8 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/Function.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.ExpressionId; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.List; +import java.util.StringJoiner; + +/** + * Any SQL expression with parentheses, like {@code MAX()}, or {@code ABS()}. A + * function is always a {@code NamedExpression}. 
+ */ +public abstract class Function extends NamedExpression { + + private final String functionName, name; + + protected Function(Location location, List children) { + this(location, children, null, false); + } + + // TODO: Functions supporting distinct should add a dedicated constructor Location, List, boolean + protected Function(Location location, List children, ExpressionId id, boolean synthetic) { + // cannot detect name yet so override the name + super(location, null, children, id, synthetic); + functionName = StringUtils.camelCaseToUnderscore(getClass().getSimpleName()); + name = functionName() + functionArgs(); + } + + public final List arguments() { + return children(); + } + + @Override + public String name() { + return name; + } + + @Override + public boolean nullable() { + return false; + } + + @Override + public String toString() { + return name() + "#" + id(); + } + + public String functionName() { + return functionName; + } + + // TODO: ExpressionId might be converted into an Int which could make the String an int as well + public String functionId() { + return id().toString(); + } + + protected String functionArgs() { + StringJoiner sj = new StringJoiner(",", "(", ")"); + for (Expression child : children()) { + String val = child instanceof NamedExpression && child.resolved() ? Expressions.name(child) : child.toString(); + sj.add(val); + } + return sj.toString(); + } + + public boolean functionEquals(Function f) { + return f != null && getClass() == f.getClass() && arguments().equals(f.arguments()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionAttribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionAttribute.java new file mode 100644 index 0000000000000..e61549c543c28 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionAttribute.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
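`Function` above derives `functionName` by passing the class's simple name through `StringUtils.camelCaseToUnderscore`, and `FunctionRegistry` below normalizes lookups the same way. That helper is not part of this hunk, so the following standalone sketch only illustrates the kind of conversion involved; the exact casing and edge-case handling of the real utility are assumptions here.

```java
import java.util.Locale;

// Standalone sketch of camel-case to underscore normalization, e.g. "DayOfMonth" -> "DAY_OF_MONTH".
// It mirrors what Function/FunctionRegistry appear to rely on; it is not the plugin's implementation.
public class CamelCaseSketch {
    static String camelCaseToUnderscore(String name) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < name.length(); i++) {
            char c = name.charAt(i);
            // Insert a separator before an upper-case letter that follows a lower-case one.
            if (i > 0 && Character.isUpperCase(c) && Character.isLowerCase(name.charAt(i - 1))) {
                sb.append('_');
            }
            sb.append(c);
        }
        return sb.toString().toUpperCase(Locale.ROOT);
    }

    public static void main(String[] args) {
        System.out.println(camelCaseToUnderscore("DayOfMonth"));   // DAY_OF_MONTH
        System.out.println(camelCaseToUnderscore("Avg"));          // AVG
    }
}
```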
+ */ +package org.elasticsearch.xpack.sql.expression.function; + +import org.elasticsearch.xpack.sql.expression.ExpressionId; +import org.elasticsearch.xpack.sql.expression.TypedAttribute; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.Objects; + +public abstract class FunctionAttribute extends TypedAttribute { + + private final String functionId; + + protected FunctionAttribute(Location location, String name, DataType dataType, String qualifier, boolean nullable, ExpressionId id, + boolean synthetic, String functionId) { + super(location, name, dataType, qualifier, nullable, id, synthetic); + this.functionId = functionId; + } + + public String functionId() { + return functionId; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), functionId); + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) && Objects.equals(functionId, ((FunctionAttribute) obj).functionId()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionDefinition.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionDefinition.java new file mode 100644 index 0000000000000..ec76b6ab34ab0 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionDefinition.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function; + +import java.util.List; +import java.util.Locale; +import java.util.TimeZone; + +import static java.lang.String.format; + +public class FunctionDefinition { + /** + * Converts an {@link UnresolvedFunction} into a proper {@link Function}. + */ + @FunctionalInterface + public interface Builder { + Function build(UnresolvedFunction uf, boolean distinct, TimeZone tz); + } + private final String name; + private final List aliases; + private final Class clazz; + /** + * Is this a datetime function compatible with {@code EXTRACT}. + */ + private final boolean datetime; + private final Builder builder; + private final FunctionType type; + + FunctionDefinition(String name, List aliases, Class clazz, + boolean datetime, Builder builder) { + this.name = name; + this.aliases = aliases; + this.clazz = clazz; + this.datetime = datetime; + this.builder = builder; + this.type = FunctionType.of(clazz); + } + + public String name() { + return name; + } + + public List aliases() { + return aliases; + } + + public FunctionType type() { + return type; + } + + Class clazz() { + return clazz; + } + + Builder builder() { + return builder; + } + + /** + * Is this a datetime function compatible with {@code EXTRACT}. + */ + boolean datetime() { + return datetime; + } + + @Override + public String toString() { + return format(Locale.ROOT, "%s(%s)", name, aliases.isEmpty() ? "" : aliases.size() == 1 ?
aliases.get(0) : aliases ); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java new file mode 100644 index 0000000000000..25915c1a0a858 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java @@ -0,0 +1,324 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Avg; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Count; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Kurtosis; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Max; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Min; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Percentile; +import org.elasticsearch.xpack.sql.expression.function.aggregate.PercentileRank; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Skewness; +import org.elasticsearch.xpack.sql.expression.function.aggregate.StddevPop; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Sum; +import org.elasticsearch.xpack.sql.expression.function.aggregate.SumOfSquares; +import org.elasticsearch.xpack.sql.expression.function.aggregate.VarPop; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Mod; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayOfMonth; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayOfWeek; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayOfYear; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.HourOfDay; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.MinuteOfDay; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.MinuteOfHour; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.MonthOfYear; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.SecondOfMinute; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.WeekOfYear; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.Year; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.ACos; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.ASin; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.ATan; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.ATan2; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Abs; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Cbrt; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Ceil; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Cos; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Cosh; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Cot; +import 
org.elasticsearch.xpack.sql.expression.function.scalar.math.Degrees; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.E; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Exp; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Expm1; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Floor; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Log; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Log10; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Pi; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Power; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Radians; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Random; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Round; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Sign; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Sin; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Sinh; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Sqrt; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Tan; +import org.elasticsearch.xpack.sql.parser.ParsingException; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.util.StringUtils; +import org.joda.time.DateTimeZone; + +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.TimeZone; +import java.util.function.BiFunction; +import java.util.regex.Pattern; + +import static java.util.Collections.emptyList; +import static java.util.Collections.unmodifiableList; +import static java.util.stream.Collectors.toList; + +public class FunctionRegistry { + private static final List DEFAULT_FUNCTIONS = unmodifiableList(Arrays.asList( + // Aggregate functions + def(Avg.class, Avg::new), + def(Count.class, Count::new), + def(Max.class, Max::new), + def(Min.class, Min::new), + def(Sum.class, Sum::new), + // Statistics + def(StddevPop.class, StddevPop::new), + def(VarPop.class, VarPop::new), + def(Percentile.class, Percentile::new), + def(PercentileRank.class, PercentileRank::new), + def(SumOfSquares.class, SumOfSquares::new), + def(Skewness.class, Skewness::new), + def(Kurtosis.class, Kurtosis::new), + // Scalar functions + // Date + def(DayOfMonth.class, DayOfMonth::new, "DAY", "DOM"), + def(DayOfWeek.class, DayOfWeek::new, "DOW"), + def(DayOfYear.class, DayOfYear::new, "DOY"), + def(HourOfDay.class, HourOfDay::new, "HOUR"), + def(MinuteOfDay.class, MinuteOfDay::new), + def(MinuteOfHour.class, MinuteOfHour::new, "MINUTE"), + def(SecondOfMinute.class, SecondOfMinute::new, "SECOND"), + def(MonthOfYear.class, MonthOfYear::new, "MONTH"), + def(Year.class, Year::new), + def(WeekOfYear.class, WeekOfYear::new, "WEEK"), + // Math + def(Abs.class, Abs::new), + def(ACos.class, ACos::new), + def(ASin.class, ASin::new), + def(ATan.class, ATan::new), + def(ATan2.class, ATan2::new), + def(Cbrt.class, Cbrt::new), + def(Ceil.class, Ceil::new, "CEILING"), + def(Cos.class, Cos::new), + def(Cosh.class, Cosh::new), + def(Cot.class, Cot::new), + def(Degrees.class, Degrees::new), + def(E.class, E::new), + def(Exp.class, Exp::new), + def(Expm1.class, Expm1::new), + def(Floor.class, Floor::new), + def(Log.class, Log::new), + def(Log10.class, Log10::new), + // SQL and ODBC require MOD as a 
_function_ + def(Mod.class, Mod::new), + def(Pi.class, Pi::new), + def(Power.class, Power::new), + def(Radians.class, Radians::new), + def(Random.class, Random::new, "RAND"), + def(Round.class, Round::new), + def(Sign.class, Sign::new, "SIGNUM"), + def(Sin.class, Sin::new), + def(Sinh.class, Sinh::new), + def(Sqrt.class, Sqrt::new), + def(Tan.class, Tan::new), + // Special + def(Score.class, Score::new))); + + private final Map defs = new LinkedHashMap<>(); + private final Map aliases; + + /** + * Constructor to build with the default list of functions. + */ + public FunctionRegistry() { + this(DEFAULT_FUNCTIONS); + } + + /** + * Constructor specifying alternate functions for testing. + */ + FunctionRegistry(List functions) { + this.aliases = new HashMap<>(); + for (FunctionDefinition f : functions) { + defs.put(f.name(), f); + for (String alias : f.aliases()) { + Object old = aliases.put(alias, f.name()); + if (old != null) { + throw new IllegalArgumentException("alias [" + alias + "] is used by [" + old + "] and [" + f.name() + "]"); + } + defs.put(alias, f); + } + } + } + + public FunctionDefinition resolveFunction(String name) { + FunctionDefinition def = defs.get(normalize(name)); + if (def == null) { + throw new SqlIllegalArgumentException("Cannot find function {}; this should have been caught during analysis", name); + } + return def; + } + + public String concreteFunctionName(String alias) { + String normalized = normalize(alias); + return aliases.getOrDefault(normalized, normalized); + } + + public boolean functionExists(String name) { + return defs.containsKey(normalize(name)); + } + + public Collection listFunctions() { + // It is worth double checking if we need this copy. These are immutable anyway. + return defs.entrySet().stream() + .map(e -> new FunctionDefinition(e.getKey(), emptyList(), + e.getValue().clazz(), e.getValue().datetime(), e.getValue().builder())) + .collect(toList()); + } + + public Collection listFunctions(String pattern) { + // It is worth double checking if we need this copy. These are immutable anyway. + Pattern p = Strings.hasText(pattern) ? Pattern.compile(normalize(pattern)) : null; + return defs.entrySet().stream() + .filter(e -> p == null || p.matcher(e.getKey()).matches()) + .map(e -> new FunctionDefinition(e.getKey(), emptyList(), + e.getValue().clazz(), e.getValue().datetime(), e.getValue().builder())) + .collect(toList()); + } + + /** + * Build a {@linkplain FunctionDefinition} for a no-argument function that + * is not aware of time zone and does not support {@code DISTINCT}. + */ + static FunctionDefinition def(Class function, + java.util.function.Function ctorRef, String... aliases) { + FunctionBuilder builder = (location, children, distinct, tz) -> { + if (false == children.isEmpty()) { + throw new IllegalArgumentException("expects no arguments"); + } + if (distinct) { + throw new IllegalArgumentException("does not support DISTINCT yet it was specified"); + } + return ctorRef.apply(location); + }; + return def(function, builder, false, aliases); + } + + /** + * Build a {@linkplain FunctionDefinition} for a unary function that is not + * aware of time zone and does not support {@code DISTINCT}. + */ + @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do + static FunctionDefinition def(Class function, + BiFunction ctorRef, String... 
aliases) { + FunctionBuilder builder = (location, children, distinct, tz) -> { + if (children.size() != 1) { + throw new IllegalArgumentException("expects exactly one argument"); + } + if (distinct) { + throw new IllegalArgumentException("does not support DISTINCT yet it was specified"); + } + return ctorRef.apply(location, children.get(0)); + }; + return def(function, builder, false, aliases); + } + + /** + * Build a {@linkplain FunctionDefinition} for a unary function that is not + * aware of time zone but does support {@code DISTINCT}. + */ + @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do + static FunctionDefinition def(Class function, + DistinctAwareUnaryFunctionBuilder ctorRef, String... aliases) { + FunctionBuilder builder = (location, children, distinct, tz) -> { + if (children.size() != 1) { + throw new IllegalArgumentException("expects exactly one argument"); + } + return ctorRef.build(location, children.get(0), distinct); + }; + return def(function, builder, false, aliases); + } + interface DistinctAwareUnaryFunctionBuilder { + T build(Location location, Expression target, boolean distinct); + } + + /** + * Build a {@linkplain FunctionDefinition} for a unary function that + * operates on a datetime. + */ + @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do + static FunctionDefinition def(Class function, + DatetimeUnaryFunctionBuilder ctorRef, String... aliases) { + FunctionBuilder builder = (location, children, distinct, tz) -> { + if (children.size() != 1) { + throw new IllegalArgumentException("expects exactly one argument"); + } + if (distinct) { + throw new IllegalArgumentException("does not support DISTINCT yet it was specified"); + } + return ctorRef.build(location, children.get(0), tz); + }; + return def(function, builder, true, aliases); + } + interface DatetimeUnaryFunctionBuilder { + T build(Location location, Expression target, TimeZone tz); + } + + /** + * Build a {@linkplain FunctionDefinition} for a binary function that is + * not aware of time zone and does not support {@code DISTINCT}. + */ + @SuppressWarnings("overloads") // These are ambiguous if you aren't using ctor references but we always do + static FunctionDefinition def(Class function, + BinaryFunctionBuilder ctorRef, String... aliases) { + FunctionBuilder builder = (location, children, distinct, tz) -> { + if (children.size() != 2) { + throw new IllegalArgumentException("expects exactly two arguments"); + } + if (distinct) { + throw new IllegalArgumentException("does not support DISTINCT yet it was specified"); + } + return ctorRef.build(location, children.get(0), children.get(1)); + }; + return def(function, builder, false, aliases); + } + interface BinaryFunctionBuilder { + T build(Location location, Expression lhs, Expression rhs); + } + + private static FunctionDefinition def(Class function, FunctionBuilder builder, + boolean datetime, String... 
aliases) { + String primaryName = normalize(function.getSimpleName()); + FunctionDefinition.Builder realBuilder = (uf, distinct, tz) -> { + try { + return builder.build(uf.location(), uf.children(), distinct, tz); + } catch (IllegalArgumentException e) { + throw new ParsingException("error building [" + primaryName + "]: " + e.getMessage(), e, + uf.location().getLineNumber(), uf.location().getColumnNumber()); + } + }; + return new FunctionDefinition(primaryName, unmodifiableList(Arrays.asList(aliases)), function, datetime, realBuilder); + } + private interface FunctionBuilder { + Function build(Location location, List children, boolean distinct, TimeZone tz); + } + + private static String normalize(String name) { + // translate CamelCase to camel_case + return StringUtils.camelCaseToUnderscore(name); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionType.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionType.java new file mode 100644 index 0000000000000..dc75f0f5be37a --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionType.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.Score; + + +public enum FunctionType { + AGGREGATE(AggregateFunction.class), + SCALAR(ScalarFunction.class), + SCORE(Score.class); + + private final Class baseClass; + + FunctionType(Class base) { + this.baseClass = base; + } + + public static FunctionType of(Class clazz) { + for (FunctionType type : values()) { + if (type.baseClass.isAssignableFrom(clazz)) { + return type; + } + } + throw new SqlIllegalArgumentException("Cannot identify the function type for {}", clazz); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/Functions.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/Functions.java new file mode 100644 index 0000000000000..7f5465b7413a5 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/Functions.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
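With the registry above, resolution, alias handling, and listing all go through the same normalized name. A small usage sketch, assuming the classes in this diff are on the classpath and that `camelCaseToUnderscore` yields upper-case, underscore-separated names (the expected values in the comments follow from reading the registry code, not from running it):

```java
import org.elasticsearch.xpack.sql.expression.function.FunctionDefinition;
import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry;

public class RegistryExample {
    public static void main(String[] args) {
        FunctionRegistry registry = new FunctionRegistry();          // default function list

        // Lookups are normalized, so different spellings of the same name should match.
        System.out.println(registry.functionExists("day_of_month")); // expected: true
        System.out.println(registry.functionExists("NO_SUCH_FN"));   // expected: false

        // Aliases map back to the primary name; unknown names pass through unchanged.
        System.out.println(registry.concreteFunctionName("DAY"));    // expected: DAY_OF_MONTH
        System.out.println(registry.concreteFunctionName("FOO"));    // expected: FOO

        // Aliases are registered as definitions too, so they resolve directly.
        FunctionDefinition def = registry.resolveFunction("CEILING");
        System.out.println(def.name() + " -> " + def.type());        // expected: CEIL -> SCALAR
    }
}
```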
+ */ +package org.elasticsearch.xpack.sql.expression.function; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.sql.plan.QueryPlan; + +import java.util.LinkedHashMap; +import java.util.Map; + +public abstract class Functions { + + public static boolean isAggregate(Expression e) { + return e instanceof AggregateFunction; + } + + public static Map collectFunctions(QueryPlan plan) { + Map resolvedFunctions = new LinkedHashMap<>(); + plan.forEachExpressionsDown(e -> { + if (e.resolved() && e instanceof Function) { + Function f = (Function) e; + resolvedFunctions.put(f.functionId(), f); + } + }); + return resolvedFunctions; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/Score.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/Score.java new file mode 100644 index 0000000000000..f913cf0ea5610 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/Score.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.Function; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import static java.util.Collections.emptyList; + +import java.util.List; + +/** + * Function referring to the {@code _score} in a search. Only available + * in the search context, and only at the "root" so it can't be combined + * with other function. + */ +public class Score extends Function { + public Score(Location location) { + super(location, emptyList()); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this); + } + + @Override + public Expression replaceChildren(List newChildren) { + throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); + } + + @Override + public DataType dataType() { + return DataType.FLOAT; + } + + @Override + public Attribute toAttribute() { + return new ScoreAttribute(location()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Score other = (Score) obj; + return location().equals(other.location()); + } + + @Override + public int hashCode() { + return location().hashCode(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/ScoreAttribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/ScoreAttribute.java new file mode 100644 index 0000000000000..654369d0b961b --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/ScoreAttribute.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
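`Functions.collectFunctions` above keys resolved functions by `functionId()` in a `LinkedHashMap`, so a function that appears several times in a plan is recorded only once while first-seen order is preserved. A standalone sketch of that dedup-by-id idea (hypothetical names, no dependency on the plan classes):

```java
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Standalone sketch: collect items into a LinkedHashMap keyed by a stable id so duplicates
// collapse while insertion order is kept, mirroring the shape of collectFunctions above.
public class CollectById {
    static class Fn {
        final String id, name;
        Fn(String id, String name) { this.id = id; this.name = name; }
    }

    public static void main(String[] args) {
        List<Fn> seen = Arrays.asList(new Fn("1", "AVG(age)"), new Fn("2", "MAX(salary)"), new Fn("1", "AVG(age)"));
        Map<String, Fn> resolved = new LinkedHashMap<>();
        for (Fn f : seen) {
            resolved.put(f.id, f);                 // a repeated id simply overwrites the same entry
        }
        System.out.println(resolved.keySet());     // [1, 2]
    }
}
```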
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.ExpressionId; +import org.elasticsearch.xpack.sql.expression.function.FunctionAttribute; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +/** + * {@link Attribute} that represents Elasticsearch's {@code _score}. + */ +public class ScoreAttribute extends FunctionAttribute { + /** + * Constructor for normal use. + */ + public ScoreAttribute(Location location) { + this(location, "SCORE()", DataType.FLOAT, null, false, null, false); + } + + /** + * Constructor for {@link #clone()} + */ + private ScoreAttribute(Location location, String name, DataType dataType, String qualifier, boolean nullable, ExpressionId id, + boolean synthetic) { + super(location, name, dataType, qualifier, nullable, id, synthetic, "SCORE"); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this); + } + + @Override + protected Attribute clone(Location location, String name, String qualifier, boolean nullable, ExpressionId id, boolean synthetic) { + return new ScoreAttribute(location, name, dataType(), qualifier, nullable, id, synthetic); + } + + @Override + protected String label() { + return "SCORE"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/UnresolvedFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/UnresolvedFunction.java new file mode 100644 index 0000000000000..338b926ce6fce --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/UnresolvedFunction.java @@ -0,0 +1,278 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function; + +import org.elasticsearch.xpack.sql.capabilities.Unresolvable; +import org.elasticsearch.xpack.sql.capabilities.UnresolvedException; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Locale; +import java.util.Objects; +import java.util.Set; +import java.util.TimeZone; + +import static java.util.Collections.singletonList; + +public class UnresolvedFunction extends Function implements Unresolvable { + private final String name; + private final String unresolvedMsg; + /** + * How the resolution should be performed. This is changed depending + * on how the function was called. + */ + private final ResolutionType resolutionType; + /** + * Flag to indicate analysis has been applied and there's no point in + * doing it again this is an optimization to prevent searching for a + * better unresolved message over and over again. 
+ */ + private final boolean analyzed; + + public UnresolvedFunction(Location location, String name, ResolutionType resolutionType, List children) { + this(location, name, resolutionType, children, false, null); + } + + /** + * Constructor used for specifying a more descriptive message (typically + * 'did you mean') instead of the default one. + * @see #withMessage(String) + */ + UnresolvedFunction(Location location, String name, ResolutionType resolutionType, List children, + boolean analyzed, String unresolvedMessage) { + super(location, children); + this.name = name; + this.resolutionType = resolutionType; + this.analyzed = analyzed; + this.unresolvedMsg = unresolvedMessage == null ? "Unknown " + resolutionType.type() + " [" + name + "]" : unresolvedMessage; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, UnresolvedFunction::new, + name, resolutionType, children(), analyzed, unresolvedMsg); + } + + @Override + public Expression replaceChildren(List newChildren) { + return new UnresolvedFunction(location(), name, resolutionType, newChildren, analyzed, unresolvedMsg); + } + + public UnresolvedFunction withMessage(String message) { + return new UnresolvedFunction(location(), name(), resolutionType, children(), true, message); + } + + public UnresolvedFunction preprocessStar() { + return resolutionType.preprocessStar(this); + } + + /** + * Build a function to replace this one after resolving the function. + */ + public Function buildResolved(TimeZone timeZone, FunctionDefinition def) { + return resolutionType.buildResolved(this, timeZone, def); + } + + /** + * Build a marker {@link UnresolvedFunction} with an error message + * about the function being missing. + */ + public UnresolvedFunction missing(String normalizedName, Iterable alternatives) { + // try to find alternatives + Set names = new LinkedHashSet<>(); + for (FunctionDefinition def : alternatives) { + if (resolutionType.isValidAlternative(def)) { + names.add(def.name()); + names.addAll(def.aliases()); + } + } + + List matches = StringUtils.findSimilar(normalizedName, names); + if (matches.isEmpty()) { + return this; + } + String matchesMessage = matches.size() == 1 ? 
"[" + matches.get(0) + "]" : "any of " + matches; + return withMessage("Unknown " + resolutionType.type() + " [" + name + "], did you mean " + matchesMessage + "?"); + } + + @Override + public boolean resolved() { + return false; + } + + @Override + public String name() { + return name; + } + + @Override + public String functionName() { + return name; + } + + ResolutionType resolutionType() { + return resolutionType; + } + + public boolean analyzed() { + return analyzed; + } + + @Override + public DataType dataType() { + throw new UnresolvedException("dataType", this); + } + + @Override + public boolean nullable() { + throw new UnresolvedException("nullable", this); + } + + @Override + public Attribute toAttribute() { + throw new UnresolvedException("attribute", this); + } + + @Override + public String unresolvedMessage() { + return unresolvedMsg; + } + + @Override + public String toString() { + return UNRESOLVED_PREFIX + functionName() + functionArgs(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + UnresolvedFunction other = (UnresolvedFunction) obj; + return name.equals(other.name) + && resolutionType.equals(other.resolutionType) + && children().equals(other.children()) + && analyzed == other.analyzed + && Objects.equals(unresolvedMsg, other.unresolvedMsg); + } + + @Override + public int hashCode() { + return Objects.hash(name, resolutionType, children(), analyzed, unresolvedMsg); + } + + /** + * Customize how function resolution works based on + * where the function appeared in the grammar. + */ + public enum ResolutionType { + /** + * Behavior of standard function calls like {@code ABS(col)}. + */ + STANDARD { + @Override + public UnresolvedFunction preprocessStar(UnresolvedFunction uf) { + // TODO: might be removed + // dedicated count optimization + if (uf.name.toUpperCase(Locale.ROOT).equals("COUNT")) { + return new UnresolvedFunction(uf.location(), uf.name(), uf.resolutionType, + singletonList(Literal.of(uf.arguments().get(0).location(), Integer.valueOf(1)))); + } + return uf; + } + @Override + public Function buildResolved(UnresolvedFunction uf, TimeZone tz, FunctionDefinition def) { + return def.builder().build(uf, false, tz); + } + @Override + protected boolean isValidAlternative(FunctionDefinition def) { + return true; + } + @Override + protected String type() { + return "function"; + } + }, + /** + * Behavior of DISTINCT like {@code COUNT DISTINCT(col)}. + */ + DISTINCT { + @Override + public UnresolvedFunction preprocessStar(UnresolvedFunction uf) { + return uf.withMessage("* is not valid with DISTINCT"); + } + @Override + public Function buildResolved(UnresolvedFunction uf, TimeZone tz, FunctionDefinition def) { + return def.builder().build(uf, true, tz); + } + @Override + protected boolean isValidAlternative(FunctionDefinition def) { + return false; // think about this later. + } + @Override + protected String type() { + return "function"; + } + }, + /** + * Behavior of EXTRACT function calls like {@code EXTRACT(DAY FROM col)}. + */ + EXTRACT { + @Override + public UnresolvedFunction preprocessStar(UnresolvedFunction uf) { + return uf.withMessage("Can't extract from *"); + } + @Override + public Function buildResolved(UnresolvedFunction uf, TimeZone tz, FunctionDefinition def) { + if (def.datetime()) { + return def.builder().build(uf, false, tz); + } + return uf.withMessage("Invalid datetime field [" + uf.name() + "]. 
Use any datetime function."); + } + @Override + protected boolean isValidAlternative(FunctionDefinition def) { + return def.datetime(); + } + @Override + protected String type() { + return "datetime field"; + } + }; + /** + * Preprocess a function that contains a star to some other + * form before attempting to resolve it. For example, + * {@code DISTINCT} doesn't support {@code *} so it converts + * this function into a dead end, unresolveable function. + * Or {@code COUNT(*)} can be rewritten to {@code COUNT(1)} + * so we don't have to resolve {@code *}. + */ + protected abstract UnresolvedFunction preprocessStar(UnresolvedFunction uf); + /** + * Build the real function from this one and resolution metadata. + */ + protected abstract Function buildResolved(UnresolvedFunction uf, TimeZone tz, FunctionDefinition def); + /** + * Is {@code def} a valid alternative for function invocations + * of this kind. Used to filter the list of "did you mean" + * options sent back to the user when they specify a missing + * function. + */ + protected abstract boolean isValidAlternative(FunctionDefinition def); + /** + * The name of the kind of thing being resolved. Used when + * building the error message sent back to the user when + * they specify a function that doesn't exist. + */ + protected abstract String type(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunction.java new file mode 100644 index 0000000000000..413ecf96464c9 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunction.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.Function; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.util.CollectionUtils; + +import java.util.List; +import java.util.Objects; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; + +/** + * A type of {@code Function} that takes multiple values and extracts a single value out of them. For example, {@code AVG()}. 
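`ResolutionType` above changes how a call is resolved based on where it appeared in the grammar: standard calls rewrite `COUNT(*)` to `COUNT(1)`, `DISTINCT` rejects `*` outright, and `EXTRACT` only accepts datetime functions. The sketch below is a simplified standalone model of that dispatch (hypothetical `Kind`/`resolveStar` names); it does not build real `Function` instances, which would require a parser-produced `Location`.

```java
// Standalone model of the three resolution behaviors described above. Real resolution also
// builds the concrete Function via FunctionDefinition.Builder; this only shows the star handling.
public class ResolutionSketch {
    enum Kind { STANDARD, DISTINCT, EXTRACT }

    static String resolveStar(Kind kind, String functionName) {
        switch (kind) {
            case STANDARD:
                // COUNT(*) is the one star form with a direct rewrite: count rows as COUNT(1).
                return "COUNT".equalsIgnoreCase(functionName) ? functionName + "(1)" : functionName + "(*)";
            case DISTINCT:
                return "error: * is not valid with DISTINCT";
            case EXTRACT:
                return "error: Can't extract from *";
            default:
                throw new AssertionError(kind);
        }
    }

    public static void main(String[] args) {
        System.out.println(resolveStar(Kind.STANDARD, "COUNT"));   // COUNT(1)
        System.out.println(resolveStar(Kind.DISTINCT, "COUNT"));   // error: * is not valid with DISTINCT
        System.out.println(resolveStar(Kind.EXTRACT, "DAY"));      // error: Can't extract from *
    }
}
```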
+ */ +public abstract class AggregateFunction extends Function { + + private final Expression field; + private final List parameters; + + private AggregateFunctionAttribute lazyAttribute; + + AggregateFunction(Location location, Expression field) { + this(location, field, emptyList()); + } + + AggregateFunction(Location location, Expression field, List parameters) { + super(location, CollectionUtils.combine(singletonList(field), parameters)); + this.field = field; + this.parameters = parameters; + } + + public Expression field() { + return field; + } + + public List parameters() { + return parameters; + } + + @Override + public AggregateFunctionAttribute toAttribute() { + if (lazyAttribute == null) { + // this is highly correlated with QueryFolder$FoldAggregate#addFunction (regarding the function name within the querydsl) + lazyAttribute = new AggregateFunctionAttribute(location(), name(), dataType(), id(), functionId(), null); + } + return lazyAttribute; + } + + @Override + public boolean equals(Object obj) { + if (false == super.equals(obj)) { + return false; + } + AggregateFunction other = (AggregateFunction) obj; + return Objects.equals(other.field(), field()) + && Objects.equals(other.parameters(), parameters()); + } + + @Override + public int hashCode() { + return Objects.hash(field(), parameters()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunctionAttribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunctionAttribute.java new file mode 100644 index 0000000000000..ab0ae9bfe8b1e --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/AggregateFunctionAttribute.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.ExpressionId; +import org.elasticsearch.xpack.sql.expression.function.FunctionAttribute; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.Objects; + +public class AggregateFunctionAttribute extends FunctionAttribute { + + private final String propertyPath; + + AggregateFunctionAttribute(Location location, String name, DataType dataType, ExpressionId id, + String functionId, String propertyPath) { + this(location, name, dataType, null, false, id, false, functionId, propertyPath); + } + + public AggregateFunctionAttribute(Location location, String name, DataType dataType, String qualifier, + boolean nullable, ExpressionId id, boolean synthetic, String functionId, String propertyPath) { + super(location, name, dataType, qualifier, nullable, id, synthetic, functionId); + this.propertyPath = propertyPath; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, AggregateFunctionAttribute::new, + name(), dataType(), qualifier(), nullable(), id(), synthetic(), functionId(), propertyPath); + } + + public String propertyPath() { + return propertyPath; + } + + @Override + protected Expression canonicalize() { + return new AggregateFunctionAttribute(location(), "", dataType(), null, true, id(), false, "", null); + } + + @Override + protected Attribute clone(Location location, String name, String qualifier, boolean nullable, ExpressionId id, boolean synthetic) { + // this is highly correlated with QueryFolder$FoldAggregate#addFunction (regarding the function name within the querydsl) + // that is the functionId is actually derived from the expression id to easily track it across contexts + return new AggregateFunctionAttribute(location, name, dataType(), qualifier, nullable, id, synthetic, functionId(), propertyPath); + } + + public AggregateFunctionAttribute withFunctionId(String functionId, String propertyPath) { + return new AggregateFunctionAttribute(location(), name(), dataType(), qualifier(), nullable(), + id(), synthetic(), functionId, propertyPath); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), propertyPath); + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) && Objects.equals(propertyPath(), ((AggregateFunctionAttribute) obj).propertyPath()); + } + + @Override + protected String label() { + return "a->" + functionId(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Avg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Avg.java new file mode 100644 index 0000000000000..7c413feba8448 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Avg.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import java.util.List; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; + +/** + * Find the arithmetic mean of a field. + */ +public class Avg extends NumericAggregate implements EnclosedAgg { + + public Avg(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Avg::new, field()); + } + + @Override + public Avg replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return new Avg(location(), newChildren.get(0)); + } + + @Override + public String innerName() { + return "avg"; + } + + @Override + public DataType dataType() { + return field().dataType(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/CompoundNumericAggregate.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/CompoundNumericAggregate.java new file mode 100644 index 0000000000000..3ff2ae0c44e09 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/CompoundNumericAggregate.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.List; + +// marker type for compound aggregates, that is, aggregates that provide multiple values (like Stats or Matrix) +// and thus cannot be used directly in SQL and are mainly for internal use +public abstract class CompoundNumericAggregate extends NumericAggregate { + + CompoundNumericAggregate(Location location, Expression field, List arguments) { + super(location, field, arguments); + } + + CompoundNumericAggregate(Location location, Expression field) { + super(location, field); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Count.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Count.java new file mode 100644 index 0000000000000..397ff1e7c9e68 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Count.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import java.util.List; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +/** + * Count the number of documents matched ({@code COUNT}) + * or count the number of distinct values + * for a field that matched ({@code COUNT(DISTINCT)}). + */ +public class Count extends AggregateFunction { + + private final boolean distinct; + + public Count(Location location, Expression field, boolean distinct) { + super(location, field); + this.distinct = distinct; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Count::new, field(), distinct); + } + + @Override + public Count replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return new Count(location(), newChildren.get(0), distinct); + } + + public boolean distinct() { + return distinct; + } + + @Override + public DataType dataType() { + return DataType.LONG; + } + + @Override + public String functionId() { + String functionId = id().toString(); + // if count works against a given expression, use its id (to identify the group) + if (field() instanceof NamedExpression) { + functionId = ((NamedExpression) field()).id().toString(); + } + return functionId; + } + + @Override + public AggregateFunctionAttribute toAttribute() { + return new AggregateFunctionAttribute(location(), name(), dataType(), id(), functionId(), "_count"); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/EnclosedAgg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/EnclosedAgg.java new file mode 100644 index 0000000000000..146cc68ba14a4 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/EnclosedAgg.java @@ -0,0 +1,12 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +// Agg 'enclosed' by another agg. Used for aggs that return multiple embedded aggs (like MatrixStats) +public interface EnclosedAgg { + + String innerName(); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/ExtendedStats.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/ExtendedStats.java new file mode 100644 index 0000000000000..3f0555cd57751 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/ExtendedStats.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import java.util.List; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class ExtendedStats extends CompoundNumericAggregate { + + public ExtendedStats(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ExtendedStats::new, field()); + } + + @Override + public ExtendedStats replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return new ExtendedStats(location(), newChildren.get(0)); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/ExtendedStatsEnclosed.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/ExtendedStatsEnclosed.java new file mode 100644 index 0000000000000..3e51fa4ef1adf --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/ExtendedStatsEnclosed.java @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +public interface ExtendedStatsEnclosed extends StatsEnclosed, EnclosedAgg { + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/InnerAggregate.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/InnerAggregate.java new file mode 100644 index 0000000000000..b7fa7cfe3e5d2 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/InnerAggregate.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.Function; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.List; + +public class InnerAggregate extends AggregateFunction { + + private final AggregateFunction inner; + private final CompoundNumericAggregate outer; + private final String innerId; + // used when the result needs to be extracted from a map (like in MatrixAggs or Percentiles) + private final Expression innerKey; + + public InnerAggregate(AggregateFunction inner, CompoundNumericAggregate outer) { + this(inner.location(), inner, outer, null); + } + + public InnerAggregate(Location location, AggregateFunction inner, CompoundNumericAggregate outer, Expression innerKey) { + super(location, outer.field(), outer.arguments()); + this.inner = inner; + this.outer = outer; + this.innerId = ((EnclosedAgg) inner).innerName(); + this.innerKey = innerKey; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, InnerAggregate::new, inner, outer, innerKey); + } + + @Override + public Expression replaceChildren(List newChildren) { + /* I can't figure out how rewriting this one's children ever worked because its children + * are all twisted up in `outer`. Refusing to rewrite it doesn't break anything + * that I can see right now so let's just go with it and hope for the best. + * Maybe someone will make this make sense one day! */ + throw new UnsupportedOperationException("can't be rewritten"); + } + + public AggregateFunction inner() { + return inner; + } + + public CompoundNumericAggregate outer() { + return outer; + } + + public String innerId() { + return innerId; + } + + public Expression innerKey() { + return innerKey; + } + + @Override + public DataType dataType() { + return inner.dataType(); + } + + @Override + public String functionId() { + return outer.id().toString(); + } + + @Override + public AggregateFunctionAttribute toAttribute() { + // this is highly correlated with QueryFolder$FoldAggregate#addFunction (regarding the function name within the querydsl) + return new AggregateFunctionAttribute(location(), name(), dataType(), outer.id(), functionId(), + aggMetricValue(functionId(), innerId)); + } + + public static String aggMetricValue(String aggPath, String valueName) { + // handle aggPath inconsistency (for percentiles and percentileRanks) percentile[99.9] (valid) vs percentile.99.9 (invalid) + return aggPath + "[" + valueName + "]"; + } + + @Override + public boolean functionEquals(Function f) { + if (super.equals(f)) { + InnerAggregate other = (InnerAggregate) f; + return inner.equals(other.inner) && outer.equals(other.outer); + } + return false; + } + + @Override + public String name() { + return inner.name(); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Kurtosis.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Kurtosis.java new file mode 100644 index 0000000000000..5d46fa68f2bed --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Kurtosis.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements.
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import java.util.List; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class Kurtosis extends NumericAggregate implements MatrixStatsEnclosed { + + public Kurtosis(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Kurtosis::new, field()); + } + + @Override + public Kurtosis replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return new Kurtosis(location(), newChildren.get(0)); + } + + @Override + public String innerName() { + return "kurtosis"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/MatrixStats.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/MatrixStats.java new file mode 100644 index 0000000000000..fa7697b55c363 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/MatrixStats.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import java.util.List; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class MatrixStats extends CompoundNumericAggregate { + + public MatrixStats(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, MatrixStats::new, field()); + } + + @Override + public MatrixStats replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return new MatrixStats(location(), newChildren.get(0)); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/MatrixStatsEnclosed.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/MatrixStatsEnclosed.java new file mode 100644 index 0000000000000..67d60ae1a640e --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/MatrixStatsEnclosed.java @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +public interface MatrixStatsEnclosed extends EnclosedAgg { + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java new file mode 100644 index 0000000000000..6b710cf06d54f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Max.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import java.util.List; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; + +/** + * Find the maximum value in matching documents. + */ +public class Max extends NumericAggregate implements EnclosedAgg { + + public Max(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Max::new, field()); + } + + @Override + public Max replaceChildren(List newChildren) { + return new Max(location(), newChildren.get(0)); + } + + @Override + public DataType dataType() { + return field().dataType(); + } + + @Override + public String innerName() { + return "max"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java new file mode 100644 index 0000000000000..16adf6461e1bc --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Min.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import java.util.List; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; + +/** + * Find the minimum value in matched documents. 
+ */ +public class Min extends NumericAggregate implements EnclosedAgg { + + public Min(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Min::new, field()); + } + + @Override + public Min replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return new Min(location(), newChildren.get(0)); + } + + @Override + public DataType dataType() { + return field().dataType(); + } + + @Override + public String innerName() { + return "min"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/NumericAggregate.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/NumericAggregate.java new file mode 100644 index 0000000000000..57fdc01e935d8 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/NumericAggregate.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import java.util.List; + +abstract class NumericAggregate extends AggregateFunction { + + NumericAggregate(Location location, Expression field, List parameters) { + super(location, field, parameters); + } + + NumericAggregate(Location location, Expression field) { + super(location, field); + } + + @Override + protected TypeResolution resolveType() { + return Expressions.typeMustBeNumeric(field()); + } + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java new file mode 100644 index 0000000000000..a3293161e0879 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Foldables; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.List; + +import static java.util.Collections.singletonList; + +public class Percentile extends NumericAggregate implements EnclosedAgg { + + private final Expression percent; + + public Percentile(Location location, Expression field, Expression percent) { + super(location, field, singletonList(percent)); + this.percent = percent; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Percentile::new, field(), percent); + } + + @Override + public Percentile replaceChildren(List newChildren) { + if (newChildren.size() != 2) { + throw new IllegalArgumentException("expected [2] children but received [" + newChildren.size() + "]"); + } + return new Percentile(location(), newChildren.get(0), newChildren.get(1)); + } + + @Override + protected TypeResolution resolveType() { + TypeResolution resolution = super.resolveType(); + + if (TypeResolution.TYPE_RESOLVED.equals(resolution)) { + resolution = Expressions.typeMustBeNumeric(percent()); + } + + return resolution; + } + + public Expression percent() { + return percent; + } + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } + + @Override + public String innerName() { + return Double.toString(Foldables.doubleValueOf(percent)); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java new file mode 100644 index 0000000000000..dabe27a0caef4 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Foldables; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.List; + +import static java.util.Collections.singletonList; + +public class PercentileRank extends AggregateFunction implements EnclosedAgg { + + private final Expression value; + + public PercentileRank(Location location, Expression field, Expression value) { + super(location, field, singletonList(value)); + this.value = value; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, PercentileRank::new, field(), value); + } + + @Override + public Expression replaceChildren(List newChildren) { + if (newChildren.size() != 2) { + throw new IllegalArgumentException("expected [2] children but received [" + newChildren.size() + "]"); + } + return new PercentileRank(location(), newChildren.get(0), newChildren.get(1)); + } + + @Override + protected TypeResolution resolveType() { + TypeResolution resolution = super.resolveType(); + + if (TypeResolution.TYPE_RESOLVED.equals(resolution)) { + resolution = Expressions.typeMustBeNumeric(value); + } + + return resolution; + } + + public Expression value() { + return value; + } + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } + + @Override + public String innerName() { + return Double.toString(Foldables.doubleValueOf(value)); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRanks.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRanks.java new file mode 100644 index 0000000000000..38c79ebd6210b --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRanks.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import java.util.List; + +public class PercentileRanks extends CompoundNumericAggregate { + + private final List values; + + public PercentileRanks(Location location, Expression field, List values) { + super(location, field, values); + this.values = values; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, PercentileRanks::new, field(), values); + } + + @Override + public PercentileRanks replaceChildren(List newChildren) { + if (newChildren.size() < 2) { + throw new IllegalArgumentException("expected at least [2] children but received [" + newChildren.size() + "]"); + } + return new PercentileRanks(location(), newChildren.get(0), newChildren.subList(1, newChildren.size())); + } + + public List values() { + return values; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentiles.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentiles.java new file mode 100644 index 0000000000000..932a887806f23 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentiles.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import java.util.List; + +public class Percentiles extends CompoundNumericAggregate { + + private final List percents; + + public Percentiles(Location location, Expression field, List percents) { + super(location, field, percents); + this.percents = percents; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Percentiles::new, field(), percents); + } + + @Override + public Percentiles replaceChildren(List newChildren) { + if (newChildren.size() < 2) { + throw new IllegalArgumentException("expected more than one child but received [" + newChildren.size() + "]"); + } + return new Percentiles(location(), newChildren.get(0), newChildren.subList(1, newChildren.size())); + } + + public List percents() { + return percents; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Skewness.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Skewness.java new file mode 100644 index 0000000000000..d8514e40ede30 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Skewness.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import java.util.List; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class Skewness extends NumericAggregate implements MatrixStatsEnclosed { + + public Skewness(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Skewness::new, field()); + } + + @Override + public Skewness replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return new Skewness(location(), newChildren.get(0)); + } + + @Override + public String innerName() { + return "skewness"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Stats.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Stats.java new file mode 100644 index 0000000000000..3bfddd9374e0d --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Stats.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import java.util.List; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class Stats extends CompoundNumericAggregate { + + public Stats(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Stats::new, field()); + } + + @Override + public Stats replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return new Stats(location(), newChildren.get(0)); + } + + public static boolean isTypeCompatible(Expression e) { + return e instanceof Min || e instanceof Max || e instanceof Avg || e instanceof Sum; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/StatsEnclosed.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/StatsEnclosed.java new file mode 100644 index 0000000000000..e68de8d602821 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/StatsEnclosed.java @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +public interface StatsEnclosed { + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/StddevPop.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/StddevPop.java new file mode 100644 index 0000000000000..acdfaecf55615 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/StddevPop.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import java.util.List; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class StddevPop extends NumericAggregate implements ExtendedStatsEnclosed { + + public StddevPop(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StddevPop::new, field()); + } + + @Override + public StddevPop replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return new StddevPop(location(), newChildren.get(0)); + } + + @Override + public String innerName() { + return "std_deviation"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Sum.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Sum.java new file mode 100644 index 0000000000000..25861542509bf --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Sum.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import java.util.List; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; + +/** + * Sum all values of a field in matching documents. 
+ */ +public class Sum extends NumericAggregate implements EnclosedAgg { + + public Sum(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Sum::new, field()); + } + + @Override + public Sum replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return new Sum(location(), newChildren.get(0)); + } + + @Override + public DataType dataType() { + return field().dataType(); + } + + @Override + public String innerName() { + return "sum"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/SumOfSquares.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/SumOfSquares.java new file mode 100644 index 0000000000000..a52c279e83a5a --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/SumOfSquares.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import java.util.List; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class SumOfSquares extends NumericAggregate implements ExtendedStatsEnclosed { + + public SumOfSquares(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, SumOfSquares::new, field()); + } + + @Override + public SumOfSquares replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return new SumOfSquares(location(), newChildren.get(0)); + } + + @Override + public String innerName() { + return "sum_of_squares"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/VarPop.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/VarPop.java new file mode 100644 index 0000000000000..0acfeba279da9 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/VarPop.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.aggregate; + +import java.util.List; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class VarPop extends NumericAggregate implements ExtendedStatsEnclosed { + + public VarPop(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, VarPop::new, field()); + } + + @Override + public VarPop replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return new VarPop(location(), newChildren.get(0)); + } + + @Override + public String innerName() { + return "variance"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/BinaryScalarFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/BinaryScalarFunction.java new file mode 100644 index 0000000000000..4e2882d46c19e --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/BinaryScalarFunction.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.Arrays; +import java.util.List; + +public abstract class BinaryScalarFunction extends ScalarFunction { + + private final Expression left, right; + + protected BinaryScalarFunction(Location location, Expression left, Expression right) { + super(location, Arrays.asList(left, right)); + this.left = left; + this.right = right; + } + + @Override + public final BinaryScalarFunction replaceChildren(List newChildren) { + if (newChildren.size() != 2) { + throw new IllegalArgumentException("expected [2] children but received [" + newChildren.size() + "]"); + } + return replaceChildren(newChildren.get(0), newChildren.get(1)); + } + + protected abstract BinaryScalarFunction replaceChildren(Expression newLeft, Expression newRight); + + public Expression left() { + return left; + } + + public Expression right() { + return right; + } + + @Override + public boolean foldable() { + return left.foldable() && right.foldable(); + } + + @Override + public ScriptTemplate asScript() { + ScriptTemplate leftScript = asScript(left()); + ScriptTemplate rightScript = asScript(right()); + + return asScriptFrom(leftScript, rightScript); + } + + protected abstract ScriptTemplate asScriptFrom(ScriptTemplate leftScript, ScriptTemplate rightScript); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java new file mode 100644 index 0000000000000..4d68ad57cf931 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java @@ -0,0 +1,114 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinitions; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.UnaryProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.Params; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypeConversion; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import java.util.Objects; + +public class Cast extends UnaryScalarFunction { + private final DataType dataType; + + public Cast(Location location, Expression field, DataType dataType) { + super(location, field); + this.dataType = dataType; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Cast::new, field(), dataType); + } + + @Override + protected UnaryScalarFunction replaceChild(Expression newChild) { + return new Cast(location(), newChild, dataType); + } + + public DataType from() { + return field().dataType(); + } + + public DataType to() { + return dataType; + } + + @Override + public DataType dataType() { + return dataType; + } + + @Override + public boolean foldable() { + return field().foldable(); + } + + @Override + public Object fold() { + return DataTypeConversion.convert(field().fold(), dataType); + } + + @Override + public boolean nullable() { + return field().nullable() || DataTypes.isNull(from()); + } + + @Override + protected TypeResolution resolveType() { + return DataTypeConversion.canConvert(from(), to()) ? 
+ TypeResolution.TYPE_RESOLVED : + new TypeResolution("Cannot cast %s to %s", from(), to()); + } + + @Override + protected ScriptTemplate asScriptFrom(ScalarFunctionAttribute scalar) { + return scalar.script(); + } + + @Override + protected ScriptTemplate asScriptFrom(FieldAttribute field) { + return new ScriptTemplate(field.name(), Params.EMPTY, field.dataType()); + } + + @Override + protected ProcessorDefinition makeProcessorDefinition() { + return new UnaryProcessorDefinition(location(), this, ProcessorDefinitions.toProcessorDefinition(field()), + new CastProcessor(DataTypeConversion.conversionFor(from(), to()))); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), dataType); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Cast other = (Cast) obj; + return Objects.equals(dataType, other.dataType()) + && Objects.equals(field(), other.field()); + } + + @Override + public String toString() { + return functionName() + "(" + field().toString() + " AS " + to().sqlName() + ")#" + id(); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/CastProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/CastProcessor.java new file mode 100644 index 0000000000000..f5fe541fb4683 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/CastProcessor.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.elasticsearch.xpack.sql.type.DataTypeConversion.Conversion; + +import java.io.IOException; +import java.util.Objects; + +public class CastProcessor implements Processor { + + public static final String NAME = "ca"; + + private final Conversion conversion; + + public CastProcessor(Conversion conversion) { + this.conversion = conversion; + } + + public CastProcessor(StreamInput in) throws IOException { + conversion = in.readEnum(Conversion.class); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(conversion); + } + + @Override + public Object process(Object input) { + return conversion.convert(input); + } + + Conversion converter() { + return conversion; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + CastProcessor other = (CastProcessor) obj; + return Objects.equals(conversion, other.conversion); + } + + @Override + public int hashCode() { + return Objects.hash(conversion); + } + + @Override + public String toString() { + return conversion.name(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java new file mode 100644 index 0000000000000..2084ad684df67 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.UnaryArithmeticProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.BucketExtractorProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ChainingProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ConstantProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.HitExtractorProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; + +import java.util.ArrayList; +import java.util.List; + +public final class Processors { + + private Processors() {} + + /** + * All of the named writeables needed to deserialize the instances of + * {@linkplain Processors}. + */ + public static List getNamedWriteables() { + List entries = new ArrayList<>(); + // base + entries.add(new Entry(Processor.class, ConstantProcessor.NAME, ConstantProcessor::new)); + entries.add(new Entry(Processor.class, HitExtractorProcessor.NAME, HitExtractorProcessor::new)); + entries.add(new Entry(Processor.class, BucketExtractorProcessor.NAME, BucketExtractorProcessor::new)); + entries.add(new Entry(Processor.class, CastProcessor.NAME, CastProcessor::new)); + entries.add(new Entry(Processor.class, ChainingProcessor.NAME, ChainingProcessor::new)); + + // arithmetic + entries.add(new Entry(Processor.class, BinaryArithmeticProcessor.NAME, BinaryArithmeticProcessor::new)); + entries.add(new Entry(Processor.class, UnaryArithmeticProcessor.NAME, UnaryArithmeticProcessor::new)); + entries.add(new Entry(Processor.class, BinaryMathProcessor.NAME, BinaryMathProcessor::new)); + // datetime + entries.add(new Entry(Processor.class, DateTimeProcessor.NAME, DateTimeProcessor::new)); + // math + entries.add(new Entry(Processor.class, MathProcessor.NAME, MathProcessor::new)); + return entries; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunction.java new file mode 100644 index 0000000000000..8462ee293cc48 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunction.java @@ -0,0 +1,119 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.function.Function; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.Params; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.List; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder.paramsBuilder; +import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate.formatTemplate; + +/** + * A {@code ScalarFunction} is a {@code Function} that takes values from some + * operation and converts each to another value. An example would be + * {@code ABS()}, which takes one value at a time, applies a function to the + * value (abs) and returns a new value. + */ +public abstract class ScalarFunction extends Function { + + private ScalarFunctionAttribute lazyAttribute = null; + private ProcessorDefinition lazyProcessor = null; + + + protected ScalarFunction(Location location) { + super(location, emptyList()); + } + + protected ScalarFunction(Location location, List fields) { + super(location, fields); + } + + @Override + public final ScalarFunctionAttribute toAttribute() { + if (lazyAttribute == null) { + lazyAttribute = new ScalarFunctionAttribute(location(), name(), dataType(), id(), functionId(), asScript(), orderBy(), + asProcessorDefinition()); + } + return lazyAttribute; + } + + public abstract ScriptTemplate asScript(); + + // utility methods for creating the actual scripts + protected ScriptTemplate asScript(Expression exp) { + if (exp.foldable()) { + return asScriptFromFoldable(exp); + } + + Attribute attr = Expressions.attribute(exp); + if (attr != null) { + if (attr instanceof ScalarFunctionAttribute) { + return asScriptFrom((ScalarFunctionAttribute) attr); + } + if (attr instanceof AggregateFunctionAttribute) { + return asScriptFrom((AggregateFunctionAttribute) attr); + } + // fall-back to + return asScriptFrom((FieldAttribute) attr); + } + throw new SqlIllegalArgumentException("Cannot evaluate script for expression {}", exp); + } + + protected ScriptTemplate asScriptFrom(ScalarFunctionAttribute scalar) { + ScriptTemplate nested = scalar.script(); + Params p = paramsBuilder().script(nested.params()).build(); + return new ScriptTemplate(formatScript(nested.template()), p, dataType()); + } + + protected ScriptTemplate asScriptFromFoldable(Expression foldable) { + return new ScriptTemplate(formatScript("{}"), + paramsBuilder().variable(foldable.fold()).build(), + foldable.dataType()); + } + + protected ScriptTemplate asScriptFrom(FieldAttribute field) { + return new ScriptTemplate(formatScript("doc[{}].value"), + paramsBuilder().variable(field.name()).build(), + field.dataType()); + } + + protected ScriptTemplate asScriptFrom(AggregateFunctionAttribute aggregate) { + return new ScriptTemplate(formatScript("{}"), + 
paramsBuilder().agg(aggregate).build(), + aggregate.dataType()); + } + + protected String formatScript(String scriptTemplate) { + return formatTemplate(scriptTemplate); + } + + public ProcessorDefinition asProcessorDefinition() { + if (lazyProcessor == null) { + lazyProcessor = makeProcessorDefinition(); + } + return lazyProcessor; + } + + protected abstract ProcessorDefinition makeProcessorDefinition(); + + // used if the function is monotonic and thus does not have to be computed for ordering purposes + // null means the script needs to be used; expression means the field/expression to be used instead + public Expression orderBy() { + return null; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunctionAttribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunctionAttribute.java new file mode 100644 index 0000000000000..84ad136e95bcb --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/ScalarFunctionAttribute.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.ExpressionId; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.expression.function.FunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.Objects; + +public class ScalarFunctionAttribute extends FunctionAttribute { + + private final ScriptTemplate script; + private final Expression orderBy; + private final ProcessorDefinition processorDef; + + ScalarFunctionAttribute(Location location, String name, DataType dataType, ExpressionId id, + String functionId, ScriptTemplate script, Expression orderBy, ProcessorDefinition processorDef) { + this(location, name, dataType, null, true, id, false, functionId, script, orderBy, processorDef); + } + + public ScalarFunctionAttribute(Location location, String name, DataType dataType, String qualifier, + boolean nullable, ExpressionId id, boolean synthetic, String functionId, ScriptTemplate script, + Expression orderBy, ProcessorDefinition processorDef) { + super(location, name, dataType, qualifier, nullable, id, synthetic, functionId); + + this.script = script; + this.orderBy = orderBy; + this.processorDef = processorDef; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ScalarFunctionAttribute::new, + name(), dataType(), qualifier(), nullable(), id(), synthetic(), + functionId(), script, orderBy, processorDef); + } + + public ScriptTemplate script() { + return script; + } + + public Expression orderBy() { + return orderBy; + } + + public ProcessorDefinition processorDef() { + return processorDef; + } + + @Override + protected Expression canonicalize() { + return new 
ScalarFunctionAttribute(location(), "", dataType(), null, true, id(), false, + functionId(), script, orderBy, processorDef); + } + + @Override + protected Attribute clone(Location location, String name, String qualifier, boolean nullable, ExpressionId id, boolean synthetic) { + return new ScalarFunctionAttribute(location, name, dataType(), qualifier, nullable, id, synthetic, + functionId(), script, orderBy, processorDef); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), orderBy); + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) && Objects.equals(orderBy, ((ScalarFunctionAttribute) obj).orderBy()); + } + + @Override + protected String label() { + return "s->" + functionId(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/UnaryScalarFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/UnaryScalarFunction.java new file mode 100644 index 0000000000000..e4a0953c11544 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/UnaryScalarFunction.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; + +import static java.util.Collections.singletonList; + +import java.util.List; + +public abstract class UnaryScalarFunction extends ScalarFunction { + + private final Expression field; + + protected UnaryScalarFunction(Location location) { + super(location); + this.field = null; + } + + protected UnaryScalarFunction(Location location, Expression field) { + super(location, singletonList(field)); + this.field = field; + } + + @Override + public final UnaryScalarFunction replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return replaceChild(newChildren.get(0)); + } + protected abstract UnaryScalarFunction replaceChild(Expression newChild); + + public Expression field() { + return field; + } + + @Override + public boolean foldable() { + return field.foldable(); + } + + @Override + public ScriptTemplate asScript() { + return asScript(field); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Add.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Add.java new file mode 100644 index 0000000000000..92d6e5218ac9c --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Add.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * Addition function ({@code a + b}). + */ +public class Add extends ArithmeticFunction { + public Add(Location location, Expression left, Expression right) { + super(location, left, right, BinaryArithmeticOperation.ADD); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Add::new, left(), right()); + } + + @Override + protected Add replaceChildren(Expression left, Expression right) { + return new Add(location(), left, right); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/ArithmeticFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/ArithmeticFunction.java new file mode 100644 index 0000000000000..5715e19963cbc --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/ArithmeticFunction.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryNumericFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinitions; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypeConversion; + +import java.util.Locale; + +import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder.paramsBuilder; + +public abstract class ArithmeticFunction extends BinaryNumericFunction { + + private final BinaryArithmeticOperation operation; + + ArithmeticFunction(Location location, Expression left, Expression right, BinaryArithmeticOperation operation) { + super(location, left, right); + this.operation = operation; + } + + @Override + public BinaryArithmeticOperation operation() { + return operation; + } + + @Override + public DataType dataType() { + return DataTypeConversion.commonType(left().dataType(), right().dataType()); + } + + @Override + protected ScriptTemplate asScriptFrom(ScriptTemplate leftScript, ScriptTemplate rightScript) { + String op = operation.symbol(); + // escape % + if (operation == BinaryArithmeticOperation.MOD) { + op = "%" + op; + } + return new ScriptTemplate(format(Locale.ROOT, "(%s) %s (%s)", leftScript.template(), op, rightScript.template()), + paramsBuilder() + 
.script(leftScript.params()).script(rightScript.params()) + .build(), dataType()); + } + + @Override + protected ProcessorDefinition makeProcessorDefinition() { + return new BinaryArithmeticProcessorDefinition(location(), this, + ProcessorDefinitions.toProcessorDefinition(left()), + ProcessorDefinitions.toProcessorDefinition(right()), + operation); + } + + @Override + public String name() { + StringBuilder sb = new StringBuilder(); + sb.append("("); + sb.append(left()); + if (!(left() instanceof Literal)) { + sb.insert(1, "("); + sb.append(")"); + } + sb.append(" "); + sb.append(operation); + sb.append(" "); + int pos = sb.length(); + sb.append(right()); + if (!(right() instanceof Literal)) { + sb.insert(pos, "("); + sb.append(")"); + } + sb.append(")"); + return sb.toString(); + } + + @Override + public String toString() { + return name() + "#" + functionId(); + } + + protected boolean useParanthesis() { + return !(left() instanceof Literal) || !(right() instanceof Literal); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Arithmetics.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Arithmetics.java new file mode 100644 index 0000000000000..51cccb850666b --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Arithmetics.java @@ -0,0 +1,129 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +/** + * Arithmetic operation using the type widening rules of the JLS 5.6.2 namely + * widen to double or float or long or int in this order. 
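+ *
+ * Illustrative sketch (editorial example, not part of this change): mixing an {@code Integer}
+ * with a {@code Double} widens both operands to {@code double}, while purely integral operands
+ * stay integral and go through the exact {@code Math} helpers:
+ * <pre>
+ * Arithmetics.add(1, 2.5d);              // widened to double       -> 3.5d
+ * Arithmetics.mul(3, 4L);                // widened to long (exact) -> 12L
+ * Arithmetics.add(Integer.MAX_VALUE, 1); // Math.addExact           -> ArithmeticException
+ * </pre>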
+ */ +abstract class Arithmetics { + + static Number add(Number l, Number r) { + if (l == null || r == null) { + return null; + } + + if (l instanceof Double || r instanceof Double) { + return Double.valueOf(l.doubleValue() + r.doubleValue()); + } + if (l instanceof Float || r instanceof Float) { + return Float.valueOf(l.floatValue() + r.floatValue()); + } + if (l instanceof Long || r instanceof Long) { + return Long.valueOf(Math.addExact(l.longValue(), r.longValue())); + } + + return Integer.valueOf(Math.addExact(l.intValue(), r.intValue())); + } + + static Number sub(Number l, Number r) { + if (l == null || r == null) { + return null; + } + + if (l instanceof Double || r instanceof Double) { + return Double.valueOf(l.doubleValue() - r.doubleValue()); + } + if (l instanceof Float || r instanceof Float) { + return Float.valueOf(l.floatValue() - r.floatValue()); + } + if (l instanceof Long || r instanceof Long) { + return Long.valueOf(Math.subtractExact(l.longValue(), r.longValue())); + } + + return Integer.valueOf(Math.subtractExact(l.intValue(), r.intValue())); + } + + static Number mul(Number l, Number r) { + if (l == null || r == null) { + return null; + } + + if (l instanceof Double || r instanceof Double) { + return Double.valueOf(l.doubleValue() * r.doubleValue()); + } + if (l instanceof Float || r instanceof Float) { + return Float.valueOf(l.floatValue() * r.floatValue()); + } + if (l instanceof Long || r instanceof Long) { + return Long.valueOf(Math.multiplyExact(l.longValue(), r.longValue())); + } + + return Integer.valueOf(Math.multiplyExact(l.intValue(), r.intValue())); + } + + static Number div(Number l, Number r) { + if (l == null || r == null) { + return null; + } + + if (l instanceof Double || r instanceof Double) { + return l.doubleValue() / r.doubleValue(); + } + if (l instanceof Float || r instanceof Float) { + return l.floatValue() / r.floatValue(); + } + if (l instanceof Long || r instanceof Long) { + return l.longValue() / r.longValue(); + } + + return l.intValue() / r.intValue(); + } + + static Number mod(Number l, Number r) { + if (l == null || r == null) { + return null; + } + + if (l instanceof Long || r instanceof Long) { + return Long.valueOf(Math.floorMod(l.longValue(), r.longValue())); + } + if (l instanceof Double || r instanceof Double) { + return Double.valueOf(l.doubleValue() % r.doubleValue()); + } + if (l instanceof Float || r instanceof Float) { + return Float.valueOf(l.floatValue() % r.floatValue()); + } + + return Math.floorMod(l.intValue(), r.intValue()); + } + + static Number negate(Number n) { + if (n == null) { + return null; + } + + if (n instanceof Double) { + double d = n.doubleValue(); + if (d == Double.MIN_VALUE) { + throw new ArithmeticException("double overflow"); + } + return Double.valueOf(-n.doubleValue()); + } + if (n instanceof Float) { + float f = n.floatValue(); + if (f == Float.MIN_VALUE) { + throw new ArithmeticException("float overflow"); + } + return Float.valueOf(-n.floatValue()); + } + if (n instanceof Long) { + return Long.valueOf(Math.negateExact(n.longValue())); + } + + return Integer.valueOf(Math.negateExact(n.intValue())); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/BinaryArithmeticProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/BinaryArithmeticProcessor.java new file mode 100644 index 0000000000000..3f54004c1b0dc --- /dev/null +++ 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/BinaryArithmeticProcessor.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryNumericProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; + +import java.io.IOException; +import java.util.function.BiFunction; + +public class BinaryArithmeticProcessor extends BinaryNumericProcessor { + + public enum BinaryArithmeticOperation implements BiFunction { + + ADD(Arithmetics::add, "+"), + SUB(Arithmetics::sub, "-"), + MUL(Arithmetics::mul, "*"), + DIV(Arithmetics::div, "/"), + MOD(Arithmetics::mod, "%"); + + private final BiFunction process; + private final String symbol; + + BinaryArithmeticOperation(BiFunction process, String symbol) { + this.process = process; + this.symbol = symbol; + } + + public String symbol() { + return symbol; + } + + @Override + public final Number apply(Number left, Number right) { + return process.apply(left, right); + } + + @Override + public String toString() { + return symbol; + } + } + + public static final String NAME = "ab"; + + public BinaryArithmeticProcessor(Processor left, Processor right, BinaryArithmeticOperation operation) { + super(left, right, operation); + } + + public BinaryArithmeticProcessor(StreamInput in) throws IOException { + super(in, i -> i.readEnum(BinaryArithmeticOperation.class)); + } + + @Override + protected void doWrite(StreamOutput out) throws IOException { + out.writeEnum(operation()); + } + + @Override + public String getWriteableName() { + return NAME; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/BinaryArithmeticProcessorDefinition.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/BinaryArithmeticProcessorDefinition.java new file mode 100644 index 0000000000000..b94a726290ed9 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/BinaryArithmeticProcessorDefinition.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.BinaryProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import java.util.Objects; + +public class BinaryArithmeticProcessorDefinition extends BinaryProcessorDefinition { + + private final BinaryArithmeticOperation operation; + + public BinaryArithmeticProcessorDefinition(Location location, Expression expression, ProcessorDefinition left, + ProcessorDefinition right, BinaryArithmeticOperation operation) { + super(location, expression, left, right); + this.operation = operation; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, BinaryArithmeticProcessorDefinition::new, + expression(), left(), right(), operation); + } + + public BinaryArithmeticOperation operation() { + return operation; + } + + @Override + protected BinaryProcessorDefinition replaceChildren(ProcessorDefinition left, ProcessorDefinition right) { + return new BinaryArithmeticProcessorDefinition(location(), expression(), left, right, operation); + } + + @Override + public BinaryArithmeticProcessor asProcessor() { + return new BinaryArithmeticProcessor(left().asProcessor(), right().asProcessor(), operation); + } + + @Override + public int hashCode() { + return Objects.hash(left(), right(), operation); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + BinaryArithmeticProcessorDefinition other = (BinaryArithmeticProcessorDefinition) obj; + return Objects.equals(operation, other.operation) + && Objects.equals(left(), other.left()) + && Objects.equals(right(), other.right()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Div.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Div.java new file mode 100644 index 0000000000000..fa3a82f3113fb --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Div.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.BinaryScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypeConversion; + +/** + * Division function ({@code a / b}). 
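+ * <p>
+ * Illustrative note (editorial, not part of this change): the result type is the common type of
+ * the operands, so {@code 7 / 2} stays integral and evaluates to {@code 3}, while {@code 7 / 2.0}
+ * widens to {@code double} and evaluates to {@code 3.5}.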
+ */ +public class Div extends ArithmeticFunction { + + public Div(Location location, Expression left, Expression right) { + super(location, left, right, BinaryArithmeticOperation.DIV); + } + + @Override + protected NodeInfo<Div>
info() { + return NodeInfo.create(this, Div::new, left(), right()); + } + + @Override + protected BinaryScalarFunction replaceChildren(Expression newLeft, Expression newRight) { + return new Div(location(), newLeft, newRight); + } + + @Override + public DataType dataType() { + return DataTypeConversion.commonType(left().dataType(), right().dataType()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Mod.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Mod.java new file mode 100644 index 0000000000000..7cdfa72a86c18 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Mod.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.BinaryScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * Modulo + * function ({@code a % b}). + */ +public class Mod extends ArithmeticFunction { + + public Mod(Location location, Expression left, Expression right) { + super(location, left, right, BinaryArithmeticOperation.MOD); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Mod::new, left(), right()); + } + + @Override + protected BinaryScalarFunction replaceChildren(Expression newLeft, Expression newRight) { + return new Mod(location(), newLeft, newRight); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Mul.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Mul.java new file mode 100644 index 0000000000000..87d2574c26221 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Mul.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.BinaryScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * Multiplication function ({@code a * b}). 
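+ * <p>
+ * Illustrative note (editorial, not part of this change): for integral operands {@code Arithmetics}
+ * multiplies via {@link Math#multiplyExact}, so an overflowing product such as
+ * {@code Integer.MAX_VALUE * 2} raises an {@link ArithmeticException} instead of wrapping around.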
+ */ +public class Mul extends ArithmeticFunction { + + public Mul(Location location, Expression left, Expression right) { + super(location, left, right, BinaryArithmeticOperation.MUL); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Mul::new, left(), right()); + } + + @Override + protected BinaryScalarFunction replaceChildren(Expression newLeft, Expression newRight) { + return new Mul(location(), newLeft, newRight); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Neg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Neg.java new file mode 100644 index 0000000000000..44cd51522b10c --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Neg.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.UnaryArithmeticProcessor.UnaryArithmeticOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinitions; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.UnaryProcessorDefinition; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; + +/** + * Negation function (@{code -x}). + */ +public class Neg extends UnaryScalarFunction { + public Neg(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Neg::new, field()); + } + + @Override + protected UnaryScalarFunction replaceChild(Expression newChild) { + return new Neg(location(), newChild); + } + + @Override + protected TypeResolution resolveType() { + return Expressions.typeMustBeNumeric(field()); + } + + @Override + public Object fold() { + return Arithmetics.negate((Number) field().fold()); + } + + @Override + public DataType dataType() { + return field().dataType(); + } + + @Override + public String name() { + return "-" + (field() instanceof NamedExpression && field().resolved() ? 
Expressions.name(field()) : field().toString()); + } + + @Override + protected String formatScript(String template) { + // Painless supports negating (and hopefully its corner cases) + return super.formatScript("-" + template); + } + + @Override + protected ProcessorDefinition makeProcessorDefinition() { + return new UnaryProcessorDefinition(location(), this, ProcessorDefinitions.toProcessorDefinition(field()), + new UnaryArithmeticProcessor(UnaryArithmeticOperation.NEGATE)); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Sub.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Sub.java new file mode 100644 index 0000000000000..bd36a8dd8430f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/Sub.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.BinaryScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * Subtraction function ({@code a - b}). + */ +public class Sub extends ArithmeticFunction { + + public Sub(Location location, Expression left, Expression right) { + super(location, left, right, BinaryArithmeticOperation.SUB); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Sub::new, left(), right()); + } + + @Override + protected BinaryScalarFunction replaceChildren(Expression newLeft, Expression newRight) { + return new Sub(location(), newLeft, newRight); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/UnaryArithmeticProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/UnaryArithmeticProcessor.java new file mode 100644 index 0000000000000..d0da0f9e719ed --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/UnaryArithmeticProcessor.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; + +import java.io.IOException; +import java.util.function.Function; + +public class UnaryArithmeticProcessor implements Processor { + + public enum UnaryArithmeticOperation { + + NEGATE(Arithmetics::negate); + + private final Function process; + + UnaryArithmeticOperation(Function process) { + this.process = process; + } + + public final Number apply(Number number) { + return process.apply(number); + } + + public String symbol() { + return "-"; + } + } + + public static final String NAME = "au"; + + private final UnaryArithmeticOperation operation; + + public UnaryArithmeticProcessor(UnaryArithmeticOperation operation) { + this.operation = operation; + } + + public UnaryArithmeticProcessor(StreamInput in) throws IOException { + operation = in.readEnum(UnaryArithmeticOperation.class); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(operation); + } + + @Override + public Object process(Object input) { + if (input == null) { + return null; + } + + if (input instanceof Number) { + return operation.apply((Number) input); + } + throw new SqlIllegalArgumentException("A number is required; received {}", input); + } + + + @Override + public String toString() { + return operation.symbol() + super.toString(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java new file mode 100644 index 0000000000000..3b3717b1318df --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java @@ -0,0 +1,168 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinitions; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.UnaryProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.temporal.ChronoField; +import java.util.Objects; +import java.util.TimeZone; + +import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder.paramsBuilder; +import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate.formatTemplate; + +public abstract class DateTimeFunction extends UnaryScalarFunction { + + private final TimeZone timeZone; + private final String name; + + DateTimeFunction(Location location, Expression field, TimeZone timeZone) { + super(location, field); + this.timeZone = timeZone; + + StringBuilder sb = new StringBuilder(super.name()); + // add timezone as last argument + sb.insert(sb.length() - 1, " [" + timeZone.getID() + "]"); + + this.name = sb.toString(); + } + + @Override + protected final NodeInfo info() { + return NodeInfo.create(this, ctorForInfo(), field(), timeZone()); + } + protected abstract NodeInfo.NodeCtor2 ctorForInfo(); + + public TimeZone timeZone() { + return timeZone; + } + + @Override + public boolean foldable() { + return field().foldable(); + } + + @Override + public Object fold() { + DateTime folded = (DateTime) field().fold(); + if (folded == null) { + return null; + } + + ZonedDateTime time = ZonedDateTime.ofInstant( + Instant.ofEpochMilli(folded.getMillis()), ZoneId.of(timeZone.getID())); + return time.get(chronoField()); + } + + @Override + protected TypeResolution resolveType() { + if (field().dataType() == DataType.DATE) { + return TypeResolution.TYPE_RESOLVED; + } + return new TypeResolution("Function [" + functionName() + "] cannot be applied on a non-date expression ([" + + Expressions.name(field()) + "] of type [" + field().dataType().esType + "])"); + } + + @Override + protected ScriptTemplate asScriptFrom(FieldAttribute field) { + ParamsBuilder params = paramsBuilder(); + + String template = null; + if (TimeZone.getTimeZone("UTC").equals(timeZone)) { + // TODO: it would be nice to be able to externalize the extract function and reuse the script across all extractors + template = formatTemplate("doc[{}].value.get" + extractFunction() + "()"); + params.variable(field.name()); + } else { + // TODO ewwww + /* + * This uses the Java 8 time 
API because Painless doesn't whitelist creation of new + * Joda classes. + * + * The actual script is + * ZonedDateTime.ofInstant(Instant.ofEpochMilli(.value.millis), + * ZoneId.of()).get(ChronoField.get(MONTH_OF_YEAR)) + */ + + template = formatTemplate("ZonedDateTime.ofInstant(Instant.ofEpochMilli(doc[{}].value.millis), " + + "ZoneId.of({})).get(ChronoField.valueOf({}))"); + params.variable(field.name()) + .variable(timeZone.getID()) + .variable(chronoField().name()); + } + + return new ScriptTemplate(template, params.build(), dataType()); + } + + + @Override + protected ScriptTemplate asScriptFrom(AggregateFunctionAttribute aggregate) { + throw new UnsupportedOperationException(); + } + + protected String extractFunction() { + return getClass().getSimpleName(); + } + + /** + * Used for generating the painless script version of this function when the time zone is not UTC + */ + protected abstract ChronoField chronoField(); + + @Override + protected final ProcessorDefinition makeProcessorDefinition() { + return new UnaryProcessorDefinition(location(), this, ProcessorDefinitions.toProcessorDefinition(field()), + new DateTimeProcessor(extractor(), timeZone)); + } + + protected abstract DateTimeExtractor extractor(); + + @Override + public DataType dataType() { + return DataType.INTEGER; + } + + // used for applying ranges + public abstract String dateTimeFormat(); + + // add tz along the rest of the params + @Override + public String name() { + return name; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + DateTimeFunction other = (DateTimeFunction) obj; + return Objects.equals(other.field(), field()) + && Objects.equals(other.timeZone, timeZone); + } + + @Override + public int hashCode() { + return Objects.hash(field(), timeZone); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeHistogramFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeHistogramFunction.java new file mode 100644 index 0000000000000..bb5aaea61fb3f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeHistogramFunction.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.TimeZone; + +/** + * DateTimeFunctions that can be mapped as histogram. 
This means the dates order is maintained + * Unfortunately this means only YEAR works since everything else changes the order + */ +public abstract class DateTimeHistogramFunction extends DateTimeFunction { + + DateTimeHistogramFunction(Location location, Expression field, TimeZone timeZone) { + super(location, field, timeZone); + } + + /** + * used for aggregration (date histogram) + */ + public abstract String interval(); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java new file mode 100644 index 0000000000000..6aa6b6a50e9bc --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessor.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.joda.time.DateTimeFieldType; +import org.joda.time.DateTimeZone; +import org.joda.time.ReadableDateTime; + +import java.io.IOException; +import java.util.Objects; +import java.util.TimeZone; + +public class DateTimeProcessor implements Processor { + + public enum DateTimeExtractor { + DAY_OF_MONTH(DateTimeFieldType.dayOfMonth()), + DAY_OF_WEEK(DateTimeFieldType.dayOfWeek()), + DAY_OF_YEAR(DateTimeFieldType.dayOfYear()), + HOUR_OF_DAY(DateTimeFieldType.hourOfDay()), + MINUTE_OF_DAY(DateTimeFieldType.minuteOfDay()), + MINUTE_OF_HOUR(DateTimeFieldType.minuteOfHour()), + MONTH_OF_YEAR(DateTimeFieldType.monthOfYear()), + SECOND_OF_MINUTE(DateTimeFieldType.secondOfMinute()), + WEEK_OF_YEAR(DateTimeFieldType.weekOfWeekyear()), + YEAR(DateTimeFieldType.year()); + + private final DateTimeFieldType field; + + DateTimeExtractor(DateTimeFieldType field) { + this.field = field; + } + + public int extract(ReadableDateTime dt) { + return dt.get(field); + } + } + + public static final String NAME = "dt"; + + private final DateTimeExtractor extractor; + private final TimeZone timeZone; + + public DateTimeProcessor(DateTimeExtractor extractor, TimeZone timeZone) { + this.extractor = extractor; + this.timeZone = timeZone; + } + + public DateTimeProcessor(StreamInput in) throws IOException { + extractor = in.readEnum(DateTimeExtractor.class); + timeZone = TimeZone.getTimeZone(in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(extractor); + out.writeString(timeZone.getID()); + } + + @Override + public String getWriteableName() { + return NAME; + } + + DateTimeExtractor extractor() { + return extractor; + } + + @Override + public Object process(Object l) { + if (l == null) { + return null; + } + + if (!(l instanceof ReadableDateTime)) { + throw new SqlIllegalArgumentException("A date/time is required; received {}", l); + } + + ReadableDateTime dt = (ReadableDateTime) l; + + if (!TimeZone.getTimeZone("UTC").equals(timeZone)) { + dt = 
dt.toDateTime().withZone(DateTimeZone.forTimeZone(timeZone)); + } + return extractor.extract(dt); + } + + @Override + public int hashCode() { + return Objects.hash(extractor, timeZone); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + DateTimeProcessor other = (DateTimeProcessor) obj; + return Objects.equals(extractor, other.extractor) + && Objects.equals(timeZone, other.timeZone); + } + + @Override + public String toString() { + return extractor.toString(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfMonth.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfMonth.java new file mode 100644 index 0000000000000..1ac3771d49db1 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfMonth.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; + +import java.time.temporal.ChronoField; +import java.util.TimeZone; + +/** + * Extract the day of the month from a datetime. + */ +public class DayOfMonth extends DateTimeFunction { + public DayOfMonth(Location location, Expression field, TimeZone timeZone) { + super(location, field, timeZone); + } + + @Override + protected NodeCtor2 ctorForInfo() { + return DayOfMonth::new; + } + + @Override + protected DayOfMonth replaceChild(Expression newChild) { + return new DayOfMonth(location(), newChild, timeZone()); + } + + @Override + public String dateTimeFormat() { + return "d"; + } + + @Override + protected ChronoField chronoField() { + return ChronoField.DAY_OF_MONTH; + } + + @Override + protected DateTimeExtractor extractor() { + return DateTimeExtractor.DAY_OF_MONTH; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfWeek.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfWeek.java new file mode 100644 index 0000000000000..7582ece6250bd --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfWeek.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; + +import java.time.temporal.ChronoField; +import java.util.TimeZone; + +/** + * Extract the day of the week from a datetime. 
1 is Monday, 2 is Tuesday, etc. + */ +public class DayOfWeek extends DateTimeFunction { + public DayOfWeek(Location location, Expression field, TimeZone timeZone) { + super(location, field, timeZone); + } + + @Override + protected NodeCtor2 ctorForInfo() { + return DayOfWeek::new; + } + + @Override + protected DayOfWeek replaceChild(Expression newChild) { + return new DayOfWeek(location(), newChild, timeZone()); + } + + @Override + public String dateTimeFormat() { + return "e"; + } + + @Override + protected ChronoField chronoField() { + return ChronoField.DAY_OF_WEEK; + } + + @Override + protected DateTimeExtractor extractor() { + return DateTimeExtractor.DAY_OF_WEEK; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYear.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYear.java new file mode 100644 index 0000000000000..8f5e06188327d --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYear.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; + +import java.time.temporal.ChronoField; +import java.util.TimeZone; + +/** + * Extract the day of the year from a datetime. + */ +public class DayOfYear extends DateTimeFunction { + public DayOfYear(Location location, Expression field, TimeZone timeZone) { + super(location, field, timeZone); + } + + @Override + protected NodeCtor2 ctorForInfo() { + return DayOfYear::new; + } + + @Override + protected UnaryScalarFunction replaceChild(Expression newChild) { + return new DayOfYear(location(), newChild, timeZone()); + } + + @Override + public String dateTimeFormat() { + return "D"; + } + + @Override + protected ChronoField chronoField() { + return ChronoField.DAY_OF_YEAR; + } + + @Override + protected DateTimeExtractor extractor() { + return DateTimeExtractor.DAY_OF_YEAR; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/HourOfDay.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/HourOfDay.java new file mode 100644 index 0000000000000..eaa5a862ab0c0 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/HourOfDay.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; +import org.joda.time.DateTimeZone; + +import java.time.temporal.ChronoField; +import java.util.TimeZone; + +/** + * Extract the hour of the day from a datetime. + */ +public class HourOfDay extends DateTimeFunction { + public HourOfDay(Location location, Expression field, TimeZone timeZone) { + super(location, field, timeZone); + } + + @Override + protected NodeCtor2 ctorForInfo() { + return HourOfDay::new; + } + + @Override + protected HourOfDay replaceChild(Expression newChild) { + return new HourOfDay(location(), newChild, timeZone()); + } + + @Override + public String dateTimeFormat() { + return "hour"; + } + + @Override + protected ChronoField chronoField() { + return ChronoField.HOUR_OF_DAY; + } + + @Override + protected DateTimeExtractor extractor() { + return DateTimeExtractor.HOUR_OF_DAY; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfDay.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfDay.java new file mode 100644 index 0000000000000..2840fa0c21b85 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfDay.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; + +import java.time.temporal.ChronoField; +import java.util.TimeZone; + +/** + * Extract the minute of the day from a datetime. 
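+ * <p>
+ * Illustrative example (editorial, not part of this change): for 14:30:00 the minute of the day
+ * is 14 * 60 + 30 = 870.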
+ */ +public class MinuteOfDay extends DateTimeFunction { + + public MinuteOfDay(Location location, Expression field, TimeZone timeZone) { + super(location, field, timeZone); + } + + @Override + protected NodeCtor2 ctorForInfo() { + return MinuteOfDay::new; + } + + @Override + protected MinuteOfDay replaceChild(Expression newChild) { + return new MinuteOfDay(location(), newChild, timeZone()); + } + + @Override + public String dateTimeFormat() { + throw new UnsupportedOperationException("is there a format for it?"); + } + + @Override + protected ChronoField chronoField() { + return ChronoField.MINUTE_OF_DAY; + } + + @Override + protected DateTimeExtractor extractor() { + return DateTimeExtractor.MINUTE_OF_DAY; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfHour.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfHour.java new file mode 100644 index 0000000000000..d577bb916966a --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MinuteOfHour.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; + +import java.time.temporal.ChronoField; +import java.util.TimeZone; + +/** + * Exract the minute of the hour from a datetime. + */ +public class MinuteOfHour extends DateTimeFunction { + public MinuteOfHour(Location location, Expression field, TimeZone timeZone) { + super(location, field, timeZone); + } + + @Override + protected NodeCtor2 ctorForInfo() { + return MinuteOfHour::new; + } + + @Override + protected MinuteOfHour replaceChild(Expression newChild) { + return new MinuteOfHour(location(), newChild, timeZone()); + } + + @Override + public String dateTimeFormat() { + return "m"; + } + + @Override + protected ChronoField chronoField() { + return ChronoField.MINUTE_OF_HOUR; + } + + @Override + protected DateTimeExtractor extractor() { + return DateTimeExtractor.MINUTE_OF_HOUR; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthOfYear.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthOfYear.java new file mode 100644 index 0000000000000..3a2d51bee78ad --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/MonthOfYear.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; + +import java.time.temporal.ChronoField; +import java.util.TimeZone; + +/** + * Extract the month of the year from a datetime. + */ +public class MonthOfYear extends DateTimeFunction { + public MonthOfYear(Location location, Expression field, TimeZone timeZone) { + super(location, field, timeZone); + } + + @Override + protected NodeCtor2 ctorForInfo() { + return MonthOfYear::new; + } + + @Override + protected MonthOfYear replaceChild(Expression newChild) { + return new MonthOfYear(location(), newChild, timeZone()); + } + + @Override + public String dateTimeFormat() { + return "M"; + } + + @Override + protected ChronoField chronoField() { + return ChronoField.MONTH_OF_YEAR; + } + + @Override + protected DateTimeExtractor extractor() { + return DateTimeExtractor.MONTH_OF_YEAR; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/SecondOfMinute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/SecondOfMinute.java new file mode 100644 index 0000000000000..883502c017da5 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/SecondOfMinute.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; + +import java.time.temporal.ChronoField; +import java.util.TimeZone; + +/** + * Extract the second of the minute from a datetime. + */ +public class SecondOfMinute extends DateTimeFunction { + public SecondOfMinute(Location location, Expression field, TimeZone timeZone) { + super(location, field, timeZone); + } + + @Override + protected NodeCtor2 ctorForInfo() { + return SecondOfMinute::new; + } + + @Override + protected SecondOfMinute replaceChild(Expression newChild) { + return new SecondOfMinute(location(), newChild, timeZone()); + } + + @Override + public String dateTimeFormat() { + return "s"; + } + + @Override + protected ChronoField chronoField() { + return ChronoField.SECOND_OF_MINUTE; + } + + @Override + protected DateTimeExtractor extractor() { + return DateTimeExtractor.SECOND_OF_MINUTE; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/WeekOfYear.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/WeekOfYear.java new file mode 100644 index 0000000000000..eef2c48ad0f72 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/WeekOfYear.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; + +import java.time.temporal.ChronoField; +import java.util.TimeZone; + +/** + * Extract the week of the year from a datetime. + */ +public class WeekOfYear extends DateTimeFunction { + public WeekOfYear(Location location, Expression field, TimeZone timeZone) { + super(location, field, timeZone); + } + + @Override + protected NodeCtor2 ctorForInfo() { + return WeekOfYear::new; + } + + @Override + protected WeekOfYear replaceChild(Expression newChild) { + return new WeekOfYear(location(), newChild, timeZone()); + } + + @Override + public String dateTimeFormat() { + return "w"; + } + + @Override + protected ChronoField chronoField() { + return ChronoField.ALIGNED_WEEK_OF_YEAR; + } + + @Override + protected DateTimeExtractor extractor() { + return DateTimeExtractor.WEEK_OF_YEAR; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Year.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Year.java new file mode 100644 index 0000000000000..28d475e4c7085 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Year.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2; + +import java.time.temporal.ChronoField; +import java.util.TimeZone; + +/** + * Extract the year from a datetime. 
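+ * <p>
+ * Illustrative note (editorial, not part of this change): ordering by the extracted year matches
+ * ordering by the underlying date, so {@code orderBy()} returns the field itself and the function
+ * can be mapped onto a date histogram with a {@code "year"} interval.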
+ */ +public class Year extends DateTimeHistogramFunction { + public Year(Location location, Expression field, TimeZone timeZone) { + super(location, field, timeZone); + } + + @Override + protected NodeCtor2 ctorForInfo() { + return Year::new; + } + + @Override + protected Year replaceChild(Expression newChild) { + return new Year(location(), newChild, timeZone()); + } + + @Override + public String dateTimeFormat() { + return "year"; + } + + @Override + public Expression orderBy() { + return field(); + } + + @Override + protected ChronoField chronoField() { + return ChronoField.YEAR; + } + + @Override + protected DateTimeExtractor extractor() { + return DateTimeExtractor.YEAR; + } + + @Override + public String interval() { + return "year"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/ACos.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/ACos.java new file mode 100644 index 0000000000000..d4e01329cd33e --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/ACos.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + + +/** + * Arc cosine + * function. + */ +public class ACos extends MathFunction { + public ACos(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ACos::new, field()); + } + + @Override + protected ACos replaceChild(Expression newChild) { + return new ACos(location(), newChild); + } + + @Override + protected MathOperation operation() { + return MathOperation.ACOS; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/ASin.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/ASin.java new file mode 100644 index 0000000000000..26362af968fac --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/ASin.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * Arc sine + * function. 
+ */ +public class ASin extends MathFunction { + public ASin(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ASin::new, field()); + } + + @Override + protected ASin replaceChild(Expression newChild) { + return new ASin(location(), newChild); + } + + @Override + protected MathOperation operation() { + return MathOperation.ASIN; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/ATan.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/ATan.java new file mode 100644 index 0000000000000..ae64ab2e36456 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/ATan.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * Arc tangent + * function. + */ +public class ATan extends MathFunction { + public ATan(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ATan::new, field()); + } + + @Override + protected ATan replaceChild(Expression newChild) { + return new ATan(location(), newChild); + } + + @Override + protected MathOperation operation() { + return MathOperation.ATAN; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/ATan2.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/ATan2.java new file mode 100644 index 0000000000000..1a86a44d32b13 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/ATan2.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor.BinaryMathOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinitions; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.function.BiFunction; + +/** + * Multi-valued inverse tangent + * function. 
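ATan2 is the first of the two-argument math functions; per the BinaryMathOperation enum further down, it simply delegates to Math.atan2 on the operands' double values. A tiny standalone check of that behaviour, not part of the patch (the class name is a placeholder):

```java
// Math.atan2(y, x): angle in radians of the point (x, y).
public class ATan2Sketch {
    public static void main(String[] args) {
        System.out.println(Math.atan2(1.0, 1.0)); // 0.7853981633974483 == Math.PI / 4
        System.out.println(Math.atan2(1.0, 0.0)); // 1.5707963267948966 == Math.PI / 2
    }
}
```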
+ */ +public class ATan2 extends BinaryNumericFunction { + + public ATan2(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + protected BiFunction operation() { + return BinaryMathOperation.ATAN2; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ATan2::new, left(), right()); + } + + @Override + protected ATan2 replaceChildren(Expression newLeft, Expression newRight) { + return new ATan2(location(), newLeft, newRight); + } + + @Override + protected ProcessorDefinition makeProcessorDefinition() { + return new BinaryMathProcessorDefinition(location(), this, + ProcessorDefinitions.toProcessorDefinition(left()), + ProcessorDefinitions.toProcessorDefinition(right()), + BinaryMathOperation.ATAN2); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Abs.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Abs.java new file mode 100644 index 0000000000000..def66bfe4a887 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Abs.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; + +/** + * Absolute value + * function. + */ +public class Abs extends MathFunction { + public Abs(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Abs::new, field()); + } + + @Override + protected Abs replaceChild(Expression newChild) { + return new Abs(location(), newChild); + } + + @Override + protected MathOperation operation() { + return MathOperation.ABS; + } + + @Override + public DataType dataType() { + return field().dataType(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessor.java new file mode 100644 index 0000000000000..fca6aa5023d95 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessor.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor.BinaryMathOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; + +import java.io.IOException; +import java.util.function.BiFunction; + +/** + * Binary math operations. Sister class to {@link MathOperation}. + */ +public class BinaryMathProcessor extends BinaryNumericProcessor { + + public enum BinaryMathOperation implements BiFunction { + + ATAN2((l, r) -> Math.atan2(l.doubleValue(), r.doubleValue())), + POWER((l, r) -> Math.pow(l.doubleValue(), r.doubleValue())); + + private final BiFunction process; + + BinaryMathOperation(BiFunction process) { + this.process = process; + } + + @Override + public final Number apply(Number left, Number right) { + return process.apply(left, right); + } + } + + public static final String NAME = "mb"; + + public BinaryMathProcessor(Processor left, Processor right, BinaryMathOperation operation) { + super(left, right, operation); + } + + public BinaryMathProcessor(StreamInput in) throws IOException { + super(in, i -> i.readEnum(BinaryMathOperation.class)); + } + + @Override + protected void doWrite(StreamOutput out) throws IOException { + out.writeEnum(operation()); + } + + @Override + public String getWriteableName() { + return NAME; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessorDefinition.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessorDefinition.java new file mode 100644 index 0000000000000..482029a8c1673 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessorDefinition.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor.BinaryMathOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.BinaryProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.Objects; + +/** + * Processor definition for math operations requiring two arguments. 
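The processor definition ties a BinaryMathOperation to the processors for its two arguments; BinaryNumericFunction.fold() applies the same operation directly when both arguments are constants. A hypothetical fold of constant arguments, assuming the sql plugin classes are on the classpath (the class name is a placeholder):

```java
// Hypothetical constant fold, mirroring what BinaryNumericFunction.fold() does.
import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor.BinaryMathOperation;

public class BinaryMathFoldSketch {
    public static void main(String[] args) {
        Number pow   = BinaryMathOperation.POWER.apply(2, 10); // 1024.0 (Math.pow on doubleValue())
        Number angle = BinaryMathOperation.ATAN2.apply(1, 1);  // Math.PI / 4
        System.out.println(pow + " " + angle);
    }
}
```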
+ */ +public class BinaryMathProcessorDefinition extends BinaryProcessorDefinition { + + private final BinaryMathOperation operation; + + public BinaryMathProcessorDefinition(Location location, Expression expression, ProcessorDefinition left, + ProcessorDefinition right, BinaryMathOperation operation) { + super(location, expression, left, right); + this.operation = operation; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, BinaryMathProcessorDefinition::new, expression(), left(), right(), operation); + } + + public BinaryMathOperation operation() { + return operation; + } + + @Override + protected BinaryProcessorDefinition replaceChildren(ProcessorDefinition left, ProcessorDefinition right) { + return new BinaryMathProcessorDefinition(location(), expression(), left, right, operation); + } + + @Override + public BinaryMathProcessor asProcessor() { + return new BinaryMathProcessor(left().asProcessor(), right().asProcessor(), operation); + } + + @Override + public int hashCode() { + return Objects.hash(left(), right(), operation); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + BinaryMathProcessorDefinition other = (BinaryMathProcessorDefinition) obj; + return Objects.equals(operation, other.operation) + && Objects.equals(left(), other.left()) + && Objects.equals(right(), other.right()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryNumericFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryNumericFunction.java new file mode 100644 index 0000000000000..14675270f9f07 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryNumericFunction.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.BinaryScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.Locale; +import java.util.Objects; +import java.util.function.BiFunction; + +import static java.lang.String.format; +import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder.paramsBuilder; + +public abstract class BinaryNumericFunction extends BinaryScalarFunction { + + protected BinaryNumericFunction(Location location, Expression left, Expression right) { + super(location, left, right); + } + + protected abstract BiFunction operation(); + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } + + @Override + protected TypeResolution resolveType() { + if (!childrenResolved()) { + return new TypeResolution("Unresolved children"); + } + + TypeResolution resolution = resolveInputType(left().dataType()); + + if (resolution == TypeResolution.TYPE_RESOLVED) { + return resolveInputType(right().dataType()); + } + return resolution; + } + + protected TypeResolution resolveInputType(DataType inputType) { + return inputType.isNumeric() ? + TypeResolution.TYPE_RESOLVED : + new TypeResolution("'%s' requires a numeric type, received %s", mathFunction(), inputType.esType); + } + + @Override + public Object fold() { + return operation().apply((Number) left().fold(), (Number) right().fold()); + } + + @Override + protected ScriptTemplate asScriptFrom(ScriptTemplate leftScript, ScriptTemplate rightScript) { + return new ScriptTemplate(format(Locale.ROOT, "Math.%s(%s,%s)", mathFunction(), leftScript.template(), rightScript.template()), + paramsBuilder() + .script(leftScript.params()).script(rightScript.params()) + .build(), dataType()); + } + + protected String mathFunction() { + return getClass().getSimpleName().toLowerCase(Locale.ROOT); + } + + @Override + public int hashCode() { + return Objects.hash(left(), right(), operation()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + BinaryNumericFunction other = (BinaryNumericFunction) obj; + return Objects.equals(other.left(), left()) + && Objects.equals(other.right(), right()) + && Objects.equals(other.operation(), operation()); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryNumericProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryNumericProcessor.java new file mode 100644 index 0000000000000..3acc1cabf2bd7 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryNumericProcessor.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.BinaryProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; + +import java.io.IOException; +import java.util.Locale; +import java.util.Objects; +import java.util.function.BiFunction; + +public abstract class BinaryNumericProcessor & BiFunction> extends BinaryProcessor { + + private final O operation; + + protected BinaryNumericProcessor(Processor left, Processor right, O operation) { + super(left, right); + this.operation = operation; + } + + protected BinaryNumericProcessor(StreamInput in, Reader reader) throws IOException { + super(in); + operation = reader.read(in); + } + + protected O operation() { + return operation; + } + + @Override + protected Object doProcess(Object left, Object right) { + if (left == null || right == null) { + return null; + } + if (!(left instanceof Number)) { + throw new SqlIllegalArgumentException("A number is required; received {}", left); + } + if (!(right instanceof Number)) { + throw new SqlIllegalArgumentException("A number is required; received {}", right); + } + + return operation.apply((Number) left, (Number) right); + } + + @Override + public int hashCode() { + return Objects.hash(operation); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + BinaryNumericProcessor other = (BinaryNumericProcessor) obj; + return Objects.equals(operation, other.operation) + && Objects.equals(left(), other.left()) + && Objects.equals(right(), other.right()); + } + + @Override + public String toString() { + return String.format(Locale.ROOT, "(%s %s %s)", left(), operation, right()); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cbrt.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cbrt.java new file mode 100644 index 0000000000000..323e343d97cdc --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cbrt.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * Cube root + * function. 
+ */ +public class Cbrt extends MathFunction { + public Cbrt(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Cbrt::new, field()); + } + + @Override + protected Cbrt replaceChild(Expression newChild) { + return new Cbrt(location(), newChild); + } + + @Override + protected MathOperation operation() { + return MathOperation.CBRT; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Ceil.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Ceil.java new file mode 100644 index 0000000000000..0203a1374662d --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Ceil.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypeConversion; + +/** + * Ceiling + * function. + */ +public class Ceil extends MathFunction { + public Ceil(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Ceil::new, field()); + } + + @Override + protected Ceil replaceChild(Expression newChild) { + return new Ceil(location(), newChild); + } + + @Override + public Number fold() { + return DataTypeConversion.toInteger((double) super.fold(), dataType()); + } + + @Override + protected MathOperation operation() { + return MathOperation.CEIL; + } + + @Override + public DataType dataType() { + return DataTypeConversion.asInteger(field().dataType()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cos.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cos.java new file mode 100644 index 0000000000000..5458caf552108 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cos.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * Cosine + * function. 
+ */ +public class Cos extends MathFunction { + public Cos(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Cos::new, field()); + } + + @Override + protected Cos replaceChild(Expression newChild) { + return new Cos(location(), newChild); + } + + @Override + protected MathOperation operation() { + return MathOperation.COS; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cosh.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cosh.java new file mode 100644 index 0000000000000..77df82212185e --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cosh.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * Hyperbolic cosine + * function. + */ +public class Cosh extends MathFunction { + public Cosh(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Cosh::new, field()); + } + + @Override + protected Cosh replaceChild(Expression newChild) { + return new Cosh(location(), newChild); + } + + @Override + protected MathOperation operation() { + return MathOperation.COSH; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cot.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cot.java new file mode 100644 index 0000000000000..5bb4e0630bb49 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Cot.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.Locale; + +import static java.lang.String.format; + +/** + * Cotangent + * function. 
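Cot has no direct counterpart in java.lang.Math, so both MathOperation.COT and the Painless snippet produced by Cot.formatScript below compute it as 1 / tan(x). A quick standalone check, not part of the patch (the class name is a placeholder):

```java
// cot(x) = 1 / tan(x); mirrors MathOperation.COT and the generated script.
public class CotSketch {
    public static void main(String[] args) {
        double x = Math.PI / 4;
        System.out.println(1.0 / Math.tan(x)); // ~1.0 (cotangent of 45 degrees)
    }
}
```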
+ */ +public class Cot extends MathFunction { + public Cot(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Cot::new, field()); + } + + @Override + protected Cot replaceChild(Expression newChild) { + return new Cot(location(), newChild); + } + + @Override + protected String formatScript(String template) { + return super.formatScript(format(Locale.ROOT, "1.0 / Math.tan(%s)", template)); + } + + @Override + protected MathOperation operation() { + return MathOperation.COT; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Degrees.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Degrees.java new file mode 100644 index 0000000000000..138fda5bcaeb4 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Degrees.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * Convert from radians + * to degrees. + */ +public class Degrees extends MathFunction { + public Degrees(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Degrees::new, field()); + } + + @Override + protected Degrees replaceChild(Expression newChild) { + return new Degrees(location(), newChild); + } + + @Override + protected String mathFunction() { + return "toDegrees"; + } + + @Override + protected MathOperation operation() { + return MathOperation.DEGREES; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/E.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/E.java new file mode 100644 index 0000000000000..921b6edaef632 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/E.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.Params; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.util.StringUtils; + +public class E extends MathFunction { + + private static final ScriptTemplate TEMPLATE = new ScriptTemplate("Math.E", Params.EMPTY, DataType.DOUBLE); + + public E(Location location) { + super(location, new Literal(location, Math.E, DataType.DOUBLE)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this); + } + + @Override + protected E replaceChild(Expression field) { + throw new UnsupportedOperationException("this node doesn't have any children"); + } + + @Override + public Object fold() { + return Math.E; + } + + @Override + protected String functionArgs() { + return StringUtils.EMPTY; + } + + @Override + public ScriptTemplate asScript() { + return TEMPLATE; + } + + @Override + protected MathOperation operation() { + return MathOperation.E; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Exp.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Exp.java new file mode 100644 index 0000000000000..b8ec51ca27d2e --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Exp.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * ex + * function. + */ +public class Exp extends MathFunction { + public Exp(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Exp::new, field()); + } + + @Override + protected Exp replaceChild(Expression newChild) { + return new Exp(location(), newChild); + } + + @Override + protected MathOperation operation() { + return MathOperation.EXP; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Expm1.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Expm1.java new file mode 100644 index 0000000000000..3a844f8c39b2b --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Expm1.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * ex + 1 + * function. + */ +public class Expm1 extends MathFunction { + public Expm1(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Expm1::new, field()); + } + + @Override + protected Expm1 replaceChild(Expression newChild) { + return new Expm1(location(), newChild); + } + + @Override + protected MathOperation operation() { + return MathOperation.EXPM1; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Floor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Floor.java new file mode 100644 index 0000000000000..7548b03f78412 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Floor.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypeConversion; + +/** + * Floor + * function. + */ +public class Floor extends MathFunction { + public Floor(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Floor::new, field()); + } + + @Override + protected Floor replaceChild(Expression newChild) { + return new Floor(location(), newChild); + } + + @Override + public Object fold() { + return DataTypeConversion.toInteger((double) super.fold(), dataType()); + } + + @Override + protected MathOperation operation() { + return MathOperation.FLOOR; + } + + @Override + public DataType dataType() { + return DataTypeConversion.asInteger(field().dataType()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Log.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Log.java new file mode 100644 index 0000000000000..0202f61a7abb3 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Log.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * Natural logarithm + * function. + */ +public class Log extends MathFunction { + public Log(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Log::new, field()); + } + + @Override + protected Log replaceChild(Expression newChild) { + return new Log(location(), newChild); + } + + @Override + protected MathOperation operation() { + return MathOperation.LOG; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Log10.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Log10.java new file mode 100644 index 0000000000000..0005488c619c5 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Log10.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * Logarithm + * base 10 function. + */ +public class Log10 extends MathFunction { + public Log10(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Log10::new, field()); + } + + @Override + protected Log10 replaceChild(Expression newChild) { + return new Log10(location(), newChild); + } + + @Override + protected MathOperation operation() { + return MathOperation.LOG10; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java new file mode 100644 index 0000000000000..c50b7243f10ec --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinitions; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.UnaryProcessorDefinition; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.Locale; +import java.util.Objects; + +import static java.lang.String.format; + +public abstract class MathFunction extends UnaryScalarFunction { + + protected MathFunction(Location location) { + super(location); + } + + protected MathFunction(Location location, Expression field) { + super(location, field); + } + + @Override + public boolean foldable() { + return field().foldable(); + } + + @Override + public Object fold() { + return operation().apply(field().fold()); + } + + @Override + protected String formatScript(String template) { + return super.formatScript(format(Locale.ROOT, "Math.%s(%s)", mathFunction(), template)); + } + + protected String mathFunction() { + return getClass().getSimpleName().toLowerCase(Locale.ROOT); + } + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } + + @Override + protected TypeResolution resolveType() { + if (!childrenResolved()) { + return new TypeResolution("Unresolved children"); + } + + return field().dataType().isNumeric() ? TypeResolution.TYPE_RESOLVED + : new TypeResolution("'%s' requires a numeric type, received %s", operation(), field().dataType().esType); + } + + @Override + protected final ProcessorDefinition makeProcessorDefinition() { + return new UnaryProcessorDefinition(location(), this, + ProcessorDefinitions.toProcessorDefinition(field()), new MathProcessor(operation())); + } + + protected abstract MathOperation operation(); + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + MathFunction other = (MathFunction) obj; + return Objects.equals(other.field(), field()); + } + + @Override + public int hashCode() { + return Objects.hash(field()); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathProcessor.java new file mode 100644 index 0000000000000..b9bf56f33a4da --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathProcessor.java @@ -0,0 +1,147 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; + +import java.io.IOException; +import java.util.Random; +import java.util.function.DoubleFunction; +import java.util.function.Function; +import java.util.function.Supplier; + +public class MathProcessor implements Processor { + + public enum MathOperation { + ABS((Object l) -> { + if (l instanceof Float) { + return Math.abs(((Float) l).floatValue()); + } + if (l instanceof Double) { + return Math.abs(((Double) l).doubleValue()); + } + long lo = ((Number) l).longValue(); + //handles the corner-case of Long.MIN_VALUE + return lo >= 0 ? lo : lo == Long.MIN_VALUE ? Long.MAX_VALUE : -lo; + }), + + ACOS(Math::acos), + ASIN(Math::asin), + ATAN(Math::atan), + CBRT(Math::cbrt), + CEIL(Math::ceil), + COS(Math::cos), + COSH(Math::cosh), + COT((Object l) -> 1.0d / Math.tan(((Number) l).doubleValue())), + DEGREES(Math::toDegrees), + E(() -> Math.E), + EXP(Math::exp), + EXPM1(Math::expm1), + FLOOR(Math::floor), + LOG(Math::log), + LOG10(Math::log10), + PI(() -> Math.PI), + RADIANS(Math::toRadians), + RANDOM((Object l) -> l != null ? + new Random(((Number) l).longValue()).nextDouble() : + Randomness.get().nextDouble(), true), + ROUND((DoubleFunction) Math::round), + SIGN((DoubleFunction) Math::signum), + SIN(Math::sin), + SINH(Math::sinh), + SQRT(Math::sqrt), + TAN(Math::tan); + + private final Function apply; + + MathOperation(Function apply) { + this(apply, false); + } + + /** + * Wrapper for nulls around the given function. + * If true, nulls are passed through, otherwise the function is short-circuited + * and null returned. + */ + MathOperation(Function apply, boolean nullAware) { + if (nullAware) { + this.apply = apply; + } else { + this.apply = l -> l == null ? null : apply.apply(l); + } + } + + MathOperation(DoubleFunction apply) { + this.apply = (Object l) -> l == null ? 
null : apply.apply(((Number) l).doubleValue()); + } + + MathOperation(Supplier supplier) { + this.apply = l -> supplier.get(); + } + + public final Object apply(Object l) { + return apply.apply(l); + } + } + + public static final String NAME = "m"; + + private final MathOperation processor; + + public MathProcessor(MathOperation processor) { + this.processor = processor; + } + + public MathProcessor(StreamInput in) throws IOException { + processor = in.readEnum(MathOperation.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(processor); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public Object process(Object input) { + if (input != null && !(input instanceof Number)) { + throw new SqlIllegalArgumentException("A number is required; received [{}]", input); + } + + return processor.apply(input); + } + + MathOperation processor() { + return processor; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + MathProcessor other = (MathProcessor) obj; + return processor == other.processor; + } + + @Override + public int hashCode() { + return processor.hashCode(); + } + + @Override + public String toString() { + return processor.toString(); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Pi.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Pi.java new file mode 100644 index 0000000000000..9758843ee5d52 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Pi.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.Params; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.util.StringUtils; + +public class Pi extends MathFunction { + + private static final ScriptTemplate TEMPLATE = new ScriptTemplate("Math.PI", Params.EMPTY, DataType.DOUBLE); + + public Pi(Location location) { + super(location, new Literal(location, Math.PI, DataType.DOUBLE)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this); + } + + @Override + protected Pi replaceChild(Expression field) { + throw new UnsupportedOperationException("this node doesn't have any children"); + } + + @Override + public Object fold() { + return Math.PI; + } + + @Override + protected String functionArgs() { + return StringUtils.EMPTY; + } + + @Override + public ScriptTemplate asScript() { + return TEMPLATE; + } + + @Override + protected MathOperation operation() { + return MathOperation.PI; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Power.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Power.java new file mode 100644 index 0000000000000..d38d7067cafb5 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Power.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor.BinaryMathOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinitions; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.function.BiFunction; + +public class Power extends BinaryNumericFunction { + + public Power(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + protected BiFunction operation() { + return BinaryMathOperation.POWER; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Power::new, left(), right()); + } + + @Override + protected Power replaceChildren(Expression newLeft, Expression newRight) { + return new Power(location(), newLeft, newRight); + } + + @Override + protected ProcessorDefinition makeProcessorDefinition() { + return new BinaryMathProcessorDefinition(location(), this, + ProcessorDefinitions.toProcessorDefinition(left()), + ProcessorDefinitions.toProcessorDefinition(right()), + BinaryMathOperation.POWER); + } + + @Override + protected String mathFunction() { + return "pow"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Radians.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Radians.java new file mode 100644 index 0000000000000..290600d1d999b --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Radians.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * Convert from degrees + * to radians. + */ +public class Radians extends MathFunction { + public Radians(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Radians::new, field()); + } + + @Override + protected Radians replaceChild(Expression newChild) { + return new Radians(location(), newChild); + } + + @Override + protected String mathFunction() { + return "toRadians"; + } + + @Override + protected MathOperation operation() { + return MathOperation.RADIANS; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Random.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Random.java new file mode 100644 index 0000000000000..4e078ed212626 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Random.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.Locale; + +import static java.lang.String.format; + +/** + * Returns a random double (using the given seed). + */ +public class Random extends MathFunction { + + public Random(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Random::new, field()); + } + + @Override + protected Random replaceChild(Expression newChild) { + return new Random(location(), newChild); + } + + @Override + protected String formatScript(String template) { + //TODO: Painless script uses Random since Randomness is not whitelisted + return super.formatScript( + format(Locale.ROOT, "%s != null ? new Random((long) %s).nextDouble() : Math.random()", template, template)); + } + + @Override + protected MathOperation operation() { + return MathOperation.RANDOM; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Round.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Round.java new file mode 100644 index 0000000000000..52d7bc5aecaa8 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Round.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypeConversion; + +/** + * Round + * function. + * + * Note that this uses {@link Math#round(double)} which uses "half up" rounding + * for `ROUND(-1.5)` rounds to `-1`. 
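The note above about "half up" rounding can be checked directly against Math.round, since ties are resolved toward positive infinity. A quick standalone check, not part of the patch (the class name is a placeholder):

```java
// Math.round uses "round half up": ties go toward positive infinity.
public class RoundSketch {
    public static void main(String[] args) {
        System.out.println(Math.round(-1.5)); // -1
        System.out.println(Math.round(-2.5)); // -2
        System.out.println(Math.round(2.5));  //  3
    }
}
```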
+ */ +public class Round extends MathFunction { + public Round(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Round::new, field()); + } + + @Override + protected Round replaceChild(Expression newChild) { + return new Round(location(), newChild); + } + + @Override + protected MathOperation operation() { + return MathOperation.ROUND; + } + + @Override + public DataType dataType() { + return DataTypeConversion.asInteger(field().dataType()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sign.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sign.java new file mode 100644 index 0000000000000..f162f015bf543 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sign.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; + +/** + * Returns the sign of the given expression: + *
+ * <ul>
+ * <li>-1 if it is negative</li>
+ * <li>0 if it is zero</li>
+ * <li>+1 if it is positive</li>
+ * </ul>
+ */ +public class Sign extends MathFunction { + public Sign(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Sign::new, field()); + } + + @Override + protected Sign replaceChild(Expression newChild) { + return new Sign(location(), newChild); + } + + @Override + protected String mathFunction() { + return "signum"; + } + + @Override + protected MathOperation operation() { + return MathOperation.SIGN; + } + + @Override + public DataType dataType() { + return DataType.INTEGER; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sin.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sin.java new file mode 100644 index 0000000000000..e61ba739e5238 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sin.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * Sine + * function. + */ +public class Sin extends MathFunction { + public Sin(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Sin::new, field()); + } + + @Override + protected Sin replaceChild(Expression newChild) { + return new Sin(location(), newChild); + } + + @Override + protected MathOperation operation() { + return MathOperation.SIN; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sinh.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sinh.java new file mode 100644 index 0000000000000..52a358176d0fd --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sinh.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * Hyperbolic sine + * function.
+ */ +public class Sinh extends MathFunction { + public Sinh(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Sinh::new, field()); + } + + @Override + protected Sinh replaceChild(Expression newChild) { + return new Sinh(location(), newChild); + } + + @Override + protected MathOperation operation() { + return MathOperation.SINH; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sqrt.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sqrt.java new file mode 100644 index 0000000000000..f9daf25d2188b --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Sqrt.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * Square root + * function. + */ +public class Sqrt extends MathFunction { + public Sqrt(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Sqrt::new, field()); + } + + @Override + protected Sqrt replaceChild(Expression newChild) { + return new Sqrt(location(), newChild); + } + + @Override + protected MathOperation operation() { + return MathOperation.SQRT; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Tan.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Tan.java new file mode 100644 index 0000000000000..25409c84ff39c --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/Tan.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * Tangent + * function. 
+ */ +public class Tan extends MathFunction { + public Tan(Location location, Expression field) { + super(location, field); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Tan::new, field()); + } + + @Override + protected Tan replaceChild(Expression newChild) { + return new Tan(location(), newChild); + } + + @Override + protected MathOperation operation() { + return MathOperation.TAN; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AggExtractorInput.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AggExtractorInput.java new file mode 100644 index 0000000000000..22a7ab2fa3e3b --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AggExtractorInput.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder; +import org.elasticsearch.xpack.sql.execution.search.extractor.BucketExtractor; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.BucketExtractorProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ChainingProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class AggExtractorInput extends LeafInput { + + private final Processor chained; + + public AggExtractorInput(Location location, Expression expression, Processor processor, BucketExtractor context) { + super(location, expression, context); + this.chained = processor; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, AggExtractorInput::new, expression(), chained, context()); + } + + @Override + public Processor asProcessor() { + Processor proc = new BucketExtractorProcessor(context()); + return chained != null ? new ChainingProcessor(proc, chained) : proc; + } + + @Override + public final boolean supportedByAggsOnlyQuery() { + return true; + } + + @Override + public ProcessorDefinition resolveAttributes(AttributeResolver resolver) { + return this; + } + + @Override + public final void collectFields(SqlSourceBuilder sourceBuilder) { + // Nothing to collect + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AggNameInput.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AggNameInput.java new file mode 100644 index 0000000000000..43da886de4934 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AggNameInput.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class AggNameInput extends CommonNonExecutableInput { + public AggNameInput(Location location, Expression expression, String context) { + super(location, expression, context); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, AggNameInput::new, expression(), context()); + } + + @Override + public final boolean supportedByAggsOnlyQuery() { + return true; + } + + @Override + public final boolean resolved() { + return false; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AggPathInput.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AggPathInput.java new file mode 100644 index 0000000000000..957a13c2f3a9f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AggPathInput.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.execution.search.AggRef; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.Objects; + +public class AggPathInput extends CommonNonExecutableInput { + + // used in case the agg itself is not returned in a suitable format (like date aggs) + private final Processor action; + + public AggPathInput(Expression expression, AggRef context) { + this(Location.EMPTY, expression, context, null); + } + + /** + * + * Constructs a new AggPathInput instance. + * The action is used for handling corner-case results such as date histogram which returns + * a full date object for year which requires additional extraction. 
+ */ + public AggPathInput(Location location, Expression expression, AggRef context, Processor action) { + super(location, expression, context); + this.action = action; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, AggPathInput::new, expression(), context(), action); + } + + public Processor action() { + return action; + } + + @Override + public boolean resolved() { + return true; + } + + @Override + public final boolean supportedByAggsOnlyQuery() { + return true; + } + + @Override + public int hashCode() { + return Objects.hash(context(), action); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + AggPathInput other = (AggPathInput) obj; + return Objects.equals(context(), other.context()) + && Objects.equals(action, other.action); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AttributeInput.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AttributeInput.java new file mode 100644 index 0000000000000..ba97f0e64947e --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AttributeInput.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * An input that must first be rewritten against the rest of the query + * before it can be further processed. + */ +public class AttributeInput extends NonExecutableInput { + public AttributeInput(Location location, Expression expression, Attribute context) { + super(location, expression, context); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, AttributeInput::new, expression(), context()); + } + + @Override + public final boolean supportedByAggsOnlyQuery() { + return true; + } + + @Override + public ProcessorDefinition resolveAttributes(AttributeResolver resolver) { + return new ReferenceInput(location(), expression(), resolver.resolve(context())); + } + + @Override + public final void collectFields(SqlSourceBuilder sourceBuilder) { + // Nothing to extract + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/BinaryProcessorDefinition.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/BinaryProcessorDefinition.java new file mode 100644 index 0000000000000..e758b104f28a8 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/BinaryProcessorDefinition.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.Arrays; +import java.util.List; + +public abstract class BinaryProcessorDefinition extends ProcessorDefinition { + + private final ProcessorDefinition left, right; + + public BinaryProcessorDefinition(Location location, Expression expression, ProcessorDefinition left, ProcessorDefinition right) { + super(location, expression, Arrays.asList(left, right)); + this.left = left; + this.right = right; + } + + @Override + public final ProcessorDefinition replaceChildren(List newChildren) { + if (newChildren.size() != 2) { + throw new IllegalArgumentException("expected [2] children but received [" + newChildren.size() + "]"); + } + return replaceChildren(newChildren.get(0), newChildren.get(1)); + } + + public ProcessorDefinition left() { + return left; + } + + public ProcessorDefinition right() { + return right; + } + + @Override + public boolean supportedByAggsOnlyQuery() { + return left.supportedByAggsOnlyQuery() && right.supportedByAggsOnlyQuery(); + } + + @Override + public final ProcessorDefinition resolveAttributes(AttributeResolver resolver) { + ProcessorDefinition newLeft = left.resolveAttributes(resolver); + ProcessorDefinition newRight = right.resolveAttributes(resolver); + if (newLeft == left && newRight == right) { + return this; + } + return replaceChildren(newLeft, newRight); + } + + /** + * Build a copy of this object with new left and right children. Used by + * {@link #resolveAttributes(AttributeResolver)}. + */ + protected abstract BinaryProcessorDefinition replaceChildren(ProcessorDefinition left, ProcessorDefinition right); + + @Override + public boolean resolved() { + return left().resolved() && right().resolved(); + } + + @Override + public final void collectFields(SqlSourceBuilder sourceBuilder) { + left.collectFields(sourceBuilder); + right.collectFields(sourceBuilder); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/CommonNonExecutableInput.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/CommonNonExecutableInput.java new file mode 100644 index 0000000000000..c202f215457de --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/CommonNonExecutableInput.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.elasticsearch.xpack.sql.tree.Location; + +/** + * Implementation common to most subclasses of + * {@link NonExecutableInput} but not shared by all. 
+ */ +abstract class CommonNonExecutableInput extends NonExecutableInput { + CommonNonExecutableInput(Location location, Expression expression, T context) { + super(location, expression, context); + } + + @Override + public final Processor asProcessor() { + throw new SqlIllegalArgumentException("Unresolved input - needs resolving first"); + } + + @Override + public final ProcessorDefinition resolveAttributes(AttributeResolver resolver) { + return this; + } + + @Override + public final void collectFields(SqlSourceBuilder sourceBuilder) { + // Nothing to extract + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ConstantInput.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ConstantInput.java new file mode 100644 index 0000000000000..63db9d9a4c58d --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ConstantInput.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ConstantProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class ConstantInput extends LeafInput { + + public ConstantInput(Location location, Expression expression, Object context) { + super(location, expression, context); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ConstantInput::new, expression(), context()); + } + + @Override + public Processor asProcessor() { + return new ConstantProcessor(context()); + } + + @Override + public final boolean supportedByAggsOnlyQuery() { + return true; + } + + @Override + public ProcessorDefinition resolveAttributes(AttributeResolver resolver) { + return this; + } + + @Override + public final void collectFields(SqlSourceBuilder sourceBuilder) { + // Nothing to collect + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/HitExtractorInput.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/HitExtractorInput.java new file mode 100644 index 0000000000000..50a00880e9d07 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/HitExtractorInput.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder; +import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.HitExtractorProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class HitExtractorInput extends LeafInput { + + public HitExtractorInput(Location location, Expression expression, HitExtractor context) { + super(location, expression, context); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, HitExtractorInput::new, expression(), context()); + } + + @Override + public Processor asProcessor() { + return new HitExtractorProcessor(context()); + } + + @Override + public final boolean supportedByAggsOnlyQuery() { + return true; + } + + @Override + public ProcessorDefinition resolveAttributes(AttributeResolver resolver) { + return this; + } + + @Override + public final void collectFields(SqlSourceBuilder sourceBuilder) { + // No fields to collect + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/LeafInput.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/LeafInput.java new file mode 100644 index 0000000000000..5b7468faa315c --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/LeafInput.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.List; +import java.util.Objects; + +import static java.util.Collections.emptyList; + +public abstract class LeafInput extends ProcessorDefinition { + + private T context; + + public LeafInput(Location location, Expression expression, T context) { + super(location, expression, emptyList()); + this.context = context; + } + + @Override + public final ProcessorDefinition replaceChildren(List newChildren) { + throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); + } + + public T context() { + return context; + } + + @Override + public boolean resolved() { + return true; + } + + @Override + public int hashCode() { + return Objects.hash(expression(), context); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + LeafInput other = (LeafInput) obj; + return Objects.equals(context(), other.context()) + && Objects.equals(expression(), other.expression()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/NonExecutableInput.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/NonExecutableInput.java new file mode 100644 index 0000000000000..2161f09d46ac2 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/NonExecutableInput.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.elasticsearch.xpack.sql.tree.Location; + +public abstract class NonExecutableInput extends LeafInput { + NonExecutableInput(Location location, Expression expression, T context) { + super(location, expression, context); + } + + @Override + public boolean resolved() { + return false; + } + + @Override + public Processor asProcessor() { + throw new SqlIllegalArgumentException("Unresolved input - needs resolving first"); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ProcessorDefinition.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ProcessorDefinition.java new file mode 100644 index 0000000000000..929367fca94f9 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ProcessorDefinition.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.execution.search.FieldExtraction; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.Node; + +import java.util.List; + +/** + * Contains the tree for processing a function, so for example, the {@code ProcessorDefinition} of: + * + * ABS(MAX(foo)) + CAST(bar) + * + * Is an {@code Add} Function with left {@code ABS} over an aggregate (MAX), and + * right being a {@code CAST} function. + */ +public abstract class ProcessorDefinition extends Node implements FieldExtraction { + + private final Expression expression; + + public ProcessorDefinition(Location location, Expression expression, List children) { + super(location, children); + this.expression = expression; + } + + public Expression expression() { + return expression; + } + + public abstract boolean resolved(); + + public abstract Processor asProcessor(); + + /** + * Resolve {@link Attribute}s which are unprocessable into + * {@link FieldExtraction}s which are processable. + * + * @return {@code this} if the resolution doesn't change the + * definition, a new {@link ProcessorDefinition} otherwise + */ + public abstract ProcessorDefinition resolveAttributes(AttributeResolver resolver); + public interface AttributeResolver { + FieldExtraction resolve(Attribute attribute); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ProcessorDefinitions.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ProcessorDefinitions.java new file mode 100644 index 0000000000000..e525b37b4dd7b --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ProcessorDefinitions.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; + +public abstract class ProcessorDefinitions { + + public static ProcessorDefinition toProcessorDefinition(Expression ex) { + if (ex.foldable()) { + return new ConstantInput(ex.location(), ex, ex.fold()); + } + if (ex instanceof ScalarFunction) { + return ((ScalarFunction) ex).asProcessorDefinition(); + } + if (ex instanceof AggregateFunction) { + // unresolved AggNameInput (should always get replaced by the folder) + return new AggNameInput(ex.location(), ex, ((AggregateFunction) ex).name()); + } + if (ex instanceof NamedExpression) { + return new AttributeInput(ex.location(), ex, ((NamedExpression) ex).toAttribute()); + } + throw new SqlIllegalArgumentException("Cannot extract processor from {}", ex); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ReferenceInput.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ReferenceInput.java new file mode 100644 index 0000000000000..59c001093be79 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ReferenceInput.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.execution.search.FieldExtraction; +import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class ReferenceInput extends NonExecutableInput { + public ReferenceInput(Location location, Expression expression, FieldExtraction context) { + super(location, expression, context); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ReferenceInput::new, expression(), context()); + } + + @Override + public final boolean supportedByAggsOnlyQuery() { + return false; + } + + @Override + public ProcessorDefinition resolveAttributes(AttributeResolver resolver) { + return this; + } + + @Override + public final void collectFields(SqlSourceBuilder sourceBuilder) { + context().collectFields(sourceBuilder); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ScoreProcessorDefinition.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ScoreProcessorDefinition.java new file mode 100644 index 0000000000000..5617fa016177f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/ScoreProcessorDefinition.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder; +import org.elasticsearch.xpack.sql.execution.search.extractor.ScoreExtractor; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.HitExtractorProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.List; + +import static java.util.Collections.emptyList; + +public class ScoreProcessorDefinition extends ProcessorDefinition { + public ScoreProcessorDefinition(Location location, Expression expression) { + super(location, expression, emptyList()); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ScoreProcessorDefinition::new, expression()); + } + + @Override + public final ProcessorDefinition replaceChildren(List newChildren) { + throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); + } + + @Override + public boolean resolved() { + return true; + } + + @Override + public Processor asProcessor() { + return new HitExtractorProcessor(ScoreExtractor.INSTANCE); + } + + @Override + public boolean supportedByAggsOnlyQuery() { + return false; + } + + @Override + public ProcessorDefinition resolveAttributes(AttributeResolver resolver) { + return this; + } + + @Override + public void collectFields(SqlSourceBuilder sourceBuilder) { + sourceBuilder.trackScores(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/UnaryProcessorDefinition.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/UnaryProcessorDefinition.java new file mode 100644 index 0000000000000..fe8a4099ec3c1 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/UnaryProcessorDefinition.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ChainingProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.List; +import java.util.Objects; + +import static java.util.Collections.singletonList; + +public final class UnaryProcessorDefinition extends ProcessorDefinition { + + private final ProcessorDefinition child; + private final Processor action; + + public UnaryProcessorDefinition(Location location, Expression expression, ProcessorDefinition child, Processor action) { + super(location, expression, singletonList(child)); + this.child = child; + this.action = action; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, UnaryProcessorDefinition::new, expression(), child, action); + } + + @Override + public ProcessorDefinition replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return new UnaryProcessorDefinition(location(), expression(), newChildren.get(0), action); + } + + public ProcessorDefinition child() { + return child; + } + + public Processor action() { + return action; + } + + @Override + public boolean resolved() { + return child.resolved(); + } + + @Override + public Processor asProcessor() { + return new ChainingProcessor(child.asProcessor(), action); + } + + @Override + public boolean supportedByAggsOnlyQuery() { + return child.supportedByAggsOnlyQuery(); + } + + @Override + public ProcessorDefinition resolveAttributes(AttributeResolver resolver) { + ProcessorDefinition newChild = child.resolveAttributes(resolver); + if (newChild == child) { + return this; + } + return new UnaryProcessorDefinition(location(), expression(), newChild, action); + } + + @Override + public void collectFields(SqlSourceBuilder sourceBuilder) { + child.collectFields(sourceBuilder); + } + + @Override + public int hashCode() { + return Objects.hash(expression(), child, action); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + UnaryProcessorDefinition other = (UnaryProcessorDefinition) obj; + return Objects.equals(action, other.action) + && Objects.equals(child, other.child) + && Objects.equals(expression(), other.expression()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/BinaryProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/BinaryProcessor.java new file mode 100644 index 0000000000000..81795923915a3 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/BinaryProcessor.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public abstract class BinaryProcessor implements Processor { + + private final Processor left, right; + + public BinaryProcessor(Processor left, Processor right) { + this.left = left; + this.right = right; + } + + protected BinaryProcessor(StreamInput in) throws IOException { + left = in.readNamedWriteable(Processor.class); + right = in.readNamedWriteable(Processor.class); + } + + @Override + public final void writeTo(StreamOutput out) throws IOException { + out.writeNamedWriteable(left); + out.writeNamedWriteable(right); + doWrite(out); + } + + protected abstract void doWrite(StreamOutput out) throws IOException; + + @Override + public Object process(Object input) { + return doProcess(left.process(input), right.process(input)); + } + + protected Processor left() { + return left; + } + + protected Processor right() { + return right; + } + + protected abstract Object doProcess(Object left, Object right); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/BucketExtractorProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/BucketExtractorProcessor.java new file mode 100644 index 0000000000000..0a5a2b1f1e091 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/BucketExtractorProcessor.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.execution.search.extractor.BucketExtractor; + +import java.io.IOException; +import java.util.Objects; + +/** + * Processor wrapping an {@link BucketExtractor}, essentially being a source/leaf of a + * Processor tree. 
+ */ +public class BucketExtractorProcessor implements Processor { + + public static final String NAME = "a"; + + private final BucketExtractor extractor; + + public BucketExtractorProcessor(BucketExtractor extractor) { + this.extractor = extractor; + } + + public BucketExtractorProcessor(StreamInput in) throws IOException { + extractor = in.readNamedWriteable(BucketExtractor.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeNamedWriteable(extractor); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public Object process(Object input) { + if (!(input instanceof Bucket)) { + throw new SqlIllegalArgumentException("Expected an agg bucket but received {}", input); + } + return extractor.extract((Bucket) input); + } + + @Override + public int hashCode() { + return Objects.hash(extractor); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + BucketExtractorProcessor other = (BucketExtractorProcessor) obj; + return Objects.equals(extractor, other.extractor); + } + + @Override + public String toString() { + return extractor.toString(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ChainingProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ChainingProcessor.java new file mode 100644 index 0000000000000..9be7de637e349 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ChainingProcessor.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Objects; + +/** + * A {@linkplain Processor} that composes the results of two + * {@linkplain Processor}s. 
+ */ +public class ChainingProcessor extends UnaryProcessor { + public static final String NAME = "."; + + private final Processor processor; + + public ChainingProcessor(Processor first, Processor second) { + super(first); + this.processor = second; + } + + public ChainingProcessor(StreamInput in) throws IOException { + super(in); + processor = in.readNamedWriteable(Processor.class); + } + + @Override + protected void doWrite(StreamOutput out) throws IOException { + out.writeNamedWriteable(processor); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + protected Object doProcess(Object input) { + return processor.process(input); + } + + Processor first() { + return child(); + } + + Processor second() { + return processor; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), processor); + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) && Objects.equals(processor, ((ChainingProcessor) obj).processor); + } + + @Override + public String toString() { + return processor + "(" + super.toString() + ")"; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ConstantProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ConstantProcessor.java new file mode 100644 index 0000000000000..cc419f3c7b71a --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ConstantProcessor.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Objects; + +public class ConstantProcessor implements Processor { + + public static String NAME = "c"; + + private final Object constant; + + public ConstantProcessor(Object value) { + this.constant = value; + } + + public ConstantProcessor(StreamInput in) throws IOException { + constant = in.readGenericValue(); + } + + public void writeTo(StreamOutput out) throws IOException { + out.writeGenericValue(constant); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public Object process(Object input) { + return constant; + } + + @Override + public int hashCode() { + return Objects.hashCode(constant); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + ConstantProcessor other = (ConstantProcessor) obj; + return Objects.equals(constant, other.constant); + } + + @Override + public String toString() { + return "^" + constant; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/HitExtractorProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/HitExtractorProcessor.java new file mode 100644 index 0000000000000..5960b8cfca04c --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/HitExtractorProcessor.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor; + +import java.io.IOException; +import java.util.Objects; + +/** + * Processor wrapping a {@link HitExtractor}, essentially being a source/leaf of a + * Processor tree. 
+ */ +public class HitExtractorProcessor implements Processor { + + public static final String NAME = "h"; + + private final HitExtractor extractor; + + public HitExtractorProcessor(HitExtractor extractor) { + this.extractor = extractor; + } + + public HitExtractorProcessor(StreamInput in) throws IOException { + extractor = in.readNamedWriteable(HitExtractor.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeNamedWriteable(extractor); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public Object process(Object input) { + if (!(input instanceof SearchHit)) { + throw new SqlIllegalArgumentException("Expected a SearchHit but received {}", input); + } + return extractor.extract((SearchHit) input); + } + + @Override + public int hashCode() { + return Objects.hash(extractor); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + HitExtractorProcessor other = (HitExtractorProcessor) obj; + return Objects.equals(extractor, other.extractor); + } + + @Override + public String toString() { + return extractor.toString(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/Processor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/Processor.java new file mode 100644 index 0000000000000..9fb67fb51a177 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/Processor.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime; + +import org.elasticsearch.common.io.stream.NamedWriteable; + +/** + * For a scalar function, a {@code Processor} is how we convert one value to another value. For instance, ABS(foo). + * Aggregate functions are handled by ES but scalars are not. + * + * This is an opaque class; the computed/compiled result gets saved on the client during scrolling. + */ +public interface Processor extends NamedWriteable { + + Object process(Object input); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/UnaryProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/UnaryProcessor.java new file mode 100644 index 0000000000000..613e263228366 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/UnaryProcessor.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Objects; + +public abstract class UnaryProcessor implements Processor { + + private final Processor child; + + public UnaryProcessor(Processor child) { + this.child = child; + } + + protected UnaryProcessor(StreamInput in) throws IOException { + child = in.readNamedWriteable(Processor.class); + } + + @Override + public final void writeTo(StreamOutput out) throws IOException { + out.writeNamedWriteable(child); + doWrite(out); + } + + protected abstract void doWrite(StreamOutput out) throws IOException; + + @Override + public final Object process(Object input) { + return doProcess(child.process(input)); + } + + public Processor child() { + return child; + } + + protected abstract Object doProcess(Object input); + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + UnaryProcessor other = (UnaryProcessor) obj; + return Objects.equals(child, other.child); + } + + @Override + public int hashCode() { + return Objects.hashCode(child); + } + + @Override + public String toString() { + return Objects.toString(child); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Agg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Agg.java new file mode 100644 index 0000000000000..3b75b7f98b5a8 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Agg.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.script; + +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; + +class Agg extends Param { + + Agg(AggregateFunctionAttribute aggRef) { + super(aggRef); + } + + String aggName() { + return value().functionId(); + } + + public String aggProperty() { + return value().propertyPath(); + } + + @Override + public String prefix() { + return "a"; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Param.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Param.java new file mode 100644 index 0000000000000..ff2e3322ae0fb --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Param.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.script; + +import java.util.Locale; + +abstract class Param { + private final T value; + + Param(T value) { + this.value = value; + } + + abstract String prefix(); + + T value() { + return value; + } + + @Override + public String toString() { + return String.format(Locale.ROOT, "{%s=%s}", prefix(), value); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Params.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Params.java new file mode 100644 index 0000000000000..c7c331e3b5813 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Params.java @@ -0,0 +1,134 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.script; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; + +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyList; + +/** + * Parameters for a script + * + * This class mainly exists to handle the different aggregation cases. + * While aggs can appear in scripts like regular parameters, they are not passed + * as parameters but rather as bucket_path. + * However in some cases (like count), it's not the agg path that is relevant but rather + * its property (_count). + * As the agg name still needs to be remembered to properly associate the script with. + * + * Hence why this class supports aggRef (which always returns the agg names) and aggPaths + * (which returns the agg property if it exists or the agg name/reference). + * + * Also the parameter names support late binding/evaluation since the agg reference (like function id) + * can be changed during the optimization phase (for example min agg -> stats.min). + */ +public class Params { + + public static final Params EMPTY = new Params(emptyList()); + + private final List> params; + + Params(List> params) { + // flatten params + this.params = flatten(params); + } + + // return vars and aggs in the declared order for binding them to the script + List asCodeNames() { + if (params.isEmpty()) { + return emptyList(); + } + + List names = new ArrayList<>(params.size()); + int aggs = 0, vars = 0; + + for (Param p : params) { + names.add(p.prefix() + (p instanceof Agg ? aggs++ : vars++)); + } + + return names; + } + + // return only the vars (as parameter for a script) + // agg refs are returned separately to be provided as bucket_paths + Map asParams() { + Map map = new LinkedHashMap<>(params.size()); + + int count = 0; + + for (Param p : params) { + if (p instanceof Var) { + map.put(p.prefix() + count++, p.value()); + } + } + + return map; + } + + // return agg refs in a format suitable for bucket_paths + Map asAggPaths() { + Map map = new LinkedHashMap<>(); + + int aggs = 0; + + for (Param p : params) { + if (p instanceof Agg) { + Agg a = (Agg) p; + String s = a.aggProperty() != null ? 
a.aggProperty() : a.aggName(); + map.put(p.prefix() + aggs++, s); + } + } + + return map; + } + + // return the agg refs + List asAggRefs() { + List refs = new ArrayList<>(); + + for (Param p : params) { + if (p instanceof Agg) { + refs.add(((Agg) p).aggName()); + } + } + + return refs; + } + + + private static List> flatten(List> params) { + List> flatten = emptyList(); + + if (!params.isEmpty()) { + flatten = new ArrayList<>(); + for (Param p : params) { + if (p instanceof Script) { + flatten.addAll(flatten(((Script) p).value().params)); + } + else if (p instanceof Agg) { + flatten.add(p); + } + else if (p instanceof Var) { + flatten.add(p); + } + else { + throw new SqlIllegalArgumentException("Unsupported field {}", p); + } + } + } + return flatten; + } + + @Override + public String toString() { + return params.toString(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/ParamsBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/ParamsBuilder.java new file mode 100644 index 0000000000000..8f99f29b9c1cd --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/ParamsBuilder.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.script; + +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; + +import java.util.ArrayList; +import java.util.List; + +public class ParamsBuilder { + + private final List> params = new ArrayList<>(); + + public static ParamsBuilder paramsBuilder() { + return new ParamsBuilder(); + } + + public ParamsBuilder variable(Object value) { + params.add(new Var(value)); + return this; + } + + public ParamsBuilder agg(AggregateFunctionAttribute agg) { + params.add(new Agg(agg)); + return this; + } + + public ParamsBuilder script(Params ps) { + params.add(new Script(ps)); + return this; + } + + public Params build() { + return new Params(new ArrayList<>(params)); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Script.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Script.java new file mode 100644 index 0000000000000..ceabac9c49934 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Script.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
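To make the naming scheme above concrete, here is a small usage fragment showing what the builder produces for plain variables. It assumes same-package access (asCodeNames, asParams and asAggPaths are package-private) and elides aggregate references, which would surface through asAggPaths/asAggRefs instead.

```java
// Fragment, assuming it lives in ...function.scalar.script (package-private access).
Params params = ParamsBuilder.paramsBuilder()
        .variable(10)       // becomes v0
        .variable("foo")    // becomes v1
        .build();

params.asCodeNames();  // [v0, v1]        - names bound into the script source
params.asParams();     // {v0=10, v1=foo} - values passed as the script's params map
params.asAggPaths();   // {}              - empty: no agg references were added
```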
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.script; + +class Script extends Param { + + Script(Params value) { + super(value); + } + + @Override + public String prefix() { + return "s"; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/ScriptTemplate.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/ScriptTemplate.java new file mode 100644 index 0000000000000..b5f9e7f3f9cfd --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/ScriptTemplate.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.script; + +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +import static java.lang.String.format; + +public class ScriptTemplate { + + public static final ScriptTemplate EMPTY = new ScriptTemplate(StringUtils.EMPTY); + + private final String template; + private final Params params; + // used for sorting based on scripts + private final DataType outputType; + + public ScriptTemplate(String template) { + this(template, Params.EMPTY, DataType.KEYWORD); + } + + public ScriptTemplate(String template, Params params, DataType outputType) { + this.template = template; + this.params = params; + this.outputType = outputType; + } + + public String template() { + return template; + } + + public Params params() { + return params; + } + + public List aggRefs() { + return params.asAggRefs(); + } + + public Map aggPaths() { + return params.asAggPaths(); + } + + public DataType outputType() { + return outputType; + } + + public Script toPainless() { + return new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, bindTemplate(), params.asParams()); + } + + private String bindTemplate() { + List binding = params.asCodeNames(); + return binding.isEmpty() ? 
template : format(Locale.ROOT, template, binding.toArray()); + } + + @Override + public int hashCode() { + return Objects.hash(template, params, outputType); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + ScriptTemplate other = (ScriptTemplate) obj; + return Objects.equals(template, other.template) + && Objects.equals(params, other.params) + && Objects.equals(outputType, other.outputType); + } + + @Override + public String toString() { + return bindTemplate(); + } + + public static String formatTemplate(String template) { + return template.replace("{}", "params.%s"); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Var.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Var.java new file mode 100644 index 0000000000000..96bda8eabe68f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/script/Var.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.script; + +class Var extends Param { + + Var(Object value) { + super(value); + } + + @Override + public String prefix() { + return "v"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/And.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/And.java new file mode 100644 index 0000000000000..0cabf065f8f57 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/And.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
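The two-step substitution performed by ScriptTemplate (formatTemplate turning each {} into a params.%s placeholder, then bindTemplate filling in the generated code names) can be reproduced in plain Java; the snippet below is a standalone illustration of that mechanism rather than a call into the plugin classes.

```java
import java.util.Locale;

public class TemplateBindingDemo {
    public static void main(String[] args) {
        // ScriptTemplate.formatTemplate: every {} becomes a params.%s placeholder
        String template = "doc[{}].value > {}".replace("{}", "params.%s");

        // ScriptTemplate.bindTemplate: the code names from Params.asCodeNames()
        // (v0, v1, a0, ...) are substituted into the placeholders
        String painless = String.format(Locale.ROOT, template, "v0", "v1");

        System.out.println(painless); // doc[params.v0].value > params.v1
    }
}
```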
+ */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.BinaryLogic; +import org.elasticsearch.xpack.sql.expression.BinaryOperator; +import org.elasticsearch.xpack.sql.expression.BinaryOperator.Negateable; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.Objects; + +public class And extends BinaryLogic implements Negateable { + + public And(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, And::new, left(), right()); + } + + @Override + protected BinaryOperator replaceChildren(Expression newLeft, Expression newRight) { + return new And(location(), newLeft, newRight); + } + + public Object fold() { + return Objects.equals(left().fold(), Boolean.TRUE) && Objects.equals(right().fold(), Boolean.TRUE); + } + + @Override + public Or negate() { + return new Or(location(), new Not(location(), left()), new Not(location(), right())); + } + + @Override + public And swapLeftAndRight() { + return new And(location(), right(), left()); + } + + @Override + public String symbol() { + return "&&"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/BinaryComparison.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/BinaryComparison.java new file mode 100644 index 0000000000000..0fe94feba16cc --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/BinaryComparison.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.BinaryOperator; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +// marker class to indicate operations that rely on values +public abstract class BinaryComparison extends BinaryOperator { + + public BinaryComparison(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + protected TypeResolution resolveInputType(DataType inputType) { + return TypeResolution.TYPE_RESOLVED; + } + + @Override + protected Expression canonicalize() { + return left().hashCode() > right().hashCode() ? 
swapLeftAndRight() : this; + } + + @Override + public DataType dataType() { + return DataType.BOOLEAN; + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + static Integer compare(Object left, Object right) { + if (left instanceof Comparable && right instanceof Comparable) { + return Integer.valueOf(((Comparable) left).compareTo(right)); + } + return null; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Equals.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Equals.java new file mode 100644 index 0000000000000..4d4d7082eea68 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Equals.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.BinaryOperator; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.Objects; + +public class Equals extends BinaryComparison { + + public Equals(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Equals::new, left(), right()); + } + + @Override + protected Equals replaceChildren(Expression newLeft, Expression newRight) { + return new Equals(location(), newLeft, newRight); + } + + public Object fold() { + return Objects.equals(left().fold(), right().fold()); + } + + @Override + public Equals swapLeftAndRight() { + return new Equals(location(), right(), left()); + } + + @Override + public String symbol() { + return "="; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/GreaterThan.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/GreaterThan.java new file mode 100644 index 0000000000000..5fecc7c4f63e0 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/GreaterThan.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
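One detail of BinaryComparison worth calling out: compare returns null when either operand is not Comparable, and the ordering comparisons that follow (GreaterThan, LessThan and friends) fold that null to false rather than failing. A standalone sketch of the same behaviour:

```java
public class CompareFoldDemo {
    // Same shape as BinaryComparison.compare above.
    @SuppressWarnings({ "rawtypes", "unchecked" })
    static Integer compare(Object left, Object right) {
        if (left instanceof Comparable && right instanceof Comparable) {
            return Integer.valueOf(((Comparable) left).compareTo(right));
        }
        return null;
    }

    public static void main(String[] args) {
        Integer c1 = compare(3, 2);
        System.out.println(c1 != null && c1.intValue() > 0); // true: 3 > 2 folds to TRUE

        Integer c2 = compare(3, new Object());
        System.out.println(c2 != null && c2.intValue() > 0); // false: incomparable operands fold to FALSE
    }
}
```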
+ */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.BinaryOperator.Negateable; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class GreaterThan extends BinaryComparison implements Negateable { + + public GreaterThan(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, GreaterThan::new, left(), right()); + } + + @Override + protected GreaterThan replaceChildren(Expression newLeft, Expression newRight) { + return new GreaterThan(location(), newLeft, newRight); + } + + public Object fold() { + Integer compare = compare(left().fold(), right().fold()); + return compare != null && compare.intValue() > 0; + } + + @Override + public LessThan swapLeftAndRight() { + return new LessThan(location(), right(), left()); + } + + @Override + public LessThanOrEqual negate() { + return new LessThanOrEqual(location(), left(), right()); + } + + @Override + public String symbol() { + return ">"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/GreaterThanOrEqual.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/GreaterThanOrEqual.java new file mode 100644 index 0000000000000..837cfa1df9349 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/GreaterThanOrEqual.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.BinaryOperator.Negateable; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class GreaterThanOrEqual extends BinaryComparison implements Negateable { + + public GreaterThanOrEqual(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, GreaterThanOrEqual::new, left(), right()); + } + + @Override + protected GreaterThanOrEqual replaceChildren(Expression newLeft, Expression newRight) { + return new GreaterThanOrEqual(location(), newLeft, newRight); + } + + public Object fold() { + Integer compare = compare(left().fold(), right().fold()); + return compare != null && compare.intValue() >= 0; + } + + @Override + public LessThanOrEqual swapLeftAndRight() { + return new LessThanOrEqual(location(), right(), left()); + } + + @Override + public LessThan negate() { + return new LessThan(location(), left(), right()); + } + + @Override + public String symbol() { + return ">="; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/In.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/In.java new file mode 100644 index 0000000000000..ca3a08f95136e --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/In.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import java.util.List; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; +import org.elasticsearch.xpack.sql.util.CollectionUtils; + +public class In extends Expression { + + private final Expression value; + private final List list; + private final boolean nullable, foldable; + + public In(Location location, Expression value, List list) { + super(location, CollectionUtils.combine(list, value)); + this.value = value; + this.list = list; + + this.nullable = children().stream().anyMatch(Expression::nullable); + this.foldable = children().stream().allMatch(Expression::foldable); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, In::new, value(), list()); + } + + @Override + public Expression replaceChildren(List newChildren) { + if (newChildren.size() < 1) { + throw new IllegalArgumentException("expected one or more children but received [" + newChildren.size() + "]"); + } + return new In(location(), newChildren.get(newChildren.size() - 1), newChildren.subList(0, newChildren.size() - 1)); + } + + public Expression value() { + return value; + } + + public List list() { + return list; + } + + @Override + public DataType dataType() { + return DataType.BOOLEAN; + } + + @Override + public boolean nullable() { + return nullable; + } + + @Override + public boolean foldable() { + return foldable; + } + + @Override + public int hashCode() { + return Objects.hash(value, list); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (!super.equals(obj) || getClass() != obj.getClass()) { + return false; + } + + In other = (In) obj; + return Objects.equals(value, other.value) + && Objects.equals(list, other.list); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/IsNotNull.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/IsNotNull.java new file mode 100644 index 0000000000000..421405102459e --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/IsNotNull.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
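The slightly surprising indexing in In.replaceChildren follows from the constructor above: CollectionUtils.combine(list, value) appends the tested value after the IN list, so the value is always the last child. A short fragment (with `in` standing for an existing In instance) spells out that convention:

```java
// Fragment: `in` is an existing In expression.
List<Expression> children = in.children();              // [e1, e2, ..., en, value]
Expression value = children.get(children.size() - 1);   // tested value comes last
List<Expression> inList = children.subList(0, children.size() - 1);
```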
+ */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.UnaryExpression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +public class IsNotNull extends UnaryExpression { + + public IsNotNull(Location location, Expression child) { + super(location, child); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, IsNotNull::new, child()); + } + + @Override + protected IsNotNull replaceChild(Expression newChild) { + return new IsNotNull(location(), newChild); + } + + public Object fold() { + return child().fold() != null && !DataTypes.isNull(child().dataType()); + } + + @Override + public boolean nullable() { + return false; + } + + @Override + public DataType dataType() { + return DataType.BOOLEAN; + } + + @Override + public String toString() { + return child().toString() + " IS NOT NULL"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/LessThan.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/LessThan.java new file mode 100644 index 0000000000000..151614b45dd84 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/LessThan.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.BinaryOperator.Negateable; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class LessThan extends BinaryComparison implements Negateable { + + public LessThan(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, LessThan::new, left(), right()); + } + + @Override + protected LessThan replaceChildren(Expression newLeft, Expression newRight) { + return new LessThan(location(), newLeft, newRight); + } + + public Object fold() { + Integer compare = compare(left().fold(), right().fold()); + return compare != null && compare.intValue() < 0; + } + + @Override + public GreaterThan swapLeftAndRight() { + return new GreaterThan(location(), right(), left()); + } + + @Override + public GreaterThanOrEqual negate() { + return new GreaterThanOrEqual(location(), left(), right()); + } + + @Override + public String symbol() { + return "<"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/LessThanOrEqual.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/LessThanOrEqual.java new file mode 100644 index 0000000000000..3f5a1252691db --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/LessThanOrEqual.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.BinaryOperator.Negateable; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class LessThanOrEqual extends BinaryComparison implements Negateable { + + public LessThanOrEqual(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, LessThanOrEqual::new, left(), right()); + } + + @Override + protected LessThanOrEqual replaceChildren(Expression newLeft, Expression newRight) { + return new LessThanOrEqual(location(), newLeft, newRight); + } + + @Override + public Object fold() { + Integer compare = compare(left().fold(), right().fold()); + return compare != null && compare.intValue() <= 0; + } + + @Override + public GreaterThanOrEqual swapLeftAndRight() { + return new GreaterThanOrEqual(location(), right(), left()); + } + + @Override + public GreaterThan negate() { + return new GreaterThan(location(), left(), right()); + } + + @Override + public String symbol() { + return "<="; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Not.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Not.java new file mode 100644 index 0000000000000..71ce42ba8aa81 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Not.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.BinaryOperator.Negateable; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.UnaryExpression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.Objects; + +public class Not extends UnaryExpression { + + public Not(Location location, Expression child) { + super(location, child); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Not::new, child()); + } + + @Override + protected Not replaceChild(Expression newChild) { + return new Not(location(), newChild); + } + + protected TypeResolution resolveType() { + if (DataType.BOOLEAN == child().dataType()) { + return TypeResolution.TYPE_RESOLVED; + } + return new TypeResolution("Cannot negate expression ([" + Expressions.name(child()) + "] of type [" + + child().dataType().esType + "])"); + } + + @Override + public Object fold() { + return Objects.equals(child().fold(), Boolean.TRUE) ? 
Boolean.FALSE : Boolean.TRUE; + } + + @Override + protected Expression canonicalize() { + Expression canonicalChild = child().canonical(); + if (canonicalChild instanceof Negateable) { + return ((Negateable) canonicalChild).negate(); + } + return this; + } + + @Override + public DataType dataType() { + return DataType.BOOLEAN; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Or.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Or.java new file mode 100644 index 0000000000000..49bd40b284686 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Or.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.BinaryLogic; +import org.elasticsearch.xpack.sql.expression.BinaryOperator; +import org.elasticsearch.xpack.sql.expression.BinaryOperator.Negateable; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.Objects; + +public class Or extends BinaryLogic implements Negateable { + + public Or(Location location, Expression left, Expression right) { + super(location, left, right); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Or::new, left(), right()); + } + + @Override + protected BinaryOperator replaceChildren(Expression newLeft, Expression newRight) { + return new Or(location(), newLeft, newRight); + } + + @Override + public Object fold() { + return Objects.equals(left().fold(), Boolean.TRUE) || Objects.equals(right().fold(), Boolean.TRUE); + } + + @Override + public Or swapLeftAndRight() { + return new Or(location(), right(), left()); + } + + @Override + public And negate() { + return new And(location(), new Not(location(), left()), new Not(location(), right())); + } + + @Override + public String symbol() { + return "||"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Predicates.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Predicates.java new file mode 100644 index 0000000000000..7439f6def1453 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Predicates.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
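The fold() implementations of And, Or and Not above all compare folded operands against Boolean.TRUE, so an operand that folds to null (for instance a NULL literal) simply behaves as false instead of propagating. A plain-Java rendition of that rule:

```java
import java.util.Objects;

public class BooleanFoldDemo {
    public static void main(String[] args) {
        Object left = null;          // e.g. a NULL literal after folding
        Object right = Boolean.TRUE;

        boolean and = Objects.equals(left, Boolean.TRUE) && Objects.equals(right, Boolean.TRUE);
        boolean or  = Objects.equals(left, Boolean.TRUE) || Objects.equals(right, Boolean.TRUE);
        boolean not = Objects.equals(left, Boolean.TRUE) ? Boolean.FALSE : Boolean.TRUE;

        System.out.println(and); // false
        System.out.println(or);  // true
        System.out.println(not); // true
    }
}
```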
+ */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public abstract class Predicates { + + public static List splitAnd(Expression exp) { + if (exp instanceof And) { + And and = (And) exp; + List list = new ArrayList<>(); + list.addAll(splitAnd(and.left())); + list.addAll(splitAnd(and.right())); + return list; + } + return Collections.singletonList(exp); + } + + public static List splitOr(Expression exp) { + if (exp instanceof Or) { + Or or = (Or) exp; + List list = new ArrayList<>(); + list.addAll(splitOr(or.left())); + list.addAll(splitOr(or.right())); + return list; + } + return Collections.singletonList(exp); + } + + public static Expression combineOr(List exps) { + return exps.stream().reduce((l, r) -> new Or(l.location(), l, r)).orElse(null); + } + + public static Expression combineAnd(List exps) { + return exps.stream().reduce((l, r) -> new And(l.location(), l, r)).orElse(null); + } + + public static List inCommon(List l, List r) { + List common = new ArrayList<>(Math.min(l.size(), r.size())); + for (Expression lExp : l) { + for (Expression rExp : r) { + if (lExp.semanticEquals(rExp)) { + common.add(lExp); + } + } + } + return common.isEmpty() ? Collections.emptyList() : common; + } + + public static List subtract(List from, List r) { + List diff = new ArrayList<>(Math.min(from.size(), r.size())); + for (Expression lExp : from) { + for (Expression rExp : r) { + if (!lExp.semanticEquals(rExp)) { + diff.add(lExp); + } + } + } + return diff.isEmpty() ? Collections.emptyList() : diff; + } + + + public static boolean canEvaluate(Expression exp, LogicalPlan plan) { + return exp.references().subsetOf(plan.outputSet()); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Range.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Range.java new file mode 100644 index 0000000000000..96e427e90f166 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/Range.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
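The Predicates helpers above are the building blocks the optimizer uses to normalize filters: splitAnd flattens a conjunction tree into its conjuncts and combineAnd folds a list back into a left-deep AND. A fragment showing the round trip (a, b, c and loc stand for pre-existing Expression and Location instances):

```java
// Fragment: a, b, c are existing Expression instances, loc an existing Location.
Expression filter = new And(loc, a, new And(loc, b, c));

List<Expression> conjuncts = Predicates.splitAnd(filter);   // [a, b, c]
Expression rebuilt = Predicates.combineAnd(conjuncts);      // ((a AND b) AND c)
Expression none = Predicates.combineAnd(java.util.Collections.emptyList()); // null - callers must check
```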
+ */ +package org.elasticsearch.xpack.sql.expression.predicate; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +// BETWEEN or range - is a mix of gt(e) AND lt(e) +public class Range extends Expression { + + private final Expression value, lower, upper; + private final boolean includeLower, includeUpper; + + public Range(Location location, Expression value, Expression lower, boolean includeLower, Expression upper, boolean includeUpper) { + super(location, Arrays.asList(value, lower, upper)); + + this.value = value; + this.lower = lower; + this.upper = upper; + this.includeLower = includeLower; + this.includeUpper = includeUpper; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Range::new, value, lower, includeLower, upper, includeUpper); + } + + @Override + public Expression replaceChildren(List newChildren) { + if (newChildren.size() != 3) { + throw new IllegalArgumentException("expected [3] children but received [" + newChildren.size() + "]"); + } + return new Range(location(), newChildren.get(0), newChildren.get(1), includeLower, newChildren.get(2), includeUpper); + } + + public Expression value() { + return value; + } + + public Expression lower() { + return lower; + } + + public Expression upper() { + return upper; + } + + public boolean includeLower() { + return includeLower; + } + + public boolean includeUpper() { + return includeUpper; + } + + @Override + public boolean foldable() { + return value.foldable() && lower.foldable() && upper.foldable(); + } + + @Override + public Object fold() { + Object val = value.fold(); + Integer lowerCompare = BinaryComparison.compare(lower.fold(), val); + Integer upperCompare = BinaryComparison.compare(val, upper().fold()); + boolean lowerComparsion = lowerCompare == null ? false : (includeLower ? lowerCompare <= 0 : lowerCompare < 0); + boolean upperComparsion = upperCompare == null ? false : (includeUpper ? upperCompare <= 0 : upperCompare < 0); + return lowerComparsion && upperComparsion; + } + + @Override + public boolean nullable() { + return value.nullable() && lower.nullable() && upper.nullable(); + } + + @Override + public DataType dataType() { + return DataType.BOOLEAN; + } + + @Override + public int hashCode() { + return Objects.hash(includeLower, includeUpper, value, lower, upper); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Range other = (Range) obj; + return Objects.equals(includeLower, other.includeLower) + && Objects.equals(includeUpper, other.includeUpper) + && Objects.equals(value, other.value) + && Objects.equals(lower, other.lower) + && Objects.equals(upper, other.upper); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(lower); + sb.append(includeLower ? " <= " : " < "); + sb.append(value); + sb.append(includeUpper ? 
" <= " : " < "); + sb.append(upper); + return sb.toString(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/FullTextPredicate.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/FullTextPredicate.java new file mode 100644 index 0000000000000..c0f5edb8095d7 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/FullTextPredicate.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.predicate.fulltext; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public abstract class FullTextPredicate extends Expression { + + public enum Operator { + AND, + OR; + + public org.elasticsearch.index.query.Operator toEs() { + return org.elasticsearch.index.query.Operator.fromString(name()); + } + } + + private final String query; + private final String options; + private final Map optionMap; + // common properties + private final String analyzer; + + FullTextPredicate(Location location, String query, String options, List children) { + super(location, children); + this.query = query; + this.options = options; + // inferred + this.optionMap = FullTextUtils.parseSettings(options, location); + this.analyzer = optionMap.get("analyzer"); + } + + public String query() { + return query; + } + + public String options() { + return options; + } + + public Map optionMap() { + return optionMap; + } + + public String analyzer() { + return analyzer; + } + + @Override + public boolean nullable() { + return false; + } + + @Override + public DataType dataType() { + return DataType.BOOLEAN; + } + + @Override + public int hashCode() { + return Objects.hash(query, options); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + FullTextPredicate other = (FullTextPredicate) obj; + return Objects.equals(query, other.query) + && Objects.equals(options, other.options); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/FullTextUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/FullTextUtils.java new file mode 100644 index 0000000000000..f22f46cad2b03 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/FullTextUtils.java @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.predicate.fulltext; + +import java.util.LinkedHashMap; +import java.util.Locale; +import java.util.Map; +import java.util.Set; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.FullTextPredicate.Operator; +import org.elasticsearch.xpack.sql.parser.ParsingException; +import org.elasticsearch.xpack.sql.tree.Location; + +import static java.util.Collections.emptyMap; + +abstract class FullTextUtils { + + private static final String DELIMITER = ";"; + + static Map parseSettings(String options, Location location) { + if (!Strings.hasText(options)) { + return emptyMap(); + } + String[] list = Strings.delimitedListToStringArray(options, DELIMITER); + Map op = new LinkedHashMap(list.length); + + for (String entry : list) { + String[] split = splitInTwo(entry, "="); + if (split == null) { + throw new ParsingException(location, "Cannot parse entry {} in options {}", entry, options); + } + + String previous = op.put(split[0], split[1]); + if (previous != null) { + throw new ParsingException(location, "Duplicate option {} detected in options {}", entry, options); + } + + } + return op; + } + + static Map parseFields(Map options, Location location) { + return parseFields(options.get("fields"), location); + } + + static Map parseFields(String fieldString, Location location) { + if (!Strings.hasText(fieldString)) { + return emptyMap(); + } + Set fieldNames = Strings.commaDelimitedListToSet(fieldString); + + Float defaultBoost = Float.valueOf(1.0f); + Map fields = new LinkedHashMap<>(); + + for (String fieldName : fieldNames) { + if (fieldName.contains("^")) { + String[] split = splitInTwo(fieldName, "^"); + if (split == null) { + fields.put(fieldName, defaultBoost); + } + else { + try { + fields.put(split[0], Float.parseFloat(split[1])); + } catch (NumberFormatException nfe) { + throw new ParsingException(location, "Cannot parse boosting for {}", fieldName); + } + } + } + else { + fields.put(fieldName, defaultBoost); + } + } + + return fields; + } + + private static String[] splitInTwo(String string, String delimiter) { + String[] split = Strings.split(string, delimiter); + if (split == null || split.length != 2) { + return null; + } + return split; + } + + static FullTextPredicate.Operator operator(Map options, String key) { + String value = options.get(key); + return value != null ? Operator.valueOf(value.toUpperCase(Locale.ROOT)) : null; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/MatchQueryPredicate.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/MatchQueryPredicate.java new file mode 100644 index 0000000000000..1235cb1a205e6 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/MatchQueryPredicate.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
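For the option parsing just introduced: options arrive as a semicolon-separated key=value string, and field lists may carry a caret boost. The fragment below (same-package access assumed, with `loc` an existing Location) shows the shapes these helpers produce:

```java
// Fragment within ...predicate.fulltext; `loc` is an existing Location.
Map<String, String> opts =
        FullTextUtils.parseSettings("analyzer=standard;default_operator=AND", loc);
// -> {analyzer=standard, default_operator=AND}; a duplicate key raises a ParsingException

Map<String, Float> fields = FullTextUtils.parseFields("title^2,body", loc);
// -> {title=2.0, body=1.0}; fields without an explicit ^boost default to 1.0
```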
+ */ +package org.elasticsearch.xpack.sql.expression.predicate.fulltext; + +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import static java.util.Collections.singletonList; + +import java.util.List; + +public class MatchQueryPredicate extends FullTextPredicate { + + private final Expression field; + + public MatchQueryPredicate(Location location, Expression field, String query, String options) { + super(location, query, options, singletonList(field)); + this.field = field; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, MatchQueryPredicate::new, field, query(), options()); + } + + @Override + public MatchQueryPredicate replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return new MatchQueryPredicate(location(), newChildren.get(0), query(), options()); + } + + public Expression field() { + return field; + } + + @Override + public int hashCode() { + return Objects.hash(field, super.hashCode()); + } + + @Override + public boolean equals(Object obj) { + if (super.equals(obj)) { + MatchQueryPredicate other = (MatchQueryPredicate) obj; + return Objects.equals(field, other.field); + } + return false; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/MultiMatchQueryPredicate.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/MultiMatchQueryPredicate.java new file mode 100644 index 0000000000000..eb3df01d6ef58 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/MultiMatchQueryPredicate.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.predicate.fulltext; + +import java.util.Map; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import static java.util.Collections.emptyList; + +import java.util.List; + +public class MultiMatchQueryPredicate extends FullTextPredicate { + + private final String fieldString; + private final Map fields; + + public MultiMatchQueryPredicate(Location location, String fieldString, String query, String options) { + super(location, query, options, emptyList()); + this.fieldString = fieldString; + // inferred + this.fields = FullTextUtils.parseFields(fieldString, location); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, MultiMatchQueryPredicate::new, fieldString, query(), options()); + } + + @Override + public Expression replaceChildren(List newChildren) { + throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); + } + + public String fieldString() { + return fieldString; + } + + public Map fields() { + return fields; + } + + @Override + public int hashCode() { + return Objects.hash(fieldString, super.hashCode()); + } + + @Override + public boolean equals(Object obj) { + if (super.equals(obj)) { + MultiMatchQueryPredicate other = (MultiMatchQueryPredicate) obj; + return Objects.equals(fieldString, other.fieldString); + } + return false; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/StringQueryPredicate.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/StringQueryPredicate.java new file mode 100644 index 0000000000000..3275044477495 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/fulltext/StringQueryPredicate.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.predicate.fulltext; + +import java.util.Map; +import java.util.List; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import static java.util.Collections.emptyList; + +public class StringQueryPredicate extends FullTextPredicate { + + private final Map fields; + + public StringQueryPredicate(Location location, String query, String options) { + super(location, query, options, emptyList()); + + // inferred + this.fields = FullTextUtils.parseFields(optionMap(), location); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, StringQueryPredicate::new, query(), options()); + } + + @Override + public Expression replaceChildren(List newChildren) { + throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); + } + + public Map fields() { + return fields; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/regex/Like.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/regex/Like.java new file mode 100644 index 0000000000000..ef2635cec3761 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/regex/Like.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.regex; + +import org.elasticsearch.xpack.sql.expression.BinaryExpression; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import java.util.regex.Pattern; + +public class Like extends BinaryExpression { + + public Like(Location location, Expression left, LikePattern right) { + super(location, left, right); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Like::new, left(), right()); + } + + @Override + protected BinaryExpression replaceChildren(Expression newLeft, Expression newRight) { + return new Like(location(), newLeft, (LikePattern) newRight); + } + + public LikePattern right() { + return (LikePattern) super.right(); + } + + @Override + public boolean foldable() { + // right() is not directly foldable in any context but Like can fold it. + return left().foldable(); + } + + @Override + public Object fold() { + Pattern p = Pattern.compile(right().asJavaRegex()); + return p.matcher(left().fold().toString()).matches(); + } + + @Override + public Like swapLeftAndRight() { + return this; + } + + @Override + public DataType dataType() { + return DataType.BOOLEAN; + } + + @Override + public String symbol() { + return "LIKE"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/regex/LikePattern.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/regex/LikePattern.java new file mode 100644 index 0000000000000..b9c10435660a9 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/regex/LikePattern.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.regex; + +import org.elasticsearch.xpack.sql.expression.LeafExpression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.Objects; + +/** + * A SQL 'like' pattern. + * Similar to basic regex, supporting '_' instead of '?' and '%' instead of '*'. + *

+ * Allows escaping based on a regular char. + * + * To prevent conflicts with ES, the string and char must be validated to not contain '*'. + */ +public class LikePattern extends LeafExpression { + + private final String pattern; + private final char escape; + + private final String regex; + private final String wildcard; + private final String indexNameWildcard; + + public LikePattern(Location location, String pattern, char escape) { + super(location); + this.pattern = pattern; + this.escape = escape; + // early initialization to force string validation + this.regex = StringUtils.likeToJavaPattern(pattern, escape); + this.wildcard = StringUtils.likeToLuceneWildcard(pattern, escape); + this.indexNameWildcard = StringUtils.likeToIndexWildcard(pattern, escape); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, LikePattern::new, pattern, escape); + } + + public String pattern() { + return pattern; + } + + public char escape() { + return escape; + } + + /** + * Returns the pattern in (Java) regex format. + */ + public String asJavaRegex() { + return regex; + } + + /** + * Returns the pattern in (Lucene) wildcard format. + */ + public String asLuceneWildcard() { + return wildcard; + } + + /** + * Returns the pattern in (IndexNameExpressionResolver) wildcard format. + */ + public String asIndexNameWildcard() { + return indexNameWildcard; + } + + @Override + public boolean nullable() { + return false; + } + + @Override + public DataType dataType() { + return DataType.KEYWORD; + } + + @Override + public int hashCode() { + return Objects.hash(pattern, escape); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + LikePattern other = (LikePattern) obj; + return Objects.equals(pattern, other.pattern) + && escape == other.escape; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/regex/RLike.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/regex/RLike.java new file mode 100644 index 0000000000000..7d3879ef84fad --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/regex/RLike.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
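To illustrate the pattern semantics documented on LikePattern ('%' matches any string, '_' any single character, plus a user-chosen escape char), here is a small standalone conversion to a Java regex. It is deliberately not the plugin's StringUtils implementation, only a demonstration of the mapping:

```java
import java.util.regex.Pattern;

public class LikeToRegexDemo {
    static String likeToRegex(String pattern, char escape) {
        StringBuilder regex = new StringBuilder("^");
        for (int i = 0; i < pattern.length(); i++) {
            char c = pattern.charAt(i);
            if (c == escape && i + 1 < pattern.length()) {
                regex.append(Pattern.quote(String.valueOf(pattern.charAt(++i)))); // escaped char is literal
            } else if (c == '%') {
                regex.append(".*");   // '%' -> any string
            } else if (c == '_') {
                regex.append(".");    // '_' -> exactly one character
            } else {
                regex.append(Pattern.quote(String.valueOf(c)));
            }
        }
        return regex.append("$").toString();
    }

    public static void main(String[] args) {
        System.out.println("bar".matches(likeToRegex("b_r", '\\')));      // true
        System.out.println("barn".matches(likeToRegex("b_r", '\\')));     // false - '_' is one char only
        System.out.println("100%".matches(likeToRegex("100\\%", '\\')));  // true - escaped '%' is literal
    }
}
```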
+ */ +package org.elasticsearch.xpack.sql.expression.regex; + +import org.elasticsearch.xpack.sql.expression.BinaryExpression; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import java.util.regex.Pattern; + +public class RLike extends BinaryExpression { + + public RLike(Location location, Expression left, Literal right) { + super(location, left, right); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, RLike::new, left(), right()); + } + + @Override + protected BinaryExpression replaceChildren(Expression newLeft, Expression newRight) { + return new RLike(location(), newLeft, (Literal) newRight); + } + + public Literal right() { + return (Literal) super.right(); + } + + @Override + public Object fold() { + Pattern p = Pattern.compile(right().fold().toString()); + return p.matcher(left().fold().toString()).matches(); + } + + @Override + public RLike swapLeftAndRight() { + return this; + } + + @Override + public DataType dataType() { + return DataType.BOOLEAN; + } + + @Override + public String symbol() { + return "RLIKE"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java new file mode 100644 index 0000000000000..a56be01c95534 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java @@ -0,0 +1,1438 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
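Since Like and RLike above can constant-fold, it is worth noting what fold() reduces to once both sides are literals: compiling the pattern and requiring a whole-string match via Matcher.matches(). In plain Java terms (the exact regex produced by asJavaRegex is not reproduced here, only the matching behaviour):

```java
import java.util.regex.Pattern;

public class RegexFoldDemo {
    public static void main(String[] args) {
        // RLike.fold(): the right-hand literal is used directly as a Java regex
        System.out.println(Pattern.compile("ba.*").matcher("bartender").matches()); // true

        // Like.fold(): the LikePattern is first converted to a Java regex; matches()
        // anchors the match to the whole input string
        System.out.println(Pattern.compile("ba.").matcher("bar").matches());        // true
    }
}
```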
+ */ +package org.elasticsearch.xpack.sql.optimizer; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.Alias; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.AttributeMap; +import org.elasticsearch.xpack.sql.expression.AttributeSet; +import org.elasticsearch.xpack.sql.expression.BinaryExpression; +import org.elasticsearch.xpack.sql.expression.BinaryOperator.Negateable; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.ExpressionId; +import org.elasticsearch.xpack.sql.expression.ExpressionSet; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.expression.Order; +import org.elasticsearch.xpack.sql.expression.function.Function; +import org.elasticsearch.xpack.sql.expression.function.FunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.Functions; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.aggregate.ExtendedStats; +import org.elasticsearch.xpack.sql.expression.function.aggregate.ExtendedStatsEnclosed; +import org.elasticsearch.xpack.sql.expression.function.aggregate.InnerAggregate; +import org.elasticsearch.xpack.sql.expression.function.aggregate.MatrixStats; +import org.elasticsearch.xpack.sql.expression.function.aggregate.MatrixStatsEnclosed; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Percentile; +import org.elasticsearch.xpack.sql.expression.function.aggregate.PercentileRank; +import org.elasticsearch.xpack.sql.expression.function.aggregate.PercentileRanks; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Percentiles; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Stats; +import org.elasticsearch.xpack.sql.expression.function.scalar.Cast; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.predicate.And; +import org.elasticsearch.xpack.sql.expression.predicate.BinaryComparison; +import org.elasticsearch.xpack.sql.expression.predicate.Equals; +import org.elasticsearch.xpack.sql.expression.predicate.GreaterThan; +import org.elasticsearch.xpack.sql.expression.predicate.GreaterThanOrEqual; +import org.elasticsearch.xpack.sql.expression.predicate.LessThan; +import org.elasticsearch.xpack.sql.expression.predicate.LessThanOrEqual; +import org.elasticsearch.xpack.sql.expression.predicate.Not; +import org.elasticsearch.xpack.sql.expression.predicate.Or; +import org.elasticsearch.xpack.sql.expression.predicate.Range; +import org.elasticsearch.xpack.sql.plan.logical.Aggregate; +import org.elasticsearch.xpack.sql.plan.logical.Filter; +import org.elasticsearch.xpack.sql.plan.logical.Limit; +import org.elasticsearch.xpack.sql.plan.logical.LocalRelation; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.logical.OrderBy; +import org.elasticsearch.xpack.sql.plan.logical.Project; +import 
org.elasticsearch.xpack.sql.plan.logical.SubQueryAlias; +import org.elasticsearch.xpack.sql.rule.Rule; +import org.elasticsearch.xpack.sql.rule.RuleExecutor; +import org.elasticsearch.xpack.sql.session.EmptyExecutable; +import org.elasticsearch.xpack.sql.session.SingletonExecutable; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; + +import static java.util.stream.Collectors.toList; +import static org.elasticsearch.xpack.sql.expression.Literal.FALSE; +import static org.elasticsearch.xpack.sql.expression.Literal.TRUE; +import static org.elasticsearch.xpack.sql.expression.predicate.Predicates.combineAnd; +import static org.elasticsearch.xpack.sql.expression.predicate.Predicates.combineOr; +import static org.elasticsearch.xpack.sql.expression.predicate.Predicates.inCommon; +import static org.elasticsearch.xpack.sql.expression.predicate.Predicates.splitAnd; +import static org.elasticsearch.xpack.sql.expression.predicate.Predicates.splitOr; +import static org.elasticsearch.xpack.sql.expression.predicate.Predicates.subtract; +import static org.elasticsearch.xpack.sql.util.CollectionUtils.combine; + + +public class Optimizer extends RuleExecutor { + + public ExecutionInfo debugOptimize(LogicalPlan verified) { + return verified.optimized() ? null : executeWithInfo(verified); + } + + public LogicalPlan optimize(LogicalPlan verified) { + return verified.optimized() ? verified : execute(verified); + } + + @Override + protected Iterable.Batch> batches() { + Batch resolution = new Batch("Finish Analysis", + new PruneSubqueryAliases(), + CleanAliases.INSTANCE + ); + + Batch aggregate = new Batch("Aggregation", + new PruneDuplicatesInGroupBy(), + new ReplaceDuplicateAggsWithReferences(), + new ReplaceAggsWithMatrixStats(), + new ReplaceAggsWithExtendedStats(), + new ReplaceAggsWithStats(), + new PromoteStatsToExtendedStats(), + new ReplaceAggsWithPercentiles(), + new ReplceAggsWithPercentileRanks() + ); + + Batch operators = new Batch("Operator Optimization", + // combining + new CombineProjections(), + // folding + new ReplaceFoldableAttributes(), + new ConstantFolding(), + // boolean + new BooleanSimplification(), + new BinaryComparisonSimplification(), + new BooleanLiteralsOnTheRight(), + new CombineComparisonsIntoRange(), + // prune/elimination + new PruneFilters(), + new PruneOrderBy(), + new PruneOrderByNestedFields(), + new PruneCast() + // requires changes in the folding + // since the exact same function, with the same ID can appear in multiple places + // see https://github.com/elastic/x-pack-elasticsearch/issues/3527 + //new PruneDuplicateFunctions() + ); + + Batch local = new Batch("Skip Elasticsearch", + new SkipQueryOnLimitZero(), + new SkipQueryIfFoldingProjection() + ); + //new BalanceBooleanTrees()); + Batch label = new Batch("Set as Optimized", Limiter.ONCE, + new SetAsOptimized()); + + return Arrays.asList(resolution, aggregate, operators, local, label); + } + + + static class PruneSubqueryAliases extends OptimizerRule { + + PruneSubqueryAliases() { + super(TransformDirection.UP); + } + + @Override + protected LogicalPlan rule(SubQueryAlias alias) { + return alias.child(); + } + } + + static class CleanAliases extends OptimizerRule { + + private static final CleanAliases INSTANCE = new 
CleanAliases(); + + CleanAliases() { + super(TransformDirection.UP); + } + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + if (plan instanceof Project) { + Project p = (Project) plan; + return new Project(p.location(), p.child(), cleanExpressions(p.projections())); + } + + if (plan instanceof Aggregate) { + Aggregate a = (Aggregate) plan; + // clean group expressions + List cleanedGroups = a.groupings().stream().map(CleanAliases::trimAliases).collect(toList()); + return new Aggregate(a.location(), a.child(), cleanedGroups, cleanExpressions(a.aggregates())); + } + + return plan.transformExpressionsOnly(e -> { + if (e instanceof Alias) { + return ((Alias) e).child(); + } + return e; + }); + } + + private List cleanExpressions(List args) { + return args.stream().map(CleanAliases::trimNonTopLevelAliases).map(NamedExpression.class::cast) + .collect(toList()); + } + + static Expression trimNonTopLevelAliases(Expression e) { + if (e instanceof Alias) { + Alias a = (Alias) e; + return new Alias(a.location(), a.name(), a.qualifier(), trimAliases(a.child()), a.id()); + } + return trimAliases(e); + } + + private static Expression trimAliases(Expression e) { + return e.transformDown(Alias::child, Alias.class); + } + } + + static class PruneDuplicatesInGroupBy extends OptimizerRule { + + @Override + protected LogicalPlan rule(Aggregate agg) { + List groupings = agg.groupings(); + if (groupings.isEmpty()) { + return agg; + } + ExpressionSet unique = new ExpressionSet<>(groupings); + if (unique.size() != groupings.size()) { + return new Aggregate(agg.location(), agg.child(), new ArrayList<>(unique), agg.aggregates()); + } + return agg; + } + } + + static class ReplaceDuplicateAggsWithReferences extends OptimizerRule { + + @Override + protected LogicalPlan rule(Aggregate agg) { + List aggs = agg.aggregates(); + + Map unique = new HashMap<>(); + Map reverse = new HashMap<>(); + + // find duplicates by looking at the function and canonical form + for (NamedExpression ne : aggs) { + if (ne instanceof Alias) { + Alias a = (Alias) ne; + unique.putIfAbsent(a.child(), a); + reverse.putIfAbsent(ne, a.child()); + } + else { + unique.putIfAbsent(ne.canonical(), ne); + reverse.putIfAbsent(ne, ne.canonical()); + } + } + + if (unique.size() != aggs.size()) { + List newAggs = new ArrayList<>(aggs.size()); + for (NamedExpression ne : aggs) { + newAggs.add(unique.get(reverse.get(ne))); + } + return new Aggregate(agg.location(), agg.child(), agg.groupings(), newAggs); + } + + return agg; + } + } + + static class ReplaceAggsWithMatrixStats extends Rule { + + @Override + public LogicalPlan apply(LogicalPlan p) { + Map seen = new LinkedHashMap<>(); + Map promotedFunctionIds = new LinkedHashMap<>(); + + p = p.transformExpressionsUp(e -> rule(e, seen, promotedFunctionIds)); + + // nothing found + if (seen.isEmpty()) { + return p; + } + + return ReplaceAggsWithStats.updateAggAttributes(p, promotedFunctionIds); + } + + @Override + protected LogicalPlan rule(LogicalPlan e) { + return e; + } + + protected Expression rule(Expression e, Map seen, Map promotedIds) { + if (e instanceof MatrixStatsEnclosed) { + AggregateFunction f = (AggregateFunction) e; + + Expression argument = f.field(); + MatrixStats matrixStats = seen.get(argument); + + if (matrixStats == null) { + matrixStats = new MatrixStats(f.location(), argument); + seen.put(argument, matrixStats); + } + + InnerAggregate ia = new InnerAggregate(f.location(), f, matrixStats, f.field()); + promotedIds.putIfAbsent(f.functionId(), ia.toAttribute()); + return ia; + 
} + + return e; + } + } + + static class ReplaceAggsWithExtendedStats extends Rule { + + @Override + public LogicalPlan apply(LogicalPlan p) { + Map promotedFunctionIds = new LinkedHashMap<>(); + Map seen = new LinkedHashMap<>(); + p = p.transformExpressionsUp(e -> rule(e, seen, promotedFunctionIds)); + + // nothing found + if (seen.isEmpty()) { + return p; + } + + // update old agg attributes + return ReplaceAggsWithStats.updateAggAttributes(p, promotedFunctionIds); + } + + @Override + protected LogicalPlan rule(LogicalPlan e) { + return e; + } + + protected Expression rule(Expression e, Map seen, + Map promotedIds) { + if (e instanceof ExtendedStatsEnclosed) { + AggregateFunction f = (AggregateFunction) e; + + Expression argument = f.field(); + ExtendedStats extendedStats = seen.get(argument); + + if (extendedStats == null) { + extendedStats = new ExtendedStats(f.location(), argument); + seen.put(argument, extendedStats); + } + + InnerAggregate ia = new InnerAggregate(f, extendedStats); + promotedIds.putIfAbsent(f.functionId(), ia.toAttribute()); + return ia; + } + + return e; + } + } + + static class ReplaceAggsWithStats extends Rule { + + private static class Match { + final Stats stats; + int count = 1; + final Set> functionTypes = new LinkedHashSet<>(); + + Match(Stats stats) { + this.stats = stats; + } + + @Override + public String toString() { + return stats.toString(); + } + } + + @Override + public LogicalPlan apply(LogicalPlan p) { + Map potentialPromotions = new LinkedHashMap<>(); + + p.forEachExpressionsUp(e -> collect(e, potentialPromotions)); + + // no promotions found - skip + if (potentialPromotions.isEmpty()) { + return p; + } + + // start promotion + + // old functionId to new function attribute + Map promotedFunctionIds = new LinkedHashMap<>(); + + // 1. promote aggs to InnerAggs + p = p.transformExpressionsUp(e -> promote(e, potentialPromotions, promotedFunctionIds)); + + // 2. update the old agg attrs to the promoted agg functions + return updateAggAttributes(p, promotedFunctionIds); + } + + @Override + protected LogicalPlan rule(LogicalPlan e) { + return e; + } + + private Expression collect(Expression e, Map seen) { + if (Stats.isTypeCompatible(e)) { + AggregateFunction f = (AggregateFunction) e; + + Expression argument = f.field(); + Match match = seen.get(argument); + + if (match == null) { + match = new Match(new Stats(f.location(), argument)); + match.functionTypes.add(f.getClass()); + seen.put(argument, match); + } + else { + if (match.functionTypes.add(f.getClass())) { + match.count++; + } + } + } + + return e; + } + + private static Expression promote(Expression e, Map seen, Map attrs) { + if (Stats.isTypeCompatible(e)) { + AggregateFunction f = (AggregateFunction) e; + + Expression argument = f.field(); + Match counter = seen.get(argument); + + // if the stat has at least two different functions for it, promote it as stat + if (counter != null && counter.count > 1) { + InnerAggregate innerAgg = new InnerAggregate(f, counter.stats); + attrs.putIfAbsent(f.functionId(), innerAgg.toAttribute()); + return innerAgg; + } + } + return e; + } + + static LogicalPlan updateAggAttributes(LogicalPlan p, Map promotedFunctionIds) { + // 1. update old agg function attributes + p = p.transformExpressionsUp(e -> updateAggFunctionAttrs(e, promotedFunctionIds)); + + // 2. update all scalar function consumers of the promoted aggs + // since they contain the old ids in scrips and processorDefinitions that need regenerating + + // 2a. 
collect ScalarFunctions that unwrapped refer to any of the updated aggregates + // 2b. replace any of the old ScalarFunction attributes + + final Set newAggIds = new LinkedHashSet<>(promotedFunctionIds.size()); + + for (AggregateFunctionAttribute afa : promotedFunctionIds.values()) { + newAggIds.add(afa.functionId()); + } + + final Map updatedScalarAttrs = new LinkedHashMap<>(); + final Map updatedScalarAliases = new LinkedHashMap<>(); + + p = p.transformExpressionsUp(e -> { + + // replace scalar attributes of the old replaced functions + if (e instanceof ScalarFunctionAttribute) { + ScalarFunctionAttribute sfa = (ScalarFunctionAttribute) e; + // check aliases + sfa = updatedScalarAttrs.getOrDefault(sfa.functionId(), sfa); + // check scalars + sfa = updatedScalarAliases.getOrDefault(sfa.id(), sfa); + return sfa; + } + + // unwrap aliases as they 'hide' functions under their own attributes + if (e instanceof Alias) { + Attribute att = Expressions.attribute(e); + if (att instanceof ScalarFunctionAttribute) { + ScalarFunctionAttribute sfa = (ScalarFunctionAttribute) att; + // the underlying function has been updated + // thus record the alias as well + if (updatedScalarAttrs.containsKey(sfa.functionId())) { + updatedScalarAliases.put(sfa.id(), sfa); + } + } + } + + else if (e instanceof ScalarFunction) { + ScalarFunction sf = (ScalarFunction) e; + + // if it's a unseen function check if the function children/arguments refers to any of the promoted aggs + if (!updatedScalarAttrs.containsKey(sf.functionId()) && e.anyMatch(c -> { + Attribute a = Expressions.attribute(c); + if (a instanceof FunctionAttribute) { + return newAggIds.contains(((FunctionAttribute) a).functionId()); + } + return false; + })) { + // if so, record its attribute + updatedScalarAttrs.put(sf.functionId(), sf.toAttribute()); + } + } + + return e; + }); + + return p; + } + + + private static Expression updateAggFunctionAttrs(Expression e, Map promotedIds) { + if (e instanceof AggregateFunctionAttribute) { + AggregateFunctionAttribute ae = (AggregateFunctionAttribute) e; + AggregateFunctionAttribute promoted = promotedIds.get(ae.functionId()); + if (promoted != null) { + return ae.withFunctionId(promoted.functionId(), promoted.propertyPath()); + } + } + return e; + } + } + + static class PromoteStatsToExtendedStats extends Rule { + + @Override + public LogicalPlan apply(LogicalPlan p) { + Map seen = new LinkedHashMap<>(); + + // count the extended stats + p.forEachExpressionsUp(e -> count(e, seen)); + // then if there's a match, replace the stat inside the InnerAgg + return p.transformExpressionsUp(e -> promote(e, seen)); + } + + @Override + protected LogicalPlan rule(LogicalPlan e) { + return e; + } + + private void count(Expression e, Map seen) { + if (e instanceof InnerAggregate) { + InnerAggregate ia = (InnerAggregate) e; + if (ia.outer() instanceof ExtendedStats) { + ExtendedStats extStats = (ExtendedStats) ia.outer(); + seen.putIfAbsent(extStats.field(), extStats); + } + } + } + + protected Expression promote(Expression e, Map seen) { + if (e instanceof InnerAggregate) { + InnerAggregate ia = (InnerAggregate) e; + if (ia.outer() instanceof Stats) { + Stats stats = (Stats) ia.outer(); + ExtendedStats ext = seen.get(stats.field()); + if (ext != null && stats.field().equals(ext.field())) { + return new InnerAggregate(ia.inner(), ext); + } + } + } + + return e; + } + } + + static class ReplaceAggsWithPercentiles extends Rule { + + @Override + public LogicalPlan apply(LogicalPlan p) { + // percentile per field/expression + 
Map> percentsPerField = new LinkedHashMap<>(); + + // count gather the percents for each field + p.forEachExpressionsUp(e -> count(e, percentsPerField)); + + Map percentilesPerField = new LinkedHashMap<>(); + // create a Percentile agg for each field (and its associated percents) + percentsPerField.forEach((k, v) -> { + percentilesPerField.put(k, new Percentiles(v.iterator().next().location(), k, new ArrayList<>(v))); + }); + + // now replace the agg with pointer to the main ones + Map promotedFunctionIds = new LinkedHashMap<>(); + p = p.transformExpressionsUp(e -> rule(e, percentilesPerField, promotedFunctionIds)); + // finally update all the function references as well + return p.transformExpressionsDown(e -> ReplaceAggsWithStats.updateAggFunctionAttrs(e, promotedFunctionIds)); + } + + private void count(Expression e, Map> percentsPerField) { + if (e instanceof Percentile) { + Percentile p = (Percentile) e; + Expression field = p.field(); + Set percentiles = percentsPerField.get(field); + + if (percentiles == null) { + percentiles = new LinkedHashSet<>(); + percentsPerField.put(field, percentiles); + } + + percentiles.add(p.percent()); + } + } + + protected Expression rule(Expression e, Map percentilesPerField, + Map promotedIds) { + if (e instanceof Percentile) { + Percentile p = (Percentile) e; + Percentiles percentiles = percentilesPerField.get(p.field()); + + InnerAggregate ia = new InnerAggregate(p, percentiles); + promotedIds.putIfAbsent(p.functionId(), ia.toAttribute()); + return ia; + } + + return e; + } + + @Override + protected LogicalPlan rule(LogicalPlan e) { + return e; + } + } + + static class ReplceAggsWithPercentileRanks extends Rule { + + @Override + public LogicalPlan apply(LogicalPlan p) { + // percentile per field/expression + Map> valuesPerField = new LinkedHashMap<>(); + + // count gather the percents for each field + p.forEachExpressionsUp(e -> count(e, valuesPerField)); + + Map ranksPerField = new LinkedHashMap<>(); + // create a PercentileRanks agg for each field (and its associated values) + valuesPerField.forEach((k, v) -> { + ranksPerField.put(k, new PercentileRanks(v.iterator().next().location(), k, new ArrayList<>(v))); + }); + + // now replace the agg with pointer to the main ones + Map promotedFunctionIds = new LinkedHashMap<>(); + p = p.transformExpressionsUp(e -> rule(e, ranksPerField, promotedFunctionIds)); + // finally update all the function references as well + return p.transformExpressionsDown(e -> ReplaceAggsWithStats.updateAggFunctionAttrs(e, promotedFunctionIds)); + } + + private void count(Expression e, Map> ranksPerField) { + if (e instanceof PercentileRank) { + PercentileRank p = (PercentileRank) e; + Expression field = p.field(); + Set percentiles = ranksPerField.get(field); + + if (percentiles == null) { + percentiles = new LinkedHashSet<>(); + ranksPerField.put(field, percentiles); + } + + percentiles.add(p.value()); + } + } + + protected Expression rule(Expression e, Map ranksPerField, + Map promotedIds) { + if (e instanceof PercentileRank) { + PercentileRank p = (PercentileRank) e; + PercentileRanks ranks = ranksPerField.get(p.field()); + + InnerAggregate ia = new InnerAggregate(p, ranks); + promotedIds.putIfAbsent(p.functionId(), ia.toAttribute()); + return ia; + } + + return e; + } + + @Override + protected LogicalPlan rule(LogicalPlan e) { + return e; + } + } + + static class PruneFilters extends OptimizerRule { + + @Override + protected LogicalPlan rule(Filter filter) { + if (filter.condition() instanceof Literal) { + if 
(TRUE.equals(filter.condition())) { + return filter.child(); + } + // TODO: add comparison with null as well + if (FALSE.equals(filter.condition())) { + return new LocalRelation(filter.location(), new EmptyExecutable(filter.output())); + } + } + + return filter; + } + } + + static class ReplaceAliasesInHaving extends OptimizerRule { + + @Override + protected LogicalPlan rule(Filter filter) { + if (filter.child() instanceof Aggregate) { + Expression cond = filter.condition(); + // resolve attributes to their actual + Expression newCondition = cond.transformDown(a -> { + + return a; + }, AggregateFunctionAttribute.class); + + if (newCondition != cond) { + return new Filter(filter.location(), filter.child(), newCondition); + } + } + return filter; + } + } + + static class PruneOrderByNestedFields extends OptimizerRule { + + private void findNested(Expression exp, Map functions, Consumer onFind) { + exp.forEachUp(e -> { + if (e instanceof FunctionAttribute) { + FunctionAttribute sfa = (FunctionAttribute) e; + Function f = functions.get(sfa.functionId()); + if (f != null) { + findNested(f, functions, onFind); + } + } + if (e instanceof FieldAttribute) { + FieldAttribute fa = (FieldAttribute) e; + if (fa.isNested()) { + onFind.accept(fa); + } + } + }); + } + + @Override + protected LogicalPlan rule(Project project) { + // check whether OrderBy relies on nested fields which are not used higher up + if (project.child() instanceof OrderBy) { + OrderBy ob = (OrderBy) project.child(); + + // resolve function aliases (that are hiding the target) + Map functions = Functions.collectFunctions(project); + + // track the direct parents + Map nestedOrders = new LinkedHashMap<>(); + + for (Order order : ob.order()) { + // traverse the tree since the field might be wrapped in a function + findNested(order.child(), functions, fa -> nestedOrders.put(fa.nestedParent().name(), order)); + } + + // no nested fields in sort + if (nestedOrders.isEmpty()) { + return project; + } + + // count the nested parents (if any) inside the parents + List nestedTopFields = new ArrayList<>(); + + for (NamedExpression ne : project.projections()) { + // traverse the tree since the field might be wrapped in a function + findNested(ne, functions, fa -> nestedTopFields.add(fa.nestedParent().name())); + } + + List orders = new ArrayList<>(ob.order()); + // projection has no nested field references, remove any nested orders + if (nestedTopFields.isEmpty()) { + orders.removeAll(nestedOrders.values()); + } + else { + // remove orders that are not ancestors of the nested projections + for (Entry entry : nestedOrders.entrySet()) { + String parent = entry.getKey(); + boolean shouldKeep = false; + for (String topParent : nestedTopFields) { + if (topParent.startsWith(parent)) { + shouldKeep = true; + break; + } + } + if (!shouldKeep) { + orders.remove(entry.getValue()); + } + } + } + + // no orders left, eliminate it all-together + if (orders.isEmpty()) { + return new Project(project.location(), ob.child(), project.projections()); + } + + if (orders.size() != ob.order().size()) { + OrderBy newOrder = new OrderBy(ob.location(), ob.child(), orders); + return new Project(project.location(), newOrder, project.projections()); + } + } + return project; + } + } + + static class PruneOrderBy extends OptimizerRule { + + @Override + protected LogicalPlan rule(OrderBy ob) { + List order = ob.order(); + + // remove constants + List nonConstant = order.stream().filter(o -> !o.child().foldable()).collect(toList()); + + if (nonConstant.isEmpty()) { + return 
ob.child(); + } + + // if the sort points to an agg, consider it only if there's grouping + if (ob.child() instanceof Aggregate) { + Aggregate a = (Aggregate) ob.child(); + + if (a.groupings().isEmpty()) { + AttributeSet aggsAttr = new AttributeSet(Expressions.asAttributes(a.aggregates())); + + List nonAgg = nonConstant.stream().filter(o -> { + if (o.child() instanceof NamedExpression) { + return !aggsAttr.contains(((NamedExpression) o.child()).toAttribute()); + } + return true; + }).collect(toList()); + + return nonAgg.isEmpty() ? ob.child() : new OrderBy(ob.location(), ob.child(), nonAgg); + } + } + return ob; + } + } + + static class CombineLimits extends OptimizerRule { + + @Override + protected LogicalPlan rule(Limit limit) { + if (limit.child() instanceof Limit) { + throw new UnsupportedOperationException("not implemented yet"); + } + throw new UnsupportedOperationException("not implemented yet"); + } + } + + // NB: it is important to start replacing casts from the bottom to properly replace aliases + static class PruneCast extends Rule { + + @Override + public LogicalPlan apply(LogicalPlan plan) { + return rule(plan); + } + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + final Map replacedCast = new LinkedHashMap<>(); + + // first eliminate casts inside Aliases + LogicalPlan transformed = plan.transformExpressionsUp(e -> { + // cast wrapped in an alias + if (e instanceof Alias) { + Alias as = (Alias) e; + if (as.child() instanceof Cast) { + Cast c = (Cast) as.child(); + + if (c.from() == c.to()) { + Alias newAs = new Alias(as.location(), as.name(), as.qualifier(), c.field(), as.id(), as.synthetic()); + replacedCast.put(as.toAttribute(), newAs.toAttribute()); + return newAs; + } + } + return e; + } + return e; + }); + + // then handle stand-alone casts (mixed together the cast rule will kick in before the alias) + transformed = transformed.transformExpressionsUp(e -> { + if (e instanceof Cast) { + Cast c = (Cast) e; + + if (c.from() == c.to()) { + Expression argument = c.field(); + if (argument instanceof NamedExpression) { + replacedCast.put(c.toAttribute(), ((NamedExpression) argument).toAttribute()); + } + + return argument; + } + } + return e; + }); + + + // replace attributes from previous removed Casts + if (!replacedCast.isEmpty()) { + return transformed.transformUp(p -> { + List newProjections = new ArrayList<>(); + + boolean changed = false; + for (NamedExpression ne : p.projections()) { + Attribute found = replacedCast.get(ne.toAttribute()); + if (found != null) { + changed = true; + newProjections.add(found); + } + else { + newProjections.add(ne.toAttribute()); + } + } + + return changed ? 
new Project(p.location(), p.child(), newProjections) : p; + + }, Project.class); + } + return transformed; + } + } + + static class PruneDuplicateFunctions extends Rule { + + @Override + public LogicalPlan apply(LogicalPlan p) { + List seen = new ArrayList<>(); + return p.transformExpressionsUp(e -> rule(e, seen)); + } + + @Override + protected LogicalPlan rule(LogicalPlan e) { + return e; + } + + protected Expression rule(Expression exp, List seen) { + Expression e = exp; + if (e instanceof Function) { + Function f = (Function) e; + for (Function seenFunction : seen) { + if (seenFunction != f && f.functionEquals(seenFunction)) { + return seenFunction; + } + } + seen.add(f); + } + + return exp; + } + } + + static class CombineProjections extends OptimizerRule { + + CombineProjections() { + super(TransformDirection.UP); + } + + @Override + protected LogicalPlan rule(Project project) { + LogicalPlan child = project.child(); + if (child instanceof Project) { + Project p = (Project) child; + // eliminate lower project but first replace the aliases in the upper one + return new Project(p.location(), p.child(), combineProjections(project.projections(), p.projections())); + } + if (child instanceof Aggregate) { + Aggregate a = (Aggregate) child; + return new Aggregate(a.location(), a.child(), a.groupings(), combineProjections(project.projections(), a.aggregates())); + } + + return project; + } + + // normally only the upper projections should survive but since the lower list might have aliases definitions + // that might be reused by the upper one, these need to be replaced. + // for example an alias defined in the lower list might be referred in the upper - without replacing it the alias becomes invalid + private List combineProjections(List upper, List lower) { + // collect aliases in the lower list + Map map = new LinkedHashMap<>(); + for (NamedExpression ne : lower) { + if (ne instanceof Alias) { + Alias a = (Alias) ne; + map.put(a.toAttribute(), a); + } + } + + AttributeMap aliases = new AttributeMap<>(map); + List replaced = new ArrayList<>(); + + // replace any matching attribute with a lower alias (if there's a match) + // but clean-up non-top aliases at the end + for (NamedExpression ne : upper) { + NamedExpression replacedExp = (NamedExpression) ne.transformUp(a -> { + Alias as = aliases.get(a); + return as != null ? as : a; + }, Attribute.class); + + replaced.add((NamedExpression) CleanAliases.trimNonTopLevelAliases(replacedExp)); + } + return replaced; + } + } + + + // replace attributes of foldable expressions with the foldable trees + // SELECT 5 a, 3 + 2 b ... 
WHERE a < 10 ORDER BY b + + static class ReplaceFoldableAttributes extends Rule { + + @Override + public LogicalPlan apply(LogicalPlan plan) { + return rule(plan); + } + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + Map aliases = new LinkedHashMap<>(); + List attrs = new ArrayList<>(); + + // find aliases of all projections + plan.forEachDown(p -> { + for (NamedExpression ne : p.projections()) { + if (ne instanceof Alias) { + if (((Alias) ne).child().foldable()) { + Attribute attr = ne.toAttribute(); + attrs.add(attr); + aliases.put(attr, (Alias) ne); + } + } + } + }, Project.class); + + if (attrs.isEmpty()) { + return plan; + } + + AtomicBoolean stop = new AtomicBoolean(false); + + // propagate folding up to unary nodes + // anything higher and the propagate stops + plan = plan.transformUp(p -> { + if (stop.get() == false && canPropagateFoldable(p)) { + return p.transformExpressionsDown(e -> { + if (e instanceof Attribute && attrs.contains(e)) { + Alias as = aliases.get(e); + if (as == null) { + // might need to implement an Attribute map + throw new SqlIllegalArgumentException("unsupported"); + } + return as; + } + return e; + }); + } + + if (p.children().size() > 1) { + stop.set(true); + } + + return p; + }); + + // finally clean-up aliases + return CleanAliases.INSTANCE.apply(plan); + + } + + private boolean canPropagateFoldable(LogicalPlan p) { + return p instanceof Project + || p instanceof Filter + || p instanceof SubQueryAlias + || p instanceof Aggregate + || p instanceof Limit + || p instanceof OrderBy; + } + } + + static class ConstantFolding extends OptimizerExpressionRule { + + ConstantFolding() { + super(TransformDirection.DOWN); + } + + @Override + protected Expression rule(Expression e) { + // handle aliases to avoid double aliasing of functions + // alias points to function which gets folded and wrapped in an alias that is + // aliases + if (e instanceof Alias) { + Alias a = (Alias) e; + Expression fold = fold(a.child()); + if (fold != a.child()) { + return new Alias(a.location(), a.name(), null, fold, a.id()); + } + return a; + } + + Expression fold = fold(e); + if (fold != e) { + // preserve the name through an alias + if (e instanceof NamedExpression) { + NamedExpression ne = (NamedExpression) e; + return new Alias(e.location(), ne.name(), null, fold, ne.id()); + } + return fold; + } + return e; + } + + private Expression fold(Expression e) { + // literals are always foldable, so avoid creating a duplicate + if (e.foldable() && !(e instanceof Literal)) { + return new Literal(e.location(), e.fold(), e.dataType()); + } + return e; + } + } + + static class BooleanSimplification extends OptimizerExpressionRule { + + BooleanSimplification() { + super(TransformDirection.UP); + } + + @Override + protected Expression rule(Expression e) { + if (e instanceof BinaryExpression) { + return simplifyAndOr((BinaryExpression) e); + } + if (e instanceof Not) { + return simplifyNot((Not) e); + } + + return e; + } + + private Expression simplifyAndOr(BinaryExpression bc) { + Expression l = bc.left(); + Expression r = bc.right(); + + if (bc instanceof And) { + if (TRUE.equals(l)) { + return r; + } + if (TRUE.equals(r)) { + return l; + } + + if (FALSE.equals(l) || FALSE.equals(r)) { + return FALSE; + } + if (l.semanticEquals(r)) { + return l; + } + + // + // common factor extraction -> (a || b) && (a || c) => a && (b || c) + // + List leftSplit = splitOr(l); + List rightSplit = splitOr(r); + + List common = inCommon(leftSplit, rightSplit); + if (common.isEmpty()) { + return 
bc; + } + List lDiff = subtract(leftSplit, common); + List rDiff = subtract(rightSplit, common); + // (a || b || c || ... ) && (a || b) => (a || b) + if (lDiff.isEmpty() || rDiff.isEmpty()) { + return combineOr(common); + } + // (a || b || c || ... ) && (a || b || d || ... ) => ((c || ...) && (d || ...)) || a || b + Expression combineLeft = combineOr(lDiff); + Expression combineRight = combineOr(rDiff); + return combineOr(combine(common, new And(combineLeft.location(), combineLeft, combineRight))); + } + + if (bc instanceof Or) { + if (TRUE.equals(l) || TRUE.equals(r)) { + return TRUE; + } + + if (FALSE.equals(l)) { + return r; + } + if (FALSE.equals(r)) { + return l; + } + + if (l.semanticEquals(r)) { + return l; + } + + // + // common factor extraction -> (a && b) || (a && c) => a || (b & c) + // + List leftSplit = splitAnd(l); + List rightSplit = splitAnd(r); + + List common = inCommon(leftSplit, rightSplit); + if (common.isEmpty()) { + return bc; + } + List lDiff = subtract(leftSplit, common); + List rDiff = subtract(rightSplit, common); + // (a || b || c || ... ) && (a || b) => (a || b) + if (lDiff.isEmpty() || rDiff.isEmpty()) { + return combineAnd(common); + } + // (a || b || c || ... ) && (a || b || d || ... ) => ((c || ...) && (d || ...)) || a || b + Expression combineLeft = combineAnd(lDiff); + Expression combineRight = combineAnd(rDiff); + return combineAnd(combine(common, new Or(combineLeft.location(), combineLeft, combineRight))); + } + + // TODO: eliminate conjunction/disjunction + return bc; + } + + private Expression simplifyNot(Not n) { + Expression c = n.child(); + + if (TRUE.equals(c)) { + return FALSE; + } + if (FALSE.equals(c)) { + return TRUE; + } + + if (c instanceof Negateable) { + return ((Negateable) c).negate(); + } + + if (c instanceof Not) { + return ((Not) c).child(); + } + + return n; + } + } + + static class BinaryComparisonSimplification extends OptimizerExpressionRule { + + BinaryComparisonSimplification() { + super(TransformDirection.UP); + } + + @Override + protected Expression rule(Expression e) { + return e instanceof BinaryComparison ? simplify((BinaryComparison) e) : e; + } + + private Expression simplify(BinaryComparison bc) { + Expression l = bc.left(); + Expression r = bc.right(); + + // true for equality + if (bc instanceof Equals || bc instanceof GreaterThanOrEqual || bc instanceof LessThanOrEqual) { + if (!l.nullable() && !r.nullable() && l.semanticEquals(r)) { + return TRUE; + } + } + + // false for equality + if (bc instanceof GreaterThan || bc instanceof LessThan) { + if (!l.nullable() && !r.nullable() && l.semanticEquals(r)) { + return FALSE; + } + } + + return bc; + } + } + + static class BooleanLiteralsOnTheRight extends OptimizerExpressionRule { + + BooleanLiteralsOnTheRight() { + super(TransformDirection.UP); + } + + @Override + protected Expression rule(Expression e) { + return e instanceof BinaryExpression ? literalToTheRight((BinaryExpression) e) : e; + } + + private Expression literalToTheRight(BinaryExpression be) { + return be.left() instanceof Literal && !(be.right() instanceof Literal) ? be.swapLeftAndRight() : be; + } + } + + static class CombineComparisonsIntoRange extends OptimizerExpressionRule { + + CombineComparisonsIntoRange() { + super(TransformDirection.UP); + } + + @Override + protected Expression rule(Expression e) { + return e instanceof And ? 
combine((And) e) : e; + } + + private Expression combine(And and) { + Expression l = and.left(); + Expression r = and.right(); + + if (l instanceof BinaryComparison && r instanceof BinaryComparison) { + // if the same operator is used + BinaryComparison lb = (BinaryComparison) l; + BinaryComparison rb = (BinaryComparison) r; + + + if (lb.left().equals(((BinaryComparison) r).left()) && lb.right() instanceof Literal && rb.right() instanceof Literal) { + // >/>= AND />= + else if ((r instanceof GreaterThan || r instanceof GreaterThanOrEqual) + && (l instanceof LessThan || l instanceof LessThanOrEqual)) { + return new Range(and.location(), rb.left(), rb.right(), r instanceof GreaterThanOrEqual, lb.right(), + l instanceof LessThanOrEqual); + } + } + } + + return and; + } + } + + + static class SkipQueryOnLimitZero extends OptimizerRule { + @Override + protected LogicalPlan rule(Limit limit) { + if (limit.limit() instanceof Literal) { + if (Integer.valueOf(0).equals((((Literal) limit.limit()).fold()))) { + return new LocalRelation(limit.location(), new EmptyExecutable(limit.output())); + } + } + return limit; + } + } + + static class SkipQueryIfFoldingProjection extends OptimizerRule { + @Override + protected LogicalPlan rule(LogicalPlan plan) { + if (plan instanceof Project) { + Project p = (Project) plan; + List values = extractConstants(p.projections()); + if (values.size() == p.projections().size()) { + return new LocalRelation(p.location(), new SingletonExecutable(p.output(), values.toArray())); + } + } + if (plan instanceof Aggregate) { + Aggregate a = (Aggregate) plan; + List values = extractConstants(a.aggregates()); + if (values.size() == a.aggregates().size()) { + return new LocalRelation(a.location(), new SingletonExecutable(a.output(), values.toArray())); + } + } + return plan; + } + + private List extractConstants(List named) { + List values = new ArrayList<>(); + for (NamedExpression n : named) { + if (n instanceof Alias) { + Alias a = (Alias) n; + if (a.child().foldable()) { + values.add(a.child().fold()); + } + else { + return values; + } + } + } + return values; + } + } + + + static class SetAsOptimized extends Rule { + + @Override + public LogicalPlan apply(LogicalPlan plan) { + plan.forEachUp(this::rule); + return plan; + } + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + if (!plan.optimized()) { + plan.setOptimized(); + } + return plan; + } + } + + + abstract static class OptimizerRule extends Rule { + + private final TransformDirection direction; + + OptimizerRule() { + this(TransformDirection.DOWN); + } + + protected OptimizerRule(TransformDirection direction) { + this.direction = direction; + } + + + @Override + public final LogicalPlan apply(LogicalPlan plan) { + return direction == TransformDirection.DOWN ? + plan.transformDown(this::rule, typeToken()) : plan.transformUp(this::rule, typeToken()); + } + + @Override + protected abstract LogicalPlan rule(SubPlan plan); + } + + abstract static class OptimizerExpressionRule extends Rule { + + private final TransformDirection direction; + + OptimizerExpressionRule(TransformDirection direction) { + this.direction = direction; + } + + @Override + public final LogicalPlan apply(LogicalPlan plan) { + return direction == TransformDirection.DOWN ? 
plan.transformExpressionsDown(this::rule) : plan + .transformExpressionsUp(this::rule); + } + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + return plan; + } + + protected abstract Expression rule(Expression e); + } + + enum TransformDirection { + UP, DOWN + }; +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/package-info.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/package-info.java new file mode 100644 index 0000000000000..9528df0332df3 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/package-info.java @@ -0,0 +1,183 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * X-Pack SQL module is a SQL interface to Elasticsearch.
+ * In a nutshell, currently, SQL acts as a translator, allowing
+ * traditional SQL queries to be executed against Elasticsearch indices
+ * without any modifications. Do note that SQL does not try to hide
+ * Elasticsearch or abstract it in any way; rather it maps the given SQL,
+ * if possible, to one (at the moment) query DSL. Of course, this means
+ * not all SQL queries are supported.
+ *
+ * <h2>Premise</h2>
+ * Since Elasticsearch is not a database, nor does it support arbitrary
+ * {@code JOIN}s (a cornerstone of SQL), the SQL module is built from the
+ * ground up with Elasticsearch in mind first and SQL second. In fact,
+ * even the grammar introduces Elasticsearch-specific components that
+ * have no concept in ANSI SQL.
+ *
+ * <h2>Architecture</h2>
+ * The SQL module is roughly based on the Volcano project (by Graefe
+ * {@code &} co) [1] [2] [3],
+ * which argues for several design principles, two of which are relevant
+ * to this project, namely:
+ *
+ * <dl>
+ *     <dt>Logical and Physical algebra</dt>
+ *     <dd>Use of an extensible algebraic and logical set of operators to
+ *     describe the operation to the underlying engine. The engine's job is to
+ *     map a user query into logical algebra and then translate this into
+ *     physical algebra.</dd>
+ *     <dt>Rules to identify patterns</dt>
+ *     <dd>The use of rules as a way to identify relevant
+ *     patterns inside the plans that can be worked upon.</dd>
+ * </dl>
+ *
+ * In other words, the engine uses a logical plan, which represents what the
+ * user has requested, and a physical plan, which is what the engine needs
+ * to execute based on the user request. To manipulate the plans, the
+ * engine does pattern matching, implemented as rules that get applied over
+ * and over until none matches.
+ * An example of a rule would be expanding {@code *} to actual concrete
+ * references.
+ *
+ * As a side note, the Volcano model has proved quite popular, being used
+ * (to different degrees) by the majority of SQL engines out there, such
+ * as Apache Calcite, Apache Impala, Apache Spark and Facebook Presto.
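+ *
+ * To make this concrete, one of the simplest rules in the {@code Optimizer} added by this
+ * change is {@code PruneSubqueryAliases}, which matches {@code SubQueryAlias} nodes and
+ * replaces them with their child (excerpt, slightly abridged):
+ * <pre>{@code
+ * static class PruneSubqueryAliases extends OptimizerRule<SubQueryAlias> {
+ *     protected LogicalPlan rule(SubQueryAlias alias) {
+ *         // the pattern is simply "a SubQueryAlias node"; the transformation drops the alias wrapper
+ *         return alias.child();
+ *     }
+ * }
+ * }</pre>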

+ *
+ * <h2>Concepts</h2>
+ *
+ * The building operation of the SQL engine is defined by an action,
+ * namely a rule (defined in the {@link org.elasticsearch.xpack.sql.rule rule}
+ * package) that accepts one immutable tree (defined in the
+ * {@link org.elasticsearch.xpack.sql.tree tree} package) and transforms
+ * it into another immutable tree.
+ * Each rule looks for a certain pattern that it can identify and
+ * then transform.
+ *
+ * The engine works with 3 main types of trees:
+ *
+ * <dl>
+ *     <dt>Logical plan</dt>
+ *     <dd>Logical representation of a user query. Any transformation
+ *     of this plan should result in an equivalent plan - meaning for
+ *     the same input, it will generate the same output.</dd>
+ *     <dt>Physical plan</dt>
+ *     <dd>Execution representation of a user query. This plan needs
+ *     to translate to (currently) one query to Elasticsearch. It is likely
+ *     that in the future (once we look into supporting {@code JOIN}s), different
+ *     strategies for generating a physical plan will be available, depending
+ *     on the cost.</dd>
+ *     <dt>Expression tree</dt>
+ *     <dd>Both the logical and physical plan contain expression trees
+ *     that need to be incorporated into the query. For the most part, most
+ *     of the work inside the engine revolves around expressions.</dd>
+ * </dl>
+ *
+ * All types of tree inside the engine have the following properties:
+ * <dl>
+ *     <dt>Immutability</dt>
+ *     <dd>Each node and its properties are immutable. A change in a property
+ *     results in a new node, which results in a new tree.</dd>
+ *     <dt>Resolution</dt>
+ *     <dd>Due to the algebraic nature of SQL, each tree has the notion of
+ *     resolution, which indicates whether it has been resolved or not. A node
+ *     can be resolved only if it and its children have all been
+ *     resolved.</dd>
+ *     <dt>Traversal</dt>
+ *     <dd>Each tree can be traversed top-to-bottom/pre-order/parents-first or
+ *     bottom-up/post-order/children-first. The difference in the traversal
+ *     depends on the pattern that is being identified.</dd>
+ * </dl>
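+ *
+ * As a small illustration (using the transformation methods that appear in the {@code Optimizer}
+ * added by this change), the same rewrite can be requested in either direction, and the right
+ * choice depends on the pattern being matched:
+ * <pre>{@code
+ * // post-order: children are rewritten before their parents (what most folding rules use)
+ * plan.transformExpressionsUp(this::rule);
+ *
+ * // pre-order: parents are rewritten before their children
+ * plan.transformExpressionsDown(this::rule);
+ * }</pre>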
+ *
+ *
+ * A typical flow inside the engine is the following:
+ *
+ * <ol>
+ *     <li>The engine is given a query</li>
+ *     <li>The query is parsed and transformed into an unresolved AST or logical plan</li>
+ *     <li>The logical plan gets analyzed and resolved</li>
+ *     <li>The logical plan gets optimized</li>
+ *     <li>The logical plan gets transformed into a physical plan</li>
+ *     <li>The physical plan gets mapped and then folded into an Elasticsearch query</li>
+ *     <li>The Elasticsearch query gets executed</li>
+ * </ol>
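+ *
+ * Expressed as a rough sketch (the class names below exist in this module, but constructor
+ * arguments are elided and the exact method signatures are approximate, so treat this as an
+ * assumption rather than the precise API):
+ * <pre>{@code
+ * LogicalPlan parsed    = new SqlParser().createStatement(sql);   // parse
+ * LogicalPlan analyzed  = analyzer.analyze(parsed);               // resolve attributes and verify
+ * LogicalPlan optimized = new Optimizer().optimize(analyzed);     // apply the optimization rules
+ * PhysicalPlan physical = planner.plan(optimized);                // map and fold into an ES query
+ * }</pre>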
+ *
+ * <h2>Digression - Visitors, pattern matching, {@code instanceof} and Java 10/11/12</h2>
+ *
+ * To implement the above concepts, several choices have been made in the
+ * engine (which are not common in the rest of the XPack code base), in
+ * particular the conventions/signatures of
+ * {@link org.elasticsearch.xpack.sql.tree.Node tree}s and the usage of
+ * {@code instanceof} inside
+ * {@link org.elasticsearch.xpack.sql.rule.Rule rule}s.
+ * Java doesn't provide any utilities for tree abstractions or pattern
+ * matching, for that matter. Typically for tree traversal one would employ
+ * the Visitor pattern; however, that is not a suitable candidate for SQL because:
+ * <ul>
+ *     <li>the visitor granularity is a node and patterns are likely to involve
+ *     multiple nodes</li>
+ *     <li>transforming a tree and identifying a pattern requires holding state,
+ *     which means either the tree or the visitor becomes stateful</li>
+ *     <li>a node can stop traversal (which is not desired)</li>
+ *     <li>it's unwieldy - every node type requires a dedicated {@code visit} method</li>
+ * </ul>
+ *
+ * While in Java there might be hope for the future, Scala has made pattern
+ * matching a core feature.
+ * Its byte-code implementation is less pretty as it relies on
+ * {@code instanceof} checks, which is how many rules are implemented in
+ * the SQL engine as well. Where possible though, one can use typed
+ * traversal by passing a {@code Class} token to the lambdas (i.e.
+ * {@link org.elasticsearch.xpack.sql.tree.Node#transformDown(java.util.function.Function, Class)
+ * pre-order transformation}).
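+ *
+ * For example, {@code CleanAliases} in the {@code Optimizer} added by this change uses the
+ * {@code Class} token to restrict a transformation to {@code Alias} nodes without an explicit
+ * {@code instanceof} check:
+ * <pre>{@code
+ * // replace every Alias in the tree with its child; nodes of any other type are left untouched
+ * private static Expression trimAliases(Expression e) {
+ *     return e.transformDown(Alias::child, Alias.class);
+ * }
+ * }</pre>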

+ *
+ * <h2>Components</h2>
+ *
+ * The SQL engine is made up of the following components:
+ * <dl>
+ *     <dt>{@link org.elasticsearch.xpack.sql.parser Parser} package</dt>
+ *     <dd>Tokenizer and Lexer of the SQL grammar. Translates the user query into an
+ *     AST tree ({@code LogicalPlan}). Makes sure the user query is syntactically
+ *     valid.</dd>
+ *     <dt>{@link org.elasticsearch.xpack.sql.analysis.analyzer.PreAnalyzer PreAnalyzer}</dt>
+ *     <dd>Performs basic inspection of the {@code LogicalPlan} for gathering critical
+ *     information for the main analysis. This stage is separate from {@code Analysis}
+ *     since it performs async/remote calls to the cluster.</dd>
+ *     <dt>{@link org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer Analyzer}</dt>
+ *     <dd>Performs {@code LogicalPlan} analysis, resolution and verification. Makes
+ *     sure the user query is actually valid and semantically sound.</dd>
+ *     <dt>{@link org.elasticsearch.xpack.sql.optimizer.Optimizer Optimizer}</dt>
+ *     <dd>Transforms the resolved {@code LogicalPlan} into a semantically
+ *     equivalent tree, meaning for the same input, the same output is produced.</dd>
+ *     <dt>{@link org.elasticsearch.xpack.sql.planner.Planner Planner}</dt>
+ *     <dd>Performs query planning. The planning is made up of two components:
+ *     <dl>
+ *         <dt>{@code Mapper}</dt>
+ *         <dd>Maps the {@code LogicalPlan} to a {@code PhysicalPlan}</dd>
+ *         <dt>{@code Folder}</dt>
+ *         <dd>Folds or rolls up the {@code PhysicalPlan} into an Elasticsearch
+ *         {@link org.elasticsearch.xpack.sql.plan.physical.EsQueryExec executable query}</dd>
+ *     </dl>
+ *     </dd>
+ *     <dt>{@link org.elasticsearch.xpack.sql.execution Execution}</dt>
+ *     <dd>Actual execution of the query, results retrieval, extraction and translation
+ *     into a {@link org.elasticsearch.xpack.sql.session.RowSet tabular} format.</dd>
+ * </dl>
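+ *
+ * The {@code Optimizer} component, added elsewhere in this change, is invoked with a single
+ * call; a minimal usage sketch (construction of the analyzed input plan is elided):
+ * <pre>{@code
+ * Optimizer optimizer = new Optimizer();
+ * // returns the plan untouched if it has already been marked as optimized
+ * LogicalPlan optimized = optimizer.optimize(analyzedPlan);
+ * }</pre>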
+ */ +package org.elasticsearch.xpack.sql; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/AbstractBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/AbstractBuilder.java new file mode 100644 index 0000000000000..480d22a9699d6 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/AbstractBuilder.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.parser; + +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.tree.ParseTree; +import org.antlr.v4.runtime.tree.TerminalNode; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.util.Check; + +import java.util.ArrayList; +import java.util.List; + +/** + * Base parsing visitor class offering utility methods. + * + * Implementation note: ANTLR 4 generates sources with a parameterized signature that isn't really useful for SQL. + * That is mainly because it forces each visitor method to return a node inside the generated AST which + * might be or not the case. + * Since the parser generates two types of trees ({@code LogicalPlan} and {@code Expression}) plus string handling, + * the generic signature does not fit and does give any advantage hence why it is erased, each subsequent + * child class acting as a layer for parsing and building its respective type + */ +abstract class AbstractBuilder extends SqlBaseBaseVisitor { + + @Override + public Object visit(ParseTree tree) { + Object result = super.visit(tree); + Check.notNull(result, "Don't know how to handle context [{}] with value [{}]", tree.getClass(), tree.getText()); + return result; + } + + @SuppressWarnings("unchecked") + protected T typedParsing(ParseTree ctx, Class type) { + Object result = ctx.accept(this); + if (type.isInstance(result)) { + return (T) result; + } + + throw new ParsingException(source(ctx), "Invalid query '{}'[{}] given; expected {} but found {}", + ctx.getText(), ctx.getClass().getSimpleName(), + type.getSimpleName(), (result != null ? 
result.getClass().getSimpleName() : "null")); + } + + protected LogicalPlan plan(ParseTree ctx) { + return typedParsing(ctx, LogicalPlan.class); + } + + protected List plans(List ctxs) { + return visitList(ctxs, LogicalPlan.class); + } + + protected List visitList(List contexts, Class clazz) { + List results = new ArrayList<>(contexts.size()); + for (ParserRuleContext context : contexts) { + results.add(clazz.cast(visit(context))); + } + return results; + } + + static Location source(ParseTree ctx) { + if (ctx instanceof ParserRuleContext) { + return source((ParserRuleContext) ctx); + } + return Location.EMPTY; + } + + static Location source(TerminalNode terminalNode) { + Check.notNull(terminalNode, "terminalNode is null"); + return source(terminalNode.getSymbol()); + } + + static Location source(ParserRuleContext parserRuleContext) { + Check.notNull(parserRuleContext, "parserRuleContext is null"); + return source(parserRuleContext.getStart()); + } + + static Location source(Token token) { + Check.notNull(token, "token is null"); + return new Location(token.getLine(), token.getCharPositionInLine()); + } + + /** + * Retrieves the raw text of the node (without interpreting it as a string literal). + */ + static String text(ParseTree node) { + return node == null ? null : node.getText(); + } + + /** + * Extracts the actual unescaped string (literal) value of a terminal node. + */ + static String string(TerminalNode node) { + return node == null ? null : unquoteString(node.getText()); + } + + static String unquoteString(String text) { + // remove leading and trailing ' for strings and also eliminate escaped single quotes + return text == null ? null : text.substring(1, text.length() - 1).replace("''", "'"); + } + + @Override + public Object visitTerminal(TerminalNode node) { + throw new ParsingException(source(node), "Does not know how to handle {}", node.getText()); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/AstBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/AstBuilder.java new file mode 100644 index 0000000000000..de28f33187260 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/AstBuilder.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.parser; + +import org.antlr.v4.runtime.Token; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.SingleStatementContext; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; + +import java.util.Map; + +class AstBuilder extends CommandBuilder { + /** + * Create AST Builder + * @param params a map between '?' 
tokens that represent parameters and the actual parameter values + */ + AstBuilder(Map params) { + super(params); + } + + @Override + public LogicalPlan visitSingleStatement(SingleStatementContext ctx) { + return plan(ctx.statement()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CaseInsensitiveStream.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CaseInsensitiveStream.java new file mode 100644 index 0000000000000..fff954ab592c5 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CaseInsensitiveStream.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.parser; + +import org.antlr.v4.runtime.ANTLRInputStream; +import org.antlr.v4.runtime.IntStream; + +import java.util.Locale; + +// extension of ANTLR that does the upper-casing once for the whole stream +// the ugly part is that it has to duplicate LA method + +// This approach is the official solution from the ANTLR authors +// in that it's both faster and easier than having a dedicated lexer +// see https://github.com/antlr/antlr4/issues/1002 +class CaseInsensitiveStream extends ANTLRInputStream { + protected char[] uppedChars; + + CaseInsensitiveStream(String input) { + super(input); + this.uppedChars = input.toUpperCase(Locale.ROOT).toCharArray(); + } + + // this part is copied from ANTLRInputStream + @Override + public int LA(int i) { + if (i == 0) { + return 0; // undefined + } + if (i < 0) { + i++; + if ((p + i - 1) < 0) { + return IntStream.EOF; + } + } + + if ((p + i - 1) >= n) { + return IntStream.EOF; + } + return uppedChars[p + i - 1]; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java new file mode 100644 index 0000000000000..fb08d08fcb926 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java @@ -0,0 +1,186 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.parser; + +import org.antlr.v4.runtime.Token; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexType; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.DebugContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ExplainContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ShowColumnsContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ShowFunctionsContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ShowSchemasContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ShowTablesContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StringContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.SysCatalogsContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.SysColumnsContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.SysTableTypesContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.SysTablesContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.SysTypesContext; +import org.elasticsearch.xpack.sql.plan.TableIdentifier; +import org.elasticsearch.xpack.sql.plan.logical.command.Command; +import org.elasticsearch.xpack.sql.plan.logical.command.Debug; +import org.elasticsearch.xpack.sql.plan.logical.command.Explain; +import org.elasticsearch.xpack.sql.plan.logical.command.ShowColumns; +import org.elasticsearch.xpack.sql.plan.logical.command.ShowFunctions; +import org.elasticsearch.xpack.sql.plan.logical.command.ShowSchemas; +import org.elasticsearch.xpack.sql.plan.logical.command.ShowTables; +import org.elasticsearch.xpack.sql.plan.logical.command.sys.SysCatalogs; +import org.elasticsearch.xpack.sql.plan.logical.command.sys.SysColumns; +import org.elasticsearch.xpack.sql.plan.logical.command.sys.SysTableTypes; +import org.elasticsearch.xpack.sql.plan.logical.command.sys.SysTables; +import org.elasticsearch.xpack.sql.plan.logical.command.sys.SysTypes; +import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +abstract class CommandBuilder extends LogicalPlanBuilder { + + protected CommandBuilder(Map params) { + super(params); + } + + @Override + public Command visitDebug(DebugContext ctx) { + Location loc = source(ctx); + if (ctx.FORMAT().size() > 1) { + throw new ParsingException(loc, "Debug FORMAT should be specified at most once"); + } + if (ctx.PLAN().size() > 1) { + throw new ParsingException(loc, "Debug PLAN should be specified at most once"); + } + + Debug.Type type = null; + + if (ctx.type != null) { + if (ctx.type.getType() == SqlBaseLexer.ANALYZED) { + type = Debug.Type.ANALYZED; + } + else { + type = Debug.Type.OPTIMIZED; + } + } + boolean graphViz = ctx.format != null && ctx.format.getType() == SqlBaseLexer.GRAPHVIZ; + Debug.Format format = graphViz ? 
Debug.Format.GRAPHVIZ : Debug.Format.TEXT; + + return new Debug(loc, plan(ctx.statement()), type, format); + } + + + @Override + public Command visitExplain(ExplainContext ctx) { + Location loc = source(ctx); + if (ctx.PLAN().size() > 1) { + throw new ParsingException(loc, "Explain TYPE should be specified at most once"); + } + if (ctx.FORMAT().size() > 1) { + throw new ParsingException(loc, "Explain FORMAT should be specified at most once"); + } + if (ctx.VERIFY().size() > 1) { + throw new ParsingException(loc, "Explain VERIFY should be specified at most once"); + } + + Explain.Type type = null; + + if (ctx.type != null) { + switch (ctx.type.getType()) { + case SqlBaseLexer.PARSED: + type = Explain.Type.PARSED; + break; + case SqlBaseLexer.ANALYZED: + type = Explain.Type.ANALYZED; + break; + case SqlBaseLexer.OPTIMIZED: + type = Explain.Type.OPTIMIZED; + break; + case SqlBaseLexer.MAPPED: + type = Explain.Type.MAPPED; + break; + case SqlBaseLexer.EXECUTABLE: + type = Explain.Type.EXECUTABLE; + break; + default: + type = Explain.Type.ALL; + } + } + boolean graphViz = ctx.format != null && ctx.format.getType() == SqlBaseLexer.GRAPHVIZ; + Explain.Format format = graphViz ? Explain.Format.GRAPHVIZ : Explain.Format.TEXT; + boolean verify = (ctx.verify != null ? Booleans.parseBoolean(ctx.verify.getText().toLowerCase(Locale.ROOT), true) : true); + + return new Explain(loc, plan(ctx.statement()), type, format, verify); + } + + @Override + public Object visitShowFunctions(ShowFunctionsContext ctx) { + return new ShowFunctions(source(ctx), visitPattern(ctx.pattern())); + } + + @Override + public Object visitShowTables(ShowTablesContext ctx) { + return new ShowTables(source(ctx), visitPattern(ctx.pattern())); + } + + @Override + public Object visitShowSchemas(ShowSchemasContext ctx) { + return new ShowSchemas(source(ctx)); + } + + @Override + public Object visitShowColumns(ShowColumnsContext ctx) { + TableIdentifier identifier = visitTableIdentifier(ctx.tableIdentifier()); + return new ShowColumns(source(ctx), identifier.index()); + } + + @Override + public Object visitSysCatalogs(SysCatalogsContext ctx) { + return new SysCatalogs(source(ctx)); + } + + @Override + public SysTables visitSysTables(SysTablesContext ctx) { + List types = new ArrayList<>(); + for (StringContext string : ctx.string()) { + String value = string(string); + if (value != null) { + // check special ODBC wildcard case + if (value.equals(StringUtils.SQL_WILDCARD) && ctx.string().size() == 1) { + // since % is the same as not specifying a value, choose + // https://docs.microsoft.com/en-us/sql/odbc/reference/develop-app/value-list-arguments?view=ssdt-18vs2017 + // that is skip the value + } else { + IndexType type = IndexType.from(value); + types.add(type); + } + } + } + + // if the ODBC enumeration is specified, skip validation + EnumSet set = types.isEmpty() ? 
null : EnumSet.copyOf(types); + return new SysTables(source(ctx), visitPattern(ctx.clusterPattern), visitPattern(ctx.tablePattern), set); + } + + @Override + public Object visitSysColumns(SysColumnsContext ctx) { + Location loc = source(ctx); + return new SysColumns(loc, string(ctx.cluster), visitPattern(ctx.indexPattern), visitPattern(ctx.columnPattern)); + } + + @Override + public SysTypes visitSysTypes(SysTypesContext ctx) { + return new SysTypes(source(ctx)); + } + + @Override + public Object visitSysTableTypes(SysTableTypesContext ctx) { + return new SysTableTypes(source(ctx)); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java new file mode 100644 index 0000000000000..b14611f9f599f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java @@ -0,0 +1,519 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.parser; + +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.tree.ParseTree; +import org.antlr.v4.runtime.tree.TerminalNode; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.Strings; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.Alias; +import org.elasticsearch.xpack.sql.expression.Exists; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.Order; +import org.elasticsearch.xpack.sql.expression.ScalarSubquery; +import org.elasticsearch.xpack.sql.expression.UnresolvedAttribute; +import org.elasticsearch.xpack.sql.expression.UnresolvedStar; +import org.elasticsearch.xpack.sql.expression.function.UnresolvedFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.Cast; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Add; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Div; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Mod; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Mul; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Neg; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Sub; +import org.elasticsearch.xpack.sql.expression.predicate.And; +import org.elasticsearch.xpack.sql.expression.predicate.Equals; +import org.elasticsearch.xpack.sql.expression.predicate.GreaterThan; +import org.elasticsearch.xpack.sql.expression.predicate.GreaterThanOrEqual; +import org.elasticsearch.xpack.sql.expression.predicate.In; +import org.elasticsearch.xpack.sql.expression.predicate.IsNotNull; +import org.elasticsearch.xpack.sql.expression.predicate.LessThan; +import org.elasticsearch.xpack.sql.expression.predicate.LessThanOrEqual; +import org.elasticsearch.xpack.sql.expression.predicate.Not; +import org.elasticsearch.xpack.sql.expression.predicate.Or; +import org.elasticsearch.xpack.sql.expression.predicate.Range; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MatchQueryPredicate; +import 
org.elasticsearch.xpack.sql.expression.predicate.fulltext.MultiMatchQueryPredicate; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.StringQueryPredicate; +import org.elasticsearch.xpack.sql.expression.regex.Like; +import org.elasticsearch.xpack.sql.expression.regex.LikePattern; +import org.elasticsearch.xpack.sql.expression.regex.RLike; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ArithmeticBinaryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ArithmeticUnaryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.BooleanLiteralContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.CastContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ColumnReferenceContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ComparisonContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.DecimalLiteralContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.DereferenceContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ExistsContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ExtractContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.FunctionCallContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.IntegerLiteralContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.LogicalBinaryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.LogicalNotContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.MatchQueryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.MultiMatchQueryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.NullLiteralContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.OrderByContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ParamLiteralContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ParenthesizedExpressionContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.PatternContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.PredicateContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.PredicatedContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.PrimitiveDataTypeContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.SelectExpressionContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.SingleExpressionContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StarContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StringContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StringLiteralContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StringQueryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.SubqueryExpressionContext; +import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import java.math.BigDecimal; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static java.util.Collections.singletonList; +import static org.elasticsearch.xpack.sql.type.DataTypeConversion.conversionFor; + +abstract class ExpressionBuilder extends IdentifierBuilder { + + private final Map params; + + ExpressionBuilder(Map params) { + this.params = params; + } + + protected Expression expression(ParseTree ctx) { + return typedParsing(ctx, Expression.class); + } + + protected List expressions(List contexts) { + return 
visitList(contexts, Expression.class); + } + + @Override + public Expression visitSingleExpression(SingleExpressionContext ctx) { + return expression(ctx.expression()); + } + + @Override + public Expression visitSelectExpression(SelectExpressionContext ctx) { + Expression exp = expression(ctx.expression()); + String alias = visitIdentifier(ctx.identifier()); + if (alias != null) { + exp = new Alias(source(ctx), alias, exp); + } + return exp; + } + + @Override + public Expression visitStar(StarContext ctx) { + return new UnresolvedStar(source(ctx), ctx.qualifiedName() != null ? + new UnresolvedAttribute(source(ctx.qualifiedName()), visitQualifiedName(ctx.qualifiedName())) : null); + } + + @Override + public Object visitColumnReference(ColumnReferenceContext ctx) { + return new UnresolvedAttribute(source(ctx), visitIdentifier(ctx.identifier())); + } + + @Override + public Object visitDereference(DereferenceContext ctx) { + return new UnresolvedAttribute(source(ctx), visitQualifiedName(ctx.qualifiedName())); + } + + @Override + public Expression visitExists(ExistsContext ctx) { + return new Exists(source(ctx), plan(ctx.query())); + } + + @Override + public Expression visitComparison(ComparisonContext ctx) { + Expression left = expression(ctx.left); + Expression right = expression(ctx.right); + TerminalNode op = (TerminalNode) ctx.comparisonOperator().getChild(0); + + Location loc = source(ctx); + + switch (op.getSymbol().getType()) { + case SqlBaseParser.EQ: + return new Equals(loc, left, right); + case SqlBaseParser.NEQ: + return new Not(loc, new Equals(loc, left, right)); + case SqlBaseParser.LT: + return new LessThan(loc, left, right); + case SqlBaseParser.LTE: + return new LessThanOrEqual(loc, left, right); + case SqlBaseParser.GT: + return new GreaterThan(loc, left, right); + case SqlBaseParser.GTE: + return new GreaterThanOrEqual(loc, left, right); + default: + throw new ParsingException(loc, "Unknown operator {}", op.getSymbol().getText()); + } + } + + @Override + public Expression visitPredicated(PredicatedContext ctx) { + Expression exp = expression(ctx.valueExpression()); + + // no predicate, quick exit + if (ctx.predicate() == null) { + return exp; + } + + PredicateContext pCtx = ctx.predicate(); + Location loc = source(pCtx); + + Expression e = null; + switch (pCtx.kind.getType()) { + case SqlBaseParser.BETWEEN: + e = new Range(loc, exp, expression(pCtx.lower), true, expression(pCtx.upper), true); + break; + case SqlBaseParser.IN: + if (pCtx.query() != null) { + throw new ParsingException(loc, "IN query not supported yet"); + } + e = new In(loc, exp, expressions(pCtx.expression())); + break; + case SqlBaseParser.LIKE: + e = new Like(loc, exp, visitPattern(pCtx.pattern())); + break; + case SqlBaseParser.RLIKE: + e = new RLike(loc, exp, new Literal(source(pCtx.regex), string(pCtx.regex), DataType.KEYWORD)); + break; + case SqlBaseParser.NULL: + // shortcut to avoid double negation later on (since there's no IsNull (missing in ES is a negated exists)) + e = new IsNotNull(loc, exp); + return pCtx.NOT() != null ? e : new Not(loc, e); + default: + throw new ParsingException(loc, "Unknown predicate {}", pCtx.kind.getText()); + } + + return pCtx.NOT() != null ? 
new Not(loc, e) : e; + } + +
@Override + public LikePattern visitPattern(PatternContext ctx) { + if (ctx == null) { + return null; + } + + String pattern = string(ctx.value); + int pos = pattern.indexOf('*'); + if (pos >= 0) { + throw new ParsingException(source(ctx.value), + "Invalid char [*] found in pattern [{}] at position {}; use [%] or [_] instead", + pattern, pos); + } + + char escape = 0; + String escapeString = string(ctx.escape); + + if (Strings.hasText(escapeString)) { + // shouldn't happen but adding validation in case the string parsing gets wonky + if (escapeString.length() > 1) { + throw new ParsingException(source(ctx.escape), "A single character, not a string, is required for escaping; found [{}]", escapeString); + } else if (escapeString.length() == 1) { + escape = escapeString.charAt(0); + // these chars already have a meaning + if (escape == '*' || escape == '%' || escape == '_') { + throw new ParsingException(source(ctx.escape), "Char [{}] cannot be used for escaping", escape); + } + // lastly validate that escape chars (if present) are followed by special chars + for (int i = 0; i < pattern.length(); i++) { + char current = pattern.charAt(i); + if (current == escape) { + if (i + 1 == pattern.length()) { + throw new ParsingException(source(ctx.value), + "Pattern [{}] is invalid as escape char [{}] at position {} does not escape anything", pattern, escape, + i); + } + char next = pattern.charAt(i + 1); + if (next != '%' && next != '_') { + throw new ParsingException(source(ctx.value), + "Pattern [{}] is invalid as escape char [{}] at position {} can only escape wildcard chars; found [{}]", + pattern, escape, i, next); + } + } + } + } + } + + return new LikePattern(source(ctx), pattern, escape); + } + + +
// + // Arithmetic + // + @Override + public Object visitArithmeticUnary(ArithmeticUnaryContext ctx) { + Expression value = expression(ctx.valueExpression()); + Location loc = source(ctx); + + switch (ctx.operator.getType()) { + case SqlBaseParser.PLUS: + return value; + case SqlBaseParser.MINUS: + return new Neg(source(ctx.operator), value); + default: + throw new ParsingException(loc, "Unknown arithmetic {}", ctx.operator.getText()); + } + } + +
@Override + public Object visitArithmeticBinary(ArithmeticBinaryContext ctx) { + Expression left = expression(ctx.left); + Expression right = expression(ctx.right); + + Location loc = source(ctx.operator); + + switch (ctx.operator.getType()) { + case SqlBaseParser.ASTERISK: + return new Mul(loc, left, right); + case SqlBaseParser.SLASH: + return new Div(loc, left, right); + case SqlBaseParser.PERCENT: + return new Mod(loc, left, right); + case SqlBaseParser.PLUS: + return new Add(loc, left, right); + case SqlBaseParser.MINUS: + return new Sub(loc, left, right); + default: + throw new ParsingException(loc, "Unknown arithmetic {}", ctx.operator.getText()); + } + } + +
// + // Full-text search predicates + // + @Override + public Object visitStringQuery(StringQueryContext ctx) { + return new StringQueryPredicate(source(ctx), string(ctx.queryString), string(ctx.options)); + } + +
@Override + public Object visitMatchQuery(MatchQueryContext ctx) { + return new MatchQueryPredicate(source(ctx), new UnresolvedAttribute(source(ctx.singleField), + visitQualifiedName(ctx.singleField)), string(ctx.queryString), string(ctx.options)); + } + +
@Override + public Object visitMultiMatchQuery(MultiMatchQueryContext ctx) { + return new MultiMatchQueryPredicate(source(ctx), string(ctx.multiFields), string(ctx.queryString), string(ctx.options)); + } + +
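+    // Illustrative sketch only (hypothetical index/column names, exact option syntax aside): the full-text
+    // rules above would map SQL along the lines of
+    //   SELECT * FROM library WHERE QUERY('lord rings')            -> StringQueryPredicate
+    //   SELECT * FROM library WHERE MATCH(author, 'frank herbert') -> MatchQueryPredicate
+    //   SELECT * FROM library WHERE MATCH('author,name', 'dune')   -> MultiMatchQueryPredicate
+    // onto the corresponding predicate nodes, with an optional trailing string argument carrying extra options.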
@Override + public Order visitOrderBy(OrderByContext ctx) { + return new Order(source(ctx), expression(ctx.expression()), + ctx.DESC() != null ? Order.OrderDirection.DESC : Order.OrderDirection.ASC); + } + + @Override + public Object visitCast(CastContext ctx) { + return new Cast(source(ctx), expression(ctx.expression()), typedParsing(ctx.dataType(), DataType.class)); + } + + @Override + public DataType visitPrimitiveDataType(PrimitiveDataTypeContext ctx) { + String type = visitIdentifier(ctx.identifier()).toLowerCase(Locale.ROOT); + + switch (type) { + case "bit": + case "bool": + case "boolean": + return DataType.BOOLEAN; + case "tinyint": + case "byte": + return DataType.BYTE; + case "smallint": + case "short": + return DataType.SHORT; + case "int": + case "integer": + return DataType.INTEGER; + case "long": + case "bigint": + return DataType.LONG; + case "real": + return DataType.FLOAT; + case "float": + case "double": + return DataType.DOUBLE; + case "date": + case "timestamp": + return DataType.DATE; + case "char": + case "varchar": + case "string": + return DataType.KEYWORD; + default: + throw new ParsingException(source(ctx), "Does not recognize type {}", type); + } + } + + @Override + public Object visitFunctionCall(FunctionCallContext ctx) { + String name = visitIdentifier(ctx.identifier()); + boolean isDistinct = ctx.setQuantifier() != null && ctx.setQuantifier().DISTINCT() != null; + UnresolvedFunction.ResolutionType resolutionType = + isDistinct ? UnresolvedFunction.ResolutionType.DISTINCT : UnresolvedFunction.ResolutionType.STANDARD; + return new UnresolvedFunction(source(ctx), name, resolutionType, expressions(ctx.expression())); + } + + @Override + public Object visitExtract(ExtractContext ctx) { + String fieldString = visitIdentifier(ctx.field); + return new UnresolvedFunction(source(ctx), fieldString, + UnresolvedFunction.ResolutionType.EXTRACT, singletonList(expression(ctx.valueExpression()))); + } + + @Override + public Expression visitSubqueryExpression(SubqueryExpressionContext ctx) { + return new ScalarSubquery(source(ctx), plan(ctx.query())); + } + + @Override + public Expression visitParenthesizedExpression(ParenthesizedExpressionContext ctx) { + return expression(ctx.expression()); + } + + + // + // Logical constructs + // + + @Override + public Object visitLogicalNot(LogicalNotContext ctx) { + return new Not(source(ctx), expression(ctx.booleanExpression())); + } + + @Override + public Object visitLogicalBinary(LogicalBinaryContext ctx) { + int type = ctx.operator.getType(); + Location loc = source(ctx); + Expression left = expression(ctx.left); + Expression right = expression(ctx.right); + + if (type == SqlBaseParser.AND) { + return new And(loc, left, right); + } + if (type == SqlBaseParser.OR) { + return new Or(loc, left, right); + } + throw new ParsingException(loc, "Don't know how to parse {}", ctx); + } + + + // + // Literal + // + + + @Override + public Expression visitNullLiteral(NullLiteralContext ctx) { + return new Literal(source(ctx), null, DataType.NULL); + } + + @Override + public Expression visitBooleanLiteral(BooleanLiteralContext ctx) { + return new Literal(source(ctx), Booleans.parseBoolean(ctx.getText().toLowerCase(Locale.ROOT), false), DataType.BOOLEAN); + } + + @Override + public Expression visitStringLiteral(StringLiteralContext ctx) { + StringBuilder sb = new StringBuilder(); + for (TerminalNode node : ctx.STRING()) { + sb.append(unquoteString(text(node))); + } + return new Literal(source(ctx), sb.toString(), DataType.KEYWORD); + } + + 
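+    // Illustrative sketch only (hypothetical column names): the CAST, EXTRACT and literal rules in this class
+    // would map expressions such as
+    //   CAST(page_count AS DOUBLE)       -> Cast
+    //   EXTRACT(YEAR FROM release_date)  -> UnresolvedFunction with the EXTRACT resolution type
+    //   'dune', 10, 2.5, TRUE            -> KEYWORD, INTEGER, DOUBLE and BOOLEAN literals
+    // onto the expression tree; adjacent quoted segments of a string literal are concatenated.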
@Override + public Object visitDecimalLiteral(DecimalLiteralContext ctx) { + return new Literal(source(ctx), new BigDecimal(ctx.getText()).doubleValue(), DataType.DOUBLE); + } + +
@Override + public Object visitIntegerLiteral(IntegerLiteralContext ctx) { + BigDecimal bigD = new BigDecimal(ctx.getText()); + // TODO: this can be improved to use the smallest type available + return new Literal(source(ctx), bigD.longValueExact(), DataType.INTEGER); + } + +
@Override + public Object visitParamLiteral(ParamLiteralContext ctx) { + SqlTypedParamValue param = param(ctx.PARAM()); + Location loc = source(ctx); + if (param.value == null) { + // no conversion is required for null values + return new Literal(loc, null, param.dataType); + } + final DataType sourceType; + try { + sourceType = DataTypes.fromJava(param.value); + } catch (SqlIllegalArgumentException ex) { + throw new ParsingException(ex, loc, "Unexpected actual parameter type [{}] for type [{}]", param.value.getClass().getName(), + param.dataType); + } + if (sourceType == param.dataType) { + // no conversion is required if the value already has the correct type + return new Literal(loc, param.value, param.dataType); + } + // otherwise we need to make sure that the xcontent-serialized value is converted to the correct type + try { + return new Literal(loc, conversionFor(sourceType, param.dataType).convert(param.value), param.dataType); + } catch (SqlIllegalArgumentException ex) { + throw new ParsingException(ex, loc, "Unexpected actual parameter type [{}] for type [{}]", sourceType, param.dataType); + } + } + +
@Override + public String visitString(StringContext ctx) { + return string(ctx); + } + +
/** + * Extracts the string value, either from an unescaped literal or from the matching parameter. + */ + String string(StringContext ctx) { + if (ctx == null) { + return null; + } + SqlTypedParamValue param = param(ctx.PARAM()); + if (param != null) { + return param.value != null ? param.value.toString() : null; + } else { + return unquoteString(ctx.getText()); + } + } + +
private SqlTypedParamValue param(TerminalNode node) { + if (node == null) { + return null; + } + + Token token = node.getSymbol(); + + if (params.containsKey(token) == false) { + throw new ParsingException(source(node), "Unexpected parameter"); + } + + return params.get(token); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java new file mode 100644 index 0000000000000..8c79ae1ef0595 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilder.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.sql.parser; + +import org.antlr.v4.runtime.tree.ParseTree; +import org.elasticsearch.common.Strings; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.IdentifierContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QualifiedNameContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.TableIdentifierContext; +import org.elasticsearch.xpack.sql.plan.TableIdentifier; +import org.elasticsearch.xpack.sql.tree.Location; + +abstract class IdentifierBuilder extends AbstractBuilder { + + @Override + public TableIdentifier visitTableIdentifier(TableIdentifierContext ctx) { + Location source = source(ctx); + ParseTree tree = ctx.name != null ? ctx.name : ctx.TABLE_IDENTIFIER(); + String index = tree.getText(); + + validateIndex(index, source); + return new TableIdentifier(source, visitIdentifier(ctx.catalog), index); + } + + // see https://github.com/elastic/elasticsearch/issues/6736 + static void validateIndex(String index, Location source) { + for (int i = 0; i < index.length(); i++) { + char c = index.charAt(i); + if (Character.isUpperCase(c)) { + throw new ParsingException(source, "Invalid index name (needs to be lowercase) {}", index); + } + if (c == '\\' || c == '/' || c == '<' || c == '>' || c == '|' || c == ',' || c == ' ') { + throw new ParsingException(source, "Invalid index name (illegal character {}) {}", c, index); + } + } + } + + @Override + public String visitIdentifier(IdentifierContext ctx) { + return ctx == null ? null : ctx.getText(); + } + + @Override + public String visitQualifiedName(QualifiedNameContext ctx) { + if (ctx == null) { + return null; + } + + return Strings.collectionToDelimitedString(visitList(ctx.identifier(), String.class), "."); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java new file mode 100644 index 0000000000000..f41fce1602783 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java @@ -0,0 +1,225 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.parser; + +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.tree.TerminalNode; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.expression.Order; +import org.elasticsearch.xpack.sql.expression.UnresolvedAlias; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.AliasedQueryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.AliasedRelationContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.FromClauseContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.GroupByContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.JoinCriteriaContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.JoinRelationContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.JoinTypeContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.NamedQueryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QueryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QueryNoWithContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QuerySpecificationContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.RelationContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.SetQuantifierContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.SubqueryContext; +import org.elasticsearch.xpack.sql.parser.SqlBaseParser.TableNameContext; +import org.elasticsearch.xpack.sql.plan.TableIdentifier; +import org.elasticsearch.xpack.sql.plan.logical.Aggregate; +import org.elasticsearch.xpack.sql.plan.logical.Distinct; +import org.elasticsearch.xpack.sql.plan.logical.Filter; +import org.elasticsearch.xpack.sql.plan.logical.Join; +import org.elasticsearch.xpack.sql.plan.logical.Join.JoinType; +import org.elasticsearch.xpack.sql.plan.logical.Limit; +import org.elasticsearch.xpack.sql.plan.logical.LocalRelation; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.logical.OrderBy; +import org.elasticsearch.xpack.sql.plan.logical.Project; +import org.elasticsearch.xpack.sql.plan.logical.SubQueryAlias; +import org.elasticsearch.xpack.sql.plan.logical.UnresolvedRelation; +import org.elasticsearch.xpack.sql.plan.logical.With; +import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.session.EmptyExecutable; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyList; +import static java.util.stream.Collectors.toList; + +abstract class LogicalPlanBuilder extends ExpressionBuilder { + + protected LogicalPlanBuilder(Map params) { + super(params); + } + + @Override + public LogicalPlan visitQuery(QueryContext ctx) { + LogicalPlan body = plan(ctx.queryNoWith()); + + List namedQueries = visitList(ctx.namedQuery(), SubQueryAlias.class); + + // unwrap query (and validate while at it) + Map cteRelations = new LinkedHashMap<>(namedQueries.size()); + for (SubQueryAlias namedQuery : namedQueries) { + if (cteRelations.put(namedQuery.alias(), namedQuery) != null) { + throw new ParsingException(namedQuery.location(), "Duplicate alias {}", namedQuery.alias()); + } + } + + // return WITH + return new With(source(ctx), body, cteRelations); + } + + @Override + public LogicalPlan 
visitNamedQuery(NamedQueryContext ctx) { + return new SubQueryAlias(source(ctx), plan(ctx.queryNoWith()), ctx.name.getText()); + } + +
@Override + public LogicalPlan visitQueryNoWith(QueryNoWithContext ctx) { + LogicalPlan plan = plan(ctx.queryTerm()); + + if (!ctx.orderBy().isEmpty()) { + plan = new OrderBy(source(ctx.ORDER()), plan, visitList(ctx.orderBy(), Order.class)); + } + + if (ctx.limit != null && ctx.INTEGER_VALUE() != null) { + plan = new Limit(source(ctx.limit), new Literal(source(ctx), + Integer.parseInt(ctx.limit.getText()), DataType.INTEGER), plan); + } + + return plan; + } + +
@Override + public LogicalPlan visitQuerySpecification(QuerySpecificationContext ctx) { + LogicalPlan query; + if (ctx.fromClause() == null) { + query = new LocalRelation(source(ctx), new EmptyExecutable(emptyList())); + } else { + query = plan(ctx.fromClause()); + } + + // add WHERE + if (ctx.where != null) { + query = new Filter(source(ctx), query, expression(ctx.where)); + } + + List<NamedExpression> selectTarget = emptyList(); + + // SELECT a, b, c ... + if (!ctx.selectItem().isEmpty()) { + selectTarget = expressions(ctx.selectItem()).stream() + .map(e -> (e instanceof NamedExpression) ? (NamedExpression) e : new UnresolvedAlias(e.location(), e)) + .collect(toList()); + } + + // GROUP BY + GroupByContext groupByCtx = ctx.groupBy(); + if (groupByCtx != null) { + SetQuantifierContext setQualifierContext = groupByCtx.setQuantifier(); + TerminalNode groupByAll = setQualifierContext == null ? null : setQualifierContext.ALL(); + if (groupByAll != null) { + throw new ParsingException(source(groupByAll), "GROUP BY ALL is not supported"); + } + List<Expression> groupBy = expressions(groupByCtx.groupingElement()); + query = new Aggregate(source(groupByCtx), query, groupBy, selectTarget); + } + else if (!selectTarget.isEmpty()) { + query = new Project(source(ctx.selectItem(0)), query, selectTarget); + } + + // HAVING + if (ctx.having != null) { + query = new Filter(source(ctx.having), query, expression(ctx.having)); + } + + if (ctx.setQuantifier() != null && ctx.setQuantifier().DISTINCT() != null) { + query = new Distinct(source(ctx.setQuantifier()), query); + } + return query; + } + +
@Override + public LogicalPlan visitFromClause(FromClauseContext ctx) { + // if there are multiple FROM clauses, convert each pair into an inner join + List<LogicalPlan> plans = plans(ctx.relation()); + return plans.stream() + .reduce((left, right) -> new Join(source(ctx), left, right, Join.JoinType.IMPLICIT, null)) + .get(); + } + +
@Override + public LogicalPlan visitRelation(RelationContext ctx) { + // check if there are multiple join clauses. ANTLR produces a right-nested tree with the left join clause + // at the top. However, the fields previously referenced might be used in the following clauses. + // As such, swap/reverse the tree.
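+    // For example (illustrative only; JOIN queries are still rejected in doJoin below),
+    // FROM t1 JOIN t2 JOIN t3 is folded left-to-right into Join(Join(t1, t2), t3), so that
+    // columns coming from t1 and t2 remain visible while handling the t3 join clause.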
+ + LogicalPlan result = plan(ctx.relationPrimary()); + for (JoinRelationContext j : ctx.joinRelation()) { + result = doJoin(result, j); + } + + return result; + } + + private Join doJoin(LogicalPlan left, JoinRelationContext ctx) { + JoinTypeContext joinType = ctx.joinType(); + + Join.JoinType type = JoinType.INNER; + if (joinType != null) { + if (joinType.FULL() != null) { + type = JoinType.FULL; + } + if (joinType.LEFT() != null) { + type = JoinType.LEFT; + } + if (joinType.RIGHT() != null) { + type = JoinType.RIGHT; + } + } + + Expression condition = null; + JoinCriteriaContext criteria = ctx.joinCriteria(); + if (criteria != null) { + if (criteria.USING() != null) { + throw new UnsupportedOperationException(); + } + if (criteria.booleanExpression() != null) { + condition = expression(criteria.booleanExpression()); + } + } + + // We would return this if we actually supported JOINs, but we don't yet. + // new Join(source(ctx), left, plan(ctx.right), type, condition); + throw new ParsingException(source(ctx), "Queries with JOIN are not yet supported"); + } + + @Override + public Object visitAliasedRelation(AliasedRelationContext ctx) { + return new SubQueryAlias(source(ctx), plan(ctx.relation()), visitQualifiedName(ctx.qualifiedName())); + } + + @Override + public Object visitAliasedQuery(AliasedQueryContext ctx) { + return new SubQueryAlias(source(ctx), plan(ctx.queryNoWith()), visitQualifiedName(ctx.qualifiedName())); + } + + @Override + public Object visitSubquery(SubqueryContext ctx) { + return plan(ctx.queryNoWith()); + } + + @Override + public LogicalPlan visitTableName(TableNameContext ctx) { + String alias = visitQualifiedName(ctx.qualifiedName()); + TableIdentifier tableIdentifier = visitTableIdentifier(ctx.tableIdentifier()); + return new UnresolvedRelation(source(ctx), tableIdentifier, alias); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ParsingException.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ParsingException.java new file mode 100644 index 0000000000000..e294b831a88e6 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ParsingException.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.parser; + +import org.antlr.v4.runtime.RecognitionException; +import org.elasticsearch.common.logging.LoggerMessageFormat; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.sql.ClientSqlException; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.Locale; + +public class ParsingException extends ClientSqlException { + private final int line; + private final int charPositionInLine; + + public ParsingException(String message, Exception cause, int line, int charPositionInLine) { + super(message, cause); + this.line = line; + this.charPositionInLine = charPositionInLine; + } + + ParsingException(String message, Object... args) { + this(Location.EMPTY, message, args); + } + + public ParsingException(Location nodeLocation, String message, Object... args) { + super(message, args); + this.line = nodeLocation.getLineNumber(); + this.charPositionInLine = nodeLocation.getColumnNumber(); + } + + public ParsingException(Exception cause, Location nodeLocation, String message, Object... 
args) { + super(cause, message, args); + this.line = nodeLocation.getLineNumber(); + this.charPositionInLine = nodeLocation.getColumnNumber(); + } + + public int getLineNumber() { + return line; + } + + public int getColumnNumber() { + return charPositionInLine + 1; + } + + public String getErrorMessage() { + return super.getMessage(); + } + + @Override + public RestStatus status() { + return RestStatus.BAD_REQUEST; + } + + @Override + public String getMessage() { + return String.format(Locale.ROOT, "line %s:%s: %s", getLineNumber(), getColumnNumber(), getErrorMessage()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseListener.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseListener.java new file mode 100644 index 0000000000000..4e80e8db9bb52 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseListener.java @@ -0,0 +1,968 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +// ANTLR GENERATED CODE: DO NOT EDIT +package org.elasticsearch.xpack.sql.parser; + +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.tree.ErrorNode; +import org.antlr.v4.runtime.tree.TerminalNode; + +/** + * This class provides an empty implementation of {@link SqlBaseListener}, + * which can be extended to create a listener which only needs to handle a subset + * of the available methods. + */ +class SqlBaseBaseListener implements SqlBaseListener { + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterSingleStatement(SqlBaseParser.SingleStatementContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitSingleStatement(SqlBaseParser.SingleStatementContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterSingleExpression(SqlBaseParser.SingleExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitSingleExpression(SqlBaseParser.SingleExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterStatementDefault(SqlBaseParser.StatementDefaultContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitStatementDefault(SqlBaseParser.StatementDefaultContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterExplain(SqlBaseParser.ExplainContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitExplain(SqlBaseParser.ExplainContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterDebug(SqlBaseParser.DebugContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitDebug(SqlBaseParser.DebugContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterShowTables(SqlBaseParser.ShowTablesContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitShowTables(SqlBaseParser.ShowTablesContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterShowColumns(SqlBaseParser.ShowColumnsContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitShowColumns(SqlBaseParser.ShowColumnsContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterShowFunctions(SqlBaseParser.ShowFunctionsContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitShowFunctions(SqlBaseParser.ShowFunctionsContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterShowSchemas(SqlBaseParser.ShowSchemasContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitShowSchemas(SqlBaseParser.ShowSchemasContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterSysCatalogs(SqlBaseParser.SysCatalogsContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitSysCatalogs(SqlBaseParser.SysCatalogsContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterSysTables(SqlBaseParser.SysTablesContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitSysTables(SqlBaseParser.SysTablesContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterSysColumns(SqlBaseParser.SysColumnsContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitSysColumns(SqlBaseParser.SysColumnsContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterSysTypes(SqlBaseParser.SysTypesContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitSysTypes(SqlBaseParser.SysTypesContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterSysTableTypes(SqlBaseParser.SysTableTypesContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitSysTableTypes(SqlBaseParser.SysTableTypesContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterQuery(SqlBaseParser.QueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitQuery(SqlBaseParser.QueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterQueryNoWith(SqlBaseParser.QueryNoWithContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitQueryNoWith(SqlBaseParser.QueryNoWithContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterQueryPrimaryDefault(SqlBaseParser.QueryPrimaryDefaultContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitQueryPrimaryDefault(SqlBaseParser.QueryPrimaryDefaultContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterSubquery(SqlBaseParser.SubqueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitSubquery(SqlBaseParser.SubqueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterOrderBy(SqlBaseParser.OrderByContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitOrderBy(SqlBaseParser.OrderByContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterQuerySpecification(SqlBaseParser.QuerySpecificationContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitQuerySpecification(SqlBaseParser.QuerySpecificationContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterFromClause(SqlBaseParser.FromClauseContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitFromClause(SqlBaseParser.FromClauseContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterGroupBy(SqlBaseParser.GroupByContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitGroupBy(SqlBaseParser.GroupByContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterSingleGroupingSet(SqlBaseParser.SingleGroupingSetContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitSingleGroupingSet(SqlBaseParser.SingleGroupingSetContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterGroupingExpressions(SqlBaseParser.GroupingExpressionsContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitGroupingExpressions(SqlBaseParser.GroupingExpressionsContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterNamedQuery(SqlBaseParser.NamedQueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitNamedQuery(SqlBaseParser.NamedQueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterSetQuantifier(SqlBaseParser.SetQuantifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitSetQuantifier(SqlBaseParser.SetQuantifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterSelectExpression(SqlBaseParser.SelectExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitSelectExpression(SqlBaseParser.SelectExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterRelation(SqlBaseParser.RelationContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitRelation(SqlBaseParser.RelationContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterJoinRelation(SqlBaseParser.JoinRelationContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitJoinRelation(SqlBaseParser.JoinRelationContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterJoinType(SqlBaseParser.JoinTypeContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitJoinType(SqlBaseParser.JoinTypeContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterJoinCriteria(SqlBaseParser.JoinCriteriaContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitJoinCriteria(SqlBaseParser.JoinCriteriaContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterTableName(SqlBaseParser.TableNameContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitTableName(SqlBaseParser.TableNameContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterAliasedQuery(SqlBaseParser.AliasedQueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitAliasedQuery(SqlBaseParser.AliasedQueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterAliasedRelation(SqlBaseParser.AliasedRelationContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitAliasedRelation(SqlBaseParser.AliasedRelationContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterExpression(SqlBaseParser.ExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitExpression(SqlBaseParser.ExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterLogicalNot(SqlBaseParser.LogicalNotContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitLogicalNot(SqlBaseParser.LogicalNotContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterStringQuery(SqlBaseParser.StringQueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitStringQuery(SqlBaseParser.StringQueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterBooleanDefault(SqlBaseParser.BooleanDefaultContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitBooleanDefault(SqlBaseParser.BooleanDefaultContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterExists(SqlBaseParser.ExistsContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitExists(SqlBaseParser.ExistsContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterMultiMatchQuery(SqlBaseParser.MultiMatchQueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitMultiMatchQuery(SqlBaseParser.MultiMatchQueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterMatchQuery(SqlBaseParser.MatchQueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitMatchQuery(SqlBaseParser.MatchQueryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterLogicalBinary(SqlBaseParser.LogicalBinaryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitLogicalBinary(SqlBaseParser.LogicalBinaryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterPredicated(SqlBaseParser.PredicatedContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitPredicated(SqlBaseParser.PredicatedContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterPredicate(SqlBaseParser.PredicateContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitPredicate(SqlBaseParser.PredicateContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterPattern(SqlBaseParser.PatternContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitPattern(SqlBaseParser.PatternContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterValueExpressionDefault(SqlBaseParser.ValueExpressionDefaultContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitValueExpressionDefault(SqlBaseParser.ValueExpressionDefaultContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterComparison(SqlBaseParser.ComparisonContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitComparison(SqlBaseParser.ComparisonContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterArithmeticBinary(SqlBaseParser.ArithmeticBinaryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitArithmeticBinary(SqlBaseParser.ArithmeticBinaryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterArithmeticUnary(SqlBaseParser.ArithmeticUnaryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitArithmeticUnary(SqlBaseParser.ArithmeticUnaryContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterCast(SqlBaseParser.CastContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitCast(SqlBaseParser.CastContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterExtract(SqlBaseParser.ExtractContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitExtract(SqlBaseParser.ExtractContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterConstantDefault(SqlBaseParser.ConstantDefaultContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitConstantDefault(SqlBaseParser.ConstantDefaultContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterStar(SqlBaseParser.StarContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitStar(SqlBaseParser.StarContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterFunctionCall(SqlBaseParser.FunctionCallContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitFunctionCall(SqlBaseParser.FunctionCallContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterSubqueryExpression(SqlBaseParser.SubqueryExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitSubqueryExpression(SqlBaseParser.SubqueryExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterColumnReference(SqlBaseParser.ColumnReferenceContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitColumnReference(SqlBaseParser.ColumnReferenceContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterDereference(SqlBaseParser.DereferenceContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitDereference(SqlBaseParser.DereferenceContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterParenthesizedExpression(SqlBaseParser.ParenthesizedExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitParenthesizedExpression(SqlBaseParser.ParenthesizedExpressionContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterNullLiteral(SqlBaseParser.NullLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitNullLiteral(SqlBaseParser.NullLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterNumericLiteral(SqlBaseParser.NumericLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitNumericLiteral(SqlBaseParser.NumericLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterBooleanLiteral(SqlBaseParser.BooleanLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitBooleanLiteral(SqlBaseParser.BooleanLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterStringLiteral(SqlBaseParser.StringLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitStringLiteral(SqlBaseParser.StringLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterParamLiteral(SqlBaseParser.ParamLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitParamLiteral(SqlBaseParser.ParamLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterComparisonOperator(SqlBaseParser.ComparisonOperatorContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitComparisonOperator(SqlBaseParser.ComparisonOperatorContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterBooleanValue(SqlBaseParser.BooleanValueContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitBooleanValue(SqlBaseParser.BooleanValueContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterPrimitiveDataType(SqlBaseParser.PrimitiveDataTypeContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitPrimitiveDataType(SqlBaseParser.PrimitiveDataTypeContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterQualifiedName(SqlBaseParser.QualifiedNameContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitQualifiedName(SqlBaseParser.QualifiedNameContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterIdentifier(SqlBaseParser.IdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitIdentifier(SqlBaseParser.IdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterTableIdentifier(SqlBaseParser.TableIdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitTableIdentifier(SqlBaseParser.TableIdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterQuotedIdentifier(SqlBaseParser.QuotedIdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitQuotedIdentifier(SqlBaseParser.QuotedIdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterBackQuotedIdentifier(SqlBaseParser.BackQuotedIdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitBackQuotedIdentifier(SqlBaseParser.BackQuotedIdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterUnquotedIdentifier(SqlBaseParser.UnquotedIdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitUnquotedIdentifier(SqlBaseParser.UnquotedIdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterDigitIdentifier(SqlBaseParser.DigitIdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitDigitIdentifier(SqlBaseParser.DigitIdentifierContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterDecimalLiteral(SqlBaseParser.DecimalLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitDecimalLiteral(SqlBaseParser.DecimalLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterIntegerLiteral(SqlBaseParser.IntegerLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitIntegerLiteral(SqlBaseParser.IntegerLiteralContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterString(SqlBaseParser.StringContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitString(SqlBaseParser.StringContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterNonReserved(SqlBaseParser.NonReservedContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitNonReserved(SqlBaseParser.NonReservedContext ctx) { } + + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void enterEveryRule(ParserRuleContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void exitEveryRule(ParserRuleContext ctx) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void visitTerminal(TerminalNode node) { } + /** + * {@inheritDoc} + * + *

The default implementation does nothing.

+ */ + @Override public void visitErrorNode(ErrorNode node) { } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java new file mode 100644 index 0000000000000..1adb0a423c7f2 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseBaseVisitor.java @@ -0,0 +1,558 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +// ANTLR GENERATED CODE: DO NOT EDIT +package org.elasticsearch.xpack.sql.parser; +import org.antlr.v4.runtime.tree.AbstractParseTreeVisitor; + +/** + * This class provides an empty implementation of {@link SqlBaseVisitor}, + * which can be extended to create a visitor which only needs to handle a subset + * of the available methods. + * + * @param <T> The return type of the visit operation. Use {@link Void} for + * operations with no return type. + */ +class SqlBaseBaseVisitor<T> extends AbstractParseTreeVisitor<T> implements SqlBaseVisitor<T> { + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitSingleStatement(SqlBaseParser.SingleStatementContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitSingleExpression(SqlBaseParser.SingleExpressionContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitStatementDefault(SqlBaseParser.StatementDefaultContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitExplain(SqlBaseParser.ExplainContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitDebug(SqlBaseParser.DebugContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitShowTables(SqlBaseParser.ShowTablesContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitShowColumns(SqlBaseParser.ShowColumnsContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitShowFunctions(SqlBaseParser.ShowFunctionsContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitShowSchemas(SqlBaseParser.ShowSchemasContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitSysCatalogs(SqlBaseParser.SysCatalogsContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitSysTables(SqlBaseParser.SysTablesContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitSysColumns(SqlBaseParser.SysColumnsContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitSysTypes(SqlBaseParser.SysTypesContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitSysTableTypes(SqlBaseParser.SysTableTypesContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitQuery(SqlBaseParser.QueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitQueryNoWith(SqlBaseParser.QueryNoWithContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitQueryPrimaryDefault(SqlBaseParser.QueryPrimaryDefaultContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitSubquery(SqlBaseParser.SubqueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitOrderBy(SqlBaseParser.OrderByContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitQuerySpecification(SqlBaseParser.QuerySpecificationContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitFromClause(SqlBaseParser.FromClauseContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitGroupBy(SqlBaseParser.GroupByContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitSingleGroupingSet(SqlBaseParser.SingleGroupingSetContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitGroupingExpressions(SqlBaseParser.GroupingExpressionsContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitNamedQuery(SqlBaseParser.NamedQueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitSetQuantifier(SqlBaseParser.SetQuantifierContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitSelectExpression(SqlBaseParser.SelectExpressionContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitRelation(SqlBaseParser.RelationContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitJoinRelation(SqlBaseParser.JoinRelationContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitJoinType(SqlBaseParser.JoinTypeContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitJoinCriteria(SqlBaseParser.JoinCriteriaContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitTableName(SqlBaseParser.TableNameContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitAliasedQuery(SqlBaseParser.AliasedQueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitAliasedRelation(SqlBaseParser.AliasedRelationContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitExpression(SqlBaseParser.ExpressionContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitLogicalNot(SqlBaseParser.LogicalNotContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitStringQuery(SqlBaseParser.StringQueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitBooleanDefault(SqlBaseParser.BooleanDefaultContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitExists(SqlBaseParser.ExistsContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitMultiMatchQuery(SqlBaseParser.MultiMatchQueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitMatchQuery(SqlBaseParser.MatchQueryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitLogicalBinary(SqlBaseParser.LogicalBinaryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitPredicated(SqlBaseParser.PredicatedContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitPredicate(SqlBaseParser.PredicateContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitPattern(SqlBaseParser.PatternContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitValueExpressionDefault(SqlBaseParser.ValueExpressionDefaultContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitComparison(SqlBaseParser.ComparisonContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitArithmeticBinary(SqlBaseParser.ArithmeticBinaryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitArithmeticUnary(SqlBaseParser.ArithmeticUnaryContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitCast(SqlBaseParser.CastContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitExtract(SqlBaseParser.ExtractContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitConstantDefault(SqlBaseParser.ConstantDefaultContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitStar(SqlBaseParser.StarContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitFunctionCall(SqlBaseParser.FunctionCallContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitSubqueryExpression(SqlBaseParser.SubqueryExpressionContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitColumnReference(SqlBaseParser.ColumnReferenceContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitDereference(SqlBaseParser.DereferenceContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitParenthesizedExpression(SqlBaseParser.ParenthesizedExpressionContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitNullLiteral(SqlBaseParser.NullLiteralContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitNumericLiteral(SqlBaseParser.NumericLiteralContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitBooleanLiteral(SqlBaseParser.BooleanLiteralContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitStringLiteral(SqlBaseParser.StringLiteralContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitParamLiteral(SqlBaseParser.ParamLiteralContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitComparisonOperator(SqlBaseParser.ComparisonOperatorContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitBooleanValue(SqlBaseParser.BooleanValueContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitPrimitiveDataType(SqlBaseParser.PrimitiveDataTypeContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitQualifiedName(SqlBaseParser.QualifiedNameContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitIdentifier(SqlBaseParser.IdentifierContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitTableIdentifier(SqlBaseParser.TableIdentifierContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitQuotedIdentifier(SqlBaseParser.QuotedIdentifierContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitBackQuotedIdentifier(SqlBaseParser.BackQuotedIdentifierContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitUnquotedIdentifier(SqlBaseParser.UnquotedIdentifierContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitDigitIdentifier(SqlBaseParser.DigitIdentifierContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitDecimalLiteral(SqlBaseParser.DecimalLiteralContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitIntegerLiteral(SqlBaseParser.IntegerLiteralContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitString(SqlBaseParser.StringContext ctx) { return visitChildren(ctx); } + /** + * {@inheritDoc} + * + *

The default implementation returns the result of calling + * {@link #visitChildren} on {@code ctx}.

+ */ + @Override public T visitNonReserved(SqlBaseParser.NonReservedContext ctx) { return visitChildren(ctx); } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseLexer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseLexer.java new file mode 100644 index 0000000000000..1367b98a89982 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseLexer.java @@ -0,0 +1,434 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +// ANTLR GENERATED CODE: DO NOT EDIT +package org.elasticsearch.xpack.sql.parser; +import org.antlr.v4.runtime.Lexer; +import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.*; +import org.antlr.v4.runtime.atn.*; +import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.misc.*; + +@SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"}) +class SqlBaseLexer extends Lexer { + static { RuntimeMetaData.checkVersion("4.5.3", RuntimeMetaData.VERSION); } + + protected static final DFA[] _decisionToDFA; + protected static final PredictionContextCache _sharedContextCache = + new PredictionContextCache(); + public static final int + T__0=1, T__1=2, T__2=3, T__3=4, ALL=5, ANALYZE=6, ANALYZED=7, AND=8, ANY=9, + AS=10, ASC=11, BETWEEN=12, BY=13, CAST=14, CATALOG=15, CATALOGS=16, COLUMNS=17, + DEBUG=18, DESC=19, DESCRIBE=20, DISTINCT=21, ESCAPE=22, EXECUTABLE=23, + EXISTS=24, EXPLAIN=25, EXTRACT=26, FALSE=27, FORMAT=28, FROM=29, FULL=30, + FUNCTIONS=31, GRAPHVIZ=32, GROUP=33, HAVING=34, IN=35, INNER=36, IS=37, + JOIN=38, LEFT=39, LIKE=40, LIMIT=41, MAPPED=42, MATCH=43, NATURAL=44, + NOT=45, NULL=46, ON=47, OPTIMIZED=48, OR=49, ORDER=50, OUTER=51, PARSED=52, + PHYSICAL=53, PLAN=54, RIGHT=55, RLIKE=56, QUERY=57, SCHEMAS=58, SELECT=59, + SHOW=60, SYS=61, TABLE=62, TABLES=63, TEXT=64, TRUE=65, TYPE=66, TYPES=67, + USING=68, VERIFY=69, WHERE=70, WITH=71, EQ=72, NEQ=73, LT=74, LTE=75, + GT=76, GTE=77, PLUS=78, MINUS=79, ASTERISK=80, SLASH=81, PERCENT=82, CONCAT=83, + DOT=84, PARAM=85, STRING=86, INTEGER_VALUE=87, DECIMAL_VALUE=88, IDENTIFIER=89, + DIGIT_IDENTIFIER=90, TABLE_IDENTIFIER=91, QUOTED_IDENTIFIER=92, BACKQUOTED_IDENTIFIER=93, + SIMPLE_COMMENT=94, BRACKETED_COMMENT=95, WS=96, UNRECOGNIZED=97; + public static String[] modeNames = { + "DEFAULT_MODE" + }; + + public static final String[] ruleNames = { + "T__0", "T__1", "T__2", "T__3", "ALL", "ANALYZE", "ANALYZED", "AND", "ANY", + "AS", "ASC", "BETWEEN", "BY", "CAST", "CATALOG", "CATALOGS", "COLUMNS", + "DEBUG", "DESC", "DESCRIBE", "DISTINCT", "ESCAPE", "EXECUTABLE", "EXISTS", + "EXPLAIN", "EXTRACT", "FALSE", "FORMAT", "FROM", "FULL", "FUNCTIONS", + "GRAPHVIZ", "GROUP", "HAVING", "IN", "INNER", "IS", "JOIN", "LEFT", "LIKE", + "LIMIT", "MAPPED", "MATCH", "NATURAL", "NOT", "NULL", "ON", "OPTIMIZED", + "OR", "ORDER", "OUTER", "PARSED", "PHYSICAL", "PLAN", "RIGHT", "RLIKE", + "QUERY", "SCHEMAS", "SELECT", "SHOW", "SYS", "TABLE", "TABLES", "TEXT", + "TRUE", "TYPE", "TYPES", "USING", "VERIFY", "WHERE", "WITH", "EQ", "NEQ", + "LT", "LTE", "GT", "GTE", "PLUS", "MINUS", "ASTERISK", "SLASH", "PERCENT", + "CONCAT", "DOT", "PARAM", "STRING", "INTEGER_VALUE", "DECIMAL_VALUE", + "IDENTIFIER", "DIGIT_IDENTIFIER", "TABLE_IDENTIFIER", 
"QUOTED_IDENTIFIER", + "BACKQUOTED_IDENTIFIER", "EXPONENT", "DIGIT", "LETTER", "SIMPLE_COMMENT", + "BRACKETED_COMMENT", "WS", "UNRECOGNIZED" + }; + + private static final String[] _LITERAL_NAMES = { + null, "'('", "')'", "','", "':'", "'ALL'", "'ANALYZE'", "'ANALYZED'", + "'AND'", "'ANY'", "'AS'", "'ASC'", "'BETWEEN'", "'BY'", "'CAST'", "'CATALOG'", + "'CATALOGS'", "'COLUMNS'", "'DEBUG'", "'DESC'", "'DESCRIBE'", "'DISTINCT'", + "'ESCAPE'", "'EXECUTABLE'", "'EXISTS'", "'EXPLAIN'", "'EXTRACT'", "'FALSE'", + "'FORMAT'", "'FROM'", "'FULL'", "'FUNCTIONS'", "'GRAPHVIZ'", "'GROUP'", + "'HAVING'", "'IN'", "'INNER'", "'IS'", "'JOIN'", "'LEFT'", "'LIKE'", "'LIMIT'", + "'MAPPED'", "'MATCH'", "'NATURAL'", "'NOT'", "'NULL'", "'ON'", "'OPTIMIZED'", + "'OR'", "'ORDER'", "'OUTER'", "'PARSED'", "'PHYSICAL'", "'PLAN'", "'RIGHT'", + "'RLIKE'", "'QUERY'", "'SCHEMAS'", "'SELECT'", "'SHOW'", "'SYS'", "'TABLE'", + "'TABLES'", "'TEXT'", "'TRUE'", "'TYPE'", "'TYPES'", "'USING'", "'VERIFY'", + "'WHERE'", "'WITH'", "'='", null, "'<'", "'<='", "'>'", "'>='", "'+'", + "'-'", "'*'", "'/'", "'%'", "'||'", "'.'", "'?'" + }; + private static final String[] _SYMBOLIC_NAMES = { + null, null, null, null, null, "ALL", "ANALYZE", "ANALYZED", "AND", "ANY", + "AS", "ASC", "BETWEEN", "BY", "CAST", "CATALOG", "CATALOGS", "COLUMNS", + "DEBUG", "DESC", "DESCRIBE", "DISTINCT", "ESCAPE", "EXECUTABLE", "EXISTS", + "EXPLAIN", "EXTRACT", "FALSE", "FORMAT", "FROM", "FULL", "FUNCTIONS", + "GRAPHVIZ", "GROUP", "HAVING", "IN", "INNER", "IS", "JOIN", "LEFT", "LIKE", + "LIMIT", "MAPPED", "MATCH", "NATURAL", "NOT", "NULL", "ON", "OPTIMIZED", + "OR", "ORDER", "OUTER", "PARSED", "PHYSICAL", "PLAN", "RIGHT", "RLIKE", + "QUERY", "SCHEMAS", "SELECT", "SHOW", "SYS", "TABLE", "TABLES", "TEXT", + "TRUE", "TYPE", "TYPES", "USING", "VERIFY", "WHERE", "WITH", "EQ", "NEQ", + "LT", "LTE", "GT", "GTE", "PLUS", "MINUS", "ASTERISK", "SLASH", "PERCENT", + "CONCAT", "DOT", "PARAM", "STRING", "INTEGER_VALUE", "DECIMAL_VALUE", + "IDENTIFIER", "DIGIT_IDENTIFIER", "TABLE_IDENTIFIER", "QUOTED_IDENTIFIER", + "BACKQUOTED_IDENTIFIER", "SIMPLE_COMMENT", "BRACKETED_COMMENT", "WS", + "UNRECOGNIZED" + }; + public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); + + /** + * @deprecated Use {@link #VOCABULARY} instead. 
+ */ + @Deprecated + public static final String[] tokenNames; + static { + tokenNames = new String[_SYMBOLIC_NAMES.length]; + for (int i = 0; i < tokenNames.length; i++) { + tokenNames[i] = VOCABULARY.getLiteralName(i); + if (tokenNames[i] == null) { + tokenNames[i] = VOCABULARY.getSymbolicName(i); + } + + if (tokenNames[i] == null) { + tokenNames[i] = ""; + } + } + } + + @Override + @Deprecated + public String[] getTokenNames() { + return tokenNames; + } + + @Override + + public Vocabulary getVocabulary() { + return VOCABULARY; + } + + + public SqlBaseLexer(CharStream input) { + super(input); + _interp = new LexerATNSimulator(this,_ATN,_decisionToDFA,_sharedContextCache); + } + + @Override + public String getGrammarFileName() { return "SqlBase.g4"; } + + @Override + public String[] getRuleNames() { return ruleNames; } + + @Override + public String getSerializedATN() { return _serializedATN; } + + @Override + public String[] getModeNames() { return modeNames; } + + @Override + public ATN getATN() { return _ATN; } + + public static final String _serializedATN = + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2c\u033b\b\1\4\2\t"+ + "\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13"+ + "\t\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+ + "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+ + "\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!"+ + "\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4"+ + ",\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64\t"+ + "\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:\4;\t;\4<\t<\4=\t="+ + "\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\tC\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I"+ + "\tI\4J\tJ\4K\tK\4L\tL\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT"+ + "\4U\tU\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4\\\t\\\4]\t]\4^\t^\4_\t_\4"+ + "`\t`\4a\ta\4b\tb\4c\tc\4d\td\4e\te\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5\3\6"+ + "\3\6\3\6\3\6\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\b\3\b\3\b\3\b\3\b\3\b\3"+ + "\b\3\b\3\b\3\t\3\t\3\t\3\t\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\f\3\f\3\f"+ + "\3\f\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\16\3\16\3\16\3\17\3\17\3\17\3\17"+ + "\3\17\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21"+ + "\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\23\3\23"+ + "\3\23\3\23\3\23\3\23\3\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25"+ + "\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\27"+ + "\3\27\3\27\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\30\3\30\3\30\3\30"+ + "\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32"+ + "\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\34\3\34"+ + "\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\36\3\36\3\36"+ + "\3\36\3\36\3\37\3\37\3\37\3\37\3\37\3 \3 \3 \3 \3 \3 \3 \3 \3 \3 \3!\3"+ + "!\3!\3!\3!\3!\3!\3!\3!\3\"\3\"\3\"\3\"\3\"\3\"\3#\3#\3#\3#\3#\3#\3#\3"+ + "$\3$\3$\3%\3%\3%\3%\3%\3%\3&\3&\3&\3\'\3\'\3\'\3\'\3\'\3(\3(\3(\3(\3("+ + "\3)\3)\3)\3)\3)\3*\3*\3*\3*\3*\3*\3+\3+\3+\3+\3+\3+\3+\3,\3,\3,\3,\3,"+ + "\3,\3-\3-\3-\3-\3-\3-\3-\3-\3.\3.\3.\3.\3/\3/\3/\3/\3/\3\60\3\60\3\60"+ + "\3\61\3\61\3\61\3\61\3\61\3\61\3\61\3\61\3\61\3\61\3\62\3\62\3\62\3\63"+ + "\3\63\3\63\3\63\3\63\3\63\3\64\3\64\3\64\3\64\3\64\3\64\3\65\3\65\3\65"+ + "\3\65\3\65\3\65\3\65\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3\67"+ + "\3\67\3\67\3\67\3\67\38\38\38\38\38\38\39\39\39\39\39\39\3:\3:\3:\3:\3"+ + 
":\3:\3;\3;\3;\3;\3;\3;\3;\3;\3<\3<\3<\3<\3<\3<\3<\3=\3=\3=\3=\3=\3>\3"+ + ">\3>\3>\3?\3?\3?\3?\3?\3?\3@\3@\3@\3@\3@\3@\3@\3A\3A\3A\3A\3A\3B\3B\3"+ + "B\3B\3B\3C\3C\3C\3C\3C\3D\3D\3D\3D\3D\3D\3E\3E\3E\3E\3E\3E\3F\3F\3F\3"+ + "F\3F\3F\3F\3G\3G\3G\3G\3G\3G\3H\3H\3H\3H\3H\3I\3I\3J\3J\3J\3J\3J\3J\3"+ + "J\5J\u027b\nJ\3K\3K\3L\3L\3L\3M\3M\3N\3N\3N\3O\3O\3P\3P\3Q\3Q\3R\3R\3"+ + "S\3S\3T\3T\3T\3U\3U\3V\3V\3W\3W\3W\3W\7W\u029c\nW\fW\16W\u029f\13W\3W"+ + "\3W\3X\6X\u02a4\nX\rX\16X\u02a5\3Y\6Y\u02a9\nY\rY\16Y\u02aa\3Y\3Y\7Y\u02af"+ + "\nY\fY\16Y\u02b2\13Y\3Y\3Y\6Y\u02b6\nY\rY\16Y\u02b7\3Y\6Y\u02bb\nY\rY"+ + "\16Y\u02bc\3Y\3Y\7Y\u02c1\nY\fY\16Y\u02c4\13Y\5Y\u02c6\nY\3Y\3Y\3Y\3Y"+ + "\6Y\u02cc\nY\rY\16Y\u02cd\3Y\3Y\5Y\u02d2\nY\3Z\3Z\5Z\u02d6\nZ\3Z\3Z\3"+ + "Z\7Z\u02db\nZ\fZ\16Z\u02de\13Z\3[\3[\3[\3[\6[\u02e4\n[\r[\16[\u02e5\3"+ + "\\\3\\\3\\\3\\\6\\\u02ec\n\\\r\\\16\\\u02ed\3]\3]\3]\3]\7]\u02f4\n]\f"+ + "]\16]\u02f7\13]\3]\3]\3^\3^\3^\3^\7^\u02ff\n^\f^\16^\u0302\13^\3^\3^\3"+ + "_\3_\5_\u0308\n_\3_\6_\u030b\n_\r_\16_\u030c\3`\3`\3a\3a\3b\3b\3b\3b\7"+ + "b\u0317\nb\fb\16b\u031a\13b\3b\5b\u031d\nb\3b\5b\u0320\nb\3b\3b\3c\3c"+ + "\3c\3c\3c\7c\u0329\nc\fc\16c\u032c\13c\3c\3c\3c\3c\3c\3d\6d\u0334\nd\r"+ + "d\16d\u0335\3d\3d\3e\3e\3\u032a\2f\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n"+ + "\23\13\25\f\27\r\31\16\33\17\35\20\37\21!\22#\23%\24\'\25)\26+\27-\30"+ + "/\31\61\32\63\33\65\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.["+ + "/]\60_\61a\62c\63e\64g\65i\66k\67m8o9q:s;u{?}@\177A\u0081B\u0083"+ + "C\u0085D\u0087E\u0089F\u008bG\u008dH\u008fI\u0091J\u0093K\u0095L\u0097"+ + "M\u0099N\u009bO\u009dP\u009fQ\u00a1R\u00a3S\u00a5T\u00a7U\u00a9V\u00ab"+ + "W\u00adX\u00afY\u00b1Z\u00b3[\u00b5\\\u00b7]\u00b9^\u00bb_\u00bd\2\u00bf"+ + "\2\u00c1\2\u00c3`\u00c5a\u00c7b\u00c9c\3\2\f\3\2))\4\2BBaa\5\2<\3\2\2\2\u017d\u017e"+ + "\7H\2\2\u017e\u017f\7W\2\2\u017f\u0180\7P\2\2\u0180\u0181\7E\2\2\u0181"+ + "\u0182\7V\2\2\u0182\u0183\7K\2\2\u0183\u0184\7Q\2\2\u0184\u0185\7P\2\2"+ + "\u0185\u0186\7U\2\2\u0186@\3\2\2\2\u0187\u0188\7I\2\2\u0188\u0189\7T\2"+ + "\2\u0189\u018a\7C\2\2\u018a\u018b\7R\2\2\u018b\u018c\7J\2\2\u018c\u018d"+ + "\7X\2\2\u018d\u018e\7K\2\2\u018e\u018f\7\\\2\2\u018fB\3\2\2\2\u0190\u0191"+ + "\7I\2\2\u0191\u0192\7T\2\2\u0192\u0193\7Q\2\2\u0193\u0194\7W\2\2\u0194"+ + "\u0195\7R\2\2\u0195D\3\2\2\2\u0196\u0197\7J\2\2\u0197\u0198\7C\2\2\u0198"+ + "\u0199\7X\2\2\u0199\u019a\7K\2\2\u019a\u019b\7P\2\2\u019b\u019c\7I\2\2"+ + "\u019cF\3\2\2\2\u019d\u019e\7K\2\2\u019e\u019f\7P\2\2\u019fH\3\2\2\2\u01a0"+ + "\u01a1\7K\2\2\u01a1\u01a2\7P\2\2\u01a2\u01a3\7P\2\2\u01a3\u01a4\7G\2\2"+ + "\u01a4\u01a5\7T\2\2\u01a5J\3\2\2\2\u01a6\u01a7\7K\2\2\u01a7\u01a8\7U\2"+ + "\2\u01a8L\3\2\2\2\u01a9\u01aa\7L\2\2\u01aa\u01ab\7Q\2\2\u01ab\u01ac\7"+ + "K\2\2\u01ac\u01ad\7P\2\2\u01adN\3\2\2\2\u01ae\u01af\7N\2\2\u01af\u01b0"+ + "\7G\2\2\u01b0\u01b1\7H\2\2\u01b1\u01b2\7V\2\2\u01b2P\3\2\2\2\u01b3\u01b4"+ + "\7N\2\2\u01b4\u01b5\7K\2\2\u01b5\u01b6\7M\2\2\u01b6\u01b7\7G\2\2\u01b7"+ + "R\3\2\2\2\u01b8\u01b9\7N\2\2\u01b9\u01ba\7K\2\2\u01ba\u01bb\7O\2\2\u01bb"+ + "\u01bc\7K\2\2\u01bc\u01bd\7V\2\2\u01bdT\3\2\2\2\u01be\u01bf\7O\2\2\u01bf"+ + "\u01c0\7C\2\2\u01c0\u01c1\7R\2\2\u01c1\u01c2\7R\2\2\u01c2\u01c3\7G\2\2"+ + "\u01c3\u01c4\7F\2\2\u01c4V\3\2\2\2\u01c5\u01c6\7O\2\2\u01c6\u01c7\7C\2"+ + "\2\u01c7\u01c8\7V\2\2\u01c8\u01c9\7E\2\2\u01c9\u01ca\7J\2\2\u01caX\3\2"+ + "\2\2\u01cb\u01cc\7P\2\2\u01cc\u01cd\7C\2\2\u01cd\u01ce\7V\2\2\u01ce\u01cf"+ + "\7W\2\2\u01cf\u01d0\7T\2\2\u01d0\u01d1\7C\2\2\u01d1\u01d2\7N\2\2\u01d2"+ + 
"Z\3\2\2\2\u01d3\u01d4\7P\2\2\u01d4\u01d5\7Q\2\2\u01d5\u01d6\7V\2\2\u01d6"+ + "\\\3\2\2\2\u01d7\u01d8\7P\2\2\u01d8\u01d9\7W\2\2\u01d9\u01da\7N\2\2\u01da"+ + "\u01db\7N\2\2\u01db^\3\2\2\2\u01dc\u01dd\7Q\2\2\u01dd\u01de\7P\2\2\u01de"+ + "`\3\2\2\2\u01df\u01e0\7Q\2\2\u01e0\u01e1\7R\2\2\u01e1\u01e2\7V\2\2\u01e2"+ + "\u01e3\7K\2\2\u01e3\u01e4\7O\2\2\u01e4\u01e5\7K\2\2\u01e5\u01e6\7\\\2"+ + "\2\u01e6\u01e7\7G\2\2\u01e7\u01e8\7F\2\2\u01e8b\3\2\2\2\u01e9\u01ea\7"+ + "Q\2\2\u01ea\u01eb\7T\2\2\u01ebd\3\2\2\2\u01ec\u01ed\7Q\2\2\u01ed\u01ee"+ + "\7T\2\2\u01ee\u01ef\7F\2\2\u01ef\u01f0\7G\2\2\u01f0\u01f1\7T\2\2\u01f1"+ + "f\3\2\2\2\u01f2\u01f3\7Q\2\2\u01f3\u01f4\7W\2\2\u01f4\u01f5\7V\2\2\u01f5"+ + "\u01f6\7G\2\2\u01f6\u01f7\7T\2\2\u01f7h\3\2\2\2\u01f8\u01f9\7R\2\2\u01f9"+ + "\u01fa\7C\2\2\u01fa\u01fb\7T\2\2\u01fb\u01fc\7U\2\2\u01fc\u01fd\7G\2\2"+ + "\u01fd\u01fe\7F\2\2\u01fej\3\2\2\2\u01ff\u0200\7R\2\2\u0200\u0201\7J\2"+ + "\2\u0201\u0202\7[\2\2\u0202\u0203\7U\2\2\u0203\u0204\7K\2\2\u0204\u0205"+ + "\7E\2\2\u0205\u0206\7C\2\2\u0206\u0207\7N\2\2\u0207l\3\2\2\2\u0208\u0209"+ + "\7R\2\2\u0209\u020a\7N\2\2\u020a\u020b\7C\2\2\u020b\u020c\7P\2\2\u020c"+ + "n\3\2\2\2\u020d\u020e\7T\2\2\u020e\u020f\7K\2\2\u020f\u0210\7I\2\2\u0210"+ + "\u0211\7J\2\2\u0211\u0212\7V\2\2\u0212p\3\2\2\2\u0213\u0214\7T\2\2\u0214"+ + "\u0215\7N\2\2\u0215\u0216\7K\2\2\u0216\u0217\7M\2\2\u0217\u0218\7G\2\2"+ + "\u0218r\3\2\2\2\u0219\u021a\7S\2\2\u021a\u021b\7W\2\2\u021b\u021c\7G\2"+ + "\2\u021c\u021d\7T\2\2\u021d\u021e\7[\2\2\u021et\3\2\2\2\u021f\u0220\7"+ + "U\2\2\u0220\u0221\7E\2\2\u0221\u0222\7J\2\2\u0222\u0223\7G\2\2\u0223\u0224"+ + "\7O\2\2\u0224\u0225\7C\2\2\u0225\u0226\7U\2\2\u0226v\3\2\2\2\u0227\u0228"+ + "\7U\2\2\u0228\u0229\7G\2\2\u0229\u022a\7N\2\2\u022a\u022b\7G\2\2\u022b"+ + "\u022c\7E\2\2\u022c\u022d\7V\2\2\u022dx\3\2\2\2\u022e\u022f\7U\2\2\u022f"+ + "\u0230\7J\2\2\u0230\u0231\7Q\2\2\u0231\u0232\7Y\2\2\u0232z\3\2\2\2\u0233"+ + "\u0234\7U\2\2\u0234\u0235\7[\2\2\u0235\u0236\7U\2\2\u0236|\3\2\2\2\u0237"+ + "\u0238\7V\2\2\u0238\u0239\7C\2\2\u0239\u023a\7D\2\2\u023a\u023b\7N\2\2"+ + "\u023b\u023c\7G\2\2\u023c~\3\2\2\2\u023d\u023e\7V\2\2\u023e\u023f\7C\2"+ + "\2\u023f\u0240\7D\2\2\u0240\u0241\7N\2\2\u0241\u0242\7G\2\2\u0242\u0243"+ + "\7U\2\2\u0243\u0080\3\2\2\2\u0244\u0245\7V\2\2\u0245\u0246\7G\2\2\u0246"+ + "\u0247\7Z\2\2\u0247\u0248\7V\2\2\u0248\u0082\3\2\2\2\u0249\u024a\7V\2"+ + "\2\u024a\u024b\7T\2\2\u024b\u024c\7W\2\2\u024c\u024d\7G\2\2\u024d\u0084"+ + "\3\2\2\2\u024e\u024f\7V\2\2\u024f\u0250\7[\2\2\u0250\u0251\7R\2\2\u0251"+ + "\u0252\7G\2\2\u0252\u0086\3\2\2\2\u0253\u0254\7V\2\2\u0254\u0255\7[\2"+ + "\2\u0255\u0256\7R\2\2\u0256\u0257\7G\2\2\u0257\u0258\7U\2\2\u0258\u0088"+ + "\3\2\2\2\u0259\u025a\7W\2\2\u025a\u025b\7U\2\2\u025b\u025c\7K\2\2\u025c"+ + "\u025d\7P\2\2\u025d\u025e\7I\2\2\u025e\u008a\3\2\2\2\u025f\u0260\7X\2"+ + "\2\u0260\u0261\7G\2\2\u0261\u0262\7T\2\2\u0262\u0263\7K\2\2\u0263\u0264"+ + "\7H\2\2\u0264\u0265\7[\2\2\u0265\u008c\3\2\2\2\u0266\u0267\7Y\2\2\u0267"+ + "\u0268\7J\2\2\u0268\u0269\7G\2\2\u0269\u026a\7T\2\2\u026a\u026b\7G\2\2"+ + "\u026b\u008e\3\2\2\2\u026c\u026d\7Y\2\2\u026d\u026e\7K\2\2\u026e\u026f"+ + "\7V\2\2\u026f\u0270\7J\2\2\u0270\u0090\3\2\2\2\u0271\u0272\7?\2\2\u0272"+ + "\u0092\3\2\2\2\u0273\u0274\7>\2\2\u0274\u027b\7@\2\2\u0275\u0276\7#\2"+ + "\2\u0276\u027b\7?\2\2\u0277\u0278\7>\2\2\u0278\u0279\7?\2\2\u0279\u027b"+ + "\7@\2\2\u027a\u0273\3\2\2\2\u027a\u0275\3\2\2\2\u027a\u0277\3\2\2\2\u027b"+ + "\u0094\3\2\2\2\u027c\u027d\7>\2\2\u027d\u0096\3\2\2\2\u027e\u027f\7>\2"+ + 
"\2\u027f\u0280\7?\2\2\u0280\u0098\3\2\2\2\u0281\u0282\7@\2\2\u0282\u009a"+ + "\3\2\2\2\u0283\u0284\7@\2\2\u0284\u0285\7?\2\2\u0285\u009c\3\2\2\2\u0286"+ + "\u0287\7-\2\2\u0287\u009e\3\2\2\2\u0288\u0289\7/\2\2\u0289\u00a0\3\2\2"+ + "\2\u028a\u028b\7,\2\2\u028b\u00a2\3\2\2\2\u028c\u028d\7\61\2\2\u028d\u00a4"+ + "\3\2\2\2\u028e\u028f\7\'\2\2\u028f\u00a6\3\2\2\2\u0290\u0291\7~\2\2\u0291"+ + "\u0292\7~\2\2\u0292\u00a8\3\2\2\2\u0293\u0294\7\60\2\2\u0294\u00aa\3\2"+ + "\2\2\u0295\u0296\7A\2\2\u0296\u00ac\3\2\2\2\u0297\u029d\7)\2\2\u0298\u029c"+ + "\n\2\2\2\u0299\u029a\7)\2\2\u029a\u029c\7)\2\2\u029b\u0298\3\2\2\2\u029b"+ + "\u0299\3\2\2\2\u029c\u029f\3\2\2\2\u029d\u029b\3\2\2\2\u029d\u029e\3\2"+ + "\2\2\u029e\u02a0\3\2\2\2\u029f\u029d\3\2\2\2\u02a0\u02a1\7)\2\2\u02a1"+ + "\u00ae\3\2\2\2\u02a2\u02a4\5\u00bf`\2\u02a3\u02a2\3\2\2\2\u02a4\u02a5"+ + "\3\2\2\2\u02a5\u02a3\3\2\2\2\u02a5\u02a6\3\2\2\2\u02a6\u00b0\3\2\2\2\u02a7"+ + "\u02a9\5\u00bf`\2\u02a8\u02a7\3\2\2\2\u02a9\u02aa\3\2\2\2\u02aa\u02a8"+ + "\3\2\2\2\u02aa\u02ab\3\2\2\2\u02ab\u02ac\3\2\2\2\u02ac\u02b0\5\u00a9U"+ + "\2\u02ad\u02af\5\u00bf`\2\u02ae\u02ad\3\2\2\2\u02af\u02b2\3\2\2\2\u02b0"+ + "\u02ae\3\2\2\2\u02b0\u02b1\3\2\2\2\u02b1\u02d2\3\2\2\2\u02b2\u02b0\3\2"+ + "\2\2\u02b3\u02b5\5\u00a9U\2\u02b4\u02b6\5\u00bf`\2\u02b5\u02b4\3\2\2\2"+ + "\u02b6\u02b7\3\2\2\2\u02b7\u02b5\3\2\2\2\u02b7\u02b8\3\2\2\2\u02b8\u02d2"+ + "\3\2\2\2\u02b9\u02bb\5\u00bf`\2\u02ba\u02b9\3\2\2\2\u02bb\u02bc\3\2\2"+ + "\2\u02bc\u02ba\3\2\2\2\u02bc\u02bd\3\2\2\2\u02bd\u02c5\3\2\2\2\u02be\u02c2"+ + "\5\u00a9U\2\u02bf\u02c1\5\u00bf`\2\u02c0\u02bf\3\2\2\2\u02c1\u02c4\3\2"+ + "\2\2\u02c2\u02c0\3\2\2\2\u02c2\u02c3\3\2\2\2\u02c3\u02c6\3\2\2\2\u02c4"+ + "\u02c2\3\2\2\2\u02c5\u02be\3\2\2\2\u02c5\u02c6\3\2\2\2\u02c6\u02c7\3\2"+ + "\2\2\u02c7\u02c8\5\u00bd_\2\u02c8\u02d2\3\2\2\2\u02c9\u02cb\5\u00a9U\2"+ + "\u02ca\u02cc\5\u00bf`\2\u02cb\u02ca\3\2\2\2\u02cc\u02cd\3\2\2\2\u02cd"+ + "\u02cb\3\2\2\2\u02cd\u02ce\3\2\2\2\u02ce\u02cf\3\2\2\2\u02cf\u02d0\5\u00bd"+ + "_\2\u02d0\u02d2\3\2\2\2\u02d1\u02a8\3\2\2\2\u02d1\u02b3\3\2\2\2\u02d1"+ + "\u02ba\3\2\2\2\u02d1\u02c9\3\2\2\2\u02d2\u00b2\3\2\2\2\u02d3\u02d6\5\u00c1"+ + "a\2\u02d4\u02d6\7a\2\2\u02d5\u02d3\3\2\2\2\u02d5\u02d4\3\2\2\2\u02d6\u02dc"+ + "\3\2\2\2\u02d7\u02db\5\u00c1a\2\u02d8\u02db\5\u00bf`\2\u02d9\u02db\t\3"+ + "\2\2\u02da\u02d7\3\2\2\2\u02da\u02d8\3\2\2\2\u02da\u02d9\3\2\2\2\u02db"+ + "\u02de\3\2\2\2\u02dc\u02da\3\2\2\2\u02dc\u02dd\3\2\2\2\u02dd\u00b4\3\2"+ + "\2\2\u02de\u02dc\3\2\2\2\u02df\u02e3\5\u00bf`\2\u02e0\u02e4\5\u00c1a\2"+ + "\u02e1\u02e4\5\u00bf`\2\u02e2\u02e4\t\4\2\2\u02e3\u02e0\3\2\2\2\u02e3"+ + "\u02e1\3\2\2\2\u02e3\u02e2\3\2\2\2\u02e4\u02e5\3\2\2\2\u02e5\u02e3\3\2"+ + "\2\2\u02e5\u02e6\3\2\2\2\u02e6\u00b6\3\2\2\2\u02e7\u02ec\5\u00c1a\2\u02e8"+ + "\u02ec\5\u00bf`\2\u02e9\u02ec\t\3\2\2\u02ea\u02ec\5\u00a1Q\2\u02eb\u02e7"+ + "\3\2\2\2\u02eb\u02e8\3\2\2\2\u02eb\u02e9\3\2\2\2\u02eb\u02ea\3\2\2\2\u02ec"+ + "\u02ed\3\2\2\2\u02ed\u02eb\3\2\2\2\u02ed\u02ee\3\2\2\2\u02ee\u00b8\3\2"+ + "\2\2\u02ef\u02f5\7$\2\2\u02f0\u02f4\n\5\2\2\u02f1\u02f2\7$\2\2\u02f2\u02f4"+ + "\7$\2\2\u02f3\u02f0\3\2\2\2\u02f3\u02f1\3\2\2\2\u02f4\u02f7\3\2\2\2\u02f5"+ + "\u02f3\3\2\2\2\u02f5\u02f6\3\2\2\2\u02f6\u02f8\3\2\2\2\u02f7\u02f5\3\2"+ + "\2\2\u02f8\u02f9\7$\2\2\u02f9\u00ba\3\2\2\2\u02fa\u0300\7b\2\2\u02fb\u02ff"+ + "\n\6\2\2\u02fc\u02fd\7b\2\2\u02fd\u02ff\7b\2\2\u02fe\u02fb\3\2\2\2\u02fe"+ + "\u02fc\3\2\2\2\u02ff\u0302\3\2\2\2\u0300\u02fe\3\2\2\2\u0300\u0301\3\2"+ + "\2\2\u0301\u0303\3\2\2\2\u0302\u0300\3\2\2\2\u0303\u0304\7b\2\2\u0304"+ + 
"\u00bc\3\2\2\2\u0305\u0307\7G\2\2\u0306\u0308\t\7\2\2\u0307\u0306\3\2"+ + "\2\2\u0307\u0308\3\2\2\2\u0308\u030a\3\2\2\2\u0309\u030b\5\u00bf`\2\u030a"+ + "\u0309\3\2\2\2\u030b\u030c\3\2\2\2\u030c\u030a\3\2\2\2\u030c\u030d\3\2"+ + "\2\2\u030d\u00be\3\2\2\2\u030e\u030f\t\b\2\2\u030f\u00c0\3\2\2\2\u0310"+ + "\u0311\t\t\2\2\u0311\u00c2\3\2\2\2\u0312\u0313\7/\2\2\u0313\u0314\7/\2"+ + "\2\u0314\u0318\3\2\2\2\u0315\u0317\n\n\2\2\u0316\u0315\3\2\2\2\u0317\u031a"+ + "\3\2\2\2\u0318\u0316\3\2\2\2\u0318\u0319\3\2\2\2\u0319\u031c\3\2\2\2\u031a"+ + "\u0318\3\2\2\2\u031b\u031d\7\17\2\2\u031c\u031b\3\2\2\2\u031c\u031d\3"+ + "\2\2\2\u031d\u031f\3\2\2\2\u031e\u0320\7\f\2\2\u031f\u031e\3\2\2\2\u031f"+ + "\u0320\3\2\2\2\u0320\u0321\3\2\2\2\u0321\u0322\bb\2\2\u0322\u00c4\3\2"+ + "\2\2\u0323\u0324\7\61\2\2\u0324\u0325\7,\2\2\u0325\u032a\3\2\2\2\u0326"+ + "\u0329\5\u00c5c\2\u0327\u0329\13\2\2\2\u0328\u0326\3\2\2\2\u0328\u0327"+ + "\3\2\2\2\u0329\u032c\3\2\2\2\u032a\u032b\3\2\2\2\u032a\u0328\3\2\2\2\u032b"+ + "\u032d\3\2\2\2\u032c\u032a\3\2\2\2\u032d\u032e\7,\2\2\u032e\u032f\7\61"+ + "\2\2\u032f\u0330\3\2\2\2\u0330\u0331\bc\2\2\u0331\u00c6\3\2\2\2\u0332"+ + "\u0334\t\13\2\2\u0333\u0332\3\2\2\2\u0334\u0335\3\2\2\2\u0335\u0333\3"+ + "\2\2\2\u0335\u0336\3\2\2\2\u0336\u0337\3\2\2\2\u0337\u0338\bd\2\2\u0338"+ + "\u00c8\3\2\2\2\u0339\u033a\13\2\2\2\u033a\u00ca\3\2\2\2\"\2\u027a\u029b"+ + "\u029d\u02a5\u02aa\u02b0\u02b7\u02bc\u02c2\u02c5\u02cd\u02d1\u02d5\u02da"+ + "\u02dc\u02e3\u02e5\u02eb\u02ed\u02f3\u02f5\u02fe\u0300\u0307\u030c\u0318"+ + "\u031c\u031f\u0328\u032a\u0335\3\2\3\2"; + public static final ATN _ATN = + new ATNDeserializer().deserialize(_serializedATN.toCharArray()); + static { + _decisionToDFA = new DFA[_ATN.getNumberOfDecisions()]; + for (int i = 0; i < _ATN.getNumberOfDecisions(); i++) { + _decisionToDFA[i] = new DFA(_ATN.getDecisionState(i), i); + } + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java new file mode 100644 index 0000000000000..48f6eb4a7c816 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseListener.java @@ -0,0 +1,887 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +// ANTLR GENERATED CODE: DO NOT EDIT +package org.elasticsearch.xpack.sql.parser; +import org.antlr.v4.runtime.tree.ParseTreeListener; + +/** + * This interface defines a complete listener for a parse tree produced by + * {@link SqlBaseParser}. + */ +interface SqlBaseListener extends ParseTreeListener { + /** + * Enter a parse tree produced by {@link SqlBaseParser#singleStatement}. + * @param ctx the parse tree + */ + void enterSingleStatement(SqlBaseParser.SingleStatementContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#singleStatement}. + * @param ctx the parse tree + */ + void exitSingleStatement(SqlBaseParser.SingleStatementContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#singleExpression}. + * @param ctx the parse tree + */ + void enterSingleExpression(SqlBaseParser.SingleExpressionContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#singleExpression}. 
+ * @param ctx the parse tree + */ + void exitSingleExpression(SqlBaseParser.SingleExpressionContext ctx); + /** + * Enter a parse tree produced by the {@code statementDefault} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void enterStatementDefault(SqlBaseParser.StatementDefaultContext ctx); + /** + * Exit a parse tree produced by the {@code statementDefault} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void exitStatementDefault(SqlBaseParser.StatementDefaultContext ctx); + /** + * Enter a parse tree produced by the {@code explain} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void enterExplain(SqlBaseParser.ExplainContext ctx); + /** + * Exit a parse tree produced by the {@code explain} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void exitExplain(SqlBaseParser.ExplainContext ctx); + /** + * Enter a parse tree produced by the {@code debug} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void enterDebug(SqlBaseParser.DebugContext ctx); + /** + * Exit a parse tree produced by the {@code debug} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void exitDebug(SqlBaseParser.DebugContext ctx); + /** + * Enter a parse tree produced by the {@code showTables} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void enterShowTables(SqlBaseParser.ShowTablesContext ctx); + /** + * Exit a parse tree produced by the {@code showTables} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void exitShowTables(SqlBaseParser.ShowTablesContext ctx); + /** + * Enter a parse tree produced by the {@code showColumns} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void enterShowColumns(SqlBaseParser.ShowColumnsContext ctx); + /** + * Exit a parse tree produced by the {@code showColumns} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void exitShowColumns(SqlBaseParser.ShowColumnsContext ctx); + /** + * Enter a parse tree produced by the {@code showFunctions} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void enterShowFunctions(SqlBaseParser.ShowFunctionsContext ctx); + /** + * Exit a parse tree produced by the {@code showFunctions} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void exitShowFunctions(SqlBaseParser.ShowFunctionsContext ctx); + /** + * Enter a parse tree produced by the {@code showSchemas} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void enterShowSchemas(SqlBaseParser.ShowSchemasContext ctx); + /** + * Exit a parse tree produced by the {@code showSchemas} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void exitShowSchemas(SqlBaseParser.ShowSchemasContext ctx); + /** + * Enter a parse tree produced by the {@code sysCatalogs} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void enterSysCatalogs(SqlBaseParser.SysCatalogsContext ctx); + /** + * Exit a parse tree produced by the {@code sysCatalogs} + * labeled alternative in {@link SqlBaseParser#statement}. 
+ * @param ctx the parse tree + */ + void exitSysCatalogs(SqlBaseParser.SysCatalogsContext ctx); + /** + * Enter a parse tree produced by the {@code sysTables} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void enterSysTables(SqlBaseParser.SysTablesContext ctx); + /** + * Exit a parse tree produced by the {@code sysTables} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void exitSysTables(SqlBaseParser.SysTablesContext ctx); + /** + * Enter a parse tree produced by the {@code sysColumns} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void enterSysColumns(SqlBaseParser.SysColumnsContext ctx); + /** + * Exit a parse tree produced by the {@code sysColumns} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void exitSysColumns(SqlBaseParser.SysColumnsContext ctx); + /** + * Enter a parse tree produced by the {@code sysTypes} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void enterSysTypes(SqlBaseParser.SysTypesContext ctx); + /** + * Exit a parse tree produced by the {@code sysTypes} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void exitSysTypes(SqlBaseParser.SysTypesContext ctx); + /** + * Enter a parse tree produced by the {@code sysTableTypes} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void enterSysTableTypes(SqlBaseParser.SysTableTypesContext ctx); + /** + * Exit a parse tree produced by the {@code sysTableTypes} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + */ + void exitSysTableTypes(SqlBaseParser.SysTableTypesContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#query}. + * @param ctx the parse tree + */ + void enterQuery(SqlBaseParser.QueryContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#query}. + * @param ctx the parse tree + */ + void exitQuery(SqlBaseParser.QueryContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#queryNoWith}. + * @param ctx the parse tree + */ + void enterQueryNoWith(SqlBaseParser.QueryNoWithContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#queryNoWith}. + * @param ctx the parse tree + */ + void exitQueryNoWith(SqlBaseParser.QueryNoWithContext ctx); + /** + * Enter a parse tree produced by the {@code queryPrimaryDefault} + * labeled alternative in {@link SqlBaseParser#queryTerm}. + * @param ctx the parse tree + */ + void enterQueryPrimaryDefault(SqlBaseParser.QueryPrimaryDefaultContext ctx); + /** + * Exit a parse tree produced by the {@code queryPrimaryDefault} + * labeled alternative in {@link SqlBaseParser#queryTerm}. + * @param ctx the parse tree + */ + void exitQueryPrimaryDefault(SqlBaseParser.QueryPrimaryDefaultContext ctx); + /** + * Enter a parse tree produced by the {@code subquery} + * labeled alternative in {@link SqlBaseParser#queryTerm}. + * @param ctx the parse tree + */ + void enterSubquery(SqlBaseParser.SubqueryContext ctx); + /** + * Exit a parse tree produced by the {@code subquery} + * labeled alternative in {@link SqlBaseParser#queryTerm}. + * @param ctx the parse tree + */ + void exitSubquery(SqlBaseParser.SubqueryContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#orderBy}. 
+ * @param ctx the parse tree + */ + void enterOrderBy(SqlBaseParser.OrderByContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#orderBy}. + * @param ctx the parse tree + */ + void exitOrderBy(SqlBaseParser.OrderByContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#querySpecification}. + * @param ctx the parse tree + */ + void enterQuerySpecification(SqlBaseParser.QuerySpecificationContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#querySpecification}. + * @param ctx the parse tree + */ + void exitQuerySpecification(SqlBaseParser.QuerySpecificationContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#fromClause}. + * @param ctx the parse tree + */ + void enterFromClause(SqlBaseParser.FromClauseContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#fromClause}. + * @param ctx the parse tree + */ + void exitFromClause(SqlBaseParser.FromClauseContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#groupBy}. + * @param ctx the parse tree + */ + void enterGroupBy(SqlBaseParser.GroupByContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#groupBy}. + * @param ctx the parse tree + */ + void exitGroupBy(SqlBaseParser.GroupByContext ctx); + /** + * Enter a parse tree produced by the {@code singleGroupingSet} + * labeled alternative in {@link SqlBaseParser#groupingElement}. + * @param ctx the parse tree + */ + void enterSingleGroupingSet(SqlBaseParser.SingleGroupingSetContext ctx); + /** + * Exit a parse tree produced by the {@code singleGroupingSet} + * labeled alternative in {@link SqlBaseParser#groupingElement}. + * @param ctx the parse tree + */ + void exitSingleGroupingSet(SqlBaseParser.SingleGroupingSetContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#groupingExpressions}. + * @param ctx the parse tree + */ + void enterGroupingExpressions(SqlBaseParser.GroupingExpressionsContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#groupingExpressions}. + * @param ctx the parse tree + */ + void exitGroupingExpressions(SqlBaseParser.GroupingExpressionsContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#namedQuery}. + * @param ctx the parse tree + */ + void enterNamedQuery(SqlBaseParser.NamedQueryContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#namedQuery}. + * @param ctx the parse tree + */ + void exitNamedQuery(SqlBaseParser.NamedQueryContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#setQuantifier}. + * @param ctx the parse tree + */ + void enterSetQuantifier(SqlBaseParser.SetQuantifierContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#setQuantifier}. + * @param ctx the parse tree + */ + void exitSetQuantifier(SqlBaseParser.SetQuantifierContext ctx); + /** + * Enter a parse tree produced by the {@code selectExpression} + * labeled alternative in {@link SqlBaseParser#selectItem}. + * @param ctx the parse tree + */ + void enterSelectExpression(SqlBaseParser.SelectExpressionContext ctx); + /** + * Exit a parse tree produced by the {@code selectExpression} + * labeled alternative in {@link SqlBaseParser#selectItem}. + * @param ctx the parse tree + */ + void exitSelectExpression(SqlBaseParser.SelectExpressionContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#relation}. 
+ * @param ctx the parse tree + */ + void enterRelation(SqlBaseParser.RelationContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#relation}. + * @param ctx the parse tree + */ + void exitRelation(SqlBaseParser.RelationContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#joinRelation}. + * @param ctx the parse tree + */ + void enterJoinRelation(SqlBaseParser.JoinRelationContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#joinRelation}. + * @param ctx the parse tree + */ + void exitJoinRelation(SqlBaseParser.JoinRelationContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#joinType}. + * @param ctx the parse tree + */ + void enterJoinType(SqlBaseParser.JoinTypeContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#joinType}. + * @param ctx the parse tree + */ + void exitJoinType(SqlBaseParser.JoinTypeContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#joinCriteria}. + * @param ctx the parse tree + */ + void enterJoinCriteria(SqlBaseParser.JoinCriteriaContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#joinCriteria}. + * @param ctx the parse tree + */ + void exitJoinCriteria(SqlBaseParser.JoinCriteriaContext ctx); + /** + * Enter a parse tree produced by the {@code tableName} + * labeled alternative in {@link SqlBaseParser#relationPrimary}. + * @param ctx the parse tree + */ + void enterTableName(SqlBaseParser.TableNameContext ctx); + /** + * Exit a parse tree produced by the {@code tableName} + * labeled alternative in {@link SqlBaseParser#relationPrimary}. + * @param ctx the parse tree + */ + void exitTableName(SqlBaseParser.TableNameContext ctx); + /** + * Enter a parse tree produced by the {@code aliasedQuery} + * labeled alternative in {@link SqlBaseParser#relationPrimary}. + * @param ctx the parse tree + */ + void enterAliasedQuery(SqlBaseParser.AliasedQueryContext ctx); + /** + * Exit a parse tree produced by the {@code aliasedQuery} + * labeled alternative in {@link SqlBaseParser#relationPrimary}. + * @param ctx the parse tree + */ + void exitAliasedQuery(SqlBaseParser.AliasedQueryContext ctx); + /** + * Enter a parse tree produced by the {@code aliasedRelation} + * labeled alternative in {@link SqlBaseParser#relationPrimary}. + * @param ctx the parse tree + */ + void enterAliasedRelation(SqlBaseParser.AliasedRelationContext ctx); + /** + * Exit a parse tree produced by the {@code aliasedRelation} + * labeled alternative in {@link SqlBaseParser#relationPrimary}. + * @param ctx the parse tree + */ + void exitAliasedRelation(SqlBaseParser.AliasedRelationContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#expression}. + * @param ctx the parse tree + */ + void enterExpression(SqlBaseParser.ExpressionContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#expression}. + * @param ctx the parse tree + */ + void exitExpression(SqlBaseParser.ExpressionContext ctx); + /** + * Enter a parse tree produced by the {@code logicalNot} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void enterLogicalNot(SqlBaseParser.LogicalNotContext ctx); + /** + * Exit a parse tree produced by the {@code logicalNot} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. 
+ * @param ctx the parse tree + */ + void exitLogicalNot(SqlBaseParser.LogicalNotContext ctx); + /** + * Enter a parse tree produced by the {@code stringQuery} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void enterStringQuery(SqlBaseParser.StringQueryContext ctx); + /** + * Exit a parse tree produced by the {@code stringQuery} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void exitStringQuery(SqlBaseParser.StringQueryContext ctx); + /** + * Enter a parse tree produced by the {@code booleanDefault} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void enterBooleanDefault(SqlBaseParser.BooleanDefaultContext ctx); + /** + * Exit a parse tree produced by the {@code booleanDefault} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void exitBooleanDefault(SqlBaseParser.BooleanDefaultContext ctx); + /** + * Enter a parse tree produced by the {@code exists} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void enterExists(SqlBaseParser.ExistsContext ctx); + /** + * Exit a parse tree produced by the {@code exists} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void exitExists(SqlBaseParser.ExistsContext ctx); + /** + * Enter a parse tree produced by the {@code multiMatchQuery} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void enterMultiMatchQuery(SqlBaseParser.MultiMatchQueryContext ctx); + /** + * Exit a parse tree produced by the {@code multiMatchQuery} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void exitMultiMatchQuery(SqlBaseParser.MultiMatchQueryContext ctx); + /** + * Enter a parse tree produced by the {@code matchQuery} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void enterMatchQuery(SqlBaseParser.MatchQueryContext ctx); + /** + * Exit a parse tree produced by the {@code matchQuery} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void exitMatchQuery(SqlBaseParser.MatchQueryContext ctx); + /** + * Enter a parse tree produced by the {@code logicalBinary} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void enterLogicalBinary(SqlBaseParser.LogicalBinaryContext ctx); + /** + * Exit a parse tree produced by the {@code logicalBinary} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + */ + void exitLogicalBinary(SqlBaseParser.LogicalBinaryContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#predicated}. + * @param ctx the parse tree + */ + void enterPredicated(SqlBaseParser.PredicatedContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#predicated}. + * @param ctx the parse tree + */ + void exitPredicated(SqlBaseParser.PredicatedContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#predicate}. + * @param ctx the parse tree + */ + void enterPredicate(SqlBaseParser.PredicateContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#predicate}. 
+ * @param ctx the parse tree + */ + void exitPredicate(SqlBaseParser.PredicateContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#pattern}. + * @param ctx the parse tree + */ + void enterPattern(SqlBaseParser.PatternContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#pattern}. + * @param ctx the parse tree + */ + void exitPattern(SqlBaseParser.PatternContext ctx); + /** + * Enter a parse tree produced by the {@code valueExpressionDefault} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + */ + void enterValueExpressionDefault(SqlBaseParser.ValueExpressionDefaultContext ctx); + /** + * Exit a parse tree produced by the {@code valueExpressionDefault} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + */ + void exitValueExpressionDefault(SqlBaseParser.ValueExpressionDefaultContext ctx); + /** + * Enter a parse tree produced by the {@code comparison} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + */ + void enterComparison(SqlBaseParser.ComparisonContext ctx); + /** + * Exit a parse tree produced by the {@code comparison} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + */ + void exitComparison(SqlBaseParser.ComparisonContext ctx); + /** + * Enter a parse tree produced by the {@code arithmeticBinary} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + */ + void enterArithmeticBinary(SqlBaseParser.ArithmeticBinaryContext ctx); + /** + * Exit a parse tree produced by the {@code arithmeticBinary} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + */ + void exitArithmeticBinary(SqlBaseParser.ArithmeticBinaryContext ctx); + /** + * Enter a parse tree produced by the {@code arithmeticUnary} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + */ + void enterArithmeticUnary(SqlBaseParser.ArithmeticUnaryContext ctx); + /** + * Exit a parse tree produced by the {@code arithmeticUnary} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + */ + void exitArithmeticUnary(SqlBaseParser.ArithmeticUnaryContext ctx); + /** + * Enter a parse tree produced by the {@code cast} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void enterCast(SqlBaseParser.CastContext ctx); + /** + * Exit a parse tree produced by the {@code cast} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void exitCast(SqlBaseParser.CastContext ctx); + /** + * Enter a parse tree produced by the {@code extract} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void enterExtract(SqlBaseParser.ExtractContext ctx); + /** + * Exit a parse tree produced by the {@code extract} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void exitExtract(SqlBaseParser.ExtractContext ctx); + /** + * Enter a parse tree produced by the {@code constantDefault} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. 
+ * @param ctx the parse tree + */ + void enterConstantDefault(SqlBaseParser.ConstantDefaultContext ctx); + /** + * Exit a parse tree produced by the {@code constantDefault} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void exitConstantDefault(SqlBaseParser.ConstantDefaultContext ctx); + /** + * Enter a parse tree produced by the {@code star} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void enterStar(SqlBaseParser.StarContext ctx); + /** + * Exit a parse tree produced by the {@code star} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void exitStar(SqlBaseParser.StarContext ctx); + /** + * Enter a parse tree produced by the {@code functionCall} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void enterFunctionCall(SqlBaseParser.FunctionCallContext ctx); + /** + * Exit a parse tree produced by the {@code functionCall} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void exitFunctionCall(SqlBaseParser.FunctionCallContext ctx); + /** + * Enter a parse tree produced by the {@code subqueryExpression} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void enterSubqueryExpression(SqlBaseParser.SubqueryExpressionContext ctx); + /** + * Exit a parse tree produced by the {@code subqueryExpression} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void exitSubqueryExpression(SqlBaseParser.SubqueryExpressionContext ctx); + /** + * Enter a parse tree produced by the {@code columnReference} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void enterColumnReference(SqlBaseParser.ColumnReferenceContext ctx); + /** + * Exit a parse tree produced by the {@code columnReference} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void exitColumnReference(SqlBaseParser.ColumnReferenceContext ctx); + /** + * Enter a parse tree produced by the {@code dereference} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void enterDereference(SqlBaseParser.DereferenceContext ctx); + /** + * Exit a parse tree produced by the {@code dereference} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void exitDereference(SqlBaseParser.DereferenceContext ctx); + /** + * Enter a parse tree produced by the {@code parenthesizedExpression} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void enterParenthesizedExpression(SqlBaseParser.ParenthesizedExpressionContext ctx); + /** + * Exit a parse tree produced by the {@code parenthesizedExpression} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + */ + void exitParenthesizedExpression(SqlBaseParser.ParenthesizedExpressionContext ctx); + /** + * Enter a parse tree produced by the {@code nullLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + */ + void enterNullLiteral(SqlBaseParser.NullLiteralContext ctx); + /** + * Exit a parse tree produced by the {@code nullLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. 
+ * @param ctx the parse tree + */ + void exitNullLiteral(SqlBaseParser.NullLiteralContext ctx); + /** + * Enter a parse tree produced by the {@code numericLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + */ + void enterNumericLiteral(SqlBaseParser.NumericLiteralContext ctx); + /** + * Exit a parse tree produced by the {@code numericLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + */ + void exitNumericLiteral(SqlBaseParser.NumericLiteralContext ctx); + /** + * Enter a parse tree produced by the {@code booleanLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + */ + void enterBooleanLiteral(SqlBaseParser.BooleanLiteralContext ctx); + /** + * Exit a parse tree produced by the {@code booleanLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + */ + void exitBooleanLiteral(SqlBaseParser.BooleanLiteralContext ctx); + /** + * Enter a parse tree produced by the {@code stringLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + */ + void enterStringLiteral(SqlBaseParser.StringLiteralContext ctx); + /** + * Exit a parse tree produced by the {@code stringLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + */ + void exitStringLiteral(SqlBaseParser.StringLiteralContext ctx); + /** + * Enter a parse tree produced by the {@code paramLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + */ + void enterParamLiteral(SqlBaseParser.ParamLiteralContext ctx); + /** + * Exit a parse tree produced by the {@code paramLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + */ + void exitParamLiteral(SqlBaseParser.ParamLiteralContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#comparisonOperator}. + * @param ctx the parse tree + */ + void enterComparisonOperator(SqlBaseParser.ComparisonOperatorContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#comparisonOperator}. + * @param ctx the parse tree + */ + void exitComparisonOperator(SqlBaseParser.ComparisonOperatorContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#booleanValue}. + * @param ctx the parse tree + */ + void enterBooleanValue(SqlBaseParser.BooleanValueContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#booleanValue}. + * @param ctx the parse tree + */ + void exitBooleanValue(SqlBaseParser.BooleanValueContext ctx); + /** + * Enter a parse tree produced by the {@code primitiveDataType} + * labeled alternative in {@link SqlBaseParser#dataType}. + * @param ctx the parse tree + */ + void enterPrimitiveDataType(SqlBaseParser.PrimitiveDataTypeContext ctx); + /** + * Exit a parse tree produced by the {@code primitiveDataType} + * labeled alternative in {@link SqlBaseParser#dataType}. + * @param ctx the parse tree + */ + void exitPrimitiveDataType(SqlBaseParser.PrimitiveDataTypeContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#qualifiedName}. + * @param ctx the parse tree + */ + void enterQualifiedName(SqlBaseParser.QualifiedNameContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#qualifiedName}. 
+ * @param ctx the parse tree + */ + void exitQualifiedName(SqlBaseParser.QualifiedNameContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#identifier}. + * @param ctx the parse tree + */ + void enterIdentifier(SqlBaseParser.IdentifierContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#identifier}. + * @param ctx the parse tree + */ + void exitIdentifier(SqlBaseParser.IdentifierContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#tableIdentifier}. + * @param ctx the parse tree + */ + void enterTableIdentifier(SqlBaseParser.TableIdentifierContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#tableIdentifier}. + * @param ctx the parse tree + */ + void exitTableIdentifier(SqlBaseParser.TableIdentifierContext ctx); + /** + * Enter a parse tree produced by the {@code quotedIdentifier} + * labeled alternative in {@link SqlBaseParser#quoteIdentifier}. + * @param ctx the parse tree + */ + void enterQuotedIdentifier(SqlBaseParser.QuotedIdentifierContext ctx); + /** + * Exit a parse tree produced by the {@code quotedIdentifier} + * labeled alternative in {@link SqlBaseParser#quoteIdentifier}. + * @param ctx the parse tree + */ + void exitQuotedIdentifier(SqlBaseParser.QuotedIdentifierContext ctx); + /** + * Enter a parse tree produced by the {@code backQuotedIdentifier} + * labeled alternative in {@link SqlBaseParser#quoteIdentifier}. + * @param ctx the parse tree + */ + void enterBackQuotedIdentifier(SqlBaseParser.BackQuotedIdentifierContext ctx); + /** + * Exit a parse tree produced by the {@code backQuotedIdentifier} + * labeled alternative in {@link SqlBaseParser#quoteIdentifier}. + * @param ctx the parse tree + */ + void exitBackQuotedIdentifier(SqlBaseParser.BackQuotedIdentifierContext ctx); + /** + * Enter a parse tree produced by the {@code unquotedIdentifier} + * labeled alternative in {@link SqlBaseParser#unquoteIdentifier}. + * @param ctx the parse tree + */ + void enterUnquotedIdentifier(SqlBaseParser.UnquotedIdentifierContext ctx); + /** + * Exit a parse tree produced by the {@code unquotedIdentifier} + * labeled alternative in {@link SqlBaseParser#unquoteIdentifier}. + * @param ctx the parse tree + */ + void exitUnquotedIdentifier(SqlBaseParser.UnquotedIdentifierContext ctx); + /** + * Enter a parse tree produced by the {@code digitIdentifier} + * labeled alternative in {@link SqlBaseParser#unquoteIdentifier}. + * @param ctx the parse tree + */ + void enterDigitIdentifier(SqlBaseParser.DigitIdentifierContext ctx); + /** + * Exit a parse tree produced by the {@code digitIdentifier} + * labeled alternative in {@link SqlBaseParser#unquoteIdentifier}. + * @param ctx the parse tree + */ + void exitDigitIdentifier(SqlBaseParser.DigitIdentifierContext ctx); + /** + * Enter a parse tree produced by the {@code decimalLiteral} + * labeled alternative in {@link SqlBaseParser#number}. + * @param ctx the parse tree + */ + void enterDecimalLiteral(SqlBaseParser.DecimalLiteralContext ctx); + /** + * Exit a parse tree produced by the {@code decimalLiteral} + * labeled alternative in {@link SqlBaseParser#number}. + * @param ctx the parse tree + */ + void exitDecimalLiteral(SqlBaseParser.DecimalLiteralContext ctx); + /** + * Enter a parse tree produced by the {@code integerLiteral} + * labeled alternative in {@link SqlBaseParser#number}. 
+ * @param ctx the parse tree + */ + void enterIntegerLiteral(SqlBaseParser.IntegerLiteralContext ctx); + /** + * Exit a parse tree produced by the {@code integerLiteral} + * labeled alternative in {@link SqlBaseParser#number}. + * @param ctx the parse tree + */ + void exitIntegerLiteral(SqlBaseParser.IntegerLiteralContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#string}. + * @param ctx the parse tree + */ + void enterString(SqlBaseParser.StringContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#string}. + * @param ctx the parse tree + */ + void exitString(SqlBaseParser.StringContext ctx); + /** + * Enter a parse tree produced by {@link SqlBaseParser#nonReserved}. + * @param ctx the parse tree + */ + void enterNonReserved(SqlBaseParser.NonReservedContext ctx); + /** + * Exit a parse tree produced by {@link SqlBaseParser#nonReserved}. + * @param ctx the parse tree + */ + void exitNonReserved(SqlBaseParser.NonReservedContext ctx); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java new file mode 100644 index 0000000000000..e9d7ff2639954 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseParser.java @@ -0,0 +1,5245 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +// ANTLR GENERATED CODE: DO NOT EDIT +package org.elasticsearch.xpack.sql.parser; +import org.antlr.v4.runtime.atn.*; +import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.*; +import org.antlr.v4.runtime.misc.*; +import org.antlr.v4.runtime.tree.*; +import java.util.List; +import java.util.Iterator; +import java.util.ArrayList; + +@SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"}) +class SqlBaseParser extends Parser { + static { RuntimeMetaData.checkVersion("4.5.3", RuntimeMetaData.VERSION); } + + protected static final DFA[] _decisionToDFA; + protected static final PredictionContextCache _sharedContextCache = + new PredictionContextCache(); + public static final int + T__0=1, T__1=2, T__2=3, T__3=4, ALL=5, ANALYZE=6, ANALYZED=7, AND=8, ANY=9, + AS=10, ASC=11, BETWEEN=12, BY=13, CAST=14, CATALOG=15, CATALOGS=16, COLUMNS=17, + DEBUG=18, DESC=19, DESCRIBE=20, DISTINCT=21, ESCAPE=22, EXECUTABLE=23, + EXISTS=24, EXPLAIN=25, EXTRACT=26, FALSE=27, FORMAT=28, FROM=29, FULL=30, + FUNCTIONS=31, GRAPHVIZ=32, GROUP=33, HAVING=34, IN=35, INNER=36, IS=37, + JOIN=38, LEFT=39, LIKE=40, LIMIT=41, MAPPED=42, MATCH=43, NATURAL=44, + NOT=45, NULL=46, ON=47, OPTIMIZED=48, OR=49, ORDER=50, OUTER=51, PARSED=52, + PHYSICAL=53, PLAN=54, RIGHT=55, RLIKE=56, QUERY=57, SCHEMAS=58, SELECT=59, + SHOW=60, SYS=61, TABLE=62, TABLES=63, TEXT=64, TRUE=65, TYPE=66, TYPES=67, + USING=68, VERIFY=69, WHERE=70, WITH=71, EQ=72, NEQ=73, LT=74, LTE=75, + GT=76, GTE=77, PLUS=78, MINUS=79, ASTERISK=80, SLASH=81, PERCENT=82, CONCAT=83, + DOT=84, PARAM=85, STRING=86, INTEGER_VALUE=87, DECIMAL_VALUE=88, IDENTIFIER=89, + DIGIT_IDENTIFIER=90, TABLE_IDENTIFIER=91, QUOTED_IDENTIFIER=92, BACKQUOTED_IDENTIFIER=93, + SIMPLE_COMMENT=94, BRACKETED_COMMENT=95, WS=96, UNRECOGNIZED=97, DELIMITER=98; + public static final int + RULE_singleStatement = 0, RULE_singleExpression = 1, RULE_statement = 2, + RULE_query = 3, RULE_queryNoWith = 
4, RULE_queryTerm = 5, RULE_orderBy = 6, + RULE_querySpecification = 7, RULE_fromClause = 8, RULE_groupBy = 9, RULE_groupingElement = 10, + RULE_groupingExpressions = 11, RULE_namedQuery = 12, RULE_setQuantifier = 13, + RULE_selectItem = 14, RULE_relation = 15, RULE_joinRelation = 16, RULE_joinType = 17, + RULE_joinCriteria = 18, RULE_relationPrimary = 19, RULE_expression = 20, + RULE_booleanExpression = 21, RULE_predicated = 22, RULE_predicate = 23, + RULE_pattern = 24, RULE_valueExpression = 25, RULE_primaryExpression = 26, + RULE_constant = 27, RULE_comparisonOperator = 28, RULE_booleanValue = 29, + RULE_dataType = 30, RULE_qualifiedName = 31, RULE_identifier = 32, RULE_tableIdentifier = 33, + RULE_quoteIdentifier = 34, RULE_unquoteIdentifier = 35, RULE_number = 36, + RULE_string = 37, RULE_nonReserved = 38; + public static final String[] ruleNames = { + "singleStatement", "singleExpression", "statement", "query", "queryNoWith", + "queryTerm", "orderBy", "querySpecification", "fromClause", "groupBy", + "groupingElement", "groupingExpressions", "namedQuery", "setQuantifier", + "selectItem", "relation", "joinRelation", "joinType", "joinCriteria", + "relationPrimary", "expression", "booleanExpression", "predicated", "predicate", + "pattern", "valueExpression", "primaryExpression", "constant", "comparisonOperator", + "booleanValue", "dataType", "qualifiedName", "identifier", "tableIdentifier", + "quoteIdentifier", "unquoteIdentifier", "number", "string", "nonReserved" + }; + + private static final String[] _LITERAL_NAMES = { + null, "'('", "')'", "','", "':'", "'ALL'", "'ANALYZE'", "'ANALYZED'", + "'AND'", "'ANY'", "'AS'", "'ASC'", "'BETWEEN'", "'BY'", "'CAST'", "'CATALOG'", + "'CATALOGS'", "'COLUMNS'", "'DEBUG'", "'DESC'", "'DESCRIBE'", "'DISTINCT'", + "'ESCAPE'", "'EXECUTABLE'", "'EXISTS'", "'EXPLAIN'", "'EXTRACT'", "'FALSE'", + "'FORMAT'", "'FROM'", "'FULL'", "'FUNCTIONS'", "'GRAPHVIZ'", "'GROUP'", + "'HAVING'", "'IN'", "'INNER'", "'IS'", "'JOIN'", "'LEFT'", "'LIKE'", "'LIMIT'", + "'MAPPED'", "'MATCH'", "'NATURAL'", "'NOT'", "'NULL'", "'ON'", "'OPTIMIZED'", + "'OR'", "'ORDER'", "'OUTER'", "'PARSED'", "'PHYSICAL'", "'PLAN'", "'RIGHT'", + "'RLIKE'", "'QUERY'", "'SCHEMAS'", "'SELECT'", "'SHOW'", "'SYS'", "'TABLE'", + "'TABLES'", "'TEXT'", "'TRUE'", "'TYPE'", "'TYPES'", "'USING'", "'VERIFY'", + "'WHERE'", "'WITH'", "'='", null, "'<'", "'<='", "'>'", "'>='", "'+'", + "'-'", "'*'", "'/'", "'%'", "'||'", "'.'", "'?'" + }; + private static final String[] _SYMBOLIC_NAMES = { + null, null, null, null, null, "ALL", "ANALYZE", "ANALYZED", "AND", "ANY", + "AS", "ASC", "BETWEEN", "BY", "CAST", "CATALOG", "CATALOGS", "COLUMNS", + "DEBUG", "DESC", "DESCRIBE", "DISTINCT", "ESCAPE", "EXECUTABLE", "EXISTS", + "EXPLAIN", "EXTRACT", "FALSE", "FORMAT", "FROM", "FULL", "FUNCTIONS", + "GRAPHVIZ", "GROUP", "HAVING", "IN", "INNER", "IS", "JOIN", "LEFT", "LIKE", + "LIMIT", "MAPPED", "MATCH", "NATURAL", "NOT", "NULL", "ON", "OPTIMIZED", + "OR", "ORDER", "OUTER", "PARSED", "PHYSICAL", "PLAN", "RIGHT", "RLIKE", + "QUERY", "SCHEMAS", "SELECT", "SHOW", "SYS", "TABLE", "TABLES", "TEXT", + "TRUE", "TYPE", "TYPES", "USING", "VERIFY", "WHERE", "WITH", "EQ", "NEQ", + "LT", "LTE", "GT", "GTE", "PLUS", "MINUS", "ASTERISK", "SLASH", "PERCENT", + "CONCAT", "DOT", "PARAM", "STRING", "INTEGER_VALUE", "DECIMAL_VALUE", + "IDENTIFIER", "DIGIT_IDENTIFIER", "TABLE_IDENTIFIER", "QUOTED_IDENTIFIER", + "BACKQUOTED_IDENTIFIER", "SIMPLE_COMMENT", "BRACKETED_COMMENT", "WS", + "UNRECOGNIZED", "DELIMITER" + }; + public static final Vocabulary 
VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); + + /** + * @deprecated Use {@link #VOCABULARY} instead. + */ + @Deprecated + public static final String[] tokenNames; + static { + tokenNames = new String[_SYMBOLIC_NAMES.length]; + for (int i = 0; i < tokenNames.length; i++) { + tokenNames[i] = VOCABULARY.getLiteralName(i); + if (tokenNames[i] == null) { + tokenNames[i] = VOCABULARY.getSymbolicName(i); + } + + if (tokenNames[i] == null) { + tokenNames[i] = ""; + } + } + } + + @Override + @Deprecated + public String[] getTokenNames() { + return tokenNames; + } + + @Override + + public Vocabulary getVocabulary() { + return VOCABULARY; + } + + @Override + public String getGrammarFileName() { return "SqlBase.g4"; } + + @Override + public String[] getRuleNames() { return ruleNames; } + + @Override + public String getSerializedATN() { return _serializedATN; } + + @Override + public ATN getATN() { return _ATN; } + + public SqlBaseParser(TokenStream input) { + super(input); + _interp = new ParserATNSimulator(this,_ATN,_decisionToDFA,_sharedContextCache); + } + public static class SingleStatementContext extends ParserRuleContext { + public StatementContext statement() { + return getRuleContext(StatementContext.class,0); + } + public TerminalNode EOF() { return getToken(SqlBaseParser.EOF, 0); } + public SingleStatementContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_singleStatement; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterSingleStatement(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitSingleStatement(this); + } + @Override + public <T> T accept(ParseTreeVisitor<? extends T> visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor<? extends T>)visitor).visitSingleStatement(this); + else return visitor.visitChildren(this); + } + } + + public final SingleStatementContext singleStatement() throws RecognitionException { + SingleStatementContext _localctx = new SingleStatementContext(_ctx, getState()); + enterRule(_localctx, 0, RULE_singleStatement); + try { + enterOuterAlt(_localctx, 1); + { + setState(78); + statement(); + setState(79); + match(EOF); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class SingleExpressionContext extends ParserRuleContext { + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public TerminalNode EOF() { return getToken(SqlBaseParser.EOF, 0); } + public SingleExpressionContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_singleExpression; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterSingleExpression(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitSingleExpression(this); + } + @Override + public <T> T accept(ParseTreeVisitor<? extends T> visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor<? extends T>)visitor).visitSingleExpression(this); + else return
visitor.visitChildren(this); + } + } + + public final SingleExpressionContext singleExpression() throws RecognitionException { + SingleExpressionContext _localctx = new SingleExpressionContext(_ctx, getState()); + enterRule(_localctx, 2, RULE_singleExpression); + try { + enterOuterAlt(_localctx, 1); + { + setState(81); + expression(); + setState(82); + match(EOF); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class StatementContext extends ParserRuleContext { + public StatementContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_statement; } + + public StatementContext() { } + public void copyFrom(StatementContext ctx) { + super.copyFrom(ctx); + } + } + public static class ExplainContext extends StatementContext { + public Token type; + public Token format; + public BooleanValueContext verify; + public TerminalNode EXPLAIN() { return getToken(SqlBaseParser.EXPLAIN, 0); } + public StatementContext statement() { + return getRuleContext(StatementContext.class,0); + } + public List<TerminalNode> PLAN() { return getTokens(SqlBaseParser.PLAN); } + public TerminalNode PLAN(int i) { + return getToken(SqlBaseParser.PLAN, i); + } + public List<TerminalNode> FORMAT() { return getTokens(SqlBaseParser.FORMAT); } + public TerminalNode FORMAT(int i) { + return getToken(SqlBaseParser.FORMAT, i); + } + public List<TerminalNode> VERIFY() { return getTokens(SqlBaseParser.VERIFY); } + public TerminalNode VERIFY(int i) { + return getToken(SqlBaseParser.VERIFY, i); + } + public List<BooleanValueContext> booleanValue() { + return getRuleContexts(BooleanValueContext.class); + } + public BooleanValueContext booleanValue(int i) { + return getRuleContext(BooleanValueContext.class,i); + } + public List<TerminalNode> PARSED() { return getTokens(SqlBaseParser.PARSED); } + public TerminalNode PARSED(int i) { + return getToken(SqlBaseParser.PARSED, i); + } + public List<TerminalNode> ANALYZED() { return getTokens(SqlBaseParser.ANALYZED); } + public TerminalNode ANALYZED(int i) { + return getToken(SqlBaseParser.ANALYZED, i); + } + public List<TerminalNode> OPTIMIZED() { return getTokens(SqlBaseParser.OPTIMIZED); } + public TerminalNode OPTIMIZED(int i) { + return getToken(SqlBaseParser.OPTIMIZED, i); + } + public List<TerminalNode> MAPPED() { return getTokens(SqlBaseParser.MAPPED); } + public TerminalNode MAPPED(int i) { + return getToken(SqlBaseParser.MAPPED, i); + } + public List<TerminalNode> EXECUTABLE() { return getTokens(SqlBaseParser.EXECUTABLE); } + public TerminalNode EXECUTABLE(int i) { + return getToken(SqlBaseParser.EXECUTABLE, i); + } + public List<TerminalNode> ALL() { return getTokens(SqlBaseParser.ALL); } + public TerminalNode ALL(int i) { + return getToken(SqlBaseParser.ALL, i); + } + public List<TerminalNode> TEXT() { return getTokens(SqlBaseParser.TEXT); } + public TerminalNode TEXT(int i) { + return getToken(SqlBaseParser.TEXT, i); + } + public List<TerminalNode> GRAPHVIZ() { return getTokens(SqlBaseParser.GRAPHVIZ); } + public TerminalNode GRAPHVIZ(int i) { + return getToken(SqlBaseParser.GRAPHVIZ, i); + } + public ExplainContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterExplain(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitExplain(this); + } + @Override + public <T> T
accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitExplain(this); + else return visitor.visitChildren(this); + } + } + public static class SysCatalogsContext extends StatementContext { + public TerminalNode SYS() { return getToken(SqlBaseParser.SYS, 0); } + public TerminalNode CATALOGS() { return getToken(SqlBaseParser.CATALOGS, 0); } + public SysCatalogsContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterSysCatalogs(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitSysCatalogs(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitSysCatalogs(this); + else return visitor.visitChildren(this); + } + } + public static class SysColumnsContext extends StatementContext { + public StringContext cluster; + public PatternContext indexPattern; + public PatternContext columnPattern; + public TerminalNode SYS() { return getToken(SqlBaseParser.SYS, 0); } + public TerminalNode COLUMNS() { return getToken(SqlBaseParser.COLUMNS, 0); } + public TerminalNode CATALOG() { return getToken(SqlBaseParser.CATALOG, 0); } + public TerminalNode TABLE() { return getToken(SqlBaseParser.TABLE, 0); } + public StringContext string() { + return getRuleContext(StringContext.class,0); + } + public List pattern() { + return getRuleContexts(PatternContext.class); + } + public PatternContext pattern(int i) { + return getRuleContext(PatternContext.class,i); + } + public List LIKE() { return getTokens(SqlBaseParser.LIKE); } + public TerminalNode LIKE(int i) { + return getToken(SqlBaseParser.LIKE, i); + } + public SysColumnsContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterSysColumns(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitSysColumns(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitSysColumns(this); + else return visitor.visitChildren(this); + } + } + public static class SysTypesContext extends StatementContext { + public TerminalNode SYS() { return getToken(SqlBaseParser.SYS, 0); } + public TerminalNode TYPES() { return getToken(SqlBaseParser.TYPES, 0); } + public SysTypesContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterSysTypes(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitSysTypes(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitSysTypes(this); + else return visitor.visitChildren(this); + } + } + public static class DebugContext extends StatementContext { + public Token type; + public Token format; + public TerminalNode DEBUG() { return getToken(SqlBaseParser.DEBUG, 0); } + public StatementContext statement() { + return 
getRuleContext(StatementContext.class,0); + } + public List PLAN() { return getTokens(SqlBaseParser.PLAN); } + public TerminalNode PLAN(int i) { + return getToken(SqlBaseParser.PLAN, i); + } + public List FORMAT() { return getTokens(SqlBaseParser.FORMAT); } + public TerminalNode FORMAT(int i) { + return getToken(SqlBaseParser.FORMAT, i); + } + public List ANALYZED() { return getTokens(SqlBaseParser.ANALYZED); } + public TerminalNode ANALYZED(int i) { + return getToken(SqlBaseParser.ANALYZED, i); + } + public List OPTIMIZED() { return getTokens(SqlBaseParser.OPTIMIZED); } + public TerminalNode OPTIMIZED(int i) { + return getToken(SqlBaseParser.OPTIMIZED, i); + } + public List TEXT() { return getTokens(SqlBaseParser.TEXT); } + public TerminalNode TEXT(int i) { + return getToken(SqlBaseParser.TEXT, i); + } + public List GRAPHVIZ() { return getTokens(SqlBaseParser.GRAPHVIZ); } + public TerminalNode GRAPHVIZ(int i) { + return getToken(SqlBaseParser.GRAPHVIZ, i); + } + public DebugContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterDebug(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitDebug(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitDebug(this); + else return visitor.visitChildren(this); + } + } + public static class SysTableTypesContext extends StatementContext { + public TerminalNode SYS() { return getToken(SqlBaseParser.SYS, 0); } + public TerminalNode TABLE() { return getToken(SqlBaseParser.TABLE, 0); } + public TerminalNode TYPES() { return getToken(SqlBaseParser.TYPES, 0); } + public SysTableTypesContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterSysTableTypes(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitSysTableTypes(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitSysTableTypes(this); + else return visitor.visitChildren(this); + } + } + public static class StatementDefaultContext extends StatementContext { + public QueryContext query() { + return getRuleContext(QueryContext.class,0); + } + public StatementDefaultContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterStatementDefault(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitStatementDefault(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitStatementDefault(this); + else return visitor.visitChildren(this); + } + } + public static class SysTablesContext extends StatementContext { + public PatternContext clusterPattern; + public PatternContext tablePattern; + public TerminalNode SYS() { return getToken(SqlBaseParser.SYS, 0); } + public TerminalNode TABLES() { return getToken(SqlBaseParser.TABLES, 0); } + 
public TerminalNode CATALOG() { return getToken(SqlBaseParser.CATALOG, 0); } + public TerminalNode TYPE() { return getToken(SqlBaseParser.TYPE, 0); } + public List string() { + return getRuleContexts(StringContext.class); + } + public StringContext string(int i) { + return getRuleContext(StringContext.class,i); + } + public List pattern() { + return getRuleContexts(PatternContext.class); + } + public PatternContext pattern(int i) { + return getRuleContext(PatternContext.class,i); + } + public List LIKE() { return getTokens(SqlBaseParser.LIKE); } + public TerminalNode LIKE(int i) { + return getToken(SqlBaseParser.LIKE, i); + } + public SysTablesContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterSysTables(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitSysTables(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitSysTables(this); + else return visitor.visitChildren(this); + } + } + public static class ShowFunctionsContext extends StatementContext { + public TerminalNode SHOW() { return getToken(SqlBaseParser.SHOW, 0); } + public TerminalNode FUNCTIONS() { return getToken(SqlBaseParser.FUNCTIONS, 0); } + public PatternContext pattern() { + return getRuleContext(PatternContext.class,0); + } + public TerminalNode LIKE() { return getToken(SqlBaseParser.LIKE, 0); } + public ShowFunctionsContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterShowFunctions(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitShowFunctions(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitShowFunctions(this); + else return visitor.visitChildren(this); + } + } + public static class ShowTablesContext extends StatementContext { + public TerminalNode SHOW() { return getToken(SqlBaseParser.SHOW, 0); } + public TerminalNode TABLES() { return getToken(SqlBaseParser.TABLES, 0); } + public PatternContext pattern() { + return getRuleContext(PatternContext.class,0); + } + public TerminalNode LIKE() { return getToken(SqlBaseParser.LIKE, 0); } + public ShowTablesContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterShowTables(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitShowTables(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitShowTables(this); + else return visitor.visitChildren(this); + } + } + public static class ShowSchemasContext extends StatementContext { + public TerminalNode SHOW() { return getToken(SqlBaseParser.SHOW, 0); } + public TerminalNode SCHEMAS() { return getToken(SqlBaseParser.SCHEMAS, 0); } + public ShowSchemasContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public void 
enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterShowSchemas(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitShowSchemas(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitShowSchemas(this); + else return visitor.visitChildren(this); + } + } + public static class ShowColumnsContext extends StatementContext { + public TerminalNode SHOW() { return getToken(SqlBaseParser.SHOW, 0); } + public TerminalNode COLUMNS() { return getToken(SqlBaseParser.COLUMNS, 0); } + public TableIdentifierContext tableIdentifier() { + return getRuleContext(TableIdentifierContext.class,0); + } + public TerminalNode FROM() { return getToken(SqlBaseParser.FROM, 0); } + public TerminalNode IN() { return getToken(SqlBaseParser.IN, 0); } + public TerminalNode DESCRIBE() { return getToken(SqlBaseParser.DESCRIBE, 0); } + public TerminalNode DESC() { return getToken(SqlBaseParser.DESC, 0); } + public ShowColumnsContext(StatementContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterShowColumns(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitShowColumns(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitShowColumns(this); + else return visitor.visitChildren(this); + } + } + + public final StatementContext statement() throws RecognitionException { + StatementContext _localctx = new StatementContext(_ctx, getState()); + enterRule(_localctx, 4, RULE_statement); + int _la; + try { + setState(193); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,21,_ctx) ) { + case 1: + _localctx = new StatementDefaultContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(84); + query(); + } + break; + case 2: + _localctx = new ExplainContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(85); + match(EXPLAIN); + setState(99); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,2,_ctx) ) { + case 1: + { + setState(86); + match(T__0); + setState(95); + _errHandler.sync(this); + _la = _input.LA(1); + while (((((_la - 28)) & ~0x3f) == 0 && ((1L << (_la - 28)) & ((1L << (FORMAT - 28)) | (1L << (PLAN - 28)) | (1L << (VERIFY - 28)))) != 0)) { + { + setState(93); + switch (_input.LA(1)) { + case PLAN: + { + setState(87); + match(PLAN); + setState(88); + ((ExplainContext)_localctx).type = _input.LT(1); + _la = _input.LA(1); + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ALL) | (1L << ANALYZED) | (1L << EXECUTABLE) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED))) != 0)) ) { + ((ExplainContext)_localctx).type = (Token)_errHandler.recoverInline(this); + } else { + consume(); + } + } + break; + case FORMAT: + { + setState(89); + match(FORMAT); + setState(90); + ((ExplainContext)_localctx).format = _input.LT(1); + _la = _input.LA(1); + if ( !(_la==GRAPHVIZ || _la==TEXT) ) { + ((ExplainContext)_localctx).format = (Token)_errHandler.recoverInline(this); + } else { + consume(); + } + } + break; + case VERIFY: + { + setState(91); + match(VERIFY); + setState(92); + 
((ExplainContext)_localctx).verify = booleanValue(); + } + break; + default: + throw new NoViableAltException(this); + } + } + setState(97); + _errHandler.sync(this); + _la = _input.LA(1); + } + setState(98); + match(T__1); + } + break; + } + setState(101); + statement(); + } + break; + case 3: + _localctx = new DebugContext(_localctx); + enterOuterAlt(_localctx, 3); + { + setState(102); + match(DEBUG); + setState(114); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,5,_ctx) ) { + case 1: + { + setState(103); + match(T__0); + setState(110); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==FORMAT || _la==PLAN) { + { + setState(108); + switch (_input.LA(1)) { + case PLAN: + { + setState(104); + match(PLAN); + setState(105); + ((DebugContext)_localctx).type = _input.LT(1); + _la = _input.LA(1); + if ( !(_la==ANALYZED || _la==OPTIMIZED) ) { + ((DebugContext)_localctx).type = (Token)_errHandler.recoverInline(this); + } else { + consume(); + } + } + break; + case FORMAT: + { + setState(106); + match(FORMAT); + setState(107); + ((DebugContext)_localctx).format = _input.LT(1); + _la = _input.LA(1); + if ( !(_la==GRAPHVIZ || _la==TEXT) ) { + ((DebugContext)_localctx).format = (Token)_errHandler.recoverInline(this); + } else { + consume(); + } + } + break; + default: + throw new NoViableAltException(this); + } + } + setState(112); + _errHandler.sync(this); + _la = _input.LA(1); + } + setState(113); + match(T__1); + } + break; + } + setState(116); + statement(); + } + break; + case 4: + _localctx = new ShowTablesContext(_localctx); + enterOuterAlt(_localctx, 4); + { + setState(117); + match(SHOW); + setState(118); + match(TABLES); + setState(123); + _la = _input.LA(1); + if (((((_la - 40)) & ~0x3f) == 0 && ((1L << (_la - 40)) & ((1L << (LIKE - 40)) | (1L << (PARAM - 40)) | (1L << (STRING - 40)))) != 0)) { + { + setState(120); + _la = _input.LA(1); + if (_la==LIKE) { + { + setState(119); + match(LIKE); + } + } + + setState(122); + pattern(); + } + } + + } + break; + case 5: + _localctx = new ShowColumnsContext(_localctx); + enterOuterAlt(_localctx, 5); + { + setState(125); + match(SHOW); + setState(126); + match(COLUMNS); + setState(127); + _la = _input.LA(1); + if ( !(_la==FROM || _la==IN) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + setState(128); + tableIdentifier(); + } + break; + case 6: + _localctx = new ShowColumnsContext(_localctx); + enterOuterAlt(_localctx, 6); + { + setState(129); + _la = _input.LA(1); + if ( !(_la==DESC || _la==DESCRIBE) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + setState(130); + tableIdentifier(); + } + break; + case 7: + _localctx = new ShowFunctionsContext(_localctx); + enterOuterAlt(_localctx, 7); + { + setState(131); + match(SHOW); + setState(132); + match(FUNCTIONS); + setState(137); + _la = _input.LA(1); + if (((((_la - 40)) & ~0x3f) == 0 && ((1L << (_la - 40)) & ((1L << (LIKE - 40)) | (1L << (PARAM - 40)) | (1L << (STRING - 40)))) != 0)) { + { + setState(134); + _la = _input.LA(1); + if (_la==LIKE) { + { + setState(133); + match(LIKE); + } + } + + setState(136); + pattern(); + } + } + + } + break; + case 8: + _localctx = new ShowSchemasContext(_localctx); + enterOuterAlt(_localctx, 8); + { + setState(139); + match(SHOW); + setState(140); + match(SCHEMAS); + } + break; + case 9: + _localctx = new SysCatalogsContext(_localctx); + enterOuterAlt(_localctx, 9); + { + setState(141); + match(SYS); + setState(142); + match(CATALOGS); + } + break; + case 10: + _localctx = new 
SysTablesContext(_localctx); + enterOuterAlt(_localctx, 10); + { + setState(143); + match(SYS); + setState(144); + match(TABLES); + setState(150); + _la = _input.LA(1); + if (_la==CATALOG) { + { + setState(145); + match(CATALOG); + setState(147); + _la = _input.LA(1); + if (_la==LIKE) { + { + setState(146); + match(LIKE); + } + } + + setState(149); + ((SysTablesContext)_localctx).clusterPattern = pattern(); + } + } + + setState(156); + _la = _input.LA(1); + if (((((_la - 40)) & ~0x3f) == 0 && ((1L << (_la - 40)) & ((1L << (LIKE - 40)) | (1L << (PARAM - 40)) | (1L << (STRING - 40)))) != 0)) { + { + setState(153); + _la = _input.LA(1); + if (_la==LIKE) { + { + setState(152); + match(LIKE); + } + } + + setState(155); + ((SysTablesContext)_localctx).tablePattern = pattern(); + } + } + + setState(167); + _la = _input.LA(1); + if (_la==TYPE) { + { + setState(158); + match(TYPE); + setState(159); + string(); + setState(164); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(160); + match(T__2); + setState(161); + string(); + } + } + setState(166); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + + } + break; + case 11: + _localctx = new SysColumnsContext(_localctx); + enterOuterAlt(_localctx, 11); + { + setState(169); + match(SYS); + setState(170); + match(COLUMNS); + setState(173); + _la = _input.LA(1); + if (_la==CATALOG) { + { + setState(171); + match(CATALOG); + setState(172); + ((SysColumnsContext)_localctx).cluster = string(); + } + } + + setState(180); + _la = _input.LA(1); + if (_la==TABLE) { + { + setState(175); + match(TABLE); + setState(177); + _la = _input.LA(1); + if (_la==LIKE) { + { + setState(176); + match(LIKE); + } + } + + setState(179); + ((SysColumnsContext)_localctx).indexPattern = pattern(); + } + } + + setState(186); + _la = _input.LA(1); + if (((((_la - 40)) & ~0x3f) == 0 && ((1L << (_la - 40)) & ((1L << (LIKE - 40)) | (1L << (PARAM - 40)) | (1L << (STRING - 40)))) != 0)) { + { + setState(183); + _la = _input.LA(1); + if (_la==LIKE) { + { + setState(182); + match(LIKE); + } + } + + setState(185); + ((SysColumnsContext)_localctx).columnPattern = pattern(); + } + } + + } + break; + case 12: + _localctx = new SysTypesContext(_localctx); + enterOuterAlt(_localctx, 12); + { + setState(188); + match(SYS); + setState(189); + match(TYPES); + } + break; + case 13: + _localctx = new SysTableTypesContext(_localctx); + enterOuterAlt(_localctx, 13); + { + setState(190); + match(SYS); + setState(191); + match(TABLE); + setState(192); + match(TYPES); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class QueryContext extends ParserRuleContext { + public QueryNoWithContext queryNoWith() { + return getRuleContext(QueryNoWithContext.class,0); + } + public TerminalNode WITH() { return getToken(SqlBaseParser.WITH, 0); } + public List namedQuery() { + return getRuleContexts(NamedQueryContext.class); + } + public NamedQueryContext namedQuery(int i) { + return getRuleContext(NamedQueryContext.class,i); + } + public QueryContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_query; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterQuery(this); + } + @Override + public void 
exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitQuery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitQuery(this); + else return visitor.visitChildren(this); + } + } + + public final QueryContext query() throws RecognitionException { + QueryContext _localctx = new QueryContext(_ctx, getState()); + enterRule(_localctx, 6, RULE_query); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(204); + _la = _input.LA(1); + if (_la==WITH) { + { + setState(195); + match(WITH); + setState(196); + namedQuery(); + setState(201); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(197); + match(T__2); + setState(198); + namedQuery(); + } + } + setState(203); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + + setState(206); + queryNoWith(); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class QueryNoWithContext extends ParserRuleContext { + public Token limit; + public QueryTermContext queryTerm() { + return getRuleContext(QueryTermContext.class,0); + } + public TerminalNode ORDER() { return getToken(SqlBaseParser.ORDER, 0); } + public TerminalNode BY() { return getToken(SqlBaseParser.BY, 0); } + public List orderBy() { + return getRuleContexts(OrderByContext.class); + } + public OrderByContext orderBy(int i) { + return getRuleContext(OrderByContext.class,i); + } + public TerminalNode LIMIT() { return getToken(SqlBaseParser.LIMIT, 0); } + public TerminalNode INTEGER_VALUE() { return getToken(SqlBaseParser.INTEGER_VALUE, 0); } + public TerminalNode ALL() { return getToken(SqlBaseParser.ALL, 0); } + public QueryNoWithContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_queryNoWith; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterQueryNoWith(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitQueryNoWith(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitQueryNoWith(this); + else return visitor.visitChildren(this); + } + } + + public final QueryNoWithContext queryNoWith() throws RecognitionException { + QueryNoWithContext _localctx = new QueryNoWithContext(_ctx, getState()); + enterRule(_localctx, 8, RULE_queryNoWith); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(208); + queryTerm(); + setState(219); + _la = _input.LA(1); + if (_la==ORDER) { + { + setState(209); + match(ORDER); + setState(210); + match(BY); + setState(211); + orderBy(); + setState(216); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(212); + match(T__2); + setState(213); + orderBy(); + } + } + setState(218); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + + setState(223); + _la = _input.LA(1); + if (_la==LIMIT) { + { + setState(221); + match(LIMIT); + setState(222); + ((QueryNoWithContext)_localctx).limit = _input.LT(1); + _la = _input.LA(1); + if ( !(_la==ALL || _la==INTEGER_VALUE) ) { + 
((QueryNoWithContext)_localctx).limit = (Token)_errHandler.recoverInline(this); + } else { + consume(); + } + } + } + + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class QueryTermContext extends ParserRuleContext { + public QueryTermContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_queryTerm; } + + public QueryTermContext() { } + public void copyFrom(QueryTermContext ctx) { + super.copyFrom(ctx); + } + } + public static class SubqueryContext extends QueryTermContext { + public QueryNoWithContext queryNoWith() { + return getRuleContext(QueryNoWithContext.class,0); + } + public SubqueryContext(QueryTermContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterSubquery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitSubquery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitSubquery(this); + else return visitor.visitChildren(this); + } + } + public static class QueryPrimaryDefaultContext extends QueryTermContext { + public QuerySpecificationContext querySpecification() { + return getRuleContext(QuerySpecificationContext.class,0); + } + public QueryPrimaryDefaultContext(QueryTermContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterQueryPrimaryDefault(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitQueryPrimaryDefault(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitQueryPrimaryDefault(this); + else return visitor.visitChildren(this); + } + } + + public final QueryTermContext queryTerm() throws RecognitionException { + QueryTermContext _localctx = new QueryTermContext(_ctx, getState()); + enterRule(_localctx, 10, RULE_queryTerm); + try { + setState(230); + switch (_input.LA(1)) { + case SELECT: + _localctx = new QueryPrimaryDefaultContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(225); + querySpecification(); + } + break; + case T__0: + _localctx = new SubqueryContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(226); + match(T__0); + setState(227); + queryNoWith(); + setState(228); + match(T__1); + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class OrderByContext extends ParserRuleContext { + public Token ordering; + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public TerminalNode ASC() { return getToken(SqlBaseParser.ASC, 0); } + public TerminalNode DESC() { return getToken(SqlBaseParser.DESC, 0); } + public OrderByContext(ParserRuleContext parent, int invokingState) { + 
super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_orderBy; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterOrderBy(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitOrderBy(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitOrderBy(this); + else return visitor.visitChildren(this); + } + } + + public final OrderByContext orderBy() throws RecognitionException { + OrderByContext _localctx = new OrderByContext(_ctx, getState()); + enterRule(_localctx, 12, RULE_orderBy); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(232); + expression(); + setState(234); + _la = _input.LA(1); + if (_la==ASC || _la==DESC) { + { + setState(233); + ((OrderByContext)_localctx).ordering = _input.LT(1); + _la = _input.LA(1); + if ( !(_la==ASC || _la==DESC) ) { + ((OrderByContext)_localctx).ordering = (Token)_errHandler.recoverInline(this); + } else { + consume(); + } + } + } + + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class QuerySpecificationContext extends ParserRuleContext { + public BooleanExpressionContext where; + public BooleanExpressionContext having; + public TerminalNode SELECT() { return getToken(SqlBaseParser.SELECT, 0); } + public List selectItem() { + return getRuleContexts(SelectItemContext.class); + } + public SelectItemContext selectItem(int i) { + return getRuleContext(SelectItemContext.class,i); + } + public SetQuantifierContext setQuantifier() { + return getRuleContext(SetQuantifierContext.class,0); + } + public FromClauseContext fromClause() { + return getRuleContext(FromClauseContext.class,0); + } + public TerminalNode WHERE() { return getToken(SqlBaseParser.WHERE, 0); } + public TerminalNode GROUP() { return getToken(SqlBaseParser.GROUP, 0); } + public TerminalNode BY() { return getToken(SqlBaseParser.BY, 0); } + public GroupByContext groupBy() { + return getRuleContext(GroupByContext.class,0); + } + public TerminalNode HAVING() { return getToken(SqlBaseParser.HAVING, 0); } + public List booleanExpression() { + return getRuleContexts(BooleanExpressionContext.class); + } + public BooleanExpressionContext booleanExpression(int i) { + return getRuleContext(BooleanExpressionContext.class,i); + } + public QuerySpecificationContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_querySpecification; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterQuerySpecification(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitQuerySpecification(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitQuerySpecification(this); + else return visitor.visitChildren(this); + } + } + + public final QuerySpecificationContext querySpecification() throws RecognitionException { + QuerySpecificationContext _localctx = new 
QuerySpecificationContext(_ctx, getState()); + enterRule(_localctx, 14, RULE_querySpecification); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(236); + match(SELECT); + setState(238); + _la = _input.LA(1); + if (_la==ALL || _la==DISTINCT) { + { + setState(237); + setQuantifier(); + } + } + + setState(240); + selectItem(); + setState(245); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(241); + match(T__2); + setState(242); + selectItem(); + } + } + setState(247); + _errHandler.sync(this); + _la = _input.LA(1); + } + setState(249); + _la = _input.LA(1); + if (_la==FROM) { + { + setState(248); + fromClause(); + } + } + + setState(253); + _la = _input.LA(1); + if (_la==WHERE) { + { + setState(251); + match(WHERE); + setState(252); + ((QuerySpecificationContext)_localctx).where = booleanExpression(0); + } + } + + setState(258); + _la = _input.LA(1); + if (_la==GROUP) { + { + setState(255); + match(GROUP); + setState(256); + match(BY); + setState(257); + groupBy(); + } + } + + setState(262); + _la = _input.LA(1); + if (_la==HAVING) { + { + setState(260); + match(HAVING); + setState(261); + ((QuerySpecificationContext)_localctx).having = booleanExpression(0); + } + } + + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class FromClauseContext extends ParserRuleContext { + public TerminalNode FROM() { return getToken(SqlBaseParser.FROM, 0); } + public List relation() { + return getRuleContexts(RelationContext.class); + } + public RelationContext relation(int i) { + return getRuleContext(RelationContext.class,i); + } + public FromClauseContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_fromClause; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterFromClause(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitFromClause(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitFromClause(this); + else return visitor.visitChildren(this); + } + } + + public final FromClauseContext fromClause() throws RecognitionException { + FromClauseContext _localctx = new FromClauseContext(_ctx, getState()); + enterRule(_localctx, 16, RULE_fromClause); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(264); + match(FROM); + setState(265); + relation(); + setState(270); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(266); + match(T__2); + setState(267); + relation(); + } + } + setState(272); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class GroupByContext extends ParserRuleContext { + public List groupingElement() { + return getRuleContexts(GroupingElementContext.class); + } + public GroupingElementContext groupingElement(int i) { + return getRuleContext(GroupingElementContext.class,i); + } + public SetQuantifierContext setQuantifier() { + 
return getRuleContext(SetQuantifierContext.class,0); + } + public GroupByContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_groupBy; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterGroupBy(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitGroupBy(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitGroupBy(this); + else return visitor.visitChildren(this); + } + } + + public final GroupByContext groupBy() throws RecognitionException { + GroupByContext _localctx = new GroupByContext(_ctx, getState()); + enterRule(_localctx, 18, RULE_groupBy); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(274); + _la = _input.LA(1); + if (_la==ALL || _la==DISTINCT) { + { + setState(273); + setQuantifier(); + } + } + + setState(276); + groupingElement(); + setState(281); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(277); + match(T__2); + setState(278); + groupingElement(); + } + } + setState(283); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class GroupingElementContext extends ParserRuleContext { + public GroupingElementContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_groupingElement; } + + public GroupingElementContext() { } + public void copyFrom(GroupingElementContext ctx) { + super.copyFrom(ctx); + } + } + public static class SingleGroupingSetContext extends GroupingElementContext { + public GroupingExpressionsContext groupingExpressions() { + return getRuleContext(GroupingExpressionsContext.class,0); + } + public SingleGroupingSetContext(GroupingElementContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterSingleGroupingSet(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitSingleGroupingSet(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitSingleGroupingSet(this); + else return visitor.visitChildren(this); + } + } + + public final GroupingElementContext groupingElement() throws RecognitionException { + GroupingElementContext _localctx = new GroupingElementContext(_ctx, getState()); + enterRule(_localctx, 20, RULE_groupingElement); + try { + _localctx = new SingleGroupingSetContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(284); + groupingExpressions(); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class GroupingExpressionsContext extends ParserRuleContext { + public List expression() { + return getRuleContexts(ExpressionContext.class); + } + 
public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public GroupingExpressionsContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_groupingExpressions; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterGroupingExpressions(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitGroupingExpressions(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitGroupingExpressions(this); + else return visitor.visitChildren(this); + } + } + + public final GroupingExpressionsContext groupingExpressions() throws RecognitionException { + GroupingExpressionsContext _localctx = new GroupingExpressionsContext(_ctx, getState()); + enterRule(_localctx, 22, RULE_groupingExpressions); + int _la; + try { + setState(299); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,40,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(286); + match(T__0); + setState(295); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << MAPPED) | (1L << MATCH) | (1L << NOT) | (1L << NULL) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TRUE - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (PLUS - 64)) | (1L << (MINUS - 64)) | (1L << (ASTERISK - 64)) | (1L << (PARAM - 64)) | (1L << (STRING - 64)) | (1L << (INTEGER_VALUE - 64)) | (1L << (DECIMAL_VALUE - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { + { + setState(287); + expression(); + setState(292); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(288); + match(T__2); + setState(289); + expression(); + } + } + setState(294); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + + setState(297); + match(T__1); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(298); + expression(); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class NamedQueryContext extends ParserRuleContext { + public IdentifierContext name; + public TerminalNode AS() { return getToken(SqlBaseParser.AS, 0); } + public QueryNoWithContext queryNoWith() { + return getRuleContext(QueryNoWithContext.class,0); + } + public IdentifierContext identifier() { + return getRuleContext(IdentifierContext.class,0); + } + public NamedQueryContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_namedQuery; } 
+ @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterNamedQuery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitNamedQuery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitNamedQuery(this); + else return visitor.visitChildren(this); + } + } + + public final NamedQueryContext namedQuery() throws RecognitionException { + NamedQueryContext _localctx = new NamedQueryContext(_ctx, getState()); + enterRule(_localctx, 24, RULE_namedQuery); + try { + enterOuterAlt(_localctx, 1); + { + setState(301); + ((NamedQueryContext)_localctx).name = identifier(); + setState(302); + match(AS); + setState(303); + match(T__0); + setState(304); + queryNoWith(); + setState(305); + match(T__1); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class SetQuantifierContext extends ParserRuleContext { + public TerminalNode DISTINCT() { return getToken(SqlBaseParser.DISTINCT, 0); } + public TerminalNode ALL() { return getToken(SqlBaseParser.ALL, 0); } + public SetQuantifierContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_setQuantifier; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterSetQuantifier(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitSetQuantifier(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitSetQuantifier(this); + else return visitor.visitChildren(this); + } + } + + public final SetQuantifierContext setQuantifier() throws RecognitionException { + SetQuantifierContext _localctx = new SetQuantifierContext(_ctx, getState()); + enterRule(_localctx, 26, RULE_setQuantifier); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(307); + _la = _input.LA(1); + if ( !(_la==ALL || _la==DISTINCT) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class SelectItemContext extends ParserRuleContext { + public SelectItemContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_selectItem; } + + public SelectItemContext() { } + public void copyFrom(SelectItemContext ctx) { + super.copyFrom(ctx); + } + } + public static class SelectExpressionContext extends SelectItemContext { + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public IdentifierContext identifier() { + return getRuleContext(IdentifierContext.class,0); + } + public TerminalNode AS() { return getToken(SqlBaseParser.AS, 0); } + public SelectExpressionContext(SelectItemContext ctx) { copyFrom(ctx); } + @Override + public void 
enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterSelectExpression(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitSelectExpression(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitSelectExpression(this); + else return visitor.visitChildren(this); + } + } + + public final SelectItemContext selectItem() throws RecognitionException { + SelectItemContext _localctx = new SelectItemContext(_ctx, getState()); + enterRule(_localctx, 28, RULE_selectItem); + int _la; + try { + _localctx = new SelectExpressionContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(309); + expression(); + setState(314); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << AS) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { + { + setState(311); + _la = _input.LA(1); + if (_la==AS) { + { + setState(310); + match(AS); + } + } + + setState(313); + identifier(); + } + } + + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class RelationContext extends ParserRuleContext { + public RelationPrimaryContext relationPrimary() { + return getRuleContext(RelationPrimaryContext.class,0); + } + public List joinRelation() { + return getRuleContexts(JoinRelationContext.class); + } + public JoinRelationContext joinRelation(int i) { + return getRuleContext(JoinRelationContext.class,i); + } + public RelationContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_relation; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterRelation(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitRelation(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitRelation(this); + else return visitor.visitChildren(this); + } + } + + public final RelationContext relation() throws RecognitionException { + RelationContext _localctx = new RelationContext(_ctx, getState()); + enterRule(_localctx, 30, RULE_relation); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(316); + relationPrimary(); + setState(320); + _errHandler.sync(this); + _la = _input.LA(1); + while ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << FULL) | (1L << INNER) | (1L << JOIN) | (1L << LEFT) | (1L << NATURAL) | (1L << RIGHT))) != 
0)) { + { + { + setState(317); + joinRelation(); + } + } + setState(322); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class JoinRelationContext extends ParserRuleContext { + public RelationPrimaryContext right; + public TerminalNode JOIN() { return getToken(SqlBaseParser.JOIN, 0); } + public RelationPrimaryContext relationPrimary() { + return getRuleContext(RelationPrimaryContext.class,0); + } + public JoinTypeContext joinType() { + return getRuleContext(JoinTypeContext.class,0); + } + public JoinCriteriaContext joinCriteria() { + return getRuleContext(JoinCriteriaContext.class,0); + } + public TerminalNode NATURAL() { return getToken(SqlBaseParser.NATURAL, 0); } + public JoinRelationContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_joinRelation; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterJoinRelation(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitJoinRelation(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitJoinRelation(this); + else return visitor.visitChildren(this); + } + } + + public final JoinRelationContext joinRelation() throws RecognitionException { + JoinRelationContext _localctx = new JoinRelationContext(_ctx, getState()); + enterRule(_localctx, 32, RULE_joinRelation); + int _la; + try { + setState(334); + switch (_input.LA(1)) { + case FULL: + case INNER: + case JOIN: + case LEFT: + case RIGHT: + enterOuterAlt(_localctx, 1); + { + { + setState(323); + joinType(); + } + setState(324); + match(JOIN); + setState(325); + ((JoinRelationContext)_localctx).right = relationPrimary(); + setState(327); + _la = _input.LA(1); + if (_la==ON || _la==USING) { + { + setState(326); + joinCriteria(); + } + } + + } + break; + case NATURAL: + enterOuterAlt(_localctx, 2); + { + setState(329); + match(NATURAL); + setState(330); + joinType(); + setState(331); + match(JOIN); + setState(332); + ((JoinRelationContext)_localctx).right = relationPrimary(); + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class JoinTypeContext extends ParserRuleContext { + public TerminalNode INNER() { return getToken(SqlBaseParser.INNER, 0); } + public TerminalNode LEFT() { return getToken(SqlBaseParser.LEFT, 0); } + public TerminalNode OUTER() { return getToken(SqlBaseParser.OUTER, 0); } + public TerminalNode RIGHT() { return getToken(SqlBaseParser.RIGHT, 0); } + public TerminalNode FULL() { return getToken(SqlBaseParser.FULL, 0); } + public JoinTypeContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_joinType; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterJoinType(this); + } + @Override + 
public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitJoinType(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitJoinType(this); + else return visitor.visitChildren(this); + } + } + + public final JoinTypeContext joinType() throws RecognitionException { + JoinTypeContext _localctx = new JoinTypeContext(_ctx, getState()); + enterRule(_localctx, 34, RULE_joinType); + int _la; + try { + setState(351); + switch (_input.LA(1)) { + case INNER: + case JOIN: + enterOuterAlt(_localctx, 1); + { + setState(337); + _la = _input.LA(1); + if (_la==INNER) { + { + setState(336); + match(INNER); + } + } + + } + break; + case LEFT: + enterOuterAlt(_localctx, 2); + { + setState(339); + match(LEFT); + setState(341); + _la = _input.LA(1); + if (_la==OUTER) { + { + setState(340); + match(OUTER); + } + } + + } + break; + case RIGHT: + enterOuterAlt(_localctx, 3); + { + setState(343); + match(RIGHT); + setState(345); + _la = _input.LA(1); + if (_la==OUTER) { + { + setState(344); + match(OUTER); + } + } + + } + break; + case FULL: + enterOuterAlt(_localctx, 4); + { + setState(347); + match(FULL); + setState(349); + _la = _input.LA(1); + if (_la==OUTER) { + { + setState(348); + match(OUTER); + } + } + + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class JoinCriteriaContext extends ParserRuleContext { + public TerminalNode ON() { return getToken(SqlBaseParser.ON, 0); } + public BooleanExpressionContext booleanExpression() { + return getRuleContext(BooleanExpressionContext.class,0); + } + public TerminalNode USING() { return getToken(SqlBaseParser.USING, 0); } + public List identifier() { + return getRuleContexts(IdentifierContext.class); + } + public IdentifierContext identifier(int i) { + return getRuleContext(IdentifierContext.class,i); + } + public JoinCriteriaContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_joinCriteria; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterJoinCriteria(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitJoinCriteria(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitJoinCriteria(this); + else return visitor.visitChildren(this); + } + } + + public final JoinCriteriaContext joinCriteria() throws RecognitionException { + JoinCriteriaContext _localctx = new JoinCriteriaContext(_ctx, getState()); + enterRule(_localctx, 36, RULE_joinCriteria); + int _la; + try { + setState(367); + switch (_input.LA(1)) { + case ON: + enterOuterAlt(_localctx, 1); + { + setState(353); + match(ON); + setState(354); + booleanExpression(0); + } + break; + case USING: + enterOuterAlt(_localctx, 2); + { + setState(355); + match(USING); + setState(356); + match(T__0); + setState(357); + identifier(); + setState(362); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(358); + match(T__2); + 
setState(359); + identifier(); + } + } + setState(364); + _errHandler.sync(this); + _la = _input.LA(1); + } + setState(365); + match(T__1); + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class RelationPrimaryContext extends ParserRuleContext { + public RelationPrimaryContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_relationPrimary; } + + public RelationPrimaryContext() { } + public void copyFrom(RelationPrimaryContext ctx) { + super.copyFrom(ctx); + } + } + public static class AliasedRelationContext extends RelationPrimaryContext { + public RelationContext relation() { + return getRuleContext(RelationContext.class,0); + } + public QualifiedNameContext qualifiedName() { + return getRuleContext(QualifiedNameContext.class,0); + } + public TerminalNode AS() { return getToken(SqlBaseParser.AS, 0); } + public AliasedRelationContext(RelationPrimaryContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterAliasedRelation(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitAliasedRelation(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitAliasedRelation(this); + else return visitor.visitChildren(this); + } + } + public static class AliasedQueryContext extends RelationPrimaryContext { + public QueryNoWithContext queryNoWith() { + return getRuleContext(QueryNoWithContext.class,0); + } + public QualifiedNameContext qualifiedName() { + return getRuleContext(QualifiedNameContext.class,0); + } + public TerminalNode AS() { return getToken(SqlBaseParser.AS, 0); } + public AliasedQueryContext(RelationPrimaryContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterAliasedQuery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitAliasedQuery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitAliasedQuery(this); + else return visitor.visitChildren(this); + } + } + public static class TableNameContext extends RelationPrimaryContext { + public TableIdentifierContext tableIdentifier() { + return getRuleContext(TableIdentifierContext.class,0); + } + public QualifiedNameContext qualifiedName() { + return getRuleContext(QualifiedNameContext.class,0); + } + public TerminalNode AS() { return getToken(SqlBaseParser.AS, 0); } + public TableNameContext(RelationPrimaryContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterTableName(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitTableName(this); + } + @Override + public T accept(ParseTreeVisitor 
visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitTableName(this); + else return visitor.visitChildren(this); + } + } + + public final RelationPrimaryContext relationPrimary() throws RecognitionException { + RelationPrimaryContext _localctx = new RelationPrimaryContext(_ctx, getState()); + enterRule(_localctx, 38, RULE_relationPrimary); + int _la; + try { + setState(394); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,59,_ctx) ) { + case 1: + _localctx = new TableNameContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(369); + tableIdentifier(); + setState(374); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << AS) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { + { + setState(371); + _la = _input.LA(1); + if (_la==AS) { + { + setState(370); + match(AS); + } + } + + setState(373); + qualifiedName(); + } + } + + } + break; + case 2: + _localctx = new AliasedQueryContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(376); + match(T__0); + setState(377); + queryNoWith(); + setState(378); + match(T__1); + setState(383); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << AS) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { + { + setState(380); + _la = _input.LA(1); + if (_la==AS) { + { + setState(379); + match(AS); + } + } + + setState(382); + qualifiedName(); + } + } + + } + break; + case 3: + _localctx = new AliasedRelationContext(_localctx); + enterOuterAlt(_localctx, 3); + { + setState(385); + match(T__0); + setState(386); + relation(); + setState(387); + match(T__1); + setState(392); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << AS) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (IDENTIFIER - 64)) | (1L << 
(DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { + { + setState(389); + _la = _input.LA(1); + if (_la==AS) { + { + setState(388); + match(AS); + } + } + + setState(391); + qualifiedName(); + } + } + + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class ExpressionContext extends ParserRuleContext { + public BooleanExpressionContext booleanExpression() { + return getRuleContext(BooleanExpressionContext.class,0); + } + public ExpressionContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_expression; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterExpression(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitExpression(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitExpression(this); + else return visitor.visitChildren(this); + } + } + + public final ExpressionContext expression() throws RecognitionException { + ExpressionContext _localctx = new ExpressionContext(_ctx, getState()); + enterRule(_localctx, 40, RULE_expression); + try { + enterOuterAlt(_localctx, 1); + { + setState(396); + booleanExpression(0); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class BooleanExpressionContext extends ParserRuleContext { + public BooleanExpressionContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_booleanExpression; } + + public BooleanExpressionContext() { } + public void copyFrom(BooleanExpressionContext ctx) { + super.copyFrom(ctx); + } + } + public static class LogicalNotContext extends BooleanExpressionContext { + public TerminalNode NOT() { return getToken(SqlBaseParser.NOT, 0); } + public BooleanExpressionContext booleanExpression() { + return getRuleContext(BooleanExpressionContext.class,0); + } + public LogicalNotContext(BooleanExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterLogicalNot(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitLogicalNot(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitLogicalNot(this); + else return visitor.visitChildren(this); + } + } + public static class StringQueryContext extends BooleanExpressionContext { + public StringContext queryString; + public StringContext options; + public TerminalNode QUERY() { return getToken(SqlBaseParser.QUERY, 0); } + public List string() { + return getRuleContexts(StringContext.class); + } + public StringContext string(int i) { + return getRuleContext(StringContext.class,i); + } + public 
StringQueryContext(BooleanExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterStringQuery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitStringQuery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitStringQuery(this); + else return visitor.visitChildren(this); + } + } + public static class BooleanDefaultContext extends BooleanExpressionContext { + public PredicatedContext predicated() { + return getRuleContext(PredicatedContext.class,0); + } + public BooleanDefaultContext(BooleanExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterBooleanDefault(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitBooleanDefault(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitBooleanDefault(this); + else return visitor.visitChildren(this); + } + } + public static class ExistsContext extends BooleanExpressionContext { + public TerminalNode EXISTS() { return getToken(SqlBaseParser.EXISTS, 0); } + public QueryContext query() { + return getRuleContext(QueryContext.class,0); + } + public ExistsContext(BooleanExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterExists(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitExists(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitExists(this); + else return visitor.visitChildren(this); + } + } + public static class MultiMatchQueryContext extends BooleanExpressionContext { + public StringContext multiFields; + public StringContext queryString; + public StringContext options; + public TerminalNode MATCH() { return getToken(SqlBaseParser.MATCH, 0); } + public List string() { + return getRuleContexts(StringContext.class); + } + public StringContext string(int i) { + return getRuleContext(StringContext.class,i); + } + public MultiMatchQueryContext(BooleanExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterMultiMatchQuery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitMultiMatchQuery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitMultiMatchQuery(this); + else return visitor.visitChildren(this); + } + } + public static class MatchQueryContext extends BooleanExpressionContext { + public QualifiedNameContext singleField; + public StringContext queryString; + public StringContext options; + public TerminalNode MATCH() { return 
getToken(SqlBaseParser.MATCH, 0); } + public QualifiedNameContext qualifiedName() { + return getRuleContext(QualifiedNameContext.class,0); + } + public List string() { + return getRuleContexts(StringContext.class); + } + public StringContext string(int i) { + return getRuleContext(StringContext.class,i); + } + public MatchQueryContext(BooleanExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterMatchQuery(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitMatchQuery(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitMatchQuery(this); + else return visitor.visitChildren(this); + } + } + public static class LogicalBinaryContext extends BooleanExpressionContext { + public BooleanExpressionContext left; + public Token operator; + public BooleanExpressionContext right; + public List booleanExpression() { + return getRuleContexts(BooleanExpressionContext.class); + } + public BooleanExpressionContext booleanExpression(int i) { + return getRuleContext(BooleanExpressionContext.class,i); + } + public TerminalNode AND() { return getToken(SqlBaseParser.AND, 0); } + public TerminalNode OR() { return getToken(SqlBaseParser.OR, 0); } + public LogicalBinaryContext(BooleanExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterLogicalBinary(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitLogicalBinary(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitLogicalBinary(this); + else return visitor.visitChildren(this); + } + } + + public final BooleanExpressionContext booleanExpression() throws RecognitionException { + return booleanExpression(0); + } + + private BooleanExpressionContext booleanExpression(int _p) throws RecognitionException { + ParserRuleContext _parentctx = _ctx; + int _parentState = getState(); + BooleanExpressionContext _localctx = new BooleanExpressionContext(_ctx, _parentState); + BooleanExpressionContext _prevctx = _localctx; + int _startState = 42; + enterRecursionRule(_localctx, 42, RULE_booleanExpression, _p); + int _la; + try { + int _alt; + enterOuterAlt(_localctx, 1); + { + setState(447); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,63,_ctx) ) { + case 1: + { + _localctx = new LogicalNotContext(_localctx); + _ctx = _localctx; + _prevctx = _localctx; + + setState(399); + match(NOT); + setState(400); + booleanExpression(8); + } + break; + case 2: + { + _localctx = new ExistsContext(_localctx); + _ctx = _localctx; + _prevctx = _localctx; + setState(401); + match(EXISTS); + setState(402); + match(T__0); + setState(403); + query(); + setState(404); + match(T__1); + } + break; + case 3: + { + _localctx = new StringQueryContext(_localctx); + _ctx = _localctx; + _prevctx = _localctx; + setState(406); + match(QUERY); + setState(407); + match(T__0); + setState(408); + ((StringQueryContext)_localctx).queryString = string(); + setState(413); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) 
{ + { + { + setState(409); + match(T__2); + setState(410); + ((StringQueryContext)_localctx).options = string(); + } + } + setState(415); + _errHandler.sync(this); + _la = _input.LA(1); + } + setState(416); + match(T__1); + } + break; + case 4: + { + _localctx = new MatchQueryContext(_localctx); + _ctx = _localctx; + _prevctx = _localctx; + setState(418); + match(MATCH); + setState(419); + match(T__0); + setState(420); + ((MatchQueryContext)_localctx).singleField = qualifiedName(); + setState(421); + match(T__2); + setState(422); + ((MatchQueryContext)_localctx).queryString = string(); + setState(427); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(423); + match(T__2); + setState(424); + ((MatchQueryContext)_localctx).options = string(); + } + } + setState(429); + _errHandler.sync(this); + _la = _input.LA(1); + } + setState(430); + match(T__1); + } + break; + case 5: + { + _localctx = new MultiMatchQueryContext(_localctx); + _ctx = _localctx; + _prevctx = _localctx; + setState(432); + match(MATCH); + setState(433); + match(T__0); + setState(434); + ((MultiMatchQueryContext)_localctx).multiFields = string(); + setState(435); + match(T__2); + setState(436); + ((MultiMatchQueryContext)_localctx).queryString = string(); + setState(441); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(437); + match(T__2); + setState(438); + ((MultiMatchQueryContext)_localctx).options = string(); + } + } + setState(443); + _errHandler.sync(this); + _la = _input.LA(1); + } + setState(444); + match(T__1); + } + break; + case 6: + { + _localctx = new BooleanDefaultContext(_localctx); + _ctx = _localctx; + _prevctx = _localctx; + setState(446); + predicated(); + } + break; + } + _ctx.stop = _input.LT(-1); + setState(457); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,65,_ctx); + while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { + if ( _alt==1 ) { + if ( _parseListeners!=null ) triggerExitRuleEvent(); + _prevctx = _localctx; + { + setState(455); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,64,_ctx) ) { + case 1: + { + _localctx = new LogicalBinaryContext(new BooleanExpressionContext(_parentctx, _parentState)); + ((LogicalBinaryContext)_localctx).left = _prevctx; + pushNewRecursionContext(_localctx, _startState, RULE_booleanExpression); + setState(449); + if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)"); + setState(450); + ((LogicalBinaryContext)_localctx).operator = match(AND); + setState(451); + ((LogicalBinaryContext)_localctx).right = booleanExpression(3); + } + break; + case 2: + { + _localctx = new LogicalBinaryContext(new BooleanExpressionContext(_parentctx, _parentState)); + ((LogicalBinaryContext)_localctx).left = _prevctx; + pushNewRecursionContext(_localctx, _startState, RULE_booleanExpression); + setState(452); + if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)"); + setState(453); + ((LogicalBinaryContext)_localctx).operator = match(OR); + setState(454); + ((LogicalBinaryContext)_localctx).right = booleanExpression(2); + } + break; + } + } + } + setState(459); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,65,_ctx); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + unrollRecursionContexts(_parentctx); + } + return _localctx; 
+ } + + public static class PredicatedContext extends ParserRuleContext { + public ValueExpressionContext valueExpression() { + return getRuleContext(ValueExpressionContext.class,0); + } + public PredicateContext predicate() { + return getRuleContext(PredicateContext.class,0); + } + public PredicatedContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_predicated; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterPredicated(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitPredicated(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitPredicated(this); + else return visitor.visitChildren(this); + } + } + + public final PredicatedContext predicated() throws RecognitionException { + PredicatedContext _localctx = new PredicatedContext(_ctx, getState()); + enterRule(_localctx, 44, RULE_predicated); + try { + enterOuterAlt(_localctx, 1); + { + setState(460); + valueExpression(0); + setState(462); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,66,_ctx) ) { + case 1: + { + setState(461); + predicate(); + } + break; + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class PredicateContext extends ParserRuleContext { + public Token kind; + public ValueExpressionContext lower; + public ValueExpressionContext upper; + public StringContext regex; + public TerminalNode AND() { return getToken(SqlBaseParser.AND, 0); } + public TerminalNode BETWEEN() { return getToken(SqlBaseParser.BETWEEN, 0); } + public List valueExpression() { + return getRuleContexts(ValueExpressionContext.class); + } + public ValueExpressionContext valueExpression(int i) { + return getRuleContext(ValueExpressionContext.class,i); + } + public TerminalNode NOT() { return getToken(SqlBaseParser.NOT, 0); } + public List expression() { + return getRuleContexts(ExpressionContext.class); + } + public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public TerminalNode IN() { return getToken(SqlBaseParser.IN, 0); } + public QueryContext query() { + return getRuleContext(QueryContext.class,0); + } + public PatternContext pattern() { + return getRuleContext(PatternContext.class,0); + } + public TerminalNode LIKE() { return getToken(SqlBaseParser.LIKE, 0); } + public TerminalNode RLIKE() { return getToken(SqlBaseParser.RLIKE, 0); } + public StringContext string() { + return getRuleContext(StringContext.class,0); + } + public TerminalNode IS() { return getToken(SqlBaseParser.IS, 0); } + public TerminalNode NULL() { return getToken(SqlBaseParser.NULL, 0); } + public PredicateContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_predicate; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterPredicate(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) 
((SqlBaseListener)listener).exitPredicate(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitPredicate(this); + else return visitor.visitChildren(this); + } + } + + public final PredicateContext predicate() throws RecognitionException { + PredicateContext _localctx = new PredicateContext(_ctx, getState()); + enterRule(_localctx, 46, RULE_predicate); + int _la; + try { + setState(510); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,74,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(465); + _la = _input.LA(1); + if (_la==NOT) { + { + setState(464); + match(NOT); + } + } + + setState(467); + ((PredicateContext)_localctx).kind = match(BETWEEN); + setState(468); + ((PredicateContext)_localctx).lower = valueExpression(0); + setState(469); + match(AND); + setState(470); + ((PredicateContext)_localctx).upper = valueExpression(0); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(473); + _la = _input.LA(1); + if (_la==NOT) { + { + setState(472); + match(NOT); + } + } + + setState(475); + ((PredicateContext)_localctx).kind = match(IN); + setState(476); + match(T__0); + setState(477); + expression(); + setState(482); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(478); + match(T__2); + setState(479); + expression(); + } + } + setState(484); + _errHandler.sync(this); + _la = _input.LA(1); + } + setState(485); + match(T__1); + } + break; + case 3: + enterOuterAlt(_localctx, 3); + { + setState(488); + _la = _input.LA(1); + if (_la==NOT) { + { + setState(487); + match(NOT); + } + } + + setState(490); + ((PredicateContext)_localctx).kind = match(IN); + setState(491); + match(T__0); + setState(492); + query(); + setState(493); + match(T__1); + } + break; + case 4: + enterOuterAlt(_localctx, 4); + { + setState(496); + _la = _input.LA(1); + if (_la==NOT) { + { + setState(495); + match(NOT); + } + } + + setState(498); + ((PredicateContext)_localctx).kind = match(LIKE); + setState(499); + pattern(); + } + break; + case 5: + enterOuterAlt(_localctx, 5); + { + setState(501); + _la = _input.LA(1); + if (_la==NOT) { + { + setState(500); + match(NOT); + } + } + + setState(503); + ((PredicateContext)_localctx).kind = match(RLIKE); + setState(504); + ((PredicateContext)_localctx).regex = string(); + } + break; + case 6: + enterOuterAlt(_localctx, 6); + { + setState(505); + match(IS); + setState(507); + _la = _input.LA(1); + if (_la==NOT) { + { + setState(506); + match(NOT); + } + } + + setState(509); + ((PredicateContext)_localctx).kind = match(NULL); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class PatternContext extends ParserRuleContext { + public StringContext value; + public StringContext escape; + public List string() { + return getRuleContexts(StringContext.class); + } + public StringContext string(int i) { + return getRuleContext(StringContext.class,i); + } + public TerminalNode ESCAPE() { return getToken(SqlBaseParser.ESCAPE, 0); } + public PatternContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_pattern; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) 
((SqlBaseListener)listener).enterPattern(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitPattern(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitPattern(this); + else return visitor.visitChildren(this); + } + } + + public final PatternContext pattern() throws RecognitionException { + PatternContext _localctx = new PatternContext(_ctx, getState()); + enterRule(_localctx, 48, RULE_pattern); + try { + enterOuterAlt(_localctx, 1); + { + setState(512); + ((PatternContext)_localctx).value = string(); + setState(515); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,75,_ctx) ) { + case 1: + { + setState(513); + match(ESCAPE); + setState(514); + ((PatternContext)_localctx).escape = string(); + } + break; + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class ValueExpressionContext extends ParserRuleContext { + public ValueExpressionContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_valueExpression; } + + public ValueExpressionContext() { } + public void copyFrom(ValueExpressionContext ctx) { + super.copyFrom(ctx); + } + } + public static class ValueExpressionDefaultContext extends ValueExpressionContext { + public PrimaryExpressionContext primaryExpression() { + return getRuleContext(PrimaryExpressionContext.class,0); + } + public ValueExpressionDefaultContext(ValueExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterValueExpressionDefault(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitValueExpressionDefault(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitValueExpressionDefault(this); + else return visitor.visitChildren(this); + } + } + public static class ComparisonContext extends ValueExpressionContext { + public ValueExpressionContext left; + public ValueExpressionContext right; + public ComparisonOperatorContext comparisonOperator() { + return getRuleContext(ComparisonOperatorContext.class,0); + } + public List valueExpression() { + return getRuleContexts(ValueExpressionContext.class); + } + public ValueExpressionContext valueExpression(int i) { + return getRuleContext(ValueExpressionContext.class,i); + } + public ComparisonContext(ValueExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterComparison(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitComparison(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitComparison(this); + else return visitor.visitChildren(this); + } + } + public static class ArithmeticBinaryContext extends 
ValueExpressionContext { + public ValueExpressionContext left; + public Token operator; + public ValueExpressionContext right; + public List valueExpression() { + return getRuleContexts(ValueExpressionContext.class); + } + public ValueExpressionContext valueExpression(int i) { + return getRuleContext(ValueExpressionContext.class,i); + } + public TerminalNode ASTERISK() { return getToken(SqlBaseParser.ASTERISK, 0); } + public TerminalNode SLASH() { return getToken(SqlBaseParser.SLASH, 0); } + public TerminalNode PERCENT() { return getToken(SqlBaseParser.PERCENT, 0); } + public TerminalNode PLUS() { return getToken(SqlBaseParser.PLUS, 0); } + public TerminalNode MINUS() { return getToken(SqlBaseParser.MINUS, 0); } + public ArithmeticBinaryContext(ValueExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterArithmeticBinary(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitArithmeticBinary(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitArithmeticBinary(this); + else return visitor.visitChildren(this); + } + } + public static class ArithmeticUnaryContext extends ValueExpressionContext { + public Token operator; + public ValueExpressionContext valueExpression() { + return getRuleContext(ValueExpressionContext.class,0); + } + public TerminalNode MINUS() { return getToken(SqlBaseParser.MINUS, 0); } + public TerminalNode PLUS() { return getToken(SqlBaseParser.PLUS, 0); } + public ArithmeticUnaryContext(ValueExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterArithmeticUnary(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitArithmeticUnary(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitArithmeticUnary(this); + else return visitor.visitChildren(this); + } + } + + public final ValueExpressionContext valueExpression() throws RecognitionException { + return valueExpression(0); + } + + private ValueExpressionContext valueExpression(int _p) throws RecognitionException { + ParserRuleContext _parentctx = _ctx; + int _parentState = getState(); + ValueExpressionContext _localctx = new ValueExpressionContext(_ctx, _parentState); + ValueExpressionContext _prevctx = _localctx; + int _startState = 50; + enterRecursionRule(_localctx, 50, RULE_valueExpression, _p); + int _la; + try { + int _alt; + enterOuterAlt(_localctx, 1); + { + setState(521); + switch (_input.LA(1)) { + case T__0: + case ANALYZE: + case ANALYZED: + case CAST: + case CATALOGS: + case COLUMNS: + case DEBUG: + case EXECUTABLE: + case EXPLAIN: + case EXTRACT: + case FALSE: + case FORMAT: + case FUNCTIONS: + case GRAPHVIZ: + case MAPPED: + case NULL: + case OPTIMIZED: + case PARSED: + case PHYSICAL: + case PLAN: + case RLIKE: + case QUERY: + case SCHEMAS: + case SHOW: + case SYS: + case TABLES: + case TEXT: + case TRUE: + case TYPE: + case TYPES: + case VERIFY: + case ASTERISK: + case PARAM: + case STRING: + case INTEGER_VALUE: + case DECIMAL_VALUE: + case IDENTIFIER: + case 
DIGIT_IDENTIFIER: + case QUOTED_IDENTIFIER: + case BACKQUOTED_IDENTIFIER: + { + _localctx = new ValueExpressionDefaultContext(_localctx); + _ctx = _localctx; + _prevctx = _localctx; + + setState(518); + primaryExpression(); + } + break; + case PLUS: + case MINUS: + { + _localctx = new ArithmeticUnaryContext(_localctx); + _ctx = _localctx; + _prevctx = _localctx; + setState(519); + ((ArithmeticUnaryContext)_localctx).operator = _input.LT(1); + _la = _input.LA(1); + if ( !(_la==PLUS || _la==MINUS) ) { + ((ArithmeticUnaryContext)_localctx).operator = (Token)_errHandler.recoverInline(this); + } else { + consume(); + } + setState(520); + valueExpression(4); + } + break; + default: + throw new NoViableAltException(this); + } + _ctx.stop = _input.LT(-1); + setState(535); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,78,_ctx); + while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { + if ( _alt==1 ) { + if ( _parseListeners!=null ) triggerExitRuleEvent(); + _prevctx = _localctx; + { + setState(533); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,77,_ctx) ) { + case 1: + { + _localctx = new ArithmeticBinaryContext(new ValueExpressionContext(_parentctx, _parentState)); + ((ArithmeticBinaryContext)_localctx).left = _prevctx; + pushNewRecursionContext(_localctx, _startState, RULE_valueExpression); + setState(523); + if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, "precpred(_ctx, 3)"); + setState(524); + ((ArithmeticBinaryContext)_localctx).operator = _input.LT(1); + _la = _input.LA(1); + if ( !(((((_la - 80)) & ~0x3f) == 0 && ((1L << (_la - 80)) & ((1L << (ASTERISK - 80)) | (1L << (SLASH - 80)) | (1L << (PERCENT - 80)))) != 0)) ) { + ((ArithmeticBinaryContext)_localctx).operator = (Token)_errHandler.recoverInline(this); + } else { + consume(); + } + setState(525); + ((ArithmeticBinaryContext)_localctx).right = valueExpression(4); + } + break; + case 2: + { + _localctx = new ArithmeticBinaryContext(new ValueExpressionContext(_parentctx, _parentState)); + ((ArithmeticBinaryContext)_localctx).left = _prevctx; + pushNewRecursionContext(_localctx, _startState, RULE_valueExpression); + setState(526); + if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)"); + setState(527); + ((ArithmeticBinaryContext)_localctx).operator = _input.LT(1); + _la = _input.LA(1); + if ( !(_la==PLUS || _la==MINUS) ) { + ((ArithmeticBinaryContext)_localctx).operator = (Token)_errHandler.recoverInline(this); + } else { + consume(); + } + setState(528); + ((ArithmeticBinaryContext)_localctx).right = valueExpression(3); + } + break; + case 3: + { + _localctx = new ComparisonContext(new ValueExpressionContext(_parentctx, _parentState)); + ((ComparisonContext)_localctx).left = _prevctx; + pushNewRecursionContext(_localctx, _startState, RULE_valueExpression); + setState(529); + if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)"); + setState(530); + comparisonOperator(); + setState(531); + ((ComparisonContext)_localctx).right = valueExpression(2); + } + break; + } + } + } + setState(537); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,78,_ctx); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + unrollRecursionContexts(_parentctx); + } + return _localctx; + } + + public static class PrimaryExpressionContext extends ParserRuleContext { 
+ public PrimaryExpressionContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_primaryExpression; } + + public PrimaryExpressionContext() { } + public void copyFrom(PrimaryExpressionContext ctx) { + super.copyFrom(ctx); + } + } + public static class DereferenceContext extends PrimaryExpressionContext { + public QualifiedNameContext qualifiedName() { + return getRuleContext(QualifiedNameContext.class,0); + } + public DereferenceContext(PrimaryExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterDereference(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitDereference(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitDereference(this); + else return visitor.visitChildren(this); + } + } + public static class CastContext extends PrimaryExpressionContext { + public TerminalNode CAST() { return getToken(SqlBaseParser.CAST, 0); } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public TerminalNode AS() { return getToken(SqlBaseParser.AS, 0); } + public DataTypeContext dataType() { + return getRuleContext(DataTypeContext.class,0); + } + public CastContext(PrimaryExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterCast(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitCast(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitCast(this); + else return visitor.visitChildren(this); + } + } + public static class ConstantDefaultContext extends PrimaryExpressionContext { + public ConstantContext constant() { + return getRuleContext(ConstantContext.class,0); + } + public ConstantDefaultContext(PrimaryExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterConstantDefault(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitConstantDefault(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitConstantDefault(this); + else return visitor.visitChildren(this); + } + } + public static class ColumnReferenceContext extends PrimaryExpressionContext { + public IdentifierContext identifier() { + return getRuleContext(IdentifierContext.class,0); + } + public ColumnReferenceContext(PrimaryExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterColumnReference(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitColumnReference(this); + } + @Override + public T 
accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitColumnReference(this); + else return visitor.visitChildren(this); + } + } + public static class ExtractContext extends PrimaryExpressionContext { + public IdentifierContext field; + public TerminalNode EXTRACT() { return getToken(SqlBaseParser.EXTRACT, 0); } + public TerminalNode FROM() { return getToken(SqlBaseParser.FROM, 0); } + public ValueExpressionContext valueExpression() { + return getRuleContext(ValueExpressionContext.class,0); + } + public IdentifierContext identifier() { + return getRuleContext(IdentifierContext.class,0); + } + public ExtractContext(PrimaryExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterExtract(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitExtract(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitExtract(this); + else return visitor.visitChildren(this); + } + } + public static class ParenthesizedExpressionContext extends PrimaryExpressionContext { + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public ParenthesizedExpressionContext(PrimaryExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterParenthesizedExpression(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitParenthesizedExpression(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitParenthesizedExpression(this); + else return visitor.visitChildren(this); + } + } + public static class StarContext extends PrimaryExpressionContext { + public TerminalNode ASTERISK() { return getToken(SqlBaseParser.ASTERISK, 0); } + public QualifiedNameContext qualifiedName() { + return getRuleContext(QualifiedNameContext.class,0); + } + public TerminalNode DOT() { return getToken(SqlBaseParser.DOT, 0); } + public StarContext(PrimaryExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterStar(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitStar(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitStar(this); + else return visitor.visitChildren(this); + } + } + public static class FunctionCallContext extends PrimaryExpressionContext { + public IdentifierContext identifier() { + return getRuleContext(IdentifierContext.class,0); + } + public List expression() { + return getRuleContexts(ExpressionContext.class); + } + public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public SetQuantifierContext setQuantifier() { + return getRuleContext(SetQuantifierContext.class,0); + } + public FunctionCallContext(PrimaryExpressionContext 
ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterFunctionCall(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitFunctionCall(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitFunctionCall(this); + else return visitor.visitChildren(this); + } + } + public static class SubqueryExpressionContext extends PrimaryExpressionContext { + public QueryContext query() { + return getRuleContext(QueryContext.class,0); + } + public SubqueryExpressionContext(PrimaryExpressionContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterSubqueryExpression(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitSubqueryExpression(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitSubqueryExpression(this); + else return visitor.visitChildren(this); + } + } + + public final PrimaryExpressionContext primaryExpression() throws RecognitionException { + PrimaryExpressionContext _localctx = new PrimaryExpressionContext(_ctx, getState()); + enterRule(_localctx, 52, RULE_primaryExpression); + int _la; + try { + setState(587); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,83,_ctx) ) { + case 1: + _localctx = new CastContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(538); + match(CAST); + setState(539); + match(T__0); + setState(540); + expression(); + setState(541); + match(AS); + setState(542); + dataType(); + setState(543); + match(T__1); + } + break; + case 2: + _localctx = new ExtractContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(545); + match(EXTRACT); + setState(546); + match(T__0); + setState(547); + ((ExtractContext)_localctx).field = identifier(); + setState(548); + match(FROM); + setState(549); + valueExpression(0); + setState(550); + match(T__1); + } + break; + case 3: + _localctx = new ConstantDefaultContext(_localctx); + enterOuterAlt(_localctx, 3); + { + setState(552); + constant(); + } + break; + case 4: + _localctx = new StarContext(_localctx); + enterOuterAlt(_localctx, 4); + { + setState(553); + match(ASTERISK); + } + break; + case 5: + _localctx = new StarContext(_localctx); + enterOuterAlt(_localctx, 5); + { + setState(557); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { + { + setState(554); + qualifiedName(); + setState(555); + 
match(DOT); + } + } + + setState(559); + match(ASTERISK); + } + break; + case 6: + _localctx = new FunctionCallContext(_localctx); + enterOuterAlt(_localctx, 6); + { + setState(560); + identifier(); + setState(561); + match(T__0); + setState(573); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << T__0) | (1L << ALL) | (1L << ANALYZE) | (1L << ANALYZED) | (1L << CAST) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << DISTINCT) | (1L << EXECUTABLE) | (1L << EXISTS) | (1L << EXPLAIN) | (1L << EXTRACT) | (1L << FALSE) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << MAPPED) | (1L << MATCH) | (1L << NOT) | (1L << NULL) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TRUE - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (PLUS - 64)) | (1L << (MINUS - 64)) | (1L << (ASTERISK - 64)) | (1L << (PARAM - 64)) | (1L << (STRING - 64)) | (1L << (INTEGER_VALUE - 64)) | (1L << (DECIMAL_VALUE - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { + { + setState(563); + _la = _input.LA(1); + if (_la==ALL || _la==DISTINCT) { + { + setState(562); + setQuantifier(); + } + } + + setState(565); + expression(); + setState(570); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==T__2) { + { + { + setState(566); + match(T__2); + setState(567); + expression(); + } + } + setState(572); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + + setState(575); + match(T__1); + } + break; + case 7: + _localctx = new SubqueryExpressionContext(_localctx); + enterOuterAlt(_localctx, 7); + { + setState(577); + match(T__0); + setState(578); + query(); + setState(579); + match(T__1); + } + break; + case 8: + _localctx = new ColumnReferenceContext(_localctx); + enterOuterAlt(_localctx, 8); + { + setState(581); + identifier(); + } + break; + case 9: + _localctx = new DereferenceContext(_localctx); + enterOuterAlt(_localctx, 9); + { + setState(582); + qualifiedName(); + } + break; + case 10: + _localctx = new ParenthesizedExpressionContext(_localctx); + enterOuterAlt(_localctx, 10); + { + setState(583); + match(T__0); + setState(584); + expression(); + setState(585); + match(T__1); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class ConstantContext extends ParserRuleContext { + public ConstantContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_constant; } + + public ConstantContext() { } + public void copyFrom(ConstantContext ctx) { + super.copyFrom(ctx); + } + } + public static class NullLiteralContext extends ConstantContext { + public TerminalNode NULL() { return getToken(SqlBaseParser.NULL, 0); } + public NullLiteralContext(ConstantContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterNullLiteral(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) 
((SqlBaseListener)listener).exitNullLiteral(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitNullLiteral(this); + else return visitor.visitChildren(this); + } + } + public static class StringLiteralContext extends ConstantContext { + public List STRING() { return getTokens(SqlBaseParser.STRING); } + public TerminalNode STRING(int i) { + return getToken(SqlBaseParser.STRING, i); + } + public StringLiteralContext(ConstantContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterStringLiteral(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitStringLiteral(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitStringLiteral(this); + else return visitor.visitChildren(this); + } + } + public static class ParamLiteralContext extends ConstantContext { + public TerminalNode PARAM() { return getToken(SqlBaseParser.PARAM, 0); } + public ParamLiteralContext(ConstantContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterParamLiteral(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitParamLiteral(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitParamLiteral(this); + else return visitor.visitChildren(this); + } + } + public static class NumericLiteralContext extends ConstantContext { + public NumberContext number() { + return getRuleContext(NumberContext.class,0); + } + public NumericLiteralContext(ConstantContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterNumericLiteral(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitNumericLiteral(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitNumericLiteral(this); + else return visitor.visitChildren(this); + } + } + public static class BooleanLiteralContext extends ConstantContext { + public BooleanValueContext booleanValue() { + return getRuleContext(BooleanValueContext.class,0); + } + public BooleanLiteralContext(ConstantContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterBooleanLiteral(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitBooleanLiteral(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitBooleanLiteral(this); + else return visitor.visitChildren(this); + } + } + + public final ConstantContext constant() throws RecognitionException { + ConstantContext _localctx = new 
ConstantContext(_ctx, getState()); + enterRule(_localctx, 54, RULE_constant); + try { + int _alt; + setState(598); + switch (_input.LA(1)) { + case NULL: + _localctx = new NullLiteralContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(589); + match(NULL); + } + break; + case INTEGER_VALUE: + case DECIMAL_VALUE: + _localctx = new NumericLiteralContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(590); + number(); + } + break; + case FALSE: + case TRUE: + _localctx = new BooleanLiteralContext(_localctx); + enterOuterAlt(_localctx, 3); + { + setState(591); + booleanValue(); + } + break; + case STRING: + _localctx = new StringLiteralContext(_localctx); + enterOuterAlt(_localctx, 4); + { + setState(593); + _errHandler.sync(this); + _alt = 1; + do { + switch (_alt) { + case 1: + { + { + setState(592); + match(STRING); + } + } + break; + default: + throw new NoViableAltException(this); + } + setState(595); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,84,_ctx); + } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); + } + break; + case PARAM: + _localctx = new ParamLiteralContext(_localctx); + enterOuterAlt(_localctx, 5); + { + setState(597); + match(PARAM); + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class ComparisonOperatorContext extends ParserRuleContext { + public TerminalNode EQ() { return getToken(SqlBaseParser.EQ, 0); } + public TerminalNode NEQ() { return getToken(SqlBaseParser.NEQ, 0); } + public TerminalNode LT() { return getToken(SqlBaseParser.LT, 0); } + public TerminalNode LTE() { return getToken(SqlBaseParser.LTE, 0); } + public TerminalNode GT() { return getToken(SqlBaseParser.GT, 0); } + public TerminalNode GTE() { return getToken(SqlBaseParser.GTE, 0); } + public ComparisonOperatorContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_comparisonOperator; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterComparisonOperator(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitComparisonOperator(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitComparisonOperator(this); + else return visitor.visitChildren(this); + } + } + + public final ComparisonOperatorContext comparisonOperator() throws RecognitionException { + ComparisonOperatorContext _localctx = new ComparisonOperatorContext(_ctx, getState()); + enterRule(_localctx, 56, RULE_comparisonOperator); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(600); + _la = _input.LA(1); + if ( !(((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (EQ - 72)) | (1L << (NEQ - 72)) | (1L << (LT - 72)) | (1L << (LTE - 72)) | (1L << (GT - 72)) | (1L << (GTE - 72)))) != 0)) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + 
} + + public static class BooleanValueContext extends ParserRuleContext { + public TerminalNode TRUE() { return getToken(SqlBaseParser.TRUE, 0); } + public TerminalNode FALSE() { return getToken(SqlBaseParser.FALSE, 0); } + public BooleanValueContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_booleanValue; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterBooleanValue(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitBooleanValue(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitBooleanValue(this); + else return visitor.visitChildren(this); + } + } + + public final BooleanValueContext booleanValue() throws RecognitionException { + BooleanValueContext _localctx = new BooleanValueContext(_ctx, getState()); + enterRule(_localctx, 58, RULE_booleanValue); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(602); + _la = _input.LA(1); + if ( !(_la==FALSE || _la==TRUE) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class DataTypeContext extends ParserRuleContext { + public DataTypeContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_dataType; } + + public DataTypeContext() { } + public void copyFrom(DataTypeContext ctx) { + super.copyFrom(ctx); + } + } + public static class PrimitiveDataTypeContext extends DataTypeContext { + public IdentifierContext identifier() { + return getRuleContext(IdentifierContext.class,0); + } + public PrimitiveDataTypeContext(DataTypeContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterPrimitiveDataType(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitPrimitiveDataType(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitPrimitiveDataType(this); + else return visitor.visitChildren(this); + } + } + + public final DataTypeContext dataType() throws RecognitionException { + DataTypeContext _localctx = new DataTypeContext(_ctx, getState()); + enterRule(_localctx, 60, RULE_dataType); + try { + _localctx = new PrimitiveDataTypeContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(604); + identifier(); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class QualifiedNameContext extends ParserRuleContext { + public List identifier() { + return getRuleContexts(IdentifierContext.class); + } + public IdentifierContext identifier(int i) { + return getRuleContext(IdentifierContext.class,i); + } + public List DOT() { return getTokens(SqlBaseParser.DOT); } + public 
TerminalNode DOT(int i) { + return getToken(SqlBaseParser.DOT, i); + } + public QualifiedNameContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_qualifiedName; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterQualifiedName(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitQualifiedName(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitQualifiedName(this); + else return visitor.visitChildren(this); + } + } + + public final QualifiedNameContext qualifiedName() throws RecognitionException { + QualifiedNameContext _localctx = new QualifiedNameContext(_ctx, getState()); + enterRule(_localctx, 62, RULE_qualifiedName); + try { + int _alt; + enterOuterAlt(_localctx, 1); + { + setState(611); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,86,_ctx); + while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { + if ( _alt==1 ) { + { + { + setState(606); + identifier(); + setState(607); + match(DOT); + } + } + } + setState(613); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,86,_ctx); + } + setState(614); + identifier(); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class IdentifierContext extends ParserRuleContext { + public QuoteIdentifierContext quoteIdentifier() { + return getRuleContext(QuoteIdentifierContext.class,0); + } + public UnquoteIdentifierContext unquoteIdentifier() { + return getRuleContext(UnquoteIdentifierContext.class,0); + } + public IdentifierContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_identifier; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterIdentifier(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitIdentifier(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitIdentifier(this); + else return visitor.visitChildren(this); + } + } + + public final IdentifierContext identifier() throws RecognitionException { + IdentifierContext _localctx = new IdentifierContext(_ctx, getState()); + enterRule(_localctx, 64, RULE_identifier); + try { + setState(618); + switch (_input.LA(1)) { + case QUOTED_IDENTIFIER: + case BACKQUOTED_IDENTIFIER: + enterOuterAlt(_localctx, 1); + { + setState(616); + quoteIdentifier(); + } + break; + case ANALYZE: + case ANALYZED: + case CATALOGS: + case COLUMNS: + case DEBUG: + case EXECUTABLE: + case EXPLAIN: + case FORMAT: + case FUNCTIONS: + case GRAPHVIZ: + case MAPPED: + case OPTIMIZED: + case PARSED: + case PHYSICAL: + case PLAN: + case RLIKE: + case QUERY: + case SCHEMAS: + case SHOW: + case SYS: + case TABLES: + case TEXT: + case TYPE: + case TYPES: + case VERIFY: + case IDENTIFIER: + case DIGIT_IDENTIFIER: + enterOuterAlt(_localctx, 2); + { 
+ setState(617); + unquoteIdentifier(); + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class TableIdentifierContext extends ParserRuleContext { + public IdentifierContext catalog; + public IdentifierContext name; + public TerminalNode TABLE_IDENTIFIER() { return getToken(SqlBaseParser.TABLE_IDENTIFIER, 0); } + public List identifier() { + return getRuleContexts(IdentifierContext.class); + } + public IdentifierContext identifier(int i) { + return getRuleContext(IdentifierContext.class,i); + } + public TableIdentifierContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_tableIdentifier; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterTableIdentifier(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitTableIdentifier(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitTableIdentifier(this); + else return visitor.visitChildren(this); + } + } + + public final TableIdentifierContext tableIdentifier() throws RecognitionException { + TableIdentifierContext _localctx = new TableIdentifierContext(_ctx, getState()); + enterRule(_localctx, 66, RULE_tableIdentifier); + int _la; + try { + setState(632); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,90,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(623); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << ANALYZE) | (1L << ANALYZED) | (1L << CATALOGS) | (1L << COLUMNS) | (1L << DEBUG) | (1L << EXECUTABLE) | (1L << EXPLAIN) | (1L << FORMAT) | (1L << FUNCTIONS) | (1L << GRAPHVIZ) | (1L << MAPPED) | (1L << OPTIMIZED) | (1L << PARSED) | (1L << PHYSICAL) | (1L << PLAN) | (1L << RLIKE) | (1L << QUERY) | (1L << SCHEMAS) | (1L << SHOW) | (1L << SYS) | (1L << TABLES))) != 0) || ((((_la - 64)) & ~0x3f) == 0 && ((1L << (_la - 64)) & ((1L << (TEXT - 64)) | (1L << (TYPE - 64)) | (1L << (TYPES - 64)) | (1L << (VERIFY - 64)) | (1L << (IDENTIFIER - 64)) | (1L << (DIGIT_IDENTIFIER - 64)) | (1L << (QUOTED_IDENTIFIER - 64)) | (1L << (BACKQUOTED_IDENTIFIER - 64)))) != 0)) { + { + setState(620); + ((TableIdentifierContext)_localctx).catalog = identifier(); + setState(621); + match(T__3); + } + } + + setState(625); + match(TABLE_IDENTIFIER); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(629); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,89,_ctx) ) { + case 1: + { + setState(626); + ((TableIdentifierContext)_localctx).catalog = identifier(); + setState(627); + match(T__3); + } + break; + } + setState(631); + ((TableIdentifierContext)_localctx).name = identifier(); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class QuoteIdentifierContext extends ParserRuleContext { + public QuoteIdentifierContext(ParserRuleContext parent, int invokingState) { + super(parent, 
invokingState); + } + @Override public int getRuleIndex() { return RULE_quoteIdentifier; } + + public QuoteIdentifierContext() { } + public void copyFrom(QuoteIdentifierContext ctx) { + super.copyFrom(ctx); + } + } + public static class BackQuotedIdentifierContext extends QuoteIdentifierContext { + public TerminalNode BACKQUOTED_IDENTIFIER() { return getToken(SqlBaseParser.BACKQUOTED_IDENTIFIER, 0); } + public BackQuotedIdentifierContext(QuoteIdentifierContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterBackQuotedIdentifier(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitBackQuotedIdentifier(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitBackQuotedIdentifier(this); + else return visitor.visitChildren(this); + } + } + public static class QuotedIdentifierContext extends QuoteIdentifierContext { + public TerminalNode QUOTED_IDENTIFIER() { return getToken(SqlBaseParser.QUOTED_IDENTIFIER, 0); } + public QuotedIdentifierContext(QuoteIdentifierContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterQuotedIdentifier(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitQuotedIdentifier(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitQuotedIdentifier(this); + else return visitor.visitChildren(this); + } + } + + public final QuoteIdentifierContext quoteIdentifier() throws RecognitionException { + QuoteIdentifierContext _localctx = new QuoteIdentifierContext(_ctx, getState()); + enterRule(_localctx, 68, RULE_quoteIdentifier); + try { + setState(636); + switch (_input.LA(1)) { + case QUOTED_IDENTIFIER: + _localctx = new QuotedIdentifierContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(634); + match(QUOTED_IDENTIFIER); + } + break; + case BACKQUOTED_IDENTIFIER: + _localctx = new BackQuotedIdentifierContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(635); + match(BACKQUOTED_IDENTIFIER); + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class UnquoteIdentifierContext extends ParserRuleContext { + public UnquoteIdentifierContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_unquoteIdentifier; } + + public UnquoteIdentifierContext() { } + public void copyFrom(UnquoteIdentifierContext ctx) { + super.copyFrom(ctx); + } + } + public static class DigitIdentifierContext extends UnquoteIdentifierContext { + public TerminalNode DIGIT_IDENTIFIER() { return getToken(SqlBaseParser.DIGIT_IDENTIFIER, 0); } + public DigitIdentifierContext(UnquoteIdentifierContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) 
((SqlBaseListener)listener).enterDigitIdentifier(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitDigitIdentifier(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitDigitIdentifier(this); + else return visitor.visitChildren(this); + } + } + public static class UnquotedIdentifierContext extends UnquoteIdentifierContext { + public TerminalNode IDENTIFIER() { return getToken(SqlBaseParser.IDENTIFIER, 0); } + public NonReservedContext nonReserved() { + return getRuleContext(NonReservedContext.class,0); + } + public UnquotedIdentifierContext(UnquoteIdentifierContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterUnquotedIdentifier(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitUnquotedIdentifier(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitUnquotedIdentifier(this); + else return visitor.visitChildren(this); + } + } + + public final UnquoteIdentifierContext unquoteIdentifier() throws RecognitionException { + UnquoteIdentifierContext _localctx = new UnquoteIdentifierContext(_ctx, getState()); + enterRule(_localctx, 70, RULE_unquoteIdentifier); + try { + setState(641); + switch (_input.LA(1)) { + case IDENTIFIER: + _localctx = new UnquotedIdentifierContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(638); + match(IDENTIFIER); + } + break; + case ANALYZE: + case ANALYZED: + case CATALOGS: + case COLUMNS: + case DEBUG: + case EXECUTABLE: + case EXPLAIN: + case FORMAT: + case FUNCTIONS: + case GRAPHVIZ: + case MAPPED: + case OPTIMIZED: + case PARSED: + case PHYSICAL: + case PLAN: + case RLIKE: + case QUERY: + case SCHEMAS: + case SHOW: + case SYS: + case TABLES: + case TEXT: + case TYPE: + case TYPES: + case VERIFY: + _localctx = new UnquotedIdentifierContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(639); + nonReserved(); + } + break; + case DIGIT_IDENTIFIER: + _localctx = new DigitIdentifierContext(_localctx); + enterOuterAlt(_localctx, 3); + { + setState(640); + match(DIGIT_IDENTIFIER); + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class NumberContext extends ParserRuleContext { + public NumberContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_number; } + + public NumberContext() { } + public void copyFrom(NumberContext ctx) { + super.copyFrom(ctx); + } + } + public static class DecimalLiteralContext extends NumberContext { + public TerminalNode DECIMAL_VALUE() { return getToken(SqlBaseParser.DECIMAL_VALUE, 0); } + public DecimalLiteralContext(NumberContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterDecimalLiteral(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( 
listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitDecimalLiteral(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitDecimalLiteral(this); + else return visitor.visitChildren(this); + } + } + public static class IntegerLiteralContext extends NumberContext { + public TerminalNode INTEGER_VALUE() { return getToken(SqlBaseParser.INTEGER_VALUE, 0); } + public IntegerLiteralContext(NumberContext ctx) { copyFrom(ctx); } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterIntegerLiteral(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitIntegerLiteral(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitIntegerLiteral(this); + else return visitor.visitChildren(this); + } + } + + public final NumberContext number() throws RecognitionException { + NumberContext _localctx = new NumberContext(_ctx, getState()); + enterRule(_localctx, 72, RULE_number); + try { + setState(645); + switch (_input.LA(1)) { + case DECIMAL_VALUE: + _localctx = new DecimalLiteralContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(643); + match(DECIMAL_VALUE); + } + break; + case INTEGER_VALUE: + _localctx = new IntegerLiteralContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(644); + match(INTEGER_VALUE); + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class StringContext extends ParserRuleContext { + public TerminalNode PARAM() { return getToken(SqlBaseParser.PARAM, 0); } + public TerminalNode STRING() { return getToken(SqlBaseParser.STRING, 0); } + public StringContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_string; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterString(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitString(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitString(this); + else return visitor.visitChildren(this); + } + } + + public final StringContext string() throws RecognitionException { + StringContext _localctx = new StringContext(_ctx, getState()); + enterRule(_localctx, 74, RULE_string); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(647); + _la = _input.LA(1); + if ( !(_la==PARAM || _la==STRING) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public static class NonReservedContext extends ParserRuleContext { + public TerminalNode ANALYZE() { return getToken(SqlBaseParser.ANALYZE, 0); } + public TerminalNode 
ANALYZED() { return getToken(SqlBaseParser.ANALYZED, 0); } + public TerminalNode CATALOGS() { return getToken(SqlBaseParser.CATALOGS, 0); } + public TerminalNode COLUMNS() { return getToken(SqlBaseParser.COLUMNS, 0); } + public TerminalNode DEBUG() { return getToken(SqlBaseParser.DEBUG, 0); } + public TerminalNode EXECUTABLE() { return getToken(SqlBaseParser.EXECUTABLE, 0); } + public TerminalNode EXPLAIN() { return getToken(SqlBaseParser.EXPLAIN, 0); } + public TerminalNode FORMAT() { return getToken(SqlBaseParser.FORMAT, 0); } + public TerminalNode FUNCTIONS() { return getToken(SqlBaseParser.FUNCTIONS, 0); } + public TerminalNode GRAPHVIZ() { return getToken(SqlBaseParser.GRAPHVIZ, 0); } + public TerminalNode MAPPED() { return getToken(SqlBaseParser.MAPPED, 0); } + public TerminalNode OPTIMIZED() { return getToken(SqlBaseParser.OPTIMIZED, 0); } + public TerminalNode PARSED() { return getToken(SqlBaseParser.PARSED, 0); } + public TerminalNode PHYSICAL() { return getToken(SqlBaseParser.PHYSICAL, 0); } + public TerminalNode PLAN() { return getToken(SqlBaseParser.PLAN, 0); } + public TerminalNode QUERY() { return getToken(SqlBaseParser.QUERY, 0); } + public TerminalNode RLIKE() { return getToken(SqlBaseParser.RLIKE, 0); } + public TerminalNode SCHEMAS() { return getToken(SqlBaseParser.SCHEMAS, 0); } + public TerminalNode SHOW() { return getToken(SqlBaseParser.SHOW, 0); } + public TerminalNode SYS() { return getToken(SqlBaseParser.SYS, 0); } + public TerminalNode TABLES() { return getToken(SqlBaseParser.TABLES, 0); } + public TerminalNode TEXT() { return getToken(SqlBaseParser.TEXT, 0); } + public TerminalNode TYPE() { return getToken(SqlBaseParser.TYPE, 0); } + public TerminalNode TYPES() { return getToken(SqlBaseParser.TYPES, 0); } + public TerminalNode VERIFY() { return getToken(SqlBaseParser.VERIFY, 0); } + public NonReservedContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_nonReserved; } + @Override + public void enterRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).enterNonReserved(this); + } + @Override + public void exitRule(ParseTreeListener listener) { + if ( listener instanceof SqlBaseListener ) ((SqlBaseListener)listener).exitNonReserved(this); + } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof SqlBaseVisitor ) return ((SqlBaseVisitor)visitor).visitNonReserved(this); + else return visitor.visitChildren(this); + } + } + + public final NonReservedContext nonReserved() throws RecognitionException { + NonReservedContext _localctx = new NonReservedContext(_ctx, getState()); + enterRule(_localctx, 76, RULE_nonReserved); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(649); + _la = _input.LA(1); + if ( !(((((_la - 6)) & ~0x3f) == 0 && ((1L << (_la - 6)) & ((1L << (ANALYZE - 6)) | (1L << (ANALYZED - 6)) | (1L << (CATALOGS - 6)) | (1L << (COLUMNS - 6)) | (1L << (DEBUG - 6)) | (1L << (EXECUTABLE - 6)) | (1L << (EXPLAIN - 6)) | (1L << (FORMAT - 6)) | (1L << (FUNCTIONS - 6)) | (1L << (GRAPHVIZ - 6)) | (1L << (MAPPED - 6)) | (1L << (OPTIMIZED - 6)) | (1L << (PARSED - 6)) | (1L << (PHYSICAL - 6)) | (1L << (PLAN - 6)) | (1L << (RLIKE - 6)) | (1L << (QUERY - 6)) | (1L << (SCHEMAS - 6)) | (1L << (SHOW - 6)) | (1L << (SYS - 6)) | (1L << (TABLES - 6)) | (1L << (TEXT - 6)) | (1L << (TYPE - 6)) | (1L << (TYPES - 6)) | (1L << (VERIFY - 6)))) != 0)) ) { + _errHandler.recoverInline(this); + } else { + 
consume(); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + + public boolean sempred(RuleContext _localctx, int ruleIndex, int predIndex) { + switch (ruleIndex) { + case 21: + return booleanExpression_sempred((BooleanExpressionContext)_localctx, predIndex); + case 25: + return valueExpression_sempred((ValueExpressionContext)_localctx, predIndex); + } + return true; + } + private boolean booleanExpression_sempred(BooleanExpressionContext _localctx, int predIndex) { + switch (predIndex) { + case 0: + return precpred(_ctx, 2); + case 1: + return precpred(_ctx, 1); + } + return true; + } + private boolean valueExpression_sempred(ValueExpressionContext _localctx, int predIndex) { + switch (predIndex) { + case 2: + return precpred(_ctx, 3); + case 3: + return precpred(_ctx, 2); + case 4: + return precpred(_ctx, 1); + } + return true; + } + + public static final String _serializedATN = + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3d\u028e\4\2\t\2\4"+ + "\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t"+ + "\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+ + "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+ + "\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!"+ + "\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\3\2\3\2\3\2\3\3\3\3"+ + "\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4`\n\4\f\4\16\4c\13\4\3\4\5"+ + "\4f\n\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4o\n\4\f\4\16\4r\13\4\3\4\5\4u\n"+ + "\4\3\4\3\4\3\4\3\4\5\4{\n\4\3\4\5\4~\n\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3"+ + "\4\3\4\5\4\u0089\n\4\3\4\5\4\u008c\n\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4\3\4"+ + "\5\4\u0096\n\4\3\4\5\4\u0099\n\4\3\4\5\4\u009c\n\4\3\4\5\4\u009f\n\4\3"+ + "\4\3\4\3\4\3\4\7\4\u00a5\n\4\f\4\16\4\u00a8\13\4\5\4\u00aa\n\4\3\4\3\4"+ + "\3\4\3\4\5\4\u00b0\n\4\3\4\3\4\5\4\u00b4\n\4\3\4\5\4\u00b7\n\4\3\4\5\4"+ + "\u00ba\n\4\3\4\5\4\u00bd\n\4\3\4\3\4\3\4\3\4\3\4\5\4\u00c4\n\4\3\5\3\5"+ + "\3\5\3\5\7\5\u00ca\n\5\f\5\16\5\u00cd\13\5\5\5\u00cf\n\5\3\5\3\5\3\6\3"+ + "\6\3\6\3\6\3\6\3\6\7\6\u00d9\n\6\f\6\16\6\u00dc\13\6\5\6\u00de\n\6\3\6"+ + "\3\6\5\6\u00e2\n\6\3\7\3\7\3\7\3\7\3\7\5\7\u00e9\n\7\3\b\3\b\5\b\u00ed"+ + "\n\b\3\t\3\t\5\t\u00f1\n\t\3\t\3\t\3\t\7\t\u00f6\n\t\f\t\16\t\u00f9\13"+ + "\t\3\t\5\t\u00fc\n\t\3\t\3\t\5\t\u0100\n\t\3\t\3\t\3\t\5\t\u0105\n\t\3"+ + "\t\3\t\5\t\u0109\n\t\3\n\3\n\3\n\3\n\7\n\u010f\n\n\f\n\16\n\u0112\13\n"+ + "\3\13\5\13\u0115\n\13\3\13\3\13\3\13\7\13\u011a\n\13\f\13\16\13\u011d"+ + "\13\13\3\f\3\f\3\r\3\r\3\r\3\r\7\r\u0125\n\r\f\r\16\r\u0128\13\r\5\r\u012a"+ + "\n\r\3\r\3\r\5\r\u012e\n\r\3\16\3\16\3\16\3\16\3\16\3\16\3\17\3\17\3\20"+ + "\3\20\5\20\u013a\n\20\3\20\5\20\u013d\n\20\3\21\3\21\7\21\u0141\n\21\f"+ + "\21\16\21\u0144\13\21\3\22\3\22\3\22\3\22\5\22\u014a\n\22\3\22\3\22\3"+ + "\22\3\22\3\22\5\22\u0151\n\22\3\23\5\23\u0154\n\23\3\23\3\23\5\23\u0158"+ + "\n\23\3\23\3\23\5\23\u015c\n\23\3\23\3\23\5\23\u0160\n\23\5\23\u0162\n"+ + "\23\3\24\3\24\3\24\3\24\3\24\3\24\3\24\7\24\u016b\n\24\f\24\16\24\u016e"+ + "\13\24\3\24\3\24\5\24\u0172\n\24\3\25\3\25\5\25\u0176\n\25\3\25\5\25\u0179"+ + "\n\25\3\25\3\25\3\25\3\25\5\25\u017f\n\25\3\25\5\25\u0182\n\25\3\25\3"+ + "\25\3\25\3\25\5\25\u0188\n\25\3\25\5\25\u018b\n\25\5\25\u018d\n\25\3\26"+ + "\3\26\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27"+ + 
"\7\27\u019e\n\27\f\27\16\27\u01a1\13\27\3\27\3\27\3\27\3\27\3\27\3\27"+ + "\3\27\3\27\3\27\7\27\u01ac\n\27\f\27\16\27\u01af\13\27\3\27\3\27\3\27"+ + "\3\27\3\27\3\27\3\27\3\27\3\27\7\27\u01ba\n\27\f\27\16\27\u01bd\13\27"+ + "\3\27\3\27\3\27\5\27\u01c2\n\27\3\27\3\27\3\27\3\27\3\27\3\27\7\27\u01ca"+ + "\n\27\f\27\16\27\u01cd\13\27\3\30\3\30\5\30\u01d1\n\30\3\31\5\31\u01d4"+ + "\n\31\3\31\3\31\3\31\3\31\3\31\3\31\5\31\u01dc\n\31\3\31\3\31\3\31\3\31"+ + "\3\31\7\31\u01e3\n\31\f\31\16\31\u01e6\13\31\3\31\3\31\3\31\5\31\u01eb"+ + "\n\31\3\31\3\31\3\31\3\31\3\31\3\31\5\31\u01f3\n\31\3\31\3\31\3\31\5\31"+ + "\u01f8\n\31\3\31\3\31\3\31\3\31\5\31\u01fe\n\31\3\31\5\31\u0201\n\31\3"+ + "\32\3\32\3\32\5\32\u0206\n\32\3\33\3\33\3\33\3\33\5\33\u020c\n\33\3\33"+ + "\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\3\33\7\33\u0218\n\33\f\33\16"+ + "\33\u021b\13\33\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34"+ + "\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\5\34\u0230\n\34\3\34\3\34\3\34"+ + "\3\34\5\34\u0236\n\34\3\34\3\34\3\34\7\34\u023b\n\34\f\34\16\34\u023e"+ + "\13\34\5\34\u0240\n\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3"+ + "\34\3\34\3\34\5\34\u024e\n\34\3\35\3\35\3\35\3\35\6\35\u0254\n\35\r\35"+ + "\16\35\u0255\3\35\5\35\u0259\n\35\3\36\3\36\3\37\3\37\3 \3 \3!\3!\3!\7"+ + "!\u0264\n!\f!\16!\u0267\13!\3!\3!\3\"\3\"\5\"\u026d\n\"\3#\3#\3#\5#\u0272"+ + "\n#\3#\3#\3#\3#\5#\u0278\n#\3#\5#\u027b\n#\3$\3$\5$\u027f\n$\3%\3%\3%"+ + "\5%\u0284\n%\3&\3&\5&\u0288\n&\3\'\3\'\3(\3(\3(\2\4,\64)\2\4\6\b\n\f\16"+ + "\20\22\24\26\30\32\34\36 \"$&(*,.\60\62\64\668:<>@BDFHJLN\2\20\b\2\7\7"+ + "\t\t\31\31,,\62\62\66\66\4\2\"\"BB\4\2\t\t\62\62\4\2\37\37%%\3\2\25\26"+ + "\4\2\7\7YY\4\2\r\r\25\25\4\2\7\7\27\27\3\2PQ\3\2RT\3\2JO\4\2\35\35CC\3"+ + "\2WX\20\2\b\t\22\24\31\31\33\33\36\36!\",,\62\62\668:<>?ABDEGG\u02e8\2"+ + "P\3\2\2\2\4S\3\2\2\2\6\u00c3\3\2\2\2\b\u00ce\3\2\2\2\n\u00d2\3\2\2\2\f"+ + "\u00e8\3\2\2\2\16\u00ea\3\2\2\2\20\u00ee\3\2\2\2\22\u010a\3\2\2\2\24\u0114"+ + "\3\2\2\2\26\u011e\3\2\2\2\30\u012d\3\2\2\2\32\u012f\3\2\2\2\34\u0135\3"+ + "\2\2\2\36\u0137\3\2\2\2 \u013e\3\2\2\2\"\u0150\3\2\2\2$\u0161\3\2\2\2"+ + "&\u0171\3\2\2\2(\u018c\3\2\2\2*\u018e\3\2\2\2,\u01c1\3\2\2\2.\u01ce\3"+ + "\2\2\2\60\u0200\3\2\2\2\62\u0202\3\2\2\2\64\u020b\3\2\2\2\66\u024d\3\2"+ + "\2\28\u0258\3\2\2\2:\u025a\3\2\2\2<\u025c\3\2\2\2>\u025e\3\2\2\2@\u0265"+ + "\3\2\2\2B\u026c\3\2\2\2D\u027a\3\2\2\2F\u027e\3\2\2\2H\u0283\3\2\2\2J"+ + "\u0287\3\2\2\2L\u0289\3\2\2\2N\u028b\3\2\2\2PQ\5\6\4\2QR\7\2\2\3R\3\3"+ + "\2\2\2ST\5*\26\2TU\7\2\2\3U\5\3\2\2\2V\u00c4\5\b\5\2We\7\33\2\2Xa\7\3"+ + "\2\2YZ\78\2\2Z`\t\2\2\2[\\\7\36\2\2\\`\t\3\2\2]^\7G\2\2^`\5<\37\2_Y\3"+ + "\2\2\2_[\3\2\2\2_]\3\2\2\2`c\3\2\2\2a_\3\2\2\2ab\3\2\2\2bd\3\2\2\2ca\3"+ + "\2\2\2df\7\4\2\2eX\3\2\2\2ef\3\2\2\2fg\3\2\2\2g\u00c4\5\6\4\2ht\7\24\2"+ + "\2ip\7\3\2\2jk\78\2\2ko\t\4\2\2lm\7\36\2\2mo\t\3\2\2nj\3\2\2\2nl\3\2\2"+ + "\2or\3\2\2\2pn\3\2\2\2pq\3\2\2\2qs\3\2\2\2rp\3\2\2\2su\7\4\2\2ti\3\2\2"+ + "\2tu\3\2\2\2uv\3\2\2\2v\u00c4\5\6\4\2wx\7>\2\2x}\7A\2\2y{\7*\2\2zy\3\2"+ + "\2\2z{\3\2\2\2{|\3\2\2\2|~\5\62\32\2}z\3\2\2\2}~\3\2\2\2~\u00c4\3\2\2"+ + "\2\177\u0080\7>\2\2\u0080\u0081\7\23\2\2\u0081\u0082\t\5\2\2\u0082\u00c4"+ + "\5D#\2\u0083\u0084\t\6\2\2\u0084\u00c4\5D#\2\u0085\u0086\7>\2\2\u0086"+ + "\u008b\7!\2\2\u0087\u0089\7*\2\2\u0088\u0087\3\2\2\2\u0088\u0089\3\2\2"+ + "\2\u0089\u008a\3\2\2\2\u008a\u008c\5\62\32\2\u008b\u0088\3\2\2\2\u008b"+ + "\u008c\3\2\2\2\u008c\u00c4\3\2\2\2\u008d\u008e\7>\2\2\u008e\u00c4\7<\2"+ + 
"\2\u008f\u0090\7?\2\2\u0090\u00c4\7\22\2\2\u0091\u0092\7?\2\2\u0092\u0098"+ + "\7A\2\2\u0093\u0095\7\21\2\2\u0094\u0096\7*\2\2\u0095\u0094\3\2\2\2\u0095"+ + "\u0096\3\2\2\2\u0096\u0097\3\2\2\2\u0097\u0099\5\62\32\2\u0098\u0093\3"+ + "\2\2\2\u0098\u0099\3\2\2\2\u0099\u009e\3\2\2\2\u009a\u009c\7*\2\2\u009b"+ + "\u009a\3\2\2\2\u009b\u009c\3\2\2\2\u009c\u009d\3\2\2\2\u009d\u009f\5\62"+ + "\32\2\u009e\u009b\3\2\2\2\u009e\u009f\3\2\2\2\u009f\u00a9\3\2\2\2\u00a0"+ + "\u00a1\7D\2\2\u00a1\u00a6\5L\'\2\u00a2\u00a3\7\5\2\2\u00a3\u00a5\5L\'"+ + "\2\u00a4\u00a2\3\2\2\2\u00a5\u00a8\3\2\2\2\u00a6\u00a4\3\2\2\2\u00a6\u00a7"+ + "\3\2\2\2\u00a7\u00aa\3\2\2\2\u00a8\u00a6\3\2\2\2\u00a9\u00a0\3\2\2\2\u00a9"+ + "\u00aa\3\2\2\2\u00aa\u00c4\3\2\2\2\u00ab\u00ac\7?\2\2\u00ac\u00af\7\23"+ + "\2\2\u00ad\u00ae\7\21\2\2\u00ae\u00b0\5L\'\2\u00af\u00ad\3\2\2\2\u00af"+ + "\u00b0\3\2\2\2\u00b0\u00b6\3\2\2\2\u00b1\u00b3\7@\2\2\u00b2\u00b4\7*\2"+ + "\2\u00b3\u00b2\3\2\2\2\u00b3\u00b4\3\2\2\2\u00b4\u00b5\3\2\2\2\u00b5\u00b7"+ + "\5\62\32\2\u00b6\u00b1\3\2\2\2\u00b6\u00b7\3\2\2\2\u00b7\u00bc\3\2\2\2"+ + "\u00b8\u00ba\7*\2\2\u00b9\u00b8\3\2\2\2\u00b9\u00ba\3\2\2\2\u00ba\u00bb"+ + "\3\2\2\2\u00bb\u00bd\5\62\32\2\u00bc\u00b9\3\2\2\2\u00bc\u00bd\3\2\2\2"+ + "\u00bd\u00c4\3\2\2\2\u00be\u00bf\7?\2\2\u00bf\u00c4\7E\2\2\u00c0\u00c1"+ + "\7?\2\2\u00c1\u00c2\7@\2\2\u00c2\u00c4\7E\2\2\u00c3V\3\2\2\2\u00c3W\3"+ + "\2\2\2\u00c3h\3\2\2\2\u00c3w\3\2\2\2\u00c3\177\3\2\2\2\u00c3\u0083\3\2"+ + "\2\2\u00c3\u0085\3\2\2\2\u00c3\u008d\3\2\2\2\u00c3\u008f\3\2\2\2\u00c3"+ + "\u0091\3\2\2\2\u00c3\u00ab\3\2\2\2\u00c3\u00be\3\2\2\2\u00c3\u00c0\3\2"+ + "\2\2\u00c4\7\3\2\2\2\u00c5\u00c6\7I\2\2\u00c6\u00cb\5\32\16\2\u00c7\u00c8"+ + "\7\5\2\2\u00c8\u00ca\5\32\16\2\u00c9\u00c7\3\2\2\2\u00ca\u00cd\3\2\2\2"+ + "\u00cb\u00c9\3\2\2\2\u00cb\u00cc\3\2\2\2\u00cc\u00cf\3\2\2\2\u00cd\u00cb"+ + "\3\2\2\2\u00ce\u00c5\3\2\2\2\u00ce\u00cf\3\2\2\2\u00cf\u00d0\3\2\2\2\u00d0"+ + "\u00d1\5\n\6\2\u00d1\t\3\2\2\2\u00d2\u00dd\5\f\7\2\u00d3\u00d4\7\64\2"+ + "\2\u00d4\u00d5\7\17\2\2\u00d5\u00da\5\16\b\2\u00d6\u00d7\7\5\2\2\u00d7"+ + "\u00d9\5\16\b\2\u00d8\u00d6\3\2\2\2\u00d9\u00dc\3\2\2\2\u00da\u00d8\3"+ + "\2\2\2\u00da\u00db\3\2\2\2\u00db\u00de\3\2\2\2\u00dc\u00da\3\2\2\2\u00dd"+ + "\u00d3\3\2\2\2\u00dd\u00de\3\2\2\2\u00de\u00e1\3\2\2\2\u00df\u00e0\7+"+ + "\2\2\u00e0\u00e2\t\7\2\2\u00e1\u00df\3\2\2\2\u00e1\u00e2\3\2\2\2\u00e2"+ + "\13\3\2\2\2\u00e3\u00e9\5\20\t\2\u00e4\u00e5\7\3\2\2\u00e5\u00e6\5\n\6"+ + "\2\u00e6\u00e7\7\4\2\2\u00e7\u00e9\3\2\2\2\u00e8\u00e3\3\2\2\2\u00e8\u00e4"+ + "\3\2\2\2\u00e9\r\3\2\2\2\u00ea\u00ec\5*\26\2\u00eb\u00ed\t\b\2\2\u00ec"+ + "\u00eb\3\2\2\2\u00ec\u00ed\3\2\2\2\u00ed\17\3\2\2\2\u00ee\u00f0\7=\2\2"+ + "\u00ef\u00f1\5\34\17\2\u00f0\u00ef\3\2\2\2\u00f0\u00f1\3\2\2\2\u00f1\u00f2"+ + "\3\2\2\2\u00f2\u00f7\5\36\20\2\u00f3\u00f4\7\5\2\2\u00f4\u00f6\5\36\20"+ + "\2\u00f5\u00f3\3\2\2\2\u00f6\u00f9\3\2\2\2\u00f7\u00f5\3\2\2\2\u00f7\u00f8"+ + "\3\2\2\2\u00f8\u00fb\3\2\2\2\u00f9\u00f7\3\2\2\2\u00fa\u00fc\5\22\n\2"+ + "\u00fb\u00fa\3\2\2\2\u00fb\u00fc\3\2\2\2\u00fc\u00ff\3\2\2\2\u00fd\u00fe"+ + "\7H\2\2\u00fe\u0100\5,\27\2\u00ff\u00fd\3\2\2\2\u00ff\u0100\3\2\2\2\u0100"+ + "\u0104\3\2\2\2\u0101\u0102\7#\2\2\u0102\u0103\7\17\2\2\u0103\u0105\5\24"+ + "\13\2\u0104\u0101\3\2\2\2\u0104\u0105\3\2\2\2\u0105\u0108\3\2\2\2\u0106"+ + "\u0107\7$\2\2\u0107\u0109\5,\27\2\u0108\u0106\3\2\2\2\u0108\u0109\3\2"+ + "\2\2\u0109\21\3\2\2\2\u010a\u010b\7\37\2\2\u010b\u0110\5 \21\2\u010c\u010d"+ + "\7\5\2\2\u010d\u010f\5 \21\2\u010e\u010c\3\2\2\2\u010f\u0112\3\2\2\2\u0110"+ + 
"\u010e\3\2\2\2\u0110\u0111\3\2\2\2\u0111\23\3\2\2\2\u0112\u0110\3\2\2"+ + "\2\u0113\u0115\5\34\17\2\u0114\u0113\3\2\2\2\u0114\u0115\3\2\2\2\u0115"+ + "\u0116\3\2\2\2\u0116\u011b\5\26\f\2\u0117\u0118\7\5\2\2\u0118\u011a\5"+ + "\26\f\2\u0119\u0117\3\2\2\2\u011a\u011d\3\2\2\2\u011b\u0119\3\2\2\2\u011b"+ + "\u011c\3\2\2\2\u011c\25\3\2\2\2\u011d\u011b\3\2\2\2\u011e\u011f\5\30\r"+ + "\2\u011f\27\3\2\2\2\u0120\u0129\7\3\2\2\u0121\u0126\5*\26\2\u0122\u0123"+ + "\7\5\2\2\u0123\u0125\5*\26\2\u0124\u0122\3\2\2\2\u0125\u0128\3\2\2\2\u0126"+ + "\u0124\3\2\2\2\u0126\u0127\3\2\2\2\u0127\u012a\3\2\2\2\u0128\u0126\3\2"+ + "\2\2\u0129\u0121\3\2\2\2\u0129\u012a\3\2\2\2\u012a\u012b\3\2\2\2\u012b"+ + "\u012e\7\4\2\2\u012c\u012e\5*\26\2\u012d\u0120\3\2\2\2\u012d\u012c\3\2"+ + "\2\2\u012e\31\3\2\2\2\u012f\u0130\5B\"\2\u0130\u0131\7\f\2\2\u0131\u0132"+ + "\7\3\2\2\u0132\u0133\5\n\6\2\u0133\u0134\7\4\2\2\u0134\33\3\2\2\2\u0135"+ + "\u0136\t\t\2\2\u0136\35\3\2\2\2\u0137\u013c\5*\26\2\u0138\u013a\7\f\2"+ + "\2\u0139\u0138\3\2\2\2\u0139\u013a\3\2\2\2\u013a\u013b\3\2\2\2\u013b\u013d"+ + "\5B\"\2\u013c\u0139\3\2\2\2\u013c\u013d\3\2\2\2\u013d\37\3\2\2\2\u013e"+ + "\u0142\5(\25\2\u013f\u0141\5\"\22\2\u0140\u013f\3\2\2\2\u0141\u0144\3"+ + "\2\2\2\u0142\u0140\3\2\2\2\u0142\u0143\3\2\2\2\u0143!\3\2\2\2\u0144\u0142"+ + "\3\2\2\2\u0145\u0146\5$\23\2\u0146\u0147\7(\2\2\u0147\u0149\5(\25\2\u0148"+ + "\u014a\5&\24\2\u0149\u0148\3\2\2\2\u0149\u014a\3\2\2\2\u014a\u0151\3\2"+ + "\2\2\u014b\u014c\7.\2\2\u014c\u014d\5$\23\2\u014d\u014e\7(\2\2\u014e\u014f"+ + "\5(\25\2\u014f\u0151\3\2\2\2\u0150\u0145\3\2\2\2\u0150\u014b\3\2\2\2\u0151"+ + "#\3\2\2\2\u0152\u0154\7&\2\2\u0153\u0152\3\2\2\2\u0153\u0154\3\2\2\2\u0154"+ + "\u0162\3\2\2\2\u0155\u0157\7)\2\2\u0156\u0158\7\65\2\2\u0157\u0156\3\2"+ + "\2\2\u0157\u0158\3\2\2\2\u0158\u0162\3\2\2\2\u0159\u015b\79\2\2\u015a"+ + "\u015c\7\65\2\2\u015b\u015a\3\2\2\2\u015b\u015c\3\2\2\2\u015c\u0162\3"+ + "\2\2\2\u015d\u015f\7 \2\2\u015e\u0160\7\65\2\2\u015f\u015e\3\2\2\2\u015f"+ + "\u0160\3\2\2\2\u0160\u0162\3\2\2\2\u0161\u0153\3\2\2\2\u0161\u0155\3\2"+ + "\2\2\u0161\u0159\3\2\2\2\u0161\u015d\3\2\2\2\u0162%\3\2\2\2\u0163\u0164"+ + "\7\61\2\2\u0164\u0172\5,\27\2\u0165\u0166\7F\2\2\u0166\u0167\7\3\2\2\u0167"+ + "\u016c\5B\"\2\u0168\u0169\7\5\2\2\u0169\u016b\5B\"\2\u016a\u0168\3\2\2"+ + "\2\u016b\u016e\3\2\2\2\u016c\u016a\3\2\2\2\u016c\u016d\3\2\2\2\u016d\u016f"+ + "\3\2\2\2\u016e\u016c\3\2\2\2\u016f\u0170\7\4\2\2\u0170\u0172\3\2\2\2\u0171"+ + "\u0163\3\2\2\2\u0171\u0165\3\2\2\2\u0172\'\3\2\2\2\u0173\u0178\5D#\2\u0174"+ + "\u0176\7\f\2\2\u0175\u0174\3\2\2\2\u0175\u0176\3\2\2\2\u0176\u0177\3\2"+ + "\2\2\u0177\u0179\5@!\2\u0178\u0175\3\2\2\2\u0178\u0179\3\2\2\2\u0179\u018d"+ + "\3\2\2\2\u017a\u017b\7\3\2\2\u017b\u017c\5\n\6\2\u017c\u0181\7\4\2\2\u017d"+ + "\u017f\7\f\2\2\u017e\u017d\3\2\2\2\u017e\u017f\3\2\2\2\u017f\u0180\3\2"+ + "\2\2\u0180\u0182\5@!\2\u0181\u017e\3\2\2\2\u0181\u0182\3\2\2\2\u0182\u018d"+ + "\3\2\2\2\u0183\u0184\7\3\2\2\u0184\u0185\5 \21\2\u0185\u018a\7\4\2\2\u0186"+ + "\u0188\7\f\2\2\u0187\u0186\3\2\2\2\u0187\u0188\3\2\2\2\u0188\u0189\3\2"+ + "\2\2\u0189\u018b\5@!\2\u018a\u0187\3\2\2\2\u018a\u018b\3\2\2\2\u018b\u018d"+ + "\3\2\2\2\u018c\u0173\3\2\2\2\u018c\u017a\3\2\2\2\u018c\u0183\3\2\2\2\u018d"+ + ")\3\2\2\2\u018e\u018f\5,\27\2\u018f+\3\2\2\2\u0190\u0191\b\27\1\2\u0191"+ + "\u0192\7/\2\2\u0192\u01c2\5,\27\n\u0193\u0194\7\32\2\2\u0194\u0195\7\3"+ + "\2\2\u0195\u0196\5\b\5\2\u0196\u0197\7\4\2\2\u0197\u01c2\3\2\2\2\u0198"+ + 
"\u0199\7;\2\2\u0199\u019a\7\3\2\2\u019a\u019f\5L\'\2\u019b\u019c\7\5\2"+ + "\2\u019c\u019e\5L\'\2\u019d\u019b\3\2\2\2\u019e\u01a1\3\2\2\2\u019f\u019d"+ + "\3\2\2\2\u019f\u01a0\3\2\2\2\u01a0\u01a2\3\2\2\2\u01a1\u019f\3\2\2\2\u01a2"+ + "\u01a3\7\4\2\2\u01a3\u01c2\3\2\2\2\u01a4\u01a5\7-\2\2\u01a5\u01a6\7\3"+ + "\2\2\u01a6\u01a7\5@!\2\u01a7\u01a8\7\5\2\2\u01a8\u01ad\5L\'\2\u01a9\u01aa"+ + "\7\5\2\2\u01aa\u01ac\5L\'\2\u01ab\u01a9\3\2\2\2\u01ac\u01af\3\2\2\2\u01ad"+ + "\u01ab\3\2\2\2\u01ad\u01ae\3\2\2\2\u01ae\u01b0\3\2\2\2\u01af\u01ad\3\2"+ + "\2\2\u01b0\u01b1\7\4\2\2\u01b1\u01c2\3\2\2\2\u01b2\u01b3\7-\2\2\u01b3"+ + "\u01b4\7\3\2\2\u01b4\u01b5\5L\'\2\u01b5\u01b6\7\5\2\2\u01b6\u01bb\5L\'"+ + "\2\u01b7\u01b8\7\5\2\2\u01b8\u01ba\5L\'\2\u01b9\u01b7\3\2\2\2\u01ba\u01bd"+ + "\3\2\2\2\u01bb\u01b9\3\2\2\2\u01bb\u01bc\3\2\2\2\u01bc\u01be\3\2\2\2\u01bd"+ + "\u01bb\3\2\2\2\u01be\u01bf\7\4\2\2\u01bf\u01c2\3\2\2\2\u01c0\u01c2\5."+ + "\30\2\u01c1\u0190\3\2\2\2\u01c1\u0193\3\2\2\2\u01c1\u0198\3\2\2\2\u01c1"+ + "\u01a4\3\2\2\2\u01c1\u01b2\3\2\2\2\u01c1\u01c0\3\2\2\2\u01c2\u01cb\3\2"+ + "\2\2\u01c3\u01c4\f\4\2\2\u01c4\u01c5\7\n\2\2\u01c5\u01ca\5,\27\5\u01c6"+ + "\u01c7\f\3\2\2\u01c7\u01c8\7\63\2\2\u01c8\u01ca\5,\27\4\u01c9\u01c3\3"+ + "\2\2\2\u01c9\u01c6\3\2\2\2\u01ca\u01cd\3\2\2\2\u01cb\u01c9\3\2\2\2\u01cb"+ + "\u01cc\3\2\2\2\u01cc-\3\2\2\2\u01cd\u01cb\3\2\2\2\u01ce\u01d0\5\64\33"+ + "\2\u01cf\u01d1\5\60\31\2\u01d0\u01cf\3\2\2\2\u01d0\u01d1\3\2\2\2\u01d1"+ + "/\3\2\2\2\u01d2\u01d4\7/\2\2\u01d3\u01d2\3\2\2\2\u01d3\u01d4\3\2\2\2\u01d4"+ + "\u01d5\3\2\2\2\u01d5\u01d6\7\16\2\2\u01d6\u01d7\5\64\33\2\u01d7\u01d8"+ + "\7\n\2\2\u01d8\u01d9\5\64\33\2\u01d9\u0201\3\2\2\2\u01da\u01dc\7/\2\2"+ + "\u01db\u01da\3\2\2\2\u01db\u01dc\3\2\2\2\u01dc\u01dd\3\2\2\2\u01dd\u01de"+ + "\7%\2\2\u01de\u01df\7\3\2\2\u01df\u01e4\5*\26\2\u01e0\u01e1\7\5\2\2\u01e1"+ + "\u01e3\5*\26\2\u01e2\u01e0\3\2\2\2\u01e3\u01e6\3\2\2\2\u01e4\u01e2\3\2"+ + "\2\2\u01e4\u01e5\3\2\2\2\u01e5\u01e7\3\2\2\2\u01e6\u01e4\3\2\2\2\u01e7"+ + "\u01e8\7\4\2\2\u01e8\u0201\3\2\2\2\u01e9\u01eb\7/\2\2\u01ea\u01e9\3\2"+ + "\2\2\u01ea\u01eb\3\2\2\2\u01eb\u01ec\3\2\2\2\u01ec\u01ed\7%\2\2\u01ed"+ + "\u01ee\7\3\2\2\u01ee\u01ef\5\b\5\2\u01ef\u01f0\7\4\2\2\u01f0\u0201\3\2"+ + "\2\2\u01f1\u01f3\7/\2\2\u01f2\u01f1\3\2\2\2\u01f2\u01f3\3\2\2\2\u01f3"+ + "\u01f4\3\2\2\2\u01f4\u01f5\7*\2\2\u01f5\u0201\5\62\32\2\u01f6\u01f8\7"+ + "/\2\2\u01f7\u01f6\3\2\2\2\u01f7\u01f8\3\2\2\2\u01f8\u01f9\3\2\2\2\u01f9"+ + "\u01fa\7:\2\2\u01fa\u0201\5L\'\2\u01fb\u01fd\7\'\2\2\u01fc\u01fe\7/\2"+ + "\2\u01fd\u01fc\3\2\2\2\u01fd\u01fe\3\2\2\2\u01fe\u01ff\3\2\2\2\u01ff\u0201"+ + "\7\60\2\2\u0200\u01d3\3\2\2\2\u0200\u01db\3\2\2\2\u0200\u01ea\3\2\2\2"+ + "\u0200\u01f2\3\2\2\2\u0200\u01f7\3\2\2\2\u0200\u01fb\3\2\2\2\u0201\61"+ + "\3\2\2\2\u0202\u0205\5L\'\2\u0203\u0204\7\30\2\2\u0204\u0206\5L\'\2\u0205"+ + "\u0203\3\2\2\2\u0205\u0206\3\2\2\2\u0206\63\3\2\2\2\u0207\u0208\b\33\1"+ + "\2\u0208\u020c\5\66\34\2\u0209\u020a\t\n\2\2\u020a\u020c\5\64\33\6\u020b"+ + "\u0207\3\2\2\2\u020b\u0209\3\2\2\2\u020c\u0219\3\2\2\2\u020d\u020e\f\5"+ + "\2\2\u020e\u020f\t\13\2\2\u020f\u0218\5\64\33\6\u0210\u0211\f\4\2\2\u0211"+ + "\u0212\t\n\2\2\u0212\u0218\5\64\33\5\u0213\u0214\f\3\2\2\u0214\u0215\5"+ + ":\36\2\u0215\u0216\5\64\33\4\u0216\u0218\3\2\2\2\u0217\u020d\3\2\2\2\u0217"+ + "\u0210\3\2\2\2\u0217\u0213\3\2\2\2\u0218\u021b\3\2\2\2\u0219\u0217\3\2"+ + "\2\2\u0219\u021a\3\2\2\2\u021a\65\3\2\2\2\u021b\u0219\3\2\2\2\u021c\u021d"+ + "\7\20\2\2\u021d\u021e\7\3\2\2\u021e\u021f\5*\26\2\u021f\u0220\7\f\2\2"+ + "\u0220\u0221\5> 
\2\u0221\u0222\7\4\2\2\u0222\u024e\3\2\2\2\u0223\u0224"+ + "\7\34\2\2\u0224\u0225\7\3\2\2\u0225\u0226\5B\"\2\u0226\u0227\7\37\2\2"+ + "\u0227\u0228\5\64\33\2\u0228\u0229\7\4\2\2\u0229\u024e\3\2\2\2\u022a\u024e"+ + "\58\35\2\u022b\u024e\7R\2\2\u022c\u022d\5@!\2\u022d\u022e\7V\2\2\u022e"+ + "\u0230\3\2\2\2\u022f\u022c\3\2\2\2\u022f\u0230\3\2\2\2\u0230\u0231\3\2"+ + "\2\2\u0231\u024e\7R\2\2\u0232\u0233\5B\"\2\u0233\u023f\7\3\2\2\u0234\u0236"+ + "\5\34\17\2\u0235\u0234\3\2\2\2\u0235\u0236\3\2\2\2\u0236\u0237\3\2\2\2"+ + "\u0237\u023c\5*\26\2\u0238\u0239\7\5\2\2\u0239\u023b\5*\26\2\u023a\u0238"+ + "\3\2\2\2\u023b\u023e\3\2\2\2\u023c\u023a\3\2\2\2\u023c\u023d\3\2\2\2\u023d"+ + "\u0240\3\2\2\2\u023e\u023c\3\2\2\2\u023f\u0235\3\2\2\2\u023f\u0240\3\2"+ + "\2\2\u0240\u0241\3\2\2\2\u0241\u0242\7\4\2\2\u0242\u024e\3\2\2\2\u0243"+ + "\u0244\7\3\2\2\u0244\u0245\5\b\5\2\u0245\u0246\7\4\2\2\u0246\u024e\3\2"+ + "\2\2\u0247\u024e\5B\"\2\u0248\u024e\5@!\2\u0249\u024a\7\3\2\2\u024a\u024b"+ + "\5*\26\2\u024b\u024c\7\4\2\2\u024c\u024e\3\2\2\2\u024d\u021c\3\2\2\2\u024d"+ + "\u0223\3\2\2\2\u024d\u022a\3\2\2\2\u024d\u022b\3\2\2\2\u024d\u022f\3\2"+ + "\2\2\u024d\u0232\3\2\2\2\u024d\u0243\3\2\2\2\u024d\u0247\3\2\2\2\u024d"+ + "\u0248\3\2\2\2\u024d\u0249\3\2\2\2\u024e\67\3\2\2\2\u024f\u0259\7\60\2"+ + "\2\u0250\u0259\5J&\2\u0251\u0259\5<\37\2\u0252\u0254\7X\2\2\u0253\u0252"+ + "\3\2\2\2\u0254\u0255\3\2\2\2\u0255\u0253\3\2\2\2\u0255\u0256\3\2\2\2\u0256"+ + "\u0259\3\2\2\2\u0257\u0259\7W\2\2\u0258\u024f\3\2\2\2\u0258\u0250\3\2"+ + "\2\2\u0258\u0251\3\2\2\2\u0258\u0253\3\2\2\2\u0258\u0257\3\2\2\2\u0259"+ + "9\3\2\2\2\u025a\u025b\t\f\2\2\u025b;\3\2\2\2\u025c\u025d\t\r\2\2\u025d"+ + "=\3\2\2\2\u025e\u025f\5B\"\2\u025f?\3\2\2\2\u0260\u0261\5B\"\2\u0261\u0262"+ + "\7V\2\2\u0262\u0264\3\2\2\2\u0263\u0260\3\2\2\2\u0264\u0267\3\2\2\2\u0265"+ + "\u0263\3\2\2\2\u0265\u0266\3\2\2\2\u0266\u0268\3\2\2\2\u0267\u0265\3\2"+ + "\2\2\u0268\u0269\5B\"\2\u0269A\3\2\2\2\u026a\u026d\5F$\2\u026b\u026d\5"+ + "H%\2\u026c\u026a\3\2\2\2\u026c\u026b\3\2\2\2\u026dC\3\2\2\2\u026e\u026f"+ + "\5B\"\2\u026f\u0270\7\6\2\2\u0270\u0272\3\2\2\2\u0271\u026e\3\2\2\2\u0271"+ + "\u0272\3\2\2\2\u0272\u0273\3\2\2\2\u0273\u027b\7]\2\2\u0274\u0275\5B\""+ + "\2\u0275\u0276\7\6\2\2\u0276\u0278\3\2\2\2\u0277\u0274\3\2\2\2\u0277\u0278"+ + "\3\2\2\2\u0278\u0279\3\2\2\2\u0279\u027b\5B\"\2\u027a\u0271\3\2\2\2\u027a"+ + "\u0277\3\2\2\2\u027bE\3\2\2\2\u027c\u027f\7^\2\2\u027d\u027f\7_\2\2\u027e"+ + "\u027c\3\2\2\2\u027e\u027d\3\2\2\2\u027fG\3\2\2\2\u0280\u0284\7[\2\2\u0281"+ + "\u0284\5N(\2\u0282\u0284\7\\\2\2\u0283\u0280\3\2\2\2\u0283\u0281\3\2\2"+ + "\2\u0283\u0282\3\2\2\2\u0284I\3\2\2\2\u0285\u0288\7Z\2\2\u0286\u0288\7"+ + "Y\2\2\u0287\u0285\3\2\2\2\u0287\u0286\3\2\2\2\u0288K\3\2\2\2\u0289\u028a"+ + "\t\16\2\2\u028aM\3\2\2\2\u028b\u028c\t\17\2\2\u028cO\3\2\2\2`_aenptz}"+ + "\u0088\u008b\u0095\u0098\u009b\u009e\u00a6\u00a9\u00af\u00b3\u00b6\u00b9"+ + "\u00bc\u00c3\u00cb\u00ce\u00da\u00dd\u00e1\u00e8\u00ec\u00f0\u00f7\u00fb"+ + "\u00ff\u0104\u0108\u0110\u0114\u011b\u0126\u0129\u012d\u0139\u013c\u0142"+ + "\u0149\u0150\u0153\u0157\u015b\u015f\u0161\u016c\u0171\u0175\u0178\u017e"+ + "\u0181\u0187\u018a\u018c\u019f\u01ad\u01bb\u01c1\u01c9\u01cb\u01d0\u01d3"+ + "\u01db\u01e4\u01ea\u01f2\u01f7\u01fd\u0200\u0205\u020b\u0217\u0219\u022f"+ + "\u0235\u023c\u023f\u024d\u0255\u0258\u0265\u026c\u0271\u0277\u027a\u027e"+ + "\u0283\u0287"; + public static final ATN _ATN = + new ATNDeserializer().deserialize(_serializedATN.toCharArray()); + static { + _decisionToDFA = new 
DFA[_ATN.getNumberOfDecisions()]; + for (int i = 0; i < _ATN.getNumberOfDecisions(); i++) { + _decisionToDFA[i] = new DFA(_ATN.getDecisionState(i), i); + } + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java new file mode 100644 index 0000000000000..35ce6cd0029d6 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlBaseVisitor.java @@ -0,0 +1,531 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +// ANTLR GENERATED CODE: DO NOT EDIT +package org.elasticsearch.xpack.sql.parser; +import org.antlr.v4.runtime.tree.ParseTreeVisitor; + +/** + * This interface defines a complete generic visitor for a parse tree produced + * by {@link SqlBaseParser}. + * + * @param The return type of the visit operation. Use {@link Void} for + * operations with no return type. + */ +interface SqlBaseVisitor extends ParseTreeVisitor { + /** + * Visit a parse tree produced by {@link SqlBaseParser#singleStatement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitSingleStatement(SqlBaseParser.SingleStatementContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#singleExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitSingleExpression(SqlBaseParser.SingleExpressionContext ctx); + /** + * Visit a parse tree produced by the {@code statementDefault} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitStatementDefault(SqlBaseParser.StatementDefaultContext ctx); + /** + * Visit a parse tree produced by the {@code explain} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitExplain(SqlBaseParser.ExplainContext ctx); + /** + * Visit a parse tree produced by the {@code debug} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitDebug(SqlBaseParser.DebugContext ctx); + /** + * Visit a parse tree produced by the {@code showTables} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitShowTables(SqlBaseParser.ShowTablesContext ctx); + /** + * Visit a parse tree produced by the {@code showColumns} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitShowColumns(SqlBaseParser.ShowColumnsContext ctx); + /** + * Visit a parse tree produced by the {@code showFunctions} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitShowFunctions(SqlBaseParser.ShowFunctionsContext ctx); + /** + * Visit a parse tree produced by the {@code showSchemas} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitShowSchemas(SqlBaseParser.ShowSchemasContext ctx); + /** + * Visit a parse tree produced by the {@code sysCatalogs} + * labeled alternative in {@link SqlBaseParser#statement}. 
+ * @param ctx the parse tree + * @return the visitor result + */ + T visitSysCatalogs(SqlBaseParser.SysCatalogsContext ctx); + /** + * Visit a parse tree produced by the {@code sysTables} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitSysTables(SqlBaseParser.SysTablesContext ctx); + /** + * Visit a parse tree produced by the {@code sysColumns} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitSysColumns(SqlBaseParser.SysColumnsContext ctx); + /** + * Visit a parse tree produced by the {@code sysTypes} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitSysTypes(SqlBaseParser.SysTypesContext ctx); + /** + * Visit a parse tree produced by the {@code sysTableTypes} + * labeled alternative in {@link SqlBaseParser#statement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitSysTableTypes(SqlBaseParser.SysTableTypesContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#query}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitQuery(SqlBaseParser.QueryContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#queryNoWith}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitQueryNoWith(SqlBaseParser.QueryNoWithContext ctx); + /** + * Visit a parse tree produced by the {@code queryPrimaryDefault} + * labeled alternative in {@link SqlBaseParser#queryTerm}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitQueryPrimaryDefault(SqlBaseParser.QueryPrimaryDefaultContext ctx); + /** + * Visit a parse tree produced by the {@code subquery} + * labeled alternative in {@link SqlBaseParser#queryTerm}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitSubquery(SqlBaseParser.SubqueryContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#orderBy}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitOrderBy(SqlBaseParser.OrderByContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#querySpecification}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitQuerySpecification(SqlBaseParser.QuerySpecificationContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#fromClause}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitFromClause(SqlBaseParser.FromClauseContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#groupBy}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitGroupBy(SqlBaseParser.GroupByContext ctx); + /** + * Visit a parse tree produced by the {@code singleGroupingSet} + * labeled alternative in {@link SqlBaseParser#groupingElement}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitSingleGroupingSet(SqlBaseParser.SingleGroupingSetContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#groupingExpressions}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitGroupingExpressions(SqlBaseParser.GroupingExpressionsContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#namedQuery}. 
+ * @param ctx the parse tree + * @return the visitor result + */ + T visitNamedQuery(SqlBaseParser.NamedQueryContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#setQuantifier}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitSetQuantifier(SqlBaseParser.SetQuantifierContext ctx); + /** + * Visit a parse tree produced by the {@code selectExpression} + * labeled alternative in {@link SqlBaseParser#selectItem}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitSelectExpression(SqlBaseParser.SelectExpressionContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#relation}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitRelation(SqlBaseParser.RelationContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#joinRelation}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitJoinRelation(SqlBaseParser.JoinRelationContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#joinType}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitJoinType(SqlBaseParser.JoinTypeContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#joinCriteria}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitJoinCriteria(SqlBaseParser.JoinCriteriaContext ctx); + /** + * Visit a parse tree produced by the {@code tableName} + * labeled alternative in {@link SqlBaseParser#relationPrimary}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitTableName(SqlBaseParser.TableNameContext ctx); + /** + * Visit a parse tree produced by the {@code aliasedQuery} + * labeled alternative in {@link SqlBaseParser#relationPrimary}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitAliasedQuery(SqlBaseParser.AliasedQueryContext ctx); + /** + * Visit a parse tree produced by the {@code aliasedRelation} + * labeled alternative in {@link SqlBaseParser#relationPrimary}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitAliasedRelation(SqlBaseParser.AliasedRelationContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#expression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitExpression(SqlBaseParser.ExpressionContext ctx); + /** + * Visit a parse tree produced by the {@code logicalNot} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitLogicalNot(SqlBaseParser.LogicalNotContext ctx); + /** + * Visit a parse tree produced by the {@code stringQuery} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitStringQuery(SqlBaseParser.StringQueryContext ctx); + /** + * Visit a parse tree produced by the {@code booleanDefault} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitBooleanDefault(SqlBaseParser.BooleanDefaultContext ctx); + /** + * Visit a parse tree produced by the {@code exists} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitExists(SqlBaseParser.ExistsContext ctx); + /** + * Visit a parse tree produced by the {@code multiMatchQuery} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. 
+ * @param ctx the parse tree + * @return the visitor result + */ + T visitMultiMatchQuery(SqlBaseParser.MultiMatchQueryContext ctx); + /** + * Visit a parse tree produced by the {@code matchQuery} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitMatchQuery(SqlBaseParser.MatchQueryContext ctx); + /** + * Visit a parse tree produced by the {@code logicalBinary} + * labeled alternative in {@link SqlBaseParser#booleanExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitLogicalBinary(SqlBaseParser.LogicalBinaryContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#predicated}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitPredicated(SqlBaseParser.PredicatedContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#predicate}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitPredicate(SqlBaseParser.PredicateContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#pattern}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitPattern(SqlBaseParser.PatternContext ctx); + /** + * Visit a parse tree produced by the {@code valueExpressionDefault} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitValueExpressionDefault(SqlBaseParser.ValueExpressionDefaultContext ctx); + /** + * Visit a parse tree produced by the {@code comparison} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitComparison(SqlBaseParser.ComparisonContext ctx); + /** + * Visit a parse tree produced by the {@code arithmeticBinary} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitArithmeticBinary(SqlBaseParser.ArithmeticBinaryContext ctx); + /** + * Visit a parse tree produced by the {@code arithmeticUnary} + * labeled alternative in {@link SqlBaseParser#valueExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitArithmeticUnary(SqlBaseParser.ArithmeticUnaryContext ctx); + /** + * Visit a parse tree produced by the {@code cast} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitCast(SqlBaseParser.CastContext ctx); + /** + * Visit a parse tree produced by the {@code extract} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitExtract(SqlBaseParser.ExtractContext ctx); + /** + * Visit a parse tree produced by the {@code constantDefault} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitConstantDefault(SqlBaseParser.ConstantDefaultContext ctx); + /** + * Visit a parse tree produced by the {@code star} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitStar(SqlBaseParser.StarContext ctx); + /** + * Visit a parse tree produced by the {@code functionCall} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. 
+ * @param ctx the parse tree + * @return the visitor result + */ + T visitFunctionCall(SqlBaseParser.FunctionCallContext ctx); + /** + * Visit a parse tree produced by the {@code subqueryExpression} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitSubqueryExpression(SqlBaseParser.SubqueryExpressionContext ctx); + /** + * Visit a parse tree produced by the {@code columnReference} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitColumnReference(SqlBaseParser.ColumnReferenceContext ctx); + /** + * Visit a parse tree produced by the {@code dereference} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitDereference(SqlBaseParser.DereferenceContext ctx); + /** + * Visit a parse tree produced by the {@code parenthesizedExpression} + * labeled alternative in {@link SqlBaseParser#primaryExpression}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitParenthesizedExpression(SqlBaseParser.ParenthesizedExpressionContext ctx); + /** + * Visit a parse tree produced by the {@code nullLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitNullLiteral(SqlBaseParser.NullLiteralContext ctx); + /** + * Visit a parse tree produced by the {@code numericLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitNumericLiteral(SqlBaseParser.NumericLiteralContext ctx); + /** + * Visit a parse tree produced by the {@code booleanLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitBooleanLiteral(SqlBaseParser.BooleanLiteralContext ctx); + /** + * Visit a parse tree produced by the {@code stringLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitStringLiteral(SqlBaseParser.StringLiteralContext ctx); + /** + * Visit a parse tree produced by the {@code paramLiteral} + * labeled alternative in {@link SqlBaseParser#constant}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitParamLiteral(SqlBaseParser.ParamLiteralContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#comparisonOperator}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitComparisonOperator(SqlBaseParser.ComparisonOperatorContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#booleanValue}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitBooleanValue(SqlBaseParser.BooleanValueContext ctx); + /** + * Visit a parse tree produced by the {@code primitiveDataType} + * labeled alternative in {@link SqlBaseParser#dataType}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitPrimitiveDataType(SqlBaseParser.PrimitiveDataTypeContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#qualifiedName}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitQualifiedName(SqlBaseParser.QualifiedNameContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#identifier}. 
+ * @param ctx the parse tree + * @return the visitor result + */ + T visitIdentifier(SqlBaseParser.IdentifierContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#tableIdentifier}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitTableIdentifier(SqlBaseParser.TableIdentifierContext ctx); + /** + * Visit a parse tree produced by the {@code quotedIdentifier} + * labeled alternative in {@link SqlBaseParser#quoteIdentifier}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitQuotedIdentifier(SqlBaseParser.QuotedIdentifierContext ctx); + /** + * Visit a parse tree produced by the {@code backQuotedIdentifier} + * labeled alternative in {@link SqlBaseParser#quoteIdentifier}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitBackQuotedIdentifier(SqlBaseParser.BackQuotedIdentifierContext ctx); + /** + * Visit a parse tree produced by the {@code unquotedIdentifier} + * labeled alternative in {@link SqlBaseParser#unquoteIdentifier}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitUnquotedIdentifier(SqlBaseParser.UnquotedIdentifierContext ctx); + /** + * Visit a parse tree produced by the {@code digitIdentifier} + * labeled alternative in {@link SqlBaseParser#unquoteIdentifier}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitDigitIdentifier(SqlBaseParser.DigitIdentifierContext ctx); + /** + * Visit a parse tree produced by the {@code decimalLiteral} + * labeled alternative in {@link SqlBaseParser#number}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitDecimalLiteral(SqlBaseParser.DecimalLiteralContext ctx); + /** + * Visit a parse tree produced by the {@code integerLiteral} + * labeled alternative in {@link SqlBaseParser#number}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitIntegerLiteral(SqlBaseParser.IntegerLiteralContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#string}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitString(SqlBaseParser.StringContext ctx); + /** + * Visit a parse tree produced by {@link SqlBaseParser#nonReserved}. + * @param ctx the parse tree + * @return the visitor result + */ + T visitNonReserved(SqlBaseParser.NonReservedContext ctx); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java new file mode 100644 index 0000000000000..7aa3748e31eae --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java @@ -0,0 +1,262 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
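Editor's note (not part of the patch): the generated `SqlBaseVisitor` interface above is the ANTLR entry point for walking SQL parse trees, with one `visit*` method per labeled grammar alternative. A minimal, purely illustrative sketch of how such a visitor is typically used follows; it assumes the generated `SqlBaseBaseVisitor` adapter from the same ANTLR run is available and that, like the interface above, the generated types are package-private.

```java
// Editor's sketch, not project code: count SHOW TABLES nodes in a parse tree.
// Assumes the generated SqlBaseBaseVisitor adapter exists alongside SqlBaseVisitor;
// the class sits in the parser package because the generated types are package-private.
package org.elasticsearch.xpack.sql.parser;

class ShowTablesCounter extends SqlBaseBaseVisitor<Integer> {

    @Override
    protected Integer defaultResult() {
        return 0; // result for subtrees we do not explicitly handle
    }

    @Override
    protected Integer aggregateResult(Integer aggregate, Integer nextResult) {
        return aggregate + nextResult; // sum the counts of sibling subtrees
    }

    @Override
    public Integer visitShowTables(SqlBaseParser.ShowTablesContext ctx) {
        return 1 + visitChildren(ctx); // this node plus anything nested below it
    }
}
```

The project's own `AstBuilder`, referenced from `SqlParser` below, follows the same pattern but returns AST nodes instead of counts.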
+ */ +package org.elasticsearch.xpack.sql.parser; + +import org.antlr.v4.runtime.BaseErrorListener; +import org.antlr.v4.runtime.CharStream; +import org.antlr.v4.runtime.CommonToken; +import org.antlr.v4.runtime.CommonTokenStream; +import org.antlr.v4.runtime.DiagnosticErrorListener; +import org.antlr.v4.runtime.Parser; +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.RecognitionException; +import org.antlr.v4.runtime.Recognizer; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.TokenFactory; +import org.antlr.v4.runtime.TokenSource; +import org.antlr.v4.runtime.atn.ATNConfigSet; +import org.antlr.v4.runtime.atn.PredictionMode; +import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.misc.Pair; +import org.antlr.v4.runtime.tree.TerminalNode; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; + +import java.util.Arrays; +import java.util.BitSet; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.BiFunction; +import java.util.function.Function; + +public class SqlParser { + private static final Logger log = Loggers.getLogger(SqlParser.class); + + private final boolean DEBUG = false; + + /** + * Used only in tests + */ + public LogicalPlan createStatement(String sql) { + return createStatement(sql, Collections.emptyList()); + } + + /** + * Parses an SQL statement into execution plan + * @param sql - the SQL statement + * @param params - a list of parameters for the statement if the statement is parametrized + * @return logical plan + */ + public LogicalPlan createStatement(String sql, List params) { + if (log.isDebugEnabled()) { + log.debug("Parsing as statement: {}", sql); + } + return invokeParser(sql, params, SqlBaseParser::singleStatement, AstBuilder::plan); + } + + /** + * Parses an expression - used only in tests + */ + public Expression createExpression(String expression) { + return createExpression(expression, Collections.emptyList()); + } + + /** + * Parses an expression - Used only in tests + */ + public Expression createExpression(String expression, List params) { + if (log.isDebugEnabled()) { + log.debug("Parsing as expression: {}", expression); + } + + return invokeParser(expression, params, SqlBaseParser::singleExpression, AstBuilder::expression); + } + + private T invokeParser(String sql, List params, Function parseFunction, + BiFunction visitor) { + SqlBaseLexer lexer = new SqlBaseLexer(new CaseInsensitiveStream(sql)); + + lexer.removeErrorListeners(); + lexer.addErrorListener(ERROR_LISTENER); + + Map paramTokens = new HashMap<>(); + TokenSource tokenSource = new ParametrizedTokenSource(lexer, paramTokens, params); + + CommonTokenStream tokenStream = new CommonTokenStream(tokenSource); + SqlBaseParser parser = new SqlBaseParser(tokenStream); + + parser.addParseListener(new PostProcessor(Arrays.asList(parser.getRuleNames()))); + + parser.removeErrorListeners(); + parser.addErrorListener(ERROR_LISTENER); + + parser.getInterpreter().setPredictionMode(PredictionMode.SLL); + + if (DEBUG) { + debug(parser); + } + + ParserRuleContext tree = parseFunction.apply(parser); + + return visitor.apply(new AstBuilder(paramTokens), tree); + } + + private void debug(SqlBaseParser parser) { + // when debugging, use the exact prediction mode 
(needed for diagnostics as well) + parser.getInterpreter().setPredictionMode(PredictionMode.LL_EXACT_AMBIG_DETECTION); + + parser.addParseListener(parser.new TraceListener()); + + parser.addErrorListener(new DiagnosticErrorListener(false) { + @Override + public void reportAttemptingFullContext(Parser recognizer, DFA dfa, + int startIndex, int stopIndex, BitSet conflictingAlts, ATNConfigSet configs) {} + + @Override + public void reportContextSensitivity(Parser recognizer, DFA dfa, + int startIndex, int stopIndex, int prediction, ATNConfigSet configs) {} + }); + } + + private class PostProcessor extends SqlBaseBaseListener { + private final List ruleNames; + + PostProcessor(List ruleNames) { + this.ruleNames = ruleNames; + } + + @Override + public void exitBackQuotedIdentifier(SqlBaseParser.BackQuotedIdentifierContext context) { + Token token = context.BACKQUOTED_IDENTIFIER().getSymbol(); + throw new ParsingException( + "backquoted indetifiers not supported; please use double quotes instead", + null, + token.getLine(), + token.getCharPositionInLine()); + } + + @Override + public void exitDigitIdentifier(SqlBaseParser.DigitIdentifierContext context) { + Token token = context.DIGIT_IDENTIFIER().getSymbol(); + throw new ParsingException( + "identifiers must not start with a digit; please use double quotes", + null, + token.getLine(), + token.getCharPositionInLine()); + } + + @Override + public void exitQuotedIdentifier(SqlBaseParser.QuotedIdentifierContext context) { + // Remove quotes + context.getParent().removeLastChild(); + + Token token = (Token) context.getChild(0).getPayload(); + context.getParent().addChild(new CommonToken( + new Pair<>(token.getTokenSource(), token.getInputStream()), + SqlBaseLexer.IDENTIFIER, + token.getChannel(), + token.getStartIndex() + 1, + token.getStopIndex() - 1)); + } + + @Override + public void exitNonReserved(SqlBaseParser.NonReservedContext context) { + // tree cannot be modified during rule enter/exit _unless_ it's a terminal node + if (!(context.getChild(0) instanceof TerminalNode)) { + int rule = ((ParserRuleContext) context.getChild(0)).getRuleIndex(); + throw new ParsingException("nonReserved can only contain tokens. Found nested rule: " + ruleNames.get(rule)); + } + + // replace nonReserved words with IDENT tokens + context.getParent().removeLastChild(); + + Token token = (Token) context.getChild(0).getPayload(); + context.getParent().addChild(new CommonToken( + new Pair<>(token.getTokenSource(), token.getInputStream()), + SqlBaseLexer.IDENTIFIER, + token.getChannel(), + token.getStartIndex(), + token.getStopIndex())); + } + } + + private static final BaseErrorListener ERROR_LISTENER = new BaseErrorListener() { + @Override + public void syntaxError(Recognizer recognizer, Object offendingSymbol, int line, + int charPositionInLine, String message, RecognitionException e) { + throw new ParsingException(message, e, line, charPositionInLine); + } + }; + + /** + * Finds all parameter tokens (?) and associates them with actual parameter values + *

+ * Parameters are positional and we know where parameters occurred in the original stream in order to associate them + * with actual values. + */ + private static class ParametrizedTokenSource implements TokenSource { + + private TokenSource delegate; + private Map paramTokens; + private int param; + private List params; + + ParametrizedTokenSource(TokenSource delegate, Map paramTokens, List params) { + this.delegate = delegate; + this.paramTokens = paramTokens; + this.params = params; + param = 0; + } + + @Override + public Token nextToken() { + Token token = delegate.nextToken(); + if (token.getType() == SqlBaseLexer.PARAM) { + if (param >= params.size()) { + throw new ParsingException("Not enough actual parameters {} ", params.size()); + } + paramTokens.put(token, params.get(param)); + param++; + } + return token; + } + + @Override + public int getLine() { + return delegate.getLine(); + } + + @Override + public int getCharPositionInLine() { + return delegate.getCharPositionInLine(); + } + + @Override + public CharStream getInputStream() { + return delegate.getInputStream(); + } + + @Override + public String getSourceName() { + return delegate.getSourceName(); + } + + @Override + public void setTokenFactory(TokenFactory factory) { + delegate.setTokenFactory(factory); + } + + @Override + public TokenFactory getTokenFactory() { + return delegate.getTokenFactory(); + } + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/QueryPlan.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/QueryPlan.java new file mode 100644 index 0000000000000..e56aa7819fc22 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/QueryPlan.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
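Editor's note (not part of the patch): `SqlParser` above wires the generated lexer and parser together — it parses in SLL prediction mode, rejects backquoted and digit-leading identifiers in `PostProcessor`, and pairs each `?` token with a supplied value through `ParametrizedTokenSource`. A hedged usage sketch follows; the positional pairing behaviour is taken from the code above, but the exact `SqlTypedParamValue` constructor shown here is an assumption.

```java
// Editor's sketch, not project code. The SqlTypedParamValue constructor
// (type name + value) is assumed; only its positional use is documented above.
import java.util.Arrays;
import org.elasticsearch.xpack.sql.parser.SqlParser;
import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue;

public class SqlParserUsageSketch {
    public static void main(String[] args) {
        SqlParser parser = new SqlParser();
        // Each '?' is matched, in order, to one entry of the params list by
        // ParametrizedTokenSource; fewer values than '?' tokens raise a ParsingException.
        LogicalPlan plan = parser.createStatement(
                "SELECT emp_no FROM emp WHERE salary > ?",
                Arrays.asList(new SqlTypedParamValue("integer", 50000)));
        System.out.println(plan);
    }
}
```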
+ */ +package org.elasticsearch.xpack.sql.plan; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.AttributeSet; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.Node; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; +import java.util.function.Function; + +/** + * There are two main types of plans, {@code LogicalPlan} and {@code PhysicalPlan} + */ +public abstract class QueryPlan> extends Node { + + private AttributeSet lazyOutputSet; + private AttributeSet lazyInputSet; + + + public QueryPlan(Location location, List children) { + super(location, children); + } + + public abstract List output(); + + public AttributeSet outputSet() { + if (lazyOutputSet == null) { + lazyOutputSet = new AttributeSet(output()); + } + return lazyOutputSet; + } + + public AttributeSet intputSet() { + if (lazyInputSet == null) { + List attrs = new ArrayList<>(); + for (PlanType child : children()) { + attrs.addAll(child.output()); + } + lazyInputSet = new AttributeSet(attrs); + } + return lazyInputSet; + } + + public PlanType transformExpressionsOnly(Function rule) { + return transformPropertiesOnly(e -> doTransformExpression(e, exp -> exp.transformDown(rule)), Object.class); + } + + public PlanType transformExpressionsDown(Function rule) { + return transformPropertiesDown(e -> doTransformExpression(e, exp -> exp.transformDown(rule)), Object.class); + } + + public PlanType transformExpressionsUp(Function rule) { + return transformPropertiesUp(e -> doTransformExpression(e, exp -> exp.transformUp(rule)), Object.class); + } + + private Object doTransformExpression(Object arg, Function traversal) { + if (arg instanceof Expression) { + return traversal.apply((Expression) arg); + } + if (arg instanceof DataType || arg instanceof Map) { + return arg; + } + + // WARNING: if the collection is typed, an incompatible function will be applied to it + // this results in CCE at runtime and additional filtering is required + // preserving the type information is hacky and weird (a lot of context needs to be passed around and the lambda itself + // has no type info so it's difficult to have automatic checking without having base classes). + + if (arg instanceof Collection) { + Collection c = (Collection) arg; + List transformed = new ArrayList<>(c.size()); + boolean hasChanged = false; + for (Object e : c) { + Object next = doTransformExpression(e, traversal); + if (!e.equals(next)) { + hasChanged = true; + } + else { + // use the initial value + next = e; + } + transformed.add(next); + } + + return hasChanged ? 
transformed : arg; + } + + return arg; + } + + public void forEachExpressionsDown(Consumer rule) { + forEachPropertiesDown(e -> doForEachExpression(e, exp -> exp.forEachDown(rule)), Object.class); + } + + public void forEachExpressionsUp(Consumer rule) { + forEachPropertiesUp(e -> doForEachExpression(e, exp -> exp.forEachUp(rule)), Object.class); + } + + public void forEachExpressions(Consumer rule) { + forEachPropertiesOnly(e -> doForEachExpression(e, rule::accept), Object.class); + } + + private void doForEachExpression(Object arg, Consumer traversal) { + if (arg instanceof Expression) { + traversal.accept((Expression) arg); + } + else if (arg instanceof Collection) { + Collection c = (Collection) arg; + for (Object o : c) { + doForEachExpression(o, traversal); + } + } + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/TableIdentifier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/TableIdentifier.java new file mode 100644 index 0000000000000..0d91703679c74 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/TableIdentifier.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan; + +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.Objects; + +public class TableIdentifier { + + private final Location location; + + private final String cluster; + private final String index; + + public TableIdentifier(Location location, String catalog, String index) { + this.location = location; + this.cluster = catalog; + this.index = index; + } + + public String cluster() { + return cluster; + } + + public String index() { + return index; + } + + @Override + public int hashCode() { + return Objects.hash(cluster, index); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + TableIdentifier other = (TableIdentifier) obj; + return Objects.equals(index, other.index) && Objects.equals(cluster, other.cluster); + } + + public Location location() { + return location; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("["); + if (cluster != null) { + builder.append(cluster); + } + builder.append("][index="); + builder.append(index); + builder.append("]"); + return builder.toString(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Aggregate.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Aggregate.java new file mode 100644 index 0000000000000..b588bff38657e --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Aggregate.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
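Editor's note (not part of the patch): one detail worth calling out in `QueryPlan.doTransformExpression` above is its change detection — when a rule leaves every element of a collection untouched, the original collection object is returned instead of the freshly built copy, so callers can cheaply tell "nothing changed" by identity. The standalone sketch below (plain Java, no project classes) shows the same idiom in isolation.

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;

// Standalone illustration of the change-detection idiom used by doTransformExpression:
// build a transformed copy, but hand back the ORIGINAL list unless something changed.
public class TransformSketch {

    static <T> List<T> transform(List<T> items, Function<T, T> rule) {
        List<T> transformed = new ArrayList<>(items.size());
        boolean hasChanged = false;
        for (T e : items) {
            T next = rule.apply(e);
            if (!e.equals(next)) {
                hasChanged = true;
            } else {
                next = e; // keep the original instance when the rule was a no-op
            }
            transformed.add(next);
        }
        return hasChanged ? transformed : items;
    }

    public static void main(String[] args) {
        List<String> names = Arrays.asList("alpha", "beta");
        System.out.println(transform(names, s -> s) == names);              // true: untouched
        System.out.println(transform(names, String::toUpperCase) == names); // false: rewritten
    }
}
```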
+ */ +package org.elasticsearch.xpack.sql.plan.logical; + +import org.elasticsearch.xpack.sql.capabilities.Resolvables; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.List; +import java.util.Objects; + +public class Aggregate extends UnaryPlan { + + private final List groupings; + private final List aggregates; + + public Aggregate(Location location, LogicalPlan child, List groupings, List aggregates) { + super(location, child); + this.groupings = groupings; + this.aggregates = aggregates; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Aggregate::new, child(), groupings, aggregates); + } + + @Override + protected Aggregate replaceChild(LogicalPlan newChild) { + return new Aggregate(location(), newChild, groupings, aggregates); + } + + public List groupings() { + return groupings; + } + + public List aggregates() { + return aggregates; + } + + @Override + public boolean expressionsResolved() { + return Resolvables.resolved(groupings) && Resolvables.resolved(aggregates); + } + + @Override + public List output() { + return Expressions.asAttributes(aggregates); + } + + @Override + public int hashCode() { + return Objects.hash(groupings, aggregates, child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Aggregate other = (Aggregate) obj; + return Objects.equals(groupings, other.groupings) + && Objects.equals(aggregates, other.aggregates) + && Objects.equals(child(), other.child()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/BinaryPlan.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/BinaryPlan.java new file mode 100644 index 0000000000000..daee3a97ee8d7 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/BinaryPlan.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.logical; + +import java.util.Arrays; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.tree.Location; + +abstract class BinaryPlan extends LogicalPlan { + + private final LogicalPlan left, right; + + BinaryPlan(Location location, LogicalPlan left, LogicalPlan right) { + super(location, Arrays.asList(left, right)); + this.left = left; + this.right = right; + } + + public LogicalPlan left() { + return left; + } + + public LogicalPlan right() { + return right; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + BinaryPlan other = (BinaryPlan) obj; + + return Objects.equals(left(), other.left()) + && Objects.equals(right(), other.right()); + } + + @Override + public int hashCode() { + return Objects.hash(left, right); + } + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Distinct.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Distinct.java new file mode 100644 index 0000000000000..63759f944129d --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Distinct.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical; + +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class Distinct extends UnaryPlan { + + public Distinct(Location location, LogicalPlan child) { + super(location, child); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Distinct::new, child()); + } + + @Override + protected Distinct replaceChild(LogicalPlan newChild) { + return new Distinct(location(), newChild); + } + + @Override + public boolean expressionsResolved() { + return true; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/EsRelation.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/EsRelation.java new file mode 100644 index 0000000000000..73a953854465b --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/EsRelation.java @@ -0,0 +1,127 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.logical; + +import org.elasticsearch.xpack.sql.analysis.index.EsIndex; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.EsField; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; + +public class EsRelation extends LeafPlan { + + private final EsIndex index; + private final List attrs; + + public EsRelation(Location location, EsIndex index) { + super(location); + this.index = index; + attrs = flatten(location, index.mapping()); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, EsRelation::new, index); + } + + private static List flatten(Location location, Map mapping) { + return flatten(location, mapping, null); + } + + private static List flatten(Location location, Map mapping, FieldAttribute parent) { + List list = new ArrayList<>(); + + for (Entry entry : mapping.entrySet()) { + String name = entry.getKey(); + EsField t = entry.getValue(); + + if (t != null) { + FieldAttribute f = new FieldAttribute(location, parent, parent != null ? parent.name() + "." + name : name, t); + list.add(f); + // object or nested + if (t.getProperties().isEmpty() == false) { + list.addAll(flatten(location, t.getProperties(), f)); + } + } + } + return list; + } + + public EsIndex index() { + return index; + } + + @Override + public List output() { + return attrs; + } + + @Override + public boolean expressionsResolved() { + return true; + } + + @Override + public int hashCode() { + return Objects.hash(index); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + EsRelation other = (EsRelation) obj; + return Objects.equals(index, other.index); + } + + private static final int TO_STRING_LIMIT = 52; + + private static String limitedToString(Collection c) { + Iterator it = c.iterator(); + if (!it.hasNext()) { + return "[]"; + } + + // ..] + StringBuilder sb = new StringBuilder(TO_STRING_LIMIT + 4); + sb.append('['); + for (;;) { + E e = it.next(); + String next = e == c ? "(this Collection)" : String.valueOf(e); + if (next.length() + sb.length() > TO_STRING_LIMIT) { + sb.append(next.substring(0, Math.max(0, TO_STRING_LIMIT - sb.length()))); + sb.append('.').append('.').append(']'); + return sb.toString(); + } else { + sb.append(next); + } + if (!it.hasNext()) { + return sb.append(']').toString(); + } + sb.append(',').append(' '); + } + } + + @Override + public String nodeString() { + return nodeName() + "[" + index + "]" + limitedToString(attrs); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Filter.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Filter.java new file mode 100644 index 0000000000000..3a7dcdd991947 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Filter.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical; + +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * A {@code Filter} is a type of Plan that performs filtering of results. In + * {@code SELECT x FROM y WHERE z ..} the "WHERE" clause is a Filter. A + * {@code Filter} has a "condition" Expression that does the filtering. + */ +public class Filter extends UnaryPlan { + + private final Expression condition; + + public Filter(Location location, LogicalPlan child, Expression condition) { + super(location, child); + this.condition = condition; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Filter::new, child(), condition); + } + + @Override + protected Filter replaceChild(LogicalPlan newChild) { + return new Filter(location(), newChild, condition); + } + + public Expression condition() { + return condition; + } + + @Override + public boolean expressionsResolved() { + return condition.resolved(); + } + + @Override + public int hashCode() { + return Objects.hash(condition, child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Filter other = (Filter) obj; + + return Objects.equals(condition, other.condition) + && Objects.equals(child(), other.child()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Join.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Join.java new file mode 100644 index 0000000000000..6a513eed10790 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Join.java @@ -0,0 +1,128 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.logical; + +import java.util.List; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import static java.util.stream.Collectors.toList; + +import static org.elasticsearch.xpack.sql.util.CollectionUtils.combine; + +public class Join extends BinaryPlan { + + private final JoinType type; + private final Expression condition; + + public enum JoinType { + INNER, + LEFT, // OUTER + RIGHT, // OUTER + FULL, // OUTER + IMPLICIT, + } + + public Join(Location location, LogicalPlan left, LogicalPlan right, JoinType type, Expression condition) { + super(location, left, right); + this.type = type; + this.condition = condition; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Join::new, left(), right(), type, condition); + } + + @Override + public LogicalPlan replaceChildren(List newChildren) { + if (newChildren.size() != 2) { + throw new IllegalArgumentException("expected [2] children but received [" + newChildren.size() + "]"); + } + return new Join(location(), newChildren.get(0), newChildren.get(1), type, condition); + } + + public JoinType type() { + return type; + } + + public Expression condition() { + return condition; + } + + @Override + public List output() { + switch (type) { + case LEFT: + // right side can be null + return combine(left().output(), makeNullable(right().output())); + case RIGHT: + // left side can be null + return combine(makeNullable(left().output()), right().output()); + case FULL: + // both sides can be null + return combine(makeNullable(left().output()), makeNullable(right().output())); + // INNER + default: + return combine(left().output(), right().output()); + } + } + + private static List makeNullable(List output) { + return output.stream() + .map(a -> a.withNullability(true)) + .collect(toList()); + } + + @Override + public boolean expressionsResolved() { + return condition == null || condition.resolved(); + } + + public boolean duplicatesResolved() { + return left().outputSet().intersect(right().outputSet()).isEmpty(); + } + + @Override + public boolean resolved() { + // resolve the join if + // - the children are resolved + // - there are no conflicts in output + // - the condition (if present) is resolved to a boolean + return childrenResolved() && + duplicatesResolved() && + expressionsResolved() && + (condition == null || DataType.BOOLEAN == condition.dataType()); + } + + @Override + public int hashCode() { + return Objects.hash(type, condition, left(), right()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Join other = (Join) obj; + + return Objects.equals(type, other.type) + && Objects.equals(condition, other.condition) + && Objects.equals(left(), other.left()) + && Objects.equals(right(), other.right()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/LeafPlan.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/LeafPlan.java new file mode 100644 index 0000000000000..a68eeb53e1d60 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/LeafPlan.java @@ -0,0 
+1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical; + +import java.util.Collections; +import java.util.List; + +import org.elasticsearch.xpack.sql.tree.Location; + +abstract class LeafPlan extends LogicalPlan { + + protected LeafPlan(Location location) { + super(location, Collections.emptyList()); + } + + @Override + public final LogicalPlan replaceChildren(List newChildren) { + throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Limit.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Limit.java new file mode 100644 index 0000000000000..ef194c4eae379 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Limit.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical; + +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class Limit extends UnaryPlan { + + private final Expression limit; + + public Limit(Location location, Expression limit, LogicalPlan child) { + super(location, child); + this.limit = limit; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Limit::new, limit, child()); + } + + @Override + protected Limit replaceChild(LogicalPlan newChild) { + return new Limit(location(), limit, newChild); + } + + public Expression limit() { + return limit; + } + + @Override + public boolean expressionsResolved() { + return limit.resolved(); + } + + @Override + public int hashCode() { + return Objects.hash(limit, child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Limit other = (Limit) obj; + + return Objects.equals(limit, other.limit) + && Objects.equals(child(), other.child()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/LocalRelation.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/LocalRelation.java new file mode 100644 index 0000000000000..cf6b4933787e7 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/LocalRelation.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
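Relating to the `Join.output()` logic a little further up: for an outer join, the attributes coming from the side that may fail to match are marked nullable before the two outputs are concatenated. A self-contained sketch of that idea, with placeholder column/join types rather than the real `Attribute` classes:

```java
// Illustrative sketch of the Join.output() nullability rule; simplified types only.
import java.util.ArrayList;
import java.util.List;

final class JoinOutputSketch {

    record Column(String name, boolean nullable) {}

    enum JoinType { INNER, LEFT, RIGHT, FULL }

    static List<Column> output(JoinType type, List<Column> left, List<Column> right) {
        switch (type) {
            case LEFT:  return combine(left, makeNullable(right));               // right side can be null
            case RIGHT: return combine(makeNullable(left), right);               // left side can be null
            case FULL:  return combine(makeNullable(left), makeNullable(right)); // both sides can be null
            default:    return combine(left, right);                             // INNER
        }
    }

    static List<Column> makeNullable(List<Column> cols) {
        return cols.stream().map(c -> new Column(c.name(), true)).toList();
    }

    static List<Column> combine(List<Column> a, List<Column> b) {
        List<Column> all = new ArrayList<>(a);
        all.addAll(b);
        return all;
    }

    public static void main(String[] args) {
        List<Column> left = List.of(new Column("emp.id", false));
        List<Column> right = List.of(new Column("dept.name", false));
        // LEFT join: dept.name becomes nullable, emp.id keeps its original nullability
        System.out.println(output(JoinType.LEFT, left, right));
    }
}
```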
+ */ +package org.elasticsearch.xpack.sql.plan.logical; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.session.Executable; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.List; +import java.util.Objects; + +import static java.util.Collections.emptyList; + +public class LocalRelation extends LogicalPlan implements Executable { + + private final Executable executable; + + public LocalRelation(Location location, Executable executable) { + super(location, emptyList()); + this.executable = executable; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, LocalRelation::new, executable); + } + + @Override + public LogicalPlan replaceChildren(List newChildren) { + throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); + } + + public Executable executable() { + return executable; + } + + @Override + public boolean expressionsResolved() { + return true; + } + + @Override + public List output() { + return executable.output(); + } + + @Override + public void execute(SqlSession session, ActionListener listener) { + executable.execute(session, listener); + } + + @Override + public int hashCode() { + return executable.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + LocalRelation other = (LocalRelation) obj; + return Objects.equals(executable, other.executable); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/LogicalPlan.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/LogicalPlan.java new file mode 100644 index 0000000000000..c5960c113de24 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/LogicalPlan.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical; + +import org.elasticsearch.xpack.sql.capabilities.Resolvable; +import org.elasticsearch.xpack.sql.capabilities.Resolvables; +import org.elasticsearch.xpack.sql.plan.QueryPlan; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.List; + +/** + * A LogicalPlan is what (not the "how") a user told us they want to do. + * For example, a logical plan in English would be: "I want to get from DEN to SFO". + */ +public abstract class LogicalPlan extends QueryPlan implements Resolvable { + + /** + * Order is important in the enum; any values should be added at the end. 
+ */ + public enum Stage { + PARSED, + PRE_ANALYZED, + ANALYZED, + OPTIMIZED; + } + + private Stage stage = Stage.PARSED; + private Boolean lazyChildrenResolved = null; + private Boolean lazyResolved = null; + + public LogicalPlan(Location location, List children) { + super(location, children); + } + + public boolean preAnalyzed() { + return stage.ordinal() >= Stage.PRE_ANALYZED.ordinal(); + } + + public void setPreAnalyzed() { + stage = Stage.PRE_ANALYZED; + } + + public boolean analyzed() { + return stage.ordinal() >= Stage.ANALYZED.ordinal(); + } + + public void setAnalyzed() { + stage = Stage.ANALYZED; + } + + public boolean optimized() { + return stage.ordinal() >= Stage.OPTIMIZED.ordinal(); + } + + public void setOptimized() { + stage = Stage.OPTIMIZED; + } + + public final boolean childrenResolved() { + if (lazyChildrenResolved == null) { + lazyChildrenResolved = Boolean.valueOf(Resolvables.resolved(children())); + } + return lazyChildrenResolved; + } + + @Override + public boolean resolved() { + if (lazyResolved == null) { + lazyResolved = expressionsResolved() && childrenResolved(); + } + return lazyResolved; + } + + public abstract boolean expressionsResolved(); + + @Override + public abstract int hashCode(); + + @Override + public abstract boolean equals(Object obj); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/OrderBy.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/OrderBy.java new file mode 100644 index 0000000000000..8800dcaae656b --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/OrderBy.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
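The `LogicalPlan` base class above records how far a plan has progressed (PARSED, PRE_ANALYZED, ANALYZED, OPTIMIZED) and answers "at least stage X?" questions by comparing enum ordinals, which is why the enum order must not change; it also caches the resolution checks in lazily initialized `Boolean` fields. A stripped-down sketch of that mechanism, using placeholder types rather than the real class:

```java
// Simplified stand-in for the staging/lazy-resolution mechanism, for illustration only.
import java.util.List;

abstract class StagedPlanSketch {

    enum Stage { PARSED, PRE_ANALYZED, ANALYZED, OPTIMIZED }

    private Stage stage = Stage.PARSED;
    private Boolean lazyResolved;   // null until computed once, then reused

    private final List<StagedPlanSketch> children;

    StagedPlanSketch(List<StagedPlanSketch> children) {
        this.children = children;
    }

    boolean analyzed() {
        // "at least analyzed": ordinal comparison makes later stages imply earlier ones
        return stage.ordinal() >= Stage.ANALYZED.ordinal();
    }

    void setAnalyzed() {
        stage = Stage.ANALYZED;
    }

    final boolean resolved() {
        if (lazyResolved == null) {
            // computed on first call and cached for every later call
            lazyResolved = expressionsResolved()
                && children.stream().allMatch(StagedPlanSketch::resolved);
        }
        return lazyResolved;
    }

    abstract boolean expressionsResolved();
}
```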
+ */ +package org.elasticsearch.xpack.sql.plan.logical; + +import java.util.List; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.capabilities.Resolvables; +import org.elasticsearch.xpack.sql.expression.Order; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class OrderBy extends UnaryPlan { + + private final List order; + + public OrderBy(Location location, LogicalPlan child, List order) { + super(location, child); + this.order = order; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, OrderBy::new, child(), order); + } + + @Override + protected OrderBy replaceChild(LogicalPlan newChild) { + return new OrderBy(location(), newChild, order); + } + + public List order() { + return order; + } + + @Override + public boolean expressionsResolved() { + return Resolvables.resolved(order); + } + + @Override + public int hashCode() { + return Objects.hash(order, child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + OrderBy other = (OrderBy) obj; + return Objects.equals(order, other.order) + && Objects.equals(child(), other.child()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Project.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Project.java new file mode 100644 index 0000000000000..4e15b2843a511 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/Project.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical; + +import java.util.List; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.capabilities.Resolvables; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.expression.function.Functions; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +/** + * A {@code Project} is a {@code Plan} with one child. In {@code SELECT x FROM y}, the "SELECT" statement is a Project. 
+ */ +public class Project extends UnaryPlan { + + private final List projections; + + public Project(Location location, LogicalPlan child, List projections) { + super(location, child); + this.projections = projections; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Project::new, child(), projections); + } + + @Override + protected Project replaceChild(LogicalPlan newChild) { + return new Project(location(), newChild, projections); + } + + public List projections() { + return projections; + } + + @Override + public boolean resolved() { + return super.resolved() && !Expressions.anyMatch(projections, Functions::isAggregate); + } + + @Override + public boolean expressionsResolved() { + return Resolvables.resolved(projections); + } + + @Override + public List output() { + return Expressions.asAttributes(projections); + } + + @Override + public int hashCode() { + return Objects.hash(projections, child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Project other = (Project) obj; + + return Objects.equals(projections, other.projections) + && Objects.equals(child(), other.child()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/SubQueryAlias.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/SubQueryAlias.java new file mode 100644 index 0000000000000..b068d09febf5b --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/SubQueryAlias.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.List; +import java.util.Objects; + +import static java.util.stream.Collectors.toList; + +public class SubQueryAlias extends UnaryPlan { + + private final String alias; + + public SubQueryAlias(Location location, LogicalPlan child, String alias) { + super(location, child); + this.alias = alias; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, SubQueryAlias::new, child(), alias); + } + + @Override + protected SubQueryAlias replaceChild(LogicalPlan newChild) { + return new SubQueryAlias(location(), newChild, alias); + } + + public String alias() { + return alias; + } + + @Override + public List output() { + return (alias == null ? 
child().output() : + child().output().stream() + .map(e -> e.withQualifier(alias)) + .collect(toList()) + ); + } + + @Override + public boolean expressionsResolved() { + return true; + } + + @Override + public int hashCode() { + return Objects.hash(alias, super.hashCode()); + } + + @Override + public boolean equals(Object obj) { + if (!super.equals(obj)) { + return false; + } + + SubQueryAlias other = (SubQueryAlias) obj; + return Objects.equals(alias, other.alias); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/UnaryPlan.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/UnaryPlan.java new file mode 100644 index 0000000000000..637e2594e5345 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/UnaryPlan.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.tree.Location; + +/** + * A {@code UnaryPlan} is a {@code LogicalPlan} with exactly one child, for example, {@code WHERE x} in a + * SQL statement is an {@code UnaryPlan}. + */ +public abstract class UnaryPlan extends LogicalPlan { + + private final LogicalPlan child; + + UnaryPlan(Location location, LogicalPlan child) { + super(location, Collections.singletonList(child)); + this.child = child; + } + + @Override + public final UnaryPlan replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return replaceChild(newChildren.get(0)); + } + protected abstract UnaryPlan replaceChild(LogicalPlan newChild); + + public LogicalPlan child() { + return child; + } + + @Override + public List output() { + return child.output(); + } + + @Override + public int hashCode() { + return Objects.hashCode(child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + UnaryPlan other = (UnaryPlan) obj; + + return Objects.equals(child, other.child); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/UnresolvedRelation.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/UnresolvedRelation.java new file mode 100644 index 0000000000000..472503af4b98f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/UnresolvedRelation.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.logical; + +import org.elasticsearch.xpack.sql.capabilities.Unresolvable; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.plan.TableIdentifier; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +public class UnresolvedRelation extends LeafPlan implements Unresolvable { + + private final TableIdentifier table; + private final String alias; + private final String unresolvedMsg; + + public UnresolvedRelation(Location location, TableIdentifier table, String alias) { + this(location, table, alias, null); + } + + public UnresolvedRelation(Location location, TableIdentifier table, String alias, String unresolvedMessage) { + super(location); + this.table = table; + this.alias = alias; + this.unresolvedMsg = unresolvedMessage == null ? "Unknown index [" + table.index() + "]" : unresolvedMessage; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, UnresolvedRelation::new, table, alias, unresolvedMsg); + } + + public TableIdentifier table() { + return table; + } + + public String alias() { + return alias; + } + + @Override + public boolean resolved() { + return false; + } + + @Override + public boolean expressionsResolved() { + return false; + } + + @Override + public List output() { + return Collections.emptyList(); + } + + @Override + public String unresolvedMessage() { + return unresolvedMsg; + } + + @Override + public int hashCode() { + return Objects.hash(location(), table, alias, unresolvedMsg); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + UnresolvedRelation other = (UnresolvedRelation) obj; + return location().equals(other.location()) + && table.equals(other.table) + && Objects.equals(alias, other.alias) + && unresolvedMsg.equals(other.unresolvedMsg); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/With.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/With.java new file mode 100644 index 0000000000000..7c76ba4e9ba0a --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/With.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.logical; + +import java.util.Map; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class With extends UnaryPlan { + private final Map subQueries; + + public With(Location location, LogicalPlan child, Map subQueries) { + super(location, child); + this.subQueries = subQueries; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, With::new, child(), subQueries); + } + + @Override + protected With replaceChild(LogicalPlan newChild) { + return new With(location(), newChild, subQueries); + } + + public Map subQueries() { + return subQueries; + } + + @Override + public boolean expressionsResolved() { + return true; + } + + @Override + public int hashCode() { + return Objects.hash(child(), subQueries); + } + + @Override + public boolean equals(Object obj) { + if (!super.equals(obj)) { + return false; + } + + With other = (With) obj; + return Objects.equals(subQueries, other.subQueries); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Command.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Command.java new file mode 100644 index 0000000000000..aec44a9c6fbaf --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Command.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical.command; + +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.session.Executable; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.EsField; +import org.elasticsearch.xpack.sql.type.KeywordEsField; + +import java.util.List; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; + +public abstract class Command extends LogicalPlan implements Executable { + + protected Command(Location location) { + super(location, emptyList()); + } + + @Override + public final LogicalPlan replaceChildren(List newChildren) { + throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); + } + + @Override + public boolean expressionsResolved() { + return true; + } + + /** + * Syntactic sugar for creating a schema keyword/string field. + */ + protected final FieldAttribute keyword(String name) { + return field(name, new KeywordEsField(name)); + } + + /** + * Syntactic sugar for creating a schema field. 
+ */ + protected final FieldAttribute field(String name, DataType type) { + return field(name, new EsField(name, type, emptyMap(), true)); + } + + private FieldAttribute field(String name, EsField field) { + return new FieldAttribute(location(), name, field); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Debug.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Debug.java new file mode 100644 index 0000000000000..403165d50870a --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Debug.java @@ -0,0 +1,157 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical.command; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.rule.RuleExecutor.Batch; +import org.elasticsearch.xpack.sql.rule.RuleExecutor.ExecutionInfo; +import org.elasticsearch.xpack.sql.rule.RuleExecutor.Transformation; +import org.elasticsearch.xpack.sql.session.Rows; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.Node; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.NodeUtils; +import org.elasticsearch.xpack.sql.type.KeywordEsField; +import org.elasticsearch.xpack.sql.util.Graphviz; + +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; + +import static java.util.Collections.singletonList; +import static org.elasticsearch.action.ActionListener.wrap; + +public class Debug extends Command { + + public enum Type { + ANALYZED, OPTIMIZED; + } + + public enum Format { + TEXT, GRAPHVIZ + } + + private final LogicalPlan plan; + private final Format format; + private final Type type; + + public Debug(Location location, LogicalPlan plan, Type type, Format format) { + super(location); + this.plan = plan; + this.format = format == null ? Format.TEXT : format; + this.type = type == null ? 
Type.OPTIMIZED : type; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Debug::new, plan, type, format); + } + + public LogicalPlan plan() { + return plan; + } + + public Format format() { + return format; + } + + public Type type() { + return type; + } + + @Override + public List output() { + return singletonList(new FieldAttribute(location(), "plan", new KeywordEsField("plan"))); + } + + @Override + public void execute(SqlSession session, ActionListener listener) { + switch (type) { + case ANALYZED: + session.debugAnalyzedPlan(plan, wrap(i -> handleInfo(i, listener), listener::onFailure)); + break; + case OPTIMIZED: + session.analyzedPlan(plan, true, + wrap(analyzedPlan -> handleInfo(session.optimizer().debugOptimize(analyzedPlan), listener), listener::onFailure)); + break; + default: + break; + } + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + private void handleInfo(ExecutionInfo info, ActionListener listener) { + String planString = null; + + if (format == Format.TEXT) { + StringBuilder sb = new StringBuilder(); + if (info == null) { + sb.append(plan.toString()); + } else { + Map> map = info.transformations(); + + for (Entry> entry : map.entrySet()) { + // for each batch + sb.append("***"); + sb.append(entry.getKey().name()); + sb.append("***"); + for (Transformation tf : entry.getValue()) { + sb.append(tf.ruleName()); + sb.append("\n"); + sb.append(NodeUtils.diffString(tf.before(), tf.after())); + sb.append("\n"); + } + } + } + planString = sb.toString(); + } else { + if (info == null) { + planString = Graphviz.dot("Planned", plan); + } else { + Map> plans = new LinkedHashMap<>(); + Map> map = info.transformations(); + plans.put("start", info.before()); + + for (Entry> entry : map.entrySet()) { + // for each batch + int counter = 0; + for (Transformation tf : entry.getValue()) { + if (tf.hasChanged()) { + plans.put(tf.ruleName() + "#" + ++counter, tf.after()); + } + } + } + planString = Graphviz.dot(plans, true); + } + } + + listener.onResponse(Rows.singleton(output(), planString)); + } + + @Override + public int hashCode() { + return Objects.hash(plan, type, format); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if ((obj == null) || (getClass() != obj.getClass())) { + return false; + } + Debug o = (Debug) obj; + return Objects.equals(format, o.format) && Objects.equals(type, o.type) && Objects.equals(plan, o.plan); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Explain.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Explain.java new file mode 100644 index 0000000000000..5ff9df391a7b7 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/Explain.java @@ -0,0 +1,244 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.logical.command; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.plan.QueryPlan; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.sql.planner.Planner; +import org.elasticsearch.xpack.sql.session.Rows; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.KeywordEsField; +import org.elasticsearch.xpack.sql.util.Graphviz; + +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +import static java.util.Collections.singletonList; +import static java.util.Collections.unmodifiableMap; +import static org.elasticsearch.action.ActionListener.wrap; + +public class Explain extends Command { + + public enum Type { + PARSED, ANALYZED, OPTIMIZED, MAPPED, EXECUTABLE, ALL; + + public String printableName() { + return Strings.capitalize(name().toLowerCase(Locale.ROOT)); + } + } + + public enum Format { + TEXT, GRAPHVIZ + } + + private final LogicalPlan plan; + private final boolean verify; + private final Format format; + private final Type type; + + public Explain(Location location, LogicalPlan plan, Type type, Format format, boolean verify) { + super(location); + this.plan = plan; + this.verify = verify; + this.format = format == null ? Format.TEXT : format; + this.type = type == null ? 
Type.ANALYZED : type; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Explain::new, plan, type, format, verify); + } + + public LogicalPlan plan() { + return plan; + } + + public boolean verify() { + return verify; + } + + public Format format() { + return format; + } + + public Type type() { + return type; + } + + @Override + public List output() { + return singletonList(new FieldAttribute(location(), "plan", new KeywordEsField("plan"))); + } + + @Override + public void execute(SqlSession session, ActionListener listener) { + + if (type == Type.PARSED) { + listener.onResponse(Rows.singleton(output(), formatPlan(format, plan))); + return; + } + + // to avoid duplicating code, the type/verification filtering happens inside the listeners instead of outside using a CASE + session.analyzedPlan(plan, verify, wrap(analyzedPlan -> { + + if (type == Type.ANALYZED) { + listener.onResponse(Rows.singleton(output(), formatPlan(format, analyzedPlan))); + return; + } + + Planner planner = session.planner(); + // verification is on, exceptions can be thrown + if (verify) { + session.optimizedPlan(analyzedPlan, wrap(optimizedPlan -> { + if (type == Type.OPTIMIZED) { + listener.onResponse(Rows.singleton(output(), formatPlan(format, optimizedPlan))); + return; + } + + PhysicalPlan mappedPlan = planner.mapPlan(optimizedPlan, verify); + if (type == Type.MAPPED) { + listener.onResponse(Rows.singleton(output(), formatPlan(format, mappedPlan))); + return; + } + + PhysicalPlan executablePlan = planner.foldPlan(mappedPlan, verify); + if (type == Type.EXECUTABLE) { + listener.onResponse(Rows.singleton(output(), formatPlan(format, executablePlan))); + return; + } + + // Type.All + listener.onResponse(Rows.singleton(output(), printPlans(format, plan, analyzedPlan, optimizedPlan, + mappedPlan, executablePlan))); + }, listener::onFailure)); + } + + // check errors manually to see how far the plans work out + else { + // no analysis failure, can move on + if (Analyzer.verifyFailures(analyzedPlan).isEmpty()) { + session.optimizedPlan(analyzedPlan, wrap(optimizedPlan -> { + if (type == Type.OPTIMIZED) { + listener.onResponse(Rows.singleton(output(), formatPlan(format, optimizedPlan))); + return; + } + + PhysicalPlan mappedPlan = planner.mapPlan(optimizedPlan, verify); + + if (type == Type.MAPPED) { + listener.onResponse(Rows.singleton(output(), formatPlan(format, mappedPlan))); + return; + } + + if (planner.verifyMappingPlanFailures(mappedPlan).isEmpty()) { + PhysicalPlan executablePlan = planner.foldPlan(mappedPlan, verify); + + if (type == Type.EXECUTABLE) { + listener.onResponse(Rows.singleton(output(), formatPlan(format, executablePlan))); + return; + } + + listener.onResponse(Rows.singleton(output(), printPlans(format, plan, analyzedPlan, optimizedPlan, + mappedPlan, executablePlan))); + return; + } + // mapped failed + if (type != Type.ALL) { + listener.onResponse(Rows.singleton(output(), formatPlan(format, mappedPlan))); + return; + } + + listener.onResponse(Rows.singleton(output(), printPlans(format, plan, analyzedPlan, optimizedPlan, + mappedPlan, null))); + }, listener::onFailure)); + // cannot continue + } else { + if (type != Type.ALL) { + listener.onResponse(Rows.singleton(output(), formatPlan(format, analyzedPlan))); + } + else { + listener.onResponse(Rows.singleton(output(), printPlans(format, plan, analyzedPlan, null, null, null))); + } + } + } + }, listener::onFailure)); + } + + private static String printPlans(Format format, LogicalPlan parsed, LogicalPlan analyzedPlan, 
LogicalPlan optimizedPlan, + PhysicalPlan mappedPlan, PhysicalPlan executionPlan) { + if (format == Format.TEXT) { + StringBuilder sb = new StringBuilder(); + sb.append("Parsed\n"); + sb.append("-----------\n"); + sb.append(parsed.toString()); + sb.append("\nAnalyzed\n"); + sb.append("--------\n"); + sb.append(analyzedPlan.toString()); + sb.append("\nOptimized\n"); + sb.append("---------\n"); + sb.append(nullablePlan(optimizedPlan)); + sb.append("\nMapped\n"); + sb.append("---------\n"); + sb.append(nullablePlan(mappedPlan)); + sb.append("\nExecutable\n"); + sb.append("---------\n"); + sb.append(nullablePlan(executionPlan)); + + return sb.toString(); + } else { + Map> plans = new HashMap<>(); + plans.put("Parsed", parsed); + plans.put("Analyzed", analyzedPlan); + + if (optimizedPlan != null) { + plans.put("Optimized", optimizedPlan); + plans.put("Mapped", mappedPlan); + plans.put("Execution", executionPlan); + } + return Graphviz.dot(unmodifiableMap(plans), false); + } + } + + private static String nullablePlan(QueryPlan plan) { + return plan != null ? plan.toString() : ""; + } + + private String formatPlan(Format format, QueryPlan plan) { + return (format == Format.TEXT ? nullablePlan(plan) : Graphviz.dot(type.printableName(), plan)); + } + + @Override + public int hashCode() { + return Objects.hash(plan, type, format, verify); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if ((obj == null) || (getClass() != obj.getClass())) { + return false; + } + Explain o = (Explain) obj; + return Objects.equals(verify, o.verify) + && Objects.equals(format, o.format) + && Objects.equals(type, o.type) + && Objects.equals(plan, o.plan); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowColumns.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowColumns.java new file mode 100644 index 0000000000000..47716687b33cd --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowColumns.java @@ -0,0 +1,100 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
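The `Explain.execute(...)` implementation above is essentially a staged pipeline with early returns: it stops and prints as soon as it reaches the stage requested by `type`, and only assembles the full multi-stage report for `Type.ALL`. A much-simplified sketch of that control flow, with placeholder stages and string "plans" instead of the real session/planner API:

```java
// Illustration of the "run the pipeline, but stop at the requested stage" idea; not the real Explain.
import java.util.List;
import java.util.function.Function;

final class ExplainStagesSketch {

    enum Type { PARSED, ANALYZED, OPTIMIZED, ALL }

    static String explain(Type requested, String parsedPlan,
                          Function<String, String> analyze,
                          Function<String, String> optimize) {
        if (requested == Type.PARSED) {
            return parsedPlan;                 // answer as early as possible
        }
        String analyzed = analyze.apply(parsedPlan);
        if (requested == Type.ANALYZED) {
            return analyzed;
        }
        String optimized = optimize.apply(analyzed);
        if (requested == Type.OPTIMIZED) {
            return optimized;
        }
        // Type.ALL: report every stage that was produced
        return String.join("\n",
            List.of("Parsed: " + parsedPlan, "Analyzed: " + analyzed, "Optimized: " + optimized));
    }
}
```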
+ */ +package org.elasticsearch.xpack.sql.plan.logical.command; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.session.Rows; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.EsField; +import org.elasticsearch.xpack.sql.type.KeywordEsField; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; + +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; + +public class ShowColumns extends Command { + + private final String index; + + public ShowColumns(Location location, String index) { + super(location); + this.index = index; + } + + public String index() { + return index; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ShowColumns::new, index); + } + + @Override + public List output() { + return asList(new FieldAttribute(location(), "column", new KeywordEsField("column")), + new FieldAttribute(location(), "type", new KeywordEsField("type"))); } + + @Override + public void execute(SqlSession session, ActionListener listener) { + session.indexResolver().resolveWithSameMapping(index, null, ActionListener.wrap( + indexResult -> { + List> rows = emptyList(); + if (indexResult.isValid()) { + rows = new ArrayList<>(); + fillInRows(indexResult.get().mapping(), null, rows); + } + listener.onResponse(Rows.of(output(), rows)); + }, + listener::onFailure + )); + } + + private void fillInRows(Map mapping, String prefix, List> rows) { + for (Entry e : mapping.entrySet()) { + EsField field = e.getValue(); + DataType dt = field.getDataType(); + String name = e.getKey(); + if (dt != null) { + rows.add(asList(prefix != null ? prefix + "." + name : name, dt.sqlName())); + if (field.getProperties().isEmpty() == false) { + String newPrefix = prefix != null ? prefix + "." + name : name; + fillInRows(field.getProperties(), newPrefix, rows); + } + } + } + } + + @Override + public int hashCode() { + return Objects.hash(index); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + ShowColumns other = (ShowColumns) obj; + return Objects.equals(index, other.index); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowFunctions.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowFunctions.java new file mode 100644 index 0000000000000..9fdbab46eb894 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowFunctions.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
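`ShowColumns.fillInRows(...)` above walks the index mapping recursively, emitting dotted column names ("parent.child") for object and nested fields, much like `EsRelation.flatten`. A self-contained sketch of that walk with simplified field types:

```java
// Sketch of the recursive "flatten nested fields into dotted names" walk; simplified types only.
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

final class FlattenMappingSketch {

    record Field(String type, Map<String, Field> properties) {}

    static void fillInRows(Map<String, Field> mapping, String prefix, List<List<String>> rows) {
        for (Map.Entry<String, Field> e : mapping.entrySet()) {
            String name = prefix != null ? prefix + "." + e.getKey() : e.getKey();
            rows.add(List.of(name, e.getValue().type()));
            if (e.getValue().properties().isEmpty() == false) {
                fillInRows(e.getValue().properties(), name, rows);   // recurse into object/nested fields
            }
        }
    }

    public static void main(String[] args) {
        Map<String, Field> mapping = new LinkedHashMap<>();
        mapping.put("name", new Field("keyword", Map.of()));
        mapping.put("address", new Field("object", Map.of("city", new Field("keyword", Map.of()))));

        List<List<String>> rows = new ArrayList<>();
        fillInRows(mapping, null, rows);
        System.out.println(rows);   // [[name, keyword], [address, object], [address.city, keyword]]
    }
}
```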
+ */ +package org.elasticsearch.xpack.sql.plan.logical.command; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.function.FunctionDefinition; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.expression.regex.LikePattern; +import org.elasticsearch.xpack.sql.session.Rows; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.KeywordEsField; + +import java.util.Collection; +import java.util.List; +import java.util.Objects; + +import static java.util.Arrays.asList; +import static java.util.stream.Collectors.toList; + +public class ShowFunctions extends Command { + + private final LikePattern pattern; + + public ShowFunctions(Location location, LikePattern pattern) { + super(location); + this.pattern = pattern; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ShowFunctions::new, pattern); + } + + public LikePattern pattern() { + return pattern; + } + + @Override + public List output() { + return asList(new FieldAttribute(location(), "name", new KeywordEsField("name")), + new FieldAttribute(location(), "type", new KeywordEsField("type"))); + } + + @Override + public void execute(SqlSession session, ActionListener listener) { + FunctionRegistry registry = session.functionRegistry(); + Collection functions = registry.listFunctions(pattern != null ? pattern.asJavaRegex() : null); + + listener.onResponse(Rows.of(output(), functions.stream() + .map(f -> asList(f.name(), f.type().name())) + .collect(toList()))); + } + + @Override + public int hashCode() { + return Objects.hash(pattern); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + ShowFunctions other = (ShowFunctions) obj; + return Objects.equals(pattern, other.pattern); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowSchemas.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowSchemas.java new file mode 100644 index 0000000000000..8a1c8ad0807c7 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowSchemas.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.logical.command; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.session.Rows; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.KeywordEsField; + +import java.util.List; + +import static java.util.Collections.singletonList; + +public class ShowSchemas extends Command { + + public ShowSchemas(Location location) { + super(location); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this); + } + + @Override + public List output() { + return singletonList(new FieldAttribute(location(), "schema", new KeywordEsField("schema"))); + } + + @Override + public void execute(SqlSession session, ActionListener listener) { + listener.onResponse(Rows.empty(output())); + } + + @Override + public int hashCode() { + return getClass().hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + return true; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowTables.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowTables.java new file mode 100644 index 0000000000000..ce81aa9a2e6ec --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowTables.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical.command; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.regex.LikePattern; +import org.elasticsearch.xpack.sql.session.Rows; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.List; +import java.util.Objects; + +import static java.util.Arrays.asList; +import static java.util.stream.Collectors.toList; + +public class ShowTables extends Command { + + private final LikePattern pattern; + + public ShowTables(Location location, LikePattern pattern) { + super(location); + this.pattern = pattern; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ShowTables::new, pattern); + } + + public LikePattern pattern() { + return pattern; + } + + @Override + public List output() { + return asList(keyword("name"), keyword("type")); + } + + @Override + public final void execute(SqlSession session, ActionListener listener) { + String index = pattern != null ? pattern.asIndexNameWildcard() : "*"; + String regex = pattern != null ? 
pattern.asJavaRegex() : null; + session.indexResolver().resolveNames(index, regex, null, ActionListener.wrap(result -> { + listener.onResponse(Rows.of(output(), result.stream() + .map(t -> asList(t.name(), t.type().toSql())) + .collect(toList()))); + }, listener::onFailure)); + } + + @Override + public int hashCode() { + return Objects.hash(pattern); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + ShowTables other = (ShowTables) obj; + return Objects.equals(pattern, other.pattern); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysCatalogs.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysCatalogs.java new file mode 100644 index 0000000000000..0aa2af9c202f1 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysCatalogs.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical.command.sys; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.plan.logical.command.Command; +import org.elasticsearch.xpack.sql.session.Rows; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.List; + +import static java.util.Collections.singletonList; + +/** + * System command returning the catalogs (clusters) available. + * Currently returns only the current cluster name. + */ +public class SysCatalogs extends Command { + + public SysCatalogs(Location location) { + super(location); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this); + } + + @Override + public List output() { + return singletonList(keyword("TABLE_CAT")); + } + + @Override + public final void execute(SqlSession session, ActionListener listener) { + String cluster = session.indexResolver().clusterName(); + listener.onResponse(Rows.of(output(), singletonList(singletonList(cluster)))); + } + + @Override + public int hashCode() { + return getClass().hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + return true; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java new file mode 100644 index 0000000000000..3c01736cebe89 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java @@ -0,0 +1,190 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.logical.command.sys; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.xpack.sql.analysis.index.EsIndex; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.regex.LikePattern; +import org.elasticsearch.xpack.sql.plan.logical.command.Command; +import org.elasticsearch.xpack.sql.session.Rows; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.EsField; + +import java.sql.DatabaseMetaData; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.regex.Pattern; + +import static java.util.Arrays.asList; +import static org.elasticsearch.xpack.sql.type.DataType.INTEGER; +import static org.elasticsearch.xpack.sql.type.DataType.NULL; +import static org.elasticsearch.xpack.sql.type.DataType.SHORT; + +/** + * System command designed to be used by JDBC / ODBC for column metadata, such as + * {@link DatabaseMetaData#getColumns(String, String, String, String)}. + */ +public class SysColumns extends Command { + + private final String catalog; + private final LikePattern indexPattern; + private final LikePattern columnPattern; + + public SysColumns(Location location, String catalog, LikePattern indexPattern, LikePattern columnPattern) { + super(location); + this.catalog = catalog; + this.indexPattern = indexPattern; + this.columnPattern = columnPattern; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, SysColumns::new, catalog, indexPattern, columnPattern); + } + + @Override + public List output() { + return asList(keyword("TABLE_CAT"), + keyword("TABLE_SCHEM"), + keyword("TABLE_NAME"), + keyword("COLUMN_NAME"), + field("DATA_TYPE", INTEGER), + keyword("TYPE_NAME"), + field("COLUMN_SIZE", INTEGER), + field("BUFFER_LENGTH", INTEGER), + field("DECIMAL_DIGITS", INTEGER), + field("NUM_PREC_RADIX", INTEGER), + field("NULLABLE", INTEGER), + keyword("REMARKS"), + keyword("COLUMN_DEF"), + field("SQL_DATA_TYPE", INTEGER), + field("SQL_DATETIME_SUB", INTEGER), + field("CHAR_OCTET_LENGTH", INTEGER), + field("ORDINAL_POSITION", INTEGER), + keyword("IS_NULLABLE"), + // JDBC specific + keyword("SCOPE_CATALOG"), + keyword("SCOPE_SCHEMA"), + keyword("SCOPE_TABLE"), + field("SOURCE_DATA_TYPE", SHORT), + keyword("IS_AUTOINCREMENT"), + keyword("IS_GENERATEDCOLUMN") + ); + } + + @Override + public void execute(SqlSession session, ActionListener listener) { + String cluster = session.indexResolver().clusterName(); + + // bail-out early if the catalog is present but differs + if (Strings.hasText(catalog) && !cluster.equals(catalog)) { + listener.onResponse(Rows.empty(output())); + return; + } + + String index = indexPattern != null ? indexPattern.asIndexNameWildcard() : "*"; + String regex = indexPattern != null ? indexPattern.asJavaRegex() : null; + + Pattern columnMatcher = columnPattern != null ? 
Pattern.compile(columnPattern.asJavaRegex()) : null; + + session.indexResolver().resolveAsSeparateMappings(index, regex, ActionListener.wrap(esIndices -> { + List> rows = new ArrayList<>(); + for (EsIndex esIndex : esIndices) { + fillInRows(cluster, esIndex.name(), esIndex.mapping(), null, rows, columnMatcher); + } + + listener.onResponse(Rows.of(output(), rows)); + }, listener::onFailure)); + } + + static void fillInRows(String clusterName, String indexName, Map mapping, String prefix, List> rows, + Pattern columnMatcher) { + int pos = 0; + for (Map.Entry entry : mapping.entrySet()) { + pos++; // JDBC is 1-based so we start with 1 here + + String name = entry.getKey(); + name = prefix != null ? prefix + "." + name : name; + EsField field = entry.getValue(); + DataType type = field.getDataType(); + + if (columnMatcher == null || columnMatcher.matcher(name).matches()) { + rows.add(asList(clusterName, + // schema is not supported + null, + indexName, + name, + type.jdbcType.getVendorTypeNumber(), + type.esType.toUpperCase(Locale.ROOT), + type.displaySize, + // TODO: is the buffer_length correct? + type.size, + // no DECIMAL support + null, + // RADIX - Determines how numbers returned by COLUMN_SIZE and DECIMAL_DIGITS should be interpreted. + // 10 means they represent the number of decimal digits allowed for the column. + // 2 means they represent the number of bits allowed for the column. + // null means radix is not applicable for the given type. + type.isInteger ? Integer.valueOf(10) : type.isRational ? Integer.valueOf(2) : null, + // everything is nullable + DatabaseMetaData.columnNullable, + // no remarks + null, + // no column def + null, + // SQL_DATA_TYPE apparently needs to be same as DATA_TYPE except for datetime and interval data types + type.jdbcType.getVendorTypeNumber(), + // SQL_DATETIME_SUB ? + null, + // char octet length + type.isString() || type == DataType.BINARY ? type.size : null, + // position + pos, + "YES", + null, + null, + null, + null, + "NO", + "NO" + )); + } + if (field.getProperties() != null) { + fillInRows(clusterName, indexName, field.getProperties(), name, rows, columnMatcher); + } + } + } + + @Override + public int hashCode() { + return Objects.hash(catalog, indexPattern, columnPattern); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + SysColumns other = (SysColumns) obj; + return Objects.equals(catalog, other.catalog) + && Objects.equals(indexPattern, other.indexPattern) + && Objects.equals(columnPattern, other.columnPattern); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTableTypes.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTableTypes.java new file mode 100644 index 0000000000000..ff6789bc3731d --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTableTypes.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
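`SysColumns.fillInRows` above produces the JDBC `getColumns()` rows: it walks the index mapping, emits one row per field with a 1-based ordinal position, recurses into sub-fields using dotted names, and fills `NUM_PREC_RADIX` with 10 for integer types, 2 for rational ones and null otherwise. A minimal sketch of just the traversal, assuming a simplified `Map<String, Object>` mapping in place of the real `Map<String, EsField>`; the names here are hypothetical and it is not part of the patch.

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class FillInRowsSketch {

    @SuppressWarnings("unchecked")
    static void walk(Map<String, Object> mapping, String prefix) {
        int pos = 0;
        for (Map.Entry<String, Object> entry : mapping.entrySet()) {
            pos++; // JDBC ordinals are 1-based
            String name = prefix != null ? prefix + "." + entry.getKey() : entry.getKey();
            Object value = entry.getValue();
            System.out.println(pos + " " + name + " -> " + (value instanceof Map ? "object" : value));
            if (value instanceof Map) {
                walk((Map<String, Object>) value, name); // sub-fields get dotted names
            }
        }
    }

    public static void main(String[] args) {
        Map<String, Object> address = new LinkedHashMap<>();
        address.put("city", "keyword");

        Map<String, Object> mapping = new LinkedHashMap<>();
        mapping.put("name", "text");
        mapping.put("address", address);

        walk(mapping, null);
        // 1 name -> text
        // 2 address -> object
        // 1 address.city -> keyword
    }
}
```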
+ */ +package org.elasticsearch.xpack.sql.plan.logical.command.sys; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexType; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.plan.logical.command.Command; +import org.elasticsearch.xpack.sql.session.Rows; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.List; + +import static java.util.Collections.singletonList; +import static java.util.stream.Collectors.toList; + +/** + * System command returning the types of tables supported, + * index and alias. + */ +public class SysTableTypes extends Command { + + public SysTableTypes(Location location) { + super(location); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this); + } + + @Override + public List output() { + return singletonList(keyword("TABLE_TYPE")); + } + + @Override + public final void execute(SqlSession session, ActionListener listener) { + listener.onResponse(Rows.of(output(), IndexType.VALID.stream() + .map(t -> singletonList(t.toSql())) + .collect(toList()))); + } + + @Override + public int hashCode() { + return getClass().hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + return true; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java new file mode 100644 index 0000000000000..2b8e5e8527c31 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java @@ -0,0 +1,145 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.logical.command.sys; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexType; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.regex.LikePattern; +import org.elasticsearch.xpack.sql.plan.logical.command.Command; +import org.elasticsearch.xpack.sql.session.Rows; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.util.CollectionUtils; + +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.List; +import java.util.Objects; +import java.util.regex.Pattern; + +import static java.util.Arrays.asList; +import static java.util.stream.Collectors.toList; +import static org.elasticsearch.xpack.sql.util.StringUtils.EMPTY; +import static org.elasticsearch.xpack.sql.util.StringUtils.SQL_WILDCARD; + +public class SysTables extends Command { + + private final LikePattern pattern; + private final LikePattern clusterPattern; + private final EnumSet<IndexType> types; + + public SysTables(Location location, LikePattern clusterPattern, LikePattern pattern, EnumSet<IndexType> types) { + super(location); + this.clusterPattern = clusterPattern; + this.pattern = pattern; + this.types = types; + } + + @Override + protected NodeInfo<SysTables> info() { + return NodeInfo.create(this, SysTables::new, clusterPattern, pattern, types); + } + + @Override + public List<Attribute> output() { + return asList(keyword("TABLE_CAT"), + keyword("TABLE_SCHEM"), + keyword("TABLE_NAME"), + keyword("TABLE_TYPE"), + keyword("REMARKS"), + keyword("TYPE_CAT"), + keyword("TYPE_SCHEM"), + keyword("TYPE_NAME"), + keyword("SELF_REFERENCING_COL_NAME"), + keyword("REF_GENERATION") + ); + } + + @Override + public final void execute(SqlSession session, ActionListener<SchemaRowSet> listener) { + String cluster = session.indexResolver().clusterName(); + + // first check whether we're dealing with ODBC enumeration + // namely one param specified with '%', everything else empty string + // https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqltables-function?view=ssdt-18vs2017#comments + + if (clusterPattern != null && clusterPattern.pattern().equals(SQL_WILDCARD)) { + if (pattern != null && pattern.pattern().isEmpty() && CollectionUtils.isEmpty(types)) { + Object[] enumeration = new Object[10]; + // send only the cluster, everything else null + enumeration[0] = cluster; + listener.onResponse(Rows.singleton(output(), enumeration)); + return; + } + } + + // if no types were specified (the parser takes care of the % case) + if (CollectionUtils.isEmpty(types)) { + if (clusterPattern != null && clusterPattern.pattern().isEmpty()) { + List<List<?>> values = new ArrayList<>(); + // send only the types, everything else null + for (IndexType type : IndexType.VALID) { + Object[] enumeration = new Object[10]; + enumeration[3] = type.toSql(); + values.add(asList(enumeration)); + } + listener.onResponse(Rows.of(output(), values)); + return; + } + } + + + String cRegex = clusterPattern != null ? clusterPattern.asJavaRegex() : null; + + // if the catalog doesn't match, don't return any results + if (cRegex != null && !Pattern.matches(cRegex, cluster)) { + listener.onResponse(Rows.empty(output())); + return; + } + + String index = pattern != null ? pattern.asIndexNameWildcard() : "*"; + String regex = pattern != null ?
pattern.asJavaRegex() : null; + + session.indexResolver().resolveNames(index, regex, types, ActionListener.wrap(result -> listener.onResponse( + Rows.of(output(), result.stream() + .map(t -> asList(cluster, + EMPTY, + t.name(), + t.type().toSql(), + EMPTY, + null, + null, + null, + null, + null)) + .collect(toList()))) + , listener::onFailure)); + } + + @Override + public int hashCode() { + return Objects.hash(clusterPattern, pattern, types); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + SysTables other = (SysTables) obj; + return Objects.equals(clusterPattern, other.clusterPattern) + && Objects.equals(pattern, other.pattern) + && Objects.equals(types, other.types); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java new file mode 100644 index 0000000000000..508ffef530573 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical.command.sys; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.plan.logical.command.Command; +import org.elasticsearch.xpack.sql.session.Rows; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.sql.DatabaseMetaData; +import java.util.Comparator; +import java.util.List; +import java.util.Locale; +import java.util.stream.Stream; + +import static java.util.Arrays.asList; +import static java.util.stream.Collectors.toList; +import static org.elasticsearch.xpack.sql.type.DataType.BOOLEAN; +import static org.elasticsearch.xpack.sql.type.DataType.INTEGER; +import static org.elasticsearch.xpack.sql.type.DataType.SHORT; + +public class SysTypes extends Command { + + public SysTypes(Location location) { + super(location); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this); + } + + @Override + public List output() { + return asList(keyword("TYPE_NAME"), + field("DATA_TYPE", INTEGER), + field("PRECISION",INTEGER), + keyword("LITERAL_PREFIX"), + keyword("LITERAL_SUFFIX"), + keyword("CREATE_PARAMS"), + field("NULLABLE", SHORT), + field("CASE_SENSITIVE", BOOLEAN), + field("SEARCHABLE", SHORT), + field("UNSIGNED_ATTRIBUTE", BOOLEAN), + field("FIXED_PREC_SCALE", BOOLEAN), + field("AUTO_INCREMENT", BOOLEAN), + keyword("LOCAL_TYPE_NAME"), + field("MINIMUM_SCALE", SHORT), + field("MAXIMUM_SCALE", SHORT), + field("SQL_DATA_TYPE", INTEGER), + field("SQL_DATETIME_SUB", INTEGER), + field("NUM_PREC_RADIX", INTEGER), + // ODBC + field("INTERVAL_PRECISION", INTEGER) + ); + } + + @Override + public final void execute(SqlSession session, ActionListener listener) { + List> rows = Stream.of(DataType.values()) + // sort by SQL int type (that's what the JDBC/ODBC 
specs want) + .sorted(Comparator.comparing(t -> t.jdbcType)) + .map(t -> asList(t.esType.toUpperCase(Locale.ROOT), + t.jdbcType.getVendorTypeNumber(), + t.defaultPrecision, + "'", + "'", + null, + // don't be specific on nullable + DatabaseMetaData.typeNullableUnknown, + // all strings are case-sensitive + t.isString(), + // everything is searchable, + DatabaseMetaData.typeSearchable, + // only numerics are signed + !t.isSigned(), + //no fixed precision scale SQL_FALSE + false, + null, + null, + null, + null, + // SQL_DATA_TYPE - ODBC wants this to be not null + 0, + null, + // Radix + t.isInteger ? Integer.valueOf(10) : (t.isRational ? Integer.valueOf(2) : null), + null + )) + .collect(toList()); + + listener.onResponse(Rows.of(output(), rows)); + } + + @Override + public int hashCode() { + return getClass().hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + return true; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/AggregateExec.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/AggregateExec.java new file mode 100644 index 0000000000000..6814633c7e304 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/AggregateExec.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.physical; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import java.util.List; +import java.util.Objects; + +public class AggregateExec extends UnaryExec implements Unexecutable { + + private final List groupings; + private final List aggregates; + + public AggregateExec(Location location, PhysicalPlan child, + List groupings, List aggregates) { + super(location, child); + this.groupings = groupings; + this.aggregates = aggregates; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, AggregateExec::new, child(), groupings, aggregates); + } + + @Override + protected AggregateExec replaceChild(PhysicalPlan newChild) { + return new AggregateExec(location(), newChild, groupings, aggregates); + } + + public List groupings() { + return groupings; + } + + public List aggregates() { + return aggregates; + } + + @Override + public List output() { + return Expressions.asAttributes(aggregates); + } + + @Override + public int hashCode() { + return Objects.hash(groupings, aggregates, child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + AggregateExec other = (AggregateExec) obj; + + return Objects.equals(groupings, other.groupings) + && Objects.equals(aggregates, other.aggregates) + && Objects.equals(child(), other.child()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/BinaryExec.java 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/BinaryExec.java new file mode 100644 index 0000000000000..e3b78001ed674 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/BinaryExec.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.physical; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.tree.Location; + +abstract class BinaryExec extends PhysicalPlan { + + private final PhysicalPlan left, right; + + protected BinaryExec(Location location, PhysicalPlan left, PhysicalPlan right) { + super(location, Arrays.asList(left, right)); + this.left = left; + this.right = right; + } + + @Override + public final BinaryExec replaceChildren(List newChildren) { + if (newChildren.size() != 2) { + throw new IllegalArgumentException("expected [2] children but received [" + newChildren.size() + "]"); + } + return replaceChildren(newChildren.get(0), newChildren.get(1)); + } + protected abstract BinaryExec replaceChildren(PhysicalPlan newLeft, PhysicalPlan newRight); + + public PhysicalPlan left() { + return left; + } + + public PhysicalPlan right() { + return right; + } + + @Override + public int hashCode() { + return Objects.hash(left, right); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + BinaryExec other = (BinaryExec) obj; + return Objects.equals(left, other.left) + && Objects.equals(right, other.right); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/CommandExec.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/CommandExec.java new file mode 100644 index 0000000000000..3d392eee8810e --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/CommandExec.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.physical; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.plan.logical.command.Command; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.List; +import java.util.Objects; + +public class CommandExec extends LeafExec { + + private final Command command; + + public CommandExec(Location location, Command command) { + super(location); + this.command = command; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, CommandExec::new, command); + } + + public Command command() { + return command; + } + + @Override + public void execute(SqlSession session, ActionListener listener) { + command.execute(session, listener); + } + + @Override + public List output() { + return command.output(); + } + + @Override + public int hashCode() { + return Objects.hash(command); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + CommandExec other = (CommandExec) obj; + return Objects.equals(command, other.command); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/EsQueryExec.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/EsQueryExec.java new file mode 100644 index 0000000000000..36c33920dffba --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/EsQueryExec.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.physical; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.execution.search.Querier; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.querydsl.container.QueryContainer; +import org.elasticsearch.xpack.sql.session.Rows; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.List; +import java.util.Objects; + +public class EsQueryExec extends LeafExec { + + private final String index; + private final List output; + + private final QueryContainer queryContainer; + + public EsQueryExec(Location location, String index, List output, QueryContainer queryContainer) { + super(location); + this.index = index; + this.output = output; + this.queryContainer = queryContainer; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, EsQueryExec::new, index, output, queryContainer); + } + + public EsQueryExec with(QueryContainer queryContainer) { + return new EsQueryExec(location(), index, output, queryContainer); + } + + public String index() { + return index; + } + + public QueryContainer queryContainer() { + return queryContainer; + } + + @Override + public List output() { + return output; + } + + @Override + public void execute(SqlSession session, ActionListener listener) { + Querier scroller = new Querier(session.client(), session.settings()); + scroller.query(Rows.schema(output), queryContainer, index, listener); + } + + @Override + public int hashCode() { + return Objects.hash(index, queryContainer, output); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + EsQueryExec other = (EsQueryExec) obj; + return Objects.equals(index, other.index) + && Objects.equals(queryContainer, other.queryContainer) + && Objects.equals(output, other.output); + } + + @Override + public String nodeString() { + return nodeName() + "[" + index + "," + queryContainer + "]"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/FilterExec.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/FilterExec.java new file mode 100644 index 0000000000000..6c2f8523b3fd9 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/FilterExec.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.physical; + +import java.util.List; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class FilterExec extends UnaryExec implements Unexecutable { + + private final Expression condition; + // indicates whether the filter is regular or agg-based (HAVING xxx) + // gets setup automatically and then copied over during cloning + private final boolean isHaving; + + public FilterExec(Location location, PhysicalPlan child, Expression condition) { + this(location, child, condition, child instanceof AggregateExec); + } + + public FilterExec(Location location, PhysicalPlan child, Expression condition, boolean isHaving) { + super(location, child); + this.condition = condition; + this.isHaving = isHaving; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, FilterExec::new, child(), condition, isHaving); + } + + @Override + protected FilterExec replaceChild(PhysicalPlan newChild) { + return new FilterExec(location(), newChild, condition, isHaving); + } + + public Expression condition() { + return condition; + } + + public boolean isHaving() { + return isHaving; + } + + @Override + public List output() { + return child().output(); + } + + @Override + public int hashCode() { + return Objects.hash(condition, isHaving, child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + FilterExec other = (FilterExec) obj; + return Objects.equals(condition, other.condition) + && Objects.equals(child(), other.child()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LeafExec.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LeafExec.java new file mode 100644 index 0000000000000..eec10b307c062 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LeafExec.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.physical; + +import java.util.Collections; +import java.util.List; + +import org.elasticsearch.xpack.sql.tree.Location; + +abstract class LeafExec extends PhysicalPlan { + LeafExec(Location location) { + super(location, Collections.emptyList()); + } + + @Override + public final LeafExec replaceChildren(List newChildren) { + throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LimitExec.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LimitExec.java new file mode 100644 index 0000000000000..1d4d5a24221f4 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LimitExec.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
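One subtlety in `FilterExec` above: the two-argument constructor infers the `isHaving` flag from its child (a filter sitting directly on top of an `AggregateExec` is treated as a HAVING clause), while the four-argument constructor exists so `replaceChild` and `NodeInfo` can clone the node without re-deriving the flag. A hedged usage fragment, assuming `loc`, `cond`, `agg` (an `AggregateExec`) and `scan` (some non-aggregate plan) already exist; it is illustrative only and not runnable on its own.

```java
// hypothetical variables: loc (Location), cond (Expression),
// agg (AggregateExec), scan (another PhysicalPlan)
FilterExec having = new FilterExec(loc, agg, cond);   // isHaving() == true
FilterExec where  = new FilterExec(loc, scan, cond);  // isHaving() == false

// replaceChild(newChild) copies the original flag via the four-argument
// constructor, so a HAVING filter stays a HAVING filter when its child is swapped
```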
+ */ +package org.elasticsearch.xpack.sql.plan.physical; + +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class LimitExec extends UnaryExec implements Unexecutable { + + private final Expression limit; + + public LimitExec(Location location, PhysicalPlan child, Expression limit) { + super(location, child); + this.limit = limit; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, LimitExec::new, child(), limit); + } + + @Override + protected LimitExec replaceChild(PhysicalPlan newChild) { + return new LimitExec(location(), newChild, limit); + } + + public Expression limit() { + return limit; + } + + @Override + public int hashCode() { + return Objects.hash(limit, child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + LimitExec other = (LimitExec) obj; + return Objects.equals(limit, other.limit) + && Objects.equals(child(), other.child()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LocalExec.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LocalExec.java new file mode 100644 index 0000000000000..287d006b5e6c8 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/LocalExec.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.physical; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.session.EmptyExecutable; +import org.elasticsearch.xpack.sql.session.Executable; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.List; +import java.util.Objects; + +public class LocalExec extends LeafExec { + + private final Executable executable; + + public LocalExec(Location location, Executable executable) { + super(location); + this.executable = executable; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, LocalExec::new, executable); + } + + public Executable executable() { + return executable; + } + + @Override + public List output() { + return executable.output(); + } + + public boolean isEmpty() { + return executable instanceof EmptyExecutable; + } + + @Override + public void execute(SqlSession session, ActionListener listener) { + executable.execute(session, listener); + } + + @Override + public int hashCode() { + return Objects.hash(executable); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + LocalExec other = (LocalExec) obj; + return Objects.equals(executable, other.executable); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/OrderExec.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/OrderExec.java new file mode 100644 index 0000000000000..8b2f224260a09 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/OrderExec.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.physical; + +import java.util.List; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Order; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class OrderExec extends UnaryExec implements Unexecutable { + + private final List order; + + public OrderExec(Location location, PhysicalPlan child, List order) { + super(location, child); + this.order = order; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, OrderExec::new, child(), order); + } + + @Override + protected OrderExec replaceChild(PhysicalPlan newChild) { + return new OrderExec(location(), newChild, order); + } + + public List order() { + return order; + } + + @Override + public int hashCode() { + return Objects.hash(order, child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + OrderExec other = (OrderExec) obj; + + return Objects.equals(order, other.order) + && Objects.equals(child(), other.child()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/PhysicalPlan.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/PhysicalPlan.java new file mode 100644 index 0000000000000..749a494c9d836 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/PhysicalPlan.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.physical; + +import java.util.List; + +import org.elasticsearch.xpack.sql.plan.QueryPlan; +import org.elasticsearch.xpack.sql.session.Executable; +import org.elasticsearch.xpack.sql.session.Rows; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.Schema; + +/** + * A PhysicalPlan is "how" a LogicalPlan (the "what") actually gets translated into one or more queries. + * + * LogicalPlan = I want to get from DEN to SFO + * PhysicalPlan = take Delta, DEN to SJC, then SJC to SFO + */ +public abstract class PhysicalPlan extends QueryPlan implements Executable { + + private Schema lazySchema; + + public PhysicalPlan(Location location, List children) { + super(location, children); + } + + public Schema schema() { + if (lazySchema == null) { + lazySchema = Rows.schema(output()); + } + return lazySchema; + } + + @Override + public abstract int hashCode(); + + @Override + public abstract boolean equals(Object obj); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/ProjectExec.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/ProjectExec.java new file mode 100644 index 0000000000000..411e6c6a20c2f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/ProjectExec.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
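`PhysicalPlan#schema()` above memoizes the schema derived from `output()` in the `lazySchema` field, so repeated calls pay the computation only once. Below is a minimal, self-contained sketch of that compute-once idiom with hypothetical names, not part of the patch; like the field shown above, it assumes single-threaded use.

```java
import java.util.function.Supplier;

public class LazySchemaSketch {

    private String lazySchema;               // stands in for the cached Schema
    private final Supplier<String> compute;  // stands in for Rows.schema(output())

    LazySchemaSketch(Supplier<String> compute) {
        this.compute = compute;
    }

    String schema() {
        if (lazySchema == null) {
            lazySchema = compute.get(); // computed on first access, then reused
        }
        return lazySchema;
    }

    public static void main(String[] args) {
        LazySchemaSketch s = new LazySchemaSketch(() -> {
            System.out.println("computing schema once");
            return "name:KEYWORD, type:KEYWORD";
        });
        s.schema();
        s.schema(); // second call returns the cached value without recomputing
    }
}
```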
+ */ +package org.elasticsearch.xpack.sql.plan.physical; + +import java.util.List; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class ProjectExec extends UnaryExec implements Unexecutable { + + private final List projections; + + public ProjectExec(Location location, PhysicalPlan child, List projections) { + super(location, child); + this.projections = projections; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ProjectExec::new, child(), projections); + } + + @Override + protected ProjectExec replaceChild(PhysicalPlan newChild) { + return new ProjectExec(location(), newChild, projections); + } + + public List projections() { + return projections; + } + + @Override + public List output() { + return Expressions.asAttributes(projections); + } + + @Override + public int hashCode() { + return Objects.hash(projections, child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + ProjectExec other = (ProjectExec) obj; + + return Objects.equals(projections, other.projections) + && Objects.equals(child(), other.child()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/UnaryExec.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/UnaryExec.java new file mode 100644 index 0000000000000..942a60b2cd8d0 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/UnaryExec.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.physical; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.tree.Location; + +abstract class UnaryExec extends PhysicalPlan { + + private final PhysicalPlan child; + + UnaryExec(Location location, PhysicalPlan child) { + super(location, Collections.singletonList(child)); + this.child = child; + } + + @Override + public final PhysicalPlan replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return replaceChild(newChildren.get(0)); + } + protected abstract UnaryExec replaceChild(PhysicalPlan newChild); + + public PhysicalPlan child() { + return child; + } + + @Override + public List output() { + return child.output(); + } + + @Override + public int hashCode() { + return Objects.hashCode(child()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + UnaryExec other = (UnaryExec) obj; + + return Objects.equals(child, other.child); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/Unexecutable.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/Unexecutable.java new file mode 100644 index 0000000000000..ad3c566919178 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/Unexecutable.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.physical; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.planner.PlanningException; +import org.elasticsearch.xpack.sql.session.Executable; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; + +import java.util.Locale; + +// this is mainly a marker interface to validate a plan before being executed +public interface Unexecutable extends Executable { + + default void execute(SqlSession session, ActionListener listener) { + throw new PlanningException("Current plan {} is not executable", this); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/UnplannedExec.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/UnplannedExec.java new file mode 100644 index 0000000000000..a2a2055da6c23 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/physical/UnplannedExec.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.physical; + +import java.util.List; +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +public class UnplannedExec extends LeafExec implements Unexecutable { + + private final LogicalPlan plan; + + public UnplannedExec(Location location, LogicalPlan plan) { + super(location); + this.plan = plan; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, UnplannedExec::new, plan); + } + + public LogicalPlan plan() { + return plan; + } + + @Override + public List output() { + return plan.output(); + } + + @Override + public int hashCode() { + return plan.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + UnplannedExec other = (UnplannedExec) obj; + return Objects.equals(plan, other.plan); + } + + @Override + public String nodeString() { + return nodeName() + "[" + plan.nodeString() + "]"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/FoldingException.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/FoldingException.java new file mode 100644 index 0000000000000..9ad380e3153ad --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/FoldingException.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.planner; + +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.sql.ClientSqlException; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.Node; + +import java.util.Locale; + +public class FoldingException extends ClientSqlException { + + private final int line; + private final int column; + + public FoldingException(Node source, String message, Object... 
args) { + super(message, args); + + Location loc = Location.EMPTY; + if (source != null && source.location() != null) { + loc = source.location(); + } + this.line = loc.getLineNumber(); + this.column = loc.getColumnNumber(); + } + + public FoldingException(Node source, String message, Throwable cause) { + super(message, cause); + + Location loc = Location.EMPTY; + if (source != null && source.location() != null) { + loc = source.location(); + } + this.line = loc.getLineNumber(); + this.column = loc.getColumnNumber(); + } + + public int getLineNumber() { + return line; + } + + public int getColumnNumber() { + return column; + } + + @Override + public RestStatus status() { + return RestStatus.BAD_REQUEST; + } + + @Override + public String getMessage() { + return String.format(Locale.ROOT, "line %s:%s: %s", getLineNumber(), getColumnNumber(), super.getMessage()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java new file mode 100644 index 0000000000000..6a0b96f444a04 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.planner; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.plan.logical.Aggregate; +import org.elasticsearch.xpack.sql.plan.logical.EsRelation; +import org.elasticsearch.xpack.sql.plan.logical.Filter; +import org.elasticsearch.xpack.sql.plan.logical.Join; +import org.elasticsearch.xpack.sql.plan.logical.Limit; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.logical.OrderBy; +import org.elasticsearch.xpack.sql.plan.logical.Project; +import org.elasticsearch.xpack.sql.plan.logical.LocalRelation; +import org.elasticsearch.xpack.sql.plan.logical.With; +import org.elasticsearch.xpack.sql.plan.logical.command.Command; +import org.elasticsearch.xpack.sql.plan.physical.AggregateExec; +import org.elasticsearch.xpack.sql.plan.physical.CommandExec; +import org.elasticsearch.xpack.sql.plan.physical.EsQueryExec; +import org.elasticsearch.xpack.sql.plan.physical.FilterExec; +import org.elasticsearch.xpack.sql.plan.physical.LimitExec; +import org.elasticsearch.xpack.sql.plan.physical.OrderExec; +import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.sql.plan.physical.ProjectExec; +import org.elasticsearch.xpack.sql.plan.physical.LocalExec; +import org.elasticsearch.xpack.sql.plan.physical.UnplannedExec; +import org.elasticsearch.xpack.sql.querydsl.container.QueryContainer; +import org.elasticsearch.xpack.sql.rule.Rule; +import org.elasticsearch.xpack.sql.rule.RuleExecutor; +import org.elasticsearch.xpack.sql.util.ReflectionUtils; + +import java.util.Arrays; +import java.util.List; + +class Mapper extends RuleExecutor { + + public PhysicalPlan map(LogicalPlan plan) { + return execute(planLater(plan)); + } + + @Override + protected Iterable.Batch> batches() { + Batch conversion = new Batch("Mapping", + new JoinMapper(), + new SimpleExecMapper() + ); + + return Arrays.asList(conversion); + } + + private static PhysicalPlan planLater(LogicalPlan plan) { + return new 
UnplannedExec(plan.location(), plan); + } + + private static class SimpleExecMapper extends MapExecRule { + + @Override + protected PhysicalPlan map(LogicalPlan p) { + if (p instanceof Command) { + return new CommandExec(p.location(), (Command) p); + } + + if (p instanceof LocalRelation) { + return new LocalExec(p.location(), (LocalRelation) p); + } + + if (p instanceof Project) { + Project pj = (Project) p; + return new ProjectExec(p.location(), map(pj.child()), pj.projections()); + } + + if (p instanceof Filter) { + Filter fl = (Filter) p; + return new FilterExec(p.location(), map(fl.child()), fl.condition()); + } + + if (p instanceof OrderBy) { + OrderBy o = (OrderBy) p; + return new OrderExec(p.location(), map(o.child()), o.order()); + } + + if (p instanceof Aggregate) { + Aggregate a = (Aggregate) p; + // analysis and optimizations have converted the grouping into actual attributes + return new AggregateExec(p.location(), map(a.child()), a.groupings(), a.aggregates()); + } + + if (p instanceof EsRelation) { + EsRelation c = (EsRelation) p; + List output = c.output(); + return new EsQueryExec(p.location(), c.index().name(), output, new QueryContainer()); + } + + if (p instanceof Limit) { + Limit l = (Limit) p; + return new LimitExec(p.location(), map(l.child()), l.limit()); + } + // TODO: Translate With in a subplan + if (p instanceof With) { + throw new UnsupportedOperationException("With should have been translated already"); + } + + return planLater(p); + } + } + + private static class JoinMapper extends MapExecRule { + + @Override + protected PhysicalPlan map(Join j) { + return join(j); + } + + private PhysicalPlan join(Join join) { + //TODO: pick up on nested/parent-child docs + // 2. Hash? + // 3. Cartesian + // 3. Fallback to nested loop + + + throw new UnsupportedOperationException("Don't know how to handle join " + join.nodeString()); + } + } + + abstract static class MapExecRule extends Rule { + + private final Class subPlanToken = ReflectionUtils.detectSuperTypeForRuleLike(getClass()); + + @Override + public final PhysicalPlan apply(PhysicalPlan plan) { + return plan.transformUp(this::rule, UnplannedExec.class); + } + + @SuppressWarnings("unchecked") + @Override + protected final PhysicalPlan rule(UnplannedExec plan) { + LogicalPlan subPlan = plan.plan(); + if (subPlanToken.isInstance(subPlan)) { + return map((SubPlan) subPlan); + } + return plan; + } + + protected abstract PhysicalPlan map(SubPlan plan); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Planner.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Planner.java new file mode 100644 index 0000000000000..5bda469853d6b --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Planner.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
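`Mapper` above works by wrapping not-yet-translated logical nodes in `UnplannedExec` placeholders (`planLater`) and letting `MapExecRule`-based rules replace them until the whole tree is physical; whatever is still wrapped afterwards is flagged during verification (see `Planner#verifyMappingPlan` below). The following is a toy, self-contained sketch of that idea using hypothetical mini-types, not the real planner API.

```java
public class MapperSketch {

    interface Logical {}
    static class Relation implements Logical {}
    static class Project implements Logical { final Logical child; Project(Logical child) { this.child = child; } }

    interface Physical {}
    static class QueryExec implements Physical {}
    static class ProjectExec implements Physical { final Physical child; ProjectExec(Physical child) { this.child = child; } }
    static class Unplanned implements Physical { final Logical plan; Unplanned(Logical plan) { this.plan = plan; } }

    // mirrors the SimpleExecMapper idea: translate what is recognized, recurse
    // into children, and fall back to a placeholder ("planLater") otherwise
    static Physical map(Logical l) {
        if (l instanceof Relation) {
            return new QueryExec();
        }
        if (l instanceof Project) {
            return new ProjectExec(map(((Project) l).child));
        }
        return new Unplanned(l); // left for another rule, or reported by verification
    }

    public static void main(String[] args) {
        Physical plan = map(new Project(new Relation()));
        System.out.println(plan instanceof ProjectExec); // true
    }
}
```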
+ */ +package org.elasticsearch.xpack.sql.planner; + +import java.util.List; +import java.util.Map; + +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.sql.planner.Verifier.Failure; +import org.elasticsearch.xpack.sql.tree.Node; + +import static java.util.stream.Collectors.toMap; + +public class Planner { + + private final Mapper mapper = new Mapper(); + private final QueryFolder folder = new QueryFolder(); + + public PhysicalPlan plan(LogicalPlan plan) { + return plan(plan, true); + } + + public PhysicalPlan plan(LogicalPlan plan, boolean verify) { + return foldPlan(mapPlan(plan, verify), verify); + } + + // first, map the logical plan + public PhysicalPlan mapPlan(LogicalPlan plan, boolean verify) { + return verify ? verifyMappingPlan(mapper.map(plan)) : mapper.map(plan); + } + + // second, pack it up + public PhysicalPlan foldPlan(PhysicalPlan mapped, boolean verify) { + return verify ? verifyExecutingPlan(folder.fold(mapped)) : folder.fold(mapped); + } + + // verify the mapped plan + public PhysicalPlan verifyMappingPlan(PhysicalPlan plan) { + List failures = Verifier.verifyMappingPlan(plan); + if (!failures.isEmpty()) { + throw new PlanningException(failures); + } + return plan; + } + + public Map, String> verifyMappingPlanFailures(PhysicalPlan plan) { + List failures = Verifier.verifyMappingPlan(plan); + return failures.stream().collect(toMap(Failure::source, Failure::message)); + } + + public PhysicalPlan verifyExecutingPlan(PhysicalPlan plan) { + List failures = Verifier.verifyExecutingPlan(plan); + if (!failures.isEmpty()) { + throw new PlanningException(failures); + } + return plan; + } + + public Map, String> verifyExecutingPlanFailures(PhysicalPlan plan) { + List failures = Verifier.verifyExecutingPlan(plan); + return failures.stream().collect(toMap(Failure::source, Failure::message)); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/PlanningException.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/PlanningException.java new file mode 100644 index 0000000000000..66ad7013cf2e7 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/PlanningException.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.planner; + +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.sql.ClientSqlException; +import org.elasticsearch.xpack.sql.planner.Verifier.Failure; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.Collection; +import java.util.stream.Collectors; + +public class PlanningException extends ClientSqlException { + public PlanningException(String message, Object... 
args) { + super(message, args); + } + + public PlanningException(Collection sources) { + super(extractMessage(sources)); + } + + @Override + public RestStatus status() { + return RestStatus.BAD_REQUEST; + } + + private static String extractMessage(Collection failures) { + return failures.stream() + .map(f -> { + Location l = f.source().location(); + return "line " + l.getLineNumber() + ":" + l.getColumnNumber() + ": " + f.message(); + }) + .collect(Collectors.joining("\n", "Found " + failures.size() + " problem(s)\n", "")); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java new file mode 100644 index 0000000000000..9ef79642abc57 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java @@ -0,0 +1,555 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.planner; + +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.xpack.sql.execution.search.AggRef; +import org.elasticsearch.xpack.sql.expression.Alias; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Foldables; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.expression.Order; +import org.elasticsearch.xpack.sql.expression.function.Function; +import org.elasticsearch.xpack.sql.expression.function.Functions; +import org.elasticsearch.xpack.sql.expression.function.ScoreAttribute; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.sql.expression.function.aggregate.CompoundNumericAggregate; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Count; +import org.elasticsearch.xpack.sql.expression.function.aggregate.InnerAggregate; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeHistogramFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.AggPathInput; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinitions; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.UnaryProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.elasticsearch.xpack.sql.plan.physical.AggregateExec; +import org.elasticsearch.xpack.sql.plan.physical.EsQueryExec; +import org.elasticsearch.xpack.sql.plan.physical.FilterExec; +import org.elasticsearch.xpack.sql.plan.physical.LimitExec; +import org.elasticsearch.xpack.sql.plan.physical.LocalExec; +import org.elasticsearch.xpack.sql.plan.physical.OrderExec; +import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan; 
+import org.elasticsearch.xpack.sql.plan.physical.ProjectExec; +import org.elasticsearch.xpack.sql.planner.QueryTranslator.GroupingContext; +import org.elasticsearch.xpack.sql.planner.QueryTranslator.QueryTranslation; +import org.elasticsearch.xpack.sql.querydsl.agg.AggFilter; +import org.elasticsearch.xpack.sql.querydsl.agg.Aggs; +import org.elasticsearch.xpack.sql.querydsl.agg.GroupByKey; +import org.elasticsearch.xpack.sql.querydsl.agg.LeafAgg; +import org.elasticsearch.xpack.sql.querydsl.container.AttributeSort; +import org.elasticsearch.xpack.sql.querydsl.container.ComputedRef; +import org.elasticsearch.xpack.sql.querydsl.container.GlobalCountRef; +import org.elasticsearch.xpack.sql.querydsl.container.GroupByRef; +import org.elasticsearch.xpack.sql.querydsl.container.GroupByRef.Property; +import org.elasticsearch.xpack.sql.querydsl.container.MetricAggRef; +import org.elasticsearch.xpack.sql.querydsl.container.QueryContainer; +import org.elasticsearch.xpack.sql.querydsl.container.ScoreSort; +import org.elasticsearch.xpack.sql.querydsl.container.ScriptSort; +import org.elasticsearch.xpack.sql.querydsl.container.Sort.Direction; +import org.elasticsearch.xpack.sql.querydsl.query.Query; +import org.elasticsearch.xpack.sql.rule.Rule; +import org.elasticsearch.xpack.sql.rule.RuleExecutor; +import org.elasticsearch.xpack.sql.session.EmptyExecutable; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.util.Check; +import org.joda.time.DateTimeZone; + +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.TimeZone; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.xpack.sql.planner.QueryTranslator.and; +import static org.elasticsearch.xpack.sql.planner.QueryTranslator.toAgg; +import static org.elasticsearch.xpack.sql.planner.QueryTranslator.toQuery; + +/** + * Folds the PhysicalPlan into a {@link Query}. 
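+ * The folding runs as rule batches: the operator-folding rules (aggregate, project, filter, order by, limit) push their plan nodes into the {@link QueryContainer}, the local rules short-circuit empty local relations, and a final pass turns the remaining output attributes into query references.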
+ */ +class QueryFolder extends RuleExecutor<PhysicalPlan> { + private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); + + PhysicalPlan fold(PhysicalPlan plan) { + return execute(plan); + } + + @Override + protected Iterable<RuleExecutor<PhysicalPlan>.Batch> batches() { + Batch rollup = new Batch("Fold queries", + new FoldAggregate(), + new FoldProject(), + new FoldFilter(), + new FoldOrderBy(), + new FoldLimit() + ); + + Batch local = new Batch("Local queries", + new PropagateEmptyLocal(), + new LocalLimit() + ); + + Batch finish = new Batch("Finish query", Limiter.ONCE, + new PlanOutputToQueryRef() + ); + + return Arrays.asList(rollup, local, finish); + } + + private static class FoldProject extends FoldingRule<ProjectExec> { + + @Override + protected PhysicalPlan rule(ProjectExec project) { + if (project.child() instanceof EsQueryExec) { + EsQueryExec exec = (EsQueryExec) project.child(); + QueryContainer queryC = exec.queryContainer(); + + Map<Attribute, Attribute> aliases = new LinkedHashMap<>(queryC.aliases()); + Map<Attribute, ProcessorDefinition> processors = new LinkedHashMap<>(queryC.scalarFunctions()); + + for (NamedExpression pj : project.projections()) { + if (pj instanceof Alias) { + Attribute aliasAttr = pj.toAttribute(); + Expression e = ((Alias) pj).child(); + + if (e instanceof NamedExpression) { + Attribute attr = ((NamedExpression) e).toAttribute(); + aliases.put(aliasAttr, attr); + // add placeholder for each scalar function + if (e instanceof ScalarFunction) { + processors.put(attr, ProcessorDefinitions.toProcessorDefinition(e)); + } + } else { + processors.put(aliasAttr, ProcessorDefinitions.toProcessorDefinition(e)); + } + } + else { + // for named expressions nothing is recorded as these are resolved last + // otherwise 'intermediate' projects might pollute the + // output + + if (pj instanceof ScalarFunction) { + ScalarFunction f = (ScalarFunction) pj; + processors.put(f.toAttribute(), f.asProcessorDefinition()); + } + } + } + + QueryContainer clone = new QueryContainer(queryC.query(), queryC.aggs(), queryC.columns(), aliases, + queryC.pseudoFunctions(), processors, queryC.sort(), queryC.limit()); + return new EsQueryExec(exec.location(), exec.index(), project.output(), clone); + } + return project; + } + } + + private static class FoldFilter extends FoldingRule<FilterExec> { + @Override + protected PhysicalPlan rule(FilterExec plan) { + + if (plan.child() instanceof EsQueryExec) { + EsQueryExec exec = (EsQueryExec) plan.child(); + QueryContainer qContainer = exec.queryContainer(); + + QueryTranslation qt = toQuery(plan.condition(), plan.isHaving()); + + Query query = null; + if (qContainer.query() != null || qt.query != null) { + query = and(plan.location(), qContainer.query(), qt.query); + } + Aggs aggs = addPipelineAggs(qContainer, qt, plan); + + qContainer = new QueryContainer(query, aggs, qContainer.columns(), qContainer.aliases(), + qContainer.pseudoFunctions(), + qContainer.scalarFunctions(), + qContainer.sort(), + qContainer.limit()); + + return exec.with(qContainer); + } + return plan; + } + + private Aggs addPipelineAggs(QueryContainer qContainer, QueryTranslation qt, FilterExec fexec) { + AggFilter filter = qt.aggFilter; + Aggs aggs = qContainer.aggs(); + + if (filter == null) { + return qContainer.aggs(); + } + else { + aggs = aggs.addAgg(filter); + } + + return aggs; + } + } + + private static class FoldAggregate extends FoldingRule<AggregateExec> { + @Override + protected PhysicalPlan rule(AggregateExec a) { + + if (a.child() instanceof EsQueryExec) { + EsQueryExec exec = (EsQueryExec) a.child(); + + // build the group aggregation + // and also collect info about it (since 
the group columns might be used inside the select) + + GroupingContext groupingContext = QueryTranslator.groupBy(a.groupings()); + + QueryContainer queryC = exec.queryContainer(); + if (groupingContext != null) { + queryC = queryC.addGroups(groupingContext.groupMap.values()); + } + + Map aliases = new LinkedHashMap<>(); + // tracker for compound aggs seen in a group + Map compoundAggMap = new LinkedHashMap<>(); + + // followed by actual aggregates + for (NamedExpression ne : a.aggregates()) { + + // unwrap alias - it can be + // - an attribute (since we support aliases inside group-by) + // SELECT emp_no ... GROUP BY emp_no + // SELECT YEAR(hire_date) ... GROUP BY YEAR(hire_date) + + // - an agg function (typically) + // SELECT COUNT(*), AVG(salary) ... GROUP BY salary; + + // - a scalar function, which can be applied on an attribute or aggregate and can require one or multiple inputs + + // SELECT SIN(emp_no) ... GROUP BY emp_no + // SELECT CAST(YEAR(hire_date)) ... GROUP BY YEAR(hire_date) + // SELECT CAST(AVG(salary)) ... GROUP BY salary + // SELECT AVG(salary) + SIN(MIN(salary)) ... GROUP BY salary + + if (ne instanceof Alias || ne instanceof Function) { + Alias as = ne instanceof Alias ? (Alias) ne : null; + Expression child = as != null ? as.child() : ne; + + // record aliases in case they are later referred in the tree + if (as != null && as.child() instanceof NamedExpression) { + aliases.put(as.toAttribute(), ((NamedExpression) as.child()).toAttribute()); + } + + // + // look first for scalar functions which might wrap the actual grouped target + // (e.g. + // CAST(field) GROUP BY field or + // ABS(YEAR(field)) GROUP BY YEAR(field) or + // ABS(AVG(salary)) ... GROUP BY salary + // ) + if (child instanceof ScalarFunction) { + ScalarFunction f = (ScalarFunction) child; + ProcessorDefinition proc = f.asProcessorDefinition(); + + final AtomicReference qC = new AtomicReference<>(queryC); + + proc = proc.transformUp(p -> { + // bail out if the def is resolved + if (p.resolved()) { + return p; + } + + // get the backing expression and check if it belongs to a agg group or whether it's + // an expression in the first place + Expression exp = p.expression(); + GroupByKey matchingGroup = null; + if (groupingContext != null) { + // is there a group (aggregation) for this expression ? 
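+ // (e.g. in SELECT ABS(AVG(salary)) ... GROUP BY salary, the nested AVG(salary) resolves to the group backing salary)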
+ matchingGroup = groupingContext.groupFor(exp); + } + else { + // a scalar function can be used only if has already been mentioned for grouping + // (otherwise it is the opposite of grouping) + if (exp instanceof ScalarFunction) { + throw new FoldingException(exp, "Scalar function " +exp.toString() + + " can be used only if included already in grouping"); + } + } + + // found match for expression; if it's an attribute or scalar, end the processing chain with + // the reference to the backing agg + if (matchingGroup != null) { + if (exp instanceof Attribute || exp instanceof ScalarFunction) { + Processor action = null; + TimeZone tz = null; + /* + * special handling of dates since aggs return the typed Date object which needs + * extraction instead of handling this in the scroller, the folder handles this + * as it already got access to the extraction action + */ + if (exp instanceof DateTimeHistogramFunction) { + action = ((UnaryProcessorDefinition) p).action(); + tz = ((DateTimeFunction) exp).timeZone(); + } + return new AggPathInput(exp.location(), exp, new GroupByRef(matchingGroup.id(), null, tz), action); + } + } + // or found an aggregate expression (which has to work on an attribute used for grouping) + // (can happen when dealing with a root group) + if (Functions.isAggregate(exp)) { + Tuple withFunction = addAggFunction(matchingGroup, + (AggregateFunction) exp, compoundAggMap, qC.get()); + qC.set(withFunction.v1()); + return withFunction.v2(); + } + // not an aggregate and no matching - go to a higher node (likely a function YEAR(birth_date)) + return p; + }); + + if (!proc.resolved()) { + throw new FoldingException(child, "Cannot find grouping for '{}'", Expressions.name(child)); + } + + // add the computed column + queryC = qC.get().addColumn(new ComputedRef(proc)); + + // TODO: is this needed? + // redirect the alias to the scalar group id (changing the id altogether doesn't work it is + // already used in the aggpath) + //aliases.put(as.toAttribute(), sf.toAttribute()); + } + // apply the same logic above (for function inputs) to non-scalar functions with small variations: + // instead of adding things as input, add them as full blown column + else { + GroupByKey matchingGroup = null; + if (groupingContext != null) { + // is there a group (aggregation) for this expression ? + matchingGroup = groupingContext.groupFor(child); + } + // attributes can only refer to declared groups + if (child instanceof Attribute) { + Check.notNull(matchingGroup, "Cannot find group [{}]", Expressions.name(child)); + // check if the field is a date - if so mark it as such to interpret the long as a date + // UTC is used since that's what the server uses and there's no conversion applied + // (like for date histograms) + TimeZone dt = DataType.DATE == child.dataType() ? 
UTC : null; + queryC = queryC.addColumn(new GroupByRef(matchingGroup.id(), null, dt)); + } + else { + // the only thing left is agg function + Check.isTrue(Functions.isAggregate(child), + "Expected aggregate function inside alias; got [{}]", child.nodeString()); + Tuple withAgg = addAggFunction(matchingGroup, + (AggregateFunction) child, compoundAggMap, queryC); + queryC = withAgg.v1().addColumn(withAgg.v2().context()); + } + } + // not an Alias or Function means it's an Attribute so apply the same logic as above + } else { + GroupByKey matchingGroup = null; + if (groupingContext != null) { + matchingGroup = groupingContext.groupFor(ne); + Check.notNull(matchingGroup, "Cannot find group [{}]", Expressions.name(ne)); + + TimeZone dt = DataType.DATE == ne.dataType() ? UTC : null; + queryC = queryC.addColumn(new GroupByRef(matchingGroup.id(), null, dt)); + } + } + } + + if (!aliases.isEmpty()) { + Map newAliases = new LinkedHashMap<>(queryC.aliases()); + newAliases.putAll(aliases); + queryC = queryC.withAliases(newAliases); + } + return new EsQueryExec(exec.location(), exec.index(), a.output(), queryC); + } + return a; + } + + private Tuple addAggFunction(GroupByKey groupingAgg, AggregateFunction f, + Map compoundAggMap, QueryContainer queryC) { + String functionId = f.functionId(); + // handle count as a special case agg + if (f instanceof Count) { + Count c = (Count) f; + if (!c.distinct()) { + AggRef ref = groupingAgg == null ? + GlobalCountRef.INSTANCE : + new GroupByRef(groupingAgg.id(), Property.COUNT, null); + + Map pseudoFunctions = new LinkedHashMap<>(queryC.pseudoFunctions()); + pseudoFunctions.put(functionId, groupingAgg); + return new Tuple<>(queryC.withPseudoFunctions(pseudoFunctions), new AggPathInput(f, ref)); + } + } + + AggPathInput aggInput = null; + + if (f instanceof InnerAggregate) { + InnerAggregate ia = (InnerAggregate) f; + CompoundNumericAggregate outer = ia.outer(); + String cAggPath = compoundAggMap.get(outer); + + // the compound agg hasn't been seen before so initialize it + if (cAggPath == null) { + LeafAgg leafAgg = toAgg(outer.functionId(), outer); + cAggPath = leafAgg.id(); + compoundAggMap.put(outer, cAggPath); + // add the agg (without any reference) + queryC = queryC.with(queryC.aggs().addAgg(leafAgg)); + } + + // FIXME: concern leak - hack around MatrixAgg which is not + // generalized (afaik) + aggInput = new AggPathInput(f, + new MetricAggRef(cAggPath, ia.innerId(), ia.innerKey() != null ? 
QueryTranslator.nameOf(ia.innerKey()) : null)); + } + else { + LeafAgg leafAgg = toAgg(functionId, f); + aggInput = new AggPathInput(f, new MetricAggRef(leafAgg.id())); + queryC = queryC.with(queryC.aggs().addAgg(leafAgg)); + } + + return new Tuple<>(queryC, aggInput); + } + } + + private static class FoldOrderBy extends FoldingRule { + @Override + protected PhysicalPlan rule(OrderExec plan) { + if (plan.child() instanceof EsQueryExec) { + EsQueryExec exec = (EsQueryExec) plan.child(); + QueryContainer qContainer = exec.queryContainer(); + + for (Order order : plan.order()) { + Direction direction = Direction.from(order.direction()); + + // check whether sorting is on an group (and thus nested agg) or field + Attribute attr = ((NamedExpression) order.child()).toAttribute(); + // check whether there's an alias (occurs with scalar functions which are not named) + attr = qContainer.aliases().getOrDefault(attr, attr); + String lookup = attr.id().toString(); + GroupByKey group = qContainer.findGroupForAgg(lookup); + + // TODO: might need to validate whether the target field or group actually exist + if (group != null && group != Aggs.IMPLICIT_GROUP_KEY) { + // check whether the lookup matches a group + if (group.id().equals(lookup)) { + qContainer = qContainer.updateGroup(group.with(direction)); + } + // else it's a leafAgg + else { + qContainer = qContainer.updateGroup(group.with(direction)); + } + } + else { + // scalar functions typically require script ordering + if (attr instanceof ScalarFunctionAttribute) { + ScalarFunctionAttribute sfa = (ScalarFunctionAttribute) attr; + // is there an expression to order by? + if (sfa.orderBy() != null) { + if (sfa.orderBy() instanceof NamedExpression) { + Attribute at = ((NamedExpression) sfa.orderBy()).toAttribute(); + at = qContainer.aliases().getOrDefault(at, at); + qContainer = qContainer.sort(new AttributeSort(at, direction)); + } else if (!sfa.orderBy().foldable()) { + // ignore constant + throw new PlanningException("does not know how to order by expression {}", sfa.orderBy()); + } + } else { + // nope, use scripted sorting + qContainer = qContainer.sort(new ScriptSort(sfa.script(), direction)); + } + } else if (attr instanceof ScoreAttribute) { + qContainer = qContainer.sort(new ScoreSort(direction)); + } else { + qContainer = qContainer.sort(new AttributeSort(attr, direction)); + } + } + } + + return exec.with(qContainer); + } + return plan; + } + } + + + private static class FoldLimit extends FoldingRule { + + @Override + protected PhysicalPlan rule(LimitExec plan) { + if (plan.child() instanceof EsQueryExec) { + EsQueryExec exec = (EsQueryExec) plan.child(); + int limit = Foldables.intValueOf(plan.limit()); + int currentSize = exec.queryContainer().limit(); + int newSize = currentSize < 0 ? 
limit : Math.min(currentSize, limit); + return exec.with(exec.queryContainer().withLimit(newSize)); + } + return plan; + } + } + + private static class PlanOutputToQueryRef extends FoldingRule { + @Override + protected PhysicalPlan rule(EsQueryExec exec) { + QueryContainer qContainer = exec.queryContainer(); + + // references (aka aggs) are in place + if (qContainer.hasColumns()) { + return exec; + } + + for (Attribute attr : exec.output()) { + qContainer = qContainer.addColumn(attr); + } + + // after all attributes have been resolved + return exec.with(qContainer); + } + } + + // + // local + // + + private static class PropagateEmptyLocal extends FoldingRule { + + @Override + protected PhysicalPlan rule(PhysicalPlan plan) { + if (plan.children().size() == 1) { + PhysicalPlan p = plan.children().get(0); + if (p instanceof LocalExec && ((LocalExec) p).isEmpty()) { + return new LocalExec(plan.location(), new EmptyExecutable(plan.output())); + } + } + return plan; + } + } + + // local exec currently means empty or one entry so limit can't really be applied + private static class LocalLimit extends FoldingRule { + + @Override + protected PhysicalPlan rule(LimitExec plan) { + if (plan.child() instanceof LocalExec) { + return plan.child(); + } + return plan; + } + } + + // rule for folding physical plans together + abstract static class FoldingRule extends Rule { + + @Override + public final PhysicalPlan apply(PhysicalPlan plan) { + return plan.transformUp(this::rule, typeToken()); + } + + @Override + protected abstract PhysicalPlan rule(SubPlan plan); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java new file mode 100644 index 0000000000000..dd0456e9aefc8 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java @@ -0,0 +1,859 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.planner; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.BinaryExpression; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.ExpressionId; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.expression.UnaryExpression; +import org.elasticsearch.xpack.sql.expression.function.Function; +import org.elasticsearch.xpack.sql.expression.function.Functions; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Avg; +import org.elasticsearch.xpack.sql.expression.function.aggregate.CompoundNumericAggregate; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Count; +import org.elasticsearch.xpack.sql.expression.function.aggregate.ExtendedStats; +import org.elasticsearch.xpack.sql.expression.function.aggregate.MatrixStats; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Max; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Min; +import org.elasticsearch.xpack.sql.expression.function.aggregate.PercentileRanks; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Percentiles; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Stats; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Sum; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeHistogramFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.Params; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.expression.predicate.And; +import org.elasticsearch.xpack.sql.expression.predicate.BinaryComparison; +import org.elasticsearch.xpack.sql.expression.predicate.Equals; +import org.elasticsearch.xpack.sql.expression.predicate.GreaterThan; +import org.elasticsearch.xpack.sql.expression.predicate.GreaterThanOrEqual; +import org.elasticsearch.xpack.sql.expression.predicate.IsNotNull; +import org.elasticsearch.xpack.sql.expression.predicate.LessThan; +import org.elasticsearch.xpack.sql.expression.predicate.LessThanOrEqual; +import org.elasticsearch.xpack.sql.expression.predicate.Not; +import org.elasticsearch.xpack.sql.expression.predicate.Or; +import org.elasticsearch.xpack.sql.expression.predicate.Range; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MatchQueryPredicate; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MultiMatchQueryPredicate; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.StringQueryPredicate; +import org.elasticsearch.xpack.sql.expression.regex.Like; +import org.elasticsearch.xpack.sql.expression.regex.LikePattern; +import org.elasticsearch.xpack.sql.expression.regex.RLike; +import 
org.elasticsearch.xpack.sql.querydsl.agg.AggFilter; +import org.elasticsearch.xpack.sql.querydsl.agg.AndAggFilter; +import org.elasticsearch.xpack.sql.querydsl.agg.AvgAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.CardinalityAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.ExtendedStatsAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.GroupByColumnKey; +import org.elasticsearch.xpack.sql.querydsl.agg.GroupByDateKey; +import org.elasticsearch.xpack.sql.querydsl.agg.GroupByKey; +import org.elasticsearch.xpack.sql.querydsl.agg.GroupByScriptKey; +import org.elasticsearch.xpack.sql.querydsl.agg.LeafAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.MatrixStatsAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.MaxAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.MinAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.OrAggFilter; +import org.elasticsearch.xpack.sql.querydsl.agg.PercentileRanksAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.PercentilesAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.StatsAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.SumAgg; +import org.elasticsearch.xpack.sql.querydsl.query.BoolQuery; +import org.elasticsearch.xpack.sql.querydsl.query.ExistsQuery; +import org.elasticsearch.xpack.sql.querydsl.query.MatchQuery; +import org.elasticsearch.xpack.sql.querydsl.query.MultiMatchQuery; +import org.elasticsearch.xpack.sql.querydsl.query.NestedQuery; +import org.elasticsearch.xpack.sql.querydsl.query.NotQuery; +import org.elasticsearch.xpack.sql.querydsl.query.Query; +import org.elasticsearch.xpack.sql.querydsl.query.QueryStringQuery; +import org.elasticsearch.xpack.sql.querydsl.query.RangeQuery; +import org.elasticsearch.xpack.sql.querydsl.query.RegexQuery; +import org.elasticsearch.xpack.sql.querydsl.query.ScriptQuery; +import org.elasticsearch.xpack.sql.querydsl.query.TermQuery; +import org.elasticsearch.xpack.sql.querydsl.query.WildcardQuery; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.util.Check; +import org.elasticsearch.xpack.sql.util.ReflectionUtils; + +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Map.Entry; + +import static java.lang.String.format; +import static java.util.Collections.singletonList; +import static org.elasticsearch.xpack.sql.expression.Foldables.doubleValuesOf; +import static org.elasticsearch.xpack.sql.expression.Foldables.stringValueOf; +import static org.elasticsearch.xpack.sql.expression.Foldables.valueOf; +import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder.paramsBuilder; +import static org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate.formatTemplate; + +abstract class QueryTranslator { + + static final List> QUERY_TRANSLATORS = Arrays.asList( + new BinaryComparisons(), + new Ranges(), + new BinaryLogic(), + new Nots(), + new Nulls(), + new Likes(), + new StringQueries(), + new Matches(), + new MultiMatches() + ); + + static final List> AGG_TRANSLATORS = Arrays.asList( + new Maxes(), + new Mins(), + new Avgs(), + new Sums(), + new StatsAggs(), + new ExtendedStatsAggs(), + new MatrixStatsAggs(), + new PercentilesAggs(), + new PercentileRanksAggs(), + new DistinctCounts(), + new DateTimes() + ); + + static class QueryTranslation { + final Query query; + // Agg filter / Function or Agg association + final AggFilter aggFilter; + + 
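// a translation holds either a query (WHERE-style predicate pushed into the query DSL), an agg filter (HAVING-style predicate evaluated as a bucket-selector script), or both + 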
QueryTranslation(Query query) { + this(query, null); + } + + QueryTranslation(AggFilter aggFilter) { + this(null, aggFilter); + } + + QueryTranslation(Query query, AggFilter aggFilter) { + this.query = query; + this.aggFilter = aggFilter; + } + } + + static QueryTranslation toQuery(Expression e, boolean onAggs) { + QueryTranslation translation = null; + for (ExpressionTranslator<?> translator : QUERY_TRANSLATORS) { + translation = translator.translate(e, onAggs); + if (translation != null) { + return translation; + } + } + + throw new SqlIllegalArgumentException("Don't know how to translate {} {}", e.nodeName(), e); + } + + static LeafAgg toAgg(String id, Function f) { + + for (AggTranslator<?> translator : AGG_TRANSLATORS) { + LeafAgg agg = translator.apply(id, f); + if (agg != null) { + return agg; + } + } + + throw new SqlIllegalArgumentException("Don't know how to translate {} {}", f.nodeName(), f); + } + + static class GroupingContext { + final Map<ExpressionId, GroupByKey> groupMap; + final GroupByKey tail; + + GroupingContext(Map<ExpressionId, GroupByKey> groupMap) { + this.groupMap = groupMap; + + GroupByKey lastAgg = null; + for (Entry<ExpressionId, GroupByKey> entry : groupMap.entrySet()) { + lastAgg = entry.getValue(); + } + + tail = lastAgg; + } + + GroupByKey groupFor(Expression exp) { + if (Functions.isAggregate(exp)) { + AggregateFunction f = (AggregateFunction) exp; + // if there's at least one agg in the tree + if (!groupMap.isEmpty()) { + GroupByKey matchingGroup = null; + // group found - finding the dedicated agg + if (f.field() instanceof NamedExpression) { + matchingGroup = groupMap.get(((NamedExpression) f.field()).id()); + } + // return matching group or the tail (last group) + return matchingGroup != null ? matchingGroup : tail; + } + else { + return null; + } + } + if (exp instanceof NamedExpression) { + return groupMap.get(((NamedExpression) exp).id()); + } + throw new SqlIllegalArgumentException("Don't know how to find group for expression {}", exp); + } + + @Override + public String toString() { + return groupMap.toString(); + } + } + + /** + * Creates the list of GroupBy keys + */ + static GroupingContext groupBy(List<? extends Expression> groupings) { + if (groupings.isEmpty()) { + return null; + } + + Map<ExpressionId, GroupByKey> aggMap = new LinkedHashMap<>(); + + for (Expression exp : groupings) { + String aggId; + if (exp instanceof NamedExpression) { + NamedExpression ne = (NamedExpression) exp; + + // change analyzed to non-analyzed attributes + if (exp instanceof FieldAttribute) { + FieldAttribute fa = (FieldAttribute) exp; + if (fa.isInexact()) { + ne = fa.exactAttribute(); + } + } + aggId = ne.id().toString(); + + GroupByKey key = null; + + // handle functions differently + if (exp instanceof Function) { + // dates are handled differently because of date histograms + if (exp instanceof DateTimeHistogramFunction) { + DateTimeHistogramFunction dthf = (DateTimeHistogramFunction) exp; + key = new GroupByDateKey(aggId, nameOf(exp), dthf.interval(), dthf.timeZone()); + } + // all other scalar functions become a script + else if (exp instanceof ScalarFunction) { + ScalarFunction sf = (ScalarFunction) exp; + key = new GroupByScriptKey(aggId, nameOf(exp), sf.asScript()); + } + // bumped into an invalid function (which should be caught by the verifier) + else { + throw new SqlIllegalArgumentException("Cannot GROUP BY function {}", exp); + } + } + else { + key = new GroupByColumnKey(aggId, ne.name()); + } + + aggMap.put(ne.id(), key); + } + else { + throw new SqlIllegalArgumentException("Don't know how to group on {}", exp.nodeString()); + } + } + return new 
GroupingContext(aggMap); + } + + static QueryTranslation and(Location loc, QueryTranslation left, QueryTranslation right) { + Check.isTrue(left != null || right != null, "Both expressions are null"); + if (left == null) { + return right; + } + if (right == null) { + return left; + } + + Query newQ = null; + if (left.query != null || right.query != null) { + newQ = and(loc, left.query, right.query); + } + + AggFilter aggFilter = null; + + if (left.aggFilter == null) { + aggFilter = right.aggFilter; + } + else if (right.aggFilter == null) { + aggFilter = left.aggFilter; + } + else { + aggFilter = new AndAggFilter(left.aggFilter, right.aggFilter); + } + + return new QueryTranslation(newQ, aggFilter); + } + + static Query and(Location loc, Query left, Query right) { + Check.isTrue(left != null || right != null, "Both expressions are null"); + if (left == null) { + return right; + } + if (right == null) { + return left; + } + return new BoolQuery(loc, true, left, right); + } + + static QueryTranslation or(Location loc, QueryTranslation left, QueryTranslation right) { + Check.isTrue(left != null || right != null, "Both expressions are null"); + if (left == null) { + return right; + } + if (right == null) { + return left; + } + + Query newQ = null; + if (left.query != null || right.query != null) { + newQ = or(loc, left.query, right.query); + } + + AggFilter aggFilter = null; + + if (left.aggFilter == null) { + aggFilter = right.aggFilter; + } + else if (right.aggFilter == null) { + aggFilter = left.aggFilter; + } + else { + aggFilter = new OrAggFilter(left.aggFilter, right.aggFilter); + } + + return new QueryTranslation(newQ, aggFilter); + } + + static Query or(Location loc, Query left, Query right) { + Check.isTrue(left != null || right != null, "Both expressions are null"); + + if (left == null) { + return right; + } + if (right == null) { + return left; + } + return new BoolQuery(loc, false, left, right); + } + + static Query not(Query query) { + Check.isTrue(query != null, "Expressions is null"); + return new NotQuery(query.location(), query); + } + + static String nameOf(Expression e) { + if (e instanceof DateTimeFunction) { + return nameOf(((DateTimeFunction) e).field()); + } + if (e instanceof NamedExpression) { + return ((NamedExpression) e).name(); + } + if (e instanceof Literal) { + return String.valueOf(e.fold()); + } + throw new SqlIllegalArgumentException("Cannot determine name for {}", e); + } + + static String idOf(Expression e) { + if (e instanceof NamedExpression) { + return ((NamedExpression) e).id().toString(); + } + throw new SqlIllegalArgumentException("Cannot determine id for {}", e); + } + + static String dateFormat(Expression e) { + if (e instanceof DateTimeFunction) { + return ((DateTimeFunction) e).dateTimeFormat(); + } + return null; + } + + static String field(AggregateFunction af) { + Expression arg = af.field(); + if (arg instanceof FieldAttribute) { + return ((FieldAttribute) arg).name(); + } + if (arg instanceof Literal) { + return String.valueOf(((Literal) arg).value()); + } + throw new SqlIllegalArgumentException("Does not know how to convert argument {} for function {}", arg.nodeString(), + af.nodeString()); + } + + // TODO: need to optimize on ngram + // TODO: see whether escaping is needed + static class Likes extends ExpressionTranslator { + + @Override + protected QueryTranslation asQuery(BinaryExpression e, boolean onAggs) { + Query q = null; + boolean inexact = true; + String target = null; + + if (e.left() instanceof FieldAttribute) { + FieldAttribute fa 
= (FieldAttribute) e.left(); + inexact = fa.isInexact(); + target = nameOf(inexact ? fa : fa.exactAttribute()); + } + + if (e instanceof Like) { + LikePattern p = ((Like) e).right(); + if (inexact) { + q = new QueryStringQuery(e.location(), p.asLuceneWildcard(), target); + } + else { + q = new WildcardQuery(e.location(), nameOf(e.left()), p.asLuceneWildcard()); + } + } + + if (e instanceof RLike) { + String pattern = stringValueOf(e.right()); + if (inexact) { + q = new QueryStringQuery(e.location(), "/" + pattern + "/", target); + } + else { + q = new RegexQuery(e.location(), nameOf(e.left()), pattern); + } + } + + return q != null ? new QueryTranslation(wrapIfNested(q, e.left())) : null; + } + } + + static class StringQueries extends ExpressionTranslator { + + @Override + protected QueryTranslation asQuery(StringQueryPredicate q, boolean onAggs) { + return new QueryTranslation(new QueryStringQuery(q.location(), q.query(), q.fields(), q)); + } + } + + static class Matches extends ExpressionTranslator { + + @Override + protected QueryTranslation asQuery(MatchQueryPredicate q, boolean onAggs) { + return new QueryTranslation(wrapIfNested(new MatchQuery(q.location(), nameOf(q.field()), q.query(), q), q.field())); + } + } + + static class MultiMatches extends ExpressionTranslator { + + @Override + protected QueryTranslation asQuery(MultiMatchQueryPredicate q, boolean onAggs) { + return new QueryTranslation(new MultiMatchQuery(q.location(), q.query(), q.fields(), q)); + } + } + + static class BinaryLogic extends ExpressionTranslator { + + @Override + protected QueryTranslation asQuery(BinaryExpression e, boolean onAggs) { + if (e instanceof And) { + return and(e.location(), toQuery(e.left(), onAggs), toQuery(e.right(), onAggs)); + } + if (e instanceof Or) { + return or(e.location(), toQuery(e.left(), onAggs), toQuery(e.right(), onAggs)); + } + + return null; + } + } + + static class Nots extends ExpressionTranslator { + + @Override + protected QueryTranslation asQuery(Not not, boolean onAggs) { + QueryTranslation translation = toQuery(not.child(), onAggs); + return new QueryTranslation(not(translation.query), translation.aggFilter); + } + } + + static class Nulls extends ExpressionTranslator { + + @Override + protected QueryTranslation asQuery(UnaryExpression ue, boolean onAggs) { + // TODO: handle onAggs - missing bucket aggregation + if (ue instanceof IsNotNull) { + return new QueryTranslation(new ExistsQuery(ue.location(), nameOf(ue.child()))); + } + return null; + } + } + + // assume the Optimizer properly orders the predicates to ease the translation + static class BinaryComparisons extends ExpressionTranslator { + + @Override + protected QueryTranslation asQuery(BinaryComparison bc, boolean onAggs) { + Check.isTrue(bc.right().foldable(), + "Line {}:{}: Comparisons against variables are not (currently) supported; offender [{}] in [{}]", + bc.right().location().getLineNumber(), bc.right().location().getColumnNumber(), + Expressions.name(bc.right()), bc.symbol()); + + if (bc.left() instanceof NamedExpression) { + NamedExpression ne = (NamedExpression) bc.left(); + + Query query = null; + AggFilter aggFilter = null; + + Attribute at = ne.toAttribute(); + + // scalar function can appear in both WHERE and HAVING so handle it first + // in both cases the function script is used - script-query/query for the former, bucket-selector/aggFilter for the latter + + if (at instanceof ScalarFunctionAttribute) { + ScalarFunctionAttribute sfa = (ScalarFunctionAttribute) at; + ScriptTemplate scriptTemplate = 
sfa.script(); + + String template = formatTemplate(format(Locale.ROOT, "%s %s {}", scriptTemplate.template(), bc.symbol())); + // no need to bind the wrapped/target agg - it is already available through the nested script + // (needed to create the script itself) + Params params = paramsBuilder().script(scriptTemplate.params()).variable(valueOf(bc.right())).build(); + ScriptTemplate script = new ScriptTemplate(template, params, DataType.BOOLEAN); + if (onAggs) { + aggFilter = new AggFilter(at.id().toString(), script); + } + else { + query = new ScriptQuery(at.location(), script); + } + } + + // + // Agg context means HAVING -> PipelineAggs + // + else if (onAggs) { + String template = null; + Params params = null; + + // agg function + if (at instanceof AggregateFunctionAttribute) { + AggregateFunctionAttribute fa = (AggregateFunctionAttribute) at; + + // TODO: handle case where both sides of the comparison are functions + template = formatTemplate(format(Locale.ROOT, "{} %s {}", bc.symbol())); + + // bind the agg and the variable to the script + params = paramsBuilder().agg(fa).variable(valueOf(bc.right())).build(); + } + + aggFilter = new AggFilter(at.id().toString(), new ScriptTemplate(template, params, DataType.BOOLEAN)); + } + + // + // No Agg context means WHERE clause + // + else { + if (at instanceof FieldAttribute) { + query = wrapIfNested(translateQuery(bc), ne); + } + } + + return new QueryTranslation(query, aggFilter); + } + + else { + throw new UnsupportedOperationException("No idea how to translate " + bc.left()); + } + } + + private static Query translateQuery(BinaryComparison bc) { + Location loc = bc.location(); + String name = nameOf(bc.left()); + Object value = valueOf(bc.right()); + String format = dateFormat(bc.left()); + + if (bc instanceof GreaterThan) { + return new RangeQuery(loc, name, value, false, null, false, format); + } + if (bc instanceof GreaterThanOrEqual) { + return new RangeQuery(loc, name, value, true, null, false, format); + } + if (bc instanceof LessThan) { + return new RangeQuery(loc, name, null, false, value, false, format); + } + if (bc instanceof LessThanOrEqual) { + return new RangeQuery(loc, name, null, false, value, true, format); + } + if (bc instanceof Equals) { + if (bc.left() instanceof FieldAttribute) { + FieldAttribute fa = (FieldAttribute) bc.left(); + // equality should always be against an exact match + // (which is important for strings) + if (fa.isInexact()) { + name = fa.exactAttribute().name(); + } + } + return new TermQuery(loc, name, value); + } + + throw new SqlIllegalArgumentException("Don't know how to translate binary comparison [{}] in [{}]", bc.right().nodeString(), + bc); + } + } + + static class Ranges extends ExpressionTranslator { + + @Override + protected QueryTranslation asQuery(Range r, boolean onAggs) { + Object lower = valueOf(r.lower()); + Object upper = valueOf(r.upper()); + + Expression e = r.value(); + + + if (e instanceof NamedExpression) { + NamedExpression ne = (NamedExpression) e; + + Query query = null; + AggFilter aggFilter = null; + + Attribute at = ne.toAttribute(); + + // scalar function can appear in both WHERE and HAVING so handle it first + // in both cases the function script is used - script-query/query for the former, bucket-selector/aggFilter + // for the latter + + if (at instanceof ScalarFunctionAttribute) { + ScalarFunctionAttribute sfa = (ScalarFunctionAttribute) at; + ScriptTemplate scriptTemplate = sfa.script(); + + String template = formatTemplate(format(Locale.ROOT, "({} %s %s) && (%s %s 
{})", + r.includeLower() ? "<=" : "<", + scriptTemplate.template(), + scriptTemplate.template(), + r.includeUpper() ? "<=" : "<")); + + // no need to bind the wrapped/target - it is already available through the nested script (needed to + // create the script itself) + Params params = paramsBuilder().variable(lower) + .script(scriptTemplate.params()) + .script(scriptTemplate.params()) + .variable(upper) + .build(); + + ScriptTemplate script = new ScriptTemplate(template, params, DataType.BOOLEAN); + + if (onAggs) { + aggFilter = new AggFilter(at.id().toString(), script); + } + else { + query = new ScriptQuery(at.location(), script); + } + } + + // + // HAVING + // + else if (onAggs) { + String template = null; + Params params = null; + + // agg function + if (at instanceof AggregateFunctionAttribute) { + AggregateFunctionAttribute fa = (AggregateFunctionAttribute) at; + + template = formatTemplate(format(Locale.ROOT, "{} %s {} && {} %s {}", + r.includeLower() ? "<=" : "<", + r.includeUpper() ? "<=" : "<")); + + params = paramsBuilder().variable(lower) + .agg(fa) + .agg(fa) + .variable(upper) + .build(); + + } + aggFilter = new AggFilter(((NamedExpression) r.value()).id().toString(), + new ScriptTemplate(template, params, DataType.BOOLEAN)); + } + // + // WHERE + // + else { + // typical range + if (at instanceof FieldAttribute) { + RangeQuery rangeQuery = new RangeQuery(r.location(), nameOf(r.value()), + valueOf(r.lower()), r.includeLower(), valueOf(r.upper()), r.includeUpper(), dateFormat(r.value())); + query = wrapIfNested(rangeQuery, r.value()); + } + } + + return new QueryTranslation(query, aggFilter); + } + else { + throw new SqlIllegalArgumentException("No idea how to translate " + e); + } + } + } + + + // + // Agg translators + // + + static class DistinctCounts extends SingleValueAggTranslator { + + @Override + protected LeafAgg toAgg(String id, Count c) { + if (!c.distinct()) { + return null; + } + return new CardinalityAgg(id, field(c)); + } + } + + static class Sums extends SingleValueAggTranslator { + + @Override + protected LeafAgg toAgg(String id, Sum s) { + return new SumAgg(id, field(s)); + } + } + + static class Avgs extends SingleValueAggTranslator { + + @Override + protected LeafAgg toAgg(String id, Avg a) { + return new AvgAgg(id, field(a)); + } + } + + static class Maxes extends SingleValueAggTranslator { + + @Override + protected LeafAgg toAgg(String id, Max m) { + return new MaxAgg(id, field(m)); + } + } + + static class Mins extends SingleValueAggTranslator { + + @Override + protected LeafAgg toAgg(String id, Min m) { + return new MinAgg(id, field(m)); + } + } + + static class StatsAggs extends CompoundAggTranslator { + + @Override + protected LeafAgg toAgg(String id, Stats s) { + return new StatsAgg(id, field(s)); + } + } + + static class ExtendedStatsAggs extends CompoundAggTranslator { + + @Override + protected LeafAgg toAgg(String id, ExtendedStats e) { + return new ExtendedStatsAgg(id, field(e)); + } + } + + static class MatrixStatsAggs extends CompoundAggTranslator { + + @Override + protected LeafAgg toAgg(String id, MatrixStats m) { + return new MatrixStatsAgg(id, singletonList(field(m))); + } + } + + static class PercentilesAggs extends CompoundAggTranslator { + + @Override + protected LeafAgg toAgg(String id, Percentiles p) { + return new PercentilesAgg(id, field(p), doubleValuesOf(p.percents())); + } + } + + static class PercentileRanksAggs extends CompoundAggTranslator { + + @Override + protected LeafAgg toAgg(String id, PercentileRanks p) { + return new 
PercentileRanksAgg(id, field(p), doubleValuesOf(p.values())); + } + } + + static class DateTimes extends SingleValueAggTranslator<Min> { + + @Override + protected LeafAgg toAgg(String id, Min m) { + return new MinAgg(id, field(m)); + } + } + + abstract static class AggTranslator<F extends Function> { + + private final Class<?> typeToken = ReflectionUtils.detectSuperTypeForRuleLike(getClass()); + + @SuppressWarnings("unchecked") + public final LeafAgg apply(String id, Function f) { + return (typeToken.isInstance(f) ? asAgg(id, (F) f) : null); + } + + protected abstract LeafAgg asAgg(String id, F f); + } + + abstract static class SingleValueAggTranslator<F extends Function> extends AggTranslator<F> { + + @Override + protected final LeafAgg asAgg(String id, F function) { + return toAgg(id, function); + } + + protected abstract LeafAgg toAgg(String id, F f); + } + + abstract static class CompoundAggTranslator<C extends CompoundNumericAggregate> extends AggTranslator<C> { + + @Override + protected final LeafAgg asAgg(String id, C function) { + return toAgg(id, function); + } + + protected abstract LeafAgg toAgg(String id, C f); + } + + + abstract static class ExpressionTranslator<E extends Expression> { + + private final Class<?> typeToken = ReflectionUtils.detectSuperTypeForRuleLike(getClass()); + + @SuppressWarnings("unchecked") + public QueryTranslation translate(Expression exp, boolean onAggs) { + return (typeToken.isInstance(exp) ? asQuery((E) exp, onAggs) : null); + } + + protected abstract QueryTranslation asQuery(E e, boolean onAggs); + + protected static Query wrapIfNested(Query query, Expression exp) { + if (exp instanceof FieldAttribute) { + FieldAttribute fa = (FieldAttribute) exp; + if (fa.isNested()) { + return new NestedQuery(fa.location(), fa.nestedParent().name(), query); + } + } + return query; + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Verifier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Verifier.java new file mode 100644 index 0000000000000..0aef7ccaf8efe --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Verifier.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.planner; + +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.plan.physical.AggregateExec; +import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.sql.plan.physical.Unexecutable; +import org.elasticsearch.xpack.sql.plan.physical.UnplannedExec; +import org.elasticsearch.xpack.sql.tree.Node; + +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +abstract class Verifier { + + static class Failure { + private final Node<?> source; + private final String message; + + Failure(Node<?> source, String message) { + this.source = source; + this.message = message; + } + + Node<?> source() { + return source; + } + + String message() { + return message; + } + + @Override + public int hashCode() { + return source.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Verifier.Failure other = (Verifier.Failure) obj; + return Objects.equals(source, other.source); + } + } + + private static Failure fail(Node<?> source, String message) { + return new Failure(source, message); + } + + static List<Failure> verifyMappingPlan(PhysicalPlan plan) { + List<Failure> failures = new ArrayList<>(); + + plan.forEachUp(p -> { + if (p instanceof UnplannedExec) { + failures.add(fail(p, "Unplanned item")); + } + p.forEachExpressionsUp(e -> { + if (e.childrenResolved() && !e.resolved()) { + failures.add(fail(e, "Unresolved expression")); + } + }); + + if (p instanceof AggregateExec) { + forbidMultiFieldGroupBy((AggregateExec) p, failures); + } + }); + + return failures; + } + + private static void forbidMultiFieldGroupBy(AggregateExec a, List<Failure> failures) { + if (a.groupings().size() > 1) { + failures.add(fail(a.groupings().get(0), "Currently, only a single expression can be used with GROUP BY; please select one of " + + Expressions.names(a.groupings()))); + } + } + + + static List<Failure> verifyExecutingPlan(PhysicalPlan plan) { + List<Failure> failures = new ArrayList<>(); + + plan.forEachUp(p -> { + if (p instanceof Unexecutable) { + failures.add(fail(p, "Unexecutable item")); + } + p.forEachExpressionsUp(e -> { + if (e.childrenResolved() && !e.resolved()) { + failures.add(fail(e, "Unresolved expression")); + } + }); + }); + + return failures; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatterCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatterCursor.java new file mode 100644 index 0000000000000..94422dd2888bf --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatterCursor.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.session.Configuration; +import org.elasticsearch.xpack.sql.session.Cursor; +import org.elasticsearch.xpack.sql.session.RowSet; + +import java.io.IOException; +import java.util.Objects; + +/** + * The cursor that wraps all necessary information for textual representation of the result table + */ +public class CliFormatterCursor implements Cursor { + public static final String NAME = "f"; + + private final Cursor delegate; + private final CliFormatter formatter; + + /** + * If the newCursor is empty, returns an empty cursor. Otherwise, creates a new + * CliFormatterCursor that wraps the newCursor. + */ + public static Cursor wrap(Cursor newCursor, CliFormatter formatter) { + if (newCursor == EMPTY) { + return EMPTY; + } + return new CliFormatterCursor(newCursor, formatter); + } + + private CliFormatterCursor(Cursor delegate, CliFormatter formatter) { + this.delegate = delegate; + this.formatter = formatter; + } + + public CliFormatterCursor(StreamInput in) throws IOException { + delegate = in.readNamedWriteable(Cursor.class); + formatter = new CliFormatter(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeNamedWriteable(delegate); + formatter.writeTo(out); + } + + public CliFormatter getCliFormatter() { + return formatter; + } + + @Override + public void nextPage(Configuration cfg, Client client, NamedWriteableRegistry registry, ActionListener listener) { + delegate.nextPage(cfg, client, registry, listener); + } + + @Override + public void clear(Configuration cfg, Client client, ActionListener listener) { + delegate.clear(cfg, client, listener); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + CliFormatterCursor that = (CliFormatterCursor) o; + return Objects.equals(delegate, that.delegate) && + Objects.equals(formatter, that.formatter); + } + + @Override + public int hashCode() { + return Objects.hash(delegate, formatter); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlClearCursorAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlClearCursorAction.java new file mode 100644 index 0000000000000..4d47ca8c373e1 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlClearCursorAction.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.xpack.sql.plugin.SqlClearCursorAction.REST_ENDPOINT; + +public class RestSqlClearCursorAction extends BaseRestHandler { + public RestSqlClearCursorAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(POST, REST_ENDPOINT, this); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + SqlClearCursorRequest sqlRequest; + try (XContentParser parser = request.contentOrSourceParamParser()) { + sqlRequest = SqlClearCursorRequest.fromXContent(parser, AbstractSqlRequest.Mode.fromString(request.param("mode"))); + } + return channel -> client.executeLocally(SqlClearCursorAction.INSTANCE, sqlRequest, new RestToXContentListener<>(channel)); + } + + @Override + public String getName() { + return "sql_translate_action"; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java new file mode 100644 index 0000000000000..9d043f855fd44 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java @@ -0,0 +1,117 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.Version; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestResponseListener; +import org.elasticsearch.xpack.sql.session.Cursor; +import org.elasticsearch.xpack.sql.session.Cursors; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestRequest.Method.POST; + +public class RestSqlQueryAction extends BaseRestHandler { + + public RestSqlQueryAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(GET, SqlQueryAction.REST_ENDPOINT, this); + controller.registerHandler(POST, SqlQueryAction.REST_ENDPOINT, this); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + SqlQueryRequest sqlRequest; + try (XContentParser parser = request.contentOrSourceParamParser()) { + sqlRequest = SqlQueryRequest.fromXContent(parser, AbstractSqlRequest.Mode.fromString(request.param("mode"))); + } + + /* + * Since we support {@link TextFormat} and + * {@link XContent} outputs, we can't use {@link RestToXContentListener} + * like everything else. We want to stick as closely as possible to + * Elasticsearch's defaults though, while still layering in ways to + * control the output more easily. + * + * First we find the string that the user used to specify the response + * format. If there is a {@code format} parameter, we use that. If there + * isn't, but there is an {@code Accept} header, then we use that. If there + * isn't, then we use the {@code Content-Type} header, which is required. + */ + String accept = request.param("format"); + if (accept == null) { + accept = request.header("Accept"); + if ("*/*".equals(accept)) { + // */* means "I don't care" which we should treat like not specifying the header + accept = null; + } + } + if (accept == null) { + accept = request.header("Content-Type"); + } + assert accept != null : "The Content-Type header is required"; + + /* + * Second, we pick the actual content type to use by first parsing the + * string from the previous step as an {@linkplain XContent} value. If + * that doesn't parse we parse it as a {@linkplain TextFormat} value. If + * that doesn't parse it'll throw an {@link IllegalArgumentException} + * which we turn into a 400 error. + */ + XContentType xContentType = accept == null ? 
XContentType.JSON : XContentType.fromMediaTypeOrFormat(accept); + if (xContentType != null) { + return channel -> client.execute(SqlQueryAction.INSTANCE, sqlRequest, new RestResponseListener(channel) { + @Override + public RestResponse buildResponse(SqlQueryResponse response) throws Exception { + XContentBuilder builder = XContentBuilder.builder(xContentType.xContent()); + response.toXContent(builder, request); + return new BytesRestResponse(RestStatus.OK, builder); + } + }); + } + + TextFormat textFormat = TextFormat.fromMediaTypeOrFormat(accept); + + long startNanos = System.nanoTime(); + return channel -> client.execute(SqlQueryAction.INSTANCE, sqlRequest, new RestResponseListener(channel) { + @Override + public RestResponse buildResponse(SqlQueryResponse response) throws Exception { + Cursor cursor = Cursors.decodeFromString(sqlRequest.cursor()); + final String data = textFormat.format(cursor, request, response); + + RestResponse restResponse = new BytesRestResponse(RestStatus.OK, textFormat.contentType(request), + data.getBytes(StandardCharsets.UTF_8)); + + Cursor responseCursor = textFormat.wrapCursor(cursor, response); + + if (responseCursor != Cursor.EMPTY) { + restResponse.addHeader("Cursor", Cursors.encodeToString(Version.CURRENT, responseCursor)); + } + restResponse.addHeader("Took-nanos", Long.toString(System.nanoTime() - startNanos)); + + return restResponse; + } + }); + } + + @Override + public String getName() { + return "xpack_sql_action"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlTranslateAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlTranslateAction.java new file mode 100644 index 0000000000000..6167e4e571dff --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlTranslateAction.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestRequest.Method.POST; + +/** + * REST action for translating SQL queries into ES requests + */ +public class RestSqlTranslateAction extends BaseRestHandler { + public RestSqlTranslateAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(GET, "/_xpack/sql/translate", this); + controller.registerHandler(POST, "/_xpack/sql/translate", this); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + SqlTranslateRequest sqlRequest; + try (XContentParser parser = request.contentOrSourceParamParser()) { + sqlRequest = SqlTranslateRequest.fromXContent(parser, AbstractSqlRequest.Mode.fromString(request.param("mode"))); + } + return channel -> client.executeLocally(SqlTranslateAction.INSTANCE, sqlRequest, new RestToXContentListener<>(channel)); + } + + @Override + public String getName() { + return "sql_translate_action"; + } +} + diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlLicenseChecker.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlLicenseChecker.java new file mode 100644 index 0000000000000..8a3ef973d6bf1 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlLicenseChecker.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import java.util.function.Consumer; + +/** + * Determines if different features of SQL should be enabled + */ +public class SqlLicenseChecker { + + private final Consumer checkIfSqlAllowed; + + public SqlLicenseChecker(Consumer checkIfSqlAllowed) { + this.checkIfSqlAllowed = checkIfSqlAllowed; + } + + /** + * Throws an ElasticsearchSecurityException if the specified mode is not allowed + */ + public void checkIfSqlAllowed(AbstractSqlRequest.Mode mode) { + checkIfSqlAllowed.accept(mode); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java new file mode 100644 index 0000000000000..24bf8f15aa7de --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java @@ -0,0 +1,121 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver; +import org.elasticsearch.xpack.sql.execution.PlanExecutor; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.function.Supplier; + +import static java.util.Collections.emptyList; + +public class SqlPlugin extends Plugin implements ActionPlugin { + + private final boolean enabled; + private final SqlLicenseChecker sqlLicenseChecker; + + SqlPlugin(boolean enabled, SqlLicenseChecker sqlLicenseChecker) { + this.enabled = enabled; + this.sqlLicenseChecker = sqlLicenseChecker; + } + + public SqlPlugin(Settings settings) { + this(XPackSettings.SQL_ENABLED.get(settings), new SqlLicenseChecker( + (mode) -> { + XPackLicenseState licenseState = XPackPlugin.getSharedLicenseState(); + switch (mode) { + case JDBC: + if (licenseState.isJdbcAllowed() == false) { + throw LicenseUtils.newComplianceException("jdbc"); + } + break; + case PLAIN: + if (licenseState.isSqlAllowed() == false) { + throw LicenseUtils.newComplianceException(XPackField.SQL); + } + break; + default: + throw new IllegalArgumentException("Unknown SQL mode " + mode); + } + } + )); + } + + @Override + public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, ScriptService scriptService, + NamedXContentRegistry xContentRegistry, Environment environment, + NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { + + return createComponents(client, clusterService.getClusterName().value(), namedWriteableRegistry); + } + + /** + * Create components used by the sql plugin. 
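+     *
+     * An illustrative sketch only (the {@code client} and {@code registry} values below are
+     * hypothetical placeholders):
+     * <pre>{@code
+     * SqlPlugin plugin = new SqlPlugin(Settings.EMPTY);
+     * Collection components = plugin.createComponents(client, "my-cluster", registry);
+     * // empty when SQL is disabled; otherwise the SqlLicenseChecker, the IndexResolver
+     * // and the PlanExecutor
+     * }</pre>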
+ */ + Collection createComponents(Client client, String clusterName, NamedWriteableRegistry namedWriteableRegistry) { + if (false == enabled) { + return emptyList(); + } + IndexResolver indexResolver = new IndexResolver(client, clusterName); + return Arrays.asList(sqlLicenseChecker, indexResolver, new PlanExecutor(client, indexResolver, namedWriteableRegistry)); + } + + @Override + public List getRestHandlers(Settings settings, RestController restController, + ClusterSettings clusterSettings, IndexScopedSettings indexScopedSettings, + SettingsFilter settingsFilter, IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster) { + + if (false == enabled) { + return emptyList(); + } + + return Arrays.asList(new RestSqlQueryAction(settings, restController), + new RestSqlTranslateAction(settings, restController), + new RestSqlClearCursorAction(settings, restController)); + } + + @Override + public List> getActions() { + if (false == enabled) { + return emptyList(); + } + + return Arrays.asList(new ActionHandler<>(SqlQueryAction.INSTANCE, TransportSqlQueryAction.class), + new ActionHandler<>(SqlTranslateAction.INSTANCE, TransportSqlTranslateAction.class), + new ActionHandler<>(SqlClearCursorAction.INSTANCE, TransportSqlClearCursorAction.class)); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java new file mode 100644 index 0000000000000..349a481cf660f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java @@ -0,0 +1,300 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.xpack.sql.session.Cursor; +import org.elasticsearch.xpack.sql.session.Cursors; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.util.List; +import java.util.Locale; +import java.util.Objects; +import java.util.function.Function; + +/** + * Templating class for displaying SQL responses in text formats. + */ + +// TODO are we sure toString is correct here? What about dates that come back as longs. +// Tracked by https://github.com/elastic/x-pack-elasticsearch/issues/3081 +enum TextFormat { + + /** + * Default text writer. + * + * The implementation is a bit weird since state needs to be passed around, namely the formatter + * since it is initialized based on the first page of data. + * To avoid leaking the formatter, it gets discovered again in the wrapping method to attach it + * to the next cursor and so on. + */ + PLAIN_TEXT() { + @Override + String format(Cursor cursor, RestRequest request, SqlQueryResponse response) { + final CliFormatter formatter; + if (cursor instanceof CliFormatterCursor) { + formatter = ((CliFormatterCursor) cursor).getCliFormatter(); + return formatter.formatWithoutHeader(response); + } else { + formatter = new CliFormatter(response); + return formatter.formatWithHeader(response); + } + } + + @Override + Cursor wrapCursor(Cursor oldCursor, SqlQueryResponse response) { + CliFormatter formatter = (oldCursor instanceof CliFormatterCursor) ? 
+                ((CliFormatterCursor) oldCursor).getCliFormatter() : new CliFormatter(response);
+            return CliFormatterCursor.wrap(super.wrapCursor(oldCursor, response), formatter);
+        }
+
+        @Override
+        String shortName() {
+            return "txt";
+        }
+
+        @Override
+        String contentType() {
+            return "text/plain";
+        }
+
+        @Override
+        protected String delimiter() {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        protected String eol() {
+            throw new UnsupportedOperationException();
+        }
+    },
+
+    /**
+     * Comma Separated Values implementation.
+     *
+     * Based on:
+     * https://tools.ietf.org/html/rfc4180
+     * https://www.iana.org/assignments/media-types/text/csv
+     * https://www.w3.org/TR/sparql11-results-csv-tsv/
+     *
+     */
+    CSV() {
+
+        @Override
+        protected String delimiter() {
+            return ",";
+        }
+
+        @Override
+        protected String eol() {
+            // CRLF
+            return "\r\n";
+        }
+
+        @Override
+        String shortName() {
+            return "csv";
+        }
+
+        @Override
+        String contentType() {
+            return "text/csv";
+        }
+
+        @Override
+        String contentType(RestRequest request) {
+            return contentType() + "; charset=utf-8; header=" + (hasHeader(request) ? "present" : "absent");
+        }
+
+        @Override
+        String maybeEscape(String value) {
+            boolean needsEscaping = false;
+
+            for (int i = 0; i < value.length(); i++) {
+                char c = value.charAt(i);
+                if (c == '"' || c == ',' || c == '\n' || c == '\r') {
+                    needsEscaping = true;
+                    break;
+                }
+            }
+
+            if (needsEscaping) {
+                StringBuilder sb = new StringBuilder();
+
+                sb.append('"');
+                for (int i = 0; i < value.length(); i++) {
+                    char c = value.charAt(i);
+                    if (value.charAt(i) == '"') {
+                        sb.append('"');
+                    }
+                    sb.append(c);
+                }
+                sb.append('"');
+                value = sb.toString();
+            }
+            return value;
+        }
+
+        @Override
+        boolean hasHeader(RestRequest request) {
+            String header = request.param("header");
+            if (header == null) {
+                List values = request.getAllHeaderValues("Accept");
+                if (values != null) {
+                    // header is a parameter specified by ; so try breaking it down
+                    for (String value : values) {
+                        String[] params = Strings.tokenizeToStringArray(value, ";");
+                        for (String param : params) {
+                            if (param.toLowerCase(Locale.ROOT).equals("header=absent")) {
+                                return false;
+                            }
+                        }
+                    }
+                }
+                return true;
+            } else {
+                return !header.toLowerCase(Locale.ROOT).equals("absent");
+            }
+        }
+    },
+
+    TSV() {
+        @Override
+        protected String delimiter() {
+            return "\t";
+        }
+
+        @Override
+        protected String eol() {
+            // only LF
+            return "\n";
+        }
+
+        @Override
+        String shortName() {
+            return "tsv";
+        }
+
+        @Override
+        String contentType() {
+            return "text/tab-separated-values";
+        }
+
+        @Override
+        String contentType(RestRequest request) {
+            return contentType() + "; charset=utf-8";
+        }
+
+        @Override
+        String maybeEscape(String value) {
+            StringBuilder sb = new StringBuilder();
+
+            for (int i = 0; i < value.length(); i++) {
+                char c = value.charAt(i);
+                switch (c) {
+                    case '\n' :
+                        sb.append("\\n");
+                        break;
+                    case '\t' :
+                        sb.append("\\t");
+                        break;
+                    default:
+                        sb.append(c);
+                }
+            }
+
+            return sb.toString();
+        }
+    };
+
+
+    String format(Cursor cursor, RestRequest request, SqlQueryResponse response) {
+        StringBuilder sb = new StringBuilder();
+
+        boolean header = hasHeader(request);
+
+        if (header) {
+            row(sb, response.columns(), ColumnInfo::name);
+        }
+
+        for (List row : response.rows()) {
+            row(sb, row, f -> Objects.toString(f, StringUtils.EMPTY));
+        }
+
+        return sb.toString();
+    }
+
+    boolean hasHeader(RestRequest request) {
+        return true;
+    }
+
+    Cursor wrapCursor(Cursor oldCursor, SqlQueryResponse response) {
+        return
Cursors.decodeFromString(response.cursor()); + } + + static TextFormat fromMediaTypeOrFormat(String accept) { + for (TextFormat text : values()) { + String contentType = text.contentType(); + if (contentType.equalsIgnoreCase(accept) + || accept.toLowerCase(Locale.ROOT).startsWith(contentType + ";") + || text.shortName().equalsIgnoreCase(accept)) { + return text; + } + } + + throw new IllegalArgumentException("invalid format [" + accept + "]"); + } + + /** + * Short name typically used by format parameter. + * Can differ from the IANA mime type. + */ + abstract String shortName(); + + + /** + * Formal IANA mime type. + */ + abstract String contentType(); + + /** + * Content type depending on the request. + * Might be used by some formatters (like CSV) to specify certain metadata like + * whether the header is returned or not. + */ + String contentType(RestRequest request) { + return contentType(); + } + + // utility method for consuming a row. + void row(StringBuilder sb, List row, Function toString) { + for (int i = 0; i < row.size(); i++) { + sb.append(maybeEscape(toString.apply(row.get(i)))); + if (i < row.size() - 1) { + sb.append(delimiter()); + } + } + sb.append(eol()); + } + + /** + * Delimiter between fields + */ + protected abstract String delimiter(); + + /** + * String indicating end-of-line or row. + */ + protected abstract String eol(); + + /** + * Method used for escaping (if needed) a given value. + */ + String maybeEscape(String value) { + return value; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlClearCursorAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlClearCursorAction.java new file mode 100644 index 0000000000000..68752928166ea --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlClearCursorAction.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.sql.execution.PlanExecutor; +import org.elasticsearch.xpack.sql.session.Configuration; +import org.elasticsearch.xpack.sql.session.Cursor; +import org.elasticsearch.xpack.sql.session.Cursors; + +import static org.elasticsearch.xpack.sql.plugin.SqlClearCursorAction.NAME; + +public class TransportSqlClearCursorAction extends HandledTransportAction { + private final PlanExecutor planExecutor; + private final SqlLicenseChecker sqlLicenseChecker; + + @Inject + public TransportSqlClearCursorAction(Settings settings, ThreadPool threadPool, + TransportService transportService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + PlanExecutor planExecutor, + SqlLicenseChecker sqlLicenseChecker) { + super(settings, NAME, threadPool, transportService, actionFilters, SqlClearCursorRequest::new, + indexNameExpressionResolver); + this.planExecutor = planExecutor; + this.sqlLicenseChecker = sqlLicenseChecker; + } + + @Override + protected void doExecute(SqlClearCursorRequest request, ActionListener listener) { + sqlLicenseChecker.checkIfSqlAllowed(request.mode()); + operation(planExecutor, request, listener); + } + + public static void operation(PlanExecutor planExecutor, SqlClearCursorRequest request, + ActionListener listener) { + Cursor cursor = Cursors.decodeFromString(request.getCursor()); + planExecutor.cleanCursor(Configuration.DEFAULT, cursor, ActionListener.wrap( + success -> listener.onResponse(new SqlClearCursorResponse(success)), listener::onFailure)); + } +} + diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java new file mode 100644 index 0000000000000..5b59ced7a494d --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.sql.execution.PlanExecutor; +import org.elasticsearch.xpack.sql.session.Configuration; +import org.elasticsearch.xpack.sql.session.Cursors; +import org.elasticsearch.xpack.sql.session.RowSet; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.type.Schema; + +import java.util.ArrayList; +import java.util.List; + +import static java.util.Collections.unmodifiableList; +import static org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest.Mode.JDBC; + +public class TransportSqlQueryAction extends HandledTransportAction { + private final PlanExecutor planExecutor; + private final SqlLicenseChecker sqlLicenseChecker; + + @Inject + public TransportSqlQueryAction(Settings settings, ThreadPool threadPool, + TransportService transportService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + PlanExecutor planExecutor, + SqlLicenseChecker sqlLicenseChecker) { + super(settings, SqlQueryAction.NAME, threadPool, transportService, actionFilters, SqlQueryRequest::new, + indexNameExpressionResolver); + + this.planExecutor = planExecutor; + this.sqlLicenseChecker = sqlLicenseChecker; + } + + @Override + protected void doExecute(SqlQueryRequest request, ActionListener listener) { + sqlLicenseChecker.checkIfSqlAllowed(request.mode()); + operation(planExecutor, request, listener); + } + + /** + * Actual implementation of the action. Statically available to support embedded mode. 
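+     *
+     * A minimal embedded-mode sketch (the {@code planExecutor} and {@code request} instances
+     * below are assumed to already exist):
+     * <pre>{@code
+     * TransportSqlQueryAction.operation(planExecutor, request, ActionListener.wrap(
+     *     response -> System.out.println("fetched " + response.rows().size() + " rows"),
+     *     e -> { throw new RuntimeException(e); }));
+     * }</pre>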
+ */ + public static void operation(PlanExecutor planExecutor, SqlQueryRequest request, ActionListener listener) { + // The configuration is always created however when dealing with the next page, only the timeouts are relevant + // the rest having default values (since the query is already created) + Configuration cfg = new Configuration(request.timeZone(), request.fetchSize(), request.requestTimeout(), request.pageTimeout(), + request.filter()); + + if (Strings.hasText(request.cursor()) == false) { + planExecutor.sql(cfg, request.query(), request.params(), + ActionListener.wrap(rowSet -> listener.onResponse(createResponse(request, rowSet)), listener::onFailure)); + } else { + planExecutor.nextPage(cfg, Cursors.decodeFromString(request.cursor()), + ActionListener.wrap(rowSet -> listener.onResponse(createResponse(rowSet, null)), listener::onFailure)); + } + } + + static SqlQueryResponse createResponse(SqlQueryRequest request, SchemaRowSet rowSet) { + List columns = new ArrayList<>(rowSet.columnCount()); + for (Schema.Entry entry : rowSet.schema()) { + if (request.mode() == JDBC) { + columns.add(new ColumnInfo("", entry.name(), entry.type().esType, entry.type().jdbcType, + entry.type().displaySize)); + } else { + columns.add(new ColumnInfo("", entry.name(), entry.type().esType)); + } + } + columns = unmodifiableList(columns); + return createResponse(rowSet, columns); + } + + static SqlQueryResponse createResponse(RowSet rowSet, List columns) { + List> rows = new ArrayList<>(); + rowSet.forEachRow(rowView -> { + List row = new ArrayList<>(rowView.columnCount()); + rowView.forEachColumn(row::add); + rows.add(unmodifiableList(row)); + }); + + return new SqlQueryResponse( + Cursors.encodeToString(Version.CURRENT, rowSet.nextPageCursor()), + columns, + rows); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlTranslateAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlTranslateAction.java new file mode 100644 index 0000000000000..8f494231727eb --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlTranslateAction.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.sql.execution.PlanExecutor; +import org.elasticsearch.xpack.sql.session.Configuration; + +/** + * Transport action for translating SQL queries into ES requests + */ +public class TransportSqlTranslateAction extends HandledTransportAction { + private final PlanExecutor planExecutor; + private final SqlLicenseChecker sqlLicenseChecker; + + @Inject + public TransportSqlTranslateAction(Settings settings, ThreadPool threadPool, + TransportService transportService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + PlanExecutor planExecutor, + SqlLicenseChecker sqlLicenseChecker) { + super(settings, SqlTranslateAction.NAME, threadPool, transportService, actionFilters, + SqlTranslateRequest::new, indexNameExpressionResolver); + + this.planExecutor = planExecutor; + this.sqlLicenseChecker = sqlLicenseChecker; + } + + @Override + protected void doExecute(SqlTranslateRequest request, ActionListener listener) { + sqlLicenseChecker.checkIfSqlAllowed(request.mode()); + + Configuration cfg = new Configuration(request.timeZone(), request.fetchSize(), + request.requestTimeout(), request.pageTimeout(), request.filter()); + + planExecutor.searchSource(cfg, request.query(), request.params(), ActionListener.wrap( + searchSourceBuilder -> listener.onResponse(new SqlTranslateResponse(searchSourceBuilder)), listener::onFailure)); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Agg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Agg.java new file mode 100644 index 0000000000000..09c92a77ac3aa --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Agg.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import java.util.Locale; +import java.util.Objects; + +import static java.lang.String.format; + +/** + * Base class holding common properties for Elasticsearch aggregations. 
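+ *
+ * For illustration (the id and field names below are invented), a concrete leaf agg simply
+ * pairs an identifier with the field it targets:
+ * <pre>{@code
+ * LeafAgg avg = new AvgAgg("avg_price", "price");   // an "avg" aggregation named "avg_price" on field "price"
+ * LeafAgg max = new MaxAgg("max_price", "price");   // a "max" aggregation named "max_price" on the same field
+ * }</pre>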
+ */ +public abstract class Agg { + + private final String id; + private final String fieldName; + + Agg(String id, String fieldName) { + this.id = id; + this.fieldName = fieldName; + } + + public String id() { + return id; + } + + public String fieldName() { + return fieldName; + } + + @Override + public int hashCode() { + return Objects.hash(id, fieldName); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Agg other = (Agg) obj; + return Objects.equals(id, other.id) + && Objects.equals(fieldName, other.fieldName); + } + + @Override + public String toString() { + return format(Locale.ROOT, "%s(%s)", getClass().getSimpleName(), fieldName); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AggFilter.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AggFilter.java new file mode 100644 index 0000000000000..60f621b38a3e2 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AggFilter.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import java.util.Collection; +import java.util.Map; +import java.util.Objects; + +import org.elasticsearch.script.Script; +import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.util.Check; + +import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.bucketSelector; + +public class AggFilter extends PipelineAgg { + + private final ScriptTemplate scriptTemplate; + private final Map aggPaths; + + public AggFilter(String name, ScriptTemplate scriptTemplate) { + super(name); + Check.isTrue(scriptTemplate != null, "a valid script is required"); + this.scriptTemplate = scriptTemplate; + this.aggPaths = scriptTemplate.aggPaths(); + } + + public Map aggPaths() { + return aggPaths; + } + + public Collection aggRefs() { + return scriptTemplate.aggRefs(); + } + + public ScriptTemplate scriptTemplate() { + return scriptTemplate; + } + + @Override + PipelineAggregationBuilder toBuilder() { + Script script = scriptTemplate.toPainless(); + return bucketSelector(name(), aggPaths, script); + } + + @Override + public int hashCode() { + return Objects.hash(name(), scriptTemplate); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + AggFilter other = (AggFilter) obj; + return Objects.equals(name(), other.name()) + && Objects.equals(scriptTemplate(), other.scriptTemplate()); + } + + @Override + public String toString() { + return scriptTemplate.toString(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java new file mode 100644 index 0000000000000..5fb8a754f0f54 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java @@ -0,0 +1,176 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; +import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.querydsl.container.Sort.Direction; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Objects; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.xpack.sql.util.CollectionUtils.combine; +import static org.elasticsearch.xpack.sql.util.StringUtils.EMPTY; + +/** + * SQL Aggregations associated with a query. + * + * This class maps the SQL GroupBy's (and co) to ES composite agg. + * While the composite agg doesn't require a dedicated structure, for folding purposes, this structure + * tracks the relationship between each key and its sub-aggs or pipelines. + * + * Since sub-aggs can only refer to their group key and these are on the root-level, the tree can have at most + * 2 levels - the grouping and its sub-aggs. + * + * In case no group is specified (which maps to the default group in SQL), due to ES nature a 'dummy' filter agg + * is used. + */ +public class Aggs { + + public static final String ROOT_GROUP_NAME = "groupby"; + + public static final GroupByKey IMPLICIT_GROUP_KEY = new GroupByKey(ROOT_GROUP_NAME, EMPTY, null) { + + @Override + public CompositeValuesSourceBuilder asValueSource() { + throw new SqlIllegalArgumentException("Default group does not translate to an aggregation"); + } + + @Override + protected GroupByKey copy(String id, String fieldName, Direction direction) { + return this; + } + }; + + private final List groups; + private final List metricAggs; + private final List pipelineAggs; + + public Aggs() { + this(emptyList(), emptyList(), emptyList()); + } + + public Aggs(List groups, List metricAggs, List pipelineAggs) { + this.groups = groups; + + this.metricAggs = metricAggs; + this.pipelineAggs = pipelineAggs; + } + + public List groups() { + return groups; + } + + public AggregationBuilder asAggBuilder() { + AggregationBuilder rootGroup = null; + + if (groups.isEmpty() && metricAggs.isEmpty()) { + return null; + } + + // if there's a group, move everything under the composite agg + if (!groups.isEmpty()) { + List> keys = new ArrayList<>(groups.size()); + // first iterate to compute the sources + for (GroupByKey key : groups) { + keys.add(key.asValueSource()); + } + + rootGroup = new CompositeAggregationBuilder(ROOT_GROUP_NAME, keys); + + } else { + rootGroup = new FiltersAggregationBuilder(ROOT_GROUP_NAME, matchAllQuery()); + } + + for (LeafAgg agg : metricAggs) { + rootGroup.subAggregation(agg.toBuilder()); + } + + for (PipelineAgg agg : pipelineAggs) { + rootGroup.subAggregation(agg.toBuilder()); + } + + return rootGroup; + } + + public boolean useImplicitGroupBy() { + return groups.isEmpty(); + } + + public Aggs addGroups(Collection groups) { + return new Aggs(combine(this.groups, groups), metricAggs, pipelineAggs); + } + 
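+
+    /*
+     * Illustrative composition (the identifiers are invented): grouping on a column and attaching
+     * a metric yields a composite "groupby" aggregation with the metric nested under it, e.g.
+     *
+     *   Aggs aggs = new Aggs()
+     *       .addGroups(singletonList(new GroupByColumnKey("gender", "gender")))
+     *       .addAgg(new AvgAgg("avg_salary", "salary"));
+     *   AggregationBuilder builder = aggs.asAggBuilder();
+     */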
+ public Aggs addAgg(LeafAgg agg) { + return new Aggs(groups, combine(metricAggs, agg), pipelineAggs); + } + + public Aggs addAgg(PipelineAgg pipelineAgg) { + return new Aggs(groups, metricAggs, combine(pipelineAggs, pipelineAgg)); + } + + public GroupByKey findGroupForAgg(String groupOrAggId) { + for (GroupByKey group : this.groups) { + if (groupOrAggId.equals(group.id())) { + return group; + } + } + + // maybe it's the default group agg ? + for (Agg agg : metricAggs) { + if (groupOrAggId.equals(agg.id())) { + return IMPLICIT_GROUP_KEY; + } + } + + return null; + } + + public Aggs updateGroup(GroupByKey group) { + List groups = new ArrayList<>(this.groups); + for (int i = 0; i < groups.size(); i++) { + GroupByKey g = groups.get(i); + if (group.id().equals(g.id())) { + groups.set(i, group); + return with(groups); + } + } + throw new SqlIllegalArgumentException("Could not find group named {}", group.id()); + } + + public Aggs with(List groups) { + return new Aggs(groups, metricAggs, pipelineAggs); + } + + @Override + public int hashCode() { + return Objects.hash(groups, metricAggs, pipelineAggs); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Aggs other = (Aggs) obj; + return Objects.equals(groups, other.groups) + && Objects.equals(metricAggs, other.metricAggs) + && Objects.equals(pipelineAggs, other.pipelineAggs); + + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AndAggFilter.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AndAggFilter.java new file mode 100644 index 0000000000000..920be9e8198f8 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AndAggFilter.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.xpack.sql.expression.function.scalar.script.Params; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import java.util.Locale; + +import static java.lang.String.format; + +public class AndAggFilter extends AggFilter { + + public AndAggFilter(AggFilter left, AggFilter right) { + this(left.name() + "_&_" + right.name(), left, right); + } + + public AndAggFilter(String name, AggFilter left, AggFilter right) { + super(name, and(left.scriptTemplate(), right.scriptTemplate())); + } + + private static ScriptTemplate and(ScriptTemplate left, ScriptTemplate right) { + String template = format(Locale.ROOT, "( %s ) && ( %s )", left.template(), right.template()); + Params params = new ParamsBuilder().script(left.params()).script(right.params()).build(); + return new ScriptTemplate(template, params, DataType.BOOLEAN); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AvgAgg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AvgAgg.java new file mode 100644 index 0000000000000..b33411ed7997b --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/AvgAgg.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.AggregationBuilder; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; + +public class AvgAgg extends LeafAgg { + + public AvgAgg(String id, String fieldName) { + super(id, fieldName); + } + + @Override + AggregationBuilder toBuilder() { + return avg(id()).field(fieldName()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/CardinalityAgg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/CardinalityAgg.java new file mode 100644 index 0000000000000..f4fb20428c59f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/CardinalityAgg.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.AggregationBuilder; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.cardinality; + +public class CardinalityAgg extends LeafAgg { + + public CardinalityAgg(String id, String fieldName) { + super(id, fieldName); + } + + @Override AggregationBuilder toBuilder() { + return cardinality(id()).field(fieldName()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/ExtendedStatsAgg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/ExtendedStatsAgg.java new file mode 100644 index 0000000000000..5dbc67abd1fc0 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/ExtendedStatsAgg.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.AggregationBuilder; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.extendedStats; + +public class ExtendedStatsAgg extends LeafAgg { + + public ExtendedStatsAgg(String id, String fieldName) { + super(id, fieldName); + } + + @Override + AggregationBuilder toBuilder() { + return extendedStats(id()).field(fieldName()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByColumnKey.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByColumnKey.java new file mode 100644 index 0000000000000..e98770318d218 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByColumnKey.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder; +import org.elasticsearch.xpack.sql.querydsl.container.Sort.Direction; + +/** + * GROUP BY key for regular fields. + */ +public class GroupByColumnKey extends GroupByKey { + + public GroupByColumnKey(String id, String fieldName) { + this(id, fieldName, null); + } + + public GroupByColumnKey(String id, String fieldName, Direction direction) { + super(id, fieldName, direction); + } + + @Override + public TermsValuesSourceBuilder asValueSource() { + return new TermsValuesSourceBuilder(id()) + .field(fieldName()) + .order(direction().asOrder()); + } + + @Override + protected GroupByKey copy(String id, String fieldName, Direction direction) { + return new GroupByColumnKey(id, fieldName, direction); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateKey.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateKey.java new file mode 100644 index 0000000000000..43c80e75057e9 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByDateKey.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.bucket.composite.DateHistogramValuesSourceBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.xpack.sql.querydsl.container.Sort.Direction; +import org.joda.time.DateTimeZone; + +import java.util.Objects; +import java.util.TimeZone; + +/** + * GROUP BY key specific for date fields. + */ +public class GroupByDateKey extends GroupByKey { + + private final String interval; + private final TimeZone timeZone; + + public GroupByDateKey(String id, String fieldName, String interval, TimeZone timeZone) { + this(id, fieldName, null, interval, timeZone); + } + + public GroupByDateKey(String id, String fieldName, Direction direction, String interval, TimeZone timeZone) { + super(id, fieldName, direction); + this.interval = interval; + this.timeZone = timeZone; + } + + public String interval() { + return interval; + } + + public TimeZone timeZone() { + return timeZone; + } + + @Override + public DateHistogramValuesSourceBuilder asValueSource() { + return new DateHistogramValuesSourceBuilder(id()) + .field(fieldName()) + .dateHistogramInterval(new DateHistogramInterval(interval)) + .timeZone(DateTimeZone.forTimeZone(timeZone)); + } + + @Override + protected GroupByKey copy(String id, String fieldName, Direction direction) { + return new GroupByDateKey(id, fieldName, direction, interval, timeZone); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), interval, timeZone); + } + + @Override + public boolean equals(Object obj) { + if (super.equals(obj)) { + GroupByDateKey other = (GroupByDateKey) obj; + return Objects.equals(interval, other.interval) + && Objects.equals(timeZone, other.timeZone); + } + return false; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByKey.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByKey.java new file mode 100644 index 0000000000000..fd2bd5799df17 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByKey.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder; +import org.elasticsearch.xpack.sql.querydsl.container.Sort.Direction; + +import java.util.Objects; + +/** + * A key for a SQL GroupBy which maps to value source for composite aggregation. + */ +public abstract class GroupByKey extends Agg { + + private final Direction direction; + + GroupByKey(String id, String fieldName, Direction direction) { + super(id, fieldName); + // ASC is the default order of CompositeValueSource + this.direction = direction == null ? Direction.ASC : direction; + } + + public Direction direction() { + return direction; + } + + public abstract CompositeValuesSourceBuilder asValueSource(); + + protected abstract GroupByKey copy(String id, String fieldName, Direction direction); + + public GroupByKey with(Direction direction) { + return this.direction == direction ? 
this : copy(id(), fieldName(), direction); + } + + @Override + public int hashCode() { + return Objects.hash(id(), fieldName(), direction); + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) && Objects.equals(direction, ((GroupByKey) obj).direction); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByScriptKey.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByScriptKey.java new file mode 100644 index 0000000000000..a4af765d034bf --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/GroupByScriptKey.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder; +import org.elasticsearch.search.aggregations.support.ValueType; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.querydsl.container.Sort.Direction; + +import java.util.Objects; + +/** + * GROUP BY key for scripts (typically caused by functions). + */ +public class GroupByScriptKey extends GroupByKey { + + private final ScriptTemplate script; + + public GroupByScriptKey(String id, String fieldName, ScriptTemplate script) { + this(id, fieldName, null, script); + } + + public GroupByScriptKey(String id, String fieldName, Direction direction, ScriptTemplate script) { + super(id, fieldName, direction); + this.script = script; + } + + public ScriptTemplate script() { + return script; + } + + @Override + public TermsValuesSourceBuilder asValueSource() { + TermsValuesSourceBuilder builder = new TermsValuesSourceBuilder(id()) + .script(script.toPainless()) + .order(direction().asOrder()); + + if (script.outputType().isNumeric()) { + builder.valueType(ValueType.NUMBER); + } + + return builder; + } + + @Override + protected GroupByKey copy(String id, String fieldName, Direction direction) { + return new GroupByScriptKey(id, fieldName, direction, script); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), script); + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) && Objects.equals(script, ((GroupByScriptKey) obj).script); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/LeafAgg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/LeafAgg.java new file mode 100644 index 0000000000000..883d772f2ac8c --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/LeafAgg.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.AggregationBuilder; + +public abstract class LeafAgg extends Agg { + + LeafAgg(String id, String fieldName) { + super(id, fieldName); + } + + abstract AggregationBuilder toBuilder(); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MatrixStatsAgg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MatrixStatsAgg.java new file mode 100644 index 0000000000000..43500e9b6f723 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MatrixStatsAgg.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.AggregationBuilder; + +import java.util.List; + +import static org.elasticsearch.search.aggregations.MatrixStatsAggregationBuilders.matrixStats; + +public class MatrixStatsAgg extends LeafAgg { + + private final List fields; + + public MatrixStatsAgg(String id, List fields) { + super(id, ""); + this.fields = fields; + } + + @Override + AggregationBuilder toBuilder() { + return matrixStats(id()).fields(fields); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MaxAgg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MaxAgg.java new file mode 100644 index 0000000000000..2266cd7c0c485 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MaxAgg.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.AggregationBuilder; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.max; + +public class MaxAgg extends LeafAgg { + + public MaxAgg(String id, String fieldName) { + super(id, fieldName); + } + + @Override + AggregationBuilder toBuilder() { + return max(id()).field(fieldName()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MinAgg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MinAgg.java new file mode 100644 index 0000000000000..4d7e3703f5c49 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MinAgg.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.AggregationBuilder; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.min; + +public class MinAgg extends LeafAgg { + + public MinAgg(String id, String fieldName) { + super(id, fieldName); + } + + @Override + AggregationBuilder toBuilder() { + return min(id()).field(fieldName()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MultiFieldAgg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MultiFieldAgg.java new file mode 100644 index 0000000000000..ccce45731708f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/MultiFieldAgg.java @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +public abstract class MultiFieldAgg { + +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/OrAggFilter.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/OrAggFilter.java new file mode 100644 index 0000000000000..e06253cc75bec --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/OrAggFilter.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.xpack.sql.expression.function.scalar.script.Params; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ParamsBuilder; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; + +import java.util.Locale; + +import static java.lang.String.format; + +public class OrAggFilter extends AggFilter { + + public OrAggFilter(AggFilter left, AggFilter right) { + this(left.name() + "_|_" + right.name(), left, right); + } + + public OrAggFilter(String name, AggFilter left, AggFilter right) { + super(name, and(left.scriptTemplate(), right.scriptTemplate())); + } + + private static ScriptTemplate and(ScriptTemplate left, ScriptTemplate right) { + String template = format(Locale.ROOT, "( %s ) || ( %s )", left.template(), right.template()); + Params params = new ParamsBuilder().script(left.params()).script(right.params()).build(); + return new ScriptTemplate(template, params, DataType.BOOLEAN); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/PercentileRanksAgg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/PercentileRanksAgg.java new file mode 100644 index 0000000000000..07da84648b506 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/PercentileRanksAgg.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.AggregationBuilder; + +import java.util.List; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.percentileRanks; + +public class PercentileRanksAgg extends LeafAgg { + + private final List values; + + public PercentileRanksAgg(String id, String fieldName, List values) { + super(id, fieldName); + this.values = values; + } + + public List percents() { + return values; + } + + @Override + AggregationBuilder toBuilder() { + return percentileRanks(id(), values.stream().mapToDouble(Double::doubleValue).toArray()) + .field(fieldName()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/PercentilesAgg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/PercentilesAgg.java new file mode 100644 index 0000000000000..deaa780fc1b54 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/PercentilesAgg.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.AggregationBuilder; + +import java.util.List; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.percentiles; + +public class PercentilesAgg extends LeafAgg { + + private final List percents; + + public PercentilesAgg(String id, String fieldName, List percents) { + super(id, fieldName); + this.percents = percents; + } + + public List percents() { + return percents; + } + + @Override + AggregationBuilder toBuilder() { + // TODO: look at keyed + return percentiles(id()) + .field(fieldName()) + .percentiles(percents.stream().mapToDouble(Double::doubleValue).toArray()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/PipelineAgg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/PipelineAgg.java new file mode 100644 index 0000000000000..3b78eba8304e2 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/PipelineAgg.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; + +public abstract class PipelineAgg { + + private final String name; + + public PipelineAgg(String name) { + this.name = name; + } + + public String name() { + return name; + } + + abstract PipelineAggregationBuilder toBuilder(); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/StatsAgg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/StatsAgg.java new file mode 100644 index 0000000000000..5a36a2756b637 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/StatsAgg.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.AggregationBuilder; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.stats; + +public class StatsAgg extends LeafAgg { + + public StatsAgg(String id, String fieldName) { + super(id, fieldName); + } + + @Override + AggregationBuilder toBuilder() { + return stats(id()).field(fieldName()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/SumAgg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/SumAgg.java new file mode 100644 index 0000000000000..f40f5a8042879 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/SumAgg.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.agg; + +import org.elasticsearch.search.aggregations.AggregationBuilder; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; + +public class SumAgg extends LeafAgg { + + public SumAgg(String id, String fieldName) { + super(id, fieldName); + } + + @Override AggregationBuilder toBuilder() { + return sum(id()).field(fieldName()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/AttributeSort.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/AttributeSort.java new file mode 100644 index 0000000000000..0fde127f6345d --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/AttributeSort.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.Attribute; + +public class AttributeSort extends Sort { + + private final Attribute attribute; + + public AttributeSort(Attribute attribute, Direction direction) { + super(direction); + this.attribute = attribute; + } + + public Attribute attribute() { + return attribute; + } + + @Override + public int hashCode() { + return Objects.hash(attribute, direction()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + AttributeSort other = (AttributeSort) obj; + return Objects.equals(direction(), other.direction()) + && Objects.equals(attribute, other.attribute); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ComputedRef.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ComputedRef.java new file mode 100644 index 0000000000000..8d1a55cfdd17b --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ComputedRef.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import org.elasticsearch.xpack.sql.execution.search.FieldExtraction; +import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; + +public class ComputedRef implements FieldExtraction { + + private final ProcessorDefinition processor; + + public ComputedRef(ProcessorDefinition processor) { + this.processor = processor; + } + + public ProcessorDefinition processor() { + return processor; + } + + @Override + public boolean supportedByAggsOnlyQuery() { + return processor.supportedByAggsOnlyQuery(); + } + + @Override + public void collectFields(SqlSourceBuilder sourceBuilder) { + processor.collectFields(sourceBuilder); + } + + @Override + public String toString() { + return processor + "(" + processor + ")"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/FieldReference.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/FieldReference.java new file mode 100644 index 0000000000000..271fbfd57d2fa --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/FieldReference.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import org.elasticsearch.xpack.sql.execution.search.FieldExtraction; + +public abstract class FieldReference implements FieldExtraction { + /** + * Field name. + * + * @return field name. + */ + public abstract String name(); + + @Override + public final boolean supportedByAggsOnlyQuery() { + return false; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/GlobalCountRef.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/GlobalCountRef.java new file mode 100644 index 0000000000000..8c16f6dd545f2 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/GlobalCountRef.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import org.elasticsearch.xpack.sql.execution.search.AggRef; + +/** + * Aggregation reference pointing to the (so called) global count, meaning + * COUNT over the entire data set. 
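+ * For example, the {@code COUNT(*)} of a query without a {@code GROUP BY} resolves to this reference (the total hit count) rather than to a per-group count.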
+ */ +public final class GlobalCountRef extends AggRef { + public static final GlobalCountRef INSTANCE = new GlobalCountRef(); + + @Override + public String toString() { + return "#_Total_Hits_#"; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/GroupByRef.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/GroupByRef.java new file mode 100644 index 0000000000000..cfb66fea992ff --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/GroupByRef.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import org.elasticsearch.xpack.sql.execution.search.AggRef; + +import java.util.TimeZone; + +/** + * Reference to a GROUP BY agg (typically this gets translated to a composite key). + */ +public class GroupByRef extends AggRef { + + public enum Property { + VALUE, COUNT; + } + + private final String key; + private final Property property; + private final TimeZone timeZone; + + public GroupByRef(String key) { + this(key, null, null); + } + + public GroupByRef(String key, Property property, TimeZone timeZone) { + this.key = key; + this.property = property == null ? Property.VALUE : property; + this.timeZone = timeZone; + } + + public String key() { + return key; + } + + public Property property() { + return property; + } + + public TimeZone timeZone() { + return timeZone; + } + + @Override + public String toString() { + return "|" + key + (property == Property.COUNT ? ".count" : "") + "|"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/MetricAggRef.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/MetricAggRef.java new file mode 100644 index 0000000000000..75ee3d8f44743 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/MetricAggRef.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import org.elasticsearch.xpack.sql.execution.search.AggRef; +import org.elasticsearch.xpack.sql.querydsl.agg.Aggs; + +/** + * Reference to a sub/nested/metric aggregation. + * Due to how ES query works, this is _always_ a child aggregation with the grouping (composite agg) as the parent. + */ +public class MetricAggRef extends AggRef { + + private final String name; + private final String property; + private final String innerKey; + + public MetricAggRef(String name) { + this(name, "value"); + } + + public MetricAggRef(String name, String property) { + this(name, property, null); + } + + public MetricAggRef(String name, String property, String innerKey) { + this.name = name; + this.property = property; + this.innerKey = innerKey; + } + + public String name() { + return name; + } + + public String property() { + return property; + } + + public String innerKey() { + return innerKey; + } + + @Override + public String toString() { + String i = innerKey != null ? 
"[" + innerKey + "]" : ""; + return Aggs.ROOT_GROUP_NAME + ">" + name + "." + property + i; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java new file mode 100644 index 0000000000000..bca180315d9e5 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java @@ -0,0 +1,358 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.execution.search.FieldExtraction; +import org.elasticsearch.xpack.sql.execution.search.SourceGenerator; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.LiteralAttribute; +import org.elasticsearch.xpack.sql.expression.function.ScoreAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ScoreProcessorDefinition; +import org.elasticsearch.xpack.sql.querydsl.agg.Aggs; +import org.elasticsearch.xpack.sql.querydsl.agg.GroupByKey; +import org.elasticsearch.xpack.sql.querydsl.agg.LeafAgg; +import org.elasticsearch.xpack.sql.querydsl.container.GroupByRef.Property; +import org.elasticsearch.xpack.sql.querydsl.query.BoolQuery; +import org.elasticsearch.xpack.sql.querydsl.query.MatchAll; +import org.elasticsearch.xpack.sql.querydsl.query.NestedQuery; +import org.elasticsearch.xpack.sql.querydsl.query.Query; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.sql.util.CollectionUtils.combine; + +public class QueryContainer { + + private final Aggs aggs; + private final Query query; + + // final output seen by the client (hence the list or ordering) + // gets converted by the Scroller into Extractors for hits or actual results in case of aggregations + private final List columns; + + // aliases (maps an alias to its actual resolved attribute) + private final Map aliases; + + // pseudo functions (like count) - that are 'extracted' from other aggs + private final Map pseudoFunctions; + + // scalar function processors - recorded as functions get folded; + // at scrolling, their inputs 
(leaves) get updated + private final Map scalarFunctions; + + private final Set sort; + private final int limit; + + // computed + private final boolean aggsOnly; + + public QueryContainer() { + this(null, null, null, null, null, null, null, -1); + } + + public QueryContainer(Query query, Aggs aggs, List refs, Map aliases, + Map pseudoFunctions, + Map scalarFunctions, + Set sort, int limit) { + this.query = query; + this.aggs = aggs == null ? new Aggs() : aggs; + this.aliases = aliases == null || aliases.isEmpty() ? emptyMap() : aliases; + this.pseudoFunctions = pseudoFunctions == null || pseudoFunctions.isEmpty() ? emptyMap() : pseudoFunctions; + this.scalarFunctions = scalarFunctions == null || scalarFunctions.isEmpty() ? emptyMap() : scalarFunctions; + this.columns = refs == null || refs.isEmpty() ? emptyList() : refs; + this.sort = sort == null || sort.isEmpty() ? emptySet() : sort; + this.limit = limit; + aggsOnly = columns.stream().allMatch(FieldExtraction::supportedByAggsOnlyQuery); + } + + public Query query() { + return query; + } + + public Aggs aggs() { + return aggs; + } + + public List columns() { + return columns; + } + + public Map aliases() { + return aliases; + } + + public Map pseudoFunctions() { + return pseudoFunctions; + } + + public Set sort() { + return sort; + } + + public int limit() { + return limit; + } + + public boolean isAggsOnly() { + return aggsOnly; + } + + public boolean hasColumns() { + return !columns.isEmpty(); + } + + // + // copy methods + // + + public QueryContainer with(Query q) { + return new QueryContainer(q, aggs, columns, aliases, pseudoFunctions, scalarFunctions, sort, limit); + } + + public QueryContainer with(List r) { + return new QueryContainer(query, aggs, r, aliases, pseudoFunctions, scalarFunctions, sort, limit); + } + + public QueryContainer withAliases(Map a) { + return new QueryContainer(query, aggs, columns, a, pseudoFunctions, scalarFunctions, sort, limit); + } + + public QueryContainer withPseudoFunctions(Map p) { + return new QueryContainer(query, aggs, columns, aliases, p, scalarFunctions, sort, limit); + } + + public QueryContainer with(Aggs a) { + return new QueryContainer(query, a, columns, aliases, pseudoFunctions, scalarFunctions, sort, limit); + } + + public QueryContainer withLimit(int l) { + return l == limit ? this : new QueryContainer(query, aggs, columns, aliases, pseudoFunctions, scalarFunctions, sort, l); + } + + public QueryContainer withScalarProcessors(Map procs) { + return new QueryContainer(query, aggs, columns, aliases, pseudoFunctions, procs, sort, limit); + } + + public QueryContainer sort(Sort sortable) { + Set sort = new LinkedHashSet<>(this.sort); + sort.add(sortable); + return new QueryContainer(query, aggs, columns, aliases, pseudoFunctions, scalarFunctions, sort, limit); + } + + private String aliasName(Attribute attr) { + return aliases.getOrDefault(attr, attr).name(); + } + + // + // reference methods + // + private FieldExtraction topHitFieldRef(FieldAttribute fieldAttr) { + return new SearchHitFieldRef(aliasName(fieldAttr), fieldAttr.field().hasDocValues()); + } + + private Tuple nestedHitFieldRef(FieldAttribute attr) { + // Find the nested query for this field. 
If there isn't one then create it + List nestedRefs = new ArrayList<>(); + + String name = aliasName(attr); + Query q = rewriteToContainNestedField(query, attr.location(), + attr.nestedParent().name(), name, attr.field().hasDocValues()); + + SearchHitFieldRef nestedFieldRef = new SearchHitFieldRef(name, attr.field().hasDocValues(), attr.parent().name()); + nestedRefs.add(nestedFieldRef); + + return new Tuple<>(new QueryContainer(q, aggs, columns, aliases, pseudoFunctions, scalarFunctions, sort, limit), nestedFieldRef); + } + + static Query rewriteToContainNestedField(@Nullable Query query, Location location, String path, String name, boolean hasDocValues) { + if (query == null) { + /* There is no query so we must add the nested query + * ourselves to fetch the field. */ + return new NestedQuery(location, path, singletonMap(name, hasDocValues), new MatchAll(location)); + } + if (query.containsNestedField(path, name)) { + // The query already has the nested field. Nothing to do. + return query; + } + /* The query doesn't have the nested field so we have to ask + * it to add it. */ + Query rewritten = query.addNestedField(path, name, hasDocValues); + if (rewritten != query) { + /* It successfully added it so we can use the rewritten + * query. */ + return rewritten; + } + /* There is no nested query with a matching path so we must + * add the nested query ourselves just to fetch the field. */ + NestedQuery nested = new NestedQuery(location, path, singletonMap(name, hasDocValues), new MatchAll(location)); + return new BoolQuery(location, true, query, nested); + } + + // replace function's input with references + private Tuple computingRef(ScalarFunctionAttribute sfa) { + Attribute name = aliases.getOrDefault(sfa, sfa); + ProcessorDefinition proc = scalarFunctions.get(name); + + // check the attribute itself + if (proc == null) { + if (name instanceof ScalarFunctionAttribute) { + sfa = (ScalarFunctionAttribute) name; + } + proc = sfa.processorDef(); + } + + // find the processor inputs (Attributes) and convert them into references + // no need to promote them to the top since the container doesn't have to be aware + class QueryAttributeResolver implements ProcessorDefinition.AttributeResolver { + private QueryContainer container; + + private QueryAttributeResolver(QueryContainer container) { + this.container = container; + } + + @Override + public FieldExtraction resolve(Attribute attribute) { + Attribute attr = aliases.getOrDefault(attribute, attribute); + Tuple ref = container.toReference(attr); + container = ref.v1(); + return ref.v2(); + } + } + QueryAttributeResolver resolver = new QueryAttributeResolver(this); + proc = proc.resolveAttributes(resolver); + QueryContainer qContainer = resolver.container; + + // update proc + Map procs = new LinkedHashMap<>(qContainer.scalarFunctions()); + procs.put(name, proc); + qContainer = qContainer.withScalarProcessors(procs); + return new Tuple<>(qContainer, new ComputedRef(proc)); + } + + public QueryContainer addColumn(Attribute attr) { + Tuple tuple = toReference(attr); + return tuple.v1().addColumn(tuple.v2()); + } + + private Tuple toReference(Attribute attr) { + if (attr instanceof FieldAttribute) { + FieldAttribute fa = (FieldAttribute) attr; + if (fa.isNested()) { + return nestedHitFieldRef(fa); + } else { + return new Tuple<>(this, topHitFieldRef(fa)); + } + } + if (attr instanceof ScalarFunctionAttribute) { + return computingRef((ScalarFunctionAttribute) attr); + } + if (attr instanceof LiteralAttribute) { + return new Tuple<>(this, new 
ComputedRef(((LiteralAttribute) attr).asProcessorDefinition())); + } + if (attr instanceof ScoreAttribute) { + return new Tuple<>(this, new ComputedRef(new ScoreProcessorDefinition(attr.location(), attr))); + } + + throw new SqlIllegalArgumentException("Unknown output attribute {}", attr); + } + + public QueryContainer addColumn(FieldExtraction ref) { + return with(combine(columns, ref)); + } + + public Map scalarFunctions() { + return scalarFunctions; + } + + // + // agg methods + // + + public QueryContainer addAggCount(GroupByKey group, String functionId) { + FieldExtraction ref = group == null ? GlobalCountRef.INSTANCE : new GroupByRef(group.id(), Property.COUNT, null); + Map pseudoFunctions = new LinkedHashMap<>(this.pseudoFunctions); + pseudoFunctions.put(functionId, group); + return new QueryContainer(query, aggs, combine(columns, ref), aliases, pseudoFunctions, scalarFunctions, sort, limit); + } + + public QueryContainer addAgg(String groupId, LeafAgg agg) { + return with(aggs.addAgg(agg)); + } + + public QueryContainer addGroups(Collection values) { + return with(aggs.addGroups(values)); + } + + public GroupByKey findGroupForAgg(String aggId) { + return aggs.findGroupForAgg(aggId); + } + + public QueryContainer updateGroup(GroupByKey group) { + return with(aggs.updateGroup(group)); + } + + // + // boiler plate + // + + @Override + public int hashCode() { + return Objects.hash(query, aggs, columns, aliases); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + QueryContainer other = (QueryContainer) obj; + return Objects.equals(query, other.query) + && Objects.equals(aggs, other.aggs) + && Objects.equals(columns, other.columns) + && Objects.equals(aliases, other.aliases) + && Objects.equals(sort, other.sort) + && Objects.equals(limit, other.limit); + } + + @Override + public String toString() { + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + builder.humanReadable(true).prettyPrint(); + SourceGenerator.sourceBuilder(this, null, null).toXContent(builder, ToXContent.EMPTY_PARAMS); + return Strings.toString(builder); + } catch (IOException e) { + throw new RuntimeException("error rendering", e); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ScoreSort.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ScoreSort.java new file mode 100644 index 0000000000000..c05864578d9c8 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ScoreSort.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import java.util.Objects; + +public class ScoreSort extends Sort { + public ScoreSort(Direction direction) { + super(direction); + } + + @Override + public int hashCode() { + return Objects.hash(direction()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + ScoreSort other = (ScoreSort) obj; + return Objects.equals(direction(), other.direction()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ScriptFieldRef.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ScriptFieldRef.java new file mode 100644 index 0000000000000..02767bdea9e7a --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ScriptFieldRef.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; + +public class ScriptFieldRef extends FieldReference { + + private final String name; + private final ScriptTemplate script; + + public ScriptFieldRef(String name, ScriptTemplate script) { + this.name = name; + this.script = script; + } + + @Override + public String name() { + return name; + } + + public ScriptTemplate script() { + return script; + } + + @Override + public void collectFields(SqlSourceBuilder sourceBuilder) { + sourceBuilder.addScriptField(name, script.toPainless()); + } + + @Override + public String toString() { + return "{" + name + "}"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ScriptSort.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ScriptSort.java new file mode 100644 index 0000000000000..62c3750f638e8 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/ScriptSort.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import java.util.Objects; + +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; + +public class ScriptSort extends Sort { + + private final ScriptTemplate script; + + public ScriptSort(ScriptTemplate script, Direction direction) { + super(direction); + this.script = script; + } + + public ScriptTemplate script() { + return script; + } + + @Override + public int hashCode() { + return Objects.hash(direction(), script); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + ScriptSort other = (ScriptSort) obj; + return Objects.equals(direction(), other.direction()) + && Objects.equals(script, other.script); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/SearchHitFieldRef.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/SearchHitFieldRef.java new file mode 100644 index 0000000000000..6a7f24b447e55 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/SearchHitFieldRef.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder; + +public class SearchHitFieldRef extends FieldReference { + private final String name; + private final boolean docValue; + private final String hitName; + + public SearchHitFieldRef(String name, boolean useDocValueInsteadOfSource) { + this(name, useDocValueInsteadOfSource, null); + } + + public SearchHitFieldRef(String name, boolean useDocValueInsteadOfSource, String hitName) { + this.name = name; + this.docValue = useDocValueInsteadOfSource; + this.hitName = hitName; + } + + public String hitName() { + return hitName; + } + + @Override + public String name() { + return name; + } + + public boolean useDocValue() { + return docValue; + } + + @Override + public void collectFields(SqlSourceBuilder sourceBuilder) { + // nested fields are handled by inner hits + if (hitName != null) { + return; + } + if (docValue) { + sourceBuilder.addDocField(name); + } else { + sourceBuilder.addSourceField(name); + } + } + + @Override + public String toString() { + return name; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/Sort.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/Sort.java new file mode 100644 index 0000000000000..07250b3a9ccd4 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/Sort.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.xpack.sql.expression.Order.OrderDirection; + +public class Sort { + + public enum Direction { + ASC, DESC; + + public static Direction from(OrderDirection dir) { + return dir == null || dir == OrderDirection.ASC ? ASC : DESC; + } + + public SortOrder asOrder() { + return this == Direction.ASC ? SortOrder.ASC : SortOrder.DESC; + } + } + + private final Direction direction; + + protected Sort(Direction direction) { + this.direction = direction; + } + + public Direction direction() { + return direction; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQuery.java new file mode 100644 index 0000000000000..64949fe318ce6 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQuery.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import java.util.Objects; + +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.sort.NestedSortBuilder; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; + +/** + * Query representing boolean AND or boolean OR. + */ +public class BoolQuery extends Query { + /** + * {@code true} for boolean {@code AND}, {@code false} for boolean {@code OR}. + */ + private final boolean isAnd; + private final Query left; + private final Query right; + + public BoolQuery(Location location, boolean isAnd, Query left, Query right) { + super(location); + if (left == null) { + throw new IllegalArgumentException("left is required"); + } + if (right == null) { + throw new IllegalArgumentException("right is required"); + } + this.isAnd = isAnd; + this.left = left; + this.right = right; + } + + @Override + public boolean containsNestedField(String path, String field) { + return left.containsNestedField(path, field) || right.containsNestedField(path, field); + } + + @Override + public Query addNestedField(String path, String field, boolean hasDocValues) { + Query rewrittenLeft = left.addNestedField(path, field, hasDocValues); + Query rewrittenRight = right.addNestedField(path, field, hasDocValues); + if (rewrittenLeft == left && rewrittenRight == right) { + return this; + } + return new BoolQuery(location(), isAnd, rewrittenLeft, rewrittenRight); + } + + @Override + public void enrichNestedSort(NestedSortBuilder sort) { + left.enrichNestedSort(sort); + right.enrichNestedSort(sort); + } + + @Override + public QueryBuilder asBuilder() { + BoolQueryBuilder boolQuery = boolQuery(); + if (isAnd) { + // TODO are we throwing out score by using filter? 
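+ // Note: filter clauses run in a non-scoring context, so this AND contributes nothing to _score; using must() instead would preserve scoring at the cost of computing it.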
+ boolQuery.filter(left.asBuilder()); + boolQuery.filter(right.asBuilder()); + } else { + boolQuery.should(left.asBuilder()); + boolQuery.should(right.asBuilder()); + } + return boolQuery; + } + + boolean isAnd() { + return isAnd; + } + + Query left() { + return left; + } + + Query right() { + return right; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), isAnd, left, right); + } + + @Override + public boolean equals(Object obj) { + if (false == super.equals(obj)) { + return false; + } + BoolQuery other = (BoolQuery) obj; + return isAnd == other.isAnd + && left.equals(other.left) + && right.equals(other.right); + } + + @Override + protected String innerToString() { + return left + (isAnd ? " AND " : " OR ") + right; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/ExistsQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/ExistsQuery.java new file mode 100644 index 0000000000000..aa0f39f1bec04 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/ExistsQuery.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.index.query.QueryBuilders.existsQuery; + +public class ExistsQuery extends LeafQuery { + + private final String name; + + public ExistsQuery(Location location, String name) { + super(location); + this.name = name; + } + + @Override + public QueryBuilder asBuilder() { + return existsQuery(name); + } + + @Override + protected String innerToString() { + return name; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/LeafQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/LeafQuery.java new file mode 100644 index 0000000000000..c66de04f550dd --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/LeafQuery.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.search.sort.NestedSortBuilder; +import org.elasticsearch.xpack.sql.tree.Location; + +abstract class LeafQuery extends Query { + LeafQuery(Location location) { + super(location); + } + + @Override + public final boolean containsNestedField(String path, String field) { + // No leaf queries are nested + return false; + } + + @Override + public Query addNestedField(String path, String field, boolean hasDocValues) { + // No leaf queries are nested + return this; + } + + @Override + public void enrichNestedSort(NestedSortBuilder sort) { + // No leaf queries are nested + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchAll.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchAll.java new file mode 100644 index 0000000000000..af2801694d033 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchAll.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; + +public class MatchAll extends LeafQuery { + public MatchAll(Location location) { + super(location); + } + + @Override + public QueryBuilder asBuilder() { + return matchAllQuery(); + } + + @Override + protected String innerToString() { + return ""; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java new file mode 100644 index 0000000000000..292a8b9a8e341 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.common.Booleans; +import org.elasticsearch.index.query.MatchQueryBuilder; +import org.elasticsearch.index.query.Operator; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MatchQueryPredicate; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.function.BiConsumer; + +public class MatchQuery extends LeafQuery { + + private static final Map> BUILDER_APPLIERS; + + static { + HashMap> appliers = new HashMap<>(11); + // TODO: it'd be great if these could be constants instead of Strings, needs a core change to make the fields public first + // TODO: add zero terms query support, I'm not sure the best way to parse it yet... 
+ // appliers.put("zero_terms_query", (qb, s) -> qb.zeroTermsQuery(s)); + appliers.put("cutoff_frequency", (qb, s) -> qb.cutoffFrequency(Float.valueOf(s))); + appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); + appliers.put("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))); + appliers.put("fuzzy_rewrite", (qb, s) -> qb.fuzzyRewrite(s)); + appliers.put("minimum_should_match", (qb, s) -> qb.minimumShouldMatch(s)); + appliers.put("operator", (qb, s) -> qb.operator(Operator.fromString(s))); + appliers.put("max_expansions", (qb, s) -> qb.maxExpansions(Integer.valueOf(s))); + appliers.put("prefix_length", (qb, s) -> qb.prefixLength(Integer.valueOf(s))); + appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); + appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); + BUILDER_APPLIERS = Collections.unmodifiableMap(appliers); + } + + private final String name; + private final Object text; + private final MatchQueryPredicate predicate; + private final Map options; + + + public MatchQuery(Location location, String name, Object text) { + this(location, name, text, null); + } + + public MatchQuery(Location location, String name, Object text, MatchQueryPredicate predicate) { + super(location); + this.name = name; + this.text = text; + this.predicate = predicate; + this.options = predicate == null ? Collections.emptyMap() : predicate.optionMap(); + } + + @Override + public QueryBuilder asBuilder() { + final MatchQueryBuilder queryBuilder = QueryBuilders.matchQuery(name, text); + options.forEach((k, v) -> { + if (BUILDER_APPLIERS.containsKey(k)) { + BUILDER_APPLIERS.get(k).accept(queryBuilder, v); + } else { + throw new IllegalArgumentException("illegal match option [" + k + "]"); + } + }); + return queryBuilder; + } + + public String name() { + return name; + } + + public Object text() { + return text; + } + + MatchQueryPredicate predicate() { + return predicate; + } + + @Override + public int hashCode() { + return Objects.hash(text, name, predicate); + } + + @Override + public boolean equals(Object obj) { + if (false == super.equals(obj)) { + return false; + } + + MatchQuery other = (MatchQuery) obj; + return Objects.equals(text, other.text) + && Objects.equals(name, other.name) + && Objects.equals(predicate, other.predicate); + } + + @Override + protected String innerToString() { + return name + ":" + text; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java new file mode 100644 index 0000000000000..81c990f85bdb4 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.common.Booleans; +import org.elasticsearch.index.query.Operator; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.MultiMatchQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MultiMatchQueryPredicate; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.function.BiConsumer; + +public class MultiMatchQuery extends LeafQuery { + + private static final Map> BUILDER_APPLIERS; + + static { + HashMap> appliers = new HashMap<>(14); + // TODO: it'd be great if these could be constants instead of Strings, needs a core change to make the fields public first + appliers.put("slop", (qb, s) -> qb.slop(Integer.valueOf(s))); + // TODO: add zero terms query support, I'm not sure the best way to parse it yet... + // appliers.put("zero_terms_query", (qb, s) -> qb.zeroTermsQuery(s)); + appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); + appliers.put("cutoff_frequency", (qb, s) -> qb.cutoffFrequency(Float.valueOf(s))); + appliers.put("tie_breaker", (qb, s) -> qb.tieBreaker(Float.valueOf(s))); + appliers.put("use_dis_max", (qb, s) -> qb.useDisMax(Booleans.parseBoolean(s))); + appliers.put("fuzzy_rewrite", (qb, s) -> qb.fuzzyRewrite(s)); + appliers.put("minimum_should_match", (qb, s) -> qb.minimumShouldMatch(s)); + appliers.put("operator", (qb, s) -> qb.operator(Operator.fromString(s))); + appliers.put("max_expansions", (qb, s) -> qb.maxExpansions(Integer.valueOf(s))); + appliers.put("prefix_length", (qb, s) -> qb.prefixLength(Integer.valueOf(s))); + appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); + appliers.put("type", (qb, s) -> qb.type(s)); + appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); + appliers.put("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))); + BUILDER_APPLIERS = Collections.unmodifiableMap(appliers); + } + + private final String query; + private final Map fields; + private final Map options; + private final MultiMatchQueryPredicate predicate; + + public MultiMatchQuery(Location location, String query, Map fields, MultiMatchQueryPredicate predicate) { + super(location); + this.query = query; + this.fields = fields; + this.predicate = predicate; + this.options = predicate.optionMap(); + } + + @Override + public QueryBuilder asBuilder() { + final MultiMatchQueryBuilder queryBuilder = QueryBuilders.multiMatchQuery(query); + queryBuilder.fields(fields); + queryBuilder.analyzer(predicate.analyzer()); + options.forEach((k, v) -> { + if (BUILDER_APPLIERS.containsKey(k)) { + BUILDER_APPLIERS.get(k).accept(queryBuilder, v); + } else { + throw new IllegalArgumentException("illegal multi_match option [" + k + "]"); + } + }); + return queryBuilder; + } + + @Override + public int hashCode() { + return Objects.hash(query, fields, predicate); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + MultiMatchQuery other = (MultiMatchQuery) obj; + return Objects.equals(query, other.query) + && Objects.equals(fields, other.fields) + && Objects.equals(predicate, other.predicate); + } + + @Override + protected String 
innerToString() { + return fields + ":" + query; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/NestedQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/NestedQuery.java new file mode 100644 index 0000000000000..03af1433f30ce --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/NestedQuery.java @@ -0,0 +1,172 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; + +import org.apache.lucene.search.join.ScoreMode; +import org.elasticsearch.index.query.InnerHitBuilder; +import org.elasticsearch.index.query.NestedQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.fetch.StoredFieldsContext; +import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.search.sort.NestedSortBuilder; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.tree.Location; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static java.util.Collections.unmodifiableMap; + +import static org.elasticsearch.index.query.QueryBuilders.nestedQuery; + +/** + * A query to a nested document. + */ +public class NestedQuery extends Query { + // TODO: make this configurable + private static final int MAX_INNER_HITS = 99; + private static final List NO_STORED_FIELD = singletonList(StoredFieldsContext._NONE_); + + private final String path; + private final Map fields; + private final Query child; + + public NestedQuery(Location location, String path, Query child) { + this(location, path, emptyMap(), child); + } + + public NestedQuery(Location location, String path, Map fields, Query child) { + super(location); + if (path == null) { + throw new IllegalArgumentException("path is required"); + } + if (fields == null) { + throw new IllegalArgumentException("fields is required"); + } + if (child == null) { + throw new IllegalArgumentException("child is required"); + } + this.path = path; + this.fields = fields; + this.child = child; + } + + @Override + public boolean containsNestedField(String path, String field) { + boolean iContainThisField = this.path.equals(path) && fields.containsKey(field); + boolean myChildContainsThisField = child.containsNestedField(path, field); + return iContainThisField || myChildContainsThisField; + } + + @Override + public Query addNestedField(String path, String field, boolean hasDocValues) { + if (false == this.path.equals(path)) { + // I'm not at the right path so let my child query have a crack at it + Query rewrittenChild = child.addNestedField(path, field, hasDocValues); + if (rewrittenChild == child) { + return this; + } + return new NestedQuery(location(), path, fields, rewrittenChild); + } + if (fields.containsKey(field)) { + // I already have the field, no rewriting needed + return this; + } + Map newFields = new HashMap<>(fields.size() + 1); + newFields.putAll(fields); + newFields.put(field, hasDocValues); + return new NestedQuery(location(), path, unmodifiableMap(newFields), child); + } + 
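+ // Illustration of the rewrite above, with hypothetical names: a NestedQuery on path "comments" whose fields are {"comments.author"=true}, asked to add ("comments", "comments.votes", false), returns a copy whose fields map contains both entries; a request for any other path is delegated to the child query and this query is rebuilt only if the child changed.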
+ @Override + public void enrichNestedSort(NestedSortBuilder sort) { + child.enrichNestedSort(sort); + if (false == sort.getPath().equals(path)) { + return; + } + QueryBuilder childAsBuilder = child.asBuilder(); + if (sort.getFilter() != null && false == sort.getFilter().equals(childAsBuilder)) { + throw new SqlIllegalArgumentException("nested query should have been grouped in one place"); + } + sort.setFilter(childAsBuilder); + } + + @Override + public QueryBuilder asBuilder() { + // disable score + NestedQueryBuilder query = nestedQuery(path, child.asBuilder(), ScoreMode.None); + + if (!fields.isEmpty()) { + InnerHitBuilder ihb = new InnerHitBuilder(); + ihb.setSize(0); + ihb.setSize(MAX_INNER_HITS); + + boolean noSourceNeeded = true; + List sourceFields = new ArrayList<>(); + + for (Entry entry : fields.entrySet()) { + if (entry.getValue()) { + ihb.addDocValueField(entry.getKey()); + } + else { + sourceFields.add(entry.getKey()); + noSourceNeeded = false; + } + } + + if (noSourceNeeded) { + ihb.setFetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE); + ihb.setStoredFieldNames(NO_STORED_FIELD); + } + else { + ihb.setFetchSourceContext(new FetchSourceContext(true, sourceFields.toArray(new String[sourceFields.size()]), null)); + } + + query.innerHit(ihb); + } + + return query; + } + + String path() { + return path; + } + + Map fields() { + return fields; + } + + Query child() { + return child; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), path, fields, child); + } + + @Override + public boolean equals(Object obj) { + if (false == super.equals(obj)) { + return false; + } + NestedQuery other = (NestedQuery) obj; + return path.equals(other.path) + && fields.equals(other.fields) + && child.equals(other.child); + } + + @Override + protected String innerToString() { + return path + "." + fields + "[" + child + "]"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/NotQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/NotQuery.java new file mode 100644 index 0000000000000..b3d50b8149a43 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/NotQuery.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.sort.NestedSortBuilder; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; + +import java.util.Objects; + +public class NotQuery extends Query { + private final Query child; + + public NotQuery(Location location, Query child) { + super(location); + if (child == null) { + throw new IllegalArgumentException("child is required"); + } + this.child = child; + } + + @Override + public boolean containsNestedField(String path, String field) { + return child.containsNestedField(path, field); + } + + @Override + public Query addNestedField(String path, String field, boolean hasDocValues) { + Query rewrittenChild = child.addNestedField(path, field, hasDocValues); + if (child == rewrittenChild) { + return this; + } + return new NotQuery(location(), rewrittenChild); + } + + @Override + public void enrichNestedSort(NestedSortBuilder sort) { + child.enrichNestedSort(sort); + } + + @Override + public QueryBuilder asBuilder() { + return boolQuery().mustNot(child.asBuilder()); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), child.hashCode()); + } + + @Override + public boolean equals(Object obj) { + if (false == super.equals(obj)) { + return false; + } + NotQuery other = (NotQuery) obj; + return child.equals(other.child); + } + + @Override + protected String innerToString() { + return child.toString(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/Query.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/Query.java new file mode 100644 index 0000000000000..057454b6fd817 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/Query.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.sort.NestedSortBuilder; +import org.elasticsearch.xpack.sql.tree.Location; + +/** + * Intermediate representation of queries that is rewritten to fetch + * otherwise unreferenced nested fields and then used to build + * Elasticsearch {@link QueryBuilder}s. + */ +public abstract class Query { + private final Location location; + + Query(Location location) { + if (location == null) { + throw new IllegalArgumentException("location must be specified"); + } + this.location = location; + } + + /** + * Location in the source statement. + */ + public Location location() { + return location; + } + + /** + * Does this query contain a particular nested field? + */ + public abstract boolean containsNestedField(String path, String field); + + /** + * Rewrite this query to one that contains the specified nested field. + *

+ * Used to make sure that we fetch nested fields even if they aren't + * explicitly part of the query. + * @return a new query if we could add the nested field, the same query + * instance otherwise + */ + public abstract Query addNestedField(String path, String field, boolean hasDocValues); + + /** + * Attach the one and only one matching nested query's filter to this + * sort. + */ + public abstract void enrichNestedSort(NestedSortBuilder sort); + + /** + * Convert to an Elasticsearch {@link QueryBuilder} all set up to execute + * the query. + */ + public abstract QueryBuilder asBuilder(); + + /** + * Used by {@link Query#toString()} to produce a pretty string. + */ + protected abstract String innerToString(); + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Query other = (Query) obj; + return location.equals(other.location); + } + + @Override + public int hashCode() { + return location.hashCode(); + } + + @Override + public String toString() { + return getClass().getSimpleName() + location + "[" + innerToString() + "]"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQuery.java new file mode 100644 index 0000000000000..aa8fb5b2dea60 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQuery.java @@ -0,0 +1,127 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.index.query.MultiMatchQueryBuilder; +import org.elasticsearch.index.query.Operator; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryStringQueryBuilder; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.StringQueryPredicate; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.function.BiConsumer; + +public class QueryStringQuery extends LeafQuery { + + private static final Map> BUILDER_APPLIERS; + + static { + HashMap> appliers = new HashMap<>(28); + // TODO: it'd be great if these could be constants instead of Strings, needs a core change to make the fields public first + appliers.put("default_field", (qb, s) -> qb.defaultField(s)); + appliers.put("default_operator", (qb, s) -> qb.defaultOperator(Operator.fromString(s))); + appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); + appliers.put("quote_analyzer", (qb, s) -> qb.quoteAnalyzer(s)); + appliers.put("allow_leading_wildcard", (qb, s) -> qb.allowLeadingWildcard(Booleans.parseBoolean(s))); + appliers.put("auto_generate_phrase_queries", (qb, s) -> qb.autoGeneratePhraseQueries(Booleans.parseBoolean(s))); + appliers.put("max_determinized_states", (qb, s) -> qb.maxDeterminizedStates(Integer.valueOf(s))); + appliers.put("lowercase_expanded_terms", (qb, s) -> {}); + appliers.put("enable_position_increments", (qb, s) -> qb.enablePositionIncrements(Booleans.parseBoolean(s))); + 
appliers.put("escape", (qb, s) -> qb.escape(Booleans.parseBoolean(s))); + appliers.put("use_dis_max", (qb, s) -> qb.useDisMax(Booleans.parseBoolean(s))); + appliers.put("fuzzy_prefix_length", (qb, s) -> qb.fuzzyPrefixLength(Integer.valueOf(s))); + appliers.put("fuzzy_max_expansions", (qb, s) -> qb.fuzzyMaxExpansions(Integer.valueOf(s))); + appliers.put("fuzzy_rewrite", (qb, s) -> qb.fuzzyRewrite(s)); + appliers.put("phrase_slop", (qb, s) -> qb.phraseSlop(Integer.valueOf(s))); + appliers.put("tie_breaker", (qb, s) -> qb.tieBreaker(Float.valueOf(s))); + appliers.put("analyze_wildcard", (qb, s) -> qb.analyzeWildcard(Booleans.parseBoolean(s))); + appliers.put("rewrite", (qb, s) -> qb.rewrite(s)); + appliers.put("minimum_should_match", (qb, s) -> qb.minimumShouldMatch(s)); + appliers.put("quote_field_suffix", (qb, s) -> qb.quoteFieldSuffix(s)); + appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); + appliers.put("locale", (qb, s) -> {}); + appliers.put("time_zone", (qb, s) -> qb.timeZone(s)); + appliers.put("split_on_whitespace", (qb, s) -> qb.splitOnWhitespace(Booleans.parseBoolean(s))); + appliers.put("all_fields", (qb, s) -> qb.useAllFields(Booleans.parseBoolean(s))); + appliers.put("type", (qb, s) -> qb.type(MultiMatchQueryBuilder.Type.parse(s, LoggingDeprecationHandler.INSTANCE))); + appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); + appliers.put("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))); + BUILDER_APPLIERS = Collections.unmodifiableMap(appliers); + } + + private final String query; + private final Map fields; + private StringQueryPredicate predicate; + private final Map options; + + // dedicated constructor for QueryTranslator + public QueryStringQuery(Location location, String query, String fieldName) { + this(location, query, Collections.singletonMap(fieldName, Float.valueOf(1.0f)), null); + } + + public QueryStringQuery(Location location, String query, Map fields, StringQueryPredicate predicate) { + super(location); + this.query = query; + this.fields = fields; + this.predicate = predicate; + this.options = predicate == null ? 
Collections.emptyMap() : predicate.optionMap(); + } + + @Override + public QueryBuilder asBuilder() { + final QueryStringQueryBuilder queryBuilder = QueryBuilders.queryStringQuery(query); + queryBuilder.fields(fields); + options.forEach((k, v) -> { + if (BUILDER_APPLIERS.containsKey(k)) { + BUILDER_APPLIERS.get(k).accept(queryBuilder, v); + } else { + throw new IllegalArgumentException("illegal query_string option [" + k + "]"); + } + }); + return queryBuilder; + } + + public Map fields() { + return fields; + } + + public String query() { + return query; + } + + @Override + public int hashCode() { + return Objects.hash(query, fields, predicate); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + QueryStringQuery other = (QueryStringQuery) obj; + return Objects.equals(query, other.query) + && Objects.equals(fields, other.fields) + && Objects.equals(predicate, other.predicate); + } + + @Override + protected String innerToString() { + return fields + ":" + query; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/RangeQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/RangeQuery.java new file mode 100644 index 0000000000000..4402222e8986e --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/RangeQuery.java @@ -0,0 +1,103 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import java.util.Objects; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.index.query.QueryBuilders.rangeQuery; + +public class RangeQuery extends LeafQuery { + + private final String field; + private final Object lower, upper; + private final boolean includeLower, includeUpper; + private final String format; + + public RangeQuery(Location location, String field, Object lower, boolean includeLower, Object upper, boolean includeUpper) { + this(location, field, lower, includeLower, upper, includeUpper, null); + } + + public RangeQuery(Location location, String field, Object lower, boolean includeLower, Object upper, + boolean includeUpper, String format) { + super(location); + this.field = field; + this.lower = lower; + this.upper = upper; + this.includeLower = includeLower; + this.includeUpper = includeUpper; + this.format = format; + } + + public String field() { + return field; + } + + public Object lower() { + return lower; + } + + public Object upper() { + return upper; + } + + public boolean includeLower() { + return includeLower; + } + + public boolean includeUpper() { + return includeUpper; + } + + public String format() { + return format; + } + + @Override + public QueryBuilder asBuilder() { + RangeQueryBuilder queryBuilder = rangeQuery(field).from(lower, includeLower).to(upper, includeUpper); + if (Strings.hasText(format)) { + queryBuilder.format(format); + } + + return queryBuilder; + } + + @Override + public int hashCode() { + return Objects.hash(field, lower, upper, includeLower, includeUpper, format); + } + + @Override + public 
boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + RangeQuery other = (RangeQuery) obj; + return Objects.equals(field, other.field) && + Objects.equals(includeLower, other.includeLower) && + Objects.equals(includeUpper, other.includeUpper) && + Objects.equals(lower, other.lower) && + Objects.equals(upper, other.upper) && + Objects.equals(format, other.format); + } + + @Override + protected String innerToString() { + return field + ":" + + (includeLower ? "[" : "(") + lower + ", " + + upper + (includeUpper ? "]" : ")"); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/RegexQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/RegexQuery.java new file mode 100644 index 0000000000000..bf3364388a977 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/RegexQuery.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import java.util.Objects; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.index.query.QueryBuilders.regexpQuery; + +public class RegexQuery extends LeafQuery { + + private final String field, regex; + + public RegexQuery(Location location, String field, String regex) { + super(location); + this.field = field; + this.regex = regex; + } + + public String field() { + return field; + } + + public String regex() { + return regex; + } + + @Override + public QueryBuilder asBuilder() { + return regexpQuery(field, regex); + } + + @Override + public int hashCode() { + return Objects.hash(field, regex); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + RegexQuery other = (RegexQuery) obj; + return Objects.equals(field, other.field) + && Objects.equals(regex, other.regex); + } + + @Override + protected String innerToString() { + return field + "~ /" + regex + "/"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/ScriptQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/ScriptQuery.java new file mode 100644 index 0000000000000..b918fd71a5850 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/ScriptQuery.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
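The `QueryStringQuery` introduced earlier in this patch applies its options to the Elasticsearch builder by name: options registered in `BUILDER_APPLIERS` are applied one by one in `asBuilder()`, and any unrecognized option name fails fast. A minimal usage sketch (not part of the change set; field and query values are invented for illustration):

```java
// Illustrative sketch only -- not part of this patch; values are made up.
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.xpack.sql.querydsl.query.QueryStringQuery;
import org.elasticsearch.xpack.sql.tree.Location;

public class QueryStringQueryExample {
    public static QueryBuilder toEsQuery() {
        // single-field constructor used by the query translator: field "name", boost 1.0
        QueryStringQuery q = new QueryStringQuery(new Location(1, 1), "foo AND bar", "name");
        // builds a query_string query; an unrecognized option coming from the
        // predicate's option map would make asBuilder() throw IllegalArgumentException
        return q.asBuilder();
    }
}
```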
+ */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import java.util.Objects; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.index.query.QueryBuilders.scriptQuery; + +public class ScriptQuery extends LeafQuery { + + private final ScriptTemplate script; + + public ScriptQuery(Location location, ScriptTemplate script) { + super(location); + this.script = script; + } + + public ScriptTemplate script() { + return script; + } + + @Override + public QueryBuilder asBuilder() { + return scriptQuery(script.toPainless()); + } + + @Override + public int hashCode() { + return Objects.hash(script); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + ScriptQuery other = (ScriptQuery) obj; + return Objects.equals(script, other.script); + } + + @Override + protected String innerToString() { + return script.toString(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/TermQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/TermQuery.java new file mode 100644 index 0000000000000..af14272dff5b1 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/TermQuery.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import java.util.Objects; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.index.query.QueryBuilders.termQuery; + +public class TermQuery extends LeafQuery { + + private final String term; + private final Object value; + + public TermQuery(Location location, String term, Object value) { + super(location); + this.term = term; + this.value = value; + } + + public String term() { + return term; + } + + public Object value() { + return value; + } + + @Override + public QueryBuilder asBuilder() { + return termQuery(term, value); + } + + @Override + public int hashCode() { + return Objects.hash(term, value); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + TermQuery other = (TermQuery) obj; + return Objects.equals(term, other.term) + && Objects.equals(value, other.value); + } + + @Override + protected String innerToString() { + return term + ":" + value; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/WildcardQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/WildcardQuery.java new file mode 100644 index 0000000000000..6c252450c6f07 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/WildcardQuery.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
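The simpler leaf queries above translate one-to-one onto Elasticsearch query builders. A hedged sketch of what that looks like for `RangeQuery` and `TermQuery` (illustration only, not part of the diff; fields and values are made up):

```java
// Illustrative sketch only -- not part of this patch.
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.xpack.sql.querydsl.query.RangeQuery;
import org.elasticsearch.xpack.sql.querydsl.query.TermQuery;
import org.elasticsearch.xpack.sql.tree.Location;

public class LeafQueryExamples {
    private static final Location LOC = new Location(1, 1);

    // WHERE age >= 10 AND age < 20  ->  range query on "age"
    static QueryBuilder ageRange() {
        return new RangeQuery(LOC, "age", 10, true, 20, false).asBuilder();
    }

    // WHERE gender = 'M'  ->  term query on "gender"
    static QueryBuilder genderTerm() {
        return new TermQuery(LOC, "gender", "M").asBuilder();
    }
}
```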
+ */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import java.util.Objects; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery; + +public class WildcardQuery extends LeafQuery { + + private final String field, query; + + public WildcardQuery(Location location, String field, String query) { + super(location); + this.field = field; + this.query = query; + } + + public String field() { + return field; + } + + public String query() { + return query; + } + + @Override + public QueryBuilder asBuilder() { + return wildcardQuery(field, query); + } + + @Override + public int hashCode() { + return Objects.hash(field, query); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + WildcardQuery other = (WildcardQuery) obj; + return Objects.equals(field, other.field) + && Objects.equals(query, other.query); + } + + @Override + protected String innerToString() { + return field + ":" + query; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/Rule.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/Rule.java new file mode 100644 index 0000000000000..d63357e151aed --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/Rule.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.rule; + +import java.util.function.UnaryOperator; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.xpack.sql.tree.Node; +import org.elasticsearch.xpack.sql.util.ReflectionUtils; + +/** + * Rules that apply transformation to a tree. In addition, performs + * type filtering so that a rule that the rule implementation doesn't + * have to manually filter. + *

+ * Rules could be built as lambdas but most
+ * rules are much larger so we keep them as full blown subclasses.
+ */
+public abstract class Rule<E extends T, T extends Node<T>> implements UnaryOperator<T> {
+
+    protected Logger log = Loggers.getLogger(getClass());
+
+    private final String name;
+    private final Class<E> typeToken = ReflectionUtils.detectSuperTypeForRuleLike(getClass());
+
+    protected Rule() {
+        this(null);
+    }
+
+    protected Rule(String name) {
+        this.name = (name == null ? ReflectionUtils.ruleLikeNaming(getClass()) : name);
+    }
+
+    public Class<E> typeToken() {
+        return typeToken;
+    }
+
+    public String name() {
+        return name;
+    }
+
+    protected abstract T rule(E e);
+
+    @Override
+    public String toString() {
+        return name();
+    }
+}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutionException.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutionException.java
new file mode 100644
index 0000000000000..ab5bf8ec4ce2d
--- /dev/null
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutionException.java
@@ -0,0 +1,15 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.sql.rule;
+
+import org.elasticsearch.xpack.sql.ServerSqlException;
+
+public class RuleExecutionException extends ServerSqlException {
+
+    public RuleExecutionException(String message, Object... args) {
+        super(message, args);
+    }
+}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutor.java
new file mode 100644
index 0000000000000..2936e6342add9
--- /dev/null
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/rule/RuleExecutor.java
@@ -0,0 +1,195 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.sql.rule;
+
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.xpack.sql.tree.Node;
+import org.elasticsearch.xpack.sql.tree.NodeUtils;
+
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+public abstract class RuleExecutor<TreeType extends Node<TreeType>> {
+
+    private final Logger log = Loggers.getLogger(getClass());
+
+    public static class Limiter {
+        public static final Limiter DEFAULT = new Limiter(100);
+        public static final Limiter ONCE = new Limiter(1) {
+
+            @Override
+            boolean reached(int runs) {
+                return runs >= 1;
+            }
+        };
+
+        private final int runs;
+
+        public Limiter(int maximumRuns) {
+            this.runs = maximumRuns;
+        }
+
+        boolean reached(int runs) {
+            if (runs >= this.runs) {
+                throw new RuleExecutionException("Rule execution limit %d reached", runs);
+            }
+            return false;
+        }
+    }
+
+    public class Batch {
+        private final String name;
+        private final Rule<?, TreeType>[] rules;
+        private final Limiter limit;
+
+        @SafeVarargs
+        @SuppressWarnings("varargs")
+        public Batch(String name, Limiter limit, Rule<?, TreeType>... rules) {
+            this.name = name;
+            this.limit = limit;
+            this.rules = rules;
+        }
+
+        @SafeVarargs
+        public Batch(String name, Rule<?, TreeType>...
rules) { + this(name, Limiter.DEFAULT, rules); + } + + public String name() { + return name; + } + } + + private final Iterable batches = batches(); + + protected abstract Iterable.Batch> batches(); + + public class Transformation { + private final TreeType before, after; + private final Rule rule; + private Boolean lazyHasChanged; + + Transformation(TreeType plan, Rule rule) { + this.rule = rule; + before = plan; + after = rule.apply(before); + } + + public boolean hasChanged() { + if (lazyHasChanged == null) { + lazyHasChanged = !before.equals(after); + } + return lazyHasChanged; + } + + public String ruleName() { + return rule.name(); + } + + public TreeType before() { + return before; + } + + public TreeType after() { + return after; + } + } + + public class ExecutionInfo { + + private final TreeType before, after; + private final Map> transformations; + + ExecutionInfo(TreeType before, TreeType after, Map> transformations) { + this.before = before; + this.after = after; + this.transformations = transformations; + } + + public TreeType before() { + return before; + } + + public TreeType after() { + return after; + } + + public Map> transformations() { + return transformations; + } + } + + protected TreeType execute(TreeType plan) { + return executeWithInfo(plan).after; + } + + protected ExecutionInfo executeWithInfo(TreeType plan) { + TreeType currentPlan = plan; + + long totalDuration = 0; + + Map> transformations = new LinkedHashMap<>(); + + for (Batch batch : batches) { + int batchRuns = 0; + List tfs = new ArrayList(); + transformations.put(batch, tfs); + + boolean hasChanged = false; + long batchStart = System.currentTimeMillis(); + long batchDuration = 0; + + // run each batch until no change occurs or the limit is reached + do { + hasChanged = false; + batchRuns++; + + for (Rule rule : batch.rules) { + Transformation tf = new Transformation(currentPlan, rule); + tfs.add(tf); + currentPlan = tf.after; + + if (tf.hasChanged()) { + hasChanged = true; + if (log.isTraceEnabled()) { + log.trace("Rule {} applied\n{}", rule, NodeUtils.diffString(tf.before, tf.after)); + } + } + else { + if (log.isTraceEnabled()) { + log.trace("Rule {} applied w/o changes", rule); + } + } + } + batchDuration = System.currentTimeMillis() - batchStart; + } while (hasChanged && !batch.limit.reached(batchRuns)); + + totalDuration += batchDuration; + + if (log.isTraceEnabled()) { + TreeType before = plan; + TreeType after = plan; + if (!tfs.isEmpty()) { + before = tfs.get(0).before; + after = tfs.get(tfs.size() - 1).after; + } + log.trace("Batch {} applied took {}\n{}", + batch.name, TimeValue.timeValueMillis(batchDuration), NodeUtils.diffString(before, after)); + } + } + + if (false == currentPlan.equals(plan) && log.isDebugEnabled()) { + log.debug("Tree transformation took {}\n{}", + TimeValue.timeValueMillis(totalDuration), NodeUtils.diffString(plan, currentPlan)); + } + + return new ExecutionInfo(plan, currentPlan, transformations); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/AbstractRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/AbstractRowSet.java new file mode 100644 index 0000000000000..6420953647d8f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/AbstractRowSet.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
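`RuleExecutor` above drives rule batches to a fixed point: each batch is re-applied until its rules stop changing the tree or the `Limiter` trips. A minimal sketch of a concrete executor, assuming `LogicalPlan` (a `Node` subtype elsewhere in this patch) as the tree type and with the actual rules elided:

```java
// Illustrative sketch only -- not part of this patch; the rule list is elided.
import java.util.Collections;

import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.sql.rule.RuleExecutor;

class ExampleExecutor extends RuleExecutor<LogicalPlan> {

    @Override
    protected Iterable<RuleExecutor<LogicalPlan>.Batch> batches() {
        // Limiter.ONCE runs the batch a single time; Limiter.DEFAULT allows up to 100 passes
        Batch cleanup = new Batch("Example cleanup", Limiter.ONCE /*, rules would go here */);
        return Collections.singletonList(cleanup);
    }
}
```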
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.xpack.sql.util.Check; + +public abstract class AbstractRowSet implements RowSet { + private boolean terminated = false; + + @Override + public Object column(int index) { + Check.isTrue(index >= 0, "Invalid index {}; needs to be positive", index); + Check.isTrue(index < columnCount(), "Invalid index {} for row of size {}", index, columnCount()); + Check.isTrue(hasCurrentRow(), "RowSet contains no (more) entries; use hasCurrent() to check its status"); + return getColumn(index); + } + + protected abstract Object getColumn(int column); + + @Override + public boolean hasCurrentRow() { + return terminated ? false : doHasCurrent(); + } + + @Override + public boolean advanceRow() { + if (terminated) { + return false; + } + if (!doNext()) { + terminated = true; + return false; + } + return true; + } + + protected abstract boolean doHasCurrent(); + + protected abstract boolean doNext(); + + @Override + public void reset() { + terminated = false; + doReset(); + } + + protected abstract void doReset(); + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + + if (hasCurrentRow()) { + for (int column = 0; column < columnCount(); column++) { + if (column > 0) { + sb.append("|"); + } + + String val = String.valueOf(getColumn(column)); + // the value might contain multiple lines (plan execution for example) + // TODO: this needs to be improved to properly scale each row across multiple lines + String[] split = val.split("\\n"); + + for (int splitIndex = 0; splitIndex < split.length; splitIndex++) { + if (splitIndex > 0) { + sb.append("\n"); + } + String string = split[splitIndex]; + sb.append(string); + } + } + sb.append("\n"); + } + + return sb.toString(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java new file mode 100644 index 0000000000000..681a5eb1fbd24 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest; + +import java.util.TimeZone; + +// Typed object holding properties for a given action +public class Configuration { + public static final Configuration DEFAULT = new Configuration(TimeZone.getTimeZone("UTC"), + AbstractSqlQueryRequest.DEFAULT_FETCH_SIZE, + AbstractSqlQueryRequest.DEFAULT_REQUEST_TIMEOUT, + AbstractSqlQueryRequest.DEFAULT_PAGE_TIMEOUT, + null); + + private TimeZone timeZone; + private int pageSize; + private TimeValue requestTimeout; + private TimeValue pageTimeout; + + @Nullable + private QueryBuilder filter; + + public Configuration(TimeZone tz, int pageSize, TimeValue requestTimeout, TimeValue pageTimeout, QueryBuilder filter) { + this.timeZone = tz; + this.pageSize = pageSize; + this.requestTimeout = requestTimeout; + this.pageTimeout = pageTimeout; + this.filter = filter; + } + + public TimeZone timeZone() { + return timeZone; + } + + public int pageSize() { + return pageSize; + } + + public TimeValue requestTimeout() { + return requestTimeout; + } + + public TimeValue pageTimeout() { + return pageTimeout; + } + + public QueryBuilder filter() { + return filter; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursor.java new file mode 100644 index 0000000000000..ccb4a7cdc40d2 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursor.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; + +/** + * Information required to access the next page of response. + */ +public interface Cursor extends NamedWriteable { + Cursor EMPTY = EmptyCursor.INSTANCE; + + /** + * Request the next page of data. + */ + void nextPage(Configuration cfg, Client client, NamedWriteableRegistry registry, ActionListener listener); + + /** + * Cleans the resources associated with the cursor + */ + void clear(Configuration cfg, Client client, ActionListener listener); +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java new file mode 100644 index 0000000000000..2ecf8d6e115f1 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.sql.session;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.io.stream.NamedWriteable;
+import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
+import org.elasticsearch.xpack.sql.execution.search.CompositeAggregationCursor;
+import org.elasticsearch.xpack.sql.execution.search.ScrollCursor;
+import org.elasticsearch.xpack.sql.execution.search.extractor.BucketExtractors;
+import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractors;
+import org.elasticsearch.xpack.sql.expression.function.scalar.Processors;
+import org.elasticsearch.xpack.sql.plugin.CliFormatterCursor;
+
+import java.io.ByteArrayOutputStream;
+import java.io.OutputStream;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.List;
+
+/**
+ * Registry and utilities around {@link Cursor}s.
+ */
+public final class Cursors {
+
+    private static final NamedWriteableRegistry WRITEABLE_REGISTRY = new NamedWriteableRegistry(getNamedWriteables());
+
+    private Cursors() {}
+
+    /**
+     * The {@link NamedWriteable}s required to deserialize {@link Cursor}s.
+     */
+    public static List<NamedWriteableRegistry.Entry> getNamedWriteables() {
+        List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
+
+        // cursors
+        entries.add(new NamedWriteableRegistry.Entry(Cursor.class, EmptyCursor.NAME, in -> Cursor.EMPTY));
+        entries.add(new NamedWriteableRegistry.Entry(Cursor.class, ScrollCursor.NAME, ScrollCursor::new));
+        entries.add(new NamedWriteableRegistry.Entry(Cursor.class, CompositeAggregationCursor.NAME, CompositeAggregationCursor::new));
+        entries.add(new NamedWriteableRegistry.Entry(Cursor.class, CliFormatterCursor.NAME, CliFormatterCursor::new));
+
+        // plus all their dependencies
+        entries.addAll(Processors.getNamedWriteables());
+        entries.addAll(HitExtractors.getNamedWriteables());
+        entries.addAll(BucketExtractors.getNamedWriteables());
+
+        return entries;
+    }
+
+    /**
+     * Write a {@linkplain Cursor} to a string for serialization across xcontent.
+     */
+    public static String encodeToString(Version version, Cursor info) {
+        if (info == Cursor.EMPTY) {
+            return "";
+        }
+        try (ByteArrayOutputStream os = new ByteArrayOutputStream()) {
+            try (OutputStream base64 = Base64.getEncoder().wrap(os); StreamOutput out = new OutputStreamStreamOutput(base64)) {
+                Version.writeVersion(version, out);
+                out.writeNamedWriteable(info);
+            }
+            return os.toString(StandardCharsets.UTF_8.name());
+        } catch (Exception ex) {
+            throw new SqlIllegalArgumentException("Unexpected failure retrieving next page", ex);
+        }
+    }
+
+    /**
+     * Read a {@linkplain Cursor} from a string.
+ */ + public static Cursor decodeFromString(String info) { + if (info.isEmpty()) { + return Cursor.EMPTY; + } + byte[] bytes = info.getBytes(StandardCharsets.UTF_8); + try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(Base64.getDecoder().decode(bytes)), WRITEABLE_REGISTRY)) { + Version version = Version.readVersion(in); + if (version.after(Version.CURRENT)) { + throw new SqlIllegalArgumentException("Unsupported cursor version " + version); + } + in.setVersion(version); + return in.readNamedWriteable(Cursor.class); + } catch (SqlIllegalArgumentException ex) { + throw ex; + } catch (Exception ex) { + throw new SqlIllegalArgumentException("Unexpected failure decoding cursor", ex); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyCursor.java new file mode 100644 index 0000000000000..fd9c63438a878 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyCursor.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +class EmptyCursor implements Cursor { + static final String NAME = "0"; + static final EmptyCursor INSTANCE = new EmptyCursor(); + + private EmptyCursor() { + // Only one instance allowed + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + // Nothing to write + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void nextPage(Configuration cfg, Client client, NamedWriteableRegistry registry, ActionListener listener) { + throw new IllegalArgumentException("there is no next page"); + } + + @Override + public void clear(Configuration cfg, Client client, ActionListener listener) { + // There is nothing to clean + listener.onResponse(false); + } + + @Override + public boolean equals(Object obj) { + return obj == this; + } + + @Override + public int hashCode() { + return 27; + } + + @Override + public String toString() { + return "no next page"; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyExecutable.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyExecutable.java new file mode 100644 index 0000000000000..09e0d3ac2a300 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyExecutable.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
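`Cursors.encodeToString()` and `decodeFromString()` above round-trip a cursor through base64 with the writing node's `Version` prepended; the empty cursor maps to the empty string. A small sketch, illustration only:

```java
// Illustrative sketch only -- not part of this patch.
import org.elasticsearch.Version;
import org.elasticsearch.xpack.sql.session.Cursor;
import org.elasticsearch.xpack.sql.session.Cursors;

public class CursorRoundTrip {
    public static void main(String[] args) {
        // Cursor.EMPTY encodes to "" and "" decodes back to Cursor.EMPTY
        String encoded = Cursors.encodeToString(Version.CURRENT, Cursor.EMPTY);
        Cursor decoded = Cursors.decodeFromString(encoded);
        assert decoded == Cursor.EMPTY;
        // a cursor written by a newer Version is rejected with SqlIllegalArgumentException
    }
}
```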
+ */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.expression.Attribute; + +import java.util.List; +import java.util.Objects; + +public class EmptyExecutable implements Executable { + + private final List output; + + public EmptyExecutable(List output) { + this.output = output; + } + + @Override + public List output() { + return output; + } + + @Override + public void execute(SqlSession session, ActionListener listener) { + listener.onResponse(Rows.empty(output)); + } + + @Override + public int hashCode() { + return output.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + EmptyExecutable other = (EmptyExecutable) obj; + return Objects.equals(output, other.output); + } + + @Override + public String toString() { + return output.toString(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyRowSetCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyRowSetCursor.java new file mode 100644 index 0000000000000..7e943931e910c --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/EmptyRowSetCursor.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.xpack.sql.type.Schema; + +class EmptyRowSetCursor extends AbstractRowSet implements SchemaRowSet { + private final Schema schema; + + EmptyRowSetCursor(Schema schema) { + this.schema = schema; + } + + @Override + protected boolean doHasCurrent() { + return false; + } + + @Override + protected boolean doNext() { + return false; + } + + @Override + protected Object getColumn(int index) { + throw new UnsupportedOperationException(); + } + + @Override + protected void doReset() { + // no-op + } + + @Override + public int size() { + return 0; + } + + @Override + public Cursor nextPageCursor() { + return Cursor.EMPTY; + } + + @Override + public Schema schema() { + return schema; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Executable.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Executable.java new file mode 100644 index 0000000000000..dbc163170291e --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Executable.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.session; + +import java.util.List; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.expression.Attribute; + +public interface Executable { + + List output(); + + void execute(SqlSession session, ActionListener listener); +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/ListRowSetCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/ListRowSetCursor.java new file mode 100644 index 0000000000000..39987d21ac6b9 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/ListRowSetCursor.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.xpack.sql.type.Schema; + +import java.util.List; + +class ListRowSetCursor extends AbstractRowSet implements SchemaRowSet { + + private final Schema schema; + private final List> list; + private int pos = 0; + + ListRowSetCursor(Schema schema, List> list) { + this.schema = schema; + this.list = list; + } + + @Override + protected boolean doHasCurrent() { + return pos < list.size(); + } + + @Override + protected boolean doNext() { + if (pos + 1 < list.size()) { + pos++; + return true; + } + return false; + } + + @Override + protected Object getColumn(int index) { + return list.get(pos).get(index); + } + + @Override + protected void doReset() { + pos = 0; + } + + @Override + public int size() { + return list.size(); + } + + @Override + public Cursor nextPageCursor() { + return Cursor.EMPTY; + } + + @Override + public Schema schema() { + return schema; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/RowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/RowSet.java new file mode 100644 index 0000000000000..38a22ff73f145 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/RowSet.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.session; + +import java.util.function.Consumer; + +/** + * A set of rows to be returned at one time and a way + * to get the next set of rows. + */ +public interface RowSet extends RowView { + + boolean hasCurrentRow(); + + boolean advanceRow(); + + // number or rows in this set; while not really necessary (the return of advanceRow works) + int size(); + + void reset(); + + /** + * The key used by PlanExecutor#nextPage to fetch the next page. + */ + Cursor nextPageCursor(); + + default void forEachRow(Consumer action) { + for (boolean hasRows = hasCurrentRow(); hasRows; hasRows = advanceRow()) { + action.accept(this); + } + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/RowView.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/RowView.java new file mode 100644 index 0000000000000..c37b018d524c7 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/RowView.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.session; + +import java.util.Iterator; +import java.util.NoSuchElementException; +import java.util.Objects; +import java.util.function.Consumer; + +/** + * A view into a row. + * Offers access to the data but it shouldn't be held since it is not a data container. + */ +public interface RowView extends Iterable { + /** + * Number of columns in this row. + */ + int columnCount(); + + Object column(int index); + + default T column(int index, Class type) { + return type.cast(column(index)); + } + + @Override + default void forEach(Consumer action) { + forEachColumn(action::accept); + } + + default void forEachColumn(Consumer action) { + Objects.requireNonNull(action); + int rowSize = columnCount(); + for (int i = 0; i < rowSize; i++) { + action.accept(column(i)); + } + } + + @Override + default Iterator iterator() { + return new Iterator() { + private int pos = 0; + private final int rowSize = columnCount(); + + @Override + public boolean hasNext() { + return pos < rowSize; + } + + @Override + public Object next() { + if (pos >= rowSize) { + throw new NoSuchElementException(); + } + return column(pos++); + } + }; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Rows.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Rows.java new file mode 100644 index 0000000000000..00b261b4be71a --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Rows.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.Schema; +import org.elasticsearch.xpack.sql.util.Check; + +import java.util.ArrayList; +import java.util.List; + +public abstract class Rows { + + public static Schema schema(List attr) { + List names = new ArrayList<>(attr.size()); + List types = new ArrayList<>(attr.size()); + + for (Attribute a : attr) { + names.add(a.name()); + types.add(a.dataType()); + } + return new Schema(names, types); + } + + public static SchemaRowSet of(List attrs, List> values) { + if (values.isEmpty()) { + return empty(attrs); + } + + if (values.size() == 1) { + return singleton(attrs, values.get(0).toArray()); + } + + Schema schema = schema(attrs); + return new ListRowSetCursor(schema, values); + } + + public static SchemaRowSet singleton(List attrs, Object... values) { + return singleton(schema(attrs), values); + } + + public static SchemaRowSet singleton(Schema schema, Object... 
values) { + Check.isTrue(schema.size() == values.length, "Schema {} and values {} are out of sync", schema, values); + return new SingletonRowSet(schema, values); + } + + public static SchemaRowSet empty(Schema schema) { + return new EmptyRowSetCursor(schema); + } + + public static SchemaRowSet empty(List attrs) { + return new EmptyRowSetCursor(schema(attrs)); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SchemaRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SchemaRowSet.java new file mode 100644 index 0000000000000..88c89b4054328 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SchemaRowSet.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.xpack.sql.type.Schema; + +/** + * A {@linkplain RowSet} with the {@link Schema} for the results + * attached. + */ +public interface SchemaRowSet extends RowSet { + /** + * Schema for the results. + */ + Schema schema(); + + @Override + default int columnCount() { + return schema().names().size(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SingletonExecutable.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SingletonExecutable.java new file mode 100644 index 0000000000000..129c30a0df3b8 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SingletonExecutable.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.util.Check; + +import java.util.List; + +public class SingletonExecutable implements Executable { + + private final List output; + private final Object[] values; + + public SingletonExecutable(List output, Object... values) { + Check.isTrue(output.size() == values.length, "Attributes {} and values {} are out of sync", output, values); + this.output = output; + this.values = values; + } + + @Override + public List output() { + return output; + } + + @Override + public void execute(SqlSession session, ActionListener listener) { + listener.onResponse(Rows.singleton(output, values)); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < values.length; i++) { + sb.append(output.get(i)); + sb.append("="); + sb.append(values[i]); + } + return sb.toString(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SingletonRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SingletonRowSet.java new file mode 100644 index 0000000000000..c8a4e5eddfb6e --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SingletonRowSet.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
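The row abstractions above separate the data (`RowSet`/`RowView`) from its shape (`Schema` via `SchemaRowSet`). A hedged sketch of a consumer, assuming `Schema.names()` exposes the column names as a list (illustration only, not part of the patch):

```java
// Illustrative sketch only -- not part of this patch.
import org.elasticsearch.xpack.sql.session.SchemaRowSet;

public class RowSetPrinter {
    // walks every row, printing "name=value" pairs based on the schema
    public static void print(SchemaRowSet rows) {
        rows.forEachRow(row -> {
            StringBuilder line = new StringBuilder();
            for (int i = 0; i < row.columnCount(); i++) {
                if (i > 0) {
                    line.append(", ");
                }
                line.append(rows.schema().names().get(i)).append("=").append(row.column(i));
            }
            System.out.println(line);
        });
    }
}
```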
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.xpack.sql.type.Schema; + +//TODO is it worth keeping this when we have ListRowSet? +class SingletonRowSet extends AbstractRowSet implements SchemaRowSet { + + private final Schema schema; + private final Object[] values; + + SingletonRowSet(Schema schema, Object[] values) { + this.schema = schema; + this.values = values; + } + + @Override + protected boolean doHasCurrent() { + return true; + } + + @Override + protected boolean doNext() { + return false; + } + + @Override + protected Object getColumn(int index) { + return values[index]; + } + + @Override + protected void doReset() { + // no-op + } + + @Override + public int size() { + return 1; + } + + @Override + public Cursor nextPageCursor() { + return Cursor.EMPTY; + } + + @Override + public Schema schema() { + return schema; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java new file mode 100644 index 0000000000000..880e98c606408 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.session; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Strings; +import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; +import org.elasticsearch.xpack.sql.analysis.analyzer.PreAnalyzer; +import org.elasticsearch.xpack.sql.analysis.analyzer.PreAnalyzer.PreAnalysis; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver; +import org.elasticsearch.xpack.sql.analysis.index.MappingException; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.optimizer.Optimizer; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.plan.TableIdentifier; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.sql.planner.Planner; +import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.rule.RuleExecutor; + +import java.util.List; +import java.util.function.Function; + +import static org.elasticsearch.action.ActionListener.wrap; + +public class SqlSession { + + private final Client client; + + private final FunctionRegistry functionRegistry; + private final IndexResolver indexResolver; + private final PreAnalyzer preAnalyzer; + private final Optimizer optimizer; + private final Planner planner; + + // TODO rename to `configuration` + private final Configuration settings; + + public SqlSession(SqlSession other) { + this(other.settings, other.client, other.functionRegistry, other.indexResolver, + other.preAnalyzer, other.optimizer,other.planner); + } + + public SqlSession(Configuration settings, Client client, FunctionRegistry functionRegistry, + IndexResolver indexResolver, + PreAnalyzer preAnalyzer, + Optimizer 
optimizer, + Planner planner) { + this.client = client; + this.functionRegistry = functionRegistry; + + this.indexResolver = indexResolver; + this.preAnalyzer = preAnalyzer; + this.optimizer = optimizer; + this.planner = planner; + + this.settings = settings; + } + + public FunctionRegistry functionRegistry() { + return functionRegistry; + } + + public Client client() { + return client; + } + + public Planner planner() { + return planner; + } + + public IndexResolver indexResolver() { + return indexResolver; + } + + public Optimizer optimizer() { + return optimizer; + } + + private LogicalPlan doParse(String sql, List params) { + return new SqlParser().createStatement(sql, params); + } + + public void analyzedPlan(LogicalPlan parsed, boolean verify, ActionListener listener) { + if (parsed.analyzed()) { + listener.onResponse(parsed); + return; + } + + preAnalyze(parsed, c -> { + Analyzer analyzer = new Analyzer(functionRegistry, c, settings.timeZone()); + LogicalPlan p = analyzer.analyze(parsed); + return verify ? analyzer.verify(p) : p; + }, listener); + } + + public void debugAnalyzedPlan(LogicalPlan parsed, ActionListener.ExecutionInfo> listener) { + if (parsed.analyzed()) { + listener.onResponse(null); + return; + } + + preAnalyze(parsed, r -> { + Analyzer analyzer = new Analyzer(functionRegistry, r, settings.timeZone()); + return analyzer.debugAnalyze(parsed); + }, listener); + } + + private void preAnalyze(LogicalPlan parsed, Function action, ActionListener listener) { + PreAnalysis preAnalysis = preAnalyzer.preAnalyze(parsed); + // TODO we plan to support joins in the future when possible, but for now we'll just fail early if we see one + if (preAnalysis.indices.size() > 1) { + // Note: JOINs are not supported but we detect them when + listener.onFailure(new MappingException("Queries with multiple indices are not supported")); + } else if (preAnalysis.indices.size() == 1) { + TableIdentifier table = preAnalysis.indices.get(0); + + String cluster = table.cluster(); + + if (Strings.hasText(cluster) && !indexResolver.clusterName().equals(cluster)) { + listener.onFailure(new MappingException("Cannot inspect indices in cluster/catalog [{}]", cluster)); + } + + indexResolver.resolveWithSameMapping(table.index(), null, + wrap(indexResult -> listener.onResponse(action.apply(indexResult)), listener::onFailure)); + } else { + try { + // occurs when dealing with local relations (SELECT 5+2) + listener.onResponse(action.apply(IndexResolution.invalid("[none specified]"))); + } catch (Exception ex) { + listener.onFailure(ex); + } + } + } + + public void optimizedPlan(LogicalPlan verified, ActionListener listener) { + analyzedPlan(verified, true, wrap(v -> listener.onResponse(optimizer.optimize(v)), listener::onFailure)); + } + + public void physicalPlan(LogicalPlan optimized, boolean verify, ActionListener listener) { + optimizedPlan(optimized, wrap(o -> listener.onResponse(planner.plan(o, verify)), listener::onFailure)); + } + + public void sql(String sql, List params, ActionListener listener) { + sqlExecutable(sql, params, wrap(e -> e.execute(this, listener), listener::onFailure)); + } + + public void sqlExecutable(String sql, List params, ActionListener listener) { + try { + physicalPlan(doParse(sql, params), true, listener); + } catch (Exception ex) { + listener.onFailure(ex); + } + } + + public Configuration settings() { + return settings; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/tree/Location.java 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/tree/Location.java new file mode 100644 index 0000000000000..6a5543a69db70 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/tree/Location.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.tree; + +import java.util.Objects; + +public final class Location { + private final int line; + private final int charPositionInLine; + + public static final Location EMPTY = new Location(-1, -2); + + public Location(int line, int charPositionInLine) { + this.line = line; + this.charPositionInLine = charPositionInLine; + } + + public int getLineNumber() { + return line; + } + + public int getColumnNumber() { + return charPositionInLine + 1; + } + + @Override + public String toString() { + return "@" + getLineNumber() + ":" + getColumnNumber(); + } + + @Override + public int hashCode() { + return Objects.hash(line, charPositionInLine); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Location other = (Location) obj; + return line == other.line + && charPositionInLine == other.charPositionInLine; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/tree/Node.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/tree/Node.java new file mode 100644 index 0000000000000..c0d885c6dcc4f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/tree/Node.java @@ -0,0 +1,386 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.tree; + +import java.util.ArrayList; +import java.util.BitSet; +import java.util.List; +import java.util.Objects; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.Predicate; + +import static java.util.Collections.emptyList; + +/** + * Immutable tree structure. + * The traversal is done depth-first, pre-order (first the node then its children), that is seeks up and then goes down. + * Alternative method for post-order (children first, then node) is also offered, that is seeks down and then goes up. + * + * Allows transformation which returns the same tree (if no change has been performed) or a new tree otherwise. + * + * While it tries as much as possible to use functional Java, due to lack of parallelism, + * the use of streams and iterators is not really useful and brings too much baggage which + * might be used incorrectly. + * + * @param node type + */ +public abstract class Node> { + private static final int TO_STRING_MAX_PROP = 10; + private static final int TO_STRING_MAX_WIDTH = 110; + + private final Location location; + private final List children; + + public Node(Location location, List children) { + this.location = (location != null ? 
location : Location.EMPTY); + this.children = children; + } + + public Location location() { + return location; + } + + public List children() { + return children; + } + + @SuppressWarnings("unchecked") + public void forEachDown(Consumer action) { + action.accept((T) this); + children().forEach(c -> c.forEachDown(action)); + } + + @SuppressWarnings("unchecked") + public void forEachDown(Consumer action, final Class typeToken) { + forEachDown(t -> { + if (typeToken.isInstance(t)) { + action.accept((E) t); + } + }); + } + + @SuppressWarnings("unchecked") + public void forEachUp(Consumer action) { + children().forEach(c -> c.forEachUp(action)); + action.accept((T) this); + } + + @SuppressWarnings("unchecked") + public void forEachUp(Consumer action, final Class typeToken) { + forEachUp(t -> { + if (typeToken.isInstance(t)) { + action.accept((E) t); + } + }); + } + + public void forEachPropertiesOnly(Consumer rule, Class typeToken) { + forEachProperty(rule, typeToken); + } + + public void forEachPropertiesDown(Consumer rule, Class typeToken) { + forEachDown(e -> e.forEachProperty(rule, typeToken)); + } + + public void forEachPropertiesUp(Consumer rule, Class typeToken) { + forEachUp(e -> e.forEachProperty(rule, typeToken)); + } + + @SuppressWarnings("unchecked") + protected void forEachProperty(Consumer rule, Class typeToken) { + for (Object prop : info().properties()) { + // skip children (only properties are interesting) + if (prop != children && !children.contains(prop) && typeToken.isInstance(prop)) { + rule.accept((E) prop); + } + } + } + + @SuppressWarnings("unchecked") + public boolean anyMatch(Predicate predicate) { + boolean result = predicate.test((T) this); + if (!result) { + for (T child : children) { + if (child.anyMatch(predicate)) { + return true; + } + } + } + return result; + } + + public List collect(Predicate predicate) { + List l = new ArrayList<>(); + forEachDown(n -> { + if (predicate.test(n)) { + l.add(n); + } + }); + return l.isEmpty() ? emptyList() : l; + } + + public List collectLeaves() { + return collect(n -> n.children().isEmpty()); + } + + // parse the list in pre-order and on match, skip the child/branch and move on to the next child/branch + public List collectFirstChildren(Predicate predicate) { + List matches = new ArrayList<>(); + doCollectFirst(predicate, matches); + return matches; + } + + @SuppressWarnings("unchecked") + protected void doCollectFirst(Predicate predicate, List matches) { + T t = (T) this; + if (predicate.test(t)) { + matches.add(t); + } else { + for (T child : children()) { + child.doCollectFirst(predicate, matches); + } + } + } + + // TODO: maybe add a flatMap (need to double check the Stream bit) + + // + // Transform methods + // + + // + // transform the node itself and its children + // + + @SuppressWarnings("unchecked") + public T transformDown(Function rule) { + T root = rule.apply((T) this); + Node node = this.equals(root) ? this : root; + + return node.transformChildren(child -> child.transformDown(rule)); + } + + @SuppressWarnings("unchecked") + public T transformDown(Function rule, final Class typeToken) { + // type filtering function + return transformDown((t) -> (typeToken.isInstance(t) ? rule.apply((E) t) : t)); + } + + @SuppressWarnings("unchecked") + public T transformUp(Function rule) { + T transformed = transformChildren(child -> child.transformUp(rule)); + T node = this.equals(transformed) ? 
(T) this : transformed; + return rule.apply(node); + } + + @SuppressWarnings("unchecked") + public T transformUp(Function rule, final Class typeToken) { + // type filtering function + return transformUp((t) -> (typeToken.isInstance(t) ? rule.apply((E) t) : t)); + } + + @SuppressWarnings("unchecked") + protected > T transformChildren(Function traversalOperation) { + boolean childrenChanged = false; + + // stream() could be used but the code is just as complicated without any advantages + // further more, it would include bring in all the associated stream/collector object creation even though in + // most cases the immediate tree would be quite small (0,1,2 elements) + List transformedChildren = new ArrayList<>(children().size()); + + for (T child : children) { + T next = traversalOperation.apply(child); + if (!child.equals(next)) { + childrenChanged = true; + } + else { + // use the initial value + next = child; + } + transformedChildren.add(next); + } + + return (childrenChanged ? replaceChildren(transformedChildren) : (T) this); + } + + /** + * Replace the children of this node. + */ + public abstract T replaceChildren(List newChildren); + + // + // transform the node properties and use the tree only for navigation + // + + public T transformPropertiesOnly(Function rule, Class typeToken) { + return transformNodeProps(rule, typeToken); + } + + public T transformPropertiesDown(Function rule, Class typeToken) { + return transformDown(t -> t.transformNodeProps(rule, typeToken)); + } + + public T transformPropertiesUp(Function rule, Class typeToken) { + return transformUp(t -> t.transformNodeProps(rule, typeToken)); + } + + /** + * Transform this node's properties. + *
+     * <p>
+ * This always returns something of the same type as the current + * node but since {@link Node} doesn't have a {@code SelfT} parameter + * we return the closest thing we do have: {@code T}, which is the + * root of the hierarchy for the this node. + */ + protected final T transformNodeProps(Function rule, Class typeToken) { + return info().transform(rule, typeToken); + } + + /** + * Return the information about this node. + */ + protected abstract NodeInfo info(); + + @Override + public int hashCode() { + return Objects.hash(children); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Node other = (Node) obj; + return Objects.equals(children(), other.children()); + } + + public String nodeName() { + return getClass().getSimpleName(); + } + + public String nodeString() { + StringBuilder sb = new StringBuilder(); + sb.append(nodeName()); + sb.append("["); + sb.append(propertiesToString(true)); + sb.append("]"); + return sb.toString(); + } + + @Override + public String toString() { + return treeString(new StringBuilder(), 0, new BitSet()).toString(); + } + + /** + * Render this {@link Node} as a tree like + *
+     * <pre>
+     * {@code
+     * Project[[i{f}#0]]
+     * \_Filter[i{f}#1]
+     *   \_SubQueryAlias[test]
+     *     \_EsRelation[test][i{f}#2]
+     * }
+     * </pre>
+ */ + final StringBuilder treeString(StringBuilder sb, int depth, BitSet hasParentPerDepth) { + if (depth > 0) { + // draw children + for (int column = 0; column < depth; column++) { + if (hasParentPerDepth.get(column)) { + sb.append("|"); + // if not the last elder, adding padding (since each column has two chars ("|_" or "\_") + if (column < depth - 1) { + sb.append(" "); + } + } + else { + // if the child has no parent (elder on the previous level), it means its the last sibling + sb.append((column == depth - 1) ? "\\" : " "); + } + } + + sb.append("_"); + } + + sb.append(nodeString()); + + List children = children(); + if (!children.isEmpty()) { + sb.append("\n"); + } + for (int i = 0; i < children.size(); i++) { + T t = children.get(i); + hasParentPerDepth.set(depth, i < children.size() - 1); + t.treeString(sb, depth + 1, hasParentPerDepth); + if (i < children.size() - 1) { + sb.append("\n"); + } + } + return sb; + } + + /** + * Render the properties of this {@link Node} one by + * one like {@code foo bar baz}. These go inside the + * {@code [} and {@code ]} of the output of {@link #treeString}. + */ + public String propertiesToString(boolean skipIfChild) { + NodeInfo> info = info(); + StringBuilder sb = new StringBuilder(); + + List children = children(); + // eliminate children (they are rendered as part of the tree) + int remainingProperties = TO_STRING_MAX_PROP; + int maxWidth = 0; + boolean needsComma = false; + + List props = info.properties(); + for (Object prop : props) { + // consider a property if it is not ignored AND + // it's not a child (optional) + if (!(skipIfChild && (children.contains(prop) || children.equals(prop)))) { + if (remainingProperties-- < 0) { + sb.append("...").append(props.size() - TO_STRING_MAX_PROP).append("fields not shown"); + break; + } + + if (needsComma) { + sb.append(","); + } + String stringValue = Objects.toString(prop); + if (maxWidth + stringValue.length() > TO_STRING_MAX_WIDTH) { + int cutoff = Math.max(0, TO_STRING_MAX_WIDTH - maxWidth); + sb.append(stringValue.substring(0, cutoff)); + sb.append("\n"); + stringValue = stringValue.substring(cutoff); + maxWidth = 0; + } + maxWidth += stringValue.length(); + sb.append(stringValue); + + needsComma = true; + } + } + + return sb.toString(); + } + + /** + * The values of all the properties that are important + * to this {@link Node}. + */ + public List properties() { + return info().properties(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/tree/NodeInfo.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/tree/NodeInfo.java new file mode 100644 index 0000000000000..23f29137cffa7 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/tree/NodeInfo.java @@ -0,0 +1,403 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.tree; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.function.BiFunction; +import java.util.function.Function; + +import static java.util.Collections.emptyList; +import static java.util.Collections.unmodifiableList; + +/** + * Information about a {@link Node}. + *
+ * <p>
+ * All the uses of this are fairly non-OO and we're looking + * for ways to use this less and less. + *
+ * <p>
+ * The implementations of this class are super copy-and-paste-ish + * but they are better then the sneaky reflection tricks we had + * earlier. Still terrifying. + * + * @param actual subclass of node that produced this {@linkplain NodeInfo} + */ +public abstract class NodeInfo> { + protected final T node; + + private NodeInfo(T node) { + this.node = node; + } + + /** + * Values for all properties on the instance that created + * this {@linkplain NodeInfo}. + */ + public final List properties() { + return unmodifiableList(innerProperties()); + } + protected abstract List innerProperties(); + + /** + * Transform the properties on {@code node}, returning a new instance + * of {@code N} if any properties change. + */ + final T transform(Function rule, Class typeToken) { + List children = node.children(); + + Function realRule = p -> { + if (p != children && false == children.contains(p) + && (p == null || typeToken.isInstance(p))) { + return rule.apply(typeToken.cast(p)); + } + return p; + }; + return innerTransform(realRule); + } + protected abstract T innerTransform(Function rule); + + /** + * Builds a {@link NodeInfo} for Nodes without any properties. + */ + public static > NodeInfo create(T n) { + return new NodeInfo(n) { + @Override + protected List innerProperties() { + return emptyList(); + } + + protected T innerTransform(Function rule) { + return node; + } + }; + } + + public static , P1> NodeInfo create( + T n, BiFunction ctor, + P1 p1) { + return new NodeInfo(n) { + @Override + protected List innerProperties() { + return Arrays.asList(p1); + } + + protected T innerTransform(Function rule) { + boolean same = true; + + @SuppressWarnings("unchecked") + P1 newP1 = (P1) rule.apply(p1); + same &= Objects.equals(p1, newP1); + + return same ? node : ctor.apply(node.location(), newP1); + } + }; + } + + public static , P1, P2> NodeInfo create( + T n, NodeCtor2 ctor, + P1 p1, P2 p2) { + return new NodeInfo(n) { + @Override + protected List innerProperties() { + return Arrays.asList(p1, p2); + } + + protected T innerTransform(Function rule) { + boolean same = true; + + @SuppressWarnings("unchecked") + P1 newP1 = (P1) rule.apply(p1); + same &= Objects.equals(p1, newP1); + @SuppressWarnings("unchecked") + P2 newP2 = (P2) rule.apply(p2); + same &= Objects.equals(p2, newP2); + + return same ? node : ctor.apply(node.location(), newP1, newP2); + } + }; + } + public interface NodeCtor2 { + T apply(Location l, P1 p1, P2 p2); + } + + public static , P1, P2, P3> NodeInfo create( + T n, NodeCtor3 ctor, + P1 p1, P2 p2, P3 p3) { + return new NodeInfo(n) { + @Override + protected List innerProperties() { + return Arrays.asList(p1, p2, p3); + } + + protected T innerTransform(Function rule) { + boolean same = true; + + @SuppressWarnings("unchecked") + P1 newP1 = (P1) rule.apply(p1); + same &= Objects.equals(p1, newP1); + @SuppressWarnings("unchecked") + P2 newP2 = (P2) rule.apply(p2); + same &= Objects.equals(p2, newP2); + @SuppressWarnings("unchecked") + P3 newP3 = (P3) rule.apply(p3); + same &= Objects.equals(p3, newP3); + + return same ? 
node : ctor.apply(node.location(), newP1, newP2, newP3); + } + }; + } + public interface NodeCtor3 { + T apply(Location l, P1 p1, P2 p2, P3 p3); + } + + public static , P1, P2, P3, P4> NodeInfo create( + T n, NodeCtor4 ctor, + P1 p1, P2 p2, P3 p3, P4 p4) { + return new NodeInfo(n) { + @Override + protected List innerProperties() { + return Arrays.asList(p1, p2, p3, p4); + } + + protected T innerTransform(Function rule) { + boolean same = true; + + @SuppressWarnings("unchecked") + P1 newP1 = (P1) rule.apply(p1); + same &= Objects.equals(p1, newP1); + @SuppressWarnings("unchecked") + P2 newP2 = (P2) rule.apply(p2); + same &= Objects.equals(p2, newP2); + @SuppressWarnings("unchecked") + P3 newP3 = (P3) rule.apply(p3); + same &= Objects.equals(p3, newP3); + @SuppressWarnings("unchecked") + P4 newP4 = (P4) rule.apply(p4); + same &= Objects.equals(p4, newP4); + + return same ? node : ctor.apply(node.location(), newP1, newP2, newP3, newP4); + } + }; + } + public interface NodeCtor4 { + T apply(Location l, P1 p1, P2 p2, P3 p3, P4 p4); + } + + public static , P1, P2, P3, P4, P5> NodeInfo create( + T n, NodeCtor5 ctor, + P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) { + return new NodeInfo(n) { + @Override + protected List innerProperties() { + return Arrays.asList(p1, p2, p3, p4, p5); + } + + protected T innerTransform(Function rule) { + boolean same = true; + + @SuppressWarnings("unchecked") + P1 newP1 = (P1) rule.apply(p1); + same &= Objects.equals(p1, newP1); + @SuppressWarnings("unchecked") + P2 newP2 = (P2) rule.apply(p2); + same &= Objects.equals(p2, newP2); + @SuppressWarnings("unchecked") + P3 newP3 = (P3) rule.apply(p3); + same &= Objects.equals(p3, newP3); + @SuppressWarnings("unchecked") + P4 newP4 = (P4) rule.apply(p4); + same &= Objects.equals(p4, newP4); + @SuppressWarnings("unchecked") + P5 newP5 = (P5) rule.apply(p5); + same &= Objects.equals(p5, newP5); + + return same ? node : ctor.apply(node.location(), newP1, newP2, newP3, newP4, newP5); + } + }; + } + public interface NodeCtor5 { + T apply(Location l, P1 p1, P2 p2, P3 p3, P4 p4, P5 p5); + } + + public static , P1, P2, P3, P4, P5, P6> NodeInfo create( + T n, NodeCtor6 ctor, + P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) { + return new NodeInfo(n) { + @Override + protected List innerProperties() { + return Arrays.asList(p1, p2, p3, p4, p5, p6); + } + + protected T innerTransform(Function rule) { + boolean same = true; + + @SuppressWarnings("unchecked") + P1 newP1 = (P1) rule.apply(p1); + same &= Objects.equals(p1, newP1); + @SuppressWarnings("unchecked") + P2 newP2 = (P2) rule.apply(p2); + same &= Objects.equals(p2, newP2); + @SuppressWarnings("unchecked") + P3 newP3 = (P3) rule.apply(p3); + same &= Objects.equals(p3, newP3); + @SuppressWarnings("unchecked") + P4 newP4 = (P4) rule.apply(p4); + same &= Objects.equals(p4, newP4); + @SuppressWarnings("unchecked") + P5 newP5 = (P5) rule.apply(p5); + same &= Objects.equals(p5, newP5); + @SuppressWarnings("unchecked") + P6 newP6 = (P6) rule.apply(p6); + same &= Objects.equals(p6, newP6); + + return same ? 
node : ctor.apply(node.location(), newP1, newP2, newP3, newP4, newP5, newP6); + } + }; + } + public interface NodeCtor6 { + T apply(Location l, P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6); + } + + public static , P1, P2, P3, P4, P5, P6, P7> NodeInfo create( + T n, NodeCtor7 ctor, + P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) { + return new NodeInfo(n) { + @Override + protected List innerProperties() { + return Arrays.asList(p1, p2, p3, p4, p5, p6, p7); + } + + protected T innerTransform(Function rule) { + boolean same = true; + + @SuppressWarnings("unchecked") + P1 newP1 = (P1) rule.apply(p1); + same &= Objects.equals(p1, newP1); + @SuppressWarnings("unchecked") + P2 newP2 = (P2) rule.apply(p2); + same &= Objects.equals(p2, newP2); + @SuppressWarnings("unchecked") + P3 newP3 = (P3) rule.apply(p3); + same &= Objects.equals(p3, newP3); + @SuppressWarnings("unchecked") + P4 newP4 = (P4) rule.apply(p4); + same &= Objects.equals(p4, newP4); + @SuppressWarnings("unchecked") + P5 newP5 = (P5) rule.apply(p5); + same &= Objects.equals(p5, newP5); + @SuppressWarnings("unchecked") + P6 newP6 = (P6) rule.apply(p6); + same &= Objects.equals(p6, newP6); + @SuppressWarnings("unchecked") + P7 newP7 = (P7) rule.apply(p7); + same &= Objects.equals(p7, newP7); + + return same ? node : ctor.apply(node.location(), newP1, newP2, newP3, newP4, newP5, newP6, newP7); + } + }; + } + public interface NodeCtor7 { + T apply(Location l, P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7); + } + + public static , P1, P2, P3, P4, P5, P6, P7, P8> NodeInfo create( + T n, NodeCtor8 ctor, + P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8) { + return new NodeInfo(n) { + @Override + protected List innerProperties() { + return Arrays.asList(p1, p2, p3, p4, p5, p6, p7, p8); + } + + protected T innerTransform(Function rule) { + boolean same = true; + + @SuppressWarnings("unchecked") + P1 newP1 = (P1) rule.apply(p1); + same &= Objects.equals(p1, newP1); + @SuppressWarnings("unchecked") + P2 newP2 = (P2) rule.apply(p2); + same &= Objects.equals(p2, newP2); + @SuppressWarnings("unchecked") + P3 newP3 = (P3) rule.apply(p3); + same &= Objects.equals(p3, newP3); + @SuppressWarnings("unchecked") + P4 newP4 = (P4) rule.apply(p4); + same &= Objects.equals(p4, newP4); + @SuppressWarnings("unchecked") + P5 newP5 = (P5) rule.apply(p5); + same &= Objects.equals(p5, newP5); + @SuppressWarnings("unchecked") + P6 newP6 = (P6) rule.apply(p6); + same &= Objects.equals(p6, newP6); + @SuppressWarnings("unchecked") + P7 newP7 = (P7) rule.apply(p7); + same &= Objects.equals(p7, newP7); + @SuppressWarnings("unchecked") + P8 newP8 = (P8) rule.apply(p8); + same &= Objects.equals(p8, newP8); + + return same ? 
node : ctor.apply(node.location(), newP1, newP2, newP3, newP4, newP5, newP6, newP7, newP8); + } + }; + } + public interface NodeCtor8 { + T apply(Location l, P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8); + } + + public static , P1, P2, P3, P4, P5, P6, P7, P8, P9, P10> NodeInfo create( + T n, NodeCtor10 ctor, + P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8, P9 p9, P10 p10) { + return new NodeInfo(n) { + @Override + protected List innerProperties() { + return Arrays.asList(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10); + } + + protected T innerTransform(Function rule) { + boolean same = true; + + @SuppressWarnings("unchecked") + P1 newP1 = (P1) rule.apply(p1); + same &= Objects.equals(p1, newP1); + @SuppressWarnings("unchecked") + P2 newP2 = (P2) rule.apply(p2); + same &= Objects.equals(p2, newP2); + @SuppressWarnings("unchecked") + P3 newP3 = (P3) rule.apply(p3); + same &= Objects.equals(p3, newP3); + @SuppressWarnings("unchecked") + P4 newP4 = (P4) rule.apply(p4); + same &= Objects.equals(p4, newP4); + @SuppressWarnings("unchecked") + P5 newP5 = (P5) rule.apply(p5); + same &= Objects.equals(p5, newP5); + @SuppressWarnings("unchecked") + P6 newP6 = (P6) rule.apply(p6); + same &= Objects.equals(p6, newP6); + @SuppressWarnings("unchecked") + P7 newP7 = (P7) rule.apply(p7); + same &= Objects.equals(p7, newP7); + @SuppressWarnings("unchecked") + P8 newP8 = (P8) rule.apply(p8); + same &= Objects.equals(p8, newP8); + @SuppressWarnings("unchecked") + P9 newP9 = (P9) rule.apply(p9); + same &= Objects.equals(p9, newP9); + @SuppressWarnings("unchecked") + P10 newP10 = (P10) rule.apply(p10); + same &= Objects.equals(p10, newP10); + + return same ? node : ctor.apply(node.location(), newP1, newP2, newP3, newP4, newP5, newP6, newP7, newP8, + newP9, newP10); + } + }; + } + public interface NodeCtor10 { + T apply(Location l, P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8, P9 p9, P10 p10); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/tree/NodeUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/tree/NodeUtils.java new file mode 100644 index 0000000000000..f2cb74f6abcb4 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/tree/NodeUtils.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
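To make the `Node`/`NodeInfo` contract more concrete, here is a minimal sketch of a self-contained node type. Nothing in it is part of this change: the `Labeled` class, its `label` property and the `main` method are invented for illustration, and it assumes `Node` is parameterized as `Node<T extends Node<T>>` and that the one-property `NodeInfo.create` takes a `BiFunction<Location, P1, T>` constructor reference.

```java
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.Objects;

import org.elasticsearch.xpack.sql.tree.Location;
import org.elasticsearch.xpack.sql.tree.Node;
import org.elasticsearch.xpack.sql.tree.NodeInfo;

import static java.util.Collections.emptyList;

/** Hypothetical node type: a String label plus children, enough to exercise the tree API. */
public class Labeled extends Node<Labeled> {

    private final String label;

    public Labeled(Location location, String label, List<Labeled> children) {
        super(location, children);
        this.label = label;
    }

    public String label() {
        return label;
    }

    @Override
    public Labeled replaceChildren(List<Labeled> newChildren) {
        // invoked by transformChildren() whenever at least one child was rewritten
        return new Labeled(location(), label, newChildren);
    }

    @Override
    protected NodeInfo<Labeled> info() {
        // label is the only non-child property; NodeInfo rebuilds the node when a rule changes it
        return NodeInfo.create(this, (loc, newLabel) -> new Labeled(loc, newLabel, children()), label);
    }

    // Node.equals()/hashCode() only cover the children, so this sketch folds its own property in;
    // otherwise the transform* methods could not tell that a rewritten node differs from the original.
    @Override
    public boolean equals(Object obj) {
        return super.equals(obj) && Objects.equals(label, ((Labeled) obj).label);
    }

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), label);
    }

    public static void main(String[] args) {
        Labeled leaf = new Labeled(Location.EMPTY, "leaf", emptyList());
        Labeled root = new Labeled(Location.EMPTY, "root", Arrays.asList(leaf));

        // pre-order traversal: prints Labeled[root] then Labeled[leaf]
        root.forEachDown(n -> System.out.println(n.nodeString()));

        // property-only transformation: upper-cases every String property, top-down
        Labeled upper = root.transformPropertiesDown(
                (String s) -> s.toUpperCase(Locale.ROOT), String.class);
        System.out.println(upper);
    }
}
```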
+ */ +package org.elasticsearch.xpack.sql.tree; + +public abstract class NodeUtils { + public static , B extends Node> String diffString(A left, B right) { + return diffString(left.toString(), right.toString()); + } + + public static String diffString(String left, String right) { + // break the strings into lines + // then compare each line + String[] leftSplit = left.split("\\n"); + String[] rightSplit = right.split("\\n"); + + // find max - we could use streams but autoboxing is not cool + int leftMaxPadding = 0; + for (String string : leftSplit) { + leftMaxPadding = Math.max(string.length(), leftMaxPadding); + } + + // try to allocate the buffer - 5 represents the column comparison chars + StringBuilder sb = new StringBuilder(left.length() + right.length() + Math.max(left.length(), right.length()) * 3); + + boolean leftAvailable = true, rightAvailable = true; + for (int leftIndex = 0, rightIndex = 0; leftAvailable || rightAvailable; leftIndex++, rightIndex++) { + String leftRow = "", rightRow = leftRow; + if (leftIndex < leftSplit.length) { + leftRow = leftSplit[leftIndex]; + } + else { + leftAvailable = false; + } + sb.append(leftRow); + for (int i = leftRow.length(); i < leftMaxPadding; i++) { + sb.append(" "); + } + // right side still available + if (rightIndex < rightSplit.length) { + rightRow = rightSplit[rightIndex]; + } + else { + rightAvailable = false; + } + if (leftAvailable || rightAvailable) { + sb.append(leftRow.equals(rightRow) ? " = " : " ! "); + sb.append(rightRow); + sb.append("\n"); + } + } + return sb.toString(); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java new file mode 100644 index 0000000000000..c0f4947bb88b3 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java @@ -0,0 +1,465 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.type; + +import org.elasticsearch.common.Booleans; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.joda.time.ReadableInstant; +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; + +import java.util.Locale; +import java.util.function.DoubleFunction; +import java.util.function.Function; +import java.util.function.LongFunction; + +import static org.elasticsearch.xpack.sql.type.DataType.BOOLEAN; +import static org.elasticsearch.xpack.sql.type.DataType.DATE; +import static org.elasticsearch.xpack.sql.type.DataType.LONG; +import static org.elasticsearch.xpack.sql.type.DataType.NULL; + +/** + * Conversions from one Elasticsearch data type to another Elasticsearch data types. + *
+ * <p>
+ * This class throws {@link SqlIllegalArgumentException} to differentiate between validation + * errors inside SQL as oppose to the rest of ES. + */ +public abstract class DataTypeConversion { + + private static final DateTimeFormatter UTC_DATE_FORMATTER = ISODateTimeFormat.dateTimeNoMillis().withZoneUTC(); + + /** + * Returns the type compatible with both left and right types + *
+     * <p>
+ * If one of the types is null - returns another type + * If both types are numeric - returns type with the highest precision int < long < float < double + * If one of the types is string and another numeric - returns numeric + */ + public static DataType commonType(DataType left, DataType right) { + if (left == right) { + return left; + } + if (DataTypes.isNull(left)) { + return right; + } + if (DataTypes.isNull(right)) { + return left; + } + if (left.isNumeric() && right.isNumeric()) { + // if one is int + if (left.isInteger) { + // promote the highest int + if (right.isInteger) { + return left.size > right.size ? left : right; + } + // promote the rational + return right; + } + // try the other side + if (right.isInteger) { + return left; + } + // promote the highest rational + return left.size > right.size ? left : right; + } + if (left.isString()) { + if (right.isNumeric()) { + return right; + } + } + if (right.isString()) { + if (left.isNumeric()) { + return left; + } + } + // none found + return null; + } + + /** + * Returns true if the from type can be converted to the to type, false - otherwise + */ + public static boolean canConvert(DataType from, DataType to) { + // Special handling for nulls and if conversion is not requires + if (from == to || from == NULL) { + return true; + } + // only primitives are supported so far + return from.isPrimitive() && to.isPrimitive() && conversion(from, to) != null; + } + + /** + * Get the conversion from one type to another. + */ + public static Conversion conversionFor(DataType from, DataType to) { + // Special handling for nulls and if conversion is not requires + if (from == to) { + return Conversion.IDENTITY; + } + if (to == DataType.NULL) { + return Conversion.NULL; + } + + Conversion conversion = conversion(from, to); + if (conversion == null) { + throw new SqlIllegalArgumentException("cannot convert from [" + from + "] to [" + to + "]"); + } + return conversion; + } + + private static Conversion conversion(DataType from, DataType to) { + switch (to) { + case KEYWORD: + case TEXT: + return conversionToString(from); + case LONG: + return conversionToLong(from); + case INTEGER: + return conversionToInt(from); + case SHORT: + return conversionToShort(from); + case BYTE: + return conversionToByte(from); + case FLOAT: + return conversionToFloat(from); + case DOUBLE: + return conversionToDouble(from); + case DATE: + return conversionToDate(from); + case BOOLEAN: + return conversionToBoolean(from); + default: + return null; + } + + } + + private static Conversion conversionToString(DataType from) { + if (from == DATE) { + return Conversion.DATE_TO_STRING; + } + return Conversion.OTHER_TO_STRING; + } + + private static Conversion conversionToLong(DataType from) { + if (from.isRational) { + return Conversion.RATIONAL_TO_LONG; + } + if (from.isInteger) { + return Conversion.INTEGER_TO_LONG; + } + if (from == BOOLEAN) { + return Conversion.BOOL_TO_INT; // We emit an int here which is ok because of Java's casting rules + } + if (from.isString()) { + return Conversion.STRING_TO_LONG; + } + if (from == DATE) { + return Conversion.DATE_TO_LONG; + } + return null; + } + + private static Conversion conversionToInt(DataType from) { + if (from.isRational) { + return Conversion.RATIONAL_TO_INT; + } + if (from.isInteger) { + return Conversion.INTEGER_TO_INT; + } + if (from == BOOLEAN) { + return Conversion.BOOL_TO_INT; + } + if (from.isString()) { + return Conversion.STRING_TO_INT; + } + if (from == DATE) { + return Conversion.DATE_TO_INT; + } + return 
null; + } + + private static Conversion conversionToShort(DataType from) { + if (from.isRational) { + return Conversion.RATIONAL_TO_SHORT; + } + if (from.isInteger) { + return Conversion.INTEGER_TO_SHORT; + } + if (from == BOOLEAN) { + return Conversion.BOOL_TO_SHORT; + } + if (from.isString()) { + return Conversion.STRING_TO_SHORT; + } + if (from == DATE) { + return Conversion.DATE_TO_SHORT; + } + return null; + } + + private static Conversion conversionToByte(DataType from) { + if (from.isRational) { + return Conversion.RATIONAL_TO_BYTE; + } + if (from.isInteger) { + return Conversion.INTEGER_TO_BYTE; + } + if (from == BOOLEAN) { + return Conversion.BOOL_TO_BYTE; + } + if (from.isString()) { + return Conversion.STRING_TO_BYTE; + } + if (from == DATE) { + return Conversion.DATE_TO_BYTE; + } + return null; + } + + private static Conversion conversionToFloat(DataType from) { + if (from.isRational) { + return Conversion.RATIONAL_TO_FLOAT; + } + if (from.isInteger) { + return Conversion.INTEGER_TO_FLOAT; + } + if (from == BOOLEAN) { + return Conversion.BOOL_TO_FLOAT; + } + if (from.isString()) { + return Conversion.STRING_TO_FLOAT; + } + if (from == DATE) { + return Conversion.DATE_TO_FLOAT; + } + return null; + } + + private static Conversion conversionToDouble(DataType from) { + if (from.isRational) { + return Conversion.RATIONAL_TO_DOUBLE; + } + if (from.isInteger) { + return Conversion.INTEGER_TO_DOUBLE; + } + if (from == BOOLEAN) { + return Conversion.BOOL_TO_DOUBLE; + } + if (from.isString()) { + return Conversion.STRING_TO_DOUBLE; + } + if (from == DATE) { + return Conversion.DATE_TO_DOUBLE; + } + return null; + } + + private static Conversion conversionToDate(DataType from) { + if (from.isRational) { + return Conversion.RATIONAL_TO_DATE; + } + if (from.isInteger) { + return Conversion.INTEGER_TO_DATE; + } + if (from == BOOLEAN) { + return Conversion.BOOL_TO_DATE; // We emit an int here which is ok because of Java's casting rules + } + if (from.isString()) { + return Conversion.STRING_TO_DATE; + } + return null; + } + + private static Conversion conversionToBoolean(DataType from) { + if (from.isNumeric()) { + return Conversion.NUMERIC_TO_BOOLEAN; + } + if (from.isString()) { + return Conversion.STRING_TO_BOOLEAN; + } + if (from == DATE) { + return Conversion.DATE_TO_BOOLEAN; + } + return null; + } + + public static byte safeToByte(long x) { + if (x > Byte.MAX_VALUE || x < Byte.MIN_VALUE) { + throw new SqlIllegalArgumentException("[" + x + "] out of [Byte] range"); + } + return (byte) x; + } + + public static short safeToShort(long x) { + if (x > Short.MAX_VALUE || x < Short.MIN_VALUE) { + throw new SqlIllegalArgumentException("[" + x + "] out of [Short] range"); + } + return (short) x; + } + + public static int safeToInt(long x) { + if (x > Integer.MAX_VALUE || x < Integer.MIN_VALUE) { + throw new SqlIllegalArgumentException("[" + x + "] out of [Int] range"); + } + return (int) x; + } + + public static long safeToLong(double x) { + if (x > Long.MAX_VALUE || x < Long.MIN_VALUE) { + throw new SqlIllegalArgumentException("[" + x + "] out of [Long] range"); + } + return Math.round(x); + } + + public static Number toInteger(double x, DataType dataType) { + long l = safeToLong(x); + + switch (dataType) { + case BYTE: + return safeToByte(l); + case SHORT: + return safeToShort(l); + case INTEGER: + return safeToInt(l); + default: + return l; + } + } + + public static boolean convertToBoolean(String val) { + String lowVal = val.toLowerCase(Locale.ROOT); + if (Booleans.isBoolean(lowVal) == 
false) { + throw new SqlIllegalArgumentException("cannot cast [" + val + "] to [Boolean]"); + } + return Booleans.parseBoolean(lowVal); + } + + /** + * Converts arbitrary object to the desired data type. + *
+     * <p>
+ * Throws SqlIllegalArgumentException if such conversion is not possible + */ + public static Object convert(Object value, DataType dataType) { + DataType detectedType = DataTypes.fromJava(value); + if (detectedType == dataType || value == null) { + return value; + } + return conversionFor(detectedType, dataType).convert(value); + } + + /** + * Reference to a data type conversion that can be serialized. Note that the position in the enum + * is important because it is used for serialization. + */ + public enum Conversion { + IDENTITY(Function.identity()), + NULL(value -> null), + + DATE_TO_STRING(Object::toString), + OTHER_TO_STRING(String::valueOf), + + RATIONAL_TO_LONG(fromDouble(DataTypeConversion::safeToLong)), + INTEGER_TO_LONG(fromLong(value -> value)), + STRING_TO_LONG(fromString(Long::valueOf, "Long")), + DATE_TO_LONG(fromDate(value -> value)), + + RATIONAL_TO_INT(fromDouble(value -> safeToInt(safeToLong(value)))), + INTEGER_TO_INT(fromLong(DataTypeConversion::safeToInt)), + BOOL_TO_INT(fromBool(value -> value ? 1 : 0)), + STRING_TO_INT(fromString(Integer::valueOf, "Int")), + DATE_TO_INT(fromDate(DataTypeConversion::safeToInt)), + + RATIONAL_TO_SHORT(fromDouble(value -> safeToShort(safeToLong(value)))), + INTEGER_TO_SHORT(fromLong(DataTypeConversion::safeToShort)), + BOOL_TO_SHORT(fromBool(value -> value ? (short) 1 : (short) 0)), + STRING_TO_SHORT(fromString(Short::valueOf, "Short")), + DATE_TO_SHORT(fromDate(DataTypeConversion::safeToShort)), + + RATIONAL_TO_BYTE(fromDouble(value -> safeToByte(safeToLong(value)))), + INTEGER_TO_BYTE(fromLong(DataTypeConversion::safeToByte)), + BOOL_TO_BYTE(fromBool(value -> value ? (byte) 1 : (byte) 0)), + STRING_TO_BYTE(fromString(Byte::valueOf, "Byte")), + DATE_TO_BYTE(fromDate(DataTypeConversion::safeToByte)), + + // TODO floating point conversions are lossy but conversions to integer conversions are not. Are we ok with that? + RATIONAL_TO_FLOAT(fromDouble(value -> (float) value)), + INTEGER_TO_FLOAT(fromLong(value -> (float) value)), + BOOL_TO_FLOAT(fromBool(value -> value ? 1f : 0f)), + STRING_TO_FLOAT(fromString(Float::valueOf, "Float")), + DATE_TO_FLOAT(fromDate(value -> (float) value)), + + RATIONAL_TO_DOUBLE(fromDouble(Double::valueOf)), + INTEGER_TO_DOUBLE(fromLong(Double::valueOf)), + BOOL_TO_DOUBLE(fromBool(value -> value ? 
1d : 0d)), + STRING_TO_DOUBLE(fromString(Double::valueOf, "Double")), + DATE_TO_DOUBLE(fromDate(Double::valueOf)), + + RATIONAL_TO_DATE(toDate(RATIONAL_TO_LONG)), + INTEGER_TO_DATE(toDate(INTEGER_TO_LONG)), + BOOL_TO_DATE(toDate(BOOL_TO_INT)), + STRING_TO_DATE(fromString(UTC_DATE_FORMATTER::parseDateTime, "Date")), + + NUMERIC_TO_BOOLEAN(fromLong(value -> value != 0)), + STRING_TO_BOOLEAN(fromString(DataTypeConversion::convertToBoolean, "Boolean")), + DATE_TO_BOOLEAN(fromDate(value -> value != 0)); + + private final Function converter; + + Conversion(Function converter) { + this.converter = converter; + } + + private static Function fromDouble(DoubleFunction converter) { + return (Object l) -> converter.apply(((Number) l).doubleValue()); + } + + private static Function fromLong(LongFunction converter) { + return (Object l) -> converter.apply(((Number) l).longValue()); + } + + private static Function fromString(Function converter, String to) { + return (Object value) -> { + try { + return converter.apply(value.toString()); + } catch (NumberFormatException e) { + throw new SqlIllegalArgumentException(e, "cannot cast [{}] to [{}]", value, to); + } catch (IllegalArgumentException e) { + throw new SqlIllegalArgumentException(e, "cannot cast [{}] to [{}]:{}", value, to, e.getMessage()); + } + }; + } + + private static Function fromBool(Function converter) { + return (Object l) -> converter.apply(((Boolean) l)); + } + + private static Function fromDate(Function converter) { + return l -> ((ReadableInstant) l).getMillis(); + } + + private static Function toDate(Conversion conversion) { + return l -> new DateTime(((Number) conversion.convert(l)).longValue(), DateTimeZone.UTC); + } + + public Object convert(Object l) { + if (l == null) { + return null; + } + return converter.apply(l); + } + } + + public static DataType asInteger(DataType dataType) { + if (!dataType.isNumeric()) { + return dataType; + } + + return dataType.isInteger ? dataType : LONG; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java new file mode 100644 index 0000000000000..c2b40656ba294 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
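As a usage sketch for the conversion machinery above (the `ConversionExample` class and its literal values are invented; only the `DataTypeConversion` calls come from this file): `commonType` picks the wider of two compatible types, `conversionFor` resolves a reusable `Conversion` once, and `convert` auto-detects the source type from the Java value.

```java
import org.elasticsearch.xpack.sql.type.DataType;
import org.elasticsearch.xpack.sql.type.DataTypeConversion;
import org.elasticsearch.xpack.sql.type.DataTypeConversion.Conversion;

public class ConversionExample {
    public static void main(String[] args) {
        // both are integer types: the one with the larger size wins
        DataType common = DataTypeConversion.commonType(DataType.INTEGER, DataType.LONG);
        System.out.println(common); // LONG

        // resolve the conversion once, then apply it per value
        Conversion stringToInt = DataTypeConversion.conversionFor(DataType.KEYWORD, DataType.INTEGER);
        System.out.println(stringToInt.convert("42")); // 42

        // detects DOUBLE from the Java type, then RATIONAL_TO_INT rounds via safeToLong
        System.out.println(DataTypeConversion.convert(3.9d, DataType.INTEGER)); // 4

        // unparsable or out-of-range input surfaces as SqlIllegalArgumentException, e.g.:
        // DataTypeConversion.convert("not a number", DataType.INTEGER);
    }
}
```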
+ */ +package org.elasticsearch.xpack.sql.type; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.joda.time.DateTime; + +public abstract class DataTypes { + + public static boolean isNull(DataType from) { + return from == DataType.NULL; + } + + public static boolean isUnsupported(DataType from) { + return from == DataType.UNSUPPORTED; + } + + public static DataType fromJava(Object value) { + if (value == null) { + return DataType.NULL; + } + if (value instanceof Integer) { + return DataType.INTEGER; + } + if (value instanceof Long) { + return DataType.LONG; + } + if (value instanceof Boolean) { + return DataType.BOOLEAN; + } + if (value instanceof Double) { + return DataType.DOUBLE; + } + if (value instanceof Float) { + return DataType.FLOAT; + } + if (value instanceof Byte) { + return DataType.BYTE; + } + if (value instanceof Short) { + return DataType.SHORT; + } + if (value instanceof DateTime) { + return DataType.DATE; + } + if (value instanceof String || value instanceof Character) { + return DataType.KEYWORD; + } + throw new SqlIllegalArgumentException("No idea what's the DataType for {}", value.getClass()); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DateEsField.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DateEsField.java new file mode 100644 index 0000000000000..b9737fbba608f --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DateEsField.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.type; + +import org.elasticsearch.common.util.CollectionUtils; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * SQL-related information about an index field with date type + */ +public class DateEsField extends EsField { + + public static final List DEFAULT_FORMAT = Arrays.asList("strict_date_optional_time", "epoch_millis"); + private final List formats; + + public DateEsField(String name, Map properties, boolean hasDocValues, String... formats) { + super(name, DataType.DATE, properties, hasDocValues); + this.formats = CollectionUtils.isEmpty(formats) ? DEFAULT_FORMAT : Arrays.asList(formats); + } + + @Override + public int getPrecision() { + // same as Long + // TODO: based this on format string + return 19; + } + + public List getFormats() { + return formats; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + DateEsField dateField = (DateEsField) o; + return Objects.equals(formats, dateField.formats); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), formats); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/EsField.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/EsField.java new file mode 100644 index 0000000000000..cc7e085416caa --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/EsField.java @@ -0,0 +1,103 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
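A tiny illustration of the type detection in `DataTypes.fromJava` (the `FromJavaExample` class is invented; the mappings shown follow directly from the method above):

```java
import org.elasticsearch.xpack.sql.type.DataTypes;

public class FromJavaExample {
    public static void main(String[] args) {
        System.out.println(DataTypes.fromJava(42));    // INTEGER
        System.out.println(DataTypes.fromJava(42L));   // LONG
        System.out.println(DataTypes.fromJava("foo")); // KEYWORD
        System.out.println(DataTypes.fromJava(null));  // NULL
        // anything else (e.g. a Map) throws SqlIllegalArgumentException
    }
}
```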
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.type; + +import org.elasticsearch.common.Nullable; + +import java.util.Map; +import java.util.Objects; + +/** + * SQL-related information about an index field + */ +public class EsField { + private final DataType esDataType; + private final boolean hasDocValues; + private final Map properties; + private final String name; + + public EsField(String name, DataType esDataType, Map properties, boolean hasDocValues) { + this.name = name; + this.esDataType = esDataType; + this.hasDocValues = hasDocValues; + this.properties = properties; + } + + /** + * Returns the field path + */ + public String getName() { + return name; + } + + /** + * The field type + */ + public DataType getDataType() { + return esDataType; + } + + /** + * The field supports doc values + */ + public boolean hasDocValues() { + return hasDocValues; + } + + /** + * Returns list of properties for the nested and object fields, list of subfield if the field + * was indexed in a few different ways or null otherwise + */ + @Nullable + public Map getProperties() { + return properties; + } + + /** + * Returns the path to the keyword version of this field if this field is text and it has a subfield that is + * indexed as keyword, null if such field is not found or the field name itself in all other cases + */ + public EsField getExactField() { + return this; + } + + /** + * Returns the precision of the field + *
+     * <p>
+ * Precision is the specified column size. For numeric data, this is the maximum precision. For character + * data, this is the length in characters. For datetime datatypes, this is the length in characters of the + * String representation (assuming the maximum allowed defaultPrecision of the fractional seconds component). + */ + public int getPrecision() { + return esDataType.defaultPrecision; + } + + /** + * True if this field name can be used in sorting, aggregations and term queries as is + *
+     * <p>
+ * This will be true for most fields except analyzed text fields that cannot be used directly and should be + * replaced with the field returned by {@link EsField#getExactField()} instead. + */ + public boolean isExact() { + return true; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + EsField field = (EsField) o; + return hasDocValues == field.hasDocValues && + esDataType == field.esDataType && + Objects.equals(properties, field.properties) && + Objects.equals(name, field.name); + } + + @Override + public int hashCode() { + return Objects.hash(esDataType, hasDocValues, properties, name); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/KeywordEsField.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/KeywordEsField.java new file mode 100644 index 0000000000000..d40fa7b19af92 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/KeywordEsField.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.type; + +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +/** + * SQL-related information about an index field with keyword type + */ +public class KeywordEsField extends EsField { + + private final int precision; + private final boolean normalized; + + public KeywordEsField(String name) { + this(name, Collections.emptyMap(), true, DataType.KEYWORD.defaultPrecision, false); + } + + public KeywordEsField(String name, Map properties, boolean hasDocValues, int precision, boolean normalized) { + super(name, DataType.KEYWORD, properties, hasDocValues); + this.precision = precision; + this.normalized = normalized; + } + + @Override + public int getPrecision() { + return precision; + } + + @Override + public boolean isExact() { + return normalized == false; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + KeywordEsField that = (KeywordEsField) o; + return precision == that.precision && + normalized == that.normalized; + } + + @Override + public int hashCode() { + + return Objects.hash(super.hashCode(), precision, normalized); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/Schema.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/Schema.java new file mode 100644 index 0000000000000..62a7881b6adc1 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/Schema.java @@ -0,0 +1,128 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
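A short sketch of how exactness plays out for keyword fields (the class name, field names and the `256` precision are invented; the constructors are the ones defined above). A plain keyword field is exact and can be used as-is in sorting, aggregations and term queries, while a normalized keyword is not:

```java
import java.util.Collections;

import org.elasticsearch.xpack.sql.type.KeywordEsField;

public class ExactnessExample {
    public static void main(String[] args) {
        KeywordEsField plain = new KeywordEsField("tag");
        KeywordEsField normalized =
                new KeywordEsField("tag.norm", Collections.emptyMap(), true, 256, true);

        System.out.println(plain.isExact());      // true
        System.out.println(normalized.isExact()); // false
        System.out.println(plain.getPrecision()); // DataType.KEYWORD.defaultPrecision
    }
}
```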
+ */ +package org.elasticsearch.xpack.sql.type; + +import org.elasticsearch.xpack.sql.type.Schema.Entry; +import org.elasticsearch.xpack.sql.util.Check; + +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Spliterator; +import java.util.Spliterators; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; + +import static java.util.Collections.emptyList; + +public class Schema implements Iterable { + + public interface Entry { + String name(); + DataType type(); + } + + static class DefaultEntry implements Entry { + private final String name; + private final DataType type; + + DefaultEntry(String name, DataType type) { + this.name = name; + this.type = type; + } + + @Override + public String name() { + return name; + } + + @Override + public DataType type() { + return type; + } + } + + public static final Schema EMPTY = new Schema(emptyList(), emptyList()); + + private final List names; + private final List types; + + public Schema(List names, List types) { + Check.isTrue(names.size() == types.size(), "Different # of names {} vs types {}", names, types); + this.types = types; + this.names = names; + } + + public List names() { + return names; + } + + public List types() { + return types; + } + + public int size() { + return names.size(); + } + + public Entry get(int i) { + return new DefaultEntry(names.get(i), types.get(i)); + } + + public DataType type(String name) { + int indexOf = names.indexOf(name); + if (indexOf < 0) { + return null; + } + return types.get(indexOf); + } + + @Override + public Iterator iterator() { + return new Iterator() { + private final int size = size(); + private int pos = -1; + + @Override + public boolean hasNext() { + return pos < size - 1; + } + + @Override + public Entry next() { + if (pos++ >= size) { + throw new NoSuchElementException(); + } + return get(pos); + } + }; + } + + public Stream stream() { + return StreamSupport.stream(spliterator(), false); + } + + @Override + public Spliterator spliterator() { + return Spliterators.spliterator(iterator(), size(), 0); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("["); + for (int i = 0; i < names.size(); i++) { + if (i > 0) { + sb.append(","); + } + sb.append(names.get(i)); + sb.append(":"); + sb.append(types.get(i).esType); + } + sb.append("]"); + return sb.toString(); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/TextEsField.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/TextEsField.java new file mode 100644 index 0000000000000..f1c596a301c54 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/TextEsField.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
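A brief usage sketch for `Schema` (the `SchemaExample` class and the column names are invented): names and types are kept as two parallel lists, validated to have the same size, and exposed through positional entries, by-name lookup and iteration.

```java
import java.util.Arrays;

import org.elasticsearch.xpack.sql.type.DataType;
import org.elasticsearch.xpack.sql.type.Schema;

public class SchemaExample {
    public static void main(String[] args) {
        Schema schema = new Schema(
                Arrays.asList("emp_no", "hire_date"),
                Arrays.asList(DataType.INTEGER, DataType.DATE));

        Schema.Entry first = schema.get(0);
        System.out.println(first.name() + " -> " + first.type()); // emp_no -> INTEGER
        System.out.println(schema.type("hire_date"));             // DATE
        System.out.println(schema);                               // e.g. [emp_no:integer,hire_date:date]

        // a size mismatch between names and types fails fast via Check.isTrue
    }
}
```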
+ */ +package org.elasticsearch.xpack.sql.type; + +import org.elasticsearch.xpack.sql.analysis.index.MappingException; + +import java.util.Map; + +/** + * SQL-related information about an index field with text type + */ +public class TextEsField extends EsField { + + public TextEsField(String name, Map properties, boolean hasDocValues) { + super(name, DataType.TEXT, properties, hasDocValues); + } + + @Override + public EsField getExactField() { + EsField field = null; + for (EsField property : getProperties().values()) { + if (property.getDataType() == DataType.KEYWORD && property.isExact()) { + if (field != null) { + throw new MappingException("Multiple exact keyword candidates available for [" + getName() + + "]; specify which one to use"); + } + field = property; + } + } + if (field == null) { + throw new MappingException("No keyword/multi-field defined exact matches for [" + getName() + + "]; define one or use MATCH/QUERY instead"); + } + return field; + } + + @Override + public boolean isExact() { + return false; + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/Types.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/Types.java new file mode 100644 index 0000000000000..e0d3d2b35e9b2 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/Types.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.type; + +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.Strings; + +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.Locale; +import java.util.Map; +import java.util.Map.Entry; + +import static java.util.Collections.emptyMap; + +public abstract class Types { + + @SuppressWarnings("unchecked") + public static Map fromEs(Map asMap) { + Map props = null; + if (asMap != null && !asMap.isEmpty()) { + props = (Map) asMap.get("properties"); + } + return props == null || props.isEmpty() ? 
emptyMap() : startWalking(props); + } + + private static Map startWalking(Map mapping) { + Map types = new LinkedHashMap<>(); + + if (mapping == null) { + return emptyMap(); + } + for (Entry entry : mapping.entrySet()) { + walkMapping(entry.getKey(), entry.getValue(), types); + } + + return types; + } + + private static DataType getType(Map content) { + if (content.containsKey("type")) { + try { + return DataType.fromEsType(content.get("type").toString()); + } catch (IllegalArgumentException ex) { + return DataType.UNSUPPORTED; + } + } else if (content.containsKey("properties")) { + return DataType.OBJECT; + } else { + return DataType.UNSUPPORTED; + } + } + + @SuppressWarnings("unchecked") + private static void walkMapping(String name, Object value, Map mapping) { + // object type - only root or nested docs supported + if (value instanceof Map) { + Map content = (Map) value; + + // extract field type + DataType esDataType = getType(content); + final Map properties; + if (esDataType == DataType.OBJECT || esDataType == DataType.NESTED) { + properties = fromEs(content); + } else if (content.containsKey("fields")) { + // Check for multifields + Object fields = content.get("fields"); + if (fields instanceof Map) { + properties = startWalking((Map) fields); + } else { + properties = Collections.emptyMap(); + } + } else { + properties = Collections.emptyMap(); + } + boolean docValues = boolSetting(content.get("doc_values"), esDataType.defaultDocValues); + final EsField field; + switch (esDataType) { + case TEXT: + field = new TextEsField(name, properties, docValues); + break; + case KEYWORD: + int length = intSetting(content.get("ignore_above"), esDataType.defaultPrecision); + boolean normalized = Strings.hasText(textSetting(content.get("normalizer"), null)); + field = new KeywordEsField(name, properties, docValues, length, normalized); + break; + case DATE: + Object fmt = content.get("format"); + if (fmt != null) { + field = new DateEsField(name, properties, docValues, Strings.delimitedListToStringArray(fmt.toString(), "||")); + } else { + field = new DateEsField(name, properties, docValues); + } + break; + case UNSUPPORTED: + String type = content.get("type").toString(); + field = new UnsupportedEsField(name, type); + break; + default: + field = new EsField(name, esDataType, properties, docValues); + } + mapping.put(name, field); + } else { + throw new IllegalArgumentException("Unrecognized mapping " + value); + } + } + + private static String textSetting(Object value, String defaultValue) { + return value == null ? defaultValue : value.toString(); + } + + private static boolean boolSetting(Object value, boolean defaultValue) { + return value == null ? defaultValue : Booleans.parseBoolean(value.toString(), defaultValue); + } + + private static int intSetting(Object value, int defaultValue) { + return value == null ? defaultValue : Integer.parseInt(value.toString()); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/UnsupportedEsField.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/UnsupportedEsField.java new file mode 100644 index 0000000000000..c88d676c223b6 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/UnsupportedEsField.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
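To show how the mapping walker above ties the `EsField` classes together, here is a small sketch (the `MappingExample` class and the field names are invented) that feeds in the `Map` shape of a get-mapping response: a `text` field with a `keyword` multi-field ends up as a `TextEsField` whose exact counterpart is the keyword sub-field.

```java
import java.util.LinkedHashMap;
import java.util.Map;

import org.elasticsearch.xpack.sql.type.EsField;
import org.elasticsearch.xpack.sql.type.Types;

public class MappingExample {
    public static void main(String[] args) {
        // { "properties": { "name": { "type": "text", "fields": { "raw": { "type": "keyword" } } } } }
        Map<String, Object> keyword = new LinkedHashMap<>();
        keyword.put("type", "keyword");

        Map<String, Object> multiFields = new LinkedHashMap<>();
        multiFields.put("raw", keyword);

        Map<String, Object> name = new LinkedHashMap<>();
        name.put("type", "text");
        name.put("fields", multiFields);

        Map<String, Object> properties = new LinkedHashMap<>();
        properties.put("name", name);

        Map<String, Object> mapping = new LinkedHashMap<>();
        mapping.put("properties", properties);

        Map<String, EsField> fields = Types.fromEs(mapping);
        EsField nameField = fields.get("name");

        System.out.println(nameField.getDataType());              // TEXT
        System.out.println(nameField.isExact());                  // false
        System.out.println(nameField.getExactField().getName());  // raw
    }
}
```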
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.type; + +import java.util.Collections; +import java.util.Objects; + +/** + * SQL-related information about an index field that cannot be supported by SQL + */ +public class UnsupportedEsField extends EsField { + + private String originalType; + + public UnsupportedEsField(String name, String originalType) { + super(name, DataType.UNSUPPORTED, Collections.emptyMap(), false); + this.originalType = originalType; + } + + public String getOriginalType() { + return originalType; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + UnsupportedEsField that = (UnsupportedEsField) o; + return Objects.equals(originalType, that.originalType); + } + + @Override + public int hashCode() { + + return Objects.hash(super.hashCode(), originalType); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/Check.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/Check.java new file mode 100644 index 0000000000000..2fa8164a1ccc1 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/Check.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.util; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; + +/** + * Utility class used for checking various conditions at runtime, inside SQL (hence the specific exception) with + * minimum amount of code + */ +public abstract class Check { + + public static void isTrue(boolean expression, String message, Object... values) { + if (!expression) { + throw new SqlIllegalArgumentException(message, values); + } + } + + public static void isTrue(boolean expression, String message) { + if (!expression) { + throw new SqlIllegalArgumentException(message); + } + } + + public static void notNull(Object object, String message) { + if (object == null) { + throw new SqlIllegalArgumentException(message); + } + } + + public static void notNull(Object object, String message, Object... values) { + if (object == null) { + throw new SqlIllegalArgumentException(message, values); + } + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/CollectionUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/CollectionUtils.java new file mode 100644 index 0000000000000..24ed7b979f63c --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/CollectionUtils.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.util; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Set; + +import static java.util.Collections.emptyList; + +public abstract class CollectionUtils { + + public static boolean isEmpty(Collection col) { + return col == null || col.isEmpty(); + } + + @SuppressWarnings("unchecked") + public static List combine(List left, List right) { + if (right.isEmpty()) { + return (List) left; + } + if (left.isEmpty()) { + return (List) right; + } + + List list = new ArrayList<>(left.size() + right.size()); + if (!left.isEmpty()) { + list.addAll(left); + } + if (!right.isEmpty()) { + list.addAll(right); + } + return list; + } + + @SafeVarargs + @SuppressWarnings("varargs") + public static List combine(Collection... collections) { + if (org.elasticsearch.common.util.CollectionUtils.isEmpty(collections)) { + return emptyList(); + } + + List list = new ArrayList<>(); + for (Collection col : collections) { + // typically AttributeSet which ends up iterating anyway plus creating a redundant array + if (col instanceof Set) { + for (T t : col) { + list.add(t); + } + } + else { + list.addAll(col); + } + } + return list; + } + + @SafeVarargs + @SuppressWarnings("varargs") + public static List combine(Collection left, T... entries) { + List list = new ArrayList<>(left.size() + entries.length); + if (!left.isEmpty()) { + list.addAll(left); + } + if (entries.length > 0) { + Collections.addAll(list, entries); + } + return list; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/Graphviz.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/Graphviz.java new file mode 100644 index 0000000000000..97e84efa3b0d5 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/Graphviz.java @@ -0,0 +1,319 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
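Stepping back to the `CollectionUtils` class added just above: a short, hypothetical sketch of how its `combine(...)` overloads behave, assuming the generic signatures (`List<T>`, `Collection<T>`, `T...`) that the rendered diff dropped.

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import org.elasticsearch.xpack.sql.util.CollectionUtils;

// Hypothetical caller, not part of the patch.
public class CombineExample {
    public static void main(String[] args) {
        List<Integer> left = Arrays.asList(1, 2);
        List<Integer> right = Collections.singletonList(3);

        // Both sides non-empty: a new list with all elements -> [1, 2, 3]
        System.out.println(CollectionUtils.combine(left, right));
        // One side empty: the other list is returned as-is -> [1, 2]
        System.out.println(CollectionUtils.combine(left, Collections.<Integer>emptyList()));
        // Varargs overload appends individual entries -> [1, 2, 4, 5]
        System.out.println(CollectionUtils.combine(left, 4, 5));
    }
}
```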
+ */ +package org.elasticsearch.xpack.sql.util; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.atomic.AtomicInteger; + +import org.elasticsearch.xpack.sql.tree.Node; + +// use the awesome http://mdaines.github.io/viz.js/ to visualize and play around with the various options +public abstract class Graphviz { + + private static final int NODE_LABEL_INDENT = 12; + private static final int CLUSTER_INDENT = 2; + private static final int INDENT = 1; + + + public static String dot(String name, Node root) { + StringBuilder sb = new StringBuilder(); + // name + sb.append("digraph G { " + + "rankdir=BT; \n" + + "label=\"" + name + "\"; \n" + + "node[shape=plaintext, color=azure1];\n " + + "edge[color=black,arrowsize=0.5];\n"); + handleNode(sb, root, new AtomicInteger(0), INDENT, true); + sb.append("}"); + return sb.toString(); + } + + public static String dot(Map> clusters, boolean drawSubTrees) { + AtomicInteger nodeCounter = new AtomicInteger(0); + + StringBuilder sb = new StringBuilder(); + // name + sb.append("digraph G { " + + "rankdir=BT;\n " + + "node[shape=plaintext, color=azure1];\n " + + "edge[color=black];\n " + + "graph[compound=true];\n\n"); + + + int clusterNodeStart = 1; + int clusterId = 0; + + StringBuilder clusterEdges = new StringBuilder(); + + for (Entry> entry : clusters.entrySet()) { + indent(sb, INDENT); + // draw cluster + sb.append("subgraph cluster"); + sb.append(++clusterId); + sb.append(" {\n"); + indent(sb, CLUSTER_INDENT); + sb.append("color=blue;\n"); + indent(sb, CLUSTER_INDENT); + sb.append("label="); + sb.append(quoteGraphviz(entry.getKey())); + sb.append(";\n\n"); + + /* to help align the clusters, add an invisible node (that could + * otherwise be used for labeling but it consumes too much space) + * used for alignment */ + indent(sb, CLUSTER_INDENT); + sb.append("c" + clusterId); + sb.append("[style=invis]\n"); + // add edge to the first node in the cluster + indent(sb, CLUSTER_INDENT); + sb.append("node" + (nodeCounter.get() + 1)); + sb.append(" -> "); + sb.append("c" + clusterId); + sb.append(" [style=invis];\n"); + + handleNode(sb, entry.getValue(), nodeCounter, CLUSTER_INDENT, drawSubTrees); + + int clusterNodeStop = nodeCounter.get(); + + indent(sb, INDENT); + sb.append("}\n"); + + // connect cluster only if there are at least two + if (clusterId > 1) { + indent(clusterEdges, INDENT); + clusterEdges.append("node" + clusterNodeStart); + clusterEdges.append(" -> "); + clusterEdges.append("node" + clusterNodeStop); + clusterEdges.append("[ltail=cluster"); + clusterEdges.append(clusterId - 1); + clusterEdges.append(" lhead=cluster"); + clusterEdges.append(clusterId); + clusterEdges.append("];\n"); + } + clusterNodeStart = clusterNodeStop; + } + + sb.append("\n"); + + // connecting the clusters arranges them in a weird position + // so don't + //sb.append(clusterEdges.toString()); + + // align the cluster by requiring the invisible nodes in each cluster to be of the same rank + indent(sb, INDENT); + sb.append("{ rank=same"); + for (int i = 1; i <= clusterId; i++) { + sb.append(" c" + i); + } + sb.append(" };\n}"); + + return sb.toString(); + } + + private static void handleNode(StringBuilder output, Node n, AtomicInteger nodeId, int currentIndent, boolean drawSubTrees) { + // each node has its own id + int thisId = nodeId.incrementAndGet(); + + // first determine node info + StringBuilder nodeInfo = new StringBuilder(); + nodeInfo.append("\n"); + 
indent(nodeInfo, currentIndent + NODE_LABEL_INDENT); + nodeInfo.append("\n"); + indent(nodeInfo, currentIndent + NODE_LABEL_INDENT); + nodeInfo.append("\n"); + indent(nodeInfo, currentIndent + NODE_LABEL_INDENT); + + List props = n.properties(); + List parsed = new ArrayList<>(props.size()); + List> subTrees = new ArrayList<>(); + + for (Object v : props) { + // skip null values, children and location + if (v != null && !n.children().contains(v)) { + if (v instanceof Collection) { + Collection c = (Collection) v; + StringBuilder colS = new StringBuilder(); + for (Object o : c) { + if (drawSubTrees && isAnotherTree(o)) { + subTrees.add((Node) o); + } + else { + colS.append(o); + colS.append("\n"); + } + } + if (colS.length() > 0) { + parsed.add(colS.toString()); + } + } + else { + if (drawSubTrees && isAnotherTree(v)) { + subTrees.add((Node) v); + } + else { + parsed.add(v.toString()); + } + } + } + } + + for (String line : parsed) { + nodeInfo.append("\n"); + indent(nodeInfo, currentIndent + NODE_LABEL_INDENT); + } + + nodeInfo.append("
" + + n.nodeName() + + "
"); + nodeInfo.append(escapeHtml(line)); + nodeInfo.append("
\n"); + + // check any subtrees + if (!subTrees.isEmpty()) { + // write nested trees + output.append("subgraph cluster_" + thisId + " {"); + output.append("style=filled; color=white; fillcolor=azure2; label=\"\";\n"); + } + + // write node info + indent(output, currentIndent); + output.append("node"); + output.append(thisId); + output.append("[label="); + output.append(quoteGraphviz(nodeInfo.toString())); + output.append("];\n"); + + if (!subTrees.isEmpty()) { + indent(output, currentIndent + INDENT); + output.append("node[shape=ellipse, color=black]\n"); + + for (Node node : subTrees) { + indent(output, currentIndent + INDENT); + drawNodeTree(output, node, "st_" + thisId + "_", 0); + } + + output.append("\n}\n"); + } + + indent(output, currentIndent + 1); + //output.append("{ rankdir=LR; rank=same; \n"); + int prevId = -1; + // handle children + for (Node c : n.children()) { + // the child will always have the next id + int childId = nodeId.get() + 1; + handleNode(output, c, nodeId, currentIndent + INDENT, drawSubTrees); + indent(output, currentIndent + 1); + output.append("node"); + output.append(childId); + output.append(" -> "); + output.append("node"); + output.append(thisId); + output.append(";\n"); + + // add invisible connection between children for ordering + if (prevId != -1) { + indent(output, currentIndent + 1); + output.append("node"); + output.append(prevId); + output.append(" -> "); + output.append("node"); + output.append(childId); + output.append(";\n"); + } + prevId = childId; + } + indent(output, currentIndent); + //output.append("}\n"); + } + + private static void drawNodeTree(StringBuilder sb, Node node, String prefix, int counter) { + String nodeName = prefix + counter; + prefix = nodeName; + + // draw node + drawNode(sb, node, nodeName); + // then draw all children nodes and connections between them to be on the same level + sb.append("{ rankdir=LR; rank=same;\n"); + int prevId = -1; + int saveId = counter; + for (Node child : node.children()) { + int currId = ++counter; + drawNode(sb, child, prefix + currId); + if (prevId > -1) { + sb.append(prefix + prevId + " -> " + prefix + currId + " [style=invis];\n"); + } + prevId = currId; + } + sb.append("}\n"); + + // now draw connections to the parent + for (int i = saveId; i < counter; i++) { + sb.append(prefix + (i + 1) + " -> " + nodeName + ";\n"); + } + + // draw the child + counter = saveId; + for (Node child : node.children()) { + drawNodeTree(sb, child, prefix, ++counter); + } + } + + private static void drawNode(StringBuilder sb, Node node, String nodeName) { + if (node.children().isEmpty()) { + sb.append(nodeName + " [label=\"" + node.toString() + "\"];\n"); + } + else { + sb.append(nodeName + " [label=\"" + node.nodeName() + "\"];\n"); + } + } + + private static boolean isAnotherTree(Object value) { + if (value instanceof Node) { + Node n = (Node) value; + // create a subgraph + if (n.children().size() > 0) { + return true; + } + } + return false; + } + + private static String escapeHtml(Object value) { + return String.valueOf(value) + .replace("&", "&") + .replace("\"", """) + .replace("'", "'") + .replace("<", "<") + .replace(">", ">") + .replace("\n", "
"); + } + + private static String quoteGraphviz(String value) { + if (value.contains("<")) { + return "<" + value + ">"; + } + + return "\"" + value + "\""; + } + + private static String escapeGraphviz(String value) { + return value + .replace("<", "\\<") + .replace(">", "\\>") + .replace("\"", "\\\""); + } + + private static void indent(StringBuilder sb, int indent) { + for (int i = 0; i < indent; i++) { + sb.append(" "); + } + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/ReflectionUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/ReflectionUtils.java new file mode 100644 index 0000000000000..0ddccccf7ddf0 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/ReflectionUtils.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.util; + +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; + +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.util.Arrays; + +public class ReflectionUtils { + + @SuppressWarnings("unchecked") + public static Class detectSuperTypeForRuleLike(Class c) { + Class clazz = c; + for (Type type = clazz.getGenericSuperclass(); clazz != Object.class; type = clazz.getGenericSuperclass()) { + if (type instanceof ParameterizedType) { + Type[] typeArguments = ((ParameterizedType) type).getActualTypeArguments(); + if (typeArguments.length != 2 && typeArguments.length != 1) { + throw new SqlIllegalArgumentException("Unexpected number of type arguments {} for {}", Arrays.toString(typeArguments), + c); + } + + return (Class) typeArguments[0]; + } + clazz = clazz.getSuperclass(); + } + throw new SqlIllegalArgumentException("Unexpected class structure for class {}", c); + } + + // remove packaging from the name - strategy used for naming rules by default + public static String ruleLikeNaming(Class c) { + String className = c.getName(); + int parentPackage = className.lastIndexOf("."); + if (parentPackage > 0) { + int grandParentPackage = className.substring(0, parentPackage).lastIndexOf("."); + return (grandParentPackage > 0 ? className.substring(grandParentPackage + 1) : className.substring(parentPackage)); + } + else { + return className; + } + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java new file mode 100644 index 0000000000000..e8bb9368d69ad --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java @@ -0,0 +1,237 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.util; + +import org.apache.lucene.search.spell.LevensteinDistance; +import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +import static java.util.stream.Collectors.toList; + +public abstract class StringUtils { + + public static final String EMPTY = ""; + public static final String NEW_LINE = "\n"; + public static final String SQL_WILDCARD = "%"; + + //CamelCase to camel_case + public static String camelCaseToUnderscore(String string) { + if (!Strings.hasText(string)) { + return EMPTY; + } + StringBuilder sb = new StringBuilder(); + String s = string.trim(); + + boolean previousCharWasUp = false; + for (int i = 0; i < s.length(); i++) { + char ch = s.charAt(i); + if (Character.isAlphabetic(ch)) { + if (Character.isUpperCase(ch)) { + if (i > 0 && !previousCharWasUp) { + sb.append("_"); + } + previousCharWasUp = true; + } + else { + previousCharWasUp = (ch == '_'); + } + } + else { + previousCharWasUp = true; + } + sb.append(ch); + } + return sb.toString().toUpperCase(Locale.ROOT); + } + + public static String nullAsEmpty(String string) { + return string == null ? EMPTY : string; + } + + // % -> .* + // _ -> . + // escape character - can be 0 (in which case every regex gets escaped) or + // should be followed by % or _ (otherwise an exception is thrown) + public static String likeToJavaPattern(String pattern, char escape) { + StringBuilder regex = new StringBuilder(pattern.length() + 4); + + boolean escaped = false; + regex.append('^'); + for (int i = 0; i < pattern.length(); i++) { + char curr = pattern.charAt(i); + if (!escaped && (curr == escape) && escape != 0) { + escaped = true; + if (i + 1 == pattern.length()) { + throw new SqlIllegalArgumentException( + "Invalid sequence - escape character is not followed by special wildcard char"); + } + } + else { + switch (curr) { + case '%': + regex.append(escaped ? SQL_WILDCARD : ".*"); + break; + case '_': + regex.append(escaped ? "_" : "."); + break; + default: + if (escaped) { + throw new SqlIllegalArgumentException( + "Invalid sequence - escape character is not followed by special wildcard char"); + } + // escape special regex characters + switch (curr) { + case '\\': + case '^': + case '$': + case '.': + case '*': + case '?': + case '+': + case '|': + case '(': + case ')': + case '[': + case ']': + case '{': + case '}': + regex.append('\\'); + } + regex.append(curr); + } + escaped = false; + } + } + regex.append('$'); + + return regex.toString(); + } + + /** + * Translates a like pattern to a Lucene wildcard. + * This methods pays attention to the custom escape char which gets converted into \ (used by Lucene). + *
+     * <pre>
+     * % -> *
+     * _ -> ?
+     * escape character - can be 0 (in which case every regex gets escaped) or should be followed by
+     * % or _ (otherwise an exception is thrown)
+     * </pre>
+ */ + public static String likeToLuceneWildcard(String pattern, char escape) { + StringBuilder wildcard = new StringBuilder(pattern.length() + 4); + + boolean escaped = false; + for (int i = 0; i < pattern.length(); i++) { + char curr = pattern.charAt(i); + + if (!escaped && (curr == escape) && escape != 0) { + if (i + 1 == pattern.length()) { + throw new SqlIllegalArgumentException("Invalid sequence - escape character is not followed by special wildcard char"); + } + escaped = true; + } else { + switch (curr) { + case '%': + wildcard.append(escaped ? SQL_WILDCARD : "*"); + break; + case '_': + wildcard.append(escaped ? "_" : "?"); + break; + default: + if (escaped) { + throw new SqlIllegalArgumentException( + "Invalid sequence - escape character is not followed by special wildcard char"); + } + // escape special regex characters + switch (curr) { + case '\\': + case '*': + case '?': + wildcard.append('\\'); + } + wildcard.append(curr); + } + escaped = false; + } + } + return wildcard.toString(); + } + + /** + * Translates a like pattern to pattern for ES index name expression resolver. + * + * Note the resolver only supports * (not ?) and has no notion of escaping. This is not really an issue since we don't allow * + * anyway in the pattern. + */ + public static String likeToIndexWildcard(String pattern, char escape) { + StringBuilder wildcard = new StringBuilder(pattern.length() + 4); + + boolean escaped = false; + for (int i = 0; i < pattern.length(); i++) { + char curr = pattern.charAt(i); + + if (!escaped && (curr == escape) && escape != 0) { + if (i + 1 == pattern.length()) { + throw new SqlIllegalArgumentException("Invalid sequence - escape character is not followed by special wildcard char"); + } + escaped = true; + } else { + switch (curr) { + case '%': + wildcard.append(escaped ? SQL_WILDCARD : "*"); + break; + case '_': + wildcard.append(escaped ? "_" : "*"); + break; + default: + if (escaped) { + throw new SqlIllegalArgumentException( + "Invalid sequence - escape character is not followed by special wildcard char"); + } + // the resolver doesn't support escaping... 
+ wildcard.append(curr); + } + escaped = false; + } + } + return wildcard.toString(); + } + + public static String toString(SearchSourceBuilder source) { + try (XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint().humanReadable(true)) { + source.toXContent(builder, ToXContent.EMPTY_PARAMS); + return Strings.toString(builder); + } catch (IOException e) { + throw new RuntimeException("error rendering", e); + } + } + + public static List findSimilar(String match, Iterable potentialMatches) { + LevensteinDistance ld = new LevensteinDistance(); + List> scoredMatches = new ArrayList<>(); + for (String potentialMatch : potentialMatches) { + float distance = ld.getDistance(match, potentialMatch); + if (distance >= 0.5f) { + scoredMatches.add(new Tuple<>(distance, potentialMatch)); + } + } + CollectionUtil.timSort(scoredMatches, (a,b) -> b.v1().compareTo(a.v1())); + return scoredMatches.stream() + .map(a -> a.v2()) + .collect(toList()); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/plugin-metadata/plugin-security.policy b/x-pack/plugin/sql/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/AbstractSqlIntegTestCase.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/AbstractSqlIntegTestCase.java new file mode 100644 index 0000000000000..0e7d2888f9e16 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/AbstractSqlIntegTestCase.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
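To make the wildcard-translation rules of the `StringUtils` helpers above concrete, here is a small, hypothetical sketch of the expected inputs and outputs (`%`/`_` handling and the custom escape character), assuming the methods behave exactly as written in the patch.

```java
import org.elasticsearch.xpack.sql.util.StringUtils;

// Hypothetical demo class, not part of the patch.
public class LikeTranslationExample {
    public static void main(String[] args) {
        // SQL LIKE to Java regex: % -> .*, _ -> ., anchored with ^ and $.
        System.out.println(StringUtils.likeToJavaPattern("fo%ba_r", '\\'));    // ^fo.*ba.r$
        // An escaped wildcard stays literal (here '\' is the escape character).
        System.out.println(StringUtils.likeToJavaPattern("100\\%", '\\'));     // ^100%$
        // Same pattern as a Lucene wildcard: % -> *, _ -> ?.
        System.out.println(StringUtils.likeToLuceneWildcard("fo%ba_r", '\\')); // fo*ba?r
        // Index-name resolver pattern: both % and _ map to * (no escaping support).
        System.out.println(StringUtils.likeToIndexWildcard("fo%ba_r", '\\'));  // fo*ba*r
    }
}
```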
+ */ +package org.elasticsearch.xpack.sql.action; + +import org.elasticsearch.analysis.common.CommonAnalysisPlugin; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.reindex.ReindexPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.discovery.TestZenDiscovery; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.XPackSettings; + +import java.util.Arrays; +import java.util.Collection; + +public abstract class AbstractSqlIntegTestCase extends ESIntegTestCase { + + @Override + protected boolean ignoreExternalCluster() { + return true; + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder settings = Settings.builder().put(super.nodeSettings(nodeOrdinal)); + settings.put(XPackSettings.SECURITY_ENABLED.getKey(), false); + settings.put(XPackSettings.MONITORING_ENABLED.getKey(), false); + settings.put(XPackSettings.WATCHER_ENABLED.getKey(), false); + settings.put(XPackSettings.GRAPH_ENABLED.getKey(), false); + settings.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false); + settings.put("xpack.ml.autodetect_process", false); + return settings.build(); + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(XPackPlugin.class, CommonAnalysisPlugin.class, ReindexPlugin.class); + } + + @Override + protected Collection> transportClientPlugins() { + return nodePlugins(); + } + + @Override + protected Settings transportClientSettings() { + // Plugin should be loaded on the transport client as well + return nodeSettings(0); + } + + @Override + protected Collection> getMockPlugins() { + return Arrays.asList(TestZenDiscovery.TestPlugin.class, TestSeedPlugin.class); + } +} + diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java new file mode 100644 index 0000000000000..b36fa811d3b25 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.action; + +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest.Mode; +import org.elasticsearch.xpack.sql.plugin.ColumnInfo; +import org.elasticsearch.xpack.sql.plugin.SqlQueryAction; +import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; + +import java.sql.JDBCType; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class SqlActionIT extends AbstractSqlIntegTestCase { + + public void testSqlAction() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test").get()); + client().prepareBulk() + .add(new IndexRequest("test", "doc", "1").source("data", "bar", "count", 42)) + .add(new IndexRequest("test", "doc", "2").source("data", "baz", "count", 43)) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + ensureYellow("test"); + + boolean dataBeforeCount = randomBoolean(); + String columns = dataBeforeCount ? 
"data, count" : "count, data"; + SqlQueryResponse response = client().prepareExecute(SqlQueryAction.INSTANCE) + .query("SELECT " + columns + " FROM test ORDER BY count").mode(Mode.JDBC).get(); + assertThat(response.size(), equalTo(2L)); + assertThat(response.columns(), hasSize(2)); + int dataIndex = dataBeforeCount ? 0 : 1; + int countIndex = dataBeforeCount ? 1 : 0; + assertEquals(new ColumnInfo("", "data", "text", JDBCType.VARCHAR, 0), response.columns().get(dataIndex)); + assertEquals(new ColumnInfo("", "count", "long", JDBCType.BIGINT, 20), response.columns().get(countIndex)); + + assertThat(response.rows(), hasSize(2)); + assertEquals("bar", response.rows().get(0).get(dataIndex)); + assertEquals(42L, response.rows().get(0).get(countIndex)); + assertEquals("baz", response.rows().get(1).get(dataIndex)); + assertEquals(43L, response.rows().get(1).get(countIndex)); + } +} + diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlClearCursorActionIT.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlClearCursorActionIT.java new file mode 100644 index 0000000000000..7b747ffeb6727 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlClearCursorActionIT.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.action; + +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.xpack.sql.plugin.SqlClearCursorAction; +import org.elasticsearch.xpack.sql.plugin.SqlClearCursorResponse; +import org.elasticsearch.xpack.sql.plugin.SqlQueryAction; +import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; +import org.elasticsearch.xpack.sql.session.Cursor; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; + +public class SqlClearCursorActionIT extends AbstractSqlIntegTestCase { + + public void testSqlClearCursorAction() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test").get()); + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); + int indexSize = randomIntBetween(100, 300); + logger.info("Indexing {} records", indexSize); + for (int i = 0; i < indexSize; i++) { + bulkRequestBuilder.add(new IndexRequest("test", "doc", "id" + i).source("data", "bar", "count", i)); + } + bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + ensureYellow("test"); + + assertEquals(0, getNumberOfSearchContexts()); + + int fetchSize = randomIntBetween(5, 20); + logger.info("Fetching {} records at a time", fetchSize); + SqlQueryResponse sqlQueryResponse = client().prepareExecute(SqlQueryAction.INSTANCE) + .query("SELECT * FROM test").fetchSize(fetchSize).get(); + assertEquals(fetchSize, sqlQueryResponse.size()); + + assertThat(getNumberOfSearchContexts(), greaterThan(0L)); + assertThat(sqlQueryResponse.cursor(), notNullValue()); + assertThat(sqlQueryResponse.cursor(), not(equalTo(Cursor.EMPTY))); + + SqlClearCursorResponse cleanCursorResponse = 
client().prepareExecute(SqlClearCursorAction.INSTANCE) + .cursor(sqlQueryResponse.cursor()).get(); + assertTrue(cleanCursorResponse.isSucceeded()); + + assertEquals(0, getNumberOfSearchContexts()); + } + + public void testAutoCursorCleanup() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test").get()); + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); + int indexSize = randomIntBetween(100, 300); + logger.info("Indexing {} records", indexSize); + for (int i = 0; i < indexSize; i++) { + bulkRequestBuilder.add(new IndexRequest("test", "doc", "id" + i).source("data", "bar", "count", i)); + } + bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + ensureYellow("test"); + + assertEquals(0, getNumberOfSearchContexts()); + + int fetchSize = randomIntBetween(5, 20); + logger.info("Fetching {} records at a time", fetchSize); + SqlQueryResponse sqlQueryResponse = client().prepareExecute(SqlQueryAction.INSTANCE) + .query("SELECT * FROM test").fetchSize(fetchSize).get(); + assertEquals(fetchSize, sqlQueryResponse.size()); + + assertThat(getNumberOfSearchContexts(), greaterThan(0L)); + assertThat(sqlQueryResponse.cursor(), notNullValue()); + assertThat(sqlQueryResponse.cursor(), not(equalTo(Cursor.EMPTY))); + + long fetched = sqlQueryResponse.size(); + do { + sqlQueryResponse = client().prepareExecute(SqlQueryAction.INSTANCE).cursor(sqlQueryResponse.cursor()).get(); + fetched += sqlQueryResponse.size(); + } while (sqlQueryResponse.cursor().equals("") == false); + assertEquals(indexSize, fetched); + + SqlClearCursorResponse cleanCursorResponse = client().prepareExecute(SqlClearCursorAction.INSTANCE) + .cursor(sqlQueryResponse.cursor()).get(); + assertFalse(cleanCursorResponse.isSucceeded()); + + assertEquals(0, getNumberOfSearchContexts()); + } + + private long getNumberOfSearchContexts() { + return client().admin().indices().prepareStats("test").clear().setSearch(true).get() + .getIndex("test").getTotal().getSearch().getOpenContexts(); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlDisabledIT.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlDisabledIT.java new file mode 100644 index 0000000000000..a9fc420a8353b --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlDisabledIT.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.action; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.sql.plugin.SqlQueryAction; + +import static org.hamcrest.CoreMatchers.either; +import static org.hamcrest.CoreMatchers.startsWith; + +public class SqlDisabledIT extends AbstractSqlIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(XPackSettings.SQL_ENABLED.getKey(), false) + .build(); + } + + @Override + protected Settings transportClientSettings() { + return Settings.builder() + .put(super.transportClientSettings()) + .put(XPackSettings.SQL_ENABLED.getKey(), randomBoolean()) + .build(); + } + + public void testSqlAction() throws Exception { + Throwable throwable = expectThrows(Throwable.class, + () -> client().prepareExecute(SqlQueryAction.INSTANCE).query("SHOW tables").get()); + assertThat(throwable.getMessage(), + either(startsWith("no proxy found for action")) // disabled on client + .or(startsWith("failed to find action")) // disabled on proxy client + .or(startsWith("No handler for action [indices:data/read/sql]"))); // disabled on server + } +} + diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java new file mode 100644 index 0000000000000..d4d9ab4c3e87e --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.action; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.license.AbstractLicensesIntegrationTestCase; +import org.elasticsearch.license.License; +import org.elasticsearch.license.License.OperationMode; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.elasticsearch.transport.Netty4Plugin; +import org.elasticsearch.xpack.sql.plugin.SqlQueryAction; +import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; +import org.elasticsearch.xpack.sql.plugin.SqlTranslateAction; +import org.elasticsearch.xpack.sql.plugin.SqlTranslateResponse; +import org.hamcrest.Matchers; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Locale; + +import static org.elasticsearch.license.XPackLicenseStateTests.randomBasicStandardOrGold; +import static org.elasticsearch.license.XPackLicenseStateTests.randomTrialBasicStandardGoldOrPlatinumMode; +import static org.elasticsearch.license.XPackLicenseStateTests.randomTrialOrPlatinumMode; +import static org.hamcrest.Matchers.equalTo; + +public class SqlLicenseIT extends AbstractLicensesIntegrationTestCase { + @Override + protected boolean ignoreExternalCluster() { + return true; + } + + @Before + public void resetLicensing() throws Exception { + enableJdbcLicensing(); + } + + @Override + protected Collection> nodePlugins() { + // Add Netty so we can test JDBC licensing because only exists on the REST layer. + List> plugins = new ArrayList<>(super.nodePlugins()); + plugins.add(Netty4Plugin.class); + return plugins; + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + // Enable http so we can test JDBC licensing because only exists on the REST layer. 
+ return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME) + .build(); + } + + private static OperationMode randomValidSqlLicenseType() { + return randomTrialBasicStandardGoldOrPlatinumMode(); + } + + private static OperationMode randomInvalidSqlLicenseType() { + return OperationMode.MISSING; + } + + private static OperationMode randomValidJdbcLicenseType() { + return randomTrialOrPlatinumMode(); + } + + private static OperationMode randomInvalidJdbcLicenseType() { + return randomBasicStandardOrGold(); + } + + public void enableSqlLicensing() throws Exception { + updateLicensing(randomValidSqlLicenseType()); + } + + public void disableSqlLicensing() throws Exception { + updateLicensing(randomInvalidSqlLicenseType()); + } + + public void enableJdbcLicensing() throws Exception { + updateLicensing(randomValidJdbcLicenseType()); + } + + public void disableJdbcLicensing() throws Exception { + updateLicensing(randomInvalidJdbcLicenseType()); + } + + public void updateLicensing(OperationMode licenseOperationMode) throws Exception { + String licenseType = licenseOperationMode.name().toLowerCase(Locale.ROOT); + wipeAllLicenses(); + if (licenseType.equals("missing")) { + putLicenseTombstone(); + } else { + License license = org.elasticsearch.license.TestUtils.generateSignedLicense(licenseType, TimeValue.timeValueMinutes(1)); + putLicense(license); + } + } + + public void testSqlQueryActionLicense() throws Exception { + setupTestIndex(); + disableSqlLicensing(); + + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, + () -> client().prepareExecute(SqlQueryAction.INSTANCE).query("SELECT * FROM test").get()); + assertThat(e.getMessage(), equalTo("current license is non-compliant for [sql]")); + enableSqlLicensing(); + + SqlQueryResponse response = client().prepareExecute(SqlQueryAction.INSTANCE).query("SELECT * FROM test").get(); + assertThat(response.size(), Matchers.equalTo(2L)); + } + + + public void testSqlQueryActionJdbcModeLicense() throws Exception { + setupTestIndex(); + disableJdbcLicensing(); + + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, + () -> client().prepareExecute(SqlQueryAction.INSTANCE).query("SELECT * FROM test").mode("jdbc").get()); + assertThat(e.getMessage(), equalTo("current license is non-compliant for [jdbc]")); + enableJdbcLicensing(); + + SqlQueryResponse response = client().prepareExecute(SqlQueryAction.INSTANCE).query("SELECT * FROM test").mode("jdbc").get(); + assertThat(response.size(), Matchers.equalTo(2L)); + } + + public void testSqlTranslateActionLicense() throws Exception { + setupTestIndex(); + disableSqlLicensing(); + + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, + () -> client().prepareExecute(SqlTranslateAction.INSTANCE).query("SELECT * FROM test").get()); + assertThat(e.getMessage(), equalTo("current license is non-compliant for [sql]")); + enableSqlLicensing(); + + SqlTranslateResponse response = client().prepareExecute(SqlTranslateAction.INSTANCE).query("SELECT * FROM test").get(); + SearchSourceBuilder source = response.source(); + assertThat(source.docValueFields(), Matchers.contains("count")); + FetchSourceContext fetchSource = source.fetchSource(); + assertThat(fetchSource.includes(), Matchers.arrayContaining("data")); + } + + // TODO test SqlGetIndicesAction. 
Skipping for now because of lack of serialization support. + + private void setupTestIndex() { + ElasticsearchAssertions.assertAcked(client().admin().indices().prepareCreate("test").get()); + client().prepareBulk() + .add(new IndexRequest("test", "doc", "1").source("data", "bar", "count", 42)) + .add(new IndexRequest("test", "doc", "2").source("data", "baz", "count", 43)) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + } + +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateActionIT.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateActionIT.java new file mode 100644 index 0000000000000..5de9cfca97a36 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateActionIT.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.action; + +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.search.sort.SortBuilders; +import org.elasticsearch.xpack.sql.plugin.SqlTranslateAction; +import org.elasticsearch.xpack.sql.plugin.SqlTranslateResponse; + +import static java.util.Collections.singletonList; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +public class SqlTranslateActionIT extends AbstractSqlIntegTestCase { + + public void testSqlTranslateAction() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test").get()); + client().prepareBulk() + .add(new IndexRequest("test", "doc", "1").source("data", "bar", "count", 42)) + .add(new IndexRequest("test", "doc", "2").source("data", "baz", "count", 43)) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + ensureYellow("test"); + + boolean columnOrder = randomBoolean(); + String columns = columnOrder ? "data, count" : "count, data"; + SqlTranslateResponse response = client().prepareExecute(SqlTranslateAction.INSTANCE) + .query("SELECT " + columns + " FROM test ORDER BY count").get(); + SearchSourceBuilder source = response.source(); + FetchSourceContext fetch = source.fetchSource(); + assertEquals(true, fetch.fetchSource()); + assertArrayEquals(new String[] { "data" }, fetch.includes()); + assertEquals(singletonList("count"), source.docValueFields()); + assertEquals(singletonList(SortBuilders.fieldSort("count")), source.sorts()); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java new file mode 100644 index 0000000000000..9d05d151359fd --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java @@ -0,0 +1,197 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.analysis.analyzer; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.analysis.index.EsIndex; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; +import org.elasticsearch.xpack.sql.analysis.index.MappingException; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.logical.Project; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.EsField; +import org.elasticsearch.xpack.sql.type.TypesTests; + +import java.util.List; +import java.util.Map; +import java.util.TimeZone; + +import static org.elasticsearch.xpack.sql.type.DataType.BOOLEAN; +import static org.elasticsearch.xpack.sql.type.DataType.KEYWORD; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasItems; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; + +public class FieldAttributeTests extends ESTestCase { + + private SqlParser parser; + private IndexResolution getIndexResult; + private FunctionRegistry functionRegistry; + private Analyzer analyzer; + + public FieldAttributeTests() { + parser = new SqlParser(); + functionRegistry = new FunctionRegistry(); + + Map mapping = TypesTests.loadMapping("mapping-multi-field-variation.json"); + + EsIndex test = new EsIndex("test", mapping); + getIndexResult = IndexResolution.valid(test); + analyzer = new Analyzer(functionRegistry, getIndexResult, TimeZone.getTimeZone("UTC")); + } + + private LogicalPlan plan(String sql) { + return analyzer.analyze(parser.createStatement(sql), true); + } + + private FieldAttribute attribute(String fieldName) { + // test multiple version of the attribute name + // to make sure all match the same thing + + // NB: the equality is done on the same since each plan bumps the expression counter + + // unqualified + FieldAttribute unqualified = parseQueryFor(fieldName); + // unquoted qualifier + FieldAttribute unquotedQualifier = parseQueryFor("test." + fieldName); + assertEquals(unqualified.name(), unquotedQualifier.name()); + assertEquals(unqualified.qualifiedName(), unquotedQualifier.qualifiedName()); + // quoted qualifier + FieldAttribute quotedQualifier = parseQueryFor("\"test\"." 
+ fieldName); + assertEquals(unqualified.name(), quotedQualifier.name()); + assertEquals(unqualified.qualifiedName(), quotedQualifier.qualifiedName()); + + return randomFrom(unqualified, unquotedQualifier, quotedQualifier); + } + + private FieldAttribute parseQueryFor(String fieldName) { + LogicalPlan plan = plan("SELECT " + fieldName + " FROM test"); + assertThat(plan, instanceOf(Project.class)); + Project p = (Project) plan; + List projections = p.projections(); + assertThat(projections, hasSize(1)); + Attribute attribute = projections.get(0).toAttribute(); + assertThat(attribute, instanceOf(FieldAttribute.class)); + return (FieldAttribute) attribute; + } + + private String error(String fieldName) { + VerificationException ve = expectThrows(VerificationException.class, () -> plan("SELECT " + fieldName + " FROM test")); + return ve.getMessage(); + } + + public void testRootField() { + FieldAttribute attr = attribute("bool"); + assertThat(attr.name(), is("bool")); + assertThat(attr.dataType(), is(BOOLEAN)); + } + + public void testDottedField() { + FieldAttribute attr = attribute("some.dotted.field"); + assertThat(attr.path(), is("some.dotted")); + assertThat(attr.name(), is("some.dotted.field")); + assertThat(attr.dataType(), is(KEYWORD)); + } + + public void testExactKeyword() { + FieldAttribute attr = attribute("some.string"); + assertThat(attr.path(), is("some")); + assertThat(attr.name(), is("some.string")); + assertThat(attr.dataType(), is(DataType.TEXT)); + assertThat(attr.isInexact(), is(true)); + FieldAttribute exact = attr.exactAttribute(); + assertThat(exact.isInexact(), is(false)); + assertThat(exact.name(), is("some.string.typical")); + assertThat(exact.dataType(), is(KEYWORD)); + } + + public void testAmbiguousExactKeyword() { + FieldAttribute attr = attribute("some.ambiguous"); + assertThat(attr.path(), is("some")); + assertThat(attr.name(), is("some.ambiguous")); + assertThat(attr.dataType(), is(DataType.TEXT)); + assertThat(attr.isInexact(), is(true)); + MappingException me = expectThrows(MappingException.class, () -> attr.exactAttribute()); + assertThat(me.getMessage(), + is("Multiple exact keyword candidates available for [ambiguous]; specify which one to use")); + } + + public void testNormalizedKeyword() { + FieldAttribute attr = attribute("some.string.normalized"); + assertThat(attr.path(), is("some.string")); + assertThat(attr.name(), is("some.string.normalized")); + assertThat(attr.dataType(), is(KEYWORD)); + assertThat(attr.isInexact(), is(true)); + } + + public void testDottedFieldPath() { + assertThat(error("some"), is("Found 1 problem(s)\nline 1:8: Cannot use field [some] type [object] only its subfields")); + } + + public void testDottedFieldPathDeeper() { + assertThat(error("some.dotted"), + is("Found 1 problem(s)\nline 1:8: Cannot use field [some.dotted] type [object] only its subfields")); + } + + public void testDottedFieldPathTypo() { + assertThat(error("some.dotted.fild"), + is("Found 1 problem(s)\nline 1:8: Unknown column [some.dotted.fild], did you mean [some.dotted.field]?")); + } + + public void testStarExpansionExcludesObjectAndUnsupportedTypes() { + LogicalPlan plan = plan("SELECT * FROM test"); + List list = ((Project) plan).projections(); + assertThat(list, hasSize(8)); + List names = Expressions.names(list); + assertThat(names, not(hasItem("some"))); + assertThat(names, not(hasItem("some.dotted"))); + assertThat(names, not(hasItem("unsupported"))); + assertThat(names, hasItems("bool", "text", "keyword", "int")); + } + + public void 
testFieldAmbiguity() { + Map mapping = TypesTests.loadMapping("mapping-dotted-field.json"); + + EsIndex index = new EsIndex("test", mapping); + getIndexResult = IndexResolution.valid(index); + analyzer = new Analyzer(functionRegistry, getIndexResult, TimeZone.getTimeZone("UTC")); + + VerificationException ex = expectThrows(VerificationException.class, () -> plan("SELECT test.bar FROM test")); + assertEquals( + "Found 1 problem(s)\nline 1:8: Reference [test.bar] is ambiguous (to disambiguate use quotes or qualifiers); " + + "matches any of [\"test\".\"bar\", \"test\".\"test.bar\"]", + ex.getMessage()); + + ex = expectThrows(VerificationException.class, () -> plan("SELECT test.test FROM test")); + assertEquals( + "Found 1 problem(s)\nline 1:8: Reference [test.test] is ambiguous (to disambiguate use quotes or qualifiers); " + + "matches any of [\"test\".\"test\", \"test\".\"test.test\"]", + ex.getMessage()); + + LogicalPlan plan = plan("SELECT test.test FROM test AS x"); + assertThat(plan, instanceOf(Project.class)); + + plan = plan("SELECT \"test\".test.test FROM test"); + assertThat(plan, instanceOf(Project.class)); + + Project p = (Project) plan; + List projections = p.projections(); + assertThat(projections, hasSize(1)); + Attribute attribute = projections.get(0).toAttribute(); + assertThat(attribute, instanceOf(FieldAttribute.class)); + assertThat(attribute.qualifier(), is("test")); + assertThat(attribute.name(), is("test.test")); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/PreAnalyzerTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/PreAnalyzerTests.java new file mode 100644 index 0000000000000..43aacd52083e2 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/PreAnalyzerTests.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.analysis.analyzer; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.analysis.analyzer.PreAnalyzer.PreAnalysis; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; + +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; + +public class PreAnalyzerTests extends ESTestCase { + + private SqlParser parser = new SqlParser(); + private PreAnalyzer preAnalyzer = new PreAnalyzer(); + + public void testBasicIndex() { + LogicalPlan plan = parser.createStatement("SELECT * FROM index"); + PreAnalysis result = preAnalyzer.preAnalyze(plan); + assertThat(plan.preAnalyzed(), is(true)); + assertThat(result.indices, hasSize(1)); + assertThat(result.indices.get(0).cluster(), nullValue()); + assertThat(result.indices.get(0).index(), is("index")); + } + + public void testBasicIndexWithCatalog() { + LogicalPlan plan = parser.createStatement("SELECT * FROM elastic:index"); + PreAnalysis result = preAnalyzer.preAnalyze(plan); + assertThat(plan.preAnalyzed(), is(true)); + assertThat(result.indices, hasSize(1)); + assertThat(result.indices.get(0).cluster(), is("elastic")); + assertThat(result.indices.get(0).index(), is("index")); + } + + public void testWildIndexWithCatalog() { + LogicalPlan plan = parser.createStatement("SELECT * FROM elastic:index*"); + PreAnalysis result = preAnalyzer.preAnalyze(plan); + assertThat(plan.preAnalyzed(), is(true)); + assertThat(result.indices, hasSize(1)); + assertThat(result.indices.get(0).cluster(), is("elastic")); + assertThat(result.indices.get(0).index(), is("index*")); + } + + public void testQuotedIndex() { + LogicalPlan plan = parser.createStatement("SELECT * FROM \"aaa\""); + PreAnalysis result = preAnalyzer.preAnalyze(plan); + assertThat(plan.preAnalyzed(), is(true)); + assertThat(result.indices, hasSize(1)); + assertThat(result.indices.get(0).cluster(), nullValue()); + assertThat(result.indices.get(0).index(), is("aaa")); + } + + public void testQuotedCatalog() { + LogicalPlan plan = parser.createStatement("SELECT * FROM \"elastic\":\"aaa\""); + PreAnalysis result = preAnalyzer.preAnalyze(plan); + assertThat(plan.preAnalyzed(), is(true)); + assertThat(result.indices, hasSize(1)); + assertThat(result.indices.get(0).cluster(), is("elastic")); + assertThat(result.indices.get(0).index(), is("aaa")); + } + + public void testComplicatedQuery() { + LogicalPlan plan = parser.createStatement("SELECT MAX(a) FROM aaa WHERE d > 10 GROUP BY b HAVING AVG(c) ORDER BY e ASC"); + PreAnalysis result = preAnalyzer.preAnalyze(plan); + assertThat(plan.preAnalyzed(), is(true)); + assertThat(result.indices, hasSize(1)); + assertThat(result.indices.get(0).cluster(), nullValue()); + assertThat(result.indices.get(0).index(), is("aaa")); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java new file mode 100644 index 0000000000000..355c4d2f7b763 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -0,0 +1,147 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.analysis.analyzer; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.analysis.AnalysisException; +import org.elasticsearch.xpack.sql.analysis.index.EsIndex; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.type.EsField; +import org.elasticsearch.xpack.sql.type.TypesTests; + +import java.util.Map; +import java.util.TimeZone; + +public class VerifierErrorMessagesTests extends ESTestCase { + private SqlParser parser = new SqlParser(); + + private String verify(String sql) { + Map mapping = TypesTests.loadMapping("mapping-multi-field-with-nested.json"); + EsIndex test = new EsIndex("test", mapping); + return verify(IndexResolution.valid(test), sql); + } + + private String verify(IndexResolution getIndexResult, String sql) { + Analyzer analyzer = new Analyzer(new FunctionRegistry(), getIndexResult, TimeZone.getTimeZone("UTC")); + AnalysisException e = expectThrows(AnalysisException.class, () -> analyzer.analyze(parser.createStatement(sql), true)); + assertTrue(e.getMessage().startsWith("Found ")); + String header = "Found 1 problem(s)\nline "; + return e.getMessage().substring(header.length()); + } + + public void testMissingIndex() { + assertEquals("1:17: Unknown index [missing]", verify(IndexResolution.notFound("missing"), "SELECT foo FROM missing")); + } + + public void testMissingColumn() { + assertEquals("1:8: Unknown column [xxx]", verify("SELECT xxx FROM test")); + } + + public void testMisspelledColumn() { + assertEquals("1:8: Unknown column [txt], did you mean [text]?", verify("SELECT txt FROM test")); + } + + public void testFunctionOverMissingField() { + assertEquals("1:12: Unknown column [xxx]", verify("SELECT ABS(xxx) FROM test")); + } + + public void testMissingFunction() { + assertEquals("1:8: Unknown function [ZAZ]", verify("SELECT ZAZ(bool) FROM test")); + } + + public void testMisspelledFunction() { + assertEquals("1:8: Unknown function [COONT], did you mean any of [COUNT, COT]?", verify("SELECT COONT(bool) FROM test")); + } + + public void testMissingColumnInGroupBy() { + assertEquals("1:41: Unknown column [xxx]", verify("SELECT * FROM test GROUP BY DAY_OF_YEAR(xxx)")); + } + + public void testFilterOnUnknownColumn() { + assertEquals("1:26: Unknown column [xxx]", verify("SELECT * FROM test WHERE xxx = 1")); + } + + public void testMissingColumnInOrderBy() { + // xxx offset is that of the order by field + assertEquals("1:29: Unknown column [xxx]", verify("SELECT * FROM test ORDER BY xxx")); + } + + public void testMissingColumnFunctionInOrderBy() { + // xxx offset is that of the order by field + assertEquals("1:41: Unknown column [xxx]", verify("SELECT * FROM test ORDER BY DAY_oF_YEAR(xxx)")); + } + + public void testMissingExtract() { + assertEquals("1:8: Unknown datetime field [ZAZ]", verify("SELECT EXTRACT(ZAZ FROM date) FROM test")); + } + + public void testMissingExtractSimilar() { + assertEquals("1:8: Unknown datetime field [DAP], did you mean [DAY]?", verify("SELECT EXTRACT(DAP FROM date) FROM test")); + } + + public void testMissingExtractSimilarMany() { + assertEquals("1:8: Unknown datetime field [DOP], did you mean any of [DOM, DOW, DOY]?", + verify("SELECT EXTRACT(DOP FROM date) FROM test")); + } + + 
public void testExtractNonDateTime() { + assertEquals("1:8: Invalid datetime field [ABS]. Use any datetime function.", verify("SELECT EXTRACT(ABS FROM date) FROM test")); + } + + public void testMultipleColumns() { + // xxx offset is that of the order by field + assertEquals("1:43: Unknown column [xxx]\nline 1:8: Unknown column [xxx]", + verify("SELECT xxx FROM test GROUP BY DAY_oF_YEAR(xxx)")); + } + + // GROUP BY + public void testGroupBySelectNonGrouped() { + assertEquals("1:8: Cannot use non-grouped column [text], expected [int]", + verify("SELECT text, int FROM test GROUP BY int")); + } + + public void testGroupByOrderByNonGrouped() { + assertEquals("1:50: Cannot order by non-grouped column [bool], expected [text]", + verify("SELECT MAX(int) FROM test GROUP BY text ORDER BY bool")); + } + + public void testGroupByOrderByScalarOverNonGrouped() { + assertEquals("1:50: Cannot order by non-grouped column [date], expected [text]", + verify("SELECT MAX(int) FROM test GROUP BY text ORDER BY YEAR(date)")); + } + + public void testGroupByHavingNonGrouped() { + assertEquals("1:48: Cannot filter by non-grouped column [int], expected [text]", + verify("SELECT AVG(int) FROM test GROUP BY text HAVING int > 10")); + } + + public void testGroupByAggregate() { + assertEquals("1:36: Cannot use an aggregate [AVG] for grouping", + verify("SELECT AVG(int) FROM test GROUP BY AVG(int)")); + } + + public void testGroupByOnNested() { + assertEquals("1:38: Grouping isn't (yet) compatible with nested fields [dep.dep_id]", + verify("SELECT dep.dep_id FROM test GROUP BY dep.dep_id")); + } + + public void testHavingOnNested() { + assertEquals("1:51: HAVING isn't (yet) compatible with nested fields [dep.start_date]", + verify("SELECT int FROM test GROUP BY int HAVING AVG(YEAR(dep.start_date)) > 1980")); + } + + public void testGroupByScalarFunctionWithAggOnTarget() { + assertEquals("1:31: Cannot use an aggregate [AVG] for grouping", + verify("SELECT int FROM test GROUP BY AVG(int) + 2")); + } + + public void testUnsupportedType() { + assertEquals("1:8: Cannot use field [unsupported] type [ip_range] as is unsupported", + verify("SELECT unsupported FROM test")); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursorTests.java new file mode 100644 index 0000000000000..dba3e3ddfda87 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggregationCursorTests.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.sql.execution.search;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.xpack.sql.execution.search.extractor.BucketExtractor;
+import org.elasticsearch.xpack.sql.execution.search.extractor.CompositeKeyExtractorTests;
+import org.elasticsearch.xpack.sql.execution.search.extractor.ConstantExtractorTests;
+import org.elasticsearch.xpack.sql.execution.search.extractor.MetricAggExtractorTests;
+import org.elasticsearch.xpack.sql.session.Cursors;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Supplier;
+
+public class CompositeAggregationCursorTests extends AbstractWireSerializingTestCase<CompositeAggregationCursor> {
+    public static CompositeAggregationCursor randomCompositeCursor() {
+        int extractorsSize = between(1, 20);
+        List<BucketExtractor> extractors = new ArrayList<>(extractorsSize);
+        for (int i = 0; i < extractorsSize; i++) {
+            extractors.add(randomBucketExtractor());
+        }
+        return new CompositeAggregationCursor(new byte[randomInt(256)], extractors, randomIntBetween(10, 1024), randomAlphaOfLength(5));
+    }
+
+    static BucketExtractor randomBucketExtractor() {
+        List<Supplier<BucketExtractor>> options = new ArrayList<>();
+        options.add(ConstantExtractorTests::randomConstantExtractor);
+        options.add(MetricAggExtractorTests::randomMetricAggExtractor);
+        options.add(CompositeKeyExtractorTests::randomCompositeKeyExtractor);
+        return randomFrom(options).get();
+    }
+
+    @Override
+    protected CompositeAggregationCursor mutateInstance(CompositeAggregationCursor instance) throws IOException {
+        return new CompositeAggregationCursor(instance.next(), instance.extractors(),
+                randomValueOtherThan(instance.limit(), () -> randomIntBetween(1, 512)), instance.indices());
+    }
+
+    @Override
+    protected NamedWriteableRegistry getNamedWriteableRegistry() {
+        return new NamedWriteableRegistry(Cursors.getNamedWriteables());
+    }
+
+    @Override
+    protected CompositeAggregationCursor createTestInstance() {
+        return randomCompositeCursor();
+    }
+
+    @Override
+    protected Reader<CompositeAggregationCursor> instanceReader() {
+        return CompositeAggregationCursor::new;
+    }
+
+    @Override
+    protected CompositeAggregationCursor copyInstance(CompositeAggregationCursor instance, Version version) throws IOException {
+        /* Randomly choose between internal protocol round trip and String based
+         * round trips used to toXContent. */
+        if (randomBoolean()) {
+            return super.copyInstance(instance, version);
+        }
+        return (CompositeAggregationCursor) Cursors.decodeFromString(Cursors.encodeToString(version, instance));
+    }
+}
\ No newline at end of file
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java
new file mode 100644
index 0000000000000..0cd8c33b11688
--- /dev/null
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.sql.execution.search;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.ClearScrollRequest;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.sql.SqlException;
+import org.elasticsearch.xpack.sql.plugin.CliFormatter;
+import org.elasticsearch.xpack.sql.plugin.CliFormatterCursor;
+import org.elasticsearch.xpack.sql.plugin.ColumnInfo;
+import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse;
+import org.elasticsearch.xpack.sql.session.Configuration;
+import org.elasticsearch.xpack.sql.session.Cursor;
+import org.elasticsearch.xpack.sql.session.Cursors;
+import org.mockito.ArgumentCaptor;
+
+import java.sql.JDBCType;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.function.Supplier;
+
+import static org.elasticsearch.action.support.PlainActionFuture.newFuture;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyZeroInteractions;
+
+public class CursorTests extends ESTestCase {
+
+    public void testEmptyCursorClearCursor() {
+        Client clientMock = mock(Client.class);
+        Cursor cursor = Cursor.EMPTY;
+        PlainActionFuture<Boolean> future = newFuture();
+        cursor.clear(Configuration.DEFAULT, clientMock, future);
+        assertFalse(future.actionGet());
+        verifyZeroInteractions(clientMock);
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testScrollCursorClearCursor() {
+        Client clientMock = mock(Client.class);
+        ActionListener<Boolean> listenerMock = mock(ActionListener.class);
+        String cursorString = randomAlphaOfLength(10);
+        Cursor cursor = new ScrollCursor(cursorString, Collections.emptyList(), randomInt());
+
+        cursor.clear(Configuration.DEFAULT, clientMock, listenerMock);
+
+        ArgumentCaptor<ClearScrollRequest> request = ArgumentCaptor.forClass(ClearScrollRequest.class);
+        verify(clientMock).clearScroll(request.capture(), any(ActionListener.class));
+        assertEquals(Collections.singletonList(cursorString), request.getValue().getScrollIds());
+        verifyZeroInteractions(listenerMock);
+    }
+
+    private static SqlQueryResponse createRandomSqlResponse() {
+        int columnCount = between(1, 10);
+
+        List<ColumnInfo> columns = null;
+        if (randomBoolean()) {
+            columns = new ArrayList<>(columnCount);
+            for (int i = 0; i < columnCount; i++) {
+                columns.add(new ColumnInfo(randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10),
+                        randomFrom(JDBCType.values()), randomInt(25)));
+            }
+        }
+        return new SqlQueryResponse("", columns, Collections.emptyList());
+    }
+
+    @SuppressWarnings("unchecked")
+    static Cursor randomNonEmptyCursor() {
+        Supplier<Cursor> cursorSupplier = randomFrom(
+                () -> ScrollCursorTests.randomScrollCursor(),
+                () -> {
+                    SqlQueryResponse response = createRandomSqlResponse();
+                    if (response.columns() != null && response.rows() != null) {
+                        return CliFormatterCursor.wrap(ScrollCursorTests.randomScrollCursor(), new CliFormatter(response));
+                    } else {
+                        return ScrollCursorTests.randomScrollCursor();
+                    }
+
+                }
+        );
+        return cursorSupplier.get();
+    }
+
+    public void testVersionHandling() {
+        Cursor cursor = randomNonEmptyCursor();
+        assertEquals(cursor, Cursors.decodeFromString(Cursors.encodeToString(Version.CURRENT, cursor)));
+
+        Version nextMinorVersion = Version.fromId(Version.CURRENT.id + 10000);
+
+        String encodedWithWrongVersion = Cursors.encodeToString(nextMinorVersion, cursor);
+        SqlException exception = expectThrows(SqlException.class, () -> Cursors.decodeFromString(encodedWithWrongVersion));
+
+        assertEquals("Unsupported cursor version " + nextMinorVersion, exception.getMessage());
+    }
+
+
+}
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursorTests.java
new file mode 100644
index 0000000000000..9a3a2fe2eb9d5
--- /dev/null
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/ScrollCursorTests.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.sql.execution.search;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.xpack.sql.execution.search.extractor.ComputingExtractorTests;
+import org.elasticsearch.xpack.sql.execution.search.extractor.ConstantExtractorTests;
+import org.elasticsearch.xpack.sql.execution.search.extractor.FieldHitExtractorTests;
+import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractor;
+import org.elasticsearch.xpack.sql.session.Cursors;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Supplier;
+
+public class ScrollCursorTests extends AbstractWireSerializingTestCase<ScrollCursor> {
+    public static ScrollCursor randomScrollCursor() {
+        int extractorsSize = between(1, 20);
+        List<HitExtractor> extractors = new ArrayList<>(extractorsSize);
+        for (int i = 0; i < extractorsSize; i++) {
+            extractors.add(randomHitExtractor(0));
+        }
+        return new ScrollCursor(randomAlphaOfLength(5), extractors, randomIntBetween(10, 1024));
+    }
+
+    static HitExtractor randomHitExtractor(int depth) {
+        List<Supplier<HitExtractor>> options = new ArrayList<>();
+        if (depth < 5) {
+            options.add(() -> ComputingExtractorTests.randomComputingExtractor());
+        }
+        options.add(ConstantExtractorTests::randomConstantExtractor);
+        options.add(FieldHitExtractorTests::randomFieldHitExtractor);
+        return randomFrom(options).get();
+    }
+
+    @Override
+    protected ScrollCursor mutateInstance(ScrollCursor instance) throws IOException {
+        return new ScrollCursor(instance.scrollId(), instance.extractors(),
+                randomValueOtherThan(instance.limit(), () -> randomIntBetween(1, 1024)));
+    }
+
+    @Override
+    protected NamedWriteableRegistry getNamedWriteableRegistry() {
+        return new NamedWriteableRegistry(Cursors.getNamedWriteables());
+    }
+
+    @Override
+    protected ScrollCursor createTestInstance() {
+        return randomScrollCursor();
+    }
+
+    @Override
+    protected Reader<ScrollCursor> instanceReader() {
+        return ScrollCursor::new;
+    }
+
+    @Override
+    protected ScrollCursor copyInstance(ScrollCursor instance, Version version) throws IOException {
+        /* Randomly choose between internal protocol round trip and String based
+         * round trips used to toXContent.
*/ + if (randomBoolean()) { + return super.copyInstance(instance, version); + } + return (ScrollCursor) Cursors.decodeFromString(Cursors.encodeToString(version, instance)); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SourceGeneratorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SourceGeneratorTests.java new file mode 100644 index 0000000000000..f038a20823dbc --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SourceGeneratorTests.java @@ -0,0 +1,117 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search; + +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.ConstantScoreQueryBuilder; +import org.elasticsearch.index.query.MatchQueryBuilder; +import org.elasticsearch.index.query.Operator; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; +import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.function.Score; +import org.elasticsearch.xpack.sql.querydsl.agg.AvgAgg; +import org.elasticsearch.xpack.sql.querydsl.agg.GroupByColumnKey; +import org.elasticsearch.xpack.sql.querydsl.container.AttributeSort; +import org.elasticsearch.xpack.sql.querydsl.container.QueryContainer; +import org.elasticsearch.xpack.sql.querydsl.container.ScoreSort; +import org.elasticsearch.xpack.sql.querydsl.container.Sort.Direction; +import org.elasticsearch.xpack.sql.querydsl.query.MatchQuery; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.type.KeywordEsField; + +import static java.util.Collections.singletonList; +import static org.elasticsearch.search.sort.SortBuilders.fieldSort; +import static org.elasticsearch.search.sort.SortBuilders.scoreSort; + +public class SourceGeneratorTests extends ESTestCase { + + public void testNoQueryNoFilter() { + QueryContainer container = new QueryContainer(); + SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, null, randomIntBetween(1, 10)); + assertNull(sourceBuilder.query()); + } + + public void testQueryNoFilter() { + QueryContainer container = new QueryContainer().with(new MatchQuery(Location.EMPTY, "foo", "bar")); + SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, null, randomIntBetween(1, 10)); + assertEquals(new MatchQueryBuilder("foo", "bar").operator(Operator.OR), sourceBuilder.query()); + } + + public void testNoQueryFilter() { + QueryContainer container = new QueryContainer(); + QueryBuilder filter = new MatchQueryBuilder("bar", "baz"); + SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, filter, randomIntBetween(1, 10)); + assertEquals(new ConstantScoreQueryBuilder(new MatchQueryBuilder("bar", "baz")), sourceBuilder.query()); + } + + public void testQueryFilter() { + QueryContainer container = new QueryContainer().with(new MatchQuery(Location.EMPTY, "foo", 
"bar")); + QueryBuilder filter = new MatchQueryBuilder("bar", "baz"); + SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, filter, randomIntBetween(1, 10)); + assertEquals(new BoolQueryBuilder().must(new MatchQueryBuilder("foo", "bar").operator(Operator.OR)) + .filter(new MatchQueryBuilder("bar", "baz")), sourceBuilder.query()); + } + + public void testLimit() { + QueryContainer container = new QueryContainer().withLimit(10).addGroups(singletonList(new GroupByColumnKey("1", "field"))); + SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, null, randomIntBetween(1, 10)); + Builder aggBuilder = sourceBuilder.aggregations(); + assertEquals(1, aggBuilder.count()); + CompositeAggregationBuilder composite = (CompositeAggregationBuilder) aggBuilder.getAggregatorFactories().get(0); + // TODO: cannot access size + //assertEquals(10, composite.size()); + } + + public void testSortNoneSpecified() { + QueryContainer container = new QueryContainer(); + SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, null, randomIntBetween(1, 10)); + assertEquals(singletonList(fieldSort("_doc")), sourceBuilder.sorts()); + } + + public void testSelectScoreForcesTrackingScore() { + QueryContainer container = new QueryContainer() + .addColumn(new Score(new Location(1, 1)).toAttribute()); + SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, null, randomIntBetween(1, 10)); + assertTrue(sourceBuilder.trackScores()); + } + + public void testSortScoreSpecified() { + QueryContainer container = new QueryContainer() + .sort(new ScoreSort(Direction.DESC)); + SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, null, randomIntBetween(1, 10)); + assertEquals(singletonList(scoreSort()), sourceBuilder.sorts()); + } + + public void testSortFieldSpecified() { + QueryContainer container = new QueryContainer() + .sort(new AttributeSort(new FieldAttribute(new Location(1, 1), "test", new KeywordEsField("test")), Direction.ASC)); + SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, null, randomIntBetween(1, 10)); + assertEquals(singletonList(fieldSort("test").order(SortOrder.ASC)), sourceBuilder.sorts()); + + container = new QueryContainer() + .sort(new AttributeSort(new FieldAttribute(new Location(1, 1), "test", new KeywordEsField("test")), Direction.DESC)); + sourceBuilder = SourceGenerator.sourceBuilder(container, null, randomIntBetween(1, 10)); + assertEquals(singletonList(fieldSort("test").order(SortOrder.DESC)), sourceBuilder.sorts()); + } + + public void testNoSort() { + SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(new QueryContainer(), null, randomIntBetween(1, 10)); + assertEquals(singletonList(fieldSort("_doc").order(SortOrder.ASC)), sourceBuilder.sorts()); + } + + public void testNoSortIfAgg() { + QueryContainer container = new QueryContainer() + .addGroups(singletonList(new GroupByColumnKey("group_id", "group_column"))) + .addAgg("group_id", new AvgAgg("agg_id", "avg_column")); + SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, null, randomIntBetween(1, 10)); + assertNull(sourceBuilder.sorts()); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilderTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilderTests.java new file mode 100644 index 0000000000000..0d57ad97c9831 --- /dev/null +++ 
b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilderTests.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.sql.execution.search;
+
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.Arrays;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.equalTo;
+
+public class SqlSourceBuilderTests extends ESTestCase {
+    public void testSqlSourceBuilder() {
+        final SqlSourceBuilder ssb = new SqlSourceBuilder();
+        final SearchSourceBuilder source = new SearchSourceBuilder();
+        ssb.trackScores();
+        ssb.addSourceField("foo");
+        ssb.addSourceField("foo2");
+        ssb.addDocField("bar");
+        ssb.addDocField("bar2");
+        final Script s = new Script("eggplant");
+        ssb.addScriptField("baz", s);
+        final Script s2 = new Script("potato");
+        ssb.addScriptField("baz2", s2);
+        ssb.build(source);
+
+        assertTrue(source.trackScores());
+        FetchSourceContext fsc = source.fetchSource();
+        assertThat(Arrays.asList(fsc.includes()), contains("foo", "foo2"));
+        assertThat(source.docValueFields(), contains("bar", "bar2"));
+        Map<String, Script> scriptFields = source.scriptFields()
+                .stream()
+                .collect(Collectors.toMap(SearchSourceBuilder.ScriptField::fieldName, SearchSourceBuilder.ScriptField::script));
+        assertThat(scriptFields.get("baz").getIdOrCode(), equalTo("eggplant"));
+        assertThat(scriptFields.get("baz2").getIdOrCode(), equalTo("potato"));
+    }
+}
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractorTests.java
new file mode 100644
index 0000000000000..11068372bcc8a
--- /dev/null
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractorTests.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.querydsl.container.GroupByRef.Property; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.util.TimeZone; + +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; + +public class CompositeKeyExtractorTests extends AbstractWireSerializingTestCase { + + public static CompositeKeyExtractor randomCompositeKeyExtractor() { + return new CompositeKeyExtractor(randomAlphaOfLength(16), randomFrom(asList(Property.values())), randomSafeTimeZone()); + } + + @Override + protected CompositeKeyExtractor createTestInstance() { + return randomCompositeKeyExtractor(); + } + + @Override + protected Reader instanceReader() { + return CompositeKeyExtractor::new; + } + + @Override + protected CompositeKeyExtractor mutateInstance(CompositeKeyExtractor instance) throws IOException { + return new CompositeKeyExtractor(instance.key() + "mutated", instance.property(), instance.timeZone()); + } + + public void testExtractBucketCount() { + Bucket bucket = new TestBucket(emptyMap(), randomLong(), new Aggregations(emptyList())); + CompositeKeyExtractor extractor = new CompositeKeyExtractor(randomAlphaOfLength(16), Property.COUNT, + randomTimeZone()); + assertEquals(bucket.getDocCount(), extractor.extract(bucket)); + } + + public void testExtractKey() { + CompositeKeyExtractor extractor = new CompositeKeyExtractor(randomAlphaOfLength(16), Property.VALUE, null); + + Object value = new Object(); + Bucket bucket = new TestBucket(singletonMap(extractor.key(), value), randomLong(), new Aggregations(emptyList())); + assertEquals(value, extractor.extract(bucket)); + } + + public void testExtractDate() { + CompositeKeyExtractor extractor = new CompositeKeyExtractor(randomAlphaOfLength(16), Property.VALUE, randomSafeTimeZone()); + + long millis = System.currentTimeMillis(); + Bucket bucket = new TestBucket(singletonMap(extractor.key(), millis), randomLong(), new Aggregations(emptyList())); + assertEquals(new DateTime(millis, DateTimeZone.forTimeZone(extractor.timeZone())), extractor.extract(bucket)); + } + + public void testExtractIncorrectDateKey() { + CompositeKeyExtractor extractor = new CompositeKeyExtractor(randomAlphaOfLength(16), Property.VALUE, randomTimeZone()); + + Object value = new Object(); + Bucket bucket = new TestBucket(singletonMap(extractor.key(), value), randomLong(), new Aggregations(emptyList())); + SqlIllegalArgumentException exception = expectThrows(SqlIllegalArgumentException.class, () -> extractor.extract(bucket)); + assertEquals("Invalid date key returned: " + value, exception.getMessage()); + } + + /** + * We need to exclude SystemV/* time zones because they cannot be converted + * back to DateTimeZone which we currently still need to do internally, + * e.g. 
in bwc serialization and in the extract() method + */ + private static TimeZone randomSafeTimeZone() { + return randomValueOtherThanMany(tz -> tz.getID().startsWith("SystemV"), () -> randomTimeZone()); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java new file mode 100644 index 0000000000000..74721eca22af1 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.document.DocumentField; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.expression.function.scalar.CastProcessorTests; +import org.elasticsearch.xpack.sql.expression.function.scalar.Processors; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessorTests; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathFunctionProcessorTests; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ChainingProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ChainingProcessorTests; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.HitExtractorProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.sql.util.CollectionUtils.combine; + +public class ComputingExtractorTests extends AbstractWireSerializingTestCase { + public static ComputingExtractor randomComputingExtractor() { + return new ComputingExtractor(randomProcessor(), randomAlphaOfLength(10)); + } + + public static Processor randomProcessor() { + List> options = new ArrayList<>(); + options.add(() -> ChainingProcessorTests.randomComposeProcessor()); + options.add(CastProcessorTests::randomCastProcessor); + options.add(DateTimeProcessorTests::randomDateTimeProcessor); + options.add(MathFunctionProcessorTests::randomMathFunctionProcessor); + return randomFrom(options).get(); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(combine(Processors.getNamedWriteables(), HitExtractors.getNamedWriteables())); + } + + @Override + protected ComputingExtractor createTestInstance() { + return randomComputingExtractor(); + } + + @Override + protected Reader instanceReader() { + return ComputingExtractor::new; + } + + @Override 
+ protected ComputingExtractor mutateInstance(ComputingExtractor instance) throws IOException { + return new ComputingExtractor( + randomValueOtherThan(instance.processor(), () -> randomProcessor()), + randomValueOtherThan(instance.hitName(), () -> randomAlphaOfLength(10)) + ); + } + + public void testGet() { + String fieldName = randomAlphaOfLength(5); + ChainingProcessor extractor = new ChainingProcessor( + new HitExtractorProcessor(new FieldHitExtractor(fieldName, true)), + new MathProcessor(MathOperation.LOG)); + + int times = between(1, 1000); + for (int i = 0; i < times; i++) { + double value = randomDouble(); + double expected = Math.log(value); + SearchHit hit = new SearchHit(1); + DocumentField field = new DocumentField(fieldName, singletonList(value)); + hit.fields(singletonMap(fieldName, field)); + assertEquals(expected, extractor.process(hit)); + } + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ConstantExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ConstantExtractorTests.java new file mode 100644 index 0000000000000..7c1e5258b7655 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ConstantExtractorTests.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.function.Supplier; + +public class ConstantExtractorTests extends AbstractWireSerializingTestCase { + public static ConstantExtractor randomConstantExtractor() { + return new ConstantExtractor(randomValidConstant()); + } + + private static Object randomValidConstant() { + @SuppressWarnings("unchecked") + Supplier valueSupplier = randomFrom( + () -> randomInt(), + () -> randomDouble(), + () -> randomAlphaOfLengthBetween(1, 140)); + return valueSupplier.get(); + } + + @Override + protected ConstantExtractor createTestInstance() { + return randomConstantExtractor(); + } + + @Override + protected Reader instanceReader() { + return ConstantExtractor::new; + } + + @Override + protected ConstantExtractor mutateInstance(ConstantExtractor instance) throws IOException { + return new ConstantExtractor(instance.extract((SearchHit) null) + "mutated"); + } + + public void testGet() { + Object expected = randomValidConstant(); + int times = between(1, 1000); + for (int i = 0; i < times; i++) { + assertSame(expected, new ConstantExtractor(expected).extract((SearchHit) null)); + } + } + + public void testToString() { + assertEquals("^foo", new ConstantExtractor("foo").toString()); + assertEquals("^42", new ConstantExtractor("42").toString()); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java new file mode 100644 index 0000000000000..de36969898c20 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java 
@@ -0,0 +1,249 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.document.DocumentField; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.SqlException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +import static java.util.Arrays.asList; +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.is; + +public class FieldHitExtractorTests extends AbstractWireSerializingTestCase { + public static FieldHitExtractor randomFieldHitExtractor() { + String hitName = randomAlphaOfLength(5); + String name = randomAlphaOfLength(5) + "." + hitName; + return new FieldHitExtractor(name, randomBoolean(), hitName); + } + + @Override + protected FieldHitExtractor createTestInstance() { + return randomFieldHitExtractor(); + } + + @Override + protected Reader instanceReader() { + return FieldHitExtractor::new; + } + + @Override + protected FieldHitExtractor mutateInstance(FieldHitExtractor instance) throws IOException { + return new FieldHitExtractor(instance.fieldName() + "mutated", true, instance.hitName()); + } + + @AwaitsFix(bugUrl = "implement after we're sure of the InnerHitExtractor's implementation") + public void testGetNested() throws IOException { + fail("implement after we're sure of the InnerHitExtractor's implementation"); + } + + public void testGetDottedValueWithDocValues() { + String grandparent = randomAlphaOfLength(5); + String parent = randomAlphaOfLength(5); + String child = randomAlphaOfLength(5); + String fieldName = grandparent + "." + parent + "." + child; + + FieldHitExtractor extractor = new FieldHitExtractor(fieldName, true); + + int times = between(1, 1000); + for (int i = 0; i < times; i++) { + + List documentFieldValues = new ArrayList<>(); + if (randomBoolean()) { + documentFieldValues.add(randomValue()); + } + + SearchHit hit = new SearchHit(1); + DocumentField field = new DocumentField(fieldName, documentFieldValues); + hit.fields(singletonMap(fieldName, field)); + Object result = documentFieldValues.isEmpty() ? null : documentFieldValues.get(0); + assertEquals(result, extractor.extract(hit)); + } + } + + public void testGetDottedValueWithSource() throws Exception { + String grandparent = randomAlphaOfLength(5); + String parent = randomAlphaOfLength(5); + String child = randomAlphaOfLength(5); + String fieldName = grandparent + "." + parent + "." + child; + + FieldHitExtractor extractor = new FieldHitExtractor(fieldName, false); + + int times = between(1, 1000); + for (int i = 0; i < times; i++) { + /* We use values that are parsed from json as "equal" to make the + * test simpler. 
*/ + Object value = randomValue(); + SearchHit hit = new SearchHit(1); + XContentBuilder source = JsonXContent.contentBuilder(); + boolean hasGrandparent = randomBoolean(); + boolean hasParent = randomBoolean(); + boolean hasChild = randomBoolean(); + boolean hasSource = hasGrandparent && hasParent && hasChild; + + source.startObject(); + if (hasGrandparent) { + source.startObject(grandparent); + if (hasParent) { + source.startObject(parent); + if (hasChild) { + source.field(child, value); + if (randomBoolean()) { + source.field(fieldName + randomAlphaOfLength(3), value + randomAlphaOfLength(3)); + } + } + source.endObject(); + } + source.endObject(); + } + source.endObject(); + BytesReference sourceRef = BytesReference.bytes(source); + hit.sourceRef(sourceRef); + Object extract = extractor.extract(hit); + assertEquals(hasSource ? value : null, extract); + } + } + + public void testGetDocValue() { + String fieldName = randomAlphaOfLength(5); + FieldHitExtractor extractor = new FieldHitExtractor(fieldName, true); + + int times = between(1, 1000); + for (int i = 0; i < times; i++) { + List documentFieldValues = new ArrayList<>(); + if (randomBoolean()) { + documentFieldValues.add(randomValue()); + } + SearchHit hit = new SearchHit(1); + DocumentField field = new DocumentField(fieldName, documentFieldValues); + hit.fields(singletonMap(fieldName, field)); + Object result = documentFieldValues.isEmpty() ? null : documentFieldValues.get(0); + assertEquals(result, extractor.extract(hit)); + } + } + + public void testGetSource() throws IOException { + String fieldName = randomAlphaOfLength(5); + FieldHitExtractor extractor = new FieldHitExtractor(fieldName, false); + + int times = between(1, 1000); + for (int i = 0; i < times; i++) { + /* We use values that are parsed from json as "equal" to make the + * test simpler. 
*/ + Object value = randomValue(); + SearchHit hit = new SearchHit(1); + XContentBuilder source = JsonXContent.contentBuilder(); + source.startObject(); { + source.field(fieldName, value); + if (randomBoolean()) { + source.field(fieldName + "_random_junk", value + "_random_junk"); + } + } + source.endObject(); + BytesReference sourceRef = BytesReference.bytes(source); + hit.sourceRef(sourceRef); + assertEquals(value, extractor.extract(hit)); + } + } + + public void testToString() { + assertEquals("hit.field@hit", new FieldHitExtractor("hit.field", true, "hit").toString()); + } + + public void testMultiValuedDocValue() { + String fieldName = randomAlphaOfLength(5); + FieldHitExtractor fe = new FieldHitExtractor(fieldName, true); + SearchHit hit = new SearchHit(1); + DocumentField field = new DocumentField(fieldName, asList("a", "b")); + hit.fields(singletonMap(fieldName, field)); + SqlException ex = expectThrows(SqlException.class, () -> fe.extract(hit)); + assertThat(ex.getMessage(), is("Arrays (returned by [" + fieldName + "]) are not supported")); + } + + public void testMultiValuedSourceValue() throws IOException { + String fieldName = randomAlphaOfLength(5); + FieldHitExtractor fe = new FieldHitExtractor(fieldName, false); + SearchHit hit = new SearchHit(1); + XContentBuilder source = JsonXContent.contentBuilder(); + source.startObject(); { + source.field(fieldName, asList("a", "b")); + } + source.endObject(); + BytesReference sourceRef = BytesReference.bytes(source); + hit.sourceRef(sourceRef); + SqlException ex = expectThrows(SqlException.class, () -> fe.extract(hit)); + assertThat(ex.getMessage(), is("Arrays (returned by [" + fieldName + "]) are not supported")); + } + + public void testSingleValueArrayInSource() throws IOException { + String fieldName = randomAlphaOfLength(5); + FieldHitExtractor fe = new FieldHitExtractor(fieldName, false); + SearchHit hit = new SearchHit(1); + XContentBuilder source = JsonXContent.contentBuilder(); + Object value = randomValue(); + source.startObject(); { + source.field(fieldName, Collections.singletonList(value)); + } + source.endObject(); + BytesReference sourceRef = BytesReference.bytes(source); + hit.sourceRef(sourceRef); + assertEquals(value, fe.extract(hit)); + } + + public void testExtractSourcePath() { + FieldHitExtractor fe = new FieldHitExtractor("a.b.c", false); + Object value = randomValue(); + Map map = singletonMap("a", singletonMap("b", singletonMap("c", value))); + assertThat(fe.extractFromSource(map), is(value)); + } + + public void testExtractSourceIncorrectPath() { + FieldHitExtractor fe = new FieldHitExtractor("a.b.c.d", false); + Object value = randomNonNullValue(); + Map map = singletonMap("a", singletonMap("b", singletonMap("c", value))); + SqlException ex = expectThrows(SqlException.class, () -> fe.extractFromSource(map)); + assertThat(ex.getMessage(), is("Cannot extract value [a.b.c.d] from source")); + } + + public void testMultiValuedSource() { + FieldHitExtractor fe = new FieldHitExtractor("a", false); + Object value = randomValue(); + Map map = singletonMap("a", asList(value, value)); + SqlException ex = expectThrows(SqlException.class, () -> fe.extractFromSource(map)); + assertThat(ex.getMessage(), is("Arrays (returned by [a]) are not supported")); + } + + public Object randomValue() { + Supplier value = randomFrom(Arrays.asList( + () -> randomAlphaOfLength(10), + ESTestCase::randomLong, + ESTestCase::randomDouble, + () -> null)); + return value.get(); + } + + public Object randomNonNullValue() { + Supplier value = 
randomFrom(Arrays.asList( + () -> randomAlphaOfLength(10), + ESTestCase::randomLong, + ESTestCase::randomDouble)); + return value.get(); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractorTests.java new file mode 100644 index 0000000000000..12a8dd0420f0f --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractorTests.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.SqlException; + +import java.io.IOException; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; + +public class MetricAggExtractorTests extends AbstractWireSerializingTestCase { + + public static MetricAggExtractor randomMetricAggExtractor() { + return new MetricAggExtractor(randomAlphaOfLength(16), randomAlphaOfLength(16), randomAlphaOfLength(16)); + } + + @Override + protected MetricAggExtractor createTestInstance() { + return randomMetricAggExtractor(); + } + + @Override + protected Reader instanceReader() { + return MetricAggExtractor::new; + } + + @Override + protected MetricAggExtractor mutateInstance(MetricAggExtractor instance) throws IOException { + return new MetricAggExtractor(instance.name() + "mutated", instance.property(), instance.innerKey()); + } + + public void testNoAggs() { + Bucket bucket = new TestBucket(emptyMap(), 0, new Aggregations(emptyList())); + MetricAggExtractor extractor = randomMetricAggExtractor(); + SqlException exception = expectThrows(SqlException.class, () -> extractor.extract(bucket)); + assertEquals("Cannot find an aggregation named " + extractor.name(), exception.getMessage()); + } + + public void testSingleValueProperty() { + MetricAggExtractor extractor = randomMetricAggExtractor(); + + double value = randomDouble(); + Aggregation agg = new TestSingleValueAggregation(extractor.name(), singletonList(extractor.property()), value); + Bucket bucket = new TestBucket(emptyMap(), 0, new Aggregations(singletonList(agg))); + assertEquals(value, extractor.extract(bucket)); + } + + public void testSingleValueInnerKey() { + MetricAggExtractor extractor = randomMetricAggExtractor(); + double innerValue = randomDouble(); + Aggregation agg = new TestSingleValueAggregation(extractor.name(), singletonList(extractor.property()), + singletonMap(extractor.innerKey(), innerValue)); + Bucket bucket = new TestBucket(emptyMap(), 0, new Aggregations(singletonList(agg))); + assertEquals(innerValue, extractor.extract(bucket)); + } + + public void testMultiValueProperty() { + MetricAggExtractor extractor = randomMetricAggExtractor(); + + double value = randomDouble(); + 
Aggregation agg = new TestMultiValueAggregation(extractor.name(), singletonMap(extractor.property(), value)); + Bucket bucket = new TestBucket(emptyMap(), 0, new Aggregations(singletonList(agg))); + assertEquals(value, extractor.extract(bucket)); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ScoreExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ScoreExtractorTests.java new file mode 100644 index 0000000000000..d484db5d1bcfd --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ScoreExtractorTests.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.test.ESTestCase; + +public class ScoreExtractorTests extends ESTestCase { + public void testGet() { + int times = between(1, 1000); + for (int i = 0; i < times; i++) { + float score = randomFloat(); + SearchHit hit = new SearchHit(1); + hit.score(score); + assertEquals(score, ScoreExtractor.INSTANCE.extract(hit)); + } + } + + public void testToString() { + assertEquals("SCORE", ScoreExtractor.INSTANCE.toString()); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestBucket.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestBucket.java new file mode 100644 index 0000000000000..fa3be10f91e51 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestBucket.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation.Bucket; + +import java.io.IOException; +import java.util.Map; + +class TestBucket implements Bucket { + + private final Map key; + private final long count; + private final Aggregations aggs; + + TestBucket(Map key, long count, Aggregations aggs) { + this.key = key; + this.count = count; + this.aggs = aggs; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public Map getKey() { + return key; + } + + @Override + public String getKeyAsString() { + return key.toString(); + } + + @Override + public long getDocCount() { + return count; + } + + @Override + public Aggregations getAggregations() { + return aggs; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestMultiValueAggregation.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestMultiValueAggregation.java new file mode 100644 index 0000000000000..f8996966fa1f6 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestMultiValueAggregation.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; + +class TestMultiValueAggregation extends InternalNumericMetricsAggregation.MultiValue { + + private final Map values; + + TestMultiValueAggregation(String name, Map values) { + super(name, emptyList(), emptyMap()); + this.values = values; + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + + @Override + public double value(String name) { + return values.get(name); + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public InternalAggregation doReduce(List aggregations, ReduceContext reduceContext) { + throw new UnsupportedOperationException(); + } + + @Override + public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + protected int doHashCode() { + throw new UnsupportedOperationException(); + } + + @Override + protected boolean doEquals(Object obj) { + throw new UnsupportedOperationException(); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestSingleValueAggregation.java 
b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestSingleValueAggregation.java new file mode 100644 index 0000000000000..2c00f5e845f98 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestSingleValueAggregation.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.execution.search.extractor; + +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.InternalAggregation; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; + +public class TestSingleValueAggregation extends InternalAggregation { + + private final List path; + private final Object value; + + TestSingleValueAggregation(String name, List path, Object value) { + super(name, emptyList(), emptyMap()); + this.path = path; + this.value = value; + } + + @Override + public String getWriteableName() { + throw new UnsupportedOperationException(); + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public InternalAggregation doReduce(List aggregations, ReduceContext reduceContext) { + throw new UnsupportedOperationException(); + } + + @Override + public Object getProperty(List path) { + if (this.path.equals(path)) { + return value; + } + throw new IllegalArgumentException("unknown path " + path); + } + + @Override + public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + protected int doHashCode() { + throw new UnsupportedOperationException(); + } + + @Override + protected boolean doEquals(Object obj) { + throw new UnsupportedOperationException(); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/AttributeMapTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/AttributeMapTests.java new file mode 100644 index 0000000000000..fa85ca9cbff12 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/AttributeMapTests.java @@ -0,0 +1,181 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.sql.expression;
+
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.sql.tree.Location;
+
+import java.util.Collection;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import static java.util.stream.Collectors.toList;
+import static org.hamcrest.Matchers.arrayContaining;
+import static org.hamcrest.Matchers.arrayWithSize;
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;
+
+public class AttributeMapTests extends ESTestCase {
+
+    private static Attribute a(String name) {
+        return new UnresolvedAttribute(Location.EMPTY, name);
+    }
+
+    private static AttributeMap<String> threeMap() {
+        Map<Attribute, String> map = new LinkedHashMap<>();
+        map.put(a("one"), "one");
+        map.put(a("two"), "two");
+        map.put(a("three"), "three");
+
+        return new AttributeMap<>(map);
+    }
+
+    public void testEmptyConstructor() {
+        AttributeMap<String> m = new AttributeMap<>();
+        assertThat(m.size(), is(0));
+        assertThat(m.isEmpty(), is(true));
+    }
+
+    public void testMapConstructor() {
+        Map<Attribute, String> map = new LinkedHashMap<>();
+        map.put(a("one"), "one");
+        map.put(a("two"), "two");
+        map.put(a("three"), "three");
+
+        AttributeMap<String> m = new AttributeMap<>(map);
+        assertThat(m.size(), is(3));
+        assertThat(m.isEmpty(), is(false));
+
+        Attribute one = m.keySet().iterator().next();
+        assertThat(m.containsKey(one), is(true));
+        assertThat(m.containsKey(a("one")), is(false));
+        assertThat(m.containsValue("one"), is(true));
+        assertThat(m.containsValue("on"), is(false));
+        assertThat(m.attributeNames(), contains("one", "two", "three"));
+        assertThat(m.values(), contains(map.values().toArray()));
+
+        // defensive copying
+        map.put(a("four"), "four");
+        assertThat(m.size(), is(3));
+        assertThat(m.isEmpty(), is(false));
+    }
+
+    public void testSingleItemConstructor() {
+        Attribute one = a("one");
+        AttributeMap<String> m = new AttributeMap<>(one, "one");
+        assertThat(m.size(), is(1));
+        assertThat(m.isEmpty(), is(false));
+
+        assertThat(m.containsKey(one), is(true));
+        assertThat(m.containsKey(a("one")), is(false));
+        assertThat(m.containsValue("one"), is(true));
+        assertThat(m.containsValue("on"), is(false));
+    }
+
+    public void testSubstract() {
+        AttributeMap<String> m = threeMap();
+        AttributeMap<String> mo = new AttributeMap<>(m.keySet().iterator().next(), "one");
+        AttributeMap<String> empty = new AttributeMap<>();
+
+        assertThat(m.substract(empty), is(m));
+        assertThat(m.substract(m), is(empty));
+        assertThat(mo.substract(m), is(empty));
+
+        AttributeMap<String> substract = m.substract(mo);
+
+        assertThat(substract.size(), is(2));
+        assertThat(substract.attributeNames(), contains("two", "three"));
+    }
+
+    public void testIntersect() {
+        AttributeMap<String> m = threeMap();
+        AttributeMap<String> mo = new AttributeMap<>(m.keySet().iterator().next(), "one");
+        AttributeMap<String> empty = new AttributeMap<>();
+
+        assertThat(m.intersect(empty), is(empty));
+        assertThat(m.intersect(m), is(m));
+        assertThat(mo.intersect(m), is(mo));
+    }
+
+    public void testSubsetOf() {
+        AttributeMap<String> m = threeMap();
+        AttributeMap<String> mo = new AttributeMap<>(m.keySet().iterator().next(), "one");
+        AttributeMap<String> empty = new AttributeMap<>();
+
+        assertThat(m.subsetOf(empty), is(false));
+        assertThat(m.subsetOf(m), is(true));
+        assertThat(mo.subsetOf(m), is(true));
+
+        assertThat(empty.subsetOf(m), is(true));
+        assertThat(mo.subsetOf(m), is(true));
+    }
+
+    public void testKeySet() {
+        Attribute one = a("one");
+        Attribute two = a("two");
+        Attribute three = a("three");
+
+        Map<Attribute, String> map = new LinkedHashMap<>();
+        map.put(one, "one");
+        map.put(two, "two");
+        map.put(three, "three");
+
+        Set<Attribute> keySet = new AttributeMap<>(map).keySet();
+        assertThat(keySet, contains(one, two, three));
+
+        // toObject
+        Object[] array = keySet.toArray();
+
+        assertThat(array, arrayWithSize(3));
+        assertThat(array, arrayContaining(one, two, three));
+    }
+
+    public void testValues() {
+        AttributeMap<String> m = threeMap();
+        Collection<String> values = m.values();
+
+        assertThat(values, hasSize(3));
+        assertThat(values, contains("one", "two", "three"));
+    }
+
+    public void testEntrySet() {
+        Attribute one = a("one");
+        Attribute two = a("two");
+        Attribute three = a("three");
+
+        Map<Attribute, String> map = new LinkedHashMap<>();
+        map.put(one, "one");
+        map.put(two, "two");
+        map.put(three, "three");
+
+        Set<Entry<Attribute, String>> set = new AttributeMap<>(map).entrySet();
+
+        assertThat(set, hasSize(3));
+
+        List<Attribute> keys = set.stream().map(Map.Entry::getKey).collect(toList());
+        List<String> values = set.stream().map(Map.Entry::getValue).collect(toList());
+
+        assertThat(keys, hasSize(3));
+
+
+        assertThat(values, hasSize(3));
+        assertThat(values, contains("one", "two", "three"));
+    }
+
+    public void testForEach() {
+        AttributeMap<String> m = threeMap();
+
+        Map<Attribute, String> collect = new LinkedHashMap<>();
+        m.forEach(collect::put);
+        AttributeMap<String> copy = new AttributeMap<>(collect);
+
+        assertThat(m, is(copy));
+    }
+}
\ No newline at end of file
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ExpressionIdTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ExpressionIdTests.java
new file mode 100644
index 0000000000000..3efa228f7ccea
--- /dev/null
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ExpressionIdTests.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.sql.expression;
+
+import org.elasticsearch.test.ESTestCase;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class ExpressionIdTests extends ESTestCase {
+    /**
+     * Each {@link ExpressionId} should be unique. Technically
+     * you can roll the {@link AtomicLong} that backs them but
+     * that is not going to happen within a single query.
+     */
+    public void testUnique() {
+        assertNotEquals(new ExpressionId(), new ExpressionId());
+    }
+}
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/LiteralTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/LiteralTests.java
new file mode 100644
index 0000000000000..8527c5b62dfae
--- /dev/null
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/LiteralTests.java
@@ -0,0 +1,129 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.sql.expression;
+
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
+import org.elasticsearch.xpack.sql.tree.AbstractNodeTestCase;
+import org.elasticsearch.xpack.sql.tree.LocationTests;
+import org.elasticsearch.xpack.sql.type.DataType;
+import org.elasticsearch.xpack.sql.type.DataTypeConversion;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+import static java.util.Collections.emptyList;
+
+public class LiteralTests extends AbstractNodeTestCase {
+    static class ValueAndCompatibleTypes {
+        final Supplier<Object> valueSupplier;
+        final List<DataType> validDataTypes;
+
+        ValueAndCompatibleTypes(Supplier<Object> valueSupplier, DataType... validDataTypes) {
+            this.valueSupplier = valueSupplier;
+            this.validDataTypes = Arrays.asList(validDataTypes);
+        }
+    }
+    /**
+     * Generators for values and data types. The first valid
+     * data type is special: it is used when picking a generator
+     * for a specific data type. So the first valid data type
+     * of a generator is its "native" type.
+     */
+    private static final List<ValueAndCompatibleTypes> GENERATORS = Arrays.asList(
+        new ValueAndCompatibleTypes(() -> randomBoolean() ? randomBoolean() : randomFrom("true", "false"), DataType.BOOLEAN),
+        new ValueAndCompatibleTypes(ESTestCase::randomByte, DataType.BYTE, DataType.SHORT, DataType.INTEGER, DataType.LONG,
+                DataType.FLOAT, DataType.DOUBLE, DataType.BOOLEAN),
+        new ValueAndCompatibleTypes(ESTestCase::randomShort, DataType.SHORT, DataType.INTEGER, DataType.LONG,
+                DataType.FLOAT, DataType.DOUBLE, DataType.BOOLEAN),
+        new ValueAndCompatibleTypes(ESTestCase::randomInt, DataType.INTEGER, DataType.LONG,
+                DataType.FLOAT, DataType.DOUBLE, DataType.BOOLEAN),
+        new ValueAndCompatibleTypes(ESTestCase::randomLong, DataType.LONG, DataType.FLOAT, DataType.DOUBLE, DataType.BOOLEAN),
+        new ValueAndCompatibleTypes(ESTestCase::randomFloat, DataType.FLOAT, DataType.LONG, DataType.DOUBLE, DataType.BOOLEAN),
+        new ValueAndCompatibleTypes(ESTestCase::randomDouble, DataType.DOUBLE, DataType.LONG, DataType.FLOAT, DataType.BOOLEAN),
+        new ValueAndCompatibleTypes(() -> randomAlphaOfLength(5), DataType.KEYWORD));
+
+    public static Literal randomLiteral() {
+        ValueAndCompatibleTypes gen = randomFrom(GENERATORS);
+        return new Literal(LocationTests.randomLocation(), gen.valueSupplier.get(), randomFrom(gen.validDataTypes));
+    }
+
+    @Override
+    protected Literal randomInstance() {
+        return randomLiteral();
+    }
+
+    @Override
+    protected Literal copy(Literal instance) {
+        return new Literal(instance.location(), instance.value(), instance.dataType());
+    }
+
+    @Override
+    protected Literal mutate(Literal instance) {
+        List<Function<Literal, Literal>> mutators = new ArrayList<>();
+        // Changing the location doesn't count as mutation because..... it just doesn't, ok?!
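+        // Each mutator below changes exactly one property of the Literal, so applying a
+        // randomly picked one always yields an instance that is not equal to the original.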
+ // Change the value to another valid value + mutators.add(l -> new Literal(l.location(), randomValueOfTypeOtherThan(l.value(), l.dataType()), l.dataType())); + // If we can change the data type then add that as an option as well + List validDataTypes = validReplacementDataTypes(instance.value(), instance.dataType()); + if (validDataTypes.size() > 1) { + mutators.add(l -> new Literal(l.location(), l.value(), randomValueOtherThan(l.dataType(), () -> randomFrom(validDataTypes)))); + } + return randomFrom(mutators).apply(instance); + } + + @Override + public void testTransform() { + Literal literal = randomInstance(); + + // Replace value + Object newValue = randomValueOfTypeOtherThan(literal.value(), literal.dataType()); + assertEquals(new Literal(literal.location(), newValue, literal.dataType()), + literal.transformPropertiesOnly(p -> p == literal.value() ? newValue : p, Object.class)); + + // Replace data type if there are more compatible data types + List validDataTypes = validReplacementDataTypes(literal.value(), literal.dataType()); + if (validDataTypes.size() > 1) { + DataType newDataType = randomValueOtherThan(literal.dataType(), () -> randomFrom(validDataTypes)); + assertEquals(new Literal(literal.location(), literal.value(), newDataType), + literal.transformPropertiesOnly(p -> newDataType, DataType.class)); + } + } + + @Override + public void testReplaceChildren() { + Exception e = expectThrows(UnsupportedOperationException.class, () -> randomInstance().replaceChildren(emptyList())); + assertEquals("this type of node doesn't have any children to replace", e.getMessage()); + } + + private Object randomValueOfTypeOtherThan(Object original, DataType type) { + for (ValueAndCompatibleTypes gen : GENERATORS) { + if (gen.validDataTypes.get(0) == type) { + return randomValueOtherThan(original, () -> DataTypeConversion.convert(gen.valueSupplier.get(), type)); + } + } + throw new IllegalArgumentException("No native generator for [" + type + "]"); + } + + private List validReplacementDataTypes(Object value, DataType type) { + List validDataTypes = new ArrayList<>(); + List options = Arrays.asList(DataType.BYTE, DataType.SHORT, DataType.INTEGER, DataType.LONG, + DataType.FLOAT, DataType.DOUBLE, DataType.BOOLEAN); + for (DataType candidate : options) { + try { + DataTypeConversion.Conversion c = DataTypeConversion.conversionFor(type, candidate); + c.convert(value); + validDataTypes.add(candidate); + } catch (SqlIllegalArgumentException e) { + // invalid conversion then.... + } + } + return validDataTypes; + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ParameterTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ParameterTests.java new file mode 100644 index 0000000000000..5e35965985987 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ParameterTests.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Add; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Mul; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Sub; +import org.elasticsearch.xpack.sql.expression.predicate.Equals; +import org.elasticsearch.xpack.sql.parser.ParsingException; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.Arrays; +import java.util.Collections; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + + +public class ParameterTests extends ESTestCase { + + public void testSingleParameter() { + Expression expression = new SqlParser().createExpression("a = \n?", + Collections.singletonList( + new SqlTypedParamValue("foo", DataType.KEYWORD) + )); + logger.info(expression); + assertThat(expression, instanceOf(Equals.class)); + Expression right = ((Equals) expression).right(); + assertThat(right, instanceOf(Literal.class)); + Literal param = (Literal) right; + assertThat(param.dataType(), equalTo(DataType.KEYWORD)); + assertThat(param.dataType(), equalTo(DataType.KEYWORD)); + assertThat(param.value(), equalTo("foo")); + } + + public void testMultipleParameters() { + Expression expression = new SqlParser().createExpression("(? + ? * ?) - ?", Arrays.asList( + new SqlTypedParamValue(1L, DataType.LONG), + new SqlTypedParamValue(2L, DataType.LONG), + new SqlTypedParamValue(3L, DataType.LONG), + new SqlTypedParamValue(4L, DataType.LONG) + )); + assertThat(expression, instanceOf(Sub.class)); + Sub sub = (Sub) expression; + assertThat(((Literal) sub.right()).value(), equalTo(4L)); + assertThat(sub.left(), instanceOf(Add.class)); + Add add = (Add) sub.left(); + assertThat(((Literal) add.left()).value(), equalTo(1L)); + assertThat(add.right(), instanceOf(Mul.class)); + Mul mul = (Mul) add.right(); + assertThat(((Literal) mul.left()).value(), equalTo(2L)); + assertThat(((Literal) mul.right()).value(), equalTo(3L)); + } + + public void testNotEnoughParameters() { + ParsingException ex = expectThrows(ParsingException.class, + () -> new SqlParser().createExpression("(? + ? * ?) - ?", Arrays.asList( + new SqlTypedParamValue(1L, DataType.LONG), + new SqlTypedParamValue(2L, DataType.LONG), + new SqlTypedParamValue(3L, DataType.LONG) + ))); + assertThat(ex.getMessage(), containsString("Not enough actual parameters")); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/QuotingTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/QuotingTests.java new file mode 100644 index 0000000000000..ceb9611a62c90 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/QuotingTests.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.parser.ParsingException; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.logical.OrderBy; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + + +public class QuotingTests extends ESTestCase { + + private static UnresolvedAttribute from(String s) { + return new UnresolvedAttribute(Location.EMPTY, s); + } + + public void testBasicString() { + String s = "someField"; + UnresolvedAttribute ua = from(s); + assertThat(ua.name(), equalTo(s)); + assertThat(ua.qualifiedName(), equalTo(s)); + assertThat(ua.qualifier(), nullValue()); + } + + public void testSingleQuoteLiteral() { + String name = "@timestamp"; + Expression exp = new SqlParser().createExpression("'" + name + "'"); + assertThat(exp, instanceOf(Literal.class)); + Literal l = (Literal) exp; + assertThat(l.value(), equalTo(name)); + } + + public void testMultiSingleQuotedLiteral() { + String first = "bucket"; + String second = "head"; + Expression exp = new SqlParser().createExpression(String.format(Locale.ROOT, "'%s' '%s'", first, second)); + assertThat(exp, instanceOf(Literal.class)); + Literal l = (Literal) exp; + assertThat(l.value(), equalTo(first + second)); + } + + public void testQuotedAttribute() { + String quote = "\""; + String name = "@timestamp"; + Expression exp = new SqlParser().createExpression(quote + name + quote); + assertThat(exp, instanceOf(UnresolvedAttribute.class)); + UnresolvedAttribute ua = (UnresolvedAttribute) exp; + assertThat(ua.name(), equalTo(name)); + assertThat(ua.qualifiedName(), equalTo(name)); + assertThat(ua.qualifier(), nullValue()); + } + + public void testBackQuotedAttribute() { + String quote = "`"; + String name = "@timestamp"; + ParsingException ex = expectThrows(ParsingException.class, () -> + new SqlParser().createExpression(quote + name + quote)); + assertThat(ex.getMessage(), equalTo("line 1:1: backquoted indetifiers not supported; please use double quotes instead")); + } + + public void testQuotedAttributeAndQualifier() { + String quote = "\""; + String qualifier = "table"; + String name = "@timestamp"; + Expression exp = new SqlParser().createExpression(quote + qualifier + quote + "." + quote + name + quote); + assertThat(exp, instanceOf(UnresolvedAttribute.class)); + UnresolvedAttribute ua = (UnresolvedAttribute) exp; + assertThat(ua.name(), equalTo(qualifier + "." + name)); + assertThat(ua.qualifiedName(), equalTo(qualifier + "." + name)); + assertThat(ua.qualifier(), is(nullValue())); + } + + + public void testBackQuotedAttributeAndQualifier() { + String quote = "`"; + String qualifier = "table"; + String name = "@timestamp"; + ParsingException ex = expectThrows(ParsingException.class, () -> + new SqlParser().createExpression(quote + qualifier + quote + "." 
+ quote + name + quote)); + assertThat(ex.getMessage(), equalTo("line 1:1: backquoted indetifiers not supported; please use double quotes instead")); + } + + public void testGreedyQuoting() { + LogicalPlan plan = new SqlParser().createStatement("SELECT * FROM \"table\" ORDER BY \"field\""); + final List plans = new ArrayList<>(); + plan.forEachDown(plans::add); + assertThat(plans, hasSize(4)); + assertThat(plans.get(1), instanceOf(OrderBy.class)); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/UnresolvedAttributeTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/UnresolvedAttributeTests.java new file mode 100644 index 0000000000000..51a16c14a8889 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/UnresolvedAttributeTests.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression; + +import org.elasticsearch.xpack.sql.tree.AbstractNodeTestCase; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.Arrays; +import java.util.Objects; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.sql.tree.LocationTests.randomLocation; + +public class UnresolvedAttributeTests extends AbstractNodeTestCase { + public static UnresolvedAttribute randomUnresolvedAttribute() { + Location location = randomLocation(); + String name = randomAlphaOfLength(5); + String qualifier = randomQualifier(); + ExpressionId id = randomBoolean() ? null : new ExpressionId(); + String unresolvedMessage = randomUnresolvedMessage(); + Object resolutionMetadata = new Object(); + return new UnresolvedAttribute(location, name, qualifier, id, unresolvedMessage, resolutionMetadata); + } + + /** + * A random qualifier. It is important that this be distinct + * from the name and the unresolvedMessage for testing transform. + */ + private static String randomQualifier() { + return randomBoolean() ? null : randomAlphaOfLength(6); + } + + /** + * A random qualifier. It is important that this be distinct + * from the name and the qualifier for testing transform. 
+ */ + private static String randomUnresolvedMessage() { + return randomAlphaOfLength(7); + } + + @Override + protected UnresolvedAttribute randomInstance() { + return randomUnresolvedAttribute(); + } + + @Override + protected UnresolvedAttribute mutate(UnresolvedAttribute a) { + Supplier option = randomFrom(Arrays.asList( + () -> new UnresolvedAttribute(a.location(), + randomValueOtherThan(a.name(), () -> randomAlphaOfLength(5)), + a.qualifier(), a.id(), a.unresolvedMessage(), a.resolutionMetadata()), + () -> new UnresolvedAttribute(a.location(), a.name(), + randomValueOtherThan(a.qualifier(), UnresolvedAttributeTests::randomQualifier), + a.id(), a.unresolvedMessage(), a.resolutionMetadata()), + () -> new UnresolvedAttribute(a.location(), a.name(), a.qualifier(), + new ExpressionId(), a.unresolvedMessage(), a.resolutionMetadata()), + () -> new UnresolvedAttribute(a.location(), a.name(), a.qualifier(), a.id(), + randomValueOtherThan(a.unresolvedMessage(), () -> randomUnresolvedMessage()), + a.resolutionMetadata()), + () -> new UnresolvedAttribute(a.location(), a.name(), + a.qualifier(), a.id(), a.unresolvedMessage(), new Object()) + )); + return option.get(); + } + + @Override + protected UnresolvedAttribute copy(UnresolvedAttribute a) { + return new UnresolvedAttribute(a.location(), a.name(), a.qualifier(), a.id(), a.unresolvedMessage(), a.resolutionMetadata()); + } + + @Override + public void testTransform() { + UnresolvedAttribute a = randomUnresolvedAttribute(); + + String newName = randomValueOtherThan(a.name(), () -> randomAlphaOfLength(5)); + assertEquals(new UnresolvedAttribute(a.location(), newName, a.qualifier(), a.id(), + a.unresolvedMessage(), a.resolutionMetadata()), + a.transformPropertiesOnly(v -> Objects.equals(v, a.name()) ? newName : v, Object.class)); + + String newQualifier = randomValueOtherThan(a.qualifier(), UnresolvedAttributeTests::randomQualifier); + assertEquals(new UnresolvedAttribute(a.location(), a.name(), newQualifier, a.id(), + a.unresolvedMessage(), a.resolutionMetadata()), + a.transformPropertiesOnly(v -> Objects.equals(v, a.qualifier()) ? newQualifier : v, Object.class)); + + ExpressionId newId = new ExpressionId(); + assertEquals(new UnresolvedAttribute(a.location(), a.name(), a.qualifier(), newId, + a.unresolvedMessage(), a.resolutionMetadata()), + a.transformPropertiesOnly(v -> Objects.equals(v, a.id()) ? newId : v, Object.class)); + + String newMessage = randomValueOtherThan(a.unresolvedMessage(), UnresolvedAttributeTests::randomUnresolvedMessage); + assertEquals(new UnresolvedAttribute(a.location(), a.name(), a.qualifier(), a.id(), + newMessage, a.resolutionMetadata()), + a.transformPropertiesOnly(v -> Objects.equals(v, a.unresolvedMessage()) ? newMessage : v, Object.class)); + + Object newMeta = new Object(); + assertEquals(new UnresolvedAttribute(a.location(), a.name(), a.qualifier(), a.id(), + a.unresolvedMessage(), newMeta), + a.transformPropertiesOnly(v -> Objects.equals(v, a.resolutionMetadata()) ? 
newMeta : v, Object.class)); + } + + @Override + public void testReplaceChildren() { + // UnresolvedAttribute doesn't have any children + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistryTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistryTests.java new file mode 100644 index 0000000000000..12581e9577cb9 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistryTests.java @@ -0,0 +1,193 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.LocationTests; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.parser.ParsingException; +import java.util.Arrays; +import java.util.List; +import java.util.TimeZone; + +import static org.elasticsearch.xpack.sql.expression.function.FunctionRegistry.def; +import static org.elasticsearch.xpack.sql.expression.function.UnresolvedFunction.ResolutionType.DISTINCT; +import static org.elasticsearch.xpack.sql.expression.function.UnresolvedFunction.ResolutionType.EXTRACT; +import static org.elasticsearch.xpack.sql.expression.function.UnresolvedFunction.ResolutionType.STANDARD; +import static org.hamcrest.Matchers.endsWith; +import static org.mockito.Mockito.mock; +import static java.util.Collections.emptyList; + +public class FunctionRegistryTests extends ESTestCase { + public void testNoArgFunction() { + UnresolvedFunction ur = uf(STANDARD); + FunctionRegistry r = new FunctionRegistry(Arrays.asList(def(Dummy.class, Dummy::new))); + FunctionDefinition def = r.resolveFunction(ur.name()); + assertEquals(ur.location(), ur.buildResolved(randomTimeZone(), def).location()); + + // Distinct isn't supported + ParsingException e = expectThrows(ParsingException.class, () -> + uf(DISTINCT).buildResolved(randomTimeZone(), def)); + assertThat(e.getMessage(), endsWith("does not support DISTINCT yet it was specified")); + + // Any children aren't supported + e = expectThrows(ParsingException.class, () -> + uf(STANDARD, mock(Expression.class)).buildResolved(randomTimeZone(), def)); + assertThat(e.getMessage(), endsWith("expects no arguments")); + } + + public void testUnaryFunction() { + UnresolvedFunction ur = uf(STANDARD, mock(Expression.class)); + FunctionRegistry r = new FunctionRegistry(Arrays.asList(def(Dummy.class, (Location l, Expression e) -> { + assertSame(e, ur.children().get(0)); + return new Dummy(l); + }))); + FunctionDefinition def = r.resolveFunction(ur.name()); + assertFalse(def.datetime()); + assertEquals(ur.location(), ur.buildResolved(randomTimeZone(), def).location()); + + // Distinct isn't supported + ParsingException e = expectThrows(ParsingException.class, () -> + uf(DISTINCT, 
mock(Expression.class)).buildResolved(randomTimeZone(), def)); + assertThat(e.getMessage(), endsWith("does not support DISTINCT yet it was specified")); + + // No children aren't supported + e = expectThrows(ParsingException.class, () -> + uf(STANDARD).buildResolved(randomTimeZone(), def)); + assertThat(e.getMessage(), endsWith("expects exactly one argument")); + + // Multiple children aren't supported + e = expectThrows(ParsingException.class, () -> + uf(STANDARD, mock(Expression.class), mock(Expression.class)).buildResolved(randomTimeZone(), def)); + assertThat(e.getMessage(), endsWith("expects exactly one argument")); + } + + public void testUnaryDistinctAwareFunction() { + boolean urIsDistinct = randomBoolean(); + UnresolvedFunction ur = uf(urIsDistinct ? DISTINCT : STANDARD, mock(Expression.class)); + FunctionRegistry r = new FunctionRegistry(Arrays.asList(def(Dummy.class, (Location l, Expression e, boolean distinct) -> { + assertEquals(urIsDistinct, distinct); + assertSame(e, ur.children().get(0)); + return new Dummy(l); + }))); + FunctionDefinition def = r.resolveFunction(ur.name()); + assertEquals(ur.location(), ur.buildResolved(randomTimeZone(), def).location()); + assertFalse(def.datetime()); + + // No children aren't supported + ParsingException e = expectThrows(ParsingException.class, () -> + uf(STANDARD).buildResolved(randomTimeZone(), def)); + assertThat(e.getMessage(), endsWith("expects exactly one argument")); + + // Multiple children aren't supported + e = expectThrows(ParsingException.class, () -> + uf(STANDARD, mock(Expression.class), mock(Expression.class)).buildResolved(randomTimeZone(), def)); + assertThat(e.getMessage(), endsWith("expects exactly one argument")); + } + + public void testDateTimeFunction() { + boolean urIsExtract = randomBoolean(); + UnresolvedFunction ur = uf(urIsExtract ? 
EXTRACT : STANDARD, mock(Expression.class)); + TimeZone providedTimeZone = randomTimeZone(); + FunctionRegistry r = new FunctionRegistry(Arrays.asList(def(Dummy.class, (Location l, Expression e, TimeZone tz) -> { + assertEquals(providedTimeZone, tz); + assertSame(e, ur.children().get(0)); + return new Dummy(l); + }))); + FunctionDefinition def = r.resolveFunction(ur.name()); + assertEquals(ur.location(), ur.buildResolved(providedTimeZone, def).location()); + assertTrue(def.datetime()); + + // Distinct isn't supported + ParsingException e = expectThrows(ParsingException.class, () -> + uf(DISTINCT, mock(Expression.class)).buildResolved(randomTimeZone(), def)); + assertThat(e.getMessage(), endsWith("does not support DISTINCT yet it was specified")); + + // No children aren't supported + e = expectThrows(ParsingException.class, () -> + uf(STANDARD).buildResolved(randomTimeZone(), def)); + assertThat(e.getMessage(), endsWith("expects exactly one argument")); + + // Multiple children aren't supported + e = expectThrows(ParsingException.class, () -> + uf(STANDARD, mock(Expression.class), mock(Expression.class)).buildResolved(randomTimeZone(), def)); + assertThat(e.getMessage(), endsWith("expects exactly one argument")); + } + + public void testBinaryFunction() { + UnresolvedFunction ur = uf(STANDARD, mock(Expression.class), mock(Expression.class)); + FunctionRegistry r = new FunctionRegistry(Arrays.asList(def(Dummy.class, (Location l, Expression lhs, Expression rhs) -> { + assertSame(lhs, ur.children().get(0)); + assertSame(rhs, ur.children().get(1)); + return new Dummy(l); + }))); + FunctionDefinition def = r.resolveFunction(ur.name()); + assertEquals(ur.location(), ur.buildResolved(randomTimeZone(), def).location()); + assertFalse(def.datetime()); + + // Distinct isn't supported + ParsingException e = expectThrows(ParsingException.class, () -> + uf(DISTINCT, mock(Expression.class), mock(Expression.class)).buildResolved(randomTimeZone(), def)); + assertThat(e.getMessage(), endsWith("does not support DISTINCT yet it was specified")); + + // No children aren't supported + e = expectThrows(ParsingException.class, () -> + uf(STANDARD).buildResolved(randomTimeZone(), def)); + assertThat(e.getMessage(), endsWith("expects exactly two arguments")); + + // One child isn't supported + e = expectThrows(ParsingException.class, () -> + uf(STANDARD, mock(Expression.class)).buildResolved(randomTimeZone(), def)); + assertThat(e.getMessage(), endsWith("expects exactly two arguments")); + + // Many children aren't supported + e = expectThrows(ParsingException.class, () -> + uf(STANDARD, mock(Expression.class), mock(Expression.class), mock(Expression.class)) + .buildResolved(randomTimeZone(), def)); + assertThat(e.getMessage(), endsWith("expects exactly two arguments")); + } + + private UnresolvedFunction uf(UnresolvedFunction.ResolutionType resolutionType, Expression... 
children) { + return new UnresolvedFunction(LocationTests.randomLocation(), "dummy", resolutionType, Arrays.asList(children)); + } + + public static class Dummy extends ScalarFunction { + public Dummy(Location location) { + super(location, emptyList()); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this); + } + + @Override + public Expression replaceChildren(List newChildren) { + throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); + } + + @Override + public DataType dataType() { + return null; + } + + @Override + public ScriptTemplate asScript() { + return null; + } + + @Override + protected ProcessorDefinition makeProcessorDefinition() { + return null; + } + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/NamedExpressionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/NamedExpressionTests.java new file mode 100644 index 0000000000000..79f0e970b1eba --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/NamedExpressionTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Add; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Div; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Mod; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Mul; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Neg; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Sub; + +import static org.elasticsearch.xpack.sql.tree.Location.EMPTY; + +public class NamedExpressionTests extends ESTestCase { + + public void testArithmeticFunctionName() { + Add add = new Add(EMPTY, l(5), l(2)); + assertEquals("(5 + 2)", add.name()); + + Div div = new Div(EMPTY, l(5), l(2)); + assertEquals("(5 / 2)", div.name()); + + Mod mod = new Mod(EMPTY, l(5), l(2)); + assertEquals("(5 % 2)", mod.name()); + + Mul mul = new Mul(EMPTY, l(5), l(2)); + assertEquals("(5 * 2)", mul.name()); + + Sub sub = new Sub(EMPTY, l(5), l(2)); + assertEquals("(5 - 2)", sub.name()); + + Neg neg = new Neg(EMPTY, l(5)); + assertEquals("-5", neg.name()); + } + + private static Literal l(Object value) { + return Literal.of(EMPTY, value); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/UnresolvedFunctionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/UnresolvedFunctionTests.java new file mode 100644 index 0000000000000..1e58a1a5dc277 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/UnresolvedFunctionTests.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.UnresolvedFunction; +import org.elasticsearch.xpack.sql.tree.AbstractNodeTestCase; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.sql.tree.LocationTests.randomLocation; +import static org.elasticsearch.xpack.sql.expression.UnresolvedAttributeTests.randomUnresolvedAttribute; + +import static java.util.Collections.singletonList; + +public class UnresolvedFunctionTests extends AbstractNodeTestCase { + public static UnresolvedFunction randomUnresolvedFunction() { + /* Pick an UnresolvedFunction where the name and the + * message don't happen to be the same String. If they + * matched then transform would get them confused. */ + Location location = randomLocation(); + String name = randomAlphaOfLength(5); + UnresolvedFunction.ResolutionType resolutionType = randomFrom(UnresolvedFunction.ResolutionType.values()); + List args = randomFunctionArgs(); + boolean analyzed = randomBoolean(); + String unresolvedMessage = randomUnresolvedMessage(); + return new UnresolvedFunction(location, name, resolutionType, args, analyzed, unresolvedMessage); + } + + private static List randomFunctionArgs() { + // At this point we only support functions with 0, 1, or 2 arguments. + Supplier> option = randomFrom(Arrays.asList( + Collections::emptyList, + () -> singletonList(randomUnresolvedAttribute()), + () -> Arrays.asList(randomUnresolvedAttribute(), randomUnresolvedAttribute()) + )); + return option.get(); + } + + /** + * Pick a random value for the unresolved message. + * It is important that this value is not the same + * as the value for the name for tests like the {@link #testTransform} + * and for general ease of reading. + */ + private static String randomUnresolvedMessage() { + return randomBoolean() ? 
null : randomAlphaOfLength(6); + } + + @Override + protected UnresolvedFunction randomInstance() { + return randomUnresolvedFunction(); + } + + @Override + protected UnresolvedFunction mutate(UnresolvedFunction uf) { + Supplier option = randomFrom(Arrays.asList( + () -> new UnresolvedFunction(uf.location(), randomValueOtherThan(uf.name(), () -> randomAlphaOfLength(5)), + uf.resolutionType(), uf.children(), uf.analyzed(), uf.unresolvedMessage()), + () -> new UnresolvedFunction(uf.location(), uf.name(), + randomValueOtherThan(uf.resolutionType(), () -> randomFrom(UnresolvedFunction.ResolutionType.values())), + uf.children(), uf.analyzed(), uf.unresolvedMessage()), + () -> new UnresolvedFunction(uf.location(), uf.name(), uf.resolutionType(), + randomValueOtherThan(uf.children(), UnresolvedFunctionTests::randomFunctionArgs), + uf.analyzed(), uf.unresolvedMessage()), + () -> new UnresolvedFunction(uf.location(), uf.name(), uf.resolutionType(), uf.children(), + !uf.analyzed(), uf.unresolvedMessage()), + () -> new UnresolvedFunction(uf.location(), uf.name(), uf.resolutionType(), uf.children(), + uf.analyzed(), randomValueOtherThan(uf.unresolvedMessage(), () -> randomAlphaOfLength(5))) + )); + return option.get(); + } + + @Override + protected UnresolvedFunction copy(UnresolvedFunction uf) { + return new UnresolvedFunction(uf.location(), uf.name(), uf.resolutionType(), uf.children(), + uf.analyzed(), uf.unresolvedMessage()); + } + + @Override + public void testTransform() { + UnresolvedFunction uf = randomUnresolvedFunction(); + + String newName = randomValueOtherThan(uf.name(), () -> randomAlphaOfLength(5)); + assertEquals(new UnresolvedFunction(uf.location(), newName, uf.resolutionType(), uf.children(), + uf.analyzed(), uf.unresolvedMessage()), + uf.transformPropertiesOnly(p -> Objects.equals(p, uf.name()) ? newName : p, Object.class)); + + UnresolvedFunction.ResolutionType newResolutionType = randomValueOtherThan(uf.resolutionType(), + () -> randomFrom(UnresolvedFunction.ResolutionType.values())); + assertEquals(new UnresolvedFunction(uf.location(), uf.name(), newResolutionType, uf.children(), + uf.analyzed(), uf.unresolvedMessage()), + uf.transformPropertiesOnly(p -> Objects.equals(p, uf.resolutionType()) ? newResolutionType : p, Object.class)); + + String newUnresolvedMessage = randomValueOtherThan(uf.unresolvedMessage(), + UnresolvedFunctionTests::randomUnresolvedMessage); + assertEquals(new UnresolvedFunction(uf.location(), uf.name(), uf.resolutionType(), uf.children(), + uf.analyzed(), newUnresolvedMessage), + uf.transformPropertiesOnly(p -> Objects.equals(p, uf.unresolvedMessage()) ? newUnresolvedMessage : p, Object.class)); + + assertEquals(new UnresolvedFunction(uf.location(), uf.name(), uf.resolutionType(), uf.children(), + !uf.analyzed(), uf.unresolvedMessage()), + uf.transformPropertiesOnly(p -> Objects.equals(p, uf.analyzed()) ? 
!uf.analyzed() : p, Object.class)); + + } + + @Override + public void testReplaceChildren() { + UnresolvedFunction uf = randomUnresolvedFunction(); + + List newChildren = randomValueOtherThan(uf.children(), UnresolvedFunctionTests::randomFunctionArgs); + assertEquals(new UnresolvedFunction(uf.location(), uf.name(), uf.resolutionType(), newChildren, + uf.analyzed(), uf.unresolvedMessage()), + uf.replaceChildren(newChildren)); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/CastProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/CastProcessorTests.java new file mode 100644 index 0000000000000..831978705d061 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/CastProcessorTests.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.type.DataTypeConversion.Conversion; + +import java.io.IOException; + +public class CastProcessorTests extends AbstractWireSerializingTestCase { + public static CastProcessor randomCastProcessor() { + return new CastProcessor(randomFrom(Conversion.values())); + } + + @Override + protected CastProcessor createTestInstance() { + return randomCastProcessor(); + } + + @Override + protected Reader instanceReader() { + return CastProcessor::new; + } + + @Override + protected CastProcessor mutateInstance(CastProcessor instance) throws IOException { + return new CastProcessor(randomValueOtherThan(instance.converter(), () -> randomFrom(Conversion.values()))); + } + + public void testApply() { + { + CastProcessor proc = new CastProcessor(Conversion.STRING_TO_INT); + assertEquals(null, proc.process(null)); + assertEquals(1, proc.process("1")); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> proc.process("1.2")); + assertEquals("cannot cast [1.2] to [Int]", e.getMessage()); + } + { + CastProcessor proc = new CastProcessor(Conversion.BOOL_TO_INT); + assertEquals(null, proc.process(null)); + assertEquals(1, proc.process(true)); + assertEquals(0, proc.process(false)); + } + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Processors.getNamedWriteables()); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/BinaryArithmeticProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/BinaryArithmeticProcessorTests.java new file mode 100644 index 0000000000000..7baba683d7432 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/arithmetic/BinaryArithmeticProcessorTests.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.function.scalar.Processors; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ConstantProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; + +import static org.elasticsearch.xpack.sql.tree.Location.EMPTY; + +public class BinaryArithmeticProcessorTests extends AbstractWireSerializingTestCase { + public static BinaryArithmeticProcessor randomProcessor() { + return new BinaryArithmeticProcessor( + new ConstantProcessor(randomLong()), + new ConstantProcessor(randomLong()), + randomFrom(BinaryArithmeticProcessor.BinaryArithmeticOperation.values())); + } + + @Override + protected BinaryArithmeticProcessor createTestInstance() { + return randomProcessor(); + } + + @Override + protected Reader instanceReader() { + return BinaryArithmeticProcessor::new; + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Processors.getNamedWriteables()); + } + + public void testAdd() { + Processor ba = new Add(EMPTY, l(7), l(3)).makeProcessorDefinition().asProcessor(); + assertEquals(10, ba.process(null)); + } + + public void testSub() { + Processor ba = new Sub(EMPTY, l(7), l(3)).makeProcessorDefinition().asProcessor(); + assertEquals(4, ba.process(null)); + } + + public void testMul() { + Processor ba = new Mul(EMPTY, l(7), l(3)).makeProcessorDefinition().asProcessor(); + assertEquals(21, ba.process(null)); + } + + public void testDiv() { + Processor ba = new Div(EMPTY, l(7), l(3)).makeProcessorDefinition().asProcessor(); + assertEquals(2, ((Number) ba.process(null)).longValue()); + ba = new Div(EMPTY, l((double) 7), l(3)).makeProcessorDefinition().asProcessor(); + assertEquals(2.33, ((Number) ba.process(null)).doubleValue(), 0.01d); + } + + public void testMod() { + Processor ba = new Mod(EMPTY, l(7), l(3)).makeProcessorDefinition().asProcessor(); + assertEquals(1, ba.process(null)); + } + + public void testNegate() { + Processor ba = new Neg(EMPTY, l(7)).asProcessorDefinition().asProcessor(); + assertEquals(-7, ba.process(null)); + } + + // ((3*2+4)/2-2)%2 + public void testTree() { + Expression mul = new Mul(EMPTY, l(3), l(2)); + Expression add = new Add(EMPTY, mul, l(4)); + Expression div = new Div(EMPTY, add, l(2)); + Expression sub = new Sub(EMPTY, div, l(2)); + Mod mod = new Mod(EMPTY, sub, l(2)); + + Processor proc = mod.makeProcessorDefinition().asProcessor(); + assertEquals(1, proc.process(null)); + } + + public void testHandleNull() { + assertNull(new Add(EMPTY, l(null), l(3)).makeProcessorDefinition().asProcessor().process(null)); + assertNull(new Sub(EMPTY, l(null), l(3)).makeProcessorDefinition().asProcessor().process(null)); + assertNull(new Mul(EMPTY, l(null), l(3)).makeProcessorDefinition().asProcessor().process(null)); + assertNull(new Div(EMPTY, l(null), l(3)).makeProcessorDefinition().asProcessor().process(null)); + assertNull(new Mod(EMPTY, l(null), 
l(3)).makeProcessorDefinition().asProcessor().process(null)); + assertNull(new Neg(EMPTY, l(null)).makeProcessorDefinition().asProcessor().process(null)); + } + + private static Literal l(Object value) { + return Literal.of(EMPTY, value); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessorTests.java new file mode 100644 index 0000000000000..8b0af5e968137 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeProcessorTests.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.util.TimeZone; + +public class DateTimeProcessorTests extends AbstractWireSerializingTestCase { + private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); + + public static DateTimeProcessor randomDateTimeProcessor() { + return new DateTimeProcessor(randomFrom(DateTimeExtractor.values()), UTC); + } + + @Override + protected DateTimeProcessor createTestInstance() { + return randomDateTimeProcessor(); + } + + @Override + protected Reader instanceReader() { + return DateTimeProcessor::new; + } + + @Override + protected DateTimeProcessor mutateInstance(DateTimeProcessor instance) throws IOException { + DateTimeExtractor replaced = randomValueOtherThan(instance.extractor(), () -> randomFrom(DateTimeExtractor.values())); + return new DateTimeProcessor(replaced, UTC); + } + + public void testApply() { + DateTimeProcessor proc = new DateTimeProcessor(DateTimeExtractor.YEAR, UTC); + assertEquals(1970, proc.process(new DateTime(0L, DateTimeZone.UTC))); + assertEquals(2017, proc.process(new DateTime(2017, 01, 02, 10, 10, DateTimeZone.UTC))); + + proc = new DateTimeProcessor(DateTimeExtractor.DAY_OF_MONTH, UTC); + assertEquals(1, proc.process(new DateTime(0L, DateTimeZone.UTC))); + assertEquals(2, proc.process(new DateTime(2017, 01, 02, 10, 10, DateTimeZone.UTC))); + assertEquals(31, proc.process(new DateTime(2017, 01, 31, 10, 10, DateTimeZone.UTC))); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYearTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYearTests.java new file mode 100644 index 0000000000000..f866ee7292085 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DayOfYearTests.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.type.DataType; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.util.TimeZone; + +public class DayOfYearTests extends ESTestCase { + private static final TimeZone UTC = TimeZone.getTimeZone("UTC"); + + public void testAsColumnProcessor() { + assertEquals(1, extract(dateTime(0), UTC)); + assertEquals(1, extract(dateTime(0), TimeZone.getTimeZone("GMT+01:00"))); + assertEquals(365, extract(dateTime(0), TimeZone.getTimeZone("GMT-01:00"))); + } + + private DateTime dateTime(long millisSinceEpoch) { + return new DateTime(millisSinceEpoch, DateTimeZone.forTimeZone(UTC)); + } + + private Object extract(Object value, TimeZone timeZone) { + return build(value, timeZone).asProcessorDefinition().asProcessor().process(value); + } + + private DayOfYear build(Object value, TimeZone timeZone) { + return new DayOfYear(null, new Literal(null, value, DataType.DATE), timeZone); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessorTests.java new file mode 100644 index 0000000000000..6563760d22512 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessorTests.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.function.scalar.Processors; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ConstantProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; + +import static org.elasticsearch.xpack.sql.tree.Location.EMPTY; + +public class BinaryMathProcessorTests extends AbstractWireSerializingTestCase { + public static BinaryMathProcessor randomProcessor() { + return new BinaryMathProcessor( + new ConstantProcessor(randomLong()), + new ConstantProcessor(randomLong()), + randomFrom(BinaryMathProcessor.BinaryMathOperation.values())); + } + + @Override + protected BinaryMathProcessor createTestInstance() { + return randomProcessor(); + } + + @Override + protected Reader instanceReader() { + return BinaryMathProcessor::new; + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Processors.getNamedWriteables()); + } + + public void testAtan2() { + Processor ba = new ATan2(EMPTY, l(1), l(1)).makeProcessorDefinition().asProcessor(); + assertEquals(0.7853981633974483d, ba.process(null)); + } + + public void testPower() { + Processor ba = new Power(EMPTY, l(2), l(2)).makeProcessorDefinition().asProcessor(); + assertEquals(4d, ba.process(null)); + } + + public void testHandleNull() { + assertNull(new ATan2(EMPTY, l(null), l(3)).makeProcessorDefinition().asProcessor().process(null)); + assertNull(new Power(EMPTY, l(null), l(null)).makeProcessorDefinition().asProcessor().process(null)); + } + + private static Literal l(Object value) { + return Literal.of(EMPTY, value); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunctionProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunctionProcessorTests.java new file mode 100644 index 0000000000000..9ff32c5a05741 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunctionProcessorTests.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; + +import java.io.IOException; + +public class MathFunctionProcessorTests extends AbstractWireSerializingTestCase { + public static MathProcessor randomMathFunctionProcessor() { + return new MathProcessor(randomFrom(MathOperation.values())); + } + + @Override + protected MathProcessor createTestInstance() { + return randomMathFunctionProcessor(); + } + + @Override + protected Reader instanceReader() { + return MathProcessor::new; + } + + @Override + protected MathProcessor mutateInstance(MathProcessor instance) throws IOException { + return new MathProcessor(randomValueOtherThan(instance.processor(), () -> randomFrom(MathOperation.values()))); + } + + public void testApply() { + MathProcessor proc = new MathProcessor(MathOperation.E); + assertEquals(Math.E, proc.process(null)); + assertEquals(Math.E, proc.process(Math.PI)); + + proc = new MathProcessor(MathOperation.SQRT); + assertEquals(2.0, (double) proc.process(4), 0); + assertEquals(3.0, (double) proc.process(9d), 0); + assertEquals(1.77, (double) proc.process(3.14), 0.01); + } + + public void testNumberCheck() { + MathProcessor proc = new MathProcessor(MathOperation.E); + SqlIllegalArgumentException siae = expectThrows(SqlIllegalArgumentException.class, () -> proc.process("string")); + assertEquals("A number is required; received [string]", siae.getMessage()); + + } + + public void testRandom() { + MathProcessor proc = new MathProcessor(MathOperation.RANDOM); + assertNotNull(proc.process(null)); + assertNotNull(proc.process(randomLong())); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AttributeInputTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AttributeInputTests.java new file mode 100644 index 0000000000000..7378675075dd9 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/AttributeInputTests.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.execution.search.FieldExtraction; +import org.elasticsearch.xpack.sql.expression.Attribute; +import org.elasticsearch.xpack.sql.expression.Expression; + +import static org.mockito.Mockito.mock; + +public class AttributeInputTests extends ESTestCase { + public void testResolveAttributes() { + FieldExtraction column = mock(FieldExtraction.class); + Expression expression = mock(Expression.class); + Attribute attribute = mock(Attribute.class); + + ReferenceInput expected = new ReferenceInput(expression.location(), expression, column); + + assertEquals(expected, new AttributeInput(expression.location(), expression, attribute).resolveAttributes(a -> { + assertSame(attribute, a); + return column; + })); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/BinaryProcessorDefinitionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/BinaryProcessorDefinitionTests.java new file mode 100644 index 0000000000000..110c482916228 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/BinaryProcessorDefinitionTests.java @@ -0,0 +1,154 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition.AttributeResolver; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; + +import java.util.List; + +import static java.util.Collections.emptyList; + +public class BinaryProcessorDefinitionTests extends ESTestCase { + public void testSupportedByAggsOnlyQuery() { + ProcessorDefinition supported = new DummyProcessorDefinition(true); + ProcessorDefinition unsupported = new DummyProcessorDefinition(false); + + assertFalse(new DummyBinaryProcessorDefinition(unsupported, unsupported).supportedByAggsOnlyQuery()); + assertFalse(new DummyBinaryProcessorDefinition(unsupported, supported).supportedByAggsOnlyQuery()); + assertFalse(new DummyBinaryProcessorDefinition(supported, unsupported).supportedByAggsOnlyQuery()); + assertTrue(new DummyBinaryProcessorDefinition(supported, supported).supportedByAggsOnlyQuery()); + } + + public void testResolveAttributes() { + ProcessorDefinition needsNothing = new DummyProcessorDefinition(randomBoolean()); + ProcessorDefinition resolvesTo = new DummyProcessorDefinition(randomBoolean()); + ProcessorDefinition needsResolution = new DummyProcessorDefinition(randomBoolean()) { + @Override + public ProcessorDefinition resolveAttributes(AttributeResolver resolver) { + return resolvesTo; + } + }; + AttributeResolver resolver = a -> { + fail("not exepected"); + return null; + }; + + ProcessorDefinition d = new 
DummyBinaryProcessorDefinition(needsNothing, needsNothing); + assertSame(d, d.resolveAttributes(resolver)); + + d = new DummyBinaryProcessorDefinition(needsNothing, needsResolution); + ProcessorDefinition expected = new DummyBinaryProcessorDefinition(needsNothing, resolvesTo); + assertEquals(expected, d.resolveAttributes(resolver)); + + d = new DummyBinaryProcessorDefinition(needsResolution, needsNothing); + expected = new DummyBinaryProcessorDefinition(resolvesTo, needsNothing); + assertEquals(expected, d.resolveAttributes(resolver)); + } + + public void testCollectFields() { + DummyProcessorDefinition wantsScore = new DummyProcessorDefinition(randomBoolean()) { + @Override + public void collectFields(SqlSourceBuilder sourceBuilder) { + sourceBuilder.trackScores(); + } + }; + DummyProcessorDefinition wantsNothing = new DummyProcessorDefinition(randomBoolean()); + assertFalse(tracksScores(new DummyBinaryProcessorDefinition(wantsNothing, wantsNothing))); + assertTrue(tracksScores(new DummyBinaryProcessorDefinition(wantsScore, wantsNothing))); + assertTrue(tracksScores(new DummyBinaryProcessorDefinition(wantsNothing, wantsScore))); + } + + /** + * Returns {@code true} if the processor defintion builds a query that + * tracks scores, {@code false} otherwise. Used for testing + * {@link ProcessorDefinition#collectFields(SqlSourceBuilder)}. + */ + static boolean tracksScores(ProcessorDefinition d) { + SqlSourceBuilder b = new SqlSourceBuilder(); + d.collectFields(b); + SearchSourceBuilder source = new SearchSourceBuilder(); + b.build(source); + return source.trackScores(); + } + + public static final class DummyBinaryProcessorDefinition extends BinaryProcessorDefinition { + public DummyBinaryProcessorDefinition(ProcessorDefinition left, ProcessorDefinition right) { + this(Location.EMPTY, left, right); + } + + public DummyBinaryProcessorDefinition(Location location, ProcessorDefinition left, ProcessorDefinition right) { + super(location, null, left, right); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, DummyBinaryProcessorDefinition::new, left(), right()); + } + + @Override + public Processor asProcessor() { + return null; + } + + @Override + protected BinaryProcessorDefinition replaceChildren(ProcessorDefinition left, ProcessorDefinition right) { + return new DummyBinaryProcessorDefinition(location(), left, right); + } + } + + public static class DummyProcessorDefinition extends ProcessorDefinition { + private final boolean supportedByAggsOnlyQuery; + + public DummyProcessorDefinition(boolean supportedByAggsOnlyQuery) { + this(Location.EMPTY, supportedByAggsOnlyQuery); + } + + public DummyProcessorDefinition(Location location, boolean supportedByAggsOnlyQuery) { + super(location, null, emptyList()); + this.supportedByAggsOnlyQuery = supportedByAggsOnlyQuery; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, DummyProcessorDefinition::new, supportedByAggsOnlyQuery); + } + + @Override + public ProcessorDefinition replaceChildren(List newChildren) { + throw new UnsupportedOperationException("this type of node doesn't have any children to replace"); + } + + @Override + public boolean supportedByAggsOnlyQuery() { + return supportedByAggsOnlyQuery; + } + + @Override + public boolean resolved() { + return true; + } + + @Override + public Processor asProcessor() { + return null; + } + + @Override + public ProcessorDefinition resolveAttributes(AttributeResolver resolver) { + return this; + } + + @Override + public void 
collectFields(SqlSourceBuilder sourceBuilder) { + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/UnaryProcessorDefinitionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/UnaryProcessorDefinitionTests.java new file mode 100644 index 0000000000000..5a102403d30c1 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/definition/UnaryProcessorDefinitionTests.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.execution.search.SqlSourceBuilder; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.BinaryProcessorDefinitionTests.DummyProcessorDefinition; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.ProcessorDefinition.AttributeResolver; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.BinaryProcessorDefinitionTests.tracksScores; + +public class UnaryProcessorDefinitionTests extends ESTestCase { + public void testSupportedByAggsOnlyQuery() { + ProcessorDefinition supported = new DummyProcessorDefinition(true); + ProcessorDefinition unsupported = new DummyProcessorDefinition(false); + + assertFalse(newUnaryProcessor(unsupported).supportedByAggsOnlyQuery()); + assertTrue(newUnaryProcessor(supported).supportedByAggsOnlyQuery()); + } + + public void testResolveAttributes() { + ProcessorDefinition needsNothing = new DummyProcessorDefinition(randomBoolean()); + ProcessorDefinition resolvesTo = new DummyProcessorDefinition(randomBoolean()); + ProcessorDefinition needsResolution = new DummyProcessorDefinition(randomBoolean()) { + @Override + public ProcessorDefinition resolveAttributes(AttributeResolver resolver) { + return resolvesTo; + } + }; + AttributeResolver resolver = a -> { + fail("not expected"); + return null; + }; + + ProcessorDefinition d = newUnaryProcessor(needsNothing); + assertSame(d, d.resolveAttributes(resolver)); + + d = newUnaryProcessor(needsResolution); + ProcessorDefinition expected = newUnaryProcessor(resolvesTo); + assertEquals(expected, d.resolveAttributes(resolver)); + } + + public void testCollectFields() { + DummyProcessorDefinition wantsScore = new DummyProcessorDefinition(randomBoolean()) { + @Override + public void collectFields(SqlSourceBuilder sourceBuilder) { + sourceBuilder.trackScores(); + } + }; + DummyProcessorDefinition wantsNothing = new DummyProcessorDefinition(randomBoolean()); + assertFalse(tracksScores(newUnaryProcessor(wantsNothing))); + assertTrue(tracksScores(newUnaryProcessor(wantsScore))); + } + + private ProcessorDefinition newUnaryProcessor(ProcessorDefinition child) { + return new UnaryProcessorDefinition(Location.EMPTY, null, child, null); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ChainingProcessorTests.java
b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ChainingProcessorTests.java new file mode 100644 index 0000000000000..a7440ba53774d --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ChainingProcessorTests.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.expression.function.scalar.Processors; + +import java.io.IOException; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.sql.execution.search.extractor.ComputingExtractorTests.randomProcessor; + +public class ChainingProcessorTests extends AbstractWireSerializingTestCase { + public static ChainingProcessor randomComposeProcessor() { + return new ChainingProcessor(randomProcessor(), randomProcessor()); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Processors.getNamedWriteables()); + } + + @Override + protected ChainingProcessor createTestInstance() { + return randomComposeProcessor(); + } + + @Override + protected Reader instanceReader() { + return ChainingProcessor::new; + } + + @Override + protected ChainingProcessor mutateInstance(ChainingProcessor instance) throws IOException { + @SuppressWarnings("unchecked") + Supplier supplier = randomFrom( + () -> new ChainingProcessor( + instance.first(), randomValueOtherThan(instance.second(), () -> randomProcessor())), + () -> new ChainingProcessor( + randomValueOtherThan(instance.first(), () -> randomProcessor()), instance.second())); + return supplier.get(); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ConstantProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ConstantProcessorTests.java new file mode 100644 index 0000000000000..b26cf8edc1cb7 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/processor/runtime/ConstantProcessorTests.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; + +public class ConstantProcessorTests extends AbstractWireSerializingTestCase { + public static ConstantProcessor randomConstantProcessor() { + return new ConstantProcessor(randomAlphaOfLength(5)); + } + + @Override + protected ConstantProcessor createTestInstance() { + return randomConstantProcessor(); + } + + @Override + protected Reader instanceReader() { + return ConstantProcessor::new; + } + + @Override + protected ConstantProcessor mutateInstance(ConstantProcessor instance) throws IOException { + return new ConstantProcessor(randomValueOtherThan(instance.process(null), () -> randomAlphaOfLength(5))); + } + + public void testApply() { + ConstantProcessor proc = new ConstantProcessor("test"); + assertEquals("test", proc.process(null)); + assertEquals("test", proc.process("cat")); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java new file mode 100644 index 0000000000000..a49538c8d53f8 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java @@ -0,0 +1,372 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.optimizer; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.expression.Alias; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.expression.Order; +import org.elasticsearch.xpack.sql.expression.Order.OrderDirection; +import org.elasticsearch.xpack.sql.expression.function.Function; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Count; +import org.elasticsearch.xpack.sql.expression.function.scalar.Cast; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Add; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Div; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Mod; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Mul; +import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.Sub; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayOfMonth; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayOfYear; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.MonthOfYear; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.WeekOfYear; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.Year; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.ACos; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.ASin; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.ATan; +import 
org.elasticsearch.xpack.sql.expression.function.scalar.math.Abs; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.E; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.Floor; +import org.elasticsearch.xpack.sql.expression.predicate.And; +import org.elasticsearch.xpack.sql.expression.predicate.Equals; +import org.elasticsearch.xpack.sql.expression.predicate.GreaterThan; +import org.elasticsearch.xpack.sql.expression.predicate.GreaterThanOrEqual; +import org.elasticsearch.xpack.sql.expression.predicate.IsNotNull; +import org.elasticsearch.xpack.sql.expression.predicate.LessThan; +import org.elasticsearch.xpack.sql.expression.predicate.LessThanOrEqual; +import org.elasticsearch.xpack.sql.expression.predicate.Not; +import org.elasticsearch.xpack.sql.expression.predicate.Or; +import org.elasticsearch.xpack.sql.expression.predicate.Range; +import org.elasticsearch.xpack.sql.expression.regex.Like; +import org.elasticsearch.xpack.sql.expression.regex.LikePattern; +import org.elasticsearch.xpack.sql.expression.regex.RLike; +import org.elasticsearch.xpack.sql.optimizer.Optimizer.BinaryComparisonSimplification; +import org.elasticsearch.xpack.sql.optimizer.Optimizer.BooleanLiteralsOnTheRight; +import org.elasticsearch.xpack.sql.optimizer.Optimizer.BooleanSimplification; +import org.elasticsearch.xpack.sql.optimizer.Optimizer.CombineProjections; +import org.elasticsearch.xpack.sql.optimizer.Optimizer.ConstantFolding; +import org.elasticsearch.xpack.sql.optimizer.Optimizer.PruneDuplicateFunctions; +import org.elasticsearch.xpack.sql.optimizer.Optimizer.PruneSubqueryAliases; +import org.elasticsearch.xpack.sql.optimizer.Optimizer.ReplaceFoldableAttributes; +import org.elasticsearch.xpack.sql.plan.logical.Filter; +import org.elasticsearch.xpack.sql.plan.logical.LocalRelation; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.logical.OrderBy; +import org.elasticsearch.xpack.sql.plan.logical.Project; +import org.elasticsearch.xpack.sql.plan.logical.SubQueryAlias; +import org.elasticsearch.xpack.sql.plan.logical.command.ShowTables; +import org.elasticsearch.xpack.sql.session.EmptyExecutable; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.type.DataType; +import org.joda.time.DateTimeZone; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.TimeZone; + +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.elasticsearch.xpack.sql.tree.Location.EMPTY; + +public class OptimizerTests extends ESTestCase { + + private static final Expression DUMMY_EXPRESSION = new DummyBooleanExpression(EMPTY, 0); + + public static class DummyBooleanExpression extends Expression { + + private final int id; + + public DummyBooleanExpression(Location location, int id) { + super(location, Collections.emptyList()); + this.id = id; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, DummyBooleanExpression::new, id); + } + + @Override + public Expression replaceChildren(List newChildren) { + throw new UnsupportedOperationException("this type of node doesn't have any children"); + } + + @Override + public boolean nullable() { + return false; + } + + @Override + public DataType dataType() { + return DataType.BOOLEAN; + } + + @Override + public int hashCode() { + int h = 
getClass().hashCode(); + h = 31 * h + id; + return h; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || getClass() != obj.getClass()) { + return false; + } + return id == ((DummyBooleanExpression) obj).id; + } + } + + private static LogicalPlan FROM() { + return new LocalRelation(EMPTY, new EmptyExecutable(emptyList())); + } + + private static Literal L(Object value) { + return Literal.of(EMPTY, value); + } + + public void testPruneSubqueryAliases() { + ShowTables s = new ShowTables(EMPTY, null); + SubQueryAlias plan = new SubQueryAlias(EMPTY, s, "show"); + LogicalPlan result = new PruneSubqueryAliases().apply(plan); + assertEquals(result, s); + } + + public void testDuplicateFunctions() { + AggregateFunction f1 = new Count(EMPTY, Literal.TRUE, false); + AggregateFunction f2 = new Count(EMPTY, Literal.TRUE, false); + + assertTrue(f1.functionEquals(f2)); + + Project p = new Project(EMPTY, FROM(), Arrays.asList(f1, f2)); + LogicalPlan result = new PruneDuplicateFunctions().apply(p); + assertTrue(result instanceof Project); + List projections = ((Project) result).projections(); + assertEquals(2, projections.size()); + assertSame(projections.get(0), projections.get(1)); + } + + public void testCombineProjections() { + // a + Alias a = new Alias(EMPTY, "a", L(5)); + // b + Alias b = new Alias(EMPTY, "b", L(10)); + // x -> a + Alias x = new Alias(EMPTY, "x", a); + + Project lowerP = new Project(EMPTY, FROM(), asList(a, b)); + Project upperP = new Project(EMPTY, lowerP, singletonList(x)); + + LogicalPlan result = new CombineProjections().apply(upperP); + assertNotSame(upperP, result); + + assertTrue(result instanceof Project); + Project p = (Project) result; + assertEquals(1, p.projections().size()); + Alias al = (Alias) p.projections().get(0); + assertEquals("x", al.name()); + assertTrue(al.child() instanceof Literal); + assertEquals(5, al.child().fold()); + assertTrue(p.child() instanceof LocalRelation); + } + + public void testReplaceFoldableAttributes() { + // SELECT 5 a, 10 b FROM foo WHERE a < 10 ORDER BY b + + // a + Alias a = new Alias(EMPTY, "a", L(5)); + // b + Alias b = new Alias(EMPTY, "b", L(10)); + // WHERE a < 10 + LogicalPlan p = new Filter(EMPTY, FROM(), new LessThan(EMPTY, a, L(10))); + // SELECT + p = new Project(EMPTY, p, Arrays.asList(a, b)); + // ORDER BY + p = new OrderBy(EMPTY, p, singletonList(new Order(EMPTY, b, OrderDirection.ASC))); + + LogicalPlan result = new ReplaceFoldableAttributes().apply(p); + assertNotSame(p, result); + + // ORDER BY b -> ORDER BY 10 + assertTrue(result instanceof OrderBy); + OrderBy o = (OrderBy) result; + assertEquals(1, o.order().size()); + Expression oe = o.order().get(0).child(); + assertTrue(oe instanceof Literal); + assertEquals(10, oe.fold()); + + // WHERE a < 10 + assertTrue(o.child() instanceof Project); + Project pj = (Project) o.child(); + assertTrue(pj.child() instanceof Filter); + Filter f = (Filter) pj.child(); + assertTrue(f.condition() instanceof LessThan); + LessThan lt = (LessThan) f.condition(); + assertTrue(lt.left() instanceof Literal); + assertTrue(lt.right() instanceof Literal); + assertEquals(5, lt.left().fold()); + assertEquals(10, lt.right().fold()); + } + + public void testConstantFolding() { + Expression exp = new Add(EMPTY, L(2), L(3)); + + assertTrue(exp.foldable()); + assertTrue(exp instanceof NamedExpression); + String n = Expressions.name(exp); + + Expression result = new ConstantFolding().rule(exp); + assertTrue(result instanceof Alias); + assertEquals(n, Expressions.name(result)); + 
Expression c = ((Alias) result).child(); + assertTrue(c instanceof Literal); + assertEquals(5, ((Literal) c).value()); + + // check now with an alias + result = new ConstantFolding().rule(new Alias(EMPTY, "a", exp)); + assertTrue(result instanceof Alias); + assertEquals("a", Expressions.name(result)); + c = ((Alias) result).child(); + assertTrue(c instanceof Literal); + assertEquals(5, ((Literal) c).value()); + } + + public void testConstantFoldingBinaryComparison() { + assertEquals(Literal.FALSE, new ConstantFolding().rule(new GreaterThan(EMPTY, L(2), L(3)))); + assertEquals(Literal.FALSE, new ConstantFolding().rule(new GreaterThanOrEqual(EMPTY, L(2), L(3)))); + assertEquals(Literal.FALSE, new ConstantFolding().rule(new Equals(EMPTY, L(2), L(3)))); + assertEquals(Literal.TRUE, new ConstantFolding().rule(new LessThanOrEqual(EMPTY, L(2), L(3)))); + assertEquals(Literal.TRUE, new ConstantFolding().rule(new LessThan(EMPTY, L(2), L(3)))); + } + + public void testConstantFoldingBinaryLogic() { + assertEquals(Literal.FALSE, new ConstantFolding().rule(new And(EMPTY, new GreaterThan(EMPTY, L(2), L(3)), Literal.TRUE))); + assertEquals(Literal.TRUE, new ConstantFolding().rule(new Or(EMPTY, new GreaterThanOrEqual(EMPTY, L(2), L(3)), Literal.TRUE))); + } + + public void testConstantFoldingRange() { + assertEquals(Literal.TRUE, new ConstantFolding().rule(new Range(EMPTY, L(5), L(5), true, L(10), false))); + assertEquals(Literal.FALSE, new ConstantFolding().rule(new Range(EMPTY, L(5), L(5), false, L(10), false))); + } + + public void testConstantIsNotNull() { + assertEquals(Literal.FALSE, new ConstantFolding().rule(new IsNotNull(EMPTY, L(null)))); + assertEquals(Literal.TRUE, new ConstantFolding().rule(new IsNotNull(EMPTY, L(5)))); + } + + public void testConstantNot() { + assertEquals(Literal.FALSE, new ConstantFolding().rule(new Not(EMPTY, Literal.TRUE))); + assertEquals(Literal.TRUE, new ConstantFolding().rule(new Not(EMPTY, Literal.FALSE))); + } + + public void testConstantFoldingLikes() { + assertEquals(Literal.TRUE, + new ConstantFolding().rule(new Like(EMPTY, Literal.of(EMPTY, "test_emp"), new LikePattern(EMPTY, "test%", (char) 0)))); + assertEquals(Literal.TRUE, + new ConstantFolding().rule(new RLike(EMPTY, Literal.of(EMPTY, "test_emp"), Literal.of(EMPTY, "test.emp")))); + } + + public void testConstantFoldingDatetime() { + final TimeZone UTC = TimeZone.getTimeZone("UTC"); + Expression cast = new Cast(EMPTY, Literal.of(EMPTY, "2018-01-19T10:23:27Z"), DataType.DATE); + assertEquals(2018, foldFunction(new Year(EMPTY, cast, UTC))); + assertEquals(1, foldFunction(new MonthOfYear(EMPTY, cast, UTC))); + assertEquals(19, foldFunction(new DayOfMonth(EMPTY, cast, UTC))); + assertEquals(19, foldFunction(new DayOfYear(EMPTY, cast, UTC))); + assertEquals(3, foldFunction(new WeekOfYear(EMPTY, cast, UTC))); + assertNull(foldFunction( + new WeekOfYear(EMPTY, new Literal(EMPTY, null, DataType.NULL), UTC))); + } + + public void testArithmeticFolding() { + assertEquals(10, foldFunction(new Add(EMPTY, L(7), L(3)))); + assertEquals(4, foldFunction(new Sub(EMPTY, L(7), L(3)))); + assertEquals(21, foldFunction(new Mul(EMPTY, L(7), L(3)))); + assertEquals(2, foldFunction(new Div(EMPTY, L(7), L(3)))); + assertEquals(1, foldFunction(new Mod(EMPTY, L(7), L(3)))); + } + + public void testMathFolding() { + assertEquals(7, foldFunction(new Abs(EMPTY, L(7)))); + assertEquals(0d, (double) foldFunction(new ACos(EMPTY, L(1))), 0.01d); + assertEquals(1.57076d, (double) foldFunction(new ASin(EMPTY, L(1))), 0.01d); + 
assertEquals(0.78539d, (double) foldFunction(new ATan(EMPTY, L(1))), 0.01d); + assertEquals(7, foldFunction(new Floor(EMPTY, L(7)))); + assertEquals(Math.E, foldFunction(new E(EMPTY))); + } + + private static Object foldFunction(Function f) { + return unwrapAlias(new ConstantFolding().rule(f)); + } + + private static Object unwrapAlias(Expression e) { + Alias a = (Alias) e; + Literal l = (Literal) a.child(); + return l.value(); + } + + public void testBinaryComparisonSimplification() { + assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new Equals(EMPTY, L(5), L(5)))); + assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new GreaterThanOrEqual(EMPTY, L(5), L(5)))); + assertEquals(Literal.TRUE, new BinaryComparisonSimplification().rule(new LessThanOrEqual(EMPTY, L(5), L(5)))); + + assertEquals(Literal.FALSE, new BinaryComparisonSimplification().rule(new GreaterThan(EMPTY, L(5), L(5)))); + assertEquals(Literal.FALSE, new BinaryComparisonSimplification().rule(new LessThan(EMPTY, L(5), L(5)))); + } + + public void testLiteralsOnTheRight() { + Alias a = new Alias(EMPTY, "a", L(10)); + Expression result = new BooleanLiteralsOnTheRight().rule(new Equals(EMPTY, L(5), a)); + assertTrue(result instanceof Equals); + Equals eq = (Equals) result; + assertEquals(a, eq.left()); + assertEquals(L(5), eq.right()); + } + + public void testBoolSimplifyOr() { + BooleanSimplification simplification = new BooleanSimplification(); + + assertEquals(Literal.TRUE, simplification.rule(new Or(EMPTY, Literal.TRUE, Literal.TRUE))); + assertEquals(Literal.TRUE, simplification.rule(new Or(EMPTY, Literal.TRUE, DUMMY_EXPRESSION))); + assertEquals(Literal.TRUE, simplification.rule(new Or(EMPTY, DUMMY_EXPRESSION, Literal.TRUE))); + + assertEquals(Literal.FALSE, simplification.rule(new Or(EMPTY, Literal.FALSE, Literal.FALSE))); + assertEquals(DUMMY_EXPRESSION, simplification.rule(new Or(EMPTY, Literal.FALSE, DUMMY_EXPRESSION))); + assertEquals(DUMMY_EXPRESSION, simplification.rule(new Or(EMPTY, DUMMY_EXPRESSION, Literal.FALSE))); + } + + public void testBoolSimplifyAnd() { + BooleanSimplification simplification = new BooleanSimplification(); + + assertEquals(Literal.TRUE, simplification.rule(new And(EMPTY, Literal.TRUE, Literal.TRUE))); + assertEquals(DUMMY_EXPRESSION, simplification.rule(new And(EMPTY, Literal.TRUE, DUMMY_EXPRESSION))); + assertEquals(DUMMY_EXPRESSION, simplification.rule(new And(EMPTY, DUMMY_EXPRESSION, Literal.TRUE))); + + assertEquals(Literal.FALSE, simplification.rule(new And(EMPTY, Literal.FALSE, Literal.FALSE))); + assertEquals(Literal.FALSE, simplification.rule(new And(EMPTY, Literal.FALSE, DUMMY_EXPRESSION))); + assertEquals(Literal.FALSE, simplification.rule(new And(EMPTY, DUMMY_EXPRESSION, Literal.FALSE))); + } + + public void testBoolCommonFactorExtraction() { + BooleanSimplification simplification = new BooleanSimplification(); + + Expression a1 = new DummyBooleanExpression(EMPTY, 1); + Expression a2 = new DummyBooleanExpression(EMPTY, 1); + Expression b = new DummyBooleanExpression(EMPTY, 2); + Expression c = new DummyBooleanExpression(EMPTY, 3); + + Expression actual = new Or(EMPTY, new And(EMPTY, a1, b), new And(EMPTY, a2, c)); + Expression expected = new And(EMPTY, a1, new Or(EMPTY, b, c)); + + assertEquals(expected, simplification.rule(actual)); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilderTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilderTests.java 
new file mode 100644 index 0000000000000..ec8b8abc51f2d --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/IdentifierBuilderTests.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.parser; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.tree.Location; + +import static org.hamcrest.Matchers.is; + +public class IdentifierBuilderTests extends ESTestCase { + + private static Location L = new Location(1, 10); + + public void testTypicalIndex() throws Exception { + IdentifierBuilder.validateIndex("some-index", L); + } + + public void testInternalIndex() throws Exception { + IdentifierBuilder.validateIndex(".some-internal-index-2020-02-02", L); + } + + public void testIndexPattern() throws Exception { + IdentifierBuilder.validateIndex(".some-*", L); + } + + public void testInvalidIndex() throws Exception { + ParsingException pe = expectThrows(ParsingException.class, () -> IdentifierBuilder.validateIndex("some,index", L)); + assertThat(pe.getMessage(), is("line 1:12: Invalid index name (illegal character ,) some,index")); + } + + public void testUpperCasedIndex() throws Exception { + ParsingException pe = expectThrows(ParsingException.class, () -> IdentifierBuilder.validateIndex("thisIsAnIndex", L)); + assertThat(pe.getMessage(), is("line 1:12: Invalid index name (needs to be lowercase) thisIsAnIndex")); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/LikeEscapingParsingTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/LikeEscapingParsingTests.java new file mode 100644 index 0000000000000..c94bcf0e664c4 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/LikeEscapingParsingTests.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.parser; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.regex.Like; +import org.elasticsearch.xpack.sql.expression.regex.LikePattern; +import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.type.DataType; + +import java.util.Locale; + +import static java.util.Collections.singletonList; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +public class LikeEscapingParsingTests extends ESTestCase { + + private final SqlParser parser = new SqlParser(); + + private String error(String pattern) { + ParsingException ex = expectThrows(ParsingException.class, + () -> parser.createExpression(String.format(Locale.ROOT, "exp LIKE %s", pattern))); + + return ex.getMessage(); + } + + private LikePattern like(String pattern) { + Expression exp = null; + boolean parameterized = randomBoolean(); + if (parameterized) { + exp = parser.createExpression("exp LIKE ?", singletonList(new SqlTypedParamValue(pattern, DataType.KEYWORD))); + } else { + exp = parser.createExpression(String.format(Locale.ROOT, "exp LIKE '%s'", pattern)); + } + assertThat(exp, instanceOf(Like.class)); + Like l = (Like) exp; + return l.right(); + } + + public void testNoEscaping() { + LikePattern like = like("string"); + assertThat(like.pattern(), is("string")); + assertThat(like.asJavaRegex(), is("^string$")); + assertThat(like.asLuceneWildcard(), is("string")); + } + + public void testEscapingLastChar() { + assertThat(error("'string|' ESCAPE '|'"), + is("line 1:11: Pattern [string|] is invalid as escape char [|] at position 6 does not escape anything")); + } + + public void testEscapingWrongChar() { + assertThat(error("'|string' ESCAPE '|'"), + is("line 1:11: Pattern [|string] is invalid as escape char [|] at position 0 can only escape wildcard chars; found [s]")); + } + + public void testInvalidChar() { + assertThat(error("'%string' ESCAPE '%'"), + is("line 1:28: Char [%] cannot be used for escaping")); + } + + public void testCannotUseStar() { + assertThat(error("'|*string' ESCAPE '|'"), + is("line 1:11: Invalid char [*] found in pattern [|*string] at position 1; use [%] or [_] instead")); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java new file mode 100644 index 0000000000000..f7f03e5e4b70f --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java @@ -0,0 +1,129 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.parser; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.expression.NamedExpression; +import org.elasticsearch.xpack.sql.expression.Order; +import org.elasticsearch.xpack.sql.expression.UnresolvedAttribute; +import org.elasticsearch.xpack.sql.expression.UnresolvedStar; +import org.elasticsearch.xpack.sql.expression.function.UnresolvedFunction; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.logical.OrderBy; +import org.elasticsearch.xpack.sql.plan.logical.Project; + +import java.util.ArrayList; +import java.util.List; + +import static java.util.stream.Collectors.toList; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; + +public class SqlParserTests extends ESTestCase { + public void testSelectStar() { + singleProjection(project(parseStatement("SELECT * FROM foo")), UnresolvedStar.class); + } + + private T singleProjection(Project project, Class type) { + assertThat(project.projections(), hasSize(1)); + NamedExpression p = project.projections().get(0); + assertThat(p, instanceOf(type)); + return type.cast(p); + } + + public void testSelectField() { + UnresolvedAttribute a = singleProjection(project(parseStatement("SELECT bar FROM foo")), UnresolvedAttribute.class); + assertEquals("bar", a.name()); + } + + public void testSelectScore() { + UnresolvedFunction f = singleProjection(project(parseStatement("SELECT SCORE() FROM foo")), UnresolvedFunction.class); + assertEquals("SCORE", f.functionName()); + } + + public void testOrderByField() { + Order.OrderDirection dir = randomFrom(Order.OrderDirection.values()); + OrderBy ob = orderBy(parseStatement("SELECT * FROM foo ORDER BY bar" + stringForDirection(dir))); + assertThat(ob.order(), hasSize(1)); + Order o = ob.order().get(0); + assertEquals(dir, o.direction()); + assertThat(o.child(), instanceOf(UnresolvedAttribute.class)); + UnresolvedAttribute a = (UnresolvedAttribute) o.child(); + assertEquals("bar", a.name()); + } + + public void testOrderByScore() { + Order.OrderDirection dir = randomFrom(Order.OrderDirection.values()); + OrderBy ob = orderBy(parseStatement("SELECT * FROM foo ORDER BY SCORE()" + stringForDirection(dir))); + assertThat(ob.order(), hasSize(1)); + Order o = ob.order().get(0); + assertEquals(dir, o.direction()); + assertThat(o.child(), instanceOf(UnresolvedFunction.class)); + UnresolvedFunction f = (UnresolvedFunction) o.child(); + assertEquals("SCORE", f.functionName()); + } + + public void testOrderByTwo() { + Order.OrderDirection dir0 = randomFrom(Order.OrderDirection.values()); + Order.OrderDirection dir1 = randomFrom(Order.OrderDirection.values()); + OrderBy ob = orderBy(parseStatement( + " SELECT *" + + " FROM foo" + + " ORDER BY bar" + stringForDirection(dir0) + ", baz" + stringForDirection(dir1))); + assertThat(ob.order(), hasSize(2)); + Order o = ob.order().get(0); + assertEquals(dir0, o.direction()); + assertThat(o.child(), instanceOf(UnresolvedAttribute.class)); + UnresolvedAttribute a = (UnresolvedAttribute) o.child(); + assertEquals("bar", a.name()); + o = ob.order().get(1); + assertEquals(dir1, o.direction()); + assertThat(o.child(), instanceOf(UnresolvedAttribute.class)); + a = (UnresolvedAttribute) o.child(); + assertEquals("baz", a.name()); + } + + private LogicalPlan parseStatement(String sql) { + return new SqlParser().createStatement(sql); + } + + private Project project(LogicalPlan plan) { + List sync = new ArrayList<>(1); + 
projectRecur(plan, sync); + assertThat("expected only one SELECT", sync, hasSize(1)); + return sync.get(0); + } + + private void projectRecur(LogicalPlan plan, List sync) { + if (plan instanceof Project) { + sync.add((Project) plan); + return; + } + for (LogicalPlan child : plan.children()) { + projectRecur(child, sync); + } + } + + /** + * Find the one and only {@code ORDER BY} in a plan. + */ + private OrderBy orderBy(LogicalPlan plan) { + List l = plan.children().stream() + .filter(c -> c instanceof OrderBy) + .collect(toList()); + assertThat("expected only one ORDER BY", l, hasSize(1)); + return (OrderBy) l.get(0); + } + + /** + * Convert a direction into a string that parses to + * that direction. + */ + private String stringForDirection(Order.OrderDirection dir) { + String dirStr = dir.toString(); + return randomBoolean() && dirStr.equals("ASC") ? "" : " " + dirStr; + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/UnresolvedRelationTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/UnresolvedRelationTests.java new file mode 100644 index 0000000000000..99df12327e159 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/UnresolvedRelationTests.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.plan.TableIdentifier; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.LocationTests; + +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; + +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; + +public class UnresolvedRelationTests extends ESTestCase { + public void testEqualsAndHashCode() { + Location location = new Location(between(1, 1000), between(1, 1000)); + TableIdentifier table = new TableIdentifier(location, randomAlphaOfLength(5), randomAlphaOfLength(5)); + String alias = randomBoolean() ? null : randomAlphaOfLength(5); + String unresolvedMessage = randomAlphaOfLength(5); + UnresolvedRelation relation = new UnresolvedRelation(location, table, alias, unresolvedMessage); + List<Function<UnresolvedRelation, UnresolvedRelation>> mutators = new ArrayList<>(); + mutators.add(r -> new UnresolvedRelation( + LocationTests.mutate(r.location()), + r.table(), + r.alias(), + r.unresolvedMessage())); + mutators.add(r -> new UnresolvedRelation( + r.location(), + new TableIdentifier(r.location(), r.table().cluster(), r.table().index() + "m"), + r.alias(), + r.unresolvedMessage())); + mutators.add(r -> new UnresolvedRelation( + r.location(), + r.table(), + randomValueOtherThanMany( + a -> Objects.equals(a, r.alias()), + () -> randomBoolean() ?
null : randomAlphaOfLength(5)), + r.unresolvedMessage())); + mutators.add(r -> new UnresolvedRelation( + r.location(), + r.table(), + r.alias(), + randomValueOtherThan(r.unresolvedMessage(), () -> randomAlphaOfLength(5)))); + checkEqualsAndHashCode(relation, + r -> new UnresolvedRelation(r.location(), r.table(), r.alias(), r.unresolvedMessage()), + r -> randomFrom(mutators).apply(r)); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysCatalogsTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysCatalogsTests.java new file mode 100644 index 0000000000000..0a8ac30de7b6c --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysCatalogsTests.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical.command.sys; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; +import org.elasticsearch.xpack.sql.analysis.index.EsIndex; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.plan.logical.command.Command; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.type.TypesTests; + +import java.util.TimeZone; + +import static java.util.Collections.singletonList; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SysCatalogsTests extends ESTestCase { + + private final SqlParser parser = new SqlParser(); + + @SuppressWarnings({ "rawtypes", "unchecked" }) + private Tuple sql(String sql) { + EsIndex test = new EsIndex("test", TypesTests.loadMapping("mapping-multi-field-with-nested.json", true)); + Analyzer analyzer = new Analyzer(new FunctionRegistry(), IndexResolution.valid(test), TimeZone.getTimeZone("UTC")); + Command cmd = (Command) analyzer.analyze(parser.createStatement(sql), true); + + IndexResolver resolver = mock(IndexResolver.class); + when(resolver.clusterName()).thenReturn("cluster"); + + doAnswer(invocation -> { + ((ActionListener) invocation.getArguments()[2]).onResponse(singletonList(test)); + return Void.TYPE; + }).when(resolver).resolveAsSeparateMappings(any(), any(), any()); + + SqlSession session = new SqlSession(null, null, null, resolver, null, null, null); + return new Tuple<>(cmd, session); + } + + public void testSysCatalogs() throws Exception { + Tuple sql = sql("SYS CATALOGS"); + + sql.v1().execute(sql.v2(), ActionListener.wrap(r -> { + assertEquals(1, r.size()); + assertEquals("cluster", r.column(0)); + }, ex -> fail(ex.getMessage()))); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java new file mode 
100644 index 0000000000000..bddddc6941cbb --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plan.logical.command.sys; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.type.TypesTests; + +import java.sql.Types; +import java.util.ArrayList; +import java.util.List; + +public class SysColumnsTests extends ESTestCase { + + public void testSysColumns() { + List<List<?>> rows = new ArrayList<>(); + SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null); + assertEquals(16, rows.size()); + assertEquals(24, rows.get(0).size()); + + List row = rows.get(0); + assertEquals("bool", name(row)); + assertEquals(Types.BOOLEAN, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(1, bufferLength(row)); + + row = rows.get(1); + assertEquals("int", name(row)); + assertEquals(Types.INTEGER, sqlType(row)); + assertEquals(10, radix(row)); + assertEquals(4, bufferLength(row)); + + row = rows.get(2); + assertEquals("text", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + + row = rows.get(7); + assertEquals("some.dotted", name(row)); + assertEquals(Types.STRUCT, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(-1, bufferLength(row)); + + row = rows.get(15); + assertEquals("some.ambiguous.normalized", name(row)); + assertEquals(Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + } + + private static Object name(List list) { + return list.get(3); + } + + private static Object sqlType(List list) { + return list.get(4); + } + + private static Object bufferLength(List list) { + return list.get(7); + } + + private static Object radix(List list) { + return list.get(9); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java new file mode 100644 index 0000000000000..ac72bcba4d647 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java @@ -0,0 +1,154 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.sql.plan.logical.command.sys; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; +import org.elasticsearch.xpack.sql.analysis.index.EsIndex; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.plan.logical.command.Command; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.EsField; +import org.elasticsearch.xpack.sql.type.TypesTests; + +import java.util.List; +import java.util.Map; +import java.util.TimeZone; + +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SysParserTests extends ESTestCase { + + private final SqlParser parser = new SqlParser(); + private final Map mapping = TypesTests.loadMapping("mapping-multi-field-with-nested.json", true); + + @SuppressWarnings({ "rawtypes", "unchecked" }) + private Tuple sql(String sql) { + EsIndex test = new EsIndex("test", mapping); + Analyzer analyzer = new Analyzer(new FunctionRegistry(), IndexResolution.valid(test), TimeZone.getTimeZone("UTC")); + Command cmd = (Command) analyzer.analyze(parser.createStatement(sql), true); + + IndexResolver resolver = mock(IndexResolver.class); + when(resolver.clusterName()).thenReturn("cluster"); + + doAnswer(invocation -> { + ((ActionListener) invocation.getArguments()[2]).onResponse(singletonList(test)); + return Void.TYPE; + }).when(resolver).resolveAsSeparateMappings(any(), any(), any()); + + SqlSession session = new SqlSession(null, null, null, resolver, null, null, null); + return new Tuple<>(cmd, session); + } + + public void testSysTypes() throws Exception { + Command cmd = sql("SYS TYPES").v1(); + + List names = asList("BYTE", "SHORT", "INTEGER", "LONG", "HALF_FLOAT", "SCALED_FLOAT", "FLOAT", "DOUBLE", "KEYWORD", "TEXT", + "DATE", "BINARY", "NULL", "UNSUPPORTED", "OBJECT", "NESTED", "BOOLEAN"); + + cmd.execute(null, ActionListener.wrap(r -> { + assertEquals(19, r.columnCount()); + assertEquals(17, r.size()); + assertFalse(r.schema().types().contains(DataType.NULL)); + // test numeric as signed + assertFalse(r.column(9, Boolean.class)); + // make sure precision is returned as boolean (not int) + assertFalse(r.column(10, Boolean.class)); + + for (int i = 0; i < r.size(); i++) { + assertEquals(names.get(i), r.column(0)); + r.advanceRow(); + } + + }, ex -> fail(ex.getMessage()))); + } + + public void testSysColsNoArgs() throws Exception { + runSysColumns("SYS COLUMNS"); + } + + public void testSysColumnEmptyCatalog() throws Exception { + Tuple sql = sql("SYS COLUMNS CATALOG '' TABLE LIKE '%' LIKE '%'"); + + sql.v1().execute(sql.v2(), ActionListener.wrap(r -> { + assertEquals(24, r.columnCount()); + assertEquals(22, r.size()); + }, ex -> fail(ex.getMessage()))); + } + + public void testSysColsTableOnlyCatalog() throws Exception { + Tuple sql = sql("SYS COLUMNS CATALOG 'catalog'"); + + sql.v1().execute(sql.v2(), ActionListener.wrap(r -> { + assertEquals(24, 
r.columnCount()); + assertEquals(0, r.size()); + }, ex -> fail(ex.getMessage()))); + } + + public void testSysColsTableOnlyPattern() throws Exception { + runSysColumns("SYS COLUMNS TABLE LIKE 'test'"); + } + + public void testSysColsColOnlyPattern() throws Exception { + runSysColumns("SYS COLUMNS LIKE '%'"); + } + + public void testSysColsTableAndColsPattern() throws Exception { + runSysColumns("SYS COLUMNS TABLE LIKE 'test' LIKE '%'"); + } + + + private void runSysColumns(String commandVariation) throws Exception { + Tuple sql = sql(commandVariation); + List names = asList("bool", + "int", + "text", + "keyword", + "unsupported", + "date", + "some", + "some.dotted", + "some.dotted.field", + "some.string", + "some.string.normalized", + "some.string.typical", + "some.ambiguous", + "some.ambiguous.one", + "some.ambiguous.two", + "some.ambiguous.normalized", + "dep", + "dep.dep_name", + "dep.dep_id", + "dep.dep_id.keyword", + "dep.end_date", + "dep.start_date"); + + sql.v1().execute(sql.v2(), ActionListener.wrap(r -> { + assertEquals(24, r.columnCount()); + assertEquals(22, r.size()); + + for (int i = 0; i < r.size(); i++) { + assertEquals("cluster", r.column(0)); + assertNull(r.column(1)); + assertEquals("test", r.column(2)); + assertEquals(names.get(i), r.column(3)); + r.advanceRow(); + } + + }, ex -> fail(ex.getMessage()))); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTableTypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTableTypesTests.java new file mode 100644 index 0000000000000..956273b9aae2d --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTableTypesTests.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.logical.command.sys; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; +import org.elasticsearch.xpack.sql.analysis.index.EsIndex; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.plan.logical.command.Command; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.type.TypesTests; + +import java.util.TimeZone; + +import static org.mockito.Mockito.mock; + +public class SysTableTypesTests extends ESTestCase { + + private final SqlParser parser = new SqlParser(); + + private Tuple sql(String sql) { + EsIndex test = new EsIndex("test", TypesTests.loadMapping("mapping-multi-field-with-nested.json", true)); + Analyzer analyzer = new Analyzer(new FunctionRegistry(), IndexResolution.valid(test), TimeZone.getTimeZone("UTC")); + Command cmd = (Command) analyzer.analyze(parser.createStatement(sql), true); + + IndexResolver resolver = mock(IndexResolver.class); + SqlSession session = new SqlSession(null, null, null, resolver, null, null, null); + return new Tuple<>(cmd, session); + } + + public void testSysCatalogs() throws Exception { + Tuple sql = sql("SYS TABLE TYPES"); + + sql.v1().execute(sql.v2(), ActionListener.wrap(r -> { + assertEquals(2, r.size()); + assertEquals("BASE TABLE", r.column(0)); + r.advanceRow(); + assertEquals("ALIAS", r.column(0)); + }, ex -> fail(ex.getMessage()))); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java new file mode 100644 index 0000000000000..fe36095641a60 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java @@ -0,0 +1,234 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plan.logical.command.sys; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; +import org.elasticsearch.xpack.sql.analysis.index.EsIndex; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexInfo; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexType; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.plan.logical.command.Command; +import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.session.SchemaRowSet; +import org.elasticsearch.xpack.sql.session.SqlSession; +import org.elasticsearch.xpack.sql.type.DataTypes; +import org.elasticsearch.xpack.sql.type.EsField; +import org.elasticsearch.xpack.sql.type.TypesTests; + +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.TimeZone; +import java.util.function.Consumer; + +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static org.elasticsearch.action.ActionListener.wrap; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SysTablesTests extends ESTestCase { + + private static final String CLUSTER_NAME = "cluster"; + + private final SqlParser parser = new SqlParser(); + private final Map mapping = TypesTests.loadMapping("mapping-multi-field-with-nested.json", true); + private final IndexInfo index = new IndexInfo("test", IndexType.INDEX); + private final IndexInfo alias = new IndexInfo("alias", IndexType.ALIAS); + + public void testSysTablesDifferentCatalog() throws Exception { + executeCommand("SYS TABLES CATALOG LIKE 'foo'", r -> { + assertEquals(0, r.size()); + assertFalse(r.hasCurrentRow()); + }); + } + + public void testSysTablesNoTypes() throws Exception { + executeCommand("SYS TABLES", r -> { + assertEquals(2, r.size()); + assertEquals("test", r.column(2)); + assertTrue(r.advanceRow()); + assertEquals("alias", r.column(2)); + }, index, alias); + } + + public void testSysTablesPattern() throws Exception { + executeCommand("SYS TABLES LIKE '%'", r -> { + assertEquals(2, r.size()); + assertEquals("test", r.column(2)); + assertTrue(r.advanceRow()); + assertEquals("alias", r.column(2)); + }, index, alias); + } + + public void testSysTablesPatternParameterized() throws Exception { + List params = asList(param("%")); + executeCommand("SYS TABLES LIKE ?", params, r -> { + assertEquals(2, r.size()); + assertEquals("test", r.column(2)); + assertTrue(r.advanceRow()); + assertEquals("alias", r.column(2)); + }, index, alias); + } + + public void testSysTablesOnlyAliases() throws Exception { + executeCommand("SYS TABLES LIKE 'test' TYPE 'ALIAS'", r -> { + assertEquals(1, r.size()); + assertEquals("alias", r.column(2)); + }, alias); + } + + public void testSysTablesOnlyAliasesParameterized() throws Exception { + List params = asList(param("ALIAS")); + executeCommand("SYS TABLES LIKE 'test' TYPE ?", params, r -> { + assertEquals(1, r.size()); + assertEquals("alias", r.column(2)); + }, alias); 
+ } + + public void testSysTablesOnlyIndices() throws Exception { + executeCommand("SYS TABLES LIKE 'test' TYPE 'BASE TABLE'", r -> { + assertEquals(1, r.size()); + assertEquals("test", r.column(2)); + }, index); + } + + public void testSysTablesOnlyIndicesParameterized() throws Exception { + executeCommand("SYS TABLES LIKE 'test' TYPE ?", asList(param("ALIAS")), r -> { + assertEquals(1, r.size()); + assertEquals("test", r.column(2)); + }, index); + } + + public void testSysTablesOnlyIndicesAndAliases() throws Exception { + executeCommand("SYS TABLES LIKE 'test' TYPE 'ALIAS', 'BASE TABLE'", r -> { + assertEquals(2, r.size()); + assertEquals("test", r.column(2)); + assertTrue(r.advanceRow()); + assertEquals("alias", r.column(2)); + }, index, alias); + } + + public void testSysTablesOnlyIndicesAndAliasesParameterized() throws Exception { + List params = asList(param("ALIAS"), param("BASE TABLE")); + executeCommand("SYS TABLES LIKE 'test' TYPE ?, ?", params, r -> { + assertEquals(2, r.size()); + assertEquals("test", r.column(2)); + assertTrue(r.advanceRow()); + assertEquals("alias", r.column(2)); + }, index, alias); + } + + public void testSysTablesWithCatalogOnlyAliases() throws Exception { + executeCommand("SYS TABLES CATALOG LIKE '%' LIKE 'test' TYPE 'ALIAS'", r -> { + assertEquals(1, r.size()); + assertEquals("alias", r.column(2)); + }, alias); + } + + public void testSysTablesWithInvalidType() throws Exception { + executeCommand("SYS TABLES LIKE 'test' TYPE 'QUE HORA ES'", r -> { + assertEquals(0, r.size()); + }, new IndexInfo[0]); + } + + public void testSysTablesCatalogEnumeration() throws Exception { + executeCommand("SYS TABLES CATALOG LIKE '%' LIKE ''", r -> { + assertEquals(1, r.size()); + assertEquals(CLUSTER_NAME, r.column(0)); + // everything else should be null + for (int i = 1; i < 10; i++) { + assertNull(r.column(i)); + } + }, new IndexInfo[0]); + } + + public void testSysTablesTypesEnumeration() throws Exception { + executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' TYPE '%'", r -> { + assertEquals(2, r.size()); + + Iterator it = IndexType.VALID.iterator(); + + for (int t = 0; t < r.size(); t++) { + assertEquals(it.next().toSql(), r.column(3)); + + // everything else should be null + for (int i = 0; i < 10; i++) { + if (i != 3) { + assertNull(r.column(i)); + } + } + + r.advanceRow(); + } + }, new IndexInfo[0]); + } + + public void testSysTablesTypesEnumerationWoString() throws Exception { + executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' ", r -> { + assertEquals(2, r.size()); + + Iterator it = IndexType.VALID.iterator(); + + for (int t = 0; t < r.size(); t++) { + assertEquals(it.next().toSql(), r.column(3)); + + // everything else should be null + for (int i = 0; i < 10; i++) { + if (i != 3) { + assertNull(r.column(i)); + } + } + + r.advanceRow(); + } + }, new IndexInfo[0]); + } + + private SqlTypedParamValue param(Object value) { + return new SqlTypedParamValue(value, DataTypes.fromJava(value)); + } + + private Tuple sql(String sql, List params) { + EsIndex test = new EsIndex("test", mapping); + Analyzer analyzer = new Analyzer(new FunctionRegistry(), IndexResolution.valid(test), TimeZone.getTimeZone("UTC")); + Command cmd = (Command) analyzer.analyze(parser.createStatement(sql, params), true); + + IndexResolver resolver = mock(IndexResolver.class); + when(resolver.clusterName()).thenReturn(CLUSTER_NAME); + + SqlSession session = new SqlSession(null, null, null, resolver, null, null, null); + return new Tuple<>(cmd, session); + } + + private void executeCommand(String 
sql, Consumer consumer, IndexInfo... infos) throws Exception { + executeCommand(sql, emptyList(), consumer, infos); + } + + @SuppressWarnings({ "unchecked", "rawtypes" }) + private void executeCommand(String sql, List params, Consumer consumer, IndexInfo... infos) + throws Exception { + Tuple tuple = sql(sql, params); + + IndexResolver resolver = tuple.v2().indexResolver(); + + doAnswer(invocation -> { + ((ActionListener) invocation.getArguments()[3]).onResponse(new LinkedHashSet<>(asList(infos))); + return Void.TYPE; + }).when(resolver).resolveNames(any(), any(), any(), any()); + + tuple.v1().execute(tuple.v2(), wrap(consumer::accept, ex -> fail(ex.getMessage()))); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java new file mode 100644 index 0000000000000..2a3d87b65c964 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.planner; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; +import org.elasticsearch.xpack.sql.analysis.index.EsIndex; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; +import org.elasticsearch.xpack.sql.analysis.index.MappingException; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.plan.logical.Filter; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.plan.logical.Project; +import org.elasticsearch.xpack.sql.planner.QueryTranslator.QueryTranslation; +import org.elasticsearch.xpack.sql.querydsl.query.Query; +import org.elasticsearch.xpack.sql.querydsl.query.RangeQuery; +import org.elasticsearch.xpack.sql.querydsl.query.TermQuery; +import org.elasticsearch.xpack.sql.type.EsField; +import org.elasticsearch.xpack.sql.type.TypesTests; +import org.joda.time.DateTime; + +import java.util.Map; +import java.util.TimeZone; + +public class QueryTranslatorTests extends ESTestCase { + + private SqlParser parser; + private IndexResolution getIndexResult; + private FunctionRegistry functionRegistry; + private Analyzer analyzer; + + public QueryTranslatorTests() { + parser = new SqlParser(); + functionRegistry = new FunctionRegistry(); + + Map mapping = TypesTests.loadMapping("mapping-multi-field-variation.json"); + + EsIndex test = new EsIndex("test", mapping); + getIndexResult = IndexResolution.valid(test); + analyzer = new Analyzer(functionRegistry, getIndexResult, TimeZone.getTimeZone("UTC")); + } + + private LogicalPlan plan(String sql) { + return analyzer.analyze(parser.createStatement(sql), true); + } + + public void testTermEqualityAnalyzer() { + LogicalPlan p = plan("SELECT some.string FROM test WHERE some.string = 'value'"); + assertTrue(p instanceof Project); + p = ((Project) p).child(); + assertTrue(p instanceof Filter); + Expression condition = ((Filter) p).condition(); + QueryTranslation translation 
= QueryTranslator.toQuery(condition, false); + Query query = translation.query; + assertTrue(query instanceof TermQuery); + TermQuery tq = (TermQuery) query; + assertEquals("some.string.typical", tq.term()); + assertEquals("value", tq.value()); + } + + public void testTermEqualityAnalyzerAmbiguous() { + LogicalPlan p = plan("SELECT some.string FROM test WHERE some.ambiguous = 'value'"); + assertTrue(p instanceof Project); + p = ((Project) p).child(); + assertTrue(p instanceof Filter); + Expression condition = ((Filter) p).condition(); + // the message is checked elsewhere (in FieldAttributeTests) + expectThrows(MappingException.class, () -> QueryTranslator.toQuery(condition, false)); + } + + public void testTermEqualityNotAnalyzed() { + LogicalPlan p = plan("SELECT some.string FROM test WHERE int = 5"); + assertTrue(p instanceof Project); + p = ((Project) p).child(); + assertTrue(p instanceof Filter); + Expression condition = ((Filter) p).condition(); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + Query query = translation.query; + assertTrue(query instanceof TermQuery); + TermQuery tq = (TermQuery) query; + assertEquals("int", tq.term()); + assertEquals(5, tq.value()); + } + + public void testComparisonAgainstColumns() { + LogicalPlan p = plan("SELECT some.string FROM test WHERE date > int"); + assertTrue(p instanceof Project); + p = ((Project) p).child(); + assertTrue(p instanceof Filter); + Expression condition = ((Filter) p).condition(); + SqlIllegalArgumentException ex = expectThrows(SqlIllegalArgumentException.class, () -> QueryTranslator.toQuery(condition, false)); + assertEquals("Line 1:43: Comparisons against variables are not (currently) supported; offender [int] in [>]", ex.getMessage()); + } + + public void testDateRange() { + LogicalPlan p = plan("SELECT some.string FROM test WHERE date > 1969-05-13"); + assertTrue(p instanceof Project); + p = ((Project) p).child(); + assertTrue(p instanceof Filter); + Expression condition = ((Filter) p).condition(); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + Query query = translation.query; + assertTrue(query instanceof RangeQuery); + RangeQuery rq = (RangeQuery) query; + assertEquals("date", rq.field()); + assertEquals(1951, rq.lower()); + } + + public void testDateRangeLiteral() { + LogicalPlan p = plan("SELECT some.string FROM test WHERE date > '1969-05-13'"); + assertTrue(p instanceof Project); + p = ((Project) p).child(); + assertTrue(p instanceof Filter); + Expression condition = ((Filter) p).condition(); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + Query query = translation.query; + assertTrue(query instanceof RangeQuery); + RangeQuery rq = (RangeQuery) query; + assertEquals("date", rq.field()); + assertEquals("1969-05-13", rq.lower()); + } + + public void testDateRangeCast() { + LogicalPlan p = plan("SELECT some.string FROM test WHERE date > CAST('1969-05-13T12:34:56Z' AS DATE)"); + assertTrue(p instanceof Project); + p = ((Project) p).child(); + assertTrue(p instanceof Filter); + Expression condition = ((Filter) p).condition(); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + Query query = translation.query; + assertTrue(query instanceof RangeQuery); + RangeQuery rq = (RangeQuery) query; + assertEquals("date", rq.field()); + assertEquals(DateTime.parse("1969-05-13T12:34:56Z"), rq.lower()); + } +} \ No newline at end of file diff --git 
a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/VerifierErrorMessagesTests.java new file mode 100644 index 0000000000000..154885261fdb8 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/VerifierErrorMessagesTests.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.planner; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; +import org.elasticsearch.xpack.sql.analysis.index.EsIndex; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; +import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; +import org.elasticsearch.xpack.sql.optimizer.Optimizer; +import org.elasticsearch.xpack.sql.parser.SqlParser; +import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.EsField; +import org.elasticsearch.xpack.sql.type.KeywordEsField; +import org.elasticsearch.xpack.sql.type.TextEsField; + +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.TimeZone; + +public class VerifierErrorMessagesTests extends ESTestCase { + + private SqlParser parser = new SqlParser(); + private Optimizer optimizer = new Optimizer(); + private Planner planner = new Planner(); + + private String verify(String sql) { + Map mapping = new LinkedHashMap<>(); + mapping.put("bool", new EsField("bool", DataType.BOOLEAN, Collections.emptyMap(), true)); + mapping.put("int", new EsField("int", DataType.INTEGER, Collections.emptyMap(), true)); + mapping.put("text", new TextEsField("text", Collections.emptyMap(), true)); + mapping.put("keyword", new KeywordEsField("keyword", Collections.emptyMap(), true, DataType.KEYWORD.defaultPrecision, true)); + EsIndex test = new EsIndex("test", mapping); + IndexResolution getIndexResult = IndexResolution.valid(test); + Analyzer analyzer = new Analyzer(new FunctionRegistry(), getIndexResult, TimeZone.getTimeZone("UTC")); + LogicalPlan plan = optimizer.optimize(analyzer.analyze(parser.createStatement(sql), true)); + PlanningException e = expectThrows(PlanningException.class, () -> planner.mapPlan(plan, true)); + assertTrue(e.getMessage().startsWith("Found ")); + String header = "Found 1 problem(s)\nline "; + return e.getMessage().substring(header.length()); + } + + public void testMultiGroupBy() { + assertEquals("1:32: Currently, only a single expression can be used with GROUP BY; please select one of [bool, keyword]", + verify("SELECT bool FROM test GROUP BY bool, keyword")); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CliFormatterTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CliFormatterTests.java new file mode 100644 index 0000000000000..1fe3c9fc89e99 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CliFormatterTests.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.test.ESTestCase; + +import java.sql.JDBCType; +import java.util.Arrays; + +import static org.hamcrest.Matchers.arrayWithSize; + +public class CliFormatterTests extends ESTestCase { + private final SqlQueryResponse firstResponse = new SqlQueryResponse("", + Arrays.asList( + new ColumnInfo("", "foo", "string", JDBCType.VARCHAR, 0), + new ColumnInfo("", "bar", "long", JDBCType.BIGINT, 15), + new ColumnInfo("", "15charwidename!", "double", JDBCType.DOUBLE, 25), + new ColumnInfo("", "superduperwidename!!!", "double", JDBCType.DOUBLE, 25), + new ColumnInfo("", "baz", "keyword", JDBCType.VARCHAR, 0)), + Arrays.asList( + Arrays.asList("15charwidedata!", 1, 6.888, 12, "rabbit"), + Arrays.asList("dog", 1.7976931348623157E308, 123124.888, 9912, "goat"))); + private final CliFormatter formatter = new CliFormatter(firstResponse); + + /** + * Tests for {@link CliFormatter#formatWithHeader(SqlQueryResponse)}, values + * of exactly the minimum column size, column names of exactly + * the minimum column size, column headers longer than the + * minimum column size, and values longer than the minimum + * column size. + */ + public void testFormatWithHeader() { + String[] result = formatter.formatWithHeader(firstResponse).split("\n"); + assertThat(result, arrayWithSize(4)); + assertEquals(" foo | bar |15charwidename!|superduperwidename!!!| baz ", result[0]); + assertEquals("---------------+----------------------+---------------+---------------------+---------------", result[1]); + assertEquals("15charwidedata!|1 |6.888 |12 |rabbit ", result[2]); + assertEquals("dog |1.7976931348623157E308|123124.888 |9912 |goat ", result[3]); + } + + /** + * Tests for {@link CliFormatter#formatWithoutHeader(SqlQueryResponse)} and + * truncation of long columns. + */ + public void testFormatWithoutHeader() { + String[] result = formatter.formatWithoutHeader(new SqlQueryResponse("", null, + Arrays.asList( + Arrays.asList("ohnotruncateddata", 4, 1, 77, "wombat"), + Arrays.asList("dog", 2, 123124.888, 9912, "goat")))).split("\n"); + assertThat(result, arrayWithSize(2)); + assertEquals("ohnotruncatedd~|4 |1 |77 |wombat ", result[0]); + assertEquals("dog |2 |123124.888 |9912 |goat ", result[1]); + } + + /** + * Ensure that our estimates are perfect in at least some cases. + */ + public void testEstimateSize() { + assertEquals(formatter.formatWithHeader(firstResponse).length(), + formatter.estimateSize(firstResponse.rows().size() + 2)); + assertEquals(formatter.formatWithoutHeader(firstResponse).length(), + formatter.estimateSize(firstResponse.rows().size())); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlPluginTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlPluginTests.java new file mode 100644 index 0000000000000..4a60224666515 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlPluginTests.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.session.Cursors; + +import java.util.Collections; + +import static org.hamcrest.Matchers.empty; +import static org.mockito.Mockito.mock; + +public class SqlPluginTests extends ESTestCase { + + public void testSqlDisabled() { + SqlPlugin plugin = new SqlPlugin(false, new SqlLicenseChecker((mode) -> {})); + assertThat(plugin.createComponents(mock(Client.class), "cluster", new NamedWriteableRegistry(Cursors.getNamedWriteables())), + empty()); + assertThat(plugin.getActions(), empty()); + assertThat(plugin.getRestHandlers(Settings.EMPTY, mock(RestController.class), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, new SettingsFilter(Settings.EMPTY, Collections.emptyList()), + mock(IndexNameExpressionResolver.class), () -> mock(DiscoveryNodes.class)), empty()); + } + +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/TextFormatTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/TextFormatTests.java new file mode 100644 index 0000000000000..1c6bbfa69e816 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/TextFormatTests.java @@ -0,0 +1,156 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.plugin; + + +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import java.util.ArrayList; +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.sql.plugin.TextFormat.CSV; +import static org.elasticsearch.xpack.sql.plugin.TextFormat.TSV; +import static org.hamcrest.CoreMatchers.is; + +public class TextFormatTests extends ESTestCase { + + public void testPlainTextDetection() { + TextFormat text = TextFormat.fromMediaTypeOrFormat("text/plain"); + assertThat(text, is(TextFormat.PLAIN_TEXT)); + } + + public void testCsvDetection() { + TextFormat text = TextFormat.fromMediaTypeOrFormat("text/csv"); + assertThat(text, is(CSV)); + } + + public void testTsvDetection() { + TextFormat text = TextFormat.fromMediaTypeOrFormat("text/tab-separated-values"); + assertThat(text, is(TSV)); + } + + public void testInvalidFormat() { + Exception e = expectThrows(IllegalArgumentException.class, () -> TextFormat.fromMediaTypeOrFormat("text/garbage")); + assertEquals("invalid format [text/garbage]", e.getMessage()); + } + + public void testCsvContentType() { + assertEquals("text/csv; charset=utf-8; header=present", CSV.contentType(req())); + } + + public void testCsvContentTypeWithoutHeader() { + assertEquals("text/csv; charset=utf-8; header=absent", CSV.contentType(reqNoHeader())); + } + + public void testTsvContentType() { + assertEquals("text/tab-separated-values; charset=utf-8", TSV.contentType(req())); + } + + public void testCsvEscaping() { + assertEquals("string", CSV.maybeEscape("string")); + assertEquals("", CSV.maybeEscape("")); + assertEquals("\"\"\"\"", CSV.maybeEscape("\"")); + assertEquals("\"\"\",\"\"\"", CSV.maybeEscape("\",\"")); + assertEquals("\"\"\"quo\"\"ted\"\"\"", CSV.maybeEscape("\"quo\"ted\"")); + } + + public void testTsvEscaping() { + assertEquals("string", TSV.maybeEscape("string")); + assertEquals("", TSV.maybeEscape("")); + assertEquals("\"", TSV.maybeEscape("\"")); + assertEquals("\\t", TSV.maybeEscape("\t")); + assertEquals("\\n\"\\t", TSV.maybeEscape("\n\"\t")); + } + + public void testCsvFormatWithEmptyData() { + String text = CSV.format(null, req(), emptyData()); + assertEquals("name\r\n", text); + } + + public void testTsvFormatWithEmptyData() { + String text = TSV.format(null, req(), emptyData()); + assertEquals("name\n", text); + } + + public void testCsvFormatWithRegularData() { + String text = CSV.format(null, req(), regularData()); + assertEquals("string,number\r\n" + + "Along The River Bank,708\r\n" + + "Mind Train,280\r\n", + text); + } + + public void testTsvFormatWithRegularData() { + String text = TSV.format(null, req(), regularData()); + assertEquals("string\tnumber\n" + + "Along The River Bank\t708\n" + + "Mind Train\t280\n", + text); + } + + public void testCsvFormatWithEscapedData() { + String text = CSV.format(null, req(), escapedData()); + assertEquals("first,\"\"\"special\"\"\"\r\n" + + "normal,\"\"\"quo\"\"ted\"\",\n\"\r\n" + + "commas,\"a,b,c,\n,d,e,\t\n\"\r\n" + , text); + } + + public void testTsvFormatWithEscapedData() { + String text = TSV.format(null, req(), escapedData()); + assertEquals("first\t\"special\"\n" + + "normal\t\"quo\"ted\",\\n\n" + + 
"commas\ta,b,c,\\n,d,e,\\t\\n\n" + , text); + } + + private static SqlQueryResponse emptyData() { + return new SqlQueryResponse(null, singletonList(new ColumnInfo("index", "name", "keyword")), emptyList()); + } + + private static SqlQueryResponse regularData() { + // headers + List headers = new ArrayList<>(); + headers.add(new ColumnInfo("index", "string", "keyword")); + headers.add(new ColumnInfo("index", "number", "integer")); + + // values + List> values = new ArrayList<>(); + values.add(asList("Along The River Bank", 11 * 60 + 48)); + values.add(asList("Mind Train", 4 * 60 + 40)); + + return new SqlQueryResponse(null, headers, values); + } + + private static SqlQueryResponse escapedData() { + // headers + List headers = new ArrayList<>(); + headers.add(new ColumnInfo("index", "first", "keyword")); + headers.add(new ColumnInfo("index", "\"special\"", "keyword")); + + // values + List> values = new ArrayList<>(); + values.add(asList("normal", "\"quo\"ted\",\n")); + values.add(asList("commas", "a,b,c,\n,d,e,\t\n")); + + return new SqlQueryResponse(null, headers, values); + } + + private static RestRequest req() { + return new FakeRestRequest(); + } + + private static RestRequest reqNoHeader() { + return new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withParams(singletonMap("header", "absent")).build(); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainerTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainerTests.java new file mode 100644 index 0000000000000..f943e325cf72e --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainerTests.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.container; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.querydsl.query.BoolQuery; +import org.elasticsearch.xpack.sql.querydsl.query.MatchAll; +import org.elasticsearch.xpack.sql.querydsl.query.NestedQuery; +import org.elasticsearch.xpack.sql.querydsl.query.Query; +import org.elasticsearch.xpack.sql.querydsl.query.RangeQuery; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.LocationTests; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; + +public class QueryContainerTests extends ESTestCase { + private Location location = LocationTests.randomLocation(); + private String path = randomAlphaOfLength(5); + private String name = randomAlphaOfLength(5); + private boolean hasDocValues = randomBoolean(); + + public void testRewriteToContainNestedFieldNoQuery() { + Query expected = new NestedQuery(location, path, singletonMap(name, hasDocValues), new MatchAll(location)); + assertEquals(expected, QueryContainer.rewriteToContainNestedField(null, location, path, name, hasDocValues)); + } + + public void testRewriteToContainsNestedFieldWhenContainsNestedField() { + Query original = new BoolQuery(location, true, + new NestedQuery(location, path, singletonMap(name, hasDocValues), new MatchAll(location)), + new RangeQuery(location, randomAlphaOfLength(5), 0, randomBoolean(), 100, randomBoolean())); + assertSame(original, QueryContainer.rewriteToContainNestedField(original, location, path, name, randomBoolean())); + } + + public void testRewriteToContainsNestedFieldWhenCanAddNestedField() { + Query buddy = new RangeQuery(location, randomAlphaOfLength(5), 0, randomBoolean(), 100, randomBoolean()); + Query original = new BoolQuery(location, true, + new NestedQuery(location, path, emptyMap(), new MatchAll(location)), + buddy); + Query expected = new BoolQuery(location, true, + new NestedQuery(location, path, singletonMap(name, hasDocValues), new MatchAll(location)), + buddy); + assertEquals(expected, QueryContainer.rewriteToContainNestedField(original, location, path, name, hasDocValues)); + } + + public void testRewriteToContainsNestedFieldWhenDoesNotContainNestedFieldAndCantAdd() { + Query original = new RangeQuery(location, randomAlphaOfLength(5), 0, randomBoolean(), 100, randomBoolean()); + Query expected = new BoolQuery(location, true, + original, + new NestedQuery(location, path, singletonMap(name, hasDocValues), new MatchAll(location))); + assertEquals(expected, QueryContainer.rewriteToContainNestedField(original, location, path, name, hasDocValues)); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQueryTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQueryTests.java new file mode 100644 index 0000000000000..901709479d3d8 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQueryTests.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.search.sort.NestedSortBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.LocationTests; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.function.Supplier; + +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.hamcrest.Matchers.hasEntry; +import static java.util.Collections.singletonMap; + +public class BoolQueryTests extends ESTestCase { + static BoolQuery randomBoolQuery(int depth) { + return new BoolQuery(LocationTests.randomLocation(), randomBoolean(), + NestedQueryTests.randomQuery(depth), NestedQueryTests.randomQuery(depth)); + } + + public void testEqualsAndHashCode() { + checkEqualsAndHashCode(randomBoolQuery(5), BoolQueryTests::copy, BoolQueryTests::mutate); + } + + private static BoolQuery copy(BoolQuery query) { + return new BoolQuery(query.location(), query.isAnd(), query.left(), query.right()); + } + + private static BoolQuery mutate(BoolQuery query) { + List> options = Arrays.asList( + q -> new BoolQuery(LocationTests.mutate(q.location()), q.isAnd(), q.left(), q.right()), + q -> new BoolQuery(q.location(), false == q.isAnd(), q.left(), q.right()), + q -> new BoolQuery(q.location(), q.isAnd(), randomValueOtherThan(q.left(), () -> NestedQueryTests.randomQuery(5)), q.right()), + q -> new BoolQuery(q.location(), q.isAnd(), q.left(), randomValueOtherThan(q.right(), () -> NestedQueryTests.randomQuery(5)))); + return randomFrom(options).apply(query); + } + + public void testContainsNestedField() { + assertFalse(boolQueryWithoutNestedChildren().containsNestedField(randomAlphaOfLength(5), randomAlphaOfLength(5))); + + String path = randomAlphaOfLength(5); + String field = randomAlphaOfLength(5); + assertTrue(boolQueryWithNestedChildren(path, field).containsNestedField(path, field)); + } + + public void testAddNestedField() { + Query q = boolQueryWithoutNestedChildren(); + assertSame(q, q.addNestedField(randomAlphaOfLength(5), randomAlphaOfLength(5), randomBoolean())); + + String path = randomAlphaOfLength(5); + String field = randomAlphaOfLength(5); + q = boolQueryWithNestedChildren(path, field); + String newField = randomAlphaOfLength(5); + boolean hasDocValues = randomBoolean(); + Query rewritten = q.addNestedField(path, newField, hasDocValues); + assertNotSame(q, rewritten); + assertTrue(rewritten.containsNestedField(path, newField)); + } + + public void testEnrichNestedSort() { + Query q = boolQueryWithoutNestedChildren(); + NestedSortBuilder sort = new NestedSortBuilder(randomAlphaOfLength(5)); + q.enrichNestedSort(sort); + assertNull(sort.getFilter()); + + String path = randomAlphaOfLength(5); + String field = randomAlphaOfLength(5); + q = boolQueryWithNestedChildren(path, field); + sort = new NestedSortBuilder(path); + q.enrichNestedSort(sort); + assertNotNull(sort.getFilter()); + } + + private Query boolQueryWithoutNestedChildren() { + return new BoolQuery(LocationTests.randomLocation(), randomBoolean(), + new MatchAll(LocationTests.randomLocation()), new MatchAll(LocationTests.randomLocation())); + } + + private Query boolQueryWithNestedChildren(String path, String field) { + NestedQuery match = new NestedQuery(LocationTests.randomLocation(), path, + singletonMap(field, randomBoolean()), new 
MatchAll(LocationTests.randomLocation())); + Query matchAll = new MatchAll(LocationTests.randomLocation()); + Query left; + Query right; + if (randomBoolean()) { + left = match; + right = matchAll; + } else { + left = matchAll; + right = match; + } + return new BoolQuery(LocationTests.randomLocation(), randomBoolean(), left, right); + } + + public void testToString() { + assertEquals("BoolQuery@1:2[ExistsQuery@1:2[f1] AND ExistsQuery@1:8[f2]]", + new BoolQuery(new Location(1, 1), true, + new ExistsQuery(new Location(1, 1), "f1"), + new ExistsQuery(new Location(1, 7), "f2")).toString()); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/LeafQueryTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/LeafQueryTests.java new file mode 100644 index 0000000000000..232040b79bc85 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/LeafQueryTests.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.sort.NestedSortBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.LocationTests; + +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; + +public class LeafQueryTests extends ESTestCase { + private static class DummyLeafQuery extends LeafQuery { + private DummyLeafQuery(Location location) { + super(location); + } + + @Override + public QueryBuilder asBuilder() { + return null; + } + + @Override + protected String innerToString() { + return ""; + } + } + + public void testEqualsAndHashCode() { + DummyLeafQuery query = new DummyLeafQuery(LocationTests.randomLocation()); + checkEqualsAndHashCode(query, LeafQueryTests::copy, LeafQueryTests::mutate); + } + + private static DummyLeafQuery copy(DummyLeafQuery query) { + return new DummyLeafQuery(query.location()); + } + + private static DummyLeafQuery mutate(DummyLeafQuery query) { + return new DummyLeafQuery(LocationTests.mutate(query.location())); + } + + public void testContainsNestedField() { + Query query = new DummyLeafQuery(LocationTests.randomLocation()); + // Leaf queries don't contain nested fields. + assertFalse(query.containsNestedField(randomAlphaOfLength(5), randomAlphaOfLength(5))); + } + + public void testAddNestedField() { + Query query = new DummyLeafQuery(LocationTests.randomLocation()); + // Leaf queries don't contain nested fields. + assertSame(query, query.addNestedField(randomAlphaOfLength(5), randomAlphaOfLength(5), randomBoolean())); + } + + public void testEnrichNestedSort() { + Query query = new DummyLeafQuery(LocationTests.randomLocation()); + // Leaf queries don't contain nested fields. 
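+ // Because of that, enriching a nested sort from a leaf query is expected to be a no-op:
+ // the NestedSortBuilder below should be left with a null filter, which the assertion checks.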
+ NestedSortBuilder sort = new NestedSortBuilder(randomAlphaOfLength(5)); + query.enrichNestedSort(sort); + assertNull(sort.getFilter()); + } + + public void testToString() { + assertEquals("DummyLeafQuery@1:2[]", new DummyLeafQuery(new Location(1, 1)).toString()); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQueryTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQueryTests.java new file mode 100644 index 0000000000000..431a6a146aee0 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQueryTests.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.index.query.MatchQueryBuilder; +import org.elasticsearch.index.query.Operator; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MatchQueryPredicate; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.LocationTests; + +import java.util.Arrays; +import java.util.List; +import java.util.function.Function; + +import static org.hamcrest.Matchers.equalTo; +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; + +public class MatchQueryTests extends ESTestCase { + static MatchQuery randomMatchQuery() { + return new MatchQuery( + LocationTests.randomLocation(), + randomAlphaOfLength(5), + randomAlphaOfLength(5)); + // TODO add the predicate + } + + public void testEqualsAndHashCode() { + checkEqualsAndHashCode(randomMatchQuery(), MatchQueryTests::copy, MatchQueryTests::mutate); + } + + private static MatchQuery copy(MatchQuery query) { + return new MatchQuery(query.location(), query.name(), query.text(), query.predicate()); + } + + private static MatchQuery mutate(MatchQuery query) { + List> options = Arrays.asList( + q -> new MatchQuery(LocationTests.mutate(q.location()), q.name(), q.text(), q.predicate()), + q -> new MatchQuery(q.location(), randomValueOtherThan(q.name(), () -> randomAlphaOfLength(5)), q.text(), q.predicate()), + q -> new MatchQuery(q.location(), q.name(), randomValueOtherThan(q.text(), () -> randomAlphaOfLength(5)), q.predicate())); + // TODO mutate the predicate + return randomFrom(options).apply(query); + } + + public void testQueryBuilding() { + MatchQueryBuilder qb = getBuilder("lenient=true"); + assertThat(qb.lenient(), equalTo(true)); + + qb = getBuilder("lenient=true;operator=AND"); + assertThat(qb.lenient(), equalTo(true)); + assertThat(qb.operator(), equalTo(Operator.AND)); + + Exception e = expectThrows(IllegalArgumentException.class, () -> getBuilder("pizza=yummy")); + assertThat(e.getMessage(), equalTo("illegal match option [pizza]")); + + e = expectThrows(IllegalArgumentException.class, () -> getBuilder("operator=aoeu")); + assertThat(e.getMessage(), equalTo("No enum constant org.elasticsearch.index.query.Operator.AOEU")); + } + + private static MatchQueryBuilder getBuilder(String options) { + final Location location = new Location(1, 1); + final MatchQueryPredicate mmqp = new MatchQueryPredicate(location, null, "eggplant", options); + final MatchQuery mmq = new MatchQuery(location, "eggplant", "foo", mmqp); + return (MatchQueryBuilder) mmq.asBuilder(); + } 
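+
+    /*
+     * The option strings exercised above ("lenient=true", "lenient=true;operator=AND") follow a
+     * semicolon-separated key=value layout. The helper below is only a minimal sketch of that
+     * layout for readers of these tests; it is not the parser MatchQueryPredicate actually uses,
+     * and the helper name is invented purely for illustration.
+     */
+    private static java.util.Map<String, String> sketchParseOptions(String options) {
+        java.util.Map<String, String> parsed = new java.util.LinkedHashMap<>();
+        if (options.isEmpty()) {
+            return parsed;                       // an empty string means "use the defaults"
+        }
+        for (String pair : options.split(";")) { // e.g. "lenient=true;operator=AND"
+            String[] keyValue = pair.split("=", 2);
+            parsed.put(keyValue[0], keyValue.length == 2 ? keyValue[1] : "");
+        }
+        return parsed;
+    }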
+ + public void testToString() { + final Location location = new Location(1, 1); + final MatchQueryPredicate mmqp = new MatchQueryPredicate(location, null, "eggplant", ""); + final MatchQuery mmq = new MatchQuery(location, "eggplant", "foo", mmqp); + assertEquals("MatchQuery@1:2[eggplant:foo]", mmq.toString()); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQueryTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQueryTests.java new file mode 100644 index 0000000000000..ba2d548cde9dd --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQueryTests.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.index.query.MultiMatchQueryBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MultiMatchQueryPredicate; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.HashMap; +import java.util.Map; +import java.util.TreeMap; + +import static org.hamcrest.Matchers.equalTo; + +public class MultiMatchQueryTests extends ESTestCase { + + public void testQueryBuilding() { + MultiMatchQueryBuilder qb = getBuilder("lenient=true"); + assertThat(qb.lenient(), equalTo(true)); + + qb = getBuilder("use_dis_max=true;type=best_fields"); + assertThat(qb.useDisMax(), equalTo(true)); + assertThat(qb.getType(), equalTo(MultiMatchQueryBuilder.Type.BEST_FIELDS)); + + Exception e = expectThrows(IllegalArgumentException.class, () -> getBuilder("pizza=yummy")); + assertThat(e.getMessage(), equalTo("illegal multi_match option [pizza]")); + + e = expectThrows(ElasticsearchParseException.class, () -> getBuilder("type=aoeu")); + assertThat(e.getMessage(), equalTo("failed to parse [multi_match] query type [aoeu]. unknown type.")); + } + + private static MultiMatchQueryBuilder getBuilder(String options) { + final Location location = new Location(1, 1); + final MultiMatchQueryPredicate mmqp = new MultiMatchQueryPredicate(location, "foo,bar", "eggplant", options); + final Map fields = new HashMap<>(); + fields.put("foo", 1.0f); + fields.put("bar", 1.0f); + final MultiMatchQuery mmq = new MultiMatchQuery(location, "eggplant", fields, mmqp); + return (MultiMatchQueryBuilder) mmq.asBuilder(); + } + + public void testToString() { + final Location location = new Location(1, 1); + final MultiMatchQueryPredicate mmqp = new MultiMatchQueryPredicate(location, "foo,bar", "eggplant", ""); + // Use a TreeMap so we get the fields in a predictable order. 
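+ // (With a plain HashMap the expected string below would depend on unspecified iteration
+ // order and could be flaky; TreeMap sorts the keys, so "bar" is rendered before "foo".)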
+ final Map fields = new TreeMap<>(); + fields.put("foo", 1.0f); + fields.put("bar", 1.0f); + final MultiMatchQuery mmq = new MultiMatchQuery(location, "eggplant", fields, mmqp); + assertEquals("MultiMatchQuery@1:2[{bar=1.0, foo=1.0}:eggplant]", mmq.toString()); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/NestedQueryTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/NestedQueryTests.java new file mode 100644 index 0000000000000..7cd53bc73defa --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/NestedQueryTests.java @@ -0,0 +1,135 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.search.sort.NestedSortBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.tree.Location; +import org.elasticsearch.xpack.sql.tree.LocationTests; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.function.Supplier; + +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.hamcrest.Matchers.hasEntry; +import static java.util.Collections.singletonMap; + +public class NestedQueryTests extends ESTestCase { + static Query randomQuery(int depth) { + List> options = new ArrayList<>(); + options.add(MatchQueryTests::randomMatchQuery); + if (depth > 0) { + options.add(() -> randomNestedQuery(depth - 1)); + options.add(() -> BoolQueryTests.randomBoolQuery(depth - 1)); + } + return randomFrom(options).get(); + } + + static NestedQuery randomNestedQuery(int depth) { + return new NestedQuery(LocationTests.randomLocation(), randomAlphaOfLength(5), randomFields(), randomQuery(depth)); + } + + private static Map randomFields() { + int size = between(0, 5); + Map fields = new HashMap<>(size); + while (fields.size() < size) { + fields.put(randomAlphaOfLength(5), randomBoolean()); + } + return fields; + } + + public void testEqualsAndHashCode() { + checkEqualsAndHashCode(randomNestedQuery(5), NestedQueryTests::copy, NestedQueryTests::mutate); + } + + private static NestedQuery copy(NestedQuery query) { + return new NestedQuery(query.location(), query.path(), query.fields(), query.child()); + } + + private static NestedQuery mutate(NestedQuery query) { + List> options = Arrays.asList( + q -> new NestedQuery(LocationTests.mutate(q.location()), q.path(), q.fields(), q.child()), + q -> new NestedQuery(q.location(), randomValueOtherThan(q.path(), () -> randomAlphaOfLength(5)), q.fields(), q.child()), + q -> new NestedQuery(q.location(), q.path(), randomValueOtherThan(q.fields(), NestedQueryTests::randomFields), q.child()), + q -> new NestedQuery(q.location(), q.path(), q.fields(), randomValueOtherThan(q.child(), () -> randomQuery(5)))); + return randomFrom(options).apply(query); + } + + public void testContainsNestedField() { + NestedQuery q = randomNestedQuery(0); + for (String field : q.fields().keySet()) { + assertTrue(q.containsNestedField(q.path(), field)); + assertFalse(q.containsNestedField(randomValueOtherThan(q.path(), () -> 
randomAlphaOfLength(5)), field)); + } + assertFalse(q.containsNestedField(q.path(), randomValueOtherThanMany(q.fields()::containsKey, () -> randomAlphaOfLength(5)))); + } + + public void testAddNestedField() { + NestedQuery q = randomNestedQuery(0); + for (String field : q.fields().keySet()) { + // add does nothing if the field is already there + assertSame(q, q.addNestedField(q.path(), field, randomBoolean())); + String otherPath = randomValueOtherThan(q.path(), () -> randomAlphaOfLength(5)); + // add does nothing if the path doesn't match + assertSame(q, q.addNestedField(otherPath, randomAlphaOfLength(5), randomBoolean())); + } + + // if the field isn't in the list then add rewrites to a query with all the old fields and the new one + String newField = randomValueOtherThanMany(q.fields()::containsKey, () -> randomAlphaOfLength(5)); + boolean hasDocValues = randomBoolean(); + NestedQuery added = (NestedQuery) q.addNestedField(q.path(), newField, hasDocValues); + assertNotSame(q, added); + assertThat(added.fields(), hasEntry(newField, hasDocValues)); + assertTrue(added.containsNestedField(q.path(), newField)); + for (Map.Entry field : q.fields().entrySet()) { + assertThat(added.fields(), hasEntry(field.getKey(), field.getValue())); + assertTrue(added.containsNestedField(q.path(), field.getKey())); + } + } + + public void testEnrichNestedSort() { + NestedQuery q = randomNestedQuery(0); + + // enrich adds the filter if the path matches + { + NestedSortBuilder sort = new NestedSortBuilder(q.path()); + q.enrichNestedSort(sort); + assertEquals(q.child().asBuilder(), sort.getFilter()); + } + + // but doesn't if it doesn't match + { + NestedSortBuilder sort = new NestedSortBuilder(randomValueOtherThan(q.path(), () -> randomAlphaOfLength(5))); + q.enrichNestedSort(sort); + assertNull(sort.getFilter()); + } + + // enriching with the same query twice is fine + { + NestedSortBuilder sort = new NestedSortBuilder(q.path()); + q.enrichNestedSort(sort); + assertEquals(q.child().asBuilder(), sort.getFilter()); + q.enrichNestedSort(sort); + + // But enriching using another query is not + NestedQuery other = new NestedQuery(LocationTests.randomLocation(), q.path(), q.fields(), + randomValueOtherThan(q.child(), () -> randomQuery(0))); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> other.enrichNestedSort(sort)); + assertEquals("nested query should have been grouped in one place", e.getMessage()); + } + } + + public void testToString() { + NestedQuery q = new NestedQuery(new Location(1, 1), "a.b", singletonMap("f", true), new MatchAll(new Location(1, 1))); + assertEquals("NestedQuery@1:2[a.b.{f=true}[MatchAll@1:2[]]]", q.toString()); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQueryTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQueryTests.java new file mode 100644 index 0000000000000..229a4392ed2e0 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQueryTests.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.querydsl.query; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.index.query.QueryStringQueryBuilder; +import org.elasticsearch.index.query.Operator; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.StringQueryPredicate; +import org.elasticsearch.xpack.sql.tree.Location; + +import java.util.Collections; + +import static org.hamcrest.Matchers.equalTo; + +public class QueryStringQueryTests extends ESTestCase { + + public void testQueryBuilding() { + QueryStringQueryBuilder qb = getBuilder("lenient=true"); + assertThat(qb.lenient(), equalTo(true)); + + qb = getBuilder("lenient=true;default_operator=AND"); + assertThat(qb.lenient(), equalTo(true)); + assertThat(qb.defaultOperator(), equalTo(Operator.AND)); + + Exception e = expectThrows(IllegalArgumentException.class, () -> getBuilder("pizza=yummy")); + assertThat(e.getMessage(), equalTo("illegal query_string option [pizza]")); + + e = expectThrows(ElasticsearchParseException.class, () -> getBuilder("type=aoeu")); + assertThat(e.getMessage(), equalTo("failed to parse [multi_match] query type [aoeu]. unknown type.")); + } + + private static QueryStringQueryBuilder getBuilder(String options) { + final Location location = new Location(1, 1); + final StringQueryPredicate mmqp = new StringQueryPredicate(location, "eggplant", options); + final QueryStringQuery mmq = new QueryStringQuery(location, "eggplant", Collections.singletonMap("foo", 1.0f), mmqp); + return (QueryStringQueryBuilder) mmq.asBuilder(); + } + + + public void testToString() { + final Location location = new Location(1, 1); + final StringQueryPredicate mmqp = new StringQueryPredicate(location, "eggplant", ""); + final QueryStringQuery mmq = new QueryStringQuery(location, "eggplant", Collections.singletonMap("foo", 1.0f), mmqp); + assertEquals("QueryStringQuery@1:2[{foo=1.0}:eggplant]", mmq.toString()); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/AbstractNodeTestCase.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/AbstractNodeTestCase.java new file mode 100644 index 0000000000000..b5b0adfb0e6a9 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/AbstractNodeTestCase.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.tree; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +/** + * Superclass for tests of subclasses of {@link Node}. + */ +public abstract class AbstractNodeTestCase> extends ESTestCase { + /** + * Make a new random instance. + */ + protected abstract T randomInstance(); + /** + * Mutate an instance into some other similar instance that + * shouldn't be {@link #equals} to the original. + */ + protected abstract T mutate(T instance); + /** + * Copy and instance so it isn't {@code ==} but should still + * be {@link #equals}. + */ + protected abstract T copy(T instance); + + /** + * Test this subclass's implementation of {@link Node#transformNodeProps}. + */ + public abstract void testTransform(); + /** + * Test this subclass's implementation of {@link Node#replaceChildren}. 
+ */ + public abstract void testReplaceChildren(); + + public final void testHashCodeAndEquals() { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(randomInstance(), this::copy, this::mutate); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/LocationTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/LocationTests.java new file mode 100644 index 0000000000000..45c429d04028b --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/LocationTests.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.tree; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils.MutateFunction; + +import java.util.Arrays; +import java.util.List; +import java.util.function.Function; + +import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; + +public class LocationTests extends ESTestCase { + public static Location randomLocation() { + return new Location(between(1, Integer.MAX_VALUE), between(1, Integer.MAX_VALUE)); + } + + public static Location mutate(Location location) { + List> options = Arrays.asList( + l -> new Location( + randomValueOtherThan(l.getLineNumber(), () -> between(1, Integer.MAX_VALUE)), + l.getColumnNumber() - 1), + l -> new Location( + l.getLineNumber(), + randomValueOtherThan(l.getColumnNumber() - 1, () -> between(1, Integer.MAX_VALUE)))); + return randomFrom(options).apply(location); + } + + public void testEqualsAndHashCode() { + checkEqualsAndHashCode(randomLocation(), + l -> new Location(l.getLineNumber(), l.getColumnNumber() - 1), + LocationTests::mutate); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeSubclassTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeSubclassTests.java new file mode 100644 index 0000000000000..96c641d4fbbb2 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeSubclassTests.java @@ -0,0 +1,606 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.tree; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.FieldAttribute; +import org.elasticsearch.xpack.sql.expression.UnresolvedAttributeTests; +import org.elasticsearch.xpack.sql.expression.function.Function; +import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.sql.expression.function.aggregate.Avg; +import org.elasticsearch.xpack.sql.expression.function.aggregate.InnerAggregate; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.definition.AggExtractorInput; +import org.elasticsearch.xpack.sql.expression.predicate.fulltext.FullTextPredicate; +import org.elasticsearch.xpack.sql.expression.regex.LikePattern; +import org.elasticsearch.xpack.sql.tree.NodeTests.ChildrenAreAProperty; +import org.elasticsearch.xpack.sql.tree.NodeTests.Dummy; +import org.elasticsearch.xpack.sql.tree.NodeTests.NoChildren; +import org.mockito.exceptions.base.MockitoException; + +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.lang.reflect.WildcardType; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.ArrayList; +import java.util.Collection; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Supplier; + +import static java.util.Collections.emptyList; +import static java.util.stream.Collectors.toList; +import static org.mockito.Mockito.mock; + +/** + * Looks for all subclasses of {@link Node} and verifies that they + * implement {@link Node#info()} and + * {@link Node#replaceChildren(List)} sanely. It'd be better if + * each subclass had its own test case that verified those methods + * and any other interesting things that that they do but we're a + * long way from that and this gets the job done for now. + *
<p>
+ * This test attempts to use reflection to create believable nodes
+ * and manipulate them in believable ways with as little knowledge
+ * of the actual subclasses as possible. This is problematic because
+ * it is possible, for example, for nodes to stackoverflow because
+ * they can contain themselves. So this class does have some
+ * {@link Node}-subclass-specific knowledge. As little as I could get away with though.
+ * <p>
+ * When there are actual tests for a subclass of {@linkplain Node}
+ * then this class will do two things:
+ * <ul>
+ * <li>Skip running any tests for that subclass entirely.</li>
+ * <li>Delegate to that test to build nodes of that type when a
+ * node of that type is called for.</li>
+ * </ul>
+ */ +public class NodeSubclassTests> extends ESTestCase { + private final Class subclass; + + public NodeSubclassTests(Class subclass) { + this.subclass = subclass; + } + + public void testInfoParameters() throws Exception { + Constructor ctor = longestCtor(subclass); + Object[] nodeCtorArgs = ctorArgs(ctor); + T node = ctor.newInstance(nodeCtorArgs); + /* + * The count should be the same size as the longest constructor + * by convention. If it isn't then we're missing something. + */ + int expectedCount = ctor.getParameterCount(); + /* + * Except the first `Location` argument of the ctor is implicit + * in the parameters and not included. + */ + expectedCount -= 1; + assertEquals(expectedCount, node.info().properties().size()); + } + + /** + * Test {@link Node#transformPropertiesOnly(java.util.function.Function, Class)} + * implementation on {@link #subclass} which tests the implementation of + * {@link Node#info()}. And tests the actual {@link NodeInfo} subclass + * implementations in the process. + */ + public void testTransform() throws Exception { + Constructor ctor = longestCtor(subclass); + Object[] nodeCtorArgs = ctorArgs(ctor); + T node = ctor.newInstance(nodeCtorArgs); + + Type[] argTypes = ctor.getGenericParameterTypes(); + // start at 1 because we can't change Location. + for (int changedArgOffset = 1; changedArgOffset < ctor.getParameterCount(); changedArgOffset++) { + Object originalArgValue = nodeCtorArgs[changedArgOffset]; + + Type changedArgType = argTypes[changedArgOffset]; + Object changedArgValue = randomValueOtherThan(nodeCtorArgs[changedArgOffset], () -> makeArg(changedArgType)); + + B transformed = node.transformNodeProps(prop -> { + return Objects.equals(prop, originalArgValue) ? changedArgValue : prop; + }, Object.class); + + if (node.children().contains(originalArgValue) || node.children().equals(originalArgValue)) { + if (node.children().equals(emptyList()) && originalArgValue.equals(emptyList())) { + /* + * If the children are an empty list and the value + * we want to change is an empty list they'll be + * equal to one another so they'll come on this branch. + * This case is rare and hard to reason about so we're + * just going to assert nothing here and hope to catch + * it when we write non-reflection hack tests. + */ + continue; + } + // Transformation shouldn't apply to children. + assertSame(node, transformed); + } else { + assertTransformedOrReplacedChildren(node, transformed, ctor, nodeCtorArgs, changedArgOffset, changedArgValue); + } + } + } + + /** + * Test {@link Node#replaceChildren} implementation on {@link #subclass}. + */ + public void testReplaceChildren() throws Exception { + Constructor ctor = longestCtor(subclass); + Object[] nodeCtorArgs = ctorArgs(ctor); + T node = ctor.newInstance(nodeCtorArgs); + + Type[] argTypes = ctor.getGenericParameterTypes(); + // start at 1 because we can't change Location. + for (int changedArgOffset = 1; changedArgOffset < ctor.getParameterCount(); changedArgOffset++) { + Object originalArgValue = nodeCtorArgs[changedArgOffset]; + Type changedArgType = argTypes[changedArgOffset]; + + if (originalArgValue instanceof Collection) { + Collection col = (Collection) originalArgValue; + + if (col.isEmpty() || col instanceof EnumSet) { + /* + * We skip empty lists here because they'll spuriously + * pass the conditions below if statements even if they don't + * have anything to do with children. 
This might cause us to + * ignore the case where a parameter gets copied into the + * children and just happens to be empty but I don't really + * know another way. + */ + + continue; + } + + List originalList = (List) originalArgValue; + + if (node.children().equals(originalList)) { + // The arg we're looking at *is* the children + @SuppressWarnings("unchecked") // we pass a reasonable type so get reasonable results + List newChildren = (List) makeListOfSameSizeOtherThan(changedArgType, originalList); + B transformed = node.replaceChildren(newChildren); + assertTransformedOrReplacedChildren(node, transformed, ctor, nodeCtorArgs, changedArgOffset, newChildren); + } else if (false == originalList.isEmpty() && node.children().containsAll(originalList)) { + // The arg we're looking at is a collection contained within the children + + // First make the new children + @SuppressWarnings("unchecked") // we pass a reasonable type so get reasonable results + List newCollection = (List) makeListOfSameSizeOtherThan(changedArgType, originalList); + + // Now merge that list of thildren into the original list of children + List originalChildren = node.children(); + List newChildren = new ArrayList<>(originalChildren.size()); + int originalOffset = 0; + for (int i = 0; i < originalChildren.size(); i++) { + if (originalOffset < originalList.size() && originalChildren.get(i).equals(originalList.get(originalOffset))) { + newChildren.add(newCollection.get(originalOffset)); + originalOffset++; + } else { + newChildren.add(originalChildren.get(i)); + } + } + + // Finally! We can assert..... + B transformed = node.replaceChildren(newChildren); + assertTransformedOrReplacedChildren(node, transformed, ctor, nodeCtorArgs, changedArgOffset, newCollection); + } else { + // The arg we're looking at has nothing to do with the children + } + } else { + if (node.children().contains(originalArgValue)) { + // The arg we're looking at is one of the children + List newChildren = new ArrayList<>(node.children()); + @SuppressWarnings("unchecked") // makeArg produced reasonable values + B newChild = (B) randomValueOtherThan(nodeCtorArgs[changedArgOffset], () -> makeArg(changedArgType)); + newChildren.replaceAll(e -> Objects.equals(originalArgValue, e) ? newChild : e); + B transformed = node.replaceChildren(newChildren); + assertTransformedOrReplacedChildren(node, transformed, ctor, nodeCtorArgs, changedArgOffset, newChild); + } else { + // The arg we're looking at has nothing to do with the children + } + } + } + } + + private void assertTransformedOrReplacedChildren(T node, B transformed, Constructor ctor, + Object[] nodeCtorArgs, int changedArgOffset, Object changedArgValue) throws Exception { + if (node instanceof Function) { + /* + * Functions have a weaker definition of transform then other + * things: + * + * Transforming using the way we did above should only change + * the one property of the node that we intended to transform. 
+ */ + assertEquals(node.location(), transformed.location()); + List op = node.properties(); + List tp = transformed.properties(); + for (int p = 0; p < op.size(); p++) { + if (p == changedArgOffset - 1) { // -1 because location isn't in the list + assertEquals(changedArgValue, tp.get(p)); + } else { + assertEquals(op.get(p), tp.get(p)); + } + } + } else { + /* + * The stronger assertion for all non-Functions: transforming + * a node changes *only* the transformed value such that you + * can rebuild a copy of the node using its constructor changing + * only one argument and it'll be *equal* to the result of the + * transformation. + */ + Type[] argTypes = ctor.getGenericParameterTypes(); + Object[] args = new Object[argTypes.length]; + for (int i = 0; i < argTypes.length; i++) { + args[i] = nodeCtorArgs[i] == nodeCtorArgs[changedArgOffset] ? changedArgValue : nodeCtorArgs[i]; + } + T reflectionTransformed = ctor.newInstance(args); + assertEquals(reflectionTransformed, transformed); + } + } + + /** + * Find the longest constructor of the given class. + * By convention, for all subclasses of {@link Node}, + * this constructor should have "all" of the state of + * the node. All other constructors should all delegate + * to this constructor. + */ + static Constructor longestCtor(Class clazz) { + Constructor longest = null; + for (Constructor ctor: clazz.getConstructors()) { + if (longest == null || longest.getParameterCount() < ctor.getParameterCount()) { + @SuppressWarnings("unchecked") // Safe because the ctor has to be a ctor for T + Constructor castCtor = (Constructor) ctor; + longest = castCtor; + } + } + if (longest == null) { + throw new IllegalArgumentException("Couldn't find any constructors for [" + clazz.getName() + "]"); + } + return longest; + } + + /** + * Scans the {@code .class} files to identify all classes and + * checks if they are subclasses of {@link Node}. + */ + @ParametersFactory + @SuppressWarnings("rawtypes") + public static List nodeSubclasses() throws IOException { + return subclassesOf(Node.class).stream() + .filter(c -> testClassFor(c) == null) + .map(c -> new Object[] {c}) + .collect(toList()); + } + + /** + * Build a list of arguments to use when calling + * {@code ctor} that make sense when {@code ctor} + * builds subclasses of {@link Node}. + */ + private static Object[] ctorArgs(Constructor> ctor) throws Exception { + Type[] argTypes = ctor.getGenericParameterTypes(); + Object[] args = new Object[argTypes.length]; + for (int i = 0; i < argTypes.length; i++) { + final int currentArgIndex = i; + args[i] = randomValueOtherThanMany(candidate -> { + for (int a = 0; a < currentArgIndex; a++) { + if (Objects.equals(args[a], candidate)) { + return true; + } + } + return false; + }, () -> { + try { + return makeArg(ctor.getDeclaringClass(), argTypes[currentArgIndex]); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + } + return args; + } + + /** + * Make an argument to feed the {@link #subclass}'s ctor. + */ + private Object makeArg(Type argType) { + try { + return makeArg(subclass, argType); + } catch (Exception e) { + // Wrap to make `randomValueOtherThan` happy. + throw new RuntimeException(e); + } + } + + /** + * Make an argument to feed to the constructor for {@code toBuildClass}. 
+ */ + @SuppressWarnings("unchecked") + private static Object makeArg(Class> toBuildClass, Type argType) throws Exception { + if (argType instanceof ParameterizedType) { + ParameterizedType pt = (ParameterizedType) argType; + if (pt.getRawType() == Map.class) { + Map map = new HashMap<>(); + int size = between(0, 10); + while (map.size() < size) { + Object key = makeArg(toBuildClass, pt.getActualTypeArguments()[0]); + Object value = makeArg(toBuildClass, pt.getActualTypeArguments()[1]); + map.put(key, value); + } + return map; + } + if (pt.getRawType() == List.class) { + return makeList(toBuildClass, pt, between(1, 10)); + } + if (pt.getRawType() == EnumSet.class) { + @SuppressWarnings("rawtypes") + Enum enm = (Enum) makeArg(toBuildClass, pt.getActualTypeArguments()[0]); + return EnumSet.of(enm); + } + if (pt.getRawType() == Supplier.class) { + if (toBuildClass == AggExtractorInput.class) { + // AggValueInput just needs a valid java type in a supplier + Object o = randomBoolean() ? null : randomAlphaOfLength(5); + // But the supplier has to implement equals for randomValueOtherThan + return new Supplier() { + @Override + public Object get() { + return o; + } + + @Override + public int hashCode() { + return Objects.hash(o); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Supplier other = (Supplier) obj; + return Objects.equals(o, other.get()); + } + }; + } + + } + throw new IllegalArgumentException("Unsupported parameterized type [" + pt + "]"); + } + if (argType instanceof WildcardType) { + WildcardType wt = (WildcardType) argType; + if (wt.getLowerBounds().length > 0 || wt.getUpperBounds().length > 1) { + throw new IllegalArgumentException("Unsupported wildcard type [" + wt + "]"); + } + return makeArg(toBuildClass, wt.getUpperBounds()[0]); + } + Class argClass = (Class) argType; + + /* + * Sometimes all of the required type information isn't in the ctor + * so we have to hard code it here. + */ + if (toBuildClass == InnerAggregate.class) { + // InnerAggregate's AggregateFunction must be an EnclosedAgg. Avg is. + if (argClass == AggregateFunction.class) { + return makeNode(Avg.class); + } + } else if (toBuildClass == FieldAttribute.class) { + // `parent` is nullable. + if (argClass == FieldAttribute.class && randomBoolean()) { + return null; + } + } else if (toBuildClass == ChildrenAreAProperty.class) { + /* + * While any subclass of Dummy will do here we want to prevent + * stack overflow so we use the one without children. + */ + if (argClass == Dummy.class) { + return makeNode(NoChildren.class); + } + } else if (FullTextPredicate.class.isAssignableFrom(toBuildClass)) { + /* + * FullTextPredicate analyzes its string arguments on + * construction so they have to be valid. + */ + if (argClass == String.class) { + int size = between(0, 5); + StringBuilder b = new StringBuilder(); + for (int i = 0; i < size; i++) { + if (i != 0) { + b.append(';'); + } + b.append(randomAlphaOfLength(5)).append('=').append(randomAlphaOfLength(5)); + } + return b.toString(); + } + } else if (toBuildClass == LikePattern.class) { + /* + * The pattern and escape character have to be valid together + * so we pick an escape character that isn't used + */ + if (argClass == char.class) { + return randomFrom('\\', '|', '/', '`'); + } + } + + if (Expression.class == argClass) { + /* + * Rather than use any old subclass of expression lets + * use a simple one. 
Without this we're very prone to + * stackoverflow errors while building the tree. + */ + return UnresolvedAttributeTests.randomUnresolvedAttribute(); + } + if (Node.class.isAssignableFrom(argClass)) { + /* + * Rather than attempting to mock subclasses of node + * and emulate them we just try and instantiate an + * appropriate subclass + */ + @SuppressWarnings("unchecked") // safe because this is the lowest possible bounds for Node + Class> asNodeSubclass = (Class>) argType; + return makeNode(asNodeSubclass); + } + + if (argClass.isEnum()) { + // Can't mock enums but luckily we can just pick one + return randomFrom(argClass.getEnumConstants()); + } + if (argClass == boolean.class) { + // Can't mock primitives.... + return randomBoolean(); + } + if (argClass == int.class) { + return randomInt(); + } + if (argClass == String.class) { + // Nor strings + return randomAlphaOfLength(5); + } + if (argClass == Location.class) { + // Location is final and can't be mocked but we have a handy method to generate ones. + return LocationTests.randomLocation(); + } + try { + return mock(argClass); + } catch (MockitoException e) { + throw new RuntimeException("failed to mock [" + argClass.getName() + "] for [" + toBuildClass.getName() + "]", e); + } + } + + private static List makeList(Class> toBuildClass, ParameterizedType listType, int size) throws Exception { + List list = new ArrayList<>(); + for (int i = 0; i < size; i++) { + list.add(makeArg(toBuildClass, listType.getActualTypeArguments()[0])); + } + return list; + } + + private List makeListOfSameSizeOtherThan(Type listType, List original) throws Exception { + if (original.isEmpty()) { + throw new IllegalArgumentException("Can't make a different empty list"); + } + return randomValueOtherThan(original, () -> { + try { + return makeList(subclass, (ParameterizedType) listType, original.size()); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + + } + + public static > T makeNode(Class nodeClass) throws Exception { + if (Modifier.isAbstract(nodeClass.getModifiers())) { + nodeClass = randomFrom(subclassesOf(nodeClass)); + } + Class testSubclassFor = testClassFor(nodeClass); + if (testSubclassFor != null) { + // Delegate to the test class for a node if there is one + Method m = testSubclassFor.getMethod("random" + Strings.capitalize(nodeClass.getSimpleName())); + return nodeClass.cast(m.invoke(null)); + } + Constructor ctor = longestCtor(nodeClass); + Object[] nodeCtorArgs = ctorArgs(ctor); + return ctor.newInstance(nodeCtorArgs); + } + + /** + * Cache of subclasses. We use a cache because it significantly speeds up + * the test. + */ + private static final Map, List> subclassCache = new HashMap<>(); + /** + * Find all subclasses of a particular class. 
+ */ + private static List> subclassesOf(Class clazz) throws IOException { + @SuppressWarnings("unchecked") // The map is built this way + List> lookup = (List>) subclassCache.get(clazz); + if (lookup != null) { + return lookup; + } + List> results = new ArrayList<>(); + String[] paths = System.getProperty("java.class.path").split(System.getProperty("path.separator")); + for (String path: paths) { + Path root = PathUtils.get(path); + int rootLength = root.toString().length() + 1; + Files.walkFileTree(root, new SimpleFileVisitor() { + + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + if (Files.isRegularFile(file) && file.getFileName().toString().endsWith(".class")) { + String className = file.toString(); + // Chop off the root and file extension + className = className.substring(rootLength, className.length() - ".class".length()); + // Go from "path" style to class style + className = className.replace(PathUtils.getDefaultFileSystem().getSeparator(), "."); + + // filter the class that are not interested + // (and IDE folders like eclipse) + if (!className.startsWith("org.elasticsearch.xpack.sql")) { + return FileVisitResult.CONTINUE; + } + + Class c; + try { + c = Class.forName(className); + } catch (ClassNotFoundException e) { + throw new IOException("Couldn't find " + file, e); + } + + if (false == Modifier.isAbstract(c.getModifiers()) + && false == c.isAnonymousClass() + && clazz.isAssignableFrom(c)) { + Class s = c.asSubclass(clazz); + results.add(s); + } + } + return FileVisitResult.CONTINUE; + } + }); + } + subclassCache.put(clazz, results); + return results; + } + + /** + * The test class for some subclass of node or {@code null} + * if there isn't such a class or it doesn't extend + * {@link AbstractNodeTestCase}. + */ + private static Class testClassFor(Class nodeSubclass) { + String testClassName = nodeSubclass.getName() + "Tests"; + try { + Class c = Class.forName(testClassName); + if (AbstractNodeTestCase.class.isAssignableFrom(c)) { + return c; + } + return null; + } catch (ClassNotFoundException e) { + return null; + } + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeTests.java new file mode 100644 index 0000000000000..e9d03d31c1b1a --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/tree/NodeTests.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.tree; + +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.xpack.sql.tree.LocationTests.randomLocation; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; + +public class NodeTests extends ESTestCase { + public void testToString() { + assertEquals("NoChildren[thing]", new NoChildren(randomLocation(), "thing").toString()); + { + ChildrenAreAProperty empty = new ChildrenAreAProperty(randomLocation(), emptyList(), "thing"); + assertEquals("ChildrenAreAProperty[thing]", empty.toString()); + assertEquals("ChildrenAreAProperty[single]\n\\_ChildrenAreAProperty[thing]", + new ChildrenAreAProperty(randomLocation(), singletonList(empty), "single").toString()); + assertEquals("ChildrenAreAProperty[many]\n" + + "|_ChildrenAreAProperty[thing]\n" + + "\\_ChildrenAreAProperty[thing]", + new ChildrenAreAProperty(randomLocation(), Arrays.asList(empty, empty), "many").toString()); + } + { + NoChildren empty = new NoChildren(randomLocation(), "thing"); + assertEquals("AChildIsAProperty[single]\n" + + "\\_NoChildren[thing]", + new AChildIsAProperty(randomLocation(), empty, "single").toString()); + } + } + + public abstract static class Dummy extends Node { + private final String thing; + public Dummy(Location location, List children, String thing) { + super(location, children); + this.thing = thing; + } + + public String thing() { + return thing; + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Dummy other = (Dummy) obj; + return thing.equals(other.thing) + && children().equals(other.children()); + } + + @Override + public int hashCode() { + return Objects.hash(thing, children()); + } + } + + public static class ChildrenAreAProperty extends Dummy { + public ChildrenAreAProperty(Location location, List children, String thing) { + super(location, children, thing); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ChildrenAreAProperty::new, children(), thing()); + } + + @Override + public ChildrenAreAProperty replaceChildren(List newChildren) { + return new ChildrenAreAProperty(location(), newChildren, thing()); + } + } + + public static class AChildIsAProperty extends Dummy { + public AChildIsAProperty(Location location, Dummy child, String thing) { + super(location, singletonList(child), thing); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, AChildIsAProperty::new, child(), thing()); + } + + @Override + public AChildIsAProperty replaceChildren(List newChildren) { + if (newChildren.size() != 1) { + throw new IllegalArgumentException("expected [1] child but received [" + newChildren.size() + "]"); + } + return new AChildIsAProperty(location(), newChildren.get(0), thing()); + } + + public Dummy child() { + return children().get(0); + } + } + + public static class NoChildren extends Dummy { + public NoChildren(Location location, String thing) { + super(location, emptyList(), thing); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, NoChildren::new, thing()); + } + + @Override + public Dummy replaceChildren(List newChildren) { + throw new UnsupportedOperationException("no children to replace"); + } + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java 
b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java new file mode 100644 index 0000000000000..a6a322b31838f --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java @@ -0,0 +1,237 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.type; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.type.DataTypeConversion.Conversion; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +public class DataTypeConversionTests extends ESTestCase { + public void testConversionToString() { + Conversion conversion = DataTypeConversion.conversionFor(DataType.DOUBLE, DataType.KEYWORD); + assertNull(conversion.convert(null)); + assertEquals("10.0", conversion.convert(10.0)); + + conversion = DataTypeConversion.conversionFor(DataType.DATE, DataType.KEYWORD); + assertNull(conversion.convert(null)); + assertEquals("1970-01-01T00:00:00.000Z", conversion.convert(new DateTime(0, DateTimeZone.UTC))); + } + + /** + * Test conversion to long. + */ + public void testConversionToLong() { + DataType to = DataType.LONG; + { + Conversion conversion = DataTypeConversion.conversionFor(DataType.DOUBLE, to); + assertNull(conversion.convert(null)); + assertEquals(10L, conversion.convert(10.0)); + assertEquals(10L, conversion.convert(10.1)); + assertEquals(11L, conversion.convert(10.6)); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert(Double.MAX_VALUE)); + assertEquals("[" + Double.MAX_VALUE + "] out of [Long] range", e.getMessage()); + } + { + Conversion conversion = DataTypeConversion.conversionFor(DataType.INTEGER, to); + assertNull(conversion.convert(null)); + assertEquals(10L, conversion.convert(10)); + assertEquals(-134L, conversion.convert(-134)); + } + { + Conversion conversion = DataTypeConversion.conversionFor(DataType.BOOLEAN, to); + assertNull(conversion.convert(null)); + assertEquals(1, conversion.convert(true)); + assertEquals(0, conversion.convert(false)); + } + Conversion conversion = DataTypeConversion.conversionFor(DataType.KEYWORD, to); + assertNull(conversion.convert(null)); + assertEquals(1L, conversion.convert("1")); + assertEquals(0L, conversion.convert("-0")); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("0xff")); + assertEquals("cannot cast [0xff] to [Long]", e.getMessage()); + } + + public void testConversionToDate() { + DataType to = DataType.DATE; + { + Conversion conversion = DataTypeConversion.conversionFor(DataType.DOUBLE, to); + assertNull(conversion.convert(null)); + assertEquals(new DateTime(10L, DateTimeZone.UTC), conversion.convert(10.0)); + assertEquals(new DateTime(10L, DateTimeZone.UTC), conversion.convert(10.1)); + assertEquals(new DateTime(11L, DateTimeZone.UTC), conversion.convert(10.6)); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert(Double.MAX_VALUE)); + assertEquals("[" + Double.MAX_VALUE + "] out of [Long] range", e.getMessage()); + } + { + Conversion conversion = DataTypeConversion.conversionFor(DataType.INTEGER, to); + assertNull(conversion.convert(null)); + assertEquals(new DateTime(10L, DateTimeZone.UTC), 
conversion.convert(10)); + assertEquals(new DateTime(-134L, DateTimeZone.UTC), conversion.convert(-134)); + } + { + Conversion conversion = DataTypeConversion.conversionFor(DataType.BOOLEAN, to); + assertNull(conversion.convert(null)); + assertEquals(new DateTime(1, DateTimeZone.UTC), conversion.convert(true)); + assertEquals(new DateTime(0, DateTimeZone.UTC), conversion.convert(false)); + } + Conversion conversion = DataTypeConversion.conversionFor(DataType.KEYWORD, to); + assertNull(conversion.convert(null)); + + // TODO we'd like to be able to optionally parse millis here I think.... + assertEquals(new DateTime(1000L, DateTimeZone.UTC), conversion.convert("1970-01-01T00:00:01Z")); + assertEquals(new DateTime(1483228800000L, DateTimeZone.UTC), conversion.convert("2017-01-01T00:00:00Z")); + assertEquals(new DateTime(18000000L, DateTimeZone.UTC), conversion.convert("1970-01-01T00:00:00-05:00")); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("0xff")); + assertEquals("cannot cast [0xff] to [Date]:Invalid format: \"0xff\" is malformed at \"xff\"", e.getMessage()); + } + + public void testConversionToDouble() { + { + Conversion conversion = DataTypeConversion.conversionFor(DataType.FLOAT, DataType.DOUBLE); + assertNull(conversion.convert(null)); + assertEquals(10.0, (double) conversion.convert(10.0f), 0.00001); + assertEquals(10.1, (double) conversion.convert(10.1f), 0.00001); + assertEquals(10.6, (double) conversion.convert(10.6f), 0.00001); + } + { + Conversion conversion = DataTypeConversion.conversionFor(DataType.INTEGER, DataType.DOUBLE); + assertNull(conversion.convert(null)); + assertEquals(10.0, (double) conversion.convert(10), 0.00001); + assertEquals(-134.0, (double) conversion.convert(-134), 0.00001); + } + { + Conversion conversion = DataTypeConversion.conversionFor(DataType.BOOLEAN, DataType.DOUBLE); + assertNull(conversion.convert(null)); + assertEquals(1.0, (double) conversion.convert(true), 0); + assertEquals(0.0, (double) conversion.convert(false), 0); + } + { + Conversion conversion = DataTypeConversion.conversionFor(DataType.KEYWORD, DataType.DOUBLE); + assertNull(conversion.convert(null)); + assertEquals(1.0, (double) conversion.convert("1"), 0); + assertEquals(0.0, (double) conversion.convert("-0"), 0); + assertEquals(12.776, (double) conversion.convert("12.776"), 0.00001); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("0xff")); + assertEquals("cannot cast [0xff] to [Double]", e.getMessage()); + } + } + + public void testConversionToBoolean() { + { + Conversion conversion = DataTypeConversion.conversionFor(DataType.FLOAT, DataType.BOOLEAN); + assertNull(conversion.convert(null)); + assertEquals(true, conversion.convert(10.0f)); + assertEquals(true, conversion.convert(-10.0f)); + assertEquals(false, conversion.convert(0.0f)); + } + { + Conversion conversion = DataTypeConversion.conversionFor(DataType.INTEGER, DataType.BOOLEAN); + assertNull(conversion.convert(null)); + assertEquals(true, conversion.convert(10)); + assertEquals(true, conversion.convert(-10)); + assertEquals(false, conversion.convert(0)); + } + { + Conversion conversion = DataTypeConversion.conversionFor(DataType.DOUBLE, DataType.BOOLEAN); + assertNull(conversion.convert(null)); + assertEquals(true, conversion.convert(10.0)); + assertEquals(true, conversion.convert(-10.0)); + assertEquals(false, conversion.convert(0.0)); + } + { + Conversion conversion = DataTypeConversion.conversionFor(DataType.KEYWORD, DataType.BOOLEAN); + 
assertNull(conversion.convert(null)); + // We only handled upper and lower case true and false + assertEquals(true, conversion.convert("true")); + assertEquals(false, conversion.convert("false")); + assertEquals(true, conversion.convert("True")); + assertEquals(false, conversion.convert("fAlSe")); + // Everything else should fail + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("10")); + assertEquals("cannot cast [10] to [Boolean]", e.getMessage()); + e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("-1")); + assertEquals("cannot cast [-1] to [Boolean]", e.getMessage()); + e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("0")); + assertEquals("cannot cast [0] to [Boolean]", e.getMessage()); + e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("blah")); + assertEquals("cannot cast [blah] to [Boolean]", e.getMessage()); + e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("Yes")); + assertEquals("cannot cast [Yes] to [Boolean]", e.getMessage()); + e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("nO")); + assertEquals("cannot cast [nO] to [Boolean]", e.getMessage()); + } + } + + public void testConversionToInt() { + { + Conversion conversion = DataTypeConversion.conversionFor(DataType.DOUBLE, DataType.INTEGER); + assertNull(conversion.convert(null)); + assertEquals(10, conversion.convert(10.0)); + assertEquals(10, conversion.convert(10.1)); + assertEquals(11, conversion.convert(10.6)); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert(Long.MAX_VALUE)); + assertEquals("[" + Long.MAX_VALUE + "] out of [Int] range", e.getMessage()); + } + } + + public void testConversionToShort() { + { + Conversion conversion = DataTypeConversion.conversionFor(DataType.DOUBLE, DataType.SHORT); + assertNull(conversion.convert(null)); + assertEquals((short) 10, conversion.convert(10.0)); + assertEquals((short) 10, conversion.convert(10.1)); + assertEquals((short) 11, conversion.convert(10.6)); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert(Integer.MAX_VALUE)); + assertEquals("[" + Integer.MAX_VALUE + "] out of [Short] range", e.getMessage()); + } + } + + public void testConversionToByte() { + { + Conversion conversion = DataTypeConversion.conversionFor(DataType.DOUBLE, DataType.BYTE); + assertNull(conversion.convert(null)); + assertEquals((byte) 10, conversion.convert(10.0)); + assertEquals((byte) 10, conversion.convert(10.1)); + assertEquals((byte) 11, conversion.convert(10.6)); + Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert(Short.MAX_VALUE)); + assertEquals("[" + Short.MAX_VALUE + "] out of [Byte] range", e.getMessage()); + } + } + + public void testConversionToNull() { + Conversion conversion = DataTypeConversion.conversionFor(DataType.DOUBLE, DataType.NULL); + assertNull(conversion.convert(null)); + assertNull(conversion.convert(10.0)); + } + + public void testConversionToIdentity() { + Conversion conversion = DataTypeConversion.conversionFor(DataType.INTEGER, DataType.INTEGER); + assertNull(conversion.convert(null)); + assertEquals(10, conversion.convert(10)); + } + + public void testCommonType() { + assertEquals(DataType.BOOLEAN, DataTypeConversion.commonType(DataType.BOOLEAN, DataType.NULL)); + assertEquals(DataType.BOOLEAN, DataTypeConversion.commonType(DataType.NULL, DataType.BOOLEAN)); + 
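+        // The remaining cases check that identical types are returned as-is and encode the
+        // promotion rules exercised below: a string type paired with a numeric type resolves
+        // to the numeric type, two different string types have no common type (null), and
+        // numeric pairs widen to the larger type.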
assertEquals(DataType.BOOLEAN, DataTypeConversion.commonType(DataType.BOOLEAN, DataType.BOOLEAN)); + assertEquals(DataType.NULL, DataTypeConversion.commonType(DataType.NULL, DataType.NULL)); + assertEquals(DataType.INTEGER, DataTypeConversion.commonType(DataType.INTEGER, DataType.KEYWORD)); + assertEquals(DataType.LONG, DataTypeConversion.commonType(DataType.TEXT, DataType.LONG)); + assertEquals(null, DataTypeConversion.commonType(DataType.TEXT, DataType.KEYWORD)); + assertEquals(DataType.SHORT, DataTypeConversion.commonType(DataType.SHORT, DataType.BYTE)); + assertEquals(DataType.FLOAT, DataTypeConversion.commonType(DataType.BYTE, DataType.FLOAT)); + assertEquals(DataType.FLOAT, DataTypeConversion.commonType(DataType.FLOAT, DataType.INTEGER)); + assertEquals(DataType.DOUBLE, DataTypeConversion.commonType(DataType.DOUBLE, DataType.FLOAT)); + } + + public void testEsDataTypes() { + for (DataType type : DataType.values()) { + assertEquals(type, DataType.fromEsType(type.esType)); + } + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java new file mode 100644 index 0000000000000..c5e82123d7b8b --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java @@ -0,0 +1,203 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.type; + +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; + +import java.io.InputStream; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static org.elasticsearch.xpack.sql.type.DataType.DATE; +import static org.elasticsearch.xpack.sql.type.DataType.INTEGER; +import static org.elasticsearch.xpack.sql.type.DataType.KEYWORD; +import static org.elasticsearch.xpack.sql.type.DataType.NESTED; +import static org.elasticsearch.xpack.sql.type.DataType.OBJECT; +import static org.elasticsearch.xpack.sql.type.DataType.TEXT; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +public class TypesTests extends ESTestCase { + + public void testNullMap() { + Map fromEs = Types.fromEs(null); + assertThat(fromEs.isEmpty(), is(true)); + } + + public void testEmptyMap() { + Map fromEs = Types.fromEs(emptyMap()); + assertThat(fromEs.isEmpty(), is(true)); + } + + public void testBasicMapping() { + Map mapping = loadMapping("mapping-basic.json"); + assertThat(mapping.size(), is(6)); + assertThat(mapping.get("emp_no").getDataType(), is(INTEGER)); + assertThat(mapping.get("first_name"), instanceOf(TextEsField.class)); + assertThat(mapping.get("last_name").getDataType(), is(TEXT)); + assertThat(mapping.get("gender").getDataType(), is(KEYWORD)); + assertThat(mapping.get("salary").getDataType(), is(INTEGER)); + } + + public void testDefaultStringMapping() { + Map mapping = loadMapping("mapping-default-string.json"); + + assertThat(mapping.size(), is(1)); + assertThat(mapping.get("dep_no").getDataType(), is(TEXT)); + } + + public void testTextField() { + Map mapping = loadMapping("mapping-text.json"); + + assertThat(mapping.size(), is(1)); + EsField type = 
mapping.get("full_name"); + assertThat(type, instanceOf(TextEsField.class)); + assertThat(type.hasDocValues(), is(false)); + TextEsField ttype = (TextEsField) type; + assertThat(type.getPrecision(), is(Integer.MAX_VALUE)); + assertThat(ttype.hasDocValues(), is(false)); + } + + public void testKeywordField() { + Map mapping = loadMapping("mapping-keyword.json"); + + assertThat(mapping.size(), is(1)); + EsField field = mapping.get("full_name"); + assertThat(field, instanceOf(KeywordEsField.class)); + assertThat(field.hasDocValues(), is(true)); + assertThat(field.getPrecision(), is(256)); + } + + public void testDateField() { + Map mapping = loadMapping("mapping-date.json"); + + assertThat(mapping.size(), is(1)); + EsField field = mapping.get("date"); + assertThat(field.getDataType(), is(DATE)); + assertThat(field.hasDocValues(), is(true)); + assertThat(field.getPrecision(), is(19)); + + DateEsField dfield = (DateEsField) field; + List formats = dfield.getFormats(); + assertThat(formats, hasSize(3)); + } + + public void testDateNoFormat() { + Map mapping = loadMapping("mapping-date-no-format.json"); + + assertThat(mapping.size(), is(1)); + EsField field = mapping.get("date"); + assertThat(field.getDataType(), is(DATE)); + assertThat(field.hasDocValues(), is(true)); + DateEsField dfield = (DateEsField) field; + // default types + assertThat(dfield.getFormats(), hasSize(2)); + } + + public void testDateMulti() { + Map mapping = loadMapping("mapping-date-multi.json"); + + assertThat(mapping.size(), is(1)); + EsField field = mapping.get("date"); + assertThat(field.getDataType(), is(DATE)); + assertThat(field.hasDocValues(), is(true)); + DateEsField dfield = (DateEsField) field; + // default types + assertThat(dfield.getFormats(), hasSize(1)); + } + + public void testDocValueField() { + Map mapping = loadMapping("mapping-docvalues.json"); + + assertThat(mapping.size(), is(1)); + EsField field = mapping.get("session_id"); + assertThat(field, instanceOf(KeywordEsField.class)); + assertThat(field.getPrecision(), is(15)); + assertThat(field.hasDocValues(), is(false)); + } + + public void testDottedField() { + Map mapping = loadMapping("mapping-object.json"); + + assertThat(mapping.size(), is(2)); + EsField field = mapping.get("manager"); + assertThat(field.getDataType().isPrimitive(), is(false)); + assertThat(field.getDataType(), is(OBJECT)); + Map children = field.getProperties(); + assertThat(children.size(), is(2)); + EsField names = children.get("name"); + children = names.getProperties(); + assertThat(children.size(), is(2)); + assertThat(children.get("first").getDataType(), is(TEXT)); + } + + public void testMultiField() { + Map mapping = loadMapping("mapping-multi-field.json"); + + assertThat(mapping.size(), is(1)); + EsField field = mapping.get("text"); + assertThat(field.getDataType().isPrimitive(), is(true)); + assertThat(field.getDataType(), is(TEXT)); + Map fields = field.getProperties(); + assertThat(fields.size(), is(2)); + assertThat(fields.get("raw").getDataType(), is(KEYWORD)); + assertThat(fields.get("english").getDataType(), is(TEXT)); + } + + public void testMultiFieldTooManyOptions() { + Map mapping = loadMapping("mapping-multi-field.json"); + + assertThat(mapping.size(), is(1)); + EsField field = mapping.get("text"); + assertThat(field.getDataType().isPrimitive(), is(true)); + assertThat(field, instanceOf(TextEsField.class)); + Map fields = field.getProperties(); + assertThat(fields.size(), is(2)); + assertThat(fields.get("raw").getDataType(), is(KEYWORD)); + 
assertThat(fields.get("english").getDataType(), is(TEXT)); + } + + public void testNestedDoc() { + Map mapping = loadMapping("mapping-nested.json"); + + assertThat(mapping.size(), is(1)); + EsField field = mapping.get("dep"); + assertThat(field.getDataType().isPrimitive(), is(false)); + assertThat(field.getDataType(), is(NESTED)); + Map children = field.getProperties(); + assertThat(children.size(), is(4)); + assertThat(children.get("dep_name").getDataType(), is(TEXT)); + assertThat(children.get("start_date").getDataType(), is(DATE)); + } + + public void testGeoField() { + Map mapping = loadMapping("mapping-geo.json"); + EsField dt = mapping.get("location"); + assertThat(dt.getDataType().esType, is("unsupported")); + } + + public void testUnsupportedTypes() { + Map mapping = loadMapping("mapping-unsupported.json"); + EsField dt = mapping.get("range"); + assertThat(dt.getDataType().esType, is("unsupported")); + } + + public static Map loadMapping(String name) { + InputStream stream = TypesTests.class.getResourceAsStream("/" + name); + assertNotNull("Could not find mapping resource:" + name, stream); + return Types.fromEs(XContentHelper.convertToMap(JsonXContent.jsonXContent, stream, randomBoolean())); + } + + public static Map loadMapping(String name, boolean ordered) { + InputStream stream = TypesTests.class.getResourceAsStream("/" + name); + assertNotNull("Could not find mapping resource:" + name, stream); + return Types.fromEs(XContentHelper.convertToMap(JsonXContent.jsonXContent, stream, ordered)); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/util/LikeConversionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/util/LikeConversionTests.java new file mode 100644 index 0000000000000..d363eb5274b23 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/util/LikeConversionTests.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.util; + +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.xpack.sql.util.StringUtils.likeToJavaPattern; +import static org.elasticsearch.xpack.sql.util.StringUtils.likeToLuceneWildcard;; + +public class LikeConversionTests extends ESTestCase { + + private static String regex(String pattern) { + return likeToJavaPattern(pattern, '|'); + } + + private static String wildcard(String pattern) { + return likeToLuceneWildcard(pattern, '|'); + } + + public void testNoRegex() { + assertEquals("^fooBar$", regex("fooBar")); + } + + public void testEscapedSqlWildcard() { + assertEquals("^foo\\\\_bar$", regex("foo\\|_bar")); + } + + public void testEscapedSqlWildcardGreedy() { + assertEquals("^foo.*%bar$", regex("foo%|%bar")); + } + + public void testSimpleSqlRegex1() { + assertEquals("^foo.bar$", regex("foo_bar")); + } + + public void testSimpleSqlRegex2() { + assertEquals("^foo.*bar$", regex("foo%bar")); + } + + public void testMultipleSqlRegexes() { + assertEquals("^foo.*bar.$", regex("foo%bar_")); + } + + public void testJavaRegexNoSqlRegex() { + assertEquals("^foo\\.\\*bar$", regex("foo.*bar")); + } + + public void testMultipleRegexAndSqlRegex() { + assertEquals("^foo\\\\\\.\\*bar\\..*$", regex("foo\\.*bar.%")); + } + + public void testEscapedJavaRegex() { + assertEquals("^\\[a-zA-Z\\]$", regex("[a-zA-Z]")); + } + + public void testComplicatedJavaRegex() { + assertEquals("^\\^\\[0\\.\\.9\\]\\.\\*\\$$", regex("^[0..9].*$")); + } + + public void testNoWildcard() { + assertEquals("foo", wildcard("foo")); + } + + public void testQuestionMarkWildcard() { + assertEquals("foo?bar", wildcard("foo_bar")); + } + + public void testStarWildcard() { + assertEquals("foo*", wildcard("foo%")); + } + + public void testWildcardEscapeLuceneWildcard() { + assertEquals("foo\\*bar*", wildcard("foo*bar%")); + } + + public void testWildcardEscapedWildcard() { + assertEquals("foo\\*bar%", wildcard("foo*bar|%")); + } + + public void testEscapedLuceneEscape() { + assertEquals("foo\\\\\\*bar", wildcard("foo\\*bar")); + } + + public void testMixOfEscapedLuceneAndSqlEscapes() { + assertEquals("foo\\\\?_\\*bar*", wildcard("foo\\_|_*bar%")); + } + + public void testWildcardIgnoreEscapedWildcard() { + assertEquals("foo\\\\\\*bar*", wildcard("foo\\*bar%")); + } + + public void testWildcardDoubleEscaping() { + assertEquals("foo\\\\\\\\bar", wildcard("foo\\\\bar")); + } + + public void testWildcardTripleEscaping() { + assertEquals("foo\\\\\\\\bar\\?\\\\?", wildcard("foo\\\\bar?\\_")); + } + + public void testWildcardIgnoreDoubleEscapedButSkipEscapingOfSql() { + assertEquals("foo\\\\\\*bar\\\\?\\?", wildcard("foo\\*bar\\_?")); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/resources/mapping-basic.json b/x-pack/plugin/sql/src/test/resources/mapping-basic.json new file mode 100644 index 0000000000000..c1747d1561c6d --- /dev/null +++ b/x-pack/plugin/sql/src/test/resources/mapping-basic.json @@ -0,0 +1,22 @@ +{ + "properties" : { + "emp_no" : { + "type" : "integer" + }, + "first_name" : { + "type" : "text" + }, + "gender" : { + "type" : "keyword" + }, + "languages" : { + "type" : "byte" + }, + "last_name" : { + "type" : "text" + }, + "salary" : { + "type" : "integer" + } + } +} diff --git a/x-pack/plugin/sql/src/test/resources/mapping-date-multi.json b/x-pack/plugin/sql/src/test/resources/mapping-date-multi.json new file mode 100644 index 0000000000000..e6cd9091f8415 --- /dev/null +++ 
b/x-pack/plugin/sql/src/test/resources/mapping-date-multi.json @@ -0,0 +1,9 @@ +{ + "properties": { + "date": { + "type": "date", + "format": "yyyy-MM-dd" + } + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/resources/mapping-date-no-format.json b/x-pack/plugin/sql/src/test/resources/mapping-date-no-format.json new file mode 100644 index 0000000000000..e0e5fa852f52e --- /dev/null +++ b/x-pack/plugin/sql/src/test/resources/mapping-date-no-format.json @@ -0,0 +1,8 @@ +{ + "properties": { + "date": { + "type": "date" + } + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/resources/mapping-date.json b/x-pack/plugin/sql/src/test/resources/mapping-date.json new file mode 100644 index 0000000000000..0422d7e1026bc --- /dev/null +++ b/x-pack/plugin/sql/src/test/resources/mapping-date.json @@ -0,0 +1,9 @@ +{ + "properties": { + "date": { + "type": "date", + "format": "yyyy-MM-dd || basic_time || year" + } + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/resources/mapping-default-string.json b/x-pack/plugin/sql/src/test/resources/mapping-default-string.json new file mode 100644 index 0000000000000..e8777a9cd68b4 --- /dev/null +++ b/x-pack/plugin/sql/src/test/resources/mapping-default-string.json @@ -0,0 +1,13 @@ +{ + "properties" : { + "dep_no" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + } + } +} diff --git a/x-pack/plugin/sql/src/test/resources/mapping-docvalues.json b/x-pack/plugin/sql/src/test/resources/mapping-docvalues.json new file mode 100644 index 0000000000000..5cd0ed200ce96 --- /dev/null +++ b/x-pack/plugin/sql/src/test/resources/mapping-docvalues.json @@ -0,0 +1,9 @@ +{ + "properties" : { + "session_id" : { + "type" : "keyword", + "ignore_above" : 15, + "doc_values" : false + } + } +} diff --git a/x-pack/plugin/sql/src/test/resources/mapping-dotted-field.json b/x-pack/plugin/sql/src/test/resources/mapping-dotted-field.json new file mode 100644 index 0000000000000..c48cd5c770659 --- /dev/null +++ b/x-pack/plugin/sql/src/test/resources/mapping-dotted-field.json @@ -0,0 +1,32 @@ +{ + "properties" : { + "test" : { + "properties" : { + "test" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "bar" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + } + } + }, + "bar" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/resources/mapping-geo.json b/x-pack/plugin/sql/src/test/resources/mapping-geo.json new file mode 100644 index 0000000000000..3c958ff37edfc --- /dev/null +++ b/x-pack/plugin/sql/src/test/resources/mapping-geo.json @@ -0,0 +1,7 @@ +{ + "properties" : { + "location" : { + "type" : "geo_point" + } + } +} diff --git a/x-pack/plugin/sql/src/test/resources/mapping-keyword.json b/x-pack/plugin/sql/src/test/resources/mapping-keyword.json new file mode 100644 index 0000000000000..aa47e9e42ad0f --- /dev/null +++ b/x-pack/plugin/sql/src/test/resources/mapping-keyword.json @@ -0,0 +1,8 @@ +{ + "properties" : { + "full_name" : { + "type" : "keyword", + "ignore_above" : 256 + } + } +} diff --git a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-options.json b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-options.json new file mode 100644 index 0000000000000..f2389aed3d78e --- /dev/null +++ 
b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-options.json @@ -0,0 +1,15 @@ +{ + "properties" : { + "text" : { + "type" : "text", + "fields" : { + "raw" : { + "type" : "keyword" + }, + "key" : { + "type" : "keyword" + } + } + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json new file mode 100644 index 0000000000000..13c9f62b2136e --- /dev/null +++ b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json @@ -0,0 +1,48 @@ +{ + "properties" : { + "bool" : { "type" : "boolean" }, + "int" : { "type" : "integer" }, + "text" : { "type" : "text" }, + "keyword" : { "type" : "keyword" }, + "date" : { "type" : "date" }, + "unsupported" : { "type" : "ip_range" }, + "some" : { + "properties" : { + "dotted" : { + "properties" : { + "field" : { + "type" : "keyword" + } + } + }, + "string" : { + "type" : "text", + "fields" : { + "normalized" : { + "type" : "keyword", + "normalizer" : "some_normalizer" + }, + "typical" : { + "type" : "keyword" + } + } + }, + "ambiguous" : { + "type" : "text", + "fields" : { + "one" : { + "type" : "keyword" + }, + "two" : { + "type" : "keyword" + }, + "normalized" : { + "type" : "keyword", + "normalizer" : "some_normalizer" + } + } + } + } + } + } +} diff --git a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-with-nested.json b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-with-nested.json new file mode 100644 index 0000000000000..448c50e6a9f0a --- /dev/null +++ b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-with-nested.json @@ -0,0 +1,71 @@ +{ + "properties" : { + "bool" : { "type" : "boolean" }, + "int" : { "type" : "integer" }, + "text" : { "type" : "text" }, + "keyword" : { "type" : "keyword" }, + "unsupported" : { "type" : "ip_range" }, + "date" : { "type" : "date"}, + "some" : { + "properties" : { + "dotted" : { + "properties" : { + "field" : { + "type" : "keyword" + } + } + }, + "string" : { + "type" : "text", + "fields" : { + "normalized" : { + "type" : "keyword", + "normalizer" : "some_normalizer" + }, + "typical" : { + "type" : "keyword" + } + } + }, + "ambiguous" : { + "type" : "text", + "fields" : { + "one" : { + "type" : "keyword" + }, + "two" : { + "type" : "keyword" + }, + "normalized" : { + "type" : "keyword", + "normalizer" : "some_normalizer" + } + } + } + } + }, + "dep" : { + "type" : "nested", + "properties" : { + "dep_name" : { + "type" : "text" + }, + "dep_id" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "end_date" : { + "type" : "date" + }, + "start_date" : { + "type" : "date" + } + } + } + } +} diff --git a/x-pack/plugin/sql/src/test/resources/mapping-multi-field.json b/x-pack/plugin/sql/src/test/resources/mapping-multi-field.json new file mode 100644 index 0000000000000..9e293d4271333 --- /dev/null +++ b/x-pack/plugin/sql/src/test/resources/mapping-multi-field.json @@ -0,0 +1,16 @@ +{ + "properties" : { + "text" : { + "type" : "text", + "fields" : { + "raw" : { + "type" : "keyword" + }, + "english" : { + "type" : "text", + "analyzer" : "english" + } + } + } + } +} diff --git a/x-pack/plugin/sql/src/test/resources/mapping-nested.json b/x-pack/plugin/sql/src/test/resources/mapping-nested.json new file mode 100644 index 0000000000000..d9b6398458f14 --- /dev/null +++ b/x-pack/plugin/sql/src/test/resources/mapping-nested.json @@ -0,0 +1,27 @@ +{ + "properties" : { + "dep" : { + 
"type" : "nested", + "properties" : { + "dep_name" : { + "type" : "text" + }, + "dep_no" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "end_date" : { + "type" : "date" + }, + "start_date" : { + "type" : "date" + } + } + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/resources/mapping-object.json b/x-pack/plugin/sql/src/test/resources/mapping-object.json new file mode 100644 index 0000000000000..65fd391f901db --- /dev/null +++ b/x-pack/plugin/sql/src/test/resources/mapping-object.json @@ -0,0 +1,24 @@ +{ + "properties" : { + "region" : { + "type" : "keyword" + }, + "manager" : { + "properties" : { + "age" : { + "type" : "integer" + }, + "name" : { + "properties" : { + "first" : { + "type" : "text" + }, + "last" : { + "type" : "text" + } + } + } + } + } + } +} diff --git a/x-pack/plugin/sql/src/test/resources/mapping-parent-child.json b/x-pack/plugin/sql/src/test/resources/mapping-parent-child.json new file mode 100644 index 0000000000000..b62e19625e26d --- /dev/null +++ b/x-pack/plugin/sql/src/test/resources/mapping-parent-child.json @@ -0,0 +1,10 @@ +{ + "properties" : { + "parent_child" : { + "type" : "join", + "relations" : { + "question" : "answer" + } + } + } +} diff --git a/x-pack/plugin/sql/src/test/resources/mapping-text.json b/x-pack/plugin/sql/src/test/resources/mapping-text.json new file mode 100644 index 0000000000000..ecf2f09c98a23 --- /dev/null +++ b/x-pack/plugin/sql/src/test/resources/mapping-text.json @@ -0,0 +1,8 @@ +{ + "properties" : { + "full_name" : { + "type" : "text", + "fielddata" : false + } + } +} diff --git a/x-pack/plugin/sql/src/test/resources/mapping-unsupported.json b/x-pack/plugin/sql/src/test/resources/mapping-unsupported.json new file mode 100644 index 0000000000000..832dc9c0d745d --- /dev/null +++ b/x-pack/plugin/sql/src/test/resources/mapping-unsupported.json @@ -0,0 +1,11 @@ +{ + "properties" : { + "range" : { + "type" : "integer_range" + }, + "time_frame" : { + "type" : "date_range", + "format" : "yyyy-MM-dd" + } + } +} diff --git a/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java b/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java new file mode 100644 index 0000000000000..dcca2677f2ce2 --- /dev/null +++ b/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java @@ -0,0 +1,312 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.test.rest; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.apache.http.HttpStatus; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.plugins.MetaDataUpgrader; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.elasticsearch.xpack.core.ml.MlMetaIndex; +import org.elasticsearch.xpack.core.ml.integration.MlRestTestStateCleaner; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.notifications.AuditorField; +import org.elasticsearch.xpack.core.rollup.RollupRestTestStateCleaner; +import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.support.XContentMapValues.extractValue; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; + +/** Runs rest tests against external cluster */ +public class XPackRestIT extends ESClientYamlSuiteTestCase { + private static final String BASIC_AUTH_VALUE = + basicAuthHeaderValue("x_pack_rest_user", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); + + public XPackRestIT(ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + + @Override + protected Settings restClientSettings() { + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE) + .build(); + } + + + @Before + public void setupForTests() throws Exception { + waitForTemplates(); + waitForWatcher(); + enableMonitoring(); + } + + /** + * Waits for the Security template and the Machine Learning templates to be created by the {@link MetaDataUpgrader} + */ + private void waitForTemplates() throws Exception { + if (installTemplates()) { + List templates = new ArrayList<>(); + templates.addAll(Arrays.asList(AuditorField.NOTIFICATIONS_INDEX, MlMetaIndex.INDEX_NAME, + AnomalyDetectorsIndex.jobStateIndexName(), + AnomalyDetectorsIndex.jobResultsIndexPrefix())); + templates.addAll(Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES)); + + for (String template : templates) { + awaitCallApi("indices.exists_template", singletonMap("name", template), emptyList(), + response -> true, + () -> "Exception when waiting for [" + template + "] template to be created"); 
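+                // Suites that provision their own templates can skip this wait by overriding
+                // installTemplates() (checked above) to return false. Illustrative sketch only;
+                // the subclass name below is hypothetical:
+                //
+                //     public class MyCustomRestIT extends XPackRestIT {
+                //         public MyCustomRestIT(ClientYamlTestCandidate testCandidate) {
+                //             super(testCandidate);
+                //         }
+                //
+                //         @Override
+                //         protected boolean installTemplates() {
+                //             return false; // this suite manages its own templates
+                //         }
+                //     }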
+ } + } + } + + private void waitForWatcher() throws Exception { + // ensure watcher is started, so that a test can stop watcher and everything still works fine + if (isWatcherTest()) { + assertBusy(() -> { + try { + ClientYamlTestResponse response = + getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + String state = (String) response.evaluate("stats.0.watcher_state"); + if ("started".equals(state) == false) { + getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + } + // assertion required to exit the assertBusy lambda + assertThat(state, is("started")); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + } + + /** + * Enable monitoring and waits for monitoring documents to be collected and indexed in + * monitoring indices.This is the signal that the local exporter is started and ready + * for the tests. + */ + private void enableMonitoring() throws Exception { + if (isMonitoringTest()) { + final ClientYamlTestResponse xpackUsage = + callApi("xpack.usage", singletonMap("filter_path", "monitoring.enabled_exporters"), emptyList(), getApiCallHeaders()); + + @SuppressWarnings("unchecked") + final Map exporters = (Map) xpackUsage.evaluate("monitoring.enabled_exporters"); + assertNotNull("List of monitoring exporters must not be null", exporters); + assertThat("List of enabled exporters must be empty before enabling monitoring", + XContentMapValues.extractRawValues("monitoring.enabled_exporters", exporters), hasSize(0)); + + final Map settings = new HashMap<>(); + settings.put("xpack.monitoring.collection.enabled", true); + settings.put("xpack.monitoring.collection.interval", "1s"); + settings.put("xpack.monitoring.exporters._local.enabled", true); + + awaitCallApi("cluster.put_settings", emptyMap(), + singletonList(singletonMap("transient", settings)), + response -> { + Object acknowledged = response.evaluate("acknowledged"); + return acknowledged != null && (Boolean) acknowledged; + }, + () -> "Exception when enabling monitoring"); + awaitCallApi("search", singletonMap("index", ".monitoring-*"), emptyList(), + response -> ((Number) response.evaluate("hits.total")).intValue() > 0, + () -> "Exception when waiting for monitoring documents to be indexed"); + } + } + + /** + * Disable monitoring + */ + private void disableMonitoring() throws Exception { + if (isMonitoringTest()) { + final Map settings = new HashMap<>(); + settings.put("xpack.monitoring.collection.enabled", null); + settings.put("xpack.monitoring.collection.interval", null); + settings.put("xpack.monitoring.exporters._local.enabled", null); + + awaitCallApi("cluster.put_settings", emptyMap(), + singletonList(singletonMap("transient", settings)), + response -> { + Object acknowledged = response.evaluate("acknowledged"); + return acknowledged != null && (Boolean) acknowledged; + }, + () -> "Exception when disabling monitoring"); + + awaitBusy(() -> { + try { + ClientYamlTestResponse response = + callApi("xpack.usage", singletonMap("filter_path", "monitoring.enabled_exporters"), emptyList(), + getApiCallHeaders()); + + @SuppressWarnings("unchecked") + final Map exporters = (Map) response.evaluate("monitoring.enabled_exporters"); + if (exporters.isEmpty() == false) { + return false; + } + + final Map params = new HashMap<>(); + params.put("node_id", "_local"); + params.put("metric", "thread_pool"); + params.put("filter_path", "nodes.*.thread_pool.write.active"); + response = callApi("nodes.stats", params, emptyList(), 
getApiCallHeaders()); + + @SuppressWarnings("unchecked") + final Map<String, Object> nodes = (Map<String, Object>) response.evaluate("nodes"); + @SuppressWarnings("unchecked") + final Map<String, Object> node = (Map<String, Object>) nodes.values().iterator().next(); + + @SuppressWarnings("unchecked") + final Number activeWrites = (Number) extractValue("thread_pool.write.active", node); + return activeWrites != null && activeWrites.longValue() == 0L; + } catch (Exception e) { + throw new ElasticsearchException("Failed to wait for monitoring exporters to stop:", e); + } + }); + } + } + + /** + * Cleanup after tests. + * + * Feature-specific cleanup methods should be called from here rather than using + * separate @After annotated methods to ensure there is a well-defined cleanup order. + */ + @After + public void cleanup() throws Exception { + disableMonitoring(); + clearMlState(); + clearRollupState(); + if (isWaitForPendingTasks()) { + // This waits for pending tasks to complete, so must go last (otherwise + // it could be waiting for pending tasks while monitoring is still running). + XPackRestTestHelper.waitForPendingTasks(adminClient()); + } + } + + /** + * Delete any left over machine learning datafeeds and jobs. + */ + private void clearMlState() throws Exception { + if (isMachineLearningTest()) { + new MlRestTestStateCleaner(logger, adminClient(), this).clearMlMetadata(); + } + } + + /** + * Delete any left over rollup jobs + * + * Also reuses the pending-task logic from Ml... should refactor to shared location + */ + private void clearRollupState() throws Exception { + if (isRollupTest()) { + new RollupRestTestStateCleaner(logger, adminClient(), this).clearRollupMetadata(); + } + } + + /** + * Executes an API call using the admin context, waiting for it to succeed. + */ + private void awaitCallApi(String apiName, + Map<String, String> params, + List<Map<String, Object>> bodies, + CheckedFunction<ClientYamlTestResponse, Boolean, IOException> success, + Supplier<String> error) throws Exception { + + AtomicReference<IOException> exceptionHolder = new AtomicReference<>(); + awaitBusy(() -> { + try { + ClientYamlTestResponse response = callApi(apiName, params, bodies, getApiCallHeaders()); + if (response.getStatusCode() == HttpStatus.SC_OK) { + exceptionHolder.set(null); + return success.apply(response); + } + return false; + } catch (IOException e) { + exceptionHolder.set(e); + } + return false; + }); + + IOException exception = exceptionHolder.get(); + if (exception != null) { + throw new IllegalStateException(error.get(), exception); + } + } + + private ClientYamlTestResponse callApi(String apiName, + Map<String, String> params, + List<Map<String, Object>> bodies, + Map<String, String> headers) throws IOException { + return getAdminExecutionContext().callApi(apiName, params, bodies, headers); + } + + protected Map<String, String> getApiCallHeaders() { + return Collections.emptyMap(); + } + + protected boolean installTemplates() { + return true; + } + + protected boolean isMonitoringTest() { + String testName = getTestName(); + return testName != null && (testName.contains("=monitoring/") || testName.contains("=monitoring\\")); + } + + protected boolean isWatcherTest() { + String testName = getTestName(); + return testName != null && (testName.contains("=watcher/") || testName.contains("=watcher\\")); + } + + protected boolean isMachineLearningTest() { + String testName = getTestName(); + return testName != null && (testName.contains("=ml/") || testName.contains("=ml\\")); + } + + protected boolean isRollupTest() { + String testName = getTestName(); + return testName != null && (testName.contains("=rollup/") || testName.contains("=rollup\\")); + } + + /** + * Should each test wait for pending tasks to finish after
execution? + * @return Wait for pending tasks + */ + protected boolean isWaitForPendingTasks() { + return true; + } + +} diff --git a/x-pack/plugin/src/test/resources/IndexLifecycleManagerTests-template-v512.json b/x-pack/plugin/src/test/resources/IndexLifecycleManagerTests-template-v512.json new file mode 100644 index 0000000000000..b4b21c5a372da --- /dev/null +++ b/x-pack/plugin/src/test/resources/IndexLifecycleManagerTests-template-v512.json @@ -0,0 +1,15 @@ +{ + "index_patterns": "IndexLifeCycleManagerTests", + "mappings": { + "doc": { + "_meta": { + "security-version": "5.1.2" + }, + "properties": { + "test": { + "type": "keyword" + } + } + } + } +} diff --git a/x-pack/plugin/src/test/resources/org/elasticsearch/xpack/watcher/actions/email/service/logo.png b/x-pack/plugin/src/test/resources/org/elasticsearch/xpack/watcher/actions/email/service/logo.png new file mode 100644 index 0000000000000..9f84883503f85 Binary files /dev/null and b/x-pack/plugin/src/test/resources/org/elasticsearch/xpack/watcher/actions/email/service/logo.png differ diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.graph.explore.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.graph.explore.json new file mode 100644 index 0000000000000..2879ce182cb7d --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.graph.explore.json @@ -0,0 +1,33 @@ +{ + "xpack.graph.explore": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html", + "methods": ["GET", "POST"], + "url": { + "path": "/{index}/_xpack/graph/_explore", + "paths": ["/{index}/_xpack/graph/_explore", "/{index}/{type}/_xpack/graph/_explore"], + "parts" : { + "index": { + "type" : "list", + "description" : "A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices" + }, + "type": { + "type" : "list", + "description" : "A comma-separated list of document types to search; leave empty to perform the operation on all types" + } + }, + "params": { + "routing": { + "type" : "string", + "description" : "Specific routing value" + }, + "timeout": { + "type" : "time", + "description" : "Explicit operation timeout" + } + } + }, + "body": { + "description" : "Graph Query DSL" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.info.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.info.json new file mode 100644 index 0000000000000..9913bc3fba6df --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.info.json @@ -0,0 +1,18 @@ +{ + "xpack.info": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/info-api.html", + "methods": [ "GET" ], + "url": { + "path": "/_xpack", + "paths": [ "/_xpack" ], + "parts": {}, + "params": { + "categories": { + "type": "list", + "description" : "Comma-separated list of info categories. 
Can be any of: build, license, features" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.delete.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.delete.json new file mode 100644 index 0000000000000..6b5a449b04f3f --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.delete.json @@ -0,0 +1,12 @@ +{ + "xpack.license.delete": { + "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", + "methods": ["DELETE"], + "url": { + "path": "/_xpack/license", + "paths": ["/_xpack/license"], + "parts" : {} + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get.json new file mode 100644 index 0000000000000..f8498a3db9799 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get.json @@ -0,0 +1,19 @@ +{ + "xpack.license.get": { + "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", + "methods": ["GET"], + "url": { + "path": "/_xpack/license", + "paths": ["/_xpack/license"], + "parts" : { + }, + "params": { + "local": { + "type" : "boolean", + "description" : "Return local information, do not retrieve the state from master node (default: false)" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get_basic_status.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get_basic_status.json new file mode 100644 index 0000000000000..80e9cfe9ab461 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get_basic_status.json @@ -0,0 +1,15 @@ +{ + "xpack.license.get_basic_status": { + "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", + "methods": ["GET"], + "url": { + "path": "/_xpack/license/basic_status", + "paths": ["/_xpack/license/basic_status"], + "parts" : { + }, + "params": { + } + }, + "body": null + } +} \ No newline at end of file diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get_trial_status.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get_trial_status.json new file mode 100644 index 0000000000000..9824b02240577 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.get_trial_status.json @@ -0,0 +1,15 @@ +{ + "xpack.license.get_trial_status": { + "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", + "methods": ["GET"], + "url": { + "path": "/_xpack/license/trial_status", + "paths": ["/_xpack/license/trial_status"], + "parts" : { + }, + "params": { + } + }, + "body": null + } +} \ No newline at end of file diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post.json new file mode 100644 index 0000000000000..0899a742a8156 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post.json @@ -0,0 +1,21 @@ +{ + "xpack.license.post": { + "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", + "methods": ["PUT", "POST"], + "url": { + "path": "/_xpack/license", + "paths": ["/_xpack/license"], + "parts" : { + }, + "params": { + "acknowledge": { + "type" : "boolean", + "description" : "whether the user has acknowledged 
acknowledge messages (default: false)" + } + } + }, + "body": { + "description" : "licenses to be installed" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_basic.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_basic.json new file mode 100644 index 0000000000000..77c7a10878fea --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_basic.json @@ -0,0 +1,19 @@ +{ + "xpack.license.post_start_basic": { + "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", + "methods": ["POST"], + "url": { + "path": "/_xpack/license/start_basic", + "paths": ["/_xpack/license/start_basic"], + "parts" : { + }, + "params": { + "acknowledge": { + "type" : "boolean", + "description" : "whether the user has acknowledged acknowledge messages (default: false)" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_trial.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_trial.json new file mode 100644 index 0000000000000..a1e5d27da1eda --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_trial.json @@ -0,0 +1,23 @@ +{ + "xpack.license.post_start_trial": { + "documentation": "https://www.elastic.co/guide/en/x-pack/current/license-management.html", + "methods": ["POST"], + "url": { + "path": "/_xpack/license/start_trial", + "paths": ["/_xpack/license/start_trial"], + "parts" : { + }, + "params": { + "type": { + "type" : "string", + "description" : "The type of trial license to generate (default: \"trial\")" + }, + "acknowledge": { + "type" : "boolean", + "description" : "whether the user has acknowledged acknowledge messages (default: false)" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.deprecations.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.deprecations.json new file mode 100644 index 0000000000000..f639880f510a8 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.deprecations.json @@ -0,0 +1,19 @@ +{ + "xpack.migration.deprecations": { + "documentation": "http://www.elastic.co/guide/en/migration/current/migration-api-deprecation.html", + "methods": [ "GET" ], + "url": { + "path": "/{index}/_xpack/migration/deprecations", + "paths": ["/_xpack/migration/deprecations", "/{index}/_xpack/migration/deprecations"], + "parts": { + "index": { + "type" : "string", + "description" : "Index pattern" + } + }, + "params": { + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.get_assistance.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.get_assistance.json new file mode 100644 index 0000000000000..46abf6741c1ca --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.get_assistance.json @@ -0,0 +1,35 @@ +{ + "xpack.migration.get_assistance": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-assistance.html", + "methods": [ "GET" ], + "url": { + "path": "/_xpack/migration/assistance", + "paths": [ + "/_xpack/migration/assistance", + "/_xpack/migration/assistance/{index}" + ], + "parts": { + "index": { + "type" : "list", + "description" : "A comma-separated list of index names; use `_all` or empty string to perform the operation on 
all indices" + } + }, + "params": { + "allow_no_indices": { + "type" : "boolean", + "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" + }, + "expand_wildcards": { + "type" : "enum", + "options" : ["open","closed","none","all"], + "default" : "open", + "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." + }, + "ignore_unavailable": { + "type" : "boolean", + "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" + } + } + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.upgrade.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.upgrade.json new file mode 100644 index 0000000000000..d6cdff03fa689 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.migration.upgrade.json @@ -0,0 +1,26 @@ +{ + "xpack.migration.upgrade": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/migration-api-upgrade.html", + "methods": [ "POST" ], + "url": { + "path": "/_xpack/migration/upgrade/{index}", + "paths": [ + "/_xpack/migration/upgrade/{index}" + ], + "parts": { + "index": { + "type" : "string", + "required" : true, + "description" : "The name of the index" + } + }, + "params": { + "wait_for_completion": { + "type" : "boolean", + "default": true, + "description" : "Should the request block until the upgrade operation is completed" + } + } + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.close_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.close_job.json new file mode 100644 index 0000000000000..8830926ed5c05 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.close_job.json @@ -0,0 +1,33 @@ +{ + "xpack.ml.close_job": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-close-job.html", + "methods": [ "POST" ], + "url": { + "path": "/_xpack/ml/anomaly_detectors/{job_id}/_close", + "paths": [ "/_xpack/ml/anomaly_detectors/{job_id}/_close" ], + "parts": { + "job_id": { + "type": "string", + "required": true, + "description": "The name of the job to close" + } + }, + "params": { + "allow_no_jobs": { + "type": "boolean", + "required": false, + "description": "Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified)" + }, + "force": { + "type": "boolean", + "required": false, + "description": "True if the job should be forcefully closed" + }, + "timeout": { + "type": "time", + "description": "Controls the time to wait until a job has closed. 
Default to 30 minutes" + } + } + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_calendar.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_calendar.json new file mode 100644 index 0000000000000..3fcfa8582e570 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_calendar.json @@ -0,0 +1,17 @@ +{ + "xpack.ml.delete_calendar": { + "methods": [ "DELETE" ], + "url": { + "path": "/_xpack/ml/calendars/{calendar_id}", + "paths": [ "/_xpack/ml/calendars/{calendar_id}" ], + "parts": { + "calendar_id": { + "type" : "string", + "required" : true, + "description" : "The ID of the calendar to delete" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_calendar_event.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_calendar_event.json new file mode 100644 index 0000000000000..d31990645076c --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_calendar_event.json @@ -0,0 +1,22 @@ +{ + "xpack.ml.delete_calendar_event": { + "methods": [ "DELETE" ], + "url": { + "path": "/_xpack/ml/calendars/{calendar_id}/events/{event_id}", + "paths": [ "/_xpack/ml/calendars/{calendar_id}/events/{event_id}" ], + "parts": { + "calendar_id": { + "type" : "string", + "required" : true, + "description" : "The ID of the calendar to modify" + }, + "event_id": { + "type": "string", + "required": true, + "description": "The ID of the event to remove from the calendar" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_calendar_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_calendar_job.json new file mode 100644 index 0000000000000..43dc1b94789b5 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_calendar_job.json @@ -0,0 +1,22 @@ +{ + "xpack.ml.delete_calendar_job": { + "methods": [ "DELETE" ], + "url": { + "path": "/_xpack/ml/calendars/{calendar_id}/jobs/{job_id}", + "paths": [ "/_xpack/ml/calendars/{calendar_id}/jobs/{job_id}" ], + "parts": { + "calendar_id": { + "type" : "string", + "required" : true, + "description" : "The ID of the calendar to modify" + }, + "job_id": { + "type": "string", + "required": true, + "description": "The ID of the job to remove from the calendar" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_datafeed.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_datafeed.json new file mode 100644 index 0000000000000..05ec6e0cd2ddb --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_datafeed.json @@ -0,0 +1,25 @@ +{ + "xpack.ml.delete_datafeed": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-datafeed.html", + "methods": [ "DELETE" ], + "url": { + "path": "/_xpack/ml/datafeeds/{datafeed_id}", + "paths": [ "/_xpack/ml/datafeeds/{datafeed_id}" ], + "parts": { + "datafeed_id": { + "type": "string", + "required": true, + "description": "The ID of the datafeed to delete" + } + }, + "params": { + "force": { + "type": "boolean", + "required": false, + "description": "True if the datafeed should be forcefully deleted" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_expired_data.json 
b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_expired_data.json new file mode 100644 index 0000000000000..4a4c079d52243 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_expired_data.json @@ -0,0 +1,11 @@ +{ + "xpack.ml.delete_expired_data": { + "methods": [ "DELETE" ], + "url": { + "path": "/_xpack/ml/_delete_expired_data", + "paths": [ "/_xpack/ml/_delete_expired_data" ], + "parts": {} + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_filter.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_filter.json new file mode 100644 index 0000000000000..6c120fc243f51 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_filter.json @@ -0,0 +1,17 @@ +{ + "xpack.ml.delete_filter": { + "methods": [ "DELETE" ], + "url": { + "path": "/_xpack/ml/filters/{filter_id}", + "paths": [ "/_xpack/ml/filters/{filter_id}" ], + "parts": { + "filter_id": { + "type" : "string", + "required" : true, + "description" : "The ID of the filter to delete" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_job.json new file mode 100644 index 0000000000000..77eb89c00f92d --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_job.json @@ -0,0 +1,25 @@ +{ + "xpack.ml.delete_job": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html", + "methods": [ "DELETE" ], + "url": { + "path": "/_xpack/ml/anomaly_detectors/{job_id}", + "paths": [ "/_xpack/ml/anomaly_detectors/{job_id}" ], + "parts": { + "job_id": { + "type": "string", + "required": true, + "description": "The ID of the job to delete" + } + }, + "params": { + "force": { + "type": "boolean", + "required": false, + "description": "True if the job should be forcefully deleted" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_model_snapshot.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_model_snapshot.json new file mode 100644 index 0000000000000..4616e45737ed4 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.delete_model_snapshot.json @@ -0,0 +1,23 @@ +{ + "xpack.ml.delete_model_snapshot": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-snapshot.html", + "methods": [ "DELETE" ], + "url": { + "path": "/_xpack/ml/anomaly_detectors/{job_id}/model_snapshots/{snapshot_id}", + "paths": [ "/_xpack/ml/anomaly_detectors/{job_id}/model_snapshots/{snapshot_id}" ], + "parts": { + "job_id": { + "type": "string", + "required": true, + "description": "The ID of the job to fetch" + }, + "snapshot_id": { + "type": "string", + "required": true, + "description": "The ID of the snapshot to delete" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.flush_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.flush_job.json new file mode 100644 index 0000000000000..22dba0ed6cee6 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.flush_job.json @@ -0,0 +1,46 @@ +{ + "xpack.ml.flush_job": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html", + "methods": [ + "POST" + ], + "url": { + "path": 
"/_xpack/ml/anomaly_detectors/{job_id}/_flush", + "paths": [ + "/_xpack/ml/anomaly_detectors/{job_id}/_flush" + ], + "parts": { + "job_id": { + "type": "string", + "required": true, + "description": "The name of the job to flush" + } + }, + "params": { + "calc_interim": { + "type": "boolean", + "description": "Calculates interim results for the most recent bucket or all buckets within the latency period" + }, + "start": { + "type": "string", + "description": "When used in conjunction with calc_interim, specifies the range of buckets on which to calculate interim results" + }, + "end": { + "type": "string", + "description": "When used in conjunction with calc_interim, specifies the range of buckets on which to calculate interim results" + }, + "advance_time": { + "type": "string", + "description": "Advances time to the given value generating results and updating the model for the advanced interval" + }, + "skip_time": { + "type": "string", + "description": "Skips time to the given value without generating results or updating the model for the skipped interval" + } + } + }, + "body": { + "description": "Flush parameters" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.forecast.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.forecast.json new file mode 100644 index 0000000000000..10c3725d374af --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.forecast.json @@ -0,0 +1,29 @@ +{ + "xpack.ml.forecast": { + "methods": [ "POST" ], + "url": { + "path": "/_xpack/ml/anomaly_detectors/{job_id}/_forecast", + "paths": [ "/_xpack/ml/anomaly_detectors/{job_id}/_forecast" ], + "parts": { + "job_id": { + "type": "string", + "required": true, + "description": "The ID of the job to forecast for" + } + }, + "params": { + "duration": { + "type": "time", + "required": false, + "description": "The duration of the forecast" + }, + "expires_in": { + "type": "time", + "required": false, + "description": "The time interval after which the forecast expires. Expired forecasts will be deleted at the first opportunity." 
+ } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_buckets.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_buckets.json new file mode 100644 index 0000000000000..b94f534884df5 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_buckets.json @@ -0,0 +1,65 @@ +{ + "xpack.ml.get_buckets": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-bucket.html", + "methods": [ "GET", "POST" ], + "url": { + "path": "/_xpack/ml/anomaly_detectors/{job_id}/results/buckets/{timestamp}", + "paths": [ + "/_xpack/ml/anomaly_detectors/{job_id}/results/buckets/{timestamp}", + "/_xpack/ml/anomaly_detectors/{job_id}/results/buckets" + ], + "parts": { + "job_id": { + "type" : "string", + "required": true, + "description": "ID of the job to get bucket results from" + }, + "timestamp": { + "type" : "string", + "description" : "The timestamp of the desired single bucket result" + } + }, + "params": { + "expand": { + "type": "boolean", + "description" : "Include anomaly records" + }, + "exclude_interim": { + "type": "boolean", + "description" : "Exclude interim results" + }, + "from": { + "type": "int", + "description": "skips a number of buckets" + }, + "size": { + "type": "int", + "description": "specifies a max number of buckets to get" + }, + "start": { + "type": "string", + "description" : "Start time filter for buckets" + }, + "end": { + "type": "string", + "description" : "End time filter for buckets" + }, + "anomaly_score": { + "type": "double", + "description": "Filter for the most anomalous buckets" + }, + "sort": { + "type": "string", + "description": "Sort buckets by a particular field" + }, + "desc": { + "type": "boolean", + "description": "Set the sort direction" + } + } + }, + "body": { + "description" : "Bucket selection details if not provided in URI" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_calendar_events.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_calendar_events.json new file mode 100644 index 0000000000000..48b1bd07fd61c --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_calendar_events.json @@ -0,0 +1,41 @@ +{ + "xpack.ml.get_calendar_events": { + "methods": [ "GET" ], + "url": { + "path": "/_xpack/ml/calendars/{calendar_id}/events", + "paths": [ + "/_xpack/ml/calendars/{calendar_id}/events" + ], + "parts": { + "calendar_id": { + "type": "string", + "description": "The ID of the calendar containing the events", + "required": true + } + }, + "params": { + "job_id": { + "type": "string", + "description": "Get events for the job. 
When this option is used calendar_id must be '_all'" + }, + "start": { + "type": "string", + "description": "Get events after this time" + }, + "end": { + "type": "date", + "description": "Get events before this time" + }, + "from": { + "type": "int", + "description": "Skips a number of events" + }, + "size": { + "type": "int", + "description": "Specifies a max number of events to get" + } + } + }, + "body": null + } +} \ No newline at end of file diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_calendars.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_calendars.json new file mode 100644 index 0000000000000..5b252a0e89c32 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_calendars.json @@ -0,0 +1,29 @@ +{ + "xpack.ml.get_calendars": { + "methods": [ "GET", "POST" ], + "url": { + "path": "/_xpack/ml/calendars/{calendar_id}", + "paths": [ + "/_xpack/ml/calendars", + "/_xpack/ml/calendars/{calendar_id}" + ], + "parts": { + "calendar_id": { + "type": "string", + "description": "The ID of the calendar to fetch" + } + }, + "params": { + "from": { + "type": "int", + "description": "skips a number of calendars" + }, + "size": { + "type": "int", + "description": "specifies a max number of calendars to get" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_categories.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_categories.json new file mode 100644 index 0000000000000..481aba426dfe7 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_categories.json @@ -0,0 +1,37 @@ +{ + "xpack.ml.get_categories": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html", + "methods": [ "GET", "POST" ], + "url": { + "path": "/_xpack/ml/anomaly_detectors/{job_id}/results/categories/{category_id}", + "paths": [ + "/_xpack/ml/anomaly_detectors/{job_id}/results/categories/{category_id}", + "/_xpack/ml/anomaly_detectors/{job_id}/results/categories/" + ], + "parts": { + "job_id": { + "type" : "string", + "required": true, + "description": "The name of the job" + }, + "category_id": { + "type" : "long", + "description" : "The identifier of the category definition of interest" + } + }, + "params": { + "from": { + "type": "int", + "description": "skips a number of categories" + }, + "size": { + "type": "int", + "description": "specifies a max number of categories to get" + } + } + }, + "body": { + "description" : "Category selection details if not provided in URI" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_datafeed_stats.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_datafeed_stats.json new file mode 100644 index 0000000000000..f22dfee9b1623 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_datafeed_stats.json @@ -0,0 +1,27 @@ +{ + "xpack.ml.get_datafeed_stats": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed-stats.html", + "methods": [ "GET"], + "url": { + "path": "/_xpack/ml/datafeeds/{datafeed_id}/_stats", + "paths": [ + "/_xpack/ml/datafeeds/{datafeed_id}/_stats", + "/_xpack/ml/datafeeds/_stats" + ], + "parts": { + "datafeed_id": { + "type": "string", + "description": "The ID of the datafeeds stats to fetch" + } + }, + "params": { + "allow_no_datafeeds": { + "type": "boolean", + "required": false, + "description": 
"Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified)" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_datafeeds.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_datafeeds.json new file mode 100644 index 0000000000000..e646ef8cd03fd --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_datafeeds.json @@ -0,0 +1,27 @@ +{ + "xpack.ml.get_datafeeds": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed.html", + "methods": [ "GET"], + "url": { + "path": "/_xpack/ml/datafeeds/{datafeed_id}", + "paths": [ + "/_xpack/ml/datafeeds/{datafeed_id}", + "/_xpack/ml/datafeeds" + ], + "parts": { + "datafeed_id": { + "type": "string", + "description": "The ID of the datafeeds to fetch" + } + }, + "params": { + "allow_no_datafeeds": { + "type": "boolean", + "required": false, + "description": "Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified)" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_filters.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_filters.json new file mode 100644 index 0000000000000..f5dfe0c74e013 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_filters.json @@ -0,0 +1,26 @@ +{ + "xpack.ml.get_filters": { + "methods": [ "GET" ], + "url": { + "path": "/_xpack/ml/filters/{filter_id}", + "paths": [ "/_xpack/ml/filters", "/_xpack/ml/filters/{filter_id}" ], + "parts": { + "filter_id": { + "type" : "string", + "description" : "The ID of the filter to fetch" + } + }, + "params": { + "from": { + "type": "int", + "description": "skips a number of filters" + }, + "size": { + "type": "int", + "description": "specifies a max number of filters to get" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_influencers.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_influencers.json new file mode 100644 index 0000000000000..88a42a880919d --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_influencers.json @@ -0,0 +1,53 @@ +{ + "xpack.ml.get_influencers": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-influencer.html", + "methods": [ "GET", "POST" ], + "url": { + "path": "/_xpack/ml/anomaly_detectors/{job_id}/results/influencers", + "paths": [ "/_xpack/ml/anomaly_detectors/{job_id}/results/influencers" ], + "parts": { + "job_id": { + "type" : "string", + "required": true + } + }, + "params": { + "exclude_interim": { + "type": "boolean", + "description" : "Exclude interim results" + }, + "from": { + "type": "int", + "description": "skips a number of influencers" + }, + "size": { + "type": "int", + "description": "specifies a max number of influencers to get" + }, + "start": { + "type": "string", + "description": "start timestamp for the requested influencers" + }, + "end": { + "type": "string", + "description": "end timestamp for the requested influencers" + }, + "influencer_score": { + "type": "double", + "description": "influencer score threshold for the requested influencers" + }, + "sort": { + "type": "string", + "description": "sort field for the requested influencers" + }, + "desc": { + "type": 
"boolean", + "description": "whether the results should be sorted in decending order" + } + } + }, + "body": { + "description": "Influencer selection criteria" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_job_stats.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_job_stats.json new file mode 100644 index 0000000000000..174de8907e963 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_job_stats.json @@ -0,0 +1,27 @@ +{ + "xpack.ml.get_job_stats": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html", + "methods": [ "GET"], + "url": { + "path": "/_xpack/ml/anomaly_detectors/{job_id}/_stats", + "paths": [ + "/_xpack/ml/anomaly_detectors/_stats", + "/_xpack/ml/anomaly_detectors/{job_id}/_stats" + ], + "parts": { + "job_id": { + "type": "string", + "description": "The ID of the jobs stats to fetch" + } + }, + "params": { + "allow_no_jobs": { + "type": "boolean", + "required": false, + "description": "Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified)" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_jobs.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_jobs.json new file mode 100644 index 0000000000000..0da7be761d068 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_jobs.json @@ -0,0 +1,27 @@ +{ + "xpack.ml.get_jobs": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html", + "methods": [ "GET"], + "url": { + "path": "/_xpack/ml/anomaly_detectors/{job_id}", + "paths": [ + "/_xpack/ml/anomaly_detectors/{job_id}", + "/_xpack/ml/anomaly_detectors" + ], + "parts": { + "job_id": { + "type": "string", + "description": "The ID of the jobs to fetch" + } + }, + "params": { + "allow_no_jobs": { + "type": "boolean", + "required": false, + "description": "Whether to ignore if a wildcard expression matches no jobs. (This includes `_all` string or when no jobs have been specified)" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_model_snapshots.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_model_snapshots.json new file mode 100644 index 0000000000000..f820bf43562a0 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_model_snapshots.json @@ -0,0 +1,53 @@ +{ + "xpack.ml.get_model_snapshots": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-snapshot.html", + "methods": [ "GET", "POST" ], + "url": { + "path": "/_xpack/ml/anomaly_detectors/{job_id}/model_snapshots/{snapshot_id}", + "paths": [ + "/_xpack/ml/anomaly_detectors/{job_id}/model_snapshots/{snapshot_id}", + "/_xpack/ml/anomaly_detectors/{job_id}/model_snapshots" + ], + "parts": { + "job_id": { + "type": "string", + "required": true, + "description": "The ID of the job to fetch" + }, + "snapshot_id": { + "type": "string", + "description": "The ID of the snapshot to fetch" + } + }, + "params": { + "from": { + "type": "int", + "description": "Skips a number of documents" + }, + "size": { + "type": "int", + "description": "The default number of documents returned in queries as a string." 
+ }, + "start": { + "type": "date", + "description": "The filter 'start' query parameter" + }, + "end": { + "type": "date", + "description": "The filter 'end' query parameter" + }, + "sort": { + "type": "string", + "description": "Name of the field to sort on" + }, + "desc": { + "type": "boolean", + "description": "True if the results should be sorted in descending order" + } + } + }, + "body": { + "description": "Model snapshot selection criteria" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_overall_buckets.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_overall_buckets.json new file mode 100644 index 0000000000000..eaf3b2a233f51 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_overall_buckets.json @@ -0,0 +1,52 @@ +{ + "xpack.ml.get_overall_buckets": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-overall-buckets.html", + "methods": [ "GET", "POST" ], + "url": { + "path": "/_xpack/ml/anomaly_detectors/{job_id}/results/overall_buckets", + "paths": [ + "/_xpack/ml/anomaly_detectors/{job_id}/results/overall_buckets" + ], + "parts": { + "job_id": { + "type" : "string", + "required": true, + "description": "The job IDs for which to calculate overall bucket results" + } + }, + "params": { + "top_n": { + "type": "int", + "description": "The number of top job bucket scores to be used in the overall_score calculation" + }, + "bucket_span": { + "type": "string", + "description": "The span of the overall buckets. Defaults to the longest job bucket_span" + }, + "overall_score": { + "type": "double", + "description": "Returns overall buckets with overall scores higher than this value" + }, + "exclude_interim": { + "type": "boolean", + "description" : "If true overall buckets that include interim buckets will be excluded" + }, + "start": { + "type": "string", + "description" : "Returns overall buckets with timestamps after this time" + }, + "end": { + "type": "string", + "description" : "Returns overall buckets with timestamps earlier than this time" + }, + "allow_no_jobs": { + "type": "boolean", + "description": "Whether to ignore if a wildcard expression matches no jobs. 
(This includes `_all` string or when no jobs have been specified)" + } + } + }, + "body": { + "description" : "Overall bucket selection details if not provided in URI" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_records.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_records.json new file mode 100644 index 0000000000000..d039d9d4a4070 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.get_records.json @@ -0,0 +1,54 @@ +{ + "xpack.ml.get_records": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-record.html", + "methods": ["GET", "POST"], + "url": { + "path": "/_xpack/ml/anomaly_detectors/{job_id}/results/records", + "paths": [ + "/_xpack/ml/anomaly_detectors/{job_id}/results/records" + ], + "parts": { + "job_id": { + "type": "string", + "required": true + } + }, + "params": { + "exclude_interim": { + "type": "boolean", + "description": "Exclude interim results" + }, + "from": { + "type": "int", + "description": "skips a number of records" + }, + "size": { + "type": "int", + "description": "specifies a max number of records to get" + }, + "start": { + "type": "string", + "description": "Start time filter for records" + }, + "end": { + "type": "string", + "description": "End time filter for records" + }, + "record_score": { + "type": "double" + }, + "sort": { + "type": "string", + "description": "Sort records by a particular field" + }, + "desc": { + "type": "boolean", + "description": "Set the sort direction" + } + } + }, + "body": { + "description": "Record selection criteria" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.info.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.info.json new file mode 100644 index 0000000000000..770438251f616 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.info.json @@ -0,0 +1,11 @@ +{ + "xpack.ml.info": { + "methods": [ "GET" ], + "url": { + "path": "/_xpack/ml/info", + "paths": [ "/_xpack/ml/info" ], + "parts": {}, + "body": null + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.open_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.open_job.json new file mode 100644 index 0000000000000..4a1d8a7c63310 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.open_job.json @@ -0,0 +1,26 @@ +{ + "xpack.ml.open_job": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html", + "methods": [ "POST" ], + "url": { + "path": "/_xpack/ml/anomaly_detectors/{job_id}/_open", + "paths": [ "/_xpack/ml/anomaly_detectors/{job_id}/_open" ], + "parts": { + "job_id": { + "type": "string", + "required": true, + "description": "The ID of the job to open" + }, + "ignore_downtime": { + "type": "boolean", + "description": "Controls if gaps in data are treated as anomalous or as a maintenance window after a job re-start" + }, + "timeout": { + "type": "time", + "description": "Controls the time to wait until a job has opened. 
Default to 30 minutes" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.post_calendar_events.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.post_calendar_events.json new file mode 100644 index 0000000000000..71ed167a7367e --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.post_calendar_events.json @@ -0,0 +1,20 @@ +{ + "xpack.ml.post_calendar_events": { + "methods": [ "POST" ], + "url": { + "path": "/_xpack/ml/calendars/{calendar_id}/events", + "paths": [ "/_xpack/ml/calendars/{calendar_id}/events" ], + "parts": { + "calendar_id": { + "type": "string", + "required": true, + "description": "The ID of the calendar to modify" + } + } + }, + "body": { + "description" : "A list of events", + "required" : true + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.post_data.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.post_data.json new file mode 100644 index 0000000000000..b25af68b268c6 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.post_data.json @@ -0,0 +1,32 @@ +{ + "xpack.ml.post_data": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-data.html", + "methods": [ "POST" ], + "url": { + "path": "/_xpack/ml/anomaly_detectors/{job_id}/_data", + "paths": [ "/_xpack/ml/anomaly_detectors/{job_id}/_data" ], + "parts": { + "job_id": { + "type": "string", + "required": true, + "description": "The name of the job receiving the data" + } + }, + "params": { + "reset_start": { + "type": "string", + "description": "Optional parameter to specify the start of the bucket resetting range" + }, + "reset_end": { + "type": "string", + "description": "Optional parameter to specify the end of the bucket resetting range" + } + } + }, + "body": { + "description" : "The data to process", + "required" : true, + "serialize" : "bulk" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.preview_datafeed.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.preview_datafeed.json new file mode 100644 index 0000000000000..66e201e630ae2 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.preview_datafeed.json @@ -0,0 +1,18 @@ +{ + "xpack.ml.preview_datafeed": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-preview-datafeed.html", + "methods": [ "GET" ], + "url": { + "path": "/_xpack/ml/datafeeds/{datafeed_id}/_preview", + "paths": [ "/_xpack/ml/datafeeds/{datafeed_id}/_preview" ], + "parts": { + "datafeed_id": { + "type": "string", + "required": true, + "description": "The ID of the datafeed to preview" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.put_calendar.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.put_calendar.json new file mode 100644 index 0000000000000..d762ad2931571 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.put_calendar.json @@ -0,0 +1,20 @@ +{ + "xpack.ml.put_calendar": { + "methods": [ "PUT" ], + "url": { + "path": "/_xpack/ml/calendars/{calendar_id}", + "paths": [ "/_xpack/ml/calendars/{calendar_id}" ], + "parts": { + "calendar_id": { + "type": "string", + "required": true, + "description": "The ID of the calendar to create" + } + } + }, + "body": { + "description" : "The calendar details", + "required" : false + } + } +} 
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.put_calendar_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.put_calendar_job.json new file mode 100644 index 0000000000000..2abf870058c7e --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.put_calendar_job.json @@ -0,0 +1,22 @@ +{ + "xpack.ml.put_calendar_job": { + "methods": [ "PUT" ], + "url": { + "path": "/_xpack/ml/calendars/{calendar_id}/jobs/{job_id}", + "paths": [ "/_xpack/ml/calendars/{calendar_id}/jobs/{job_id}" ], + "parts": { + "calendar_id": { + "type": "string", + "required": true, + "description": "The ID of the calendar to modify" + }, + "job_id": { + "type": "string", + "required": true, + "description": "The ID of the job to add to the calendar" + } + } + }, + "body": null + } +} \ No newline at end of file diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.put_datafeed.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.put_datafeed.json new file mode 100644 index 0000000000000..0f7e4de21c7d2 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.put_datafeed.json @@ -0,0 +1,21 @@ +{ + "xpack.ml.put_datafeed": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html", + "methods": [ "PUT" ], + "url": { + "path": "/_xpack/ml/datafeeds/{datafeed_id}", + "paths": [ "/_xpack/ml/datafeeds/{datafeed_id}" ], + "parts": { + "datafeed_id": { + "type": "string", + "required": true, + "description": "The ID of the datafeed to create" + } + } + }, + "body": { + "description" : "The datafeed config", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.put_filter.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.put_filter.json new file mode 100644 index 0000000000000..36c5f0582cbd8 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.put_filter.json @@ -0,0 +1,20 @@ +{ + "xpack.ml.put_filter": { + "methods": [ "PUT" ], + "url": { + "path": "/_xpack/ml/filters/{filter_id}", + "paths": [ "/_xpack/ml/filters/{filter_id}" ], + "parts": { + "filter_id": { + "type": "string", + "required": true, + "description": "The ID of the filter to create" + } + } + }, + "body": { + "description" : "The filter details", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.put_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.put_job.json new file mode 100644 index 0000000000000..f7b292cdafc32 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.put_job.json @@ -0,0 +1,21 @@ +{ + "xpack.ml.put_job": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html", + "methods": [ "PUT" ], + "url": { + "path": "/_xpack/ml/anomaly_detectors/{job_id}", + "paths": [ "/_xpack/ml/anomaly_detectors/{job_id}" ], + "parts": { + "job_id": { + "type": "string", + "required": true, + "description": "The ID of the job to create" + } + } + }, + "body": { + "description" : "The job", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.revert_model_snapshot.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.revert_model_snapshot.json new file mode 100644 index 0000000000000..6f7810aff7f55 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.revert_model_snapshot.json 
@@ -0,0 +1,31 @@ +{ + "xpack.ml.revert_model_snapshot": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-revert-snapshot.html", + "methods": [ "POST" ], + "url": { + "path": "/_xpack/ml/anomaly_detectors/{job_id}/model_snapshots/{snapshot_id}/_revert", + "paths": [ "/_xpack/ml/anomaly_detectors/{job_id}/model_snapshots/{snapshot_id}/_revert" ], + "parts": { + "job_id": { + "type": "string", + "required": true, + "description": "The ID of the job to fetch" + }, + "snapshot_id": { + "type": "string", + "required": true, + "description": "The ID of the snapshot to revert to" + } + }, + "params": { + "delete_intervening_results": { + "type": "boolean", + "description": "Should we reset the results back to the time of the snapshot?" + } + } + }, + "body": { + "description": "Reversion options" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.start_datafeed.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.start_datafeed.json new file mode 100644 index 0000000000000..1018cb224363b --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.start_datafeed.json @@ -0,0 +1,37 @@ +{ + "xpack.ml.start_datafeed": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html", + "methods": [ "POST" ], + "url": { + "path": "/_xpack/ml/datafeeds/{datafeed_id}/_start", + "paths": [ "/_xpack/ml/datafeeds/{datafeed_id}/_start" ], + "parts": { + "datafeed_id": { + "type": "string", + "required": true, + "description": "The ID of the datafeed to start" + } + }, + "params": { + "start": { + "type": "string", + "required": false, + "description": "The start time from where the datafeed should begin" + }, + "end": { + "type": "string", + "required": false, + "description": "The end time when the datafeed should stop. When not set, the datafeed continues in real time" + }, + "timeout": { + "type": "time", + "required": false, + "description": "Controls the time to wait until a datafeed has started. Default to 20 seconds" + } + } + }, + "body": { + "description": "The start datafeed parameters" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.stop_datafeed.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.stop_datafeed.json new file mode 100644 index 0000000000000..0876e3019ab42 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.stop_datafeed.json @@ -0,0 +1,39 @@ +{ + "xpack.ml.stop_datafeed": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html", + "methods": [ + "POST" + ], + "url": { + "path": "/_xpack/ml/datafeeds/{datafeed_id}/_stop", + "paths": [ + "/_xpack/ml/datafeeds/{datafeed_id}/_stop" + ], + "parts": { + "datafeed_id": { + "type": "string", + "required": true, + "description": "The ID of the datafeed to stop" + } + }, + "params": { + "allow_no_datafeeds": { + "type": "boolean", + "required": false, + "description": "Whether to ignore if a wildcard expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified)" + }, + "force": { + "type": "boolean", + "required": false, + "description": "True if the datafeed should be forcefully stopped." + }, + "timeout": { + "type": "time", + "required": false, + "description": "Controls the time to wait until a datafeed has stopped. 
Default to 20 seconds" + } + }, + "body": null + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.update_datafeed.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.update_datafeed.json new file mode 100644 index 0000000000000..d089012325a46 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.update_datafeed.json @@ -0,0 +1,21 @@ +{ + "xpack.ml.update_datafeed": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-datafeed.html", + "methods": [ "POST" ], + "url": { + "path": "/_xpack/ml/datafeeds/{datafeed_id}/_update", + "paths": [ "/_xpack/ml/datafeeds/{datafeed_id}/_update" ], + "parts": { + "datafeed_id": { + "type": "string", + "required": true, + "description": "The ID of the datafeed to update" + } + } + }, + "body": { + "description" : "The datafeed update settings", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.update_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.update_job.json new file mode 100644 index 0000000000000..d59e5811364b6 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.update_job.json @@ -0,0 +1,21 @@ +{ + "xpack.ml.update_job": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html", + "methods": [ "POST" ], + "url": { + "path": "/_xpack/ml/anomaly_detectors/{job_id}/_update", + "paths": [ "/_xpack/ml/anomaly_detectors/{job_id}/_update" ], + "parts": { + "job_id": { + "type": "string", + "required": true, + "description": "The ID of the job to create" + } + } + }, + "body": { + "description" : "The job update settings", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.update_model_snapshot.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.update_model_snapshot.json new file mode 100644 index 0000000000000..5c62e0d3b8a2a --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.update_model_snapshot.json @@ -0,0 +1,27 @@ +{ + "xpack.ml.update_model_snapshot": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-snapshot.html", + "methods": [ "POST" ], + "url": { + "path": "/_xpack/ml/anomaly_detectors/{job_id}/model_snapshots/{snapshot_id}/_update", + "paths": [ "/_xpack/ml/anomaly_detectors/{job_id}/model_snapshots/{snapshot_id}/_update" ], + "parts": { + "job_id": { + "type": "string", + "required": true, + "description": "The ID of the job to fetch" + }, + "snapshot_id": { + "type": "string", + "required": true, + "description": "The ID of the snapshot to update" + } + }, + "params": {} + }, + "body": { + "description" : "The model snapshot properties to update", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.validate.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.validate.json new file mode 100644 index 0000000000000..bee1f20b2f267 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.validate.json @@ -0,0 +1,14 @@ +{ + "xpack.ml.validate": { + "methods": [ "POST" ], + "url": { + "path": "/_xpack/ml/anomaly_detectors/_validate", + "paths": [ "/_xpack/ml/anomaly_detectors/_validate" ], + "params": {} + }, + "body": { + "description" : "The job config", + "required" : true + } + } +} diff --git 
a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.validate_detector.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.validate_detector.json new file mode 100644 index 0000000000000..587bc3fb51a2c --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ml.validate_detector.json @@ -0,0 +1,14 @@ +{ + "xpack.ml.validate_detector": { + "methods": [ "POST" ], + "url": { + "path": "/_xpack/ml/anomaly_detectors/_validate/detector", + "paths": [ "/_xpack/ml/anomaly_detectors/_validate/detector" ], + "params": {} + }, + "body": { + "description" : "The detector", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.monitoring.bulk.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.monitoring.bulk.json new file mode 100644 index 0000000000000..71f1b1fc13bf7 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.monitoring.bulk.json @@ -0,0 +1,35 @@ +{ + "xpack.monitoring.bulk": { + "documentation": "http://www.elastic.co/guide/en/monitoring/current/appendix-api-bulk.html", + "methods": ["POST", "PUT"], + "url": { + "path": "/_xpack/monitoring/_bulk", + "paths": ["/_xpack/monitoring/_bulk", "/_xpack/monitoring/{type}/_bulk"], + "parts": { + "type": { + "type" : "string", + "description" : "Default document type for items which don't provide one" + } + }, + "params": { + "system_id": { + "type": "string", + "description" : "Identifier of the monitored system" + }, + "system_api_version" : { + "type" : "string", + "description" : "API Version of the monitored system" + }, + "interval": { + "type" : "string", + "description" : "Collection interval (e.g., '10s' or '10000ms') of the payload" + } + } + }, + "body": { + "description" : "The operation definition and data (action-data pairs), separated by newlines", + "required" : true, + "serialize" : "bulk" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.delete_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.delete_job.json new file mode 100644 index 0000000000000..21bdb5087a785 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.delete_job.json @@ -0,0 +1,17 @@ +{ + "xpack.rollup.delete_job": { + "documentation": "", + "methods": [ "DELETE" ], + "url": { + "path": "/_xpack/rollup/job/{id}", + "paths": [ "/_xpack/rollup/job/{id}" ], + "parts": { + "id": { + "type": "string", + "required": true, + "description": "The ID of the job to delete" + } + } + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_jobs.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_jobs.json new file mode 100644 index 0000000000000..7ea3c1e16062a --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_jobs.json @@ -0,0 +1,17 @@ +{ + "xpack.rollup.get_jobs": { + "documentation": "", + "methods": [ "GET" ], + "url": { + "path": "/_xpack/rollup/job/{id}", + "paths": [ "/_xpack/rollup/job/{id}", "/_xpack/rollup/job/" ], + "parts": { + "id": { + "type": "string", + "required": false, + "description": "The ID of the job(s) to fetch. 
Accepts glob patterns, or left blank for all jobs" + } + } + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_rollup_caps.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_rollup_caps.json new file mode 100644 index 0000000000000..28edd044c3cfe --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.get_rollup_caps.json @@ -0,0 +1,17 @@ +{ + "xpack.rollup.get_rollup_caps": { + "documentation": "", + "methods": [ "GET" ], + "url": { + "path": "/_xpack/rollup/data/{id}", + "paths": [ "/_xpack/rollup/data/{id}", "/_xpack/rollup/data/" ], + "parts": { + "id": { + "type": "string", + "required": false, + "description": "The ID of the index to check rollup capabilities on, or left blank for all jobs" + } + } + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.put_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.put_job.json new file mode 100644 index 0000000000000..57b2a062c0a6e --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.put_job.json @@ -0,0 +1,21 @@ +{ + "xpack.rollup.put_job": { + "documentation": "", + "methods": [ "PUT" ], + "url": { + "path": "/_xpack/rollup/job/{id}", + "paths": [ "/_xpack/rollup/job/{id}" ], + "parts": { + "id": { + "type": "string", + "required": true, + "description": "The ID of the job to create" + } + } + }, + "body": { + "description" : "The job configuration", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.rollup_search.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.rollup_search.json new file mode 100644 index 0000000000000..bc535784fc43f --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.rollup_search.json @@ -0,0 +1,26 @@ +{ + "xpack.rollup.rollup_search": { + "documentation": "", + "methods": [ "GET", "POST" ], + "url": { + "path": "/{index}/_rollup_search", + "paths": [ "{index}/_rollup_search", "{index}/{type}/_rollup_search" ], + "parts": { + "index": { + "type": "string", + "required": true, + "description": "The index or index-pattern (containing rollup or regular data) that should be searched" + }, + "type": { + "type": "string", + "required": false, + "description": "The doc type inside the index" + } + } + }, + "body": { + "description" : "The search request body", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.start_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.start_job.json new file mode 100644 index 0000000000000..db5feed680b2a --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.start_job.json @@ -0,0 +1,17 @@ +{ + "xpack.rollup.start_job": { + "documentation": "", + "methods": [ "POST" ], + "url": { + "path": "/_xpack/rollup/job/{id}/_start", + "paths": [ "/_xpack/rollup/job/{id}/_start" ], + "parts": { + "id": { + "type": "string", + "required": true, + "description": "The ID of the job to start" + } + } + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.stop_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.stop_job.json new file mode 100644 index 0000000000000..39c3fd4a11359 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.rollup.stop_job.json @@ -0,0 +1,17 @@ +{ + "xpack.rollup.stop_job": { + "documentation": "", + "methods": [ 
"POST" ], + "url": { + "path": "/_xpack/rollup/job/{id}/_stop", + "paths": [ "/_xpack/rollup/job/{id}/_stop" ], + "parts": { + "id": { + "type": "string", + "required": true, + "description": "The ID of the job to stop" + } + } + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.authenticate.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.authenticate.json new file mode 100644 index 0000000000000..650f89e89a461 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.authenticate.json @@ -0,0 +1,13 @@ +{ + "xpack.security.authenticate": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-authenticate.html", + "methods": [ "GET" ], + "url": { + "path": "/_xpack/security/_authenticate", + "paths": [ "/_xpack/security/_authenticate" ], + "parts": {}, + "params": {} + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.change_password.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.change_password.json new file mode 100644 index 0000000000000..7cb4277ee5083 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.change_password.json @@ -0,0 +1,28 @@ +{ + "xpack.security.change_password": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-change-password.html", + "methods": [ "PUT", "POST" ], + "url": { + "path": "/_xpack/security/user/{username}/_password", + "paths": [ "/_xpack/security/user/{username}/_password", "/_xpack/security/user/_password" ], + "parts": { + "username": { + "type" : "string", + "description" : "The username of the user to change the password for", + "required" : false + } + }, + "params": { + "refresh": { + "type" : "enum", + "options": ["true", "false", "wait_for"], + "description" : "If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes." 
+ } + } + }, + "body": { + "description" : "the new password for the user", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.clear_cached_realms.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.clear_cached_realms.json new file mode 100644 index 0000000000000..059441d654eff --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.clear_cached_realms.json @@ -0,0 +1,25 @@ +{ + "xpack.security.clear_cached_realms": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-cache.html", + "methods": [ "POST" ], + "url": { + "path": "/_xpack/security/realm/{realms}/_clear_cache", + "paths": [ "/_xpack/security/realm/{realms}/_clear_cache" ], + "parts": { + "realms": { + "type" : "list", + "description" : "Comma-separated list of realms to clear", + "required" : true + } + }, + "params": { + "usernames": { + "type" : "list", + "description" : "Comma-separated list of usernames to clear from the cache", + "required" : false + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.clear_cached_roles.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.clear_cached_roles.json new file mode 100644 index 0000000000000..c94333325b127 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.clear_cached_roles.json @@ -0,0 +1,19 @@ +{ + "xpack.security.clear_cached_roles": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-roles.html#security-api-clear-role-cache", + "methods": [ "POST" ], + "url": { + "path": "/_xpack/security/role/{name}/_clear_cache", + "paths": [ "/_xpack/security/role/{name}/_clear_cache" ], + "parts": { + "name": { + "type" : "list", + "description" : "Role name", + "required" : true + } + }, + "params": {} + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role.json new file mode 100644 index 0000000000000..4351b1bc847a1 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role.json @@ -0,0 +1,25 @@ +{ + "xpack.security.delete_role": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-roles.html#security-api-delete-role", + "methods": [ "DELETE" ], + "url": { + "path": "/_xpack/security/role/{name}", + "paths": [ "/_xpack/security/role/{name}" ], + "parts": { + "name": { + "type" : "string", + "description" : "Role name", + "required" : true + } + }, + "params": { + "refresh": { + "type" : "enum", + "options": ["true", "false", "wait_for"], + "description" : "If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes." 
+ } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role_mapping.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role_mapping.json new file mode 100644 index 0000000000000..26c72666e8fa4 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_role_mapping.json @@ -0,0 +1,25 @@ +{ + "xpack.security.delete_role_mapping": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-role-mapping.html#security-api-delete-role-mapping", + "methods": [ "DELETE" ], + "url": { + "path": "/_xpack/security/role_mapping/{name}", + "paths": [ "/_xpack/security/role_mapping/{name}" ], + "parts": { + "name": { + "type" : "string", + "description" : "Role-mapping name", + "required" : true + } + }, + "params": { + "refresh": { + "type" : "enum", + "options": ["true", "false", "wait_for"], + "description" : "If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes." + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_user.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_user.json new file mode 100644 index 0000000000000..d72c854a69dcb --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.delete_user.json @@ -0,0 +1,25 @@ +{ + "xpack.security.delete_user": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html#security-api-delete-user", + "methods": [ "DELETE" ], + "url": { + "path": "/_xpack/security/user/{username}", + "paths": [ "/_xpack/security/user/{username}" ], + "parts": { + "username": { + "type" : "string", + "description" : "username", + "required" : true + } + }, + "params": { + "refresh": { + "type" : "enum", + "options": ["true", "false", "wait_for"], + "description" : "If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes." + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.disable_user.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.disable_user.json new file mode 100644 index 0000000000000..3a72b3141911f --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.disable_user.json @@ -0,0 +1,25 @@ +{ + "xpack.security.disable_user": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html#security-api-disable-user", + "methods": [ "PUT", "POST" ], + "url": { + "path": "/_xpack/security/user/{username}/_disable", + "paths": [ "/_xpack/security/user/{username}/_disable" ], + "parts": { + "username": { + "type" : "string", + "description" : "The username of the user to disable", + "required" : false + } + }, + "params": { + "refresh": { + "type" : "enum", + "options": ["true", "false", "wait_for"], + "description" : "If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes." 
+ } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.enable_user.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.enable_user.json new file mode 100644 index 0000000000000..c68144957f07d --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.enable_user.json @@ -0,0 +1,25 @@ +{ + "xpack.security.enable_user": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html#security-api-enable-user", + "methods": [ "PUT", "POST" ], + "url": { + "path": "/_xpack/security/user/{username}/_enable", + "paths": [ "/_xpack/security/user/{username}/_enable" ], + "parts": { + "username": { + "type" : "string", + "description" : "The username of the user to enable", + "required" : false + } + }, + "params": { + "refresh": { + "type" : "enum", + "options": ["true", "false", "wait_for"], + "description" : "If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes." + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role.json new file mode 100644 index 0000000000000..3479c911ccdce --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role.json @@ -0,0 +1,19 @@ +{ + "xpack.security.get_role": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-roles.html#security-api-get-role", + "methods": [ "GET" ], + "url": { + "path": "/_xpack/security/role/{name}", + "paths": [ "/_xpack/security/role/{name}", "/_xpack/security/role" ], + "parts": { + "name": { + "type" : "string", + "description" : "Role name", + "required" : false + } + }, + "params": {} + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role_mapping.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role_mapping.json new file mode 100644 index 0000000000000..0bdeb54cfb678 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_role_mapping.json @@ -0,0 +1,19 @@ +{ + "xpack.security.get_role_mapping": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-role-mapping.html#security-api-get-role-mapping", + "methods": [ "GET" ], + "url": { + "path": "/_xpack/security/role_mapping/{name}", + "paths": [ "/_xpack/security/role_mapping/{name}", "/_xpack/security/role_mapping" ], + "parts": { + "name": { + "type" : "string", + "description" : "Role-Mapping name", + "required" : false + } + }, + "params": {} + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_token.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_token.json new file mode 100644 index 0000000000000..8020d1ecd6d97 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_token.json @@ -0,0 +1,16 @@ +{ + "xpack.security.get_token": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-tokens.html#security-api-get-token", + "methods": [ "POST" ], + "url": { + "path": "/_xpack/security/oauth2/token", + "paths": [ 
"/_xpack/security/oauth2/token" ], + "parts": {}, + "params": {} + }, + "body": { + "description" : "The token request to get", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_user.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_user.json new file mode 100644 index 0000000000000..910fb7d064582 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.get_user.json @@ -0,0 +1,19 @@ +{ + "xpack.security.get_user": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html#security-api-get-user", + "methods": [ "GET" ], + "url": { + "path": "/_xpack/security/user/{username}", + "paths": [ "/_xpack/security/user/{username}", "/_xpack/security/user" ], + "parts": { + "username": { + "type" : "list", + "description" : "A comma-separated list of usernames", + "required" : false + } + }, + "params": {} + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.invalidate_token.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.invalidate_token.json new file mode 100644 index 0000000000000..be032c2ffd020 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.invalidate_token.json @@ -0,0 +1,16 @@ +{ + "xpack.security.invalidate_token": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-tokens.html#security-api-invalidate-token", + "methods": [ "DELETE" ], + "url": { + "path": "/_xpack/security/oauth2/token", + "paths": [ "/_xpack/security/oauth2/token" ], + "parts": {}, + "params": {} + }, + "body": { + "description" : "The token to invalidate", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role.json new file mode 100644 index 0000000000000..4152975189e24 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role.json @@ -0,0 +1,28 @@ +{ + "xpack.security.put_role": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-roles.html#security-api-put-role", + "methods": [ "PUT", "POST" ], + "url": { + "path": "/_xpack/security/role/{name}", + "paths": [ "/_xpack/security/role/{name}" ], + "parts": { + "name": { + "type" : "string", + "description" : "Role name", + "required" : true + } + }, + "params": { + "refresh": { + "type" : "enum", + "options": ["true", "false", "wait_for"], + "description" : "If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes." 
+ } + } + }, + "body": { + "description" : "The role to add", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role_mapping.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role_mapping.json new file mode 100644 index 0000000000000..3f92cd130bab4 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_role_mapping.json @@ -0,0 +1,28 @@ +{ + "xpack.security.put_role_mapping": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-role-mapping.html#security-api-put-role-mapping", + "methods": [ "PUT", "POST" ], + "url": { + "path": "/_xpack/security/role_mapping/{name}", + "paths": [ "/_xpack/security/role_mapping/{name}" ], + "parts": { + "name": { + "type" : "string", + "description" : "Role-mapping name", + "required" : true + } + }, + "params": { + "refresh": { + "type" : "enum", + "options": ["true", "false", "wait_for"], + "description" : "If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes." + } + } + }, + "body": { + "description" : "The role to add", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_user.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_user.json new file mode 100644 index 0000000000000..de07498a40954 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.security.put_user.json @@ -0,0 +1,28 @@ +{ + "xpack.security.put_user": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html#security-api-put-user", + "methods": [ "PUT", "POST" ], + "url": { + "path": "/_xpack/security/user/{username}", + "paths": [ "/_xpack/security/user/{username}" ], + "parts": { + "username": { + "type" : "string", + "description" : "The username of the User", + "required" : true + } + }, + "params": { + "refresh": { + "type" : "enum", + "options": ["true", "false", "wait_for"], + "description" : "If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes." 
+ } + } + }, + "body": { + "description" : "The user to add", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.clear_cursor.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.clear_cursor.json new file mode 100644 index 0000000000000..d82e499c70183 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.clear_cursor.json @@ -0,0 +1,15 @@ +{ + "xpack.sql.clear_cursor": { + "documentation": "Clear SQL cursor", + "methods": [ "POST"], + "url": { + "path": "/_xpack/sql/close", + "paths": [ "/_xpack/sql/close" ], + "parts": {} + }, + "body": { + "description" : "Specify the cursor value in the `cursor` element to clean the cursor.", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.query.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.query.json new file mode 100644 index 0000000000000..60bbcda8cad36 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.query.json @@ -0,0 +1,21 @@ +{ + "xpack.sql.query": { + "documentation": "Execute SQL", + "methods": [ "POST", "GET" ], + "url": { + "path": "/_xpack/sql", + "paths": [ "/_xpack/sql" ], + "parts": {}, + "params": { + "format": { + "type" : "string", + "description" : "a short version of the Accept header, e.g. json, yaml" + } + } + }, + "body": { + "description" : "Use the `query` element to start a query. Use the `cursor` element to continue a query.", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.translate.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.translate.json new file mode 100644 index 0000000000000..9b854665a71d0 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.sql.translate.json @@ -0,0 +1,16 @@ +{ + "xpack.sql.translate": { + "documentation": "Translate SQL into Elasticsearch queries", + "methods": [ "POST", "GET" ], + "url": { + "path": "/_xpack/sql/translate", + "paths": [ "/_xpack/sql/translate" ], + "parts": {}, + "params": {} + }, + "body": { + "description" : "Specify the query in the `query` element.", + "required" : true + } + } + } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ssl.certificates.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ssl.certificates.json new file mode 100644 index 0000000000000..b9ad98b172bae --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.ssl.certificates.json @@ -0,0 +1,13 @@ +{ + "xpack.ssl.certificates": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-ssl.html", + "methods": [ "GET" ], + "url": { + "path": "/_xpack/ssl/certificates", + "paths": [ "/_xpack/ssl/certificates" ], + "parts": {}, + "params": {} + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.usage.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.usage.json new file mode 100644 index 0000000000000..8476c1f06d61b --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.usage.json @@ -0,0 +1,18 @@ +{ + "xpack.usage": { + "documentation": "Retrieve information about xpack features usage", + "methods": [ "GET" ], + "url": { + "path": "/_xpack/usage", + "paths": [ "/_xpack/usage" ], + "parts": {}, + "params": { + "master_timeout": { + "type": "time", + "description": "Specify timeout for watch write operation" + } + } + }, + "body": null 
+ } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.ack_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.ack_watch.json new file mode 100644 index 0000000000000..e48f6a820f840 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.ack_watch.json @@ -0,0 +1,22 @@ +{ + "xpack.watcher.ack_watch": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-ack-watch.html", + "methods": [ "PUT", "POST" ], + "url": { + "path": "/_xpack/watcher/watch/{watch_id}/_ack", + "paths": [ "/_xpack/watcher/watch/{watch_id}/_ack", "/_xpack/watcher/watch/{watch_id}/_ack/{action_id}"], + "parts": { + "watch_id": { + "type" : "string", + "description" : "Watch ID", + "required" : true + }, + "action_id": { + "type" : "list", + "description" : "A comma-separated list of the action ids to be acked" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.activate_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.activate_watch.json new file mode 100644 index 0000000000000..bceb74b5745de --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.activate_watch.json @@ -0,0 +1,18 @@ +{ + "xpack.watcher.activate_watch": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-activate-watch.html", + "methods": [ "PUT", "POST" ], + "url": { + "path": "/_xpack/watcher/watch/{watch_id}/_activate", + "paths": [ "/_xpack/watcher/watch/{watch_id}/_activate" ], + "parts": { + "watch_id": { + "type" : "string", + "description" : "Watch ID", + "required" : true + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.deactivate_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.deactivate_watch.json new file mode 100644 index 0000000000000..dff58c3702928 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.deactivate_watch.json @@ -0,0 +1,18 @@ +{ + "xpack.watcher.deactivate_watch": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-deactivate-watch.html", + "methods": [ "PUT", "POST" ], + "url": { + "path": "/_xpack/watcher/watch/{watch_id}/_deactivate", + "paths": [ "/_xpack/watcher/watch/{watch_id}/_deactivate" ], + "parts": { + "watch_id": { + "type" : "string", + "description" : "Watch ID", + "required" : true + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.delete_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.delete_watch.json new file mode 100644 index 0000000000000..ff657fd9ee929 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.delete_watch.json @@ -0,0 +1,19 @@ +{ + "xpack.watcher.delete_watch": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-delete-watch.html", + + "methods": [ "DELETE" ], + "url": { + "path": "/_xpack/watcher/watch/{id}", + "paths": [ "/_xpack/watcher/watch/{id}" ], + "parts": { + "id": { + "type" : "string", + "description" : "Watch ID", + "required" : true + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.execute_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.execute_watch.json new file 
mode 100644 index 0000000000000..109124027f455 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.execute_watch.json @@ -0,0 +1,27 @@ +{ + "xpack.watcher.execute_watch": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-execute-watch.html", + "methods": [ "PUT", "POST" ], + "url": { + "path": "/_xpack/watcher/watch/{id}/_execute", + "paths": [ "/_xpack/watcher/watch/{id}/_execute", "/_xpack/watcher/watch/_execute" ], + "parts": { + "id": { + "type" : "string", + "description" : "Watch ID" + } + }, + "params": { + "debug" : { + "type" : "boolean", + "description" : "indicates whether the watch should execute in debug mode", + "required" : false + } + } + }, + "body": { + "description" : "Execution control", + "required" : false + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.get_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.get_watch.json new file mode 100644 index 0000000000000..071421919b7cc --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.get_watch.json @@ -0,0 +1,20 @@ +{ + "xpack.watcher.get_watch": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-get-watch.html", + "methods": [ "GET" ], + "url": { + "path": "/_xpack/watcher/watch/{id}", + "paths": [ "/_xpack/watcher/watch/{id}" ], + "parts": { + "id": { + "type" : "string", + "description" : "Watch ID", + "required" : true + } + }, + "params": { + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.put_watch.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.put_watch.json new file mode 100644 index 0000000000000..27007bbfe5741 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.put_watch.json @@ -0,0 +1,31 @@ +{ + "xpack.watcher.put_watch": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-put-watch.html", + "methods": [ "PUT", "POST" ], + "url": { + "path": "/_xpack/watcher/watch/{id}", + "paths": [ "/_xpack/watcher/watch/{id}" ], + "parts": { + "id": { + "type" : "string", + "description" : "Watch ID", + "required" : true + } + }, + "params": { + "active": { + "type": "boolean", + "description": "Specify whether the watch is in/active by default" + }, + "version" : { + "type" : "number", + "description" : "Explicit version number for concurrency control" + } + } + }, + "body": { + "description" : "The watch", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.start.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.start.json new file mode 100644 index 0000000000000..18379efb37938 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.start.json @@ -0,0 +1,15 @@ +{ + "xpack.watcher.start": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-start.html", + "methods": [ "POST" ], + "url": { + "path": "/_xpack/watcher/_start", + "paths": [ "/_xpack/watcher/_start" ], + "parts": { + }, + "params": { + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stats.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stats.json new file mode 100644 index 0000000000000..40eda835a4bc8 --- /dev/null +++ 
b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stats.json @@ -0,0 +1,30 @@ +{ + "xpack.watcher.stats": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stats.html", + "methods": [ "GET" ], + "url": { + "path": "/_xpack/watcher/stats", + "paths": [ "/_xpack/watcher/stats", "/_xpack/watcher/stats/{metric}" ], + "parts": { + "metric": { + "type" : "enum", + "options" : ["_all", "queued_watches", "pending_watches"], + "description" : "Controls what additional stat metrics should be included in the response" + } + }, + "params": { + "metric": { + "type" : "enum", + "options" : ["_all", "queued_watches", "pending_watches"], + "description" : "Controls what additional stat metrics should be included in the response" + }, + "emit_stacktraces": { + "type" : "boolean", + "description" : "Emits stack traces of currently running watches", + "required" : false + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stop.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stop.json new file mode 100644 index 0000000000000..05881bebe1322 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.watcher.stop.json @@ -0,0 +1,15 @@ +{ + "xpack.watcher.stop": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-stop.html", + "methods": [ "POST" ], + "url": { + "path": "/_xpack/watcher/_stop", + "paths": [ "/_xpack/watcher/_stop" ], + "parts": { + }, + "params": { + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/authenticate/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/authenticate/10_basic.yml new file mode 100644 index 0000000000000..103bfe55c3078 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/authenticate/10_basic.yml @@ -0,0 +1,37 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.security.put_user: + username: "authenticate_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "superuser" ], + "full_name" : "Authenticate User" + } + +--- +teardown: + - do: + xpack.security.delete_user: + username: "authenticate_user" + ignore: 404 + +--- +"Test authenticate api": + + - do: + headers: + Authorization: "Basic YXV0aGVudGljYXRlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" + xpack.security.authenticate: {} + + - match: { username: "authenticate_user" } + - match: { roles.0: "superuser" } + - match: { full_name: "Authenticate User" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/authenticate/10_field_level_security.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/authenticate/10_field_level_security.yml new file mode 100644 index 0000000000000..acb2daf3ae97e --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/authenticate/10_field_level_security.yml @@ -0,0 +1,181 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.security.put_role: + name: "readall" + body: > + { + "indices": [ + { + "names": ["*"], + "privileges": ["read"] + } + ] + } + + - do: + xpack.security.put_role: + name: "limitread" + body: > + { + "indices": [ + { + "names": ["*"], + "privileges": ["read"], + "query": {"match": {"marker": "test_1"}} + } + ] + } + + - do: + xpack.security.put_user: + username: "full" + body: > + { + "password" :
"x-pack-test-password", + "roles" : [ "readall" ], + "full_name" : "user who can read all data" + } + + - do: + xpack.security.put_user: + username: "limited" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "limitread" ], + "full_name" : "user who can read some data" + } +--- +teardown: + - do: + xpack.security.delete_user: + username: "full" + ignore: 404 + + - do: + xpack.security.delete_user: + username: "limited" + ignore: 404 + + - do: + xpack.security.delete_role: + name: "readall" + ignore: 404 + + - do: + xpack.security.delete_role: + name: "limitread" + ignore: 404 + +--- +"Test doc level security against alias with different users": + + - do: + indices.create: + index: test_index + body: + aliases: + the_alias : {} + mappings: + doc: + properties: + location: + properties: + city: + type: "keyword" + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test_index", "_type": "doc"}}' + - '{"marker": "test_1", "location.city": "bos"}' + - '{"index": {"_index": "test_index", "_type": "doc"}}' + - '{"marker": "test_2", "location.city": "ams"}' + + - do: + headers: { Authorization: "Basic ZnVsbDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # full - user + search: + index: the_alias + size: 0 + from: 0 + body: + aggs: + cities: + terms: + field: location.city + + - match: { _shards.total: 1 } + - match: { hits.total: 2 } + - length: { aggregations.cities.buckets: 2 } + - match: { aggregations.cities.buckets.0.key: "ams" } + - match: { aggregations.cities.buckets.0.doc_count: 1 } + - match: { aggregations.cities.buckets.1.key: "bos" } + - match: { aggregations.cities.buckets.1.doc_count: 1 } + + - do: + headers: { Authorization: "Basic bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user + search: + index: the_alias + size: 0 + from: 0 + body: + aggs: + cities: + terms: + field: location.city + + - match: { _shards.total: 1 } + - match: { hits.total: 1 } + - length: { aggregations.cities.buckets: 1 } + - match: { aggregations.cities.buckets.0.key: "bos" } + - match: { aggregations.cities.buckets.0.doc_count: 1 } + + - do: + headers: { Authorization: "Basic bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user + search: + index: the_* + size: 0 + from: 0 + body: + aggs: + cities: + terms: + field: location.city + + - match: { _shards.total: 1 } + - match: { hits.total: 1 } + - length: { aggregations.cities.buckets: 1 } + - match: { aggregations.cities.buckets.0.key: "bos" } + - match: { aggregations.cities.buckets.0.doc_count: 1 } + + - do: + headers: { Authorization: "Basic bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user + search: + index: test_* + size: 0 + from: 0 + body: + aggs: + cities: + terms: + field: location.city + + - match: { _shards.total: 1 } + - match: { hits.total: 1 } + - length: { aggregations.cities.buckets: 1 } + - match: { aggregations.cities.buckets.0.key: "bos" } + - match: { aggregations.cities.buckets.0.doc_count: 1 } + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/change_password/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/change_password/10_basic.yml new file mode 100644 index 0000000000000..562a56f41547b --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/change_password/10_basic.yml @@ -0,0 +1,140 @@ +--- +setup: + - skip: + features: headers + - do: + cluster.health: + wait_for_status: yellow + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "password": "s3krit", 
+ "roles" : [ "superuser" ] + } + - do: + xpack.security.put_role: + name: "user" + body: > + { + "cluster": ["monitor"], + "indices": [ + { + "names": "*", + "privileges": ["all"] + } + ] + } + - do: + xpack.security.put_user: + username: "unprivileged_user" + body: > + { + "password": "s3krit", + "roles" : [ "user" ] + } + +--- +teardown: + - do: + xpack.security.delete_user: + username: "joe" + ignore: 404 + - do: + xpack.security.delete_role: + name: "user" + ignore: 404 + - do: + xpack.security.delete_user: + username: "unprivileged_user" + ignore: 404 + +--- +"Test changing users password": + - skip: + features: catch_unauthorized + - do: + headers: + Authorization: "Basic am9lOnMza3JpdA==" + cluster.health: {} + - match: { timed_out: false } + +# change password + - do: + xpack.security.change_password: + username: "joe" + body: > + { + "password" : "s3krit2" + } + +# attempt to login with invalid credentials + - do: + catch: unauthorized + headers: + Authorization: "Basic am9lOnMza3JpdA==" + cluster.health: {} + +# login with new credentials + - do: + headers: + Authorization: "Basic am9lOnMza3JpdDI=" + cluster.health: {} + - match: { timed_out: false } + +--- +"Test user changing their own password": + - skip: + features: catch_unauthorized +# test that the role actually works + - do: + headers: + Authorization: "Basic dW5wcml2aWxlZ2VkX3VzZXI6czNrcml0" + cluster.health: {} + - match: { timed_out: false } + +# change password as the current user. the user role only grants the ability to change their own password + - do: + headers: + Authorization: "Basic dW5wcml2aWxlZ2VkX3VzZXI6czNrcml0" + xpack.security.change_password: + body: > + { + "password" : "s3krit2" + } + +# attempt to login with invalid credentials + - do: + catch: unauthorized + headers: + Authorization: "Basic dW5wcml2aWxlZ2VkX3VzZXI6czNrcml0" + cluster.health: {} + +# login with new credentials + - do: + headers: + Authorization: "Basic dW5wcml2aWxlZ2VkX3VzZXI6czNrcml0Mg==" + cluster.health: {} + - match: { timed_out: false } + +--- +"Test unauthorized user changing anothers password": +# test that the role actually works + - do: + headers: + Authorization: "Basic dW5wcml2aWxlZ2VkX3VzZXI6czNrcml0" + cluster.health: {} + - match: { timed_out: false } + +# attempt to change another users password + - do: + headers: + Authorization: "Basic dW5wcml2aWxlZ2VkX3VzZXI6czNrcml0" + catch: forbidden + xpack.security.change_password: + username: "anotheruser" + body: > + { + "password" : "s3krit2" + } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/deprecation/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/deprecation/10_basic.yml new file mode 100644 index 0000000000000..dad0c3b08eb57 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/deprecation/10_basic.yml @@ -0,0 +1,15 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +"Test Deprecations": + - do: + xpack.migration.deprecations: + index: "*" + - length: { cluster_settings: 0 } + - length: { node_settings: 0 } + - length: { index_settings: 0 } + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/graph/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/graph/10_basic.yml new file mode 100644 index 0000000000000..0ef7a25547285 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/graph/10_basic.yml @@ -0,0 +1,47 @@ +--- +setup: +- do: + indices.create: + index: test_1 + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + 
mappings: + test: + properties: + keys: + type : integer + +--- +"Test basic graph explore": + - do: + index: + index: test_1 + type: test + id: 1 + body: { keys: [1,2,3] } + + - do: + index: + index: test_1 + type: test + id: 2 + body: { keys: [4,5,6] } + + - do: + indices.refresh: {} + + - do: + cluster.health: + index: test_1 + wait_for_status: green + + - do: + xpack.graph.explore: + index: test_1 + type: test + body: {"query": {"match": {"keys": 1}},"controls":{"use_significance":false},"vertices":[{"field": "keys","min_doc_count": 1}]} + - length: {failures: 0} + - length: {vertices: 3} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml new file mode 100644 index 0000000000000..9eb3b79fda7a7 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml @@ -0,0 +1,181 @@ +--- +teardown: + - do: + xpack.license.post: + acknowledge: true + body: | + {"licenses":[{"uid":"894371dc-9t49-4997-93cb-8o2e3r7fa6a8","type":"trial","issue_date_in_millis":1411948800000,"expiry_date_in_millis":1916956799999,"max_nodes":1,"issued_to":"issuedTo","issuer":"issuer","signature":"AAAAAgAAAA0FWh0T9njItjQ2qammAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQBZhvozA0trrxhUZ1QbaTsKTna9C5KVQ6pv8yg1pnsBpZXCl8kX1SrgoFn1bXq61IvJwfw5qnmYNiH3hRhTO9EyaCBqaLk8NXZQ6TrRkQSpEnnBwAYUkZeKXsIuBoOk4B4mzwC/r8aMAkzrTiEBtBbog+57cSaU9y37Gkdd+1jXCQrxP+jOEUf7gnXWZvE6oeRroLvCt1fYn09k0CF8kKTbrPTSjC6igZR3uvTHyee74XQ9PRavvHax73T4UOEdQZX/P1ibSQIWKbBRD5YQ1POYVjTayoltTnWLMxfEcAkkATJZLhpBEHST7kZWjrTS6J1dCReJc7a8Vsj/78HXvOIy"}]} +--- +"Installing and getting license works": + + ## current license version + - do: + xpack.license.post: + acknowledge: true + body: | + {"licenses":[{"uid":"894371dc-9t49-4997-93cb-8o2e3r7fa6a8","type":"trial","issue_date_in_millis":1411948800000,"expiry_date_in_millis":1916956799999,"max_nodes":1,"issued_to":"issuedTo","issuer":"issuer","signature":"AAAAAgAAAA0FWh0T9njItjQ2qammAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQBZhvozA0trrxhUZ1QbaTsKTna9C5KVQ6pv8yg1pnsBpZXCl8kX1SrgoFn1bXq61IvJwfw5qnmYNiH3hRhTO9EyaCBqaLk8NXZQ6TrRkQSpEnnBwAYUkZeKXsIuBoOk4B4mzwC/r8aMAkzrTiEBtBbog+57cSaU9y37Gkdd+1jXCQrxP+jOEUf7gnXWZvE6oeRroLvCt1fYn09k0CF8kKTbrPTSjC6igZR3uvTHyee74XQ9PRavvHax73T4UOEdQZX/P1ibSQIWKbBRD5YQ1POYVjTayoltTnWLMxfEcAkkATJZLhpBEHST7kZWjrTS6J1dCReJc7a8Vsj/78HXvOIy"}]} + + - match: { license_status: "valid" } + + - do: + xpack.license.get: {} + + ## a license object 
has 11 attributes + - length: { license: 11 } + + ## bwc for licenses format + - do: + xpack.license.post: + acknowledge: true + body: | + {"licenses":[{"uid":"893361dc-9749-4997-93cb-802e3d7fa4a8","type":"gold","issue_date_in_millis":1411948800000,"expiry_date_in_millis":1914278399999,"max_nodes":1,"issued_to":"issued_to","issuer":"issuer","signature":"AAAAAwAAAA2T3vqdBBetKQaBgxipAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQB7pGFYgawfLm9zzT80LvcLHjy1t/v2uSzCQWKdXXhrwSy4WrAH2uK/+PEiQ7aEpW5erLsyJ5KLA6OEZJDaP7r+mjOPuLt0++l5j4DMn7ybMzOPHXWBc6LETE3+pp0GZPyOmwsDkZSRUegTtciR2R6z+mdnGrhOYM80y08KVWwhdU/DHw41MK7ePo6tq73Nz49y9lDgt9fxA0t4ggEBPbnTDDBVQ25AjauY8sa0M5eg9rDDRayw1KamYWrara8PIGX+2YjhtUeQhmlCPdlxc9wECJ7/knPss5bI3ZoXQR3fyXhjcXNnHEIsblqLrMCal3pLxs7lI+KPYMa2ZYL/am4P"}]} + + - match: { license_status: "valid" } + + - do: + xpack.license.get: {} + + - length: { license: 11 } + + ## license version: 1.x + - do: + xpack.license.post: + acknowledge: true + body: | + {"licenses":[{"uid":"893361dc-9749-4997-93cb-802e3d7fa4a8","type":"subscription","subscription_type":"gold","issue_date_in_millis":1411948800000,"feature":"shield","expiry_date_in_millis":1914278399999,"max_nodes":1,"issued_to":"issuedTo","issuer":"issuer","signature":"AAAAAQAAAA0LVAywwpSH94cyXr4zAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQA4qscc/URRZVdFoLwgy9dqybYEQLW8YLkiAyPV5XHHHdtk+dtZIepiNEDkUXhSX2waVJlsNRF8/4kqplDfwNoD2TUM8fTgiIfiSiZYGDTGST+yW/5eAveEU5J5v1liBN27bwkqL+V4YAa0Tcm7NKKwjScWKAHiTU3vF8chPkGfCHE0kQgVwPC9RE82pTw0s6/uR4PfLGNFfqPM0uiE5nucfVrtj89JQiO/KA/7ZyFbo7VTNXxZQt7T7rZWBCP9KIjptXzcWuk08Q5S+rSoJNYbFo3HGKtrCVsRz/55rceNtdwKKXu1IwnSeir4I1/KLduQTtFLy0+1th87VS8T88UT"}]} + + - match: { license_status: "valid" } + + - do: + xpack.license.get: {} + + - length: { license: 11 } + + ## multiple licenses version: 1.x + - do: + xpack.license.post: + acknowledge: true + body: | + 
{"licenses":[{"uid":"893361dc-9749-4997-93cb-802e3d7fa4a8","type":"internal","subscription_type":"none","issue_date_in_millis":1411948800000,"feature":"shield","expiry_date_in_millis":1440892799999,"max_nodes":1,"issued_to":"issuedTo","issuer":"issuer","signature":"AAAAAQAAAA04Q4ky3rFyyWLFkytEAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQBxMvUMn4h2E4R4TQMijahTxQj4LPQO4f1M79UxX/XkDlGcH+J5pRHx08OtTRPsFL1lED+h+PIXx307Vo+PNDsOxrWvoYZeYBkOLAO3ny9vhQga+52jYhMxIuFrT9xbcSCSNpMhGojgOIPU2WgiopVdVcimo1+Gk8VtklPB1wPwFzfOjOnPgp/Icx3WYpfkeAUUOyWUYiFIBAe4bnz84iF+xwLKbgYk6aHF25ECBtdb/Uruhcm9+jEFpoIEUtCouvvk9C+NJZ4OickV4xpRgaRG2x9PONH8ZN0QGhGYhJGbisoCxuDmlLsyVxqxfMu3n/r7/jdsEJScjAlSrsLDOu6H"},{"uid":"893361dc-9749-4997-93cb-802e3dofh7aa","type":"internal","subscription_type":"none","issue_date_in_millis":1443484800000,"feature":"watcher","expiry_date_in_millis":1914278399999,"max_nodes":1,"issued_to":"issuedTo","issuer":"issuer","signature":"AAAAAQAAAA0Sc90guRIaQEmgLvMnAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQCQ94dju0pnDZR3Uuypi0ic3aQJ+nvVqe+U8u79Dga5n1qIjcHDh7HvIBJEkF+tnVPlo/PXV/x7BZSwVY1PVErit+6rYix1yuHEgqwxmx/VdRICjCaZM6tk0Ob4dZCPv6Ebn2Mmk89KHC/PwiLPqF6QfwV/Pkpa8k2A3ORJmvYSDvXhe6tCs8dqc4ebrsFxqrZjwWh5CZSpzqqZBFXlngDv2N0hHhpGlueRszD0JJ5dfEL5ZA1DDOrgO9OJVejSHyRqe1L5QRUNdXPVfS+EAG0Dd1cNdJ/sMpYCPnVjbw6iq2/YgM3cuztsXVBY7ij4WnoP3ce7Zjs9TwHn+IqzftC6"}]} + + - match: { license_status: "valid" } + + - do: + xpack.license.get: {} + + - length: { license: 11 } + - match: { license.uid: "893361dc-9749-4997-93cb-802e3dofh7aa" } +--- +"Should throw 404 after license deletion": + - do: + xpack.license.delete: {} + + - match: { acknowledged: true } + + - do: + xpack.license.get: {} + catch: missing + +--- +"Should install a feature type license": + + # VERSION_NO_FEATURE_TYPE license version + - do: + xpack.license.post: + acknowledge: true + body: | + {"license": 
{"uid":"893361dc-9749-4997-93cb-802e3d7fa4a8","type":"gold","issue_date_in_millis":1411948800000,"expiry_date_in_millis":1914278399999,"max_nodes":1,"issued_to":"issued_to","issuer":"issuer","signature":"AAAAAgAAAA3U8+YmnvwC+CWsV/mRAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQBe8GfzDm6T537Iuuvjetb3xK5dvg0K5NQapv+rczWcQFxgCuzbF8plkgetP1aAGZP4uRESDQPMlOCsx4d0UqqAm9f7GbBQ3l93P+PogInPFeEH9NvOmaAQovmxVM9SE6DsDqlX4cXSO+bgWpXPTd2LmpoQc1fXd6BZ8GeuyYpVHVKp9hVU0tAYjw6HzYOE7+zuO1oJYOxElqy66AnIfkvHrvni+flym3tE7tDTgsDRaz7W3iBhaqiSntEqabEkvHdPHQdSR99XGaEvnHO1paK01/35iZF6OXHsF7CCj+558GRXiVxzueOe7TsGSSt8g7YjZwV9bRCyU7oB4B/nidgI"}} + + - match: { license_status: "valid" } + + - do: + xpack.license.get: {} + + - length: { license: 11 } +--- +"Cannot start basic": + + - do: + catch: bad_request + xpack.license.post: + acknowledge: true + body: | + {"license":{"uid":"893361dc-9749-4997-93cb-802e3d7fa4a8","type":"basic","issue_date_in_millis":1411948800000,"expiry_date_in_millis":1914278399999,"max_nodes":1,"issued_to":"issuedTo","issuer":"issuer","signature":"AAAAAgAAAA0lKPZ0a7aZquUltho/AAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQAALuQ44S3IG6SzolcXVJ6Z4CIXORDrYQ+wdLCeey0XdujTslAOj+k+vNgo6wauc7Uswi01esHu4lb5IgpvKy7RRCbh5bj/z2ubu2qMJqopp9BQyD7VQjVfqmG6seUMJwJ1a5Avvm9r41YPSPcrii3bKK2e1l6jK6N8ibCvnTyY/XkYGCJrBWTSJePDbg6ErbyodrZ37x1StLbPWcNAkmweyHjDJnvYnbeZZO7A3NmubXZjW7Ttf8/YwQyE00PqMcl7fVPY3hkKpAeHf8aaJbqkKYbqZuER3EWJX7ZvLVb1dNdNg8aXRn7YrkQcYwWgptYQpfV+D7yEJ4j5muAEoler"}} + + - match: { error.root_cause.0.reason: 'Installing basic licenses is no longer allowed. Use the POST /_xpack/license/start_basic API to install a basic license that does not expire.' } +--- +"Should fail gracefully when body content is not provided": + + - do: + catch: bad_request + xpack.license.post: + acknowledge: true + + - match: { error.root_cause.0.reason: 'The license must be provided in the request body' } +--- +"Current license is trial means not eligle to start trial": + + - do: + xpack.license.get_trial_status: {} + + - match: { eligible_to_start_trial: false } + + - do: + xpack.license.post_start_basic: + acknowledge: true + + - match: { basic_was_started: true } + + - do: + xpack.license.get_trial_status: {} + + - match: { eligible_to_start_trial: false } + + - do: + catch: forbidden + xpack.license.post_start_trial: + acknowledge: true + + - match: { trial_was_started: false } + - match: { error_message: "Operation failed: Trial was already activated." 
} +--- +"Trial license cannot be basic": + - do: + catch: bad_request + xpack.license.post_start_trial: + type: "basic" + acknowledge: true +--- +"Can start basic license if do not already have basic": + - do: + xpack.license.get_basic_status: {} + + - match: { eligible_to_start_basic: true } + + - do: + xpack.license.post_start_basic: + acknowledge: true + + - match: { basic_was_started: true } + - match: { acknowledged: true } + + - do: + xpack.license.get_basic_status: {} + + - match: { eligible_to_start_basic: false } + + - do: + catch: forbidden + xpack.license.post_start_basic: {} + + - match: { basic_was_started: false } + - match: { acknowledged: true } + - match: { error_message: "Operation failed: Current license is basic." } +--- +"Must acknowledge to start basic": + - do: + xpack.license.post_start_basic: {} + + - match: { basic_was_started: false } + - match: { acknowledged: false } + - match: { error_message: "Operation failed: Needs acknowledgement." } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/calendar_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/calendar_crud.yml new file mode 100644 index 0000000000000..9b3572739cd8c --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/calendar_crud.yml @@ -0,0 +1,716 @@ +--- +"Test calendar CRUD": + + - do: + xpack.ml.put_job: + job_id: cal-job + body: > + { + "analysis_config" : { + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + } + } + - match: { job_id: "cal-job" } + + - do: + xpack.ml.put_job: + job_id: cal-job2 + body: > + { + "analysis_config" : { + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + } + } + - match: { job_id: "cal-job2" } + + + - do: + xpack.ml.put_calendar: + calendar_id: "advent" + body: > + { + "job_ids": ["cal-job", "cal-job2"], + "description": "This is a calendar about..." + } + - match: { calendar_id: advent } + - match: { job_ids.0: cal-job } + - match: { job_ids.1: cal-job2 } + - match: { description: "This is a calendar about..."} + + - do: + xpack.ml.get_calendars: + calendar_id: "advent" + - match: { count: 1 } + - match: + calendars.0: + calendar_id: "advent" + job_ids: ["cal-job", "cal-job2"] + description: "This is a calendar about..." 
+ - is_false: type + + - do: + xpack.ml.put_calendar: + calendar_id: "dogs_of_the_year" + body: > + { + "job_ids": ["cal-job"] + } + + - do: + xpack.ml.put_calendar: + calendar_id: "cats_of_the_year" + + - do: + xpack.ml.get_calendars: {} + - match: { count: 3 } + + - do: + xpack.ml.delete_calendar: + calendar_id: "dogs_of_the_year" + + - do: + xpack.ml.get_calendars: {} + - match: { count: 2 } + + - do: + xpack.ml.get_calendars: + calendar_id: _all + - match: { count: 2 } + + - do: + catch: missing + xpack.ml.get_calendars: + calendar_id: "dogs_of_the_year" + + - do: + xpack.ml.put_calendar: + calendar_id: "new_cal_with_unknown_job_group" + body: > + { + "job_ids": ["cal-job", "unknown-job-group"] + } + + - do: + xpack.ml.get_calendars: + calendar_id: "new_cal_with_unknown_job_group" + - match: { count: 1 } + - match: + calendars.0: + calendar_id: "new_cal_with_unknown_job_group" + job_ids: ["cal-job", "unknown-job-group"] + +--- +"Test get calendar given missing": + - do: + catch: /No calendar with id \[unknown\]/ + xpack.ml.get_calendars: + calendar_id: "unknown" + +--- +"Test put calendar given id contains invalid chars": + - do: + catch: bad_request + xpack.ml.put_calendar: + calendar_id: "Mayas" + +--- +"Test PageParams": + - do: + xpack.ml.put_calendar: + calendar_id: "calendar1" + - do: + xpack.ml.put_calendar: + calendar_id: "calendar2" + - do: + xpack.ml.put_calendar: + calendar_id: "calendar3" + + - do: + xpack.ml.get_calendars: + from: 2 + - match: { count: 3 } + - length: { calendars: 1} + - match: { calendars.0.calendar_id: calendar3 } + + - do: + xpack.ml.get_calendars: + from: 1 + size: 1 + - match: { count: 3 } + - length: { calendars: 1} + - match: { calendars.0.calendar_id: calendar2 } + +--- +"Test PageParams with ID is invalid": + - do: + catch: bad_request + xpack.ml.get_calendars: + calendar_id: tides + size: 10 + +--- +"Test cannot overwrite an existing calendar": + + - do: + xpack.ml.put_calendar: + calendar_id: "mayan" + + - do: + catch: bad_request + xpack.ml.put_calendar: + calendar_id: "mayan" + +--- +"Test cannot create calendar with name _all": + - do: + catch: bad_request + xpack.ml.put_calendar: + calendar_id: "_all" + +--- +"Test deleted job is removed from calendar": + + - do: + xpack.ml.put_job: + job_id: cal-crud-test-delete + body: > + { + "analysis_config" : { + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + } + } + - match: { job_id: "cal-crud-test-delete" } + + - do: + xpack.ml.put_calendar: + calendar_id: "delete-test" + body: > + { + "job_ids": ["cal-crud-test-delete"] + } + + - do: + xpack.ml.delete_job: + job_id: cal-crud-test-delete + - match: { acknowledged: true } + + - do: + xpack.ml.get_calendars: + calendar_id: "delete-test" + - match: { count: 1 } + - match: { calendars.0.job_ids: [] } + +--- +"Test update calendar job ids": + + - do: + xpack.ml.put_calendar: + calendar_id: "wildlife" + + - do: + xpack.ml.put_job: + job_id: tiger + body: > + { + "analysis_config" : { + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + } + } + - match: { job_id: "tiger" } + + - do: + xpack.ml.put_job: + job_id: otter + body: > + { + "analysis_config" : { + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + } + } + - match: { job_id: "otter" } + + - do: + xpack.ml.put_calendar_job: + calendar_id: "wildlife" + job_id: "tiger" + - match: { 
calendar_id: "wildlife" } + - match: { job_ids.0: "tiger" } + + - do: + xpack.ml.get_calendars: + calendar_id: "wildlife" + - match: { count: 1 } + - match: { calendars.0.calendar_id: "wildlife" } + - length: { calendars.0.job_ids: 1 } + - match: { calendars.0.job_ids.0: "tiger" } + + - do: + xpack.ml.delete_calendar_job: + calendar_id: "wildlife" + job_id: "tiger" + - match: { calendar_id: "wildlife" } + - length: { job_ids: 0 } + + - do: + catch: /Cannot remove \[otter\] as it is not present in calendar \[wildlife\]/ + xpack.ml.delete_calendar_job: + calendar_id: "wildlife" + job_id: "otter" + + - do: + xpack.ml.get_calendars: + calendar_id: "wildlife" + - match: { count: 1 } + - match: { calendars.0.calendar_id: "wildlife" } + - length: { calendars.0.job_ids: 0 } + + - do: + catch: /Cannot remove \[missing_job\] as it is not present in calendar \[wildlife\]/ + xpack.ml.delete_calendar_job: + calendar_id: "wildlife" + job_id: "missing_job" +--- +"Test calendar get events": + + - do: + xpack.ml.put_calendar: + calendar_id: "events" + + - do: + xpack.ml.post_calendar_events: + calendar_id: "events" + body: > + { + "events" : [ + { "description": "event 1", "start_time": "2017-12-01T00:00:00Z", "end_time": "2017-12-02T00:00:00Z", "calendar_id": "events" }, + { "description": "event 2", "start_time": "2017-12-05T00:00:00Z", "end_time": "2017-12-06T00:00:00Z", "calendar_id": "events" }, + { "description": "event 3", "start_time": "2017-12-12T00:00:00Z", "end_time": "2017-12-13T00:00:00Z", "calendar_id": "events" }, + { "description": "event 4", "start_time": "2017-12-12T00:00:00Z", "end_time": "2017-12-15T00:00:00Z", "calendar_id": "events" }] + } + + - do: + xpack.ml.get_calendar_events: + calendar_id: "events" + - length: { events: 4 } + - match: { events.0.description: "event 1" } + - match: { events.1.description: "event 2" } + - match: { events.2.description: "event 3" } + - match: { events.3.description: "event 4" } + - is_true: events.0.event_id + - set: { events.0.event_id: event_1_id } + + - do: + xpack.ml.get_calendar_events: + calendar_id: "events" + from: 1 + size: 2 + - length: { events: 2 } + - match: { events.0.description: "event 2" } + - match: { events.1.description: "event 3" } + + - do: + xpack.ml.get_calendar_events: + calendar_id: "events" + end: "2017-12-12T00:00:00Z" + - length: { events: 2 } + - match: { events.0.description: "event 1" } + - match: { events.1.description: "event 2" } + + - do: + xpack.ml.get_calendar_events: + calendar_id: "events" + start: "2017-12-05T03:00:00Z" + - length: { events: 3 } + - match: { events.0.description: "event 2" } + - match: { events.1.description: "event 3" } + - match: { events.2.description: "event 4" } + + - do: + xpack.ml.get_calendar_events: + calendar_id: "events" + start: "2017-12-02T00:00:00Z" + end: "2017-12-12T00:00:00Z" + - length: { events: 1 } + - match: { events.0.description: "event 2" } + + - do: + xpack.ml.put_calendar: + calendar_id: "events-2" + + - do: + xpack.ml.post_calendar_events: + calendar_id: "events-2" + body: > + { + "events" : [ + { "description": "event 21", "start_time": "2017-12-02T00:00:00Z", "end_time": "2017-12-02T05:00:00Z"}, + { "description": "event 22", "start_time": "2017-12-25T00:00:00Z", "end_time": "2017-12-26T00:00:00Z"}] + } + + - do: + catch: bad_request + xpack.ml.post_calendar_events: + calendar_id: "events-2" + body: > + { + "events" : [ + { "description": "event 21", "start_time": "2017-12-02T00:00:00Z", "end_time": "2017-12-03T00:00:00Z", "calendar_id": "events"}] + } + +# Event 
is not in calendar events-2 + - do: + catch: bad_request + xpack.ml.delete_calendar_event: + calendar_id: "events-2" + event_id: $event_1_id + + - do: + xpack.ml.delete_calendar_event: + calendar_id: "events" + event_id: $event_1_id + + - do: + catch: missing + xpack.ml.delete_calendar_event: + calendar_id: "events" + event_id: "missing event" + + +--- +"Test delete calendar deletes events": + + - do: + xpack.ml.put_calendar: + calendar_id: "cal-foo" + + - do: + xpack.ml.post_calendar_events: + calendar_id: "cal-foo" + body: > + { + "events" : [ + { "description": "event 1", "start_time": "2017-12-01T00:00:00Z", "end_time": "2017-12-02T00:00:00Z" }, + { "description": "event 2", "start_time": "2017-12-05T00:00:00Z", "end_time": "2017-12-06T00:00:00Z" }, + { "description": "event 2", "start_time": "2017-12-05T00:00:00Z", "end_time": "2017-12-06T00:00:00Z" }] + } + + - do: + xpack.ml.put_calendar: + calendar_id: "cal-bar" + + - do: + xpack.ml.post_calendar_events: + calendar_id: "cal-bar" + body: > + { + "events" : [ + { "description": "event 21", "start_time": "2017-12-02T00:00:00Z", "end_time": "2017-12-02T05:00:00Z"}, + { "description": "event 22", "start_time": "2017-12-25T00:00:00Z", "end_time": "2017-12-26T00:00:00Z"}] + } + + - do: + xpack.ml.delete_calendar: + calendar_id: "cal-foo" + +# Check the event from calendar 1 is deleted + - do: + count: + index: .ml-meta + body: + query: + constant_score: + filter: + term: + type: scheduled_event + - match: { count: 2 } + + - do: + count: + index: .ml-meta + body: + query: + bool: + must: + - term: + type: scheduled_event + - term: + calendar_id: cal-foo + - match: { count: 0 } + +--- +"Test get all calendar events": + + - do: + xpack.ml.put_calendar: + calendar_id: "dave-holidays" + + - do: + xpack.ml.post_calendar_events: + calendar_id: "dave-holidays" + body: > + { + "events" : [ + { "description": "xmas", "start_time": "2017-12-25T00:00:00Z", "end_time": "2017-12-26T00:00:00Z" }, + { "description": "ny", "start_time": "2018-01-01T00:00:00Z", "end_time": "2018-01-02T00:00:00Z" }] + } + + - do: + xpack.ml.put_calendar: + calendar_id: "tom-holidays" + + - do: + xpack.ml.post_calendar_events: + calendar_id: "tom-holidays" + body: > + { + "events" : [ + { "description": "xmas", "start_time": "2017-12-20T00:00:00Z", "end_time": "2017-12-26T00:00:00Z" }, + { "description": "other", "start_time": "2017-12-27T00:00:00Z", "end_time": "2018-01-02T00:00:00Z" }] + } + + - do: + xpack.ml.get_calendar_events: + calendar_id: "_all" + - length: { events: 4 } + +--- +"Test get calendar events for job": + + - do: + xpack.ml.put_job: + job_id: cal-crud-job-with-events + body: > + { + "analysis_config" : { + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + } + } + + - do: + xpack.ml.put_calendar: + calendar_id: "dave-holidays" + body: > + { + "job_ids": ["cal-crud-job-with-events"] + } + + - do: + xpack.ml.post_calendar_events: + calendar_id: "dave-holidays" + body: > + { + "events" : [{ "description": "xmas", "start_time": "2017-12-25T00:00:00Z", "end_time": "2017-12-26T00:00:00Z" }, + { "description": "ny", "start_time": "2018-01-01T00:00:00Z", "end_time": "2018-01-02T00:00:00Z" }] + } + + - do: + xpack.ml.put_calendar: + calendar_id: "tom-holidays" + body: > + { + "job_ids": ["cal-crud-job-with-events"] + } + + - do: + xpack.ml.post_calendar_events: + calendar_id: "tom-holidays" + body: > + { + "events" : [ + { "description": "xmas", "start_time": "2017-12-20T00:00:00Z", 
"end_time": "2017-12-26T00:00:00Z" }, + { "description": "other", "start_time": "2018-01-15T00:00:00Z", "end_time": "2018-01-16T00:00:00Z" }] + } + + - do: + xpack.ml.put_calendar: + calendar_id: "not-used-by-job" + + - do: + xpack.ml.post_calendar_events: + calendar_id: "not-used-by-job" + body: > + { + "events" : [ + { "description": "random", "start_time": "2018-01-20T00:00:00Z", "end_time": "2018-01-26T00:00:00Z" }, + { "description": "random2", "start_time": "2018-02-20T00:00:00Z", "end_time": "2018-02-26T00:00:00Z" }] + } + + +# Calendar Id must be _all if a job id is used + - do: + catch: /action_request_validation_exception/ + xpack.ml.get_calendar_events: + calendar_id: "dave-holiday" + job_id: cal-crud-job-with-events + + - do: + xpack.ml.get_calendar_events: + calendar_id: _all + job_id: cal-crud-job-with-events + - match: { count: 4 } + - length: { events: 4 } + + - do: + xpack.ml.get_calendar_events: + calendar_id: _all + start: "2018-01-01T00:00:00Z" + job_id: cal-crud-job-with-events + - match: { count: 2 } + - length: { events: 2 } + - match: { events.0.description: ny } + - match: { events.1.description: other } + +--- +"Test get calendar events with job groups": +# Test job group + - do: + xpack.ml.put_job: + job_id: cal-crud-job-with-events-group + body: > + { + "analysis_config" : { + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + }, + "groups" : ["ben-holidays-group"] + } + + - do: + xpack.ml.put_calendar: + calendar_id: "ben-holidays" + body: > + { + "job_ids": ["ben-holidays-group"] + } + + - do: + xpack.ml.post_calendar_events: + calendar_id: "ben-holidays" + body: > + { + "events" : [ + { "description": "ski", "start_time": "2018-01-20T00:00:00Z", "end_time": "2018-01-27T00:00:00Z" }, + { "description": "snow", "start_time": "2018-01-30T00:00:00Z", "end_time": "2018-02-01T00:00:00Z" }] + } + + - do: + xpack.ml.get_calendar_events: + calendar_id: _all + job_id: "cal-crud-job-with-events-group" + - match: { count: 2 } + - length: { events: 2 } + - match: { events.0.description: ski } + - match: { events.1.description: snow } + + - do: + xpack.ml.get_calendar_events: + calendar_id: _all + job_id: "ben-holidays-group" + - match: { count: 2 } + - length: { events: 2 } + - match: { events.0.description: ski } + - match: { events.1.description: snow } + +--- +"Test post calendar events given empty events": + + - do: + catch: /At least 1 event is required/ + xpack.ml.post_calendar_events: + calendar_id: "foo" + body: > + { + "events": [] + } + +--- +"Test delete event from non existing calendar": + + - do: + catch: /No calendar with id \[unknown\]/ + xpack.ml.delete_calendar_event: + calendar_id: "unknown" + event_id: "event_1" + +--- +"Test delete job from non existing calendar": + + - do: + catch: /No calendar with id \[unknown\]/ + xpack.ml.delete_calendar_job: + calendar_id: "unknown" + job_id: "missing_calendar" + +--- +"Test list of job Ids": + - do: + xpack.ml.put_job: + job_id: foo-a + body: > + { + "analysis_config" : { + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + } + } + + - do: + xpack.ml.put_job: + job_id: foo-b + body: > + { + "analysis_config" : { + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + } + } + + - do: + xpack.ml.put_job: + job_id: bar-a + body: > + { + "analysis_config" : { + "detectors" 
:[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + } + } + + - do: + xpack.ml.put_calendar: + calendar_id: "expression" + body: > + { + "job_ids": ["bar-a"] + } + + - do: + xpack.ml.put_calendar_job: + calendar_id: "expression" + job_id: "foo-a,foo-b" + - match: { calendar_id: "expression" } + - length: { job_ids: 3 } + + - do: + xpack.ml.delete_calendar_job: + calendar_id: "expression" + job_id: "foo-a,foo-b" + - match: { calendar_id: "expression" } + - length: { job_ids: 1 } + - match: { job_ids.0: "bar-a" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/custom_all_field.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/custom_all_field.yml new file mode 100644 index 0000000000000..ffbbf4d95bdda --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/custom_all_field.yml @@ -0,0 +1,145 @@ +setup: + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: custom-all-test-1 + body: > + { + "description":"Job for testing custom all field", + "analysis_config" : { + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description": {} + } + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: custom-all-test-2 + body: > + { + "description":"Job for testing custom all field", + "analysis_config" : { + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description": {} + } + + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-custom-all-test-1 + type: doc + id: custom_all_1464739200000_1_1 + body: + { + "job_id": "custom-all-test-1", + "result_type": "record", + "timestamp": "2016-06-01T00:00:00Z", + "anomaly_score": 60.0, + "bucket_span": 1, + "by_field_value": "A by field", + "partition_field_value": "A partition field", + "over_field_value": "An over field", + "influencer_field_name": "An influencer field", + "causes": [{ + "by_field_value": "Cause by field", + "partition_field_value": "Cause partition field", + "over_field_value": "Cause over field", + "correlated_by_field_value": "Cause correlated by field" + }] + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-custom-all-test-2 + type: doc + id: custom_all_1464739200000_1_2 + body: + { + "job_id": "custom-all-test-2", + "result_type": "record", + "timestamp": "2016-06-01T00:00:00Z", + "bucket_span": 1, + "by_field_value": "A by field" + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + indices.refresh: + index: [.ml-anomalies-shared] + +--- +"Test querying custom all field": + + - do: + search: + index: .ml-anomalies-custom-all-test-1 + body: { query: { query_string: { query: "A by field" } } } + - match: { hits.total: 1 } + + - do: + search: + index: .ml-anomalies-custom-all-test-1 + body: { query: { query_string: { query: "A partition field" } } } + - match: { hits.total: 1 } + + - do: + search: + index: .ml-anomalies-custom-all-test-1 + body: { query: { query_string: { query: "An over field" } } } + - match: { hits.total: 1 } + + - do: + search: + index: .ml-anomalies-custom-all-test-1 + body: { query: { query_string: { query: "An influencer field" } } } + - match: { hits.total: 1 } + + - do: + search: + index: .ml-anomalies-custom-all-test-1 + body: { query: { query_string: { query: "Cause by field" } } } + - match: { hits.total: 1 } + + - do: + search: + index: .ml-anomalies-custom-all-test-1 + body: { query: { query_string: { query: "Cause partition field" } } } + - match: { hits.total: 1 } + + - do: + search: + index: .ml-anomalies-custom-all-test-1 + body: { query: { query_string: { query: "Cause over field" } } } + - match: { hits.total: 1 } + + - do: + search: + index: .ml-anomalies-custom-all-test-1 + body: { query: { query_string: { query: "Cause correlated by field" } } } + - match: { hits.total: 1 } + + - do: + search: + index: .ml-anomalies-custom-all-test-1 + body: { query: { query_string: { query: "custom-all-test-1" } } } + - match: { hits.total: 1 } + +--- +"Test wildcard job id": + + - do: + search: + body: { query: { bool: { must: [ + { query_string: { query: "result_type:record"} }, + { query_string: { query: "A by field" } }, + { query_string: { query: "job_id:*" } } + ] } } } + - match: { hits.total: 2 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml new file mode 100644 index 0000000000000..a0f79b7cabad4 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml @@ -0,0 +1,357 @@ +setup: + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: datafeeds-crud-1 + body: > + { + "job_id":"datafeeds-crud-1", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"count"}] + }, + "data_description" : { + "format":"xcontent", + "time_field":"time", + "time_format":"epoch" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + xpack.ml.put_job: + job_id: datafeeds-crud-2 + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"count"}] + }, + "data_description" : { + "time_field":"time" + } + } + +--- +"Test get all datafeeds and stats given no datafeed exists": + + - do: + xpack.ml.get_datafeeds: + datafeed_id: "_all" + - match: { count: 0 } + - match: { datafeeds: [] } + + - do: + xpack.ml.get_datafeed_stats: + datafeed_id: "_all" + - match: { count: 0 } + - match: { datafeeds: [] } + +--- +"Test get datafeed with expression that does not match and allow_no_datafeeds": + + - do: + xpack.ml.get_datafeeds: + datafeed_id: "missing-*" + allow_no_datafeeds: true + - match: { count: 0 } + - match: { datafeeds: [] } + +--- +"Test get datafeed with expression that does not match and not allow_no_datafeeds": + + - do: + catch: missing + xpack.ml.get_datafeeds: + datafeed_id: "missing-*" + allow_no_datafeeds: false + +--- +"Test put datafeed referring to missing job_id": + - do: + catch: /resource_not_found_exception/ + xpack.ml.put_datafeed: + datafeed_id: test-datafeed-1 + body: > + { + "job_id":"a-missing-job", + "indexes":["index-foo"], + "types":["type-bar"] + } + +--- +"Test put datafeed with security headers in the body": + - do: + catch: /unknown field \[headers\], parser not found/ + xpack.ml.put_datafeed: + datafeed_id: test-datafeed-1 + body: > + { + "job_id":"datafeeds-crud-1", + "indices":["index-foo"], + "headers":{ "a_security_header" : "secret" } + } + +--- +"Test put datafeed referring to existing job_id": + - do: + xpack.ml.put_datafeed: + datafeed_id: test-datafeed-1 + body: > + { + "job_id":"datafeeds-crud-1", + "indexes":["index-foo"], + "types":["type-bar"] + } + - match: { datafeed_id: "test-datafeed-1" } + - match: { job_id: "datafeeds-crud-1" } + - match: { indices: ["index-foo"] } + - match: { types: ["type-bar"] } + - match: { scroll_size: 1000 } + - is_true: query.match_all + - match: { chunking_config: { mode: "auto" }} + +--- +"Test put datafeed whose id is already taken": + - do: + xpack.ml.put_datafeed: + datafeed_id: test-datafeed-1 + body: > + { + "job_id":"datafeeds-crud-1", + "indexes":["index-foo"], + "types":["type-bar"] + } + - match: { datafeed_id: "test-datafeed-1" } + + - do: + catch: /resource_already_exists_exception/ + xpack.ml.put_datafeed: + datafeed_id: test-datafeed-1 + body: > + { + "job_id":"datafeeds-crud-2", + "indexes":["index-foo"], + "types":["type-bar"] + } + +--- +"Test put datafeed with job_id that is already used by another datafeed": + - do: + xpack.ml.put_datafeed: + datafeed_id: test-datafeed-1 + body: > + { + "job_id":"datafeeds-crud-1", + "indexes":["index-foo"], + "types":["type-bar"] + } + - match: { datafeed_id: "test-datafeed-1" } + + - do: + catch: /A datafeed \[test-datafeed-1\] already exists for job \[datafeeds-crud-1\]/ + xpack.ml.put_datafeed: + datafeed_id: test-datafeed-2 + body: > + { + "job_id":"datafeeds-crud-1", + "indexes":["index-foo"], + "types":["type-bar"] + } + +--- +"Test put datafeed with invalid query": + - do: + catch: /parsing_exception/ + xpack.ml.put_datafeed: + datafeed_id: test-datafeed-1 + body: > + { + "job_id":"datafeeds-crud-1", + "indexes":["index-foo"], + "types":["type-bar"], + "query":{"match_all_mispelled":{}} + } + +--- +"Test update datafeed": + - do: + xpack.ml.put_datafeed: + datafeed_id: test-datafeed-1 + body: > + { + "job_id":"datafeeds-crud-1", + "indexes":["index-foo"], + "types":["type-bar"], + "scroll_size": 2000, + "frequency": "1m", + 
"query_delay": "30s" + } + + - do: + xpack.ml.update_datafeed: + datafeed_id: test-datafeed-1 + body: > + { + "indexes":["index-*"], + "scroll_size": 10000, + "frequency": "2m", + "query_delay": "0s" + } + - match: { datafeed_id: "test-datafeed-1" } + - match: { job_id: "datafeeds-crud-1" } + - match: { indices: ["index-*"] } + - match: { types: ["type-bar"] } + - match: { scroll_size: 10000 } + - match: { frequency: "2m" } + - match: { query_delay: "0s" } + +--- +"Test update datafeed to point to different job": + - do: + xpack.ml.put_datafeed: + datafeed_id: test-datafeed-1 + body: > + { + "job_id":"datafeeds-crud-1", + "indexes":["index-foo"], + "types":["type-bar"], + "scroll_size": 2000 + } + + - do: + xpack.ml.update_datafeed: + datafeed_id: test-datafeed-1 + body: > + { + "job_id": "datafeeds-crud-2" + } + - match: { datafeed_id: "test-datafeed-1" } + - match: { job_id: "datafeeds-crud-2" } + - match: { indices: ["index-foo"] } + - match: { types: ["type-bar"] } + +--- +"Test update datafeed with missing id": + + - do: + catch: /resource_not_found_exception/ + xpack.ml.update_datafeed: + datafeed_id: a-missing-datafeed + body: > + {} + +--- +"Test update datafeed to point to missing job": + - do: + xpack.ml.put_datafeed: + datafeed_id: test-datafeed-1 + body: > + { + "job_id":"datafeeds-crud-1", + "indexes":["index-foo"], + "types":["type-bar"], + "scroll_size": 2000 + } + + - do: + catch: /resource_not_found_exception/ + xpack.ml.update_datafeed: + datafeed_id: test-datafeed-1 + body: > + { + "job_id": "update-datafeed-missing-job" + } + +--- +"Test update datafeed to point to job already attached to another datafeed": + - do: + xpack.ml.put_datafeed: + datafeed_id: test-datafeed-1 + body: > + { + "job_id":"datafeeds-crud-1", + "indexes":["index-foo"], + "types":["type-bar"] + } + + - do: + xpack.ml.put_datafeed: + datafeed_id: test-datafeed-2 + body: > + { + "job_id":"datafeeds-crud-2", + "indexes":["index-foo"], + "types":["type-bar"] + } + + - do: + catch: /A datafeed \[test-datafeed-2\] already exists for job \[datafeeds-crud-2\]/ + xpack.ml.update_datafeed: + datafeed_id: test-datafeed-1 + body: > + { + "job_id": "datafeeds-crud-2" + } + +--- +"Test delete datafeed with missing id": + - do: + catch: /resource_not_found_exception/ + xpack.ml.delete_datafeed: + datafeed_id: a-missing-datafeed + +--- +"Test put datafeed with chunking_config": + - do: + xpack.ml.put_datafeed: + datafeed_id: test-datafeed-1 + body: > + { + "job_id":"datafeeds-crud-1", + "indices":["index-foo"], + "types":["type-bar"], + "chunking_config": {"mode":"manual","time_span": "1h"} + } + - match: { datafeed_id: "test-datafeed-1" } + - match: { chunking_config.mode: "manual" } + - match: { chunking_config.time_span: "1h" } + +--- +"Test delete datafeed": + - do: + xpack.ml.put_datafeed: + datafeed_id: test-datafeed-1 + body: > + { + "job_id":"datafeeds-crud-1", + "indexes":["index-foo"], + "types":["type-bar"] + } + - match: { datafeed_id: "test-datafeed-1" } + + - do: + xpack.ml.delete_datafeed: + datafeed_id: test-datafeed-1 + - match: { acknowledged: true } + +--- +"Test force delete datafeed": + - do: + xpack.ml.put_datafeed: + datafeed_id: test-datafeed-1 + body: > + { + "job_id":"datafeeds-crud-1", + "indexes":["index-foo"], + "types":["type-bar"] + } + - match: { datafeed_id: "test-datafeed-1" } + + - do: + xpack.ml.delete_datafeed: + datafeed_id: test-datafeed-1 + force: true + - match: { acknowledged: true } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_job_force.yml 
b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_job_force.yml new file mode 100644 index 0000000000000..5faba0c8031b2 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_job_force.yml @@ -0,0 +1,74 @@ +setup: + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: force-delete-job + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"count"}] + }, + "data_description" : { + } + } + +--- +"Test force delete a closed job": + - do: + xpack.ml.delete_job: + force: true + job_id: force-delete-job + - match: { acknowledged: true } + + - do: + xpack.ml.get_jobs: + job_id: "_all" + - match: { count: 0 } + +--- +"Test force delete an open job": + + - do: + xpack.ml.open_job: + job_id: force-delete-job + + - do: + xpack.ml.delete_job: + force: true + job_id: force-delete-job + - match: { acknowledged: true } + + - do: + xpack.ml.get_jobs: + job_id: "_all" + - match: { count: 0 } + +--- +"Test cannot force delete a non-existent job": + + - do: + catch: /resource_not_found_exception/ + xpack.ml.delete_job: + force: true + job_id: inexistent-job + +--- +"Test force delete job that is referred by a datafeed": + + - do: + xpack.ml.put_datafeed: + datafeed_id: force-delete-job-datafeed + body: > + { + "job_id":"force-delete-job", + "indexes":["index-foo"], + "types":["type-bar"] + } + - match: { datafeed_id: force-delete-job-datafeed } + + - do: + catch: /Cannot delete job \[force-delete-job\] because datafeed \[force-delete-job-datafeed\] refers to it/ + xpack.ml.delete_job: + job_id: force-delete-job diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_model_snapshot.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_model_snapshot.yml new file mode 100644 index 0000000000000..ea545da5f639c --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/delete_model_snapshot.yml @@ -0,0 +1,194 @@ +setup: + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: delete-model-snapshot + body: > + { + "job_id": "delete-model-snapshot", + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span" : "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "field_delimiter":",", + "time_field":"time", + "time_format":"yyyy-MM-dd HH:mm:ssX" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.open_job: + job_id: delete-model-snapshot + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.close_job: + job_id: delete-model-snapshot + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + index: + index: .ml-anomalies-delete-model-snapshot + type: doc + id: "delete-model-snapshot_model_snapshot_inactive-snapshot" + body: > + { + "job_id": "delete-model-snapshot", + "timestamp": "2016-06-02T00:00:00Z", + "snapshot_id": "inactive-snapshot", + "description": "first", + "latest_record_time_stamp": "2016-06-02T00:00:00Z", + "latest_result_time_stamp": "2016-06-02T00:00:00Z", + "snapshot_doc_count": 2 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-state + type: doc + id: "delete-model-snapshot_model_state_inactive-snapshot#1" + body: > + { + "state": "a" + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-state + type: doc + id: "delete-model-snapshot_model_state_inactive-snapshot#2" + body: > + { + "state": "b" + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-delete-model-snapshot + type: doc + id: "delete-model-snapshot_model_snapshot_active-snapshot" + body: > + { + "job_id": "delete-model-snapshot", + "timestamp": "2016-06-01T00:00:00Z", + "snapshot_id": "active-snapshot", + "description": "second", + "latest_record_time_stamp": "2016-06-01T00:00:00Z", + "latest_result_time_stamp": "2016-06-01T00:00:00Z", + "snapshot_doc_count": 3 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.refresh: + index: .ml-state + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.refresh: + index: .ml-anomalies-delete-model-snapshot + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.update_job: + job_id: delete-model-snapshot + body: > + { + "model_snapshot_id": "active-snapshot" + } + +--- +"Test delete snapshot missing snapshotId": + - do: + catch: param + xpack.ml.delete_model_snapshot: + job_id: "delete-model-snapshot" + +--- +"Test delete snapshot missing job_id": + - do: + catch: param + xpack.ml.delete_model_snapshot: + snapshot_id: "inactive-snapshot" + +--- +"Test valid delete snapshot": + - do: + xpack.ml.get_model_snapshots: + job_id: "delete-model-snapshot" + - match: { count: 2 } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: .ml-state + type: doc + + - match: { count: 3 } + + - do: + xpack.ml.delete_model_snapshot: + job_id: "delete-model-snapshot" + snapshot_id: "inactive-snapshot" + - match: { acknowledged: true } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.refresh: + index: .ml-anomalies-delete-model-snapshot + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + indices.refresh: + index: .ml-state + + - do: + xpack.ml.get_model_snapshots: + job_id: "delete-model-snapshot" + - match: { count: 1 } + - match: { model_snapshots.0.snapshot_id: "active-snapshot"} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: .ml-state + type: doc + + - match: { count: 1 } + +--- +"Test delete with in-use model": + + - do: + catch: bad_request + xpack.ml.delete_model_snapshot: + job_id: "delete-model-snapshot" + snapshot_id: "active-snapshot" + +--- +"Test with unknown job id": + - do: + catch: missing + xpack.ml.delete_model_snapshot: + job_id: "non-existent-job" + snapshot_id: "delete-model-snapshot" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml new file mode 100644 index 0000000000000..5203ead3ce8a9 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml @@ -0,0 +1,221 @@ +--- +setup: + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-meta + type: doc + id: filter_imposter-filter + body: > + { + "filter_id": "imposter", + "items": ["a", "b"], + "type": "imposter" + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_filter: + filter_id: filter-foo + body: > + { + "items": ["abc", "xyz"] + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_filter: + filter_id: filter-foo2 + body: > + { + "items": ["123", "lmnop"] + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + indices.refresh: {} + +--- +"Test get filter API with bad ID": + + - do: + catch: missing + xpack.ml.get_filters: + filter_id: "do_not_exist" + +--- +"Test get filter API": + + - do: + xpack.ml.get_filters: + filter_id: "filter-foo" + + - match: { count: 1 } + - match: + filters.0: + filter_id: "filter-foo" + items: ["abc", "xyz"] + +--- +"Test get filters API": + + - do: + xpack.ml.get_filters: {} + + - match: { count: 2 } + - match: + filters.0: + filter_id: "filter-foo" + items: ["abc", "xyz"] + + - match: + filters.1: + filter_id: "filter-foo2" + items: ["123", "lmnop"] + + - do: + xpack.ml.get_filters: + from: 1 + size: 1 + + - match: { count: 1 } + +--- +"Test invalid param combinations": + + - do: + catch: bad_request + xpack.ml.get_filters: + filter_id: "filter-foo" + from: 0 + + - do: + catch: bad_request + xpack.ml.get_filters: + filter_id: "filter-foo" + size: 1 + + - do: + catch: bad_request + xpack.ml.get_filters: + filter_id: "filter-foo" + from: 0 + size: 1 +--- +"Test create filter api": + - do: + xpack.ml.put_filter: + filter_id: filter-foo2 + body: > + { + "items": ["abc", "xyz"] + } + + - match: { acknowledged: true } + + - do: + xpack.ml.get_filters: + filter_id: "filter-foo2" + + - match: { count: 1 } + - match: + filters.0: + filter_id: "filter-foo2" + items: ["abc", "xyz"] + +--- +"Test create filter api with mismatching body ID": + - do: + catch: /illegal_argument_exception/ + xpack.ml.put_filter: + filter_id: "uri_id" + body: > + { + "filter_id": "body_id", + "items": ["abc", "xyz"] + } + +--- +"Test delete in-use filter": + - do: + xpack.ml.put_job: + job_id: filter-crud + body: > + { + "job_id":"filter-crud", + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span": "3600s", + "detectors" :[{"function":"mean","field_name":"responsetime", "by_field_name": "airline", + "rules": [ + { + "conditions": [ + { + "type": "categorical", + "field_name": "airline", + "filter_id": "filter-foo" + } + ] + } + ]}] + }, + "data_description" : { + "field_delimiter":",", + "time_field":"time", + "time_format":"yyyy-MM-dd HH:mm:ssX" + } + } + - do: + catch: conflict + xpack.ml.delete_filter: + filter_id: "filter-foo" + +--- +"Test non-existing filter": + - do: + catch: missing + xpack.ml.delete_filter: + filter_id: "does_not_exist" + +--- +"Test valid delete filter": + + - do: + xpack.ml.get_filters: + filter_id: "filter-foo" + + - match: { count: 1 } + - match: + filters.0: + filter_id: "filter-foo" + items: ["abc", "xyz"] + + - do: + xpack.ml.delete_filter: + filter_id: "filter-foo" + + - do: + catch: missing + xpack.ml.get_filters: + filter_id: "filter-foo" + +--- +"Test get all filter given no filter exists": + + - do: + xpack.ml.delete_filter: + filter_id: "filter-foo" + + - do: + xpack.ml.delete_filter: + filter_id: "filter-foo2" + + - do: + xpack.ml.get_filters: {} + - match: { count: 0 } + - match: { filters: [] } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/forecast.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/forecast.yml new file mode 100644 index 0000000000000..df44751a37cd9 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/forecast.yml @@ -0,0 +1,52 @@ +setup: + - do: + xpack.ml.put_job: + job_id: forecast-job + body: > + { + "description":"A forecast job", + "analysis_config" : { + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "format":"xcontent" + } + } + +--- 
+"Test forecast unknown job": + - do: + catch: missing + xpack.ml.forecast: + job_id: "non-existing-job" + +--- +"Test forecast on closed job": + - do: + catch: /status_exception/ + xpack.ml.forecast: + job_id: "forecast-job" + +--- +"Test forecast given duration is zero": + - do: + catch: /\[duration\] must be positive[:] \[0s\]/ + xpack.ml.forecast: + job_id: "forecast-job" + duration: "0s" + +--- +"Test forecast given duration is negative": + - do: + catch: /\[duration\] must be positive[:] \[-1\]/ + xpack.ml.forecast: + job_id: "forecast-job" + duration: "-1s" + +--- +"Test forecast given expires_in is negative": + - do: + catch: /\[expires_in\] must be non-negative[:] \[-1\]/ + xpack.ml.forecast: + job_id: "forecast-job" + expires_in: "-1s" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeed_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeed_stats.yml new file mode 100644 index 0000000000000..7f3250c7db614 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeed_stats.yml @@ -0,0 +1,213 @@ +setup: + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.create: + index: index-1 + body: + settings: + index: + number_of_replicas: 1 + mappings: + type-1: + properties: + time: + type: date + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.create: + index: index-2 + body: + settings: + index: + number_of_replicas: 1 + mappings: + type-2: + properties: + time: + type: date + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: get-datafeed-stats-1 + body: > + { + "job_id":"get-datafeed-stats-1", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "10mb" + }, + "data_description" : { + "format":"xcontent", + "time_field":"time", + "time_format":"epoch" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: get-datafeed-stats-2 + body: > + { + "job_id":"get-datafeed-stats-2", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "10mb" + }, + "data_description" : { + "time_field":"time" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_datafeed: + datafeed_id: datafeed-1 + body: > + { + "job_id":"get-datafeed-stats-1", + "indexes":["index-1"], + "types":["type-1"] + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + xpack.ml.put_datafeed: + datafeed_id: datafeed-2 + body: > + { + "job_id":"get-datafeed-stats-2", + "indexes":["index-2"], + "types":["type-2"] + } + +--- +"Test get datafeed stats given missing datafeed_id": + + - do: + catch: missing + xpack.ml.get_datafeed_stats: + datafeed_id: missing-datafeed + +--- +"Test get datafeed stats with expression that does not match and allow_no_datafeeds": + + - do: + xpack.ml.get_datafeed_stats: + datafeed_id: "missing-*" + allow_no_datafeeds: true + - match: { count: 0 } + - match: { datafeeds: [] } + +--- +"Test get datafeed stats with expression that does not match and not allow_no_datafeeds": + + - do: + catch: missing + xpack.ml.get_datafeed_stats: + datafeed_id: "missing-*" + allow_no_datafeeds: false + +--- +"Test get single datafeed stats": + + - do: + xpack.ml.get_datafeed_stats: + datafeed_id: datafeed-1 + - match: { datafeeds.0.datafeed_id: "datafeed-1"} + - match: { datafeeds.0.state: "stopped"} + - is_false: datafeeds.0.node + + - do: + xpack.ml.get_datafeed_stats: + datafeed_id: datafeed-2 + - match: { datafeeds.0.datafeed_id: "datafeed-2"} + - match: { datafeeds.0.state: "stopped"} + - is_false: datafeeds.0.node + +--- +"Test get stats for started datafeed": + + - do: + xpack.ml.open_job: + job_id: get-datafeed-stats-1 + + - do: + xpack.ml.start_datafeed: + "datafeed_id": "datafeed-1" + "start": 0 + + - do: + xpack.ml.get_datafeed_stats: + datafeed_id: datafeed-1 + - match: { datafeeds.0.datafeed_id: "datafeed-1"} + - match: { datafeeds.0.state: "started"} + - is_true: datafeeds.0.node.name + - is_true: datafeeds.0.node.transport_address + - match: { datafeeds.0.node.attributes.ml\.enabled: "true"} + +--- +"Test implicit get all datafeed stats given started datafeeds": + + - do: + xpack.ml.open_job: + job_id: get-datafeed-stats-1 + + - do: + xpack.ml.start_datafeed: + "datafeed_id": "datafeed-1" + "start": 0 + + - do: + xpack.ml.open_job: + job_id: get-datafeed-stats-2 + + - do: + xpack.ml.start_datafeed: + "datafeed_id": "datafeed-2" + "start": 0 + + - do: + xpack.ml.get_datafeed_stats: {} + - match: { count: 2 } + - match: { datafeeds.0.datafeed_id: "datafeed-1"} + - match: { datafeeds.0.state: "started"} + - match: { datafeeds.1.datafeed_id: "datafeed-2"} + - match: { datafeeds.1.state: "started"} + +--- +"Test explicit get all datafeed stats given stopped datafeeds": + + - do: + xpack.ml.get_datafeed_stats: + datafeed_id: _all + - match: { count: 2 } + - match: { datafeeds.0.datafeed_id: "datafeed-1"} + - match: { datafeeds.0.state: "stopped"} + - match: { datafeeds.1.datafeed_id: "datafeed-2"} + - match: { datafeeds.1.state: "stopped"} + +--- +"Test implicit get all datafeed stats given stopped datafeeds": + + - do: + xpack.ml.get_datafeed_stats: {} + - match: { count: 2 } + - match: { datafeeds.0.datafeed_id: "datafeed-1"} + - match: { datafeeds.0.state: "stopped"} + - match: { datafeeds.1.datafeed_id: "datafeed-2"} + - match: { datafeeds.1.state: "stopped"} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeeds.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeeds.yml new file mode 100644 index 0000000000000..6daedaa8068cc --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_datafeeds.yml @@ -0,0 +1,106 @@ +setup: + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + xpack.ml.put_job: + job_id: get-datafeed-1 + body: > + { + "job_id":"get-datafeed-1", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"count"}] + }, + "data_description" : { + "format":"xcontent", + "time_field":"time", + "time_format":"epoch" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: get-datafeed-2 + body: > + { + "job_id":"get-datafeed-2", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"count"}] + }, + "data_description" : { + "time_field":"time" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_datafeed: + datafeed_id: datafeed-1 + body: > + { + "job_id":"get-datafeed-1", + "indexes":["index-1"], + "types":["type-1"] + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_datafeed: + datafeed_id: datafeed-2 + body: > + { + "job_id":"get-datafeed-2", + "indexes":["index-2"], + "types":["type-2"] + } + +--- +"Test get datafeed given missing datafeed_id": + + - do: + catch: missing + xpack.ml.get_datafeeds: + datafeed_id: missing-datafeed + +--- +"Test get single datafeed": + + - do: + xpack.ml.get_datafeeds: + datafeed_id: datafeed-1 + - match: { datafeeds.0.datafeed_id: "datafeed-1"} + - match: { datafeeds.0.job_id: "get-datafeed-1"} + + - do: + xpack.ml.get_datafeeds: + datafeed_id: datafeed-2 + - match: { datafeeds.0.datafeed_id: "datafeed-2"} + - match: { datafeeds.0.job_id: "get-datafeed-2"} + +--- +"Test explicit get all datafeeds": + + - do: + xpack.ml.get_datafeeds: + datafeed_id: _all + - match: { count: 2 } + - match: { datafeeds.0.datafeed_id: "datafeed-1"} + - match: { datafeeds.0.job_id: "get-datafeed-1"} + - match: { datafeeds.1.datafeed_id: "datafeed-2"} + - match: { datafeeds.1.job_id: "get-datafeed-2"} + +--- +"Test implicit get all datafeeds": + + - do: + xpack.ml.get_datafeeds: {} + - match: { count: 2 } + - match: { datafeeds.0.datafeed_id: "datafeed-1"} + - match: { datafeeds.0.job_id: "get-datafeed-1"} + - match: { datafeeds.1.datafeed_id: "datafeed-2"} + - match: { datafeeds.1.job_id: "get-datafeed-2"} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_model_snapshots.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_model_snapshots.yml new file mode 100644 index 0000000000000..57cc80ae2fb73 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/get_model_snapshots.yml @@ -0,0 +1,168 @@ +setup: + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: get-model-snapshots + body: > + { + "analysis_config" : { + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "format":"xcontent", + "time_field":"time" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + index: + index: .ml-anomalies-get-model-snapshots + type: doc + id: "get-model-snapshots-1" + body: > + { + "job_id": "get-model-snapshots", + "snapshot_id": "1", + "timestamp": "2016-06-02T00:00:00Z", + "snapshot_doc_count": 1 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-state + type: doc + id: "get-model-snapshots_model_state_1#1" + body: > + { + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-get-model-snapshots + type: doc + id: "get-model-snapshots-2" + body: > + { + "job_id": "get-model-snapshots", + "snapshot_id": "2", + "timestamp": "2016-06-01T00:00:00Z", + "snapshot_doc_count": 2 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-state + type: doc + id: "get-model-snapshots_model_state_2#1" + body: > + { + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-state + type: doc + id: "get-model-snapshots_model_state_2#2" + body: > + { + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.refresh: + index: [.ml-anomalies-get-model-snapshots,.ml-state] + +--- +"Test get model snapshots API with no params": + - do: + xpack.ml.get_model_snapshots: + job_id: "get-model-snapshots" + + - match: { count: 2 } + - match: { model_snapshots.0.timestamp: 1464825600000 } + - match: { model_snapshots.1.timestamp: 1464739200000 } + +--- +"Test get model snapshots API with _all": + - do: + xpack.ml.get_model_snapshots: + job_id: "get-model-snapshots" + snapshot_id: "_all" + + - match: { count: 2 } + - match: { model_snapshots.0.timestamp: 1464825600000 } + - match: { model_snapshots.1.timestamp: 1464739200000 } + +--- +"Test get specific model snapshot": + - do: + xpack.ml.get_model_snapshots: + job_id: "get-model-snapshots" + snapshot_id: "2" + + - match: { count: 1 } + - match: { model_snapshots.0.timestamp: 1464739200000 } + +--- +"Test get model snapshots API with start/end": + - do: + xpack.ml.get_model_snapshots: + job_id: "get-model-snapshots" + start: "2016-05-01T00:00:00Z" + end: "2016-07-01T00:00:00Z" + + - match: { count: 2 } + - match: { model_snapshots.0.timestamp: 1464825600000 } + - match: { model_snapshots.1.timestamp: 1464739200000 } + +--- +"Test get model snapshots API with ascending": + - do: + xpack.ml.get_model_snapshots: + job_id: "get-model-snapshots" + desc: false + + - match: { count: 2 } + - match: { model_snapshots.0.timestamp: 1464739200000 } + - match: { model_snapshots.1.timestamp: 1464825600000 } + +--- +"Test get model snapshots API with size": + - do: + xpack.ml.get_model_snapshots: + job_id: "get-model-snapshots" + size: 1 + + - match: { count: 2 } + - match: { model_snapshots.0.timestamp: 1464825600000 } + - length: { model_snapshots: 1 } + +--- +"Test get model snapshots API with from": + - do: + xpack.ml.get_model_snapshots: + job_id: "get-model-snapshots" + from: 1 + + - match: { count: 2 } + - match: { model_snapshots.0.timestamp: 1464739200000 } + - length: { model_snapshots: 1 } + +--- 
+"Test with unknown job id": + - do: + catch: missing + xpack.ml.get_model_snapshots: + job_id: "non-existent-job" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml new file mode 100644 index 0000000000000..c13ae86e06f50 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/index_layout.yml @@ -0,0 +1,700 @@ +--- +"Test CRUD on two jobs in shared index": + + - do: + xpack.ml.put_job: + job_id: index-layout-job + body: > + { + "job_id":"index-layout-job", + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "analysis_limits" : { + "model_memory_limit": "20mb" + }, + "data_description" : { + "format":"xcontent", + "time_field":"time", + "time_format":"epoch" + } + } + - match: { job_id: "index-layout-job" } + + + - do: + xpack.ml.put_job: + job_id: index-layout-job2 + body: > + { + "job_id":"index-layout-job2", + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "analysis_limits" : { + "model_memory_limit": "20mb" + }, + "data_description" : { + "format":"xcontent", + "time_field":"time", + "time_format":"epoch" + } + } + - match: { job_id: "index-layout-job2" } + + - do: + xpack.ml.open_job: + job_id: index-layout-job + + - do: + xpack.ml.open_job: + job_id: index-layout-job2 + + - do: + xpack.ml.post_data: + job_id: index-layout-job + body: > + {"airline":"AAL","responsetime":"132.2046","sourcetype":"farequote","time":"1403481600"} + {"airline":"JZA","responsetime":"990.4628","sourcetype":"farequote","time":"1403481700"} + + - do: + xpack.ml.post_data: + job_id: index-layout-job2 + body: > + {"airline":"AAL","responsetime":"132.2046","sourcetype":"farequote","time":"1403481600"} + {"airline":"JZA","responsetime":"990.4628","sourcetype":"farequote","time":"1403481700"} + + + - do: + xpack.ml.flush_job: + job_id: index-layout-job + - match: { flushed: true } + + - do: + xpack.ml.flush_job: + job_id: index-layout-job2 + - match: { flushed: true } + + + - do: + xpack.ml.close_job: + job_id: index-layout-job + - match: { closed: true } + + - do: + xpack.ml.close_job: + job_id: index-layout-job2 + - match: { closed: true } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.exists: + index: ".ml-state" + - is_true: '' + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.exists: + index: ".ml-anomalies-index-layout-job" + - is_true: '' + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.refresh: {} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: .ml-state + - gt: {count: 0} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + count: + index: .ml-anomalies-shared + - match: {count: 6} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: .ml-anomalies-index-layout-job + body: + query: + constant_score: + filter: + term: + job_id: index-layout-job + + - match: {count: 3} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: .ml-anomalies-shared + body: + query: + constant_score: + filter: + term: + job_id: index-layout-job + + - match: {count: 3} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: .ml-anomalies-index-layout-job2 + body: + query: + constant_score: + filter: + term: + job_id: index-layout-job2 + + - match: {count: 3} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: .ml-anomalies-shared + body: + query: + constant_score: + filter: + term: + job_id: index-layout-job2 + - match: {count: 3} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-state + type: doc + id: index-layout-job2_categorizer_state#1 + body: + key: value + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-state + type: doc + id: index-layout-job2_categorizer_state#2 + body: + key: value + + - do: + xpack.ml.delete_job: + job_id: "index-layout-job" + - match: { acknowledged: true } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.exists: + index: ".ml-anomalies-shared" + - is_true: '' + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: .ml-state + - match: {count: 4} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: .ml-anomalies-shared + body: + query: + constant_score: + filter: + term: + job_id: index-layout-job + - match: {count: 0} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: .ml-anomalies-shared + - match: {count: 3} + + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: .ml-anomalies-index-layout-job2 + body: + query: + constant_score: + filter: + term: + job_id: index-layout-job2 + + - match: {count: 3} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + count: + index: .ml-anomalies-shared + body: + query: + constant_score: + filter: + term: + job_id: index-layout-job2 + + - match: {count: 3} + + - do: + xpack.ml.delete_job: + job_id: "index-layout-job2" + - match: { acknowledged: true } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.exists: + index: ".ml-anomalies-shared" + - is_true: '' + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.exists: + index: ".ml-anomalies-index-layout-job" + - is_false: '' + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.exists: + index: ".ml-anomalies-index-layout-job2" + - is_false: '' + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + type: doc + index: .ml-state + - match: {count: 0} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + type: doc + index: .ml-state + - match: {count: 0} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + type: doc + index: .ml-state + - match: {count: 0} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: .ml-state + - match: {count: 0} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: .ml-anomalies-shared + - match: {count: 0} + + +--- +"Test unrelated index": + + - do: + xpack.ml.put_job: + job_id: index-layout-job + body: > + { + "job_id":"index-layout-job", + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "analysis_limits" : { + "model_memory_limit": "20mb" + }, + "data_description" : { + "format":"xcontent", + "time_field":"time", + "time_format":"epoch" + } + } + - match: { job_id: "index-layout-job" } + + - do: + xpack.ml.open_job: + job_id: index-layout-job + + - do: + xpack.ml.post_data: + job_id: index-layout-job + body: > + {"airline":"AAL","responsetime":"132.2046","sourcetype":"farequote","time":"1403481600"} + {"airline":"JZA","responsetime":"990.4628","sourcetype":"farequote","time":"1403481700"} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.create: + index: foo + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.create: + index: .ml-anomalies-foo + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + index: + index: foo + type: doc + body: + key: value + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-foo + type: doc + body: + key: value + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-foo + type: doc + body: + key: value + job_id: foo + + - do: + xpack.ml.flush_job: + job_id: index-layout-job + - match: { flushed: true } + + - do: + xpack.ml.close_job: + job_id: index-layout-job + - match: { closed: true } + + - do: + xpack.ml.delete_job: + job_id: "index-layout-job" + - match: { acknowledged: true } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.exists: + index: ".ml-anomalies-shared" + - is_true: '' + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.exists: + index: ".ml-anomalies-foo" + - is_true: '' + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.exists: + index: ".ml-state" + - is_true: '' + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.exists: + index: "foo" + - is_true: '' + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: .ml-state + - match: {count: 0} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: .ml-anomalies-shared + - match: {count: 0} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.refresh: {} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: foo + - match: {count: 1} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: .ml-anomalies-foo + - match: {count: 2} + +--- +"Test delete removes quantiles": + + - do: + xpack.ml.put_job: + job_id: index-layout-quantiles-job + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "analysis_limits" : { + "model_memory_limit": "20mb" + }, + "data_description" : { + "format":"xcontent" + } + } + - match: { job_id: "index-layout-quantiles-job" } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-state + type: doc + id: index-layout-quantiles-job_quantiles + body: + state: quantile-state + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + indices.refresh: {} + + - do: + xpack.ml.delete_job: + job_id: "index-layout-quantiles-job" + - match: { acknowledged: true } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: .ml-state + - match: {count: 0} + +--- +"Test delete removes state": + + - do: + xpack.ml.put_job: + job_id: index-layout-state-job + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"mlcategory"}], + "categorization_field_name": "message" + }, + "analysis_limits" : { + "model_memory_limit": "20mb" + }, + "data_description" : { + "format":"xcontent" + } + } + - match: { job_id: "index-layout-state-job" } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-shared + type: doc + id: "index-layout-state-job_model_snapshot_123" + body: > + { + "job_id" : "index-layout-state-job", + "timestamp": "2017-05-02T00:00:00Z", + "snapshot_id": "123", + "snapshot_doc_count": 2, + "retain": false + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-state + type: doc + id: index-layout-state-job_model_state_123#1 + body: + state: new-model-state + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-state + type: doc + id: index-layout-state-job_model_state_123#2 + body: + state: more-new-model-state + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-state + type: doc + id: index-layout-state-job_categorizer_state#1 + body: + state: new-categorizer-state + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-state + type: doc + id: index-layout-state-job_categorizer_state#2 + body: + state: more-new-categorizer-state + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.refresh: {} + + - do: + xpack.ml.delete_job: + job_id: "index-layout-state-job" + - match: { acknowledged: true } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: .ml-anomalies-shared + - match: {count: 0} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: .ml-state + - match: {count: 0} + +--- +"Test force close does not create state": + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + indices.create: + index: .ml-state + + - do: + xpack.ml.put_job: + job_id: index-layout-force-close-job + body: > + { + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "analysis_limits" : { + "model_memory_limit": "20mb" + }, + "data_description" : { + "time_field":"time", + "time_format":"epoch" + } + } + - match: { job_id: "index-layout-force-close-job" } + + - do: + xpack.ml.open_job: + job_id: index-layout-force-close-job + + - do: + xpack.ml.post_data: + job_id: index-layout-force-close-job + body: > + {"airline":"AAL","responsetime":"132.2046","sourcetype":"farequote","time":"1403481600"} + {"airline":"JZA","responsetime":"990.4628","sourcetype":"farequote","time":"1403481700"} + + - do: + xpack.ml.close_job: + job_id: index-layout-force-close-job + force: true + - match: { closed: true } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.exists: + index: ".ml-state" + - is_true: '' + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.refresh: {} + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + count: + index: .ml-state + - match: {count: 0} + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/job_groups.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/job_groups.yml new file mode 100644 index 0000000000000..d1e2851e17690 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/job_groups.yml @@ -0,0 +1,270 @@ +setup: + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: test-job-groups-foo-1 + body: > + { + "groups": ["foo-group", "ones"], + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "10mb" + }, + "data_description" : {} + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: test-job-groups-foo-2 + body: > + { + "groups": ["foo-group", "twos"], + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "10mb" + }, + "data_description" : {} + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: test-job-groups-bar-1 + body: > + { + "groups": ["bar-group", "ones"], + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "10mb" + }, + "data_description" : {} + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + xpack.ml.put_job: + job_id: test-job-groups-bar-2 + body: > + { + "groups": ["bar-group", "twos"], + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "10mb" + }, + "data_description" : {} + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: test-job-groups-nogroup + body: > + { + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "10mb" + }, + "data_description" : {} + } + +--- +"Test put job with id that matches an existing group": + + - do: + catch: /resource_already_exists_exception/ + xpack.ml.put_job: + job_id: foo-group + body: > + { + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "data_description" : {} + } + +--- +"Test put job with group that matches an job id": + + - do: + catch: /resource_already_exists_exception/ + xpack.ml.put_job: + job_id: test-job-groups-job-with-group-matching-existing-job-id + body: > + { + "groups": ["test-job-groups-nogroup"], + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "data_description" : {} + } + +--- +"Test put job with group that matches its id": + + - do: + catch: /resource_already_exists_exception/ + xpack.ml.put_job: + job_id: test-job-groups-job-with-group-matching-its-id + body: > + { + "groups": ["test-job-groups-job-with-group-matching-its-id"], + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "data_description" : {} + } + +--- +"Test put job with empty group": + + - do: + catch: /Invalid group id ''; must be non-empty string and may contain lowercase alphanumeric \(a-z and 0-9\), hyphens or underscores; must start and end with alphanumeric/ + xpack.ml.put_job: + job_id: test-job-groups-job-with-empty-group + body: > + { + "groups": ["foo-group", ""], + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "data_description" : {} + } + +--- +"Test put job with invalid group": + + - do: + catch: /Invalid group id '___'; must be non-empty string and may contain lowercase alphanumeric \(a-z and 0-9\), hyphens or underscores; must start and end with alphanumeric/ + xpack.ml.put_job: + job_id: test-job-groups-job-with-invalid-group + body: > + { + "groups": ["foo", "___"], + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "data_description" : {} + } + +--- +"Test get job API": + + - do: + xpack.ml.get_jobs: + job_id: "_all" + - match: { count: 5 } + - match: { jobs.0.job_id: "test-job-groups-bar-1"} + - match: { jobs.1.job_id: "test-job-groups-bar-2"} + - match: { jobs.2.job_id: "test-job-groups-foo-1"} + - match: { jobs.3.job_id: "test-job-groups-foo-2"} + - match: { jobs.4.job_id: "test-job-groups-nogroup"} + + - do: + xpack.ml.get_jobs: + job_id: "test-job-groups-bar-1" + - match: { count: 1 } + - match: { jobs.0.job_id: "test-job-groups-bar-1"} + + - do: + xpack.ml.get_jobs: + job_id: "foo-group" + - match: { count: 2 } + - match: { jobs.0.job_id: "test-job-groups-foo-1"} + - match: { jobs.1.job_id: "test-job-groups-foo-2"} + + - do: + xpack.ml.get_jobs: + job_id: "bar-group" + - match: { count: 2 } + - match: { jobs.0.job_id: "test-job-groups-bar-1"} + - match: { jobs.1.job_id: "test-job-groups-bar-2"} + + - do: + xpack.ml.get_jobs: + job_id: "ones" + - match: { count: 2 } + - match: { jobs.0.job_id: "test-job-groups-bar-1"} + - match: { jobs.1.job_id: 
"test-job-groups-foo-1"} + + - do: + xpack.ml.get_jobs: + job_id: "twos" + - match: { count: 2 } + - match: { jobs.0.job_id: "test-job-groups-bar-2"} + - match: { jobs.1.job_id: "test-job-groups-foo-2"} + + - do: + xpack.ml.get_jobs: + job_id: "*-group" + - match: { count: 4 } + - match: { jobs.0.job_id: "test-job-groups-bar-1"} + - match: { jobs.1.job_id: "test-job-groups-bar-2"} + - match: { jobs.2.job_id: "test-job-groups-foo-1"} + - match: { jobs.3.job_id: "test-job-groups-foo-2"} + + - do: + xpack.ml.get_jobs: + job_id: "bar-group,test-job-groups-nogroup" + - match: { count: 3 } + - match: { jobs.0.job_id: "test-job-groups-bar-1"} + - match: { jobs.1.job_id: "test-job-groups-bar-2"} + - match: { jobs.2.job_id: "test-job-groups-nogroup"} + +--- +"Test get job stats API": + + - do: + xpack.ml.get_job_stats: + job_id: "foo-group" + - match: { count: 2 } + - match: { jobs.0.job_id: "test-job-groups-foo-1"} + - match: { jobs.1.job_id: "test-job-groups-foo-2"} + +--- +"Test close job API": + + - do: + xpack.ml.open_job: + job_id: "test-job-groups-foo-1" + + - do: + xpack.ml.open_job: + job_id: "test-job-groups-bar-1" + + - do: + xpack.ml.get_job_stats: + job_id: "ones" + - match: { count: 2 } + - match: { jobs.0.job_id: "test-job-groups-bar-1"} + - match: { jobs.0.state: opened} + - match: { jobs.1.job_id: "test-job-groups-foo-1"} + - match: { jobs.1.state: opened} + + - do: + xpack.ml.close_job: + job_id: "ones" + + - do: + xpack.ml.get_job_stats: + job_id: "ones" + - match: { count: 2 } + - match: { jobs.0.job_id: "test-job-groups-bar-1"} + - match: { jobs.0.state: closed} + - match: { jobs.1.job_id: "test-job-groups-foo-1"} + - match: { jobs.1.state: closed} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml new file mode 100644 index 0000000000000..9ed14c2f860ef --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml @@ -0,0 +1,1400 @@ +--- +"Test get all jobs and stats given no job exists": + + - do: + xpack.ml.get_jobs: + job_id: "_all" + - match: { count: 0 } + - match: { jobs: [] } + + - do: + xpack.ml.get_job_stats: + job_id: "_all" + - match: { count: 0 } + - match: { jobs: [] } + +--- +"Test get jobs with expression that does not match and allow_no_jobs": + + - do: + xpack.ml.get_jobs: + job_id: "missing-*" + allow_no_jobs: true + - match: { count: 0 } + - match: { jobs: [] } + +--- +"Test get jobs with expression that does not match and not allow_no_jobs": + + - do: + catch: missing + xpack.ml.get_jobs: + job_id: "missing-*" + allow_no_jobs: false + +--- +"Test job crud apis": + + - do: + xpack.ml.put_job: + job_id: job-crud-test-apis + body: > + { + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "field_delimiter":",", + "time_field":"time", + "time_format":"yyyy-MM-dd HH:mm:ssX" + } + } + - match: { job_id: "job-crud-test-apis" } + - match: { analysis_limits.model_memory_limit: "1024mb" } + - match: { analysis_limits.categorization_examples_limit: 4 } + + - do: + xpack.ml.get_jobs: + job_id: "job-crud-test-apis" + - match: { count: 1 } + - match: { jobs.0.job_id: "job-crud-test-apis" } + - match: { jobs.0.analysis_limits.model_memory_limit: "1024mb" } + + - do: + indices.get_alias: + name: ".ml-anomalies-job-crud-test-apis" + - match: { 
\.ml-anomalies-shared.aliases.\.ml-anomalies-job-crud-test-apis.filter.term.job_id.value: job-crud-test-apis } + + - do: + indices.get_alias: + name: ".ml-anomalies-.write-job-crud-test-apis" + - match: { \.ml-anomalies-shared.aliases.\.ml-anomalies-\.write-job-crud-test-apis: {} } + + - do: + xpack.ml.delete_job: + job_id: "job-crud-test-apis" + - match: { acknowledged: true } + + - do: + indices.exists: + index: ".ml-anomalies-job-crud-test-apis" + - is_false: '' + + - do: + indices.exists_alias: + name: ".ml-anomalies-job-crud-test-apis" + - is_false: '' + + - do: + indices.exists_alias: + name: ".ml-anomalies-.write-job-crud-test-apis" + - is_false: '' + +--- +"Test put job with model_memory_limit as number": + + - do: + xpack.ml.put_job: + job_id: job-model-memory-limit-as-number + body: > + { + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "data_description" : { + }, + "analysis_limits": { + "model_memory_limit": 2048 + } + } + - match: { job_id: "job-model-memory-limit-as-number" } + - match: { analysis_limits.model_memory_limit: "2048mb" } + +--- +"Test put job with model_memory_limit as string": + + - do: + xpack.ml.put_job: + job_id: job-model-memory-limit-as-string + body: > + { + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "data_description" : { + }, + "analysis_limits": { + "model_memory_limit": "3g" + } + } + - match: { job_id: "job-model-memory-limit-as-string" } + - match: { analysis_limits.model_memory_limit: "3072mb" } + +--- +"Test get job API with non existing job id": + - do: + catch: missing + xpack.ml.get_jobs: + job_id: "non-existing" + +--- +"Test put job with inconsistent body/param ids": + - do: + catch: /illegal_argument_exception/ + xpack.ml.put_job: + job_id: an_id + body: > + { + "job_id":"a_different_id", + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "field_delimiter":",", + "time_field":"time", + "time_format":"yyyy-MM-dd HH:mm:ssX" + } + } + + - do: + catch: /Inconsistent job_id; 'a_different_id' specified in the body differs from 'an_id' specified as a URL argument/ + xpack.ml.put_job: + job_id: an_id + body: > + { + "job_id":"a_different_id", + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span": "3600s", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "field_delimiter":",", + "time_field":"time", + "time_format":"yyyy-MM-dd HH:mm:ssX" + } + } + +--- +"Test put job with id that is already taken": + - do: + xpack.ml.put_job: + job_id: jobs-crud-id-already-taken + body: > + { + "job_id":"jobs-crud-id-already-taken", + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span": "3600000ms", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "field_delimiter":",", + "time_field":"time", + "time_format":"yyyy-MM-dd HH:mm:ssX" + } + } + - match: { job_id: "jobs-crud-id-already-taken" } + + - do: + catch: /resource_already_exists_exception/ + xpack.ml.put_job: + job_id: jobs-crud-id-already-taken + body: > + { + "job_id":"jobs-crud-id-already-taken", + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span": "1h", + "detectors" 
:[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "field_delimiter":",", + "time_field":"time", + "time_format":"yyyy-MM-dd HH:mm:ssX" + } + } + - do: + catch: /The job cannot be created with the Id 'jobs-crud-id-already-taken'. The Id is already used./ + xpack.ml.put_job: + job_id: jobs-crud-id-already-taken + body: > + { + "job_id":"jobs-crud-id-already-taken", + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "field_delimiter":",", + "time_field":"time", + "time_format":"yyyy-MM-dd HH:mm:ssX" + } + } + - do: + catch: param + xpack.ml.put_job: + job_id: jobs-crud-id-already-taken + body: > + { + "job_id":"jobs-crud-id-already-taken", + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "field_delimiter":",", + "time_field":"time", + "time_format":"yyyy-MM-dd HH:mm:ssX" + } + } + +--- +"Test update job": + - do: + xpack.ml.put_job: + job_id: jobs-crud-update-job + body: > + { + "description":"Pre update description", + "analysis_config" : { + "detectors" :[{"function":"mean","field_name":"responsetime","by_field_name":"airline"}, + {"function":"count","by_field_name":"mlcategory"}], + "categorization_field_name": "some_category", + "categorization_filters" : ["cat1.*", "cat2.*"] + }, + "data_description" : { + "field_delimiter":"," + }, + "model_plot_config": { + "enabled": true, + "terms": "foo,bar" + }, + "analysis_limits": { + "model_memory_limit": "10mb" + }, + "renormalization_window_days": 1, + "background_persist_interval": "2h", + "model_snapshot_retention_days": 3, + "results_retention_days": 4, + "custom_settings": { + "setting1": "custom1", + "setting2": "custom2" + } + } + - match: { job_id: "jobs-crud-update-job" } + + - do: + xpack.ml.open_job: + job_id: jobs-crud-update-job + + - do: + xpack.ml.update_job: + job_id: jobs-crud-update-job + body: > + { + "groups": ["group-1", "group-2"], + "description":"Post update description", + "detectors": [{"detector_index": 0, "rules": {"target_field_name": "airline", + "conditions": [ { "type": "numerical_actual", + "condition": {"operator": "gt", "value": "10" } } ] } }, + {"detector_index": 1, "description": "updated description"}], + "model_plot_config": { + "enabled": false, + "terms": "foobar" + }, + "renormalization_window_days": 10, + "background_persist_interval": "3h", + "model_snapshot_retention_days": 30, + "results_retention_days": 40, + "categorization_filters" : ["cat3.*"], + "custom_settings": { + "setting3": "custom3" + } + } + - match: { job_id: "jobs-crud-update-job" } + - match: { groups: ["group-1", "group-2"] } + - match: { description: "Post update description" } + - match: { model_plot_config.enabled: false } + - match: { model_plot_config.terms: "foobar" } + - match: { analysis_config.categorization_filters: ["cat3.*"] } + - match: { analysis_config.detectors.0.rules.0.target_field_name: "airline" } + - match: { analysis_config.detectors.0.detector_index: 0 } + - match: { analysis_config.detectors.1.detector_description: "updated description" } + - match: { analysis_config.detectors.1.detector_index: 1 } + - match: { renormalization_window_days: 10 } + - match: { background_persist_interval: "3h" } + - 
match: { model_snapshot_retention_days: 30 } + - match: { results_retention_days: 40 } + + - do: + catch: "/Cannot update analysis_limits while the job is open/" + xpack.ml.update_job: + job_id: jobs-crud-update-job + body: > + { + "analysis_limits": { + "model_memory_limit": "20mb" + } + } + + - do: + xpack.ml.close_job: + job_id: jobs-crud-update-job + - match: { closed: true } + + - do: + xpack.ml.update_job: + job_id: jobs-crud-update-job + body: > + { + "analysis_limits": { + "model_memory_limit": "20mb" + } + } + - match: { analysis_limits.model_memory_limit: "20mb" } + + - do: + xpack.ml.update_job: + job_id: jobs-crud-update-job + body: > + { + "analysis_limits": { + "model_memory_limit": "15mb" + } + } + - match: { analysis_limits.model_memory_limit: "15mb" } + + - do: + catch: bad_request + xpack.ml.update_job: + job_id: _all + body: > + { + "description":"Can't update all description" + } + +--- +"Test cannot decrease model_memory_limit below current usage": + - do: + xpack.ml.put_job: + job_id: jobs-crud-model-memory-limit-decrease + body: > + { + "job_id":"jobs-crud-model-memory-limit-decrease", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"count"}] + }, + "analysis_limits": { + "model_memory_limit": "50mb" + }, + "data_description" : { + "time_field":"time" + } + } + - match: { job_id: "jobs-crud-model-memory-limit-decrease" } + + - do: + index: + index: .ml-anomalies-shared + type: doc + id: jobs-crud-model-memory-limit-decrease_model_size_stats_1517443200000 + body: + job_id: jobs-crud-model-memory-limit-decrease + result_type: model_size_stats + log_time: 1517443200000 + timestamp: 1517443200000 + model_bytes: 10000000 + + - do: + indices.refresh: {} + + - do: + catch: /Invalid update value for analysis_limits[:] model_memory_limit cannot be decreased below current usage; current usage \[9mb\], update had \[5mb\]/ + xpack.ml.update_job: + job_id: jobs-crud-model-memory-limit-decrease + body: > + { + "analysis_limits": { + "model_memory_limit": "5mb" + } + } + + # Decreasing over current usage works + - do: + xpack.ml.update_job: + job_id: jobs-crud-model-memory-limit-decrease + body: > + { + "analysis_limits": { + "model_memory_limit": "30mb" + } + } + - match: { analysis_limits.model_memory_limit: "30mb" } + +--- +"Test delete job that is referred by a datafeed": + - do: + xpack.ml.put_job: + job_id: jobs-crud-datafeed-job + body: > + { + "job_id":"jobs-crud-datafeed-job", + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "format":"xcontent", + "time_field":"time", + "time_format":"yyyy-MM-dd HH:mm:ssX" + } + } + - match: { job_id: "jobs-crud-datafeed-job" } + + - do: + xpack.ml.put_datafeed: + datafeed_id: jobs-crud-test-datafeed-1 + body: > + { + "job_id":"jobs-crud-datafeed-job", + "indexes":["index-foo"], + "types":["type-bar"] + } + - match: { datafeed_id: "jobs-crud-test-datafeed-1" } + + - do: + catch: /Cannot delete job \[jobs-crud-datafeed-job\] because datafeed \[jobs-crud-test-datafeed-1\] refers to it/ + xpack.ml.delete_job: + job_id: jobs-crud-datafeed-job + +--- +"Test delete job that is opened": + - do: + xpack.ml.put_job: + job_id: delete-opened-job + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "20mb" + }, + "data_description" : { + 
"time_field":"time" + } + } + - match: { job_id: "delete-opened-job" } + + - do: + xpack.ml.open_job: + job_id: delete-opened-job + - match: { opened: true } + + - do: + catch: /Cannot delete job \[delete-opened-job\] because the job is opened/ + xpack.ml.delete_job: + job_id: delete-opened-job + +--- +"Test close job": + + - do: + xpack.ml.put_job: + job_id: jobs-crud-close-job + body: > + { + "job_id":"jobs-crud-close-job", + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span":"1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "format":"xcontent", + "time_field":"time", + "time_format":"epoch" + } + } + - match: { job_id: "jobs-crud-close-job" } + + - do: + xpack.ml.open_job: + job_id: jobs-crud-close-job + + - do: + xpack.ml.post_data: + job_id: jobs-crud-close-job + body: > + {"airline":"AAL","responsetime":"132.2046","time":"1403481600"} + {"airline":"JZA","responsetime":"990.4628","time":"1403481700"} + + + - do: + xpack.ml.flush_job: + job_id: jobs-crud-close-job + - match: { flushed: true } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + cluster.state: + metric: [ metadata ] + filter_path: metadata.persistent_tasks + - match: {"metadata.persistent_tasks.tasks.0.task.xpack/ml/job.status.state": opened} + + - do: + xpack.ml.close_job: + job_id: jobs-crud-close-job + - match: { closed: true } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + cluster.state: + metric: [ metadata ] + filter_path: metadata.persistent_tasks + - match: + metadata.persistent_tasks.tasks: [] + +--- +"Test closing a closed job isn't an error": + - do: + xpack.ml.put_job: + job_id: jobs-crud-close-a-closed-job + body: > + { + "description":"Analysis of response time by airline", + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "20mb" + }, + "data_description" : { + "format":"xcontent" + } + } + - match: { job_id: "jobs-crud-close-a-closed-job" } + + - do: + xpack.ml.open_job: + job_id: jobs-crud-close-a-closed-job + + - do: + xpack.ml.close_job: + job_id: jobs-crud-close-a-closed-job + - match: { closed: true } + + - do: + xpack.ml.close_job: + job_id: jobs-crud-close-a-closed-job + - match: { closed: true } + +--- +"Test close all jobs": + - do: + xpack.ml.put_job: + job_id: jobs-crud-close-all-1 + body: > + { + "description":"Analysis of response time by airline", + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "20mb" + }, + "data_description" : { + "format":"xcontent" + } + } + - match: { job_id: "jobs-crud-close-all-1" } + + - do: + xpack.ml.put_job: + job_id: jobs-crud-close-all-2 + body: > + { + "description":"Analysis of response time by airline", + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "20mb" + }, + "data_description" : { + "format":"xcontent" + } + } + - match: { job_id: "jobs-crud-close-all-2" } + + - do: + xpack.ml.open_job: + job_id: jobs-crud-close-all-2 + + - do: + xpack.ml.close_job: + job_id: _all + - match: { closed: true } + + - do: + xpack.ml.get_job_stats: + job_id: _all + - match: { jobs.0.state: closed } + - match: { jobs.1.state: closed } + +--- 
+"Test close jobs with expression that matches": + - do: + xpack.ml.put_job: + job_id: jobs-crud-with-expression-that-matches-foo-1 + body: > + { + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "20mb" + }, + "data_description" : {} + } + + - do: + xpack.ml.put_job: + job_id: jobs-crud-with-expression-that-matches-foo-2 + body: > + { + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "20mb" + }, + "data_description" : {} + } + + - do: + xpack.ml.put_job: + job_id: jobs-crud-with-expression-that-matches-bar-1 + body: > + { + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "20mb" + }, + "data_description" : {} + } + + - do: + xpack.ml.open_job: + job_id: jobs-crud-with-expression-that-matches-foo-1 + + - do: + xpack.ml.open_job: + job_id: jobs-crud-with-expression-that-matches-foo-2 + + - do: + xpack.ml.open_job: + job_id: jobs-crud-with-expression-that-matches-bar-1 + + - do: + xpack.ml.close_job: + job_id: "*foo-*" + - match: { closed: true } + + - do: + xpack.ml.get_job_stats: + job_id: "*foo-*" + - match: { jobs.0.state: closed } + - match: { jobs.1.state: closed } + + - do: + xpack.ml.get_job_stats: + job_id: "*bar-1" + - match: { jobs.0.state: opened } + +--- +"Test close jobs with expression that does not match and allow_no_jobs": + + - do: + xpack.ml.close_job: + job_id: "missing-*" + allow_no_jobs: true + - match: { closed: true } + +--- +"Test close jobs with expression that does not match and not allow_no_jobs": + + - do: + catch: missing + xpack.ml.close_job: + job_id: "missing-*" + allow_no_jobs: false + +--- +"Test force close job": + + - do: + xpack.ml.put_job: + job_id: jobs-crud-force-close-job + body: > + { + "job_id":"jobs-crud-force-close-job", + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span":"1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "analysis_limits" : { + "model_memory_limit": "20mb" + }, + "data_description" : { + "format":"xcontent", + "time_field":"time", + "time_format":"epoch" + } + } + - match: { job_id: "jobs-crud-force-close-job" } + + - do: + xpack.ml.open_job: + job_id: jobs-crud-force-close-job + + - do: + xpack.ml.post_data: + job_id: jobs-crud-force-close-job + body: > + {"airline":"AAL","responsetime":"132.2046","time":"1403481600"} + {"airline":"JZA","responsetime":"990.4628","time":"1403481700"} + + + - do: + xpack.ml.flush_job: + job_id: jobs-crud-force-close-job + - match: { flushed: true } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + cluster.state: + metric: [ metadata ] + filter_path: metadata.persistent_tasks + - match: {"metadata.persistent_tasks.tasks.0.task.xpack/ml/job.status.state": opened} + + - do: + xpack.ml.close_job: + job_id: jobs-crud-force-close-job + force: true + - match: { closed: true } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + cluster.state: + metric: [ metadata ] + filter_path: metadata.persistent_tasks + - match: + metadata.persistent_tasks.tasks: [] + +--- +"Test force closing a closed job isn't an error": + - do: + xpack.ml.put_job: + job_id: jobs-crud-close-a-closed-job + body: > + { + "description":"Analysis of response time by airline", + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "20mb" + }, + "data_description" : { + "format":"xcontent" + } + } + - match: { job_id: "jobs-crud-close-a-closed-job" } + + - do: + xpack.ml.open_job: + job_id: jobs-crud-close-a-closed-job + + - do: + xpack.ml.close_job: + job_id: jobs-crud-close-a-closed-job + force: true + - match: { closed: true } + + - do: + xpack.ml.close_job: + job_id: jobs-crud-close-a-closed-job + force: true + - match: { closed: true } + +--- +"Test open and close an unknown job is resource not found": + - do: + catch: missing + xpack.ml.open_job: + job_id: jobs-crud-some-missing-job-i-made-up + + - do: + catch: missing + xpack.ml.close_job: + job_id: jobs-crud-some-missing-job-i-made-up + +--- +"Test cannot create job with existing categorizer state document": + + - do: + index: + index: .ml-state + type: doc + id: jobs-crud-existing-docs_categorizer_state#1 + body: + key: value + + - do: + indices.refresh: {} + + - do: + catch: /status_exception/ + xpack.ml.put_job: + job_id: jobs-crud-existing-docs + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + } + } + +--- +"Test cannot create job with existing quantiles document": + + - do: + index: + index: .ml-state + type: doc + id: jobs-crud-existing-docs_quantiles + body: + key: value + + - do: + indices.refresh: {} + + - do: + catch: /status_exception/ + xpack.ml.put_job: + job_id: jobs-crud-existing-docs + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + } + } + + +--- +"Test cannot create job with existing result document": + + - do: + index: + index: .ml-anomalies-shared + type: doc + id: "jobs-crud-existing-result-docs_1464739200000_1" + body: + { + "job_id": "jobs-crud-existing-result-docs", + "result_type": "bucket", + "timestamp": "2016-06-01T00:00:00Z", + "anomaly_score": 90.0, + "bucket_span":1 + } + + - do: + indices.refresh: {} + + - do: + catch: /status_exception/ + xpack.ml.put_job: + job_id: jobs-crud-existing-result-docs + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + } + } + +--- +"Test cannot create job with model snapshot id set": + + - do: + catch: /illegal_argument_exception/ + xpack.ml.put_job: + job_id: has-model-snapshot-id + body: > + { + "model_snapshot_id": "wont-create-with-this-setting", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + } + } + +--- +"Test put job with time field in analysis_config": + + - do: + catch: /illegal_argument_exception/ + xpack.ml.put_job: + job_id: jobs-crud-time-field-in-analysis_config + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"rare","by_field_name":"@timestamp"}] + }, + 
"data_description" : { + "time_field":"@timestamp" + } + } + + - do: + catch: /data_description.time_field may not be used in the analysis_config/ + xpack.ml.put_job: + job_id: jobs-crud-time-field-in-analysis_config + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"mean","field_name":"time"}] + }, + "data_description" : {} + } + +--- +"Test put job after closing results index": + + - do: + indices.create: + index: ".ml-anomalies-shared" + + - do: + indices.close: + index: ".ml-anomalies-shared" + + - do: + catch: /Cannot create job \[closed-results-job\] as it requires closed index \[\.ml-anomalies-shared\]/ + xpack.ml.put_job: + job_id: closed-results-job + body: > + { + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "time_field":"time" + } + } + +--- +"Test put job after closing state index": + + - do: + indices.create: + index: ".ml-state" + + - do: + indices.close: + index: ".ml-state" + + - do: + catch: /Cannot create job \[closed-results-job\] as it requires closed index \[\.ml-state\]/ + xpack.ml.put_job: + job_id: closed-results-job + body: > + { + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "time_field":"time" + } + } + +--- +"Test max model memory limit": + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + cluster.put_settings: + body: + transient: + xpack.ml.max_model_memory_limit: "9g" + - match: {transient.xpack.ml.max_model_memory_limit: "9g"} + + - do: + xpack.ml.put_job: + job_id: job-model-memory-limit-below-global-max + body: > + { + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "data_description" : { + }, + "analysis_limits": { + "model_memory_limit": "8g" + } + } + - match: { job_id: "job-model-memory-limit-below-global-max" } + - match: { analysis_limits.model_memory_limit: "8192mb" } + + - do: + catch: /model_memory_limit \[10gb\] must be less than the value of the xpack.ml.max_model_memory_limit setting \[9gb\]/ + xpack.ml.put_job: + job_id: job-model-memory-limit-above-global-max + body: > + { + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "data_description" : { + }, + "analysis_limits": { + "model_memory_limit": "10g" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + cluster.put_settings: + body: + transient: + xpack.ml.max_model_memory_limit: null + - match: {transient: {}} + + - do: + xpack.ml.put_job: + job_id: job-model-memory-limit-above-removed-global-max + body: > + { + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "data_description" : { + }, + "analysis_limits": { + "model_memory_limit": "10g" + } + } + - match: { job_id: "job-model-memory-limit-above-removed-global-max" } + - match: { analysis_limits.model_memory_limit: "10240mb" } + +--- +"Test create job with delimited format": + + - skip: + features: "warnings" + reason: deprecation logging for delimited format was introduced in 5.4.0 + + - do: + warnings: + - Creating jobs with delimited data format is deprecated. 
Please use xcontent instead. + xpack.ml.put_job: + job_id: delimited-format-job + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "format":"delimited", + "field_delimiter":",", + "time_field":"time", + "time_format":"yyyy-MM-dd HH:mm:ssX" + } + } + - match: { job_id: "delimited-format-job" } + +--- +"Test job with named categorization_analyzer": + - do: + xpack.ml.put_job: + job_id: jobs-crud-categorization-analyzer-job + body: > + { + "analysis_config" : { + "detectors" :[{"function":"mean","field_name":"responsetime","by_field_name":"airline"}, + {"function":"count","by_field_name":"mlcategory"}], + "categorization_field_name": "some_category", + "categorization_analyzer" : "standard" + }, + "data_description" : { + } + } + - match: { job_id: "jobs-crud-categorization-analyzer-job" } + - match: { analysis_config.categorization_analyzer: "standard" } + +--- +"Test job with custom categorization_analyzer": + - do: + xpack.ml.put_job: + job_id: jobs-crud-categorization-analyzer-job + body: > + { + "analysis_config" : { + "detectors" :[{"function":"mean","field_name":"responsetime","by_field_name":"airline"}, + {"function":"count","by_field_name":"mlcategory"}], + "categorization_field_name": "some_category", + "categorization_analyzer" : { + "char_filter" : ["html_strip"], + "tokenizer" : "classic", + "filter" : ["stop"] + } + }, + "data_description" : { + } + } + - match: { job_id: "jobs-crud-categorization-analyzer-job" } + - match: { analysis_config.categorization_analyzer.char_filter.0: "html_strip" } + - match: { analysis_config.categorization_analyzer.tokenizer: "classic" } + - match: { analysis_config.categorization_analyzer.filter.0: "stop" } + +--- +"Test job with categorization_analyzer and categorization_filters": + - do: + catch: /categorization_filters cannot be used with categorization_analyzer - instead specify them as pattern_replace char_filters in the analyzer/ + xpack.ml.put_job: + job_id: jobs-crud-categorization-analyzer-job + body: > + { + "analysis_config" : { + "detectors" :[{"function":"mean","field_name":"responsetime","by_field_name":"airline"}, + {"function":"count","by_field_name":"mlcategory"}], + "categorization_field_name": "some_category", + "categorization_analyzer" : { + "char_filter" : ["html_strip"], + "tokenizer" : "classic", + "filter" : ["stop"] + }, + "categorization_filters" : ["cat1.*", "cat2.*"] + }, + "data_description" : { + } + } + +--- +"Test job with rules": + + - do: + xpack.ml.put_job: + job_id: jobs-crud-rules + body: > + { + "analysis_config": { + "detectors": [ + { + "function": "count", + "by_field_name": "country", + "rules": [ + { + "actions": ["filter_results", "skip_sampling"], + "conditions": [ + { + "type":"numerical_actual", + "field_name":"country", + "field_value": "uk", + "condition": {"operator":"lt","value":"33.3"} + }, + {"type":"categorical", "field_name":"country", "filter_id": "foo"} + ] + } + ] + } + ] + }, + "data_description" : {} + } + + - do: + xpack.ml.get_jobs: + job_id: jobs-crud-rules + - match: { count: 1 } + - match: { + jobs.0.analysis_config.detectors.0.rules: [ + { + "actions": ["filter_results", "skip_sampling"], + "conditions_connective": "or", + "conditions": [ + { + "type":"numerical_actual", + "field_name":"country", + "field_value": "uk", + "condition": {"operator":"lt","value":"33.3"} + }, + {"type":"categorical", "field_name":"country", "filter_id": "foo"} + ] + } + 
] + } + +--- +"Test job with pre 6.2 rules": + + - skip: + features: "warnings" + reason: certain rule fields were renamed in 6.2.0 + + - do: + warnings: + - Deprecated field [detector_rules] used, expected [rules] instead + - Deprecated field [rule_action] used, expected [actions] instead + - Deprecated field [rule_conditions] used, expected [conditions] instead + - Deprecated field [condition_type] used, expected [type] instead + - Deprecated field [value_filter] used, expected [filter_id] instead + xpack.ml.put_job: + job_id: jobs-crud-pre-6-2-rules + body: > + { + "analysis_config": { + "detectors": [ + { + "function": "count", + "by_field_name": "country", + "detector_rules": [ + { + "rule_action": "filter_results", + "rule_conditions": [ + { + "condition_type":"numerical_actual", + "field_name":"country", + "field_value": "uk", + "condition": {"operator":"lt","value":"33.3"} + }, + {"type":"categorical", "field_name":"country", "value_filter": "foo"} + ] + } + ] + } + ] + }, + "data_description" : {} + } + + - do: + xpack.ml.get_jobs: + job_id: jobs-crud-pre-6-2-rules + - match: { count: 1 } + - match: { + jobs.0.analysis_config.detectors.0.rules: [ + { + "actions": ["filter_results"], + "conditions_connective": "or", + "conditions": [ + { + "type":"numerical_actual", + "field_name":"country", + "field_value": "uk", + "condition": {"operator":"lt","value":"33.3"} + }, + {"type":"categorical", "field_name":"country", "filter_id": "foo"} + ] + } + ] + } + +--- +"Test function shortcut expansion": + - do: + xpack.ml.put_job: + job_id: jobs-function-shortcut-expansion + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"nzc","by_field_name":"airline"}] + }, + "data_description" : {} + } + - match: { job_id: "jobs-function-shortcut-expansion" } + - match: { analysis_config.detectors.0.function: "non_zero_count"} + +--- +"Test open job when persistent task allocation disabled": + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + cluster.put_settings: + body: + transient: + cluster.persistent_tasks.allocation.enable: "none" + - match: {transient.cluster.persistent_tasks.allocation.enable: "none"} + + - do: + xpack.ml.put_job: + job_id: persistent-task-allocation-allowed-test + body: > + { + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "data_description" : { + }, + "analysis_limits": { + "model_memory_limit": "10m" + } + } + - match: { job_id: "persistent-task-allocation-allowed-test" } + + - do: + catch: /no persistent task assignments are allowed due to cluster settings/ + xpack.ml.open_job: + job_id: persistent-task-allocation-allowed-test + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + cluster.put_settings: + body: + transient: + cluster.persistent_tasks.allocation.enable: "all" + - match: {transient.cluster.persistent_tasks.allocation.enable: "all"} + + - do: + xpack.ml.open_job: + job_id: persistent-task-allocation-allowed-test + - match: { opened: true } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get.yml new file mode 100644 index 0000000000000..7c4903bae95cf --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get.yml @@ -0,0 +1,86 @@ +setup: + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: jobs-get-1 + body: > + { + "job_id":"jobs-get-1", + "description":"Job 1", + "analysis_config" : { + "bucket_span": "300s", + "detectors" :[{"function":"count"}] + }, + "data_description" : { + "format":"xcontent", + "time_field":"time", + "time_format":"epoch" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: jobs-get-2 + body: > + { + "job_id":"jobs-get-2", + "description":"Job 2", + "analysis_config" : { + "bucket_span": "600s", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "format" : "xcontent", + "time_field":"time", + "time_format":"yyyy-MM-dd'T'HH:mm:ssX" + } + } + +--- +"Test get job given missing job_id": + + - do: + catch: missing + xpack.ml.get_jobs: + job_id: missing-job + +--- +"Test get single job": + + - do: + xpack.ml.get_jobs: + job_id: jobs-get-1 + - match: { jobs.0.job_id: "jobs-get-1"} + - match: { jobs.0.description: "Job 1"} + + - do: + xpack.ml.get_jobs: + job_id: jobs-get-2 + - match: { jobs.0.job_id: "jobs-get-2"} + - match: { jobs.0.description: "Job 2"} + +--- +"Test explicit get all jobs": + + - do: + xpack.ml.get_jobs: + job_id: _all + - match: { count: 2 } + - match: { jobs.0.job_id: "jobs-get-1"} + - match: { jobs.0.description: "Job 1"} + - match: { jobs.1.job_id: "jobs-get-2"} + - match: { jobs.1.description: "Job 2"} + +--- +"Test implicit get all jobs": + + - do: + xpack.ml.get_jobs: {} + - match: { count: 2 } + - match: { jobs.0.job_id: "jobs-get-1"} + - match: { jobs.0.description: "Job 1"} + - match: { jobs.1.job_id: "jobs-get-2"} + - match: { jobs.1.description: "Job 2"} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_buckets.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_buckets.yml new file mode 100644 index 0000000000000..2a7a7970e5db2 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_buckets.yml @@ -0,0 +1,299 @@ +setup: + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: jobs-get-result-buckets + body: > + { + "analysis_config" : { + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "format":"xcontent", + "time_field":"time" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + index: + index: .ml-anomalies-jobs-get-result-buckets + type: doc + id: "jobs-get-result-buckets_1464739200000_1" + body: + { + "job_id": "jobs-get-result-buckets", + "result_type": "bucket", + "timestamp": "2016-06-01T00:00:00Z", + "anomaly_score": 90.0, + "bucket_span":1 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-jobs-get-result-buckets + type: doc + id: "jobs-get-result-buckets_1470009600000_2" + body: + { + "job_id": "jobs-get-result-buckets", + "result_type": "bucket", + "timestamp": "2016-08-01T00:00:00Z", + "anomaly_score": 60.0, + "bucket_span":1, + "is_interim": true + } + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-jobs-get-result-buckets + type: doc + id: "jobs-get-result-buckets_1470096000000_3" + body: + { + "job_id": "jobs-get-result-buckets", + "result_type": "bucket", + "timestamp": "2016-08-02T00:00:00Z", + "anomaly_score": 60.0, + "bucket_span":1, + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.refresh: + index: .ml-anomalies-jobs-get-result-buckets + +--- +"Test result buckets api with time range": + - do: + xpack.ml.get_buckets: + job_id: "jobs-get-result-buckets" + start: "2016-05-01T00:00:00Z" + end: "2016-07-01T00:00:00Z" + + - match: { count: 1 } + - match: { buckets.0.timestamp: 1464739200000 } + - match: { buckets.0.job_id: jobs-get-result-buckets} + - match: { buckets.0.result_type: bucket} + +--- +"Test result buckets api": + - do: + xpack.ml.get_buckets: + job_id: "jobs-get-result-buckets" + + - match: { count: 3 } + - match: { buckets.0.timestamp: 1464739200000 } + - match: { buckets.0.job_id: jobs-get-result-buckets} + - match: { buckets.0.result_type: bucket} + - match: { buckets.1.timestamp: 1470009600000 } + - match: { buckets.1.job_id: jobs-get-result-buckets} + - match: { buckets.1.result_type: bucket} + - match: { buckets.2.timestamp: 1470096000000 } + - match: { buckets.2.job_id: jobs-get-result-buckets} + - match: { buckets.2.result_type: bucket} + - is_false: buckets.0.partition_scores + - is_false: buckets.1.partition_scores + - is_false: buckets.2.partition_scores + +--- +"Test get buckets with paging": + - do: + xpack.ml.get_buckets: + job_id: "jobs-get-result-buckets" + from: 1 + size: 2 + + - match: { count: 3 } + - length: { buckets: 2 } + - match: { buckets.0.timestamp: 1470009600000 } + - match: { buckets.0.result_type: bucket} + - match: { buckets.1.timestamp: 1470096000000 } + - match: { buckets.1.result_type: bucket} + +--- +"Test get buckets with paging in body": + - do: + xpack.ml.get_buckets: + job_id: "jobs-get-result-buckets" + body: > + { + "page": { + "from": 1, + "size": 2 + } + } + + - match: { count: 3 } + - length: { buckets: 2 } + - match: { buckets.0.timestamp: 1470009600000 } + - match: { buckets.0.result_type: bucket} + - match: { buckets.1.timestamp: 1470096000000 } + - match: { buckets.1.result_type: bucket} + +--- +"Test get buckets given exclude_interim is false": + - do: + xpack.ml.get_buckets: + job_id: "jobs-get-result-buckets" + exclude_interim: false + + - match: { count: 3 } + +--- +"Test get buckets given exclude_interim is true": + - do: + xpack.ml.get_buckets: + job_id: 
"jobs-get-result-buckets" + exclude_interim: true + + - match: { count: 2 } + - match: { buckets.0.timestamp: 1464739200000 } + - match: { buckets.0.is_interim: false } + - match: { buckets.1.timestamp: 1470096000000 } + - match: { buckets.1.is_interim: false } + +--- +"Test result single bucket api": + - do: + xpack.ml.get_buckets: + job_id: "jobs-get-result-buckets" + timestamp: "2016-06-01T00:00:00Z" + + - match: { buckets.0.timestamp: 1464739200000} + - match: { buckets.0.job_id: jobs-get-result-buckets } + - match: { buckets.0.result_type: bucket} + +--- +"Test result single bucket api with empty body": + - do: + xpack.ml.get_buckets: + job_id: "jobs-get-result-buckets" + timestamp: "2016-06-01T00:00:00Z" + body: {} + + - match: { buckets.0.timestamp: 1464739200000} + - match: { buckets.0.job_id: jobs-get-result-buckets } + - match: { buckets.0.result_type: bucket} + +--- +"Test mutually-exclusive params": + - do: + catch: bad_request + xpack.ml.get_buckets: + job_id: "jobs-get-result-buckets" + timestamp: "2016-06-01T00:00:00Z" + start: "2016-05-01T00:00:00Z" + + - do: + catch: bad_request + xpack.ml.get_buckets: + job_id: "jobs-get-result-buckets" + timestamp: "2016-06-01T00:00:00Z" + end: "2016-05-01T00:00:00Z" + + - do: + catch: bad_request + xpack.ml.get_buckets: + job_id: "jobs-get-result-buckets" + timestamp: "2016-06-01T00:00:00Z" + from: "2016-05-01T00:00:00Z" + + - do: + catch: bad_request + xpack.ml.get_buckets: + job_id: "jobs-get-result-buckets" + timestamp: "2016-06-01T00:00:00Z" + end: "2016-05-01T00:00:00Z" + + - do: + catch: bad_request + xpack.ml.get_buckets: + job_id: "jobs-get-result-buckets" + timestamp: "2016-06-01T00:00:00Z" + anomaly_score: "80.0" + +--- +"Test mutually-exclusive params via body": + - do: + catch: bad_request + xpack.ml.get_buckets: + job_id: "jobs-get-result-buckets" + body: + timestamp: "2016-06-01T00:00:00Z" + start: "2016-05-01T00:00:00Z" + + - do: + catch: bad_request + xpack.ml.get_buckets: + job_id: "jobs-get-result-buckets" + body: + timestamp: "2016-06-01T00:00:00Z" + end: "2016-05-01T00:00:00Z" + + - do: + catch: bad_request + xpack.ml.get_buckets: + job_id: "jobs-get-result-buckets" + body: + timestamp: "2016-06-01T00:00:00Z" + from: "2016-05-01T00:00:00Z" + + - do: + catch: bad_request + xpack.ml.get_buckets: + job_id: "jobs-get-result-buckets" + body: + timestamp: "2016-06-01T00:00:00Z" + end: "2016-05-01T00:00:00Z" + + - do: + catch: bad_request + xpack.ml.get_buckets: + job_id: "jobs-get-result-buckets" + body: + timestamp: "2016-06-01T00:00:00Z" + anomaly_score: "80.0" + +--- +"Test with unknown job id": + - do: + catch: missing + xpack.ml.get_buckets: + job_id: "non-existent-job" + +--- +"Test get buckets with sort field and secondary sort by time": + - do: + xpack.ml.get_buckets: + job_id: "jobs-get-result-buckets" + sort: anomaly_score + + - match: { count: 3 } + - match: { buckets.0.anomaly_score: 60.0 } + - match: { buckets.0.timestamp: 1470009600000 } + - match: { buckets.1.anomaly_score: 60.0 } + - match: { buckets.1.timestamp: 1470096000000 } + - match: { buckets.2.anomaly_score: 90.0} + - match: { buckets.2.timestamp: 1464739200000 } + + - do: + xpack.ml.get_buckets: + job_id: "jobs-get-result-buckets" + sort: anomaly_score + desc: true + + - match: { count: 3 } + - match: { buckets.0.anomaly_score: 90.0 } + - match: { buckets.0.timestamp: 1464739200000 } + - match: { buckets.1.anomaly_score: 60.0} + - match: { buckets.1.timestamp: 1470096000000 } + - match: { buckets.2.anomaly_score: 60.0} + - match: { 
buckets.2.timestamp: 1470009600000 } + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_categories.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_categories.yml new file mode 100644 index 0000000000000..565f1612f89a2 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_categories.yml @@ -0,0 +1,174 @@ +setup: + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: jobs-get-result-categories + body: > + { + "analysis_config" : { + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "format":"xcontent", + "time_field":"time" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-jobs-get-result-categories + type: doc + id: jobs-get-result-categories-1 + body: { "job_id": "jobs-get-result-categories", "category_id": 1 } + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-jobs-get-result-categories + type: doc + id: jobs-get-result-categories-2 + body: { "job_id": "jobs-get-result-categories", "category_id": 2 } + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-unrelated + type: doc + id: jobs-get-result-categories-3 + body: { "job_id": "unrelated", "category_id": 1 } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + indices.refresh: + index: .ml-anomalies-jobs-get-result-categories + +--- +"Test result categories api": + - do: + xpack.ml.get_categories: + job_id: "jobs-get-result-categories" + + - match: { count: 2 } + - match: { categories.0.job_id: jobs-get-result-categories } + - match: { categories.0.category_id: 1 } + - match: { categories.1.job_id: jobs-get-result-categories } + - match: { categories.1.category_id: 2 } + +--- +"Test get categories with pagination": + - do: + xpack.ml.get_categories: + job_id: "jobs-get-result-categories" + size: 1 + + - length: { categories: 1 } + - match: { categories.0.job_id: jobs-get-result-categories } + - match: { categories.0.category_id: 1 } + + - do: + xpack.ml.get_categories: + job_id: "jobs-get-result-categories" + from: 1 + size: 2 + + - length: { categories: 1 } + - match: { categories.0.job_id: jobs-get-result-categories } + - match: { categories.0.category_id: 2 } + +--- +"Test post get categories with pagination": + - do: + xpack.ml.get_categories: + job_id: "jobs-get-result-categories" + body: > + { + "page": { "size": 1} + } + + - length: { categories: 1 } + - match: { categories.0.job_id: jobs-get-result-categories } + - match: { categories.0.category_id: 1 } + + - do: + xpack.ml.get_categories: + job_id: "jobs-get-result-categories" + body: > + { + "page": { "from":1, "size": 1} + } + + - length: { categories: 1 } + - match: { categories.0.job_id: jobs-get-result-categories } + - match: { categories.0.category_id: 2 } + +--- +"Test get category by id": + - do: + xpack.ml.get_categories: + job_id: "jobs-get-result-categories" + category_id: "1" + + - match: { categories.0.job_id: jobs-get-result-categories } + - match: { categories.0.category_id: 1 } + +--- +"Test with invalid param combinations": + - do: + catch: bad_request + xpack.ml.get_categories: + job_id: "jobs-get-result-categories" + category_id: 1 + from: 0 + + - do: + catch: bad_request + xpack.ml.get_categories: + job_id: "jobs-get-result-categories" + category_id: 1 + size: 1 + + - do: + catch: bad_request + xpack.ml.get_categories: + job_id: "jobs-get-result-categories" + category_id: 1 + from: 0 + size: 1 + +--- +"Test with invalid param combinations via body": + - do: + catch: bad_request + xpack.ml.get_categories: + job_id: "jobs-get-result-categories" + category_id: 1 + body: + from: 0 + + - do: + catch: bad_request + xpack.ml.get_categories: + job_id: "jobs-get-result-categories" + category_id: 1 + body: + size: 1 + + - do: + catch: bad_request + xpack.ml.get_categories: + job_id: "jobs-get-result-categories" + category_id: 1 + body: + from: 0 + size: 1 + +--- +"Test with unknown job id": + - do: + catch: missing + xpack.ml.get_categories: + job_id: "non-existent-job" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_influencers.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_influencers.yml new file mode 100644 index 0000000000000..50f0cfc6816bc --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_influencers.yml @@ -0,0 +1,198 @@ +setup: + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + xpack.ml.put_job: + job_id: get-influencers-test + body: > + { + "analysis_config" : { + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "format":"xcontent", + "time_field":"time" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-get-influencers-test + type: doc + id: get-influencers-test_1464739200000_1_1 + body: + { + "job_id": "get-influencers-test", + "timestamp": "2016-06-01T00:00:00Z", + "influencer_field_name": "foo", + "influencer_field_value": "bar", + "influencer_score": 50.0, + "result_type" : "influencer", + "bucket_span" : 1 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-get-influencers-test + type: doc + id: get-influencers-test_1464825600000_1_2 + body: + { + "job_id": "get-influencers-test", + "timestamp": "2016-06-02T00:00:00Z", + "influencer_field_name": "foo", + "influencer_field_value": "zoo", + "influencer_score": 80.0, + "result_type" : "influencer", + "bucket_span" : 1, + "is_interim": true + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-get-influencers-test + type: doc + id: get-influencers-test_1464912000000_1_3 + body: + { + "job_id": "get-influencers-test", + "timestamp": "2016-06-03T00:00:00Z", + "influencer_field_name": "foo", + "influencer_field_value": "zoo", + "influencer_score": 60.0, + "result_type" : "influencer", + "bucket_span" : 1 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + indices.refresh: + index: .ml-anomalies-get-influencers-test + +--- +"Test result influencers api": + - do: + xpack.ml.get_influencers: + job_id: "get-influencers-test" + + - match: { count: 3 } + - match: { influencers.0.influencer_score: 80 } + - match: { influencers.0.timestamp: 1464825600000 } + - match: { influencers.1.influencer_score: 60 } + - match: { influencers.1.timestamp: 1464912000000 } + - match: { influencers.2.influencer_score: 50 } + - match: { influencers.2.timestamp: 1464739200000 } + +--- +"Test get influencers with paging": + - do: + xpack.ml.get_influencers: + job_id: "get-influencers-test" + from: 1 + size: 2 + + - match: { count: 3 } + - length: { influencers: 2 } + - match: { influencers.0.influencer_score: 60 } + - match: { influencers.0.timestamp: 1464912000000 } + - match: { influencers.1.influencer_score: 50 } + - match: { influencers.1.timestamp: 1464739200000 } + +--- +"Test get influencers with paging in body": + - do: + xpack.ml.get_influencers: + job_id: "get-influencers-test" + body: > + { + "page": { + "from": 1, + "size": 2 + } + } + + - match: { count: 3 } + - length: { influencers: 2 } + - match: { influencers.0.influencer_score: 60 } + - match: { influencers.0.timestamp: 1464912000000 } + - match: { influencers.1.influencer_score: 50 } + - match: { influencers.1.timestamp: 1464739200000 } + +--- +"Test get influencers given exclude_interim false": + - do: + xpack.ml.get_influencers: + job_id: "get-influencers-test" + exclude_interim: false + + - match: { count: 3 } + +--- +"Test get influencers given exclude_interim true": + - do: + xpack.ml.get_influencers: + job_id: "get-influencers-test" + exclude_interim: true + + - match: { count: 2 } + - match: { influencers.0.timestamp: 1464912000000 } + - match: { influencers.0.is_interim: false } + - match: { influencers.1.timestamp: 1464739200000 } + - match: { influencers.1.is_interim: false } + +--- +"Test result influencers api with time range": + - do: + xpack.ml.get_influencers: + job_id: "get-influencers-test" + start: "2016-06-01T00:00:00Z" + end: "2016-06-01T01:00:00Z" + + - match: { count: 1 } + - match: { influencers.0.timestamp: 1464739200000 } + +--- +"Test with unknown job id": + - do: + catch: missing + xpack.ml.get_influencers: + job_id: "non-existent-job" + +--- +"Test get influencers api with influencer score filter": + - do: + xpack.ml.get_influencers: + job_id: "get-influencers-test" + influencer_score: 70 + + - match: { count: 1 } + - match: { influencers.0.timestamp: 1464825600000 } + +--- +"Test get influencers api sort": + - do: + xpack.ml.get_influencers: + job_id: "get-influencers-test" + desc: false + + - match: { count: 3 } + - match: { influencers.0.influencer_score: 50 } + - match: { influencers.1.influencer_score: 60 } + - match: { influencers.2.influencer_score: 80} + + - do: + xpack.ml.get_influencers: + job_id: "get-influencers-test" + sort: timestamp + + - match: { count: 3 } + - match: { influencers.0.timestamp: 1464912000000 } + - match: { influencers.1.timestamp: 1464825600000 } + - match: { influencers.2.timestamp: 1464739200000 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_overall_buckets.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_overall_buckets.yml new file mode 100644 index 0000000000000..75f35f311177c --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_overall_buckets.yml @@ -0,0 +1,616 @@ +setup: + - do: + headers: + 
Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: jobs-get-result-overall-buckets-60 + body: > + { + "groups": [ "jobs-get-result-overall-buckets-group"], + "analysis_config" : { + "bucket_span": "60m", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "time_field":"time" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: jobs-get-result-overall-buckets-30 + body: > + { + "groups": [ "jobs-get-result-overall-buckets-group"], + "analysis_config" : { + "bucket_span": "30m", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "time_field":"time" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: jobs-get-result-overall-buckets-17 + body: > + { + "groups": [ "jobs-get-result-overall-buckets-group"], + "analysis_config" : { + "bucket_span": "17m", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "time_field":"time" + } + } + + # Now index some buckets + # The buckets are: + # job-60: [ 30.0] [0.0] [ 20.0 ] + # job-30: [ ] [0.0] [ 10.0, 40.0 ] + # job-17: [ ] [0.0] [ 1.0, 0.0, 60.0 ] + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-shared + type: doc + id: "jobs-get-result-overall-buckets-60_1" + body: + { + "job_id": "jobs-get-result-overall-buckets-60", + "result_type": "bucket", + "timestamp": "2016-06-01T00:00:00Z", + "anomaly_score": 30.0, + "bucket_span": 3600 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-shared + type: doc + id: "jobs-get-result-overall-buckets-60_2" + body: + { + "job_id": "jobs-get-result-overall-buckets-60", + "result_type": "bucket", + "timestamp": "2016-06-01T01:00:00Z", + "anomaly_score": 0.0, + "bucket_span": 3600 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-shared + type: doc + id: "jobs-get-result-overall-buckets-60_3" + body: + { + "job_id": "jobs-get-result-overall-buckets-60", + "result_type": "bucket", + "timestamp": "2016-06-01T02:00:00Z", + "anomaly_score": 20.0, + "bucket_span": 3600 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-shared + type: doc + id: "jobs-get-result-overall-buckets-30_1" + body: + { + "job_id": "jobs-get-result-overall-buckets-30", + "result_type": "bucket", + "timestamp": "2016-06-01T01:00:00Z", + "anomaly_score": 0.0, + "bucket_span": 1800 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + index: + index: .ml-anomalies-shared + type: doc + id: "jobs-get-result-overall-buckets-30_2" + body: + { + "job_id": "jobs-get-result-overall-buckets-30", + "result_type": "bucket", + "timestamp": "2016-06-01T02:00:00Z", + "anomaly_score": 10.0, + "bucket_span": 1800 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-shared + type: doc + id: "jobs-get-result-overall-buckets-30_3" + body: + { + "job_id": "jobs-get-result-overall-buckets-30", + "result_type": "bucket", + "timestamp": "2016-06-01T02:30:00Z", + "anomaly_score": 40.0, + "bucket_span": 1800 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-shared + type: doc + id: "jobs-get-result-overall-buckets-17_1" + body: + { + "job_id": "jobs-get-result-overall-buckets-17", + "result_type": "bucket", + "timestamp": "2016-06-01T01:00:00Z", + "anomaly_score": 0.0, + "bucket_span": 1020 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-shared + type: doc + id: "jobs-get-result-overall-buckets-17_2" + body: + { + "job_id": "jobs-get-result-overall-buckets-17", + "result_type": "bucket", + "timestamp": "2016-06-01T02:08:00Z", + "anomaly_score": 1.0, + "bucket_span": 1020 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-shared + type: doc + id: "jobs-get-result-overall-buckets-17_3" + body: + { + "job_id": "jobs-get-result-overall-buckets-17", + "result_type": "bucket", + "timestamp": "2016-06-01T02:25:00Z", + "anomaly_score": 0.0, + "bucket_span": 1020 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-shared + type: doc + id: "jobs-get-result-overall-buckets-17_4" + body: + { + "job_id": "jobs-get-result-overall-buckets-17", + "result_type": "bucket", + "timestamp": "2016-06-01T02:42:00Z", + "anomaly_score": 60.0, + "bucket_span": 1020, + "is_interim": true + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + indices.refresh: + index: .ml-anomalies-shared + +--- +"Test overall buckets given missing job": + - do: + catch: missing + xpack.ml.get_overall_buckets: + job_id: "missing-job" + +--- +"Test overall buckets given non-matching expression and allow_no_jobs": + - do: + xpack.ml.get_overall_buckets: + job_id: "none-matching-*" + - match: { count: 0 } + +--- +"Test overall buckets given non-matching expression and not allow_no_jobs": + - do: + catch: missing + xpack.ml.get_overall_buckets: + job_id: "none-matching-*" + allow_no_jobs: false + +--- +"Test overall buckets given top_n is 0": + - do: + catch: /\[topN\] parameter must be positive, found \[0\]/ + xpack.ml.get_overall_buckets: + job_id: "jobs-get-result-overall-buckets-*" + top_n: 0 + +--- +"Test overall buckets given top_n is negative": + - do: + catch: /\[topN\] parameter must be positive, found \[-1\]/ + xpack.ml.get_overall_buckets: + job_id: "jobs-get-result-overall-buckets-*" + top_n: -1 + +--- +"Test overall buckets given default": + - do: + xpack.ml.get_overall_buckets: + job_id: "jobs-get-result-overall-buckets-*" + - match: { count: 3 } + - match: { overall_buckets.0.timestamp: 1464739200000 } + - match: { overall_buckets.0.bucket_span: 3600 } + - match: { overall_buckets.0.overall_score: 30.0 } + - length: { overall_buckets.0.jobs: 1} + - match: {overall_buckets.0.jobs.0.job_id: jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.0.jobs.0.max_anomaly_score: 30.0 } + - match: { overall_buckets.0.is_interim: false } + - match: { overall_buckets.0.result_type: overall_bucket } + - match: { overall_buckets.1.timestamp: 1464742800000 } + - match: { overall_buckets.1.bucket_span: 3600 } + - match: { overall_buckets.1.overall_score: 0.0 } + - length: { overall_buckets.1.jobs: 3} + - match: {overall_buckets.1.jobs.0.job_id: jobs-get-result-overall-buckets-17 } + - match: {overall_buckets.1.jobs.0.max_anomaly_score: 0.0 } + - match: {overall_buckets.1.jobs.1.job_id: jobs-get-result-overall-buckets-30 } + - match: {overall_buckets.1.jobs.1.max_anomaly_score: 0.0 } + - match: {overall_buckets.1.jobs.2.job_id: jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.1.jobs.2.max_anomaly_score: 0.0 } + - match: { overall_buckets.1.is_interim: false } + - match: { overall_buckets.1.result_type: overall_bucket } + - match: { overall_buckets.2.timestamp: 1464746400000 } + - match: { overall_buckets.2.bucket_span: 3600 } + - match: { overall_buckets.2.overall_score: 60.0 } + - length: { overall_buckets.2.jobs: 3} + - match: {overall_buckets.2.jobs.0.job_id: jobs-get-result-overall-buckets-17 } + - match: {overall_buckets.2.jobs.0.max_anomaly_score: 60.0 } + - match: {overall_buckets.2.jobs.1.job_id: jobs-get-result-overall-buckets-30 } + - match: {overall_buckets.2.jobs.1.max_anomaly_score: 40.0 } + - match: {overall_buckets.2.jobs.2.job_id: jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.2.jobs.2.max_anomaly_score: 20.0 } + - match: { overall_buckets.2.is_interim: true } + - match: { overall_buckets.2.result_type: overall_bucket } + +--- +"Test overall buckets given top_n is 2": + - do: + xpack.ml.get_overall_buckets: + job_id: "jobs-get-result-overall-buckets-group" + top_n: 2 + - match: { count: 3 } + - match: { overall_buckets.0.timestamp: 1464739200000 } + - match: { overall_buckets.0.bucket_span: 3600 } + - match: { overall_buckets.0.overall_score: 30.0 } + - length: { overall_buckets.0.jobs: 1} + - match: {overall_buckets.0.jobs.0.job_id: 
jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.0.jobs.0.max_anomaly_score: 30.0 } + - match: { overall_buckets.0.is_interim: false } + - match: { overall_buckets.0.result_type: overall_bucket } + - match: { overall_buckets.1.timestamp: 1464742800000 } + - match: { overall_buckets.1.bucket_span: 3600 } + - match: { overall_buckets.1.overall_score: 0.0 } + - length: { overall_buckets.1.jobs: 3} + - match: {overall_buckets.1.jobs.0.job_id: jobs-get-result-overall-buckets-17 } + - match: {overall_buckets.1.jobs.0.max_anomaly_score: 0.0 } + - match: {overall_buckets.1.jobs.1.job_id: jobs-get-result-overall-buckets-30 } + - match: {overall_buckets.1.jobs.1.max_anomaly_score: 0.0 } + - match: {overall_buckets.1.jobs.2.job_id: jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.1.jobs.2.max_anomaly_score: 0.0 } + - match: { overall_buckets.1.is_interim: false } + - match: { overall_buckets.1.result_type: overall_bucket } + - match: { overall_buckets.2.timestamp: 1464746400000 } + - match: { overall_buckets.2.bucket_span: 3600 } + - match: { overall_buckets.2.overall_score: 50.0 } + - length: { overall_buckets.2.jobs: 3} + - match: {overall_buckets.2.jobs.0.job_id: jobs-get-result-overall-buckets-17 } + - match: {overall_buckets.2.jobs.0.max_anomaly_score: 60.0 } + - match: {overall_buckets.2.jobs.1.job_id: jobs-get-result-overall-buckets-30 } + - match: {overall_buckets.2.jobs.1.max_anomaly_score: 40.0 } + - match: {overall_buckets.2.jobs.2.job_id: jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.2.jobs.2.max_anomaly_score: 20.0 } + - match: { overall_buckets.2.is_interim: true } + - match: { overall_buckets.2.result_type: overall_bucket } + +--- +"Test overall buckets given top_n is 3": + - do: + xpack.ml.get_overall_buckets: + job_id: "jobs-get-result-overall-buckets-group" + top_n: 3 + - match: { count: 3 } + - match: { overall_buckets.0.timestamp: 1464739200000 } + - match: { overall_buckets.0.bucket_span: 3600 } + - match: { overall_buckets.0.overall_score: 30.0 } + - length: { overall_buckets.0.jobs: 1} + - match: {overall_buckets.0.jobs.0.job_id: jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.0.jobs.0.max_anomaly_score: 30.0 } + - match: { overall_buckets.0.is_interim: false } + - match: { overall_buckets.0.result_type: overall_bucket } + - match: { overall_buckets.1.timestamp: 1464742800000 } + - match: { overall_buckets.1.bucket_span: 3600 } + - match: { overall_buckets.1.overall_score: 0.0 } + - length: { overall_buckets.1.jobs: 3} + - match: {overall_buckets.1.jobs.0.job_id: jobs-get-result-overall-buckets-17 } + - match: {overall_buckets.1.jobs.0.max_anomaly_score: 0.0 } + - match: {overall_buckets.1.jobs.1.job_id: jobs-get-result-overall-buckets-30 } + - match: {overall_buckets.1.jobs.1.max_anomaly_score: 0.0 } + - match: {overall_buckets.1.jobs.2.job_id: jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.1.jobs.2.max_anomaly_score: 0.0 } + - match: { overall_buckets.1.is_interim: false } + - match: { overall_buckets.1.result_type: overall_bucket } + - match: { overall_buckets.2.timestamp: 1464746400000 } + - match: { overall_buckets.2.bucket_span: 3600 } + - match: { overall_buckets.2.overall_score: 40.0 } + - length: { overall_buckets.2.jobs: 3} + - match: {overall_buckets.2.jobs.0.job_id: jobs-get-result-overall-buckets-17 } + - match: {overall_buckets.2.jobs.0.max_anomaly_score: 60.0 } + - match: {overall_buckets.2.jobs.1.job_id: jobs-get-result-overall-buckets-30 } + - match: 
{overall_buckets.2.jobs.1.max_anomaly_score: 40.0 } + - match: {overall_buckets.2.jobs.2.job_id: jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.2.jobs.2.max_anomaly_score: 20.0 } + - match: { overall_buckets.2.is_interim: true } + - match: { overall_buckets.2.result_type: overall_bucket } + +--- +"Test overall buckets given top_n is greater than the job count": + - do: + xpack.ml.get_overall_buckets: + job_id: "jobs-get-result-overall-buckets-*" + top_n: 333 + - match: { count: 3 } + - match: { overall_buckets.0.timestamp: 1464739200000 } + - match: { overall_buckets.0.bucket_span: 3600 } + - match: { overall_buckets.0.overall_score: 30.0 } + - length: { overall_buckets.0.jobs: 1} + - match: {overall_buckets.0.jobs.0.job_id: jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.0.jobs.0.max_anomaly_score: 30.0 } + - match: { overall_buckets.0.is_interim: false } + - match: { overall_buckets.0.result_type: overall_bucket } + - match: { overall_buckets.1.timestamp: 1464742800000 } + - match: { overall_buckets.1.bucket_span: 3600 } + - match: { overall_buckets.1.overall_score: 0.0 } + - length: { overall_buckets.1.jobs: 3} + - match: {overall_buckets.1.jobs.0.job_id: jobs-get-result-overall-buckets-17 } + - match: {overall_buckets.1.jobs.0.max_anomaly_score: 0.0 } + - match: {overall_buckets.1.jobs.1.job_id: jobs-get-result-overall-buckets-30 } + - match: {overall_buckets.1.jobs.1.max_anomaly_score: 0.0 } + - match: {overall_buckets.1.jobs.2.job_id: jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.1.jobs.2.max_anomaly_score: 0.0 } + - match: { overall_buckets.1.is_interim: false } + - match: { overall_buckets.1.result_type: overall_bucket } + - match: { overall_buckets.2.timestamp: 1464746400000 } + - match: { overall_buckets.2.bucket_span: 3600 } + - match: { overall_buckets.2.overall_score: 40.0 } + - length: { overall_buckets.2.jobs: 3} + - match: {overall_buckets.2.jobs.0.job_id: jobs-get-result-overall-buckets-17 } + - match: {overall_buckets.2.jobs.0.max_anomaly_score: 60.0 } + - match: {overall_buckets.2.jobs.1.job_id: jobs-get-result-overall-buckets-30 } + - match: {overall_buckets.2.jobs.1.max_anomaly_score: 40.0 } + - match: {overall_buckets.2.jobs.2.job_id: jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.2.jobs.2.max_anomaly_score: 20.0 } + - match: { overall_buckets.2.is_interim: true } + - match: { overall_buckets.2.result_type: overall_bucket } + +--- +"Test overall buckets given overall_score filter": + - do: + xpack.ml.get_overall_buckets: + job_id: "jobs-get-result-overall-buckets-*" + top_n: 2 + overall_score: 50.0 + - match: { count: 1 } + - match: { overall_buckets.0.timestamp: 1464746400000 } + - match: { overall_buckets.0.overall_score: 50.0 } + +--- +"Test overall buckets given exclude_interim": + - do: + xpack.ml.get_overall_buckets: + job_id: "jobs-get-result-overall-buckets-*" + exclude_interim: true + - match: { count: 3 } + - match: { overall_buckets.0.timestamp: 1464739200000 } + - match: { overall_buckets.0.bucket_span: 3600 } + - match: { overall_buckets.0.overall_score: 30.0 } + - length: { overall_buckets.0.jobs: 1} + - match: {overall_buckets.0.jobs.0.job_id: jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.0.jobs.0.max_anomaly_score: 30.0 } + - match: { overall_buckets.0.is_interim: false } + - match: { overall_buckets.0.result_type: overall_bucket } + - match: { overall_buckets.1.timestamp: 1464742800000 } + - match: { overall_buckets.1.bucket_span: 3600 } + - match: { 
overall_buckets.1.overall_score: 0.0 } + - length: { overall_buckets.1.jobs: 3} + - match: {overall_buckets.1.jobs.0.job_id: jobs-get-result-overall-buckets-17 } + - match: {overall_buckets.1.jobs.0.max_anomaly_score: 0.0 } + - match: {overall_buckets.1.jobs.1.job_id: jobs-get-result-overall-buckets-30 } + - match: {overall_buckets.1.jobs.1.max_anomaly_score: 0.0 } + - match: {overall_buckets.1.jobs.2.job_id: jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.1.jobs.2.max_anomaly_score: 0.0 } + - match: { overall_buckets.1.is_interim: false } + - match: { overall_buckets.1.result_type: overall_bucket } + - match: { overall_buckets.2.timestamp: 1464746400000 } + - match: { overall_buckets.2.bucket_span: 3600 } + - match: { overall_buckets.2.overall_score: 40.0 } + - length: { overall_buckets.2.jobs: 3} + - match: {overall_buckets.2.jobs.0.job_id: jobs-get-result-overall-buckets-17 } + - match: {overall_buckets.2.jobs.0.max_anomaly_score: 1.0 } + - match: {overall_buckets.2.jobs.1.job_id: jobs-get-result-overall-buckets-30 } + - match: {overall_buckets.2.jobs.1.max_anomaly_score: 40.0 } + - match: {overall_buckets.2.jobs.2.job_id: jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.2.jobs.2.max_anomaly_score: 20.0 } + - match: { overall_buckets.2.is_interim: false } + - match: { overall_buckets.2.result_type: overall_bucket } + +--- +"Test overall buckets given string start and end params": + - do: + xpack.ml.get_overall_buckets: + job_id: "jobs-get-result-overall-buckets-*" + top_n: 2 + start: "2016-06-01T00:30:00Z" + end: "2016-06-01T02:30:00Z" + - match: { count: 1 } + - match: { overall_buckets.0.timestamp: 1464742800000 } + - match: { overall_buckets.0.overall_score: 0.0 } + - length: { overall_buckets.0.jobs: 3} + +--- +"Test overall buckets given epoch start and end params": + - do: + xpack.ml.get_overall_buckets: + job_id: "jobs-get-result-overall-buckets-*" + top_n: 2 + start: 1464744600000 + end: 1464751800000 + - match: { count: 1 } + - match: { overall_buckets.0.timestamp: 1464746400000 } + - match: { overall_buckets.0.overall_score: 50.0 } + - length: { overall_buckets.0.jobs: 3} + - match: {overall_buckets.0.jobs.0.job_id: jobs-get-result-overall-buckets-17 } + - match: {overall_buckets.0.jobs.0.max_anomaly_score: 60.0 } + - match: {overall_buckets.0.jobs.1.job_id: jobs-get-result-overall-buckets-30 } + - match: {overall_buckets.0.jobs.1.max_anomaly_score: 40.0 } + - match: {overall_buckets.0.jobs.2.job_id: jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.0.jobs.2.max_anomaly_score: 20.0 } + +--- +"Test overall buckets given invalid start param": + - do: + catch: /.*Query param \[start\] with value \[invalid\] cannot be parsed as a date or converted to a number.*/ + xpack.ml.get_overall_buckets: + job_id: "jobs-get-result-overall-buckets-*" + start: "invalid" + +--- +"Test overall buckets given invalid end param": + - do: + catch: /.*Query param \[end\] with value \[invalid\] cannot be parsed as a date or converted to a number.*/ + xpack.ml.get_overall_buckets: + job_id: "jobs-get-result-overall-buckets-*" + end: "invalid" + +--- +"Test overall buckets given bucket_span": + - do: + xpack.ml.get_overall_buckets: + job_id: "jobs-get-result-overall-buckets-*" + bucket_span: "2h" + - match: { count: 2 } + - match: { overall_buckets.0.timestamp: 1464739200000 } + - match: { overall_buckets.0.bucket_span: 7200 } + - match: { overall_buckets.0.overall_score: 30.0 } + - length: { overall_buckets.0.jobs: 3} + - match: 
{overall_buckets.0.jobs.0.job_id: jobs-get-result-overall-buckets-17 } + - match: {overall_buckets.0.jobs.0.max_anomaly_score: 0.0 } + - match: {overall_buckets.0.jobs.1.job_id: jobs-get-result-overall-buckets-30 } + - match: {overall_buckets.0.jobs.1.max_anomaly_score: 0.0 } + - match: {overall_buckets.0.jobs.2.job_id: jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.0.jobs.2.max_anomaly_score: 30.0 } + - match: { overall_buckets.0.is_interim: false } + - match: { overall_buckets.0.result_type: overall_bucket } + - match: { overall_buckets.1.timestamp: 1464746400000 } + - match: { overall_buckets.1.bucket_span: 7200 } + - match: { overall_buckets.1.overall_score: 60.0 } + - length: { overall_buckets.1.jobs: 3} + - match: {overall_buckets.1.jobs.0.job_id: jobs-get-result-overall-buckets-17 } + - match: {overall_buckets.1.jobs.0.max_anomaly_score: 60.0 } + - match: {overall_buckets.1.jobs.1.job_id: jobs-get-result-overall-buckets-30 } + - match: {overall_buckets.1.jobs.1.max_anomaly_score: 40.0 } + - match: {overall_buckets.1.jobs.2.job_id: jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.1.jobs.2.max_anomaly_score: 20.0 } + - match: { overall_buckets.1.is_interim: true } + - match: { overall_buckets.1.result_type: overall_bucket } + +--- +"Test overall buckets given bucket_span and top_n is 2": + - do: + xpack.ml.get_overall_buckets: + job_id: "jobs-get-result-overall-buckets-*" + top_n: 2 + bucket_span: "2h" + + - match: { count: 2 } + - match: { overall_buckets.0.timestamp: 1464739200000 } + - match: { overall_buckets.0.bucket_span: 7200 } + - match: { overall_buckets.0.overall_score: 30.0 } + - length: { overall_buckets.0.jobs: 3} + - match: {overall_buckets.0.jobs.0.job_id: jobs-get-result-overall-buckets-17 } + - match: {overall_buckets.0.jobs.0.max_anomaly_score: 0.0 } + - match: {overall_buckets.0.jobs.1.job_id: jobs-get-result-overall-buckets-30 } + - match: {overall_buckets.0.jobs.1.max_anomaly_score: 0.0 } + - match: {overall_buckets.0.jobs.2.job_id: jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.0.jobs.2.max_anomaly_score: 30.0 } + - match: { overall_buckets.0.is_interim: false } + - match: { overall_buckets.0.result_type: overall_bucket } + - match: { overall_buckets.1.timestamp: 1464746400000 } + - match: { overall_buckets.1.bucket_span: 7200 } + - match: { overall_buckets.1.overall_score: 50.0 } + - length: { overall_buckets.1.jobs: 3} + - match: {overall_buckets.1.jobs.0.job_id: jobs-get-result-overall-buckets-17 } + - match: {overall_buckets.1.jobs.0.max_anomaly_score: 60.0 } + - match: {overall_buckets.1.jobs.1.job_id: jobs-get-result-overall-buckets-30 } + - match: {overall_buckets.1.jobs.1.max_anomaly_score: 40.0 } + - match: {overall_buckets.1.jobs.2.job_id: jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.1.jobs.2.max_anomaly_score: 20.0 } + - match: { overall_buckets.1.is_interim: true } + - match: { overall_buckets.1.result_type: overall_bucket } + +--- +"Test overall buckets given bucket_span and overall_score filter": + - do: + xpack.ml.get_overall_buckets: + job_id: "jobs-get-result-overall-buckets-*" + bucket_span: "2h" + overall_score: "41.0" + + - match: { count: 1 } + - match: { overall_buckets.0.timestamp: 1464746400000 } + - match: { overall_buckets.0.bucket_span: 7200 } + - match: { overall_buckets.0.overall_score: 60.0 } + - length: { overall_buckets.0.jobs: 3} + - match: {overall_buckets.0.jobs.0.job_id: jobs-get-result-overall-buckets-17 } + - match: 
{overall_buckets.0.jobs.0.max_anomaly_score: 60.0 } + - match: {overall_buckets.0.jobs.1.job_id: jobs-get-result-overall-buckets-30 } + - match: {overall_buckets.0.jobs.1.max_anomaly_score: 40.0 } + - match: {overall_buckets.0.jobs.2.job_id: jobs-get-result-overall-buckets-60 } + - match: {overall_buckets.0.jobs.2.max_anomaly_score: 20.0 } + - match: { overall_buckets.0.is_interim: true } + - match: { overall_buckets.0.result_type: overall_bucket } + +--- +"Test overall buckets given bucket_span is smaller than max job bucket_span": + - do: + catch: /.*Param \[bucket_span\] must be greater or equal to the max bucket_span \[60m\]*/ + xpack.ml.get_overall_buckets: + job_id: "jobs-get-result-overall-buckets-*" + bucket_span: "59m" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_records.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_records.yml new file mode 100644 index 0000000000000..b5dae2045f440 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_result_records.yml @@ -0,0 +1,142 @@ +setup: + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: jobs-get-result-records + body: > + { + "analysis_config" : { + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "format":"xcontent", + "time_field":"time" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-jobs-get-result-records + type: doc + id: jobs-get-result-records_1464739200000_1_1 + body: + { + "job_id": "jobs-get-result-records", + "result_type": "record", + "timestamp": "2016-06-01T00:00:00Z", + "record_score": 60.0, + "bucket_span": 1 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-jobs-get-result-records + type: doc + id: jobs-get-result-records_1464825600000_1_2 + body: + { + "job_id": "jobs-get-result-records", + "result_type": "record", + "timestamp": "2016-06-02T00:00:00Z", + "record_score": 80.0, + "bucket_span": 1, + "is_interim": true + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + indices.refresh: + index: .ml-anomalies-jobs-get-result-records + +--- +"Test result records api": + - do: + xpack.ml.get_records: + job_id: "jobs-get-result-records" + + - match: { count: 2 } + - match: { records.0.timestamp: 1464825600000 } + - match: { records.0.job_id: jobs-get-result-records} + - match: { records.0.result_type: record} + - match: { records.1.timestamp: 1464739200000 } + - match: { records.1.job_id: jobs-get-result-records} + - match: { records.1.result_type: record} + +--- +"Test get records with paging": + - do: + xpack.ml.get_records: + job_id: "jobs-get-result-records" + from: 1 + size: 1 + + - match: { count: 2 } + - length: { records: 1 } + - match: { records.0.timestamp: 1464739200000 } + - match: { records.0.job_id: jobs-get-result-records} + - match: { records.0.result_type: record} + +--- +"Test get records with paging in body": + - do: + xpack.ml.get_records: + job_id: "jobs-get-result-records" + body: > + { + "page": { + "from": 1, + "size": 1 + } + } + + - match: { count: 2 } + - length: { records: 1 } + - match: { records.0.timestamp: 1464739200000 } + - match: { records.0.job_id: jobs-get-result-records} + - match: { records.0.result_type: record} + +--- +"Test get records given exclude_interim is false": + - do: + xpack.ml.get_records: + job_id: "jobs-get-result-records" + exclude_interim: false + + - match: { count: 2 } + +--- +"Test get records given exclude_interim is true": + - do: + xpack.ml.get_records: + job_id: "jobs-get-result-records" + exclude_interim: true + + - match: { count: 1 } + - match: { records.0.timestamp: 1464739200000 } + - match: { records.0.is_interim: false } + +--- +"Test result records api with time range": + - do: + xpack.ml.get_records: + job_id: "jobs-get-result-records" + start: "2016-05-01T00:00:00Z" + end: "2016-06-01T01:00:00Z" + + - match: { count: 1 } + - match: { records.0.timestamp: 1464739200000 } + - match: { records.0.job_id: jobs-get-result-records} + - match: { records.0.result_type: record} + +--- +"Test with unknown job id": + - do: + catch: missing + xpack.ml.get_records: + job_id: "non-existent-job" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml new file mode 100644 index 0000000000000..61bcf63e39869 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_get_stats.yml @@ -0,0 +1,325 @@ +setup: + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: job-stats-test + body: > + { + "job_id":"job-stats-test", + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "analysis_limits" : { + "model_memory_limit": "10mb" + }, + "data_description" : { + "format":"xcontent", + "time_field":"time", + "time_format":"epoch" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.open_job: + job_id: job-stats-test + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + xpack.ml.put_job: + job_id: jobs-get-stats-datafeed-job + body: > + { + "job_id":"jobs-get-stats-datafeed-job", + "description":"A job with a datafeed", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "analysis_limits" : { + "model_memory_limit": "10mb" + }, + "data_description" : { + "format" : "xcontent", + "time_field":"time", + "time_format":"yyyy-MM-dd'T'HH:mm:ssX" + } + } + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.open_job: + job_id: jobs-get-stats-datafeed-job + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_datafeed: + datafeed_id: datafeed-1 + body: > + { + "job_id":"jobs-get-stats-datafeed-job", + "indexes":["farequote"], + "types":["response"] + } + +--- +"Test get job stats after uploading data prompting the creation of some stats": + + - do: + xpack.ml.post_data: + job_id: job-stats-test + body: > + {"airline":"AAL","responsetime":"132.2046","time":"1403481600"} + {"airline":"JZA","responsetime":"990.4628","time":"1403481600"} + + - do: + xpack.ml.flush_job: + job_id: job-stats-test + - match: { flushed: true } + + + - do: + xpack.ml.get_job_stats: + job_id: job-stats-test + - match: { jobs.0.job_id : job-stats-test } + - match: { jobs.0.data_counts.processed_record_count: 2 } + - match: { jobs.0.data_counts.processed_field_count: 4 } + - match: { jobs.0.data_counts.input_field_count: 4 } + - match: { jobs.0.model_size_stats.model_bytes: 0 } + - match: { jobs.0.state: opened } + - is_true: jobs.0.node.name + - is_true: jobs.0.node.transport_address + - match: { jobs.0.node.attributes.ml\.enabled: "true"} + - is_true: jobs.0.open_time + +--- +"Test get job stats for closed job": + + - do: + xpack.ml.post_data: + job_id: job-stats-test + body: > + {"airline":"AAL","responsetime":"132.2046","time":"1403481600"} + {"airline":"JZA","responsetime":"990.4628","time":"1403481600"} + + - do: + xpack.ml.flush_job: + job_id: job-stats-test + - match: { flushed: true } + + - do: + xpack.ml.close_job: + job_id: job-stats-test + - match: { closed: true } + + - do: + xpack.ml.get_job_stats: + job_id: job-stats-test + - match: { jobs.0.job_id : job-stats-test } + - match: { jobs.0.data_counts.processed_record_count: 2 } + - match: { jobs.0.data_counts.processed_field_count: 4} + - match: { jobs.0.data_counts.input_field_count: 4 } + - gt: { jobs.0.model_size_stats.model_bytes: 0 } + - match: { jobs.0.state: closed } + - is_false: jobs.0.node + - is_false: jobs.0.open_time + +--- +"Test get job stats of datafeed job that has not received any data": + + - do: + xpack.ml.get_job_stats: + job_id: jobs-get-stats-datafeed-job + - match: { jobs.0.job_id : jobs-get-stats-datafeed-job } + - match: { jobs.0.data_counts.processed_record_count: 0 } + - match: { jobs.0.model_size_stats.model_bytes : 0 } + - match: { jobs.0.state: opened } + - is_true: jobs.0.open_time + +--- +"Test get all job stats with _all": + + - do: + xpack.ml.get_job_stats: + job_id: _all + - match: { count: 2 } + - match: { jobs.0.state: opened } + - match: { jobs.1.state: opened } + +--- +"Test get all job stats with wildcard": + + - do: + xpack.ml.get_job_stats: + job_id: "*" + - match: { count: 2 } + - match: { jobs.0.state: opened } + - match: { jobs.1.state: 
opened } + +--- +"Test get all job stats implicitly": + + - do: + xpack.ml.get_job_stats: {} + - match: { count: 2 } + +--- +"Test get job stats given missing job": + + - do: + catch: missing + xpack.ml.get_job_stats: + job_id: unknown-job + +--- +"Test get job stats given pattern and allow_no_jobs": + + - do: + xpack.ml.get_job_stats: + job_id: "missing-*" + allow_no_jobs: true + - match: { count: 0 } + +--- +"Test get job stats given pattern and not allow_no_jobs": + + - do: + catch: missing + xpack.ml.get_job_stats: + job_id: "missing-*" + allow_no_jobs: false + +--- +"Test reading v54 data counts and model size stats": + + - do: + xpack.ml.put_job: + job_id: job-stats-v54-bwc-test + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "analysis_limits" : { + "model_memory_limit": "10mb" + }, + "data_description" : { + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.refresh: {} + + # This is testing that the documents with v5.4 IDs are fetched. + # Ideally we would use the v5.4 type but we can't put a mapping + # for another type into the single type indices. Type isn't used + # in the query so the test is valid + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-shared + type: doc + id: job-stats-v54-bwc-test-data-counts + body: + { + job_id : job-stats-v54-bwc-test, + processed_record_count : 10, + processed_field_count : 0, + input_bytes : 0, + input_field_count : 0, + invalid_date_count : 0, + missing_field_count : 0, + out_of_order_timestamp_count : 0, + empty_bucket_count : 0, + sparse_bucket_count : 0, + bucket_count : 0, + input_record_count : 0, + latest_record_timestamp: 2000000000000 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-shared + type: doc + id: job-stats-v54-bwc-test-model_size_stats + body: + { + job_id : job-stats-v54-bwc-test, + result_type : model_size_stats, + model_bytes : 0, + total_by_field_count : 101, + total_over_field_count : 0, + total_partition_field_count : 0, + bucket_allocation_failures_count : 0, + memory_status : ok, + log_time : 1495808248662 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + indices.refresh: + index: [.ml-anomalies-shared] + + - do: + xpack.ml.get_job_stats: + job_id: job-stats-v54-bwc-test + - match: { jobs.0.job_id : job-stats-v54-bwc-test } + - match: { jobs.0.data_counts.processed_record_count: 10 } + - match: { jobs.0.model_size_stats.total_by_field_count: 101 } + +--- +"Test no exception on get job stats with missing index": + + - do: + xpack.ml.post_data: + job_id: job-stats-test + body: > + {"airline":"AAL","responsetime":"132.2046","time":"1403481600"} + {"airline":"JZA","responsetime":"990.4628","time":"1403481600"} + + - do: + xpack.ml.close_job: + job_id: jobs-get-stats-datafeed-job + - match: { closed: true } + + - do: + xpack.ml.close_job: + job_id: job-stats-test + - match: { closed: true } + + - do: + indices.delete: + index: .ml-anomalies-shared + + - do: + xpack.ml.get_job_stats: {} + - match: { count: 2 } + - match: { jobs.0.data_counts.processed_record_count: 0 } + - match: { jobs.0.data_counts.processed_field_count: 0 } + - match: { jobs.0.data_counts.input_field_count: 0 } + - match: { jobs.0.model_size_stats.model_bytes: 0 } + - match: { jobs.0.state: closed } + - is_false: jobs.0.node + - is_false: jobs.0.open_time + - match: { jobs.1.data_counts.processed_record_count: 0 } + - match: { jobs.1.data_counts.processed_field_count: 0 } + - match: { jobs.1.data_counts.input_field_count: 0 } + - match: { jobs.1.model_size_stats.model_bytes: 0 } + - match: { jobs.1.state: closed } + - is_false: jobs.1.node + - is_false: jobs.1.open_time + + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml new file mode 100644 index 0000000000000..42fca7b81a036 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_anomalies_default_mappings.yml @@ -0,0 +1,103 @@ +--- +"Test new fields are mapped as keyword": + + - do: + xpack.ml.put_job: + job_id: ml-anomalies-default-mappings-job + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"count","by_field_name":"foo"}] + }, + "data_description" : { + "time_field":"time" + } + } + - match: { job_id: "ml-anomalies-default-mappings-job" } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-shared + type: doc + id: "new_doc" + body: > + { + "new_field": "bar" + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.refresh: + index: .ml-anomalies-shared + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + indices.get_field_mapping: + index: .ml-anomalies-shared + type: doc + fields: new_field + - match: {\.ml-anomalies-shared.mappings.doc.new_field.mapping.new_field.type: keyword} + +--- +"Test _meta exists when two jobs share an index": + + - do: + xpack.ml.put_job: + job_id: ml-anomalies-shared-mappings-job1 + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"count","by_field_name":"foo"}] + }, + "data_description" : { + "time_field":"time" + } + } + - match: { job_id: "ml-anomalies-shared-mappings-job1" } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.refresh: + index: .ml-anomalies-shared + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.get_mapping: + index: .ml-anomalies-shared + - is_true: \.ml-anomalies-shared.mappings.doc._meta.version + + - do: + xpack.ml.put_job: + job_id: ml-anomalies-shared-mappings-job2 + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"count","by_field_name":"bar"}] + }, + "data_description" : { + "time_field":"time" + } + } + - match: { job_id: "ml-anomalies-shared-mappings-job2" } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.refresh: + index: .ml-anomalies-shared + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.get_mapping: + index: .ml-anomalies-shared + - is_true: \.ml-anomalies-shared.mappings.doc._meta.version diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_classic_analyze.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_classic_analyze.yml new file mode 100644 index 0000000000000..ba0fd2a7a08ac --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_classic_analyze.yml @@ -0,0 +1,83 @@ +--- +"Test analyze API with an analyzer that does what we used to do in native code": + - do: + indices.analyze: + body: > + { + "tokenizer" : "ml_classic", + "filter" : [ + { "type" : "stop", "stopwords": [ + "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday", + "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun", + "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December", + "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", + "GMT", "UTC" + ] } + ], + "text" : "[elasticsearch] [2017-12-13T10:46:30,816][INFO ][o.e.c.m.MetaDataCreateIndexService] [node-0] [.watcher-history-7-2017.12.13] creating index, cause [auto(bulk api)], templates [.watch-history-7], shards [1]/[1], mappings [doc]" + } + - match: { tokens.0.token: "elasticsearch" } + - match: { tokens.0.start_offset: 1 } + - match: { tokens.0.end_offset: 14 } + - match: { tokens.0.position: 0 } + - match: { tokens.1.token: "INFO" } + - match: { tokens.1.start_offset: 42 } + - match: { tokens.1.end_offset: 46 } + - match: { tokens.1.position: 5 } + - match: { tokens.2.token: "o.e.c.m.MetaDataCreateIndexService" } + - match: { tokens.2.start_offset: 49 } + - match: { tokens.2.end_offset: 83 } + - match: { tokens.2.position: 6 } + - match: { tokens.3.token: "node-0" } + - 
match: { tokens.3.start_offset: 86 } + - match: { tokens.3.end_offset: 92 } + - match: { tokens.3.position: 7 } + - match: { tokens.4.token: "watcher-history-7-2017.12.13" } + - match: { tokens.4.start_offset: 96 } + - match: { tokens.4.end_offset: 124 } + - match: { tokens.4.position: 8 } + - match: { tokens.5.token: "creating" } + - match: { tokens.5.start_offset: 126 } + - match: { tokens.5.end_offset: 134 } + - match: { tokens.5.position: 9 } + - match: { tokens.6.token: "index" } + - match: { tokens.6.start_offset: 135 } + - match: { tokens.6.end_offset: 140 } + - match: { tokens.6.position: 10 } + - match: { tokens.7.token: "cause" } + - match: { tokens.7.start_offset: 142 } + - match: { tokens.7.end_offset: 147 } + - match: { tokens.7.position: 11 } + - match: { tokens.8.token: "auto" } + - match: { tokens.8.start_offset: 149 } + - match: { tokens.8.end_offset: 153 } + - match: { tokens.8.position: 12 } + - match: { tokens.9.token: "bulk" } + - match: { tokens.9.start_offset: 154 } + - match: { tokens.9.end_offset: 158 } + - match: { tokens.9.position: 13 } + - match: { tokens.10.token: "api" } + - match: { tokens.10.start_offset: 159 } + - match: { tokens.10.end_offset: 162 } + - match: { tokens.10.position: 14 } + - match: { tokens.11.token: "templates" } + - match: { tokens.11.start_offset: 166 } + - match: { tokens.11.end_offset: 175 } + - match: { tokens.11.position: 15 } + - match: { tokens.12.token: "watch-history-7" } + - match: { tokens.12.start_offset: 178 } + - match: { tokens.12.end_offset: 193 } + - match: { tokens.12.position: 16 } + - match: { tokens.13.token: "shards" } + - match: { tokens.13.start_offset: 196 } + - match: { tokens.13.end_offset: 202 } + - match: { tokens.13.position: 17 } + - match: { tokens.14.token: "mappings" } + - match: { tokens.14.start_offset: 212 } + - match: { tokens.14.end_offset: 220 } + - match: { tokens.14.position: 20 } + - match: { tokens.15.token: "doc" } + - match: { tokens.15.start_offset: 222 } + - match: { tokens.15.end_offset: 225 } + - match: { tokens.15.position: 21 } + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_info.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_info.yml new file mode 100644 index 0000000000000..ce934c1d2f640 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_info.yml @@ -0,0 +1,37 @@ +--- +"Test ml info": + - do: + xpack.ml.info: {} + - match: { defaults.anomaly_detectors.model_memory_limit: "1gb" } + - match: { defaults.anomaly_detectors.categorization_examples_limit: 4 } + - match: { defaults.anomaly_detectors.model_snapshot_retention_days: 1 } + - match: { defaults.datafeeds.scroll_size: 1000 } + - match: { limits: {} } + + - do: + cluster.put_settings: + body: + persistent: + xpack.ml.max_model_memory_limit: "512mb" + + - do: + xpack.ml.info: {} + - match: { defaults.anomaly_detectors.model_memory_limit: "512mb" } + - match: { defaults.anomaly_detectors.categorization_examples_limit: 4 } + - match: { defaults.anomaly_detectors.model_snapshot_retention_days: 1 } + - match: { defaults.datafeeds.scroll_size: 1000 } + - match: { limits.max_model_memory_limit: "512mb" } + + - do: + cluster.put_settings: + body: + persistent: + xpack.ml.max_model_memory_limit: "6gb" + + - do: + xpack.ml.info: {} + - match: { defaults.anomaly_detectors.model_memory_limit: "1gb" } + - match: { defaults.anomaly_detectors.categorization_examples_limit: 4 } + - match: { defaults.anomaly_detectors.model_snapshot_retention_days: 1 } + - match: { 
defaults.datafeeds.scroll_size: 1000 } + - match: { limits.max_model_memory_limit: "6gb" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/post_data.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/post_data.yml new file mode 100644 index 0000000000000..7bc4f7df92acd --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/post_data.yml @@ -0,0 +1,292 @@ +setup: + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: post-data-job + body: > + { + "job_id":"post-data-job", + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "format":"xcontent", + "time_field":"time", + "time_format":"epoch" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: post-data-closed-job + body: > + { + "job_id":"post-data-closed-job", + "description":"A closed job", + "analysis_config" : { + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "format":"xcontent" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.open_job: + job_id: post-data-job + +--- +"Test POST data job api, flush, close and verify DataCounts doc": + - do: + xpack.ml.post_data: + job_id: post-data-job + body: + - airline: AAL + responsetime: 132.2046 + sourcetype: post-data-job + time: 1403481600 + - airline: JZA + responsetime: 990.4628 + sourcetype: post-data-job + time: 1403481700 + + - match: { processed_record_count: 2 } + - match: { processed_field_count: 4} + - gte: { input_bytes: 140 } + - lte: { input_bytes: 180 } + - match: { input_field_count: 6 } + - match: { invalid_date_count: 0 } + - match: { missing_field_count: 0 } + - match: { out_of_order_timestamp_count: 0} + - match: { earliest_record_timestamp: 1403481600000} + - match: { latest_record_timestamp: 1403481700000} + + - do: + xpack.ml.flush_job: + job_id: post-data-job + - match: { flushed: true } + - match: { last_finalized_bucket_end: 1403481600000 } + + - do: + xpack.ml.close_job: + job_id: post-data-job + - match: { closed: true } + + - do: + xpack.ml.get_job_stats: + job_id: post-data-job + - match: { jobs.0.state: "closed" } + + - do: + get: + index: .ml-anomalies-post-data-job + type: doc + id: post-data-job_data_counts + + - match: { _source.processed_record_count: 2 } + - match: { _source.processed_field_count: 4} + - gte: { _source.input_bytes: 140 } + - lte: { _source.input_bytes: 180 } + - match: { _source.input_field_count: 6 } + - match: { _source.invalid_date_count: 0 } + - match: { _source.missing_field_count: 0 } + - match: { _source.out_of_order_timestamp_count: 0} + - match: { _source.earliest_record_timestamp: 1403481600000} + - match: { _source.latest_record_timestamp: 1403481700000} + +--- +"Test flush and close job WITHOUT sending any data": + - do: + xpack.ml.flush_job: + job_id: post-data-job + - match: { flushed: true } + - match: { last_finalized_bucket_end: 0 } + + - do: + xpack.ml.close_job: + job_id: post-data-job + - match: { closed: true } + + - do: + 
xpack.ml.get_job_stats: + job_id: post-data-job + - match: { jobs.0.state: "closed" } + +--- +"Test flush with skip_time": + + - do: + xpack.ml.post_data: + job_id: post-data-job + body: + - airline: AAL + responsetime: 132.2046 + sourcetype: post-data-job + time: 1403481600 + - airline: AAL + responsetime: 990.4628 + sourcetype: post-data-job + time: 1403485200 + + # Skip a bucket + - do: + xpack.ml.flush_job: + job_id: post-data-job + skip_time: 1403488700 + - match: { flushed: true } + - match: { last_finalized_bucket_end: 1403488800000 } + + # Send some data that should be ignored + - do: + xpack.ml.post_data: + job_id: post-data-job + body: + - airline: AAL + responsetime: 132.2046 + sourcetype: post-data-job + time: 1403488600 + - airline: AAL + responsetime: 990.4628 + sourcetype: post-data-job + time: 1403488700 + + # Send data that will create results for the bucket after the skipped one + - do: + xpack.ml.post_data: + job_id: post-data-job + body: + - airline: AAL + responsetime: 132.2046 + sourcetype: post-data-job + time: 1403488900 + - airline: AAL + responsetime: 132.2046 + sourcetype: post-data-job + time: 1403492400 + + - do: + xpack.ml.close_job: + job_id: post-data-job + - match: { closed: true } + + - do: + xpack.ml.get_buckets: + job_id: "post-data-job" + - match: { count: 2 } + - match: { buckets.0.timestamp: 1403481600000 } + - match: { buckets.0.event_count: 1 } + - match: { buckets.1.timestamp: 1403488800000 } + - match: { buckets.1.event_count: 1 } + +--- +"Test POST data with invalid parameters": + + - do: + catch: missing + xpack.ml.post_data: + job_id: not_a_job + body: + - airline: AAL + responsetime: 132.2046 + sourcetype: farequote + time: 1403481600 + - airline: JZA + responsetime: 990.4628 + sourcetype: farequote + time: 1403481700 + + - do: + catch: /parse_exception/ + xpack.ml.post_data: + job_id: post-data-job + reset_start: not_a_date + body: + - airline: AAL + responsetime: 132.2046 + sourcetype: farequote + time: 1403481600 + - airline: JZA + responsetime: 990.4628 + sourcetype: farequote + time: 1403481700 + + - do: + catch: /parse_exception/ + xpack.ml.post_data: + job_id: post-data-job + reset_end: end_not_a_date + body: + - airline: AAL + responsetime: 132.2046 + sourcetype: farequote + time: 1403481600 + - airline: JZA + responsetime: 990.4628 + sourcetype: farequote + time: 1403481700 + +--- +"Test Flush data with invalid parameters": + - do: + catch: missing + xpack.ml.flush_job: + job_id: not_a_job + + - do: + catch: /parse_exception/ + xpack.ml.flush_job: + job_id: post-data-job + start: not_a_date + + - do: + catch: /parse_exception/ + xpack.ml.flush_job: + job_id: post-data-job + end: end_not_a_date + + - do: + catch: /parse_exception/ + xpack.ml.flush_job: + job_id: post-data-job + advance_time: advance_time_not_a_date + +--- +"Test open and close with non-existent job id": + - do: + catch: missing + xpack.ml.open_job: + job_id: not_a_job + + - do: + catch: missing + xpack.ml.close_job: + job_id: not_a_job + +--- +"Test flushing and posting a closed job": + + - do: + catch: /status_exception/ + xpack.ml.flush_job: + job_id: post-data-closed-job + + - do: + catch: /status_exception/ + xpack.ml.post_data: + job_id: post-data-closed-job + body: + - airline: AAL + responsetime: 132.2046 + sourcetype: farequote + time: 1403481600 + - airline: JZA + responsetime: 990.4628 + sourcetype: farequote + time: 1403481700 diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/preview_datafeed.yml 
b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/preview_datafeed.yml new file mode 100644 index 0000000000000..dc7116978dc19 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/preview_datafeed.yml @@ -0,0 +1,403 @@ +setup: + - do: + indices.create: + index: airline-data + body: + mappings: + response: + properties: + time: + type: date + airline: + type: keyword + responsetime: + type: float + event_rate: + type: integer + + - do: + index: + index: airline-data + type: response + id: 1 + body: > + { + "time": "2017-02-18T00:00:00Z", + "airline": "foo", + "responsetime": 1.0, + "event_rate": 5 + } + + - do: + index: + index: airline-data + type: response + id: 2 + body: > + { + "time": "2017-02-18T00:30:00Z", + "airline": "foo", + "responsetime": 1.0, + "event_rate": 6 + } + + - do: + index: + index: airline-data + type: response + id: 3 + body: > + { + "time": "2017-02-18T01:00:00Z", + "airline": "bar", + "responsetime": 42.0, + "event_rate": 8 + } + + - do: + index: + index: airline-data + type: response + id: 4 + body: > + { + "time": "2017-02-18T01:01:00Z", + "airline": "foo", + "responsetime": 42.0, + "event_rate": 7 + } + + - do: + indices.refresh: + index: airline-data + +--- +"Test preview scroll datafeed": + + - do: + xpack.ml.put_job: + job_id: preview-datafeed-job + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"sum","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "time_field":"time" + } + } + + - do: + xpack.ml.put_datafeed: + datafeed_id: preview-datafeed-feed + body: > + { + "job_id":"preview-datafeed-job", + "indexes":"airline-data", + "types":"response" + } + + - do: + xpack.ml.preview_datafeed: + datafeed_id: preview-datafeed-feed + - length: { $body: 4 } + - match: { 0.time: 1487376000000 } + - match: { 0.airline: foo } + - match: { 0.responsetime: 1.0 } + - match: { 1.time: 1487377800000 } + - match: { 1.airline: foo } + - match: { 1.responsetime: 1.0 } + - match: { 2.time: 1487379600000 } + - match: { 2.airline: bar } + - match: { 2.responsetime: 42.0 } + - match: { 3.time: 1487379660000 } + - match: { 3.airline: foo } + - match: { 3.responsetime: 42.0 } + +--- +"Test preview aggregation datafeed with doc_count": + + - do: + xpack.ml.put_job: + job_id: aggregation-doc-count-job + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "summary_count_field_name": "doc_count", + "detectors" :[{"function":"sum","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "time_field":"time" + } + } + + - do: + xpack.ml.put_datafeed: + datafeed_id: aggregation-doc-count-feed + body: > + { + "job_id":"aggregation-doc-count-job", + "indexes":"airline-data", + "types":"response", + "aggregations": { + "buckets": { + "histogram": { + "field": "time", + "interval": 3600000 + }, + "aggregations": { + "time": { + "max": { + "field": "time" + } + }, + "airline": { + "terms": { + "field": "airline", + "size": 100 + }, + "aggregations": { + "responsetime": { + "sum": { + "field": "responsetime" + } + } + } + } + } + } + } + } + + - do: + xpack.ml.preview_datafeed: + datafeed_id: aggregation-doc-count-feed + - length: { $body: 3 } + - match: { 0.time: 1487377800000 } + - match: { 0.airline: foo } + - match: { 0.responsetime: 2.0 } + - match: { 0.doc_count: 2 } + - match: { 1.time: 1487379660000 } + - match: { 1.airline: bar } + - match: { 1.responsetime: 42.0 } + - match: { 1.doc_count: 1 } + - match: { 1.time: 1487379660000 } + - match: { 
2.airline: foo } + - match: { 2.responsetime: 42.0 } + - match: { 2.doc_count: 1 } + +--- +"Test preview single metric aggregation datafeed with different summary count field": + + - do: + xpack.ml.put_job: + job_id: aggregation-custom-single-metric-summary-job + body: > + { + "analysis_config" : { + "bucket_span": "3600s", + "summary_count_field_name": "dc_airline", + "detectors" :[{"function":"count"}] + }, + "data_description" : { + "time_field":"time" + } + } + + - do: + xpack.ml.put_datafeed: + datafeed_id: aggregation-custom-single-metric-summary-feed + body: > + { + "job_id":"aggregation-custom-single-metric-summary-job", + "indexes":"airline-data", + "types":"response", + "aggregations": { + "buckets": { + "histogram": { + "field": "time", + "interval": 3600000 + }, + "aggregations": { + "time": { + "max": { + "field": "time" + } + }, + "dc_airline": { + "cardinality": { + "field": "airline" + } + } + } + } + } + } + + - do: + xpack.ml.preview_datafeed: + datafeed_id: aggregation-custom-single-metric-summary-feed + - length: { $body: 2 } + - match: { 0.time: 1487377800000 } + - match: { 0.dc_airline: 1 } + - is_false: 0.doc_count + - match: { 1.time: 1487379660000 } + - match: { 1.dc_airline: 2 } + - is_false: 1.doc_count + +--- +"Test preview multi metric aggregation datafeed with different summary count field": + + - do: + xpack.ml.put_job: + job_id: aggregation-custom-multi-metric-summary-job + body: > + { + "analysis_config" : { + "bucket_span": "3600s", + "summary_count_field_name": "event_rate", + "detectors" :[{"function":"mean","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "time_field":"time" + } + } + + - do: + xpack.ml.put_datafeed: + datafeed_id: aggregation-custom-multi-metric-summary-feed + body: > + { + "job_id":"aggregation-custom-multi-metric-summary-job", + "indexes":"airline-data", + "types":"response", + "aggregations": { + "buckets": { + "histogram": { + "field": "time", + "interval": 3600000 + }, + "aggregations": { + "time": { + "max": { + "field": "time" + } + }, + "airline": { + "terms": { + "field": "airline" + }, + "aggs": { + "responsetime": { + "sum": { + "field": "responsetime" + } + }, + "event_rate": { + "sum": { + "field": "event_rate" + } + } + } + } + } + } + } + } + + - do: + xpack.ml.preview_datafeed: + datafeed_id: aggregation-custom-multi-metric-summary-feed + - length: { $body: 3 } + - match: { 0.time: 1487377800000 } + - match: { 0.airline: foo } + - match: { 0.responsetime: 2.0 } + - match: { 0.event_rate: 11 } + - is_false: 0.doc_count + - match: { 1.time: 1487379660000 } + - match: { 1.airline: bar } + - match: { 1.responsetime: 42.0 } + - match: { 1.event_rate: 8 } + - is_false: 1.doc_count + - match: { 1.time: 1487379660000 } + - match: { 2.airline: foo } + - match: { 2.responsetime: 42.0 } + - match: { 2.event_rate: 7 } + - is_false: 2.doc_count + +--- +"Test preview missing datafeed": + + - do: + catch: missing + xpack.ml.preview_datafeed: + datafeed_id: missing-feed + +--- +"Test preview datafeed with unavailable index": + + - do: + xpack.ml.put_job: + job_id: unavailable-job + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"sum","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "time_field":"time" + } + } + + - do: + xpack.ml.put_datafeed: + datafeed_id: unavailable-feed + body: > + { + "job_id":"unavailable-job", + "indexes":"unavailable-data", + "types":"response" + } + + - do: + catch: missing + 
xpack.ml.preview_datafeed: + datafeed_id: unavailable-feed + +--- +"Test preview datafeed with query that matches nothing": + + - do: + xpack.ml.put_job: + job_id: empty-job + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"sum","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "time_field":"time" + } + } + + - do: + xpack.ml.put_datafeed: + datafeed_id: empty-feed + body: > + { + "job_id":"empty-job", + "indexes":"airline-data", + "types":"response", + "query": { + "term": {"airline":"missing"} + } + } + + - do: + xpack.ml.preview_datafeed: + datafeed_id: empty-feed + - length: { $body: 0 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/revert_model_snapshot.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/revert_model_snapshot.yml new file mode 100644 index 0000000000000..a66c0da12d0a9 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/revert_model_snapshot.yml @@ -0,0 +1,298 @@ +setup: + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: revert-model-snapshot + body: > + { + "job_id":"revert-model-snapshot", + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "field_delimiter":",", + "time_field":"time", + "time_format":"yyyy-MM-dd HH:mm:ssX" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.open_job: + job_id: revert-model-snapshot + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.close_job: + job_id: revert-model-snapshot + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-revert-model-snapshot + type: doc + id: "revert-model-snapshot_model_snapshot_first" + body: > + { + "job_id": "revert-model-snapshot", + "timestamp": "2016-06-02T00:00:00Z", + "snapshot_id": "first", + "description": "first snapshot", + "latest_record_time_stamp": "2016-06-02T00:00:00Z", + "latest_result_time_stamp": "2016-06-02T00:00:00Z", + "model_size_stats": { + "job_id": "revert-model-snapshot", + "model_bytes": 10, + "log_time": "2016-06-02T00:00:00Z" + }, + "quantiles": { + "job_id": "revert-model-snapshot", + "timestamp": 1464825600000, + "quantile_state": "quantiles-1" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + index: + index: .ml-anomalies-revert-model-snapshot + type: doc + id: "revert-model-snapshot_model_snapshot_second" + body: > + { + "job_id": "revert-model-snapshot", + "timestamp": "2016-06-01T00:00:00Z", + "snapshot_id": "second", + "description": "second snapshot", + "latest_record_time_stamp": "2016-06-01T00:00:00Z", + "latest_result_time_stamp": "2016-06-01T00:00:00Z", + "model_size_stats": { + "job_id": "revert-model-snapshot", + "model_bytes": 20, + "log_time": "2016-06-01T00:00:00Z" + }, + "quantiles": { + "job_id": "revert-model-snapshot", + "timestamp": 1464739200000, + "quantile_state": "quantiles-2" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-revert-model-snapshot + type: doc + id: "revert-model-snapshot_1464825600000_1" + body: > + { + "job_id": "revert-model-snapshot", + "result_type": "bucket", + "timestamp": "2016-06-02T00:00:00Z", + "bucket_span":1 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-revert-model-snapshot + type: doc + id: "revert-model-snapshot_1464782400000_1" + body: > + { + "job_id": "revert-model-snapshot", + "result_type": "bucket", + "timestamp": "2016-06-01T12:00:00Z", + "bucket_span":1 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-revert-model-snapshot + type: doc + id: "revert-model-snapshot_1462060800000_1" + body: > + { + "job_id": "revert-model-snapshot", + "result_type": "bucket", + "timestamp": "2016-05-01T00:00:00Z", + "bucket_span":1 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-revert-model-snapshot + type: doc + id: "revert-model-snapshot_1464825600000_1_1" + body: > + { + "job_id": "revert-model-snapshot", + "result_type": "record", + "timestamp": "2016-06-02T00:00:00Z", + "bucket_span":1 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-revert-model-snapshot + type: doc + id: "revert-model-snapshot_1462060800000_1_2" + body: > + { + "job_id": "revert-model-snapshot", + "result_type": "record", + "timestamp": "2016-05-01T00:00:00Z", + "bucket_span":1 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-revert-model-snapshot + type: doc + id: "revert-model-snapshot_1464825600000_1_3" + body: { + "job_id": "revert-model-snapshot", + "result_type": "influencer", + "timestamp": "2016-06-02T00:00:00Z", + "influencer_field_name": "revert-model-snapshot", + "influencer_field_value": "zoo", + "influencer_score": 50.0, + "bucket_span": 1 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + index: + index: .ml-anomalies-revert-model-snapshot + type: doc + id: "revert-model-snapshot_1462060800000_1_4" + body: + { + "job_id": "revert-model-snapshot", + "result_type": "influencer", + "timestamp": "2016-05-01T00:00:00Z", + "influencer_field_name": "revert-model-snapshot", + "influencer_field_value": "zoo", + "influencer_score": 50.0, + "bucket_span": 1 + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.refresh: + index: [.ml-anomalies-revert-model-snapshot,.ml-state] + +--- +"Test revert model with invalid snapshotId": + - do: + catch: /resource_not_found_exception/ + xpack.ml.revert_model_snapshot: + job_id: "revert-model-snapshot" + snapshot_id: "not_exist" + +--- +"Test revert model with valid snapshotId": + - do: + xpack.ml.revert_model_snapshot: + job_id: "revert-model-snapshot" + snapshot_id: "first" + + - match: { model.job_id: "revert-model-snapshot" } + - match: { model.timestamp: 1464825600000 } + - match: { model.snapshot_id: "first" } + - match: { model.snapshot_doc_count: 0 } + + - do: + xpack.ml.revert_model_snapshot: + job_id: "revert-model-snapshot" + snapshot_id: "second" + + - match: { model.job_id: "revert-model-snapshot" } + - match: { model.timestamp: 1464739200000 } + - match: { model.snapshot_id: "second" } + - match: { model.snapshot_doc_count: 0 } + +--- +"Test revert model with delete_intervening_results": + - do: + xpack.ml.get_buckets: + job_id: "revert-model-snapshot" + start: "2016-01-01T00:00:00Z" + end: "2016-12-01T00:00:00Z" + + - match: { count: 3 } + + - do: + xpack.ml.revert_model_snapshot: + job_id: "revert-model-snapshot" + snapshot_id: "second" + delete_intervening_results: true + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + indices.refresh: + index: .ml-anomalies-revert-model-snapshot + + - do: + xpack.ml.get_buckets: + job_id: "revert-model-snapshot" + start: "2016-01-01T00:00:00Z" + end: "2016-12-01T00:00:00Z" + + - match: { count: 1 } + - match: { buckets.0.job_id: "revert-model-snapshot" } + - match: { buckets.0.timestamp: 1462060800000 } + + - do: + xpack.ml.get_records: + job_id: "revert-model-snapshot" + start: "2016-01-01T00:00:00Z" + end: "2016-12-01T00:00:00Z" + + - match: { count: 1 } + - match: { records.0.job_id: "revert-model-snapshot" } + - match: { records.0.timestamp: 1462060800000 } + + - do: + xpack.ml.get_influencers: + job_id: "revert-model-snapshot" + start: "2016-01-01T00:00:00Z" + end: "2016-12-01T01:00:00Z" + + - match: { count: 1 } + - match: { influencers.0.job_id: "revert-model-snapshot" } + - match: { influencers.0.timestamp: 1462060800000 } + + - do: + xpack.ml.get_job_stats: + job_id: revert-model-snapshot + + - match: { jobs.0.data_counts.latest_record_timestamp: 1464739200000 } + +--- +"Test with unknown job id": + - do: + catch: missing + xpack.ml.revert_model_snapshot: + job_id: "non-existent-job" + snapshot_id: "second" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/start_stop_datafeed.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/start_stop_datafeed.yml new file mode 100644 index 0000000000000..d216ecfe13e1a --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/start_stop_datafeed.yml @@ -0,0 +1,446 @@ +setup: + - do: + indices.create: + index: airline-data + body: + mappings: + response: + properties: + time: + type: date + airline: + type: keyword + airport: + type: text + responsetime: + type: float + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: start-stop-datafeed-job + body: > + { + "job_id":"start-stop-datafeed-job", + "description":"Analysis of response time by airline", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}], + "influencers": ["airport"] + }, + "analysis_limits" : { + "model_memory_limit": "30mb" + }, + "data_description" : { + "format":"xcontent", + "time_field":"time", + "time_format":"epoch" + } + } + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + xpack.ml.put_datafeed: + datafeed_id: start-stop-datafeed-datafeed-1 + body: > + { + "job_id":"start-stop-datafeed-job", + "indexes":"airline-data", + "types":"response" + } + +--- +"Test start and stop datafeed happy path": + - do: + xpack.ml.open_job: + job_id: "start-stop-datafeed-job" + - do: + xpack.ml.start_datafeed: + datafeed_id: "start-stop-datafeed-datafeed-1" + start: 0 + - do: + xpack.ml.get_datafeed_stats: + datafeed_id: "start-stop-datafeed-datafeed-1" + - match: { datafeeds.0.state: started } + - do: + xpack.ml.stop_datafeed: + datafeed_id: "start-stop-datafeed-datafeed-1" + - do: + xpack.ml.get_datafeed_stats: + datafeed_id: "start-stop-datafeed-datafeed-1" + - match: { datafeeds.0.state: stopped } + +--- +"Test force stop datafeed": + - do: + xpack.ml.open_job: + job_id: "start-stop-datafeed-job" + - do: + xpack.ml.start_datafeed: + "datafeed_id": "start-stop-datafeed-datafeed-1" + "start": 0 + - do: + xpack.ml.get_datafeed_stats: + datafeed_id: "start-stop-datafeed-datafeed-1" + - match: { datafeeds.0.state: started } + + - do: + xpack.ml.stop_datafeed: + "datafeed_id": "start-stop-datafeed-datafeed-1" + force: true + - do: + xpack.ml.get_datafeed_stats: + datafeed_id: "start-stop-datafeed-datafeed-1" + - match: { datafeeds.0.state: stopped } + +--- +"Test start datafeed given start is now": + - do: + xpack.ml.open_job: + job_id: "start-stop-datafeed-job" + - do: + xpack.ml.start_datafeed: + datafeed_id: "start-stop-datafeed-datafeed-1" + start: "now" + - do: + xpack.ml.get_datafeed_stats: + datafeed_id: "start-stop-datafeed-datafeed-1" + - match: { datafeeds.0.state: started } + +--- +"Test start non existing datafeed": + - do: + catch: missing + xpack.ml.start_datafeed: + datafeed_id: "non-existing-datafeed" + start: 0 + +--- +"Test start datafeed job, but not open": + - do: + catch: conflict + xpack.ml.start_datafeed: + datafeed_id: "start-stop-datafeed-datafeed-1" + start: 0 + - do: + catch: /cannot start datafeed \[start-stop-datafeed-datafeed-1\] because job \[start-stop-datafeed-job\] is closed/ + xpack.ml.start_datafeed: + datafeed_id: "start-stop-datafeed-datafeed-1" + start: 0 + +--- +"Test start already started datafeed job": + - do: + xpack.ml.open_job: + job_id: "start-stop-datafeed-job" + - do: + xpack.ml.start_datafeed: + datafeed_id: "start-stop-datafeed-datafeed-1" + start: 0 + - do: + catch: conflict + xpack.ml.start_datafeed: + datafeed_id: "start-stop-datafeed-datafeed-1" + start: 0 + + - do: + catch: /cannot start datafeed \[start-stop-datafeed-datafeed-1\] because it has already been started/ + xpack.ml.start_datafeed: + datafeed_id: "start-stop-datafeed-datafeed-1" + start: 0 + +--- +"Test start given end earlier than start": + - do: + xpack.ml.open_job: + job_id: "start-stop-datafeed-job" + + - do: + catch: /.* start \[1485910800000\] must be earlier than end \[1485907200000\]/ + xpack.ml.start_datafeed: + datafeed_id: "start-stop-datafeed-datafeed-1" + start: "2017-02-01T01:00:00Z" + end: "2017-02-01T00:00:00Z" + +--- +"Test start given end same as start": + - do: + xpack.ml.open_job: + job_id: "start-stop-datafeed-job" + + - do: + catch: /.* start \[1485910800000\] must be earlier than end \[1485910800000\]/ + xpack.ml.start_datafeed: + datafeed_id: "start-stop-datafeed-datafeed-1" + start: "2017-02-01T01:00:00Z" + end: "2017-02-01T01:00:00Z" + +--- +"Test start given datafeed index does not exist": + - do: + xpack.ml.update_datafeed: + datafeed_id: start-stop-datafeed-datafeed-1 + body: > + { + "indexes":["utopia"] + } 
+ + - do: + xpack.ml.open_job: + job_id: "start-stop-datafeed-job" + + - do: + catch: /datafeed \[start-stop-datafeed-datafeed-1] cannot retrieve data because index \[utopia\] does not exist/ + xpack.ml.start_datafeed: + datafeed_id: "start-stop-datafeed-datafeed-1" + +--- +"Test start given field without mappings": + - do: + xpack.ml.put_job: + job_id: start-stop-datafeed-job-field-without-mappings + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"count","by_field_name":"airline2"}] + }, + "analysis_limits" : { + "model_memory_limit": "30mb" + }, + "data_description" : { + "time_field":"time" + } + } + + - do: + xpack.ml.put_datafeed: + datafeed_id: start-stop-datafeed-job-field-without-mappings-feed + body: > + { + "job_id":"start-stop-datafeed-job-field-without-mappings", + "indexes":"airline-data", + "types":"response" + } + + - do: + xpack.ml.open_job: + job_id: "start-stop-datafeed-job-field-without-mappings" + + - do: + catch: /datafeed \[start-stop-datafeed-job-field-without-mappings-feed] cannot retrieve field \[airline2\] because it has no mappings/ + xpack.ml.start_datafeed: + datafeed_id: "start-stop-datafeed-job-field-without-mappings-feed" + +--- +"Test stop non existing datafeed": + - do: + catch: missing + xpack.ml.stop_datafeed: + datafeed_id: "non-existing-datafeed" + +--- +"Test stop with expression that does not match and allow_no_datafeeds": + + - do: + xpack.ml.stop_datafeed: + datafeed_id: "missing-*" + allow_no_datafeeds: true + - match: { stopped: true } + +--- +"Test stop with expression that does not match and not allow_no_datafeeds": + + - do: + catch: missing + xpack.ml.stop_datafeed: + datafeed_id: "missing-*" + allow_no_datafeeds: false + +--- +"Test stop already stopped datafeed job is not an error": + - do: + xpack.ml.stop_datafeed: + datafeed_id: "start-stop-datafeed-datafeed-1" + + - do: + xpack.ml.stop_datafeed: + datafeed_id: "start-stop-datafeed-datafeed-1" + +--- +"Test stop given expression": + + - do: + xpack.ml.put_job: + job_id: start-stop-datafeed-job-foo-1 + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "30mb" + }, + "data_description" : { + "time_field":"time" + } + } + + - do: + xpack.ml.put_job: + job_id: start-stop-datafeed-job-foo-2 + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "30mb" + }, + "data_description" : { + "time_field":"time" + } + } + + - do: + xpack.ml.put_job: + job_id: start-stop-datafeed-job-bar-1 + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "30mb" + }, + "data_description" : { + "time_field":"time" + } + } + + - do: + xpack.ml.put_datafeed: + datafeed_id: start-stop-datafeed-job-foo-1-feed + body: > + { + "job_id":"start-stop-datafeed-job-foo-1", + "indexes":"airline-data", + "types":"response" + } + + - do: + xpack.ml.put_datafeed: + datafeed_id: start-stop-datafeed-job-foo-2-feed + body: > + { + "job_id":"start-stop-datafeed-job-foo-2", + "indexes":"airline-data", + "types":"response" + } + + - do: + xpack.ml.put_datafeed: + datafeed_id: start-stop-datafeed-job-bar-1-feed + body: > + { + "job_id":"start-stop-datafeed-job-bar-1", + "indexes":"airline-data", + "types":"response" + } + + - do: + xpack.ml.open_job: + job_id: start-stop-datafeed-job-foo-1 + - match: { opened: 
true } + - do: + xpack.ml.open_job: + job_id: start-stop-datafeed-job-foo-2 + - match: { opened: true } + - do: + xpack.ml.open_job: + job_id: start-stop-datafeed-job-bar-1 + - match: { opened: true } + + - do: + xpack.ml.start_datafeed: + datafeed_id: "start-stop-datafeed-job-foo-1-feed" + - match: { started: true } + - do: + xpack.ml.start_datafeed: + datafeed_id: "start-stop-datafeed-job-foo-2-feed" + - match: { started: true } + - do: + xpack.ml.start_datafeed: + datafeed_id: "start-stop-datafeed-job-bar-1-feed" + - match: { started: true } + + - do: + xpack.ml.stop_datafeed: + datafeed_id: "start-stop-datafeed-job-foo-*" + - match: { stopped: true } + + - do: + xpack.ml.get_datafeed_stats: + datafeed_id: "start-stop-datafeed-job-foo-*" + - match: { datafeeds.0.state: "stopped"} + - match: { datafeeds.1.state: "stopped"} + + - do: + xpack.ml.get_datafeed_stats: + datafeed_id: "start-stop-datafeed-job-bar-1-feed" + - match: { datafeeds.0.state: "started"} + +--- +"Test start datafeed when persistent task allocation disabled": + + - do: + xpack.ml.open_job: + job_id: "start-stop-datafeed-job" + - match: { opened: true } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + cluster.put_settings: + body: + transient: + cluster.persistent_tasks.allocation.enable: "none" + - match: {transient.cluster.persistent_tasks.allocation.enable: "none"} + + - do: + catch: /no persistent task assignments are allowed due to cluster settings/ + xpack.ml.start_datafeed: + datafeed_id: "start-stop-datafeed-datafeed-1" + start: 0 + + - do: + xpack.ml.get_datafeed_stats: + datafeed_id: "start-stop-datafeed-datafeed-1" + - match: { datafeeds.0.state: stopped } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + cluster.put_settings: + body: + transient: + cluster.persistent_tasks.allocation.enable: "all" + - match: {transient.cluster.persistent_tasks.allocation.enable: "all"} + + - do: + xpack.ml.start_datafeed: + datafeed_id: "start-stop-datafeed-datafeed-1" + start: 0 + + - do: + xpack.ml.get_datafeed_stats: + datafeed_id: "start-stop-datafeed-datafeed-1" + - match: { datafeeds.0.state: started } + + - do: + xpack.ml.stop_datafeed: + datafeed_id: "start-stop-datafeed-datafeed-1" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/update_model_snapshot.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/update_model_snapshot.yml new file mode 100644 index 0000000000000..6a1d6e117e924 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/update_model_snapshot.yml @@ -0,0 +1,231 @@ +setup: + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.ml.put_job: + job_id: update-model-snapshot + body: > + { + "analysis_config" : { + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + "format":"xcontent", + "time_field":"time" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + index: + index: .ml-anomalies-update-model-snapshot + type: doc + id: "update-model-snapshot_model_snapshot_snapshot-1" + body: > + { + "job_id" : "update-model-snapshot", + "timestamp": "2016-06-02T00:00:00Z", + "snapshot_id": "snapshot-1", + "snapshot_doc_count": 3, + "retain": false + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-state + type: doc + id: "update-model-snapshot_model_state_1#1" + body: > + { + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-state + type: doc + id: "update-model-snapshot_model_state_1#2" + body: > + { + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-state + type: doc + id: "update-model-snapshot_model_state_1#3" + body: > + { + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-anomalies-update-model-snapshot + type: doc + id: "update-model-snapshot_model_snapshot_snapshot-2" + body: > + { + "job_id": "update-model-snapshot", + "timestamp": "2016-06-01T00:00:00Z", + "snapshot_id": "snapshot-2", + "description": "snapshot 2 description", + "snapshot_doc_count": 2, + "retain": true + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-state + type: doc + id: "update-model-snapshot_model_state_2#1" + body: > + { + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + index: + index: .ml-state + type: doc + id: "update-model-snapshot_model_state_2#2" + body: > + { + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + indices.refresh: + index: [.ml-anomalies-update-model-snapshot,.ml-state] + +--- +"Test with valid description": + - do: + xpack.ml.get_model_snapshots: + job_id: "update-model-snapshot" + snapshot_id: "snapshot-1" + + - match: { count: 1 } + - is_false: model_snapshots.0.description + + - do: + xpack.ml.update_model_snapshot: + job_id: "update-model-snapshot" + snapshot_id: "snapshot-1" + body: > + { + "description": "new_description" + } + + - match: { acknowledged: true } + - match: { model.retain: false } + - match: { model.description: "new_description" } + + - do: + xpack.ml.get_model_snapshots: + job_id: "update-model-snapshot" + snapshot_id: "snapshot-1" + + - match: { count: 1 } + - match: { model_snapshots.0.snapshot_id: "snapshot-1" } + - match: { model_snapshots.0.timestamp: 1464825600000 } + - match: { model_snapshots.0.description: "new_description" } + +--- +"Test duplicate descriptions are allowed": + - do: + xpack.ml.update_model_snapshot: + job_id: "update-model-snapshot" + snapshot_id: "snapshot-1" + body: > + { + "description": "snapshot 2 description" + } + + - do: + xpack.ml.get_model_snapshots: + job_id: "update-model-snapshot" + + - match: { count: 2 } + - match: { model_snapshots.0.snapshot_id: "snapshot-1" } + - match: { model_snapshots.0.description: "snapshot 2 description" } + - match: { model_snapshots.1.snapshot_id: "snapshot-2" } + - match: { model_snapshots.1.description: "snapshot 2 description" } +--- +"Test with retain": + - do: + xpack.ml.update_model_snapshot: + job_id: "update-model-snapshot" + snapshot_id: "snapshot-1" + body: > + { + "retain": true + } + + - match: { acknowledged: true } + - match: { model.retain: true } + + - do: + xpack.ml.update_model_snapshot: + job_id: "update-model-snapshot" + snapshot_id: "snapshot-2" + body: > + { + "retain": false + } + + - match: { acknowledged: true } + - match: { model.retain: false } + - match: { model.description: "snapshot 2 description" } + +--- +"Test with all fields": + - do: + xpack.ml.update_model_snapshot: + job_id: "update-model-snapshot" + snapshot_id: "snapshot-1" + body: > + { + "description": "new snapshot 1 description", + "retain": true + } + + - match: { acknowledged: true } + - match: { model.description: "new snapshot 1 description" } + - match: { model.retain: true } + +--- +"Test with unknown job id": + - do: + catch: missing + xpack.ml.update_model_snapshot: + job_id: "non-existent-job" + snapshot_id: "san" + body: > + { + "description": "new foo", + "retain": true + } + +--- +"Test with unknown snapshot id": + - do: + catch: missing + xpack.ml.update_model_snapshot: + job_id: "update-model-snapshot" + snapshot_id: "snapshot-9999" + body: > + { + "description": "new description for snapshot 9999" + } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/validate.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/validate.yml new file mode 100644 index 0000000000000..1913fd0ed09f8 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/validate.yml @@ -0,0 +1,108 @@ +--- +"Test valid job config": + - do: + xpack.ml.validate: + body: > + { + "analysis_config": { + "bucket_span": "1h", + "detectors": [{"function": "metric", "field_name": "responsetime", "by_field_name": "airline"}] + }, + "data_description": { + "format": "delimited", + "field_delimiter": ",", + "time_field": "time", + "time_format": "yyyy-MM-dd HH:mm:ssX" + } + } + - match: { acknowledged: true } + +--- +"Test invalid job config": + - do: + catch: 
/.data_description. failed to parse field .format./ + xpack.ml.validate: + body: > + { + "analysis_config": { + "bucket_span": "1h", + "detectors": [{"function": "metric", "field_name": "responsetime", "by_field_name": "airline"}] + }, + "data_description": { + "format": "wrong", + "field_delimiter": ",", + "time_field": "time", + "time_format": "yyyy-MM-dd HH:mm:ssX" + } + } + +--- +"Test valid job config with job ID": + - do: + xpack.ml.validate: + body: > + { + "job_id": "validate-job-config-with-job-id", + "analysis_config": { + "bucket_span": "1h", + "detectors": [{"function": "metric", "field_name": "responsetime", "by_field_name": "airline"}] + }, + "data_description": { + "format": "delimited", + "field_delimiter": ",", + "time_field": "time", + "time_format": "yyyy-MM-dd HH:mm:ssX" + } + } + - match: { acknowledged: true } + +--- +"Test job config that is invalid only because of the job ID": + - do: + catch: /Invalid job_id; '_' can contain lowercase alphanumeric \(a-z and 0-9\), hyphens or underscores; must start and end with alphanumeric/ + xpack.ml.validate: + body: > + { + "job_id": "_", + "analysis_config": { + "bucket_span": "1h", + "detectors": [{"function": "metric", "field_name": "responsetime", "by_field_name": "airline"}] + }, + "data_description": { + "format": "delimited", + "field_delimiter": ",", + "time_field": "time", + "time_format": "yyyy-MM-dd HH:mm:ssX" + } + } + +--- +"Test job config is invalid because model snapshot id set": + + - do: + catch: /illegal_argument_exception/ + xpack.ml.validate: + body: > + { + "model_snapshot_id": "wont-create-with-this-setting", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + } + } + + - do: + catch: /The job is configured with fields \[model_snapshot_id\] that are illegal to set at job creation/ + xpack.ml.validate: + body: > + { + "model_snapshot_id": "wont-create-with-this-setting", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "data_description" : { + } + } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/validate_detector.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/validate_detector.yml new file mode 100644 index 0000000000000..709e98f77d9dc --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/validate_detector.yml @@ -0,0 +1,21 @@ +--- +"Test valid detector": + - do: + xpack.ml.validate_detector: + body: > + { + "function":"count", + "by_field_name":"airline" + } + - match: { acknowledged: true } + +--- +"Test invalid detector": + - do: + catch: /field_name must be set when the 'mean' function is used/ + xpack.ml.validate_detector: + body: > + { + "function":"mean", + "by_field_name":"airline" + } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/monitoring/bulk/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/monitoring/bulk/10_basic.yml new file mode 100644 index 0000000000000..c5d2285269249 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/monitoring/bulk/10_basic.yml @@ -0,0 +1,205 @@ +--- +"Bulk indexing of monitoring data": + + - do: + xpack.monitoring.bulk: + system_id: "kibana" + system_api_version: "6" + interval: "10s" + body: + - index: + _type: test_type + - avg-cpu: + user: 13.26 + nice: 0.17 + system: 1.51 + iowait: 0.85 + idle: 84.20 + - index: + _type: test_type + - 
avg-cpu: + user: 13.23 + nice: 0.17 + system: 1.51 + iowait: 0.85 + idle: 84.24 + + - is_false: errors + + - do: + indices.refresh: {} + + - do: + search: + index: .monitoring-kibana-* + body: { "query": { "term" : { "type": "test_type" } } } + + - match: { hits.total: 2 } + + - do: + xpack.monitoring.bulk: + system_id: "kibana" + system_api_version: "6" + interval: "123456ms" + type: "default_type" + body: + - '{"index": {}}' + - '{"field_1": "value_1"}' + - '{"index": {"_type": "custom_type"}}' + - '{"field_1": "value_2"}' + - '{"index": {}}' + - '{"field_1": "value_3"}' + - '{"index": {"_index": "_data", "_type": "kibana"}}' + - '{"field_1": "value_4"}' + + - is_false: errors + + - do: + indices.refresh: {} + + - do: + search: + index: .monitoring-kibana-* + body: { "query": { "term" : { "type": "default_type" } } } + + - match: { hits.total: 2 } + - match: { hits.hits.0._source.interval_ms: 123456 } + - match: { hits.hits.1._source.interval_ms: 123456 } + + - do: + search: + index: .monitoring-kibana-* + body: { "query": { "term" : { "type": "custom_type" } } } + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.interval_ms: 123456 } + + # We actively ignore indexing requests made to the _data index starting with 5.5 + - do: + search: + index: .monitoring-data-* + + - match: { hits.total: 0 } + + # Old system_api_version should still be accepted + - do: + xpack.monitoring.bulk: + system_id: "kibana" + system_api_version: "2" + interval: "10000ms" + type: "default_type" + body: + - '{"index": {}}' + - '{"field_1": "value_1"}' + - '{"index": {"_type": "custom_type"}}' + - '{"field_1": "value_2"}' + - '{"index": {}}' + - '{"field_1": "value_3"}' + - '{"index": {"_index": "_data", "_type": "kibana"}}' + - '{"field_1": "value_4"}' + + - is_false: errors + + - do: + indices.refresh: {} + + - do: + search: + index: .monitoring-kibana-* + body: { "query": { "term" : { "type": "default_type" } } } + + - match: { hits.total: 4 } + + - do: + search: + index: .monitoring-kibana-* + body: { "query": { "term" : { "type": "custom_type" } } } + + - match: { hits.total: 2 } + + # We actively ignore indexing requests made to the _data index starting with 5.5, even for the old versions + - do: + search: + index: .monitoring-data-* + + - match: { hits.total: 0 } + + # Missing a system_id causes it to fail + - do: + catch: bad_request + xpack.monitoring.bulk: + system_api_version: "6" + interval: "10s" + type: "default_type" + body: + - '{"index": {}}' + - '{"field_1": "value_1"}' + + # Missing a system_api_version causes it to fail + - do: + catch: bad_request + xpack.monitoring.bulk: + system_id: "kibana" + interval: "10s" + type: "default_type" + body: + - '{"index": {}}' + - '{"field_1": "value_1"}' + + # Missing an interval causes it to fail + - do: + catch: bad_request + xpack.monitoring.bulk: + system_id: "kibana" + system_api_version: "6" + type: "default_type" + body: + - '{"index": {}}' + - '{"field_1": "value_1"}' + +--- +"Bulk indexing of monitoring data on closed indices should throw an export exception": + + - do: + xpack.monitoring.bulk: + system_id: "beats" + system_api_version: "6" + interval: "5s" + body: + - index: + _type: metric_beat + - modules: + nginx: true + mysql: false + - index: + _type: file_beat + - file: + path: /var/log/dmesg + size: 31kb + + - is_false: errors + + - do: + indices.refresh: {} + + - do: + search: + index: .monitoring-beats-* + - match: { hits.total: 2 } + + - do: + indices.close: + index: .monitoring-beats-* + + - do: + catch: /export_exception/ + 
xpack.monitoring.bulk: + system_id: "beats" + system_api_version: "6" + interval: "5s" + body: + - index: + _type: file_beat + - file: + path: /var/log/auth.log + size: 5kb diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/monitoring/bulk/20_privileges.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/monitoring/bulk/20_privileges.yml new file mode 100644 index 0000000000000..b1d4158ac1550 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/monitoring/bulk/20_privileges.yml @@ -0,0 +1,140 @@ +--- +setup: + - skip: + features: headers + - do: + cluster.health: + wait_for_status: yellow + + # Creates a role and a user "Logstash Agent" who can use + # the Monitoring Bulk API and read the monitoring indices. + - do: + xpack.security.put_role: + name: "logstash_agent_role" + body: > + { + "cluster": ["cluster:admin/xpack/monitoring/bulk"], + "indices": [ + { + "privileges": ["read"], + "names": ".monitoring-*" + } + ] + } + - do: + xpack.security.put_user: + username: "logstash_agent" + body: > + { + "password": "s3krit", + "roles" : [ "logstash_agent_role" ] + } + + # Creates a role and a user "Unknown Agent" who can only + # read the monitoring indices. + - do: + xpack.security.put_role: + name: "unkown_agent_role" + body: > + { + "cluster": ["monitor"], + "indices": [ + { + "privileges": ["read"], + "names": ".monitoring-*" + } + ] + } + - do: + xpack.security.put_user: + username: "unknown_agent" + body: > + { + "password": "s3krit", + "roles" : [ "unkown_agent_role" ] + } + +--- +teardown: + - do: + xpack.security.delete_user: + username: "logstash_agent" + ignore: 404 + - do: + xpack.security.delete_role: + name: "logstash_agent_role" + ignore: 404 + - do: + xpack.security.delete_user: + username: "unknown_agent" + ignore: 404 + - do: + xpack.security.delete_role: + name: "unkown_agent_role" + ignore: 404 + +--- +"Monitoring Bulk API": + - skip: + features: catch_unauthorized + + - do: + headers: + # Authorization: logstash_agent + Authorization: "Basic bG9nc3Rhc2hfYWdlbnQ6czNrcml0" + xpack.monitoring.bulk: + system_id: "logstash" + system_api_version: "6" + interval: "10s" + body: + - index: + _type: logstash_metric + - metric: + queue: 10 + - index: + _index: _data + _type: logstash_info + - info: + license: basic + - is_false: errors + + - do: + indices.refresh: {} + + - do: + search: + index: .monitoring-logstash-* + body: { "query": { "term" : { "type": "logstash_metric" } } } + - match: { hits.total: 1 } + + # We actively ignore indexing requests made to .monitoring-data-N starting with 5.5 + - do: + search: + index: .monitoring-data-* + - match: { hits.total: 0 } + + - do: + catch: forbidden + headers: + # Authorization: unknown_agent + Authorization: "Basic dW5rbm93bl9hZ2VudDpzM2tyaXQ=" + xpack.monitoring.bulk: + system_id: "logstash" + system_api_version: "6" + interval: "10s" + body: + - index: + _type: logstash_metric + - metric: + queue: 10 + - match: { "error.type": "security_exception" } + - match: { "error.reason": "action [cluster:admin/xpack/monitoring/bulk] is unauthorized for user [unknown_agent]" } + + - do: + indices.refresh: {} + + - do: + search: + index: .monitoring-logstash-* + body: { "query": { "term" : { "type": "logstash_metric" } } } + - match: { hits.total: 1 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/role_mapping/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/role_mapping/10_basic.yml new file mode 100644 index 0000000000000..8c64e995c96ed --- /dev/null +++ 
b/x-pack/plugin/src/test/resources/rest-api-spec/test/role_mapping/10_basic.yml @@ -0,0 +1,46 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + +--- +teardown: + - do: + xpack.security.delete_role_mapping: + name: "everyone" + ignore: 404 +--- +"Test put role_mapping api": + - do: + xpack.security.put_role_mapping: + name: "everyone" + body: > + { + "enabled": true, + "roles": [ "kibana_user" ], + "rules": { "field": { "username": "*" } }, + "metadata": { + "uuid" : "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7" + } + } + - match: { role_mapping: { created: true } } + + # Get by name + - do: + xpack.security.get_role_mapping: + name: "everyone" + - match: { everyone.enabled: true } + - match: { everyone.roles.0: "kibana_user" } + - match: { everyone.rules.field.username: "*" } + + # Get all + - do: + xpack.security.get_role_mapping: + name: null + - match: { everyone.enabled: true } + - match: { everyone.roles.0: "kibana_user" } + - match: { everyone.rules.field.username: "*" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/role_mapping/20_get_missing.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/role_mapping/20_get_missing.yml new file mode 100644 index 0000000000000..31208ceaf2ea2 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/role_mapping/20_get_missing.yml @@ -0,0 +1,12 @@ +"Get missing role-mapping": + - do: + catch: missing + xpack.security.get_role_mapping: + name: 'does-not-exist' + +--- +"Get missing (multiple) role-mappings": + - do: + catch: missing + xpack.security.get_role_mapping: + name: [ 'dne1', 'dne2' ] diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/role_mapping/30_delete.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/role_mapping/30_delete.yml new file mode 100644 index 0000000000000..0939fdfb1f757 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/role_mapping/30_delete.yml @@ -0,0 +1,45 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + +--- +teardown: + - do: + xpack.security.delete_role_mapping: + name: "test_delete" + ignore: 404 +--- +"Test delete role_mapping api": + - do: + xpack.security.put_role_mapping: + name: "test_delete" + body: > + { + "enabled": true, + "roles": [ "kibana_user" ], + "rules": { "field": { "username": "*" } } + } + - match: { role_mapping: { created: true } } + + # Get by name + - do: + xpack.security.get_role_mapping: + name: "test_delete" + - match: { test_delete.enabled: true } + + # Delete it + - do: + xpack.security.delete_role_mapping: + name: "test_delete" + - match: { found: true } + + # Get by name + - do: + xpack.security.get_role_mapping: + name: "test_delete" + catch: missing diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/roles/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/roles/10_basic.yml new file mode 100644 index 0000000000000..badfcec6406e5 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/roles/10_basic.yml @@ -0,0 +1,82 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "password": "s3krit", + "roles" : [ "admin_role" ] + } + +--- +teardown: + - do: + xpack.security.delete_user: + username: "joe" + ignore: 404 + - do: + xpack.security.delete_role: + name: "admin_role" + ignore: 404 + - do: + xpack.security.delete_role: + name: "backwards_role" + 
ignore: 404 + +--- +"Test put role api": + - do: + xpack.security.put_role: + name: "admin_role" + body: > + { + "cluster": ["all"], + "metadata": { + "key1" : "val1", + "key2" : "val2" + }, + "indices": [ + { + "names": "*", + "privileges": ["all"] + } + ] + } + - match: { role: { created: true } } + + - do: + xpack.security.put_role: + name: "backwards_role" + body: > + { + "cluster": ["all"], + "indices": [ + { + "privileges": ["all"], + "names": "*" + } + ] + } + - match: { role: { created: true } } + + - do: + headers: + Authorization: "Basic am9lOnMza3JpdA==" + cluster.health: {} + - match: { timed_out: false } + + - do: + xpack.security.get_role: + name: "admin_role" + - match: { admin_role.cluster.0: "all" } + - match: { admin_role.metadata.key1: "val1" } + - match: { admin_role.metadata.key2: "val2" } + - match: { admin_role.indices.0.names.0: "*" } + - match: { admin_role.indices.0.privileges.0: "all" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/roles/11_idx_arrays.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/roles/11_idx_arrays.yml new file mode 100644 index 0000000000000..4d32866ee8b33 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/roles/11_idx_arrays.yml @@ -0,0 +1,88 @@ +--- +setup: + - skip: + features: headers + - do: + cluster.health: + wait_for_status: yellow + +--- +teardown: + - do: + xpack.security.delete_role: + name: "admin_role2" + ignore: 404 + + - do: + xpack.security.delete_user: + username: "joe" + ignore: 404 + + - do: + delete: + index: foo + type: doc + id: 1 + ignore: 404 + +--- +"Test put role api using an array of index names": + + - do: + xpack.security.put_role: + name: "admin_role2" + body: > + { + "cluster": ["all"], + "indices": [ + { + "names": ["foo", "bar"], + "privileges": ["all"] + } + ] + } + - match: { role: { created: true } } + + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "password": "s3krit", + "roles" : [ "admin_role2" ] + } + - match: { user: { created: true } } + + - do: + index: + index: foo + type: doc + id: 1 + body: { foo: bar } + + - do: + headers: + Authorization: "Basic am9lOnMza3JpdA==" + get: + index: foo + type: doc + id: 1 + - match: { _index: foo } + - match: { _type: doc } + - match: { _id: "1"} + - match: { _source: { foo: bar }} + +# test that the role works on the cluster level + - do: + headers: + Authorization: "Basic am9lOnMza3JpdA==" + cluster.health: {} + - match: { timed_out: false } + + - do: + xpack.security.get_role: + name: "admin_role2" + - match: { admin_role2.cluster.0: "all" } + - match: { admin_role2.indices.0.names.0: "foo" } + - match: { admin_role2.indices.0.names.1: "bar" } + - match: { admin_role2.indices.0.privileges.0: "all" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/roles/20_get_missing.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/roles/20_get_missing.yml new file mode 100644 index 0000000000000..ebe0a5d9df3de --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/roles/20_get_missing.yml @@ -0,0 +1,12 @@ +"Get missing role": + - do: + catch: missing + xpack.security.get_role: + name: 'foo' + +--- +"Get missing (multiple) roles": + - do: + catch: missing + xpack.security.get_role: + name: [ 'foo', 'bar' ] diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/roles/30_prohibited_role_query.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/roles/30_prohibited_role_query.yml new file mode 100644 index 0000000000000..fd90474a1fbb2 --- /dev/null +++
b/x-pack/plugin/src/test/resources/rest-api-spec/test/roles/30_prohibited_role_query.yml @@ -0,0 +1,71 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.security.put_role: + name: "role" + body: > + { + "cluster": ["all"], + "indices": [ + { + "names": "index", + "privileges": ["all"], + "query" : { + "terms" : { "field" : { "index" : "_index", "type" : "_type", "id" : "_id", "path" : "_path"} } + } + } + ] + } + + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "password": "x-pack-test-password", + "roles" : [ "role" ] + } + +--- +teardown: + - do: + xpack.security.delete_user: + username: "joe" + ignore: 404 + - do: + xpack.security.delete_role: + name: "role" + ignore: 404 + + +--- +"Test use prohibited query inside role query": + + - do: + headers: + Authorization: "Basic am9lOngtcGFjay10ZXN0LXBhc3N3b3Jk" + index: + index: index + type: type + id: 1 + body: > + { + "foo": "bar" + } + + + - do: + catch: /terms query with terms lookup isn't supported as part of a role query/ + headers: + Authorization: "Basic am9lOngtcGFjay10ZXN0LXBhc3N3b3Jk" + search: + index: index + body: { "query" : { "match_all" : {} } } + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml new file mode 100644 index 0000000000000..d172e27b2a378 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml @@ -0,0 +1,184 @@ +setup: + - do: + indices.create: + index: foo + body: + mappings: + _doc: + properties: + the_field: + type: date + value_field: + type: integer + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.put_job: + id: foo + body: > + { + "index_pattern": "foo", + "rollup_index": "foo_rollup", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "the_field", + "interval": "1h" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + +--- +"Test basic delete_job": + + - do: + xpack.rollup.get_jobs: + id: foo + + - match: + jobs: + - config: + id: "foo" + index_pattern: "foo" + rollup_index: "foo_rollup" + cron: "*/30 * * * * ?" + page_size: 10 + groups : + date_histogram: + interval: "1h" + field: "the_field" + time_zone: "UTC" + metrics: + - field: "value_field" + metrics: + - "min" + - "max" + - "sum" + timeout: "20s" + stats: + pages_processed: 0 + documents_processed: 0 + rollups_indexed: 0 + trigger_count: 0 + status: + job_state: "stopped" + + - do: + xpack.rollup.delete_job: + id: foo + - is_true: acknowledged + + - do: + xpack.rollup.get_jobs: + id: foo + - match: + jobs: [] + +--- +"Test delete job twice": + + - do: + xpack.rollup.get_jobs: + id: foo + + - match: + jobs: + - config: + id: "foo" + index_pattern: "foo" + rollup_index: "foo_rollup" + cron: "*/30 * * * * ?" 
+ page_size: 10 + groups : + date_histogram: + interval: "1h" + field: "the_field" + time_zone: "UTC" + metrics: + - field: "value_field" + metrics: + - "min" + - "max" + - "sum" + timeout: "20s" + stats: + pages_processed: 0 + documents_processed: 0 + rollups_indexed: 0 + trigger_count: 0 + status: + job_state: "stopped" + + - do: + xpack.rollup.delete_job: + id: foo + - is_true: acknowledged + + - do: + xpack.rollup.get_jobs: + id: foo + - match: + jobs: [] + +--- +"Test delete running job": + + - do: + xpack.rollup.get_jobs: + id: foo + + - match: + jobs: + - config: + id: "foo" + index_pattern: "foo" + rollup_index: "foo_rollup" + cron: "*/30 * * * * ?" + page_size: 10 + groups : + date_histogram: + interval: "1h" + field: "the_field" + time_zone: "UTC" + metrics: + - field: "value_field" + metrics: + - "min" + - "max" + - "sum" + timeout: "20s" + stats: + pages_processed: 0 + documents_processed: 0 + rollups_indexed: 0 + trigger_count: 0 + status: + job_state: "stopped" + + - do: + xpack.rollup.start_job: + id: foo + - is_true: started + + - do: + xpack.rollup.delete_job: + id: foo + - is_true: acknowledged + +--- +"Test delete non-existent job": + + - do: + catch: /the task with id does_not_exist doesn't exist/ + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.delete_job: + id: does_not_exist diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml new file mode 100644 index 0000000000000..fb2d9f59e3410 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml @@ -0,0 +1,209 @@ +setup: + - do: + indices.create: + index: foo + body: + mappings: + _doc: + properties: + the_field: + type: date + value_field: + type: integer + +--- +"Test basic get_jobs": + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.put_job: + id: foo + body: > + { + "index_pattern": "foo", + "rollup_index": "foo_rollup", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "the_field", + "interval": "1h" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + - is_true: acknowledged + + - do: + xpack.rollup.get_jobs: + id: foo + + - match: + jobs: + - config: + id: "foo" + index_pattern: "foo" + rollup_index: "foo_rollup" + cron: "*/30 * * * * ?" + page_size: 10 + groups : + date_histogram: + interval: "1h" + field: "the_field" + time_zone: "UTC" + metrics: + - field: "value_field" + metrics: + - "min" + - "max" + - "sum" + timeout: "20s" + stats: + pages_processed: 0 + documents_processed: 0 + rollups_indexed: 0 + trigger_count: 0 + status: + job_state: "stopped" +--- +"Test get with no jobs": + + - do: + xpack.rollup.get_jobs: + id: "_all" + + - length: { jobs: 0 } + +--- +"Test get missing job": + + - do: + xpack.rollup.get_jobs: + id: foo + + - match: + jobs: [] + +--- +"Test get all jobs": + + - skip: + version: all + reason: Job ordering isn't guaranteed right now, cannot test + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e.
the test setup superuser + xpack.rollup.put_job: + id: foo + body: > + { + "index_pattern": "foo", + "rollup_index": "foo_rollup", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "the_field", + "interval": "1h" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + - is_true: acknowledged + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.put_job: + id: bar + body: > + { + "index_pattern": "bar", + "rollup_index": "foo_rollup", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "the_field", + "interval": "1h" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + - is_true: acknowledged + + - do: + xpack.rollup.get_jobs: + id: "_all" + + - length: { jobs: 2 } + - match: + jobs: + - config: + id: "foo" + index_pattern: "foo" + rollup_index: "foo_rollup" + cron: "*/30 * * * * ?" + page_size: 10 + groups : + date_histogram: + interval: "1h" + field: "the_field" + time_zone: "UTC" + metrics: + - field: "value_field" + metrics: + - "min" + - "max" + - "sum" + timeout: "20s" + stats: + pages_processed: 0 + documents_processed: 0 + rollups_indexed: 0 + trigger_count: 0 + status: + job_state: "stopped" + - config: + id: "bar" + index_pattern: "bar" + rollup_index: "foo_rollup" + cron: "*/30 * * * * ?" + page_size: 10 + groups : + date_histogram: + interval: "1h" + field: "the_field" + time_zone: "UTC" + metrics: + - field: "value_field" + metrics: + - "min" + - "max" + - "sum" + timeout: "20s" + stats: + pages_processed: 0 + documents_processed: 0 + rollups_indexed: 0 + trigger_count: 0 + status: + job_state: "stopped" + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml new file mode 100644 index 0000000000000..050e49bc4b40f --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml @@ -0,0 +1,231 @@ +setup: + - do: + indices.create: + index: foo + include_type_name: false + body: + mappings: + properties: + the_field: + type: date + value_field: + type: integer + + - do: + indices.create: + index: foo2 + include_type_name: false + body: + mappings: + properties: + the_field: + type: date + value_field: + type: integer + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.put_job: + id: foo + body: > + { + "index_pattern": "foo", + "rollup_index": "foo_rollup", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "the_field", + "interval": "1h" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + +--- +"Verify one job caps": + + - do: + xpack.rollup.get_rollup_caps: + id: "foo" + + - match: + foo: + rollup_jobs: + - job_id: "foo" + rollup_index: "foo_rollup" + index_pattern: "foo" + fields: + the_field: + - agg: "date_histogram" + interval: "1h" + time_zone: "UTC" + value_field: + - agg: "min" + - agg: "max" + - agg: "sum" + +--- +"Verify two job caps": + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + xpack.rollup.put_job: + id: foo2 + body: > + { + "index_pattern": "foo", + "rollup_index": "foo_rollup", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "the_field", + "interval": "1h" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + - do: + xpack.rollup.get_rollup_caps: + id: "foo" + + - match: + foo: + rollup_jobs: + - job_id: "foo" + rollup_index: "foo_rollup" + index_pattern: "foo" + fields: + the_field: + - agg: "date_histogram" + interval: "1h" + time_zone: "UTC" + value_field: + - agg: "min" + - agg: "max" + - agg: "sum" + - job_id: "foo2" + rollup_index: "foo_rollup" + index_pattern: "foo" + fields: + the_field: + - agg: "date_histogram" + interval: "1h" + time_zone: "UTC" + value_field: + - agg: "min" + - agg: "max" + - agg: "sum" + +--- +"Verify all caps": + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.put_job: + id: foo2 + body: > + { + "index_pattern": "foo2", + "rollup_index": "foo_rollup", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "the_field", + "interval": "1h" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.put_job: + id: foo3 + body: > + { + "index_pattern": "foo", + "rollup_index": "different_index", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "the_field", + "interval": "1h" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + + - do: + xpack.rollup.get_rollup_caps: + id: "_all" + + - match: + foo: + rollup_jobs: + - job_id: "foo" + rollup_index: "foo_rollup" + index_pattern: "foo" + fields: + the_field: + - agg: "date_histogram" + interval: "1h" + time_zone: "UTC" + value_field: + - agg: "min" + - agg: "max" + - agg: "sum" + - job_id: "foo3" + rollup_index: "different_index" + index_pattern: "foo" + fields: + the_field: + - agg: "date_histogram" + interval: "1h" + time_zone: "UTC" + value_field: + - agg: "min" + - agg: "max" + - agg: "sum" + - match: + foo2: + rollup_jobs: + - job_id: "foo2" + rollup_index: "foo_rollup" + index_pattern: "foo2" + fields: + the_field: + - agg: "date_histogram" + interval: "1h" + time_zone: "UTC" + value_field: + - agg: "min" + - agg: "max" + - agg: "sum" + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml new file mode 100644 index 0000000000000..080fed7a80ec9 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml @@ -0,0 +1,163 @@ +setup: + - do: + indices.create: + index: foo + body: + mappings: + _doc: + properties: + the_field: + type: date + value_field: + type: integer + +--- +"Test basic put_job": + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + xpack.rollup.put_job: + id: foo + body: > + { + "index_pattern": "foo", + "rollup_index": "foo_rollup", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "the_field", + "interval": "1h" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + - is_true: acknowledged + + - do: + xpack.rollup.get_jobs: + id: foo + + - match: + jobs: + - config: + id: "foo" + index_pattern: "foo" + rollup_index: "foo_rollup" + cron: "*/30 * * * * ?" + page_size: 10 + groups : + date_histogram: + interval: "1h" + field: "the_field" + time_zone: "UTC" + metrics: + - field: "value_field" + metrics: + - "min" + - "max" + - "sum" + timeout: "20s" + stats: + pages_processed: 0 + documents_processed: 0 + rollups_indexed: 0 + trigger_count: 0 + status: + job_state: "stopped" + +--- +"Test put_job with existing name": + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.put_job: + id: foo + body: > + { + "index_pattern": "foo", + "rollup_index": "foo_rollup", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "the_field", + "interval": "1h" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + - is_true: acknowledged + + - do: + catch: /Cannot create rollup job \[foo\] because job was previously created \(existing metadata\)/ + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.put_job: + id: foo + body: > + { + "index_pattern": "foo", + "rollup_index": "foo_rollup", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "the_field", + "interval": "1h" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + +--- +"Try to include headers": + + - do: + catch: /unknown field \[headers\], parser not found/ + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.put_job: + id: foo + body: > + { + "index_pattern": "foo", + "rollup_index": "foo_rollup", + "headers": { + "foo": "bar" + }, + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "the_field", + "interval": "1h" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + + + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml new file mode 100644 index 0000000000000..d401d5c69bacb --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml @@ -0,0 +1,672 @@ +setup: + - do: + indices.create: + index: foo + body: + mappings: + _doc: + properties: + timestamp: + type: date + partition: + type: keyword + price: + type: integer + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + xpack.rollup.put_job: + id: foo + body: > + { + "index_pattern": "foo", + "rollup_index": "foo_rollup", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h" + }, + "terms": { + "fields": ["partition"] + } + }, + "metrics": [ + { + "field": "price", + "metrics": ["max"] + } + ] + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + bulk: + refresh: true + body: + - index: + _index: "foo_rollup" + _type: "_doc" + - timestamp.date_histogram.timestamp: "2017-01-01T05:00:00Z" + timestamp.date_histogram.interval: "1h" + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram._count: 1 + partition.terms.value: "a" + partition.terms._count: 1 + price.max.value: 1 + "_rollup.id": "foo" + "_rollup.computed": + - "timestamp.date_histogram" + - "partition.terms" + "_rollup.version": 1 + + - index: + _index: "foo_rollup" + _type: "_doc" + - timestamp.date_histogram.timestamp: "2017-01-01T06:00:00Z" + timestamp.date_histogram.interval: "1h" + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram._count: 2 + partition.terms.value: "b" + partition.terms._count: 2 + price.max.value: 2 + "_rollup.id": "foo" + "_rollup.computed": + - "timestamp.date_histogram" + - "partition.terms" + "_rollup.version": 1 + + - index: + _index: "foo_rollup" + _type: "_doc" + - timestamp.date_histogram.timestamp: "2017-01-01T07:00:00Z" + timestamp.date_histogram.interval: "1h" + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram._count: 10 + partition.terms.value: "a" + partition.terms._count: 10 + price.max.value: 3 + "_rollup.id": "foo" + "_rollup.computed": + - "timestamp.date_histogram" + - "partition.terms" + "_rollup.version": 1 + + - index: + _index: "foo_rollup" + _type: "_doc" + - timestamp.date_histogram.timestamp: "2017-01-01T08:00:00Z" + timestamp.date_histogram.interval: "1h" + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram._count: 10 + partition.terms.value: "b" + partition.terms._count: 10 + price.max.value: 4 + "_rollup.id": "foo" + "_rollup.computed": + - "timestamp.date_histogram" + - "partition.terms" + "_rollup.version": 1 + + - index: + _index: "foo_rollup" + _type: "_doc" + - timestamp.date_histogram.timestamp: "2017-01-01T08:00:00Z" + timestamp.date_histogram.interval: "1h" + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram._count: 10 + partition.terms.value: "a" + partition.terms._count: 10 + price.max.value: 3 + "_rollup.id": "foo" + "_rollup.computed": + - "timestamp.date_histogram" + - "partition.terms" + "_rollup.version": 1 + +--- +"Basic Search": + + - do: + xpack.rollup.rollup_search: + index: "foo_rollup" + body: + size: 0 + aggs: + histo: + date_histogram: + field: "timestamp" + interval: "1h" + time_zone: "UTC" + + - length: { aggregations.histo.buckets: 4 } + - match: { aggregations.histo.buckets.0.key_as_string: "2017-01-01T05:00:00.000Z" } + - match: { aggregations.histo.buckets.0.doc_count: 1 } + - match: { aggregations.histo.buckets.1.key_as_string: "2017-01-01T06:00:00.000Z" } + - match: { aggregations.histo.buckets.1.doc_count: 2 } + - match: { aggregations.histo.buckets.2.key_as_string: "2017-01-01T07:00:00.000Z" } + - match: { aggregations.histo.buckets.2.doc_count: 10 } + - match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" } + - match: { 
aggregations.histo.buckets.3.doc_count: 20 } + + +--- +"Search with Metric": + + - do: + xpack.rollup.rollup_search: + index: "foo_rollup" + body: + size: 0 + aggs: + histo: + date_histogram: + field: "timestamp" + interval: "1h" + time_zone: "UTC" + aggs: + the_max: + max: + field: "price" + + - length: { aggregations.histo.buckets: 4 } + - match: { aggregations.histo.buckets.0.key_as_string: "2017-01-01T05:00:00.000Z" } + - match: { aggregations.histo.buckets.0.doc_count: 1 } + - match: { aggregations.histo.buckets.0.the_max.value: 1 } + - match: { aggregations.histo.buckets.1.key_as_string: "2017-01-01T06:00:00.000Z" } + - match: { aggregations.histo.buckets.1.doc_count: 2 } + - match: { aggregations.histo.buckets.1.the_max.value: 2 } + - match: { aggregations.histo.buckets.2.key_as_string: "2017-01-01T07:00:00.000Z" } + - match: { aggregations.histo.buckets.2.doc_count: 10 } + - match: { aggregations.histo.buckets.2.the_max.value: 3 } + - match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" } + - match: { aggregations.histo.buckets.3.doc_count: 20 } + - match: { aggregations.histo.buckets.3.the_max.value: 4 } + +--- +"Search with Query": + + - do: + xpack.rollup.rollup_search: + index: "foo_rollup" + body: + size: 0 + query: + term: + partition: a + aggs: + histo: + date_histogram: + field: "timestamp" + interval: "1h" + time_zone: "UTC" + aggs: + the_max: + max: + field: "price" + + - length: { aggregations.histo.buckets: 4 } + - match: { aggregations.histo.buckets.0.key_as_string: "2017-01-01T05:00:00.000Z" } + - match: { aggregations.histo.buckets.0.doc_count: 1 } + - match: { aggregations.histo.buckets.0.the_max.value: 1 } + - match: { aggregations.histo.buckets.1.key_as_string: "2017-01-01T06:00:00.000Z" } + - match: { aggregations.histo.buckets.1.doc_count: 0 } + - match: { aggregations.histo.buckets.2.key_as_string: "2017-01-01T07:00:00.000Z" } + - match: { aggregations.histo.buckets.2.doc_count: 10 } + - match: { aggregations.histo.buckets.2.the_max.value: 3 } + - match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" } + - match: { aggregations.histo.buckets.3.doc_count: 10 } + - match: { aggregations.histo.buckets.3.the_max.value: 3 } + +--- +"Search with MatchAll and Second Job": + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.put_job: + id: foo2 + body: > + { + "index_pattern": "foo", + "rollup_index": "foo_rollup", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h" + }, + "terms": { + "fields": ["partition"] + } + }, + "metrics": [ + { + "field": "price", + "metrics": ["max"] + } + ] + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + bulk: + refresh: true + body: + - index: + _index: "foo_rollup" + _type: "_doc" + - timestamp.date_histogram.timestamp: "2017-01-01T05:00:00Z" + timestamp.date_histogram.interval: "1h" + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram._count: 1 + partition.terms.value: "a" + partition.terms._count: 1 + price.max.value: 1 + "_rollup.id": "foo2" + "_rollup.computed": + - "timestamp.date_histogram" + - "partition.terms" + "_rollup.version": 1 + + - index: + _index: "foo_rollup" + _type: "_doc" + - timestamp.date_histogram.timestamp: "2017-01-01T06:00:00Z" + timestamp.date_histogram.interval: "1h" + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram._count: 2 + partition.terms.value: "b" + partition.terms._count: 2 + price.max.value: 2 + "_rollup.id": "foo2" + "_rollup.computed": + - "timestamp.date_histogram" + - "partition.terms" + "_rollup.version": 1 + + - index: + _index: "foo_rollup" + _type: "_doc" + - timestamp.date_histogram.timestamp: "2017-01-01T07:00:00Z" + timestamp.date_histogram.interval: "1h" + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram._count: 10 + partition.terms.value: "a" + partition.terms._count: 10 + price.max.value: 3 + "_rollup.id": "foo2" + "_rollup.computed": + - "timestamp.date_histogram" + - "partition.terms" + "_rollup.version": 1 + + - index: + _index: "foo_rollup" + _type: "_doc" + - timestamp.date_histogram.timestamp: "2017-01-01T08:00:00Z" + timestamp.date_histogram.interval: "1h" + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram._count: 10 + partition.terms.value: "b" + partition.terms._count: 10 + price.max.value: 4 + "_rollup.id": "foo2" + "_rollup.computed": + - "timestamp.date_histogram" + - "partition.terms" + "_rollup.version": 1 + + - index: + _index: "foo_rollup" + _type: "_doc" + - timestamp.date_histogram.timestamp: "2017-01-01T08:00:00Z" + timestamp.date_histogram.interval: "1h" + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram._count: 10 + partition.terms.value: "a" + partition.terms._count: 10 + price.max.value: 3 + "_rollup.id": "foo2" + "_rollup.computed": + - "timestamp.date_histogram" + - "partition.terms" + "_rollup.version": 1 + + + - do: + xpack.rollup.rollup_search: + index: "foo_rollup" + body: + size: 0 + aggs: + histo: + date_histogram: + field: "timestamp" + interval: "1h" + time_zone: "UTC" + aggs: + the_max: + max: + field: "price" + + - length: { aggregations.histo.buckets: 4 } + - match: { aggregations.histo.buckets.0.key_as_string: "2017-01-01T05:00:00.000Z" } + - match: { aggregations.histo.buckets.0.doc_count: 1 } + - match: { aggregations.histo.buckets.0.the_max.value: 1 } + - match: { aggregations.histo.buckets.1.key_as_string: "2017-01-01T06:00:00.000Z" } + - match: { aggregations.histo.buckets.1.doc_count: 2 } + - match: { aggregations.histo.buckets.1.the_max.value: 2 } + - match: { aggregations.histo.buckets.2.key_as_string: "2017-01-01T07:00:00.000Z" } + - match: { aggregations.histo.buckets.2.doc_count: 10 } + - match: { aggregations.histo.buckets.2.the_max.value: 3 } + - match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" } + - match: { aggregations.histo.buckets.3.doc_count: 20 } + - match: { aggregations.histo.buckets.3.the_max.value: 4 } + +--- +"Search with Query and Second Job": + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + xpack.rollup.put_job: + id: foo2 + body: > + { + "index_pattern": "foo", + "rollup_index": "foo_rollup", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h" + }, + "terms": { + "fields": ["partition"] + } + }, + "metrics": [ + { + "field": "price", + "metrics": ["max"] + } + ] + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + bulk: + refresh: true + body: + - index: + _index: "foo_rollup" + _type: "_doc" + - timestamp.date_histogram.timestamp: "2017-01-01T05:00:00Z" + timestamp.date_histogram.interval: "1h" + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram._count: 1 + partition.terms.value: "a" + partition.terms._count: 1 + price.max.value: 1 + "_rollup.id": "foo2" + "_rollup.computed": + - "timestamp.date_histogram" + - "partition.terms" + "_rollup.version": 1 + + - index: + _index: "foo_rollup" + _type: "_doc" + - timestamp.date_histogram.timestamp: "2017-01-01T06:00:00Z" + timestamp.date_histogram.interval: "1h" + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram._count: 2 + partition.terms.value: "b" + partition.terms._count: 2 + price.max.value: 2 + "_rollup.id": "foo2" + "_rollup.computed": + - "timestamp.date_histogram" + - "partition.terms" + "_rollup.version": 1 + + - index: + _index: "foo_rollup" + _type: "_doc" + - timestamp.date_histogram.timestamp: "2017-01-01T07:00:00Z" + timestamp.date_histogram.interval: "1h" + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram._count: 10 + partition.terms.value: "a" + partition.terms._count: 10 + price.max.value: 3 + "_rollup.id": "foo2" + "_rollup.computed": + - "timestamp.date_histogram" + - "partition.terms" + "_rollup.version": 1 + + - index: + _index: "foo_rollup" + _type: "_doc" + - timestamp.date_histogram.timestamp: "2017-01-01T08:00:00Z" + timestamp.date_histogram.interval: "1h" + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram._count: 10 + partition.terms.value: "b" + partition.terms._count: 10 + price.max.value: 4 + "_rollup.id": "foo2" + "_rollup.computed": + - "timestamp.date_histogram" + - "partition.terms" + "_rollup.version": 1 + + - index: + _index: "foo_rollup" + _type: "_doc" + - timestamp.date_histogram.timestamp: "2017-01-01T08:00:00Z" + timestamp.date_histogram.interval: "1h" + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram._count: 10 + partition.terms.value: "a" + partition.terms._count: 10 + price.max.value: 3 + "_rollup.id": "foo2" + "_rollup.computed": + - "timestamp.date_histogram" + - "partition.terms" + "_rollup.version": 1 + + + - do: + xpack.rollup.rollup_search: + index: "foo_rollup" + body: + size: 0 + query: + term: + partition: a + aggs: + histo: + date_histogram: + field: "timestamp" + interval: "1h" + time_zone: "UTC" + aggs: + the_max: + max: + field: "price" + + - length: { aggregations.histo.buckets: 4 } + - match: { aggregations.histo.buckets.0.key_as_string: "2017-01-01T05:00:00.000Z" } + - match: { aggregations.histo.buckets.0.doc_count: 1 } + - match: { aggregations.histo.buckets.0.the_max.value: 1 } + - match: { aggregations.histo.buckets.1.key_as_string: "2017-01-01T06:00:00.000Z" } + - match: { aggregations.histo.buckets.1.doc_count: 0 } + - match: { aggregations.histo.buckets.2.key_as_string: "2017-01-01T07:00:00.000Z" } + - match: { aggregations.histo.buckets.2.doc_count: 10 } + - 
match: { aggregations.histo.buckets.2.the_max.value: 3 } + - match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" } + - match: { aggregations.histo.buckets.3.doc_count: 10 } + - match: { aggregations.histo.buckets.3.the_max.value: 3 } + +--- +"Search with Query and Second Job different intervals": + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.put_job: + id: foo2 + body: > + { + "index_pattern": "foo", + "rollup_index": "foo_rollup", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1d" + }, + "terms": { + "fields": ["partition"] + } + }, + "metrics": [ + { + "field": "price", + "metrics": ["max"] + } + ] + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + bulk: + refresh: true + body: + - index: + _index: "foo_rollup" + _type: "_doc" + - timestamp.date_histogram.timestamp: "2017-01-01T05:00:00Z" + timestamp.date_histogram.interval: "1h" + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram._count: 1 + partition.terms.value: "a" + partition.terms._count: 1 + price.max.value: 1 + "_rollup.id": "foo2" + "_rollup.computed": + - "timestamp.date_histogram" + - "partition.terms" + "_rollup.version": 1 + + - index: + _index: "foo_rollup" + _type: "_doc" + - timestamp.date_histogram.timestamp: "2017-01-01T06:00:00Z" + timestamp.date_histogram.interval: "1h" + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram._count: 2 + partition.terms.value: "b" + partition.terms._count: 2 + price.max.value: 2 + "_rollup.id": "foo2" + "_rollup.computed": + - "timestamp.date_histogram" + - "partition.terms" + "_rollup.version": 1 + + - index: + _index: "foo_rollup" + _type: "_doc" + - timestamp.date_histogram.timestamp: "2017-01-01T07:00:00Z" + timestamp.date_histogram.interval: "1h" + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram._count: 10 + partition.terms.value: "a" + partition.terms._count: 10 + price.max.value: 3 + "_rollup.id": "foo2" + "_rollup.computed": + - "timestamp.date_histogram" + - "partition.terms" + "_rollup.version": 1 + + - index: + _index: "foo_rollup" + _type: "_doc" + - timestamp.date_histogram.timestamp: "2017-01-01T08:00:00Z" + timestamp.date_histogram.interval: "1h" + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram._count: 10 + partition.terms.value: "b" + partition.terms._count: 10 + price.max.value: 4 + "_rollup.id": "foo2" + "_rollup.computed": + - "timestamp.date_histogram" + - "partition.terms" + "_rollup.version": 1 + + - index: + _index: "foo_rollup" + _type: "_doc" + - timestamp.date_histogram.timestamp: "2017-01-01T08:00:00Z" + timestamp.date_histogram.interval: "1h" + timestamp.date_histogram.time_zone: "UTC" + timestamp.date_histogram._count: 10 + partition.terms.value: "a" + partition.terms._count: 10 + price.max.value: 3 + "_rollup.id": "foo2" + "_rollup.computed": + - "timestamp.date_histogram" + - "partition.terms" + "_rollup.version": 1 + + - do: + xpack.rollup.rollup_search: + index: "foo_rollup" + body: + size: 0 + query: + term: + partition: a + aggs: + histo: + date_histogram: + field: "timestamp" + interval: "1h" + time_zone: "UTC" + aggs: + the_max: + max: + field: "price" + + - length: { aggregations.histo.buckets: 4 } + - match: { 
aggregations.histo.buckets.0.key_as_string: "2017-01-01T05:00:00.000Z" } + - match: { aggregations.histo.buckets.0.doc_count: 1 } + - match: { aggregations.histo.buckets.0.the_max.value: 1 } + - match: { aggregations.histo.buckets.1.key_as_string: "2017-01-01T06:00:00.000Z" } + - match: { aggregations.histo.buckets.1.doc_count: 0 } + - match: { aggregations.histo.buckets.2.key_as_string: "2017-01-01T07:00:00.000Z" } + - match: { aggregations.histo.buckets.2.doc_count: 10 } + - match: { aggregations.histo.buckets.2.the_max.value: 3 } + - match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" } + - match: { aggregations.histo.buckets.3.doc_count: 10 } + - match: { aggregations.histo.buckets.3.the_max.value: 3 } + + + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml new file mode 100644 index 0000000000000..7adba9035ebd5 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml @@ -0,0 +1,66 @@ +setup: + - do: + indices.create: + index: foo + body: + mappings: + _doc: + properties: + the_field: + type: date + value_field: + type: integer + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.put_job: + id: foo + body: > + { + "index_pattern": "foo", + "rollup_index": "foo_rollup", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "the_field", + "interval": "1h" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + +--- +"Test start non-existent job": + + - do: + catch: /Task for Rollup Job \[does_not_exist\] not found/ + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.start_job: + id: does_not_exist + + +--- +"Test start job twice": + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.start_job: + id: foo + - is_true: started + + - do: + catch: /Cannot start task for Rollup Job \[foo\] because state was/ + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.start_job: + id: foo + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml new file mode 100644 index 0000000000000..42a1dea8163fb --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml @@ -0,0 +1,84 @@ +setup: + - do: + indices.create: + index: foo + body: + mappings: + _doc: + properties: + the_field: + type: date + value_field: + type: integer + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e.
the test setup superuser + xpack.rollup.put_job: + id: foo + body: > + { + "index_pattern": "foo", + "rollup_index": "foo_rollup", + "cron": "*/30 * * * * ?", + "page_size" :10, + "groups" : { + "date_histogram": { + "field": "the_field", + "interval": "1h" + } + }, + "metrics": [ + { + "field": "value_field", + "metrics": ["min", "max", "sum"] + } + ] + } + +--- +"Test stop non-existent job": + + - do: + catch: /Task for Rollup Job \[does_not_exist\] not found/ + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.stop_job: + id: does_not_exist + + +--- +"Test stop job twice": + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.start_job: + id: foo + - is_true: started + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.stop_job: + id: foo + - is_true: stopped + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.stop_job: + id: foo + - is_true: stopped + +--- +"Test stop non-started job": + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + xpack.rollup.stop_job: + id: foo + + - is_true: stopped + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/10_index_doc.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/10_index_doc.yml new file mode 100644 index 0000000000000..b5132fc75e084 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/10_index_doc.yml @@ -0,0 +1,257 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.security.put_role: + name: "mixed_role" + body: > + { + "indices": [ + { "names": ["only_read"], "privileges": ["read"] }, + { "names": ["only_index"], "privileges": ["index"] }, + { "names": ["only_delete"], "privileges": ["delete"] }, + { "names": ["everything"], "privileges": ["all"] } + ] + } + + - do: + xpack.security.put_user: + username: "test_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "mixed_role" ], + "full_name" : "user with mixed privileges to multiple indices" + } + + - do: + indices.create: + index: only_read + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" + + - do: + indices.create: + index: only_index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" + - do: + indices.create: + index: only_delete + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" + + - do: + indices.create: + index: everything + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" +--- +teardown: + - do: + xpack.security.delete_user: + username: "test_user" + ignore: 404 + + - do: + xpack.security.delete_role: + name: "mixed_role" + ignore: 404 + +--- +"Test indexing a document when allowed": + + - do: + headers: { Authorization:
"Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + create: + id: 1 + index: only_index + type: doc + body: > + { + "name" : "doc1" + } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + create: + id: 2 + index: everything + type: doc + body: > + { + "name" : "doc2" + } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: + - '{"index": {"_index": "only_index", "_type": "doc", "_id": "3"}}' + - '{"name": "doc3"}' + - '{"index": {"_index": "everything", "_type": "doc", "_id": "4"}}' + - '{"name": "doc4"}' + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: + - '{"index": {"_index": "only_index", "_type": "doc", "_id": "5"}}' + - '{"name": "doc5"}' + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: + - '{"index": {"_index": "everything", "_type": "doc", "_id": "6"}}' + - '{"name": "doc6"}' + + - do: # superuser + search: + index: only_index + + - match: { hits.total: 3 } + + - do: # superuser + search: + index: everything + + - match: { hits.total: 3 } + +--- +"Test indexing a document when not allowed": + - do: + catch: forbidden + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + create: + refresh: true + id: 7 + index: only_read + type: doc + body: > + { + "name" : "doc7" + } + + - do: + catch: forbidden + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + create: + refresh: true + id: 8 + index: only_delete + type: doc + body: > + { + "name" : "doc8" + } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: + - '{"index": {"_index": "only_read", "_type": "doc", "_id": "9"}}' + - '{"name": "doc9"}' + - '{"index": {"_index": "only_delete", "_type": "doc", "_id": "10"}}' + - '{"name": "doc10"}' + - match: { errors: true } + - match: { items.0.index.status: 403 } + - match: { items.0.index.error.type: "security_exception" } + - match: { items.1.index.status: 403 } + - match: { items.1.index.error.type: "security_exception" } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: + - '{"index": {"_index": "only_read", "_type": "doc", "_id": "11"}}' + - '{"name": "doc11"}' + - match: { errors: true } + - match: { items.0.index.status: 403 } + - match: { items.0.index.error.type: "security_exception" } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: + - '{"index": {"_index": "only_delete", "_type": "doc", "_id": "12"}}' + - '{"name": "doc12"}' + - match: { errors: true } + - match: { items.0.index.status: 403 } + - match: { items.0.index.error.type: "security_exception" } + + - do: # superuser + search: + index: only_read + - match: { hits.total: 0 } + + - do: # superuser + search: + index: only_delete + - match: { hits.total: 0 } + +--- +"Test bulk indexing documents when only some are allowed": + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: + - '{"index": {"_index": "only_read", "_type": "doc", "_id": "13"}}' + - '{"name": "doc13"}' + - '{"index": {"_index": "only_index", "_type": "doc", 
"_id": "14"}}' + - '{"name": "doc14"}' + - match: { errors: true } + - match: { items.0.index.status: 403 } + - match: { items.0.index.error.type: "security_exception" } + - match: { items.1.index.status: 201 } + + - do: # superuser + search: + index: only_index + body: { "query": { "term": { "_id": "14" } } } + - match: { hits.total: 1 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/11_delete_doc.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/11_delete_doc.yml new file mode 100644 index 0000000000000..3fd523ac495ce --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/11_delete_doc.yml @@ -0,0 +1,331 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.security.put_role: + name: "mixed_role" + body: > + { + "indices": [ + { "names": ["only_read"], "privileges": ["read"] }, + { "names": ["only_index"], "privileges": ["index"] }, + { "names": ["only_delete"], "privileges": ["delete"] }, + { "names": ["everything"], "privileges": ["all"] } + ] + } + + - do: + xpack.security.put_user: + username: "test_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "mixed_role" ], + "full_name" : "user with mixed privileges to multiple indices" + } + + - do: + indices.create: + index: only_read + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" + + - do: + indices.create: + index: only_index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" + - do: + indices.create: + index: only_delete + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" + + - do: + indices.create: + index: everything + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "only_read", "_type": "doc", "_id": "1"}}' + - '{"name": "doc1"}' + - '{"index": {"_index": "only_index", "_type": "doc", "_id": "2"}}' + - '{"name": "doc2"}' + - '{"index": {"_index": "only_delete", "_type": "doc", "_id": "3"}}' + - '{"name": "doc3"}' + - '{"index": {"_index": "only_delete", "_type": "doc", "_id": "4"}}' + - '{"name": "doc4"}' + - '{"index": {"_index": "only_delete", "_type": "doc", "_id": "5"}}' + - '{"name": "doc5"}' + - '{"index": {"_index": "only_delete", "_type": "doc", "_id": "6"}}' + - '{"name": "doc6"}' + - '{"index": {"_index": "only_delete", "_type": "doc", "_id": "7"}}' + - '{"name": "doc7"}' + - '{"index": {"_index": "everything", "_type": "doc", "_id": "8"}}' + - '{"name": "doc8"}' + - '{"index": {"_index": "everything", "_type": "doc", "_id": "9"}}' + - '{"name": "doc9"}' + - '{"index": {"_index": "everything", "_type": "doc", "_id": "10"}}' + - '{"name": "doc10"}' + +--- +teardown: + - do: + xpack.security.delete_user: + username: "test_user" + ignore: 404 + + - do: + xpack.security.delete_role: + name: "mixed_role" + ignore: 404 + +--- +"Test deleting a document when allowed": + + - do: # superuser + search: + index: only_delete + body: { "query": { "terms": { "_id": [ "3", "4", "5" ] } } } + - match: { hits.total: 3 } + + - do: # superuser + search: + index: everything + body: { "query": { "terms": { "_id": [ "8", "9", "10" ] } } } + - match: { hits.total: 3 } + + - do: + 
headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + delete: + refresh: true + index: only_delete + type: doc + id: 3 + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + delete: + refresh: true + index: everything + type: doc + id: 8 + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: + - '{"delete": {"_index": "only_delete", "_type": "doc", "_id": "4"}}' + - '{"delete": {"_index": "everything" , "_type": "doc", "_id": "9"}}' + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: # The rest test won't send streaming content unless it has multiple bodies, so we send the same delete twice + - '{"delete": {"_index": "only_delete", "_type": "doc", "_id": "5"}}' + - '{"delete": {"_index": "only_delete", "_type": "doc", "_id": "5"}}' + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: # The rest test won't send streaming content unless it has multiple bodies, so we send the same delete twice + - delete: + _index: everything + _type: doc + _id: 10 + - delete: + _index: everything + _type: doc + _id: 10 + + - do: # superuser + search: + index: only_delete + body: { "query": { "terms": { "_id": [ "3", "4", "5" ] } } } + - match: { hits.total: 0 } + + - do: # superuser + search: + index: everything + body: { "query": { "terms": { "_id": [ "8", "9", "10" ] } } } + - match: { hits.total: 0 } + +--- +"Test deleting a document when not allowed": + - do: + catch: forbidden + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + delete: + refresh: true + index: only_read + type: doc + id: 1 + + - do: + catch: forbidden + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + delete: + refresh: true + index: only_index + type: doc + id: 2 + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: + - '{"delete": {"_index": "only_read" , "_type": "doc", "_id": "1"}}' + - '{"delete": {"_index": "only_index", "_type": "doc", "_id": "2"}}' + + - match: { errors: true } + - match: { items.0.delete.status: 403 } + - match: { items.0.delete.error.type: "security_exception" } + - match: { items.1.delete.status: 403 } + - match: { items.1.delete.error.type: "security_exception" } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: # The rest test won't send streaming content unless it has multiple bodies, so we send the same delete twice + - '{"delete": {"_index": "only_read" , "_type": "doc", "_id": "1"}}' + - '{"delete": {"_index": "only_read" , "_type": "doc", "_id": "1"}}' + + - match: { errors: true } + - match: { items.0.delete.status: 403 } + - match: { items.0.delete.error.type: "security_exception" } + - match: { items.1.delete.status: 403 } + - match: { items.1.delete.error.type: "security_exception" } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: # The rest test won't send streaming content unless it has multiple bodies, so we send the same delete twice + - '{"delete": {"_index": "only_index", "_type": "doc", "_id": "2"}}' + - '{"delete": {"_index": "only_index", "_type": 
"doc", "_id": "2"}}' + + - match: { errors: true } + - match: { items.0.delete.status: 403 } + - match: { items.0.delete.error.type: "security_exception" } + - match: { items.1.delete.status: 403 } + - match: { items.1.delete.error.type: "security_exception" } + + - do: # superuser + search: + index: only_read + + - match: { hits.total: 1 } + + - do: # superuser + search: + index: only_index + + - match: { hits.total: 1 } + +--- +"Test bulk delete documents when only some are allowed": + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: + - '{"delete": {"_index": "only_read" , "_type": "doc", "_id": "1"}}' + - '{"delete": {"_index": "only_delete", "_type": "doc", "_id": "6"}}' + - match: { errors: true } + - match: { items.0.delete.status: 403 } + - match: { items.0.delete.error.type: "security_exception" } + - match: { items.1.delete.status: 200 } + + - do: # superuser + search: + index: only_read + body: { "query": { "term": { "_id": "1" } } } + - match: { hits.total: 1 } + + - do: # superuser + search: + index: only_delete + body: { "query": { "term": { "_id": "6" } } } + - match: { hits.total: 0 } + +--- +"Test bulk delete and index documents when only some are allowed": + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: + - '{"index" : {"_index": "only_delete", "_type": "doc", "_id": "11"}}' + - '{"name" : "doc11"}' + - '{"delete": {"_index": "only_delete", "_type": "doc", "_id": "7"}}' + - '{"index" : {"_index": "only_index", "_type": "doc", "_id": "12"}}' + - '{"name" : "doc12"}' + - '{"delete": {"_index": "only_index", "_type": "doc", "_id": "2"}}' + - match: { errors: true } + - match: { items.0.index.status: 403 } + - match: { items.0.index.error.type: "security_exception" } + - match: { items.1.delete.status: 200 } + - match: { items.2.index.status: 201 } + - match: { items.3.delete.status: 403 } + - match: { items.3.delete.error.type: "security_exception" } + + - do: # superuser + search: + index: only_delete + body: { "query": { "terms": { "_id": [ "11", "7" ] } } } + # 11 wasn't created, 7 was deleted + - match: { hits.total: 0 } + + - do: # superuser + search: + index: only_index + body: { "query": { "terms": { "_id": [ "12", "2" ] } } } + # 12 was created, 2 wasn't deleted + - match: { hits.total: 2 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/12_index_alias.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/12_index_alias.yml new file mode 100644 index 0000000000000..44d91d691e1c2 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/12_index_alias.yml @@ -0,0 +1,312 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.security.put_role: + name: "mixed_role" + body: > + { + "indices": [ + { "names": ["can_read_1", "can_read_2" ], "privileges": ["read"] }, + { "names": ["can_write_1", "can_write_2", "can_write_3" ], "privileges": ["read", "write"] } + ] + } + + - do: + xpack.security.put_user: + username: "test_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "mixed_role" ], + "full_name" : "user with mixed privileges to multiple indices" + } + + - do: + indices.create: + index: read_index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" + + - do: + 
indices.create: + index: write_index_1 + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" + + - do: + indices.create: + index: write_index_2 + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" + + - do: + indices.put_alias: + index: read_index + name: can_read_1 + + - do: + indices.put_alias: + index: read_index + name: can_read_2 + + - do: + indices.put_alias: + index: write_index_1 + name: can_write_1 + + - do: + indices.put_alias: + index: write_index_1 + name: can_write_2 + + - do: + indices.put_alias: + index: write_index_2 + name: can_write_3 + +--- +teardown: + - do: + xpack.security.delete_user: + username: "test_user" + ignore: 404 + + - do: + xpack.security.delete_role: + name: "mixed_role" + ignore: 404 + + - do: + indices.delete_alias: + index: "read_index" + name: [ "can_read_1", "can_read_2" ] + ignore: 404 + + - do: + indices.delete_alias: + index: "write_index_1" + name: [ "can_write_1", "can_write_2" ] + ignore: 404 + + - do: + indices.delete: + index: [ "write_index_1", "read_index" ] + ignore: 404 + +--- +"Test indexing documents into an alias, when permitted": + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + create: + id: 1 + index: can_write_1 + type: doc + body: > + { + "name" : "doc1" + } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + create: + id: 2 + index: can_write_2 + type: doc + body: > + { + "name" : "doc2" + } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: + - '{"index": {"_index": "can_write_1", "_type": "doc", "_id": "3"}}' + - '{"name": "doc3"}' + - '{"index": {"_index": "can_write_1", "_type": "doc", "_id": "4"}}' + - '{"name": "doc4"}' + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: + - '{"index": {"_index": "can_write_1", "_type": "doc", "_id": "5"}}' + - '{"name": "doc5"}' + - '{"index": {"_index": "can_write_2", "_type": "doc", "_id": "6"}}' + - '{"name": "doc6"}' + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: + - '{"index": {"_index": "can_write_1", "_type": "doc", "_id": "7"}}' + - '{"name": "doc7"}' + - '{"index": {"_index": "can_write_2", "_type": "doc", "_id": "8"}}' + - '{"name": "doc8"}' + - '{"index": {"_index": "can_write_3", "_type": "doc", "_id": "9"}}' + - '{"name": "doc9"}' + + - do: # superuser + search: + index: write_index_1 + - match: { hits.total: 8 } + + - do: # superuser + search: + index: write_index_2 + - match: { hits.total: 1 } + +--- +"Test indexing documents into an alias, when forbidden": + - do: + catch: forbidden + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + create: + refresh: true + id: 7 + index: can_read_1 + type: doc + body: > + { + "name" : "doc7" + } + + - do: + catch: forbidden + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + create: + refresh: true + id: 8 + index: can_read_2 + type: doc + body: > + { + "name" : "doc8" + } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: + - '{"index": {"_index": "can_read_1", "_type": 
"doc", "_id": "9"}}' + - '{"name": "doc9"}' + - '{"index": {"_index": "can_read_1", "_type": "doc", "_id": "10"}}' + - '{"name": "doc10"}' + - match: { errors: true } + - match: { items.0.index.status: 403 } + - match: { items.0.index.error.type: "security_exception" } + - match: { items.1.index.status: 403 } + - match: { items.1.index.error.type: "security_exception" } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: + - '{"index": {"_index": "can_read_1", "_type": "doc", "_id": "11"}}' + - '{"name": "doc11"}' + - '{"index": {"_index": "can_read_2", "_type": "doc", "_id": "12"}}' + - '{"name": "doc12"}' + - match: { errors: true } + - match: { items.0.index.status: 403 } + - match: { items.0.index.error.type: "security_exception" } + - match: { items.1.index.status: 403 } + - match: { items.1.index.error.type: "security_exception" } + + - do: # superuser + search: + index: read_index + - match: { hits.total: 0 } + +--- +"Test bulk indexing into an alias when only some are allowed": + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: + - '{"index": {"_index": "can_read_1", "_type": "doc", "_id": "13"}}' + - '{"name": "doc13"}' + - '{"index": {"_index": "can_write_1", "_type": "doc", "_id": "14"}}' + - '{"name": "doc14"}' + - match: { errors: true } + - match: { items.0.index.status: 403 } + - match: { items.0.index.error.type: "security_exception" } + - match: { items.1.index.status: 201 } + + - do: # superuser + search: + index: write_index_1 + body: { "query": { "term": { "_id": "14" } } } + - match: { hits.total: 1 } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + refresh: true + body: + - '{"index": {"_index": "can_read_1", "_type": "doc", "_id": "15"}}' + - '{"name": "doc15"}' + - '{"index": {"_index": "can_write_1", "_type": "doc", "_id": "16"}}' + - '{"name": "doc16"}' + - '{"index": {"_index": "can_read_2", "_type": "doc", "_id": "17"}}' + - '{"name": "doc17"}' + - '{"index": {"_index": "can_write_2", "_type": "doc", "_id": "18"}}' + - '{"name": "doc18"}' + - '{"index": {"_index": "can_write_3", "_type": "doc", "_id": "19"}}' + - '{"name": "doc19"}' + - match: { errors: true } + - match: { items.0.index.status: 403 } + - match: { items.0.index.error.type: "security_exception" } + - match: { items.1.index.status: 201 } + - match: { items.2.index.status: 403 } + - match: { items.2.index.error.type: "security_exception" } + - match: { items.3.index.status: 201 } + - match: { items.4.index.status: 201 } + + - do: # superuser + search: + index: write_index_1 + body: { "query": { "terms": { "_id": [ "16", "18" ] } } } + - match: { hits.total: 2 } + - do: # superuser + search: + index: write_index_2 + body: { "query": { "terms": { "_id": [ "19" ] } } } + - match: { hits.total: 1 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/13_index_datemath.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/13_index_datemath.yml new file mode 100644 index 0000000000000..7f3a20a607452 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/13_index_datemath.yml @@ -0,0 +1,138 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.security.put_role: + name: "mixed_role" + body: > + { + "indices": [ + { "names": ["read-*" ], 
"privileges": ["read"] }, + { "names": ["write-*" ], "privileges": ["all"] } + ] + } + + - do: + xpack.security.put_user: + username: "test_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "mixed_role" ], + "full_name" : "user with mixed privileges to multiple indices" + } + +--- +teardown: + - do: + xpack.security.delete_user: + username: "test_user" + ignore: 404 + + - do: + xpack.security.delete_role: + name: "mixed_role" + ignore: 404 + +--- +"Test indexing documents with datemath, when permitted": + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + index: + id: 1 + index: "" + type: doc + body: > + { + "name" : "doc1" + } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + body: + - '{"index": {"_index": "", "_type": "doc", "_id": "2"}}' + - '{"name": "doc2"}' + - '{"index": {"_index": "", "_type": "doc", "_id": "3"}}' + - '{"name": "doc3"}' + - match: { errors: false } + - match: { items.0.index.status: 201 } + - match: { items.1.index.status: 201 } + + - do: # superuser + indices.refresh: + index: "_all" + + - do: # superuser + search: + index: "write-*" + - match: { hits.total: 3 } + +--- +"Test indexing documents with datemath, when forbidden": + - do: + catch: forbidden + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + index: + id: 4 + index: "" + type: doc + body: > + { + "name" : "doc4" + } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + body: + - '{"index": {"_index": "", "_type": "doc", "_id": "5"}}' + - '{"name": "doc5"}' + - '{"index": {"_index": "", "_type": "doc", "_id": "6"}}' + - '{"name": "doc6"}' + - match: { errors: true } + - match: { items.0.index.status: 403 } + - match: { items.0.index.error.type: "security_exception" } + - match: { items.1.index.status: 403 } + - match: { items.1.index.error.type: "security_exception" } + + - do: # superuser + indices.refresh: + index: "_all" + + - do: # superuser + search: + index: "read-*" + - match: { hits.total: 0 } + +--- +"Test bulk indexing with datemath when only some are allowed": + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + bulk: + body: + - '{"index": {"_index": "", "_type": "doc", "_id": "7"}}' + - '{"name": "doc7"}' + - '{"index": {"_index": "", "_type": "doc", "_id": "8"}}' + - '{"name": "doc8"}' + - match: { errors: true } + - match: { items.0.index.status: 403 } + - match: { items.0.index.error.type: "security_exception" } + - match: { items.1.index.status: 201 } + + - do: # superuser + indices.refresh: + index: "_all" + + - do: # superuser + search: + index: write-* + body: { "query": { "term": { "_id": "8" } } } + - match: { hits.total: 1 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/20_get_doc.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/20_get_doc.yml new file mode 100644 index 0000000000000..3767ca5dd27f1 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/20_get_doc.yml @@ -0,0 +1,291 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.security.put_role: + name: "mixed_role" + body: > + { + "indices": [ + { "names": ["only_read"], "privileges": ["read"] }, + { "names": ["only_index"], "privileges": ["index"] }, + { "names": ["only_delete"], "privileges": 
["delete"] }, + { "names": ["read_write"], "privileges": ["read", "write"] }, + { "names": ["everything"], "privileges": ["all"] } + ] + } + + - do: + xpack.security.put_user: + username: "test_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "mixed_role" ], + "full_name" : "user with mixed privileges to multiple indices" + } + + - do: + indices.create: + index: only_read + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" + + - do: + indices.create: + index: only_index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" + - do: + indices.create: + index: only_delete + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" + - do: + indices.create: + index: read_write + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" + - do: + indices.create: + index: everything + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "only_read", "_type": "doc", "_id": "1"}}' + - '{"name": "doc1"}' + - '{"index": {"_index": "only_index", "_type": "doc", "_id": "2"}}' + - '{"name": "doc2"}' + - '{"index": {"_index": "only_delete", "_type": "doc", "_id": "3"}}' + - '{"name": "doc3"}' + - '{"index": {"_index": "read_write", "_type": "doc", "_id": "4"}}' + - '{"name": "doc4"}' + - '{"index": {"_index": "everything", "_type": "doc", "_id": "5"}}' + - '{"name": "doc5"}' + +--- +teardown: + - do: + xpack.security.delete_user: + username: "test_user" + ignore: 404 + + - do: + xpack.security.delete_role: + name: "mixed_role" + ignore: 404 + +--- +"Test get a document when authorized": + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + get: + id: 1 + index: only_read + type: doc + + - match: { _index: only_read } + - match: { _id: "1" } + - match: { _source.name: "doc1" } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + get: + id: 4 + index: read_write + type: doc + - match: { _index: read_write } + - match: { _id: "4" } + - match: { _source.name: "doc4" } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + get: + id: 5 + index: everything + type: doc + - match: { _index: everything } + - match: { _id: "5" } + - match: { _source.name: "doc5" } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + mget: + body: + docs: + - { _index: "only_read", _type: "doc", _id: "1" } + - { _index: "read_write", _type: "doc", _id: "4" } + - { _index: "everything", _type: "doc", _id: "5" } + - match: { docs.0._index: "only_read" } + - match: { docs.0._id: "1" } + - match: { docs.0._source.name: "doc1" } + - match: { docs.1._index: "read_write" } + - match: { docs.1._id: "4" } + - match: { docs.1._source.name: "doc4" } + - match: { docs.2._index: "everything"} + - match: { docs.2._id: "5" } + - match: { docs.2._source.name: "doc5" } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + mget: + body: + docs: + - { _index: "only_read", _type: "doc", _id: "1" } + - match: { docs.0._index: "only_read"} 
+ - match: { docs.0._id: "1" } + - match: { docs.0._source.name: "doc1" } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + mget: + body: + docs: + - { _index: "read_write", _type: "doc", _id: "4" } + - match: { docs.0._index: read_write} + - match: { docs.0._id: "4" } + - match: { docs.0._source.name: "doc4" } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + mget: + body: + docs: + - { _index: "everything", _type: "doc", _id: "5" } + - match: { docs.0._index: "everything"} + - match: { docs.0._id: "5" } + - match: { docs.0._source.name: "doc5" } + +--- +"Test get a document when not allowed": + + - do: + catch: forbidden + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + get: + id: 2 + index: only_index + type: doc + + - do: + catch: forbidden + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + get: + id: 3 + index: only_delete + type: doc + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + mget: + body: + docs: + - { _index: "only_index", _type: "doc", _id: "2" } + - { _index: "only_delete", _type: "doc", _id: "3" } + - match: { docs.0._index: "only_index"} + - match: { docs.0._id: "2" } + - match: { docs.0.error.type: "security_exception" } + - match: { docs.1._index: "only_delete"} + - match: { docs.1._id: "3" } + - match: { docs.1.error.type: "security_exception" } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + mget: + body: + docs: + - { _index: "only_index", _type: "doc", _id: "2" } + - match: { docs.0._index: "only_index"} + - match: { docs.0._id: "2" } + - match: { docs.0.error.type: "security_exception" } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + mget: + body: + docs: + - { _index: "only_delete", _type: "doc", _id: "3" } + - match: { docs.0._index: "only_delete"} + - match: { docs.0._id: "3" } + - match: { docs.0.error.type: "security_exception" } + +--- +"Test mget documents when only some are allowed": + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + mget: + body: + docs: + - { _index: "only_read" , _type: "doc", _id: "1" } + - { _index: "only_index" , _type: "doc", _id: "2" } + - { _index: "only_delete", _type: "doc", _id: "3" } + - { _index: "read_write" , _type: "doc", _id: "4" } + - { _index: "everything" , _type: "doc", _id: "5" } + + - match: { docs.0._index: "only_read" } + - match: { docs.0._id: "1" } + - match: { docs.0._source.name: "doc1" } + - match: { docs.1._index: "only_index"} + - match: { docs.1._id: "2" } + - match: { docs.1.error.type: "security_exception" } + - match: { docs.2._index: "only_delete"} + - match: { docs.2._id: "3" } + - match: { docs.2.error.type: "security_exception" } + - match: { docs.3._index: "read_write" } + - match: { docs.3._id: "4" } + - match: { docs.3._source.name: "doc4" } + - match: { docs.4._index: "everything" } + - match: { docs.4._id: "5" } + - match: { docs.4._source.name: "doc5" } + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/21_search_doc.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/21_search_doc.yml new file mode 100644 index 0000000000000..b26b797bd297a --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/authz/21_search_doc.yml 
@@ -0,0 +1,266 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.security.put_role: + name: "mixed_role" + body: > + { + "indices": [ + { "names": ["only_read"], "privileges": ["read"] }, + { "names": ["only_index"], "privileges": ["index"] }, + { "names": ["only_delete"], "privileges": ["delete"] }, + { "names": ["read_write"], "privileges": ["read", "write"] }, + { "names": ["everything"], "privileges": ["all"] } + ] + } + + - do: + xpack.security.put_user: + username: "test_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "mixed_role" ], + "full_name" : "user with mixed privileges to multiple indices" + } + + - do: + indices.create: + index: only_read + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" + tag: + type: "keyword" + - do: + indices.create: + index: only_index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" + tag: + type: "keyword" + - do: + indices.create: + index: only_delete + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" + tag: + type: "keyword" + - do: + indices.create: + index: read_write + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" + tag: + type: "keyword" + - do: + indices.create: + index: everything + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + doc: + properties: + name: + type: "keyword" + tag: + type: "keyword" + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "only_read", "_type": "doc", "_id": "1"}}' + - '{"name": "doc1", "tag": [ "can-read", "tag-a" ] }' + - '{"index": {"_index": "only_read", "_type": "doc", "_id": "2"}}' + - '{"name": "doc2", "tag": [ "can-read", "tag-b"] }' + - '{"index": {"_index": "only_index", "_type": "doc", "_id": "3"}}' + - '{"name": "doc3", "tag": [ "no-read", "tag-a"] }' + - '{"index": {"_index": "only_index", "_type": "doc", "_id": "4"}}' + - '{"name": "doc4", "tag": [ "no-read", "tag-b"] }' + - '{"index": {"_index": "only_delete", "_type": "doc", "_id": "5"}}' + - '{"name": "doc5", "tag": [ "no-read", "tag-a"] }' + - '{"index": {"_index": "only_delete", "_type": "doc", "_id": "6"}}' + - '{"name": "doc6", "tag": [ "no-read", "tag-b"] }' + - '{"index": {"_index": "read_write", "_type": "doc", "_id": "7"}}' + - '{"name": "doc7", "tag": [ "can-read", "tag-a" ] }' + - '{"index": {"_index": "read_write", "_type": "doc", "_id": "8"}}' + - '{"name": "doc8", "tag": [ "can-read", "tag-b"] }' + - '{"index": {"_index": "everything", "_type": "doc", "_id": "9"}}' + - '{"name": "doc9", "tag": [ "can-read", "tag-a" ] }' + - '{"index": {"_index": "everything", "_type": "doc", "_id": "10"}}' + - '{"name": "doc10", "tag": [ "can-read", "tag-b"] }' + +--- +teardown: + - do: + xpack.security.delete_user: + username: "test_user" + ignore: 404 + + - do: + xpack.security.delete_role: + name: "mixed_role" + ignore: 404 + +--- +"Test search for document when authorized": + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + index: only_read + body: + - match: { hits.total: 2 } + - match: { hits.hits.0._index: only_read } + - match: { hits.hits.1._index: only_read } + + - do: + headers: { Authorization: "Basic 
dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + index: read_write + body: + - match: { hits.total: 2 } + - match: { hits.hits.0._index: read_write } + - match: { hits.hits.1._index: read_write } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + index: everything + body: + - match: { hits.total: 2 } + - match: { hits.hits.0._index: everything } + - match: { hits.hits.1._index: everything } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + body: { "query": { "term": { "tag": "can-read" } } } + - match: { hits.total: 6 } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + msearch: + body: + - { } + - { "query": { "term": { "tag": "can-read" } } } + - { "index": "only_read" } + - { "query": { "term": { "tag": "tag-a" } } } + - { "index": "read_write" } + - { } + - match: { responses.0.hits.total: 6 } + - match: { responses.1.hits.total: 1 } + - match: { responses.2.hits.total: 2 } + +--- +"Test search for documents when not allowed": + + - do: + catch: forbidden + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + index: only_index + body: + + - do: + catch: forbidden + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + index: only_delete + body: + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + body: { "query": { "term": { "tag": "no-read" } } } + - match: { hits.total: 0 } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + body: { "query": { "term": { "_index": "only_index" } } } + - match: { hits.total: 0 } + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + msearch: + body: + - { } + - { "query": { "term": { "tag": "no-read" } } } + - { } + - { "query": { "term": { "_index": "only_index" } } } + - { "index": "only_delete" } + - { } + - match: { responses.0.hits.total: 0 } + - match: { responses.1.hits.total: 0 } + - match: { responses.2.error.type: "security_exception" } + +--- +"Test search documents when only some are allowed": + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + body: { "query": { "term": { "tag": "tag-a" } } } + - match: { hits.total: 3 } # can-read, read_write, everything + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + msearch: + body: + - { } + - { "query": { "term": { "tag": "tag-a" } } } + - { } + - { "query": { "term": { "tag": "can-read" } } } + - { } + - { "query": { "term": { "tag": "no-read" } } } + - { "index": "only_read" } + - { "query": { "term": { "tag": "tag-a" } } } + - { "index": "only_delete" } + - { "query": { "term": { "tag": "tag-a" } } } + - match: { responses.0.hits.total: 3 } # tag-a (in readable indices) + - match: { responses.1.hits.total: 6 } # can-read + - match: { responses.2.hits.total: 0 } # no-read + - match: { responses.3.hits.total: 1 } # only_read + tag-a + - match: { responses.4.error.type: "security_exception" } # only_delete + tag-a diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/hidden-index/10_security_read.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/hidden-index/10_security_read.yml new file mode 100644 
index 0000000000000..dd81f4dc35fa5 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/hidden-index/10_security_read.yml @@ -0,0 +1,83 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.security.put_role: + name: "all_access" + body: > + { + "cluster": [ "all" ], + "indices": [ + { "names": ["*"], "privileges": ["all"] } + ] + } + + - do: + xpack.security.put_user: + username: "test_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "all_access" ], + "full_name" : "user with all possible privileges (but not superuser)" + } + +--- +teardown: + - do: + xpack.security.delete_user: + username: "test_user" + ignore: 404 + + - do: + xpack.security.delete_role: + name: "all_access" + ignore: 404 + +--- +"Test get security index metadata": + + - do: + catch: forbidden + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + indices.get: + index: ".security" + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + indices.get: + index: ".secu*rity" + - length: { $body: 0 } + +--- +"Test get security document": + + - do: + catch: forbidden + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + get: + index: ".security" + type: "doc" + id: "user-test_user" + +--- +"Test search security index": + + - do: + catch: forbidden + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + index: ".security" + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + index: ".secu*rity" + - match: { hits.total: 0 } + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/security/hidden-index/11_security-6_read.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/hidden-index/11_security-6_read.yml new file mode 100644 index 0000000000000..8d88211d2a1c3 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/security/hidden-index/11_security-6_read.yml @@ -0,0 +1,83 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.security.put_role: + name: "all_access" + body: > + { + "cluster": [ "all" ], + "indices": [ + { "names": ["*"], "privileges": ["all"] } + ] + } + + - do: + xpack.security.put_user: + username: "test_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "all_access" ], + "full_name" : "user with all possible privileges (but not superuser)" + } + +--- +teardown: + - do: + xpack.security.delete_user: + username: "test_user" + ignore: 404 + + - do: + xpack.security.delete_role: + name: "all_access" + ignore: 404 + +--- +"Test get security index metadata": + + - do: + catch: forbidden + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + indices.get: + index: ".security-6" + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + indices.get: + index: ".security*6" + - length: { $body: 0 } + +--- +"Test get security document": + + - do: + catch: forbidden + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + get: + index: ".security-6" + type: "doc" + id: "user-test_user" + +--- +"Test search security index": + + - do: + catch: forbidden + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + 
search: + index: ".security-6" + + - do: + headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user + search: + index: ".security*6" + - match: { hits.total: 0 } + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/set_security_user/10_small_users_one_index.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/set_security_user/10_small_users_one_index.yml new file mode 100644 index 0000000000000..0e42a13b8fd2a --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/set_security_user/10_small_users_one_index.yml @@ -0,0 +1,143 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "processors": [ + { + "set_security_user" : { + "field" : "user", + "properties" : ["username", "roles"] + } + } + ] + } + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "password": "x-pack-test-password", + "roles" : [ "company_x_logs_role" ] + } + - do: + xpack.security.put_user: + username: "john" + body: > + { + "password": "x-pack-test-password", + "roles" : [ "company_y_logs_role" ] + } + + - do: + xpack.security.put_role: + name: "company_x_logs_role" + body: > + { + "cluster": ["all"], + "indices": [ + { + "names": "shared_logs", + "privileges": ["all"], + "query" : { + "term" : { "user.roles" : "company_x_logs_role" } + } + } + ] + } + + - do: + xpack.security.put_role: + name: "company_y_logs_role" + body: > + { + "cluster": ["all"], + "indices": [ + { + "names": "shared_logs", + "privileges": ["all"], + "query" : { + "term" : { "user.roles" : "company_y_logs_role" } + } + } + ] + } + +--- +teardown: + - do: + xpack.security.delete_user: + username: "joe" + ignore: 404 + - do: + xpack.security.delete_user: + username: "john" + ignore: 404 + - do: + xpack.security.delete_role: + name: "company_x_logs_role" + ignore: 404 + - do: + xpack.security.delete_role: + name: "company_y_logs_role" + ignore: 404 + +--- +"Test shared index seperating user by using DLS": + - do: + headers: + Authorization: "Basic am9lOngtcGFjay10ZXN0LXBhc3N3b3Jk" + index: + index: shared_logs + type: type + id: 1 + pipeline: "my_pipeline" + body: > + { + "log": "Joe's first log entry" + } + - do: + headers: + Authorization: "Basic am9objp4LXBhY2stdGVzdC1wYXNzd29yZA==" + index: + index: shared_logs + type: type + id: 2 + pipeline: "my_pipeline" + body: > + { + "log": "John's first log entry" + } + + - do: + indices.refresh: {} + + # Joe searches: + - do: + headers: + Authorization: "Basic am9lOngtcGFjay10ZXN0LXBhc3N3b3Jk" + search: + index: shared_logs + body: { "query" : { "match_all" : {} } } + - match: { hits.total: 1} + - match: { hits.hits.0._source.user.username: joe} + - match: { hits.hits.0._source.user.roles.0: company_x_logs_role} + + # John searches: + - do: + headers: + Authorization: "Basic am9objp4LXBhY2stdGVzdC1wYXNzd29yZA==" + search: + index: shared_logs + body: { "query" : { "match_all" : {} } } + - match: { hits.total: 1} + - match: { hits.hits.0._source.user.username: john} + - match: { hits.hits.0._source.user.roles.0: company_y_logs_role} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/sql.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/sql.yml new file mode 100644 index 0000000000000..551866b3b1ebd --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/sql.yml @@ -0,0 +1,119 @@ +--- +setup: + - do: + bulk: + refresh: true + body: + - index: + _index: test + _type: 
doc + _id: 1 + - str: test1 + int: 1 + - index: + _index: test + _type: doc + _id: 2 + - str: test2 + int: 2 + - index: + _index: test + _type: doc + _id: 3 + - str: test3 + int: 3 + +--- +"Execute some SQL": + - do: + xpack.sql.query: + format: json + body: + query: "SELECT * FROM test ORDER BY int asc" + - match: { columns.0.name: int } + - match: { columns.1.name: str } + - match: { rows.0.0: 1 } + - match: { rows.0.1: test1 } + - match: { rows.1.0: 2 } + - match: { rows.1.1: test2 } + - match: { rows.2.0: 3 } + - match: { rows.2.1: test3 } + +--- +"Paging through results": + - do: + xpack.sql.query: + format: json + body: + query: "SELECT * FROM test ORDER BY int asc" + fetch_size: 2 + - match: { columns.0.name: int } + - match: { columns.1.name: str } + - match: { rows.0.0: 1 } + - match: { rows.0.1: test1 } + - match: { rows.1.0: 2 } + - match: { rows.1.1: test2 } + - is_true: cursor + - set: { cursor: cursor } + + - do: + xpack.sql.query: + format: json + body: + cursor: "$cursor" + - match: { rows.0.0: 3 } + - match: { rows.0.1: test3 } + - is_false: columns + - is_true: cursor + - set: { cursor: cursor } + + - do: + xpack.sql.query: + format: json + body: + cursor: "$cursor" + - is_false: columns + - is_false: cursor + - length: { rows: 0 } + +--- +"Getting textual representation": + - do: + xpack.sql.query: + format: txt + body: + query: "SELECT * FROM test ORDER BY int asc" + - match: + $body: | + /^ \s+ int \s+ \| \s+ str \s+ \n + ---------------\+---------------\n + 1 \s+ \|test1 \s+ \n + 2 \s+ \|test2 \s+ \n + 3 \s+ \|test3 \s+ \n + $/ + +--- +"Clean cursor": + - do: + xpack.sql.query: + format: json + body: + query: "SELECT * FROM test ORDER BY int asc" + fetch_size: 2 + - match: { columns.0.name: int } + - match: { columns.1.name: str } + - match: { rows.0.0: 1 } + - match: { rows.0.1: test1 } + - is_true: cursor + - set: { cursor: cursor} + + - do: + xpack.sql.clear_cursor: + body: + cursor: "$cursor" + - match: { "succeeded": true } + + - do: + indices.stats: { index: 'test' } + + - match: { indices.test.total.search.open_contexts: 0 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml new file mode 100644 index 0000000000000..b3d93e5298810 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml @@ -0,0 +1,29 @@ +--- +"Translate SQL": + - do: + bulk: + refresh: true + body: + - index: + _index: test + _type: doc + _id: 1 + - str: test1 + int: 1 + + - do: + xpack.sql.translate: + body: + query: "SELECT * FROM test ORDER BY int asc" + - match: + $body: + size: 1000 + _source: + includes: + - str + excludes: [] + docvalue_fields: + - int + sort: + - int: + order: asc diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ssl/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ssl/10_basic.yml new file mode 100644 index 0000000000000..7a87ef511e591 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ssl/10_basic.yml @@ -0,0 +1,10 @@ +--- +"Test get SSL certificates": + - do: + xpack.ssl.certificates: {} + + - length: { $body: 1 } + - match: { $body.0.path: "test-node.jks" } + - match: { $body.0.format: "jks" } + - match: { $body.0.alias: "test-node" } + - match: { $body.0.has_private_key: true } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/token/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/token/10_basic.yml new file mode 100644 index 
0000000000000..62e32cdaed2b6 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/token/10_basic.yml @@ -0,0 +1,88 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.security.put_user: + username: "token_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "superuser" ], + "full_name" : "Token User" + } + +--- +teardown: + - do: + xpack.security.delete_user: + username: "token_user" + ignore: 404 + +--- +"Test get and use token": + + - do: + xpack.security.get_token: + body: + grant_type: "password" + username: "token_user" + password: "x-pack-test-password" + + - match: { type: "Bearer" } + - is_true: access_token + - set: { access_token: token } + - match: { expires_in: 1200 } + - is_false: scope + + - do: + headers: + Authorization: Bearer ${token} + xpack.security.authenticate: {} + + - match: { username: "token_user" } + - match: { roles.0: "superuser" } + - match: { full_name: "Token User" } + +--- +"Test invalidate token": + + - do: + xpack.security.get_token: + body: + grant_type: "password" + username: "token_user" + password: "x-pack-test-password" + + - match: { type: "Bearer" } + - is_true: access_token + - set: { access_token: token } + - match: { expires_in: 1200 } + - is_false: scope + + - do: + headers: + Authorization: Bearer ${token} + xpack.security.authenticate: {} + + - match: { username: "token_user" } + - match: { roles.0: "superuser" } + - match: { full_name: "Token User" } + + - do: + xpack.security.invalidate_token: + body: + token: $token + + - match: { created: true } + + - do: + catch: unauthorized + headers: + Authorization: Bearer ${token} + xpack.security.authenticate: {} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/upgrade/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/upgrade/10_basic.yml new file mode 100644 index 0000000000000..cfa75e0b4bdfd --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/upgrade/10_basic.yml @@ -0,0 +1,63 @@ +--- +setup: + - skip: + version: "all" + reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/29890" + - do: + xpack.license.post: + body: > + { + "license": { + "uid": "b8520184-985d-4b04-8a89-b52da6e0aad1", + "type": "platinum", + "issue_date_in_millis": 1494510840000, + "expiry_date_in_millis": 2756814840000, + "max_nodes": 1, + "issued_to": "upgrade_api_test", + "issuer": "elasticsearch", + "signature": "AAAAAwAAAA0hsB+mfk9EqWiY6e1KAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQA6NkNF3Z219ptzRwZwGzgIwaXn5rXvOWSB9KK86xBqeYQMlO1ahCd4eW3FHWTuginPuqMX8okzN+UEMANPE3l0QxvrgCcTzNYPGqCJDwBb0ghuQ4Y5Cezn806sBnXLVF35B1HU2C1PYc1mZvisD63NqasrAVYb3GS6vwq8a7PYfKpfZfFCqG2SZIkSHACPGBTUiPbVEVv1iiOC04x/pjF4Kn26MPbFD5jbQBSY2V8TxoapMHf11EDpOTlMYkXgerbMg7VWtVCypTMJJrhoVguCrZvM8U/+sSnbodtnZUeAImnFbYeV10Rcw62dtrpka0yuo7h6Qtrvy9YqVHZDtyrM", + "start_date_in_millis": -1 + } + } + + - do: + indices.create: + index: test1 + + - do: + indices.refresh: {} + +--- 
+"Upgrade info - all": + - do: + xpack.migration.get_assistance: { index: _all } + + - length: { indices: 0 } + +--- +"Upgrade test - should fail as index is already up to date": + - do: + catch: /illegal_state_exception/ + xpack.migration.upgrade: { index: test1 } + +--- +"Upgrade test - wait_for_completion:false": + + - do: + xpack.migration.upgrade: + index: test1 + wait_for_completion: false + + - match: {task: '/.+:\d+/'} + - set: {task: task} + + - do: + tasks.get: + wait_for_completion: true + task_id: $task + - is_false: node_failures + - is_true: task + - match: {completed: true} + - is_true: error + - match: {error.type: "illegal_state_exception"} + - match: {error.reason: "Index [test1] cannot be upgraded"} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/users/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/users/10_basic.yml new file mode 100644 index 0000000000000..5e5138c88fb01 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/users/10_basic.yml @@ -0,0 +1,103 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + +--- +teardown: + - do: + xpack.security.delete_user: + username: "joe" + ignore: 404 + +--- +"Test put user api": + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "password" : "s3krit", + "roles" : [ "superuser" ], + "full_name" : "Bazooka Joe", + "email" : "joe@bazooka.gum", + "metadata" : { + "key1" : "val1", + "key2" : "val2" + } + } + - match: { user: { created: true } } + + - do: + headers: + Authorization: "Basic am9lOnMza3JpdA==" + cluster.health: {} + - match: { timed_out: false } + + - do: + xpack.security.get_user: + username: "joe" + - match: { joe.username: "joe" } + - match: { joe.roles.0: "superuser" } + - match: { joe.full_name: "Bazooka Joe" } + - match: { joe.email: "joe@bazooka.gum" } + - match: { joe.metadata.key1: "val1" } + - match: { joe.metadata.key2: "val2" } + +--- +"Test put user with username in body": + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "username": "joe", + "password" : "s3krit", + "roles" : [ "superuser" ], + "full_name" : "Bazooka Joe", + "email" : "joe@bazooka.gum", + "metadata" : { + "key1" : "val1", + "key2" : "val2" + } + } + - match: { user: { created: true } } + + - do: + headers: + Authorization: "Basic am9lOnMza3JpdA==" + cluster.health: {} + - match: { timed_out: false } + + - do: + xpack.security.get_user: + username: "joe" + - match: { joe.username: "joe" } + - match: { joe.roles.0: "superuser" } + - match: { joe.full_name: "Bazooka Joe" } + - match: { joe.email: "joe@bazooka.gum" } + - match: { joe.metadata.key1: "val1" } + - match: { joe.metadata.key2: "val2" } + +--- +"Test put user with different username in body": + - do: + catch: bad_request + xpack.security.put_user: + username: "joe" + body: > + { + "username": "joey", + "password" : "s3krit", + "roles" : [ "superuser" ], + "full_name" : "Bazooka Joe", + "email" : "joe@bazooka.gum", + "metadata" : { + "key1" : "val1", + "key2" : "val2" + } + } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/users/15_overwrite_user.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/users/15_overwrite_user.yml new file mode 100644 index 0000000000000..66bcc9d1c5adf --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/users/15_overwrite_user.yml @@ -0,0 +1,71 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - do: + 
xpack.security.put_user: + username: "joe" + body: > + { + "password": "s3krit", + "roles" : [ "superuser" ] + } + +--- +teardown: + - do: + xpack.security.delete_user: + username: "joe" + ignore: 404 + +--- +"Test overwriting a user": + - do: + xpack.security.get_user: + username: "joe" + - match: { joe.username: "joe" } + - match: { joe.roles.0: "superuser" } + + - do: + headers: + Authorization: "Basic am9lOnMza3JpdA==" + cluster.health: {} + - match: { timed_out: false } + + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "password" : "s3krit2", + "roles" : [ "superuser", "foo" ], + "full_name" : "Bazooka Joe", + "email" : "joe@bazooka.gum", + "metadata" : { + "key1" : "val1", + "key2" : "val2" + } + } + - match: { user: { created: false } } + + - do: + xpack.security.get_user: + username: "joe" + - match: { joe.username: "joe" } + - match: { joe.roles.0: "superuser" } + - match: { joe.roles.1: "foo" } + - match: { joe.full_name: "Bazooka Joe" } + - match: { joe.email: "joe@bazooka.gum" } + - match: { joe.metadata.key1: "val1" } + - match: { joe.metadata.key2: "val2" } + + - do: + headers: + Authorization: "Basic am9lOnMza3JpdDI=" + cluster.health: {} + - match: { timed_out: false } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/users/16_update_user.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/users/16_update_user.yml new file mode 100644 index 0000000000000..abe6f44369ae4 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/users/16_update_user.yml @@ -0,0 +1,132 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "password": "s3krit", + "roles" : [ "superuser" ] + } + +--- +teardown: + - do: + xpack.security.delete_user: + username: "joe" + ignore: 404 + +--- +"Test creating a user without password": + - do: + catch: bad_request + xpack.security.put_user: + username: "no_password_user" + body: > + { + "roles" : [ "superuser" ] + } + - match: { error.root_cause.0.reason: 'Validation Failed: 1: password must be specified unless you are updating an existing user;' } + +--- +"Test create user and update without and with password": + - skip: + features: [headers, catch_unauthorized] + + - do: + headers: + Authorization: "Basic am9lOnMza3JpdA==" + cluster.health: {} + - match: { timed_out: false } + + - do: + xpack.security.get_user: + username: "joe" + - match: { joe.username: "joe" } + - match: { joe.roles.0: "superuser" } + - is_false: joe.full_name + - is_false: joe.email + +# update the user without a password + + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "roles" : [ "superuser", "foo" ], + "full_name" : "Bazooka Joe", + "email" : "joe@bazooka.gum", + "metadata" : { + "key1" : "val1", + "key2" : "val2" + } + } + - match: { user: { created: false } } + +# validate existing password works + - do: + headers: + Authorization: "Basic am9lOnMza3JpdA==" + cluster.health: {} + - match: { timed_out: false } + +# validate other properties + - do: + xpack.security.get_user: + username: "joe" + - match: { joe.username: "joe" } + - match: { joe.roles.0: "superuser" } + - match: { joe.roles.1: "foo" } + - match: { joe.full_name: "Bazooka Joe" } + - match: { joe.email: "joe@bazooka.gum" } + - match: { joe.metadata.key1: "val1" } + - match: { joe.metadata.key2: "val2" } + +# update with password + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "password" : "s3krit2", + "roles" : [ "superuser" ], + 
"full_name" : "Bazooka Joe", + "email" : "joe@bazooka.gum", + "metadata" : { + "key1" : "val1", + "key2" : "val2", + "key3" : "val3" + } + } + - match: { user: { created: false } } + +# validate old password doesn't work + - do: + catch: unauthorized + headers: + Authorization: "Basic am9lOnMza3JpdA==" + cluster.health: {} + +# validate new password works + - do: + headers: + Authorization: "Basic am9lOnMza3JpdDI=" + cluster.health: {} + - match: { timed_out: false } + +# validate properties + - do: + xpack.security.get_user: + username: "joe" + - match: { joe.username: "joe" } + - match: { joe.roles.0: "superuser" } + - match: { joe.full_name: "Bazooka Joe" } + - match: { joe.email: "joe@bazooka.gum" } + - match: { joe.metadata.key1: "val1" } + - match: { joe.metadata.key2: "val2" } + - match: { joe.metadata.key3: "val3" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/users/20_get_missing.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/users/20_get_missing.yml new file mode 100644 index 0000000000000..290e612f427da --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/users/20_get_missing.yml @@ -0,0 +1,12 @@ +"Get missing user": + - do: + catch: missing + xpack.security.get_user: + username: 'foo' + +--- +"Get missing (multiple) users": + - do: + catch: missing + xpack.security.get_user: + username: [ 'foo', 'bar' ] diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/users/30_enable_disable.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/users/30_enable_disable.yml new file mode 100644 index 0000000000000..746aaed73be5b --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/users/30_enable_disable.yml @@ -0,0 +1,123 @@ +--- +setup: + - skip: + features: [headers, catch_unauthorized] + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "password": "s3krit", + "roles" : [ "superuser" ] + } + +--- +teardown: + - do: + xpack.security.delete_user: + username: "joe" + ignore: 404 + +--- +"Test disable then enable user": + - do: + headers: + Authorization: "Basic am9lOnMza3JpdA==" + cluster.health: {} + - match: { timed_out: false } + +# disable the user + - do: + xpack.security.disable_user: + username: "joe" + +# validate user cannot login + - do: + catch: unauthorized + headers: + Authorization: "Basic am9lOnMza3JpdA==" + cluster.health: {} + +# enable the user + - do: + xpack.security.enable_user: + username: "joe" + +# validate that the user can login again + - do: + headers: + Authorization: "Basic am9lOnMza3JpdA==" + cluster.health: {} + - match: { timed_out: false } + +--- +"Test enabling already enabled user": +# check that the user works + - do: + headers: + Authorization: "Basic am9lOnMza3JpdA==" + cluster.health: {} + - match: { timed_out: false } + +# enable the user + - do: + xpack.security.enable_user: + username: "joe" + +# validate that the user still works + - do: + headers: + Authorization: "Basic am9lOnMza3JpdA==" + cluster.health: {} + - match: { timed_out: false } + +--- +"Test disabling already disabled user": +# check that the user works + - do: + headers: + Authorization: "Basic am9lOnMza3JpdA==" + cluster.health: {} + - match: { timed_out: false } + +# disable the user + - do: + xpack.security.disable_user: + username: "joe" + +# validate user cannot login + - do: + catch: unauthorized + headers: + Authorization: "Basic am9lOnMza3JpdA==" + cluster.health: {} + +# disable again + - do: + xpack.security.disable_user: 
+ username: "joe" + + - do: + xpack.security.enable_user: + username: "joe" + +--- +"Test disabling yourself": +# check that the user works + - do: + headers: + Authorization: "Basic am9lOnMza3JpdA==" + cluster.health: {} + - match: { timed_out: false } + +# try to disable yourself + - do: + catch: '/users may not update the enabled status of their own account/' + headers: + Authorization: "Basic am9lOnMza3JpdA==" + xpack.security.disable_user: + username: "joe" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/users/31_create_disabled.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/users/31_create_disabled.yml new file mode 100644 index 0000000000000..50811a3cb698f --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/users/31_create_disabled.yml @@ -0,0 +1,43 @@ +--- +setup: + - skip: + features: [headers, catch_unauthorized] + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "password": "s3krit", + "roles" : [ "superuser" ], + "enabled": false + } +--- +teardown: + - do: + xpack.security.delete_user: + username: "joe" + ignore: 404 + +--- +"Test disable then enable user": + - do: + catch: unauthorized + headers: + Authorization: "Basic am9lOnMza3JpdA==" + cluster.health: {} + +# enable + - do: + xpack.security.enable_user: + username: "joe" + +# validate user can login + - do: + headers: + Authorization: "Basic am9lOnMza3JpdA==" + cluster.health: {} + - match: { timed_out: false } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/10_basic.yml new file mode 100644 index 0000000000000..f74fd7a9a657a --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/10_basic.yml @@ -0,0 +1,68 @@ +--- +"Test ack watch api": + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger" : { + "schedule" : { "cron" : "0 0 0 1 * ? 
2099" } + }, + "input": { + "simple": { + "payload": { + "send": "yes" + } + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "index": { + "index": "test", + "doc_type": "test2" + } + } + } + } + + - match: { _id: "my_watch" } + + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.watcher.ack_watch: + watch_id: "my_watch" + + - match: { "status.actions.test_index.ack.state" : "awaits_successful_execution" } + + - do: + search: + index: .watches + body: { "query": { "term": { "_id": "my_watch" } } } + - match: { hits.total: 1 } + - match: { hits.hits.0._source.status.actions.test_index.ack.state: "awaits_successful_execution" } + + - do: + xpack.watcher.delete_watch: + id: "my_watch" + + - match: { found: true } + +--- +"Non existent watch returns 404": + - do: + cluster.health: + wait_for_status: yellow + - do: + xpack.watcher.ack_watch: + watch_id: "non-existent-watch" + catch: missing diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/20_ack_individual_action.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/20_ack_individual_action.yml new file mode 100644 index 0000000000000..b59fd561a7594 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/20_ack_individual_action.yml @@ -0,0 +1,53 @@ +--- +"Test ack watch api on an individual action": + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger" : { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input": { + "simple": { + "payload": { + "send": "yes" + } + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "index": { + "index": "test", + "doc_type": "test2" + } + } + } + } + + - match: { _id: "my_watch" } + + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.watcher.ack_watch: + watch_id: "my_watch" + action_id: "test_index" + + - match: { "status.actions.test_index.ack.state" : "awaits_successful_execution" } + + - do: + xpack.watcher.delete_watch: + id: "my_watch" + + - match: { found: true } + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/30_reset_ack_after_unmet_condition.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/30_reset_ack_after_unmet_condition.yml new file mode 100644 index 0000000000000..5b2d00235c5c7 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/30_reset_ack_after_unmet_condition.yml @@ -0,0 +1,104 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +teardown: + - do: + xpack.watcher.delete_watch: + id: "my_watch" + ignore: 404 + +--- +"Ensure that ack status is reset after unsuccesful execution": + + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "interval": "1m" + } + }, + "input": { + "simple" : { "match" : "true" } + }, + "condition": { + "compare": { + "ctx.payload.match": { + "eq": "true" + } + } + }, + "actions": { + "indexme" : { + "index" : { + "index" : "my_test_index", + "doc_type" : "my-type", + "doc_id": "my-id" + } + } + } + } + + - do: + xpack.watcher.execute_watch: + id: "my_watch" + body: > + { + "record_execution" : true + } + - match: { watch_record.status.actions.indexme.ack.state: "ackable" } + + - do: + xpack.watcher.ack_watch: + watch_id: "my_watch" + - match: { "status.actions.indexme.ack.state" : "acked" } + + - do: + xpack.watcher.get_watch: + id: 
"my_watch" + - match: { "status.actions.indexme.ack.state" : "acked" } + + # having a false result will reset the ack state + - do: + xpack.watcher.execute_watch: + id: "my_watch" + body: > + { + "record_execution" : true, + "alternative_input" : { + "match" : "false" + }, + "action_modes" : { + "indexme" : "force_execute" + } + } + - match: { watch_record.status.actions.indexme.ack.state: "awaits_successful_execution" } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + - match: { "status.actions.indexme.ack.state" : "awaits_successful_execution" } + + - do: + xpack.watcher.execute_watch: + id: "my_watch" + body: > + { + "record_execution" : true, + "action_modes" : { + "indexme" : "force_execute" + } + } + - match: { watch_record.status.actions.indexme.ack.state: "ackable" } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + - match: { "status.actions.indexme.ack.state" : "ackable" } + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/40_reset_ack_after_unmet_action_condition.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/40_reset_ack_after_unmet_action_condition.yml new file mode 100644 index 0000000000000..93639163ac04b --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/40_reset_ack_after_unmet_action_condition.yml @@ -0,0 +1,104 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +teardown: + - do: + xpack.watcher.delete_watch: + id: "my_watch" + ignore: 404 + +--- +"Ensure that ack status is reset after unmet action condition": + + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "interval": "1m" + } + }, + "input": { + "simple" : { "match" : "true" } + }, + "actions": { + "indexme" : { + "condition": { + "compare": { + "ctx.payload.match": { + "eq": "true" + } + } + }, + "index" : { + "index" : "my_test_index", + "doc_type" : "my-type", + "doc_id": "my-id" + } + } + } + } + + - do: + xpack.watcher.execute_watch: + id: "my_watch" + body: > + { + "record_execution" : true + } + - match: { watch_record.status.actions.indexme.ack.state: "ackable" } + + - do: + xpack.watcher.ack_watch: + watch_id: "my_watch" + - match: { "status.actions.indexme.ack.state" : "acked" } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + - match: { "status.actions.indexme.ack.state" : "acked" } + + # having a false result will reset the ack state + - do: + xpack.watcher.execute_watch: + id: "my_watch" + body: > + { + "record_execution" : true, + "alternative_input" : { + "match" : "false" + }, + "action_modes" : { + "indexme" : "force_execute" + } + } + - match: { watch_record.status.actions.indexme.ack.state: "awaits_successful_execution" } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + - match: { "status.actions.indexme.ack.state" : "awaits_successful_execution" } + + - do: + xpack.watcher.execute_watch: + id: "my_watch" + body: > + { + "record_execution" : true, + "action_modes" : { + "indexme" : "force_execute" + } + } + - match: { watch_record.status.actions.indexme.ack.state: "ackable" } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + - match: { "status.actions.indexme.ack.state" : "ackable" } + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/activate_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/activate_watch/10_basic.yml new file mode 100644 index 0000000000000..b71486295d114 --- /dev/null +++ 
b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/activate_watch/10_basic.yml @@ -0,0 +1,106 @@ +--- +"Test activate watch api": + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger" : { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input": { + "simple": { + "payload": { + "send": "yes" + } + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "index": { + "index": "test", + "doc_type": "test2" + } + } + } + } + + - match: { _id: "my_watch" } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + + - match: { found : true} + - match: { _id: "my_watch" } + - match: { status.state.active: true } + + - do: + xpack.watcher.deactivate_watch: + watch_id: "my_watch" + + - match: { status.state.active : false } + + - do: + search: + index: .watches + body: { "query": { "term": { "_id": "my_watch" } } } + - match: { hits.total: 1 } + - match: { hits.hits.0._source.status.state.active: false } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + - match: { found : true} + - match: { _id: "my_watch" } + - match: { status.state.active: false } + + - do: + xpack.watcher.activate_watch: + watch_id: "my_watch" + + - match: { status.state.active : true } + + - do: + search: + index: .watches + body: { "query": { "term": { "_id": "my_watch" } } } + - match: { hits.total: 1 } + - match: { hits.hits.0._source.status.state.active: true } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + + - match: { found : true} + - match: { _id: "my_watch" } + - match: { status.state.active: true } + + - do: + xpack.watcher.delete_watch: + id: "my_watch" + + - match: { found: true } + +--- +"Non existent watch returns 404": + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.watcher.activate_watch: + watch_id: "non-existent-watch" + catch: missing + + - do: + xpack.watcher.deactivate_watch: + watch_id: "non-existent-watch" + catch: missing diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/delete_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/delete_watch/10_basic.yml new file mode 100644 index 0000000000000..ede88d2ae16fe --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/delete_watch/10_basic.yml @@ -0,0 +1,66 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +teardown: + - do: + xpack.watcher.delete_watch: + id: "my_watch" + ignore: 404 + +--- +"Test delete watch api": + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "payload": { + "send": "yes" + } + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "index": { + "index": "test", + "doc_type": "test2" + } + } + } + } + - match: { _id: "my_watch" } + - match: { created: true } + + - do: + xpack.watcher.delete_watch: + id: "my_watch" + - match: { found: true } + + - do: + search: + index: .watches + body: { "query": { "term": { "_id": "my_watch" } } } + - match: { hits.total: 0 } + +--- +"Non existent watch returns 404": + - do: + xpack.watcher.delete_watch: + id: "non-existent-watch" + catch: missing diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/10_basic.yml new file mode 100644 index 0000000000000..9ae698ced70bd --- /dev/null +++ 
b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/10_basic.yml @@ -0,0 +1,164 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +teardown: + - do: + xpack.watcher.delete_watch: + id: "test_watch" + ignore: 404 + +--- +"Test execute watch api with configured trigger data timestamps": + - do: + xpack.watcher.put_watch: + id: "test_watch" + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input": { + "simple": { + "foo": "bar" + } + }, + "condition": { + "always": {} + }, + "actions": { + "indexme" : { + "index" : { + "index" : "my_test_index", + "doc_type" : "my-type", + "doc_id": "my-id" + } + } + } + } + - match: { _id: "test_watch" } + - match: { created: true } + + - do: + xpack.watcher.execute_watch: + id: "test_watch" + body: > + { + "trigger_data" : { + "triggered_time" : "2012-12-12T12:12:12.120Z", + "scheduled_time" : "2000-12-12T12:12:12.120Z" + } + } + + - match: { watch_record.watch_id: "test_watch" } + - match: { watch_record.trigger_event.type: "manual" } + - match: { watch_record.trigger_event.triggered_time: "2012-12-12T12:12:12.120Z" } + - match: { watch_record.trigger_event.manual.schedule.scheduled_time: "2000-12-12T12:12:12.120Z" } + - match: { watch_record.state: "executed" } + - match: { watch_record.status.execution_state: "executed" } + - match: { watch_record.status.state.active: true } + - is_true: watch_record.node + - match: { watch_record.status.actions.indexme.ack.state: "ackable" } + - gt: { watch_record.result.execution_duration: 0 } + +--- +"Test execute watch API with user supplied watch": + + - do: + xpack.watcher.execute_watch: + body: > + { + "watch" : { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input": { + "simple": { + "foo": "bar" + } + }, + "condition": { + "always": {} + }, + "actions": { + "indexme" : { + "index" : { + "index" : "my_test_index", + "doc_type" : "my-type", + "doc_id": "my-id" + } + } + } + } + } + + - match: { watch_record.watch_id: "_inlined_" } + - match: { watch_record.trigger_event.type: "manual" } + - match: { watch_record.state: "executed" } + - match: { watch_record.status.execution_state: "executed" } + - match: { watch_record.status.state.active: true } + - match: { watch_record.status.actions.indexme.ack.state: "ackable" } + +--- +"Execute unknown watch results in 404": + + - do: + xpack.watcher.execute_watch: + id: "non-existent-watch" + catch: missing + +--- +"Test execute watch with alternative input": + + - do: + xpack.watcher.put_watch: + id: "test_watch" + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 
2099" } + }, + "input": { + "simple": { + "foo": "bar" + } + }, + "actions": { + "indexme" : { + "index" : { + "index" : "my_test_index", + "doc_type" : "my-type", + "refresh" : "wait_for", + "doc_id": "my-id" + } + } + } + } + - match: { _id: "test_watch" } + - match: { created: true } + + - do: + xpack.watcher.execute_watch: + id: "test_watch" + body: > + { + "alternative_input" : { + "spam" : "eggs" + } + } + + - match: { watch_record.watch_id: "test_watch" } + - match: { watch_record.state: "executed" } + - match: { watch_record.status.execution_state: "executed" } + - match: { watch_record.status.state.active: true } + - is_true: watch_record.node + - is_false: watch_record.result.input.payload.foo + - is_true: watch_record.result.input.payload.spam + + - do: + search: + index: my_test_index + - match: { hits.total : 1 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/20_transform.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/20_transform.yml new file mode 100644 index 0000000000000..59ebacbfe902d --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/20_transform.yml @@ -0,0 +1,199 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +"Test execute watch api with configured search transform": + - do: + index: + index: my_test_index + type: doc + id: my_id + refresh: true + body: > + { + "key": "value" + } + + - do: + xpack.watcher.execute_watch: + body: > + { + "watch" : { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input": { + "simple": { + "foo": "bar" + } + }, + "condition": { + "always": {} + }, + "transform" : { + "search" : { + "request" : { + "indices" : [ "my_test_index" ], + "body" : { + "query": { + "match_all" : {} + } + } + } + } + }, + "actions": { + "indexme" : { + "index" : { + "index" : "my_test_index", + "doc_type" : "doc", + "doc_id": "my-id" + } + } + } + } + } + - match: { watch_record.trigger_event.type: "manual" } + - match: { watch_record.state: "executed" } + - match: { watch_record.status.execution_state: "executed" } + - match: { watch_record.result.transform.status: "success" } + + - do: + get: + index: my_test_index + type: doc + id: my_id + + - match: { _source.key: "value" } + +--- +"Test execute watch api with configured search transform using DFS_QUERY_THEN_FETCH": + - do: + indices.create: + index: my_test_index + body: + settings: + number_of_shards: "2" + number_of_replicas: "0" + + - do: + index: + index: my_test_index + type: doc + id: my_id + refresh: true + body: > + { + "key": "value" + } + + - do: + xpack.watcher.execute_watch: + body: > + { + "watch" : { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 
2099" } + }, + "input": { + "simple": { + "foo": "bar" + } + }, + "condition": { + "always": {} + }, + "transform" : { + "search" : { + "request" : { + "search_type" : "dfs_query_then_fetch", + "indices" : [ "my_test_index" ], + "body" : { + "query": { + "match_all" : {} + } + } + } + } + }, + "actions": { + "indexme" : { + "index" : { + "index" : "my_test_index", + "doc_type" : "doc", + "doc_id": "my-id" + } + } + } + } + } + - match: { watch_record.trigger_event.type: "manual" } + - match: { watch_record.state: "executed" } + - match: { watch_record.status.execution_state: "executed" } + - match: { watch_record.result.transform.status: "success" } + + - do: + get: + index: my_test_index + type: doc + id: my_id + + - match: { _source.key: "value" } + + +--- +"Test execute watch api with misconfigured search transform on failure": + + - do: + indices.create: + index: my_test_index + + - do: + xpack.watcher.execute_watch: + body: > + { + "watch" : { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input": { + "simple": { + "foo": "bar" + } + }, + "condition": { + "always": {} + }, + "transform" : { + "search" : { + "request" : { + "indices" : [ "my_test_index" ], + "body" : { + "query": { + "does_not_exist" : {} + } + } + } + } + }, + "actions": { + "indexme" : { + "index" : { + "index" : "my_test_index", + "doc_type" : "doc", + "doc_id": "my-id" + } + } + } + } + } + + - match: { watch_record.trigger_event.type: "manual" } + - match: { watch_record.state: "executed" } + - match: { watch_record.status.execution_state: "executed" } + - match: { watch_record.result.transform.status: "failure" } + - match: { watch_record.result.transform.reason: "no [query] registered for [does_not_exist]" } + - is_true: watch_record.result.transform.error diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/30_throttled.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/30_throttled.yml new file mode 100644 index 0000000000000..0d2497fed79f1 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/30_throttled.yml @@ -0,0 +1,121 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +teardown: + - do: + xpack.watcher.delete_watch: + id: "test_watch" + ignore: 404 + +--- +"Test execute watch api works with throttling": + - do: + xpack.watcher.put_watch: + id: "test_watch" + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 
2099" } + }, + "input": { + "simple": { + "foo": "bar" + } + }, + "condition": { + "never": {} + }, + "actions": { + "logging" : { + "logging" : { + "text" : "logging text from test: execute_watch/30_throttled.yml" + } + } + } + } + - match: { _id: "test_watch" } + - match: { created: true } + + - do: + xpack.watcher.execute_watch: + id: "test_watch" + body: > + { + "trigger_data" : { + "triggered_time" : "2012-12-12T12:12:12.120Z", + "scheduled_time" : "2000-12-12T12:12:12.120Z" + } + } + + - match: { watch_record.watch_id: "test_watch" } + - match: { watch_record.trigger_event.type: "manual" } + - match: { watch_record.trigger_event.triggered_time: "2012-12-12T12:12:12.120Z" } + - match: { watch_record.trigger_event.manual.schedule.scheduled_time: "2000-12-12T12:12:12.120Z" } + - match: { watch_record.state: "execution_not_needed" } + - match: { watch_record.status.execution_state: "execution_not_needed" } + - match: { watch_record.status.state.active: true } + + - do: + xpack.watcher.put_watch: + id: "test_watch" + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input": { + "simple": { + "foo": "bar" + } + }, + "condition": { + "always": {} + }, + "throttle_period" : "1h", + "actions": { + "logging" : { + "logging" : { + "text" : "logging text from test: execute_watch/30_throttled.yml" + } + } + } + } + - match: { _id: "test_watch" } + + - do: + xpack.watcher.execute_watch: + id: "test_watch" + body: > + { + "trigger_data" : { + "triggered_time" : "2012-12-12T12:12:12.120Z", + "scheduled_time" : "2000-12-12T12:12:12.120Z" + }, + "record_execution": true + } + + - match: { watch_record.watch_id: "test_watch" } + - match: { watch_record.state: "executed" } + - match: { watch_record.status.execution_state: "executed" } + + - do: + xpack.watcher.execute_watch: + id: "test_watch" + body: > + { + "trigger_data" : { + "triggered_time" : "2012-12-12T12:12:12.120Z", + "scheduled_time" : "2000-12-12T12:12:12.120Z" + }, + "record_execution": true + } + + - match: { watch_record.watch_id: "test_watch" } + - match: { watch_record.state: "throttled" } + - match: { watch_record.status.execution_state: "throttled" } + + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/40_ignore_condition.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/40_ignore_condition.yml new file mode 100644 index 0000000000000..5c835f7d6927a --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/40_ignore_condition.yml @@ -0,0 +1,63 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +teardown: + - do: + xpack.watcher.delete_watch: + id: "test_watch" + ignore: 404 + +--- +"Test execute watch api can ignore conditions": + - do: + xpack.watcher.put_watch: + id: "test_watch" + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 
2099" } + }, + "input": { + "simple": { + "foo": "bar" + } + }, + "condition": { + "never": {} + }, + "actions": { + "logging" : { + "logging" : { + "text" : "logging text from test: execute_watch/30_throttled.yml" + } + } + } + } + - match: { _id: "test_watch" } + - match: { created: true } + + - do: + xpack.watcher.execute_watch: + id: "test_watch" + body: > + { + "ignore_condition" : true + } + + - match: { watch_record.watch_id: "test_watch" } + - match: { watch_record.input.simple.foo: "bar" } + - match: { watch_record.trigger_event.type: "manual" } + - match: { watch_record.state: "executed" } + - match: { watch_record.status.execution_state: "executed" } + - match: { watch_record.status.state.active: true } + - match: { watch_record.status.actions.logging.ack.state: "ackable" } + - is_true: watch_record.condition.never + - is_true: watch_record.result.execution_time + - match: { watch_record.result.input.type: "simple" } + - match: { watch_record.result.input.payload.foo: "bar" } + - match: { watch_record.result.condition.type: "always" } + - match: { watch_record.result.condition.met: true } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/50_action_mode.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/50_action_mode.yml new file mode 100644 index 0000000000000..3f6303b4d4718 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/50_action_mode.yml @@ -0,0 +1,73 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +teardown: + - do: + xpack.watcher.delete_watch: + id: "test_watch" + ignore: 404 + +--- +"Test execute watch api supports action modes": + - do: + xpack.watcher.put_watch: + id: "test_watch" + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 
2099" } + }, + "input": { + "simple": { + "foo": "bar" + } + }, + "actions": { + "logging" : { + "logging" : { + "text" : "logging text from test: execute_watch/30_throttled.yml" + } + } + } + } + - match: { _id: "test_watch" } + - match: { created: true } + + - do: + xpack.watcher.execute_watch: + id: "test_watch" + body: > + { + "action_modes" : { + "logging" : "simulate" + } + } + + - match: { watch_record.watch_id: "test_watch" } + - match: { watch_record.trigger_event.type: "manual" } + - match: { watch_record.state: "executed" } + - match: { watch_record.status.execution_state: "executed" } + - match: { watch_record.result.actions.0.id: "logging" } + - match: { watch_record.result.actions.0.status: "simulated" } + + - do: + xpack.watcher.execute_watch: + id: "test_watch" + body: > + { + "action_modes" : { + "_all" : "simulate" + } + } + + - match: { watch_record.watch_id: "test_watch" } + - match: { watch_record.trigger_event.type: "manual" } + - match: { watch_record.state: "executed" } + - match: { watch_record.status.execution_state: "executed" } + - match: { watch_record.result.actions.0.id: "logging" } + - match: { watch_record.result.actions.0.status: "simulated" } + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/60_http_input.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/60_http_input.yml new file mode 100644 index 0000000000000..8a9ba14cb849a --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/60_http_input.yml @@ -0,0 +1,58 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +"HTTP input supports extracting of keys": + + - do: + cluster.state: {} + - set: { metadata.cluster_uuid : cluster_uuid } + - set: { master_node: master } + + - do: + nodes.info: {} + - set: { nodes.$master.http.publish_address: http_host } + + - do: + xpack.watcher.execute_watch: + body: > + { + "watch" : { + "trigger": { + "schedule": { + "interval": "1s" + } + }, + "input" : { + "http": { + "request": { + "url": "http://${http_host}/_cluster/health", + "auth" : { + "basic" : { + "username" : "x_pack_rest_user", + "password" : "x-pack-test-password" + } + } + }, + "extract": [ "timed_out", "cluster_name" ] + } + }, + "actions": { + "log": { + "logging": { + "text": "executed at {{ctx.execution_time}}" + } + } + } + } + } + + - match: { watch_record.result.input.payload.timed_out: false } + - match: { watch_record.result.input.payload._status_code: 200 } + - is_true: watch_record.result.input.payload._headers + - is_true: watch_record.result.input.payload.cluster_name + # not part of the extract keys, should not occur + - is_false: watch_record.result.input.payload.status diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/70_invalid.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/70_invalid.yml new file mode 100644 index 0000000000000..fc8687eb699b1 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/70_invalid.yml @@ -0,0 +1,47 @@ +--- +"Test execute watch api returns proper error message with watch directly in the body": + - do: + catch: /please wrap watch including field \[trigger\] inside a \"watch\" field/ + xpack.watcher.execute_watch: + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 
2099" } + } + } + - do: + catch: /please wrap watch including field \[input\] inside a \"watch\" field/ + xpack.watcher.execute_watch: + body: > + { + "input": { + "simple": { + "foo": "bar" + } + } + } + - do: + catch: /please wrap watch including field \[condition\] inside a \"watch\" field/ + xpack.watcher.execute_watch: + body: > + { + "condition": { + "always": {} + } + } + - do: + catch: /please wrap watch including field \[actions\] inside a \"watch\" field/ + xpack.watcher.execute_watch: + body: > + { + "actions": { + "indexme" : { + "index" : { + "index" : "my_test_index", + "doc_type" : "my-type", + "doc_id": "my-id" + } + } + } + } + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/10_basic.yml new file mode 100644 index 0000000000000..74f6ce6520f93 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/10_basic.yml @@ -0,0 +1,62 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +teardown: + - do: + xpack.watcher.delete_watch: + id: "my_watch" + ignore: 404 + +--- +"Test get watch api": + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "payload": { + "send": "yes" + } + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "index": { + "index": "test", + "doc_type": "test2" + } + } + } + } + - match: { _id: "my_watch" } + - match: { created: true } + + - do: + search: + index: .watches + body: { "query": { "term": { "_id": "my_watch" } } } + - match: { hits.total: 1 } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + - match: { found : true} + - match: { _id: "my_watch" } + - is_true: watch + - is_false: watch.status diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/20_missing.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/20_missing.yml new file mode 100644 index 0000000000000..ee4fd2e7e43a8 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/20_missing.yml @@ -0,0 +1,63 @@ +--- +"Test get watch api with missing watch on existing index": + - do: + cluster.health: + wait_for_status: yellow + + # ensure index exists by creating a different watch + - do: + xpack.watcher.put_watch: + id: "other" + body: > + { + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "payload": { + "send": "yes" + } + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "index": { + "index": "test", + "doc_type": "test2" + } + } + } + } + + - do: + catch: missing + xpack.watcher.get_watch: + id: "missing_watch" + - match: { found : false} + - match: { _id: "missing_watch" } + +--- +"Test get watch api with missing watch on missing index": + - do: + cluster.health: + wait_for_status: yellow + + - do: + indices.delete: + index: .watches* + ignore: 404 + + - do: + catch: missing + xpack.watcher.get_watch: + id: "missing_watch" + - match: { found : false} + - match: { _id: "missing_watch" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/10_basic.yml new file mode 100644 index 0000000000000..ed72f32981d34 --- /dev/null +++ 
b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/10_basic.yml @@ -0,0 +1,38 @@ +--- +"Test put watch api": + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "payload": { + "send": "yes" + } + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "index": { + "index": "test", + "doc_type": "test2" + } + } + } + } + - match: { _id: "my_watch" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/20_put_watch_with_throttle_period.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/20_put_watch_with_throttle_period.yml new file mode 100644 index 0000000000000..14bd682bd02d6 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/20_put_watch_with_throttle_period.yml @@ -0,0 +1,55 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +teardown: + - do: + xpack.watcher.delete_watch: + id: "my_watch1" + ignore: 404 + +--- +"Test put watch api with watch level throttle": + - do: + xpack.watcher.put_watch: + id: "my_watch1" + body: > + { + "throttle_period" : "10s", + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "payload": { + "send": "yes" + } + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "index": { + "index": "test", + "doc_type": "test2" + } + } + } + } + - match: { _id: "my_watch1" } + + - do: + xpack.watcher.get_watch: + id: "my_watch1" + - match: { found : true} + - match: { _id: "my_watch1" } + - match: { watch.throttle_period_in_millis: 10000 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/30_put_watch_with_action_throttle_period.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/30_put_watch_with_action_throttle_period.yml new file mode 100644 index 0000000000000..db4013bc38a25 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/30_put_watch_with_action_throttle_period.yml @@ -0,0 +1,55 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +teardown: + - do: + xpack.watcher.delete_watch: + id: "my_watch1" + ignore: 404 + +--- +"Test put watch api with action level throttle period": + - do: + xpack.watcher.put_watch: + id: "my_watch1" + body: > + { + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "payload": { + "send": "yes" + } + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "throttle_period" : "10s", + "index": { + "index": "test", + "doc_type": "test2" + } + } + } + } + - match: { _id: "my_watch1" } + + - do: + xpack.watcher.get_watch: + id: "my_watch1" + - match: { found : true} + - match: { _id: "my_watch1" } + - match: { watch.actions.test_index.throttle_period_in_millis: 10000 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/40_put_watch_as_inactive.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/40_put_watch_as_inactive.yml new file mode 100644 index 0000000000000..23075ecfa7940 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/40_put_watch_as_inactive.yml @@ -0,0 +1,56 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- 
+teardown: + - do: + xpack.watcher.delete_watch: + id: "my_watch" + ignore: 404 + +--- +"Test put inactive watch": + - do: + xpack.watcher.put_watch: + id: "my_watch" + active: false + body: > + { + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "payload": { + "send": "yes" + } + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "index": { + "index": "test", + "doc_type": "test2" + } + } + } + } + - match: { _id: "my_watch" } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + + - match: { found : true } + - match: { _id: "my_watch" } + - match: { status.state.active: false } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/50_email_attachment_validation.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/50_email_attachment_validation.yml new file mode 100644 index 0000000000000..e76ab7fd71508 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/50_email_attachment_validation.yml @@ -0,0 +1,84 @@ +--- +"Test invalid urls in email attachments reject put watch": + - do: + cluster.health: + wait_for_status: yellow + + - do: + catch: /Configured URL is empty/ + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "payload": {} + } + }, + "condition": { + "always": {} + }, + "actions": { + "send_email": { + "email": { + "to": "test.account@elastic.co", + "subject": "Cluster Status Warning", + "body": "hello", + "attachments": { + "my_id": { + "http": { + "request": { "url": "" } + } + } + } + } + } + } + } + + - do: + catch: /Malformed URL/ + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "payload": {} + } + }, + "condition": { + "always": {} + }, + "actions": { + "send_email": { + "email": { + "to": "test.account@elastic.co", + "subject": "Cluster Status Warning", + "body": "hello", + "attachments": { + "my_id": { + "http": { + "request": { "url": "https://" } + } + } + } + } + } + } + } + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/60_put_watch_with_action_condition.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/60_put_watch_with_action_condition.yml new file mode 100644 index 0000000000000..670a64381d041 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/60_put_watch_with_action_condition.yml @@ -0,0 +1,59 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +teardown: + - do: + xpack.watcher.delete_watch: + id: "my_watch1" + ignore: 404 + +--- +"Test put watch api with action level condition": + - do: + xpack.watcher.put_watch: + id: "my_watch1" + body: > + { + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "value": 15 + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "condition": { + "compare": { + "ctx.payload.value": { + "lt": 10 + } + } + }, + "index": { + "index": "test", + "doc_type": "test2" + } + } + } + } + - match: { _id: "my_watch1" } + + - do: + xpack.watcher.get_watch: + id: "my_watch1" + - match: { found : true } + - match: { _id: "my_watch1" } + - match: { watch.actions.test_index.condition.compare: { "ctx.payload.value": { lt: 10 } } } diff --git 
a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/70_put_watch_with_index_action_using_id.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/70_put_watch_with_index_action_using_id.yml new file mode 100644 index 0000000000000..50420c1e4eeda --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/70_put_watch_with_index_action_using_id.yml @@ -0,0 +1,227 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +teardown: + - do: + xpack.watcher.delete_watch: + id: "my_watch" + ignore: 404 + +--- +"Test put watch api with index action using doc_id": + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "value": 15 + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "index": { + "index": "my_test_index", + "doc_type": "test2", + "doc_id": "test_id1" + } + } + } + } + - match: { _id: "my_watch" } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + - match: { found : true } + - match: { _id: "my_watch" } + - match: { watch.input.simple.value: 15 } + - match: { watch.actions.test_index.index.doc_id: "test_id1" } + + - do: + xpack.watcher.execute_watch: + id: "my_watch" + - match: { watch_record.state: "executed" } + - match: { watch_record.status.execution_state: "executed" } + - match: { watch_record.result.actions.0.index.response.id: "test_id1" } + +--- +"Test put watch api with index action using _id field": + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "_id": "test_id2", + "value": 20 + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "index": { + "index": "my_test_index", + "doc_type": "test2" + } + } + } + } + - match: { _id: "my_watch" } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + - match: { found : true } + - match: { _id: "my_watch" } + - match: { watch.input.simple._id: "test_id2" } + - match: { watch.input.simple.value: 20 } + + - do: + xpack.watcher.execute_watch: + id: "my_watch" + - match: { watch_record.state: "executed" } + - match: { watch_record.status.execution_state: "executed" } + - match: { watch_record.result.actions.0.index.response.id: "test_id2" } + +--- +"Test put watch api with bulk index action using _id field": + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "_doc": [ + { + "_id": "test_id3", + "value": 30 + }, + { + "_id": "test_id4", + "value": 40 + } + ] + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "index": { + "index": "my_test_index", + "doc_type": "test2" + } + } + } + } + - match: { _id: "my_watch" } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + - match: { found : true } + - match: { _id: "my_watch" } + - match: { watch.input.simple._doc.0._id: "test_id3" } + - match: { watch.input.simple._doc.0.value: 30 } + - match: { watch.input.simple._doc.1._id: "test_id4" } + - match: { watch.input.simple._doc.1.value: 40 } + + - do: + xpack.watcher.execute_watch: + id: "my_watch" + - match: { watch_record.state: "executed" } + - match: { watch_record.status.execution_state: "executed" } + - match: { watch_record.result.actions.0.index.response.0.id: "test_id3" } + - match: { 
watch_record.result.actions.0.index.response.1.id: "test_id4" } + +--- +"Test put watch api with bulk index action using _id field in one document": + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "_doc": [ + { + "value": 50 + }, + { + "_id": "test_id6", + "value": 60 + } + ] + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "index": { + "index": "my_test_index", + "doc_type": "test2" + } + } + } + } + - match: { _id: "my_watch" } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + - match: { found : true } + - match: { _id: "my_watch" } + - match: { watch.input.simple._doc.0.value: 50 } + - match: { watch.input.simple._doc.1._id: "test_id6" } + - match: { watch.input.simple._doc.1.value: 60 } + + - do: + xpack.watcher.execute_watch: + id: "my_watch" + - match: { watch_record.state: "executed" } + - match: { watch_record.status.execution_state: "executed" } + - match: { watch_record.result.actions.0.index.response.1.id: "test_id6" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/80_put_get_watch_with_passwords.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/80_put_get_watch_with_passwords.yml new file mode 100644 index 0000000000000..db1fa84370410 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/80_put_get_watch_with_passwords.yml @@ -0,0 +1,317 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + +--- +"Test getting a watch does not contain the original password": + + - do: + xpack.watcher.put_watch: + id: "watch_with_password" + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input": { + "http" : { + "request" : { + "host" : "host.domain", + "port" : 9200, + "path" : "/myservice", + "auth" : { + "basic" : { + "username" : "user", + "password" : "pass" + } + } + } + } + }, + "actions": { + "logging": { + "logging": { + "text": "Log me Amadeus!" + } + } + } + } + + - do: + xpack.watcher.get_watch: + id: "watch_with_password" + - match: { _id: "watch_with_password" } + - match: { watch.input.http.request.auth.basic.password: "::es_redacted::" } + +--- +"Test putting a watch with a redacted password without version returns an error": + + # version 1 + - do: + xpack.watcher.put_watch: + id: "watch_without_version_test" + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input": { + "http" : { + "request" : { + "host" : "host.domain", + "port" : 9200, + "path" : "/myservice", + "auth" : { + "basic" : { + "username" : "user", + "password" : "pass" + } + } + } + } + }, + "actions": { + "logging": { + "logging": { + "text": "Log me Amadeus!" + } + } + } + } + + - do: + catch: bad_request + xpack.watcher.put_watch: + id: "watch_without_version_test" + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input": { + "http" : { + "request" : { + "host" : "host.domain", + "port" : 9200, + "path" : "/myservice", + "auth" : { + "basic" : { + "username" : "user", + "password" : "::es_redacted::" + } + } + } + } + }, + "actions": { + "logging": { + "logging": { + "text": "Log me Amadeus!" 
+ } + } + } + } + + +--- +"Test putting a watch with a redacted password with old version returns an error": + + # version 1 + - do: + xpack.watcher.put_watch: + id: "watch_old_version" + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input": { + "http" : { + "request" : { + "host" : "host.domain", + "port" : 9200, + "path" : "/myservice", + "auth" : { + "basic" : { + "username" : "user", + "password" : "pass" + } + } + } + } + }, + "actions": { + "logging": { + "logging": { + "text": "Log me Amadeus!" + } + } + } + } + + # version 2 + - do: + xpack.watcher.put_watch: + id: "watch_old_version" + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input": { + "http" : { + "request" : { + "host" : "host.domain", + "port" : 9200, + "path" : "/myservice", + "auth" : { + "basic" : { + "username" : "user", + "password" : "pass" + } + } + } + } + }, + "actions": { + "logging": { + "logging": { + "text": "Log me Amadeus!" + } + } + } + } + + + # using optimistic concurrency control, this one will loose + # as if two users in the watch UI tried to update the same watch + - do: + catch: conflict + xpack.watcher.put_watch: + id: "watch_old_version" + version: 1 + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input": { + "http" : { + "request" : { + "host" : "host.domain", + "port" : 9200, + "path" : "/myservice", + "auth" : { + "basic" : { + "username" : "user", + "password" : "::es_redacted::" + } + } + } + } + }, + "actions": { + "logging": { + "logging": { + "text": "Log me Amadeus!" + } + } + } + } + +--- +"Test putting a watch with a redacted password with current version works": + + - do: + xpack.watcher.put_watch: + id: "my_watch_with_version" + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input": { + "http" : { + "request" : { + "host" : "host.domain", + "port" : 9200, + "path" : "/myservice", + "auth" : { + "basic" : { + "username" : "user", + "password" : "pass" + } + } + } + } + }, + "actions": { + "logging": { + "logging": { + "text": "Log me Amadeus!" + } + } + } + } + + - match: { _id: "my_watch_with_version" } + - match: { _version: 1 } + + # this resembles the exact update from the UI and thus should work, no password change, any change in the watch + # but correct version provided + - do: + xpack.watcher.put_watch: + id: "my_watch_with_version" + version: 1 + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input": { + "http" : { + "request" : { + "host" : "host.domain", + "port" : 9200, + "path" : "/myservice", + "auth" : { + "basic" : { + "username" : "user", + "password" : "::es_redacted::" + } + } + } + } + }, + "actions": { + "logging": { + "logging": { + "text": "Log me Amadeus!" 
+ } + } + } + } + + - match: { _id: "my_watch_with_version" } + - match: { _version: 2 } + + - do: + search: + index: .watches + body: > + { + "query": { + "term": { + "_id": { + "value": "my_watch_with_version" + } + } + } + } + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "my_watch_with_version" } + - match: { hits.hits.0._source.input.http.request.auth.basic.password: "pass" } + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/start_watcher/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/start_watcher/10_basic.yml new file mode 100644 index 0000000000000..575d01fcee767 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/start_watcher/10_basic.yml @@ -0,0 +1,8 @@ +--- +"Test start watcher api": + - do: + cluster.health: + wait_for_status: yellow + + - do: {xpack.watcher.start: {}} + - match: { acknowledged: true } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml new file mode 100644 index 0000000000000..6fa66667e2641 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml @@ -0,0 +1,16 @@ +--- +"Test watcher stats output": + + - do: {xpack.watcher.stats: {}} + - match: { "manually_stopped": false } + - match: { "stats.0.watcher_state": "started" } + +--- +"Test watcher stats supports emit_stacktraces parameter": + + - do: + xpack.watcher.stats: + metric: "all" + emit_stacktraces: "true" + - match: { "manually_stopped": false } + - match: { "stats.0.watcher_state": "started" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stop_watcher/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stop_watcher/10_basic.yml new file mode 100644 index 0000000000000..518714c57ab3f --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stop_watcher/10_basic.yml @@ -0,0 +1,11 @@ +--- +"Test stop watcher api": + - do: + cluster.health: + wait_for_status: yellow + + - do: {xpack.watcher.stop: {}} + - match: { acknowledged: true } + + - do: {xpack.watcher.start: {}} + - match: { acknowledged: true } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml new file mode 100644 index 0000000000000..a33fcdb529745 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml @@ -0,0 +1,61 @@ +--- +"Test watcher usage stats output": + + - do: + catch: missing + xpack.watcher.delete_watch: + id: "usage_stats_watch" + + - do: {xpack.usage: {}} + - set: { "watcher.count.active": watch_count_active } + - set: { "watcher.count.total": watch_count_total } + + - do: + xpack.watcher.put_watch: + id: "usage_stats_watch" + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 
2099" } + }, + "input": { + "search" : { + "request" : { + "indices" : [ "my_test_index" ], + "body" :{ + "query" : { "match_all": {} } + } + } + } + }, + "condition" : { + "compare" : { + "ctx.payload.hits.total" : { + "gte" : 1 + } + } + }, + "actions": { + "logging": { + "logging": { + "text": "Successfully ran my_watch to test for search input" + } + } + } + } + - match: { _id: "usage_stats_watch" } + + - do: {xpack.usage: {}} + - gt: { "watcher.count.active": $watch_count_active } + - gt: { "watcher.count.total": $watch_count_total } + - gte: { "watcher.watch.action._all.active": 1 } + - gte: { "watcher.watch.action.logging.active": 1 } + - gte: { "watcher.watch.condition._all.active": 1 } + - gte: { "watcher.watch.condition.compare.active": 1 } + - gte: { "watcher.watch.input._all.active": 1 } + - gte: { "watcher.watch.input.search.active": 1 } + - gte: { "watcher.watch.trigger._all.active": 1 } + - gte: { "watcher.watch.trigger.schedule.active": 1 } + - gte: { "watcher.watch.trigger.schedule.cron.active": 1 } + - gte: { "watcher.watch.trigger.schedule._all.active": 1 } + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml new file mode 100644 index 0000000000000..8958af0ff4486 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml @@ -0,0 +1,23 @@ +# Integration tests for monitoring +# +"X-Pack loaded": + - do: + cluster.state: {} + + # Get master node id + - set: { master_node: master } + + - do: + nodes.info: {} + + - match: { nodes.$master.modules.13.name: x-pack-core } + - match: { nodes.$master.modules.14.name: x-pack-deprecation } + - match: { nodes.$master.modules.15.name: x-pack-graph } + - match: { nodes.$master.modules.16.name: x-pack-logstash } + - match: { nodes.$master.modules.17.name: x-pack-ml } + - match: { nodes.$master.modules.18.name: x-pack-monitoring } + - match: { nodes.$master.modules.19.name: x-pack-rollup } + - match: { nodes.$master.modules.20.name: x-pack-security } + - match: { nodes.$master.modules.21.name: x-pack-sql } + - match: { nodes.$master.modules.22.name: x-pack-upgrade } + - match: { nodes.$master.modules.23.name: x-pack-watcher } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml new file mode 100644 index 0000000000000..5e61f98bbc297 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml @@ -0,0 +1,170 @@ +# Integration tests xpack info and usage API +# +"X-Pack Info and Usage": + + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.license.delete: {} + - match: { acknowledged: true } + + # we don't have a license now + - do: + xpack.info: + categories: "license,features" + - is_false: license + - is_true: features + - is_true: features.watcher + - is_true: features.watcher.enabled +# - is_false: features.watcher.available TODO fix once licensing is fixed + - is_true: features.security + - is_true: features.security.enabled +# - is_false: features.security.available TODO fix once licensing is fixed + - is_true: features.graph + - is_true: features.graph.enabled +# - is_false: features.graph.available TODO fix once licensing is fixed + - is_true: features.monitoring + - is_true: features.monitoring.enabled +# - is_false: features.monitoring.available TODO fix once licensing is fixed + + - do: + xpack.license.post: + body: > + { + "license": { + 
"uid": "893361dc-9749-4997-93cb-802e3dofh7aa", + "type": "internal", + "subscription_type": "none", + "issue_date_in_millis": 1443484800000, + "feature": "watcher", + "expiry_date_in_millis": 1914278399999, + "max_nodes": 1, + "issued_to": "issuedTo", + "issuer": "issuer", + "signature": "AAAAAQAAAA0Sc90guRIaQEmgLvMnAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQCQ94dju0pnDZR3Uuypi0ic3aQJ+nvVqe+U8u79Dga5n1qIjcHDh7HvIBJEkF+tnVPlo/PXV/x7BZSwVY1PVErit+6rYix1yuHEgqwxmx/VdRICjCaZM6tk0Ob4dZCPv6Ebn2Mmk89KHC/PwiLPqF6QfwV/Pkpa8k2A3ORJmvYSDvXhe6tCs8dqc4ebrsFxqrZjwWh5CZSpzqqZBFXlngDv2N0hHhpGlueRszD0JJ5dfEL5ZA1DDOrgO9OJVejSHyRqe1L5QRUNdXPVfS+EAG0Dd1cNdJ/sMpYCPnVjbw6iq2/YgM3cuztsXVBY7ij4WnoP3ce7Zjs9TwHn+IqzftC6" + } + } + - match: { license_status: "valid" } + + - do: + xpack.license.get: {} + - match: { license.uid: "893361dc-9749-4997-93cb-802e3dofh7aa" } + - match: { license.type: "internal" } + - match: { license.status: "active" } + + - do: + xpack.info: {} + - is_true: build.hash + - is_true: build.date + - is_true: license + - match: { license.uid: "893361dc-9749-4997-93cb-802e3dofh7aa" } + - match: { license.type: "internal" } + - match: { license.mode: "trial" } + - match: { license.status: "active" } + - match: { license.expiry_date_in_millis: 1914278399999 } + - is_true: features + - is_true: features.watcher + - is_true: features.watcher.enabled + - is_true: features.watcher.available + - is_true: features.watcher.description + - is_true: features.security + - is_true: features.security.enabled + - is_true: features.security.available + - is_true: features.security.description + - is_true: features.graph + - is_true: features.graph.enabled + - is_true: features.graph.available + - is_true: features.graph.description + - is_true: features.monitoring + - is_true: features.monitoring.enabled + - is_true: features.monitoring.available + - is_true: features.monitoring.description + - is_true: tagline + + - do: + xpack.usage: {} + - is_true: watcher.enabled + - is_true: watcher.available + - is_true: security.enabled + - is_true: security.available + - is_true: graph.enabled + - is_true: graph.available + - is_true: monitoring.enabled + - is_true: monitoring.available + + - do: + xpack.info: + categories: "_none" + - is_false: build + - is_false: features + - is_false: license + - match: { tagline: "You know, for X" } + + - do: + xpack.info: + categories: "_none" + human: false + - is_false: build + - is_false: features + - is_false: license + - is_false: tagline + + - do: + xpack.info: + categories: "build" + - is_true: build + - is_true: build.hash + - is_true: build.date + - is_true: tagline + - is_false: features + - is_false: license + + - do: + xpack.info: + categories: "build,license" + - is_true: build.hash + - is_true: build.date + - is_true: tagline + - is_false: features + - is_true: license + - match: { license.uid: "893361dc-9749-4997-93cb-802e3dofh7aa" } + - match: { license.type: "internal" } + - match: { license.mode: "trial" } + - match: { license.status: "active" } + - match: { 
license.expiry_date_in_millis: 1914278399999 } + + + - do: + xpack.info: + categories: "build,license,features" + human: false + - is_true: build.hash + - is_true: build.date + - is_true: license + - match: { license.uid: "893361dc-9749-4997-93cb-802e3dofh7aa" } + - match: { license.type: "internal" } + - match: { license.mode: "trial" } + - match: { license.status: "active" } + - match: { license.expiry_date_in_millis: 1914278399999 } + - is_true: features + - is_true: features.watcher + - is_true: features.watcher.enabled + - is_true: features.watcher.available + - is_false: features.watcher.description + - is_true: features.security + - is_true: features.security.enabled + - is_true: features.security.available + - is_false: features.security.description + - is_true: features.graph + - is_true: features.graph.enabled + - is_true: features.graph.available + - is_false: features.graph.description + - is_true: features.monitoring + - is_true: features.monitoring.enabled + - is_true: features.monitoring.available + - is_false: features.monitoring.description + - is_false: tagline + + diff --git a/x-pack/plugin/src/test/resources/wrong-version-logstash-index-template.json b/x-pack/plugin/src/test/resources/wrong-version-logstash-index-template.json new file mode 100644 index 0000000000000..95d588737d9f3 --- /dev/null +++ b/x-pack/plugin/src/test/resources/wrong-version-logstash-index-template.json @@ -0,0 +1,46 @@ +{ + "index_patterns" : ".logstash", + "settings": { + "index": { + "number_of_shards": 1, + "number_of_replicas": 1, + "codec": "best_compression" + } + }, + "mappings" : { + "doc" : { + "_meta": { + "logstash-version": "4.0.0" + }, + "dynamic": "strict", + "properties":{ + "description":{ + "type":"text" + }, + "last_modified":{ + "type":"date" + }, + "pipeline_metadata":{ + "properties":{ + "version":{ + "type":"short" + }, + "type":{ + "type":"keyword" + } + } + }, + "pipeline":{ + "type":"text" + }, + "username":{ + "type":"keyword" + }, + "metadata":{ + "type":"object", + "dynamic":false + } + } + } + } +} diff --git a/x-pack/plugin/src/test/resources/wrong-version-security-index-template.json b/x-pack/plugin/src/test/resources/wrong-version-security-index-template.json new file mode 100644 index 0000000000000..18da429a08a04 --- /dev/null +++ b/x-pack/plugin/src/test/resources/wrong-version-security-index-template.json @@ -0,0 +1,116 @@ +{ + "index_patterns" : ".security", + "order" : 1000, + "settings" : { + "number_of_shards" : 1, + "number_of_replicas" : 0, + "auto_expand_replicas" : "0-all", + "analysis" : { + "filter" : { + "email" : { + "type" : "pattern_capture", + "preserve_original" : true, + "patterns" : [ + "([^@]+)", + "(\\p{L}+)", + "(\\d+)", + "@(.+)" + ] + } + }, + "analyzer" : { + "email" : { + "tokenizer" : "uax_url_email", + "filter" : [ + "email", + "lowercase", + "unique" + ] + } + } + } + }, + "mappings" : { + "user" : { + "_meta": { + "security-version": "4.0.0" + }, + "dynamic" : "strict", + "properties" : { + "username" : { + "type" : "keyword" + }, + "roles" : { + "type" : "keyword" + }, + "password" : { + "type" : "keyword", + "index" : false, + "doc_values": false + }, + "full_name" : { + "type" : "text" + }, + "email" : { + "type" : "text", + "analyzer" : "email" + }, + "metadata" : { + "type" : "object", + "dynamic" : true + } + } + }, + "role" : { + "_meta": { + "security-version": "5.0.0-alpha5" + }, + "dynamic" : "strict", + "properties" : { + "cluster" : { + "type" : "keyword" + }, + "indices" : { + "type" : "object", + "properties" : { + 
"fields" : { + "type" : "keyword" + }, + "names" : { + "type" : "keyword" + }, + "privileges" : { + "type" : "keyword" + }, + "query" : { + "type" : "keyword" + } + } + }, + "name" : { + "type" : "keyword" + }, + "run_as" : { + "type" : "keyword" + }, + "metadata" : { + "type" : "object", + "dynamic" : true + } + } + }, + "reserved-user" : { + "_meta": { + "security-version": "5.0.0-alpha5" + }, + "dynamic" : "strict", + "properties" : { + "password": { + "type" : "keyword", + "index" : false, + "doc_values" : false + } + } + } + } +} diff --git a/x-pack/plugin/upgrade/build.gradle b/x-pack/plugin/upgrade/build.gradle new file mode 100644 index 0000000000000..5cead96ac7aa5 --- /dev/null +++ b/x-pack/plugin/upgrade/build.gradle @@ -0,0 +1,48 @@ +import com.carrotsearch.gradle.junit4.RandomizedTestingTask +import org.elasticsearch.gradle.BuildPlugin + +evaluationDependsOn(xpackModule('core')) + +apply plugin: 'elasticsearch.esplugin' +esplugin { + name 'x-pack-upgrade' + description 'Elasticsearch Expanded Pack Plugin - Upgrade' + classname 'org.elasticsearch.xpack.upgrade.Upgrade' + extendedPlugins = ['x-pack-core'] +} + +archivesBaseName = 'x-pack-upgrade' + +dependencies { + compileOnly "org.elasticsearch.plugin:x-pack-core:${version}" + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') +} + +compileJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try,-unchecked" +compileTestJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try,-unchecked" + +run { + plugin xpackModule('core') +} + +integTest.enabled = false + +// Instead we create a separate task to run the +// tests based on ESIntegTestCase +task internalClusterTest(type: RandomizedTestingTask, + group: JavaBasePlugin.VERIFICATION_GROUP, + description: 'Multi-node tests', + dependsOn: test.dependsOn) { + configure(BuildPlugin.commonTestConfig(project)) + classpath = project.test.classpath + testClassesDir = project.test.testClassesDir + include '**/*IT.class' + systemProperty 'es.set.netty.runtime.available.processors', 'false' +} +check.dependsOn internalClusterTest +internalClusterTest.mustRunAfter test + +// also add an "alias" task to make typing on the command line easier +task icTest { + dependsOn internalClusterTest +} diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeCheck.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeCheck.java new file mode 100644 index 0000000000000..0972c780618bd --- /dev/null +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeCheck.java @@ -0,0 +1,114 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.upgrade; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.script.Script; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.xpack.core.upgrade.IndexUpgradeCheckVersion; +import org.elasticsearch.xpack.core.upgrade.UpgradeActionRequired; + +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Function; + +/** + * Generic upgrade check applicable to all indices to be upgraded from the current version + * to the next major version + *
+ * The upgrade is performed in the following way: + *
+ * - preUpgrade method is called + * - reindex is performed + * - postUpgrade is called if reindex was successful + */ +public class IndexUpgradeCheck extends AbstractComponent { + + private final String name; + private final Function actionRequired; + private final InternalIndexReindexer reindexer; + + /** + * Creates a new upgrade check + * + * @param name - the name of the check + * @param settings - system settings + * @param actionRequired - return true if they can work with the index with specified name + * @param client - client + * @param clusterService - cluster service + * @param types - a list of types that the reindexing should be limited to + * @param updateScript - the upgrade script that should be used during reindexing + */ + public IndexUpgradeCheck(String name, Settings settings, + Function actionRequired, + Client client, ClusterService clusterService, String[] types, Script updateScript) { + this(name, settings, actionRequired, client, clusterService, types, updateScript, + listener -> listener.onResponse(null), (t, listener) -> listener.onResponse(TransportResponse.Empty.INSTANCE)); + } + + /** + * Creates a new upgrade check + * + * @param name - the name of the check + * @param settings - system settings + * @param actionRequired - return true if they can work with the index with specified name + * @param client - client + * @param clusterService - cluster service + * @param types - a list of types that the reindexing should be limited to + * @param updateScript - the upgrade script that should be used during reindexing + * @param preUpgrade - action that should be performed before upgrade + * @param postUpgrade - action that should be performed after upgrade + */ + public IndexUpgradeCheck(String name, Settings settings, + Function actionRequired, + Client client, ClusterService clusterService, String[] types, Script updateScript, + Consumer> preUpgrade, + BiConsumer> postUpgrade) { + super(settings); + this.name = name; + this.actionRequired = actionRequired; + this.reindexer = new InternalIndexReindexer<>(client, clusterService, IndexUpgradeCheckVersion.UPRADE_VERSION, updateScript, + types, preUpgrade, postUpgrade); + } + + /** + * Returns the name of the check + */ + public String getName() { + return name; + } + + /** + * This method is called by Upgrade API to verify if upgrade or reindex for this index is required + * + * @param indexMetaData index metadata + * @return required action or UpgradeActionRequired.NOT_APPLICABLE if this check cannot be performed on the index + */ + public UpgradeActionRequired actionRequired(IndexMetaData indexMetaData) { + return actionRequired.apply(indexMetaData); + } + + /** + * Perform the index upgrade + * + * @param task the task that executes the upgrade operation + * @param indexMetaData index metadata + * @param state current cluster state + * @param listener the listener that should be called upon completion of the upgrade + */ + public void upgrade(TaskId task, IndexMetaData indexMetaData, ClusterState state, + ActionListener listener) { + reindexer.upgrade(task, indexMetaData.getIndex().getName(), state, listener); + } +} diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeCheckFactory.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeCheckFactory.java new file mode 100644 index 0000000000000..804e159025136 --- /dev/null +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeCheckFactory.java @@ -0,0 +1,35 @@ 
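// Editorial sketch (not part of this patch): the check above is typically supplied through the
// IndexUpgradeCheckFactory interface introduced in the next file. Assuming a hypothetical internal
// index ".my-index", a "doc" mapping type, and an inline script in some installed scripting
// language "my_lang", a factory could be written as a lambda:
//
//     IndexUpgradeCheckFactory factory = (client, clusterService) -> new IndexUpgradeCheck(
//             "my-check", Settings.EMPTY,
//             indexMetaData -> ".my-index".equals(indexMetaData.getIndex().getName())
//                     ? UpgradeActionRequired.UPGRADE : UpgradeActionRequired.NOT_APPLICABLE,
//             client, clusterService, new String[] { "doc" },
//             new Script(ScriptType.INLINE, "my_lang", "ctx._source.upgraded = true", Collections.emptyMap()));
//
// The lambda form works because createCheck(Client, ClusterService) is the interface's only abstract method.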
+/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.upgrade; + +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.service.ClusterService; + +import java.util.Collection; +import java.util.Collections; + +/** + * Factory for index checks + */ +public interface IndexUpgradeCheckFactory { + + /** + * Using this method the check can expose additional user parameter that can be specified by the user on upgrade api + * + * @return the list of supported parameters + */ + default Collection supportedParams() { + return Collections.emptyList(); + } + + /** + * Creates an upgrade check + *
+ * This method is called from {@link org.elasticsearch.plugins.Plugin#createComponents} method. + */ + IndexUpgradeCheck createCheck(Client client, ClusterService clusterService); + +} diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java new file mode 100644 index 0000000000000..af75595d4fd85 --- /dev/null +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/IndexUpgradeService.java @@ -0,0 +1,117 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.upgrade; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.xpack.core.upgrade.UpgradeActionRequired; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class IndexUpgradeService extends AbstractComponent { + + private final List upgradeChecks; + + private final IndexNameExpressionResolver indexNameExpressionResolver; + + public IndexUpgradeService(Settings settings, List upgradeChecks) { + super(settings); + this.upgradeChecks = upgradeChecks; + this.indexNameExpressionResolver = new IndexNameExpressionResolver(settings); + } + + /** + * Returns the information about required upgrade action for the given indices + * + * @param indices list of indices to check, specify _all for all indices + * @param options wild card resolution option + * @param state the current cluster state + * @return a list of indices that should be upgraded/reindexed + */ + public Map upgradeInfo(String[] indices, IndicesOptions options, ClusterState state) { + Map results = new HashMap<>(); + String[] concreteIndexNames = indexNameExpressionResolver.concreteIndexNames(state, options, indices); + MetaData metaData = state.getMetaData(); + for (String index : concreteIndexNames) { + IndexMetaData indexMetaData = metaData.index(index); + UpgradeActionRequired upgradeActionRequired = upgradeInfo(indexMetaData, index); + if (upgradeActionRequired != null) { + results.put(index, upgradeActionRequired); + } + } + return results; + } + + private UpgradeActionRequired upgradeInfo(IndexMetaData indexMetaData, String index) { + for (IndexUpgradeCheck check : upgradeChecks) { + UpgradeActionRequired upgradeActionRequired = check.actionRequired(indexMetaData); + logger.trace("[{}] check [{}] returned [{}]", index, check.getName(), upgradeActionRequired); + switch (upgradeActionRequired) { + case UPGRADE: + case REINDEX: + // this index needs to be upgraded or reindexed - skipping all other checks + return upgradeActionRequired; + case UP_TO_DATE: + // this index is good - skipping all other checks + return null; + case NOT_APPLICABLE: + // this 
action is not applicable to this index - skipping to the next one + break; + default: + throw new IllegalStateException("unknown upgrade action " + upgradeActionRequired + " for the index " + + index); + + } + } + // Catch all check for all indices that didn't match the specific checks + if (indexMetaData.getCreationVersion().before(Version.V_5_0_0)) { + return UpgradeActionRequired.REINDEX; + } else { + return null; + } + } + + public void upgrade(TaskId task, String index, ClusterState state, ActionListener listener) { + IndexMetaData indexMetaData = state.metaData().index(index); + if (indexMetaData == null) { + throw new IndexNotFoundException(index); + } + for (IndexUpgradeCheck check : upgradeChecks) { + UpgradeActionRequired upgradeActionRequired = check.actionRequired(indexMetaData); + switch (upgradeActionRequired) { + case UPGRADE: + // this index needs to be upgraded - start the upgrade procedure + check.upgrade(task, indexMetaData, state, listener); + return; + case REINDEX: + // this index needs to be re-indexed + throw new IllegalStateException("Index [" + index + "] cannot be upgraded, it should be reindex instead"); + case UP_TO_DATE: + throw new IllegalStateException("Index [" + index + "] cannot be upgraded, it is up to date"); + case NOT_APPLICABLE: + // this action is not applicable to this index - skipping to the next one + break; + default: + throw new IllegalStateException("unknown upgrade action [" + upgradeActionRequired + "] for the index [" + index + "]"); + + } + } + throw new IllegalStateException("Index [" + index + "] cannot be upgraded"); + } + +} diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexer.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexer.java new file mode 100644 index 0000000000000..7e1e31919b1fc --- /dev/null +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexer.java @@ -0,0 +1,172 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.upgrade; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.ParentTaskAssigningClient; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.ReindexAction; +import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.script.Script; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.transport.TransportResponse; + +import java.util.function.BiConsumer; +import java.util.function.Consumer; + +/** + * A component that performs the following upgrade procedure: + *
+ * - Check that all data and master nodes are running running the same version + * - Create a new index .{name}-6 + * - Make index .{name} read only + * - Reindex from .{name} to .{name}-6 with transform + * - Delete index .{name} and add alias .{name} to .{name}-6 + */ +public class InternalIndexReindexer { + + private final Client client; + private final ClusterService clusterService; + private final Script transformScript; + private final String[] types; + private final int version; + private final Consumer> preUpgrade; + private final BiConsumer> postUpgrade; + + public InternalIndexReindexer(Client client, ClusterService clusterService, int version, Script transformScript, String[] types, + Consumer> preUpgrade, + BiConsumer> postUpgrade) { + this.client = client; + this.clusterService = clusterService; + this.transformScript = transformScript; + this.types = types; + this.version = version; + this.preUpgrade = preUpgrade; + this.postUpgrade = postUpgrade; + } + + public void upgrade(TaskId task, String index, ClusterState clusterState, ActionListener listener) { + ParentTaskAssigningClient parentAwareClient = new ParentTaskAssigningClient(client, task); + preUpgrade.accept(ActionListener.wrap( + t -> innerUpgrade(parentAwareClient, index, clusterState, ActionListener.wrap( + response -> postUpgrade.accept(t, ActionListener.wrap( + empty -> listener.onResponse(response), + listener::onFailure + )), + listener::onFailure + )), + listener::onFailure)); + } + + private void innerUpgrade(ParentTaskAssigningClient parentAwareClient, String index, ClusterState clusterState, + ActionListener listener) { + String newIndex = index + "-" + version; + try { + checkMasterAndDataNodeVersion(clusterState); + parentAwareClient.admin().indices().prepareCreate(newIndex).execute(ActionListener.wrap(createIndexResponse -> + setReadOnlyBlock(index, ActionListener.wrap(setReadOnlyResponse -> + reindex(parentAwareClient, index, newIndex, ActionListener.wrap( + bulkByScrollResponse -> // Successful completion of reindexing - delete old index + removeReadOnlyBlock(parentAwareClient, index, ActionListener.wrap(unsetReadOnlyResponse -> + parentAwareClient.admin().indices().prepareAliases().removeIndex(index) + .addAlias(newIndex, index).execute(ActionListener.wrap(deleteIndexResponse -> + listener.onResponse(bulkByScrollResponse), listener::onFailure + )), listener::onFailure + )), + e -> // Something went wrong during reindexing - remove readonly flag and report the error + removeReadOnlyBlock(parentAwareClient, index, ActionListener.wrap(unsetReadOnlyResponse -> { + listener.onFailure(e); + }, e1 -> { + listener.onFailure(e); + })) + )), listener::onFailure + )), listener::onFailure + )); + } catch (Exception ex) { + listener.onFailure(ex); + } + } + + private void checkMasterAndDataNodeVersion(ClusterState clusterState) { + if (clusterState.nodes().getMinNodeVersion().before(Upgrade.UPGRADE_INTRODUCED)) { + throw new IllegalStateException("All nodes should have at least version [" + Upgrade.UPGRADE_INTRODUCED + "] to upgrade"); + } + } + + private void removeReadOnlyBlock(ParentTaskAssigningClient parentAwareClient, String index, + ActionListener listener) { + Settings settings = Settings.builder().put(IndexMetaData.INDEX_READ_ONLY_SETTING.getKey(), false).build(); + parentAwareClient.admin().indices().prepareUpdateSettings(index).setSettings(settings).execute(listener); + } + + private void reindex(ParentTaskAssigningClient parentAwareClient, String index, String newIndex, + ActionListener listener) { + 
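// Copy all documents of the configured types from the old index into the new one, applying the
// transform script; refresh=true makes the new index searchable as soon as the copy finishes, and
// the parent-aware client ties the reindex task to the parent upgrade task.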
SearchRequest sourceRequest = new SearchRequest(index); + sourceRequest.types(types); + IndexRequest destinationRequest = new IndexRequest(newIndex); + ReindexRequest reindexRequest = new ReindexRequest(sourceRequest, destinationRequest); + reindexRequest.setRefresh(true); + reindexRequest.setScript(transformScript); + parentAwareClient.execute(ReindexAction.INSTANCE, reindexRequest, listener); + } + + /** + * Makes the index readonly if it's not set as a readonly yet + */ + private void setReadOnlyBlock(String index, ActionListener listener) { + clusterService.submitStateUpdateTask("lock-index-for-upgrade", new ClusterStateUpdateTask() { + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + final IndexMetaData indexMetaData = currentState.metaData().index(index); + if (indexMetaData == null) { + throw new IndexNotFoundException(index); + } + + if (indexMetaData.getState() != IndexMetaData.State.OPEN) { + throw new IllegalStateException("unable to upgrade a closed index[" + index + "]"); + } + if (currentState.blocks().hasIndexBlock(index, IndexMetaData.INDEX_READ_ONLY_BLOCK)) { + throw new IllegalStateException("unable to upgrade a read-only index[" + index + "]"); + } + + Settings.Builder indexSettings = Settings.builder().put(indexMetaData.getSettings()) + .put(IndexMetaData.INDEX_READ_ONLY_SETTING.getKey(), true); + + MetaData.Builder metaDataBuilder = MetaData.builder(currentState.metaData()) + .put(IndexMetaData.builder(indexMetaData).settings(indexSettings)); + + ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()) + .addIndexBlock(index, IndexMetaData.INDEX_READ_ONLY_BLOCK); + + return ClusterState.builder(currentState).metaData(metaDataBuilder).blocks(blocks).build(); + } + + @Override + public void onFailure(String source, Exception e) { + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + listener.onResponse(TransportResponse.Empty.INSTANCE); + } + }); + } + +} diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java new file mode 100644 index 0000000000000..568397e37395a --- /dev/null +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/Upgrade.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.upgrade; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction; +import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeInfoAction; +import org.elasticsearch.xpack.upgrade.actions.TransportIndexUpgradeAction; +import org.elasticsearch.xpack.upgrade.actions.TransportIndexUpgradeInfoAction; +import org.elasticsearch.xpack.upgrade.rest.RestIndexUpgradeAction; +import org.elasticsearch.xpack.upgrade.rest.RestIndexUpgradeInfoAction; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.function.BiFunction; +import java.util.function.Supplier; + +public class Upgrade extends Plugin implements ActionPlugin { + + public static final Version UPGRADE_INTRODUCED = Version.V_5_6_0; + + private final Settings settings; + private final List> upgradeCheckFactories; + + public Upgrade(Settings settings) { + this.settings = settings; + this.upgradeCheckFactories = new ArrayList<>(); + } + + @Override + public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, ScriptService scriptService, + NamedXContentRegistry xContentRegistry, Environment environment, + NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { + List upgradeChecks = new ArrayList<>(upgradeCheckFactories.size()); + for (BiFunction checkFactory : upgradeCheckFactories) { + upgradeChecks.add(checkFactory.apply(client, clusterService)); + } + return Collections.singletonList(new IndexUpgradeService(settings, Collections.unmodifiableList(upgradeChecks))); + } + + @Override + public List> getActions() { + return Arrays.asList( + new ActionHandler<>(IndexUpgradeInfoAction.INSTANCE, TransportIndexUpgradeInfoAction.class), + new ActionHandler<>(IndexUpgradeAction.INSTANCE, TransportIndexUpgradeAction.class) + ); + } + + @Override + public List getRestHandlers(Settings settings, RestController restController, ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster) { + return Arrays.asList( + new RestIndexUpgradeInfoAction(settings, restController), + new RestIndexUpgradeAction(settings, 
restController) + ); + } + +} diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/actions/TransportIndexUpgradeAction.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/actions/TransportIndexUpgradeAction.java new file mode 100644 index 0000000000000..edb560174390c --- /dev/null +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/actions/TransportIndexUpgradeAction.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.upgrade.actions; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction; +import org.elasticsearch.xpack.upgrade.IndexUpgradeService; + +public class TransportIndexUpgradeAction extends TransportMasterNodeAction { + + private final IndexUpgradeService indexUpgradeService; + + @Inject + public TransportIndexUpgradeAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, + IndexUpgradeService indexUpgradeService, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, IndexUpgradeAction.NAME, transportService, clusterService, threadPool, actionFilters, + IndexUpgradeAction.Request::new, indexNameExpressionResolver); + this.indexUpgradeService = indexUpgradeService; + } + + @Override + protected String executor() { + return ThreadPool.Names.GENERIC; + } + + @Override + protected BulkByScrollResponse newResponse() { + return new BulkByScrollResponse(); + } + + @Override + protected ClusterBlockException checkBlock(IndexUpgradeAction.Request request, ClusterState state) { + // Cluster is not affected but we look up repositories in metadata + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } + + @Override + protected final void masterOperation(Task task, IndexUpgradeAction.Request request, ClusterState state, + ActionListener listener) { + TaskId taskId = new TaskId(clusterService.localNode().getId(), task.getId()); + indexUpgradeService.upgrade(taskId, request.index(), state, listener); + } + + @Override + protected final void masterOperation(IndexUpgradeAction.Request request, ClusterState state, + ActionListener listener) { + throw new UnsupportedOperationException("the task parameter is required"); + } + +} diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/actions/TransportIndexUpgradeInfoAction.java 
b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/actions/TransportIndexUpgradeInfoAction.java new file mode 100644 index 0000000000000..67ec115d79f5c --- /dev/null +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/actions/TransportIndexUpgradeInfoAction.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.upgrade.actions; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeInfoAction; +import org.elasticsearch.xpack.upgrade.IndexUpgradeService; +import org.elasticsearch.xpack.core.upgrade.UpgradeActionRequired; + +import java.util.Map; + +public class TransportIndexUpgradeInfoAction extends TransportMasterNodeReadAction { + + private final IndexUpgradeService indexUpgradeService; + private final XPackLicenseState licenseState; + + + @Inject + public TransportIndexUpgradeInfoAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, + IndexUpgradeService indexUpgradeService, + IndexNameExpressionResolver indexNameExpressionResolver, + XPackLicenseState licenseState) { + super(settings, IndexUpgradeInfoAction.NAME, transportService, clusterService, threadPool, actionFilters, + IndexUpgradeInfoAction.Request::new, indexNameExpressionResolver); + this.indexUpgradeService = indexUpgradeService; + this.licenseState = licenseState; + } + + @Override + protected String executor() { + return ThreadPool.Names.GENERIC; + } + + @Override + protected IndexUpgradeInfoAction.Response newResponse() { + return new IndexUpgradeInfoAction.Response(); + } + + @Override + protected ClusterBlockException checkBlock(IndexUpgradeInfoAction.Request request, ClusterState state) { + // Cluster is not affected but we look up repositories in metadata + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } + + @Override + protected final void masterOperation(final IndexUpgradeInfoAction.Request request, ClusterState state, + final ActionListener listener) { + if (licenseState.isUpgradeAllowed()) { + Map results = + indexUpgradeService.upgradeInfo(request.indices(), request.indicesOptions(), state); + listener.onResponse(new IndexUpgradeInfoAction.Response(results)); + } else { + listener.onFailure(LicenseUtils.newComplianceException(XPackField.UPGRADE)); + } + } +} diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/rest/RestIndexUpgradeAction.java 
b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/rest/RestIndexUpgradeAction.java new file mode 100644 index 0000000000000..337cd810f5ee3 --- /dev/null +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/rest/RestIndexUpgradeAction.java @@ -0,0 +1,119 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.upgrade.rest; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.BulkByScrollTask; +import org.elasticsearch.index.reindex.ScrollableHitSource; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.tasks.LoggingTaskListener; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction; +import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction.Request; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class RestIndexUpgradeAction extends BaseRestHandler { + public RestIndexUpgradeAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.POST, "_xpack/migration/upgrade/{index}", this); + } + + @Override + public String getName() { + return "xpack_migration_upgrade"; + } + + @Override + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + if (request.method().equals(RestRequest.Method.POST)) { + return handlePost(request, client); + } else { + throw new IllegalArgumentException("illegal method [" + request.method() + "] for request [" + request.path() + "]"); + } + } + + private RestChannelConsumer handlePost(final RestRequest request, NodeClient client) { + Request upgradeRequest = new Request(request.param("index")); + Map params = new HashMap<>(); + params.put(BulkByScrollTask.Status.INCLUDE_CREATED, Boolean.toString(true)); + params.put(BulkByScrollTask.Status.INCLUDE_UPDATED, Boolean.toString(true)); + + if (request.paramAsBoolean("wait_for_completion", true)) { + return channel -> client.execute(IndexUpgradeAction.INSTANCE, upgradeRequest, + new RestBuilderListener(channel) { + + @Override + public RestResponse buildResponse(BulkByScrollResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + response.toXContent(builder, new ToXContent.DelegatingMapParams(params, channel.request())); + builder.endObject(); + return new BytesRestResponse(getStatus(response), builder); + } + + private RestStatus getStatus(BulkByScrollResponse response) { + /* + * Return the highest numbered rest status under the assumption that higher numbered statuses are "more 
error" + * and thus more interesting to the user. + */ + RestStatus status = RestStatus.OK; + if (response.isTimedOut()) { + status = RestStatus.REQUEST_TIMEOUT; + } + for (BulkItemResponse.Failure failure : response.getBulkFailures()) { + if (failure.getStatus().getStatus() > status.getStatus()) { + status = failure.getStatus(); + } + } + for (ScrollableHitSource.SearchFailure failure : response.getSearchFailures()) { + RestStatus failureStatus = ExceptionsHelper.status(failure.getReason()); + if (failureStatus.getStatus() > status.getStatus()) { + status = failureStatus; + } + } + return status; + } + + }); + } else { + upgradeRequest.setShouldStoreResult(true); + + /* + * Validating before forking to make sure we can catch the issues earlier + */ + ActionRequestValidationException validationException = upgradeRequest.validate(); + if (validationException != null) { + throw validationException; + } + Task task = client.executeLocally(IndexUpgradeAction.INSTANCE, upgradeRequest, LoggingTaskListener.instance()); + // Send task description id instead of waiting for the message + return channel -> { + try (XContentBuilder builder = channel.newBuilder()) { + builder.startObject(); + builder.field("task", client.getLocalNodeId() + ":" + task.getId()); + builder.endObject(); + channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder)); + } + }; + } + } +} + diff --git a/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/rest/RestIndexUpgradeInfoAction.java b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/rest/RestIndexUpgradeInfoAction.java new file mode 100644 index 0000000000000..24b576187cb6e --- /dev/null +++ b/x-pack/plugin/upgrade/src/main/java/org/elasticsearch/xpack/upgrade/rest/RestIndexUpgradeInfoAction.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.upgrade.rest; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeInfoAction; +import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeInfoAction.Request; + +import java.io.IOException; + +public class RestIndexUpgradeInfoAction extends BaseRestHandler { + + public RestIndexUpgradeInfoAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.GET, "/_xpack/migration/assistance", this); + controller.registerHandler(RestRequest.Method.GET, "/_xpack/migration/assistance/{index}", this); + } + + @Override + public String getName() { + return "xpack_migration_assistance"; + } + + @Override + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + if (request.method().equals(RestRequest.Method.GET)) { + return handleGet(request, client); + } else { + throw new IllegalArgumentException("illegal method [" + request.method() + "] for request [" + request.path() + "]"); + } + } + + private RestChannelConsumer handleGet(final RestRequest request, NodeClient client) { + Request infoRequest = new Request(Strings.splitStringByCommaToArray(request.param("index"))); + infoRequest.indicesOptions(IndicesOptions.fromRequest(request, infoRequest.indicesOptions())); + return channel -> client.execute(IndexUpgradeInfoAction.INSTANCE, infoRequest, new RestToXContentListener<>(channel)); + } + +} + diff --git a/x-pack/plugin/upgrade/src/main/plugin-metadata/plugin-security.policy b/x-pack/plugin/upgrade/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..f603bf9ad63ba --- /dev/null +++ b/x-pack/plugin/upgrade/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,25 @@ +grant { + // needed for multiple server implementations used in tests + permission java.net.SocketPermission "*", "accept,connect"; +}; + +grant codeBase "${codebase.netty-common}" { + // for reading the system-wide configuration for the backlog of established sockets + permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read"; +}; + +grant codeBase "${codebase.netty-transport}" { + // Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854 + // the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely! 
+ permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write"; +}; + +grant codeBase "${codebase.elasticsearch-rest-client}" { + // rest client uses system properties which gets the default proxy + permission java.net.NetPermission "getProxySelector"; +}; + +grant codeBase "${codebase.httpasyncclient}" { + // rest client uses system properties which gets the default proxy + permission java.net.NetPermission "getProxySelector"; +}; \ No newline at end of file diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIT.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIT.java new file mode 100644 index 0000000000000..ef5c3acc3d238 --- /dev/null +++ b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIT.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.upgrade; + +import org.elasticsearch.Build; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.xpack.core.upgrade.UpgradeActionRequired; +import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction; +import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeInfoAction; +import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeInfoAction.Response; +import org.junit.Before; + +import java.util.Collections; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.core.IsEqual.equalTo; + +public class IndexUpgradeIT extends IndexUpgradeIntegTestCase { + + @Before + public void resetLicensing() throws Exception { + enableLicensing(); + } + + public void testIndexUpgradeInfo() { + // Testing only negative case here, the positive test is done in bwcTests + assertAcked(client().admin().indices().prepareCreate("test").get()); + ensureYellow("test"); + Response response = client().prepareExecute(IndexUpgradeInfoAction.INSTANCE).setIndices("test").get(); + assertThat(response.getActions().entrySet(), empty()); + } + + public void testIndexUpgradeInfoLicense() throws Exception { + // This test disables all licenses and generates a new one using dev private key + // in non-snapshot builds we are using produciton public key for license verification + // which makes this test to fail + assumeTrue("License is only valid when tested against snapshot/test keys", Build.CURRENT.isSnapshot()); + assertAcked(client().admin().indices().prepareCreate("test").get()); + ensureYellow("test"); + disableLicensing(); + ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, + () -> 
client().prepareExecute(IndexUpgradeInfoAction.INSTANCE).setIndices("test").get()); + assertThat(e.getMessage(), equalTo("current license is non-compliant for [upgrade]")); + enableLicensing(); + Response response = client().prepareExecute(IndexUpgradeInfoAction.INSTANCE).setIndices("test").get(); + assertThat(response.getActions().entrySet(), empty()); + } + + public void testUpToDateIndexUpgrade() throws Exception { + // Testing only negative case here, the positive test is done in bwcTests + String testIndex = "test"; + String testType = "doc"; + assertAcked(client().admin().indices().prepareCreate(testIndex).get()); + indexRandom(true, + client().prepareIndex(testIndex, testType, "1").setSource("{\"foo\":\"bar\"}", XContentType.JSON), + client().prepareIndex(testIndex, testType, "2").setSource("{\"foo\":\"baz\"}", XContentType.JSON) + ); + ensureYellow(testIndex); + + IllegalStateException ex = expectThrows(IllegalStateException.class, + () -> client().prepareExecute(IndexUpgradeAction.INSTANCE).setIndex(testIndex).get()); + assertThat(ex.getMessage(), equalTo("Index [" + testIndex + "] cannot be upgraded")); + + SearchResponse searchResponse = client().prepareSearch(testIndex).get(); + assertEquals(2L, searchResponse.getHits().getTotalHits()); + } + + public void testInternalUpgradePrePostChecks() throws Exception { + String testIndex = "internal_index"; + String testType = "test"; + Long val = randomLong(); + AtomicBoolean preUpgradeIsCalled = new AtomicBoolean(); + AtomicBoolean postUpgradeIsCalled = new AtomicBoolean(); + + IndexUpgradeCheck check = new IndexUpgradeCheck( + "test", Settings.EMPTY, + indexMetaData -> { + if (indexMetaData.getIndex().getName().equals(testIndex)) { + return UpgradeActionRequired.UPGRADE; + } else { + return UpgradeActionRequired.NOT_APPLICABLE; + } + }, + client(), internalCluster().clusterService(internalCluster().getMasterName()), Strings.EMPTY_ARRAY, null, + listener -> { + assertFalse(preUpgradeIsCalled.getAndSet(true)); + assertFalse(postUpgradeIsCalled.get()); + listener.onResponse(val); + }, + (aLong, listener) -> { + assertTrue(preUpgradeIsCalled.get()); + assertFalse(postUpgradeIsCalled.getAndSet(true)); + assertEquals(aLong, val); + listener.onResponse(TransportResponse.Empty.INSTANCE); + }); + + assertAcked(client().admin().indices().prepareCreate(testIndex).get()); + indexRandom(true, + client().prepareIndex(testIndex, testType, "1").setSource("{\"foo\":\"bar\"}", XContentType.JSON), + client().prepareIndex(testIndex, testType, "2").setSource("{\"foo\":\"baz\"}", XContentType.JSON) + ); + ensureYellow(testIndex); + + IndexUpgradeService service = new IndexUpgradeService(Settings.EMPTY, Collections.singletonList(check)); + + PlainActionFuture future = PlainActionFuture.newFuture(); + service.upgrade(new TaskId("abc", 123), testIndex, clusterService().state(), future); + BulkByScrollResponse response = future.actionGet(); + assertThat(response.getCreated(), equalTo(2L)); + + SearchResponse searchResponse = client().prepareSearch(testIndex).get(); + assertEquals(2L, searchResponse.getHits().getTotalHits()); + + assertTrue(preUpgradeIsCalled.get()); + assertTrue(postUpgradeIsCalled.get()); + } + + public void testIndexUpgradeInfoOnEmptyCluster() { + // On empty cluster asking for all indices shouldn't fail since no indices means nothing needs to be upgraded + Response response = client().prepareExecute(IndexUpgradeInfoAction.INSTANCE).setIndices("_all").get(); + assertThat(response.getActions().entrySet(), empty()); + + // but calling on a 
particular index should fail + assertThrows(client().prepareExecute(IndexUpgradeInfoAction.INSTANCE).setIndices("test"), IndexNotFoundException.class); + } +} diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIntegTestCase.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIntegTestCase.java new file mode 100644 index 0000000000000..4f99960f23f8e --- /dev/null +++ b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIntegTestCase.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.upgrade; + +import org.elasticsearch.analysis.common.CommonAnalysisPlugin; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.reindex.ReindexPlugin; +import org.elasticsearch.license.AbstractLicensesIntegrationTestCase; +import org.elasticsearch.license.License; +import org.elasticsearch.license.TestUtils; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.XPackClientPlugin; +import org.elasticsearch.xpack.core.monitoring.test.MockPainlessScriptEngine; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; + +public abstract class IndexUpgradeIntegTestCase extends AbstractLicensesIntegrationTestCase { + @Override + protected boolean ignoreExternalCluster() { + return true; + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(LocalStateCompositeXPackPlugin.class, Upgrade.class, ReindexPlugin.class, + MockPainlessScriptEngine.TestPlugin.class, CommonAnalysisPlugin.class); + } + + @Override + protected Collection> transportClientPlugins() { + return Arrays.asList(XPackClientPlugin.class, ReindexPlugin.class); + } + private static String randomValidLicenseType() { + return randomFrom("trial", "platinum", "gold", "standard", "basic"); + } + + private static String randomInvalidLicenseType() { + return "missing"; + } + + public void disableLicensing() throws Exception { + updateLicensing(randomInvalidLicenseType()); + } + + public void enableLicensing() throws Exception { + updateLicensing(randomValidLicenseType()); + } + + public void updateLicensing(String licenseType) throws Exception { + wipeAllLicenses(); + if (licenseType.equals("missing")) { + putLicenseTombstone(); + } else { + License license = TestUtils.generateSignedLicense(licenseType, TimeValue.timeValueMinutes(1)); + putLicense(license); + } + } +} diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java new file mode 100644 index 0000000000000..63b1c602bf9c2 --- /dev/null +++ b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeServiceTests.java @@ -0,0 +1,183 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.upgrade; + + +import org.elasticsearch.Version; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.upgrade.UpgradeActionRequired; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Map; +import java.util.function.Function; + +import static org.hamcrest.core.IsEqual.equalTo; + +public class IndexUpgradeServiceTests extends ESTestCase { + + private IndexUpgradeCheck upgradeBarCheck = new IndexUpgradeCheck("upgrade_bar", Settings.EMPTY, + (Function) indexMetaData -> { + if ("bar".equals(indexMetaData.getSettings().get("test.setting"))) { + return UpgradeActionRequired.UPGRADE; + } else { + return UpgradeActionRequired.NOT_APPLICABLE; + } + }, null, null, null, null); + + private IndexUpgradeCheck reindexFooCheck = new IndexUpgradeCheck("reindex_foo", Settings.EMPTY, + (Function) indexMetaData -> { + if ("foo".equals(indexMetaData.getSettings().get("test.setting"))) { + return UpgradeActionRequired.REINDEX; + } else { + return UpgradeActionRequired.NOT_APPLICABLE; + } + }, null, null, null, null); + + private IndexUpgradeCheck everythingIsFineCheck = new IndexUpgradeCheck("everything_is_fine", Settings.EMPTY, + indexMetaData -> UpgradeActionRequired.UP_TO_DATE, null, null, null, null); + + private IndexUpgradeCheck unreachableCheck = new IndexUpgradeCheck("unreachable", Settings.EMPTY, + (Function) indexMetaData -> { + fail("Unreachable check is called"); + return null; + }, null, null, null, null); + + public void testIndexUpgradeServiceMultipleCheck() throws Exception { + IndexUpgradeService service; + if (randomBoolean()) { + service = new IndexUpgradeService(Settings.EMPTY, Arrays.asList( + upgradeBarCheck, + reindexFooCheck, + everythingIsFineCheck, + unreachableCheck // This one should never be called + )); + } else { + service = new IndexUpgradeService(Settings.EMPTY, Arrays.asList( + reindexFooCheck, + upgradeBarCheck, + everythingIsFineCheck, + unreachableCheck // This one should never be called + )); + } + + IndexMetaData fooIndex = newTestIndexMeta("bar", Settings.builder().put("test.setting", "bar").build()); + IndexMetaData barIndex = newTestIndexMeta("foo", Settings.builder().put("test.setting", "foo").build()); + IndexMetaData bazIndex = newTestIndexMeta("baz", Settings.EMPTY); + + ClusterState clusterState = mockClusterState(fooIndex, barIndex, bazIndex); + + Map result = service.upgradeInfo(new String[]{"bar", "foo", "baz"}, + IndicesOptions.lenientExpandOpen(), clusterState); + + assertThat(result.size(), equalTo(2)); + assertThat(result.get("bar"), equalTo(UpgradeActionRequired.UPGRADE)); + assertThat(result.get("foo"), equalTo(UpgradeActionRequired.REINDEX)); + + result = service.upgradeInfo(new String[]{"b*"}, IndicesOptions.lenientExpandOpen(), clusterState); + + assertThat(result.size(), equalTo(1)); + assertThat(result.get("bar"), equalTo(UpgradeActionRequired.UPGRADE)); + } + + + public void testNoMatchingChecks() throws Exception { + IndexUpgradeService service = new IndexUpgradeService(Settings.EMPTY, Arrays.asList( + upgradeBarCheck, + reindexFooCheck + )); + + 
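// Note that the variable names are swapped relative to the index names: the index named "bar"
// carries test.setting=bar and so matches the upgrade check, the index named "foo" matches the
// reindex check, and "baz" (created on the current version) matches neither.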
IndexMetaData fooIndex = newTestIndexMeta("bar", Settings.builder().put("test.setting", "bar").build()); + IndexMetaData barIndex = newTestIndexMeta("foo", Settings.builder().put("test.setting", "foo").build()); + IndexMetaData bazIndex = newTestIndexMeta("baz", Settings.EMPTY); + + ClusterState clusterState = mockClusterState(fooIndex, barIndex, bazIndex); + + Map result = service.upgradeInfo(new String[]{"bar", "foo", "baz"}, + IndicesOptions.lenientExpandOpen(), clusterState); + + assertThat(result.size(), equalTo(2)); + assertThat(result.get("bar"), equalTo(UpgradeActionRequired.UPGRADE)); + assertThat(result.get("foo"), equalTo(UpgradeActionRequired.REINDEX)); + } + + public void testEarlierChecksWin() throws Exception { + IndexUpgradeService service = new IndexUpgradeService(Settings.EMPTY, Arrays.asList( + everythingIsFineCheck, + upgradeBarCheck, + reindexFooCheck + )); + + IndexMetaData fooIndex = newTestIndexMeta("bar", Settings.builder().put("test.setting", "bar").build()); + IndexMetaData barIndex = newTestIndexMeta("foo", Settings.builder().put("test.setting", "foo").build()); + IndexMetaData bazIndex = newTestIndexMeta("baz", Settings.EMPTY); + + ClusterState clusterState = mockClusterState(fooIndex, barIndex, bazIndex); + + Map result = service.upgradeInfo(new String[]{"bar", "foo", "baz"}, + IndicesOptions.lenientExpandOpen(), clusterState); + + assertThat(result.size(), equalTo(0)); // everything as the first checker should indicate that everything is fine + } + + public void testGenericTest() throws Exception { + IndexUpgradeService service = new IndexUpgradeService(Settings.EMPTY, Arrays.asList( + upgradeBarCheck, + reindexFooCheck + )); + + IndexMetaData goodIndex = newTestIndexMeta("good", Settings.EMPTY); + IndexMetaData badIndex = newTestIndexMeta("bad", + Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.fromString("2.0.0")).build()); + + ClusterState clusterState = mockClusterState(goodIndex, badIndex); + + Map result = service.upgradeInfo(new String[]{"good", "bad"}, + IndicesOptions.lenientExpandOpen(), clusterState); + + assertThat(result.size(), equalTo(1)); + assertThat(result.get("bad"), equalTo(UpgradeActionRequired.REINDEX)); + + } + + + private ClusterState mockClusterState(IndexMetaData... 
indices) { + MetaData.Builder metaDataBuilder = MetaData.builder(); + for (IndexMetaData indexMetaData : indices) { + metaDataBuilder.put(indexMetaData, false); + } + return ClusterState.builder(ClusterName.DEFAULT).metaData(metaDataBuilder).build(); + } + + public static IndexMetaData newTestIndexMeta(String name, String alias, Settings indexSettings) throws IOException { + Settings build = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_CREATION_DATE, 1) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) + .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_5_0_0_beta1) + .put(indexSettings) + .build(); + IndexMetaData.Builder builder = IndexMetaData.builder(name).settings(build); + if (alias != null) { + // Create alias + builder.putAlias(AliasMetaData.newAliasMetaDataBuilder(alias).build()); + } + return builder.build(); + } + + public static IndexMetaData newTestIndexMeta(String name, Settings indexSettings) throws IOException { + return newTestIndexMeta(name, null, indexSettings); + } +} diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeTasksIT.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeTasksIT.java new file mode 100644 index 0000000000000..c3f371a74b71d --- /dev/null +++ b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeTasksIT.java @@ -0,0 +1,202 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.upgrade; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.ReindexAction; +import org.elasticsearch.index.reindex.ReindexPlugin; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.upgrade.UpgradeActionRequired; +import org.elasticsearch.xpack.core.upgrade.UpgradeField; +import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction; +import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeInfoAction; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +@ESIntegTestCase.ClusterScope(scope = TEST, supportsDedicatedMasters = false, numClientNodes = 0, maxNumDataNodes = 1) +public class IndexUpgradeTasksIT extends ESIntegTestCase { + + @Override + protected boolean ignoreExternalCluster() { + return true; + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(MockUpgradePlugin.class, ReindexPlugin.class); + } + + public static class MockUpgradePlugin extends Plugin implements ScriptPlugin, ActionPlugin { + + public static final String NAME = MockScriptEngine.NAME; + + private Settings settings; + private Upgrade upgrade; + + private CountDownLatch upgradeLatch = new CountDownLatch(1); + private CountDownLatch upgradeCalledLatch = new CountDownLatch(1); + + @Override + public 
ScriptEngine getScriptEngine(Settings settings, Collection<ScriptContext<?>> contexts) { + return new MockScriptEngine(pluginScriptLang(), pluginScripts()); + } + + public String pluginScriptLang() { + return NAME; + } + + public MockUpgradePlugin(Settings settings) { + this.settings = settings; + this.upgrade = new Upgrade(settings); + Loggers.getLogger(IndexUpgradeTasksIT.class).info("MockUpgradePlugin is created"); + } + + + protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { + Map<String, Function<Map<String, Object>, Object>> scripts = new HashMap<>(); + scripts.put("block", map -> { + upgradeCalledLatch.countDown(); + try { + assertThat(upgradeLatch.await(10, TimeUnit.SECONDS), equalTo(true)); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + return null; + }); + return scripts; + } + + @Override + public Collection<Object> createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, ScriptService scriptService, + NamedXContentRegistry xContentRegistry, Environment environment, + NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { + return Arrays.asList(new IndexUpgradeService(settings, Collections.singletonList( + new IndexUpgradeCheck("test", settings, + new Function<IndexMetaData, UpgradeActionRequired>() { + @Override + public UpgradeActionRequired apply(IndexMetaData indexMetaData) { + if ("test".equals(indexMetaData.getIndex().getName())) { + if (UpgradeField.checkInternalIndexFormat(indexMetaData)) { + return UpgradeActionRequired.UP_TO_DATE; + } else { + return UpgradeActionRequired.UPGRADE; + } + } else { + return UpgradeActionRequired.NOT_APPLICABLE; + } + } + }, + client, clusterService, Strings.EMPTY_ARRAY, + new Script(ScriptType.INLINE, NAME, "block", Collections.emptyMap())) + )), new XPackLicenseState(settings)); + } + + @Override + public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { + return upgrade.getActions(); + } + + @Override + public Collection<String> getRestHeaders() { + return upgrade.getRestHeaders(); + } + } + + @Override + protected Collection<Class<? extends Plugin>> transportClientPlugins() { + return nodePlugins(); + } + + public void testParentTasksDuringUpgrade() throws Exception { + logger.info("before getInstance"); + PluginsService pluginsService = internalCluster().getDataNodeInstance(PluginsService.class); + MockUpgradePlugin mockUpgradePlugin = pluginsService.filterPlugins(MockUpgradePlugin.class).get(0); + assertThat(mockUpgradePlugin, notNullValue()); + logger.info("after getInstance"); + + assertAcked(client().admin().indices().prepareCreate("test").get()); + client().prepareIndex("test", "doc", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + + ensureYellow("test"); + + + IndexUpgradeInfoAction.Response infoResponse = client().prepareExecute(IndexUpgradeInfoAction.INSTANCE).setIndices("test").get(); + assertThat(infoResponse.getActions().keySet(), contains("test")); + assertThat(infoResponse.getActions().get("test"), equalTo(UpgradeActionRequired.UPGRADE)); + + + ActionFuture<BulkByScrollResponse> upgradeResponse = + client().prepareExecute(IndexUpgradeAction.INSTANCE).setIndex("test").execute(); + + + assertThat(mockUpgradePlugin.upgradeCalledLatch.await(10, TimeUnit.SECONDS), equalTo(true)); + ListTasksResponse response = client().admin().cluster().prepareListTasks().get(); + mockUpgradePlugin.upgradeLatch.countDown(); + + // Find the upgrade task group + TaskGroup upgradeGroup = null; + for (TaskGroup group : response.getTaskGroups()) { + if (IndexUpgradeAction.NAME.equals(group.getTaskInfo().getAction())) { + 
assertThat(upgradeGroup, nullValue()); + upgradeGroup = group; + } + } + assertThat(upgradeGroup, notNullValue()); + assertThat(upgradeGroup.getTaskInfo().isCancellable(), equalTo(true)); // The task should be cancellable + assertThat(upgradeGroup.getChildTasks(), hasSize(1)); // The reindex task should be a child + assertThat(upgradeGroup.getChildTasks().get(0).getTaskInfo().getAction(), equalTo(ReindexAction.NAME)); + + assertThat(upgradeResponse.get().getCreated(), equalTo(1L)); + } +} diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexerIT.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexerIT.java new file mode 100644 index 0000000000000..be5251ad577d2 --- /dev/null +++ b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/InternalIndexReindexerIT.java @@ -0,0 +1,215 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.upgrade; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.analysis.common.CommonAnalysisPlugin; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.ReindexPlugin; +import org.elasticsearch.indices.InvalidIndexNameException; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.script.MockScriptPlugin; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Function; + +import static org.elasticsearch.test.VersionUtils.randomVersionBetween; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.startsWith; +import static org.hamcrest.core.IsEqual.equalTo; + +public class InternalIndexReindexerIT extends IndexUpgradeIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(LocalStateCompositeXPackPlugin.class, + ReindexPlugin.class, 
CustomScriptPlugin.class, CommonAnalysisPlugin.class); + } + + public static class CustomScriptPlugin extends MockScriptPlugin { + @Override + protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { + Map<String, Function<Map<String, Object>, Object>> scripts = new HashMap<>(); + scripts.put("add_bar", map -> { + @SuppressWarnings("unchecked") Map<String, Object> ctx = (Map<String, Object>) map.get("ctx"); + ctx.put("_id", "bar" + "-" + ctx.get("_id")); + @SuppressWarnings("unchecked") Map<String, Object> source = (Map<String, Object>) ctx.get("_source"); + source.put("bar", true); + return null; + }); + scripts.put("fail", map -> { + throw new RuntimeException("Stop reindexing"); + }); + return scripts; + } + } + + public void testUpgradeIndex() throws Exception { + createTestIndex("test"); + InternalIndexReindexer reindexer = createIndexReindexer(123, script("add_bar"), Strings.EMPTY_ARRAY); + PlainActionFuture<BulkByScrollResponse> future = PlainActionFuture.newFuture(); + reindexer.upgrade(new TaskId("abc", 123), "test", clusterState(), future); + BulkByScrollResponse response = future.actionGet(); + assertThat(response.getCreated(), equalTo(2L)); + + SearchResponse searchResponse = client().prepareSearch("test-123").get(); + assertThat(searchResponse.getHits().getTotalHits(), equalTo(2L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(2)); + for (SearchHit hit : searchResponse.getHits().getHits()) { + assertThat(hit.getId(), startsWith("bar-")); + assertThat(hit.getSourceAsMap(), notNullValue()); + assertThat(hit.getSourceAsMap().get("bar"), equalTo(true)); + } + + GetAliasesResponse aliasesResponse = client().admin().indices().prepareGetAliases("test").get(); + assertThat(aliasesResponse.getAliases().size(), equalTo(1)); + List<AliasMetaData> testAlias = aliasesResponse.getAliases().get("test-123"); + assertNotNull(testAlias); + assertThat(testAlias.size(), equalTo(1)); + assertThat(testAlias.get(0).alias(), equalTo("test")); + } + + public void testTargetIndexExists() throws Exception { + createTestIndex("test"); + createTestIndex("test-123"); + InternalIndexReindexer reindexer = createIndexReindexer(123, script("add_bar"), Strings.EMPTY_ARRAY); + PlainActionFuture<BulkByScrollResponse> future = PlainActionFuture.newFuture(); + reindexer.upgrade(new TaskId("abc", 123), "test", clusterState(), future); + assertThrows(future, ResourceAlreadyExistsException.class); + + // Make sure that the index is not marked as read-only + client().prepareIndex("test", "doc").setSource("foo", "bar").get(); + } + + public void testTargetIndexExistsAsAlias() throws Exception { + createTestIndex("test"); + createTestIndex("test-foo"); + client().admin().indices().prepareAliases().addAlias("test-foo", "test-123").get(); + InternalIndexReindexer reindexer = createIndexReindexer(123, script("add_bar"), Strings.EMPTY_ARRAY); + PlainActionFuture<BulkByScrollResponse> future = PlainActionFuture.newFuture(); + reindexer.upgrade(new TaskId("abc", 123), "test", clusterState(), future); + assertThrows(future, InvalidIndexNameException.class); + + // Make sure that the index is not marked as read-only + client().prepareIndex("test-123", "doc").setSource("foo", "bar").get(); + } + + public void testSourceIndexIsReadonly() throws Exception { + createTestIndex("test"); + try { + Settings settings = Settings.builder().put(IndexMetaData.INDEX_READ_ONLY_SETTING.getKey(), true).build(); + assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(settings).get()); + InternalIndexReindexer reindexer = createIndexReindexer(123, script("add_bar"), Strings.EMPTY_ARRAY); + PlainActionFuture<BulkByScrollResponse> future = PlainActionFuture.newFuture(); + reindexer.upgrade(new TaskId("abc", 123), 
"test", clusterState(), future); + assertThrows(future, IllegalStateException.class); + + // Make sure that the index is still marked as read-only + assertThrows(client().prepareIndex("test", "doc").setSource("foo", "bar"), ClusterBlockException.class); + } finally { + // Clean up the readonly index + Settings settings = Settings.builder().put(IndexMetaData.INDEX_READ_ONLY_SETTING.getKey(), false).build(); + assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(settings).get()); + } + } + + + public void testReindexingFailure() throws Exception { + createTestIndex("test"); + // Make sure that the index is not marked as read-only + client().prepareIndex("test", "doc").setSource("foo", "bar").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + InternalIndexReindexer reindexer = createIndexReindexer(123, script("fail"), Strings.EMPTY_ARRAY); + PlainActionFuture<BulkByScrollResponse> future = PlainActionFuture.newFuture(); + reindexer.upgrade(new TaskId("abc", 123), "test", clusterState(), future); + assertThrows(future, RuntimeException.class); + + // Make sure that the index is not marked as read-only + client().prepareIndex("test", "doc").setSource("foo", "bar").get(); + } + + public void testMixedNodeVersion() throws Exception { + createTestIndex("test"); + + InternalIndexReindexer reindexer = createIndexReindexer(123, script("add_bar"), Strings.EMPTY_ARRAY); + PlainActionFuture<BulkByScrollResponse> future = PlainActionFuture.newFuture(); + reindexer.upgrade(new TaskId("abc", 123), "test", withRandomOldNode(), future); + assertThrows(future, IllegalStateException.class); + + // Make sure that the index is not marked as read-only + client().prepareIndex("test_v123", "doc").setSource("foo", "bar").get(); + } + + private void createTestIndex(String indexName) throws Exception { + assertAcked(client().admin().indices().prepareCreate(indexName).get()); + indexRandom(true, + client().prepareIndex(indexName, "doc", "1").setSource("{\"foo\":\"bar1-1\"}", XContentType.JSON), + client().prepareIndex(indexName, "doc", "2").setSource("{\"foo\":\"baz1-1\"}", XContentType.JSON) + ); + ensureYellow(indexName); + } + + private Script script(String name) { + return new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, name, new HashMap<>()); + } + + private InternalIndexReindexer createIndexReindexer(int version, Script transformScript, String[] types) { + return new InternalIndexReindexer(client(), internalCluster().clusterService(internalCluster().getMasterName()), + version, transformScript, types, voidActionListener -> voidActionListener.onResponse(null), + (aVoid, listener) -> listener.onResponse(TransportResponse.Empty.INSTANCE)); + + } + + private ClusterState clusterState() { + return clusterService().state(); + } + + private ClusterState withRandomOldNode() { + ClusterState clusterState = clusterState(); + DiscoveryNodes discoveryNodes = clusterState.nodes(); + List<String> nodes = new ArrayList<>(); + for (ObjectCursor<String> key : discoveryNodes.getMasterAndDataNodes().keys()) { + nodes.add(key.value); + } + // Fake one of the node versions + String nodeId = randomFrom(nodes); + DiscoveryNode node = discoveryNodes.get(nodeId); + DiscoveryNode newNode = new DiscoveryNode(node.getName(), node.getId(), node.getEphemeralId(), node.getHostName(), + node.getHostAddress(), node.getAddress(), node.getAttributes(), node.getRoles(), + randomVersionBetween(random(), Version.V_5_0_0, Version.V_5_4_0)); + + return 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(discoveryNodes).remove(node).add(newNode)).build(); + + } +} \ No newline at end of file diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/actions/IndexUpgradeActionRequestTests.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/actions/IndexUpgradeActionRequestTests.java new file mode 100644 index 0000000000000..acc2d07df7284 --- /dev/null +++ b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/actions/IndexUpgradeActionRequestTests.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.upgrade.actions; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction.Request; + +public class IndexUpgradeActionRequestTests extends AbstractWireSerializingTestCase<Request> { + @Override + protected Request createTestInstance() { + return new Request(randomAlphaOfLength(10)); + } + + @Override + protected Writeable.Reader<Request> instanceReader() { + return Request::new; + } +} diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/actions/IndexUpgradeInfoActionRequestTests.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/actions/IndexUpgradeInfoActionRequestTests.java new file mode 100644 index 0000000000000..38072755bc384 --- /dev/null +++ b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/actions/IndexUpgradeInfoActionRequestTests.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.upgrade.actions; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeInfoAction.Request; + +public class IndexUpgradeInfoActionRequestTests extends AbstractWireSerializingTestCase<Request> { + @Override + protected Request createTestInstance() { + int indexCount = randomInt(4); + String[] indices = new String[indexCount]; + for (int i = 0; i < indexCount; i++) { + indices[i] = randomAlphaOfLength(10); + } + Request request = new Request(indices); + if (randomBoolean()) { + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + } + return request; + } + + @Override + protected Writeable.Reader<Request> instanceReader() { + return Request::new; + } +} diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/actions/IndexUpgradeInfoActionResponseTests.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/actions/IndexUpgradeInfoActionResponseTests.java new file mode 100644 index 0000000000000..6893e45dd2e4f --- /dev/null +++ b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/actions/IndexUpgradeInfoActionResponseTests.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.upgrade.actions; + +import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.xpack.core.upgrade.UpgradeActionRequired; +import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeInfoAction.Response; + +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Map; + +public class IndexUpgradeInfoActionResponseTests extends AbstractStreamableTestCase { + + + @Override + protected Response createTestInstance() { + int actionsCount = randomIntBetween(0, 5); + Map actions = new HashMap<>(actionsCount); + for (int i = 0; i < actionsCount; i++) { + actions.put(randomAlphaOfLength(10), randomFrom(EnumSet.allOf(UpgradeActionRequired.class))); + } + return new Response(actions); + } + + @Override + protected Response createBlankInstance() { + return new Response(); + } +} diff --git a/x-pack/plugin/watcher/build.gradle b/x-pack/plugin/watcher/build.gradle new file mode 100644 index 0000000000000..2b7b73d37962e --- /dev/null +++ b/x-pack/plugin/watcher/build.gradle @@ -0,0 +1,116 @@ +evaluationDependsOn(xpackModule('core')) + +apply plugin: 'elasticsearch.esplugin' +esplugin { + name 'x-pack-watcher' + description 'Elasticsearch Expanded Pack Plugin - Watcher' + classname 'org.elasticsearch.xpack.watcher.Watcher' + hasNativeController false + requiresKeystore false + extendedPlugins = ['x-pack-core'] +} + +archivesBaseName = 'x-pack-watcher' + +ext.compactProfile = 'full' + +compileJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try,-unchecked" +compileTestJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try,-unchecked" + +dependencyLicenses { + mapping from: /owasp-java-html-sanitizer.*/, to: 'owasp-java-html-sanitizer' + ignoreSha 'x-pack-core' +} + +dependencies { + compileOnly "org.elasticsearch:elasticsearch:${version}" + + compileOnly "org.elasticsearch.plugin:x-pack-core:${version}" + compileOnly project(path: ':modules:transport-netty4', configuration: 'runtime') + compileOnly project(path: ':plugins:transport-nio', configuration: 'runtime') + + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + + // watcher deps + compile 'com.googlecode.owasp-java-html-sanitizer:owasp-java-html-sanitizer:r239' + compile 'com.google.guava:guava:16.0.1' // needed by watcher for the html sanitizer and security tests for jimfs + compile 'com.sun.mail:javax.mail:1.5.6' + // HACK: java 9 removed javax.activation from the default modules, so instead of trying to add modules, which would have + // to be conditionalized for java 8/9, we pull in the classes directly + compile 'javax.activation:activation:1.1.1' + compileOnly "org.apache.httpcomponents:httpclient:${versions.httpclient}" + compileOnly "org.apache.httpcomponents:httpcore:${versions.httpcore}" + + testCompile 'org.subethamail:subethasmtp:3.1.7' + // needed for subethasmtp, has @GuardedBy annotation + testCompile 'com.google.code.findbugs:jsr305:3.0.1' +} + +// classes are missing, e.g. 
com.ibm.icu.lang.UCharacter +thirdPartyAudit.excludes = [ + // uses internal java api: sun.misc.Unsafe + 'com.google.common.cache.Striped64', + 'com.google.common.cache.Striped64$1', + 'com.google.common.cache.Striped64$Cell', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', + + // pulled in as external dependency to work on java 9 + 'com.sun.activation.registries.LineTokenizer', + 'com.sun.activation.registries.LogSupport', + 'com.sun.activation.registries.MailcapFile', + 'com.sun.activation.registries.MailcapTokenizer', + 'com.sun.activation.registries.MimeTypeEntry', + 'com.sun.activation.registries.MimeTypeFile', + 'javax.activation.MailcapCommandMap', + 'javax.activation.MimetypesFileTypeMap', +] + +// pulled in as external dependency to work on java 9 +if (JavaVersion.current() <= JavaVersion.VERSION_1_8) { + thirdPartyAudit.excludes += [ + 'com.sun.activation.registries.MailcapParseException', + 'javax.activation.ActivationDataFlavor', + 'javax.activation.CommandInfo', + 'javax.activation.CommandMap', + 'javax.activation.CommandObject', + 'javax.activation.DataContentHandler', + 'javax.activation.DataContentHandlerFactory', + 'javax.activation.DataHandler$1', + 'javax.activation.DataHandler', + 'javax.activation.DataHandlerDataSource', + 'javax.activation.DataSource', + 'javax.activation.DataSourceDataContentHandler', + 'javax.activation.FileDataSource', + 'javax.activation.FileTypeMap', + 'javax.activation.MimeType', + 'javax.activation.MimeTypeParameterList', + 'javax.activation.MimeTypeParseException', + 'javax.activation.ObjectDataContentHandler', + 'javax.activation.SecuritySupport$1', + 'javax.activation.SecuritySupport$2', + 'javax.activation.SecuritySupport$3', + 'javax.activation.SecuritySupport$4', + 'javax.activation.SecuritySupport$5', + 'javax.activation.SecuritySupport', + 'javax.activation.URLDataSource', + 'javax.activation.UnsupportedDataTypeException' + ] +} + +run { + plugin xpackModule('core') +} + +test { + /* + * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each + * other if we allow them to set the number of available processors as it's set-once in Netty. + */ + systemProperty 'es.set.netty.runtime.available.processors', 'false' +} + +// xpack modules are installed in real clusters as the meta plugin, so +// installing them as individual plugins for integ tests doesn't make sense, +// so we disable integ tests +integTest.enabled = false diff --git a/x-pack/plugin/watcher/licenses/activation-1.1.1.jar.sha1 b/x-pack/plugin/watcher/licenses/activation-1.1.1.jar.sha1 new file mode 100644 index 0000000000000..3bba66230e85f --- /dev/null +++ b/x-pack/plugin/watcher/licenses/activation-1.1.1.jar.sha1 @@ -0,0 +1 @@ +485de3a253e23f645037828c07f1d7f1af40763a \ No newline at end of file diff --git a/x-pack/plugin/watcher/licenses/activation-LICENSE.txt b/x-pack/plugin/watcher/licenses/activation-LICENSE.txt new file mode 100644 index 0000000000000..5f3844e85cb35 --- /dev/null +++ b/x-pack/plugin/watcher/licenses/activation-LICENSE.txt @@ -0,0 +1,119 @@ +COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 + +1. Definitions. + +1.1. Contributor means each individual or entity that creates or contributes to the creation of Modifications. + +1.2. 
Contributor Version means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor. + +1.3. Covered Software means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof. + +1.4. Executable means the Covered Software in any form other than Source Code. + +1.5. Initial Developer means the individual or entity that first makes Original Software available under this License. + +1.6. Larger Work means a work which combines Covered Software or portions thereof with code not governed by the terms of this License. + +1.7. License means this document. + +1.8. Licensable means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein. + +1.9. Modifications means the Source Code and Executable form of any of the following: + +A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications; + +B. Any new file that contains any part of the Original Software or previous Modification; or + +C. Any new file that is contributed or otherwise made available under the terms of this License. + +1.10. Original Software means the Source Code and Executable form of computer software code that is originally released under this License. + +1.11. Patent Claims means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor. + +1.12. Source Code means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code. + +1.13. You (or Your) means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, You includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, control means (a)�the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b)�ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. + +2. License Grants. + +2.1. The Initial Developer Grant. +Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license: +(a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and +(b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof). +(c) The licenses granted in Sections�2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License. 
+(d) Notwithstanding Section�2.1(b) above, no patent license is granted: (1)�for code that You delete from the Original Software, or (2)�for infringements caused by: (i)�the modification of the Original Software, or (ii)�the combination of the Original Software with other software or devices. + +2.2. Contributor Grant. +Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: +(a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and +(b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1)�Modifications made by that Contributor (or portions thereof); and (2)�the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination). +(c) The licenses granted in Sections�2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party. +(d) Notwithstanding Section�2.2(b) above, no patent license is granted: (1)�for any code that Contributor has deleted from the Contributor Version; (2)�for infringements caused by: (i)�third party modifications of Contributor Version, or (ii)�the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3)�under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor. + +3. Distribution Obligations. + +3.1. Availability of Source Code. + +Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange. + +3.2. Modifications. + +The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License. + +3.3. Required Notices. +You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer. + +3.4. Application of Additional Terms. 
+You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer. + +3.5. Distribution of Executable Versions. +You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipients rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer. + +3.6. Larger Works. +You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software. + +4. Versions of the License. + +4.1. New Versions. +Sun Microsystems, Inc. is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License. + +4.2. Effect of New Versions. + +You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward. +4.3. Modified Versions. + +When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a)�rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b)�otherwise make it clear that the license contains terms which differ from this License. 
+ +5. DISCLAIMER OF WARRANTY. + +COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN AS IS BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. + +6. TERMINATION. + +6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive. + +6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as Participant) alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections�2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant. + +6.3. In the event of termination under Sections�6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination. + +7. LIMITATION OF LIABILITY. + +UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTYS NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. + +8. U.S. GOVERNMENT END USERS. + +The Covered Software is a commercial item, as that term is defined in 48�C.F.R.�2.101 (Oct. 1995), consisting of commercial computer software (as that term is defined at 48 C.F.R. 
�252.227-7014(a)(1)) and commercial computer software documentation as such terms are used in 48�C.F.R.�12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License. + +9. MISCELLANEOUS. + +This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdictions conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software. + +10. RESPONSIBILITY FOR CLAIMS. + +As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability. + +NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) +The GlassFish code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California. 
+ + + diff --git a/x-pack/plugin/watcher/licenses/activation-NOTICE.txt b/x-pack/plugin/watcher/licenses/activation-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/watcher/licenses/guava-16.0.1.jar.sha1 b/x-pack/plugin/watcher/licenses/guava-16.0.1.jar.sha1 new file mode 100644 index 0000000000000..68f2b233a000d --- /dev/null +++ b/x-pack/plugin/watcher/licenses/guava-16.0.1.jar.sha1 @@ -0,0 +1 @@ +5fa98cd1a63c99a44dd8d3b77e4762b066a5d0c5 \ No newline at end of file diff --git a/x-pack/plugin/watcher/licenses/guava-LICENSE.txt b/x-pack/plugin/watcher/licenses/guava-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/x-pack/plugin/watcher/licenses/guava-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/x-pack/plugin/watcher/licenses/guava-NOTICE.txt b/x-pack/plugin/watcher/licenses/guava-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/watcher/licenses/javax.mail-1.5.6.jar.sha1 b/x-pack/plugin/watcher/licenses/javax.mail-1.5.6.jar.sha1 new file mode 100644 index 0000000000000..c9d823f6a5300 --- /dev/null +++ b/x-pack/plugin/watcher/licenses/javax.mail-1.5.6.jar.sha1 @@ -0,0 +1 @@ +ab5daef2f881c42c8e280cbe918ec4d7fdfd7efe \ No newline at end of file diff --git a/x-pack/plugin/watcher/licenses/javax.mail-LICENSE.txt b/x-pack/plugin/watcher/licenses/javax.mail-LICENSE.txt new file mode 100644 index 0000000000000..5ad62c442b336 --- /dev/null +++ b/x-pack/plugin/watcher/licenses/javax.mail-LICENSE.txt @@ -0,0 +1,759 @@ +COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 + +1. Definitions. + + 1.1. "Contributor" means each individual or entity that creates or + contributes to the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the Original + Software, prior Modifications used by a Contributor (if any), and + the Modifications made by that particular Contributor. + + 1.3. "Covered Software" means (a) the Original Software, or (b) + Modifications, or (c) the combination of files containing Original + Software with files containing Modifications, in each case including + portions thereof. + + 1.4. "Executable" means the Covered Software in any form other than + Source Code. + + 1.5. "Initial Developer" means the individual or entity that first + makes Original Software available under this License. + + 1.6. "Larger Work" means a work which combines Covered Software or + portions thereof with code not governed by the terms of this License. + + 1.7. "License" means this document. + + 1.8. "Licensable" means having the right to grant, to the maximum + extent possible, whether at the time of the initial grant or + subsequently acquired, any and all of the rights conveyed herein. + + 1.9. "Modifications" means the Source Code and Executable form of + any of the following: + + A. Any file that results from an addition to, deletion from or + modification of the contents of a file containing Original Software + or previous Modifications; + + B. Any new file that contains any part of the Original Software or + previous Modification; or + + C. 
Any new file that is contributed or otherwise made available + under the terms of this License. + + 1.10. "Original Software" means the Source Code and Executable form + of computer software code that is originally released under this + License. + + 1.11. "Patent Claims" means any patent claim(s), now owned or + hereafter acquired, including without limitation, method, process, + and apparatus claims, in any patent Licensable by grantor. + + 1.12. "Source Code" means (a) the common form of computer software + code in which modifications are made and (b) associated + documentation included in or with such code. + + 1.13. "You" (or "Your") means an individual or a legal entity + exercising rights under, and complying with all of the terms of, + this License. For legal entities, "You" includes any entity which + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants. + + 2.1. The Initial Developer Grant. + + Conditioned upon Your compliance with Section 3.1 below and subject + to third party intellectual property claims, the Initial Developer + hereby grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Initial Developer, to use, reproduce, + modify, display, perform, sublicense and distribute the Original + Software (or portions thereof), with or without Modifications, + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using or selling of + Original Software, to make, have made, use, practice, sell, and + offer for sale, and/or otherwise dispose of the Original Software + (or portions thereof). + + (c) The licenses granted in Sections 2.1(a) and (b) are effective on + the date Initial Developer first distributes or otherwise makes the + Original Software available to a third party under the terms of this + License. + + (d) Notwithstanding Section 2.1(b) above, no patent license is + granted: (1) for code that You delete from the Original Software, or + (2) for infringements caused by: (i) the modification of the + Original Software, or (ii) the combination of the Original Software + with other software or devices. + + 2.2. Contributor Grant. 
+ + Conditioned upon Your compliance with Section 3.1 below and subject + to third party intellectual property claims, each Contributor hereby + grants You a world-wide, royalty-free, non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Contributor to use, reproduce, modify, + display, perform, sublicense and distribute the Modifications + created by such Contributor (or portions thereof), either on an + unmodified basis, with other Modifications, as Covered Software + and/or as part of a Larger Work; and + + (b) under Patent Claims infringed by the making, using, or selling + of Modifications made by that Contributor either alone and/or in + combination with its Contributor Version (or portions of such + combination), to make, use, sell, offer for sale, have made, and/or + otherwise dispose of: (1) Modifications made by that Contributor (or + portions thereof); and (2) the combination of Modifications made by + that Contributor with its Contributor Version (or portions of such + combination). + + (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective + on the date Contributor first distributes or otherwise makes the + Modifications available to a third party. + + (d) Notwithstanding Section 2.2(b) above, no patent license is + granted: (1) for any code that Contributor has deleted from the + Contributor Version; (2) for infringements caused by: (i) third + party modifications of Contributor Version, or (ii) the combination + of Modifications made by that Contributor with other software + (except as part of the Contributor Version) or other devices; or (3) + under Patent Claims infringed by Covered Software in the absence of + Modifications made by that Contributor. + +3. Distribution Obligations. + + 3.1. Availability of Source Code. + + Any Covered Software that You distribute or otherwise make available + in Executable form must also be made available in Source Code form + and that Source Code form must be distributed only under the terms + of this License. You must include a copy of this License with every + copy of the Source Code form of the Covered Software You distribute + or otherwise make available. You must inform recipients of any such + Covered Software in Executable form as to how they can obtain such + Covered Software in Source Code form in a reasonable manner on or + through a medium customarily used for software exchange. + + 3.2. Modifications. + + The Modifications that You create or to which You contribute are + governed by the terms of this License. You represent that You + believe Your Modifications are Your original creation(s) and/or You + have sufficient rights to grant the rights conveyed by this License. + + 3.3. Required Notices. + + You must include a notice in each of Your Modifications that + identifies You as the Contributor of the Modification. You may not + remove or alter any copyright, patent or trademark notices contained + within the Covered Software, or any notices of licensing or any + descriptive text giving attribution to any Contributor or the + Initial Developer. + + 3.4. Application of Additional Terms. + + You may not offer or impose any terms on any Covered Software in + Source Code form that alters or restricts the applicable version of + this License or the recipients' rights hereunder. You may choose to + offer, and to charge a fee for, warranty, support, indemnity or + liability obligations to one or more recipients of Covered Software. 
+ However, you may do so only on Your own behalf, and not on behalf of + the Initial Developer or any Contributor. You must make it + absolutely clear that any such warranty, support, indemnity or + liability obligation is offered by You alone, and You hereby agree + to indemnify the Initial Developer and every Contributor for any + liability incurred by the Initial Developer or such Contributor as a + result of warranty, support, indemnity or liability terms You offer. + + 3.5. Distribution of Executable Versions. + + You may distribute the Executable form of the Covered Software under + the terms of this License or under the terms of a license of Your + choice, which may contain terms different from this License, + provided that You are in compliance with the terms of this License + and that the license for the Executable form does not attempt to + limit or alter the recipient's rights in the Source Code form from + the rights set forth in this License. If You distribute the Covered + Software in Executable form under a different license, You must make + it absolutely clear that any terms which differ from this License + are offered by You alone, not by the Initial Developer or + Contributor. You hereby agree to indemnify the Initial Developer and + every Contributor for any liability incurred by the Initial + Developer or such Contributor as a result of any such terms You offer. + + 3.6. Larger Works. + + You may create a Larger Work by combining Covered Software with + other code not governed by the terms of this License and distribute + the Larger Work as a single product. In such a case, You must make + sure the requirements of this License are fulfilled for the Covered + Software. + +4. Versions of the License. + + 4.1. New Versions. + + Oracle is the initial license steward and may publish revised and/or + new versions of this License from time to time. Each version will be + given a distinguishing version number. Except as provided in Section + 4.3, no one other than the license steward has the right to modify + this License. + + 4.2. Effect of New Versions. + + You may always continue to use, distribute or otherwise make the + Covered Software available under the terms of the version of the + License under which You originally received the Covered Software. If + the Initial Developer includes a notice in the Original Software + prohibiting it from being distributed or otherwise made available + under any subsequent version of the License, You must distribute and + make the Covered Software available under the terms of the version + of the License under which You originally received the Covered + Software. Otherwise, You may also choose to use, distribute or + otherwise make the Covered Software available under the terms of any + subsequent version of the License published by the license steward. + + 4.3. Modified Versions. + + When You are an Initial Developer and You want to create a new + license for Your Original Software, You may create and use a + modified version of this License if You: (a) rename the license and + remove any references to the name of the license steward (except to + note that the license differs from this License); and (b) otherwise + make it clear that the license contains terms which differ from this + License. + +5. DISCLAIMER OF WARRANTY. 
+ + COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, + WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, + INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE + IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR + NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF + THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE + DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY + OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, + REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN + ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS + AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. + +6. TERMINATION. + + 6.1. This License and the rights granted hereunder will terminate + automatically if You fail to comply with terms herein and fail to + cure such breach within 30 days of becoming aware of the breach. + Provisions which, by their nature, must remain in effect beyond the + termination of this License shall survive. + + 6.2. If You assert a patent infringement claim (excluding + declaratory judgment actions) against Initial Developer or a + Contributor (the Initial Developer or Contributor against whom You + assert such claim is referred to as "Participant") alleging that the + Participant Software (meaning the Contributor Version where the + Participant is a Contributor or the Original Software where the + Participant is the Initial Developer) directly or indirectly + infringes any patent, then any and all rights granted directly or + indirectly to You by such Participant, the Initial Developer (if the + Initial Developer is not the Participant) and all Contributors under + Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice + from Participant terminate prospectively and automatically at the + expiration of such 60 day notice period, unless if within such 60 + day period You withdraw Your claim with respect to the Participant + Software against such Participant either unilaterally or pursuant to + a written agreement with Participant. + + 6.3. If You assert a patent infringement claim against Participant + alleging that the Participant Software directly or indirectly + infringes any patent where such claim is resolved (such as by + license or settlement) prior to the initiation of patent + infringement litigation, then the reasonable value of the licenses + granted by such Participant under Sections 2.1 or 2.2 shall be taken + into account in determining the amount or value of any payment or + license. + + 6.4. In the event of termination under Sections 6.1 or 6.2 above, + all end user licenses that have been validly granted by You or any + distributor hereunder prior to termination (excluding licenses + granted to You by any distributor) shall survive termination. + +7. LIMITATION OF LIABILITY. + + UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT + (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE + INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF + COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE + TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR + CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT + LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER + FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR + LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE + POSSIBILITY OF SUCH DAMAGES. 
THIS LIMITATION OF LIABILITY SHALL NOT + APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH + PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH + LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR + LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION + AND LIMITATION MAY NOT APPLY TO YOU. + +8. U.S. GOVERNMENT END USERS. + + The Covered Software is a "commercial item," as that term is defined + in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer + software" (as that term is defined at 48 C.F.R. § + 252.227-7014(a)(1)) and "commercial computer software documentation" + as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent + with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 + (June 1995), all U.S. Government End Users acquire Covered Software + with only those rights set forth herein. This U.S. Government Rights + clause is in lieu of, and supersedes, any other FAR, DFAR, or other + clause or provision that addresses Government rights in computer + software under this License. + +9. MISCELLANEOUS. + + This License represents the complete agreement concerning subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. This License shall be governed by + the law of the jurisdiction specified in a notice contained within + the Original Software (except to the extent applicable law, if any, + provides otherwise), excluding such jurisdiction's conflict-of-law + provisions. Any litigation relating to this License shall be subject + to the jurisdiction of the courts located in the jurisdiction and + venue specified in a notice contained within the Original Software, + with the losing party responsible for costs, including, without + limitation, court costs and reasonable attorneys' fees and expenses. + The application of the United Nations Convention on Contracts for + the International Sale of Goods is expressly excluded. Any law or + regulation which provides that the language of a contract shall be + construed against the drafter shall not apply to this License. You + agree that You alone are responsible for compliance with the United + States export administration regulations (and the export control + laws and regulation of any other countries) when You use, distribute + or otherwise make available any Covered Software. + +10. RESPONSIBILITY FOR CLAIMS. + + As between Initial Developer and the Contributors, each party is + responsible for claims and damages arising, directly or indirectly, + out of its utilization of rights under this License and You agree to + work with Initial Developer and Contributors to distribute such + responsibility on an equitable basis. Nothing herein is intended or + shall be deemed to constitute any admission of liability. + +------------------------------------------------------------------------ + +NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION +LICENSE (CDDL) + +The code released under the CDDL shall be governed by the laws of the +State of California (excluding conflict-of-law provisions). Any +litigation relating to this License shall be subject to the jurisdiction +of the Federal Courts of the Northern District of California and the +state courts of the State of California, with venue lying in Santa Clara +County, California. 
+ + + + The GNU General Public License (GPL) Version 2, June 1991 + +Copyright (C) 1989, 1991 Free Software Foundation, Inc. +51 Franklin Street, Fifth Floor +Boston, MA 02110-1335 +USA + +Everyone is permitted to copy and distribute verbatim copies +of this license document, but changing it is not allowed. + +Preamble + +The licenses for most software are designed to take away your freedom to +share and change it. By contrast, the GNU General Public License is +intended to guarantee your freedom to share and change free software--to +make sure the software is free for all its users. This General Public +License applies to most of the Free Software Foundation's software and +to any other program whose authors commit to using it. (Some other Free +Software Foundation software is covered by the GNU Library General +Public License instead.) You can apply it to your programs, too. + +When we speak of free software, we are referring to freedom, not price. +Our General Public Licenses are designed to make sure that you have the +freedom to distribute copies of free software (and charge for this +service if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs; and that you know you can do these things. + +To protect your rights, we need to make restrictions that forbid anyone +to deny you these rights or to ask you to surrender the rights. These +restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + +For example, if you distribute copies of such a program, whether gratis +or for a fee, you must give the recipients all the rights that you have. +You must make sure that they, too, receive or can get the source code. +And you must show them these terms so they know their rights. + +We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + +Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + +Finally, any free program is threatened constantly by software patents. +We wish to avoid the danger that redistributors of a free program will +individually obtain patent licenses, in effect making the program +proprietary. To prevent this, we have made it clear that any patent must +be licensed for everyone's free use or not licensed at all. + +The precise terms and conditions for copying, distribution and +modification follow. + +TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + +0. This License applies to any program or other work which contains a +notice placed by the copyright holder saying it may be distributed under +the terms of this General Public License. The "Program", below, refers +to any such program or work, and a "work based on the Program" means +either the Program or any derivative work under copyright law: that is +to say, a work containing the Program or a portion of it, either +verbatim or with modifications and/or translated into another language. +(Hereinafter, translation is included without limitation in the term +"modification".) 
Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of running +the Program is not restricted, and the output from the Program is +covered only if its contents constitute a work based on the Program +(independent of having been made by running the Program). Whether that +is true depends on what the Program does. + +1. You may copy and distribute verbatim copies of the Program's source +code as you receive it, in any medium, provided that you conspicuously +and appropriately publish on each copy an appropriate copyright notice +and disclaimer of warranty; keep intact all the notices that refer to +this License and to the absence of any warranty; and give any other +recipients of the Program a copy of this License along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + +2. You may modify your copy or copies of the Program or any portion of +it, thus forming a work based on the Program, and copy and distribute +such modifications or work under the terms of Section 1 above, provided +that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any part + thereof, to be licensed as a whole at no charge to all third parties + under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a notice + that there is no warranty (or else, saying that you provide a + warranty) and that users may redistribute the program under these + conditions, and telling the user how to view a copy of this License. + (Exception: if the Program itself is interactive but does not + normally print such an announcement, your work based on the Program + is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, and +can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based on +the Program, the distribution of the whole must be on the terms of this +License, whose permissions for other licensees extend to the entire +whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of a +storage or distribution medium does not bring the other work under the +scope of this License. + +3. 
You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections 1 + and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your cost + of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer to + distribute corresponding source code. (This alternative is allowed + only for noncommercial distribution and only if you received the + program in object code or executable form with such an offer, in + accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source code +means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to control +compilation and installation of the executable. However, as a special +exception, the source code distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies the +executable. + +If distribution of executable or object code is made by offering access +to copy from a designated place, then offering equivalent access to copy +the source code from the same place counts as distribution of the source +code, even though third parties are not compelled to copy the source +along with the object code. + +4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt otherwise +to copy, modify, sublicense or distribute the Program is void, and will +automatically terminate your rights under this License. However, parties +who have received copies, or rights, from you under this License will +not have their licenses terminated so long as such parties remain in +full compliance. + +5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and all +its terms and conditions for copying, distributing or modifying the +Program or works based on it. + +6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further restrictions +on the recipients' exercise of the rights granted herein. You are not +responsible for enforcing compliance by third parties to this License. + +7. 
If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot distribute +so as to satisfy simultaneously your obligations under this License and +any other pertinent obligations, then as a consequence you may not +distribute the Program at all. For example, if a patent license would +not permit royalty-free redistribution of the Program by all those who +receive copies directly or indirectly through you, then the only way you +could satisfy both it and this License would be to refrain entirely from +distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is implemented +by public license practices. Many people have made generous +contributions to the wide range of software distributed through that +system in reliance on consistent application of that system; it is up to +the author/donor to decide if he or she is willing to distribute +software through any other system and a licensee cannot impose that choice. + +This section is intended to make thoroughly clear what is believed to be +a consequence of the rest of this License. + +8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License may +add an explicit geographical distribution limitation excluding those +countries, so that distribution is permitted only in or among countries +not thus excluded. In such case, this License incorporates the +limitation as if written in the body of this License. + +9. The Free Software Foundation may publish revised and/or new +versions of the General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Program does not specify a version +number of this License, you may choose any version ever published by the +Free Software Foundation. + +10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the +author to ask for permission. For software which is copyrighted by the +Free Software Foundation, write to the Free Software Foundation; we +sometimes make exceptions for this. Our decision will be guided by the +two goals of preserving the free status of all derivatives of our free +software and of promoting the sharing and reuse of software generally. + +NO WARRANTY + +11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, +EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE +ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH +YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL +NECESSARY SERVICING, REPAIR OR CORRECTION. + +12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR +DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL +DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM +(INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED +INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF +THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR +OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +END OF TERMS AND CONDITIONS + +How to Apply These Terms to Your New Programs + +If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + +To do so, attach the following notices to the program. It is safest to +attach them to the start of each source file to most effectively convey +the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + One line to give the program's name and a brief idea of what it does. + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type + `show w'. This is free software, and you are welcome to redistribute + it under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the +appropriate parts of the General Public License. Of course, the commands +you use may be called something other than `show w' and `show c'; they +could even be mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. 
Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + program `Gnomovision' (which makes passes at compilers) written by + James Hacker. + + signature of Ty Coon, 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications +with the library. If this is what you want to do, use the GNU Library +General Public License instead of this License. + +# + +Certain source files distributed by Oracle America, Inc. and/or its +affiliates are subject to the following clarification and special +exception to the GPLv2, based on the GNU Project exception for its +Classpath libraries, known as the GNU Classpath Exception, but only +where Oracle has expressly included in the particular source file's +header the words "Oracle designates this particular file as subject to +the "Classpath" exception as provided by Oracle in the LICENSE file +that accompanied this code." + +You should also note that Oracle includes multiple, independent +programs in this software package. Some of those programs are provided +under licenses deemed incompatible with the GPLv2 by the Free Software +Foundation and others. For example, the package includes programs +licensed under the Apache License, Version 2.0. Such programs are +licensed to you under their original licenses. + +Oracle facilitates your further distribution of this package by adding +the Classpath Exception to the necessary parts of its GPLv2 code, which +permits you to use that code in combination with other independent +modules not licensed under the GPLv2. However, note that this would +not permit you to commingle code under an incompatible license with +Oracle's GPLv2 licensed code by, for example, cutting and pasting such +code into a file also containing Oracle's GPLv2 licensed code and then +distributing the result. Additionally, if you were to remove the +Classpath Exception from any of the files to which it applies and +distribute the result, you would likely be required to license some or +all of the other code in that distribution under the GPLv2 as well, and +since the GPLv2 is incompatible with the license terms of some items +included in the distribution by Oracle, removing the Classpath +Exception could therefore effectively compromise your ability to +further distribute the package. + +Proceed with caution and we recommend that you obtain the advice of a +lawyer skilled in open source matters before removing the Classpath +Exception or making modifications to this package which may +subsequently be redistributed and/or involve the use of third party +software. + +CLASSPATH EXCEPTION +Linking this library statically or dynamically with other modules is +making a combined work based on this library. Thus, the terms and +conditions of the GNU General Public License version 2 cover the whole +combination. + +As a special exception, the copyright holders of this library give you +permission to link this library with independent modules to produce an +executable, regardless of the license terms of these independent +modules, and to copy and distribute the resulting executable under +terms of your choice, provided that you also meet, for each linked +independent module, the terms and conditions of the license of that +module. 
An independent module is a module which is not derived from or +based on this library. If you modify this library, you may extend this +exception to your version of the library, but you are not obligated to +do so. If you do not wish to do so, delete this exception statement +from your version. diff --git a/x-pack/plugin/watcher/licenses/javax.mail-NOTICE.txt b/x-pack/plugin/watcher/licenses/javax.mail-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/watcher/licenses/owasp-java-html-sanitizer-LICENSE.txt b/x-pack/plugin/watcher/licenses/owasp-java-html-sanitizer-LICENSE.txt new file mode 100644 index 0000000000000..379d6e417da3a --- /dev/null +++ b/x-pack/plugin/watcher/licenses/owasp-java-html-sanitizer-LICENSE.txt @@ -0,0 +1,234 @@ +You may use under either the Apache License Version 2.0 or the BSD +3-Clause License. + +------------------------------------------------------------------ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------------------------------------------------------ + +Copyright (c) 2011, Mike Samuel +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/x-pack/plugin/watcher/licenses/owasp-java-html-sanitizer-NOTICE.txt b/x-pack/plugin/watcher/licenses/owasp-java-html-sanitizer-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/plugin/watcher/licenses/owasp-java-html-sanitizer-r239.jar.sha1 b/x-pack/plugin/watcher/licenses/owasp-java-html-sanitizer-r239.jar.sha1 new file mode 100644 index 0000000000000..71eaba394daf9 --- /dev/null +++ b/x-pack/plugin/watcher/licenses/owasp-java-html-sanitizer-r239.jar.sha1 @@ -0,0 +1 @@ +ea8dd89a9e8fcf90c1b666ac0585e7769224da5e \ No newline at end of file diff --git a/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval new file mode 100755 index 0000000000000..6de537660cbc4 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval @@ -0,0 +1,19 @@ +#!/bin/bash + +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +# or more contributor license agreements. Licensed under the Elastic License; +# you may not use this file except in compliance with the Elastic License. + +source "`dirname "$0"`"/elasticsearch-env + +source "`dirname "$0"`"/x-pack-watcher-env + +exec \ + "$JAVA" \ + $ES_JAVA_OPTS \ + -Des.path.home="$ES_HOME" \ + -Des.path.conf="$ES_PATH_CONF" \ + -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ + -cp "$ES_CLASSPATH" \ + org.elasticsearch.xpack.watcher.trigger.schedule.tool.CronEvalTool \ + "$@" diff --git a/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat new file mode 100644 index 0000000000000..7fd983c9ba5fe --- /dev/null +++ b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat @@ -0,0 +1,25 @@ +@echo off + +rem Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +rem or more contributor license agreements. Licensed under the Elastic License; +rem you may not use this file except in compliance with the Elastic License. + +setlocal enabledelayedexpansion +setlocal enableextensions + +call "%~dp0elasticsearch-env.bat" || exit /b 1 + +call "%~dp0x-pack-watcher-env.bat" || exit /b 1 + +%JAVA% ^ + %ES_JAVA_OPTS% ^ + -Des.path.home="%ES_HOME%" ^ + -Des.path.conf="%ES_PATH_CONF%" ^ + -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ + -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ + -cp "%ES_CLASSPATH%" ^ + org.elasticsearch.xpack.watcher.trigger.schedule.tool.CronEvalTool ^ + %* + +endlocal +endlocal diff --git a/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env new file mode 100644 index 0000000000000..4abe3d8c60761 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env @@ -0,0 +1,10 @@ +#!/bin/bash + +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License; +# you may not use this file except in compliance with the Elastic License. + +source "`dirname "$0"`"/x-pack-env + +# include x-pack-watcher jars in classpath +ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack/x-pack-watcher/*" diff --git a/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat new file mode 100644 index 0000000000000..9e43ffaa0521f --- /dev/null +++ b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat @@ -0,0 +1,7 @@ +rem Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +rem or more contributor license agreements. Licensed under the Elastic License; +rem you may not use this file except in compliance with the Elastic License. + +call "%~dp0x-pack-env.bat" || exit /b 1 + +set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack/x-pack-watcher/* diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/EncryptSensitiveDataBootstrapCheck.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/EncryptSensitiveDataBootstrapCheck.java new file mode 100644 index 0000000000000..4c3316c7d7273 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/EncryptSensitiveDataBootstrapCheck.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher; + +import org.elasticsearch.bootstrap.BootstrapCheck; +import org.elasticsearch.bootstrap.BootstrapContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.watcher.WatcherField; + +import java.nio.file.Files; +import java.nio.file.Path; + +final class EncryptSensitiveDataBootstrapCheck implements BootstrapCheck { + + private final Environment environment; + + EncryptSensitiveDataBootstrapCheck(Environment environment) { + this.environment = environment; + } + + @Override + public BootstrapCheckResult check(BootstrapContext context) { + if (Watcher.ENCRYPT_SENSITIVE_DATA_SETTING.get(context.settings) + && WatcherField.ENCRYPTION_KEY_SETTING.exists(context.settings) == false) { + final Path systemKeyPath = XPackPlugin.resolveConfigFile(environment, "system_key").toAbsolutePath(); + final String message; + if (Files.exists(systemKeyPath)) { + message = "Encryption of sensitive data requires the key to be placed in the secure setting store. Run " + + "'bin/elasticsearch-keystore add-file " + WatcherField.ENCRYPTION_KEY_SETTING.getKey() + " " + + systemKeyPath + + "' to import the file.\nAfter importing, the system_key file should be removed from the " + + "filesystem.\nRepeat this on every node in the cluster."; + } else { + message = "Encryption of sensitive data requires a key to be placed in the secure setting store. First run the " + + "bin/elasticsearch-syskeygen tool to generate a key file.\nThen run 'bin/elasticsearch-keystore add-file " + + WatcherField.ENCRYPTION_KEY_SETTING.getKey() + " " + + systemKeyPath + "' to import the key into" + + " the secure setting store. Finally, remove the system_key file from the filesystem.\n" + + "Repeat this on every node in the cluster"; + } + return BootstrapCheckResult.failure(message); + } else { + return BootstrapCheckResult.success(); + } + } + + @Override + public boolean alwaysEnforce() { + return true; + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java new file mode 100644 index 0000000000000..57fcff7671518 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -0,0 +1,611 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.bootstrap.BootstrapCheck; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.inject.util.Providers; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.logging.LoggerMessageFormat; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.IndexModule; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.SearchScript; +import org.elasticsearch.script.TemplateScript; +import org.elasticsearch.threadpool.ExecutorBuilder; +import org.elasticsearch.threadpool.FixedExecutorBuilder; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.core.watcher.WatcherField; +import org.elasticsearch.xpack.core.watcher.actions.ActionFactory; +import org.elasticsearch.xpack.core.watcher.actions.ActionRegistry; +import org.elasticsearch.xpack.core.watcher.condition.ConditionFactory; +import org.elasticsearch.xpack.core.watcher.condition.ConditionRegistry; +import org.elasticsearch.xpack.core.watcher.crypto.CryptoService; +import org.elasticsearch.xpack.core.watcher.execution.TriggeredWatchStoreField; +import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; +import org.elasticsearch.xpack.core.watcher.input.none.NoneInput; +import org.elasticsearch.xpack.core.watcher.transform.TransformFactory; +import org.elasticsearch.xpack.core.watcher.transform.TransformRegistry; +import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.activate.ActivateWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.delete.DeleteWatchAction; +import 
org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsAction; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.actions.email.EmailAction; +import org.elasticsearch.xpack.watcher.actions.email.EmailActionFactory; +import org.elasticsearch.xpack.watcher.actions.hipchat.HipChatAction; +import org.elasticsearch.xpack.watcher.actions.hipchat.HipChatActionFactory; +import org.elasticsearch.xpack.watcher.actions.index.IndexAction; +import org.elasticsearch.xpack.watcher.actions.index.IndexActionFactory; +import org.elasticsearch.xpack.watcher.actions.jira.JiraAction; +import org.elasticsearch.xpack.watcher.actions.jira.JiraActionFactory; +import org.elasticsearch.xpack.watcher.actions.logging.LoggingAction; +import org.elasticsearch.xpack.watcher.actions.logging.LoggingActionFactory; +import org.elasticsearch.xpack.watcher.actions.pagerduty.PagerDutyAction; +import org.elasticsearch.xpack.watcher.actions.pagerduty.PagerDutyActionFactory; +import org.elasticsearch.xpack.watcher.actions.slack.SlackAction; +import org.elasticsearch.xpack.watcher.actions.slack.SlackActionFactory; +import org.elasticsearch.xpack.watcher.actions.webhook.WebhookAction; +import org.elasticsearch.xpack.watcher.actions.webhook.WebhookActionFactory; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.http.HttpSettings; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuthFactory; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuthRegistry; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuth; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuthFactory; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.condition.ArrayCompareCondition; +import org.elasticsearch.xpack.watcher.condition.CompareCondition; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.condition.NeverCondition; +import org.elasticsearch.xpack.watcher.condition.ScriptCondition; +import org.elasticsearch.xpack.watcher.execution.AsyncTriggerEventConsumer; +import org.elasticsearch.xpack.watcher.execution.ExecutionService; +import org.elasticsearch.xpack.watcher.execution.InternalWatchExecutor; +import org.elasticsearch.xpack.watcher.execution.TriggeredWatch; +import org.elasticsearch.xpack.watcher.execution.TriggeredWatchStore; +import org.elasticsearch.xpack.watcher.execution.WatchExecutor; +import org.elasticsearch.xpack.watcher.history.HistoryStore; +import org.elasticsearch.xpack.watcher.input.InputFactory; +import org.elasticsearch.xpack.watcher.input.InputRegistry; +import org.elasticsearch.xpack.watcher.input.chain.ChainInput; +import org.elasticsearch.xpack.watcher.input.chain.ChainInputFactory; +import org.elasticsearch.xpack.watcher.input.http.HttpInput; +import org.elasticsearch.xpack.watcher.input.http.HttpInputFactory; +import 
org.elasticsearch.xpack.watcher.input.none.NoneInputFactory; +import org.elasticsearch.xpack.watcher.input.search.SearchInput; +import org.elasticsearch.xpack.watcher.input.search.SearchInputFactory; +import org.elasticsearch.xpack.watcher.input.simple.SimpleInput; +import org.elasticsearch.xpack.watcher.input.simple.SimpleInputFactory; +import org.elasticsearch.xpack.watcher.input.transform.TransformInput; +import org.elasticsearch.xpack.watcher.input.transform.TransformInputFactory; +import org.elasticsearch.xpack.watcher.notification.email.Account; +import org.elasticsearch.xpack.watcher.notification.email.EmailService; +import org.elasticsearch.xpack.watcher.notification.email.HtmlSanitizer; +import org.elasticsearch.xpack.watcher.notification.email.attachment.DataAttachmentParser; +import org.elasticsearch.xpack.watcher.notification.email.attachment.EmailAttachmentParser; +import org.elasticsearch.xpack.watcher.notification.email.attachment.EmailAttachmentsParser; +import org.elasticsearch.xpack.watcher.notification.email.attachment.HttpEmailAttachementParser; +import org.elasticsearch.xpack.watcher.notification.email.attachment.ReportingAttachmentParser; +import org.elasticsearch.xpack.watcher.notification.email.support.BodyPartSource; +import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatService; +import org.elasticsearch.xpack.watcher.notification.jira.JiraService; +import org.elasticsearch.xpack.watcher.notification.pagerduty.PagerDutyService; +import org.elasticsearch.xpack.watcher.notification.slack.SlackService; +import org.elasticsearch.xpack.watcher.rest.action.RestAckWatchAction; +import org.elasticsearch.xpack.watcher.rest.action.RestActivateWatchAction; +import org.elasticsearch.xpack.watcher.rest.action.RestDeleteWatchAction; +import org.elasticsearch.xpack.watcher.rest.action.RestExecuteWatchAction; +import org.elasticsearch.xpack.watcher.rest.action.RestGetWatchAction; +import org.elasticsearch.xpack.watcher.rest.action.RestPutWatchAction; +import org.elasticsearch.xpack.watcher.rest.action.RestWatchServiceAction; +import org.elasticsearch.xpack.watcher.rest.action.RestWatcherStatsAction; +import org.elasticsearch.xpack.watcher.support.WatcherIndexTemplateRegistry; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateService; +import org.elasticsearch.xpack.watcher.transform.script.ScriptTransform; +import org.elasticsearch.xpack.watcher.transform.script.ScriptTransformFactory; +import org.elasticsearch.xpack.watcher.transform.search.SearchTransform; +import org.elasticsearch.xpack.watcher.transform.search.SearchTransformFactory; +import org.elasticsearch.xpack.watcher.transport.actions.ack.TransportAckWatchAction; +import org.elasticsearch.xpack.watcher.transport.actions.activate.TransportActivateWatchAction; +import org.elasticsearch.xpack.watcher.transport.actions.delete.TransportDeleteWatchAction; +import org.elasticsearch.xpack.watcher.transport.actions.execute.TransportExecuteWatchAction; +import org.elasticsearch.xpack.watcher.transport.actions.get.TransportGetWatchAction; +import org.elasticsearch.xpack.watcher.transport.actions.put.TransportPutWatchAction; +import org.elasticsearch.xpack.watcher.transport.actions.service.TransportWatcherServiceAction; +import org.elasticsearch.xpack.watcher.transport.actions.stats.TransportWatcherStatsAction; +import org.elasticsearch.xpack.watcher.trigger.TriggerEngine; +import org.elasticsearch.xpack.watcher.trigger.TriggerService; +import 
org.elasticsearch.xpack.watcher.trigger.manual.ManualTriggerEngine; +import org.elasticsearch.xpack.watcher.trigger.schedule.CronSchedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.DailySchedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.HourlySchedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.MonthlySchedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.Schedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleRegistry; +import org.elasticsearch.xpack.watcher.trigger.schedule.WeeklySchedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.YearlySchedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleTriggerEngine; +import org.elasticsearch.xpack.watcher.watch.WatchParser; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.time.Clock; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.function.UnaryOperator; + +import static java.util.Collections.emptyList; + +public class Watcher extends Plugin implements ActionPlugin, ScriptPlugin { + + public static final Setting INDEX_WATCHER_TEMPLATE_VERSION_SETTING = + new Setting<>("index.xpack.watcher.template.version", "", Function.identity(), Setting.Property.IndexScope); + public static final Setting ENCRYPT_SENSITIVE_DATA_SETTING = + Setting.boolSetting("xpack.watcher.encrypt_sensitive_data", false, Setting.Property.NodeScope); + public static final Setting MAX_STOP_TIMEOUT_SETTING = + Setting.timeSetting("xpack.watcher.stop.timeout", TimeValue.timeValueSeconds(30), Setting.Property.NodeScope); + + // list of headers that will be stored when a watch is stored + public static final Set HEADER_FILTERS = + new HashSet<>(Arrays.asList("es-security-runas-user", "_xpack_security_authentication")); + + public static final ScriptContext SCRIPT_SEARCH_CONTEXT = + new ScriptContext<>("xpack", SearchScript.Factory.class); + // TODO: remove this context when each xpack script use case has their own contexts + public static final ScriptContext SCRIPT_EXECUTABLE_CONTEXT + = new ScriptContext<>("xpack_executable", ExecutableScript.Factory.class); + public static final ScriptContext SCRIPT_TEMPLATE_CONTEXT + = new ScriptContext<>("xpack_template", TemplateScript.Factory.class); + + private static final Logger logger = Loggers.getLogger(Watcher.class); + private WatcherIndexingListener listener; + + protected final Settings settings; + protected final boolean transportClient; + protected final boolean enabled; + protected final Environment env; + + public Watcher(final Settings settings) { + this.settings = settings; + this.transportClient = XPackPlugin.transportClientMode(settings); + this.enabled = XPackSettings.WATCHER_ENABLED.get(settings); + env = transportClient ? 
null : new Environment(settings, null); + + if (enabled && transportClient == false) { + validAutoCreateIndex(settings, logger); + } + } + + // overridable by tests + protected SSLService getSslService() { return XPackPlugin.getSharedSslService(); } + protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } + protected Clock getClock() { return Clock.systemUTC(); } + + @Override + public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, ScriptService scriptService, + NamedXContentRegistry xContentRegistry, Environment environment, + NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { + if (enabled == false) { + return Collections.emptyList(); + } + + // only initialize these classes if Watcher is enabled, and only after the plugin security policy for Watcher is in place + BodyPartSource.init(); + Account.init(); + + final CryptoService cryptoService; + try { + cryptoService = ENCRYPT_SENSITIVE_DATA_SETTING.get(settings) ? new CryptoService(settings) : null; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + + new WatcherIndexTemplateRegistry(settings, clusterService, threadPool, client); + + // http client + Map httpAuthFactories = new HashMap<>(); + httpAuthFactories.put(BasicAuth.TYPE, new BasicAuthFactory(cryptoService)); + // TODO: add more auth types, or remove this indirection + HttpAuthRegistry httpAuthRegistry = new HttpAuthRegistry(httpAuthFactories); + HttpRequestTemplate.Parser httpTemplateParser = new HttpRequestTemplate.Parser(httpAuthRegistry); + final HttpClient httpClient = new HttpClient(settings, httpAuthRegistry, getSslService()); + + // notification + EmailService emailService = new EmailService(settings, cryptoService, clusterService.getClusterSettings()); + HipChatService hipChatService = new HipChatService(settings, httpClient, clusterService.getClusterSettings()); + JiraService jiraService = new JiraService(settings, httpClient, clusterService.getClusterSettings()); + SlackService slackService = new SlackService(settings, httpClient, clusterService.getClusterSettings()); + PagerDutyService pagerDutyService = new PagerDutyService(settings, httpClient, clusterService.getClusterSettings()); + + TextTemplateEngine templateEngine = new TextTemplateEngine(settings, scriptService); + Map emailAttachmentParsers = new HashMap<>(); + emailAttachmentParsers.put(HttpEmailAttachementParser.TYPE, new HttpEmailAttachementParser(httpClient, httpTemplateParser, + templateEngine)); + emailAttachmentParsers.put(DataAttachmentParser.TYPE, new DataAttachmentParser()); + emailAttachmentParsers.put(ReportingAttachmentParser.TYPE, new ReportingAttachmentParser(settings, httpClient, templateEngine, + httpAuthRegistry)); + EmailAttachmentsParser emailAttachmentsParser = new EmailAttachmentsParser(emailAttachmentParsers); + + // conditions + final Map parsers = new HashMap<>(); + parsers.put(InternalAlwaysCondition.TYPE, (c, id, p) -> InternalAlwaysCondition.parse(id, p)); + parsers.put(NeverCondition.TYPE, (c, id, p) -> NeverCondition.parse(id, p)); + parsers.put(ArrayCompareCondition.TYPE, ArrayCompareCondition::parse); + parsers.put(CompareCondition.TYPE, CompareCondition::parse); + parsers.put(ScriptCondition.TYPE, (c, id, p) -> ScriptCondition.parse(scriptService, id, p)); + + final ConditionRegistry conditionRegistry = new ConditionRegistry(Collections.unmodifiableMap(parsers), getClock()); + final Map 
transformFactories = new HashMap<>(); + transformFactories.put(ScriptTransform.TYPE, new ScriptTransformFactory(settings, scriptService)); + transformFactories.put(SearchTransform.TYPE, new SearchTransformFactory(settings, client, xContentRegistry, scriptService)); + final TransformRegistry transformRegistry = new TransformRegistry(settings, Collections.unmodifiableMap(transformFactories)); + + // actions + final Map actionFactoryMap = new HashMap<>(); + actionFactoryMap.put(EmailAction.TYPE, new EmailActionFactory(settings, emailService, templateEngine, emailAttachmentsParser)); + actionFactoryMap.put(WebhookAction.TYPE, new WebhookActionFactory(settings, httpClient, httpTemplateParser, templateEngine)); + actionFactoryMap.put(IndexAction.TYPE, new IndexActionFactory(settings, client)); + actionFactoryMap.put(LoggingAction.TYPE, new LoggingActionFactory(settings, templateEngine)); + actionFactoryMap.put(HipChatAction.TYPE, new HipChatActionFactory(settings, templateEngine, hipChatService)); + actionFactoryMap.put(JiraAction.TYPE, new JiraActionFactory(settings, templateEngine, jiraService)); + actionFactoryMap.put(SlackAction.TYPE, new SlackActionFactory(settings, templateEngine, slackService)); + actionFactoryMap.put(PagerDutyAction.TYPE, new PagerDutyActionFactory(settings, templateEngine, pagerDutyService)); + final ActionRegistry registry = new ActionRegistry(actionFactoryMap, conditionRegistry, transformRegistry, getClock(), + getLicenseState()); + + // inputs + final Map inputFactories = new HashMap<>(); + inputFactories.put(SearchInput.TYPE, new SearchInputFactory(settings, client, xContentRegistry, scriptService)); + inputFactories.put(SimpleInput.TYPE, new SimpleInputFactory(settings)); + inputFactories.put(HttpInput.TYPE, new HttpInputFactory(settings, httpClient, templateEngine, httpTemplateParser)); + inputFactories.put(NoneInput.TYPE, new NoneInputFactory(settings)); + inputFactories.put(TransformInput.TYPE, new TransformInputFactory(settings, transformRegistry)); + final InputRegistry inputRegistry = new InputRegistry(settings, inputFactories); + inputFactories.put(ChainInput.TYPE, new ChainInputFactory(settings, inputRegistry)); + + final HistoryStore historyStore = new HistoryStore(settings, client); + + // schedulers + final Set scheduleParsers = new HashSet<>(); + scheduleParsers.add(new CronSchedule.Parser()); + scheduleParsers.add(new DailySchedule.Parser()); + scheduleParsers.add(new HourlySchedule.Parser()); + scheduleParsers.add(new IntervalSchedule.Parser()); + scheduleParsers.add(new MonthlySchedule.Parser()); + scheduleParsers.add(new WeeklySchedule.Parser()); + scheduleParsers.add(new YearlySchedule.Parser()); + final ScheduleRegistry scheduleRegistry = new ScheduleRegistry(scheduleParsers); + + TriggerEngine manualTriggerEngine = new ManualTriggerEngine(); + final TriggerEngine configuredTriggerEngine = getTriggerEngine(getClock(), scheduleRegistry); + + final Set triggerEngines = new HashSet<>(); + triggerEngines.add(manualTriggerEngine); + triggerEngines.add(configuredTriggerEngine); + final TriggerService triggerService = new TriggerService(settings, triggerEngines); + + final TriggeredWatch.Parser triggeredWatchParser = new TriggeredWatch.Parser(settings, triggerService); + final TriggeredWatchStore triggeredWatchStore = new TriggeredWatchStore(settings, client, triggeredWatchParser); + + final WatcherSearchTemplateService watcherSearchTemplateService = + new WatcherSearchTemplateService(settings, scriptService, xContentRegistry); + final WatchExecutor 
watchExecutor = getWatchExecutor(threadPool); + final WatchParser watchParser = new WatchParser(settings, triggerService, registry, inputRegistry, cryptoService, getClock()); + + final ExecutionService executionService = new ExecutionService(settings, historyStore, triggeredWatchStore, watchExecutor, + getClock(), watchParser, clusterService, client); + + final Consumer> triggerEngineListener = getTriggerEngineListener(executionService); + triggerService.register(triggerEngineListener); + + WatcherService watcherService = new WatcherService(settings, triggerService, triggeredWatchStore, executionService, + watchParser, client); + + final WatcherLifeCycleService watcherLifeCycleService = + new WatcherLifeCycleService(settings, threadPool, clusterService, watcherService); + + listener = new WatcherIndexingListener(settings, watchParser, getClock(), triggerService); + clusterService.addListener(listener); + + return Arrays.asList(registry, inputRegistry, historyStore, triggerService, triggeredWatchParser, + watcherLifeCycleService, executionService, triggerEngineListener, watcherService, watchParser, + configuredTriggerEngine, triggeredWatchStore, watcherSearchTemplateService, slackService, pagerDutyService, hipChatService); + } + + protected TriggerEngine getTriggerEngine(Clock clock, ScheduleRegistry scheduleRegistry) { + return new TickerScheduleTriggerEngine(settings, scheduleRegistry, clock); + } + + protected WatchExecutor getWatchExecutor(ThreadPool threadPool) { + return new InternalWatchExecutor(threadPool); + } + + protected Consumer> getTriggerEngineListener(ExecutionService executionService) { + return new AsyncTriggerEventConsumer(settings, executionService); + } + + @Override + public Collection createGuiceModules() { + List modules = new ArrayList<>(); + modules.add(b -> b.bind(Clock.class).toInstance(getClock())); //currently assuming the only place clock is bound + modules.add(b -> { + XPackPlugin.bindFeatureSet(b, WatcherFeatureSet.class); + if (transportClient || enabled == false) { + b.bind(WatcherService.class).toProvider(Providers.of(null)); + } + }); + + return modules; + } + + @Override + public List> getSettings() { + List> settings = new ArrayList<>(); + settings.add(INDEX_WATCHER_TEMPLATE_VERSION_SETTING); + settings.add(MAX_STOP_TIMEOUT_SETTING); + settings.add(ExecutionService.DEFAULT_THROTTLE_PERIOD_SETTING); + settings.add(TickerScheduleTriggerEngine.TICKER_INTERVAL_SETTING); + settings.add(Setting.intSetting("xpack.watcher.execution.scroll.size", 0, Setting.Property.NodeScope)); + settings.add(Setting.intSetting("xpack.watcher.watch.scroll.size", 0, Setting.Property.NodeScope)); + settings.add(ENCRYPT_SENSITIVE_DATA_SETTING); + settings.add(WatcherField.ENCRYPTION_KEY_SETTING); + + settings.add(Setting.simpleString("xpack.watcher.internal.ops.search.default_timeout", Setting.Property.NodeScope)); + settings.add(Setting.simpleString("xpack.watcher.internal.ops.bulk.default_timeout", Setting.Property.NodeScope)); + settings.add(Setting.simpleString("xpack.watcher.internal.ops.index.default_timeout", Setting.Property.NodeScope)); + settings.add(Setting.simpleString("xpack.watcher.actions.index.default_timeout", Setting.Property.NodeScope)); + settings.add(Setting.simpleString("xpack.watcher.actions.bulk.default_timeout", Setting.Property.NodeScope)); + settings.add(Setting.simpleString("xpack.watcher.index.rest.direct_access", Setting.Property.NodeScope)); + settings.add(Setting.simpleString("xpack.watcher.input.search.default_timeout", 
Setting.Property.NodeScope)); + settings.add(Setting.simpleString("xpack.watcher.transform.search.default_timeout", Setting.Property.NodeScope)); + settings.add(Setting.simpleString("xpack.watcher.execution.scroll.timeout", Setting.Property.NodeScope)); + settings.add(WatcherLifeCycleService.SETTING_REQUIRE_MANUAL_START); + + // notification services + settings.addAll(SlackService.getSettings()); + settings.addAll(EmailService.getSettings()); + settings.addAll(HtmlSanitizer.getSettings()); + settings.addAll(HipChatService.getSettings()); + settings.addAll(JiraService.getSettings()); + settings.addAll(PagerDutyService.getSettings()); + settings.add(ReportingAttachmentParser.RETRIES_SETTING); + settings.add(ReportingAttachmentParser.INTERVAL_SETTING); + + // http settings + settings.addAll(HttpSettings.getSettings()); + + // encryption settings + CryptoService.addSettings(settings); + return settings; + } + + @Override + public List> getExecutorBuilders(final Settings settings) { + if (enabled) { + final FixedExecutorBuilder builder = + new FixedExecutorBuilder( + settings, + InternalWatchExecutor.THREAD_POOL_NAME, + getWatcherThreadPoolSize(settings), + 1000, + "xpack.watcher.thread_pool"); + return Collections.singletonList(builder); + } + return Collections.emptyList(); + } + + /** + * A method to indicate the size of the watcher thread pool + * As watches are primarily bound on I/O waiting and execute + * synchronously, it makes sense to have a certain minimum of a + * threadpool size. This means you should start with a fair number + * of threads which is more than the number of CPUs, but you also need + * to ensure that this number does not go crazy high if you have really + * beefy machines. This can still be configured manually. + * + * Calculation is as follows: + * Use five times the number of processors up until 50, then stick with the + * number of processors. 
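+     * (Worked example of the formula below: a 4-processor data node gets min(5 * 4, 50) = 20 threads,
+     * a 16-processor node gets 50, and a 64-processor node gets max(50, 64) = 64.)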
+     *
+     * If the node is not a data node, we will never need so many threads, so we
+     * just return 1 here, which still allows executing a watch locally, but
+     * there is no need to manage any more threads here
+     *
+     * @param settings The current settings
+     * @return 1 for non-data nodes; otherwise five times the number of processors, capped at 50 but never below the number of processors
+     */
+    static int getWatcherThreadPoolSize(Settings settings) {
+        boolean isDataNode = Node.NODE_DATA_SETTING.get(settings);
+        if (isDataNode) {
+            int numberOfProcessors = EsExecutors.numberOfProcessors(settings);
+            long size = Math.max(Math.min(5 * numberOfProcessors, 50), numberOfProcessors);
+            return Math.toIntExact(size);
+        } else {
+            return 1;
+        }
+    }
+
+    @Override
+    public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
+        if (false == enabled) {
+            return emptyList();
+        }
+        return Arrays.asList(new ActionHandler<>(PutWatchAction.INSTANCE, TransportPutWatchAction.class),
+                new ActionHandler<>(DeleteWatchAction.INSTANCE, TransportDeleteWatchAction.class),
+                new ActionHandler<>(GetWatchAction.INSTANCE, TransportGetWatchAction.class),
+                new ActionHandler<>(WatcherStatsAction.INSTANCE, TransportWatcherStatsAction.class),
+                new ActionHandler<>(AckWatchAction.INSTANCE, TransportAckWatchAction.class),
+                new ActionHandler<>(ActivateWatchAction.INSTANCE, TransportActivateWatchAction.class),
+                new ActionHandler<>(WatcherServiceAction.INSTANCE, TransportWatcherServiceAction.class),
+                new ActionHandler<>(ExecuteWatchAction.INSTANCE, TransportExecuteWatchAction.class));
+    }
+
+    @Override
+    public List<RestHandler> getRestHandlers(Settings settings, RestController restController, ClusterSettings clusterSettings,
+            IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter,
+            IndexNameExpressionResolver indexNameExpressionResolver, Supplier<DiscoveryNodes> nodesInCluster) {
+        if (false == enabled) {
+            return emptyList();
+        }
+        return Arrays.asList(
+                new RestPutWatchAction(settings, restController),
+                new RestDeleteWatchAction(settings, restController),
+                new RestWatcherStatsAction(settings, restController),
+                new RestGetWatchAction(settings, restController),
+                new RestWatchServiceAction(settings, restController),
+                new RestAckWatchAction(settings, restController),
+                new RestActivateWatchAction(settings, restController),
+                new RestExecuteWatchAction(settings, restController));
+    }
+
+    @Override
+    public void onIndexModule(IndexModule module) {
+        if (enabled == false || transportClient) {
+            return;
+        }
+
+        assert listener != null;
+        // for now, we only add this index operation listener to indices starting with .watches
+        // this also means that aliases pointing to this index have to follow this notation
+        if (module.getIndex().getName().startsWith(Watch.INDEX)) {
+            module.addIndexOperationListener(listener);
+        }
+    }
+
+    static void validAutoCreateIndex(Settings settings, Logger logger) {
+        String value = settings.get("action.auto_create_index");
+        if (value == null) {
+            return;
+        }
+
+        String errorMessage = LoggerMessageFormat.format("the [action.auto_create_index] setting value [{}] is too" +
+            " restrictive. 
disable [action.auto_create_index] or set it to " + + "[{}, {}, {}*]", (Object) value, Watch.INDEX, TriggeredWatchStoreField.INDEX_NAME, HistoryStoreField.INDEX_PREFIX); + if (Booleans.isFalse(value)) { + throw new IllegalArgumentException(errorMessage); + } + + if (Booleans.isTrue(value)) { + return; + } + + String[] matches = Strings.commaDelimitedListToStringArray(value); + List indices = new ArrayList<>(); + indices.add(".watches"); + indices.add(".triggered_watches"); + DateTime now = new DateTime(DateTimeZone.UTC); + indices.add(HistoryStoreField.getHistoryIndexNameForTime(now)); + indices.add(HistoryStoreField.getHistoryIndexNameForTime(now.plusDays(1))); + indices.add(HistoryStoreField.getHistoryIndexNameForTime(now.plusMonths(1))); + indices.add(HistoryStoreField.getHistoryIndexNameForTime(now.plusMonths(2))); + indices.add(HistoryStoreField.getHistoryIndexNameForTime(now.plusMonths(3))); + indices.add(HistoryStoreField.getHistoryIndexNameForTime(now.plusMonths(4))); + indices.add(HistoryStoreField.getHistoryIndexNameForTime(now.plusMonths(5))); + indices.add(HistoryStoreField.getHistoryIndexNameForTime(now.plusMonths(6))); + for (String index : indices) { + boolean matched = false; + for (String match : matches) { + char c = match.charAt(0); + if (c == '-') { + if (Regex.simpleMatch(match.substring(1), index)) { + throw new IllegalArgumentException(errorMessage); + } + } else if (c == '+') { + if (Regex.simpleMatch(match.substring(1), index)) { + matched = true; + break; + } + } else { + if (Regex.simpleMatch(match, index)) { + matched = true; + break; + } + } + } + if (!matched) { + throw new IllegalArgumentException(errorMessage); + } + } + logger.warn("the [action.auto_create_index] setting is configured to be restrictive [{}]. " + + " for the next 6 months daily history indices are allowed to be created, but please make sure" + + " that any future history indices after 6 months with the pattern " + + "[.watcher-history-YYYY.MM.dd] are allowed to be created", value); + } + + // These are all old templates from pre 6.0 era, that need to be deleted + @Override + public UnaryOperator> getIndexTemplateMetaDataUpgrader() { + return map -> { + map.keySet().removeIf(name -> name.startsWith("watch_history_")); + return map; + }; + } + + @Override + public List getBootstrapChecks() { + return Collections.singletonList(new EncryptSensitiveDataBootstrapCheck(env)); + } + + @Override + public List getContexts() { + return Arrays.asList(Watcher.SCRIPT_SEARCH_CONTEXT, Watcher.SCRIPT_EXECUTABLE_CONTEXT, Watcher.SCRIPT_TEMPLATE_CONTEXT); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherClientHelper.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherClientHelper.java new file mode 100644 index 0000000000000..1019f5a423e98 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherClientHelper.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.xpack.core.watcher.watch.Watch; + +import java.util.Map; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; + +/** + * A helper class which decides if we should run via the xpack user and set watcher as origin or + * if we should use the run_as functionality by setting the correct headers + */ +public class WatcherClientHelper { + + /** + * Execute a client operation and return the response, try to run with least privileges, when headers exist + * + * @param watch The watch in which context this method gets executed in + * @param client The client used to query + * @param supplier The action to run + * @param The client response class this should return + * @return An instance of the response class + */ + public static T execute(Watch watch, Client client, Supplier supplier) { + // no headers, we will have to use the xpack internal user for our execution by specifying the watcher origin + if (watch.status().getHeaders().isEmpty()) { + try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { + return supplier.get(); + } + } else { + try (ThreadContext.StoredContext ignored = client.threadPool().getThreadContext().stashContext()) { + Map filteredHeaders = watch.status().getHeaders().entrySet().stream() + .filter(e -> Watcher.HEADER_FILTERS.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + client.threadPool().getThreadContext().copyHeaders(filteredHeaders.entrySet()); + return supplier.get(); + } + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherFeatureSet.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherFeatureSet.java new file mode 100644 index 0000000000000..1549a94ac350a --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherFeatureSet.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.watcher.WatcherFeatureSetUsage; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.common.stats.Counters; +import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsResponse; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; + +public class WatcherFeatureSet implements XPackFeatureSet { + + private final boolean enabled; + private final XPackLicenseState licenseState; + private Client client; + + @Inject + public WatcherFeatureSet(Settings settings, @Nullable XPackLicenseState licenseState, Client client) { + this.enabled = XPackSettings.WATCHER_ENABLED.get(settings); + this.licenseState = licenseState; + this.client = client; + } + + @Override + public String name() { + return XPackField.WATCHER; + } + + @Override + public String description() { + return "Alerting, Notification and Automation for the Elastic Stack"; + } + + @Override + public boolean available() { + return licenseState != null && licenseState.isWatcherAllowed(); + } + + @Override + public boolean enabled() { + return enabled; + } + + @Override + public Map nativeCodeInfo() { + return null; + } + + @Override + public void usage(ActionListener listener) { + if (enabled) { + try (ThreadContext.StoredContext ignore = + stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { + WatcherClient watcherClient = new WatcherClient(client); + WatcherStatsRequest request = new WatcherStatsRequest(); + request.includeStats(true); + watcherClient.watcherStats(request, ActionListener.wrap(r -> { + List countersPerNode = r.getNodes() + .stream() + .map(WatcherStatsResponse.Node::getStats) + .filter(Objects::nonNull) + .collect(Collectors.toList()); + Counters mergedCounters = Counters.merge(countersPerNode); + listener.onResponse(new WatcherFeatureSetUsage(available(), enabled(), mergedCounters.toNestedMap())); + }, listener::onFailure)); + } + } else { + listener.onResponse(new WatcherFeatureSetUsage(available(), enabled(), Collections.emptyMap())); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java new file mode 100644 index 0000000000000..37836ca94f8c0 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java @@ -0,0 +1,392 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.AllocationId; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.Murmur3HashFunction; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.shard.IndexingOperationListener; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.trigger.TriggerService; +import org.elasticsearch.xpack.watcher.watch.WatchParser; +import org.elasticsearch.xpack.watcher.watch.WatchStoreUtils; +import org.joda.time.DateTime; + +import java.io.IOException; +import java.time.Clock; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; +import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; +import static org.joda.time.DateTimeZone.UTC; + +/** + * This index listener ensures, that watches that are being indexed are put into the trigger service + * Because the condition for this might change based on the shard allocation, this class is also a + * cluster state listener + * + * Whenever a write operation to the current active watch index is made, this listener checks, if + * the document should also be added to the local trigger service + * + */ +final class WatcherIndexingListener extends AbstractComponent implements IndexingOperationListener, ClusterStateListener { + + static final Configuration INACTIVE = new Configuration(null, Collections.emptyMap()); + + private final WatchParser parser; + private final Clock clock; + private final TriggerService triggerService; + private volatile Configuration configuration = INACTIVE; + + WatcherIndexingListener(Settings settings, WatchParser parser, Clock clock, TriggerService triggerService) { + super(settings); + this.parser = parser; + this.clock = clock; + this.triggerService = triggerService; + } + + // package private for testing + Configuration getConfiguration() { + return configuration; + } + + // package private for testing + void setConfiguration(Configuration configuration) { + this.configuration = configuration; + } + + /** + * single watch operations that check if the local trigger service should trigger for this + * concrete watch + * + * Watch parsing could be optimized, so that parsing only happens 
on primary and where the
+     * shard is supposed to be put into the trigger service at some point; right now we don't care
+     *
+     * Note, we have to parse on the primary, because otherwise a failure on the replica when
+     * parsing the watch would result in failing the replica
+     *
+     * @param shardId The shard id object of the document being processed
+     * @param operation The index operation
+     * @return The index operation
+     */
+    @Override
+    public Engine.Index preIndex(ShardId shardId, Engine.Index operation) {
+        if (isWatchDocument(shardId.getIndexName(), operation.type())) {
+            DateTime now = new DateTime(clock.millis(), UTC);
+            try {
+                Watch watch = parser.parseWithSecrets(operation.id(), true, operation.source(), now, XContentType.JSON);
+                ShardAllocationConfiguration shardAllocationConfiguration = configuration.localShards.get(shardId);
+                if (shardAllocationConfiguration == null) {
+                    logger.debug("no distributed watch execution info found for watch [{}] on shard [{}], got configuration for {}",
+                            watch.id(), shardId, configuration.localShards.keySet());
+                    return operation;
+                }
+
+                // the watch status version is -1 if the watch has been freshly stored and this save
+                // operation does not stem from an execution
+                // we don't need to update the trigger service when the watch has been updated as
+                // part of an execution, so we can exit early
+                boolean isWatchExecutionOperation = watch.status().version() != -1;
+                if (isWatchExecutionOperation) {
+                    logger.debug("not updating trigger for watch [{}], watch has been updated as part of an execution", watch.id());
+                    return operation;
+                }
+
+                boolean shouldBeTriggered = shardAllocationConfiguration.shouldBeTriggered(watch.id());
+                if (shouldBeTriggered) {
+                    if (watch.status().state().isActive()) {
+                        logger.debug("adding watch [{}] to trigger", watch.id());
+                        triggerService.add(watch);
+                    } else {
+                        logger.debug("removing watch [{}] from trigger", watch.id());
+                        triggerService.remove(watch.id());
+                    }
+                } else {
+                    logger.debug("watch [{}] should not be triggered", watch.id());
+                }
+            } catch (IOException e) {
+                throw new ElasticsearchParseException("Could not parse watch with id [{}]", e, operation.id());
+            }
+
+        }
+
+        return operation;
+    }
+
+    /**
+     * In case of an error, we have to ensure that the trigger service does not leave anything behind
+     *
+     * TODO: If the configuration changes between preIndex and postIndex methods and we add a
+     * watch that could not be indexed
+     * TODO: this watch might not be deleted from the trigger service. Are we willing to accept this?
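+     * (As implemented below, postIndex() removes the watch id from the trigger service whenever indexing a
+     * watch document fails, so a failed write does not leave a stale trigger behind.)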
+ * TODO: This could be circumvented by using a threadlocal in preIndex(), that contains the + * watch and is cleared afterwards + * + * @param shardId The shard id object of the document being processed + * @param index The index operation + * @param ex The exception occured during indexing + */ + @Override + public void postIndex(ShardId shardId, Engine.Index index, Exception ex) { + if (isWatchDocument(shardId.getIndexName(), index.type())) { + logger.debug(() -> new ParameterizedMessage("removing watch [{}] from trigger", index.id()), ex); + triggerService.remove(index.id()); + } + } + + /** + * If the index operation happened on a watcher shard and is of doc type watcher, we will + * remove the watch id from the trigger service + * + * @param shardId The shard id object of the document being processed + * @param delete The delete operation + * @return The delete operation + */ + @Override + public Engine.Delete preDelete(ShardId shardId, Engine.Delete delete) { + if (isWatchDocument(shardId.getIndexName(), delete.type())) { + triggerService.remove(delete.id()); + } + + return delete; + } + + /** + * Check if a supplied index and document matches the current configuration for watcher + * + * @param index The index to check for + * @param docType The document type + * @return true if this is a watch in the active watcher index, false otherwise + */ + private boolean isWatchDocument(String index, String docType) { + return configuration.isIndexAndActive(index) && docType.equals(Watch.DOC_TYPE); + } + + /** + * Listen for cluster state changes. This method will start, stop or reload the watcher + * service based on cluster state information. + * The method checks, if there are local watch indices up and running. + * + * @param event The ClusterChangedEvent class containing the current and new cluster state + */ + @Override + public void clusterChanged(ClusterChangedEvent event) { + // if there is no master node configured in the current state, this node should not try to trigger anything, but consider itself + // inactive. 
the same applies, if there is a cluster block that does not allow writes + if (Strings.isNullOrEmpty(event.state().nodes().getMasterNodeId()) || + event.state().getBlocks().hasGlobalBlock(ClusterBlockLevel.WRITE)) { + configuration = INACTIVE; + return; + } + + if (event.state().nodes().getLocalNode().isDataNode() && event.metaDataChanged()) { + try { + IndexMetaData metaData = WatchStoreUtils.getConcreteIndex(Watch.INDEX, event.state().metaData()); + if (metaData == null) { + configuration = INACTIVE; + } else { + checkWatchIndexHasChanged(metaData, event); + } + } catch (IllegalStateException e) { + logger.error("error loading watches index: [{}]", e.getMessage()); + configuration = INACTIVE; + } + } + } + + private void checkWatchIndexHasChanged(IndexMetaData metaData, ClusterChangedEvent event) { + String watchIndex = metaData.getIndex().getName(); + ClusterState state = event.state(); + String localNodeId = state.nodes().getLocalNode().getId(); + RoutingNode routingNode = state.getRoutingNodes().node(localNodeId); + + // no local shards, exit early + List localShardRouting = routingNode.shardsWithState(watchIndex, STARTED, RELOCATING); + if (localShardRouting.isEmpty()) { + configuration = INACTIVE; + } else { + reloadConfiguration(watchIndex, localShardRouting, event); + } + } + + /** + * Reload the configuration if the alias pointing to the watch index was changed or + * the index routing table for an index was changed + * + * @param watchIndex Name of the concrete watches index pointing + * @param localShardRouting List of local shards of that index + * @param event The cluster changed event containing the new cluster state + */ + private void reloadConfiguration(String watchIndex, List localShardRouting, + ClusterChangedEvent event) { + // changed alias means to always read a new configuration + boolean isAliasChanged = watchIndex.equals(configuration.index) == false; + if (isAliasChanged || hasShardAllocationIdChanged(watchIndex, event.state())) { + IndexRoutingTable watchIndexRoutingTable = event.state().routingTable().index(watchIndex); + Map ids = getLocalShardAllocationIds(localShardRouting, watchIndexRoutingTable); + configuration = new Configuration(watchIndex, ids); + } + } + + /** + * Check if the routing table has changed and local shards are affected + * + * @param watchIndex Name of the concrete watches index pointing + * @param state The new cluster state + * @return true if the routing tables has changed and local shards are affected + */ + private boolean hasShardAllocationIdChanged(String watchIndex, ClusterState state) { + List allStartedRelocatedShards = state.getRoutingTable().index(watchIndex).shardsWithState(STARTED); + allStartedRelocatedShards.addAll(state.getRoutingTable().index(watchIndex).shardsWithState(RELOCATING)); + + // exit early, when there are shards, but the current configuration is inactive + if (allStartedRelocatedShards.isEmpty() == false && configuration == INACTIVE) { + return true; + } + + // check for different shard ids + String localNodeId = state.nodes().getLocalNodeId(); + Set clusterStateLocalShardIds = state.getRoutingNodes().node(localNodeId) + .shardsWithState(watchIndex, STARTED, RELOCATING).stream() + .map(ShardRouting::shardId) + .collect(Collectors.toSet()); + Set configuredLocalShardIds = new HashSet<>(configuration.localShards.keySet()); + Set differenceSet = Sets.difference(clusterStateLocalShardIds, configuredLocalShardIds); + if (differenceSet.isEmpty() == false) { + return true; + } + + Map> shards = 
allStartedRelocatedShards.stream() + .collect(Collectors.groupingBy(ShardRouting::shardId, + Collectors.mapping(sr -> sr.allocationId().getId(), + Collectors.toCollection(ArrayList::new)))); + + // sort the collection, so we have a stable order + shards.values().forEach(Collections::sort); + + // check for different allocation ids + for (Map.Entry entry : configuration.localShards.entrySet()) { + if (shards.containsKey(entry.getKey()) == false) { + return true; + } + + Collection allocationIds = shards.get(entry.getKey()); + if (allocationIds.equals(entry.getValue().allocationIds) == false) { + return true; + } + } + + return false; + } + + /** + * This returns a mapping of the shard it to the index of the shard allocation ids in that + * list. The idea here is to have a basis for consistent hashing in order to decide if a + * watch needs to be triggered locally or on another system, when it is being indexed + * as a single watch action. + * + * Example: + * - ShardId(".watch", 0) + * - all allocation ids sorted (in the cluster): [ "a", "b", "c", "d"] + * - local allocation id: b (index position 1) + * - then store the size of the allocation ids and the index position + * data.put(ShardId(".watch", 0), new Tuple(1, 4)) + */ + Map getLocalShardAllocationIds(List localShards, IndexRoutingTable routingTable) { + Map data = new HashMap<>(localShards.size()); + + for (ShardRouting shardRouting : localShards) { + ShardId shardId = shardRouting.shardId(); + + // find all allocation ids for this shard id in the cluster state + List allocationIds = routingTable.shard(shardId.getId()).getActiveShards() + .stream() + .map(ShardRouting::allocationId) + .map(AllocationId::getId) + .collect(Collectors.toList()); + + // sort the list so it is stable + Collections.sort(allocationIds); + + String allocationId = shardRouting.allocationId().getId(); + int idx = allocationIds.indexOf(allocationId); + data.put(shardId, new ShardAllocationConfiguration(idx, allocationIds.size(), allocationIds)); + } + + return data; + } + + /** + * A helper class, that contains shard configuration per shard id + */ + static final class Configuration { + + final Map localShards; + final boolean active; + final String index; + + Configuration(String index, Map localShards) { + this.active = localShards.isEmpty() == false; + this.index = index; + this.localShards = Collections.unmodifiableMap(localShards); + } + + /** + * Find out, if the supplied index matches the current watcher configuration and the + * current state is active + * + * @param index The name of the index to compare with + * @return false if watcher is not active or the passed index is not the watcher index + */ + public boolean isIndexAndActive(String index) { + return active == true && index.equals(this.index); + } + } + + static final class ShardAllocationConfiguration { + final int index; + final int shardCount; + final List allocationIds; + + ShardAllocationConfiguration(int index, int shardCount, List allocationIds) { + this.index = index; + this.shardCount = shardCount; + this.allocationIds = allocationIds; + } + + public boolean shouldBeTriggered(String id) { + int hash = Murmur3HashFunction.hash(id); + int shardIndex = Math.floorMod(hash, shardCount); + return shardIndex == index; + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java new file mode 100644 index 0000000000000..bec496068e3a7 --- 
/dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java @@ -0,0 +1,308 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher; + +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.AllocationId; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.component.LifecycleListener; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.upgrade.UpgradeField; +import org.elasticsearch.xpack.core.watcher.WatcherMetaData; +import org.elasticsearch.xpack.core.watcher.WatcherState; +import org.elasticsearch.xpack.core.watcher.execution.TriggeredWatchStoreField; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.support.WatcherIndexTemplateRegistry; +import org.elasticsearch.xpack.watcher.watch.WatchStoreUtils; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; +import java.util.stream.Collectors; + +import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; +import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; +import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; + +public class WatcherLifeCycleService extends AbstractComponent implements ClusterStateListener { + + // this option configures watcher not to start, unless the cluster state contains information to start watcher + // if you start with an empty cluster, you can delay starting watcher until you call the API manually + // if you start with a cluster containing data, this setting might have no effect, once you called the API yourself + // this is merely for testing, to make sure that watcher only starts when manually called + public static final Setting SETTING_REQUIRE_MANUAL_START = + Setting.boolSetting("xpack.watcher.require_manual_start", false, Property.NodeScope); + + private static final String LIFECYCLE_THREADPOOL_NAME = "watcher-lifecycle"; + + private final WatcherService watcherService; + private final ExecutorService executor; + private AtomicReference> previousAllocationIds = new AtomicReference<>(Collections.emptyList()); + private volatile boolean shutDown = false; // indicates that the node has been shutdown and we 
should never start watcher after this. + private final boolean requireManualStart; + + WatcherLifeCycleService(Settings settings, ThreadPool threadPool, ClusterService clusterService, + WatcherService watcherService) { + // use a single thread executor so that lifecycle changes are handled in the order they + // are submitted in + this(settings, clusterService, watcherService, EsExecutors.newFixed( + LIFECYCLE_THREADPOOL_NAME, + 1, + 1000, + daemonThreadFactory(settings, LIFECYCLE_THREADPOOL_NAME), + threadPool.getThreadContext())); + } + + WatcherLifeCycleService(Settings settings, ClusterService clusterService, + WatcherService watcherService, ExecutorService executorService) { + super(settings); + this.executor = executorService; + this.watcherService = watcherService; + this.requireManualStart = SETTING_REQUIRE_MANUAL_START.get(settings); + clusterService.addListener(this); + // Close if the indices service is being stopped, so we don't run into search failures (locally) that will + // happen because we're shutting down and an watch is scheduled. + clusterService.addLifecycleListener(new LifecycleListener() { + @Override + public void beforeStop() { + shutDown(); + } + }); + } + + public synchronized void stop(String reason) { + watcherService.stop(reason); + } + + synchronized void shutDown() { + shutDown = true; + stop("shutdown initiated"); + stopExecutor(); + } + + void stopExecutor() { + ThreadPool.terminate(executor, 10L, TimeUnit.SECONDS); + } + + private synchronized void start(ClusterState state) { + if (shutDown) { + return; + } + final WatcherState watcherState = watcherService.state(); + if (watcherState != WatcherState.STOPPED) { + logger.debug("not starting watcher. watcher can only start if its current state is [{}], but its current state now is [{}]", + WatcherState.STOPPED, watcherState); + return; + } + + // If we start from a cluster state update we need to check if previously we stopped manually + // otherwise Watcher would start upon the next cluster state update while the user instructed Watcher to not run + WatcherMetaData watcherMetaData = state.getMetaData().custom(WatcherMetaData.TYPE); + if (watcherMetaData != null && watcherMetaData.manuallyStopped()) { + logger.debug("not starting watcher. watcher was stopped manually and therefore cannot be auto-started"); + return; + } + + // ensure that templates are existing before starting watcher + // the watcher index template registry is independent from watcher being started or stopped + if (WatcherIndexTemplateRegistry.validate(state) == false) { + logger.debug("not starting watcher, watcher templates are missing in the cluster state"); + return; + } + + if (watcherService.validate(state)) { + logger.trace("starting... (based on cluster state version [{}])", state.getVersion()); + try { + // we need to populate the allocation ids before the next cluster state listener comes in + checkAndSetAllocationIds(state, false); + watcherService.start(state); + } catch (Exception e) { + logger.warn("failed to start watcher. please wait for the cluster to become ready or try to start Watcher manually", e); + } + } else { + logger.debug("not starting watcher. because the cluster isn't ready yet to run watcher"); + } + } + + /** + * @param event The event of containing the new cluster state + * + * stop certain parts of watcher, when there are no watcher indices on this node by checking the shardrouting + * note that this is not easily possible, because of the execute watch api, that needs to be able to execute anywhere! 
+ * this means, only certain components can be stopped + */ + @Override + public void clusterChanged(ClusterChangedEvent event) { + if (event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK) || shutDown) { + clearAllocationIds(); + // wait until the gateway has recovered from disk, otherwise we think may not have .watches and + // a .triggered_watches index, but they may not have been restored from the cluster state on disk + return; + } + + // if watcher should not be started immediately unless it is has been manually configured to do so + WatcherMetaData watcherMetaData = event.state().getMetaData().custom(WatcherMetaData.TYPE); + if (watcherMetaData == null && requireManualStart) { + clearAllocationIds(); + return; + } + + if (Strings.isNullOrEmpty(event.state().nodes().getMasterNodeId())) { + clearAllocationIds(); + executor.execute(() -> this.stop("no master node")); + return; + } + + if (event.state().getBlocks().hasGlobalBlock(ClusterBlockLevel.WRITE)) { + clearAllocationIds(); + executor.execute(() -> this.stop("write level cluster block")); + return; + } + + if (isWatcherStoppedManually(event.state())) { + clearAllocationIds(); + executor.execute(() -> this.stop("watcher manually marked to shutdown by cluster state update")); + } else { + final WatcherState watcherState = watcherService.state(); + if (watcherState == WatcherState.STARTED && event.state().nodes().getLocalNode().isDataNode()) { + checkAndSetAllocationIds(event.state(), true); + } else if (watcherState != WatcherState.STARTED && watcherState != WatcherState.STARTING) { + IndexMetaData watcherIndexMetaData = WatchStoreUtils.getConcreteIndex(Watch.INDEX, event.state().metaData()); + IndexMetaData triggeredWatchesIndexMetaData = WatchStoreUtils.getConcreteIndex(TriggeredWatchStoreField.INDEX_NAME, + event.state().metaData()); + boolean isIndexInternalFormatWatchIndex = watcherIndexMetaData == null || + UpgradeField.checkInternalIndexFormat(watcherIndexMetaData); + boolean isIndexInternalFormatTriggeredWatchIndex = triggeredWatchesIndexMetaData == null || + UpgradeField.checkInternalIndexFormat(triggeredWatchesIndexMetaData); + if (isIndexInternalFormatTriggeredWatchIndex && isIndexInternalFormatWatchIndex) { + checkAndSetAllocationIds(event.state(), false); + executor.execute(() -> start(event.state())); + } else { + logger.warn("not starting watcher, upgrade API run required: .watches[{}], .triggered_watches[{}]", + isIndexInternalFormatWatchIndex, isIndexInternalFormatTriggeredWatchIndex); + } + } + } + } + + /** + * check if watcher has been stopped manually via the stop API + */ + private boolean isWatcherStoppedManually(ClusterState state) { + WatcherMetaData watcherMetaData = state.getMetaData().custom(WatcherMetaData.TYPE); + return watcherMetaData != null && watcherMetaData.manuallyStopped(); + } + + /** + * check and optionally set the current allocation ids + * + * @param state the current cluster state + * @param callWatcherService should the watcher service be called for starting/stopping/reloading or should this be treated as a + * dryrun so that the caller is responsible for this + */ + private void checkAndSetAllocationIds(ClusterState state, boolean callWatcherService) { + IndexMetaData watcherIndexMetaData = WatchStoreUtils.getConcreteIndex(Watch.INDEX, state.metaData()); + if (watcherIndexMetaData == null) { + if (clearAllocationIds() && callWatcherService) { + executor.execute(wrapWatcherService(() -> watcherService.pauseExecution("no watcher index found"), + e -> 
logger.error("error pausing watch execution", e))); + } + return; + } + + DiscoveryNode localNode = state.nodes().getLocalNode(); + RoutingNode routingNode = state.getRoutingNodes().node(localNode.getId()); + // this can happen if the node does not hold any data + if (routingNode == null) { + if (clearAllocationIds() && callWatcherService) { + executor.execute(wrapWatcherService( + () -> watcherService.pauseExecution("no routing node for local node found, network issue?"), + e -> logger.error("error pausing watch execution", e))); + } + return; + } + + String watchIndex = watcherIndexMetaData.getIndex().getName(); + List localShards = routingNode.shardsWithState(watchIndex, RELOCATING, STARTED); + // no local shards, empty out watcher and dont waste resources! + if (localShards.isEmpty()) { + if (clearAllocationIds() && callWatcherService) { + executor.execute(wrapWatcherService(() -> watcherService.pauseExecution("no local watcher shards found"), + e -> logger.error("error pausing watch execution", e))); + } + return; + } + + List currentAllocationIds = localShards.stream() + .map(ShardRouting::allocationId) + .map(AllocationId::getId) + .collect(Collectors.toList()); + Collections.sort(currentAllocationIds); + + if (previousAllocationIds.get().equals(currentAllocationIds) == false) { + previousAllocationIds.set(Collections.unmodifiableList(currentAllocationIds)); + if (callWatcherService) { + executor.execute(wrapWatcherService(() -> watcherService.reload(state, "new local watcher shard allocation ids"), + e -> logger.error("error reloading watcher", e))); + } + } + } + + /** + * clear out current allocation ids if not already happened + * @return true, if existing allocation ids were cleaned out, false otherwise + */ + private boolean clearAllocationIds() { + List previousIds = previousAllocationIds.getAndSet(Collections.emptyList()); + return previousIds.equals(Collections.emptyList()) == false; + } + + // for testing purposes only + List allocationIds() { + return previousAllocationIds.get(); + } + + /** + * Wraps an abstract runnable to easier supply onFailure and doRun methods via lambdas + * This ensures that the uncaught exception handler in the executing threadpool does not get called + * + * @param run The code to be executed in the runnable + * @param exceptionConsumer The exception handling code to be executed, if the runnable fails + * @return The AbstractRunnable instance to pass to the executor + */ + private static AbstractRunnable wrapWatcherService(Runnable run, Consumer exceptionConsumer) { + + return new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + exceptionConsumer.accept(e); + } + + @Override + protected void doRun() throws Exception { + run.run(); + } + }; + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java new file mode 100644 index 0000000000000..56c56baae8944 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java @@ -0,0 +1,326 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher; + + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.search.ClearScrollRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.AllocationId; +import org.elasticsearch.cluster.routing.Murmur3HashFunction; +import org.elasticsearch.cluster.routing.Preference; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.sort.SortBuilders; +import org.elasticsearch.xpack.core.watcher.WatcherState; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.execution.ExecutionService; +import org.elasticsearch.xpack.watcher.execution.TriggeredWatch; +import org.elasticsearch.xpack.watcher.execution.TriggeredWatchStore; +import org.elasticsearch.xpack.watcher.trigger.TriggerService; +import org.elasticsearch.xpack.watcher.watch.WatchParser; +import org.elasticsearch.xpack.watcher.watch.WatchStoreUtils; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; +import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; +import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalState; +import static org.elasticsearch.xpack.core.watcher.watch.Watch.INDEX; + + +public class WatcherService extends AbstractComponent { + + private final TriggerService triggerService; + private final TriggeredWatchStore triggeredWatchStore; + private final ExecutionService executionService; + private final TimeValue scrollTimeout; + private final int scrollSize; + private final WatchParser parser; + private final Client client; + // package-private for testing + final AtomicReference state = new AtomicReference<>(WatcherState.STOPPED); + private final TimeValue defaultSearchTimeout; + + public WatcherService(Settings settings, TriggerService triggerService, TriggeredWatchStore triggeredWatchStore, + ExecutionService executionService, WatchParser parser, Client client) { + super(settings); + this.triggerService = triggerService; + this.triggeredWatchStore = triggeredWatchStore; 
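The constructor continues just below by reading Watcher's scroll and search-timeout settings, each with a hard-coded default. A small sketch of how those keys resolve against overrides (the override values here are purely illustrative):

```java
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

public class WatcherScrollSettingsSketch {
    public static void main(String[] args) {
        // Hypothetical overrides; without them the getters below fall back to 30s / 100.
        Settings settings = Settings.builder()
                .put("xpack.watcher.watch.scroll.timeout", "60s")
                .put("xpack.watcher.watch.scroll.size", 500)
                .build();

        TimeValue scrollTimeout = settings.getAsTime("xpack.watcher.watch.scroll.timeout",
                TimeValue.timeValueSeconds(30));
        int scrollSize = settings.getAsInt("xpack.watcher.watch.scroll.size", 100);
        System.out.println(scrollTimeout + " / " + scrollSize);
    }
}
```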
+ this.executionService = executionService; + this.scrollTimeout = settings.getAsTime("xpack.watcher.watch.scroll.timeout", TimeValue.timeValueSeconds(30)); + this.scrollSize = settings.getAsInt("xpack.watcher.watch.scroll.size", 100); + this.defaultSearchTimeout = settings.getAsTime("xpack.watcher.internal.ops.search.default_timeout", TimeValue.timeValueSeconds(30)); + this.parser = parser; + this.client = client; + } + + /** + * Ensure that watcher can be started, by checking if all indices are marked as up and ready in the cluster state + * @param state The current cluster state + * @return true if everything is good to go, so that the service can be started + */ + public boolean validate(ClusterState state) { + boolean executionServiceValid = executionService.validate(state); + if (executionServiceValid) { + try { + IndexMetaData indexMetaData = WatchStoreUtils.getConcreteIndex(Watch.INDEX, state.metaData()); + // no watch index yet means we are good to go + if (indexMetaData == null) { + return true; + } else { + if (indexMetaData.getState() == IndexMetaData.State.CLOSE) { + logger.debug("watch index [{}] is marked as closed, watcher cannot be started", indexMetaData.getIndex().getName()); + return false; + } else { + return state.routingTable().index(indexMetaData.getIndex()).allPrimaryShardsActive(); + } + } + } catch (IllegalStateException e) { + logger.trace((Supplier) () -> new ParameterizedMessage("error getting index meta data [{}]: ", Watch.INDEX), e); + return false; + } + } + + return false; + } + + public void start(ClusterState clusterState) throws Exception { + // starting already triggered, exit early + WatcherState currentState = state.get(); + if (currentState == WatcherState.STARTING || currentState == WatcherState.STARTED) { + throw new IllegalStateException("watcher is already in state ["+ currentState +"]"); + } + + if (state.compareAndSet(WatcherState.STOPPED, WatcherState.STARTING)) { + try { + logger.debug("starting watch service..."); + + executionService.start(); + Collection watches = loadWatches(clusterState); + triggerService.start(watches); + + Collection triggeredWatches = triggeredWatchStore.findTriggeredWatches(watches, clusterState); + executionService.executeTriggeredWatches(triggeredWatches); + + state.set(WatcherState.STARTED); + logger.debug("watch service has started"); + } catch (Exception e) { + state.set(WatcherState.STOPPED); + throw e; + } + } else { + logger.debug("could not transition state from stopped to starting, current state [{}]", state.get()); + } + } + + /** + * Stops the watcher service and it's subservices. 
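start() above only proceeds when the service state can be atomically flipped from STOPPED to STARTING, and it rolls back to STOPPED if loading fails. A stripped-down sketch of that compare-and-set guard, using a hypothetical enum rather than the real WatcherState:

```java
import java.util.concurrent.atomic.AtomicReference;

public class StateGuardSketch {
    enum State { STOPPED, STARTING, STARTED }

    private final AtomicReference<State> state = new AtomicReference<>(State.STOPPED);

    // Only the caller that wins the STOPPED -> STARTING race does the startup work;
    // concurrent callers see false and back off, mirroring WatcherService.start().
    boolean tryStart(Runnable startupWork) {
        if (state.compareAndSet(State.STOPPED, State.STARTING)) {
            try {
                startupWork.run();
                state.set(State.STARTED);
                return true;
            } catch (RuntimeException e) {
                state.set(State.STOPPED); // roll back so a later attempt can retry
                throw e;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        StateGuardSketch guard = new StateGuardSketch();
        System.out.println(guard.tryStart(() -> {})); // true  -> transitioned to STARTED
        System.out.println(guard.tryStart(() -> {})); // false -> already started
    }
}
```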
Should only be called, when watcher is stopped manually + */ + public void stop(String reason) { + WatcherState currentState = state.get(); + if (currentState == WatcherState.STOPPING || currentState == WatcherState.STOPPED) { + logger.trace("watcher is already in state [{}] not stopping", currentState); + } else { + try { + if (state.compareAndSet(WatcherState.STARTED, WatcherState.STOPPING)) { + logger.info("stopping watch service, reason [{}]", reason); + triggerService.stop(); + executionService.stop(); + state.set(WatcherState.STOPPED); + logger.debug("watch service has stopped"); + } else { + logger.debug("could not transition state from started to stopping, current state [{}]", state.get()); + } + } catch (Exception e) { + state.set(WatcherState.STOPPED); + logger.error("Error stopping watcher", e); + } + } + } + + /** + * Reload the watcher service, does not switch the state from stopped to started, just keep going + * @param clusterState cluster state, which is needed to find out about local shards + */ + public void reload(ClusterState clusterState, String reason) { + pauseExecution(reason); + + // load watches + Collection watches = loadWatches(clusterState); + watches.forEach(triggerService::add); + + // then load triggered watches, which might have been in the queue that we just cleared, + // maybe we dont need to execute those anymore however, i.e. due to shard shuffling + // then someone else will + Collection triggeredWatches = triggeredWatchStore.findTriggeredWatches(watches, clusterState); + executionService.executeTriggeredWatches(triggeredWatches); + } + + /** + * Stop execution of watches on this node, do not try to reload anything, but still allow + * manual watch execution, i.e. via the execute watch API + */ + public void pauseExecution(String reason) { + int cancelledTaskCount = executionService.pauseExecution(); + triggerService.pauseExecution(); + logger.info("paused watch execution, reason [{}], cancelled [{}] queued tasks", reason, cancelledTaskCount); + } + + /** + * This reads all watches from the .watches index/alias and puts them into memory for a short period of time, + * before they are fed into the trigger service. 
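loadWatches() below combines this scroll search with per-shard allocation bookkeeping. As a condensed sketch of just the scroll pattern it relies on, paging through an index and releasing the scroll context afterwards (the index name and timeout are placeholders; origin stashing and error handling are omitted):

```java
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.SortBuilders;

import java.util.ArrayList;
import java.util.List;

public class ScrollLoadSketch {

    static List<String> loadAllIds(Client client) {
        TimeValue keepAlive = TimeValue.timeValueSeconds(30);
        List<String> ids = new ArrayList<>();
        SearchRequest searchRequest = new SearchRequest("my-index")
                .scroll(keepAlive)
                .source(new SearchSourceBuilder().size(100).sort(SortBuilders.fieldSort("_doc")));
        SearchResponse response = client.search(searchRequest).actionGet(keepAlive);
        try {
            // keep pulling pages until a page comes back empty
            while (response.getHits().getHits().length != 0) {
                for (SearchHit hit : response.getHits()) {
                    ids.add(hit.getId());
                }
                SearchScrollRequest scroll = new SearchScrollRequest(response.getScrollId()).scroll(keepAlive);
                response = client.searchScroll(scroll).actionGet(keepAlive);
            }
        } finally {
            // always release the server-side scroll context
            ClearScrollRequest clear = new ClearScrollRequest();
            clear.addScrollId(response.getScrollId());
            client.clearScroll(clear).actionGet(keepAlive);
        }
        return ids;
    }
}
```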
+ */ + private Collection loadWatches(ClusterState clusterState) { + IndexMetaData indexMetaData = WatchStoreUtils.getConcreteIndex(INDEX, clusterState.metaData()); + // no index exists, all good, we can start + if (indexMetaData == null) { + return Collections.emptyList(); + } + + SearchResponse response = null; + List watches = new ArrayList<>(); + try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { + RefreshResponse refreshResponse = client.admin().indices().refresh(new RefreshRequest(INDEX)) + .actionGet(TimeValue.timeValueSeconds(5)); + if (refreshResponse.getSuccessfulShards() < indexMetaData.getNumberOfShards()) { + throw illegalState("not all required shards have been refreshed"); + } + + // find out local shards + String watchIndexName = indexMetaData.getIndex().getName(); + RoutingNode routingNode = clusterState.getRoutingNodes().node(clusterState.nodes().getLocalNodeId()); + // yes, this can happen, if the state is not recovered + if (routingNode == null) { + return Collections.emptyList(); + } + List localShards = routingNode.shardsWithState(watchIndexName, RELOCATING, STARTED); + + // find out all allocation ids + List watchIndexShardRoutings = clusterState.getRoutingTable().allShards(watchIndexName); + + SearchRequest searchRequest = new SearchRequest(INDEX) + .scroll(scrollTimeout) + .preference(Preference.ONLY_LOCAL.toString()) + .source(new SearchSourceBuilder() + .size(scrollSize) + .sort(SortBuilders.fieldSort("_doc")) + .version(true)); + response = client.search(searchRequest).actionGet(defaultSearchTimeout); + + if (response.getTotalShards() != response.getSuccessfulShards()) { + throw new ElasticsearchException("Partial response while loading watches"); + } + + if (response.getHits().getTotalHits() == 0) { + return Collections.emptyList(); + } + + Map> sortedShards = new HashMap<>(localShards.size()); + for (ShardRouting localShardRouting : localShards) { + List sortedAllocationIds = watchIndexShardRoutings.stream() + .filter(sr -> localShardRouting.getId() == sr.getId()) + .map(ShardRouting::allocationId).filter(Objects::nonNull) + .map(AllocationId::getId).filter(Objects::nonNull) + .sorted() + .collect(Collectors.toList()); + + sortedShards.put(localShardRouting.getId(), sortedAllocationIds); + } + + while (response.getHits().getHits().length != 0) { + for (SearchHit hit : response.getHits()) { + // find out if this hit should be processed locally + Optional correspondingShardOptional = localShards.stream() + .filter(sr -> sr.shardId().equals(hit.getShard().getShardId())) + .findFirst(); + if (correspondingShardOptional.isPresent() == false) { + continue; + } + ShardRouting correspondingShard = correspondingShardOptional.get(); + List shardAllocationIds = sortedShards.get(hit.getShard().getShardId().id()); + // based on the shard allocation ids, get the bucket of the shard, this hit was in + int bucket = shardAllocationIds.indexOf(correspondingShard.allocationId().getId()); + String id = hit.getId(); + + if (parseWatchOnThisNode(hit.getId(), shardAllocationIds.size(), bucket) == false) { + continue; + } + + try { + Watch watch = parser.parse(id, true, hit.getSourceRef(), XContentType.JSON); + watch.version(hit.getVersion()); + if (watch.status().state().isActive()) { + watches.add(watch); + } + } catch (Exception e) { + logger.error((Supplier) () -> new ParameterizedMessage("couldn't load watch [{}], ignoring it...", id), e); + } + } + SearchScrollRequest request = new 
SearchScrollRequest(response.getScrollId()); + request.scroll(scrollTimeout); + response = client.searchScroll(request).actionGet(defaultSearchTimeout); + } + } finally { + if (response != null) { + try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { + ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + clearScrollRequest.addScrollId(response.getScrollId()); + client.clearScroll(clearScrollRequest).actionGet(scrollTimeout); + } + } + } + + logger.debug("Loaded [{}] watches for execution", watches.size()); + + return watches; + } + + /** + * Find out if the watch with this id, should be parsed and triggered on this node + * + * @param id The id of the watch + * @param totalShardCount The count of all primary shards of the current watches index + * @param index The index of the local shard + * @return true if the we should parse the watch on this node, false otherwise + */ + private boolean parseWatchOnThisNode(String id, int totalShardCount, int index) { + int hash = Murmur3HashFunction.hash(id); + int shardIndex = Math.floorMod(hash, totalShardCount); + return shardIndex == index; + } + + public WatcherState state() { + return state.get(); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/ActionBuilders.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/ActionBuilders.java new file mode 100644 index 0000000000000..ab068c2e3457d --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/ActionBuilders.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
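The parseWatchOnThisNode() check above is how Watcher spreads watch execution across all copies of a .watches shard: the watch id is hashed, reduced modulo the number of shard copies, and only the copy whose position in the sorted allocation-id list matches that bucket loads the watch. A self-contained sketch of the same bucketing, where String.hashCode() stands in for Murmur3HashFunction.hash and the allocation ids are made up:

```java
import java.util.Arrays;
import java.util.List;

public class WatchBucketingSketch {

    // Should the shard copy at position localIndex (in the sorted allocation-id list)
    // parse and trigger the watch with the given id?
    static boolean ownsWatch(String watchId, int copyCount, int localIndex) {
        int hash = watchId.hashCode();               // stand-in for Murmur3HashFunction.hash(id)
        int bucket = Math.floorMod(hash, copyCount); // floorMod keeps the bucket in [0, copyCount)
        return bucket == localIndex;
    }

    public static void main(String[] args) {
        // Hypothetical sorted allocation ids of one .watches shard (primary plus one replica).
        List<String> allocationIds = Arrays.asList("alloc-a", "alloc-b");
        int localIndex = allocationIds.indexOf("alloc-b");

        for (String watchId : Arrays.asList("cpu_watch", "disk_watch", "heap_watch")) {
            System.out.printf("%s parsed locally: %b%n",
                    watchId, ownsWatch(watchId, allocationIds.size(), localIndex));
        }
    }
}
```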
+ */ +package org.elasticsearch.xpack.watcher.actions; + +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.notification.email.EmailTemplate; +import org.elasticsearch.xpack.watcher.notification.pagerduty.IncidentEvent; +import org.elasticsearch.xpack.watcher.notification.slack.message.SlackMessage; +import org.elasticsearch.xpack.watcher.actions.email.EmailAction; +import org.elasticsearch.xpack.watcher.actions.hipchat.HipChatAction; +import org.elasticsearch.xpack.watcher.actions.index.IndexAction; +import org.elasticsearch.xpack.watcher.actions.jira.JiraAction; +import org.elasticsearch.xpack.watcher.actions.logging.LoggingAction; +import org.elasticsearch.xpack.watcher.actions.pagerduty.PagerDutyAction; +import org.elasticsearch.xpack.watcher.actions.slack.SlackAction; +import org.elasticsearch.xpack.watcher.actions.webhook.WebhookAction; + +import java.util.Map; + +public final class ActionBuilders { + + private ActionBuilders() { + } + + public static EmailAction.Builder emailAction(EmailTemplate.Builder email) { + return emailAction(email.build()); + } + + public static EmailAction.Builder emailAction(EmailTemplate email) { + return EmailAction.builder(email); + } + + public static IndexAction.Builder indexAction(String index, String type) { + return IndexAction.builder(index, type); + } + + public static JiraAction.Builder jiraAction(String account, MapBuilder fields) { + return jiraAction(account, fields.immutableMap()); + } + + public static JiraAction.Builder jiraAction(String account, Map fields) { + return JiraAction.builder(account, fields); + } + + public static WebhookAction.Builder webhookAction(HttpRequestTemplate.Builder httpRequest) { + return webhookAction(httpRequest.build()); + } + + public static WebhookAction.Builder webhookAction(HttpRequestTemplate httpRequest) { + return WebhookAction.builder(httpRequest); + } + + public static LoggingAction.Builder loggingAction(String text) { + return loggingAction(new TextTemplate(text)); + } + + public static LoggingAction.Builder loggingAction(TextTemplate text) { + return LoggingAction.builder(text); + } + + public static HipChatAction.Builder hipchatAction(String message) { + return hipchatAction(new TextTemplate(message)); + } + + public static HipChatAction.Builder hipchatAction(String account, String body) { + return hipchatAction(account, new TextTemplate(body)); + } + + public static HipChatAction.Builder hipchatAction(TextTemplate body) { + return hipchatAction(null, body); + } + + public static HipChatAction.Builder hipchatAction(String account, TextTemplate body) { + return HipChatAction.builder(account, body); + } + + public static SlackAction.Builder slackAction(String account, SlackMessage.Template.Builder message) { + return slackAction(account, message.build()); + } + + public static SlackAction.Builder slackAction(String account, SlackMessage.Template message) { + return SlackAction.builder(account, message); + } + + public static PagerDutyAction.Builder triggerPagerDutyAction(String account, String description) { + return pagerDutyAction(IncidentEvent.templateBuilder(description).setAccount(account)); + } + + public static PagerDutyAction.Builder pagerDutyAction(IncidentEvent.Template.Builder event) { + return PagerDutyAction.builder(event.build()); + } + + public static PagerDutyAction.Builder 
pagerDutyAction(IncidentEvent.Template event) { + return PagerDutyAction.builder(event); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/email/EmailAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/email/EmailAction.java new file mode 100644 index 0000000000000..1dee11a0be88c --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/email/EmailAction.java @@ -0,0 +1,303 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.email; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.common.secret.Secret; +import org.elasticsearch.xpack.core.watcher.crypto.CryptoService; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherXContentParser; +import org.elasticsearch.xpack.watcher.notification.email.Authentication; +import org.elasticsearch.xpack.watcher.notification.email.DataAttachment; +import org.elasticsearch.xpack.watcher.notification.email.Email; +import org.elasticsearch.xpack.watcher.notification.email.EmailTemplate; +import org.elasticsearch.xpack.watcher.notification.email.Profile; +import org.elasticsearch.xpack.watcher.notification.email.attachment.EmailAttachments; +import org.elasticsearch.xpack.watcher.notification.email.attachment.EmailAttachmentsParser; + +import java.io.IOException; +import java.util.Locale; +import java.util.Objects; + +public class EmailAction implements Action { + + public static final String TYPE = "email"; + + private final EmailTemplate email; + @Nullable private final String account; + @Nullable private final Authentication auth; + @Nullable private final Profile profile; + @Nullable private final DataAttachment dataAttachment; + @Nullable private final EmailAttachments emailAttachments; + + public EmailAction(EmailTemplate email, @Nullable String account, @Nullable Authentication auth, @Nullable Profile profile, + @Nullable DataAttachment dataAttachment, @Nullable EmailAttachments emailAttachments) { + this.email = email; + this.account = account; + this.auth = auth; + this.profile = profile; + this.dataAttachment = dataAttachment; + this.emailAttachments = emailAttachments; + } + + public EmailTemplate getEmail() { + return email; + } + + public String getAccount() { + return account; + } + + public Authentication getAuth() { + return auth; + } + + public Profile getProfile() { + return profile; + } + + public DataAttachment getDataAttachment() { + return dataAttachment; + } + + public EmailAttachments getAttachments() { + return emailAttachments; + } + + @Override + public String type() { + return TYPE; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + EmailAction action = (EmailAction) o; + + return Objects.equals(email, action.email) && + Objects.equals(account, action.account) && + 
Objects.equals(auth, action.auth) && + Objects.equals(profile, action.profile) && + Objects.equals(emailAttachments, action.emailAttachments) && + Objects.equals(dataAttachment, action.dataAttachment); + } + + @Override + public int hashCode() { + return Objects.hash(email, account, auth, profile, dataAttachment, emailAttachments); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (account != null) { + builder.field(Field.ACCOUNT.getPreferredName(), account); + } + if (auth != null) { + builder.field(Field.USER.getPreferredName(), auth.user()); + if (WatcherParams.hideSecrets(params) && auth.password().value().startsWith(CryptoService.ENCRYPTED_TEXT_PREFIX) == false) { + builder.field(Field.PASSWORD.getPreferredName(), WatcherXContentParser.REDACTED_PASSWORD); + } else { + builder.field(Field.PASSWORD.getPreferredName(), auth.password().value()); + } + } + if (profile != null) { + builder.field(Field.PROFILE.getPreferredName(), profile.name().toLowerCase(Locale.ROOT)); + } + if (dataAttachment != null) { + builder.field(Field.ATTACH_DATA.getPreferredName(), dataAttachment, params); + } + if (emailAttachments != null) { + emailAttachments.toXContent(builder, params); + } + email.xContentBody(builder, params); + return builder.endObject(); + } + + public static EmailAction parse(String watchId, String actionId, XContentParser parser, + EmailAttachmentsParser emailAttachmentsParser) throws IOException { + EmailTemplate.Parser emailParser = new EmailTemplate.Parser(); + String account = null; + String user = null; + Secret password = null; + Profile profile = Profile.STANDARD; + DataAttachment dataAttachment = null; + EmailAttachments attachments = EmailAttachments.EMPTY_ATTACHMENTS; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Field.ATTACH_DATA.match(currentFieldName, parser.getDeprecationHandler())) { + try { + dataAttachment = DataAttachment.parse(parser); + } catch (IOException ioe) { + throw new ElasticsearchParseException("could not parse [{}] action [{}/{}]. failed to parse data attachment field " + + "[{}]", ioe, TYPE, watchId, actionId, currentFieldName); + } + } else if (Field.ATTACHMENTS.match(currentFieldName, parser.getDeprecationHandler())) { + attachments = emailAttachmentsParser.parse(parser); + } else if (!emailParser.handle(currentFieldName, parser)) { + if (token == XContentParser.Token.VALUE_STRING) { + if (Field.ACCOUNT.match(currentFieldName, parser.getDeprecationHandler())) { + account = parser.text(); + } else if (Field.USER.match(currentFieldName, parser.getDeprecationHandler())) { + user = parser.text(); + } else if (Field.PASSWORD.match(currentFieldName, parser.getDeprecationHandler())) { + password = WatcherXContentParser.secretOrNull(parser); + } else if (Field.PROFILE.match(currentFieldName, parser.getDeprecationHandler())) { + try { + profile = Profile.resolve(parser.text()); + } catch (IllegalArgumentException iae) { + throw new ElasticsearchParseException("could not parse [{}] action [{}/{}]", TYPE, watchId, actionId, iae); + } + } else { + throw new ElasticsearchParseException("could not parse [{}] action [{}/{}]. 
unexpected string field [{}]", TYPE, + watchId, actionId, currentFieldName); + } + } else { + throw new ElasticsearchParseException("could not parse [{}] action [{}/{}]. unexpected token [{}]", TYPE, watchId, + actionId, token); + } + } + } + + Authentication auth = null; + if (user != null) { + auth = new Authentication(user, password); + } + + return new EmailAction(emailParser.parsedTemplate(), account, auth, profile, dataAttachment, attachments); + } + + public static Builder builder(EmailTemplate email) { + return new Builder(email); + } + + public abstract static class Result extends Action.Result { + + protected Result(Status status) { + super(TYPE, status); + } + + public static class Success extends Result { + + private final String account; + private final Email email; + + Success(String account, Email email) { + super(Status.SUCCESS); + this.account = account; + this.email = email; + } + + public String account() { + return account; + } + + public Email email() { + return email; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject(type) + .field(Field.ACCOUNT.getPreferredName(), account) + .field(Field.MESSAGE.getPreferredName(), email, params) + .endObject(); + } + } + + public static class Simulated extends Result { + + private final Email email; + + public Email email() { + return email; + } + + Simulated(Email email) { + super(Status.SIMULATED); + this.email = email; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject(type) + .field(Field.MESSAGE.getPreferredName(), email, params) + .endObject(); + } + } + } + + public static class Builder implements Action.Builder { + + final EmailTemplate email; + @Nullable String account; + @Nullable Authentication auth; + @Nullable Profile profile; + @Nullable DataAttachment dataAttachment; + @Nullable EmailAttachments attachments; + + private Builder(EmailTemplate email) { + this.email = email; + } + + public Builder setAccount(String account) { + this.account = account; + return this; + } + + public Builder setAuthentication(String username, char[] password) { + this.auth = new Authentication(username, new Secret(password)); + return this; + } + + public Builder setProfile(Profile profile) { + this.profile = profile; + return this; + } + + @Deprecated + public Builder setAttachPayload(DataAttachment dataAttachment) { + this.dataAttachment = dataAttachment; + return this; + } + + public Builder setAttachments(EmailAttachments attachments) { + this.attachments = attachments; + return this; + } + + public EmailAction build() { + return new EmailAction(email, account, auth, profile, dataAttachment, attachments); + } + } + + interface Field { + + // common fields + ParseField ACCOUNT = new ParseField("account"); + + // action fields + ParseField PROFILE = new ParseField("profile"); + ParseField USER = new ParseField("user"); + ParseField PASSWORD = new ParseField("password"); + ParseField ATTACH_DATA = new ParseField("attach_data"); + ParseField ATTACHMENTS = new ParseField("attachments"); + + // result fields + ParseField MESSAGE = new ParseField("message"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/email/EmailActionFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/email/EmailActionFactory.java new file mode 100644 index 0000000000000..c2dae2855fb32 --- /dev/null +++ 
b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/email/EmailActionFactory.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.email; + +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.actions.ActionFactory; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.email.EmailService; +import org.elasticsearch.xpack.watcher.notification.email.HtmlSanitizer; +import org.elasticsearch.xpack.watcher.notification.email.attachment.EmailAttachmentsParser; + +import java.io.IOException; + +public class EmailActionFactory extends ActionFactory { + + private final EmailService emailService; + private final TextTemplateEngine templateEngine; + private final HtmlSanitizer htmlSanitizer; + private final EmailAttachmentsParser emailAttachmentsParser; + + public EmailActionFactory(Settings settings, EmailService emailService, TextTemplateEngine templateEngine, + EmailAttachmentsParser emailAttachmentsParser) { + super(Loggers.getLogger(ExecutableEmailAction.class, settings)); + this.emailService = emailService; + this.templateEngine = templateEngine; + this.htmlSanitizer = new HtmlSanitizer(settings); + this.emailAttachmentsParser = emailAttachmentsParser; + } + + @Override + public ExecutableEmailAction parseExecutable(String watchId, String actionId, XContentParser parser) throws IOException { + return new ExecutableEmailAction(EmailAction.parse(watchId, actionId, parser, emailAttachmentsParser), + actionLogger, emailService, templateEngine, htmlSanitizer, emailAttachmentsParser.getParsers()); + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/email/ExecutableEmailAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/email/ExecutableEmailAction.java new file mode 100644 index 0000000000000..f737d89c1286d --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/email/ExecutableEmailAction.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions.email; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.actions.ExecutableAction; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.email.Attachment; +import org.elasticsearch.xpack.watcher.notification.email.DataAttachment; +import org.elasticsearch.xpack.watcher.notification.email.Email; +import org.elasticsearch.xpack.watcher.notification.email.EmailService; +import org.elasticsearch.xpack.watcher.notification.email.HtmlSanitizer; +import org.elasticsearch.xpack.watcher.notification.email.attachment.EmailAttachmentParser; +import org.elasticsearch.xpack.watcher.support.Variables; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class ExecutableEmailAction extends ExecutableAction { + + private final EmailService emailService; + private final TextTemplateEngine templateEngine; + private final HtmlSanitizer htmlSanitizer; + private final Map emailAttachmentParsers; + + public ExecutableEmailAction(EmailAction action, Logger logger, EmailService emailService, TextTemplateEngine templateEngine, + HtmlSanitizer htmlSanitizer, Map emailAttachmentParsers) { + super(action, logger); + this.emailService = emailService; + this.templateEngine = templateEngine; + this.htmlSanitizer = htmlSanitizer; + this.emailAttachmentParsers = emailAttachmentParsers; + } + + public Action.Result execute(String actionId, WatchExecutionContext ctx, Payload payload) throws Exception { + Map model = Variables.createCtxModel(ctx, payload); + + Map attachments = new HashMap<>(); + DataAttachment dataAttachment = action.getDataAttachment(); + if (dataAttachment != null) { + Attachment attachment = dataAttachment.create("data", model); + attachments.put(attachment.id(), attachment); + } + + if (action.getAttachments() != null && action.getAttachments().getAttachments().size() > 0) { + for (EmailAttachmentParser.EmailAttachment emailAttachment : action.getAttachments().getAttachments()) { + EmailAttachmentParser parser = emailAttachmentParsers.get(emailAttachment.type()); + try { + Attachment attachment = parser.toAttachment(ctx, payload, emailAttachment); + attachments.put(attachment.id(), attachment); + } catch (ElasticsearchException | IOException e) { + return new EmailAction.Result.FailureWithException(action.type(), e); + } + } + } + + Email.Builder email = action.getEmail().render(templateEngine, model, htmlSanitizer, attachments); + email.id(actionId + "_" + ctx.id().value()); + + if (ctx.simulateAction(actionId)) { + return new EmailAction.Result.Simulated(email.build()); + } + + EmailService.EmailSent sent = emailService.send(email.build(), action.getAuth(), action.getProfile(), action.getAccount()); + return new EmailAction.Result.Success(sent.account(), sent.email()); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/hipchat/ExecutableHipChatAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/hipchat/ExecutableHipChatAction.java new file mode 100644 index 0000000000000..5772d1ffa6f1f --- /dev/null +++ 
b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/hipchat/ExecutableHipChatAction.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.hipchat; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.actions.ExecutableAction; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatAccount; +import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatMessage; +import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatService; +import org.elasticsearch.xpack.watcher.notification.hipchat.SentMessages; +import org.elasticsearch.xpack.watcher.support.Variables; + +import java.util.Map; + +public class ExecutableHipChatAction extends ExecutableAction { + + private final TextTemplateEngine templateEngine; + private final HipChatService hipchatService; + + public ExecutableHipChatAction(HipChatAction action, Logger logger, HipChatService hipchatService, + TextTemplateEngine templateEngine) { + super(action, logger); + this.hipchatService = hipchatService; + this.templateEngine = templateEngine; + } + + @Override + public Action.Result execute(final String actionId, WatchExecutionContext ctx, Payload payload) throws Exception { + + HipChatAccount account = hipchatService.getAccount(action.account); + // lets validate the message again, in case the hipchat service were updated since the + // watch/action were created. + account.validateParsedTemplate(ctx.id().watchId(), actionId, action.message); + + Map model = Variables.createCtxModel(ctx, payload); + HipChatMessage message = account.render(ctx.id().watchId(), actionId, templateEngine, action.message, model); + + if (ctx.simulateAction(actionId)) { + return new HipChatAction.Result.Simulated(message); + } + + SentMessages sentMessages = account.send(message, action.proxy); + return new HipChatAction.Result.Executed(sentMessages); + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatAction.java new file mode 100644 index 0000000000000..57b5bfa591e4d --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatAction.java @@ -0,0 +1,253 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions.hipchat; + + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatMessage; +import org.elasticsearch.xpack.watcher.notification.hipchat.SentMessages; + +import java.io.IOException; +import java.util.Objects; + +public class HipChatAction implements Action { + + public static final String TYPE = "hipchat"; + + @Nullable final String account; + @Nullable final HttpProxy proxy; + final HipChatMessage.Template message; + + public HipChatAction(@Nullable String account, HipChatMessage.Template message, @Nullable HttpProxy proxy) { + this.account = account; + this.message = message; + this.proxy = proxy; + } + + @Override + public String type() { + return TYPE; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + HipChatAction that = (HipChatAction) o; + + return Objects.equals(account, that.account) && + Objects.equals(message, that.message) && + Objects.equals(proxy, that.proxy); + } + + @Override + public int hashCode() { + return Objects.hash(account, message, proxy); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (account != null) { + builder.field(Field.ACCOUNT.getPreferredName(), account); + } + if (proxy != null) { + proxy.toXContent(builder, params); + } + builder.field(Field.MESSAGE.getPreferredName(), message); + return builder.endObject(); + } + + public static HipChatAction parse(String watchId, String actionId, XContentParser parser) throws IOException { + String account = null; + HipChatMessage.Template message = null; + HttpProxy proxy = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Field.ACCOUNT.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_STRING) { + account = parser.text(); + } else { + throw new ElasticsearchParseException("failed to parse [{}] action [{}/{}]. expected [{}] to be of type string, but " + + "found [{}] instead", TYPE, watchId, actionId, Field.ACCOUNT.getPreferredName(), token); + } + } else if (Field.PROXY.match(currentFieldName, parser.getDeprecationHandler())) { + proxy = HttpProxy.parse(parser); + } else if (Field.MESSAGE.match(currentFieldName, parser.getDeprecationHandler())) { + try { + message = HipChatMessage.Template.parse(parser); + } catch (Exception e) { + throw new ElasticsearchParseException("failed to parse [{}] action [{}/{}]. failed to parse [{}] field", e, TYPE, + watchId, actionId, Field.MESSAGE.getPreferredName()); + } + } else { + throw new ElasticsearchParseException("failed to parse [{}] action [{}/{}]. unexpected token [{}]", TYPE, watchId, + actionId, token); + } + } + + if (message == null) { + throw new ElasticsearchParseException("failed to parse [{}] action [{}/{}]. 
missing required [{}] field", TYPE, watchId, + actionId, Field.MESSAGE.getPreferredName()); + } + + return new HipChatAction(account, message, proxy); + } + + public static Builder builder(String account, TextTemplate body) { + return new Builder(account, body); + } + + public interface Result { + + class Executed extends Action.Result implements Result { + + private final SentMessages sentMessages; + + public Executed(SentMessages sentMessages) { + super(TYPE, status(sentMessages)); + this.sentMessages = sentMessages; + } + + public SentMessages sentMessages() { + return sentMessages; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field(type, sentMessages, params); + } + + static Status status(SentMessages sentMessages) { + boolean hasSuccesses = false; + boolean hasFailures = false; + for (SentMessages.SentMessage message : sentMessages) { + if (message.isSuccess()) { + hasSuccesses = true; + } else { + hasFailures = true; + } + if (hasFailures && hasSuccesses) { + return Status.PARTIAL_FAILURE; + } + } + return hasFailures ? Status.FAILURE : Status.SUCCESS; + } + } + + class Simulated extends Action.Result implements Result { + + private final HipChatMessage message; + + protected Simulated(HipChatMessage message) { + super(TYPE, Status.SIMULATED); + this.message = message; + } + + public HipChatMessage getMessage() { + return message; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject(type) + .field(Field.MESSAGE.getPreferredName(), message, params) + .endObject(); + } + } + } + + public static class Builder implements Action.Builder { + + final String account; + final HipChatMessage.Template.Builder messageBuilder; + private HttpProxy proxy; + + public Builder(String account, TextTemplate body) { + this.account = account; + this.messageBuilder = new HipChatMessage.Template.Builder(body); + } + + public Builder addRooms(TextTemplate... rooms) { + messageBuilder.addRooms(rooms); + return this; + } + + public Builder addRooms(String... rooms) { + TextTemplate[] templates = new TextTemplate[rooms.length]; + for (int i = 0; i < rooms.length; i++) { + templates[i] = new TextTemplate(rooms[i]); + } + return addRooms(templates); + } + + + public Builder addUsers(TextTemplate... users) { + messageBuilder.addUsers(users); + return this; + } + + public Builder addUsers(String... 
users) { + TextTemplate[] templates = new TextTemplate[users.length]; + for (int i = 0; i < users.length; i++) { + templates[i] = new TextTemplate(users[i]); + } + return addUsers(templates); + } + + public Builder setFrom(String from) { + messageBuilder.setFrom(from); + return this; + } + + public Builder setFormat(HipChatMessage.Format format) { + messageBuilder.setFormat(format); + return this; + } + + public Builder setColor(TextTemplate color) { + messageBuilder.setColor(color); + return this; + } + + public Builder setColor(HipChatMessage.Color color) { + return setColor(color.asTemplate()); + } + + public Builder setNotify(boolean notify) { + messageBuilder.setNotify(notify); + return this; + } + + public Builder setProxy(HttpProxy proxy) { + this.proxy = proxy; + return this; + } + + @Override + public HipChatAction build() { + return new HipChatAction(account, messageBuilder.build(), proxy); + } + } + + public interface Field { + ParseField ACCOUNT = new ParseField("account"); + ParseField MESSAGE = new ParseField("message"); + ParseField PROXY = new ParseField("proxy"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatActionFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatActionFactory.java new file mode 100644 index 0000000000000..081dc2e331bcd --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatActionFactory.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
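The fluent builder above is what ActionBuilders.hipchatAction(...) ultimately hands back when a watch is assembled programmatically. A hypothetical usage, limited to the methods shown in this class (the account and room names are made-up values, not defaults):

```java
import org.elasticsearch.xpack.watcher.actions.hipchat.HipChatAction;
import org.elasticsearch.xpack.watcher.common.text.TextTemplate;

public class HipChatActionUsageSketch {
    public static void main(String[] args) {
        // "ops-account" would have to match a configured HipChat notification account;
        // "ops-room" is an illustrative room name.
        HipChatAction action = HipChatAction.builder("ops-account", new TextTemplate("CPU alert fired"))
                .addRooms("ops-room")
                .setNotify(true)
                .build();
        System.out.println(action.type()); // prints "hipchat"
    }
}
```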
+ */ +package org.elasticsearch.xpack.watcher.actions.hipchat; + +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.actions.ActionFactory; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatAccount; +import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatService; + +import java.io.IOException; + +public class HipChatActionFactory extends ActionFactory { + + private final TextTemplateEngine templateEngine; + private final HipChatService hipchatService; + + public HipChatActionFactory(Settings settings, TextTemplateEngine templateEngine, HipChatService hipchatService) { + super(Loggers.getLogger(ExecutableHipChatAction.class, settings)); + this.templateEngine = templateEngine; + this.hipchatService = hipchatService; + } + + @Override + public ExecutableHipChatAction parseExecutable(String watchId, String actionId, XContentParser parser) throws IOException { + HipChatAction action = HipChatAction.parse(watchId, actionId, parser); + HipChatAccount account = hipchatService.getAccount(action.account); + account.validateParsedTemplate(watchId, actionId, action.message); + return new ExecutableHipChatAction(action, actionLogger, hipchatService, templateEngine); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java new file mode 100644 index 0000000000000..e49732f0cb543 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java @@ -0,0 +1,224 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
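ExecutableIndexAction, which follows, gives the payload's `_doc` field special treatment: a single map becomes one IndexRequest, while a list (or array) of maps is fanned out into a BulkRequest with one IndexRequest per entry. A hypothetical payload illustrating both shapes (the field names are made up):

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class IndexActionPayloadSketch {

    private static Map<String, Object> doc(String status, String node) {
        Map<String, Object> d = new HashMap<>();
        d.put("status", status);
        d.put("node", node);
        return d;
    }

    public static void main(String[] args) {
        // Single document: the map under "_doc" is indexed with one IndexRequest.
        Map<String, Object> singlePayload = new HashMap<>();
        singlePayload.put("_doc", doc("red", "node-1"));

        // Multiple documents: each list entry becomes its own IndexRequest inside a BulkRequest.
        Map<String, Object> bulkPayload = new HashMap<>();
        bulkPayload.put("_doc", Arrays.asList(doc("red", "node-1"), doc("yellow", "node-2")));

        System.out.println(singlePayload);
        System.out.println(bulkPayload);
    }
}
```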
+ */ +package org.elasticsearch.xpack.watcher.actions.index; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.actions.Action.Result.Status; +import org.elasticsearch.xpack.core.watcher.actions.ExecutableAction; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.WatcherClientHelper; +import org.elasticsearch.xpack.watcher.support.ArrayObjectIterator; +import org.joda.time.DateTime; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.stream.Stream; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalState; + +public class ExecutableIndexAction extends ExecutableAction { + + private static final String INDEX_FIELD = "_index"; + private static final String TYPE_FIELD = "_type"; + private static final String ID_FIELD = "_id"; + + private final Client client; + private final TimeValue indexDefaultTimeout; + private final TimeValue bulkDefaultTimeout; + + public ExecutableIndexAction(IndexAction action, Logger logger, Client client, + TimeValue indexDefaultTimeout, TimeValue bulkDefaultTimeout) { + super(action, logger); + this.client = client; + this.indexDefaultTimeout = action.timeout != null ? action.timeout : indexDefaultTimeout; + this.bulkDefaultTimeout = action.timeout != null ? action.timeout : bulkDefaultTimeout; + } + + @Override + public Action.Result execute(String actionId, WatchExecutionContext ctx, Payload payload) throws Exception { + Map data = payload.data(); + if (data.containsKey("_doc")) { + Object doc = data.get("_doc"); + if (doc instanceof Iterable) { + return indexBulk((Iterable) doc, actionId, ctx); + } + if (doc.getClass().isArray()) { + return indexBulk(new ArrayObjectIterator.Iterable(doc), actionId, ctx); + } + if (doc instanceof Map) { + data = (Map) doc; + } else { + throw illegalState("could not execute action [{}] of watch [{}]. failed to index payload data." 
+ + "[_data] field must either hold a Map or an List/Array of Maps", actionId, ctx.watch().id()); + } + } + + if (data.containsKey(INDEX_FIELD) || data.containsKey(TYPE_FIELD) || data.containsKey(ID_FIELD)) { + data = mutableMap(data); + } + IndexRequest indexRequest = new IndexRequest(); + if (action.refreshPolicy != null) { + indexRequest.setRefreshPolicy(action.refreshPolicy); + } + + indexRequest.index(getField(actionId, ctx.id().watchId(), "index", data, INDEX_FIELD, action.index)); + indexRequest.type(getField(actionId, ctx.id().watchId(), "type",data, TYPE_FIELD, action.docType)); + indexRequest.id(getField(actionId, ctx.id().watchId(), "id",data, ID_FIELD, action.docId)); + + data = addTimestampToDocument(data, ctx.executionTime()); + BytesReference bytesReference; + try (XContentBuilder builder = jsonBuilder()) { + indexRequest.source(builder.prettyPrint().map(data)); + } + + if (ctx.simulateAction(actionId)) { + return new IndexAction.Simulated(indexRequest.index(), indexRequest.type(), indexRequest.id(), action.refreshPolicy, + new XContentSource(indexRequest.source(), XContentType.JSON)); + } + + IndexResponse response = WatcherClientHelper.execute(ctx.watch(), client, + () -> client.index(indexRequest).actionGet(indexDefaultTimeout)); + try (XContentBuilder builder = jsonBuilder()) { + indexResponseToXContent(builder, response); + bytesReference = BytesReference.bytes(builder); + } + return new IndexAction.Result(Status.SUCCESS, new XContentSource(bytesReference, XContentType.JSON)); + } + + Action.Result indexBulk(Iterable list, String actionId, WatchExecutionContext ctx) throws Exception { + if (action.docId != null) { + throw illegalState("could not execute action [{}] of watch [{}]. [doc_id] cannot be used with bulk [_doc] indexing"); + } + + BulkRequest bulkRequest = new BulkRequest(); + if (action.refreshPolicy != null) { + bulkRequest.setRefreshPolicy(action.refreshPolicy); + } + + for (Object item : list) { + if (!(item instanceof Map)) { + throw illegalState("could not execute action [{}] of watch [{}]. failed to index payload data. 
" + + "[_data] field must either hold a Map or an List/Array of Maps", actionId, ctx.watch().id()); + } + + Map doc = (Map) item; + if (doc.containsKey(INDEX_FIELD) || doc.containsKey(TYPE_FIELD) || doc.containsKey(ID_FIELD)) { + doc = mutableMap(doc); + } + + IndexRequest indexRequest = new IndexRequest(); + indexRequest.index(getField(actionId, ctx.id().watchId(), "index", doc, INDEX_FIELD, action.index)); + indexRequest.type(getField(actionId, ctx.id().watchId(), "type",doc, TYPE_FIELD, action.docType)); + indexRequest.id(getField(actionId, ctx.id().watchId(), "id",doc, ID_FIELD, action.docId)); + + doc = addTimestampToDocument(doc, ctx.executionTime()); + try (XContentBuilder builder = jsonBuilder()) { + indexRequest.source(builder.prettyPrint().map(doc)); + } + bulkRequest.add(indexRequest); + } + BulkResponse bulkResponse = WatcherClientHelper.execute(ctx.watch(), client, + () -> client.bulk(bulkRequest).actionGet(bulkDefaultTimeout)); + try (XContentBuilder jsonBuilder = jsonBuilder().startArray()) { + for (BulkItemResponse item : bulkResponse) { + itemResponseToXContent(jsonBuilder, item); + } + jsonBuilder.endArray(); + + // different error states, depending on how successful the bulk operation was + long failures = Stream.of(bulkResponse.getItems()).filter(BulkItemResponse::isFailed).count(); + if (failures == 0) { + return new IndexAction.Result(Status.SUCCESS, new XContentSource(BytesReference.bytes(jsonBuilder), XContentType.JSON)); + } else if (failures == bulkResponse.getItems().length) { + return new IndexAction.Result(Status.FAILURE, new XContentSource(BytesReference.bytes(jsonBuilder), XContentType.JSON)); + } else { + return new IndexAction.Result(Status.PARTIAL_FAILURE, + new XContentSource(BytesReference.bytes(jsonBuilder), XContentType.JSON)); + } + } + } + + private Map addTimestampToDocument(Map data, DateTime executionTime) { + if (action.executionTimeField != null) { + data = mutableMap(data); + data.put(action.executionTimeField, WatcherDateTimeUtils.formatDate(executionTime)); + } + return data; + } + + /** + * Extracts the specified field out of data map, or alternative falls back to the action value + */ + private String getField(String actionId, String watchId, String name, Map data, String fieldName, String defaultValue) { + Object obj = data.remove(fieldName); + if (obj != null) { + if (defaultValue != null) { + throw illegalState("could not execute action [{}] of watch [{}]. " + + "[ctx.payload.{}] or [ctx.payload._doc.{}] were set together with action [{}] field. Only set one of them", + actionId, watchId, fieldName, fieldName, name); + } else { + return obj.toString(); + } + } + + return defaultValue; + } + + /** + * Guarantees that the {@code data} is mutable for any code that needs to modify the {@linkplain Map} before using it (e.g., from + * singleton, immutable {@code Map}s). + * + * @param data The map to make mutable + * @return Always a {@linkplain HashMap} + */ + private Map mutableMap(Map data) { + return data instanceof HashMap ? 
data : new HashMap<>(data); + } + + private static void itemResponseToXContent(XContentBuilder builder, BulkItemResponse item) throws IOException { + if (item.isFailed()) { + builder.startObject() + .field("failed", item.isFailed()) + .field("message", item.getFailureMessage()) + .field("id", item.getId()) + .field("type", item.getType()) + .field("index", item.getIndex()) + .endObject(); + } else { + indexResponseToXContent(builder, item.getResponse()); + } + } + + static void indexResponseToXContent(XContentBuilder builder, IndexResponse response) throws IOException { + builder.startObject() + .field("created", response.getResult() == DocWriteResponse.Result.CREATED) + .field("result", response.getResult().getLowercase()) + .field("id", response.getId()) + .field("version", response.getVersion()) + .field("type", response.getType()) + .field("index", response.getIndex()) + .endObject(); + } +} + + diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/IndexAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/IndexAction.java new file mode 100644 index 0000000000000..ceb6ac88f17f6 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/IndexAction.java @@ -0,0 +1,323 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.index; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; + +public class IndexAction implements Action { + + public static final String TYPE = "index"; + + @Nullable final String docType; + @Nullable final String index; + @Nullable final String docId; + @Nullable final String executionTimeField; + @Nullable final TimeValue timeout; + @Nullable final DateTimeZone dynamicNameTimeZone; + @Nullable final RefreshPolicy refreshPolicy; + + public IndexAction(@Nullable String index, @Nullable String docType, @Nullable String docId, + @Nullable String executionTimeField, + @Nullable TimeValue timeout, @Nullable DateTimeZone dynamicNameTimeZone, @Nullable RefreshPolicy refreshPolicy) { + this.index = index; + this.docType = docType; + this.docId = docId; + this.executionTimeField = executionTimeField; + this.timeout = timeout; + this.dynamicNameTimeZone = dynamicNameTimeZone; + this.refreshPolicy = refreshPolicy; + } + + @Override + public String type() { + return TYPE; + } + + public String getIndex() { + return index; + } + + public String getDocType() { + return docType; + } + + public String getDocId() { + return docId; + } + + public String getExecutionTimeField() { + return executionTimeField; + } + + public 
DateTimeZone getDynamicNameTimeZone() { + return dynamicNameTimeZone; + } + + public RefreshPolicy getRefreshPolicy() { + return refreshPolicy; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + IndexAction that = (IndexAction) o; + + return Objects.equals(index, that.index) && Objects.equals(docType, that.docType) && Objects.equals(docId, that.docId) + && Objects.equals(executionTimeField, that.executionTimeField) + && Objects.equals(timeout, that.timeout) + && Objects.equals(dynamicNameTimeZone, that.dynamicNameTimeZone) + && Objects.equals(refreshPolicy, that.refreshPolicy); + } + + @Override + public int hashCode() { + return Objects.hash(index, docType, docId, executionTimeField, timeout, dynamicNameTimeZone, refreshPolicy); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (index != null) { + builder.field(Field.INDEX.getPreferredName(), index); + } + if (docType != null) { + builder.field(Field.DOC_TYPE.getPreferredName(), docType); + } + if (docId != null) { + builder.field(Field.DOC_ID.getPreferredName(), docId); + } + if (executionTimeField != null) { + builder.field(Field.EXECUTION_TIME_FIELD.getPreferredName(), executionTimeField); + } + if (timeout != null) { + builder.humanReadableField(Field.TIMEOUT.getPreferredName(), Field.TIMEOUT_HUMAN.getPreferredName(), timeout); + } + if (dynamicNameTimeZone != null) { + builder.field(Field.DYNAMIC_NAME_TIMEZONE.getPreferredName(), dynamicNameTimeZone.toString()); + } + if (refreshPolicy!= null) { + builder.field(Field.REFRESH.getPreferredName(), refreshPolicy.getValue()); + } + return builder.endObject(); + } + + public static IndexAction parse(String watchId, String actionId, XContentParser parser) throws IOException { + String index = null; + String docType = null; + String docId = null; + String executionTimeField = null; + TimeValue timeout = null; + DateTimeZone dynamicNameTimeZone = null; + RefreshPolicy refreshPolicy = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Field.INDEX.match(currentFieldName, parser.getDeprecationHandler())) { + try { + index = parser.text(); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse [{}] action [{}/{}]. failed to parse index name value for " + + "field [{}]", pe, TYPE, watchId, actionId, currentFieldName); + } + } else if (token == XContentParser.Token.VALUE_NUMBER) { + if (Field.TIMEOUT.match(currentFieldName, parser.getDeprecationHandler())) { + timeout = timeValueMillis(parser.longValue()); + } else { + throw new ElasticsearchParseException("could not parse [{}] action [{}/{}]. 
unexpected number field [{}]", TYPE, + watchId, actionId, currentFieldName); + } + } else if (token == XContentParser.Token.VALUE_STRING) { + if (Field.DOC_TYPE.match(currentFieldName, parser.getDeprecationHandler())) { + docType = parser.text(); + } else if (Field.DOC_ID.match(currentFieldName, parser.getDeprecationHandler())) { + docId = parser.text(); + } else if (Field.EXECUTION_TIME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + executionTimeField = parser.text(); + } else if (Field.TIMEOUT_HUMAN.match(currentFieldName, parser.getDeprecationHandler())) { + // Parser for human specified timeouts and 2.x compatibility + timeout = WatcherDateTimeUtils.parseTimeValue(parser, Field.TIMEOUT_HUMAN.toString()); + } else if (Field.DYNAMIC_NAME_TIMEZONE.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_STRING) { + dynamicNameTimeZone = DateTimeZone.forID(parser.text()); + } else { + throw new ElasticsearchParseException("could not parse [{}] action for watch [{}]. failed to parse [{}]. must be " + + "a string value (e.g. 'UTC' or '+01:00').", TYPE, watchId, currentFieldName); + } + } else if (Field.REFRESH.match(currentFieldName, parser.getDeprecationHandler())) { + refreshPolicy = RefreshPolicy.parse(parser.text()); + } else { + throw new ElasticsearchParseException("could not parse [{}] action [{}/{}]. unexpected string field [{}]", TYPE, + watchId, actionId, currentFieldName); + } + } else { + throw new ElasticsearchParseException("could not parse [{}] action [{}/{}]. unexpected token [{}]", TYPE, watchId, + actionId, token); + } + } + + return new IndexAction(index, docType, docId, executionTimeField, timeout, dynamicNameTimeZone, refreshPolicy); + } + + public static Builder builder(String index, String docType) { + return new Builder(index, docType); + } + + public static class Result extends Action.Result { + + private final XContentSource response; + + public Result(Status status, XContentSource response) { + super(TYPE, status); + this.response = response; + } + + public XContentSource response() { + return response; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject(type) + .field(Field.RESPONSE.getPreferredName(), response, params) + .endObject(); + } + } + + static class Simulated extends Action.Result { + + private final String index; + private final String docType; + @Nullable private final String docId; + @Nullable private final RefreshPolicy refreshPolicy; + private final XContentSource source; + + protected Simulated(String index, String docType, @Nullable String docId, @Nullable RefreshPolicy refreshPolicy, + XContentSource source) { + super(TYPE, Status.SIMULATED); + this.index = index; + this.docType = docType; + this.docId = docId; + this.source = source; + this.refreshPolicy = refreshPolicy; + } + + public String index() { + return index; + } + + public String docType() { + return docType; + } + + public String docId() { + return docId; + } + + public XContentSource source() { + return source; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(type) + .startObject(Field.REQUEST.getPreferredName()) + .field(Field.INDEX.getPreferredName(), index) + .field(Field.DOC_TYPE.getPreferredName(), docType); + + if (docId != null) { + builder.field(Field.DOC_ID.getPreferredName(), docId); + } + + if (refreshPolicy != null) { + 
builder.field(Field.REFRESH.getPreferredName(), refreshPolicy.getValue()); + } + + return builder.field(Field.SOURCE.getPreferredName(), source, params) + .endObject() + .endObject(); + } + } + + public static class Builder implements Action.Builder { + + final String index; + final String docType; + String docId; + String executionTimeField; + TimeValue timeout; + DateTimeZone dynamicNameTimeZone; + RefreshPolicy refreshPolicy; + + private Builder(String index, String docType) { + this.index = index; + this.docType = docType; + } + + public Builder setDocId(String docId) { + this.docId = docId; + return this; + } + + public Builder setExecutionTimeField(String executionTimeField) { + this.executionTimeField = executionTimeField; + return this; + } + + public Builder setTimeout(TimeValue writeTimeout) { + this.timeout = writeTimeout; + return this; + } + + public Builder setDynamicNameTimeZone(DateTimeZone dynamicNameTimeZone) { + this.dynamicNameTimeZone = dynamicNameTimeZone; + return this; + } + + public Builder setRefreshPolicy(RefreshPolicy refreshPolicy) { + this.refreshPolicy = refreshPolicy; + return this; + } + + @Override + public IndexAction build() { + return new IndexAction(index, docType, docId, executionTimeField, timeout, dynamicNameTimeZone, refreshPolicy); + } + } + + interface Field { + ParseField INDEX = new ParseField("index"); + ParseField DOC_TYPE = new ParseField("doc_type"); + ParseField DOC_ID = new ParseField("doc_id"); + ParseField EXECUTION_TIME_FIELD = new ParseField("execution_time_field"); + ParseField SOURCE = new ParseField("source"); + ParseField RESPONSE = new ParseField("response"); + ParseField REQUEST = new ParseField("request"); + ParseField TIMEOUT = new ParseField("timeout_in_millis"); + ParseField TIMEOUT_HUMAN = new ParseField("timeout"); + ParseField DYNAMIC_NAME_TIMEZONE = new ParseField("dynamic_name_timezone"); + ParseField REFRESH = new ParseField("refresh"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionFactory.java new file mode 100644 index 0000000000000..7f9b13a6de9b1 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionFactory.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions.index; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.actions.ActionFactory; + +import java.io.IOException; + +public class IndexActionFactory extends ActionFactory { + + private final Client client; + private final TimeValue indexDefaultTimeout; + private final TimeValue bulkDefaultTimeout; + + public IndexActionFactory(Settings settings, Client client) { + super(Loggers.getLogger(IndexActionFactory.class, settings)); + this.client = client; + this.indexDefaultTimeout = settings.getAsTime("xpack.watcher.actions.index.default_timeout", TimeValue.timeValueSeconds(30)); + this.bulkDefaultTimeout = settings.getAsTime("xpack.watcher.actions.bulk.default_timeout", TimeValue.timeValueMinutes(1)); + } + + @Override + public ExecutableIndexAction parseExecutable(String watchId, String actionId, XContentParser parser) throws IOException { + return new ExecutableIndexAction(IndexAction.parse(watchId, actionId, parser), actionLogger, client, + indexDefaultTimeout, bulkDefaultTimeout); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/jira/ExecutableJiraAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/jira/ExecutableJiraAction.java new file mode 100644 index 0000000000000..acb0c8ce59142 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/jira/ExecutableJiraAction.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions.jira; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.actions.ExecutableAction; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.jira.JiraAccount; +import org.elasticsearch.xpack.watcher.notification.jira.JiraIssue; +import org.elasticsearch.xpack.watcher.notification.jira.JiraService; +import org.elasticsearch.xpack.watcher.support.Variables; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Function; + +public class ExecutableJiraAction extends ExecutableAction { + + private final TextTemplateEngine engine; + private final JiraService jiraService; + + public ExecutableJiraAction(JiraAction action, Logger logger, JiraService jiraService, TextTemplateEngine templateEngine) { + super(action, logger); + this.jiraService = jiraService; + this.engine = templateEngine; + } + + @Override + public Action.Result execute(final String actionId, WatchExecutionContext ctx, Payload payload) throws Exception { + JiraAccount account = jiraService.getAccount(action.account); + if (account == null) { + // the account associated with this action was deleted + throw new IllegalStateException("account [" + action.account + "] was not found. perhaps it was deleted"); + } + + final Function render = s -> engine.render(new TextTemplate(s), Variables.createCtxModel(ctx, payload)); + + Map fields = new HashMap<>(); + // Apply action fields + fields = merge(fields, action.fields, render); + // Apply default fields + fields = merge(fields, account.getDefaults(), render); + + if (ctx.simulateAction(actionId)) { + return new JiraAction.Simulated(fields); + } + + JiraIssue result = account.createIssue(fields, action.proxy); + return new JiraAction.Executed(result); + } + + /** + * Merges the defaults provided as the second parameter into the content of the first + * while applying a {@link Function} on both map key and map value. 
+ */ + static Map merge(final Map fields, final Map defaults, final Function fn) { + if (defaults != null) { + for (Map.Entry defaultEntry : defaults.entrySet()) { + Object value = defaultEntry.getValue(); + if (value instanceof String) { + // Apply the transformation to a simple string + value = fn.apply((String) value); + + } else if (value instanceof Map) { + // Apply the transformation to a map + value = merge(new HashMap<>(), (Map) value, fn); + + } else if (value instanceof String[]) { + // Apply the transformation to an array of strings + String[] newValues = new String[((String[]) value).length]; + for (int i = 0; i < newValues.length; i++) { + newValues[i] = fn.apply(((String[]) value)[i]); + } + value = newValues; + + } else if (value instanceof List) { + // Apply the transformation to a list of strings + List newValues = new ArrayList<>(((List) value).size()); + for (Object v : (List) value) { + if (v instanceof String) { + newValues.add(fn.apply((String) v)); + } else { + newValues.add(v); + } + } + value = newValues; + } + + // Apply the transformation to the key + String key = fn.apply(defaultEntry.getKey()); + + // Copy the value directly in the map if it does not exist yet. + // We don't try to merge maps or list. + if (fields.containsKey(key) == false) { + fields.put(key, value); + } + } + } + return fields; + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/jira/JiraAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/jira/JiraAction.java new file mode 100644 index 0000000000000..0d142fb344e4b --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/jira/JiraAction.java @@ -0,0 +1,175 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions.jira; + + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.notification.jira.JiraIssue; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +public class JiraAction implements Action { + + public static final String TYPE = "jira"; + + @Nullable final String account; + @Nullable final HttpProxy proxy; + final Map fields; + + public JiraAction(@Nullable String account, Map fields, HttpProxy proxy) { + this.account = account; + this.fields = fields; + this.proxy = proxy; + } + + @Override + public String type() { + return TYPE; + } + + public String getAccount() { + return account; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + JiraAction that = (JiraAction) o; + return Objects.equals(account, that.account) && + Objects.equals(fields, that.fields) && + Objects.equals(proxy, that.proxy); + } + + @Override + public int hashCode() { + return Objects.hash(account, fields, proxy); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (account != null) { + builder.field(Field.ACCOUNT.getPreferredName(), account); + } + if (proxy != null) { + proxy.toXContent(builder, params); + } + builder.field(Field.FIELDS.getPreferredName(), fields); + return builder.endObject(); + } + + public static JiraAction parse(String watchId, String actionId, XContentParser parser) throws IOException { + String account = null; + HttpProxy proxy = null; + Map fields = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Field.ACCOUNT.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_STRING) { + account = parser.text(); + } else { + throw new ElasticsearchParseException("failed to parse [{}] action [{}/{}]. expected [{}] to be of type string, but " + + "found [{}] instead", TYPE, watchId, actionId, Field.ACCOUNT.getPreferredName(), token); + } + } else if (Field.PROXY.match(currentFieldName, parser.getDeprecationHandler())) { + proxy = HttpProxy.parse(parser); + } else if (Field.FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { + try { + fields = parser.map(); + } catch (Exception e) { + throw new ElasticsearchParseException("failed to parse [{}] action [{}/{}]. failed to parse [{}] field", e, TYPE, + watchId, actionId, Field.FIELDS.getPreferredName()); + } + } else { + throw new ElasticsearchParseException("failed to parse [{}] action [{}/{}]. 
unexpected token [{}/{}]", TYPE, watchId, + actionId, token, currentFieldName); + } + } + if (fields == null) { + fields = Collections.emptyMap(); + } + return new JiraAction(account, fields, proxy); + } + + public static class Executed extends Action.Result { + + private final JiraIssue result; + + public Executed(JiraIssue result) { + super(TYPE, result.successful() ? Status.SUCCESS : Status.FAILURE); + this.result = result; + } + + public JiraIssue getResult() { + return result; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field(type, result, params); + } + } + + static class Simulated extends Action.Result { + + private final Map fields; + + protected Simulated(Map fields) { + super(TYPE, Status.SIMULATED); + this.fields = fields; + } + + public Map getFields() { + return fields; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject(type) + .field(Field.FIELDS.getPreferredName(), fields) + .endObject(); + } + } + + public static class Builder implements Action.Builder { + + final JiraAction action; + + public Builder(JiraAction action) { + this.action = action; + } + + @Override + public JiraAction build() { + return action; + } + } + + public static Builder builder(String account, Map fields) { + return new Builder(new JiraAction(account, fields, null)); + } + + public interface Field { + ParseField ACCOUNT = new ParseField("account"); + ParseField PROXY = new ParseField("proxy"); + ParseField FIELDS = new ParseField("fields"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/jira/JiraActionFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/jira/JiraActionFactory.java new file mode 100644 index 0000000000000..3d2184283a5f7 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/jira/JiraActionFactory.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions.jira; + +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.actions.ActionFactory; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.jira.JiraService; + +import java.io.IOException; + +public class JiraActionFactory extends ActionFactory { + + private final TextTemplateEngine templateEngine; + private final JiraService jiraService; + + public JiraActionFactory(Settings settings, TextTemplateEngine templateEngine, JiraService jiraService) { + super(Loggers.getLogger(ExecutableJiraAction.class, settings)); + this.templateEngine = templateEngine; + this.jiraService = jiraService; + } + + @Override + public ExecutableJiraAction parseExecutable(String watchId, String actionId, XContentParser parser) throws IOException { + JiraAction action = JiraAction.parse(watchId, actionId, parser); + jiraService.getAccount(action.getAccount()); // for validation -- throws exception if account not present + return new ExecutableJiraAction(action, actionLogger, jiraService, templateEngine); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/logging/ExecutableLoggingAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/logging/ExecutableLoggingAction.java new file mode 100644 index 0000000000000..37f242ca499e6 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/logging/ExecutableLoggingAction.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.logging; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.actions.ExecutableAction; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.support.Variables; + +import java.util.Map; + +public class ExecutableLoggingAction extends ExecutableAction { + + private final Logger textLogger; + private final TextTemplateEngine templateEngine; + + public ExecutableLoggingAction(LoggingAction action, Logger logger, Settings settings, TextTemplateEngine templateEngine) { + super(action, logger); + this.textLogger = action.category != null ? 
Loggers.getLogger(action.category, settings) : logger; + this.templateEngine = templateEngine; + } + + // for tests + ExecutableLoggingAction(LoggingAction action, Logger logger, Logger textLogger, TextTemplateEngine templateEngine) { + super(action, logger); + this.textLogger = textLogger; + this.templateEngine = templateEngine; + } + + Logger textLogger() { + return textLogger; + } + + @Override + public Action.Result execute(String actionId, WatchExecutionContext ctx, Payload payload) throws Exception { + Map model = Variables.createCtxModel(ctx, payload); + + String loggedText = templateEngine.render(action.text, model); + if (ctx.simulateAction(actionId)) { + return new LoggingAction.Result.Simulated(loggedText); + } + + action.level.log(textLogger, loggedText); + return new LoggingAction.Result.Success(loggedText); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/logging/LoggingAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/logging/LoggingAction.java new file mode 100644 index 0000000000000..69eb90d67eb82 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/logging/LoggingAction.java @@ -0,0 +1,195 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.logging; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; + +import java.io.IOException; +import java.util.Locale; + +public class LoggingAction implements Action { + + public static final String TYPE = "logging"; + + final TextTemplate text; + @Nullable final LoggingLevel level; + @Nullable final String category; + + public LoggingAction(TextTemplate text, @Nullable LoggingLevel level, @Nullable String category) { + this.text = text; + this.level = level != null ? level : LoggingLevel.INFO; + this.category = category; + } + + @Override + public String type() { + return TYPE; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + LoggingAction action = (LoggingAction) o; + + if (!text.equals(action.text)) return false; + if (level != action.level) return false; + return !(category != null ? !category.equals(action.category) : action.category != null); + } + + @Override + public int hashCode() { + int result = text.hashCode(); + result = 31 * result + (level != null ? level.hashCode() : 0); + result = 31 * result + (category != null ? 
category.hashCode() : 0); + return result; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (category != null) { + builder.field(Field.CATEGORY.getPreferredName(), category); + } + builder.field(Field.LEVEL.getPreferredName(), level.value()); + builder.field(Field.TEXT.getPreferredName(), text, params); + return builder.endObject(); + } + + public static LoggingAction parse(String watchId, String actionId, XContentParser parser) throws IOException { + String category = null; + LoggingLevel level = null; + TextTemplate text = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Field.TEXT.match(currentFieldName, parser.getDeprecationHandler())) { + try { + text = TextTemplate.parse(parser); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("failed to parse [{}] action [{}/{}]. failed to parse [{}] field", pe, TYPE, + watchId, actionId, Field.TEXT.getPreferredName()); + } + } else if (token == XContentParser.Token.VALUE_STRING) { + if (Field.CATEGORY.match(currentFieldName, parser.getDeprecationHandler())) { + category = parser.text(); + } else if (Field.LEVEL.match(currentFieldName, parser.getDeprecationHandler())) { + try { + level = LoggingLevel.valueOf(parser.text().toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException iae) { + throw new ElasticsearchParseException("failed to parse [{}] action [{}/{}]. unknown logging level [{}]", TYPE, + watchId, actionId, parser.text()); + } + } else { + throw new ElasticsearchParseException("failed to parse [{}] action [{}/{}]. unexpected string field [{}]", TYPE, + watchId, actionId, currentFieldName); + } + } else { + throw new ElasticsearchParseException("failed to parse [{}] action [{}/{}]. unexpected token [{}]", TYPE, watchId, + actionId, token); + } + } + + if (text == null) { + throw new ElasticsearchParseException("failed to parse [{}] action [{}/{}]. 
missing required [{}] field", TYPE, watchId, + actionId, Field.TEXT.getPreferredName()); + } + + return new LoggingAction(text, level, category); + } + + public static Builder builder(TextTemplate template) { + return new Builder(template); + } + + public interface Result { + + class Success extends Action.Result implements Result { + + private final String loggedText; + + public Success(String loggedText) { + super(TYPE, Status.SUCCESS); + this.loggedText = loggedText; + } + + public String loggedText() { + return loggedText; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject(type) + .field(Field.LOGGED_TEXT.getPreferredName(), loggedText) + .endObject(); + } + } + + class Simulated extends Action.Result implements Result { + + private final String loggedText; + + protected Simulated(String loggedText) { + super(TYPE, Status.SIMULATED); + this.loggedText = loggedText; + } + + public String loggedText() { + return loggedText; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject(type) + .field(Field.LOGGED_TEXT.getPreferredName(), loggedText) + .endObject(); + } + } + } + + public static class Builder implements Action.Builder { + + final TextTemplate text; + LoggingLevel level; + @Nullable String category; + + private Builder(TextTemplate text) { + this.text = text; + } + + public Builder setLevel(LoggingLevel level) { + this.level = level; + return this; + } + + public Builder setCategory(String category) { + this.category = category; + return this; + } + + @Override + public LoggingAction build() { + return new LoggingAction(text, level, category); + } + } + + interface Field { + ParseField CATEGORY = new ParseField("category"); + ParseField LEVEL = new ParseField("level"); + ParseField TEXT = new ParseField("text"); + ParseField LOGGED_TEXT = new ParseField("logged_text"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/logging/LoggingActionFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/logging/LoggingActionFactory.java new file mode 100644 index 0000000000000..44a8ace89e9ad --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/logging/LoggingActionFactory.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions.logging; + +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.actions.ActionFactory; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; + +import java.io.IOException; + +public class LoggingActionFactory extends ActionFactory { + + private final Settings settings; + private final TextTemplateEngine templateEngine; + + public LoggingActionFactory(Settings settings, TextTemplateEngine templateEngine) { + super(Loggers.getLogger(ExecutableLoggingAction.class, settings)); + this.settings = settings; + this.templateEngine = templateEngine; + } + + @Override + public ExecutableLoggingAction parseExecutable(String watchId, String actionId, XContentParser parser) throws IOException { + LoggingAction action = LoggingAction.parse(watchId, actionId, parser); + return new ExecutableLoggingAction(action, actionLogger, settings, templateEngine); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/logging/LoggingLevel.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/logging/LoggingLevel.java new file mode 100644 index 0000000000000..7d40cdc8afa13 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/logging/LoggingLevel.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.logging; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.SuppressLoggerChecks; + +import java.util.Locale; + +public enum LoggingLevel { + + ERROR() { + @Override + @SuppressLoggerChecks(reason = "logger delegation") + void log(Logger logger, String text) { + logger.error(text); + } + }, + WARN() { + @Override + @SuppressLoggerChecks(reason = "logger delegation") + void log(Logger logger, String text) { + logger.warn(text); + } + }, + INFO() { + @Override + @SuppressLoggerChecks(reason = "logger delegation") + void log(Logger logger, String text) { + logger.info(text); + } + }, + DEBUG() { + @Override + @SuppressLoggerChecks(reason = "logger delegation") + void log(Logger logger, String text) { + logger.debug(text); + } + }, + TRACE() { + @Override + @SuppressLoggerChecks(reason = "logger delegation") + void log(Logger logger, String text) { + logger.trace(text); + } + }; + + abstract void log(Logger logger, String text); + + public String value() { + return name().toLowerCase(Locale.ROOT); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/pagerduty/ExecutablePagerDutyAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/pagerduty/ExecutablePagerDutyAction.java new file mode 100644 index 0000000000000..224e72e1a3da5 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/pagerduty/ExecutablePagerDutyAction.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions.pagerduty; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.actions.ExecutableAction; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.pagerduty.IncidentEvent; +import org.elasticsearch.xpack.watcher.notification.pagerduty.PagerDutyAccount; +import org.elasticsearch.xpack.watcher.notification.pagerduty.PagerDutyService; +import org.elasticsearch.xpack.watcher.notification.pagerduty.SentEvent; +import org.elasticsearch.xpack.watcher.support.Variables; + +import java.util.Map; + +public class ExecutablePagerDutyAction extends ExecutableAction { + + private final TextTemplateEngine templateEngine; + private final PagerDutyService pagerDutyService; + + public ExecutablePagerDutyAction(PagerDutyAction action, Logger logger, PagerDutyService pagerDutyService, + TextTemplateEngine templateEngine) { + super(action, logger); + this.pagerDutyService = pagerDutyService; + this.templateEngine = templateEngine; + } + + @Override + public Action.Result execute(final String actionId, WatchExecutionContext ctx, Payload payload) throws Exception { + + PagerDutyAccount account = pagerDutyService.getAccount(action.event.account); + if (account == null) { + // the account associated with this action was deleted + throw new IllegalStateException("account [" + action.event.account + "] was not found. perhaps it was deleted"); + } + + Map model = Variables.createCtxModel(ctx, payload); + IncidentEvent event = action.event.render(ctx.watch().id(), actionId, templateEngine, model, account.getDefaults()); + + if (ctx.simulateAction(actionId)) { + return new PagerDutyAction.Result.Simulated(event); + } + + SentEvent sentEvent = account.send(event, payload); + return new PagerDutyAction.Result.Executed(account.getName(), sentEvent); + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/pagerduty/PagerDutyAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/pagerduty/PagerDutyAction.java new file mode 100644 index 0000000000000..78ddfd9efeb29 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/pagerduty/PagerDutyAction.java @@ -0,0 +1,135 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions.pagerduty; + + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.watcher.notification.pagerduty.IncidentEvent; +import org.elasticsearch.xpack.watcher.notification.pagerduty.SentEvent; + +import java.io.IOException; +import java.util.Objects; + +public class PagerDutyAction implements Action { + + public static final String TYPE = "pagerduty"; + + final IncidentEvent.Template event; + + public PagerDutyAction(IncidentEvent.Template event) { + this.event = event; + } + + @Override + public String type() { + return TYPE; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PagerDutyAction that = (PagerDutyAction) o; + return Objects.equals(event, that.event); + } + + @Override + public int hashCode() { + return Objects.hash(event); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + event.toXContent(builder, params); + return builder; + } + + public static PagerDutyAction parse(String watchId, String actionId, XContentParser parser) throws IOException { + IncidentEvent.Template eventTemplate = IncidentEvent.Template.parse(watchId, actionId, parser); + return new PagerDutyAction(eventTemplate); + } + + public static Builder builder(IncidentEvent.Template event) { + return new Builder(new PagerDutyAction(event)); + } + + public interface Result { + + class Executed extends Action.Result implements Result { + + private final String account; + private final SentEvent sentEvent; + + public Executed(String account, SentEvent sentEvent) { + super(TYPE, status(sentEvent)); + this.account = account; + this.sentEvent = sentEvent; + } + + public SentEvent sentEvent() { + return sentEvent; + } + + public String account() { + return account; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(type); + builder.field(XField.SENT_EVENT.getPreferredName(), sentEvent, params); + return builder.endObject(); + } + + static Status status(SentEvent sentEvent) { + return sentEvent.successful() ? 
Status.SUCCESS : Status.FAILURE; + } + } + + class Simulated extends Action.Result implements Result { + + private final IncidentEvent event; + + protected Simulated(IncidentEvent event) { + super(TYPE, Status.SIMULATED); + this.event = event; + } + + public IncidentEvent event() { + return event; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject(type) + .field(XField.EVENT.getPreferredName(), event, params) + .endObject(); + } + } + } + + public static class Builder implements Action.Builder { + + final PagerDutyAction action; + + public Builder(PagerDutyAction action) { + this.action = action; + } + + @Override + public PagerDutyAction build() { + return action; + } + } + + public interface XField { + ParseField SENT_EVENT = new ParseField("sent_event"); + ParseField EVENT = new ParseField("event"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/pagerduty/PagerDutyActionFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/pagerduty/PagerDutyActionFactory.java new file mode 100644 index 0000000000000..5cd18af3af699 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/pagerduty/PagerDutyActionFactory.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.pagerduty; + +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.actions.ActionFactory; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.pagerduty.PagerDutyService; + +import java.io.IOException; + +public class PagerDutyActionFactory extends ActionFactory { + + private final TextTemplateEngine templateEngine; + private final PagerDutyService pagerDutyService; + + public PagerDutyActionFactory(Settings settings, TextTemplateEngine templateEngine, PagerDutyService pagerDutyService) { + super(Loggers.getLogger(ExecutablePagerDutyAction.class, settings)); + this.templateEngine = templateEngine; + this.pagerDutyService = pagerDutyService; + } + + @Override + public ExecutablePagerDutyAction parseExecutable(String watchId, String actionId, XContentParser parser) throws IOException { + PagerDutyAction action = PagerDutyAction.parse(watchId, actionId, parser); + pagerDutyService.getAccount(action.event.account); + return new ExecutablePagerDutyAction(action, actionLogger, pagerDutyService, templateEngine); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/slack/ExecutableSlackAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/slack/ExecutableSlackAction.java new file mode 100644 index 0000000000000..9ab4a028ca13d --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/slack/ExecutableSlackAction.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.slack; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.actions.ExecutableAction; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.slack.SentMessages; +import org.elasticsearch.xpack.watcher.notification.slack.SlackAccount; +import org.elasticsearch.xpack.watcher.notification.slack.SlackService; +import org.elasticsearch.xpack.watcher.notification.slack.message.SlackMessage; +import org.elasticsearch.xpack.watcher.support.Variables; + +import java.util.Map; + +public class ExecutableSlackAction extends ExecutableAction { + + private final TextTemplateEngine templateEngine; + private final SlackService slackService; + + public ExecutableSlackAction(SlackAction action, Logger logger, SlackService slackService, TextTemplateEngine templateEngine) { + super(action, logger); + this.slackService = slackService; + this.templateEngine = templateEngine; + } + + @Override + public Action.Result execute(final String actionId, WatchExecutionContext ctx, Payload payload) throws Exception { + + SlackAccount account = slackService.getAccount(action.account); + + if (account == null) { + // the account associated with this action was deleted + throw new IllegalStateException("account [" + action.account + "] was not found. perhaps it was deleted"); + } + + Map model = Variables.createCtxModel(ctx, payload); + SlackMessage message = action.message.render(ctx.id().watchId(), actionId, templateEngine, model, account.getMessageDefaults()); + + if (ctx.simulateAction(actionId)) { + return new SlackAction.Result.Simulated(message); + } + + SentMessages sentMessages = account.send(message, action.proxy); + return new SlackAction.Result.Executed(sentMessages); + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/slack/SlackAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/slack/SlackAction.java new file mode 100644 index 0000000000000..2a5f6378add6b --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/slack/SlackAction.java @@ -0,0 +1,193 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions.slack; + + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.notification.slack.SentMessages; +import org.elasticsearch.xpack.watcher.notification.slack.message.SlackMessage; + +import java.io.IOException; +import java.util.Objects; + +public class SlackAction implements Action { + + public static final String TYPE = "slack"; + + final SlackMessage.Template message; + @Nullable final String account; + @Nullable final HttpProxy proxy; + + public SlackAction(@Nullable String account, SlackMessage.Template message, HttpProxy proxy) { + this.account = account; + this.message = message; + this.proxy = proxy; + } + + @Override + public String type() { + return TYPE; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + SlackAction that = (SlackAction) o; + + return Objects.equals(account, that.account) && + Objects.equals(message, that.message) && + Objects.equals(proxy, that.proxy); + } + + @Override + public int hashCode() { + return Objects.hash(account, message, proxy); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (account != null) { + builder.field(Field.ACCOUNT.getPreferredName(), account); + } + if (proxy != null) { + proxy.toXContent(builder, params); + } + builder.field(Field.MESSAGE.getPreferredName(), message); + return builder.endObject(); + } + + public static SlackAction parse(String watchId, String actionId, XContentParser parser) throws IOException { + String account = null; + SlackMessage.Template message = null; + HttpProxy proxy = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Field.ACCOUNT.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_STRING) { + account = parser.text(); + } else { + throw new ElasticsearchParseException("failed to parse [{}] action [{}/{}]. expected [{}] to be of type string, but " + + "found [{}] instead", TYPE, watchId, actionId, Field.ACCOUNT.getPreferredName(), token); + } + } else if (Field.PROXY.match(currentFieldName, parser.getDeprecationHandler())) { + proxy = HttpProxy.parse(parser); + } else if (Field.MESSAGE.match(currentFieldName, parser.getDeprecationHandler())) { + try { + message = SlackMessage.Template.parse(parser); + } catch (Exception e) { + throw new ElasticsearchParseException("failed to parse [{}] action [{}/{}]. failed to parse [{}] field", e, TYPE, + watchId, actionId, Field.MESSAGE.getPreferredName()); + } + } else { + throw new ElasticsearchParseException("failed to parse [{}] action [{}/{}]. unexpected token [{}]", TYPE, watchId, + actionId, token); + } + } + + if (message == null) { + throw new ElasticsearchParseException("failed to parse [{}] action [{}/{}]. 
missing required [{}] field", TYPE, watchId, + actionId, Field.MESSAGE.getPreferredName()); + } + + return new SlackAction(account, message, proxy); + } + + public static Builder builder(String account, SlackMessage.Template message) { + return new Builder(new SlackAction(account, message, null)); + } + + public interface Result { + + class Executed extends Action.Result implements Result { + + private final SentMessages sentMessages; + + public Executed(SentMessages sentMessages) { + super(TYPE, status(sentMessages)); + this.sentMessages = sentMessages; + } + + public SentMessages sentMessages() { + return sentMessages; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field(type, sentMessages, params); + } + + static Status status(SentMessages sentMessages) { + boolean hasSuccesses = false; + boolean hasFailures = false; + for (SentMessages.SentMessage message : sentMessages) { + if (message.isSuccess()) { + hasSuccesses = true; + } else { + hasFailures = true; + } + if (hasFailures && hasSuccesses) { + return Status.PARTIAL_FAILURE; + } + } + return hasFailures ? Status.FAILURE : Status.SUCCESS; + } + } + + class Simulated extends Action.Result implements Result { + + private final SlackMessage message; + + protected Simulated(SlackMessage message) { + super(TYPE, Status.SIMULATED); + this.message = message; + } + + public SlackMessage getMessage() { + return message; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject(type) + .field(Field.MESSAGE.getPreferredName(), message, params) + .endObject(); + } + } + } + + public static class Builder implements Action.Builder { + + final SlackAction action; + + public Builder(SlackAction action) { + this.action = action; + } + + @Override + public SlackAction build() { + return action; + } + } + + public interface Field { + ParseField ACCOUNT = new ParseField("account"); + ParseField MESSAGE = new ParseField("message"); + ParseField PROXY = new ParseField("proxy"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/slack/SlackActionFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/slack/SlackActionFactory.java new file mode 100644 index 0000000000000..8392976f27367 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/slack/SlackActionFactory.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions.slack; + +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.actions.ActionFactory; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.slack.SlackService; + +import java.io.IOException; + +public class SlackActionFactory extends ActionFactory { + private final TextTemplateEngine templateEngine; + private final SlackService slackService; + + public SlackActionFactory(Settings settings, TextTemplateEngine templateEngine, SlackService slackService) { + super(Loggers.getLogger(ExecutableSlackAction.class, settings)); + this.templateEngine = templateEngine; + this.slackService = slackService; + } + + @Override + public ExecutableSlackAction parseExecutable(String watchId, String actionId, XContentParser parser) throws IOException { + SlackAction action = SlackAction.parse(watchId, actionId, parser); + slackService.getAccount(action.account); // for validation -- throws exception if account not present + return new ExecutableSlackAction(action, actionLogger, slackService, templateEngine); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/webhook/ExecutableWebhookAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/webhook/ExecutableWebhookAction.java new file mode 100644 index 0000000000000..7313d529b4aea --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/webhook/ExecutableWebhookAction.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions.webhook; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.actions.ExecutableAction; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.support.Variables; + +import java.util.Map; + +public class ExecutableWebhookAction extends ExecutableAction<WebhookAction> { + + private final HttpClient httpClient; + private final TextTemplateEngine templateEngine; + + public ExecutableWebhookAction(WebhookAction action, Logger logger, HttpClient httpClient, TextTemplateEngine templateEngine) { + super(action, logger); + this.httpClient = httpClient; + this.templateEngine = templateEngine; + } + + @Override + public Action.Result execute(String actionId, WatchExecutionContext ctx, Payload payload) throws Exception { + Map<String, Object> model = Variables.createCtxModel(ctx, payload); + + HttpRequest request = action.requestTemplate.render(templateEngine, model); + + if (ctx.simulateAction(actionId)) { + return new WebhookAction.Result.Simulated(request); + } + + HttpResponse response = httpClient.execute(request); + + if (response.status() >= 400) { + return new WebhookAction.Result.Failure(request, response); + } else { + return new WebhookAction.Result.Success(request, response); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookAction.java new file mode 100644 index 0000000000000..8d20d91f4dca7 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookAction.java @@ -0,0 +1,177 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
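The execute method above follows a short decision path: render the request template against the watch context, return a `Simulated` result when the action is being simulated, otherwise fire the request and classify the response by status code. The classification rule is restated below as a tiny standalone sketch for emphasis; the helper name is invented for illustration and does not exist in this change set.

```java
// Mirrors the branch in ExecutableWebhookAction.execute: 4xx and 5xx responses
// become WebhookAction.Result.Failure, everything below 400 becomes Success.
static boolean isWebhookFailure(HttpResponse response) {
    return response.status() >= 400;
}
```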
+ */ +package org.elasticsearch.xpack.watcher.actions.webhook; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; + +import java.io.IOException; + +public class WebhookAction implements Action { + + public static final String TYPE = "webhook"; + + final HttpRequestTemplate requestTemplate; + + public WebhookAction(HttpRequestTemplate requestTemplate) { + this.requestTemplate = requestTemplate; + } + + @Override + public String type() { + return TYPE; + } + + public HttpRequestTemplate getRequest() { + return requestTemplate; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + WebhookAction action = (WebhookAction) o; + + return requestTemplate.equals(action.requestTemplate); + } + + @Override + public int hashCode() { + return requestTemplate.hashCode(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return requestTemplate.toXContent(builder, params); + } + + public static WebhookAction parse(String watchId, String actionId, XContentParser parser, + HttpRequestTemplate.Parser requestParser) throws IOException { + try { + HttpRequestTemplate request = requestParser.parse(parser); + return new WebhookAction(request); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse [{}] action [{}/{}]. failed parsing http request template", pe, TYPE, + watchId, actionId); + } + } + + public static Builder builder(HttpRequestTemplate requestTemplate) { + return new Builder(requestTemplate); + } + + public interface Result { + + class Success extends Action.Result implements Result { + + private final HttpRequest request; + private final HttpResponse response; + + public Success(HttpRequest request, HttpResponse response) { + super(TYPE, Status.SUCCESS); + this.request = request; + this.response = response; + } + + public HttpResponse response() { + return response; + } + + public HttpRequest request() { + return request; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject(type) + .field(Field.REQUEST.getPreferredName(), request, params) + .field(Field.RESPONSE.getPreferredName(), response, params) + .endObject(); + } + } + + class Failure extends Action.Result.Failure implements Result { + + private final HttpRequest request; + private final HttpResponse response; + + public Failure(HttpRequest request, HttpResponse response) { + this(request, response, "received [{}] status code", response.status()); + } + + private Failure(HttpRequest request, HttpResponse response, String reason, Object... 
args) { + super(TYPE, reason, args); + this.request = request; + this.response = response; + } + + public HttpResponse response() { + return response; + } + + public HttpRequest request() { + return request; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + super.toXContent(builder, params); + return builder.startObject(type) + .field(Field.REQUEST.getPreferredName(), request, params) + .field(Field.RESPONSE.getPreferredName(), response, params) + .endObject(); + } + } + + class Simulated extends Action.Result implements Result { + + private final HttpRequest request; + + public Simulated(HttpRequest request) { + super(TYPE, Status.SIMULATED); + this.request = request; + } + + public HttpRequest request() { + return request; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject(type) + .field(Field.REQUEST.getPreferredName(), request, params) + .endObject(); + } + } + + } + + public static class Builder implements Action.Builder { + + final HttpRequestTemplate requestTemplate; + + private Builder(HttpRequestTemplate requestTemplate) { + this.requestTemplate = requestTemplate; + } + + @Override + public WebhookAction build() { + return new WebhookAction(requestTemplate); + } + } + + interface Field { + ParseField REQUEST = new ParseField("request"); + ParseField RESPONSE = new ParseField("response"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookActionFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookActionFactory.java new file mode 100644 index 0000000000000..6de16006432b7 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookActionFactory.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
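To tie the pieces together, a `WebhookAction` is essentially a holder for an `HttpRequestTemplate`, which is added later in this change set. The sketch below wires one up through the template's public constructor as declared further down; `Scheme`, `TextTemplate`, and the mustache-style placeholders come from elsewhere in the watcher codebase and are assumptions here, as are the host, path, and body values.

```java
// Hypothetical wiring of a webhook action. Nulls fall back to the template's
// defaults for params, headers, auth, timeouts, and proxy.
HttpRequestTemplate template = new HttpRequestTemplate(
        "alerts.example.org", 443, Scheme.HTTPS, HttpMethod.POST,
        new TextTemplate("/hooks/{{ctx.watch_id}}"),            // path, rendered per execution
        null, null, null,                                       // params, headers, auth
        new TextTemplate("{\"state\": \"{{ctx.payload.state}}\"}"),  // body template
        null, null, null);                                      // timeouts, proxy
WebhookAction action = WebhookAction.builder(template).build();
```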
+ */ +package org.elasticsearch.xpack.watcher.actions.webhook; + +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.actions.ActionFactory; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; + +import java.io.IOException; + +public class WebhookActionFactory extends ActionFactory { + + private final HttpClient httpClient; + private final HttpRequestTemplate.Parser requestTemplateParser; + private final TextTemplateEngine templateEngine; + + public WebhookActionFactory(Settings settings, HttpClient httpClient, HttpRequestTemplate.Parser requestTemplateParser, + TextTemplateEngine templateEngine) { + + super(Loggers.getLogger(ExecutableWebhookAction.class, settings)); + this.httpClient = httpClient; + this.requestTemplateParser = requestTemplateParser; + this.templateEngine = templateEngine; + } + + @Override + public ExecutableWebhookAction parseExecutable(String watchId, String actionId, XContentParser parser) throws IOException { + return new ExecutableWebhookAction(WebhookAction.parse(watchId, actionId, parser, requestTemplateParser), + actionLogger, httpClient, templateEngine); + + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/client/WatchSourceBuilders.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/client/WatchSourceBuilders.java new file mode 100644 index 0000000000000..8fe4f667c505d --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/client/WatchSourceBuilders.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.client; + +import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; + +public final class WatchSourceBuilders { + + private WatchSourceBuilders() { + } + + public static WatchSourceBuilder watchBuilder() { + return new WatchSourceBuilder(); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java new file mode 100644 index 0000000000000..729696ffa3518 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java @@ -0,0 +1,272 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.common.http; + +import org.apache.http.Header; +import org.apache.http.HttpHeaders; +import org.apache.http.HttpHost; +import org.apache.http.NameValuePair; +import org.apache.http.auth.AuthScope; +import org.apache.http.client.AuthCache; +import org.apache.http.client.CredentialsProvider; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; +import org.apache.http.client.methods.HttpHead; +import org.apache.http.client.methods.HttpRequestBase; +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.http.client.utils.URIUtils; +import org.apache.http.client.utils.URLEncodedUtils; +import org.apache.http.conn.ssl.DefaultHostnameVerifier; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.SSLConnectionSocketFactory; +import org.apache.http.entity.ByteArrayEntity; +import org.apache.http.entity.ContentType; +import org.apache.http.impl.auth.BasicScheme; +import org.apache.http.impl.client.BasicAuthCache; +import org.apache.http.impl.client.BasicCredentialsProvider; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.message.BasicNameValuePair; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.Streams; +import org.elasticsearch.xpack.core.common.socket.SocketAccess; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.watcher.common.http.auth.ApplicableHttpAuth; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuthRegistry; + +import javax.net.ssl.HostnameVerifier; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class HttpClient extends AbstractComponent { + + private static final String SETTINGS_SSL_PREFIX = "xpack.http.ssl."; + + private final HttpAuthRegistry httpAuthRegistry; + private final CloseableHttpClient client; + private final HttpProxy settingsProxy; + private final TimeValue defaultConnectionTimeout; + private final TimeValue defaultReadTimeout; + private final ByteSizeValue maxResponseSize; + + public HttpClient(Settings settings, HttpAuthRegistry httpAuthRegistry, SSLService sslService) { + super(settings); + this.httpAuthRegistry = httpAuthRegistry; + this.defaultConnectionTimeout = HttpSettings.CONNECTION_TIMEOUT.get(settings); + this.defaultReadTimeout = HttpSettings.READ_TIMEOUT.get(settings); + this.maxResponseSize = HttpSettings.MAX_HTTP_RESPONSE_SIZE.get(settings); + this.settingsProxy = getProxyFromSettings(); + + HttpClientBuilder clientBuilder = HttpClientBuilder.create(); + + // ssl setup + Settings sslSettings = settings.getByPrefix(SETTINGS_SSL_PREFIX); + boolean isHostnameVerificationEnabled = sslService.getVerificationMode(sslSettings, Settings.EMPTY).isHostnameVerificationEnabled(); + 
HostnameVerifier verifier = isHostnameVerificationEnabled ? new DefaultHostnameVerifier() : NoopHostnameVerifier.INSTANCE; + SSLConnectionSocketFactory factory = new SSLConnectionSocketFactory(sslService.sslSocketFactory(sslSettings), verifier); + clientBuilder.setSSLSocketFactory(factory); + + client = clientBuilder.build(); + } + + public HttpResponse execute(HttpRequest request) throws IOException { + URI uri = createURI(request); + + HttpRequestBase internalRequest; + if (request.method == HttpMethod.HEAD) { + internalRequest = new HttpHead(uri); + } else { + HttpMethodWithEntity methodWithEntity = new HttpMethodWithEntity(uri, request.method.name()); + if (request.hasBody()) { + ByteArrayEntity entity = new ByteArrayEntity(request.body.getBytes(StandardCharsets.UTF_8)); + String contentType = request.headers().get(HttpHeaders.CONTENT_TYPE); + if (Strings.hasLength(contentType)) { + entity.setContentType(contentType); + } else { + entity.setContentType(ContentType.TEXT_PLAIN.toString()); + } + methodWithEntity.setEntity(entity); + } + internalRequest = methodWithEntity; + } + internalRequest.setHeader(HttpHeaders.ACCEPT_CHARSET, StandardCharsets.UTF_8.name()); + + // headers + if (request.headers().isEmpty() == false) { + for (Map.Entry entry : request.headers.entrySet()) { + internalRequest.setHeader(entry.getKey(), entry.getValue()); + } + } + + // BWC - hack for input requests made to elasticsearch that do not provide the right content-type header! + if (request.hasBody() && internalRequest.containsHeader("Content-Type") == false) { + XContentType xContentType = XContentFactory.xContentType(request.body()); + if (xContentType != null) { + internalRequest.setHeader("Content-Type", xContentType.mediaType()); + } + } + + RequestConfig.Builder config = RequestConfig.custom(); + setProxy(config, request, settingsProxy); + HttpClientContext localContext = HttpClientContext.create(); + // auth + if (request.auth() != null) { + ApplicableHttpAuth applicableAuth = httpAuthRegistry.createApplicable(request.auth); + CredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + applicableAuth.apply(credentialsProvider, new AuthScope(request.host, request.port)); + localContext.setCredentialsProvider(credentialsProvider); + + // preemptive auth, no need to wait for a 401 first + AuthCache authCache = new BasicAuthCache(); + BasicScheme basicAuth = new BasicScheme(); + authCache.put(new HttpHost(request.host, request.port, request.scheme.scheme()), basicAuth); + localContext.setAuthCache(authCache); + } + + // timeouts + if (request.connectionTimeout() != null) { + config.setConnectTimeout(Math.toIntExact(request.connectionTimeout.millis())); + } else { + config.setConnectTimeout(Math.toIntExact(defaultConnectionTimeout.millis())); + } + + if (request.readTimeout() != null) { + config.setSocketTimeout(Math.toIntExact(request.readTimeout.millis())); + config.setConnectionRequestTimeout(Math.toIntExact(request.readTimeout.millis())); + } else { + config.setSocketTimeout(Math.toIntExact(defaultReadTimeout.millis())); + config.setConnectionRequestTimeout(Math.toIntExact(defaultReadTimeout.millis())); + } + + internalRequest.setConfig(config.build()); + + try (CloseableHttpResponse response = SocketAccess.doPrivileged(() -> client.execute(internalRequest, localContext))) { + // headers + Header[] headers = response.getAllHeaders(); + Map responseHeaders = new HashMap<>(headers.length); + for (Header header : headers) { + if (responseHeaders.containsKey(header.getName())) { + String[] old 
= responseHeaders.get(header.getName()); + String[] values = new String[old.length + 1]; + + System.arraycopy(old, 0, values, 0, old.length); + values[values.length - 1] = header.getValue(); + + responseHeaders.put(header.getName(), values); + } else { + responseHeaders.put(header.getName(), new String[]{header.getValue()}); + } + } + + final byte[] body; + // not every response has a content, i.e. 204 + if (response.getEntity() == null) { + body = new byte[0]; + } else { + try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) { + try (InputStream is = new SizeLimitInputStream(maxResponseSize, response.getEntity().getContent())) { + Streams.copy(is, outputStream); + } + body = outputStream.toByteArray(); + } + } + return new HttpResponse(response.getStatusLine().getStatusCode(), body, responseHeaders); + } + } + + /** + * Enriches the config object optionally with proxy information + * + * @param config The request builder config object + * @param request The request parsed into the HTTP client + */ + static void setProxy(RequestConfig.Builder config, HttpRequest request, HttpProxy configuredProxy) { + if (request.proxy != null && request.proxy.equals(HttpProxy.NO_PROXY) == false) { + // if a proxy scheme is configured use this, but fall back to the same than the request in case there was no special + // configuration given + String scheme = request.proxy.getScheme() != null ? request.proxy.getScheme().scheme() : Scheme.HTTP.scheme(); + HttpHost proxy = new HttpHost(request.proxy.getHost(), request.proxy.getPort(), scheme); + config.setProxy(proxy); + } else if (HttpProxy.NO_PROXY.equals(configuredProxy) == false) { + HttpHost proxy = new HttpHost(configuredProxy.getHost(), configuredProxy.getPort(), configuredProxy.getScheme().scheme()); + config.setProxy(proxy); + } + } + + /** + * Creates a HTTP proxy from the system wide settings + * + * @return A http proxy instance, if no settings are configured this will be a HttpProxy.NO_PROXY instance + */ + private HttpProxy getProxyFromSettings() { + String proxyHost = HttpSettings.PROXY_HOST.get(settings); + Scheme proxyScheme = HttpSettings.PROXY_SCHEME.exists(settings) ? + Scheme.parse(HttpSettings.PROXY_SCHEME.get(settings)) : Scheme.HTTP; + int proxyPort = HttpSettings.PROXY_PORT.get(settings); + if (proxyPort != 0 && Strings.hasText(proxyHost)) { + logger.info("Using default proxy for http input and slack/hipchat/pagerduty/webhook actions [{}:{}]", proxyHost, proxyPort); + } else if (proxyPort != 0 ^ Strings.hasText(proxyHost)) { + throw new IllegalArgumentException("HTTP proxy requires both settings: [" + HttpSettings.PROXY_HOST.getKey() + "] and [" + + HttpSettings.PROXY_PORT.getKey() + "]"); + } + + if (proxyPort > 0 && Strings.hasText(proxyHost)) { + return new HttpProxy(proxyHost, proxyPort, proxyScheme); + } + + return HttpProxy.NO_PROXY; + } + + private URI createURI(HttpRequest request) { + // this could be really simple, as the apache http client has a UriBuilder class, however this class is always doing + // url path escaping, and we have done this already, so this would result in double escaping + try { + List qparams = new ArrayList<>(request.params.size()); + request.params.forEach((k, v) -> qparams.add(new BasicNameValuePair(k, v))); + String format = URLEncodedUtils.format(qparams, "UTF-8"); + URI uri = URIUtils.createURI(request.scheme.scheme(), request.host, request.port, request.path, + Strings.isNullOrEmpty(format) ? 
null : format, null); + + return uri; + } catch (URISyntaxException e) { + throw new IllegalArgumentException(e); + } + } + + /** + * Helper class to have all HTTP methods except HEAD allow for an body, including GET + */ + final class HttpMethodWithEntity extends HttpEntityEnclosingRequestBase { + + private final String methodName; + + HttpMethodWithEntity(final URI uri, String methodName) { + this.methodName = methodName; + setURI(uri); + } + + @Override + public String getMethod() { + return methodName; + } + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpContentType.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpContentType.java new file mode 100644 index 0000000000000..7901bdfdf9313 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpContentType.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.common.http; + +import org.elasticsearch.common.xcontent.XContentType; + +import java.util.Locale; + +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArgument; + +public enum HttpContentType { + + JSON() { + @Override + public XContentType contentType() { + return XContentType.JSON; + } + }, + + YAML() { + @Override + public XContentType contentType() { + return XContentType.YAML; + } + }, + + TEXT() { + @Override + public XContentType contentType() { + return null; + } + }; + + public abstract XContentType contentType(); + + @Override + public String toString() { + return id(); + } + + public String id() { + return name().toLowerCase(Locale.ROOT); + } + + public static HttpContentType resolve(String id) { + switch (id.toLowerCase(Locale.ROOT)) { + case "json" : return JSON; + case "yaml": return YAML; + case "text": return TEXT; + default: + throw illegalArgument("unknown http content type [{}]", id); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpMethod.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpMethod.java new file mode 100644 index 0000000000000..67e4d6a7abd2c --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpMethod.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
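Both of the small enums in this area resolve lower-cased identifiers and reject anything else. A quick illustration of the round-trip behaviour, written as plain assertions suitable for a test (the `HttpMethod` enum referenced here is the one added immediately below):

```java
// HttpContentType ids are case-insensitive; "text" intentionally maps to no XContentType.
assert HttpContentType.resolve("JSON").contentType() == XContentType.JSON;
assert "yaml".equals(HttpContentType.resolve("yaml").id());
assert HttpContentType.resolve("text").contentType() == null;

// HttpMethod.parse upper-cases its input, so "delete" and "DELETE" are equivalent.
assert HttpMethod.parse("delete") == HttpMethod.DELETE;
```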
+ */ +package org.elasticsearch.xpack.watcher.common.http; + +import java.util.Locale; + +public enum HttpMethod { + + HEAD("HEAD"), + GET("GET"), + POST("POST"), + PUT("PUT"), + DELETE("DELETE"); + + private final String method; + + HttpMethod(String method) { + this.method = method; + } + + public String method() { + return method; + } + + public static HttpMethod parse(String value) { + value = value.toUpperCase(Locale.ROOT); + switch (value) { + case "HEAD": + return HEAD; + case "GET": + return GET; + case "POST": + return POST; + case "PUT": + return PUT; + case "DELETE": + return DELETE; + default: + throw new IllegalArgumentException("unsupported http method [" + value + "]"); + } + } + + public String value() { + return name().toLowerCase(Locale.ROOT); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpProxy.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpProxy.java new file mode 100644 index 0000000000000..135e74cd86924 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpProxy.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.common.http; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.Proxy; +import java.net.UnknownHostException; +import java.util.Objects; + +public class HttpProxy implements ToXContentFragment { + + public static final HttpProxy NO_PROXY = new HttpProxy(null, null, null); + + private static final ParseField HOST = new ParseField("host"); + private static final ParseField PORT = new ParseField("port"); + private static final ParseField SCHEME = new ParseField("scheme"); + + private String host; + private Integer port; + private Scheme scheme; + + public HttpProxy(String host, Integer port) { + this.host = host; + this.port = port; + } + + public HttpProxy(String host, Integer port, Scheme scheme) { + this.host = host; + this.port = port; + this.scheme = scheme; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (Strings.hasText(host) && port != null) { + builder.startObject("proxy").field("host", host).field("port", port); + if (scheme != null) { + builder.field("scheme", scheme.scheme()); + } + builder.endObject(); + } + return builder; + } + + public String getHost() { + return host; + } + + public Integer getPort() { + return port; + } + + public Scheme getScheme() { + return scheme; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + HttpProxy that = (HttpProxy) o; + + return Objects.equals(port, that.port) && Objects.equals(host, that.host) && 
Objects.equals(scheme, that.scheme); + } + + @Override + public int hashCode() { + return Objects.hash(host, port, scheme); + } + + + public static HttpProxy parse(XContentParser parser) throws IOException { + XContentParser.Token token; + String currentFieldName = null; + String host = null; + Integer port = null; + Scheme scheme = null; + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (HOST.match(currentFieldName, parser.getDeprecationHandler())) { + host = parser.text(); + } else if (SCHEME.match(currentFieldName, parser.getDeprecationHandler())) { + scheme = Scheme.parse(parser.text()); + } else if (PORT.match(currentFieldName, parser.getDeprecationHandler())) { + port = parser.intValue(); + if (port <= 0 || port >= 65535) { + throw new ElasticsearchParseException("Proxy port must be between 1 and 65534, but was " + port); + } + } + } + + if (port == null || host == null) { + throw new ElasticsearchParseException("Proxy must contain 'port' and 'host' field"); + } + + return new HttpProxy(host, port, scheme); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpRequest.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpRequest.java new file mode 100644 index 0000000000000..7d9e91384e515 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpRequest.java @@ -0,0 +1,526 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
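The proxy parser above requires both `host` and `port` and rejects ports outside the range 1 to 65534; `scheme` is optional. A sketch of an accepted object, kept in a Java string, with illustrative host and port values:

```java
// Accepted by HttpProxy.parse: host and port are mandatory, scheme is optional,
// and a port of 0 or of 65535 and above triggers an ElasticsearchParseException.
String proxyJson = "{ \"host\": \"proxy.example.com\", \"port\": 8080, \"scheme\": \"https\" }";
```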
+ */ +package org.elasticsearch.xpack.watcher.common.http; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.rest.RestUtils; +import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; +import org.elasticsearch.xpack.core.watcher.support.WatcherUtils; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherXContentParser; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuth; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuthRegistry; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.UnsupportedEncodingException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URLDecoder; +import java.net.URLEncoder; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.unmodifiableMap; + +public class HttpRequest implements ToXContentObject { + + final String host; + final int port; + final Scheme scheme; + final HttpMethod method; + @Nullable final String path; + final Map params; + final Map headers; + @Nullable final HttpAuth auth; + @Nullable final String body; + @Nullable final TimeValue connectionTimeout; + @Nullable final TimeValue readTimeout; + @Nullable final HttpProxy proxy; + + public HttpRequest(String host, int port, @Nullable Scheme scheme, @Nullable HttpMethod method, @Nullable String path, + @Nullable Map params, @Nullable Map headers, + @Nullable HttpAuth auth, @Nullable String body, @Nullable TimeValue connectionTimeout, + @Nullable TimeValue readTimeout, @Nullable HttpProxy proxy) { + this.host = host; + this.port = port; + this.scheme = scheme != null ? scheme : Scheme.HTTP; + this.method = method != null ? method : HttpMethod.GET; + this.path = path; + this.params = params != null ? params : emptyMap(); + this.headers = headers != null ? 
headers : emptyMap(); + this.auth = auth; + this.body = body; + this.connectionTimeout = connectionTimeout; + this.readTimeout = readTimeout; + this.proxy = proxy; + } + + public Scheme scheme() { + return scheme; + } + + public String host() { + return host; + } + + public int port() { + return port; + } + + public HttpMethod method() { + return method; + } + + public String path() { + return path; + } + + public Map params() { + return params; + } + + public Map headers() { + return headers; + } + + public HttpAuth auth() { + return auth; + } + + public boolean hasBody() { + return body != null; + } + + public String body() { + return body; + } + + public TimeValue connectionTimeout() { + return connectionTimeout; + } + + public TimeValue readTimeout() { + return readTimeout; + } + + public HttpProxy proxy() { + return proxy; + } + + public static String encodeUrl(String text) { + try { + return URLEncoder.encode(text, "UTF-8"); + } catch (UnsupportedEncodingException e) { + throw new IllegalArgumentException("failed to URL encode text [" + text + "]", e); + } + } + + public static String decodeUrl(String text) { + try { + return URLDecoder.decode(text, "UTF-8"); + } catch (UnsupportedEncodingException e) { + throw new IllegalArgumentException("failed to URL decode text [" + text + "]", e); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params toXContentParams) throws IOException { + builder.startObject(); + builder.field(Field.HOST.getPreferredName(), host); + builder.field(Field.PORT.getPreferredName(), port); + builder.field(Field.SCHEME.getPreferredName(), scheme.value()); + builder.field(Field.METHOD.getPreferredName(), method.value()); + if (path != null) { + builder.field(Field.PATH.getPreferredName(), path); + } + if (this.params.isEmpty() == false) { + builder.field(Field.PARAMS.getPreferredName(), this.params); + } + if (headers.isEmpty() == false) { + if (WatcherParams.hideSecrets(toXContentParams) && headers.containsKey("Authorization")) { + Map sanitizedHeaders = new HashMap<>(headers); + sanitizedHeaders.put("Authorization", WatcherXContentParser.REDACTED_PASSWORD); + builder.field(Field.HEADERS.getPreferredName(), sanitizedHeaders); + } else { + builder.field(Field.HEADERS.getPreferredName(), headers); + } + } + if (auth != null) { + builder.startObject(Field.AUTH.getPreferredName()) + .field(auth.type(), auth, toXContentParams) + .endObject(); + } + if (body != null) { + builder.field(Field.BODY.getPreferredName(), body); + } + if (connectionTimeout != null) { + builder.humanReadableField(HttpRequest.Field.CONNECTION_TIMEOUT.getPreferredName(), + HttpRequest.Field.CONNECTION_TIMEOUT_HUMAN.getPreferredName(), connectionTimeout); + } + if (readTimeout != null) { + builder.humanReadableField(HttpRequest.Field.READ_TIMEOUT.getPreferredName(), + HttpRequest.Field.READ_TIMEOUT_HUMAN.getPreferredName(), readTimeout); + } + if (proxy != null) { + proxy.toXContent(builder, toXContentParams); + } + return builder.endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + HttpRequest that = (HttpRequest) o; + + if (port != that.port) return false; + if (!host.equals(that.host)) return false; + if (scheme != that.scheme) return false; + if (method != that.method) return false; + if (path != null ? 
!path.equals(that.path) : that.path != null) return false; + if (!params.equals(that.params)) return false; + if (!headers.equals(that.headers)) return false; + if (auth != null ? !auth.equals(that.auth) : that.auth != null) return false; + if (connectionTimeout != null ? !connectionTimeout.equals(that.connectionTimeout) : that.connectionTimeout != null) return false; + if (readTimeout != null ? !readTimeout.equals(that.readTimeout) : that.readTimeout != null) return false; + if (proxy != null ? !proxy.equals(that.proxy) : that.proxy != null) return false; + return !(body != null ? !body.equals(that.body) : that.body != null); + + } + + @Override + public int hashCode() { + return Objects.hash(host, port, scheme, method, path, params, headers, auth, connectionTimeout, readTimeout, body, proxy); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("method=[").append(method).append("], "); + sb.append("scheme=[").append(scheme).append("], "); + sb.append("host=[").append(host).append("], "); + sb.append("port=[").append(port).append("], "); + sb.append("path=[").append(path).append("], "); + if (!headers.isEmpty()) { + sb.append(", headers=["); + boolean first = true; + for (Map.Entry header : headers.entrySet()) { + if (!first) { + sb.append(", "); + } + sb.append("[").append(header.getKey()).append(": ").append(header.getValue()).append("]"); + first = false; + } + sb.append("], "); + } + if (auth != null) { + sb.append("auth=[").append(auth.type()).append("], "); + } + sb.append("connection_timeout=[").append(connectionTimeout).append("], "); + sb.append("read_timeout=[").append(readTimeout).append("], "); + if (proxy != null) { + sb.append("proxy=[").append(proxy).append("], "); + } + sb.append("body=[").append(body).append("], "); + return sb.toString(); + } + + public static Builder builder(String host, int port) { + return new Builder(host, port); + } + + static Builder builder() { + return new Builder(); + } + + public static class Parser { + + private final HttpAuthRegistry httpAuthRegistry; + + public Parser(HttpAuthRegistry httpAuthRegistry) { + this.httpAuthRegistry = httpAuthRegistry; + } + + public HttpRequest parse(XContentParser parser) throws IOException { + Builder builder = new Builder(); + XContentParser.Token token; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Field.PROXY.match(currentFieldName, parser.getDeprecationHandler())) { + try { + builder.proxy(HttpProxy.parse(parser)); + } catch (Exception e) { + throw new ElasticsearchParseException("could not parse http request. could not parse [{}] field", currentFieldName); + } + } else if (Field.AUTH.match(currentFieldName, parser.getDeprecationHandler())) { + builder.auth(httpAuthRegistry.parse(parser)); + } else if (HttpRequest.Field.CONNECTION_TIMEOUT.match(currentFieldName, parser.getDeprecationHandler())) { + builder.connectionTimeout(TimeValue.timeValueMillis(parser.longValue())); + } else if (HttpRequest.Field.CONNECTION_TIMEOUT_HUMAN.match(currentFieldName, parser.getDeprecationHandler())) { + // Users and 2.x specify the timeout this way + try { + builder.connectionTimeout(WatcherDateTimeUtils.parseTimeValue(parser, + HttpRequest.Field.CONNECTION_TIMEOUT.toString())); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse http request template. 
invalid time value for [{}] field", + pe, currentFieldName); + } + } else if (HttpRequest.Field.READ_TIMEOUT.match(currentFieldName, parser.getDeprecationHandler())) { + builder.readTimeout(TimeValue.timeValueMillis(parser.longValue())); + } else if (HttpRequest.Field.READ_TIMEOUT_HUMAN.match(currentFieldName, parser.getDeprecationHandler())) { + // Users and 2.x specify the timeout this way + try { + builder.readTimeout(WatcherDateTimeUtils.parseTimeValue(parser, HttpRequest.Field.READ_TIMEOUT.toString())); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse http request template. invalid time value for [{}] field", + pe, currentFieldName); + } + } else if (token == XContentParser.Token.START_OBJECT) { + if (Field.HEADERS.match(currentFieldName, parser.getDeprecationHandler())) { + builder.setHeaders((Map) WatcherUtils.flattenModel(parser.map())); + } else if (Field.PARAMS.match(currentFieldName, parser.getDeprecationHandler())) { + builder.setParams((Map) WatcherUtils.flattenModel(parser.map())); + } else if (Field.BODY.match(currentFieldName, parser.getDeprecationHandler())) { + builder.body(parser.text()); + } else { + throw new ElasticsearchParseException("could not parse http request. unexpected object field [{}]", + currentFieldName); + } + } else if (token == XContentParser.Token.VALUE_STRING) { + if (Field.SCHEME.match(currentFieldName, parser.getDeprecationHandler())) { + builder.scheme(Scheme.parse(parser.text())); + } else if (Field.METHOD.match(currentFieldName, parser.getDeprecationHandler())) { + builder.method(HttpMethod.parse(parser.text())); + } else if (Field.HOST.match(currentFieldName, parser.getDeprecationHandler())) { + builder.host = parser.text(); + } else if (Field.PATH.match(currentFieldName, parser.getDeprecationHandler())) { + builder.path(parser.text()); + } else if (Field.BODY.match(currentFieldName, parser.getDeprecationHandler())) { + builder.body(parser.text()); + } else if (Field.URL.match(currentFieldName, parser.getDeprecationHandler())) { + builder.fromUrl(parser.text()); + } else { + throw new ElasticsearchParseException("could not parse http request. unexpected string field [{}]", + currentFieldName); + } + } else if (token == XContentParser.Token.VALUE_NUMBER) { + if (Field.PORT.match(currentFieldName, parser.getDeprecationHandler())) { + builder.port = parser.intValue(); + } else { + throw new ElasticsearchParseException("could not parse http request. unexpected numeric field [{}]", + currentFieldName); + } + } else { + throw new ElasticsearchParseException("could not parse http request. unexpected token [{}]", token); + } + } + + if (builder.host == null) { + throw new ElasticsearchParseException("could not parse http request. missing required [{}] field", + Field.HOST.getPreferredName()); + } + + if (builder.port < 0) { + throw new ElasticsearchParseException("could not parse http request. 
missing required [{}] field", + Field.PORT.getPreferredName()); + } + + return builder.build(); + } + } + + public static class Builder { + + private String host; + private int port = -1; + private Scheme scheme; + private HttpMethod method; + private String path; + private Map params = new HashMap<>(); + private Map headers = new HashMap<>(); + private HttpAuth auth; + private String body; + private TimeValue connectionTimeout; + private TimeValue readTimeout; + private HttpProxy proxy; + + private Builder(String host, int port) { + this.host = host; + this.port = port; + } + + private Builder() { + } + + public Builder scheme(Scheme scheme) { + this.scheme = scheme; + return this; + } + + public Builder method(HttpMethod method) { + this.method = method; + return this; + } + + public Builder path(String path) { + this.path = path; + return this; + } + + public Builder setParams(Map params) { + if (this.params == null) { + throw new IllegalStateException("Request has already been built!"); + } + this.params.putAll(params); + return this; + } + + public Builder setParam(String key, String value) { + if (params == null) { + throw new IllegalStateException("Request has already been built!"); + } + this.params.put(key, value); + return this; + } + + public Builder setHeaders(Map headers) { + if (this.headers == null) { + throw new IllegalStateException("Request has already been built!"); + } + this.headers.putAll(headers); + return this; + } + + public Builder setHeader(String key, String value) { + if (headers == null) { + throw new IllegalStateException("Request has already been built!"); + } + this.headers.put(key, value); + return this; + } + + public Builder auth(HttpAuth auth) { + this.auth = auth; + return this; + } + + public Builder body(String body) { + this.body = body; + return this; + } + + public Builder jsonBody(ToXContent xContent) { + return body(Strings.toString(xContent)).setHeader("Content-Type", XContentType.JSON.mediaType()); + } + + public Builder connectionTimeout(TimeValue timeout) { + this.connectionTimeout = timeout; + return this; + } + + public Builder readTimeout(TimeValue timeout) { + this.readTimeout = timeout; + return this; + } + + public Builder proxy(HttpProxy proxy) { + this.proxy = proxy; + return this; + } + + public HttpRequest build() { + HttpRequest request = new HttpRequest(host, port, scheme, method, path, unmodifiableMap(params), + unmodifiableMap(headers), auth, body, connectionTimeout, readTimeout, proxy); + params = null; + headers = null; + return request; + } + + public Builder fromUrl(String supposedUrl) { + if (Strings.hasLength(supposedUrl) == false) { + throw new ElasticsearchParseException("Configured URL is empty, please configure a valid URL"); + } + + try { + URI uri = new URI(supposedUrl); + if (Strings.hasLength(uri.getScheme()) == false) { + throw new ElasticsearchParseException("URL [{}] does not contain a scheme", uri); + } + scheme = Scheme.parse(uri.getScheme()); + port = uri.getPort() > 0 ? 
uri.getPort() : scheme.defaultPort(); + host = uri.getHost(); + if (Strings.hasLength(uri.getRawPath())) { + path = uri.getRawPath(); + } + String rawQuery = uri.getRawQuery(); + if (Strings.hasLength(rawQuery)) { + RestUtils.decodeQueryString(rawQuery, 0, params); + } + } catch (URISyntaxException e) { + throw new ElasticsearchParseException("Malformed URL [{}]", supposedUrl); + } + return this; + } + } + + public interface Field { + ParseField SCHEME = new ParseField("scheme"); + ParseField HOST = new ParseField("host"); + ParseField PORT = new ParseField("port"); + ParseField METHOD = new ParseField("method"); + ParseField PATH = new ParseField("path"); + ParseField PARAMS = new ParseField("params"); + ParseField HEADERS = new ParseField("headers"); + ParseField AUTH = new ParseField("auth"); + ParseField BODY = new ParseField("body"); + ParseField CONNECTION_TIMEOUT = new ParseField("connection_timeout_in_millis"); + ParseField CONNECTION_TIMEOUT_HUMAN = new ParseField("connection_timeout"); + ParseField READ_TIMEOUT = new ParseField("read_timeout_millis"); + ParseField READ_TIMEOUT_HUMAN = new ParseField("read_timeout"); + ParseField PROXY = new ParseField("proxy"); + ParseField URL = new ParseField("url"); + } + + /** + * Write a request via toXContent, but filter certain parts of it - this is needed to not expose secrets + * + * @param request The HttpRequest object to serialize + * @param xContent The xContent from the parent outputstream builder + * @param params The ToXContentParams from the parent write + * @param excludeField The field to exclude + * @return A bytearrayinputstream that contains the serialized request + * @throws IOException + */ + public static InputStream filterToXContent(HttpRequest request, XContent xContent, ToXContent.Params params, + String excludeField) throws IOException { + try (ByteArrayOutputStream bos = new ByteArrayOutputStream(); + XContentBuilder filteredBuilder = new XContentBuilder(xContent, bos, + Collections.emptySet(), Collections.singleton(excludeField))) { + request.toXContent(filteredBuilder, params); + filteredBuilder.flush(); + return new ByteArrayInputStream(bos.toByteArray()); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpRequestTemplate.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpRequestTemplate.java new file mode 100644 index 0000000000000..e511b30ef5934 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpRequestTemplate.java @@ -0,0 +1,517 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
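The `fromUrl` helper above is what allows a watch to specify a single `url` field instead of separate host, port, path, and params. A hedged sketch of the resulting request follows; the HTTPS default port of 443 comes from the `Scheme` enum, which is outside this excerpt, and the no-argument `builder()` is package-private, so the snippet is written as if it lived in the same package.

```java
// fromUrl requires a scheme, falls back to the scheme's default port when the
// URL omits one, and decodes the query string into the request params.
HttpRequest request = HttpRequest.builder()
        .fromUrl("https://alerts.example.org/notify?priority=high")
        .method(HttpMethod.POST)
        .body("{\"message\":\"watch fired\"}")
        .build();
// request.host()   -> "alerts.example.org"
// request.port()   -> 443 (assumed HTTPS default from Scheme.defaultPort())
// request.path()   -> "/notify"
// request.params() -> {priority=high}
```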
+ */ +package org.elasticsearch.xpack.watcher.common.http; + +import io.netty.handler.codec.http.HttpHeaders; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.RestUtils; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuth; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuthRegistry; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.HashMap; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static java.util.Collections.unmodifiableMap; + +public class HttpRequestTemplate implements ToXContentObject { + + private final Scheme scheme; + private final String host; + private final int port; + private final HttpMethod method; + private final TextTemplate path; + private final Map params; + private final Map headers; + private final HttpAuth auth; + private final TextTemplate body; + @Nullable private final TimeValue connectionTimeout; + @Nullable private final TimeValue readTimeout; + @Nullable private final HttpProxy proxy; + + public HttpRequestTemplate(String host, int port, @Nullable Scheme scheme, @Nullable HttpMethod method, @Nullable TextTemplate path, + Map params, Map headers, HttpAuth auth, + TextTemplate body, @Nullable TimeValue connectionTimeout, @Nullable TimeValue readTimeout, + @Nullable HttpProxy proxy) { + this.host = host; + this.port = port; + this.scheme = scheme != null ? scheme :Scheme.HTTP; + this.method = method != null ? method : HttpMethod.GET; + this.path = path; + this.params = params != null ? params : emptyMap(); + this.headers = headers != null ? 
headers : emptyMap(); + this.auth = auth; + this.body = body; + this.connectionTimeout = connectionTimeout; + this.readTimeout = readTimeout; + this.proxy = proxy; + } + + public Scheme scheme() { + return scheme; + } + + public String host() { + return host; + } + + public int port() { + return port; + } + + public HttpMethod method() { + return method; + } + + public TextTemplate path() { + return path; + } + + public Map params() { + return params; + } + + public Map headers() { + return headers; + } + + public HttpAuth auth() { + return auth; + } + + public TextTemplate body() { + return body; + } + + public TimeValue connectionTimeout() { + return connectionTimeout; + } + + public TimeValue readTimeout() { + return readTimeout; + } + + public HttpProxy proxy() { + return proxy; + } + + public HttpRequest render(TextTemplateEngine engine, Map model) { + HttpRequest.Builder request = HttpRequest.builder(host, port); + request.method(method); + request.scheme(scheme); + if (path != null) { + request.path(engine.render(path, model)); + } + if (params != null && !params.isEmpty()) { + MapBuilder mapBuilder = MapBuilder.newMapBuilder(); + for (Map.Entry entry : params.entrySet()) { + mapBuilder.put(entry.getKey(), engine.render(entry.getValue(), model)); + } + request.setParams(mapBuilder.map()); + } + if ((headers == null || headers.isEmpty()) && body != null && body.getContentType() != null) { + request.setHeaders(singletonMap(HttpHeaders.Names.CONTENT_TYPE, body.getContentType().mediaType())); + } else if (headers != null && !headers.isEmpty()) { + MapBuilder mapBuilder = MapBuilder.newMapBuilder(); + if (body != null && body.getContentType() != null) { + // putting the content type first, so it can be overridden by custom headers + mapBuilder.put(HttpHeaders.Names.CONTENT_TYPE, body.getContentType().mediaType()); + } + for (Map.Entry entry : headers.entrySet()) { + mapBuilder.put(entry.getKey(), engine.render(entry.getValue(), model)); + } + request.setHeaders(mapBuilder.map()); + } + if (auth != null) { + request.auth(auth); + } + if (body != null) { + request.body(engine.render(body, model)); + } + if (connectionTimeout != null) { + request.connectionTimeout(connectionTimeout); + } + if (readTimeout != null) { + request.readTimeout(readTimeout); + } + if (proxy != null) { + request.proxy(proxy); + } + return request.build(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field(HttpRequest.Field.SCHEME.getPreferredName(), scheme.value()); + builder.field(HttpRequest.Field.HOST.getPreferredName(), host); + builder.field(HttpRequest.Field.PORT.getPreferredName(), port); + builder.field(HttpRequest.Field.METHOD.getPreferredName(), method.value()); + if (path != null) { + builder.field(HttpRequest.Field.PATH.getPreferredName(), path, params); + } + if (this.params != null) { + builder.startObject(HttpRequest.Field.PARAMS.getPreferredName()); + for (Map.Entry entry : this.params.entrySet()) { + builder.field(entry.getKey(), entry.getValue(), params); + } + builder.endObject(); + } + if (headers != null) { + builder.startObject(HttpRequest.Field.HEADERS.getPreferredName()); + for (Map.Entry entry : headers.entrySet()) { + builder.field(entry.getKey(), entry.getValue(), params); + } + builder.endObject(); + } + if (auth != null) { + builder.startObject(HttpRequest.Field.AUTH.getPreferredName()) + .field(auth.type(), auth, params) + .endObject(); + } + if (body != null) { + 
builder.field(HttpRequest.Field.BODY.getPreferredName(), body, params); + } + if (connectionTimeout != null) { + builder.humanReadableField(HttpRequest.Field.CONNECTION_TIMEOUT.getPreferredName(), + HttpRequest.Field.CONNECTION_TIMEOUT_HUMAN.getPreferredName(), connectionTimeout); + } + if (readTimeout != null) { + builder.humanReadableField(HttpRequest.Field.READ_TIMEOUT.getPreferredName(), + HttpRequest.Field.READ_TIMEOUT_HUMAN.getPreferredName(), readTimeout); + } + if (proxy != null) { + proxy.toXContent(builder, params); + } + return builder.endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + HttpRequestTemplate that = (HttpRequestTemplate) o; + + if (port != that.port) return false; + if (scheme != that.scheme) return false; + if (host != null ? !host.equals(that.host) : that.host != null) return false; + if (method != that.method) return false; + if (path != null ? !path.equals(that.path) : that.path != null) return false; + if (params != null ? !params.equals(that.params) : that.params != null) return false; + if (headers != null ? !headers.equals(that.headers) : that.headers != null) return false; + if (auth != null ? !auth.equals(that.auth) : that.auth != null) return false; + if (connectionTimeout != null ? !connectionTimeout.equals(that.connectionTimeout) : that.connectionTimeout != null) return false; + if (readTimeout != null ? !readTimeout.equals(that.readTimeout) : that.readTimeout != null) return false; + if (proxy != null ? !proxy.equals(that.proxy) : that.proxy != null) return false; + return body != null ? body.equals(that.body) : that.body == null; + } + + @Override + public int hashCode() { + int result = scheme != null ? scheme.hashCode() : 0; + result = 31 * result + (host != null ? host.hashCode() : 0); + result = 31 * result + port; + result = 31 * result + (method != null ? method.hashCode() : 0); + result = 31 * result + (path != null ? path.hashCode() : 0); + result = 31 * result + (params != null ? params.hashCode() : 0); + result = 31 * result + (headers != null ? headers.hashCode() : 0); + result = 31 * result + (auth != null ? auth.hashCode() : 0); + result = 31 * result + (body != null ? body.hashCode() : 0); + result = 31 * result + (connectionTimeout != null ? connectionTimeout.hashCode() : 0); + result = 31 * result + (readTimeout != null ? readTimeout.hashCode() : 0); + result = 31 * result + (proxy != null ? 
proxy.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return Strings.toString(this); + } + + public static Builder builder(String host, int port) { + return new Builder(host, port); + } + + public static Builder builder(String url) { + return new Builder(url); + } + + static Builder builder() { + return new Builder(); + } + + public static class Parser { + + private final HttpAuthRegistry httpAuthRegistry; + + public Parser(HttpAuthRegistry httpAuthRegistry) { + this.httpAuthRegistry = httpAuthRegistry; + } + + public HttpRequestTemplate parse(XContentParser parser) throws IOException { + assert parser.currentToken() == XContentParser.Token.START_OBJECT; + + Builder builder = new Builder(); + XContentParser.Token token; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (HttpRequest.Field.PROXY.match(currentFieldName, parser.getDeprecationHandler())) { + builder.proxy(HttpProxy.parse(parser)); + } else if (HttpRequest.Field.PATH.match(currentFieldName, parser.getDeprecationHandler())) { + builder.path(parseFieldTemplate(currentFieldName, parser)); + } else if (HttpRequest.Field.HEADERS.match(currentFieldName, parser.getDeprecationHandler())) { + builder.putHeaders(parseFieldTemplates(currentFieldName, parser)); + } else if (HttpRequest.Field.PARAMS.match(currentFieldName, parser.getDeprecationHandler())) { + builder.putParams(parseFieldTemplates(currentFieldName, parser)); + } else if (HttpRequest.Field.BODY.match(currentFieldName, parser.getDeprecationHandler())) { + builder.body(parseFieldTemplate(currentFieldName, parser)); + } else if (HttpRequest.Field.URL.match(currentFieldName, parser.getDeprecationHandler())) { + builder.fromUrl(parser.text()); + } else if (HttpRequest.Field.CONNECTION_TIMEOUT.match(currentFieldName, parser.getDeprecationHandler())) { + builder.connectionTimeout(TimeValue.timeValueMillis(parser.longValue())); + } else if (HttpRequest.Field.CONNECTION_TIMEOUT_HUMAN.match(currentFieldName, parser.getDeprecationHandler())) { + // Users and 2.x specify the timeout this way + try { + builder.connectionTimeout(WatcherDateTimeUtils.parseTimeValue(parser, + HttpRequest.Field.CONNECTION_TIMEOUT.toString())); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse http request template. invalid time value for [{}] field", + pe, currentFieldName); + } + } else if (HttpRequest.Field.READ_TIMEOUT.match(currentFieldName, parser.getDeprecationHandler())) { + builder.readTimeout(TimeValue.timeValueMillis(parser.longValue())); + } else if (HttpRequest.Field.READ_TIMEOUT_HUMAN.match(currentFieldName, parser.getDeprecationHandler())) { + // Users and 2.x specify the timeout this way + try { + builder.readTimeout(WatcherDateTimeUtils.parseTimeValue(parser, HttpRequest.Field.READ_TIMEOUT.toString())); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse http request template. invalid time value for [{}] field", + pe, currentFieldName); + } + } else if (token == XContentParser.Token.START_OBJECT) { + if (HttpRequest.Field.AUTH.match(currentFieldName, parser.getDeprecationHandler())) { + builder.auth(httpAuthRegistry.parse(parser)); + } else { + throw new ElasticsearchParseException("could not parse http request template. 
unexpected object field [{}]", + currentFieldName); + } + } else if (token == XContentParser.Token.VALUE_STRING) { + if (HttpRequest.Field.SCHEME.match(currentFieldName, parser.getDeprecationHandler())) { + builder.scheme(Scheme.parse(parser.text())); + } else if (HttpRequest.Field.METHOD.match(currentFieldName, parser.getDeprecationHandler())) { + builder.method(HttpMethod.parse(parser.text())); + } else if (HttpRequest.Field.HOST.match(currentFieldName, parser.getDeprecationHandler())) { + builder.host = parser.text(); + } else { + throw new ElasticsearchParseException("could not parse http request template. unexpected string field [{}]", + currentFieldName); + } + } else if (token == XContentParser.Token.VALUE_NUMBER) { + if (HttpRequest.Field.PORT.match(currentFieldName, parser.getDeprecationHandler())) { + builder.port = parser.intValue(); + } else { + throw new ElasticsearchParseException("could not parse http request template. unexpected numeric field [{}]", + currentFieldName); + } + } else { + throw new ElasticsearchParseException("could not parse http request template. unexpected token [{}] for field [{}]", + token, currentFieldName); + } + } + + if (builder.host == null) { + throw new ElasticsearchParseException("could not parse http request template. missing required [{}] string field", + HttpRequest.Field.HOST.getPreferredName()); + } + if (builder.port <= 0) { + throw new ElasticsearchParseException("could not parse http request template. wrong port for [{}]", + HttpRequest.Field.PORT.getPreferredName()); + } + + return builder.build(); + } + + private static TextTemplate parseFieldTemplate(String field, XContentParser parser) throws IOException { + try { + return TextTemplate.parse(parser); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse http request template. 
could not parse value for [{}] field", pe, + field); + } + } + + private static Map parseFieldTemplates(String field, XContentParser parser) throws IOException { + Map templates = new HashMap<>(); + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else { + templates.put(currentFieldName, parseFieldTemplate(field, parser)); + } + } + return templates; + } + } + + public static class Builder { + + private String host; + private int port; + private Scheme scheme; + private HttpMethod method; + private TextTemplate path; + private final Map params = new HashMap<>(); + private final Map headers = new HashMap<>(); + private HttpAuth auth; + private TextTemplate body; + private TimeValue connectionTimeout; + private TimeValue readTimeout; + private HttpProxy proxy; + + private Builder() { + } + + private Builder(String url) { + fromUrl(url); + } + + private Builder(String host, int port) { + this.host = host; + this.port = port; + } + + public Builder scheme(Scheme scheme) { + this.scheme = scheme; + return this; + } + + public Builder method(HttpMethod method) { + this.method = method; + return this; + } + + public Builder path(String path) { + return path(new TextTemplate(path)); + } + + public Builder path(TextTemplate path) { + this.path = path; + return this; + } + + public Builder putParams(Map params) { + this.params.putAll(params); + return this; + } + + public Builder putParam(String key, TextTemplate value) { + this.params.put(key, value); + return this; + } + + public Builder putHeaders(Map headers) { + this.headers.putAll(headers); + return this; + } + + public Builder putHeader(String key, TextTemplate value) { + this.headers.put(key, value); + return this; + } + + public Builder auth(HttpAuth auth) { + this.auth = auth; + return this; + } + + public Builder body(String body) { + return body(new TextTemplate(body)); + } + + public Builder body(TextTemplate body) { + this.body = body; + return this; + } + + public Builder body(XContentBuilder content) throws IOException { + return body(new TextTemplate(Strings.toString(content), content.contentType(), ScriptType.INLINE, null)); + } + + public Builder connectionTimeout(TimeValue timeout) { + this.connectionTimeout = timeout; + return this; + } + + public Builder readTimeout(TimeValue timeout) { + this.readTimeout = timeout; + return this; + } + + public Builder proxy(HttpProxy proxy) { + this.proxy = proxy; + return this; + } + + public HttpRequestTemplate build() { + return new HttpRequestTemplate(host, port, scheme, method, path, unmodifiableMap(new HashMap<>(params)), + unmodifiableMap(new HashMap<>(headers)), auth, body, connectionTimeout, readTimeout, proxy); + } + + public Builder fromUrl(String supposedUrl) { + if (Strings.hasLength(supposedUrl) == false) { + throw new ElasticsearchParseException("Configured URL is empty, please configure a valid URL"); + } + + try { + URI uri = new URI(supposedUrl); + if (Strings.hasLength(uri.getScheme()) == false) { + throw new ElasticsearchParseException("URL [{}] does not contain a scheme", uri); + } + scheme = Scheme.parse(uri.getScheme()); + port = uri.getPort() > 0 ? 
uri.getPort() : scheme.defaultPort(); + host = uri.getHost(); + if (Strings.hasLength(uri.getPath())) { + path = new TextTemplate(uri.getPath()); + } + + String rawQuery = uri.getRawQuery(); + if (Strings.hasLength(rawQuery)) { + Map stringParams = new HashMap<>(); + RestUtils.decodeQueryString(rawQuery, 0, stringParams); + for (Map.Entry entry : stringParams.entrySet()) { + params.put(entry.getKey(), new TextTemplate(entry.getValue())); + } + } + } catch (URISyntaxException e) { + throw new ElasticsearchParseException("Malformed URL [{}]", supposedUrl); + } + return this; + } + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpResponse.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpResponse.java new file mode 100644 index 0000000000000..497de954d0cb2 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpResponse.java @@ -0,0 +1,242 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.common.http; + +import io.netty.handler.codec.http.HttpHeaders; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.unmodifiableMap; + +public class HttpResponse implements ToXContentObject { + + private final int status; + private final Map headers; + private final BytesReference body; + + public HttpResponse(int status) { + this(status, emptyMap()); + } + + public HttpResponse(int status, Map headers) { + this(status, (BytesReference) null, headers); + } + + public HttpResponse(int status, @Nullable String body) { + this(status, body != null ? new BytesArray(body) : null, emptyMap()); + } + + public HttpResponse(int status, @Nullable String body, Map headers) { + this(status, body != null ? new BytesArray(body) : null, headers); + } + + public HttpResponse(int status, @Nullable byte[] body) { + this(status, body != null && body.length > 0 ? new BytesArray(body) : null, emptyMap()); + } + + public HttpResponse(int status, @Nullable byte[] body, Map headers) { + this(status, body != null && body.length > 0 ? 
new BytesArray(body) : null, headers); + } + + public HttpResponse(int status, @Nullable BytesReference body, Map headers) { + this.status = status; + this.body = body; + MapBuilder mapBuilder = MapBuilder.newMapBuilder(); + for (Map.Entry entry : headers.entrySet()) { + mapBuilder.put(entry.getKey().toLowerCase(Locale.ROOT), entry.getValue()); + } + this.headers = mapBuilder.immutableMap(); + } + + public int status() { + return status; + } + + public boolean hasContent() { + return body != null; + } + + public BytesReference body() { + return body; + } + + /** + * Returns all the headers, with keys being lowercased, so they are always consistent + * in the payload + */ + public Map> headers() { + MapBuilder> builder = MapBuilder.newMapBuilder(); + for (Map.Entry entry : headers.entrySet()) { + builder.put(entry.getKey().toLowerCase(Locale.ROOT), Arrays.asList(entry.getValue())); + } + return builder.immutableMap(); + } + + public String[] header(String header) { + return headers.get(header.toLowerCase(Locale.ROOT)); + } + + public String contentType() { + String[] values = header(HttpHeaders.Names.CONTENT_TYPE); + if (values == null || values.length == 0) { + return null; + } + return values[0]; + } + + public XContentType xContentType() { + String[] values = header(HttpHeaders.Names.CONTENT_TYPE); + if (values == null || values.length == 0) { + return null; + } + return XContentType.fromMediaTypeOrFormat(values[0]); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + HttpResponse that = (HttpResponse) o; + + if (status != that.status) return false; + if (!headers.equals(that.headers)) return false; + return !(body != null ? !body.equals(that.body) : that.body != null); + } + + @Override + public int hashCode() { + int result = status; + result = 31 * result + headers.hashCode(); + result = 31 * result + (body != null ? 
body.hashCode() : 0); + return result; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("status=[").append(status).append("]"); + if (!headers.isEmpty()) { + sb.append(", headers=["); + boolean first = true; + for (Map.Entry header : headers.entrySet()) { + if (!first) { + sb.append(", "); + } + sb.append("[").append(header.getKey()).append(": ").append(Arrays.toString(header.getValue())).append("]"); + first = false; + } + sb.append("]"); + } + if (hasContent()) { + sb.append(", body=[").append(body.utf8ToString()).append("]"); + } + return sb.toString(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder = builder.startObject().field(Field.STATUS.getPreferredName(), status); + if (!headers.isEmpty()) { + builder.startObject(Field.HEADERS.getPreferredName()); + for (Map.Entry header : headers.entrySet()) { + // in order to prevent dots in field names, that might occur in headers, we simply de_dot those header names + // when writing toXContent + builder.array(header.getKey().replaceAll("\\.", "_"), header.getValue()); + } + builder.endObject(); + } + if (hasContent()) { + builder = builder.field(Field.BODY.getPreferredName(), body.utf8ToString()); + } + builder.endObject(); + return builder; + } + + public static HttpResponse parse(XContentParser parser) throws IOException { + assert parser.currentToken() == XContentParser.Token.START_OBJECT; + + int status = -1; + String body = null; + Map headers = new HashMap<>(); + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (currentFieldName == null) { + throw new ElasticsearchParseException("could not parse http response. expected a field name but found [{}] instead", token); + } else if (token == XContentParser.Token.VALUE_NUMBER) { + if (Field.STATUS.match(currentFieldName, parser.getDeprecationHandler())) { + status = parser.intValue(); + } else { + throw new ElasticsearchParseException("could not parse http response. unknown numeric field [{}]", currentFieldName); + } + } else if (token == XContentParser.Token.VALUE_STRING) { + if (Field.BODY.match(currentFieldName, parser.getDeprecationHandler())) { + body = parser.text(); + } else { + throw new ElasticsearchParseException("could not parse http response. unknown string field [{}]", currentFieldName); + } + } else if (token == XContentParser.Token.START_OBJECT) { + String headerName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + headerName = parser.currentName(); + } else if (headerName == null){ + throw new ElasticsearchParseException("could not parse http response. expected a header name but found [{}] " + + "instead", token); + } else if (token.isValue()) { + headers.put(headerName, new String[] { String.valueOf(parser.objectText()) }); + } else if (token == XContentParser.Token.START_ARRAY) { + List values = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (!token.isValue()) { + throw new ElasticsearchParseException("could not parse http response. 
expected a header value for header " + + "[{}] but found [{}] instead", headerName, token); + } else { + values.add(String.valueOf(parser.objectText())); + } + } + headers.put(headerName, values.toArray(new String[values.size()])); + } + } + } else { + throw new ElasticsearchParseException("could not parse http response. unexpected token [{}]", token); + } + } + + if (status < 0) { + throw new ElasticsearchParseException("could not parse http response. missing required numeric [{}] field holding the " + + "response's http status code", Field.STATUS.getPreferredName()); + } + return new HttpResponse(status, body, unmodifiableMap(headers)); + } + + interface Field { + ParseField STATUS = new ParseField("status"); + ParseField HEADERS = new ParseField("headers"); + ParseField BODY = new ParseField("body"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpSettings.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpSettings.java new file mode 100644 index 0000000000000..f4f97df1d4fd8 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpSettings.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.common.http; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; + +import java.util.ArrayList; +import java.util.List; + +/** + * Handles the configuration and parsing of settings for the xpack.http. 
prefix
+ */
+public class HttpSettings {
+
+    private static final TimeValue DEFAULT_READ_TIMEOUT = TimeValue.timeValueSeconds(10);
+    private static final TimeValue DEFAULT_CONNECTION_TIMEOUT = DEFAULT_READ_TIMEOUT;
+
+    static final Setting<TimeValue> READ_TIMEOUT = Setting.timeSetting("xpack.http.default_read_timeout",
+        DEFAULT_READ_TIMEOUT, Property.NodeScope);
+    static final Setting<TimeValue> CONNECTION_TIMEOUT = Setting.timeSetting("xpack.http.default_connection_timeout",
+        DEFAULT_CONNECTION_TIMEOUT, Property.NodeScope);
+
+    private static final String PROXY_HOST_KEY = "xpack.http.proxy.host";
+    private static final String PROXY_PORT_KEY = "xpack.http.proxy.port";
+    private static final String PROXY_SCHEME_KEY = "xpack.http.proxy.scheme";
+    private static final String SSL_KEY_PREFIX = "xpack.http.ssl.";
+
+    static final Setting<String> PROXY_HOST = Setting.simpleString(PROXY_HOST_KEY, Property.NodeScope);
+    static final Setting<String> PROXY_SCHEME = Setting.simpleString(PROXY_SCHEME_KEY, (v, s) -> Scheme.parse(v), Property.NodeScope);
+    static final Setting<Integer> PROXY_PORT = Setting.intSetting(PROXY_PORT_KEY, 0, 0, 0xFFFF, Property.NodeScope);
+
+    static final Setting<ByteSizeValue> MAX_HTTP_RESPONSE_SIZE = Setting.byteSizeSetting("xpack.http.max_response_size",
+        new ByteSizeValue(10, ByteSizeUnit.MB),   // default
+        new ByteSizeValue(1, ByteSizeUnit.BYTES), // min
+        new ByteSizeValue(50, ByteSizeUnit.MB),   // max
+        Property.NodeScope);
+
+    private static final SSLConfigurationSettings SSL = SSLConfigurationSettings.withPrefix(SSL_KEY_PREFIX);
+
+    public static List<Setting<?>> getSettings() {
+        final ArrayList<Setting<?>> settings = new ArrayList<>();
+        settings.addAll(SSL.getAllSettings());
+        settings.add(READ_TIMEOUT);
+        settings.add(CONNECTION_TIMEOUT);
+        settings.add(PROXY_HOST);
+        settings.add(PROXY_PORT);
+        settings.add(PROXY_SCHEME);
+        settings.add(MAX_HTTP_RESPONSE_SIZE);
+        return settings;
+    }
+
+    private HttpSettings() {
+    }
+}
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/Scheme.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/Scheme.java
new file mode 100644
index 0000000000000..04557271c26ce
--- /dev/null
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/Scheme.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.watcher.common.http; + +import java.util.Locale; + +public enum Scheme { + + HTTP("http", 80), + HTTPS("https", 443); + + private final String scheme; + private final int defaultPort; + + Scheme(String scheme, int defaultPort) { + this.scheme = scheme; + this.defaultPort = defaultPort; + } + + public String scheme() { + return scheme; + } + + public int defaultPort() { + return defaultPort; + } + + public static Scheme parse(String value) { + value = value.toLowerCase(Locale.ROOT); + switch (value) { + case "http": + return HTTP; + case "https": + return HTTPS; + default: + throw new IllegalArgumentException("unsupported http scheme [" + value + "]"); + } + } + + public String value() { + return name().toLowerCase(Locale.ROOT); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/SizeLimitInputStream.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/SizeLimitInputStream.java new file mode 100644 index 0000000000000..5d724915de74e --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/SizeLimitInputStream.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.common.http; + +import org.elasticsearch.common.unit.ByteSizeValue; + +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * An inputstream throwing an exception when a preconfigured number of bytes is reached + * This inputstream exists to prevent reading streaming or very big requests + * + * This implementation does not support mark/reset to prevent complex byte counting recalculations + */ +final class SizeLimitInputStream extends FilterInputStream { + + private final int maxByteSize; + private final AtomicInteger byteCounter = new AtomicInteger(0); + + /** + * Creates a new input stream, that throws an exception when a certain number of bytes is read + * @param maxByteSize The maximum data to read, before throwing an exception + * @param in The underlying inputstream containing the data + */ + SizeLimitInputStream(ByteSizeValue maxByteSize, InputStream in) { + super(in); + this.maxByteSize = maxByteSize.bytesAsInt(); + } + + @Override + public int read() throws IOException { + byteCounter.incrementAndGet(); + checkMaximumLengthReached(); + return super.read(); + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + byteCounter.addAndGet(len); + checkMaximumLengthReached(); + return super.read(b, off, len); + } + + @Override + public synchronized void mark(int readlimit) { + throw new UnsupportedOperationException("mark not supported"); + } + + @Override + public synchronized void reset() throws IOException { + throw new IOException("reset not supported"); + } + + @Override + public boolean markSupported() { + return false; + } + + private void checkMaximumLengthReached() throws IOException { + if (byteCounter.get() > maxByteSize) { + throw new IOException("Maximum limit of [" + maxByteSize + "] bytes reached"); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/ApplicableHttpAuth.java 
b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/ApplicableHttpAuth.java
new file mode 100644
index 0000000000000..cecc7bfda79ed
--- /dev/null
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/ApplicableHttpAuth.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.watcher.common.http.auth;
+
+import org.apache.http.auth.AuthScope;
+import org.apache.http.client.CredentialsProvider;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.net.HttpURLConnection;
+
+public abstract class ApplicableHttpAuth<Auth extends HttpAuth> implements ToXContentObject {
+
+    protected final Auth auth;
+
+    public ApplicableHttpAuth(Auth auth) {
+        this.auth = auth;
+    }
+
+    public final String type() {
+        return auth.type();
+    }
+
+    public abstract void apply(HttpURLConnection connection);
+
+    public abstract void apply(CredentialsProvider credsProvider, AuthScope authScope);
+
+    @Override
+    public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        return auth.toXContent(builder, params);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+
+        ApplicableHttpAuth<?> that = (ApplicableHttpAuth<?>) o;
+
+        return auth.equals(that.auth);
+    }
+
+    @Override
+    public int hashCode() {
+        return auth.hashCode();
+    }
+}
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/HttpAuth.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/HttpAuth.java
new file mode 100644
index 0000000000000..0909a33ab7312
--- /dev/null
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/HttpAuth.java
@@ -0,0 +1,14 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.watcher.common.http.auth;
+
+import org.elasticsearch.common.xcontent.ToXContentObject;
+
+public interface HttpAuth extends ToXContentObject {
+
+    String type();
+
+}
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/HttpAuthFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/HttpAuthFactory.java
new file mode 100644
index 0000000000000..7667e568b0ad3
--- /dev/null
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/HttpAuthFactory.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.watcher.common.http.auth;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+public abstract class HttpAuthFactory<Auth extends HttpAuth, AAuth extends ApplicableHttpAuth<Auth>> {
+
+    public abstract String type();
+
+    public abstract Auth parse(XContentParser parser) throws IOException;
+
+    public abstract AAuth createApplicable(Auth auth);
+
+    public AAuth parseApplicable(XContentParser parser) throws IOException {
+        Auth auth = parse(parser);
+        return createApplicable(auth);
+    }
+
+}
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/HttpAuthRegistry.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/HttpAuthRegistry.java
new file mode 100644
index 0000000000000..edf584231d636
--- /dev/null
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/HttpAuthRegistry.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.watcher.common.http.auth;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArgument;
+
+public class HttpAuthRegistry {
+
+    private final Map<String, HttpAuthFactory> factories;
+
+    public HttpAuthRegistry(Map<String, HttpAuthFactory> factories) {
+        this.factories = factories;
+    }
+
+    public HttpAuth parse(XContentParser parser) throws IOException {
+        String type = null;
+        XContentParser.Token token;
+        HttpAuth auth = null;
+        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+            if (token == XContentParser.Token.FIELD_NAME) {
+                type = parser.currentName();
+            } else if (token == XContentParser.Token.START_OBJECT && type != null) {
+                HttpAuthFactory factory = factories.get(type);
+                if (factory == null) {
+                    throw new ElasticsearchParseException("unknown http auth type [{}]", type);
+                }
+                auth = factory.parse(parser);
+            }
+        }
+        return auth;
+    }
+
+    public <A extends HttpAuth, AA extends ApplicableHttpAuth<A>> AA createApplicable(A auth) {
+        HttpAuthFactory factory = factories.get(auth.type());
+        if (factory == null) {
+            throw illegalArgument("unknown http auth type [{}]", auth.type());
+        }
+        return (AA) factory.createApplicable(auth);
+    }
+
+}
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/basic/ApplicableBasicAuth.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/basic/ApplicableBasicAuth.java
new file mode 100644
index 0000000000000..86639a59a1242
--- /dev/null
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/basic/ApplicableBasicAuth.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.watcher.common.http.auth.basic;
+
+import org.apache.http.auth.AuthScope;
+import org.apache.http.auth.UsernamePasswordCredentials;
+import org.apache.http.client.CredentialsProvider;
+import org.elasticsearch.xpack.core.watcher.crypto.CryptoService;
+import org.elasticsearch.xpack.watcher.common.http.auth.ApplicableHttpAuth;
+
+import java.net.HttpURLConnection;
+import java.nio.charset.StandardCharsets;
+import java.util.Base64;
+
+public class ApplicableBasicAuth extends ApplicableHttpAuth<BasicAuth> {
+
+    private final String basicAuth;
+    private final CryptoService cryptoService;
+
+    public ApplicableBasicAuth(BasicAuth auth, CryptoService service) {
+        super(auth);
+        basicAuth = headerValue(auth.username, auth.password.text(service));
+        this.cryptoService = service;
+    }
+
+    public static String headerValue(String username, char[] password) {
+        return "Basic " + Base64.getEncoder().encodeToString((username + ":" + new String(password)).getBytes(StandardCharsets.UTF_8));
+    }
+
+    public void apply(HttpURLConnection connection) {
+        connection.setRequestProperty("Authorization", basicAuth);
+    }
+
+    @Override
+    public void apply(CredentialsProvider credsProvider, AuthScope authScope) {
+        credsProvider.setCredentials(authScope,
+            new UsernamePasswordCredentials(auth.username, new String(auth.password.text(cryptoService))));
+    }
+
+}
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/basic/BasicAuth.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/basic/BasicAuth.java
new file mode 100644
index 0000000000000..2d316735efd77
--- /dev/null
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/basic/BasicAuth.java
@@ -0,0 +1,110 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.watcher.common.http.auth.basic; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.common.secret.Secret; +import org.elasticsearch.xpack.core.watcher.crypto.CryptoService; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherXContentParser; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuth; + +import java.io.IOException; +import java.util.Objects; + +public class BasicAuth implements HttpAuth { + + public static final String TYPE = "basic"; + + final String username; + final Secret password; + + public BasicAuth(String username, char[] password) { + this(username, new Secret(password)); + } + + public BasicAuth(String username, Secret password) { + this.username = username; + this.password = password; + } + + @Override + public String type() { + return TYPE; + } + + public String getUsername() { + return username; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + BasicAuth other = (BasicAuth) o; + + return Objects.equals(username, other.username) && Objects.equals(password, other.password); + } + + @Override + public int hashCode() { + return Objects.hash(username, password); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Field.USERNAME.getPreferredName(), username); + // if the password is null, do not render it out, so we have the possibility to call toXContent when we want to update a watch + // if the password is not null, ensure we never return the original password value, unless it is encrypted with the CryptoService + if (password != null) { + if (WatcherParams.hideSecrets(params) && password.value().startsWith(CryptoService.ENCRYPTED_TEXT_PREFIX) == false) { + builder.field(Field.PASSWORD.getPreferredName(), WatcherXContentParser.REDACTED_PASSWORD); + } else { + builder.field(Field.PASSWORD.getPreferredName(), password.value()); + } + } + return builder.endObject(); + } + + public static BasicAuth parse(XContentParser parser) throws IOException { + String username = null; + Secret password = null; + + String fieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_STRING) { + if (Field.USERNAME.getPreferredName().equals(fieldName)) { + username = parser.text(); + } else if (Field.PASSWORD.getPreferredName().equals(fieldName)) { + password = WatcherXContentParser.secretOrNull(parser); + } else { + throw new ElasticsearchParseException("unsupported field [" + fieldName + "]"); + } + } else { + throw new ElasticsearchParseException("unsupported token [" + token + "]"); + } + } + + if (username == null) { + throw new ElasticsearchParseException("username is a required option"); + } + + return new BasicAuth(username, password); + } + + interface Field { + ParseField USERNAME = new ParseField("username"); + ParseField PASSWORD = new ParseField("password"); + } +} diff --git 
a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/basic/BasicAuthFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/basic/BasicAuthFactory.java
new file mode 100644
index 0000000000000..838c53be7b19a
--- /dev/null
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/auth/basic/BasicAuthFactory.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.watcher.common.http.auth.basic;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.xpack.core.watcher.crypto.CryptoService;
+import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuthFactory;
+
+import java.io.IOException;
+
+public class BasicAuthFactory extends HttpAuthFactory<BasicAuth, ApplicableBasicAuth> {
+
+    private final CryptoService cryptoService;
+
+    public BasicAuthFactory(@Nullable CryptoService cryptoService) {
+        this.cryptoService = cryptoService;
+    }
+
+    public String type() {
+        return BasicAuth.TYPE;
+    }
+
+    public BasicAuth parse(XContentParser parser) throws IOException {
+        return BasicAuth.parse(parser);
+    }
+
+    @Override
+    public ApplicableBasicAuth createApplicable(BasicAuth auth) {
+        return new ApplicableBasicAuth(auth, cryptoService);
+    }
+}
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/text/TextTemplate.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/text/TextTemplate.java
new file mode 100644
index 0000000000000..e52e1cbc1e857
--- /dev/null
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/text/TextTemplate.java
@@ -0,0 +1,123 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.watcher.common.text;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptType;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * Holds a template to be used in many places in a watch as configuration.
+ *
+ * One liner templates are kept around as just strings and {@link Script} is used for
+ * parsing/serialization logic for any non inlined templates and/or when templates
+ * have custom params, lang or content type.
+ */ +public class TextTemplate implements ToXContent { + + private final Script script; + private final String inlineTemplate; + + public TextTemplate(String template) { + this.script = null; + this.inlineTemplate = template; + } + + public TextTemplate(String template, @Nullable XContentType contentType, ScriptType type, + @Nullable Map params) { + Map options = null; + if (type == ScriptType.INLINE) { + options = new HashMap<>(); + if (contentType != null) { + options.put(Script.CONTENT_TYPE_OPTION, contentType.mediaType()); + } + } + if (params == null) { + params = new HashMap<>(); + } + this.script = new Script(type, type == ScriptType.STORED ? null : Script.DEFAULT_TEMPLATE_LANG, template, options, params); + this.inlineTemplate = null; + } + + public TextTemplate(Script script) { + this.script = script; + this.inlineTemplate = null; + } + + public Script getScript() { + return script; + } + + public String getTemplate() { + return script != null ? script.getIdOrCode() : inlineTemplate; + } + + public XContentType getContentType() { + if (script == null || script.getOptions() == null) { + return null; + } + + String mediaType = script.getOptions().get(Script.CONTENT_TYPE_OPTION); + + if (mediaType == null) { + return null; + } + + return XContentType.fromMediaTypeOrFormat(mediaType); + } + + public ScriptType getType() { + return script != null ? script.getType(): ScriptType.INLINE; + } + + public Map getParams() { + return script != null ? script.getParams(): null; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + TextTemplate template1 = (TextTemplate) o; + return Objects.equals(script, template1.script) && + Objects.equals(inlineTemplate, template1.inlineTemplate); + } + + @Override + public int hashCode() { + return Objects.hash(script, inlineTemplate); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (script != null) { + script.toXContent(builder, params); + } else { + builder.value(inlineTemplate); + } + return builder; + } + + public static TextTemplate parse(XContentParser parser) throws IOException { + if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { + return new TextTemplate(parser.text()); + } else { + Script template = Script.parse(parser, Script.DEFAULT_TEMPLATE_LANG); + return new TextTemplate(template); + } + } +} + diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateEngine.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateEngine.java new file mode 100644 index 0000000000000..7b87a9e87a542 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateEngine.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.common.text; + +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.script.TemplateScript; +import org.elasticsearch.xpack.watcher.Watcher; + +import java.util.HashMap; +import java.util.Map; + +public class TextTemplateEngine extends AbstractComponent { + + private final ScriptService service; + + public TextTemplateEngine(Settings settings, ScriptService service) { + super(settings); + this.service = service; + } + + public String render(TextTemplate textTemplate, Map model) { + if (textTemplate == null) { + return null; + } + + String template = textTemplate.getTemplate(); + String mediaType = compileParams(detectContentType(template)); + template = trimContentType(textTemplate); + + Map mergedModel = new HashMap<>(); + if (textTemplate.getParams() != null) { + mergedModel.putAll(textTemplate.getParams()); + } + mergedModel.putAll(model); + + Map options = null; + if (textTemplate.getType() == ScriptType.INLINE) { + options = new HashMap<>(); + + if (textTemplate.getScript() != null && textTemplate.getScript().getOptions() != null) { + options.putAll(textTemplate.getScript().getOptions()); + } + + options.put(Script.CONTENT_TYPE_OPTION, mediaType); + } + Script script = new Script(textTemplate.getType(), + textTemplate.getType() == ScriptType.STORED ? null : "mustache", template, options, mergedModel); + TemplateScript.Factory compiledTemplate = service.compile(script, Watcher.SCRIPT_TEMPLATE_CONTEXT); + return compiledTemplate.newInstance(model).execute(); + } + + private String trimContentType(TextTemplate textTemplate) { + String template = textTemplate.getTemplate(); + if (!template.startsWith("__")){ + return template; //Doesn't even start with __ so can't have a content type + } + // There must be a __= 0 && index < 12) { + if (template.length() == 6) { + template = ""; + } else { + template = template.substring(index + 4); + } + } + return template; + } + + private XContentType detectContentType(String content) { + if (content.startsWith("__")) { + //There must be a __"); + static final Pattern PATH_PATTERN = Pattern.compile("\\{\\{(.+)\\}\\}"); + + private final Clock clock; + private final String type; + + protected AbstractCompareCondition(String type, Clock clock) { + this.clock = clock; + this.type = type; + } + + @Override + public final Result execute(WatchExecutionContext ctx) { + Map resolvedValues = new HashMap<>(); + Map model = Variables.createCtxModel(ctx, ctx.payload()); + return doExecute(model, resolvedValues); + } + + protected Object resolveConfiguredValue(Map resolvedValues, Map model, Object configuredValue) { + if (configuredValue instanceof String) { + + // checking if the given value is a date math expression + Matcher matcher = DATE_MATH_PATTERN.matcher((String) configuredValue); + if (matcher.matches()) { + String dateMath = matcher.group(1); + configuredValue = WatcherDateTimeUtils.parseDateMath(dateMath, DateTimeZone.UTC, clock); + resolvedValues.put(dateMath, WatcherDateTimeUtils.formatDate((DateTime) configuredValue)); + } else { + // checking if the given value is a path expression + matcher = PATH_PATTERN.matcher((String) configuredValue); + if (matcher.matches()) { + String configuredPath = matcher.group(1); + configuredValue = 
ObjectPath.eval(configuredPath, model); + resolvedValues.put(configuredPath, configuredValue); + } + } + } + return configuredValue; + } + + protected abstract Result doExecute(Map model, Map resolvedValues); + + @Override + public String type() { + return type; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject().endObject(); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/ArrayCompareCondition.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/ArrayCompareCondition.java new file mode 100644 index 0000000000000..6aeccf734c2f4 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/ArrayCompareCondition.java @@ -0,0 +1,321 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.condition; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.common.xcontent.XContentUtils; +import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; + +import java.io.IOException; +import java.time.Clock; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +public final class ArrayCompareCondition extends AbstractCompareCondition { + + public static final String TYPE = "array_compare"; + private final String arrayPath; + private final String path; + private final Op op; + private final Object value; + private final Quantifier quantifier; + + ArrayCompareCondition(String arrayPath, String path, Op op, Object value, + Quantifier quantifier, + Clock clock) { + super(TYPE, clock); + this.arrayPath = arrayPath; + this.path = path; + this.op = op; + this.value = value; + this.quantifier = quantifier; + } + + public String getArrayPath() { + return arrayPath; + } + + public String getPath() { + return path; + } + + public ArrayCompareCondition.Op getOp() { + return op; + } + + public Object getValue() { + return value; + } + + public ArrayCompareCondition.Quantifier getQuantifier() { + return quantifier; + } + + public static ArrayCompareCondition parse(Clock clock, String watchId, XContentParser parser) throws IOException { + if (parser.currentToken() != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. expected an object but found [{}] " + + "instead", TYPE, watchId, parser.currentToken()); + } + String arrayPath = null; + String path = null; + Op op = null; + Object value = null; + boolean haveValue = false; + Quantifier quantifier = null; + + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + arrayPath = parser.currentName(); + } else if (arrayPath == null) { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. 
expected a field indicating the " + + "compared path, but found [{}] instead", TYPE, watchId, token); + } else if (token == XContentParser.Token.START_OBJECT) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + if (parser.currentName().equals("path")) { + parser.nextToken(); + path = parser.text(); + } else { + if (op != null) { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. encountered " + + "duplicate comparison operator, but already saw [{}].", TYPE, watchId, parser.currentName(), op + .id()); + } + try { + op = Op.resolve(parser.currentName()); + } catch (IllegalArgumentException iae) { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. unknown comparison " + + "operator [{}]", TYPE, watchId, parser.currentName(), iae); + } + token = parser.nextToken(); + if (token == XContentParser.Token.START_OBJECT) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + if (parser.currentName().equals("value")) { + if (haveValue) { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. " + + "encountered duplicate field \"value\", but already saw value [{}].", TYPE, + watchId, value); + } + token = parser.nextToken(); + if (!op.supportsStructures() && !token.isValue() && token != XContentParser.Token.VALUE_NULL) { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. " + + "compared value for [{}] with operation [{}] must either be a numeric, string, " + + "boolean or null value, but found [{}] instead", TYPE, watchId, path, + op.name().toLowerCase(Locale.ROOT), token); + } + value = XContentUtils.readValue(parser, token); + haveValue = true; + } else if (parser.currentName().equals("quantifier")) { + if (quantifier != null) { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. " + + "encountered duplicate field \"quantifier\", but already saw quantifier [{}].", + TYPE, watchId, quantifier.id()); + } + parser.nextToken(); + try { + quantifier = Quantifier.resolve(parser.text()); + } catch (IllegalArgumentException iae) { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. " + + "unknown comparison quantifier [{}]", TYPE, watchId, parser.text(), iae); + } + } else { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. " + + "expected a field indicating the comparison value or comparison quantifier, but found" + + " [{}] instead", TYPE, watchId, parser.currentName()); + } + } else { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. expected a " + + "field indicating the comparison value or comparison quantifier, but found [{}] instead", + TYPE, watchId, token); + } + } + } else { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. expected an object " + + "for field [{}] but found [{}] instead", TYPE, watchId, op.id(), token); + } + } + } else { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. expected a field indicating" + + " the compared path or a comparison operator, but found [{}] instead", TYPE, watchId, token); + } + } + } else { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. 
expected an object for field [{}] " + + "but found [{}] instead", TYPE, watchId, path, token); + } + } + + if (path == null) { + path = ""; + } + if (quantifier == null) { + quantifier = Quantifier.SOME; + } + + return new ArrayCompareCondition(arrayPath, path, op, value, quantifier, clock); + } + + public Result doExecute(Map model, Map resolvedValues) { + Object configuredValue = resolveConfiguredValue(resolvedValues, model, value); + + Object object = ObjectPath.eval(arrayPath, model); + if (object != null && !(object instanceof List)) { + throw new IllegalStateException("array path " + arrayPath + " did not evaluate to array, was " + object); + } + + @SuppressWarnings("unchecked") + List resolvedArray = object != null ? (List) object : Collections.emptyList(); + + List resolvedValue = new ArrayList<>(resolvedArray.size()); + for (int i = 0; i < resolvedArray.size(); i++) { + resolvedValue.add(ObjectPath.eval(path, resolvedArray.get(i))); + } + resolvedValues.put(arrayPath, resolvedArray); + + return new Result(resolvedValues, TYPE, quantifier.eval(resolvedValue, + configuredValue, op)); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ArrayCompareCondition that = (ArrayCompareCondition) o; + return Objects.equals(getArrayPath(), that.getArrayPath()) && + Objects.equals(getPath(), that.getPath()) && + Objects.equals(getOp(), that.getOp()) && + Objects.equals(getValue(), that.getValue()) && + Objects.equals(getQuantifier(), that.getQuantifier()); + } + + @Override + public int hashCode() { + return Objects.hash(arrayPath, path, op, value, quantifier); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .startObject(arrayPath) + .field("path", path) + .startObject(op.id()) + .field("value", value) + .field("quantifier", quantifier.id()) + .endObject() + .endObject() + .endObject(); + } + + public enum Op { + EQ() { + @Override + public boolean comparison(int x) { + return x == 0; + } + + @Override + public boolean supportsStructures() { + return true; + } + }, + NOT_EQ() { + @Override + public boolean comparison(int x) { + return x != 0; + } + + @Override + public boolean supportsStructures() { + return true; + } + }, + GTE() { + @Override + public boolean comparison(int x) { + return x >= 0; + } + }, + GT() { + @Override + public boolean comparison(int x) { + return x > 0; + } + }, + LTE() { + @Override + public boolean comparison(int x) { + return x <= 0; + } + }, + LT() { + @Override + public boolean comparison(int x) { + return x < 0; + } + }; + + public abstract boolean comparison(int x); + + public boolean supportsStructures() { + return false; + } + + public String id() { + return name().toLowerCase(Locale.ROOT); + } + + public static Op resolve(String id) { + return Op.valueOf(id.toUpperCase(Locale.ROOT)); + } + } + + public enum Quantifier { + ALL() { + @Override + public boolean eval(List values, Object configuredValue, Op op) { + for (Object value : values) { + Integer compare = LenientCompare.compare(value, configuredValue); + boolean comparison = compare != null && op.comparison(compare); + if (!comparison) { + return false; + } + } + return true; + } + }, + SOME() { + @Override + public boolean eval(List values, Object configuredValue, Op op) { + for (Object value : values) { + Integer compare = LenientCompare.compare(value, configuredValue); + boolean comparison = compare != null && 
op.comparison(compare); + if (comparison) { + return true; + } + } + return false; + } + }; + + public abstract boolean eval(List values, Object configuredValue, Op op); + + public static Quantifier resolve(String id) { + return Quantifier.valueOf(id.toUpperCase(Locale.ROOT)); + } + + public String id() { + return name().toLowerCase(Locale.ROOT); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/CompareCondition.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/CompareCondition.java new file mode 100644 index 0000000000000..a3b21dec295d2 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/CompareCondition.java @@ -0,0 +1,203 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.condition; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.common.xcontent.XContentUtils; +import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; + +import java.io.IOException; +import java.time.Clock; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + + +public final class CompareCondition extends AbstractCompareCondition { + public static final String TYPE = "compare"; + private final String path; + private final Op op; + private final Object value; + + public CompareCondition(String path, Op op, Object value) { + this(path, op, value, null); + } + + CompareCondition(String path, Op op, Object value, Clock clock) { + super(TYPE, clock); + this.path = path; + this.op = op; + this.value = value; + } + + public String getPath() { + return path; + } + + public Op getOp() { + return op; + } + + public Object getValue() { + return value; + } + + public static CompareCondition parse(Clock clock, String watchId, XContentParser parser) throws IOException { + if (parser.currentToken() != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. expected an object but found [{}] " + + "instead", TYPE, watchId, parser.currentToken()); + } + String path = null; + Object value = null; + Op op = null; + + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + path = parser.currentName(); + } else if (path == null) { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. expected a field indicating the " + + "compared path, but found [{}] instead", TYPE, watchId, token); + } else if (token == XContentParser.Token.START_OBJECT) { + token = parser.nextToken(); + if (token != XContentParser.Token.FIELD_NAME) { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. expected a field indicating the" + + " comparison operator, but found [{}] instead", TYPE, watchId, token); + } + try { + op = Op.resolve(parser.currentName()); + } catch (IllegalArgumentException iae) { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. 
unknown comparison operator " + + "[{}]", TYPE, watchId, parser.currentName()); + } + token = parser.nextToken(); + if (!op.supportsStructures() && !token.isValue() && token != XContentParser.Token.VALUE_NULL) { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. compared value for [{}] with " + + "operation [{}] must either be a numeric, string, boolean or null value, but found [{}] instead", TYPE, + watchId, path, op.name().toLowerCase(Locale.ROOT), token); + } + value = XContentUtils.readValue(parser, token); + token = parser.nextToken(); + if (token != XContentParser.Token.END_OBJECT) { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. expected end of path object, " + + "but found [{}] instead", TYPE, watchId, token); + } + } else { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. expected an object for field [{}] " + + "but found [{}] instead", TYPE, watchId, path, token); + } + } + return new CompareCondition(path, op, value, clock); + } + + @Override + protected Result doExecute(Map model, Map resolvedValues) { + Object configuredValue = resolveConfiguredValue(resolvedValues, model, value); + + Object resolvedValue = ObjectPath.eval(path, model); + resolvedValues.put(path, resolvedValue); + + return new Result(resolvedValues, TYPE, op.eval(resolvedValue, configuredValue)); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + CompareCondition condition = (CompareCondition) o; + + if (!Objects.equals(path, condition.path)) return false; + if (op != condition.op) return false; + return Objects.equals(value, condition.value); + } + + @Override + public int hashCode() { + return Objects.hash(path, op, value); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .startObject(path) + .field(op.id(), value) + .endObject() + .endObject(); + } + + public enum Op { + + EQ() { + @Override + public boolean eval(Object v1, Object v2) { + Integer compVal = LenientCompare.compare(v1, v2); + return compVal != null && compVal == 0; + } + + @Override + public boolean supportsStructures() { + return true; + } + }, + NOT_EQ() { + @Override + public boolean eval(Object v1, Object v2) { + Integer compVal = LenientCompare.compare(v1, v2); + return compVal == null || compVal != 0; + } + + @Override + public boolean supportsStructures() { + return true; + } + }, + LT() { + @Override + public boolean eval(Object v1, Object v2) { + Integer compVal = LenientCompare.compare(v1, v2); + return compVal != null && compVal < 0; + } + }, + LTE() { + @Override + public boolean eval(Object v1, Object v2) { + Integer compVal = LenientCompare.compare(v1, v2); + return compVal != null && compVal <= 0; + } + }, + GT() { + @Override + public boolean eval(Object v1, Object v2) { + Integer compVal = LenientCompare.compare(v1, v2); + return compVal != null && compVal > 0; + } + }, + GTE() { + @Override + public boolean eval(Object v1, Object v2) { + Integer compVal = LenientCompare.compare(v1, v2); + return compVal != null && compVal >= 0; + } + }; + + public abstract boolean eval(Object v1, Object v2); + + public boolean supportsStructures() { + return false; + } + + public String id() { + return name().toLowerCase(Locale.ROOT); + } + + public static Op resolve(String id) { + return Op.valueOf(id.toUpperCase(Locale.ROOT)); + } + 
} +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/InternalAlwaysCondition.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/InternalAlwaysCondition.java new file mode 100644 index 0000000000000..1f1fe8c0325f2 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/InternalAlwaysCondition.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.condition; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.condition.AlwaysCondition; +import org.elasticsearch.xpack.core.watcher.condition.ExecutableCondition; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; + +import java.io.IOException; + +public final class InternalAlwaysCondition extends AlwaysCondition implements ExecutableCondition { + + public static final Result RESULT_INSTANCE = new Result(null, TYPE, true); + public static final InternalAlwaysCondition INSTANCE = new InternalAlwaysCondition(); + + private InternalAlwaysCondition() { } + + public static InternalAlwaysCondition parse(String watchId, XContentParser parser) throws IOException { + if (parser.currentToken() != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("unable to parse [{}] condition for watch [{}]. expected an empty object but found [{}]", + TYPE, watchId, parser.currentName()); + } + XContentParser.Token token = parser.nextToken(); + if (token != XContentParser.Token.END_OBJECT) { + throw new ElasticsearchParseException("unable to parse [{}] condition for watch [{}]. expected an empty object but found [{}]", + TYPE, watchId, parser.currentName()); + } + return INSTANCE; + } + + @Override + public Result execute(WatchExecutionContext ctx) { + return RESULT_INSTANCE; + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/LenientCompare.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/LenientCompare.java new file mode 100644 index 0000000000000..0594b6c8cf097 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/LenientCompare.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.condition; + +import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.util.Objects; + +public class LenientCompare { + // this method performs lenient comparison, potentially between different types. The second argument + // type (v2) determines the type of comparison (this is because the second argument is configured by the + // user while the first argument is the dynamic path that is evaluated at runtime. That is, if the user configures + // a number, it expects a number, therefore the comparison will be based on numeric comparison). If the + // comparison is numeric, other types (e.g. 
strings) will be converted to numbers if possible, if not, the comparison + // will fail and `false` will be returned. + // + // may return `null` indicating v1 simply doesn't equal v2 (without any order association) + public static Integer compare(Object v1, Object v2) { + if (Objects.equals(v1, v2)) { + return 0; + } + if (v1 == null || v2 == null) { + return null; + } + + if (v1.equals(Double.NaN) || v2.equals(Double.NaN) || v1.equals(Float.NaN) || v2.equals(Float.NaN)) { + return null; + } + + // special case for numbers. If v1 is not a number, we'll try to convert it to a number + if (v2 instanceof Number) { + if (!(v1 instanceof Number)) { + try { + v1 = Double.valueOf(String.valueOf(v1)); + } catch (NumberFormatException nfe) { + // could not convert to number + return null; + } + } + return ((Number) v1).doubleValue() > ((Number) v2).doubleValue() ? 1 : + ((Number) v1).doubleValue() < ((Number) v2).doubleValue() ? -1 : 0; + } + + // special case for strings. If v1 is not a string, we'll convert it to a string + if (v2 instanceof String) { + v1 = String.valueOf(v1); + return ((String) v1).compareTo((String) v2); + } + + // special case for date/times. If v1 is not a dateTime, we'll try to convert it to a datetime + if (v2 instanceof DateTime) { + if (v1 instanceof DateTime) { + return ((DateTime) v1).compareTo((DateTime) v2); + } + if (v1 instanceof String) { + try { + v1 = WatcherDateTimeUtils.parseDate((String) v1); + } catch (Exception e) { + return null; + } + } else if (v1 instanceof Number) { + v1 = new DateTime(((Number) v1).longValue(), DateTimeZone.UTC); + } else { + // cannot convert to date... + return null; + } + return ((DateTime) v1).compareTo((DateTime) v2); + } + + if (v1.getClass() != v2.getClass() || Comparable.class.isAssignableFrom(v1.getClass()) == false) { + return null; + } + + try { + return ((Comparable) v1).compareTo(v2); + } catch (Exception e) { + return null; + } + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/NeverCondition.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/NeverCondition.java new file mode 100644 index 0000000000000..f4994a1d6d543 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/NeverCondition.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.condition; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.condition.ExecutableCondition; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; + +import java.io.IOException; + +public final class NeverCondition implements ExecutableCondition { + + public static final String TYPE = "never"; + public static final Result RESULT_INSTANCE = new Result(null, TYPE, false); + public static final NeverCondition INSTANCE = new NeverCondition(); + + private NeverCondition() { } + + public static NeverCondition parse(String watchId, XContentParser parser) throws IOException { + if (parser.currentToken() != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. 
expected an empty object but found [{}]", + TYPE, watchId, parser.currentName()); + } + XContentParser.Token token = parser.nextToken(); + if (token != XContentParser.Token.END_OBJECT) { + throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. expected an empty object but found [{}]", + TYPE, watchId, parser.currentName()); + } + return INSTANCE; + } + + @Override + public Result execute(WatchExecutionContext ctx) { + return RESULT_INSTANCE; + } + + @Override + public boolean equals(Object obj) { + return obj instanceof NeverCondition; + } + + @Override + public int hashCode() { + // All instances has to produce the same hashCode because they are all equal + return 0; + } + + @Override + public String type() { + return TYPE; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject().endObject(); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/ScriptCondition.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/ScriptCondition.java new file mode 100644 index 0000000000000..e2befe9a24e68 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/condition/ScriptCondition.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.condition; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.xpack.core.watcher.condition.ExecutableCondition; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.watcher.Watcher; +import org.elasticsearch.xpack.watcher.support.Variables; + +import java.io.IOException; +import java.util.Map; + +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalState; + +/** + * This class executes a script against the ctx payload and returns a boolean + */ +public final class ScriptCondition implements ExecutableCondition { + public static final String TYPE = "script"; + private static final Result MET = new Result(null, TYPE, true); + private static final Result UNMET = new Result(null, TYPE, false); + + private final ScriptService scriptService; + private final Script script; + private final ExecutableScript.Factory scriptFactory; + + public ScriptCondition(Script script) { + this.script = script; + scriptService = null; + scriptFactory = null; + } + + ScriptCondition(Script script, ScriptService scriptService) { + this.scriptService = scriptService; + this.script = script; + scriptFactory = scriptService.compile(script, Watcher.SCRIPT_EXECUTABLE_CONTEXT); + } + + public Script getScript() { + return script; + } + + public static ScriptCondition parse(ScriptService scriptService, String watchId, XContentParser parser) throws IOException { + try { + Script script = Script.parse(parser); + return new ScriptCondition(script, scriptService); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse [{}] 
condition for watch [{}]. failed to parse script", pe, TYPE, + watchId); + } + } + + @Override + public Result execute(WatchExecutionContext ctx) { + return doExecute(ctx); + } + + public Result doExecute(WatchExecutionContext ctx) { + Map parameters = Variables.createCtxModel(ctx, ctx.payload()); + if (script.getParams() != null && !script.getParams().isEmpty()) { + parameters.putAll(script.getParams()); + } + ExecutableScript executable = scriptFactory.newInstance(parameters); + Object value = executable.run(); + if (value instanceof Boolean) { + return (Boolean) value ? MET : UNMET; + } + throw illegalState("condition [{}] must return a boolean value (true|false) but instead returned [{}]", type(), ctx.watch().id(), + script, value); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return script.toXContent(builder, params); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ScriptCondition condition = (ScriptCondition) o; + + return script.equals(condition.script); + } + + @Override + public int hashCode() { + return script.hashCode(); + } + + @Override + public String type() { + return TYPE; + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/AsyncTriggerEventConsumer.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/AsyncTriggerEventConsumer.java new file mode 100644 index 0000000000000..61a34e554d2d8 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/AsyncTriggerEventConsumer.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.execution; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; + +import java.util.function.Consumer; + +import static java.util.stream.StreamSupport.stream; + +public class AsyncTriggerEventConsumer implements Consumer<Iterable<TriggerEvent>> { + + private final Logger logger; + private final ExecutionService executionService; + + public AsyncTriggerEventConsumer(Settings settings, ExecutionService executionService) { + this.logger = Loggers.getLogger(SyncTriggerEventConsumer.class, settings); + this.executionService = executionService; + } + + @Override + public void accept(Iterable<TriggerEvent> events) { + try { + executionService.processEventsAsync(events); + } catch (Exception e) { + logger.error( + (Supplier<?>) () -> new ParameterizedMessage( + "failed to process triggered events [{}]", + (Object) stream(events.spliterator(), false).toArray(size -> + new TriggerEvent[size])), + e); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/CurrentExecutions.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/CurrentExecutions.java new file mode 100644 index 0000000000000..95ac803003681 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/CurrentExecutions.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.execution; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.common.unit.TimeValue; + +import java.util.Iterator; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; + +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalState; + +public final class CurrentExecutions implements Iterable<ExecutionService.WatchExecution> { + + private final ConcurrentMap<String, ExecutionService.WatchExecution> currentExecutions = new ConcurrentHashMap<>(); + // the condition of the lock is used to wait and signal the finishing of all executions on shutdown + private final ReentrantLock lock = new ReentrantLock(); + private final Condition empty = lock.newCondition(); + // a marker to not accept new executions, used when the watch service is powered down + private SetOnce<Boolean> seal = new SetOnce<>(); + + /** + * Tries to put a watch execution class for a watch in the current executions + * + * @param id The id of the watch + * @param execution The watch execution class + * @return Returns true if a watch with this id is already in the current executions, false otherwise + */ + public boolean put(String id, ExecutionService.WatchExecution execution) { + lock.lock(); + try { + if (seal.get() != null) { + // We shouldn't get here, because, ExecutionService#started should have been set to false + throw illegalState("could not register execution [{}]. 
current executions are sealed and forbid registrations of " + + "additional executions.", id); + } + return currentExecutions.putIfAbsent(id, execution) != null; + } finally { + lock.unlock(); + } + } + + public void remove(String id) { + lock.lock(); + try { + currentExecutions.remove(id); + if (currentExecutions.isEmpty()) { + empty.signal(); + } + } finally { + lock.unlock(); + } + } + + /** + * Calling this method makes the class stop accepting new executions and throws an exception instead. + * In addition it waits for a certain amount of time for current executions to finish before returning + * + * @param maxStopTimeout The maximum time to wait for current executions to finish + */ + void sealAndAwaitEmpty(TimeValue maxStopTimeout) { + lock.lock(); + // We may have current executions still going on. + // We should try to wait for the current executions to have completed. + // Otherwise we can run into a situation where we didn't delete the watch from the .triggered_watches index, + // but did insert into the history index. Upon start this can lead to DocumentAlreadyExistsException, + // because we already stored the history record during shutdown... + // (we always first store the watch record and then remove the triggered watch) + try { + seal.set(true); + while (currentExecutions.size() > 0) { + empty.await(maxStopTimeout.millis(), TimeUnit.MILLISECONDS); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } finally { + lock.unlock(); + } + } + + @Override + public Iterator<ExecutionService.WatchExecution> iterator() { + return currentExecutions.values().iterator(); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java new file mode 100644 index 0000000000000..29ef6e03f6d4d --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ExecutionService.java @@ -0,0 +1,582 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.execution; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.routing.Preference; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.metrics.MeanMetric; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.engine.DocumentMissingException; +import org.elasticsearch.xpack.core.watcher.actions.ActionWrapper; +import org.elasticsearch.xpack.core.watcher.actions.ActionWrapperResult; +import org.elasticsearch.xpack.core.watcher.common.stats.Counters; +import org.elasticsearch.xpack.core.watcher.condition.Condition; +import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; +import org.elasticsearch.xpack.core.watcher.execution.QueuedWatch; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionSnapshot; +import org.elasticsearch.xpack.core.watcher.history.WatchRecord; +import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.core.watcher.transform.Transform; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.core.watcher.watch.WatchField; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; +import org.elasticsearch.xpack.watcher.Watcher; +import org.elasticsearch.xpack.watcher.history.HistoryStore; +import org.elasticsearch.xpack.watcher.watch.WatchParser; +import org.joda.time.DateTime; + +import java.io.IOException; +import java.time.Clock; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; +import static org.joda.time.DateTimeZone.UTC; + +public class ExecutionService extends AbstractComponent { + + public static final Setting DEFAULT_THROTTLE_PERIOD_SETTING = 
+ Setting.positiveTimeSetting("xpack.watcher.execution.default_throttle_period", + TimeValue.timeValueSeconds(5), Setting.Property.NodeScope); + + private final MeanMetric totalExecutionsTime = new MeanMetric(); + private final Map actionByTypeExecutionTime = new HashMap<>(); + + private final HistoryStore historyStore; + private final TriggeredWatchStore triggeredWatchStore; + private final WatchExecutor executor; + private final Clock clock; + private final TimeValue defaultThrottlePeriod; + private final TimeValue maxStopTimeout; + private final WatchParser parser; + private final ClusterService clusterService; + private final Client client; + private final TimeValue indexDefaultTimeout; + + private volatile CurrentExecutions currentExecutions; + private final AtomicBoolean started = new AtomicBoolean(false); + + public ExecutionService(Settings settings, HistoryStore historyStore, TriggeredWatchStore triggeredWatchStore, WatchExecutor executor, + Clock clock, WatchParser parser, ClusterService clusterService, Client client) { + super(settings); + this.historyStore = historyStore; + this.triggeredWatchStore = triggeredWatchStore; + this.executor = executor; + this.clock = clock; + this.defaultThrottlePeriod = DEFAULT_THROTTLE_PERIOD_SETTING.get(settings); + this.maxStopTimeout = Watcher.MAX_STOP_TIMEOUT_SETTING.get(settings); + this.parser = parser; + this.clusterService = clusterService; + this.client = client; + this.indexDefaultTimeout = settings.getAsTime("xpack.watcher.internal.ops.index.default_timeout", TimeValue.timeValueSeconds(30)); + } + + public synchronized void start() throws Exception { + if (started.get()) { + return; + } + + assert executor.queue().isEmpty() : "queue should be empty, but contains " + executor.queue().size() + " elements."; + if (started.compareAndSet(false, true)) { + try { + logger.debug("starting execution service"); + historyStore.start(); + triggeredWatchStore.start(); + currentExecutions = new CurrentExecutions(); + logger.debug("started execution service"); + } catch (Exception e) { + started.set(false); + throw e; + } + } + } + + public boolean validate(ClusterState state) { + return triggeredWatchStore.validate(state) && HistoryStore.validate(state); + } + + public synchronized void stop() { + if (started.compareAndSet(true, false)) { + logger.debug("stopping execution service"); + // We could also rely on the shutdown in #updateSettings call, but + // this is a forceful shutdown that also interrupts the worker threads in the thread pool + int cancelledTaskCount = executor.queue().drainTo(new ArrayList<>()); + + this.clearExecutions(); + triggeredWatchStore.stop(); + historyStore.stop(); + logger.debug("stopped execution service, cancelled [{}] queued tasks", cancelledTaskCount); + } + } + + /** + * Pause the execution of the watcher executor + * @return the number of tasks that have been removed + */ + public synchronized int pauseExecution() { + int cancelledTaskCount = executor.queue().drainTo(new ArrayList<>()); + this.clearExecutions(); + return cancelledTaskCount; + } + + public TimeValue defaultThrottlePeriod() { + return defaultThrottlePeriod; + } + + public long executionThreadPoolQueueSize() { + return executor.queue().size(); + } + + public long executionThreadPoolMaxSize() { + return executor.largestPoolSize(); + } + + // for testing only + CurrentExecutions getCurrentExecutions() { + return currentExecutions; + } + + public List currentExecutions() { + List currentExecutions = new ArrayList<>(); + for (WatchExecution watchExecution 
: this.currentExecutions) { + currentExecutions.add(watchExecution.createSnapshot()); + } + // Let's show the longest running watch first: + currentExecutions.sort(Comparator.comparing(WatchExecutionSnapshot::executionTime)); + return currentExecutions; + } + + public List<QueuedWatch> queuedWatches() { + List<Runnable> snapshot = new ArrayList<>(); + executor.tasks().forEach(snapshot::add); + if (snapshot.isEmpty()) { + return Collections.emptyList(); + } + + List<QueuedWatch> queuedWatches = new ArrayList<>(snapshot.size()); + for (Runnable task : snapshot) { + WatchExecutionTask executionTask = (WatchExecutionTask) task; + queuedWatches.add(new QueuedWatch(executionTask.ctx)); + } + + // Let's show the execution that has been pending the longest first: + queuedWatches.sort(Comparator.comparing(QueuedWatch::executionTime)); + return queuedWatches; + } + + void processEventsAsync(Iterable<TriggerEvent> events) throws Exception { + if (!started.get()) { + throw new IllegalStateException("not started"); + } + Tuple<List<TriggeredWatch>, List<TriggeredExecutionContext>> watchesAndContext = createTriggeredWatchesAndContext(events); + List<TriggeredWatch> triggeredWatches = watchesAndContext.v1(); + triggeredWatchStore.putAll(triggeredWatches, ActionListener.wrap( + response -> executeTriggeredWatches(response, watchesAndContext), + e -> { + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof EsRejectedExecutionException) { + logger.debug("failed to store watch records due to filled up watcher threadpool"); + } else { + logger.warn("failed to store watch records", e); + } + })); + } + + void processEventsSync(Iterable<TriggerEvent> events) throws IOException { + if (!started.get()) { + throw new IllegalStateException("not started"); + } + Tuple<List<TriggeredWatch>, List<TriggeredExecutionContext>> watchesAndContext = createTriggeredWatchesAndContext(events); + List<TriggeredWatch> triggeredWatches = watchesAndContext.v1(); + logger.debug("saving watch records [{}]", triggeredWatches.size()); + BulkResponse bulkResponse = triggeredWatchStore.putAll(triggeredWatches); + executeTriggeredWatches(bulkResponse, watchesAndContext); + } + + /** + * Create a tuple of triggered watches and their corresponding contexts, usable for sync and async processing + * + * @param events The iterable list of trigger events to create the two lists from + * @return Two linked lists that contain the triggered watches and contexts + */ + private Tuple<List<TriggeredWatch>, List<TriggeredExecutionContext>> createTriggeredWatchesAndContext(Iterable<TriggerEvent> events) { + final LinkedList<TriggeredWatch> triggeredWatches = new LinkedList<>(); + final LinkedList<TriggeredExecutionContext> contexts = new LinkedList<>(); + + DateTime now = new DateTime(clock.millis(), UTC); + for (TriggerEvent event : events) { + GetResponse response = getWatch(event.jobName()); + if (response.isExists() == false) { + logger.warn("unable to find watch [{}] in watch index, perhaps it has been deleted", event.jobName()); + continue; + } + TriggeredExecutionContext ctx = new TriggeredExecutionContext(event.jobName(), now, event, defaultThrottlePeriod); + contexts.add(ctx); + triggeredWatches.add(new TriggeredWatch(ctx.id(), event)); + } + + return Tuple.tuple(triggeredWatches, contexts); + } + + /** + * Execute triggered watches, which have been successfully indexed into the triggered watches index + * + * @param response The bulk response containing the response of indexing triggered watches + * @param watchesAndContext The triggered watches and context objects needed for execution + */ + private void executeTriggeredWatches(final BulkResponse response, + final Tuple<List<TriggeredWatch>, List<TriggeredExecutionContext>> watchesAndContext) { + for (int i = 0; i < response.getItems().length; i++) { + BulkItemResponse itemResponse = response.getItems()[i]; + if 
(itemResponse.isFailed()) { + logger.error("could not store triggered watch with id [{}]: [{}]", itemResponse.getId(), itemResponse.getFailureMessage()); + } else { + executeAsync(watchesAndContext.v2().get(i), watchesAndContext.v1().get(i)); + } + } + } + + public WatchRecord execute(WatchExecutionContext ctx) { + ctx.setNodeId(clusterService.localNode().getId()); + WatchRecord record = null; + final String watchId = ctx.id().watchId(); + try { + boolean executionAlreadyExists = currentExecutions.put(watchId, new WatchExecution(ctx, Thread.currentThread())); + if (executionAlreadyExists) { + logger.trace("not executing watch [{}] because it is already queued", watchId); + record = ctx.abortBeforeExecution(ExecutionState.NOT_EXECUTED_ALREADY_QUEUED, "Watch is already queued in thread pool"); + } else { + try { + ctx.ensureWatchExists(() -> { + GetResponse resp = getWatch(watchId); + if (resp.isExists() == false) { + throw new ResourceNotFoundException("watch [{}] does not exist", watchId); + } + return parser.parseWithSecrets(watchId, true, resp.getSourceAsBytesRef(), ctx.executionTime(), XContentType.JSON); + }); + } catch (ResourceNotFoundException e) { + String message = "unable to find watch for record [" + ctx.id() + "]"; + record = ctx.abortBeforeExecution(ExecutionState.NOT_EXECUTED_WATCH_MISSING, message); + } catch (Exception e) { + record = ctx.abortFailedExecution(e); + } + + if (ctx.watch() != null) { + if (ctx.shouldBeExecuted()) { + logger.debug("executing watch [{}]", watchId); + + record = executeInner(ctx); + if (ctx.recordExecution()) { + updateWatchStatus(ctx.watch()); + } + } else { + logger.debug("not executing watch [{}]", watchId); + record = ctx.abortBeforeExecution(ExecutionState.EXECUTION_NOT_NEEDED, "Watch is not active"); + } + } + } + } catch (Exception e) { + record = createWatchRecord(record, ctx, e); + logWatchRecord(ctx, e); + } finally { + if (ctx.knownWatch()) { + if (record != null && ctx.recordExecution()) { + try { + if (ctx.overrideRecordOnConflict()) { + historyStore.forcePut(record); + } else { + historyStore.put(record); + } + } catch (Exception e) { + logger.error((Supplier) () -> new ParameterizedMessage("failed to update watch record [{}]", ctx.id()), e); + // TODO log watch record in logger, when saving in history store failed, otherwise the info is gone! + } + } + try { + triggeredWatchStore.delete(ctx.id()); + } catch (Exception e) { + logger.error((Supplier) () -> new ParameterizedMessage("failed to delete triggered watch [{}]", ctx.id()), e); + } + } + currentExecutions.remove(watchId); + logger.debug("finished [{}]/[{}]", watchId, ctx.id()); + } + return record; + } + + /** + * Updates and persists the status of the given watch + * + * If the watch is missing (because it might have been deleted by the user during an execution), then this method + * does nothing and just returns without throwing an exception + */ + public void updateWatchStatus(Watch watch) throws IOException { + // at the moment we store the status together with the watch, + // so we just need to update the watch itself + // we do not want to update the status.state field, as it might have been deactivated inbetween + Map parameters = MapBuilder.newMapBuilder() + .put(Watch.INCLUDE_STATUS_KEY, "true") + .put(WatchStatus.INCLUDE_STATE, "false") + .immutableMap(); + ToXContent.MapParams params = new ToXContent.MapParams(parameters); + XContentBuilder source = JsonXContent.contentBuilder(). 
+ startObject() + .field(WatchField.STATUS.getPreferredName(), watch.status(), params) + .endObject(); + + UpdateRequest updateRequest = new UpdateRequest(Watch.INDEX, Watch.DOC_TYPE, watch.id()); + updateRequest.doc(source); + updateRequest.version(watch.version()); + try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { + client.update(updateRequest).actionGet(indexDefaultTimeout); + } catch (DocumentMissingException e) { + // do not rethrow this exception, otherwise the watch history will contain an exception + // even though the execution might have been fine + // TODO should we really just drop this exception on the floor? + } + } + + private WatchRecord createWatchRecord(WatchRecord existingRecord, WatchExecutionContext ctx, Exception e) { + // it is possible that the watch store update failed, the execution phase is finished + if (ctx.executionPhase().sealed()) { + if (existingRecord == null) { + return new WatchRecord.ExceptionWatchRecord(ctx, e); + } else { + return new WatchRecord.ExceptionWatchRecord(existingRecord, e); + } + } else { + return ctx.abortFailedExecution(e); + } + } + + private void logWatchRecord(WatchExecutionContext ctx, Exception e) { + // failed watches stack traces are only logged in debug, otherwise they should be checked out in the history + if (logger.isDebugEnabled()) { + logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to execute watch [{}]", ctx.id().watchId()), e); + } else { + logger.warn("failed to execute watch [{}]", ctx.id().watchId()); + } + } + + /* + The execution of a watch is split into two phases: + 1. the trigger part which just makes sure to store the associated watch record in the history + 2. the actual processing of the watch + + The reason for this split is that we don't want to lose the fact that the watch was triggered. 
This way, even if the + thread pool that executes the watches is completely busy, we don't lose the fact that the watch was + triggered (it'll have its history record) + */ + private void executeAsync(WatchExecutionContext ctx, final TriggeredWatch triggeredWatch) { + try { + executor.execute(new WatchExecutionTask(ctx, () -> execute(ctx))); + } catch (EsRejectedExecutionException e) { + String message = "failed to run triggered watch [" + triggeredWatch.id() + "] due to thread pool capacity"; + WatchRecord record = ctx.abortBeforeExecution(ExecutionState.THREADPOOL_REJECTION, message); + try { + if (ctx.overrideRecordOnConflict()) { + historyStore.forcePut(record); + } else { + historyStore.put(record); + } + } catch (Exception exc) { + logger.error((Supplier) () -> + new ParameterizedMessage("Error storing watch history record for watch [{}] after thread pool rejection", + triggeredWatch.id()), exc); + } + + try { + triggeredWatchStore.delete(triggeredWatch.id()); + } catch (Exception exc) { + logger.error((Supplier) () -> + new ParameterizedMessage("Error deleting triggered watch store record for watch [{}] after thread pool " + + "rejection", triggeredWatch.id()), exc); + } + }; + } + + WatchRecord executeInner(WatchExecutionContext ctx) { + ctx.start(); + Watch watch = ctx.watch(); + + // input + ctx.beforeInput(); + Input.Result inputResult = ctx.inputResult(); + if (inputResult == null) { + inputResult = watch.input().execute(ctx, ctx.payload()); + ctx.onInputResult(inputResult); + } + if (inputResult.status() == Input.Result.Status.FAILURE) { + return ctx.abortFailedExecution("failed to execute watch input"); + } + + // condition + ctx.beforeCondition(); + Condition.Result conditionResult = ctx.conditionResult(); + if (conditionResult == null) { + conditionResult = watch.condition().execute(ctx); + ctx.onConditionResult(conditionResult); + } + if (conditionResult.status() == Condition.Result.Status.FAILURE) { + return ctx.abortFailedExecution("failed to execute watch condition"); + } + + if (conditionResult.met()) { + if (watch.actions().size() > 0 && watch.transform() != null) { + ctx.beforeWatchTransform(); + Transform.Result transformResult = watch.transform().execute(ctx, ctx.payload()); + ctx.onWatchTransformResult(transformResult); + if (transformResult.status() == Transform.Result.Status.FAILURE) { + return ctx.abortFailedExecution("failed to execute watch transform"); + } + } + + // actions + ctx.beforeActions(); + for (ActionWrapper action : watch.actions()) { + long start = System.nanoTime(); + ActionWrapperResult actionResult = action.execute(ctx); + long executionTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start); + String type = action.action().type(); + actionByTypeExecutionTime.putIfAbsent(type, new MeanMetric()); + actionByTypeExecutionTime.get(type).inc(executionTime); + ctx.onActionResult(actionResult); + } + } + + WatchRecord record = ctx.finish(); + totalExecutionsTime.inc(record.result().executionDurationMs()); + return record; + } + + public void executeTriggeredWatches(Collection triggeredWatches) { + assert triggeredWatches != null; + int counter = 0; + for (TriggeredWatch triggeredWatch : triggeredWatches) { + GetResponse response = getWatch(triggeredWatch.id().watchId()); + if (response.isExists() == false) { + String message = "unable to find watch for record [" + triggeredWatch.id().watchId() + "]/[" + triggeredWatch.id() + + "], perhaps it has been deleted, ignoring..."; + WatchRecord record = new 
WatchRecord.MessageWatchRecord(triggeredWatch.id(), triggeredWatch.triggerEvent(), + ExecutionState.NOT_EXECUTED_WATCH_MISSING, message, clusterService.localNode().getId()); + historyStore.forcePut(record); + triggeredWatchStore.delete(triggeredWatch.id()); + } else { + DateTime now = new DateTime(clock.millis(), UTC); + TriggeredExecutionContext ctx = new TriggeredExecutionContext(triggeredWatch.id().watchId(), now, + triggeredWatch.triggerEvent(), defaultThrottlePeriod, true); + executeAsync(ctx, triggeredWatch); + counter++; + } + } + logger.debug("triggered execution of [{}] watches", counter); + } + + /** + * Gets a watch but in a synchronous way, so that no async calls need to be built + * @param id The id of watch + * @return The GetResponse of calling the get API of this watch + */ + private GetResponse getWatch(String id) { + try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { + GetRequest getRequest = new GetRequest(Watch.INDEX, Watch.DOC_TYPE, id).preference(Preference.LOCAL.type()).realtime(true); + PlainActionFuture future = PlainActionFuture.newFuture(); + client.get(getRequest, future); + return future.actionGet(); + } + } + + public Counters executionTimes() { + Counters counters = new Counters(); + counters.inc("execution.actions._all.total", totalExecutionsTime.count()); + counters.inc("execution.actions._all.total_time_in_ms", totalExecutionsTime.sum()); + + for (Map.Entry entry : actionByTypeExecutionTime.entrySet()) { + counters.inc("execution.actions." + entry.getKey() + ".total", entry.getValue().count()); + counters.inc("execution.actions." + entry.getKey() + ".total_time_in_ms", entry.getValue().sum()); + } + + return counters; + } + + /** + * This clears out the current executions and sets new empty current executions + * This is needed, because when this method is called, watcher keeps running, so sealing executions would be a bad idea + */ + public synchronized void clearExecutions() { + currentExecutions.sealAndAwaitEmpty(maxStopTimeout); + currentExecutions = new CurrentExecutions(); + } + + // the watch execution task takes another runnable as parameter + // the best solution would be to move the whole execute() method, which is handed over as ctor parameter + // over into this class, this is the quicker way though + static final class WatchExecutionTask implements Runnable { + + private final WatchExecutionContext ctx; + private final Runnable runnable; + + WatchExecutionTask(WatchExecutionContext ctx, Runnable runnable) { + this.ctx = ctx; + this.runnable = runnable; + } + + @Override + public void run() { + runnable.run(); + } + } + + static class WatchExecution { + + private final WatchExecutionContext context; + private final Thread executionThread; + + WatchExecution(WatchExecutionContext context, Thread executionThread) { + this.context = context; + this.executionThread = executionThread; + } + + WatchExecutionSnapshot createSnapshot() { + return context.createSnapshot(executionThread); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/InternalWatchExecutor.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/InternalWatchExecutor.java new file mode 100644 index 0000000000000..21ba6451af64c --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/InternalWatchExecutor.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.execution; + +import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.XPackField; + +import java.util.concurrent.BlockingQueue; +import java.util.stream.Stream; + +public class InternalWatchExecutor implements WatchExecutor { + + public static final String THREAD_POOL_NAME = XPackField.WATCHER; + + private final ThreadPool threadPool; + + public InternalWatchExecutor(ThreadPool threadPool) { + this.threadPool = threadPool; + } + + @Override + public BlockingQueue queue() { + return executor().getQueue(); + } + + @Override + public Stream tasks() { + return executor().getTasks(); + } + + @Override + public long largestPoolSize() { + return executor().getLargestPoolSize(); + } + + @Override + public void execute(Runnable runnable) { + executor().execute(runnable); + } + + private EsThreadPoolExecutor executor() { + return (EsThreadPoolExecutor) threadPool.executor(THREAD_POOL_NAME); + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ManualExecutionContext.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ManualExecutionContext.java new file mode 100644 index 0000000000000..c161b24e85619 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/ManualExecutionContext.java @@ -0,0 +1,186 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.execution; + +import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.actions.ActionWrapper; +import org.elasticsearch.xpack.core.watcher.actions.ActionWrapperResult; +import org.elasticsearch.xpack.core.watcher.condition.Condition; +import org.elasticsearch.xpack.core.watcher.execution.ActionExecutionMode; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.trigger.manual.ManualTriggerEvent; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.util.HashMap; +import java.util.Map; + +import static java.util.Collections.unmodifiableMap; + +public class ManualExecutionContext extends WatchExecutionContext { + + private final Map actionModes; + private final boolean recordExecution; + private final boolean knownWatch; + private final Watch watch; + + ManualExecutionContext(Watch watch, boolean knownWatch, DateTime executionTime, ManualTriggerEvent triggerEvent, + TimeValue defaultThrottlePeriod, Input.Result inputResult, Condition.Result conditionResult, + Map actionModes, boolean recordExecution) { + + super(watch.id(), executionTime, triggerEvent, defaultThrottlePeriod); + + this.actionModes = actionModes; + this.recordExecution = recordExecution; + this.knownWatch = knownWatch; + this.watch = watch; + + if (inputResult != null) { + onInputResult(inputResult); + } + if (conditionResult != null) { + onConditionResult(conditionResult); + } + ActionExecutionMode allMode = actionModes.get(Builder.ALL); + if (allMode == null || allMode == ActionExecutionMode.SKIP) { + boolean throttleAll = allMode == ActionExecutionMode.SKIP; + for (ActionWrapper action : watch.actions()) { + if (throttleAll) { + onActionResult(new ActionWrapperResult(action.id(), + new Action.Result.Throttled(action.action().type(), "manually skipped"))); + } else { + ActionExecutionMode mode = actionModes.get(action.id()); + if (mode == ActionExecutionMode.SKIP) { + onActionResult(new ActionWrapperResult(action.id(), + new Action.Result.Throttled(action.action().type(), "manually skipped"))); + } + } + } + } + } + + // a noop operation, as the watch is already loaded via ctor + @Override + public void ensureWatchExists(CheckedSupplier supplier) throws Exception { + super.ensureWatchExists(() -> watch); + } + + @Override + public boolean knownWatch() { + return knownWatch; + } + + @Override + public final boolean simulateAction(String actionId) { + ActionExecutionMode mode = actionModes.get(Builder.ALL); + if (mode == null) { + mode = actionModes.get(actionId); + } + return mode != null && mode.simulate(); + } + + @Override + public boolean skipThrottling(String actionId) { + ActionExecutionMode mode = actionModes.get(Builder.ALL); + if (mode == null) { + mode = actionModes.get(actionId); + } + return mode != null && mode.force(); + } + + @Override + public boolean shouldBeExecuted() { + // we always want to execute a manually triggered watch as the user has triggered this via an + // external API call + return true; + } + + @Override + public final boolean recordExecution() { + return recordExecution; + } + + @Override + public Watch watch() { + return watch; + } + + public static Builder builder(Watch watch, boolean knownWatch, 
ManualTriggerEvent event, TimeValue defaultThrottlePeriod) { + return new Builder(watch, knownWatch, event, defaultThrottlePeriod); + } + + public static class Builder { + + static final String ALL = "_all"; + + private final Watch watch; + private final boolean knownWatch; + private final ManualTriggerEvent triggerEvent; + private final TimeValue defaultThrottlePeriod; + protected DateTime executionTime; + private boolean recordExecution = false; + private Map actionModes = new HashMap<>(); + private Input.Result inputResult; + private Condition.Result conditionResult; + + private Builder(Watch watch, boolean knownWatch, ManualTriggerEvent triggerEvent, TimeValue defaultThrottlePeriod) { + this.watch = watch; + this.knownWatch = knownWatch; + assert triggerEvent != null; + this.triggerEvent = triggerEvent; + this.defaultThrottlePeriod = defaultThrottlePeriod; + } + + public Builder executionTime(DateTime executionTime) { + this.executionTime = executionTime; + return this; + } + + public Builder recordExecution(boolean recordExecution) { + this.recordExecution = recordExecution; + return this; + } + + public Builder allActionsMode(ActionExecutionMode mode) { + return actionMode(ALL, mode); + } + + public Builder actionMode(String id, ActionExecutionMode mode) { + if (actionModes == null) { + throw new IllegalStateException("ManualExecutionContext has already been built!"); + } + if (ALL.equals(id)) { + actionModes = new HashMap<>(); + } + actionModes.put(id, mode); + return this; + } + + public Builder withInput(Input.Result inputResult) { + this.inputResult = inputResult; + return this; + } + + public Builder withCondition(Condition.Result conditionResult) { + this.conditionResult = conditionResult; + return this; + } + + public ManualExecutionContext build() { + if (executionTime == null) { + executionTime = DateTime.now(DateTimeZone.UTC); + } + ManualExecutionContext context = new ManualExecutionContext(watch, knownWatch, executionTime, triggerEvent, + defaultThrottlePeriod, inputResult, conditionResult, unmodifiableMap(actionModes), recordExecution); + actionModes = null; + return context; + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/SyncTriggerEventConsumer.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/SyncTriggerEventConsumer.java new file mode 100644 index 0000000000000..7608ad2908fc5 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/SyncTriggerEventConsumer.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.execution; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; + +import java.util.function.Consumer; + +import static java.util.stream.StreamSupport.stream; + +public class SyncTriggerEventConsumer implements Consumer> { + + private final ExecutionService executionService; + private final Logger logger; + + public SyncTriggerEventConsumer(Settings settings, ExecutionService executionService) { + this.logger = Loggers.getLogger(SyncTriggerEventConsumer.class, settings); + this.executionService = executionService; + } + + @Override + public void accept(Iterable events) { + try { + executionService.processEventsSync(events); + } catch (Exception e) { + logger.error( + (Supplier) () -> new ParameterizedMessage( + "failed to process triggered events [{}]", + (Object) stream(events.spliterator(), false).toArray(size -> + new TriggerEvent[size])), + e); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredExecutionContext.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredExecutionContext.java new file mode 100644 index 0000000000000..41486a7f810e1 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredExecutionContext.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.execution; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.joda.time.DateTime; + +public class TriggeredExecutionContext extends WatchExecutionContext { + + private final boolean overrideOnConflict; + + public TriggeredExecutionContext(String watchId, DateTime executionTime, TriggerEvent triggerEvent, TimeValue defaultThrottlePeriod) { + this(watchId, executionTime, triggerEvent, defaultThrottlePeriod, false); + } + + TriggeredExecutionContext(String watchId, DateTime executionTime, TriggerEvent triggerEvent, TimeValue defaultThrottlePeriod, + boolean overrideOnConflict) { + super(watchId, executionTime, triggerEvent, defaultThrottlePeriod); + this.overrideOnConflict = overrideOnConflict; + } + + @Override + public boolean overrideRecordOnConflict() { + return overrideOnConflict; + } + + @Override + public boolean knownWatch() { + return true; + } + + @Override + public final boolean simulateAction(String actionId) { + return false; + } + + @Override + public final boolean skipThrottling(String actionId) { + return false; + } + + @Override + public boolean shouldBeExecuted() { + return watch().status().state().isActive(); + } + + @Override + public final boolean recordExecution() { + return true; + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatch.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatch.java new file mode 100644 index 0000000000000..51998a14bd770 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatch.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
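// --- Editor's note: illustrative sketch, not part of the change set above. ---
// Contrasts the scheduled path with the manual one: a TriggeredExecutionContext is built
// directly from the trigger event and pins down the behaviour that the manual builder
// leaves configurable. The concrete `triggerEvent` below is assumed to exist.
TriggeredExecutionContext ctx = new TriggeredExecutionContext(
        "my-watch",                          // watch id (hypothetical)
        DateTime.now(DateTimeZone.UTC),      // execution time
        triggerEvent,                        // the event that fired the watch
        TimeValue.timeValueSeconds(5));      // default throttle period
// Fixed by this subclass: the watch is always "known", actions are never simulated,
// throttling is never skipped, the run is always recorded, and execution only proceeds
// while the watch status is active (see shouldBeExecuted()).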
+ */ +package org.elasticsearch.xpack.watcher.execution; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.watcher.trigger.TriggerService; + +import java.io.IOException; + +public class TriggeredWatch implements ToXContentObject { + + private final Wid id; + private final TriggerEvent triggerEvent; + + public TriggeredWatch(Wid id, TriggerEvent triggerEvent) { + this.id = id; + this.triggerEvent = triggerEvent; + } + + public Wid id() { + return id; + } + + public TriggerEvent triggerEvent() { + return triggerEvent; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Field.TRIGGER_EVENT.getPreferredName()).startObject().field(triggerEvent.type(), triggerEvent, params).endObject(); + builder.endObject(); + return builder; + } + + public static class Parser extends AbstractComponent { + + private final TriggerService triggerService; + + public Parser(Settings settings, TriggerService triggerService) { + super(settings); + this.triggerService = triggerService; + } + + public TriggeredWatch parse(String id, long version, BytesReference source) { + // EMPTY is safe here because we never use namedObject + try (XContentParser parser = XContentHelper + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, source)) { + return parse(id, version, parser); + } catch (IOException e) { + throw new ElasticsearchException("unable to parse watch record", e); + } + } + + public TriggeredWatch parse(String id, long version, XContentParser parser) throws IOException { + assert id != null : "watch record id is missing"; + + Wid wid = new Wid(id); + TriggerEvent triggerEvent = null; + + String currentFieldName = null; + XContentParser.Token token = parser.nextToken(); + assert token == XContentParser.Token.START_OBJECT; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if (Field.TRIGGER_EVENT.match(currentFieldName, parser.getDeprecationHandler())) { + triggerEvent = triggerService.parseTriggerEvent(wid.watchId(), id, parser); + } else { + parser.skipChildren(); + } + } + } + + TriggeredWatch record = new TriggeredWatch(wid, triggerEvent); + assert record.triggerEvent() != null : "watch record [" + id +"] is missing trigger"; + return record; + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + TriggeredWatch entry = (TriggeredWatch) o; + if (!id.equals(entry.id)) return false; + + return true; + } + + @Override + public int hashCode() { + return id.hashCode(); + } + + @Override + 
public String toString() { + return id.toString(); + } + + public interface Field { + ParseField TRIGGER_EVENT = new ParseField("trigger_event"); + ParseField STATE = new ParseField("state"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java new file mode 100644 index 0000000000000..35bc805fc59b8 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java @@ -0,0 +1,227 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.execution; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.ClearScrollRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.Preference; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.sort.SortBuilders; +import org.elasticsearch.xpack.core.watcher.execution.TriggeredWatchStoreField; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.watch.WatchStoreUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalState; + +public class TriggeredWatchStore extends AbstractComponent { + + private final int scrollSize; + private final Client client; + private final 
TimeValue scrollTimeout; + private final TriggeredWatch.Parser triggeredWatchParser; + + private final AtomicBoolean started = new AtomicBoolean(false); + private final TimeValue defaultBulkTimeout; + private final TimeValue defaultSearchTimeout; + + public TriggeredWatchStore(Settings settings, Client client, TriggeredWatch.Parser triggeredWatchParser) { + super(settings); + this.scrollSize = settings.getAsInt("xpack.watcher.execution.scroll.size", 1000); + this.client = client; + this.scrollTimeout = settings.getAsTime("xpack.watcher.execution.scroll.timeout", TimeValue.timeValueMinutes(5)); + this.defaultBulkTimeout = settings.getAsTime("xpack.watcher.internal.ops.bulk.default_timeout", TimeValue.timeValueSeconds(120)); + this.defaultSearchTimeout = settings.getAsTime("xpack.watcher.internal.ops.search.default_timeout", TimeValue.timeValueSeconds(30)); + this.triggeredWatchParser = triggeredWatchParser; + this.started.set(true); + } + + public void start() { + started.set(true); + } + + public boolean validate(ClusterState state) { + try { + IndexMetaData indexMetaData = WatchStoreUtils.getConcreteIndex(TriggeredWatchStoreField.INDEX_NAME, state.metaData()); + if (indexMetaData == null) { + return true; + } else { + if (indexMetaData.getState() == IndexMetaData.State.CLOSE) { + logger.debug("triggered watch index [{}] is marked as closed, watcher cannot be started", + indexMetaData.getIndex().getName()); + return false; + } else { + return state.routingTable().index(indexMetaData.getIndex()).allPrimaryShardsActive(); + } + } + } catch (IllegalStateException e) { + logger.trace((Supplier) () -> new ParameterizedMessage("error getting index meta data [{}]: ", + TriggeredWatchStoreField.INDEX_NAME), e); + return false; + } + } + + public void stop() { + started.set(false); + } + + public void putAll(final List triggeredWatches, final ActionListener listener) throws IOException { + if (triggeredWatches.isEmpty()) { + listener.onResponse(new BulkResponse(new BulkItemResponse[]{}, 0)); + return; + } + + ensureStarted(); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, createBulkRequest(triggeredWatches, + TriggeredWatchStoreField.DOC_TYPE), listener, client::bulk); + } + + public BulkResponse putAll(final List triggeredWatches) throws IOException { + PlainActionFuture future = PlainActionFuture.newFuture(); + putAll(triggeredWatches, future); + return future.actionGet(defaultBulkTimeout); + } + + /** + * Create a bulk request from the triggered watches with a specified document type + * @param triggeredWatches The list of triggered watches + * @param docType The document type to use, either the current one or legacy + * @return The bulk request for the triggered watches + * @throws IOException If a triggered watch could not be parsed to JSON, this exception is thrown + */ + private BulkRequest createBulkRequest(final List triggeredWatches, String docType) throws IOException { + BulkRequest request = new BulkRequest(); + for (TriggeredWatch triggeredWatch : triggeredWatches) { + IndexRequest indexRequest = new IndexRequest(TriggeredWatchStoreField.INDEX_NAME, docType, triggeredWatch.id().value()); + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + triggeredWatch.toXContent(builder, ToXContent.EMPTY_PARAMS); + indexRequest.source(builder); + } + indexRequest.opType(IndexRequest.OpType.CREATE); + request.add(indexRequest); + } + return request; + } + + public void delete(Wid wid) { + ensureStarted(); + DeleteRequest request = new 
DeleteRequest(TriggeredWatchStoreField.INDEX_NAME, TriggeredWatchStoreField.DOC_TYPE, wid.value()); + try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { + client.delete(request); // FIXME shouldn't we wait before saying the delete was successful + } + logger.trace("successfully deleted triggered watch with id [{}]", wid); + } + + private void ensureStarted() { + if (!started.get()) { + throw illegalState("unable to persist triggered watches, the store is not ready"); + } + } + + /** + * Checks if any of the loaded watches has been put into the triggered watches index for immediate execution + * + * Note: This is executing a blocking call over the network, thus a potential source of problems + * + * @param watches The list of watches that will be loaded here + * @param clusterState The current cluster state + * @return A list of triggered watches that have been started to execute somewhere else but not finished + */ + public Collection findTriggeredWatches(Collection watches, ClusterState clusterState) { + if (watches.isEmpty()) { + return Collections.emptyList(); + } + + // non existing index, return immediately + IndexMetaData indexMetaData = WatchStoreUtils.getConcreteIndex(TriggeredWatchStoreField.INDEX_NAME, clusterState.metaData()); + if (indexMetaData == null) { + return Collections.emptyList(); + } + + try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { + client.admin().indices().refresh(new RefreshRequest(TriggeredWatchStoreField.INDEX_NAME)) + .actionGet(TimeValue.timeValueSeconds(5)); + } catch (IndexNotFoundException e) { + return Collections.emptyList(); + } + + Set ids = watches.stream().map(Watch::id).collect(Collectors.toSet()); + Collection triggeredWatches = new ArrayList<>(ids.size()); + + SearchRequest searchRequest = new SearchRequest(TriggeredWatchStoreField.INDEX_NAME) + .scroll(scrollTimeout) + .preference(Preference.LOCAL.toString()) + .source(new SearchSourceBuilder() + .size(scrollSize) + .sort(SortBuilders.fieldSort("_doc")) + .version(true)); + + SearchResponse response = null; + try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { + response = client.search(searchRequest).actionGet(defaultSearchTimeout); + logger.debug("trying to find triggered watches for ids {}: found [{}] docs", ids, response.getHits().getTotalHits()); + while (response.getHits().getHits().length != 0) { + for (SearchHit hit : response.getHits()) { + Wid wid = new Wid(hit.getId()); + if (ids.contains(wid.watchId())) { + TriggeredWatch triggeredWatch = triggeredWatchParser.parse(hit.getId(), hit.getVersion(), hit.getSourceRef()); + triggeredWatches.add(triggeredWatch); + } + } + SearchScrollRequest request = new SearchScrollRequest(response.getScrollId()); + request.scroll(scrollTimeout); + response = client.searchScroll(request).actionGet(defaultSearchTimeout); + } + } finally { + if (response != null) { + try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { + ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + clearScrollRequest.addScrollId(response.getScrollId()); + client.clearScroll(clearScrollRequest).actionGet(scrollTimeout); + } + } + } + + return triggeredWatches; + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/WatchExecutor.java 
b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/WatchExecutor.java new file mode 100644 index 0000000000000..46dad83640a3c --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/WatchExecutor.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.execution; + +import java.util.concurrent.BlockingQueue; +import java.util.stream.Stream; + +public interface WatchExecutor { + + BlockingQueue queue(); + + Stream tasks(); + + long largestPoolSize(); + + void execute(Runnable runnable); + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java new file mode 100644 index 0000000000000..d226917c57459 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/history/HistoryStore.java @@ -0,0 +1,152 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.history; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; +import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; +import org.elasticsearch.xpack.core.watcher.history.WatchRecord; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.watcher.watch.WatchStoreUtils; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.ioException; + +public class HistoryStore extends AbstractComponent { + + public static final String DOC_TYPE = "doc"; + + private final Client client; + + private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); + private final Lock putUpdateLock = readWriteLock.readLock(); + private final Lock stopLock = readWriteLock.writeLock(); + private 
final AtomicBoolean started = new AtomicBoolean(false); + + public HistoryStore(Settings settings, Client client) { + super(settings); + this.client = client; + } + + public void start() { + started.set(true); + } + + public void stop() { + stopLock.lock(); //This will block while put or update actions are underway + try { + started.set(false); + } finally { + stopLock.unlock(); + } + } + + /** + * Stores the specified watchRecord. + * If the specified watchRecord already was stored this call will fail with a version conflict. + */ + public void put(WatchRecord watchRecord) throws Exception { + if (!started.get()) { + throw new IllegalStateException("unable to persist watch record history store is not ready"); + } + String index = HistoryStoreField.getHistoryIndexNameForTime(watchRecord.triggerEvent().triggeredTime()); + putUpdateLock.lock(); + try (XContentBuilder builder = XContentFactory.jsonBuilder(); + ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { + watchRecord.toXContent(builder, WatcherParams.HIDE_SECRETS); + + IndexRequest request = new IndexRequest(index, DOC_TYPE, watchRecord.id().value()) + .source(builder) + .opType(IndexRequest.OpType.CREATE); + client.index(request).actionGet(30, TimeUnit.SECONDS); + logger.debug("indexed watch history record [{}]", watchRecord.id().value()); + } catch (IOException ioe) { + throw ioException("failed to persist watch record [{}]", ioe, watchRecord); + } finally { + putUpdateLock.unlock(); + } + } + + /** + * Stores the specified watchRecord. + * Any existing watchRecord will be overwritten. + */ + public void forcePut(WatchRecord watchRecord) { + if (!started.get()) { + throw new IllegalStateException("unable to persist watch record history store is not ready"); + } + String index = HistoryStoreField.getHistoryIndexNameForTime(watchRecord.triggerEvent().triggeredTime()); + putUpdateLock.lock(); + try { + try (XContentBuilder builder = XContentFactory.jsonBuilder(); + ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { + watchRecord.toXContent(builder, WatcherParams.HIDE_SECRETS); + + IndexRequest request = new IndexRequest(index, DOC_TYPE, watchRecord.id().value()) + .source(builder) + .opType(IndexRequest.OpType.CREATE); + client.index(request).get(30, TimeUnit.SECONDS); + logger.debug("indexed watch history record [{}]", watchRecord.id().value()); + } catch (VersionConflictEngineException vcee) { + watchRecord = new WatchRecord.MessageWatchRecord(watchRecord, ExecutionState.EXECUTED_MULTIPLE_TIMES, + "watch record [{ " + watchRecord.id() + " }] has been stored before, previous state [" + watchRecord.state() + "]"); + try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder(); + ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { + IndexRequest request = new IndexRequest(index, DOC_TYPE, watchRecord.id().value()) + .source(xContentBuilder.value(watchRecord)); + client.index(request).get(30, TimeUnit.SECONDS); + } + logger.debug("overwrote watch history record [{}]", watchRecord.id().value()); + } + } catch (InterruptedException | ExecutionException | TimeoutException | IOException ioe) { + final WatchRecord wr = watchRecord; + logger.error((Supplier) () -> new ParameterizedMessage("failed to persist watch record [{}]", wr), ioe); + } finally { + putUpdateLock.unlock(); + } + } + + /** + * Check if everything is set up for the history store to operate 
fully. Checks for the + * current watcher history index and if it is open. + * + * @param state The current cluster state + * @return true, if history store is ready to be started + */ + public static boolean validate(ClusterState state) { + String currentIndex = HistoryStoreField.getHistoryIndexNameForTime(DateTime.now(DateTimeZone.UTC)); + IndexMetaData indexMetaData = WatchStoreUtils.getConcreteIndex(currentIndex, state.metaData()); + if (indexMetaData == null) { + return true; + } else { + return indexMetaData.getState() == IndexMetaData.State.OPEN && + state.routingTable().index(indexMetaData.getIndex()).allPrimaryShardsActive(); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/InputBuilders.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/InputBuilders.java new file mode 100644 index 0000000000000..4faf6636edcfb --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/InputBuilders.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.input; + +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.xpack.core.watcher.input.none.NoneInput; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.input.chain.ChainInput; +import org.elasticsearch.xpack.watcher.input.http.HttpInput; +import org.elasticsearch.xpack.watcher.input.search.SearchInput; +import org.elasticsearch.xpack.watcher.input.simple.SimpleInput; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; + +import java.util.HashMap; +import java.util.Map; + +public final class InputBuilders { + + private InputBuilders() { + } + + public static NoneInput.Builder noneInput() { + return NoneInput.builder(); + } + + public static SearchInput.Builder searchInput(WatcherSearchTemplateRequest request) { + return SearchInput.builder(request); + } + + public static SimpleInput.Builder simpleInput() { + return simpleInput(new HashMap<>()); + } + + public static SimpleInput.Builder simpleInput(String key, Object value) { + return simpleInput(MapBuilder.newMapBuilder().put(key, value)); + } + + public static SimpleInput.Builder simpleInput(MapBuilder data) { + return simpleInput(data.map()); + } + + public static SimpleInput.Builder simpleInput(Map data) { + return SimpleInput.builder(new Payload.Simple(data)); + } + + public static HttpInput.Builder httpInput(HttpRequestTemplate.Builder request) { + return httpInput(request.build()); + } + + public static HttpInput.Builder httpInput(HttpRequestTemplate request) { + return HttpInput.builder(request); + } + + public static ChainInput.Builder chainInput() { + return ChainInput.builder(); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/InputFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/InputFactory.java new file mode 100644 index 0000000000000..25deb2227c605 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/InputFactory.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
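// --- Editor's note: illustrative sketch, not part of the change set above. ---
// InputBuilders is the entry point used by tests and client code to declare watch inputs;
// a few typical calls, assuming a prepared HttpRequestTemplate and a
// WatcherSearchTemplateRequest are available:
SimpleInput.Builder constant = InputBuilders.simpleInput("threshold", 42);   // fixed key/value payload
HttpInput.Builder http       = InputBuilders.httpInput(requestTemplate);     // requestTemplate is assumed
SearchInput.Builder search   = InputBuilders.searchInput(searchRequest);     // searchRequest is assumed
ChainInput.Builder chained   = InputBuilders.chainInput();                   // combined later, see ChainInput below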
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.input; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.input.ExecutableInput; +import org.elasticsearch.xpack.core.watcher.input.Input; + +import java.io.IOException; + +/** + * Parses xcontent to a concrete input of the same type. + */ +public abstract class InputFactory> { + + protected final Logger inputLogger; + + public InputFactory(Logger inputLogger) { + this.inputLogger = inputLogger; + } + + /** + * @return The type of the input + */ + public abstract String type(); + + /** + * Parses the given xcontent and creates a concrete input + * + * @param watchId The id of the watch + * @param parser The parser containing the input content of the watch + */ + public abstract I parseInput(String watchId, XContentParser parser) throws IOException; + + /** + * Creates an executable input out of the given input. + */ + public abstract E createExecutable(I input); + + public E parseExecutable(String watchId, XContentParser parser) throws IOException { + I input = parseInput(watchId, parser); + return createExecutable(input); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/InputRegistry.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/InputRegistry.java new file mode 100644 index 0000000000000..460725c3dda98 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/InputRegistry.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.input; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.input.ExecutableInput; +import org.elasticsearch.xpack.watcher.input.chain.ChainInput; +import org.elasticsearch.xpack.watcher.input.chain.ChainInputFactory; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +public class InputRegistry { + + private final Map factories; + + public InputRegistry(Settings settings, Map factories) { + Map map = new HashMap<>(factories); + map.put(ChainInput.TYPE, new ChainInputFactory(settings, this)); + this.factories = Collections.unmodifiableMap(map); + } + + /** + * Reads the contents of parser to create the correct Input + * + * @param parser The parser containing the input definition + * @return A new input instance from the parser + */ + public ExecutableInput parse(String watchId, XContentParser parser) throws IOException { + String type = null; + + if (parser.currentToken() != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("could not parse input for watch [{}]. 
expected an object representing the input, but " + + "found [{}] instead", watchId, parser.currentToken()); + } + + XContentParser.Token token; + ExecutableInput input = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + type = parser.currentName(); + } else if (type == null) { + throw new ElasticsearchParseException("could not parse input for watch [{}]. expected field indicating the input type, " + + "but found [{}] instead", watchId, token); + } else if (token == XContentParser.Token.START_OBJECT) { + InputFactory factory = factories.get(type); + if (factory == null) { + throw new ElasticsearchParseException("could not parse input for watch [{}]. unknown input type [{}]", watchId, type); + } + input = factory.parseExecutable(watchId, parser); + } else { + throw new ElasticsearchParseException("could not parse input for watch [{}]. expected an object representing input [{}], " + + "but found [{}] instead", watchId, type, token); + } + } + + if (input == null) { + throw new ElasticsearchParseException("could not parse input for watch [{}]. expected field indicating the input type, but " + + "found an empty object instead", watchId, token); + } + + return input; + } + + public Map factories() { + return factories; + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/chain/ChainInput.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/chain/ChainInput.java new file mode 100644 index 0000000000000..3c62f4d1066d2 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/chain/ChainInput.java @@ -0,0 +1,144 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
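// --- Editor's note: illustrative sketch, not part of the change set above. ---
// The registry is keyed by input type; the chain input factory registers itself in the
// constructor, so callers only supply the "leaf" factories. The factory instances and the
// exact generic bounds of the map are assumptions here (generics are elided in the patch text).
Map<String, InputFactory> factories = new HashMap<>();
factories.put(SimpleInput.TYPE, simpleInputFactory);
factories.put(HttpInput.TYPE, httpInputFactory);
factories.put(SearchInput.TYPE, searchInputFactory);
InputRegistry registry = new InputRegistry(Settings.EMPTY, factories);
// registry.parse(watchId, parser) then dispatches on the single field name it finds,
// e.g. {"http": {...}} -> the http factory's parseExecutable(...)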
+ */ +package org.elasticsearch.xpack.watcher.input.chain; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.input.InputRegistry; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class ChainInput implements Input { + + public static final String TYPE = "chain"; + public static final ParseField INPUTS = new ParseField("inputs"); + + private final List> inputs; + + public ChainInput(List> inputs) { + this.inputs = inputs; + } + + @Override + public String type() { + return TYPE; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startArray(INPUTS.getPreferredName()); + for (Tuple tuple : inputs) { + builder.startObject().startObject(tuple.v1()); + builder.field(tuple.v2().type(), tuple.v2()); + builder.endObject().endObject(); + } + builder.endArray(); + builder.endObject(); + + return builder; + } + + public List> getInputs() { + return inputs; + } + + public static ChainInput parse(String watchId, XContentParser parser, InputRegistry inputRegistry) throws IOException { + List> inputs = new ArrayList<>(); + String currentFieldName; + XContentParser.Token token; + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + token = parser.nextToken(); + if (token == XContentParser.Token.START_ARRAY && INPUTS.getPreferredName().equals(currentFieldName)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.FIELD_NAME) { + String inputName = parser.currentName(); + inputs.add(new Tuple<>(inputName, parseSingleInput(watchId, inputName, parser, inputRegistry))); + } + } + } + } + } + + return new ChainInput(inputs); + } + + private static Input parseSingleInput(String watchId, String name, XContentParser parser, + InputRegistry inputRegistry) throws IOException { + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("Expected starting JSON object after [{}] in watch [{}]", name, watchId); + } + + Input input = inputRegistry.parse(watchId, parser).input(); + + // expecting closing of two json object to start the next element in the array + if (parser.currentToken() != XContentParser.Token.END_OBJECT || parser.nextToken() != XContentParser.Token.END_OBJECT) { + throw new ElasticsearchParseException("Expected closing JSON object after parsing input [{}] named [{}] in watch [{}]", + input.type(), name, watchId); + } + + return input; + } + + public static ChainInput.Builder builder() { + return new Builder(); + } + + public static class Builder implements Input.Builder { + + private List> inputs; + + private Builder() { + inputs = new ArrayList<>(); + } + + public Builder add(String name, Input.Builder input) { + inputs.add(new Tuple<>(name, input.build())); + return this; + } + + @Override + public ChainInput build() { + return new ChainInput(inputs); + } + } + + public static class Result extends Input.Result { + + private List> results = 
Collections.emptyList(); + + protected Result(List> results, Payload payload) { + super(TYPE, payload); + this.results = results; + } + + protected Result(Exception e) { + super(TYPE, e); + } + + @Override + protected XContentBuilder typeXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(type); + for (Tuple tuple : results) { + builder.field(tuple.v1(), tuple.v2()); + } + builder.endObject(); + + return builder; + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/chain/ChainInputFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/chain/ChainInputFactory.java new file mode 100644 index 0000000000000..ac3f7b820c283 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/chain/ChainInputFactory.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.input.chain; + +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.input.ExecutableInput; +import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.watcher.input.InputFactory; +import org.elasticsearch.xpack.watcher.input.InputRegistry; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class ChainInputFactory extends InputFactory { + + private final InputRegistry inputRegistry; + + public ChainInputFactory(Settings settings, InputRegistry inputRegistry) { + super(Loggers.getLogger(ExecutableChainInput.class, settings)); + this.inputRegistry = inputRegistry; + } + + @Override + public String type() { + return ChainInput.TYPE; + } + + @Override + public ChainInput parseInput(String watchId, XContentParser parser) throws IOException { + return ChainInput.parse(watchId, parser, inputRegistry); + } + + @Override + public ExecutableChainInput createExecutable(ChainInput input) { + List> executableInputs = new ArrayList<>(); + for (Tuple tuple : input.getInputs()) { + ExecutableInput executableInput = inputRegistry.factories().get(tuple.v2().type()).createExecutable(tuple.v2()); + executableInputs.add(new Tuple<>(tuple.v1(), executableInput)); + } + + return new ExecutableChainInput(input, executableInputs, inputLogger); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/chain/ExecutableChainInput.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/chain/ExecutableChainInput.java new file mode 100644 index 0000000000000..2643876bdb63d --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/chain/ExecutableChainInput.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
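// --- Editor's note: illustrative sketch, not part of the change set above. ---
// A chain input is declared as an ordered list of named inputs; each name becomes the key
// under which that input's payload is exposed to the inputs that follow it. The HTTP
// request template and the simple-input key/value below are example values.
ChainInput chain = ChainInput.builder()
        .add("first", InputBuilders.simpleInput("path", "/_cluster/health"))
        .add("second", InputBuilders.httpInput(requestTemplate))
        .build();
// serialises as: {"inputs": [ {"first": {"simple": {...}}}, {"second": {"http": {...}}} ]}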
+ */ +package org.elasticsearch.xpack.watcher.input.chain; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.input.ExecutableInput; +import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.core.watcher.watch.Payload; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.watcher.input.chain.ChainInput.TYPE; + +public class ExecutableChainInput extends ExecutableInput { + + private List> inputs; + + public ExecutableChainInput(ChainInput input, List> inputs, Logger logger) { + super(input, logger); + this.inputs = inputs; + } + + @Override + public ChainInput.Result execute(WatchExecutionContext ctx, Payload payload) { + List> results = new ArrayList<>(); + Map payloads = new HashMap<>(); + + try { + for (Tuple tuple : inputs) { + Input.Result result = tuple.v2().execute(ctx, new Payload.Simple(payloads)); + results.add(new Tuple<>(tuple.v1(), result)); + payloads.put(tuple.v1(), result.payload().data()); + } + + return new ChainInput.Result(results, new Payload.Simple(payloads)); + } catch (Exception e) { + logger.error("failed to execute [{}] input for watch [{}], reason [{}]", TYPE, ctx.watch().id(), e.getMessage()); + return new ChainInput.Result(e); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/http/ExecutableHttpInput.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/http/ExecutableHttpInput.java new file mode 100644 index 0000000000000..1bc7ab309f0aa --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/http/ExecutableHttpInput.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.input.http; + + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.input.ExecutableInput; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.support.Variables; +import org.elasticsearch.xpack.watcher.support.XContentFilterKeysUtils; + +import java.io.InputStream; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.watcher.input.http.HttpInput.TYPE; + +public class ExecutableHttpInput extends ExecutableInput { + + private final HttpClient client; + private final TextTemplateEngine templateEngine; + + public ExecutableHttpInput(HttpInput input, Logger logger, HttpClient client, TextTemplateEngine templateEngine) { + super(input, logger); + this.client = client; + this.templateEngine = templateEngine; + } + + public HttpInput.Result execute(WatchExecutionContext ctx, Payload payload) { + HttpRequest request = null; + try { + Map model = Variables.createCtxModel(ctx, payload); + request = input.getRequest().render(templateEngine, model); + return doExecute(ctx, request); + } catch (Exception e) { + logger.error("failed to execute [{}] input for watch [{}], reason [{}]", TYPE, ctx.watch().id(), e.getMessage()); + return new HttpInput.Result(request, e); + } + } + + HttpInput.Result doExecute(WatchExecutionContext ctx, HttpRequest request) throws Exception { + HttpResponse response = client.execute(request); + Map> headers = response.headers(); + Map payloadMap = new HashMap<>(); + payloadMap.put("_status_code", response.status()); + if (headers.isEmpty() == false) { + payloadMap.put("_headers", headers); + } + + if (!response.hasContent()) { + return new HttpInput.Result(request, response.status(), new Payload.Simple(payloadMap)); + } + + final XContentType contentType; + XContentType responseContentType = response.xContentType(); + if (input.getExpectedResponseXContentType() == null) { + //Attempt to auto detect content type, if not set in response + contentType = responseContentType != null ? 
responseContentType : XContentHelper.xContentType(response.body()); + } else { + contentType = input.getExpectedResponseXContentType().contentType(); + if (responseContentType != contentType) { + logger.warn("[{}] [{}] input expected content type [{}] but read [{}] from headers, using expected one", type(), ctx.id(), + input.getExpectedResponseXContentType(), responseContentType); + } + } + + if (contentType != null) { + // EMPTY is safe here because we never use namedObject + try (InputStream stream = response.body().streamInput(); + XContentParser parser = contentType.xContent() + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + if (input.getExtractKeys() != null) { + payloadMap.putAll(XContentFilterKeysUtils.filterMapOrdered(input.getExtractKeys(), parser)); + } else { + // special handling if a list is returned, i.e. JSON like [ {},{} ] + XContentParser.Token token = parser.nextToken(); + if (token == XContentParser.Token.START_ARRAY) { + payloadMap.put("data", parser.listOrderedMap()); + } else { + payloadMap.putAll(parser.mapOrdered()); + } + } + } catch (Exception e) { + throw new ElasticsearchParseException("could not parse response body [{}] it does not appear to be [{}]", type(), ctx.id(), + response.body().utf8ToString(), contentType.shortName()); + } + } else { + payloadMap.put("_value", response.body().utf8ToString()); + } + + return new HttpInput.Result(request, response.status(), new Payload.Simple(payloadMap)); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/http/HttpInput.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/http/HttpInput.java new file mode 100644 index 0000000000000..3f3acdc0b3235 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/http/HttpInput.java @@ -0,0 +1,211 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
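// --- Editor's note: illustrative sketch, not part of the change set above. ---
// Rough shape of the payload map assembled by doExecute() for a JSON body {"hits": 3}
// returned with HTTP 200 and a content-type header (all values are examples):
Map<String, Object> payloadMap = new HashMap<>();
payloadMap.put("_status_code", 200);                          // always present
payloadMap.put("_headers", Collections.singletonMap("content-type",
        Collections.singletonList("application/json")));      // only when headers were returned
payloadMap.put("hits", 3);                                    // object bodies are merged at the top level
// A top-level JSON array would instead land under "data", and a body that cannot be
// parsed as XContent is stored verbatim under "_value".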
+ */ +package org.elasticsearch.xpack.watcher.input.http; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpContentType; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +import static java.util.Collections.unmodifiableSet; + +public class HttpInput implements Input { + + public static final String TYPE = "http"; + + private final HttpRequestTemplate request; + @Nullable private final HttpContentType expectedResponseXContentType; + @Nullable private final Set extractKeys; + + public HttpInput(HttpRequestTemplate request, @Nullable HttpContentType expectedResponseXContentType, + @Nullable Set extractKeys) { + this.request = request; + this.expectedResponseXContentType = expectedResponseXContentType; + this.extractKeys = extractKeys; + } + + @Override + public String type() { + return TYPE; + } + + public HttpRequestTemplate getRequest() { + return request; + } + + public Set getExtractKeys() { + return extractKeys; + } + + public HttpContentType getExpectedResponseXContentType() { + return expectedResponseXContentType; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Field.REQUEST.getPreferredName(), request, params); + if (extractKeys != null) { + builder.field(Field.EXTRACT.getPreferredName(), extractKeys); + } + if (expectedResponseXContentType != null) { + builder.field(Field.RESPONSE_CONTENT_TYPE.getPreferredName(), expectedResponseXContentType.id()); + } + builder.endObject(); + return builder; + } + + public static HttpInput parse(String watchId, XContentParser parser, HttpRequestTemplate.Parser requestParser) throws IOException { + Set extract = null; + HttpRequestTemplate request = null; + HttpContentType expectedResponseBodyType = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Field.REQUEST.match(currentFieldName, parser.getDeprecationHandler())) { + try { + request = requestParser.parse(parser); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse [{}] input for watch [{}]. failed to parse http request " + + "template", pe, TYPE, watchId); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (Field.EXTRACT.getPreferredName().equals(currentFieldName)) { + extract = new HashSet<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.VALUE_STRING) { + extract.add(parser.text()); + } else { + throw new ElasticsearchParseException("could not parse [{}] input for watch [{}]. 
expected a string value as " + + "an [{}] item but found [{}] instead", TYPE, watchId, currentFieldName, token); + } + } + } else { + throw new ElasticsearchParseException("could not parse [{}] input for watch [{}]. unexpected array field [{}]", TYPE, + watchId, currentFieldName); + } + } else if (token == XContentParser.Token.VALUE_STRING) { + if (Field.RESPONSE_CONTENT_TYPE.match(currentFieldName, parser.getDeprecationHandler())) { + expectedResponseBodyType = HttpContentType.resolve(parser.text()); + if (expectedResponseBodyType == null) { + throw new ElasticsearchParseException("could not parse [{}] input for watch [{}]. unknown content type [{}]", + TYPE, watchId, parser.text()); + } + } else { + throw new ElasticsearchParseException("could not parse [{}] input for watch [{}]. unexpected string field [{}]", + TYPE, watchId, currentFieldName); + } + } else { + throw new ElasticsearchParseException("could not parse [{}] input for watch [{}]. unexpected token [{}]", TYPE, watchId, + token); + } + } + + if (request == null) { + throw new ElasticsearchParseException("could not parse [{}] input for watch [{}]. missing require [{}] field", TYPE, watchId, + Field.REQUEST.getPreferredName()); + } + + if (expectedResponseBodyType == HttpContentType.TEXT && extract != null ) { + throw new ElasticsearchParseException("could not parse [{}] input for watch [{}]. key extraction is not supported for content" + + " type [{}]", TYPE, watchId, expectedResponseBodyType); + } + + return new HttpInput(request, expectedResponseBodyType, extract); + } + + public static Builder builder(HttpRequestTemplate httpRequest) { + return new Builder(httpRequest); + } + + public static class Result extends Input.Result { + + @Nullable private final HttpRequest request; + final int statusCode; + + public Result(HttpRequest request, int statusCode, Payload payload) { + super(TYPE, payload); + this.request = request; + this.statusCode = statusCode; + } + + public Result(@Nullable HttpRequest request, Exception e) { + super(TYPE, e); + this.request = request; + this.statusCode = -1; + } + + public HttpRequest request() { + return request; + } + + @Override + protected XContentBuilder typeXContent(XContentBuilder builder, Params params) throws IOException { + if (request == null) { + return builder; + } + builder.startObject(type); + builder.field(Field.REQUEST.getPreferredName(), request, params); + if (statusCode > 0) { + builder.field(Field.STATUS_CODE.getPreferredName(), statusCode); + } + return builder.endObject(); + } + } + + public static class Builder implements Input.Builder { + + private final HttpRequestTemplate request; + private final Set extractKeys = new HashSet<>(); + private HttpContentType expectedResponseXContentType = null; + + private Builder(HttpRequestTemplate request) { + this.request = request; + } + + public Builder extractKeys(Collection keys) { + extractKeys.addAll(keys); + return this; + } + + public Builder extractKeys(String... keys) { + Collections.addAll(extractKeys, keys); + return this; + } + + public Builder expectedResponseXContentType(HttpContentType expectedResponseXContentType) { + this.expectedResponseXContentType = expectedResponseXContentType; + return this; + } + + @Override + public HttpInput build() { + return new HttpInput(request, expectedResponseXContentType, extractKeys.isEmpty() ? 
null : unmodifiableSet(extractKeys)); + } + } + + interface Field { + ParseField REQUEST = new ParseField("request"); + ParseField EXTRACT = new ParseField("extract"); + ParseField STATUS_CODE = new ParseField("status_code"); + ParseField RESPONSE_CONTENT_TYPE = new ParseField("response_content_type"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/http/HttpInputFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/http/HttpInputFactory.java new file mode 100644 index 0000000000000..2cb26ab1d6707 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/http/HttpInputFactory.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.input.http; + +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.input.InputFactory; + +import java.io.IOException; + +public final class HttpInputFactory extends InputFactory { + + private final HttpClient httpClient; + private final TextTemplateEngine templateEngine; + private final HttpRequestTemplate.Parser requestTemplateParser; + + public HttpInputFactory(Settings settings, HttpClient httpClient, TextTemplateEngine templateEngine, + HttpRequestTemplate.Parser requestTemplateParser) { + super(Loggers.getLogger(ExecutableHttpInput.class, settings)); + this.templateEngine = templateEngine; + this.httpClient = httpClient; + this.requestTemplateParser = requestTemplateParser; + } + + @Override + public String type() { + return HttpInput.TYPE; + } + + @Override + public HttpInput parseInput(String watchId, XContentParser parser) throws IOException { + return HttpInput.parse(watchId, parser, requestTemplateParser); + } + + @Override + public ExecutableHttpInput createExecutable(HttpInput input) { + return new ExecutableHttpInput(input, inputLogger, httpClient, templateEngine); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/none/ExecutableNoneInput.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/none/ExecutableNoneInput.java new file mode 100644 index 0000000000000..ff0eb3eacbb01 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/none/ExecutableNoneInput.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
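// --- Editor's note: illustrative sketch, not part of the change set above. ---
// Typical construction through the builder; the request template is assumed, and
// HttpContentType.JSON is used as an example of forcing the response content type
// (the parser above rejects extract keys for the TEXT content type).
HttpInput input = HttpInput.builder(requestTemplate)
        .extractKeys("hits.total", "took")                    // only these keys end up in the payload
        .expectedResponseXContentType(HttpContentType.JSON)   // skip content-type auto-detection
        .build();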
+ */ +package org.elasticsearch.xpack.watcher.input.none; + + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.input.ExecutableInput; +import org.elasticsearch.xpack.core.watcher.input.none.NoneInput; +import org.elasticsearch.xpack.core.watcher.watch.Payload; + +public class ExecutableNoneInput extends ExecutableInput { + + public ExecutableNoneInput(Logger logger) { + super(NoneInput.INSTANCE, logger); + } + + @Override + public NoneInput.Result execute(WatchExecutionContext ctx, Payload payload) { + return NoneInput.Result.INSTANCE; + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/none/NoneInputFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/none/NoneInputFactory.java new file mode 100644 index 0000000000000..b44d788ae56dd --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/none/NoneInputFactory.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.input.none; + +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.input.none.NoneInput; +import org.elasticsearch.xpack.watcher.input.InputFactory; + +import java.io.IOException; + +public class NoneInputFactory extends InputFactory { + + public NoneInputFactory(Settings settings) { + super(Loggers.getLogger(ExecutableNoneInput.class, settings)); + } + + @Override + public String type() { + return NoneInput.TYPE; + } + + @Override + public NoneInput parseInput(String watchId, XContentParser parser) throws IOException { + return NoneInput.parse(watchId, parser); + } + + @Override + public ExecutableNoneInput createExecutable(NoneInput input) { + return new ExecutableNoneInput(inputLogger); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/ExecutableSearchInput.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/ExecutableSearchInput.java new file mode 100644 index 0000000000000..83a4f1f85e732 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/ExecutableSearchInput.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.input.search; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.script.Script; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.input.ExecutableInput; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.WatcherClientHelper; +import org.elasticsearch.xpack.watcher.support.XContentFilterKeysUtils; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateService; + +import java.util.Map; + +import static org.elasticsearch.xpack.watcher.input.search.SearchInput.TYPE; + +/** + * An input that executes search and returns the search response as the initial payload + */ +public class ExecutableSearchInput extends ExecutableInput { + + public static final SearchType DEFAULT_SEARCH_TYPE = SearchType.QUERY_THEN_FETCH; + + private final Client client; + private final WatcherSearchTemplateService searchTemplateService; + private final TimeValue timeout; + + public ExecutableSearchInput(SearchInput input, Logger logger, Client client, WatcherSearchTemplateService searchTemplateService, + TimeValue defaultTimeout) { + super(input, logger); + this.client = client; + this.searchTemplateService = searchTemplateService; + this.timeout = input.getTimeout() != null ? 
input.getTimeout() : defaultTimeout; + } + + @Override + public SearchInput.Result execute(WatchExecutionContext ctx, Payload payload) { + WatcherSearchTemplateRequest request = null; + try { + Script template = input.getRequest().getOrCreateTemplate(); + String renderedTemplate = searchTemplateService.renderTemplate(template, ctx, payload); + request = new WatcherSearchTemplateRequest(input.getRequest(), new BytesArray(renderedTemplate)); + return doExecute(ctx, request); + } catch (Exception e) { + logger.error("failed to execute [{}] input for watch [{}], reason [{}]", TYPE, ctx.watch().id(), e.getMessage()); + return new SearchInput.Result(request, e); + } + } + + SearchInput.Result doExecute(WatchExecutionContext ctx, WatcherSearchTemplateRequest request) throws Exception { + if (logger.isTraceEnabled()) { + logger.trace("[{}] running query for [{}] [{}]", ctx.id(), ctx.watch().id(), request.getSearchSource().utf8ToString()); + } + + SearchRequest searchRequest = searchTemplateService.toSearchRequest(request); + final SearchResponse response = WatcherClientHelper.execute(ctx.watch(), client, + () -> client.search(searchRequest).actionGet(timeout)); + + if (logger.isDebugEnabled()) { + logger.debug("[{}] found [{}] hits", ctx.id(), response.getHits().getTotalHits()); + for (SearchHit hit : response.getHits()) { + logger.debug("[{}] hit [{}]", ctx.id(), hit.getSourceAsMap()); + } + } + + final Payload payload; + if (input.getExtractKeys() != null) { + BytesReference bytes = XContentHelper.toXContent(response, XContentType.JSON, false); + // EMPTY is safe here because we never use namedObject + try (XContentParser parser = XContentHelper + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, bytes)) { + Map filteredKeys = XContentFilterKeysUtils.filterMapOrdered(input.getExtractKeys(), parser); + payload = new Payload.Simple(filteredKeys); + } + } else { + payload = new Payload.XContent(response); + } + + return new SearchInput.Result(request, payload); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/SearchInput.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/SearchInput.java new file mode 100644 index 0000000000000..e8acf1eae0889 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/SearchInput.java @@ -0,0 +1,243 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.input.search; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +import static java.util.Collections.unmodifiableSet; +import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; + +public class SearchInput implements Input { + + public static final String TYPE = "search"; + + private final WatcherSearchTemplateRequest request; + @Nullable private final Set extractKeys; + @Nullable private final TimeValue timeout; + @Nullable private final DateTimeZone dynamicNameTimeZone; + + public SearchInput(WatcherSearchTemplateRequest request, @Nullable Set extractKeys, + @Nullable TimeValue timeout, @Nullable DateTimeZone dynamicNameTimeZone) { + this.request = request; + this.extractKeys = extractKeys; + this.timeout = timeout; + this.dynamicNameTimeZone = dynamicNameTimeZone; + } + + @Override + public String type() { + return TYPE; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + SearchInput that = (SearchInput) o; + + if (request != null ? !request.equals(that.request) : that.request != null) return false; + if (extractKeys != null ? !extractKeys.equals(that.extractKeys) : that.extractKeys != null) return false; + if (timeout != null ? !timeout.equals(that.timeout) : that.timeout != null) return false; + return !(dynamicNameTimeZone != null ? !dynamicNameTimeZone.equals(that.dynamicNameTimeZone) : that.dynamicNameTimeZone != null); + } + + @Override + public int hashCode() { + int result = request != null ? request.hashCode() : 0; + result = 31 * result + (extractKeys != null ? extractKeys.hashCode() : 0); + result = 31 * result + (timeout != null ? timeout.hashCode() : 0); + result = 31 * result + (dynamicNameTimeZone != null ? 
dynamicNameTimeZone.hashCode() : 0); + return result; + } + + public WatcherSearchTemplateRequest getRequest() { + return request; + } + + public Set getExtractKeys() { + return extractKeys; + } + + public TimeValue getTimeout() { + return timeout; + } + + public DateTimeZone getDynamicNameTimeZone() { + return dynamicNameTimeZone; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (request != null) { + builder.field(Field.REQUEST.getPreferredName(), request); + } + if (extractKeys != null) { + builder.field(Field.EXTRACT.getPreferredName(), extractKeys); + } + if (timeout != null) { + builder.humanReadableField(Field.TIMEOUT.getPreferredName(), Field.TIMEOUT_HUMAN.getPreferredName(), timeout); + } + if (dynamicNameTimeZone != null) { + builder.field(Field.DYNAMIC_NAME_TIMEZONE.getPreferredName(), dynamicNameTimeZone); + } + builder.endObject(); + return builder; + } + + public static SearchInput parse(String watchId, XContentParser parser) throws IOException { + WatcherSearchTemplateRequest request = null; + Set extract = null; + TimeValue timeout = null; + DateTimeZone dynamicNameTimeZone = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Field.REQUEST.match(currentFieldName, parser.getDeprecationHandler())) { + try { + request = WatcherSearchTemplateRequest.fromXContent(parser, ExecutableSearchInput.DEFAULT_SEARCH_TYPE); + } catch (ElasticsearchParseException srpe) { + throw new ElasticsearchParseException("could not parse [{}] input for watch [{}]. failed to parse [{}]", srpe, TYPE, + watchId, currentFieldName); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (Field.EXTRACT.match(currentFieldName, parser.getDeprecationHandler())) { + extract = new HashSet<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.VALUE_STRING) { + extract.add(parser.text()); + } else { + throw new ElasticsearchParseException("could not parse [{}] input for watch [{}]. expected a string value in " + + "[{}] array, but found [{}] instead", TYPE, watchId, currentFieldName, token); + } + } + } else { + throw new ElasticsearchParseException("could not parse [{}] input for watch [{}]. unexpected array field [{}]", TYPE, + watchId, currentFieldName); + } + } else if (Field.TIMEOUT.match(currentFieldName, parser.getDeprecationHandler())) { + timeout = timeValueMillis(parser.longValue()); + } else if (Field.TIMEOUT_HUMAN.match(currentFieldName, parser.getDeprecationHandler())) { + // Parser for human specified timeouts and 2.x compatibility + timeout = WatcherDateTimeUtils.parseTimeValue(parser, Field.TIMEOUT_HUMAN.toString()); + } else if (Field.DYNAMIC_NAME_TIMEZONE.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_STRING) { + dynamicNameTimeZone = DateTimeZone.forID(parser.text()); + } else { + throw new ElasticsearchParseException("could not parse [{}] input for watch [{}]. failed to parse [{}]. must be a " + + "string value (e.g. 'UTC' or '+01:00').", TYPE, watchId, currentFieldName); + } + } else { + throw new ElasticsearchParseException("could not parse [{}] input for watch [{}]. 
unexpected token [{}]", TYPE, watchId, + token); + } + } + + if (request == null) { + throw new ElasticsearchParseException("could not parse [{}] input for watch [{}]. missing required [{}] field", TYPE, + watchId, Field.REQUEST.getPreferredName()); + } + return new SearchInput(request, extract, timeout, dynamicNameTimeZone); + } + + public static Builder builder(WatcherSearchTemplateRequest request) { + return new Builder(request); + } + + public static class Result extends Input.Result { + + @Nullable private final WatcherSearchTemplateRequest request; + + public Result(WatcherSearchTemplateRequest request, Payload payload) { + super(TYPE, payload); + this.request = request; + } + + public Result(@Nullable WatcherSearchTemplateRequest request, Exception e) { + super(TYPE, e); + this.request = request; + } + + public WatcherSearchTemplateRequest executedRequest() { + return request; + } + + @Override + protected XContentBuilder typeXContent(XContentBuilder builder, Params params) throws IOException { + if (request == null) { + return builder; + } + builder.startObject(type); + builder.field(Field.REQUEST.getPreferredName(), request); + return builder.endObject(); + } + } + + public static class Builder implements Input.Builder { + + private final WatcherSearchTemplateRequest request; + private final Set extractKeys = new HashSet<>(); + private TimeValue timeout; + private DateTimeZone dynamicNameTimeZone; + + private Builder(WatcherSearchTemplateRequest request) { + this.request = request; + } + + public Builder extractKeys(Collection keys) { + extractKeys.addAll(keys); + return this; + } + + public Builder extractKeys(String... keys) { + Collections.addAll(extractKeys, keys); + return this; + } + + public Builder timeout(TimeValue readTimeout) { + this.timeout = readTimeout; + return this; + } + + public Builder dynamicNameTimeZone(DateTimeZone dynamicNameTimeZone) { + this.dynamicNameTimeZone = dynamicNameTimeZone; + return this; + } + + @Override + public SearchInput build() { + return new SearchInput(request, extractKeys.isEmpty() ? null : unmodifiableSet(extractKeys), timeout, dynamicNameTimeZone); + } + } + + public interface Field { + ParseField REQUEST = new ParseField("request"); + ParseField EXTRACT = new ParseField("extract"); + ParseField TIMEOUT = new ParseField("timeout_in_millis"); + ParseField TIMEOUT_HUMAN = new ParseField("timeout"); + ParseField DYNAMIC_NAME_TIMEZONE = new ParseField("dynamic_name_timezone"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/SearchInputFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/SearchInputFactory.java new file mode 100644 index 0000000000000..2342c3e6ca69d --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/SearchInputFactory.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
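
A small sketch of the builder defined above (illustrative, not part of the patch): it creates a `search` input whose result payload is reduced to the `hits.total` field and which overrides the factory's default search timeout. The `WatcherSearchTemplateRequest` is assumed to be constructed elsewhere; its construction is not part of this file.

```java
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.xpack.watcher.input.search.SearchInput;
import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest;

class SearchInputSketch {
    // Builds a search input whose payload keeps only "hits.total" (via the extract keys)
    // and whose search is given 30 seconds instead of the configured default timeout.
    static SearchInput totalHitsOnly(WatcherSearchTemplateRequest request) {
        return SearchInput.builder(request)
                .extractKeys("hits.total")
                .timeout(TimeValue.timeValueSeconds(30))
                .build();
    }
}
```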
+ */ +package org.elasticsearch.xpack.watcher.input.search; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.xpack.watcher.input.InputFactory; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateService; + +import java.io.IOException; + +public class SearchInputFactory extends InputFactory { + + private final Client client; + private final TimeValue defaultTimeout; + private final WatcherSearchTemplateService searchTemplateService; + + public SearchInputFactory(Settings settings, Client client, NamedXContentRegistry xContentRegistry, + ScriptService scriptService) { + super(Loggers.getLogger(ExecutableSearchInput.class, settings)); + this.client = client; + this.defaultTimeout = settings.getAsTime("xpack.watcher.input.search.default_timeout", TimeValue.timeValueMinutes(1)); + this.searchTemplateService = new WatcherSearchTemplateService(settings, scriptService, xContentRegistry); + } + + @Override + public String type() { + return SearchInput.TYPE; + } + + @Override + public SearchInput parseInput(String watchId, XContentParser parser) throws IOException { + return SearchInput.parse(watchId, parser); + } + + @Override + public ExecutableSearchInput createExecutable(SearchInput input) { + return new ExecutableSearchInput(input, inputLogger, client, searchTemplateService, defaultTimeout); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/simple/ExecutableSimpleInput.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/simple/ExecutableSimpleInput.java new file mode 100644 index 0000000000000..97cb764fbd279 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/simple/ExecutableSimpleInput.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.input.simple; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.input.ExecutableInput; +import org.elasticsearch.xpack.core.watcher.watch.Payload; + +/** + * This class just defines a simple xcontent map as an input + */ +public class ExecutableSimpleInput extends ExecutableInput { + + public ExecutableSimpleInput(SimpleInput input, Logger logger) { + super(input, logger); + } + + @Override + public SimpleInput.Result execute(WatchExecutionContext ctx, Payload payload) { + return new SimpleInput.Result(input.getPayload()); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/simple/SimpleInput.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/simple/SimpleInput.java new file mode 100644 index 0000000000000..584b3eaf172ed --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/simple/SimpleInput.java @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.input.simple; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.core.watcher.watch.Payload; + +import java.io.IOException; + +public class SimpleInput implements Input { + + public static final String TYPE = "simple"; + + private final Payload payload; + + public SimpleInput(Payload payload) { + this.payload = payload; + } + + @Override + public String type() { + return TYPE; + } + + public Payload getPayload() { + return payload; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + SimpleInput that = (SimpleInput) o; + + return payload.equals(that.payload); + } + + @Override + public int hashCode() { + return payload.hashCode(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return payload.toXContent(builder, params); + } + + public static SimpleInput parse(String watchId, XContentParser parser) throws IOException { + if (parser.currentToken() != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("could not parse [{}] input for watch [{}]. expected an object but found [{}] instead", + TYPE, watchId, parser.currentToken()); + } + Payload payload = new Payload.Simple(parser.map()); + return new SimpleInput(payload); + } + + public static Builder builder(Payload payload) { + return new Builder(payload); + } + + public static class Result extends Input.Result { + + public Result(Payload payload) { + super(TYPE, payload); + } + + @Override + protected XContentBuilder typeXContent(XContentBuilder builder, Params params) throws IOException { + return builder; + } + } + + public static class Builder implements Input.Builder { + + private final Payload payload; + + private Builder(Payload payload) { + this.payload = payload; + } + + @Override + public SimpleInput build() { + return new SimpleInput(payload); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/simple/SimpleInputFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/simple/SimpleInputFactory.java new file mode 100644 index 0000000000000..91089f165dd32 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/simple/SimpleInputFactory.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
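
For contrast with the search input, a sketch of building the `simple` input defined above (illustrative only): a simple input just echoes a fixed payload on every execution. The payload key and value here are made up.

```java
import org.elasticsearch.xpack.core.watcher.watch.Payload;
import org.elasticsearch.xpack.watcher.input.simple.SimpleInput;

import java.util.Collections;

class SimpleInputSketch {
    // A simple input always produces the same payload, here a single "name" -> "value" entry.
    static SimpleInput fixedPayload() {
        return SimpleInput.builder(new Payload.Simple(Collections.<String, Object>singletonMap("name", "value"))).build();
    }
}
```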
+ */ +package org.elasticsearch.xpack.watcher.input.simple; + +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.watcher.input.InputFactory; + +import java.io.IOException; + +public class SimpleInputFactory extends InputFactory { + + public SimpleInputFactory(Settings settings) { + super(Loggers.getLogger(ExecutableSimpleInput.class, settings)); + } + + @Override + public String type() { + return SimpleInput.TYPE; + } + + @Override + public SimpleInput parseInput(String watchId, XContentParser parser) throws IOException { + return SimpleInput.parse(watchId, parser); + } + + @Override + public ExecutableSimpleInput createExecutable(SimpleInput input) { + return new ExecutableSimpleInput(input, inputLogger); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/transform/ExecutableTransformInput.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/transform/ExecutableTransformInput.java new file mode 100644 index 0000000000000..944a616d0671b --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/transform/ExecutableTransformInput.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.input.transform; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.input.ExecutableInput; +import org.elasticsearch.xpack.core.watcher.transform.ExecutableTransform; +import org.elasticsearch.xpack.core.watcher.transform.Transform; +import org.elasticsearch.xpack.core.watcher.watch.Payload; + +public final class ExecutableTransformInput extends ExecutableInput { + + private final ExecutableTransform executableTransform; + + ExecutableTransformInput(TransformInput input, Logger logger, ExecutableTransform executableTransform) { + super(input, logger); + this.executableTransform = executableTransform; + } + + @Override + public TransformInput.Result execute(WatchExecutionContext ctx, Payload payload) { + Transform.Result transformResult = executableTransform.execute(ctx, payload); + return new TransformInput.Result(transformResult.payload()); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/transform/TransformInput.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/transform/TransformInput.java new file mode 100644 index 0000000000000..f8e6191dfa118 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/transform/TransformInput.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.watcher.input.transform;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.xpack.core.watcher.input.Input;
+import org.elasticsearch.xpack.core.watcher.transform.Transform;
+import org.elasticsearch.xpack.core.watcher.watch.Payload;
+
+import java.io.IOException;
+import java.util.Objects;
+
+/**
+ * The transform input allows configuring a transformation that should be
+ * placed between two other inputs in a chained input in order to support
+ * easy data transformations.
+ *
+ * This class does not have a builder, as it just consists of a single
+ * transform.
+ */
+public class TransformInput implements Input {
+
+    public static final String TYPE = "transform";
+
+    private final Transform transform;
+
+    public TransformInput(Transform transform) {
+        this.transform = transform;
+    }
+
+    public Transform getTransform() {
+        return transform;
+    }
+
+    @Override
+    public String type() {
+        return TYPE;
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject().field(transform.type(), transform, params).endObject();
+        return builder;
+    }
+
+    @Override
+    public int hashCode() {
+        return transform.hashCode();
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+
+        TransformInput that = (TransformInput) o;
+
+        return Objects.equals(transform, that.transform);
+    }
+
+    static class Result extends Input.Result {
+
+        Result(Payload payload) {
+            super(TYPE, payload);
+        }
+
+        @Override
+        protected XContentBuilder typeXContent(XContentBuilder builder, Params params) throws IOException {
+            return builder;
+        }
+    }
+}
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/transform/TransformInputFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/transform/TransformInputFactory.java
new file mode 100644
index 0000000000000..d1524f99945c8
--- /dev/null
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/transform/TransformInputFactory.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.watcher.input.transform;
+
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentParserUtils;
+import org.elasticsearch.xpack.core.watcher.transform.ExecutableTransform;
+import org.elasticsearch.xpack.core.watcher.transform.Transform;
+import org.elasticsearch.xpack.core.watcher.transform.TransformFactory;
+import org.elasticsearch.xpack.core.watcher.transform.TransformRegistry;
+import org.elasticsearch.xpack.watcher.input.InputFactory;
+
+import java.io.IOException;
+
+/**
+ * Transform inputs should be used between two other inputs in a chained input,
+ * so that you can transform your data before sending it on to the next input.
+ *
+ * The transform input factory is pretty lightweight, as all the infrastructure
+ * for transforms can be reused here.
+ */
+public final class TransformInputFactory extends InputFactory {
+
+    private final TransformRegistry transformRegistry;
+
+    public TransformInputFactory(Settings settings, TransformRegistry transformRegistry) {
+        super(Loggers.getLogger(ExecutableTransformInput.class, settings));
+        this.transformRegistry = transformRegistry;
+    }
+
+    @Override
+    public String type() {
+        return TransformInput.TYPE;
+    }
+
+    @Override
+    public TransformInput parseInput(String watchId, XContentParser parser) throws IOException {
+        XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation);
+        Transform transform = transformRegistry.parse(watchId, parser).transform();
+        return new TransformInput(transform);
+    }
+
+    @Override
+    public ExecutableTransformInput createExecutable(TransformInput input) {
+        Transform transform = input.getTransform();
+        TransformFactory factory = transformRegistry.factory(transform.type());
+        ExecutableTransform executableTransform = factory.createExecutable(transform);
+        return new ExecutableTransformInput(input, inputLogger, executableTransform);
+    }
+}
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/NotificationService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/NotificationService.java
new file mode 100644
index 0000000000000..9870bcd086534
--- /dev/null
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/NotificationService.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
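
A sketch of how `TransformInput` and `TransformInputFactory` are meant to be combined (illustrative only, not part of the patch): a parsed `Transform` is wrapped as an input so it can sit between two other inputs of a chained input. The `Transform` instance and the factory are assumed to come from the surrounding watcher setup.

```java
import org.elasticsearch.xpack.core.watcher.transform.Transform;
import org.elasticsearch.xpack.watcher.input.transform.ExecutableTransformInput;
import org.elasticsearch.xpack.watcher.input.transform.TransformInput;
import org.elasticsearch.xpack.watcher.input.transform.TransformInputFactory;

class TransformInputSketch {
    // Wraps an already-parsed transform as an input so the chained input can run it
    // between two other inputs; the factory resolves the matching executable transform.
    static ExecutableTransformInput wrap(TransformInputFactory factory, Transform transform) {
        TransformInput input = new TransformInput(transform);
        return factory.createExecutable(input);
    }
}
```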
+ */ +package org.elasticsearch.xpack.watcher.notification; + +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.function.BiFunction; + +/** + * Basic notification service + */ +public abstract class NotificationService extends AbstractComponent { + + private final String type; + // both are guarded by this + private Map accounts; + private Account defaultAccount; + + public NotificationService(Settings settings, String type) { + super(settings); + this.type = type; + } + + protected synchronized void setAccountSetting(Settings settings) { + Tuple, Account> accounts = buildAccounts(settings, this::createAccount); + this.accounts = Collections.unmodifiableMap(accounts.v1()); + this.defaultAccount = accounts.v2(); + } + + protected abstract Account createAccount(String name, Settings accountSettings); + + public Account getAccount(String name) { + // note this is not final since we mock it in tests and that causes + // trouble since final methods can't be mocked... + final Map accounts; + final Account defaultAccount; + synchronized (this) { // must read under sync block otherwise it might be inconsistent + accounts = this.accounts; + defaultAccount = this.defaultAccount; + } + Account theAccount = accounts.getOrDefault(name, defaultAccount); + if (theAccount == null && name == null) { + throw new IllegalArgumentException("no accounts of type [" + type + "] configured. " + + "Please set up an account using the [xpack.notification." + type +"] settings"); + } + if (theAccount == null) { + throw new IllegalArgumentException("no account found for name: [" + name + "]"); + } + return theAccount; + } + + private Tuple, A> buildAccounts(Settings settings, BiFunction accountFactory) { + Settings accountsSettings = settings.getByPrefix("xpack.notification." + type + ".").getAsSettings("account"); + Map accounts = new HashMap<>(); + for (String name : accountsSettings.names()) { + Settings accountSettings = accountsSettings.getAsSettings(name); + A account = accountFactory.apply(name, accountSettings); + accounts.put(name, account); + } + + final String defaultAccountName = settings.get("xpack.notification." + type + ".default_account"); + A defaultAccount; + if (defaultAccountName == null) { + if (accounts.isEmpty()) { + defaultAccount = null; + } else { + A account = accounts.values().iterator().next(); + defaultAccount = account; + + } + } else { + defaultAccount = accounts.get(defaultAccountName); + if (defaultAccount == null) { + throw new SettingsException("could not find default account [" + defaultAccountName + "]"); + } + } + return new Tuple<>(accounts, defaultAccount); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Account.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Account.java new file mode 100644 index 0000000000000..8ba8d030524e1 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Account.java @@ -0,0 +1,343 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
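
To make the account resolution in `NotificationService` concrete, here is a sketch of the settings layout `buildAccounts` consumes for a notification type of `email`; the account names and hosts are made up. With these settings, `getAccount("dev")` returns the `dev` account, while `getAccount(null)` or an unknown name falls back to the configured `ops` default.

```java
import org.elasticsearch.common.settings.Settings;

class NotificationAccountSettingsSketch {
    // Two accounts under xpack.notification.email.account.*, with "ops" marked as the default.
    static Settings twoEmailAccounts() {
        return Settings.builder()
                .put("xpack.notification.email.account.ops.smtp.host", "smtp.example.org")
                .put("xpack.notification.email.account.dev.smtp.host", "localhost")
                .put("xpack.notification.email.default_account", "ops")
                .build();
    }
}
```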
+ */ +package org.elasticsearch.xpack.watcher.notification.email; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.SpecialPermission; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.watcher.crypto.CryptoService; + +import javax.activation.CommandMap; +import javax.activation.MailcapCommandMap; +import javax.mail.MessagingException; +import javax.mail.Session; +import javax.mail.Transport; +import javax.mail.internet.InternetAddress; +import javax.mail.internet.MimeMessage; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; +import java.util.Properties; + +public class Account { + + static final String SMTP_PROTOCOL = "smtp"; + + static { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(new SpecialPermission()); + } + AccessController.doPrivileged((PrivilegedAction) () -> { + // required as java doesn't always find the correct mailcap to properly handle mime types + final MailcapCommandMap mailcap = (MailcapCommandMap) CommandMap.getDefaultCommandMap(); + mailcap.addMailcap("text/html;; x-java-content-handler=com.sun.mail.handlers.text_html"); + mailcap.addMailcap("text/xml;; x-java-content-handler=com.sun.mail.handlers.text_xml"); + mailcap.addMailcap("text/plain;; x-java-content-handler=com.sun.mail.handlers.text_plain"); + mailcap.addMailcap("multipart/*;; x-java-content-handler=com.sun.mail.handlers.multipart_mixed"); + mailcap.addMailcap("message/rfc822;; x-java-content-handler=com.sun.mail.handlers.message_rfc822"); + CommandMap.setDefaultCommandMap(mailcap); + return null; + }); + } + + // exists only to allow ensuring class is initialized + public static void init() {} + + static final Settings DEFAULT_SMTP_TIMEOUT_SETTINGS = Settings.builder() + .put("connection_timeout", TimeValue.timeValueMinutes(2)) + .put("write_timeout", TimeValue.timeValueMinutes(2)) + .put("timeout", TimeValue.timeValueMinutes(2)) + .build(); + + private final Config config; + private final CryptoService cryptoService; + private final Logger logger; + private final Session session; + + Account(Config config, CryptoService cryptoService, Logger logger) { + this.config = config; + this.cryptoService = cryptoService; + this.logger = logger; + session = config.createSession(); + } + + public String name() { + return config.name; + } + + Config getConfig() { + return config; + } + + public Email send(Email email, Authentication auth, Profile profile) throws MessagingException { + + // applying the defaults on missing emails fields + email = config.defaults.apply(email); + + if (email.to == null) { + throw new SettingsException("missing required email [to] field"); + } + + Transport transport = session.getTransport(SMTP_PROTOCOL); + + String user = auth != null ? 
auth.user() : config.smtp.user; + if (user == null) { + InternetAddress localAddress = InternetAddress.getLocalAddress(session); + // null check needed, because if the local host does not resolve, this may be null + // this can happen in wrongly setup linux distributions + if (localAddress != null) { + user = localAddress.getAddress(); + } + } + + String password = null; + if (auth != null && auth.password() != null) { + password = new String(auth.password().text(cryptoService)); + } else if (config.smtp.password != null) { + password = new String(config.smtp.password); + } + + if (profile == null) { + profile = config.profile; + } + + executeConnect(transport, user, password); + ClassLoader contextClassLoader = null; + try { + MimeMessage message = profile.toMimeMessage(email, session); + String mid = message.getMessageID(); + message.saveChanges(); + if (mid != null) { + // saveChanges may rewrite/remove the message id, so + // we need to add it back + message.setHeader(Profile.MESSAGE_ID_HEADER, mid); + } + + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + // unprivileged code such as scripts do not have SpecialPermission + sm.checkPermission(new SpecialPermission()); + } + contextClassLoader = AccessController.doPrivileged((PrivilegedAction) () -> + Thread.currentThread().getContextClassLoader()); + // if we cannot get the context class loader, changing does not make sense, as we run into the danger of not being able to + // change it back + if (contextClassLoader != null) { + setContextClassLoader(this.getClass().getClassLoader()); + } + transport.sendMessage(message, message.getAllRecipients()); + } finally { + try { + transport.close(); + } catch (MessagingException me) { + logger.error("failed to close email transport for account [{}]", config.name); + } + if (contextClassLoader != null) { + setContextClassLoader(contextClassLoader); + } + } + return email; + } + + private void executeConnect(Transport transport, String user, String password) throws MessagingException { + SpecialPermission.check(); + try { + AccessController.doPrivileged((PrivilegedExceptionAction) () -> { + transport.connect(config.smtp.host, config.smtp.port, user, password); + return null; + }); + } catch (PrivilegedActionException e) { + throw (MessagingException) e.getCause(); + } + } + + private void setContextClassLoader(final ClassLoader classLoader) { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + // unprivileged code such as scripts do not have SpecialPermission + sm.checkPermission(new SpecialPermission()); + } + AccessController.doPrivileged((PrivilegedAction) () -> { + Thread.currentThread().setContextClassLoader(classLoader); + return null; + }); + } + + static class Config { + + static final String SMTP_SETTINGS_PREFIX = "mail.smtp."; + + final String name; + final Profile profile; + final Smtp smtp; + final EmailDefaults defaults; + + Config(String name, Settings settings) { + this.name = name; + profile = Profile.resolve(settings.get("profile"), Profile.STANDARD); + defaults = new EmailDefaults(name, settings.getAsSettings("email_defaults")); + smtp = new Smtp(settings.getAsSettings(SMTP_PROTOCOL)); + if (smtp.host == null) { + String msg = "missing required email account setting for account [" + name + "]. 
'smtp.host' must be configured"; + throw new SettingsException(msg); + } + } + + public Session createSession() { + return Session.getInstance(smtp.properties); + } + + static class Smtp { + + final String host; + final int port; + final String user; + final char[] password; + final Properties properties; + + Smtp(Settings settings) { + host = settings.get("host", settings.get("localaddress", settings.get("local_address"))); + port = settings.getAsInt("port", settings.getAsInt("localport", settings.getAsInt("local_port", 25))); + user = settings.get("user", settings.get("from", null)); + String passStr = settings.get("password", null); + password = passStr != null ? passStr.toCharArray() : null; + properties = loadSmtpProperties(settings); + } + + /** + * loads the standard Java Mail properties as settings from the given account settings. + * The standard settings are not that readable, therefore we enabled the user to configure + * those in a readable way... this method first loads the smtp settings (which corresponds to + * all Java Mail {@code mail.smtp.*} settings), and then replaces the readable keys to the official + * "unreadable" keys. We'll then use these settings when crea + */ + static Properties loadSmtpProperties(Settings settings) { + Settings.Builder builder = Settings.builder().put(DEFAULT_SMTP_TIMEOUT_SETTINGS).put(settings); + replaceTimeValue(builder, "connection_timeout", "connectiontimeout"); + replaceTimeValue(builder, "write_timeout", "writetimeout"); + replaceTimeValue(builder, "timeout", "timeout"); + + replace(builder, "local_address", "localaddress"); + replace(builder, "local_port", "localport"); + replace(builder, "send_partial", "sendpartial"); + replace(builder, "wait_on_quit", "quitwait"); + + settings = builder.build(); + Properties props = new Properties(); + for (String key : settings.keySet()) { + props.setProperty(SMTP_SETTINGS_PREFIX + key, settings.get(key)); + } + return props; + } + + static void replace(Settings.Builder settings, String currentKey, String newKey) { + String value = settings.remove(currentKey); + if (value != null) { + settings.put(newKey, value); + } + } + + static void replaceTimeValue(Settings.Builder settings, String currentKey, String newKey) { + String value = settings.remove(currentKey); + if (value != null) { + settings.put(newKey, TimeValue.parseTimeValue(value, currentKey).millis()); + } + } + } + + /** + * holds email fields that can be configured on the account. These fields + * will hold the default values for missing fields in email messages. Having + * the ability to create these default can substantially reduced the configuration + * needed on each watch (e.g. if all the emails are always sent to the same recipients + * one could set those here and leave them out on the watch definition). 
+ */ + static class EmailDefaults { + + final Email.Address from; + final Email.AddressList replyTo; + final Email.Priority priority; + final Email.AddressList to; + final Email.AddressList cc; + final Email.AddressList bcc; + final String subject; + + EmailDefaults(String accountName, Settings settings) { + try { + from = Email.Address.parse(settings, Email.Field.FROM.getPreferredName()); + replyTo = Email.AddressList.parse(settings, Email.Field.REPLY_TO.getPreferredName()); + priority = Email.Priority.parse(settings, Email.Field.PRIORITY.getPreferredName()); + to = Email.AddressList.parse(settings, Email.Field.TO.getPreferredName()); + cc = Email.AddressList.parse(settings, Email.Field.CC.getPreferredName()); + bcc = Email.AddressList.parse(settings, Email.Field.BCC.getPreferredName()); + subject = settings.get(Email.Field.SUBJECT.getPreferredName()); + } catch (IllegalArgumentException iae) { + throw new SettingsException("invalid email defaults in email account settings [" + accountName + "]", iae); + } + } + + Email apply(Email email) { + Email.Builder builder = Email.builder().copyFrom(email); + if (email.from == null) { + builder.from(from); + } + if (email.replyTo == null) { + builder.replyTo(replyTo); + } + if (email.priority == null) { + builder.priority(priority); + } + if (email.to == null) { + builder.to(to); + } + if (email.cc == null) { + builder.cc(cc); + } + if (email.bcc == null) { + builder.bcc(bcc); + } + if (email.subject == null) { + builder.subject(subject); + } + return builder.build(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + EmailDefaults that = (EmailDefaults) o; + + if (bcc != null ? !bcc.equals(that.bcc) : that.bcc != null) return false; + if (cc != null ? !cc.equals(that.cc) : that.cc != null) return false; + if (from != null ? !from.equals(that.from) : that.from != null) return false; + if (priority != that.priority) return false; + if (replyTo != null ? !replyTo.equals(that.replyTo) : that.replyTo != null) return false; + if (subject != null ? !subject.equals(that.subject) : that.subject != null) return false; + if (to != null ? !to.equals(that.to) : that.to != null) return false; + + return true; + } + + @Override + public int hashCode() { + int result = from != null ? from.hashCode() : 0; + result = 31 * result + (replyTo != null ? replyTo.hashCode() : 0); + result = 31 * result + (priority != null ? priority.hashCode() : 0); + result = 31 * result + (to != null ? to.hashCode() : 0); + result = 31 * result + (cc != null ? cc.hashCode() : 0); + result = 31 * result + (bcc != null ? bcc.hashCode() : 0); + result = 31 * result + (subject != null ? subject.hashCode() : 0); + return result; + } + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Attachment.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Attachment.java new file mode 100644 index 0000000000000..cc21ba9f03f44 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Attachment.java @@ -0,0 +1,272 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
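
Pulling the pieces of `Account.Config` together, this sketch (illustrative only) shows account settings as the class reads them: the `smtp.*` group becomes Java Mail `mail.smtp.*` session properties, with the readable timeout keys rewritten by `loadSmtpProperties` (for example `connection_timeout` becomes `connectiontimeout` in milliseconds), and `email_defaults.*` supplies the fields that `EmailDefaults` applies to outgoing emails. The values are hypothetical, and the `email_defaults` field names are assumed to match `Email.Field`'s preferred names.

```java
import org.elasticsearch.common.settings.Settings;

class EmailAccountConfigSketch {
    // Account settings as Account.Config consumes them: smtp connection details plus email defaults.
    // Config itself is package-private, so new Account.Config("ops", opsAccountSettings()) would be
    // called from within the email notification package.
    static Settings opsAccountSettings() {
        return Settings.builder()
                .put("smtp.host", "smtp.example.org")           // required, otherwise Config throws a SettingsException
                .put("smtp.port", 587)                          // default would be 25
                .put("smtp.user", "watcher@example.org")
                .put("smtp.connection_timeout", "30s")          // surfaces as mail.smtp.connectiontimeout=30000
                .put("email_defaults.from", "watcher@example.org")
                .put("email_defaults.to", "ops@example.org")
                .build();
    }
}
```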
+ */ +package org.elasticsearch.xpack.watcher.notification.email; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.inject.Provider; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.watcher.notification.email.support.BodyPartSource; + +import javax.activation.DataHandler; +import javax.activation.DataSource; +import javax.activation.FileDataSource; +import javax.mail.MessagingException; +import javax.mail.internet.MimeBodyPart; +import javax.mail.util.ByteArrayDataSource; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.file.Path; + +import static javax.mail.Part.ATTACHMENT; +import static javax.mail.Part.INLINE; + +public abstract class Attachment extends BodyPartSource { + + private final boolean inline; + + protected Attachment(String id, String name, String contentType, boolean inline) { + super(id, name, contentType); + this.inline = inline; + } + + @Override + public final MimeBodyPart bodyPart() throws MessagingException { + MimeBodyPart part = new MimeBodyPart(); + part.setContentID(id); + part.setFileName(name); + part.setDisposition(inline ? INLINE : ATTACHMENT); + writeTo(part); + return part; + } + + public abstract String type(); + + public boolean isInline() { + return inline; + } + + /** + * intentionally not emitting path as it may come as an information leak + */ + @Override + public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field("type", type()) + .field("id", id) + .field("name", name) + .field("content_type", contentType) + .endObject(); + } + + protected abstract void writeTo(MimeBodyPart part) throws MessagingException; + + public static class File extends Attachment { + + static final String TYPE = "file"; + + private final Path path; + private final DataSource dataSource; + + public File(String id, Path path, boolean inline) { + this(id, path.getFileName().toString(), path, inline); + } + + public File(String id, Path path, String contentType, boolean inline) { + this(id, path.getFileName().toString(), path, contentType, inline); + } + + @SuppressForbidden(reason = "uses toFile") + public File(String id, String name, Path path, boolean inline) { + this(id, name, path, fileTypeMap.getContentType(path.toFile()), inline); + } + + @SuppressForbidden(reason = "uses toFile") + public File(String id, String name, Path path, String contentType, boolean inline) { + super(id, name, contentType, inline); + this.path = path; + this.dataSource = new FileDataSource(path.toFile()); + } + + public Path path() { + return path; + } + + public String type() { + return TYPE; + } + + @Override + public void writeTo(MimeBodyPart part) throws MessagingException { + part.setDataHandler(new DataHandler(dataSource)); + } + } + + public static class Bytes extends Attachment { + + static final String TYPE = "bytes"; + + private final byte[] bytes; + + public Bytes(String id, byte[] bytes, String contentType, boolean inline) { + this(id, id, bytes, contentType, inline); + } + + public Bytes(String id, String name, byte[] bytes, boolean inline) { + this(id, name, bytes, fileTypeMap.getContentType(name), inline); + } + + public Bytes(String id, String name, byte[] 
bytes, String contentType, boolean inline) { + super(id, name, contentType, inline); + this.bytes = bytes; + } + + public String type() { + return TYPE; + } + + public byte[] bytes() { + return bytes; + } + + @Override + public void writeTo(MimeBodyPart part) throws MessagingException { + DataSource dataSource = new ByteArrayDataSource(bytes, contentType); + DataHandler handler = new DataHandler(dataSource); + part.setDataHandler(handler); + } + } + + public static class Stream extends Attachment { + + static final String TYPE = "stream"; + + private final Provider source; + + public Stream(String id, String name, boolean inline, Provider source) { + this(id, name, fileTypeMap.getContentType(name), inline, source); + } + + public Stream(String id, String name, String contentType, boolean inline, Provider source) { + super(id, name, contentType, inline); + this.source = source; + } + + @Override + public String type() { + return TYPE; + } + + @Override + protected void writeTo(MimeBodyPart part) throws MessagingException { + DataSource ds = new StreamDataSource(name, contentType, source); + DataHandler dh = new DataHandler(ds); + part.setDataHandler(dh); + } + + static class StreamDataSource implements DataSource { + + private final String name; + private final String contentType; + private final Provider source; + + StreamDataSource(String name, String contentType, Provider source) { + this.name = name; + this.contentType = contentType; + this.source = source; + } + + @Override + public InputStream getInputStream() throws IOException { + return source.get(); + } + + @Override + public OutputStream getOutputStream() throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String getContentType() { + return contentType; + } + + @Override + public String getName() { + return name; + } + } + + } + + public static class XContent extends Bytes { + + protected XContent(String id, ToXContent content, XContentType type) { + this(id, id, content, type); + } + + protected XContent(String id, String name, ToXContent content, XContentType type) { + super(id, name, bytes(name, content, type), mimeType(type), false); + } + + static String mimeType(XContentType type) { + switch (type) { + case JSON: return "application/json"; + case YAML: return "application/yaml"; + case SMILE: return "application/smile"; + case CBOR: return "application/cbor"; + default: + throw new IllegalArgumentException("unsupported xcontent attachment type [" + type.name() + "]"); + } + } + + static byte[] bytes(String name, ToXContent content, XContentType type) { + try { + XContentBuilder builder = XContentBuilder.builder(type.xContent()).prettyPrint(); + content.toXContent(builder, ToXContent.EMPTY_PARAMS); + return BytesReference.toBytes(BytesReference.bytes(builder)); + } catch (IOException ioe) { + throw new ElasticsearchException("could not create an xcontent attachment [" + name + "]", ioe); + } + } + + public static class Yaml extends XContent { + + public Yaml(String id, ToXContent content) { + super(id, content, XContentType.YAML); + } + + public Yaml(String id, String name, ToXContent content) { + super(id, name, content, XContentType.YAML); + } + + @Override + public String type() { + return "yaml"; + } + } + + public static class Json extends XContent { + + public Json(String id, ToXContent content) { + super(id, content, XContentType.JSON); + } + + public Json(String id, String name, ToXContent content) { + super(id, name, content, XContentType.JSON); + } + + @Override + public 
String type() { + return "json"; + } + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Authentication.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Authentication.java new file mode 100644 index 0000000000000..4ab76020bb6cf --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Authentication.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.email; + +import org.elasticsearch.xpack.core.watcher.common.secret.Secret; + +import java.util.Objects; + +public class Authentication { + + private final String user; + private final Secret password; + + public Authentication(String user, Secret password) { + this.user = user; + this.password = password; + } + + public String user() { + return user; + } + + public Secret password() { + return password; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Authentication that = (Authentication) o; + return Objects.equals(user, that.user) && + Objects.equals(password, that.password); + } + + @Override + public int hashCode() { + return Objects.hash(user, password); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/DataAttachment.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/DataAttachment.java new file mode 100644 index 0000000000000..76c38a932af90 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/DataAttachment.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
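
For a sense of how the attachment variants above are meant to be used, a short sketch (illustrative only) constructing a bytes attachment and a YAML rendering of a payload; the ids, names and content are made up.

```java
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.xpack.watcher.notification.email.Attachment;

class AttachmentSketch {
    // A binary attachment with an explicit content type, not rendered inline.
    static Attachment pdfReport(byte[] pdfBytes) {
        return new Attachment.Bytes("report", "report.pdf", pdfBytes, "application/pdf", false);
    }

    // The given payload serialized as YAML and attached under the given id/name.
    static Attachment payloadAsYaml(ToXContent payload) {
        return new Attachment.XContent.Yaml("payload", "payload.yml", payload);
    }
}
```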
+ */ +package org.elasticsearch.xpack.watcher.notification.email; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.watcher.watch.Payload; + +import java.io.IOException; +import java.util.Locale; +import java.util.Map; + +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArgument; + +public enum DataAttachment implements ToXContentObject { + + YAML() { + @Override + public String contentType() { + return XContentType.YAML.mediaType(); + } + + @Override + public Attachment create(String id, Map data) { + return new Attachment.XContent.Yaml(id, id, new Payload.Simple(data)); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject().field(Field.FORMAT.getPreferredName(), "yaml").endObject(); + } + }, + + JSON() { + @Override + public String contentType() { + return XContentType.JSON.mediaType(); + } + + @Override + public Attachment create(String id, Map data) { + return new Attachment.XContent.Json(id, id, new Payload.Simple(data)); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject().field(Field.FORMAT.getPreferredName(), "json").endObject(); + } + }; + + public static DataAttachment DEFAULT = YAML; + + public abstract String contentType(); + + public abstract Attachment create(String id, Map data); + + public static DataAttachment resolve(String format) { + switch (format.toLowerCase(Locale.ROOT)) { + case "yaml": return YAML; + case "json": return JSON; + default: + throw illegalArgument("unknown data attachment format [{}]", format); + } + } + + public static DataAttachment parse(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + if (token == XContentParser.Token.VALUE_NULL) { + return null; + } + if (token == XContentParser.Token.VALUE_BOOLEAN) { + return parser.booleanValue() ? DEFAULT : null; + } + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("could not parse data attachment. expected either a boolean value or an object but " + + "found [{}] instead", token); + } + + DataAttachment dataAttachment = DEFAULT; + + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (currentFieldName == null) { + throw new ElasticsearchParseException("could not parse data attachment. expected [{}] field but found [{}] instead", + Field.FORMAT.getPreferredName(), token); + } else if (Field.FORMAT.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_STRING) { + dataAttachment = resolve(parser.text()); + } else { + throw new ElasticsearchParseException("could not parse data attachment. expected string value for [{}] field but " + + "found [{}] instead", currentFieldName, token); + } + } else { + throw new ElasticsearchParseException("could not parse data attachment. 
unexpected field [{}]", currentFieldName); + } + } + + return dataAttachment; + } + + interface Field { + ParseField FORMAT = new ParseField("format"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Email.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Email.java new file mode 100644 index 0000000000000..88800f8709aa6 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Email.java @@ -0,0 +1,609 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.email; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import javax.mail.MessagingException; +import javax.mail.internet.AddressException; +import javax.mail.internet.InternetAddress; +import javax.mail.internet.MimeMessage; + +import static java.util.Collections.unmodifiableMap; + +public class Email implements ToXContentObject { + + final String id; + final Address from; + final AddressList replyTo; + final Priority priority; + final DateTime sentDate; + final AddressList to; + final AddressList cc; + final AddressList bcc; + final String subject; + final String textBody; + final String htmlBody; + final Map attachments; + + public Email(String id, Address from, AddressList replyTo, Priority priority, DateTime sentDate, + AddressList to, AddressList cc, AddressList bcc, String subject, String textBody, String htmlBody, + Map attachments) { + + this.id = id; + this.from = from; + this.replyTo = replyTo; + this.priority = priority; + this.sentDate = sentDate != null ? 
sentDate : new DateTime(DateTimeZone.UTC); + this.to = to; + this.cc = cc; + this.bcc = bcc; + this.subject = subject; + this.textBody = textBody; + this.htmlBody = htmlBody; + this.attachments = attachments; + } + + public String id() { + return id; + } + + public Address from() { + return from; + } + + public AddressList replyTo() { + return replyTo; + } + + public Priority priority() { + return priority; + } + + public DateTime sentDate() { + return sentDate; + } + + public AddressList to() { + return to; + } + + public AddressList cc() { + return cc; + } + + public AddressList bcc() { + return bcc; + } + + public String subject() { + return subject; + } + + public String textBody() { + return textBody; + } + + public String htmlBody() { + return htmlBody; + } + + public Map attachments() { + return attachments; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Field.ID.getPreferredName(), id); + if (from != null) { + builder.field(Field.FROM.getPreferredName(), from.toUnicodeString()); + } + if (replyTo != null) { + builder.field(Field.REPLY_TO.getPreferredName(), replyTo, params); + } + if (priority != null) { + builder.field(Field.PRIORITY.getPreferredName(), priority.value()); + } + builder.timeField(Field.SENT_DATE.getPreferredName(), sentDate); + if (to != null) { + builder.field(Field.TO.getPreferredName(), to, params); + } + if (cc != null) { + builder.field(Field.CC.getPreferredName(), cc, params); + } + if (bcc != null) { + builder.field(Field.BCC.getPreferredName(), bcc, params); + } + builder.field(Field.SUBJECT.getPreferredName(), subject); + if (textBody != null || htmlBody != null) { + builder.startObject(Field.BODY.getPreferredName()); + if (textBody != null) { + builder.field(Field.BODY_TEXT.getPreferredName(), textBody); + } + if (htmlBody != null) { + builder.field(Field.BODY_HTML.getPreferredName(), htmlBody); + } + builder.endObject(); + } + return builder.endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Email email = (Email) o; + + if (!id.equals(email.id)) return false; + + return true; + } + + @Override + public int hashCode() { + return id.hashCode(); + } + + public static Builder builder() { + return new Builder(); + } + + public static Email parse(XContentParser parser) throws IOException{ + Builder email = new Builder(); + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if ((token.isValue() || token == XContentParser.Token.START_OBJECT || token == XContentParser.Token.START_ARRAY) && + currentFieldName != null) { + if (Field.ID.match(currentFieldName, parser.getDeprecationHandler())) { + email.id(parser.text()); + } else if (Field.FROM.match(currentFieldName, parser.getDeprecationHandler())) { + email.from(Address.parse(currentFieldName, token, parser)); + } else if (Field.REPLY_TO.match(currentFieldName, parser.getDeprecationHandler())) { + email.replyTo(AddressList.parse(currentFieldName, token, parser)); + } else if (Field.TO.match(currentFieldName, parser.getDeprecationHandler())) { + email.to(AddressList.parse(currentFieldName, token, parser)); + } else if (Field.CC.match(currentFieldName, parser.getDeprecationHandler())) { + 
email.cc(AddressList.parse(currentFieldName, token, parser)); + } else if (Field.BCC.match(currentFieldName, parser.getDeprecationHandler())) { + email.bcc(AddressList.parse(currentFieldName, token, parser)); + } else if (Field.PRIORITY.match(currentFieldName, parser.getDeprecationHandler())) { + email.priority(Email.Priority.resolve(parser.text())); + } else if (Field.SENT_DATE.match(currentFieldName, parser.getDeprecationHandler())) { + email.sentDate(new DateTime(parser.text(), DateTimeZone.UTC)); + } else if (Field.SUBJECT.match(currentFieldName, parser.getDeprecationHandler())) { + email.subject(parser.text()); + } else if (Field.BODY.match(currentFieldName, parser.getDeprecationHandler())) { + String bodyField = currentFieldName; + if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { + email.textBody(parser.text()); + } else if (parser.currentToken() == XContentParser.Token.START_OBJECT) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (currentFieldName == null) { + throw new ElasticsearchParseException("could not parse email. empty [{}] field", bodyField); + } else if (Email.Field.BODY_TEXT.match(currentFieldName, parser.getDeprecationHandler())) { + email.textBody(parser.text()); + } else if (Email.Field.BODY_HTML.match(currentFieldName, parser.getDeprecationHandler())) { + email.htmlBody(parser.text()); + } else { + throw new ElasticsearchParseException("could not parse email. unexpected field [{}.{}] field", bodyField, + currentFieldName); + } + } + } + } else { + throw new ElasticsearchParseException("could not parse email. unexpected field [{}]", currentFieldName); + } + } + } + return email.build(); + } + + public static class Builder { + + private String id; + private Address from; + private AddressList replyTo; + private Priority priority; + private DateTime sentDate; + private AddressList to; + private AddressList cc; + private AddressList bcc; + private String subject; + private String textBody; + private String htmlBody; + private Map attachments = new HashMap<>(); + + private Builder() { + } + + public Builder copyFrom(Email email) { + id = email.id; + from = email.from; + replyTo = email.replyTo; + priority = email.priority; + sentDate = email.sentDate; + to = email.to; + cc = email.cc; + bcc = email.bcc; + subject = email.subject; + textBody = email.textBody; + htmlBody = email.htmlBody; + attachments.putAll(email.attachments); + return this; + } + + public Builder id(String id) { + this.id = id; + return this; + } + + public Builder from(String address) throws AddressException { + return from(new Address(address)); + } + + public Builder from(Address from) { + this.from = from; + return this; + } + + public Builder replyTo(AddressList replyTo) { + this.replyTo = replyTo; + return this; + } + + public Builder replyTo(String addresses) throws AddressException { + return replyTo(Email.AddressList.parse(addresses)); + } + + public Builder priority(Priority priority) { + this.priority = priority; + return this; + } + + public Builder sentDate(DateTime sentDate) { + this.sentDate = sentDate; + return this; + } + + public Builder to(String addresses) throws AddressException { + return to(AddressList.parse(addresses)); + } + + public Builder to(AddressList to) { + this.to = to; + return this; + } + + public AddressList to() { + return to; + } + + public Builder cc(String addresses) throws AddressException { + return 
cc(AddressList.parse(addresses)); + } + + public Builder cc(AddressList cc) { + this.cc = cc; + return this; + } + + public Builder bcc(String addresses) throws AddressException { + return bcc(AddressList.parse(addresses)); + } + + public Builder bcc(AddressList bcc) { + this.bcc = bcc; + return this; + } + + public Builder subject(String subject) { + this.subject = subject; + return this; + } + + public Builder textBody(String text) { + this.textBody = text; + return this; + } + + public Builder htmlBody(String html) { + this.htmlBody = html; + return this; + } + + public Builder attach(Attachment attachment) { + if (attachments == null) { + throw new IllegalStateException("Email has already been built!"); + } + attachments.put(attachment.id(), attachment); + return this; + } + + /** + * Build the email. Note that adding items to attachments or inlines + * after this is called is incorrect. + */ + public Email build() { + assert id != null : "email id should not be null"; + Email email = new Email(id, from, replyTo, priority, sentDate, to, cc, bcc, subject, textBody, htmlBody, + unmodifiableMap(attachments)); + attachments = null; + return email; + } + + } + + public enum Priority { + + HIGHEST(1), + HIGH(2), + NORMAL(3), + LOW(4), + LOWEST(5); + + static final String HEADER = "X-Priority"; + + private final int value; + + Priority(int value) { + this.value = value; + } + + public void applyTo(MimeMessage message) throws MessagingException { + message.setHeader(HEADER, String.valueOf(value)); + } + + public String value() { + return name().toLowerCase(Locale.ROOT); + } + + public static Priority resolve(String name) { + Priority priority = resolve(name, null); + if (priority == null) { + throw new IllegalArgumentException("[" + name + "] is not a valid email priority"); + } + return priority; + } + + public static Priority resolve(String name, Priority defaultPriority) { + if (name == null) { + return defaultPriority; + } + switch (name.toLowerCase(Locale.ROOT)) { + case "highest": return HIGHEST; + case "high": return HIGH; + case "normal": return NORMAL; + case "low": return LOW; + case "lowest": return LOWEST; + default: + return defaultPriority; + } + } + + public static Priority parse(Settings settings, String name) { + String value = settings.get(name); + if (value == null) { + return null; + } + return resolve(value); + } + } + + public static class Address extends javax.mail.internet.InternetAddress implements ToXContentFragment { + + public static final ParseField ADDRESS_NAME_FIELD = new ParseField("name"); + public static final ParseField ADDRESS_EMAIL_FIELD = new ParseField("email"); + + public Address(String address) throws AddressException { + super(address); + } + + public Address(String address, String personal) throws UnsupportedEncodingException { + super(address, personal, StandardCharsets.UTF_8.name()); + } + + public static Address parse(String field, XContentParser.Token token, XContentParser parser) throws IOException { + if (token == XContentParser.Token.VALUE_STRING) { + String text = parser.text(); + try { + return new Email.Address(parser.text()); + } catch (AddressException ae) { + String msg = "could not parse [" + text + "] in field [" + field + "] as address. 
address must be RFC822 encoded"; + throw new ElasticsearchParseException(msg, ae); + } + } + + if (token == XContentParser.Token.START_OBJECT) { + String email = null; + String name = null; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_STRING) { + if (ADDRESS_EMAIL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + email = parser.text(); + } else if (ADDRESS_NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + name = parser.text(); + } else { + throw new ElasticsearchParseException("could not parse [" + field + "] object as address. unknown address " + + "field [" + currentFieldName + "]"); + } + } + } + if (email == null) { + String msg = "could not parse [" + field + "] as address. address object must define an [email] field"; + throw new ElasticsearchParseException(msg); + } + try { + return name != null ? new Email.Address(email, name) : new Email.Address(email); + } catch (AddressException ae) { + throw new ElasticsearchParseException("could not parse [" + field + "] as address", ae); + } + + } + throw new ElasticsearchParseException("could not parse [{}] as address. address must either be a string (RFC822 encoded) or " + + "an object specifying the address [name] and [email]", field); + } + + public static Address parse(Settings settings, String name) { + String value = settings.get(name); + try { + return value != null ? new Address(value) : null; + } catch (AddressException ae) { + throw new IllegalArgumentException("[" + value + "] is not a valid RFC822 email address", ae); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } + } + + public static class AddressList implements Iterable
<Address>, ToXContentObject {
+
+        public static final AddressList EMPTY = new AddressList(Collections.<Address>emptyList());
+
+        private final List<Address> addresses;
+
+        public AddressList(List<Address> addresses) {
+            this.addresses = addresses;
+        }
+
+        public boolean isEmpty() {
+            return addresses.isEmpty();
+        }
+
+        @Override
+        public Iterator<Address> iterator() {
+            return addresses.iterator();
+        }
+
+        public Address[] toArray() {
+            return addresses.toArray(new Address[addresses.size()]);
+        }
+
+        public int size() {
+            return addresses.size();
+        }
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            builder.startArray();
+            for (Address address : addresses) {
+                builder.value(address.toUnicodeString());
+            }
+            return builder.endArray();
+        }
+
+        public static AddressList parse(String text) throws AddressException {
+            InternetAddress[] addresses = InternetAddress.parse(text);
+            List<Address> list = new ArrayList<>(addresses.length);
+            for (InternetAddress address : addresses) {
+                list.add(new Address(address.toUnicodeString()));
+            }
+            return new AddressList(list);
+        }
+
+        public static AddressList parse(Settings settings, String name) {
+            List<String> addresses = settings.getAsList(name);
+            if (addresses == null || addresses.isEmpty()) {
+                return null;
+            }
+            try {
+                List<Address>
list = new ArrayList<>(addresses.size()); + for (String address : addresses) { + list.add(new Address(address)); + } + return new AddressList(list); + } catch (AddressException ae) { + throw new IllegalArgumentException("[" + settings.get(name) + "] is not a valid list of RFC822 email addresses", ae); + } + } + + public static Email.AddressList parse(String field, XContentParser.Token token, XContentParser parser) throws IOException { + if (token == XContentParser.Token.VALUE_STRING) { + String text = parser.text(); + try { + return parse(parser.text()); + } catch (AddressException ae) { + throw new ElasticsearchParseException("could not parse field [" + field + "] with value [" + text + "] as address " + + "list. address(es) must be RFC822 encoded", ae); + } + } + if (token == XContentParser.Token.START_ARRAY) { + List addresses = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + addresses.add(Address.parse(field, token, parser)); + } + return new Email.AddressList(addresses); + } + throw new ElasticsearchParseException("could not parse [" + field + "] as address list. field must either be a string " + + "(comma-separated list of RFC822 encoded addresses) or an array of objects representing addresses"); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + AddressList addresses1 = (AddressList) o; + + if (!addresses.equals(addresses1.addresses)) return false; + + return true; + } + + @Override + public int hashCode() { + return addresses.hashCode(); + } + } + + interface Field { + ParseField ID = new ParseField("id"); + ParseField FROM = new ParseField("from"); + ParseField REPLY_TO = new ParseField("reply_to"); + ParseField PRIORITY = new ParseField("priority"); + ParseField SENT_DATE = new ParseField("sent_date"); + ParseField TO = new ParseField("to"); + ParseField CC = new ParseField("cc"); + ParseField BCC = new ParseField("bcc"); + ParseField SUBJECT = new ParseField("subject"); + ParseField BODY = new ParseField("body"); + ParseField BODY_TEXT = new ParseField("text"); + ParseField BODY_HTML = new ParseField("html"); + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java new file mode 100644 index 0000000000000..41a2ecc3bcc80 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java @@ -0,0 +1,174 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.email; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.watcher.crypto.CryptoService; +import org.elasticsearch.xpack.watcher.notification.NotificationService; + +import javax.mail.MessagingException; +import java.util.Arrays; +import java.util.List; + +/** + * A component to store email credentials and handle sending email notifications. + */ +public class EmailService extends NotificationService { + + private static final Setting SETTING_DEFAULT_ACCOUNT = + Setting.simpleString("xpack.notification.email.default_account", Property.Dynamic, Property.NodeScope); + + private static final Setting.AffixSetting SETTING_PROFILE = + Setting.affixKeySetting("xpack.notification.email.account.", "profile", + (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope)); + + private static final Setting.AffixSetting SETTING_EMAIL_DEFAULTS = + Setting.affixKeySetting("xpack.notification.email.account.", "email_defaults", + (key) -> Setting.groupSetting(key + ".", Property.Dynamic, Property.NodeScope)); + + // settings that can be configured as smtp properties + private static final Setting.AffixSetting SETTING_SMTP_AUTH = + Setting.affixKeySetting("xpack.notification.email.account.", "smtp.auth", + (key) -> Setting.boolSetting(key, false, Property.Dynamic, Property.NodeScope)); + + private static final Setting.AffixSetting SETTING_SMTP_STARTTLS_ENABLE = + Setting.affixKeySetting("xpack.notification.email.account.", "smtp.starttls.enable", + (key) -> Setting.boolSetting(key, false, Property.Dynamic, Property.NodeScope)); + + private static final Setting.AffixSetting SETTING_SMTP_STARTTLS_REQUIRED = + Setting.affixKeySetting("xpack.notification.email.account.", "smtp.starttls.required", + (key) -> Setting.boolSetting(key, false, Property.Dynamic, Property.NodeScope)); + + private static final Setting.AffixSetting SETTING_SMTP_HOST = + Setting.affixKeySetting("xpack.notification.email.account.", "smtp.host", + (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope)); + + private static final Setting.AffixSetting SETTING_SMTP_PORT = + Setting.affixKeySetting("xpack.notification.email.account.", "smtp.port", + (key) -> Setting.intSetting(key, 587, Property.Dynamic, Property.NodeScope)); + + private static final Setting.AffixSetting SETTING_SMTP_USER = + Setting.affixKeySetting("xpack.notification.email.account.", "smtp.user", + (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope)); + + private static final Setting.AffixSetting SETTING_SMTP_PASSWORD = + Setting.affixKeySetting("xpack.notification.email.account.", "smtp.password", + (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope, Property.Filtered)); + + private static final Setting.AffixSetting SETTING_SMTP_TIMEOUT = + Setting.affixKeySetting("xpack.notification.email.account.", "smtp.timeout", + (key) -> Setting.timeSetting(key, TimeValue.timeValueMinutes(2), Property.Dynamic, Property.NodeScope)); + + private static final Setting.AffixSetting SETTING_SMTP_CONNECTION_TIMEOUT = + Setting.affixKeySetting("xpack.notification.email.account.", "smtp.connection_timeout", + (key) -> Setting.timeSetting(key, 
TimeValue.timeValueMinutes(2), Property.Dynamic, Property.NodeScope)); + + private static final Setting.AffixSetting SETTING_SMTP_WRITE_TIMEOUT = + Setting.affixKeySetting("xpack.notification.email.account.", "smtp.write_timeout", + (key) -> Setting.timeSetting(key, TimeValue.timeValueMinutes(2), Property.Dynamic, Property.NodeScope)); + + private static final Setting.AffixSetting SETTING_SMTP_LOCAL_ADDRESS = + Setting.affixKeySetting("xpack.notification.email.account.", "smtp.local_address", + (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope)); + + private static final Setting.AffixSetting SETTING_SMTP_LOCAL_PORT = + Setting.affixKeySetting("xpack.notification.email.account.", "smtp.local_port", + (key) -> Setting.intSetting(key, 25, Property.Dynamic, Property.NodeScope)); + + private static final Setting.AffixSetting SETTING_SMTP_SEND_PARTIAL = + Setting.affixKeySetting("xpack.notification.email.account.", "smtp.send_partial", + (key) -> Setting.boolSetting(key, false, Property.Dynamic, Property.NodeScope)); + + private static final Setting.AffixSetting SETTING_SMTP_WAIT_ON_QUIT = + Setting.affixKeySetting("xpack.notification.email.account.", "smtp.wait_on_quit", + (key) -> Setting.boolSetting(key, true, Property.Dynamic, Property.NodeScope)); + + private final CryptoService cryptoService; + + public EmailService(Settings settings, @Nullable CryptoService cryptoService, ClusterSettings clusterSettings) { + super(settings, "email"); + this.cryptoService = cryptoService; + clusterSettings.addSettingsUpdateConsumer(this::setAccountSetting, getSettings()); + // ensure logging of setting changes + clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_PROFILE, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_EMAIL_DEFAULTS, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_AUTH, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_STARTTLS_ENABLE, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_STARTTLS_REQUIRED, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_HOST, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_PORT, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_USER, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_PASSWORD, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_TIMEOUT, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_CONNECTION_TIMEOUT, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_WRITE_TIMEOUT, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_LOCAL_ADDRESS, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_LOCAL_PORT, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_SEND_PARTIAL, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_SMTP_WAIT_ON_QUIT, (s, o) -> {}, (s, o) -> {}); + // do an initial load + setAccountSetting(settings); + } + + @Override + protected Account createAccount(String name, Settings accountSettings) { + Account.Config config = new Account.Config(name, accountSettings); + return new Account(config, cryptoService, logger); + } + + public EmailSent send(Email email, 
Authentication auth, Profile profile, String accountName) throws MessagingException { + Account account = getAccount(accountName); + if (account == null) { + throw new IllegalArgumentException("failed to send email with subject [" + email.subject() + "] via account [" + accountName + + "]. account does not exist"); + } + return send(email, auth, profile, account); + } + + private EmailSent send(Email email, Authentication auth, Profile profile, Account account) throws MessagingException { + assert account != null; + try { + email = account.send(email, auth, profile); + } catch (MessagingException me) { + throw new MessagingException("failed to send email with subject [" + email.subject() + "] via account [" + account.name() + + "]", me); + } + return new EmailSent(account.name(), email); + } + + public static class EmailSent { + + private final String account; + private final Email email; + + public EmailSent(String account, Email email) { + this.account = account; + this.email = email; + } + + public String account() { + return account; + } + + public Email email() { + return email; + } + } + + public static List> getSettings() { + return Arrays.asList(SETTING_DEFAULT_ACCOUNT, SETTING_PROFILE, SETTING_EMAIL_DEFAULTS, SETTING_SMTP_AUTH, SETTING_SMTP_HOST, + SETTING_SMTP_PASSWORD, SETTING_SMTP_PORT, SETTING_SMTP_STARTTLS_ENABLE, SETTING_SMTP_USER, SETTING_SMTP_STARTTLS_REQUIRED, + SETTING_SMTP_TIMEOUT, SETTING_SMTP_CONNECTION_TIMEOUT, SETTING_SMTP_WRITE_TIMEOUT, SETTING_SMTP_LOCAL_ADDRESS, + SETTING_SMTP_LOCAL_PORT, SETTING_SMTP_SEND_PARTIAL, SETTING_SMTP_WAIT_ON_QUIT); + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailTemplate.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailTemplate.java new file mode 100644 index 0000000000000..7097b369863d2 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailTemplate.java @@ -0,0 +1,421 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.email; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; + +import javax.mail.internet.AddressException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class EmailTemplate implements ToXContentObject { + + final TextTemplate from; + final TextTemplate[] replyTo; + final TextTemplate priority; + final TextTemplate[] to; + final TextTemplate[] cc; + final TextTemplate[] bcc; + final TextTemplate subject; + final TextTemplate textBody; + final TextTemplate htmlBody; + + public EmailTemplate(TextTemplate from, TextTemplate[] replyTo, TextTemplate priority, TextTemplate[] to, + TextTemplate[] cc, TextTemplate[] bcc, TextTemplate subject, TextTemplate textBody, + TextTemplate htmlBody) { + this.from = from; + this.replyTo = replyTo; + this.priority = priority; + this.to = to; + this.cc = cc; + this.bcc = bcc; + this.subject = subject; + this.textBody = textBody; + this.htmlBody = htmlBody; + } + + public TextTemplate from() { + return from; + } + + public TextTemplate[] replyTo() { + return replyTo; + } + + public TextTemplate priority() { + return priority; + } + + public TextTemplate[] to() { + return to; + } + + public TextTemplate[] cc() { + return cc; + } + + public TextTemplate[] bcc() { + return bcc; + } + + public TextTemplate subject() { + return subject; + } + + public TextTemplate textBody() { + return textBody; + } + + public TextTemplate htmlBody() { + return htmlBody; + } + + public Email.Builder render(TextTemplateEngine engine, Map model, HtmlSanitizer htmlSanitizer, + Map attachments) throws AddressException { + Email.Builder builder = Email.builder(); + if (from != null) { + builder.from(engine.render(from, model)); + } + if (replyTo != null) { + Email.AddressList addresses = templatesToAddressList(engine, replyTo, model); + builder.replyTo(addresses); + } + if (priority != null) { + builder.priority(Email.Priority.resolve(engine.render(priority, model))); + } + if (to != null) { + Email.AddressList addresses = templatesToAddressList(engine, to, model); + builder.to(addresses); + } + if (cc != null) { + Email.AddressList addresses = templatesToAddressList(engine, cc, model); + builder.cc(addresses); + } + if (bcc != null) { + Email.AddressList addresses = templatesToAddressList(engine, bcc, model); + builder.bcc(addresses); + } + if (subject != null) { + builder.subject(engine.render(subject, model)); + } + if (textBody != null) { + builder.textBody(engine.render(textBody, model)); + } + if (attachments != null) { + for (Attachment attachment : attachments.values()) { + builder.attach(attachment); + } + } + if (htmlBody != null) { + String renderedHtml = engine.render(htmlBody, model); + renderedHtml = htmlSanitizer.sanitize(renderedHtml); + builder.htmlBody(renderedHtml); + } + return builder; + } + + private static Email.AddressList templatesToAddressList(TextTemplateEngine engine, TextTemplate[] templates, + Map model) throws AddressException { + List addresses = new ArrayList<>(templates.length); + for (TextTemplate template : templates) { + 
Email.AddressList.parse(engine.render(template, model)).forEach(addresses::add); + } + return new Email.AddressList(addresses); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + EmailTemplate that = (EmailTemplate) o; + return Objects.equals(from, that.from) && + Arrays.equals(replyTo, that.replyTo) && + Objects.equals(priority, that.priority) && + Arrays.equals(to, that.to) && + Arrays.equals(cc, that.cc) && + Arrays.equals(bcc, that.bcc) && + Objects.equals(subject, that.subject) && + Objects.equals(textBody, that.textBody) && + Objects.equals(htmlBody, that.htmlBody); + } + + @Override + public int hashCode() { + return Objects.hash(from, replyTo, priority, to, cc, bcc, subject, textBody, htmlBody); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + xContentBody(builder, params); + return builder.endObject(); + } + + public XContentBuilder xContentBody(XContentBuilder builder, Params params) throws IOException { + if (from != null) { + builder.field(Email.Field.FROM.getPreferredName(), from, params); + } + if (replyTo != null) { + builder.startArray(Email.Field.REPLY_TO.getPreferredName()); + for (TextTemplate template : replyTo) { + template.toXContent(builder, params); + } + builder.endArray(); + } + if (priority != null) { + builder.field(Email.Field.PRIORITY.getPreferredName(), priority, params); + } + if (to != null) { + builder.startArray(Email.Field.TO.getPreferredName()); + for (TextTemplate template : to) { + template.toXContent(builder, params); + } + builder.endArray(); + } + if (cc != null) { + builder.startArray(Email.Field.CC.getPreferredName()); + for (TextTemplate template : cc) { + template.toXContent(builder, params); + } + builder.endArray(); + } + if (bcc != null) { + builder.startArray(Email.Field.BCC.getPreferredName()); + for (TextTemplate template : bcc) { + template.toXContent(builder, params); + } + builder.endArray(); + } + if (subject != null) { + builder.field(Email.Field.SUBJECT.getPreferredName(), subject, params); + } + if (textBody != null || htmlBody != null) { + builder.startObject(Email.Field.BODY.getPreferredName()); + if (textBody != null) { + builder.field(Email.Field.BODY_TEXT.getPreferredName(), textBody, params); + } + if (htmlBody != null) { + builder.field(Email.Field.BODY_HTML.getPreferredName(), htmlBody, params); + } + builder.endObject(); + } + return builder; + } + + public static Builder builder() { + return new Builder(); + } + + public static class Builder { + + private TextTemplate from; + private TextTemplate[] replyTo; + private TextTemplate priority; + private TextTemplate[] to; + private TextTemplate[] cc; + private TextTemplate[] bcc; + private TextTemplate subject; + private TextTemplate textBody; + private TextTemplate htmlBody; + + private Builder() { + } + + public Builder from(String from) { + return from(new TextTemplate(from)); + } + + public Builder from(TextTemplate from) { + this.from = from; + return this; + } + + public Builder replyTo(String... replyTo) { + TextTemplate[] templates = new TextTemplate[replyTo.length]; + for (int i = 0; i < templates.length; i++) { + templates[i] = new TextTemplate(replyTo[i]); + } + return replyTo(templates); + } + + public Builder replyTo(TextTemplate... 
replyTo) { + this.replyTo = replyTo; + return this; + } + + public Builder priority(Email.Priority priority) { + return priority(new TextTemplate(priority.name())); + } + + public Builder priority(TextTemplate priority) { + this.priority = priority; + return this; + } + + public Builder to(String... to) { + TextTemplate[] templates = new TextTemplate[to.length]; + for (int i = 0; i < templates.length; i++) { + templates[i] = new TextTemplate(to[i]); + } + return to(templates); + } + + public Builder to(TextTemplate... to) { + this.to = to; + return this; + } + + public Builder cc(String... cc) { + TextTemplate[] templates = new TextTemplate[cc.length]; + for (int i = 0; i < templates.length; i++) { + templates[i] = new TextTemplate(cc[i]); + } + return cc(templates); + } + + public Builder cc(TextTemplate... cc) { + this.cc = cc; + return this; + } + + public Builder bcc(String... bcc) { + TextTemplate[] templates = new TextTemplate[bcc.length]; + for (int i = 0; i < templates.length; i++) { + templates[i] = new TextTemplate(bcc[i]); + } + return bcc(templates); + } + + public Builder bcc(TextTemplate... bcc) { + this.bcc = bcc; + return this; + } + + public Builder subject(String subject) { + return subject(new TextTemplate(subject)); + } + + public Builder subject(TextTemplate subject) { + this.subject = subject; + return this; + } + + public Builder textBody(String text) { + return textBody(new TextTemplate(text)); + } + + public Builder textBody(TextTemplate text) { + this.textBody = text; + return this; + } + + public Builder htmlBody(String html) { + return htmlBody(new TextTemplate(html)); + } + + public Builder htmlBody(TextTemplate html) { + this.htmlBody = html; + return this; + } + + public EmailTemplate build() { + return new EmailTemplate(from, replyTo, priority, to, cc, bcc, subject, textBody, htmlBody); + } + } + + public static class Parser { + + private final EmailTemplate.Builder builder = builder(); + + public boolean handle(String fieldName, XContentParser parser) throws IOException { + if (Email.Field.FROM.match(fieldName, parser.getDeprecationHandler())) { + builder.from(TextTemplate.parse(parser)); + } else if (Email.Field.REPLY_TO.match(fieldName, parser.getDeprecationHandler())) { + if (parser.currentToken() == XContentParser.Token.START_ARRAY) { + List templates = new ArrayList<>(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + templates.add(TextTemplate.parse(parser)); + } + builder.replyTo(templates.toArray(new TextTemplate[templates.size()])); + } else { + builder.replyTo(TextTemplate.parse(parser)); + } + } else if (Email.Field.TO.match(fieldName, parser.getDeprecationHandler())) { + if (parser.currentToken() == XContentParser.Token.START_ARRAY) { + List templates = new ArrayList<>(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + templates.add(TextTemplate.parse(parser)); + } + builder.to(templates.toArray(new TextTemplate[templates.size()])); + } else { + builder.to(TextTemplate.parse(parser)); + } + } else if (Email.Field.CC.match(fieldName, parser.getDeprecationHandler())) { + if (parser.currentToken() == XContentParser.Token.START_ARRAY) { + List templates = new ArrayList<>(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + templates.add(TextTemplate.parse(parser)); + } + builder.cc(templates.toArray(new TextTemplate[templates.size()])); + } else { + builder.cc(TextTemplate.parse(parser)); + } + } else if (Email.Field.BCC.match(fieldName, parser.getDeprecationHandler())) { + if 
(parser.currentToken() == XContentParser.Token.START_ARRAY) { + List templates = new ArrayList<>(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + templates.add(TextTemplate.parse(parser)); + } + builder.bcc(templates.toArray(new TextTemplate[templates.size()])); + } else { + builder.bcc(TextTemplate.parse(parser)); + } + } else if (Email.Field.PRIORITY.match(fieldName, parser.getDeprecationHandler())) { + builder.priority(TextTemplate.parse(parser)); + } else if (Email.Field.SUBJECT.match(fieldName, parser.getDeprecationHandler())) { + builder.subject(TextTemplate.parse(parser)); + } else if (Email.Field.BODY.match(fieldName, parser.getDeprecationHandler())) { + if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { + builder.textBody(TextTemplate.parse(parser)); + } else if (parser.currentToken() == XContentParser.Token.START_OBJECT) { + XContentParser.Token token; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (currentFieldName == null) { + throw new ElasticsearchParseException("could not parse email template. empty [{}] field", fieldName); + } else if (Email.Field.BODY_TEXT.match(currentFieldName, parser.getDeprecationHandler())) { + builder.textBody(TextTemplate.parse(parser)); + } else if (Email.Field.BODY_HTML.match(currentFieldName, parser.getDeprecationHandler())) { + builder.htmlBody(TextTemplate.parse(parser)); + } else { + throw new ElasticsearchParseException("could not parse email template. unknown field [{}.{}] field", + fieldName, currentFieldName); + } + } + } + } else { + return false; + } + return true; + } + + public EmailTemplate parsedTemplate() { + return builder.build(); + } + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/HtmlSanitizer.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/HtmlSanitizer.java new file mode 100644 index 0000000000000..c7be7e6db0f30 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/HtmlSanitizer.java @@ -0,0 +1,211 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.email; + +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.owasp.html.CssSchema; +import org.owasp.html.ElementPolicy; +import org.owasp.html.HtmlPolicyBuilder; +import org.owasp.html.PolicyFactory; + +import java.util.Arrays; +import java.util.Collections; +import java.util.EnumSet; +import java.util.List; +import java.util.Locale; +import java.util.function.Function; + +public class HtmlSanitizer { + + static final String[] FORMATTING_TAGS = new String[] { + "b", "i", "s", "u", "o", "sup", "sub", "ins", "del", "strong", + "strike", "tt", "code", "big", "small", "br", "span", "em", "hr" + }; + static final String[] BLOCK_TAGS = new String[] { + "p", "div", "h1", "h2", "h3", "h4", "h5", "h6", "ul", "ol", "li", "blockquote" + }; + static final String[] TABLE_TAGS = new String[] { + "table", "th", "tr", "td", "caption", "col", "colgroup", "thead", "tbody", "tfoot" + }; + static final List DEFAULT_ALLOWED = Arrays.asList( + "body", "head", "_tables", "_links", "_blocks", "_formatting", "img:embedded" + ); + + private static Setting SETTING_SANITIZATION_ENABLED = + Setting.boolSetting("xpack.notification.email.html.sanitization.enabled", true, Property.NodeScope); + + private static Setting> SETTING_SANITIZATION_ALLOW = + Setting.listSetting("xpack.notification.email.html.sanitization.allow", DEFAULT_ALLOWED, Function.identity(), + Property.NodeScope); + + private static Setting> SETTING_SANITIZATION_DISALLOW = + Setting.listSetting("xpack.notification.email.html.sanitization.disallow", Collections.emptyList(), Function.identity(), + Property.NodeScope); + + private final boolean enabled; + @SuppressForbidden( reason = "PolicyFactory uses guava Function") + private final PolicyFactory policy; + + public HtmlSanitizer(Settings settings) { + enabled = SETTING_SANITIZATION_ENABLED.get(settings); + List allow = SETTING_SANITIZATION_ALLOW.get(settings); + List disallow = SETTING_SANITIZATION_DISALLOW.get(settings); + policy = createCommonPolicy(allow, disallow); + } + + public String sanitize(String html) { + if (!enabled) { + return html; + } + return policy.sanitize(html); + } + + @SuppressForbidden( reason = "PolicyFactory uses guava Function") + static PolicyFactory createCommonPolicy(List allow, List disallow) { + HtmlPolicyBuilder policyBuilder = new HtmlPolicyBuilder(); + + if (allow.stream().anyMatch("_all"::equals)) { + return policyBuilder + .allowElements(TABLE_TAGS) + .allowAttributes("span").onElements("col") + .allowElements(BLOCK_TAGS) + .allowElements(FORMATTING_TAGS) + .allowWithoutAttributes("span") + .allowStyling(CssSchema.DEFAULT) + .allowStandardUrlProtocols().allowElements("a") + .allowAttributes("href").onElements("a").requireRelNofollowOnLinks() + .allowElements("img") + .allowAttributes("src").onElements("img") + .allowStandardUrlProtocols() + .allowUrlProtocols("cid") + .toFactory(); + } + + EnumSet images = EnumSet.noneOf(Images.class); + + for (String tag : allow) { + tag = tag.toLowerCase(Locale.ROOT); + switch (tag) { + case "_tables": + policyBuilder.allowElements(TABLE_TAGS); + policyBuilder.allowAttributes("span").onElements("col"); + policyBuilder.allowAttributes("border", "cellpadding").onElements("table"); + policyBuilder.allowAttributes("colspan", "rowspan").onElements("th", "td"); + break; + case "_links": + 
policyBuilder.allowElements("a") + .allowAttributes("href").onElements("a") + .allowStandardUrlProtocols() + .requireRelNofollowOnLinks(); + break; + case "_blocks": + policyBuilder.allowElements(BLOCK_TAGS); + break; + case "_formatting": + policyBuilder.allowElements(FORMATTING_TAGS) + .allowWithoutAttributes("span"); + break; + case "_styles": + policyBuilder.allowStyling(CssSchema.DEFAULT); + break; + case "img:all": + case "img": + images.add(Images.ALL); + break; + case "img:embedded": + images.add(Images.EMBEDDED); + break; + default: + policyBuilder.allowElements(tag); + } + } + for (String tag : disallow) { + tag = tag.toLowerCase(Locale.ROOT); + switch (tag) { + case "_tables": + policyBuilder.disallowElements(TABLE_TAGS); + break; + case "_links": + policyBuilder.disallowElements("a"); + break; + case "_blocks": + policyBuilder.disallowElements(BLOCK_TAGS); + break; + case "_formatting": + policyBuilder.disallowElements(FORMATTING_TAGS); + break; + case "_styles": + policyBuilder.disallowAttributes("style"); + break; + case "img:all": + case "img": + images.remove(Images.ALL); + break; + case "img:embedded": + images.remove(Images.EMBEDDED); + break; + default: + policyBuilder.disallowElements(tag); + } + } + + if (!images.isEmpty()) { + policyBuilder.allowAttributes("src").onElements("img").allowUrlProtocols("cid"); + if (images.contains(Images.ALL)) { + policyBuilder.allowElements("img"); + policyBuilder.allowStandardUrlProtocols(); + } else { + // embedded + policyBuilder.allowElements(EmbeddedImgOnlyPolicy.INSTANCE, "img"); + } + } + + return policyBuilder.toFactory(); + } + + + + /** + * An {@code img} tag policy that only accept {@code cid:} values in its {@code src} attribute. + * If such value is found, the content id is verified against the available attachements of the + * email and if the content/attachment is not found, the element is dropped. + */ + private static class EmbeddedImgOnlyPolicy implements ElementPolicy { + + private static EmbeddedImgOnlyPolicy INSTANCE = new EmbeddedImgOnlyPolicy(); + + @Override + public String apply(String elementName, List attrs) { + if (!"img".equals(elementName) || attrs.size() == 0) { + return elementName; + } + String attrName = null; + for (String attr : attrs) { + if (attrName == null) { + attrName = attr.toLowerCase(Locale.ROOT); + continue; + } + // reject external image source (only allow embedded ones) + if ("src".equals(attrName) && !attr.startsWith("cid:")) { + return null; + } + } + return elementName; + } + } + + enum Images { + ALL, + EMBEDDED + } + + public static List> getSettings() { + return Arrays.asList(SETTING_SANITIZATION_ALLOW, SETTING_SANITIZATION_DISALLOW, SETTING_SANITIZATION_ENABLED); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Profile.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Profile.java new file mode 100644 index 0000000000000..b59ee070d7f24 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Profile.java @@ -0,0 +1,211 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.email; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Locale; + +import javax.mail.Message; +import javax.mail.MessagingException; +import javax.mail.Session; +import javax.mail.internet.MimeBodyPart; +import javax.mail.internet.MimeMessage; +import javax.mail.internet.MimeMultipart; + +/** + * A profile of an email client, can be seen as a strategy to emulate a real world email client + * (different clients potentially support different mime message structures) + */ +public enum Profile { + + STANDARD() { + + @Override + public String textBody(MimeMessage msg) throws IOException, MessagingException { + MimeMultipart mixed = (MimeMultipart) msg.getContent(); + MimeMultipart related = null; + for (int i = 0; i < mixed.getCount(); i++) { + MimeBodyPart part = (MimeBodyPart) mixed.getBodyPart(i); + if (part.getContentType().startsWith("multipart/related")) { + related = (MimeMultipart) part.getContent(); + break; + } + } + if (related == null) { + throw new IllegalStateException("could not extract body text from mime message using [standard] profile. could not find " + + "part content type with [multipart/related]"); + } + + MimeMultipart alternative = null; + for (int i = 0; i < related.getCount(); i++) { + MimeBodyPart part = (MimeBodyPart) related.getBodyPart(i); + if (part.getContentType().startsWith("multipart/alternative")) { + alternative = (MimeMultipart) part.getContent(); + break; + } + } + if (alternative == null) { + throw new IllegalStateException("could not extract body text from mime message using [standard] profile. could not find " + + "part content type with [multipart/alternative]"); + } + + for (int i = 0; i < alternative.getCount(); i++) { + MimeBodyPart part = (MimeBodyPart) alternative.getBodyPart(i); + if (part.getContentType().startsWith("text/plain")) { + return (String) part.getContent(); + } + } + + throw new IllegalStateException("could not extract body text from mime message using [standard] profile"); + } + + @Override + public MimeMessage toMimeMessage(Email email, Session session) throws MessagingException { + MimeMessage message = createCommon(email, session); + + MimeMultipart mixed = new MimeMultipart("mixed"); + message.setContent(mixed); + + MimeMultipart related = new MimeMultipart("related"); + mixed.addBodyPart(wrap(related, null)); + + MimeMultipart alternative = new MimeMultipart("alternative"); + related.addBodyPart(wrap(alternative, "text/alternative")); + + MimeBodyPart text = new MimeBodyPart(); + if (email.textBody != null) { + text.setText(email.textBody, StandardCharsets.UTF_8.name()); + } else { + text.setText("", StandardCharsets.UTF_8.name()); + } + alternative.addBodyPart(text); + + if (email.htmlBody != null) { + MimeBodyPart html = new MimeBodyPart(); + html.setText(email.htmlBody, StandardCharsets.UTF_8.name(), "html"); + alternative.addBodyPart(html); + } + + if (!email.attachments.isEmpty()) { + for (Attachment attachment : email.attachments.values()) { + if (attachment.isInline()) { + related.addBodyPart(attachment.bodyPart()); + } else { + mixed.addBodyPart(attachment.bodyPart()); + } + } + } + + return message; + } + }, + + OUTLOOK() { + + @Override + public String textBody(MimeMessage msg) throws IOException, MessagingException { + return STANDARD.textBody(msg); + } + + @Override + public MimeMessage toMimeMessage(Email email, Session session) throws MessagingException { + return STANDARD.toMimeMessage(email, session); + } + }, 
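+    // For illustration: given a text body, an html body, one inline attachment and one regular
+    // attachment, the STANDARD profile above builds a MIME tree of roughly this shape:
+    //
+    //   multipart/mixed
+    //     multipart/related
+    //       multipart/alternative
+    //         text/plain  (textBody)
+    //         text/html   (htmlBody)
+    //       inline attachment   (isInline() == true)
+    //     regular attachment    (isInline() == false)
+    //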
+ GMAIL() { + + @Override + public String textBody(MimeMessage msg) throws IOException, MessagingException { + return STANDARD.textBody(msg); + } + + @Override + public MimeMessage toMimeMessage(Email email, Session session) throws MessagingException { + return STANDARD.toMimeMessage(email, session); + } + }, + MAC() { + + @Override + public String textBody(MimeMessage msg) throws IOException, MessagingException { + return STANDARD.textBody(msg); + } + + @Override + public MimeMessage toMimeMessage(Email email, Session session) throws MessagingException { + return STANDARD.toMimeMessage(email, session); + } + }; + + static final String MESSAGE_ID_HEADER = "Message-ID"; + + public abstract MimeMessage toMimeMessage(Email email, Session session) throws MessagingException ; + + public abstract String textBody(MimeMessage msg) throws IOException, MessagingException; + + public static Profile resolve(String name) { + Profile profile = resolve(name, null); + if (profile == null) { + throw new IllegalArgumentException("[" + name + "] is an unknown email profile"); + } + return profile; + } + + public static Profile resolve(String name, Profile defaultProfile) { + if (name == null) { + return defaultProfile; + } + switch (name.toLowerCase(Locale.ROOT)) { + case "std": + case "standard": return STANDARD; + case "outlook": return OUTLOOK; + case "gmail": return GMAIL; + case "mac": return MAC; + default: + return defaultProfile; + } + } + + static MimeMessage createCommon(Email email, Session session) throws MessagingException { + MimeMessage message = new MimeMessage(session); + message.setHeader(MESSAGE_ID_HEADER, email.id); + if (email.from != null) { + message.setFrom(email.from); + } + if (email.replyTo != null) { + message.setReplyTo(email.replyTo.toArray()); + } + if (email.priority != null) { + email.priority.applyTo(message); + } + message.setSentDate(email.sentDate.toDate()); + message.setRecipients(Message.RecipientType.TO, email.to.toArray()); + if (email.cc != null) { + message.setRecipients(Message.RecipientType.CC, email.cc.toArray()); + } + if (email.bcc != null) { + message.setRecipients(Message.RecipientType.BCC, email.bcc.toArray()); + } + if (email.subject != null) { + message.setSubject(email.subject, StandardCharsets.UTF_8.name()); + } else { + message.setSubject("", StandardCharsets.UTF_8.name()); + } + + return message; + } + + static MimeBodyPart wrap(MimeMultipart multipart, String contentType) throws MessagingException { + MimeBodyPart part = new MimeBodyPart(); + if (contentType == null) { + part.setContent(multipart); + } else { + part.setContent(multipart, contentType); + } + return part; + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/DataAttachment.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/DataAttachment.java new file mode 100644 index 0000000000000..c55ef067a9540 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/DataAttachment.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.email.attachment; + +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +public class DataAttachment implements EmailAttachmentParser.EmailAttachment { + + private final String id; + private final org.elasticsearch.xpack.watcher.notification.email.DataAttachment dataAttachment; + + public DataAttachment(String id, org.elasticsearch.xpack.watcher.notification.email.DataAttachment dataAttachment) { + this.id = id; + this.dataAttachment = dataAttachment; + } + + public org.elasticsearch.xpack.watcher.notification.email.DataAttachment getDataAttachment() { + return dataAttachment; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(id).startObject(DataAttachmentParser.TYPE); + if (dataAttachment == org.elasticsearch.xpack.watcher.notification.email.DataAttachment.YAML) { + builder.field("format", "yaml"); + } else { + builder.field("format", "json"); + } + return builder.endObject().endObject(); + } + + @Override + public String type() { + return DataAttachmentParser.TYPE; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + DataAttachment otherDataAttachment = (DataAttachment) o; + return Objects.equals(id, otherDataAttachment.id) && Objects.equals(dataAttachment, otherDataAttachment.dataAttachment); + } + + @Override + public int hashCode() { + return Objects.hash(id, dataAttachment); + } + + @Override + public String id() { + return id; + } + + @Override + public boolean inline() { + return false; + } + + public static Builder builder(String id) { + return new Builder(id); + } + + + public static class Builder { + + private String id; + private org.elasticsearch.xpack.watcher.notification.email.DataAttachment dataAttachment; + + private Builder(String id) { + this.id = id; + } + + public Builder dataAttachment(org.elasticsearch.xpack.watcher.notification.email.DataAttachment dataAttachment) { + this.dataAttachment = dataAttachment; + return this; + } + + public DataAttachment build() { + return new DataAttachment(id, dataAttachment); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/DataAttachmentParser.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/DataAttachmentParser.java new file mode 100644 index 0000000000000..9164e1db7ea1b --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/DataAttachmentParser.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.email.attachment; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.notification.email.Attachment; +import org.elasticsearch.xpack.watcher.support.Variables; + +import java.io.IOException; +import java.util.Map; + +import static org.elasticsearch.xpack.watcher.notification.email.DataAttachment.resolve; + +public class DataAttachmentParser implements EmailAttachmentParser { + + interface Fields { + ParseField FORMAT = new ParseField("format"); + } + + public static final String TYPE = "data"; + + @Override + public String type() { + return TYPE; + } + + @Override + public DataAttachment parse(String id, XContentParser parser) throws IOException { + org.elasticsearch.xpack.watcher.notification.email.DataAttachment dataAttachment = + org.elasticsearch.xpack.watcher.notification.email.DataAttachment.YAML; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Strings.hasLength(currentFieldName) && Fields.FORMAT.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_STRING) { + dataAttachment = resolve(parser.text()); + } else { + throw new ElasticsearchParseException("could not parse data attachment. expected string value for [{}] field but " + + "found [{}] instead", currentFieldName, token); + } + } + } + + return new DataAttachment(id, dataAttachment); + } + + @Override + public Attachment toAttachment(WatchExecutionContext ctx, Payload payload, DataAttachment attachment) throws IOException { + Map model = Variables.createCtxModel(ctx, payload); + return attachment.getDataAttachment().create(attachment.id(), model); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/EmailAttachmentParser.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/EmailAttachmentParser.java new file mode 100644 index 0000000000000..5bf786bef445e --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/EmailAttachmentParser.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.email.attachment; + +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.notification.email.Attachment; + +import java.io.IOException; + +/** + * Marker interface for email attachments that have an additional execution step and are used by + * EmailAttachmentParser class + */ +public interface EmailAttachmentParser { + + interface EmailAttachment extends ToXContentFragment { + /** + * @return A type to identify the email attachment, same as the parser identifier + */ + String type(); + + /** + * @return The id of this attachment + */ + String id(); + + /** + * Allows the attachment to decide of it should be of disposition type attachment or inline, which is important + * for being able to display inside of desktop email clients + * + * @return a boolean flagging this attachment as being inline + */ + boolean inline(); + } + + /** + * @return An identifier of this parser + */ + String type(); + + /** + * A parser to create an EmailAttachment, that is serializable and does not execute anything + * + * @param id The id of this attachment, parsed from the outer content + * @param parser The XContentParser used for parsing + * @return A concrete EmailAttachment + * @throws IOException in case parsing fails + */ + T parse(String id, XContentParser parser) throws IOException; + + /** + * Converts an email attachment to an attachment, potentially executing code like an HTTP request + * @param context The WatchExecutionContext supplied with the whole watch execution + * @param payload The Payload supplied with the action + * @param attachment The typed attachment + * @return An attachment that is ready to be used in a MimeMessage + */ + Attachment toAttachment(WatchExecutionContext context, Payload payload, T attachment) throws IOException; + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/EmailAttachments.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/EmailAttachments.java new file mode 100644 index 0000000000000..5225be9340239 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/EmailAttachments.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.email.attachment; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.Objects; + +public class EmailAttachments implements ToXContentFragment { + + public static final EmailAttachments EMPTY_ATTACHMENTS = new EmailAttachments( + Collections.emptyList()); + + public interface Fields { + ParseField ATTACHMENTS = new ParseField("attachments"); + } + + private final Collection attachments; + + public EmailAttachments(Collection attachments) { + this.attachments = attachments; + } + + public Collection getAttachments() { + return attachments; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (attachments != null && attachments.size() > 0) { + builder.startObject(Fields.ATTACHMENTS.getPreferredName()); + for (EmailAttachmentParser.EmailAttachment attachment : attachments) { + attachment.toXContent(builder, params); + } + builder.endObject(); + } + + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + EmailAttachments other = (EmailAttachments) o; + return Objects.equals(attachments, other.attachments); + } + + @Override + public int hashCode() { + return Objects.hash(attachments); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/EmailAttachmentsParser.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/EmailAttachmentsParser.java new file mode 100644 index 0000000000000..2b4957f815444 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/EmailAttachmentsParser.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.email.attachment; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.Map; + +public class EmailAttachmentsParser { + + private Map parsers; + + public EmailAttachmentsParser(Map parsers) { + this.parsers = Collections.unmodifiableMap(parsers); + } + + public EmailAttachments parse(XContentParser parser) throws IOException { + Map attachments = new LinkedHashMap<>(); + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else { + if (token == XContentParser.Token.START_OBJECT && currentFieldName != null) { + String currentAttachmentType = null; + if (parser.nextToken() == XContentParser.Token.FIELD_NAME) { + currentAttachmentType = parser.currentName(); + } + parser.nextToken(); + + EmailAttachmentParser emailAttachmentParser = parsers.get(currentAttachmentType); + if (emailAttachmentParser == null) { + throw new ElasticsearchParseException("Cannot parse attachment of type [{}]", currentAttachmentType); + } + EmailAttachmentParser.EmailAttachment emailAttachment = emailAttachmentParser.parse(currentFieldName, parser); + if (attachments.containsKey(emailAttachment.id())) { + throw new ElasticsearchParseException("Attachment with id [{}] has already been created, must be renamed", + emailAttachment.id()); + } + attachments.put(emailAttachment.id(), emailAttachment); + // one further to skip the end_object from the attachment + parser.nextToken(); + } + } + } + + return new EmailAttachments(new ArrayList<>(attachments.values())); + } + + public Map getParsers() { + return parsers; + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpEmailAttachementParser.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpEmailAttachementParser.java new file mode 100644 index 0000000000000..a9f161a3fc8a4 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpEmailAttachementParser.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.email.attachment; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.email.Attachment; +import org.elasticsearch.xpack.watcher.support.Variables; + +import java.io.IOException; +import java.util.Map; + +public class HttpEmailAttachementParser implements EmailAttachmentParser { + + public interface Fields { + ParseField INLINE = new ParseField("inline"); + ParseField REQUEST = new ParseField("request"); + ParseField CONTENT_TYPE = new ParseField("content_type"); + } + + public static final String TYPE = "http"; + private final HttpClient httpClient; + private HttpRequestTemplate.Parser requestTemplateParser; + private final TextTemplateEngine templateEngine; + + public HttpEmailAttachementParser(HttpClient httpClient, HttpRequestTemplate.Parser requestTemplateParser, + TextTemplateEngine templateEngine) { + this.httpClient = httpClient; + this.requestTemplateParser = requestTemplateParser; + this.templateEngine = templateEngine; + } + + @Override + public String type() { + return TYPE; + } + + @Override + public HttpRequestAttachment parse(String id, XContentParser parser) throws IOException { + boolean inline = false; + String contentType = null; + HttpRequestTemplate requestTemplate = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Fields.CONTENT_TYPE.match(currentFieldName, parser.getDeprecationHandler())) { + contentType = parser.text(); + } else if (Fields.INLINE.match(currentFieldName, parser.getDeprecationHandler())) { + inline = parser.booleanValue(); + } else if (Fields.REQUEST.match(currentFieldName, parser.getDeprecationHandler())) { + requestTemplate = requestTemplateParser.parse(parser); + } else { + String msg = "Unknown field name [" + currentFieldName + "] in http request attachment configuration"; + throw new ElasticsearchParseException(msg); + } + } + + if (requestTemplate != null) { + return new HttpRequestAttachment(id, requestTemplate, inline, contentType); + } + + throw new ElasticsearchParseException("Could not parse http request attachment"); + } + + @Override + public Attachment toAttachment(WatchExecutionContext context, Payload payload, + HttpRequestAttachment attachment) throws IOException { + Map model = Variables.createCtxModel(context, payload); + HttpRequest httpRequest = attachment.getRequestTemplate().render(templateEngine, model); + + HttpResponse response = httpClient.execute(httpRequest); + // check for status 200, only then append attachment + if (response.status() >= 200 && response.status() < 300) { + if 
(response.hasContent()) { + String contentType = attachment.getContentType(); + String attachmentContentType = Strings.hasLength(contentType) ? contentType : response.contentType(); + return new Attachment.Bytes(attachment.id(), BytesReference.toBytes(response.body()), attachmentContentType, + attachment.inline()); + } else { + throw new ElasticsearchException("Watch[{}] attachment[{}] HTTP empty response body host[{}], port[{}], " + + "method[{}], path[{}], status[{}]", + context.watch().id(), attachment.id(), httpRequest.host(), httpRequest.port(), httpRequest.method(), + httpRequest.path(), response.status()); + } + } else { + throw new ElasticsearchException("Watch[{}] attachment[{}] HTTP error status host[{}], port[{}], " + + "method[{}], path[{}], status[{}]", + context.watch().id(), attachment.id(), httpRequest.host(), httpRequest.port(), httpRequest.method(), + httpRequest.path(), response.status()); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpRequestAttachment.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpRequestAttachment.java new file mode 100644 index 0000000000000..44e65f49ad476 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpRequestAttachment.java @@ -0,0 +1,117 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.email.attachment; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; + +import java.io.IOException; +import java.util.Objects; + +public class HttpRequestAttachment implements EmailAttachmentParser.EmailAttachment { + + private final HttpRequestTemplate requestTemplate; + private boolean inline; + private final String contentType; + private final String id; + + public HttpRequestAttachment(String id, HttpRequestTemplate requestTemplate, boolean inline, @Nullable String contentType) { + this.id = id; + this.requestTemplate = requestTemplate; + this.inline = inline; + this.contentType = contentType; + } + + public HttpRequestTemplate getRequestTemplate() { + return requestTemplate; + } + + public String getContentType() { + return contentType; + } + + @Override + public String id() { + return id; + } + + @Override + public boolean inline() { + return inline; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(id) + .startObject(HttpEmailAttachementParser.TYPE) + .field(HttpEmailAttachementParser.Fields.REQUEST.getPreferredName(), requestTemplate, params); + if (Strings.hasLength(contentType)) { + builder.field(HttpEmailAttachementParser.Fields.CONTENT_TYPE.getPreferredName(), contentType); + } + if (inline) { + builder.field(HttpEmailAttachementParser.Fields.INLINE.getPreferredName(), inline); + } + return builder.endObject().endObject(); + } + + public static Builder builder(String id) { + return new Builder(id); + } + + @Override + public String type() { + return HttpEmailAttachementParser.TYPE; + } + + @Override + public boolean equals(Object o) { + if 
(this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + HttpRequestAttachment otherDataAttachment = (HttpRequestAttachment) o; + return Objects.equals(id, otherDataAttachment.id) && Objects.equals(requestTemplate, otherDataAttachment.requestTemplate) + && Objects.equals(contentType, otherDataAttachment.contentType) && Objects.equals(inline, otherDataAttachment.inline); + } + + @Override + public int hashCode() { + return Objects.hash(id, requestTemplate, contentType, inline); + } + + public static class Builder { + + private String id; + private HttpRequestTemplate httpRequestTemplate; + private String contentType; + private boolean inline = false; + + private Builder(String id) { + this.id = id; + } + + public Builder httpRequestTemplate(HttpRequestTemplate httpRequestTemplate) { + this.httpRequestTemplate = httpRequestTemplate; + return this; + } + + public Builder contentType(String contentType) { + this.contentType = contentType; + return this; + } + + public Builder inline(boolean inline) { + this.inline = inline; + return this; + } + + public HttpRequestAttachment build() { + return new HttpRequestAttachment(id, httpRequestTemplate, inline, contentType); + } + + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachment.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachment.java new file mode 100644 index 0000000000000..b4c0208ae2a1c --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachment.java @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.email.attachment; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuth; + +import java.io.IOException; +import java.util.Objects; + +public class ReportingAttachment implements EmailAttachmentParser.EmailAttachment { + + static final ParseField INLINE = new ParseField("inline"); + static final ParseField AUTH = new ParseField("auth"); + static final ParseField PROXY = new ParseField("proxy"); + static final ParseField INTERVAL = new ParseField("interval"); + static final ParseField RETRIES = new ParseField("retries"); + static final ParseField URL = new ParseField("url"); + + private final boolean inline; + private final String id; + private final HttpAuth auth; + private final String url; + private final TimeValue interval; + private final Integer retries; + private final HttpProxy proxy; + + ReportingAttachment(String id, String url, boolean inline, @Nullable TimeValue interval, @Nullable Integer retries, + @Nullable HttpAuth auth, @Nullable HttpProxy proxy) { + this.id = id; + this.url = url; + this.retries = retries; + this.inline = inline; + this.auth = auth; + this.interval = interval; + this.proxy = proxy; + if (retries != null && retries < 0) { + throw new IllegalArgumentException("Retries for attachment must be >= 0"); + } + } + + @Override + public String type() { + return ReportingAttachmentParser.TYPE; + } + + @Override + public String id() { + return id; + } + + @Override + public boolean inline() { + return inline; + } + + public HttpAuth auth() { + return auth; + } + + public String url() { + return url; + } + + public TimeValue interval() { + return interval; + } + + public Integer retries() { + return retries; + } + + public HttpProxy proxy() { + return proxy; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(id).startObject(ReportingAttachmentParser.TYPE) + .field(URL.getPreferredName(), url); + + if (retries != null) { + builder.field(RETRIES.getPreferredName(), retries); + } + + if (interval != null) { + builder.field(INTERVAL.getPreferredName(), interval); + } + + if (inline) { + builder.field(INLINE.getPreferredName(), inline); + } + + if (auth != null) { + builder.startObject(AUTH.getPreferredName()); + builder.field(auth.type(), auth, params); + builder.endObject(); + } + + if (proxy != null) { + proxy.toXContent(builder, params); + } + + return builder.endObject().endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ReportingAttachment otherAttachment = (ReportingAttachment) o; + return Objects.equals(id, otherAttachment.id) && Objects.equals(url, otherAttachment.url) && + Objects.equals(interval, otherAttachment.interval) && Objects.equals(inline, otherAttachment.inline) && + Objects.equals(retries, otherAttachment.retries) && Objects.equals(auth, otherAttachment.auth) && + Objects.equals(proxy, otherAttachment.proxy); + } + + @Override + public int hashCode() { + return Objects.hash(id, url, interval, inline, retries, auth, proxy); + } +} diff --git 
a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParser.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParser.java new file mode 100644 index 0000000000000..cb50876678374 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParser.java @@ -0,0 +1,318 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.email.attachment; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.logging.LoggerMessageFormat; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpMethod; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuth; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuthRegistry; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.email.Attachment; +import org.elasticsearch.xpack.watcher.support.Variables; + +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.util.Map; + +public class ReportingAttachmentParser implements EmailAttachmentParser { + + public static final String TYPE = "reporting"; + + // total polling of 10 minutes happens this way by default + public static final Setting INTERVAL_SETTING = + Setting.timeSetting("xpack.notification.reporting.interval", TimeValue.timeValueSeconds(15), Setting.Property.NodeScope); + public static final Setting RETRIES_SETTING = + Setting.intSetting("xpack.notification.reporting.retries", 40, 0, Setting.Property.NodeScope); + + private static final ObjectParser PARSER = new ObjectParser<>("reporting_attachment"); + private static final ObjectParser PAYLOAD_PARSER = + new ObjectParser<>("reporting_attachment_kibana_payload", true, null); + + static { + PARSER.declareInt(Builder::retries, ReportingAttachment.RETRIES); + PARSER.declareBoolean(Builder::inline, ReportingAttachment.INLINE); + 
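// A hedged sketch of the attachment body these ObjectParser declarations accept. The field names are taken
+ // from the ReportingAttachment ParseFields registered here; the attachment id and all values below are
+ // made-up examples for illustration only:
+ //
+ //   "error_report" : {
+ //     "reporting" : {
+ //       "url" : "http://kibana.example.com/api/reporting/generate/dashboard/error-dashboard",
+ //       "retries" : 6,
+ //       "interval" : "30s",
+ //       "inline" : true,
+ //       "auth" : { "basic" : { "username" : "...", "password" : "..." } },
+ //       "proxy" : { "host" : "proxy.example.com", "port" : 8080 }
+ //     }
+ //   } +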
PARSER.declareString(Builder::interval, ReportingAttachment.INTERVAL); + PARSER.declareString(Builder::url, ReportingAttachment.URL); + PARSER.declareObjectOrDefault(Builder::auth, (p, s) -> s.parseAuth(p), () -> null, ReportingAttachment.AUTH); + PARSER.declareObjectOrDefault(Builder::proxy, (p, s) -> s.parseProxy(p), () -> null, ReportingAttachment.PROXY); + PAYLOAD_PARSER.declareString(KibanaReportingPayload::setPath, new ParseField("path")); + } + + private final Logger logger; + private final TimeValue interval; + private final int retries; + private HttpClient httpClient; + private final TextTemplateEngine templateEngine; + private HttpAuthRegistry authRegistry; + + public ReportingAttachmentParser(Settings settings, HttpClient httpClient, + TextTemplateEngine templateEngine, HttpAuthRegistry authRegistry) { + this.interval = INTERVAL_SETTING.get(settings); + this.retries = RETRIES_SETTING.get(settings); + this.httpClient = httpClient; + this.templateEngine = templateEngine; + this.authRegistry = authRegistry; + this.logger = Loggers.getLogger(getClass()); + } + + @Override + public String type() { + return TYPE; + } + + @Override + public ReportingAttachment parse(String id, XContentParser parser) throws IOException { + Builder builder = new Builder(id); + PARSER.parse(parser, builder, new AuthParseContext(authRegistry)); + return builder.build(); + } + + @Override + public Attachment toAttachment(WatchExecutionContext context, Payload payload, ReportingAttachment attachment) throws IOException { + Map model = Variables.createCtxModel(context, payload); + + String initialUrl = templateEngine.render(new TextTemplate(attachment.url()), model); + + HttpRequestTemplate requestTemplate = HttpRequestTemplate.builder(initialUrl) + .connectionTimeout(TimeValue.timeValueSeconds(15)) + .readTimeout(TimeValue.timeValueSeconds(15)) + .method(HttpMethod.POST) + .auth(attachment.auth()) + .proxy(attachment.proxy()) + .putHeader("kbn-xsrf", new TextTemplate("reporting")) + .build(); + HttpRequest request = requestTemplate.render(templateEngine, model); + + HttpResponse reportGenerationResponse = requestReportGeneration(context.watch().id(), attachment.id(), request); + String path = extractIdFromJson(context.watch().id(), attachment.id(), reportGenerationResponse.body()); + + HttpRequestTemplate pollingRequestTemplate = HttpRequestTemplate.builder(request.host(), request.port()) + .connectionTimeout(TimeValue.timeValueSeconds(10)) + .readTimeout(TimeValue.timeValueSeconds(10)) + .auth(attachment.auth()) + .path(path) + .scheme(request.scheme()) + .proxy(attachment.proxy()) + .putHeader("kbn-xsrf", new TextTemplate("reporting")) + .build(); + HttpRequest pollingRequest = pollingRequestTemplate.render(templateEngine, model); + + int maxRetries = attachment.retries() != null ? 
attachment.retries() : this.retries; + long sleepMillis = getSleepMillis(context, attachment); + int retryCount = 0; + while (retryCount < maxRetries) { + retryCount++; + // IMPORTANT NOTE: This is only a temporary solution until we made the execution of watcher more async + // This still blocks other executions on the thread and we have to get away from that + sleep(sleepMillis, context, attachment); + HttpResponse response = httpClient.execute(pollingRequest); + + if (response.status() == 503) { + // requires us to interval another run, no action to take, except logging + logger.trace("Watch[{}] reporting[{}] pdf is not ready, polling in [{}] again", context.watch().id(), attachment.id(), + TimeValue.timeValueMillis(sleepMillis)); + } else if (response.status() >= 400) { + String body = response.body() != null ? response.body().utf8ToString() : null; + throw new ElasticsearchException("Watch[{}] reporting[{}] Error when polling pdf from host[{}], port[{}], " + + "method[{}], path[{}], status[{}], body[{}]", context.watch().id(), attachment.id(), request.host(), + request.port(), request.method(), request.path(), response.status(), body); + } else if (response.status() == 200) { + return new Attachment.Bytes(attachment.id(), BytesReference.toBytes(response.body()), + response.contentType(), attachment.inline()); + } else { + String body = response.body() != null ? response.body().utf8ToString() : null; + String message = LoggerMessageFormat.format("", "Watch[{}] reporting[{}] Unexpected status code host[{}], port[{}], " + + "method[{}], path[{}], status[{}], body[{}]", context.watch().id(), attachment.id(), request.host(), + request.port(), request.method(), request.path(), response.status(), body); + throw new IllegalStateException(message); + } + } + + throw new ElasticsearchException("Watch[{}] reporting[{}]: Aborting due to maximum number of retries hit [{}]", + context.watch().id(), attachment.id(), maxRetries); + } + + private void sleep(long sleepMillis, WatchExecutionContext context, ReportingAttachment attachment) { + try { + Thread.sleep(sleepMillis); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new ElasticsearchException("Watch[{}] reporting[{}] thread was interrupted, while waiting for polling. 
Aborting.", + context.watch().id(), attachment.id()); + } + } + + /** + * Use the default time to sleep between polls if it was not set + */ + private long getSleepMillis(WatchExecutionContext context, ReportingAttachment attachment) { + long sleepMillis; + if (attachment.interval() == null) { + sleepMillis = interval.millis(); + logger.trace("Watch[{}] reporting[{}] invalid interval configuration [{}], using configured default [{}]", context.watch().id(), + attachment.id(), attachment.interval(), this.interval); + } else { + sleepMillis = attachment.interval().millis(); + } + return sleepMillis; + } + + /** + * Trigger the initial report generation and catch possible exceptions + */ + private HttpResponse requestReportGeneration(String watchId, String attachmentId, HttpRequest request) throws IOException { + HttpResponse response = httpClient.execute(request); + if (response.status() != 200) { + throw new ElasticsearchException("Watch[{}] reporting[{}] Error response when trying to trigger reporting generation " + + "host[{}], port[{}] method[{}], path[{}], status[{}]", watchId, attachmentId, request.host(), + request.port(), request.method(), request.path(), response.status()); + } + + return response; + } + + /** + * Extract the id from JSON payload, so we know which ID to poll for + */ + private String extractIdFromJson(String watchId, String attachmentId, BytesReference body) throws IOException { + // EMPTY is safe here becaus we never call namedObject + try (InputStream stream = body.streamInput(); + XContentParser parser = JsonXContent.jsonXContent + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + KibanaReportingPayload payload = new KibanaReportingPayload(); + PAYLOAD_PARSER.parse(parser, payload, null); + String path = payload.getPath(); + if (Strings.isEmpty(path)) { + throw new ElasticsearchException("Watch[{}] reporting[{}] field path found in JSON payload, payload was {}", + watchId, attachmentId, body.utf8ToString()); + } + return path; + } + } + + /** + * A helper class to parse HTTP auth and proxy structures, which is read by an old school pull parser, that is handed over in the ctor. + * See the static parser definition at the top + */ + private static class AuthParseContext { + + private final HttpAuthRegistry authRegistry; + + AuthParseContext(HttpAuthRegistry authRegistry) { + this.authRegistry = authRegistry; + } + + HttpAuth parseAuth(XContentParser parser) { + try { + return authRegistry.parse(parser); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + HttpProxy parseProxy(XContentParser parser) { + try { + return HttpProxy.parse(parser); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + } + + /** + * Helper class to extract the URL path of the dashboard from the response after a report was triggered + * + * Example JSON: { "path" : "/path/to/dashboard.pdf", ... otherstuff ... 
} + */ + static class KibanaReportingPayload { + + private String path; + + public String getPath() { + return path; + } + + public void setPath(String path) { + this.path = path; + } + } + + /** + * Builder helper class used by the ObjectParser to create an attachment from xcontent input + */ + static class Builder { + + private final String id; + private boolean inline; + private String url; + private TimeValue interval; + private Integer retries; + private HttpAuth auth; + private HttpProxy proxy; + + Builder(String id) { + this.id = id; + } + + Builder url(String url) { + this.url = url; + return this; + } + + // package protected, so it can be used by the object parser in ReportingAttachmentParser + Builder interval(String waitTime) { + this.interval = TimeValue.parseTimeValue(waitTime, "attachment.reporting.interval"); + return this; + } + + Builder retries(Integer retries) { + this.retries = retries; + return this; + } + + Builder inline(boolean inline) { + this.inline = inline; + return this; + } + + Builder auth(HttpAuth auth) { + this.auth = auth; + return this; + } + + Builder proxy(HttpProxy proxy) { + this.proxy = proxy; + return this; + } + + ReportingAttachment build() { + return new ReportingAttachment(id, url, inline, interval, retries, auth, proxy); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/support/BodyPartSource.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/support/BodyPartSource.java new file mode 100644 index 0000000000000..bfd8ac0b104c6 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/support/BodyPartSource.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.email.support; + +import org.elasticsearch.SpecialPermission; +import org.elasticsearch.common.xcontent.ToXContentObject; + +import javax.activation.FileTypeMap; +import javax.mail.MessagingException; +import javax.mail.internet.MimeBodyPart; +import java.security.AccessController; +import java.security.PrivilegedAction; + +public abstract class BodyPartSource implements ToXContentObject { + + protected static FileTypeMap fileTypeMap; + static { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(new SpecialPermission()); + } + fileTypeMap = AccessController.doPrivileged( + (PrivilegedAction)() -> FileTypeMap.getDefaultFileTypeMap()); + } + + protected final String id; + protected final String name; + protected final String contentType; + + public BodyPartSource(String id, String contentType) { + this(id, id, contentType); + } + + public BodyPartSource(String id, String name, String contentType) { + this.id = id; + this.name = name; + this.contentType = contentType; + } + + public String id() { + return id; + } + + public String name() { + return name; + } + + public String contentType() { + return contentType; + } + + public abstract MimeBodyPart bodyPart() throws MessagingException; + + // exists only to allow ensuring class is initialized + public static void init() {} + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatAccount.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatAccount.java new file mode 100644 index 0000000000000..53f8c1533a193 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatAccount.java @@ -0,0 +1,131 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.hipchat; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.settings.SecureSetting; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; + +import java.io.IOException; +import java.util.Locale; +import java.util.Map; + +public abstract class HipChatAccount { + + public static final String AUTH_TOKEN_SETTING = "auth_token"; + public static final String ROOM_SETTING = HipChatMessage.Field.ROOM.getPreferredName(); + public static final String DEFAULT_ROOM_SETTING = "message_defaults." + HipChatMessage.Field.ROOM.getPreferredName(); + public static final String DEFAULT_USER_SETTING = "message_defaults." + HipChatMessage.Field.USER.getPreferredName(); + public static final String DEFAULT_FROM_SETTING = "message_defaults." + HipChatMessage.Field.FROM.getPreferredName(); + public static final String DEFAULT_FORMAT_SETTING = "message_defaults." 
+ HipChatMessage.Field.FORMAT.getPreferredName(); + public static final String DEFAULT_COLOR_SETTING = "message_defaults." + HipChatMessage.Field.COLOR.getPreferredName(); + public static final String DEFAULT_NOTIFY_SETTING = "message_defaults." + HipChatMessage.Field.NOTIFY.getPreferredName(); + + private static final Setting SECURE_AUTH_TOKEN_SETTING = SecureSetting.secureString("secure_" + AUTH_TOKEN_SETTING, null); + + protected final Logger logger; + protected final String name; + protected final Profile profile; + protected final HipChatServer server; + protected final HttpClient httpClient; + protected final String authToken; + + protected HipChatAccount(String name, Profile profile, Settings settings, HipChatServer defaultServer, HttpClient httpClient, + Logger logger) { + this.name = name; + this.profile = profile; + this.server = new HipChatServer(settings, defaultServer); + this.httpClient = httpClient; + this.authToken = getAuthToken(name, settings); + this.logger = logger; + } + + private static String getAuthToken(String name, Settings settings) { + String authToken = settings.get(AUTH_TOKEN_SETTING); + if (authToken == null || authToken.length() == 0) { + SecureString secureString = SECURE_AUTH_TOKEN_SETTING.get(settings); + if (secureString == null || secureString.length() < 1) { + throw new SettingsException("hipchat account [" + name + "] missing required [" + AUTH_TOKEN_SETTING + "] setting"); + } + authToken = secureString.toString(); + } + + return authToken; + } + + public abstract String type(); + + public abstract void validateParsedTemplate(String watchId, String actionId, HipChatMessage.Template message) throws SettingsException; + + public abstract HipChatMessage render(String watchId, String actionId, TextTemplateEngine engine, HipChatMessage.Template template, + Map model); + + public abstract SentMessages send(HipChatMessage message, @Nullable HttpProxy proxy); + + public enum Profile { + + V1() { + @Override + HipChatAccount createAccount(String name, Settings settings, HipChatServer defaultServer, HttpClient httpClient, + Logger logger) { + return new V1Account(name, settings, defaultServer, httpClient, logger); + } + }, + INTEGRATION() { + @Override + HipChatAccount createAccount(String name, Settings settings, HipChatServer defaultServer, HttpClient httpClient, + Logger logger) { + return new IntegrationAccount(name, settings, defaultServer, httpClient, logger); + } + }, + USER() { + @Override + HipChatAccount createAccount(String name, Settings settings, HipChatServer defaultServer, HttpClient httpClient, + Logger logger) { + return new UserAccount(name, settings, defaultServer, httpClient, logger); + } + }; + + abstract HipChatAccount createAccount(String name, Settings settings, HipChatServer defaultServer, HttpClient httpClient, + Logger logger); + + public String value() { + return name().toLowerCase(Locale.ROOT); + } + + public static Profile parse(XContentParser parser) throws IOException { + return Profile.valueOf(parser.text().toUpperCase(Locale.ROOT)); + } + + public static Profile resolve(String value, Profile defaultValue) { + if (value == null) { + return defaultValue; + } + return Profile.valueOf(value.toUpperCase(Locale.ROOT)); + } + + public static Profile resolve(Settings settings, String setting, Profile defaultValue) { + return resolve(settings.get(setting), defaultValue); + } + + public static boolean validate(String value) { + try { + Profile.valueOf(value.toUpperCase(Locale.ROOT)); + return true; + } catch (IllegalArgumentException 
ilae) { + return false; + } + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatMessage.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatMessage.java new file mode 100644 index 0000000000000..ae08bb85e9dfd --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatMessage.java @@ -0,0 +1,479 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.hipchat; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +public class HipChatMessage implements ToXContentObject { + + final String body; + @Nullable final String[] rooms; + @Nullable final String[] users; + @Nullable final String from; + @Nullable final Format format; + @Nullable final Color color; + @Nullable final Boolean notify; + + public HipChatMessage(String body, String[] rooms, String[] users, String from, Format format, Color color, Boolean notify) { + this.body = body; + this.rooms = rooms; + this.users = users; + this.from = from; + this.format = format; + this.color = color; + this.notify = notify; + } + + public String getBody() { + return body; + } + + public String[] getRooms() { + return rooms; + } + + @Nullable + public String[] getUsers() { + return users; + } + + @Nullable + public String getFrom() { + return from; + } + + @Nullable + public Format getFormat() { + return format; + } + + @Nullable + public Color getColor() { + return color; + } + + @Nullable + public Boolean getNotify() { + return notify; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + HipChatMessage that = (HipChatMessage) o; + return Objects.equals(body, that.body) && + Objects.deepEquals(rooms, that.rooms) && + Objects.deepEquals(users, that.users) && + Objects.equals(from, that.from) && + Objects.equals(format, that.format) && + Objects.equals(color, that.color) && + Objects.equals(notify, that.notify); + } + + @Override + public int hashCode() { + int result = body.hashCode(); + result = 31 * result + (rooms != null ? Arrays.hashCode(rooms) : 0); + result = 31 * result + (users != null ? Arrays.hashCode(users) : 0); + result = 31 * result + (from != null ? from.hashCode() : 0); + result = 31 * result + (format != null ? format.hashCode() : 0); + result = 31 * result + (color != null ? color.hashCode() : 0); + result = 31 * result + (notify != null ? 
notify.hashCode() : 0); + return result; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return toXContent(builder, params, true); + } + + public XContentBuilder toXContent(XContentBuilder builder, Params params, boolean includeTargets) throws IOException { + builder.startObject(); + if (from != null) { + builder.field(Field.FROM.getPreferredName(), from); + } + if (includeTargets) { + if (rooms != null && rooms.length > 0) { + builder.array(Field.ROOM.getPreferredName(), rooms); + } + if (users != null && users.length > 0) { + builder.array(Field.USER.getPreferredName(), users); + } + } + builder.field(Field.BODY.getPreferredName(), body); + if (format != null) { + builder.field(Field.FORMAT.getPreferredName(), format.value()); + } + if (color != null) { + builder.field(Field.COLOR.getPreferredName(), color.value()); + } + if (notify != null) { + builder.field(Field.NOTIFY.getPreferredName(), notify); + } + return builder.endObject(); + } + + public static class Template implements ToXContentObject { + + final TextTemplate body; + @Nullable final TextTemplate[] rooms; + @Nullable final TextTemplate[] users; + @Nullable final String from; + @Nullable final Format format; + @Nullable final TextTemplate color; + @Nullable final Boolean notify; + + public Template(TextTemplate body, + TextTemplate[] rooms, + TextTemplate[] users, + String from, + Format format, + TextTemplate color, + Boolean notify) { + this.rooms = rooms; + this.users = users; + this.body = body; + this.from = from; + this.format = format; + this.color = color; + this.notify = notify; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Template template = (Template) o; + + return Objects.equals(body, template.body) && + Objects.deepEquals(rooms, template.rooms) && + Objects.deepEquals(users, template.users) && + Objects.equals(from, template.from) && + Objects.equals(format, template.format) && + Objects.equals(color, template.color) && + Objects.equals(notify, template.notify); + } + + @Override + public int hashCode() { + return Objects.hash(body, rooms, users, from, format, color, notify); + } + + public HipChatMessage render(TextTemplateEngine engine, Map model) { + String body = engine.render(this.body, model); + String[] rooms = null; + if (this.rooms != null) { + rooms = new String[this.rooms.length]; + for (int i = 0; i < this.rooms.length; i++) { + rooms[i] = engine.render(this.rooms[i], model); + } + } + String[] users = null; + if (this.users != null) { + users = new String[this.users.length]; + for (int i = 0; i < this.users.length; i++) { + users[i] = engine.render(this.users[i], model); + } + } + Color color = this.color == null ? 
null : Color.resolve(engine.render(this.color, model), null); + return new HipChatMessage(body, rooms, users, from, format, color, notify); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (from != null) { + builder.field(Field.FROM.getPreferredName(), from); + } + if (rooms != null && rooms.length > 0) { + builder.startArray(Field.ROOM.getPreferredName()); + for (TextTemplate room : rooms) { + room.toXContent(builder, params); + } + builder.endArray(); + } + if (users != null && users.length > 0) { + builder.startArray(Field.USER.getPreferredName()); + for (TextTemplate user : users) { + user.toXContent(builder, params); + } + builder.endArray(); + } + builder.field(Field.BODY.getPreferredName(), body, params); + if (format != null) { + builder.field(Field.FORMAT.getPreferredName(), format.value()); + } + if (color != null) { + builder.field(Field.COLOR.getPreferredName(), color, params); + } + if (notify != null) { + builder.field(Field.NOTIFY.getPreferredName(), notify); + } + return builder.endObject(); + } + + public static Template parse(XContentParser parser) throws IOException { + TextTemplate body = null; + TextTemplate[] rooms = null; + TextTemplate[] users = null; + String from = null; + TextTemplate color = null; + Boolean notify = null; + HipChatMessage.Format messageFormat = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Field.FROM.match(currentFieldName, parser.getDeprecationHandler())) { + from = parser.text(); + } else if (Field.ROOM.match(currentFieldName, parser.getDeprecationHandler())) { + List templates = new ArrayList<>(); + if (token == XContentParser.Token.START_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + try { + templates.add(TextTemplate.parse(parser)); + } catch (ElasticsearchParseException epe) { + throw new ElasticsearchParseException("failed to parse hipchat message. failed to parse [{}] field", epe, + Field.ROOM.getPreferredName()); + } + } + } else { + try { + templates.add(TextTemplate.parse(parser)); + } catch (ElasticsearchParseException epe) { + throw new ElasticsearchParseException("failed to parse hipchat message. failed to parse [{}] field", epe, + Field.ROOM.getPreferredName()); + } + } + rooms = templates.toArray(new TextTemplate[templates.size()]); + } else if (Field.USER.match(currentFieldName, parser.getDeprecationHandler())) { + List templates = new ArrayList<>(); + if (token == XContentParser.Token.START_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + try { + templates.add(TextTemplate.parse(parser)); + } catch (ElasticsearchParseException epe) { + throw new ElasticsearchParseException("failed to parse hipchat message. failed to parse [{}] field", epe, + Field.USER.getPreferredName()); + } + } + } else { + try { + templates.add(TextTemplate.parse(parser)); + } catch (ElasticsearchParseException epe) { + throw new ElasticsearchParseException("failed to parse hipchat message. 
failed to parse [{}] field", epe, + Field.USER.getPreferredName()); + } + } + users = templates.toArray(new TextTemplate[templates.size()]); + } else if (Field.COLOR.match(currentFieldName, parser.getDeprecationHandler())) { + try { + color = TextTemplate.parse(parser); + } catch (ElasticsearchParseException | IllegalArgumentException e) { + throw new ElasticsearchParseException("failed to parse hipchat message. failed to parse [{}] field", e, + Field.COLOR.getPreferredName()); + } + } else if (Field.NOTIFY.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_BOOLEAN) { + notify = parser.booleanValue(); + } else { + throw new ElasticsearchParseException("failed to parse hipchat message. failed to parse [{}] field, expected a " + + "boolean value but found [{}]", Field.NOTIFY.getPreferredName(), token); + } + } else if (Field.BODY.match(currentFieldName, parser.getDeprecationHandler())) { + try { + body = TextTemplate.parse(parser); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("failed to parse hipchat message. failed to parse [{}] field", pe, + Field.BODY.getPreferredName()); + } + } else if (Field.FORMAT.match(currentFieldName, parser.getDeprecationHandler())) { + try { + messageFormat = HipChatMessage.Format.parse(parser); + } catch (IllegalArgumentException ilae) { + throw new ElasticsearchParseException("failed to parse hipchat message. failed to parse [{}] field", ilae, + Field.FORMAT.getPreferredName()); + } + } else { + throw new ElasticsearchParseException("failed to parse hipchat message. unexpected field [{}]", currentFieldName); + } + } + + if (body == null) { + throw new ElasticsearchParseException("failed to parse hipchat message. missing required [{}] field", + Field.BODY.getPreferredName()); + } + + return new HipChatMessage.Template(body, rooms, users, from, messageFormat, color, notify); + } + + public static class Builder { + + final TextTemplate body; + final List rooms = new ArrayList<>(); + final List users = new ArrayList<>(); + @Nullable String from; + @Nullable Format format; + @Nullable TextTemplate color; + @Nullable Boolean notify; + + public Builder(TextTemplate body) { + this.body = body; + } + + public Builder addRooms(TextTemplate... rooms) { + this.rooms.addAll(Arrays.asList(rooms)); + return this; + } + + public Builder addUsers(TextTemplate... users) { + this.users.addAll(Arrays.asList(users)); + return this; + } + + public Builder setFrom(String from) { + this.from = from; + return this; + } + + public Builder setFormat(Format format) { + this.format = format; + return this; + } + + public Builder setColor(TextTemplate color) { + this.color = color; + return this; + } + + public Builder setNotify(boolean notify) { + this.notify = notify; + return this; + } + + public Template build() { + return new Template( + body, + rooms.isEmpty() ? null : rooms.toArray(new TextTemplate[rooms.size()]), + users.isEmpty() ? 
null : users.toArray(new TextTemplate[users.size()]), + from, + format, + color, + notify); + } + } + } + + + public enum Color { + YELLOW, GREEN, RED, PURPLE, GRAY, RANDOM; + + private final TextTemplate template = new TextTemplate(name()); + + public TextTemplate asTemplate() { + return template; + } + + public String value() { + return name().toLowerCase(Locale.ROOT); + } + + public static Color parse(XContentParser parser) throws IOException { + return Color.valueOf(parser.text().toUpperCase(Locale.ROOT)); + } + + public static Color resolve(String value, Color defaultValue) { + if (value == null) { + return defaultValue; + } + return Color.valueOf(value.toUpperCase(Locale.ROOT)); + } + + public static Color resolve(Settings settings, String setting, Color defaultValue) { + return resolve(settings.get(setting), defaultValue); + } + + public static boolean validate(String value) { + try { + Color.valueOf(value.toUpperCase(Locale.ROOT)); + return true; + } catch (IllegalArgumentException ilae) { + return false; + } + } + } + + public enum Format { + + TEXT, + HTML; + + private final TextTemplate template = new TextTemplate(name()); + + public TextTemplate asTemplate() { + return template; + } + + public String value() { + return name().toLowerCase(Locale.ROOT); + } + + public static Format parse(XContentParser parser) throws IOException { + return Format.valueOf(parser.text().toUpperCase(Locale.ROOT)); + } + + public static Format resolve(String value, Format defaultValue) { + if (value == null) { + return defaultValue; + } + return Format.valueOf(value.toUpperCase(Locale.ROOT)); + } + + public static Format resolve(Settings settings, String setting, Format defaultValue) { + return resolve(settings.get(setting), defaultValue); + } + + public static boolean validate(String value) { + try { + Format.valueOf(value.toUpperCase(Locale.ROOT)); + return true; + } catch (IllegalArgumentException ilae) { + return false; + } + } + } + + public interface Field { + ParseField ROOM = new ParseField("room"); + ParseField USER = new ParseField("user"); + ParseField BODY = new ParseField("body"); + ParseField FROM = new ParseField("from"); + ParseField COLOR = new ParseField("color"); + ParseField NOTIFY = new ParseField("notify"); + ParseField FORMAT = new ParseField("format"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatServer.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatServer.java new file mode 100644 index 0000000000000..4a40a1d47fbc3 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatServer.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.hipchat; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; + +public class HipChatServer { + + public static final String HOST_SETTING = "host"; + public static final String PORT_SETTING = "port"; + + public static final HipChatServer DEFAULT = new HipChatServer("api.hipchat.com", 443, null); + + private final String host; + private final int port; + private final HipChatServer fallback; + + public HipChatServer(Settings settings) { + this(settings, DEFAULT); + } + + public HipChatServer(Settings settings, HipChatServer fallback) { + this(settings.get(HOST_SETTING, null), settings.getAsInt(PORT_SETTING, -1), fallback); + } + + public HipChatServer(String host, int port, HipChatServer fallback) { + this.host = host; + this.port = port; + this.fallback = fallback; + } + + public String host() { + return host != null ? host : fallback.host(); + } + + public int port() { + return port > 0 ? port : fallback.port(); + } + + public HipChatServer fallback() { + return fallback != null ? fallback : DEFAULT; + } + + public HipChatServer rebuild(Settings settings, HipChatServer fallback) { + return new HipChatServer(settings.get(HOST_SETTING, host), settings.getAsInt(PORT_SETTING, port), fallback); + } + + public synchronized HttpRequest.Builder httpRequest() { + return HttpRequest.builder(host(), port()); + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatService.java new file mode 100644 index 0000000000000..477b4545294bd --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatService.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.hipchat; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.SecureSetting; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.notification.NotificationService; + +import java.util.Arrays; +import java.util.List; + +/** + * A component to store hipchat credentials. 
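+ * Accounts are configured under the "xpack.notification.hipchat.account." setting prefix and are + * rebuilt dynamically through the cluster settings update consumers registered in the constructor below.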
+ */ +public class HipChatService extends NotificationService { + + private static final Setting SETTING_DEFAULT_ACCOUNT = + Setting.simpleString("xpack.notification.hipchat.default_account", Setting.Property.Dynamic, Setting.Property.NodeScope); + + static final Setting SETTING_DEFAULT_HOST = + Setting.simpleString("xpack.notification.hipchat.host", Setting.Property.Dynamic, Setting.Property.NodeScope); + + static final Setting SETTING_DEFAULT_PORT = + Setting.intSetting("xpack.notification.hipchat.port", 443, Setting.Property.Dynamic, Setting.Property.NodeScope); + + private static final Setting.AffixSetting SETTING_AUTH_TOKEN = + Setting.affixKeySetting("xpack.notification.hipchat.account.", "auth_token", + (key) -> Setting.simpleString(key, Setting.Property.Dynamic, Setting.Property.NodeScope, Setting.Property.Filtered, + Setting.Property.Deprecated)); + + private static final Setting.AffixSetting SETTING_AUTH_TOKEN_SECURE = + Setting.affixKeySetting("xpack.notification.hipchat.account.", "secure_auth_token", + (key) -> SecureSetting.simpleString(key, Setting.Property.Dynamic, Setting.Property.NodeScope, + Setting.Property.Filtered)); + + private static final Setting.AffixSetting SETTING_PROFILE = + Setting.affixKeySetting("xpack.notification.hipchat.account.", "profile", + (key) -> Setting.simpleString(key, Setting.Property.Dynamic, Setting.Property.NodeScope)); + + private static final Setting.AffixSetting SETTING_ROOM = + Setting.affixKeySetting("xpack.notification.hipchat.account.", "room", + (key) -> Setting.simpleString(key, Setting.Property.Dynamic, Setting.Property.NodeScope)); + + private static final Setting.AffixSetting SETTING_HOST = + Setting.affixKeySetting("xpack.notification.hipchat.account.", "host", + (key) -> Setting.simpleString(key, Setting.Property.Dynamic, Setting.Property.NodeScope)); + + private static final Setting.AffixSetting SETTING_PORT = + Setting.affixKeySetting("xpack.notification.hipchat.account.", "port", + (key) -> Setting.intSetting(key, 443, Setting.Property.Dynamic, Setting.Property.NodeScope)); + + private static final Setting.AffixSetting SETTING_MESSAGE_DEFAULTS = + Setting.affixKeySetting("xpack.notification.hipchat.account.", "message", + (key) -> Setting.groupSetting(key + ".", Setting.Property.Dynamic, Setting.Property.NodeScope)); + + + private final HttpClient httpClient; + private HipChatServer defaultServer; + + public HipChatService(Settings settings, HttpClient httpClient, ClusterSettings clusterSettings) { + super(settings, "hipchat"); + this.httpClient = httpClient; + clusterSettings.addSettingsUpdateConsumer(this::setAccountSetting, getSettings()); + // ensure logging of setting changes + clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {}); + clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_HOST, (s) -> {}); + clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_PORT, (s) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_AUTH_TOKEN, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_AUTH_TOKEN_SECURE, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_PROFILE, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_ROOM, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_HOST, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_PORT, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_MESSAGE_DEFAULTS, (s, o) -> {}, (s, o) -> {}); + 
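+ // do an initial load of the accounts from the node settings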
+ setAccountSetting(settings); + } + + @Override + protected synchronized void setAccountSetting(Settings settings) { + defaultServer = new HipChatServer(settings.getByPrefix("xpack.notification.hipchat.")); + super.setAccountSetting(settings); + } + + @Override + protected HipChatAccount createAccount(String name, Settings accountSettings) { + HipChatAccount.Profile profile = HipChatAccount.Profile.resolve(accountSettings, "profile", null); + if (profile == null) { + throw new SettingsException("missing [profile] setting for hipchat account [" + name + "]"); + } + return profile.createAccount(name, accountSettings, defaultServer, httpClient, logger); + } + + public static List> getSettings() { + return Arrays.asList(SETTING_DEFAULT_ACCOUNT, SETTING_AUTH_TOKEN, SETTING_AUTH_TOKEN_SECURE, SETTING_PROFILE, SETTING_ROOM, + SETTING_MESSAGE_DEFAULTS, SETTING_DEFAULT_HOST, SETTING_DEFAULT_PORT, SETTING_HOST, SETTING_PORT); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/IntegrationAccount.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/IntegrationAccount.java new file mode 100644 index 0000000000000..8af00ae8f8169 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/IntegrationAccount.java @@ -0,0 +1,136 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.hipchat; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.xpack.watcher.actions.hipchat.HipChatAction; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpMethod; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.http.Scheme; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatMessage.Color; +import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatMessage.Format; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +public class IntegrationAccount extends HipChatAccount { + + public static final String TYPE = "integration"; + + final String room; + final Defaults defaults; + + public IntegrationAccount(String name, Settings settings, HipChatServer defaultServer, HttpClient httpClient, Logger logger) { + super(name, Profile.INTEGRATION, settings, defaultServer, httpClient, logger); + List rooms = settings.getAsList(ROOM_SETTING, null); + if (rooms == null || rooms.isEmpty()) { + throw new SettingsException("invalid hipchat account [" + name + "]. missing required [" + ROOM_SETTING + "] setting for [" + + TYPE + "] account profile"); + } + if (rooms.size() > 1) { + throw new SettingsException("invalid hipchat account [" + name + "]. 
[" + ROOM_SETTING + "] setting for [" + TYPE + "] " + + "account must only be set with a single value"); + } + this.room = rooms.get(0); + defaults = new Defaults(settings); + } + + @Override + public String type() { + return TYPE; + } + + @Override + public void validateParsedTemplate(String watchId, String actionId, HipChatMessage.Template template) throws SettingsException { + if (template.rooms != null) { + throw new ElasticsearchParseException("invalid [" + HipChatAction.TYPE + "] action for [" + watchId + "/" + actionId + "] " + + "action. [" + name + "] hipchat account doesn't support custom rooms"); + } + if (template.users != null) { + throw new ElasticsearchParseException("invalid [" + HipChatAction.TYPE + "] action for [" + watchId + "/" + actionId + "] " + + "action. [" + name + "] hipchat account doesn't support user private messages"); + } + if (template.from != null) { + throw new ElasticsearchParseException("invalid [" + HipChatAction.TYPE + "] action for [" + watchId + "/" + actionId + "] " + + "action. [" + name + "] hipchat account doesn't support custom `from` fields"); + } + } + + @Override + public HipChatMessage render(String watchId, String actionId, TextTemplateEngine engine, HipChatMessage.Template template, + Map model) { + String message = engine.render(template.body, model); + Color color = template.color != null ? Color.resolve(engine.render(template.color, model), defaults.color) : defaults.color; + Boolean notify = template.notify != null ? template.notify : defaults.notify; + Format messageFormat = template.format != null ? template.format : defaults.format; + return new HipChatMessage(message, null, null, null, messageFormat, color, notify); + } + + @Override + public SentMessages send(HipChatMessage message, @Nullable HttpProxy proxy) { + List sentMessages = new ArrayList<>(); + HttpRequest request = buildRoomRequest(room, message, proxy); + try { + HttpResponse response = httpClient.execute(request); + sentMessages.add(SentMessages.SentMessage.responded(room, SentMessages.SentMessage.TargetType.ROOM, message, request, + response)); + } catch (Exception e) { + logger.error("failed to execute hipchat api http request", e); + sentMessages.add(SentMessages.SentMessage.error(room, SentMessages.SentMessage.TargetType.ROOM, message, e)); + } + return new SentMessages(name, sentMessages); + } + + private HttpRequest buildRoomRequest(String room, final HipChatMessage message, HttpProxy proxy) { + String urlEncodedRoom = HttpRequest.encodeUrl(room); + HttpRequest.Builder builder = server.httpRequest() + .method(HttpMethod.POST) + .scheme(Scheme.HTTPS) + .path("/v2/room/" + urlEncodedRoom + "/notification") + .setHeader("Content-Type", "application/json") + .setHeader("Authorization", "Bearer " + authToken) + .body(Strings.toString((xbuilder, params) -> { + xbuilder.field("message", message.body); + if (message.format != null) { + xbuilder.field("message_format", message.format.value()); + } + if (message.notify != null) { + xbuilder.field("notify", message.notify); + } + if (message.color != null) { + xbuilder.field("color", String.valueOf(message.color.value())); + } + return xbuilder; + })); + if (proxy != null) { + builder.proxy(proxy); + } + return builder.build(); + } + + static class Defaults { + + @Nullable final Format format; + @Nullable final Color color; + @Nullable final Boolean notify; + + Defaults(Settings settings) { + this.format = Format.resolve(settings, DEFAULT_FORMAT_SETTING, null); + this.color = Color.resolve(settings, DEFAULT_COLOR_SETTING, 
null); + this.notify = settings.getAsBoolean(DEFAULT_NOTIFY_SETTING, null); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/SentMessages.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/SentMessages.java new file mode 100644 index 0000000000000..ed05c4fe5ad3c --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/SentMessages.java @@ -0,0 +1,153 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.hipchat; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; + +public class SentMessages implements ToXContentObject, Iterable { + + private static final ParseField ACCOUNT = new ParseField("account"); + private static final ParseField SENT_MESSAGES = new ParseField("sent_messages"); + + private String accountName; + private List messages; + + public SentMessages(String accountName, List messages) { + this.accountName = accountName; + this.messages = messages; + } + + public String getAccountName() { + return accountName; + } + + @Override + public Iterator iterator() { + return messages.iterator(); + } + + public int count() { + return messages.size(); + } + + public List asList() { + return Collections.unmodifiableList(messages); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ACCOUNT.getPreferredName(), accountName); + builder.startArray(SENT_MESSAGES.getPreferredName()); + for (SentMessage message : messages) { + message.toXContent(builder, params); + } + builder.endArray(); + return builder.endObject(); + } + + public static class SentMessage implements ToXContentObject { + + private static final ParseField STATUS = new ParseField("status"); + private static final ParseField REQUEST = new ParseField("request"); + private static final ParseField RESPONSE = new ParseField("response"); + private static final ParseField MESSAGE = new ParseField("message"); + + public enum TargetType { + ROOM, USER; + + final String fieldName = new String(name().toLowerCase(Locale.ROOT)); + } + + final String targetName; + final TargetType targetType; + final HipChatMessage message; + @Nullable final HttpRequest request; + @Nullable final HttpResponse response; + @Nullable final Exception exception; + + public static SentMessage responded(String targetName, TargetType targetType, HipChatMessage message, HttpRequest request, + HttpResponse response) { + return new SentMessage(targetName, targetType, message, request, response, null); + } + + public static SentMessage error(String targetName, TargetType targetType, 
HipChatMessage message, Exception e) { + return new SentMessage(targetName, targetType, message, null, null, e); + } + + private SentMessage(String targetName, TargetType targetType, HipChatMessage message, HttpRequest request, HttpResponse response, + Exception exception) { + this.targetName = targetName; + this.targetType = targetType; + this.message = message; + this.request = request; + this.response = response; + this.exception = exception; + } + + public HttpRequest getRequest() { + return request; + } + + public HttpResponse getResponse() { + return response; + } + + public Exception getException() { + return exception; + } + + public boolean isSuccess() { + return response != null && response.status() >= 200 && response.status() < 300; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + boolean success = isSuccess(); + builder.field(STATUS.getPreferredName(), success ? "success" : "failure"); + if (success == false) { + if (request != null) { + if (WatcherParams.hideSecrets(params)) { + // this writes out the request to the byte array output stream with the correct excludes for hipchat + try (InputStream is = HttpRequest.filterToXContent(request, builder.contentType().xContent(), + params, "params.auth_token")) { + builder.rawField(REQUEST.getPreferredName(), is, builder.contentType()); + } + } else { + builder.field(REQUEST.getPreferredName()); + request.toXContent(builder, params); + } + } + if (response != null) { + builder.field(RESPONSE.getPreferredName()); + response.toXContent(builder, params); + } + if (exception != null) { + ElasticsearchException.generateFailureXContent(builder, params, exception, true); + } + } + builder.field(targetType.fieldName, targetName); + builder.field(MESSAGE.getPreferredName()); + message.toXContent(builder, params, false); + return builder.endObject(); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/UserAccount.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/UserAccount.java new file mode 100644 index 0000000000000..c0b89cc66ec5f --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/UserAccount.java @@ -0,0 +1,192 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.hipchat; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpMethod; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.http.Scheme; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatMessage.Color; +import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatMessage.Format; +import org.elasticsearch.xpack.watcher.actions.hipchat.HipChatAction; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +public class UserAccount extends HipChatAccount { + + public static final String TYPE = "user"; + + final Defaults defaults; + + public UserAccount(String name, Settings settings, HipChatServer defaultServer, HttpClient httpClient, Logger logger) { + super(name, Profile.USER, settings, defaultServer, httpClient, logger); + defaults = new Defaults(settings); + } + + @Override + public String type() { + return TYPE; + } + + @Override + public void validateParsedTemplate(String watchId, String actionId, HipChatMessage.Template template) throws SettingsException { + if (template.from != null) { + throw new ElasticsearchParseException("invalid [" + HipChatAction.TYPE + "] action for [" + watchId + "/" + actionId + "]. [" + + name + "] hipchat account doesn't support custom `from` fields"); + } + } + + @Override + public HipChatMessage render(String watchId, String actionId, TextTemplateEngine engine, HipChatMessage.Template template, + Map model) { + String[] rooms = defaults.rooms; + if (template.rooms != null) { + rooms = new String[template.rooms.length]; + for (int i = 0; i < template.rooms.length; i++) { + rooms[i] = engine.render(template.rooms[i], model); + } + } + String[] users = defaults.users; + if (template.users != null) { + users = new String[template.users.length]; + for (int i = 0; i < template.users.length; i++) { + users[i] = engine.render(template.users[i], model); + } + } + String message = engine.render(template.body, model); + Color color = Color.resolve(engine.render(template.color, model), defaults.color); + Boolean notify = template.notify != null ? template.notify : defaults.notify; + Format messageFormat = template.format != null ? 
template.format : defaults.format; + return new HipChatMessage(message, rooms, users, null, messageFormat, color, notify); + } + + @Override + public SentMessages send(HipChatMessage message, HttpProxy proxy) { + List sentMessages = new ArrayList<>(); + if (message.rooms != null) { + for (String room : message.rooms) { + HttpRequest request = buildRoomRequest(room, message, proxy); + try { + HttpResponse response = httpClient.execute(request); + sentMessages.add(SentMessages.SentMessage.responded(room, SentMessages.SentMessage.TargetType.ROOM, message, request, + response)); + } catch (IOException e) { + logger.error("failed to execute hipchat api http request", e); + sentMessages.add(SentMessages.SentMessage.error(room, SentMessages.SentMessage.TargetType.ROOM, message, e)); + } + } + } + if (message.users != null) { + for (String user : message.users) { + HttpRequest request = buildUserRequest(user, message, proxy); + try { + HttpResponse response = httpClient.execute(request); + sentMessages.add(SentMessages.SentMessage.responded(user, SentMessages.SentMessage.TargetType.USER, message, request, + response)); + } catch (Exception e) { + logger.error("failed to execute hipchat api http request", e); + sentMessages.add(SentMessages.SentMessage.error(user, SentMessages.SentMessage.TargetType.USER, message, e)); + } + } + } + return new SentMessages(name, sentMessages); + } + + public HttpRequest buildRoomRequest(String room, final HipChatMessage message, HttpProxy proxy) { + String urlEncodedRoom = encodeRoom(room); + HttpRequest.Builder builder = server.httpRequest() + .method(HttpMethod.POST) + .scheme(Scheme.HTTPS) + .path("/v2/room/" + urlEncodedRoom + "/notification") + .setHeader("Content-Type", "application/json") + .setHeader("Authorization", "Bearer " + authToken) + .body(Strings.toString((xbuilder, params) -> { + xbuilder.field("message", message.body); + if (message.format != null) { + xbuilder.field("message_format", message.format.value()); + } + if (message.notify != null) { + xbuilder.field("notify", message.notify); + } + if (message.color != null) { + xbuilder.field("color", String.valueOf(message.color.value())); + } + return xbuilder; + })); + if (proxy != null) { + builder.proxy(proxy); + } + return builder.build(); + } + + // this specific hipchat API does not accept application-form encoding, but requires real URL encoding + // spaces must not be replaced with a plus, but rather with %20 + // this workaround ensures, that this happens + private String encodeRoom(String text) { + try { + return new URI("//", "", "", text, null).getRawQuery(); + } catch (URISyntaxException e) { + throw new IllegalArgumentException("failed to URL encode text [" + text + "]", e); + } + + } + + public HttpRequest buildUserRequest(String user, final HipChatMessage message, HttpProxy proxy) { + HttpRequest.Builder builder = server.httpRequest() + .method(HttpMethod.POST) + .scheme(Scheme.HTTPS) + .path("/v2/user/" + user + "/message") + .setHeader("Content-Type", "application/json") + .setHeader("Authorization", "Bearer " + authToken) + .body(Strings.toString((xbuilder, params) -> { + xbuilder.field("message", message.body); + if (message.format != null) { + xbuilder.field("message_format", message.format.value()); + } + if (message.notify != null) { + xbuilder.field("notify", message.notify); + } + return xbuilder; + })); + if (proxy != null) { + builder.proxy(proxy); + } + return builder.build(); + } + + static class Defaults { + + @Nullable final String[] rooms; + @Nullable final String[] 
users; + @Nullable final Format format; + @Nullable final Color color; + @Nullable final Boolean notify; + + Defaults(Settings settings) { + List<String> rooms = settings.getAsList(DEFAULT_ROOM_SETTING, null); + this.rooms = rooms == null ? null : rooms.toArray(Strings.EMPTY_ARRAY); + List<String> users = settings.getAsList(DEFAULT_USER_SETTING, null); + this.users = users == null ? null : users.toArray(Strings.EMPTY_ARRAY); + this.format = Format.resolve(settings, DEFAULT_FORMAT_SETTING, null); + this.color = Color.resolve(settings, DEFAULT_COLOR_SETTING, null); + this.notify = settings.getAsBoolean(DEFAULT_NOTIFY_SETTING, null); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/V1Account.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/V1Account.java new file mode 100644 index 0000000000000..084cff2d0947c --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/V1Account.java @@ -0,0 +1,139 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.hipchat; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpMethod; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.http.Scheme; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatMessage.Color; +import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatMessage.Format; +import org.elasticsearch.xpack.watcher.actions.hipchat.HipChatAction; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +public class V1Account extends HipChatAccount { + + public static final String TYPE = "v1"; + + final Defaults defaults; + + public V1Account(String name, Settings settings, HipChatServer defaultServer, HttpClient httpClient, Logger logger) { + super(name, Profile.V1, settings, defaultServer, httpClient, logger); + defaults = new Defaults(settings); + } + + @Override + public String type() { + return TYPE; + } + + @Override + public void validateParsedTemplate(String watchId, String actionId, + HipChatMessage.Template template) throws ElasticsearchParseException { + if (template.users != null) { + throw new ElasticsearchParseException("invalid [" + HipChatAction.TYPE + "] action for [" + watchId + "/" + actionId + "]. [" + + name + "] hipchat account doesn't support user private messaging"); + } + if ((template.rooms == null || template.rooms.length == 0) && (defaults.rooms == null || defaults.rooms.length == 0)) { + throw new ElasticsearchParseException("invalid [" + HipChatAction.TYPE + "] action for [" + watchId + "/" + actionId + "]. 
" + + "missing required [" + HipChatMessage.Field.ROOM + "] field for [" + name + "] hipchat account"); + } + } + + @Override + public HipChatMessage render(String watchId, String actionId, TextTemplateEngine engine, HipChatMessage.Template template, + Map<String, Object> model) { + String message = engine.render(template.body, model); + String[] rooms = defaults.rooms; + if (template.rooms != null) { + rooms = new String[template.rooms.length]; + for (int i = 0; i < template.rooms.length; i++) { + rooms[i] = engine.render(template.rooms[i], model); + } + } + String from = template.from != null ? template.from : defaults.from != null ? defaults.from : watchId; + Color color = Color.resolve(engine.render(template.color, model), defaults.color); + Boolean notify = template.notify != null ? template.notify : defaults.notify; + Format messageFormat = template.format != null ? template.format : defaults.format; + return new HipChatMessage(message, rooms, null, from, messageFormat, color, notify); + } + + @Override + public SentMessages send(HipChatMessage message, @Nullable HttpProxy proxy) { + List<SentMessages.SentMessage> sentMessages = new ArrayList<>(); + if (message.rooms != null) { + for (String room : message.rooms) { + HttpRequest request = buildRoomRequest(room, message, proxy); + try { + HttpResponse response = httpClient.execute(request); + sentMessages.add(SentMessages.SentMessage.responded(room, SentMessages.SentMessage.TargetType.ROOM, message, request, + response)); + } catch (Exception e) { + logger.error("failed to execute hipchat api http request", e); + sentMessages.add(SentMessages.SentMessage.error(room, SentMessages.SentMessage.TargetType.ROOM, message, e)); + } + } + } + return new SentMessages(name, sentMessages); + } + + public HttpRequest buildRoomRequest(String room, HipChatMessage message, HttpProxy proxy) { + HttpRequest.Builder builder = server.httpRequest(); + builder.method(HttpMethod.POST); + builder.scheme(Scheme.HTTPS); + builder.path("/v1/rooms/message"); + builder.setHeader("Content-Type", "application/x-www-form-urlencoded"); + builder.setParam("format", "json"); + builder.setParam("auth_token", authToken); + if (proxy != null) { + builder.proxy(proxy); + } + StringBuilder body = new StringBuilder(); + body.append("room_id=").append(HttpRequest.encodeUrl(room)); + body.append("&from=").append(HttpRequest.encodeUrl(message.from)); + body.append("&message=").append(HttpRequest.encodeUrl(message.body)); + if (message.format != null) { + body.append("&message_format=").append(message.format.value()); + } + if (message.color != null) { + body.append("&color=").append(message.color.value()); + } + if (message.notify != null) { + body.append("&notify=").append(message.notify ? "1" : "0"); + } + builder.body(body.toString()); + return builder.build(); + } + + static class Defaults { + + @Nullable final String[] rooms; + @Nullable final String from; + @Nullable final Format format; + @Nullable final Color color; + @Nullable final Boolean notify; + + Defaults(Settings settings) { + List<String> rooms = settings.getAsList(DEFAULT_ROOM_SETTING, null); + this.rooms = rooms == null ? 
null : rooms.toArray(Strings.EMPTY_ARRAY); + this.from = settings.get(DEFAULT_FROM_SETTING); + this.format = Format.resolve(settings, DEFAULT_FORMAT_SETTING, null); + this.color = Color.resolve(settings, DEFAULT_COLOR_SETTING, null); + this.notify = settings.getAsBoolean(DEFAULT_NOTIFY_SETTING, null); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraAccount.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraAccount.java new file mode 100644 index 0000000000000..5efe09c575924 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraAccount.java @@ -0,0 +1,138 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.jira; + +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.settings.SecureSetting; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpMethod; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.http.Scheme; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuth; + +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Collections; +import java.util.Map; + +public class JiraAccount { + + /** + * Default JIRA REST API path for create issues + **/ + public static final String DEFAULT_PATH = "/rest/api/2/issue"; + + static final String USER_SETTING = "user"; + static final String PASSWORD_SETTING = "password"; + static final String URL_SETTING = "url"; + static final String ISSUE_DEFAULTS_SETTING = "issue_defaults"; + static final String ALLOW_HTTP_SETTING = "allow_http"; + + private static final Setting SECURE_USER_SETTING = SecureSetting.secureString("secure_" + USER_SETTING, null); + private static final Setting SECURE_PASSWORD_SETTING = SecureSetting.secureString("secure_" + PASSWORD_SETTING, null); + private static final Setting SECURE_URL_SETTING = SecureSetting.secureString("secure_" + URL_SETTING, null); + + private final HttpClient httpClient; + private final String name; + private final String user; + private final String password; + private final URI url; + private final Map issueDefaults; + + public JiraAccount(String name, Settings 
settings, HttpClient httpClient) { + this.httpClient = httpClient; + this.name = name; + String url = getSetting(name, URL_SETTING, settings, SECURE_URL_SETTING); + ESLoggerFactory.getLogger(getClass()).error("THE URL WAS [{}]", url); + try { + URI uri = new URI(url); + Scheme protocol = Scheme.parse(uri.getScheme()); + if ((protocol == Scheme.HTTP) && (Booleans.isTrue(settings.get(ALLOW_HTTP_SETTING)) == false)) { + throw new SettingsException("invalid jira [" + name + "] account settings. unsecure scheme [" + protocol + "]"); + } + this.url = uri; + } catch (URISyntaxException | IllegalArgumentException e) { + throw new SettingsException("invalid jira [" + name + "] account settings. invalid [" + URL_SETTING + "] setting", e); + } + this.user = getSetting(name, USER_SETTING, settings, SECURE_USER_SETTING); + if (Strings.isEmpty(this.user)) { + throw requiredSettingException(name, USER_SETTING); + } + this.password = getSetting(name, PASSWORD_SETTING, settings, SECURE_PASSWORD_SETTING); + if (Strings.isEmpty(this.password)) { + throw requiredSettingException(name, PASSWORD_SETTING); + } + try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + builder.startObject(); + settings.getAsSettings(ISSUE_DEFAULTS_SETTING).toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + try (InputStream stream = BytesReference.bytes(builder).streamInput(); + XContentParser parser = XContentType.JSON.xContent() + .createParser(new NamedXContentRegistry(Collections.emptyList()), LoggingDeprecationHandler.INSTANCE, stream)) { + this.issueDefaults = Collections.unmodifiableMap(parser.map()); + } + } catch (IOException ex) { + throw new UncheckedIOException(ex); + } + } + + private static String getSetting(String accountName, String settingName, Settings settings, Setting secureSetting) { + String value = settings.get(settingName); + if (value == null) { + SecureString secureString = secureSetting.get(settings); + if (secureString == null || secureString.length() < 1) { + throw requiredSettingException(accountName, settingName); + } + value = secureString.toString(); + } + + return value; + } + + public String getName() { + return name; + } + + public Map getDefaults() { + return issueDefaults; + } + + public JiraIssue createIssue(final Map fields, final HttpProxy proxy) throws IOException { + HttpRequest request = HttpRequest.builder(url.getHost(), url.getPort()) + .scheme(Scheme.parse(url.getScheme())) + .method(HttpMethod.POST) + .path(url.getPath().isEmpty() || url.getPath().equals("/") ? DEFAULT_PATH : url.getPath()) + .jsonBody((builder, params) -> builder.field("fields", fields)) + .auth(new BasicAuth(user, password.toCharArray())) + .proxy(proxy) + .build(); + + HttpResponse response = httpClient.execute(request); + return JiraIssue.responded(name, fields, request, response); + } + + private static SettingsException requiredSettingException(String account, String setting) { + return new SettingsException("invalid jira [" + account + "] account settings. missing required [" + setting + "] setting"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraIssue.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraIssue.java new file mode 100644 index 0000000000000..2c38d1fac3218 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraIssue.java @@ -0,0 +1,207 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.jira; + +import org.apache.http.HttpStatus; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.actions.jira.JiraAction; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class JiraIssue implements ToXContentObject { + + @Nullable final String account; + private final Map fields; + @Nullable private final HttpRequest request; + @Nullable private final HttpResponse response; + @Nullable private final String failureReason; + + public static JiraIssue responded(String account, Map fields, HttpRequest request, HttpResponse response) { + return new JiraIssue(account, fields, request, response, resolveFailureReason(response)); + } + + JiraIssue(String account, Map fields, HttpRequest request, HttpResponse response, String failureReason) { + this.account = account; + this.fields = fields; + this.request = request; + this.response = response; + this.failureReason = failureReason; + } + + public boolean successful() { + return failureReason == null; + } + + public String getAccount() { + return account; + } + + public HttpRequest getRequest() { + return request; + } + + public HttpResponse getResponse() { + return response; + } + + public Map getFields() { + return fields; + } + + public String getFailureReason() { + return failureReason; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + JiraIssue issue = (JiraIssue) o; + return Objects.equals(account, issue.account) && + Objects.equals(fields, issue.fields) && + Objects.equals(request, issue.request) && + Objects.equals(response, issue.response) && + Objects.equals(failureReason, issue.failureReason); + } + + @Override + public int hashCode() { + return Objects.hash(account, fields, request, response, failureReason); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Field.ACCOUNT.getPreferredName(), account); + if (fields != null) { + builder.field(Field.FIELDS.getPreferredName(), fields); + } + if (successful() == false) { + builder.field(Field.REASON.getPreferredName(), failureReason); + if (request != null) { + builder.field(Field.REQUEST.getPreferredName(), request, params); + } + if (response != null) { + builder.field(Field.RESPONSE.getPreferredName(), response, params); + } + } else { + try (InputStream stream = response.body().streamInput()) { + builder.rawField(Field.RESULT.getPreferredName(), stream); + } + } + return 
builder.endObject(); + } + + /** + * Resolve the failure reason, when a reason can be extracted from the response body: + * Ex: {"errorMessages":[],"errors":{"customfield_10004":"Epic Name is required."}} + *
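+ * Entries from the "errors" object and the "errorMessages" array, when present, are appended to the returned reason.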

+ * See https://docs.atlassian.com/jira/REST/cloud/ for the format of the error response body. + */ + static String resolveFailureReason(HttpResponse response) { + int status = response.status(); + if (status < 300) { + return null; + } + + StringBuilder message = new StringBuilder(); + switch (status) { + case HttpStatus.SC_BAD_REQUEST: + message.append("Bad Request"); + break; + case HttpStatus.SC_UNAUTHORIZED: + message.append("Unauthorized (authentication credentials are invalid)"); + break; + case HttpStatus.SC_FORBIDDEN: + message.append("Forbidden (account doesn't have permission to create this issue)"); + break; + case HttpStatus.SC_NOT_FOUND: + message.append("Not Found (account uses invalid JIRA REST APIs)"); + break; + case HttpStatus.SC_REQUEST_TIMEOUT: + message.append("Request Timeout (request took too long to process)"); + break; + case HttpStatus.SC_INTERNAL_SERVER_ERROR: + message.append("JIRA Server Error (internal error occurred while processing request)"); + break; + default: + message.append("Unknown Error"); + break; + } + + if (response.hasContent()) { + final List errors = new ArrayList<>(); + // EMPTY is safe here because we never call namedObject + try (InputStream stream = response.body().streamInput(); + XContentParser parser = JsonXContent.jsonXContent + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + XContentParser.Token token = parser.currentToken(); + if (token == null) { + token = parser.nextToken(); + } + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("failed to parse jira project. expected an object, but found [{}] instead", + token); + } + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Field.ERRORS.match(currentFieldName, parser.getDeprecationHandler())) { + Map fieldErrors = parser.mapOrdered(); + for (Map.Entry entry : fieldErrors.entrySet()) { + errors.add("Field [" + entry.getKey() + "] has error [" + String.valueOf(entry.getValue()) + "]"); + } + } else if (Field.ERROR_MESSAGES.match(currentFieldName, parser.getDeprecationHandler())) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + errors.add(parser.text()); + } + } else { + throw new ElasticsearchParseException("could not parse jira response. 
unexpected field [{}]", currentFieldName); + } + } + } catch (Exception e) { + errors.add("Exception when parsing jira response [" + String.valueOf(e) + "]"); + } + + if (errors.isEmpty() == false) { + message.append(" - "); + for (String error : errors) { + message.append(error).append('\n'); + } + } + } + return message.toString(); + } + + private interface Field { + ParseField FIELDS = JiraAction.Field.FIELDS; + ParseField ACCOUNT = new ParseField("account"); + ParseField REASON = new ParseField("reason"); + ParseField REQUEST = new ParseField("request"); + ParseField RESPONSE = new ParseField("response"); + ParseField RESULT = new ParseField("result"); + + ParseField ERROR_MESSAGES = new ParseField("errorMessages"); + ParseField ERRORS = new ParseField("errors"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraService.java new file mode 100644 index 0000000000000..297531cbe81ca --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraService.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.jira; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.notification.NotificationService; + +import java.util.Arrays; +import java.util.List; + +/** + * A component to store Atlassian's JIRA credentials. 
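+ * Accounts are configured under the "xpack.notification.jira.account." setting prefix; the user, password and url + * values may also be supplied via their "secure_"-prefixed counterparts.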
+ * + * https://www.atlassian.com/software/jira + */ +public class JiraService extends NotificationService<JiraAccount> { + + private static final Setting<String> SETTING_DEFAULT_ACCOUNT = + Setting.simpleString("xpack.notification.jira.default_account", Property.Dynamic, Property.NodeScope); + + private static final Setting.AffixSetting<Boolean> SETTING_ALLOW_HTTP = + Setting.affixKeySetting("xpack.notification.jira.account.", "allow_http", + (key) -> Setting.boolSetting(key, false, Property.Dynamic, Property.NodeScope)); + + private static final Setting.AffixSetting<String> SETTING_URL = + Setting.affixKeySetting("xpack.notification.jira.account.", "url", + (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope, Property.Filtered)); + + private static final Setting.AffixSetting<String> SETTING_USER = + Setting.affixKeySetting("xpack.notification.jira.account.", "user", + (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope, Property.Filtered)); + + private static final Setting.AffixSetting<String> SETTING_PASSWORD = + Setting.affixKeySetting("xpack.notification.jira.account.", "password", + (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope, Property.Filtered, Property.Deprecated)); + + private static final Setting.AffixSetting<String> SETTING_SECURE_USER = + Setting.affixKeySetting("xpack.notification.jira.account.", "secure_user", + (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope, Property.Filtered)); + + private static final Setting.AffixSetting<String> SETTING_SECURE_URL = + Setting.affixKeySetting("xpack.notification.jira.account.", "secure_url", + (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope, Property.Filtered)); + + private static final Setting.AffixSetting<String> SETTING_SECURE_PASSWORD = + Setting.affixKeySetting("xpack.notification.jira.account.", "secure_password", + (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope, Property.Filtered)); + + private static final Setting.AffixSetting<Settings> SETTING_DEFAULTS = + Setting.affixKeySetting("xpack.notification.jira.account.", "issue_defaults", + (key) -> Setting.groupSetting(key + ".", Property.Dynamic, Property.NodeScope)); + + private final HttpClient httpClient; + + public JiraService(Settings settings, HttpClient httpClient, ClusterSettings clusterSettings) { + super(settings, "jira"); + this.httpClient = httpClient; + clusterSettings.addSettingsUpdateConsumer(this::setAccountSetting, getSettings()); + // ensure logging of setting changes + clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_ALLOW_HTTP, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_URL, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_USER, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_PASSWORD, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_SECURE_USER, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_SECURE_URL, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_SECURE_PASSWORD, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_DEFAULTS, (s, o) -> {}, (s, o) -> {}); + // do an initial load + setAccountSetting(settings); + } + + @Override + protected JiraAccount createAccount(String name, Settings settings) { + return new JiraAccount(name, settings, httpClient); + } + + public static List<Setting<?>> getSettings() { + return 
Arrays.asList(SETTING_ALLOW_HTTP, SETTING_URL, SETTING_USER, SETTING_PASSWORD, SETTING_SECURE_USER, + SETTING_SECURE_PASSWORD, SETTING_SECURE_URL, SETTING_DEFAULTS, SETTING_DEFAULT_ACCOUNT); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEvent.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEvent.java new file mode 100644 index 0000000000000..0fb1a52d28633 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEvent.java @@ -0,0 +1,452 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.pagerduty; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpMethod; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.Scheme; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Official documentation for this can be found at + * + * https://developer.pagerduty.com/documentation/howto/manually-trigger-an-incident/ + * https://developer.pagerduty.com/documentation/integration/events/trigger + * https://developer.pagerduty.com/documentation/integration/events/acknowledge + * https://developer.pagerduty.com/documentation/integration/events/resolve + */ +public class IncidentEvent implements ToXContentObject { + + static final String HOST = "events.pagerduty.com"; + static final String PATH = "/generic/2010-04-15/create_event.json"; + + final String description; + @Nullable final HttpProxy proxy; + @Nullable final String incidentKey; + @Nullable final String client; + @Nullable final String clientUrl; + @Nullable final String account; + final String eventType; + final boolean attachPayload; + @Nullable final IncidentEventContext[] contexts; + + public IncidentEvent(String description, @Nullable String eventType, @Nullable String incidentKey, @Nullable String client, + @Nullable String clientUrl, @Nullable String account, boolean attachPayload, + @Nullable IncidentEventContext[] contexts, @Nullable HttpProxy proxy) { + this.description = description; + if (description == null) { + throw new IllegalStateException("could not create pagerduty event. 
missing required [" + + Fields.DESCRIPTION.getPreferredName() + "] setting"); + } + this.incidentKey = incidentKey; + this.client = client; + this.clientUrl = clientUrl; + this.account = account; + this.proxy = proxy; + this.attachPayload = attachPayload; + this.contexts = contexts; + this.eventType = Strings.hasLength(eventType) ? eventType : "trigger"; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + IncidentEvent template = (IncidentEvent) o; + return Objects.equals(description, template.description) && + Objects.equals(incidentKey, template.incidentKey) && + Objects.equals(client, template.client) && + Objects.equals(clientUrl, template.clientUrl) && + Objects.equals(attachPayload, template.attachPayload) && + Objects.equals(eventType, template.eventType) && + Objects.equals(account, template.account) && + Objects.equals(proxy, template.proxy) && + Arrays.equals(contexts, template.contexts); + } + + @Override + public int hashCode() { + int result = Objects.hash(description, incidentKey, client, clientUrl, account, attachPayload, eventType, proxy); + result = 31 * result + Arrays.hashCode(contexts); + return result; + } + + public HttpRequest createRequest(final String serviceKey, final Payload payload) throws IOException { + return HttpRequest.builder(HOST, -1) + .method(HttpMethod.POST) + .scheme(Scheme.HTTPS) + .path(PATH) + .proxy(proxy) + .jsonBody(new ToXContent() { + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(Fields.SERVICE_KEY.getPreferredName(), serviceKey); + builder.field(Fields.EVENT_TYPE.getPreferredName(), eventType); + builder.field(Fields.DESCRIPTION.getPreferredName(), description); + if (incidentKey != null) { + builder.field(Fields.INCIDENT_KEY.getPreferredName(), incidentKey); + } + if (client != null) { + builder.field(Fields.CLIENT.getPreferredName(), client); + } + if (clientUrl != null) { + builder.field(Fields.CLIENT_URL.getPreferredName(), clientUrl); + } + if (attachPayload) { + builder.startObject(Fields.DETAILS.getPreferredName()); + builder.field(Fields.PAYLOAD.getPreferredName()); + payload.toXContent(builder, params); + builder.endObject(); + } + if (contexts != null && contexts.length > 0) { + builder.startArray(Fields.CONTEXTS.getPreferredName()); + for (IncidentEventContext context : contexts) { + context.toXContent(builder, params); + } + builder.endArray(); + } + return builder; + } + }) + .build(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field(Fields.DESCRIPTION.getPreferredName(), description); + if (incidentKey != null) { + builder.field(Fields.INCIDENT_KEY.getPreferredName(), incidentKey); + } + if (client != null) { + builder.field(Fields.CLIENT.getPreferredName(), client); + } + if (clientUrl != null) { + builder.field(Fields.CLIENT_URL.getPreferredName(), clientUrl); + } + if (account != null) { + builder.field(Fields.ACCOUNT.getPreferredName(), account); + } + if (proxy != null) { + proxy.toXContent(builder, params); + } + builder.field(Fields.ATTACH_PAYLOAD.getPreferredName(), attachPayload); + if (contexts != null) { + builder.startArray(Fields.CONTEXTS.getPreferredName()); + for (IncidentEventContext context : contexts) { + context.toXContent(builder, params); + } + builder.endArray(); + } + return builder.endObject(); + } + public static 
Template.Builder templateBuilder(String description) { + return templateBuilder(new TextTemplate(description)); + } + + public static Template.Builder templateBuilder(TextTemplate description) { + return new Template.Builder(description); + } + + public static class Template implements ToXContentObject { + + final TextTemplate description; + final TextTemplate incidentKey; + final TextTemplate client; + final TextTemplate clientUrl; + final TextTemplate eventType; + public final String account; + final Boolean attachPayload; + final IncidentEventContext.Template[] contexts; + final HttpProxy proxy; + + public Template(TextTemplate description, TextTemplate eventType, TextTemplate incidentKey, TextTemplate client, + TextTemplate clientUrl, String account, Boolean attachPayload, IncidentEventContext.Template[] contexts, + HttpProxy proxy) { + this.description = description; + this.eventType = eventType; + this.incidentKey = incidentKey; + this.client = client; + this.clientUrl = clientUrl; + this.account = account; + this.attachPayload = attachPayload; + this.contexts = contexts; + this.proxy = proxy; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Template template = (Template) o; + return Objects.equals(description, template.description) && + Objects.equals(incidentKey, template.incidentKey) && + Objects.equals(client, template.client) && + Objects.equals(clientUrl, template.clientUrl) && + Objects.equals(eventType, template.eventType) && + Objects.equals(attachPayload, template.attachPayload) && + Objects.equals(account, template.account) && + Objects.equals(proxy, template.proxy) && + Arrays.equals(contexts, template.contexts); + } + + @Override + public int hashCode() { + int result = Objects.hash(description, eventType, incidentKey, client, clientUrl, attachPayload, account, proxy); + result = 31 * result + Arrays.hashCode(contexts); + return result; + } + + public IncidentEvent render(String watchId, String actionId, TextTemplateEngine engine, Map model, + IncidentEventDefaults defaults) { + String description = this.description != null ? engine.render(this.description, model) : defaults.description; + String incidentKey = this.incidentKey != null ? engine.render(this.incidentKey, model) : + defaults.incidentKey != null ? defaults.incidentKey : watchId; + String client = this.client != null ? engine.render(this.client, model) : defaults.client; + String clientUrl = this.clientUrl != null ? engine.render(this.clientUrl, model) : defaults.clientUrl; + String eventType = this.eventType != null ? engine.render(this.eventType, model) : defaults.eventType; + boolean attachPayload = this.attachPayload != null ? 
this.attachPayload : defaults.attachPayload; + IncidentEventContext[] contexts = null; + if (this.contexts != null) { + contexts = new IncidentEventContext[this.contexts.length]; + for (int i = 0; i < this.contexts.length; i++) { + contexts[i] = this.contexts[i].render(engine, model, defaults); + } + } + return new IncidentEvent(description, eventType, incidentKey, client, clientUrl, account, attachPayload, contexts, proxy); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field(Fields.DESCRIPTION.getPreferredName(), description, params); + if (incidentKey != null) { + builder.field(Fields.INCIDENT_KEY.getPreferredName(), incidentKey, params); + } + if (client != null) { + builder.field(Fields.CLIENT.getPreferredName(), client, params); + } + if (clientUrl != null) { + builder.field(Fields.CLIENT_URL.getPreferredName(), clientUrl, params); + } + if (eventType != null) { + builder.field(Fields.EVENT_TYPE.getPreferredName(), eventType, params); + } + if (attachPayload != null) { + builder.field(Fields.ATTACH_PAYLOAD.getPreferredName(), attachPayload); + } + if (account != null) { + builder.field(Fields.ACCOUNT.getPreferredName(), account); + } + if (proxy != null) { + proxy.toXContent(builder, params); + } + if (contexts != null) { + builder.startArray(Fields.CONTEXTS.getPreferredName()); + for (IncidentEventContext.Template context : contexts) { + context.toXContent(builder, params); + } + builder.endArray(); + } + return builder.endObject(); + } + + public static Template parse(String watchId, String actionId, XContentParser parser) throws IOException { + TextTemplate incidentKey = null; + TextTemplate description = null; + TextTemplate client = null; + TextTemplate clientUrl = null; + TextTemplate eventType = null; + String account = null; + HttpProxy proxy = null; + Boolean attachPayload = null; + IncidentEventContext.Template[] contexts = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Fields.INCIDENT_KEY.match(currentFieldName, parser.getDeprecationHandler())) { + try { + incidentKey = TextTemplate.parse(parser); + } catch (ElasticsearchParseException e) { + throw new ElasticsearchParseException("could not parse pager duty event template. failed to parse field [{}]", + Fields.INCIDENT_KEY.getPreferredName()); + } + } else if (Fields.DESCRIPTION.match(currentFieldName, parser.getDeprecationHandler())) { + try { + description = TextTemplate.parse(parser); + } catch (ElasticsearchParseException e) { + throw new ElasticsearchParseException("could not parse pager duty event template. failed to parse field [{}]", + Fields.DESCRIPTION.getPreferredName()); + } + } else if (Fields.CLIENT.match(currentFieldName, parser.getDeprecationHandler())) { + try { + client = TextTemplate.parse(parser); + } catch (ElasticsearchParseException e) { + throw new ElasticsearchParseException("could not parse pager duty event template. failed to parse field [{}]", + Fields.CLIENT.getPreferredName()); + } + } else if (Fields.CLIENT_URL.match(currentFieldName, parser.getDeprecationHandler())) { + try { + clientUrl = TextTemplate.parse(parser); + } catch (ElasticsearchParseException e) { + throw new ElasticsearchParseException("could not parse pager duty event template. 
failed to parse field [{}]", + Fields.CLIENT_URL.getPreferredName()); + } + } else if (Fields.ACCOUNT.match(currentFieldName, parser.getDeprecationHandler())) { + try { + account = parser.text(); + } catch (ElasticsearchParseException e) { + throw new ElasticsearchParseException("could not parse pager duty event template. failed to parse field [{}]", + Fields.CLIENT_URL.getPreferredName()); + } + } else if (Fields.PROXY.match(currentFieldName, parser.getDeprecationHandler())) { + proxy = HttpProxy.parse(parser); + } else if (Fields.EVENT_TYPE.match(currentFieldName, parser.getDeprecationHandler())) { + try { + eventType = TextTemplate.parse(parser); + } catch (ElasticsearchParseException e) { + throw new ElasticsearchParseException("could not parse pager duty event template. failed to parse field [{}]", + Fields.EVENT_TYPE.getPreferredName()); + } + } else if (Fields.ATTACH_PAYLOAD.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_BOOLEAN) { + attachPayload = parser.booleanValue(); + } else { + throw new ElasticsearchParseException("could not parse pager duty event template. failed to parse field [{}], " + + "expected a boolean value but found [{}] instead", Fields.ATTACH_PAYLOAD.getPreferredName(), token); + } + } else if (Fields.CONTEXTS.match(currentFieldName, parser.getDeprecationHandler()) + || Fields.CONTEXT_DEPRECATED.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.START_ARRAY) { + List list = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + try { + list.add(IncidentEventContext.Template.parse(parser)); + } catch (ElasticsearchParseException e) { + throw new ElasticsearchParseException("could not parse pager duty event template. failed to parse field " + + "[{}]", parser.currentName()); + } + } + contexts = list.toArray(new IncidentEventContext.Template[list.size()]); + } + } else { + throw new ElasticsearchParseException("could not parse pager duty event template. unexpected field [{}]", + currentFieldName); + } + } + return new Template(description, eventType, incidentKey, client, clientUrl, account, attachPayload, contexts, proxy); + } + + public static class Builder { + + final TextTemplate description; + TextTemplate incidentKey; + TextTemplate client; + TextTemplate clientUrl; + TextTemplate eventType; + String account; + HttpProxy proxy; + Boolean attachPayload; + List contexts = new ArrayList<>(); + + public Builder(TextTemplate description) { + this.description = description; + } + + public Builder setIncidentKey(TextTemplate incidentKey) { + this.incidentKey = incidentKey; + return this; + } + + public Builder setClient(TextTemplate client) { + this.client = client; + return this; + } + + public Builder setClientUrl(TextTemplate clientUrl) { + this.clientUrl = clientUrl; + return this; + } + + public Builder setEventType(TextTemplate eventType) { + this.eventType = eventType; + return this; + } + + public Builder setAccount(String account) { + this.account= account; + return this; + } + + public Builder setAttachPayload(Boolean attachPayload) { + this.attachPayload = attachPayload; + return this; + } + + public Builder setProxy(HttpProxy proxy) { + this.proxy = proxy; + return this; + } + + public Builder addContext(IncidentEventContext.Template context) { + this.contexts.add(context); + return this; + } + + public Template build() { + IncidentEventContext.Template[] contexts = this.contexts.isEmpty() ? 
null : + this.contexts.toArray(new IncidentEventContext.Template[this.contexts.size()]); + return new Template(description, eventType, incidentKey, client, clientUrl, account, attachPayload, contexts, proxy); + } + } + } + + interface Fields { + + ParseField TYPE = new ParseField("type"); + ParseField EVENT_TYPE = new ParseField("event_type"); + + ParseField ACCOUNT = new ParseField("account"); + ParseField PROXY = new ParseField("proxy"); + ParseField DESCRIPTION = new ParseField("description"); + ParseField INCIDENT_KEY = new ParseField("incident_key"); + ParseField CLIENT = new ParseField("client"); + ParseField CLIENT_URL = new ParseField("client_url"); + ParseField ATTACH_PAYLOAD = new ParseField("attach_payload"); + ParseField CONTEXTS = new ParseField("contexts"); + // this field exists because in versions prior 6.0 we accidentally used context instead of contexts and thus the correct data + // was never picked up on the pagerduty side + // we need to keep this for BWC + ParseField CONTEXT_DEPRECATED = new ParseField("context"); + + ParseField SERVICE_KEY = new ParseField("service_key"); + ParseField PAYLOAD = new ParseField("payload"); + ParseField DETAILS = new ParseField("details"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEventContext.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEventContext.java new file mode 100644 index 0000000000000..d43829346b626 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEventContext.java @@ -0,0 +1,284 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.pagerduty; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; + +import java.io.IOException; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +public class IncidentEventContext implements ToXContentObject { + + enum Type { + LINK, IMAGE + } + + final Type type; + final String href; + final String text; + final String src; + final String alt; + + public static IncidentEventContext link(String href, @Nullable String text) { + assert href != null; + return new IncidentEventContext(Type.LINK, href, text, null, null); + } + + public static IncidentEventContext image(String src, @Nullable String href, @Nullable String alt) { + assert src != null; + return new IncidentEventContext(Type.IMAGE, href, null, src, alt); + } + + private IncidentEventContext(Type type, String href, String text, String src, String alt) { + this.type = type; + this.href = href; + this.text = text; + this.src = src; + this.alt = alt; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + IncidentEventContext that = (IncidentEventContext) o; + + return Objects.equals(type, that.type) && + Objects.equals(href, that.href) && + Objects.equals(text, that.text) && + Objects.equals(src, that.src) && + Objects.equals(alt, that.alt); + } + + @Override + public int hashCode() { + return Objects.hash(type, href, text, src, alt); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(XField.TYPE.getPreferredName(), type.name().toLowerCase(Locale.ROOT)); + switch (type) { + case LINK: + builder.field(XField.HREF.getPreferredName(), href); + if (text != null) { + builder.field(XField.TEXT.getPreferredName(), text); + } + break; + case IMAGE: + builder.field(XField.SRC.getPreferredName(), src); + if (href != null) { + builder.field(XField.HREF.getPreferredName(), href); + } + if (alt != null) { + builder.field(XField.ALT.getPreferredName(), alt); + } + } + return builder.endObject(); + } + + public static class Template implements ToXContentObject { + + final Type type; + final TextTemplate href; + final TextTemplate text; + final TextTemplate src; + final TextTemplate alt; + + public static Template link(TextTemplate href, @Nullable TextTemplate text) { + if (href == null) { + throw new IllegalStateException("could not create link context for pager duty trigger incident event. missing required " + + "[href] setting"); + } + return new Template(Type.LINK, href, text, null, null); + } + + public static Template image(TextTemplate src, @Nullable TextTemplate href, @Nullable TextTemplate alt) { + if (src == null) { + throw new IllegalStateException("could not create link context for pager duty trigger incident event. 
missing required " + + "[src] setting"); + } + return new Template(Type.IMAGE, href, null, src, alt); + } + + private Template(Type type, TextTemplate href, TextTemplate text, TextTemplate src, TextTemplate alt) { + this.type = type; + this.href = href; + this.text = text; + this.src = src; + this.alt = alt; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Template that = (Template) o; + return Objects.equals(type, that.type) && + Objects.equals(href, that.href) && + Objects.equals(text, that.text) && + Objects.equals(src, that.src) && + Objects.equals(alt, that.alt); + } + + @Override + public int hashCode() { + return Objects.hash(type, href, text, src, alt); + } + + public IncidentEventContext render(TextTemplateEngine engine, Map model, IncidentEventDefaults defaults) { + switch (type) { + case LINK: + String href = this.href != null ? engine.render(this.href, model) : defaults.link.href; + String text = this.text != null ? engine.render(this.text, model) : defaults.link.text; + return IncidentEventContext.link(href, text); + + default: + assert type == Type.IMAGE; + String src = this.src != null ? engine.render(this.src, model) : defaults.image.src; + href = this.href != null ? engine.render(this.href, model) : defaults.image.href; + String alt = this.alt != null ? engine.render(this.alt, model) : defaults.image.alt; + return IncidentEventContext.image(src, href, alt); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(XField.TYPE.getPreferredName(), type.name().toLowerCase(Locale.ROOT)); + switch (type) { + case LINK: + builder.field(XField.HREF.getPreferredName(), href, params); + if (text != null) { + builder.field(XField.TEXT.getPreferredName(), text, params); + } + break; + case IMAGE: + builder.field(XField.SRC.getPreferredName(), src, params); + if (href != null) { + builder.field(XField.HREF.getPreferredName(), href, params); + } + if (alt != null) { + builder.field(XField.ALT.getPreferredName(), alt, params); + } + } + return builder.endObject(); + } + + public static Template parse(XContentParser parser) throws IOException { + Type type = null; + TextTemplate href = null; + TextTemplate text = null; + TextTemplate src = null; + TextTemplate alt = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Strings.hasLength(currentFieldName)) { + if (XField.TYPE.match(currentFieldName, parser.getDeprecationHandler())) { + try { + type = Type.valueOf(parser.text().toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException e) { + String msg = "could not parse trigger incident event context. unknown context type [{}]"; + throw new ElasticsearchParseException(msg, parser.text()); + } + } else { + TextTemplate parsedTemplate; + try { + parsedTemplate = TextTemplate.parse(parser); + } catch (ElasticsearchParseException e) { + String msg = "could not parse trigger incident event context. 
failed to parse [{}] field"; + throw new ElasticsearchParseException(msg, e, currentFieldName); + } + + if (XField.HREF.match(currentFieldName, parser.getDeprecationHandler())) { + href = parsedTemplate; + } else if (XField.TEXT.match(currentFieldName, parser.getDeprecationHandler())) { + text = parsedTemplate; + } else if (XField.SRC.match(currentFieldName, parser.getDeprecationHandler())) { + src = parsedTemplate; + } else if (XField.ALT.match(currentFieldName, parser.getDeprecationHandler())) { + alt = parsedTemplate; + } else { + String msg = "could not parse trigger incident event context. unknown field [{}]"; + throw new ElasticsearchParseException(msg, currentFieldName); + } + } + } + } + + return createAndValidateTemplate(type, href, src, alt, text); + } + + private static Template createAndValidateTemplate(Type type, TextTemplate href, TextTemplate src, TextTemplate alt, + TextTemplate text) { + if (type == null) { + throw new ElasticsearchParseException("could not parse trigger incident event context. missing required field [{}]", + XField.TYPE.getPreferredName()); + } + + switch (type) { + case LINK: + if (href == null) { + throw new ElasticsearchParseException("could not parse trigger incident event context. missing required field " + + "[{}] for [{}] context", XField.HREF.getPreferredName(), Type.LINK.name().toLowerCase(Locale.ROOT)); + } + if (src != null) { + throw new ElasticsearchParseException("could not parse trigger incident event context. unexpected field [{}] for " + + "[{}] context", XField.SRC.getPreferredName(), Type.LINK.name().toLowerCase(Locale.ROOT)); + } + if (alt != null) { + throw new ElasticsearchParseException("could not parse trigger incident event context. unexpected field [{}] for " + + "[{}] context", XField.ALT.getPreferredName(), Type.LINK.name().toLowerCase(Locale.ROOT)); + } + return link(href, text); + case IMAGE: + if (src == null) { + throw new ElasticsearchParseException("could not parse trigger incident event context. missing required field " + + "[{}] for [{}] context", XField.SRC.getPreferredName(), Type.IMAGE.name().toLowerCase(Locale.ROOT)); + } + if (text != null) { + throw new ElasticsearchParseException("could not parse trigger incident event context. unexpected field [{}] for " + + "[{}] context", XField.TEXT.getPreferredName(), Type.IMAGE.name().toLowerCase(Locale.ROOT)); + } + return image(src, href, alt); + default: + throw new ElasticsearchParseException("could not parse trigger incident event context. unknown context type [{}]", + type); + } + } + + } + + interface XField { + ParseField TYPE = new ParseField("type"); + ParseField HREF = new ParseField("href"); + + // "link" context fields + ParseField TEXT = new ParseField("text"); + + // "image" context fields + ParseField SRC = new ParseField("src"); + ParseField ALT = new ParseField("alt"); + + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEventDefaults.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEventDefaults.java new file mode 100644 index 0000000000000..632f5318c2098 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEventDefaults.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.pagerduty; + +import org.elasticsearch.common.settings.Settings; + +import java.util.Objects; + +/** + * Get trigger default configurations either from global settings or specific account settings and merge them + */ +public class IncidentEventDefaults { + + final String description; + final String incidentKey; + final String client; + final String clientUrl; + final String eventType; + final boolean attachPayload; + final Context.LinkDefaults link; + final Context.ImageDefaults image; + + public IncidentEventDefaults(Settings accountSettings) { + description = accountSettings.get(IncidentEvent.Fields.DESCRIPTION.getPreferredName(), null); + incidentKey = accountSettings.get(IncidentEvent.Fields.INCIDENT_KEY.getPreferredName(), null); + client = accountSettings.get(IncidentEvent.Fields.CLIENT.getPreferredName(), null); + clientUrl = accountSettings.get(IncidentEvent.Fields.CLIENT_URL.getPreferredName(), null); + eventType = accountSettings.get(IncidentEvent.Fields.EVENT_TYPE.getPreferredName(), null); + attachPayload = accountSettings.getAsBoolean(IncidentEvent.Fields.ATTACH_PAYLOAD.getPreferredName(), false); + link = new Context.LinkDefaults(accountSettings.getAsSettings("link")); + image = new Context.ImageDefaults(accountSettings.getAsSettings("image")); + + } + + static class Context { + + static class LinkDefaults { + + final String href; + final String text; + + LinkDefaults(Settings settings) { + href = settings.get(IncidentEventContext.XField.HREF.getPreferredName(), null); + text = settings.get(IncidentEventContext.XField.TEXT.getPreferredName(), null); + } + + @Override + public int hashCode() { + return Objects.hash(href, text); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || getClass() != obj.getClass()){ + return false; + } + final LinkDefaults other = (LinkDefaults) obj; + return Objects.equals(href, other.href) && Objects.equals(text, other.text); + } + } + + static class ImageDefaults { + + final String href; + final String src; + final String alt; + + ImageDefaults(Settings settings) { + href = settings.get(IncidentEventContext.XField.HREF.getPreferredName(), null); + src = settings.get(IncidentEventContext.XField.SRC.getPreferredName(), null); + alt = settings.get(IncidentEventContext.XField.ALT.getPreferredName(), null); + } + + @Override + public int hashCode() { + return Objects.hash(href, src, alt); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || getClass() != obj.getClass()){ + return false; + } + final ImageDefaults other = (ImageDefaults) obj; + return Objects.equals(href, other.href) && Objects.equals(src, other.src) && Objects.equals(alt, other.alt); + } + } + } + + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/PagerDutyAccount.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/PagerDutyAccount.java new file mode 100644 index 0000000000000..5cf1a77f9711a --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/PagerDutyAccount.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.pagerduty; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.settings.SecureSetting; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; + +import java.io.IOException; + +public class PagerDutyAccount { + + private static final String SERVICE_KEY_SETTING = "service_api_key"; + private static final String TRIGGER_DEFAULTS_SETTING = "event_defaults"; + private static final Setting SECURE_SERVICE_API_KEY_SETTING = + SecureSetting.secureString("secure_" + SERVICE_KEY_SETTING, null); + + private final String name; + private final String serviceKey; + private final HttpClient httpClient; + private final IncidentEventDefaults eventDefaults; + private final Logger logger; + + PagerDutyAccount(String name, Settings accountSettings, Settings serviceSettings, HttpClient httpClient, Logger logger) { + this.name = name; + this.serviceKey = getServiceKey(name, accountSettings, serviceSettings); + this.httpClient = httpClient; + + this.eventDefaults = new IncidentEventDefaults(accountSettings.getAsSettings(TRIGGER_DEFAULTS_SETTING)); + this.logger = logger; + } + + public String getName() { + return name; + } + + public IncidentEventDefaults getDefaults() { + return eventDefaults; + } + + public SentEvent send(IncidentEvent event, Payload payload) throws IOException { + HttpRequest request = event.createRequest(serviceKey, payload); + HttpResponse response = httpClient.execute(request); + return SentEvent.responded(event, request, response); + } + + private static String getServiceKey(String name, Settings accountSettings, Settings serviceSettings) { + String serviceKey = accountSettings.get(SERVICE_KEY_SETTING, serviceSettings.get(SERVICE_KEY_SETTING, null)); + if (serviceKey == null) { + SecureString secureString = SECURE_SERVICE_API_KEY_SETTING.get(accountSettings); + if (secureString == null || secureString.length() < 1) { + throw new SettingsException("invalid pagerduty account [" + name + "]. missing required [" + SERVICE_KEY_SETTING + + "] setting"); + } + serviceKey = secureString.toString(); + } + + return serviceKey; + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/PagerDutyService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/PagerDutyService.java new file mode 100644 index 0000000000000..e74e78707beff --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/PagerDutyService.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.pagerduty; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.SecureSetting; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.notification.NotificationService; + +import java.util.Arrays; +import java.util.List; + +/** + * A component to store pagerduty credentials. + */ +public class PagerDutyService extends NotificationService { + + private static final Setting SETTING_DEFAULT_ACCOUNT = + Setting.simpleString("xpack.notification.pagerduty.default_account", Property.Dynamic, Property.NodeScope); + + private static final Setting.AffixSetting SETTING_SERVICE_API_KEY = + Setting.affixKeySetting("xpack.notification.pagerduty.account.", "service_api_key", + (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope, Property.Filtered, Property.Deprecated)); + + private static final Setting.AffixSetting SETTING_SECURE_SERVICE_API_KEY = + Setting.affixKeySetting("xpack.notification.pagerduty.account.", "secure_service_api_key", + (key) -> SecureSetting.simpleString(key, Property.Dynamic, Property.NodeScope, Property.Filtered)); + + private static final Setting.AffixSetting SETTING_DEFAULTS = + Setting.affixKeySetting("xpack.notification.pagerduty.account.", "event_defaults", + (key) -> Setting.groupSetting(key + ".", Property.Dynamic, Property.NodeScope)); + + private final HttpClient httpClient; + + public PagerDutyService(Settings settings, HttpClient httpClient, ClusterSettings clusterSettings) { + super(settings, "pagerduty"); + this.httpClient = httpClient; + clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_SERVICE_API_KEY, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_SECURE_SERVICE_API_KEY, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_DEFAULTS, (s, o) -> {}, (s, o) -> {}); + setAccountSetting(settings); + } + + @Override + protected PagerDutyAccount createAccount(String name, Settings accountSettings) { + return new PagerDutyAccount(name, accountSettings, accountSettings, httpClient, logger); + } + + public static List> getSettings() { + return Arrays.asList(SETTING_SERVICE_API_KEY, SETTING_SECURE_SERVICE_API_KEY, SETTING_DEFAULTS, SETTING_DEFAULT_ACCOUNT); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/SentEvent.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/SentEvent.java new file mode 100644 index 0000000000000..9bc040511a4a3 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/SentEvent.java @@ -0,0 +1,185 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.pagerduty; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.watcher.actions.pagerduty.PagerDutyAction; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +public class SentEvent implements ToXContentObject { + + final IncidentEvent event; + @Nullable final HttpRequest request; + @Nullable final HttpResponse response; + @Nullable final String failureReason; + + public static SentEvent responded(IncidentEvent event, HttpRequest request, HttpResponse response) { + String failureReason = resolveFailureReason(response); + return new SentEvent(event, request, response, failureReason); + } + + public static SentEvent error(IncidentEvent event, String reason) { + return new SentEvent(event, null, null, reason); + } + + private SentEvent(IncidentEvent event, HttpRequest request, HttpResponse response, String failureReason) { + this.event = event; + this.request = request; + this.response = response; + this.failureReason = failureReason; + } + + public boolean successful() { + return failureReason == null; + } + + public HttpRequest getRequest() { + return request; + } + + public HttpResponse getResponse() { + return response; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + SentEvent sentEvent = (SentEvent) o; + return Objects.equals(event, sentEvent.event) && + Objects.equals(request, sentEvent.request) && + Objects.equals(failureReason, sentEvent.failureReason); + } + + @Override + public int hashCode() { + return Objects.hash(event, request, response, failureReason); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(XField.EVENT.getPreferredName(), event, params); + if (successful() == false) { + builder.field(XField.REASON.getPreferredName(), failureReason); + if (request != null) { + // this excludes the whole body, even though we should only exclude a small field inside of the body + // as this makes debugging pagerduty services much harder, this should be changed to only filter for + // body.service_key - however the body is currently just a string, making filtering much harder + if (WatcherParams.hideSecrets(params)) { + try (InputStream is = HttpRequest.filterToXContent(request, builder.contentType().xContent(), + params, "body")) { + builder.rawField(XField.REQUEST.getPreferredName(), is, builder.contentType()); + } + } else { + builder.field(XField.REQUEST.getPreferredName()); + request.toXContent(builder, params); + } + } + if (response != null) { + builder.field(XField.RESPONSE.getPreferredName(), response, params); + } + } + return 
builder.endObject(); + } + + private static String resolveFailureReason(HttpResponse response) { + + // if for some reason we failed to parse the body, lets fall back on the http status code. + int status = response.status(); + if (status < 300) { + return null; + } + + // ok... we have an error + + // lets first try to parse the error response in the body + // based on https://developer.pagerduty.com/documentation/rest/errors + try (InputStream stream = response.body().streamInput(); + XContentParser parser = JsonXContent.jsonXContent + // EMPTY is safe here because we never call namedObject + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { + parser.nextToken(); + + String message = null; + List errors = new ArrayList<>(); + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (XField.MESSAGE.match(currentFieldName, parser.getDeprecationHandler())) { + message = parser.text(); + } else if (XField.CODE.match(currentFieldName, parser.getDeprecationHandler())) { + // we don't use this code.. so just consume the token + } else if (XField.ERRORS.match(currentFieldName, parser.getDeprecationHandler())) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + errors.add(parser.text()); + } + } else { + throw new ElasticsearchParseException("could not parse pagerduty event response. unexpected field [{}]", + currentFieldName); + } + } + + StringBuilder sb = new StringBuilder(); + if (message != null) { + sb.append(message); + } + if (!errors.isEmpty()) { + sb.append(":"); + for (String error : errors) { + sb.append(" ").append(error).append("."); + } + } + return sb.toString(); + } catch (Exception e) { + // too bad... we couldn't parse the body... note that we don't log this error as there's no real + // need for it. This whole error parsing is a nice to have, nothing more. On error, the http + // response object is anyway added to the action result in the watch record (though not searchable) + } + + switch (status) { + case 400: return "Bad Request"; + case 401: return "Unauthorized. The account service api key is invalid."; + case 403: return "Forbidden. The account doesn't have permission to send this trigger."; + case 404: return "The account used invalid HipChat APIs"; + case 408: return "Request Timeout. The request took too long to process."; + case 500: return "PagerDuty Server Error. Internal error occurred while processing request."; + default: + return "Unknown Error"; + } + } + + public interface XField { + ParseField EVENT = PagerDutyAction.XField.EVENT; + ParseField REASON = new ParseField("reason"); + ParseField REQUEST = new ParseField("request"); + ParseField RESPONSE = new ParseField("response"); + + ParseField MESSAGE = new ParseField("message"); + ParseField CODE = new ParseField("code"); + ParseField ERRORS = new ParseField("errors"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/SentMessages.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/SentMessages.java new file mode 100644 index 0000000000000..a9670c5ffd0ce --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/SentMessages.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.slack; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.notification.slack.message.SlackMessage; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; + +public class SentMessages implements ToXContentObject, Iterable { + + private static final ParseField ACCOUNT = new ParseField("account"); + private static final ParseField SENT_MESSAGES = new ParseField("sent_messages"); + + private String accountName; + private List messages; + + public SentMessages(String accountName, List messages) { + this.accountName = accountName; + this.messages = messages; + } + + public String getAccountName() { + return accountName; + } + + @Override + public Iterator iterator() { + return messages.iterator(); + } + + public int count() { + return messages.size(); + } + + public List asList() { + return Collections.unmodifiableList(messages); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ACCOUNT.getPreferredName(), accountName); + builder.startArray(SENT_MESSAGES.getPreferredName()); + for (SentMessage message : messages) { + message.toXContent(builder, params); + } + builder.endArray(); + return builder.endObject(); + } + + public static class SentMessage implements ToXContentObject { + + private static final ParseField STATUS = new ParseField("status"); + private static final ParseField REQUEST = new ParseField("request"); + private static final ParseField RESPONSE = new ParseField("response"); + private static final ParseField TO = new ParseField("to"); + private static final ParseField MESSAGE = new ParseField("message"); + + final String to; + final SlackMessage message; + @Nullable final HttpRequest request; + @Nullable final HttpResponse response; + @Nullable final Exception exception; + + public static SentMessage responded(String to, SlackMessage message, HttpRequest request, HttpResponse response) { + return new SentMessage(to, message, request, response, null); + } + + public static SentMessage error(String to, SlackMessage message, Exception e) { + return new SentMessage(to, message, null, null, e); + } + + private SentMessage(String to, SlackMessage message, HttpRequest request, HttpResponse response, Exception exception) { + this.to = to; + this.message = message; + this.request = request; + this.response = response; + this.exception = exception; + } + + public HttpRequest getRequest() { + return request; + } + + public HttpResponse getResponse() { + return response; + } + + public Exception getException() { + return exception; + } + + public boolean isSuccess() { + return response != null && response.status() >= 200 && response.status() < 300; + } + + @Override + public XContentBuilder 
toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(STATUS.getPreferredName(), isSuccess() ? "success" : "failure"); + if (isSuccess() == false) { + if (request != null) { + if (WatcherParams.hideSecrets(params)) { + // this writes out the request to the byte array output stream with the correct excludes + // for slack + try (InputStream is = HttpRequest.filterToXContent(request, builder.contentType().xContent(), + params, "path")) { + builder.rawField(REQUEST.getPreferredName(), is, builder.contentType()); + } + } else { + builder.field(REQUEST.getPreferredName()); + request.toXContent(builder, params); + } + } + if (response != null) { + builder.field(RESPONSE.getPreferredName()); + response.toXContent(builder, params); + } + if (exception != null) { + ElasticsearchException.generateFailureXContent(builder, params, exception, true); + } + } + if (to != null) { + builder.field(TO.getPreferredName(), to); + } + builder.field(MESSAGE.getPreferredName()); + message.toXContent(builder, params, false); + return builder.endObject(); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/SlackAccount.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/SlackAccount.java new file mode 100644 index 0000000000000..98857cc4cb28a --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/SlackAccount.java @@ -0,0 +1,140 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.slack; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.settings.SecureSetting; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpMethod; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.http.Scheme; +import org.elasticsearch.xpack.watcher.notification.slack.message.Attachment; +import org.elasticsearch.xpack.watcher.notification.slack.message.SlackMessage; +import org.elasticsearch.xpack.watcher.notification.slack.message.SlackMessageDefaults; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class SlackAccount { + + + public static final String URL_SETTING = "url"; + public static final String MESSAGE_DEFAULTS_SETTING = "message_defaults"; + + private static final Setting SECURE_URL_SETTING = SecureSetting.secureString("secure_" + URL_SETTING, null); + + final String name; + final URI url; + final HttpClient httpClient; + final Logger logger; + final SlackMessageDefaults messageDefaults; + + public SlackAccount(String name, Settings settings, Settings defaultSettings, HttpClient httpClient, Logger logger) { + this.name = name; + this.url = url(name, settings, defaultSettings); + this.messageDefaults = new SlackMessageDefaults(settings.getAsSettings(MESSAGE_DEFAULTS_SETTING)); + this.httpClient = httpClient; + this.logger = logger; + } + + public SlackMessageDefaults getMessageDefaults() { + return messageDefaults; + } + + public SentMessages send(final SlackMessage message, HttpProxy proxy) { + + String[] to = message.getTo(); + if (to == null || to.length == 0) { + SentMessages.SentMessage sentMessage = send(null, message, proxy); + return new SentMessages(name, Collections.singletonList(sentMessage)); + } + + List sentMessages = new ArrayList<>(); + for (String channel : to) { + sentMessages.add(send(channel, message, proxy)); + } + return new SentMessages(name, sentMessages); + } + + public SentMessages.SentMessage send(final String to, final SlackMessage message, final HttpProxy proxy) { + HttpRequest request = HttpRequest.builder(url.getHost(), url.getPort()) + .path(url.getPath()) + .method(HttpMethod.POST) + .proxy(proxy) + .scheme(Scheme.parse(url.getScheme())) + .jsonBody(new ToXContent() { + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (to != null) { + builder.field("channel", to); + } + if (message.getFrom() != null) { + builder.field("username", message.getFrom()); + } + String icon = message.getIcon(); + if (icon != null) { + if (icon.startsWith("http")) { + builder.field("icon_url", icon); + } else { + builder.field("icon_emoji", icon); + } + } + if (message.getText() != null) { + builder.field("text", message.getText()); + } + Attachment[] attachments = message.getAttachments(); + if (attachments != null && 
attachments.length > 0) { + builder.startArray("attachments"); + for (Attachment attachment : attachments) { + attachment.toXContent(builder, params); + } + builder.endArray(); + + } + return builder; + } + }) + .build(); + + try { + HttpResponse response = httpClient.execute(request); + return SentMessages.SentMessage.responded(to, message, request, response); + } catch (Exception e) { + logger.error("failed to execute slack api http request", e); + return SentMessages.SentMessage.error(to, message, e); + } + } + + static URI url(String name, Settings settings, Settings defaultSettings) { + String url = settings.get(URL_SETTING, defaultSettings.get(URL_SETTING, null)); + if (url == null) { + SecureString secureStringUrl = SECURE_URL_SETTING.get(settings); + if (secureStringUrl != null && secureStringUrl.length() > 0) { + url = secureStringUrl.toString(); + } + } + if (url == null) { + throw new SettingsException("invalid slack [" + name + "] account settings. missing required [" + URL_SETTING + "] setting"); + } + try { + return new URI(url); + } catch (URISyntaxException e) { + throw new SettingsException("invalid slack [" + name + "] account settings. invalid [" + URL_SETTING + "] setting", e); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/SlackService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/SlackService.java new file mode 100644 index 0000000000000..92f44bfcbe39b --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/SlackService.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.slack; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.SecureSetting; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.notification.NotificationService; + +import java.util.Arrays; +import java.util.List; + +/** + * A component to store slack credentials. 
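+ *
+ * <p>For illustration only (the account name {@code monitoring} below is an arbitrary example,
+ * not something defined by this class), accounts live under the affix prefix used by the
+ * settings declared here:
+ *
+ * <pre>
+ * xpack.notification.slack.default_account: monitoring
+ * xpack.notification.slack.account.monitoring.message_defaults: ...
+ * </pre>
+ *
+ * The incoming webhook URL itself would be supplied through the corresponding
+ * {@code secure_url} secure setting; the plain {@code url} account setting is deprecated.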
+ */ +public class SlackService extends NotificationService { + + private static final Setting SETTING_DEFAULT_ACCOUNT = + Setting.simpleString("xpack.notification.slack.default_account", Property.Dynamic, Property.NodeScope); + + private static final Setting.AffixSetting SETTING_URL = + Setting.affixKeySetting("xpack.notification.slack.account.", "url", + (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope, Property.Filtered, Property.Deprecated)); + + private static final Setting.AffixSetting SETTING_URL_SECURE = + Setting.affixKeySetting("xpack.notification.slack.account.", "secure_url", + (key) -> SecureSetting.simpleString(key, Property.Dynamic, Property.NodeScope, Property.Filtered)); + + private static final Setting.AffixSetting SETTING_DEFAULTS = + Setting.affixKeySetting("xpack.notification.slack.account.", "message_defaults", + (key) -> Setting.groupSetting(key + ".", Property.Dynamic, Property.NodeScope)); + + private final HttpClient httpClient; + + public SlackService(Settings settings, HttpClient httpClient, ClusterSettings clusterSettings) { + super(settings, "slack"); + this.httpClient = httpClient; + clusterSettings.addSettingsUpdateConsumer(this::setAccountSetting, getSettings()); + clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_URL, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_URL_SECURE, (s, o) -> {}, (s, o) -> {}); + clusterSettings.addAffixUpdateConsumer(SETTING_DEFAULTS, (s, o) -> {}, (s, o) -> {}); + setAccountSetting(settings); + } + + @Override + protected SlackAccount createAccount(String name, Settings accountSettings) { + return new SlackAccount(name, accountSettings, accountSettings, httpClient, logger); + } + + public static List> getSettings() { + return Arrays.asList(SETTING_URL, SETTING_URL_SECURE, SETTING_DEFAULT_ACCOUNT, SETTING_DEFAULTS); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/Action.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/Action.java new file mode 100644 index 0000000000000..1976828ad711f --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/Action.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.slack.message; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +public class Action implements MessageElement { + + static final ObjectParser ACTION_PARSER = new ObjectParser<>("action", Template::new); + static { + ACTION_PARSER.declareField(Template::setType, (p, c) -> new TextTemplate(p.text()), new ParseField("type"), ValueType.STRING); + ACTION_PARSER.declareField(Template::setUrl, (p, c) -> new TextTemplate(p.text()), new ParseField("url"), ValueType.STRING); + ACTION_PARSER.declareField(Template::setText, (p, c) -> new TextTemplate(p.text()), new ParseField("text"), ValueType.STRING); + ACTION_PARSER.declareField(Template::setStyle, (p, c) -> new TextTemplate(p.text()), new ParseField("style"), ValueType.STRING); + ACTION_PARSER.declareField(Template::setName, (p, c) -> new TextTemplate(p.text()), new ParseField("name"), ValueType.STRING); + } + + private static final ParseField URL = new ParseField("url"); + private static final ParseField TYPE = new ParseField("type"); + private static final ParseField TEXT = new ParseField("text"); + private static final ParseField STYLE = new ParseField("style"); + private static final ParseField NAME = new ParseField("name"); + + private String style; + private String name; + private String type; + private String text; + private String url; + + public Action() { + } + + public Action(String style, String name, String type, String text, String url) { + this.style = style; + this.name = name; + this.type = type; + this.text = text; + this.url = url; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Action template = (Action) o; + + return Objects.equals(style, template.style) && Objects.equals(type, template.type) && Objects.equals(url, template.url) + && Objects.equals(text, template.text) && Objects.equals(name, template.name); + } + + @Override + public int hashCode() { + return Objects.hash(style, type, url, name, text); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field(NAME.getPreferredName(), name) + .field(STYLE.getPreferredName(), style) + .field(TYPE.getPreferredName(), type) + .field(TEXT.getPreferredName(), text) + .field(URL.getPreferredName(), url) + .endObject(); + } + + static class Template implements ToXContent { + + private TextTemplate type; + private TextTemplate name; + private TextTemplate text; + private TextTemplate url; + private TextTemplate style; + + public Action render(TextTemplateEngine engine, Map model) { + String style = engine.render(this.style, model); + String type = engine.render(this.type, model); + String url = engine.render(this.url, model); + String name = engine.render(this.name, model); + String text = engine.render(this.text, model); + return new Action(style, name, type, text, url); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != 
o.getClass()) return false; + + Template template = (Template) o; + + return Objects.equals(style, template.style) && Objects.equals(type, template.type) && Objects.equals(url, template.url) + && Objects.equals(text, template.text) && Objects.equals(name, template.name); + } + + @Override + public int hashCode() { + return Objects.hash(style, type, url, name, text); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field(NAME.getPreferredName(), name) + .field(STYLE.getPreferredName(), style) + .field(TYPE.getPreferredName(), type) + .field(TEXT.getPreferredName(), text) + .field(URL.getPreferredName(), url) + .endObject(); + } + + public TextTemplate getType() { + return type; + } + + public void setType(TextTemplate type) { + this.type = type; + } + + public TextTemplate getName() { + return name; + } + + public void setName(TextTemplate name) { + this.name = name; + } + + public TextTemplate getText() { + return text; + } + + public void setText(TextTemplate text) { + this.text = text; + } + + public TextTemplate getUrl() { + return url; + } + + public void setUrl(TextTemplate url) { + this.url = url; + } + + public TextTemplate getStyle() { + return style; + } + + public void setStyle(TextTemplate style) { + this.style = style; + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/Attachment.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/Attachment.java new file mode 100644 index 0000000000000..78cdee1ace6c1 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/Attachment.java @@ -0,0 +1,643 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.slack.message; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class Attachment implements MessageElement { + + final String fallback; + final String color; + final String pretext; + final String authorName; + final String authorLink; + final String authorIcon; + final String title; + final String titleLink; + final String text; + final Field[] fields; + final String imageUrl; + final String thumbUrl; + final String[] markdownSupportedFields; + final List actions; + + public Attachment(String fallback, String color, String pretext, String authorName, String authorLink, + String authorIcon, String title, String titleLink, String text, Field[] fields, + String imageUrl, String thumbUrl, String[] markdownSupportedFields, List actions) { + + this.fallback = fallback; + this.color = color; + this.pretext = pretext; + this.authorName = authorName; + this.authorLink = authorLink; + this.authorIcon = authorIcon; + this.title = title; + this.titleLink = titleLink; + this.text = text; + this.fields = fields; + this.imageUrl = imageUrl; + this.thumbUrl = thumbUrl; + this.markdownSupportedFields = markdownSupportedFields; + this.actions = actions; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Attachment that = (Attachment) o; + return Objects.equals(fallback, that.fallback) && Objects.equals(color, that.color) && + Objects.equals(pretext, that.pretext) && Objects.equals(authorName, that.authorName) && + Objects.equals(authorLink, that.authorLink) && Objects.equals(authorIcon, that.authorIcon) && + Objects.equals(title, that.title) && Objects.equals(titleLink, that.titleLink) && + Objects.equals(text, that.text) && Objects.equals(imageUrl, that.imageUrl) && + Objects.equals(thumbUrl, that.thumbUrl) && Objects.equals(actions, that.actions) && + Arrays.equals(markdownSupportedFields, that.markdownSupportedFields) && Arrays.equals(fields, that.fields); + } + + @Override + public int hashCode() { + return Objects.hash(fallback, color, pretext, authorName, authorLink, authorIcon, title, titleLink, text, fields, imageUrl, + thumbUrl, markdownSupportedFields, actions); + } + + /** + * renders the attachment in slack compatible structure: + *
<p>
+ * https://api.slack.com/docs/attachments#attachment_structure + */ + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (fallback != null) { + builder.field(XField.FALLBACK.getPreferredName(), fallback); + } + if (color != null) { + builder.field(XField.COLOR.getPreferredName(), color); + } + if (pretext != null) { + builder.field(XField.PRETEXT.getPreferredName(), pretext); + } + if (authorName != null) { + builder.field(XField.AUTHOR_NAME.getPreferredName(), authorName); + if (authorLink != null) { + builder.field(XField.AUTHOR_LINK.getPreferredName(), authorLink); + } + if (authorIcon != null) { + builder.field(XField.AUTHOR_ICON.getPreferredName(), authorIcon); + } + } + if (title != null) { + builder.field(XField.TITLE.getPreferredName(), title); + if (titleLink != null) { + builder.field(XField.TITLE_LINK.getPreferredName(), titleLink); + } + } + if (text != null) { + builder.field(XField.TEXT.getPreferredName(), text); + } + if (fields != null) { + builder.startArray(XField.FIELDS.getPreferredName()); + for (Field field : fields) { + field.toXContent(builder, params); + } + builder.endArray(); + } + if (imageUrl != null) { + builder.field(XField.IMAGE_URL.getPreferredName(), imageUrl); + } + if (thumbUrl != null) { + builder.field(XField.THUMB_URL.getPreferredName(), thumbUrl); + } + if (markdownSupportedFields != null) { + builder.startArray(XField.MARKDOWN_IN.getPreferredName()); + for (String field : markdownSupportedFields) { + builder.value(field); + } + builder.endArray(); + } + if (actions != null && actions.isEmpty() == false) { + builder.startArray("actions"); + for (Action action : actions) { + action.toXContent(builder, params); + } + builder.endArray(); + } + + return builder.endObject(); + } + + static class Template implements ToXContentObject { + + final TextTemplate fallback; + final TextTemplate color; + final TextTemplate pretext; + final TextTemplate authorName; + final TextTemplate authorLink; + final TextTemplate authorIcon; + final TextTemplate title; + final TextTemplate titleLink; + final TextTemplate text; + final Field.Template[] fields; + final TextTemplate imageUrl; + final TextTemplate thumbUrl; + final TextTemplate[] markdownSupportedFields; + final List actions; + + Template(TextTemplate fallback, TextTemplate color, TextTemplate pretext, TextTemplate authorName, + TextTemplate authorLink, TextTemplate authorIcon, TextTemplate title, TextTemplate titleLink, + TextTemplate text, Field.Template[] fields, TextTemplate imageUrl, TextTemplate thumbUrl, + TextTemplate[] markdownSupportedFields, List actions) { + + this.fallback = fallback; + this.color = color; + this.pretext = pretext; + this.authorName = authorName; + this.authorLink = authorLink; + this.authorIcon = authorIcon; + this.title = title; + this.titleLink = titleLink; + this.text = text; + this.fields = fields; + this.imageUrl = imageUrl; + this.thumbUrl = thumbUrl; + this.markdownSupportedFields = markdownSupportedFields; + this.actions = actions; + } + + public Attachment render(TextTemplateEngine engine, Map model, SlackMessageDefaults.AttachmentDefaults defaults) { + String fallback = this.fallback != null ? engine.render(this.fallback, model) : defaults.fallback; + String color = this.color != null ? engine.render(this.color, model) : defaults.color; + String pretext = this.pretext != null ? engine.render(this.pretext, model) : defaults.pretext; + String authorName = this.authorName != null ? 
engine.render(this.authorName, model) : defaults.authorName; + String authorLink = this.authorLink != null ? engine.render(this.authorLink, model) : defaults.authorLink; + String authorIcon = this.authorIcon != null ? engine.render(this.authorIcon, model) : defaults.authorIcon; + String title = this.title != null ? engine.render(this.title, model) : defaults.title; + String titleLink = this.titleLink != null ? engine.render(this.titleLink, model) : defaults.titleLink; + String text = this.text != null ? engine.render(this.text, model) : defaults.text; + String imageUrl = this.imageUrl != null ? engine.render(this.imageUrl, model) : defaults.imageUrl; + String thumbUrl = this.thumbUrl != null ? engine.render(this.thumbUrl, model) : defaults.thumbUrl; + Field[] fields = null; + if (this.fields != null) { + fields = new Field[this.fields.length]; + for (int i = 0; i < fields.length; i++) { + fields[i] = this.fields[i].render(engine, model, defaults.field); + } + } + String[] markdownFields = null; + if (this.markdownSupportedFields != null) { + markdownFields = new String[this.markdownSupportedFields.length]; + for (int i = 0; i < markdownSupportedFields.length; i++) { + markdownFields[i] = engine.render(this.markdownSupportedFields[i], model); + } + } + List actions = new ArrayList<>(); + if (this.actions != null && this.actions.isEmpty() == false) { + for (Action.Template action : this.actions) { + actions.add(action.render(engine, model)); + } + } + + return new Attachment(fallback, color, pretext, authorName, authorLink, authorIcon, title, titleLink, text, fields, imageUrl, + thumbUrl, markdownFields, actions); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Template template = (Template) o; + + return Objects.equals(fallback, template.fallback) && Objects.equals(color, template.color) && + Objects.equals(pretext, template.pretext) && Objects.equals(authorName, template.authorName) && + Objects.equals(authorLink, template.authorLink) && Objects.equals(authorIcon, template.authorIcon) && + Objects.equals(title, template.title) && Objects.equals(titleLink, template.titleLink) && + Objects.equals(text, template.text) && Objects.equals(imageUrl, template.imageUrl) && + Objects.equals(thumbUrl, template.thumbUrl) && Objects.equals(actions, template.actions) && + Arrays.equals(fields, template.fields) && + Arrays.equals(markdownSupportedFields, template.markdownSupportedFields); + } + + @Override + public int hashCode() { + return Objects.hash(fallback, color, pretext, authorName, authorLink, authorIcon, title, titleLink, text, fields, imageUrl, + thumbUrl, markdownSupportedFields, actions); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (fallback != null) { + builder.field(XField.FALLBACK.getPreferredName(), fallback, params); + } + if (color != null) { + builder.field(XField.COLOR.getPreferredName(), color, params); + } + if (pretext != null) { + builder.field(XField.PRETEXT.getPreferredName(), pretext, params); + } + if (authorName != null) { + builder.field(XField.AUTHOR_NAME.getPreferredName(), authorName, params); + if (authorLink != null) { + builder.field(XField.AUTHOR_LINK.getPreferredName(), authorLink, params); + } + if (authorIcon != null) { + builder.field(XField.AUTHOR_ICON.getPreferredName(), authorIcon, params); + } + } + if (title != null) { + 
builder.field(XField.TITLE.getPreferredName(), title, params); + if (titleLink != null) { + builder.field(XField.TITLE_LINK.getPreferredName(), titleLink, params); + } + } + if (text != null) { + builder.field(XField.TEXT.getPreferredName(), text, params); + } + if (fields != null) { + builder.startArray(XField.FIELDS.getPreferredName()); + for (Field.Template field : fields) { + field.toXContent(builder, params); + } + builder.endArray(); + } + if (imageUrl != null) { + builder.field(XField.IMAGE_URL.getPreferredName(), imageUrl, params); + } + if (thumbUrl != null) { + builder.field(XField.THUMB_URL.getPreferredName(), thumbUrl, params); + } + if (markdownSupportedFields != null) { + builder.startArray(XField.MARKDOWN_IN.getPreferredName()); + for (TextTemplate field : markdownSupportedFields) { + field.toXContent(builder, params); + } + builder.endArray(); + } + if (actions != null && actions.isEmpty() == false) { + builder.startArray(XField.ACTIONS.getPreferredName()); + for (Action.Template action : actions) { + action.toXContent(builder, params); + } + builder.endArray(); + } + + return builder.endObject(); + } + + public static Template parse(XContentParser parser) throws IOException { + + TextTemplate fallback = null; + TextTemplate color = null; + TextTemplate pretext = null; + TextTemplate authorName = null; + TextTemplate authorLink = null; + TextTemplate authorIcon = null; + TextTemplate title = null; + TextTemplate titleLink = null; + TextTemplate text = null; + Field.Template[] fields = null; + TextTemplate imageUrl = null; + TextTemplate thumbUrl = null; + TextTemplate[] markdownFields = null; + List actions = new ArrayList<>(); + + XContentParser.Token token = null; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (XField.FALLBACK.match(currentFieldName, parser.getDeprecationHandler())) { + try { + fallback = TextTemplate.parse(parser); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe, + XField.FALLBACK); + } + } else if (XField.COLOR.match(currentFieldName, parser.getDeprecationHandler())) { + try { + color = TextTemplate.parse(parser); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe, + XField.COLOR); + } + } else if (XField.PRETEXT.match(currentFieldName, parser.getDeprecationHandler())) { + try { + pretext = TextTemplate.parse(parser); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe, + XField.PRETEXT); + } + } else if (XField.AUTHOR_NAME.match(currentFieldName, parser.getDeprecationHandler())) { + try { + authorName = TextTemplate.parse(parser); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe, + XField.AUTHOR_NAME); + } + } else if (XField.AUTHOR_LINK.match(currentFieldName, parser.getDeprecationHandler())) { + try { + authorLink = TextTemplate.parse(parser); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse message attachment. 
failed to parse [{}] field", pe, + XField.AUTHOR_LINK); + } + } else if (XField.AUTHOR_ICON.match(currentFieldName, parser.getDeprecationHandler())) { + try { + authorIcon = TextTemplate.parse(parser); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe, + XField.AUTHOR_ICON); + } + } else if (XField.TITLE.match(currentFieldName, parser.getDeprecationHandler())) { + try { + title = TextTemplate.parse(parser); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe, + XField.TITLE); + } + } else if (XField.TITLE_LINK.match(currentFieldName, parser.getDeprecationHandler())) { + try { + titleLink = TextTemplate.parse(parser); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe, + XField.TITLE_LINK); + } + } else if (XField.TEXT.match(currentFieldName, parser.getDeprecationHandler())) { + try { + text = TextTemplate.parse(parser); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe, + XField.TEXT); + } + } else if (XField.FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.START_ARRAY) { + List list = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + try { + list.add(Field.Template.parse(parser)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", + pe, XField.FIELDS); + } + } + fields = list.toArray(new Field.Template[list.size()]); + } else { + try { + fields = new Field.Template[]{Field.Template.parse(parser)}; + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe, + XField.FIELDS); + } + } + } else if (XField.IMAGE_URL.match(currentFieldName, parser.getDeprecationHandler())) { + try { + imageUrl = TextTemplate.parse(parser); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe, + XField.IMAGE_URL); + } + } else if (XField.THUMB_URL.match(currentFieldName, parser.getDeprecationHandler())) { + try { + thumbUrl = TextTemplate.parse(parser); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe, + XField.THUMB_URL); + } + } else if (XField.MARKDOWN_IN.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.START_ARRAY) { + List list = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + try { + list.add(new TextTemplate(parser.text())); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", + pe, XField.MARKDOWN_IN); + } + } + markdownFields = list.toArray(new TextTemplate[list.size()]); + } else { + try { + markdownFields = new TextTemplate[]{new TextTemplate(parser.text())}; + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse message attachment. 
failed to parse [{}] field", pe, + XField.MARKDOWN_IN); + } + } + } else if (XField.ACTIONS.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.START_OBJECT) { + actions.add(Action.ACTION_PARSER.parse(parser, null)); + } + } else { + throw new ElasticsearchParseException("could not parse message attachment field. unexpected field [{}]", + currentFieldName); + } + } + if (authorName == null) { + if (authorLink != null) { + throw new ElasticsearchParseException("could not parse message attachment field. found field [{}], but no [{}] is " + + "defined", XField.AUTHOR_LINK, XField.AUTHOR_NAME); + } + if (authorIcon != null) { + throw new ElasticsearchParseException("could not parse message attachment field. found field [{}], but no [{}] is " + + "defined", XField.AUTHOR_ICON, XField.AUTHOR_NAME); + } + } + if (title == null) { + if (titleLink != null) { + throw new ElasticsearchParseException("could not parse message attachment field. found field [{}], but no [{}] is " + + "defined", XField.TITLE_LINK, XField.TITLE); + } + } + return new Template(fallback, color, pretext, authorName, authorLink, authorIcon, title, titleLink, text, fields, imageUrl, + thumbUrl, markdownFields, actions); + } + + + public static Builder builder() { + return new Builder(); + } + + public static class Builder { + + private TextTemplate fallback; + private TextTemplate color; + private TextTemplate pretext; + private TextTemplate authorName; + private TextTemplate authorLink; + private TextTemplate authorIcon; + private TextTemplate title; + private TextTemplate titleLink; + private TextTemplate text; + private List fields = new ArrayList<>(); + private TextTemplate imageUrl; + private TextTemplate thumbUrl; + private List markdownFields = new ArrayList<>(); + private List actions = new ArrayList<>(); + + private Builder() { + } + + public Builder setFallback(TextTemplate fallback) { + this.fallback = fallback; + return this; + } + + public Builder setFallback(String fallback) { + return setFallback(new TextTemplate(fallback)); + } + + public Builder setColor(TextTemplate color) { + this.color = color; + return this; + } + + public Builder setColor(String color) { + return setColor(new TextTemplate(color)); + } + + public Builder setPretext(TextTemplate pretext) { + this.pretext = pretext; + return this; + } + + public Builder setPretext(String pretext) { + return setPretext(new TextTemplate(pretext)); + } + + public Builder setAuthorName(TextTemplate authorName) { + this.authorName = authorName; + return this; + } + + public Builder setAuthorName(String authorName) { + return setAuthorName(new TextTemplate(authorName)); + } + + public Builder setAuthorLink(TextTemplate authorLink) { + this.authorLink = authorLink; + return this; + } + + public Builder setAuthorLink(String authorLink) { + return setAuthorLink(new TextTemplate(authorLink)); + } + + public Builder setAuthorIcon(TextTemplate authorIcon) { + this.authorIcon = authorIcon; + return this; + } + + public Builder setAuthorIcon(String authorIcon) { + return setAuthorIcon(new TextTemplate(authorIcon)); + } + + public Builder setTitle(TextTemplate title) { + this.title = title; + return this; + } + + public Builder setTitle(String title) { + return setTitle(new TextTemplate(title)); + } + + public Builder setTitleLink(TextTemplate titleLink) { + this.titleLink = titleLink; + return this; + } + + public Builder setTitleLink(String titleLink) { + return setTitleLink(new TextTemplate(titleLink)); + } + + public Builder 
setText(TextTemplate text) { + this.text = text; + return this; + } + + public Builder setText(String text) { + return setText(new TextTemplate(text)); + } + + public Builder addField(TextTemplate title, TextTemplate value, boolean isShort) { + fields.add(new Field.Template(title, value, isShort)); + return this; + } + + public Builder addField(String title, String value, boolean isShort) { + return addField(new TextTemplate(title), new TextTemplate(value), isShort); + } + + public Builder setImageUrl(TextTemplate imageUrl) { + this.imageUrl = imageUrl; + return this; + } + + public Builder setImageUrl(String imageUrl) { + return setImageUrl(new TextTemplate(imageUrl)); + } + + public Builder setThumbUrl(TextTemplate thumbUrl) { + this.thumbUrl = thumbUrl; + return this; + } + + public Builder setThumbUrl(String thumbUrl) { + return setThumbUrl(new TextTemplate(thumbUrl)); + } + + public Builder addMarkdownField(String name) { + this.markdownFields.add(new TextTemplate(name)); + return this; + } + + public Builder addAction(Action.Template action) { + this.actions.add(action); + return this; + } + + public Template build() { + Field.Template[] fields = this.fields.isEmpty() ? null : this.fields.toArray(new Field.Template[this.fields.size()]); + TextTemplate[] markdownFields = + this.markdownFields.isEmpty() ? null : this.markdownFields.toArray(new TextTemplate[this.markdownFields.size()]); + return new Template(fallback, color, pretext, authorName, authorLink, authorIcon, title, titleLink, text, fields, imageUrl, + thumbUrl, markdownFields, actions); + } + } + } + + interface XField extends MessageElement.XField { + ParseField FALLBACK = new ParseField("fallback"); + ParseField COLOR = new ParseField("color"); + ParseField PRETEXT = new ParseField("pretext"); + ParseField AUTHOR_NAME = new ParseField("author_name"); + ParseField AUTHOR_LINK = new ParseField("author_link"); + ParseField AUTHOR_ICON = new ParseField("author_icon"); + + ParseField TITLE_LINK = new ParseField("title_link"); + ParseField FIELDS = new ParseField("fields"); + ParseField IMAGE_URL = new ParseField("image_url"); + ParseField THUMB_URL = new ParseField("thumb_url"); + + ParseField MARKDOWN_IN = new ParseField("mrkdwn_in"); + ParseField ACTIONS = new ParseField("actions"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/DynamicAttachments.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/DynamicAttachments.java new file mode 100644 index 0000000000000..4c53fc767c788 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/DynamicAttachments.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.slack.message; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +public class DynamicAttachments implements MessageElement { + + private String listPath; + private Attachment.Template attachment; + + public DynamicAttachments(String listPath, Attachment.Template attachment) { + this.listPath = listPath; + this.attachment = attachment; + } + + public List render(TextTemplateEngine engine, Map model, SlackMessageDefaults.AttachmentDefaults defaults) { + Object value = ObjectPath.eval(listPath, model); + if (!(value instanceof Iterable)) { + throw new IllegalArgumentException("dynamic attachment could not be resolved. expected context [" + listPath + "] to be a " + + "list, but found [" + value + "] instead"); + } + List attachments = new ArrayList<>(); + for (Object obj : (Iterable) value) { + if (!(obj instanceof Map)) { + throw new IllegalArgumentException("dynamic attachment could not be resolved. expected [" + listPath + "] list to contain" + + " key/value pairs, but found [" + obj + "] instead"); + } + Map attachmentModel = (Map) obj; + attachments.add(attachment.render(engine, attachmentModel, defaults)); + } + return attachments; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field(XField.LIST_PATH.getPreferredName(), listPath) + .field(XField.TEMPLATE.getPreferredName(), attachment, params) + .endObject(); + } + + public static DynamicAttachments parse(XContentParser parser) throws IOException { + String listPath = null; + Attachment.Template template = null; + + String currentFieldName = null; + XContentParser.Token token = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (XField.LIST_PATH.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_STRING) { + listPath = parser.text(); + } else { + throw new ElasticsearchParseException("could not parse dynamic attachments. expected a string value for [{}] field, " + + "but found [{}]", XField.LIST_PATH.getPreferredName(), token); + } + } else if (XField.TEMPLATE.match(currentFieldName, parser.getDeprecationHandler())) { + try { + template = Attachment.Template.parse(parser); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse dynamic attachments. failed to parse [{}] field", pe, + XField.TEMPLATE.getPreferredName()); + } + } else { + throw new ElasticsearchParseException("could not parse dynamic attachments. unexpected field [{}]", currentFieldName); + } + } + if (listPath == null) { + throw new ElasticsearchParseException("could not parse dynamic attachments. missing required field [{}]", + XField.LIST_PATH.getPreferredName()); + } + if (template == null) { + throw new ElasticsearchParseException("could not parse dynamic attachments. 
missing required field [{}]", + XField.TEMPLATE.getPreferredName()); + } + return new DynamicAttachments(listPath, template); + } + + interface XField extends MessageElement.XField { + ParseField LIST_PATH = new ParseField("list_path"); + ParseField TEMPLATE = new ParseField("attachment_template"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/Field.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/Field.java new file mode 100644 index 0000000000000..8b29381da369c --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/Field.java @@ -0,0 +1,161 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.slack.message; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +class Field implements MessageElement { + + final String title; + final String value; + final boolean isShort; + + Field(String title, String value, boolean isShort) { + this.title = title; + this.value = value; + this.isShort = isShort; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Field field = (Field) o; + + if (isShort != field.isShort) return false; + if (!title.equals(field.title)) return false; + return value.equals(field.value); + } + + @Override + public int hashCode() { + return Objects.hash(title, value, isShort); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field(XField.TITLE.getPreferredName(), title) + .field(XField.VALUE.getPreferredName(), value) + .field(XField.SHORT.getPreferredName(), isShort) + .endObject(); + } + + static class Template implements ToXContentObject { + + final TextTemplate title; + final TextTemplate value; + final Boolean isShort; + + Template(TextTemplate title, TextTemplate value, Boolean isShort) { + this.title = title; + this.value = value; + this.isShort = isShort; + } + + public Field render(TextTemplateEngine engine, Map model, + SlackMessageDefaults.AttachmentDefaults.FieldDefaults defaults) { + String title = this.title != null ? engine.render(this.title, model) : defaults.title; + String value = this.value != null ? engine.render(this.value, model) : defaults.value; + Boolean isShort = this.isShort != null ? 
this.isShort : defaults.isShort; + return new Field(title, value, isShort); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Template template = (Template) o; + + if (isShort != template.isShort) return false; + if (!title.equals(template.title)) return false; + return value.equals(template.value); + } + + @Override + public int hashCode() { + int result = title.hashCode(); + result = 31 * result + value.hashCode(); + result = 31 * result + (isShort ? 1 : 0); + return result; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field(XField.TITLE.getPreferredName(), title) + .field(XField.VALUE.getPreferredName(), value) + .field(XField.SHORT.getPreferredName(), isShort) + .endObject(); + } + + public static Template parse(XContentParser parser) throws IOException { + + TextTemplate title = null; + TextTemplate value = null; + boolean isShort = false; + + XContentParser.Token token = null; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (XField.TITLE.match(currentFieldName, parser.getDeprecationHandler())) { + try { + title = TextTemplate.parse(parser); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse message attachment field. failed to parse [{}] field", pe, + XField.TITLE); + } + } else if (XField.VALUE.match(currentFieldName, parser.getDeprecationHandler())) { + try { + value = TextTemplate.parse(parser); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse message attachment field. failed to parse [{}] field", pe, + XField.VALUE); + } + } else if (XField.SHORT.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_BOOLEAN) { + isShort = parser.booleanValue(); + } else { + throw new ElasticsearchParseException("could not parse message attachment field. expected a boolean value for " + + "[{}] field, but found [{}]", XField.SHORT, token); + } + } else { + throw new ElasticsearchParseException("could not parse message attachment field. unexpected field [{}]", + currentFieldName); + } + } + + if (title == null) { + throw new ElasticsearchParseException("could not parse message attachment field. missing required [{}] field", + XField.TITLE); + } + if (value == null) { + throw new ElasticsearchParseException("could not parse message attachment field. missing required [{}] field", + XField.VALUE); + } + return new Template(title, value, isShort); + } + } + + interface XField extends MessageElement.XField { + ParseField VALUE = new ParseField("value"); + ParseField SHORT = new ParseField("short"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/MessageElement.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/MessageElement.java new file mode 100644 index 0000000000000..e800bf5802eb5 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/MessageElement.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.slack.message; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ToXContentObject; + +public interface MessageElement extends ToXContentObject { + + interface XField { + ParseField TITLE = new ParseField("title"); + ParseField TEXT = new ParseField("text"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/SlackMessage.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/SlackMessage.java new file mode 100644 index 0000000000000..9f7cd36c9106a --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/SlackMessage.java @@ -0,0 +1,410 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.slack.message; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class SlackMessage implements MessageElement { + + final String from; + final String[] to; + final String icon; + final String text; + final Attachment[] attachments; + + public SlackMessage(String from, String[] to, String icon, String text, Attachment[] attachments) { + this.from = from; + this.to = to; + this.icon = icon; + this.text = text; + this.attachments = attachments; + } + + public String getFrom() { + return from; + } + + public String[] getTo() { + return to; + } + + public String getIcon() { + return icon; + } + + public String getText() { + return text; + } + + public Attachment[] getAttachments() { + return attachments; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + SlackMessage that = (SlackMessage) o; + + if (from != null ? !from.equals(that.from) : that.from != null) return false; + if (!Arrays.equals(to, that.to)) return false; + if (icon != null ? !icon.equals(that.icon) : that.icon != null) return false; + if (text != null ? !text.equals(that.text) : that.text != null) return false; + return Arrays.equals(attachments, that.attachments); + } + + @Override + public int hashCode() { + int result = from != null ? from.hashCode() : 0; + result = 31 * result + (to != null ? Arrays.hashCode(to) : 0); + result = 31 * result + (icon != null ? icon.hashCode() : 0); + result = 31 * result + (text != null ? text.hashCode() : 0); + result = 31 * result + (attachments != null ? 
Arrays.hashCode(attachments) : 0); + return result; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return toXContent(builder, params, true); + } + + public XContentBuilder toXContent(XContentBuilder builder, Params params, boolean includeTargets) throws IOException { + builder.startObject(); + if (from != null) { + builder.field(XField.FROM.getPreferredName(), from); + } + if (includeTargets) { + if (to != null) { + builder.array(XField.TO.getPreferredName(), to); + } + } + if (icon != null) { + builder.field(XField.ICON.getPreferredName(), icon); + } + if (text != null) { + builder.field(XField.TEXT.getPreferredName(), text); + } + if (attachments != null) { + builder.startArray(XField.ATTACHMENTS.getPreferredName()); + for (Attachment attachment : attachments) { + attachment.toXContent(builder, params); + } + builder.endArray(); + } + return builder.endObject(); + } + + public static class Template implements ToXContentObject { + + final TextTemplate from; + final TextTemplate[] to; + final TextTemplate text; + final TextTemplate icon; + final Attachment.Template[] attachments; + final DynamicAttachments dynamicAttachments; + + public Template(TextTemplate from, TextTemplate[] to, TextTemplate text, TextTemplate icon, Attachment.Template[] attachments, + DynamicAttachments dynamicAttachments) { + this.from = from; + this.to = to; + this.text = text; + this.icon = icon; + this.attachments = attachments; + this.dynamicAttachments = dynamicAttachments; + } + + public TextTemplate getFrom() { + return from; + } + + public TextTemplate[] getTo() { + return to; + } + + public TextTemplate getText() { + return text; + } + + public TextTemplate getIcon() { + return icon; + } + + public Attachment.Template[] getAttachments() { + return attachments; + } + + public DynamicAttachments dynamicAttachments() { + return dynamicAttachments; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Template template = (Template) o; + + return Objects.equals(from, template.from) && + Objects.equals(text, template.text) && + Objects.equals(icon, template.icon) && + Objects.equals(dynamicAttachments, template.dynamicAttachments) && + Arrays.equals(to, template.to) && + Arrays.equals(attachments, template.attachments); + } + + @Override + public int hashCode() { + return Objects.hash(from, to, text, icon, attachments, dynamicAttachments); + } + + public SlackMessage render(String watchId, String actionId, TextTemplateEngine engine, Map model, + SlackMessageDefaults defaults) { + String from = this.from != null ? engine.render(this.from, model) : + defaults.from != null ? defaults.from : watchId; + String[] to = defaults.to; + if (this.to != null) { + to = new String[this.to.length]; + for (int i = 0; i < to.length; i++) { + to[i] = engine.render(this.to[i], model); + } + } + String text = this.text != null ? engine.render(this.text, model) : defaults.text; + String icon = this.icon != null ? 
engine.render(this.icon, model) : defaults.icon; + List attachments = null; + if (this.attachments != null) { + attachments = new ArrayList<>(); + for (Attachment.Template attachment : this.attachments) { + attachments.add(attachment.render(engine, model, defaults.attachment)); + } + } + if (dynamicAttachments != null) { + if (attachments == null) { + attachments = new ArrayList<>(); + } + attachments.addAll(dynamicAttachments.render(engine, model, defaults.attachment)); + } + if (attachments == null) { + return new SlackMessage(from, to, icon, text, null); + } + return new SlackMessage(from, to, icon, text, attachments.toArray(new Attachment[attachments.size()])); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (from != null) { + builder.field(XField.FROM.getPreferredName(), from); + } + if (to != null) { + builder.startArray(XField.TO.getPreferredName()); + for (TextTemplate template : to) { + template.toXContent(builder, params); + } + builder.endArray(); + } + if (text != null) { + builder.field(XField.TEXT.getPreferredName(), text, params); + } + if (icon != null) { + builder.field(XField.ICON.getPreferredName(), icon, params); + } + if (attachments != null) { + builder.startArray(XField.ATTACHMENTS.getPreferredName()); + for (Attachment.Template attachment : attachments) { + attachment.toXContent(builder, params); + } + builder.endArray(); + } + if (dynamicAttachments != null) { + builder.field(XField.DYNAMIC_ATTACHMENTS.getPreferredName(), dynamicAttachments, params); + } + return builder.endObject(); + } + + public static Template parse(XContentParser parser) throws IOException { + Builder builder = new Builder(); + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (XField.FROM.match(currentFieldName, parser.getDeprecationHandler())) { + try { + builder.setFrom(TextTemplate.parse(parser)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse slack message. failed to parse [{}] field", pe, + XField.FROM.getPreferredName()); + } + } else if (XField.TO.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.START_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + try { + builder.addTo(TextTemplate.parse(parser)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse slack message. failed to parse [{}] field.", pe, + XField.TO.getPreferredName()); + } + } + } else { + try { + builder.addTo(TextTemplate.parse(parser)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse slack message. failed to parse [{}] field", pe, + XField.TO.getPreferredName()); + } + } + } else if (XField.TEXT.match(currentFieldName, parser.getDeprecationHandler())) { + try { + builder.setText(TextTemplate.parse(parser)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse slack message. 
failed to parse [{}] field", pe, + XField.TEXT.getPreferredName()); + } + } else if (XField.ICON.match(currentFieldName, parser.getDeprecationHandler())) { + try { + builder.setIcon(TextTemplate.parse(parser)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse slack message. failed to parse [{}] field.", pe, + XField.ICON.getPreferredName()); + } + } else if (XField.ATTACHMENTS.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.START_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + try { + builder.addAttachments(Attachment.Template.parse(parser)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse slack message. failed to parse [{}] field.", pe, + XField.ATTACHMENTS.getPreferredName()); + } + } + } else { + try { + builder.addAttachments(Attachment.Template.parse(parser)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse slack message. failed to parse [{}] field.", pe, + XField.ATTACHMENTS.getPreferredName()); + } + } + } else if (XField.DYNAMIC_ATTACHMENTS.match(currentFieldName, parser.getDeprecationHandler())) { + try { + builder.setDynamicAttachments(DynamicAttachments.parse(parser)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse slack message. failed to parse [{}] field.", pe, + XField.ICON.getPreferredName()); + } + } else { + throw new ElasticsearchParseException("could not parse slack message. unknown field [{}].", currentFieldName); + } + } + + return builder.build(); + } + + public static Builder builder() { + return new Builder(); + } + + public static class Builder { + + TextTemplate from; + final List to = new ArrayList<>(); + TextTemplate text; + TextTemplate icon; + final List attachments = new ArrayList<>(); + DynamicAttachments dynamicAttachments; + + private Builder() { + } + + public Builder setFrom(TextTemplate from) { + this.from = from; + return this; + } + + public Builder setFrom(String from) { + return setFrom(new TextTemplate(from)); + } + + public Builder addTo(TextTemplate... to) { + Collections.addAll(this.to, to); + return this; + } + + public Builder addTo(String... to) { + for (String name : to) { + this.to.add(new TextTemplate(name)); + } + return this; + } + + public Builder setText(TextTemplate text) { + this.text = text; + return this; + } + + public Builder setText(String text) { + return setText(new TextTemplate(text)); + } + + public Builder setIcon(TextTemplate icon) { + this.icon = icon; + return this; + } + + public Builder setIcon(String icon) { + return setIcon(new TextTemplate(icon)); + } + + public Builder addAttachments(Attachment.Template... attachments) { + Collections.addAll(this.attachments, attachments); + return this; + } + + public Builder addAttachments(Attachment.Template.Builder... attachments) { + for (Attachment.Template.Builder attachment : attachments) { + this.attachments.add(attachment.build()); + } + return this; + } + + public Builder setDynamicAttachments(DynamicAttachments dynamicAttachments) { + this.dynamicAttachments = dynamicAttachments; + return this; + } + + public Template build() { + TextTemplate[] to = this.to.isEmpty() ? null : this.to.toArray(new TextTemplate[this.to.size()]); + Attachment.Template[] attachments = this.attachments.isEmpty() ? 
null : + this.attachments.toArray(new Attachment.Template[this.attachments.size()]); + return new Template(from, to, text, icon, attachments, dynamicAttachments); + } + } + } + + interface XField extends MessageElement.XField { + ParseField FROM = new ParseField("from"); + ParseField TO = new ParseField("to"); + ParseField ICON = new ParseField("icon"); + ParseField ATTACHMENTS = new ParseField("attachments"); + ParseField DYNAMIC_ATTACHMENTS = new ParseField("dynamic_attachments"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/SlackMessageDefaults.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/SlackMessageDefaults.java new file mode 100644 index 0000000000000..b6d3e09f71608 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/SlackMessageDefaults.java @@ -0,0 +1,157 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.slack.message; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +public class SlackMessageDefaults { + + private static final String FROM_SETTING = SlackMessage.XField.FROM.getPreferredName(); + private static final String TO_SETTING = SlackMessage.XField.TO.getPreferredName(); + private static final String ICON_SETTING = SlackMessage.XField.ICON.getPreferredName(); + private static final String TEXT_SETTING = SlackMessage.XField.TEXT.getPreferredName(); + private static final String ATTACHMENT_SETTING = "attachment"; + + public final String from; + public final String[] to; + public final String icon; + public final String text; + public final AttachmentDefaults attachment; + + public SlackMessageDefaults(Settings settings) { + from = settings.get(FROM_SETTING, null); + List to = settings.getAsList(TO_SETTING, null); + this.to = to == null ? null : to.toArray(Strings.EMPTY_ARRAY); + icon = settings.get(ICON_SETTING, null); + text = settings.get(TEXT_SETTING, null); + attachment = new AttachmentDefaults(settings.getAsSettings(ATTACHMENT_SETTING)); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + SlackMessageDefaults defaults = (SlackMessageDefaults) o; + + if (from != null ? !from.equals(defaults.from) : defaults.from != null) return false; + if (!Arrays.equals(to, defaults.to)) return false; + if (icon != null ? !icon.equals(defaults.icon) : defaults.icon != null) return false; + if (text != null ? !text.equals(defaults.text) : defaults.text != null) return false; + return !(attachment != null ? 
!attachment.equals(defaults.attachment) : defaults.attachment != null); + } + + @Override + public int hashCode() { + return Objects.hash(to, icon, text, attachment); + } + + static class AttachmentDefaults { + + static final String FALLBACK_SETTING = Attachment.XField.FALLBACK.getPreferredName(); + static final String COLOR_SETTING = Attachment.XField.COLOR.getPreferredName(); + static final String PRETEXT_SETTING = Attachment.XField.PRETEXT.getPreferredName(); + static final String AUTHOR_NAME_SETTING = Attachment.XField.AUTHOR_NAME.getPreferredName(); + static final String AUTHOR_LINK_SETTING = Attachment.XField.AUTHOR_LINK.getPreferredName(); + static final String AUTHOR_ICON_SETTING = Attachment.XField.AUTHOR_ICON.getPreferredName(); + static final String TITLE_SETTING = Attachment.XField.TITLE.getPreferredName(); + static final String TITLE_LINK_SETTING = Attachment.XField.TITLE_LINK.getPreferredName(); + static final String TEXT_SETTING = Attachment.XField.TEXT.getPreferredName(); + static final String IMAGE_URL_SETTING = Attachment.XField.IMAGE_URL.getPreferredName(); + static final String THUMB_URL_SETTING = Attachment.XField.THUMB_URL.getPreferredName(); + static final String MARKDOWN_IN_SETTING = Attachment.XField.MARKDOWN_IN.getPreferredName(); + static final String FIELD_SETTING = "field"; + + final String fallback; + final String color; + final String pretext; + final String authorName; + final String authorLink; + final String authorIcon; + final String title; + final String titleLink; + final String text; + final String imageUrl; + final String thumbUrl; + final List markdownSupportedFields; + final FieldDefaults field; + + AttachmentDefaults(Settings settings) { + fallback = settings.get(FALLBACK_SETTING, null); + color = settings.get(COLOR_SETTING, null); + pretext = settings.get(PRETEXT_SETTING, null); + authorName = settings.get(AUTHOR_NAME_SETTING, null); + authorLink = settings.get(AUTHOR_LINK_SETTING, null); + authorIcon = settings.get(AUTHOR_ICON_SETTING, null); + title = settings.get(TITLE_SETTING, null); + titleLink = settings.get(TITLE_LINK_SETTING, null); + text = settings.get(TEXT_SETTING, null); + imageUrl = settings.get(IMAGE_URL_SETTING, null); + thumbUrl = settings.get(THUMB_URL_SETTING, null); + markdownSupportedFields = settings.getAsList(MARKDOWN_IN_SETTING, null); + field = new FieldDefaults(settings.getAsSettings(FIELD_SETTING)); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + AttachmentDefaults that = (AttachmentDefaults) o; + + return Objects.equals(fallback, that.fallback) && Objects.equals(color, that.color) && + Objects.equals(pretext, that.pretext) && Objects.equals(authorName, that.authorName) && + Objects.equals(authorLink, that.authorLink) && Objects.equals(authorIcon, that.authorIcon) && + Objects.equals(title, that.title) && Objects.equals(titleLink, that.titleLink) && + Objects.equals(text, that.text) && Objects.equals(imageUrl, that.imageUrl) && + Objects.equals(thumbUrl, that.thumbUrl) && Objects.equals(field, that.field) && + Objects.equals(markdownSupportedFields, that.markdownSupportedFields); + } + + @Override + public int hashCode() { + return Objects.hash(fallback, color, pretext, authorName, authorLink, authorIcon, title, titleLink, text, imageUrl, + thumbUrl, field, markdownSupportedFields); + } + + static class FieldDefaults { + + static final String TITLE_SETTING = Field.XField.TITLE.getPreferredName(); + static final String 
VALUE_SETTING = Field.XField.VALUE.getPreferredName(); + static final String SHORT_SETTING = Field.XField.SHORT.getPreferredName(); + + final String title; + final String value; + final Boolean isShort; + + FieldDefaults(Settings settings) { + title = settings.get(TITLE_SETTING, null); + value = settings.get(VALUE_SETTING, null); + isShort = settings.getAsBoolean(SHORT_SETTING, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + FieldDefaults that = (FieldDefaults) o; + + return Objects.equals(title, that.title) && Objects.equals(value, that.value) && Objects.equals(isShort, that.isShort); + } + + @Override + public int hashCode() { + return Objects.hash(title, value, isShort); + } + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/WatcherRestHandler.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/WatcherRestHandler.java new file mode 100644 index 0000000000000..f6c3f2189634e --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/WatcherRestHandler.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.rest; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; + +import java.io.IOException; + +public abstract class WatcherRestHandler extends BaseRestHandler { + + protected static String URI_BASE = "_xpack/watcher"; + + public WatcherRestHandler(Settings settings) { + super(settings); + } + + @Override + public final RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + return doPrepareRequest(request, new WatcherClient(client)); + } + + protected abstract RestChannelConsumer doPrepareRequest(RestRequest request, WatcherClient client) throws IOException; + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestAckWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestAckWatchAction.java new file mode 100644 index 0000000000000..b75cb9cfb7ab4 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestAckWatchAction.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.rest.action; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchResponse; +import org.elasticsearch.xpack.core.watcher.watch.WatchField; +import org.elasticsearch.xpack.watcher.rest.WatcherRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestRequest.Method.PUT; + +/** + * The rest action to ack a watch + */ +public class RestAckWatchAction extends WatcherRestHandler { + + public RestAckWatchAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(POST, URI_BASE + "/watch/{id}/_ack", this); + controller.registerHandler(PUT, URI_BASE + "/watch/{id}/_ack", this); + controller.registerHandler(POST, URI_BASE + "/watch/{id}/_ack/{actions}", this); + controller.registerHandler(PUT, URI_BASE + "/watch/{id}/_ack/{actions}", this); + } + + @Override + public String getName() { + return "xpack_watcher_ack_watch_action"; + } + + @Override + public RestChannelConsumer doPrepareRequest(RestRequest request, WatcherClient client) throws IOException { + AckWatchRequest ackWatchRequest = new AckWatchRequest(request.param("id")); + String[] actions = request.paramAsStringArray("actions", null); + if (actions != null) { + ackWatchRequest.setActionIds(actions); + } + return channel -> client.ackWatch(ackWatchRequest, new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(AckWatchResponse response, XContentBuilder builder) throws Exception { + return new BytesRestResponse(RestStatus.OK, builder.startObject() + .field(WatchField.STATUS.getPreferredName(), response.getStatus(), WatcherParams.HIDE_SECRETS) + .endObject()); + + } + }); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestActivateWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestActivateWatchAction.java new file mode 100644 index 0000000000000..68fc1d6d4fc54 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestActivateWatchAction.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.rest.action; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.core.watcher.transport.actions.activate.ActivateWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.activate.ActivateWatchResponse; +import org.elasticsearch.xpack.core.watcher.watch.WatchField; +import org.elasticsearch.xpack.watcher.rest.WatcherRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestRequest.Method.PUT; + +/** + * The rest action to de/activate a watch + */ +public class RestActivateWatchAction extends WatcherRestHandler { + public RestActivateWatchAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(POST, URI_BASE + "/watch/{id}/_activate", this); + controller.registerHandler(PUT, URI_BASE + "/watch/{id}/_activate", this); + final DeactivateRestHandler deactivateRestHandler = new DeactivateRestHandler(settings); + controller.registerHandler(POST, URI_BASE + "/watch/{id}/_deactivate", deactivateRestHandler); + controller.registerHandler(PUT, URI_BASE + "/watch/{id}/_deactivate", deactivateRestHandler); + } + + @Override + public String getName() { + return "xpack_watcher_activate_watch_action"; + } + + @Override + public RestChannelConsumer doPrepareRequest(RestRequest request, WatcherClient client) throws IOException { + String watchId = request.param("id"); + return channel -> + client.activateWatch(new ActivateWatchRequest(watchId, true), new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(ActivateWatchResponse response, XContentBuilder builder) throws Exception { + return new BytesRestResponse(RestStatus.OK, builder.startObject() + .field(WatchField.STATUS.getPreferredName(), response.getStatus(), WatcherParams.HIDE_SECRETS) + .endObject()); + } + }); + } + + private static class DeactivateRestHandler extends WatcherRestHandler { + + DeactivateRestHandler(Settings settings) { + super(settings); + } + + @Override + public String getName() { + return "xpack_watcher_deactivate_watch_action"; + } + + @Override + public RestChannelConsumer doPrepareRequest(RestRequest request, WatcherClient client) throws IOException { + String watchId = request.param("id"); + return channel -> + client.activateWatch(new ActivateWatchRequest(watchId, false), new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(ActivateWatchResponse response, XContentBuilder builder) throws Exception { + return new BytesRestResponse(RestStatus.OK, builder.startObject() + .field(WatchField.STATUS.getPreferredName(), response.getStatus(), WatcherParams.HIDE_SECRETS) + .endObject()); + } + }); + } + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestDeleteWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestDeleteWatchAction.java new file mode 100644 index 
0000000000000..e41d52c1954fe --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestDeleteWatchAction.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.rest.action; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.transport.actions.delete.DeleteWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.delete.DeleteWatchResponse; +import org.elasticsearch.xpack.watcher.rest.WatcherRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestStatus.NOT_FOUND; +import static org.elasticsearch.rest.RestStatus.OK; + +public class RestDeleteWatchAction extends WatcherRestHandler { + public RestDeleteWatchAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(DELETE, URI_BASE + "/watch/{id}", this); + } + + @Override + public String getName() { + return "xpack_watcher_delete_watch_action"; + } + + @Override + protected RestChannelConsumer doPrepareRequest(final RestRequest request, WatcherClient client) throws IOException { + DeleteWatchRequest deleteWatchRequest = new DeleteWatchRequest(request.param("id")); + return channel -> client.deleteWatch(deleteWatchRequest, new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(DeleteWatchResponse response, XContentBuilder builder) throws Exception { + builder.startObject() + .field("_id", response.getId()) + .field("_version", response.getVersion()) + .field("found", response.isFound()) + .endObject(); + RestStatus status = response.isFound() ? OK : NOT_FOUND; + return new BytesRestResponse(status, builder); + } + }); + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestExecuteWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestExecuteWatchAction.java new file mode 100644 index 0000000000000..5de47107be8dd --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestExecuteWatchAction.java @@ -0,0 +1,179 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.rest.action; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.rest.RestRequestFilter; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.execution.ActionExecutionMode; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchRequestBuilder; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse; +import org.elasticsearch.xpack.core.watcher.watch.WatchField; +import org.elasticsearch.xpack.watcher.rest.WatcherRestHandler; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Set; + +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.xpack.watcher.rest.action.RestExecuteWatchAction.Field.IGNORE_CONDITION; +import static org.elasticsearch.xpack.watcher.rest.action.RestExecuteWatchAction.Field.RECORD_EXECUTION; + +public class RestExecuteWatchAction extends WatcherRestHandler implements RestRequestFilter { + + private static final List RESERVED_FIELD_NAMES = Arrays.asList(WatchField.TRIGGER.getPreferredName(), + WatchField.INPUT.getPreferredName(), WatchField.CONDITION.getPreferredName(), + WatchField.ACTIONS.getPreferredName(), WatchField.TRANSFORM.getPreferredName(), + WatchField.THROTTLE_PERIOD.getPreferredName(), WatchField.THROTTLE_PERIOD_HUMAN.getPreferredName(), + WatchField.METADATA.getPreferredName(), WatchField.STATUS.getPreferredName(), + WatchField.VERSION.getPreferredName()); + + public RestExecuteWatchAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(POST, URI_BASE + "/watch/{id}/_execute", this); + controller.registerHandler(PUT, URI_BASE + "/watch/{id}/_execute", this); + controller.registerHandler(POST, URI_BASE + "/watch/_execute", this); + controller.registerHandler(PUT, URI_BASE + "/watch/_execute", this); + } + + @Override + public String getName() { + return "xpack_watcher_execute_watch_action"; + } + + @Override + protected RestChannelConsumer doPrepareRequest(final RestRequest request, WatcherClient client) throws IOException { + ExecuteWatchRequest executeWatchRequest = parseRequest(request, client); + + return channel -> client.executeWatch(executeWatchRequest, new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(ExecuteWatchResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + 
builder.field(Field.ID.getPreferredName(), response.getRecordId()); + builder.field(Field.WATCH_RECORD.getPreferredName(), response.getRecordSource(), request); + builder.endObject(); + return new BytesRestResponse(RestStatus.OK, builder); + } + }); + } + + //This tightly binds the REST API to the java API + private ExecuteWatchRequest parseRequest(RestRequest request, WatcherClient client) throws IOException { + ExecuteWatchRequestBuilder builder = client.prepareExecuteWatch(); + builder.setId(request.param("id")); + builder.setDebug(WatcherParams.debug(request)); + + if (request.hasContent() == false) { + return builder.request(); + } + + builder.setRecordExecution(request.paramAsBoolean(RECORD_EXECUTION.getPreferredName(), builder.request().isRecordExecution())); + builder.setIgnoreCondition(request.paramAsBoolean(IGNORE_CONDITION.getPreferredName(), builder.request().isIgnoreCondition())); + + try (XContentParser parser = request.contentParser()) { + parser.nextToken(); + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_BOOLEAN) { + if (IGNORE_CONDITION.match(currentFieldName, parser.getDeprecationHandler())) { + builder.setIgnoreCondition(parser.booleanValue()); + } else if (RECORD_EXECUTION.match(currentFieldName, parser.getDeprecationHandler())) { + builder.setRecordExecution(parser.booleanValue()); + } else { + throw new ElasticsearchParseException("could not parse watch execution request. unexpected boolean field [{}]", + currentFieldName); + } + } else if (token == XContentParser.Token.START_OBJECT) { + if (Field.ALTERNATIVE_INPUT.match(currentFieldName, parser.getDeprecationHandler())) { + builder.setAlternativeInput(parser.map()); + } else if (Field.TRIGGER_DATA.match(currentFieldName, parser.getDeprecationHandler())) { + builder.setTriggerData(parser.map()); + } else if (Field.WATCH.match(currentFieldName, parser.getDeprecationHandler())) { + try (XContentBuilder watcherSource = XContentBuilder.builder(parser.contentType().xContent())) { + watcherSource.generator().copyCurrentStructure(parser); + builder.setWatchSource(BytesReference.bytes(watcherSource), parser.contentType()); + } + } else if (Field.ACTION_MODES.match(currentFieldName, parser.getDeprecationHandler())) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_STRING) { + try { + ActionExecutionMode mode = ActionExecutionMode.resolve(parser.textOrNull()); + builder.setActionMode(currentFieldName, mode); + } catch (IllegalArgumentException iae) { + throw new ElasticsearchParseException("could not parse watch execution request", iae); + } + } else { + throw new ElasticsearchParseException( + "could not parse watch execution request. unexpected array field [{}]", + currentFieldName); + } + } + } else { + if (RESERVED_FIELD_NAMES.contains(currentFieldName)) { + throw new ElasticsearchParseException("please wrap watch including field [{}] inside a \"watch\" field", + currentFieldName); + } else { + throw new ElasticsearchParseException("could not parse watch execution request. unexpected object field [{}]", + currentFieldName); + } + } + } else { + throw new ElasticsearchParseException("could not parse watch execution request. 
unexpected token [{}]", token); + } + } + } + + return builder.request(); + } + + private static final Set FILTERED_FIELDS = Collections.unmodifiableSet( + Sets.newHashSet("watch.input.http.request.auth.basic.password", + "watch.input.chain.inputs.*.http.request.auth.basic.password", + "watch.actions.*.email.attachments.*.reporting.auth.basic.password", + "watch.actions.*.webhook.auth.basic.password")); + + @Override + public Set getFilteredFields() { + return FILTERED_FIELDS; + } + + interface Field { + ParseField ID = new ParseField("_id"); + ParseField WATCH_RECORD = new ParseField("watch_record"); + + ParseField RECORD_EXECUTION = new ParseField("record_execution"); + ParseField ACTION_MODES = new ParseField("action_modes"); + ParseField ALTERNATIVE_INPUT = new ParseField("alternative_input"); + ParseField IGNORE_CONDITION = new ParseField("ignore_condition"); + ParseField TRIGGER_DATA = new ParseField("trigger_data"); + ParseField WATCH = new ParseField("watch"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestGetWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestGetWatchAction.java new file mode 100644 index 0000000000000..8c80ef060a8fc --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestGetWatchAction.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.rest.action; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchResponse; +import org.elasticsearch.xpack.watcher.rest.WatcherRestHandler; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestStatus.NOT_FOUND; +import static org.elasticsearch.rest.RestStatus.OK; + +public class RestGetWatchAction extends WatcherRestHandler { + public RestGetWatchAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(GET, URI_BASE + "/watch/{id}", this); + } + + @Override + public String getName() { + return "xpack_watcher_get_watch_action"; + } + + @Override + protected RestChannelConsumer doPrepareRequest(final RestRequest request, WatcherClient client) { + final GetWatchRequest getWatchRequest = new GetWatchRequest(request.param("id")); + return channel -> client.getWatch(getWatchRequest, new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(GetWatchResponse response, XContentBuilder builder) throws Exception { + builder.startObject() + .field("found", response.isFound()) + .field("_id", response.getId()); + if (response.isFound()) { + builder.field("_version", 
response.getVersion()); + ToXContent.MapParams xContentParams = new ToXContent.MapParams(request.params()); + builder.field("status", response.getStatus(), xContentParams); + builder.field("watch", response.getSource(), xContentParams); + } + builder.endObject(); + + RestStatus status = response.isFound() ? OK : NOT_FOUND; + return new BytesRestResponse(status, builder); + } + }); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestPutWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestPutWatchAction.java new file mode 100644 index 0000000000000..0386ca47c3498 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestPutWatchAction.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.rest.action; + +import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.xpack.core.security.rest.RestRequestFilter; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; +import org.elasticsearch.xpack.watcher.rest.WatcherRestHandler; + +import java.io.IOException; +import java.util.Collections; +import java.util.Set; + +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestStatus.CREATED; +import static org.elasticsearch.rest.RestStatus.OK; + +public class RestPutWatchAction extends WatcherRestHandler implements RestRequestFilter { + + public RestPutWatchAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(POST, URI_BASE + "/watch/{id}", this); + controller.registerHandler(PUT, URI_BASE + "/watch/{id}", this); + } + + @Override + public String getName() { + return "xpack_watcher_put_watch_action"; + } + + @Override + protected RestChannelConsumer doPrepareRequest(final RestRequest request, WatcherClient client) throws IOException { + PutWatchRequest putWatchRequest = + new PutWatchRequest(request.param("id"), request.content(), request.getXContentType()); + putWatchRequest.setVersion(request.paramAsLong("version", Versions.MATCH_ANY)); + putWatchRequest.setActive(request.paramAsBoolean("active", putWatchRequest.isActive())); + return channel -> client.putWatch(putWatchRequest, new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(PutWatchResponse response, XContentBuilder builder) throws Exception { + builder.startObject() + .field("_id", response.getId()) + .field("_version", response.getVersion()) + .field("created", response.isCreated()) + .endObject(); + RestStatus status = 
response.isCreated() ? CREATED : OK; + return new BytesRestResponse(status, builder); + } + }); + } + + private static final Set FILTERED_FIELDS = Collections.unmodifiableSet( + Sets.newHashSet("input.http.request.auth.basic.password", "input.chain.inputs.*.http.request.auth.basic.password", + "actions.*.email.attachments.*.reporting.auth.basic.password", "actions.*.webhook.auth.basic.password")); + + @Override + public Set getFilteredFields() { + return FILTERED_FIELDS; + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestWatchServiceAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestWatchServiceAction.java new file mode 100644 index 0000000000000..745384916ba89 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestWatchServiceAction.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.rest.action; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceRequest; +import org.elasticsearch.xpack.watcher.rest.WatcherRestHandler; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +public class RestWatchServiceAction extends WatcherRestHandler { + + public RestWatchServiceAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(POST, URI_BASE + "/_start", this); + controller.registerHandler(POST, URI_BASE + "/_stop", new StopRestHandler(settings)); + } + + @Override + public String getName() { + return "xpack_watcher_start_service_action"; + } + + @Override + public RestChannelConsumer doPrepareRequest(RestRequest request, WatcherClient client) { + return channel -> client.watcherService(new WatcherServiceRequest().start(), new RestToXContentListener<>(channel)); + } + + private static class StopRestHandler extends WatcherRestHandler { + + StopRestHandler(Settings settings) { + super(settings); + } + + @Override + public String getName() { + return "xpack_watcher_stop_service_action"; + } + + @Override + public RestChannelConsumer doPrepareRequest(RestRequest request, WatcherClient client) { + return channel -> client.watcherService(new WatcherServiceRequest().stop(), new RestToXContentListener<>(channel)); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestWatcherStatsAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestWatcherStatsAction.java new file mode 100644 index 0000000000000..90c756c1323be --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestWatcherStatsAction.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
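
For context on the `_execute` endpoint registered by `RestExecuteWatchAction` earlier in this diff: its `parseRequest` method accepts a small set of top-level body fields (`ignore_condition`, `record_execution`, `alternative_input`, `trigger_data`, `action_modes`, and an inlined `watch`). The sketch below is illustrative only and not part of the PR; the action id, trigger values, and alternative input are made up, and `simulate` is assumed to be one of the names `ActionExecutionMode.resolve` accepts.

```java
// Illustrative only (not from this PR): a body the _execute parser above would accept.
// "my_action" and the trigger/alternative_input values are hypothetical.
String executeWatchBody =
      "{"
    + "  \"ignore_condition\": true,"                                  // boolean field
    + "  \"record_execution\": false,"                                 // boolean field
    + "  \"trigger_data\": { \"triggered_time\": \"now\" },"           // parsed with parser.map()
    + "  \"alternative_input\": { \"foo\": \"bar\" },"                 // parsed with parser.map()
    + "  \"action_modes\": { \"my_action\": \"simulate\" }"            // action name -> execution mode
    + "}";
```
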
+ */ +package org.elasticsearch.xpack.watcher.rest.action; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestActions; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsRequest; +import org.elasticsearch.xpack.watcher.rest.WatcherRestHandler; + +import java.io.IOException; +import java.util.Collections; +import java.util.Set; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +public class RestWatcherStatsAction extends WatcherRestHandler { + public RestWatcherStatsAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(GET, URI_BASE + "/stats", this); + controller.registerHandler(GET, URI_BASE + "/stats/{metric}", this); + } + + @Override + public String getName() { + return "xpack_watcher_stats_action"; + } + + @Override + protected RestChannelConsumer doPrepareRequest(final RestRequest restRequest, WatcherClient client) throws IOException { + Set<String> metrics = Strings.tokenizeByCommaToSet(restRequest.param("metric", "")); + + WatcherStatsRequest request = new WatcherStatsRequest(); + if (metrics.contains("_all")) { + request.includeCurrentWatches(true); + request.includeQueuedWatches(true); + } else { + request.includeCurrentWatches(metrics.contains("current_watches")); + request.includeQueuedWatches(metrics.contains("queued_watches") || metrics.contains("pending_watches")); + } + + + return channel -> client.watcherStats(request, new RestActions.NodesResponseRestListener<>(channel)); + } + + private static final Set<String> RESPONSE_PARAMS = Collections.singleton("emit_stacktraces"); + + @Override + protected Set<String> responseParams() { + // this parameter is only needed when current watches are supposed to be returned + // it's used in the WatchExecutionContext.toXContent() method + return RESPONSE_PARAMS; + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/ArrayObjectIterator.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/ArrayObjectIterator.java new file mode 100644 index 0000000000000..6401d49ca1aa8 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/ArrayObjectIterator.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.support; + +import java.lang.reflect.Array; +import java.util.Iterator; + +public class ArrayObjectIterator implements Iterator { + + private final Object array; + private final int length; + private int index; + + public ArrayObjectIterator(Object array) { + this.array = array; + this.length = Array.getLength(array); + this.index = 0; + } + + @Override + public boolean hasNext() { + return index < length; + } + + @Override + public Object next() { + return Array.get(array, index++); + } + + @Override + public void remove() { + throw new UnsupportedOperationException("array iterator does not support removing elements"); + } + + public static class Iterable implements java.lang.Iterable { + + private Object array; + + public Iterable(Object array) { + this.array = array; + } + + @Override + public Iterator iterator() { + return new ArrayObjectIterator(array); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/Strings.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/Strings.java new file mode 100644 index 0000000000000..b55ae3298a1c0 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/Strings.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.support; + +import java.util.Objects; + +public class Strings { + private Strings() { + } + + public static String join(String delimiter, int... values) { + Objects.requireNonNull(delimiter); + Objects.requireNonNull(values); + if (values.length == 0) { + return ""; + } + StringBuilder sb = new StringBuilder(4 * values.length); + sb.append(values[0]); + + for (int i = 1; i < values.length; i++) { + sb.append(delimiter).append(values[i]); + } + + return sb.toString(); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/Variables.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/Variables.java new file mode 100644 index 0000000000000..858f6707f29a9 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/Variables.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
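
The small `Strings.join` helper added above only handles `int` varargs. A minimal usage sketch (mine, not part of the diff), assuming the class above is on the classpath:

```java
import org.elasticsearch.xpack.watcher.support.Strings;

public class StringsJoinExample {
    public static void main(String[] args) {
        // Values are separated by the delimiter; an empty varargs array yields "".
        System.out.println(Strings.join(", ", 1, 2, 3));    // prints: 1, 2, 3
        System.out.println("[" + Strings.join(", ") + "]"); // prints: []
    }
}
```
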
+ */ +package org.elasticsearch.xpack.watcher.support; + +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; + +import java.util.HashMap; +import java.util.Map; + +public final class Variables { + + public static final String CTX = "ctx"; + public static final String ID = "id"; + public static final String WATCH_ID = "watch_id"; + public static final String EXECUTION_TIME = "execution_time"; + public static final String TRIGGER = "trigger"; + public static final String PAYLOAD = "payload"; + public static final String METADATA = "metadata"; + public static final String VARS = "vars"; + + public static Map<String, Object> createCtxModel(WatchExecutionContext ctx, Payload payload) { + Map<String, Object> ctxModel = new HashMap<>(); + ctxModel.put(ID, ctx.id().value()); + ctxModel.put(WATCH_ID, ctx.id().watchId()); + ctxModel.put(EXECUTION_TIME, ctx.executionTime()); + ctxModel.put(TRIGGER, ctx.triggerEvent().data()); + if (payload != null) { + ctxModel.put(PAYLOAD, payload.data()); + } + ctxModel.put(METADATA, ctx.watch().metadata()); + ctxModel.put(VARS, ctx.vars()); + Map<String, Object> model = new HashMap<>(); + model.put(CTX, ctxModel); + return model; + } + + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java new file mode 100644 index 0000000000000..6e1fb85928431 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistry.java @@ -0,0 +1,160 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
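
`Variables.createCtxModel` above is what exposes the `ctx.*` model to watcher scripts and templates. A rough sketch of how the returned map is laid out (mine, not part of the diff), assuming a `WatchExecutionContext ctx` and a `Payload payload` are in scope:

```java
// Sketch (not from this PR): everything is nested under the single top-level "ctx" key,
// using the constants declared in Variables above.
Map<String, Object> model = Variables.createCtxModel(ctx, payload);

@SuppressWarnings("unchecked")
Map<String, Object> ctxModel = (Map<String, Object>) model.get(Variables.CTX);
Object watchId       = ctxModel.get(Variables.WATCH_ID);        // ctx.id().watchId()
Object executionTime = ctxModel.get(Variables.EXECUTION_TIME);  // ctx.executionTime()
Object payloadData   = ctxModel.get(Variables.PAYLOAD);         // absent when payload was null
```
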
+ */ +package org.elasticsearch.xpack.watcher.support; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField; +import org.elasticsearch.xpack.core.template.TemplateUtils; + +import java.nio.charset.StandardCharsets; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.regex.Pattern; + +import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; + +public class WatcherIndexTemplateRegistry extends AbstractComponent implements ClusterStateListener { + + public static final TemplateConfig TEMPLATE_CONFIG_TRIGGERED_WATCHES = new TemplateConfig( + WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME, "triggered-watches"); + public static final TemplateConfig TEMPLATE_CONFIG_WATCH_HISTORY = new TemplateConfig( + WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME, "watch-history"); + public static final TemplateConfig TEMPLATE_CONFIG_WATCHES = new TemplateConfig( + WatcherIndexTemplateRegistryField.WATCHES_TEMPLATE_NAME, "watches"); + public static final TemplateConfig[] TEMPLATE_CONFIGS = new TemplateConfig[]{ + TEMPLATE_CONFIG_TRIGGERED_WATCHES, TEMPLATE_CONFIG_WATCH_HISTORY, TEMPLATE_CONFIG_WATCHES + }; + + private final Client client; + private final ThreadPool threadPool; + private final TemplateConfig[] indexTemplates; + private final ConcurrentMap templateCreationsInProgress = new ConcurrentHashMap<>(); + + public WatcherIndexTemplateRegistry(Settings settings, ClusterService clusterService, ThreadPool threadPool, Client client) { + super(settings); + this.client = client; + this.threadPool = threadPool; + this.indexTemplates = TEMPLATE_CONFIGS; + clusterService.addListener(this); + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + ClusterState state = event.state(); + if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { + // wait until the gateway has recovered from disk, otherwise we think may not have the index templates, + // while they actually do exist + return; + } + + // no master node, exit immediately + DiscoveryNode masterNode = event.state().getNodes().getMasterNode(); + if (masterNode == null) { + return; + } + + // if this node is newer than the master node, we probably need to add the history template, which might be newer than the + // history template the master node has, so we need potentially add new templates despite being not the master node + 
DiscoveryNode localNode = event.state().getNodes().getLocalNode(); + boolean localNodeVersionAfterMaster = localNode.getVersion().after(masterNode.getVersion()); + + if (event.localNodeMaster() || localNodeVersionAfterMaster) { + addTemplatesIfMissing(state); + } + } + + private void addTemplatesIfMissing(ClusterState state) { + for (TemplateConfig template : indexTemplates) { + final String templateName = template.getTemplateName(); + final AtomicBoolean creationCheck = templateCreationsInProgress.computeIfAbsent(templateName, key -> new AtomicBoolean(false)); + if (creationCheck.compareAndSet(false, true)) { + if (!state.metaData().getTemplates().containsKey(templateName)) { + logger.debug("adding index template [{}], because it doesn't exist", templateName); + putTemplate(template, creationCheck); + } else { + creationCheck.set(false); + logger.trace("not adding index template [{}], because it already exists", templateName); + } + } + } + } + + private void putTemplate(final TemplateConfig config, final AtomicBoolean creationCheck) { + final Executor executor = threadPool.generic(); + executor.execute(() -> { + final String templateName = config.getTemplateName(); + + PutIndexTemplateRequest request = new PutIndexTemplateRequest(templateName).source(config.load(), XContentType.JSON); + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, request, + new ActionListener() { + @Override + public void onResponse(PutIndexTemplateResponse response) { + creationCheck.set(false); + if (response.isAcknowledged() == false) { + logger.error("Error adding watcher template [{}], request was not acknowledged", templateName); + } + } + + @Override + public void onFailure(Exception e) { + creationCheck.set(false); + logger.error(new ParameterizedMessage("Error adding watcher template [{}]", templateName), e); + } + }, client.admin().indices()::putTemplate); + }); + } + + public static boolean validate(ClusterState state) { + return state.getMetaData().getTemplates().containsKey(WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME) && + state.getMetaData().getTemplates().containsKey(WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME) && + state.getMetaData().getTemplates().containsKey(WatcherIndexTemplateRegistryField.WATCHES_TEMPLATE_NAME); + } + + public static class TemplateConfig { + + private final String templateName; + private String fileName; + + TemplateConfig(String templateName, String fileName) { + this.templateName = templateName; + this.fileName = fileName; + } + + public String getFileName() { + return fileName; + } + + public String getTemplateName() { + return templateName; + } + + public byte[] load() { + String template = TemplateUtils.loadTemplate("/" + fileName + ".json", WatcherIndexTemplateRegistryField.INDEX_TEMPLATE_VERSION, + Pattern.quote("${xpack.watcher.template.version}")); + assert template != null && template.length() > 0; + return template.getBytes(StandardCharsets.UTF_8); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/XContentFilterKeysUtils.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/XContentFilterKeysUtils.java new file mode 100644 index 0000000000000..ef689fe469e25 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/XContentFilterKeysUtils.java @@ -0,0 +1,179 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.support; + +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.common.xcontent.XContentParser.Token.END_ARRAY; +import static org.elasticsearch.common.xcontent.XContentParser.Token.END_OBJECT; +import static org.elasticsearch.common.xcontent.XContentParser.Token.START_OBJECT; + +public final class XContentFilterKeysUtils { + + private XContentFilterKeysUtils() { + } + + public static Map filterMapOrdered(Set keys, XContentParser parser) throws IOException { + try { + if (parser.currentToken() != null) { + throw new IllegalArgumentException("Parser already started"); + } + if (parser.nextToken() != START_OBJECT) { + throw new IllegalArgumentException("Content should start with START_OBJECT"); + } + State state = new State(new ArrayList<>(keys)); + return parse(parser, state); + } catch (IOException e) { + throw new IOException("could not build a filtered payload out of xcontent", e); + } + } + + private static Map parse(XContentParser parser, State state) throws IOException { + return parse(parser, state, true); + } + + private static Map parse(XContentParser parser, State state, boolean isOutsideOfArray) throws IOException { + if (state.includeLeaf) { + return parser.map(); + } + + Map data = new HashMap<>(); + for (XContentParser.Token token = parser.nextToken(); token != END_OBJECT; token = parser.nextToken()) { + switch (token) { + case FIELD_NAME: + state.nextField(parser.currentName()); + break; + case START_OBJECT: + if (state.includeKey) { + String fieldName = state.currentFieldName(); + Map nestedData = parse(parser, state, isOutsideOfArray); + data.put(fieldName, nestedData); + } else { + parser.skipChildren(); + } + if (isOutsideOfArray) { + state.previousField(); + } + break; + case START_ARRAY: + if (state.includeKey) { + String fieldName = state.currentFieldName(); + List arrayData = arrayParsing(parser, state); + data.put(fieldName, arrayData); + } else { + parser.skipChildren(); + } + state.previousField(); + break; + case VALUE_STRING: + if (state.includeKey) { + data.put(state.currentFieldName(), parser.text()); + } + if (isOutsideOfArray) { + state.previousField(); + } + break; + case VALUE_NUMBER: + if (state.includeKey) { + data.put(state.currentFieldName(), parser.numberValue()); + } + if (isOutsideOfArray) { + state.previousField(); + } + break; + case VALUE_BOOLEAN: + if (state.includeKey) { + data.put(state.currentFieldName(), parser.booleanValue()); + } + if (isOutsideOfArray) { + state.previousField(); + } + break; + } + } + return data; + } + + private static List arrayParsing(XContentParser parser, State state) throws IOException { + List values = new ArrayList<>(); + for (XContentParser.Token token = parser.nextToken(); token != END_ARRAY; token = parser.nextToken()) { + switch (token) { + case START_OBJECT: + values.add(parse(parser, state, false)); + break; + case VALUE_STRING: + values.add(parser.text()); + break; + case VALUE_NUMBER: + values.add(parser.numberValue()); + break; + case VALUE_BOOLEAN: + values.add(parser.booleanValue()); + break; + } + } + return values; + } + + private static final class State { + + final List extractPaths; + 
StringBuilder currentPath = new StringBuilder(); + + boolean includeLeaf; + boolean includeKey; + String currentFieldName; + + private State(List extractPaths) { + this.extractPaths = extractPaths; + } + + void nextField(String fieldName) { + currentFieldName = fieldName; + if (currentPath.length() != 0) { + currentPath.append('.'); + } + currentPath = currentPath.append(fieldName); + final String path = currentPath.toString(); + for (String extractPath : extractPaths) { + if (path.equals(extractPath)) { + includeKey = true; + includeLeaf = true; + return; + } else if (extractPath.startsWith(path)) { + includeKey = true; + return; + } + } + includeKey = false; + includeLeaf = false; + } + + String currentFieldName() { + return currentFieldName; + } + + void previousField() { + int start = currentPath.lastIndexOf(currentFieldName); + currentPath = currentPath.delete(start, currentPath.length()); + if (currentPath.length() > 0 && currentPath.charAt(currentPath.length() - 1) == '.') { + currentPath = currentPath.deleteCharAt(currentPath.length() - 1); + } + currentFieldName = currentPath.toString(); + includeKey = false; + includeLeaf = false; + } + + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateRequest.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateRequest.java new file mode 100644 index 0000000000000..e69d6d8681f09 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateRequest.java @@ -0,0 +1,314 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.support.search; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Objects; + +/** + * A {@link WatcherSearchTemplateRequest} contains the search request and the eventual template that will + * be rendered as a script by {@link WatcherSearchTemplateService} before being executed. 
+ */ +public class WatcherSearchTemplateRequest implements ToXContentObject { + + private final String[] indices; + private final String[] types; + private final SearchType searchType; + private final IndicesOptions indicesOptions; + private final Script template; + + private final BytesReference searchSource; + + public WatcherSearchTemplateRequest(String[] indices, String[] types, SearchType searchType, IndicesOptions indicesOptions, + BytesReference searchSource) { + this.indices = indices; + this.types = types; + this.searchType = searchType; + this.indicesOptions = indicesOptions; + // Here we convert a watch search request body into an inline search template, + // this way if any Watcher related context variables are used, they will get resolved. + this.template = new Script(ScriptType.INLINE, Script.DEFAULT_TEMPLATE_LANG, searchSource.utf8ToString(), Collections.emptyMap()); + this.searchSource = BytesArray.EMPTY; + } + + public WatcherSearchTemplateRequest(String[] indices, String[] types, SearchType searchType, IndicesOptions indicesOptions, + Script template) { + this.indices = indices; + this.types = types; + this.searchType = searchType; + this.indicesOptions = indicesOptions; + this.template = template; + this.searchSource = BytesArray.EMPTY; + } + + public WatcherSearchTemplateRequest(WatcherSearchTemplateRequest original, BytesReference source) { + this.indices = original.indices; + this.types = original.types; + this.searchType = original.searchType; + this.indicesOptions = original.indicesOptions; + this.searchSource = source; + this.template = original.template; + } + + private WatcherSearchTemplateRequest(String[] indices, String[] types, SearchType searchType, IndicesOptions indicesOptions, + BytesReference searchSource, Script template) { + this.indices = indices; + this.types = types; + this.searchType = searchType; + this.indicesOptions = indicesOptions; + this.template = template; + this.searchSource = searchSource; + } + + @Nullable + public Script getTemplate() { + return template; + } + + public String[] getIndices() { + return indices; + } + + public String[] getTypes() { + return types; + } + + public SearchType getSearchType() { + return searchType; + } + + public IndicesOptions getIndicesOptions() { + return indicesOptions; + } + + public BytesReference getSearchSource() { + return searchSource; + } + + public Script getOrCreateTemplate() { + if (template != null) { + return template; + } else { + return new Script(ScriptType.INLINE, Script.DEFAULT_TEMPLATE_LANG, searchSource.utf8ToString(), Collections.emptyMap()); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (searchType != null) { + builder.field(SEARCH_TYPE_FIELD.getPreferredName(), searchType.toString().toLowerCase(Locale.ENGLISH)); + } + if (indices != null) { + builder.array(INDICES_FIELD.getPreferredName(), indices); + } + if (types != null) { + builder.array(TYPES_FIELD.getPreferredName(), types); + } + if (searchSource != null && searchSource.length() > 0) { + try (InputStream stream = searchSource.streamInput()) { + builder.rawField(BODY_FIELD.getPreferredName(), stream); + } + } + if (indicesOptions != DEFAULT_INDICES_OPTIONS) { + builder.startObject(INDICES_OPTIONS_FIELD.getPreferredName()); + String value; + if (indicesOptions.expandWildcardsClosed() && indicesOptions.expandWildcardsOpen()) { + value = "all"; + } else if (indicesOptions.expandWildcardsOpen()) { + value = "open"; + } else if 
(indicesOptions.expandWildcardsClosed()) { + value = "closed"; + } else { + value = "none"; + } + builder.field(EXPAND_WILDCARDS_FIELD.getPreferredName(), value); + builder.field(IGNORE_UNAVAILABLE_FIELD.getPreferredName(), indicesOptions.ignoreUnavailable()); + builder.field(ALLOW_NO_INDICES_FIELD.getPreferredName(), indicesOptions.allowNoIndices()); + builder.endObject(); + } + if (template != null) { + builder.field(TEMPLATE_FIELD.getPreferredName(), template); + } + return builder.endObject(); + } + + + /** + * Reads a new watcher search request instance for the specified parser. + */ + public static WatcherSearchTemplateRequest fromXContent(XContentParser parser, SearchType searchType) throws IOException { + List indices = new ArrayList<>(); + List types = new ArrayList<>(); + IndicesOptions indicesOptions = DEFAULT_INDICES_OPTIONS; + BytesReference searchSource = null; + Script template = null; + + XContentParser.Token token; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_ARRAY) { + if (INDICES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.VALUE_STRING) { + indices.add(parser.textOrNull()); + } else { + throw new ElasticsearchParseException("could not read search request. expected string values in [" + + currentFieldName + "] field, but instead found [" + token + "]"); + } + } + } else if (TYPES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.VALUE_STRING) { + types.add(parser.textOrNull()); + } else { + throw new ElasticsearchParseException("could not read search request. expected string values in [" + + currentFieldName + "] field, but instead found [" + token + "]"); + } + } + } else { + throw new ElasticsearchParseException("could not read search request. 
unexpected array field [" + + currentFieldName + "]"); + } + } else if (token == XContentParser.Token.START_OBJECT) { + if (BODY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + builder.copyCurrentStructure(parser); + searchSource = BytesReference.bytes(builder); + } + } else if (INDICES_OPTIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + boolean expandOpen = DEFAULT_INDICES_OPTIONS.expandWildcardsOpen(); + boolean expandClosed = DEFAULT_INDICES_OPTIONS.expandWildcardsClosed(); + boolean allowNoIndices = DEFAULT_INDICES_OPTIONS.allowNoIndices(); + boolean ignoreUnavailable = DEFAULT_INDICES_OPTIONS.ignoreUnavailable(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (EXPAND_WILDCARDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + switch (parser.text()) { + case "all": + expandOpen = true; + expandClosed = true; + break; + case "open": + expandOpen = true; + expandClosed = false; + break; + case "closed": + expandOpen = false; + expandClosed = true; + break; + case "none": + expandOpen = false; + expandClosed = false; + break; + default: + throw new ElasticsearchParseException("could not read search request. unknown value [" + + parser.text() + "] for [" + currentFieldName + "] field "); + } + } else if (IGNORE_UNAVAILABLE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + ignoreUnavailable = parser.booleanValue(); + } else if (ALLOW_NO_INDICES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + allowNoIndices = parser.booleanValue(); + } else { + throw new ElasticsearchParseException("could not read search request. unexpected index option [" + + currentFieldName + "]"); + } + } else { + throw new ElasticsearchParseException("could not read search request. unexpected object field [" + + currentFieldName + "]"); + } + } + indicesOptions = IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices, expandOpen, expandClosed, + DEFAULT_INDICES_OPTIONS); + } else if (TEMPLATE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + template = Script.parse(parser, Script.DEFAULT_TEMPLATE_LANG); + } else { + throw new ElasticsearchParseException("could not read search request. unexpected object field [" + + currentFieldName + "]"); + } + } else if (token == XContentParser.Token.VALUE_STRING) { + if (INDICES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + String indicesStr = parser.text(); + indices.addAll(Arrays.asList(Strings.delimitedListToStringArray(indicesStr, ",", " \t"))); + } else if (TYPES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + String typesStr = parser.text(); + types.addAll(Arrays.asList(Strings.delimitedListToStringArray(typesStr, ",", " \t"))); + } else if (SEARCH_TYPE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + searchType = SearchType.fromString(parser.text().toLowerCase(Locale.ROOT)); + } else { + throw new ElasticsearchParseException("could not read search request. unexpected string field [" + + currentFieldName + "]"); + } + } else { + throw new ElasticsearchParseException("could not read search request. 
unexpected token [" + token + "]"); + } + } + + if (searchSource == null) { + searchSource = BytesArray.EMPTY; + } + + return new WatcherSearchTemplateRequest(indices.toArray(new String[0]), types.toArray(new String[0]), searchType, + indicesOptions, searchSource, template); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + WatcherSearchTemplateRequest other = (WatcherSearchTemplateRequest) o; + return Arrays.equals(indices, other.indices) && + Arrays.equals(types, other.types) && + Objects.equals(searchType, other.searchType) && + Objects.equals(indicesOptions, other.indicesOptions) && + Objects.equals(searchSource, other.searchSource) && + Objects.equals(template, other.template); + + } + + @Override + public int hashCode() { + return Objects.hash(indices, types, searchType, indicesOptions, searchSource, template); + } + + private static final ParseField INDICES_FIELD = new ParseField("indices"); + private static final ParseField TYPES_FIELD = new ParseField("types"); + private static final ParseField BODY_FIELD = new ParseField("body"); + private static final ParseField SEARCH_TYPE_FIELD = new ParseField("search_type"); + private static final ParseField INDICES_OPTIONS_FIELD = new ParseField("indices_options"); + private static final ParseField EXPAND_WILDCARDS_FIELD = new ParseField("expand_wildcards"); + private static final ParseField IGNORE_UNAVAILABLE_FIELD = new ParseField("ignore_unavailable"); + private static final ParseField ALLOW_NO_INDICES_FIELD = new ParseField("allow_no_indices"); + private static final ParseField TEMPLATE_FIELD = new ParseField("template"); + + public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.lenientExpandOpen(); +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateService.java new file mode 100644 index 0000000000000..9df4f5f8b5234 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateService.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
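
`WatcherSearchTemplateRequest.fromXContent` above defines how a watch's search request is declared. As a rough illustration (not part of the diff; the index pattern and query are made up), a source it can parse looks like the following; the field names come straight from the `ParseField` constants in the class, and a `template` object parsed via `Script.parse` may be supplied as well.

```java
// Illustrative only: a search request source accepted by fromXContent above.
String searchRequestSource =
      "{"
    + "  \"indices\": [\"logs-*\"],"                       // also accepted as a comma-separated string
    + "  \"types\": [\"doc\"],"
    + "  \"search_type\": \"query_then_fetch\","
    + "  \"indices_options\": {"
    + "    \"expand_wildcards\": \"open\","
    + "    \"ignore_unavailable\": true,"
    + "    \"allow_no_indices\": true"
    + "  },"
    + "  \"body\": { \"query\": { \"match_all\": {} } }"
    + "}";
```
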
+ */ +package org.elasticsearch.xpack.watcher.support.search; + +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.script.TemplateScript; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.Watcher; +import org.elasticsearch.xpack.watcher.support.Variables; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Map; + +/** + * {@link WatcherSearchTemplateService} renders {@link WatcherSearchTemplateRequest} before their execution. + */ +public class WatcherSearchTemplateService extends AbstractComponent { + + private final ScriptService scriptService; + private final NamedXContentRegistry xContentRegistry; + + public WatcherSearchTemplateService(Settings settings, ScriptService scriptService, NamedXContentRegistry xContentRegistry) { + super(settings); + this.scriptService = scriptService; + this.xContentRegistry = xContentRegistry; + } + + public String renderTemplate(Script source, WatchExecutionContext ctx, Payload payload) throws IOException { + // Due the inconsistency with templates in ES 1.x, we maintain our own template format. + // This template format we use now, will become the template structure in ES 2.0 + Map watcherContextParams = Variables.createCtxModel(ctx, payload); + // Here we convert watcher template into a ES core templates. Due to the different format we use, we + // convert to the template format used in ES core + if (source.getParams() != null) { + watcherContextParams.putAll(source.getParams()); + } + // Templates are always of lang mustache: + Script template = new Script(source.getType(), source.getType() == ScriptType.STORED ? 
null : "mustache", + source.getIdOrCode(), source.getOptions(), watcherContextParams); + TemplateScript.Factory compiledTemplate = scriptService.compile(template, Watcher.SCRIPT_TEMPLATE_CONTEXT); + return compiledTemplate.newInstance(template.getParams()).execute(); + } + + public SearchRequest toSearchRequest(WatcherSearchTemplateRequest request) throws IOException { + SearchRequest searchRequest = new SearchRequest(request.getIndices()); + searchRequest.types(request.getTypes()); + searchRequest.searchType(request.getSearchType()); + searchRequest.indicesOptions(request.getIndicesOptions()); + SearchSourceBuilder sourceBuilder = SearchSourceBuilder.searchSource(); + BytesReference source = request.getSearchSource(); + if (source != null && source.length() > 0) { + try (InputStream stream = source.streamInput(); + XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(source)) + .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream)) { + sourceBuilder.parseXContent(parser); + searchRequest.source(sourceBuilder); + } + } + return searchRequest; + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/TransformBuilders.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/TransformBuilders.java new file mode 100644 index 0000000000000..29df7c787d740 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/TransformBuilders.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.transform; + +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.xpack.core.watcher.transform.Transform; +import org.elasticsearch.xpack.core.watcher.transform.chain.ChainTransform; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; +import org.elasticsearch.xpack.watcher.transform.script.ScriptTransform; +import org.elasticsearch.xpack.watcher.transform.search.SearchTransform; + +import static java.util.Collections.emptyMap; + +public final class TransformBuilders { + + private TransformBuilders() { + } + + public static SearchTransform.Builder searchTransform(WatcherSearchTemplateRequest request) { + return SearchTransform.builder(request); + } + + public static ScriptTransform.Builder scriptTransform(String script) { + return scriptTransform(new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, script, emptyMap())); + } + + public static ScriptTransform.Builder scriptTransform(Script script) { + return ScriptTransform.builder(script); + } + + public static ChainTransform.Builder chainTransform(Transform.Builder... transforms) { + return ChainTransform.builder().add(transforms); + } + + public static ChainTransform.Builder chainTransform(Transform... 
transforms) { + return ChainTransform.builder(transforms); + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/script/ExecutableScriptTransform.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/script/ExecutableScriptTransform.java new file mode 100644 index 0000000000000..e2b1cf882cc47 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/script/ExecutableScriptTransform.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.transform.script; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.transform.ExecutableTransform; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.Watcher; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.watcher.support.Variables.createCtxModel; +import static org.elasticsearch.xpack.watcher.transform.script.ScriptTransform.TYPE; + +public class ExecutableScriptTransform extends ExecutableTransform { + + private final ScriptService scriptService; + + public ExecutableScriptTransform(ScriptTransform transform, Logger logger, ScriptService scriptService) { + super(transform, logger); + this.scriptService = scriptService; + Script script = transform.getScript(); + // try to compile so we catch syntax errors early + scriptService.compile(script, Watcher.SCRIPT_EXECUTABLE_CONTEXT); + } + + @Override + public ScriptTransform.Result execute(WatchExecutionContext ctx, Payload payload) { + try { + return doExecute(ctx, payload); + } catch (Exception e) { + logger.error((Supplier) () -> new ParameterizedMessage("failed to execute [{}] transform for [{}]", TYPE, ctx.id()), e); + return new ScriptTransform.Result(e); + } + } + + ScriptTransform.Result doExecute(WatchExecutionContext ctx, Payload payload) throws IOException { + Script script = transform.getScript(); + Map model = new HashMap<>(); + if (script.getParams() != null) { + model.putAll(script.getParams()); + } + model.putAll(createCtxModel(ctx, payload)); + ExecutableScript.Factory factory = scriptService.compile(script, Watcher.SCRIPT_EXECUTABLE_CONTEXT); + ExecutableScript executable = factory.newInstance(model); + Object value = executable.run(); + // TODO: deprecate one of these styles (returning a map or returning an opaque value below) + if (value instanceof Map) { + return new ScriptTransform.Result(new Payload.Simple((Map) value)); + } + Map data = new HashMap<>(); + data.put("_value", value); + return new ScriptTransform.Result(new Payload.Simple(data)); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/script/ScriptTransform.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/script/ScriptTransform.java new file mode 100644 index 0000000000000..0c0f9c34bdc02 --- /dev/null 
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/script/ScriptTransform.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.transform.script; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.script.Script; +import org.elasticsearch.xpack.core.watcher.transform.Transform; +import org.elasticsearch.xpack.core.watcher.watch.Payload; + +import java.io.IOException; + +public class ScriptTransform implements Transform { + + public static final String TYPE = "script"; + + private final Script script; + + public ScriptTransform(Script script) { + this.script = script; + } + + @Override + public String type() { + return TYPE; + } + + public Script getScript() { + return script; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ScriptTransform that = (ScriptTransform) o; + + return script.equals(that.script); + } + + @Override + public int hashCode() { + return script.hashCode(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return script.toXContent(builder, params); + } + + public static ScriptTransform parse(String watchId, XContentParser parser) throws IOException { + try { + Script script = Script.parse(parser); + return new ScriptTransform(script); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse [{}] transform for watch [{}]. failed to parse script", pe, TYPE, + watchId); + } + } + + public static Builder builder(Script script) { + return new Builder(script); + } + + public static class Result extends Transform.Result { + + public Result(Payload payload) { + super(TYPE, payload); + } + + public Result(Exception e) { + super(TYPE, e); + } + + @Override + protected XContentBuilder typeXContent(XContentBuilder builder, Params params) throws IOException { + return builder; + } + } + + public static class Builder implements Transform.Builder { + + private final Script script; + + public Builder(Script script) { + this.script = script; + } + + @Override + public ScriptTransform build() { + return new ScriptTransform(script); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/script/ScriptTransformFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/script/ScriptTransformFactory.java new file mode 100644 index 0000000000000..c675c00d0b9de --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/script/ScriptTransformFactory.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
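A minimal usage sketch of the transform builders introduced above; the script body is a made-up placeholder, and the chain contains a single transform only to keep the example short:

```java
// Build an inline "script" transform and wrap it in a "chain" transform.
ScriptTransform.Builder extractTotal =
        TransformBuilders.scriptTransform("return [ 'total' : ctx.payload.hits.total ]");
Transform transform = TransformBuilders.chainTransform(extractTotal).build();
```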
+ */ +package org.elasticsearch.xpack.watcher.transform.script; + +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.xpack.core.watcher.transform.TransformFactory; + +import java.io.IOException; + +public class ScriptTransformFactory extends TransformFactory { + + private final ScriptService scriptService; + + public ScriptTransformFactory(Settings settings, ScriptService scriptService) { + super(Loggers.getLogger(ExecutableScriptTransform.class, settings)); + this.scriptService = scriptService; + } + + @Override + public String type() { + return ScriptTransform.TYPE; + } + + @Override + public ScriptTransform parseTransform(String watchId, XContentParser parser) throws IOException { + return ScriptTransform.parse(watchId, parser); + } + + @Override + public ExecutableScriptTransform createExecutable(ScriptTransform transform) { + return new ExecutableScriptTransform(transform, transformLogger, scriptService); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/ExecutableSearchTransform.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/ExecutableSearchTransform.java new file mode 100644 index 0000000000000..03dbf88fb0d80 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/ExecutableSearchTransform.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.transform.search; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.script.Script; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.transform.ExecutableTransform; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.WatcherClientHelper; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateService; + +import static org.elasticsearch.xpack.watcher.transform.search.SearchTransform.TYPE; + +public class ExecutableSearchTransform extends ExecutableTransform { + + static final SearchType DEFAULT_SEARCH_TYPE = SearchType.QUERY_THEN_FETCH; + + private final Client client; + private final WatcherSearchTemplateService searchTemplateService; + private final TimeValue timeout; + + public ExecutableSearchTransform(SearchTransform transform, Logger logger, Client client, + WatcherSearchTemplateService searchTemplateService, TimeValue defaultTimeout) { + super(transform, logger); + this.client = client; + this.searchTemplateService = searchTemplateService; + this.timeout = transform.getTimeout() != null ? 
transform.getTimeout() : defaultTimeout; + } + + @Override + public SearchTransform.Result execute(WatchExecutionContext ctx, Payload payload) { + WatcherSearchTemplateRequest request = null; + try { + Script template = transform.getRequest().getOrCreateTemplate(); + String renderedTemplate = searchTemplateService.renderTemplate(template, ctx, payload); + // We need to make a copy, so that we don't modify the original instance that we keep around in a watch: + request = new WatcherSearchTemplateRequest(transform.getRequest(), new BytesArray(renderedTemplate)); + SearchRequest searchRequest = searchTemplateService.toSearchRequest(request); + SearchResponse resp = WatcherClientHelper.execute(ctx.watch(), client, () -> client.search(searchRequest).actionGet(timeout)); + return new SearchTransform.Result(request, new Payload.XContent(resp)); + } catch (Exception e) { + logger.error((Supplier) () -> new ParameterizedMessage("failed to execute [{}] transform for [{}]", TYPE, ctx.id()), e); + return new SearchTransform.Result(request, e); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/SearchTransform.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/SearchTransform.java new file mode 100644 index 0000000000000..b7d57a67c6a06 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/SearchTransform.java @@ -0,0 +1,198 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.transform.search; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; +import org.elasticsearch.xpack.core.watcher.transform.Transform; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; +import org.joda.time.DateTimeZone; + +import java.io.IOException; + +import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; + +public class SearchTransform implements Transform { + + public static final String TYPE = "search"; + + private final WatcherSearchTemplateRequest request; + @Nullable private final TimeValue timeout; + @Nullable private final DateTimeZone dynamicNameTimeZone; + + public SearchTransform(WatcherSearchTemplateRequest request, @Nullable TimeValue timeout, @Nullable DateTimeZone dynamicNameTimeZone) { + this.request = request; + this.timeout = timeout; + this.dynamicNameTimeZone = dynamicNameTimeZone; + } + + @Override + public String type() { + return TYPE; + } + + public WatcherSearchTemplateRequest getRequest() { + return request; + } + + public TimeValue getTimeout() { + return timeout; + } + + public DateTimeZone getDynamicNameTimeZone() { + return dynamicNameTimeZone; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + SearchTransform that = (SearchTransform) o; + + if (request != null ? 
!request.equals(that.request) : that.request != null) return false; + if (timeout != null ? !timeout.equals(that.timeout) : that.timeout != null) return false; + return !(dynamicNameTimeZone != null ? !dynamicNameTimeZone.equals(that.dynamicNameTimeZone) : that.dynamicNameTimeZone != null); + } + + @Override + public int hashCode() { + int result = request != null ? request.hashCode() : 0; + result = 31 * result + (timeout != null ? timeout.hashCode() : 0); + result = 31 * result + (dynamicNameTimeZone != null ? dynamicNameTimeZone.hashCode() : 0); + return result; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (request != null) { + builder.field(Field.REQUEST.getPreferredName(), request); + } + if (timeout != null) { + builder.humanReadableField(Field.TIMEOUT.getPreferredName(), Field.TIMEOUT_HUMAN.getPreferredName(), timeout); + } + if (dynamicNameTimeZone != null) { + builder.field(Field.DYNAMIC_NAME_TIMEZONE.getPreferredName(), dynamicNameTimeZone.toString()); + } + builder.endObject(); + return builder; + } + + public static SearchTransform parse(String watchId, XContentParser parser) throws IOException { + WatcherSearchTemplateRequest request = null; + TimeValue timeout = null; + DateTimeZone dynamicNameTimeZone = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Field.REQUEST.match(currentFieldName, parser.getDeprecationHandler())) { + try { + request = WatcherSearchTemplateRequest.fromXContent(parser, ExecutableSearchTransform.DEFAULT_SEARCH_TYPE); + } catch (ElasticsearchParseException srpe) { + throw new ElasticsearchParseException("could not parse [{}] transform for watch [{}]. failed to parse [{}]", srpe, + TYPE, watchId, currentFieldName); + } + } else if (Field.TIMEOUT.match(currentFieldName, parser.getDeprecationHandler())) { + timeout = timeValueMillis(parser.longValue()); + } else if (Field.TIMEOUT_HUMAN.match(currentFieldName, parser.getDeprecationHandler())) { + // Parser for human specified timeouts and 2.x compatibility + timeout = WatcherDateTimeUtils.parseTimeValue(parser, Field.TIMEOUT_HUMAN.toString()); + } else if (Field.DYNAMIC_NAME_TIMEZONE.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_STRING) { + dynamicNameTimeZone = DateTimeZone.forID(parser.text()); + } else { + throw new ElasticsearchParseException("could not parse [{}] transform for watch [{}]. failed to parse [{}]. must be a" + + " string value (e.g. 'UTC' or '+01:00').", TYPE, watchId, currentFieldName); + } + } else { + throw new ElasticsearchParseException("could not parse [{}] transform for watch [{}]. unexpected field [{}]", TYPE, + watchId, currentFieldName); + } + } + + if (request == null) { + throw new ElasticsearchParseException("could not parse [{}] transform for watch [{}]. 
missing required [{}] field", TYPE, + watchId, Field.REQUEST.getPreferredName()); + } + return new SearchTransform(request, timeout, dynamicNameTimeZone); + } + + public static Builder builder(WatcherSearchTemplateRequest request) { + return new Builder(request); + } + + public static class Result extends Transform.Result { + + @Nullable private final WatcherSearchTemplateRequest request; + + public Result(WatcherSearchTemplateRequest request, Payload payload) { + super(TYPE, payload); + this.request = request; + } + + public Result(WatcherSearchTemplateRequest request, Exception e) { + super(TYPE, e); + this.request = request; + } + + public WatcherSearchTemplateRequest executedRequest() { + return request; + } + + @Override + protected XContentBuilder typeXContent(XContentBuilder builder, Params params) throws IOException { + if (request != null) { + builder.startObject(type); + builder.field(Field.REQUEST.getPreferredName(), request); + builder.endObject(); + } + return builder; + } + } + + public static class Builder implements Transform.Builder { + + private final WatcherSearchTemplateRequest request; + private TimeValue timeout; + private DateTimeZone dynamicNameTimeZone; + + public Builder(WatcherSearchTemplateRequest request) { + this.request = request; + } + + public Builder timeout(TimeValue readTimeout) { + this.timeout = readTimeout; + return this; + } + + public Builder dynamicNameTimeZone(DateTimeZone dynamicNameTimeZone) { + this.dynamicNameTimeZone = dynamicNameTimeZone; + return this; + } + + @Override + public SearchTransform build() { + return new SearchTransform(request, timeout, dynamicNameTimeZone); + } + } + + public interface Field { + ParseField REQUEST = new ParseField("request"); + ParseField TIMEOUT = new ParseField("timeout_in_millis"); + ParseField TIMEOUT_HUMAN = new ParseField("timeout"); + ParseField DYNAMIC_NAME_TIMEZONE = new ParseField("dynamic_name_timezone"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/SearchTransformFactory.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/SearchTransformFactory.java new file mode 100644 index 0000000000000..7e08ef3afad9d --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/SearchTransformFactory.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
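A short, hedged sketch of the `SearchTransform` builder defined above; `searchRequest` stands for a `WatcherSearchTemplateRequest` built elsewhere, and the two optional settings map to the `timeout_in_millis` and `dynamic_name_timezone` fields handled by `parse`:

```java
SearchTransform transform = SearchTransform.builder(searchRequest)    // searchRequest: assumed to exist
        .timeout(TimeValue.timeValueSeconds(30))                      // written as "timeout_in_millis"
        .dynamicNameTimeZone(DateTimeZone.forID("Europe/Berlin"))     // written as "dynamic_name_timezone"
        .build();
```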
+ */ +package org.elasticsearch.xpack.watcher.transform.search; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.xpack.core.watcher.transform.TransformFactory; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateService; + +import java.io.IOException; + +public class SearchTransformFactory extends TransformFactory { + + private final Client client; + private final TimeValue defaultTimeout; + private final WatcherSearchTemplateService searchTemplateService; + + public SearchTransformFactory(Settings settings, Client client, NamedXContentRegistry xContentRegistry, ScriptService scriptService) { + super(Loggers.getLogger(ExecutableSearchTransform.class, settings)); + this.client = client; + this.defaultTimeout = settings.getAsTime("xpack.watcher.transform.search.default_timeout", TimeValue.timeValueMinutes(1)); + this.searchTemplateService = new WatcherSearchTemplateService(settings, scriptService, xContentRegistry); + } + + @Override + public String type() { + return SearchTransform.TYPE; + } + + @Override + public SearchTransform parseTransform(String watchId, XContentParser parser) throws IOException { + return SearchTransform.parse(watchId, parser); + } + + @Override + public ExecutableSearchTransform createExecutable(SearchTransform transform) { + return new ExecutableSearchTransform(transform, transformLogger, client, searchTemplateService, defaultTimeout); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/WatcherTransportAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/WatcherTransportAction.java new file mode 100644 index 0000000000000..363857f2766d3 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/WatcherTransportAction.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
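The factory above reads its default timeout from a plain node setting; a small sketch of that lookup (the `30s` value is illustrative):

```java
Settings settings = Settings.builder()
        .put("xpack.watcher.transform.search.default_timeout", "30s")
        .build();
// Falls back to one minute when the setting is absent, as in the constructor above.
TimeValue defaultTimeout = settings.getAsTime(
        "xpack.watcher.transform.search.default_timeout", TimeValue.timeValueMinutes(1));
```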
+ */ +package org.elasticsearch.xpack.watcher.transport.actions; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackField; + +public abstract class WatcherTransportAction + extends HandledTransportAction { + + protected final XPackLicenseState licenseState; + + public WatcherTransportAction(Settings settings, String actionName, TransportService transportService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + XPackLicenseState licenseState, Writeable.Reader request) { + super(settings, actionName, threadPool, transportService, actionFilters, request, indexNameExpressionResolver); + this.licenseState = licenseState; + } + + protected String executor() { + return ThreadPool.Names.GENERIC; + } + + @Override + protected void doExecute(Task task, final Request request, ActionListener listener) { + if (licenseState.isWatcherAllowed()) { + super.doExecute(task, request, listener); + } else { + listener.onFailure(LicenseUtils.newComplianceException(XPackField.WATCHER)); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchAction.java new file mode 100644 index 0000000000000..52c63cab69cab --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchAction.java @@ -0,0 +1,134 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.transport.actions.ack; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.Preference; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.watcher.actions.ActionWrapper; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionSnapshot; +import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchResponse; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.core.watcher.watch.WatchField; +import org.elasticsearch.xpack.watcher.execution.ExecutionService; +import org.elasticsearch.xpack.watcher.transport.actions.WatcherTransportAction; +import org.elasticsearch.xpack.watcher.watch.WatchParser; +import org.joda.time.DateTime; + +import java.time.Clock; +import java.util.Arrays; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.joda.time.DateTimeZone.UTC; + +public class TransportAckWatchAction extends WatcherTransportAction { + + private final Clock clock; + private final WatchParser parser; + private ExecutionService executionService; + private final Client client; + + @Inject + public TransportAckWatchAction(Settings settings, TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, Clock clock, XPackLicenseState licenseState, + WatchParser parser, ExecutionService executionService, Client client) { + super(settings, AckWatchAction.NAME, transportService, threadPool, actionFilters, indexNameExpressionResolver, + licenseState, AckWatchRequest::new); + this.clock = clock; + this.parser = parser; + this.executionService = executionService; + this.client = client; + } + + @Override + protected void doExecute(AckWatchRequest request, ActionListener listener) { + // if the watch to be acked is running currently, reject this request + List snapshots = executionService.currentExecutions(); + boolean isWatchRunning = snapshots.stream().anyMatch(s -> s.watchId().equals(request.getWatchId())); + if (isWatchRunning) { + listener.onFailure(new ElasticsearchStatusException("watch[{}] is running currently, 
cannot ack until finished", + RestStatus.CONFLICT, request.getWatchId())); + return; + } + + GetRequest getRequest = new GetRequest(Watch.INDEX, Watch.DOC_TYPE, request.getWatchId()) + .preference(Preference.LOCAL.type()).realtime(true); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, getRequest, + ActionListener.wrap((response) -> { + if (response.isExists() == false) { + listener.onFailure(new ResourceNotFoundException("Watch with id [{}] does not exist", request.getWatchId())); + } else { + DateTime now = new DateTime(clock.millis(), UTC); + Watch watch = parser.parseWithSecrets(request.getWatchId(), true, response.getSourceAsBytesRef(), + now, XContentType.JSON); + watch.version(response.getVersion()); + watch.status().version(response.getVersion()); + String[] actionIds = request.getActionIds(); + if (actionIds == null || actionIds.length == 0) { + actionIds = new String[]{WatchField.ALL_ACTIONS_ID}; + } + + // exit early in case nothing changes + boolean isChanged = watch.ack(now, actionIds); + if (isChanged == false) { + listener.onResponse(new AckWatchResponse(watch.status())); + return; + } + + UpdateRequest updateRequest = new UpdateRequest(Watch.INDEX, Watch.DOC_TYPE, request.getWatchId()); + // this may reject this action, but prevents concurrent updates from a watch execution + updateRequest.version(response.getVersion()); + updateRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + XContentBuilder builder = jsonBuilder(); + builder.startObject() + .startObject(WatchField.STATUS.getPreferredName()) + .startObject("actions"); + + List actionIdsAsList = Arrays.asList(actionIds); + boolean updateAll = actionIdsAsList.contains("_all"); + for (ActionWrapper actionWrapper : watch.actions()) { + if (updateAll || actionIdsAsList.contains(actionWrapper.id())) { + builder.startObject(actionWrapper.id()) + .field("ack", watch.status().actionStatus(actionWrapper.id()).ackStatus(), ToXContent.EMPTY_PARAMS) + .endObject(); + } + } + + builder.endObject().endObject().endObject(); + updateRequest.doc(builder); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, updateRequest, + ActionListener.wrap( + (updateResponse) -> listener.onResponse(new AckWatchResponse(watch.status())), + listener::onFailure), client::update); + } + }, listener::onFailure), client::get); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/activate/TransportActivateWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/activate/TransportActivateWatchAction.java new file mode 100644 index 0000000000000..cc6ef9274e8f3 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/activate/TransportActivateWatchAction.java @@ -0,0 +1,114 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
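For illustration, a rough sketch of the partial document the ack action above sends through its `UpdateRequest`; `email_admin` is a hypothetical action id, and the string placeholder stands in for the real ack-status object written by the watch status code:

```java
XContentBuilder doc = jsonBuilder();                      // throws IOException in real code
doc.startObject()
       .startObject("status")
           .startObject("actions")
               .startObject("email_admin")                // hypothetical action id
                   .field("ack", "<ack status object>")   // placeholder for the serialized ack status
               .endObject()
           .endObject()
       .endObject()
   .endObject();
```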
+ */ +package org.elasticsearch.xpack.watcher.transport.actions.activate; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.Preference; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.watcher.transport.actions.activate.ActivateWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.activate.ActivateWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.activate.ActivateWatchResponse; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.core.watcher.watch.WatchField; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; +import org.elasticsearch.xpack.watcher.transport.actions.WatcherTransportAction; +import org.elasticsearch.xpack.watcher.watch.WatchParser; +import org.joda.time.DateTime; + +import java.io.IOException; +import java.time.Clock; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.writeDate; +import static org.joda.time.DateTimeZone.UTC; + +/** + * Performs the watch de/activation operation. 
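A rough, illustrative sketch of the partial status document this action submits (see `activateWatchBuilder` below); the field names are assumed to follow `WatchStatus.Field`, and the timestamp is omitted here because it is written separately via `writeDate`:

```java
XContentBuilder doc = jsonBuilder();       // throws IOException in real code
doc.startObject()
       .startObject("status")
           .startObject("state")
               .field("active", true)      // or false when deactivating
               // "timestamp" is added next via writeDate(...)
           .endObject()
       .endObject()
   .endObject();
```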
+ */ +public class TransportActivateWatchAction extends WatcherTransportAction { + + private final Clock clock; + private final WatchParser parser; + private final Client client; + + @Inject + public TransportActivateWatchAction(Settings settings, TransportService transportService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Clock clock, + XPackLicenseState licenseState, WatchParser parser, Client client) { + super(settings, ActivateWatchAction.NAME, transportService, threadPool, actionFilters, indexNameExpressionResolver, + licenseState, ActivateWatchRequest::new); + this.clock = clock; + this.parser = parser; + this.client = client; + } + + @Override + protected void doExecute(ActivateWatchRequest request, ActionListener listener) { + try { + DateTime now = new DateTime(clock.millis(), UTC); + UpdateRequest updateRequest = new UpdateRequest(Watch.INDEX, Watch.DOC_TYPE, request.getWatchId()); + updateRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + XContentBuilder builder = activateWatchBuilder(request.isActivate(), now); + updateRequest.doc(builder); + // a watch execution updates the status in between, we still want this want to override the active state + // two has been chosen arbitrary, maybe one would make more sense, as a watch would not execute more often than + // once per second? + updateRequest.retryOnConflict(2); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, updateRequest, + ActionListener.wrap(updateResponse -> { + GetRequest getRequest = new GetRequest(Watch.INDEX, Watch.DOC_TYPE, request.getWatchId()) + .preference(Preference.LOCAL.type()).realtime(true); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, getRequest, + ActionListener.wrap(getResponse -> { + if (getResponse.isExists()) { + Watch watch = parser.parseWithSecrets(request.getWatchId(), true, getResponse.getSourceAsBytesRef(), now, + XContentType.JSON); + watch.version(getResponse.getVersion()); + watch.status().version(getResponse.getVersion()); + listener.onResponse(new ActivateWatchResponse(watch.status())); + } else { + listener.onFailure(new ResourceNotFoundException("Watch with id [{}] does not exist", + request.getWatchId())); + } + }, listener::onFailure), client::get); + }, listener::onFailure), client::update); + } catch (IOException e) { + listener.onFailure(e); + } + } + + private XContentBuilder activateWatchBuilder(boolean active, DateTime now) throws IOException { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject() + .startObject(WatchField.STATUS.getPreferredName()) + .startObject(WatchStatus.Field.STATE.getPreferredName()) + .field(WatchStatus.Field.ACTIVE.getPreferredName(), active); + + writeDate(WatchStatus.Field.TIMESTAMP.getPreferredName(), builder, now); + builder.endObject().endObject().endObject(); + return builder; + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/delete/TransportDeleteWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/delete/TransportDeleteWatchAction.java new file mode 100644 index 0000000000000..ca66e9e678d94 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/delete/TransportDeleteWatchAction.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.transport.actions.delete; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.watcher.transport.actions.delete.DeleteWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.delete.DeleteWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.delete.DeleteWatchResponse; +import org.elasticsearch.xpack.core.watcher.watch.Watch; + +import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; + +/** + * Performs the delete operation. This inherits directly from HandledTransportAction, because deletion should always work + * independently from the license check in WatcherTransportAction! + */ +public class TransportDeleteWatchAction extends HandledTransportAction { + + private final Client client; + + @Inject + public TransportDeleteWatchAction(Settings settings, TransportService transportService,ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + Client client) { + super(settings, DeleteWatchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, + DeleteWatchRequest::new); + this.client = client; + } + + @Override + protected void doExecute(DeleteWatchRequest request, ActionListener listener) { + DeleteRequest deleteRequest = new DeleteRequest(Watch.INDEX, Watch.DOC_TYPE, request.getId()); + deleteRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, deleteRequest, + ActionListener.wrap(deleteResponse -> { + boolean deleted = deleteResponse.getResult() == DocWriteResponse.Result.DELETED; + DeleteWatchResponse response = new DeleteWatchResponse(deleteResponse.getId(), deleteResponse.getVersion(), deleted); + listener.onResponse(response); + }, listener::onFailure), client::delete); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/execute/TransportExecuteWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/execute/TransportExecuteWatchAction.java new file mode 100644 index 0000000000000..2a199c2b3eb7b --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/execute/TransportExecuteWatchAction.java @@ -0,0 +1,158 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.transport.actions.execute; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.Preference; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.watcher.execution.ActionExecutionMode; +import org.elasticsearch.xpack.core.watcher.history.WatchRecord; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.execution.ExecutionService; +import org.elasticsearch.xpack.watcher.execution.ManualExecutionContext; +import org.elasticsearch.xpack.watcher.input.simple.SimpleInput; +import org.elasticsearch.xpack.watcher.transport.actions.WatcherTransportAction; +import org.elasticsearch.xpack.watcher.trigger.TriggerService; +import org.elasticsearch.xpack.watcher.trigger.manual.ManualTriggerEvent; +import org.elasticsearch.xpack.watcher.watch.WatchParser; +import org.joda.time.DateTime; + +import java.io.IOException; +import java.time.Clock; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.joda.time.DateTimeZone.UTC; + +/** + * Performs the watch execution operation. 
+ */ +public class TransportExecuteWatchAction extends WatcherTransportAction { + + private final ExecutionService executionService; + private final Clock clock; + private final TriggerService triggerService; + private final WatchParser watchParser; + private final Client client; + + @Inject + public TransportExecuteWatchAction(Settings settings, TransportService transportService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + ExecutionService executionService, Clock clock, XPackLicenseState licenseState, + WatchParser watchParser, Client client, TriggerService triggerService) { + super(settings, ExecuteWatchAction.NAME, transportService, threadPool, actionFilters, indexNameExpressionResolver, + licenseState, ExecuteWatchRequest::new); + this.executionService = executionService; + this.clock = clock; + this.triggerService = triggerService; + this.watchParser = watchParser; + this.client = client; + } + + @Override + protected void doExecute(ExecuteWatchRequest request, ActionListener listener) { + if (request.getId() != null) { + GetRequest getRequest = new GetRequest(Watch.INDEX, Watch.DOC_TYPE, request.getId()) + .preference(Preference.LOCAL.type()).realtime(true); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, getRequest, + ActionListener.wrap(response -> { + if (response.isExists()) { + Watch watch = + watchParser.parse(request.getId(), true, response.getSourceAsBytesRef(), request.getXContentType()); + watch.version(response.getVersion()); + watch.status().version(response.getVersion()); + executeWatch(request, listener, watch, true); + } else { + listener.onFailure(new ResourceNotFoundException("Watch with id [{}] does not exist", request.getId())); + } + }, listener::onFailure), client::get); + } else if (request.getWatchSource() != null) { + try { + assert !request.isRecordExecution(); + Watch watch = watchParser.parse(ExecuteWatchRequest.INLINE_WATCH_ID, true, request.getWatchSource(), + request.getXContentType()); + executeWatch(request, listener, watch, false); + } catch (IOException e) { + logger.error(new ParameterizedMessage("failed to parse [{}]", request.getId()), e); + listener.onFailure(e); + } + } else { + listener.onFailure(new IllegalArgumentException("no watch provided")); + } + } + + private void executeWatch(ExecuteWatchRequest request, ActionListener listener, + Watch watch, boolean knownWatch) { + + threadPool.executor(XPackField.WATCHER).submit(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + + @Override + protected void doRun() throws Exception { + // ensure that the headers from the incoming request are used instead those of the stored watch + // otherwise the watch would run as the user who stored the watch, but it needs to be run as the user who + // executes this request + Map headers = new HashMap<>(threadPool.getThreadContext().getHeaders()); + watch.status().setHeaders(headers); + + String triggerType = watch.trigger().type(); + TriggerEvent triggerEvent = triggerService.simulateEvent(triggerType, watch.id(), request.getTriggerData()); + + ManualExecutionContext.Builder ctxBuilder = ManualExecutionContext.builder(watch, knownWatch, + new ManualTriggerEvent(triggerEvent.jobName(), triggerEvent), executionService.defaultThrottlePeriod()); + + DateTime executionTime = new DateTime(clock.millis(), UTC); + ctxBuilder.executionTime(executionTime); + for (Map.Entry entry : 
request.getActionModes().entrySet()) { + ctxBuilder.actionMode(entry.getKey(), entry.getValue()); + } + if (request.getAlternativeInput() != null) { + ctxBuilder.withInput(new SimpleInput.Result(new Payload.Simple(request.getAlternativeInput()))); + } + if (request.isIgnoreCondition()) { + ctxBuilder.withCondition(InternalAlwaysCondition.RESULT_INSTANCE); + } + ctxBuilder.recordExecution(request.isRecordExecution()); + + WatchRecord record = executionService.execute(ctxBuilder.build()); + XContentBuilder builder = XContentFactory.jsonBuilder(); + + record.toXContent(builder, WatcherParams.builder().hideSecrets(true).debug(request.isDebug()).build()); + listener.onResponse(new ExecuteWatchResponse(record.id().value(), BytesReference.bytes(builder), XContentType.JSON)); + } + }); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/get/TransportGetWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/get/TransportGetWatchAction.java new file mode 100644 index 0000000000000..033fb7deef07f --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/get/TransportGetWatchAction.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.transport.actions.get; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.Preference; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchResponse; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.transport.actions.WatcherTransportAction; +import org.elasticsearch.xpack.watcher.watch.WatchParser; +import org.joda.time.DateTime; + +import java.time.Clock; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.joda.time.DateTimeZone.UTC; + +public class TransportGetWatchAction extends WatcherTransportAction { + + private final WatchParser parser; + private final Clock clock; + private final Client client; + + @Inject + public TransportGetWatchAction(Settings settings, TransportService 
transportService, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, XPackLicenseState licenseState, + WatchParser parser, Clock clock, Client client) { + super(settings, GetWatchAction.NAME, transportService, threadPool, actionFilters, indexNameExpressionResolver, + licenseState, GetWatchRequest::new); + this.parser = parser; + this.clock = clock; + this.client = client; + } + + @Override + protected void doExecute(GetWatchRequest request, ActionListener listener) { + GetRequest getRequest = new GetRequest(Watch.INDEX, Watch.DOC_TYPE, request.getId()) + .preference(Preference.LOCAL.type()).realtime(true); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, getRequest, + ActionListener.wrap(getResponse -> { + if (getResponse.isExists()) { + try (XContentBuilder builder = jsonBuilder()) { + // When we return the watch via the Get Watch REST API, we want to return the watch as was specified in + // the put api, we don't include the status in the watch source itself, but as a separate top level field, + // so that it indicates the the status is managed by watcher itself. + DateTime now = new DateTime(clock.millis(), UTC); + Watch watch = parser.parseWithSecrets(request.getId(), true, getResponse.getSourceAsBytesRef(), now, + XContentType.JSON); + watch.toXContent(builder, WatcherParams.builder() + .hideSecrets(true) + .includeStatus(false) + .build()); + watch.version(getResponse.getVersion()); + watch.status().version(getResponse.getVersion()); + listener.onResponse(new GetWatchResponse(watch.id(), getResponse.getVersion(), watch.status(), + BytesReference.bytes(builder), XContentType.JSON)); + } + } else { + listener.onResponse(new GetWatchResponse(request.getId())); + } + }, e -> { + // special case. This API should not care if the index is missing or not, + // it should respond with the watch not being found + if (e instanceof IndexNotFoundException) { + listener.onResponse(new GetWatchResponse(request.getId())); + } else { + listener.onFailure(e); + } + }), client::get); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchAction.java new file mode 100644 index 0000000000000..ba380ef842033 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchAction.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
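To make the serialization choices easier to compare: the get action above strips the status and hides secrets (the status is returned as a separate field of `GetWatchResponse`), whereas the put action below stores the full document. A small sketch of the two parameter sets, both built with `WatcherParams`:

```java
// Used when returning a watch to the user (above).
ToXContent.Params getParams = WatcherParams.builder()
        .hideSecrets(true)
        .includeStatus(false)
        .build();

// Used when storing a watch (see DEFAULT_PARAMS in TransportPutWatchAction below).
ToXContent.Params storeParams = WatcherParams.builder()
        .hideSecrets(false)
        .hideHeaders(false)
        .includeStatus(true)
        .build();
```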
+ */ +package org.elasticsearch.xpack.watcher.transport.actions.put; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.Watcher; +import org.elasticsearch.xpack.watcher.transport.actions.WatcherTransportAction; +import org.elasticsearch.xpack.watcher.watch.WatchParser; +import org.joda.time.DateTime; + +import java.time.Clock; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.joda.time.DateTimeZone.UTC; + +/** + * This action internally has two modes of operation: an insert mode and an update mode. + * + * The insert mode simply stores the watch, nothing more. + * The update mode is a bit more complex and uses versioning. First, the version prevents the + * last-write-wins problem when two users (for example via a UI) store the same watch concurrently; + * supplying a version is what triggers the update mode. Second, this mode was mainly introduced for + * updates in which the user does not need to re-provide secrets such as passwords for basic auth or + * for sending emails. On an update the watch is parsed without requiring those secrets, and the + * resulting JSON that is stored for the updated watch will not contain a password. + * (A caller-side sketch of the two modes follows this class.) + * + * Internally both modes result in an update call, albeit with different parameters, namely the + * use of versioning and the docAsUpsert flag.
+ */ +public class TransportPutWatchAction extends WatcherTransportAction { + + private final Clock clock; + private final WatchParser parser; + private final Client client; + private static final ToXContent.Params DEFAULT_PARAMS = + WatcherParams.builder().hideSecrets(false).hideHeaders(false).includeStatus(true).build(); + + @Inject + public TransportPutWatchAction(Settings settings, TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, Clock clock, XPackLicenseState licenseState, + WatchParser parser, Client client) { + super(settings, PutWatchAction.NAME, transportService, threadPool, actionFilters, indexNameExpressionResolver, + licenseState, PutWatchRequest::new); + this.clock = clock; + this.parser = parser; + this.client = client; + } + + @Override + protected void doExecute(PutWatchRequest request, ActionListener listener) { + try { + DateTime now = new DateTime(clock.millis(), UTC); + boolean isUpdate = request.getVersion() > 0; + Watch watch = parser.parseWithSecrets(request.getId(), false, request.getSource(), now, request.xContentType(), isUpdate); + watch.setState(request.isActive(), now); + + // ensure we only filter for the allowed headers + Map filteredHeaders = threadPool.getThreadContext().getHeaders().entrySet().stream() + .filter(e -> Watcher.HEADER_FILTERS.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + watch.status().setHeaders(filteredHeaders); + + try (XContentBuilder builder = jsonBuilder()) { + watch.toXContent(builder, DEFAULT_PARAMS); + + UpdateRequest updateRequest = new UpdateRequest(Watch.INDEX, Watch.DOC_TYPE, request.getId()); + updateRequest.docAsUpsert(isUpdate == false); + updateRequest.version(request.getVersion()); + updateRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + updateRequest.doc(builder); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, updateRequest, + ActionListener.wrap(response -> { + boolean created = response.getResult() == DocWriteResponse.Result.CREATED; + listener.onResponse(new PutWatchResponse(response.getId(), response.getVersion(), created)); + }, listener::onFailure), + client::update); + } + } catch (Exception e) { + listener.onFailure(e); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/service/TransportWatcherServiceAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/service/TransportWatcherServiceAction.java new file mode 100644 index 0000000000000..fa78208494f94 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/service/TransportWatcherServiceAction.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
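As referenced in the class javadoc above, the two modes of TransportPutWatchAction are selected purely by whether the caller supplies a version. The snippet below is an illustrative, caller-side sketch only: the three-argument PutWatchRequest constructor and the setVersion(...) setter are assumptions inferred from the getSource(), xContentType() and getVersion() accessors used in this class, and the watch source is abbreviated.

```java
// Illustrative sketch, not part of the original change; constructor and setter are assumptions.
BytesReference source = new BytesArray("{\"trigger\":{\"schedule\":{\"interval\":\"5m\"}}}");

PutWatchRequest insert = new PutWatchRequest("my_watch", source, XContentType.JSON);
// no version supplied -> insert mode: secrets are parsed and stored, and the write is issued
// with docAsUpsert(true) so the document is created if it does not yet exist

PutWatchRequest update = new PutWatchRequest("my_watch", source, XContentType.JSON);
update.setVersion(5); // assumed setter behind request.getVersion()
// version > 0 -> update mode: a versioned update (docAsUpsert(false)), so concurrent writers
// cannot silently overwrite each other, and secrets may be omitted from the submitted source
```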
+ */ +package org.elasticsearch.xpack.watcher.transport.actions.service; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ack.AckedRequest; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.watcher.WatcherMetaData; +import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceResponse; + +public class TransportWatcherServiceAction extends TransportMasterNodeAction { + + private AckedRequest ackedRequest = new AckedRequest() { + @Override + public TimeValue ackTimeout() { + return AcknowledgedRequest.DEFAULT_ACK_TIMEOUT; + } + + @Override + public TimeValue masterNodeTimeout() { + return AcknowledgedRequest.DEFAULT_ACK_TIMEOUT; + } + }; + + @Inject + public TransportWatcherServiceAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, WatcherServiceAction.NAME, transportService, clusterService, threadPool, actionFilters, + indexNameExpressionResolver, WatcherServiceRequest::new); + } + + @Override + protected String executor() { + return ThreadPool.Names.MANAGEMENT; + } + + @Override + protected WatcherServiceResponse newResponse() { + return new WatcherServiceResponse(); + } + + @Override + protected void masterOperation(WatcherServiceRequest request, ClusterState state, + ActionListener listener) { + switch (request.getCommand()) { + case STOP: + setWatcherMetaDataAndWait(true, listener); + break; + case START: + setWatcherMetaDataAndWait(false, listener); + break; + } + } + + private void setWatcherMetaDataAndWait(boolean manuallyStopped, final ActionListener listener) { + String source = manuallyStopped ? 
"update_watcher_manually_stopped" : "update_watcher_manually_started"; + + clusterService.submitStateUpdateTask(source, + new AckedClusterStateUpdateTask(ackedRequest, listener) { + + @Override + protected WatcherServiceResponse newResponse(boolean acknowledged) { + return new WatcherServiceResponse(acknowledged); + } + + @Override + public ClusterState execute(ClusterState clusterState) { + WatcherMetaData newWatcherMetaData = new WatcherMetaData(manuallyStopped); + WatcherMetaData currentMetaData = clusterState.metaData().custom(WatcherMetaData.TYPE); + + // adhere to the contract of returning the original state if nothing has changed + if (newWatcherMetaData.equals(currentMetaData)) { + return clusterState; + } else { + ClusterState.Builder builder = new ClusterState.Builder(clusterState); + builder.metaData(MetaData.builder(clusterState.getMetaData()) + .putCustom(WatcherMetaData.TYPE, newWatcherMetaData)); + return builder.build(); + } + } + + @Override + public void onFailure(String source, Exception e) { + logger.error(new ParameterizedMessage("could not update watcher stopped status to [{}], source [{}]", + manuallyStopped, source), e); + listener.onFailure(e); + } + }); + } + + @Override + protected ClusterBlockException checkBlock(WatcherServiceRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/stats/TransportWatcherStatsAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/stats/TransportWatcherStatsAction.java new file mode 100644 index 0000000000000..d7f8962756b7c --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/stats/TransportWatcherStatsAction.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.transport.actions.stats; + +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.nodes.TransportNodesAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.watcher.WatcherMetaData; +import org.elasticsearch.xpack.core.watcher.common.stats.Counters; +import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsResponse; +import org.elasticsearch.xpack.watcher.WatcherService; +import org.elasticsearch.xpack.watcher.execution.ExecutionService; +import org.elasticsearch.xpack.watcher.trigger.TriggerService; + +import java.util.Arrays; +import java.util.List; + +/** + * Performs the stats operation. 
+ */ +public class TransportWatcherStatsAction extends TransportNodesAction { + + private final WatcherService watcherService; + private final ExecutionService executionService; + private final TriggerService triggerService; + + @Inject + public TransportWatcherStatsAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, WatcherService watcherService, + ExecutionService executionService, TriggerService triggerService) { + super(settings, WatcherStatsAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, + WatcherStatsRequest::new, WatcherStatsRequest.Node::new, ThreadPool.Names.MANAGEMENT, + WatcherStatsResponse.Node.class); + this.watcherService = watcherService; + this.executionService = executionService; + this.triggerService = triggerService; + } + + @Override + protected WatcherStatsResponse newResponse(WatcherStatsRequest request, List nodes, + List failures) { + return new WatcherStatsResponse(clusterService.getClusterName(), getWatcherMetaData(), nodes, failures); + } + + @Override + protected WatcherStatsRequest.Node newNodeRequest(String nodeId, WatcherStatsRequest request) { + return new WatcherStatsRequest.Node(request, nodeId); + } + + @Override + protected WatcherStatsResponse.Node newNodeResponse() { + return new WatcherStatsResponse.Node(); + } + + @Override + protected WatcherStatsResponse.Node nodeOperation(WatcherStatsRequest.Node request) { + WatcherStatsResponse.Node statsResponse = new WatcherStatsResponse.Node(clusterService.localNode()); + statsResponse.setWatcherState(watcherService.state()); + statsResponse.setThreadPoolQueueSize(executionService.executionThreadPoolQueueSize()); + statsResponse.setThreadPoolMaxSize(executionService.executionThreadPoolMaxSize()); + if (request.includeCurrentWatches()) { + statsResponse.setSnapshots(executionService.currentExecutions()); + } + if (request.includeQueuedWatches()) { + statsResponse.setQueuedWatches(executionService.queuedWatches()); + } + if (request.includeStats()) { + Counters stats = Counters.merge(Arrays.asList(triggerService.stats(), executionService.executionTimes())); + statsResponse.setStats(stats); + } + statsResponse.setWatchesCount(triggerService.count()); + return statsResponse; + } + + private WatcherMetaData getWatcherMetaData() { + WatcherMetaData watcherMetaData = clusterService.state().getMetaData().custom(WatcherMetaData.TYPE); + if (watcherMetaData == null) { + watcherMetaData = new WatcherMetaData(false); + } + return watcherMetaData; + } +} \ No newline at end of file diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/TriggerBuilders.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/TriggerBuilders.java new file mode 100644 index 0000000000000..947d1043570ff --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/TriggerBuilders.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.trigger; + +import org.elasticsearch.xpack.watcher.trigger.schedule.Schedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger; + +public final class TriggerBuilders { + + private TriggerBuilders() { + } + + public static ScheduleTrigger.Builder schedule(Schedule schedule) { + return ScheduleTrigger.builder(schedule); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/TriggerEngine.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/TriggerEngine.java new file mode 100644 index 0000000000000..f370847aca965 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/TriggerEngine.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.trigger.Trigger; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.core.watcher.watch.Watch; + +import java.io.IOException; +import java.util.Collection; +import java.util.Map; +import java.util.function.Consumer; + +public interface TriggerEngine { + + String type(); + + /** + * It's the responsibility of the trigger engine implementation to select the appropriate jobs + * from the given list of jobs + */ + void start(Collection jobs); + + void stop(); + + void register(Consumer> consumer); + + void add(Watch job); + + /** + * Get into a pause state, implies clearing out existing jobs + */ + void pauseExecution(); + + /** + * Returns the number of active jobs currently in this trigger engine implementation + */ + int getJobCount(); + + /** + * Removes the job associated with the given name from this trigger engine. + * + * @param jobId The name of the job to remove + * @return {@code true} if the job existed and removed, {@code false} otherwise. + */ + boolean remove(String jobId); + + E simulateEvent(String jobId, @Nullable Map data, TriggerService service); + + T parseTrigger(String context, XContentParser parser) throws IOException; + + E parseTriggerEvent(TriggerService service, String watchId, String context, XContentParser parser) throws IOException; + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/TriggerService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/TriggerService.java new file mode 100644 index 0000000000000..355d6d6c32e41 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/TriggerService.java @@ -0,0 +1,260 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.trigger; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.common.stats.Counters; +import org.elasticsearch.xpack.core.watcher.trigger.Trigger; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.core.watcher.watch.Watch; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.Consumer; + +import static java.util.Collections.unmodifiableMap; +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArgument; + +public class TriggerService extends AbstractComponent { + + private final GroupedConsumer consumer = new GroupedConsumer(); + private final Map engines; + private final Map perWatchStats = new HashMap<>(); + + public TriggerService(Settings settings, Set engines) { + super(settings); + Map builder = new HashMap<>(); + for (TriggerEngine engine : engines) { + builder.put(engine.type(), engine); + engine.register(consumer); + } + this.engines = unmodifiableMap(builder); + } + + public synchronized void start(Collection watches) { + for (TriggerEngine engine : engines.values()) { + engine.start(watches); + } + watches.forEach(this::addToStats); + } + + public synchronized void stop() { + for (TriggerEngine engine : engines.values()) { + engine.stop(); + } + perWatchStats.clear(); + } + + /** + * Stop execution/triggering of watches on this node, do not try to reload anything, just sit still + */ + public synchronized void pauseExecution() { + engines.values().forEach(TriggerEngine::pauseExecution); + perWatchStats.clear(); + } + + /** + * create statistics for a single watch, and store it in a local map + * allowing for easy deletion in case the watch gets removed from the trigger service + */ + private void addToStats(Watch watch) { + TriggerWatchStats watchStats = TriggerWatchStats.create(watch); + perWatchStats.put(watch.id(), watchStats); + } + + /** + * Returns some statistics about the watches loaded in the trigger service + * @return a set of counters containing statistics + */ + public Counters stats() { + Counters counters = new Counters(); + // for bwc reasons, active/total contain the same values + int watchCount = perWatchStats.size(); + counters.inc("count.active", watchCount); + counters.inc("count.total", watchCount); + counters.inc("watch.trigger._all.active", watchCount); + counters.inc("watch.trigger._all.total", watchCount); + counters.inc("watch.input._all.total", watchCount); + counters.inc("watch.input._all.active", watchCount); + perWatchStats.values().forEach(stats -> { + if (stats.metadata) { + counters.inc("watch.metadata.active"); + counters.inc("watch.metadata.total"); + } + counters.inc("watch.trigger." + stats.triggerType + ".total"); + counters.inc("watch.trigger." + stats.triggerType + ".active"); + if (Strings.isNullOrEmpty(stats.scheduleType) == false) { + counters.inc("watch.trigger.schedule." + stats.scheduleType + ".total"); + counters.inc("watch.trigger.schedule." 
+ stats.scheduleType + ".active"); + counters.inc("watch.trigger.schedule._all.total"); + counters.inc("watch.trigger.schedule._all.active"); + } + counters.inc("watch.input." + stats.inputType + ".active"); + counters.inc("watch.input." + stats.inputType + ".total"); + + counters.inc("watch.condition." + stats.conditionType + ".active"); + counters.inc("watch.condition." + stats.conditionType + ".total"); + counters.inc("watch.condition._all.total"); + counters.inc("watch.condition._all.active"); + + if (Strings.isNullOrEmpty(stats.transformType) == false) { + counters.inc("watch.transform." + stats.transformType + ".active"); + counters.inc("watch.transform." + stats.transformType + ".total"); + counters.inc("watch.transform._all.active"); + counters.inc("watch.transform._all.total"); + } + + for (TriggerWatchStats.ActionStats action : stats.actions) { + counters.inc("watch.action." + action.actionType + ".active"); + counters.inc("watch.action." + action.actionType + ".total"); + counters.inc("watch.action._all.active"); + counters.inc("watch.action._all.total"); + + if (Strings.isNullOrEmpty(action.conditionType) == false) { + counters.inc("watch.action.condition." + action.conditionType + ".active"); + counters.inc("watch.action.condition." + action.conditionType + ".total"); + counters.inc("watch.action.condition._all.active"); + counters.inc("watch.action.condition._all.total"); + } + if (Strings.isNullOrEmpty(action.transformType) == false) { + counters.inc("watch.action.transform." + action.transformType + ".active"); + counters.inc("watch.action.transform." + action.transformType + ".total"); + counters.inc("watch.action.transform._all.active"); + counters.inc("watch.action.transform._all.total"); + } + } + }); + return counters; + } + + /** + * Adds the given job to the trigger service. If there is already a registered job in this service with the + * same job ID, the newly added job will replace the old job (the old job will not be triggered anymore) + * + * @param watch The new watch + */ + public void add(Watch watch) { + engines.get(watch.trigger().type()).add(watch); + addToStats(watch); + } + + /** + * Removes the job associated with the given name from this trigger service. + * + * @param jobName The name of the job to remove + * @return {@code true} if the job existed and removed, {@code false} otherwise. + */ + public boolean remove(String jobName) { + perWatchStats.remove(jobName); + for (TriggerEngine engine : engines.values()) { + if (engine.remove(jobName)) { + return true; + } + } + return false; + } + + public void register(Consumer> consumer) { + this.consumer.add(consumer); + } + + public TriggerEvent simulateEvent(String type, String jobId, Map data) { + TriggerEngine engine = engines.get(type); + if (engine == null) { + throw illegalArgument("could not simulate trigger event. unknown trigger type [{}]", type); + } + return engine.simulateEvent(jobId, data, this); + } + + public Trigger parseTrigger(String jobName, XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + assert token == XContentParser.Token.START_OBJECT; + token = parser.nextToken(); + if (token != XContentParser.Token.FIELD_NAME) { + throw new ElasticsearchParseException("could not parse trigger for [{}]. 
expected trigger type string field, but found [{}]", + jobName, token); + } + String type = parser.currentName(); + token = parser.nextToken(); + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("could not parse trigger [{}] for [{}]. expected trigger an object as the trigger body," + + " but found [{}]", type, jobName, token); + } + Trigger trigger = parseTrigger(jobName, type, parser); + token = parser.nextToken(); + if (token != XContentParser.Token.END_OBJECT) { + throw new ElasticsearchParseException("could not parse trigger [{}] for [{}]. expected [END_OBJECT] token, but found [{}]", + type, jobName, token); + } + return trigger; + } + + public Trigger parseTrigger(String jobName, String type, XContentParser parser) throws IOException { + TriggerEngine engine = engines.get(type); + if (engine == null) { + throw new ElasticsearchParseException("could not parse trigger [{}] for [{}]. unknown trigger type [{}]", type, jobName, type); + } + return engine.parseTrigger(jobName, parser); + } + + public TriggerEvent parseTriggerEvent(String watchId, String context, XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + assert token == XContentParser.Token.START_OBJECT; + token = parser.nextToken(); + if (token != XContentParser.Token.FIELD_NAME) { + throw new ElasticsearchParseException("could not parse trigger event for [{}] for watch [{}]. expected trigger type string " + + "field, but found [{}]", context, watchId, token); + } + String type = parser.currentName(); + token = parser.nextToken(); + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("could not parse trigger event for [{}] for watch [{}]. expected trigger an object as " + + "the trigger body, but found [{}]", context, watchId, token); + } + TriggerEvent trigger = parseTriggerEvent(watchId, context, type, parser); + token = parser.nextToken(); + if (token != XContentParser.Token.END_OBJECT) { + throw new ElasticsearchParseException("could not parse trigger [{}] for [{}]. expected [END_OBJECT] token, but found [{}]", + type, context, token); + } + return trigger; + } + + public TriggerEvent parseTriggerEvent(String watchId, String context, String type, XContentParser parser) throws IOException { + TriggerEngine engine = engines.get(type); + if (engine == null) { + throw new ElasticsearchParseException("Unknown trigger type [{}]", type); + } + return engine.parseTriggerEvent(this, watchId, context, parser); + } + + public long count() { + return perWatchStats.size(); + } + + static class GroupedConsumer implements java.util.function.Consumer> { + + private List>> consumers = new CopyOnWriteArrayList<>(); + + public void add(Consumer> consumer) { + consumers.add(consumer); + } + + @Override + public void accept(Iterable events) { + consumers.forEach(c -> c.accept(events)); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/TriggerWatchStats.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/TriggerWatchStats.java new file mode 100644 index 0000000000000..fb4e8b3f784fa --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/TriggerWatchStats.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger; + +import org.elasticsearch.xpack.core.watcher.actions.ActionWrapper; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger; + +public class TriggerWatchStats { + + public final boolean metadata; + public final String triggerType; + public final String scheduleType; + public final String inputType; + public final String conditionType; + public final String transformType; + public final ActionStats[] actions; + + private TriggerWatchStats(boolean metadata, String triggerType, String scheduleType, String inputType, + String conditionType, String transformType, ActionStats[] actions) { + this.metadata = metadata; + this.triggerType = triggerType; + this.scheduleType = scheduleType; + this.inputType = inputType; + this.conditionType = conditionType; + this.transformType = transformType; + this.actions = actions; + } + + public static final class ActionStats { + public final String actionType; + public final String transformType; + public final String conditionType; + + public ActionStats(String actionType, String transformType, String conditionType) { + this.actionType = actionType; + this.transformType = transformType; + this.conditionType = conditionType; + } + } + + public static TriggerWatchStats create(Watch watch) { + final boolean metadata = watch.metadata() != null && watch.metadata().isEmpty() == false; + final String triggerType = watch.trigger().type(); + String scheduleTriggerType = null; + if (ScheduleTrigger.TYPE.equals(watch.trigger().type())) { + ScheduleTrigger scheduleTrigger = (ScheduleTrigger) watch.trigger(); + scheduleTriggerType = scheduleTrigger.getSchedule().type(); + } + final String inputType = watch.input().type(); + final String conditionType = watch.condition().type(); + final String transformType = watch.transform() != null ? watch.transform().type() : null; + + final ActionStats[] actionStats = new ActionStats[watch.actions().size()]; + int i = 0; + for (ActionWrapper actionWrapper : watch.actions()) { + String transform = actionWrapper.transform() != null ? actionWrapper.transform().type() : null; + String condition = actionWrapper.condition() != null ? actionWrapper.condition().type() : null; + String type = actionWrapper.action().type(); + actionStats[i++] = new ActionStats(type, transform, condition); + } + + return new TriggerWatchStats(metadata, triggerType, scheduleTriggerType, inputType, + conditionType, transformType, actionStats); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/manual/ManualTrigger.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/manual/ManualTrigger.java new file mode 100644 index 0000000000000..d1f9d6e55fac4 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/manual/ManualTrigger.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.trigger.manual; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.trigger.Trigger; + +import java.io.IOException; + +public class ManualTrigger implements Trigger { + + @Override + public String type() { + return ManualTriggerEngine.TYPE; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject().endObject(); + } + + static ManualTrigger parse(XContentParser parser) throws IOException{ + if (parser.currentToken() != XContentParser.Token.START_OBJECT){ + throw new ElasticsearchParseException("unable to parse [" + ManualTriggerEngine.TYPE + + "] trigger. expected a start object token, found [" + parser.currentToken() + "]"); + } + XContentParser.Token token = parser.nextToken(); + if (token != XContentParser.Token.END_OBJECT) { + throw new ElasticsearchParseException("unable to parse [" + ManualTriggerEngine.TYPE + + "] trigger. expected an empty object, but found an object with [" + token + "]"); + } + return new ManualTrigger(); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/manual/ManualTriggerEngine.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/manual/ManualTriggerEngine.java new file mode 100644 index 0000000000000..386e28501210e --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/manual/ManualTriggerEngine.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.manual; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.trigger.TriggerEngine; +import org.elasticsearch.xpack.watcher.trigger.TriggerService; + +import java.io.IOException; +import java.util.Collection; +import java.util.Map; +import java.util.function.Consumer; + +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArgument; + +public class ManualTriggerEngine implements TriggerEngine { + + static final String TYPE = "manual"; + + @Override + public String type() { + return TYPE; + } + + /** + * It's the responsibility of the trigger engine implementation to select the appropriate jobs + * from the given list of jobs + */ + @Override + public void start(Collection jobs) { + } + + @Override + public void stop() { + } + + @Override + public void register(Consumer> consumer) { + } + + @Override + public void add(Watch job) { + } + + @Override + public void pauseExecution() { + } + + @Override + public int getJobCount() { + return 0; + } + + @Override + public boolean remove(String jobId) { + return false; + } + + @Override + public ManualTriggerEvent simulateEvent(String jobId, @Nullable Map data, TriggerService service) { + if (data == null) { + throw illegalArgument("could not simulate manual trigger event. 
missing required simulated trigger type"); + } + if (data.size() == 1) { + String type = data.keySet().iterator().next(); + return new ManualTriggerEvent(jobId, service.simulateEvent(type, jobId, data)); + } + Object type = data.get("type"); + if (type instanceof String) { + return new ManualTriggerEvent(jobId, service.simulateEvent((String) type, jobId, data)); + } + throw illegalArgument("could not simulate manual trigger event. could not resolve simulated trigger type"); + } + + @Override + public ManualTrigger parseTrigger(String context, XContentParser parser) throws IOException { + return ManualTrigger.parse(parser); + } + + @Override + public ManualTriggerEvent parseTriggerEvent(TriggerService service, String watchId, String context, XContentParser parser) throws + IOException { + return ManualTriggerEvent.parse(service, watchId, context, parser); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/manual/ManualTriggerEvent.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/manual/ManualTriggerEvent.java new file mode 100644 index 0000000000000..a0d03a735be9a --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/manual/ManualTriggerEvent.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.manual; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.watcher.trigger.TriggerService; + +import java.io.IOException; + +public class ManualTriggerEvent extends TriggerEvent { + + private final TriggerEvent triggerEvent; + + public ManualTriggerEvent(String jobName, TriggerEvent triggerEvent) { + super(jobName, triggerEvent.triggeredTime()); + this.triggerEvent = triggerEvent; + data.putAll(triggerEvent.data()); + } + + @Override + public String type() { + return ManualTriggerEngine.TYPE; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(triggerEvent.type(), triggerEvent, params); + return builder.endObject(); + } + + @Override + public void recordDataXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(ManualTriggerEngine.TYPE); + triggerEvent.recordDataXContent(builder, params); + builder.endObject(); + } + + public static ManualTriggerEvent parse(TriggerService triggerService, String watchId, String context, XContentParser parser) throws + IOException { + TriggerEvent parsedTriggerEvent = triggerService.parseTriggerEvent(watchId, context, parser); + return new ManualTriggerEvent(context, parsedTriggerEvent); + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronSchedule.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronSchedule.java new file mode 100644 index 0000000000000..2e8a2f0e3604c --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronSchedule.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class CronSchedule extends CronnableSchedule { + + public static final String TYPE = "cron"; + + public CronSchedule(String... crons) { + super(crons); + } + + @Override + public String type() { + return TYPE; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return crons.length == 1 ? builder.value(crons[0]) : builder.value(crons); + } + + public static class Parser implements Schedule.Parser { + + @Override + public String type() { + return TYPE; + } + + @Override + public CronSchedule parse(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + if (token == XContentParser.Token.VALUE_STRING) { + try { + return new CronSchedule(parser.text()); + } catch (IllegalArgumentException iae) { + throw new ElasticsearchParseException("could not parse [cron] schedule", iae); + } + } else if (token == XContentParser.Token.START_ARRAY) { + List crons = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + switch (token) { + case VALUE_STRING: + crons.add(parser.text()); + break; + default: + throw new ElasticsearchParseException("could not parse [cron] schedule. expected a string value in the cron " + + "array but found [" + token + "]"); + } + } + if (crons.isEmpty()) { + throw new ElasticsearchParseException("could not parse [cron] schedule. no cron expression found in cron array"); + } + try { + return new CronSchedule(crons.toArray(new String[crons.size()])); + } catch (IllegalArgumentException iae) { + throw new ElasticsearchParseException("could not parse [cron] schedule", iae); + } + + } else { + throw new ElasticsearchParseException("could not parse [cron] schedule. expected either a cron string value or an array " + + "of cron string values, but found [" + token + "]"); + } + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java new file mode 100644 index 0000000000000..ec309c69476cc --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronnableSchedule.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + +import org.elasticsearch.xpack.core.scheduler.Cron; + +import java.util.Arrays; +import java.util.Comparator; +import java.util.Objects; + +public abstract class CronnableSchedule implements Schedule { + + private static final Comparator CRON_COMPARATOR = new Comparator() { + @Override + public int compare(Cron c1, Cron c2) { + return c1.expression().compareTo(c2.expression()); + } + }; + + protected final Cron[] crons; + + public CronnableSchedule(String... expressions) { + this(crons(expressions)); + } + + public CronnableSchedule(Cron... crons) { + assert crons.length > 0; + this.crons = crons; + Arrays.sort(crons, CRON_COMPARATOR); + } + + @Override + public long nextScheduledTimeAfter(long startTime, long time) { + assert time >= startTime; + long nextTime = Long.MAX_VALUE; + for (Cron cron : crons) { + nextTime = Math.min(nextTime, cron.getNextValidTimeAfter(time)); + } + return nextTime; + } + + public Cron[] crons() { + return crons; + } + + @Override + public int hashCode() { + return Objects.hash((Object[]) crons); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + final CronnableSchedule other = (CronnableSchedule) obj; + return Objects.deepEquals(this.crons, other.crons); + } + + static Cron[] crons(String... expressions) { + Cron[] crons = new Cron[expressions.length]; + for (int i = 0; i < crons.length; i++) { + crons[i] = new Cron(expressions[i]); + } + return crons; + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/DailySchedule.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/DailySchedule.java new file mode 100644 index 0000000000000..e091863b9bd69 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/DailySchedule.java @@ -0,0 +1,150 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.DayTimes; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +public class DailySchedule extends CronnableSchedule { + + public static final String TYPE = "daily"; + + public static final DayTimes[] DEFAULT_TIMES = new DayTimes[] { DayTimes.MIDNIGHT }; + + private final DayTimes[] times; + + DailySchedule() { + this(DEFAULT_TIMES); + } + + DailySchedule(DayTimes... 
times) { + super(crons(times)); + this.times = times; + } + + @Override + public String type() { + return TYPE; + } + + public DayTimes[] times() { + return times; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (params.paramAsBoolean("normalize", false) && times.length == 1) { + builder.field(Parser.AT_FIELD.getPreferredName(), times[0], params); + } else { + builder.startArray(Parser.AT_FIELD.getPreferredName()); + for (DayTimes dayTimes : times) { + dayTimes.toXContent(builder, params); + } + builder.endArray(); + } + return builder.endObject(); + } + + public static Builder builder() { + return new Builder(); + } + + static String[] crons(DayTimes[] times) { + assert times.length > 0 : "at least one time must be defined"; + List crons = new ArrayList<>(times.length); + for (DayTimes time : times) { + crons.add(time.cron()); + } + return crons.toArray(new String[crons.size()]); + } + + public static class Parser implements Schedule.Parser { + + static final ParseField AT_FIELD = new ParseField("at"); + + @Override + public String type() { + return TYPE; + } + + @Override + public DailySchedule parse(XContentParser parser) throws IOException { + List times = new ArrayList<>(); + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (AT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + if (token != XContentParser.Token.START_ARRAY) { + try { + times.add(DayTimes.parse(parser, token)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse [{}] schedule. invalid time value for field [{}] - [{}]", + pe, TYPE, currentFieldName, token); + } + } else { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + try { + times.add(DayTimes.parse(parser, token)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse [{}] schedule. invalid time value for field [{}] -" + + " [{}]", pe, TYPE, currentFieldName, token); + } + } + } + } else { + throw new ElasticsearchParseException("could not parse [{}] schedule. unexpected field [{}]", TYPE, currentFieldName); + } + } + + return times.isEmpty() ? new DailySchedule() : new DailySchedule(times.toArray(new DayTimes[times.size()])); + } + } + + public static class Builder { + + private Set times = new HashSet<>(); + + private Builder() { + } + + public Builder at(int hour, int minute) { + times.add(new DayTimes(hour, minute)); + return this; + } + + public Builder atRoundHour(int... hours) { + times.add(new DayTimes(hours, new int[] { 0 })); + return this; + } + + public Builder atNoon() { + times.add(DayTimes.NOON); + return this; + } + + public Builder atMidnight() { + times.add(DayTimes.MIDNIGHT); + return this; + } + + public DailySchedule build() { + return times.isEmpty() ? 
new DailySchedule() : new DailySchedule(times.toArray(new DayTimes[times.size()])); + } + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/HourlySchedule.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/HourlySchedule.java new file mode 100644 index 0000000000000..2ba274251b395 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/HourlySchedule.java @@ -0,0 +1,153 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.DayTimes; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArgument; + +public class HourlySchedule extends CronnableSchedule { + + public static final String TYPE = "hourly"; + + public static final int[] DEFAULT_MINUTES = new int[] { 0 }; + + private final int[] minutes; + + HourlySchedule() { + this(DEFAULT_MINUTES); + } + + HourlySchedule(int... minutes) { + super(cron(minutes)); + this.minutes = minutes; + } + + @Override + public String type() { + return TYPE; + } + + public int[] minutes() { + return minutes; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (params.paramAsBoolean("normalize", false) && minutes.length == 1) { + builder.field(Parser.MINUTE_FIELD.getPreferredName(), minutes[0]); + } else { + builder.array(Parser.MINUTE_FIELD.getPreferredName(), minutes); + } + return builder.endObject(); + } + + public static Builder builder() { + return new Builder(); + } + + static String cron(int[] minutes) { + assert minutes.length > 0 : "at least one minute must be defined"; + StringBuilder sb = new StringBuilder("0 "); + for (int i = 0; i < minutes.length; i++) { + if (i != 0) { + sb.append(","); + } + if (!validMinute(minutes[i])) { + throw illegalArgument("invalid hourly minute [{}]. minute must be between 0 and 59 incl.", minutes[i]); + } + sb.append(minutes[i]); + } + return sb.append(" * * * ?").toString(); + } + + static boolean validMinute(int minute) { + return minute >= 0 && minute < 60; + } + + public static class Parser implements Schedule.Parser { + + static final ParseField MINUTE_FIELD = new ParseField("minute"); + + @Override + public String type() { + return TYPE; + } + + @Override + public HourlySchedule parse(XContentParser parser) throws IOException { + List minutes = new ArrayList<>(); + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (currentFieldName == null) { + throw new ElasticsearchParseException("could not parse [{}] schedule. 
unexpected token [{}]", TYPE, token); + } else if (MINUTE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + if (token.isValue()) { + try { + minutes.add(DayTimes.parseMinuteValue(parser, token)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse [{}] schedule. invalid value for [{}]", pe, TYPE, + currentFieldName); + } + } else if (token == XContentParser.Token.START_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + try { + minutes.add(DayTimes.parseMinuteValue(parser, token)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse [{}] schedule. invalid value for [{}]", pe, TYPE, + currentFieldName); + } + } + } else { + throw new ElasticsearchParseException("could not parse [{}] schedule. invalid value for [{}]. " + + "expected either string/value or an array of string/number values, but found [{}]", TYPE, currentFieldName, + token); + } + } else { + throw new ElasticsearchParseException("could not parse [{}] schedule. unexpected field [{}]", TYPE, currentFieldName); + } + } + + return minutes.isEmpty() ? new HourlySchedule() : new HourlySchedule(CollectionUtils.toArray(minutes)); + } + + } + + public static class Builder { + + private Set minutes = new HashSet<>(); + + private Builder() { + } + + public Builder minutes(int... minutes) { + for (int minute : minutes) { + this.minutes.add(minute); + } + return this; + } + + public HourlySchedule build() { + return minutes.isEmpty() ? new HourlySchedule() : new HourlySchedule(CollectionUtils.toArray(minutes)); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/IntervalSchedule.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/IntervalSchedule.java new file mode 100644 index 0000000000000..4940377cc1a7d --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/IntervalSchedule.java @@ -0,0 +1,205 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Locale; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArgument; + +public class IntervalSchedule implements Schedule { + + public static final String TYPE = "interval"; + + private final Interval interval; + + public IntervalSchedule(Interval interval) { + if (interval.millis < 1000) { + throw illegalArgument("interval can't be lower than 1000 ms, but [{}] was specified", interval); + } + this.interval = interval; + } + + @Override + public String type() { + return TYPE; + } + + @Override + public long nextScheduledTimeAfter(long startTime, long time) { + assert time >= startTime; + if (startTime == time) { + time++; + } + long delta = time - startTime; + return startTime + (delta / interval.millis + 1) * interval.millis; + } + + public Interval interval() { + return interval; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return interval.toXContent(builder, params); + } + + @Override + public String toString() { + return interval.toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + IntervalSchedule schedule = (IntervalSchedule) o; + + if (!interval.equals(schedule.interval)) return false; + + return true; + } + + @Override + public int hashCode() { + return interval.hashCode(); + } + + public static class Parser implements Schedule.Parser { + + @Override + public String type() { + return TYPE; + } + + @Override + public IntervalSchedule parse(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + try { + if (token == XContentParser.Token.VALUE_NUMBER) { + return new IntervalSchedule(Interval.seconds(parser.longValue())); + } + if (token == XContentParser.Token.VALUE_STRING) { + String value = parser.text(); + return new IntervalSchedule(Interval.parse(value)); + } + } catch (Exception e) { + throw new ElasticsearchParseException("could not parse schedule: {}", e, e.getMessage()); + } + throw new ElasticsearchParseException("could not parse [{}] schedule. expected either a numeric value " + + "(millis) or a string value representing time value (e.g. '5s'), but found [{}]", TYPE, token); + } + } + + /** + * Represents a time interval. Ideally we would have used TimeValue here, but we don't because: + * 1. We should limit the time values that the user can configure (we don't want to support nanos & millis + * 2. TimeValue formatting & parsing is inconsistent (it doesn't format to a value that it can parse) + * 3. The equals of TimeValue is odd - it will only equate two time values that have the exact same unit & duration, + * this interval on the other hand, equates based on the millis value. + * 4. 
We have the advantage of making this interval construct a ToXContent + */ + public static class Interval implements ToXContent { + + public enum Unit { + SECONDS(TimeUnit.SECONDS.toMillis(1), "s"), + MINUTES(TimeUnit.MINUTES.toMillis(1), "m"), + HOURS(TimeUnit.HOURS.toMillis(1), "h"), + DAYS(TimeUnit.DAYS.toMillis(1), "d"), + WEEK(TimeUnit.DAYS.toMillis(7), "w"); + + private final String suffix; + private final long millis; + + Unit(long millis, String suffix) { + this.millis = millis; + this.suffix = suffix; + } + + public long millis(long duration) { + return duration * millis; + } + + public long parse(String value) { + assert value.endsWith(suffix); + String num = value.substring(0, value.indexOf(suffix)); + try { + return Long.parseLong(num); + } catch (NumberFormatException nfe) { + throw new ElasticsearchParseException("could not parse [{}] schedule. could not parse [{}] as a [{}] duration", + TYPE, num, name().toLowerCase(Locale.ROOT)); + } + } + + public String format(long duration) { + return duration + suffix; + } + } + + private final long duration; + private final Unit unit; + private final long millis; // computed once + + public Interval(long duration, Unit unit) { + this.duration = duration; + this.unit = unit; + this.millis = unit.millis(duration); + } + + public long seconds() { + return unit.millis(duration) / Unit.SECONDS.millis; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(unit.format(duration)); + } + + @Override + public String toString() { + return unit.format(duration); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Interval interval = (Interval) o; + + if (unit.millis(duration) != interval.unit.millis(interval.duration)) return false; + + return true; + } + + @Override + public int hashCode() { + long millis = unit.millis(duration); + int result = (int) (millis ^ (millis >>> 32)); + return result; + } + + public static Interval seconds(long duration) { + return new Interval(duration, Unit.SECONDS); + } + + public static Interval parse(String value) { + for (Unit unit : Unit.values()) { + if (value.endsWith(unit.suffix)) { + return new Interval(unit.parse(value), unit); + } + } + throw illegalArgument("could not parse [{}] schedule. unrecognized interval format [{}]", TYPE, value); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/MonthlySchedule.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/MonthlySchedule.java new file mode 100644 index 0000000000000..63c1d497f1796 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/MonthlySchedule.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
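A short worked sketch of the `Interval.parse` suffix handling and the `nextScheduledTimeAfter` arithmetic introduced above, using hypothetical millisecond timestamps (not part of the diff):

```java
// "5s" -> Interval(5, Unit.SECONDS) -> 5000 ms; the constructor rejects intervals under 1000 ms.
IntervalSchedule schedule = new IntervalSchedule(IntervalSchedule.Interval.parse("5s"));

// start time 0, current time 12000 ms:
// delta = 12000, (12000 / 5000 + 1) * 5000 = 15000 -> the next fire time is the 15 s tick.
long next = schedule.nextScheduledTimeAfter(0, 12_000);
assert next == 15_000L;
```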
+ */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.MonthTimes; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +public class MonthlySchedule extends CronnableSchedule { + + public static final String TYPE = "monthly"; + + public static final MonthTimes[] DEFAULT_TIMES = new MonthTimes[] { new MonthTimes() }; + + private final MonthTimes[] times; + + MonthlySchedule() { + this(DEFAULT_TIMES); + } + + MonthlySchedule(MonthTimes... times) { + super(crons(times)); + this.times = times; + } + + @Override + public String type() { + return TYPE; + } + + public MonthTimes[] times() { + return times; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (params.paramAsBoolean("normalize", false) && times.length == 1) { + return times[0].toXContent(builder, params); + } + builder.startArray(); + for (MonthTimes monthTimes : times) { + monthTimes.toXContent(builder, params); + } + return builder.endArray(); + } + + public static Builder builder() { + return new Builder(); + } + + static String[] crons(MonthTimes[] times) { + assert times.length > 0 : "at least one time must be defined"; + Set crons = new HashSet<>(times.length); + for (MonthTimes time : times) { + crons.addAll(time.crons()); + } + return crons.toArray(new String[crons.size()]); + } + + public static class Parser implements Schedule.Parser { + + @Override + public String type() { + return TYPE; + } + + @Override + public MonthlySchedule parse(XContentParser parser) throws IOException { + if (parser.currentToken() == XContentParser.Token.START_OBJECT) { + try { + return new MonthlySchedule(MonthTimes.parse(parser, parser.currentToken())); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse [{}] schedule. invalid month times", pe, TYPE); + } + } + if (parser.currentToken() == XContentParser.Token.START_ARRAY) { + List times = new ArrayList<>(); + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + try { + times.add(MonthTimes.parse(parser, token)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse [{}] schedule. invalid month times", pe, TYPE); + } + } + return times.isEmpty() ? new MonthlySchedule() : new MonthlySchedule(times.toArray(new MonthTimes[times.size()])); + } + throw new ElasticsearchParseException("could not parse [{}] schedule. expected either an object or an array " + + "of objects representing month times, but found [{}] instead", TYPE, parser.currentToken()); + } + } + + public static class Builder { + + private final Set times = new HashSet<>(); + + private Builder() { + } + + public Builder time(MonthTimes time) { + times.add(time); + return this; + } + + public Builder time(MonthTimes.Builder builder) { + return time(builder.build()); + } + + public MonthlySchedule build() { + return times.isEmpty() ? 
new MonthlySchedule() : new MonthlySchedule(times.toArray(new MonthTimes[times.size()])); + } + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/Schedule.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/Schedule.java new file mode 100644 index 0000000000000..f3c9248fbb894 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/Schedule.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; + +import java.io.IOException; + +public interface Schedule extends SchedulerEngine.Schedule, ToXContent { + + String type(); + + interface Parser { + + String type(); + + S parse(XContentParser parser) throws IOException; + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleRegistry.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleRegistry.java new file mode 100644 index 0000000000000..f4a272ebdd893 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleRegistry.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +public class ScheduleRegistry { + private final Map parsers = new HashMap<>(); + + public ScheduleRegistry(Set parsers) { + parsers.stream().forEach(parser -> this.parsers.put(parser.type(), parser)); + } + + public Set types() { + return parsers.keySet(); + } + + public Schedule parse(String context, XContentParser parser) throws IOException { + String type = null; + XContentParser.Token token; + Schedule schedule = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + type = parser.currentName(); + } else if (type != null) { + schedule = parse(context, type, parser); + } else { + throw new ElasticsearchParseException("could not parse schedule. expected a schedule type field, but found [{}] instead", + token); + } + } + if (schedule == null) { + throw new ElasticsearchParseException("could not parse schedule. expected a schedule type field, but no fields were found"); + } + return schedule; + } + + public Schedule parse(String context, String type, XContentParser parser) throws IOException { + Schedule.Parser scheduleParser = parsers.get(type); + if (scheduleParser == null) { + throw new ElasticsearchParseException("could not parse schedule for [{}]. 
unknown schedule type [{}]", context, type); + } + return scheduleParser.parse(parser); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTrigger.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTrigger.java new file mode 100644 index 0000000000000..7561560202dfb --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTrigger.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.watcher.trigger.Trigger; + +import java.io.IOException; + +public class ScheduleTrigger implements Trigger { + + public static final String TYPE = "schedule"; + + private final Schedule schedule; + + public ScheduleTrigger(Schedule schedule) { + this.schedule = schedule; + } + + @Override + public String type() { + return TYPE; + } + + public Schedule getSchedule() { + return schedule; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ScheduleTrigger trigger = (ScheduleTrigger) o; + + if (!schedule.equals(trigger.schedule)) return false; + + return true; + } + + @Override + public int hashCode() { + return schedule.hashCode(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject().field(schedule.type(), schedule, params).endObject(); + } + + public static Builder builder(Schedule schedule) { + return new Builder(schedule); + } + + public static class Builder implements Trigger.Builder { + + private final Schedule schedule; + + private Builder(Schedule schedule) { + this.schedule = schedule; + } + + @Override + public ScheduleTrigger build() { + return new ScheduleTrigger(schedule); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTriggerEngine.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTriggerEngine.java new file mode 100644 index 0000000000000..dfee54c391aff --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTriggerEngine.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
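The registry keys each parser by its `type()` string, so the single field name inside a watch's `schedule` object selects the parser. A wiring sketch (not part of the diff; the generic parameters on the collections appear to have been stripped from this rendering of the patch, so the raw `Schedule.Parser` type is used here):

```java
// Sketch only: registering two of the parsers added in this change.
// A trigger such as {"schedule": {"interval": "5m"}} is then routed to IntervalSchedule.Parser.
Set<Schedule.Parser> parsers = new HashSet<>();
parsers.add(new IntervalSchedule.Parser());
parsers.add(new MonthlySchedule.Parser());
ScheduleRegistry registry = new ScheduleRegistry(parsers);
// registry.parse(context, xContentParser) dispatches on the first field name it encounters.
```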
+ */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.watcher.trigger.TriggerEngine; +import org.elasticsearch.xpack.watcher.trigger.TriggerService; +import org.joda.time.DateTime; + +import java.io.IOException; +import java.time.Clock; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.Consumer; + +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArgument; +import static org.joda.time.DateTimeZone.UTC; + +public abstract class ScheduleTriggerEngine extends AbstractComponent implements TriggerEngine { + + public static final String TYPE = ScheduleTrigger.TYPE; + + protected final List>> consumers = new CopyOnWriteArrayList<>(); + protected final ScheduleRegistry scheduleRegistry; + protected final Clock clock; + + public ScheduleTriggerEngine(Settings settings, ScheduleRegistry scheduleRegistry, Clock clock) { + super(settings); + this.scheduleRegistry = scheduleRegistry; + this.clock = clock; + } + + @Override + public String type() { + return TYPE; + } + + @Override + public void register(Consumer> consumer) { + consumers.add(consumer); + } + + + @Override + public ScheduleTriggerEvent simulateEvent(String jobId, @Nullable Map data, TriggerService service) { + DateTime now = new DateTime(clock.millis(), UTC); + if (data == null) { + return new ScheduleTriggerEvent(jobId, now, now); + } + + Object value = data.get(ScheduleTriggerEvent.Field.TRIGGERED_TIME.getPreferredName()); + DateTime triggeredTime = value != null ? WatcherDateTimeUtils.convertToDate(value, clock) : now; + if (triggeredTime == null) { + throw illegalArgument("could not simulate schedule event. could not convert provided triggered time [{}] to date/time", value); + } + + value = data.get(ScheduleTriggerEvent.Field.SCHEDULED_TIME.getPreferredName()); + DateTime scheduledTime = value != null ? WatcherDateTimeUtils.convertToDate(value, clock) : triggeredTime; + if (scheduledTime == null) { + throw illegalArgument("could not simulate schedule event. could not convert provided scheduled time [{}] to date/time", value); + } + + return new ScheduleTriggerEvent(jobId, triggeredTime, scheduledTime); + } + + @Override + public ScheduleTrigger parseTrigger(String context, XContentParser parser) throws IOException { + Schedule schedule = scheduleRegistry.parse(context, parser); + return new ScheduleTrigger(schedule); + } + + @Override + public ScheduleTriggerEvent parseTriggerEvent(TriggerService service, String watchId, String context, XContentParser parser) throws + IOException { + return ScheduleTriggerEvent.parse(parser, watchId, context, clock); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTriggerEvent.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTriggerEvent.java new file mode 100644 index 0000000000000..89fe1bf7e60d4 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTriggerEvent.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. 
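A small sketch of the `simulateEvent` fallback branch above (assumes `engine` is some concrete `ScheduleTriggerEngine`, such as the ticker engine added later in this patch, and `triggerService` an existing `TriggerService`):

```java
// With a null data map, both the triggered time and the scheduled time default to the engine clock's "now".
ScheduleTriggerEvent event = engine.simulateEvent("my-watch", null, triggerService);
```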
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.time.Clock; + +public class ScheduleTriggerEvent extends TriggerEvent { + + private final DateTime scheduledTime; + + public ScheduleTriggerEvent(DateTime triggeredTime, DateTime scheduledTime) { + this(null, triggeredTime, scheduledTime); + } + + public ScheduleTriggerEvent(String jobName, DateTime triggeredTime, DateTime scheduledTime) { + super(jobName, triggeredTime); + this.scheduledTime = scheduledTime; + data.put(Field.SCHEDULED_TIME.getPreferredName(), scheduledTime); + } + + @Override + public String type() { + return ScheduleTrigger.TYPE; + } + + public DateTime scheduledTime() { + return scheduledTime; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + WatcherDateTimeUtils.writeDate(Field.TRIGGERED_TIME.getPreferredName(), builder, triggeredTime); + WatcherDateTimeUtils.writeDate(Field.SCHEDULED_TIME.getPreferredName(), builder, scheduledTime); + return builder.endObject(); + } + + @Override + public void recordDataXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(ScheduleTrigger.TYPE); + WatcherDateTimeUtils.writeDate(Field.SCHEDULED_TIME.getPreferredName(), builder, scheduledTime); + builder.endObject(); + } + + public static ScheduleTriggerEvent parse(XContentParser parser, String watchId, String context, Clock clock) throws IOException { + DateTime triggeredTime = null; + DateTime scheduledTime = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (Field.TRIGGERED_TIME.match(currentFieldName, parser.getDeprecationHandler())) { + try { + triggeredTime = WatcherDateTimeUtils.parseDateMath(currentFieldName, parser, DateTimeZone.UTC, clock); + } catch (ElasticsearchParseException pe) { + //Failed to parse as a date try datemath parsing + throw new ElasticsearchParseException("could not parse [{}] trigger event for [{}] for watch [{}]. failed to parse " + + "date field [{}]", pe, ScheduleTriggerEngine.TYPE, context, watchId, currentFieldName); + } + } else if (Field.SCHEDULED_TIME.match(currentFieldName, parser.getDeprecationHandler())) { + try { + scheduledTime = WatcherDateTimeUtils.parseDateMath(currentFieldName, parser, DateTimeZone.UTC, clock); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse [{}] trigger event for [{}] for watch [{}]. failed to parse " + + "date field [{}]", pe, ScheduleTriggerEngine.TYPE, context, watchId, currentFieldName); + } + }else { + throw new ElasticsearchParseException("could not parse trigger event for [{}] for watch [{}]. 
unexpected token [{}]", + context, watchId, token); + } + } + + // should never be, it's fully controlled internally (not coming from the user) + assert triggeredTime != null && scheduledTime != null; + return new ScheduleTriggerEvent(triggeredTime, scheduledTime); + } + + interface Field extends TriggerEvent.Field { + ParseField SCHEDULED_TIME = new ParseField("scheduled_time"); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/Schedules.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/Schedules.java new file mode 100644 index 0000000000000..ea9ed90e61ab9 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/Schedules.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + +/** + * A static factory for all available schedules. + */ +public class Schedules { + + private Schedules() { + } + + /** + * Creates an interval schedule. The provided string can have the following format: + *
    + *
+     * <ul>
+     *     <li>34s - a 34 seconds long interval</li>
+     *     <li>23m - a 23 minutes long interval</li>
+     *     <li>40h - a 40 hours long interval</li>
+     *     <li>63d - a 63 days long interval</li>
+     *     <li>27w - a 27 weeks long interval</li>
+     * </ul>
+ * + * @param interval The fixed interval by which the schedule will trigger. + * @return The newly created interval schedule + */ + public static IntervalSchedule interval(String interval) { + return new IntervalSchedule(IntervalSchedule.Interval.parse(interval)); + } + + /** + * Creates an interval schedule. + * + * @param duration The duration of the interval + * @param unit The unit of the duration (seconds, minutes, hours, days or weeks) + * @return The newly created interval schedule. + */ + public static IntervalSchedule interval(long duration, IntervalSchedule.Interval.Unit unit) { + return new IntervalSchedule(new IntervalSchedule.Interval(duration, unit)); + } + + /** + * Creates a cron schedule. + * + * @param cronExpressions one or more cron expressions + * @return the newly created cron schedule. + * @throws IllegalArgumentException if any of the given expression is invalid + */ + public static CronSchedule cron(String... cronExpressions) { + return new CronSchedule(cronExpressions); + } + + /** + * Creates an hourly schedule. + * + * @param minutes the minutes within the hour that the schedule should trigger at. values must be + * between 0 and 59 (inclusive). + * @return the newly created hourly schedule + * @throws IllegalArgumentException if any of the provided minutes are out of valid range + */ + public static HourlySchedule hourly(int... minutes) { + return new HourlySchedule(minutes); + } + + /** + * @return A builder for an hourly schedule. + */ + public static HourlySchedule.Builder hourly() { + return HourlySchedule.builder(); + } + + /** + * @return A builder for a daily schedule. + */ + public static DailySchedule.Builder daily() { + return DailySchedule.builder(); + } + + /** + * @return A builder for a weekly schedule. + */ + public static WeeklySchedule.Builder weekly() { + return WeeklySchedule.builder(); + } + + /** + * @return A builder for a monthly schedule. + */ + public static MonthlySchedule.Builder monthly() { + return MonthlySchedule.builder(); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/WeeklySchedule.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/WeeklySchedule.java new file mode 100644 index 0000000000000..923c642566eb2 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/WeeklySchedule.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.WeekTimes; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +public class WeeklySchedule extends CronnableSchedule { + + public static final String TYPE = "weekly"; + + public static final WeekTimes[] DEFAULT_TIMES = new WeekTimes[] { new WeekTimes() }; + + private final WeekTimes[] times; + + WeeklySchedule() { + this(DEFAULT_TIMES); + } + + WeeklySchedule(WeekTimes... 
times) { + super(crons(times)); + this.times = times; + } + + @Override + public String type() { + return TYPE; + } + + public WeekTimes[] times() { + return times; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (params.paramAsBoolean("normalize", false) && times.length == 1) { + return times[0].toXContent(builder, params); + } + builder.startArray(); + for (WeekTimes weekTimes : times) { + weekTimes.toXContent(builder, params); + } + return builder.endArray(); + } + + public static Builder builder() { + return new Builder(); + } + + static String[] crons(WeekTimes[] times) { + assert times.length > 0 : "at least one time must be defined"; + List crons = new ArrayList<>(times.length); + for (WeekTimes time : times) { + crons.addAll(time.crons()); + } + return crons.toArray(new String[crons.size()]); + } + + public static class Parser implements Schedule.Parser { + + @Override + public String type() { + return TYPE; + } + + @Override + public WeeklySchedule parse(XContentParser parser) throws IOException { + if (parser.currentToken() == XContentParser.Token.START_OBJECT) { + try { + return new WeeklySchedule(WeekTimes.parse(parser, parser.currentToken())); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse [{}] schedule. invalid weekly times", pe, TYPE); + } + } + if (parser.currentToken() == XContentParser.Token.START_ARRAY) { + List times = new ArrayList<>(); + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + try { + times.add(WeekTimes.parse(parser, token)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse [{}] schedule. invalid weekly times", pe, TYPE); + } + } + return times.isEmpty() ? new WeeklySchedule() : new WeeklySchedule(times.toArray(new WeekTimes[times.size()])); + } + throw new ElasticsearchParseException("could not parse [{}] schedule. expected either an object or an array " + + "of objects representing weekly times, but found [{}] instead", TYPE, parser.currentToken()); + } + } + + public static class Builder { + + private final Set times = new HashSet<>(); + + public Builder time(WeekTimes time) { + times.add(time); + return this; + } + + public Builder time(WeekTimes.Builder time) { + return time(time.build()); + } + + public WeeklySchedule build() { + return times.isEmpty() ? new WeeklySchedule() : new WeeklySchedule(times.toArray(new WeekTimes[times.size()])); + } + + } + + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/YearlySchedule.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/YearlySchedule.java new file mode 100644 index 0000000000000..739f22e2dfe8a --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/YearlySchedule.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.YearTimes; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +public class YearlySchedule extends CronnableSchedule { + + public static final String TYPE = "yearly"; + + public static final YearTimes[] DEFAULT_TIMES = new YearTimes[] { new YearTimes() }; + + private final YearTimes[] times; + + YearlySchedule() { + this(DEFAULT_TIMES); + } + + YearlySchedule(YearTimes... times) { + super(crons(times)); + this.times = times; + } + + @Override + public String type() { + return TYPE; + } + + public YearTimes[] times() { + return times; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (params.paramAsBoolean("normalize", false) && times.length == 1) { + return times[0].toXContent(builder, params); + } + builder.startArray(); + for (YearTimes yearTimes : times) { + yearTimes.toXContent(builder, params); + } + return builder.endArray(); + } + + public static Builder builder() { + return new Builder(); + } + + static String[] crons(YearTimes[] times) { + assert times.length > 0 : "at least one time must be defined"; + Set crons = new HashSet<>(times.length); + for (YearTimes time : times) { + crons.addAll(time.crons()); + } + return crons.toArray(new String[crons.size()]); + } + + public static class Parser implements Schedule.Parser { + + @Override + public String type() { + return TYPE; + } + + @Override + public YearlySchedule parse(XContentParser parser) throws IOException { + if (parser.currentToken() == XContentParser.Token.START_OBJECT) { + try { + return new YearlySchedule(YearTimes.parse(parser, parser.currentToken())); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse [{}] schedule. invalid year times", pe, TYPE); + } + } + if (parser.currentToken() == XContentParser.Token.START_ARRAY) { + List times = new ArrayList<>(); + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + try { + times.add(YearTimes.parse(parser, token)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse [{}] schedule. invalid year times", pe, TYPE); + } + } + return times.isEmpty() ? new YearlySchedule() : new YearlySchedule(times.toArray(new YearTimes[times.size()])); + } + throw new ElasticsearchParseException("could not parse [{}] schedule. expected either an object or an array " + + "of objects representing year times, but found [{}] instead", TYPE, parser.currentToken()); + } + } + + public static class Builder { + + private final Set times = new HashSet<>(); + + private Builder() { + } + + public Builder time(YearTimes time) { + times.add(time); + return this; + } + + public Builder time(YearTimes.Builder builder) { + return time(builder.build()); + } + + public YearlySchedule build() { + return times.isEmpty() ? 
new YearlySchedule() : new YearlySchedule(times.toArray(new YearTimes[times.size()])); + } + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java new file mode 100644 index 0000000000000..7e08f140dafae --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java @@ -0,0 +1,181 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.schedule.engine; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.trigger.schedule.Schedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleRegistry; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEngine; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent; +import org.joda.time.DateTime; + +import java.time.Clock; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; + +import static org.elasticsearch.common.settings.Setting.positiveTimeSetting; +import static org.joda.time.DateTimeZone.UTC; + +public class TickerScheduleTriggerEngine extends ScheduleTriggerEngine { + + public static final Setting TICKER_INTERVAL_SETTING = + positiveTimeSetting("xpack.watcher.trigger.schedule.ticker.tick_interval", TimeValue.timeValueMillis(500), Property.NodeScope); + + private final TimeValue tickInterval; + private volatile Map schedules; + private Ticker ticker; + + public TickerScheduleTriggerEngine(Settings settings, ScheduleRegistry scheduleRegistry, Clock clock) { + super(settings, scheduleRegistry, clock); + this.tickInterval = TICKER_INTERVAL_SETTING.get(settings); + this.schedules = new ConcurrentHashMap<>(); + } + + @Override + public void start(Collection jobs) { + long starTime = clock.millis(); + Map schedules = new ConcurrentHashMap<>(); + for (Watch job : jobs) { + if (job.trigger() instanceof ScheduleTrigger) { + ScheduleTrigger trigger = (ScheduleTrigger) job.trigger(); + schedules.put(job.id(), new ActiveSchedule(job.id(), trigger.getSchedule(), starTime)); + } + } + this.schedules = schedules; + this.ticker = new Ticker(); + } + + @Override + public void stop() { + ticker.close(); + pauseExecution(); + } + + @Override + public void add(Watch watch) { + assert watch.trigger() instanceof ScheduleTrigger; + ScheduleTrigger trigger = (ScheduleTrigger) watch.trigger(); + schedules.put(watch.id(), new ActiveSchedule(watch.id(), trigger.getSchedule(), clock.millis())); + } + + @Override + public void pauseExecution() { + schedules.clear(); + } + + @Override + public int getJobCount() { + return schedules.size(); + } + + 
@Override + public boolean remove(String jobId) { + return schedules.remove(jobId) != null; + } + + void checkJobs() { + long triggeredTime = clock.millis(); + List events = new ArrayList<>(); + for (ActiveSchedule schedule : schedules.values()) { + long scheduledTime = schedule.check(triggeredTime); + if (scheduledTime > 0) { + logger.debug("triggered job [{}] at [{}] (scheduled time was [{}])", schedule.name, + new DateTime(triggeredTime, UTC), new DateTime(scheduledTime, UTC)); + events.add(new ScheduleTriggerEvent(schedule.name, new DateTime(triggeredTime, UTC), + new DateTime(scheduledTime, UTC))); + if (events.size() >= 1000) { + notifyListeners(events); + events.clear(); + } + } + } + if (events.isEmpty() == false) { + notifyListeners(events); + } + } + + protected void notifyListeners(List events) { + consumers.forEach(consumer -> consumer.accept(events)); + } + + static class ActiveSchedule { + + private final String name; + private final Schedule schedule; + private final long startTime; + + private volatile long scheduledTime; + + ActiveSchedule(String name, Schedule schedule, long startTime) { + this.name = name; + this.schedule = schedule; + this.startTime = startTime; + this.scheduledTime = schedule.nextScheduledTimeAfter(startTime, startTime); + } + + /** + * Checks whether the given time is the same or after the scheduled time of this schedule. If so, the scheduled time is + * returned a new scheduled time is computed and set. Otherwise (the given time is before the scheduled time), {@code -1} + * is returned. + */ + public long check(long time) { + if (time < scheduledTime) { + return -1; + } + long prevScheduledTime = scheduledTime == 0 ? time : scheduledTime; + scheduledTime = schedule.nextScheduledTimeAfter(startTime, time); + return prevScheduledTime; + } + } + + class Ticker extends Thread { + + private volatile boolean active = true; + private final CountDownLatch closeLatch = new CountDownLatch(1); + + Ticker() { + super("ticker-schedule-trigger-engine"); + setDaemon(true); + start(); + } + + @Override + public void run() { + while (active) { + logger.trace("checking jobs [{}]", new DateTime(clock.millis(), UTC)); + checkJobs(); + try { + sleep(tickInterval.millis()); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + closeLatch.countDown(); + } + + public void close() { + logger.trace("stopping ticker thread"); + active = false; + try { + closeLatch.await(); + } catch (InterruptedException e) { + logger.warn("caught an interrupted exception when waiting while closing ticker thread", e); + Thread.currentThread().interrupt(); + } + logger.trace("ticker thread stopped"); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/DayOfWeek.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/DayOfWeek.java new file mode 100644 index 0000000000000..db0d2108defce --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/DayOfWeek.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
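A worked sketch of the `ActiveSchedule.check` contract above, using an interval schedule and hypothetical millisecond timestamps (not part of the diff; `ActiveSchedule` is package-visible, so this only compiles next to the engine):

```java
// Start at t=0 with a 10 s interval; the first scheduled time (10000 ms) is computed up front.
ActiveSchedule active = new ActiveSchedule("my-watch",
        new IntervalSchedule(IntervalSchedule.Interval.seconds(10)), 0);

assert active.check(5_000) == -1;     // before the scheduled time -> nothing to fire
long fired = active.check(12_000);    // at/after the scheduled time -> return it and roll forward
assert fired == 10_000L;              // the tick that was due at the 10 s mark
```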
+ */ +package org.elasticsearch.xpack.watcher.trigger.schedule.support; + +import org.elasticsearch.ElasticsearchParseException; + +import java.util.EnumSet; +import java.util.Locale; + +public enum DayOfWeek { + + SUNDAY("SUN"), + MONDAY("MON"), + TUESDAY("TUE"), + WEDNESDAY("WED"), + THURSDAY("THU"), + FRIDAY("FRI"), + SATURDAY("SAT"); + + private final String cronKey; + + DayOfWeek(String cronKey) { + this.cronKey = cronKey; + } + + public static String cronPart(EnumSet days) { + StringBuilder sb = new StringBuilder(); + for (DayOfWeek day : days) { + if (sb.length() != 0) { + sb.append(","); + } + sb.append(day.cronKey); + } + return sb.toString(); + } + + public static DayOfWeek resolve(int day) { + switch (day) { + case 1: return SUNDAY; + case 2: return MONDAY; + case 3: return TUESDAY; + case 4: return WEDNESDAY; + case 5: return THURSDAY; + case 6: return FRIDAY; + case 7: return SATURDAY; + default: + throw new ElasticsearchParseException("unknown day of week number [{}]", day); + } + } + + public static DayOfWeek resolve(String day) { + switch (day.toLowerCase(Locale.ROOT)) { + case "1": + case "sun": + case "sunday": return SUNDAY; + case "2": + case "mon": + case "monday": return MONDAY; + case "3": + case "tue": + case "tuesday": return TUESDAY; + case "4": + case "wed": + case "wednesday": return WEDNESDAY; + case "5": + case "thu": + case "thursday": return THURSDAY; + case "6": + case "fri": + case "friday": return FRIDAY; + case "7": + case "sat": + case "saturday": return SATURDAY; + default: + throw new ElasticsearchParseException("unknown day of week [{}]", day); + } + } + + + @Override + public String toString() { + return cronKey; + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/DayTimes.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/DayTimes.java new file mode 100644 index 0000000000000..cfa809132ebeb --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/DayTimes.java @@ -0,0 +1,288 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
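A tiny sketch of how the enum above folds into the day-of-week part of a cron expression (not part of the diff):

```java
// resolve() accepts numbers, short names and full names; cronPart() joins the cron keys in enum order.
EnumSet<DayOfWeek> days = EnumSet.of(DayOfWeek.resolve("mon"), DayOfWeek.resolve("friday"));
assert "MON,FRI".equals(DayOfWeek.cronPart(days));
```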
+ */ +package org.elasticsearch.xpack.watcher.trigger.schedule.support; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArgument; +import static org.elasticsearch.xpack.watcher.support.Strings.join; + +public class DayTimes implements Times { + + public static final DayTimes NOON = new DayTimes("noon", new int[] { 12 }, new int[] { 0 }); + public static final DayTimes MIDNIGHT = new DayTimes("midnight", new int[] { 0 }, new int[] { 0 }); + + final int[] hour; + final int[] minute; + final String time; + + public DayTimes() { + this(0, 0); + } + + public DayTimes(int hour, int minute) { + this(new int[] { hour }, new int[] { minute }); + } + + public DayTimes(int[] hour, int[] minute) { + this(null, hour, minute); + } + + DayTimes(String time, int[] hour, int[] minute) { + this.time = time; + this.hour = hour; + this.minute = minute; + validate(); + } + + public int[] hour() { + return hour; + } + + public int[] minute() { + return minute; + } + + public String time() { + return time; + } + + public static DayTimes parse(String time) throws ElasticsearchParseException { + if (NOON.time.equals(time)) { + return NOON; + } + if (MIDNIGHT.time.equals(time)) { + return MIDNIGHT; + } + int[] hour; + int[] minute; + int i = time.indexOf(":"); + if (i < 0) { + throw new ElasticsearchParseException("could not parse time [{}]. time format must be in the form of hh:mm", time); + } + if (i == time.length() - 1 || time.indexOf(":", i + 1) >= 0) { + throw new ElasticsearchParseException("could not parse time [{}]. time format must be in the form of hh:mm", time); + } + String hrStr = time.substring(0, i); + String minStr = time.substring(i + 1); + if (hrStr.length() != 1 && hrStr.length() != 2) { + throw new ElasticsearchParseException("could not parse time [{}]. time format must be in the form of hh:mm", time); + } + if (minStr.length() != 2) { + throw new ElasticsearchParseException("could not parse time [{}]. time format must be in the form of hh:mm", time); + } + try { + hour = new int[] { Integer.parseInt(hrStr) }; + } catch (NumberFormatException nfe) { + throw new ElasticsearchParseException("could not parse time [{}]. time hour [{}] is not a number", time, hrStr); + } + try { + minute = new int[] { Integer.parseInt(minStr) }; + } catch (NumberFormatException nfe) { + throw new ElasticsearchParseException("could not parse time [{}]. time minute [{}] is not a number", time, minStr); + } + try { + return new DayTimes(time, hour, minute); + } catch (IllegalArgumentException iae) { + throw new ElasticsearchParseException("could not parse time [{}]", iae); + } + } + + public void validate() { + for (int i = 0; i < hour.length; i++) { + if (!validHour(hour[i])) { + throw illegalArgument("invalid time [{}]. invalid time hour value [{}]. time hours must be between 0 and 23 incl.", + this, hour[i]); + } + } + for (int i = 0; i < minute.length; i++) { + if (!validMinute(minute[i])) { + throw illegalArgument("invalid time [{}]. invalid time minute value [{}]. 
time minutes must be between 0 and 59 incl.", + this, minute[i]); + } + } + } + + static boolean validHour(int hour) { + return hour >= 0 && hour < 24; + } + + static boolean validMinute(int minute) { + return minute >= 0 && minute < 60; + } + + public String cron() { + String hrs = join(",", hour); + String mins = join(",", minute); + return "0 " + mins + " " + hrs + " * * ?"; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (time != null) { + return builder.value(time); + } + return builder.startObject() + .array(HOUR_FIELD.getPreferredName(), hour) + .array(MINUTE_FIELD.getPreferredName(), minute) + .endObject(); + } + + @Override + public String toString() { + if (time != null) { + return time; + } + StringBuilder sb = new StringBuilder(); + for (int h = 0; h < hour.length; h++) { + for (int m = 0; m < minute.length; m++) { + if (sb.length() > 0) { + sb.append(", "); + } + if (hour[h] < 10) { + sb.append("0"); + } + sb.append(hour[h]).append(":"); + if (minute[m] < 10) { + sb.append("0"); + } + sb.append(minute[m]); + } + } + return sb.toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + DayTimes time = (DayTimes) o; + + if (!Arrays.equals(hour, time.hour)) return false; + if (!Arrays.equals(minute, time.minute)) return false; + + return true; + } + + @Override + public int hashCode() { + int result = Arrays.hashCode(hour); + result = 31 * result + Arrays.hashCode(minute); + return result; + } + + public static DayTimes parse(XContentParser parser, XContentParser.Token token) throws IOException, ElasticsearchParseException { + if (token == XContentParser.Token.VALUE_STRING) { + return DayTimes.parse(parser.text()); + } + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("could not parse time. expected string/number value or an object, but found [{}]", token); + } + List hours = new ArrayList<>(); + List minutes = new ArrayList<>(); + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (HOUR_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + if (token.isValue()) { + hours.add(parseHourValue(parser, token)); + } else if (token == XContentParser.Token.START_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + hours.add(parseHourValue(parser, token)); + } + } else { + throw new ElasticsearchParseException("invalid time hour value. expected string/number value or an array of " + + "string/number values, but found [{}]", token); + } + } else if (MINUTE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + if (token.isValue()) { + minutes.add(parseMinuteValue(parser, token)); + } else if (token == XContentParser.Token.START_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + minutes.add(parseMinuteValue(parser, token)); + } + } else { + throw new ElasticsearchParseException("invalid time minute value. 
expected string/number value or an array of " + + "string/number values, but found [{}]", token); + } + } + } + if (hours.isEmpty()) { + hours.add(0); + } + if (minutes.isEmpty()) { + minutes.add(0); + } + return new DayTimes(CollectionUtils.toArray(hours), CollectionUtils.toArray(minutes)); + } + + public static int parseHourValue(XContentParser parser, XContentParser.Token token) throws IOException, ElasticsearchParseException { + switch (token) { + case VALUE_NUMBER: + int hour = parser.intValue(); + if (!DayTimes.validHour(hour)) { + throw new ElasticsearchParseException("invalid time hour value [{}] (possible values may be between 0 and 23 incl.)", + hour); + } + return hour; + + case VALUE_STRING: + String value = parser.text(); + try { + hour = Integer.valueOf(value); + if (!DayTimes.validHour(hour)) { + String msg = "invalid time hour value [{}] (possible values may be between 0 and 23 incl.)"; + throw new ElasticsearchParseException(msg, hour); + } + return hour; + } catch (NumberFormatException nfe) { + throw new ElasticsearchParseException("invalid time hour value [{}]", value); + } + + default: + throw new ElasticsearchParseException("invalid hour value. expected string/number value, but found [{}]", token); + } + } + + public static int parseMinuteValue(XContentParser parser, XContentParser.Token token) throws IOException, ElasticsearchParseException { + switch (token) { + case VALUE_NUMBER: + int minute = parser.intValue(); + if (!DayTimes.validMinute(minute)) { + throw new ElasticsearchParseException("invalid time minute value [{}] (possible values may be between 0 and 59 incl.)", + minute); + } + return minute; + + case VALUE_STRING: + String value = parser.text(); + try { + minute = Integer.valueOf(value); + if (!DayTimes.validMinute(minute)) { + throw new ElasticsearchParseException("invalid time minute value [{}] (possible values may be between 0 and 59 " + + "incl.)", minute); + } + return minute; + } catch (NumberFormatException nfe) { + throw new ElasticsearchParseException("invalid time minute value [{}]", value); + } + + default: + throw new ElasticsearchParseException("invalid time minute value. expected string/number value, but found [{}]", token); + } + } + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/Month.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/Month.java new file mode 100644 index 0000000000000..d683cc3cd008b --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/Month.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
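A brief sketch of the `hh:mm` parsing and the cron fragment it yields, per `DayTimes.parse(String)` and `cron()` above (not part of the diff):

```java
DayTimes time = DayTimes.parse("17:30");       // hour 17, minute 30; "noon" and "midnight" are also accepted
assert "0 30 17 * * ?".equals(time.cron());    // seconds, minutes, hours, day-of-month, month, day-of-week
```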
+ */ +package org.elasticsearch.xpack.watcher.trigger.schedule.support; + +import org.elasticsearch.ElasticsearchParseException; + +import java.util.EnumSet; +import java.util.Locale; + +public enum Month { + + JANUARY("JAN"), + FEBRUARY("FEB"), + MARCH("MAR"), + APRIL("APR"), + MAY("MAY"), + JUNE("JUN"), + JULY("JUL"), + AUGUST("AUG"), + SEPTEMBER("SEP"), + OCTOBER("OCT"), + NOVEMBER("NOV"), + DECEMBER("DEC"); + + private final String cronKey; + + Month(String cronKey) { + this.cronKey = cronKey; + } + + public static String cronPart(EnumSet days) { + StringBuilder sb = new StringBuilder(); + for (Month day : days) { + if (sb.length() != 0) { + sb.append(","); + } + sb.append(day.cronKey); + } + return sb.toString(); + } + + public static Month resolve(int month) { + switch (month) { + case 1: return JANUARY; + case 2: return FEBRUARY; + case 3: return MARCH; + case 4: return APRIL; + case 5: return MAY; + case 6: return JUNE; + case 7: return JULY; + case 8: return AUGUST; + case 9: return SEPTEMBER; + case 10: return OCTOBER; + case 11: return NOVEMBER; + case 12: return DECEMBER; + default: + throw new ElasticsearchParseException("unknown month number [{}]", month); + } + } + + public static Month resolve(String day) { + switch (day.toLowerCase(Locale.ROOT)) { + case "1": + case "jan": + case "first": + case "january": return JANUARY; + case "2": + case "feb": + case "february": return FEBRUARY; + case "3": + case "mar": + case "march": return MARCH; + case "4": + case "apr": + case "april": return APRIL; + case "5": + case "may": return MAY; + case "6": + case "jun": + case "june": return JUNE; + case "7": + case "jul": + case "july": return JULY; + case "8": + case "aug": + case "august": return AUGUST; + case "9": + case "sep": + case "september": return SEPTEMBER; + case "10": + case "oct": + case "october": return OCTOBER; + case "11": + case "nov": + case "november": return NOVEMBER; + case "12": + case "dec": + case "last": + case "december": return DECEMBER; + default: + throw new ElasticsearchParseException("unknown month [{}]", day); + } + } + + + @Override + public String toString() { + return cronKey; + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/MonthTimes.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/MonthTimes.java new file mode 100644 index 0000000000000..a3be7b3d148e0 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/MonthTimes.java @@ -0,0 +1,249 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.trigger.schedule.support; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Locale; +import java.util.Set; + +import static org.elasticsearch.common.util.set.Sets.newHashSet; +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArgument; +import static org.elasticsearch.xpack.watcher.support.Strings.join; + +public class MonthTimes implements Times { + + public static final String LAST = "last_day"; + public static final String FIRST = "first_day"; + + public static final int[] DEFAULT_DAYS = new int[] { 1 }; + public static final DayTimes[] DEFAULT_TIMES = new DayTimes[] { new DayTimes() }; + + private final int[] days; + private final DayTimes[] times; + + public MonthTimes() { + this(DEFAULT_DAYS, DEFAULT_TIMES); + } + + public MonthTimes(int[] days, DayTimes[] times) { + this.days = days.length == 0 ? DEFAULT_DAYS : days; + Arrays.sort(this.days); + this.times = times.length == 0 ? DEFAULT_TIMES : times; + validate(); + } + + void validate() { + for (int day : days) { + if (day < 1 || day > 32) { //32 represents the last day of the month + throw illegalArgument("invalid month day [{}]", day); + } + } + for (DayTimes dayTimes : times) { + dayTimes.validate(); + } + } + + public int[] days() { + return days; + } + + public DayTimes[] times() { + return times; + } + + public Set crons() { + Set crons = new HashSet<>(); + for (DayTimes times : this.times) { + String hrsStr = join(",", times.hour); + String minsStr = join(",", times.minute); + String daysStr = join(",", this.days); + daysStr = daysStr.replace("32", "L"); + crons.add("0 " + minsStr + " " + hrsStr + " " + daysStr + " * ?"); + } + return crons; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + MonthTimes that = (MonthTimes) o; + + if (!Arrays.equals(days, that.days)) return false; + // order doesn't matter + if (!newHashSet(times).equals(newHashSet(that.times))) return false; + + return true; + } + + @Override + public int hashCode() { + int result = Arrays.hashCode(days); + result = 31 * result + Arrays.hashCode(times); + return result; + } + + @Override + public String toString() { + return String.format( + Locale.ROOT, + "days [%s], times [%s]", + join(",", days), + Strings.arrayToCommaDelimitedString(times) + ); + } + + public boolean contains(int day, DayTimes dayTimes) { + if (Arrays.binarySearch(days, day) == -1) { //days are already sorted + return false; + } + for (DayTimes dayTimes1 : this.times()) { + if (dayTimes.equals(dayTimes1)) { + return true; + } + } + return false; + } + + public boolean intersects(MonthTimes testTimes) { + for (int day : testTimes.days()) { + for (DayTimes dayTimes : testTimes.times()) { + if (contains(day, dayTimes)) { + return true; + } + } + } + return false; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.array(DAY_FIELD.getPreferredName(), days); + builder.startArray(TIME_FIELD.getPreferredName()); + for (DayTimes dayTimes : times) { + dayTimes.toXContent(builder, params); + } + builder.endArray(); + 
return builder.endObject(); + } + + public static Builder builder() { + return new Builder(); + } + + public static MonthTimes parse(XContentParser parser, XContentParser.Token token) throws IOException, ElasticsearchParseException { + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("could not parse month times. expected an object, but found [{}]", token); + } + Set daysSet = new HashSet<>(); + Set timesSet = new HashSet<>(); + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (DAY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + if (token.isValue()) { + daysSet.add(parseDayValue(parser, token)); + } else if (token == XContentParser.Token.START_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + daysSet.add(parseDayValue(parser, token)); + } + } else { + throw new ElasticsearchParseException("invalid month day value for [{}] field. expected string/number value or an " + + "array of string/number values, but found [{}]", currentFieldName, token); + } + } else if (TIME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + if (token != XContentParser.Token.START_ARRAY) { + try { + timesSet.add(DayTimes.parse(parser, token)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("invalid time value for field [{}] - [{}]", pe, currentFieldName, token); + } + } else { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + try { + timesSet.add(DayTimes.parse(parser, token)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("invalid time value for field [{}] - [{}]", pe, currentFieldName, token); + } + } + } + } + } + int[] days = daysSet.isEmpty() ? DEFAULT_DAYS : CollectionUtils.toArray(daysSet); + DayTimes[] times = timesSet.isEmpty() ? new DayTimes[] { new DayTimes(0, 0) } : timesSet.toArray(new DayTimes[timesSet.size()]); + return new MonthTimes(days, times); + } + + static int parseDayValue(XContentParser parser, XContentParser.Token token) throws IOException { + if (token == XContentParser.Token.VALUE_STRING) { + String value = parser.text().toLowerCase(Locale.ROOT); + if (LAST.equals(value)) { + return 32; + } + if (FIRST.equals(value)) { + return 1; + } + try { + return Integer.parseInt(value); + } catch (NumberFormatException nfe) { + throw new ElasticsearchParseException("invalid month day value. string value [{}] cannot be", value); + } + } + if (token == XContentParser.Token.VALUE_NUMBER) { + return parser.intValue(); + } + throw new ElasticsearchParseException("invalid month day value. expected a string or a number value, but found [{}]", token); + } + + public static class Builder { + + private final Set days = new HashSet<>(); + private final Set times = new HashSet<>(); + + private Builder() { + } + + public Builder on(int... days) { + Arrays.stream(days).forEach(this.days::add); + return this; + } + + public Builder at(int hour, int minute) { + times.add(new DayTimes(hour, minute)); + return this; + } + + public Builder atRoundHour(int... 
hours) { + times.add(new DayTimes(hours, new int[] { 0 })); + return this; + } + + public Builder atNoon() { + times.add(DayTimes.NOON); + return this; + } + + public Builder atMidnight() { + times.add(DayTimes.MIDNIGHT); + return this; + } + + public MonthTimes build() { + return new MonthTimes(CollectionUtils.toArray(days), times.toArray(new DayTimes[times.size()])); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/Times.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/Times.java new file mode 100644 index 0000000000000..75d3b53d7eb7d --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/Times.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.schedule.support; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ToXContentObject; + +public interface Times extends ToXContentObject { + + ParseField MONTH_FIELD = new ParseField("in", "month"); + ParseField DAY_FIELD = new ParseField("on", "day"); + ParseField TIME_FIELD = new ParseField("at", "time"); + ParseField HOUR_FIELD = new ParseField("hour"); + ParseField MINUTE_FIELD = new ParseField("minute"); + +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/WeekTimes.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/WeekTimes.java new file mode 100644 index 0000000000000..052179880ed4d --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/WeekTimes.java @@ -0,0 +1,197 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.schedule.support; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.Set; + +import static org.elasticsearch.common.util.set.Sets.newHashSet; +import static org.elasticsearch.xpack.watcher.support.Strings.join; + +public class WeekTimes implements Times { + + public static final EnumSet DEFAULT_DAYS = EnumSet.of(DayOfWeek.MONDAY); + public static final DayTimes[] DEFAULT_TIMES = new DayTimes[] { new DayTimes() }; + + private final EnumSet days; + private final DayTimes[] times; + + public WeekTimes() { + this(DEFAULT_DAYS, DEFAULT_TIMES); + } + + public WeekTimes(DayOfWeek day, DayTimes times) { + this(day, new DayTimes[] { times }); + } + + public WeekTimes(DayOfWeek day, DayTimes[] times) { + this(EnumSet.of(day), times); + } + + public WeekTimes(EnumSet days, DayTimes[] times) { + this.days = days.isEmpty() ? DEFAULT_DAYS : days; + this.times = times.length == 0 ? 
DEFAULT_TIMES : times; + } + + public EnumSet days() { + return days; + } + + public DayTimes[] times() { + return times; + } + + public Set crons() { + Set crons = new HashSet<>(); + for (DayTimes times : this.times) { + String hrsStr = join(",", times.hour); + String minsStr = join(",", times.minute); + String daysStr = DayOfWeek.cronPart(this.days); + crons.add("0 " + minsStr + " " + hrsStr + " ? * " + daysStr); + } + return crons; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + WeekTimes that = (WeekTimes) o; + + if (!days.equals(that.days)) return false; + + // we don't care about order + if (!newHashSet(times).equals(newHashSet(that.times))) return false; + + return true; + } + + @Override + public int hashCode() { + int result = days.hashCode(); + result = 31 * result + Arrays.hashCode(times); + return result; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(DAY_FIELD.getPreferredName(), days); + builder.startArray(TIME_FIELD.getPreferredName()); + for (DayTimes dayTimes : times) { + dayTimes.toXContent(builder, params); + } + builder.endArray(); + return builder.endObject(); + } + + public static Builder builder() { + return new Builder(); + } + + public static WeekTimes parse(XContentParser parser, XContentParser.Token token) throws IOException, ElasticsearchParseException { + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("could not parse week times. expected an object, but found [{}]", token); + } + Set daysSet = new HashSet<>(); + Set timesSet = new HashSet<>(); + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (DAY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + if (token.isValue()) { + daysSet.add(parseDayValue(parser, token)); + } else if (token == XContentParser.Token.START_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + daysSet.add(parseDayValue(parser, token)); + } + } else { + throw new ElasticsearchParseException("invalid week day value for [{}] field. expected string/number value or an " + + "array of string/number values, but found [{}]", currentFieldName, token); + } + } else if (TIME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + if (token != XContentParser.Token.START_ARRAY) { + try { + timesSet.add(DayTimes.parse(parser, token)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("invalid time value for field [{}] - [{}]", pe, currentFieldName, token); + } + } else { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + try { + timesSet.add(DayTimes.parse(parser, token)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("invalid time value for field [{}] - [{}]", pe, currentFieldName, token); + } + } + } + } + } + EnumSet days = daysSet.isEmpty() ? EnumSet.of(DayOfWeek.MONDAY) : EnumSet.copyOf(daysSet); + DayTimes[] times = timesSet.isEmpty() ? 
new DayTimes[] { new DayTimes(0, 0) } : timesSet.toArray(new DayTimes[timesSet.size()]); + return new WeekTimes(days, times); + } + + static DayOfWeek parseDayValue(XContentParser parser, XContentParser.Token token) throws IOException { + if (token == XContentParser.Token.VALUE_STRING) { + return DayOfWeek.resolve(parser.text()); + } + if (token == XContentParser.Token.VALUE_NUMBER) { + return DayOfWeek.resolve(parser.intValue()); + } + throw new ElasticsearchParseException("invalid weekly day value. expected a string or a number value, but found [" + token + "]"); + } + + public static class Builder { + + private final Set days = new HashSet<>(); + private final Set times = new HashSet<>(); + + private Builder() { + } + + public Builder on(DayOfWeek... days) { + Collections.addAll(this.days, days); + return this; + } + + public Builder at(int hour, int minute) { + times.add(new DayTimes(hour, minute)); + return this; + } + + public Builder atRoundHour(int... hours) { + times.add(new DayTimes(hours, new int[] { 0 })); + return this; + } + + public Builder atNoon() { + times.add(DayTimes.NOON); + return this; + } + + public Builder atMidnight() { + times.add(DayTimes.MIDNIGHT); + return this; + } + + public WeekTimes build() { + EnumSet dow = days.isEmpty() ? WeekTimes.DEFAULT_DAYS : EnumSet.copyOf(days); + return new WeekTimes(dow, times.toArray(new DayTimes[times.size()])); + } + + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/YearTimes.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/YearTimes.java new file mode 100644 index 0000000000000..a4941c8335f05 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/support/YearTimes.java @@ -0,0 +1,246 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.schedule.support; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.Locale; +import java.util.Set; + +import static org.elasticsearch.common.util.set.Sets.newHashSet; +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArgument; +import static org.elasticsearch.xpack.watcher.support.Strings.join; + +public class YearTimes implements Times { + + public static final EnumSet DEFAULT_MONTHS = EnumSet.of(Month.JANUARY); + public static final int[] DEFAULT_DAYS = new int[] { 1 }; + public static final DayTimes[] DEFAULT_TIMES = new DayTimes[] { new DayTimes() }; + + private final EnumSet months; + private final int[] days; + private final DayTimes[] times; + + public YearTimes() { + this(DEFAULT_MONTHS, DEFAULT_DAYS, DEFAULT_TIMES); + } + + public YearTimes(EnumSet months, int[] days, DayTimes[] times) { + this.months = months.isEmpty() ? DEFAULT_MONTHS : months; + this.days = days.length == 0 ? DEFAULT_DAYS : days; + Arrays.sort(this.days); + this.times = times.length == 0 ? 
DEFAULT_TIMES : times; + validate(); + } + + void validate() { + for (int day : days) { + if (day < 1 || day > 32) { //32 represents the last day of the month + throw illegalArgument("invalid month day [{}]", day); + } + } + for (DayTimes dayTimes : times) { + dayTimes.validate(); + } + } + + public EnumSet months() { + return months; + } + + public int[] days() { + return days; + } + + public DayTimes[] times() { + return times; + } + + public Set crons() { + Set crons = new HashSet<>(); + for (DayTimes times : this.times) { + String hrsStr = join(",", times.hour); + String minsStr = join(",", times.minute); + String daysStr = join(",", this.days); + daysStr = daysStr.replace("32", "L"); + String monthsStr = Strings.collectionToCommaDelimitedString(months); + String expression = "0 " + minsStr + " " + hrsStr + " " + daysStr + " " + monthsStr + " ?"; + crons.add(expression); + } + return crons; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + YearTimes that = (YearTimes) o; + + if (!Arrays.equals(days, that.days)) return false; + if (!months.equals(that.months)) return false; + // order doesn't matter + if (!newHashSet(times).equals(newHashSet(that.times))) return false; + + return true; + } + + @Override + public int hashCode() { + int result = months.hashCode(); + result = 31 * result + Arrays.hashCode(days); + result = 31 * result + Arrays.hashCode(times); + return result; + } + + @Override + public String toString() { + return String.format( + Locale.ROOT, + "months [%s], days [%s], times [%s]", + Strings.collectionToCommaDelimitedString(months), + join(",", days), + Strings.arrayToCommaDelimitedString(times) + ); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MONTH_FIELD.getPreferredName(), months); + builder.array(DAY_FIELD.getPreferredName(), days); + builder.startArray(TIME_FIELD.getPreferredName()); + for (DayTimes dayTimes : times) { + dayTimes.toXContent(builder, params); + } + builder.endArray(); + return builder.endObject(); + } + + public static Builder builder() { + return new Builder(); + } + + public static YearTimes parse(XContentParser parser, XContentParser.Token token) throws IOException, ElasticsearchParseException { + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("could not parse year times. expected an object, but found [{}]", token); + } + Set monthsSet = new HashSet<>(); + Set daysSet = new HashSet<>(); + Set timesSet = new HashSet<>(); + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (MONTH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + if (token.isValue()) { + monthsSet.add(parseMonthValue(parser, token)); + } else if (token == XContentParser.Token.START_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + monthsSet.add(parseMonthValue(parser, token)); + } + } else { + throw new ElasticsearchParseException("invalid year month value for [{}] field. 
expected string/number value or an " + + "array of string/number values, but found [{}]", currentFieldName, token); + } + } else if (DAY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + if (token.isValue()) { + daysSet.add(MonthTimes.parseDayValue(parser, token)); + } else if (token == XContentParser.Token.START_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + daysSet.add(MonthTimes.parseDayValue(parser, token)); + } + } else { + throw new ElasticsearchParseException("invalid year day value for [{}] field. expected string/number value or an " + + "array of string/number values, but found [{}]", currentFieldName, token); + } + } else if (TIME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + if (token != XContentParser.Token.START_ARRAY) { + try { + timesSet.add(DayTimes.parse(parser, token)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("invalid time value for field [{}] - [{}]", pe, currentFieldName, token); + } + } else { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + try { + timesSet.add(DayTimes.parse(parser, token)); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("invalid time value for field [{}] - [{}]", pe, currentFieldName, token); + } + } + } + } + } + EnumSet months = monthsSet.isEmpty() ? DEFAULT_MONTHS : EnumSet.copyOf(monthsSet); + int[] days = daysSet.isEmpty() ? DEFAULT_DAYS : CollectionUtils.toArray(daysSet); + DayTimes[] times = timesSet.isEmpty() ? new DayTimes[] { new DayTimes(0, 0) } : timesSet.toArray(new DayTimes[timesSet.size()]); + return new YearTimes(months, days, times); + } + + static Month parseMonthValue(XContentParser parser, XContentParser.Token token) throws IOException { + if (token == XContentParser.Token.VALUE_STRING) { + return Month.resolve(parser.text()); + } + if (token == XContentParser.Token.VALUE_NUMBER) { + return Month.resolve(parser.intValue()); + } + throw new ElasticsearchParseException("invalid year month value. expected a string or a number value, but found [{}]", token); + } + + public static class Builder { + + private final Set months = new HashSet<>(); + private final Set days = new HashSet<>(); + private final Set times = new HashSet<>(); + + private Builder() { + } + + public Builder in(Month... months) { + Collections.addAll(this.months, months); + return this; + } + + public Builder on(int... days) { + Arrays.stream(days).forEach(this.days::add); + return this; + } + + public Builder at(int hour, int minute) { + times.add(new DayTimes(hour, minute)); + return this; + } + + public Builder atRoundHour(int... 
hours) { + times.add(new DayTimes(hours, new int[] { 0 })); + return this; + } + + public Builder atNoon() { + times.add(DayTimes.NOON); + return this; + } + + public Builder atMidnight() { + times.add(DayTimes.MIDNIGHT); + return this; + } + + public YearTimes build() { + return new YearTimes(EnumSet.copyOf(months), CollectionUtils.toArray(days), times.toArray(new DayTimes[times.size()])); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java new file mode 100644 index 0000000000000..33b1217895dca --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.schedule.tool; + +import java.util.Arrays; +import java.util.List; + +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.LoggingAwareCommand; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.xpack.core.scheduler.Cron; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.DateTimeFormatter; + +public class CronEvalTool extends LoggingAwareCommand { + + public static void main(String[] args) throws Exception { + exit(new CronEvalTool().main(args, Terminal.DEFAULT)); + } + + private static final DateTimeFormatter formatter = DateTimeFormat.forPattern("EEE, d MMM yyyy HH:mm:ss"); + + private final OptionSpec countOption; + private final OptionSpec arguments; + + CronEvalTool() { + super("Validates and evaluates a cron expression"); + this.countOption = parser.acceptsAll(Arrays.asList("c", "count"), + "The number of future times this expression will be triggered") + // TODO: change this to ofType(Integer.class) with jopt-simple 5.0 + // before then it will cause a security exception in tests + .withRequiredArg().defaultsTo("10"); + this.arguments = parser.nonOptions("expression"); + } + + @Override + protected void execute(Terminal terminal, OptionSet options) throws Exception { + int count = Integer.parseInt(countOption.value(options)); + List args = arguments.values(options); + if (args.size() != 1) { + throw new UserException(ExitCodes.USAGE, "expecting a single argument that is the cron expression to evaluate"); + } + execute(terminal, args.get(0), count); + } + + void execute(Terminal terminal, String expression, int count) throws Exception { + Cron.validate(expression); + terminal.println("Valid!"); + + DateTime date = DateTime.now(DateTimeZone.UTC); + terminal.println("Now is [" + formatter.print(date) + "]"); + terminal.println("Here are the next " + count + " times this cron expression will trigger:"); + + Cron cron = new Cron(expression); + long time = date.getMillis(); + for (int i = 0; i < count; i++) { + long prevTime = time; + time = cron.getNextValidTimeAfter(time); + if (time < 0) { + throw new UserException(ExitCodes.OK, (i + 1) + ".\t Could not compute future times since [" + + formatter.print(prevTime) + "] " + "(perhaps the cron 
expression only points to times in the past?)"); + } + terminal.println((i+1) + ".\t" + formatter.print(time)); + } + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/watch/WatchParser.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/watch/WatchParser.java new file mode 100644 index 0000000000000..524913105823a --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/watch/WatchParser.java @@ -0,0 +1,202 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.watch; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.watcher.actions.ActionRegistry; +import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; +import org.elasticsearch.xpack.core.watcher.actions.ActionWrapper; +import org.elasticsearch.xpack.core.watcher.common.secret.Secret; +import org.elasticsearch.xpack.core.watcher.condition.ExecutableCondition; +import org.elasticsearch.xpack.core.watcher.crypto.CryptoService; +import org.elasticsearch.xpack.core.watcher.input.ExecutableInput; +import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherXContentParser; +import org.elasticsearch.xpack.core.watcher.transform.ExecutableTransform; +import org.elasticsearch.xpack.core.watcher.trigger.Trigger; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.core.watcher.watch.WatchField; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.input.InputRegistry; +import org.elasticsearch.xpack.watcher.input.none.ExecutableNoneInput; +import org.elasticsearch.xpack.watcher.trigger.TriggerService; +import org.joda.time.DateTime; + +import java.io.IOException; +import java.io.InputStream; +import java.time.Clock; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.unmodifiableMap; +import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.ioException; +import static org.joda.time.DateTimeZone.UTC; + +public class WatchParser extends AbstractComponent { + + private final TriggerService triggerService; + private final ActionRegistry actionRegistry; + private final InputRegistry inputRegistry; + private final CryptoService cryptoService; + private final Clock clock; + private final ExecutableInput defaultInput; + private final ExecutableCondition defaultCondition; + private final List defaultActions; + + 
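+    /*
+     * A watch source is a JSON object made up of the top level sections handled in
+     * parse() below, for example (illustrative only, section contents abbreviated):
+     *
+     *   { "trigger":   { "schedule": { "interval": "5m" } },
+     *     "input":     { "search": { ... } },
+     *     "condition": { "compare": { ... } },
+     *     "actions":   { "email_admin": { "email": { ... } } } }
+     *
+     * Sections that are absent fall back to the defaults initialized in the constructor:
+     * a no-op ("none") input, an always-true condition, and an empty action list.
+     */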
public WatchParser(Settings settings, TriggerService triggerService, ActionRegistry actionRegistry, InputRegistry inputRegistry, + @Nullable CryptoService cryptoService, Clock clock) { + super(settings); + this.triggerService = triggerService; + this.actionRegistry = actionRegistry; + this.inputRegistry = inputRegistry; + this.cryptoService = cryptoService; + this.clock = clock; + this.defaultInput = new ExecutableNoneInput(logger); + this.defaultCondition = InternalAlwaysCondition.INSTANCE; + this.defaultActions = Collections.emptyList(); + } + + public Watch parse(String name, boolean includeStatus, BytesReference source, XContentType xContentType) throws IOException { + return parse(name, includeStatus, false, source, new DateTime(clock.millis(), UTC), xContentType, false); + } + + public Watch parse(String name, boolean includeStatus, BytesReference source, DateTime now, + XContentType xContentType) throws IOException { + return parse(name, includeStatus, false, source, now, xContentType, false); + } + + /** + * Parses the watch represented by the given source. When parsing, any sensitive data that the + * source might contain (e.g. passwords) will be converted to {@link Secret secrets} + * Such that the returned watch will potentially hide this sensitive data behind a "secret". A secret + * is an abstraction around sensitive data (text). When security is enabled, the + * {@link CryptoService} is used to encrypt the secrets. + * + * This method is only called once - when the user adds a new watch. From that moment on, all representations + * of the watch in the system will be use secrets for sensitive data. + * + */ + public Watch parseWithSecrets(String id, boolean includeStatus, BytesReference source, DateTime now, + XContentType xContentType, boolean allowRedactedPasswords) throws IOException { + return parse(id, includeStatus, true, source, now, xContentType, allowRedactedPasswords); + } + + public Watch parseWithSecrets(String id, boolean includeStatus, BytesReference source, DateTime now, + XContentType xContentType) throws IOException { + return parse(id, includeStatus, true, source, now, xContentType, false); + } + + private Watch parse(String id, boolean includeStatus, boolean withSecrets, BytesReference source, DateTime now, + XContentType xContentType, boolean allowRedactedPasswords) throws IOException { + if (logger.isTraceEnabled()) { + logger.trace("parsing watch [{}] ", source.utf8ToString()); + } + // EMPTY is safe here because we never use namedObject + try (InputStream stream = source.streamInput(); + WatcherXContentParser parser = new WatcherXContentParser(xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, stream), + now, withSecrets ? 
cryptoService : null, allowRedactedPasswords)) { + parser.nextToken(); + return parse(id, includeStatus, parser); + } catch (IOException ioe) { + throw ioException("could not parse watch [{}]", ioe, id); + } + } + + public Watch parse(String id, boolean includeStatus, WatcherXContentParser parser) throws IOException { + Trigger trigger = null; + ExecutableInput input = defaultInput; + ExecutableCondition condition = defaultCondition; + List actions = defaultActions; + ExecutableTransform transform = null; + TimeValue throttlePeriod = null; + Map metatdata = null; + WatchStatus status = null; + long version = Versions.MATCH_ANY; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == null ) { + throw new ElasticsearchParseException("could not parse watch [{}]. null token", id); + } else if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == null || currentFieldName == null) { + throw new ElasticsearchParseException("could not parse watch [{}], unexpected token [{}]", id, token); + } else if (WatchField.TRIGGER.match(currentFieldName, parser.getDeprecationHandler())) { + trigger = triggerService.parseTrigger(id, parser); + } else if (WatchField.INPUT.match(currentFieldName, parser.getDeprecationHandler())) { + input = inputRegistry.parse(id, parser); + } else if (WatchField.CONDITION.match(currentFieldName, parser.getDeprecationHandler())) { + condition = actionRegistry.getConditionRegistry().parseExecutable(id, parser); + } else if (WatchField.TRANSFORM.match(currentFieldName, parser.getDeprecationHandler())) { + transform = actionRegistry.getTransformRegistry().parse(id, parser); + } else if (WatchField.THROTTLE_PERIOD.match(currentFieldName, parser.getDeprecationHandler())) { + throttlePeriod = timeValueMillis(parser.longValue()); + } else if (WatchField.THROTTLE_PERIOD_HUMAN.match(currentFieldName, parser.getDeprecationHandler())) { + // Parser for human specified and 2.x backwards compatible throttle period + try { + throttlePeriod = WatcherDateTimeUtils.parseTimeValue(parser, WatchField.THROTTLE_PERIOD_HUMAN.toString()); + } catch (ElasticsearchParseException pe) { + throw new ElasticsearchParseException("could not parse watch [{}]. failed to parse time value for field [{}]", + pe, id, currentFieldName); + } + } else if (WatchField.ACTIONS.match(currentFieldName, parser.getDeprecationHandler())) { + actions = actionRegistry.parseActions(id, parser); + } else if (WatchField.METADATA.match(currentFieldName, parser.getDeprecationHandler())) { + metatdata = parser.map(); + } else if (WatchField.VERSION.match(currentFieldName, parser.getDeprecationHandler())) { + version = parser.longValue(); + } else if (WatchField.STATUS.match(currentFieldName, parser.getDeprecationHandler())) { + if (includeStatus) { + status = WatchStatus.parse(id, parser); + } else { + parser.skipChildren(); + } + } else { + throw new ElasticsearchParseException("could not parse watch [{}]. unexpected field [{}]", id, currentFieldName); + } + } + if (trigger == null) { + throw new ElasticsearchParseException("could not parse watch [{}]. 
missing required field [{}]", id, + WatchField.TRIGGER.getPreferredName()); + } + + if (status != null) { + // verify the status is valid (that every action indeed has a status) + for (ActionWrapper action : actions) { + if (status.actionStatus(action.id()) == null) { + throw new ElasticsearchParseException("could not parse watch [{}]. watch status in invalid state. action [{}] " + + "status is missing", id, action.id()); + } + } + } else { + // we need to create the initial statuses for the actions + Map actionsStatuses = new HashMap<>(); + for (ActionWrapper action : actions) { + actionsStatuses.put(action.id(), new ActionStatus(parser.getParseDateTime())); + } + status = new WatchStatus(parser.getParseDateTime(), unmodifiableMap(actionsStatuses)); + } + + + return new Watch(id, trigger, input, condition, transform, throttlePeriod, actions, metatdata, status, version); + } +} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/watch/WatchStoreUtils.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/watch/WatchStoreUtils.java new file mode 100644 index 0000000000000..151d3c59b6ffa --- /dev/null +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/watch/WatchStoreUtils.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.watch; + +import org.elasticsearch.cluster.metadata.AliasOrIndex; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.index.IndexNotFoundException; + +public class WatchStoreUtils { + + /** + * Method to get indexmetadata of a index, that potentially is behind an alias. + * + * @param name Name of the index or the alias + * @param metaData Metadata to search for the name + * @return IndexMetaData of the concrete index + * @throws IllegalStateException If an alias points to two indices + * @throws IndexNotFoundException If no index exists + */ + public static IndexMetaData getConcreteIndex(String name, MetaData metaData) { + AliasOrIndex aliasOrIndex = metaData.getAliasAndIndexLookup().get(name); + if (aliasOrIndex == null) { + return null; + } + + if (aliasOrIndex.isAlias() && aliasOrIndex.getIndices().size() > 1) { + throw new IllegalStateException("Alias [" + name + "] points to more than one index"); + } + + return aliasOrIndex.getIndices().get(0); + } + +} diff --git a/x-pack/plugin/watcher/src/main/plugin-metadata/plugin-security.policy b/x-pack/plugin/watcher/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..8472a42a64832 --- /dev/null +++ b/x-pack/plugin/watcher/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,36 @@ +grant { + // required to configure the custom mailcap for watcher + permission java.lang.RuntimePermission "setFactory"; + + // needed when sending emails for javax.activation + // otherwise a classnotfound exception is thrown due to trying + // to load the class with the application class loader + permission java.lang.RuntimePermission "setContextClassLoader"; + permission java.lang.RuntimePermission "getClassLoader"; + // TODO: remove use of this jar as soon as possible!!!! 
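+    // javax.activation resolves mailcap/mime handlers through this internal package,
+    // which may be restricted via the package.access security property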
+ permission java.lang.RuntimePermission "accessClassInPackage.com.sun.activation.registries"; + + // needed for multiple server implementations used in tests + permission java.net.SocketPermission "*", "accept,connect"; +}; + +grant codeBase "${codebase.netty-common}" { + // for reading the system-wide configuration for the backlog of established sockets + permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read"; +}; + +grant codeBase "${codebase.netty-transport}" { + // Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854 + // the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely! + permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write"; +}; + +grant codeBase "${codebase.elasticsearch-rest-client}" { + // rest client uses system properties which gets the default proxy + permission java.net.NetPermission "getProxySelector"; +}; + +grant codeBase "${codebase.httpasyncclient}" { + // rest client uses system properties which gets the default proxy + permission java.net.NetPermission "getProxySelector"; +}; \ No newline at end of file diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/notification/NotificationServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/notification/NotificationServiceTests.java new file mode 100644 index 0000000000000..bb5f234ca950a --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/notification/NotificationServiceTests.java @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.notification; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.watcher.notification.NotificationService; + +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.is; + +public class NotificationServiceTests extends ESTestCase { + + public void testSingleAccount() { + String accountName = randomAlphaOfLength(10); + Settings settings = Settings.builder().put("xpack.notification.test.account." + accountName, "bar").build(); + + TestNotificationService service = new TestNotificationService(settings); + assertThat(service.getAccount(accountName), is(accountName)); + // single account, this will also be the default + assertThat(service.getAccount("non-existing"), is(accountName)); + } + + public void testMultipleAccountsWithExistingDefault() { + String accountName = randomAlphaOfLength(10); + Settings settings = Settings.builder() + .put("xpack.notification.test.account." + accountName, "bar") + .put("xpack.notification.test.account.second", "bar") + .put("xpack.notification.test.default_account", accountName) + .build(); + + TestNotificationService service = new TestNotificationService(settings); + assertThat(service.getAccount(accountName), is(accountName)); + assertThat(service.getAccount("second"), is("second")); + assertThat(service.getAccount("non-existing"), is(accountName)); + } + + public void testMultipleAccountsWithNoDefault() { + String accountName = randomAlphaOfLength(10); + Settings settings = Settings.builder() + .put("xpack.notification.test.account." 
+ accountName, "bar") + .put("xpack.notification.test.account.second", "bar") + .put("xpack.notification.test.account.third", "bar") + .build(); + + TestNotificationService service = new TestNotificationService(settings); + assertThat(service.getAccount(null), anyOf(is(accountName), is("second"), is("third"))); + } + + public void testMultipleAccountsUnknownDefault() { + String accountName = randomAlphaOfLength(10); + Settings settings = Settings.builder() + .put("xpack.notification.test.account." + accountName, "bar") + .put("xpack.notification.test.account.second", "bar") + .put("xpack.notification.test.default_account", "non-existing") + .build(); + + SettingsException e = expectThrows(SettingsException.class, () -> new TestNotificationService(settings)); + assertThat(e.getMessage(), is("could not find default account [non-existing]")); + } + + public void testNoSpecifiedDefaultAccount() { + String accountName = randomAlphaOfLength(10); + Settings settings = Settings.builder().put("xpack.notification.test.account." + accountName, "bar").build(); + + TestNotificationService service = new TestNotificationService(settings); + assertThat(service.getAccount(null), is(accountName)); + } + + public void testAccountDoesNotExist() throws Exception{ + TestNotificationService service = new TestNotificationService(Settings.EMPTY); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> service.getAccount(null)); + assertThat(e.getMessage(), + is("no accounts of type [test] configured. Please set up an account using the [xpack.notification.test] settings")); + } + + private static class TestNotificationService extends NotificationService { + + TestNotificationService(Settings settings) { + super(settings, "test"); + setAccountSetting(settings); + } + + @Override + protected String createAccount(String name, Settings accountSettings) { + return name; + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/EncryptSensitiveDataBootstrapCheckTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/EncryptSensitiveDataBootstrapCheckTests.java new file mode 100644 index 0000000000000..3dcec25ddb86c --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/EncryptSensitiveDataBootstrapCheckTests.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher; + +import org.elasticsearch.bootstrap.BootstrapContext; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.WatcherField; +import org.elasticsearch.xpack.core.watcher.crypto.CryptoServiceTests; + +public class EncryptSensitiveDataBootstrapCheckTests extends ESTestCase { + + public void testDefaultIsFalse() { + Settings settings = Settings.builder().put("path.home", createTempDir()).build(); + Environment env = TestEnvironment.newEnvironment(settings); + EncryptSensitiveDataBootstrapCheck check = new EncryptSensitiveDataBootstrapCheck(env); + assertFalse(check.check(new BootstrapContext(settings, null)).isFailure()); + assertTrue(check.alwaysEnforce()); + } + + public void testNoKeyInKeystore() { + Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put(Watcher.ENCRYPT_SENSITIVE_DATA_SETTING.getKey(), true) + .build(); + Environment env = TestEnvironment.newEnvironment(settings); + EncryptSensitiveDataBootstrapCheck check = new EncryptSensitiveDataBootstrapCheck(env); + assertTrue(check.check(new BootstrapContext(settings, null)).isFailure()); + } + + public void testKeyInKeystore() { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setFile(WatcherField.ENCRYPTION_KEY_SETTING.getKey(), CryptoServiceTests.generateKey()); + Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put(Watcher.ENCRYPT_SENSITIVE_DATA_SETTING.getKey(), true) + .setSecureSettings(secureSettings) + .build(); + Environment env = TestEnvironment.newEnvironment(settings); + EncryptSensitiveDataBootstrapCheck check = new EncryptSensitiveDataBootstrapCheck(env); + assertFalse(check.check(new BootstrapContext(settings, null)).isFailure()); + } + +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherClientHelperTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherClientHelperTests.java new file mode 100644 index 0000000000000..f1908ccefc2ec --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherClientHelperTests.java @@ -0,0 +1,119 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher; + +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.watcher.test.WatchExecutionContextMockBuilder; +import org.junit.Before; + +import java.util.Collections; +import java.util.Map; +import java.util.function.Consumer; + +import static org.elasticsearch.xpack.core.ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME; +import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class WatcherClientHelperTests extends ESTestCase { + + private Client client = mock(Client.class); + + @Before + public void setupMocks() { + PlainActionFuture searchFuture = PlainActionFuture.newFuture(); + searchFuture.onResponse(new SearchResponse()); + when(client.search(any())).thenReturn(searchFuture); + + ThreadPool threadPool = mock(ThreadPool.class); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + when(threadPool.getThreadContext()).thenReturn(threadContext); + when(client.threadPool()).thenReturn(threadPool); + } + + public void testEmptyHeaders() { + WatchExecutionContext ctx = new WatchExecutionContextMockBuilder("_id").buildMock(); + when(ctx.watch().status().getHeaders()).thenReturn(Collections.emptyMap()); + + assertExecutionWithOrigin(ctx); + } + + public void testWithHeaders() { + WatchExecutionContext ctx = new WatchExecutionContextMockBuilder("_id").buildMock(); + Map watchStatusHeaders = MapBuilder.newMapBuilder() + .put("es-security-runas-user", "anything") + .put("_xpack_security_authentication", "anything") + .map(); + when(ctx.watch().status().getHeaders()).thenReturn(watchStatusHeaders); + + assertRunAsExecution(ctx, headers -> { + assertThat(headers.keySet(), hasSize(2)); + assertThat(headers, hasEntry("es-security-runas-user", "anything")); + assertThat(headers, hasEntry("_xpack_security_authentication", "anything")); + }); + } + + public void testFilteredHeaders() { + WatchExecutionContext ctx = new WatchExecutionContextMockBuilder("_id").buildMock(); + Map watchStatusHeaders = MapBuilder.newMapBuilder() + .put(randomAlphaOfLength(10), "anything") + .map(); + when(ctx.watch().status().getHeaders()).thenReturn(watchStatusHeaders); + + assertRunAsExecution(ctx, headers -> { + assertThat(headers.keySet(), hasSize(0)); + }); + } + + /** + * This method executes a search and checks if the thread context was enriched with the watcher origin + */ + private void assertExecutionWithOrigin(WatchExecutionContext ctx) { + WatcherClientHelper.execute(ctx.watch(), client, () -> { + Object origin = client.threadPool().getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME); + assertThat(origin, is(WATCHER_ORIGIN)); + + // check that headers are not set + Map 
headers = client.threadPool().getThreadContext().getHeaders(); + assertThat(headers, not(hasEntry("es-security-runas-user", "anything"))); + assertThat(headers, not(hasEntry("_xpack_security_authentication", "anything"))); + + return client.search(new SearchRequest()).actionGet(); + }); + + } + + /** + * This method executes a search and ensures no stashed origin thread context was created, so that the regular node + * client was used, to emulate a run_as function + */ + public void assertRunAsExecution(WatchExecutionContext ctx, Consumer> consumer) { + WatcherClientHelper.execute(ctx.watch(), client, () -> { + Object origin = client.threadPool().getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME); + assertThat(origin, is(nullValue())); + + Map headers = client.threadPool().getThreadContext().getHeaders(); + consumer.accept(headers); + return client.search(new SearchRequest()).actionGet(); + }); + + } +} \ No newline at end of file diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherFeatureSetTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherFeatureSetTests.java new file mode 100644 index 0000000000000..e1e8b5b2ddd7d --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherFeatureSetTests.java @@ -0,0 +1,138 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.watcher.WatcherFeatureSetUsage; +import org.elasticsearch.xpack.core.watcher.WatcherMetaData; +import org.elasticsearch.xpack.core.watcher.common.stats.Counters; +import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsAction; +import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsResponse; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.core.Is.is; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class 
WatcherFeatureSetTests extends ESTestCase { + + private XPackLicenseState licenseState; + private Client client; + + @Before + public void init() throws Exception { + licenseState = mock(XPackLicenseState.class); + client = mock(Client.class); + ThreadPool threadPool = mock(ThreadPool.class); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + when(threadPool.getThreadContext()).thenReturn(threadContext); + when(client.threadPool()).thenReturn(threadPool); + } + + public void testAvailable() { + WatcherFeatureSet featureSet = new WatcherFeatureSet(Settings.EMPTY, licenseState, client); + boolean available = randomBoolean(); + when(licenseState.isWatcherAllowed()).thenReturn(available); + assertThat(featureSet.available(), is(available)); + } + + public void testEnabled() { + boolean enabled = randomBoolean(); + Settings.Builder settings = Settings.builder(); + if (enabled) { + if (randomBoolean()) { + settings.put("xpack.watcher.enabled", enabled); + } + } else { + settings.put("xpack.watcher.enabled", enabled); + } + WatcherFeatureSet featureSet = new WatcherFeatureSet(settings.build(), licenseState, client); + assertThat(featureSet.enabled(), is(enabled)); + } + + public void testUsageStats() throws Exception { + doAnswer(mock -> { + ActionListener listener = + (ActionListener) mock.getArguments()[2]; + + List nodes = new ArrayList<>(); + DiscoveryNode first = new DiscoveryNode("first", buildNewFakeTransportAddress(), Version.CURRENT); + WatcherStatsResponse.Node firstNode = new WatcherStatsResponse.Node(first); + Counters firstCounters = new Counters(); + firstCounters.inc("foo.foo", 1); + firstCounters.inc("foo.bar.baz", 1); + firstNode.setStats(firstCounters); + nodes.add(firstNode); + + DiscoveryNode second = new DiscoveryNode("second", buildNewFakeTransportAddress(), Version.CURRENT); + WatcherStatsResponse.Node secondNode = new WatcherStatsResponse.Node(second); + Counters secondCounters = new Counters(); + secondCounters.inc("spam", 1); + secondCounters.inc("foo.bar.baz", 4); + secondNode.setStats(secondCounters); + nodes.add(secondNode); + + listener.onResponse(new WatcherStatsResponse(new ClusterName("whatever"), new WatcherMetaData(false), + nodes, Collections.emptyList())); + return null; + }).when(client).execute(eq(WatcherStatsAction.INSTANCE), any(), any()); + + PlainActionFuture future = new PlainActionFuture<>(); + new WatcherFeatureSet(Settings.EMPTY, licenseState, client).usage(future); + WatcherFeatureSetUsage watcherUsage = (WatcherFeatureSetUsage) future.get(); + assertThat(watcherUsage.stats().keySet(), containsInAnyOrder("foo", "spam")); + long fooBarBaz = ObjectPath.eval("foo.bar.baz", watcherUsage.stats()); + assertThat(fooBarBaz, is(5L)); + long fooFoo = ObjectPath.eval("foo.foo", watcherUsage.stats()); + assertThat(fooFoo, is(1L)); + long spam = ObjectPath.eval("spam", watcherUsage.stats()); + assertThat(spam, is(1L)); + BytesStreamOutput out = new BytesStreamOutput(); + watcherUsage.writeTo(out); + XPackFeatureSet.Usage serializedUsage = new WatcherFeatureSetUsage(out.bytes().streamInput()); + + for (XPackFeatureSet.Usage usage : Arrays.asList(watcherUsage, serializedUsage)) { + XContentBuilder builder = jsonBuilder(); + usage.toXContent(builder, ToXContent.EMPTY_PARAMS); + + XContentSource source = new XContentSource(builder); + assertThat(source.getValue("foo.bar.baz"), is(5)); + assertThat(source.getValue("spam"), is(1)); + assertThat(source.getValue("foo.foo"), is(1)); + + assertThat(usage, instanceOf(WatcherFeatureSetUsage.class)); + 
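+            // the instanceOf assertion above makes the following cast safe for both the
+            // in-memory usage object and the one round-tripped through the stream output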
WatcherFeatureSetUsage featureSetUsage = (WatcherFeatureSetUsage) usage; + assertThat(featureSetUsage.stats().keySet(), containsInAnyOrder("foo", "spam")); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java new file mode 100644 index 0000000000000..582ef6abe1e5e --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherIndexingListenerTests.java @@ -0,0 +1,733 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.AliasOrIndex; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.RoutingNodes; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.xpack.core.watcher.watch.ClockMock; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; +import org.elasticsearch.xpack.watcher.WatcherIndexingListener.Configuration; +import org.elasticsearch.xpack.watcher.WatcherIndexingListener.ShardAllocationConfiguration; +import org.elasticsearch.xpack.watcher.trigger.TriggerService; +import org.elasticsearch.xpack.watcher.watch.WatchParser; +import org.joda.time.DateTime; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; + +import static java.util.Arrays.asList; +import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; +import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; +import static org.elasticsearch.xpack.watcher.WatcherIndexingListener.INACTIVE; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; +import 
static org.hamcrest.core.Is.is; +import static org.joda.time.DateTimeZone.UTC; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +public class WatcherIndexingListenerTests extends ESTestCase { + + private WatcherIndexingListener listener; + private WatchParser parser = mock(WatchParser.class); + private ClockMock clock = new ClockMock(); + private TriggerService triggerService = mock(TriggerService.class); + + private ShardId shardId = mock(ShardId.class); + private Engine.IndexResult result = mock(Engine.IndexResult.class); + private Engine.Index operation = mock(Engine.Index.class); + private Engine.Delete delete = mock(Engine.Delete.class); + + @Before + public void setup() throws Exception { + clock.freeze(); + listener = new WatcherIndexingListener(Settings.EMPTY, parser, clock, triggerService); + + Map map = new HashMap<>(); + map.put(shardId, new ShardAllocationConfiguration(0, 1, Collections.singletonList("foo"))); + + listener.setConfiguration(new Configuration(Watch.INDEX, map)); + } + + // + // tests for document level operations + // + public void testPreIndexCheckType() throws Exception { + when(shardId.getIndexName()).thenReturn(Watch.INDEX); + when(operation.type()).thenReturn(randomAlphaOfLength(10)); + + Engine.Index index = listener.preIndex(shardId, operation); + assertThat(index, is(operation)); + verifyZeroInteractions(parser); + } + + public void testPreIndexCheckIndex() throws Exception { + when(operation.type()).thenReturn(Watch.DOC_TYPE); + when(shardId.getIndexName()).thenReturn(randomAlphaOfLength(10)); + + Engine.Index index = listener.preIndex(shardId, operation); + assertThat(index, is(operation)); + verifyZeroInteractions(parser); + } + + public void testPreIndexCheckActive() throws Exception { + listener.setConfiguration(INACTIVE); + when(operation.type()).thenReturn(Watch.DOC_TYPE); + when(shardId.getIndexName()).thenReturn(Watch.INDEX); + + Engine.Index index = listener.preIndex(shardId, operation); + assertThat(index, is(operation)); + verifyZeroInteractions(parser); + } + + public void testPreIndex() throws Exception { + when(operation.type()).thenReturn(Watch.DOC_TYPE); + when(operation.id()).thenReturn(randomAlphaOfLength(10)); + when(operation.source()).thenReturn(BytesArray.EMPTY); + when(shardId.getIndexName()).thenReturn(Watch.INDEX); + + boolean watchActive = randomBoolean(); + boolean isNewWatch = randomBoolean(); + Watch watch = mockWatch("_id", watchActive, isNewWatch); + when(parser.parseWithSecrets(anyObject(), eq(true), anyObject(), anyObject(), anyObject())).thenReturn(watch); + + Engine.Index returnedOperation = listener.preIndex(shardId, operation); + assertThat(returnedOperation, is(operation)); + + DateTime now = new DateTime(clock.millis(), UTC); + verify(parser).parseWithSecrets(eq(operation.id()), eq(true), eq(BytesArray.EMPTY), eq(now), anyObject()); + + if (isNewWatch) { + if (watchActive) { + verify(triggerService).add(eq(watch)); + } else { + verify(triggerService).remove(eq("_id")); + } + } + } + + // this test emulates an index with 10 shards, and ensures that triggering only happens on a + // single shard + public void testPreIndexWatchGetsOnlyTriggeredOnceAcrossAllShards() throws Exception { + String id = 
randomAlphaOfLength(10); + int totalShardCount = randomIntBetween(1, 10); + boolean watchActive = randomBoolean(); + boolean isNewWatch = randomBoolean(); + Watch watch = mockWatch(id, watchActive, isNewWatch); + + when(shardId.getIndexName()).thenReturn(Watch.INDEX); + when(operation.type()).thenReturn(Watch.DOC_TYPE); + when(parser.parseWithSecrets(anyObject(), eq(true), anyObject(), anyObject(), anyObject())).thenReturn(watch); + + for (int idx = 0; idx < totalShardCount; idx++) { + final Map localShards = new HashMap<>(); + localShards.put(shardId, new ShardAllocationConfiguration(idx, totalShardCount, Collections.emptyList())); + Configuration configuration = new Configuration(Watch.INDEX, localShards); + listener.setConfiguration(configuration); + listener.preIndex(shardId, operation); + } + + // no matter how many shards we had, this should have been only called once + if (isNewWatch) { + if (watchActive) { + verify(triggerService, times(1)).add(eq(watch)); + } else { + verify(triggerService, times(1)).remove(eq(watch.id())); + } + } + } + + private Watch mockWatch(String id, boolean active, boolean isNewWatch) { + WatchStatus.State watchState = mock(WatchStatus.State.class); + when(watchState.isActive()).thenReturn(active); + + WatchStatus watchStatus = mock(WatchStatus.class); + when(watchStatus.state()).thenReturn(watchState); + if (isNewWatch) { + when(watchStatus.version()).thenReturn(-1L); + } else { + when(watchStatus.version()).thenReturn(randomLong()); + } + + Watch watch = mock(Watch.class); + when(watch.id()).thenReturn(id); + when(watch.status()).thenReturn(watchStatus); + + return watch; + } + + public void testPreIndexCheckParsingException() throws Exception { + when(operation.type()).thenReturn(Watch.DOC_TYPE); + String id = randomAlphaOfLength(10); + when(operation.id()).thenReturn(id); + when(operation.source()).thenReturn(BytesArray.EMPTY); + when(shardId.getIndexName()).thenReturn(Watch.INDEX); + when(parser.parseWithSecrets(anyObject(), eq(true), anyObject(), anyObject(), anyObject())) + .thenThrow(new IOException("self thrown")); + + ElasticsearchParseException exc = expectThrows(ElasticsearchParseException.class, + () -> listener.preIndex(shardId, operation)); + assertThat(exc.getMessage(), containsString("Could not parse watch")); + assertThat(exc.getMessage(), containsString(id)); + } + + public void testPostIndexRemoveTriggerOnException() throws Exception { + when(operation.id()).thenReturn("_id"); + when(operation.type()).thenReturn(Watch.DOC_TYPE); + when(shardId.getIndexName()).thenReturn(Watch.INDEX); + + listener.postIndex(shardId, operation, new ElasticsearchParseException("whatever")); + verify(triggerService).remove(eq("_id")); + } + + public void testPostIndexDontInvokeForOtherDocuments() throws Exception { + when(operation.id()).thenReturn("_id"); + when(operation.type()).thenReturn(Watch.DOC_TYPE); + when(shardId.getIndexName()).thenReturn("anything"); + when(result.hasFailure()).thenReturn(false); + + listener.postIndex(shardId, operation, new ElasticsearchParseException("whatever")); + verifyZeroInteractions(triggerService); + } + + public void testPreDeleteCheckActive() throws Exception { + listener.setConfiguration(INACTIVE); + listener.preDelete(shardId, delete); + + verifyZeroInteractions(triggerService); + } + + public void testPreDeleteCheckIndex() throws Exception { + when(shardId.getIndexName()).thenReturn(randomAlphaOfLength(10)); + + listener.preDelete(shardId, delete); + + verifyZeroInteractions(triggerService); + } + + public void 
testPreDeleteCheckType() throws Exception { + when(shardId.getIndexName()).thenReturn(Watch.INDEX); + when(delete.type()).thenReturn(randomAlphaOfLength(10)); + + listener.preDelete(shardId, delete); + + verifyZeroInteractions(triggerService); + } + + public void testPreDelete() throws Exception { + when(shardId.getIndexName()).thenReturn(Watch.INDEX); + when(delete.type()).thenReturn(Watch.DOC_TYPE); + when(delete.id()).thenReturn("_id"); + + listener.preDelete(shardId, delete); + + verify(triggerService).remove(eq("_id")); + } + + // + // tests for cluster state updates + // + public void testClusterChangedNoMetadata() throws Exception { + ClusterState state = mockClusterState(randomAlphaOfLength(10)); + listener.clusterChanged(new ClusterChangedEvent("any", state, state)); + + assertThat(listener.getConfiguration().isIndexAndActive(Watch.INDEX), is(true)); + } + + public void testClusterChangedNoWatchIndex() throws Exception { + Map map = new HashMap<>(); + map.put(shardId, new ShardAllocationConfiguration(0, 1, Collections.singletonList("foo"))); + Configuration randomConfiguration = new Configuration(randomAlphaOfLength(10), map); + listener.setConfiguration(randomConfiguration); + + ClusterState clusterState = mockClusterState(null); + ClusterChangedEvent clusterChangedEvent = mock(ClusterChangedEvent.class); + when(clusterChangedEvent.metaDataChanged()).thenReturn(true); + when(clusterChangedEvent.state()).thenReturn(clusterState); + + listener.clusterChanged(clusterChangedEvent); + + assertThat(listener.getConfiguration(), equalTo(INACTIVE)); + } + + public void testClusterChangedWatchAliasChanged() throws Exception { + String newActiveWatchIndex = randomAlphaOfLength(10); + RoutingTable routingTable = mock(RoutingTable.class); + when(routingTable.hasIndex(eq(newActiveWatchIndex))).thenReturn(true); + + ClusterState currentClusterState = mockClusterState(newActiveWatchIndex); + when(currentClusterState.routingTable()).thenReturn(routingTable); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(newNode("node_1")) + .localNodeId("node_1").build(); + when(currentClusterState.getNodes()).thenReturn(nodes); + RoutingNodes routingNodes = mock(RoutingNodes.class); + RoutingNode routingNode = mock(RoutingNode.class); + boolean emptyShards = randomBoolean(); + + if (emptyShards) { + when(routingNode.shardsWithState(eq(newActiveWatchIndex), any())) + .thenReturn(Collections.emptyList()); + } else { + Index index = new Index(newActiveWatchIndex, "uuid"); + ShardId shardId = new ShardId(index, 0); + ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, "node_1", true, + STARTED); + List routing = Collections.singletonList(shardRouting); + when(routingNode.shardsWithState(eq(newActiveWatchIndex), eq(STARTED), eq(RELOCATING))) + .thenReturn(routing); + when(routingTable.allShards(eq(newActiveWatchIndex))).thenReturn(routing); + IndexRoutingTable indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(shardRouting).build(); + when(routingTable.index(newActiveWatchIndex)).thenReturn(indexRoutingTable); + } + + when(routingNodes.node(eq("node_1"))).thenReturn(routingNode); + when(currentClusterState.getRoutingNodes()).thenReturn(routingNodes); + + ClusterState previousClusterState = mockClusterState(randomAlphaOfLength(8)); + when(previousClusterState.routingTable()).thenReturn(routingTable); + + ClusterChangedEvent event = new ClusterChangedEvent("something", currentClusterState, + previousClusterState); + listener.clusterChanged(event); + + if (emptyShards) { + 
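+ // no shards of the new watch index are allocated to this node, so the listener must go inactive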
assertThat(listener.getConfiguration(), is(INACTIVE)); + } else { + assertThat(listener.getConfiguration().isIndexAndActive(newActiveWatchIndex), + is(true)); + } + } + + public void testClusterChangedNoRoutingChanges() throws Exception { + Index index = new Index(Watch.INDEX, "foo"); + IndexRoutingTable watchRoutingTable = IndexRoutingTable.builder(index).build(); + ClusterState previousState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1") + .add(newNode("node_1"))) + .routingTable(RoutingTable.builder().add(watchRoutingTable).build()) + .build(); + + ClusterState currentState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1") + .add(newNode("node_1")).add(newNode("node_2"))) + .routingTable(RoutingTable.builder().add(watchRoutingTable).build()) + .build(); + + Configuration configuration = listener.getConfiguration(); + assertThat(configuration.isIndexAndActive(Watch.INDEX), is(true)); + + ClusterChangedEvent event = new ClusterChangedEvent("something", currentState, + previousState); + listener.clusterChanged(event); + + assertThat(listener.getConfiguration(), is(configuration)); + assertThat(listener.getConfiguration().isIndexAndActive(Watch.INDEX), is(true)); + } + + // a shard is marked as relocating, no change in the routing yet (replica might be added, + // shard might be offloaded) + public void testCheckAllocationIdsOnShardStarted() throws Exception { + Index index = new Index(Watch.INDEX, "foo"); + ShardId shardId = new ShardId(index, 0); + ShardRoutingState randomState = randomFrom(STARTED, RELOCATING); + ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, "current", randomState == RELOCATING ? 
"other" : null, true, + randomState); + IndexRoutingTable indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(shardRouting).build(); + + Map allocationIds = + listener.getLocalShardAllocationIds(asList(shardRouting), indexRoutingTable); + + assertThat(allocationIds.size(), is(1)); + assertThat(allocationIds.get(shardId).index, is(0)); + assertThat(allocationIds.get(shardId).shardCount, is(1)); + } + + public void testCheckAllocationIdsWithoutShards() throws Exception { + Index index = new Index(Watch.INDEX, "foo"); + ShardId shardId = new ShardId(index, 0); + ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, "other", true, + STARTED); + IndexRoutingTable indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(shardRouting).build(); + + Map allocationIds = + listener.getLocalShardAllocationIds(Collections.emptyList(), indexRoutingTable); + assertThat(allocationIds.size(), is(0)); + } + + public void testCheckAllocationIdsWithSeveralShards() { + // setup 5 shards, one replica, 10 shards total, all started + Index index = new Index(Watch.INDEX, "foo"); + ShardId firstShardId = new ShardId(index, 0); + ShardId secondShardId = new ShardId(index, 1); + + List localShards = new ArrayList<>(); + localShards.add(TestShardRouting.newShardRouting(firstShardId, "node1", true, STARTED)); + localShards.add(TestShardRouting.newShardRouting(secondShardId, "node1", true, STARTED)); + + IndexRoutingTable indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(localShards.get(0)) + .addShard(localShards.get(1)) + .addShard(TestShardRouting.newShardRouting(firstShardId, "node2", true, STARTED)) + .addShard(TestShardRouting.newShardRouting(secondShardId, "node2", true, STARTED)) + .build(); + + Map allocationIds = + listener.getLocalShardAllocationIds(localShards, indexRoutingTable); + assertThat(allocationIds.size(), is(2)); + } + + // no matter how many copies of a shard exist, a watch should always be triggered exactly once + public void testShardConfigurationShouldBeTriggeredExactlyOnce() throws Exception { + // random number of shards + int numberOfShards = randomIntBetween(1, 20); + int numberOfDocuments = randomIntBetween(1, 10000); + BitSet bitSet = new BitSet(numberOfDocuments); + logger.info("Testing [{}] documents with [{}] shards", numberOfDocuments, numberOfShards); + + for (int currentShardId = 0; currentShardId < numberOfShards; currentShardId++) { + ShardAllocationConfiguration sac = new ShardAllocationConfiguration(currentShardId, + numberOfShards, Collections.emptyList()); + + for (int i = 0; i < numberOfDocuments; i++) { + boolean shouldBeTriggered = sac.shouldBeTriggered("watch_" + i); + boolean hasAlreadyBeenTriggered = bitSet.get(i); + if (shouldBeTriggered) { + String message = String.format(Locale.ROOT, "Watch [%s] has already been " + + "triggered", i); + assertThat(message, hasAlreadyBeenTriggered, is(false)); + bitSet.set(i); + } + } + } + + assertThat(bitSet.cardinality(), is(numberOfDocuments)); + } + + // ensure that non data nodes, deal properly with this cluster state listener + public void testOnNonDataNodes() { + listener.setConfiguration(INACTIVE); + Index index = new Index(Watch.INDEX, "foo"); + ShardId shardId = new ShardId(index, 0); + ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, "node2", true, STARTED); + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index).addShard(shardRouting); + + DiscoveryNode node1 = new DiscoveryNode("node_1", 
ESTestCase.buildNewFakeTransportAddress(), + Collections.emptyMap(), new HashSet<>(Collections.singletonList( + randomFrom(DiscoveryNode.Role.INGEST, DiscoveryNode.Role.MASTER))), + Version.CURRENT); + + DiscoveryNode node2 = new DiscoveryNode("node_2", ESTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(), + new HashSet<>(Collections.singletonList(DiscoveryNode.Role.DATA)), Version.CURRENT); + + DiscoveryNode node3 = new DiscoveryNode("node_3", ESTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(), + new HashSet<>(Collections.singletonList(DiscoveryNode.Role.DATA)), Version.CURRENT); + + IndexMetaData.Builder indexMetaDataBuilder = createIndexBuilder(Watch.INDEX, 1 ,0); + + ClusterState previousState = ClusterState.builder(new ClusterName("my-cluster")) + .metaData(MetaData.builder().put(indexMetaDataBuilder)) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(node1).add(node2).add(node3)) + .routingTable(RoutingTable.builder().add(indexRoutingTable).build()) + .build(); + + IndexMetaData.Builder newIndexMetaDataBuilder = createIndexBuilder(Watch.INDEX, 1, 1); + + ShardRouting replicaShardRouting = TestShardRouting.newShardRouting(shardId, "node3", false, STARTED); + IndexRoutingTable.Builder newRoutingTable = IndexRoutingTable.builder(index) + .addShard(shardRouting) + .addShard(replicaShardRouting); + ClusterState currentState = ClusterState.builder(new ClusterName("my-cluster")) + .metaData(MetaData.builder().put(newIndexMetaDataBuilder)) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(node1).add(node2).add(node3)) + .routingTable(RoutingTable.builder().add(newRoutingTable).build()) + .build(); + + ClusterChangedEvent event = new ClusterChangedEvent("something", currentState, previousState); + listener.clusterChanged(event); + assertThat(listener.getConfiguration(), is(INACTIVE)); + } + + public void testListenerWorksIfOtherIndicesChange() throws Exception { + DiscoveryNode node1 = newNode("node_1"); + DiscoveryNode node2 = newNode("node_2"); + + Index index = new Index("random-index", "foo"); + ShardId firstShardId = new ShardId(index, 0); + + IndexMetaData.Builder indexMetaDataBuilder = createIndexBuilder("random-index", 2, 1); + + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(firstShardId, "node_1", true, STARTED)) + .addShard(TestShardRouting.newShardRouting(firstShardId, "node_2", false, STARTED)); + + ClusterState previousState = ClusterState.builder(new ClusterName("my-cluster")) + .metaData(MetaData.builder().put(indexMetaDataBuilder)) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(node1).add(node2)) + .routingTable(RoutingTable.builder().add(indexRoutingTable).build()) + .build(); + + IndexMetaData.Builder currentMetaDataBuilder = createIndexBuilder(Watch.INDEX, 2, 1); + + boolean useWatchIndex = randomBoolean(); + String indexName = useWatchIndex ? 
Watch.INDEX : "other-index-name"; + Index otherIndex = new Index(indexName, "foo"); + ShardId watchShardId = new ShardId(otherIndex, 0); + + IndexRoutingTable.Builder currentRoutingTable = IndexRoutingTable.builder(otherIndex) + .addShard(TestShardRouting.newShardRouting(watchShardId, "node_1", true, STARTED)) + .addShard(TestShardRouting.newShardRouting(watchShardId, "node_2", false, STARTED)); + + ClusterState currentState = ClusterState.builder(new ClusterName("my-cluster")) + .metaData(MetaData.builder().put(currentMetaDataBuilder)) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(node1).add(node2)) + .routingTable(RoutingTable.builder().add(currentRoutingTable).build()) + .build(); + + listener.setConfiguration(INACTIVE); + ClusterChangedEvent event = new ClusterChangedEvent("something", currentState, previousState); + listener.clusterChanged(event); + if (useWatchIndex) { + assertThat(listener.getConfiguration(), is(not(INACTIVE))); + } else { + assertThat(listener.getConfiguration(), is(INACTIVE)); + } + } + + // 4 nodes, each node has one shard, now node 3 fails, which means only one node should + // reload, where as two should not + // this test emulates on of those two nodes + public void testThatShardConfigurationIsNotReloadedNonAffectedShardsChange() { + listener.setConfiguration(INACTIVE); + + DiscoveryNode node1 = newNode("node_1"); + DiscoveryNode node2 = newNode("node_2"); + DiscoveryNode node3 = newNode("node_3"); + DiscoveryNode node4 = newNode("node_4"); + + String localNode = randomFrom("node_1", "node_2"); + + Index index = new Index(Watch.INDEX, "foo"); + ShardId firstShardId = new ShardId(index, 0); + ShardId secondShardId = new ShardId(index, 1); + + IndexMetaData.Builder indexMetaDataBuilder = createIndexBuilder(Watch.INDEX, 2, 1); + + ShardRouting firstShardRoutingPrimary = TestShardRouting.newShardRouting(firstShardId, "node_1", true, STARTED); + ShardRouting firstShardRoutingReplica = TestShardRouting.newShardRouting(firstShardId, "node_2", false, STARTED); + ShardRouting secondShardRoutingPrimary = TestShardRouting.newShardRouting(secondShardId, "node_3", true, STARTED); + ShardRouting secondShardRoutingReplica = TestShardRouting.newShardRouting(secondShardId, "node_4", false, STARTED); + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index) + .addShard(firstShardRoutingPrimary) + .addShard(firstShardRoutingReplica) + .addShard(secondShardRoutingPrimary) + .addShard(secondShardRoutingReplica); + + ClusterState previousState = ClusterState.builder(new ClusterName("my-cluster")) + .metaData(MetaData.builder().put(indexMetaDataBuilder)) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId(localNode) + .add(node1).add(node2).add(node3).add(node4)) + .routingTable(RoutingTable.builder().add(indexRoutingTable).build()) + .build(); + + ClusterState emptyState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId(localNode) + .add(node1).add(node2).add(node3).add(node4)) + .build(); + + ClusterChangedEvent event = new ClusterChangedEvent("something", previousState, emptyState); + listener.clusterChanged(event); + Configuration configuration = listener.getConfiguration(); + assertThat(configuration, is(not(INACTIVE))); + + // now create a cluster state where node 4 is missing + IndexMetaData.Builder newIndexMetaDataBuilder = createIndexBuilder(Watch.INDEX, 2, 1); + + IndexRoutingTable.Builder newRoutingTable = 
IndexRoutingTable.builder(index) + .addShard(firstShardRoutingPrimary) + .addShard(firstShardRoutingReplica) + .addShard(secondShardRoutingPrimary); + + ClusterState currentState = ClusterState.builder(new ClusterName("my-cluster")) + .metaData(MetaData.builder().put(newIndexMetaDataBuilder)) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId(localNode) + .add(node1).add(node2).add(node3).add(node4)) + .routingTable(RoutingTable.builder().add(newRoutingTable).build()) + .build(); + + ClusterChangedEvent nodeGoneEvent = new ClusterChangedEvent("something", currentState, previousState); + listener.clusterChanged(nodeGoneEvent); + + // ensure no configuration replacement has happened + assertThat(listener.getConfiguration(), is(configuration)); + } + + // if a .watches alias is created that points to two indices, the watcher must be set to inactive + public void testWithAliasPointingToTwoIndicesSetsWatcherInactive() { + listener.setConfiguration(INACTIVE); + DiscoveryNode node1 = newNode("node_1"); + + // index foo pointing to .watches + Index fooIndex = new Index("foo", "someuuid"); + ShardId fooShardId = new ShardId(fooIndex, 0); + ShardRouting fooShardRouting = TestShardRouting.newShardRouting(fooShardId, node1.getId(), true, STARTED); + IndexRoutingTable.Builder fooIndexRoutingTable = IndexRoutingTable.builder(fooIndex).addShard(fooShardRouting); + + // regular cluster state with correct single alias pointing to watches index + ClusterState previousState = ClusterState.builder(new ClusterName("my-cluster")) + .metaData(MetaData.builder().put(createIndexBuilder("foo", 1, 0) + .putAlias(AliasMetaData.builder(Watch.INDEX)))) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(node1)) + .routingTable(RoutingTable.builder().add(fooIndexRoutingTable).build()) + .build(); + + // index bar pointing to .watches + Index barIndex = new Index("bar", "someuuid2"); + ShardId barShardId = new ShardId(barIndex, 0); + IndexMetaData.Builder barIndexMetaData = createIndexBuilder("bar", 1, 0).putAlias(AliasMetaData.builder(Watch.INDEX)); + ShardRouting barShardRouting = TestShardRouting.newShardRouting(barShardId, node1.getId(), true, STARTED); + IndexRoutingTable.Builder barIndexRoutingTable = IndexRoutingTable.builder(barIndex).addShard(barShardRouting); + + // cluster state with two indices pointing to the .watches index + ClusterState currentState = ClusterState.builder(new ClusterName("my-cluster")) + .metaData(MetaData.builder().put(createIndexBuilder("foo", 1, 0) + .putAlias(AliasMetaData.builder(Watch.INDEX))) + .put(barIndexMetaData)) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1") + .add(node1)) + .routingTable(RoutingTable.builder() + .add(IndexRoutingTable.builder(fooIndex).addShard(fooShardRouting)) + .add(barIndexRoutingTable).build()) + .build(); + + ClusterChangedEvent nodeGoneEvent = new ClusterChangedEvent("something", currentState, previousState); + listener.clusterChanged(nodeGoneEvent); + + // ensure the listener went inactive because the alias points to two indices + assertThat(listener.getConfiguration(), is(INACTIVE)); + } + + public void testThatIndexingListenerBecomesInactiveWithoutMasterNode() { + ClusterState clusterStateWithMaster = mockClusterState(Watch.INDEX); + ClusterState clusterStateWithoutMaster = mockClusterState(Watch.INDEX); + DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node_1").add(newNode("node_1")).build(); + when(clusterStateWithoutMaster.nodes()).thenReturn(nodes); + 
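+ // the configuration from setup() is still active until the master-less cluster state is applied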
assertThat(listener.getConfiguration(), is(not(INACTIVE))); + listener.clusterChanged(new ClusterChangedEvent("something", clusterStateWithoutMaster, clusterStateWithMaster)); + + assertThat(listener.getConfiguration(), is(INACTIVE)); + } + + public void testThatIndexingListenerBecomesInactiveOnClusterBlock() { + ClusterState clusterState = mockClusterState(Watch.INDEX); + ClusterState clusterStateWriteBlock = mockClusterState(Watch.INDEX); + ClusterBlocks clusterBlocks = ClusterBlocks.builder().addGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_WRITES).build(); + when(clusterStateWriteBlock.getBlocks()).thenReturn(clusterBlocks); + + assertThat(listener.getConfiguration(), is(not(INACTIVE))); + listener.clusterChanged(new ClusterChangedEvent("something", clusterStateWriteBlock, clusterState)); + + assertThat(listener.getConfiguration(), is(INACTIVE)); + } + + // + // helper methods + // + /** + * create a mock cluster state that returns the specified index as the watch index + */ + private ClusterState mockClusterState(String watchIndex) { + MetaData metaData = mock(MetaData.class); + if (watchIndex == null) { + when(metaData.getAliasAndIndexLookup()).thenReturn(Collections.emptySortedMap()); + } else { + SortedMap<String, AliasOrIndex> indices = new TreeMap<>(); + + IndexMetaData indexMetaData = mock(IndexMetaData.class); + when(indexMetaData.getIndex()).thenReturn(new Index(watchIndex, randomAlphaOfLength(10))); + indices.put(watchIndex, new AliasOrIndex.Index(indexMetaData)); + + // now point the alias, if the watch index is not .watches + if (watchIndex.equals(Watch.INDEX) == false) { + AliasMetaData aliasMetaData = mock(AliasMetaData.class); + when(aliasMetaData.alias()).thenReturn(watchIndex); + indices.put(Watch.INDEX, new AliasOrIndex.Alias(aliasMetaData, indexMetaData)); + } + + when(metaData.getAliasAndIndexLookup()).thenReturn(indices); + } + + ClusterState clusterState = mock(ClusterState.class); + when(clusterState.metaData()).thenReturn(metaData); + + DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node_1").masterNodeId("node_1").add(newNode("node_1")).build(); + when(clusterState.nodes()).thenReturn(nodes); + when(clusterState.getBlocks()).thenReturn(ClusterBlocks.EMPTY_CLUSTER_BLOCK); + + return clusterState; + } + + private IndexMetaData.Builder createIndexBuilder(String name, int numberOfShards, + int numberOfReplicas) { + return IndexMetaData.builder(name) + .settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfShards) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + ); + } + + private static DiscoveryNode newNode(String nodeId) { + return new DiscoveryNode(nodeId, ESTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(), + new HashSet<>(asList(DiscoveryNode.Role.values())), Version.CURRENT); + } +} \ No newline at end of file diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java new file mode 100644 index 0000000000000..86375c0ea4862 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java @@ -0,0 +1,693 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.watcher.WatcherState; +import org.elasticsearch.xpack.core.watcher.execution.TriggeredWatchStoreField; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.junit.Before; +import org.mockito.stubbing.Answer; + +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static java.util.Arrays.asList; +import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; +import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; +import static org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME; +import static org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME; +import static org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField.WATCHES_TEMPLATE_NAME; +import static org.hamcrest.Matchers.hasSize; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class WatcherLifeCycleServiceTests extends ESTestCase { + + private WatcherService watcherService; + private WatcherLifeCycleService lifeCycleService; + + @Before + public void prepareServices() { + ThreadPool threadPool = mock(ThreadPool.class); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + ClusterService clusterService = mock(ClusterService.class); + Answer answer = 
invocationOnMock -> { + AckedClusterStateUpdateTask updateTask = (AckedClusterStateUpdateTask) invocationOnMock.getArguments()[1]; + updateTask.onAllNodesAcked(null); + return null; + }; + doAnswer(answer).when(clusterService).submitStateUpdateTask(anyString(), any(ClusterStateUpdateTask.class)); + watcherService = mock(WatcherService.class); + lifeCycleService = new WatcherLifeCycleService(Settings.EMPTY, clusterService, watcherService, + EsExecutors.newDirectExecutorService()) { + @Override + void stopExecutor() { + // direct executor cannot be terminated + } + }; + } + + public void testStartAndStopCausedByClusterState() throws Exception { + IndexRoutingTable indexRoutingTable = IndexRoutingTable.builder(new Index("anything", "foo")).build(); + ClusterState previousClusterState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) + .routingTable(RoutingTable.builder().add(indexRoutingTable).build()) + .build(); + + IndexRoutingTable watchRoutingTable = IndexRoutingTable.builder(new Index(Watch.INDEX, "foo")).build(); + ClusterState clusterState = ClusterState.builder(new ClusterName("my-cluster")) + .metaData(MetaData.builder() + .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .build()) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) + .routingTable(RoutingTable.builder().add(watchRoutingTable).build()) + .build(); + + when(watcherService.state()).thenReturn(WatcherState.STOPPED); + when(watcherService.validate(clusterState)).thenReturn(true); + lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterState, previousClusterState)); + verify(watcherService, times(1)).start(clusterState); + verify(watcherService, never()).stop(anyString()); + + // Trying to start a second time, but that should have no effect. 
+ when(watcherService.state()).thenReturn(WatcherState.STARTED); + lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterState, previousClusterState)); + verify(watcherService, times(1)).start(clusterState); + verify(watcherService, never()).stop(anyString()); + } + + public void testStartWithStateNotRecoveredBlock() throws Exception { + DiscoveryNodes.Builder nodes = new DiscoveryNodes.Builder().masterNodeId("id1").localNodeId("id1"); + ClusterState clusterState = ClusterState.builder(new ClusterName("my-cluster")) + .blocks(ClusterBlocks.builder().addGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) + .nodes(nodes).build(); + when(watcherService.state()).thenReturn(WatcherState.STOPPED); + lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterState, clusterState)); + verify(watcherService, never()).start(any(ClusterState.class)); + } + + public void testShutdown() throws Exception { + IndexRoutingTable watchRoutingTable = IndexRoutingTable.builder(new Index(Watch.INDEX, "foo")).build(); + ClusterState clusterState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) + .routingTable(RoutingTable.builder().add(watchRoutingTable).build()) + .metaData(MetaData.builder() + .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .build()) + .build(); + + when(watcherService.validate(clusterState)).thenReturn(true); + + when(watcherService.state()).thenReturn(WatcherState.STOPPED); + lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", clusterState, clusterState)); + verify(watcherService, times(1)).start(any(ClusterState.class)); + verify(watcherService, never()).stop(anyString()); + + when(watcherService.state()).thenReturn(WatcherState.STARTED); + lifeCycleService.shutDown(); + verify(watcherService, times(1)).start(any(ClusterState.class)); + verify(watcherService, times(1)).stop(eq("shutdown initiated")); + + when(watcherService.state()).thenReturn(WatcherState.STOPPED); + lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterState, clusterState)); + verify(watcherService, times(1)).start(any(ClusterState.class)); + verify(watcherService, times(1)).stop(eq("shutdown initiated")); + } + + public void testManualStartStop() throws Exception { + IndexRoutingTable watchRoutingTable = IndexRoutingTable.builder(new Index(Watch.INDEX, "foo")).build(); + ClusterState clusterState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) + .routingTable(RoutingTable.builder().add(watchRoutingTable).build()) + .metaData(MetaData.builder() + .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .build()) + .build(); + + when(watcherService.validate(clusterState)).thenReturn(true); + + when(watcherService.state()).thenReturn(WatcherState.STOPPED); + lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", clusterState, clusterState)); + verify(watcherService, 
times(1)).start(any(ClusterState.class)); + verify(watcherService, never()).stop(anyString()); + + when(watcherService.state()).thenReturn(WatcherState.STARTED); + String reason = randomAlphaOfLength(10); + lifeCycleService.stop(reason); + verify(watcherService, times(1)).start(any(ClusterState.class)); + verify(watcherService, times(1)).stop(eq(reason)); + + // Starting via cluster state update, we shouldn't start because we have been stopped manually. + when(watcherService.state()).thenReturn(WatcherState.STOPPED); + lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterState, clusterState)); + verify(watcherService, times(2)).start(any(ClusterState.class)); + verify(watcherService, times(1)).stop(eq(reason)); + + // no change, keep going + clusterState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) + .metaData(MetaData.builder() + .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .build()) + .build(); + when(watcherService.state()).thenReturn(WatcherState.STARTED); + lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterState, clusterState)); + verify(watcherService, times(2)).start(any(ClusterState.class)); + verify(watcherService, times(1)).stop(eq(reason)); + + ClusterState previousClusterState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) + .metaData(MetaData.builder() + .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .build()) + .build(); + when(watcherService.validate(clusterState)).thenReturn(true); + when(watcherService.state()).thenReturn(WatcherState.STOPPED); + lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterState, previousClusterState)); + verify(watcherService, times(3)).start(any(ClusterState.class)); + verify(watcherService, times(1)).stop(eq(reason)); + } + + public void testManualStartStopClusterStateNotValid() throws Exception { + DiscoveryNodes.Builder nodes = new DiscoveryNodes.Builder().masterNodeId("id1").localNodeId("id1"); + ClusterState clusterState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(nodes).build(); + when(watcherService.state()).thenReturn(WatcherState.STOPPED); + when(watcherService.validate(clusterState)).thenReturn(false); + + lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", clusterState, clusterState)); + + verify(watcherService, never()).start(any(ClusterState.class)); + verify(watcherService, never()).stop(anyString()); + } + + public void testManualStartStopWatcherNotStopped() throws Exception { + DiscoveryNodes.Builder nodes = new DiscoveryNodes.Builder().masterNodeId("id1").localNodeId("id1"); + ClusterState clusterState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(nodes).build(); + when(watcherService.state()).thenReturn(WatcherState.STOPPING); + + lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", clusterState, clusterState)); + 
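+ // watcher is still in the STOPPING state, so the cluster change must neither validate nor start/stop anything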
verify(watcherService, never()).validate(any(ClusterState.class)); + verify(watcherService, never()).start(any(ClusterState.class)); + verify(watcherService, never()).stop(anyString()); + } + + public void testNoLocalShards() throws Exception { + Index watchIndex = new Index(Watch.INDEX, "foo"); + ShardId shardId = new ShardId(watchIndex, 0); + DiscoveryNodes nodes = new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1") + .add(newNode("node_1")).add(newNode("node_2")) + .build(); + IndexMetaData indexMetaData = IndexMetaData.builder(Watch.INDEX) + .settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + ).build(); + + IndexRoutingTable watchRoutingTable = IndexRoutingTable.builder(watchIndex) + .addShard(randomBoolean() ? + TestShardRouting.newShardRouting(shardId, "node_1", true, STARTED) : + TestShardRouting.newShardRouting(shardId, "node_1", "node_2", true, RELOCATING)) + .build(); + ClusterState clusterStateWithLocalShards = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(nodes) + .routingTable(RoutingTable.builder().add(watchRoutingTable).build()) + .metaData(MetaData.builder().put(indexMetaData, false)) + .build(); + + // shard moved over to node 2 + IndexRoutingTable watchRoutingTableNode2 = IndexRoutingTable.builder(watchIndex) + .addShard(randomBoolean() ? + TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED) : + TestShardRouting.newShardRouting(shardId, "node_2", "node_1", true, RELOCATING)) + .build(); + ClusterState clusterStateWithoutLocalShards = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(nodes) + .routingTable(RoutingTable.builder().add(watchRoutingTableNode2).build()) + .metaData(MetaData.builder().put(indexMetaData, false)) + .build(); + + when(watcherService.state()).thenReturn(WatcherState.STARTED); + + // set current allocation ids + lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithLocalShards, clusterStateWithoutLocalShards)); + verify(watcherService, times(0)).pauseExecution(eq("no local watcher shards found")); + + // no more local shards, let's pause execution + lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithoutLocalShards, clusterStateWithLocalShards)); + verify(watcherService, times(1)).pauseExecution(eq("no local watcher shards found")); + + // no further invocations should happen if the cluster state does not change in regard to local shards + lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithoutLocalShards, clusterStateWithoutLocalShards)); + verify(watcherService, times(1)).pauseExecution(eq("no local watcher shards found")); + } + + public void testReplicaWasAddedOrRemoved() throws Exception { + Index watchIndex = new Index(Watch.INDEX, "foo"); + ShardId shardId = new ShardId(watchIndex, 0); + ShardId secondShardId = new ShardId(watchIndex, 1); + DiscoveryNodes discoveryNodes = new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1") + .add(newNode("node_1")) + .add(newNode("node_2")) + .build(); + + IndexRoutingTable previousWatchRoutingTable = IndexRoutingTable.builder(watchIndex) + .addShard(TestShardRouting.newShardRouting(secondShardId, "node_1", true, STARTED)) + .addShard(TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED)) + .build(); + + IndexMetaData indexMetaData = IndexMetaData.builder(Watch.INDEX) + 
.settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + ).build(); + + ClusterState stateWithPrimaryShard = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(discoveryNodes) + .routingTable(RoutingTable.builder().add(previousWatchRoutingTable).build()) + .metaData(MetaData.builder().put(indexMetaData, false)) + .build(); + + IndexRoutingTable currentWatchRoutingTable = IndexRoutingTable.builder(watchIndex) + .addShard(TestShardRouting.newShardRouting(shardId, "node_1", false, STARTED)) + .addShard(TestShardRouting.newShardRouting(secondShardId, "node_1", true, STARTED)) + .addShard(TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED)) + .build(); + + ClusterState stateWithReplicaAdded = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(discoveryNodes) + .routingTable(RoutingTable.builder().add(currentWatchRoutingTable).build()) + .metaData(MetaData.builder().put(indexMetaData, false)) + .build(); + + // randomize between addition or removal of a replica + boolean replicaAdded = randomBoolean(); + ClusterChangedEvent event; + ClusterState usedClusterState; + if (replicaAdded) { + event = new ClusterChangedEvent("any", stateWithReplicaAdded, stateWithPrimaryShard); + usedClusterState = stateWithReplicaAdded; + } else { + event = new ClusterChangedEvent("any", stateWithPrimaryShard, stateWithReplicaAdded); + usedClusterState = stateWithPrimaryShard; + } + + when(watcherService.state()).thenReturn(WatcherState.STARTED); + lifeCycleService.clusterChanged(event); + verify(watcherService).reload(eq(usedClusterState), anyString()); + } + + // make sure that cluster state changes can be processed on nodes that do not hold data + public void testNonDataNode() { + Index index = new Index(Watch.INDEX, "foo"); + ShardId shardId = new ShardId(index, 0); + ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, "node2", true, STARTED); + IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index).addShard(shardRouting); + + DiscoveryNode node1 = new DiscoveryNode("node_1", ESTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(), + new HashSet<>(asList(randomFrom(DiscoveryNode.Role.INGEST, DiscoveryNode.Role.MASTER))), Version.CURRENT); + + DiscoveryNode node2 = new DiscoveryNode("node_2", ESTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(), + new HashSet<>(asList(DiscoveryNode.Role.DATA)), Version.CURRENT); + + DiscoveryNode node3 = new DiscoveryNode("node_3", ESTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(), + new HashSet<>(asList(DiscoveryNode.Role.DATA)), Version.CURRENT); + + IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(Watch.INDEX) + .settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + ); + + ClusterState previousState = ClusterState.builder(new ClusterName("my-cluster")) + .metaData(MetaData.builder().put(indexMetaDataBuilder)) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(node1).add(node2).add(node3)) + .routingTable(RoutingTable.builder().add(indexRoutingTable).build()) + .build(); + + IndexMetaData.Builder newIndexMetaDataBuilder = IndexMetaData.builder(Watch.INDEX) + .settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + 
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + ); + + ShardRouting replicaShardRouting = TestShardRouting.newShardRouting(shardId, "node3", false, STARTED); + IndexRoutingTable.Builder newRoutingTable = IndexRoutingTable.builder(index).addShard(shardRouting).addShard(replicaShardRouting); + ClusterState currentState = ClusterState.builder(new ClusterName("my-cluster")) + .metaData(MetaData.builder().put(newIndexMetaDataBuilder)) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(node1).add(node2).add(node3)) + .routingTable(RoutingTable.builder().add(newRoutingTable).build()) + .build(); + + when(watcherService.state()).thenReturn(WatcherState.STARTED); + lifeCycleService.clusterChanged(new ClusterChangedEvent("any", currentState, previousState)); + verify(watcherService, times(0)).pauseExecution(anyObject()); + verify(watcherService, times(0)).reload(any(), any()); + } + + public void testThatMissingWatcherIndexMetadataOnlyResetsOnce() { + Index watchIndex = new Index(Watch.INDEX, "foo"); + ShardId shardId = new ShardId(watchIndex, 0); + IndexRoutingTable watchRoutingTable = IndexRoutingTable.builder(watchIndex) + .addShard(TestShardRouting.newShardRouting(shardId, "node_1", true, STARTED)).build(); + DiscoveryNodes nodes = new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1")).build(); + + IndexMetaData.Builder newIndexMetaDataBuilder = IndexMetaData.builder(Watch.INDEX) + .settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + ); + + ClusterState clusterStateWithWatcherIndex = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(nodes) + .routingTable(RoutingTable.builder().add(watchRoutingTable).build()) + .metaData(MetaData.builder().put(newIndexMetaDataBuilder)) + .build(); + + ClusterState clusterStateWithoutWatcherIndex = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(nodes) + .build(); + + when(watcherService.state()).thenReturn(WatcherState.STARTED); + + // first add the shard allocation ids, by going from empty cs to CS with watcher index + lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithWatcherIndex, clusterStateWithoutWatcherIndex)); + + // now remove watches index, and ensure that pausing is only called once, no matter how often called (i.e. 
each CS update) + lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithoutWatcherIndex, clusterStateWithWatcherIndex)); + verify(watcherService, times(1)).pauseExecution(anyObject()); + + lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithoutWatcherIndex, clusterStateWithWatcherIndex)); + verify(watcherService, times(1)).pauseExecution(anyObject()); + } + + public void testWatcherDoesNotStartWithOldIndexFormat() throws Exception { + String index = randomFrom(Watch.INDEX, TriggeredWatchStoreField.INDEX_NAME); + Index watchIndex = new Index(index, "foo"); + ShardId shardId = new ShardId(watchIndex, 0); + IndexRoutingTable watchRoutingTable = IndexRoutingTable.builder(watchIndex) + .addShard(TestShardRouting.newShardRouting(shardId, "node_1", true, STARTED)).build(); + DiscoveryNodes nodes = new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1")).build(); + + Settings.Builder indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT); + // no matter if not set or set to one, watcher should not start + if (randomBoolean()) { + indexSettings.put(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 1); + } + IndexMetaData.Builder newIndexMetaDataBuilder = IndexMetaData.builder(index).settings(indexSettings); + + ClusterState clusterStateWithWatcherIndex = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(nodes) + .routingTable(RoutingTable.builder().add(watchRoutingTable).build()) + .metaData(MetaData.builder().put(newIndexMetaDataBuilder)) + .build(); + + ClusterState emptyClusterState = ClusterState.builder(new ClusterName("my-cluster")).nodes(nodes).build(); + + when(watcherService.state()).thenReturn(WatcherState.STOPPED); + when(watcherService.validate(eq(clusterStateWithWatcherIndex))).thenReturn(true); + lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterStateWithWatcherIndex, emptyClusterState)); + verify(watcherService, never()).start(any(ClusterState.class)); + } + + public void testWatcherServiceDoesNotStartIfIndexTemplatesAreMissing() throws Exception { + DiscoveryNodes nodes = new DiscoveryNodes.Builder() + .masterNodeId("node_1").localNodeId("node_1") + .add(newNode("node_1")) + .build(); + + MetaData.Builder metaDataBuilder = MetaData.builder(); + boolean isHistoryTemplateAdded = randomBoolean(); + if (isHistoryTemplateAdded) { + metaDataBuilder.put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())); + } + boolean isTriggeredTemplateAdded = randomBoolean(); + if (isTriggeredTemplateAdded) { + metaDataBuilder.put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())); + } + boolean isWatchesTemplateAdded = randomBoolean(); + if (isWatchesTemplateAdded) { + // ensure not all templates are added, otherwise life cycle service would start + if ((isHistoryTemplateAdded || isTriggeredTemplateAdded) == false) { + metaDataBuilder.put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())); + } + } + ClusterState state = ClusterState.builder(new ClusterName("my-cluster")).nodes(nodes).metaData(metaDataBuilder).build(); + when(watcherService.validate(eq(state))).thenReturn(true); + when(watcherService.state()).thenReturn(WatcherState.STOPPED); + + lifeCycleService.clusterChanged(new ClusterChangedEvent("any", state, state)); + 
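+ // at least one of the three watcher index templates is missing, so the service must not be started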
verify(watcherService, times(0)).start(any(ClusterState.class)); + } + + public void testWatcherStopsWhenMasterNodeIsMissing() { + DiscoveryNodes nodes = new DiscoveryNodes.Builder() + .localNodeId("node_1") + .add(newNode("node_1")) + .build(); + ClusterState state = ClusterState.builder(new ClusterName("my-cluster")).nodes(nodes).build(); + lifeCycleService.clusterChanged(new ClusterChangedEvent("any", state, state)); + verify(watcherService, times(1)).stop(eq("no master node")); + } + + public void testWatcherStopsOnClusterLevelBlock() { + DiscoveryNodes nodes = new DiscoveryNodes.Builder() + .localNodeId("node_1") + .masterNodeId("node_1") + .add(newNode("node_1")) + .build(); + ClusterBlocks clusterBlocks = ClusterBlocks.builder().addGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_WRITES).build(); + ClusterState state = ClusterState.builder(new ClusterName("my-cluster")).nodes(nodes).blocks(clusterBlocks).build(); + lifeCycleService.clusterChanged(new ClusterChangedEvent("any", state, state)); + verify(watcherService, times(1)).stop(eq("write level cluster block")); + } + + public void testStateIsSetImmediately() throws Exception { + Index index = new Index(Watch.INDEX, "foo"); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); + indexRoutingTableBuilder.addShard( + TestShardRouting.newShardRouting(Watch.INDEX, 0, "node_1", true, ShardRoutingState.STARTED)); + IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(Watch.INDEX).settings(settings(Version.CURRENT) + .put(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 6)) // the internal index format, required + .numberOfShards(1).numberOfReplicas(0); + ClusterState state = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1") + .add(newNode("node_1"))) + .routingTable(RoutingTable.builder().add(indexRoutingTableBuilder.build()).build()) + .metaData(MetaData.builder() + .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(indexMetaDataBuilder) + .build()) + .build(); + when(watcherService.validate(state)).thenReturn(true); + when(watcherService.state()).thenReturn(WatcherState.STOPPED); + + lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", state, state)); + verify(watcherService, times(1)).start(eq(state)); + assertThat(lifeCycleService.allocationIds(), hasSize(1)); + + // now do any cluster state upgrade, see that reload gets triggers, but should not + when(watcherService.state()).thenReturn(WatcherState.STARTED); + lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", state, state)); + verify(watcherService, never()).pauseExecution(anyString()); + + verify(watcherService, never()).reload(eq(state), anyString()); + assertThat(lifeCycleService.allocationIds(), hasSize(1)); + } + + public void testWatcherServiceExceptionsAreCaught() { + Index index = new Index(Watch.INDEX, "foo"); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); + indexRoutingTableBuilder.addShard( + TestShardRouting.newShardRouting(Watch.INDEX, 0, "node_1", true, ShardRoutingState.STARTED)); + IndexMetaData indexMetaData = IndexMetaData.builder(Watch.INDEX).settings(settings(Version.CURRENT) + .put(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 6)) // the 
internal index format, required + .numberOfShards(1).numberOfReplicas(0).build(); + + // special setup for one of the following cluster states + DiscoveryNodes discoveryNodes = mock(DiscoveryNodes.class); + DiscoveryNode localNode = mock(DiscoveryNode.class); + when(discoveryNodes.getMasterNodeId()).thenReturn("node_1"); + when(discoveryNodes.getLocalNode()).thenReturn(localNode); + when(localNode.isDataNode()).thenReturn(true); + when(localNode.getId()).thenReturn("does_not_exist"); + + ClusterState clusterState = randomFrom( + // cluster state with no watcher index + ClusterState.builder(new ClusterName("my-cluster")) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) + .metaData(MetaData.builder() + .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .build()) + .build(), + // cluster state with no routing node + ClusterState.builder(new ClusterName("my-cluster")) + .nodes(discoveryNodes) + .metaData(MetaData.builder() + .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .build()) + .build(), + + // cluster state with no local shards + ClusterState.builder(new ClusterName("my-cluster")) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))) + .metaData(MetaData.builder() + .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(indexMetaData, true) + .build()) + .build() + ); + + ClusterState stateWithWatcherShards = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1") + .add(newNode("node_1"))) + .routingTable(RoutingTable.builder().add(indexRoutingTableBuilder.build()).build()) + .metaData(MetaData.builder() + .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(indexMetaData, true) + .build()) + .build(); + + lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", stateWithWatcherShards, stateWithWatcherShards)); + + when(watcherService.validate(anyObject())).thenReturn(true); + when(watcherService.state()).thenReturn(WatcherState.STARTED); + doAnswer(invocation -> { + throw new ElasticsearchSecurityException("breakme"); + }).when(watcherService).pauseExecution(anyString()); + + lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", clusterState, stateWithWatcherShards)); + verify(watcherService, times(1)).pauseExecution(anyString()); + } + + public void testWatcherServiceExceptionsAreCaughtOnReload() { + Index index = new Index(Watch.INDEX, "foo"); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); + indexRoutingTableBuilder.addShard( + 
TestShardRouting.newShardRouting(Watch.INDEX, 0, "node_1", true, ShardRoutingState.STARTED)); + IndexMetaData indexMetaData = IndexMetaData.builder(Watch.INDEX).settings(settings(Version.CURRENT) + .put(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 6)) // the internal index format, required + .numberOfShards(1).numberOfReplicas(0).build(); + + // cluster state with different local shards (another shard id) + ClusterState clusterState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1"))).routingTable( + RoutingTable.builder().add(IndexRoutingTable.builder(index) + .addShard(TestShardRouting.newShardRouting(Watch.INDEX, 1, "node_1", true, ShardRoutingState.STARTED)) + .build()).build()).metaData( + MetaData.builder().put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(indexMetaData, true).build()).build(); + + ClusterState stateWithWatcherShards = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1") + .add(newNode("node_1"))) + .routingTable(RoutingTable.builder().add(indexRoutingTableBuilder.build()).build()) + .metaData(MetaData.builder() + .put(IndexTemplateMetaData.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(TRIGGERED_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(IndexTemplateMetaData.builder(WATCHES_TEMPLATE_NAME).patterns(randomIndexPatterns())) + .put(indexMetaData, true) + .build()) + .build(); + + lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", stateWithWatcherShards, stateWithWatcherShards)); + + when(watcherService.validate(anyObject())).thenReturn(true); + when(watcherService.state()).thenReturn(WatcherState.STARTED); + doAnswer(invocation -> { + throw new ElasticsearchSecurityException("breakme"); + }).when(watcherService).reload(eq(clusterState), anyString()); + + lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", clusterState, stateWithWatcherShards)); + verify(watcherService, times(1)).reload(eq(clusterState), anyString()); + + } + + private List randomIndexPatterns() { + return IntStream.range(0, between(1, 10)) + .mapToObj(n -> randomAlphaOfLengthBetween(1, 100)) + .collect(Collectors.toList()); + } + + private static DiscoveryNode newNode(String nodeName) { + return newNode(nodeName, Version.CURRENT); + } + + private static DiscoveryNode newNode(String nodeName, Version version) { + return new DiscoveryNode(nodeName, ESTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(), + new HashSet<>(asList(DiscoveryNode.Role.values())), version); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherMetaDataSerializationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherMetaDataSerializationTests.java new file mode 100644 index 0000000000000..50a7fec474935 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherMetaDataSerializationTests.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.RepositoriesMetaData; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.XPackClientPlugin; +import org.elasticsearch.xpack.core.watcher.WatcherMetaData; + +import java.util.Collections; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class WatcherMetaDataSerializationTests extends ESTestCase { + public void testXContentSerializationOneSignedWatcher() throws Exception { + boolean manuallyStopped = randomBoolean(); + WatcherMetaData watcherMetaData = new WatcherMetaData(manuallyStopped); + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + builder.startObject("watcher"); + watcherMetaData.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + builder.endObject(); + WatcherMetaData watchersMetaDataFromXContent = getWatcherMetaDataFromXContent(createParser(builder)); + assertThat(watchersMetaDataFromXContent.manuallyStopped(), equalTo(manuallyStopped)); + } + + public void testWatcherMetadataParsingDoesNotSwallowOtherMetaData() throws Exception { + Settings settings = Settings.builder().put("path.home", createTempDir()).build(); + new Watcher(settings); // makes sure WatcherMetaData is registered in Custom MetaData + boolean manuallyStopped = randomBoolean(); + WatcherMetaData watcherMetaData = new WatcherMetaData(manuallyStopped); + RepositoryMetaData repositoryMetaData = new RepositoryMetaData("repo", "fs", Settings.EMPTY); + RepositoriesMetaData repositoriesMetaData = new RepositoriesMetaData(repositoryMetaData); + final MetaData.Builder metaDataBuilder = MetaData.builder(); + if (randomBoolean()) { // random order of insertion + metaDataBuilder.putCustom(watcherMetaData.getWriteableName(), watcherMetaData); + metaDataBuilder.putCustom(repositoriesMetaData.getWriteableName(), repositoriesMetaData); + } else { + metaDataBuilder.putCustom(repositoriesMetaData.getWriteableName(), repositoriesMetaData); + metaDataBuilder.putCustom(watcherMetaData.getWriteableName(), watcherMetaData); + } + // serialize metadata + XContentBuilder builder = XContentFactory.jsonBuilder(); + ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap(MetaData.CONTEXT_MODE_PARAM, + MetaData.CONTEXT_MODE_GATEWAY)); + builder.startObject(); + builder = metaDataBuilder.build().toXContent(builder, params); + builder.endObject(); + // deserialize metadata again + MetaData metaData = MetaData.Builder.fromXContent(createParser(builder)); + // check that custom metadata still present + assertThat(metaData.custom(watcherMetaData.getWriteableName()), notNullValue()); + assertThat(metaData.custom(repositoriesMetaData.getWriteableName()), notNullValue()); 
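// Aside (illustrative only, not part of the original change): the surviving customs can also be
// cast back to their concrete types and checked field by field, for example:
//
//     WatcherMetaData roundTripped = (WatcherMetaData) metaData.custom(watcherMetaData.getWriteableName());
//     assertThat(roundTripped.manuallyStopped(), equalTo(manuallyStopped));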
+ } + + private static WatcherMetaData getWatcherMetaDataFromXContent(XContentParser parser) throws Exception { + parser.nextToken(); // consume null + parser.nextToken(); // consume "watcher" + WatcherMetaData watcherMetaDataFromXContent = (WatcherMetaData)WatcherMetaData.fromXContent(parser); + parser.nextToken(); // consume endObject + assertThat(parser.nextToken(), nullValue()); + return watcherMetaDataFromXContent; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(Stream.concat( + new XPackClientPlugin(Settings.builder().put("path.home", createTempDir()).build()).getNamedXContent().stream(), + ClusterModule.getNamedXWriteables().stream() + ).collect(Collectors.toList())); + } + +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java new file mode 100644 index 0000000000000..c7c2b59caaa50 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AnalysisRegistry; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.threadpool.ExecutorBuilder; +import org.elasticsearch.xpack.core.watcher.watch.Watch; + +import java.util.List; + +import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; + +public class WatcherPluginTests extends ESTestCase { + + public void testValidAutoCreateIndex() { + Watcher.validAutoCreateIndex(Settings.EMPTY, logger); + Watcher.validAutoCreateIndex(Settings.builder().put("action.auto_create_index", true).build(), logger); + + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> Watcher.validAutoCreateIndex(Settings.builder().put("action.auto_create_index", false).build(), logger)); + assertThat(exception.getMessage(), containsString("[.watches, .triggered_watches, .watcher-history-*]")); + + Watcher.validAutoCreateIndex(Settings.builder().put("action.auto_create_index", + ".watches,.triggered_watches,.watcher-history*").build(), logger); + Watcher.validAutoCreateIndex(Settings.builder().put("action.auto_create_index", "*w*").build(), logger); + Watcher.validAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".w*,.t*").build(), logger); + + exception = expectThrows(IllegalArgumentException.class, + () -> Watcher.validAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".watches").build(), logger)); + assertThat(exception.getMessage(), containsString("[.watches, .triggered_watches, .watcher-history-*]")); + + exception = expectThrows(IllegalArgumentException.class, + () -> Watcher.validAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".triggered_watch").build(), logger)); + assertThat(exception.getMessage(), 
containsString("[.watches, .triggered_watches, .watcher-history-*]")); + + exception = expectThrows(IllegalArgumentException.class, + () -> Watcher.validAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".watcher-history-*").build(), + logger)); + assertThat(exception.getMessage(), containsString("[.watches, .triggered_watches, .watcher-history-*]")); + } + + public void testWatcherDisabledTests() throws Exception { + Settings settings = Settings.builder() + .put("xpack.watcher.enabled", false) + .put("path.home", createTempDir()) + .build(); + Watcher watcher = new Watcher(settings); + + List> executorBuilders = watcher.getExecutorBuilders(settings); + assertThat(executorBuilders, hasSize(0)); + assertThat(watcher.createGuiceModules(), hasSize(2)); + assertThat(watcher.getActions(), hasSize(0)); + assertThat(watcher.getRestHandlers(settings, null, null, null, null, null, null), hasSize(0)); + + // ensure index module is not called, even if watches index is tried + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(Watch.INDEX, settings); + AnalysisRegistry registry = new AnalysisRegistry(TestEnvironment.newEnvironment(settings), emptyMap(), emptyMap(), emptyMap(), + emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap()); + IndexModule indexModule = new IndexModule(indexSettings, registry); + // this will trip an assertion if the watcher indexing operation listener is null (which it is) but we try to add it + watcher.onIndexModule(indexModule); + + // also no component creation if not enabled + assertThat(watcher.createComponents(null, null, null, null, null, null, null, null, null), hasSize(0)); + } + + public void testThreadPoolSize() { + // old calculation was 5 * number of processors + assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 1).build()), is(5)); + assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 2).build()), is(10)); + assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 4).build()), is(20)); + assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 8).build()), is(40)); + assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 9).build()), is(45)); + assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 10).build()), is(50)); + assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 16).build()), is(50)); + assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 24).build()), is(50)); + assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 50).build()), is(50)); + assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 51).build()), is(51)); + assertThat(Watcher.getWatcherThreadPoolSize(Settings.builder().put("processors", 96).build()), is(96)); + + Settings noDataNodeSettings = Settings.builder() + .put("processors", scaledRandomIntBetween(1, 100)) + .put("node.data", false) + .build(); + assertThat(Watcher.getWatcherThreadPoolSize(noDataNodeSettings), is(1)); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java new file mode 100644 index 0000000000000..0622ab48227fe --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java @@ -0,0 +1,202 @@ +/* + * Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher; + +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.search.ClearScrollRequest; +import org.elasticsearch.action.search.ClearScrollResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchResponseSections; +import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.text.Text; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; +import org.elasticsearch.xpack.watcher.execution.ExecutionService; +import org.elasticsearch.xpack.watcher.execution.TriggeredWatchStore; +import org.elasticsearch.xpack.watcher.trigger.TriggerService; +import org.elasticsearch.xpack.watcher.watch.WatchParser; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.mockito.ArgumentCaptor; + +import java.util.Collections; +import java.util.HashSet; +import java.util.List; + +import static java.util.Arrays.asList; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class WatcherServiceTests extends ESTestCase { + + public void testValidateStartWithClosedIndex() throws Exception { + TriggerService triggerService = mock(TriggerService.class); + TriggeredWatchStore triggeredWatchStore = mock(TriggeredWatchStore.class); + ExecutionService executionService = 
mock(ExecutionService.class); + when(executionService.validate(anyObject())).thenReturn(true); + WatchParser parser = mock(WatchParser.class); + + WatcherService service = new WatcherService(Settings.EMPTY, triggerService, triggeredWatchStore, + executionService, parser, mock(Client.class)); + + ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name")); + MetaData.Builder metaDataBuilder = MetaData.builder(); + Settings indexSettings = settings(Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + metaDataBuilder.put(IndexMetaData.builder(Watch.INDEX).state(IndexMetaData.State.CLOSE).settings(indexSettings)); + csBuilder.metaData(metaDataBuilder); + + assertThat(service.validate(csBuilder.build()), is(false)); + } + + public void testLoadOnlyActiveWatches() throws Exception { + // this is just, so we dont have to add any mocking to the threadpool + Settings settings = Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), false).build(); + + TriggerService triggerService = mock(TriggerService.class); + TriggeredWatchStore triggeredWatchStore = mock(TriggeredWatchStore.class); + ExecutionService executionService = mock(ExecutionService.class); + when(executionService.validate(anyObject())).thenReturn(true); + WatchParser parser = mock(WatchParser.class); + Client client = mock(Client.class); + ThreadPool threadPool = mock(ThreadPool.class); + when(client.threadPool()).thenReturn(threadPool); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + WatcherService service = new WatcherService(settings, triggerService, triggeredWatchStore, + executionService, parser, client); + + + // cluster state setup, with one node, one shard + ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name")); + MetaData.Builder metaDataBuilder = MetaData.builder(); + Settings indexSettings = settings(Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + metaDataBuilder.put(IndexMetaData.builder(Watch.INDEX).settings(indexSettings)); + csBuilder.metaData(metaDataBuilder); + + Index watchIndex = new Index(Watch.INDEX, "uuid"); + ShardId shardId = new ShardId(watchIndex, 0); + + IndexShardRoutingTable indexShardRoutingTable = new IndexShardRoutingTable.Builder(shardId) + .addShard(TestShardRouting.newShardRouting(shardId, "node", true, ShardRoutingState.STARTED)) + .build(); + + IndexRoutingTable indexRoutingTable = IndexRoutingTable.builder(watchIndex).addIndexShard(indexShardRoutingTable).build(); + RoutingTable routingTable = RoutingTable.builder().add(indexRoutingTable).build(); + csBuilder.routingTable(routingTable); + + csBuilder.nodes(new DiscoveryNodes.Builder().masterNodeId("node").localNodeId("node").add(newNode())); + ClusterState clusterState = csBuilder.build(); + + + // response setup, successful refresh response + RefreshResponse refreshResponse = mock(RefreshResponse.class); + when(refreshResponse.getSuccessfulShards()) + .thenReturn(clusterState.getMetaData().getIndices().get(Watch.INDEX).getNumberOfShards()); + AdminClient adminClient = mock(AdminClient.class); + IndicesAdminClient indicesAdminClient = mock(IndicesAdminClient.class); + when(client.admin()).thenReturn(adminClient); + when(adminClient.indices()).thenReturn(indicesAdminClient); + PlainActionFuture refreshFuture = new PlainActionFuture<>(); + 
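// Note on the stubbing below (descriptive aside; the stated intent is an assumption): the mocked
// indices admin client hands back a PlainActionFuture that is completed up front via
// onResponse(refreshResponse), so when the service later blocks on the refresh result it receives
// the canned response immediately instead of waiting on a real cluster call.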
when(indicesAdminClient.refresh(any(RefreshRequest.class))).thenReturn(refreshFuture); + refreshFuture.onResponse(refreshResponse); + + // empty scroll response, no further scrolling needed + SearchResponseSections scrollSearchSections = new SearchResponseSections(SearchHits.empty(), null, null, false, false, null, 1); + SearchResponse scrollSearchResponse = new SearchResponse(scrollSearchSections, "scrollId", 1, 1, 0, 10, + ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY); + PlainActionFuture searchScrollResponseFuture = new PlainActionFuture<>(); + when(client.searchScroll(any(SearchScrollRequest.class))).thenReturn(searchScrollResponseFuture); + searchScrollResponseFuture.onResponse(scrollSearchResponse); + + // one search response containing active and inactive watches + int count = randomIntBetween(2, 200); + int activeWatchCount = 0; + SearchHit[] hits = new SearchHit[count]; + for (int i = 0; i < count; i++) { + String id = String.valueOf(i); + SearchHit hit = new SearchHit(1, id, new Text("watch"), Collections.emptyMap()); + hit.version(1L); + hit.shard(new SearchShardTarget("nodeId", watchIndex, 0, "whatever")); + hits[i] = hit; + + boolean active = randomBoolean(); + if (active) { + activeWatchCount++; + } + WatchStatus.State state = new WatchStatus.State(active, DateTime.now(DateTimeZone.UTC)); + WatchStatus watchStatus = mock(WatchStatus.class); + Watch watch = mock(Watch.class); + when(watchStatus.state()).thenReturn(state); + when(watch.status()).thenReturn(watchStatus); + when(parser.parse(eq(id), eq(true), any(), eq(XContentType.JSON))).thenReturn(watch); + } + SearchHits searchHits = new SearchHits(hits, count, 1.0f); + SearchResponseSections sections = new SearchResponseSections(searchHits, null, null, false, false, null, 1); + SearchResponse searchResponse = new SearchResponse(sections, "scrollId", 1, 1, 0, 10, ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY); + PlainActionFuture searchResponseFuture = new PlainActionFuture<>(); + when(client.search(any(SearchRequest.class))).thenReturn(searchResponseFuture); + searchResponseFuture.onResponse(searchResponse); + + PlainActionFuture clearScrollFuture = new PlainActionFuture<>(); + when(client.clearScroll(any(ClearScrollRequest.class))).thenReturn(clearScrollFuture); + clearScrollFuture.onResponse(new ClearScrollResponse(true, 1)); + + service.start(clusterState); + + ArgumentCaptor captor = ArgumentCaptor.forClass(List.class); + verify(triggerService).start(captor.capture()); + List watches = captor.getValue(); + watches.forEach(watch -> assertThat(watch.status().state().isActive(), is(true))); + assertThat(watches, hasSize(activeWatchCount)); + } + + private static DiscoveryNode newNode() { + return new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(), + new HashSet<>(asList(DiscoveryNode.Role.values())), Version.CURRENT); + } +} \ No newline at end of file diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherXpackUsageStatsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherXpackUsageStatsTests.java new file mode 100644 index 0000000000000..3a314640d742a --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherXpackUsageStatsTests.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher; + +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.action.XPackUsageAction; +import org.elasticsearch.xpack.core.action.XPackUsageRequest; +import org.elasticsearch.xpack.core.action.XPackUsageResponse; +import org.elasticsearch.xpack.core.watcher.WatcherFeatureSetUsage; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; + +import java.util.Map; +import java.util.Optional; + +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.cron; +import static org.hamcrest.Matchers.is; + +public class WatcherXpackUsageStatsTests extends AbstractWatcherIntegrationTestCase { + + // as these tests use three data nodes, those watches will be across two of those + // nodes due to having two watcher shards, so that we can be sure that the count + // was merged + public void testWatcherUsageStatsTests() { + long watchCount = randomLongBetween(5, 20); + for (int i = 0; i < watchCount; i++) { + watcherClient().preparePutWatch("_id" + i).setSource(watchBuilder() + .trigger(schedule(cron("0/5 * * * * ? 2050"))) + .input(simpleInput()) + .addAction("_id", loggingAction("whatever " + i))) + .get(); + } + + XPackUsageRequest request = new XPackUsageRequest(); + XPackUsageResponse usageResponse = client().execute(XPackUsageAction.INSTANCE, request).actionGet(); + Optional usage = usageResponse.getUsages().stream() + .filter(u -> u instanceof WatcherFeatureSetUsage) + .findFirst(); + assertThat(usage.isPresent(), is(true)); + WatcherFeatureSetUsage featureSetUsage = (WatcherFeatureSetUsage) usage.get(); + + long activeWatchCount = (long) ((Map) featureSetUsage.stats().get("count")).get("active"); + assertThat(activeWatchCount, is(watchCount)); + } + +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/ActionErrorIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/ActionErrorIntegrationTests.java new file mode 100644 index 0000000000000..144a0a75b1516 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/ActionErrorIntegrationTests.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; +import org.elasticsearch.xpack.watcher.actions.index.IndexAction; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; + +import static org.elasticsearch.index.query.QueryBuilders.termsQuery; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.is; + +public class ActionErrorIntegrationTests extends AbstractWatcherIntegrationTestCase { + + /** + * This test makes sure that when an action encounters an error it should + * not be subject to throttling. Also, the ack status of the action in the + * watch should remain awaits_successful_execution as long as the execution + * fails. + */ + public void testErrorInAction() throws Exception { + createIndex("foo"); + client().admin().indices().prepareUpdateSettings("foo").setSettings(Settings.builder().put("index.blocks.write", true)).get(); + + PutWatchResponse putWatchResponse = watcherClient().preparePutWatch("_id").setSource(watchBuilder() + .trigger(schedule(interval("10m"))) + + // adding an action that throws an error and is associated with a 60 minute throttle period + // with such a period, on successful execution we other executions of the watch will be + // throttled within the hour... 
but on failed execution there should be no throttling + .addAction("_action", TimeValue.timeValueMinutes(60), IndexAction.builder("foo", "bar"))) + .get(); + + assertThat(putWatchResponse.isCreated(), is(true)); + + timeWarp().trigger("_id"); + + flush(); + + // there should be a single history record with a failure status for the action: + assertBusy(() -> { + long count = watchRecordCount(QueryBuilders.boolQuery() + .must(termsQuery("result.actions.id", "_action")) + .must(termsQuery("result.actions.status", "failure"))); + assertThat(count, is(1L)); + }); + + // now we'll trigger the watch again and make sure that it's not throttled and instead + // writes another record to the history + + // within the 60 minute throttling period + timeWarp().clock().fastForward(TimeValue.timeValueMinutes(randomIntBetween(1, 50))); + timeWarp().trigger("_id"); + + flush(); + + // there should be a single history record with a failure status for the action: + assertBusy(() -> { + long count = watchRecordCount(QueryBuilders.boolQuery() + .must(termsQuery("result.actions.id", "_action")) + .must(termsQuery("result.actions.status", "failure"))); + assertThat(count, is(2L)); + }); + + // now lets confirm that the ack status of the action is awaits_successful_execution + GetWatchResponse getWatchResponse = watcherClient().prepareGetWatch("_id").get(); + XContentSource watch = getWatchResponse.getSource(); + watch.getValue("status.actions._action.ack.awaits_successful_execution"); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/ActionWrapperTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/ActionWrapperTests.java new file mode 100644 index 0000000000000..7754e622d5a6b --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/ActionWrapperTests.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; +import org.elasticsearch.xpack.core.watcher.actions.ActionWrapper; +import org.elasticsearch.xpack.core.watcher.actions.ActionWrapperResult; +import org.elasticsearch.xpack.core.watcher.actions.ExecutableAction; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; +import org.elasticsearch.xpack.watcher.condition.NeverCondition; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.core.watcher.actions.ActionStatus.AckStatus.State; +import static org.hamcrest.Matchers.is; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ActionWrapperTests extends ESTestCase { + + private DateTime now = DateTime.now(DateTimeZone.UTC); + private Watch watch = mock(Watch.class); + private ExecutableAction executableAction = mock(ExecutableAction.class); + private ActionWrapper actionWrapper = new ActionWrapper("_action", null, NeverCondition.INSTANCE, null, executableAction); + + public void testThatUnmetActionConditionResetsAckStatus() throws Exception { + WatchStatus watchStatus = new WatchStatus(now, Collections.singletonMap("_action", createActionStatus(State.ACKED))); + when(watch.status()).thenReturn(watchStatus); + + ActionWrapperResult result = actionWrapper.execute(mockExecutionContent(watch)); + assertThat(result.condition().met(), is(false)); + assertThat(result.action().status(), is(Action.Result.Status.CONDITION_FAILED)); + assertThat(watch.status().actionStatus("_action").ackStatus().state(), is(State.AWAITS_SUCCESSFUL_EXECUTION)); + } + + public void testOtherActionsAreNotAffectedOnActionConditionReset() throws Exception { + Map statusMap = new HashMap<>(); + statusMap.put("_action", createActionStatus(State.ACKED)); + State otherState = randomFrom(State.ACKABLE, State.AWAITS_SUCCESSFUL_EXECUTION); + statusMap.put("other", createActionStatus(otherState)); + + WatchStatus watchStatus = new WatchStatus(now, statusMap); + when(watch.status()).thenReturn(watchStatus); + + actionWrapper.execute(mockExecutionContent(watch)); + assertThat(watch.status().actionStatus("other").ackStatus().state(), is(otherState)); + } + + private WatchExecutionContext mockExecutionContent(Watch watch) { + WatchExecutionContext ctx = mock(WatchExecutionContext.class); + when(watch.id()).thenReturn("watchId"); + when(ctx.watch()).thenReturn(watch); + when(ctx.skipThrottling(eq("_action"))).thenReturn(true); + return ctx; + } + + private ActionStatus createActionStatus(State state) { + ActionStatus.AckStatus ackStatus = new ActionStatus.AckStatus(now, state); + ActionStatus.Execution execution = ActionStatus.Execution.successful(now); + return new ActionStatus(ackStatus, execution, execution, null); + } +} \ No newline at end of file diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/TimeThrottleIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/TimeThrottleIntegrationTests.java new file mode 100644 index 0000000000000..c0be6c1c17f6b --- 
/dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/TimeThrottleIntegrationTests.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.sort.SortBuilders; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; +import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; + +import java.util.Map; + +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.indexAction; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.is; + +@TestLogging("org.elasticsearch.xpack.watcher:DEBUG," + + "org.elasticsearch.xpack.watcher.WatcherLifeCycleService:DEBUG," + + "org.elasticsearch.xpack.watcher.trigger.ScheduleTriggerMock:TRACE," + + "org.elasticsearch.xpack.watcher.WatcherIndexingListener:TRACE") +public class TimeThrottleIntegrationTests extends AbstractWatcherIntegrationTestCase { + + public void testTimeThrottle(){ + String id = randomAlphaOfLength(20); + PutWatchResponse putWatchResponse = watcherClient().preparePutWatch() + .setId(id) + .setSource(watchBuilder() + .trigger(schedule(interval("5s"))) + .input(simpleInput()) + .addAction("my-logging-action", loggingAction("foo")) + .defaultThrottlePeriod(TimeValue.timeValueSeconds(30))) + .get(); + assertThat(putWatchResponse.isCreated(), is(true)); + + timeWarp().trigger(id); + assertHistoryEntryExecuted(id); + + timeWarp().clock().fastForward(TimeValue.timeValueMillis(4000)); + timeWarp().trigger(id); + assertHistoryEntryThrottled(id); + + timeWarp().clock().fastForwardSeconds(30); + timeWarp().trigger(id); + assertHistoryEntryExecuted(id); + + assertTotalHistoryEntries(id, 3); + } + + public void testTimeThrottleDefaults() { + String id = randomAlphaOfLength(30); + PutWatchResponse putWatchResponse = watcherClient().preparePutWatch() + .setId(id) + .setSource(watchBuilder() + .trigger(schedule(interval("1s"))) + .input(simpleInput()) + .addAction("my-logging-action", indexAction("my_watcher_index", "action"))) + .get(); + assertThat(putWatchResponse.isCreated(), is(true)); + + timeWarp().trigger(id); + assertHistoryEntryExecuted(id); + + timeWarp().clock().fastForwardSeconds(2); + timeWarp().trigger(id); + 
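// Only ~2 seconds have elapsed since the last execution; the watch defines no throttle period of
// its own, so the cluster-wide default applies (assumed here to be the stock 5s default), and this
// trigger is expected to be recorded as throttled.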
assertHistoryEntryThrottled(id); + + timeWarp().clock().fastForwardSeconds(10); + timeWarp().trigger(id); + assertHistoryEntryExecuted(id); + + assertTotalHistoryEntries(id, 3); + } + + private void assertHistoryEntryExecuted(String id) { + Map map = assertLatestHistoryEntry(id); + String actionStatus = ObjectPath.eval("result.actions.0.status", map); + assertThat(actionStatus, is("success")); + } + + private void assertHistoryEntryThrottled(String id) { + Map map = assertLatestHistoryEntry(id); + String actionStatus = ObjectPath.eval("result.actions.0.status", map); + assertThat(actionStatus, is("throttled")); + } + + private Map assertLatestHistoryEntry(String id) { + refresh(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*"); + + SearchResponse searchResponse = client().prepareSearch(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*") + .setSize(1) + .setSource(new SearchSourceBuilder().query(QueryBuilders.boolQuery() + .must(termQuery("watch_id", id)))) + .addSort(SortBuilders.fieldSort("result.execution_time").order(SortOrder.DESC)) + .get(); + + Map map = searchResponse.getHits().getHits()[0].getSourceAsMap(); + String actionId = ObjectPath.eval("result.actions.0.id", map); + assertThat(actionId, is("my-logging-action")); + return map; + } + + private void assertTotalHistoryEntries(String id, long expectedCount) { + SearchResponse searchResponse = client().prepareSearch(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*") + .setSize(0) + .setSource(new SearchSourceBuilder().query(QueryBuilders.boolQuery().must(termQuery("watch_id", id)))) + .get(); + + assertHitCount(searchResponse, expectedCount); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailActionTests.java new file mode 100644 index 0000000000000..83b48cb9f4f0a --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailActionTests.java @@ -0,0 +1,605 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions.email; + +import io.netty.handler.codec.http.HttpHeaders; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.common.secret.Secret; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuthRegistry; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuthFactory; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.email.Attachment; +import org.elasticsearch.xpack.watcher.notification.email.Authentication; +import org.elasticsearch.xpack.watcher.notification.email.Email; +import org.elasticsearch.xpack.watcher.notification.email.EmailService; +import org.elasticsearch.xpack.watcher.notification.email.EmailTemplate; +import org.elasticsearch.xpack.watcher.notification.email.HtmlSanitizer; +import org.elasticsearch.xpack.watcher.notification.email.Profile; +import org.elasticsearch.xpack.watcher.notification.email.attachment.DataAttachmentParser; +import org.elasticsearch.xpack.watcher.notification.email.attachment.EmailAttachmentParser; +import org.elasticsearch.xpack.watcher.notification.email.attachment.EmailAttachments; +import org.elasticsearch.xpack.watcher.notification.email.attachment.EmailAttachmentsParser; +import org.elasticsearch.xpack.watcher.notification.email.attachment.HttpEmailAttachementParser; +import org.elasticsearch.xpack.watcher.notification.email.attachment.HttpRequestAttachment; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.elasticsearch.xpack.watcher.test.MockTextTemplateEngine; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.junit.Before; + +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.mockExecutionContextBuilder; +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; +import static 
org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.startsWith; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class EmailActionTests extends ESTestCase { + + private HttpAuthRegistry registry = new HttpAuthRegistry(singletonMap("basic", new BasicAuthFactory(null))); + private HttpClient httpClient = mock(HttpClient.class); + private EmailAttachmentsParser emailAttachmentParser; + + @Before + public void addEmailAttachmentParsers() { + Map emailAttachmentParsers = new HashMap<>(); + emailAttachmentParsers.put(HttpEmailAttachementParser.TYPE, new HttpEmailAttachementParser(httpClient, + new HttpRequestTemplate.Parser(registry), new MockTextTemplateEngine())); + emailAttachmentParsers.put(DataAttachmentParser.TYPE, new DataAttachmentParser()); + emailAttachmentParser = new EmailAttachmentsParser(emailAttachmentParsers); + } + + public void testExecute() throws Exception { + final String account = "account1"; + EmailService service = new AbstractWatcherIntegrationTestCase.NoopEmailService(); + TextTemplateEngine engine = mock(TextTemplateEngine.class); + HtmlSanitizer htmlSanitizer = mock(HtmlSanitizer.class); + + EmailTemplate.Builder emailBuilder = EmailTemplate.builder(); + TextTemplate subject = null; + if (randomBoolean()) { + subject = new TextTemplate("_subject"); + emailBuilder.subject(subject); + } + TextTemplate textBody = null; + if (randomBoolean()) { + textBody = new TextTemplate("_text_body"); + emailBuilder.textBody(textBody); + } + TextTemplate htmlBody = null; + if (randomBoolean()) { + htmlBody = new TextTemplate("_html_body"); + emailBuilder.htmlBody(htmlBody); + } + EmailTemplate email = emailBuilder.build(); + + Authentication auth = new Authentication("user", new Secret("passwd".toCharArray())); + Profile profile = randomFrom(Profile.values()); + + org.elasticsearch.xpack.watcher.notification.email.DataAttachment dataAttachment = randomDataAttachment(); + EmailAttachments emailAttachments = randomEmailAttachments(); + + EmailAction action = new EmailAction(email, account, auth, profile, dataAttachment, emailAttachments); + ExecutableEmailAction executable = new ExecutableEmailAction(action, logger, service, engine, htmlSanitizer, + emailAttachmentParser.getParsers()); + + Map data = new HashMap<>(); + Payload payload = new Payload.Simple(data); + + Map metadata = MapBuilder.newMapBuilder().put("_key", "_val").map(); + + DateTime now = DateTime.now(DateTimeZone.UTC); + + Wid wid = new Wid("watch1", now); + WatchExecutionContext ctx = mockExecutionContextBuilder("watch1") + .wid(wid) + .payload(payload) + .time("watch1", now) + .metadata(metadata) + .buildMock(); + + Map triggerModel = new HashMap<>(); + triggerModel.put("triggered_time", now); + triggerModel.put("scheduled_time", now); + Map ctxModel = new HashMap<>(); + ctxModel.put("id", ctx.id().value()); + ctxModel.put("watch_id", "watch1"); + ctxModel.put("payload", data); + ctxModel.put("metadata", metadata); + ctxModel.put("execution_time", now); + ctxModel.put("trigger", triggerModel); + ctxModel.put("vars", emptyMap()); + Map expectedModel = singletonMap("ctx", ctxModel); + + if 
(subject != null) { + when(engine.render(subject, expectedModel)).thenReturn(subject.getTemplate()); + } + if (textBody != null) { + when(engine.render(textBody, expectedModel)).thenReturn(textBody.getTemplate()); + } + if (htmlBody != null) { + when(htmlSanitizer.sanitize(htmlBody.getTemplate())).thenReturn(htmlBody.getTemplate()); + when(engine.render(htmlBody, expectedModel)).thenReturn(htmlBody.getTemplate()); + } + + Action.Result result = executable.execute("_id", ctx, payload); + + assertThat(result, notNullValue()); + assertThat(result, instanceOf(EmailAction.Result.Success.class)); + assertThat(((EmailAction.Result.Success) result).account(), equalTo(account)); + Email actualEmail = ((EmailAction.Result.Success) result).email(); + assertThat(actualEmail.id(), is("_id_" + wid.value())); + assertThat(actualEmail, notNullValue()); + assertThat(actualEmail.subject(), is(subject == null ? null : subject.getTemplate())); + assertThat(actualEmail.textBody(), is(textBody == null ? null : textBody.getTemplate())); + assertThat(actualEmail.htmlBody(), is(htmlBody == null ? null : htmlBody.getTemplate())); + if (dataAttachment != null) { + assertThat(actualEmail.attachments(), hasKey("data")); + } + } + + public void testParser() throws Exception { + TextTemplateEngine engine = mock(TextTemplateEngine.class); + EmailService emailService = mock(EmailService.class); + Profile profile = randomFrom(Profile.values()); + Email.Priority priority = randomFrom(Email.Priority.values()); + Email.Address[] to = rarely() ? null : Email.AddressList.parse(randomBoolean() ? "to@domain" : "to1@domain,to2@domain").toArray(); + Email.Address[] cc = rarely() ? null : Email.AddressList.parse(randomBoolean() ? "cc@domain" : "cc1@domain,cc2@domain").toArray(); + Email.Address[] bcc = rarely() ? null : Email.AddressList.parse( + randomBoolean() ? "bcc@domain" : "bcc1@domain,bcc2@domain").toArray(); + Email.Address[] replyTo = rarely() ? null : Email.AddressList.parse( + randomBoolean() ? "reply@domain" : "reply1@domain,reply2@domain").toArray(); + TextTemplate subject = randomBoolean() ? new TextTemplate("_subject") : null; + TextTemplate textBody = randomBoolean() ? new TextTemplate("_text_body") : null; + TextTemplate htmlBody = randomBoolean() ? 
new TextTemplate("_text_html") : null; + org.elasticsearch.xpack.watcher.notification.email.DataAttachment dataAttachment = randomDataAttachment(); + XContentBuilder builder = jsonBuilder().startObject() + .field("account", "_account") + .field("profile", profile.name()) + .field("user", "_user") + .field("password", "_passwd") + .field("from", "from@domain") + .field("priority", priority.name()); + if (dataAttachment != null) { + builder.field("attach_data", dataAttachment); + } else if (randomBoolean()) { + dataAttachment = org.elasticsearch.xpack.watcher.notification.email.DataAttachment.DEFAULT; + builder.field("attach_data", true); + } else if (randomBoolean()) { + builder.field("attach_data", false); + } + + if (to != null) { + if (to.length == 1) { + builder.field("to", to[0]); + } else { + builder.array("to", (Object[]) to); + } + } + if (cc != null) { + if (cc.length == 1) { + builder.field("cc", cc[0]); + } else { + builder.array("cc", (Object[]) cc); + } + } + if (bcc != null) { + if (bcc.length == 1) { + builder.field("bcc", bcc[0]); + } else { + builder.array("bcc", (Object[]) bcc); + } + } + if (replyTo != null) { + if (replyTo.length == 1) { + builder.field("reply_to", replyTo[0]); + } else { + builder.array("reply_to", (Object[]) replyTo); + } + } + if (subject != null) { + if (randomBoolean()) { + builder.field("subject", subject.getTemplate()); + } else { + builder.field("subject", subject); + } + } + if (textBody != null && htmlBody == null) { + if (randomBoolean()) { + builder.field("body", textBody.getTemplate()); + } else { + builder.startObject("body"); + if (randomBoolean()) { + builder.field("text", textBody.getTemplate()); + } else { + builder.field("text", textBody); + } + builder.endObject(); + } + } else if (textBody != null || htmlBody != null) { + builder.startObject("body"); + if (textBody != null) { + if (randomBoolean()) { + builder.field("text", textBody.getTemplate()); + } else { + builder.field("text", textBody); + } + } + if (htmlBody != null) { + if (randomBoolean()) { + builder.field("html", htmlBody.getTemplate()); + } else { + builder.field("html", htmlBody); + } + } + builder.endObject(); + } + builder.endObject(); + + BytesReference bytes = BytesReference.bytes(builder); + logger.info("email action json [{}]", bytes.utf8ToString()); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + + ExecutableEmailAction executable = new EmailActionFactory(Settings.EMPTY, emailService, engine, + emailAttachmentParser) + .parseExecutable(randomAlphaOfLength(8), randomAlphaOfLength(3), parser); + + assertThat(executable, notNullValue()); + assertThat(executable.action().getAccount(), is("_account")); + if (dataAttachment == null) { + assertThat(executable.action().getDataAttachment(), nullValue()); + } else { + assertThat(executable.action().getDataAttachment(), is(dataAttachment)); + } + assertThat(executable.action().getAuth(), notNullValue()); + assertThat(executable.action().getAuth().user(), is("_user")); + assertThat(executable.action().getAuth().password(), is(new Secret("_passwd".toCharArray()))); + assertThat(executable.action().getEmail().priority(), is(new TextTemplate(priority.name()))); + if (to != null) { + assertThat(executable.action().getEmail().to(), arrayContainingInAnyOrder(addressesToTemplates(to))); + } else { + assertThat(executable.action().getEmail().to(), nullValue()); + } + if (cc != null) { + assertThat(executable.action().getEmail().cc(), 
arrayContainingInAnyOrder(addressesToTemplates(cc))); + } else { + assertThat(executable.action().getEmail().cc(), nullValue()); + } + if (bcc != null) { + assertThat(executable.action().getEmail().bcc(), arrayContainingInAnyOrder(addressesToTemplates(bcc))); + } else { + assertThat(executable.action().getEmail().bcc(), nullValue()); + } + if (replyTo != null) { + assertThat(executable.action().getEmail().replyTo(), arrayContainingInAnyOrder(addressesToTemplates(replyTo))); + } else { + assertThat(executable.action().getEmail().replyTo(), nullValue()); + } + } + + private static TextTemplate[] addressesToTemplates(Email.Address[] addresses) { + TextTemplate[] templates = new TextTemplate[addresses.length]; + for (int i = 0; i < templates.length; i++) { + templates[i] = new TextTemplate(addresses[i].toString()); + } + return templates; + } + + public void testParserSelfGenerated() throws Exception { + EmailService service = mock(EmailService.class); + TextTemplateEngine engine = mock(TextTemplateEngine.class); + HtmlSanitizer htmlSanitizer = mock(HtmlSanitizer.class); + EmailTemplate.Builder emailTemplate = EmailTemplate.builder(); + if (randomBoolean()) { + emailTemplate.from("from@domain"); + } + if (randomBoolean()) { + emailTemplate.to(randomBoolean() ? "to@domain" : "to1@domain,to2@domain"); + } + if (randomBoolean()) { + emailTemplate.cc(randomBoolean() ? "cc@domain" : "cc1@domain,cc2@domain"); + } + if (randomBoolean()) { + emailTemplate.bcc(randomBoolean() ? "bcc@domain" : "bcc1@domain,bcc2@domain"); + } + if (randomBoolean()) { + emailTemplate.replyTo(randomBoolean() ? "reply@domain" : "reply1@domain,reply2@domain"); + } + if (randomBoolean()) { + emailTemplate.subject("_subject"); + } + if (randomBoolean()) { + emailTemplate.textBody("_text_body"); + } + if (randomBoolean()) { + emailTemplate.htmlBody("_html_body"); + } + EmailTemplate email = emailTemplate.build(); + Authentication auth = randomBoolean() ? 
null : new Authentication("_user", new Secret("_passwd".toCharArray())); + Profile profile = randomFrom(Profile.values()); + String account = randomAlphaOfLength(6); + org.elasticsearch.xpack.watcher.notification.email.DataAttachment dataAttachment = randomDataAttachment(); + EmailAttachments emailAttachments = randomEmailAttachments(); + + EmailAction action = new EmailAction(email, account, auth, profile, dataAttachment, emailAttachments); + ExecutableEmailAction executable = new ExecutableEmailAction(action, logger, service, engine, htmlSanitizer, + emailAttachmentParser.getParsers()); + + boolean hideSecrets = randomBoolean(); + ToXContent.Params params = WatcherParams.builder().hideSecrets(hideSecrets).build(); + + XContentBuilder builder = jsonBuilder(); + executable.toXContent(builder, params); + BytesReference bytes = BytesReference.bytes(builder); + logger.info("{}", bytes.utf8ToString()); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + + ExecutableEmailAction parsed = new EmailActionFactory(Settings.EMPTY, service, engine, emailAttachmentParser) + .parseExecutable(randomAlphaOfLength(4), randomAlphaOfLength(10), parser); + + if (!hideSecrets) { + assertThat(parsed, equalTo(executable)); + } else { + assertThat(parsed.action().getAccount(), is(executable.action().getAccount())); + assertThat(parsed.action().getEmail(), is(executable.action().getEmail())); + if (executable.action().getDataAttachment() == null) { + assertThat(parsed.action().getDataAttachment(), nullValue()); + } else { + assertThat(parsed.action().getDataAttachment(), is(executable.action().getDataAttachment())); + } + if (auth != null) { + assertThat(parsed.action().getAuth().user(), is(executable.action().getAuth().user())); + assertThat(parsed.action().getAuth().password(), notNullValue()); + assertThat(parsed.action().getAuth().password().value(), startsWith("::es_redacted::")); + assertThat(executable.action().getAuth().password(), notNullValue()); + } + } + } + + public void testParserInvalid() throws Exception { + EmailService emailService = mock(EmailService.class); + TextTemplateEngine engine = mock(TextTemplateEngine.class); + EmailAttachmentsParser emailAttachmentsParser = mock(EmailAttachmentsParser.class); + + XContentBuilder builder = jsonBuilder().startObject().field("unknown_field", "value").endObject(); + XContentParser parser = createParser(builder); + parser.nextToken(); + try { + new EmailActionFactory(Settings.EMPTY, emailService, engine, emailAttachmentsParser) + .parseExecutable(randomAlphaOfLength(3), randomAlphaOfLength(7), parser); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), containsString("unexpected string field [unknown_field]")); + } + } + + public void testRequestAttachmentGetsAppendedToEmailAttachments() throws Exception { + String attachmentId = "my_attachment"; + + // setup mock response + Map headers = new HashMap<>(1); + headers.put(HttpHeaders.Names.CONTENT_TYPE, new String[]{"plain/text"}); + String content = "My wonderful text"; + HttpResponse mockResponse = new HttpResponse(200, content, headers); + when(httpClient.execute(any(HttpRequest.class))).thenReturn(mockResponse); + + XContentBuilder builder = jsonBuilder().startObject() + .startObject("attachments") + // http attachment + .startObject(attachmentId) + .startObject("http") + .startObject("request") + .field("host", "localhost") + .field("port", 443) + .field("path", "/the/evil/test") + .endObject() + .endObject() + .endObject() + 
.endObject() + .endObject(); + XContentParser parser = createParser(builder); + logger.info("JSON: {}", Strings.toString(builder)); + + parser.nextToken(); + + EmailActionFactory emailActionFactory = createEmailActionFactory(); + ExecutableEmailAction executableEmailAction = + emailActionFactory.parseExecutable(randomAlphaOfLength(3), randomAlphaOfLength(7), parser); + + Action.Result result = executableEmailAction.execute("test", createWatchExecutionContext(), new Payload.Simple()); + assertThat(result, instanceOf(EmailAction.Result.Success.class)); + + EmailAction.Result.Success successResult = (EmailAction.Result.Success) result; + Map attachments = successResult.email().attachments(); + assertThat(attachments.keySet(), hasSize(1)); + assertThat(attachments, hasKey(attachmentId)); + + Attachment externalAttachment = attachments.get(attachmentId); + assertThat(externalAttachment.bodyPart(), is(notNullValue())); + InputStream is = externalAttachment.bodyPart().getInputStream(); + String data = Streams.copyToString(new InputStreamReader(is, StandardCharsets.UTF_8)); + assertThat(data, is(content)); + } + + public void testThatDataAttachmentGetsAttachedWithId() throws Exception { + String attachmentId = randomAlphaOfLength(10) + ".yml"; + + XContentBuilder builder = jsonBuilder().startObject() + .startObject("attachments") + .startObject(attachmentId) + .startObject("data") + .endObject() + .endObject() + .endObject() + .endObject(); + XContentParser parser = createParser(builder); + logger.info("JSON: {}", Strings.toString(builder)); + + parser.nextToken(); + + EmailActionFactory emailActionFactory = createEmailActionFactory(); + ExecutableEmailAction executableEmailAction = + emailActionFactory.parseExecutable(randomAlphaOfLength(3), randomAlphaOfLength(7), parser); + + Action.Result result = executableEmailAction.execute("test", createWatchExecutionContext(), new Payload.Simple()); + assertThat(result, instanceOf(EmailAction.Result.Success.class)); + + EmailAction.Result.Success successResult = (EmailAction.Result.Success) result; + Map attachments = successResult.email().attachments(); + + assertThat(attachments, hasKey(attachmentId)); + Attachment dataAttachment = attachments.get(attachmentId); + assertThat(dataAttachment.name(), is(attachmentId)); + assertThat(dataAttachment.type(), is("yaml")); + assertThat(dataAttachment.contentType(), is("application/yaml")); + } + + public void testThatOneFailedEmailAttachmentResultsInActionFailure() throws Exception { + EmailService emailService = new AbstractWatcherIntegrationTestCase.NoopEmailService(); + TextTemplateEngine engine = new MockTextTemplateEngine(); + HttpClient httpClient = mock(HttpClient.class); + + // setup mock response, second one is an error + Map headers = new HashMap<>(1); + headers.put(HttpHeaders.Names.CONTENT_TYPE, new String[]{"plain/text"}); + when(httpClient.execute(any(HttpRequest.class))) + .thenReturn(new HttpResponse(200, "body", headers)) + .thenReturn(new HttpResponse(403)); + + // setup email attachment parsers + HttpRequestTemplate.Parser httpRequestTemplateParser = new HttpRequestTemplate.Parser(registry); + Map attachmentParsers = new HashMap<>(); + attachmentParsers.put(HttpEmailAttachementParser.TYPE, new HttpEmailAttachementParser(httpClient, httpRequestTemplateParser, + engine)); + EmailAttachmentsParser emailAttachmentsParser = new EmailAttachmentsParser(attachmentParsers); + + XContentBuilder builder = jsonBuilder().startObject() + .startObject("attachments") + .startObject("first") + 
.startObject("http") + .startObject("request").field("url", "http://localhost/first").endObject() + .endObject() + .endObject() + .startObject("second") + .startObject("http") + .startObject("request").field("url", "http://localhost/second").endObject() + .endObject() + .endObject() + .endObject() + .endObject(); + XContentParser parser = createParser(builder); + + parser.nextToken(); + + ExecutableEmailAction executableEmailAction = new EmailActionFactory(Settings.EMPTY, emailService, engine, + emailAttachmentsParser).parseExecutable(randomAlphaOfLength(3), randomAlphaOfLength(7), parser); + + DateTime now = DateTime.now(DateTimeZone.UTC); + Wid wid = new Wid(randomAlphaOfLength(5), now); + Map metadata = MapBuilder.newMapBuilder().put("_key", "_val").map(); + WatchExecutionContext ctx = mockExecutionContextBuilder("watch1") + .wid(wid) + .payload(new Payload.Simple()) + .time("watch1", now) + .metadata(metadata) + .buildMock(); + + Action.Result result = executableEmailAction.execute("test", ctx, new Payload.Simple()); + assertThat(result, instanceOf(EmailAction.Result.FailureWithException.class)); + EmailAction.Result.FailureWithException failure = (EmailAction.Result.FailureWithException) result; + assertThat(failure.getException().getMessage(), + is("Watch[watch1] attachment[second] HTTP error status host[localhost], port[80], method[GET], path[/second], " + + "status[403]")); + } + + private EmailActionFactory createEmailActionFactory() { + EmailService emailService = new AbstractWatcherIntegrationTestCase.NoopEmailService(); + TextTemplateEngine engine = mock(TextTemplateEngine.class); + + return new EmailActionFactory(Settings.EMPTY, emailService, engine, emailAttachmentParser); + } + + private WatchExecutionContext createWatchExecutionContext() { + DateTime now = DateTime.now(DateTimeZone.UTC); + Wid wid = new Wid(randomAlphaOfLength(5), now); + Map metadata = MapBuilder.newMapBuilder().put("_key", "_val").map(); + return mockExecutionContextBuilder("watch1") + .wid(wid) + .payload(new Payload.Simple()) + .time("watch1", now) + .metadata(metadata) + .buildMock(); + } + + static org.elasticsearch.xpack.watcher.notification.email.DataAttachment randomDataAttachment() { + return randomFrom(org.elasticsearch.xpack.watcher.notification.email.DataAttachment.JSON, + org.elasticsearch.xpack.watcher.notification.email.DataAttachment.YAML, null); + } + + private EmailAttachments randomEmailAttachments() throws IOException { + List attachments = new ArrayList<>(); + + String attachmentType = randomFrom("http", "data", null); + if ("http".equals(attachmentType)) { + Map headers = new HashMap<>(1); + headers.put(HttpHeaders.Names.CONTENT_TYPE, new String[]{"plain/text"}); + String content = "My wonderful text"; + HttpResponse mockResponse = new HttpResponse(200, content, headers); + when(httpClient.execute(any(HttpRequest.class))).thenReturn(mockResponse); + + HttpRequestTemplate template = HttpRequestTemplate.builder("localhost", 1234).build(); + attachments.add(new HttpRequestAttachment(randomAlphaOfLength(10), template, + randomBoolean(), randomFrom("my/custom-type", null))); + } else if ("data".equals(attachmentType)) { + attachments.add(new org.elasticsearch.xpack.watcher.notification.email.attachment.DataAttachment(randomAlphaOfLength(10), + randomFrom(org.elasticsearch.xpack.watcher.notification.email.DataAttachment.JSON, org.elasticsearch.xpack.watcher + .notification.email.DataAttachment.YAML))); + } + + return new EmailAttachments(attachments); + } +} diff --git 
a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailAttachmentTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailAttachmentTests.java new file mode 100644 index 0000000000000..88f837d714024 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailAttachmentTests.java @@ -0,0 +1,197 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.email; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.http.Scheme; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.notification.email.EmailTemplate; +import org.elasticsearch.xpack.watcher.notification.email.attachment.DataAttachment; +import org.elasticsearch.xpack.watcher.notification.email.attachment.EmailAttachmentParser; +import org.elasticsearch.xpack.watcher.notification.email.attachment.EmailAttachments; +import org.elasticsearch.xpack.watcher.notification.email.attachment.HttpRequestAttachment; +import org.elasticsearch.xpack.watcher.notification.email.support.EmailServer; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; +import org.junit.After; + +import javax.mail.BodyPart; +import javax.mail.Multipart; +import javax.mail.Part; +import javax.mail.internet.MimeMessage; + +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.emailAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.noneInput; +import static org.elasticsearch.xpack.watcher.notification.email.DataAttachment.JSON; +import static org.elasticsearch.xpack.watcher.notification.email.DataAttachment.YAML; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; +import static 
org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.startsWith; + +public class EmailAttachmentTests extends AbstractWatcherIntegrationTestCase { + + private MockWebServer webServer = new MockWebServer(); + private MockResponse mockResponse = new MockResponse().setResponseCode(200) + .addHeader("Content-Type", "application/foo").setBody("This is the content"); + private EmailServer server; + + @Override + public void setUp() throws Exception { + super.setUp(); + webServer.enqueue(mockResponse); + webServer.start(); + + server = EmailServer.localhost(logger); + } + + @After + public void cleanup() throws Exception { + server.stop(); + webServer.close(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put("xpack.notification.email.account.test.smtp.auth", true) + .put("xpack.notification.email.account.test.smtp.user", EmailServer.USERNAME) + .put("xpack.notification.email.account.test.smtp.password", EmailServer.PASSWORD) + .put("xpack.notification.email.account.test.smtp.port", server.port()) + .put("xpack.notification.email.account.test.smtp.host", "localhost") + .build(); + } + + public List getAttachments(MimeMessage message) throws Exception { + Object content = message.getContent(); + if (content instanceof String) + return null; + + if (content instanceof Multipart) { + Multipart multipart = (Multipart) content; + List result = new ArrayList<>(); + + for (int i = 0; i < multipart.getCount(); i++) { + result.addAll(getAttachments(multipart.getBodyPart(i))); + } + return result; + + } + return null; + } + + private List getAttachments(BodyPart part) throws Exception { + List result = new ArrayList<>(); + Object content = part.getContent(); + if (content instanceof InputStream || content instanceof String) { + if (Part.ATTACHMENT.equalsIgnoreCase(part.getDisposition()) || Strings.hasLength(part.getFileName())) { + result.add(Streams.copyToString(new InputStreamReader(part.getInputStream(), StandardCharsets.UTF_8))); + return result; + } else { + return new ArrayList<>(); + } + } + + if (content instanceof Multipart) { + Multipart multipart = (Multipart) content; + for (int i = 0; i < multipart.getCount(); i++) { + BodyPart bodyPart = multipart.getBodyPart(i); + result.addAll(getAttachments(bodyPart)); + } + } + return result; + } + + public void testThatEmailAttachmentsAreSent() throws Exception { + org.elasticsearch.xpack.watcher.notification.email.DataAttachment dataFormat = randomFrom(JSON, YAML); + final CountDownLatch latch = new CountDownLatch(1); + server.addListener(message -> { + assertThat(message.getSubject(), equalTo("Subject")); + List attachments = getAttachments(message); + if (dataFormat == YAML) { + assertThat(attachments, hasItem(allOf(startsWith("---"), containsString("_test_id")))); + } else { + assertThat(attachments, hasItem(allOf(startsWith("{"), containsString("_test_id")))); + } + assertThat(attachments, hasItem(containsString("This is the content"))); + latch.countDown(); + }); + + WatcherClient watcherClient = watcherClient(); + createIndex("idx"); + // Have a sample document in the index, the watch is going to evaluate + client().prepareIndex("idx", "type").setSource("field", "value").get(); + refresh(); + + List attachments = new ArrayList<>(); + + DataAttachment dataAttachment = DataAttachment.builder("my-id").dataAttachment(dataFormat).build(); + attachments.add(dataAttachment); + + HttpRequestTemplate 
requestTemplate = HttpRequestTemplate.builder("localhost", webServer.getPort()) + .path("/").scheme(Scheme.HTTP).build(); + HttpRequestAttachment httpRequestAttachment = HttpRequestAttachment.builder("other-id") + .httpRequestTemplate(requestTemplate).build(); + + attachments.add(httpRequestAttachment); + EmailAttachments emailAttachments = new EmailAttachments(attachments); + XContentBuilder tmpBuilder = jsonBuilder(); + tmpBuilder.startObject(); + emailAttachments.toXContent(tmpBuilder, ToXContent.EMPTY_PARAMS); + tmpBuilder.endObject(); + + EmailTemplate.Builder emailBuilder = EmailTemplate.builder().from("_from").to("_to").subject("Subject"); + WatchSourceBuilder watchSourceBuilder = watchBuilder() + .trigger(schedule(interval(5, IntervalSchedule.Interval.Unit.SECONDS))) + .input(noneInput()) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_email", emailAction(emailBuilder).setAuthentication(EmailServer.USERNAME, EmailServer.PASSWORD.toCharArray()) + .setAttachments(emailAttachments)); + + watcherClient.preparePutWatch("_test_id") + .setSource(watchSourceBuilder) + .get(); + + timeWarp().trigger("_test_id"); + refresh(); + + SearchResponse searchResponse = client().prepareSearch(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*") + .setQuery(QueryBuilders.termQuery("watch_id", "_test_id")) + .execute().actionGet(); + assertHitCount(searchResponse, 1); + + if (!latch.await(5, TimeUnit.SECONDS)) { + fail("waited too long for email to be received"); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailMessageIdTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailMessageIdTests.java new file mode 100644 index 0000000000000..9051f50e62b85 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailMessageIdTests.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions.email; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.email.EmailService; +import org.elasticsearch.xpack.watcher.notification.email.EmailTemplate; +import org.elasticsearch.xpack.watcher.notification.email.HtmlSanitizer; +import org.elasticsearch.xpack.watcher.notification.email.support.EmailServer; +import org.elasticsearch.xpack.watcher.test.MockTextTemplateEngine; +import org.elasticsearch.xpack.watcher.test.WatcherTestUtils; +import org.junit.After; +import org.junit.Before; + +import javax.mail.internet.MimeMessage; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.hamcrest.Matchers.hasSize; + +public class EmailMessageIdTests extends ESTestCase { + + private EmailServer server; + private TextTemplateEngine textTemplateEngine = new MockTextTemplateEngine(); + private HtmlSanitizer htmlSanitizer = new HtmlSanitizer(Settings.EMPTY); + private EmailService emailService; + private EmailAction emailAction; + + @Before + public void startSmtpServer() { + server = EmailServer.localhost(logger); + + Settings settings = Settings.builder() + .put("xpack.notification.email.account.test.smtp.auth", true) + .put("xpack.notification.email.account.test.smtp.user", EmailServer.USERNAME) + .put("xpack.notification.email.account.test.smtp.password", EmailServer.PASSWORD) + .put("xpack.notification.email.account.test.smtp.port", server.port()) + .put("xpack.notification.email.account.test.smtp.host", "localhost") + .build(); + + Set<Setting<?>> registeredSettings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + registeredSettings.addAll(EmailService.getSettings()); + ClusterSettings clusterSettings = new ClusterSettings(settings, registeredSettings); + emailService = new EmailService(settings, null, clusterSettings); + EmailTemplate emailTemplate = EmailTemplate.builder().from("from@example.org").to("to@example.org") + .subject("subject").textBody("body").build(); + emailAction = new EmailAction(emailTemplate, null, null, null, null, null); + } + + @After + public void stopSmtpServer() { + server.stop(); + } + + public void testThatMessageIdIsUnique() throws Exception { + List<MimeMessage> messages = new ArrayList<>(); + server.addListener(messages::add); + ExecutableEmailAction firstEmailAction = new ExecutableEmailAction(emailAction, logger, emailService, textTemplateEngine, + htmlSanitizer, Collections.emptyMap()); + ExecutableEmailAction secondEmailAction = new ExecutableEmailAction(emailAction, logger, emailService, textTemplateEngine, + htmlSanitizer, Collections.emptyMap()); + + WatchExecutionContext ctx = WatcherTestUtils.createWatchExecutionContext(logger); + firstEmailAction.execute("my_first_action_id", ctx, Payload.EMPTY); + secondEmailAction.execute("my_second_action_id", ctx, Payload.EMPTY); + + assertThat(messages, hasSize(2)); + // check for unique message ids, should be two as well + Set<String> messageIds = new HashSet<>(); + for (MimeMessage message : messages) { + messageIds.add(message.getMessageID()); + } + 
assertThat(messageIds, hasSize(2)); + } +} + diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatActionFactoryTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatActionFactoryTests.java new file mode 100644 index 0000000000000..363a0c498c176 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatActionFactoryTests.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.hipchat; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatAccount; +import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatService; +import org.junit.Before; + +import java.util.Collections; +import java.util.HashSet; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.hipchatAction; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class HipChatActionFactoryTests extends ESTestCase { + private HipChatActionFactory factory; + private HipChatService hipchatService; + + @Before + public void init() throws Exception { + hipchatService = mock(HipChatService.class); + factory = new HipChatActionFactory(Settings.EMPTY, mock(TextTemplateEngine.class), hipchatService); + } + + public void testParseAction() throws Exception { + HipChatAccount account = mock(HipChatAccount.class); + when(hipchatService.getAccount("_account1")).thenReturn(account); + + HipChatAction action = hipchatAction("_account1", "_body").build(); + XContentBuilder jsonBuilder = jsonBuilder().value(action); + XContentParser parser = createParser(jsonBuilder); + parser.nextToken(); + + ExecutableHipChatAction parsedAction = factory.parseExecutable("_w1", "_a1", parser); + assertThat(parsedAction.action(), is(action)); + + verify(account, times(1)).validateParsedTemplate("_w1", "_a1", action.message); + } + + public void testParseActionUnknownAccount() throws Exception { + hipchatService = new HipChatService(Settings.EMPTY, null, new ClusterSettings(Settings.EMPTY, + new HashSet<>(HipChatService.getSettings()))); + factory = new HipChatActionFactory(Settings.EMPTY, mock(TextTemplateEngine.class), hipchatService); + HipChatAction action = hipchatAction("_unknown", "_body").build(); + XContentBuilder jsonBuilder = jsonBuilder().value(action); + XContentParser parser = createParser(jsonBuilder); + parser.nextToken(); + expectThrows(IllegalArgumentException.class, () -> factory.parseExecutable("_w1", "_a1", parser)); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatActionTests.java 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatActionTests.java new file mode 100644 index 0000000000000..eef175c4ff5d0 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/hipchat/HipChatActionTests.java @@ -0,0 +1,294 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.hipchat; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatAccount; +import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatMessage; +import org.elasticsearch.xpack.watcher.notification.hipchat.HipChatService; +import org.elasticsearch.xpack.watcher.notification.hipchat.SentMessages; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.junit.Before; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.mockExecutionContextBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class HipChatActionTests extends ESTestCase { + private HipChatService service; + + @Before + public void init() throws Exception { + service = mock(HipChatService.class); + } + + public void testExecute() throws Exception { + final String accountName = "account1"; + + TextTemplateEngine templateEngine = mock(TextTemplateEngine.class); + + TextTemplate body = new TextTemplate("_body"); + HipChatMessage.Template.Builder messageBuilder = new HipChatMessage.Template.Builder(body); + + HipChatMessage.Template messageTemplate = messageBuilder.build(); + + HipChatAction action 
= new HipChatAction(accountName, messageTemplate, null); + ExecutableHipChatAction executable = new ExecutableHipChatAction(action, logger, service, templateEngine); + + Map data = new HashMap<>(); + Payload payload = new Payload.Simple(data); + + Map metadata = MapBuilder.newMapBuilder().put("_key", "_val").map(); + + DateTime now = DateTime.now(DateTimeZone.UTC); + + Wid wid = new Wid(randomAlphaOfLength(5), now); + WatchExecutionContext ctx = mockExecutionContextBuilder(wid.watchId()) + .wid(wid) + .payload(payload) + .time(wid.watchId(), now) + .metadata(metadata) + .buildMock(); + + Map triggerModel = new HashMap<>(); + triggerModel.put("triggered_time", now); + triggerModel.put("scheduled_time", now); + Map ctxModel = new HashMap<>(); + ctxModel.put("id", ctx.id().value()); + ctxModel.put("watch_id", wid.watchId()); + ctxModel.put("payload", data); + ctxModel.put("metadata", metadata); + ctxModel.put("execution_time", now); + ctxModel.put("trigger", triggerModel); + ctxModel.put("vars", Collections.emptyMap()); + Map expectedModel = singletonMap("ctx", ctxModel); + + if (body != null) { + when(templateEngine.render(body, expectedModel)).thenReturn(body.getTemplate()); + } + + String[] rooms = new String[] { "_r1" }; + HipChatMessage message = new HipChatMessage(body.getTemplate(), rooms, null, null, null, null, null); + HipChatAccount account = mock(HipChatAccount.class); + when(account.render(wid.watchId(), "_id", templateEngine, messageTemplate, expectedModel)).thenReturn(message); + boolean responseFailure = randomBoolean(); + HttpResponse response = new HttpResponse(responseFailure ? 404 : 200); + HttpRequest request = HttpRequest.builder("localhost", 12345).path("/").build(); + SentMessages sentMessages = new SentMessages(accountName, Arrays.asList( + SentMessages.SentMessage.responded("_r1", SentMessages.SentMessage.TargetType.ROOM, message, request, response) + )); + when(account.send(message, null)).thenReturn(sentMessages); + when(service.getAccount(accountName)).thenReturn(account); + + Action.Result result = executable.execute("_id", ctx, payload); + + assertThat(result, notNullValue()); + assertThat(result, instanceOf(HipChatAction.Result.Executed.class)); + if (responseFailure) { + assertThat(result.status(), equalTo(Action.Result.Status.FAILURE)); + } else { + assertThat(result.status(), equalTo(Action.Result.Status.SUCCESS)); + } + assertThat(((HipChatAction.Result.Executed) result).sentMessages(), sameInstance(sentMessages)); + assertValidToXContent(result); + } + + public void testParser() throws Exception { + XContentBuilder builder = jsonBuilder().startObject(); + + String accountName = randomAlphaOfLength(10); + builder.field("account", accountName); + builder.startObject("message"); + + TextTemplate body = new TextTemplate("_body"); + builder.field("body", body); + + TextTemplate[] rooms = null; + if (randomBoolean()) { + TextTemplate r1 = new TextTemplate("_r1"); + TextTemplate r2 = new TextTemplate("_r2"); + rooms = new TextTemplate[] { r1, r2 }; + builder.array("room", r1, r2); + } + TextTemplate[] users = null; + if (randomBoolean()) { + TextTemplate u1 = new TextTemplate("_u1"); + TextTemplate u2 = new TextTemplate("_u2"); + users = new TextTemplate[] { u1, u2 }; + builder.array("user", u1, u2); + } + String from = null; + if (randomBoolean()) { + from = randomAlphaOfLength(10); + builder.field("from", from); + } + HipChatMessage.Format format = null; + if (randomBoolean()) { + format = randomFrom(HipChatMessage.Format.values()); + builder.field("format", 
format.value()); + } + TextTemplate color = null; + if (randomBoolean()) { + color = new TextTemplate(randomFrom(HipChatMessage.Color.values()).value()); + builder.field("color", color); + } + Boolean notify = null; + if (randomBoolean()) { + notify = randomBoolean(); + builder.field("notify", notify); + } + builder.endObject(); + HttpProxy proxy = null; + if (randomBoolean()) { + proxy = new HttpProxy("localhost", 8080); + builder.startObject("proxy").field("host", "localhost").field("port", 8080).endObject(); + } + builder.endObject(); + + BytesReference bytes = BytesReference.bytes(builder); + logger.info("hipchat action json [{}]", bytes.utf8ToString()); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + + HipChatAction action = HipChatAction.parse("_watch", "_action", parser); + + assertThat(action, notNullValue()); + assertThat(action.account, is(accountName)); + assertThat(action.proxy, is(proxy)); + assertThat(action.message, notNullValue()); + assertThat(action.message, is(new HipChatMessage.Template(body, rooms, users, from, format, color, notify))); + } + + public void testParserSelfGenerated() throws Exception { + String accountName = randomAlphaOfLength(10); + TextTemplate body = new TextTemplate("_body"); + HipChatMessage.Template.Builder templateBuilder = new HipChatMessage.Template.Builder(body); + + XContentBuilder builder = jsonBuilder().startObject(); + builder.field("account", accountName); + + HttpProxy proxy = null; + if (randomBoolean()) { + proxy = new HttpProxy("localhost", 8080); + builder.startObject("proxy").field("host", "localhost").field("port", 8080).endObject(); + } + + builder.startObject("message"); + builder.field("body", body); + + if (randomBoolean()) { + TextTemplate r1 = new TextTemplate("_r1"); + TextTemplate r2 = new TextTemplate("_r2"); + templateBuilder.addRooms(r1, r2); + builder.array("room", r1, r2); + } + if (randomBoolean()) { + TextTemplate u1 = new TextTemplate("_u1"); + TextTemplate u2 = new TextTemplate("_u2"); + templateBuilder.addUsers(u1, u2); + builder.array("user", u1, u2); + } + if (randomBoolean()) { + String from = randomAlphaOfLength(10); + templateBuilder.setFrom(from); + builder.field("from", from); + } + if (randomBoolean()) { + HipChatMessage.Format format = randomFrom(HipChatMessage.Format.values()); + templateBuilder.setFormat(format); + builder.field("format", format.value()); + } + if (randomBoolean()) { + TextTemplate color = new TextTemplate(randomFrom(HipChatMessage.Color.values()).value()); + templateBuilder.setColor(color); + builder.field("color", color); + } + if (randomBoolean()) { + boolean notify = randomBoolean(); + templateBuilder.setNotify(notify); + builder.field("notify", notify); + } + + builder.endObject(); + builder.endObject(); + + HipChatMessage.Template template = templateBuilder.build(); + + HipChatAction action = new HipChatAction(accountName, template, proxy); + + XContentBuilder jsonBuilder = jsonBuilder(); + action.toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); + BytesReference bytes = BytesReference.bytes(builder); + logger.info("{}", bytes.utf8ToString()); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + + HipChatAction parsedAction = HipChatAction.parse("_watch", "_action", parser); + + assertThat(parsedAction, notNullValue()); + assertThat(parsedAction, is(action)); + } + + public void testParserInvalid() throws Exception { + XContentBuilder builder = 
jsonBuilder().startObject().field("unknown_field", "value").endObject(); + XContentParser parser = createParser(builder); + parser.nextToken(); + try { + HipChatAction.parse("_watch", "_action", parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), is("failed to parse [hipchat] action [_watch/_action]. unexpected token [VALUE_STRING]")); + } + } + + // ensure that toXContent can be serialized and read again + private void assertValidToXContent(Action.Result result) throws IOException { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + result.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + Strings.toString(builder); + try (XContentParser parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, Strings.toString(builder))) { + parser.map(); + } + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java new file mode 100644 index 0000000000000..346b2d0e85cdf --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java @@ -0,0 +1,364 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.index; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.actions.Action.Result.Status; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.test.WatcherTestUtils; +import org.joda.time.DateTime; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.singletonMap; +import static java.util.Collections.unmodifiableSet; +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy; +import static 
org.elasticsearch.common.util.set.Sets.newHashSet; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.startsWith; +import static org.joda.time.DateTimeZone.UTC; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class IndexActionTests extends ESTestCase { + + private RefreshPolicy refreshPolicy = randomBoolean() ? null : randomFrom(RefreshPolicy.values()); + + private final Client client = mock(Client.class); + + @Before + public void setupClient() { + ThreadPool threadPool = mock(ThreadPool.class); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + when(threadPool.getThreadContext()).thenReturn(threadContext); + when(client.threadPool()).thenReturn(threadPool); + } + + public void testParser() throws Exception { + String timestampField = randomBoolean() ? "@timestamp" : null; + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + boolean includeIndex = randomBoolean(); + if (includeIndex) { + builder.field(IndexAction.Field.INDEX.getPreferredName(), "test-index"); + } + builder.field(IndexAction.Field.DOC_TYPE.getPreferredName(), "test-type"); + if (timestampField != null) { + builder.field(IndexAction.Field.EXECUTION_TIME_FIELD.getPreferredName(), timestampField); + } + TimeValue writeTimeout = randomBoolean() ? TimeValue.timeValueSeconds(randomInt(10)) : null; + if (writeTimeout != null) { + builder.field(IndexAction.Field.TIMEOUT.getPreferredName(), writeTimeout.millis()); + } + builder.endObject(); + IndexActionFactory actionParser = new IndexActionFactory(Settings.EMPTY, client); + XContentParser parser = createParser(builder); + parser.nextToken(); + + ExecutableIndexAction executable = actionParser.parseExecutable(randomAlphaOfLength(5), randomAlphaOfLength(3), parser); + + assertThat(executable.action().docType, equalTo("test-type")); + if (includeIndex) { + assertThat(executable.action().index, equalTo("test-index")); + } + if (timestampField != null) { + assertThat(executable.action().executionTimeField, equalTo(timestampField)); + } + assertThat(executable.action().timeout, equalTo(writeTimeout)); + } + + public void testParserFailure() throws Exception { + // wrong type for field + expectParseFailure(jsonBuilder() + .startObject() + .field(IndexAction.Field.DOC_TYPE.getPreferredName(), 1234) + .endObject()); + + expectParseFailure(jsonBuilder() + .startObject() + .field(IndexAction.Field.TIMEOUT.getPreferredName(), "1234") + .endObject()); + + // unknown field + expectParseFailure(jsonBuilder() + .startObject() + .field("unknown", "whatever") + .endObject()); + + expectParseFailure(jsonBuilder() + .startObject() + .field("unknown", 1234) + .endObject()); + + // unknown refresh policy + expectFailure(IllegalArgumentException.class, jsonBuilder() + .startObject() + .field(IndexAction.Field.REFRESH.getPreferredName(), "unknown") + .endObject()); + } + + private void expectParseFailure(XContentBuilder builder) throws Exception { + expectFailure(ElasticsearchParseException.class, builder); + } + + private void expectFailure(Class clazz, XContentBuilder builder) throws Exception { + IndexActionFactory actionParser = new IndexActionFactory(Settings.EMPTY, client); + XContentParser parser = createParser(builder); + 
parser.nextToken(); + expectThrows(clazz, () -> + actionParser.parseExecutable(randomAlphaOfLength(4), randomAlphaOfLength(5), parser)); + } + + public void testUsingParameterIdWithBulkOrIdFieldThrowsIllegalState() { + final IndexAction action = new IndexAction("test-index", "test-type", "123", null, null, null, refreshPolicy); + final ExecutableIndexAction executable = new ExecutableIndexAction(action, logger, client, + TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(30)); + final Map docWithId = MapBuilder.newMapBuilder().put("foo", "bar").put("_id", "0").immutableMap(); + final DateTime executionTime = DateTime.now(UTC); + + // using doc_id with bulk fails regardless of using ID + expectThrows(IllegalStateException.class, () -> { + final List idList = Arrays.asList(docWithId, MapBuilder.newMapBuilder().put("foo", "bar1").put("_id", "1").map()); + + final Object list = randomFrom( + new Map[] { singletonMap("foo", "bar"), singletonMap("foo", "bar1") }, + Arrays.asList(singletonMap("foo", "bar"), singletonMap("foo", "bar1")), + unmodifiableSet(newHashSet(singletonMap("foo", "bar"), singletonMap("foo", "bar1"))), + idList + ); + + final WatchExecutionContext ctx = WatcherTestUtils.mockExecutionContext("_id", executionTime, new Payload.Simple("_doc", list)); + + executable.execute("_id", ctx, ctx.payload()); + }); + + // using doc_id with _id + expectThrows(IllegalStateException.class, () -> { + final Payload payload = randomBoolean() ? new Payload.Simple("_doc", docWithId) : new Payload.Simple(docWithId); + final WatchExecutionContext ctx = WatcherTestUtils.mockExecutionContext("_id", executionTime, payload); + + executable.execute("_id", ctx, ctx.payload()); + }); + } + + public void testThatIndexTypeIdDynamically() throws Exception { + boolean configureIndexDynamically = randomBoolean(); + boolean configureTypeDynamically = randomBoolean(); + boolean configureIdDynamically = (configureTypeDynamically == false && configureIndexDynamically == false) || randomBoolean(); + + MapBuilder builder = MapBuilder.newMapBuilder().put("foo", "bar"); + if (configureIdDynamically) { + builder.put("_id", "my_dynamic_id"); + } + if (configureTypeDynamically) { + builder.put("_type", "my_dynamic_type"); + } + if (configureIndexDynamically) { + builder.put("_index", "my_dynamic_index"); + } + + final IndexAction action = new IndexAction(configureIndexDynamically ? null : "my_index", + configureTypeDynamically ? null : "my_type", + configureIdDynamically ? null : "my_id", + null, null, null, refreshPolicy); + final ExecutableIndexAction executable = new ExecutableIndexAction(action, logger, client, + TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(30)); + + final WatchExecutionContext ctx = WatcherTestUtils.mockExecutionContext("_id", new Payload.Simple(builder.immutableMap())); + + ArgumentCaptor captor = ArgumentCaptor.forClass(IndexRequest.class); + PlainActionFuture listener = PlainActionFuture.newFuture(); + listener.onResponse(new IndexResponse(new ShardId(new Index("foo", "bar"), 0), "whatever", "whatever", 1, 1, 1, true)); + when(client.index(captor.capture())).thenReturn(listener); + Action.Result result = executable.execute("_id", ctx, ctx.payload()); + + assertThat(result.status(), is(Status.SUCCESS)); + assertThat(captor.getAllValues(), hasSize(1)); + + assertThat(captor.getValue().index(), is(configureIndexDynamically ? "my_dynamic_index" : "my_index")); + assertThat(captor.getValue().type(), is(configureTypeDynamically ? 
"my_dynamic_type" : "my_type")); + assertThat(captor.getValue().id(), is(configureIdDynamically ? "my_dynamic_id" : "my_id")); + } + + public void testThatIndexActionCanBeConfiguredWithDynamicIndexNameAndBulk() throws Exception { + final IndexAction action = new IndexAction(null, "my-type", null, null, null, null, refreshPolicy); + final ExecutableIndexAction executable = new ExecutableIndexAction(action, logger, client, + TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(30)); + + final Map docWithIndex = MapBuilder.newMapBuilder().put("foo", "bar") + .put("_index", "my-index").immutableMap(); + final Map docWithOtherIndex = MapBuilder.newMapBuilder().put("foo", "bar") + .put("_index", "my-other-index").immutableMap(); + final WatchExecutionContext ctx = WatcherTestUtils.mockExecutionContext("_id", + new Payload.Simple("_doc", Arrays.asList(docWithIndex, docWithOtherIndex))); + + ArgumentCaptor captor = ArgumentCaptor.forClass(BulkRequest.class); + PlainActionFuture listener = PlainActionFuture.newFuture(); + IndexResponse indexResponse = new IndexResponse(new ShardId(new Index("foo", "bar"), 0), "whatever", "whatever", 1, 1, 1, true); + BulkItemResponse response = new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, indexResponse); + BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[]{response}, 1); + listener.onResponse(bulkResponse); + when(client.bulk(captor.capture())).thenReturn(listener); + Action.Result result = executable.execute("_id", ctx, ctx.payload()); + + assertThat(result.status(), is(Status.SUCCESS)); + assertThat(captor.getAllValues(), hasSize(1)); + assertThat(captor.getValue().requests(), hasSize(2)); + assertThat(captor.getValue().requests().get(0).type(), is("my-type")); + assertThat(captor.getValue().requests().get(0).index(), is("my-index")); + assertThat(captor.getValue().requests().get(1).type(), is("my-type")); + assertThat(captor.getValue().requests().get(1).index(), is("my-other-index")); + } + + public void testConfigureIndexInMapAndAction() { + String fieldName = randomFrom("_index", "_type"); + final IndexAction action = new IndexAction(fieldName.equals("_index") ? "my_index" : null, + fieldName.equals("_type") ? "my_type" : null, + null,null, null, null, refreshPolicy); + final ExecutableIndexAction executable = new ExecutableIndexAction(action, logger, client, + TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(30)); + + final Map docWithIndex = MapBuilder.newMapBuilder().put("foo", "bar") + .put(fieldName, "my-value").immutableMap(); + final WatchExecutionContext ctx = WatcherTestUtils.mockExecutionContext("_id", + new Payload.Simple("_doc", Collections.singletonList(docWithIndex))); + + IllegalStateException e = expectThrows(IllegalStateException.class, () -> executable.execute("_id", ctx, ctx.payload())); + assertThat(e.getMessage(), startsWith("could not execute action [_id] of watch [_id]. [ctx.payload." + + fieldName + "] or [ctx.payload._doc." + fieldName + "]")); + } + + public void testIndexActionExecuteSingleDoc() throws Exception { + boolean customId = randomBoolean(); + boolean docIdAsParam = customId && randomBoolean(); + String docId = randomAlphaOfLength(5); + String timestampField = randomFrom("@timestamp", null); + + IndexAction action = new IndexAction("test-index", "test-type", docIdAsParam ? 
docId : null, timestampField, null, null, + refreshPolicy); + ExecutableIndexAction executable = new ExecutableIndexAction(action, logger, client, TimeValue.timeValueSeconds(30), + TimeValue.timeValueSeconds(30)); + DateTime executionTime = DateTime.now(UTC); + Payload payload; + + if (customId && docIdAsParam == false) { + // intentionally immutable because the other side needs to cut out _id + payload = new Payload.Simple("_doc", MapBuilder.newMapBuilder().put("foo", "bar").put("_id", docId).immutableMap()); + } else { + payload = randomBoolean() ? new Payload.Simple("foo", "bar") : new Payload.Simple("_doc", singletonMap("foo", "bar")); + } + + WatchExecutionContext ctx = WatcherTestUtils.mockExecutionContext("_id", executionTime, payload); + + ArgumentCaptor captor = ArgumentCaptor.forClass(IndexRequest.class); + PlainActionFuture listener = PlainActionFuture.newFuture(); + listener.onResponse(new IndexResponse(new ShardId(new Index("test-index", "uuid"), 0), "test-type", docId, 1, 1, 1, true)); + when(client.index(captor.capture())).thenReturn(listener); + + Action.Result result = executable.execute("_id", ctx, ctx.payload()); + + assertThat(result.status(), equalTo(Status.SUCCESS)); + assertThat(result, instanceOf(IndexAction.Result.class)); + IndexAction.Result successResult = (IndexAction.Result) result; + XContentSource response = successResult.response(); + assertThat(response.getValue("created"), equalTo((Object)Boolean.TRUE)); + assertThat(response.getValue("version"), equalTo((Object) 1)); + assertThat(response.getValue("type").toString(), equalTo("test-type")); + assertThat(response.getValue("index").toString(), equalTo("test-index")); + + assertThat(captor.getAllValues(), hasSize(1)); + IndexRequest indexRequest = captor.getValue(); + assertThat(indexRequest.sourceAsMap(), is(hasEntry("foo", "bar"))); + if (customId) { + assertThat(indexRequest.id(), is(docId)); + } + + RefreshPolicy expectedRefreshPolicy = refreshPolicy == null ? 
RefreshPolicy.NONE: refreshPolicy; + assertThat(indexRequest.getRefreshPolicy(), is(expectedRefreshPolicy)); + + if (timestampField != null) { + assertThat(indexRequest.sourceAsMap().keySet(), is(hasSize(2))); + assertThat(indexRequest.sourceAsMap(), hasEntry(timestampField, executionTime.toString())); + } else { + assertThat(indexRequest.sourceAsMap().keySet(), is(hasSize(1))); + } + } + + public void testFailureResult() throws Exception { + IndexAction action = new IndexAction("test-index", "test-type", null, "@timestamp", null, null, refreshPolicy); + ExecutableIndexAction executable = new ExecutableIndexAction(action, logger, client, + TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(30)); + + // should the result resemble a failure or a partial failure + boolean isPartialFailure = randomBoolean(); + + List> docs = new ArrayList<>(); + docs.add(Collections.singletonMap("foo", Collections.singletonMap("foo", "bar"))); + docs.add(Collections.singletonMap("foo", Collections.singletonMap("foo", "bar"))); + Payload payload = new Payload.Simple(Collections.singletonMap("_doc", docs)); + + WatchExecutionContext ctx = WatcherTestUtils.mockExecutionContext("_id", DateTime.now(UTC), payload); + + ArgumentCaptor captor = ArgumentCaptor.forClass(BulkRequest.class); + PlainActionFuture listener = PlainActionFuture.newFuture(); + BulkItemResponse.Failure failure = new BulkItemResponse.Failure("test-index", "test-type", "anything", + new ElasticsearchException("anything")); + BulkItemResponse firstResponse = new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, failure); + BulkItemResponse secondResponse; + if (isPartialFailure) { + ShardId shardId = new ShardId(new Index("foo", "bar"), 0); + IndexResponse indexResponse = new IndexResponse(shardId, "whatever", "whatever", 1, 1, 1, true); + secondResponse = new BulkItemResponse(1, DocWriteRequest.OpType.INDEX, indexResponse); + } else { + secondResponse = new BulkItemResponse(1, DocWriteRequest.OpType.INDEX, failure); + } + BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[]{firstResponse, secondResponse}, 1); + listener.onResponse(bulkResponse); + when(client.bulk(captor.capture())).thenReturn(listener); + Action.Result result = executable.execute("_id", ctx, payload); + RefreshPolicy expectedRefreshPolicy = refreshPolicy == null ? RefreshPolicy.NONE: refreshPolicy; + assertThat(captor.getValue().getRefreshPolicy(), is(expectedRefreshPolicy)); + + if (isPartialFailure) { + assertThat(result.status(), is(Status.PARTIAL_FAILURE)); + } else { + assertThat(result.status(), is(Status.FAILURE)); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/ExecutableJiraActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/ExecutableJiraActionTests.java new file mode 100644 index 0000000000000..cb434e62df5f9 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/ExecutableJiraActionTests.java @@ -0,0 +1,314 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions.jira; + +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuth; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuth; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.jira.JiraAccount; +import org.elasticsearch.xpack.watcher.notification.jira.JiraService; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.mockito.ArgumentCaptor; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.mockExecutionContextBuilder; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ExecutableJiraActionTests extends ESTestCase { + + public void testProxy() throws Exception { + HttpProxy proxy = new HttpProxy("localhost", 8080); + Map issueDefaults = Collections.singletonMap("customfield_0001", "test"); + JiraAction action = new JiraAction("account1", issueDefaults, proxy); + + HttpClient httpClient = mock(HttpClient.class); + ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(HttpRequest.class); + when(httpClient.execute(argumentCaptor.capture())).thenReturn(new HttpResponse(200)); + + final String host = randomFrom("localhost", "internal-jira.elastic.co"); + final int port = randomFrom(80, 8080, 449, 9443); + final String url = "https://" + host + ":" + port; + final String user = randomAlphaOfLength(10); + final String password = randomAlphaOfLength(10); + + Settings accountSettings = Settings.builder() + .put("url", url) + .put("user", user) + .put("password", password) + .build(); + + JiraAccount account = new JiraAccount("account1", accountSettings, httpClient); + + JiraService service = mock(JiraService.class); + when(service.getAccount(eq("account1"))).thenReturn(account); + + DateTime now = DateTime.now(DateTimeZone.UTC); + + Wid wid = new Wid(randomAlphaOfLength(5), now); + WatchExecutionContext ctx = mockExecutionContextBuilder(wid.watchId()) + .wid(wid) + .payload(new Payload.Simple()) + .time(wid.watchId(), now) + .buildMock(); + + ExecutableJiraAction executable = new ExecutableJiraAction(action, logger, service, new 
UpperCaseTextTemplateEngine()); + executable.execute("foo", ctx, new Payload.Simple()); + + HttpRequest request = argumentCaptor.getValue(); + assertThat(request.proxy(), is(proxy)); + assertThat(request.host(), is(host)); + assertThat(request.port(), is(port)); + assertThat(request.path(), is(JiraAccount.DEFAULT_PATH)); + + HttpAuth httpAuth = request.auth(); + assertThat(httpAuth.type(), is("basic")); + + BasicAuth basicAuth = (BasicAuth) httpAuth; + assertThat(basicAuth.getUsername(), is(user)); + } + + public void testExecutionWithNoDefaults() throws Exception { + JiraAction.Simulated result = simulateExecution(singletonMap("key", "value"), emptyMap()); + assertEquals(result.getFields().size(), 1); + assertThat(result.getFields(), hasEntry("KEY", "VALUE")); + } + + public void testExecutionNoFieldsWithDefaults() throws Exception { + Map defaults = new HashMap<>(); + defaults.put("k0", "v0"); + + JiraAction.Simulated result = simulateExecution(new HashMap<>(), defaults); + assertEquals(result.getFields().size(), 1); + assertThat(result.getFields(), hasEntry("K0", "V0")); + + defaults.put("k1", "v1"); + + result = simulateExecution(new HashMap<>(), defaults); + assertEquals(result.getFields().size(), 2); + assertThat(result.getFields(), allOf(hasEntry("K0", "V0"), hasEntry("K1", "V1"))); + } + + public void testExecutionFields() throws Exception { + Map defaults = new HashMap<>(); + defaults.put("k0", "v0"); + defaults.put("k1", "v1"); + + Map fields = new HashMap<>(); + fields.put("k1", "new_v1"); // overridden + fields.put("k2", "v2"); + fields.put("k3", "v3"); + + JiraAction.Simulated result = simulateExecution(fields, defaults); + assertEquals(result.getFields().size(), 4); + assertThat(result.getFields(), allOf(hasEntry("K0", "V0"), hasEntry("K1", "NEW_V1"), hasEntry("K2", "V2"), hasEntry("K3", "V3"))); + } + + public void testExecutionFieldsMaps() throws Exception { + Map defaults = new HashMap<>(); + defaults.put("k0.a", "b"); + defaults.put("k1.c", "d"); + defaults.put("k1.e", "f"); + defaults.put("k1.g.a", "b"); + + Map fields = new HashMap<>(); + fields.put("k2", "v2"); + fields.put("k3", "v3"); + + JiraAction.Simulated result = simulateExecution(fields, defaults); + + final Map expected = new HashMap<>(); + expected.put("K0", singletonMap("A", "B")); + expected.put("K2", "V2"); + expected.put("K3", "V3"); + + final Map expectedK1 = new HashMap<>(); + expectedK1.put("C", "D"); + expectedK1.put("E", "F"); + expectedK1.put("G", singletonMap("A", "B")); + expected.put("K1", expectedK1); + + assertThat(result.getFields(), equalTo(expected)); + } + + public void testExecutionFieldsMapsAreOverridden() throws Exception { + Map defaults = new HashMap<>(); + defaults.put("k0", "v0"); + defaults.put("k1.a", "b"); + defaults.put("k1.c", "d"); + + Map fields = new HashMap<>(); + fields.put("k1", singletonMap("c", "e")); // will overrides the defaults + fields.put("k2", "v2"); + + JiraAction.Simulated result = simulateExecution(fields, defaults); + + final Map expected = new HashMap<>(); + expected.put("K0", "V0"); + expected.put("K1", singletonMap("C", "E")); + expected.put("K2", "V2"); + + assertThat(result.getFields(), equalTo(expected)); + } + + public void testExecutionFieldsLists() throws Exception { + Map defaults = new HashMap<>(); + defaults.put("k0.0", "a"); + defaults.put("k0.1", "b"); + defaults.put("k0.2", "c"); + defaults.put("k1", "v1"); + + Map fields = new HashMap<>(); + fields.put("k2", "v2"); + fields.put("k3", Arrays.asList("d", "e", "f")); + + JiraAction.Simulated 
result = simulateExecution(fields, defaults); + + final Map expected = new HashMap<>(); + expected.put("K0", Arrays.asList("A", "B", "C")); + expected.put("K1", "V1"); + expected.put("K2", "V2"); + expected.put("K3", Arrays.asList("D", "E", "F")); + + assertThat(result.getFields(), equalTo(expected)); + } + + public void testExecutionFieldsListsNotOverridden() throws Exception { + Map defaults = new HashMap<>(); + defaults.put("k0.0", "a"); + defaults.put("k0.1", "b"); + defaults.put("k0.2", "c"); + + Map fields = new HashMap<>(); + fields.put("k1", "v1"); + fields.put("k0", Arrays.asList("d", "e", "f")); // should not be overridden by the defaults + + JiraAction.Simulated result = simulateExecution(fields, defaults); + + final Map expected = new HashMap<>(); + expected.put("K0", Arrays.asList("D", "E", "F")); + expected.put("K1", "V1"); + + assertThat(result.getFields(), equalTo(expected)); + } + + public void testExecutionFieldsStringArrays() throws Exception { + Settings build = Settings.builder() + .putList("k0", "a", "b", "c") + .put("k1", "v1") + .build(); + Map defaults = build.keySet().stream().collect(Collectors.toMap(Function.identity(), k -> build.get(k))); + + Map fields = new HashMap<>(); + fields.put("k2", "v2"); + fields.put("k3", new String[]{"d", "e", "f"}); + + JiraAction.Simulated result = simulateExecution(fields, defaults); + + assertThat(result.getFields().get("K1"), equalTo("V1")); + assertThat(result.getFields().get("K2"), equalTo("V2")); + assertArrayEquals((Object[]) result.getFields().get("K3"), new Object[]{"D", "E", "F"}); + } + + public void testExecutionFieldsStringArraysNotOverridden() throws Exception { + Settings build = Settings.builder() + .putList("k0", "a", "b", "c") + .build(); + Map defaults = build.keySet().stream().collect(Collectors.toMap(Function.identity(), k -> build.get(k))); + Map fields = new HashMap<>(); + fields.put("k1", "v1"); + fields.put("k0", new String[]{"d", "e", "f"}); // should not be overridden by the defaults + + JiraAction.Simulated result = simulateExecution(fields, defaults); + + final Map expected = new HashMap<>(); + expected.put("K0", new String[]{"D", "E", "F"}); + expected.put("K1", "V1"); + + assertArrayEquals((Object[]) result.getFields().get("K0"), new Object[]{"D", "E", "F"}); + assertThat(result.getFields().get("K1"), equalTo("V1")); + } + + private JiraAction.Simulated simulateExecution(Map actionFields, Map accountFields) throws Exception { + Settings.Builder settings = Settings.builder() + .put("url", "https://internal-jira.elastic.co:443") + .put("user", "elastic") + .put("password", "secret") + .putProperties(accountFields, s -> "issue_defaults." 
+ s); + + JiraAccount account = new JiraAccount("account", settings.build(), mock(HttpClient.class)); + + JiraService service = mock(JiraService.class); + when(service.getAccount(eq("account"))).thenReturn(account); + + JiraAction action = new JiraAction("account", actionFields, null); + ExecutableJiraAction executable = new ExecutableJiraAction(action, null, service, new UpperCaseTextTemplateEngine()); + + WatchExecutionContext context = createWatchExecutionContext(); + when(context.simulateAction("test")).thenReturn(true); + + Action.Result result = executable.execute("test", context, new Payload.Simple()); + assertThat(result, instanceOf(JiraAction.Result.class)); + assertThat(result, instanceOf(JiraAction.Simulated.class)); + return (JiraAction.Simulated) result; + } + + private WatchExecutionContext createWatchExecutionContext() { + DateTime now = DateTime.now(DateTimeZone.UTC); + Wid wid = new Wid(randomAlphaOfLength(5), now); + Map metadata = MapBuilder.newMapBuilder().put("_key", "_val").map(); + return mockExecutionContextBuilder("watch1") + .wid(wid) + .payload(new Payload.Simple()) + .time("watch1", now) + .metadata(metadata) + .buildMock(); + } + + /** + * TextTemplateEngine that converts templates to uppercase + */ + class UpperCaseTextTemplateEngine extends TextTemplateEngine { + + UpperCaseTextTemplateEngine() { + super(Settings.EMPTY, mock(ScriptService.class)); + } + + @Override + public String render(TextTemplate textTemplate, Map model) { + return textTemplate.getTemplate().toUpperCase(Locale.ROOT); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/JiraActionFactoryTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/JiraActionFactoryTests.java new file mode 100644 index 0000000000000..49fe4090e6791 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/JiraActionFactoryTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions.jira; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.watcher.notification.jira.JiraAccount; +import org.elasticsearch.xpack.watcher.notification.jira.JiraService; +import org.junit.Before; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.notification.jira.JiraAccountTests.randomIssueDefaults; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.jiraAction; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class JiraActionFactoryTests extends ESTestCase { + + private JiraService service; + + @Before + public void init() throws Exception { + service = mock(JiraService.class); + } + + public void testParseAction() throws Exception { + JiraAccount account = mock(JiraAccount.class); + when(service.getAccount("_account1")).thenReturn(account); + + JiraAction action = jiraAction("_account1", randomIssueDefaults()).build(); + XContentBuilder jsonBuilder = jsonBuilder().value(action); + XContentParser parser = createParser(jsonBuilder); + parser.nextToken(); + + JiraAction parsedAction = JiraAction.parse("_w1", "_a1", parser); + assertThat(parsedAction, equalTo(action)); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/JiraActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/JiraActionTests.java new file mode 100644 index 0000000000000..22a6ced9e7e08 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/JiraActionTests.java @@ -0,0 +1,309 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions.jira; + +import org.apache.http.HttpStatus; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.jira.JiraAccount; +import org.elasticsearch.xpack.watcher.notification.jira.JiraAccountTests; +import org.elasticsearch.xpack.watcher.notification.jira.JiraIssue; +import org.elasticsearch.xpack.watcher.notification.jira.JiraService; +import org.elasticsearch.xpack.watcher.support.Variables; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.util.HashMap; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.cborBuilder; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.common.xcontent.XContentFactory.smileBuilder; +import static org.elasticsearch.common.xcontent.XContentFactory.yamlBuilder; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.mockExecutionContextBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class JiraActionTests extends ESTestCase { + public void testParser() throws Exception { + final String accountName = randomAlphaOfLength(10); + final Map issueDefaults = JiraAccountTests.randomIssueDefaults(); + + XContentBuilder builder = jsonBuilder().startObject() + .field("account", accountName) + .field("fields", issueDefaults) + .endObject(); + + BytesReference bytes = BytesReference.bytes(builder); + logger.info("jira action json [{}]", bytes.utf8ToString()); + + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + + JiraAction action = JiraAction.parse("_watch", "_action", parser); + + assertThat(action, notNullValue()); + assertThat(action.account, is(accountName)); + assertThat(action.fields, notNullValue()); + assertThat(action.fields, is(issueDefaults)); + } + + public void testParserSelfGenerated() throws Exception { + final JiraAction action = 
randomJiraAction(); + + XContentBuilder builder = jsonBuilder(); + action.toXContent(builder, ToXContent.EMPTY_PARAMS); + BytesReference bytes = BytesReference.bytes(builder); + logger.info("{}", bytes.utf8ToString()); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + + JiraAction parsedAction = JiraAction.parse("_watch", "_action", parser); + + assertThat(parsedAction, notNullValue()); + assertThat(parsedAction.proxy, equalTo(action.proxy)); + assertThat(parsedAction.fields, equalTo(action.fields)); + assertThat(parsedAction.account, equalTo(action.account)); + assertThat(parsedAction, is(action)); + } + + public void testParserInvalid() throws Exception { + XContentBuilder builder = jsonBuilder().startObject().field("unknown_field", "value").endObject(); + XContentParser parser = createParser(builder); + parser.nextToken(); + + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> JiraAction.parse("_w", "_a", parser)); + assertThat(e.getMessage(), is("failed to parse [jira] action [_w/_a]. unexpected token [VALUE_STRING/unknown_field]")); + } + + public void testToXContent() throws Exception { + final JiraAction action = randomJiraAction(); + + try (XContentBuilder builder = randomFrom(jsonBuilder(), smileBuilder(), yamlBuilder(), cborBuilder())) { + action.toXContent(builder, ToXContent.EMPTY_PARAMS); + + String parsedAccount = null; + HttpProxy parsedProxy = null; + Map parsedFields = null; + + try (XContentParser parser = createParser(builder)) { + assertNull(parser.currentToken()); + parser.nextToken(); + + XContentParser.Token token = parser.currentToken(); + assertThat(token, is(XContentParser.Token.START_OBJECT)); + + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if ("account".equals(currentFieldName)) { + parsedAccount = parser.text(); + } else if ("proxy".equals(currentFieldName)) { + parsedProxy = HttpProxy.parse(parser); + } else if ("fields".equals(currentFieldName)) { + parsedFields = parser.map(); + } else { + fail("unknown field [" + currentFieldName + "]"); + } + } + } + + assertThat(parsedAccount, equalTo(action.getAccount())); + assertThat(parsedProxy, equalTo(action.proxy)); + assertThat(parsedFields, equalTo(action.fields)); + } + } + + public void testEquals() throws Exception { + final JiraAction action1 = randomJiraAction(); + + String account = action1.account; + Map fields = action1.fields; + HttpProxy proxy = action1.proxy; + + boolean equals = randomBoolean(); + if (!equals) { + equals = true; + if (rarely()) { + equals = false; + account = "another account"; + } + if (rarely()) { + equals = false; + // cover the special case that randomIssueDefaults() left an empty map here as + // well as in the action1, so that those would be equal - make sure they are not + fields = JiraAccountTests.randomIssueDefaults(); + while (fields.equals(action1.fields)) { + fields = JiraAccountTests.randomIssueDefaults(); + } + } + if (rarely()) { + equals = false; + // another low probability case, that a random proxy is exactly the same including + // port number + proxy = randomHttpProxy(); + while (proxy.equals(action1.proxy)) { + proxy = randomHttpProxy(); + } + } + } + + JiraAction action2 = new JiraAction(account, fields, proxy); + assertThat(action1.equals(action2), is(equals)); + } + + public void testExecute() throws Exception { + 
final Map model = new HashMap<>(); + final MapBuilder actionFields = MapBuilder.newMapBuilder(); + + String summary = randomAlphaOfLength(15); + actionFields.put("summary", "{{ctx.summary}}"); + model.put("{{ctx.summary}}", summary); + + String projectId = randomAlphaOfLength(10); + actionFields.put("project", singletonMap("id", "{{ctx.project_id}}")); + model.put("{{ctx.project_id}}", projectId); + + String description = null; + if (randomBoolean()) { + description = randomAlphaOfLength(50); + actionFields.put("description", description); + } + + String issueType = null; + if (randomBoolean()) { + issueType = randomFrom("Bug", "Test", "Task", "Epic"); + actionFields.put("issuetype", singletonMap("name", issueType)); + } + + String watchId = null; + if (randomBoolean()) { + watchId = "jira_watch_" + randomInt(); + model.put("{{" + Variables.WATCH_ID + "}}", watchId); + actionFields.put("customfield_0", "{{watch_id}}"); + } + + HttpClient httpClient = mock(HttpClient.class); + when(httpClient.execute(any(HttpRequest.class))).thenReturn(new HttpResponse(HttpStatus.SC_CREATED)); + + Settings.Builder settings = Settings.builder() + .put("url", "https://internal-jira.elastic.co:443") + .put("user", "elastic") + .put("password", "secret") + .put("issue_defaults.customfield_000", "foo") + .put("issue_defaults.customfield_001", "bar"); + + JiraAccount account = new JiraAccount("account", settings.build(), httpClient); + + JiraService service = mock(JiraService.class); + when(service.getAccount(eq("account"))).thenReturn(account); + + JiraAction action = new JiraAction("account", actionFields.immutableMap(), null); + ExecutableJiraAction executable = new ExecutableJiraAction(action, logger, service, new ModelTextTemplateEngine(model)); + + Map data = new HashMap<>(); + Payload payload = new Payload.Simple(data); + + DateTime now = DateTime.now(DateTimeZone.UTC); + + Wid wid = new Wid(randomAlphaOfLength(5), now); + WatchExecutionContext context = mockExecutionContextBuilder(wid.watchId()) + .wid(wid) + .payload(payload) + .time(wid.watchId(), now) + .buildMock(); + when(context.simulateAction("test")).thenReturn(false); + + Action.Result result = executable.execute("test", context, new Payload.Simple()); + assertThat(result, instanceOf(JiraAction.Result.class)); + assertThat(result, instanceOf(JiraAction.Executed.class)); + + JiraIssue issue = ((JiraAction.Executed) result).getResult(); + assertThat(issue.getFields().get("summary"), equalTo(summary)); + assertThat(issue.getFields().get("customfield_000"), equalTo("foo")); + assertThat(issue.getFields().get("customfield_001"), equalTo("bar")); + assertThat(((Map) issue.getFields().get("project")).get("id"), equalTo(projectId)); + if (issueType != null) { + assertThat(((Map) issue.getFields().get("issuetype")).get("name"), equalTo(issueType)); + } + if (description != null) { + assertThat(issue.getFields().get("description"), equalTo(description)); + } + if (watchId != null) { + assertThat(issue.getFields().get("customfield_0"), equalTo(watchId)); + } + } + + private static JiraAction randomJiraAction() { + String account = null; + if (randomBoolean()) { + account = randomAlphaOfLength(randomIntBetween(5, 10)); + } + Map fields = emptyMap(); + if (frequently()) { + fields = JiraAccountTests.randomIssueDefaults(); + } + HttpProxy proxy = null; + if (randomBoolean()) { + proxy = randomHttpProxy(); + } + return new JiraAction(account, fields, proxy); + } + + private static HttpProxy randomHttpProxy() { + return new HttpProxy(randomFrom("localhost", 
"www.elastic.co", "198.18.0.0"), randomIntBetween(8000, 10000)); + } + + /** + * TextTemplateEngine that picks up templates from the model if exist, + * otherwise returns the template as it is. + */ + class ModelTextTemplateEngine extends TextTemplateEngine { + + private final Map model; + + ModelTextTemplateEngine(Map model) { + super(Settings.EMPTY, mock(ScriptService.class)); + this.model = model; + } + + @Override + public String render(TextTemplate textTemplate, Map ignoredModel) { + String template = textTemplate.getTemplate(); + if (model.containsKey(template)) { + return (String) model.get(template); + } + return template; + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/logging/LoggingActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/logging/LoggingActionTests.java new file mode 100644 index 0000000000000..fdfd8ae0745ca --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/logging/LoggingActionTests.java @@ -0,0 +1,215 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.logging; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.SuppressLoggerChecks; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.email.Attachment; +import org.elasticsearch.xpack.watcher.test.WatcherTestUtils; +import org.joda.time.DateTime; +import org.junit.Before; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.core.Is.is; +import static org.joda.time.DateTimeZone.UTC; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class LoggingActionTests extends ESTestCase { + + private Logger actionLogger; + private LoggingLevel level; + private TextTemplateEngine engine; + + @Before + public void init() throws IOException { + actionLogger = mock(Logger.class); + level = randomFrom(LoggingLevel.values()); + engine = mock(TextTemplateEngine.class); + } + + public void testExecute() throws Exception { + final DateTime now = DateTime.now(UTC); + + WatchExecutionContext ctx = 
WatcherTestUtils.mockExecutionContextBuilder("_watch_id") + .time("_watch_id", now) + .buildMock(); + + Map triggerModel = new HashMap<>(); + triggerModel.put("scheduled_time", now); + triggerModel.put("triggered_time", now); + Map ctxModel = new HashMap<>(); + ctxModel.put("id", ctx.id().value()); + ctxModel.put("watch_id", "_watch_id"); + ctxModel.put("execution_time", now); + ctxModel.put("payload", emptyMap()); + ctxModel.put("metadata", emptyMap()); + ctxModel.put("vars", emptyMap()); + ctxModel.put("trigger", triggerModel); + Map expectedModel = singletonMap("ctx", ctxModel); + + String text = randomAlphaOfLength(10); + TextTemplate template = new TextTemplate(text); + LoggingAction action = new LoggingAction(template, level, "_category"); + ExecutableLoggingAction executable = new ExecutableLoggingAction(action, logger, actionLogger, engine); + when(engine.render(template, expectedModel)).thenReturn(text); + + + + Action.Result result = executable.execute("_id", ctx, new Payload.Simple()); + verifyLogger(actionLogger, level, text); + + assertThat(result, notNullValue()); + assertThat(result.status(), is(Action.Result.Status.SUCCESS)); + assertThat(result, instanceOf(LoggingAction.Result.Success.class)); + assertThat(((LoggingAction.Result.Success) result).loggedText(), is(text)); + } + + public void testParser() throws Exception { + Settings settings = Settings.EMPTY; + LoggingActionFactory parser = new LoggingActionFactory(settings, engine); + + String text = randomAlphaOfLength(10); + TextTemplate template = new TextTemplate(text); + + XContentBuilder builder = jsonBuilder().startObject(); + builder.field("text", template); + String category = null; + if (randomBoolean()) { + category = randomAlphaOfLength(10); + builder.field("category", category); + } + LoggingLevel level = null; + if (randomBoolean()) { + level = randomFrom(LoggingLevel.values()); + builder.field("level", level); + } + builder.endObject(); + + XContentParser xContentParser = createParser(builder); + xContentParser.nextToken(); + + ExecutableLoggingAction executable = parser.parseExecutable(randomAlphaOfLength(5), randomAlphaOfLength(3), xContentParser); + + assertThat(executable, notNullValue()); + assertThat(executable.action().category, is(category)); + assertThat(executable.action().level, level == null ? 
is(LoggingLevel.INFO) : is(level)); + assertThat(executable.textLogger(), notNullValue()); + assertThat(executable.action().text, notNullValue()); + assertThat(executable.action().text, is(template)); + } + + public void testParserSelfGenerated() throws Exception { + Settings settings = Settings.EMPTY; + LoggingActionFactory parser = new LoggingActionFactory(settings, engine); + + String text = randomAlphaOfLength(10); + TextTemplate template = new TextTemplate(text); + String category = randomAlphaOfLength(10); + LoggingAction action = new LoggingAction(template, level, category); + ExecutableLoggingAction executable = new ExecutableLoggingAction(action, logger, settings, engine); + XContentBuilder builder = jsonBuilder(); + executable.toXContent(builder, Attachment.XContent.EMPTY_PARAMS); + + XContentParser xContentParser = createParser(builder); + xContentParser.nextToken(); + + ExecutableLoggingAction parsedAction = parser.parseExecutable(randomAlphaOfLength(5), randomAlphaOfLength(5), xContentParser); + + assertThat(parsedAction, equalTo(executable)); + } + + public void testParserBuilder() throws Exception { + Settings settings = Settings.EMPTY; + LoggingActionFactory parser = new LoggingActionFactory(settings, engine); + + String text = randomAlphaOfLength(10); + TextTemplate template = new TextTemplate(text); + LoggingAction.Builder actionBuilder = loggingAction(template); + if (randomBoolean()) { + actionBuilder.setCategory(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + actionBuilder.setLevel(randomFrom(LoggingLevel.values())); + } + LoggingAction action = actionBuilder.build(); + + XContentBuilder builder = jsonBuilder().value(action); + XContentParser xContentParser = createParser(builder); + + assertThat(xContentParser.nextToken(), is(XContentParser.Token.START_OBJECT)); + ExecutableLoggingAction executable = parser.parseExecutable(randomAlphaOfLength(4), randomAlphaOfLength(5), xContentParser); + assertThat(executable, notNullValue()); + assertThat(executable.action(), is(action)); + assertThat(executable.action(), is(action)); + assertThat(executable.action(), is(action)); + } + + public void testParserFailure() throws Exception { + Settings settings = Settings.EMPTY; + LoggingActionFactory parser = new LoggingActionFactory(settings, engine); + + XContentBuilder builder = jsonBuilder() + .startObject().endObject(); + + XContentParser xContentParser = createParser(builder); + xContentParser.nextToken(); + + try { + parser.parseExecutable(randomAlphaOfLength(5), randomAlphaOfLength(5), xContentParser); + fail("Expected failure as there's no text"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), containsString("missing required [text] field")); + } + } + + @SuppressLoggerChecks(reason = "mock usage") + static void verifyLogger(Logger logger, LoggingLevel level, String text) { + switch (level) { + case ERROR: + verify(logger, times(1)).error(text); + break; + case WARN: + verify(logger, times(1)).warn(text); + break; + case INFO: + verify(logger, times(1)).info(text); + break; + case DEBUG: + verify(logger, times(1)).debug(text); + break; + case TRACE: + verify(logger, times(1)).trace(text); + break; + default: + fail("unhandled logging level [" + level.name() + "]"); + } + } + +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/pagerduty/PagerDutyActionFactoryTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/pagerduty/PagerDutyActionFactoryTests.java new file mode 
100644 index 0000000000000..ca2b8b365ad47 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/pagerduty/PagerDutyActionFactoryTests.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.pagerduty; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.pagerduty.PagerDutyAccount; +import org.elasticsearch.xpack.watcher.notification.pagerduty.PagerDutyService; +import org.junit.Before; + +import java.util.Collections; +import java.util.HashSet; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.triggerPagerDutyAction; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class PagerDutyActionFactoryTests extends ESTestCase { + + private PagerDutyActionFactory factory; + private PagerDutyService service; + + @Before + public void init() throws Exception { + service = mock(PagerDutyService.class); + factory = new PagerDutyActionFactory(Settings.EMPTY, mock(TextTemplateEngine.class), service); + } + + public void testParseAction() throws Exception { + + PagerDutyAccount account = mock(PagerDutyAccount.class); + when(service.getAccount("_account1")).thenReturn(account); + + PagerDutyAction action = triggerPagerDutyAction("_account1", "_description").build(); + XContentBuilder jsonBuilder = jsonBuilder().value(action); + XContentParser parser = createParser(jsonBuilder); + parser.nextToken(); + + PagerDutyAction parsedAction = PagerDutyAction.parse("_w1", "_a1", parser); + assertThat(parsedAction, is(action)); + } + + public void testParseActionUnknownAccount() throws Exception { + factory = new PagerDutyActionFactory(Settings.EMPTY, mock(TextTemplateEngine.class), new PagerDutyService(Settings.EMPTY, null, + new ClusterSettings(Settings.EMPTY, new HashSet<>(PagerDutyService.getSettings())))); + PagerDutyAction action = triggerPagerDutyAction("_unknown", "_body").build(); + XContentBuilder jsonBuilder = jsonBuilder().value(action); + XContentParser parser = createParser(jsonBuilder); + parser.nextToken(); + expectThrows(IllegalArgumentException.class, () -> + factory.parseExecutable("_w1", "_a1", parser)); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/pagerduty/PagerDutyActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/pagerduty/PagerDutyActionTests.java new file mode 100644 index 0000000000000..6f57ccd82d930 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/pagerduty/PagerDutyActionTests.java @@ -0,0 +1,254 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.pagerduty; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.pagerduty.IncidentEvent; +import org.elasticsearch.xpack.watcher.notification.pagerduty.IncidentEventContext; +import org.elasticsearch.xpack.watcher.notification.pagerduty.IncidentEventDefaults; +import org.elasticsearch.xpack.watcher.notification.pagerduty.PagerDutyAccount; +import org.elasticsearch.xpack.watcher.notification.pagerduty.PagerDutyService; +import org.elasticsearch.xpack.watcher.notification.pagerduty.SentEvent; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.junit.Before; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.pagerDutyAction; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.mockExecutionContextBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class PagerDutyActionTests extends ESTestCase { + + private PagerDutyService service; + + @Before + public void init() throws Exception { + service = mock(PagerDutyService.class); + } + + public void testExecute() throws Exception { + final String accountName = "account1"; + + TextTemplateEngine templateEngine = mock(TextTemplateEngine.class); + + TextTemplate description = new TextTemplate("_description"); + IncidentEvent.Template.Builder eventBuilder = new IncidentEvent.Template.Builder(description); + boolean attachPayload = randomBoolean(); + eventBuilder.setAttachPayload(attachPayload); + eventBuilder.setAccount(accountName); + IncidentEvent.Template eventTemplate = eventBuilder.build(); + + PagerDutyAction action = new PagerDutyAction(eventTemplate); + ExecutablePagerDutyAction executable = new ExecutablePagerDutyAction(action, logger, service, templateEngine); + + Map data = new HashMap<>(); + Payload payload = new Payload.Simple(data); + + Map metadata = MapBuilder.newMapBuilder().put("_key", "_val").map(); + + 
DateTime now = DateTime.now(DateTimeZone.UTC); + + Wid wid = new Wid(randomAlphaOfLength(5), now); + WatchExecutionContext ctx = mockExecutionContextBuilder(wid.watchId()) + .wid(wid) + .payload(payload) + .time(wid.watchId(), now) + .metadata(metadata) + .buildMock(); + + Map ctxModel = new HashMap<>(); + ctxModel.put("id", ctx.id().value()); + ctxModel.put("watch_id", wid.watchId()); + ctxModel.put("payload", data); + ctxModel.put("metadata", metadata); + ctxModel.put("execution_time", now); + Map triggerModel = new HashMap<>(); + triggerModel.put("triggered_time", now); + triggerModel.put("scheduled_time", now); + ctxModel.put("trigger", triggerModel); + ctxModel.put("vars", Collections.emptyMap()); + Map expectedModel = new HashMap<>(); + expectedModel.put("ctx", ctxModel); + + when(templateEngine.render(description, expectedModel)).thenReturn(description.getTemplate()); + + IncidentEvent event = new IncidentEvent(description.getTemplate(), null, wid.watchId(), null, null, accountName, attachPayload, + null, null); + PagerDutyAccount account = mock(PagerDutyAccount.class); + when(account.getDefaults()).thenReturn(new IncidentEventDefaults(Settings.EMPTY)); + HttpResponse response = mock(HttpResponse.class); + when(response.status()).thenReturn(200); + HttpRequest request = mock(HttpRequest.class); + SentEvent sentEvent = SentEvent.responded(event, request, response); + when(account.send(event, payload)).thenReturn(sentEvent); + when(service.getAccount(accountName)).thenReturn(account); + + Action.Result result = executable.execute("_id", ctx, payload); + + assertThat(result, notNullValue()); + assertThat(result, instanceOf(PagerDutyAction.Result.Executed.class)); + assertThat(result.status(), equalTo(Action.Result.Status.SUCCESS)); + assertThat(((PagerDutyAction.Result.Executed) result).sentEvent(), sameInstance(sentEvent)); + } + + public void testParser() throws Exception { + + XContentBuilder builder = jsonBuilder().startObject(); + + String accountName = randomAlphaOfLength(10); + builder.field("account", accountName); + + TextTemplate incidentKey = null; + if (randomBoolean()) { + incidentKey = new TextTemplate("_incident_key"); + builder.field("incident_key", incidentKey); + } + + TextTemplate description = null; + if (randomBoolean()) { + description = new TextTemplate("_description"); + builder.field("description", description); + } + + TextTemplate client = null; + if (randomBoolean()) { + client = new TextTemplate("_client"); + builder.field("client", client); + } + + TextTemplate clientUrl = null; + if (randomBoolean()) { + clientUrl = new TextTemplate("_client_url"); + builder.field("client_url", clientUrl); + } + + TextTemplate eventType = null; + if (randomBoolean()) { + eventType = new TextTemplate(randomFrom("trigger", "resolve", "acknowledge")); + builder.field("event_type", eventType); + } + + Boolean attachPayload = randomBoolean() ? 
null : randomBoolean(); + if (attachPayload != null) { + builder.field("attach_payload", attachPayload.booleanValue()); + } + + HttpProxy proxy = null; + if (randomBoolean()) { + proxy = new HttpProxy("localhost", 8080); + proxy.toXContent(builder, ToXContent.EMPTY_PARAMS); + } + + IncidentEventContext.Template[] contexts = null; + if (randomBoolean()) { + contexts = new IncidentEventContext.Template[] { + IncidentEventContext.Template.link(new TextTemplate("_href"), new TextTemplate("_text")), + IncidentEventContext.Template.image(new TextTemplate("_src"), new TextTemplate("_href"), new TextTemplate("_alt")) + }; + String fieldName = randomBoolean() ? "contexts" : "context"; + builder.array(fieldName, (Object) contexts); + } + + builder.endObject(); + + BytesReference bytes = BytesReference.bytes(builder); + logger.info("pagerduty action json [{}]", bytes.utf8ToString()); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + + PagerDutyAction action = PagerDutyAction.parse("_watch", "_action", parser); + + assertThat(action, notNullValue()); + assertThat(action.event.account, is(accountName)); + assertThat(action.event, notNullValue()); + assertThat(action.event, instanceOf(IncidentEvent.Template.class)); + assertThat(action.event, is(new IncidentEvent.Template(description, eventType, incidentKey, client, clientUrl, accountName, + attachPayload, contexts, proxy))); + } + + public void testParserSelfGenerated() throws Exception { + IncidentEvent.Template.Builder event = IncidentEvent.templateBuilder(randomAlphaOfLength(50)); + + if (randomBoolean()) { + event.setIncidentKey(new TextTemplate(randomAlphaOfLength(50))); + } + if (randomBoolean()) { + event.setClient(new TextTemplate(randomAlphaOfLength(50))); + } + if (randomBoolean()) { + event.setClientUrl(new TextTemplate(randomAlphaOfLength(50))); + } + if (randomBoolean()) { + event.setAttachPayload(randomBoolean()); + } + if (randomBoolean()) { + event.addContext(IncidentEventContext.Template.link(new TextTemplate("_href"), new TextTemplate("_text"))); + } + if (randomBoolean()) { + event.addContext(IncidentEventContext.Template.image(new TextTemplate("_src"), new TextTemplate("_href"), + new TextTemplate("_alt"))); + } + if (randomBoolean()) { + event.setEventType(new TextTemplate(randomAlphaOfLength(50))); + } + if (randomBoolean()) { + event.setAccount(randomAlphaOfLength(50)).build(); + } + if (randomBoolean()) { + event.setProxy(new HttpProxy("localhost", 8080)); + } + + PagerDutyAction action = pagerDutyAction(event).build(); + XContentBuilder jsonBuilder = jsonBuilder(); + action.toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); + XContentParser parser = createParser(jsonBuilder); + parser.nextToken(); + + PagerDutyAction parsedAction = PagerDutyAction.parse("_w1", "_a1", parser); + assertThat(parsedAction, notNullValue()); + assertThat(parsedAction, is(action)); + } + + public void testParserInvalid() throws Exception { + try { + XContentBuilder builder = jsonBuilder().startObject().field("unknown_field", "value").endObject(); + XContentParser parser = createParser(builder); + parser.nextToken(); + PagerDutyAction.parse("_watch", "_action", parser); + fail("Expected ElasticsearchParseException but did not happen"); + } catch (ElasticsearchParseException e) { + + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/slack/ExecutableSlackActionTests.java 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/slack/ExecutableSlackActionTests.java new file mode 100644 index 0000000000000..4c945ec9fd526 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/slack/ExecutableSlackActionTests.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.slack; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.notification.slack.SlackAccount; +import org.elasticsearch.xpack.watcher.notification.slack.SlackService; +import org.elasticsearch.xpack.watcher.notification.slack.message.SlackMessage; +import org.elasticsearch.xpack.watcher.test.MockTextTemplateEngine; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.mockito.ArgumentCaptor; + +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.mockExecutionContextBuilder; +import static org.hamcrest.Matchers.is; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ExecutableSlackActionTests extends ESTestCase { + + public void testProxy() throws Exception { + HttpProxy proxy = new HttpProxy("localhost", 8080); + SlackMessage.Template messageTemplate = SlackMessage.Template.builder().addTo("to").setText(new TextTemplate("content")).build(); + SlackAction action = new SlackAction("account1", messageTemplate, proxy); + + HttpClient httpClient = mock(HttpClient.class); + ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(HttpRequest.class); + when(httpClient.execute(argumentCaptor.capture())).thenReturn(new HttpResponse(200)); + + Settings accountSettings = Settings.builder().put("url", "http://example.org").build(); + SlackAccount account = new SlackAccount("account1", accountSettings, Settings.EMPTY, httpClient, logger); + + SlackService service = mock(SlackService.class); + when(service.getAccount(eq("account1"))).thenReturn(account); + + DateTime now = DateTime.now(DateTimeZone.UTC); + + Wid wid = new Wid(randomAlphaOfLength(5), now); + WatchExecutionContext ctx = mockExecutionContextBuilder(wid.watchId()) + .wid(wid) + .payload(new Payload.Simple()) + .time(wid.watchId(), now) + .buildMock(); + + ExecutableSlackAction executable = new ExecutableSlackAction(action, logger, service, new MockTextTemplateEngine()); + executable.execute("foo", ctx, new Payload.Simple()); + + HttpRequest request = argumentCaptor.getValue(); + assertThat(request.proxy(), is(proxy)); + } + +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/slack/SlackActionFactoryTests.java 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/slack/SlackActionFactoryTests.java new file mode 100644 index 0000000000000..b8edb2e41a9e8 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/slack/SlackActionFactoryTests.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.slack; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.slack.SlackAccount; +import org.elasticsearch.xpack.watcher.notification.slack.SlackService; +import org.junit.Before; + +import java.util.Collections; +import java.util.HashSet; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.notification.slack.message.SlackMessageTests.createRandomTemplate; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.slackAction; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SlackActionFactoryTests extends ESTestCase { + private SlackActionFactory factory; + private SlackService service; + + @Before + public void init() throws Exception { + service = mock(SlackService.class); + factory = new SlackActionFactory(Settings.EMPTY, mock(TextTemplateEngine.class), service); + } + + public void testParseAction() throws Exception { + SlackAccount account = mock(SlackAccount.class); + when(service.getAccount("_account1")).thenReturn(account); + + SlackAction action = slackAction("_account1", createRandomTemplate()).build(); + XContentBuilder jsonBuilder = jsonBuilder().value(action); + XContentParser parser = createParser(jsonBuilder); + parser.nextToken(); + + SlackAction parsedAction = SlackAction.parse("_w1", "_a1", parser); + assertThat(parsedAction, is(action)); + } + + public void testParseActionUnknownAccount() throws Exception { + SlackService service = new SlackService(Settings.EMPTY, null, new ClusterSettings(Settings.EMPTY, + new HashSet<>(SlackService.getSettings()))); + factory = new SlackActionFactory(Settings.EMPTY, mock(TextTemplateEngine.class), service); + SlackAction action = slackAction("_unknown", createRandomTemplate()).build(); + XContentBuilder jsonBuilder = jsonBuilder().value(action); + XContentParser parser = createParser(jsonBuilder); + parser.nextToken(); + expectThrows(IllegalArgumentException.class, () -> factory.parseExecutable("_w1", "_a1", parser)); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/slack/SlackActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/slack/SlackActionTests.java new file mode 100644 index 0000000000000..29eaece9037e5 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/slack/SlackActionTests.java @@ -0,0 +1,204 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.slack; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.slack.SentMessages; +import org.elasticsearch.xpack.watcher.notification.slack.SlackAccount; +import org.elasticsearch.xpack.watcher.notification.slack.SlackService; +import org.elasticsearch.xpack.watcher.notification.slack.message.SlackMessage; +import org.elasticsearch.xpack.watcher.notification.slack.message.SlackMessageDefaults; +import org.elasticsearch.xpack.watcher.notification.slack.message.SlackMessageTests; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.mockExecutionContextBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SlackActionTests extends ESTestCase { + private SlackService service; + + @Before + public void init() throws Exception { + service = mock(SlackService.class); + } + + public void testExecute() throws Exception { + final String accountName = "account1"; + + TextTemplateEngine templateEngine = mock(TextTemplateEngine.class); + + SlackMessage.Template messageTemplate = mock(SlackMessage.Template.class); + SlackMessage message = mock(SlackMessage.class); + + SlackAction action = new SlackAction(accountName, messageTemplate, null); + ExecutableSlackAction executable = new ExecutableSlackAction(action, logger, service, templateEngine); + + Map data = new HashMap<>(); + Payload payload = new Payload.Simple(data); + + Map metadata = MapBuilder.newMapBuilder().put("_key", "_val").map(); + + DateTime now = DateTime.now(DateTimeZone.UTC); + + Wid wid = new Wid(randomAlphaOfLength(5), now); + WatchExecutionContext ctx = mockExecutionContextBuilder(wid.watchId()) + .wid(wid) + 
.payload(payload) + .time(wid.watchId(), now) + .metadata(metadata) + .buildMock(); + + Map triggerModel = new HashMap<>(); + triggerModel.put("triggered_time", now); + triggerModel.put("scheduled_time", now); + Map ctxModel = new HashMap<>(); + ctxModel.put("id", ctx.id().value()); + ctxModel.put("watch_id", wid.watchId()); + ctxModel.put("payload", data); + ctxModel.put("metadata", metadata); + ctxModel.put("execution_time", now); + ctxModel.put("trigger", triggerModel); + ctxModel.put("vars", emptyMap()); + Map expectedModel = singletonMap("ctx", ctxModel); + + when(messageTemplate.render(eq(wid.watchId()), eq("_action"), eq(templateEngine), eq(expectedModel), + any(SlackMessageDefaults.class))).thenReturn(message); + SlackAccount account = mock(SlackAccount.class); + when(service.getAccount(accountName)).thenReturn(account); + + + List messages = new ArrayList<>(); + boolean hasError = false; + boolean hasSuccess = false; + int count = randomIntBetween(1, 2); + for (int i = 0; i < count; i++) { + HttpResponse response = mock(HttpResponse.class); + HttpRequest request = mock(HttpRequest.class); + int randomInt = randomIntBetween(0, 2); + switch (randomInt) { + case 0: + messages.add(SentMessages.SentMessage.error(randomAlphaOfLength(10), message, new Exception("unknown error"))); + hasError = true; + break; + case 1: + when(response.status()).thenReturn(randomIntBetween(300, 600)); // error response + messages.add(SentMessages.SentMessage.responded(randomAlphaOfLength(10), message, request, response)); + hasError = true; + break; + case 2: + when(response.status()).thenReturn(randomIntBetween(200, 299)); // success + messages.add(SentMessages.SentMessage.responded(randomAlphaOfLength(10), message, request, response)); + hasSuccess = true; + } + } + SentMessages sentMessages = new SentMessages(accountName, messages); + when(account.send(message, eq(any()))).thenReturn(sentMessages); + + Action.Result.Status expectedStatus = !hasError ? Action.Result.Status.SUCCESS : + !hasSuccess ? Action.Result.Status.FAILURE : + Action.Result.Status.PARTIAL_FAILURE; + + + Action.Result result = executable.execute("_action", ctx, payload); + + assertThat(result, notNullValue()); + assertThat(result, instanceOf(SlackAction.Result.Executed.class)); + assertThat(result.status(), equalTo(expectedStatus)); + assertThat(((SlackAction.Result.Executed) result).sentMessages(), sameInstance(sentMessages)); + } + + public void testParser() throws Exception { + XContentBuilder builder = jsonBuilder().startObject(); + + String accountName = randomAlphaOfLength(10); + SlackMessage.Template message = SlackMessageTests.createRandomTemplate(); + + builder.field("account", accountName); + builder.field("message", message, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + BytesReference bytes = BytesReference.bytes(builder); + logger.info("slack action json [{}]", bytes.utf8ToString()); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + + SlackAction action = SlackAction.parse("_watch", "_action", parser); + + assertThat(action, notNullValue()); + assertThat(action.account, is(accountName)); + assertThat(action.message, notNullValue()); + assertThat(action.message, is(message)); + } + + public void testParserSelfGenerated() throws Exception { + String accountName = randomBoolean() ? 
randomAlphaOfLength(10) : null; + SlackMessage.Template message = SlackMessageTests.createRandomTemplate(); + + HttpProxy proxy = null; + if (randomBoolean()) { + proxy = new HttpProxy("localhost", 8080); + } + SlackAction action = new SlackAction(accountName, message, proxy); + + XContentBuilder builder = jsonBuilder(); + action.toXContent(builder, ToXContent.EMPTY_PARAMS); + BytesReference bytes = BytesReference.bytes(builder); + logger.info("{}", bytes.utf8ToString()); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + + SlackAction parsedAction = SlackAction.parse("_watch", "_action", parser); + + assertThat(parsedAction, notNullValue()); + assertThat(parsedAction, is(action)); + assertThat(parsedAction.proxy, is(action.proxy)); + } + + public void testParserInvalid() throws Exception { + XContentBuilder builder = jsonBuilder().startObject().field("unknown_field", "value").endObject(); + XContentParser parser = createParser(builder); + parser.nextToken(); + try { + SlackAction.parse("_watch", "_action", parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), is("failed to parse [slack] action [_watch/_action]. unexpected token [VALUE_STRING]")); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/throttler/AckThrottlerTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/throttler/AckThrottlerTests.java new file mode 100644 index 0000000000000..787c2c7ce51f8 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/throttler/AckThrottlerTests.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions.throttler; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; +import org.elasticsearch.xpack.core.watcher.actions.throttler.AckThrottler; +import org.elasticsearch.xpack.core.watcher.actions.throttler.Throttler; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; +import org.joda.time.DateTime; + +import java.time.Clock; + +import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.formatDate; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.mockExecutionContext; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.Matchers.is; +import static org.joda.time.DateTimeZone.UTC; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class AckThrottlerTests extends ESTestCase { + public void testWhenAcked() throws Exception { + DateTime timestamp = new DateTime(Clock.systemUTC().millis(), UTC); + WatchExecutionContext ctx = mockExecutionContext("_watch", Payload.EMPTY); + Watch watch = ctx.watch(); + ActionStatus actionStatus = mock(ActionStatus.class); + when(actionStatus.ackStatus()).thenReturn(new ActionStatus.AckStatus(timestamp, ActionStatus.AckStatus.State.ACKED)); + WatchStatus watchStatus = mock(WatchStatus.class); + when(watchStatus.actionStatus("_action")).thenReturn(actionStatus); + when(watch.status()).thenReturn(watchStatus); + AckThrottler throttler = new AckThrottler(); + Throttler.Result result = throttler.throttle("_action", ctx); + assertThat(result.throttle(), is(true)); + assertThat(result.reason(), is("action [_action] was acked at [" + formatDate(timestamp) + "]")); + assertThat(result.type(), is(Throttler.Type.ACK)); + } + + public void testThrottleWhenAwaitsSuccessfulExecution() throws Exception { + DateTime timestamp = new DateTime(Clock.systemUTC().millis(), UTC); + WatchExecutionContext ctx = mockExecutionContext("_watch", Payload.EMPTY); + Watch watch = ctx.watch(); + ActionStatus actionStatus = mock(ActionStatus.class); + when(actionStatus.ackStatus()).thenReturn(new ActionStatus.AckStatus(timestamp, + ActionStatus.AckStatus.State.AWAITS_SUCCESSFUL_EXECUTION)); + WatchStatus watchStatus = mock(WatchStatus.class); + when(watchStatus.actionStatus("_action")).thenReturn(actionStatus); + when(watch.status()).thenReturn(watchStatus); + AckThrottler throttler = new AckThrottler(); + Throttler.Result result = throttler.throttle("_action", ctx); + assertThat(result.throttle(), is(false)); + assertThat(result.reason(), nullValue()); + } + + public void testThrottleWhenAckable() throws Exception { + DateTime timestamp = new DateTime(Clock.systemUTC().millis(), UTC); + WatchExecutionContext ctx = mockExecutionContext("_watch", Payload.EMPTY); + Watch watch = ctx.watch(); + ActionStatus actionStatus = mock(ActionStatus.class); + when(actionStatus.ackStatus()).thenReturn(new ActionStatus.AckStatus(timestamp, ActionStatus.AckStatus.State.ACKABLE)); + WatchStatus watchStatus = mock(WatchStatus.class); + when(watchStatus.actionStatus("_action")).thenReturn(actionStatus); + when(watch.status()).thenReturn(watchStatus); + AckThrottler throttler = new AckThrottler(); + Throttler.Result result = throttler.throttle("_action", ctx); + assertThat(result.throttle(), 
is(false)); + assertThat(result.reason(), nullValue()); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/throttler/ActionThrottleTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/throttler/ActionThrottleTests.java new file mode 100644 index 0000000000000..bc22d58917931 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/throttler/ActionThrottleTests.java @@ -0,0 +1,409 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.throttler; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; +import org.elasticsearch.xpack.core.watcher.execution.ActionExecutionMode; +import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; +import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchRequestBuilder; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequest; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.actions.email.EmailAction; +import org.elasticsearch.xpack.watcher.actions.index.IndexAction; +import org.elasticsearch.xpack.watcher.actions.logging.LoggingAction; +import org.elasticsearch.xpack.watcher.actions.webhook.WebhookAction; +import org.elasticsearch.xpack.watcher.common.http.HttpMethod; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.notification.email.EmailTemplate; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.elasticsearch.xpack.watcher.trigger.manual.ManualTriggerEvent; +import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.indexAction; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.equalTo; + +public class ActionThrottleTests extends AbstractWatcherIntegrationTestCase { + + public void testSingleActionAckThrottle() 
throws Exception { + WatchSourceBuilder watchSourceBuilder = watchBuilder() + .trigger(schedule(interval("60m"))); + + AvailableAction availableAction = randomFrom(AvailableAction.values()); + Action.Builder action = availableAction.action(); + watchSourceBuilder.addAction("test_id", action); + + watcherClient().putWatch(new PutWatchRequest("_id", watchSourceBuilder.buildAsBytes(XContentType.JSON), + XContentType.JSON)).actionGet(); + refresh(Watch.INDEX); + + ExecuteWatchRequestBuilder executeWatchRequestBuilder = watcherClient().prepareExecuteWatch("_id") + .setRecordExecution(true) + .setActionMode("test_id", ActionExecutionMode.SIMULATE); + + Map<String, Object> responseMap = executeWatchRequestBuilder.get().getRecordSource().getAsMap(); + String status = ObjectPath.eval("result.actions.0.status", responseMap); + assertThat(status, equalTo(Action.Result.Status.SIMULATED.toString().toLowerCase(Locale.ROOT))); + + timeWarp().clock().fastForward(TimeValue.timeValueSeconds(15)); + + boolean ack = randomBoolean(); + if (ack) { + watcherClient().prepareAckWatch("_id").setActionIds("test_id").get(); + } + + executeWatchRequestBuilder = watcherClient().prepareExecuteWatch("_id") + .setRecordExecution(true) + .setActionMode("test_id", ActionExecutionMode.SIMULATE); + responseMap = executeWatchRequestBuilder.get().getRecordSource().getAsMap(); + status = ObjectPath.eval("result.actions.0.status", responseMap); + if (ack) { + assertThat(status, equalTo(Action.Result.Status.ACKNOWLEDGED.toString().toLowerCase(Locale.ROOT))); + } else { + assertThat(status, equalTo(Action.Result.Status.SIMULATED.toString().toLowerCase(Locale.ROOT))); + } + } + + public void testRandomMultiActionAckThrottle() throws Exception { + WatchSourceBuilder watchSourceBuilder = watchBuilder() + .trigger(schedule(interval("60m"))); + + Set<String> ackingActions = new HashSet<>(); + for (int i = 0; i < scaledRandomIntBetween(5,10); ++i) { + AvailableAction availableAction = randomFrom(AvailableAction.values()); + Action.Builder action = availableAction.action(); + watchSourceBuilder.addAction("test_id" + i, action); + if (randomBoolean()) { + ackingActions.add("test_id" + i); + } + } + + watcherClient().putWatch(new PutWatchRequest("_id", + watchSourceBuilder.buildAsBytes(XContentType.JSON), XContentType.JSON)).actionGet(); + refresh(Watch.INDEX); + executeWatch("_id"); + + for (String actionId : ackingActions) { + watcherClient().prepareAckWatch("_id").setActionIds(actionId).get(); + } + + timeWarp().clock().fastForwardSeconds(15); + + Map<String, Object> responseMap = executeWatch("_id"); + List<Map<String, String>> actions = ObjectPath.eval("result.actions", responseMap); + for (Map<String, String> result : actions) { + if (ackingActions.contains(result.get("id"))) { + assertThat(result.get("status"), equalTo(Action.Result.Status.ACKNOWLEDGED.toString().toLowerCase(Locale.ROOT))); + } else { + assertThat(result.get("status"), equalTo(Action.Result.Status.SIMULATED.toString().toLowerCase(Locale.ROOT))); + } + } + } + + private Map<String, Object> executeWatch(String id) { + return watcherClient().prepareExecuteWatch(id) + .setRecordExecution(true) + .setActionMode("_all", ActionExecutionMode.SIMULATE).get().getRecordSource().getAsMap(); + } + + public void testDifferentThrottlePeriods() throws Exception { + timeWarp().clock().setTime(DateTime.now(DateTimeZone.UTC)); + WatchSourceBuilder watchSourceBuilder = watchBuilder() + .trigger(schedule(interval("60m"))); + + watchSourceBuilder.addAction("ten_sec_throttle", new TimeValue(10, TimeUnit.SECONDS), + randomFrom(AvailableAction.values()).action()); +
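+        // the second action below uses a longer 15s throttle period, so after the clock is fast-forwarded past 10s the two actions should report different statuses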
watchSourceBuilder.addAction("fifteen_sec_throttle", new TimeValue(15, TimeUnit.SECONDS), + randomFrom(AvailableAction.values()).action()); + + watcherClient().putWatch(new PutWatchRequest("_id", + watchSourceBuilder.buildAsBytes(XContentType.JSON), XContentType.JSON)).actionGet(); + refresh(Watch.INDEX); + + timeWarp().clock().fastForwardSeconds(1); + Map responseMap = executeWatch("_id"); + List> actions = ObjectPath.eval("result.actions", responseMap); + for (Map result : actions) { + assertThat(result.get("status"), equalTo(Action.Result.Status.SIMULATED.toString().toLowerCase(Locale.ROOT))); + } + timeWarp().clock().fastForwardSeconds(1); + + responseMap = executeWatch("_id"); + actions = ObjectPath.eval("result.actions", responseMap); + for (Map result : actions) { + assertThat(result.get("status"), equalTo(Action.Result.Status.THROTTLED.toString().toLowerCase(Locale.ROOT))); + } + + timeWarp().clock().fastForwardSeconds(10); + + responseMap = executeWatch("_id"); + actions = ObjectPath.eval("result.actions", responseMap); + for (Map result : actions) { + if ("ten_sec_throttle".equals(result.get("id"))) { + assertThat(result.get("status"), equalTo(Action.Result.Status.SIMULATED.toString().toLowerCase(Locale.ROOT))); + } else { + assertThat(result.get("status"), equalTo(Action.Result.Status.THROTTLED.toString().toLowerCase(Locale.ROOT))); + } + } + } + + public void testDefaultThrottlePeriod() throws Exception { + WatchSourceBuilder watchSourceBuilder = watchBuilder() + .trigger(schedule(interval("60m"))); + + AvailableAction availableAction = randomFrom(AvailableAction.values()); + watchSourceBuilder.addAction("default_global_throttle", availableAction.action()); + + watcherClient().putWatch(new PutWatchRequest("_id", + watchSourceBuilder.buildAsBytes(XContentType.JSON), XContentType.JSON)).actionGet(); + refresh(Watch.INDEX); + + timeWarp().clock().setTime(new DateTime(DateTimeZone.UTC)); + + ExecuteWatchResponse executeWatchResponse = watcherClient().prepareExecuteWatch("_id") + .setTriggerEvent(new ManualTriggerEvent("execute_id", + new ScheduleTriggerEvent(new DateTime(DateTimeZone.UTC), new DateTime(DateTimeZone.UTC)))) + .setActionMode("default_global_throttle", ActionExecutionMode.SIMULATE) + .setRecordExecution(true) + .get(); + + String status = ObjectPath.eval("result.actions.0.status", executeWatchResponse.getRecordSource().getAsMap()); + assertThat(status, equalTo("simulated")); + + timeWarp().clock().fastForwardSeconds(1); + + executeWatchResponse = watcherClient().prepareExecuteWatch("_id") + .setTriggerEvent(new ManualTriggerEvent("execute_id", + new ScheduleTriggerEvent(new DateTime(DateTimeZone.UTC), new DateTime(DateTimeZone.UTC)))) + .setActionMode("default_global_throttle", ActionExecutionMode.SIMULATE) + .setRecordExecution(true) + .get(); + status = ObjectPath.eval("result.actions.0.status", executeWatchResponse.getRecordSource().getAsMap()); + assertThat(status, equalTo("throttled")); + + timeWarp().clock().fastForwardSeconds(5); + + assertBusy(() -> { + try { + ExecuteWatchResponse executeWatchResponse1 = watcherClient().prepareExecuteWatch("_id") + .setTriggerEvent(new ManualTriggerEvent("execute_id", + new ScheduleTriggerEvent(new DateTime(DateTimeZone.UTC), new DateTime(DateTimeZone.UTC)))) + .setActionMode("default_global_throttle", ActionExecutionMode.SIMULATE) + .setRecordExecution(true) + .get(); + String currentStatus = ObjectPath.eval("result.actions.0.status", executeWatchResponse1.getRecordSource().getAsMap()); + assertThat(currentStatus, 
equalTo("simulated")); + } catch (IOException ioe) { + throw new ElasticsearchException("failed to execute", ioe); + } + }, 6, TimeUnit.SECONDS); + } + + public void testWatchThrottlePeriod() throws Exception { + WatchSourceBuilder watchSourceBuilder = watchBuilder() + .trigger(schedule(interval("60m"))) + .defaultThrottlePeriod(new TimeValue(20, TimeUnit.SECONDS)); + + AvailableAction availableAction = randomFrom(AvailableAction.values()); + watchSourceBuilder.addAction("default_global_throttle", availableAction.action()); + + watcherClient().putWatch(new PutWatchRequest("_id", + watchSourceBuilder.buildAsBytes(XContentType.JSON), XContentType.JSON)).actionGet(); + refresh(Watch.INDEX); + + timeWarp().clock().setTime(new DateTime(DateTimeZone.UTC)); + + ExecuteWatchResponse executeWatchResponse = watcherClient().prepareExecuteWatch("_id") + .setTriggerEvent(new ManualTriggerEvent("execute_id", + new ScheduleTriggerEvent(new DateTime(DateTimeZone.UTC), new DateTime(DateTimeZone.UTC)))) + .setActionMode("default_global_throttle", ActionExecutionMode.SIMULATE) + .setRecordExecution(true) + .get(); + String status = ObjectPath.eval("result.actions.0.status", executeWatchResponse.getRecordSource().getAsMap()); + assertThat(status, equalTo("simulated")); + + timeWarp().clock().fastForwardSeconds(1); + + executeWatchResponse = watcherClient().prepareExecuteWatch("_id") + .setTriggerEvent(new ManualTriggerEvent("execute_id", + new ScheduleTriggerEvent(new DateTime(DateTimeZone.UTC), new DateTime(DateTimeZone.UTC)))) + .setActionMode("default_global_throttle", ActionExecutionMode.SIMULATE) + .setRecordExecution(true) + .get(); + status = ObjectPath.eval("result.actions.0.status", executeWatchResponse.getRecordSource().getAsMap()); + assertThat(status, equalTo("throttled")); + + timeWarp().clock().fastForwardSeconds(20); + + assertBusy(() -> { + try { + //Since the default throttle period is 5 seconds but we have overridden the period in the watch this should trigger + ExecuteWatchResponse executeWatchResponse1 = watcherClient().prepareExecuteWatch("_id") + .setTriggerEvent(new ManualTriggerEvent("execute_id", + new ScheduleTriggerEvent(new DateTime(DateTimeZone.UTC), new DateTime(DateTimeZone.UTC)))) + .setActionMode("default_global_throttle", ActionExecutionMode.SIMULATE) + .setRecordExecution(true) + .get(); + String status1 = ObjectPath.eval("result.actions.0.status", executeWatchResponse1.getRecordSource().getAsMap()); + assertThat(status1, equalTo("simulated")); + } catch (IOException ioe) { + throw new ElasticsearchException("failed to execute", ioe); + } + }, 20, TimeUnit.SECONDS); + } + + public void testFailingActionDoesGetThrottled() throws Exception { + // create a mapping with a wrong @timestamp field, so that the index action of the watch below will fail + String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("bar") + .startObject("properties") + .startObject("@timestamp") + .field("type", "integer") + .endObject() + .endObject() + .endObject() + .endObject()); + + client().admin().indices().prepareCreate("foo").addMapping("bar", mapping, XContentType.JSON).get(); + + TimeValue throttlePeriod = new TimeValue(60, TimeUnit.MINUTES); + + watcherClient().preparePutWatch("_id").setSource(watchBuilder() + .trigger(new ScheduleTrigger(new IntervalSchedule( + new IntervalSchedule.Interval(60, IntervalSchedule.Interval.Unit.MINUTES)))) + .defaultThrottlePeriod(throttlePeriod) + .addAction("logging", loggingAction("test out")) + 
.addAction("failing_hook", indexAction("foo", "bar").setExecutionTimeField("@timestamp"))) + .get(); + refresh(Watch.INDEX); + + { + Map responseMap = watcherClient().prepareExecuteWatch("_id") + .setRecordExecution(true) + .get().getRecordSource().getAsMap(); + + String state = ObjectPath.eval("state", responseMap); + + String firstId = ObjectPath.eval("result.actions.0.id", responseMap); + String statusLogging, statusFailingHook; + if ("logging".equals(firstId)) { + statusLogging = ObjectPath.eval("result.actions.0.status", responseMap); + statusFailingHook = ObjectPath.eval("result.actions.1.status", responseMap); + } else { + statusFailingHook = ObjectPath.eval("result.actions.0.status", responseMap); + statusLogging = ObjectPath.eval("result.actions.1.status", responseMap); + } + + assertThat(state, equalTo(ExecutionState.EXECUTED.toString().toLowerCase(Locale.ROOT))); + assertThat(statusLogging, equalTo(Action.Result.Status.SUCCESS.toString().toLowerCase(Locale.ROOT))); + assertThat(statusFailingHook, equalTo(Action.Result.Status.FAILURE.toString().toLowerCase(Locale.ROOT))); + } + + { + Map responseMap = watcherClient().prepareExecuteWatch("_id") + .setRecordExecution(true) + .get().getRecordSource().getAsMap(); + String state = ObjectPath.eval("state", responseMap); + + String firstId = ObjectPath.eval("result.actions.0.id", responseMap); + String statusLogging, statusFailingHook; + if ("logging".equals(firstId)) { + statusLogging = ObjectPath.eval("result.actions.0.status", responseMap); + statusFailingHook = ObjectPath.eval("result.actions.1.status", responseMap); + } else { + statusFailingHook = ObjectPath.eval("result.actions.0.status", responseMap); + statusLogging = ObjectPath.eval("result.actions.1.status", responseMap); + } + + assertThat(state, equalTo(ExecutionState.THROTTLED.toString().toLowerCase(Locale.ROOT))); + assertThat(statusLogging, equalTo(Action.Result.Status.THROTTLED.toString().toLowerCase(Locale.ROOT))); + assertThat(statusFailingHook, equalTo(Action.Result.Status.FAILURE.toString().toLowerCase(Locale.ROOT))); + } + } + + enum AvailableAction { + EMAIL { + @Override + public Action.Builder action() throws Exception { + EmailTemplate.Builder emailBuilder = EmailTemplate.builder(); + emailBuilder.from("test@test.com"); + emailBuilder.to("test@test.com"); + emailBuilder.subject("test subject"); + return EmailAction.builder(emailBuilder.build()); + } + + @Override + public String type() { + return EmailAction.TYPE; + } + }, + WEBHOOK { + @Override + public Action.Builder action() throws Exception { + HttpRequestTemplate.Builder requestBuilder = HttpRequestTemplate.builder("localhost", 1234) + .path("/") + .method(HttpMethod.GET); + return WebhookAction.builder(requestBuilder.build()); + } + + @Override + public String type() { + return WebhookAction.TYPE; + } + }, + LOGGING { + @Override + public Action.Builder action() throws Exception { + return LoggingAction.builder(new TextTemplate("_logging")); + } + + @Override + public String type() { + return LoggingAction.TYPE; + } + }, + INDEX { + @Override + public Action.Builder action() throws Exception { + return IndexAction.builder("test_index", "test_type"); + } + + @Override + public String type() { + return IndexAction.TYPE; + } + }; + + public abstract Action.Builder action() throws Exception; + + public abstract String type(); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/throttler/PeriodThrottlerTests.java 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/throttler/PeriodThrottlerTests.java new file mode 100644 index 0000000000000..89ef2e23ccca3 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/throttler/PeriodThrottlerTests.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.throttler; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; +import org.elasticsearch.xpack.core.watcher.actions.throttler.PeriodThrottler; +import org.elasticsearch.xpack.core.watcher.actions.throttler.Throttler; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; +import org.joda.time.DateTime; + +import java.time.Clock; + +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.mockExecutionContext; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.startsWith; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class PeriodThrottlerTests extends ESTestCase { + public void testBelowPeriodSuccessful() throws Exception { + TimeValue period = TimeValue.timeValueSeconds(randomIntBetween(2, 5)); + PeriodThrottler throttler = new PeriodThrottler(Clock.systemUTC(), period); + + WatchExecutionContext ctx = mockExecutionContext("_name", Payload.EMPTY); + ActionStatus actionStatus = mock(ActionStatus.class); + DateTime now = new DateTime(Clock.systemUTC().millis()); + when(actionStatus.lastSuccessfulExecution()) + .thenReturn(ActionStatus.Execution.successful(now.minusSeconds((int) period.seconds() - 1))); + WatchStatus status = mock(WatchStatus.class); + when(status.actionStatus("_action")).thenReturn(actionStatus); + when(ctx.watch().status()).thenReturn(status); + + Throttler.Result result = throttler.throttle("_action", ctx); + assertThat(result, notNullValue()); + assertThat(result.throttle(), is(true)); + assertThat(result.reason(), notNullValue()); + assertThat(result.reason(), startsWith("throttling interval is set to [" + period + "]")); + assertThat(result.type(), is(Throttler.Type.PERIOD)); + } + + public void testAbovePeriod() throws Exception { + TimeValue period = TimeValue.timeValueSeconds(randomIntBetween(2, 5)); + PeriodThrottler throttler = new PeriodThrottler(Clock.systemUTC(), period); + + WatchExecutionContext ctx = mockExecutionContext("_name", Payload.EMPTY); + ActionStatus actionStatus = mock(ActionStatus.class); + DateTime now = new DateTime(Clock.systemUTC().millis()); + when(actionStatus.lastSuccessfulExecution()) + .thenReturn(ActionStatus.Execution.successful(now.minusSeconds((int) period.seconds() + 1))); + WatchStatus status = mock(WatchStatus.class); + when(status.actionStatus("_action")).thenReturn(actionStatus); + when(ctx.watch().status()).thenReturn(status); + + Throttler.Result result = throttler.throttle("_action", ctx); + assertThat(result, notNullValue()); + assertThat(result.throttle(), is(false)); + 
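+        // the last successful execution is older than the throttle period, so no throttle reason is expected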
assertThat(result.reason(), nullValue()); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookActionTests.java new file mode 100644 index 0000000000000..f57f65f1d6204 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookActionTests.java @@ -0,0 +1,352 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.webhook; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.actions.Action.Result.Status; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpMethod; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuthRegistry; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuthFactory; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.execution.TriggeredExecutionContext; +import org.elasticsearch.xpack.watcher.notification.email.Attachment; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateService; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.elasticsearch.xpack.watcher.test.MockTextTemplateEngine; +import org.elasticsearch.xpack.watcher.test.WatcherTestUtils; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent; +import org.hamcrest.Matchers; +import org.joda.time.DateTime; +import org.junit.Before; + +import javax.mail.internet.AddressException; + +import java.io.IOException; +import java.util.Map; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import 
static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.core.Is.is; +import static org.joda.time.DateTimeZone.UTC; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class WebhookActionTests extends ESTestCase { + + private static final String TEST_HOST = "test.com"; + private static final int TEST_PORT = 8089; + private static final String TEST_BODY_STRING = "ERROR HAPPENED"; + private static final String TEST_PATH_STRING = "/testPath"; + + private TextTemplateEngine templateEngine; + private HttpAuthRegistry authRegistry; + private TextTemplate testBody; + private TextTemplate testPath; + + @Before + public void init() throws Exception { + templateEngine = new MockTextTemplateEngine(); + testBody = new TextTemplate(TEST_BODY_STRING); + testPath = new TextTemplate(TEST_PATH_STRING); + authRegistry = new HttpAuthRegistry(singletonMap("basic", new BasicAuthFactory(null))); + } + + public void testExecute() throws Exception { + ExecuteScenario scenario = randomFrom(ExecuteScenario.Success, ExecuteScenario.ErrorCode); + + HttpClient httpClient = scenario.client(); + HttpMethod method = randomFrom(HttpMethod.GET, HttpMethod.POST, HttpMethod.PUT, HttpMethod.DELETE, HttpMethod.HEAD); + + HttpRequestTemplate httpRequest = getHttpRequestTemplate(method, TEST_HOST, TEST_PORT, testPath, testBody, null); + + WebhookAction action = new WebhookAction(httpRequest); + ExecutableWebhookAction executable = new ExecutableWebhookAction(action, logger, httpClient, templateEngine); + WatchExecutionContext ctx = WatcherTestUtils.mockExecutionContext("_id", new Payload.Simple("foo", "bar")); + + Action.Result actionResult = executable.execute("_id", ctx, Payload.EMPTY); + + scenario.assertResult(httpClient, actionResult); + } + + private HttpRequestTemplate getHttpRequestTemplate(HttpMethod method, String host, int port, TextTemplate path, TextTemplate body, + Map params) { + HttpRequestTemplate.Builder builder = HttpRequestTemplate.builder(host, port); + if (path != null) { + builder.path(path); + } + if (body != null) { + builder.body(body); + } + if (method != null) { + builder.method(method); + } + if (params != null){ + builder.putParams(params); + } + return builder.build(); + } + + public void testParser() throws Exception { + TextTemplate body = randomBoolean() ? new TextTemplate("_subject") : null; + TextTemplate path = new TextTemplate("_url"); + String host = "test.host"; + HttpMethod method = randomFrom(HttpMethod.GET, HttpMethod.POST, HttpMethod.PUT, HttpMethod.DELETE, HttpMethod.HEAD, null); + HttpRequestTemplate request = getHttpRequestTemplate(method, host, TEST_PORT, path, body, null); + + XContentBuilder builder = jsonBuilder(); + request.toXContent(builder, Attachment.XContent.EMPTY_PARAMS); + + WebhookActionFactory actionParser = webhookFactory(ExecuteScenario.Success.client()); + + XContentParser parser = createParser(builder); + parser.nextToken(); + + ExecutableWebhookAction executable = actionParser.parseExecutable(randomAlphaOfLength(5), randomAlphaOfLength(5), parser); + + assertThat(executable.action().getRequest(), equalTo(request)); + } + + public void testParserSelfGenerated() throws Exception { + TextTemplate body = randomBoolean() ? 
new TextTemplate("_body") : null; + TextTemplate path = new TextTemplate("_url"); + String host = "test.host"; + String watchId = "_watch"; + String actionId = randomAlphaOfLength(5); + + HttpMethod method = randomFrom(HttpMethod.GET, HttpMethod.POST, HttpMethod.PUT, HttpMethod.DELETE, HttpMethod.HEAD, null); + + HttpRequestTemplate request = getHttpRequestTemplate(method, host, TEST_PORT, path, body, null); + WebhookAction action = new WebhookAction(request); + ExecutableWebhookAction executable = new ExecutableWebhookAction(action, logger, ExecuteScenario.Success.client(), templateEngine); + + XContentBuilder builder = jsonBuilder(); + executable.toXContent(builder, ToXContent.EMPTY_PARAMS); + + WebhookActionFactory actionParser = webhookFactory(ExecuteScenario.Success.client()); + + XContentParser parser = createParser(builder); + parser.nextToken(); + + ExecutableWebhookAction parsedExecutable = actionParser.parseExecutable(watchId, actionId, parser); + assertThat(parsedExecutable, notNullValue()); + assertThat(parsedExecutable.action(), is(action)); + } + + public void testParserBuilder() throws Exception { + TextTemplate body = randomBoolean() ? new TextTemplate("_body") : null; + TextTemplate path = new TextTemplate("_url"); + String host = "test.host"; + + String watchId = "_watch"; + String actionId = randomAlphaOfLength(5); + + HttpMethod method = randomFrom(HttpMethod.GET, HttpMethod.POST, HttpMethod.PUT, HttpMethod.DELETE, HttpMethod.HEAD, null); + HttpRequestTemplate request = getHttpRequestTemplate(method, host, TEST_PORT, path, body, null); + + WebhookAction action = WebhookAction.builder(request).build(); + + XContentBuilder builder = jsonBuilder(); + action.toXContent(builder, ToXContent.EMPTY_PARAMS); + + WebhookActionFactory actionParser = webhookFactory(ExecuteScenario.Success.client()); + + XContentParser parser = createParser(builder); + assertThat(parser.nextToken(), is(XContentParser.Token.START_OBJECT)); + ExecutableWebhookAction parsedAction = actionParser.parseExecutable(watchId, actionId, parser); + assertThat(parsedAction.action(), is(action)); + } + + public void testParserFailure() throws Exception { + XContentBuilder builder = jsonBuilder().startObject(); + if (randomBoolean()) { + builder.field(HttpRequest.Field.HOST.getPreferredName(), TEST_HOST); + } else { + builder.field(HttpRequest.Field.PORT.getPreferredName(), TEST_PORT); + } + builder.endObject(); + + XContentParser parser = createParser(builder); + parser.nextToken(); + + WebhookActionFactory actionParser = webhookFactory(ExecuteScenario.Success.client()); + //This should fail since we are not supplying a url + try { + actionParser.parseExecutable("_watch", randomAlphaOfLength(5), parser); + fail("expected a WebhookActionException since we only provided either a host or a port but not both"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), containsString("failed parsing http request template")); + } + } + + private WebhookActionFactory webhookFactory(HttpClient client) { + return new WebhookActionFactory(Settings.EMPTY, client, new HttpRequestTemplate.Parser(authRegistry), templateEngine); + } + + public void testThatSelectingProxyWorks() throws Exception { + Environment environment = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); + HttpClient httpClient = new HttpClient(Settings.EMPTY, authRegistry, + new SSLService(environment.settings(), environment)); + + try (MockWebServer proxyServer = new MockWebServer()) { + 
proxyServer.start(); + proxyServer.enqueue(new MockResponse().setResponseCode(200).setBody("fullProxiedContent")); + + HttpRequestTemplate.Builder builder = HttpRequestTemplate.builder("localhost", 65535) + .path("/").proxy(new HttpProxy("localhost", proxyServer.getPort())); + WebhookAction action = new WebhookAction(builder.build()); + + ExecutableWebhookAction executable = new ExecutableWebhookAction(action, logger, httpClient, templateEngine); + String watchId = "test_url_encode" + randomAlphaOfLength(10); + TriggeredExecutionContext ctx = new TriggeredExecutionContext(watchId, new DateTime(UTC), + new ScheduleTriggerEvent(watchId, new DateTime(UTC), new DateTime(UTC)), timeValueSeconds(5)); + Watch watch = createWatch(watchId); + ctx.ensureWatchExists(() -> watch); + executable.execute("_id", ctx, new Payload.Simple()); + + assertThat(proxyServer.requests(), hasSize(1)); + } + } + + public void testValidUrls() throws Exception { + HttpClient client = mock(HttpClient.class); + when(client.execute(any(HttpRequest.class))) + .thenReturn(new HttpResponse(randomIntBetween(200, 399))); + String watchId = "test_url_encode" + randomAlphaOfLength(10); + + HttpMethod method = HttpMethod.POST; + TextTemplate path = new TextTemplate("/test_" + watchId); + String host = "test.host"; + TextTemplate testBody = new TextTemplate("ERROR HAPPENED"); + HttpRequestTemplate requestTemplate = getHttpRequestTemplate(method, host, TEST_PORT, path, testBody, null); + WebhookAction action = new WebhookAction(requestTemplate); + + ExecutableWebhookAction executable = new ExecutableWebhookAction(action, logger, client, templateEngine); + + TriggeredExecutionContext ctx = new TriggeredExecutionContext(watchId, new DateTime(UTC), + new ScheduleTriggerEvent(watchId, new DateTime(UTC), new DateTime(UTC)), timeValueSeconds(5)); + Watch watch = createWatch(watchId); + ctx.ensureWatchExists(() -> watch); + Action.Result result = executable.execute("_id", ctx, new Payload.Simple()); + assertThat(result, Matchers.instanceOf(WebhookAction.Result.Success.class)); + } + + private Watch createWatch(String watchId) throws AddressException, IOException { + return WatcherTestUtils.createTestWatch(watchId, + mock(Client.class), + ExecuteScenario.Success.client(), + new AbstractWatcherIntegrationTestCase.NoopEmailService(), + mock(WatcherSearchTemplateService.class), + logger); + } + + private enum ExecuteScenario { + ErrorCode() { + @Override + public HttpClient client() throws IOException { + HttpClient client = mock(HttpClient.class); + when(client.execute(any(HttpRequest.class))).thenReturn(new HttpResponse(randomIntBetween(400, 599))); + return client; + } + + @Override + public void assertResult(HttpClient client, Action.Result actionResult) throws Exception { + assertThat(actionResult.status(), is(Status.FAILURE)); + assertThat(actionResult, instanceOf(WebhookAction.Result.Failure.class)); + WebhookAction.Result.Failure executedActionResult = (WebhookAction.Result.Failure) actionResult; + assertThat(executedActionResult.response().status(), greaterThanOrEqualTo(400)); + assertThat(executedActionResult.response().status(), lessThanOrEqualTo(599)); + assertThat(executedActionResult.request().body(), equalTo(TEST_BODY_STRING)); + assertThat(executedActionResult.request().path(), equalTo(TEST_PATH_STRING)); + } + }, + + Error() { + @Override + public HttpClient client() throws IOException { + HttpClient client = mock(HttpClient.class); + when(client.execute(any(HttpRequest.class))) + .thenThrow(new IOException("Unable to 
connect")); + return client; + } + + @Override + public void assertResult(HttpClient client, Action.Result actionResult) throws Exception { + assertThat(actionResult, instanceOf(WebhookAction.Result.Failure.class)); + WebhookAction.Result.Failure failResult = (WebhookAction.Result.Failure) actionResult; + assertThat(failResult.status(), is(Status.FAILURE)); + } + }, + + Success() { + @Override + public HttpClient client() throws IOException{ + HttpClient client = mock(HttpClient.class); + when(client.execute(any(HttpRequest.class))) + .thenReturn(new HttpResponse(randomIntBetween(200, 399))); + return client; + } + + @Override + public void assertResult(HttpClient client, Action.Result actionResult) throws Exception { + assertThat(actionResult.status(), is(Status.SUCCESS)); + assertThat(actionResult, instanceOf(WebhookAction.Result.Success.class)); + WebhookAction.Result.Success executedActionResult = (WebhookAction.Result.Success) actionResult; + assertThat(executedActionResult.response().status(), greaterThanOrEqualTo(200)); + assertThat(executedActionResult.response().status(), lessThanOrEqualTo(399)); + assertThat(executedActionResult.request().body(), equalTo(TEST_BODY_STRING)); + assertThat(executedActionResult.request().path(), equalTo(TEST_PATH_STRING)); + } + }, + + NoExecute() { + @Override + public HttpClient client() throws IOException{ + return mock(HttpClient.class); + } + + @Override + public void assertResult(HttpClient client, Action.Result actionResult) throws Exception { + verify(client, never()).execute(any(HttpRequest.class)); + } + }; + + public abstract HttpClient client() throws IOException; + + public abstract void assertResult(HttpClient client, Action.Result result) throws Exception ; + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java new file mode 100644 index 0000000000000..9858a5cd11851 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java @@ -0,0 +1,132 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions.webhook; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.xpack.core.ssl.TestsSSLService; +import org.elasticsearch.xpack.core.watcher.history.WatchRecord; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.watcher.actions.ActionBuilders; +import org.elasticsearch.xpack.watcher.common.http.HttpMethod; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.http.Scheme; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuth; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.junit.After; +import org.junit.Before; + +import java.nio.file.Path; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.xContentSource; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class WebhookHttpsIntegrationTests extends AbstractWatcherIntegrationTestCase { + + private MockWebServer webServer; + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Path resource = getDataPath("/org/elasticsearch/xpack/security/keystore/testnode.jks"); + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put("xpack.http.ssl.keystore.path", resource.toString()) + .put("xpack.http.ssl.keystore.password", "testnode") + .build(); + } + + @Before + public void startWebservice() throws Exception { + Settings settings = getInstanceFromMaster(Settings.class); + TestsSSLService sslService = new TestsSSLService(settings, getInstanceFromMaster(Environment.class)); + webServer = new MockWebServer(sslService.sslContext(settings.getByPrefix("xpack.http.ssl.")), false); + webServer.start(); + } + + @After + public void stopWebservice() throws Exception { + webServer.close(); + } + + public void testHttps() throws Exception { + webServer.enqueue(new MockResponse().setResponseCode(200).setBody("body")); + HttpRequestTemplate.Builder builder = HttpRequestTemplate.builder("localhost", webServer.getPort()) + .scheme(Scheme.HTTPS) + .path(new TextTemplate("/test/_id")) + .body(new TextTemplate("{key=value}")) + .method(HttpMethod.POST); + + watcherClient().preparePutWatch("_id") + .setSource(watchBuilder() + .trigger(schedule(interval("5s"))) + .input(simpleInput("key", "value")) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_id", ActionBuilders.webhookAction(builder))) + .get(); + + timeWarp().trigger("_id"); + refresh(); + + assertWatchWithMinimumPerformedActionsCount("_id", 1, 
false); + assertThat(webServer.requests(), hasSize(1)); + assertThat(webServer.requests().get(0).getUri().getPath(), equalTo("/test/_id")); + assertThat(webServer.requests().get(0).getBody(), equalTo("{key=value}")); + + SearchResponse response = + searchWatchRecords(b -> b.setQuery(QueryBuilders.termQuery(WatchRecord.STATE.getPreferredName(), "executed"))); + + assertNoFailures(response); + XContentSource source = xContentSource(response.getHits().getAt(0).getSourceRef()); + String body = source.getValue("result.actions.0.webhook.response.body"); + assertThat(body, notNullValue()); + assertThat(body, is("body")); + + Number status = source.getValue("result.actions.0.webhook.response.status"); + assertThat(status, notNullValue()); + assertThat(status.intValue(), is(200)); + } + + public void testHttpsAndBasicAuth() throws Exception { + webServer.enqueue(new MockResponse().setResponseCode(200).setBody("body")); + HttpRequestTemplate.Builder builder = HttpRequestTemplate.builder("localhost", webServer.getPort()) + .scheme(Scheme.HTTPS) + .auth(new BasicAuth("_username", "_password".toCharArray())) + .path(new TextTemplate("/test/_id")) + .body(new TextTemplate("{key=value}")) + .method(HttpMethod.POST); + + watcherClient().preparePutWatch("_id") + .setSource(watchBuilder() + .trigger(schedule(interval("5s"))) + .input(simpleInput("key", "value")) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_id", ActionBuilders.webhookAction(builder))) + .get(); + + timeWarp().trigger("_id"); + refresh(); + + assertWatchWithMinimumPerformedActionsCount("_id", 1, false); + assertThat(webServer.requests(), hasSize(1)); + assertThat(webServer.requests().get(0).getUri().getPath(), equalTo("/test/_id")); + assertThat(webServer.requests().get(0).getBody(), equalTo("{key=value}")); + assertThat(webServer.requests().get(0).getHeader("Authorization"), equalTo("Basic X3VzZXJuYW1lOl9wYXNzd29yZA==")); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java new file mode 100644 index 0000000000000..151bf4af18939 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java @@ -0,0 +1,169 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.actions.webhook; + +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.transport.Netty4Plugin; +import org.elasticsearch.xpack.core.watcher.history.WatchRecord; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.watcher.actions.ActionBuilders; +import org.elasticsearch.xpack.watcher.common.http.HttpMethod; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuth; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.junit.After; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Collection; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertExists; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.xContentSource; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class WebhookIntegrationTests extends AbstractWatcherIntegrationTestCase { + + private MockWebServer webServer = new MockWebServer(); + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put("http.enabled", true).build(); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + ArrayList<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins()); + plugins.add(Netty4Plugin.class); // for http + return plugins; + } + + @Before + public void startWebservice() throws Exception { + webServer.start(); + } + + @After + public void stopWebservice() throws Exception { + webServer.close(); + } + + public void testWebhook() throws Exception { + webServer.enqueue(new MockResponse().setResponseCode(200).setBody("body")); + HttpRequestTemplate.Builder builder = HttpRequestTemplate.builder("localhost", webServer.getPort()) + .path(new TextTemplate("/test/_id")) + .putParam("param1", new TextTemplate("value1")) + .putParam("watch_id", new TextTemplate("_id")) + .body(new TextTemplate("_body")) + .auth(new BasicAuth("user", "pass".toCharArray())) + .method(HttpMethod.POST); + + watcherClient().preparePutWatch("_id") + .setSource(watchBuilder() + .trigger(schedule(interval("5s"))) + .input(simpleInput("key", "value")) +
.condition(InternalAlwaysCondition.INSTANCE) + .addAction("_id", ActionBuilders.webhookAction(builder))) + .get(); + + timeWarp().trigger("_id"); + refresh(); + + assertWatchWithMinimumPerformedActionsCount("_id", 1, false); + assertThat(webServer.requests(), hasSize(1)); + assertThat(webServer.requests().get(0).getUri().getQuery(), + anyOf(equalTo("watch_id=_id&param1=value1"), equalTo("param1=value1&watch_id=_id"))); + + assertThat(webServer.requests().get(0).getBody(), is("_body")); + + SearchResponse response = searchWatchRecords(b -> QueryBuilders.termQuery(WatchRecord.STATE.getPreferredName(), "executed")); + + assertNoFailures(response); + XContentSource source = xContentSource(response.getHits().getAt(0).getSourceRef()); + String body = source.getValue("result.actions.0.webhook.response.body"); + assertThat(body, notNullValue()); + assertThat(body, is("body")); + Number status = source.getValue("result.actions.0.webhook.response.status"); + assertThat(status, notNullValue()); + assertThat(status.intValue(), is(200)); + } + + public void testWebhookWithBasicAuth() throws Exception { + webServer.enqueue(new MockResponse().setResponseCode(200).setBody("body")); + HttpRequestTemplate.Builder builder = HttpRequestTemplate.builder("localhost", webServer.getPort()) + .auth(new BasicAuth("_username", "_password".toCharArray())) + .path(new TextTemplate("/test/_id")) + .putParam("param1", new TextTemplate("value1")) + .putParam("watch_id", new TextTemplate("_id")) + .body(new TextTemplate("_body")) + .method(HttpMethod.POST); + + watcherClient().preparePutWatch("_id") + .setSource(watchBuilder() + .trigger(schedule(interval("5s"))) + .input(simpleInput("key", "value")) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_id", ActionBuilders.webhookAction(builder))) + .get(); + + timeWarp().trigger("_id"); + refresh(); + + assertWatchWithMinimumPerformedActionsCount("_id", 1, false); + + assertThat(webServer.requests(), hasSize(1)); + assertThat(webServer.requests().get(0).getUri().getQuery(), + anyOf(equalTo("watch_id=_id&param1=value1"), equalTo("param1=value1&watch_id=_id"))); + assertThat(webServer.requests().get(0).getBody(), is("_body")); + assertThat(webServer.requests().get(0).getHeader("Authorization"), is(("Basic X3VzZXJuYW1lOl9wYXNzd29yZA=="))); + } + + public void testWebhookWithTimebasedIndex() throws Exception { + assertAcked(client().admin().indices().prepareCreate("<logstash-{now/d}>").get()); + + HttpServerTransport serverTransport = internalCluster().getDataNodeInstance(HttpServerTransport.class); + TransportAddress publishAddress = serverTransport.boundAddress().publishAddress(); + + String host = publishAddress.address().getHostString(); + HttpRequestTemplate.Builder builder = HttpRequestTemplate.builder(host, publishAddress.getPort()) + .path(new TextTemplate("/%3Clogstash-%7Bnow%2Fd%7D%3E/log/1")) + .body(new TextTemplate("{\"foo\":\"bar\"}")) + .putHeader("Content-Type", new TextTemplate("application/json")) + .method(HttpMethod.PUT); + + watcherClient().preparePutWatch("_id") + .setSource(watchBuilder() + .trigger(schedule(interval("5s"))) + .input(simpleInput("key", "value")) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_id", ActionBuilders.webhookAction(builder))) + .get(); + + watcherClient().prepareExecuteWatch("_id").get(); + + GetResponse response = client().prepareGet("<logstash-{now/d}>", "log", "1").get(); + assertExists(response); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java new file mode 100644 index 0000000000000..2a02c5300bded --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java @@ -0,0 +1,609 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.common.http; + +import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import org.apache.http.HttpHeaders; +import org.apache.http.client.ClientProtocolException; +import org.apache.http.client.config.RequestConfig; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.mocksocket.MockServerSocket; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.test.junit.annotations.Network; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.core.ssl.TestsSSLService; +import org.elasticsearch.xpack.core.ssl.VerificationMode; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuthRegistry; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuth; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuthFactory; +import org.junit.After; +import org.junit.Before; + +import javax.net.ssl.SSLContext; +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.InetAddress; +import java.net.ServerSocket; +import java.net.Socket; +import java.net.SocketTimeoutException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.util.Locale; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicReference; + +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.startsWith; +import static org.hamcrest.core.Is.is; + +public class HttpClientTests extends ESTestCase { + + private MockWebServer webServer = new MockWebServer(); + private HttpClient httpClient; + private HttpAuthRegistry authRegistry; + private Environment environment = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); + + @Before + public void init() throws Exception { + authRegistry = new HttpAuthRegistry(singletonMap(BasicAuth.TYPE, new BasicAuthFactory(null))); + 
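+        // every test starts with a fresh mock web server and an HttpClient built from empty settings (no SSL configuration)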
webServer.start(); + httpClient = new HttpClient(Settings.EMPTY, authRegistry, new SSLService(environment.settings(), environment)); + } + + @After + public void shutdown() throws Exception { + webServer.close(); + } + + public void testBasics() throws Exception { + int responseCode = randomIntBetween(200, 203); + String body = randomAlphaOfLengthBetween(2, 8096); + webServer.enqueue(new MockResponse().setResponseCode(responseCode).setBody(body)); + + HttpRequest.Builder requestBuilder = HttpRequest.builder("localhost", webServer.getPort()) + .method(HttpMethod.POST) + .path("/" + randomAlphaOfLength(5)); + + String paramKey = randomAlphaOfLength(3); + String paramValue = randomAlphaOfLength(3); + requestBuilder.setParam(paramKey, paramValue); + + // Certain headers keys like via and host are illegal and the jdk http client ignores those, so lets + // prepend all keys with `_`, so we don't run into a failure because randomly a restricted header was used: + String headerKey = "_" + randomAlphaOfLength(3); + String headerValue = randomAlphaOfLength(3); + requestBuilder.setHeader(headerKey, headerValue); + + requestBuilder.body(randomAlphaOfLength(5)); + HttpRequest request = requestBuilder.build(); + + HttpResponse response = httpClient.execute(request); + assertThat(response.status(), equalTo(responseCode)); + assertThat(response.body().utf8ToString(), equalTo(body)); + assertThat(webServer.requests(), hasSize(1)); + assertThat(webServer.requests().get(0).getUri().getPath(), equalTo(request.path())); + assertThat(webServer.requests().get(0).getUri().getQuery(), equalTo(paramKey + "=" + paramValue)); + assertThat(webServer.requests().get(0).getHeader(headerKey), equalTo(headerValue)); + } + + public void testNoQueryString() throws Exception { + webServer.enqueue(new MockResponse().setResponseCode(200).setBody("body")); + HttpRequest.Builder requestBuilder = HttpRequest.builder("localhost", webServer.getPort()) + .method(HttpMethod.GET) + .path("/test"); + + HttpResponse response = httpClient.execute(requestBuilder.build()); + assertThat(response.status(), equalTo(200)); + assertThat(response.body().utf8ToString(), equalTo("body")); + + assertThat(webServer.requests(), hasSize(1)); + assertThat(webServer.requests().get(0).getUri().getPath(), is("/test")); + assertThat(webServer.requests().get(0).getBody(), is(nullValue())); + } + + public void testUrlEncodingWithQueryStrings() throws Exception { + webServer.enqueue(new MockResponse().setResponseCode(200).setBody("body")); + HttpRequest.Builder requestBuilder = HttpRequest.builder("localhost", webServer.getPort()) + .method(HttpMethod.GET) + .path("/test") + .setParam("key", "value 123:123"); + + HttpResponse response = httpClient.execute(requestBuilder.build()); + assertThat(response.status(), equalTo(200)); + assertThat(response.body().utf8ToString(), equalTo("body")); + + assertThat(webServer.requests(), hasSize(1)); + assertThat(webServer.requests().get(0).getUri().getPath(), is("/test")); + assertThat(webServer.requests().get(0).getUri().getRawQuery(), is("key=value+123%3A123")); + assertThat(webServer.requests().get(0).getBody(), is(nullValue())); + } + + public void testBasicAuth() throws Exception { + webServer.enqueue(new MockResponse().setResponseCode(200).setBody("body")); + HttpRequest.Builder request = HttpRequest.builder("localhost", webServer.getPort()) + .method(HttpMethod.POST) + .path("/test") + .auth(new BasicAuth("user", "pass".toCharArray())) + .body("body"); + HttpResponse response = httpClient.execute(request.build()); + 
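+        // "dXNlcjpwYXNz" in the Authorization assertion below is the Base64 encoding of "user:pass"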
assertThat(response.status(), equalTo(200)); + assertThat(response.body().utf8ToString(), equalTo("body")); + + assertThat(webServer.requests(), hasSize(1)); + assertThat(webServer.requests().get(0).getUri().getPath(), is("/test")); + assertThat(webServer.requests().get(0).getHeader("Authorization"), is("Basic dXNlcjpwYXNz")); + } + + public void testNoPathSpecified() throws Exception { + webServer.enqueue(new MockResponse().setResponseCode(200).setBody("doesntmatter")); + HttpRequest.Builder request = HttpRequest.builder("localhost", webServer.getPort()).method(HttpMethod.GET); + httpClient.execute(request.build()); + + assertThat(webServer.requests(), hasSize(1)); + assertThat(webServer.requests().get(0).getUri().getPath(), is("/")); + } + + public void testHttps() throws Exception { + Path resource = getDataPath("/org/elasticsearch/xpack/security/keystore/truststore-testnode-only.jks"); + MockSecureSettings secureSettings = new MockSecureSettings(); + Settings settings; + if (randomBoolean()) { + secureSettings.setString("xpack.http.ssl.truststore.secure_password", "truststore-testnode-only"); + settings = Settings.builder() + .put("xpack.http.ssl.truststore.path", resource.toString()) + .setSecureSettings(secureSettings) + .build(); + } else { + secureSettings.setString("xpack.ssl.truststore.secure_password", "truststore-testnode-only"); + settings = Settings.builder() + .put("xpack.ssl.truststore.path", resource.toString()) + .setSecureSettings(secureSettings) + .build(); + } + httpClient = new HttpClient(settings, authRegistry, new SSLService(settings, environment)); + secureSettings = new MockSecureSettings(); + // We can't use the client created above for the server since it is only a truststore + secureSettings.setString("xpack.ssl.keystore.secure_password", "testnode"); + Settings settings2 = Settings.builder() + .put("xpack.ssl.keystore.path", getDataPath("/org/elasticsearch/xpack/security/keystore/testnode.jks")) + .setSecureSettings(secureSettings) + .build(); + + TestsSSLService sslService = new TestsSSLService(settings2, environment); + testSslMockWebserver(sslService.sslContext(), false); + } + + public void testHttpsDisableHostnameVerification() throws Exception { + Path resource = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.jks"); + Settings settings; + if (randomBoolean()) { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("xpack.http.ssl.truststore.secure_password", "testnode-no-subjaltname"); + settings = Settings.builder() + .put("xpack.http.ssl.truststore.path", resource.toString()) + .put("xpack.http.ssl.verification_mode", randomFrom(VerificationMode.NONE, VerificationMode.CERTIFICATE)) + .setSecureSettings(secureSettings) + .build(); + } else { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("xpack.ssl.truststore.secure_password", "testnode-no-subjaltname"); + settings = Settings.builder() + .put("xpack.ssl.truststore.path", resource.toString()) + .put("xpack.ssl.verification_mode", randomFrom(VerificationMode.NONE, VerificationMode.CERTIFICATE)) + .setSecureSettings(secureSettings) + .build(); + } + httpClient = new HttpClient(settings, authRegistry, new SSLService(settings, environment)); + MockSecureSettings secureSettings = new MockSecureSettings(); + // We can't use the client created above for the server since it only defines a truststore + secureSettings.setString("xpack.ssl.keystore.secure_password", 
"testnode-no-subjaltname"); + Settings settings2 = Settings.builder() + .put("xpack.ssl.keystore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.jks")) + .setSecureSettings(secureSettings) + .build(); + + TestsSSLService sslService = new TestsSSLService(settings2, environment); + testSslMockWebserver(sslService.sslContext(), false); + } + + public void testHttpsClientAuth() throws Exception { + Path resource = getDataPath("/org/elasticsearch/xpack/security/keystore/testnode.jks"); + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("xpack.ssl.keystore.secure_password", "testnode"); + Settings settings = Settings.builder() + .put("xpack.ssl.keystore.path", resource.toString()) + .setSecureSettings(secureSettings) + .build(); + + TestsSSLService sslService = new TestsSSLService(settings, environment); + httpClient = new HttpClient(settings, authRegistry, sslService); + testSslMockWebserver(sslService.sslContext(), true); + } + + private void testSslMockWebserver(SSLContext sslContext, boolean needClientAuth) throws IOException { + try (MockWebServer mockWebServer = new MockWebServer(sslContext, needClientAuth)) { + mockWebServer.enqueue(new MockResponse().setResponseCode(200).setBody("body")); + mockWebServer.start(); + + HttpRequest.Builder request = HttpRequest.builder("localhost", mockWebServer.getPort()) + .scheme(Scheme.HTTPS) + .path("/test"); + HttpResponse response = httpClient.execute(request.build()); + assertThat(response.status(), equalTo(200)); + assertThat(response.body().utf8ToString(), equalTo("body")); + + assertThat(mockWebServer.requests(), hasSize(1)); + assertThat(mockWebServer.requests().get(0).getUri().getPath(), is("/test")); + } + } + + public void testHttpResponseWithAnyStatusCodeCanReturnBody() throws Exception { + int statusCode = randomFrom(200, 201, 400, 401, 403, 404, 405, 409, 413, 429, 500, 503); + String body = RandomStrings.randomAsciiOfLength(random(), 100); + boolean hasBody = usually(); + MockResponse mockResponse = new MockResponse().setResponseCode(statusCode); + if (hasBody) { + mockResponse.setBody(body); + } + webServer.enqueue(mockResponse); + HttpRequest.Builder request = HttpRequest.builder("localhost", webServer.getPort()) + .method(HttpMethod.POST) + .path("/test") + .auth(new BasicAuth("user", "pass".toCharArray())) + .body("body") + .connectionTimeout(TimeValue.timeValueMillis(500)) + .readTimeout(TimeValue.timeValueMillis(500)); + HttpResponse response = httpClient.execute(request.build()); + assertThat(response.status(), equalTo(statusCode)); + assertThat(response.hasContent(), is(hasBody)); + if (hasBody) { + assertThat(response.body().utf8ToString(), is(body)); + } + } + + @Network + public void testHttpsWithoutTruststore() throws Exception { + HttpClient httpClient = new HttpClient(Settings.EMPTY, authRegistry, new SSLService(Settings.EMPTY, environment)); + + // Known server with a valid cert from a commercial CA + HttpRequest.Builder request = HttpRequest.builder("www.elastic.co", 443).scheme(Scheme.HTTPS); + HttpResponse response = httpClient.execute(request.build()); + assertThat(response.status(), equalTo(200)); + assertThat(response.hasContent(), is(true)); + assertThat(response.body(), notNullValue()); + } + + public void testThatProxyCanBeConfigured() throws Exception { + // this test fakes a proxy server that sends a response instead of forwarding it to the mock web server + try (MockWebServer proxyServer = new MockWebServer()) { + 
proxyServer.enqueue(new MockResponse().setResponseCode(200).setBody("fullProxiedContent")); + proxyServer.start(); + Settings settings = Settings.builder() + .put(HttpSettings.PROXY_HOST.getKey(), "localhost") + .put(HttpSettings.PROXY_PORT.getKey(), proxyServer.getPort()) + .build(); + HttpClient httpClient = new HttpClient(settings, authRegistry, new SSLService(settings, environment)); + + HttpRequest.Builder requestBuilder = HttpRequest.builder("localhost", webServer.getPort()) + .method(HttpMethod.GET) + .path("/"); + + HttpResponse response = httpClient.execute(requestBuilder.build()); + assertThat(response.status(), equalTo(200)); + assertThat(response.body().utf8ToString(), equalTo("fullProxiedContent")); + + // ensure we hit the proxyServer and not the webserver + assertThat(webServer.requests(), hasSize(0)); + assertThat(proxyServer.requests(), hasSize(1)); + } + } + + public void testSetProxy() throws Exception { + HttpProxy localhostHttpProxy = new HttpProxy("localhost", 1234, Scheme.HTTP); + RequestConfig.Builder config = RequestConfig.custom(); + + // no proxy configured at all + HttpClient.setProxy(config, HttpRequest.builder().fromUrl("https://elastic.co").build(), HttpProxy.NO_PROXY); + assertThat(config.build().getProxy(), is(nullValue())); + + // no system wide proxy configured, proxy in request + config = RequestConfig.custom(); + HttpClient.setProxy(config, + HttpRequest.builder().fromUrl("https://elastic.co").proxy(new HttpProxy("localhost", 23456)).build(), + HttpProxy.NO_PROXY); + assertThat(config.build().getProxy().toString(), is("http://localhost:23456")); + + // system wide proxy configured, no proxy in request + config = RequestConfig.custom(); + HttpClient.setProxy(config, HttpRequest.builder().fromUrl("https://elastic.co").build(), + localhostHttpProxy); + assertThat(config.build().getProxy().toString(), is("http://localhost:1234")); + + // proxy in request, no system wide proxy configured. request + config = RequestConfig.custom(); + HttpClient.setProxy(config, + HttpRequest.builder().fromUrl("https://elastic.co").proxy(new HttpProxy("localhost", 23456, Scheme.HTTP)).build(), + HttpProxy.NO_PROXY); + assertThat(config.build().getProxy().toString(), is("http://localhost:23456")); + + // proxy in request, system wide proxy configured. 
request wins + config = RequestConfig.custom(); + HttpClient.setProxy(config, + HttpRequest.builder().fromUrl("http://elastic.co").proxy(new HttpProxy("localhost", 23456, Scheme.HTTPS)).build(), + localhostHttpProxy); + assertThat(config.build().getProxy().toString(), is("https://localhost:23456")); + } + + public void testProxyCanHaveDifferentSchemeThanRequest() throws Exception { + // this test fakes a proxy server that sends a response instead of forwarding it to the mock web server + // on top of that the proxy request is HTTPS but the real request is HTTP only + MockSecureSettings serverSecureSettings = new MockSecureSettings(); + // We can't use the client created above for the server since it is only a truststore + serverSecureSettings.setString("xpack.ssl.keystore.secure_password", "testnode"); + Settings serverSettings = Settings.builder() + .put("xpack.ssl.keystore.path", getDataPath("/org/elasticsearch/xpack/security/keystore/testnode.jks")) + .setSecureSettings(serverSecureSettings) + .build(); + TestsSSLService sslService = new TestsSSLService(serverSettings, environment); + + try (MockWebServer proxyServer = new MockWebServer(sslService.sslContext(), false)) { + proxyServer.enqueue(new MockResponse().setResponseCode(200).setBody("fullProxiedContent")); + proxyServer.start(); + + Path resource = getDataPath("/org/elasticsearch/xpack/security/keystore/truststore-testnode-only.jks"); + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("xpack.http.ssl.truststore.secure_password", "truststore-testnode-only"); + Settings settings = Settings.builder() + .put(HttpSettings.PROXY_HOST.getKey(), "localhost") + .put(HttpSettings.PROXY_PORT.getKey(), proxyServer.getPort()) + .put(HttpSettings.PROXY_SCHEME.getKey(), "https") + .put("xpack.http.ssl.truststore.path", resource.toString()) + .setSecureSettings(secureSettings) + .build(); + + HttpClient httpClient = new HttpClient(settings, authRegistry, new SSLService(settings, environment)); + + HttpRequest.Builder requestBuilder = HttpRequest.builder("localhost", webServer.getPort()) + .method(HttpMethod.GET) + .scheme(Scheme.HTTP) + .path("/"); + + HttpResponse response = httpClient.execute(requestBuilder.build()); + assertThat(response.status(), equalTo(200)); + assertThat(response.body().utf8ToString(), equalTo("fullProxiedContent")); + + // ensure we hit the proxyServer and not the webserver + assertThat(webServer.requests(), hasSize(0)); + assertThat(proxyServer.requests(), hasSize(1)); + } + } + + public void testThatProxyCanBeOverriddenByRequest() throws Exception { + // this test fakes a proxy server that sends a response instead of forwarding it to the mock web server + try (MockWebServer proxyServer = new MockWebServer()) { + proxyServer.enqueue(new MockResponse().setResponseCode(200).setBody("fullProxiedContent")); + proxyServer.start(); + Settings settings = Settings.builder() + .put(HttpSettings.PROXY_HOST.getKey(), "localhost") + .put(HttpSettings.PROXY_PORT.getKey(), proxyServer.getPort() + 1) + .put(HttpSettings.PROXY_SCHEME.getKey(), "https") + .build(); + HttpClient httpClient = new HttpClient(settings, authRegistry, new SSLService(settings, environment)); + + HttpRequest.Builder requestBuilder = HttpRequest.builder("localhost", webServer.getPort()) + .method(HttpMethod.GET) + .proxy(new HttpProxy("localhost", proxyServer.getPort(), Scheme.HTTP)) + .path("/"); + + HttpResponse response = httpClient.execute(requestBuilder.build()); + assertThat(response.status(), equalTo(200)); + 
assertThat(response.body().utf8ToString(), equalTo("fullProxiedContent")); + + // ensure we hit the proxyServer and not the webserver + assertThat(webServer.requests(), hasSize(0)); + assertThat(proxyServer.requests(), hasSize(1)); + } + } + + public void testThatProxyConfigurationRequiresHostAndPort() { + Settings.Builder settings = Settings.builder(); + if (randomBoolean()) { + settings.put(HttpSettings.PROXY_HOST.getKey(), "localhost"); + } else { + settings.put(HttpSettings.PROXY_PORT.getKey(), 8080); + } + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new HttpClient(settings.build(), authRegistry, new SSLService(settings.build(), environment))); + assertThat(e.getMessage(), + containsString("HTTP proxy requires both settings: [xpack.http.proxy.host] and [xpack.http.proxy.port]")); + } + + public void testThatUrlPathIsNotEncoded() throws Exception { + // %2F is a slash that needs to be encoded to not be misinterpreted as a path + String path = "/%3Clogstash-%7Bnow%2Fd%7D%3E/_search"; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody("foo")); + HttpRequest request; + if (randomBoolean()) { + request = HttpRequest.builder("localhost", webServer.getPort()).path(path).build(); + } else { + // ensure that fromUrl acts the same way than the above builder + request = HttpRequest.builder().fromUrl(String.format(Locale.ROOT, "http://localhost:%s%s", webServer.getPort(), path)).build(); + } + httpClient.execute(request); + + assertThat(webServer.requests(), hasSize(1)); + + // under no circumstances have a double encode of %2F => %25 (percent sign) + assertThat(webServer.requests(), hasSize(1)); + assertThat(webServer.requests().get(0).getUri().getRawPath(), not(containsString("%25"))); + assertThat(webServer.requests().get(0).getUri().getPath(), is("//_search")); + } + + public void testThatDuplicateHeaderKeysAreReturned() throws Exception { + MockResponse mockResponse = new MockResponse().setResponseCode(200).setBody("foo") + .addHeader("foo", "bar") + .addHeader("foo", "baz") + .addHeader("Content-Length", "3"); + webServer.enqueue(mockResponse); + + HttpRequest request = HttpRequest.builder("localhost", webServer.getPort()).path("/").build(); + HttpResponse httpResponse = httpClient.execute(request); + + assertThat(webServer.requests(), hasSize(1)); + + assertThat(httpResponse.headers(), hasKey("foo")); + assertThat(httpResponse.headers().get("foo"), containsInAnyOrder("bar", "baz")); + } + + // finally fixing https://github.com/elastic/x-plugins/issues/1141 - yay! Fixed due to switching to apache http client internally! 
+ public void testThatClientTakesTimeoutsIntoAccountAfterHeadersAreSent() throws Exception { + webServer.enqueue(new MockResponse().setResponseCode(200).setBody("foo").setBodyDelay(TimeValue.timeValueSeconds(2))); + + HttpRequest request = HttpRequest.builder("localhost", webServer.getPort()).path("/foo") + .method(HttpMethod.POST) + .body("foo") + .connectionTimeout(TimeValue.timeValueMillis(500)) + .readTimeout(TimeValue.timeValueMillis(500)) + .build(); + SocketTimeoutException e = expectThrows(SocketTimeoutException.class, () -> httpClient.execute(request)); + assertThat(e.getMessage(), is("Read timed out")); + } + + public void testThatHttpClientFailsOnNonHttpResponse() throws Exception { + ExecutorService executor = Executors.newSingleThreadExecutor(); + AtomicReference<Exception> hasExceptionHappened = new AtomicReference<>(); + try (ServerSocket serverSocket = new MockServerSocket(0, 50, InetAddress.getByName("localhost"))) { + executor.execute(() -> { + try (Socket socket = serverSocket.accept()) { + BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8)); + in.readLine(); + socket.getOutputStream().write("This is not a HTTP response".getBytes(StandardCharsets.UTF_8)); + socket.getOutputStream().flush(); + } catch (Exception e) { + hasExceptionHappened.set(e); + logger.error((Supplier<?>) () -> new ParameterizedMessage("Error in writing non HTTP response"), e); + } + }); + HttpRequest request = HttpRequest.builder("localhost", serverSocket.getLocalPort()).path("/").build(); + expectThrows(ClientProtocolException.class, () -> httpClient.execute(request)); + assertThat("A server side exception occurred, but shouldn't", hasExceptionHappened.get(), is(nullValue())); + } finally { + terminate(executor); + } + } + + public void testNoContentResponse() throws Exception { + int noContentStatusCode = 204; + webServer.enqueue(new MockResponse().setResponseCode(noContentStatusCode)); + HttpRequest request = HttpRequest.builder("localhost", webServer.getPort()).path("/foo").build(); + HttpResponse response = httpClient.execute(request); + assertThat(response.status(), is(noContentStatusCode)); + assertThat(response.body(), is(nullValue())); + } + + public void testMaxHttpResponseSize() throws Exception { + int randomBytesLength = scaledRandomIntBetween(2, 100); + String data = randomAlphaOfLength(randomBytesLength); + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(data)); + + Settings settings = Settings.builder() + .put(HttpSettings.MAX_HTTP_RESPONSE_SIZE.getKey(), new ByteSizeValue(randomBytesLength - 1, ByteSizeUnit.BYTES)) + .build(); + HttpClient httpClient = new HttpClient(settings, authRegistry, new SSLService(environment.settings(), environment)); + + HttpRequest.Builder requestBuilder = HttpRequest.builder("localhost", webServer.getPort()).method(HttpMethod.GET).path("/"); + + IOException e = expectThrows(IOException.class, () -> httpClient.execute(requestBuilder.build())); + assertThat(e.getMessage(), startsWith("Maximum limit of")); + } + + public void testThatGetRedirectIsFollowed() throws Exception { + String redirectUrl = "http://" + webServer.getHostName() + ":" + webServer.getPort() + "/foo"; + webServer.enqueue(new MockResponse().setResponseCode(302).addHeader("Location", redirectUrl)); + HttpMethod method = randomFrom(HttpMethod.GET, HttpMethod.HEAD); + + if (method == HttpMethod.GET) { + webServer.enqueue(new MockResponse().setResponseCode(200).setBody("shouldBeRead")); + } else if (method == HttpMethod.HEAD) { + 
webServer.enqueue(new MockResponse().setResponseCode(200)); + } + + HttpRequest request = HttpRequest.builder("localhost", webServer.getPort()).path("/") + .method(method) + .build(); + HttpResponse response = httpClient.execute(request); + + assertThat(webServer.requests(), hasSize(2)); + if (method == HttpMethod.GET) { + assertThat(response.body().utf8ToString(), is("shouldBeRead")); + } else if (method == HttpMethod.HEAD) { + assertThat(response.body(), is(nullValue())); + } + } + + // not allowed by RFC, only allowed for GET or HEAD + public void testThatPostRedirectIsNotFollowed() throws Exception { + String redirectUrl = "http://" + webServer.getHostName() + ":" + webServer.getPort() + "/foo"; + webServer.enqueue(new MockResponse().setResponseCode(302).addHeader("Location", redirectUrl)); + webServer.enqueue(new MockResponse().setResponseCode(200).setBody("shouldNeverBeRead")); + + HttpRequest request = HttpRequest.builder("localhost", webServer.getPort()).path("/").method(HttpMethod.POST).build(); + HttpResponse response = httpClient.execute(request); + assertThat(response.body(), is(nullValue())); + assertThat(webServer.requests(), hasSize(1)); + } + + public void testThatBodyWithUTF8Content() throws Exception { + String body = "title あいうえお"; + webServer.enqueue(new MockResponse().setResponseCode(200).setBody(body)); + + HttpRequest request = HttpRequest.builder("localhost", webServer.getPort()) + .path("/") + .setHeader(HttpHeaders.CONTENT_TYPE, XContentType.JSON.mediaType()) + .body(body) + .build(); + HttpResponse response = httpClient.execute(request); + assertThat(response.body().utf8ToString(), is(body)); + + assertThat(webServer.requests(), hasSize(1)); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), is(XContentType.JSON.mediaType())); + assertThat(webServer.requests().get(0).getBody(), is(body)); + } + + public void testThatUrlDoesNotContainQuestionMarkAtTheEnd() throws Exception { + webServer.enqueue(new MockResponse().setResponseCode(200).setBody("whatever")); + + HttpRequest request = HttpRequest.builder("localhost", webServer.getPort()) + .path("foo") + .build(); + httpClient.execute(request); + assertThat(webServer.requests(), hasSize(1)); + assertThat(webServer.requests().get(0).getUri().getRawPath(), is("/foo")); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpConnectionTimeoutTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpConnectionTimeoutTests.java new file mode 100644 index 0000000000000..8ac2bef16e8d8 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpConnectionTimeoutTests.java @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.common.http; + +import org.apache.http.conn.ConnectTimeoutException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.junit.annotations.Network; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuthRegistry; + +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.lessThan; +import static org.mockito.Mockito.mock; + +public class HttpConnectionTimeoutTests extends ESTestCase { + // setting an unroutable IP to simulate a connection timeout + private static final String UNROUTABLE_IP = "192.168.255.255"; + + @Network + public void testDefaultTimeout() throws Exception { + Environment environment = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); + HttpClient httpClient = new HttpClient(Settings.EMPTY, mock(HttpAuthRegistry.class), + new SSLService(environment.settings(), environment)); + + HttpRequest request = HttpRequest.builder(UNROUTABLE_IP, 12345) + .method(HttpMethod.POST) + .path("/" + randomAlphaOfLength(5)) + .build(); + + long start = System.nanoTime(); + try { + httpClient.execute(request); + fail("expected timeout exception"); + } catch (ConnectTimeoutException ete) { + TimeValue timeout = TimeValue.timeValueNanos(System.nanoTime() - start); + logger.info("http connection timed out after {}", timeout); + // it's supposed to be 10, but we'll give it an error margin of 2 seconds + assertThat(timeout.seconds(), greaterThan(8L)); + assertThat(timeout.seconds(), lessThan(12L)); + // expected + } + } + + @Network + public void testDefaultTimeoutCustom() throws Exception { + Environment environment = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); + HttpClient httpClient = new HttpClient(Settings.builder() + .put("xpack.http.default_connection_timeout", "5s").build() + , mock(HttpAuthRegistry.class), new SSLService(environment.settings(), environment)); + + HttpRequest request = HttpRequest.builder(UNROUTABLE_IP, 12345) + .method(HttpMethod.POST) + .path("/" + randomAlphaOfLength(5)) + .build(); + + long start = System.nanoTime(); + try { + httpClient.execute(request); + fail("expected timeout exception"); + } catch (ConnectTimeoutException ete) { + TimeValue timeout = TimeValue.timeValueNanos(System.nanoTime() - start); + logger.info("http connection timed out after {}", timeout); + // it's supposed to be 7, but we'll give it an error margin of 2 seconds + assertThat(timeout.seconds(), greaterThan(3L)); + assertThat(timeout.seconds(), lessThan(7L)); + // expected + } + } + + @Network + public void testTimeoutCustomPerRequest() throws Exception { + Environment environment = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); + HttpClient httpClient = new HttpClient(Settings.builder() + .put("xpack.http.default_connection_timeout", "10s").build() + , mock(HttpAuthRegistry.class), new SSLService(environment.settings(), environment)); + + HttpRequest request = HttpRequest.builder(UNROUTABLE_IP, 12345) + .connectionTimeout(TimeValue.timeValueSeconds(5)) + .method(HttpMethod.POST) + .path("/" + randomAlphaOfLength(5)) + .build(); + + long start = System.nanoTime(); + try { + httpClient.execute(request); + 
fail("expected timeout exception"); + } catch (ConnectTimeoutException ete) { + TimeValue timeout = TimeValue.timeValueNanos(System.nanoTime() - start); + logger.info("http connection timed out after {}", timeout); + // it's supposed to be 7, but we'll give it an error margin of 2 seconds + assertThat(timeout.seconds(), greaterThan(3L)); + assertThat(timeout.seconds(), lessThan(7L)); + // expected + } + } + +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpProxyTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpProxyTests.java new file mode 100644 index 0000000000000..578b46e587008 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpProxyTests.java @@ -0,0 +1,117 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.common.http; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class HttpProxyTests extends ESTestCase { + + public void testParser() throws Exception { + int port = randomIntBetween(1, 65000); + String host = randomAlphaOfLength(10); + XContentBuilder builder = jsonBuilder().startObject().field("host", host).field("port", port); + boolean isSchemeConfigured = randomBoolean(); + String scheme = null; + if (isSchemeConfigured) { + scheme = randomFrom(Scheme.values()).scheme(); + builder.field("scheme", scheme); + } + builder.endObject(); + try (XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + BytesReference.bytes(builder).streamInput())) { + parser.nextToken(); + HttpProxy proxy = HttpProxy.parse(parser); + assertThat(proxy.getHost(), is(host)); + assertThat(proxy.getPort(), is(port)); + if (isSchemeConfigured) { + assertThat(proxy.getScheme().scheme(), is(scheme)); + } else { + assertThat(proxy.getScheme(), is(nullValue())); + } + } + } + + public void testParserValidScheme() throws Exception { + XContentBuilder builder = jsonBuilder().startObject() + .field("host", "localhost").field("port", 12345).field("scheme", "invalid") + .endObject(); + try (XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + BytesReference.bytes(builder).streamInput())) { + parser.nextToken(); + expectThrows(IllegalArgumentException.class, () -> HttpProxy.parse(parser)); + } + } + + public void testParserValidPortRange() throws Exception { + XContentBuilder builder = 
jsonBuilder().startObject() + .field("host", "localhost").field("port", -1) + .endObject(); + try (XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + BytesReference.bytes(builder).streamInput())) { + parser.nextToken(); + expectThrows(ElasticsearchParseException.class, () -> HttpProxy.parse(parser)); + } + } + + public void testParserNoHost() throws Exception { + XContentBuilder builder = jsonBuilder().startObject() + .field("port", -1) + .endObject(); + try (XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + BytesReference.bytes(builder).streamInput())) { + parser.nextToken(); + expectThrows(ElasticsearchParseException.class, () -> HttpProxy.parse(parser)); + } + } + + public void testParserNoPort() throws Exception { + XContentBuilder builder = jsonBuilder().startObject() + .field("host", "localhost") + .endObject(); + try (XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + BytesReference.bytes(builder).streamInput())) { + parser.nextToken(); + expectThrows(ElasticsearchParseException.class, () -> HttpProxy.parse(parser)); + } + } + + public void testToXContent() throws Exception { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + HttpProxy proxy = new HttpProxy("localhost", 3128); + proxy.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + assertThat(Strings.toString(builder), is("{\"proxy\":{\"host\":\"localhost\",\"port\":3128}}")); + } + + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + HttpProxy httpsProxy = new HttpProxy("localhost", 3128, Scheme.HTTPS); + httpsProxy.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + assertThat(Strings.toString(builder), is("{\"proxy\":{\"host\":\"localhost\",\"port\":3128,\"scheme\":\"https\"}}")); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpReadTimeoutTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpReadTimeoutTests.java new file mode 100644 index 0000000000000..2d134681e8b18 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpReadTimeoutTests.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.common.http; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuthRegistry; +import org.junit.After; +import org.junit.Before; + +import java.net.SocketTimeoutException; + +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.lessThan; +import static org.mockito.Mockito.mock; + +public class HttpReadTimeoutTests extends ESTestCase { + + private MockWebServer webServer = new MockWebServer(); + + @Before + public void init() throws Exception { + webServer.start(); + webServer.enqueue(new MockResponse().setBeforeReplyDelay(TimeValue.timeValueSeconds(20))); + } + + @After + public void cleanup() throws Exception { + webServer.close(); + } + + public void testDefaultTimeout() throws Exception { + Environment environment = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); + HttpClient httpClient = new HttpClient(Settings.EMPTY, mock(HttpAuthRegistry.class), + new SSLService(environment.settings(), environment)); + + HttpRequest request = HttpRequest.builder("localhost", webServer.getPort()) + .method(HttpMethod.POST) + .path("/") + .build(); + + long start = System.nanoTime(); + expectThrows(SocketTimeoutException.class, () -> httpClient.execute(request)); + TimeValue timeout = TimeValue.timeValueNanos(System.nanoTime() - start); + logger.info("http connection timed out after {}", timeout); + + // it's supposed to be 10, but we'll give it an error margin of 2 seconds + assertThat(timeout.seconds(), greaterThan(8L)); + assertThat(timeout.seconds(), lessThan(12L)); + } + + public void testDefaultTimeoutCustom() throws Exception { + Environment environment = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); + + HttpClient httpClient = new HttpClient(Settings.builder() + .put("xpack.http.default_read_timeout", "3s").build() + , mock(HttpAuthRegistry.class), new SSLService(environment.settings(), environment)); + + HttpRequest request = HttpRequest.builder("localhost", webServer.getPort()) + .method(HttpMethod.POST) + .path("/") + .build(); + + long start = System.nanoTime(); + expectThrows(SocketTimeoutException.class, () -> httpClient.execute(request)); + TimeValue timeout = TimeValue.timeValueNanos(System.nanoTime() - start); + logger.info("http connection timed out after {}", timeout); + + // it's supposed to be 3, but we'll give it an error margin of 2 seconds + assertThat(timeout.seconds(), greaterThan(1L)); + assertThat(timeout.seconds(), lessThan(5L)); + } + + public void testTimeoutCustomPerRequest() throws Exception { + Environment environment = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); + + HttpClient httpClient = new HttpClient(Settings.builder() + .put("xpack.http.default_read_timeout", "10s").build() + , mock(HttpAuthRegistry.class), new SSLService(environment.settings(), environment)); + + HttpRequest request = HttpRequest.builder("localhost", webServer.getPort()) + .readTimeout(TimeValue.timeValueSeconds(3)) + .method(HttpMethod.POST) + .path("/") + .build(); + + long start = 
System.nanoTime(); + expectThrows(SocketTimeoutException.class, () -> httpClient.execute(request)); + TimeValue timeout = TimeValue.timeValueNanos(System.nanoTime() - start); + logger.info("http connection timed out after {}", timeout); + + // it's supposed to be 3, but we'll give it an error margin of 2 seconds + assertThat(timeout.seconds(), greaterThan(1L)); + assertThat(timeout.seconds(), lessThan(5L)); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpRequestTemplateTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpRequestTemplateTests.java new file mode 100644 index 0000000000000..1f57c812ebbe4 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpRequestTemplateTests.java @@ -0,0 +1,208 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.common.http; + +import io.netty.handler.codec.http.HttpHeaders; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuthRegistry; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuth; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuthFactory; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.test.MockTextTemplateEngine; + +import java.util.Collections; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; + +public class HttpRequestTemplateTests extends ESTestCase { + + public void testBodyWithXContent() throws Exception { + XContentType type = randomFrom(XContentType.JSON, XContentType.YAML); + HttpRequestTemplate template = HttpRequestTemplate.builder("_host", 1234) + .body(XContentBuilder.builder(type.xContent()).startObject().endObject()) + .build(); + HttpRequest request = template.render(new MockTextTemplateEngine(), emptyMap()); + assertThat(request.headers, hasEntry(HttpHeaders.Names.CONTENT_TYPE, type.mediaType())); + } + + public void testBody() throws Exception { + HttpRequestTemplate template = HttpRequestTemplate.builder("_host", 1234) + .body("_body") + .build(); + HttpRequest request = template.render(new MockTextTemplateEngine(), emptyMap()); + assertThat(request.headers.size(), is(0)); + } + + public void testProxy() throws Exception { + HttpRequestTemplate template = HttpRequestTemplate.builder("_host", 1234) + .proxy(new HttpProxy("localhost", 8080)) + .build(); + HttpRequest request = template.render(new MockTextTemplateEngine(), Collections.emptyMap()); 
+ assertThat(request.proxy().getHost(), is("localhost")); + assertThat(request.proxy().getPort(), is(8080)); + } + + public void testRender() { + HttpRequestTemplate template = HttpRequestTemplate.builder("_host", 1234) + .body(new TextTemplate("_body")) + .path(new TextTemplate("_path")) + .putParam("_key1", new TextTemplate("_value1")) + .putHeader("_key2", new TextTemplate("_value2")) + .build(); + + HttpRequest result = template.render(new MockTextTemplateEngine(), Collections.emptyMap()); + assertThat(result.body(), equalTo("_body")); + assertThat(result.path(), equalTo("_path")); + assertThat(result.params(), equalTo(Collections.singletonMap("_key1", "_value1"))); + assertThat(result.headers(), equalTo(Collections.singletonMap("_key2", "_value2"))); + } + + public void testProxyParsing() throws Exception { + HttpRequestTemplate.Builder builder = HttpRequestTemplate.builder("_host", 1234); + builder.path("/path"); + builder.method(randomFrom(HttpMethod.values())); + String proxyHost = randomAlphaOfLength(10); + int proxyPort = randomIntBetween(1, 65534); + builder.proxy(new HttpProxy(proxyHost, proxyPort)); + HttpRequestTemplate template = builder.build(); + + XContentBuilder xContentBuilder = template.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS); + XContentParser xContentParser = createParser(xContentBuilder); + xContentParser.nextToken(); + + HttpRequestTemplate.Parser parser = new HttpRequestTemplate.Parser(mock(HttpAuthRegistry.class)); + HttpRequestTemplate parsedTemplate = parser.parse(xContentParser); + assertThat(parsedTemplate.proxy().getPort(), is(proxyPort)); + assertThat(parsedTemplate.proxy().getHost(), is(proxyHost)); + } + + public void testParseSelfGenerated() throws Exception { + HttpRequestTemplate.Builder builder = HttpRequestTemplate.builder("_host", 1234); + + if (randomBoolean()) { + builder.method(randomFrom(HttpMethod.values())); + } + if (randomBoolean()) { + builder.path("/path"); + } + boolean xbody = randomBoolean(); + if (randomBoolean()) { + if (xbody) { + builder.body(jsonBuilder().startObject().endObject()); + } else { + builder.body("_body"); + } + } + if (randomBoolean()) { + builder.auth(new BasicAuth("_username", "_password".toCharArray())); + } + if (randomBoolean()) { + builder.putParam("_key", new TextTemplate("_value")); + } + if (randomBoolean()) { + builder.putHeader("_key", new TextTemplate("_value")); + } + long connectionTimeout = randomBoolean() ? 0 : randomIntBetween(5, 100000); + if (connectionTimeout > 0) { + builder.connectionTimeout(TimeValue.timeValueSeconds(connectionTimeout)); + } + long readTimeout = randomBoolean() ? 
0 : randomIntBetween(5, 100000); + if (readTimeout > 0) { + builder.readTimeout(TimeValue.timeValueSeconds(readTimeout)); + } + boolean enableProxy = randomBoolean(); + if (enableProxy) { + builder.proxy(new HttpProxy(randomAlphaOfLength(10), randomIntBetween(1, 65534))); + } + + HttpRequestTemplate template = builder.build(); + + HttpAuthRegistry registry = new HttpAuthRegistry(singletonMap(BasicAuth.TYPE, + new BasicAuthFactory(null))); + HttpRequestTemplate.Parser parser = new HttpRequestTemplate.Parser(registry); + + XContentBuilder xContentBuilder = template.toXContent(jsonBuilder(), WatcherParams.builder().hideSecrets(false).build()); + XContentParser xContentParser = createParser(xContentBuilder); + xContentParser.nextToken(); + HttpRequestTemplate parsed = parser.parse(xContentParser); + + assertEquals(template, parsed); + } + + public void testParsingFromUrl() throws Exception { + HttpRequestTemplate.Builder builder = HttpRequestTemplate.builder("www.example.org", 1234); + builder.path("/foo/bar/org"); + builder.putParam("param", new TextTemplate("test")); + builder.scheme(Scheme.HTTPS); + assertThatManualBuilderEqualsParsingFromUrl("https://www.example.org:1234/foo/bar/org?param=test", builder); + + // ssl support, getting the default port right + builder = HttpRequestTemplate.builder("www.example.org", 443).scheme(Scheme.HTTPS).path("/test"); + assertThatManualBuilderEqualsParsingFromUrl("https://www.example.org/test", builder); + + // test without specifying port + builder = HttpRequestTemplate.builder("www.example.org", 80); + assertThatManualBuilderEqualsParsingFromUrl("http://www.example.org", builder); + + // encoded values + builder = HttpRequestTemplate.builder("www.example.org", 80).putParam("foo", new TextTemplate(" white space")); + assertThatManualBuilderEqualsParsingFromUrl("http://www.example.org?foo=%20white%20space", builder); + } + + public void testParsingEmptyUrl() throws Exception { + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> HttpRequestTemplate.builder().fromUrl("")); + assertThat(e.getMessage(), containsString("Configured URL is empty, please configure a valid URL")); + } + + public void testInvalidUrlsWithMissingScheme() throws Exception { + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, + () -> HttpRequestTemplate.builder().fromUrl("www.test.de")); + assertThat(e.getMessage(), containsString("URL [www.test.de] does not contain a scheme")); + } + + public void testInvalidUrlsWithHost() throws Exception { + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, + () -> HttpRequestTemplate.builder().fromUrl("https://")); + assertThat(e.getMessage(), containsString("Malformed URL [https://]")); + } + + public void testThatPartsFromUrlAreTemplatable() throws Exception { + HttpRequestTemplate template = HttpRequestTemplate.builder().fromUrl("http://www.test.de/%7B%7Bfoo%7D%7D").build(); + HttpRequest request = template.render(new MockTextTemplateEngine(), emptyMap()); + assertThat(request.path(), is("/{{foo}}")); + } + + private void assertThatManualBuilderEqualsParsingFromUrl(String url, HttpRequestTemplate.Builder builder) throws Exception { + XContentBuilder urlContentBuilder = jsonBuilder().startObject().field("url", url).endObject(); + XContentParser urlContentParser = createParser(urlContentBuilder); + urlContentParser.nextToken(); + + HttpRequestTemplate.Parser parser = new HttpRequestTemplate.Parser(mock(HttpAuthRegistry.class)); + HttpRequestTemplate 
urlParsedTemplate = parser.parse(urlContentParser); + + XContentBuilder xContentBuilder = builder.build().toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS); + XContentParser xContentParser = createParser(xContentBuilder); + xContentParser.nextToken(); + HttpRequestTemplate parsedTemplate = parser.parse(xContentParser); + + assertThat(parsedTemplate, is(urlParsedTemplate)); + } + +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpRequestTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpRequestTests.java new file mode 100644 index 0000000000000..2350fb4e416e8 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpRequestTests.java @@ -0,0 +1,176 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.common.http; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherXContentParser; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuthRegistry; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuth; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuthFactory; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.cborBuilder; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.common.xcontent.XContentFactory.smileBuilder; +import static org.elasticsearch.common.xcontent.XContentFactory.yamlBuilder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.mockito.Mockito.mock; + +public class HttpRequestTests extends ESTestCase { + + public void testParsingFromUrl() throws Exception { + HttpRequest.Builder builder = HttpRequest.builder("www.example.org", 1234); + builder.path("/foo/bar/org"); + builder.setParam("param", "test"); + builder.scheme(Scheme.HTTPS); + assertThatManualBuilderEqualsParsingFromUrl("https://www.example.org:1234/foo/bar/org?param=test", builder); + + // test without specifying port + builder = HttpRequest.builder("www.example.org", 80); + assertThatManualBuilderEqualsParsingFromUrl("http://www.example.org", builder); + + // encoded values + builder = HttpRequest.builder("www.example.org", 80).setParam("foo", " white space"); + assertThatManualBuilderEqualsParsingFromUrl("http://www.example.org?foo=%20white%20space", builder); + } + + public void testParsingEmptyUrl() throws Exception { + try { + HttpRequest.builder().fromUrl(""); + fail("Expected exception due to empty URL"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), containsString("Configured URL is empty, please configure a valid URL")); + } + } + + public void testInvalidUrlsWithMissingScheme() throws 
Exception { + try { + HttpRequest.builder().fromUrl("www.test.de"); + fail("Expected exception due to missing scheme"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), containsString("URL [www.test.de] does not contain a scheme")); + } + } + + public void testInvalidUrlsWithHost() throws Exception { + try { + HttpRequest.builder().fromUrl("https://"); + fail("Expected exception due to missing host"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), containsString("Malformed URL [https://]")); + } + } + + public void testXContentSerialization() throws Exception { + final HttpRequest.Builder builder; + if (randomBoolean()) { + builder = HttpRequest.builder(); + builder.fromUrl("http://localhost:9200/generic/createevent"); + } else { + builder = HttpRequest.builder("localhost", 9200); + if (randomBoolean()) { + builder.scheme(randomFrom(Scheme.values())); + if (usually()) { + builder.path(randomAlphaOfLength(50)); + } + } + } + if (usually()) { + builder.method(randomFrom(HttpMethod.values())); + } + if (randomBoolean()) { + builder.setParam(randomAlphaOfLength(10), randomAlphaOfLength(10)); + if (randomBoolean()) { + builder.setParam(randomAlphaOfLength(10), randomAlphaOfLength(10)); + } + } + if (randomBoolean()) { + builder.setHeader(randomAlphaOfLength(10), randomAlphaOfLength(10)); + if (randomBoolean()) { + builder.setHeader(randomAlphaOfLength(10), randomAlphaOfLength(10)); + } + } + if (randomBoolean()) { + builder.auth(new BasicAuth(randomAlphaOfLength(10), randomAlphaOfLength(20).toCharArray())); + } + if (randomBoolean()) { + builder.body(randomAlphaOfLength(200)); + } + if (randomBoolean()) { + // micros and nanos don't round trip will full precision so exclude them from the test + String safeConnectionTimeout = randomValueOtherThanMany(s -> (s.endsWith("micros") || s.endsWith("nanos")), + () -> randomTimeValue()); + builder.connectionTimeout(TimeValue.parseTimeValue(safeConnectionTimeout, "my.setting")); + } + if (randomBoolean()) { + // micros and nanos don't round trip will full precision so exclude them from the test + String safeReadTimeout = randomValueOtherThanMany(s -> (s.endsWith("micros") || s.endsWith("nanos")), + () -> randomTimeValue()); + builder.readTimeout(TimeValue.parseTimeValue(safeReadTimeout, "my.setting")); + } + if (randomBoolean()) { + builder.proxy(new HttpProxy(randomAlphaOfLength(10), randomIntBetween(1024, 65000))); + } + + final HttpRequest httpRequest = builder.build(); + assertNotNull(httpRequest); + + try (XContentBuilder xContentBuilder = randomFrom(jsonBuilder(), smileBuilder(), yamlBuilder(), cborBuilder())) { + httpRequest.toXContent(xContentBuilder, WatcherParams.builder().hideSecrets(false).build()); + + HttpAuthRegistry registry = new HttpAuthRegistry(singletonMap(BasicAuth.TYPE, new BasicAuthFactory(null))); + HttpRequest.Parser httpRequestParser = new HttpRequest.Parser(registry); + + try (XContentParser parser = createParser(xContentBuilder)) { + assertNull(parser.currentToken()); + parser.nextToken(); + + HttpRequest parsedRequest = httpRequestParser.parse(parser); + assertEquals(httpRequest, parsedRequest); + } + } + } + + public void testXContentRemovesAuthorization() throws Exception { + HttpRequest request = HttpRequest.builder("localhost", 443).setHeader("Authorization", "Bearer Foo").build(); + try (XContentBuilder builder = jsonBuilder()) { + WatcherParams params = WatcherParams.builder().hideSecrets(false).build(); + request.toXContent(builder, params); + 
assertThat(Strings.toString(builder), containsString("Bearer Foo")); + } + try (XContentBuilder builder = jsonBuilder()) { + request.toXContent(builder, WatcherParams.HIDE_SECRETS); + assertThat(Strings.toString(builder), not(containsString("Bearer Foo"))); + assertThat(Strings.toString(builder), containsString(WatcherXContentParser.REDACTED_PASSWORD)); + } + } + + private void assertThatManualBuilderEqualsParsingFromUrl(String url, HttpRequest.Builder builder) throws Exception { + XContentBuilder urlContentBuilder = jsonBuilder().startObject().field("url", url).endObject(); + XContentParser urlContentParser = createParser(urlContentBuilder); + urlContentParser.nextToken(); + + HttpRequest.Parser parser = new HttpRequest.Parser(mock(HttpAuthRegistry.class)); + HttpRequest urlParsedRequest = parser.parse(urlContentParser); + + WatcherParams params = WatcherParams.builder().hideSecrets(false).build(); + XContentBuilder xContentBuilder = builder.build().toXContent(jsonBuilder(), params); + XContentParser xContentParser = createParser(xContentBuilder); + xContentParser.nextToken(); + HttpRequest parsedRequest = parser.parse(xContentParser); + + assertThat(parsedRequest, is(urlParsedRequest)); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpResponseTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpResponseTests.java new file mode 100644 index 0000000000000..3629a984f22c6 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpResponseTests.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.common.http; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; + +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; + +public class HttpResponseTests extends ESTestCase { + + public void testParseSelfGenerated() throws Exception { + int status = randomIntBetween(200, 600); + Map headers = emptyMap(); + if (randomBoolean()) { + headers = singletonMap("key", new String[] { "value" }); + } + String body = randomBoolean() ? 
"body" : null; + final HttpResponse response; + if (randomBoolean() && headers.isEmpty() && body == null) { + response = new HttpResponse(status); + } else if (body != null ){ + switch (randomIntBetween(0, 2)) { + case 0: + response = new HttpResponse(status, body, headers); + break; + case 1: + response = new HttpResponse(status, body.getBytes(StandardCharsets.UTF_8), headers); + break; + default: // 2 + response = new HttpResponse(status, new BytesArray(body), headers); + break; + } + } else { // body is null + switch (randomIntBetween(0, 3)) { + case 0: + response = new HttpResponse(status, (String) null, headers); + break; + case 1: + response = new HttpResponse(status, (byte[]) null, headers); + break; + case 2: + response = new HttpResponse(status, (BytesReference) null, headers); + break; + default: //3 + response = new HttpResponse(status, headers); + break; + } + } + + XContentBuilder builder = jsonBuilder().value(response); + XContentParser parser = createParser(builder); + parser.nextToken(); + HttpResponse parsedResponse = HttpResponse.parse(parser); + assertThat(parsedResponse, notNullValue()); + assertThat(parsedResponse.status(), is(status)); + if (body == null) { + assertThat(parsedResponse.body(), nullValue()); + } else { + assertThat(parsedResponse.body().utf8ToString(), is(body)); + } + for (Map.Entry headerEntry : headers.entrySet()) { + assertThat(headerEntry.getValue(), arrayContaining(parsedResponse.header(headerEntry.getKey()))); + } + } + + public void testThatHeadersAreCaseInsensitive() { + Map headers = new HashMap<>(); + headers.put(randomFrom("key", "keY", "KEY", "Key"), new String[] { "value" }); + headers.put(randomFrom("content-type"), new String[] { "text/html" }); + HttpResponse response = new HttpResponse(200, headers); + assertThat(response.header("key")[0], is("value")); + assertThat(response.contentType(), is("text/html")); + } + + public void testThatHeaderNamesDoNotContainDotsOnSerialization() throws Exception { + Map headers = new HashMap<>(); + headers.put("es.index", new String[] { "value" }); + headers.put("es.index.2", new String[] { "value" }); + + HttpResponse response = new HttpResponse(200, headers); + assertThat(response.header("es.index")[0], is("value")); + assertThat(response.header("es.index.2")[0], is("value")); + + XContentBuilder builder = jsonBuilder(); + response.toXContent(builder, ToXContent.EMPTY_PARAMS); + + XContentParser parser = createParser(builder); + Map responseMap = parser.map(); + parser.close(); + + assertThat(responseMap, hasKey("headers")); + assertThat(responseMap.get("headers"), instanceOf(Map.class)); + Map responseHeaders = (Map) responseMap.get("headers"); + + assertThat(responseHeaders, not(hasKey("es.index"))); + assertThat(responseHeaders, hasEntry("es_index", Collections.singletonList("value"))); + + assertThat(responseHeaders, not(hasKey("es.index.2"))); + assertThat(responseHeaders, hasEntry("es_index_2", Collections.singletonList("value"))); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/SizeLimitInputStreamTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/SizeLimitInputStreamTests.java new file mode 100644 index 0000000000000..fa9af2367bc66 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/SizeLimitInputStreamTests.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.common.http; + +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.test.ESTestCase; + +import java.io.ByteArrayInputStream; +import java.io.IOException; + +import static com.google.common.base.Charsets.UTF_8; +import static org.hamcrest.Matchers.is; + +public class SizeLimitInputStreamTests extends ESTestCase { + + public void testGoodCase() throws IOException { + int length = scaledRandomIntBetween(1, 100); + test(length, length); + } + + public void testLimitReached() { + int length = scaledRandomIntBetween(1, 100); + IOException e = expectThrows(IOException.class, () -> test(length+1, length)); + assertThat(e.getMessage(), is("Maximum limit of [" + length + "] bytes reached")); + } + + public void testMarking() { + ByteSizeValue byteSizeValue = new ByteSizeValue(1, ByteSizeUnit.BYTES); + SizeLimitInputStream is = new SizeLimitInputStream(byteSizeValue, + new ByteArrayInputStream("empty".getBytes(UTF_8))); + assertThat(is.markSupported(), is(false)); + expectThrows(UnsupportedOperationException.class, () -> is.mark(10)); + IOException e = expectThrows(IOException.class, () -> is.reset()); + assertThat(e.getMessage(), is("reset not supported")); + } + + private void test(int inputStreamLength, int maxAllowedSize) throws IOException { + String data = randomAlphaOfLength(inputStreamLength); + ByteSizeValue byteSizeValue = new ByteSizeValue(maxAllowedSize, ByteSizeUnit.BYTES); + SizeLimitInputStream is = new SizeLimitInputStream(byteSizeValue, + new ByteArrayInputStream(data.getBytes(UTF_8))); + + if (randomBoolean()) { + is.read(new byte[inputStreamLength]); + } else { + for (int i = 0; i < inputStreamLength; i++) { + is.read(); + } + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateTests.java new file mode 100644 index 0000000000000..45c34e3465096 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateTests.java @@ -0,0 +1,210 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.common.text; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.script.TemplateScript; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.watcher.Watcher; +import org.junit.Before; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static java.util.Collections.singletonMap; +import static java.util.Collections.unmodifiableMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TextTemplateTests extends ESTestCase { + + private ScriptService service; + private TextTemplateEngine engine; + private final String lang = "mustache"; + + @Before + public void init() throws Exception { + service = mock(ScriptService.class); + engine = new TextTemplateEngine(Settings.EMPTY, service); + } + + public void testRender() throws Exception { + String templateText = "_template"; + Map params = singletonMap("param_key", "param_val"); + Map model = singletonMap("model_key", "model_val"); + Map merged = new HashMap<>(params); + merged.putAll(model); + merged = unmodifiableMap(merged); + ScriptType type = randomFrom(ScriptType.values()); + + TemplateScript.Factory compiledTemplate = templateParams -> + new TemplateScript(templateParams) { + @Override + public String execute() { + return "rendered_text"; + } + }; + + when(service.compile(new Script(type, type == ScriptType.STORED ? null : lang, templateText, + type == ScriptType.INLINE ? Collections.singletonMap("content_type", "text/plain") : null, + merged), Watcher.SCRIPT_TEMPLATE_CONTEXT)).thenReturn(compiledTemplate); + + TextTemplate template = templateBuilder(type, templateText, params); + assertThat(engine.render(template, model), is("rendered_text")); + } + + public void testRenderOverridingModel() throws Exception { + String templateText = "_template"; + Map params = singletonMap("key", "param_val"); + Map model = singletonMap("key", "model_val"); + ScriptType type = randomFrom(ScriptType.values()); + + TemplateScript.Factory compiledTemplate = templateParams -> + new TemplateScript(templateParams) { + @Override + public String execute() { + return "rendered_text"; + } + }; + + when(service.compile(new Script(type, type == ScriptType.STORED ? null : lang, templateText, + type == ScriptType.INLINE ? 
Collections.singletonMap("content_type", "text/plain") : null, + model), Watcher.SCRIPT_TEMPLATE_CONTEXT)).thenReturn(compiledTemplate); + + TextTemplate template = templateBuilder(type, templateText, params); + assertThat(engine.render(template, model), is("rendered_text")); + } + + public void testRenderDefaults() throws Exception { + String templateText = "_template"; + Map model = singletonMap("key", "model_val"); + + TemplateScript.Factory compiledTemplate = templateParams -> + new TemplateScript(templateParams) { + @Override + public String execute() { + return "rendered_text"; + } + }; + + when(service.compile(new Script(ScriptType.INLINE, lang, templateText, + Collections.singletonMap("content_type", "text/plain"), model), Watcher.SCRIPT_TEMPLATE_CONTEXT)) + .thenReturn(compiledTemplate); + + TextTemplate template = new TextTemplate(templateText); + assertThat(engine.render(template, model), is("rendered_text")); + } + + public void testParser() throws Exception { + ScriptType type = randomScriptType(); + TextTemplate template = + templateBuilder(type, "_template", singletonMap("param_key", "param_val")); + XContentBuilder builder = jsonBuilder().startObject(); + switch (type) { + case INLINE: + builder.field("source", template.getTemplate()); + break; + case STORED: + builder.field("id", template.getTemplate()); + } + builder.field("params", template.getParams()); + builder.endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + TextTemplate parsed = TextTemplate.parse(parser); + assertThat(parsed, notNullValue()); + assertThat(parsed, equalTo(template)); + } + + public void testParserParserSelfGenerated() throws Exception { + ScriptType type = randomScriptType(); + TextTemplate template = + templateBuilder(type, "_template", singletonMap("param_key", "param_val")); + + XContentBuilder builder = jsonBuilder().value(template); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + TextTemplate parsed = TextTemplate.parse(parser); + assertThat(parsed, notNullValue()); + assertThat(parsed, equalTo(template)); + } + + public void testParserInvalidUnexpectedField() throws Exception { + XContentBuilder builder = jsonBuilder().startObject() + .field("unknown_field", "value") + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + try { + TextTemplate.parse(parser); + fail("expected parse exception when encountering an unknown field"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("[script] unknown field [unknown_field], parser not found")); + } + } + + public void testParserInvalidUnknownScriptType() throws Exception { + XContentBuilder builder = jsonBuilder().startObject() + .field("template", "_template") + .field("type", "unknown_type") + .startObject("params").endObject() + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + try { + TextTemplate.parse(parser); + fail("expected parse exception when script type is unknown"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), is("[script] unknown field [template], parser not found")); + } + } + + public void testParserInvalidMissingText() throws 
Exception { + XContentBuilder builder = jsonBuilder().startObject() + .field("type", ScriptType.STORED) + .startObject("params").endObject() + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + try { + TextTemplate.parse(parser); + fail("expected parse exception when template text is missing"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("[script] unknown field [type], parser not found")); + } + } + + public void testNullObject() throws Exception { + assertThat(engine.render(null ,new HashMap<>()), is(nullValue())); + } + + private TextTemplate templateBuilder(ScriptType type, String text, Map params) { + return new TextTemplate(text, null, type, params); + } + + private static ScriptType randomScriptType() { + return randomFrom(ScriptType.values()); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/AlwaysConditionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/AlwaysConditionTests.java new file mode 100644 index 0000000000000..8ae0025066eb0 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/AlwaysConditionTests.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.condition; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.condition.ExecutableCondition; + +import java.time.Clock; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; + +public class AlwaysConditionTests extends ESTestCase { + public void testExecute() throws Exception { + ExecutableCondition alwaysTrue = InternalAlwaysCondition.INSTANCE; + assertTrue(alwaysTrue.execute(null).met()); + } + + public void testParserValid() throws Exception { + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + builder.endObject(); + XContentParser parser = createParser(builder); + parser.nextToken(); + ExecutableCondition executable = InternalAlwaysCondition.parse("_id", parser); + assertTrue(executable.execute(null).met()); + } + + public void testParserInvalid() throws Exception { + XContentBuilder builder = jsonBuilder() + .startObject() + .field("foo", "bar") + .endObject(); + XContentParser parser = createParser(builder); + parser.nextToken(); + try { + InternalAlwaysCondition.parse( "_id", parser); + fail("expected a condition exception trying to parse an invalid condition XContent, [" + + InternalAlwaysCondition.TYPE + "] condition should not parse with a body"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), containsString("expected an empty object but found [foo]")); + } + } + + public static ExecutableCondition randomCondition(ScriptService scriptService) { + String type = randomFrom(ScriptCondition.TYPE, InternalAlwaysCondition.TYPE, CompareCondition.TYPE, ArrayCompareCondition.TYPE); + switch (type) { + case 
ScriptCondition.TYPE: + return new ScriptCondition(mockScript("_script"), scriptService); + case CompareCondition.TYPE: + return new CompareCondition("_path", randomFrom(CompareCondition.Op.values()), randomFrom(5, "3"), + Clock.systemUTC()); + case ArrayCompareCondition.TYPE: + return new ArrayCompareCondition("_array_path", "_path", + randomFrom(ArrayCompareCondition.Op.values()), randomFrom(5, "3"), ArrayCompareCondition.Quantifier.SOME, + Clock.systemUTC()); + default: + return InternalAlwaysCondition.INSTANCE; + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ArrayCompareConditionSearchTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ArrayCompareConditionSearchTests.java new file mode 100644 index 0000000000000..b8931013dedd1 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ArrayCompareConditionSearchTests.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.condition; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.xpack.core.watcher.condition.Condition; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; + +import java.io.IOException; +import java.time.Clock; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.mockExecutionContext; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.collection.IsMapContaining.hasEntry; + +public class ArrayCompareConditionSearchTests extends AbstractWatcherIntegrationTestCase { + + public void testExecuteWithAggs() throws Exception { + String index = "test-index"; + String type = "test-type"; + client().admin().indices().prepareCreate(index) + .addMapping(type) + .get(); + + ArrayCompareCondition.Op op = randomFrom(ArrayCompareCondition.Op.values()); + ArrayCompareCondition.Quantifier quantifier = randomFrom(ArrayCompareCondition.Quantifier.values()); + int numberOfDocuments = randomIntBetween(1, 100); + int numberOfDocumentsWatchingFor = 1 + numberOfDocuments; + for (int i = 0; i < numberOfDocuments; i++) { + client().prepareIndex(index, type).setSource(source("elastic", "you know, for search", i)).get(); + client().prepareIndex(index, type).setSource(source("fights_for_the_users", "you know, for the users", i)).get(); + } + + refresh(); + + SearchResponse response = client().prepareSearch(index) + .addAggregation(AggregationBuilders.terms("top_tweeters").field("user.screen_name.keyword").size(3)).get(); + + + ArrayCompareCondition condition = new ArrayCompareCondition("ctx.payload.aggregations.top_tweeters.buckets" , "doc_count", op, + numberOfDocumentsWatchingFor, quantifier, Clock.systemUTC()); + + WatchExecutionContext ctx = mockExecutionContext("_name", new 
Payload.XContent(response)); + Condition.Result result = condition.execute(ctx); + + boolean met = quantifier.eval(Arrays.asList(numberOfDocuments, numberOfDocuments), numberOfDocumentsWatchingFor, op); + assertEquals(met, result.met()); + + Map resolvedValues = result.getResolvedValues(); + assertThat(resolvedValues, notNullValue()); + assertThat(resolvedValues.size(), is(1)); + Map elastic = new HashMap<>(); + elastic.put("doc_count", numberOfDocuments); + elastic.put("key", "elastic"); + Map fightsForTheUsers = new HashMap<>(); + fightsForTheUsers.put("doc_count", numberOfDocuments); + fightsForTheUsers.put("key", "fights_for_the_users"); + assertThat(resolvedValues, hasEntry("ctx.payload.aggregations.top_tweeters.buckets", + (Object) Arrays.asList(elastic, fightsForTheUsers))); + + client().prepareIndex(index, type).setSource(source("fights_for_the_users", "you know, for the users", numberOfDocuments)).get(); + refresh(); + + response = client().prepareSearch(index) + .addAggregation(AggregationBuilders.terms("top_tweeters").field("user.screen_name.keyword").size(3)).get(); + + ctx = mockExecutionContext("_name", new Payload.XContent(response)); + result = condition.execute(ctx); + + met = quantifier.eval(Arrays.asList(numberOfDocumentsWatchingFor, numberOfDocuments), numberOfDocumentsWatchingFor, op); + assertEquals(met, result.met()); + + resolvedValues = result.getResolvedValues(); + assertThat(resolvedValues, notNullValue()); + assertThat(resolvedValues.size(), is(1)); + fightsForTheUsers.put("doc_count", numberOfDocumentsWatchingFor); + assertThat(resolvedValues, hasEntry("ctx.payload.aggregations.top_tweeters.buckets", + (Object) Arrays.asList(fightsForTheUsers, elastic))); + } + + private XContentBuilder source(String screenName, String tweet, int i) throws IOException { + return jsonBuilder().startObject() + .startObject("user") + .field("screen_name", screenName) + .endObject() + .field("tweet", tweet + " " + i) + .endObject(); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ArrayCompareConditionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ArrayCompareConditionTests.java new file mode 100644 index 0000000000000..d97842fa359e0 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ArrayCompareConditionTests.java @@ -0,0 +1,352 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.condition; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.ClockMock; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.joda.time.DateTime; +import org.junit.Rule; +import org.junit.rules.ExpectedException; + +import java.io.IOException; +import java.time.Clock; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.mockExecutionContext; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.Matchers.is; + +public class ArrayCompareConditionTests extends ESTestCase { + @Rule + public ExpectedException expectedException = ExpectedException.none(); + + public void testOpEvalEQ() throws Exception { + assertThat(ArrayCompareCondition.Quantifier.ALL.eval(Arrays.asList(1, 1), 1, ArrayCompareCondition.Op.EQ), is(true)); + assertThat(ArrayCompareCondition.Quantifier.ALL.eval(Arrays.asList(1, 3), 2, ArrayCompareCondition.Op.EQ), is(false)); + assertThat(ArrayCompareCondition.Quantifier.SOME.eval(Arrays.asList(1, 3), 1, ArrayCompareCondition.Op.EQ), is(true)); + assertThat(ArrayCompareCondition.Quantifier.SOME.eval(Arrays.asList(1, 3), 2, ArrayCompareCondition.Op.EQ), is(false)); + assertThat(ArrayCompareCondition.Quantifier.ALL.eval(Collections.emptyList(), 1, ArrayCompareCondition.Op.EQ), is(true)); + assertThat(ArrayCompareCondition.Quantifier.SOME.eval(Collections.emptyList(), 1, ArrayCompareCondition.Op.EQ), is(false)); + } + + public void testOpEvalNotEQ() throws Exception { + assertThat(ArrayCompareCondition.Quantifier.ALL.eval(Arrays.asList(1, 1), 3, ArrayCompareCondition.Op.NOT_EQ), is(true)); + assertThat(ArrayCompareCondition.Quantifier.ALL.eval(Arrays.asList(1, 3), 1, ArrayCompareCondition.Op.NOT_EQ), is(false)); + assertThat(ArrayCompareCondition.Quantifier.SOME.eval(Arrays.asList(1, 3), 1, ArrayCompareCondition.Op.NOT_EQ), is(true)); + assertThat(ArrayCompareCondition.Quantifier.SOME.eval(Arrays.asList(1, 1), 1, ArrayCompareCondition.Op.NOT_EQ), is(false)); + assertThat(ArrayCompareCondition.Quantifier.ALL.eval(Collections.emptyList(), 1, ArrayCompareCondition.Op.NOT_EQ), is(true)); + assertThat(ArrayCompareCondition.Quantifier.SOME.eval(Collections.emptyList(), 1, ArrayCompareCondition.Op.NOT_EQ), is(false)); + } + + public void testOpEvalGTE() throws Exception { + assertThat(ArrayCompareCondition.Quantifier.ALL.eval(Arrays.asList(1, 3), 1, ArrayCompareCondition.Op.GTE), is(true)); + assertThat(ArrayCompareCondition.Quantifier.ALL.eval(Arrays.asList(1, 3), 2, ArrayCompareCondition.Op.GTE), is(false)); + assertThat(ArrayCompareCondition.Quantifier.SOME.eval(Arrays.asList(1, 3), 2, ArrayCompareCondition.Op.GTE), is(true)); + assertThat(ArrayCompareCondition.Quantifier.SOME.eval(Arrays.asList(1, 3), 4, ArrayCompareCondition.Op.GTE), is(false)); + 
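
The quantifier assertions in these `testOpEval*` methods encode two easy-to-miss edge cases: `ALL` over an empty array is vacuously true, while `SOME` over an empty array is false. As a reading aid, here is a minimal, self-contained sketch of that semantics; `QuantifierSketch`, `all` and `some` are hypothetical names and this is not the `ArrayCompareCondition` implementation:

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.function.BiPredicate;

// Illustrative sketch only -- not the ArrayCompareCondition source.
public class QuantifierSketch {

    // ALL: every element must satisfy the operator; an empty list is vacuously true.
    static <T> boolean all(List<T> values, T expected, BiPredicate<T, T> op) {
        return values.stream().allMatch(v -> op.test(v, expected));
    }

    // SOME: at least one element must satisfy the operator; an empty list yields false.
    static <T> boolean some(List<T> values, T expected, BiPredicate<T, T> op) {
        return values.stream().anyMatch(v -> op.test(v, expected));
    }

    public static void main(String[] args) {
        BiPredicate<Integer, Integer> gte = (a, b) -> a >= b;
        System.out.println(all(Collections.<Integer>emptyList(), 1, gte));  // true  (vacuous truth)
        System.out.println(some(Collections.<Integer>emptyList(), 1, gte)); // false (nothing to match)
        System.out.println(all(Arrays.asList(1, 3), 1, gte));               // true
        System.out.println(some(Arrays.asList(1, 3), 4, gte));              // false
    }
}
```
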
assertThat(ArrayCompareCondition.Quantifier.ALL.eval(Collections.emptyList(), 1, ArrayCompareCondition.Op.GTE), is(true)); + assertThat(ArrayCompareCondition.Quantifier.SOME.eval(Collections.emptyList(), 1, ArrayCompareCondition.Op.GTE), is(false)); + } + + public void testOpEvalGT() throws Exception { + assertThat(ArrayCompareCondition.Quantifier.ALL.eval(Arrays.asList(1, 3), 0, ArrayCompareCondition.Op.GT), is(true)); + assertThat(ArrayCompareCondition.Quantifier.ALL.eval(Arrays.asList(1, 3), 1, ArrayCompareCondition.Op.GT), is(false)); + assertThat(ArrayCompareCondition.Quantifier.SOME.eval(Arrays.asList(1, 3), 2, ArrayCompareCondition.Op.GT), is(true)); + assertThat(ArrayCompareCondition.Quantifier.SOME.eval(Arrays.asList(1, 3), 4, ArrayCompareCondition.Op.GT), is(false)); + assertThat(ArrayCompareCondition.Quantifier.ALL.eval(Collections.emptyList(), 1, ArrayCompareCondition.Op.GT), is(true)); + assertThat(ArrayCompareCondition.Quantifier.SOME.eval(Collections.emptyList(), 1, ArrayCompareCondition.Op.GT), is(false)); + } + + public void testOpEvalLTE() throws Exception { + assertThat(ArrayCompareCondition.Quantifier.ALL.eval(Arrays.asList(1, 3), 3, ArrayCompareCondition.Op.LTE), is(true)); + assertThat(ArrayCompareCondition.Quantifier.ALL.eval(Arrays.asList(1, 3), 0, ArrayCompareCondition.Op.LTE), is(false)); + assertThat(ArrayCompareCondition.Quantifier.SOME.eval(Arrays.asList(1, 3), 3, ArrayCompareCondition.Op.LTE), is(true)); + assertThat(ArrayCompareCondition.Quantifier.SOME.eval(Arrays.asList(1, 3), 0, ArrayCompareCondition.Op.LTE), is(false)); + assertThat(ArrayCompareCondition.Quantifier.ALL.eval(Collections.emptyList(), 1, ArrayCompareCondition.Op.LTE), is(true)); + assertThat(ArrayCompareCondition.Quantifier.SOME.eval(Collections.emptyList(), 1, ArrayCompareCondition.Op.LTE), is(false)); + } + + public void testOpEvalLT() throws Exception { + assertThat(ArrayCompareCondition.Quantifier.ALL.eval(Arrays.asList(1, 3), 4, ArrayCompareCondition.Op.LT), is(true)); + assertThat(ArrayCompareCondition.Quantifier.ALL.eval(Arrays.asList(1, 3), 3, ArrayCompareCondition.Op.LT), is(false)); + assertThat(ArrayCompareCondition.Quantifier.SOME.eval(Arrays.asList(1, 3), 2, ArrayCompareCondition.Op.LT), is(true)); + assertThat(ArrayCompareCondition.Quantifier.SOME.eval(Arrays.asList(1, 3), 0, ArrayCompareCondition.Op.LT), is(false)); + assertThat(ArrayCompareCondition.Quantifier.ALL.eval(Collections.emptyList(), 1, ArrayCompareCondition.Op.LT), is(true)); + assertThat(ArrayCompareCondition.Quantifier.SOME.eval(Collections.emptyList(), 1, ArrayCompareCondition.Op.LT), is(false)); + } + + public void testExecute() { + ArrayCompareCondition.Op op = randomFrom(ArrayCompareCondition.Op.values()); + int value = randomInt(10); + int numberOfValues = randomIntBetween(0, 3); + List values = new ArrayList<>(numberOfValues); + for (int i = 0; i < numberOfValues; i++) { + values.add(randomInt(10)); + } + ArrayCompareCondition.Quantifier quantifier = randomFrom(ArrayCompareCondition.Quantifier.values()); + boolean met = quantifier.eval(values, value, op); + + logger.debug("op [{}]", op); + logger.debug("value [{}]", value); + logger.debug("numberOfValues [{}]", numberOfValues); + logger.debug("values [{}]", values); + logger.debug("quantifier [{}]", quantifier); + logger.debug("met [{}]", met); + + ArrayCompareCondition condition = new ArrayCompareCondition("ctx.payload.value", "", op, value, quantifier, + Clock.systemUTC()); + WatchExecutionContext ctx = mockExecutionContext("_name", new 
Payload.Simple("value", values)); + assertThat(condition.execute(ctx).met(), is(met)); + } + + public void testExecutePath() { + ArrayCompareCondition.Op op = randomFrom(ArrayCompareCondition.Op.values()); + int value = randomInt(10); + int numberOfValues = randomIntBetween(0, 3); + List docCounts = new ArrayList<>(numberOfValues); + for (int i = 0; i < numberOfValues; i++) { + docCounts.add(randomInt(10)); + } + List values = new ArrayList<>(numberOfValues); + for (int i = 0; i < numberOfValues; i++) { + Map map = new HashMap<>(1); + map.put("doc_count", docCounts.get(i)); + values.add(map); + } + ArrayCompareCondition.Quantifier quantifier = randomFrom(ArrayCompareCondition.Quantifier.values()); + boolean met = quantifier.eval(docCounts, value, op); + + logger.debug("op [{}]", op); + logger.debug("value [{}]", value); + logger.debug("numberOfValues [{}]", numberOfValues); + logger.debug("values [{}]", values); + logger.debug("quantifier [{}]", quantifier); + logger.debug("met [{}]", met); + + ArrayCompareCondition condition = new ArrayCompareCondition("ctx.payload.value", "doc_count", op, value, quantifier, + Clock.systemUTC()); + WatchExecutionContext ctx = mockExecutionContext("_name", new Payload.Simple("value", values)); + assertThat(condition.execute(ctx).met(), is(met)); + } + + public void testExecuteDateMath() { + ClockMock clock = ClockMock.frozen(); + boolean met = randomBoolean(); + ArrayCompareCondition.Op op = met ? + randomFrom(ArrayCompareCondition.Op.GT, ArrayCompareCondition.Op.GTE, ArrayCompareCondition.Op.NOT_EQ) : + randomFrom(ArrayCompareCondition.Op.LT, ArrayCompareCondition.Op.LTE, ArrayCompareCondition.Op.EQ); + + ArrayCompareCondition.Quantifier quantifier = randomFrom(ArrayCompareCondition.Quantifier.ALL, + ArrayCompareCondition.Quantifier.SOME); + String value = "<{now-1d}>"; + int numberOfValues = randomIntBetween(1, 10); + List values = new ArrayList<>(numberOfValues); + for (int i = 0; i < numberOfValues; i++) { + clock.fastForwardSeconds(1); + values.add(new DateTime(clock.millis())); + } + + ArrayCompareCondition condition = new ArrayCompareCondition("ctx.payload.value", "", op, value, quantifier, clock); + WatchExecutionContext ctx = mockExecutionContext("_name", new Payload.Simple("value", values)); + assertThat(condition.execute(ctx).met(), is(met)); + } + + public void testParse() throws IOException { + ArrayCompareCondition.Op op = randomFrom(ArrayCompareCondition.Op.values()); + ArrayCompareCondition.Quantifier quantifier = randomFrom(ArrayCompareCondition.Quantifier.values()); + Object value = randomFrom("value", 1, null); + XContentBuilder builder = + jsonBuilder().startObject() + .startObject("key1.key2") + .field("path", "key3.key4") + .startObject(op.id()) + .field("value", value) + .field("quantifier", quantifier.id()) + .endObject() + .endObject() + .endObject(); + + XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + parser.nextToken(); + + ArrayCompareCondition condition = ArrayCompareCondition.parse(ClockMock.frozen(), "_id", parser); + + assertThat(condition, notNullValue()); + assertThat(condition.getArrayPath(), is("key1.key2")); + assertThat(condition.getOp(), is(op)); + assertThat(condition.getValue(), is(value)); + assertThat(condition.getPath(), is("key3.key4")); + assertThat(condition.getQuantifier(), is(quantifier)); + } + + public void testParseContainsDuplicateOperator() throws IOException { + assumeFalse("Test only makes sense if XContent parser doesn't have strict duplicate checks 
enabled", + XContent.isStrictDuplicateDetectionEnabled()); + ArrayCompareCondition.Op op = randomFrom(ArrayCompareCondition.Op.values()); + ArrayCompareCondition.Quantifier quantifier = randomFrom(ArrayCompareCondition.Quantifier.values()); + Object value = randomFrom("value", 1, null); + XContentBuilder builder = + jsonBuilder().startObject() + .startObject("key1.key2") + .field("path", "key3.key4") + .startObject(op.id()) + .field("value", value) + .field("quantifier", quantifier.id()) + .endObject() + .startObject(op.id()) + .field("value", value) + .field("quantifier", quantifier.id()) + .endObject() + .endObject() + .endObject(); + + XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + parser.nextToken(); + + expectedException.expect(ElasticsearchParseException.class); + expectedException.expectMessage("duplicate comparison operator"); + + ArrayCompareCondition.parse(ClockMock.frozen(), "_id", parser); + } + + public void testParseContainsUnknownOperator() throws IOException { + ArrayCompareCondition.Quantifier quantifier = randomFrom(ArrayCompareCondition.Quantifier.values()); + Object value = randomFrom("value", 1, null); + XContentBuilder builder = + jsonBuilder().startObject() + .startObject("key1.key2") + .field("path", "key3.key4") + .startObject("unknown") + .field("value", value) + .field("quantifier", quantifier.id()) + .endObject() + .endObject() + .endObject(); + + XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + parser.nextToken(); + + expectedException.expect(ElasticsearchParseException.class); + expectedException.expectMessage("unknown comparison operator"); + + ArrayCompareCondition.parse(ClockMock.frozen(), "_id", parser); + } + + public void testParseContainsDuplicateValue() throws IOException { + assumeFalse("Test only makes sense if XContent parser doesn't have strict duplicate checks enabled", + XContent.isStrictDuplicateDetectionEnabled()); + ArrayCompareCondition.Op op = randomFrom(ArrayCompareCondition.Op.values()); + ArrayCompareCondition.Quantifier quantifier = randomFrom(ArrayCompareCondition.Quantifier.values()); + Object value = randomFrom("value", 1, null); + XContentBuilder builder = + jsonBuilder().startObject() + .startObject("key1.key2") + .field("path", "key3.key4") + .startObject(op.id()) + .field("value", value) + .field("value", value) + .field("quantifier", quantifier.id()) + .endObject() + .endObject() + .endObject(); + + XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + parser.nextToken(); + + expectedException.expect(ElasticsearchParseException.class); + expectedException.expectMessage("duplicate field \"value\""); + + ArrayCompareCondition.parse(ClockMock.frozen(), "_id", parser); + } + + public void testParseContainsDuplicateQuantifier() throws IOException { + assumeFalse("Test only makes sense if XContent parser doesn't have strict duplicate checks enabled", + XContent.isStrictDuplicateDetectionEnabled()); + ArrayCompareCondition.Op op = randomFrom(ArrayCompareCondition.Op.values()); + ArrayCompareCondition.Quantifier quantifier = randomFrom(ArrayCompareCondition.Quantifier.values()); + Object value = randomFrom("value", 1, null); + XContentBuilder builder = + jsonBuilder().startObject() + .startObject("key1.key2") + .field("path", "key3.key4") + .startObject(op.id()) + .field("value", value) + .field("quantifier", quantifier.id()) + .field("quantifier", quantifier.id()) + .endObject() + .endObject() + 
.endObject(); + + XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + parser.nextToken(); + + expectedException.expect(ElasticsearchParseException.class); + expectedException.expectMessage("duplicate field \"quantifier\""); + + ArrayCompareCondition.parse(ClockMock.frozen(), "_id", parser); + } + + public void testParseContainsUnknownQuantifier() throws IOException { + ArrayCompareCondition.Op op = randomFrom(ArrayCompareCondition.Op.values()); + Object value = randomFrom("value", 1, null); + XContentBuilder builder = + jsonBuilder().startObject() + .startObject("key1.key2") + .field("path", "key3.key4") + .startObject(op.id()) + .field("value", value) + .field("quantifier", "unknown") + .endObject() + .endObject() + .endObject(); + + XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + parser.nextToken(); + + expectedException.expect(ElasticsearchParseException.class); + expectedException.expectMessage("unknown comparison quantifier"); + + ArrayCompareCondition.parse(ClockMock.frozen(), "_id", parser); + } + + public void testParseContainsUnexpectedFieldInComparisonOperator() throws IOException { + ArrayCompareCondition.Op op = randomFrom(ArrayCompareCondition.Op.values()); + ArrayCompareCondition.Quantifier quantifier = randomFrom(ArrayCompareCondition.Quantifier.values()); + Object value = randomFrom("value", 1, null); + XContentBuilder builder = + jsonBuilder().startObject() + .startObject("key1.key2") + .field("path", "key3.key4") + .startObject(op.id()) + .field("value", value) + .field("quantifier", quantifier.id()) + .field("unexpected", "unexpected") + .endObject() + .endObject() + .endObject(); + + XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + parser.nextToken(); + + expectedException.expect(ElasticsearchParseException.class); + expectedException.expectMessage("expected a field indicating the comparison value or comparison quantifier"); + + ArrayCompareCondition.parse(ClockMock.frozen(), "_id", parser); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java new file mode 100644 index 0000000000000..0cfc2575386bc --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.condition; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.text.Text; +import org.elasticsearch.index.Index; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; + +import java.time.Clock; +import java.util.Map; + +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.mockExecutionContext; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Mockito.when; + +public class CompareConditionSearchTests extends AbstractWatcherIntegrationTestCase { + + public void testExecuteWithAggs() throws Exception { + client().prepareIndex("my-index", "my-type").setSource("@timestamp", "2005-01-01T00:00").get(); + client().prepareIndex("my-index", "my-type").setSource("@timestamp", "2005-01-01T00:10").get(); + client().prepareIndex("my-index", "my-type").setSource("@timestamp", "2005-01-01T00:20").get(); + client().prepareIndex("my-index", "my-type").setSource("@timestamp", "2005-01-01T00:30").get(); + refresh(); + + SearchResponse response = client().prepareSearch("my-index") + .addAggregation(AggregationBuilders.dateHistogram("rate").field("@timestamp") + .dateHistogramInterval(DateHistogramInterval.HOUR).order(BucketOrder.count(false))) + .get(); + + CompareCondition condition = new CompareCondition("ctx.payload.aggregations.rate.buckets.0.doc_count", CompareCondition.Op.GTE, 5, + Clock.systemUTC()); + WatchExecutionContext ctx = mockExecutionContext("_name", new Payload.XContent(response)); + CompareCondition.Result result = condition.execute(ctx); + assertThat(result.met(), is(false)); + Map resolvedValues = result.getResolvedValues(); + assertThat(resolvedValues, notNullValue()); + assertThat(resolvedValues.size(), is(1)); + assertThat(resolvedValues, hasEntry("ctx.payload.aggregations.rate.buckets.0.doc_count", (Object) 4)); + + client().prepareIndex("my-index", "my-type").setSource("@timestamp", "2005-01-01T00:40").get(); + refresh(); + + response = client().prepareSearch("my-index") + .addAggregation(AggregationBuilders.dateHistogram("rate") + .field("@timestamp").dateHistogramInterval(DateHistogramInterval.HOUR).order(BucketOrder.count(false))) + .get(); + + ctx = mockExecutionContext("_name", new Payload.XContent(response)); + result = condition.execute(ctx); + assertThat(result.met(), is(true)); + resolvedValues = result.getResolvedValues(); + assertThat(resolvedValues, notNullValue()); + assertThat(resolvedValues.size(), is(1)); + assertThat(resolvedValues, hasEntry("ctx.payload.aggregations.rate.buckets.0.doc_count", (Object) 5)); + } + + public void testExecuteAccessHits() throws Exception { + CompareCondition condition = new CompareCondition("ctx.payload.hits.hits.0._score", CompareCondition.Op.EQ, 1, + Clock.systemUTC()); + SearchHit hit = new 
SearchHit(0, "1", new Text("type"), null); + hit.score(1f); + hit.shard(new SearchShardTarget("a", new Index("a", "indexUUID"), 0, null)); + + InternalSearchResponse internalSearchResponse = new InternalSearchResponse( + new SearchHits(new SearchHit[]{hit}, 1L, 1f), null, null, null, false, false, 1); + SearchResponse response = new SearchResponse(internalSearchResponse, "", 3, 3, 0, 500L, ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY); + + WatchExecutionContext ctx = mockExecutionContext("_watch_name", new Payload.XContent(response)); + assertThat(condition.execute(ctx).met(), is(true)); + hit.score(2f); + when(ctx.payload()).thenReturn(new Payload.XContent(response)); + CompareCondition.Result result = condition.execute(ctx); + assertThat(result.met(), is(false)); + Map resolvedValues = result.getResolvedValues(); + assertThat(resolvedValues, notNullValue()); + assertThat(resolvedValues.size(), is(1)); + assertThat(resolvedValues, hasEntry(is("ctx.payload.hits.hits.0._score"), notNullValue())); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionTests.java new file mode 100644 index 0000000000000..7331be4553bbe --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionTests.java @@ -0,0 +1,275 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.condition; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.ClockMock; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.condition.CompareCondition.Op; +import org.joda.time.DateTime; + +import java.time.Clock; +import java.util.Arrays; +import java.util.Locale; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.mockExecutionContext; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class CompareConditionTests extends ESTestCase { + public void testOpEvalEQ() throws Exception { + assertThat(CompareCondition.Op.EQ.eval(null, null), is(true)); + assertThat(CompareCondition.Op.EQ.eval(4, 3.0), is(false)); + assertThat(CompareCondition.Op.EQ.eval(3, 3.0), is(true)); + assertThat(CompareCondition.Op.EQ.eval(2, new Float(3.0)), is(false)); + assertThat(CompareCondition.Op.EQ.eval(3, null), is(false)); + assertThat(CompareCondition.Op.EQ.eval(2, "2"), is(true)); // comparing as strings + assertThat(CompareCondition.Op.EQ.eval(3, "4"), is(false)); // comparing as strings + assertThat(CompareCondition.Op.EQ.eval(3, "a"), is(false)); // comparing as strings + 
assertThat(CompareCondition.Op.EQ.eval("3", 3), is(true)); // comparing as numbers + assertThat(CompareCondition.Op.EQ.eval("a", "aa"), is(false)); + assertThat(CompareCondition.Op.EQ.eval("a", "a"), is(true)); + assertThat(CompareCondition.Op.EQ.eval("aa", "ab"), is(false)); + assertThat(CompareCondition.Op.EQ.eval(singletonMap("k", "v"), singletonMap("k", "v")), is(true)); + assertThat(CompareCondition.Op.EQ.eval(singletonMap("k", "v"), singletonMap("k1", "v1")), is(false)); + assertThat(CompareCondition.Op.EQ.eval(Arrays.asList("k", "v"), Arrays.asList("k", "v")), is(true)); + assertThat(CompareCondition.Op.EQ.eval(Arrays.asList("k", "v"), Arrays.asList("k1", "v1")), is(false)); + assertThat(CompareCondition.Op.EQ.eval(Double.NaN, 250000), is(false)); + assertThat(CompareCondition.Op.EQ.eval(250000, Double.NaN), is(false)); + assertThat(CompareCondition.Op.EQ.eval(Double.NaN, Double.NaN), is(true)); + assertThat(CompareCondition.Op.EQ.eval(Float.NaN, 250000), is(false)); + assertThat(CompareCondition.Op.EQ.eval(250000, Float.NaN), is(false)); + assertThat(CompareCondition.Op.EQ.eval(Float.NaN, Float.NaN), is(true)); + + } + + public void testOpEvalNotEQ() throws Exception { + assertThat(CompareCondition.Op.NOT_EQ.eval(null, null), is(false)); + assertThat(CompareCondition.Op.NOT_EQ.eval(4, 3.0), is(true)); + assertThat(CompareCondition.Op.NOT_EQ.eval(3, 3.0), is(false)); + assertThat(CompareCondition.Op.NOT_EQ.eval(2, new Float(3.0)), is(true)); + assertThat(CompareCondition.Op.NOT_EQ.eval(3, null), is(true)); + assertThat(CompareCondition.Op.NOT_EQ.eval(2, "2"), is(false)); // comparing as strings + assertThat(CompareCondition.Op.NOT_EQ.eval(3, "4"), is(true)); // comparing as strings + assertThat(CompareCondition.Op.NOT_EQ.eval(3, "a"), is(true)); // comparing as strings + assertThat(CompareCondition.Op.NOT_EQ.eval("3", 3), is(false)); // comparing as numbers + assertThat(CompareCondition.Op.NOT_EQ.eval("a", "aa"), is(true)); + assertThat(CompareCondition.Op.NOT_EQ.eval("a", "a"), is(false)); + assertThat(CompareCondition.Op.NOT_EQ.eval("aa", "ab"), is(true)); + assertThat(CompareCondition.Op.NOT_EQ.eval(singletonMap("k", "v"), singletonMap("k", "v")), is(false)); + assertThat(CompareCondition.Op.NOT_EQ.eval(singletonMap("k", "v"), singletonMap("k1", "v1")), is(true)); + assertThat(CompareCondition.Op.NOT_EQ.eval(Arrays.asList("k", "v"), Arrays.asList("k", "v")), is(false)); + assertThat(CompareCondition.Op.NOT_EQ.eval(Arrays.asList("k", "v"), Arrays.asList("k1", "v1")), is(true)); + assertThat(CompareCondition.Op.NOT_EQ.eval(Double.NaN, 250000), is(true)); + assertThat(CompareCondition.Op.NOT_EQ.eval(250000, Double.NaN), is(true)); + assertThat(CompareCondition.Op.NOT_EQ.eval(Double.NaN, Double.NaN), is(false)); + assertThat(CompareCondition.Op.NOT_EQ.eval(Float.NaN, 250000), is(true)); + assertThat(CompareCondition.Op.NOT_EQ.eval(250000, Float.NaN), is(true)); + assertThat(CompareCondition.Op.NOT_EQ.eval(Float.NaN, Float.NaN), is(false)); + } + + public void testOpEvalGTE() throws Exception { + assertThat(CompareCondition.Op.GTE.eval(4, 3.0), is(true)); + assertThat(CompareCondition.Op.GTE.eval(3, 3.0), is(true)); + assertThat(CompareCondition.Op.GTE.eval(2, new Float(3.0)), is(false)); + assertThat(CompareCondition.Op.GTE.eval(3, null), is(false)); + assertThat(CompareCondition.Op.GTE.eval(3, "2"), is(true)); // comparing as strings + assertThat(CompareCondition.Op.GTE.eval(3, "4"), is(false)); // comparing as strings + assertThat(CompareCondition.Op.GTE.eval(3, "a"), is(false)); 
// comparing as strings + assertThat(CompareCondition.Op.GTE.eval("4", 3), is(true)); // comparing as numbers + assertThat(CompareCondition.Op.GTE.eval("a", "aa"), is(false)); + assertThat(CompareCondition.Op.GTE.eval("a", "a"), is(true)); + assertThat(CompareCondition.Op.GTE.eval("aa", "ab"), is(false)); + assertThat(CompareCondition.Op.GTE.eval(Double.NaN, 250000), is(false)); + assertThat(CompareCondition.Op.GTE.eval(250000, Double.NaN), is(false)); + assertThat(CompareCondition.Op.GTE.eval(Double.NaN, Double.NaN), is(true)); + assertThat(CompareCondition.Op.GTE.eval(Float.NaN, 250000), is(false)); + assertThat(CompareCondition.Op.GTE.eval(250000, Float.NaN), is(false)); + assertThat(CompareCondition.Op.GTE.eval(Float.NaN, Float.NaN), is(true)); + } + + public void testOpEvalGT() throws Exception { + assertThat(CompareCondition.Op.GT.eval(4, 3.0), is(true)); + assertThat(CompareCondition.Op.GT.eval(3, 3.0), is(false)); + assertThat(CompareCondition.Op.GT.eval(2, new Float(3.0)), is(false)); + assertThat(CompareCondition.Op.GT.eval(3, null), is(false)); + assertThat(CompareCondition.Op.GT.eval(3, "2"), is(true)); // comparing as strings + assertThat(CompareCondition.Op.GT.eval(3, "4"), is(false)); // comparing as strings + assertThat(CompareCondition.Op.GT.eval(3, "a"), is(false)); // comparing as strings + assertThat(CompareCondition.Op.GT.eval("4", 3), is(true)); // comparing as numbers + assertThat(CompareCondition.Op.GT.eval("a", "aa"), is(false)); + assertThat(CompareCondition.Op.GT.eval("a", "a"), is(false)); + assertThat(CompareCondition.Op.GT.eval("aa", "ab"), is(false)); + assertThat(CompareCondition.Op.GT.eval(Double.NaN, 250000), is(false)); + assertThat(CompareCondition.Op.GT.eval(250000, Double.NaN), is(false)); + assertThat(CompareCondition.Op.GT.eval(Double.NaN, Double.NaN), is(false)); + assertThat(CompareCondition.Op.GT.eval(Float.NaN, 250000), is(false)); + assertThat(CompareCondition.Op.GT.eval(250000, Float.NaN), is(false)); + assertThat(CompareCondition.Op.GT.eval(Float.NaN, Float.NaN), is(false)); + + } + + public void testOpEvalLTE() throws Exception { + assertThat(CompareCondition.Op.LTE.eval(4, 3.0), is(false)); + assertThat(CompareCondition.Op.LTE.eval(3, 3.0), is(true)); + assertThat(CompareCondition.Op.LTE.eval(2, new Float(3.0)), is(true)); + assertThat(CompareCondition.Op.LTE.eval(3, null), is(false)); + assertThat(CompareCondition.Op.LTE.eval(3, "2"), is(false)); // comparing as strings + assertThat(CompareCondition.Op.LTE.eval(3, "4"), is(true)); // comparing as strings + assertThat(CompareCondition.Op.LTE.eval(3, "a"), is(true)); // comparing as strings + assertThat(CompareCondition.Op.LTE.eval("4", 3), is(false)); // comparing as numbers + assertThat(CompareCondition.Op.LTE.eval("a", "aa"), is(true)); + assertThat(CompareCondition.Op.LTE.eval("a", "a"), is(true)); + assertThat(CompareCondition.Op.LTE.eval("aa", "ab"), is(true)); + assertThat(CompareCondition.Op.LTE.eval(Double.NaN, 250000), is(false)); + assertThat(CompareCondition.Op.LTE.eval(250000, Double.NaN), is(false)); + assertThat(CompareCondition.Op.LTE.eval(Double.NaN, Double.NaN), is(true)); + assertThat(CompareCondition.Op.LTE.eval(Float.NaN, 250000), is(false)); + assertThat(CompareCondition.Op.LTE.eval(250000, Float.NaN), is(false)); + assertThat(CompareCondition.Op.LTE.eval(Float.NaN, Float.NaN), is(true)); + } + + public void testOpEvalLT() throws Exception { + assertThat(CompareCondition.Op.LT.eval(4, 3.0), is(false)); + assertThat(CompareCondition.Op.LT.eval(3, 3.0), is(false)); + 
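
The ordering assertions (`GT`, `GTE`, `LT`, `LTE`) follow the same coercion idea plus two extra rules: `null` is never ordered against anything, and `NaN` is not ordered against real numbers even though `NaN` compares equal to `NaN`. A possible companion sketch to the equality example above, again with hypothetical names and no claim to match the real implementation:

```java
// Hypothetical ordering helper consistent with the assertions in these tests.
public class LenientOrderingSketch {

    // Returns null when the two values cannot be ordered (null operand, or NaN vs. a real number).
    static Integer lenientCompare(Object a, Object b) {
        if (a == null || b == null) {
            return null;
        }
        if (a instanceof Number || b instanceof Number) {
            try {
                double da = Double.parseDouble(a.toString());
                double db = Double.parseDouble(b.toString());
                if (Double.isNaN(da) != Double.isNaN(db)) {
                    return null;                              // NaN is not ordered against real numbers
                }
                return Double.compare(da, db);                // NaN vs. NaN compares as equal
            } catch (NumberFormatException e) {
                // numeric coercion failed: fall through to lexicographic comparison
            }
        }
        return a.toString().compareTo(b.toString());
    }

    static boolean lt(Object a, Object b)  { Integer c = lenientCompare(a, b); return c != null && c < 0; }
    static boolean lte(Object a, Object b) { Integer c = lenientCompare(a, b); return c != null && c <= 0; }

    public static void main(String[] args) {
        System.out.println(lt(2, new Float(3.0)));        // true  (numeric comparison)
        System.out.println(lt(3, "a"));                   // true  ("3" < "a" as strings)
        System.out.println(lt(3, null));                  // false (null is never ordered)
        System.out.println(lte(Double.NaN, Double.NaN));  // true  (NaN equals NaN here)
        System.out.println(lt(Double.NaN, 250000));       // false
    }
}
```
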
assertThat(CompareCondition.Op.LT.eval(2, new Float(3.0)), is(true)); + assertThat(CompareCondition.Op.LT.eval(3, null), is(false)); + assertThat(CompareCondition.Op.LT.eval(3, "2"), is(false)); // comparing as strings + assertThat(CompareCondition.Op.LT.eval(3, "4"), is(true)); // comparing as strings + assertThat(CompareCondition.Op.LT.eval(3, "a"), is(true)); // comparing as strings + assertThat(CompareCondition.Op.LT.eval("4", 3), is(false)); // comparing as numbers + assertThat(CompareCondition.Op.LT.eval("a", "aa"), is(true)); + assertThat(CompareCondition.Op.LT.eval("a", "a"), is(false)); + assertThat(CompareCondition.Op.LT.eval("aa", "ab"), is(true)); + assertThat(CompareCondition.Op.LT.eval(Double.NaN, 250000), is(false)); + assertThat(CompareCondition.Op.LT.eval(250000, Double.NaN), is(false)); + assertThat(CompareCondition.Op.LT.eval(Double.NaN, Double.NaN), is(false)); + assertThat(CompareCondition.Op.LT.eval(Float.NaN, 250000), is(false)); + assertThat(CompareCondition.Op.LT.eval(250000, Float.NaN), is(false)); + assertThat(CompareCondition.Op.LT.eval(Float.NaN, Float.NaN), is(false)); + } + + public void testExecute() throws Exception { + Op op = randomFrom(CompareCondition.Op.values()); + int value = randomInt(10); + int payloadValue = randomInt(10); + boolean met = op.eval(payloadValue, value); + + CompareCondition condition = new CompareCondition("ctx.payload.value", op, value, Clock.systemUTC()); + WatchExecutionContext ctx = mockExecutionContext("_name", new Payload.Simple("value", payloadValue)); + assertThat(condition.execute(ctx).met(), is(met)); + } + + public void testExecuteDateMath() throws Exception { + ClockMock clock = ClockMock.frozen(); + boolean met = randomBoolean(); + Op op = met ? randomFrom(CompareCondition.Op.GT, CompareCondition.Op.GTE, CompareCondition.Op.NOT_EQ) : + randomFrom(CompareCondition.Op.LT, CompareCondition.Op.LTE, CompareCondition.Op.EQ); + String value = "<{now-1d}>"; + DateTime payloadValue = new DateTime(clock.millis()); + + CompareCondition condition = new CompareCondition("ctx.payload.value", op, value, clock); + WatchExecutionContext ctx = mockExecutionContext("_name", new Payload.Simple("value", payloadValue)); + assertThat(condition.execute(ctx).met(), is(met)); + } + + public void testExecutePath() throws Exception { + ClockMock clock = ClockMock.frozen(); + boolean met = randomBoolean(); + Op op = met ? 
CompareCondition.Op.EQ : CompareCondition.Op.NOT_EQ; + String value = "{{ctx.payload.value}}"; + Object payloadValue = new Object(); + + CompareCondition condition = new CompareCondition("ctx.payload.value", op, value, clock); + WatchExecutionContext ctx = mockExecutionContext("_name", new Payload.Simple("value", payloadValue)); + assertThat(condition.execute(ctx).met(), is(met)); + } + + public void testParseValid() throws Exception { + Op op = randomFrom(CompareCondition.Op.values()); + Object value = randomFrom("value", 1, null); + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + builder.startObject("key1.key2"); + builder.field(op.name().toLowerCase(Locale.ROOT), value); + builder.endObject(); + builder.endObject(); + + XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + parser.nextToken(); + + CompareCondition condition = CompareCondition.parse(ClockMock.frozen(), "_id", parser); + + assertThat(condition, notNullValue()); + assertThat(condition.getPath(), is("key1.key2")); + assertThat(condition.getOp(), is(op)); + assertThat(condition.getValue(), is(value)); + } + + public void testParseInvalidNoOperationBody() throws Exception { + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + builder.startObject("key1.key2"); + builder.endObject(); + builder.endObject(); + + XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + try { + CompareCondition.parse(ClockMock.frozen(), "_id", parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), containsString("expected an object but found [null] instead")); + } + } + + public void testParseInvalidUnknownOp() throws Exception { + Object value = randomFrom("value", 1, null); + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + builder.startObject("key1.key2"); + builder.field("foobar", value); + builder.endObject(); + builder.endObject(); + + XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + parser.nextToken(); + try { + CompareCondition.parse(ClockMock.frozen(), "_id", parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), containsString("unknown comparison operator [foobar]")); + } + } + + public void testParseInvalidWrongValueForOp() throws Exception { + Object value = randomFrom(Arrays.asList("1", "2"), singletonMap("key", "value")); + String op = randomFrom("lt", "lte", "gt", "gte"); + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + builder.startObject("key1.key2"); + builder.field(op, value); + builder.endObject(); + builder.endObject(); + + XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + parser.nextToken(); + try { + CompareCondition.parse(ClockMock.frozen(), "_id", parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), containsString("must either be a numeric, string, boolean or null value, but found [")); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/NeverConditionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/NeverConditionTests.java new file mode 100644 index 0000000000000..61bc87185fe1d --- /dev/null +++ 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/NeverConditionTests.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.condition; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.condition.ExecutableCondition; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; + +public class NeverConditionTests extends ESTestCase { + public void testExecute() throws Exception { + ExecutableCondition executable = NeverCondition.INSTANCE; + assertFalse(executable.execute(null).met()); + } + + public void testParserValid() throws Exception { + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + builder.endObject(); + XContentParser parser = createParser(builder); + parser.nextToken(); + + ExecutableCondition executable = NeverCondition.parse("_id", parser); + assertFalse(executable.execute(null).met()); + } + + public void testParserInvalid() throws Exception { + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + builder.field("foo", "bar"); + builder.endObject(); + XContentParser parser = createParser(builder); + parser.nextToken(); + try { + NeverCondition.parse("_id", parser); + fail("expected a condition exception trying to parse an invalid condition XContent, [" + + InternalAlwaysCondition.TYPE + "] condition should not parse with a body"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), containsString("expected an empty object but found [foo]")); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java new file mode 100644 index 0000000000000..c7b7f2c63cdde --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java @@ -0,0 +1,234 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.condition; + + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.script.GeneralScriptException; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.script.ScriptException; +import org.elasticsearch.script.ScriptMetaData; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.condition.ExecutableCondition; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.Watcher; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Function; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArgument; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.mockExecutionContext; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +public class ScriptConditionTests extends ESTestCase { + + private ScriptService scriptService; + + @Before + public void init() throws IOException { + Map<String, Function<Map<String, Object>, Object>> scripts = new HashMap<>(); + scripts.put("return true", s -> true); + scripts.put("return new Object()", s -> new Object()); + + scripts.put("ctx.trigger.scheduled_time.getMillis() < new Date().time", vars -> { + DateTime scheduledTime = (DateTime) XContentMapValues.extractValue("ctx.trigger.scheduled_time", vars); + return scheduledTime.getMillis() < new Date().getTime(); + }); + + scripts.put("null.foo", s -> { + throw new ScriptException("Error evaluating null.foo", new IllegalArgumentException(), emptyList(), + "null.foo", AbstractWatcherIntegrationTestCase.WATCHER_LANG); + }); + + scripts.put("ctx.payload.hits.total > 1", vars -> { + int total = (int) XContentMapValues.extractValue("ctx.payload.hits.total", vars); + return total > 1; + }); + + scripts.put("ctx.payload.hits.total > threshold", vars -> { + int total = (int) XContentMapValues.extractValue("ctx.payload.hits.total", vars); + int threshold = (int) XContentMapValues.extractValue("threshold", vars); + return total > threshold; + }); + + ScriptEngine engine = new MockScriptEngine(MockScriptEngine.NAME, scripts); + 
scriptService = new ScriptService(Settings.EMPTY, Collections.singletonMap(engine.getType(), engine), + Collections.singletonMap(Watcher.SCRIPT_EXECUTABLE_CONTEXT.name, Watcher.SCRIPT_EXECUTABLE_CONTEXT)); + + ClusterState.Builder clusterState = new ClusterState.Builder(new ClusterName("_name")); + clusterState.metaData(MetaData.builder().putCustom(ScriptMetaData.TYPE, new ScriptMetaData.Builder(null).build())); + ClusterState cs = clusterState.build(); + scriptService.applyClusterState(new ClusterChangedEvent("_source", cs, cs)); + } + + public void testExecute() throws Exception { + ScriptCondition condition = new ScriptCondition(mockScript("ctx.payload.hits.total > 1"), scriptService); + SearchResponse response = new SearchResponse(InternalSearchResponse.empty(), "", 3, 3, 0, 500L, ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY); + WatchExecutionContext ctx = mockExecutionContext("_name", new Payload.XContent(response)); + assertFalse(condition.execute(ctx).met()); + } + + public void testExecuteMergedParams() throws Exception { + Script script = new Script(ScriptType.INLINE, "mockscript", "ctx.payload.hits.total > threshold", singletonMap("threshold", 1)); + ScriptCondition executable = new ScriptCondition(script, scriptService); + SearchResponse response = new SearchResponse(InternalSearchResponse.empty(), "", 3, 3, 0, 500L, ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY); + WatchExecutionContext ctx = mockExecutionContext("_name", new Payload.XContent(response)); + assertFalse(executable.execute(ctx).met()); + } + + public void testParserValid() throws Exception { + + XContentBuilder builder = createConditionContent("ctx.payload.hits.total > 1", "mockscript", ScriptType.INLINE); + + XContentParser parser = createParser(builder); + parser.nextToken(); + ExecutableCondition executable = ScriptCondition.parse(scriptService, "_watch", parser); + + SearchResponse response = new SearchResponse(InternalSearchResponse.empty(), "", 3, 3, 0, 500L, ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY); + WatchExecutionContext ctx = mockExecutionContext("_name", new Payload.XContent(response)); + + assertFalse(executable.execute(ctx).met()); + + + builder = createConditionContent("return true", "mockscript", ScriptType.INLINE); + parser = createParser(builder); + parser.nextToken(); + executable = ScriptCondition.parse(scriptService, "_watch", parser); + + ctx = mockExecutionContext("_name", new Payload.XContent(response)); + + assertTrue(executable.execute(ctx).met()); + } + + public void testParserInvalid() throws Exception { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject().endObject(); + XContentParser parser = createParser(builder); + parser.nextToken(); + try { + ScriptCondition.parse(scriptService, "_id", parser); + fail("expected a condition exception trying to parse an invalid condition XContent"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), + containsString("must specify either [source] for an inline script or [id] for a stored script")); + } + } + + public void testScriptConditionParserBadScript() throws Exception { + ScriptType scriptType = randomFrom(ScriptType.values()); + String script; + Class expectedException; + switch (scriptType) { + case STORED: + expectedException = ResourceNotFoundException.class; + script = "nonExisting_script"; + break; + default: + expectedException = GeneralScriptException.class; + script = "foo = = 1"; + } + XContentBuilder builder = 
createConditionContent(script, "mockscript", scriptType); + XContentParser parser = createParser(builder); + parser.nextToken(); + + expectThrows(expectedException, + () -> ScriptCondition.parse(scriptService, "_watch", parser)); + } + + public void testScriptConditionParser_badLang() throws Exception { + String script = "return true"; + XContentBuilder builder = createConditionContent(script, "not_a_valid_lang", ScriptType.INLINE); + XContentParser parser = createParser(builder); + parser.nextToken(); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> ScriptCondition.parse(scriptService, "_watch", parser)); + assertThat(exception.getMessage(), containsString("script_lang not supported [not_a_valid_lang]")); + } + + public void testScriptConditionThrowException() throws Exception { + ScriptCondition condition = new ScriptCondition( + mockScript("null.foo"), scriptService); + SearchResponse response = new SearchResponse(InternalSearchResponse.empty(), "", 3, 3, 0, 500L, ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY); + WatchExecutionContext ctx = mockExecutionContext("_name", new Payload.XContent(response)); + ScriptException exception = expectThrows(ScriptException.class, () -> condition.execute(ctx)); + assertThat(exception.getMessage(), containsString("Error evaluating null.foo")); + } + + public void testScriptConditionReturnObjectThrowsException() throws Exception { + ScriptCondition condition = new ScriptCondition(mockScript("return new Object()"), scriptService); + SearchResponse response = new SearchResponse(InternalSearchResponse.empty(), "", 3, 3, 0, 500L, ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY); + WatchExecutionContext ctx = mockExecutionContext("_name", new Payload.XContent(response)); + Exception exception = expectThrows(IllegalStateException.class, () -> condition.execute(ctx)); + assertThat(exception.getMessage(), + containsString("condition [script] must return a boolean value (true|false) but instead returned [_name]")); + } + + public void testScriptConditionAccessCtx() throws Exception { + ScriptCondition condition = new ScriptCondition(mockScript("ctx.trigger.scheduled_time.getMillis() < new Date().time"), + scriptService); + SearchResponse response = new SearchResponse(InternalSearchResponse.empty(), "", 3, 3, 0, 500L, ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY); + WatchExecutionContext ctx = mockExecutionContext("_name", new DateTime(DateTimeZone.UTC), new Payload.XContent(response)); + Thread.sleep(10); + assertThat(condition.execute(ctx).met(), is(true)); + } + + private static XContentBuilder createConditionContent(String script, String scriptLang, ScriptType scriptType) throws IOException { + XContentBuilder builder = jsonBuilder(); + if (scriptType == null) { + return builder.value(script); + } + builder.startObject(); + switch (scriptType) { + case INLINE: + builder.field("source", script); + break; + case STORED: + builder.field("id", script); + break; + default: + throw illegalArgument("unsupported script type [{}]", scriptType); + } + if (scriptLang != null && scriptType != ScriptType.STORED) { + builder.field("lang", scriptLang); + } + return builder.endObject(); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java new file mode 100644 index 0000000000000..9684c55692f1b --- 
/dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java @@ -0,0 +1,1141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.execution; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.watcher.actions.Action; +import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; +import org.elasticsearch.xpack.core.watcher.actions.ActionWrapper; +import org.elasticsearch.xpack.core.watcher.actions.ActionWrapperResult; +import org.elasticsearch.xpack.core.watcher.actions.ExecutableAction; +import org.elasticsearch.xpack.core.watcher.actions.throttler.ActionThrottler; +import org.elasticsearch.xpack.core.watcher.actions.throttler.Throttler; +import org.elasticsearch.xpack.core.watcher.common.stats.Counters; +import org.elasticsearch.xpack.core.watcher.condition.Condition; +import org.elasticsearch.xpack.core.watcher.condition.ExecutableCondition; +import org.elasticsearch.xpack.core.watcher.execution.ExecutionPhase; +import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; +import org.elasticsearch.xpack.core.watcher.execution.QueuedWatch; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionSnapshot; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.history.WatchRecord; +import org.elasticsearch.xpack.core.watcher.input.ExecutableInput; +import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.xpack.core.watcher.transform.ExecutableTransform; +import org.elasticsearch.xpack.core.watcher.transform.Transform; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.core.watcher.watch.ClockMock; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import 
org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.condition.NeverCondition; +import org.elasticsearch.xpack.watcher.history.HistoryStore; +import org.elasticsearch.xpack.watcher.input.none.ExecutableNoneInput; +import org.elasticsearch.xpack.watcher.trigger.manual.ManualTrigger; +import org.elasticsearch.xpack.watcher.trigger.manual.ManualTriggerEvent; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent; +import org.elasticsearch.xpack.watcher.watch.WatchParser; +import org.joda.time.DateTime; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.io.IOException; +import java.time.Clock; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.atomic.AtomicBoolean; + +import static java.util.Arrays.asList; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.joda.time.DateTime.now; +import static org.joda.time.DateTimeZone.UTC; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class ExecutionServiceTests extends ESTestCase { + + private Payload payload; + private ExecutableInput input; + private Input.Result inputResult; + + private TriggeredWatchStore triggeredWatchStore; + private WatchExecutor executor; + private HistoryStore historyStore; + private ExecutionService executionService; + private Clock clock; + private Client client; + private WatchParser parser; + + @Before + public void init() throws Exception { + payload = mock(Payload.class); + input = mock(ExecutableInput.class); + inputResult = mock(Input.Result.class); + when(inputResult.status()).thenReturn(Input.Result.Status.SUCCESS); + when(inputResult.payload()).thenReturn(payload); + when(input.execute(any(WatchExecutionContext.class), any(Payload.class))).thenReturn(inputResult); + + triggeredWatchStore = mock(TriggeredWatchStore.class); + historyStore = mock(HistoryStore.class); + + executor = mock(WatchExecutor.class); + when(executor.queue()).thenReturn(new ArrayBlockingQueue<>(1)); + + clock = ClockMock.frozen(); + client = mock(Client.class); + ThreadPool threadPool = mock(ThreadPool.class); + when(client.threadPool()).thenReturn(threadPool); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + parser = mock(WatchParser.class); + + DiscoveryNode discoveryNode = new DiscoveryNode("node_1", 
ESTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(), + new HashSet<>(asList(DiscoveryNode.Role.values())), Version.CURRENT); + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.localNode()).thenReturn(discoveryNode); + + executionService = new ExecutionService(Settings.EMPTY, historyStore, triggeredWatchStore, executor, clock, parser, + clusterService, client); + + executionService.start(); + } + + public void testExecute() throws Exception { + Watch watch = mock(Watch.class); + when(watch.id()).thenReturn("_id"); + GetResponse getResponse = mock(GetResponse.class); + when(getResponse.isExists()).thenReturn(true); + mockGetWatchResponse(client, "_id", getResponse); + + DateTime now = new DateTime(clock.millis()); + ScheduleTriggerEvent event = new ScheduleTriggerEvent("_id", now, now); + TriggeredExecutionContext context = new TriggeredExecutionContext(watch.id(), now, event, timeValueSeconds(5)); + when(parser.parseWithSecrets(eq(watch.id()), eq(true), any(), any(), any())).thenReturn(watch); + + Condition.Result conditionResult = InternalAlwaysCondition.RESULT_INSTANCE; + ExecutableCondition condition = mock(ExecutableCondition.class); + // introduce a very short sleep time which we can use to check if the duration in milliseconds is correctly created + long randomConditionDurationMs = randomIntBetween(5, 10); + when(condition.execute(any(WatchExecutionContext.class))).then(invocationOnMock -> { + Thread.sleep(randomConditionDurationMs); + return conditionResult; + }); + + // watch level transform + Transform.Result watchTransformResult = mock(Transform.Result.class); + when(watchTransformResult.status()).thenReturn(Transform.Result.Status.SUCCESS); + when(watchTransformResult.payload()).thenReturn(payload); + ExecutableTransform watchTransform = mock(ExecutableTransform.class); + when(watchTransform.execute(context, payload)).thenReturn(watchTransformResult); + + // action throttler + Throttler.Result throttleResult = mock(Throttler.Result.class); + when(throttleResult.throttle()).thenReturn(false); + ActionThrottler throttler = mock(ActionThrottler.class); + when(throttler.throttle("_action", context)).thenReturn(throttleResult); + + // action level conditional + ExecutableCondition actionCondition = null; + Condition.Result actionConditionResult = null; + + if (randomBoolean()) { + Tuple pair = whenCondition(context); + + actionCondition = pair.v1(); + actionConditionResult = pair.v2(); + } + + // action level transform + ExecutableTransform actionTransform = null; + Transform.Result actionTransformResult = null; + + if (randomBoolean()) { + Tuple pair = whenTransform(context); + + actionTransform = pair.v1(); + actionTransformResult = pair.v2(); + } + + // the action + Action.Result actionResult = mock(Action.Result.class); + when(actionResult.type()).thenReturn("_action_type"); + when(actionResult.status()).thenReturn(Action.Result.Status.SUCCESS); + ExecutableAction action = mock(ExecutableAction.class); + when(action.type()).thenReturn("MY_AWESOME_TYPE"); + when(action.execute("_action", context, payload)).thenReturn(actionResult); + + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action); + + WatchStatus watchStatus = new WatchStatus(now, singletonMap("_action", new ActionStatus(now))); + + when(watch.input()).thenReturn(input); + when(watch.condition()).thenReturn(condition); + when(watch.transform()).thenReturn(watchTransform); + 
when(watch.actions()).thenReturn(Collections.singletonList(actionWrapper)); + when(watch.status()).thenReturn(watchStatus); + + WatchRecord watchRecord = executionService.execute(context); + assertThat(watchRecord.result().conditionResult(), sameInstance(conditionResult)); + assertThat(watchRecord.result().transformResult(), sameInstance(watchTransformResult)); + assertThat(watchRecord.getNodeId(), is("node_1")); + ActionWrapperResult result = watchRecord.result().actionsResults().get("_action"); + assertThat(result, notNullValue()); + assertThat(result.id(), is("_action")); + assertThat(result.condition(), sameInstance(actionConditionResult)); + assertThat(result.transform(), sameInstance(actionTransformResult)); + assertThat(result.action(), sameInstance(actionResult)); + + verify(historyStore, times(1)).put(watchRecord); + verify(condition, times(1)).execute(context); + verify(watchTransform, times(1)).execute(context, payload); + verify(action, times(1)).execute("_action", context, payload); + + // test execution duration, make sure it is set at all + // no exact duration check here, as different platforms handle sleep differently, so this might not be exact + assertThat(watchRecord.result().executionDurationMs(), is(greaterThan(0L))); + assertThat(watchRecord.result().executionTime(), is(notNullValue())); + + // test stats + Counters counters = executionService.executionTimes(); + assertThat(counters.get("execution.actions._all.total_time_in_ms"), is(notNullValue())); + assertThat(counters.get("execution.actions._all.total"), is(1L)); + assertThat(counters.get("execution.actions.MY_AWESOME_TYPE.total_time_in_ms"), is(notNullValue())); + assertThat(counters.get("execution.actions.MY_AWESOME_TYPE.total"), is(1L)); + } + + public void testExecuteFailedInput() throws Exception { + GetResponse getResponse = mock(GetResponse.class); + when(getResponse.isExists()).thenReturn(true); + mockGetWatchResponse(client, "_id", getResponse); + + Watch watch = mock(Watch.class); + when(watch.id()).thenReturn("_id"); + + DateTime now = new DateTime(clock.millis()); + ScheduleTriggerEvent event = new ScheduleTriggerEvent("_id", now, now); + TriggeredExecutionContext context = new TriggeredExecutionContext(watch.id(), now, event, timeValueSeconds(5)); + when(parser.parseWithSecrets(eq(watch.id()), eq(true), any(), any(), any())).thenReturn(watch); + + input = mock(ExecutableInput.class); + Input.Result inputResult = mock(Input.Result.class); + when(inputResult.status()).thenReturn(Input.Result.Status.FAILURE); + when(inputResult.getException()).thenReturn(new IOException()); + when(input.execute(eq(context), any(Payload.class))).thenReturn(inputResult); + + Condition.Result conditionResult = InternalAlwaysCondition.RESULT_INSTANCE; + ExecutableCondition condition = mock(ExecutableCondition.class); + when(condition.execute(any(WatchExecutionContext.class))).thenReturn(conditionResult); + + // watch level transform + Transform.Result watchTransformResult = mock(Transform.Result.class); + when(watchTransformResult.payload()).thenReturn(payload); + ExecutableTransform watchTransform = mock(ExecutableTransform.class); + when(watchTransform.execute(context, payload)).thenReturn(watchTransformResult); + + // action throttler + Throttler.Result throttleResult = mock(Throttler.Result.class); + when(throttleResult.throttle()).thenReturn(false); + ActionThrottler throttler = mock(ActionThrottler.class); + when(throttler.throttle("_action", context)).thenReturn(throttleResult); + + // action level condition (unused) 
+ ExecutableCondition actionCondition = randomBoolean() ? mock(ExecutableCondition.class) : null; + // action level transform (unused) + ExecutableTransform actionTransform = randomBoolean() ? mock(ExecutableTransform.class) : null; + + // the action + Action.Result actionResult = mock(Action.Result.class); + when(actionResult.type()).thenReturn("_action_type"); + when(actionResult.status()).thenReturn(Action.Result.Status.SUCCESS); + ExecutableAction action = mock(ExecutableAction.class); + when(action.execute("_action", context, payload)).thenReturn(actionResult); + + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action); + WatchStatus watchStatus = new WatchStatus(now, singletonMap("_action", new ActionStatus(now))); + + when(watch.input()).thenReturn(input); + when(watch.condition()).thenReturn(condition); + when(watch.transform()).thenReturn(watchTransform); + when(watch.actions()).thenReturn(Arrays.asList(actionWrapper)); + when(watch.status()).thenReturn(watchStatus); + + WatchRecord watchRecord = executionService.execute(context); + assertThat(watchRecord.result().inputResult(), is(inputResult)); + assertThat(watchRecord.result().conditionResult(), nullValue()); + assertThat(watchRecord.result().transformResult(), nullValue()); + assertThat(watchRecord.result().actionsResults(), notNullValue()); + assertThat(watchRecord.result().actionsResults().size(), is(0)); + + verify(historyStore, times(1)).put(watchRecord); + verify(input, times(1)).execute(context, null); + verify(condition, never()).execute(context); + verify(watchTransform, never()).execute(context, payload); + verify(action, never()).execute("_action", context, payload); + } + + public void testExecuteFailedCondition() throws Exception { + Watch watch = mock(Watch.class); + when(watch.id()).thenReturn("_id"); + GetResponse getResponse = mock(GetResponse.class); + when(getResponse.isExists()).thenReturn(true); + mockGetWatchResponse(client, "_id", getResponse); + + DateTime now = new DateTime(clock.millis()); + ScheduleTriggerEvent event = new ScheduleTriggerEvent("_id", now, now); + TriggeredExecutionContext context = new TriggeredExecutionContext(watch.id(), now, event, timeValueSeconds(5)); + when(parser.parseWithSecrets(eq(watch.id()), eq(true), any(), any(), any())).thenReturn(watch); + + ExecutableCondition condition = mock(ExecutableCondition.class); + Condition.Result conditionResult = mock(Condition.Result.class); + when(conditionResult.status()).thenReturn(Condition.Result.Status.FAILURE); + when(conditionResult.reason()).thenReturn("_reason"); + when(condition.execute(any(WatchExecutionContext.class))).thenReturn(conditionResult); + + // watch level transform + Transform.Result watchTransformResult = mock(Transform.Result.class); + when(watchTransformResult.payload()).thenReturn(payload); + ExecutableTransform watchTransform = mock(ExecutableTransform.class); + when(watchTransform.execute(context, payload)).thenReturn(watchTransformResult); + + // action throttler + Throttler.Result throttleResult = mock(Throttler.Result.class); + when(throttleResult.throttle()).thenReturn(false); + ActionThrottler throttler = mock(ActionThrottler.class); + when(throttler.throttle("_action", context)).thenReturn(throttleResult); + + // action level condition (unused) + ExecutableCondition actionCondition = randomBoolean() ? mock(ExecutableCondition.class) : null; + // action level transform (unused) + ExecutableTransform actionTransform = randomBoolean() ? 
mock(ExecutableTransform.class) : null; + + // the action + Action.Result actionResult = mock(Action.Result.class); + when(actionResult.type()).thenReturn("_action_type"); + when(actionResult.status()).thenReturn(Action.Result.Status.SUCCESS); + ExecutableAction action = mock(ExecutableAction.class); + when(action.execute("_action", context, payload)).thenReturn(actionResult); + + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action); + WatchStatus watchStatus = new WatchStatus(now, singletonMap("_action", new ActionStatus(now))); + + when(watch.input()).thenReturn(input); + when(watch.condition()).thenReturn(condition); + when(watch.transform()).thenReturn(watchTransform); + when(watch.actions()).thenReturn(Arrays.asList(actionWrapper)); + when(watch.status()).thenReturn(watchStatus); + + WatchRecord watchRecord = executionService.execute(context); + assertThat(watchRecord.result().inputResult(), is(inputResult)); + assertThat(watchRecord.result().conditionResult(), is(conditionResult)); + assertThat(watchRecord.result().transformResult(), nullValue()); + assertThat(watchRecord.result().actionsResults(), notNullValue()); + assertThat(watchRecord.result().actionsResults().size(), is(0)); + + verify(historyStore, times(1)).put(watchRecord); + verify(input, times(1)).execute(context, null); + verify(condition, times(1)).execute(context); + verify(watchTransform, never()).execute(context, payload); + verify(action, never()).execute("_action", context, payload); + } + + public void testExecuteFailedWatchTransform() throws Exception { + Watch watch = mock(Watch.class); + when(watch.id()).thenReturn("_id"); + GetResponse getResponse = mock(GetResponse.class); + when(getResponse.isExists()).thenReturn(true); + mockGetWatchResponse(client, "_id", getResponse); + + DateTime now = new DateTime(clock.millis()); + ScheduleTriggerEvent event = new ScheduleTriggerEvent("_id", now, now); + TriggeredExecutionContext context = new TriggeredExecutionContext(watch.id(), now, event, timeValueSeconds(5)); + when(parser.parseWithSecrets(eq(watch.id()), eq(true), any(), any(), any())).thenReturn(watch); + + Condition.Result conditionResult = InternalAlwaysCondition.RESULT_INSTANCE; + ExecutableCondition condition = mock(ExecutableCondition.class); + when(condition.execute(any(WatchExecutionContext.class))).thenReturn(conditionResult); + + // watch level transform + Transform.Result watchTransformResult = mock(Transform.Result.class); + when(watchTransformResult.status()).thenReturn(Transform.Result.Status.FAILURE); + when(watchTransformResult.reason()).thenReturn("_reason"); + ExecutableTransform watchTransform = mock(ExecutableTransform.class); + when(watchTransform.execute(context, payload)).thenReturn(watchTransformResult); + + // action throttler + Throttler.Result throttleResult = mock(Throttler.Result.class); + when(throttleResult.throttle()).thenReturn(false); + ActionThrottler throttler = mock(ActionThrottler.class); + when(throttler.throttle("_action", context)).thenReturn(throttleResult); + + // action level condition (unused) + ExecutableCondition actionCondition = randomBoolean() ? mock(ExecutableCondition.class) : null; + // action level transform (unused) + ExecutableTransform actionTransform = randomBoolean() ? 
mock(ExecutableTransform.class) : null; + + // the action + Action.Result actionResult = mock(Action.Result.class); + when(actionResult.type()).thenReturn("_action_type"); + when(actionResult.status()).thenReturn(Action.Result.Status.SUCCESS); + ExecutableAction action = mock(ExecutableAction.class); + when(action.execute("_action", context, payload)).thenReturn(actionResult); + + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action); + WatchStatus watchStatus = new WatchStatus(now, singletonMap("_action", new ActionStatus(now))); + + when(watch.input()).thenReturn(input); + when(watch.condition()).thenReturn(condition); + when(watch.transform()).thenReturn(watchTransform); + when(watch.actions()).thenReturn(Arrays.asList(actionWrapper)); + when(watch.status()).thenReturn(watchStatus); + + WatchRecord watchRecord = executionService.execute(context); + assertThat(watchRecord.result().inputResult(), is(inputResult)); + assertThat(watchRecord.result().conditionResult(), is(conditionResult)); + assertThat(watchRecord.result().transformResult(), is(watchTransformResult)); + assertThat(watchRecord.result().actionsResults(), notNullValue()); + assertThat(watchRecord.result().actionsResults().size(), is(0)); + + verify(historyStore, times(1)).put(watchRecord); + verify(input, times(1)).execute(context, null); + verify(condition, times(1)).execute(context); + verify(watchTransform, times(1)).execute(context, payload); + verify(action, never()).execute("_action", context, payload); + } + + public void testExecuteFailedActionTransform() throws Exception { + Watch watch = mock(Watch.class); + when(watch.id()).thenReturn("_id"); + GetResponse getResponse = mock(GetResponse.class); + when(getResponse.isExists()).thenReturn(true); + mockGetWatchResponse(client, "_id", getResponse); + when(parser.parseWithSecrets(eq(watch.id()), eq(true), any(), any(), any())).thenReturn(watch); + + DateTime now = new DateTime(clock.millis()); + ScheduleTriggerEvent event = new ScheduleTriggerEvent("_id", now, now); + TriggeredExecutionContext context = new TriggeredExecutionContext(watch.id(), now, event, timeValueSeconds(5)); + + Condition.Result conditionResult = InternalAlwaysCondition.RESULT_INSTANCE; + ExecutableCondition condition = mock(ExecutableCondition.class); + when(condition.execute(any(WatchExecutionContext.class))).thenReturn(conditionResult); + + // watch level transform + Transform.Result watchTransformResult = mock(Transform.Result.class); + when(watchTransformResult.status()).thenReturn(Transform.Result.Status.SUCCESS); + when(watchTransformResult.payload()).thenReturn(payload); + ExecutableTransform watchTransform = mock(ExecutableTransform.class); + when(watchTransform.execute(context, payload)).thenReturn(watchTransformResult); + + // action throttler + Throttler.Result throttleResult = mock(Throttler.Result.class); + when(throttleResult.throttle()).thenReturn(false); + ActionThrottler throttler = mock(ActionThrottler.class); + when(throttler.throttle("_action", context)).thenReturn(throttleResult); + + // action level condition + ExecutableCondition actionCondition = null; + Condition.Result actionConditionResult = null; + + if (randomBoolean()) { + Tuple pair = whenCondition(context); + + actionCondition = pair.v1(); + actionConditionResult = pair.v2(); + } + + // action level transform + Transform.Result actionTransformResult = mock(Transform.Result.class); + when(actionTransformResult.status()).thenReturn(Transform.Result.Status.FAILURE); 
+ when(actionTransformResult.reason()).thenReturn("_reason"); + ExecutableTransform actionTransform = mock(ExecutableTransform.class); + when(actionTransform.execute(context, payload)).thenReturn(actionTransformResult); + + // the action + Action.Result actionResult = mock(Action.Result.class); + when(actionResult.type()).thenReturn("_action_type"); + when(actionResult.status()).thenReturn(Action.Result.Status.SUCCESS); + ExecutableAction action = mock(ExecutableAction.class); + when(action.logger()).thenReturn(logger); + when(action.execute("_action", context, payload)).thenReturn(actionResult); + + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action); + + WatchStatus watchStatus = new WatchStatus(now, singletonMap("_action", new ActionStatus(now))); + + when(watch.input()).thenReturn(input); + when(watch.condition()).thenReturn(condition); + when(watch.transform()).thenReturn(watchTransform); + when(watch.actions()).thenReturn(Arrays.asList(actionWrapper)); + when(watch.status()).thenReturn(watchStatus); + + WatchRecord watchRecord = executionService.execute(context); + assertThat(watchRecord.result().inputResult(), is(inputResult)); + assertThat(watchRecord.result().conditionResult(), is(conditionResult)); + assertThat(watchRecord.result().transformResult(), is(watchTransformResult)); + assertThat(watchRecord.result().actionsResults(), notNullValue()); + assertThat(watchRecord.result().actionsResults().size(), is(1)); + assertThat(watchRecord.result().actionsResults().get("_action").condition(), is(actionConditionResult)); + assertThat(watchRecord.result().actionsResults().get("_action").transform(), is(actionTransformResult)); + assertThat(watchRecord.result().actionsResults().get("_action").action().status(), is(Action.Result.Status.FAILURE)); + + verify(historyStore, times(1)).put(watchRecord); + verify(input, times(1)).execute(context, null); + verify(condition, times(1)).execute(context); + verify(watchTransform, times(1)).execute(context, payload); + // the action level transform is executed before the action itself + verify(action, never()).execute("_action", context, payload); + } + + public void testExecuteInner() throws Exception { + DateTime now = now(UTC); + Watch watch = mock(Watch.class); + ScheduleTriggerEvent event = new ScheduleTriggerEvent("_id", now, now); + TriggeredExecutionContext context = new TriggeredExecutionContext(watch.id(), now, event, timeValueSeconds(5)); + context.ensureWatchExists(() -> watch); + + Condition.Result conditionResult = InternalAlwaysCondition.RESULT_INSTANCE; + ExecutableCondition condition = mock(ExecutableCondition.class); + when(condition.execute(any(WatchExecutionContext.class))).thenReturn(conditionResult); + + // watch level transform + Transform.Result watchTransformResult = mock(Transform.Result.class); + when(watchTransformResult.payload()).thenReturn(payload); + ExecutableTransform watchTransform = mock(ExecutableTransform.class); + when(watchTransform.execute(context, payload)).thenReturn(watchTransformResult); + + // action throttler + Throttler.Result throttleResult = mock(Throttler.Result.class); + when(throttleResult.throttle()).thenReturn(false); + ActionThrottler throttler = mock(ActionThrottler.class); + when(throttler.throttle("_action", context)).thenReturn(throttleResult); + + // action level conditional + ExecutableCondition actionCondition = null; + Condition.Result actionConditionResult = null; + + if (randomBoolean()) { + Tuple pair = 
whenCondition(context); + + actionCondition = pair.v1(); + actionConditionResult = pair.v2(); + } + + // action level transform + ExecutableTransform actionTransform = null; + Transform.Result actionTransformResult = null; + + if (randomBoolean()) { + Tuple pair = whenTransform(context); + + actionTransform = pair.v1(); + actionTransformResult = pair.v2(); + } + + // the action + Action.Result actionResult = mock(Action.Result.class); + when(actionResult.type()).thenReturn("_action_type"); + when(actionResult.status()).thenReturn(Action.Result.Status.SUCCESS); + ExecutableAction action = mock(ExecutableAction.class); + when(action.execute("_action", context, payload)).thenReturn(actionResult); + + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action); + WatchStatus watchStatus = new WatchStatus(new DateTime(clock.millis()), singletonMap("_action", new ActionStatus(now))); + + when(watch.input()).thenReturn(input); + when(watch.condition()).thenReturn(condition); + when(watch.transform()).thenReturn(watchTransform); + when(watch.actions()).thenReturn(Arrays.asList(actionWrapper)); + when(watch.status()).thenReturn(watchStatus); + + WatchRecord watchRecord = executionService.executeInner(context); + assertThat(watchRecord.result().conditionResult(), sameInstance(conditionResult)); + assertThat(watchRecord.result().transformResult(), sameInstance(watchTransformResult)); + ActionWrapperResult result = watchRecord.result().actionsResults().get("_action"); + assertThat(result, notNullValue()); + assertThat(result.id(), is("_action")); + assertThat(result.condition(), sameInstance(actionConditionResult)); + assertThat(result.transform(), sameInstance(actionTransformResult)); + assertThat(result.action(), sameInstance(actionResult)); + + verify(condition, times(1)).execute(context); + verify(watchTransform, times(1)).execute(context, payload); + verify(action, times(1)).execute("_action", context, payload); + } + + public void testExecuteInnerThrottled() throws Exception { + DateTime now = now(UTC); + Watch watch = mock(Watch.class); + ScheduleTriggerEvent event = new ScheduleTriggerEvent("_id", now, now); + TriggeredExecutionContext context = new TriggeredExecutionContext(watch.id(), now, event, timeValueSeconds(5)); + context.ensureWatchExists(() -> watch); + + Condition.Result conditionResult = InternalAlwaysCondition.RESULT_INSTANCE; + ExecutableCondition condition = mock(ExecutableCondition.class); + when(condition.execute(any(WatchExecutionContext.class))).thenReturn(conditionResult); + + // action throttler + Throttler.Result throttleResult = mock(Throttler.Result.class); + when(throttleResult.throttle()).thenReturn(true); + when(throttleResult.reason()).thenReturn("_throttle_reason"); + ActionThrottler throttler = mock(ActionThrottler.class); + when(throttler.throttle("_action", context)).thenReturn(throttleResult); + + // unused with throttle + ExecutableCondition actionCondition = mock(ExecutableCondition.class); + ExecutableTransform actionTransform = mock(ExecutableTransform.class); + + ExecutableAction action = mock(ExecutableAction.class); + when(action.type()).thenReturn("_type"); + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action); + WatchStatus watchStatus = new WatchStatus(new DateTime(clock.millis()), singletonMap("_action", new ActionStatus(now))); + + when(watch.input()).thenReturn(input); + when(watch.condition()).thenReturn(condition); + 
when(watch.actions()).thenReturn(Arrays.asList(actionWrapper)); + when(watch.status()).thenReturn(watchStatus); + + WatchRecord watchRecord = executionService.executeInner(context); + assertThat(watchRecord.result().inputResult(), sameInstance(inputResult)); + assertThat(watchRecord.result().conditionResult(), sameInstance(conditionResult)); + assertThat(watchRecord.result().transformResult(), nullValue()); + assertThat(watchRecord.result().actionsResults().size(), is(1)); + ActionWrapperResult result = watchRecord.result().actionsResults().get("_action"); + assertThat(result, notNullValue()); + assertThat(result.id(), is("_action")); + assertThat(result.condition(), nullValue()); + assertThat(result.transform(), nullValue()); + assertThat(result.action(), instanceOf(Action.Result.Throttled.class)); + Action.Result.Throttled throttled = (Action.Result.Throttled) result.action(); + assertThat(throttled.reason(), is("_throttle_reason")); + + verify(condition, times(1)).execute(context); + verify(throttler, times(1)).throttle("_action", context); + verify(actionCondition, never()).execute(context); + verify(actionTransform, never()).execute(context, payload); + } + + public void testExecuteInnerConditionNotMet() throws Exception { + DateTime now = now(UTC); + Watch watch = mock(Watch.class); + ScheduleTriggerEvent event = new ScheduleTriggerEvent("_id", now, now); + TriggeredExecutionContext context = new TriggeredExecutionContext(watch.id(), now, event, timeValueSeconds(5)); + context.ensureWatchExists(() -> watch); + + Condition.Result conditionResult = InternalAlwaysCondition.RESULT_INSTANCE; + ExecutableCondition condition = mock(ExecutableCondition.class); + when(condition.execute(any(WatchExecutionContext.class))).thenReturn(conditionResult); + + // action throttler + Throttler.Result throttleResult = mock(Throttler.Result.class); + when(throttleResult.throttle()).thenReturn(false); + ActionThrottler throttler = mock(ActionThrottler.class); + when(throttler.throttle("_action", context)).thenReturn(throttleResult); + + // action condition (always fails) + Condition.Result actionConditionResult = mock(Condition.Result.class); + // note: sometimes it can be met _with_ success + if (randomBoolean()) { + when(actionConditionResult.status()).thenReturn(Condition.Result.Status.SUCCESS); + } else { + when(actionConditionResult.status()).thenReturn(Condition.Result.Status.FAILURE); + } + when(actionConditionResult.met()).thenReturn(false); + ExecutableCondition actionCondition = mock(ExecutableCondition.class); + when(actionCondition.execute(context)).thenReturn(actionConditionResult); + + // unused with failed condition + ExecutableTransform actionTransform = mock(ExecutableTransform.class); + + ExecutableAction action = mock(ExecutableAction.class); + when(action.type()).thenReturn("_type"); + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action); + WatchStatus watchStatus = new WatchStatus(new DateTime(clock.millis()), singletonMap("_action", new ActionStatus(now))); + + when(watch.input()).thenReturn(input); + when(watch.condition()).thenReturn(condition); + when(watch.actions()).thenReturn(Arrays.asList(actionWrapper)); + when(watch.status()).thenReturn(watchStatus); + + WatchRecord watchRecord = executionService.executeInner(context); + assertThat(watchRecord.result().inputResult(), sameInstance(inputResult)); + assertThat(watchRecord.result().conditionResult(), sameInstance(conditionResult)); + 
assertThat(watchRecord.result().transformResult(), nullValue()); + assertThat(watchRecord.result().actionsResults().size(), is(1)); + ActionWrapperResult result = watchRecord.result().actionsResults().get("_action"); + assertThat(result, notNullValue()); + assertThat(result.id(), is("_action")); + assertThat(result.condition(), sameInstance(actionConditionResult)); + assertThat(result.transform(), nullValue()); + assertThat(result.action(), instanceOf(Action.Result.ConditionFailed.class)); + Action.Result.ConditionFailed conditionFailed = (Action.Result.ConditionFailed) result.action(); + assertThat(conditionFailed.reason(), is("condition not met. skipping")); + + verify(condition, times(1)).execute(context); + verify(throttler, times(1)).throttle("_action", context); + verify(actionCondition, times(1)).execute(context); + verify(actionTransform, never()).execute(context, payload); + } + + public void testExecuteInnerConditionNotMetDueToException() throws Exception { + DateTime now = DateTime.now(UTC); + Watch watch = mock(Watch.class); + when(watch.id()).thenReturn(getTestName()); + ScheduleTriggerEvent event = new ScheduleTriggerEvent("_id", now, now); + TriggeredExecutionContext context = new TriggeredExecutionContext(watch.id(), now, event, timeValueSeconds(5)); + context.ensureWatchExists(() -> watch); + + Condition.Result conditionResult = InternalAlwaysCondition.RESULT_INSTANCE; + ExecutableCondition condition = mock(ExecutableCondition.class); + when(condition.execute(any(WatchExecutionContext.class))).thenReturn(conditionResult); + + // action throttler + Throttler.Result throttleResult = mock(Throttler.Result.class); + when(throttleResult.throttle()).thenReturn(false); + ActionThrottler throttler = mock(ActionThrottler.class); + when(throttler.throttle("_action", context)).thenReturn(throttleResult); + + // action condition (always fails) + ExecutableCondition actionCondition = mock(ExecutableCondition.class); + when(actionCondition.execute(context)).thenThrow(new IllegalArgumentException("[expected] failed for test")); + + // unused with failed condition + ExecutableTransform actionTransform = mock(ExecutableTransform.class); + + ExecutableAction action = mock(ExecutableAction.class); + when(action.type()).thenReturn("_type"); + when(action.logger()).thenReturn(logger); + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action); + WatchStatus watchStatus = new WatchStatus(new DateTime(clock.millis()), singletonMap("_action", new ActionStatus(now))); + + when(watch.input()).thenReturn(input); + when(watch.condition()).thenReturn(condition); + when(watch.actions()).thenReturn(Collections.singletonList(actionWrapper)); + when(watch.status()).thenReturn(watchStatus); + + WatchRecord watchRecord = executionService.executeInner(context); + assertThat(watchRecord.result().inputResult(), sameInstance(inputResult)); + assertThat(watchRecord.result().conditionResult(), sameInstance(conditionResult)); + assertThat(watchRecord.result().transformResult(), nullValue()); + assertThat(watchRecord.result().actionsResults().size(), is(1)); + ActionWrapperResult result = watchRecord.result().actionsResults().get("_action"); + assertThat(result, notNullValue()); + assertThat(result.id(), is("_action")); + assertThat(result.condition(), nullValue()); + assertThat(result.transform(), nullValue()); + assertThat(result.action(), instanceOf(Action.Result.ConditionFailed.class)); + Action.Result.ConditionFailed conditionFailed = 
(Action.Result.ConditionFailed) result.action(); + assertThat(conditionFailed.reason(), is("condition failed. skipping: [expected] failed for test")); + + verify(condition, times(1)).execute(context); + verify(throttler, times(1)).throttle("_action", context); + verify(actionCondition, times(1)).execute(context); + verify(actionTransform, never()).execute(context, payload); + } + + public void testExecuteConditionNotMet() throws Exception { + DateTime now = DateTime.now(UTC); + Watch watch = mock(Watch.class); + ScheduleTriggerEvent event = new ScheduleTriggerEvent("_id", now, now); + TriggeredExecutionContext context = new TriggeredExecutionContext(watch.id(), now, event, timeValueSeconds(5)); + context.ensureWatchExists(() -> watch); + + Condition.Result conditionResult = NeverCondition.RESULT_INSTANCE; + ExecutableCondition condition = mock(ExecutableCondition.class); + when(condition.execute(any(WatchExecutionContext.class))).thenReturn(conditionResult); + + // watch level transform + ExecutableTransform watchTransform = mock(ExecutableTransform.class); + + // action throttler + ActionThrottler throttler = mock(ActionThrottler.class); + ExecutableCondition actionCondition = mock(ExecutableCondition.class); + ExecutableTransform actionTransform = mock(ExecutableTransform.class); + ExecutableAction action = mock(ExecutableAction.class); + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, actionCondition, actionTransform, action); + + WatchStatus watchStatus = new WatchStatus(new DateTime(clock.millis()), singletonMap("_action", new ActionStatus(now))); + + when(watch.input()).thenReturn(input); + when(watch.condition()).thenReturn(condition); + when(watch.transform()).thenReturn(watchTransform); + when(watch.actions()).thenReturn(Collections.singletonList(actionWrapper)); + when(watch.status()).thenReturn(watchStatus); + + WatchRecord watchRecord = executionService.executeInner(context); + assertThat(watchRecord.result().inputResult(), sameInstance(inputResult)); + assertThat(watchRecord.result().conditionResult(), sameInstance(conditionResult)); + assertThat(watchRecord.result().transformResult(), nullValue()); + assertThat(watchRecord.result().actionsResults().size(), is(0)); + + verify(condition, times(1)).execute(context); + verify(watchTransform, never()).execute(context, payload); + verify(throttler, never()).throttle("_action", context); + verify(actionTransform, never()).execute(context, payload); + verify(action, never()).execute("_action", context, payload); + } + + public void testThatTriggeredWatchDeletionWorksOnExecutionRejection() throws Exception { + Watch watch = mock(Watch.class); + when(watch.id()).thenReturn("foo"); + WatchStatus status = new WatchStatus(DateTime.now(UTC), Collections.emptyMap()); + when(watch.status()).thenReturn(status); + GetResponse getResponse = mock(GetResponse.class); + when(getResponse.isExists()).thenReturn(true); + when(getResponse.getId()).thenReturn("foo"); + mockGetWatchResponse(client, "foo", getResponse); + when(parser.parseWithSecrets(eq("foo"), eq(true), any(), any(), any())).thenReturn(watch); + + // execute needs to fail as well as storing the history + doThrow(new EsRejectedExecutionException()).when(executor).execute(any()); + doThrow(new ElasticsearchException("whatever")).when(historyStore).forcePut(any()); + + Wid wid = new Wid(watch.id(), now()); + + TriggeredWatch triggeredWatch = new TriggeredWatch(wid, new ScheduleTriggerEvent(now() ,now())); + 
executionService.executeTriggeredWatches(Collections.singleton(triggeredWatch)); + + verify(triggeredWatchStore, times(1)).delete(wid); + ArgumentCaptor captor = ArgumentCaptor.forClass(WatchRecord.class); + verify(historyStore, times(1)).forcePut(captor.capture()); + assertThat(captor.getValue().state(), is(ExecutionState.THREADPOOL_REJECTION)); + } + + public void testThatTriggeredWatchDeletionHappensOnlyIfWatchExists() throws Exception { + Watch watch = mock(Watch.class); + when(watch.id()).thenReturn("_id"); + GetResponse getResponse = mock(GetResponse.class); + when(getResponse.isExists()).thenReturn(true); + mockGetWatchResponse(client, "_id", getResponse); + + DateTime now = new DateTime(clock.millis()); + ScheduleTriggerEvent event = new ScheduleTriggerEvent("_id", now, now); + WatchExecutionContext context = ManualExecutionContext.builder(watch, false, new ManualTriggerEvent("foo", event), + timeValueSeconds(5)).build(); + + // action throttler, no throttling + Throttler.Result throttleResult = mock(Throttler.Result.class); + when(throttleResult.throttle()).thenReturn(false); + ActionThrottler throttler = mock(ActionThrottler.class); + when(throttler.throttle("_action", context)).thenReturn(throttleResult); + + // the action + Action.Result actionResult = mock(Action.Result.class); + when(actionResult.type()).thenReturn("_action_type"); + when(actionResult.status()).thenReturn(Action.Result.Status.SUCCESS); + ExecutableAction action = mock(ExecutableAction.class); + when(action.type()).thenReturn("MY_AWESOME_TYPE"); + when(action.execute("_action", context, payload)).thenReturn(actionResult); + + ActionWrapper actionWrapper = new ActionWrapper("_action", throttler, null, null, action); + + WatchStatus watchStatus = new WatchStatus(now, singletonMap("_action", new ActionStatus(now))); + + when(watch.input()).thenReturn(input); + when(watch.condition()).thenReturn(InternalAlwaysCondition.INSTANCE); + when(watch.actions()).thenReturn(Collections.singletonList(actionWrapper)); + when(watch.status()).thenReturn(watchStatus); + + executionService.execute(context); + verify(triggeredWatchStore, never()).delete(any()); + } + + public void testThatSingleWatchCannotBeExecutedConcurrently() throws Exception { + WatchExecutionContext ctx = mock(WatchExecutionContext.class); + Watch watch = mock(Watch.class); + when(watch.id()).thenReturn("_id"); + when(ctx.watch()).thenReturn(watch); + Wid wid = new Wid(watch.id(), DateTime.now(UTC)); + when(ctx.id()).thenReturn(wid); + + executionService.getCurrentExecutions().put("_id", new ExecutionService.WatchExecution(ctx, Thread.currentThread())); + + executionService.execute(ctx); + + verify(ctx).abortBeforeExecution(eq(ExecutionState.NOT_EXECUTED_ALREADY_QUEUED), eq("Watch is already queued in thread pool")); + } + + public void testExecuteWatchNotFound() throws Exception { + Watch watch = mock(Watch.class); + when(watch.id()).thenReturn("_id"); + TriggeredExecutionContext context = new TriggeredExecutionContext(watch.id(), + new DateTime(0, UTC), + new ScheduleTriggerEvent(watch.id(), new DateTime(0, UTC), new DateTime(0, UTC)), + TimeValue.timeValueSeconds(5)); + + GetResponse notFoundResponse = mock(GetResponse.class); + when(notFoundResponse.isExists()).thenReturn(false); + mockGetWatchResponse(client, "_id", notFoundResponse); + + WatchRecord watchRecord = executionService.execute(context); + assertThat(watchRecord, not(nullValue())); + assertThat(watchRecord.state(), is(ExecutionState.NOT_EXECUTED_WATCH_MISSING)); + } + + public void 
testExecuteWatchIndexNotFoundException() { + Watch watch = mock(Watch.class); + when(watch.id()).thenReturn("_id"); + TriggeredExecutionContext context = new TriggeredExecutionContext(watch.id(), + new DateTime(0, UTC), + new ScheduleTriggerEvent(watch.id(), new DateTime(0, UTC), new DateTime(0, UTC)), + TimeValue.timeValueSeconds(5)); + + mockGetWatchException(client, "_id", new IndexNotFoundException(".watch")); + WatchRecord watchRecord = executionService.execute(context); + assertThat(watchRecord, not(nullValue())); + assertThat(watchRecord.state(), is(ExecutionState.NOT_EXECUTED_WATCH_MISSING)); + } + + public void testExecuteWatchParseWatchException() { + Watch watch = mock(Watch.class); + when(watch.id()).thenReturn("_id"); + TriggeredExecutionContext context = new TriggeredExecutionContext(watch.id(), + new DateTime(0, UTC), + new ScheduleTriggerEvent(watch.id(), new DateTime(0, UTC), new DateTime(0, UTC)), + TimeValue.timeValueSeconds(5)); + + IOException e = new IOException("something went wrong, i.e. index not found"); + mockGetWatchException(client, "_id", e); + + WatchRecord watchRecord = executionService.execute(context); + assertThat(watchRecord, not(nullValue())); + assertThat(watchRecord.state(), is(ExecutionState.FAILED)); + } + + public void testWatchInactive() throws Exception { + Watch watch = mock(Watch.class); + when(watch.id()).thenReturn("_id"); + WatchExecutionContext ctx = mock(WatchExecutionContext.class); + when(ctx.knownWatch()).thenReturn(true); + WatchStatus status = mock(WatchStatus.class); + when(status.state()).thenReturn(new WatchStatus.State(false, now())); + when(watch.status()).thenReturn(status); + when(ctx.watch()).thenReturn(watch); + Wid wid = new Wid(watch.id(), DateTime.now(UTC)); + when(ctx.id()).thenReturn(wid); + + GetResponse getResponse = mock(GetResponse.class); + when(getResponse.isExists()).thenReturn(true); + mockGetWatchResponse(client, "_id", getResponse); + when(parser.parseWithSecrets(eq(watch.id()), eq(true), any(), any(), any())).thenReturn(watch); + + WatchRecord.MessageWatchRecord record = mock(WatchRecord.MessageWatchRecord.class); + when(record.state()).thenReturn(ExecutionState.EXECUTION_NOT_NEEDED); + when(ctx.abortBeforeExecution(eq(ExecutionState.EXECUTION_NOT_NEEDED), eq("Watch is not active"))).thenReturn(record); + + WatchRecord watchRecord = executionService.execute(ctx); + assertThat(watchRecord.state(), is(ExecutionState.EXECUTION_NOT_NEEDED)); + } + + public void testCurrentExecutionSnapshots() throws Exception { + DateTime time = DateTime.now(UTC); + int snapshotCount = randomIntBetween(2, 8); + for (int i = 0; i < snapshotCount; i++) { + time = time.minusSeconds(10); + WatchExecutionContext ctx = createMockWatchExecutionContext("_id" + i, time); + executionService.getCurrentExecutions().put("_id" + i, new ExecutionService.WatchExecution(ctx, Thread.currentThread())); + } + + List<WatchExecutionSnapshot> snapshots = executionService.currentExecutions(); + assertThat(snapshots, hasSize(snapshotCount)); + assertThat(snapshots.get(0).watchId(), is("_id" + (snapshotCount-1))); + assertThat(snapshots.get(snapshots.size() - 1).watchId(), is("_id0")); + } + + public void testQueuedWatches() throws Exception { + Collection<Runnable> tasks = new ArrayList<>(); + DateTime time = DateTime.now(UTC); + int queuedWatchCount = randomIntBetween(2, 8); + for (int i = 0; i < queuedWatchCount; i++) { + time = time.minusSeconds(10); + WatchExecutionContext ctx = createMockWatchExecutionContext("_id" + i, time); + tasks.add(new ExecutionService.WatchExecutionTask(ctx, 
() -> logger.info("this will never be called"))); + } + + when(executor.tasks()).thenReturn(tasks.stream()); + + List queuedWatches = executionService.queuedWatches(); + assertThat(queuedWatches, hasSize(queuedWatchCount)); + assertThat(queuedWatches.get(0).watchId(), is("_id" + (queuedWatchCount-1))); + assertThat(queuedWatches.get(queuedWatches.size() - 1).watchId(), is("_id0")); + } + + public void testUpdateWatchStatusDoesNotUpdateState() throws Exception { + WatchStatus status = new WatchStatus(DateTime.now(UTC), Collections.emptyMap()); + Watch watch = new Watch("_id", new ManualTrigger(), new ExecutableNoneInput(logger), InternalAlwaysCondition.INSTANCE, null, null, + Collections.emptyList(), null, status, 1L); + + final AtomicBoolean assertionsTriggered = new AtomicBoolean(false); + doAnswer(invocation -> { + UpdateRequest request = (UpdateRequest) invocation.getArguments()[0]; + try (XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, request.doc().source().streamInput())) { + Map map = parser.map(); + Map state = ObjectPath.eval("status.state", map); + assertThat(state, is(nullValue())); + assertionsTriggered.set(true); + } + + PlainActionFuture future = PlainActionFuture.newFuture(); + future.onResponse(new UpdateResponse()); + return future; + }).when(client).update(any()); + + executionService.updateWatchStatus(watch); + + assertThat(assertionsTriggered.get(), is(true)); + } + + public void testManualWatchExecutionContextGetsAlwaysExecuted() throws Exception { + Watch watch = mock(Watch.class); + when(watch.id()).thenReturn("_id"); + + DateTime now = new DateTime(clock.millis()); + ScheduleTriggerEvent event = new ScheduleTriggerEvent("_id", now, now); + ManualExecutionContext ctx = ManualExecutionContext.builder(watch, true, + new ManualTriggerEvent("foo", event), timeValueSeconds(5)).build(); + + when(watch.input()).thenReturn(input); + Condition.Result conditionResult = InternalAlwaysCondition.RESULT_INSTANCE; + ExecutableCondition condition = mock(ExecutableCondition.class); + when(condition.execute(any(WatchExecutionContext.class))).thenReturn(conditionResult); + when(watch.condition()).thenReturn(condition); + + Action.Result actionResult = mock(Action.Result.class); + when(actionResult.type()).thenReturn("_action_type"); + when(actionResult.status()).thenReturn(Action.Result.Status.SUCCESS); + ExecutableAction action = mock(ExecutableAction.class); + when(action.logger()).thenReturn(logger); + when(action.execute(eq("_action"), eq(ctx), eq(payload))).thenReturn(actionResult); + + ActionWrapper actionWrapper = mock(ActionWrapper.class); + ActionWrapperResult actionWrapperResult = new ActionWrapperResult("_action", actionResult); + when(actionWrapper.execute(anyObject())).thenReturn(actionWrapperResult); + + when(watch.actions()).thenReturn(Collections.singletonList(actionWrapper)); + + WatchStatus status = mock(WatchStatus.class); + when(status.state()).thenReturn(new WatchStatus.State(false, now())); + when(watch.status()).thenReturn(status); + + WatchRecord watchRecord = executionService.execute(ctx); + assertThat(watchRecord.state(), is(ExecutionState.EXECUTED)); + } + + private WatchExecutionContext createMockWatchExecutionContext(String watchId, DateTime executionTime) { + WatchExecutionContext ctx = mock(WatchExecutionContext.class); + when(ctx.id()).thenReturn(new Wid(watchId, executionTime)); + when(ctx.executionTime()).thenReturn(executionTime); + 
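// stub the remaining context callbacks (execution phase, trigger event, watch, snapshot) that the snapshot and queued-watch listings read +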
when(ctx.executionPhase()).thenReturn(ExecutionPhase.INPUT); + + TriggerEvent triggerEvent = mock(TriggerEvent.class); + when(triggerEvent.triggeredTime()).thenReturn(executionTime.minusSeconds(1)); + when(ctx.triggerEvent()).thenReturn(triggerEvent); + + Watch watch = mock(Watch.class); + when(watch.id()).thenReturn(watchId); + when(ctx.watch()).thenReturn(watch); + + WatchExecutionSnapshot snapshot = new WatchExecutionSnapshot(ctx, new StackTraceElement[]{}); + when(ctx.createSnapshot(anyObject())).thenReturn(snapshot); + + return ctx; + } + + private Tuple whenCondition(final WatchExecutionContext context) { + Condition.Result conditionResult = mock(Condition.Result.class); + when(conditionResult.met()).thenReturn(true); + ExecutableCondition condition = mock(ExecutableCondition.class); + when(condition.execute(context)).thenReturn(conditionResult); + + return new Tuple<>(condition, conditionResult); + } + + private Tuple whenTransform(final WatchExecutionContext context) { + Transform.Result transformResult = mock(Transform.Result.class); + when(transformResult.payload()).thenReturn(payload); + ExecutableTransform transform = mock(ExecutableTransform.class); + when(transform.execute(context, payload)).thenReturn(transformResult); + + return new Tuple<>(transform, transformResult); + } + + private void mockGetWatchResponse(Client client, String id, GetResponse response) { + doAnswer(invocation -> { + GetRequest request = (GetRequest) invocation.getArguments()[0]; + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + if (request.id().equals(id)) { + listener.onResponse(response); + } else { + GetResult notFoundResult = new GetResult(request.index(), request.type(), request.id(), -1, false, null, null); + listener.onResponse(new GetResponse(notFoundResult)); + } + return null; + }).when(client).get(any(), any()); + } + + private void mockGetWatchException(Client client, String id, Exception e) { + doAnswer(invocation -> { + GetRequest request = (GetRequest) invocation.getArguments()[0]; + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + if (request.id().equals(id)) { + listener.onFailure(e); + } else { + GetResult notFoundResult = new GetResult(request.index(), request.type(), request.id(), -1, false, null, null); + listener.onResponse(new GetResponse(notFoundResult)); + } + return null; + }).when(client).get(any(), any()); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java new file mode 100644 index 0000000000000..07ba254dea9a3 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java @@ -0,0 +1,390 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.execution; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.search.ClearScrollResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.text.Text; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.watcher.execution.TriggeredWatchStoreField; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.watch.ClockMock; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.notification.email.EmailService; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateService; +import org.elasticsearch.xpack.watcher.test.WatcherTestUtils; +import org.elasticsearch.xpack.watcher.trigger.TriggerEngine; +import org.elasticsearch.xpack.watcher.trigger.TriggerService; +import org.elasticsearch.xpack.watcher.trigger.schedule.CronSchedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleRegistry; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent; +import org.elasticsearch.xpack.watcher.watch.WatchTests; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; + +import static java.util.Collections.singleton; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static 
org.joda.time.DateTimeZone.UTC; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; +import static org.mockito.Mockito.when; + +public class TriggeredWatchStoreTests extends ESTestCase { + + private Settings indexSettings = settings(Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + + private Client client; + private TriggeredWatch.Parser parser; + private TriggeredWatchStore triggeredWatchStore; + + @Before + public void init() { + client = mock(Client.class); + ThreadPool threadPool = mock(ThreadPool.class); + when(client.threadPool()).thenReturn(threadPool); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + parser = mock(TriggeredWatch.Parser.class); + triggeredWatchStore = new TriggeredWatchStore(Settings.EMPTY, client, parser); + triggeredWatchStore.start(); + } + + public void testFindTriggeredWatchesEmptyCollection() throws Exception { + ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("name")); + Collection triggeredWatches = triggeredWatchStore.findTriggeredWatches(Collections.emptyList(), csBuilder.build()); + assertThat(triggeredWatches, hasSize(0)); + } + + public void testValidateNoIndex() { + ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("name")); + assertThat(triggeredWatchStore.validate(csBuilder.build()), is(true)); + } + + public void testValidateNoActivePrimaryShards() throws Exception { + ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("name")); + + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + MetaData.Builder metaDataBuilder = MetaData.builder(); + + int numShards = 2 + randomInt(2); + int numStartedShards = 1; + Settings settings = settings(Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + metaDataBuilder.put(IndexMetaData.builder(TriggeredWatchStoreField.INDEX_NAME).settings(settings) + .numberOfShards(numShards).numberOfReplicas(1)); + final Index index = metaDataBuilder.get(TriggeredWatchStoreField.INDEX_NAME).getIndex(); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); + for (int i = 0; i < numShards; i++) { + final ShardRoutingState state; + final String currentNodeId; + if (numStartedShards-- > 0) { + state = ShardRoutingState.STARTED; + currentNodeId = "_node_id"; + } else { + state = ShardRoutingState.UNASSIGNED; + currentNodeId = null; + } + ShardId shardId = new ShardId(index, 0); + indexRoutingTableBuilder.addIndexShard(new IndexShardRoutingTable.Builder(shardId) + .addShard(TestShardRouting.newShardRouting(shardId, currentNodeId, null, true, state, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, ""))) + .build()); + indexRoutingTableBuilder.addReplica(); + } + routingTableBuilder.add(indexRoutingTableBuilder.build()); + + csBuilder.metaData(metaDataBuilder); + csBuilder.routingTable(routingTableBuilder.build()); + ClusterState cs = csBuilder.build(); + + assertThat(triggeredWatchStore.validate(cs), is(false)); + } + + public void testFindTriggeredWatchesGoodCase() throws Exception { + 
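// build a cluster state with a started triggered-watches shard, stub the refresh/search/scroll round trips, and expect one stored triggered watch per watch that is asked for +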
ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name")); + + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + MetaData.Builder metaDataBuilder = MetaData.builder(); + metaDataBuilder.put(IndexMetaData.builder(TriggeredWatchStoreField.INDEX_NAME).settings(indexSettings)); + final Index index = metaDataBuilder.get(TriggeredWatchStoreField.INDEX_NAME).getIndex(); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); + ShardId shardId = new ShardId(index, 0); + indexRoutingTableBuilder.addIndexShard(new IndexShardRoutingTable.Builder(shardId) + .addShard(TestShardRouting.newShardRouting(shardId, "_node_id", null, true, ShardRoutingState.STARTED)) + .build()); + indexRoutingTableBuilder.addReplica(); + routingTableBuilder.add(indexRoutingTableBuilder.build()); + csBuilder.metaData(metaDataBuilder); + csBuilder.routingTable(routingTableBuilder.build()); + ClusterState cs = csBuilder.build(); + + RefreshResponse refreshResponse = mockRefreshResponse(1, 1); + AdminClient adminClient = mock(AdminClient.class); + when(client.admin()).thenReturn(adminClient); + IndicesAdminClient indicesAdminClient = mock(IndicesAdminClient.class); + when(adminClient.indices()).thenReturn(indicesAdminClient); + PlainActionFuture future = PlainActionFuture.newFuture(); + when(indicesAdminClient.refresh(any())).thenReturn(future); + future.onResponse(refreshResponse); + + SearchResponse searchResponse1 = mock(SearchResponse.class); + when(searchResponse1.getSuccessfulShards()).thenReturn(1); + when(searchResponse1.getTotalShards()).thenReturn(1); + BytesArray source = new BytesArray("{}"); + SearchHit hit = new SearchHit(0, "first_foo", new Text(TriggeredWatchStoreField.DOC_TYPE), null); + hit.version(1L); + hit.shard(new SearchShardTarget("_node_id", index, 0, null)); + hit.sourceRef(source); + SearchHits hits = new SearchHits(new SearchHit[]{hit}, 1, 1.0f); + when(searchResponse1.getHits()).thenReturn(hits); + when(searchResponse1.getScrollId()).thenReturn("_scrollId"); + PlainActionFuture searchFuture = PlainActionFuture.newFuture(); + when(client.search(any(SearchRequest.class))).thenReturn(searchFuture); + searchFuture.onResponse(searchResponse1); + + // First return a scroll response with a single hit and then with no hits + hit = new SearchHit(0, "second_foo", new Text(TriggeredWatchStoreField.DOC_TYPE), null); + hit.version(1L); + hit.shard(new SearchShardTarget("_node_id", index, 0, null)); + hit.sourceRef(source); + hits = new SearchHits(new SearchHit[]{hit}, 1, 1.0f); + SearchResponse searchResponse2 = new SearchResponse( + new InternalSearchResponse(hits, null, null, null, false, null, 1), "_scrollId1", 1, 1, 0, 1, null, null); + SearchResponse searchResponse3 = new SearchResponse(InternalSearchResponse.empty(), "_scrollId2", 1, 1, 0, 1, null, null); + + doAnswer(invocation -> { + SearchScrollRequest request = (SearchScrollRequest) invocation.getArguments()[0]; + PlainActionFuture searchScrollFuture = PlainActionFuture.newFuture(); + if (request.scrollId().equals("_scrollId")) { + searchScrollFuture.onResponse(searchResponse2); + } else if (request.scrollId().equals("_scrollId1")) { + searchScrollFuture.onResponse(searchResponse3); + } else { + searchScrollFuture.onFailure(new ElasticsearchException("test issue")); + } + return searchScrollFuture; + }).when(client).searchScroll(any()); + + TriggeredWatch triggeredWatch = mock(TriggeredWatch.class); + when(parser.parse(eq("_id"), eq(1L), 
any(BytesReference.class))).thenReturn(triggeredWatch); + + PlainActionFuture clearScrollResponseFuture = PlainActionFuture.newFuture(); + when(client.clearScroll(any())).thenReturn(clearScrollResponseFuture); + clearScrollResponseFuture.onResponse(new ClearScrollResponse(true, 1)); + + assertThat(triggeredWatchStore.validate(cs), is(true)); + DateTime now = DateTime.now(UTC); + ScheduleTriggerEvent triggerEvent = new ScheduleTriggerEvent(now, now); + + Watch watch1 = mock(Watch.class); + when(watch1.id()).thenReturn("first"); + TriggeredWatch triggeredWatch1 = new TriggeredWatch(new Wid("first", now), triggerEvent); + when(parser.parse(eq("first_foo"), anyLong(), eq(source))).thenReturn(triggeredWatch1); + + Watch watch2 = mock(Watch.class); + when(watch2.id()).thenReturn("second"); + TriggeredWatch triggeredWatch2 = new TriggeredWatch(new Wid("second", now), triggerEvent); + when(parser.parse(eq("second_foo"), anyLong(), eq(source))).thenReturn(triggeredWatch2); + + Collection watches = new ArrayList<>(); + watches.add(watch1); + if (randomBoolean()) { + watches.add(watch2); + } + Collection triggeredWatches = triggeredWatchStore.findTriggeredWatches(watches, cs); + assertThat(triggeredWatches, notNullValue()); + assertThat(triggeredWatches, hasSize(watches.size())); + + verify(client.admin().indices(), times(1)).refresh(any()); + verify(client, times(1)).search(any(SearchRequest.class)); + verify(client, times(2)).searchScroll(any()); + verify(client, times(1)).clearScroll(any()); + } + + // the elasticsearch migration helper is doing reindex using aliases, so we have to + // make sure that the watch store supports a single alias pointing to the watch index + public void testLoadStoreAsAlias() throws Exception { + ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name")); + + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + MetaData.Builder metaDataBuilder = MetaData.builder(); + metaDataBuilder.put(IndexMetaData.builder("triggered-watches-alias").settings(indexSettings) + .putAlias(new AliasMetaData.Builder(TriggeredWatchStoreField.INDEX_NAME).build())); + final Index index = metaDataBuilder.get("triggered-watches-alias").getIndex(); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); + ShardId shardId = new ShardId(index, 0); + indexRoutingTableBuilder.addIndexShard(new IndexShardRoutingTable.Builder(shardId) + .addShard(TestShardRouting.newShardRouting(shardId, "_node_id", null, true, ShardRoutingState.STARTED)) + .build()); + indexRoutingTableBuilder.addReplica(); + routingTableBuilder.add(indexRoutingTableBuilder.build()); + csBuilder.metaData(metaDataBuilder); + csBuilder.routingTable(routingTableBuilder.build()); + ClusterState cs = csBuilder.build(); + + assertThat(triggeredWatchStore.validate(cs), is(true)); + } + + // the elasticsearch migration helper is doing reindex using aliases, so we have to + // make sure that the watch store supports only a single index in an alias + public void testLoadingFailsWithTwoAliases() throws Exception { + ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name")); + + MetaData.Builder metaDataBuilder = MetaData.builder(); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + metaDataBuilder.put(IndexMetaData.builder("triggered-watches-alias").settings(indexSettings) + .putAlias(new AliasMetaData.Builder(TriggeredWatchStoreField.INDEX_NAME).build())); + 
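// a second concrete index behind the same triggered-watches alias makes the alias ambiguous, so validate() below is expected to return false +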
metaDataBuilder.put(IndexMetaData.builder("whatever").settings(indexSettings) + .putAlias(new AliasMetaData.Builder(TriggeredWatchStoreField.INDEX_NAME).build())); + + final Index index = metaDataBuilder.get("triggered-watches-alias").getIndex(); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); + indexRoutingTableBuilder.addIndexShard(new IndexShardRoutingTable.Builder(new ShardId(index, 0)) + .addShard(TestShardRouting.newShardRouting("triggered-watches-alias", 0, "_node_id", null, true, ShardRoutingState.STARTED)) + .build()); + indexRoutingTableBuilder.addReplica(); + final Index otherIndex = metaDataBuilder.get("whatever").getIndex(); + IndexRoutingTable.Builder otherIndexRoutingTableBuilder = IndexRoutingTable.builder(otherIndex); + otherIndexRoutingTableBuilder.addIndexShard(new IndexShardRoutingTable.Builder(new ShardId(index, 0)) + .addShard(TestShardRouting.newShardRouting("whatever", 0, "_node_id", null, true, ShardRoutingState.STARTED)) + .build()); + + csBuilder.metaData(metaDataBuilder); + csBuilder.routingTable(routingTableBuilder.build()); + ClusterState cs = csBuilder.build(); + + assertThat(triggeredWatchStore.validate(cs), is(false)); + } + + // this is a special condition that could lead to an NPE in earlier versions + public void testTriggeredWatchesIndexIsClosed() throws Exception { + ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name")); + + MetaData.Builder metaDataBuilder = MetaData.builder(); + metaDataBuilder.put(IndexMetaData.builder(TriggeredWatchStoreField.INDEX_NAME) + .settings(indexSettings) + .state(IndexMetaData.State.CLOSE)); + csBuilder.metaData(metaDataBuilder); + + assertThat(triggeredWatchStore.validate(csBuilder.build()), is(false)); + } + + public void testTriggeredWatchesIndexDoesNotExistOnStartup() throws Exception { + ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name")); + ClusterState cs = csBuilder.build(); + assertThat(triggeredWatchStore.validate(cs), is(true)); + Watch watch = mock(Watch.class); + triggeredWatchStore.findTriggeredWatches(Collections.singletonList(watch), cs); + verifyZeroInteractions(client); + } + + public void testIndexNotFoundButInMetaData() throws Exception { + ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name")); + MetaData.Builder metaDataBuilder = MetaData.builder() + .put(IndexMetaData.builder(TriggeredWatchStoreField.INDEX_NAME).settings(indexSettings)); + csBuilder.metaData(metaDataBuilder); + + ClusterState cs = csBuilder.build(); + Watch watch = mock(Watch.class); + + AdminClient adminClient = mock(AdminClient.class); + when(client.admin()).thenReturn(adminClient); + IndicesAdminClient indicesAdminClient = mock(IndicesAdminClient.class); + when(adminClient.indices()).thenReturn(indicesAdminClient); + PlainActionFuture future = PlainActionFuture.newFuture(); + when(indicesAdminClient.refresh(any())).thenReturn(future); + future.onFailure(new IndexNotFoundException(TriggeredWatchStoreField.INDEX_NAME)); + + Collection triggeredWatches = triggeredWatchStore.findTriggeredWatches(Collections.singletonList(watch), cs); + assertThat(triggeredWatches, hasSize(0)); + } + + public void testTriggeredWatchParser() throws Exception { + EmailService emailService = mock(EmailService.class); + HttpClient httpClient = mock(HttpClient.class); + WatcherSearchTemplateService searchTemplateService = mock(WatcherSearchTemplateService.class); + + Watch watch = WatcherTestUtils.createTestWatch("fired_test", 
client, httpClient, emailService, searchTemplateService, logger); + ScheduleTriggerEvent event = new ScheduleTriggerEvent(watch.id(), DateTime.now(DateTimeZone.UTC), DateTime.now(DateTimeZone.UTC)); + Wid wid = new Wid("_record", DateTime.now(DateTimeZone.UTC)); + TriggeredWatch triggeredWatch = new TriggeredWatch(wid, event); + XContentBuilder jsonBuilder = XContentFactory.jsonBuilder(); + triggeredWatch.toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); + + ScheduleRegistry scheduleRegistry = new ScheduleRegistry(Collections.singleton(new CronSchedule.Parser())); + TriggerEngine triggerEngine = new WatchTests.ParseOnlyScheduleTriggerEngine(Settings.EMPTY, scheduleRegistry, new ClockMock()); + TriggerService triggerService = new TriggerService(Settings.EMPTY, singleton(triggerEngine)); + + TriggeredWatch.Parser parser = new TriggeredWatch.Parser(Settings.EMPTY, triggerService); + TriggeredWatch parsedTriggeredWatch = parser.parse(triggeredWatch.id().value(), 0, BytesReference.bytes(jsonBuilder)); + + XContentBuilder jsonBuilder2 = XContentFactory.jsonBuilder(); + parsedTriggeredWatch.toXContent(jsonBuilder2, ToXContent.EMPTY_PARAMS); + + assertThat(BytesReference.bytes(jsonBuilder).utf8ToString(), equalTo(BytesReference.bytes(jsonBuilder2).utf8ToString())); + } + + private RefreshResponse mockRefreshResponse(int total, int successful) { + RefreshResponse refreshResponse = mock(RefreshResponse.class); + when(refreshResponse.getTotalShards()).thenReturn(total); + when(refreshResponse.getSuccessfulShards()).thenReturn(successful); + return refreshResponse; + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java new file mode 100644 index 0000000000000..189bf1d5b05ca --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java @@ -0,0 +1,273 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.history; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.script.MockScriptPlugin; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; +import org.elasticsearch.xpack.core.watcher.condition.Condition; +import org.elasticsearch.xpack.core.watcher.condition.ExecutableCondition; +import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; +import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; +import org.elasticsearch.xpack.watcher.condition.CompareCondition; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.condition.NeverCondition; +import org.elasticsearch.xpack.watcher.condition.ScriptCondition; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Function; + +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +/** + * This test makes sure per-action conditions are honored. 
+ */ +@TestLogging("org.elasticsearch.xpack.watcher:DEBUG,org.elasticsearch.xpack.watcher.WatcherIndexingListener:TRACE") +public class HistoryActionConditionTests extends AbstractWatcherIntegrationTestCase { + + private final Input input = simpleInput("key", 15).build(); + + private final ExecutableCondition scriptConditionPasses = mockScriptCondition("return true;"); + private final ExecutableCondition compareConditionPasses = new CompareCondition("ctx.payload.key", CompareCondition.Op.GTE, 15); + private final ExecutableCondition conditionPasses = randomFrom(InternalAlwaysCondition.INSTANCE, + scriptConditionPasses, compareConditionPasses); + + private final ExecutableCondition scriptConditionFails = mockScriptCondition("return false;"); + private final ExecutableCondition compareConditionFails = new CompareCondition("ctx.payload.key", CompareCondition.Op.LT, 15); + private final ExecutableCondition conditionFails = randomFrom(NeverCondition.INSTANCE, scriptConditionFails, compareConditionFails); + + @Override + protected List> pluginTypes() { + List> types = super.pluginTypes(); + types.add(CustomScriptPlugin.class); + return types; + } + + public static class CustomScriptPlugin extends MockScriptPlugin { + + @Override + protected Map, Object>> pluginScripts() { + Map, Object>> scripts = new HashMap<>(); + + scripts.put("return true;", vars -> true); + scripts.put("return false;", vars -> false); + scripts.put("throw new IllegalStateException('failed');", vars -> { + throw new IllegalStateException("[expected] failed hard"); + }); + + return scripts; + } + + } + + /** + * A hard failure is where an exception is thrown by the script condition. + */ + @SuppressWarnings("unchecked") + public void testActionConditionWithHardFailures() throws Exception { + final String id = "testActionConditionWithHardFailures"; + + final ExecutableCondition scriptConditionFailsHard = mockScriptCondition("throw new IllegalStateException('failed');"); + final List actionConditionsWithFailure = + Arrays.asList(scriptConditionFailsHard, conditionPasses, InternalAlwaysCondition.INSTANCE); + + Collections.shuffle(actionConditionsWithFailure, random()); + + final int failedIndex = actionConditionsWithFailure.indexOf(scriptConditionFailsHard); + + putAndTriggerWatch(id, input, actionConditionsWithFailure.toArray(new Condition[actionConditionsWithFailure.size()])); + + flush(); + + assertWatchWithMinimumActionsCount(id, ExecutionState.EXECUTED, 1); + + // only one action should have failed via condition + final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id))); + assertThat(response.getHits().getTotalHits(), is(1L)); + + final SearchHit hit = response.getHits().getAt(0); + final List actions = getActionsFromHit(hit.getSourceAsMap()); + + for (int i = 0; i < actionConditionsWithFailure.size(); ++i) { + final Map action = (Map)actions.get(i); + final Map condition = (Map)action.get("condition"); + final Map logging = (Map)action.get("logging"); + + assertThat(action.get("id"), is("action" + i)); + + if (i == failedIndex) { + assertThat(action.get("status"), is("condition_failed")); + assertThat(action.get("reason"), is("condition failed. 
skipping: [expected] failed hard")); + assertThat(condition, nullValue()); + assertThat(logging, nullValue()); + } else { + assertThat(condition.get("type"), is(actionConditionsWithFailure.get(i).type())); + + assertThat(action.get("status"), is("success")); + assertThat(condition.get("met"), is(true)); + assertThat(action.get("reason"), nullValue()); + assertThat(logging.get("logged_text"), is(Integer.toString(i))); + } + } + } + + @SuppressWarnings("unchecked") + public void testActionConditionWithFailures() throws Exception { + final String id = "testActionConditionWithFailures"; + final ExecutableCondition[] actionConditionsWithFailure = new ExecutableCondition[] { + conditionFails, + conditionPasses, + InternalAlwaysCondition.INSTANCE + }; + Collections.shuffle(Arrays.asList(actionConditionsWithFailure), random()); + + final int failedIndex = Arrays.asList(actionConditionsWithFailure).indexOf(conditionFails); + + putAndTriggerWatch(id, input, actionConditionsWithFailure); + assertWatchWithMinimumActionsCount(id, ExecutionState.EXECUTED, 1); + + // only one action should have failed via condition + final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id))); + assertThat(response.getHits().getTotalHits(), is(1L)); + + final SearchHit hit = response.getHits().getAt(0); + final List actions = getActionsFromHit(hit.getSourceAsMap()); + + for (int i = 0; i < actionConditionsWithFailure.length; ++i) { + final Map action = (Map)actions.get(i); + final Map condition = (Map)action.get("condition"); + final Map logging = (Map)action.get("logging"); + + assertThat(action.get("id"), is("action" + i)); + assertThat(condition.get("type"), is(actionConditionsWithFailure[i].type())); + + if (i == failedIndex) { + assertThat(action.get("status"), is("condition_failed")); + assertThat(condition.get("met"), is(false)); + assertThat(action.get("reason"), is("condition not met. 
skipping")); + assertThat(logging, nullValue()); + } else { + assertThat(action.get("status"), is("success")); + assertThat(condition.get("met"), is(true)); + assertThat(action.get("reason"), nullValue()); + assertThat(logging.get("logged_text"), is(Integer.toString(i))); + } + } + } + + @SuppressWarnings("unchecked") + public void testActionCondition() throws Exception { + final String id = "testActionCondition"; + final List actionConditions = new ArrayList<>(); + //actionConditions.add(conditionPasses); + actionConditions.add(InternalAlwaysCondition.INSTANCE); + + /* + if (randomBoolean()) { + actionConditions.add(InternalAlwaysCondition.INSTANCE); + } + + Collections.shuffle(actionConditions, random()); + */ + + putAndTriggerWatch(id, input, actionConditions.toArray(new Condition[actionConditions.size()])); + + flush(); + + assertWatchWithMinimumActionsCount(id, ExecutionState.EXECUTED, 1); + + // all actions should be successful + final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id))); + assertThat(response.getHits().getTotalHits(), is(1L)); + + final SearchHit hit = response.getHits().getAt(0); + final List actions = getActionsFromHit(hit.getSourceAsMap()); + + for (int i = 0; i < actionConditions.size(); ++i) { + final Map action = (Map)actions.get(i); + final Map condition = (Map)action.get("condition"); + final Map logging = (Map)action.get("logging"); + + assertThat(action.get("id"), is("action" + i)); + assertThat(action.get("status"), is("success")); + assertThat(condition.get("type"), is(actionConditions.get(i).type())); + assertThat(condition.get("met"), is(true)); + assertThat(action.get("reason"), nullValue()); + assertThat(logging.get("logged_text"), is(Integer.toString(i))); + } + } + + /** + * Get the "actions" from the Watch History hit. + * + * @param source The hit's source. + * @return The list of "actions" + */ + @SuppressWarnings("unchecked") + private List getActionsFromHit(final Map source) { + final Map result = (Map)source.get("result"); + + return (List)result.get("actions"); + } + + /** + * Create a Watch with the specified {@code id} and {@code input}. + *

+ * The {@code actionConditions} are + * + * @param id The ID of the Watch + * @param input The input to use for the Watch + * @param actionConditions The conditions to add to the Watch + */ + private void putAndTriggerWatch(final String id, final Input input, final Condition... actionConditions) { + WatchSourceBuilder source = watchBuilder() + .trigger(schedule(interval("5s"))) + .input(input) + .condition(InternalAlwaysCondition.INSTANCE); + + for (int i = 0; i < actionConditions.length; ++i) { + source.addAction("action" + i, actionConditions[i], loggingAction(Integer.toString(i))); + } + + PutWatchResponse putWatchResponse = watcherClient().preparePutWatch(id).setSource(source).get(); + + assertThat(putWatchResponse.isCreated(), is(true)); + + timeWarp().trigger(id); + } + + /** + * Create an inline script using the {@link CustomScriptPlugin}. + * + * @param inlineScript The script to "compile" and run + * @return Never {@code null} + */ + private static ExecutableCondition mockScriptCondition(String inlineScript) { + Script script = new Script(ScriptType.INLINE, MockScriptPlugin.NAME, inlineScript, Collections.emptyMap()); + return new ScriptCondition(script); + } + +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java new file mode 100644 index 0000000000000..f2a0f4c311ad6 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryStoreTests.java @@ -0,0 +1,175 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.history; + +import org.apache.http.HttpStatus; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.DocWriteRequest.OpType; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; +import org.elasticsearch.xpack.core.watcher.actions.ActionWrapperResult; +import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionResult; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.history.WatchRecord; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; +import org.elasticsearch.xpack.watcher.actions.jira.JiraAction; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.notification.jira.JiraAccount; +import org.elasticsearch.xpack.watcher.notification.jira.JiraIssue; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent; +import org.joda.time.DateTime; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.core.watcher.history.HistoryStoreField.getHistoryIndexNameForTime; +import static org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField.INDEX_TEMPLATE_VERSION; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.core.IsEqual.equalTo; +import static org.joda.time.DateTimeZone.UTC; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class HistoryStoreTests extends ESTestCase { + + private HistoryStore historyStore; + private Client client; + + @Before + public void init() { + client = mock(Client.class); + ThreadPool threadPool = mock(ThreadPool.class); + when(client.threadPool()).thenReturn(threadPool); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + historyStore = new HistoryStore(Settings.EMPTY, client); + historyStore.start(); + } + + public void testPut() throws Exception { + DateTime now = new DateTime(0, UTC); + Wid wid = new Wid("_name", now); + String index = getHistoryIndexNameForTime(now); + ScheduleTriggerEvent event = new ScheduleTriggerEvent(wid.watchId(), now, now); + WatchRecord watchRecord = new WatchRecord.MessageWatchRecord(wid, event, ExecutionState.EXECUTED, null, randomAlphaOfLength(10)); + + IndexResponse indexResponse = mock(IndexResponse.class); + + doAnswer(invocation -> { + 
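// respond successfully only when the id, doc type, create op type and history index of the request match the expected watch record; any other request fails the returned future +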
IndexRequest request = (IndexRequest) invocation.getArguments()[0]; + PlainActionFuture indexFuture = PlainActionFuture.newFuture(); + if (request.id().equals(wid.value()) && request.type().equals(HistoryStore.DOC_TYPE) && request.opType() == OpType.CREATE + && request.index().equals(index)) { + indexFuture.onResponse(indexResponse); + } else { + indexFuture.onFailure(new ElasticsearchException("test issue")); + } + return indexFuture; + }).when(client).index(any()); + + historyStore.put(watchRecord); + verify(client).index(any()); + } + + public void testPutStopped() throws Exception { + Wid wid = new Wid("_name", new DateTime(0, UTC)); + ScheduleTriggerEvent event = new ScheduleTriggerEvent(wid.watchId(), new DateTime(0, UTC), new DateTime(0, UTC)); + WatchRecord watchRecord = new WatchRecord.MessageWatchRecord(wid, event, ExecutionState.EXECUTED, null, randomAlphaOfLength(10)); + + historyStore.stop(); + try { + historyStore.put(watchRecord); + fail("Expected IllegalStateException"); + } catch (IllegalStateException e) { + assertThat(e.getMessage(), is("unable to persist watch record history store is not ready")); + } finally { + historyStore.start(); + } + } + + public void testIndexNameGeneration() { + String indexTemplateVersion = INDEX_TEMPLATE_VERSION; + assertThat(getHistoryIndexNameForTime(new DateTime(0, UTC)), + equalTo(".watcher-history-"+ indexTemplateVersion +"-1970.01.01")); + assertThat(getHistoryIndexNameForTime(new DateTime(100000000000L, UTC)), + equalTo(".watcher-history-" + indexTemplateVersion + "-1973.03.03")); + assertThat(getHistoryIndexNameForTime(new DateTime(1416582852000L, UTC)), + equalTo(".watcher-history-" + indexTemplateVersion + "-2014.11.21")); + assertThat(getHistoryIndexNameForTime(new DateTime(2833165811000L, UTC)), + equalTo(".watcher-history-" + indexTemplateVersion + "-2059.10.12")); + } + + public void testStoreWithHideSecrets() throws Exception { + HttpClient httpClient = mock(HttpClient.class); + when(httpClient.execute(any(HttpRequest.class))).thenReturn(new HttpResponse(HttpStatus.SC_INTERNAL_SERVER_ERROR)); + + final String username = randomFrom("admin", "elastic", "test"); + final String password = randomFrom("secret", "supersecret", "123456"); + final String url = "https://" + randomFrom("localhost", "internal-jira.elastic.co") + ":" + randomFrom(80, 8080, 449, 9443); + + Settings settings = Settings.builder().put("url", url).put("user", username).put("password", password).build(); + JiraAccount account = new JiraAccount("_account", settings, httpClient); + + JiraIssue jiraIssue = account.createIssue(singletonMap("foo", "bar"), null); + ActionWrapperResult result = new ActionWrapperResult(JiraAction.TYPE, new JiraAction.Executed(jiraIssue)); + + DateTime now = new DateTime(0, UTC); + Wid wid = new Wid("_name", now); + + Watch watch = mock(Watch.class); + when(watch.id()).thenReturn("_id"); + when(watch.status()).thenReturn(new WatchStatus(now, singletonMap("_action", new ActionStatus(now)))); + + WatchExecutionContext context = mock(WatchExecutionContext.class); + when(context.id()).thenReturn(wid); + when(context.triggerEvent()).thenReturn(new ScheduleTriggerEvent(wid.watchId(), now, now)); + when(context.vars()).thenReturn(emptyMap()); + when(context.watch()).thenReturn(watch); + + WatchExecutionResult watchExecutionResult = new WatchExecutionResult(context, 0); + + WatchRecord watchRecord; + if (randomBoolean()) { + watchRecord = new WatchRecord.MessageWatchRecord(context, watchExecutionResult); + } else { + watchRecord = new 
WatchRecord.ExceptionWatchRecord(context, watchExecutionResult, new IllegalStateException()); + } + watchRecord.result().actionsResults().put(JiraAction.TYPE, result); + + PlainActionFuture indexResponseFuture = PlainActionFuture.newFuture(); + indexResponseFuture.onResponse(mock(IndexResponse.class)); + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(IndexRequest.class); + when(client.index(requestCaptor.capture())).thenReturn(indexResponseFuture); + if (randomBoolean()) { + historyStore.put(watchRecord); + } else { + historyStore.forcePut(watchRecord); + } + + assertThat(requestCaptor.getAllValues(), hasSize(1)); + String indexedJson = requestCaptor.getValue().source().utf8ToString(); + assertThat(indexedJson, containsString(username)); + assertThat(indexedJson, not(containsString(password))); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateEmailMappingsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateEmailMappingsTests.java new file mode 100644 index 0000000000000..38008194b0ab1 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateEmailMappingsTests.java @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.history; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; +import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.notification.email.EmailTemplate; +import org.elasticsearch.xpack.watcher.notification.email.support.EmailServer; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.junit.After; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.emailAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +/** + * This test makes sure that the email address fields in the watch_record action result are + * not analyzed so they can be used in aggregations + */ +@TestLogging("org.elasticsearch.xpack.watcher:DEBUG," + + "org.elasticsearch.xpack.watcher.WatcherIndexingListener:TRACE") +public class HistoryTemplateEmailMappingsTests extends AbstractWatcherIntegrationTestCase { + + private EmailServer server; + + @Override + public void setUp() throws 
Exception { + super.setUp(); + server = EmailServer.localhost(logger); + } + + @After + public void cleanup() throws Exception { + server.stop(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + + // email + .put("xpack.notification.email.account.test.smtp.auth", true) + .put("xpack.notification.email.account.test.smtp.user", EmailServer.USERNAME) + .put("xpack.notification.email.account.test.smtp.password", EmailServer.PASSWORD) + .put("xpack.notification.email.account.test.smtp.port", server.port()) + .put("xpack.notification.email.account.test.smtp.host", "localhost") + + .build(); + } + + public void testEmailFields() throws Exception { + PutWatchResponse putWatchResponse = watcherClient().preparePutWatch("_id").setSource(watchBuilder() + .trigger(schedule(interval("5s"))) + .input(simpleInput()) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_email", emailAction(EmailTemplate.builder() + .from("from@example.com") + .to("to1@example.com", "to2@example.com") + .cc("cc1@example.com", "cc2@example.com") + .bcc("bcc1@example.com", "bcc2@example.com") + .replyTo("rt1@example.com", "rt2@example.com") + .subject("_subject") + .textBody("_body")))) + .get(); + + assertThat(putWatchResponse.isCreated(), is(true)); + timeWarp().trigger("_id"); + flush(); + refresh(); + + // the action should fail as no email server is available + assertWatchWithMinimumActionsCount("_id", ExecutionState.EXECUTED, 1); + + SearchResponse response = client().prepareSearch(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*").setSource(searchSource() + .aggregation(terms("from").field("result.actions.email.message.from")) + .aggregation(terms("to").field("result.actions.email.message.to")) + .aggregation(terms("cc").field("result.actions.email.message.cc")) + .aggregation(terms("bcc").field("result.actions.email.message.bcc")) + .aggregation(terms("reply_to").field("result.actions.email.message.reply_to"))) + .get(); + + assertThat(response, notNullValue()); + assertThat(response.getHits().getTotalHits(), is(1L)); + Aggregations aggs = response.getAggregations(); + assertThat(aggs, notNullValue()); + + Terms terms = aggs.get("from"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(1)); + assertThat(terms.getBucketByKey("from@example.com"), notNullValue()); + assertThat(terms.getBucketByKey("from@example.com").getDocCount(), is(1L)); + + terms = aggs.get("to"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(2)); + assertThat(terms.getBucketByKey("to1@example.com"), notNullValue()); + assertThat(terms.getBucketByKey("to1@example.com").getDocCount(), is(1L)); + assertThat(terms.getBucketByKey("to2@example.com"), notNullValue()); + assertThat(terms.getBucketByKey("to2@example.com").getDocCount(), is(1L)); + + terms = aggs.get("cc"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(2)); + assertThat(terms.getBucketByKey("cc1@example.com"), notNullValue()); + assertThat(terms.getBucketByKey("cc1@example.com").getDocCount(), is(1L)); + assertThat(terms.getBucketByKey("cc2@example.com"), notNullValue()); + assertThat(terms.getBucketByKey("cc2@example.com").getDocCount(), is(1L)); + + terms = aggs.get("bcc"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(2)); + assertThat(terms.getBucketByKey("bcc1@example.com"), notNullValue()); + 
assertThat(terms.getBucketByKey("bcc1@example.com").getDocCount(), is(1L)); + assertThat(terms.getBucketByKey("bcc2@example.com"), notNullValue()); + assertThat(terms.getBucketByKey("bcc2@example.com").getDocCount(), is(1L)); + + terms = aggs.get("reply_to"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(2)); + assertThat(terms.getBucketByKey("rt1@example.com"), notNullValue()); + assertThat(terms.getBucketByKey("rt1@example.com").getDocCount(), is(1L)); + assertThat(terms.getBucketByKey("rt2@example.com"), notNullValue()); + assertThat(terms.getBucketByKey("rt2@example.com").getDocCount(), is(1L)); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateHttpMappingsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateHttpMappingsTests.java new file mode 100644 index 0000000000000..51652078dc96f --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateHttpMappingsTests.java @@ -0,0 +1,176 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.history; + +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; +import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; +import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; +import org.elasticsearch.xpack.watcher.common.http.HttpMethod; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.junit.After; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.webhookAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.httpInput; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import 
static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; + +/** + * This test makes sure that the mapping for the watch_record are correct + */ +public class HistoryTemplateHttpMappingsTests extends AbstractWatcherIntegrationTestCase { + + private MockWebServer webServer = new MockWebServer(); + + @Before + public void init() throws Exception { + webServer.start(); + } + + @After + public void cleanup() throws Exception { + webServer.close(); + } + + public void testHttpFields() throws Exception { + PutWatchResponse putWatchResponse = watcherClient().preparePutWatch("_id").setSource(watchBuilder() + .trigger(schedule(interval("5s"))) + .input(httpInput(HttpRequestTemplate.builder("localhost", webServer.getPort()).path("/input/path"))) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_webhook", webhookAction(HttpRequestTemplate.builder("localhost", webServer.getPort()) + .path("/webhook/path") + .method(HttpMethod.POST) + .body("_body")))) + .get(); + + // one for the input, one for the webhook + webServer.enqueue(new MockResponse().setResponseCode(200).setBody("{}")); + webServer.enqueue(new MockResponse().setResponseCode(200).setBody("{}")); + + assertThat(putWatchResponse.isCreated(), is(true)); + timeWarp().trigger("_id"); + flush(); + refresh(); + + // the action should fail as no email server is available + assertWatchWithMinimumActionsCount("_id", ExecutionState.EXECUTED, 1); + + SearchResponse response = client().prepareSearch(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*").setSource(searchSource() + .aggregation(terms("input_result_path").field("result.input.http.request.path")) + .aggregation(terms("input_result_host").field("result.input.http.request.host")) + .aggregation(terms("webhook_path").field("result.actions.webhook.request.path"))) + .get(); + + assertThat(response, notNullValue()); + assertThat(response.getHits().getTotalHits(), is(1L)); + Aggregations aggs = response.getAggregations(); + assertThat(aggs, notNullValue()); + + Terms terms = aggs.get("input_result_path"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(1)); + assertThat(terms.getBucketByKey("/input/path"), notNullValue()); + assertThat(terms.getBucketByKey("/input/path").getDocCount(), is(1L)); + + terms = aggs.get("webhook_path"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(1)); + assertThat(terms.getBucketByKey("/webhook/path"), notNullValue()); + assertThat(terms.getBucketByKey("/webhook/path").getDocCount(), is(1L)); + + assertThat(webServer.requests(), hasSize(2)); + assertThat(webServer.requests().get(0).getUri().getPath(), is("/input/path")); + assertThat(webServer.requests().get(1).getUri().getPath(), is("/webhook/path")); + } + + public void testExceptionMapping() { + // delete all history indices to ensure that we only need to check a single index + assertAcked(client().admin().indices().prepareDelete(HistoryStoreField.INDEX_PREFIX + "*")); + + String id = randomAlphaOfLength(10); + // switch between delaying the input or the action http request + boolean abortAtInput = randomBoolean(); + if (abortAtInput) { + webServer.enqueue(new MockResponse().setBeforeReplyDelay(TimeValue.timeValueSeconds(5))); + } else { + webServer.enqueue(new MockResponse().setBody("{}")); + webServer.enqueue(new MockResponse().setBeforeReplyDelay(TimeValue.timeValueSeconds(5))); + 
} + + PutWatchResponse putWatchResponse = watcherClient().preparePutWatch(id).setSource(watchBuilder() + .trigger(schedule(interval("1h"))) + .input(httpInput(HttpRequestTemplate.builder("localhost", webServer.getPort()) + .path("/") + .readTimeout(abortAtInput ? TimeValue.timeValueMillis(10) : TimeValue.timeValueSeconds(10)))) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_webhook", webhookAction(HttpRequestTemplate.builder("localhost", webServer.getPort()) + .readTimeout(TimeValue.timeValueMillis(10)) + .path("/webhook/path") + .method(HttpMethod.POST) + .body("_body")))) + .get(); + + assertThat(putWatchResponse.isCreated(), is(true)); + watcherClient().prepareExecuteWatch(id).setRecordExecution(true).get(); + + // ensure the watcher history index has been written with this id + flushAndRefresh(HistoryStoreField.INDEX_PREFIX + "*"); + SearchResponse searchResponse = client().prepareSearch(HistoryStoreField.INDEX_PREFIX + "*") + .setQuery(QueryBuilders.termQuery("watch_id", id)) + .get(); + assertHitCount(searchResponse, 1L); + + // ensure that the error object in the mapping has enabled set to false + List<Boolean> indexed = new ArrayList<>(); + GetMappingsResponse mappingsResponse = client().admin().indices().prepareGetMappings(HistoryStoreField.INDEX_PREFIX + "*").get(); + Iterator<ImmutableOpenMap<String, MappingMetaData>> iterator = mappingsResponse.getMappings().valuesIt(); + while (iterator.hasNext()) { + ImmutableOpenMap<String, MappingMetaData> mapping = iterator.next(); + assertThat(mapping.containsKey("doc"), is(true)); + Map<String, Object> docMapping = mapping.get("doc").getSourceAsMap(); + if (abortAtInput) { + Boolean enabled = ObjectPath.eval("properties.result.properties.input.properties.error.enabled", docMapping); + indexed.add(enabled); + } else { + Boolean enabled = ObjectPath.eval("properties.result.properties.actions.properties.error.enabled", docMapping); + indexed.add(enabled); + } + } + + assertThat(indexed, hasSize(greaterThanOrEqualTo(1))); + assertThat(indexed, hasItem(false)); + assertThat(indexed, not(hasItem(true))); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateIndexActionMappingsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateIndexActionMappingsTests.java new file mode 100644 index 0000000000000..6bf273b194c34 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateIndexActionMappingsTests.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.watcher.history; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; +import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.indexAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +/** + * This test makes sure that the index action response `index` and `type` fields in the watch_record action result are + * not analyzed so they can be used in aggregations + */ +public class HistoryTemplateIndexActionMappingsTests extends AbstractWatcherIntegrationTestCase { + + public void testIndexActionFields() throws Exception { + String index = "the-index"; + String type = "the-type"; + + PutWatchResponse putWatchResponse = watcherClient().preparePutWatch("_id").setSource(watchBuilder() + .trigger(schedule(interval("5m"))) + .addAction("index", indexAction(index, type))) + .get(); + + assertThat(putWatchResponse.isCreated(), is(true)); + timeWarp().trigger("_id"); + flush(); + refresh(); + + // the action should fail as no email server is available + assertWatchWithMinimumActionsCount("_id", ExecutionState.EXECUTED, 1); + flush(); + refresh(); + + SearchResponse response = client().prepareSearch(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*").setSource(searchSource() + .aggregation(terms("index_action_indices").field("result.actions.index.response.index")) + .aggregation(terms("index_action_types").field("result.actions.index.response.type"))) + .get(); + + assertThat(response, notNullValue()); + assertThat(response.getHits().getTotalHits(), is(1L)); + Aggregations aggs = response.getAggregations(); + assertThat(aggs, notNullValue()); + + Terms terms = aggs.get("index_action_indices"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(1)); + assertThat(terms.getBucketByKey(index), notNullValue()); + assertThat(terms.getBucketByKey(index).getDocCount(), is(1L)); + + terms = aggs.get("index_action_types"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(1)); + assertThat(terms.getBucketByKey(type), notNullValue()); + assertThat(terms.getBucketByKey(type).getDocCount(), is(1L)); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateSearchInputMappingsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateSearchInputMappingsTests.java new file mode 100644 index 0000000000000..17575f58ec298 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateSearchInputMappingsTests.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.history; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; +import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; + +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.searchInput; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +/** + * This test makes sure that the http host and path fields in the watch_record action result are + * not analyzed so they can be used in aggregations + */ +public class HistoryTemplateSearchInputMappingsTests extends AbstractWatcherIntegrationTestCase { + + public void testHttpFields() throws Exception { + String index = "the-index"; + String type = "the-type"; + createIndex(index); + index(index, type, "{}"); + flush(); + refresh(); + + WatcherSearchTemplateRequest request = new WatcherSearchTemplateRequest( + new String[]{index}, new String[]{type}, SearchType.QUERY_THEN_FETCH, + WatcherSearchTemplateRequest.DEFAULT_INDICES_OPTIONS, new BytesArray("{}") + ); + PutWatchResponse putWatchResponse = watcherClient().preparePutWatch("_id").setSource(watchBuilder() + .trigger(schedule(interval("5s"))) + .input(searchInput(request)) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("logger", loggingAction("indexed"))) + .get(); + + assertThat(putWatchResponse.isCreated(), is(true)); + timeWarp().trigger("_id"); + flush(); + refresh(); + + // the action should fail as no email server is available + assertWatchWithMinimumActionsCount("_id", ExecutionState.EXECUTED, 1); + + SearchResponse response = client().prepareSearch(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*").setSource(searchSource() + .aggregation(terms("input_search_type").field("result.input.search.request.search_type")) + .aggregation(terms("input_indices").field("result.input.search.request.indices")) + .aggregation(terms("input_types").field("result.input.search.request.types")) + .aggregation(terms("input_body").field("result.input.search.request.body"))) + .get(); + + assertThat(response, notNullValue()); + assertThat(response.getHits().getTotalHits(), is(1L)); + Aggregations aggs = response.getAggregations(); + assertThat(aggs, notNullValue()); + + 
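+ // search_type, indices and types are indexed so they can be aggregated on (one bucket each below), while the request body
+ // is not indexed and is therefore expected to produce no buckets at all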
Terms terms = aggs.get("input_search_type"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(1)); + assertThat(terms.getBucketByKey("query_then_fetch"), notNullValue()); + assertThat(terms.getBucketByKey("query_then_fetch").getDocCount(), is(1L)); + + terms = aggs.get("input_indices"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(1)); + assertThat(terms.getBucketByKey(index), notNullValue()); + assertThat(terms.getBucketByKey(index).getDocCount(), is(1L)); + + terms = aggs.get("input_types"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(1)); + assertThat(terms.getBucketByKey(type), notNullValue()); + assertThat(terms.getBucketByKey(type).getDocCount(), is(1L)); + + terms = aggs.get("input_body"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(0)); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateTimeMappingsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateTimeMappingsTests.java new file mode 100644 index 0000000000000..2259ad87eb17a --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateTimeMappingsTests.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.history; + +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; +import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; + +import java.util.Map; + +import static org.elasticsearch.common.xcontent.support.XContentMapValues.extractValue; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +/** + * This test makes sure that the different time fields in the watch_record are mapped as date types + */ +public class HistoryTemplateTimeMappingsTests extends AbstractWatcherIntegrationTestCase { + + public void testTimeFields() throws Exception { + PutWatchResponse putWatchResponse = watcherClient().preparePutWatch("_id").setSource(watchBuilder() + .trigger(schedule(interval("5s"))) + .input(simpleInput()) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_logging", loggingAction("foobar"))) + .get(); + + assertThat(putWatchResponse.isCreated(), is(true)); 
+ timeWarp().trigger("_id"); + + assertWatchWithMinimumActionsCount("_id", ExecutionState.EXECUTED, 1); + assertBusy(() -> { + GetMappingsResponse mappingsResponse = client().admin().indices().prepareGetMappings().get(); + assertThat(mappingsResponse, notNullValue()); + assertThat(mappingsResponse.getMappings().isEmpty(), is(false)); + for (ObjectObjectCursor<String, ImmutableOpenMap<String, MappingMetaData>> metadatas : mappingsResponse.getMappings()) { + if (!metadatas.key.startsWith(HistoryStoreField.INDEX_PREFIX)) { + continue; + } + MappingMetaData metadata = metadatas.value.get("doc"); + assertThat(metadata, notNullValue()); + try { + Map<String, Object> source = metadata.getSourceAsMap(); + logger.info("checking index [{}] with metadata:\n[{}]", metadatas.key, metadata.source().toString()); + assertThat(extractValue("properties.trigger_event.properties.type.type", source), is((Object) "keyword")); + assertThat(extractValue("properties.trigger_event.properties.triggered_time.type", source), is((Object) "date")); + assertThat(extractValue("properties.trigger_event.properties.schedule.properties.scheduled_time.type", source), + is((Object) "date")); + assertThat(extractValue("properties.result.properties.execution_time.type", source), is((Object) "date")); + } catch (ElasticsearchParseException e) { + throw new RuntimeException(e); + } + } + }); + + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateTransformMappingsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateTransformMappingsTests.java new file mode 100644 index 0000000000000..05247af948eea --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateTransformMappingsTests.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.watcher.history; + +import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; + +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.templateRequest; +import static org.elasticsearch.xpack.watcher.transform.TransformBuilders.searchTransform; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.hasItem; + +public class HistoryTemplateTransformMappingsTests extends AbstractWatcherIntegrationTestCase { + + public void testTransformFields() throws Exception { + assertAcked(client().admin().indices().prepareCreate("idx").addMapping("doc", + jsonBuilder().startObject() + .startObject("properties") + .startObject("foo") + .field("type", "object") + .field("enabled", false) + .endObject() + .endObject() + .endObject())); + + client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .add(client().prepareIndex("idx", "doc", "1") + .setSource(jsonBuilder().startObject().field("name", "first").field("foo", "bar").endObject())) + .add(client().prepareIndex("idx", "doc", "2") + .setSource(jsonBuilder().startObject().field("name", "second") + .startObject("foo").field("what", "ever").endObject().endObject())) + .get(); + + watcherClient().preparePutWatch("_first").setSource(watchBuilder() + .trigger(schedule(interval("5s"))) + .input(simpleInput()) + .transform(searchTransform(templateRequest(searchSource().query(QueryBuilders.termQuery("name", "first")), "idx"))) + .addAction("logger", + searchTransform(templateRequest(searchSource().query(QueryBuilders.termQuery("name", "first")), "idx")), + loggingAction("indexed"))) + .get(); + + // execute another watch which with a transform that should conflict with the previous watch. Since the + // mapping for the transform construct is disabled, there should be no problems. 
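+ // the two payloads conflict on purpose: the first watch's transform returns "foo" as a string, the second returns "foo" as an
+ // object, which would cause a mapping conflict in the history index if result.actions.transform.payload were not disabled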
+ watcherClient().preparePutWatch("_second").setSource(watchBuilder() + .trigger(schedule(interval("5s"))) + .input(simpleInput()) + .transform(searchTransform(templateRequest(searchSource().query(QueryBuilders.termQuery("name", "second")), "idx"))) + .addAction("logger", + searchTransform(templateRequest(searchSource().query(QueryBuilders.termQuery("name", "second")), "idx")), + loggingAction("indexed"))) + .get(); + + watcherClient().prepareExecuteWatch("_first").setRecordExecution(true).get(); + watcherClient().prepareExecuteWatch("_second").setRecordExecution(true).get(); + + assertBusy(() -> { + GetFieldMappingsResponse response = client().admin().indices() + .prepareGetFieldMappings(".watcher-history*") + .setFields("result.actions.transform.payload") + .setTypes("doc") + .includeDefaults(true) + .get(); + + // time might have rolled over to a new day, thus we need to check that this field exists only in one of the history indices + List payloadNulls = response.mappings().values().stream() + .map(map -> map.get("doc")) + .map(map -> map.get("result.actions.transform.payload")) + .filter(Objects::nonNull) + .map(GetFieldMappingsResponse.FieldMappingMetaData::isNull) + .collect(Collectors.toList()); + + assertThat(payloadNulls, hasItem(true)); + }); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/InputRegistryTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/InputRegistryTests.java new file mode 100644 index 0000000000000..83c3457c4f6d4 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/InputRegistryTests.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.input; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; + +import static java.util.Collections.emptyMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; + +public class InputRegistryTests extends ESTestCase { + + public void testParseEmptyInput() throws Exception { + InputRegistry registry = new InputRegistry(Settings.EMPTY, emptyMap()); + XContentParser parser = createParser(jsonBuilder().startObject().endObject()); + parser.nextToken(); + try { + registry.parse("_id", parser); + fail("expecting an exception when trying to parse an empty input"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), containsString("expected field indicating the input type, but found an empty object instead")); + } + } + + public void testParseArrayInput() throws Exception { + InputRegistry registry = new InputRegistry(Settings.EMPTY, emptyMap()); + XContentParser parser = createParser(jsonBuilder().startArray().endArray()); + parser.nextToken(); + try { + registry.parse("_id", parser); + fail("expecting an exception when trying to parse an input that is not an object"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), containsString("expected an object representing the input, but found [START_ARRAY] instead")); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ChainInputTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ChainInputTests.java new file mode 100644 index 0000000000000..e654452779ab8 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ChainInputTests.java @@ -0,0 +1,223 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.input.chain; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuth; +import org.elasticsearch.xpack.watcher.condition.ScriptCondition; +import org.elasticsearch.xpack.watcher.input.InputFactory; +import org.elasticsearch.xpack.watcher.input.InputRegistry; +import org.elasticsearch.xpack.watcher.input.http.HttpInput; +import org.elasticsearch.xpack.watcher.input.simple.SimpleInput; +import org.elasticsearch.xpack.watcher.input.simple.SimpleInputFactory; +import org.elasticsearch.xpack.watcher.test.WatcherTestUtils; + +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.chainInput; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.httpInput; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +public class ChainInputTests extends ESTestCase { + + /* note, first line does not need to be parsed + "chain" : { + "inputs" : [ + { "first" : { "simple" : { "foo" : "bar" } } }, + { "second" : { "simple" : { "spam" : "eggs" } } } + ] + } + */ + public void testThatExecutionWorks() throws Exception { + Map factories = new HashMap<>(); + factories.put("simple", new SimpleInputFactory(Settings.EMPTY)); + + // hackedy hack... 
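+ // the chain input factory needs the input registry to resolve its nested inputs, while the registry is built from this
+ // factories map; since the map is mutable, the chain factory can still be registered after the registry has been constructed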
+ InputRegistry inputRegistry = new InputRegistry(Settings.EMPTY, factories); + ChainInputFactory chainInputFactory = new ChainInputFactory(Settings.EMPTY, inputRegistry); + factories.put("chain", chainInputFactory); + + XContentBuilder builder = jsonBuilder().startObject().startArray("inputs") + .startObject().startObject("first").startObject("simple").field("foo", "bar").endObject().endObject().endObject() + .startObject().startObject("second").startObject("simple").field("spam", "eggs").endObject().endObject().endObject() + .endArray().endObject(); + + // first pass JSON and check for correct inputs + XContentParser parser = createParser(builder); + parser.nextToken(); + ChainInput chainInput = chainInputFactory.parseInput("test", parser); + + assertThat(chainInput.getInputs(), hasSize(2)); + assertThat(chainInput.getInputs().get(0).v1(), is("first")); + assertThat(chainInput.getInputs().get(0).v2(), instanceOf(SimpleInput.class)); + assertThat(chainInput.getInputs().get(1).v1(), is("second")); + assertThat(chainInput.getInputs().get(1).v2(), instanceOf(SimpleInput.class)); + + // now execute + ExecutableChainInput executableChainInput = chainInputFactory.createExecutable(chainInput); + WatchExecutionContext ctx = WatcherTestUtils.createWatchExecutionContext(logger); + ChainInput.Result result = executableChainInput.execute(ctx, new Payload.Simple()); + Payload payload = result.payload(); + assertThat(payload.data(), hasKey("first")); + assertThat(payload.data(), hasKey("second")); + assertThat(payload.data().get("first"), instanceOf(Map.class)); + assertThat(payload.data().get("second"), instanceOf(Map.class)); + + // final payload check + Map firstPayload = (Map) payload.data().get("first"); + Map secondPayload = (Map) payload.data().get("second"); + assertThat(firstPayload, hasEntry("foo", "bar")); + assertThat(secondPayload, hasEntry("spam", "eggs")); + } + + public void testToXContent() throws Exception { + ChainInput chainedInput = chainInput() + .add("first", simpleInput("foo", "bar")) + .add("second", simpleInput("spam", "eggs")) + .build(); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + chainedInput.toXContent(builder, ToXContent.EMPTY_PARAMS); + + assertThat(BytesReference.bytes(builder).utf8ToString(), + is("{\"inputs\":[{\"first\":{\"simple\":{\"foo\":\"bar\"}}},{\"second\":{\"simple\":{\"spam\":\"eggs\"}}}]}")); + + // parsing it back as well! 
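+ // rebuild the same registry/factory setup so the JSON rendered above can be parsed back into an equivalent ChainInput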
+ Map factories = new HashMap<>(); + factories.put("simple", new SimpleInputFactory(Settings.EMPTY)); + + InputRegistry inputRegistry = new InputRegistry(Settings.EMPTY, factories); + ChainInputFactory chainInputFactory = new ChainInputFactory(Settings.EMPTY, inputRegistry); + factories.put("chain", chainInputFactory); + + XContentParser parser = createParser(builder); + parser.nextToken(); + ChainInput parsedChainInput = ChainInput.parse("testWatchId", parser, inputRegistry); + assertThat(parsedChainInput.getInputs(), hasSize(2)); + assertThat(parsedChainInput.getInputs().get(0).v1(), is("first")); + assertThat(parsedChainInput.getInputs().get(0).v2(), is(instanceOf(SimpleInput.class))); + assertThat(parsedChainInput.getInputs().get(1).v1(), is("second")); + assertThat(parsedChainInput.getInputs().get(1).v2(), is(instanceOf(SimpleInput.class))); + } + + public void testThatWatchSourceBuilderWorksWithChainInput() throws Exception { + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + + HttpInput.Builder httpInputBuilder = httpInput(HttpRequestTemplate.builder("theHost", 1234) + .path("/index/_search") + .body(Strings.toString(jsonBuilder().startObject().field("size", 1).endObject())) + .auth(new BasicAuth("test", SecuritySettingsSourceField.TEST_PASSWORD.toCharArray()))); + + ChainInput.Builder chainedInputBuilder = chainInput() + .add("foo", httpInputBuilder) + .add("bar", simpleInput("spam", "eggs")); + + watchBuilder() + .trigger(schedule(interval("5s"))) + .input(chainedInputBuilder) + .condition(new ScriptCondition(mockScript("ctx.payload.hits.total == 1"))) + .addAction("_id", loggingAction("watch [{{ctx.watch_id}}] matched")) + .toXContent(builder, ToXContent.EMPTY_PARAMS); + + // no exception means all good + } + + public void testThatSerializationOfFailedInputWorks() throws Exception { + ChainInput.Result chainedResult = new ChainInput.Result(new ElasticsearchException("foo")); + + XContentBuilder builder = jsonBuilder(); + chainedResult.toXContent(builder, ToXContent.EMPTY_PARAMS); + assertThat(BytesReference.bytes(builder).utf8ToString(), containsString("\"type\":\"exception\"")); + assertThat(BytesReference.bytes(builder).utf8ToString(), containsString("\"reason\":\"foo\"")); + } + + /* https://github.com/elastic/x-plugins/issues/3736 + the issue here is, that first/second in this setup do not have a guaranteed order, so we have to throw an exception + { + "inputs" : [ + { + "first" : { "simple" : { "foo" : "bar" } }, + "second" : { "simple" : { "spam" : "eggs" } } + } + ] + } + */ + public void testParsingShouldBeStrictWhenClosingInputs() throws Exception { + Map factories = new HashMap<>(); + factories.put("simple", new SimpleInputFactory(Settings.EMPTY)); + + InputRegistry inputRegistry = new InputRegistry(Settings.EMPTY, factories); + ChainInputFactory chainInputFactory = new ChainInputFactory(Settings.EMPTY, inputRegistry); + factories.put("chain", chainInputFactory); + + XContentBuilder builder = jsonBuilder().startObject().startArray("inputs").startObject() + .startObject("first").startObject("simple").field("foo", "bar").endObject().endObject() + .startObject("second").startObject("simple").field("spam", "eggs").endObject().endObject() + .endObject().endArray().endObject(); + + XContentParser parser = createParser(builder); + parser.nextToken(); + ElasticsearchParseException e = + expectThrows(ElasticsearchParseException.class, () -> chainInputFactory.parseInput("test", parser)); + assertThat(e.getMessage(), + containsString("Expected 
closing JSON object after parsing input [simple] named [first] in watch [test]")); + } + + /* https://github.com/elastic/x-plugins/issues/3736 + make sure that after the name of a chained input there is always an object + { + "inputs" : [ + { "first" : [ { "simple" : { "foo" : "bar" } } ] } + ] + } + */ + public void testParsingShouldBeStrictWhenStartingInputs() throws Exception { + Map factories = new HashMap<>(); + factories.put("simple", new SimpleInputFactory(Settings.EMPTY)); + + InputRegistry inputRegistry = new InputRegistry(Settings.EMPTY, factories); + ChainInputFactory chainInputFactory = new ChainInputFactory(Settings.EMPTY, inputRegistry); + factories.put("chain", chainInputFactory); + + XContentBuilder builder = jsonBuilder().startObject().startArray("inputs") + .startObject().startArray("first").startObject() + .startObject("simple").field("foo", "bar").endObject() + .endObject().endArray().endObject() + .endArray().endObject(); + + XContentParser parser = createParser(builder); + parser.nextToken(); + ElasticsearchParseException e = + expectThrows(ElasticsearchParseException.class, () -> chainInputFactory.parseInput("test", parser)); + assertThat(e.getMessage(), containsString("Expected starting JSON object after [first] in watch [test]")); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ChainIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ChainIntegrationTests.java new file mode 100644 index 0000000000000..3dbc9de70d50d --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ChainIntegrationTests.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.input.chain; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.transport.Netty4Plugin; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.input.http.HttpInput; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; + +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collection; + +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.indexAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.chainInput; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.httpInput; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule.Interval.Unit.SECONDS; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.containsString; + +public class ChainIntegrationTests extends AbstractWatcherIntegrationTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(NetworkModule.HTTP_ENABLED.getKey(), true) + .build(); + } + + @Override + protected Collection> nodePlugins() { + ArrayList> plugins = new ArrayList<>(super.nodePlugins()); + plugins.add(Netty4Plugin.class); // for http + return plugins; + } + + public void testChainedInputsAreWorking() throws Exception { + String index = "the-most-awesome-index-ever"; + createIndex(index); + client().prepareIndex(index, "type", "id").setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); + + InetSocketAddress address = internalCluster().httpAddresses()[0]; + HttpInput.Builder httpInputBuilder = httpInput(HttpRequestTemplate.builder(address.getHostString(), address.getPort()) + .path("/" + index + "/_search") + .body(Strings.toString(jsonBuilder().startObject().field("size", 1).endObject()))); + + ChainInput.Builder chainedInputBuilder = chainInput() + .add("first", simpleInput("url", "/" + index + "/_search")) + .add("second", httpInputBuilder); + + watcherClient().preparePutWatch("_name") + .setSource(watchBuilder() + .trigger(schedule(interval(5, SECONDS))) + .input(chainedInputBuilder) + .addAction("indexAction", indexAction("my-index", "my-type"))) + .get(); + + timeWarp().trigger("_name"); + refresh(); + + assertWatchWithMinimumPerformedActionsCount("_name", 1, false); + } + + public void assertWatchExecuted() { + try { + refresh(); + SearchResponse searchResponse = client().prepareSearch("my-index").setTypes("my-type").get(); + assertHitCount(searchResponse, 1); + assertThat(searchResponse.getHits().getAt(0).getSourceAsString(), 
containsString("the-most-awesome-index-ever")); + } catch (IndexNotFoundException e) { + fail("Index not found: ["+ e.getIndex() + "]"); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ExecutableChainInputTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ExecutableChainInputTests.java new file mode 100644 index 0000000000000..0ae6e0b3c96a4 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ExecutableChainInputTests.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.input.chain; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.input.ExecutableInput; +import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.input.simple.SimpleInput; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.util.Arrays; + +import static org.elasticsearch.xpack.core.watcher.input.Input.Result.Status; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.mockExecutionContextBuilder; +import static org.hamcrest.Matchers.is; + +public class ExecutableChainInputTests extends ESTestCase { + + public void testFailedResultHandling() throws Exception { + WatchExecutionContext ctx = createWatchExecutionContext(); + ChainInput chainInput = new ChainInput(Arrays.asList(new Tuple<>("whatever", new SimpleInput(Payload.EMPTY)))); + + Tuple tuple = new Tuple<>("whatever", new FailingExecutableInput()); + ExecutableChainInput executableChainInput = new ExecutableChainInput(chainInput, Arrays.asList(tuple), logger); + ChainInput.Result result = executableChainInput.execute(ctx, Payload.EMPTY); + assertThat(result.status(), is(Status.SUCCESS)); + } + + private class FailingExecutableInput extends ExecutableInput { + + protected FailingExecutableInput() { + super(new SimpleInput(Payload.EMPTY), ExecutableChainInputTests.this.logger); + } + + @Override + public Input.Result execute(WatchExecutionContext ctx, @Nullable Payload payload) { + return new FailingExecutableInputResult(new RuntimeException("foo")); + } + } + + private static class FailingExecutableInputResult extends Input.Result { + + protected FailingExecutableInputResult(Exception e) { + super("failing", e); + } + + @Override + protected XContentBuilder typeXContent(XContentBuilder builder, Params params) throws IOException { + return builder; + } + } + + private WatchExecutionContext createWatchExecutionContext() { + DateTime now = DateTime.now(DateTimeZone.UTC); + Wid wid = new Wid(randomAlphaOfLength(5), now); + return mockExecutionContextBuilder(wid.watchId()) + .wid(wid) + .payload(new Payload.Simple()) + .time(wid.watchId(), now) + .buildMock(); + } + +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/http/HttpInputTests.java 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/http/HttpInputTests.java new file mode 100644 index 0000000000000..dd13fcb14a252 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/http/HttpInputTests.java @@ -0,0 +1,351 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.input.http; + +import io.netty.handler.codec.http.HttpHeaders; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpContentType; +import org.elasticsearch.xpack.watcher.common.http.HttpMethod; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.http.Scheme; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuth; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuthRegistry; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuth; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuthFactory; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.input.InputBuilders; +import org.elasticsearch.xpack.watcher.test.WatcherTestUtils; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class 
HttpInputTests extends ESTestCase { + + private HttpClient httpClient; + private HttpInputFactory httpParser; + private TextTemplateEngine templateEngine; + + @Before + public void init() throws Exception { + httpClient = mock(HttpClient.class); + templateEngine = mock(TextTemplateEngine.class); + HttpAuthRegistry registry = new HttpAuthRegistry(singletonMap("basic", new BasicAuthFactory(null))); + httpParser = new HttpInputFactory(Settings.EMPTY, httpClient, templateEngine, new HttpRequestTemplate.Parser(registry)); + } + + public void testExecute() throws Exception { + String host = "_host"; + int port = 123; + HttpRequestTemplate.Builder request = HttpRequestTemplate.builder(host, port) + .method(HttpMethod.POST) + .body("_body"); + HttpInput httpInput; + + HttpResponse response; + switch (randomIntBetween(1, 6)) { + case 1: + response = new HttpResponse(123, "{\"key\" : \"value\"}".getBytes(StandardCharsets.UTF_8)); + httpInput = InputBuilders.httpInput(request.build()).build(); + break; + case 2: + response = new HttpResponse(123, "---\nkey : value".getBytes(StandardCharsets.UTF_8)); + httpInput = InputBuilders.httpInput(request.build()).expectedResponseXContentType(HttpContentType.YAML).build(); + break; + case 3: + response = new HttpResponse(123, "{\"key\" : \"value\"}".getBytes(StandardCharsets.UTF_8), + singletonMap(HttpHeaders.Names.CONTENT_TYPE, new String[] { XContentType.JSON.mediaType() })); + httpInput = InputBuilders.httpInput(request.build()).build(); + break; + case 4: + response = new HttpResponse(123, "key: value".getBytes(StandardCharsets.UTF_8), + singletonMap(HttpHeaders.Names.CONTENT_TYPE, new String[] { XContentType.YAML.mediaType() })); + httpInput = InputBuilders.httpInput(request.build()).build(); + break; + case 5: + response = new HttpResponse(123, "---\nkey: value".getBytes(StandardCharsets.UTF_8), + singletonMap(HttpHeaders.Names.CONTENT_TYPE, new String[] { "unrecognized_content_type" })); + httpInput = InputBuilders.httpInput(request.build()).expectedResponseXContentType(HttpContentType.YAML).build(); + break; + default: + response = new HttpResponse(123, "{\"key\" : \"value\"}".getBytes(StandardCharsets.UTF_8), + singletonMap(HttpHeaders.Names.CONTENT_TYPE, new String[] { "unrecognized_content_type" })); + httpInput = InputBuilders.httpInput(request.build()).build(); + break; + } + + ExecutableHttpInput input = new ExecutableHttpInput(httpInput, logger, httpClient, templateEngine); + when(httpClient.execute(any(HttpRequest.class))).thenReturn(response); + when(templateEngine.render(eq(new TextTemplate("_body")), any(Map.class))).thenReturn("_body"); + + WatchExecutionContext ctx = WatcherTestUtils.createWatchExecutionContext(logger); + HttpInput.Result result = input.execute(ctx, new Payload.Simple()); + assertThat(result.type(), equalTo(HttpInput.TYPE)); + assertThat(result.payload().data(), hasEntry("key", "value")); + } + + public void testExecuteNonJson() throws Exception { + String host = "_host"; + int port = 123; + HttpRequestTemplate.Builder request = HttpRequestTemplate.builder(host, port) + .method(HttpMethod.POST) + .body("_body"); + HttpInput httpInput = InputBuilders.httpInput(request.build()).expectedResponseXContentType(HttpContentType.TEXT).build(); + ExecutableHttpInput input = new ExecutableHttpInput(httpInput, logger, httpClient, templateEngine); + String notJson = "This is not json"; + HttpResponse response = new HttpResponse(123, notJson.getBytes(StandardCharsets.UTF_8)); + 
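+ // with the expected response content type set to TEXT, the body is not parsed as JSON but stored verbatim under the "_value" key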
when(httpClient.execute(any(HttpRequest.class))).thenReturn(response); + when(templateEngine.render(eq(new TextTemplate("_body")), any(Map.class))).thenReturn("_body"); + + WatchExecutionContext ctx = WatcherTestUtils.createWatchExecutionContext(logger); + HttpInput.Result result = input.execute(ctx, new Payload.Simple()); + assertThat(result.type(), equalTo(HttpInput.TYPE)); + assertThat(result.payload().data().get("_value").toString(), equalTo(notJson)); + } + + public void testParser() throws Exception { + final HttpMethod httpMethod = rarely() ? null : randomFrom(HttpMethod.values()); + Scheme scheme = randomFrom(Scheme.HTTP, Scheme.HTTPS, null); + String host = randomAlphaOfLength(3); + int port = randomIntBetween(8000, 9000); + String path = randomAlphaOfLength(3); + TextTemplate pathTemplate = new TextTemplate(path); + String body = randomBoolean() ? randomAlphaOfLength(3) : null; + Map params = + randomBoolean() ? new MapBuilder().put("a", new TextTemplate("b")).map() : null; + Map headers = + randomBoolean() ? new MapBuilder().put("c", new TextTemplate("d")).map() : null; + HttpAuth auth = randomBoolean() ? new BasicAuth("username", "password".toCharArray()) : null; + HttpRequestTemplate.Builder requestBuilder = HttpRequestTemplate.builder(host, port) + .scheme(scheme) + .method(httpMethod) + .path(pathTemplate) + .body(body != null ? new TextTemplate(body) : null) + .auth(auth); + + if (params != null) { + requestBuilder.putParams(params); + } + if (headers != null) { + requestBuilder.putHeaders(headers); + } + HttpInput.Builder inputBuilder = InputBuilders.httpInput(requestBuilder); + HttpContentType expectedResponseXContentType = randomFrom(HttpContentType.values()); + + String[] extractKeys = randomFrom(new String[]{"foo", "bar"}, new String[]{"baz"}, null); + if (expectedResponseXContentType != HttpContentType.TEXT) { + if (extractKeys != null) { + inputBuilder.extractKeys(extractKeys); + } + } + + inputBuilder.expectedResponseXContentType(expectedResponseXContentType); + XContentBuilder source = jsonBuilder(); + inputBuilder.build().toXContent(source, WatcherParams.builder().hideSecrets(false).build()); + XContentParser parser = createParser(source); + parser.nextToken(); + HttpInput result = httpParser.parseInput("_id", parser); + + assertThat(result.type(), equalTo(HttpInput.TYPE)); + assertThat(result.getRequest().scheme(), equalTo(scheme != null ? scheme : Scheme.HTTP)); // http is the default + assertThat(result.getRequest().method(), equalTo(httpMethod != null ? 
httpMethod : HttpMethod.GET)); // get is the default + assertThat(result.getRequest().host(), equalTo(host)); + assertThat(result.getRequest().port(), equalTo(port)); + assertThat(result.getRequest().path(), is(new TextTemplate(path))); + assertThat(result.getExpectedResponseXContentType(), equalTo(expectedResponseXContentType)); + if (expectedResponseXContentType != HttpContentType.TEXT && extractKeys != null) { + for (String key : extractKeys) { + assertThat(result.getExtractKeys().contains(key), is(true)); + } + } + if (params != null) { + assertThat(result.getRequest().params(), hasEntry(is("a"), is(new TextTemplate("b")))); + } + if (headers != null) { + assertThat(result.getRequest().headers(), hasEntry(is("c"), is(new TextTemplate("d")))); + } + assertThat(result.getRequest().auth(), equalTo(auth)); + if (body != null) { + assertThat(result.getRequest().body(), is(new TextTemplate(body))); + } else { + assertThat(result.getRequest().body(), nullValue()); + } + } + + public void testParserInvalidHttpMethod() throws Exception { + XContentBuilder builder = jsonBuilder().startObject() + .startObject("request") + .field("method", "_method") + .field("body", "_body") + .endObject() + .endObject(); + XContentParser parser = createParser(builder); + parser.nextToken(); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> httpParser.parseInput("_id", parser)); + assertThat(e.getMessage(), is("unsupported http method [_METHOD]")); + } + + public void testThatHeadersAreIncludedInPayload() throws Exception { + String headerName = randomAlphaOfLength(10); + String headerValue = randomAlphaOfLength(10); + boolean responseHasContent = randomBoolean(); + + HttpRequestTemplate.Builder request = HttpRequestTemplate.builder("localhost", 8080); + HttpInput httpInput = InputBuilders.httpInput(request.build()).build(); + ExecutableHttpInput input = new ExecutableHttpInput(httpInput, logger, httpClient, templateEngine); + + Map responseHeaders = new HashMap<>(); + responseHeaders.put(headerName, new String[] { headerValue }); + HttpResponse response; + if (responseHasContent) { + response = new HttpResponse(200, "body".getBytes(StandardCharsets.UTF_8), responseHeaders); + } else { + BytesReference bytesReference = null; + response = new HttpResponse(200, bytesReference, responseHeaders); + } + + when(httpClient.execute(any(HttpRequest.class))).thenReturn(response); + + when(templateEngine.render(eq(new TextTemplate("_body")), any(Map.class))).thenReturn("_body"); + + WatchExecutionContext ctx = WatcherTestUtils.createWatchExecutionContext(logger); + HttpInput.Result result = input.execute(ctx, new Payload.Simple()); + + assertThat(result.type(), equalTo(HttpInput.TYPE)); + List expectedHeaderValues = new ArrayList<>(); + expectedHeaderValues.add(headerValue); + Map expectedHeaderMap = MapBuilder.newMapBuilder() + .put(headerName.toLowerCase(Locale.ROOT), expectedHeaderValues) + .map(); + assertThat(result.payload().data(), hasKey("_headers")); + assertThat(result.payload().data().get("_headers"), equalTo(expectedHeaderMap)); + } + + public void testThatExpectedContentTypeOverridesReturnedContentType() throws Exception { + HttpRequestTemplate template = HttpRequestTemplate.builder("http:://127.0.0.1:12345").build(); + HttpInput httpInput = new HttpInput(template, HttpContentType.TEXT, null); + ExecutableHttpInput input = new ExecutableHttpInput(httpInput, logger, httpClient, templateEngine); + + Map headers = new HashMap<>(1); + String contentType = 
randomFrom("application/json", "application/json; charset=UTF-8", "text/html", "application/yaml", + "application/smile", "application/cbor"); + headers.put("Content-Type", new String[] { contentType }); + String body = "{\"foo\":\"bar\"}"; + HttpResponse httpResponse = new HttpResponse(200, body, headers); + when(httpClient.execute(any())).thenReturn(httpResponse); + + WatchExecutionContext ctx = WatcherTestUtils.createWatchExecutionContext(logger); + HttpInput.Result result = input.execute(ctx, Payload.EMPTY); + assertThat(result.payload().data(), hasEntry("_value", body)); + assertThat(result.payload().data(), not(hasKey("foo"))); + } + + public void testThatStatusCodeIsSetInResultAndPayload() throws Exception { + HttpResponse response = new HttpResponse(200); + when(httpClient.execute(any(HttpRequest.class))).thenReturn(response); + + HttpRequestTemplate.Builder request = HttpRequestTemplate.builder("localhost", 8080); + HttpInput httpInput = InputBuilders.httpInput(request.build()).build(); + ExecutableHttpInput input = new ExecutableHttpInput(httpInput, logger, httpClient, templateEngine); + + WatchExecutionContext ctx = WatcherTestUtils.createWatchExecutionContext(logger); + HttpInput.Result result = input.execute(ctx, new Payload.Simple()); + assertThat(result.statusCode, is(200)); + assertThat(result.payload().data(), hasKey("_status_code")); + assertThat(result.payload().data().get("_status_code"), is(200)); + } + + @SuppressWarnings("unchecked") + public void testThatArrayJsonResponseIsHandled() throws Exception { + Map headers = Collections.singletonMap("Content-Type", new String[]{"application/json"}); + HttpResponse response = new HttpResponse(200, "[ { \"foo\": \"first\" }, { \"foo\": \"second\"}]", headers); + when(httpClient.execute(any(HttpRequest.class))).thenReturn(response); + + HttpRequestTemplate.Builder request = HttpRequestTemplate.builder("localhost", 8080); + HttpInput httpInput = InputBuilders.httpInput(request.build()).build(); + ExecutableHttpInput input = new ExecutableHttpInput(httpInput, logger, httpClient, templateEngine); + + WatchExecutionContext ctx = WatcherTestUtils.createWatchExecutionContext(logger); + HttpInput.Result result = input.execute(ctx, new Payload.Simple()); + assertThat(result.statusCode, is(200)); + assertThat(result.payload().data(), not(hasKey("_value"))); + assertThat(result.payload().data(), hasKey("data")); + assertThat(result.payload().data().get("data"), instanceOf(List.class)); + List> data = (List>) result.payload().data().get("data"); + assertThat(data, hasSize(2)); + assertThat(data.get(0).get("foo"), is("first")); + assertThat(data.get(1).get("foo"), is("second")); + } + + public void testExceptionCase() throws Exception { + when(httpClient.execute(any(HttpRequest.class))).thenThrow(new IOException("could not connect")); + + HttpRequestTemplate.Builder request = HttpRequestTemplate.builder("localhost", 8080); + HttpInput httpInput = InputBuilders.httpInput(request.build()).build(); + ExecutableHttpInput input = new ExecutableHttpInput(httpInput, logger, httpClient, templateEngine); + + WatchExecutionContext ctx = WatcherTestUtils.createWatchExecutionContext(logger); + HttpInput.Result result = input.execute(ctx, new Payload.Simple()); + + assertThat(result.getException(), is(notNullValue())); + assertThat(result.getException(), is(instanceOf(IOException.class))); + assertThat(result.getException().getMessage(), is("could not connect")); + + try (XContentBuilder builder = jsonBuilder()) { + result.toXContent(builder, 
ToXContent.EMPTY_PARAMS); + BytesReference bytes = BytesReference.bytes(builder); + try (XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, bytes.streamInput())) { + Map data = parser.map(); + String reason = ObjectPath.eval("error.reason", data); + assertThat(reason, is("could not connect")); + String type = ObjectPath.eval("error.type", data); + assertThat(type, is("i_o_exception")); + } + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/simple/SimpleInputTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/simple/SimpleInputTests.java new file mode 100644 index 0000000000000..b435e7ba38af0 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/simple/SimpleInputTests.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.input.simple; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.input.ExecutableInput; +import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.input.InputFactory; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; + +public class SimpleInputTests extends ESTestCase { + + public void testExecute() throws Exception { + Map data = new HashMap<>(); + data.put("foo", "bar"); + data.put("baz", new ArrayList() ); + ExecutableInput staticInput = new ExecutableSimpleInput(new SimpleInput(new Payload.Simple(data)), logger); + + Input.Result staticResult = staticInput.execute(null, new Payload.Simple()); + assertEquals(staticResult.payload().data().get("foo"), "bar"); + List baz = (List)staticResult.payload().data().get("baz"); + assertTrue(baz.isEmpty()); + } + + public void testParserValid() throws Exception { + Map data = new HashMap<>(); + data.put("foo", "bar"); + data.put("baz", new ArrayList()); + + XContentBuilder jsonBuilder = jsonBuilder().map(data); + InputFactory parser = new SimpleInputFactory(Settings.builder().build()); + XContentParser xContentParser = createParser(jsonBuilder); + xContentParser.nextToken(); + ExecutableInput input = parser.parseExecutable("_id", xContentParser); + assertEquals(input.type(), SimpleInput.TYPE); + + + Input.Result staticResult = input.execute(null, new Payload.Simple()); + assertEquals(staticResult.payload().data().get("foo"), "bar"); + List baz = (List)staticResult.payload().data().get("baz"); + assertTrue(baz.isEmpty()); + } + + public void testParserInvalid() throws Exception { + XContentBuilder jsonBuilder = jsonBuilder().value("just a string"); + + InputFactory parser = new SimpleInputFactory(Settings.builder().build()); + XContentParser xContentParser = createParser(jsonBuilder); + xContentParser.nextToken(); 
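+ // a simple input must be declared as a JSON object; a bare string value is expected to be rejected with a parse exception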
+ try { + parser.parseInput("_id", xContentParser); + fail("[simple] input parse should fail with an InputException for an empty json object"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), containsString("expected an object but found [VALUE_STRING] instead")); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/transform/TransformInputTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/transform/TransformInputTests.java new file mode 100644 index 0000000000000..aee5a5e07f09e --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/transform/TransformInputTests.java @@ -0,0 +1,135 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.input.transform; + +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.input.ExecutableInput; +import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.core.watcher.transform.ExecutableTransform; +import org.elasticsearch.xpack.core.watcher.transform.TransformFactory; +import org.elasticsearch.xpack.core.watcher.transform.TransformRegistry; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.Watcher; +import org.elasticsearch.xpack.watcher.test.WatcherTestUtils; +import org.elasticsearch.xpack.watcher.transform.script.ExecutableScriptTransform; +import org.elasticsearch.xpack.watcher.transform.script.ScriptTransform; +import org.elasticsearch.xpack.watcher.transform.script.ScriptTransformFactory; +import org.junit.Before; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; + +public class TransformInputTests extends ESTestCase { + + private ScriptService scriptService; + + @Before + public void setupScriptService() { + Map engines = new HashMap<>(); + engines.put(MockScriptEngine.NAME, new MockScriptEngine(MockScriptEngine.NAME, Collections.singletonMap("1", s -> "2"))); + Map> contexts = new HashMap<>(); + contexts.put(Watcher.SCRIPT_TEMPLATE_CONTEXT.name, Watcher.SCRIPT_TEMPLATE_CONTEXT); + contexts.put(Watcher.SCRIPT_SEARCH_CONTEXT.name, Watcher.SCRIPT_SEARCH_CONTEXT); + contexts.put(Watcher.SCRIPT_EXECUTABLE_CONTEXT.name, Watcher.SCRIPT_EXECUTABLE_CONTEXT); + scriptService = new ScriptService(Settings.EMPTY, engines, contexts); + } + + public void testExecute() { + Script script = new Script(ScriptType.INLINE, 
MockScriptEngine.NAME, "1", Collections.emptyMap(), Collections.emptyMap()); + ScriptTransform scriptTransform = ScriptTransform.builder(script).build(); + TransformInput transformInput = new TransformInput(scriptTransform); + + ExecutableTransform executableTransform = new ExecutableScriptTransform(scriptTransform, logger, scriptService); + ExecutableInput input = new ExecutableTransformInput(transformInput, logger, executableTransform); + + WatchExecutionContext ctx = WatcherTestUtils.mockExecutionContext("_id", Payload.EMPTY); + Input.Result result = input.execute(ctx, new Payload.Simple()); + assertThat(result.payload().data().size(), is(1)); + assertThat(result.payload().data(), hasEntry("_value", "2")); + } + + public void testParserValid() throws Exception { + Map transformFactories = Collections.singletonMap("script", + new ScriptTransformFactory(Settings.EMPTY, scriptService)); + TransformRegistry registry = new TransformRegistry(Settings.EMPTY, transformFactories); + TransformInputFactory factory = new TransformInputFactory(Settings.EMPTY, registry); + + // { "script" : { "lang" : "mockscript", "source" : "1" } } + XContentBuilder builder = jsonBuilder().startObject().startObject("script") + .field("lang", MockScriptEngine.NAME) + .field("source", "1") + .endObject().endObject(); + + XContentParser parser = createParser(builder); + parser.nextToken(); + ExecutableTransformInput executableTransformInput = factory.parseExecutable("_id", parser); + + WatchExecutionContext ctx = WatcherTestUtils.mockExecutionContext("_id", Payload.EMPTY); + TransformInput.Result result = executableTransformInput.execute(ctx, Payload.EMPTY); + assertThat(result.payload().data().size(), is(1)); + assertThat(result.payload().data(), hasEntry("_value", "2")); + } + + public void testParserInvalid() throws Exception { + XContentBuilder jsonBuilder = jsonBuilder().value("just a string"); + + Map transformFactories = Collections.singletonMap("script", + new ScriptTransformFactory(Settings.EMPTY, scriptService)); + TransformRegistry registry = new TransformRegistry(Settings.EMPTY, transformFactories); + TransformInputFactory factory = new TransformInputFactory(Settings.EMPTY, registry); + XContentParser parser = createParser(jsonBuilder); + + parser.nextToken(); + expectThrows(ParsingException.class, () -> factory.parseInput("_id", parser)); + } + + public void testTransformResultToXContent() throws Exception { + Map data = Collections.singletonMap("foo", "bar"); + TransformInput.Result result = new TransformInput.Result(new Payload.Simple(data)); + try (XContentBuilder builder = jsonBuilder()) { + result.toXContent(builder, ToXContent.EMPTY_PARAMS); + } + } + + public void testTransformInputToXContentIsSameAsParsing() throws Exception { + Map transformFactories = Collections.singletonMap("script", + new ScriptTransformFactory(Settings.EMPTY, scriptService)); + TransformRegistry registry = new TransformRegistry(Settings.EMPTY, transformFactories); + TransformInputFactory factory = new TransformInputFactory(Settings.EMPTY, registry); + + XContentBuilder jsonBuilder = jsonBuilder().startObject().startObject("script") + .field("source", "1") + .field("lang", "mockscript") + .endObject().endObject(); + XContentParser parser = createParser(jsonBuilder); + + parser.nextToken(); + TransformInput transformInput = factory.parseInput("whatever", parser); + + XContentBuilder output = jsonBuilder(); + transformInput.toXContent(output, ToXContent.EMPTY_PARAMS); + + assertThat(Strings.toString(jsonBuilder), 
is(Strings.toString(output))); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/AccountTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/AccountTests.java new file mode 100644 index 0000000000000..4eae885c4af1a --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/AccountTests.java @@ -0,0 +1,296 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.email; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.common.secret.Secret; +import org.elasticsearch.xpack.watcher.notification.email.support.EmailServer; +import org.junit.After; +import org.junit.Before; + +import javax.mail.Address; +import javax.mail.Message; +import javax.mail.internet.InternetAddress; + +import java.util.Properties; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItemInArray; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class AccountTests extends ESTestCase { + + private EmailServer server; + + @Before + public void init() throws Exception { + server = EmailServer.localhost(logger); + } + + @After + public void cleanup() throws Exception { + server.stop(); + } + + public void testConfig() throws Exception { + String accountName = "_name"; + + Settings.Builder builder = Settings.builder(); + + Profile profile = rarely() ? Profile.STANDARD : randomFrom(Profile.values()); + if (profile != Profile.STANDARD) { + builder.put("profile", profile.name()); + } + + Account.Config.EmailDefaults emailDefaults; + if (randomBoolean()) { + Settings.Builder sb = Settings.builder(); + if (randomBoolean()) { + sb.put(Email.Field.FROM.getPreferredName(), "from@domain"); + } + if (randomBoolean()) { + sb.put(Email.Field.REPLY_TO.getPreferredName(), "replyto@domain"); + } + if (randomBoolean()) { + sb.put(Email.Field.PRIORITY.getPreferredName(), randomFrom(Email.Priority.values())); + } + if (randomBoolean()) { + sb.put(Email.Field.TO.getPreferredName(), "to@domain"); + } + if (randomBoolean()) { + sb.put(Email.Field.CC.getPreferredName(), "cc@domain"); + } + if (randomBoolean()) { + sb.put(Email.Field.BCC.getPreferredName(), "bcc@domain"); + } + if (randomBoolean()) { + sb.put(Email.Field.SUBJECT.getPreferredName(), "_subject"); + } + Settings settings = sb.build(); + emailDefaults = new Account.Config.EmailDefaults(accountName, settings); + for (String name : settings.names()) { + builder.put("email_defaults." 
+ name, settings.get(name)); + } + } else { + emailDefaults = new Account.Config.EmailDefaults(accountName, Settings.EMPTY); + } + + Properties smtpProps = new Properties(); + Settings.Builder smtpBuilder = Settings.builder(); + String host = "somehost"; + String setting = randomFrom("host", "localaddress", "local_address"); + smtpBuilder.put(setting, host); + if (setting.equals("local_address")) { + // we need to remove the `_`... we only added support for `_` for readability + // the actual properties (java mail properties) don't contain underscores + setting = "localaddress"; + } + smtpProps.put("mail.smtp." + setting, host); + String user = null; + if (randomBoolean()) { + user = randomAlphaOfLength(5); + setting = randomFrom("user", "from"); + smtpBuilder.put(setting, user); + smtpProps.put("mail.smtp." + setting, user); + } + int port = 25; + if (randomBoolean()) { + port = randomIntBetween(2000, 2500); + setting = randomFrom("port", "localport", "local_port"); + smtpBuilder.put(setting, port); + if (setting.equals("local_port")) { + setting = "localport"; + } + smtpProps.setProperty("mail.smtp." + setting, String.valueOf(port)); + } + String password = null; + if (randomBoolean()) { + password = randomAlphaOfLength(8); + smtpBuilder.put("password", password); + smtpProps.put("mail.smtp.password", password); + } + for (int i = 0; i < 5; i++) { + String name = randomAlphaOfLength(5); + String value = randomAlphaOfLength(6); + smtpProps.put("mail.smtp." + name, value); + smtpBuilder.put(name, value); + } + + // default properties + for (String name : new String[]{ "connection_timeout", "write_timeout", "timeout"}) { + String propertyName = name.replaceAll("_", ""); + smtpProps.put("mail.smtp." + propertyName, + String.valueOf(TimeValue.parseTimeValue(Account.DEFAULT_SMTP_TIMEOUT_SETTINGS.get(name), name).millis())); + } + + Settings smtpSettings = smtpBuilder.build(); + for (String name : smtpSettings.names()) { + builder.put("smtp." 
+ name, smtpSettings.get(name)); + } + + Settings settings = builder.build(); + + Account.Config config = new Account.Config(accountName, settings); + + assertThat(config.profile, is(profile)); + assertThat(config.defaults, equalTo(emailDefaults)); + assertThat(config.smtp, notNullValue()); + assertThat(config.smtp.port, is(port)); + assertThat(config.smtp.host, is(host)); + assertThat(config.smtp.user, is(user)); + if (password != null) { + assertThat(config.smtp.password, is(password.toCharArray())); + } else { + assertThat(config.smtp.password, nullValue()); + } + assertThat(config.smtp.properties, equalTo(smtpProps)); + } + + public void testSend() throws Exception { + Account account = new Account(new Account.Config("default", Settings.builder() + .put("smtp.host", "localhost") + .put("smtp.port", server.port()) + .put("smtp.user", EmailServer.USERNAME) + .put("smtp.password", EmailServer.PASSWORD) + .build()), null, logger); + + Email email = Email.builder() + .id("_id") + .from(new Email.Address("from@domain.com")) + .to(Email.AddressList.parse("To")) + .subject("_subject") + .textBody("_text_body") + .build(); + + final CountDownLatch latch = new CountDownLatch(1); + server.addListener(message -> { + assertThat(message.getFrom().length, is(1)); + assertThat(message.getFrom()[0], equalTo(new InternetAddress("from@domain.com"))); + assertThat(message.getRecipients(Message.RecipientType.TO).length, is(1)); + assertThat(message.getRecipients(Message.RecipientType.TO)[0], + equalTo(new InternetAddress("to@domain.com", "To"))); + assertThat(message.getSubject(), equalTo("_subject")); + assertThat(Profile.STANDARD.textBody(message), equalTo("_text_body")); + latch.countDown(); + }); + + account.send(email, null, Profile.STANDARD); + + if (!latch.await(5, TimeUnit.SECONDS)) { + fail("waiting for email too long"); + } + } + + public void testSendCCAndBCC() throws Exception { + Account account = new Account(new Account.Config("default", Settings.builder() + .put("smtp.host", "localhost") + .put("smtp.port", server.port()) + .put("smtp.user", EmailServer.USERNAME) + .put("smtp.password", EmailServer.PASSWORD) + .build()), null, logger); + + Email email = Email.builder() + .id("_id") + .from(new Email.Address("from@domain.com")) + .to(Email.AddressList.parse("TO")) + .cc(Email.AddressList.parse("CC1,cc2@domain.com")) + .bcc(Email.AddressList.parse("BCC1,bcc2@domain.com")) + .replyTo(Email.AddressList.parse("noreply@domain.com")) + .build(); + + final CountDownLatch latch = new CountDownLatch(5); + server.addListener(message -> { + assertThat(message.getFrom().length, is(1)); + assertThat(message.getFrom()[0], equalTo(new InternetAddress("from@domain.com"))); + assertThat(message.getRecipients(Message.RecipientType.TO).length, is(1)); + assertThat(message.getRecipients(Message.RecipientType.TO)[0], equalTo(new InternetAddress("to@domain.com", "TO"))); + assertThat(message.getRecipients(Message.RecipientType.CC).length, is(2)); + assertThat(message.getRecipients(Message.RecipientType.CC), + hasItemInArray((Address) new InternetAddress("cc1@domain.com", "CC1"))); + assertThat(message.getRecipients(Message.RecipientType.CC), hasItemInArray((Address) new InternetAddress("cc2@domain.com"))); + assertThat(message.getReplyTo(), arrayWithSize(1)); + assertThat(message.getReplyTo(), hasItemInArray((Address) new InternetAddress("noreply@domain.com"))); + // bcc should not be there... 
(it's bcc after all) + latch.countDown(); + }); + + account.send(email, null, Profile.STANDARD); + + if (!latch.await(5, TimeUnit.SECONDS)) { + fail("waiting for email too long"); + } + } + + public void testSendAuthentication() throws Exception { + Account account = new Account(new Account.Config("default", Settings.builder() + .put("smtp.host", "localhost") + .put("smtp.port", server.port()) + .build()), null, logger); + + Email email = Email.builder() + .id("_id") + .from(new Email.Address("from@domain.com")) + .to(Email.AddressList.parse("To")) + .subject("_subject") + .textBody("_text_body") + .build(); + + final CountDownLatch latch = new CountDownLatch(1); + server.addListener(message -> latch.countDown()); + + account.send(email, new Authentication(EmailServer.USERNAME, new Secret(EmailServer.PASSWORD.toCharArray())), Profile.STANDARD); + + if (!latch.await(5, TimeUnit.SECONDS)) { + fail("waiting for email too long"); + } + } + + public void testDefaultAccountTimeout() { + Account account = new Account(new Account.Config("default", Settings.builder() + .put("smtp.host", "localhost") + .put("smtp.port", server.port()) + .build()), null, logger); + + Properties mailProperties = account.getConfig().smtp.properties; + assertThat(mailProperties.get("mail.smtp.connectiontimeout"), is(String.valueOf(TimeValue.timeValueMinutes(2).millis()))); + assertThat(mailProperties.get("mail.smtp.writetimeout"), is(String.valueOf(TimeValue.timeValueMinutes(2).millis()))); + assertThat(mailProperties.get("mail.smtp.timeout"), is(String.valueOf(TimeValue.timeValueMinutes(2).millis()))); + } + + public void testAccountTimeoutsCanBeConfigureAsTimeValue() { + Account account = new Account(new Account.Config("default", Settings.builder() + .put("smtp.host", "localhost") + .put("smtp.port", server.port()) + .put("smtp.connection_timeout", TimeValue.timeValueMinutes(4)) + .put("smtp.write_timeout", TimeValue.timeValueMinutes(6)) + .put("smtp.timeout", TimeValue.timeValueMinutes(8)) + .build()), null, logger); + + Properties mailProperties = account.getConfig().smtp.properties; + + assertThat(mailProperties.get("mail.smtp.connectiontimeout"), is(String.valueOf(TimeValue.timeValueMinutes(4).millis()))); + assertThat(mailProperties.get("mail.smtp.writetimeout"), is(String.valueOf(TimeValue.timeValueMinutes(6).millis()))); + assertThat(mailProperties.get("mail.smtp.timeout"), is(String.valueOf(TimeValue.timeValueMinutes(8).millis()))); + } + + public void testAccountTimeoutsConfiguredAsNumberAreRejected() { + expectThrows(IllegalArgumentException.class, () -> { + new Account(new Account.Config("default", Settings.builder() + .put("smtp.host", "localhost") + .put("smtp.port", server.port()) + .put("smtp.connection_timeout", 4000) + .build()), null, logger); + }); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/AccountsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/AccountsTests.java new file mode 100644 index 0000000000000..cfe359925e6bf --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/AccountsTests.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.email; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; +import java.util.HashSet; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.isOneOf; +import static org.hamcrest.Matchers.notNullValue; + +public class AccountsTests extends ESTestCase { + public void testSingleAccount() throws Exception { + Settings.Builder builder = Settings.builder() + .put("default_account", "account1"); + addAccountSettings("account1", builder); + EmailService service = new EmailService(builder.build(), null, + new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))); + Account account = service.getAccount("account1"); + assertThat(account, notNullValue()); + assertThat(account.name(), equalTo("account1")); + account = service.getAccount(null); // falling back on the default + assertThat(account, notNullValue()); + assertThat(account.name(), equalTo("account1")); + } + + public void testSingleAccountNoExplicitDefault() throws Exception { + Settings.Builder builder = Settings.builder(); + addAccountSettings("account1", builder); + EmailService service = new EmailService(builder.build(), null, + new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))); + Account account = service.getAccount("account1"); + assertThat(account, notNullValue()); + assertThat(account.name(), equalTo("account1")); + account = service.getAccount(null); // falling back on the default + assertThat(account, notNullValue()); + assertThat(account.name(), equalTo("account1")); + } + + public void testMultipleAccounts() throws Exception { + Settings.Builder builder = Settings.builder() + .put("xpack.notification.email.default_account", "account1"); + addAccountSettings("account1", builder); + addAccountSettings("account2", builder); + + EmailService service = new EmailService(builder.build(), null, + new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))); + Account account = service.getAccount("account1"); + assertThat(account, notNullValue()); + assertThat(account.name(), equalTo("account1")); + account = service.getAccount("account2"); + assertThat(account, notNullValue()); + assertThat(account.name(), equalTo("account2")); + account = service.getAccount(null); // falling back on the default + assertThat(account, notNullValue()); + assertThat(account.name(), equalTo("account1")); + } + + public void testMultipleAccountsNoExplicitDefault() throws Exception { + Settings.Builder builder = Settings.builder() + .put("default_account", "account1"); + addAccountSettings("account1", builder); + addAccountSettings("account2", builder); + + EmailService service = new EmailService(builder.build(), null, + new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))); + Account account = service.getAccount("account1"); + assertThat(account, notNullValue()); + assertThat(account.name(), equalTo("account1")); + account = service.getAccount("account2"); + assertThat(account, notNullValue()); + assertThat(account.name(), equalTo("account2")); + account = service.getAccount(null); + assertThat(account, notNullValue()); + assertThat(account.name(), isOneOf("account1", "account2")); + } + + public void testMultipleAccountsUnknownDefault() 
throws Exception { + Settings.Builder builder = Settings.builder().put("xpack.notification.email.default_account", "unknown"); + addAccountSettings("account1", builder); + addAccountSettings("account2", builder); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings())); + SettingsException e = expectThrows(SettingsException.class, () -> new EmailService(builder.build(), null, clusterSettings)); + assertThat(e.getMessage(), is("could not find default account [unknown]")); + } + + public void testNoAccount() throws Exception { + Settings.Builder builder = Settings.builder(); + EmailService service = new EmailService(builder.build(), null, + new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))); + expectThrows(IllegalArgumentException.class, () -> service.getAccount(null)); + } + + public void testNoAccountWithDefaultAccount() throws Exception { + Settings settings = Settings.builder().put("xpack.notification.email.default_account", "unknown").build(); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings())); + SettingsException e = expectThrows(SettingsException.class, () -> new EmailService(settings, null, clusterSettings)); + assertThat(e.getMessage(), is("could not find default account [unknown]")); + } + + private void addAccountSettings(String name, Settings.Builder builder) { + builder.put("xpack.notification.email.account." + name + ".smtp.host", "_host"); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/DataAttachmentTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/DataAttachmentTests.java new file mode 100644 index 0000000000000..e947ee401400f --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/DataAttachmentTests.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.email; + +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.test.ESTestCase; + +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.Map; + +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.is; + +public class DataAttachmentTests extends ESTestCase { + + public void testCreateJson() throws Exception { + Map data = singletonMap("key", "value"); + Attachment attachment = DataAttachment.JSON.create("data", data); + InputStream input = attachment.bodyPart().getDataHandler().getInputStream(); + String content = Streams.copyToString(new InputStreamReader(input, StandardCharsets.UTF_8)); + assertThat(content, is("{\n \"key\" : \"value\"\n}")); + } + + public void testCreateYaml() throws Exception { + Map data = singletonMap("key", "value"); + Attachment attachment = DataAttachment.YAML.create("data", data); + InputStream input = attachment.bodyPart().getDataHandler().getInputStream(); + String content = Streams.copyToString(new InputStreamReader(input, StandardCharsets.UTF_8)); + // the yaml factory in es always emits unix line breaks + // this seems to be a bug in jackson yaml factory that doesn't default to the platform line break + assertThat(content, is("---\nkey: \"value\"\n")); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailSecretsIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailSecretsIntegrationTests.java new file mode 100644 index 0000000000000..1b48529ad057e --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailSecretsIntegrationTests.java @@ -0,0 +1,156 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.email; + +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.xpack.core.watcher.WatcherField; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.crypto.CryptoService; +import org.elasticsearch.xpack.core.watcher.crypto.CryptoServiceTests; +import org.elasticsearch.xpack.core.watcher.execution.ActionExecutionMode; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchResponse; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.actions.ActionBuilders; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.notification.email.support.EmailServer; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.junit.After; + +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.cron; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.startsWith; + +public class EmailSecretsIntegrationTests extends AbstractWatcherIntegrationTestCase { + private EmailServer server; + private Boolean encryptSensitiveData; + private byte[] encryptionKey; + + @Override + public void setUp() throws Exception { + super.setUp(); + server = EmailServer.localhost(logger); + } + + @After + public void cleanup() throws Exception { + server.stop(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + if (encryptSensitiveData == null) { + encryptSensitiveData = randomBoolean(); + if (encryptSensitiveData) { + encryptionKey = CryptoServiceTests.generateKey(); + } + } + Settings.Builder builder = Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put("xpack.notification.email.account.test.smtp.auth", true) + .put("xpack.notification.email.account.test.smtp.port", server.port()) + .put("xpack.notification.email.account.test.smtp.host", "localhost") + .put("xpack.watcher.encrypt_sensitive_data", encryptSensitiveData); + if (encryptSensitiveData) { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setFile(WatcherField.ENCRYPTION_KEY_SETTING.getKey(), encryptionKey); + builder.setSecureSettings(secureSettings); + } + return builder.build(); + } + + public void testEmail() throws Exception { + 
WatcherClient watcherClient = watcherClient(); + watcherClient.preparePutWatch("_id") + .setSource(watchBuilder() + .trigger(schedule(cron("0 0 0 1 * ? 2020"))) + .input(simpleInput()) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_email", ActionBuilders.emailAction( + EmailTemplate.builder() + .from("_from") + .to("_to") + .subject("_subject")) + .setAuthentication(EmailServer.USERNAME, EmailServer.PASSWORD.toCharArray()))) + .get(); + + // verifying the email password is stored encrypted in the index + GetResponse response = client().prepareGet(Watch.INDEX, Watch.DOC_TYPE, "_id").get(); + assertThat(response, notNullValue()); + assertThat(response.getId(), is("_id")); + Map source = response.getSource(); + Object value = XContentMapValues.extractValue("actions._email.email.password", source); + assertThat(value, notNullValue()); + if (encryptSensitiveData) { + assertThat(value, not(is(EmailServer.PASSWORD))); + MockSecureSettings mockSecureSettings = new MockSecureSettings(); + mockSecureSettings.setFile(WatcherField.ENCRYPTION_KEY_SETTING.getKey(), encryptionKey); + Settings settings = Settings.builder().setSecureSettings(mockSecureSettings).build(); + CryptoService cryptoService = new CryptoService(settings); + assertThat(new String(cryptoService.decrypt(((String) value).toCharArray())), is(EmailServer.PASSWORD)); + } else { + assertThat(value, is(EmailServer.PASSWORD)); + } + + // verifying the password is not returned by the GET watch API + GetWatchResponse watchResponse = watcherClient.prepareGetWatch("_id").get(); + assertThat(watchResponse, notNullValue()); + assertThat(watchResponse.getId(), is("_id")); + XContentSource contentSource = watchResponse.getSource(); + value = contentSource.getValue("actions._email.email.password"); + if (encryptSensitiveData) { + assertThat(value.toString(), startsWith("::es_encrypted::")); + } else { + assertThat(value, is("::es_redacted::")); + } + + // now we restart, to make sure the watches and their secrets are reloaded from the index properly + stopWatcher(); + startWatcher(); + + // now lets execute the watch manually + final CountDownLatch latch = new CountDownLatch(1); + server.addListener(message -> { + assertThat(message.getSubject(), is("_subject")); + latch.countDown(); + }); + + TriggerEvent triggerEvent = new ScheduleTriggerEvent(new DateTime(DateTimeZone.UTC), new DateTime(DateTimeZone.UTC)); + ExecuteWatchResponse executeResponse = watcherClient.prepareExecuteWatch("_id") + .setRecordExecution(false) + .setTriggerEvent(triggerEvent) + .setActionMode("_all", ActionExecutionMode.FORCE_EXECUTE) + .get(); + assertThat(executeResponse, notNullValue()); + contentSource = executeResponse.getRecordSource(); + + value = contentSource.getValue("result.actions.0.status"); + assertThat(value, is("success")); + + if (!latch.await(5, TimeUnit.SECONDS)) { + fail("waiting too long for the email to be sent"); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailServiceTests.java new file mode 100644 index 0000000000000..42ae7f8d9ce77 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailServiceTests.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.email; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.common.secret.Secret; +import org.junit.Before; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Properties; + +import static org.apache.logging.log4j.ThreadContext.containsKey; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class EmailServiceTests extends ESTestCase { + private EmailService service; + private Account account; + + @Before + public void init() throws Exception { + account = mock(Account.class); + service = new EmailService(Settings.builder().put("xpack.notification.email.account.account1.foo", "bar").build(), null, + new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))) { + @Override + protected Account createAccount(String name, Settings accountSettings) { + return account; + } + }; + } + + public void testSend() throws Exception { + when(account.name()).thenReturn("account1"); + Email email = mock(Email.class); + Authentication auth = new Authentication("user", new Secret("passwd".toCharArray())); + Profile profile = randomFrom(Profile.values()); + when(account.send(email, auth, profile)).thenReturn(email); + EmailService.EmailSent sent = service.send(email, auth, profile, "account1"); + verify(account).send(email, auth, profile); + assertThat(sent, notNullValue()); + assertThat(sent.email(), sameInstance(email)); + assertThat(sent.account(), is("account1")); + } + + public void testAccountSmtpPropertyConfiguration() { + Settings settings = Settings.builder() + .put("xpack.notification.email.account.account1.smtp.host", "localhost") + .put("xpack.notification.email.account.account1.smtp.starttls.required", "true") + .put("xpack.notification.email.account.account2.smtp.host", "localhost") + .put("xpack.notification.email.account.account2.smtp.connection_timeout", "1m") + .put("xpack.notification.email.account.account2.smtp.timeout", "1m") + .put("xpack.notification.email.account.account2.smtp.write_timeout", "1m") + .put("xpack.notification.email.account.account3.smtp.host", "localhost") + .put("xpack.notification.email.account.account3.smtp.send_partial", true) + .put("xpack.notification.email.account.account4.smtp.host", "localhost") + .put("xpack.notification.email.account.account4.smtp.local_address", "localhost") + .put("xpack.notification.email.account.account4.smtp.local_port", "1025") + .put("xpack.notification.email.account.account5.smtp.host", "localhost") + .put("xpack.notification.email.account.account5.smtp.wait_on_quit", true) + .build(); + EmailService emailService = new EmailService(settings, null, + new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))); + + Account account1 = emailService.getAccount("account1"); + Properties properties1 = account1.getConfig().smtp.properties; + 
assertThat(properties1, hasEntry("mail.smtp.starttls.required", "true")); + assertThat(properties1, hasEntry("mail.smtp.connectiontimeout", "120000")); + assertThat(properties1, hasEntry("mail.smtp.writetimeout", "120000")); + assertThat(properties1, hasEntry("mail.smtp.timeout", "120000")); + assertThat(properties1, not(hasKey("mail.smtp.sendpartial"))); + assertThat(properties1, not(hasKey("mail.smtp.waitonquit"))); + assertThat(properties1, not(hasKey("mail.smtp.localport"))); + + Account account2 = emailService.getAccount("account2"); + Properties properties2 = account2.getConfig().smtp.properties; + assertThat(properties2, hasEntry("mail.smtp.connectiontimeout", "60000")); + assertThat(properties2, hasEntry("mail.smtp.writetimeout", "60000")); + assertThat(properties2, hasEntry("mail.smtp.timeout", "60000")); + + Account account3 = emailService.getAccount("account3"); + Properties properties3 = account3.getConfig().smtp.properties; + assertThat(properties3, hasEntry("mail.smtp.sendpartial", "true")); + + Account account4 = emailService.getAccount("account4"); + Properties properties4 = account4.getConfig().smtp.properties; + assertThat(properties4, hasEntry("mail.smtp.localaddress", "localhost")); + assertThat(properties4, hasEntry("mail.smtp.localport", "1025")); + + Account account5 = emailService.getAccount("account5"); + Properties properties5 = account5.getConfig().smtp.properties; + assertThat(properties5, hasEntry("mail.smtp.quitwait", "true")); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailTemplateTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailTemplateTests.java new file mode 100644 index 0000000000000..feded3f243941 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailTemplateTests.java @@ -0,0 +1,117 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.email; + +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.test.MockTextTemplateEngine; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class EmailTemplateTests extends ESTestCase { + + public void testEmailTemplateParserSelfGenerated() throws Exception { + TextTemplate from = randomFrom(new TextTemplate("from@from.com"), null); + List addresses = new ArrayList<>(); + for( int i = 0; i < randomIntBetween(1, 5); ++i){ + addresses.add(new TextTemplate("address" + i + "@test.com")); + } + TextTemplate[] possibleList = addresses.toArray(new TextTemplate[addresses.size()]); + TextTemplate[] replyTo = randomFrom(possibleList, null); + TextTemplate[] to = randomFrom(possibleList, null); + TextTemplate[] cc = randomFrom(possibleList, null); + TextTemplate[] bcc = randomFrom(possibleList, null); + TextTemplate priority = new TextTemplate(randomFrom(Email.Priority.values()).name()); + + TextTemplate subjectTemplate = new TextTemplate("Templated Subject {{foo}}"); + TextTemplate textBodyTemplate = new TextTemplate("Templated Body {{foo}}"); + + TextTemplate htmlBodyTemplate = new TextTemplate("Templated Html Body "); + String htmlBody = "Templated Html Body "; + String sanitizedHtmlBody = "Templated Html Body"; + + EmailTemplate emailTemplate = new EmailTemplate(from, replyTo, priority, to, cc, bcc, subjectTemplate, textBodyTemplate, + htmlBodyTemplate); + + XContentBuilder builder = XContentFactory.jsonBuilder(); + emailTemplate.toXContent(builder, ToXContent.EMPTY_PARAMS); + + XContentParser parser = createParser(builder); + parser.nextToken(); + + EmailTemplate.Parser emailTemplateParser = new EmailTemplate.Parser(); + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else { + assertThat(emailTemplateParser.handle(currentFieldName, parser), is(true)); + } + } + EmailTemplate parsedEmailTemplate = emailTemplateParser.parsedTemplate(); + + Map model = new HashMap<>(); + + HtmlSanitizer htmlSanitizer = mock(HtmlSanitizer.class); + when(htmlSanitizer.sanitize(htmlBody)).thenReturn(sanitizedHtmlBody); + + Email.Builder emailBuilder = parsedEmailTemplate.render(new MockTextTemplateEngine(), model, htmlSanitizer, new HashMap<>()); + + assertThat(emailTemplate.from, equalTo(parsedEmailTemplate.from)); + assertThat(emailTemplate.replyTo, equalTo(parsedEmailTemplate.replyTo)); + assertThat(emailTemplate.priority, equalTo(parsedEmailTemplate.priority)); + assertThat(emailTemplate.to, equalTo(parsedEmailTemplate.to)); + assertThat(emailTemplate.cc, equalTo(parsedEmailTemplate.cc)); + assertThat(emailTemplate.bcc, equalTo(parsedEmailTemplate.bcc)); + assertThat(emailTemplate.subject, 
equalTo(parsedEmailTemplate.subject)); + assertThat(emailTemplate.textBody, equalTo(parsedEmailTemplate.textBody)); + assertThat(emailTemplate.htmlBody, equalTo(parsedEmailTemplate.htmlBody)); + + emailBuilder.id("_id"); + Email email = emailBuilder.build(); + assertThat(email.subject, equalTo(subjectTemplate.getTemplate())); + assertThat(email.textBody, equalTo(textBodyTemplate.getTemplate())); + assertThat(email.htmlBody, equalTo(sanitizedHtmlBody)); + } + + public void testParsingMultipleEmailAddresses() throws Exception { + EmailTemplate template = EmailTemplate.builder() + .from("sender@example.org") + .to("to1@example.org, to2@example.org") + .cc("cc1@example.org, cc2@example.org") + .bcc("bcc1@example.org, bcc2@example.org") + .textBody("blah") + .build(); + + Email email = template.render(new MockTextTemplateEngine(), emptyMap(), null, emptyMap()).id("foo").build(); + + assertThat(email.to.size(), is(2)); + assertThat(email.to, containsInAnyOrder(new Email.Address("to1@example.org"), + new Email.Address("to2@example.org"))); + assertThat(email.cc.size(), is(2)); + assertThat(email.cc, containsInAnyOrder(new Email.Address("cc1@example.org"), + new Email.Address("cc2@example.org"))); + assertThat(email.bcc.size(), is(2)); + assertThat(email.bcc, containsInAnyOrder(new Email.Address("bcc1@example.org"), + new Email.Address("bcc2@example.org"))); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailTests.java new file mode 100644 index 0000000000000..48dd20baa6784 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailTests.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.email; + +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class EmailTests extends ESTestCase { + public void testEmailParserSelfGenerated() throws Exception { + String id = "test-id"; + Email.Address from = randomFrom(new Email.Address("from@from.com"), null); + List addresses = new ArrayList<>(); + for( int i = 0; i < randomIntBetween(1, 5); ++i){ + addresses.add(new Email.Address("address" + i + "@test.com")); + } + Email.AddressList possibleList = new Email.AddressList(addresses); + Email.AddressList replyTo = randomFrom(possibleList, null); + Email.Priority priority = randomFrom(Email.Priority.values()); + DateTime sentDate = new DateTime(randomInt(), DateTimeZone.UTC); + Email.AddressList to = randomFrom(possibleList, null); + Email.AddressList cc = randomFrom(possibleList, null); + Email.AddressList bcc = randomFrom(possibleList, null); + String subject = randomFrom("Random Subject", "", null); + String textBody = randomFrom("Random Body", "", null); + String htmlBody = randomFrom("
<html><body>BODY</body></html>
", "", null); + Map attachments = null; + + Email email = new Email(id, from, replyTo, priority, sentDate, to, cc, bcc, subject, textBody, htmlBody, attachments); + + XContentBuilder builder = XContentFactory.jsonBuilder(); + email.toXContent(builder, ToXContent.EMPTY_PARAMS); + + XContentParser parser = createParser(builder); + parser.nextToken(); + + Email parsedEmail = Email.parse(parser); + + assertThat(email.id, equalTo(parsedEmail.id)); + assertThat(email.from, equalTo(parsedEmail.from)); + assertThat(email.replyTo, equalTo(parsedEmail.replyTo)); + assertThat(email.priority, equalTo(parsedEmail.priority)); + assertThat(email.sentDate, equalTo(parsedEmail.sentDate)); + assertThat(email.to, equalTo(parsedEmail.to)); + assertThat(email.cc, equalTo(parsedEmail.cc)); + assertThat(email.bcc, equalTo(parsedEmail.bcc)); + assertThat(email.subject, equalTo(parsedEmail.subject)); + assertThat(email.textBody, equalTo(parsedEmail.textBody)); + assertThat(email.htmlBody, equalTo(parsedEmail.htmlBody)); + } + +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/HtmlSanitizerTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/HtmlSanitizerTests.java new file mode 100644 index 0000000000000..60e3475437b9e --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/HtmlSanitizerTests.java @@ -0,0 +1,167 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.email; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.watcher.Watcher; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; + + +public class HtmlSanitizerTests extends ESTestCase { + public void testDefaultWithTemplatePlaceholders() { + String blockTag = randomFrom(HtmlSanitizer.BLOCK_TAGS); + while (blockTag.equals("li")) { + blockTag = randomFrom(HtmlSanitizer.BLOCK_TAGS); + } + String html = + "" + + "" + + "" + + "<" + blockTag + ">Hello {{ctx.metadata.name}}" + + "
<ul><li>item1</li></ul>" + + "<ol><li>item2</li></ol>
" + + "meta Testlink meta" + + "" + + ""; + HtmlSanitizer sanitizer = new HtmlSanitizer(Settings.EMPTY); + String sanitizedHtml = sanitizer.sanitize(html); + if (blockTag.equals("ol") || blockTag.equals("ul")) { + assertThat(sanitizedHtml, equalTo( + "" + + "<" + blockTag + ">
<li>Hello {{ctx.metadata.name}}</li><li>" + + "<ul><li>item1</li></ul>" + + "<ol><li>item2</li></ol>
    " + + "meta Testlink " + + "meta")); + } else { + assertThat(sanitizedHtml, equalTo( + "" + + "<" + blockTag + ">Hello {{ctx.metadata.name}}" + + "
<ul><li>item1</li></ul>" + + "<ol><li>item2</li></ol>
    " + + "meta Testlink " + + "meta")); + } + } + + public void testDefaultOnClickDisallowed() { + String badHtml = ""; + HtmlSanitizer sanitizer = new HtmlSanitizer(Settings.EMPTY); + String sanitizedHtml = sanitizer.sanitize(badHtml); + assertThat(sanitizedHtml, equalTo("Click me to display Date and Time.")); + } + + public void testDefaultExternalImageDisallowed() { + String html = "This is a bad image"; + HtmlSanitizer sanitizer = new HtmlSanitizer(Settings.EMPTY); + String sanitizedHtml = sanitizer.sanitize(html); + assertThat(sanitizedHtml, equalTo("This is a bad image")); + } + + public void testDefault_EmbeddedImageAllowed() { + String html = "This is a good image"; + HtmlSanitizer sanitizer = new HtmlSanitizer(Settings.EMPTY); + String sanitizedHtml = sanitizer.sanitize(html); + assertThat(sanitizedHtml, equalTo(html)); + } + + public void testDefaultTablesAllowed() { + String html = "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "" + + "
<table><caption>caption</caption><thead><tr><th>header1</th><th>header2</th></tr></thead><tfoot><tr><td>Sum</td><td>$180</td></tr></tfoot><tbody><tr><td>cost</td><td>180</td></tr></tbody></table>
    "; + HtmlSanitizer sanitizer = new HtmlSanitizer(Settings.EMPTY); + String sanitizedHtml = sanitizer.sanitize(html); + assertThat(sanitizedHtml, equalTo(html)); + } + + public void testAllowStyles() { + String html = "
    "; + Settings settings = Settings.builder().putList("xpack.notification.email.html.sanitization.allow", "_tables", "_styles").build(); + HtmlSanitizer sanitizer = new HtmlSanitizer(settings); + String sanitizedHtml = sanitizer.sanitize(html); + assertThat(sanitizedHtml, equalTo(html)); + } + + public void testDefaultFormattingAllowed() { + String html = "" + + "

    "; + HtmlSanitizer sanitizer = new HtmlSanitizer(Settings.EMPTY); + String sanitizedHtml = sanitizer.sanitize(html); + assertThat(sanitizedHtml, equalTo(html)); + } + + public void testDefaultSciptsDisallowed() { + String html = "This was a dangerous script"; + HtmlSanitizer sanitizer = new HtmlSanitizer(Settings.EMPTY); + String sanitizedHtml = sanitizer.sanitize(html); + assertThat(sanitizedHtml, equalTo("This was a dangerous script")); + } + + public void testCustomDisabled() { + String html = "This is a bad image"; + HtmlSanitizer sanitizer = new HtmlSanitizer(Settings.builder() + .put("xpack.notification.email.html.sanitization.enabled", false) + .build()); + String sanitizedHtml = sanitizer.sanitize(html); + assertThat(sanitizedHtml, equalTo(html)); + } + + public void testCustomAllImageAllowed() { + String html = "This is a bad image"; + HtmlSanitizer sanitizer = new HtmlSanitizer(Settings.builder() + .put("xpack.notification.email.html.sanitization.allow", "img:all") + .build()); + String sanitizedHtml = sanitizer.sanitize(html); + assertThat(sanitizedHtml, equalTo(html)); + } + + public void testCustomTablesDisallowed() { + String html = "
<table><tr><td>cell1</td><td>cell2</td></tr></table>
    "; + HtmlSanitizer sanitizer = new HtmlSanitizer(Settings.builder() + .put("xpack.notification.email.html.sanitization.disallow", "_tables") + .build()); + String sanitizedHtml = sanitizer.sanitize(html); + assertThat(sanitizedHtml, equalTo("cell1cell2")); + } + + public void testEnsureSettingsAreRegistered() { + Settings settings = Settings.builder().put("path.home", createTempDir()).build(); + Watcher watcher = new Watcher(settings); + for (Setting setting : HtmlSanitizer.getSettings()) { + assertThat(watcher.getSettings(), hasItem(setting)); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/ProfileTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/ProfileTests.java new file mode 100644 index 0000000000000..19e7e9de40ca4 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/ProfileTests.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.email; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import javax.mail.BodyPart; +import javax.mail.Part; +import javax.mail.Session; +import javax.mail.internet.MimeMessage; +import javax.mail.internet.MimeMultipart; + +import java.util.Collections; +import java.util.HashSet; + +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +public class ProfileTests extends ESTestCase { + + public void testThatInlineAttachmentsAreCreated() throws Exception { + String path = "/org/elasticsearch/xpack/watcher/actions/email/service/logo.png"; + Attachment attachment = new Attachment.Stream("inline.png", "inline.png", true, + () -> EmailServiceTests.class.getResourceAsStream(path)); + + Email email = Email.builder() + .id("foo") + .from("foo@example.org") + .to("bar@example.org") + .subject(randomAlphaOfLength(10)) + .attach(attachment) + .build(); + + Settings settings = Settings.builder() + .put("xpack.notification.email.default_account", "foo") + .put("xpack.notification.email.account.foo.smtp.host", "_host") + .build(); + + EmailService service = new EmailService(settings, null, + new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))); + Session session = service.getAccount("foo").getConfig().createSession(); + MimeMessage mimeMessage = Profile.STANDARD.toMimeMessage(email, session); + + Object content = ((MimeMultipart) mimeMessage.getContent()).getBodyPart(0).getContent(); + assertThat(content, instanceOf(MimeMultipart.class)); + MimeMultipart multipart = (MimeMultipart) content; + + assertThat(multipart.getCount(), is(2)); + boolean foundInlineAttachment = false; + BodyPart bodyPart = null; + for (int i = 0; i < multipart.getCount(); i++) { + bodyPart = multipart.getBodyPart(i); + if (Part.INLINE.equalsIgnoreCase(bodyPart.getDisposition())) { + foundInlineAttachment = true; + break; + } + } + + assertThat("Expected to find an inline attachment in mime message, but didnt", foundInlineAttachment, is(true)); + } +} \ No newline at end of file diff --git 
a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/DataAttachmentParserTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/DataAttachmentParserTests.java new file mode 100644 index 0000000000000..61e42a4cc3e7d --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/DataAttachmentParserTests.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.email.attachment; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.core.Is.is; + +public class DataAttachmentParserTests extends ESTestCase { + + public void testSerializationWorks() throws Exception { + Map attachmentParsers = new HashMap<>(); + attachmentParsers.put(DataAttachmentParser.TYPE, new DataAttachmentParser()); + EmailAttachmentsParser emailAttachmentsParser = new EmailAttachmentsParser(attachmentParsers); + + String id = "some-id"; + XContentBuilder builder = jsonBuilder().startObject().startObject(id) + .startObject(DataAttachmentParser.TYPE).field("format", randomFrom("yaml", "json")).endObject() + .endObject().endObject(); + XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + logger.info("JSON: {}", Strings.toString(builder)); + + EmailAttachments emailAttachments = emailAttachmentsParser.parse(parser); + assertThat(emailAttachments.getAttachments(), hasSize(1)); + + XContentBuilder toXcontentBuilder = jsonBuilder().startObject(); + List attachments = new ArrayList<>(emailAttachments.getAttachments()); + attachments.get(0).toXContent(toXcontentBuilder, ToXContent.EMPTY_PARAMS); + toXcontentBuilder.endObject(); + assertThat(Strings.toString(toXcontentBuilder), is(Strings.toString(builder))); + } + +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/EmailAttachmentParsersTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/EmailAttachmentParsersTests.java new file mode 100644 index 0000000000000..27f8330ed4782 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/EmailAttachmentParsersTests.java @@ -0,0 +1,232 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.email.attachment; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.http.Scheme; +import org.elasticsearch.xpack.watcher.notification.email.Attachment; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.core.Is.is; +import static org.mockito.Mockito.mock; + +public class EmailAttachmentParsersTests extends ESTestCase { + + private WatchExecutionContext ctx = mock(WatchExecutionContext.class); + + public void testThatCustomParsersCanBeRegistered() throws Exception { + Map parsers = new HashMap<>(); + parsers.put("test", new TestEmailAttachmentParser()); + EmailAttachmentsParser parser = new EmailAttachmentsParser(parsers); + + XContentBuilder builder = jsonBuilder(); + builder.startObject() + .startObject("my-id") + .startObject("test") + .field("foo", "bar") + .endObject() + .endObject() + .startObject("my-other-id") + .startObject("test") + .field("foo", "baz") + .endObject() + .endObject() + .endObject(); + + logger.info("JSON: {}", Strings.toString(builder)); + XContentParser xContentParser = createParser(builder); + EmailAttachments attachments = parser.parse(xContentParser); + assertThat(attachments.getAttachments(), hasSize(2)); + + List emailAttachments = new ArrayList<>(attachments.getAttachments()); + EmailAttachmentParser.EmailAttachment emailAttachment = emailAttachments.get(0); + assertThat(emailAttachment, instanceOf(TestEmailAttachment.class)); + + Attachment attachment = parsers.get("test").toAttachment(ctx, new Payload.Simple(), emailAttachment); + assertThat(attachment.name(), is("my-id")); + assertThat(attachment.contentType(), is("personalContentType")); + + assertThat(parsers.get("test").toAttachment(ctx, new Payload.Simple(), emailAttachments.get(1)).id(), is("my-other-id")); + } + + public void testThatUnknownParserThrowsException() throws IOException { + EmailAttachmentsParser parser = new EmailAttachmentsParser(Collections.emptyMap()); + + XContentBuilder builder = jsonBuilder(); + String type = randomAlphaOfLength(8); + builder.startObject().startObject("some-id").startObject(type).endObject().endObject().endObject(); + + XContentParser xContentParser = createParser(builder); + try { + parser.parse(xContentParser); + fail("Expected random parser of type [" + type + "] to throw an exception"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), containsString("Cannot parse attachment of type [" + type + "]")); + } + } + + public 
void testThatToXContentSerializationWorks() throws Exception { + List attachments = new ArrayList<>(); + attachments.add(new DataAttachment("my-name.json", org.elasticsearch.xpack.watcher.notification.email.DataAttachment.JSON)); + + HttpRequestTemplate requestTemplate = HttpRequestTemplate.builder("localhost", 80).scheme(Scheme.HTTP).path("/").build(); + boolean inline = randomBoolean(); + HttpRequestAttachment httpRequestAttachment = new HttpRequestAttachment("other-id", requestTemplate, inline, null); + + attachments.add(httpRequestAttachment); + EmailAttachments emailAttachments = new EmailAttachments(attachments); + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + emailAttachments.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + logger.info("JSON is: {}", Strings.toString(builder)); + assertThat(Strings.toString(builder), containsString("my-name.json")); + assertThat(Strings.toString(builder), containsString("json")); + assertThat(Strings.toString(builder), containsString("other-id")); + assertThat(Strings.toString(builder), containsString("localhost")); + assertThat(Strings.toString(builder), containsString("/")); + if (inline) { + assertThat(Strings.toString(builder), containsString("inline")); + } + } + + public void testThatTwoAttachmentsWithTheSameIdThrowError() throws Exception { + assumeFalse("Test only makes sense if XContent parser doesn't have strict duplicate checks enabled", + XContent.isStrictDuplicateDetectionEnabled()); + Map parsers = new HashMap<>(); + parsers.put("test", new TestEmailAttachmentParser()); + EmailAttachmentsParser parser = new EmailAttachmentsParser(parsers); + + List attachments = new ArrayList<>(); + attachments.add(new TestEmailAttachment("my-name.json", "value")); + attachments.add(new TestEmailAttachment("my-name.json", "value")); + + EmailAttachments emailAttachments = new EmailAttachments(attachments); + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + emailAttachments.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + logger.info("JSON is: " + Strings.toString(builder)); + + XContentParser xContentParser = createParser(builder); + try { + XContentParser.Token token = xContentParser.currentToken(); + assertNull(token); + + token = xContentParser.nextToken(); + assertThat(token, equalTo(XContentParser.Token.START_OBJECT)); + + token = xContentParser.nextToken(); + assertThat(token, equalTo(XContentParser.Token.FIELD_NAME)); + + parser.parse(xContentParser); + fail("Expected parser to fail but did not happen"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), is("Attachment with id [my-name.json] has already been created, must be renamed")); + } + } + + public class TestEmailAttachmentParser implements EmailAttachmentParser { + + @Override + public String type() { + return "test"; + } + + @Override + public TestEmailAttachment parse(String id, XContentParser parser) throws IOException { + TestEmailAttachment attachment = null; + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else { + if ("foo".equals(currentFieldName)) { + attachment = new TestEmailAttachment(id, parser.text()); + } + } + } + + if (attachment == null) { + throw new ElasticsearchParseException("Expected test parser to have field [foo]"); + } + + return attachment; + } + + @Override + public 
Attachment toAttachment(WatchExecutionContext ctx, Payload payload, TestEmailAttachment attachment) { + return new Attachment.Bytes(attachment.id(), attachment.getValue().getBytes(StandardCharsets.UTF_8), + "personalContentType", false); + } + } + + public static class TestEmailAttachment implements EmailAttachmentParser.EmailAttachment { + + private final String value; + private final String id; + + interface Fields { + ParseField FOO = new ParseField("foo"); + } + + public TestEmailAttachment(String id, String value) { + this.id = id; + this.value = value; + } + + @Override + public String type() { + return "test"; + } + + public String getValue() { + return value; + } + + @Override + public String id() { + return id; + } + + @Override + public boolean inline() { + return false; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject(id) + .startObject(type()) + .field(Fields.FOO.getPreferredName(), value) + .endObject() + .endObject(); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpEmailAttachementParserTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpEmailAttachementParserTests.java new file mode 100644 index 0000000000000..ef71a1157437b --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/HttpEmailAttachementParserTests.java @@ -0,0 +1,159 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.email.attachment; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuthRegistry; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuth; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuthFactory; +import org.elasticsearch.xpack.watcher.test.MockTextTemplateEngine; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.mockExecutionContextBuilder; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.core.Is.is; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class HttpEmailAttachementParserTests extends ESTestCase { + + private HttpRequestTemplate.Parser httpRequestTemplateParser; + private HttpClient httpClient; + private EmailAttachmentsParser emailAttachmentsParser; + private Map attachmentParsers; + + @Before + public void init() throws Exception { + HttpAuthRegistry authRegistry = new HttpAuthRegistry(singletonMap(BasicAuth.TYPE, new BasicAuthFactory(null))); + httpRequestTemplateParser = new HttpRequestTemplate.Parser(authRegistry); + httpClient = mock(HttpClient.class); + + attachmentParsers = new HashMap<>(); + attachmentParsers.put(HttpEmailAttachementParser.TYPE, + new HttpEmailAttachementParser(httpClient, httpRequestTemplateParser, new MockTextTemplateEngine())); + emailAttachmentsParser = new EmailAttachmentsParser(attachmentParsers); + } + + + public void testSerializationWorks() throws Exception { + HttpResponse response = new HttpResponse(200, "This is my response".getBytes(UTF_8)); + when(httpClient.execute(any(HttpRequest.class))).thenReturn(response); + + String id = "some-id"; + XContentBuilder builder = jsonBuilder().startObject().startObject(id) + .startObject(HttpEmailAttachementParser.TYPE) + .startObject("request") + .field("scheme", "http") + .field("host", "test.de") + .field("port", 80) + .field("method", "get") + .field("path", "/foo") + .startObject("params").endObject() + .startObject("headers").endObject() + .endObject(); + + boolean configureContentType = randomBoolean(); + if (configureContentType) { + builder.field("content_type", "application/foo"); + } + boolean isInline = randomBoolean(); + if (isInline) { + 
builder.field("inline", true); + } + builder.endObject().endObject().endObject(); + XContentParser parser = createParser(builder); + + EmailAttachments emailAttachments = emailAttachmentsParser.parse(parser); + assertThat(emailAttachments.getAttachments(), hasSize(1)); + + XContentBuilder toXcontentBuilder = jsonBuilder().startObject(); + List attachments = new ArrayList<>(emailAttachments.getAttachments()); + attachments.get(0).toXContent(toXcontentBuilder, ToXContent.EMPTY_PARAMS); + toXcontentBuilder.endObject(); + assertThat(Strings.toString(toXcontentBuilder), is(Strings.toString(builder))); + + assertThat(attachments.get(0).inline(), is(isInline)); + } + + public void testNonOkHttpCodeThrowsException() throws Exception { + HttpResponse response = new HttpResponse(403, "This is my response".getBytes(UTF_8)); + when(httpClient.execute(any(HttpRequest.class))).thenReturn(response); + + HttpRequestTemplate requestTemplate = HttpRequestTemplate.builder("localhost", 80).path("foo").build(); + HttpRequestAttachment attachment = new HttpRequestAttachment("someid", requestTemplate, false, null); + WatchExecutionContext ctx = createWatchExecutionContext(); + + ElasticsearchException exception = expectThrows(ElasticsearchException.class, + () -> attachmentParsers.get(HttpEmailAttachementParser.TYPE).toAttachment(ctx, new Payload.Simple(), attachment)); + assertThat(exception.getMessage(), is("Watch[watch1] attachment[someid] HTTP error status host[localhost], port[80], " + + "method[GET], path[foo], status[403]")); + } + + public void testEmptyResponseThrowsException() throws Exception { + HttpResponse response = new HttpResponse(200); + when(httpClient.execute(any(HttpRequest.class))).thenReturn(response); + + HttpRequestTemplate requestTemplate = HttpRequestTemplate.builder("localhost", 80).path("foo").build(); + HttpRequestAttachment attachment = new HttpRequestAttachment("someid", requestTemplate, false, null); + WatchExecutionContext ctx = createWatchExecutionContext(); + + ElasticsearchException exception = expectThrows(ElasticsearchException.class, + () -> attachmentParsers.get(HttpEmailAttachementParser.TYPE).toAttachment(ctx, new Payload.Simple(), attachment)); + assertThat(exception.getMessage(), is("Watch[watch1] attachment[someid] HTTP empty response body host[localhost], port[80], " + + "method[GET], path[foo], status[200]")); + } + + public void testHttpClientThrowsException() throws Exception { + when(httpClient.execute(any(HttpRequest.class))).thenThrow(new IOException("whatever")); + + HttpRequestTemplate requestTemplate = HttpRequestTemplate.builder("localhost", 80).path("foo").build(); + HttpRequestAttachment attachment = new HttpRequestAttachment("someid", requestTemplate, false, null); + WatchExecutionContext ctx = createWatchExecutionContext(); + + IOException exception = expectThrows(IOException.class, + () -> attachmentParsers.get(HttpEmailAttachementParser.TYPE).toAttachment(ctx, new Payload.Simple(), attachment)); + assertThat(exception.getMessage(), is("whatever")); + } + + private WatchExecutionContext createWatchExecutionContext() { + DateTime now = DateTime.now(DateTimeZone.UTC); + Wid wid = new Wid(randomAlphaOfLength(5), now); + Map metadata = MapBuilder.newMapBuilder().put("_key", "_val").map(); + return mockExecutionContextBuilder("watch1") + .wid(wid) + .payload(new Payload.Simple()) + .time("watch1", now) + .metadata(metadata) + .buildMock(); + } + + +} diff --git 
a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParserTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParserTests.java new file mode 100644 index 0000000000000..5f08217a33522 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParserTests.java @@ -0,0 +1,430 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.email.attachment; + +import com.fasterxml.jackson.core.io.JsonEOFException; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpMethod; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuth; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuthFactory; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuthRegistry; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuth; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuthFactory; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.email.Attachment; +import org.elasticsearch.xpack.watcher.test.MockTextTemplateEngine; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.mockExecutionContextBuilder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static 
org.hamcrest.core.Is.is; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class ReportingAttachmentParserTests extends ESTestCase { + + private HttpClient httpClient; + private Map attachmentParsers = new HashMap<>(); + private EmailAttachmentsParser emailAttachmentsParser; + private ReportingAttachmentParser reportingAttachmentParser; + private HttpAuthRegistry authRegistry; + private MockTextTemplateEngine templateEngine = new MockTextTemplateEngine(); + private String dashboardUrl = "http://www.example.org/ovb/api/reporting/generate/dashboard/My-Dashboard"; + + @Before + public void init() throws Exception { + httpClient = mock(HttpClient.class); + + Map factories = MapBuilder.newMapBuilder() + .put("basic", new BasicAuthFactory(null)) + .immutableMap(); + authRegistry = new HttpAuthRegistry(factories); + reportingAttachmentParser = new ReportingAttachmentParser(Settings.EMPTY, httpClient, templateEngine, authRegistry); + + attachmentParsers.put(ReportingAttachmentParser.TYPE, reportingAttachmentParser); + emailAttachmentsParser = new EmailAttachmentsParser(attachmentParsers); + } + + public void testSerializationWorks() throws Exception { + String id = "some-id"; + + XContentBuilder builder = jsonBuilder().startObject().startObject(id) + .startObject(ReportingAttachmentParser.TYPE) + .field("url", dashboardUrl); + + Integer retries = null; + boolean withRetries = randomBoolean(); + if (withRetries) { + retries = randomIntBetween(1, 10); + builder.field("retries", retries); + } + + TimeValue interval = null; + boolean withInterval = randomBoolean(); + if (withInterval) { + builder.field("interval", "1s"); + interval = TimeValue.timeValueSeconds(1); + } + + boolean isInline = randomBoolean(); + if (isInline) { + builder.field("inline", true); + } + + HttpAuth auth = null; + boolean withAuth = randomBoolean(); + boolean isPasswordEncrypted = randomBoolean(); + if (withAuth) { + builder.startObject("auth").startObject("basic") + .field("username", "foo") + .field("password", isPasswordEncrypted ? 
"::es_redacted::" :"secret") + .endObject().endObject(); + auth = new BasicAuth("foo", "secret".toCharArray()); + } + + HttpProxy proxy = null; + boolean withProxy = randomBoolean(); + if (withProxy) { + proxy = new HttpProxy("example.org", 8080); + builder.startObject("proxy") + .field("host", proxy.getHost()) + .field("port", proxy.getPort()) + .endObject(); + } + + builder.endObject().endObject().endObject(); + XContentParser parser = createParser(builder); + + EmailAttachments emailAttachments = emailAttachmentsParser.parse(parser); + assertThat(emailAttachments.getAttachments(), hasSize(1)); + + XContentBuilder toXcontentBuilder = jsonBuilder().startObject(); + List attachments = new ArrayList<>(emailAttachments.getAttachments()); + WatcherParams watcherParams = WatcherParams.builder().hideSecrets(isPasswordEncrypted).build(); + attachments.get(0).toXContent(toXcontentBuilder, watcherParams); + toXcontentBuilder.endObject(); + assertThat(Strings.toString(toXcontentBuilder), is(Strings.toString(builder))); + + XContentBuilder attachmentXContentBuilder = jsonBuilder().startObject(); + ReportingAttachment attachment = new ReportingAttachment(id, dashboardUrl, isInline, interval, retries, auth, proxy); + attachment.toXContent(attachmentXContentBuilder, watcherParams); + attachmentXContentBuilder.endObject(); + assertThat(Strings.toString(attachmentXContentBuilder), is(Strings.toString(builder))); + + assertThat(attachments.get(0).inline(), is(isInline)); + } + + public void testGoodCase() throws Exception { + // returns interval HTTP code for five times, then return expected data + String content = randomAlphaOfLength(200); + String path = "/ovb/api/reporting/jobs/download/iu5zfzvk15oa8990bfas9wy2"; + String randomContentType = randomAlphaOfLength(20); + Map headers = new HashMap<>(); + headers.put("Content-Type", new String[] { randomContentType }); + when(httpClient.execute(any(HttpRequest.class))) + .thenReturn(new HttpResponse(200, "{\"path\":\""+ path +"\", \"other\":\"content\"}")) + .thenReturn(new HttpResponse(503)) + .thenReturn(new HttpResponse(503)) + .thenReturn(new HttpResponse(503)) + .thenReturn(new HttpResponse(503)) + .thenReturn(new HttpResponse(503)) + .thenReturn(new HttpResponse(200, content, headers)); + + ReportingAttachment reportingAttachment = + new ReportingAttachment("foo", dashboardUrl, randomBoolean(), TimeValue.timeValueMillis(1), 10, null, null); + Attachment attachment = reportingAttachmentParser.toAttachment(createWatchExecutionContext(), Payload.EMPTY, reportingAttachment); + assertThat(attachment, instanceOf(Attachment.Bytes.class)); + Attachment.Bytes bytesAttachment = (Attachment.Bytes) attachment; + assertThat(new String(bytesAttachment.bytes(), StandardCharsets.UTF_8), is(content)); + assertThat(bytesAttachment.contentType(), is(randomContentType)); + + ArgumentCaptor requestArgumentCaptor = ArgumentCaptor.forClass(HttpRequest.class); + verify(httpClient, times(7)).execute(requestArgumentCaptor.capture()); + assertThat(requestArgumentCaptor.getAllValues(), hasSize(7)); + // first invocation to the original URL + assertThat(requestArgumentCaptor.getAllValues().get(0).path(), is("/ovb/api/reporting/generate/dashboard/My-Dashboard")); + assertThat(requestArgumentCaptor.getAllValues().get(0).method(), is(HttpMethod.POST)); + // all other invocations to the redirected urls from the JSON payload + for (int i = 1; i < 7; i++) { + assertThat(requestArgumentCaptor.getAllValues().get(i).path(), is(path)); + 
assertThat(requestArgumentCaptor.getAllValues().get(i).params().keySet(), hasSize(0)); + } + + // test that the header "kbn-xsrf" has been set to "reporting" in all requests + requestArgumentCaptor.getAllValues().forEach((req) -> assertThat(req.headers(), hasEntry("kbn-xsrf", "reporting"))); + } + + public void testInitialRequestFailsWithError() throws Exception { + when(httpClient.execute(any(HttpRequest.class))) + .thenReturn(new HttpResponse(403)); + ReportingAttachment attachment = new ReportingAttachment("foo", dashboardUrl, randomBoolean(), null, null, null, null); + + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> reportingAttachmentParser.toAttachment(createWatchExecutionContext(), Payload.EMPTY, attachment)); + assertThat(e.getMessage(), containsString("Error response when trying to trigger reporting generation")); + } + + public void testInitialRequestThrowsIOException() throws Exception { + when(httpClient.execute(any(HttpRequest.class))).thenThrow(new IOException("Connection timed out")); + ReportingAttachment attachment = new ReportingAttachment("foo", "http://www.example.org/", randomBoolean(), null, null, null, null); + IOException e = expectThrows(IOException.class, + () -> reportingAttachmentParser.toAttachment(createWatchExecutionContext(), Payload.EMPTY, attachment)); + assertThat(e.getMessage(), containsString("Connection timed out")); + } + + public void testInitialRequestContainsInvalidPayload() throws Exception { + when(httpClient.execute(any(HttpRequest.class))) + // closing json bracket is missing + .thenReturn(new HttpResponse(200, "{\"path\":\"anything\"")); + ReportingAttachment attachment = new ReportingAttachment("foo", dashboardUrl, randomBoolean(), null, null, null, null); + JsonEOFException e = expectThrows(JsonEOFException.class, + () -> reportingAttachmentParser.toAttachment(createWatchExecutionContext(), Payload.EMPTY, attachment)); + assertThat(e.getMessage(), containsString("Unexpected end-of-input")); + } + + public void testInitialRequestContainsPathAsObject() throws Exception { + when(httpClient.execute(any(HttpRequest.class))) + // path must be a field, but is an object here + .thenReturn(new HttpResponse(200, "{\"path\": { \"foo\" : \"anything\"}}")); + ReportingAttachment attachment = new ReportingAttachment("foo", "http://www.example.org/", randomBoolean(), null, null, null, null); + XContentParseException e = expectThrows(XContentParseException.class, + () -> reportingAttachmentParser.toAttachment(createWatchExecutionContext(), Payload.EMPTY, attachment)); + assertThat(e.getMessage(), + containsString("[reporting_attachment_kibana_payload] path doesn't support values of type: START_OBJECT")); + } + + public void testInitialRequestDoesNotContainPathInJson() throws Exception { + when(httpClient.execute(any(HttpRequest.class))).thenReturn(new HttpResponse(200, "{\"foo\":\"bar\"}")); + ReportingAttachment attachment = new ReportingAttachment("foo", dashboardUrl, randomBoolean(), null, null, null, null); + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> reportingAttachmentParser.toAttachment(createWatchExecutionContext(), Payload.EMPTY, attachment)); + assertThat(e.getMessage(), containsString("Watch[watch1] reporting[foo] field path found in JSON payload")); + } + + public void testPollingRequestIsError() throws Exception { + boolean hasBody = randomBoolean(); + when(httpClient.execute(any(HttpRequest.class))) + .thenReturn(new HttpResponse(200, "{\"path\":\"whatever\"}")) + 
.thenReturn(new HttpResponse(403, hasBody ? "no permissions" : null)); + + ReportingAttachment attachment = + new ReportingAttachment("foo", "http://www.example.org/", randomBoolean(), TimeValue.timeValueMillis(1), 10, null, null); + + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> reportingAttachmentParser.toAttachment(createWatchExecutionContext(), Payload.EMPTY, attachment)); + assertThat(e.getMessage(), containsString("Error when polling pdf")); + if (hasBody) { + assertThat(e.getMessage(), containsString("body[no permissions]")); + } + } + + public void testPollingRequestRetryIsExceeded() throws Exception { + when(httpClient.execute(any(HttpRequest.class))) + .thenReturn(new HttpResponse(200, "{\"path\":\"whatever\"}")) + .thenReturn(new HttpResponse(503)) + .thenReturn(new HttpResponse(503)); + + ReportingAttachment attachment = + new ReportingAttachment("foo", "http://www.example.org/", randomBoolean(), TimeValue.timeValueMillis(1), 1, null, null); + + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> reportingAttachmentParser.toAttachment(createWatchExecutionContext(), Payload.EMPTY, attachment)); + assertThat(e.getMessage(), containsString("Aborting due to maximum number of retries hit [1]")); + } + + public void testPollingRequestUnknownHTTPError() throws Exception { + when(httpClient.execute(any(HttpRequest.class))) + .thenReturn(new HttpResponse(200, "{\"path\":\"whatever\"}")) + .thenReturn(new HttpResponse(1)); + + ReportingAttachment attachment = + new ReportingAttachment("foo", "http://www.example.org/", randomBoolean(), TimeValue.timeValueMillis(1), null, null, null); + + IllegalStateException e = expectThrows(IllegalStateException.class, + () -> reportingAttachmentParser.toAttachment(createWatchExecutionContext(), Payload.EMPTY, attachment)); + assertThat(e.getMessage(), containsString("Unexpected status code")); + } + + public void testPollingRequestIOException() throws Exception { + when(httpClient.execute(any(HttpRequest.class))) + .thenReturn(new HttpResponse(200, "{\"path\":\"whatever\"}")) + .thenThrow(new IOException("whatever")); + + ReportingAttachment attachment = + new ReportingAttachment("foo", "http://www.example.org/", randomBoolean(), TimeValue.timeValueMillis(1), null, null, null); + + IOException e = expectThrows(IOException.class, + () -> reportingAttachmentParser.toAttachment(createWatchExecutionContext(), Payload.EMPTY, attachment)); + assertThat(e.getMessage(), containsString("whatever")); + } + + public void testWithBasicAuth() throws Exception { + String content = randomAlphaOfLength(200); + when(httpClient.execute(any(HttpRequest.class))) + .thenReturn(new HttpResponse(200, "{\"path\":\"whatever\"}")) + .thenReturn(new HttpResponse(503)) + .thenReturn(new HttpResponse(200, content)); + + ReportingAttachment attachment = new ReportingAttachment("foo", dashboardUrl, randomBoolean(), + TimeValue.timeValueMillis(1), 10, new BasicAuth("foo", "bar".toCharArray()), null); + + reportingAttachmentParser.toAttachment(createWatchExecutionContext(), Payload.EMPTY, attachment); + + ArgumentCaptor requestArgumentCaptor = ArgumentCaptor.forClass(HttpRequest.class); + verify(httpClient, times(3)).execute(requestArgumentCaptor.capture()); + List allRequests = requestArgumentCaptor.getAllValues(); + assertThat(allRequests, hasSize(3)); + for (HttpRequest request : allRequests) { + assertThat(request.auth(), is(notNullValue())); + assertThat(request.auth().type(), is("basic")); + assertThat(request.auth(), 
instanceOf(BasicAuth.class)); + BasicAuth basicAuth = (BasicAuth) request.auth(); + assertThat(basicAuth.getUsername(), is("foo")); + } + } + + public void testPollingDefaultsRetries() throws Exception { + when(httpClient.execute(any(HttpRequest.class))) + .thenReturn(new HttpResponse(200, "{\"path\":\"whatever\"}")) + .thenReturn(new HttpResponse(503)); + + ReportingAttachment attachment = new ReportingAttachment("foo", dashboardUrl, randomBoolean(), TimeValue.timeValueMillis(1), + ReportingAttachmentParser.RETRIES_SETTING.getDefault(Settings.EMPTY), new BasicAuth("foo", "bar".toCharArray()), null); + expectThrows(ElasticsearchException.class, () -> + reportingAttachmentParser.toAttachment(createWatchExecutionContext(), Payload.EMPTY, attachment)); + + verify(httpClient, times(ReportingAttachmentParser.RETRIES_SETTING.getDefault(Settings.EMPTY) + 1)).execute(any()); + } + + public void testPollingDefaultCanBeOverriddenBySettings() throws Exception { + int retries = 10; + when(httpClient.execute(any(HttpRequest.class))) + .thenReturn(new HttpResponse(200, "{\"path\":\"whatever\"}")) + .thenReturn(new HttpResponse(503)); + + ReportingAttachment attachment = new ReportingAttachment("foo", dashboardUrl, randomBoolean(), null, null, null, null); + + Settings settings = Settings.builder() + .put(ReportingAttachmentParser.INTERVAL_SETTING.getKey(), "1ms") + .put(ReportingAttachmentParser.RETRIES_SETTING.getKey(), retries) + .build(); + + reportingAttachmentParser = new ReportingAttachmentParser(settings, httpClient, templateEngine, authRegistry); + expectThrows(ElasticsearchException.class, () -> + reportingAttachmentParser.toAttachment(createWatchExecutionContext(), Payload.EMPTY, attachment)); + + verify(httpClient, times(retries + 1)).execute(any()); + } + + public void testThatUrlIsTemplatable() throws Exception { + when(httpClient.execute(any(HttpRequest.class))) + .thenReturn(new HttpResponse(200, "{\"path\":\"whatever\"}")) + .thenReturn(new HttpResponse(503)) + .thenReturn(new HttpResponse(200, randomAlphaOfLength(10))); + + TextTemplateEngine replaceHttpWithHttpsTemplateEngine = new TextTemplateEngine(Settings.EMPTY, null) { + @Override + public String render(TextTemplate textTemplate, Map model) { + return textTemplate.getTemplate().replaceAll("REPLACEME", "REPLACED"); + } + }; + + ReportingAttachment attachment = new ReportingAttachment("foo", "http://www.example.org/REPLACEME", randomBoolean(), + TimeValue.timeValueMillis(1), 10, new BasicAuth("foo", "bar".toCharArray()), null); + reportingAttachmentParser = new ReportingAttachmentParser(Settings.EMPTY, httpClient, + replaceHttpWithHttpsTemplateEngine, authRegistry); + reportingAttachmentParser.toAttachment(createWatchExecutionContext(), Payload.EMPTY, attachment); + + ArgumentCaptor requestArgumentCaptor = ArgumentCaptor.forClass(HttpRequest.class); + verify(httpClient, times(3)).execute(requestArgumentCaptor.capture()); + + List paths = requestArgumentCaptor.getAllValues().stream().map(HttpRequest::path).collect(Collectors.toList()); + assertThat(paths, not(hasItem(containsString("REPLACEME")))); + } + + public void testRetrySettingCannotBeNegative() throws Exception { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> + new ReportingAttachment("foo", "http://www.example.org/REPLACEME", randomBoolean(), null, -10, null, null)); + assertThat(e.getMessage(), is("Retries for attachment must be >= 0")); + + Settings invalidSettings = Settings.builder().put("xpack.notification.reporting.retries", 
-10).build(); + e = expectThrows(IllegalArgumentException.class, + () -> new ReportingAttachmentParser(invalidSettings, httpClient, templateEngine, authRegistry)); + assertThat(e.getMessage(), is("Failed to parse value [-10] for setting [xpack.notification.reporting.retries] must be >= 0")); + } + + public void testHttpProxy() throws Exception { + String content = randomAlphaOfLength(200); + String path = "/ovb/api/reporting/jobs/download/iu5zfzvk15oa8990bfas9wy2"; + String randomContentType = randomAlphaOfLength(20); + Map headers = new HashMap<>(); + headers.put("Content-Type", new String[] { randomContentType }); + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(HttpRequest.class); + when(httpClient.execute(requestCaptor.capture())) + .thenReturn(new HttpResponse(200, "{\"path\":\""+ path +"\", \"other\":\"content\"}")) + .thenReturn(new HttpResponse(503)) + .thenReturn(new HttpResponse(200, content, headers)); + + HttpProxy proxy = new HttpProxy("localhost", 8080); + ReportingAttachment reportingAttachment = + new ReportingAttachment("foo", "http://www.example.org/", randomBoolean(), TimeValue.timeValueMillis(1), null, null, proxy); + + reportingAttachmentParser.toAttachment(createWatchExecutionContext(), Payload.EMPTY, reportingAttachment); + + assertThat(requestCaptor.getAllValues(), hasSize(3)); + requestCaptor.getAllValues().forEach(req -> assertThat(req.proxy(), is(proxy))); + } + + private WatchExecutionContext createWatchExecutionContext() { + DateTime now = DateTime.now(DateTimeZone.UTC); + return mockExecutionContextBuilder("watch1") + .wid(new Wid(randomAlphaOfLength(5), now)) + .payload(new Payload.Simple()) + .time("watch1", now) + .metadata(Collections.emptyMap()) + .buildMock(); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/support/EmailServer.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/support/EmailServer.java new file mode 100644 index 0000000000000..4195b25139208 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/support/EmailServer.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.email.support; + +import org.apache.logging.log4j.Logger; +import org.subethamail.smtp.auth.EasyAuthenticationHandlerFactory; +import org.subethamail.smtp.helper.SimpleMessageListener; +import org.subethamail.smtp.helper.SimpleMessageListenerAdapter; +import org.subethamail.smtp.server.SMTPServer; + +import javax.mail.MessagingException; +import javax.mail.Session; +import javax.mail.internet.MimeMessage; +import java.io.IOException; +import java.io.InputStream; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.List; +import java.util.Properties; +import java.util.concurrent.CopyOnWriteArrayList; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.fail; + +/** + * A mini email SMTP server that can be used for unit testing + */ +public class EmailServer { + + public static final String USERNAME = "_user"; + public static final String PASSWORD = "_passwd"; + + private final List listeners = new CopyOnWriteArrayList<>(); + private final SMTPServer server; + + public EmailServer(String host, final Logger logger) { + server = new SMTPServer(new SimpleMessageListenerAdapter(new SimpleMessageListener() { + @Override + public boolean accept(String from, String recipient) { + return true; + } + + @Override + public void deliver(String from, String recipient, InputStream data) throws IOException { + try { + Session session = Session.getInstance(new Properties()); + MimeMessage msg = new MimeMessage(session, data); + for (Listener listener : listeners) { + try { + listener.on(msg); + } catch (Exception e) { + logger.error("Unexpected failure", e); + fail(e.getMessage()); + } + } + } catch (MessagingException me) { + throw new RuntimeException("could not create mime message", me); + } + } + }), new EasyAuthenticationHandlerFactory((user, passwd) -> { + assertThat(user, is(USERNAME)); + assertThat(passwd, is(PASSWORD)); + })); + server.setHostName(host); + server.setPort(0); + } + + /** + * @return the port that the underlying server is listening on + */ + public int port() { + return server.getPort(); + } + + public void start() { + // Must have privileged access because underlying server will accept socket connections + AccessController.doPrivileged((PrivilegedAction) () -> { + server.start(); + return null; + }); + } + + public void stop() { + server.stop(); + listeners.clear(); + } + + public void addListener(Listener listener) { + listeners.add(listener); + } + + public static EmailServer localhost(final Logger logger) { + EmailServer server = new EmailServer("localhost", logger); + server.start(); + return server; + } + + @FunctionalInterface + public interface Listener { + void on(MimeMessage message) throws Exception; + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatAccountsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatAccountsTests.java new file mode 100644 index 0000000000000..ace63da8192b1 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatAccountsTests.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.hipchat; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.test.MockTextTemplateEngine; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; + +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class HipChatAccountsTests extends ESTestCase { + private HttpClient httpClient; + + @Before + public void init() throws Exception { + httpClient = mock(HttpClient.class); + } + + public void testProxy() throws Exception { + Settings.Builder builder = Settings.builder() + .put("xpack.notification.hipchat.default_account", "account1"); + addAccountSettings("account1", builder); + HipChatService service = new HipChatService(builder.build(), httpClient, new ClusterSettings(Settings.EMPTY, + new HashSet<>(HipChatService.getSettings()))); + HipChatAccount account = service.getAccount("account1"); + + HipChatMessage.Template template = new HipChatMessage.Template.Builder(new TextTemplate("foo")) + .addRooms(new TextTemplate("room")) + .setFrom("from") + .build(); + HipChatMessage hipChatMessage = template.render(new MockTextTemplateEngine(), new HashMap<>()); + + ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(HttpRequest.class); + when(httpClient.execute(argumentCaptor.capture())).thenReturn(new HttpResponse(200)); + + HttpProxy proxy = new HttpProxy("localhost", 8080); + account.send(hipChatMessage, proxy); + + HttpRequest request = argumentCaptor.getValue(); + assertThat(request.proxy(), is(proxy)); + } + + private void addAccountSettings(String name, Settings.Builder builder) { + HipChatAccount.Profile profile = randomFrom(HipChatAccount.Profile.values()); + builder.put("xpack.notification.hipchat.account." + name + ".profile", profile.value()); + builder.put("xpack.notification.hipchat.account." + name + ".auth_token", randomAlphaOfLength(50)); + if (profile == HipChatAccount.Profile.INTEGRATION) { + builder.put("xpack.notification.hipchat.account." + name + ".room", randomAlphaOfLength(10)); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatMessageTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatMessageTests.java new file mode 100644 index 0000000000000..96890e5629a26 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatMessageTests.java @@ -0,0 +1,325 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.hipchat; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; + +public class HipChatMessageTests extends ESTestCase { + + public void testToXContent() throws Exception { + String message = randomAlphaOfLength(10); + String[] rooms = generateRandomStringArray(3, 10, true); + String[] users = generateRandomStringArray(3, 10, true); + String from = randomBoolean() ? null : randomAlphaOfLength(10); + HipChatMessage.Format format = rarely() ? null : randomFrom(HipChatMessage.Format.values()); + HipChatMessage.Color color = rarely() ? null : randomFrom(HipChatMessage.Color.values()); + Boolean notify = rarely() ? 
null : randomBoolean(); + HipChatMessage msg = new HipChatMessage(message, rooms, users, from, format, color, notify); + + XContentBuilder builder = jsonBuilder(); + boolean includeTarget = randomBoolean(); + if (includeTarget && randomBoolean()) { + msg.toXContent(builder, ToXContent.EMPTY_PARAMS); + } else { + msg.toXContent(builder, ToXContent.EMPTY_PARAMS, includeTarget); + } + BytesReference bytes = BytesReference.bytes(builder); + + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + + assertThat(parser.currentToken(), is(XContentParser.Token.START_OBJECT)); + + message = null; + rooms = null; + users = null; + from = null; + format = null; + color = null; + notify = null; + XContentParser.Token token = null; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if ("body".equals(currentFieldName)) { + message = parser.text(); + } else if ("room".equals(currentFieldName)) { + rooms = parser.list().stream().map(Object::toString).toArray(String[]::new); + } else if ("user".equals(currentFieldName)) { + users = parser.list().stream().map(Object::toString).toArray(String[]::new); + } else if ("from".equals(currentFieldName)) { + from = parser.text(); + } else if ("format".equals(currentFieldName)) { + format = HipChatMessage.Format.parse(parser); + } else if ("color".equals(currentFieldName)) { + color = HipChatMessage.Color.parse(parser); + } else if ("notify".equals(currentFieldName)) { + notify = parser.booleanValue(); + } else { + fail("unexpected xcontent field [" + currentFieldName + "] in hipchat message"); + } + } + + assertThat(message, notNullValue()); + assertThat(message, is(msg.body)); + if (includeTarget) { + if (msg.rooms == null || msg.rooms.length == 0) { + assertThat(rooms, nullValue()); + } else { + assertThat(rooms, arrayContaining(msg.rooms)); + } + if (msg.users == null || msg.users.length == 0) { + assertThat(users, nullValue()); + } else { + assertThat(users, arrayContaining(msg.users)); + } + } + assertThat(from, is(msg.from)); + assertThat(format, is(msg.format)); + assertThat(color, is(msg.color)); + assertThat(notify, is(msg.notify)); + } + + public void testEquals() throws Exception { + String message = randomAlphaOfLength(10); + String[] rooms = generateRandomStringArray(3, 10, true); + String[] users = generateRandomStringArray(3, 10, true); + String from = randomBoolean() ? null : randomAlphaOfLength(10); + HipChatMessage.Format format = rarely() ? null : randomFrom(HipChatMessage.Format.values()); + HipChatMessage.Color color = rarely() ? null : randomFrom(HipChatMessage.Color.values()); + Boolean notify = rarely() ? null : randomBoolean(); + HipChatMessage msg1 = new HipChatMessage(message, rooms, users, from, format, color, notify); + + boolean equals = randomBoolean(); + if (!equals) { + equals = true; + if (rarely()) { + equals = false; + message = "another message"; + } + if (rarely()) { + equals = false; + rooms = rooms == null ? new String[] { "roomX" } : randomBoolean() ? null : new String[] { "roomX" , "roomY"}; + } + if (rarely()) { + equals = false; + users = users == null ? new String[] { "userX" } : randomBoolean() ? null : new String[] { "userX", "userY" }; + } + if (rarely()) { + equals = false; + from = from == null ? "fromX" : randomBoolean() ? null : "fromY"; + } + if (rarely()) { + equals = false; + format = format == null ? 
+ randomFrom(HipChatMessage.Format.values()) : + randomBoolean() ? + null : + randomFromWithExcludes(HipChatMessage.Format.values(), format); + } + if (rarely()) { + equals = false; + color = color == null ? + randomFrom(HipChatMessage.Color.values()) : + randomBoolean() ? + null : + randomFromWithExcludes(HipChatMessage.Color.values(), color); + } + if (rarely()) { + equals = false; + notify = notify == null ? (Boolean) randomBoolean() : randomBoolean() ? null : !notify; + } + } + + HipChatMessage msg2 = new HipChatMessage(message, rooms, users, from, format, color, notify); + assertThat(msg1.equals(msg2), is(equals)); + } + + public void testTemplateParse() throws Exception { + XContentBuilder jsonBuilder = jsonBuilder(); + jsonBuilder.startObject(); + + TextTemplate body = new TextTemplate(randomAlphaOfLength(200)); + jsonBuilder.field("body", body, ToXContent.EMPTY_PARAMS); + TextTemplate[] rooms = null; + if (randomBoolean()) { + jsonBuilder.startArray("room"); + rooms = new TextTemplate[randomIntBetween(1, 3)]; + for (int i = 0; i < rooms.length; i++) { + rooms[i] = new TextTemplate(randomAlphaOfLength(10)); + rooms[i].toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); + } + jsonBuilder.endArray(); + } + TextTemplate[] users = null; + if (randomBoolean()) { + jsonBuilder.startArray("user"); + users = new TextTemplate[randomIntBetween(1, 3)]; + for (int i = 0; i < users.length; i++) { + users[i] = new TextTemplate(randomAlphaOfLength(10)); + users[i].toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); + } + jsonBuilder.endArray(); + } + String from = null; + if (randomBoolean()) { + from = randomAlphaOfLength(10); + jsonBuilder.field("from", from); + } + TextTemplate color = null; + if (randomBoolean()) { + color = new TextTemplate(randomAlphaOfLength(10)); + jsonBuilder.field("color", color, ToXContent.EMPTY_PARAMS); + } + HipChatMessage.Format format = null; + if (randomBoolean()) { + format = randomFrom(HipChatMessage.Format.values()); + jsonBuilder.field("format", format.value()); + } + Boolean notify = null; + if (randomBoolean()) { + notify = randomBoolean(); + jsonBuilder.field("notify", notify); + } + + BytesReference bytes = BytesReference.bytes(jsonBuilder.endObject()); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + + HipChatMessage.Template template = HipChatMessage.Template.parse(parser); + + assertThat(template, notNullValue()); + assertThat(template.body, is(body)); + if (rooms == null) { + assertThat(template.rooms, nullValue()); + } else { + assertThat(template.rooms, arrayContaining(rooms)); + } + if (users == null) { + assertThat(template.users, nullValue()); + } else { + assertThat(template.users, arrayContaining(users)); + } + assertThat(template.from, is(from)); + assertThat(template.color, is(color)); + assertThat(template.format, is(format)); + assertThat(template.notify, is(notify)); + } + + public void testTemplateParseSelfGenerated() throws Exception { + TextTemplate body = new TextTemplate(randomAlphaOfLength(10)); + HipChatMessage.Template.Builder templateBuilder = new HipChatMessage.Template.Builder(body); + + if (randomBoolean()) { + int count = randomIntBetween(1, 3); + for (int i = 0; i < count; i++) { + templateBuilder.addRooms(new TextTemplate(randomAlphaOfLength(10))); + } + } + if (randomBoolean()) { + int count = randomIntBetween(1, 3); + for (int i = 0; i < count; i++) { + templateBuilder.addUsers(new TextTemplate(randomAlphaOfLength(10))); + } + } + if (randomBoolean()) { + 
templateBuilder.setFrom(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + templateBuilder.setColor(new TextTemplate(randomAlphaOfLength(5))); + } + if (randomBoolean()) { + templateBuilder.setFormat(randomFrom(HipChatMessage.Format.values())); + } + if (randomBoolean()) { + templateBuilder.setNotify(randomBoolean()); + } + HipChatMessage.Template template = templateBuilder.build(); + + XContentBuilder jsonBuilder = jsonBuilder(); + template.toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); + BytesReference bytes = BytesReference.bytes(jsonBuilder); + + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + + HipChatMessage.Template parsed = HipChatMessage.Template.parse(parser); + + assertThat(parsed, equalTo(template)); + } + + public void testAuthTokenParamIsFiltered() throws Exception { + HttpResponse response = new HttpResponse(500); + String token = randomAlphaOfLength(20); + HttpRequest request = HttpRequest.builder("localhost", 1234).setParam("auth_token", token).build(); + + // String body, String[] rooms, String[] users, String from, Format format, Color color, Boolean notify + HipChatMessage hipChatMessage = new HipChatMessage("body", new String[]{"room"}, null, "from", + HipChatMessage.Format.TEXT, HipChatMessage.Color.RED, false); + SentMessages.SentMessage sentMessage = SentMessages.SentMessage.responded("targetName", SentMessages.SentMessage.TargetType.ROOM, + hipChatMessage, request, response); + + + try (XContentBuilder builder = jsonBuilder()) { + WatcherParams params = WatcherParams.builder().hideSecrets(false).build(); + sentMessage.toXContent(builder, params); + assertThat(Strings.toString(builder), containsString(token)); + + try (XContentParser parser = builder.contentType().xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + Strings.toString(builder))) { + parser.map(); + } + } + try (XContentBuilder builder = jsonBuilder()) { + sentMessage.toXContent(builder, ToXContent.EMPTY_PARAMS); + assertThat(Strings.toString(builder), not(containsString(token))); + + try (XContentParser parser = builder.contentType().xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + Strings.toString(builder))) { + parser.map(); + } + } + } + + static E randomFromWithExcludes(E[] values, E... exclude) { + List excludes = Arrays.asList(exclude); + List includes = new ArrayList<>(); + for (E value : values) { + if (!excludes.contains(value)) { + includes.add(value); + } + } + return randomFrom(includes); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatServiceTests.java new file mode 100644 index 0000000000000..bfcf2f408b087 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatServiceTests.java @@ -0,0 +1,269 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.hipchat; + +import com.carrotsearch.randomizedtesting.annotations.Repeat; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.junit.Before; + +import java.util.Collections; +import java.util.HashSet; + +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Mockito.mock; + +public class HipChatServiceTests extends ESTestCase { + private HttpClient httpClient; + + @Before + public void init() throws Exception { + httpClient = mock(HttpClient.class); + } + + public void testSingleAccountV1() throws Exception { + String accountName = randomAlphaOfLength(10); + String host = randomBoolean() ? null : "_host"; + int port = randomBoolean() ? -1 : randomIntBetween(300, 400); + String defaultRoom = randomBoolean() ? null : "_r1, _r2"; + String defaultFrom = randomBoolean() ? null : "_from"; + HipChatMessage.Color defaultColor = randomBoolean() ? null : randomFrom(HipChatMessage.Color.values()); + HipChatMessage.Format defaultFormat = randomBoolean() ? null : randomFrom(HipChatMessage.Format.values()); + Boolean defaultNotify = randomBoolean() ? null : (Boolean) randomBoolean(); + Settings.Builder settingsBuilder = Settings.builder() + .put("xpack.notification.hipchat.account." + accountName + ".profile", HipChatAccount.Profile.V1.value()) + .put("xpack.notification.hipchat.account." + accountName + ".auth_token", "_token"); + if (host != null) { + settingsBuilder.put("xpack.notification.hipchat.account." + accountName + ".host", host); + } + if (port > 0) { + settingsBuilder.put("xpack.notification.hipchat.account." + accountName + ".port", port); + } + buildMessageDefaults(accountName, settingsBuilder, defaultRoom, null, defaultFrom, defaultColor, defaultFormat, defaultNotify); + HipChatService service = new HipChatService(settingsBuilder.build(), httpClient, + new ClusterSettings(settingsBuilder.build(), new HashSet<>(HipChatService.getSettings()))); + + HipChatAccount account = service.getAccount(accountName); + assertThat(account, notNullValue()); + assertThat(account.name, is(accountName)); + assertThat(account.authToken, is("_token")); + assertThat(account.profile, is(HipChatAccount.Profile.V1)); + assertThat(account.httpClient, is(httpClient)); + assertThat(account.server, notNullValue()); + assertThat(account.server.host(), is(host != null ? host : HipChatServer.DEFAULT.host())); + assertThat(account.server.port(), is(port > 0 ? 
port : HipChatServer.DEFAULT.port())); + assertThat(account, instanceOf(V1Account.class)); + if (defaultRoom == null) { + assertThat(((V1Account) account).defaults.rooms, nullValue()); + } else { + assertThat(((V1Account) account).defaults.rooms, arrayContaining("_r1", "_r2")); + } + assertThat(((V1Account) account).defaults.from, is(defaultFrom)); + assertThat(((V1Account) account).defaults.color, is(defaultColor)); + assertThat(((V1Account) account).defaults.format, is(defaultFormat)); + assertThat(((V1Account) account).defaults.notify, is(defaultNotify)); + + // with a single account defined, making sure that that account is set to the default one. + assertThat(service.getAccount(null), sameInstance(account)); + } + + public void testSingleAccountIntegration() throws Exception { + String accountName = randomAlphaOfLength(10); + String host = randomBoolean() ? null : "_host"; + int port = randomBoolean() ? -1 : randomIntBetween(300, 400); + String room = randomAlphaOfLength(10); + String defaultFrom = randomBoolean() ? null : "_from"; + HipChatMessage.Color defaultColor = randomBoolean() ? null : randomFrom(HipChatMessage.Color.values()); + HipChatMessage.Format defaultFormat = randomBoolean() ? null : randomFrom(HipChatMessage.Format.values()); + Boolean defaultNotify = randomBoolean() ? null : (Boolean) randomBoolean(); + Settings.Builder settingsBuilder = Settings.builder() + .put("xpack.notification.hipchat.account." + accountName + ".profile", + HipChatAccount.Profile.INTEGRATION.value()) + .put("xpack.notification.hipchat.account." + accountName + ".auth_token", "_token") + .put("xpack.notification.hipchat.account." + accountName + ".room", room); + if (host != null) { + settingsBuilder.put("xpack.notification.hipchat.account." + accountName + ".host", host); + } + if (port > 0) { + settingsBuilder.put("xpack.notification.hipchat.account." + accountName + ".port", port); + } + buildMessageDefaults(accountName, settingsBuilder, null, null, defaultFrom, defaultColor, defaultFormat, defaultNotify); + HipChatService service = new HipChatService(settingsBuilder.build(), httpClient, + new ClusterSettings(settingsBuilder.build(), new HashSet<>(HipChatService.getSettings()))); + + HipChatAccount account = service.getAccount(accountName); + assertThat(account, notNullValue()); + assertThat(account.name, is(accountName)); + assertThat(account.authToken, is("_token")); + assertThat(account.profile, is(HipChatAccount.Profile.INTEGRATION)); + assertThat(account.httpClient, is(httpClient)); + assertThat(account.server, notNullValue()); + assertThat(account.server.host(), is(host != null ? host : HipChatServer.DEFAULT.host())); + assertThat(account.server.port(), is(port > 0 ? port : HipChatServer.DEFAULT.port())); + assertThat(account, instanceOf(IntegrationAccount.class)); + assertThat(((IntegrationAccount) account).room, is(room)); + assertThat(((IntegrationAccount) account).defaults.color, is(defaultColor)); + assertThat(((IntegrationAccount) account).defaults.format, is(defaultFormat)); + assertThat(((IntegrationAccount) account).defaults.notify, is(defaultNotify)); + + // with a single account defined, making sure that that account is set to the default one. + assertThat(service.getAccount(null), sameInstance(account)); + } + + public void testSingleAccountIntegrationNoRoomSetting() throws Exception { + String accountName = randomAlphaOfLength(10); + Settings.Builder settingsBuilder = Settings.builder() + .put("xpack.notification.hipchat.account." 
+ accountName + ".profile", + HipChatAccount.Profile.INTEGRATION.value()) + .put("xpack.notification.hipchat.account." + accountName + ".auth_token", "_token"); + SettingsException e = expectThrows(SettingsException.class, () -> + new HipChatService(settingsBuilder.build(), httpClient, + new ClusterSettings(settingsBuilder.build(), new HashSet<>(HipChatService.getSettings())))); + assertThat(e.getMessage(), containsString("missing required [room] setting for [integration] account profile")); + } + + public void testSingleAccountUser() throws Exception { + String accountName = randomAlphaOfLength(10); + String host = randomBoolean() ? null : "_host"; + int port = randomBoolean() ? -1 : randomIntBetween(300, 400); + String defaultRoom = randomBoolean() ? null : "_r1, _r2"; + String defaultUser = randomBoolean() ? null : "_u1, _u2"; + HipChatMessage.Color defaultColor = randomBoolean() ? null : randomFrom(HipChatMessage.Color.values()); + HipChatMessage.Format defaultFormat = randomBoolean() ? null : randomFrom(HipChatMessage.Format.values()); + Boolean defaultNotify = randomBoolean() ? null : (Boolean) randomBoolean(); + Settings.Builder settingsBuilder = Settings.builder() + .put("xpack.notification.hipchat.account." + accountName + ".profile", HipChatAccount.Profile.USER.value()) + .put("xpack.notification.hipchat.account." + accountName + ".auth_token", "_token"); + if (host != null) { + settingsBuilder.put("xpack.notification.hipchat.account." + accountName + ".host", host); + } + if (port > 0) { + settingsBuilder.put("xpack.notification.hipchat.account." + accountName + ".port", port); + } + buildMessageDefaults(accountName, settingsBuilder, defaultRoom, defaultUser, null, defaultColor, defaultFormat, defaultNotify); + HipChatService service = new HipChatService(settingsBuilder.build(), httpClient, + new ClusterSettings(settingsBuilder.build(), new HashSet<>(HipChatService.getSettings()))); + + HipChatAccount account = service.getAccount(accountName); + assertThat(account, notNullValue()); + assertThat(account.name, is(accountName)); + assertThat(account.authToken, is("_token")); + assertThat(account.profile, is(HipChatAccount.Profile.USER)); + assertThat(account.httpClient, is(httpClient)); + assertThat(account.server, notNullValue()); + assertThat(account.server.host(), is(host != null ? host : HipChatServer.DEFAULT.host())); + assertThat(account.server.port(), is(port > 0 ? port : HipChatServer.DEFAULT.port())); + assertThat(account, instanceOf(UserAccount.class)); + if (defaultRoom == null) { + assertThat(((UserAccount) account).defaults.rooms, nullValue()); + } else { + assertThat(((UserAccount) account).defaults.rooms, arrayContaining("_r1", "_r2")); + } + if (defaultUser == null) { + assertThat(((UserAccount) account).defaults.users, nullValue()); + } else { + assertThat(((UserAccount) account).defaults.users, arrayContaining("_u1", "_u2")); + } + assertThat(((UserAccount) account).defaults.color, is(defaultColor)); + assertThat(((UserAccount) account).defaults.format, is(defaultFormat)); + assertThat(((UserAccount) account).defaults.notify, is(defaultNotify)); + + // with a single account defined, making sure that that account is set to the default one. + assertThat(service.getAccount(null), sameInstance(account)); + } + + public void testMultipleAccounts() throws Exception { + HipChatMessage.Color defaultColor = randomBoolean() ? null : randomFrom(HipChatMessage.Color.values()); + HipChatMessage.Format defaultFormat = randomBoolean() ? 
null : randomFrom(HipChatMessage.Format.values()); + Boolean defaultNotify = randomBoolean() ? null : (Boolean) randomBoolean(); + Settings.Builder settingsBuilder = Settings.builder(); + String defaultAccount = "_a" + randomIntBetween(0, 4); + settingsBuilder.put("xpack.notification.hipchat.default_account", defaultAccount); + + final boolean customGlobalServer = randomBoolean(); + if (customGlobalServer) { + settingsBuilder.put("xpack.notification.hipchat.host", "_host_global"); + settingsBuilder.put("xpack.notification.hipchat.port", 299); + } + + for (int i = 0; i < 5; i++) { + String name = "_a" + i; + String prefix = "xpack.notification.hipchat.account." + name; + HipChatAccount.Profile profile = randomFrom(HipChatAccount.Profile.values()); + settingsBuilder.put(prefix + ".profile", profile); + settingsBuilder.put(prefix + ".auth_token", "_token" + i); + if (profile == HipChatAccount.Profile.INTEGRATION) { + settingsBuilder.put(prefix + ".room", "_room" + i); + } + if (i % 2 == 0) { + settingsBuilder.put(prefix + ".host", "_host" + i); + settingsBuilder.put(prefix + ".port", 300 + i); + } + buildMessageDefaults(name, settingsBuilder, null, null, null, defaultColor, defaultFormat, defaultNotify); + } + + HipChatService service = new HipChatService(settingsBuilder.build(), httpClient, + new ClusterSettings(settingsBuilder.build(), new HashSet<>(HipChatService.getSettings()))); + + for (int i = 0; i < 5; i++) { + String name = "_a" + i; + HipChatAccount account = service.getAccount(name); + assertThat(account, notNullValue()); + assertThat(account.name, is(name)); + assertThat(account.authToken, is("_token" + i)); + assertThat(account.profile, notNullValue()); + if (account.profile == HipChatAccount.Profile.INTEGRATION) { + assertThat(account, instanceOf(IntegrationAccount.class)); + assertThat(((IntegrationAccount) account).room, is("_room" + i)); + } + assertThat(account.httpClient, is(httpClient)); + assertThat(account.server, notNullValue()); + if (i % 2 == 0) { + assertThat(account.server.host(), is("_host" + i)); + assertThat(account.server.port(), is(300 + i)); + } else if (customGlobalServer) { + assertThat(account.server.host(), is("_host_global")); + assertThat(account.server.port(), is(299)); + } else { + assertThat(account.server.host(), is(HipChatServer.DEFAULT.host())); + assertThat(account.server.port(), is(HipChatServer.DEFAULT.port())); + } + } + + assertThat(service.getAccount(null), sameInstance(service.getAccount(defaultAccount))); + } + + private void buildMessageDefaults(String account, Settings.Builder settingsBuilder, String room, String user, String from, + HipChatMessage.Color color, HipChatMessage.Format format, Boolean notify) { + if (room != null) { + settingsBuilder.put("xpack.notification.hipchat.account." + account + ".message_defaults.room", room); + } + if (user != null) { + settingsBuilder.put("xpack.notification.hipchat.account." + account + ".message_defaults.user", user); + } + if (from != null) { + settingsBuilder.put("xpack.notification.hipchat.account." + account + ".message_defaults.from", from); + } + if (color != null) { + settingsBuilder.put("xpack.notification.hipchat.account." + account + ".message_defaults.color", color.value()); + } + if (format != null) { + settingsBuilder.put("xpack.notification.hipchat.account." + account + ".message_defaults.format", format); + } + if (notify != null) { + settingsBuilder.put("xpack.notification.hipchat.account." 
+ account + ".message_defaults.notify", notify); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/IntegrationAccountTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/IntegrationAccountTests.java new file mode 100644 index 0000000000000..b85348d7810bb --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/IntegrationAccountTests.java @@ -0,0 +1,170 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.hipchat; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpMethod; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.http.Scheme; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class IntegrationAccountTests extends ESTestCase { + + public void testSettings() throws Exception { + String accountName = "_name"; + + Settings.Builder sb = Settings.builder(); + + String authToken = randomAlphaOfLength(50); + sb.put(IntegrationAccount.AUTH_TOKEN_SETTING, authToken); + + String host = HipChatServer.DEFAULT.host(); + if (randomBoolean()) { + host = randomAlphaOfLength(10); + sb.put("host", host); + } + int port = HipChatServer.DEFAULT.port(); + if (randomBoolean()) { + port = randomIntBetween(300, 400); + sb.put("port", port); + } + + String room = randomAlphaOfLength(10); + sb.put(IntegrationAccount.ROOM_SETTING, room); + + HipChatMessage.Format defaultFormat = null; + if (randomBoolean()) { + defaultFormat = randomFrom(HipChatMessage.Format.values()); + sb.put(HipChatAccount.DEFAULT_FORMAT_SETTING, defaultFormat); + } + HipChatMessage.Color defaultColor = null; + if (randomBoolean()) { + defaultColor = randomFrom(HipChatMessage.Color.values()); + sb.put(HipChatAccount.DEFAULT_COLOR_SETTING, defaultColor); + } + Boolean defaultNotify = null; + if (randomBoolean()) { + defaultNotify = randomBoolean(); + sb.put(HipChatAccount.DEFAULT_NOTIFY_SETTING, defaultNotify); + } + Settings settings = sb.build(); + + IntegrationAccount account = new IntegrationAccount(accountName, settings, HipChatServer.DEFAULT, mock(HttpClient.class), + mock(Logger.class)); + + assertThat(account.profile, is(HipChatAccount.Profile.INTEGRATION)); + assertThat(account.name, equalTo(accountName)); + assertThat(account.server.host(), is(host)); + assertThat(account.server.port(), is(port)); 
+ assertThat(account.authToken, is(authToken)); + assertThat(account.room, is(room)); + assertThat(account.defaults.format, is(defaultFormat)); + assertThat(account.defaults.color, is(defaultColor)); + assertThat(account.defaults.notify, is(defaultNotify)); + } + + public void testSettingsNoAuthToken() throws Exception { + Settings.Builder sb = Settings.builder(); + sb.put(IntegrationAccount.ROOM_SETTING, randomAlphaOfLength(10)); + try { + new IntegrationAccount("_name", sb.build(), HipChatServer.DEFAULT, mock(HttpClient.class), mock(Logger.class)); + fail("Expected SettingsException"); + } catch (SettingsException e) { + assertThat(e.getMessage(), is("hipchat account [_name] missing required [auth_token] setting")); + } + } + + public void testSettingsWithoutRoom() throws Exception { + Settings.Builder sb = Settings.builder(); + sb.put(IntegrationAccount.AUTH_TOKEN_SETTING, randomAlphaOfLength(50)); + try { + new IntegrationAccount("_name", sb.build(), HipChatServer.DEFAULT, mock(HttpClient.class), mock(Logger.class)); + fail("Expected SettingsException"); + } catch (SettingsException e) { + assertThat(e.getMessage(), containsString("missing required [room] setting for [integration] account profile")); + } + } + + public void testSettingsWithoutMultipleRooms() throws Exception { + Settings.Builder sb = Settings.builder(); + sb.put(IntegrationAccount.AUTH_TOKEN_SETTING, randomAlphaOfLength(50)); + sb.put(IntegrationAccount.ROOM_SETTING, "_r1,_r2"); + try { + new IntegrationAccount("_name", sb.build(), HipChatServer.DEFAULT, mock(HttpClient.class), mock(Logger.class)); + fail("Expected SettingsException"); + } catch (SettingsException e) { + assertThat(e.getMessage(), containsString("[room] setting for [integration] account must only be set with a single value")); + } + } + + public void testSend() throws Exception { + String token = randomAlphaOfLength(10); + HttpClient httpClient = mock(HttpClient.class); + String room = "Room with Spaces"; + IntegrationAccount account = new IntegrationAccount("_name", Settings.builder() + .put("host", "_host") + .put("port", "443") + .put("auth_token", token) + .put("room", room) + .build(), HipChatServer.DEFAULT, httpClient, mock(Logger.class)); + + HipChatMessage.Format format = randomFrom(HipChatMessage.Format.values()); + HipChatMessage.Color color = randomFrom(HipChatMessage.Color.values()); + Boolean notify = randomBoolean(); + final HipChatMessage message = new HipChatMessage("_body", null, null, null, format, color, notify); + + HttpRequest req = HttpRequest.builder("_host", 443) + .method(HttpMethod.POST) + .scheme(Scheme.HTTPS) + // url encoded already + .path("/v2/room/Room+with+Spaces/notification") + .setHeader("Content-Type", "application/json") + .setHeader("Authorization", "Bearer " + token) + .body(Strings.toString((builder, params) -> { + builder.field("message", message.body); + if (message.format != null) { + builder.field("message_format", message.format.value()); + } + if (message.notify != null) { + builder.field("notify", message.notify); + } + if (message.color != null) { + builder.field("color", String.valueOf(message.color.value())); + } + return builder; + })) + .build(); + + HttpResponse res = mock(HttpResponse.class); + when(res.status()).thenReturn(200); + when(httpClient.execute(req)).thenReturn(res); + + SentMessages sentMessages = account.send(message, null); + verify(httpClient).execute(req); + assertThat(sentMessages.asList(), hasSize(1)); + try (XContentBuilder builder = jsonBuilder()) { + 
sentMessages.asList().get(0).toXContent(builder, ToXContent.EMPTY_PARAMS); + assertThat(Strings.toString(builder), not(containsString(token))); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/UserAccountTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/UserAccountTests.java new file mode 100644 index 0000000000000..28609efd025d5 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/UserAccountTests.java @@ -0,0 +1,302 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.hipchat; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpMethod; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.http.Scheme; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.test.MockTextTemplateEngine; +import org.mockito.ArgumentCaptor; + +import java.util.HashMap; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class UserAccountTests extends ESTestCase { + + public void testSettings() throws Exception { + String accountName = "_name"; + + Settings.Builder sb = Settings.builder(); + + String authToken = randomAlphaOfLength(50); + sb.put(UserAccount.AUTH_TOKEN_SETTING, authToken); + + String host = HipChatServer.DEFAULT.host(); + if (randomBoolean()) { + host = randomAlphaOfLength(10); + sb.put("host", host); + } + int port = HipChatServer.DEFAULT.port(); + if (randomBoolean()) { + port = randomIntBetween(300, 400); + sb.put("port", port); + } + + String[] defaultRooms = null; + if (randomBoolean()) { + defaultRooms = new String[] { "_r1", "_r2" }; + sb.put(HipChatAccount.DEFAULT_ROOM_SETTING, "_r1,_r2"); + } + String[] defaultUsers = null; + if (randomBoolean()) { + defaultUsers = new String[] { "_u1", "_u2" }; + sb.put(HipChatAccount.DEFAULT_USER_SETTING, "_u1,_u2"); + } + HipChatMessage.Format defaultFormat = null; + if (randomBoolean()) { + defaultFormat = randomFrom(HipChatMessage.Format.values()); + sb.put(HipChatAccount.DEFAULT_FORMAT_SETTING, defaultFormat); + } + HipChatMessage.Color defaultColor = null; + if (randomBoolean()) { + defaultColor = randomFrom(HipChatMessage.Color.values()); + 
sb.put(HipChatAccount.DEFAULT_COLOR_SETTING, defaultColor); + } + Boolean defaultNotify = null; + if (randomBoolean()) { + defaultNotify = randomBoolean(); + sb.put(HipChatAccount.DEFAULT_NOTIFY_SETTING, defaultNotify); + } + Settings settings = sb.build(); + + UserAccount account = new UserAccount(accountName, settings, HipChatServer.DEFAULT, mock(HttpClient.class), mock(Logger.class)); + + assertThat(account.profile, is(HipChatAccount.Profile.USER)); + assertThat(account.name, equalTo(accountName)); + assertThat(account.server.host(), is(host)); + assertThat(account.server.port(), is(port)); + assertThat(account.authToken, is(authToken)); + if (defaultRooms != null) { + assertThat(account.defaults.rooms, arrayContaining(defaultRooms)); + } else { + assertThat(account.defaults.rooms, nullValue()); + } + if (defaultUsers != null) { + assertThat(account.defaults.users, arrayContaining(defaultUsers)); + } else { + assertThat(account.defaults.users, nullValue()); + } + assertThat(account.defaults.format, is(defaultFormat)); + assertThat(account.defaults.color, is(defaultColor)); + assertThat(account.defaults.notify, is(defaultNotify)); + } + + public void testSettingsNoAuthToken() throws Exception { + Settings.Builder sb = Settings.builder(); + try { + new UserAccount("_name", sb.build(), HipChatServer.DEFAULT, mock(HttpClient.class), mock(Logger.class)); + fail("Expected SettingsException"); + } catch (SettingsException e) { + assertThat(e.getMessage(), is("hipchat account [_name] missing required [auth_token] setting")); + } + } + + public void testSend() throws Exception { + HttpClient httpClient = mock(HttpClient.class); + UserAccount account = new UserAccount("_name", Settings.builder() + .put("host", "_host") + .put("port", "443") + .put("auth_token", "_token") + .build(), HipChatServer.DEFAULT, httpClient, mock(Logger.class)); + + HipChatMessage.Format format = randomFrom(HipChatMessage.Format.values()); + HipChatMessage.Color color = randomFrom(HipChatMessage.Color.values()); + Boolean notify = randomBoolean(); + final HipChatMessage message = new HipChatMessage("_body", new String[] { "_r1", "_r2" }, new String[] { "_u1", "_u2" }, null, + format, color, notify); + + HttpRequest reqR1 = HttpRequest.builder("_host", 443) + .method(HttpMethod.POST) + .scheme(Scheme.HTTPS) + .path("/v2/room/_r1/notification") + .setHeader("Content-Type", "application/json") + .setHeader("Authorization", "Bearer _token") + .body(Strings.toString((builder, params) -> { + builder.field("message", message.body); + if (message.format != null) { + builder.field("message_format", message.format.value()); + } + if (message.notify != null) { + builder.field("notify", message.notify); + } + if (message.color != null) { + builder.field("color", String.valueOf(message.color.value())); + } + return builder; + })) + .build(); + + logger.info("expected (r1): {}", BytesReference.bytes(jsonBuilder().value(reqR1)).utf8ToString()); + + HttpResponse resR1 = mock(HttpResponse.class); + when(resR1.status()).thenReturn(200); + when(httpClient.execute(reqR1)).thenReturn(resR1); + + HttpRequest reqR2 = HttpRequest.builder("_host", 443) + .method(HttpMethod.POST) + .scheme(Scheme.HTTPS) + .path("/v2/room/_r2/notification") + .setHeader("Content-Type", "application/json") + .setHeader("Authorization", "Bearer _token") + .body(Strings.toString((builder, params) -> { + builder.field("message", message.body); + if (message.format != null) { + builder.field("message_format", message.format.value()); + } + if (message.notify != null) { 
+ builder.field("notify", message.notify); + } + if (message.color != null) { + builder.field("color", String.valueOf(message.color.value())); + } + return builder; + })) + .build(); + + logger.info("expected (r2): {}", BytesReference.bytes(jsonBuilder().value(reqR1)).utf8ToString()); + + HttpResponse resR2 = mock(HttpResponse.class); + when(resR2.status()).thenReturn(200); + when(httpClient.execute(reqR2)).thenReturn(resR2); + + HttpRequest reqU1 = HttpRequest.builder("_host", 443) + .method(HttpMethod.POST) + .scheme(Scheme.HTTPS) + .path("/v2/user/_u1/message") + .setHeader("Content-Type", "application/json") + .setHeader("Authorization", "Bearer _token") + .body(Strings.toString((builder, params) -> { + builder.field("message", message.body); + if (message.format != null) { + builder.field("message_format", message.format.value()); + } + if (message.notify != null) { + builder.field("notify", message.notify); + } + return builder; + })) + .build(); + + logger.info("expected (u1): {}", BytesReference.bytes(jsonBuilder().value(reqU1)).utf8ToString()); + + HttpResponse resU1 = mock(HttpResponse.class); + when(resU1.status()).thenReturn(200); + when(httpClient.execute(reqU1)).thenReturn(resU1); + + HttpRequest reqU2 = HttpRequest.builder("_host", 443) + .method(HttpMethod.POST) + .scheme(Scheme.HTTPS) + .path("/v2/user/_u2/message") + .setHeader("Content-Type", "application/json") + .setHeader("Authorization", "Bearer _token") + .body(Strings.toString((builder, params) -> { + builder.field("message", message.body); + if (message.format != null) { + builder.field("message_format", message.format.value()); + } + if (message.notify != null) { + builder.field("notify", message.notify); + } + return builder; + })) + .build(); + + logger.info("expected (u2): {}", BytesReference.bytes(jsonBuilder().value(reqU2)).utf8ToString()); + + HttpResponse resU2 = mock(HttpResponse.class); + when(resU2.status()).thenReturn(200); + when(httpClient.execute(reqU2)).thenReturn(resU2); + + account.send(message, null); + + verify(httpClient).execute(reqR1); + verify(httpClient).execute(reqR2); + verify(httpClient).execute(reqU2); + verify(httpClient).execute(reqU2); + } + + public void testColorIsOptional() throws Exception { + Settings settings = Settings.builder() + .put("user", "testuser") + .put("auth_token", "awesome-auth-token") + .build(); + UserAccount userAccount = createUserAccount(settings); + + TextTemplate body = new TextTemplate("body"); + TextTemplate[] rooms = new TextTemplate[] { new TextTemplate("room")}; + HipChatMessage.Template template = + new HipChatMessage.Template(body, rooms, null, "sender", HipChatMessage.Format.TEXT, null, true); + + HipChatMessage message = userAccount.render("watchId", "actionId", new MockTextTemplateEngine(), template, new HashMap<>()); + assertThat(message.color, is(nullValue())); + } + + public void testFormatIsOptional() throws Exception { + Settings settings = Settings.builder() + .put("user", "testuser") + .put("auth_token", "awesome-auth-token") + .build(); + UserAccount userAccount = createUserAccount(settings); + + TextTemplate body = new TextTemplate("body"); + TextTemplate[] rooms = new TextTemplate[] { new TextTemplate("room") }; + HipChatMessage.Template template = new HipChatMessage.Template(body, rooms, null, "sender", null, + new TextTemplate("yellow"), true); + + HipChatMessage message = userAccount.render("watchId", "actionId", new MockTextTemplateEngine(), template, new HashMap<>()); + assertThat(message.format, is(nullValue())); + } + + public 
void testRoomNameIsUrlEncoded() throws Exception { + Settings settings = Settings.builder() + .put("user", "testuser") + .put("auth_token", "awesome-auth-token") + .build(); + HipChatServer hipChatServer = mock(HipChatServer.class); + HttpClient httpClient = mock(HttpClient.class); + UserAccount account = new UserAccount("notify-monitoring", settings, hipChatServer, httpClient, logger); + + TextTemplate[] rooms = new TextTemplate[] { new TextTemplate("Room with Spaces")}; + HipChatMessage.Template template = + new HipChatMessage.Template(new TextTemplate("body"), rooms, null, "sender", HipChatMessage.Format.TEXT, null, true); + + HipChatMessage message = account.render("watchId", "actionId", new MockTextTemplateEngine(), template, new HashMap<>()); + account.send(message, HttpProxy.NO_PROXY); + + ArgumentCaptor captor = ArgumentCaptor.forClass(HttpRequest.class); + verify(httpClient).execute(captor.capture()); + assertThat(captor.getAllValues(), hasSize(1)); + assertThat(captor.getValue().path(), not(containsString("Room with Spaces"))); + assertThat(captor.getValue().path(), containsString("Room%20with%20Spaces")); + } + + private UserAccount createUserAccount(Settings settings) { + HipChatServer hipChatServer = mock(HipChatServer.class); + HttpClient httpClient = mock(HttpClient.class); + return new UserAccount("notify-monitoring", settings, hipChatServer, httpClient, logger); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/V1AccountTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/V1AccountTests.java new file mode 100644 index 0000000000000..c7391afb29bb6 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/hipchat/V1AccountTests.java @@ -0,0 +1,168 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.hipchat; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpMethod; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.http.Scheme; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class V1AccountTests extends ESTestCase { + public void testSettings() throws Exception { + String accountName = "_name"; + + Settings.Builder sb = Settings.builder(); + + String authToken = randomAlphaOfLength(50); + sb.put(V1Account.AUTH_TOKEN_SETTING, authToken); + + String host = HipChatServer.DEFAULT.host(); + if (randomBoolean()) { + host = randomAlphaOfLength(10); + sb.put("host", host); + } + int port = HipChatServer.DEFAULT.port(); + if (randomBoolean()) { + port = randomIntBetween(300, 400); + sb.put("port", port); + } + + String[] defaultRooms = null; + if (randomBoolean()) { + defaultRooms = new String[] { "_r1", "_r2" }; + sb.put(HipChatAccount.DEFAULT_ROOM_SETTING, "_r1,_r2"); + } + String defaultFrom = null; + if (randomBoolean()) { + defaultFrom = randomAlphaOfLength(10); + sb.put(HipChatAccount.DEFAULT_FROM_SETTING, defaultFrom); + } + HipChatMessage.Format defaultFormat = null; + if (randomBoolean()) { + defaultFormat = randomFrom(HipChatMessage.Format.values()); + sb.put(HipChatAccount.DEFAULT_FORMAT_SETTING, defaultFormat); + } + HipChatMessage.Color defaultColor = null; + if (randomBoolean()) { + defaultColor = randomFrom(HipChatMessage.Color.values()); + sb.put(HipChatAccount.DEFAULT_COLOR_SETTING, defaultColor); + } + Boolean defaultNotify = null; + if (randomBoolean()) { + defaultNotify = randomBoolean(); + sb.put(HipChatAccount.DEFAULT_NOTIFY_SETTING, defaultNotify); + } + Settings settings = sb.build(); + + V1Account account = new V1Account(accountName, settings, HipChatServer.DEFAULT, mock(HttpClient.class), mock(Logger.class)); + + assertThat(account.profile, is(HipChatAccount.Profile.V1)); + assertThat(account.name, equalTo(accountName)); + assertThat(account.server.host(), is(host)); + assertThat(account.server.port(), is(port)); + assertThat(account.authToken, is(authToken)); + if (defaultRooms != null) { + assertThat(account.defaults.rooms, arrayContaining(defaultRooms)); + } else { + assertThat(account.defaults.rooms, nullValue()); + } + assertThat(account.defaults.from, is(defaultFrom)); + assertThat(account.defaults.format, is(defaultFormat)); + assertThat(account.defaults.color, is(defaultColor)); + assertThat(account.defaults.notify, is(defaultNotify)); + } + + public void testSettingsNoAuthToken() throws Exception { + Settings.Builder sb = Settings.builder(); + try { + new V1Account("_name", sb.build(), HipChatServer.DEFAULT, mock(HttpClient.class), mock(Logger.class)); + fail("Expected SettingsException"); + } catch 
(SettingsException e) { + assertThat(e.getMessage(), is("hipchat account [_name] missing required [auth_token] setting")); + } + } + + public void testSend() throws Exception { + HttpClient httpClient = mock(HttpClient.class); + V1Account account = new V1Account("_name", Settings.builder() + .put("host", "_host") + .put("port", "443") + .put("auth_token", "_token") + .build(), HipChatServer.DEFAULT, httpClient, mock(Logger.class)); + + HipChatMessage.Format format = randomFrom(HipChatMessage.Format.values()); + HipChatMessage.Color color = randomFrom(HipChatMessage.Color.values()); + Boolean notify = randomBoolean(); + HipChatMessage message = new HipChatMessage("_body", new String[] { "Room with Spaces", "_r2" }, null, "_from", format, + color, notify); + + HttpRequest req1 = HttpRequest.builder("_host", 443) + .method(HttpMethod.POST) + .scheme(Scheme.HTTPS) + .path("/v1/rooms/message") + .setHeader("Content-Type", "application/x-www-form-urlencoded") + .setParam("format", "json") + .setParam("auth_token", "_token") + .body(new StringBuilder() + .append("room_id=").append("Room+with+Spaces&") + .append("from=").append("_from&") + .append("message=").append("_body&") + .append("message_format=").append(format.value()).append("&") + .append("color=").append(color.value()).append("&") + .append("notify=").append(notify ? "1" : "0") + .toString()) + .build(); + + logger.info("expected (r1): {}", BytesReference.bytes(jsonBuilder().value(req1)).utf8ToString()); + + HttpResponse res1 = mock(HttpResponse.class); + when(res1.status()).thenReturn(200); + when(httpClient.execute(req1)).thenReturn(res1); + + HttpRequest req2 = HttpRequest.builder("_host", 443) + .method(HttpMethod.POST) + .scheme(Scheme.HTTPS) + .path("/v1/rooms/message") + .setHeader("Content-Type", "application/x-www-form-urlencoded") + .setParam("format", "json") + .setParam("auth_token", "_token") + .body(new StringBuilder() + .append("room_id=").append("_r2&") + .append("from=").append("_from&") + .append("message=").append("_body&") + .append("message_format=").append(format.value()).append("&") + .append("color=").append(color.value()).append("&") + .append("notify=").append(notify ? "1" : "0") + .toString()) + .build(); + + logger.info("expected (r2): {}", BytesReference.bytes(jsonBuilder().value(req2)).utf8ToString()); + + HttpResponse res2 = mock(HttpResponse.class); + when(res2.status()).thenReturn(200); + when(httpClient.execute(req2)).thenReturn(res2); + + account.send(message, null); + + verify(httpClient).execute(req1); + verify(httpClient).execute(req2); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/jira/JiraAccountTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/jira/JiraAccountTests.java new file mode 100644 index 0000000000000..01ee6d399875e --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/jira/JiraAccountTests.java @@ -0,0 +1,210 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.jira; + +import org.apache.http.HttpStatus; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.http.Scheme; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.collect.Tuple.tuple; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class JiraAccountTests extends ESTestCase { + + private HttpClient httpClient; + private ClusterSettings clusterSettings; + + @Before + public void init() throws Exception { + httpClient = mock(HttpClient.class); + clusterSettings = new ClusterSettings(Settings.EMPTY, new HashSet<>(JiraService.getSettings())); + } + + public void testJiraAccountSettings() { + final String url = "https://internal-jira.elastic.co:443"; + + SettingsException e = expectThrows(SettingsException.class, () -> new JiraAccount(null, Settings.EMPTY, null)); + assertThat(e.getMessage(), containsString("invalid jira [null] account settings. missing required [url] setting")); + + Settings settings1 = Settings.builder().put("url", url).build(); + e = expectThrows(SettingsException.class, () -> new JiraAccount("test", settings1, null)); + assertThat(e.getMessage(), containsString("invalid jira [test] account settings. missing required [user] setting")); + + Settings settings2 = Settings.builder().put("url", url).put("user", "").build(); + e = expectThrows(SettingsException.class, () -> new JiraAccount("test", settings2, null)); + assertThat(e.getMessage(), containsString("invalid jira [test] account settings. missing required [user] setting")); + + Settings settings3 = Settings.builder().put("url", url).put("user", "foo").build(); + e = expectThrows(SettingsException.class, () -> new JiraAccount("test", settings3, null)); + assertThat(e.getMessage(), containsString("invalid jira [test] account settings. missing required [password] setting")); + + Settings settings4 = Settings.builder().put("url", url).put("user", "foo").put("password", "").build(); + e = expectThrows(SettingsException.class, () -> new JiraAccount("test", settings4, null)); + assertThat(e.getMessage(), containsString("invalid jira [test] account settings. 
missing required [password] setting"));
+    }
+
+    public void testUnsecureAccountUrl() throws Exception {
+        Settings settings = Settings.builder().put("url", "http://localhost").put("user", "foo").put("password", "bar").build();
+        SettingsException e = expectThrows(SettingsException.class, () -> new JiraAccount("test", settings, null));
+        assertThat(e.getMessage(), containsString("invalid jira [test] account settings. unsecure scheme [HTTP]"));
+
+        Settings disallowHttp = Settings.builder().put(settings).put("allow_http", false).build();
+        e = expectThrows(SettingsException.class, () -> new JiraAccount("test", disallowHttp, null));
+        assertThat(e.getMessage(), containsString("invalid jira [test] account settings. unsecure scheme [HTTP]"));
+
+        Settings allowHttp = Settings.builder().put(settings).put("allow_http", true).build();
+        assertNotNull(new JiraAccount("test", allowHttp, null));
+    }
+
+    public void testCreateIssueWithError() throws Exception {
+        Settings.Builder builder = Settings.builder();
+        addAccountSettings("account1", builder);
+
+        JiraService service = new JiraService(builder.build(), httpClient, clusterSettings);
+        JiraAccount account = service.getAccount("account1");
+
+        Tuple<Integer, String> error = randomHttpError();
+
+        when(httpClient.execute(any(HttpRequest.class))).thenReturn(new HttpResponse(error.v1()));
+        JiraIssue issue = account.createIssue(emptyMap(), null);
+        assertFalse(issue.successful());
+        assertThat(issue.getFailureReason(), equalTo(error.v2()));
+    }
+
+    public void testCreateIssue() throws Exception {
+        Settings.Builder builder = Settings.builder();
+        addAccountSettings("account1", builder);
+
+        JiraService service = new JiraService(builder.build(), httpClient, clusterSettings);
+        JiraAccount account = service.getAccount("account1");
+
+        ArgumentCaptor<HttpRequest> argumentCaptor = ArgumentCaptor.forClass(HttpRequest.class);
+        when(httpClient.execute(argumentCaptor.capture())).thenReturn(new HttpResponse(HttpStatus.SC_CREATED));
+
+        Map<String, Object> fields = singletonMap("key", "value");
+
+        JiraIssue issue = account.createIssue(fields, null);
+        assertTrue(issue.successful());
+        assertNull(issue.getFailureReason());
+
+        HttpRequest sentRequest = argumentCaptor.getValue();
+        assertThat(sentRequest.host(), equalTo("internal-jira.elastic.co"));
+        assertThat(sentRequest.port(), equalTo(443));
+        assertThat(sentRequest.scheme(), equalTo(Scheme.HTTPS));
+        assertThat(sentRequest.path(), equalTo(JiraAccount.DEFAULT_PATH));
+        assertThat(sentRequest.auth(), notNullValue());
+        assertThat(sentRequest.body(), notNullValue());
+    }
+
+    public void testCustomUrls() throws Exception {
+        assertCustomUrl(Settings.builder().put("url", "https://localhost/foo").build(), "/foo");
+        assertCustomUrl(Settings.builder().put("url", "https://localhost/foo/").build(), "/foo/");
+        // this ensures we retain backwards compatibility
+        assertCustomUrl(Settings.builder().put("url", "https://localhost/").build(), JiraAccount.DEFAULT_PATH);
+        assertCustomUrl(Settings.builder().put("url", "https://localhost").build(), JiraAccount.DEFAULT_PATH);
+    }
+
+    private void assertCustomUrl(Settings urlSettings, String expectedPath) throws IOException {
+        Settings settings = Settings.builder().put(urlSettings).put("user", "foo").put("password", "bar").build();
+        HttpClient client = mock(HttpClient.class);
+
+        HttpResponse response = new HttpResponse(200);
+        when(client.execute(any())).thenReturn(response);
+
+        JiraAccount jiraAccount = new JiraAccount("test", settings, client);
+        jiraAccount.createIssue(Collections.emptyMap(), HttpProxy.NO_PROXY);
+
+        ArgumentCaptor<HttpRequest> captor = ArgumentCaptor.forClass(HttpRequest.class);
+        verify(client, times(1)).execute(captor.capture());
+        assertThat(captor.getAllValues(), hasSize(1));
+        HttpRequest request = captor.getValue();
+        assertThat(request.path(), is(expectedPath));
+    }
+
+    private void addAccountSettings(String name, Settings.Builder builder) {
+        builder.put("xpack.notification.jira.account." + name + "." + JiraAccount.URL_SETTING, "https://internal-jira.elastic.co:443");
+        builder.put("xpack.notification.jira.account." + name + "." + JiraAccount.USER_SETTING, randomAlphaOfLength(10));
+        builder.put("xpack.notification.jira.account." + name + "." + JiraAccount.PASSWORD_SETTING, randomAlphaOfLength(10));
+
+        Map<String, Object> defaults = randomIssueDefaults();
+        for (Map.Entry<String, Object> setting : defaults.entrySet()) {
+            String key = "xpack.notification.jira.account." + name + "." + JiraAccount.ISSUE_DEFAULTS_SETTING + "." + setting.getKey();
+            if (setting.getValue() instanceof String) {
+                builder.put(key, setting.getValue().toString());
+            } else if (setting.getValue() instanceof Map) {
+                builder.putProperties((Map<String, String>) setting.getValue(), s -> key + "." + s);
+            }
+        }
+    }
+
+    public static Map<String, Object> randomIssueDefaults() {
+        MapBuilder<String, Object> builder = MapBuilder.newMapBuilder();
+        if (randomBoolean()) {
+            Map<String, Object> project = new HashMap<>();
+            project.put("project", singletonMap("id", randomAlphaOfLength(10)));
+            builder.putAll(project);
+        }
+        if (randomBoolean()) {
+            Map<String, Object> project = new HashMap<>();
+            project.put("issuetype", singletonMap("name", randomAlphaOfLength(5)));
+            builder.putAll(project);
+        }
+        if (randomBoolean()) {
+            builder.put("summary", randomAlphaOfLength(10));
+        }
+        if (randomBoolean()) {
+            builder.put("description", randomAlphaOfLength(50));
+        }
+        if (randomBoolean()) {
+            int count = randomIntBetween(0, 5);
+            for (int i = 0; i < count; i++) {
+                builder.put("customfield_" + i, randomAlphaOfLengthBetween(5, 10));
+            }
+        }
+        return builder.immutableMap();
+    }
+
+    static Tuple<Integer, String> randomHttpError() {
+        Tuple<Integer, String> error = randomFrom(
+            tuple(400, "Bad Request"),
+            tuple(401, "Unauthorized (authentication credentials are invalid)"),
+            tuple(403, "Forbidden (account doesn't have permission to create this issue)"),
+            tuple(404, "Not Found (account uses invalid JIRA REST APIs)"),
+            tuple(408, "Request Timeout (request took too long to process)"),
+            tuple(500, "JIRA Server Error (internal error occurred while processing request)"),
+            tuple(666, "Unknown Error")
+        );
+        return error;
+    }
+}
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/jira/JiraIssueTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/jira/JiraIssueTests.java
new file mode 100644
index 0000000000000..8e615d76050a7
--- /dev/null
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/jira/JiraIssueTests.java
@@ -0,0 +1,128 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.watcher.notification.jira;
+
+import org.apache.http.HttpStatus;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams;
+import org.elasticsearch.xpack.watcher.common.http.HttpMethod;
+import org.elasticsearch.xpack.watcher.common.http.HttpRequest;
+import org.elasticsearch.xpack.watcher.common.http.HttpResponse;
+import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuthRegistry;
+import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuth;
+import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuthFactory;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static java.util.Collections.singletonMap;
+import static org.elasticsearch.common.xcontent.XContentFactory.cborBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.smileBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.yamlBuilder;
+import static org.elasticsearch.xpack.watcher.notification.jira.JiraAccountTests.randomHttpError;
+import static org.elasticsearch.xpack.watcher.notification.jira.JiraAccountTests.randomIssueDefaults;
+import static org.elasticsearch.xpack.watcher.notification.jira.JiraIssue.resolveFailureReason;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.Matchers.hasEntry;
+import static org.hamcrest.Matchers.is;
+
+public class JiraIssueTests extends ESTestCase {
+
+    public void testToXContent() throws Exception {
+        final JiraIssue issue = randomJiraIssue();
+
+        try (XContentBuilder builder = randomFrom(jsonBuilder(), smileBuilder(), yamlBuilder(), cborBuilder())) {
+            issue.toXContent(builder, WatcherParams.builder().hideSecrets(false).build());
+
+            Map<String, Object> parsedFields = null;
+            Map<String, Object> parsedResult = null;
+
+            HttpRequest parsedRequest = null;
+            HttpResponse parsedResponse = null;
+            String parsedAccount = null;
+            String parsedReason = null;
+
+            try (XContentParser parser = createParser(builder)) {
+                assertNull(parser.currentToken());
+                parser.nextToken();
+
+                XContentParser.Token token = parser.currentToken();
+                assertThat(token, is(XContentParser.Token.START_OBJECT));
+
+                String currentFieldName = null;
+                while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+                    if (token == XContentParser.Token.FIELD_NAME) {
+                        currentFieldName = parser.currentName();
+                    } else if ("account".equals(currentFieldName)) {
+                        parsedAccount = parser.text();
+                    } else if ("result".equals(currentFieldName)) {
+                        parsedResult = parser.map();
+                    } else if ("request".equals(currentFieldName)) {
+                        HttpAuthRegistry registry = new HttpAuthRegistry(singletonMap(BasicAuth.TYPE, new BasicAuthFactory(null)));
+                        HttpRequest.Parser httpRequestParser = new HttpRequest.Parser(registry);
+                        parsedRequest = httpRequestParser.parse(parser);
+                    } else if ("response".equals(currentFieldName)) {
+                        parsedResponse = HttpResponse.parse(parser);
+                    } else if ("fields".equals(currentFieldName)) {
+                        parsedFields = parser.map();
+                    } else if ("reason".equals(currentFieldName)) {
+                        parsedReason = parser.text();
+                    } else {
+                        fail("unknown field [" + currentFieldName + "]");
+                    }
+                }
+            }
+
+            assertThat(parsedAccount, equalTo(issue.getAccount()));
+            assertThat(parsedFields, equalTo(issue.getFields()));
+            if (issue.successful()) {
+                assertThat(parsedResult, hasEntry("key", "TEST"));
+                assertNull(parsedRequest);
+                assertNull(parsedResponse);
+            } else {
+                assertThat(parsedRequest, equalTo(issue.getRequest()));
+                assertThat(parsedResponse, equalTo(issue.getResponse()));
+                assertThat(parsedReason, equalTo(resolveFailureReason(issue.getResponse())));
+            }
+        }
+    }
+
+    public void testEquals() {
+        final JiraIssue issue1 = randomJiraIssue();
+        final boolean equals = randomBoolean();
+
+        final Map<String, Object> fields = new HashMap<>(issue1.getFields());
+        if (equals == false) {
+            if (fields.isEmpty()) {
+                fields.put(randomAlphaOfLength(5), randomAlphaOfLength(10));
+            } else {
+                fields.remove(randomFrom(fields.keySet()));
+            }
+        }
+
+        JiraIssue issue2 = new JiraIssue(issue1.getAccount(), fields, issue1.getRequest(), issue1.getResponse(), issue1.getFailureReason());
+        assertThat(issue1.equals(issue2), is(equals));
+    }
+
+    private static JiraIssue randomJiraIssue() {
+        String account = "account_" + randomIntBetween(0, 100);
+        Map<String, Object> fields = randomIssueDefaults();
+        HttpRequest request = HttpRequest.builder(randomFrom("localhost", "internal-jira.elastic.co"), randomFrom(80, 443))
+            .method(HttpMethod.POST)
+            .path(JiraAccount.DEFAULT_PATH)
+            .auth(new BasicAuth(randomAlphaOfLength(5), randomAlphaOfLength(5).toCharArray()))
+            .build();
+        if (rarely()) {
+            Tuple<Integer, String> error = randomHttpError();
+            return JiraIssue.responded(account, fields, request, new HttpResponse(error.v1(), "{\"error\": \"" + error.v2() + "\"}"));
+        }
+        return JiraIssue.responded(account, fields, request, new HttpResponse(HttpStatus.SC_CREATED, "{\"key\": \"TEST\"}"));
+    }
+}
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEventDefaultsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEventDefaultsTests.java
new file mode 100644
index 0000000000000..2d4fdc070443a
--- /dev/null
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEventDefaultsTests.java
@@ -0,0 +1,110 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.watcher.notification.pagerduty; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.Matchers.is; + +public class IncidentEventDefaultsTests extends ESTestCase { + + public void testConstructor() throws Exception { + Settings settings = randomSettings(); + IncidentEventDefaults defaults = new IncidentEventDefaults(settings); + assertThat(defaults.incidentKey, is(settings.get("incident_key", null))); + assertThat(defaults.description, is(settings.get("description", null))); + assertThat(defaults.clientUrl, is(settings.get("client_url", null))); + assertThat(defaults.client, is(settings.get("client", null))); + assertThat(defaults.eventType, is(settings.get("event_type", null))); + assertThat(defaults.attachPayload, is(settings.getAsBoolean("attach_payload", false))); + if (settings.getAsSettings("link").names().isEmpty()) { + IncidentEventDefaults.Context.LinkDefaults linkDefaults = new IncidentEventDefaults.Context.LinkDefaults(Settings.EMPTY); + assertThat(defaults.link, is(linkDefaults)); + } else { + assertThat(defaults.link, notNullValue()); + assertThat(defaults.link.href, is(settings.get("link.href", null))); + assertThat(defaults.link.text, is(settings.get("link.text", null))); + } + if (settings.getAsSettings("image").names().isEmpty()) { + IncidentEventDefaults.Context.ImageDefaults imageDefaults = new IncidentEventDefaults.Context.ImageDefaults(Settings.EMPTY); + assertThat(defaults.image, is(imageDefaults)); + } else { + assertThat(defaults.image, notNullValue()); + assertThat(defaults.image.href, is(settings.get("image.href", null))); + assertThat(defaults.image.alt, is(settings.get("image.alt", null))); + assertThat(defaults.image.src, is(settings.get("image.src", null))); + } + } + + public static Settings randomSettings() { + Settings.Builder settings = Settings.builder(); + if (randomBoolean()) { + settings.put("from", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + String[] to = new String[randomIntBetween(1, 3)]; + for (int i = 0; i < to.length; i++) { + to[i] = randomAlphaOfLength(10); + } + settings.putList("to", to); + } + if (randomBoolean()) { + settings.put("text", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("event_type", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("icon", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.fallback", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.color", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.pretext", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.author_name", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.author_link", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.author_icon", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.title", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.title_link", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.text", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.image_url", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.thumb_url", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + 
settings.put("attachment.field.title", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.field.value", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.field.short", randomBoolean()); + } + return settings.build(); + } + +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/pagerduty/PagerDutyAccountsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/pagerduty/PagerDutyAccountsTests.java new file mode 100644 index 0000000000000..d70badc4bec22 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/pagerduty/PagerDutyAccountsTests.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.pagerduty; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.yaml.ObjectPath; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpProxy; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.notification.slack.message.SlackMessageDefaultsTests; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.util.HashSet; + +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class PagerDutyAccountsTests extends ESTestCase { + + private HttpClient httpClient; + + @Before + public void init() throws Exception { + httpClient = mock(HttpClient.class); + } + + public void testProxy() throws Exception { + Settings.Builder builder = Settings.builder().put("xpack.notification.pagerduty.default_account", "account1"); + addAccountSettings("account1", builder); + PagerDutyService service = new PagerDutyService(builder.build(), httpClient, new ClusterSettings(Settings.EMPTY, + new HashSet<>(PagerDutyService.getSettings()))); + PagerDutyAccount account = service.getAccount("account1"); + + ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(HttpRequest.class); + when(httpClient.execute(argumentCaptor.capture())).thenReturn(new HttpResponse(200)); + + HttpProxy proxy = new HttpProxy("localhost", 8080); + IncidentEvent event = new IncidentEvent("foo", null, null, null, null, account.getName(), true, null, proxy); + account.send(event, Payload.EMPTY); + + HttpRequest request = argumentCaptor.getValue(); + assertThat(request.proxy(), is(proxy)); + } + + // in earlier versions of the PD action the wrong JSON was sent, because the contexts field was named context + // the pagerduty API accepts any JSON, thus this was never caught + public void testContextIsSentCorrect() throws Exception { + Settings.Builder builder = Settings.builder().put("xpack.notification.pagerduty.default_account", "account1"); + addAccountSettings("account1", 
builder); + PagerDutyService service = new PagerDutyService(builder.build(), httpClient, new ClusterSettings(Settings.EMPTY, + new HashSet<>(PagerDutyService.getSettings()))); + PagerDutyAccount account = service.getAccount("account1"); + + ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(HttpRequest.class); + when(httpClient.execute(argumentCaptor.capture())).thenReturn(new HttpResponse(200)); + + IncidentEventContext[] contexts = { + IncidentEventContext.link("https://www.elastic.co/products/x-pack/alerting", "Go to the Elastic.co Alerting website"), + IncidentEventContext.image("https://www.elastic.co/assets/blte5d899fd0b0e6808/icon-alerting-bb.svg", + "https://www.elastic.co/products/x-pack/alerting", "X-Pack-Alerting website link with log") + }; + IncidentEvent event = new IncidentEvent("foo", null, null, null, null, account.getName(), true, contexts, HttpProxy.NO_PROXY); + account.send(event, Payload.EMPTY); + + HttpRequest request = argumentCaptor.getValue(); + ObjectPath source = ObjectPath.createFromXContent(JsonXContent.jsonXContent, new BytesArray(request.body())); + assertThat(source.evaluate("contexts"), notNullValue()); + } + + private void addAccountSettings(String name, Settings.Builder builder) { + builder.put("xpack.notification.pagerduty.account." + name + ".service_api_key", randomAlphaOfLength(50)); + Settings defaults = SlackMessageDefaultsTests.randomSettings(); + for (String setting : defaults.keySet()) { + builder.copy("xpack.notification.pagerduty.message_defaults." + setting, setting, defaults); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/pagerduty/SentEventTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/pagerduty/SentEventTests.java new file mode 100644 index 0000000000000..bd1072ca7ac98 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/pagerduty/SentEventTests.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.pagerduty; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.not; + +public class SentEventTests extends ESTestCase { + + public void testToXContentBodyFiltering() throws Exception { + HttpResponse response = new HttpResponse(500); + String body = randomAlphaOfLength(20); + HttpRequest request = HttpRequest.builder("localhost", 1234).body(body).build(); + IncidentEvent incidentEvent = new IncidentEvent("description", "eventtype", null, null, null, null, false, null, null); + SentEvent sentEvent = SentEvent.responded(incidentEvent, request, response); + + try (XContentBuilder builder = jsonBuilder()) { + WatcherParams params = WatcherParams.builder().hideSecrets(false).build(); + sentEvent.toXContent(builder, params); + assertThat(Strings.toString(builder), containsString(body)); + + try (XContentParser parser = builder.contentType().xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + Strings.toString(builder))) { + parser.map(); + } + } + try (XContentBuilder builder = jsonBuilder()) { + sentEvent.toXContent(builder, ToXContent.EMPTY_PARAMS); + assertThat(Strings.toString(builder), not(containsString(body))); + + try (XContentParser parser = builder.contentType().xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + Strings.toString(builder))) { + parser.map(); + } + } + + } + +} \ No newline at end of file diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/slack/message/SlackMessageDefaultsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/slack/message/SlackMessageDefaultsTests.java new file mode 100644 index 0000000000000..73dec8acd3daa --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/slack/message/SlackMessageDefaultsTests.java @@ -0,0 +1,112 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.notification.slack.message; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import java.util.List; + +import static org.hamcrest.Matchers.is; + +public class SlackMessageDefaultsTests extends ESTestCase { + + public void testConstructor() throws Exception { + Settings settings = randomSettings(); + SlackMessageDefaults defaults = new SlackMessageDefaults(settings); + assertThat(defaults.from, is(settings.get("from", null))); + List to = settings.getAsList("to", null); + assertThat(defaults.to, is(to == null ? null : to.toArray(Strings.EMPTY_ARRAY))); + assertThat(defaults.text, is(settings.get("text", null))); + assertThat(defaults.icon, is(settings.get("icon", null))); + assertThat(defaults.attachment.fallback, is(settings.get("attachment.fallback", null))); + assertThat(defaults.attachment.color, is(settings.get("attachment.color", null))); + assertThat(defaults.attachment.pretext, is(settings.get("attachment.pretext", null))); + assertThat(defaults.attachment.authorName, is(settings.get("attachment.author_name", null))); + assertThat(defaults.attachment.authorLink, is(settings.get("attachment.author_link", null))); + assertThat(defaults.attachment.authorIcon, is(settings.get("attachment.author_icon", null))); + assertThat(defaults.attachment.title, is(settings.get("attachment.title", null))); + assertThat(defaults.attachment.titleLink, is(settings.get("attachment.title_link", null))); + assertThat(defaults.attachment.text, is(settings.get("attachment.text", null))); + assertThat(defaults.attachment.imageUrl, is(settings.get("attachment.image_url", null))); + assertThat(defaults.attachment.thumbUrl, is(settings.get("attachment.thumb_url", null))); + assertThat(defaults.attachment.field.title, is(settings.get("attachment.field.title", null))); + assertThat(defaults.attachment.field.value, is(settings.get("attachment.field.value", null))); + assertThat(defaults.attachment.field.isShort, is(settings.getAsBoolean("attachment.field.short", null))); + assertThat(defaults.attachment.markdownSupportedFields, is(settings.getAsList("attachment.mrkdwn_in", null))); + } + + public static Settings randomSettings() { + Settings.Builder settings = Settings.builder(); + if (randomBoolean()) { + settings.put("from", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + String[] to = new String[randomIntBetween(1, 3)]; + for (int i = 0; i < to.length; i++) { + to[i] = randomAlphaOfLength(10); + } + settings.putList("to", to); + } + if (randomBoolean()) { + settings.put("text", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("icon", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.fallback", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.color", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.pretext", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.author_name", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.author_link", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.author_icon", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.title", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.title_link", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + 
settings.put("attachment.text", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.image_url", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.thumb_url", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.field.title", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.field.value", randomAlphaOfLength(10)); + } + if (randomBoolean()) { + settings.put("attachment.field.short", randomBoolean()); + } + if (randomBoolean()) { + if (randomBoolean()) { + settings.putList("attachment.mrkdwn_in", "foo", "bar"); + } else { + settings.put("attachment.mrkdwn_in", "foo,bar"); + } + } + return settings.build(); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/slack/message/SlackMessageTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/slack/message/SlackMessageTests.java new file mode 100644 index 0000000000000..4075bd3dadde4 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/slack/message/SlackMessageTests.java @@ -0,0 +1,690 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.notification.slack.message; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.watcher.common.http.HttpRequest; +import org.elasticsearch.xpack.watcher.common.http.HttpResponse; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.slack.SentMessages; +import org.elasticsearch.xpack.watcher.test.MockTextTemplateEngine; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; + +public class SlackMessageTests extends ESTestCase { + + public void testToXContent() throws Exception { + String from = randomBoolean() ? null : randomAlphaOfLength(10); + String[] to = rarely() ? 
null : new String[randomIntBetween(0, 2)]; + if (to != null) { + for (int i = 0; i < to.length; i++) { + to[i] = randomAlphaOfLength(10); + } + } + String icon = randomBoolean() ? null : randomAlphaOfLength(10); + String text = randomBoolean() ? null : randomAlphaOfLength(50); + Attachment[] attachments = randomBoolean() ? null : new Attachment[randomIntBetween(0, 2)]; + if (attachments != null) { + for (int i = 0; i < attachments.length; i++) { + String fallback = randomBoolean() ? null : randomAlphaOfLength(10); + String color = randomBoolean() ? null : randomAlphaOfLength(10); + String pretext = randomBoolean() ? null : randomAlphaOfLength(10); + String authorName = randomBoolean() ? null : randomAlphaOfLength(10); + String authorLink = authorName == null || randomBoolean() ? null : randomAlphaOfLength(10); + String authorIcon = authorName == null || randomBoolean() ? null : randomAlphaOfLength(10); + String title = randomBoolean() ? null : randomAlphaOfLength(10); + String titleLink = title == null ||randomBoolean() ? null : randomAlphaOfLength(10); + String attachmentText = randomBoolean() ? null : randomAlphaOfLength(10); + Field[] fields = randomBoolean() ? null : new Field[randomIntBetween(0, 2)]; + if (fields != null) { + for (int j = 0; j < fields.length; j++) { + fields[j] = new Field(randomAlphaOfLength(10), randomAlphaOfLength(10), randomBoolean()); + } + } + String imageUrl = randomBoolean() ? null : randomAlphaOfLength(10); + String thumbUrl = randomBoolean() ? null : randomAlphaOfLength(10); + String[] markdownFields = randomBoolean() ? null : new String[]{"pretext"}; + List actions = new ArrayList<>(); + if (randomBoolean()) { + actions.add(new Action("primary", "action_name", "button", "action_text", "https://elastic.co")); + } + attachments[i] = new Attachment(fallback, color, pretext, authorName, authorLink, authorIcon, title, titleLink, + attachmentText, fields, imageUrl, thumbUrl, markdownFields, actions); + } + } + + SlackMessage expected = new SlackMessage(from, to, icon, text, attachments); + + boolean includeTarget = randomBoolean(); + + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + writeFieldIfNotNull(builder, "from", from); + if (includeTarget) { + writeFieldIfNotNull(builder, "to", to); + } + writeFieldIfNotNull(builder, "icon", icon); + writeFieldIfNotNull(builder, "text", text); + if (attachments != null) { + builder.startArray("attachments"); + for (Attachment attachment : attachments) { + builder.startObject(); + writeFieldIfNotNull(builder, "fallback", attachment.fallback); + writeFieldIfNotNull(builder, "color", attachment.color); + writeFieldIfNotNull(builder, "pretext", attachment.pretext); + writeFieldIfNotNull(builder, "author_name", attachment.authorName); + writeFieldIfNotNull(builder, "author_link", attachment.authorLink); + writeFieldIfNotNull(builder, "author_icon", attachment.authorIcon); + writeFieldIfNotNull(builder, "title", attachment.title); + writeFieldIfNotNull(builder, "title_link", attachment.titleLink); + writeFieldIfNotNull(builder, "text", attachment.text); + if (attachment.fields != null) { + builder.startArray("fields"); + for (Field field : attachment.fields) { + builder.startObject(); + builder.field("title", field.title); + builder.field("value", field.value); + builder.field("short", field.isShort); + builder.endObject(); + } + builder.endArray(); + } + if (attachment.actions.isEmpty() == false) { + builder.startArray("actions"); + for (Action action : attachment.actions) { + action.toXContent(builder, 
ToXContent.EMPTY_PARAMS); + } + builder.endArray(); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + + builder = jsonBuilder(); + if (includeTarget && randomBoolean()) { + expected.toXContent(builder, ToXContent.EMPTY_PARAMS); + } else { + expected.toXContent(builder, ToXContent.EMPTY_PARAMS, includeTarget); + } + + XContentParser parser = createParser(builder); + parser.nextToken(); + + from = null; + to = null; + icon = null; + text = null; + attachments = null; + + String currentFieldName = null; + XContentParser.Token token = parser.currentToken(); + assertThat(token, is(XContentParser.Token.START_OBJECT)); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if ("from".equals(currentFieldName)) { + from = parser.text(); + } else if ("to".equals(currentFieldName)) { + List list = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + list.add(parser.text()); + } + to = list.toArray(new String[list.size()]); + } else if ("icon".equals(currentFieldName)) { + icon = parser.text(); + } else if ("text".equals(currentFieldName)) { + text = parser.text(); + } else if ("attachments".equals(currentFieldName)) { + List list = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + assertThat(token, is(XContentParser.Token.START_OBJECT)); + String fallback = null; + String color = null; + String pretext = null; + String authorName = null; + String authorLink = null; + String authorIcon = null; + String title = null; + String titleLink = null; + String attachmentText = null; + Field[] fields = null; + String imageUrl = null; + String thumbUrl = null; + String[] markdownSupportedFields = null; + List actions = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if ("fallback".equals(currentFieldName)) { + fallback = parser.text(); + } else if ("color".equals(currentFieldName)) { + color = parser.text(); + } else if ("pretext".equals(currentFieldName)) { + pretext = parser.text(); + } else if ("author_name".equals(currentFieldName)) { + authorName = parser.text(); + } else if ("author_link".equals(currentFieldName)) { + authorLink = parser.text(); + } else if ("author_icon".equals(currentFieldName)) { + authorIcon = parser.text(); + } else if ("title".equals(currentFieldName)) { + title = parser.text(); + } else if ("title_link".equals(currentFieldName)) { + titleLink = parser.text(); + } else if ("text".equals(currentFieldName)) { + attachmentText = parser.text(); + } else if ("fields".equals(currentFieldName)) { + List fieldList = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + assertThat(token, is(XContentParser.Token.START_OBJECT)); + String fieldTitle = null; + String fieldValue = null; + boolean isShort = false; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if ("title".equals(currentFieldName)) { + fieldTitle = parser.text(); + } else if ("value".equals(currentFieldName)) { + fieldValue = parser.text(); + } else if ("short".equals(currentFieldName)) { + isShort = parser.booleanValue(); + } + } + fieldList.add(new Field(fieldTitle, 
fieldValue, isShort)); + } + fields = fieldList.toArray(new Field[fieldList.size()]); + } else if ("actions".equals(currentFieldName)) { + MockTextTemplateEngine engine = new MockTextTemplateEngine(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + Action.Template action = new Action.Template(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + switch (currentFieldName) { + case "url": + action.setUrl(new TextTemplate(parser.text())); + break; + case "name": + action.setName(new TextTemplate(parser.text())); + break; + case "style": + action.setStyle(new TextTemplate(parser.text())); + break; + case "text": + action.setText(new TextTemplate(parser.text())); + break; + case "type": + action.setType(new TextTemplate(parser.text())); + break; + } + } + + } + actions.add(action.render(engine, Collections.emptyMap())); + } + } else if ("image_url".equals(currentFieldName)) { + imageUrl = parser.text(); + } else if ("thumb_url".equals(currentFieldName)) { + thumbUrl = parser.text(); + } else if ("mrkdwn_in".equals(currentFieldName)) { + List data = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + data.add(parser.text()); + } + markdownSupportedFields = data.toArray(new String[]{}); + } + } + list.add(new Attachment(fallback, color, pretext, authorName, authorLink, authorIcon, title, titleLink, + attachmentText, fields, imageUrl, thumbUrl, markdownSupportedFields, actions)); + } + attachments = list.toArray(new Attachment[list.size()]); + } + } + + if (!includeTarget) { + assertThat(to, nullValue()); + to = expected.to; + } + + SlackMessage actual = new SlackMessage(from, to, icon, text, attachments); + + assertThat(actual, equalTo(expected)); + } + + public void testTemplateParse() throws Exception { + ToXContent.Params params = ToXContent.EMPTY_PARAMS; + XContentBuilder jsonBuilder = jsonBuilder(); + jsonBuilder.startObject(); + + TextTemplate from = null; + if (randomBoolean()) { + from = new TextTemplate(randomAlphaOfLength(200)); + jsonBuilder.field("from", from, params); + } + TextTemplate[] to = null; + if (randomBoolean()) { + jsonBuilder.startArray("to"); + to = new TextTemplate[randomIntBetween(1, 3)]; + for (int i = 0; i < to.length; i++) { + to[i] = new TextTemplate(randomAlphaOfLength(10)); + to[i].toXContent(jsonBuilder, params); + } + jsonBuilder.endArray(); + } + TextTemplate text = null; + if (randomBoolean()) { + text = new TextTemplate(randomAlphaOfLength(200)); + jsonBuilder.field("text", text, params); + } + TextTemplate icon = null; + if (randomBoolean()) { + icon = new TextTemplate(randomAlphaOfLength(10)); + jsonBuilder.field("icon", icon); + } + Attachment.Template[] attachments = null; + if (randomBoolean()) { + jsonBuilder.startArray("attachments"); + attachments = new Attachment.Template[randomIntBetween(1, 3)]; + for (int i = 0; i < attachments.length; i++) { + jsonBuilder.startObject(); + TextTemplate fallback = null; + if (randomBoolean()) { + fallback = new TextTemplate(randomAlphaOfLength(200)); + jsonBuilder.field("fallback", fallback, params); + } + TextTemplate color = null; + if (randomBoolean()) { + color = new TextTemplate(randomAlphaOfLength(200)); + jsonBuilder.field("color", color, params); + } + TextTemplate pretext = null; + if (randomBoolean()) { + pretext = new TextTemplate(randomAlphaOfLength(200)); + 
jsonBuilder.field("pretext", pretext, params); + } + TextTemplate authorName = null; + TextTemplate authorLink = null; + TextTemplate authorIcon = null; + if (randomBoolean()) { + authorName = new TextTemplate(randomAlphaOfLength(200)); + jsonBuilder.field("author_name", authorName, params); + if (randomBoolean()) { + authorLink = new TextTemplate(randomAlphaOfLength(200)); + jsonBuilder.field("author_link", authorLink, params); + } + if (randomBoolean()) { + authorIcon = new TextTemplate(randomAlphaOfLength(200)); + jsonBuilder.field("author_icon", authorIcon, params); + } + } + TextTemplate title = null; + TextTemplate titleLink = null; + if (randomBoolean()) { + title = new TextTemplate(randomAlphaOfLength(200)); + jsonBuilder.field("title", title, params); + if (randomBoolean()) { + titleLink = new TextTemplate(randomAlphaOfLength(200)); + jsonBuilder.field("title_link", titleLink, params); + } + } + TextTemplate attachmentText = null; + if (randomBoolean()) { + attachmentText = new TextTemplate(randomAlphaOfLength(200)); + jsonBuilder.field("text", attachmentText, params); + } + TextTemplate imageUrl = null; + if (randomBoolean()) { + imageUrl = new TextTemplate(randomAlphaOfLength(200)); + jsonBuilder.field("image_url", imageUrl, params); + } + TextTemplate thumbUrl = null; + if (randomBoolean()) { + thumbUrl = new TextTemplate(randomAlphaOfLength(200)); + jsonBuilder.field("thumb_url", thumbUrl, params); + } + Field.Template[] fields = null; + if (randomBoolean()) { + jsonBuilder.startArray("fields"); + fields = new Field.Template[randomIntBetween(1,3)]; + for (int j = 0; j < fields.length; j++) { + jsonBuilder.startObject(); + TextTemplate fieldTitle = new TextTemplate(randomAlphaOfLength(50)); + jsonBuilder.field("title", fieldTitle, params); + TextTemplate fieldValue = new TextTemplate(randomAlphaOfLength(50)); + jsonBuilder.field("value", fieldValue, params); + boolean isShort = randomBoolean(); + jsonBuilder.field("short", isShort); + fields[j] = new Field.Template(fieldTitle, fieldValue, isShort); + jsonBuilder.endObject(); + } + jsonBuilder.endArray(); + } + TextTemplate[] markdownSupportedFields = null; + if (randomBoolean()) { + jsonBuilder.startArray("mrkdwn_in"); + jsonBuilder.value("pretext"); + jsonBuilder.endArray(); + markdownSupportedFields = new TextTemplate[] { new TextTemplate("pretext") }; + } + List actions = new ArrayList<>(); + if (randomBoolean()) { + jsonBuilder.startArray("actions"); + jsonBuilder.startObject(); + jsonBuilder.field("type", "button"); + jsonBuilder.field("text", "My text"); + jsonBuilder.field("url", "https://elastic.co"); + String style = randomFrom("primary", "danger"); + jsonBuilder.field("style", style); + jsonBuilder.field("name", "somebuttonparty"); + jsonBuilder.endObject(); + jsonBuilder.endArray(); + Action.Template action = new Action.Template(); + action.setName(new TextTemplate("somebuttonparty")); + action.setStyle(new TextTemplate(style)); + action.setText(new TextTemplate("My text")); + action.setType(new TextTemplate("button")); + action.setUrl(new TextTemplate("https://elastic.co")); + actions.add(action); + } + jsonBuilder.endObject(); + attachments[i] = new Attachment.Template(fallback, color, pretext, authorName, authorLink, authorIcon, title, + titleLink, attachmentText, fields, imageUrl, thumbUrl, markdownSupportedFields, actions); + } + jsonBuilder.endArray(); + } + jsonBuilder.endObject(); + + XContentParser parser = createParser(jsonBuilder); + parser.nextToken(); + assertThat(parser.currentToken(), 
is(XContentParser.Token.START_OBJECT)); + + SlackMessage.Template template = SlackMessage.Template.parse(parser); + assertThat(template, notNullValue()); + assertThat(template.from, is(from)); + if (to == null) { + assertThat(template.to, nullValue()); + } else { + assertThat(template.to, arrayContaining(to)); + } + assertThat(template.icon, is(icon)); + assertThat(template.text, is(text)); + if (attachments == null) { + assertThat(template.attachments, nullValue()); + } else { + for (int i = 0; i < attachments.length; i++) { + assertThat(template.attachments[i], is(attachments[i])); + } + } + } + + public void testTemplateParseSelfGenerated() throws Exception { + SlackMessage.Template template = createRandomTemplate(); + + XContentBuilder jsonBuilder = jsonBuilder(); + template.toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); + + XContentParser parser = createParser(jsonBuilder); + parser.nextToken(); + + SlackMessage.Template parsed = SlackMessage.Template.parse(parser); + + assertThat(parsed, equalTo(template)); + } + + public void testTemplateRender() throws Exception { + Settings settings = SlackMessageDefaultsTests.randomSettings(); + SlackMessageDefaults defaults = new SlackMessageDefaults(settings); + SlackMessage.Template.Builder templateBuilder = SlackMessage.Template.builder(); + + if (randomBoolean()) { + templateBuilder.setFrom(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + int count = randomIntBetween(0, 3); + for (int i = 0; i < count; i++) { + templateBuilder.addTo(randomAlphaOfLength(10)); + } + } + if (randomBoolean()) { + templateBuilder.setIcon(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + templateBuilder.setText(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + int count = randomIntBetween(0, 3); + for (int i = 0; i < count; i++) { + Attachment.Template.Builder attachmentBuilder = createRandomAttachmentTemplateBuilder(); + templateBuilder.addAttachments(attachmentBuilder); + } + } + + // relies on the fact that all the templates we use are inline templates without param place holders + TextTemplateEngine engine = new MockTextTemplateEngine(); + + SlackMessage.Template template = templateBuilder.build(); + + SlackMessage message = template.render("_w1", "_a1", engine, Collections.emptyMap(), defaults); + assertThat(message, notNullValue()); + if (template.from != null) { + assertThat(message.from, is(template.from.getTemplate())); + } else { + assertThat(message.from, is(defaults.from != null ? defaults.from : "_w1")); + } + if (template.to == null) { + assertThat(message.to, is(defaults.to)); + } else { + String[] expected = new String[message.to.length]; + for (int i = 0; i < expected.length; i++) { + expected[i] = template.to[i].getTemplate(); + } + assertThat(message.to, arrayContaining(expected)); + } + assertThat(message.icon, is(template.icon != null ? template.icon.getTemplate() : defaults.icon)); + assertThat(message.text, is(template.text != null ? template.text.getTemplate() : defaults.text)); + if (template.attachments == null) { + assertThat(message.attachments, nullValue()); + } else { + for (int i = 0; i < template.attachments.length; i++) { + Attachment.Template attachmentTemplate = template.attachments[i]; + Attachment attachment = message.attachments[i]; + assertThat(attachment.authorName, is(attachmentTemplate.authorName != null ? attachmentTemplate.authorName.getTemplate() + : defaults.attachment.authorName)); + assertThat(attachment.authorLink, is(attachmentTemplate.authorLink != null ? 
attachmentTemplate.authorLink.getTemplate() + : defaults.attachment.authorLink)); + assertThat(attachment.authorIcon, is(attachmentTemplate.authorIcon != null ? attachmentTemplate.authorIcon.getTemplate() + : defaults.attachment.authorIcon)); + assertThat(attachment.color, is(attachmentTemplate.color != null ? attachmentTemplate.color.getTemplate() + : defaults.attachment.color)); + assertThat(attachment.fallback, is(attachmentTemplate.fallback != null ? attachmentTemplate.fallback.getTemplate() + : defaults.attachment.fallback)); + assertThat(attachment.imageUrl, is(attachmentTemplate.imageUrl != null ? attachmentTemplate.imageUrl.getTemplate() + : defaults.attachment.imageUrl)); + assertThat(attachment.pretext, is(attachmentTemplate.pretext != null ? attachmentTemplate.pretext.getTemplate() + : defaults.attachment.pretext)); + assertThat(attachment.thumbUrl, is(attachmentTemplate.thumbUrl != null ? attachmentTemplate.thumbUrl.getTemplate() + : defaults.attachment.thumbUrl)); + assertThat(attachment.title, is(attachmentTemplate.title != null ? attachmentTemplate.title.getTemplate() + : defaults.attachment.title)); + assertThat(attachment.titleLink, is(attachmentTemplate.titleLink != null ? attachmentTemplate.titleLink.getTemplate() + : defaults.attachment.titleLink)); + assertThat(attachment.text, is(attachmentTemplate.text != null ? attachmentTemplate.text.getTemplate() + : defaults.attachment.text)); + if (attachmentTemplate.fields == null) { + assertThat(attachment.fields, nullValue()); + } else { + for (int j = 0; j < attachmentTemplate.fields.length; j++) { + Field.Template fieldTemplate = attachmentTemplate.fields[j]; + Field field = attachment.fields[j]; + assertThat(field.title, + is(fieldTemplate.title != null ? fieldTemplate.title.getTemplate(): defaults.attachment.field.title)); + assertThat(field.value, + is(fieldTemplate.value != null ? fieldTemplate.value.getTemplate() : defaults.attachment.field.value)); + assertThat(field.isShort, + is(fieldTemplate.isShort != null ? 
fieldTemplate.isShort : defaults.attachment.field.isShort)); + } + } + if (attachmentTemplate.markdownSupportedFields == null) { + assertThat(attachment.markdownSupportedFields, nullValue()); + } else { + for (int j = 0; j < attachmentTemplate.markdownSupportedFields.length; j++) { + String[] templateMarkdownSupportFields = Arrays.stream(attachmentTemplate.markdownSupportedFields) + .map(TextTemplate::getTemplate).toArray(String[]::new); + + assertThat(attachment.markdownSupportedFields, arrayContainingInAnyOrder(templateMarkdownSupportFields)); + } + } + } + } + } + + // the url path contains sensitive information, which should not be exposed + public void testUrlPathIsFiltered() throws Exception { + HttpResponse response = new HttpResponse(500); + String path = randomAlphaOfLength(20); + HttpRequest request = HttpRequest.builder("localhost", 1234).path(path).build(); + SlackMessage slackMessage = new SlackMessage("from", new String[] {"to"}, "icon", "text", null); + SentMessages sentMessages = new SentMessages("foo", + Arrays.asList(SentMessages.SentMessage.responded("recipient", slackMessage, request, response))); + + try (XContentBuilder builder = jsonBuilder()) { + WatcherParams params = WatcherParams.builder().hideSecrets(false).build(); + sentMessages.toXContent(builder, params); + assertThat(Strings.toString(builder), containsString(path)); + + try (XContentParser parser = builder.contentType().xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + Strings.toString(builder))) { + parser.map(); + } + } + try (XContentBuilder builder = jsonBuilder()) { + sentMessages.toXContent(builder, ToXContent.EMPTY_PARAMS); + assertThat(Strings.toString(builder), not(containsString(path))); + + try (XContentParser parser = builder.contentType().xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + Strings.toString(builder))) { + parser.map(); + } + } + } + + private static void writeFieldIfNotNull(XContentBuilder builder, String field, Object value) throws IOException { + if (value != null) { + builder.field(field, value); + } + } + + public static SlackMessage.Template createRandomTemplate() { + SlackMessage.Template.Builder templateBuilder = SlackMessage.Template.builder(); + + if (randomBoolean()) { + templateBuilder.setFrom(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + int count = randomIntBetween(0, 3); + for (int i = 0; i < count; i++) { + templateBuilder.addTo(randomAlphaOfLength(10)); + } + } + if (randomBoolean()) { + templateBuilder.setIcon(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + templateBuilder.setText(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + int count = randomIntBetween(0, 3); + for (int i = 0; i < count; i++) { + Attachment.Template.Builder attachmentBuilder = createRandomAttachmentTemplateBuilder(); + templateBuilder.addAttachments(attachmentBuilder); + } + } + + return templateBuilder.build(); + } + + private static Attachment.Template.Builder createRandomAttachmentTemplateBuilder() { + Attachment.Template.Builder attachmentBuilder = Attachment.Template.builder(); + if (randomBoolean()) { + attachmentBuilder.setAuthorName(randomAlphaOfLength(10)); + if (randomBoolean()) { + attachmentBuilder.setAuthorIcon(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + attachmentBuilder.setAuthorLink(randomAlphaOfLength(10)); + } + } + if (randomBoolean()) { + attachmentBuilder.setColor(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + 
attachmentBuilder.setFallback(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + attachmentBuilder.setImageUrl(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + attachmentBuilder.setPretext(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + attachmentBuilder.setThumbUrl(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + attachmentBuilder.setTitle(randomAlphaOfLength(10)); + if (randomBoolean()) { + attachmentBuilder.setTitleLink(randomAlphaOfLength(10)); + } + } + if (randomBoolean()) { + attachmentBuilder.setText(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + int fieldCount = randomIntBetween(0, 3); + for (int j = 0; j < fieldCount; j++) { + attachmentBuilder.addField(randomAlphaOfLength(10), randomAlphaOfLength(10), randomBoolean()); + } + } + if (randomBoolean()) { + attachmentBuilder.addMarkdownField(randomAlphaOfLength(10)); + if (randomBoolean()) { + attachmentBuilder.addMarkdownField(randomAlphaOfLength(10)); + } + } + + return attachmentBuilder; + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/rest/action/RestExecuteWatchActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/rest/action/RestExecuteWatchActionTests.java new file mode 100644 index 0000000000000..f71999a068c34 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/rest/action/RestExecuteWatchActionTests.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.rest.action; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.test.rest.FakeRestRequest.Builder; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchRequestBuilder; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.core.Is.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class RestExecuteWatchActionTests extends ESTestCase { + + private RestController restController = mock(RestController.class); + private Client client = mock(Client.class); + private WatcherClient watcherClient = mock(WatcherClient.class); + + public void testThatFlagsCanBeSpecifiedViaParameters() throws Exception { + String randomId = randomAlphaOfLength(10); + for (String recordExecution : Arrays.asList("true", "false", null)) { + for (String ignoreCondition : Arrays.asList("true", "false", null)) { + for (String debugCondition : Arrays.asList("true", "false", null)) { + ExecuteWatchRequestBuilder builder = new ExecuteWatchRequestBuilder(client); + when(watcherClient.prepareExecuteWatch()).thenReturn(builder); + + RestExecuteWatchAction restExecuteWatchAction = new RestExecuteWatchAction(Settings.EMPTY, restController); + restExecuteWatchAction.doPrepareRequest(createFakeRestRequest(randomId, recordExecution, ignoreCondition, + 
debugCondition), watcherClient); + + assertThat(builder.request().getId(), is(randomId)); + assertThat(builder.request().isRecordExecution(), is(Boolean.parseBoolean(recordExecution))); + assertThat(builder.request().isIgnoreCondition(), is(Boolean.parseBoolean(ignoreCondition))); + assertThat(builder.request().isDebug(), is(Boolean.parseBoolean(debugCondition))); + } + } + } + } + + private FakeRestRequest createFakeRestRequest(String randomId, String recordExecution, String ignoreCondition, String debugCondition) { + FakeRestRequest.Builder builder = new Builder(NamedXContentRegistry.EMPTY); + builder.withContent(new BytesArray("{}"), XContentType.JSON); + Map params = new HashMap<>(); + params.put("id", randomId); + // make sure we test true/false/no params + if (recordExecution != null) params.put("record_execution", recordExecution); + if (ignoreCondition != null) params.put("ignore_condition", ignoreCondition); + if (debugCondition != null) params.put("debug", debugCondition); + + builder.withParams(params); + return builder.build(); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/FilterXContentTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/FilterXContentTests.java new file mode 100644 index 0000000000000..eadf739b175a3 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/FilterXContentTests.java @@ -0,0 +1,207 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.support; + +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +public class FilterXContentTests extends ESTestCase { + public void testPayloadFiltering() throws Exception { + Map data = new HashMap<>(); + data.put("key0", "value1"); + data.put("key1", 2); + data.put("key2", 3.1); + data.put("key3", true); + data.put("key4", Arrays.asList("value5", "value5.5")); + data.put("key5", "value6"); + data.put("key6", 7.1); + data.put("key7", false); + + XContentBuilder builder = jsonBuilder().map(data); + XContentParser parser = createParser(builder); + + Set keys = new HashSet<>(); + int numKeys = randomInt(3); + for (int i = 0; i < numKeys; i++) { + boolean added; + do { + added = keys.add("key" + randomInt(7)); + } while (!added); + } + + Map filteredData = XContentFilterKeysUtils.filterMapOrdered(keys, parser); + assertThat(filteredData.size(), equalTo(numKeys)); + for (String key : keys) { + assertThat(filteredData.get(key), equalTo(data.get(key))); + } + } + + public void testNestedPayloadFiltering() throws 
Exception { + Map data = new HashMap<>(); + data.put("leaf1", MapBuilder.newMapBuilder().put("key1", "value1").put("key2", true).map()); + data.put("leaf2", MapBuilder.newMapBuilder().put("key1", "value1").put("key2", "value2").put("key3", 3).map()); + Map innerMap = MapBuilder.newMapBuilder().put("key1", "value1").put("key2", "value2").map(); + data.put("leaf3", MapBuilder.newMapBuilder().put("key1", "value1").put("key2", innerMap).map()); + + XContentBuilder builder = jsonBuilder().map(data); + XContentParser parser = createParser(builder); + Set keys = new HashSet<>(Arrays.asList("leaf1.key2")); + Map filteredData = XContentFilterKeysUtils.filterMapOrdered(keys, parser); + assertThat(filteredData.size(), equalTo(1)); + assertThat(selectMap(filteredData, "leaf1").size(), equalTo(1)); + assertThat(selectMap(filteredData, "leaf1").get("key2"), Matchers.equalTo(Boolean.TRUE)); + + parser = createParser(builder); + keys = new HashSet<>(Arrays.asList("leaf2")); + filteredData = XContentFilterKeysUtils.filterMapOrdered(keys, parser); + assertThat(filteredData.size(), equalTo(1)); + assertThat(selectMap(filteredData, "leaf2").size(), equalTo(3)); + assertThat(selectMap(filteredData, "leaf2").get("key1"), Matchers.equalTo("value1")); + assertThat(selectMap(filteredData, "leaf2").get("key2"), Matchers.equalTo("value2")); + assertThat(selectMap(filteredData, "leaf2").get("key3"), Matchers.equalTo(3)); + + parser = createParser(builder); + keys = new HashSet<>(Arrays.asList("leaf3.key2.key1")); + filteredData = XContentFilterKeysUtils.filterMapOrdered(keys, parser); + assertThat(filteredData.size(), equalTo(1)); + assertThat(selectMap(filteredData, "leaf3").size(), equalTo(1)); + assertThat(selectMap(filteredData, "leaf3", "key2").size(), equalTo(1)); + assertThat(selectMap(filteredData, "leaf3", "key2").get("key1"), Matchers.equalTo("value1")); + + parser = createParser(builder); + keys = new HashSet<>(Arrays.asList("leaf1.key1", "leaf2.key2")); + filteredData = XContentFilterKeysUtils.filterMapOrdered(keys, parser); + assertThat(filteredData.size(), equalTo(2)); + assertThat(selectMap(filteredData, "leaf1").size(), equalTo(1)); + assertThat(selectMap(filteredData, "leaf2").size(), equalTo(1)); + assertThat(selectMap(filteredData, "leaf1").get("key1"), Matchers.equalTo("value1")); + assertThat(selectMap(filteredData, "leaf2").get("key2"), Matchers.equalTo("value2")); + + parser = createParser(builder); + keys = new HashSet<>(Arrays.asList("leaf2.key1", "leaf2.key3")); + filteredData = XContentFilterKeysUtils.filterMapOrdered(keys, parser); + assertThat(filteredData.size(), equalTo(1)); + assertThat(selectMap(filteredData, "leaf2").size(), equalTo(2)); + assertThat(selectMap(filteredData, "leaf2").get("key1"), Matchers.equalTo("value1")); + assertThat(selectMap(filteredData, "leaf2").get("key3"), Matchers.equalTo(3)); + + parser = createParser(builder); + keys = new HashSet<>(Arrays.asList("leaf3.key2.key1", "leaf3.key2.key2")); + filteredData = XContentFilterKeysUtils.filterMapOrdered(keys, parser); + assertThat(filteredData.size(), equalTo(1)); + assertThat(selectMap(filteredData, "leaf3").size(), equalTo(1)); + assertThat(selectMap(filteredData, "leaf3", "key2").size(), equalTo(2)); + assertThat(selectMap(filteredData, "leaf3", "key2").get("key1"), Matchers.equalTo("value1")); + assertThat(selectMap(filteredData, "leaf3", "key2").get("key2"), Matchers.equalTo("value2")); + } + + // issue #852 + public void testArraysAreNotCutOff() throws Exception { + XContentBuilder builder = 
jsonBuilder().startObject().startArray("buckets") + .startObject().startObject("foo").startObject("values").endObject().endObject().endObject() + .startObject().startObject("foo").startObject("values").endObject().endObject().endObject() + .endArray().endObject(); + + XContentParser parser = createParser(builder); + + Set keys = new HashSet<>(); + keys.add("buckets.foo.values"); + + Map filteredData = XContentFilterKeysUtils.filterMapOrdered(keys, parser); + assertThat(filteredData.get("buckets"), instanceOf(List.class)); + + // both buckets have to include the following keys + List> buckets = (List>) filteredData.get("buckets"); + assertThat(buckets, hasSize(2)); + assertThat(buckets.get(0).keySet(), containsInAnyOrder("foo")); + assertThat(buckets.get(1).keySet(), containsInAnyOrder("foo")); + } + + // issue #4614 + public void testNestedArraysWork() throws Exception { + XContentBuilder builder = jsonBuilder().startObject().startArray("buckets") + .startObject().startObject("foo").field("spam", "eggs").endObject().endObject() + .startObject().startObject("foo").field("spam", "eggs2").endObject().endObject() + .startObject().startObject("foo").field("spam", "eggs3").endObject().endObject() + .endArray().endObject(); + + XContentParser parser = createParser(builder); + + assertArrayValues(parser, "buckets.foo.spam", "eggs", "eggs2", "eggs3"); + } + + private void assertArrayValues(XContentParser parser, String key, Object ... expectedValues) throws IOException { + Set keys = new HashSet<>(); + keys.add(key); + Map filteredData = XContentFilterKeysUtils.filterMapOrdered(keys, parser); + for (int i = 0; i < expectedValues.length; i++) { + if (expectedValues[i] instanceof String) { + String data = ObjectPath.eval("buckets." + i + ".foo.spam", filteredData); + assertThat(data, is(expectedValues[i])); + } else if (expectedValues[i] instanceof Integer) { + int data = ObjectPath.eval("buckets." + i + ".foo.spam", filteredData); + assertThat(data, is(expectedValues[i])); + } else if (expectedValues[i] instanceof Boolean) { + boolean data = ObjectPath.eval("buckets." + i + ".foo.spam", filteredData); + assertThat(data, is(expectedValues[i])); + } + } + } + + public void testNestedArraysWorkWithNumbers() throws Exception { + XContentBuilder builder = jsonBuilder().startObject().startArray("buckets") + .startObject().startObject("foo").field("spam", 0).endObject().endObject() + .startObject().startObject("foo").field("spam", 1).endObject().endObject() + .startObject().startObject("foo").field("spam", 2).endObject().endObject() + .endArray().endObject(); + + XContentParser parser = createParser(builder); + + assertArrayValues(parser, "buckets.foo.spam", 0, 1, 2); + } + + public void testNestedArraysWorkWithBooleans() throws Exception { + boolean[] bools = new boolean[] { randomBoolean(), randomBoolean(), randomBoolean() }; + + XContentBuilder builder = jsonBuilder().startObject().startArray("buckets") + .startObject().startObject("foo").field("spam", bools[0]).endObject().endObject() + .startObject().startObject("foo").field("spam", bools[1]).endObject().endObject() + .startObject().startObject("foo").field("spam", bools[2]).endObject().endObject() + .endArray().endObject(); + + XContentParser parser = createParser(builder); + + assertArrayValues(parser, "buckets.foo.spam", bools); + } + + @SuppressWarnings("unchecked") + private static Map selectMap(Map data, String... 
path) { + for (String element : path) { + data = (Map<String, Object>) data.get(element); + } + return data; + } + +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/VariablesTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/VariablesTests.java new file mode 100644 index 0000000000000..74396a3290644 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/VariablesTests.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.support; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.test.WatcherTestUtils; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent; +import org.joda.time.DateTime; + +import java.util.Map; + +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.joda.time.DateTimeZone.UTC; + +public class VariablesTests extends ESTestCase { + public void testCreateCtxModel() throws Exception { + DateTime scheduledTime = DateTime.now(UTC); + DateTime triggeredTime = scheduledTime.plusMillis(50); + DateTime executionTime = triggeredTime.plusMillis(50); + Payload payload = new Payload.Simple(singletonMap("payload_key", "payload_value")); + Map<String, Object> metadata = singletonMap("metadata_key", "metadata_value"); + TriggerEvent event = new ScheduleTriggerEvent("_watch_id", triggeredTime, scheduledTime); + Wid wid = new Wid("_watch_id", executionTime); + WatchExecutionContext ctx = WatcherTestUtils.mockExecutionContextBuilder("_watch_id") + .wid(wid) + .executionTime(executionTime) + .triggerEvent(event) + .payload(payload) + .metadata(metadata) + .buildMock(); + + Map<String, Object> model = Variables.createCtxModel(ctx, payload); + assertThat(model, notNullValue()); + assertThat(model.size(), is(1)); + + assertThat(ObjectPath.eval("ctx", model), instanceOf(Map.class)); + assertThat(ObjectPath.eval("ctx.id", model), is(wid.value())); + assertThat(ObjectPath.eval("ctx.execution_time", model), is(executionTime)); + assertThat(ObjectPath.eval("ctx.trigger", model), is(event.data())); + assertThat(ObjectPath.eval("ctx.payload", model), is(payload.data())); + assertThat(ObjectPath.eval("ctx.metadata", model), is(metadata)); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherDateTimeUtilsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherDateTimeUtilsTests.java new file mode 100644 index 0000000000000..8c08c12a493da --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherDateTimeUtilsTests.java @@ -0,0 +1,172 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.support; + + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static java.util.concurrent.TimeUnit.DAYS; +import static java.util.concurrent.TimeUnit.HOURS; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.MINUTES; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.parseTimeValueSupportingFractional; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class WatcherDateTimeUtilsTests extends ESTestCase { + public void testParseTimeValueNumeric() throws Exception { + TimeValue value = new TimeValue(randomInt(100), randomFrom(TimeUnit.values())); + long millis = value.getMillis(); + XContentBuilder xContentBuilder = jsonBuilder().startObject(); + if (randomBoolean() || millis == 0) { // 0 is special - no unit required + xContentBuilder.field("value", millis); + } else { + xContentBuilder.field("value", Long.toString(millis)); + } + XContentParser parser = createParser(xContentBuilder.endObject()); + parser.nextToken(); // start object + parser.nextToken(); // field name + parser.nextToken(); // value + + try { + WatcherDateTimeUtils.parseTimeValue(parser, "test"); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), either(is("failed to parse time unit")) + .or(is("could not parse time value. expected either a string or a null value but found [VALUE_NUMBER] instead"))); + } + } + + public void testParseTimeValueNumericNegative() throws Exception { + TimeValue value = new TimeValue(randomIntBetween(1, 100), randomFrom(MILLISECONDS, SECONDS, MINUTES, HOURS, DAYS)); + + XContentParser parser = createParser(jsonBuilder().startObject().field("value", -1 * value.getMillis()).endObject()); + parser.nextToken(); // start object + parser.nextToken(); // field name + parser.nextToken(); // value + + try { + WatcherDateTimeUtils.parseTimeValue(parser, "test"); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), + is("could not parse time value. 
expected either a string or a null value but found [VALUE_NUMBER] instead"));
+ }
+ }
+
+ public void testParseTimeValueString() throws Exception {
+ int value = randomIntBetween(2, 200);
+ Map<String, TimeValue> values = new HashMap<>();
+ values.put(value + "s", TimeValue.timeValueSeconds(value));
+ values.put(value + "m", TimeValue.timeValueMinutes(value));
+ values.put(value + "h", TimeValue.timeValueHours(value));
+
+ String key = randomFrom(values.keySet().toArray(new String[values.size()]));
+
+ XContentParser parser = createParser(jsonBuilder().startObject().field("value", key).endObject());
+ parser.nextToken(); // start object
+ parser.nextToken(); // field name
+ parser.nextToken(); // value
+
+ TimeValue parsed = WatcherDateTimeUtils.parseTimeValue(parser, "test");
+ assertThat(parsed, notNullValue());
+ assertThat(parsed.millis(), is(values.get(key).millis()));
+ }
+
+ public void testParseTimeValueStringNegative() throws Exception {
+ int value = -1 * randomIntBetween(2, 200);
+ Map<String, TimeValue> values = new HashMap<>();
+ values.put(value + "s", TimeValue.timeValueSeconds(value));
+ values.put(value + "m", TimeValue.timeValueMinutes(value));
+ values.put(value + "h", TimeValue.timeValueHours(value));
+
+ String key = randomFrom(values.keySet().toArray(new String[values.size()]));
+
+ XContentParser parser = createParser(jsonBuilder().startObject().field("value", key).endObject());
+ parser.nextToken(); // start object
+ parser.nextToken(); // field name
+ parser.nextToken(); // value
+
+ try {
+ WatcherDateTimeUtils.parseTimeValue(parser, "test");
+ fail("Expected ElasticsearchParseException");
+ } catch (ElasticsearchParseException e) {
+ assertThat(e.getMessage(), is("failed to parse time unit"));
+ }
+ }
+
+ public void testParseTimeValueNull() throws Exception {
+ XContentParser parser = createParser(jsonBuilder().startObject().nullField("value").endObject());
+ parser.nextToken(); // start object
+ parser.nextToken(); // field name
+ parser.nextToken(); // value
+
+ TimeValue parsed = WatcherDateTimeUtils.parseTimeValue(parser, "test");
+ assertThat(parsed, nullValue());
+ }
+
+ public void testParseTimeValueWithFractional() {
+ // This code is lifted straight from 2.x's TimeValueTests.java
+ assertEquals(new TimeValue(10, TimeUnit.MILLISECONDS), parseTimeValueSupportingFractional("10 ms", "test"));
+ assertEquals(new TimeValue(10, TimeUnit.MILLISECONDS), parseTimeValueSupportingFractional("10ms", "test"));
+ assertEquals(new TimeValue(10, TimeUnit.MILLISECONDS), parseTimeValueSupportingFractional("10 MS", "test"));
+ assertEquals(new TimeValue(10, TimeUnit.MILLISECONDS), parseTimeValueSupportingFractional("10MS", "test"));
+
+ assertEquals(new TimeValue(10, TimeUnit.SECONDS), parseTimeValueSupportingFractional("10 s", "test"));
+ assertEquals(new TimeValue(10, TimeUnit.SECONDS), parseTimeValueSupportingFractional("10s", "test"));
+ assertEquals(new TimeValue(10, TimeUnit.SECONDS), parseTimeValueSupportingFractional("10 S", "test"));
+ assertEquals(new TimeValue(10, TimeUnit.SECONDS), parseTimeValueSupportingFractional("10S", "test"));
+
+ assertEquals(new TimeValue(100, TimeUnit.MILLISECONDS), parseTimeValueSupportingFractional("0.1s", "test"));
+
+ assertEquals(new TimeValue(10, TimeUnit.MINUTES), parseTimeValueSupportingFractional("10 m", "test"));
+ assertEquals(new TimeValue(10, TimeUnit.MINUTES), parseTimeValueSupportingFractional("10m", "test"));
+ assertEquals(new TimeValue(10, TimeUnit.MINUTES), parseTimeValueSupportingFractional("10 M", "test"));
+ assertEquals(new TimeValue(10,
TimeUnit.MINUTES), parseTimeValueSupportingFractional("10M", "test")); + + assertEquals(new TimeValue(10, TimeUnit.HOURS), parseTimeValueSupportingFractional("10 h", "test")); + assertEquals(new TimeValue(10, TimeUnit.HOURS), parseTimeValueSupportingFractional("10h", "test")); + assertEquals(new TimeValue(10, TimeUnit.HOURS), parseTimeValueSupportingFractional("10 H", "test")); + assertEquals(new TimeValue(10, TimeUnit.HOURS), parseTimeValueSupportingFractional("10H", "test")); + + assertEquals(new TimeValue(10, TimeUnit.DAYS), parseTimeValueSupportingFractional("10 d", "test")); + assertEquals(new TimeValue(10, TimeUnit.DAYS), parseTimeValueSupportingFractional("10d", "test")); + assertEquals(new TimeValue(10, TimeUnit.DAYS), parseTimeValueSupportingFractional("10 D", "test")); + assertEquals(new TimeValue(10, TimeUnit.DAYS), parseTimeValueSupportingFractional("10D", "test")); + + assertEquals(new TimeValue(70, TimeUnit.DAYS), parseTimeValueSupportingFractional("10 w", "test")); + assertEquals(new TimeValue(70, TimeUnit.DAYS), parseTimeValueSupportingFractional("10w", "test")); + assertEquals(new TimeValue(70, TimeUnit.DAYS), parseTimeValueSupportingFractional("10 W", "test")); + assertEquals(new TimeValue(70, TimeUnit.DAYS), parseTimeValueSupportingFractional("10W", "test")); + + // Extra fractional tests just because that is the point + assertEquals(new TimeValue(100, TimeUnit.MILLISECONDS), parseTimeValueSupportingFractional("0.1s", "test")); + assertEquals(new TimeValue(6, TimeUnit.SECONDS), parseTimeValueSupportingFractional("0.1m", "test")); + assertEquals(new TimeValue(6, TimeUnit.MINUTES), parseTimeValueSupportingFractional("0.1h", "test")); + assertEquals(new TimeValue(144, TimeUnit.MINUTES), parseTimeValueSupportingFractional("0.1d", "test")); + assertEquals(new TimeValue(1008, TimeUnit.MINUTES), parseTimeValueSupportingFractional("0.1w", "test")); + + // And some crazy fractions just for fun + assertEquals(new TimeValue(1700, TimeUnit.MILLISECONDS), parseTimeValueSupportingFractional("1.7s", "test")); + assertEquals(new TimeValue(162, TimeUnit.SECONDS), parseTimeValueSupportingFractional("2.7m", "test")); + assertEquals(new TimeValue(5988, TimeUnit.MINUTES), parseTimeValueSupportingFractional("99.8h", "test")); + assertEquals(new TimeValue(1057968, TimeUnit.SECONDS), parseTimeValueSupportingFractional("12.245d", "test")); + assertEquals(new TimeValue(7258204799L, TimeUnit.MILLISECONDS), parseTimeValueSupportingFractional("12.001w", "test")); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java new file mode 100644 index 0000000000000..4388b20f520c8 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java @@ -0,0 +1,185 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.support; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.mock.orig.Mockito.verify; +import static org.elasticsearch.mock.orig.Mockito.when; +import static org.hamcrest.Matchers.is; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verifyZeroInteractions; + +public class WatcherIndexTemplateRegistryTests extends ESTestCase { + + private WatcherIndexTemplateRegistry registry; + private Client client; + + @Before + public void createRegistryAndClient() { + ThreadPool threadPool = mock(ThreadPool.class); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + when(threadPool.generic()).thenReturn(EsExecutors.newDirectExecutorService()); + + client = mock(Client.class); + when(client.threadPool()).thenReturn(threadPool); + AdminClient adminClient = mock(AdminClient.class); + IndicesAdminClient indicesAdminClient = mock(IndicesAdminClient.class); + when(adminClient.indices()).thenReturn(indicesAdminClient); + when(client.admin()).thenReturn(adminClient); + doAnswer(invocationOnMock -> { + ActionListener listener = + (ActionListener) invocationOnMock.getArguments()[1]; + listener.onResponse(new TestPutIndexTemplateResponse(true)); + return null; + }).when(indicesAdminClient).putTemplate(any(PutIndexTemplateRequest.class), any(ActionListener.class)); + + ClusterService clusterService = mock(ClusterService.class); + registry = new WatcherIndexTemplateRegistry(Settings.EMPTY, clusterService, threadPool, client); + } + + public void testThatNonExistingTemplatesAreAddedImmediately() { + DiscoveryNode node = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build(); + + ClusterChangedEvent event = 
createClusterChangedEvent(Collections.emptyList(), nodes); + registry.clusterChanged(event); + ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(PutIndexTemplateRequest.class); + verify(client.admin().indices(), times(3)).putTemplate(argumentCaptor.capture(), anyObject()); + + // now delete one template from the cluster state and lets retry + ClusterChangedEvent newEvent = createClusterChangedEvent(Arrays.asList(WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME, + WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME), nodes); + registry.clusterChanged(newEvent); + verify(client.admin().indices(), times(4)).putTemplate(argumentCaptor.capture(), anyObject()); + } + + public void testThatTemplatesExist() { + assertThat(WatcherIndexTemplateRegistry.validate(createClusterState(".watch-history")), is(false)); + assertThat(WatcherIndexTemplateRegistry.validate(createClusterState(".watch-history", ".triggered_watches", ".watches")), + is(false)); + assertThat(WatcherIndexTemplateRegistry.validate(createClusterState(WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME, + ".triggered_watches", ".watches")), is(true)); + assertThat(WatcherIndexTemplateRegistry.validate(createClusterState(WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME, + ".triggered_watches", ".watches", "whatever", "else")), is(true)); + } + + // if a node is newer than the master node, the template needs to be applied as well + // otherwise a rolling upgrade would not work as expected, when the node has a .watches shard on it + public void testThatTemplatesAreAppliedOnNewerNodes() { + DiscoveryNode localNode = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode masterNode = new DiscoveryNode("master", ESTestCase.buildNewFakeTransportAddress(), Version.V_6_0_0); + DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("master").add(localNode).add(masterNode).build(); + + ClusterChangedEvent event = createClusterChangedEvent(Arrays.asList(WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME, + WatcherIndexTemplateRegistryField.WATCHES_TEMPLATE_NAME, ".watch-history-6"), nodes); + registry.clusterChanged(event); + + ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(PutIndexTemplateRequest.class); + verify(client.admin().indices(), times(1)).putTemplate(argumentCaptor.capture(), anyObject()); + assertThat(argumentCaptor.getValue().name(), is(WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME)); + } + + public void testThatTemplatesAreNotAppliedOnSameVersionNodes() { + DiscoveryNode localNode = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNode masterNode = new DiscoveryNode("master", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("master").add(localNode).add(masterNode).build(); + + ClusterChangedEvent event = createClusterChangedEvent(Arrays.asList(WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME, + WatcherIndexTemplateRegistryField.WATCHES_TEMPLATE_NAME, ".watch-history-6"), nodes); + registry.clusterChanged(event); + + verifyZeroInteractions(client); + } + + public void testThatMissingMasterNodeDoesNothing() { + DiscoveryNode localNode = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").add(localNode).build(); + + ClusterChangedEvent event = 
createClusterChangedEvent(Arrays.asList(WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME, + WatcherIndexTemplateRegistryField.WATCHES_TEMPLATE_NAME, ".watch-history-6"), nodes); + registry.clusterChanged(event); + + verifyZeroInteractions(client); + } + + private ClusterChangedEvent createClusterChangedEvent(List existingTemplateNames, DiscoveryNodes nodes) { + ClusterChangedEvent event = mock(ClusterChangedEvent.class); + when(event.localNodeMaster()).thenReturn(nodes.isLocalNodeElectedMaster()); + ClusterState cs = mock(ClusterState.class); + ClusterBlocks clusterBlocks = mock(ClusterBlocks.class); + when(clusterBlocks.hasGlobalBlock(eq(GatewayService.STATE_NOT_RECOVERED_BLOCK))).thenReturn(false); + when(cs.blocks()).thenReturn(clusterBlocks); + when(event.state()).thenReturn(cs); + + when(cs.getNodes()).thenReturn(nodes); + + MetaData metaData = mock(MetaData.class); + ImmutableOpenMap.Builder indexTemplates = ImmutableOpenMap.builder(); + for (String name : existingTemplateNames) { + indexTemplates.put(name, mock(IndexTemplateMetaData.class)); + } + + when(metaData.getTemplates()).thenReturn(indexTemplates.build()); + when(cs.metaData()).thenReturn(metaData); + + return event; + } + + private ClusterState createClusterState(String ... existingTemplates) { + MetaData.Builder metaDataBuilder = MetaData.builder(); + for (String templateName : existingTemplates) { + metaDataBuilder.put(IndexTemplateMetaData.builder(templateName) + .patterns(Arrays.asList(generateRandomStringArray(10, 100, false, false)))); + } + + return ClusterState.builder(new ClusterName("foo")).metaData(metaDataBuilder.build()).build(); + } + + private static class TestPutIndexTemplateResponse extends PutIndexTemplateResponse { + TestPutIndexTemplateResponse(boolean acknowledged) { + super(acknowledged); + } + + TestPutIndexTemplateResponse() { + super(); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherUtilsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherUtilsTests.java new file mode 100644 index 0000000000000..826a273c58b55 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherUtilsTests.java @@ -0,0 +1,241 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.support; + +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.support.WatcherUtils; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; +import org.joda.time.DateTime; + +import java.time.Clock; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.formatDate; +import static org.elasticsearch.xpack.core.watcher.support.WatcherUtils.flattenModel; +import static org.elasticsearch.xpack.watcher.input.search.ExecutableSearchInput.DEFAULT_SEARCH_TYPE; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.getRandomSupportedSearchType; +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class WatcherUtilsTests extends ESTestCase { + public void testFlattenModel() throws Exception { + DateTime now = new DateTime(Clock.systemUTC().millis()); + Map map = new HashMap<>(); + map.put("a", singletonMap("a1", new int[] { 0, 1, 2 })); + map.put("b", new String[] { "b0", "b1", "b2" }); + map.put("c", Arrays.asList(TimeValue.timeValueSeconds(0), TimeValue.timeValueSeconds(1))); + map.put("d", now); + + Map result = flattenModel(map); + assertThat(result.size(), is(9)); + assertThat(result, hasEntry("a.a1.0", "0")); + assertThat(result, hasEntry("a.a1.1", "1")); + assertThat(result, hasEntry("a.a1.2", "2")); + assertThat(result, hasEntry("b.0", "b0")); + assertThat(result, hasEntry("b.1", "b1")); + assertThat(result, hasEntry("b.2", "b2")); + assertThat(result, hasEntry("c.0", "0")); + assertThat(result, hasEntry("c.1", "1000")); + assertThat(result, hasEntry("d", formatDate(now))); + } + + public void testResponseToData() throws Exception { + final Map expected = new HashMap<>(); + expected.put("key1", "val"); + expected.put("key2", 1); + expected.put("key3", 1.4); + expected.put("key4", Arrays.asList("a", "b", "c")); + Map otherMap = new HashMap<>(); + otherMap.putAll(expected); + expected.put("key5", otherMap); + ToXContentObject content = (builder, params) -> { + builder.startObject(); + for (Map.Entry entry : expected.entrySet()) { + builder.field(entry.getKey()); + builder.value(entry.getValue()); + } + builder.endObject(); + return builder; + }; + Map result = 
WatcherUtils.responseToData(content); + assertThat(result, equalTo(expected)); + } + + public void testSerializeSearchRequest() throws Exception { + String[] expectedIndices = generateRandomStringArray(5, 5, true); + String[] expectedTypes = generateRandomStringArray(2, 5, true); + IndicesOptions expectedIndicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), + randomBoolean(), WatcherSearchTemplateRequest.DEFAULT_INDICES_OPTIONS); + SearchType expectedSearchType = getRandomSupportedSearchType(); + + BytesReference expectedSource = null; + Script expectedTemplate = null; + WatcherSearchTemplateRequest request; + boolean stored = false; + if (randomBoolean()) { + Map params = new HashMap<>(); + if (randomBoolean()) { + int maxParams = randomIntBetween(1, 10); + for (int i = 0; i < maxParams; i++) { + params.put(randomAlphaOfLengthBetween(1, 5), randomAlphaOfLengthBetween(1, 5)); + } + } + String text = randomAlphaOfLengthBetween(1, 5); + ScriptType scriptType = randomFrom(ScriptType.values()); + stored = scriptType == ScriptType.STORED; + expectedTemplate = new Script(scriptType, stored ? null : "mustache", text, params); + request = new WatcherSearchTemplateRequest(expectedIndices, expectedTypes, expectedSearchType, + expectedIndicesOptions, expectedTemplate); + } else { + SearchSourceBuilder sourceBuilder = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()).size(11); + XContentBuilder builder = jsonBuilder(); + builder.value(sourceBuilder); + expectedSource = BytesReference.bytes(builder); + request = new WatcherSearchTemplateRequest(expectedIndices, expectedTypes, expectedSearchType, + expectedIndicesOptions, expectedSource); + } + + XContentBuilder builder = jsonBuilder(); + request.toXContent(builder, ToXContent.EMPTY_PARAMS); + XContentParser parser = createParser(builder); + assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT)); + WatcherSearchTemplateRequest result = WatcherSearchTemplateRequest.fromXContent(parser, DEFAULT_SEARCH_TYPE); + + assertThat(result.getIndices(), arrayContainingInAnyOrder(expectedIndices != null ? expectedIndices : new String[0])); + assertThat(result.getTypes(), arrayContainingInAnyOrder(expectedTypes != null ? expectedTypes : new String[0])); + assertThat(result.getIndicesOptions(), equalTo(expectedIndicesOptions)); + assertThat(result.getSearchType(), equalTo(expectedSearchType)); + + assertNotNull(result.getTemplate()); + assertThat(result.getTemplate().getLang(), equalTo(stored ? 
null : "mustache")); + if (expectedSource == null) { + assertThat(result.getTemplate().getIdOrCode(), equalTo(expectedTemplate.getIdOrCode())); + assertThat(result.getTemplate().getType(), equalTo(expectedTemplate.getType())); + assertThat(result.getTemplate().getParams(), equalTo(expectedTemplate.getParams())); + } else { + assertThat(result.getTemplate().getIdOrCode(), equalTo(expectedSource.utf8ToString())); + assertThat(result.getTemplate().getType(), equalTo(ScriptType.INLINE)); + } + } + + public void testDeserializeSearchRequest() throws Exception { + + XContentBuilder builder = jsonBuilder().startObject(); + + String[] indices = Strings.EMPTY_ARRAY; + if (randomBoolean()) { + indices = generateRandomStringArray(5, 5, false); + if (randomBoolean()) { + builder.array("indices", indices); + } else { + builder.field("indices", Strings.arrayToCommaDelimitedString(indices)); + } + } + + String[] types = Strings.EMPTY_ARRAY; + if (randomBoolean()) { + types = generateRandomStringArray(2, 5, false); + if (randomBoolean()) { + builder.array("types", types); + } else { + builder.field("types", Strings.arrayToCommaDelimitedString(types)); + } + } + + IndicesOptions indicesOptions = WatcherSearchTemplateRequest.DEFAULT_INDICES_OPTIONS; + if (randomBoolean()) { + indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), + randomBoolean(), WatcherSearchTemplateRequest.DEFAULT_INDICES_OPTIONS); + builder.startObject("indices_options") + .field("allow_no_indices", indicesOptions.allowNoIndices()) + .field("expand_wildcards", indicesOptions.expandWildcardsClosed() && indicesOptions.expandWildcardsOpen() ? "all" : + indicesOptions.expandWildcardsClosed() ? "closed" : + indicesOptions.expandWildcardsOpen() ? "open" : + "none") + .field("ignore_unavailable", indicesOptions.ignoreUnavailable()) + .endObject(); + } + + SearchType searchType = SearchType.DEFAULT; + if (randomBoolean()) { + searchType = getRandomSupportedSearchType(); + builder.field("search_type", randomBoolean() ? searchType.name() : searchType.name().toLowerCase(Locale.ROOT)); + } + + BytesReference source = BytesArray.EMPTY; + if (randomBoolean()) { + SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()).size(11); + XContentBuilder searchSourceJsonBuilder = jsonBuilder(); + searchSourceBuilder.toXContent(searchSourceJsonBuilder, ToXContent.EMPTY_PARAMS); + source = XContentHelper.toXContent(searchSourceBuilder, XContentType.JSON, false); + builder.rawField("body", source.streamInput()); + } + Script template = null; + boolean stored = false; + if (randomBoolean()) { + Map params = new HashMap<>(); + if (randomBoolean()) { + int maxParams = randomIntBetween(1, 10); + for (int i = 0; i < maxParams; i++) { + params.put(randomAlphaOfLengthBetween(1, 5), randomAlphaOfLengthBetween(1, 5)); + } + } + String text = randomAlphaOfLengthBetween(1, 5); + ScriptType scriptType = randomFrom(ScriptType.values()); + stored = scriptType == ScriptType.STORED; + template = new Script(scriptType, stored ? 
null : "mustache", text, params); + builder.field("template", template); + } + builder.endObject(); + + XContentParser parser = createParser(builder); + assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT)); + WatcherSearchTemplateRequest result = WatcherSearchTemplateRequest.fromXContent(parser, DEFAULT_SEARCH_TYPE); + + assertThat(result.getIndices(), arrayContainingInAnyOrder(indices)); + assertThat(result.getTypes(), arrayContainingInAnyOrder(types)); + assertThat(result.getIndicesOptions(), equalTo(indicesOptions)); + assertThat(result.getSearchType(), equalTo(searchType)); + if (source == null) { + assertThat(result.getSearchSource(), nullValue()); + } else { + assertThat(result.getSearchSource().utf8ToString(), equalTo(source.utf8ToString())); + } + if (template == null) { + assertThat(result.getTemplate(), nullValue()); + } else { + assertThat(result.getTemplate().getIdOrCode(), equalTo(template.getIdOrCode())); + assertThat(result.getTemplate().getType(), equalTo(template.getType())); + assertThat(result.getTemplate().getParams(), equalTo(template.getParams())); + assertThat(result.getTemplate().getLang(), equalTo(stored ? null : "mustache")); + } + } + +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateRequestTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateRequestTests.java new file mode 100644 index 0000000000000..b6111cb97e2b7 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateRequestTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.support.search; + +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Map; + +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.equalTo; + +public class WatcherSearchTemplateRequestTests extends ESTestCase { + + public void testFromXContentWithTemplateDefaultLang() throws IOException { + String source = "{\"template\":{\"id\":\"default-script\", \"params\":{\"foo\":\"bar\"}}}"; + assertTemplate(source, "default-script", null, singletonMap("foo", "bar")); + } + + public void testFromXContentWithTemplateCustomLang() throws IOException { + String source = "{\"template\":{\"source\":\"custom-script\", \"lang\":\"painful\",\"params\":{\"bar\":\"baz\"}}}"; + assertTemplate(source, "custom-script", "painful", singletonMap("bar", "baz")); + } + + private void assertTemplate(String source, String expectedScript, String expectedLang, Map expectedParams) { + try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) { + parser.nextToken(); + WatcherSearchTemplateRequest result = WatcherSearchTemplateRequest.fromXContent(parser, randomFrom(SearchType.values())); + assertNotNull(result.getTemplate()); + assertThat(result.getTemplate().getIdOrCode(), equalTo(expectedScript)); + assertThat(result.getTemplate().getLang(), equalTo(expectedLang)); + assertThat(result.getTemplate().getParams(), equalTo(expectedParams)); + } catch (IOException e) { + fail("Failed to parse watch search request: " + e.getMessage()); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/xcontent/MapPathTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/xcontent/MapPathTests.java new file mode 100644 index 0000000000000..f89552a637726 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/xcontent/MapPathTests.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.support.xcontent; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.singletonMap; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.Matchers.is; + +public class MapPathTests extends ESTestCase { + public void testEval() throws Exception { + Map map = singletonMap("key", "value"); + + assertThat(ObjectPath.eval("key", map), is((Object) "value")); + assertThat(ObjectPath.eval("key1", map), nullValue()); + } + + public void testEvalList() throws Exception { + List list = Arrays.asList(1, 2, 3, 4); + Map map = singletonMap("key", list); + + int index = randomInt(3); + assertThat(ObjectPath.eval("key." 
+ index, map), is(list.get(index))); + } + + public void testEvalArray() throws Exception { + int[] array = new int[] { 1, 2, 3, 4 }; + Map map = singletonMap("key", array); + + int index = randomInt(3); + assertThat(((Number) ObjectPath.eval("key." + index, map)).intValue(), is(array[index])); + } + + public void testEvalMap() throws Exception { + Map map = singletonMap("a", singletonMap("b", "val")); + + assertThat(ObjectPath.eval("a.b", map), is((Object) "val")); + } + + public void testEvalMixed() throws Exception { + Map map = new HashMap<>(); + + Map mapA = new HashMap<>(); + map.put("a", mapA); + + List listB = new ArrayList<>(); + mapA.put("b", listB); + List listB1 = new ArrayList<>(); + listB.add(listB1); + + Map mapB11 = new HashMap<>(); + listB1.add(mapB11); + mapB11.put("c", "val"); + + assertThat(ObjectPath.eval("", map), is((Object) map)); + assertThat(ObjectPath.eval("a.b.0.0.c", map), is((Object) "val")); + assertThat(ObjectPath.eval("a.b.0.0.c.d", map), nullValue()); + assertThat(ObjectPath.eval("a.b.0.0.d", map), nullValue()); + assertThat(ObjectPath.eval("a.b.c", map), nullValue()); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/xcontent/XContentSourceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/xcontent/XContentSourceTests.java new file mode 100644 index 0000000000000..5fe451458e025 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/xcontent/XContentSourceTests.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.support.xcontent; + + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.common.xcontent.XContentFactory.smileBuilder; +import static org.elasticsearch.common.xcontent.XContentFactory.yamlBuilder; + +public class XContentSourceTests extends ESTestCase { + public void testToXContent() throws Exception { + XContentBuilder builder = randomBoolean() ? jsonBuilder() : randomBoolean() ? yamlBuilder() : smileBuilder(); + BytesReference bytes = randomBoolean() ? 
+ BytesReference.bytes(builder.startObject().field("key", "value").endObject()) : + BytesReference + .bytes(builder.startObject() + .field("key_str", "value") + .startArray("array_int").value(randomInt(10)).endArray() + .nullField("key_null") + .endObject()); + XContentSource source = new XContentSource(bytes, builder.contentType()); + XContentBuilder builder2 = XContentFactory.contentBuilder(builder.contentType()); + BytesReference bytes2 = BytesReference.bytes(source.toXContent(builder2, ToXContent.EMPTY_PARAMS)); + assertEquals(bytes.toBytesRef(), bytes2.toBytesRef()); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java new file mode 100644 index 0000000000000..ce2b54ddd3b24 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java @@ -0,0 +1,626 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.test; + +import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.analysis.common.CommonAnalysisPlugin; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.script.MockMustacheScriptEngine; +import org.elasticsearch.script.Script; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.ESIntegTestCase.ClusterScope; +import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.disruption.ServiceDisruptionScheme; +import org.elasticsearch.test.store.MockFSIndexStore; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.xpack.core.XPackClient; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.core.watcher.WatcherState; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; +import org.elasticsearch.xpack.core.watcher.execution.TriggeredWatchStoreField; 
+import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; +import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsResponse; +import org.elasticsearch.xpack.core.watcher.watch.ClockMock; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.WatcherLifeCycleService; +import org.elasticsearch.xpack.watcher.history.HistoryStore; +import org.elasticsearch.xpack.watcher.notification.email.Authentication; +import org.elasticsearch.xpack.watcher.notification.email.Email; +import org.elasticsearch.xpack.watcher.notification.email.EmailService; +import org.elasticsearch.xpack.watcher.notification.email.Profile; +import org.elasticsearch.xpack.watcher.trigger.ScheduleTriggerEngineMock; +import org.elasticsearch.xpack.watcher.watch.WatchParser; +import org.hamcrest.Matcher; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.junit.After; +import org.junit.Before; + +import java.time.Clock; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; +import java.util.stream.Collectors; + +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; +import static org.elasticsearch.index.query.QueryBuilders.matchQuery; +import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME; +import static org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField.TRIGGERED_TEMPLATE_NAME; +import static org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField.WATCHES_TEMPLATE_NAME; +import static org.hamcrest.Matchers.emptyArray; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.core.Is.is; +import static org.hamcrest.core.IsNot.not; + +@ClusterScope(scope = SUITE, numClientNodes = 0, transportClientRatio = 0, maxNumDataNodes = 3) +public abstract class AbstractWatcherIntegrationTestCase extends ESIntegTestCase { + + public static final String WATCHER_LANG = Script.DEFAULT_SCRIPT_LANG; + + private TimeWarp timeWarp; + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(XPackSettings.MONITORING_ENABLED.getKey(), false) + .put(XPackSettings.SECURITY_ENABLED.getKey(), false) + .put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial") + // we do this by default in core, but for watcher this isn't needed and only adds noise. 
+ .put("index.store.mock.check_index_on_close", false) + // watcher settings that should work despite randomization + .put("xpack.watcher.execution.scroll.size", randomIntBetween(1, 100)) + .put("xpack.watcher.watch.scroll.size", randomIntBetween(1, 100)) + .put(WatcherLifeCycleService.SETTING_REQUIRE_MANUAL_START.getKey(), true) + .build(); + } + + @Override + protected Settings transportClientSettings() { + return Settings.builder() + .put("client.transport.sniff", false) + .put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4) + .put(NetworkModule.HTTP_TYPE_KEY, SecurityField.NAME4) + .build(); + } + + @Override + protected Set excludeTemplates() { + Set excludes = new HashSet<>(); + excludes.addAll(Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES)); + return Collections.unmodifiableSet(excludes); + } + + @Override + protected Collection> getMockPlugins() { + Set> plugins = new HashSet<>(super.getMockPlugins()); + // security has its own transport service + plugins.remove(MockTransportService.TestPlugin.class); + // security has its own transport + // we have to explicitly add it otherwise we will fail to set the check_index_on_close setting + plugins.add(MockFSIndexStore.TestPlugin.class); + plugins.add(MockMustacheScriptEngine.TestPlugin.class); + return plugins; + } + + @Override + protected Collection> nodePlugins() { + return pluginTypes(); + } + + @Override + protected Collection> transportClientPlugins() { + return nodePlugins(); + } + + protected List> pluginTypes() { + List> types = new ArrayList<>(); + + if (timeWarped()) { + types.add(TimeWarpedWatcher.class); + } else { + types.add(LocalStateWatcher.class); + } + + types.add(CommonAnalysisPlugin.class); + return types; + } + + /** + * @return whether the test suite should run in time warp mode. By default this will be determined globally + * to all test suites based on {@code -Dtests.timewarp} system property (when missing, defaults to + * {@code true}). If a test suite requires to force the mode or force not running under this mode + * this method can be overridden. 
+ */ + protected boolean timeWarped() { + return true; + } + + @Before + public void _setup() throws Exception { + if (timeWarped()) { + timeWarp = new TimeWarp(internalCluster().getInstances(ScheduleTriggerEngineMock.class), + (ClockMock)getInstanceFromMaster(Clock.class)); + } + + if (internalCluster().size() > 0) { + ensureLicenseEnabled(); + + if (timeWarped()) { + // now that the license is enabled and valid we can freeze all nodes clocks + logger.info("[{}#{}]: freezing time on nodes", getTestClass().getSimpleName(), getTestName()); + TimeFreezeDisruption ice = new TimeFreezeDisruption(); + internalCluster().setDisruptionScheme(ice); + ice.startDisrupting(); + } + + createWatcherIndicesOrAliases(); + startWatcher(); + } + } + + @After + public void _cleanup() throws Exception { + // Clear all internal watcher state for the next test method: + logger.info("[#{}]: clearing watcher state", getTestName()); + stopWatcher(); + } + + /** + * In order to test, that .watches and .triggered-watches indices can also point to an alias, we will rarely create those + * after starting watcher + * + * The idea behind this is the possible use of the migration helper for upgrades, see + * https://github.com/elastic/elasticsearch-migration/ + * + */ + private void createWatcherIndicesOrAliases() throws Exception { + if (internalCluster().size() > 0) { + ensureWatcherTemplatesAdded(); + // alias for .watches, setting the index template to the same as well + String watchIndexName; + String triggeredWatchIndexName; + if (rarely()) { + watchIndexName = ".watches-alias-index"; + CreateIndexResponse response = client().admin().indices().prepareCreate(watchIndexName) + .setCause("Index to test aliases with .watches index") + .addAlias(new Alias(Watch.INDEX)) + .get(); + assertAcked(response); + logger.info("set alias for .watches index to [{}]", watchIndexName); + } else { + watchIndexName = Watch.INDEX; + Settings.Builder builder = Settings.builder(); + if (randomBoolean()) { + builder.put("index.number_of_shards", scaledRandomIntBetween(1, 5)); + } + assertAcked(client().admin().indices().prepareCreate(watchIndexName).setSettings(builder)); + } + + // alias for .triggered-watches, ensuring the index template is set appropriately + if (rarely()) { + triggeredWatchIndexName = ".triggered_watches-alias-index"; + CreateIndexResponse response = client().admin().indices().prepareCreate(triggeredWatchIndexName) + .setCause("Index to test aliases with .triggered-watches index") + .addAlias(new Alias(TriggeredWatchStoreField.INDEX_NAME)) + .get(); + assertAcked(response); + logger.info("set alias for .triggered-watches index to [{}]", triggeredWatchIndexName); + } else { + triggeredWatchIndexName = TriggeredWatchStoreField.INDEX_NAME; + assertAcked(client().admin().indices().prepareCreate(triggeredWatchIndexName)); + } + + String historyIndex = HistoryStoreField.getHistoryIndexNameForTime(DateTime.now(DateTimeZone.UTC)); + assertAcked(client().admin().indices().prepareCreate(historyIndex)); + logger.info("creating watch history index [{}]", historyIndex); + ensureGreen(historyIndex, watchIndexName, triggeredWatchIndexName); + } + } + + protected TimeWarp timeWarp() { + assert timeWarped() : "cannot access TimeWarp when test context is not time warped"; + return timeWarp; + } + + public boolean randomizeNumberOfShardsAndReplicas() { + return false; + } + + protected long docCount(String index, String type, QueryBuilder query) { + refresh(); + return docCount(index, type, 
SearchSourceBuilder.searchSource().query(query)); + } + + protected long watchRecordCount(QueryBuilder query) { + refresh(); + return docCount(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*", + HistoryStore.DOC_TYPE, SearchSourceBuilder.searchSource().query(query)); + } + + protected long docCount(String index, String type, SearchSourceBuilder source) { + SearchRequestBuilder builder = client().prepareSearch(index).setSource(source).setSize(0); + if (type != null) { + builder.setTypes(type); + } + return builder.get().getHits().getTotalHits(); + } + + protected SearchResponse searchHistory(SearchSourceBuilder builder) { + return client().prepareSearch(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*").setSource(builder).get(); + } + + protected T getInstanceFromMaster(Class type) { + return internalCluster().getInstance(type, internalCluster().getMasterName()); + } + + protected WatchParser watchParser() { + return getInstanceFromMaster(WatchParser.class); + } + + protected WatcherClient watcherClient() { + return randomBoolean() ? new XPackClient(client()).watcher() : new WatcherClient(client()); + } + + private IndexNameExpressionResolver indexNameExpressionResolver() { + return internalCluster().getInstance(IndexNameExpressionResolver.class); + } + + protected void assertValue(XContentSource source, String path, Matcher matcher) { + assertThat(source.getValue(path), (Matcher) matcher); + } + + protected void assertWatchWithMinimumPerformedActionsCount(final String watchName, + final long minimumExpectedWatchActionsWithActionPerformed) throws Exception { + assertWatchWithMinimumPerformedActionsCount(watchName, minimumExpectedWatchActionsWithActionPerformed, true); + } + + // TODO remove this shitty method... the `assertConditionMet` is bogus + protected void assertWatchWithMinimumPerformedActionsCount(final String watchName, + final long minimumExpectedWatchActionsWithActionPerformed, + final boolean assertConditionMet) throws Exception { + final AtomicReference lastResponse = new AtomicReference<>(); + try { + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + String[] watchHistoryIndices = indexNameExpressionResolver().concreteIndexNames(state, + IndicesOptions.lenientExpandOpen(), HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*"); + assertThat(watchHistoryIndices, not(emptyArray())); + for (String index : watchHistoryIndices) { + IndexRoutingTable routingTable = state.getRoutingTable().index(index); + assertThat(routingTable, notNullValue()); + assertThat(routingTable.allPrimaryShardsActive(), is(true)); + } + + refresh(); + SearchResponse searchResponse = client().prepareSearch(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*") + .setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .setQuery(boolQuery().must(matchQuery("watch_id", watchName)).must(matchQuery("state", + ExecutionState.EXECUTED.id()))) + .get(); + lastResponse.set(searchResponse); + assertThat("could not find executed watch record for watch " + watchName, searchResponse.getHits().getTotalHits(), + greaterThanOrEqualTo(minimumExpectedWatchActionsWithActionPerformed)); + if (assertConditionMet) { + assertThat((Integer) XContentMapValues.extractValue("result.input.payload.hits.total", + searchResponse.getHits().getAt(0).getSourceAsMap()), greaterThanOrEqualTo(1)); + } + }); + } catch (AssertionError error) { + SearchResponse searchResponse = lastResponse.get(); + logger.info("Found [{}] records for watch [{}]", searchResponse.getHits().getTotalHits(), 
watchName); + int counter = 1; + for (SearchHit hit : searchResponse.getHits().getHits()) { + logger.info("hit [{}]=\n {}", counter++, XContentHelper.convertToJson(hit.getSourceRef(), true, true)); + } + throw error; + } + } + + protected SearchResponse searchWatchRecords(Consumer requestBuilderCallback) { + SearchRequestBuilder builder = + client().prepareSearch(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*").setTypes(HistoryStore.DOC_TYPE); + requestBuilderCallback.accept(builder); + return builder.get(); + } + + protected long findNumberOfPerformedActions(String watchName) { + refresh(); + SearchResponse searchResponse = client().prepareSearch(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*") + .setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .setQuery(boolQuery().must(matchQuery("watch_id", watchName)).must(matchQuery("state", ExecutionState.EXECUTED.id()))) + .get(); + return searchResponse.getHits().getTotalHits(); + } + + protected void assertWatchWithNoActionNeeded(final String watchName, + final long expectedWatchActionsWithNoActionNeeded) throws Exception { + final AtomicReference lastResponse = new AtomicReference<>(); + try { + assertBusy(() -> { + // The watch_history index gets created in the background when the first watch is triggered + // so we to check first is this index is created and shards are started + ClusterState state = client().admin().cluster().prepareState().get().getState(); + String[] watchHistoryIndices = indexNameExpressionResolver().concreteIndexNames(state, + IndicesOptions.lenientExpandOpen(), HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*"); + assertThat(watchHistoryIndices, not(emptyArray())); + for (String index : watchHistoryIndices) { + IndexRoutingTable routingTable = state.getRoutingTable().index(index); + assertThat(routingTable, notNullValue()); + assertThat(routingTable.allPrimaryShardsActive(), is(true)); + } + refresh(); + SearchResponse searchResponse = client().prepareSearch(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*") + .setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .setQuery(boolQuery().must(matchQuery("watch_id", watchName)).must(matchQuery("state", + ExecutionState.EXECUTION_NOT_NEEDED.id()))) + .get(); + lastResponse.set(searchResponse); + assertThat(searchResponse.getHits().getTotalHits(), greaterThanOrEqualTo(expectedWatchActionsWithNoActionNeeded)); + }); + } catch (AssertionError error) { + SearchResponse searchResponse = lastResponse.get(); + logger.info("Found [{}] records for watch [{}]", searchResponse.getHits().getTotalHits(), watchName); + int counter = 1; + for (SearchHit hit : searchResponse.getHits().getHits()) { + logger.info("hit [{}]=\n {}", counter++, XContentHelper.convertToJson(hit.getSourceRef(), true, true)); + } + throw error; + } + } + + protected void assertWatchWithMinimumActionsCount(final String watchName, final ExecutionState recordState, + final long recordCount) throws Exception { + assertBusy(() -> { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + String[] watchHistoryIndices = indexNameExpressionResolver().concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), + HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*"); + assertThat(watchHistoryIndices, not(emptyArray())); + for (String index : watchHistoryIndices) { + IndexRoutingTable routingTable = state.getRoutingTable().index(index); + assertThat(routingTable, notNullValue()); + assertThat(routingTable.allPrimaryShardsActive(), is(true)); + } + + refresh(); + SearchResponse 
searchResponse = client().prepareSearch(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*") + .setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .setQuery(boolQuery().must(matchQuery("watch_id", watchName)).must(matchQuery("state", recordState.id()))) + .get(); + assertThat("could not find executed watch record", searchResponse.getHits().getTotalHits(), + greaterThanOrEqualTo(recordCount)); + }); + } + + private void ensureWatcherTemplatesAdded() throws Exception { + // Verify that the index templates exist: + assertBusy(() -> { + GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates(HISTORY_TEMPLATE_NAME).get(); + assertThat("[" + HISTORY_TEMPLATE_NAME + "] is missing", response.getIndexTemplates().size(), equalTo(1)); + response = client().admin().indices().prepareGetTemplates(TRIGGERED_TEMPLATE_NAME).get(); + assertThat("[" + TRIGGERED_TEMPLATE_NAME + "] is missing", response.getIndexTemplates().size(), equalTo(1)); + response = client().admin().indices().prepareGetTemplates(WATCHES_TEMPLATE_NAME).get(); + assertThat("[" + WATCHES_TEMPLATE_NAME + "] is missing", response.getIndexTemplates().size(), equalTo(1)); + }); + } + + protected void startWatcher() throws Exception { + assertBusy(() -> { + WatcherStatsResponse watcherStatsResponse = watcherClient().prepareWatcherStats().get(); + assertThat(watcherStatsResponse.hasFailures(), is(false)); + List> currentStatesFromStatsRequest = watcherStatsResponse.getNodes().stream() + .map(response -> Tuple.tuple(response.getNode().getName(), response.getWatcherState())) + .collect(Collectors.toList()); + List states = currentStatesFromStatsRequest.stream().map(Tuple::v2).collect(Collectors.toList()); + + logger.info("waiting to start watcher, current states {}", currentStatesFromStatsRequest); + + boolean isAllStateStarted = states.stream().allMatch(w -> w == WatcherState.STARTED); + if (isAllStateStarted) { + return; + } + + boolean isAnyStopping = states.stream().anyMatch(w -> w == WatcherState.STOPPING); + if (isAnyStopping) { + throw new AssertionError("at least one node is in state stopping, waiting to be stopped"); + } + + boolean isAllStateStopped = states.stream().allMatch(w -> w == WatcherState.STOPPED); + if (isAllStateStopped) { + assertAcked(watcherClient().prepareWatchService().start().get()); + throw new AssertionError("all nodes are stopped, restarting"); + } + + boolean isAnyStarting = states.stream().anyMatch(w -> w == WatcherState.STARTING); + if (isAnyStarting) { + throw new AssertionError("at least one node is in state starting, waiting to be stopped"); + } + + throw new AssertionError("unexpected state, retrying with next run"); + }); + + } + + protected void ensureLicenseEnabled() throws Exception { + assertBusy(() -> { + for (XPackLicenseState licenseState : internalCluster().getInstances(XPackLicenseState.class)) { + assertThat(licenseState.isWatcherAllowed(), is(true)); + } + }); + } + + protected void stopWatcher() throws Exception { + assertBusy(() -> { + WatcherStatsResponse watcherStatsResponse = watcherClient().prepareWatcherStats().get(); + assertThat(watcherStatsResponse.hasFailures(), is(false)); + List> currentStatesFromStatsRequest = watcherStatsResponse.getNodes().stream() + .map(response -> Tuple.tuple(response.getNode().getName(), response.getWatcherState())) + .collect(Collectors.toList()); + List states = currentStatesFromStatsRequest.stream().map(Tuple::v2).collect(Collectors.toList()); + + logger.info("waiting to stop watcher, current states {}", 
currentStatesFromStatsRequest); + + boolean isAllStateStarted = states.stream().allMatch(w -> w == WatcherState.STARTED); + if (isAllStateStarted) { + assertAcked(watcherClient().prepareWatchService().stop().get()); + throw new AssertionError("all nodes are started, stopping"); + } + + boolean isAnyStopping = states.stream().anyMatch(w -> w == WatcherState.STOPPING); + if (isAnyStopping) { + throw new AssertionError("at least one node is in state stopping, waiting to be stopped"); + } + + boolean isAllStateStopped = states.stream().allMatch(w -> w == WatcherState.STOPPED); + if (isAllStateStopped) { + return; + } + + boolean isAnyStarting = states.stream().anyMatch(w -> w == WatcherState.STARTING); + if (isAnyStarting) { + throw new AssertionError("at least one node is in state starting, waiting to be started before stopping"); + } + + throw new AssertionError("unexpected state, retrying with next run"); + }); + } + + public static class NoopEmailService extends EmailService { + + public NoopEmailService() { + super(Settings.EMPTY, null, new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))); + } + + @Override + public EmailSent send(Email email, Authentication auth, Profile profile, String accountName) { + return new EmailSent(accountName, email); + } + } + + protected static class TimeWarp { + + protected final Iterable schedulers; + protected final ClockMock clock; + + public TimeWarp(Iterable schedulers, ClockMock clock) { + this.schedulers = schedulers; + this.clock = clock; + } + + public void trigger(String jobName) { + schedulers.forEach(scheduler -> scheduler.trigger(jobName)); + } + + public ClockMock clock() { + return clock; + } + + public void trigger(String id, int times, TimeValue timeValue) { + schedulers.forEach(scheduler -> scheduler.trigger(id, times, timeValue)); + } + } + + /** + * A disruption that prevents time from advancing on nodes. This is needed to allow time sensitive tests + * to have full control of time. This disruption requires {@link ClockMock} being available on the nodes. 
+ */ + private static class TimeFreezeDisruption implements ServiceDisruptionScheme { + + private InternalTestCluster cluster; + private boolean frozen; + + @Override + public void applyToCluster(InternalTestCluster cluster) { + this.cluster = cluster; + } + + @Override + public void removeFromCluster(InternalTestCluster cluster) { + stopDisrupting(); + } + + @Override + public void removeAndEnsureHealthy(InternalTestCluster cluster) { + stopDisrupting(); + } + + @Override + public synchronized void applyToNode(String node, InternalTestCluster cluster) { + if (frozen) { + ((ClockMock)cluster.getInstance(Clock.class, node)).freeze(); + } + } + + @Override + public void removeFromNode(String node, InternalTestCluster cluster) { + ((ClockMock)cluster.getInstance(Clock.class, node)).unfreeze(); + } + + @Override + public synchronized void startDisrupting() { + frozen = true; + for (String node: cluster.getNodeNames()) { + applyToNode(node, cluster); + } + } + + @Override + public void stopDisrupting() { + frozen = false; + for (String node: cluster.getNodeNames()) { + removeFromNode(node, cluster); + } + } + + @Override + public void testClusterClosed() { + } + + @Override + public TimeValue expectedTimeToHeal() { + return TimeValue.ZERO; + } + + @Override + public String toString() { + return "time frozen"; + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/LocalStateWatcher.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/LocalStateWatcher.java new file mode 100644 index 0000000000000..54c94dddd0fce --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/LocalStateWatcher.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.test; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.watcher.Watcher; + +import java.nio.file.Path; + +public class LocalStateWatcher extends LocalStateCompositeXPackPlugin { + public LocalStateWatcher(final Settings settings, final Path configPath) throws Exception { + super(settings, configPath); + LocalStateWatcher thisVar = this; + + plugins.add(new Watcher(settings) { + @Override + protected SSLService getSslService() { + return thisVar.getSslService(); + } + + @Override + protected XPackLicenseState getLicenseState() { + return thisVar.getLicenseState(); + } + + }); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/MockTextTemplateEngine.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/MockTextTemplateEngine.java new file mode 100644 index 0000000000000..eeefa20c3a158 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/MockTextTemplateEngine.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.test; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; + +import java.util.Map; + +public class MockTextTemplateEngine extends TextTemplateEngine { + public MockTextTemplateEngine() { + super(Settings.EMPTY, null); + } + + @Override + public String render(TextTemplate textTemplate, Map model) { + if (textTemplate == null ) { + return null; + } + + return textTemplate.getTemplate(); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/TimeWarpedWatcher.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/TimeWarpedWatcher.java new file mode 100644 index 0000000000000..b1c263299f7d3 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/TimeWarpedWatcher.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.test; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.core.watcher.watch.ClockMock; +import org.elasticsearch.xpack.watcher.Watcher; +import org.elasticsearch.xpack.watcher.execution.ExecutionService; +import org.elasticsearch.xpack.watcher.execution.SyncTriggerEventConsumer; +import org.elasticsearch.xpack.watcher.execution.WatchExecutor; +import org.elasticsearch.xpack.watcher.trigger.ScheduleTriggerEngineMock; +import org.elasticsearch.xpack.watcher.trigger.TriggerEngine; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleRegistry; + +import java.nio.file.Path; +import java.time.Clock; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.function.Consumer; +import java.util.stream.Stream; + +public class TimeWarpedWatcher extends LocalStateCompositeXPackPlugin { + + // use a single clock across all nodes using this plugin, this lets keep it static + private static final ClockMock clock = new ClockMock(); + + public TimeWarpedWatcher(final Settings settings, final Path configPath) throws Exception { + super(settings, configPath); + Logger logger = Loggers.getLogger(TimeWarpedWatcher.class, settings); + logger.info("using time warped watchers plugin"); + + TimeWarpedWatcher thisVar = this; + + plugins.add(new Watcher(settings) { + @Override + protected SSLService getSslService() { + return thisVar.getSslService(); + } + + @Override + protected XPackLicenseState getLicenseState() { + return thisVar.getLicenseState(); + } + + @Override + protected Clock getClock() { + return clock; + } + + @Override + protected TriggerEngine getTriggerEngine(Clock clock, ScheduleRegistry scheduleRegistry){ + return new ScheduleTriggerEngineMock(settings, scheduleRegistry, clock); + } + + @Override + protected WatchExecutor getWatchExecutor(ThreadPool threadPool) { + return new 
SameThreadExecutor(); + } + + @Override + protected Consumer> getTriggerEngineListener(ExecutionService executionService){ + return new SyncTriggerEventConsumer(settings, executionService); + } + }); + } + + public static class SameThreadExecutor implements WatchExecutor { + + @Override + public Stream tasks() { + return Stream.empty(); + } + + @Override + public BlockingQueue queue() { + return new ArrayBlockingQueue<>(1); + } + + @Override + public long largestPoolSize() { + return 1; + } + + @Override + public void execute(Runnable runnable) { + runnable.run(); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatchExecutionContextMockBuilder.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatchExecutionContextMockBuilder.java new file mode 100644 index 0000000000000..eb859baad8935 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatchExecutionContextMockBuilder.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.test; + +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent; +import org.joda.time.DateTime; + +import java.util.Collections; +import java.util.Map; + +import static org.joda.time.DateTimeZone.UTC; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class WatchExecutionContextMockBuilder { + + private final WatchExecutionContext ctx; + private final Watch watch; + + public WatchExecutionContextMockBuilder(String watchId) { + ctx = mock(WatchExecutionContext.class); + watch = mock(Watch.class); + WatchStatus watchStatus = mock(WatchStatus.class); + when(watchStatus.getHeaders()).thenReturn(Collections.emptyMap()); + when(watch.status()).thenReturn(watchStatus); + when(watch.id()).thenReturn(watchId); + when(ctx.watch()).thenReturn(watch); + payload(Collections.emptyMap()); + metadata(Collections.emptyMap()); + time(watchId, DateTime.now(UTC)); + } + + public WatchExecutionContextMockBuilder wid(Wid wid) { + when(ctx.id()).thenReturn(wid); + return this; + } + + public WatchExecutionContextMockBuilder payload(String key, Object value) { + return payload(new Payload.Simple(MapBuilder.newMapBuilder().put(key, value).map())); + } + + public WatchExecutionContextMockBuilder payload(Map payload) { + return payload(new Payload.Simple(payload)); + } + + public WatchExecutionContextMockBuilder payload(Payload payload) { + when(ctx.payload()).thenReturn(payload); + return this; + } + + public WatchExecutionContextMockBuilder time(String watchId, DateTime time) { + return executionTime(time).triggerEvent(new ScheduleTriggerEvent(watchId, time, time)); + } + + public WatchExecutionContextMockBuilder executionTime(DateTime time) { + when(ctx.executionTime()).thenReturn(time); + return this; + } + + public 
WatchExecutionContextMockBuilder triggerEvent(TriggerEvent event) { + when(ctx.triggerEvent()).thenReturn(event); + return this; + } + + public WatchExecutionContextMockBuilder metadata(Map metadata) { + when(watch.metadata()).thenReturn(metadata); + return this; + } + + public WatchExecutionContextMockBuilder metadata(String key, String value) { + return metadata(MapBuilder.newMapBuilder().put(key, value).map()); + } + + public WatchExecutionContext buildMock() { + return ctx; + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherTestUtils.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherTestUtils.java new file mode 100644 index 0000000000000..75c6a908b9936 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherTestUtils.java @@ -0,0 +1,190 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.test; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; +import org.elasticsearch.xpack.core.watcher.actions.ActionWrapper; +import org.elasticsearch.xpack.core.watcher.common.secret.Secret; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; +import org.elasticsearch.xpack.watcher.actions.email.EmailAction; +import org.elasticsearch.xpack.watcher.actions.email.ExecutableEmailAction; +import org.elasticsearch.xpack.watcher.actions.webhook.ExecutableWebhookAction; +import org.elasticsearch.xpack.watcher.actions.webhook.WebhookAction; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpMethod; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.execution.TriggeredExecutionContext; +import org.elasticsearch.xpack.watcher.input.simple.ExecutableSimpleInput; +import org.elasticsearch.xpack.watcher.input.simple.SimpleInput; +import org.elasticsearch.xpack.watcher.notification.email.Authentication; +import org.elasticsearch.xpack.watcher.notification.email.EmailService; +import 
org.elasticsearch.xpack.watcher.notification.email.EmailTemplate; +import org.elasticsearch.xpack.watcher.notification.email.HtmlSanitizer; +import org.elasticsearch.xpack.watcher.notification.email.Profile; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateService; +import org.elasticsearch.xpack.watcher.transform.search.ExecutableSearchTransform; +import org.elasticsearch.xpack.watcher.transform.search.SearchTransform; +import org.elasticsearch.xpack.watcher.trigger.schedule.CronSchedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent; +import org.joda.time.DateTime; + +import javax.mail.internet.AddressException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.test.ESTestCase.randomFrom; +import static org.joda.time.DateTimeZone.UTC; + +public final class WatcherTestUtils { + + private WatcherTestUtils() { + } + + public static XContentSource xContentSource(BytesReference bytes) { + XContent xContent = XContentFactory.xContent(XContentHelper.xContentType(bytes)); + return new XContentSource(bytes, xContent.type()); + } + + public static WatcherSearchTemplateRequest templateRequest(SearchSourceBuilder sourceBuilder, String... indices) { + return templateRequest(sourceBuilder, SearchType.DEFAULT, indices); + } + + public static WatcherSearchTemplateRequest templateRequest(SearchSourceBuilder sourceBuilder, SearchType searchType, + String... 
indices) { + try { + XContentBuilder xContentBuilder = jsonBuilder(); + xContentBuilder.value(sourceBuilder); + return new WatcherSearchTemplateRequest(indices, new String[0], searchType, + WatcherSearchTemplateRequest.DEFAULT_INDICES_OPTIONS, BytesReference.bytes(xContentBuilder)); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public static WatchExecutionContextMockBuilder mockExecutionContextBuilder(String watchId) { + return new WatchExecutionContextMockBuilder(watchId) + .wid(new Wid(watchId, DateTime.now(UTC))); + } + + public static WatchExecutionContext mockExecutionContext(String watchId, Payload payload) { + return mockExecutionContextBuilder(watchId) + .wid(new Wid(watchId, DateTime.now(UTC))) + .payload(payload) + .buildMock(); + } + + public static WatchExecutionContext mockExecutionContext(String watchId, DateTime time, Payload payload) { + return mockExecutionContextBuilder(watchId) + .wid(new Wid(watchId, DateTime.now(UTC))) + .payload(payload) + .time(watchId, time) + .buildMock(); + } + + public static WatchExecutionContext mockExecutionContext(String watchId, DateTime executionTime, TriggerEvent event, Payload payload) { + return mockExecutionContextBuilder(watchId) + .wid(new Wid(watchId, DateTime.now(UTC))) + .payload(payload) + .executionTime(executionTime) + .triggerEvent(event) + .buildMock(); + } + + public static WatchExecutionContext createWatchExecutionContext(Logger logger) throws Exception { + Watch watch = new Watch("test-watch", + new ScheduleTrigger(new IntervalSchedule(new IntervalSchedule.Interval(1, IntervalSchedule.Interval.Unit.MINUTES))), + new ExecutableSimpleInput(new SimpleInput(new Payload.Simple()), logger), + InternalAlwaysCondition.INSTANCE, + null, + null, + new ArrayList<>(), + null, + new WatchStatus(new DateTime(0, UTC), emptyMap()), 1L); + TriggeredExecutionContext context = new TriggeredExecutionContext(watch.id(), + new DateTime(0, UTC), + new ScheduleTriggerEvent(watch.id(), new DateTime(0, UTC), new DateTime(0, UTC)), + TimeValue.timeValueSeconds(5)); + context.ensureWatchExists(() -> watch); + return context; + } + + + public static Watch createTestWatch(String watchName, Client client, HttpClient httpClient, EmailService emailService, + WatcherSearchTemplateService searchTemplateService, Logger logger) throws AddressException { + List actions = new ArrayList<>(); + TextTemplateEngine engine = new MockTextTemplateEngine(); + + HttpRequestTemplate.Builder httpRequest = HttpRequestTemplate.builder("localhost", 80); + httpRequest.method(HttpMethod.POST); + httpRequest.path(new TextTemplate("/foobarbaz/{{ctx.watch_id}}")); + httpRequest.body(new TextTemplate("{{ctx.watch_id}} executed with {{ctx.payload.response.hits.total_hits}} hits")); + actions.add(new ActionWrapper("_webhook", null, null, null, new ExecutableWebhookAction(new WebhookAction(httpRequest.build()), + logger, httpClient, engine))); + + + EmailTemplate email = EmailTemplate.builder().from("from@test.com").to("to@test.com").build(); + Authentication auth = new Authentication("testname", new Secret("testpassword".toCharArray())); + EmailAction action = new EmailAction(email, "testaccount", auth, Profile.STANDARD, null, null); + ExecutableEmailAction executale = new ExecutableEmailAction(action, logger, emailService, engine, + new HtmlSanitizer(Settings.EMPTY), Collections.emptyMap()); + actions.add(new ActionWrapper("_email", null, null, null, executale)); + + DateTime now = DateTime.now(UTC); + Map statuses = new HashMap<>(); + 
statuses.put("_webhook", new ActionStatus(now)); + statuses.put("_email", new ActionStatus(now)); + + WatcherSearchTemplateRequest transformRequest = templateRequest(searchSource().query(matchAllQuery()), "my-payload-index"); + SearchTransform searchTransform = new SearchTransform(transformRequest, null, null); + + return new Watch( + watchName, + new ScheduleTrigger(new CronSchedule("0/5 * * * * ? *")), + new ExecutableSimpleInput(new SimpleInput(new Payload.Simple(Collections.singletonMap("bar", "foo"))), logger), + InternalAlwaysCondition.INSTANCE, + new ExecutableSearchTransform(searchTransform, logger, client, searchTemplateService, TimeValue.timeValueMinutes(1)), + new TimeValue(0), + actions, + Collections.singletonMap("foo", "bar"), + new WatchStatus(now, statuses), 1L); + } + + public static SearchType getRandomSupportedSearchType() { + return randomFrom(SearchType.QUERY_THEN_FETCH, SearchType.DFS_QUERY_THEN_FETCH); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/ScheduleEngineTriggerBenchmark.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/ScheduleEngineTriggerBenchmark.java new file mode 100644 index 0000000000000..c1967d9e8cea8 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/ScheduleEngineTriggerBenchmark.java @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.test.bench; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.metrics.MeanMetric; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.input.none.ExecutableNoneInput; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleRegistry; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEngine; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent; +import org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleTriggerEngine; + +import java.time.Clock; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static java.util.Collections.emptySet; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; + +@SuppressForbidden(reason = "benchmark") +public class ScheduleEngineTriggerBenchmark { + + private static final Logger logger = ESLoggerFactory.getLogger(ScheduleEngineTriggerBenchmark.class); + + public static void main(String[] args) throws Exception { + int numWatches = 1000; + int interval = 2; + int benchTime = 60000; + + if (args.length % 2 != 0) { + throw new IllegalArgumentException("Uneven number of arguments"); + } + for (int i = 0; i < args.length; i += 2) { + String value = args[i + 1]; + if ("--num_watches".equals(args[i])) { + 
numWatches = Integer.valueOf(value); + } else if ("--bench_time".equals(args[i])) { + benchTime = Integer.valueOf(value); + } else if ("--interval".equals(args[i])) { + interval = Integer.valueOf(value); + } + } + System.out.println("Running benchmark with numWatches=" + numWatches + " benchTime=" + benchTime + " interval=" + interval); + + Settings settings = Settings.builder() + .put("name", "test") + .build(); + List watches = new ArrayList<>(numWatches); + for (int i = 0; i < numWatches; i++) { + watches.add(new Watch("job_" + i, new ScheduleTrigger(interval(interval + "s")), new ExecutableNoneInput(logger), + InternalAlwaysCondition.INSTANCE, null, null, Collections.emptyList(), null, null, 1L)); + } + ScheduleRegistry scheduleRegistry = new ScheduleRegistry(emptySet()); + + List results = new ArrayList<>(); + System.gc(); + System.out.println("====================================="); + System.out.println("===> Testing scheduler"); + System.out.println("====================================="); + final AtomicBoolean running = new AtomicBoolean(false); + final AtomicInteger total = new AtomicInteger(); + final MeanMetric triggerMetric = new MeanMetric(); + final MeanMetric tooEarlyMetric = new MeanMetric(); + + final ScheduleTriggerEngine scheduler = new TickerScheduleTriggerEngine(settings, scheduleRegistry, Clock.systemUTC()) { + @Override + protected void notifyListeners(List events) { + if (running.get()) { + for (TriggerEvent event : events) { + ScheduleTriggerEvent scheduleTriggerEvent = (ScheduleTriggerEvent) event; + measure(total, triggerMetric, tooEarlyMetric, event.triggeredTime().getMillis(), + scheduleTriggerEvent.scheduledTime().getMillis()); + } + } + } + }; + scheduler.start(watches); + System.out.println("Added [" + numWatches + "] jobs"); + running.set(true); + Thread.sleep(benchTime); + running.set(false); + scheduler.stop(); + System.out.println("done, triggered [" + total.get() + "] times, delayed triggered [" + triggerMetric.count() + + "] times, avg [" + triggerMetric.mean() + "] ms"); + results.add(new Stats(total.get(), triggerMetric.count(), triggerMetric.mean(), tooEarlyMetric.count(), tooEarlyMetric.mean())); + + System.out.println(" Name | # triggered | # delayed | avg delay | # too early triggered | avg too early delay"); + System.out.println("--------------- | ----------- | --------- | --------- | --------------------- | ------------------ "); + for (Stats stats : results) { + System.out.printf( + Locale.ENGLISH, + "%11d | %9d | %9d | %21d | %18d\n", + stats.numberOfTimesTriggered, stats.numberOfTimesDelayed, stats.avgDelayTime, + stats.numberOfEarlyTriggered, stats.avgEarlyDelayTime + ); + } + } + + private static void measure(AtomicInteger total, MeanMetric triggerMetric, MeanMetric tooEarlyMetric, long triggeredTime, + long scheduledTime) { + total.incrementAndGet(); + if (Long.compare(triggeredTime, scheduledTime) != 0) { + long delta = triggeredTime - scheduledTime; + triggerMetric.inc(delta); + if (delta < 0) { + tooEarlyMetric.inc(delta); + } + } + } + + static class Stats { + + final int numberOfTimesTriggered; + final long numberOfTimesDelayed; + final long avgDelayTime; + final long numberOfEarlyTriggered; + final long avgEarlyDelayTime; + + Stats(int numberOfTimesTriggered, long numberOfTimesDelayed, double avgDelayTime, + long numberOfEarlyTriggered, double avgEarlyDelayTime) { + this.numberOfTimesTriggered = numberOfTimesTriggered; + this.numberOfTimesDelayed = numberOfTimesDelayed; + this.avgDelayTime = Math.round(avgDelayTime); + 
this.numberOfEarlyTriggered = numberOfEarlyTriggered; + this.avgEarlyDelayTime = Math.round(avgEarlyDelayTime); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherExecutorServiceBenchmark.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherExecutorServiceBenchmark.java new file mode 100644 index 0000000000000..9ae83c79e9577 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherExecutorServiceBenchmark.java @@ -0,0 +1,218 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.test.bench; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.node.MockNode; +import org.elasticsearch.node.Node; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequest; +import org.elasticsearch.xpack.watcher.Watcher; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.condition.ScriptCondition; +import org.elasticsearch.xpack.watcher.trigger.ScheduleTriggerEngineMock; +import org.elasticsearch.xpack.watcher.trigger.TriggerEngine; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleRegistry; + +import java.time.Clock; +import java.util.Arrays; + +import static java.util.Collections.emptyMap; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.indexAction; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.httpInput; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.searchInput; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.templateRequest; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; + +/** + * Starts a master-only node with watcher and benchmarks the executor service side, so no scheduling. The benchmark + * uses the mock scheduler to trigger watches. + * + * A data node needs to be started outside this benchmark. This removes non-watcher noise like indexing.
+ */ +public class WatcherExecutorServiceBenchmark { + + private static final Settings SETTINGS = Settings.builder() + .put("xpack.security.enabled", false) + .put("cluster.name", "bench") + .put("network.host", "localhost") + .put("script.disable_dynamic", false) + .put("discovery.zen.ping.unicast.hosts", "localhost") + .put("http.cors.enabled", true) + .put("cluster.routing.allocation.disk.threshold_enabled", false) +// .put("recycler.page.limit.heap", "60%") + .build(); + + private static Client client; + private static WatcherClient watcherClient; + private static ScheduleTriggerEngineMock scheduler; + + protected static void start() throws Exception { + Node node = new MockNode(Settings.builder().put(SETTINGS).put("node.data", false).build(), + Arrays.asList(BenchmarkWatcher.class)); + client = node.client(); + client.admin().cluster().prepareHealth("*").setWaitForGreenStatus().get(); + Thread.sleep(5000); + watcherClient = node.injector().getInstance(WatcherClient.class); + scheduler = node.injector().getInstance(ScheduleTriggerEngineMock.class); + } + + public static final class SmallSearchInput extends WatcherExecutorServiceBenchmark { + + public static void main(String[] args) throws Exception { + start(); + client.admin().indices().prepareCreate("test").get(); + client.prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); + + int numAlerts = 1000; + for (int i = 0; i < numAlerts; i++) { + final String name = "_name" + i; + PutWatchRequest putAlertRequest = new PutWatchRequest(name, new WatchSourceBuilder() + .trigger(schedule(interval("5s"))) + .input(searchInput(templateRequest(new SearchSourceBuilder(), "test"))) + .condition(new ScriptCondition(new Script( + ScriptType.INLINE, + Script.DEFAULT_SCRIPT_LANG, + "ctx.payload.hits.total > 0", + emptyMap()))).buildAsBytes(XContentType.JSON), XContentType.JSON); + putAlertRequest.setId(name); + watcherClient.putWatch(putAlertRequest).actionGet(); + } + + int numThreads = 50; + int watchersPerThread = numAlerts / numThreads; + Thread[] threads = new Thread[numThreads]; + for (int i = 0; i < numThreads; i++) { + final int begin = i * watchersPerThread; + final int end = (i + 1) * watchersPerThread; + Runnable r = new Runnable() { + @Override + public void run() { + while (true) { + for (int j = begin; j < end; j++) { + scheduler.trigger("_name" + j); + } + } + } + }; + threads[i] = new Thread(r); + threads[i].start(); + } + for (Thread thread : threads) { + thread.join(); + } + } + + } + + public static final class BigSearchInput extends WatcherExecutorServiceBenchmark { + + public static void main(String[] args) throws Exception { + start(); + int numAlerts = 1000; + for (int i = 0; i < numAlerts; i++) { + final String name = "_name" + i; + PutWatchRequest putAlertRequest = new PutWatchRequest(name, new WatchSourceBuilder() + .trigger(schedule(interval("5s"))) + .input(searchInput(templateRequest(new SearchSourceBuilder(), "test")) + .extractKeys("hits.total")) + .condition(new ScriptCondition(new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "1 == 1", emptyMap()))) + .addAction("_id", indexAction("index", "type")).buildAsBytes(XContentType.JSON), XContentType.JSON); + putAlertRequest.setId(name); + watcherClient.putWatch(putAlertRequest).actionGet(); + } + + int numThreads = 50; + int watchersPerThread = numAlerts / numThreads; + Thread[] threads = new Thread[numThreads]; + for (int i = 0; i < numThreads; i++) { + final int begin = i * watchersPerThread; + final int end = (i + 1) * watchersPerThread; + 
Runnable r = new Runnable() { + @Override + public void run() { + while (true) { + for (int j = begin; j < end; j++) { + scheduler.trigger("_name" + j); + } + } + } + }; + threads[i] = new Thread(r); + threads[i].start(); + } + + + for (Thread thread : threads) { + thread.join(); + } + } + + } + + public static final class HttpInput extends WatcherExecutorServiceBenchmark { + + public static void main(String[] args) throws Exception { + start(); + int numAlerts = 1000; + for (int i = 0; i < numAlerts; i++) { + final String name = "_name" + i; + PutWatchRequest putAlertRequest = new PutWatchRequest(name, new WatchSourceBuilder() + .trigger(schedule(interval("5s"))) + .input(httpInput(HttpRequestTemplate.builder("localhost", 9200))) + .condition(new ScriptCondition(new Script( + ScriptType.INLINE, + Script.DEFAULT_SCRIPT_LANG, + "ctx.payload.tagline == \"You Know, for Search\"", + emptyMap()))).buildAsBytes(XContentType.JSON), XContentType.JSON); + putAlertRequest.setId(name); + watcherClient.putWatch(putAlertRequest).actionGet(); + } + + int numThreads = 50; + int watchersPerThread = numAlerts / numThreads; + Thread[] threads = new Thread[numThreads]; + for (int i = 0; i < numThreads; i++) { + final int begin = i * watchersPerThread; + final int end = (i + 1) * watchersPerThread; + Runnable r = () -> { + while (true) { + for (int j = begin; j < end; j++) { + scheduler.trigger("_name" + j); + } + } + }; + threads[i] = new Thread(r); + threads[i].start(); + } + for (Thread thread : threads) { + thread.join(); + } + } + + } + + public static class BenchmarkWatcher extends Watcher { + + public BenchmarkWatcher(Settings settings) { + super(settings); + Loggers.getLogger(BenchmarkWatcher.class, settings).info("using watcher benchmark plugin"); + } + + @Override + protected TriggerEngine getTriggerEngine(Clock clock, ScheduleRegistry scheduleRegistry) { + return new ScheduleTriggerEngineMock(settings, scheduleRegistry, clock); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java new file mode 100644 index 0000000000000..7cf29632538a9 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java @@ -0,0 +1,359 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.test.bench; + +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.metrics.MeanMetric; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.monitor.jvm.JvmInfo; +import org.elasticsearch.node.MockNode; +import org.elasticsearch.node.Node; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.threadpool.ThreadPoolStats; +import org.elasticsearch.xpack.core.watcher.WatcherState; +import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.actions.ActionBuilders; +import org.elasticsearch.xpack.watcher.actions.logging.LoggingLevel; +import org.elasticsearch.xpack.watcher.condition.ScriptCondition; +import org.elasticsearch.xpack.watcher.test.LocalStateWatcher; + +import java.io.IOException; +import java.time.Clock; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +import static java.util.Collections.emptyMap; +import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.percentiles; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.searchInput; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.templateRequest; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; + +@SuppressForbidden(reason = "benchmark") +public class WatcherScheduleEngineBenchmark { + + private static final Settings SETTINGS = Settings.builder() + .put("xpack.security.enabled", false) + .put("cluster.name", "bench") + .put("script.disable_dynamic", false) + .put("http.cors.enabled", true) + .build(); + + public static void main(String[] args) throws Exception { + System.setProperty("es.logger.prefix", ""); + + String[] engines = new String[]{"ticker", "scheduler"}; + int numWatches = 2000; + int benchTime = 60000; + int interval = 1; + + if (args.length % 2 != 0) { + throw new IllegalArgumentException("Uneven number of arguments"); + } + for (int i = 0; i < args.length; i += 2) { + String value = args[i + 1]; + if ("--num_watches".equals(args[i])) { + numWatches = Integer.valueOf(value); + } else if 
("--bench_time".equals(args[i])) { + benchTime = Integer.valueOf(value); + } else if ("--interval".equals(args[i])) { + interval = Integer.valueOf(value); + } else if ("--engines".equals(args[i])) { + engines = Strings.commaDelimitedListToStringArray(value); + } + } + System.out.println("Running schedule benchmark with:"); + System.out.println("numWatches=" + numWatches + " benchTime=" + benchTime + " interval=" + interval + + " engines=" + Arrays.toString(engines)); + System.out.println("and heap_max=" + JvmInfo.jvmInfo().getMem().getHeapMax()); + + + // First clean everything and index the watcher (but not via put alert api!) + try (Node node = new Node(Settings.builder().put(SETTINGS).put("node.data", false).build()).start()) { + try (Client client = node.client()) { + ClusterHealthResponse response = client.admin().cluster().prepareHealth().setWaitForNodes("2").get(); + if (response.getNumberOfNodes() != 2 && response.getNumberOfDataNodes() != 1) { + throw new IllegalStateException("This benchmark needs one extra data only node running outside this benchmark"); + } + + client.admin().indices().prepareDelete("_all").get(); + client.admin().indices().prepareCreate("test").get(); + client.prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); + + System.out.println("===============> indexing [" + numWatches + "] watches"); + for (int i = 0; i < numWatches; i++) { + final String id = "_id_" + i; + client.prepareIndex(Watch.INDEX, Watch.DOC_TYPE, id) + .setSource(new WatchSourceBuilder() + .trigger(schedule(interval(interval + "s"))) + .input(searchInput(templateRequest(new SearchSourceBuilder(), "test"))) + .condition(new ScriptCondition(new Script( + ScriptType.INLINE, + Script.DEFAULT_SCRIPT_LANG, + "ctx.payload.hits.total > 0", + emptyMap()))) + .addAction("logging", ActionBuilders.loggingAction("test").setLevel(LoggingLevel.TRACE)) + .buildAsBytes(XContentType.JSON), XContentType.JSON + ).get(); + } + client.admin().indices().prepareFlush(Watch.INDEX, "test").get(); + System.out.println("===============> indexed [" + numWatches + "] watches"); + } + } + + + // Now for each scheduler impl run the benchmark + Map results = new HashMap<>(); + for (String engine : engines) { + BenchStats stats = new BenchStats(engine, numWatches); + results.put(engine, stats); + System.out.println("===============> testing engine [" + engine + "]"); + System.gc(); + Settings settings = Settings.builder() + .put(SETTINGS) + .put("xpack.watcher.trigger.schedule.engine", engine) + .put("node.data", false) + .build(); + try (Node node = new MockNode(settings, Arrays.asList(LocalStateWatcher.class))) { + try (Client client = node.client()) { + client.admin().cluster().prepareHealth().setWaitForNodes("2").get(); + client.admin().indices().prepareDelete(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*").get(); + client.admin().cluster().prepareHealth(Watch.INDEX, "test").setWaitForYellowStatus().get(); + + Clock clock = node.injector().getInstance(Clock.class); + WatcherClient watcherClient = node.injector().getInstance(WatcherClient.class); + while (!watcherClient.prepareWatcherStats().get().getNodes().stream() + .allMatch(r -> r.getWatcherState() == WatcherState.STARTED)) { + Thread.sleep(100); + } + long actualLoadedWatches = watcherClient.prepareWatcherStats().get().getWatchesCount(); + if (actualLoadedWatches != numWatches) { + throw new IllegalStateException("Expected [" + numWatches + "] watched to be loaded, but only [" + + actualLoadedWatches + "] watches were actually loaded"); + } 
+ long startTime = clock.millis(); + System.out.println("==> watcher started, waiting [" + benchTime + "] seconds now..."); + + final AtomicBoolean start = new AtomicBoolean(true); + final MeanMetric jvmUsedHeapSpace = new MeanMetric(); + Thread sampleThread = new Thread(new Runnable() { + @Override + public void run() { + try { + while (start.get()) { + NodesStatsResponse response = client.admin().cluster().prepareNodesStats("_master").setJvm(true).get(); + ByteSizeValue heapUsed = response.getNodes().get(0).getJvm().getMem().getHeapUsed(); + jvmUsedHeapSpace.inc(heapUsed.getBytes()); + Thread.sleep(1000); + } + } catch (InterruptedException ignored) {} + } + }); + sampleThread.start(); + Thread.sleep(benchTime); + long endTime = clock.millis(); + start.set(false); + sampleThread.join(); + + NodesStatsResponse response = client.admin().cluster().prepareNodesStats().setThreadPool(true).get(); + for (NodeStats nodeStats : response.getNodes()) { + for (ThreadPoolStats.Stats threadPoolStats : nodeStats.getThreadPool()) { + if ("watcher".equals(threadPoolStats.getName())) { + stats.setWatcherThreadPoolStats(threadPoolStats); + } + } + } + client.admin().indices().prepareRefresh(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*").get(); + Script script = new Script( + ScriptType.INLINE, + Script.DEFAULT_SCRIPT_LANG, + "doc['trigger_event.schedule.triggered_time'].value - doc['trigger_event.schedule.scheduled_time'].value", + emptyMap()); + SearchResponse searchResponse = client.prepareSearch(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*") + .setQuery(QueryBuilders.rangeQuery("trigger_event.schedule.scheduled_time").gte(startTime).lte(endTime)) + .addAggregation(terms("state").field("state")) + .addAggregation(histogram("delay") + .script(script) + .interval(10) + ) + .addAggregation(percentiles("percentile_delay") + .script(script) + .percentiles(1.0, 20.0, 50.0, 80.0, 99.0) + ) + .get(); + Terms terms = searchResponse.getAggregations().get("state"); + stats.setStateStats(terms); + Histogram histogram = searchResponse.getAggregations().get("delay"); + stats.setDelayStats(histogram); + System.out.println("===> State"); + for (Terms.Bucket bucket : terms.getBuckets()) { + System.out.println("\t" + bucket.getKey() + "=" + bucket.getDocCount()); + } + System.out.println("===> Delay"); + for (Histogram.Bucket bucket : histogram.getBuckets()) { + System.out.println("\t" + bucket.getKey() + "=" + bucket.getDocCount()); + } + Percentiles percentiles = searchResponse.getAggregations().get("percentile_delay"); + stats.setDelayPercentiles(percentiles); + stats.setAvgJvmUsed(jvmUsedHeapSpace); + watcherClient.prepareWatchService().stop().get(); + } + } + } + + // Finally print out the results in an asciidoc table: + System.out.println("## Ran with [" + numWatches + "] watches, interval [" + interval + "] and bench_time [" + benchTime + "]"); + System.out.println(); + System.out.println("### Watcher execution and watcher thread pool stats"); + System.out.println(); + System.out.println(" Name | avg heap used | wtp rejected | wtp completed"); + System.out.println("---------- | ------------- | ------------ | -------------"); + for (BenchStats benchStats : results.values()) { + benchStats.printThreadStats(); + } + System.out.println(); + System.out.println("### Watch record state"); + System.out.println(); + System.out.println(" Name | # state executed | # state failed | # state throttled | # state awaits_execution"); + System.out.println("---------- | ---------------- | -------------- | ----------------- 
| ------------------------"); + for (BenchStats benchStats : results.values()) { + benchStats.printWatchRecordState(); + } + + System.out.println(); + System.out.println("### Trigger delay"); + System.out.println(); + System.out.println(" Name | 1% delayed | 20% delayed | 50% delayed | 80% delayed | 99% delayed"); + System.out.println("---------- | ---------- | ----------- | ----------- | ----------- | -----------"); + for (BenchStats benchStats : results.values()) { + benchStats.printTriggerDelay(); + } + } + + @SuppressForbidden(reason = "benchmark") + private static class BenchStats { + + private final String name; + private final int numWatches; + private ThreadPoolStats.Stats watcherThreadPoolStats; + + private Terms stateStats; + private Histogram delayStats; + + private Percentiles delayPercentiles; + + private long avgHeapUsed; + + private BenchStats(String name, int numWatches) { + this.name = name; + this.numWatches = numWatches; + } + + public String getName() { + return name; + } + + public int getNumWatches() { + return numWatches; + } + + public ThreadPoolStats.Stats getWatcherThreadPoolStats() { + return watcherThreadPoolStats; + } + + public void setWatcherThreadPoolStats(ThreadPoolStats.Stats watcherThreadPoolStats) { + this.watcherThreadPoolStats = watcherThreadPoolStats; + } + + public Terms getStateStats() { + return stateStats; + } + + public void setStateStats(Terms stateStats) { + this.stateStats = stateStats; + } + + public Histogram getDelayStats() { + return delayStats; + } + + public void setDelayStats(Histogram delayStats) { + this.delayStats = delayStats; + } + + public Percentiles getDelayPercentiles() { + return delayPercentiles; + } + + public void setDelayPercentiles(Percentiles delayPercentiles) { + this.delayPercentiles = delayPercentiles; + } + + public void setAvgJvmUsed(MeanMetric jvmMemUsed) { + avgHeapUsed = Math.round(jvmMemUsed.mean()); + } + + public void printThreadStats() throws IOException { + System.out.printf( + Locale.ENGLISH, + "%10s | %13s | %12d | %13d \n", + name, new ByteSizeValue(avgHeapUsed), + watcherThreadPoolStats.getRejected(), watcherThreadPoolStats.getCompleted() + ); + } + + public void printWatchRecordState() throws IOException { + Terms.Bucket executed = stateStats.getBucketByKey("executed"); + Terms.Bucket failed = stateStats.getBucketByKey("failed"); + Terms.Bucket throttled = stateStats.getBucketByKey("throttled"); + Terms.Bucket awaitsExecution = stateStats.getBucketByKey("awaits_execution"); + System.out.printf( + Locale.ENGLISH, + "%10s | %16d | %14d | %17d | %24d \n", + name, executed != null ? executed.getDocCount() : 0, + failed != null ? failed.getDocCount() : 0, + throttled != null ? throttled.getDocCount() : 0, + awaitsExecution != null ? 
awaitsExecution.getDocCount() : 0 + ); + } + + public void printTriggerDelay() throws Exception { + String _1thPercentile = String.valueOf(Math.round(delayPercentiles.percentile(1.0))); + String _20thPercentile = String.valueOf(Math.round(delayPercentiles.percentile(20.0))); + String _50thPercentile = String.valueOf(Math.round(delayPercentiles.percentile(50.0))); + String _80thPercentile = String.valueOf(Math.round(delayPercentiles.percentile(80.0))); + String _99thPercentile = String.valueOf(Math.round(delayPercentiles.percentile(99.0))); + System.out.printf( + Locale.ENGLISH, + "%10s | %10s | %11s | %11s | %11s | %11s \n", + name, _1thPercentile, _20thPercentile, _50thPercentile, _80thPercentile, _99thPercentile + ); + } + } + +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java new file mode 100644 index 0000000000000..afcbd2499033e --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java @@ -0,0 +1,381 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.test.integration; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.core.watcher.transport.actions.delete.DeleteWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.condition.CompareCondition; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.Schedules; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.MonthTimes; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.WeekTimes; +import org.joda.time.DateTime; + +import java.time.Clock; +import java.util.Collections; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.index.query.QueryBuilders.matchQuery; +import static org.elasticsearch.index.query.QueryBuilders.termQuery; 
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.indexAction; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.searchInput; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.templateRequest; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.xContentSource; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.cron; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.daily; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.hourly; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.monthly; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.weekly; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +@TestLogging("org.elasticsearch.xpack.watcher:DEBUG," + + "org.elasticsearch.xpack.watcher.WatcherIndexingListener:TRACE") +public class BasicWatcherTests extends AbstractWatcherIntegrationTestCase { + + public void testIndexWatch() throws Exception { + WatcherClient watcherClient = watcherClient(); + createIndex("idx"); + // Have a sample document in the index, the watch is going to evaluate + client().prepareIndex("idx", "type").setSource("field", "foo").get(); + refresh(); + WatcherSearchTemplateRequest request = templateRequest(searchSource().query(termQuery("field", "foo")), "idx"); + watcherClient.preparePutWatch("_name") + .setSource(watchBuilder() + .trigger(schedule(interval(5, IntervalSchedule.Interval.Unit.SECONDS))) + .input(searchInput(request)) + .condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.EQ, 1L)) + .addAction("_logger", loggingAction("_logging") + .setCategory("_category"))) + .get(); + + timeWarp().trigger("_name"); + assertWatchWithMinimumPerformedActionsCount("_name", 1); + + GetWatchResponse getWatchResponse = watcherClient().prepareGetWatch().setId("_name").get(); + assertThat(getWatchResponse.isFound(), is(true)); + assertThat(getWatchResponse.getSource(), notNullValue()); + } + + public void testIndexWatchRegisterWatchBeforeTargetIndex() throws Exception { + WatcherClient watcherClient = watcherClient(); + WatcherSearchTemplateRequest searchRequest = templateRequest(searchSource().query(termQuery("field", "value")), "idx"); + watcherClient.preparePutWatch("_name") + .setSource(watchBuilder() + .trigger(schedule(interval(5, IntervalSchedule.Interval.Unit.SECONDS))) + .input(searchInput(searchRequest)) + .condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.EQ, 1L))) + .get(); + timeWarp().trigger("_name"); + // The watch's condition won't meet because there is no data that matches with the query + assertWatchWithNoActionNeeded("_name", 1); + + // Index sample doc after we register the 
watch, and the watch's condition should be met + client().prepareIndex("idx", "type").setSource("field", "value").get(); + refresh(); + + timeWarp().clock().fastForwardSeconds(5); + timeWarp().trigger("_name"); + refresh(); + + assertWatchWithMinimumPerformedActionsCount("_name", 1); + } + + public void testDeleteWatch() throws Exception { + WatcherClient watcherClient = watcherClient(); + WatcherSearchTemplateRequest searchRequest = templateRequest(searchSource().query(matchAllQuery()), "idx"); + PutWatchResponse indexResponse = watcherClient.preparePutWatch("_name") + .setSource(watchBuilder() + .trigger(schedule(cron("0/1 * * * * ? 2020"))) + .input(searchInput(searchRequest)) + .condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.EQ, 1L))) + .get(); + assertThat(indexResponse.isCreated(), is(true)); + DeleteWatchResponse deleteWatchResponse = watcherClient.prepareDeleteWatch("_name").get(); + assertThat(deleteWatchResponse, notNullValue()); + assertThat(deleteWatchResponse.isFound(), is(true)); + + refresh(); + assertHitCount(client().prepareSearch(Watch.INDEX).setSize(0).get(), 0L); + + // Deleting the same watch for the second time + deleteWatchResponse = watcherClient.prepareDeleteWatch("_name").get(); + assertThat(deleteWatchResponse, notNullValue()); + assertThat(deleteWatchResponse.isFound(), is(false)); + } + + public void testMalformedWatch() throws Exception { + WatcherClient watcherClient = watcherClient(); + createIndex("idx"); + // Have a sample document in the index that the watch is going to evaluate + client().prepareIndex("idx", "type").setSource("field", "value").get(); + XContentBuilder watchSource = jsonBuilder(); + + watchSource.startObject(); + watchSource.field("unknown_field", "x"); + watchSource.startObject("schedule").field("cron", "0/5 * * * * ? *").endObject(); + + watchSource.startObject("condition").startObject("script").field("script", "return true"); + watchSource.field("request", templateRequest(searchSource().query(matchAllQuery()))); + watchSource.endObject().endObject(); + + watchSource.endObject(); + try { + watcherClient.preparePutWatch("_name") + .setSource(BytesReference.bytes(watchSource), watchSource.contentType()) + .get(); + fail(); + } catch (ElasticsearchParseException e) { + // In the watch store we fail parsing if a watch contains undefined fields.
+ } + try { + client().prepareIndex(Watch.INDEX, Watch.DOC_TYPE, "_name") + .setSource(watchSource) + .get(); + fail(); + } catch (Exception e) { + // The watch index template the mapping is defined as strict + } + } + + @TestLogging("org.elasticsearch.xpack.watcher:DEBUG") + public void testModifyWatches() throws Exception { + createIndex("idx"); + WatcherSearchTemplateRequest searchRequest = templateRequest(searchSource().query(matchAllQuery()), "idx"); + + WatchSourceBuilder source = watchBuilder() + .trigger(schedule(interval("5s"))) + .input(searchInput(searchRequest)) + .addAction("_id", indexAction("idx", "action")); + + watcherClient().preparePutWatch("_name") + .setSource(source.condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.EQ, 1L))) + .get(); + + timeWarp().clock().fastForwardSeconds(5); + timeWarp().trigger("_name"); + assertWatchWithMinimumPerformedActionsCount("_name", 0, false); + + watcherClient().preparePutWatch("_name") + .setSource(source.condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.EQ, 0L))) + .get(); + + timeWarp().clock().fastForwardSeconds(5); + timeWarp().trigger("_name"); + refresh(); + assertWatchWithMinimumPerformedActionsCount("_name", 1, false); + + watcherClient().preparePutWatch("_name") + .setSource(source + .trigger(schedule(Schedules.cron("0/1 * * * * ? 2020"))) + .condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.EQ, 0L))) + .get(); + + timeWarp().clock().fastForwardSeconds(5); + timeWarp().trigger("_name"); + long count = findNumberOfPerformedActions("_name"); + + timeWarp().clock().fastForwardSeconds(5); + timeWarp().trigger("_name"); + assertThat(count, equalTo(findNumberOfPerformedActions("_name"))); + } + + public void testConditionSearchWithSource() throws Exception { + SearchSourceBuilder searchSourceBuilder = searchSource().query(matchQuery("level", "a")); + testConditionSearch(templateRequest(searchSourceBuilder, "events")); + } + + public void testConditionSearchWithIndexedTemplate() throws Exception { + SearchSourceBuilder searchSourceBuilder = searchSource().query(matchQuery("level", "a")); + assertAcked(client().admin().cluster().preparePutStoredScript() + .setId("my-template") + .setContent(BytesReference.bytes(jsonBuilder().startObject().field("template").value(searchSourceBuilder).endObject()), + XContentType.JSON) + .get()); + + Script template = new Script(ScriptType.STORED, null, "my-template", Collections.emptyMap()); + WatcherSearchTemplateRequest searchRequest = new WatcherSearchTemplateRequest(new String[]{"events"}, new String[0], + SearchType.DEFAULT, WatcherSearchTemplateRequest.DEFAULT_INDICES_OPTIONS, template); + testConditionSearch(searchRequest); + } + + public void testInputFiltering() throws Exception { + WatcherClient watcherClient = watcherClient(); + createIndex("idx"); + // Have a sample document in the index, the watch is going to evaluate + client().prepareIndex("idx", "type").setSource(jsonBuilder().startObject().field("field", "foovalue").endObject()).get(); + refresh(); + WatcherSearchTemplateRequest request = templateRequest(searchSource().query(termQuery("field", "foovalue")), "idx"); + watcherClient.preparePutWatch("_name1") + .setSource(watchBuilder() + .trigger(schedule(interval(5, IntervalSchedule.Interval.Unit.SECONDS))) + .input(searchInput(request).extractKeys("hits.total")) + .condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.EQ, 1L))) + .get(); + // in this watcher the condition will fail, 
because max_score isn't extracted, only total: + watcherClient.preparePutWatch("_name2") + .setSource(watchBuilder() + .trigger(schedule(interval(5, IntervalSchedule.Interval.Unit.SECONDS))) + .input(searchInput(request).extractKeys("hits.total")) + .condition(new CompareCondition("ctx.payload.hits.max_score", CompareCondition.Op.GTE, 0L))) + .get(); + + timeWarp().trigger("_name1"); + assertWatchWithMinimumPerformedActionsCount("_name1", 1); + timeWarp().trigger("_name2"); + assertWatchWithNoActionNeeded("_name2", 1); + + // Check that the input result payload has been filtered + refresh(); + SearchResponse searchResponse = searchWatchRecords(builder -> builder.setQuery(matchQuery("watch_id", "_name1"))); + assertHitCount(searchResponse, 1); + XContentSource source = xContentSource(searchResponse.getHits().getAt(0).getSourceRef()); + assertThat(source.getValue("result.input.payload.hits.total"), equalTo((Object) 1)); + } + + public void testPutWatchWithNegativeSchedule() throws Exception { + try { + watcherClient().preparePutWatch("_name") + .setSource(watchBuilder() + .trigger(schedule(interval(-5, IntervalSchedule.Interval.Unit.SECONDS))) + .input(simpleInput("key", "value")) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_logger", loggingAction("executed!"))) + .get(); + fail("put watch should have failed"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("interval can't be lower than 1000 ms, but [-5s] was specified")); + } + + try { + watcherClient().preparePutWatch("_name") + .setSource(watchBuilder() + .trigger(schedule(hourly().minutes(-10).build())) + .input(simpleInput("key", "value")) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_logger", loggingAction("executed!"))) + .get(); + fail("put watch should have failed"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("invalid hourly minute [-10]. minute must be between 0 and 59 incl.")); + } + + try { + watcherClient().preparePutWatch("_name") + .setSource(watchBuilder() + .trigger(schedule(daily().atRoundHour(-10).build())) + .input(simpleInput("key", "value")) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_logger", loggingAction("executed!"))) + .get(); + fail("put watch should have failed"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), + equalTo("invalid time [0-10:00]. invalid time hour value [-10]. time hours must be between 0 and 23 incl.")); + } + + try { + watcherClient().preparePutWatch("_name") + .setSource(watchBuilder() + .trigger(schedule(weekly().time(WeekTimes.builder().atRoundHour(-10).build()).build())) + .input(simpleInput("key", "value")) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_logger", loggingAction("executed!"))) + .get(); + fail("put watch should have failed"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), + equalTo("invalid time [0-10:00]. invalid time hour value [-10]. time hours must be between 0 and 23 incl.")); + } + + try { + watcherClient().preparePutWatch("_name") + .setSource(watchBuilder() + .trigger(schedule(monthly().time(MonthTimes.builder().atRoundHour(-10).build()).build())) + .input(simpleInput("key", "value")) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_logger", loggingAction("executed!"))) + .get(); + fail("put watch should have failed"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), + equalTo("invalid time [0-10:00]. invalid time hour value [-10]. 
time hours must be between 0 and 23 incl.")); + } + } + + private void testConditionSearch(WatcherSearchTemplateRequest request) throws Exception { + // reset, so we don't miss event docs when we filter over the _timestamp field. + timeWarp().clock().setTime(new DateTime(Clock.systemUTC().millis())); + + String watchName = "_name"; + assertAcked(prepareCreate("events").addMapping("event", "level", "type=text")); + + watcherClient().preparePutWatch(watchName) + .setSource(watchBuilder() + .trigger(schedule(interval("5s"))) + .input(searchInput(request)) + .condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.GTE, 3L))) + .get(); + + logger.info("created watch [{}] at [{}]", watchName, new DateTime(Clock.systemUTC().millis())); + + client().prepareIndex("events", "event") + .setSource("level", "a") + .get(); + client().prepareIndex("events", "event") + .setSource("level", "a") + .get(); + + refresh(); + timeWarp().clock().fastForwardSeconds(1); + timeWarp().trigger(watchName); + assertWatchWithNoActionNeeded(watchName, 1); + + client().prepareIndex("events", "event") + .setSource("level", "b") + .get(); + refresh(); + timeWarp().clock().fastForwardSeconds(1); + timeWarp().trigger(watchName); + assertWatchWithNoActionNeeded(watchName, 2); + + client().prepareIndex("events", "event") + .setSource("level", "a") + .get(); + refresh(); + timeWarp().clock().fastForwardSeconds(1); + timeWarp().trigger(watchName); + assertWatchWithMinimumPerformedActionsCount(watchName, 1); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java new file mode 100644 index 0000000000000..e47727f5d1085 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java @@ -0,0 +1,364 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.test.integration; + +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.watcher.condition.ExecutableCondition; +import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; +import org.elasticsearch.xpack.core.watcher.execution.TriggeredWatchStoreField; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; +import org.elasticsearch.xpack.core.watcher.history.WatchRecord; +import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsResponse; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.core.watcher.watch.WatchField; +import org.elasticsearch.xpack.watcher.condition.CompareCondition; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.execution.TriggeredWatch; +import org.elasticsearch.xpack.watcher.history.HistoryStore; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent; +import org.hamcrest.Matchers; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.junit.Before; + +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.indexAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.searchInput; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.templateRequest; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.cron; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.core.IsEqual.equalTo; +import static org.joda.time.DateTimeZone.UTC; + +public class BootStrapTests extends AbstractWatcherIntegrationTestCase { + + @Override + protected boolean timeWarped() { + return false; + } + + @Before + public void deleteAllWatchHistoryIndices() { + assertAcked(client().admin().indices().prepareDelete(HistoryStoreField.INDEX_PREFIX + "*")); + } + + public void testLoadMalformedWatchRecord() throws Exception { + client().prepareIndex(Watch.INDEX, Watch.DOC_TYPE, "_id") + 
.setSource(jsonBuilder().startObject() + .startObject(WatchField.TRIGGER.getPreferredName()) + .startObject("schedule") + .field("cron", "0/5 * * * * ? 2050") + .endObject() + .endObject() + .startObject(WatchField.ACTIONS.getPreferredName()) + .endObject() + .endObject()) + .get(); + + // valid watch record: + DateTime now = DateTime.now(UTC); + Wid wid = new Wid("_id", now); + ScheduleTriggerEvent event = new ScheduleTriggerEvent("_id", now, now); + ExecutableCondition condition = InternalAlwaysCondition.INSTANCE; + String index = HistoryStoreField.getHistoryIndexNameForTime(now); + client().prepareIndex(index, HistoryStore.DOC_TYPE, wid.value()) + .setSource(jsonBuilder().startObject() + .startObject(WatchRecord.TRIGGER_EVENT.getPreferredName()) + .field(event.type(), event) + .endObject() + .startObject(WatchField.CONDITION.getPreferredName()) + .field(condition.type(), condition) + .endObject() + .startObject(WatchField.INPUT.getPreferredName()) + .startObject("none").endObject() + .endObject() + .endObject()) + .setWaitForActiveShards(ActiveShardCount.ALL) + .setRefreshPolicy(IMMEDIATE) + .get(); + + // unknown condition: + wid = new Wid("_id", now); + client().prepareIndex(index, HistoryStore.DOC_TYPE, wid.value()) + .setSource(jsonBuilder().startObject() + .startObject(WatchRecord.TRIGGER_EVENT.getPreferredName()) + .field(event.type(), event) + .endObject() + .startObject(WatchField.CONDITION.getPreferredName()) + .startObject("unknown").endObject() + .endObject() + .startObject(WatchField.INPUT.getPreferredName()) + .startObject("none").endObject() + .endObject() + .endObject()) + .setWaitForActiveShards(ActiveShardCount.ALL) + .setRefreshPolicy(IMMEDIATE) + .get(); + + // unknown trigger: + wid = new Wid("_id", now); + client().prepareIndex(index, HistoryStore.DOC_TYPE, wid.value()) + .setSource(jsonBuilder().startObject() + .startObject(WatchRecord.TRIGGER_EVENT.getPreferredName()) + .startObject("unknown").endObject() + .endObject() + .startObject(WatchField.CONDITION.getPreferredName()) + .field(condition.type(), condition) + .endObject() + .startObject(WatchField.INPUT.getPreferredName()) + .startObject("none").endObject() + .endObject() + .endObject()) + .setWaitForActiveShards(ActiveShardCount.ALL) + .setRefreshPolicy(IMMEDIATE) + .get(); + + stopWatcher(); + startWatcher(); + + WatcherStatsResponse response = watcherClient().prepareWatcherStats().get(); + assertThat(response.getWatchesCount(), equalTo(1L)); + } + + @AwaitsFix(bugUrl = "Supposedly fixed; https://github.com/elastic/x-pack-elasticsearch/issues/1915") + public void testLoadExistingWatchesUponStartup() throws Exception { + stopWatcher(); + + int numWatches = scaledRandomIntBetween(16, 128); + WatcherSearchTemplateRequest request = + templateRequest(searchSource().query(termQuery("field", "value")), "my-index"); + + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); + for (int i = 0; i < numWatches; i++) { + bulkRequestBuilder.add( + client().prepareIndex(Watch.INDEX, Watch.DOC_TYPE, "_id" + i) + .setSource(watchBuilder() + .trigger(schedule(cron("0 0/5 * * * ? 
2050"))) + .input(searchInput(request)) + .condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.EQ, 1L)) + .buildAsBytes(XContentType.JSON), XContentType.JSON + ) + .setWaitForActiveShards(ActiveShardCount.ALL)); + } + bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + assertHitCount(client().prepareSearch(Watch.INDEX).setSize(0).get(), numWatches); + + startWatcher(); + + assertBusy(() -> { + WatcherStatsResponse response = watcherClient().prepareWatcherStats().get(); + assertThat(response.getWatchesCount(), equalTo((long) numWatches)); + }); + } + + public void testMixedTriggeredWatchLoading() throws Exception { + createIndex("output"); + client().prepareIndex("my-index", "foo", "bar") + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .setSource("field", "value").get(); + + WatcherStatsResponse response = watcherClient().prepareWatcherStats().get(); + assertThat(response.getWatchesCount(), equalTo(0L)); + + WatcherSearchTemplateRequest request = templateRequest(searchSource().query(termQuery("field", "value")), "my-index"); + + ensureGreen("output", "my-index"); + int numWatches = 8; + for (int i = 0; i < numWatches; i++) { + String watchId = "_id" + i; + watcherClient().preparePutWatch(watchId).setSource(watchBuilder() + .trigger(schedule(cron("0/5 * * * * ? 2050"))) + .input(searchInput(request)) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_id", indexAction("output", "test")) + .defaultThrottlePeriod(TimeValue.timeValueMillis(0)) + ).get(); + } + + stopWatcher(); + + DateTime now = DateTime.now(UTC); + final int numRecords = scaledRandomIntBetween(numWatches, 128); + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); + for (int i = 0; i < numRecords; i++) { + String watchId = "_id" + (i % numWatches); + now = now.plusMinutes(1); + ScheduleTriggerEvent event = new ScheduleTriggerEvent(watchId, now, now); + Wid wid = new Wid(watchId, now); + TriggeredWatch triggeredWatch = new TriggeredWatch(wid, event); + bulkRequestBuilder.add( + client().prepareIndex( + TriggeredWatchStoreField.INDEX_NAME, + TriggeredWatchStoreField.DOC_TYPE, + triggeredWatch.id().value()) + .setSource(jsonBuilder().value(triggeredWatch)) + .request()); + } + bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + + logger.info("Added [{}] triggered watches for [{}] different watches, starting watcher again", numRecords, numWatches); + startWatcher(); + assertSingleExecutionAndCompleteWatchHistory(numWatches, numRecords); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29846") + public void testTriggeredWatchLoading() throws Exception { + createIndex("output"); + client().prepareIndex("my-index", "foo", "bar") + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .setSource("field", "value").get(); + + WatcherStatsResponse response = watcherClient().prepareWatcherStats().get(); + assertThat(response.getWatchesCount(), equalTo(0L)); + + String watchId = "_id"; + WatcherSearchTemplateRequest request = templateRequest(searchSource().query(termQuery("field", "value")), "my-index"); + watcherClient().preparePutWatch(watchId).setSource(watchBuilder() + .trigger(schedule(cron("0/5 * * * * ? 
2050"))) + .input(searchInput(request)) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_id", indexAction("output", "test")) + .defaultThrottlePeriod(TimeValue.timeValueMillis(0)) + ).get(); + + stopWatcher(); + + DateTime now = DateTime.now(UTC); + final int numRecords = scaledRandomIntBetween(2, 12); + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); + for (int i = 0; i < numRecords; i++) { + now = now.plusMinutes(1); + ScheduleTriggerEvent event = new ScheduleTriggerEvent(watchId, now, now); + Wid wid = new Wid(watchId, now); + TriggeredWatch triggeredWatch = new TriggeredWatch(wid, event); + bulkRequestBuilder.add(client() + .prepareIndex(TriggeredWatchStoreField.INDEX_NAME, TriggeredWatchStoreField.DOC_TYPE, triggeredWatch.id().value()) + .setSource(jsonBuilder().value(triggeredWatch)) + .setWaitForActiveShards(ActiveShardCount.ALL) + ); + } + bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + + startWatcher(); + + assertSingleExecutionAndCompleteWatchHistory(1, numRecords); + } + + private void assertSingleExecutionAndCompleteWatchHistory(final long numberOfWatches, + final int expectedWatchHistoryCount) throws Exception { + assertBusy(() -> { + // We need to wait until all the records are processed from the internal execution queue, only then can we assert + // that numRecords watch records have been processed as part of starting up. + WatcherStatsResponse response = watcherClient().prepareWatcherStats().setIncludeCurrentWatches(true).get(); + long maxSize = response.getNodes().stream().map(WatcherStatsResponse.Node::getSnapshots).mapToLong(List::size).sum(); + assertThat(maxSize, equalTo(0L)); + + refresh(); + SearchResponse searchResponse = client().prepareSearch("output").get(); + assertThat(searchResponse.getHits().getTotalHits(), is(greaterThanOrEqualTo(numberOfWatches))); + long successfulWatchExecutions = searchResponse.getHits().getTotalHits(); + + // the watch history should contain entries for each triggered watch, of which some may have been marked as not executed + SearchResponse historySearchResponse = client().prepareSearch(HistoryStoreField.INDEX_PREFIX + "*").setSize(10000).get(); + assertHitCount(historySearchResponse, expectedWatchHistoryCount); + long notExecutedCount = Arrays.stream(historySearchResponse.getHits().getHits()) + .filter(hit -> hit.getSourceAsMap().get("state").equals(ExecutionState.NOT_EXECUTED_ALREADY_QUEUED.id())) + .count(); + logger.info("Watches not executed: [{}]: expected watch history count [{}] - [{}] successful watch executions", + notExecutedCount, expectedWatchHistoryCount, successfulWatchExecutions); + assertThat(notExecutedCount, is(expectedWatchHistoryCount - successfulWatchExecutions)); + }, 20, TimeUnit.SECONDS); + } + + public void testManuallyStopped() throws Exception { + WatcherStatsResponse response = watcherClient().prepareWatcherStats().get(); + assertThat(response.watcherMetaData().manuallyStopped(), is(false)); + stopWatcher(); + response = watcherClient().prepareWatcherStats().get(); + assertThat(response.watcherMetaData().manuallyStopped(), is(true)); + startWatcher(); + response = watcherClient().prepareWatcherStats().get(); + assertThat(response.watcherMetaData().manuallyStopped(), is(false)); + } + + public void testWatchRecordSavedTwice() throws Exception { + // Watcher could fail to start if a watch record was executed twice or more and the watch didn't exist + // for that watch record, or the execution threadpool rejected the watch record.
+ // A watch record without a watch is the easiest to simulate, so that is what this test does. + if (client().admin().indices().prepareExists(Watch.INDEX).get().isExists() == false) { + // we rarely create an .watches alias in the base class + assertAcked(client().admin().indices().prepareCreate(Watch.INDEX)); + } + DateTime triggeredTime = new DateTime(2015, 11, 5, 0, 0, 0, 0, DateTimeZone.UTC); + final String watchRecordIndex = HistoryStoreField.getHistoryIndexNameForTime(triggeredTime); + + logger.info("Stopping watcher"); + stopWatcher(); + + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); + int numRecords = scaledRandomIntBetween(8, 32); + for (int i = 0; i < numRecords; i++) { + String watchId = Integer.toString(i); + ScheduleTriggerEvent event = new ScheduleTriggerEvent(watchId, triggeredTime, triggeredTime); + Wid wid = new Wid(watchId, triggeredTime); + TriggeredWatch triggeredWatch = new TriggeredWatch(wid, event); + bulkRequestBuilder.add( + client().prepareIndex(TriggeredWatchStoreField.INDEX_NAME, + TriggeredWatchStoreField.DOC_TYPE, triggeredWatch.id().value()).setSource(jsonBuilder().value(triggeredWatch)) + ); + + String id = internalCluster().getInstance(ClusterService.class).localNode().getId(); + WatchRecord watchRecord = new WatchRecord.MessageWatchRecord(wid, event, ExecutionState.EXECUTED, "executed", id); + bulkRequestBuilder.add(client().prepareIndex(watchRecordIndex, HistoryStore.DOC_TYPE, watchRecord.id().value()) + .setSource(jsonBuilder().value(watchRecord)) + ); + } + assertNoFailures(bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get()); + + logger.info("Starting watcher"); + startWatcher(); + + assertBusy(() -> { + // We need to wait until all the records are processed from the internal execution queue, only then we can assert + // that numRecords watch records have been processed as part of starting up. + WatcherStatsResponse response = watcherClient().prepareWatcherStats().setIncludeCurrentWatches(true).get(); + long maxSize = response.getNodes().stream().map(WatcherStatsResponse.Node::getSnapshots).mapToLong(List::size).sum(); + assertThat(maxSize, equalTo(0L)); + + // but even then since the execution of the watch record is async it may take a little bit before + // the actual documents are in the output index + refresh(); + SearchResponse searchResponse = client().prepareSearch(watchRecordIndex).setSize(numRecords).get(); + assertThat(searchResponse.getHits().getTotalHits(), Matchers.equalTo((long) numRecords)); + for (int i = 0; i < numRecords; i++) { + assertThat(searchResponse.getHits().getAt(i).getSourceAsMap().get("state"), + is(ExecutionState.EXECUTED.id())); + } + }); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/ExecutionVarsIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/ExecutionVarsIntegrationTests.java new file mode 100644 index 0000000000000..85b0280588a6e --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/ExecutionVarsIntegrationTests.java @@ -0,0 +1,238 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.test.integration; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.script.MockScriptPlugin; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; +import org.elasticsearch.xpack.watcher.condition.ScriptCondition; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.hamcrest.Matcher; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Function; + +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.transform.TransformBuilders.scriptTransform; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.cron; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class ExecutionVarsIntegrationTests extends AbstractWatcherIntegrationTestCase { + + @Override + protected List> pluginTypes() { + List> types = super.pluginTypes(); + types.add(CustomScriptPlugin.class); + return types; + } + + public static class CustomScriptPlugin extends MockScriptPlugin { + + @Override + @SuppressWarnings("unchecked") + protected Map, Object>> pluginScripts() { + Map, Object>> scripts = new HashMap<>(); + + scripts.put("ctx.vars.condition_value = ctx.payload.value + 5; return ctx.vars.condition_value > 5;", vars -> { + int value = (int) XContentMapValues.extractValue("ctx.payload.value", vars); + + Map ctxVars = (Map) XContentMapValues.extractValue("ctx.vars", vars); + ctxVars.put("condition_value", value + 5); + + return (int) XContentMapValues.extractValue("condition_value", ctxVars) > 5; + }); + + scripts.put("ctx.vars.watch_transform_value = ctx.vars.condition_value + 5; return ctx.payload;", vars -> { + Map ctxVars = (Map) XContentMapValues.extractValue("ctx.vars", vars); + ctxVars.put("watch_transform_value", (int) XContentMapValues.extractValue("condition_value", ctxVars) + 5); + + return XContentMapValues.extractValue("ctx.payload", vars); + }); + + // Transforms the value of a1, equivalent to: + // ctx.vars.a1_transform_value = ctx.vars.watch_transform_value + 10; + // ctx.payload.a1_transformed_value = ctx.vars.a1_transform_value; + // return ctx.payload; + scripts.put("transform a1", vars -> { + Map ctxVars = (Map) XContentMapValues.extractValue("ctx.vars", vars); + Map ctxPayload = (Map) XContentMapValues.extractValue("ctx.payload", vars); + + int value = (int) XContentMapValues.extractValue("watch_transform_value", ctxVars); + ctxVars.put("a1_transform_value", value + 10); + + value = (int) XContentMapValues.extractValue("a1_transform_value", ctxVars); + ctxPayload.put("a1_transformed_value", value); + + 
return XContentMapValues.extractValue("ctx.payload", vars); + }); + + // Transforms the value of a2, equivalent to: + // ctx.vars.a2_transform_value = ctx.vars.watch_transform_value + 20; + // ctx.payload.a2_transformed_value = ctx.vars.a2_transform_value; + // return ctx.payload; + scripts.put("transform a2", vars -> { + Map ctxVars = (Map) XContentMapValues.extractValue("ctx.vars", vars); + Map ctxPayload = (Map) XContentMapValues.extractValue("ctx.payload", vars); + + int value = (int) XContentMapValues.extractValue("watch_transform_value", ctxVars); + ctxVars.put("a2_transform_value", value + 20); + + value = (int) XContentMapValues.extractValue("a2_transform_value", ctxVars); + ctxPayload.put("a2_transformed_value", value); + + return XContentMapValues.extractValue("ctx.payload", vars); + }); + + return scripts; + } + } + + public void testVars() throws Exception { + WatcherClient watcherClient = watcherClient(); + + PutWatchResponse putWatchResponse = watcherClient.preparePutWatch("_id").setSource(watchBuilder() + .trigger(schedule(cron("0/1 * * * * ?"))) + .input(simpleInput("value", 5)) + .condition(new ScriptCondition( + mockScript("ctx.vars.condition_value = ctx.payload.value + 5; return ctx.vars.condition_value > 5;"))) + .transform( + scriptTransform(mockScript("ctx.vars.watch_transform_value = ctx.vars.condition_value + 5; return ctx.payload;"))) + .addAction( + "a1", + scriptTransform(mockScript("transform a1")), + loggingAction("_text")) + .addAction( + "a2", + scriptTransform(mockScript("transform a2")), + loggingAction("_text"))) + .get(); + + assertThat(putWatchResponse.isCreated(), is(true)); + + timeWarp().trigger("_id"); + + flush(); + refresh(); + + SearchResponse searchResponse = searchWatchRecords(builder -> { + // defaults to match all; + }); + + assertThat(searchResponse.getHits().getTotalHits(), is(1L)); + + Map source = searchResponse.getHits().getAt(0).getSourceAsMap(); + + assertValue(source, "watch_id", is("_id")); + assertValue(source, "state", is("executed")); + + // we don't store the computed vars in history + assertValue(source, "vars", nullValue()); + + assertValue(source, "result.condition.status", is("success")); + assertValue(source, "result.transform.status", is("success")); + + List> actions = ObjectPath.eval("result.actions", source); + for (Map action : actions) { + String id = (String) action.get("id"); + switch (id) { + case "a1": + assertValue(action, "status", is("success")); + assertValue(action, "transform.status", is("success")); + assertValue(action, "transform.payload.a1_transformed_value", equalTo(25)); + break; + case "a2": + assertValue(action, "status", is("success")); + assertValue(action, "transform.status", is("success")); + assertValue(action, "transform.payload.a2_transformed_value", equalTo(35)); + break; + default: + fail("there should not be an action result for action with an id other than a1 or a2"); + } + } + } + + public void testVarsManual() throws Exception { + WatcherClient watcherClient = watcherClient(); + + PutWatchResponse putWatchResponse = watcherClient.preparePutWatch("_id").setSource(watchBuilder() + .trigger(schedule(cron("0/1 * * * * ? 
2020"))) + .input(simpleInput("value", 5)) + .condition(new ScriptCondition( + mockScript("ctx.vars.condition_value = ctx.payload.value + 5; return ctx.vars.condition_value > 5;"))) + .transform( + scriptTransform(mockScript("ctx.vars.watch_transform_value = ctx.vars.condition_value + 5; return ctx.payload;"))) + .addAction( + "a1", + scriptTransform(mockScript("transform a1")), + loggingAction("_text")) + .addAction( + "a2", + scriptTransform(mockScript("transform a2")), + loggingAction("_text"))) + .get(); + + assertThat(putWatchResponse.isCreated(), is(true)); + + boolean debug = randomBoolean(); + + ExecuteWatchResponse executeWatchResponse = watcherClient + .prepareExecuteWatch("_id") + .setDebug(debug) + .get(); + assertThat(executeWatchResponse.getRecordId(), notNullValue()); + XContentSource source = executeWatchResponse.getRecordSource(); + + assertValue(source, "watch_id", is("_id")); + assertValue(source, "state", is("executed")); + + if (debug) { + assertValue(source, "vars.condition_value", is(10)); + assertValue(source, "vars.watch_transform_value", is(15)); + assertValue(source, "vars.a1_transform_value", is(25)); + assertValue(source, "vars.a2_transform_value", is(35)); + } + + assertValue(source, "result.condition.status", is("success")); + assertValue(source, "result.transform.status", is("success")); + + List> actions = source.getValue("result.actions"); + for (Map action : actions) { + String id = (String) action.get("id"); + switch (id) { + case "a1": + assertValue(action, "status", is("success")); + assertValue(action, "transform.status", is("success")); + assertValue(action, "transform.payload.a1_transformed_value", equalTo(25)); + break; + case "a2": + assertValue(action, "status", is("success")); + assertValue(action, "transform.status", is("success")); + assertValue(action, "transform.payload.a2_transformed_value", equalTo(35)); + break; + default: + fail("there should not be an action result for action with an id other than a1 or a2"); + } + } + } + + private static void assertValue(Map map, String path, Matcher matcher) { + assertThat(ObjectPath.eval(path, map), (Matcher) matcher); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java new file mode 100644 index 0000000000000..de48c240a3a77 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java @@ -0,0 +1,199 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.test.integration; + +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.sort.SortBuilders; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; +import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; +import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; + +import java.util.Locale; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.chainInput; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.searchInput; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.templateRequest; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class HistoryIntegrationTests extends AbstractWatcherIntegrationTestCase { + + // issue: https://github.com/elastic/x-plugins/issues/2338 + public void testThatHistoryIsWrittenWithChainedInput() throws Exception { + XContentBuilder xContentBuilder = jsonBuilder().startObject().startObject("inner").field("date", "2015-06-06").endObject() + .endObject(); + index("foo", "bar", "1", xContentBuilder); + refresh(); + + WatchSourceBuilder builder = watchBuilder() + .trigger(schedule(interval("10s"))) + .addAction("logging", loggingAction("foo")); + + builder.input(chainInput().add("first", searchInput( + templateRequest(searchSource().sort(SortBuilders.fieldSort("inner.date").order(SortOrder.DESC)), "foo"))) + ); + + PutWatchResponse response = watcherClient().preparePutWatch("test_watch").setSource(builder).get(); + assertThat(response.isCreated(), is(true)); + + watcherClient().prepareExecuteWatch("test_watch").setRecordExecution(true).get(); + + flushAndRefresh(".watcher-history-*"); + SearchResponse searchResponse = client().prepareSearch(".watcher-history-*").get(); + assertHitCount(searchResponse, 1); + } + + // See https://github.com/elastic/x-plugins/issues/2913 + public void 
testFailedInputResultWithDotsInFieldNameGetsStored() throws Exception { + WatcherSearchTemplateRequest request = templateRequest(searchSource() + .query(matchAllQuery()) + .sort("trigger_event.triggered_time", SortOrder.DESC) + .size(1), "non-existing-index"); + + // The result of the search input will be a failure, because a missing index does not exist when + // the query is executed + Input.Builder input = searchInput(request); + // wrapping this randomly into a chained input to test this as well + boolean useChained = randomBoolean(); + if (useChained) { + input = chainInput().add("chained", input); + } + + watcherClient().preparePutWatch("test_watch") + .setSource(watchBuilder() + .trigger(schedule(interval(5, IntervalSchedule.Interval.Unit.HOURS))) + .input(input) + .addAction("_logger", loggingAction("#### randomLogging"))) + .get(); + + watcherClient().prepareExecuteWatch("test_watch").setRecordExecution(true).get(); + + refresh(".watcher-history*"); + SearchResponse searchResponse = client().prepareSearch(".watcher-history*").setSize(0).get(); + assertHitCount(searchResponse, 1); + + // as fields with dots are allowed in 5.0 again, the mapping must be checked in addition + GetMappingsResponse response = client().admin().indices().prepareGetMappings(".watcher-history*").addTypes("doc").get(); + byte[] bytes = response.getMappings().values().iterator().next().value.get("doc").source().uncompressed(); + XContentSource source = new XContentSource(new BytesArray(bytes), XContentType.JSON); + // lets make sure the body fields are disabled + if (useChained) { + String chainedPath = "doc.properties.result.properties.input.properties.chain.properties.chained.properties.search" + + ".properties.request.properties.body.enabled"; + assertThat(source.getValue(chainedPath), is(false)); + } else { + String path = "doc.properties.result.properties.input.properties.search.properties.request.properties.body.enabled"; + assertThat(source.getValue(path), is(false)); + } + } + + // See https://github.com/elastic/x-plugins/issues/2913 + public void testPayloadInputWithDotsInFieldNameWorks() throws Exception { + Input.Builder input = simpleInput("foo.bar", "bar"); + + // wrapping this randomly into a chained input to test this as well + boolean useChained = randomBoolean(); + if (useChained) { + input = chainInput().add("chained", input); + } + + watcherClient().preparePutWatch("test_watch") + .setSource(watchBuilder() + .trigger(schedule(interval(5, IntervalSchedule.Interval.Unit.HOURS))) + .input(input) + .addAction("_logger", loggingAction("#### randomLogging"))) + .get(); + + watcherClient().prepareExecuteWatch("test_watch").setRecordExecution(true).get(); + + refresh(".watcher-history*"); + SearchResponse searchResponse = client().prepareSearch(".watcher-history*").setSize(0).get(); + assertHitCount(searchResponse, 1); + + // as fields with dots are allowed in 5.0 again, the mapping must be checked in addition + GetMappingsResponse response = client().admin().indices().prepareGetMappings(".watcher-history*").addTypes("doc").get(); + byte[] bytes = response.getMappings().values().iterator().next().value.get("doc").source().uncompressed(); + XContentSource source = new XContentSource(new BytesArray(bytes), XContentType.JSON); + + // lets make sure the body fields are disabled + if (useChained) { + String path = "doc.properties.result.properties.input.properties.chain.properties.chained.properties.payload.enabled"; + assertThat(source.getValue(path), is(false)); + } else { + String path = 
"doc.properties.result.properties.input.properties.payload.enabled"; + assertThat(source.getValue(path), is(false)); + } + } + + public void testThatHistoryContainsStatus() throws Exception { + watcherClient().preparePutWatch("test_watch") + .setSource(watchBuilder() + .trigger(schedule(interval(5, IntervalSchedule.Interval.Unit.HOURS))) + .input(simpleInput("foo", "bar")) + .addAction("_logger", loggingAction("#### randomLogging"))) + .get(); + + watcherClient().prepareExecuteWatch("test_watch").setRecordExecution(true).get(); + + WatchStatus status = watcherClient().prepareGetWatch("test_watch").get().getStatus(); + + refresh(".watcher-history*"); + SearchResponse searchResponse = client().prepareSearch(".watcher-history*").setSize(1).get(); + assertHitCount(searchResponse, 1); + SearchHit hit = searchResponse.getHits().getAt(0); + + XContentSource source = new XContentSource(hit.getSourceRef(), XContentType.JSON); + + Boolean active = source.getValue("status.state.active"); + assertThat(active, is(status.state().isActive())); + + String timestamp = source.getValue("status.state.timestamp"); + assertThat(timestamp, is(status.state().getTimestamp().toString())); + + String lastChecked = source.getValue("status.last_checked"); + assertThat(lastChecked, is(status.lastChecked().toString())); + + Integer version = source.getValue("status.version"); + int expectedVersion = (int) (status.version() - 1); + assertThat(version, is(expectedVersion)); + + ActionStatus actionStatus = status.actionStatus("_logger"); + String ackStatusState = source.getValue("status.actions._logger.ack.state").toString().toUpperCase(Locale.ROOT); + assertThat(ackStatusState, is(actionStatus.ackStatus().state().toString())); + + Boolean lastExecutionSuccesful = source.getValue("status.actions._logger.last_execution.successful"); + assertThat(lastExecutionSuccesful, is(actionStatus.lastExecution().successful())); + + // also ensure that the status field is disabled in the watch history + GetMappingsResponse response = client().admin().indices().prepareGetMappings(".watcher-history*").addTypes("doc").get(); + byte[] bytes = response.getMappings().values().iterator().next().value.get("doc").source().uncompressed(); + XContentSource mappingSource = new XContentSource(new BytesArray(bytes), XContentType.JSON); + assertThat(mappingSource.getValue("doc.properties.status.enabled"), is(false)); + assertThat(mappingSource.getValue("doc.properties.status.properties.status"), is(nullValue())); + assertThat(mappingSource.getValue("doc.properties.status.properties.status.properties.active"), is(nullValue())); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java new file mode 100644 index 0000000000000..b91acc1f969ba --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java @@ -0,0 +1,248 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.test.integration; + +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.xpack.core.watcher.WatcherField; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.crypto.CryptoService; +import org.elasticsearch.xpack.core.watcher.crypto.CryptoServiceTests; +import org.elasticsearch.xpack.core.watcher.execution.ActionExecutionMode; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchResponse; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.ApplicableBasicAuth; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuth; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent; +import org.joda.time.DateTime; +import org.junit.After; +import org.junit.Before; + +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.webhookAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.httpInput; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.cron; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.startsWith; +import static org.joda.time.DateTimeZone.UTC; + +public class HttpSecretsIntegrationTests extends AbstractWatcherIntegrationTestCase { + + private static final String USERNAME = "_user"; + private static final String PASSWORD = "_passwd"; + + private MockWebServer webServer = new MockWebServer(); + private static Boolean encryptSensitiveData = null; + private static byte[] encryptionKey = CryptoServiceTests.generateKey(); + + @Before + public void init() throws Exception { + webServer.start(); + } + + @After + public void cleanup() { + webServer.close(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + if (encryptSensitiveData == null) { + encryptSensitiveData = randomBoolean(); + } + if (encryptSensitiveData) { + MockSecureSettings secureSettings = new MockSecureSettings(); + 
secureSettings.setFile(WatcherField.ENCRYPTION_KEY_SETTING.getKey(), encryptionKey); + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put("xpack.watcher.encrypt_sensitive_data", encryptSensitiveData) + .setSecureSettings(secureSettings) + .build(); + } + return super.nodeSettings(nodeOrdinal); + } + + public void testHttpInput() throws Exception { + WatcherClient watcherClient = watcherClient(); + watcherClient.preparePutWatch("_id") + .setSource(watchBuilder() + .trigger(schedule(cron("0 0 0 1 * ? 2020"))) + .input(httpInput(HttpRequestTemplate.builder(webServer.getHostName(), webServer.getPort()) + .path("/") + .auth(new BasicAuth(USERNAME, PASSWORD.toCharArray())))) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_logging", loggingAction("executed"))) + .get(); + + // verifying the basic auth password is stored encrypted in the index when security + // is enabled, and when it's not enabled, it's stored in plain text + GetResponse response = client().prepareGet(Watch.INDEX, Watch.DOC_TYPE, "_id").get(); + assertThat(response, notNullValue()); + assertThat(response.getId(), is("_id")); + Map source = response.getSource(); + Object value = XContentMapValues.extractValue("input.http.request.auth.basic.password", source); + assertThat(value, notNullValue()); + if (encryptSensitiveData) { + assertThat(value.toString(), startsWith("::es_encrypted::")); + MockSecureSettings mockSecureSettings = new MockSecureSettings(); + mockSecureSettings.setFile(WatcherField.ENCRYPTION_KEY_SETTING.getKey(), encryptionKey); + Settings settings = Settings.builder().setSecureSettings(mockSecureSettings).build(); + CryptoService cryptoService = new CryptoService(settings); + assertThat(new String(cryptoService.decrypt(((String) value).toCharArray())), is(PASSWORD)); + } else { + assertThat(value, is(PASSWORD)); + } + + // verifying the password is not returned by the GET watch API + GetWatchResponse watchResponse = watcherClient.prepareGetWatch("_id").get(); + assertThat(watchResponse, notNullValue()); + assertThat(watchResponse.getId(), is("_id")); + XContentSource contentSource = watchResponse.getSource(); + value = contentSource.getValue("input.http.request.auth.basic"); + assertThat(value, notNullValue()); // making sure we have the basic auth + value = contentSource.getValue("input.http.request.auth.basic.password"); + if (encryptSensitiveData) { + assertThat(value.toString(), startsWith("::es_encrypted::")); + } else { + assertThat(value, is("::es_redacted::")); + } + + // now we restart, to make sure the watches and their secrets are reloaded from the index properly + stopWatcher(); + startWatcher(); + + // now lets execute the watch manually + + webServer.enqueue(new MockResponse().setResponseCode(200).setBody( + BytesReference.bytes(jsonBuilder().startObject().field("key", "value").endObject()).utf8ToString())); + + TriggerEvent triggerEvent = new ScheduleTriggerEvent(new DateTime(UTC), new DateTime(UTC)); + ExecuteWatchResponse executeResponse = watcherClient.prepareExecuteWatch("_id") + .setRecordExecution(false) + .setTriggerEvent(triggerEvent) + .setActionMode("_all", ActionExecutionMode.FORCE_EXECUTE) + .get(); + assertThat(executeResponse, notNullValue()); + contentSource = executeResponse.getRecordSource(); + value = contentSource.getValue("result.input.http.status_code"); + assertThat(value, notNullValue()); + assertThat(value, is((Object) 200)); + + assertThat(webServer.requests(), hasSize(1)); + 
assertThat(webServer.requests().get(0).getHeader("Authorization"), + is(ApplicableBasicAuth.headerValue(USERNAME, PASSWORD.toCharArray()))); + } + + public void testWebhookAction() throws Exception { + WatcherClient watcherClient = watcherClient(); + watcherClient.preparePutWatch("_id") + .setSource(watchBuilder() + .trigger(schedule(cron("0 0 0 1 * ? 2020"))) + .input(simpleInput()) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_webhook", webhookAction(HttpRequestTemplate.builder(webServer.getHostName(), webServer.getPort()) + .path("/") + .auth(new BasicAuth(USERNAME, PASSWORD.toCharArray()))))) + .get(); + + // verifying the basic auth password is stored encrypted in the index when encryption of sensitive data + // is enabled; when it's not enabled, the password should be stored in plain text + GetResponse response = client().prepareGet(Watch.INDEX, Watch.DOC_TYPE, "_id").get(); + assertThat(response, notNullValue()); + assertThat(response.getId(), is("_id")); + Map source = response.getSource(); + Object value = XContentMapValues.extractValue("actions._webhook.webhook.auth.basic.password", source); + assertThat(value, notNullValue()); + + if (encryptSensitiveData) { + assertThat(value, not(is((Object) PASSWORD))); + MockSecureSettings mockSecureSettings = new MockSecureSettings(); + mockSecureSettings.setFile(WatcherField.ENCRYPTION_KEY_SETTING.getKey(), encryptionKey); + Settings settings = Settings.builder().setSecureSettings(mockSecureSettings).build(); + CryptoService cryptoService = new CryptoService(settings); + assertThat(new String(cryptoService.decrypt(((String) value).toCharArray())), is(PASSWORD)); + } else { + assertThat(value, is((Object) PASSWORD)); + } + + // verifying the password is not returned by the GET watch API + GetWatchResponse watchResponse = watcherClient.prepareGetWatch("_id").get(); + assertThat(watchResponse, notNullValue()); + assertThat(watchResponse.getId(), is("_id")); + XContentSource contentSource = watchResponse.getSource(); + value = contentSource.getValue("actions._webhook.webhook.auth.basic"); + assertThat(value, notNullValue()); // making sure we have the basic auth + value = contentSource.getValue("actions._webhook.webhook.auth.basic.password"); + if (encryptSensitiveData) { + assertThat(value.toString(), startsWith("::es_encrypted::")); + } else { + assertThat(value, is("::es_redacted::")); + } + + // now we restart, to make sure the watches and their secrets are reloaded from the index properly + stopWatcher(); + startWatcher(); + + // now let's execute the watch manually + + webServer.enqueue(new MockResponse().setResponseCode(200).setBody( + BytesReference.bytes(jsonBuilder().startObject().field("key", "value").endObject()).utf8ToString())); + + TriggerEvent triggerEvent = new ScheduleTriggerEvent(new DateTime(UTC), new DateTime(UTC)); + ExecuteWatchResponse executeResponse = watcherClient.prepareExecuteWatch("_id") + .setRecordExecution(false) + .setActionMode("_all", ActionExecutionMode.FORCE_EXECUTE) + .setTriggerEvent(triggerEvent) + .get(); + assertThat(executeResponse, notNullValue()); + + contentSource = executeResponse.getRecordSource(); + + assertThat(contentSource.getValue("result.actions.0.status"), is("success")); + + value = contentSource.getValue("result.actions.0.webhook.response.status"); + assertThat(value, notNullValue()); + assertThat(value, instanceOf(Number.class)); + assertThat(((Number) value).intValue(), is(200)); + + value = contentSource.getValue("result.actions.0.webhook.request.auth.basic.username"); +
assertThat(value, notNullValue()); + assertThat(value, instanceOf(String.class)); + assertThat(value, is(USERNAME)); // the auth username exists + + value = contentSource.getValue("result.actions.0.webhook.request.auth.basic.password"); + if (encryptSensitiveData) { + assertThat(value.toString(), startsWith("::es_encrypted::")); + } else { + assertThat(value.toString(), is("::es_redacted::")); + } + + assertThat(webServer.requests(), hasSize(1)); + assertThat(webServer.requests().get(0).getHeader("Authorization"), + is(ApplicableBasicAuth.headerValue(USERNAME, PASSWORD.toCharArray()))); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java new file mode 100644 index 0000000000000..6d7f4bef213f7 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java @@ -0,0 +1,194 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.test.integration; + +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.script.MockMustacheScriptEngine; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.input.Input; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.Watcher; +import org.elasticsearch.xpack.watcher.input.search.ExecutableSearchInput; +import org.elasticsearch.xpack.watcher.input.search.SearchInput; +import org.elasticsearch.xpack.watcher.input.search.SearchInputFactory; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateService; +import org.elasticsearch.xpack.watcher.test.WatcherTestUtils; +import org.junit.Before; +import 
org.mockito.ArgumentCaptor; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; +import static org.elasticsearch.index.query.QueryBuilders.matchQuery; +import static org.elasticsearch.index.query.QueryBuilders.rangeQuery; +import static org.elasticsearch.mock.orig.Mockito.when; +import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.getRandomSupportedSearchType; +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; + +public class SearchInputTests extends ESTestCase { + + private ScriptService scriptService; + private Client client; + + @Before + public void setup() { + Map engines = new HashMap<>(); + engines.put(MockMustacheScriptEngine.NAME, new MockMustacheScriptEngine()); + Map> contexts = new HashMap<>(); + contexts.put(Watcher.SCRIPT_TEMPLATE_CONTEXT.name, Watcher.SCRIPT_TEMPLATE_CONTEXT); + contexts.put(Watcher.SCRIPT_SEARCH_CONTEXT.name, Watcher.SCRIPT_SEARCH_CONTEXT); + contexts.put(Watcher.SCRIPT_EXECUTABLE_CONTEXT.name, Watcher.SCRIPT_EXECUTABLE_CONTEXT); + scriptService = new ScriptService(Settings.EMPTY, engines, contexts); + + ThreadPool threadPool = mock(ThreadPool.class); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + when(threadPool.getThreadContext()).thenReturn(threadContext); + client = mock(Client.class); + when(client.threadPool()).thenReturn(threadPool); + } + + public void testExecute() throws Exception { + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(SearchRequest.class); + PlainActionFuture searchFuture = PlainActionFuture.newFuture(); + SearchResponse searchResponse = new SearchResponse(InternalSearchResponse.empty(), "", 1, 1, 0, 1234, + ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY); + searchFuture.onResponse(searchResponse); + when(client.search(requestCaptor.capture())).thenReturn(searchFuture); + + ArgumentCaptor headersCaptor = ArgumentCaptor.forClass(Map.class); + when(client.filterWithHeader(headersCaptor.capture())).thenReturn(client); + + SearchSourceBuilder searchSourceBuilder = searchSource().query(boolQuery().must(matchQuery("event_type", "a"))); + + WatcherSearchTemplateRequest request = WatcherTestUtils.templateRequest(searchSourceBuilder); + ExecutableSearchInput searchInput = new ExecutableSearchInput(new SearchInput(request, null, null, null), logger, + client, watcherSearchTemplateService(), TimeValue.timeValueMinutes(1)); + WatchExecutionContext ctx = WatcherTestUtils.createWatchExecutionContext(logger); + + SearchInput.Result result = searchInput.execute(ctx, new Payload.Simple()); + + assertThat(result.status(), is(Input.Result.Status.SUCCESS)); + SearchRequest searchRequest = requestCaptor.getValue(); + assertThat(searchRequest.searchType(), is(request.getSearchType())); + assertThat(searchRequest.indicesOptions(), is(request.getIndicesOptions())); + assertThat(searchRequest.indices(), is(arrayContainingInAnyOrder(request.getIndices()))); + assertThat(headersCaptor.getAllValues(), hasSize(0)); + } + + public void testDifferentSearchType() throws 
Exception { + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(SearchRequest.class); + PlainActionFuture searchFuture = PlainActionFuture.newFuture(); + SearchResponse searchResponse = new SearchResponse(InternalSearchResponse.empty(), "", 1, 1, 0, 1234, + ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY); + searchFuture.onResponse(searchResponse); + when(client.search(requestCaptor.capture())).thenReturn(searchFuture); + + SearchSourceBuilder searchSourceBuilder = searchSource().query(boolQuery().must(matchQuery("event_type", "a"))); + SearchType searchType = getRandomSupportedSearchType(); + WatcherSearchTemplateRequest request = WatcherTestUtils.templateRequest(searchSourceBuilder, searchType); + + ExecutableSearchInput searchInput = new ExecutableSearchInput(new SearchInput(request, null, null, null), logger, + client, watcherSearchTemplateService(), TimeValue.timeValueMinutes(1)); + WatchExecutionContext ctx = WatcherTestUtils.createWatchExecutionContext(logger); + SearchInput.Result result = searchInput.execute(ctx, new Payload.Simple()); + + assertThat(result.status(), is(Input.Result.Status.SUCCESS)); + SearchRequest searchRequest = requestCaptor.getValue(); + assertThat(searchRequest.searchType(), is(request.getSearchType())); + assertThat(searchRequest.indicesOptions(), is(request.getIndicesOptions())); + assertThat(searchRequest.indices(), is(arrayContainingInAnyOrder(request.getIndices()))); + } + + public void testParserValid() throws Exception { + SearchSourceBuilder source = searchSource() + .query(boolQuery().must(matchQuery("event_type", "a")).must(rangeQuery("_timestamp") + .from("{{ctx.trigger.scheduled_time}}||-30s").to("{{ctx.trigger.triggered_time}}"))); + + TimeValue timeout = randomBoolean() ? TimeValue.timeValueSeconds(randomInt(10)) : null; + XContentBuilder builder = jsonBuilder().value(new SearchInput(WatcherTestUtils.templateRequest(source), null, timeout, null)); + XContentParser parser = createParser(builder); + parser.nextToken(); + + SearchInputFactory factory = new SearchInputFactory(Settings.EMPTY, client, xContentRegistry(), scriptService); + + SearchInput searchInput = factory.parseInput("_id", parser); + assertEquals(SearchInput.TYPE, searchInput.type()); + assertThat(searchInput.getTimeout(), equalTo(timeout)); + } + + // source: https://discuss.elastic.co/t/need-help-for-energy-monitoring-system-alerts/89415/3 + public void testThatEmptyRequestBodyWorks() throws Exception { + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(SearchRequest.class); + PlainActionFuture searchFuture = PlainActionFuture.newFuture(); + SearchResponse searchResponse = new SearchResponse(InternalSearchResponse.empty(), "", 1, 1, 0, 1234, + ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY); + searchFuture.onResponse(searchResponse); + when(client.search(requestCaptor.capture())).thenReturn(searchFuture); + + try (XContentBuilder builder = jsonBuilder().startObject().startObject("request") + .startArray("indices").value("foo").endArray().endObject().endObject(); + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, BytesReference.bytes(builder).streamInput())) { + + parser.nextToken(); // advance past the first starting object + + SearchInputFactory factory = new SearchInputFactory(Settings.EMPTY, client, xContentRegistry(), scriptService); + SearchInput input = factory.parseInput("my-watch", parser); + 
assertThat(input.getRequest(), is(not(nullValue()))); + assertThat(input.getRequest().getSearchSource(), is(BytesArray.EMPTY)); + + ExecutableSearchInput executableSearchInput = factory.createExecutable(input); + WatchExecutionContext ctx = WatcherTestUtils.createWatchExecutionContext(logger); + SearchInput.Result result = executableSearchInput.execute(ctx, Payload.Simple.EMPTY); + assertThat(result.status(), is(Input.Result.Status.SUCCESS)); + // no body in the search request + ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("pretty", "false")); + assertThat(requestCaptor.getValue().source().toString(params), is("{}")); + } + } + + private WatcherSearchTemplateService watcherSearchTemplateService() { + SearchModule module = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new WatcherSearchTemplateService(Settings.EMPTY, scriptService, new NamedXContentRegistry(module.getNamedXContents())); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchTransformTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchTransformTests.java new file mode 100644 index 0000000000000..10c61677a4c42 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchTransformTests.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.test.integration; + +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.transform.search.ExecutableSearchTransform; +import org.elasticsearch.xpack.watcher.transform.search.SearchTransform; +import org.elasticsearch.xpack.watcher.transform.search.SearchTransformFactory; + +import java.util.Collections; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.getRandomSupportedSearchType; +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Mockito.mock; + +public class SearchTransformTests extends ESTestCase { + + public void testParser() throws Exception { + String[] indices = rarely() ? null : randomBoolean() ? new String[] { "idx" } : new String[] { "idx1", "idx2" }; + SearchType searchType = getRandomSupportedSearchType(); + String templateName = randomBoolean() ? 
null : "template1"; + XContentBuilder builder = jsonBuilder().startObject(); + builder.startObject("request"); + if (indices != null) { + builder.array("indices", indices); + } + if (searchType != null) { + builder.field("search_type", searchType.name()); + } + if (templateName != null) { + TextTemplate template = new TextTemplate(templateName, null, ScriptType.INLINE, null); + builder.field("template", template); + } + + builder.startObject("body") + .startObject("query") + .startObject("match_all") + .endObject() + .endObject() + .endObject(); + + builder.endObject(); + TimeValue readTimeout = randomBoolean() ? TimeValue.timeValueSeconds(randomInt(10)) : null; + if (readTimeout != null) { + builder.field("timeout", readTimeout); + } + builder.endObject(); + + XContentParser parser = createParser(builder); + parser.nextToken(); + + final MockScriptEngine engine = new MockScriptEngine("mock", Collections.emptyMap()); + Map engines = Collections.singletonMap(engine.getType(), engine); + ScriptService scriptService = new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS); + + Client client = mock(Client.class); + SearchTransformFactory transformFactory = new SearchTransformFactory(Settings.EMPTY, client, xContentRegistry(), scriptService); + ExecutableSearchTransform executable = transformFactory.parseExecutable("_id", parser); + + assertThat(executable, notNullValue()); + assertThat(executable.type(), is(SearchTransform.TYPE)); + assertThat(executable.transform().getRequest(), notNullValue()); + if (indices != null) { + assertThat(executable.transform().getRequest().getIndices(), arrayContainingInAnyOrder(indices)); + } + if (searchType != null) { + assertThat(executable.transform().getRequest().getSearchType(), is(searchType)); + } + if (templateName != null) { + assertThat(executable.transform().getRequest().getTemplate(), + equalTo(new Script(ScriptType.INLINE, "mustache", "template1", Collections.emptyMap()))); + } + assertThat(executable.transform().getRequest().getSearchSource().utf8ToString(), equalTo("{\"query\":{\"match_all\":{}}}")); + assertThat(executable.transform().getTimeout(), equalTo(readTimeout)); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java new file mode 100644 index 0000000000000..8cb4ac1d07bf4 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java @@ -0,0 +1,244 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.test.integration; + + +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; +import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; +import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; +import org.elasticsearch.xpack.core.watcher.history.WatchRecord; +import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchRequestBuilder; +import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.condition.CompareCondition; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.hamcrest.Matchers; +import org.junit.Before; + +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.index.query.QueryBuilders.matchQuery; +import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.indexAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.searchInput; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.templateRequest; +import static org.elasticsearch.xpack.watcher.transform.TransformBuilders.searchTransform; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.cron; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.core.IsEqual.equalTo; + +@TestLogging("org.elasticsearch.xpack.watcher:DEBUG") +public class WatchAckTests extends AbstractWatcherIntegrationTestCase { + + private String id = randomAlphaOfLength(10); + + @Before + public void indexTestDocument() { + IndexResponse eventIndexResponse = client().prepareIndex("events", "event", id) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .setSource("level", "error") + .get(); + assertEquals(DocWriteResponse.Result.CREATED, eventIndexResponse.getResult()); + } + + public void testAckSingleAction() throws Exception { + PutWatchResponse putWatchResponse = watcherClient().preparePutWatch() + .setId("_id") + .setSource(watchBuilder() + .trigger(schedule(cron("0/5 * * * * ? 
*"))) + .input(searchInput(templateRequest(searchSource(), "events"))) + .condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.GT, 0L)) + .transform(searchTransform(templateRequest(searchSource(), "events"))) + .addAction("_a1", indexAction("actions1", "doc")) + .addAction("_a2", indexAction("actions2", "doc")) + .defaultThrottlePeriod(new TimeValue(0, TimeUnit.SECONDS))) + .get(); + + assertThat(putWatchResponse.isCreated(), is(true)); + assertThat(watcherClient().prepareWatcherStats().get().getWatchesCount(), is(1L)); + + timeWarp().trigger("_id", 4, TimeValue.timeValueSeconds(5)); + AckWatchResponse ackResponse = watcherClient().prepareAckWatch("_id").setActionIds("_a1").get(); + assertThat(ackResponse.getStatus().actionStatus("_a1").ackStatus().state(), is(ActionStatus.AckStatus.State.ACKED)); + assertThat(ackResponse.getStatus().actionStatus("_a2").ackStatus().state(), is(ActionStatus.AckStatus.State.ACKABLE)); + + refresh(); + long a1CountAfterAck = docCount("actions1", "doc", matchAllQuery()); + long a2CountAfterAck = docCount("actions2", "doc", matchAllQuery()); + assertThat(a1CountAfterAck, greaterThan(0L)); + assertThat(a2CountAfterAck, greaterThan(0L)); + + logger.info("###3"); + timeWarp().trigger("_id", 4, TimeValue.timeValueSeconds(5)); + logger.info("###4"); + flush(); + refresh(); + + // There shouldn't be more a1 actions in the index after we ack the watch, even though the watch was triggered + long a1CountAfterPostAckFires = docCount("actions1", "doc", matchAllQuery()); + assertThat(a1CountAfterPostAckFires, equalTo(a1CountAfterAck)); + + // There should be more a2 actions in the index after we ack the watch + long a2CountAfterPostAckFires = docCount("actions2", "doc", matchAllQuery()); + assertThat(a2CountAfterPostAckFires, greaterThan(a2CountAfterAck)); + + // Now delete the event and the ack states should change to AWAITS_EXECUTION + DeleteResponse response = client().prepareDelete("events", "event", id).get(); + assertEquals(DocWriteResponse.Result.DELETED, response.getResult()); + refresh(); + + logger.info("###5"); + timeWarp().trigger("_id", 4, TimeValue.timeValueSeconds(5)); + logger.info("###6"); + + GetWatchResponse getWatchResponse = watcherClient().prepareGetWatch("_id").get(); + assertThat(getWatchResponse.isFound(), is(true)); + + Watch parsedWatch = watchParser().parse(getWatchResponse.getId(), true, getWatchResponse.getSource().getBytes(), XContentType.JSON); + assertThat(parsedWatch.status().actionStatus("_a1").ackStatus().state(), + is(ActionStatus.AckStatus.State.AWAITS_SUCCESSFUL_EXECUTION)); + assertThat(parsedWatch.status().actionStatus("_a2").ackStatus().state(), + is(ActionStatus.AckStatus.State.AWAITS_SUCCESSFUL_EXECUTION)); + + long throttledCount = docCount(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*", null, + matchQuery(WatchRecord.STATE.getPreferredName(), ExecutionState.ACKNOWLEDGED.id())); + assertThat(throttledCount, greaterThan(0L)); + } + + public void testAckAllActions() throws Exception { + PutWatchResponse putWatchResponse = watcherClient().preparePutWatch() + .setId("_id") + .setSource(watchBuilder() + .trigger(schedule(cron("0/5 * * * * ? 
*"))) + .input(searchInput(templateRequest(searchSource(), "events"))) + .condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.GT, 0L)) + .transform(searchTransform(templateRequest(searchSource(), "events"))) + .addAction("_a1", indexAction("actions1", "doc")) + .addAction("_a2", indexAction("actions2", "doc")) + .defaultThrottlePeriod(new TimeValue(0, TimeUnit.SECONDS))) + .get(); + + assertThat(putWatchResponse.isCreated(), is(true)); + assertThat(watcherClient().prepareWatcherStats().get().getWatchesCount(), is(1L)); + + timeWarp().trigger("_id", 4, TimeValue.timeValueSeconds(5)); + + AckWatchRequestBuilder ackWatchRequestBuilder = watcherClient().prepareAckWatch("_id"); + if (randomBoolean()) { + ackWatchRequestBuilder.setActionIds("_all"); + } else if (randomBoolean()) { + ackWatchRequestBuilder.setActionIds("_all", "a1"); + } + AckWatchResponse ackResponse = ackWatchRequestBuilder.get(); + + assertThat(ackResponse.getStatus().actionStatus("_a1").ackStatus().state(), is(ActionStatus.AckStatus.State.ACKED)); + assertThat(ackResponse.getStatus().actionStatus("_a2").ackStatus().state(), is(ActionStatus.AckStatus.State.ACKED)); + + refresh(); + long a1CountAfterAck = docCount("actions1", "doc", matchAllQuery()); + long a2CountAfterAck = docCount("actions2", "doc", matchAllQuery()); + assertThat(a1CountAfterAck, greaterThanOrEqualTo((long) 1)); + assertThat(a2CountAfterAck, greaterThanOrEqualTo((long) 1)); + + timeWarp().trigger("_id", 4, TimeValue.timeValueSeconds(5)); + flush(); + refresh(); + + // There shouldn't be more a1 actions in the index after we ack the watch, even though the watch was triggered + long a1CountAfterPostAckFires = docCount("actions1", "doc", matchAllQuery()); + assertThat(a1CountAfterPostAckFires, equalTo(a1CountAfterAck)); + + // There shouldn't be more a2 actions in the index after we ack the watch, even though the watch was triggered + long a2CountAfterPostAckFires = docCount("actions2", "doc", matchAllQuery()); + assertThat(a2CountAfterPostAckFires, equalTo(a2CountAfterAck)); + + // Now delete the event and the ack states should change to AWAITS_EXECUTION + DeleteResponse response = client().prepareDelete("events", "event", id).get(); + assertEquals(DocWriteResponse.Result.DELETED, response.getResult()); + refresh(); + + timeWarp().trigger("_id", 4, TimeValue.timeValueSeconds(5)); + + GetWatchResponse getWatchResponse = watcherClient().prepareGetWatch("_id").get(); + assertThat(getWatchResponse.isFound(), is(true)); + + Watch parsedWatch = watchParser().parse(getWatchResponse.getId(), true, getWatchResponse.getSource().getBytes(), XContentType.JSON); + assertThat(parsedWatch.status().actionStatus("_a1").ackStatus().state(), + is(ActionStatus.AckStatus.State.AWAITS_SUCCESSFUL_EXECUTION)); + assertThat(parsedWatch.status().actionStatus("_a2").ackStatus().state(), + is(ActionStatus.AckStatus.State.AWAITS_SUCCESSFUL_EXECUTION)); + + long throttledCount = docCount(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*", null, + matchQuery(WatchRecord.STATE.getPreferredName(), ExecutionState.ACKNOWLEDGED.id())); + assertThat(throttledCount, greaterThan(0L)); + } + + public void testAckWithRestart() throws Exception { + PutWatchResponse putWatchResponse = watcherClient().preparePutWatch() + .setId("_name") + .setSource(watchBuilder() + .trigger(schedule(cron("0/5 * * * * ? 
*"))) + .input(searchInput(templateRequest(searchSource(), "events"))) + .condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.GT, 0L)) + .transform(searchTransform(templateRequest(searchSource(), "events"))) + .addAction("_id", indexAction("actions", "action"))) + .get(); + assertThat(putWatchResponse.isCreated(), is(true)); + assertThat(watcherClient().prepareWatcherStats().get().getWatchesCount(), is(1L)); + + timeWarp().trigger("_name", 4, TimeValue.timeValueSeconds(5)); + restartWatcherRandomly(); + + AckWatchResponse ackResponse = watcherClient().prepareAckWatch("_name").get(); + assertThat(ackResponse.getStatus().actionStatus("_id").ackStatus().state(), is(ActionStatus.AckStatus.State.ACKED)); + + refresh("actions"); + long countAfterAck = client().prepareSearch("actions").setTypes("action").setQuery(matchAllQuery()).get().getHits().getTotalHits(); + assertThat(countAfterAck, greaterThanOrEqualTo(1L)); + + restartWatcherRandomly(); + + GetWatchResponse watchResponse = watcherClient().getWatch(new GetWatchRequest("_name")).actionGet(); + assertThat(watchResponse.getStatus().actionStatus("_id").ackStatus().state(), Matchers.equalTo(ActionStatus.AckStatus.State.ACKED)); + + refresh(); + GetResponse getResponse = client().get(new GetRequest(Watch.INDEX, Watch.DOC_TYPE, "_name")).actionGet(); + Watch indexedWatch = watchParser().parse("_name", true, getResponse.getSourceAsBytesRef(), XContentType.JSON); + assertThat(watchResponse.getStatus().actionStatus("_id").ackStatus().state(), + equalTo(indexedWatch.status().actionStatus("_id").ackStatus().state())); + + timeWarp().trigger("_name", 4, TimeValue.timeValueSeconds(5)); + refresh("actions"); + + // There shouldn't be more actions in the index after we ack the watch, even though the watch was triggered + long countAfterPostAckFires = docCount("actions", "action", matchAllQuery()); + assertThat(countAfterPostAckFires, equalTo(countAfterAck)); + } + + private void restartWatcherRandomly() throws Exception { + if (randomBoolean()) { + stopWatcher(); + startWatcher(); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchMetadataTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchMetadataTests.java new file mode 100644 index 0000000000000..87c10c97c8fc6 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchMetadataTests.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.test.integration; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.watcher.execution.ActionExecutionMode; +import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; +import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.watcher.actions.logging.LoggingAction; +import org.elasticsearch.xpack.watcher.actions.logging.LoggingLevel; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.condition.CompareCondition; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent; +import org.joda.time.DateTime; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.noneInput; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.cron; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.joda.time.DateTimeZone.UTC; + +public class WatchMetadataTests extends AbstractWatcherIntegrationTestCase { + + public void testWatchMetadata() throws Exception { + Map metadata = new HashMap<>(); + metadata.put("foo", "bar"); + List metaList = new ArrayList<>(); + metaList.add("this"); + metaList.add("is"); + metaList.add("a"); + metaList.add("test"); + + metadata.put("baz", metaList); + watcherClient().preparePutWatch("_name") + .setSource(watchBuilder() + .trigger(schedule(cron("0/5 * * * * ? *"))) + .input(noneInput()) + .condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.EQ, 1L)) + .metadata(metadata)) + .get(); + + timeWarp().trigger("_name"); + + refresh(); + SearchResponse searchResponse = client().prepareSearch(HistoryStoreField.INDEX_PREFIX_WITH_TEMPLATE + "*") + .setQuery(termQuery("metadata.foo", "bar")) + .get(); + assertThat(searchResponse.getHits().getTotalHits(), greaterThan(0L)); + } + + public void testWatchMetadataAvailableAtExecution() throws Exception { + Map metadata = new HashMap<>(); + metadata.put("foo", "bar"); + metadata.put("logtext", "This is a test"); + + LoggingAction.Builder loggingAction = loggingAction(new TextTemplate("_logging")) + .setLevel(LoggingLevel.DEBUG) + .setCategory("test"); + + watcherClient().preparePutWatch("_name") + .setSource(watchBuilder() + .trigger(schedule(cron("0 0 0 1 1 ? 
2050"))) + .input(noneInput()) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("testLogger", loggingAction) + .defaultThrottlePeriod(TimeValue.timeValueSeconds(0)) + .metadata(metadata)) + .get(); + + TriggerEvent triggerEvent = new ScheduleTriggerEvent(new DateTime(UTC), new DateTime(UTC)); + ExecuteWatchResponse executeWatchResponse = watcherClient().prepareExecuteWatch("_name") + .setTriggerEvent(triggerEvent).setActionMode("_all", ActionExecutionMode.SIMULATE).get(); + Map result = executeWatchResponse.getRecordSource().getAsMap(); + logger.info("result=\n{}", result); + + assertThat(ObjectPath.eval("metadata.foo", result), equalTo("bar")); + assertThat(ObjectPath.eval("result.actions.0.id", result), equalTo("testLogger")); + assertThat(ObjectPath.eval("result.actions.0.logging.logged_text", result), equalTo("_logging")); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java new file mode 100644 index 0000000000000..1f7335aef0df8 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java @@ -0,0 +1,248 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.transform; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.script.MockScriptPlugin; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Function; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.indexAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.searchInput; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.templateRequest; +import static 
org.elasticsearch.xpack.watcher.transform.TransformBuilders.chainTransform; +import static org.elasticsearch.xpack.watcher.transform.TransformBuilders.scriptTransform; +import static org.elasticsearch.xpack.watcher.transform.TransformBuilders.searchTransform; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; + +public class TransformIntegrationTests extends AbstractWatcherIntegrationTestCase { + + @Override + protected Collection> nodePlugins() { + List> types = super.pluginTypes(); + types.add(CustomScriptPlugin.class); + return types; + } + + @Override + protected Path nodeConfigPath(int nodeOrdinal) { + final Path config = createTempDir().resolve("config"); + final Path scripts = config.resolve("scripts"); + + try { + Files.createDirectories(scripts); + + // When using the MockScriptPlugin we can map File scripts to inline scripts: + // the name of the file script is used in the test method while the source of the file script + // must match a predefined script from the CustomScriptPlugin.pluginScripts() method + Files.write(scripts.resolve("my-script.mockscript"), "['key3' : ctx.payload.key1 + ctx.payload.key2]".getBytes("UTF-8")); + } catch (final IOException e) { + throw new UncheckedIOException(e); + } + + return config; + } + + public static class CustomScriptPlugin extends MockScriptPlugin { + + @Override + protected Map, Object>> pluginScripts() { + Map, Object>> scripts = new HashMap<>(); + + scripts.put("['key3' : ctx.payload.key1 + ctx.payload.key2]", vars -> { + int key1 = (int) XContentMapValues.extractValue("ctx.payload.key1", vars); + int key2 = (int) XContentMapValues.extractValue("ctx.payload.key2", vars); + return singletonMap("key3", key1 + key2); + }); + + scripts.put("['key4' : ctx.payload.key3 + 10]", vars -> { + int key3 = (int) XContentMapValues.extractValue("ctx.payload.key3", vars); + return singletonMap("key4", key3 + 10); + }); + + return scripts; + } + } + + public void testScriptTransform() throws Exception { + final Script script; + if (randomBoolean()) { + logger.info("testing script transform with an inline script"); + script = mockScript("['key3' : ctx.payload.key1 + ctx.payload.key2]"); + } else { + logger.info("testing script transform with an indexed script"); + assertAcked(client().admin().cluster().preparePutStoredScript() + .setId("my-script") + .setContent(new BytesArray("{\"script\" : {\"lang\": \"" + MockScriptPlugin.NAME + "\", " + + "\"source\": \"['key3' : ctx.payload.key1 + ctx.payload.key2]\"}"), XContentType.JSON) + .get()); + script = new Script(ScriptType.STORED, null, "my-script", Collections.emptyMap()); + } + + // put a watch that has watch level transform: + PutWatchResponse putWatchResponse = watcherClient().preparePutWatch("_id1") + .setSource(watchBuilder() + .trigger(schedule(interval("5s"))) + .input(simpleInput(MapBuilder.newMapBuilder().put("key1", 10).put("key2", 10))) + .condition(InternalAlwaysCondition.INSTANCE) + .transform(scriptTransform(script)) + .addAction("_id", indexAction("output1", "type"))) + .get(); + assertThat(putWatchResponse.isCreated(), is(true)); + // put a watch that has an action level transform: + putWatchResponse = watcherClient().preparePutWatch("_id2") + .setSource(watchBuilder() + 
.trigger(schedule(interval("5s"))) + .input(simpleInput(MapBuilder.newMapBuilder().put("key1", 10).put("key2", 10))) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_id", scriptTransform(script), indexAction("output2", "type"))) + .get(); + assertThat(putWatchResponse.isCreated(), is(true)); + + timeWarp().trigger("_id1"); + timeWarp().trigger("_id2"); + refresh(); + + assertWatchWithMinimumPerformedActionsCount("_id1", 1, false); + assertWatchWithMinimumPerformedActionsCount("_id2", 1, false); + refresh(); + + SearchResponse response = client().prepareSearch("output1").get(); + assertNoFailures(response); + assertThat(response.getHits().getTotalHits(), greaterThanOrEqualTo(1L)); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("key3").toString(), equalTo("20")); + + response = client().prepareSearch("output2").get(); + assertNoFailures(response); + assertThat(response.getHits().getTotalHits(), greaterThanOrEqualTo(1L)); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("key3").toString(), equalTo("20")); + } + + public void testSearchTransform() throws Exception { + createIndex("my-condition-index", "my-payload-index"); + ensureGreen("my-condition-index", "my-payload-index"); + + index("my-payload-index", "payload", "mytestresult"); + refresh(); + + WatcherSearchTemplateRequest inputRequest = templateRequest(searchSource().query(matchAllQuery()), "my-condition-index"); + WatcherSearchTemplateRequest transformRequest = templateRequest(searchSource().query(matchAllQuery()), "my-payload-index"); + + PutWatchResponse putWatchResponse = watcherClient().preparePutWatch("_id1") + .setSource(watchBuilder() + .trigger(schedule(interval("5s"))) + .input(searchInput(inputRequest)) + .transform(searchTransform(transformRequest)) + .addAction("_id", indexAction("output1", "result")) + ).get(); + assertThat(putWatchResponse.isCreated(), is(true)); + putWatchResponse = watcherClient().preparePutWatch("_id2") + .setSource(watchBuilder() + .trigger(schedule(interval("5s"))) + .input(searchInput(inputRequest)) + .addAction("_id", searchTransform(transformRequest), indexAction("output2", "result")) + ).get(); + assertThat(putWatchResponse.isCreated(), is(true)); + + timeWarp().trigger("_id1"); + timeWarp().trigger("_id2"); + refresh(); + + assertWatchWithMinimumPerformedActionsCount("_id1", 1, false); + assertWatchWithMinimumPerformedActionsCount("_id2", 1, false); + refresh(); + + SearchResponse response = client().prepareSearch("output1").get(); + assertNoFailures(response); + assertThat(response.getHits().getTotalHits(), greaterThanOrEqualTo(1L)); + assertThat(response.getHits().getAt(0).getSourceAsString(), containsString("mytestresult")); + + response = client().prepareSearch("output2").get(); + assertNoFailures(response); + assertThat(response.getHits().getTotalHits(), greaterThanOrEqualTo(1L)); + assertThat(response.getHits().getAt(0).getSourceAsString(), containsString("mytestresult")); + } + + public void testChainTransform() throws Exception { + Script script1 = mockScript("['key3' : ctx.payload.key1 + ctx.payload.key2]"); + Script script2 = mockScript("['key4' : ctx.payload.key3 + 10]"); + + // put a watch that has watch level transform: + PutWatchResponse putWatchResponse = watcherClient().preparePutWatch("_id1") + .setSource(watchBuilder() + .trigger(schedule(interval("5s"))) + 
.input(simpleInput(MapBuilder.newMapBuilder().put("key1", 10).put("key2", 10))) + .condition(InternalAlwaysCondition.INSTANCE) + .transform(chainTransform(scriptTransform(script1), scriptTransform(script2))) + .addAction("_id", indexAction("output1", "type"))) + .get(); + assertThat(putWatchResponse.isCreated(), is(true)); + // put a watch that has an action level transform: + putWatchResponse = watcherClient().preparePutWatch("_id2") + .setSource(watchBuilder() + .trigger(schedule(interval("5s"))) + .input(simpleInput(MapBuilder.newMapBuilder().put("key1", 10).put("key2", 10))) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_id", chainTransform(scriptTransform(script1), scriptTransform(script2)), + indexAction("output2", "type"))) + .get(); + assertThat(putWatchResponse.isCreated(), is(true)); + + timeWarp().trigger("_id1"); + timeWarp().trigger("_id2"); + refresh(); + + assertWatchWithMinimumPerformedActionsCount("_id1", 1, false); + assertWatchWithMinimumPerformedActionsCount("_id2", 1, false); + refresh(); + + SearchResponse response = client().prepareSearch("output1").get(); + assertNoFailures(response); + assertThat(response.getHits().getTotalHits(), greaterThanOrEqualTo(1L)); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("key4").toString(), equalTo("30")); + + response = client().prepareSearch("output2").get(); + assertNoFailures(response); + assertThat(response.getHits().getTotalHits(), greaterThanOrEqualTo(1L)); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("key4").toString(), equalTo("30")); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/chain/ChainTransformTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/chain/ChainTransformTests.java new file mode 100644 index 0000000000000..f3493c9c354f4 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/chain/ChainTransformTests.java @@ -0,0 +1,290 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.transform.chain; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.transform.ExecutableTransform; +import org.elasticsearch.xpack.core.watcher.transform.Transform; +import org.elasticsearch.xpack.core.watcher.transform.TransformFactory; +import org.elasticsearch.xpack.core.watcher.transform.TransformRegistry; +import org.elasticsearch.xpack.core.watcher.transform.chain.ChainTransform; +import org.elasticsearch.xpack.core.watcher.transform.chain.ChainTransformFactory; +import org.elasticsearch.xpack.core.watcher.transform.chain.ExecutableChainTransform; +import org.elasticsearch.xpack.core.watcher.watch.Payload; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Mockito.mock; + +public class ChainTransformTests extends ESTestCase { + public void testExecute() throws Exception { + ChainTransform transform = new ChainTransform( + new NamedExecutableTransform.Transform("name1"), + new NamedExecutableTransform.Transform("name2"), + new NamedExecutableTransform.Transform("name3") + ); + ExecutableChainTransform executable = new ExecutableChainTransform(transform, logger, + new NamedExecutableTransform("name1"), + new NamedExecutableTransform("name2"), + new NamedExecutableTransform("name3")); + + WatchExecutionContext ctx = mock(WatchExecutionContext.class); + Payload payload = new Payload.Simple(new HashMap()); + + ChainTransform.Result result = executable.execute(ctx, payload); + assertThat(result.status(), is(Transform.Result.Status.SUCCESS)); + assertThat(result.results(), hasSize(3)); + assertThat(result.results().get(0), instanceOf(NamedExecutableTransform.Result.class)); + assertThat(result.results().get(0).status(), is(Transform.Result.Status.SUCCESS)); + assertThat((List) result.results().get(0).payload().data().get("names"), hasSize(1)); + assertThat((List) result.results().get(0).payload().data().get("names"), contains("name1")); + assertThat(result.results().get(1), instanceOf(NamedExecutableTransform.Result.class)); + assertThat(result.results().get(1).status(), is(Transform.Result.Status.SUCCESS)); + assertThat((List) result.results().get(1).payload().data().get("names"), hasSize(2)); + assertThat((List) result.results().get(1).payload().data().get("names"), contains("name1", "name2")); + assertThat(result.results().get(2), instanceOf(NamedExecutableTransform.Result.class)); + assertThat(result.results().get(2).status(), is(Transform.Result.Status.SUCCESS)); + assertThat((List) 
result.results().get(2).payload().data().get("names"), hasSize(3)); + assertThat((List) result.results().get(2).payload().data().get("names"), contains("name1", "name2", "name3")); + + Map data = result.payload().data(); + assertThat(data, notNullValue()); + assertThat(data, hasKey("names")); + assertThat(data.get("names"), instanceOf(List.class)); + List names = (List) data.get("names"); + assertThat(names, hasSize(3)); + assertThat(names, contains("name1", "name2", "name3")); + } + + public void testExecuteFailure() throws Exception { + ChainTransform transform = new ChainTransform( + new NamedExecutableTransform.Transform("name1"), + new NamedExecutableTransform.Transform("name2"), + new FailingExecutableTransform.Transform() + ); + ExecutableChainTransform executable = new ExecutableChainTransform(transform, logger, + new NamedExecutableTransform("name1"), + new NamedExecutableTransform("name2"), + new FailingExecutableTransform(logger)); + + WatchExecutionContext ctx = mock(WatchExecutionContext.class); + Payload payload = new Payload.Simple(new HashMap()); + + ChainTransform.Result result = executable.execute(ctx, payload); + assertThat(result.status(), is(Transform.Result.Status.FAILURE)); + assertThat(result.reason(), notNullValue()); + assertThat(result.results(), hasSize(3)); + assertThat(result.results().get(0), instanceOf(NamedExecutableTransform.Result.class)); + assertThat(result.results().get(0).status(), is(Transform.Result.Status.SUCCESS)); + assertThat((List) result.results().get(0).payload().data().get("names"), hasSize(1)); + assertThat((List) result.results().get(0).payload().data().get("names"), contains("name1")); + assertThat(result.results().get(1), instanceOf(NamedExecutableTransform.Result.class)); + assertThat(result.results().get(1).status(), is(Transform.Result.Status.SUCCESS)); + assertThat((List) result.results().get(1).payload().data().get("names"), hasSize(2)); + assertThat((List) result.results().get(1).payload().data().get("names"), contains("name1", "name2")); + assertThat(result.results().get(2), instanceOf(FailingExecutableTransform.Result.class)); + assertThat(result.results().get(2).status(), is(Transform.Result.Status.FAILURE)); + assertThat(result.results().get(2).reason(), containsString("_error")); + + } + + public void testParser() throws Exception { + TransformRegistry registry = new TransformRegistry(Settings.EMPTY, + singletonMap("named", new NamedExecutableTransform.Factory(logger))); + + ChainTransformFactory transformParser = new ChainTransformFactory(Settings.EMPTY, registry); + + XContentBuilder builder = jsonBuilder().startArray() + .startObject().startObject("named").field("name", "name1").endObject().endObject() + .startObject().startObject("named").field("name", "name2").endObject().endObject() + .startObject().startObject("named").field("name", "name3").endObject().endObject() + .startObject().field("named", "name4").endObject() + .endArray(); + + XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + parser.nextToken(); + ExecutableChainTransform executable = transformParser.parseExecutable("_id", parser); + assertThat(executable, notNullValue()); + assertThat(executable.transform().getTransforms(), notNullValue()); + assertThat(executable.transform().getTransforms(), hasSize(4)); + for (int i = 0; i < executable.transform().getTransforms().size(); i++) { + assertThat(executable.executableTransforms().get(i), instanceOf(NamedExecutableTransform.class)); + 
assertThat(((NamedExecutableTransform) executable.executableTransforms().get(i)).transform().name, is("name" + (i + 1))); + } + } + + private static class NamedExecutableTransform extends ExecutableTransform { + private static final String TYPE = "named"; + + NamedExecutableTransform(String name) { + this(new Transform(name)); + } + + NamedExecutableTransform(Transform transform) { + super(transform, Loggers.getLogger(NamedExecutableTransform.class)); + } + + @Override + public Result execute(WatchExecutionContext ctx, Payload payload) { + List names = (List) payload.data().get("names"); + if (names == null) { + names = new ArrayList<>(); + } else { + names = new ArrayList<>(names); + } + names.add(transform.name); + Map data = new HashMap<>(); + data.put("names", names); + return new Result("named", new Payload.Simple(data)); + } + + public static class Transform implements org.elasticsearch.xpack.core.watcher.transform.Transform { + + private final String name; + + Transform(String name) { + this.name = name; + } + + @Override + public String type() { + return TYPE; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject().field("name", name).endObject(); + } + } + + public static class Result extends Transform.Result { + + Result(String type, Payload payload) { + super(type, payload); + } + + @Override + protected XContentBuilder typeXContent(XContentBuilder builder, Params params) throws IOException { + return builder; + } + } + + public static class Factory extends TransformFactory { + Factory(Logger transformLogger) { + super(transformLogger); + } + + @Override + public String type() { + return TYPE; + } + + @Override + public Transform parseTransform(String watchId, XContentParser parser) throws IOException { + if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { + return new Transform(parser.text()); + } + assert parser.currentToken() == XContentParser.Token.START_OBJECT; + XContentParser.Token token = parser.nextToken(); + assert token == XContentParser.Token.FIELD_NAME; // the "name" field + token = parser.nextToken(); + assert token == XContentParser.Token.VALUE_STRING; + String name = parser.text(); + token = parser.nextToken(); + assert token == XContentParser.Token.END_OBJECT; + return new Transform(name); + } + + @Override + public NamedExecutableTransform createExecutable(Transform transform) { + return new NamedExecutableTransform(transform); + } + } + } + + private static class FailingExecutableTransform extends ExecutableTransform { + private static final String TYPE = "throwing"; + + FailingExecutableTransform(Logger logger) { + super(new Transform(), logger); + } + + @Override + public Result execute(WatchExecutionContext ctx, Payload payload) { + return new Result(TYPE); + } + + public static class Transform implements org.elasticsearch.xpack.core.watcher.transform.Transform { + @Override + public String type() { + return TYPE; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject().endObject(); + } + } + + public static class Result extends Transform.Result { + Result(String type) { + super(type, new Exception("_error")); + } + + @Override + protected XContentBuilder typeXContent(XContentBuilder builder, Params params) throws IOException { + return builder; + } + } + + public static class Factory extends TransformFactory { + Factory(Logger transformLogger) { + 
super(transformLogger); + } + + @Override + public String type() { + return TYPE; + } + + @Override + public Transform parseTransform(String watchId, XContentParser parser) throws IOException { + assert parser.currentToken() == XContentParser.Token.START_OBJECT; + XContentParser.Token token = parser.nextToken(); + assert token == XContentParser.Token.END_OBJECT; + return new Transform(); + } + + @Override + public FailingExecutableTransform createExecutable(Transform transform) { + return new FailingExecutableTransform(transformLogger); + } + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/script/ScriptTransformTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/script/ScriptTransformTests.java new file mode 100644 index 0000000000000..bc79561f726c9 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transform/script/ScriptTransformTests.java @@ -0,0 +1,212 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.transform.script; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptException; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.transform.Transform; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.Watcher; +import org.elasticsearch.xpack.watcher.support.Variables; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singleton; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArgument; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.mockExecutionContext; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ScriptTransformTests extends ESTestCase { + + public void testExecuteMapValue() throws Exception { + ScriptService service = mock(ScriptService.class); + ScriptType type = randomFrom(ScriptType.values()); + Map params = Collections.emptyMap(); + Script script = new Script(type, type == ScriptType.STORED ? 
null : "_lang", "_script", params); + ExecutableScript.Factory factory = mock(ExecutableScript.Factory.class); + when(service.compile(script, Watcher.SCRIPT_EXECUTABLE_CONTEXT)).thenReturn(factory); + ExecutableScriptTransform transform = new ExecutableScriptTransform(new ScriptTransform(script), logger, service); + + WatchExecutionContext ctx = mockExecutionContext("_name", Payload.EMPTY); + + Payload payload = new Payload.Simple("key", "value"); + + Map model = Variables.createCtxModel(ctx, payload); + + Map transformed = singletonMap("key", "value"); + + ExecutableScript executable = mock(ExecutableScript.class); + when(executable.run()).thenReturn(transformed); + when(factory.newInstance(model)).thenReturn(executable); + + Transform.Result result = transform.execute(ctx, payload); + assertThat(result, notNullValue()); + assertThat(result.type(), is(ScriptTransform.TYPE)); + assertThat(result.status(), is(Transform.Result.Status.SUCCESS)); + assertThat(result.payload().data(), equalTo(transformed)); + } + + public void testExecuteMapValueFailure() throws Exception { + ScriptService service = mock(ScriptService.class); + ScriptType type = randomFrom(ScriptType.values()); + Map params = Collections.emptyMap(); + Script script = new Script(type, type == ScriptType.STORED ? null : "_lang", "_script", params); + ExecutableScript.Factory factory = mock(ExecutableScript.Factory.class); + when(service.compile(script, Watcher.SCRIPT_EXECUTABLE_CONTEXT)).thenReturn(factory); + ExecutableScriptTransform transform = new ExecutableScriptTransform(new ScriptTransform(script), logger, service); + + WatchExecutionContext ctx = mockExecutionContext("_name", Payload.EMPTY); + + Payload payload = new Payload.Simple("key", "value"); + + Map model = Variables.createCtxModel(ctx, payload); + + ExecutableScript executable = mock(ExecutableScript.class); + when(executable.run()).thenThrow(new RuntimeException("_error")); + when(factory.newInstance(model)).thenReturn(executable); + + Transform.Result result = transform.execute(ctx, payload); + assertThat(result, notNullValue()); + assertThat(result.type(), is(ScriptTransform.TYPE)); + assertThat(result.status(), is(Transform.Result.Status.FAILURE)); + assertThat(result.reason(), containsString("_error")); + } + + public void testExecuteNonMapValue() throws Exception { + ScriptService service = mock(ScriptService.class); + ScriptType type = randomFrom(ScriptType.values()); + Map params = Collections.emptyMap(); + Script script = new Script(type, type == ScriptType.STORED ? 
null : "_lang", "_script", params); + ExecutableScript.Factory factory = mock(ExecutableScript.Factory.class); + when(service.compile(script, Watcher.SCRIPT_EXECUTABLE_CONTEXT)).thenReturn(factory); + ExecutableScriptTransform transform = new ExecutableScriptTransform(new ScriptTransform(script), logger, service); + + WatchExecutionContext ctx = mockExecutionContext("_name", Payload.EMPTY); + + Payload payload = new Payload.Simple("key", "value"); + + Map model = Variables.createCtxModel(ctx, payload); + + ExecutableScript executable = mock(ExecutableScript.class); + Object value = randomFrom("value", 1, new String[] { "value" }, Collections.singletonList("value"), singleton("value")); + when(executable.run()).thenReturn(value); + when(factory.newInstance(model)).thenReturn(executable); + + Transform.Result result = transform.execute(ctx, payload); + assertThat(result, notNullValue()); + assertThat(result.type(), is(ScriptTransform.TYPE)); + assertThat(result.payload().data().size(), is(1)); + assertThat(result.payload().data(), hasEntry("_value", value)); + } + + public void testParser() throws Exception { + ScriptService service = mock(ScriptService.class); + ScriptType type = randomFrom(ScriptType.values()); + XContentBuilder builder = jsonBuilder().startObject(); + builder.field(scriptTypeField(type), "_script"); + if (type != ScriptType.STORED) { + builder.field("lang", "_lang"); + } + builder.startObject("params").field("key", "value").endObject(); + builder.endObject(); + + XContentParser parser = createParser(builder); + parser.nextToken(); + ExecutableScriptTransform transform = new ScriptTransformFactory(Settings.EMPTY, service).parseExecutable("_id", parser); + Script script = new Script(type, type == ScriptType.STORED ? null : "_lang", "_script", singletonMap("key", "value")); + assertThat(transform.transform().getScript(), equalTo(script)); + } + + public void testParserString() throws Exception { + ScriptService service = mock(ScriptService.class); + XContentBuilder builder = jsonBuilder().value("_script"); + + XContentParser parser = createParser(builder); + parser.nextToken(); + ExecutableScriptTransform transform = new ScriptTransformFactory(Settings.EMPTY, service).parseExecutable("_id", parser); + assertEquals(new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "_script", emptyMap()), transform.transform().getScript()); + } + + public void testScriptConditionParserBadScript() throws Exception { + ScriptService scriptService = mock(ScriptService.class); + String errorMessage = "expected error message"; + ScriptException scriptException = new ScriptException(errorMessage, new RuntimeException("foo"), + Collections.emptyList(), "whatever", "whatever"); + when(scriptService.compile(anyObject(), eq(Watcher.SCRIPT_EXECUTABLE_CONTEXT))).thenThrow(scriptException); + + ScriptTransformFactory transformFactory = new ScriptTransformFactory(Settings.builder().build(), scriptService); + + XContentBuilder builder = jsonBuilder().startObject() + .field(scriptTypeField(randomFrom(ScriptType.values())), "whatever") + .startObject("params").field("key", "value").endObject() + .endObject(); + + XContentParser parser = createParser(builder); + parser.nextToken(); + ScriptTransform scriptTransform = transformFactory.parseTransform("_watch", parser); + Exception e = expectThrows(ScriptException.class, () -> transformFactory.createExecutable(scriptTransform)); + assertThat(e.getMessage(), containsString(errorMessage)); + } + + public void testScriptConditionParserBadLang() throws 
Exception { + ScriptTransformFactory transformFactory = new ScriptTransformFactory(Settings.builder().build(), createScriptService()); + String script = "return true"; + XContentBuilder builder = jsonBuilder().startObject() + .field(scriptTypeField(ScriptType.INLINE), script) + .field("lang", "not_a_valid_lang") + .startObject("params").field("key", "value").endObject() + .endObject(); + + + XContentParser parser = createParser(builder); + parser.nextToken(); + ScriptTransform scriptCondition = transformFactory.parseTransform("_watch", parser); + Exception e = expectThrows(IllegalArgumentException.class, () -> transformFactory.createExecutable(scriptCondition)); + assertThat(e.getMessage(), containsString("script_lang not supported [not_a_valid_lang]")); + } + + static String scriptTypeField(ScriptType type) { + switch (type) { + case INLINE: return "source"; + case STORED: return "id"; + default: + throw illegalArgument("unsupported script type [{}]", type); + } + } + + public static ScriptService createScriptService() throws Exception { + Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .build(); + Map contexts = new HashMap<>(ScriptModule.CORE_CONTEXTS); + contexts.put(Watcher.SCRIPT_EXECUTABLE_CONTEXT.name, Watcher.SCRIPT_EXECUTABLE_CONTEXT); + contexts.put(Watcher.SCRIPT_TEMPLATE_CONTEXT.name, Watcher.SCRIPT_TEMPLATE_CONTEXT); + return new ScriptService(settings, Collections.emptyMap(), Collections.emptyMap()); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/WatchRequestValidationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/WatchRequestValidationTests.java new file mode 100644 index 0000000000000..893e493bd1589 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/WatchRequestValidationTests.java @@ -0,0 +1,147 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.transport.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.execution.ActionExecutionMode; +import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.activate.ActivateWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.delete.DeleteWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequest; + +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class WatchRequestValidationTests extends ESTestCase { + + public void testAcknowledgeWatchInvalidWatchId() { + ActionRequestValidationException e = new AckWatchRequest("id with whitespaces").validate(); + assertThat(e, is(notNullValue())); + assertThat(e.validationErrors(), hasItem("watch id contains whitespace")); + } + + public void testAcknowledgeWatchInvalidActionId() { + ActionRequestValidationException e = new AckWatchRequest("_id", "action id with whitespaces").validate(); + assertThat(e, is(notNullValue())); + assertThat(e.validationErrors(), hasItem("action id [action id with whitespaces] contains whitespace")); + } + + public void testAcknowledgeWatchNullActionArray() { + // need this to prevent some compilation errors, i.e. 
in 1.8.0_91 + String [] nullArray = null; + ActionRequestValidationException e = new AckWatchRequest("_id", nullArray).validate(); + assertThat(e, is(nullValue())); + } + + public void testAcknowledgeWatchNullActionId() { + ActionRequestValidationException e = new AckWatchRequest("_id", new String[] {null}).validate(); + assertThat(e, is(notNullValue())); + assertThat(e.validationErrors(), hasItem("action id may not be null")); + } + + public void testActivateWatchInvalidWatchId() { + ActionRequestValidationException e = new ActivateWatchRequest("id with whitespaces", randomBoolean()).validate(); + assertThat(e, is(notNullValue())); + assertThat(e.validationErrors(), hasItem("watch id contains whitespace")); + } + + public void testDeleteWatchInvalidWatchId() { + ActionRequestValidationException e = new DeleteWatchRequest("id with whitespaces").validate(); + assertThat(e, is(notNullValue())); + assertThat(e.validationErrors(), hasItem("watch id contains whitespace")); + } + + public void testDeleteWatchNullId() { + ActionRequestValidationException e = new DeleteWatchRequest(null).validate(); + assertThat(e, is(notNullValue())); + assertThat(e.validationErrors(), hasItem("watch id is missing")); + } + + public void testPutWatchInvalidWatchId() { + ActionRequestValidationException e = new PutWatchRequest("id with whitespaces", BytesArray.EMPTY, XContentType.JSON).validate(); + assertThat(e, is(notNullValue())); + assertThat(e.validationErrors(), hasItem("watch id contains whitespace")); + } + + public void testPutWatchNullId() { + ActionRequestValidationException e = new PutWatchRequest(null, BytesArray.EMPTY, XContentType.JSON).validate(); + assertThat(e, is(notNullValue())); + assertThat(e.validationErrors(), hasItem("watch id is missing")); + } + + public void testPutWatchSourceNull() { + ActionRequestValidationException e = new PutWatchRequest("foo", (BytesReference) null, XContentType.JSON).validate(); + assertThat(e, is(notNullValue())); + assertThat(e.validationErrors(), hasItem("watch source is missing")); + } + + public void testGetWatchInvalidWatchId() { + ActionRequestValidationException e = new GetWatchRequest("id with whitespaces").validate(); + assertThat(e, is(notNullValue())); + assertThat(e.validationErrors(), hasItem("watch id contains whitespace")); + } + + public void testGetWatchNullId() { + ActionRequestValidationException e = new GetWatchRequest((String) null).validate(); + assertThat(e, is(notNullValue())); + assertThat(e.validationErrors(), hasItem("watch id is missing")); + } + + public void testExecuteWatchInvalidWatchId() { + ActionRequestValidationException e = new ExecuteWatchRequest("id with whitespaces").validate(); + assertThat(e, is(notNullValue())); + assertThat(e.validationErrors(), hasItem("watch id contains whitespace")); + } + + public void testExecuteWatchMissingWatchIdNoSource() { + ActionRequestValidationException e = new ExecuteWatchRequest((String) null).validate(); + assertThat(e, is(notNullValue())); + assertThat(e.validationErrors(), + hasItem("a watch execution request must either have a watch id or an inline watch source, but both are missing")); + } + + public void testExecuteWatchInvalidActionId() { + ExecuteWatchRequest request = new ExecuteWatchRequest("foo"); + request.setActionMode("foo bar baz", ActionExecutionMode.EXECUTE); + ActionRequestValidationException e = request.validate(); + assertThat(e, is(notNullValue())); + assertThat(e.validationErrors(), hasItem("action id [foo bar baz] contains whitespace")); + } + + public void 
testExecuteWatchWatchIdAndSource() { + ExecuteWatchRequest request = new ExecuteWatchRequest("foo"); + request.setWatchSource(BytesArray.EMPTY, XContentType.JSON); + ActionRequestValidationException e = request.validate(); + assertThat(e, is(notNullValue())); + assertThat(e.validationErrors(), + hasItem("a watch execution request must either have a watch id or an inline watch source but not both")); + } + + public void testExecuteWatchSourceAndRecordExecution() { + ExecuteWatchRequest request = new ExecuteWatchRequest(); + request.setWatchSource(BytesArray.EMPTY, XContentType.JSON); + request.setRecordExecution(true); + ActionRequestValidationException e = request.validate(); + assertThat(e, is(notNullValue())); + assertThat(e.validationErrors(), hasItem("the execution of an inline watch cannot be recorded")); + } + + public void testExecuteWatchNullActionMode() { + ExecuteWatchRequest request = new ExecuteWatchRequest(); + request.setActionMode(null, ActionExecutionMode.EXECUTE); + ActionRequestValidationException e = request.validate(); + assertThat(e, is(notNullValue())); + assertThat(e.validationErrors(), hasItem("action id may not be null")); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java new file mode 100644 index 0000000000000..c692a1dfc8b7b --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java @@ -0,0 +1,169 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.transport.action.activate; + + +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.core.watcher.transport.actions.activate.ActivateWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsResponse; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; + +import java.util.List; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.indexAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.cron; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +@TestLogging("org.elasticsearch.xpack.watcher:DEBUG,org.elasticsearch.xpack.watcher.WatcherIndexingListener:TRACE") +public class ActivateWatchTests extends AbstractWatcherIntegrationTestCase { + + @Override + protected boolean timeWarped() { + return false; + } + + // FIXME not to be sleep based + public void testDeactivateAndActivate() throws Exception { + PutWatchResponse putWatchResponse = watcherClient().preparePutWatch() + .setId("_id") + .setSource(watchBuilder() + .trigger(schedule(interval("1s"))) + .input(simpleInput("foo", "bar")) + .addAction("_a1", indexAction("actions", "action1")) + .defaultThrottlePeriod(new TimeValue(0, TimeUnit.SECONDS))) + .get(); + + assertThat(putWatchResponse.isCreated(), is(true)); + + GetWatchResponse getWatchResponse = watcherClient().prepareGetWatch("_id").get(); + assertThat(getWatchResponse, notNullValue()); + assertThat(getWatchResponse.getStatus().state().isActive(), is(true)); + + logger.info("Waiting for watch to be executed at least once"); + assertWatchWithMinimumActionsCount("_id", ExecutionState.EXECUTED, 1); + + // we now know the watch is executing... 
lets deactivate it + ActivateWatchResponse activateWatchResponse = watcherClient().prepareActivateWatch("_id", false).get(); + assertThat(activateWatchResponse, notNullValue()); + assertThat(activateWatchResponse.getStatus().state().isActive(), is(false)); + + getWatchResponse = watcherClient().prepareGetWatch("_id").get(); + assertThat(getWatchResponse, notNullValue()); + assertThat(getWatchResponse.getStatus().state().isActive(), is(false)); + + // wait until no watch is executing + assertBusy(() -> { + WatcherStatsResponse statsResponse = watcherClient().prepareWatcherStats().setIncludeCurrentWatches(true).get(); + int sum = statsResponse.getNodes().stream().map(WatcherStatsResponse.Node::getSnapshots).mapToInt(List::size).sum(); + assertThat(sum, is(0)); + }); + + logger.info("Ensured no more watches are being executed"); + refresh(); + long count1 = docCount(".watcher-history*", "doc", matchAllQuery()); + + logger.info("Sleeping for 5 seconds, watch history count [{}]", count1); + Thread.sleep(5000); + + refresh(); + long count2 = docCount(".watcher-history*", "doc", matchAllQuery()); + + assertThat(count2, is(count1)); + + // lets activate it again + logger.info("Activating watch again"); + + activateWatchResponse = watcherClient().prepareActivateWatch("_id", true).get(); + assertThat(activateWatchResponse, notNullValue()); + assertThat(activateWatchResponse.getStatus().state().isActive(), is(true)); + + getWatchResponse = watcherClient().prepareGetWatch("_id").get(); + assertThat(getWatchResponse, notNullValue()); + assertThat(getWatchResponse.getStatus().state().isActive(), is(true)); + + logger.info("Sleeping for another five seconds, ensuring that watch is executed"); + Thread.sleep(5000); + refresh(); + long count3 = docCount(".watcher-history*", "doc", matchAllQuery()); + assertThat(count3, greaterThan(count1)); + } + + public void testLoadWatchWithoutAState() throws Exception { + WatcherClient watcherClient = watcherClient(); + + PutWatchResponse putWatchResponse = watcherClient.preparePutWatch() + .setId("_id") + .setSource(watchBuilder() + .trigger(schedule(cron("0 0 0 1 1 ? 
2050"))) // some time in 2050 + .input(simpleInput("foo", "bar")) + .addAction("_a1", indexAction("actions", "action1")) + .defaultThrottlePeriod(new TimeValue(0, TimeUnit.SECONDS))) + .get(); + + assertThat(putWatchResponse.isCreated(), is(true)); + + GetWatchResponse getWatchResponse = watcherClient.prepareGetWatch("_id").get(); + assertThat(getWatchResponse, notNullValue()); + assertThat(getWatchResponse.getStatus().state().isActive(), is(true)); + + GetResponse getResponse = client().prepareGet(".watches", "doc", "_id").get(); + XContentSource source = new XContentSource(getResponse.getSourceAsBytesRef(), XContentType.JSON); + + Set filters = Sets.newHashSet( + "trigger.**", + "input.**", + "condition.**", + "throttle_period.**", + "transform.**", + "actions.**", + "metadata.**", + "status.version", + "status.last_checked", + "status.last_met_condition", + "status.actions.**"); + + XContentBuilder builder = new XContentBuilder(XContentType.JSON.xContent(), new BytesStreamOutput(), filters); + source.toXContent(builder, ToXContent.EMPTY_PARAMS); + + // now that we filtered out the watch status state, lets put it back in + IndexResponse indexResponse = client().prepareIndex(".watches", "doc", "_id") + .setSource(BytesReference.bytes(builder), XContentType.JSON) + .get(); + assertThat(indexResponse.getId(), is("_id")); + + // now, let's restart + stopWatcher(); + startWatcher(); + + getWatchResponse = watcherClient.prepareGetWatch("_id").get(); + assertThat(getWatchResponse, notNullValue()); + assertThat(getWatchResponse.getStatus().state(), notNullValue()); + assertThat(getWatchResponse.getStatus().state().isActive(), is(true)); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/delete/DeleteWatchTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/delete/DeleteWatchTests.java new file mode 100644 index 0000000000000..b50204abf4176 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/delete/DeleteWatchTests.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.transport.action.delete; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; +import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.xpack.core.watcher.transport.actions.delete.DeleteWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; + +import java.util.Map; + +import static com.carrotsearch.randomizedtesting.RandomizedTest.sleep; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.httpInput; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; + +public class DeleteWatchTests extends AbstractWatcherIntegrationTestCase { + + // This is a special case, since locking is removed + // Deleting a watch while it is being executed is possible now + // This test ensures that there are no leftovers, like a watch status without a watch in the watch store + // The watch history is also checked, to verify that the execution did not end in an error even though the watch was deleted + // The mock webserver does not support count down latches, so we have to use sleep - sorry!
+ public void testWatchDeletionDuringExecutionWorks() throws Exception { + MockResponse response = new MockResponse(); + response.setBody("foo"); + response.setResponseCode(200); + response.setBodyDelay(TimeValue.timeValueSeconds(5)); + + try (MockWebServer server = new MockWebServer()) { + server.enqueue(response); + server.start(); + HttpRequestTemplate template = HttpRequestTemplate.builder(server.getHostName(), server.getPort()).path("/").build(); + + PutWatchResponse responseFuture = watcherClient().preparePutWatch("_name").setSource(watchBuilder() + .trigger(schedule(interval("6h"))) + .input(httpInput(template)) + .addAction("_action1", loggingAction("anything"))) + .get(); + assertThat(responseFuture.isCreated(), is(true)); + + ActionFuture<ExecuteWatchResponse> executeWatchFuture = + watcherClient().prepareExecuteWatch("_name").setRecordExecution(true).execute(); + + // without this sleep the delete operation might overtake the watch execution + sleep(1000); + DeleteWatchResponse deleteWatchResponse = watcherClient().prepareDeleteWatch("_name").get(); + assertThat(deleteWatchResponse.isFound(), is(true)); + + executeWatchFuture.get(); + + // the watch is gone, no leftovers + GetWatchResponse getWatchResponse = watcherClient().prepareGetWatch("_name").get(); + assertThat(getWatchResponse.isFound(), is(false)); + + // the watch history shows a successful execution, even though the watch was deleted + // during execution + refresh(HistoryStoreField.INDEX_PREFIX + "*"); + + SearchResponse searchResponse = client().prepareSearch(HistoryStoreField.INDEX_PREFIX + "*").setQuery(matchAllQuery()).get(); + assertHitCount(searchResponse, 1); + + Map<String, Object> source = searchResponse.getHits().getAt(0).getSourceAsMap(); + // watch has been executed successfully + String state = ObjectPath.eval("state", source); + assertThat(state, is("executed")); + // no exception occurred + assertThat(source, not(hasKey("exception"))); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/execute/ExecuteWatchRequestTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/execute/ExecuteWatchRequestTests.java new file mode 100644 index 0000000000000..8083374e44a87 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/execute/ExecuteWatchRequestTests.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.watcher.transport.action.execute; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchRequest; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +public class ExecuteWatchRequestTests extends ESTestCase { + + public void testSerialization() throws IOException { + ExecuteWatchRequest request = new ExecuteWatchRequest("1"); + request.setWatchSource(new BytesArray("{}".getBytes(StandardCharsets.UTF_8)), XContentType.JSON); + assertEquals(XContentType.JSON, request.getXContentType()); + + BytesStreamOutput out = new BytesStreamOutput(); + request.writeTo(out); + StreamInput in = StreamInput.wrap(out.bytes().toBytesRef().bytes); + ExecuteWatchRequest serialized = new ExecuteWatchRequest(in); + assertEquals(XContentType.JSON, serialized.getXContentType()); + assertEquals("{}", serialized.getWatchSource().utf8ToString()); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/execute/ExecuteWatchTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/execute/ExecuteWatchTests.java new file mode 100644 index 0000000000000..c307764c29f0d --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/execute/ExecuteWatchTests.java @@ -0,0 +1,176 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.transport.action.execute; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.execution.ActionExecutionMode; +import org.elasticsearch.xpack.core.watcher.execution.Wid; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchRequestBuilder; +import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; + +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.cron; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class ExecuteWatchTests extends AbstractWatcherIntegrationTestCase { + + public void testExecuteAllDefaults() throws Exception { + WatcherClient watcherClient = watcherClient(); + + PutWatchResponse putWatchResponse = watcherClient.preparePutWatch() + .setId("_id") + .setSource(watchBuilder() + .trigger(schedule(cron("0/5 * * * * ? 
2099"))) + .input(simpleInput("foo", "bar")) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("log", loggingAction("_text"))) + .get(); + + assertThat(putWatchResponse.isCreated(), is(true)); + + ExecuteWatchResponse response = watcherClient.prepareExecuteWatch("_id").get(); + assertThat(response, notNullValue()); + assertThat(response.getRecordId(), notNullValue()); + Wid wid = new Wid(response.getRecordId()); + assertThat(wid.watchId(), is("_id")); + + XContentSource record = response.getRecordSource(); + assertValue(record, "watch_id", is("_id")); + assertValue(record, "trigger_event.type", is("manual")); + assertValue(record, "trigger_event.triggered_time", notNullValue()); + String triggeredTime = record.getValue("trigger_event.triggered_time"); + assertValue(record, "trigger_event.manual.schedule.scheduled_time", is(triggeredTime)); + assertValue(record, "state", is("executed")); + assertValue(record, "input.simple.foo", is("bar")); + assertValue(record, "condition.always", notNullValue()); + assertValue(record, "result.execution_time", notNullValue()); + assertValue(record, "result.execution_duration", notNullValue()); + assertValue(record, "result.input.type", is("simple")); + assertValue(record, "result.input.payload.foo", is("bar")); + assertValue(record, "result.condition.type", is("always")); + assertValue(record, "result.condition.met", is(true)); + assertValue(record, "result.actions.0.id", is("log")); + assertValue(record, "result.actions.0.type", is("logging")); + assertValue(record, "result.actions.0.status", is("success")); + assertValue(record, "result.actions.0.logging.logged_text", is("_text")); + assertValue(record, "status.actions.log.ack.state", is("ackable")); + } + + public void testExecuteActionMode() throws Exception { + final WatcherClient watcherClient = watcherClient(); + + PutWatchResponse putWatchResponse = watcherClient.preparePutWatch() + .setId("_id") + .setSource(watchBuilder() + .trigger(schedule(interval("1s"))) // run every second so we can ack it + .input(simpleInput("foo", "bar")) + .defaultThrottlePeriod(TimeValue.timeValueMillis(0)) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("log", loggingAction("_text"))) + .get(); + + assertThat(putWatchResponse.isCreated(), is(true)); + + boolean execute = randomBoolean(); + boolean force = randomBoolean(); + ActionExecutionMode mode; + if (randomBoolean()) { + mode = ActionExecutionMode.SKIP; + } else { + if (execute && force) { + mode = ActionExecutionMode.FORCE_EXECUTE; + } else if (execute) { + mode = ActionExecutionMode.EXECUTE; + } else if (force) { + mode = ActionExecutionMode.FORCE_SIMULATE; + } else { + mode = ActionExecutionMode.SIMULATE; + } + } + + if (mode.force()) { + // since we're forcing, lets ack the action, such that it'd suppoed to be throttled + // but forcing will ignore the throttling + + // lets wait for the watch to be ackable + timeWarp().trigger("_id"); + + String[] actionIds = randomFrom( + new String[] { "_all" }, + new String[] { "log" }, + new String[] { "foo", "_all" }, + null + ); + AckWatchRequestBuilder ackWatchRequestBuilder = watcherClient.prepareAckWatch("_id"); + if (actionIds != null) { + ackWatchRequestBuilder.setActionIds(actionIds); + } + AckWatchResponse ackWatchResponse = ackWatchRequestBuilder.get(); + assertThat(ackWatchResponse, notNullValue()); + WatchStatus status = ackWatchResponse.getStatus(); + assertThat(status, notNullValue()); + ActionStatus actionStatus = status.actionStatus("log"); + assertThat(actionStatus, notNullValue()); 
+ assertThat(actionStatus.ackStatus().state(), is(ActionStatus.AckStatus.State.ACKED)); + } + + ExecuteWatchResponse response = watcherClient.prepareExecuteWatch("_id") + .setActionMode(randomBoolean() ? "log" : "_all", mode) + .get(); + assertThat(response, notNullValue()); + assertThat(response.getRecordId(), notNullValue()); + Wid wid = new Wid(response.getRecordId()); + assertThat(wid.watchId(), is("_id")); + + XContentSource record = response.getRecordSource(); + assertValue(record, "watch_id", is("_id")); + assertValue(record, "trigger_event.type", is("manual")); + assertValue(record, "trigger_event.triggered_time", notNullValue()); + String triggeredTime = record.getValue("trigger_event.triggered_time"); + assertValue(record, "trigger_event.manual.schedule.scheduled_time", is(triggeredTime)); + if (mode == ActionExecutionMode.SKIP) { + assertValue(record, "state", is("throttled")); + } else { + assertValue(record, "state", is("executed")); + } + assertValue(record, "input.simple.foo", is("bar")); + assertValue(record, "condition.always", notNullValue()); + assertValue(record, "result.execution_time", notNullValue()); + assertValue(record, "result.execution_duration", notNullValue()); + assertValue(record, "result.input.type", is("simple")); + assertValue(record, "result.input.payload.foo", is("bar")); + assertValue(record, "result.condition.type", is("always")); + assertValue(record, "result.condition.met", is(true)); + assertValue(record, "result.actions.0.id", is("log")); + assertValue(record, "result.actions.0.type", is("logging")); + switch (mode) { + case SKIP: // the action should be manually skipped/throttled + assertValue(record, "result.actions.0.status", is("throttled")); + assertValue(record, "result.actions.0.reason", is("manually skipped")); + break; + default: + if (mode.simulate()) { + assertValue(record, "result.actions.0.status", is("simulated")); + } else { + assertValue(record, "result.actions.0.status", is("success")); + } + assertValue(record, "result.actions.0.logging.logged_text", is("_text")); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/get/GetWatchTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/get/GetWatchTests.java new file mode 100644 index 0000000000000..9d13a2ec2a1d8 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/get/GetWatchTests.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.transport.action.get; + +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchResponse; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; + +import java.util.Map; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +// added due to https://github.com/elastic/x-pack-elasticsearch/issues/3854 +@TestLogging("org.elasticsearch.action.search:DEBUG") +public class GetWatchTests extends AbstractWatcherIntegrationTestCase { + + public void testGet() throws Exception { + PutWatchResponse putResponse = watcherClient().preparePutWatch("_name").setSource(watchBuilder() + .trigger(schedule(interval("5m"))) + .input(simpleInput()) + .condition(InternalAlwaysCondition.INSTANCE) + .addAction("_action1", loggingAction("{{ctx.watch_id}}"))) + .get(); + + assertThat(putResponse, notNullValue()); + assertThat(putResponse.isCreated(), is(true)); + + GetWatchResponse getResponse = watcherClient().getWatch(new GetWatchRequest("_name")).get(); + assertThat(getResponse, notNullValue()); + assertThat(getResponse.isFound(), is(true)); + assertThat(getResponse.getId(), is("_name")); + Map source = getResponse.getSource().getAsMap(); + assertThat(source, notNullValue()); + assertThat(source, hasKey("trigger")); + assertThat(source, hasKey("input")); + assertThat(source, hasKey("condition")); + assertThat(source, hasKey("actions")); + assertThat(source, not(hasKey("status"))); + } + + public void testGetNotFound() throws Exception { + // does not matter if the watch does not exist or the index does not exist, we expect the same response + // if the watches index is an alias, remove the alias randomly, otherwise the index + if (randomBoolean()) { + try { + GetIndexResponse indexResponse = client().admin().indices().prepareGetIndex().setIndices(Watch.INDEX).get(); + boolean isWatchIndexAlias = Watch.INDEX.equals(indexResponse.indices()[0]) == false; + if (isWatchIndexAlias) { + assertAcked(client().admin().indices().prepareAliases().removeAlias(indexResponse.indices()[0], Watch.INDEX)); + } else { + assertAcked(client().admin().indices().prepareDelete(Watch.INDEX)); + } + } catch (IndexNotFoundException e) {} + } + + GetWatchResponse getResponse = watcherClient().getWatch(new 
GetWatchRequest("_name")).get(); + assertThat(getResponse, notNullValue()); + assertThat(getResponse.getId(), is("_name")); + assertThat(getResponse.isFound(), is(false)); + assertThat(getResponse.getStatus(), nullValue()); + assertThat(getResponse.getSource(), nullValue()); + XContentSource source = getResponse.getSource(); + assertThat(source, nullValue()); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/put/PutWatchSerializationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/put/PutWatchSerializationTests.java new file mode 100644 index 0000000000000..a26022d97aeb3 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/put/PutWatchSerializationTests.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.transport.action.put; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequest; + +import static org.hamcrest.Matchers.is; + +public class PutWatchSerializationTests extends ESTestCase { + + // https://github.com/elastic/x-plugins/issues/2490 + public void testPutWatchSerialization() throws Exception { + PutWatchRequest request = new PutWatchRequest(); + request.setId(randomAlphaOfLength(10)); + request.setActive(randomBoolean()); + request.setSource( + new BytesArray(Strings.toString(JsonXContent.contentBuilder().startObject().field("foo", + randomAlphaOfLength(20)).endObject())), + XContentType.JSON); + + BytesStreamOutput streamOutput = new BytesStreamOutput(); + request.writeTo(streamOutput); + + PutWatchRequest readRequest = new PutWatchRequest(streamOutput.bytes().streamInput()); + assertThat(readRequest.isActive(), is(request.isActive())); + assertThat(readRequest.getId(), is(request.getId())); + assertThat(readRequest.getSource(), is(request.getSource())); + assertThat(readRequest.xContentType(), is(request.xContentType())); + assertThat(readRequest.getVersion(), is(request.getVersion())); + } + + public void testPutWatchSerializationXContent() throws Exception { + PutWatchRequest request = new PutWatchRequest(); + request.setId(randomAlphaOfLength(10)); + request.setActive(randomBoolean()); + request.setSource( + new BytesArray(Strings.toString(JsonXContent.contentBuilder().startObject().field("foo", + randomAlphaOfLength(20)).endObject())), + XContentType.JSON); + assertEquals(XContentType.JSON, request.xContentType()); + + BytesStreamOutput streamOutput = new BytesStreamOutput(); + request.writeTo(streamOutput); + + PutWatchRequest readRequest = new PutWatchRequest(streamOutput.bytes().streamInput()); + assertThat(readRequest.isActive(), is(request.isActive())); + assertThat(readRequest.getId(), is(request.getId())); + assertThat(readRequest.getSource(), is(request.getSource())); + assertThat(readRequest.xContentType(), is(XContentType.JSON)); + 
assertThat(readRequest.getVersion(), is(Versions.MATCH_ANY)); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchActionTests.java new file mode 100644 index 0000000000000..d1b04ea55660d --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/ack/TransportAckWatchActionTests.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.transport.actions.ack; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionSnapshot; +import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchResponse; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.execution.ExecutionService; +import org.elasticsearch.xpack.watcher.watch.WatchParser; +import org.junit.Before; + +import java.time.Clock; +import java.util.Collections; +import java.util.concurrent.ExecutionException; + +import static org.hamcrest.Matchers.is; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportAckWatchActionTests extends ESTestCase { + + private TransportAckWatchAction action; + private ExecutionService executionService; + private Client client; + + @Before + public void setupAction() { + TransportService transportService = mock(TransportService.class); + ThreadPool threadPool = mock(ThreadPool.class); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + when(threadPool.getThreadContext()).thenReturn(threadContext); + WatchParser watchParser = mock(WatchParser.class); + executionService = mock(ExecutionService.class); + client = mock(Client.class); + when(client.threadPool()).thenReturn(threadPool); + action = new TransportAckWatchAction(Settings.EMPTY, transportService, threadPool, + new ActionFilters(Collections.emptySet()), new IndexNameExpressionResolver(Settings.EMPTY), + Clock.systemUTC(), new XPackLicenseState(Settings.EMPTY), watchParser, executionService, client); + } + + public void testWatchNotFound() { + String watchId = "my_watch_id"; + doAnswer(invocation -> { + ActionListener listener = 
(ActionListener) invocation.getArguments()[1]; + listener.onResponse(new GetResponse(new GetResult(Watch.INDEX, Watch.DOC_TYPE, watchId, -1, false, + BytesArray.EMPTY, Collections.emptyMap()))); + return null; + }).when(client).get(anyObject(), anyObject()); + + AckWatchRequest ackWatchRequest = new AckWatchRequest(watchId); + PlainActionFuture listener = PlainActionFuture.newFuture(); + action.doExecute(ackWatchRequest, listener); + + ExecutionException exception = expectThrows(ExecutionException.class, listener::get); + ElasticsearchException e = (ElasticsearchException) exception.getCause(); + assertThat(e.getMessage(), is("Watch with id [" + watchId + "] does not exist")); + } + + public void testThatWatchCannotBeAckedWhileRunning() { + String watchId = "my_watch_id"; + WatchExecutionSnapshot snapshot = mock(WatchExecutionSnapshot.class); + when(snapshot.watchId()).thenReturn(watchId); + when(executionService.currentExecutions()).thenReturn(Collections.singletonList(snapshot)); + + AckWatchRequest ackWatchRequest = new AckWatchRequest(watchId); + PlainActionFuture listener = PlainActionFuture.newFuture(); + action.doExecute(ackWatchRequest, listener); + + ExecutionException exception = expectThrows(ExecutionException.class, listener::get); + ElasticsearchException e = (ElasticsearchException) exception.getCause(); + assertThat(e.getMessage(), is("watch[my_watch_id] is running currently, cannot ack until finished")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } +} \ No newline at end of file diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchActionTests.java new file mode 100644 index 0000000000000..ffefe3bab742d --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchActionTests.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.transport.actions.put; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequest; +import org.elasticsearch.xpack.core.watcher.watch.ClockMock; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.Watcher; +import org.elasticsearch.xpack.watcher.test.WatchExecutionContextMockBuilder; +import org.elasticsearch.xpack.watcher.watch.WatchParser; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.util.Collections; +import java.util.Map; + +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class TransportPutWatchActionTests extends ESTestCase { + + private TransportPutWatchAction action; + private Watch watch = new WatchExecutionContextMockBuilder("_id").buildMock().watch(); + private ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + + @Before + public void setupAction() throws Exception { + ThreadPool threadPool = mock(ThreadPool.class); + when(threadPool.getThreadContext()).thenReturn(threadContext); + + TransportService transportService = mock(TransportService.class); + + WatchParser parser = mock(WatchParser.class); + when(parser.parseWithSecrets(eq("_id"), eq(false), anyObject(), anyObject(), anyObject(), anyBoolean())).thenReturn(watch); + + Client client = mock(Client.class); + when(client.threadPool()).thenReturn(threadPool); + // mock an index response that calls the listener + doAnswer(invocation -> { + IndexRequest request = (IndexRequest) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; + + ShardId shardId = new ShardId(new Index(Watch.INDEX, "uuid"), 0); + listener.onResponse(new IndexResponse(shardId, request.type(), request.id(), 1, 1, 1, true)); + + return null; + }).when(client).execute(any(), any(), any()); + + action = new TransportPutWatchAction(Settings.EMPTY, transportService, threadPool, + new ActionFilters(Collections.emptySet()), new IndexNameExpressionResolver(Settings.EMPTY), new ClockMock(), + new XPackLicenseState(Settings.EMPTY), parser, client); + } + + public void testHeadersAreFilteredWhenPuttingWatches() throws Exception { + // set up threadcontext with some arbitrary info + String headerName = 
randomFrom(Watcher.HEADER_FILTERS); + threadContext.putHeader(headerName, randomAlphaOfLength(10)); + threadContext.putHeader(randomAlphaOfLength(10), "doesntmatter"); + + PutWatchRequest putWatchRequest = new PutWatchRequest(); + putWatchRequest.setId("_id"); + action.doExecute(putWatchRequest, ActionListener.wrap(r -> {}, e -> assertThat(e, is(nullValue())))); + + ArgumentCaptor captor = ArgumentCaptor.forClass(Map.class); + verify(watch.status()).setHeaders(captor.capture()); + Map capturedHeaders = captor.getValue(); + assertThat(capturedHeaders.keySet(), hasSize(1)); + assertThat(capturedHeaders, hasKey(headerName)); + } +} \ No newline at end of file diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/stats/TransportWatcherStatsActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/stats/TransportWatcherStatsActionTests.java new file mode 100644 index 0000000000000..94b356286daff --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/stats/TransportWatcherStatsActionTests.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.transport.actions.stats; + +import org.elasticsearch.Version; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.yaml.ObjectPath; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.watcher.WatcherState; +import org.elasticsearch.xpack.core.watcher.common.stats.Counters; +import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsRequest; +import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsResponse; +import org.elasticsearch.xpack.watcher.WatcherService; +import org.elasticsearch.xpack.watcher.execution.ExecutionService; +import org.elasticsearch.xpack.watcher.trigger.TriggerService; +import org.junit.Before; + +import java.util.Arrays; +import java.util.Collections; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TransportWatcherStatsActionTests extends ESTestCase { + + private TransportWatcherStatsAction action; + + @Before + public void setupTransportAction() { + TransportService transportService = mock(TransportService.class); + ThreadPool threadPool = mock(ThreadPool.class); + + ClusterService clusterService = mock(ClusterService.class); + DiscoveryNode discoveryNode = new 
DiscoveryNode("nodeId", buildNewFakeTransportAddress(), Version.CURRENT); + when(clusterService.localNode()).thenReturn(discoveryNode); + + ClusterName clusterName = new ClusterName("cluster_name"); + when(clusterService.getClusterName()).thenReturn(clusterName); + + ClusterState clusterState = mock(ClusterState.class); + when(clusterState.getMetaData()).thenReturn(MetaData.EMPTY_META_DATA); + when(clusterService.state()).thenReturn(clusterState); + + WatcherService watcherService = mock(WatcherService.class); + when(watcherService.state()).thenReturn(WatcherState.STARTED); + + ExecutionService executionService = mock(ExecutionService.class); + when(executionService.executionThreadPoolQueueSize()).thenReturn(100L); + when(executionService.executionThreadPoolMaxSize()).thenReturn(5L); + Counters firstExecutionCounters = new Counters(); + firstExecutionCounters.inc("spam.eggs", 1); + Counters secondExecutionCounters = new Counters(); + secondExecutionCounters.inc("whatever", 1); + secondExecutionCounters.inc("foo.bar.baz", 123); + when(executionService.executionTimes()).thenReturn(firstExecutionCounters, secondExecutionCounters); + + TriggerService triggerService = mock(TriggerService.class); + when(triggerService.count()).thenReturn(10L, 30L); + Counters firstTriggerServiceStats = new Counters(); + firstTriggerServiceStats.inc("foo.bar.baz", 1024); + Counters secondTriggerServiceStats = new Counters(); + secondTriggerServiceStats.inc("foo.bar.baz", 1024); + when(triggerService.stats()).thenReturn(firstTriggerServiceStats, secondTriggerServiceStats); + + action = new TransportWatcherStatsAction(Settings.EMPTY, transportService, + clusterService, threadPool, new ActionFilters(Collections.emptySet()), + new IndexNameExpressionResolver(Settings.EMPTY), watcherService, executionService, triggerService); + } + + public void testWatcherStats() throws Exception { + WatcherStatsRequest request = new WatcherStatsRequest(); + request.includeStats(true); + WatcherStatsResponse.Node nodeResponse1 = action.nodeOperation(new WatcherStatsRequest.Node(request, "nodeId")); + WatcherStatsResponse.Node nodeResponse2 = action.nodeOperation(new WatcherStatsRequest.Node(request, "nodeId2")); + + WatcherStatsResponse response = action.newResponse(request, + Arrays.asList(nodeResponse1, nodeResponse2), Collections.emptyList()); + assertThat(response.getWatchesCount(), is(40L)); + + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + response.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + ObjectPath objectPath = ObjectPath.createFromXContent(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + assertThat(objectPath.evaluate("stats.0.stats.foo.bar.baz"), is(1024)); + assertThat(objectPath.evaluate("stats.1.stats.foo.bar.baz"), is(1147)); + assertThat(objectPath.evaluate("stats.0.stats.spam.eggs"), is(1)); + assertThat(objectPath.evaluate("stats.1.stats.whatever"), is(1)); + } + } +} \ No newline at end of file diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java new file mode 100644 index 0000000000000..57fe40f67b4dd --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.watcher.trigger;
+
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.xpack.core.watcher.common.stats.Counters;
+import org.elasticsearch.xpack.core.watcher.watch.ClockMock;
+import org.elasticsearch.xpack.core.watcher.watch.Watch;
+import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleRegistry;
+import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger;
+import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEngine;
+import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEvent;
+import org.joda.time.DateTime;
+
+import java.io.IOException;
+import java.time.Clock;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+/**
+ * A mock scheduler to help with unit testing. Provides a {@link ScheduleTriggerEngineMock#trigger(String, int, TimeValue)}
+ * method to manually trigger registered watch jobs.
+ */
+public class ScheduleTriggerEngineMock extends ScheduleTriggerEngine {
+
+    private final Logger logger;
+    private final ConcurrentMap<String, Watch> watches = new ConcurrentHashMap<>();
+
+    public ScheduleTriggerEngineMock(Settings settings, ScheduleRegistry scheduleRegistry, Clock clock) {
+        super(settings, scheduleRegistry, clock);
+        this.logger = Loggers.getLogger(ScheduleTriggerEngineMock.class, settings);
+    }
+
+    @Override
+    public ScheduleTrigger parseTrigger(String context, XContentParser parser) throws IOException {
+        return new ScheduleTrigger(scheduleRegistry.parse(context, parser));
+    }
+
+    @Override
+    public ScheduleTriggerEvent parseTriggerEvent(TriggerService service, String watchId, String context,
+                                                  XContentParser parser) throws IOException {
+        return ScheduleTriggerEvent.parse(parser, watchId, context, clock);
+    }
+
+    @Override
+    public void start(Collection<Watch> jobs) {
+    }
+
+    @Override
+    public void stop() {
+        watches.clear();
+    }
+
+    @Override
+    public void add(Watch watch) {
+        logger.debug("adding watch [{}]", watch.id());
+        watches.put(watch.id(), watch);
+    }
+
+    @Override
+    public void pauseExecution() {
+        watches.clear();
+    }
+
+    @Override
+    public int getJobCount() {
+        return watches.size();
+    }
+
+    @Override
+    public boolean remove(String jobId) {
+        return watches.remove(jobId) != null;
+    }
+
+    public void trigger(String jobName) {
+        trigger(jobName, 1, null);
+    }
+
+    public void trigger(String jobName, int times) {
+        trigger(jobName, times, null);
+    }
+
+    public void trigger(String jobName, int times, TimeValue interval) {
+        if (watches.containsKey(jobName) == false) {
+            logger.trace("not executing job [{}], not found", jobName);
+            return;
+        }
+
+        for (int i = 0; i < times; i++) {
+            DateTime now = new DateTime(clock.millis());
+            logger.debug("firing watch [{}] at [{}]", jobName, now);
+            ScheduleTriggerEvent event = new ScheduleTriggerEvent(jobName, now, now);
+            consumers.forEach(consumer -> consumer.accept(Collections.singletonList(event)));
+            if (interval != null) {
+                if (clock instanceof ClockMock) {
+                    ((ClockMock) clock).fastForward(interval);
+                } else {
+                    try {
+                        Thread.sleep(interval.millis());
+                    } catch (InterruptedException ie) {
+                        Thread.currentThread().interrupt();
+                    }
+                }
+            }
+        }
+    }
+}
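The mock engine above is easiest to understand from the caller's side: a test registers a watch and then fires it on demand instead of waiting for the schedule. The following is a minimal, hypothetical sketch of such a caller; the test class name, the empty `ScheduleRegistry`, and the watch id `my-watch` are illustrative assumptions and are not part of this change. It only uses methods the mock defines (`add`, `trigger`, `getJobCount`, `remove`).

```java
package org.elasticsearch.xpack.watcher.trigger;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.core.watcher.watch.Watch;
import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleRegistry;

import java.time.Clock;
import java.util.Collections;

import static org.hamcrest.Matchers.is;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class ScheduleTriggerEngineMockUsageTests extends ESTestCase {

    public void testManualTriggering() {
        // no schedules are parsed in this sketch, so an empty parser set is enough for the registry
        ScheduleTriggerEngineMock engine = new ScheduleTriggerEngineMock(
                Settings.EMPTY, new ScheduleRegistry(Collections.emptySet()), Clock.systemUTC());
        engine.start(Collections.emptyList());

        // register a mocked watch under a known id
        Watch watch = mock(Watch.class);
        when(watch.id()).thenReturn("my-watch");
        engine.add(watch);
        assertThat(engine.getJobCount(), is(1));

        // fire the watch three times; any consumers registered with the engine receive the events
        engine.trigger("my-watch", 3);

        // removing the watch stops further manual triggering for that id
        assertThat(engine.remove("my-watch"), is(true));
        assertThat(engine.getJobCount(), is(0));
    }
}
```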
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/TriggerServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/TriggerServiceTests.java new file mode 100644 index 0000000000000..4aa34738cef66 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/TriggerServiceTests.java @@ -0,0 +1,188 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.actions.ActionWrapper; +import org.elasticsearch.xpack.core.watcher.actions.ExecutableAction; +import org.elasticsearch.xpack.core.watcher.common.stats.Counters; +import org.elasticsearch.xpack.core.watcher.condition.ExecutableCondition; +import org.elasticsearch.xpack.core.watcher.transform.ExecutableTransform; +import org.elasticsearch.xpack.core.watcher.trigger.Trigger; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.input.none.ExecutableNoneInput; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TriggerServiceTests extends ESTestCase { + + private static final String ENGINE_TYPE = "foo"; + private TriggerService service; + private Watch watch1; + private Watch watch2; + + @Before + public void setupTriggerService() { + TriggerEngine triggerEngine = mock(TriggerEngine.class); + when(triggerEngine.type()).thenReturn(ENGINE_TYPE); + service = new TriggerService(Settings.EMPTY, Collections.singleton(triggerEngine)); + + // simple watch, input and simple action + watch1 = createWatch("1"); + setMetadata(watch1); + setInput(watch1); + addAction(watch1, "my_action", null, null); + + watch2 = createWatch("2"); + setInput(watch2); + setCondition(watch2, "script"); + addAction(watch2, "my_action", "script", null); + } + + public void testStats() { + service.add(watch1); + + Counters stats = service.stats(); + assertThat(stats.size(), is(20L)); + assertThat(stats.get("watch.input.none.active"), is(1L)); + assertThat(stats.get("watch.input._all.active"), is(1L)); + assertThat(stats.get("watch.condition.always.active"), is(1L)); + assertThat(stats.get("watch.condition._all.active"), is(1L)); + assertThat(stats.get("watch.action.my_action.active"), is(1L)); + assertThat(stats.get("watch.action._all.active"), is(1L)); + assertThat(stats.get("watch.metadata.active"), is(1L)); + assertThat(stats.get("watch.metadata.total"), is(1L)); + assertThat(stats.get("count.active"), is(1L)); + assertThat(stats.get("count.total"), is(1L)); + assertThat(service.count(), is(1L)); + + service.add(watch2); + + stats = service.stats(); + assertThat(stats.size(), is(26L)); + assertThat(stats.get("watch.input.none.active"), is(2L)); + assertThat(stats.get("watch.input._all.active"), is(2L)); + assertThat(stats.get("watch.condition.script.active"), is(1L)); + assertThat(stats.get("watch.condition.always.active"), 
is(1L)); + assertThat(stats.get("watch.condition._all.active"), is(2L)); + assertThat(stats.get("watch.action.my_action.active"), is(2L)); + assertThat(stats.get("watch.action._all.active"), is(2L)); + assertThat(stats.get("watch.action.condition.script.active"), is(1L)); + assertThat(stats.get("watch.action.condition._all.active"), is(1L)); + assertThat(stats.get("watch.metadata.active"), is(1L)); + assertThat(stats.get("count.active"), is(2L)); + assertThat(service.count(), is(2L)); + + service.remove("1"); + stats = service.stats(); + assertThat(stats.size(), is(22L)); + assertThat(stats.get("count.active"), is(1L)); + assertThat(service.count(), is(1L)); + assertThat(stats.get("watch.input.none.active"), is(1L)); + assertThat(stats.get("watch.input._all.active"), is(1L)); + assertThat(stats.get("watch.condition.script.active"), is(1L)); + assertThat(stats.get("watch.condition._all.active"), is(1L)); + assertThat(stats.get("watch.action.my_action.active"), is(1L)); + assertThat(stats.get("watch.action._all.active"), is(1L)); + assertThat(stats.get("watch.action.condition.script.active"), is(1L)); + assertThat(stats.get("watch.action.condition._all.active"), is(1L)); + + service.remove("2"); + stats = service.stats(); + assertThat(stats.size(), is(6L)); + assertThat(stats.get("count.active"), is(0L)); + assertThat(stats.get("count.total"), is(0L)); + + } + + public void testCountOnPause() { + assertThat(service.count(), is(0L)); + service.add(watch2); + assertThat(service.count(), is(1L)); + service.add(watch1); + assertThat(service.count(), is(2L)); + service.pauseExecution(); + assertThat(service.count(), is(0L)); + } + + public void testCountOnStart() { + assertThat(service.count(), is(0L)); + service.start(Arrays.asList(watch1, watch2)); + assertThat(service.count(), is(2L)); + } + + public void testCountOnStop() { + assertThat(service.count(), is(0L)); + service.start(Arrays.asList(watch1, watch2)); + assertThat(service.count(), is(2L)); + service.stop(); + assertThat(service.count(), is(0L)); + } + + private Watch createWatch(String id) { + Watch watch = mock(Watch.class); + when(watch.id()).thenReturn(id); + Trigger trigger = mock(Trigger.class); + when(trigger.type()).thenReturn(ENGINE_TYPE); + when(watch.trigger()).thenReturn(trigger); + when(watch.condition()).thenReturn(InternalAlwaysCondition.INSTANCE); + return watch; + } + + private void setInput(Watch watch) { + ExecutableNoneInput noneInput = new ExecutableNoneInput(logger); + when(watch.input()).thenReturn(noneInput); + } + + private void setMetadata(Watch watch) { + Map metadata = Collections.singletonMap("foo", "bar"); + when(watch.metadata()).thenReturn(metadata); + } + + private void setCondition(Watch watch, String type) { + ExecutableCondition condition = mock(ExecutableCondition.class); + when(condition.type()).thenReturn(type); + when(watch.condition()).thenReturn(condition); + } + + private void addAction(Watch watch, String type, String condition, String transform) { + List actions = watch.actions(); + ArrayList newActions = new ArrayList<>(actions); + ActionWrapper actionWrapper = mock(ActionWrapper.class); + ExecutableAction executableAction = mock(ExecutableAction.class); + when(executableAction.type()).thenReturn(type); + if (condition != null) { + ExecutableCondition executableCondition = mock(ExecutableCondition.class); + when(executableCondition.type()).thenReturn(condition); + when(actionWrapper.condition()).thenReturn(executableCondition); + } + if (transform != null) { + ExecutableTransform 
executableTransform = mock(ExecutableTransform.class); + when(executableTransform.type()).thenReturn(transform); + when(actionWrapper.transform()).thenReturn(executableTransform); + } + when(actionWrapper.action()).thenReturn(executableAction); + newActions.add(actionWrapper); + when(watch.actions()).thenReturn(newActions); + } + + private void setTransform(Watch watch, String type) { + ExecutableTransform transform = mock(ExecutableTransform.class); + when(transform.type()).thenReturn(type); + when(watch.transform()).thenReturn(transform); + } +} \ No newline at end of file diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronScheduleTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronScheduleTests.java new file mode 100644 index 0000000000000..1ade767410b53 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/CronScheduleTests.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.hasItemInArray; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +public class CronScheduleTests extends ScheduleTestCase { + public void testInvalid() throws Exception { + try { + new CronSchedule("0 * * *"); + fail("expecting a validation error to be thrown when creating a cron schedule with invalid cron expression"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), is("invalid cron expression [0 * * *]")); + } + } + + public void testParseSingle() throws Exception { + XContentBuilder builder = jsonBuilder().value("0 0/5 * * * ?"); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + CronSchedule schedule = new CronSchedule.Parser().parse(parser); + assertThat(schedule.crons(), arrayWithSize(1)); + assertThat(schedule.crons()[0].expression(), is("0 0/5 * * * ?")); + } + + public void testParseMultiple() throws Exception { + XContentBuilder builder = jsonBuilder().value(new String[] { + "0 0/1 * * * ?", + "0 0/2 * * * ?", + "0 0/3 * * * ?" 
+ }); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + CronSchedule schedule = new CronSchedule.Parser().parse(parser); + String[] crons = expressions(schedule); + assertThat(crons, arrayWithSize(3)); + assertThat(crons, hasItemInArray("0 0/1 * * * ?")); + assertThat(crons, hasItemInArray("0 0/2 * * * ?")); + assertThat(crons, hasItemInArray("0 0/3 * * * ?")); + } + + public void testParseInvalidBadExpression() throws Exception { + XContentBuilder builder = jsonBuilder().value("0 0/5 * * ?"); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + try { + new CronSchedule.Parser().parse(parser); + fail("expected cron parsing to fail when using invalid cron expression"); + } catch (ElasticsearchParseException pe) { + // expected + assertThat(pe.getCause(), instanceOf(IllegalArgumentException.class)); + } + } + + public void testParseInvalidEmpty() throws Exception { + XContentBuilder builder = jsonBuilder(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + try { + new CronSchedule.Parser().parse(parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), is("could not parse [cron] schedule. expected either a cron string value or an array of cron " + + "string values, but found [null]")); + } + } + + public void testParseInvalidObject() throws Exception { + XContentBuilder builder = jsonBuilder().startObject().endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + try { + new CronSchedule.Parser().parse(parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), is("could not parse [cron] schedule. expected either a cron string value or an array of cron " + + "string values, but found [START_OBJECT]")); + } + } + + public void testParseInvalidEmptyArray() throws Exception { + XContentBuilder builder = jsonBuilder().value(new String[0]); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + try { + new CronSchedule.Parser().parse(parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), is("could not parse [cron] schedule. no cron expression found in cron array")); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/DailyScheduleTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/DailyScheduleTests.java new file mode 100644 index 0000000000000..85c7bcb8b1384 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/DailyScheduleTests.java @@ -0,0 +1,209 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.DayTimes; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.support.Strings.join; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.hasItemInArray; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class DailyScheduleTests extends ScheduleTestCase { + public void testDefault() throws Exception { + DailySchedule schedule = new DailySchedule(); + String[] crons = expressions(schedule.crons()); + assertThat(crons, arrayWithSize(1)); + assertThat(crons, arrayContaining("0 0 0 * * ?")); + } + + public void testSingleTime() throws Exception { + DayTimes time = validDayTime(); + DailySchedule schedule = new DailySchedule(time); + String[] crons = expressions(schedule); + assertThat(crons, arrayWithSize(1)); + assertThat(crons, arrayContaining("0 " + join(",", time.minute()) + " " + join(",", time.hour()) + " * * ?")); + } + + public void testSingleTimeInvalid() throws Exception { + HourAndMinute ham = invalidDayTime(); + try { + new DayTimes(ham.hour, ham.minute); + fail("expected an illegal argument exception on invalid time input"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("invalid time [")); + assertThat(e.getMessage(), either(containsString("invalid time hour value")).or(containsString("invalid time minute value"))); + } + } + + public void testMultipleTimes() throws Exception { + DayTimes[] times = validDayTimes(); + DailySchedule schedule = new DailySchedule(times); + String[] crons = expressions(schedule); + assertThat(crons, arrayWithSize(times.length)); + for (DayTimes time : times) { + assertThat(crons, hasItemInArray("0 " + join(",", time.minute()) + " " + join(",", time.hour()) + " * * ?")); + } + } + + public void testParserEmpty() throws Exception { + XContentBuilder builder = jsonBuilder().startObject().endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + DailySchedule schedule = new DailySchedule.Parser().parse(parser); + assertThat(schedule, notNullValue()); + assertThat(schedule.times().length, is(1)); + assertThat(schedule.times()[0], is(new DayTimes(0, 0))); + } + + public void testParserSingleTimeObject() throws Exception { + DayTimes time = validDayTime(); + XContentBuilder builder = jsonBuilder() + .startObject() + .startObject("at") + .array("hour", time.hour()) + .array("minute", time.minute()) + .endObject() + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + DailySchedule schedule = new DailySchedule.Parser().parse(parser); + assertThat(schedule, notNullValue()); + assertThat(schedule.times().length, is(1)); + 
assertThat(schedule.times()[0], is(time)); + } + + public void testParserSingleTimeObjectInvalid() throws Exception { + HourAndMinute time = invalidDayTime(); + XContentBuilder builder = jsonBuilder() + .startObject() + .startObject("at") + .field("hour", time.hour) + .field("minute", time.minute) + .endObject() + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + try { + new DailySchedule.Parser().parse(parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), is("could not parse [daily] schedule. invalid time value for field [at] - [START_OBJECT]")); + } + } + + public void testParserSingleTimeString() throws Exception { + String timeStr = validDayTimeStr(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field("at", timeStr) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + DailySchedule schedule = new DailySchedule.Parser().parse(parser); + assertThat(schedule, notNullValue()); + assertThat(schedule.times().length, is(1)); + assertThat(schedule.times()[0], is(DayTimes.parse(timeStr))); + } + + public void testParserSingleTimeStringInvalid() throws Exception { + XContentBuilder builder = jsonBuilder() + .startObject() + .field("at", invalidDayTimeStr()) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + try { + new DailySchedule.Parser().parse(parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), is("could not parse [daily] schedule. invalid time value for field [at] - [VALUE_STRING]")); + } + } + + public void testParserMultipleTimesObjects() throws Exception { + DayTimes[] times = validDayTimesFromNumbers(); + XContentBuilder builder = jsonBuilder() + .startObject() + .array("at", (Object[]) times) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + DailySchedule schedule = new DailySchedule.Parser().parse(parser); + assertThat(schedule, notNullValue()); + assertThat(schedule.times().length, is(times.length)); + for (int i = 0; i < times.length; i++) { + assertThat(schedule.times(), hasItemInArray(times[i])); + } + } + + public void testParserMultipleTimesObjectsInvalid() throws Exception { + HourAndMinute[] times = invalidDayTimes(); + XContentBuilder builder = jsonBuilder() + .startObject() + .array("at", (Object[]) times) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + try { + new DailySchedule.Parser().parse(parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), is("could not parse [daily] schedule. 
invalid time value for field [at] - [START_OBJECT]")); + } + } + + public void testParserMultipleTimesStrings() throws Exception { + DayTimes[] times = validDayTimesFromStrings(); + XContentBuilder builder = jsonBuilder() + .startObject() + .array("at", (Object[]) times) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + DailySchedule schedule = new DailySchedule.Parser().parse(parser); + assertThat(schedule, notNullValue()); + assertThat(schedule.times().length, is(times.length)); + for (int i = 0; i < times.length; i++) { + assertThat(schedule.times(), hasItemInArray(times[i])); + } + } + + public void testParserMultipleTimesStringsInvalid() throws Exception { + String[] times = invalidDayTimesAsStrings(); + XContentBuilder builder = jsonBuilder() + .startObject() + .array("at", times) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + try { + new DailySchedule.Parser().parse(parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), is("could not parse [daily] schedule. invalid time value for field [at] - [VALUE_STRING]")); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/HourlyScheduleTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/HourlyScheduleTests.java new file mode 100644 index 0000000000000..4683392beb043 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/HourlyScheduleTests.java @@ -0,0 +1,216 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.watcher.support.Strings; + +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class HourlyScheduleTests extends ScheduleTestCase { + public void testDefault() throws Exception { + HourlySchedule schedule = new HourlySchedule(); + String[] crons = expressions(schedule); + assertThat(crons, arrayWithSize(1)); + assertThat(crons, arrayContaining("0 0 * * * ?")); + } + + public void testSingleMinute() throws Exception { + int minute = validMinute(); + HourlySchedule schedule = new HourlySchedule(minute); + String[] crons = expressions(schedule); + assertThat(crons, arrayWithSize(1)); + assertThat(crons, arrayContaining("0 " + minute + " * * * ?")); + } + + public void testSingleMinuteInvalid() throws Exception { + try { + new HourlySchedule(invalidMinute()); + fail("Expected IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("invalid hourly minute")); + assertThat(e.getMessage(), containsString("minute must be between 0 and 59 incl.")); + } + } + + public void testMultipleMinutes() throws Exception { + int[] minutes = validMinutes(); + String minutesStr = Strings.join(",", minutes); + HourlySchedule schedule = new HourlySchedule(minutes); + String[] crons = expressions(schedule); + assertThat(crons, arrayWithSize(1)); + assertThat(crons, arrayContaining("0 " + minutesStr + " * * * ?")); + } + + public void testMultipleMinutesInvalid() throws Exception { + int[] minutes = invalidMinutes(); + try { + new HourlySchedule(minutes); + fail("Expected IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("invalid hourly minute")); + assertThat(e.getMessage(), containsString("minute must be between 0 and 59 incl.")); + } + } + + public void testParserEmpty() throws Exception { + XContentBuilder builder = jsonBuilder().startObject().endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + HourlySchedule schedule = new HourlySchedule.Parser().parse(parser); + assertThat(schedule, notNullValue()); + assertThat(schedule.minutes().length, is(1)); + assertThat(schedule.minutes()[0], is(0)); + } + + public void testParserSingleMinuteNumber() throws Exception { + int minute = validMinute(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field("minute", minute) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + HourlySchedule schedule = new HourlySchedule.Parser().parse(parser); + 
assertThat(schedule, notNullValue()); + assertThat(schedule.minutes().length, is(1)); + assertThat(schedule.minutes()[0], is(minute)); + } + + public void testParserSingleMinuteNumberInvalid() throws Exception { + XContentBuilder builder = jsonBuilder() + .startObject() + .field("minute", invalidMinute()) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + try { + new HourlySchedule.Parser().parse(parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), is("could not parse [hourly] schedule. invalid value for [minute]")); + } + } + + public void testParserSingleMinuteString() throws Exception { + int minute = validMinute(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field("minute", String.valueOf(minute)) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + HourlySchedule schedule = new HourlySchedule.Parser().parse(parser); + assertThat(schedule, notNullValue()); + assertThat(schedule.minutes().length, is(1)); + assertThat(schedule.minutes()[0], is(minute)); + } + + public void testParserSingleMinuteStringInvalid() throws Exception { + XContentBuilder builder = jsonBuilder() + .startObject() + .field("minute", String.valueOf(invalidMinute())) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + try { + new HourlySchedule.Parser().parse(parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), is("could not parse [hourly] schedule. invalid value for [minute]")); + } + } + + public void testParserMultipleMinutesNumbers() throws Exception { + int[] minutes = validMinutes(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field("minute", minutes) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + HourlySchedule schedule = new HourlySchedule.Parser().parse(parser); + assertThat(schedule, notNullValue()); + assertThat(schedule.minutes().length, is(minutes.length)); + List ints = Arrays.stream(schedule.minutes()).mapToObj(Integer::valueOf).collect(Collectors.toList()); + for (int i = 0; i < minutes.length; i++) { + assertThat(ints, hasItem(minutes[i])); + } + } + + public void testParserMultipleMinutesNumbersInvalid() throws Exception { + int[] minutes = invalidMinutes(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field("minute", minutes) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + try { + new HourlySchedule.Parser().parse(parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), is("could not parse [hourly] schedule. 
invalid value for [minute]")); + } + } + + public void testParserMultipleMinutesStrings() throws Exception { + int[] minutes = validMinutes(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field("minute", Arrays.stream(minutes).mapToObj(Integer::toString).collect(Collectors.toList())) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + HourlySchedule schedule = new HourlySchedule.Parser().parse(parser); + assertThat(schedule, notNullValue()); + assertThat(schedule.minutes().length, is(minutes.length)); + + List ints = Arrays.stream(schedule.minutes()).mapToObj(Integer::valueOf).collect(Collectors.toList()); + for (int i = 0; i < minutes.length; i++) { + assertThat(ints, hasItem(minutes[i])); + } + } + + public void testParserMultipleMinutesStringsInvalid() throws Exception { + int[] minutes = invalidMinutes(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field("minute", Arrays.stream(minutes).mapToObj(Integer::toString).collect(Collectors.toList())) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + try { + new HourlySchedule.Parser().parse(parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), is("could not parse [hourly] schedule. invalid value for [minute]")); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/IntervalScheduleTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/IntervalScheduleTests.java new file mode 100644 index 0000000000000..6067bd68dda7a --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/IntervalScheduleTests.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class IntervalScheduleTests extends ESTestCase { + public void testParseNumber() throws Exception { + long value = randomIntBetween(0, Integer.MAX_VALUE); + XContentBuilder builder = jsonBuilder().value(value); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + IntervalSchedule schedule = new IntervalSchedule.Parser().parse(parser); + assertThat(schedule, notNullValue()); + assertThat(schedule.interval().seconds(), is(value)); + } + + public void testParseNegativeNumber() throws Exception { + long value = randomIntBetween(Integer.MIN_VALUE, 0); + XContentBuilder builder = jsonBuilder().value(value); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + try { + new IntervalSchedule.Parser().parse(parser); + fail("exception expected, because interval is negative"); + } catch (ElasticsearchParseException e) { + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); + assertThat(e.getCause().getMessage(), containsString("interval can't be lower than 1000 ms, but")); + } + } + + public void testParseString() throws Exception { + IntervalSchedule.Interval value = randomTimeInterval(); + XContentBuilder builder = jsonBuilder().value(value); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + IntervalSchedule schedule = new IntervalSchedule.Parser().parse(parser); + assertThat(schedule, notNullValue()); + assertThat(schedule.interval(), is(value)); + } + + public void testParseInvalidString() throws Exception { + XContentBuilder builder = jsonBuilder().value("43S"); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + try { + new IntervalSchedule.Parser().parse(parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), containsString("unrecognized interval format [43S]")); + } + } + + public void testParseInvalidObject() throws Exception { + XContentBuilder builder = jsonBuilder().startObject().endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + try { + new IntervalSchedule.Parser().parse(parser); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), + containsString("expected either a numeric value (millis) or a string value representing time value")); 
+ assertThat(e.getMessage(), containsString("found [START_OBJECT]")); + } + } + + private static IntervalSchedule.Interval randomTimeInterval() { + int randomSize = randomIntBetween(0, IntervalSchedule.Interval.Unit.values().length - 1); + IntervalSchedule.Interval.Unit unit = IntervalSchedule.Interval.Unit.values()[randomSize]; + return new IntervalSchedule.Interval(randomIntBetween(1, 100), unit); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/MonthlyScheduleTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/MonthlyScheduleTests.java new file mode 100644 index 0000000000000..0e5f621e415ff --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/MonthlyScheduleTests.java @@ -0,0 +1,152 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.DayTimes; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.MonthTimes; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.support.Strings.join; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.hasItemInArray; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class MonthlyScheduleTests extends ScheduleTestCase { + public void testDefault() throws Exception { + MonthlySchedule schedule = new MonthlySchedule(); + String[] crons = expressions(schedule); + assertThat(crons, arrayWithSize(1)); + assertThat(crons, arrayContaining("0 0 0 1 * ?")); + } + + public void testSingleTime() throws Exception { + MonthTimes time = validMonthTime(); + MonthlySchedule schedule = new MonthlySchedule(time); + String[] crons = expressions(schedule); + assertThat(crons, arrayWithSize(time.times().length)); + for (DayTimes dayTimes : time.times()) { + String minStr = join(",", dayTimes.minute()); + String hrStr = join(",", dayTimes.hour()); + String dayStr = join(",", time.days()); + dayStr = dayStr.replace("32", "L"); + assertThat(crons, hasItemInArray("0 " + minStr + " " + hrStr + " " + dayStr + " * ?")); + } + } + + public void testMultipleTimes() throws Exception { + MonthTimes[] times = validMonthTimes(); + MonthlySchedule schedule = new MonthlySchedule(times); + String[] crons = expressions(schedule); + int count = 0; + for (MonthTimes time : times) { + count += time.times().length; + } + assertThat(crons, arrayWithSize(count)); + for (MonthTimes monthTimes : times) { + for (DayTimes dayTimes : monthTimes.times()) { + String minStr = join(",", dayTimes.minute()); + String hrStr = join(",", dayTimes.hour()); + String dayStr = join(",", monthTimes.days()); + dayStr = dayStr.replace("32", "L"); + assertThat(crons, hasItemInArray("0 " + minStr + " " + hrStr + " " + dayStr + " * ?")); + } + } + 
} + + public void testParserEmpty() throws Exception { + XContentBuilder builder = jsonBuilder().startObject().endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + MonthlySchedule schedule = new MonthlySchedule.Parser().parse(parser); + assertThat(schedule, notNullValue()); + assertThat(schedule.times().length, is(1)); + assertThat(schedule.times()[0], is(new MonthTimes())); + } + + public void testParserSingleTime() throws Exception { + DayTimes time = validDayTime(); + Object day = randomDayOfMonth(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field("on", day) + .startObject("at") + .array("hour", time.hour()) + .array("minute", time.minute()) + .endObject() + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + MonthlySchedule schedule = new MonthlySchedule.Parser().parse(parser); + assertThat(schedule, notNullValue()); + assertThat(schedule.times().length, is(1)); + assertThat(schedule.times()[0].days().length, is(1)); + assertThat(schedule.times()[0].days()[0], is(dayOfMonthToInt(day))); + assertThat(schedule.times()[0].times(), arrayWithSize(1)); + assertThat(schedule.times()[0].times(), hasItemInArray(time)); + } + + public void testParserSingleTimeInvalid() throws Exception { + HourAndMinute time = invalidDayTime(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field("on", randomBoolean() ? invalidDayOfMonth() : randomDayOfMonth()) + .startObject("at") + .field("hour", time.hour) + .field("minute", time.minute) + .endObject() + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + try { + new MonthlySchedule.Parser().parse(parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), is("could not parse [monthly] schedule. invalid month times")); + } + } + + public void testParserMultipleTimes() throws Exception { + MonthTimes[] times = validMonthTimes(); + XContentBuilder builder = jsonBuilder().value(times); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + MonthlySchedule schedule = new MonthlySchedule.Parser().parse(parser); + assertThat(schedule, notNullValue()); + assertThat(schedule.times().length, is(times.length)); + for (MonthTimes time : times) { + assertThat(schedule.times(), hasItemInArray(time)); + } + } + + public void testParserMultipleTimesInvalid() throws Exception { + HourAndMinute[] times = invalidDayTimes(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field("on", randomDayOfMonth()) + .array("at", (Object[]) times) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + try { + new MonthlySchedule.Parser().parse(parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), is("could not parse [monthly] schedule. 
invalid month times")); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleRegistryTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleRegistryTests.java new file mode 100644 index 0000000000000..2bd6fc2d41aa9 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleRegistryTests.java @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.junit.Before; + +import java.util.HashSet; +import java.util.Set; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class ScheduleRegistryTests extends ScheduleTestCase { + private ScheduleRegistry registry; + + @Before + public void init() throws Exception { + Set parsers = new HashSet<>(); + parsers.add(new IntervalSchedule.Parser()); + parsers.add(new CronSchedule.Parser()); + parsers.add(new HourlySchedule.Parser()); + parsers.add(new DailySchedule.Parser()); + parsers.add(new WeeklySchedule.Parser()); + parsers.add(new MonthlySchedule.Parser()); + registry = new ScheduleRegistry(parsers); + } + + public void testParserInterval() throws Exception { + IntervalSchedule interval = randomIntervalSchedule(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field(IntervalSchedule.TYPE, interval) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + Schedule schedule = registry.parse("ctx", parser); + assertThat(schedule, notNullValue()); + assertThat(schedule, instanceOf(IntervalSchedule.class)); + assertThat((IntervalSchedule) schedule, is(interval)); + } + + public void testParseCron() throws Exception { + Object cron = randomBoolean() ? 
+ Schedules.cron("* 0/5 * * * ?") : + Schedules.cron("* 0/2 * * * ?", "* 0/3 * * * ?", "* 0/5 * * * ?"); + XContentBuilder builder = jsonBuilder() + .startObject() + .field(CronSchedule.TYPE, cron) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + Schedule schedule = registry.parse("ctx", parser); + assertThat(schedule, notNullValue()); + assertThat(schedule, instanceOf(CronSchedule.class)); + assertThat(schedule, is(cron)); + } + + public void testParseHourly() throws Exception { + HourlySchedule hourly = randomHourlySchedule(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field(HourlySchedule.TYPE, hourly) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + Schedule schedule = registry.parse("ctx", parser); + assertThat(schedule, notNullValue()); + assertThat(schedule, instanceOf(HourlySchedule.class)); + assertThat((HourlySchedule) schedule, equalTo(hourly)); + } + + public void testParseDaily() throws Exception { + DailySchedule daily = randomDailySchedule(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field(DailySchedule.TYPE, daily) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + Schedule schedule = registry.parse("ctx", parser); + assertThat(schedule, notNullValue()); + assertThat(schedule, instanceOf(DailySchedule.class)); + assertThat((DailySchedule) schedule, equalTo(daily)); + } + + public void testParseWeekly() throws Exception { + WeeklySchedule weekly = randomWeeklySchedule(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field(WeeklySchedule.TYPE, weekly) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + Schedule schedule = registry.parse("ctx", parser); + assertThat(schedule, notNullValue()); + assertThat(schedule, instanceOf(WeeklySchedule.class)); + assertThat((WeeklySchedule) schedule, equalTo(weekly)); + } + + public void testParseMonthly() throws Exception { + MonthlySchedule monthly = randomMonthlySchedule(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field(MonthlySchedule.TYPE, monthly) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); + Schedule schedule = registry.parse("ctx", parser); + assertThat(schedule, notNullValue()); + assertThat(schedule, instanceOf(MonthlySchedule.class)); + assertThat((MonthlySchedule) schedule, equalTo(monthly)); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTestCase.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTestCase.java new file mode 100644 index 0000000000000..525461038e630 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTestCase.java @@ -0,0 +1,400 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.scheduler.Cron; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.DayOfWeek; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.DayTimes; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.Month; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.MonthTimes; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.WeekTimes; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.YearTimes; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.Set; + +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.daily; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.hourly; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.monthly; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.weekly; + +public abstract class ScheduleTestCase extends ESTestCase { + + protected static String[] expressions(CronnableSchedule schedule) { + return expressions(schedule.crons); + } + + protected static String[] expressions(Cron[] crons) { + String[] expressions = new String[crons.length]; + for (int i = 0; i < expressions.length; i++) { + expressions[i] = crons[i].expression(); + } + return expressions; + } + + protected static MonthlySchedule randomMonthlySchedule() { + switch (randomIntBetween(1, 4)) { + case 1: return monthly().build(); + case 2: return monthly().time(MonthTimes.builder().atMidnight()).build(); + case 3: return monthly().time(MonthTimes.builder().on(randomIntBetween(1, 31)).atMidnight()).build(); + default: return new MonthlySchedule(validMonthTimes()); + } + } + + protected static WeeklySchedule randomWeeklySchedule() { + switch (randomIntBetween(1, 4)) { + case 1: return weekly().build(); + case 2: return weekly().time(WeekTimes.builder().atMidnight()).build(); + case 3: return weekly().time(WeekTimes.builder().on(DayOfWeek.THURSDAY).atMidnight()).build(); + default: return new WeeklySchedule(validWeekTimes()); + } + } + + protected static DailySchedule randomDailySchedule() { + switch (randomIntBetween(1, 4)) { + case 1: return daily().build(); + case 2: return daily().atMidnight().build(); + case 3: return daily().atNoon().build(); + default: return new DailySchedule(validDayTimes()); + } + } + + protected static HourlySchedule randomHourlySchedule() { + switch (randomIntBetween(1, 4)) { + case 1: return hourly().build(); + case 2: return hourly().minutes(randomIntBetween(0, 59)).build(); + case 3: return hourly(randomIntBetween(0, 59)); + default: return hourly().minutes(validMinutes()).build(); + } + } + + protected static IntervalSchedule randomIntervalSchedule() { + switch (randomIntBetween(1, 3)) { + case 1: return interval(randomInterval().toString()); + case 2: return interval(randomIntBetween(1, 100), randomIntervalUnit()); + default: return new IntervalSchedule(randomInterval()); + } + } + + protected static IntervalSchedule.Interval randomInterval() { + return new IntervalSchedule.Interval(randomIntBetween(1, 100), randomIntervalUnit()); + } + + protected static 
IntervalSchedule.Interval.Unit randomIntervalUnit() { + return IntervalSchedule.Interval.Unit.values()[randomIntBetween(0, IntervalSchedule.Interval.Unit.values().length - 1)]; + } + + protected static YearTimes validYearTime() { + return new YearTimes(randomMonths(), randomDaysOfMonth(), validDayTimes()); + } + + protected static YearTimes[] validYearTimes() { + int count = randomIntBetween(2, 5); + Set times = new HashSet<>(); + for (int i = 0; i < count; i++) { + times.add(validYearTime()); + } + return times.toArray(new YearTimes[times.size()]); + } + + protected static MonthTimes validMonthTime() { + return new MonthTimes(randomDaysOfMonth(), validDayTimes()); + } + + protected static MonthTimes[] validMonthTimes() { + int count = randomIntBetween(2, 5); + Set times = new HashSet<>(); + for (int i = 0; i < count; i++) { + MonthTimes testMonthTimes = validMonthTime(); + boolean intersectsExistingMonthTimes = false; + for (MonthTimes validMonthTimes : times) { + if (validMonthTimes.intersects(testMonthTimes)) { + intersectsExistingMonthTimes = true; + } + } + if (!intersectsExistingMonthTimes) { + times.add(testMonthTimes); + } + } + return times.toArray(new MonthTimes[times.size()]); + } + + protected static WeekTimes validWeekTime() { + return new WeekTimes(randomDaysOfWeek(), validDayTimes()); + } + + protected static WeekTimes[] validWeekTimes() { + int count = randomIntBetween(2, 5); + Set times = new HashSet<>(); + for (int i = 0; i < count; i++) { + times.add(validWeekTime()); + } + return times.toArray(new WeekTimes[times.size()]); + } + + protected static EnumSet randomDaysOfWeek() { + int count = randomIntBetween(1, DayOfWeek.values().length-1); + Set days = new HashSet<>(); + for (int i = 0; i < count; i++) { + days.add(DayOfWeek.values()[randomIntBetween(0, count)]); + } + return EnumSet.copyOf(days); + } + + protected static EnumSet randomMonths() { + int count = randomIntBetween(1, 11); + Set months = new HashSet<>(); + for (int i = 0; i < count; i++) { + months.add(Month.values()[randomIntBetween(0, 11)]); + } + return EnumSet.copyOf(months); + } + + protected static Object randomMonth() { + int m = randomIntBetween(1, 14); + switch (m) { + case 13: + return "first"; + case 14: + return "last"; + default: + return Month.resolve(m); + } + } + + protected static int[] randomDaysOfMonth() { + if (rarely()) { + return new int[] { 32 }; + } + int count = randomIntBetween(1, 5); + Set days = new HashSet<>(); + for (int i = 0; i < count; i++) { + days.add(randomIntBetween(1, 31)); + } + return CollectionUtils.toArray(days); + } + + protected static Object randomDayOfMonth() { + int day = randomIntBetween(1, 32); + if (day == 32) { + return "last_day"; + } + if (day == 1) { + return randomBoolean() ? "first_day" : 1; + } + return day; + } + + protected static int dayOfMonthToInt(Object dom) { + if (dom instanceof Integer) { + return (Integer) dom; + } + if ("last_day".equals(dom)) { + return 32; + } + if ("first_day".equals(dom)) { + return 1; + } + throw new IllegalStateException("cannot convert given day-of-month [" + dom + "] to int"); + } + + protected static Object invalidDayOfMonth() { + return randomBoolean() ? + randomAlphaOfLength(5) : + randomBoolean() ? randomIntBetween(-30, -1) : randomIntBetween(33, 45); + } + + protected static DayTimes validDayTime() { + return randomBoolean() ? 
DayTimes.parse(validDayTimeStr()) : new DayTimes(validHours(), validMinutes()); + } + + protected static String validDayTimeStr() { + int hour = validHour(); + int min = validMinute(); + StringBuilder sb = new StringBuilder(); + if (hour < 10 && randomBoolean()) { + sb.append("0"); + } + sb.append(hour).append(":"); + if (min < 10) { + sb.append("0"); + } + return sb.append(min).toString(); + } + + protected static HourAndMinute invalidDayTime() { + return randomBoolean() ? + new HourAndMinute(invalidHour(), invalidMinute()) : + randomBoolean() ? + new HourAndMinute(validHour(), invalidMinute()) : + new HourAndMinute(invalidHour(), validMinute()); + } + + protected static String invalidDayTimeStr() { + int hour; + int min; + switch (randomIntBetween(1, 3)) { + case 1: + hour = invalidHour(); + min = validMinute(); + break; + case 2: + hour = validHour(); + min = invalidMinute(); + break; + default: + hour = invalidHour(); + min = invalidMinute(); + } + + StringBuilder sb = new StringBuilder(); + if (hour < 10 && randomBoolean()) { + sb.append("0"); + } + sb.append(hour).append(":"); + if (min < 10) { + sb.append("0"); + } + return sb.append(min).toString(); + } + + protected static DayTimes[] validDayTimes() { + int count = randomIntBetween(2, 5); + Set<DayTimes> times = new HashSet<>(); + for (int i = 0; i < count; i++) { + times.add(validDayTime()); + } + return times.toArray(new DayTimes[times.size()]); + } + + protected static DayTimes[] validDayTimesFromNumbers() { + int count = randomIntBetween(2, 5); + Set<DayTimes> times = new HashSet<>(); + for (int i = 0; i < count; i++) { + times.add(new DayTimes(validHours(), validMinutes())); + } + return times.toArray(new DayTimes[times.size()]); + } + + protected static DayTimes[] validDayTimesFromStrings() { + int count = randomIntBetween(2, 5); + Set<DayTimes> times = new HashSet<>(); + for (int i = 0; i < count; i++) { + times.add(DayTimes.parse(validDayTimeStr())); + } + return times.toArray(new DayTimes[times.size()]); + } + + protected static HourAndMinute[] invalidDayTimes() { + int count = randomIntBetween(2, 5); + Set<HourAndMinute> times = new HashSet<>(); + for (int i = 0; i < count; i++) { + times.add(invalidDayTime()); + } + return times.toArray(new HourAndMinute[times.size()]); + } + + protected static String[] invalidDayTimesAsStrings() { + int count = randomIntBetween(2, 5); + Set<String> times = new HashSet<>(); + for (int i = 0; i < count; i++) { + times.add(invalidDayTimeStr()); + } + return times.toArray(new String[times.size()]); + } + + protected static int validMinute() { + return randomIntBetween(0, 59); + } + + protected static int[] validMinutes() { + int count = randomIntBetween(2, 6); + int inc = 59 / count; + int[] minutes = new int[count]; + for (int i = 0; i < count; i++) { + minutes[i] = randomIntBetween(i * inc, (i + 1) * inc); + } + return minutes; + } + + protected static int invalidMinute() { + return randomBoolean() ?
randomIntBetween(60, 100) : randomIntBetween(-60, -1); + } + + protected static int[] invalidMinutes() { + int count = randomIntBetween(2, 6); + int[] minutes = new int[count]; + for (int i = 0; i < count; i++) { + minutes[i] = invalidMinute(); + } + return minutes; + } + + protected static int validHour() { + return randomIntBetween(0, 23); + } + + protected static int[] validHours() { + int count = randomIntBetween(2, 6); + int inc = 23 / count; + int[] hours = new int[count]; + for (int i = 0; i < count; i++) { + hours[i] = randomIntBetween(i * inc, (i + 1) * inc); + } + return hours; + } + + protected static int invalidHour() { + return randomBoolean() ? randomIntBetween(24, 40) : randomIntBetween(-60, -1); + } + + static class HourAndMinute implements ToXContentObject { + + int hour; + int minute; + + HourAndMinute(int hour, int minute) { + this.hour = hour; + this.minute = minute; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field(DayTimes.HOUR_FIELD.getPreferredName(), hour) + .field(DayTimes.MINUTE_FIELD.getPreferredName(), minute) + .endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + HourAndMinute that = (HourAndMinute) o; + + if (hour != that.hour) return false; + if (minute != that.minute) return false; + + return true; + } + + @Override + public int hashCode() { + int result = hour; + result = 31 * result + minute; + return result; + } + } + +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTriggerEventTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTriggerEventTests.java new file mode 100644 index 0000000000000..822dd02bd1909 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/ScheduleTriggerEventTests.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; + +import java.time.Clock; + +import static org.hamcrest.Matchers.is; + +public class ScheduleTriggerEventTests extends ESTestCase { + public void testParserRandomDateMath() throws Exception { + String triggeredTime = randomFrom("now", "now+5m", "2015-05-07T22:24:41.254Z", "2015-05-07T22:24:41.254Z||-5m"); + String scheduledTime = randomFrom("now", "now-5m", "2015-05-07T22:24:41.254Z", "2015-05-07T22:24:41.254Z||+5h"); + XContentBuilder jsonBuilder = XContentFactory.jsonBuilder(); + jsonBuilder.startObject(); + jsonBuilder.field(ScheduleTriggerEvent.Field.SCHEDULED_TIME.getPreferredName(), scheduledTime); + jsonBuilder.field(ScheduleTriggerEvent.Field.TRIGGERED_TIME.getPreferredName(), triggeredTime); + jsonBuilder.endObject(); + + XContentParser parser = createParser(jsonBuilder); + parser.nextToken(); + + ScheduleTriggerEvent scheduleTriggerEvent = ScheduleTriggerEvent.parse(parser, "_id", "_context", Clock.systemUTC()); + assertThat(scheduleTriggerEvent.scheduledTime().isAfter(0), is(true)); + assertThat(scheduleTriggerEvent.triggeredTime().isAfter(0), is(true)); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/WeeklyScheduleTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/WeeklyScheduleTests.java new file mode 100644 index 0000000000000..910ac2621a7a5 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/WeeklyScheduleTests.java @@ -0,0 +1,150 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.DayOfWeek; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.DayTimes; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.WeekTimes; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.support.Strings.join; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.hasItemInArray; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class WeeklyScheduleTests extends ScheduleTestCase { + public void testDefault() throws Exception { + WeeklySchedule schedule = new WeeklySchedule(); + String[] crons = expressions(schedule); + assertThat(crons, arrayWithSize(1)); + assertThat(crons, arrayContaining("0 0 0 ? 
* MON")); + } + + public void testSingleTime() throws Exception { + WeekTimes time = validWeekTime(); + WeeklySchedule schedule = new WeeklySchedule(time); + String[] crons = expressions(schedule); + assertThat(crons, arrayWithSize(time.times().length)); + for (DayTimes dayTimes : time.times()) { + assertThat(crons, hasItemInArray("0 " + join(",", dayTimes.minute()) + " " + join(",", dayTimes.hour()) + " ? * " + + Strings.collectionToCommaDelimitedString(time.days()))); + } + } + + public void testMultipleTimes() throws Exception { + WeekTimes[] times = validWeekTimes(); + WeeklySchedule schedule = new WeeklySchedule(times); + String[] crons = expressions(schedule); + int count = 0; + for (int i = 0; i < times.length; i++) { + count += times[i].times().length; + } + assertThat(crons, arrayWithSize(count)); + for (WeekTimes weekTimes : times) { + for (DayTimes dayTimes : weekTimes.times()) { + assertThat(crons, hasItemInArray("0 " + join(",", dayTimes.minute()) + " " + join(",", dayTimes.hour()) + " ? * " + + Strings.collectionToCommaDelimitedString(weekTimes.days()))); + } + } + } + + public void testParserEmpty() throws Exception { + XContentBuilder builder = jsonBuilder().startObject().endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + WeeklySchedule schedule = new WeeklySchedule.Parser().parse(parser); + assertThat(schedule, notNullValue()); + assertThat(schedule.times().length, is(1)); + assertThat(schedule.times()[0], is(new WeekTimes(DayOfWeek.MONDAY, new DayTimes()))); + } + + public void testParserSingleTime() throws Exception { + DayTimes time = validDayTime(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field("on", "mon") + .startObject("at") + .array("hour", time.hour()) + .array("minute", time.minute()) + .endObject() + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + WeeklySchedule schedule = new WeeklySchedule.Parser().parse(parser); + assertThat(schedule, notNullValue()); + assertThat(schedule.times().length, is(1)); + assertThat(schedule.times()[0].days(), hasSize(1)); + assertThat(schedule.times()[0].days(), contains(DayOfWeek.MONDAY)); + assertThat(schedule.times()[0].times(), arrayWithSize(1)); + assertThat(schedule.times()[0].times(), hasItemInArray(time)); + } + + public void testParserSingleTimeInvalid() throws Exception { + HourAndMinute time = invalidDayTime(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field("on", "mon") + .startObject("at") + .field("hour", time.hour) + .field("minute", time.minute) + .endObject() + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + try { + new WeeklySchedule.Parser().parse(parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), is("could not parse [weekly] schedule. 
invalid weekly times")); + } + } + + public void testParserMultipleTimes() throws Exception { + WeekTimes[] times = validWeekTimes(); + XContentBuilder builder = jsonBuilder().value(times); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + WeeklySchedule schedule = new WeeklySchedule.Parser().parse(parser); + assertThat(schedule, notNullValue()); + assertThat(schedule.times().length, is(times.length)); + for (int i = 0; i < times.length; i++) { + assertThat(schedule.times(), hasItemInArray(times[i])); + } + } + + public void testParserMultipleTimesObjectsInvalid() throws Exception { + HourAndMinute[] times = invalidDayTimes(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field("on", randomDaysOfWeek()) + .array("at", (Object[]) times) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + try { + new WeeklySchedule.Parser().parse(parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), is("could not parse [weekly] schedule. invalid weekly times")); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/YearlyScheduleTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/YearlyScheduleTests.java new file mode 100644 index 0000000000000..ae9c956d85a73 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/YearlyScheduleTests.java @@ -0,0 +1,162 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.trigger.schedule; + + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.DayTimes; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.YearTimes; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.support.Strings.join; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.hasItemInArray; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class YearlyScheduleTests extends ScheduleTestCase { + public void testDefault() throws Exception { + YearlySchedule schedule = new YearlySchedule(); + String[] crons = expressions(schedule); + assertThat(crons, arrayWithSize(1)); + assertThat(crons, arrayContaining("0 0 0 1 JAN ?")); + } + + public void testSingleTime() throws Exception { + YearTimes time = validYearTime(); + YearlySchedule schedule = new YearlySchedule(time); + String[] crons = expressions(schedule); + assertThat(crons, arrayWithSize(time.times().length)); + for (DayTimes dayTimes : time.times()) { + String minStr = join(",", dayTimes.minute()); + String hrStr = join(",", dayTimes.hour()); + String dayStr = join(",", time.days()); + dayStr = dayStr.replace("32", "L"); + String monthStr = Strings.collectionToCommaDelimitedString(time.months()); + String expression = "0 " + minStr + " " + hrStr + " " + dayStr + " " + monthStr + " ?"; + logger.info("expression: {}", expression); + assertThat(crons, hasItemInArray(expression)); + } + } + + public void testMultipleTimes() throws Exception { + YearTimes[] times = validYearTimes(); + YearlySchedule schedule = new YearlySchedule(times); + String[] crons = expressions(schedule); + int count = 0; + for (int i = 0; i < times.length; i++) { + count += times[i].times().length; + } + assertThat(crons, arrayWithSize(count)); + for (YearTimes yearTimes : times) { + for (DayTimes dayTimes : yearTimes.times()) { + String minStr = join(",", dayTimes.minute()); + String hrStr = join(",", dayTimes.hour()); + String dayStr = join(",", yearTimes.days()); + dayStr = dayStr.replace("32", "L"); + String monthStr = Strings.collectionToCommaDelimitedString(yearTimes.months()); + assertThat(crons, hasItemInArray("0 " + minStr + " " + hrStr + " " + dayStr + " " + monthStr + " ?")); + } + } + } + + public void testParserEmpty() throws Exception { + XContentBuilder builder = jsonBuilder().startObject().endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + YearlySchedule schedule = new YearlySchedule.Parser().parse(parser); + assertThat(schedule, notNullValue()); + assertThat(schedule.times().length, is(1)); + assertThat(schedule.times()[0], is(new YearTimes())); + } + + public void testParserSingleTime() throws Exception { + DayTimes time = validDayTime(); + Object day = randomDayOfMonth(); + Object month = randomMonth(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field("in", month) + .field("on", 
day) + .startObject("at") + .array("hour", time.hour()) + .array("minute", time.minute()) + .endObject() + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + YearlySchedule schedule = new YearlySchedule.Parser().parse(parser); + assertThat(schedule, notNullValue()); + assertThat(schedule.times().length, is(1)); + assertThat(schedule.times()[0].days().length, is(1)); + assertThat(schedule.times()[0].days()[0], is(dayOfMonthToInt(day))); + assertThat(schedule.times()[0].times(), arrayWithSize(1)); + assertThat(schedule.times()[0].times(), hasItemInArray(time)); + } + + public void testParserSingleTimeInvalid() throws Exception { + HourAndMinute time = invalidDayTime(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field("in", randomMonth()) + .field("on", randomBoolean() ? invalidDayOfMonth() : randomDayOfMonth()) + .startObject("at") + .field("hour", time.hour) + .field("minute", time.minute) + .endObject() + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + try { + new MonthlySchedule.Parser().parse(parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), is("could not parse [monthly] schedule. invalid month times")); + } + } + + public void testParserMultipleTimes() throws Exception { + YearTimes[] times = validYearTimes(); + XContentBuilder builder = jsonBuilder().value(times); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + YearlySchedule schedule = new YearlySchedule.Parser().parse(parser); + assertThat(schedule, notNullValue()); + assertThat(schedule.times().length, is(times.length)); + for (YearTimes time : times) { + assertThat(schedule.times(), hasItemInArray(time)); + } + } + + public void testParserMultipleTimesInvalid() throws Exception { + HourAndMinute[] times = invalidDayTimes(); + XContentBuilder builder = jsonBuilder() + .startObject() + .field("in", randomMonth()) + .field("on", randomDayOfMonth()) + .array("at", (Object[]) times) + .endObject(); + BytesReference bytes = BytesReference.bytes(builder); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); + parser.nextToken(); // advancing to the start object + try { + new YearlySchedule.Parser().parse(parser); + fail("Expected ElasticsearchParseException"); + } catch (ElasticsearchParseException e) { + assertThat(e.getMessage(), is("could not parse [yearly] schedule. invalid year times")); + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java new file mode 100644 index 0000000000000..7949998867b48 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java @@ -0,0 +1,258 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.schedule.engine; + +import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; +import org.elasticsearch.xpack.core.watcher.watch.ClockMock; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.input.none.ExecutableNoneInput; +import org.elasticsearch.xpack.watcher.trigger.TriggerEngine; +import org.elasticsearch.xpack.watcher.trigger.schedule.Schedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleRegistry; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.DayOfWeek; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.WeekTimes; +import org.joda.time.DateTime; +import org.junit.After; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.BitSet; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Consumer; + +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.daily; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.weekly; +import static org.hamcrest.Matchers.is; +import static org.joda.time.DateTimeZone.UTC; +import static org.mockito.Mockito.mock; + +public class TickerScheduleEngineTests extends ESTestCase { + + private TriggerEngine engine; + protected ClockMock clock = ClockMock.frozen(); + + @Before + public void init() throws Exception { + engine = createEngine(); + } + + private TriggerEngine createEngine() { + return new TickerScheduleTriggerEngine(Settings.EMPTY, + mock(ScheduleRegistry.class), clock); + } + + private void advanceClockIfNeeded(DateTime newCurrentDateTime) { + clock.setTime(newCurrentDateTime); + } + + @After + public void cleanup() throws Exception { + engine.stop(); + } + + public void testStart() throws Exception { + int count = randomIntBetween(2, 5); + final CountDownLatch firstLatch = new CountDownLatch(count); + final CountDownLatch secondLatch = new CountDownLatch(count); + List<Watch> watches = new ArrayList<>(); + for (int i = 0; i < count; i++) { + watches.add(createWatch(String.valueOf(i), interval("1s"))); + } + final BitSet bits = new BitSet(count); + + engine.register(new Consumer<Iterable<TriggerEvent>>() { + @Override + public void accept(Iterable<TriggerEvent> events) { + for (TriggerEvent event : events) { + int index = Integer.parseInt(event.jobName()); + if (!bits.get(index)) { + logger.info("job [{}] first fire", index); + bits.set(index); + firstLatch.countDown(); + } else { + logger.info("job [{}] second fire", index); + secondLatch.countDown(); + } + } + } + }); + + engine.start(watches); + advanceClockIfNeeded(new DateTime(clock.millis(), UTC).plusMillis(1100)); + if (!firstLatch.await(3 * count, TimeUnit.SECONDS)) { + fail("waiting too long for all watches to be triggered"); + } + + advanceClockIfNeeded(new DateTime(clock.millis(), UTC).plusMillis(1100)); + if (!secondLatch.await(3 * count, TimeUnit.SECONDS)) { +
fail("waiting too long for all watches to be triggered"); + } + engine.stop(); + assertThat(bits.cardinality(), is(count)); + } + + public void testAddHourly() throws Exception { + final String name = "job_name"; + final CountDownLatch latch = new CountDownLatch(1); + engine.start(Collections.emptySet()); + engine.register(new Consumer>() { + @Override + public void accept(Iterable events) { + for (TriggerEvent event : events) { + assertThat(event.jobName(), is(name)); + logger.info("triggered job on [{}]", clock); + } + latch.countDown(); + } + }); + + int randomMinute = randomIntBetween(0, 59); + DateTime testNowTime = new DateTime(clock.millis(), UTC).withMinuteOfHour(randomMinute) + .withSecondOfMinute(59); + DateTime scheduledTime = testNowTime.plusSeconds(2); + logger.info("Setting current time to [{}], job execution time [{}]", testNowTime, + scheduledTime); + + clock.setTime(testNowTime); + engine.add(createWatch(name, daily().at(scheduledTime.getHourOfDay(), + scheduledTime.getMinuteOfHour()).build())); + advanceClockIfNeeded(scheduledTime); + + if (!latch.await(5, TimeUnit.SECONDS)) { + fail("waiting too long for all watches to be triggered"); + } + } + + public void testAddDaily() throws Exception { + final String name = "job_name"; + final CountDownLatch latch = new CountDownLatch(1); + engine.start(Collections.emptySet()); + + engine.register(new Consumer>() { + @Override + public void accept(Iterable events) { + for (TriggerEvent event : events) { + assertThat(event.jobName(), is(name)); + logger.info("triggered job on [{}]", new DateTime(clock.millis(), UTC)); + latch.countDown(); + } + } + }); + + int randomHour = randomIntBetween(0, 23); + int randomMinute = randomIntBetween(0, 59); + + DateTime testNowTime = new DateTime(clock.millis(), UTC).withHourOfDay(randomHour) + .withMinuteOfHour(randomMinute).withSecondOfMinute(59); + DateTime scheduledTime = testNowTime.plusSeconds(2); + logger.info("Setting current time to [{}], job execution time [{}]", testNowTime, + scheduledTime); + + clock.setTime(testNowTime); + engine.add(createWatch(name, daily().at(scheduledTime.getHourOfDay(), + scheduledTime.getMinuteOfHour()).build())); + advanceClockIfNeeded(scheduledTime); + + if (!latch.await(5, TimeUnit.SECONDS)) { + fail("waiting too long for all watches to be triggered"); + } + } + + public void testAddWeekly() throws Exception { + final String name = "job_name"; + final CountDownLatch latch = new CountDownLatch(1); + engine.start(Collections.emptySet()); + engine.register(new Consumer>() { + @Override + public void accept(Iterable events) { + for (TriggerEvent event : events) { + assertThat(event.jobName(), is(name)); + logger.info("triggered job"); + } + latch.countDown(); + } + }); + + int randomHour = randomIntBetween(0, 23); + int randomMinute = randomIntBetween(0, 59); + int randomDay = randomIntBetween(1, 7); + + DateTime testNowTime = new DateTime(clock.millis(), UTC).withDayOfWeek(randomDay) + .withHourOfDay(randomHour).withMinuteOfHour(randomMinute).withSecondOfMinute(59); + DateTime scheduledTime = testNowTime.plusSeconds(2); + + logger.info("Setting current time to [{}], job execution time [{}]", testNowTime, + scheduledTime); + clock.setTime(testNowTime); + + // fun part here (aka WTF): DayOfWeek with Joda is MON-SUN, starting at 1 + // DayOfWeek with Watcher is SUN-SAT, starting at 1 + int watcherDay = (scheduledTime.getDayOfWeek() % 7) + 1; + engine.add(createWatch(name, weekly().time(WeekTimes.builder() + .on(DayOfWeek.resolve(watcherDay)) + 
.at(scheduledTime.getHourOfDay(), scheduledTime.getMinuteOfHour()).build()) + .build())); + advanceClockIfNeeded(scheduledTime); + + if (!latch.await(5, TimeUnit.SECONDS)) { + fail("waiting too long for all watches to be triggered"); + } + } + + public void testAddSameJobSeveralTimesAndExecutedOnce() throws InterruptedException { + engine.start(Collections.emptySet()); + + final CountDownLatch firstLatch = new CountDownLatch(1); + final CountDownLatch secondLatch = new CountDownLatch(1); + AtomicInteger counter = new AtomicInteger(0); + engine.register(new Consumer<Iterable<TriggerEvent>>() { + @Override + public void accept(Iterable<TriggerEvent> events) { + events.forEach(event -> { + if (counter.getAndIncrement() == 0) { + firstLatch.countDown(); + } else { + secondLatch.countDown(); + } + }); + } + }); + + int times = scaledRandomIntBetween(3, 30); + for (int i = 0; i < times; i++) { + engine.add(createWatch("_id", interval("1s"))); + } + + advanceClockIfNeeded(new DateTime(clock.millis(), UTC).plusMillis(1100)); + if (!firstLatch.await(3, TimeUnit.SECONDS)) { + fail("waiting too long for all watches to be triggered"); + } + + advanceClockIfNeeded(new DateTime(clock.millis(), UTC).plusMillis(1100)); + if (!secondLatch.await(3, TimeUnit.SECONDS)) { + fail("waiting too long for all watches to be triggered"); + } + + // ensure job was only called twice independent from its name + assertThat(counter.get(), is(2)); + } + + private Watch createWatch(String name, Schedule schedule) { + return new Watch(name, new ScheduleTrigger(schedule), new ExecutableNoneInput(logger), + InternalAlwaysCondition.INSTANCE, null, null, + Collections.emptyList(), null, null, Versions.MATCH_ANY); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalToolTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalToolTests.java new file mode 100644 index 0000000000000..2238842494817 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalToolTests.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.trigger.schedule.tool; + +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.CommandTestCase; + +public class CronEvalToolTests extends CommandTestCase { + @Override + protected Command newCommand() { + return new CronEvalTool(); + } + + public void testParse() throws Exception { + String countOption = randomBoolean() ? "-c" : "--count"; + int count = randomIntBetween(1, 100); + String output = execute(countOption, Integer.toString(count), "0 0 0 1-6 * ?"); + assertTrue(output, output.contains("Here are the next " + count + " times this cron expression will trigger")); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStatusIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStatusIntegrationTests.java new file mode 100644 index 0000000000000..9f738d8daa6b2 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStatusIntegrationTests.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V.
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.watch; + +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.watcher.client.WatcherClient; +import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; +import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchResponse; +import org.elasticsearch.xpack.watcher.condition.NeverCondition; +import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; + +import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; +import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule.Interval.Unit.SECONDS; +import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class WatchStatusIntegrationTests extends AbstractWatcherIntegrationTestCase { + + public void testThatStatusGetsUpdated() { + WatcherClient watcherClient = watcherClient(); + watcherClient.preparePutWatch("_name") + .setSource(watchBuilder() + .trigger(schedule(interval(5, SECONDS))) + .input(simpleInput()) + .condition(NeverCondition.INSTANCE) + .addAction("_logger", loggingAction("logged text"))) + .get(); + timeWarp().trigger("_name"); + + GetWatchResponse getWatchResponse = watcherClient.prepareGetWatch().setId("_name").get(); + assertThat(getWatchResponse.isFound(), is(true)); + assertThat(getWatchResponse.getSource(), notNullValue()); + assertThat(getWatchResponse.getStatus().lastChecked(), is(notNullValue())); + + GetResponse getResponse = client().prepareGet(".watches", "doc", "_name").get(); + getResponse.getSource(); + XContentSource source = new XContentSource(getResponse.getSourceAsBytesRef(), XContentType.JSON); + String lastChecked = source.getValue("status.last_checked"); + + assertThat(lastChecked, is(notNullValue())); + assertThat(getWatchResponse.getStatus().lastChecked().toString(), is(lastChecked)); + } + +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStatusTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStatusTests.java new file mode 100644 index 0000000000000..1a8263c0c33c7 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStatusTests.java @@ -0,0 +1,100 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.watcher.watch; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; +import org.elasticsearch.xpack.core.watcher.actions.ActionStatus.AckStatus.State; +import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; +import org.elasticsearch.xpack.watcher.actions.logging.LoggingAction; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.joda.time.DateTime.now; + +public class WatchStatusTests extends ESTestCase { + + public void testAckStatusIsResetOnUnmetCondition() { + HashMap<String, ActionStatus> myMap = new HashMap<>(); + ActionStatus actionStatus = new ActionStatus(now()); + myMap.put("foo", actionStatus); + + actionStatus.update(now(), new LoggingAction.Result.Success("foo")); + actionStatus.onAck(now()); + assertThat(actionStatus.ackStatus().state(), is(State.ACKED)); + + WatchStatus status = new WatchStatus(now(), myMap); + status.onCheck(false, now()); + + assertThat(status.actionStatus("foo").ackStatus().state(), is(State.AWAITS_SUCCESSFUL_EXECUTION)); + } + + public void testHeadersToXContent() throws Exception { + WatchStatus status = new WatchStatus(now(), Collections.emptyMap()); + String key = randomAlphaOfLength(10); + String value = randomAlphaOfLength(10); + Map<String, String> headers = Collections.singletonMap(key, value); + status.setHeaders(headers); + + // by default headers are hidden + try (XContentBuilder builder = jsonBuilder()) { + status.toXContent(builder, ToXContent.EMPTY_PARAMS); + try (XContentParser parser = createParser(builder)) { + Map<String, Object> fields = parser.map(); + assertThat(fields, not(hasKey(WatchStatus.Field.HEADERS.getPreferredName()))); + } + } + + // but they are required when storing a watch + try (XContentBuilder builder = jsonBuilder()) { + status.toXContent(builder, WatcherParams.builder().hideHeaders(false).build()); + try (XContentParser parser = createParser(builder)) { + parser.nextToken(); + Map<String, Object> fields = parser.map(); + assertThat(fields, hasKey(WatchStatus.Field.HEADERS.getPreferredName())); + assertThat(fields.get(WatchStatus.Field.HEADERS.getPreferredName()), instanceOf(Map.class)); + Map<String, String> extractedHeaders = (Map<String, String>) fields.get(WatchStatus.Field.HEADERS.getPreferredName()); + assertThat(extractedHeaders, is(headers)); + } + } + } + + public void testHeadersSerialization() throws IOException { + WatchStatus status = new WatchStatus(now(), Collections.emptyMap()); + String key = randomAlphaOfLength(10); + String value = randomAlphaOfLength(10); + Map<String, String> headers = Collections.singletonMap(key, value); + status.setHeaders(headers); + + BytesStreamOutput out = new BytesStreamOutput(); + status.writeTo(out); + BytesReference bytesReference = out.bytes(); + WatchStatus readStatus = WatchStatus.read(bytesReference.streamInput()); + assertThat(readStatus,
is(status)); + assertThat(readStatus.getHeaders(), is(headers)); + + // test equals + assertThat(readStatus.hashCode(), is(status.hashCode())); + assertThat(readStatus, equalTo(status)); + readStatus.getHeaders().clear(); + assertThat(readStatus, not(equalTo(status))); + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java new file mode 100644 index 0000000000000..572a361d25210 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java @@ -0,0 +1,684 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.watch; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.ScriptQueryBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.actions.ActionFactory; +import org.elasticsearch.xpack.core.watcher.actions.ActionRegistry; +import org.elasticsearch.xpack.core.watcher.actions.ActionStatus; +import org.elasticsearch.xpack.core.watcher.actions.ActionWrapper; +import org.elasticsearch.xpack.core.watcher.actions.throttler.ActionThrottler; +import org.elasticsearch.xpack.core.watcher.condition.ConditionFactory; +import org.elasticsearch.xpack.core.watcher.condition.ConditionRegistry; +import org.elasticsearch.xpack.core.watcher.condition.ExecutableCondition; +import org.elasticsearch.xpack.core.watcher.input.ExecutableInput; +import org.elasticsearch.xpack.core.watcher.input.none.NoneInput; +import org.elasticsearch.xpack.core.watcher.transform.ExecutableTransform; +import org.elasticsearch.xpack.core.watcher.transform.TransformFactory; +import org.elasticsearch.xpack.core.watcher.transform.TransformRegistry; +import org.elasticsearch.xpack.core.watcher.transform.chain.ChainTransform; +import org.elasticsearch.xpack.core.watcher.transform.chain.ExecutableChainTransform; +import org.elasticsearch.xpack.core.watcher.trigger.Trigger; +import org.elasticsearch.xpack.core.watcher.watch.ClockMock; +import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.core.watcher.watch.WatchField; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; +import org.elasticsearch.xpack.watcher.actions.email.EmailAction; +import 
org.elasticsearch.xpack.watcher.actions.email.EmailActionFactory; +import org.elasticsearch.xpack.watcher.actions.email.ExecutableEmailAction; +import org.elasticsearch.xpack.watcher.actions.index.ExecutableIndexAction; +import org.elasticsearch.xpack.watcher.actions.index.IndexAction; +import org.elasticsearch.xpack.watcher.actions.index.IndexActionFactory; +import org.elasticsearch.xpack.watcher.actions.logging.ExecutableLoggingAction; +import org.elasticsearch.xpack.watcher.actions.logging.LoggingAction; +import org.elasticsearch.xpack.watcher.actions.logging.LoggingActionFactory; +import org.elasticsearch.xpack.watcher.actions.webhook.ExecutableWebhookAction; +import org.elasticsearch.xpack.watcher.actions.webhook.WebhookAction; +import org.elasticsearch.xpack.watcher.actions.webhook.WebhookActionFactory; +import org.elasticsearch.xpack.watcher.common.http.HttpClient; +import org.elasticsearch.xpack.watcher.common.http.HttpMethod; +import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; +import org.elasticsearch.xpack.watcher.common.http.auth.HttpAuthRegistry; +import org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuthFactory; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.condition.AlwaysConditionTests; +import org.elasticsearch.xpack.watcher.condition.ArrayCompareCondition; +import org.elasticsearch.xpack.watcher.condition.CompareCondition; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.condition.NeverCondition; +import org.elasticsearch.xpack.watcher.condition.ScriptCondition; +import org.elasticsearch.xpack.watcher.input.InputBuilders; +import org.elasticsearch.xpack.watcher.input.InputFactory; +import org.elasticsearch.xpack.watcher.input.InputRegistry; +import org.elasticsearch.xpack.watcher.input.none.ExecutableNoneInput; +import org.elasticsearch.xpack.watcher.input.search.ExecutableSearchInput; +import org.elasticsearch.xpack.watcher.input.search.SearchInput; +import org.elasticsearch.xpack.watcher.input.search.SearchInputFactory; +import org.elasticsearch.xpack.watcher.input.simple.ExecutableSimpleInput; +import org.elasticsearch.xpack.watcher.input.simple.SimpleInput; +import org.elasticsearch.xpack.watcher.input.simple.SimpleInputFactory; +import org.elasticsearch.xpack.watcher.notification.email.DataAttachment; +import org.elasticsearch.xpack.watcher.notification.email.EmailService; +import org.elasticsearch.xpack.watcher.notification.email.EmailTemplate; +import org.elasticsearch.xpack.watcher.notification.email.HtmlSanitizer; +import org.elasticsearch.xpack.watcher.notification.email.Profile; +import org.elasticsearch.xpack.watcher.notification.email.attachment.EmailAttachments; +import org.elasticsearch.xpack.watcher.notification.email.attachment.EmailAttachmentsParser; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateService; +import org.elasticsearch.xpack.watcher.test.MockTextTemplateEngine; +import org.elasticsearch.xpack.watcher.test.WatcherTestUtils; +import org.elasticsearch.xpack.watcher.transform.script.ExecutableScriptTransform; +import org.elasticsearch.xpack.watcher.transform.script.ScriptTransform; +import org.elasticsearch.xpack.watcher.transform.script.ScriptTransformFactory; +import 
org.elasticsearch.xpack.watcher.transform.search.ExecutableSearchTransform; +import org.elasticsearch.xpack.watcher.transform.search.SearchTransform; +import org.elasticsearch.xpack.watcher.transform.search.SearchTransformFactory; +import org.elasticsearch.xpack.watcher.trigger.TriggerEngine; +import org.elasticsearch.xpack.watcher.trigger.TriggerService; +import org.elasticsearch.xpack.watcher.trigger.schedule.CronSchedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.DailySchedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.HourlySchedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.MonthlySchedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.Schedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleRegistry; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTriggerEngine; +import org.elasticsearch.xpack.watcher.trigger.schedule.WeeklySchedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.YearlySchedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.DayOfWeek; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.Month; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.MonthTimes; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.WeekTimes; +import org.elasticsearch.xpack.watcher.trigger.schedule.support.YearTimes; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.junit.Before; + +import java.io.IOException; +import java.time.Clock; +import java.time.Instant; +import java.time.ZoneOffset; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static java.util.Collections.singleton; +import static java.util.Collections.singletonMap; +import static java.util.Collections.unmodifiableMap; +import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.searchInput; +import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.templateRequest; +import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.joda.time.DateTimeZone.UTC; +import static org.mockito.Mockito.mock; + +public class WatchTests extends ESTestCase { + + private ScriptService scriptService; + private Client client; + private HttpClient httpClient; + private EmailService emailService; + private TextTemplateEngine templateEngine; + private HtmlSanitizer htmlSanitizer; + private HttpAuthRegistry authRegistry; + private XPackLicenseState licenseState; + private Logger logger; + private Settings settings = Settings.EMPTY; + private WatcherSearchTemplateService searchTemplateService; + + @Before + public void init() throws Exception { + scriptService = 
mock(ScriptService.class); + client = mock(Client.class); + httpClient = mock(HttpClient.class); + emailService = mock(EmailService.class); + templateEngine = mock(TextTemplateEngine.class); + htmlSanitizer = mock(HtmlSanitizer.class); + licenseState = mock(XPackLicenseState.class); + authRegistry = new HttpAuthRegistry(singletonMap("basic", new BasicAuthFactory(null))); + logger = Loggers.getLogger(WatchTests.class); + searchTemplateService = mock(WatcherSearchTemplateService.class); + } + + public void testParserSelfGenerated() throws Exception { + Clock clock = Clock.fixed(Instant.now(), ZoneOffset.UTC); + DateTime now = new DateTime(clock.millis(), UTC); + TransformRegistry transformRegistry = transformRegistry(); + boolean includeStatus = randomBoolean(); + Schedule schedule = randomSchedule(); + Trigger trigger = new ScheduleTrigger(schedule); + ScheduleRegistry scheduleRegistry = registry(schedule); + TriggerEngine triggerEngine = new ParseOnlyScheduleTriggerEngine(Settings.EMPTY, scheduleRegistry, clock); + TriggerService triggerService = new TriggerService(Settings.EMPTY, singleton(triggerEngine)); + + ExecutableInput input = randomInput(); + InputRegistry inputRegistry = registry(input.type()); + + ExecutableCondition condition = AlwaysConditionTests.randomCondition(scriptService); + ConditionRegistry conditionRegistry = conditionRegistry(); + + ExecutableTransform transform = randomTransform(); + + List actions = randomActions(); + ActionRegistry actionRegistry = registry(actions, conditionRegistry, transformRegistry); + + Map metadata = singletonMap("_key", "_val"); + + Map actionsStatuses = new HashMap<>(); + for (ActionWrapper action : actions) { + actionsStatuses.put(action.id(), new ActionStatus(now)); + } + WatchStatus watchStatus = new WatchStatus(now, unmodifiableMap(actionsStatuses)); + + TimeValue throttlePeriod = randomBoolean() ? 
null : TimeValue.timeValueSeconds(randomIntBetween(5, 10000)); + + Watch watch = new Watch("_name", trigger, input, condition, transform, throttlePeriod, actions, metadata, watchStatus, 1L); + + BytesReference bytes = BytesReference.bytes(jsonBuilder().value(watch)); + logger.info("{}", bytes.utf8ToString()); + WatchParser watchParser = new WatchParser(settings, triggerService, actionRegistry, inputRegistry, null, clock); + + Watch parsedWatch = watchParser.parse("_name", includeStatus, bytes, XContentType.JSON); + + if (includeStatus) { + assertThat(parsedWatch.status(), equalTo(watchStatus)); + } + assertThat(parsedWatch.trigger(), equalTo(trigger)); + assertThat(parsedWatch.input(), equalTo(input)); + assertThat(parsedWatch.condition(), equalTo(condition)); + if (throttlePeriod != null) { + assertThat(parsedWatch.throttlePeriod().millis(), equalTo(throttlePeriod.millis())); + } + assertThat(parsedWatch.metadata(), equalTo(metadata)); + assertThat(parsedWatch.actions(), equalTo(actions)); + } + + public void testThatBothStatusFieldsCanBeRead() throws Exception { + InputRegistry inputRegistry = mock(InputRegistry.class); + ActionRegistry actionRegistry = mock(ActionRegistry.class); + // a fake trigger service that advances past the trigger end object, which cannot be done with mocking + TriggerService triggerService = new TriggerService(Settings.EMPTY, Collections.emptySet()) { + @Override + public Trigger parseTrigger(String jobName, XContentParser parser) throws IOException { + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + } + + return new ScheduleTrigger(randomSchedule()); + } + }; + + DateTime now = new DateTime(UTC); + ClockMock clock = ClockMock.frozen(); + clock.setTime(now); + + List actions = randomActions(); + Map actionsStatuses = new HashMap<>(); + for (ActionWrapper action : actions) { + actionsStatuses.put(action.id(), new ActionStatus(now)); + } + WatchStatus watchStatus = new WatchStatus(new DateTime(clock.millis()), unmodifiableMap(actionsStatuses)); + + WatchParser watchParser = new WatchParser(settings, triggerService, actionRegistry, inputRegistry, null, clock); + XContentBuilder builder = jsonBuilder().startObject().startObject("trigger").endObject().field("status", watchStatus).endObject(); + Watch watch = watchParser.parse("foo", true, BytesReference.bytes(builder), XContentType.JSON); + assertThat(watch.status().state().getTimestamp().getMillis(), is(clock.millis())); + for (ActionWrapper action : actions) { + assertThat(watch.status().actionStatus(action.id()), is(actionsStatuses.get(action.id()))); + } + } + + public void testParserBadActions() throws Exception { + ClockMock clock = ClockMock.frozen(); + ScheduleRegistry scheduleRegistry = registry(randomSchedule()); + TriggerEngine triggerEngine = new ParseOnlyScheduleTriggerEngine(Settings.EMPTY, scheduleRegistry, clock); + TriggerService triggerService = new TriggerService(Settings.EMPTY, singleton(triggerEngine)); + ConditionRegistry conditionRegistry = conditionRegistry(); + ExecutableInput input = randomInput(); + InputRegistry inputRegistry = registry(input.type()); + + TransformRegistry transformRegistry = transformRegistry(); + + List actions = randomActions(); + ActionRegistry actionRegistry = registry(actions,conditionRegistry, transformRegistry); + + + XContentBuilder jsonBuilder = jsonBuilder() + .startObject() + .startArray("actions").endArray() + .endObject(); + WatchParser watchParser = new WatchParser(settings, triggerService, 
actionRegistry, inputRegistry, null, clock); + try { + watchParser.parse("failure", false, BytesReference.bytes(jsonBuilder), XContentType.JSON); + fail("This watch should fail to parse as actions is an array"); + } catch (ElasticsearchParseException pe) { + assertThat(pe.getMessage().contains("could not parse actions for watch [failure]"), is(true)); + } + } + + public void testParserDefaults() throws Exception { + Schedule schedule = randomSchedule(); + ScheduleRegistry scheduleRegistry = registry(schedule); + TriggerEngine triggerEngine = new ParseOnlyScheduleTriggerEngine(Settings.EMPTY, scheduleRegistry, Clock.systemUTC()); + TriggerService triggerService = new TriggerService(Settings.EMPTY, singleton(triggerEngine)); + + ConditionRegistry conditionRegistry = conditionRegistry(); + InputRegistry inputRegistry = registry(new ExecutableNoneInput(logger).type()); + TransformRegistry transformRegistry = transformRegistry(); + ActionRegistry actionRegistry = registry(Collections.emptyList(), conditionRegistry, transformRegistry); + + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + builder.startObject(WatchField.TRIGGER.getPreferredName()) + .field(ScheduleTrigger.TYPE, schedule(schedule).build()) + .endObject(); + builder.endObject(); + WatchParser watchParser = new WatchParser(settings, triggerService, actionRegistry, inputRegistry, null, Clock.systemUTC()); + Watch watch = watchParser.parse("failure", false, BytesReference.bytes(builder), XContentType.JSON); + assertThat(watch, notNullValue()); + assertThat(watch.trigger(), instanceOf(ScheduleTrigger.class)); + assertThat(watch.input(), instanceOf(ExecutableNoneInput.class)); + assertThat(watch.condition(), instanceOf(InternalAlwaysCondition.class)); + assertThat(watch.transform(), nullValue()); + assertThat(watch.actions(), notNullValue()); + assertThat(watch.actions().size(), is(0)); + } + + public void testParseWatch_verifyScriptLangDefault() throws Exception { + ScheduleRegistry scheduleRegistry = registry(new IntervalSchedule(new IntervalSchedule.Interval(1, + IntervalSchedule.Interval.Unit.SECONDS))); + TriggerEngine triggerEngine = new ParseOnlyScheduleTriggerEngine(Settings.EMPTY, scheduleRegistry, Clock.systemUTC()); + TriggerService triggerService = new TriggerService(Settings.EMPTY, singleton(triggerEngine)); + + ConditionRegistry conditionRegistry = conditionRegistry(); + InputRegistry inputRegistry = registry(SearchInput.TYPE); + TransformRegistry transformRegistry = transformRegistry(); + ActionRegistry actionRegistry = registry(Collections.emptyList(), conditionRegistry, transformRegistry); + WatchParser watchParser = new WatchParser(settings, triggerService, actionRegistry, inputRegistry, null, Clock.systemUTC()); + + WatcherSearchTemplateService searchTemplateService = new WatcherSearchTemplateService(settings, scriptService, xContentRegistry()); + + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + + builder.startObject("trigger"); + builder.startObject("schedule"); + builder.field("interval", "99w"); + builder.endObject(); + builder.endObject(); + + builder.startObject("input"); + builder.startObject("search"); + builder.startObject("request"); + builder.startObject("body"); + builder.startObject("query"); + builder.startObject("script"); + if (randomBoolean()) { + builder.field("script", "return true"); + } else { + builder.startObject("script"); + builder.field("source", "return true"); + builder.endObject(); + } + builder.endObject(); + builder.endObject(); + 
builder.endObject(); + builder.endObject(); + builder.endObject(); + builder.endObject(); + + builder.startObject("condition"); + if (randomBoolean()) { + builder.field("script", "return true"); + } else { + builder.startObject("script"); + builder.field("source", "return true"); + builder.endObject(); + } + builder.endObject(); + + builder.endObject(); + + // parse in default mode: + Watch watch = watchParser.parse("_id", false, BytesReference.bytes(builder), XContentType.JSON); + assertThat(((ScriptCondition) watch.condition()).getScript().getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); + WatcherSearchTemplateRequest request = ((SearchInput) watch.input().input()).getRequest(); + SearchRequest searchRequest = searchTemplateService.toSearchRequest(request); + assertThat(((ScriptQueryBuilder) searchRequest.source().query()).script().getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); + } + + public void testParseWatchWithoutInput() throws Exception { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + + builder.startObject("trigger").startObject("schedule").field("interval", "99w").endObject().endObject(); + builder.startObject("condition").startObject("always").endObject().endObject(); + builder.startObject("actions").startObject("logme") + .startObject("logging").field("text", "foo").endObject() + .endObject().endObject(); + builder.endObject(); + + WatchParser parser = createWatchparser(); + Watch watch = parser.parse("_id", false, BytesReference.bytes(builder), XContentType.JSON); + assertThat(watch, is(notNullValue())); + assertThat(watch.input().type(), is(NoneInput.TYPE)); + } + } + + public void testParseWatchWithoutAction() throws Exception { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + + builder.startObject("trigger").startObject("schedule").field("interval", "99w").endObject().endObject(); + builder.startObject("input").startObject("simple").endObject().endObject(); + builder.startObject("condition").startObject("always").endObject().endObject(); + builder.endObject(); + + WatchParser parser = createWatchparser(); + Watch watch = parser.parse("_id", false, BytesReference.bytes(builder), XContentType.JSON); + assertThat(watch, is(notNullValue())); + assertThat(watch.actions(), hasSize(0)); + } + } + + public void testParseWatchWithoutTriggerDoesNotWork() throws Exception { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + + builder.startObject("input").startObject("simple").endObject().endObject(); + builder.startObject("condition").startObject("always").endObject().endObject(); + builder.startObject("actions").startObject("logme") + .startObject("logging").field("text", "foo").endObject() + .endObject().endObject(); + builder.endObject(); + + WatchParser parser = createWatchparser(); + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, + () -> parser.parse("_id", false, BytesReference.bytes(builder), XContentType.JSON)); + assertThat(e.getMessage(), is("could not parse watch [_id]. 
missing required field [trigger]")); + } + } + + private WatchParser createWatchparser() throws Exception { + LoggingAction loggingAction = new LoggingAction(new TextTemplate("foo"), null, null); + List actions = Collections.singletonList(new ActionWrapper("_logging_", randomThrottler(), null, null, + new ExecutableLoggingAction(loggingAction, logger, settings, new MockTextTemplateEngine()))); + + ScheduleRegistry scheduleRegistry = registry(new IntervalSchedule(new IntervalSchedule.Interval(1, + IntervalSchedule.Interval.Unit.SECONDS))); + TriggerEngine triggerEngine = new ParseOnlyScheduleTriggerEngine(Settings.EMPTY, scheduleRegistry, Clock.systemUTC()); + TriggerService triggerService = new TriggerService(Settings.EMPTY, singleton(triggerEngine)); + + ConditionRegistry conditionRegistry = conditionRegistry(); + InputRegistry inputRegistry = registry(SimpleInput.TYPE); + TransformRegistry transformRegistry = transformRegistry(); + ActionRegistry actionRegistry = registry(actions, conditionRegistry, transformRegistry); + + return new WatchParser(settings, triggerService, actionRegistry, inputRegistry, null, Clock.systemUTC()); + } + + private static Schedule randomSchedule() { + String type = randomFrom(CronSchedule.TYPE, HourlySchedule.TYPE, DailySchedule.TYPE, WeeklySchedule.TYPE, MonthlySchedule.TYPE, + YearlySchedule.TYPE, IntervalSchedule.TYPE); + switch (type) { + case CronSchedule.TYPE: + return new CronSchedule("0/5 * * * * ? *"); + case HourlySchedule.TYPE: + return HourlySchedule.builder().minutes(30).build(); + case DailySchedule.TYPE: + return DailySchedule.builder().atNoon().build(); + case WeeklySchedule.TYPE: + return WeeklySchedule.builder().time(WeekTimes.builder().on(DayOfWeek.FRIDAY).atMidnight()).build(); + case MonthlySchedule.TYPE: + return MonthlySchedule.builder().time(MonthTimes.builder().on(1).atNoon()).build(); + case YearlySchedule.TYPE: + return YearlySchedule.builder().time(YearTimes.builder().in(Month.JANUARY).on(1).atMidnight()).build(); + default: + return new IntervalSchedule(IntervalSchedule.Interval.seconds(5)); + } + } + + private static ScheduleRegistry registry(Schedule schedule) { + Set parsers = new HashSet<>(); + switch (schedule.type()) { + case CronSchedule.TYPE: + parsers.add(new CronSchedule.Parser()); + return new ScheduleRegistry(parsers); + case HourlySchedule.TYPE: + parsers.add(new HourlySchedule.Parser()); + return new ScheduleRegistry(parsers); + case DailySchedule.TYPE: + parsers.add(new DailySchedule.Parser()); + return new ScheduleRegistry(parsers); + case WeeklySchedule.TYPE: + parsers.add(new WeeklySchedule.Parser()); + return new ScheduleRegistry(parsers); + case MonthlySchedule.TYPE: + parsers.add(new MonthlySchedule.Parser()); + return new ScheduleRegistry(parsers); + case YearlySchedule.TYPE: + parsers.add(new YearlySchedule.Parser()); + return new ScheduleRegistry(parsers); + case IntervalSchedule.TYPE: + parsers.add(new IntervalSchedule.Parser()); + return new ScheduleRegistry(parsers); + default: + throw new IllegalArgumentException("unknown schedule [" + schedule + "]"); + } + } + + private ExecutableInput randomInput() { + String type = randomFrom(SearchInput.TYPE, SimpleInput.TYPE); + switch (type) { + case SearchInput.TYPE: + SearchInput searchInput = searchInput(WatcherTestUtils.templateRequest(searchSource(), "idx")) + .timeout(randomBoolean() ? 
null : timeValueSeconds(between(1, 10000))) + .build(); + return new ExecutableSearchInput(searchInput, logger, client, searchTemplateService, null); + default: + SimpleInput simpleInput = InputBuilders.simpleInput(singletonMap("_key", "_val")).build(); + return new ExecutableSimpleInput(simpleInput, logger); + } + } + + private InputRegistry registry(String inputType) { + Map parsers = new HashMap<>(); + switch (inputType) { + case SearchInput.TYPE: + parsers.put(SearchInput.TYPE, new SearchInputFactory(settings, client, xContentRegistry(), scriptService)); + return new InputRegistry(Settings.EMPTY, parsers); + default: + parsers.put(SimpleInput.TYPE, new SimpleInputFactory(settings)); + return new InputRegistry(Settings.EMPTY, parsers); + } + } + + + + private ConditionRegistry conditionRegistry() { + Map parsers = new HashMap<>(); + parsers.put(InternalAlwaysCondition.TYPE, (c, id, p) -> InternalAlwaysCondition.parse(id, p)); + parsers.put(NeverCondition.TYPE, (c, id, p) -> NeverCondition.parse(id, p)); + parsers.put(ArrayCompareCondition.TYPE, (c, id, p) -> ArrayCompareCondition.parse(c, id, p)); + parsers.put(CompareCondition.TYPE, (c, id, p) -> CompareCondition.parse(c, id, p)); + parsers.put(ScriptCondition.TYPE, (c, id, p) -> ScriptCondition.parse(scriptService, id, p)); + return new ConditionRegistry(parsers, ClockMock.frozen()); + } + + private ExecutableTransform randomTransform() { + String type = randomFrom(ScriptTransform.TYPE, SearchTransform.TYPE, ChainTransform.TYPE); + TimeValue timeout = randomBoolean() ? timeValueSeconds(between(1, 10000)) : null; + DateTimeZone timeZone = randomBoolean() ? DateTimeZone.UTC : null; + switch (type) { + case ScriptTransform.TYPE: + return new ExecutableScriptTransform(new ScriptTransform(mockScript("_script")), logger, scriptService); + case SearchTransform.TYPE: + SearchTransform transform = new SearchTransform( + templateRequest(searchSource()), timeout, timeZone); + return new ExecutableSearchTransform(transform, logger, client, searchTemplateService, TimeValue.timeValueMinutes(1)); + default: // chain + SearchTransform searchTransform = new SearchTransform( + templateRequest(searchSource()), timeout, timeZone); + ScriptTransform scriptTransform = new ScriptTransform(mockScript("_script")); + + ChainTransform chainTransform = new ChainTransform(Arrays.asList(searchTransform, scriptTransform)); + return new ExecutableChainTransform(chainTransform, logger, Arrays.asList( + new ExecutableSearchTransform(new SearchTransform( + templateRequest(searchSource()), timeout, timeZone), + logger, client, searchTemplateService, TimeValue.timeValueMinutes(1)), + new ExecutableScriptTransform(new ScriptTransform(mockScript("_script")), + logger, scriptService))); + } + } + + private TransformRegistry transformRegistry() { + Map factories = new HashMap<>(); + factories.put(ScriptTransform.TYPE, new ScriptTransformFactory(settings, scriptService)); + factories.put(SearchTransform.TYPE, new SearchTransformFactory(settings, client, xContentRegistry(), scriptService)); + return new TransformRegistry(Settings.EMPTY, unmodifiableMap(factories)); + } + + private List randomActions() { + List list = new ArrayList<>(); + if (randomBoolean()) { + EmailAction action = new EmailAction(EmailTemplate.builder().build(), null, null, Profile.STANDARD, + randomFrom(DataAttachment.JSON, DataAttachment.YAML), EmailAttachments.EMPTY_ATTACHMENTS); + list.add(new ActionWrapper("_email_" + randomAlphaOfLength(8), randomThrottler(), + 
AlwaysConditionTests.randomCondition(scriptService), randomTransform(), + new ExecutableEmailAction(action, logger, emailService, templateEngine, htmlSanitizer, Collections.emptyMap()))); + } + if (randomBoolean()) { + DateTimeZone timeZone = randomBoolean() ? DateTimeZone.UTC : null; + TimeValue timeout = randomBoolean() ? timeValueSeconds(between(1, 10000)) : null; + WriteRequest.RefreshPolicy refreshPolicy = randomBoolean() ? null : randomFrom(WriteRequest.RefreshPolicy.values()); + IndexAction action = new IndexAction("_index", "_type", randomBoolean() ? "123" : null, null, timeout, timeZone, + refreshPolicy); + list.add(new ActionWrapper("_index_" + randomAlphaOfLength(8), randomThrottler(), + AlwaysConditionTests.randomCondition(scriptService), randomTransform(), + new ExecutableIndexAction(action, logger, client, TimeValue.timeValueSeconds(30), + TimeValue.timeValueSeconds(30)))); + } + if (randomBoolean()) { + HttpRequestTemplate httpRequest = HttpRequestTemplate.builder("test.host", randomIntBetween(8000, 9000)) + .method(randomFrom(HttpMethod.GET, HttpMethod.POST, HttpMethod.PUT)) + .path(new TextTemplate("_url")) + .build(); + WebhookAction action = new WebhookAction(httpRequest); + list.add(new ActionWrapper("_webhook_" + randomAlphaOfLength(8), randomThrottler(), + AlwaysConditionTests.randomCondition(scriptService), randomTransform(), + new ExecutableWebhookAction(action, logger, httpClient, templateEngine))); + } + return list; + } + + private ActionRegistry registry(List actions, ConditionRegistry conditionRegistry, TransformRegistry transformRegistry) { + Map parsers = new HashMap<>(); + for (ActionWrapper action : actions) { + switch (action.action().type()) { + case EmailAction.TYPE: + parsers.put(EmailAction.TYPE, new EmailActionFactory(settings, emailService, templateEngine, + new EmailAttachmentsParser(Collections.emptyMap()))); + break; + case IndexAction.TYPE: + parsers.put(IndexAction.TYPE, new IndexActionFactory(settings, client)); + break; + case WebhookAction.TYPE: + parsers.put(WebhookAction.TYPE, new WebhookActionFactory(settings, httpClient, + new HttpRequestTemplate.Parser(authRegistry), templateEngine)); + break; + case LoggingAction.TYPE: + parsers.put(LoggingAction.TYPE, new LoggingActionFactory(settings, new MockTextTemplateEngine())); + break; + } + } + return new ActionRegistry(unmodifiableMap(parsers), conditionRegistry, transformRegistry, Clock.systemUTC(), licenseState); + } + + private ActionThrottler randomThrottler() { + return new ActionThrottler(Clock.systemUTC(), randomBoolean() ? 
null : timeValueSeconds(randomIntBetween(1, 10000)), + licenseState); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(Arrays.asList( + new NamedXContentRegistry.Entry(QueryBuilder.class, new ParseField(MatchAllQueryBuilder.NAME), (p, c) -> + MatchAllQueryBuilder.fromXContent(p)), + new NamedXContentRegistry.Entry(QueryBuilder.class, new ParseField(ScriptQueryBuilder.NAME), (p, c) -> + ScriptQueryBuilder.fromXContent(p)) + )); + } + + public static class ParseOnlyScheduleTriggerEngine extends ScheduleTriggerEngine { + + public ParseOnlyScheduleTriggerEngine(Settings settings, ScheduleRegistry registry, Clock clock) { + super(settings, registry, clock); + } + + @Override + public void start(Collection jobs) { + } + + @Override + public void stop() { + } + + @Override + public void add(Watch watch) { + } + + @Override + public void pauseExecution() { + } + + @Override + public int getJobCount() { + return 0; + } + + @Override + public boolean remove(String jobId) { + return false; + } + } +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/clock/ClockTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/clock/ClockTests.java new file mode 100644 index 0000000000000..d9f57e28b4842 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/clock/ClockTests.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.watch.clock; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.watcher.watch.ClockMock; +import org.joda.time.DateTime; + +import java.time.Clock; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.joda.time.DateTimeZone.UTC; + +public class ClockTests extends ESTestCase { + public void testNowUTC() { + Clock clockMock = ClockMock.frozen(); + assertThat(new DateTime(clockMock.millis(), UTC).getZone(), equalTo(UTC)); + assertThat(new DateTime(Clock.systemUTC().millis(), UTC).getZone(), equalTo(UTC)); + } + + public void testFreezeUnfreeze() throws Exception { + ClockMock clockMock = ClockMock.frozen(); + final long millis = clockMock.millis(); + for (int i = 0; i < 10; i++) { + assertThat(clockMock.millis(), equalTo(millis)); + } + clockMock.unfreeze(); + assertBusy(() -> assertThat(clockMock.millis(), greaterThan(millis))); + } +} diff --git a/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/security/keystore/testnode.jks b/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/security/keystore/testnode.jks new file mode 100644 index 0000000000000..4df11f34c3650 Binary files /dev/null and b/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/security/keystore/testnode.jks differ diff --git a/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/security/keystore/truststore-testnode-only.jks b/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/security/keystore/truststore-testnode-only.jks new file mode 100644 index 0000000000000..d75109b2a6854 Binary files /dev/null and b/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/security/keystore/truststore-testnode-only.jks differ diff --git 
a/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.jks b/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.jks new file mode 100644 index 0000000000000..ec482775bd055 Binary files /dev/null and b/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode-no-subjaltname.jks differ diff --git a/x-pack/qa/audit-tests/build.gradle b/x-pack/qa/audit-tests/build.gradle new file mode 100644 index 0000000000000..8af672fe92aee --- /dev/null +++ b/x-pack/qa/audit-tests/build.gradle @@ -0,0 +1,39 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') +} + +String outputDir = "${buildDir}/generated-resources/${project.name}" +task copyXPackPluginProps(type: Copy) { // wth is this? + from project(xpackModule('core')).file('src/main/plugin-metadata') + from project(xpackModule('core')).tasks.pluginProperties + from project(xpackModule('security')).file('src/main/plugin-metadata') + from project(xpackModule('security')).tasks.pluginProperties + into outputDir +} +project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) + +integTestCluster { + distribution 'zip' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.audit.enabled', 'true' + setting 'xpack.security.audit.outputs', 'index' + setting 'xpack.license.self_generated.type', 'trial' + setting 'logger.level', 'DEBUG' + setupCommand 'setupDummyUser', + 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'test_user', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} diff --git a/x-pack/qa/audit-tests/src/test/java/org/elasticsearch/xpack/security/audit/IndexAuditIT.java b/x-pack/qa/audit-tests/src/test/java/org/elasticsearch/xpack/security/audit/IndexAuditIT.java new file mode 100644 index 0000000000000..3467316c24f6c --- /dev/null +++ b/x-pack/qa/audit-tests/src/test/java/org/elasticsearch/xpack/security/audit/IndexAuditIT.java @@ -0,0 +1,195 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.audit; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.http.message.BasicHeader; +import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse; +import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.Response; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.TestCluster; +import org.elasticsearch.xpack.core.XPackClientPlugin; +import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.Collection; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; + +public class IndexAuditIT extends ESIntegTestCase { + private static final String USER = "test_user"; + private static final String PASS = "x-pack-test-password"; + + @Override + protected TestCluster buildTestCluster(Scope scope, long seed) throws IOException { + TestCluster testCluster = super.buildTestCluster(scope, seed); + return new TestCluster(seed) { + + @Override + public void afterTest() throws IOException { + testCluster.afterTest(); + } + + @Override + public Client client() { + return testCluster.client(); + } + + @Override + public int size() { + return testCluster.size(); + } + + @Override + public int numDataNodes() { + return testCluster.numDataNodes(); + } + + @Override + public int numDataAndMasterNodes() { + return testCluster.numDataAndMasterNodes(); + } + + @Override + public InetSocketAddress[] httpAddresses() { + return testCluster.httpAddresses(); + } + + @Override + public void close() throws IOException { + testCluster.close(); + } + + @Override + public void ensureEstimatedStats() { + // stats are not going to be accurate for these tests since the index audit trail + // is running and changing the values so we wrap the test cluster to skip these + // checks + } + + @Override + public String getClusterName() { + return testCluster.getClusterName(); + } + + @Override + public Iterable getClients() { + return testCluster.getClients(); + } + + @Override + public NamedWriteableRegistry getNamedWriteableRegistry() { + return testCluster.getNamedWriteableRegistry(); + } + }; + } + + public void testIndexAuditTrailWorking() throws Exception { + Response response = getRestClient().performRequest("GET", "/", + new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + UsernamePasswordToken.basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray())))); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + final AtomicReference lastClusterState = new 
AtomicReference<>(); + final AtomicBoolean indexExists = new AtomicBoolean(false); + final boolean found = awaitBusy(() -> { + if (indexExists.get() == false) { + ClusterState state = client().admin().cluster().prepareState().get().getState(); + lastClusterState.set(state); + for (ObjectCursor cursor : state.getMetaData().getIndices().keys()) { + if (cursor.value.startsWith(".security_audit_log")) { + logger.info("found audit index [{}]", cursor.value); + indexExists.set(true); + break; + } + } + + if (indexExists.get() == false) { + return false; + } + } + + ensureYellowAndNoInitializingShards(".security_audit_log*"); + logger.info("security audit log index is yellow"); + ClusterState state = client().admin().cluster().prepareState().get().getState(); + lastClusterState.set(state); + + logger.info("refreshing audit indices"); + client().admin().indices().prepareRefresh(".security_audit_log*").get(); + logger.info("refreshed audit indices"); + return client().prepareSearch(".security_audit_log*").setQuery(QueryBuilders.matchQuery("principal", USER)) + .get().getHits().getTotalHits() > 0; + }, 60L, TimeUnit.SECONDS); + + assertTrue("Did not find security audit index. Current cluster state:\n" + lastClusterState.get().toString(), found); + + SearchResponse searchResponse = client().prepareSearch(".security_audit_log*").setQuery( + QueryBuilders.matchQuery("principal", USER)).get(); + assertThat(searchResponse.getHits().getHits().length, greaterThan(0)); + assertThat(searchResponse.getHits().getAt(0).getSourceAsMap().get("principal"), is(USER)); + } + + public void testAuditTrailTemplateIsRecreatedAfterDelete() throws Exception { + // this is already "tested" by the test framework since we wipe the templates before and after, + // but lets be explicit about the behavior + awaitIndexTemplateCreation(); + + // delete the template + DeleteIndexTemplateResponse deleteResponse = client().admin().indices() + .prepareDeleteTemplate(IndexAuditTrail.INDEX_TEMPLATE_NAME).execute().actionGet(); + assertThat(deleteResponse.isAcknowledged(), is(true)); + awaitIndexTemplateCreation(); + } + + private void awaitIndexTemplateCreation() throws InterruptedException { + boolean found = awaitBusy(() -> { + GetIndexTemplatesResponse response = client().admin().indices() + .prepareGetTemplates(IndexAuditTrail.INDEX_TEMPLATE_NAME).execute().actionGet(); + if (response.getIndexTemplates().size() > 0) { + for (IndexTemplateMetaData indexTemplateMetaData : response.getIndexTemplates()) { + if (IndexAuditTrail.INDEX_TEMPLATE_NAME.equals(indexTemplateMetaData.name())) { + return true; + } + } + } + return false; + }); + + assertThat("index template [" + IndexAuditTrail.INDEX_TEMPLATE_NAME + "] was not created", found, is(true)); + } + + @Override + protected Settings externalClusterClientSettings() { + return Settings.builder() + .put(SecurityField.USER_SETTING.getKey(), USER + ":" + PASS) + .put(NetworkModule.TRANSPORT_TYPE_KEY, "security4") + .build(); + } + + @Override + protected Collection> transportClientPlugins() { + return Arrays.asList(XPackClientPlugin.class); + } + +} diff --git a/x-pack/qa/build.gradle b/x-pack/qa/build.gradle new file mode 100644 index 0000000000000..1570b218592fe --- /dev/null +++ b/x-pack/qa/build.gradle @@ -0,0 +1,32 @@ +// this file must exist so that qa projects are found +// by the elasticsearch x-plugins include mechanism + +import org.elasticsearch.gradle.test.RestIntegTestTask + +subprojects { + // HACK: please fix this + // we want to add the rest api specs for xpack to qa tests, 
but we + // need to wait until after the project is evaluated to only apply + // to those that rest tests. this used to be done automatically + // when xpack was a plugin, but now there is no place with xpack as a module. + // instead, we should package these and make them easy to use for rest tests, + // but currently, they must be copied into the resources of the test runner. + project.tasks.withType(RestIntegTestTask) { + File xpackResources = new File(xpackProject('plugin').projectDir, 'src/test/resources') + project.copyRestSpec.from(xpackResources) { + include 'rest-api-spec/api/**' + } + } +} + +/* Remove assemble on all qa projects because we don't need to publish + * artifacts for them. */ +gradle.projectsEvaluated { + subprojects { + Task assemble = project.tasks.findByName('assemble') + if (assemble) { + project.tasks.remove(assemble) + project.build.dependsOn.remove('assemble') + } + } +} diff --git a/x-pack/qa/core-rest-tests-with-security/build.gradle b/x-pack/qa/core-rest-tests-with-security/build.gradle new file mode 100644 index 0000000000000..c23432e5127f7 --- /dev/null +++ b/x-pack/qa/core-rest-tests-with-security/build.gradle @@ -0,0 +1,42 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') +} + +integTest { + includePackaged = true +} + +integTestRunner { + systemProperty 'tests.rest.blacklist', + ['cat.aliases/10_basic/Empty cluster', + 'index/10_with_id/Index with ID', + 'indices.get_alias/10_basic/Get alias against closed indices', + 'cat.templates/10_basic/No templates', + 'cat.templates/10_basic/Sort templates', + 'cat.templates/10_basic/Multiple template', + ].join(',') +} + +integTestCluster { + setting 'xpack.security.enabled', 'true' + setting 'xpack.watcher.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + setupCommand 'setupDummyUser', + 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'test_user', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} diff --git a/x-pack/qa/core-rest-tests-with-security/src/test/java/org/elasticsearch/xpack/security/CoreWithSecurityClientYamlTestSuiteIT.java b/x-pack/qa/core-rest-tests-with-security/src/test/java/org/elasticsearch/xpack/security/CoreWithSecurityClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..89ebc2db0eebb --- /dev/null +++ b/x-pack/qa/core-rest-tests-with-security/src/test/java/org/elasticsearch/xpack/security/CoreWithSecurityClientYamlTestSuiteIT.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; +import org.apache.lucene.util.TimeUnits; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; + +@TimeoutSuite(millis = 30 * TimeUnits.MINUTE) // as default timeout seems not enough on the jenkins VMs +public class CoreWithSecurityClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + private static final String USER = "test_user"; + private static final String PASS = "x-pack-test-password"; + + public CoreWithSecurityClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } +} + diff --git a/x-pack/qa/core-rest-tests-with-security/src/test/resources/rest-api-spec/test/rankeval/10_rankeval.yml b/x-pack/qa/core-rest-tests-with-security/src/test/resources/rest-api-spec/test/rankeval/10_rankeval.yml new file mode 100644 index 0000000000000..6dae2bb2a6773 --- /dev/null +++ b/x-pack/qa/core-rest-tests-with-security/src/test/resources/rest-api-spec/test/rankeval/10_rankeval.yml @@ -0,0 +1,56 @@ +# remove this test from core-tests-with-security as soon as we can +# pull in rest test from modules in core +--- +"Basic Rankeval test": + + - skip: + version: " - 6.2.99" + reason: response format was updated in 6.3 + + - do: + indices.create: + index: foo + body: + settings: + index: + number_of_shards: 1 + - do: + index: + index: foo + type: bar + id: doc1 + body: { "text": "berlin" } + + - do: + index: + index: foo + type: bar + id: doc2 + body: { "text": "amsterdam" } + + - do: + indices.refresh: {} + + - do: + rank_eval: + body: { + "requests" : [ + { + "id": "amsterdam_query", + "request": { "query": { "match" : {"text" : "amsterdam" }}}, + "ratings": [ + {"_index": "foo", "_id": "doc1", "rating": 0}, + {"_index": "foo", "_id": "doc2", "rating": 1} + ] + }, + { + "id" : "berlin_query", + "request": { "query": { "match" : { "text" : "berlin" } }, "size" : 10 }, + "ratings": [{"_index": "foo", "_id": "doc1", "rating": 1}] + } + ], + "metric" : { "precision": { "ignore_unlabeled" : true }} + } + + - match: { quality_level: 1 } + diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle new file mode 100644 index 0000000000000..8f5952d61edfb --- /dev/null +++ b/x-pack/qa/full-cluster-restart/build.gradle @@ -0,0 +1,280 @@ +import org.elasticsearch.gradle.test.NodeInfo +import org.elasticsearch.gradle.test.RestIntegTestTask +import org.elasticsearch.gradle.Version + +import java.nio.charset.StandardCharsets +import java.nio.file.Paths +import java.util.regex.Matcher + +// Apply the java plugin to this project so the 
sources can be edited in an IDE +apply plugin: 'elasticsearch.build' +test.enabled = false + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile (project(path: xpackModule('security'), configuration: 'runtime')) { + // Need to drop the guava dependency here or we get a conflict with watcher's guava dependency. + // This is total #$%, but the solution is to get the SAML realm (which uses guava) out of security proper + exclude group: "com.google.guava", module: "guava" + } + testCompile project(path: xpackModule('watcher'), configuration: 'runtime') + + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile (project(path: xpackModule('security'), configuration: 'testArtifacts')) { + // Need to drop the guava dependency here or we get a conflict with watcher's guava dependency. + // This is total #$%, but the solution is to get the SAML realm (which uses guava) out of security proper + exclude group: "com.google.guava", module: "guava" + } +} + +Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> + File tmpFile = new File(node.cwd, 'wait.success') + + // wait up to twenty seconds + final long stopTime = System.currentTimeMillis() + 20000L; + Exception lastException = null; + + while (System.currentTimeMillis() < stopTime) { + lastException = null; + // we use custom wait logic here as the elastic user is not available immediately and ant.get will fail when a 401 is returned + HttpURLConnection httpURLConnection = null; + try { + httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health?wait_for_nodes=${node.config.numNodes}&wait_for_status=yellow").openConnection(); + httpURLConnection.setRequestProperty("Authorization", "Basic " + + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8))); + httpURLConnection.setRequestMethod("GET"); + httpURLConnection.setConnectTimeout(1000); + httpURLConnection.setReadTimeout(30000); // read needs to wait for nodes! + httpURLConnection.connect(); + if (httpURLConnection.getResponseCode() == 200) { + tmpFile.withWriter StandardCharsets.UTF_8.name(), { + it.write(httpURLConnection.getInputStream().getText(StandardCharsets.UTF_8.name())) + } + break; + } + } catch (Exception e) { + logger.debug("failed to call cluster health", e) + lastException = e + } finally { + if (httpURLConnection != null) { + httpURLConnection.disconnect(); + } + } + + // did not start, so wait a bit before trying again + Thread.sleep(500L); + } + if (tmpFile.exists() == false && lastException != null) { + logger.error("final attempt of calling cluster health failed", lastException) + } + return tmpFile.exists() +} + +Project mainProject = project + +String coreFullClusterRestartPath = project(':qa:full-cluster-restart').projectDir.toPath().resolve('src/test/java').toString() +sourceSets { + test { + java { + srcDirs += [coreFullClusterRestartPath] + } + } +} + +licenseHeaders { + approvedLicenses << 'Apache' +} + +/** + * Subdirectories of this project are test rolling upgrades with various + * configuration options based on their name. 
+ */ +subprojects { + Matcher m = project.name =~ /with(out)?-system-key/ + if (false == m.matches()) { + throw new InvalidUserDataException("Invalid project name [${project.name}]") + } + boolean withSystemKey = m.group(1) == null + + apply plugin: 'elasticsearch.standalone-test' + + // Use resources from the rolling-upgrade project in subdirectories + sourceSets { + test { + java { + srcDirs = ["${mainProject.projectDir}/src/test/java", coreFullClusterRestartPath] + } + resources { + srcDirs = ["${mainProject.projectDir}/src/test/resources"] + } + } + } + + licenseHeaders { + approvedLicenses << 'Apache' + } + + String outputDir = "${buildDir}/generated-resources/${project.name}" + + // This is a top level task which we will add dependencies to below. + // It is a single task that can be used to backcompat tests against all versions. + task bwcTest { + description = 'Runs backwards compatibility tests.' + group = 'verification' + } + + String output = "${buildDir}/generated-resources/${project.name}" + task copyTestNodeKeystore(type: Copy) { + from project(xpackModule('core')) + .file('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks') + into outputDir + } + + for (Version version : bwcVersions.indexCompatible) { + String baseName = "v${version}" + + Task oldClusterTest = tasks.create(name: "${baseName}#oldClusterTest", type: RestIntegTestTask) { + mustRunAfter(precommit) + } + + Object extension = extensions.findByName("${baseName}#oldClusterTestCluster") + configure(extensions.findByName("${baseName}#oldClusterTestCluster")) { + dependsOn copyTestNodeKeystore + if (version.before('6.3.0')) { + plugin xpackProject('plugin').path + } + bwcVersion = version + numBwcNodes = 2 + numNodes = 2 + clusterName = 'full-cluster-restart' + String usersCli = version.before('6.3.0') ? 
'bin/x-pack/users' : 'bin/elasticsearch-users' + setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + waitCondition = waitWithAuth + + // some tests rely on the translog not being flushed + setting 'indices.memory.shard_inactive_time', '20m' + + // debug logging for testRecovery see https://github.com/elastic/x-pack-elasticsearch/issues/2691 + setting 'logger.level', 'DEBUG' + + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.transport.ssl.enabled', 'true' + setting 'xpack.ssl.keystore.path', 'testnode.jks' + setting 'xpack.ssl.keystore.password', 'testnode' + setting 'xpack.license.self_generated.type', 'trial' + dependsOn copyTestNodeKeystore + extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') + if (withSystemKey) { + if (version.onOrAfter('5.1.0') && version.before('6.0.0')) { + // The setting didn't exist until 5.1.0 + setting 'xpack.security.system_key.required', 'true' + } + if (version.onOrAfter('6.0.0')) { + keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" + } else { + extraConfigFile 'x-pack/system_key', "${mainProject.projectDir}/src/test/resources/system_key" + } + setting 'xpack.watcher.encrypt_sensitive_data', 'true' + } + } + + Task oldClusterTestRunner = tasks.getByName("${baseName}#oldClusterTestRunner") + oldClusterTestRunner.configure { + systemProperty 'tests.is_old_cluster', 'true' + systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT") + systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") + exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' + } + + Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask) + + configure(extensions.findByName("${baseName}#upgradedClusterTestCluster")) { + dependsOn oldClusterTestRunner, + "${baseName}#oldClusterTestCluster#node0.stop", + "${baseName}#oldClusterTestCluster#node1.stop" + numNodes = 2 + clusterName = 'full-cluster-restart' + dataDir = { nodeNum -> oldClusterTest.nodes[nodeNum].dataDir } + cleanShared = false // We want to keep snapshots made by the old cluster! 
+ setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + waitCondition = waitWithAuth + + // debug logging for testRecovery see https://github.com/elastic/x-pack-elasticsearch/issues/2691 + setting 'logger.level', 'DEBUG' + + // some tests rely on the translog not being flushed + setting 'indices.memory.shard_inactive_time', '20m' + setting 'xpack.security.enabled', 'true' + setting 'xpack.ssl.keystore.path', 'testnode.jks' + keystoreSetting 'xpack.ssl.keystore.secure_password', 'testnode' + setting 'xpack.license.self_generated.type', 'trial' + dependsOn copyTestNodeKeystore + extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') + if (withSystemKey) { + setting 'xpack.watcher.encrypt_sensitive_data', 'true' + keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" + } + } + + Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") + upgradedClusterTestRunner.configure { + systemProperty 'tests.is_old_cluster', 'false' + systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT") + systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") + exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' + } + + Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") { + dependsOn = [upgradedClusterTest] + } + + if (project.bwc_tests_enabled) { + bwcTest.dependsOn(versionBwcTest) + } + } + + test.enabled = false // no unit tests for full cluster restarts, only the rest integration test + + // basic integ tests includes testing bwc against the most recent version + task integTest { + if (project.bwc_tests_enabled) { + for (final def version : bwcVersions.snapshotsIndexCompatible) { + dependsOn "v${version}#bwcTest" + } + } + } + + check.dependsOn(integTest) + + dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('watcher'), configuration: 'runtime') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') + } + + // copy x-pack plugin info so it is on the classpath and security manager has the right permissions + task copyXPackRestSpec(type: Copy) { + dependsOn(project.configurations.restSpec, 'processTestResources') + from project(xpackModule('core')).sourceSets.test.resources + include 'rest-api-spec/api/**' + into project.sourceSets.test.output.resourcesDir + } + + task copyXPackPluginProps(type: Copy) { + dependsOn(copyXPackRestSpec) + from project(xpackModule('core')).file('src/main/plugin-metadata') + from project(xpackModule('core')).tasks.pluginProperties + into outputDir + } + project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) + + repositories { + maven { + url "https://artifacts.elastic.co/maven" + } + maven { + url "https://snapshots.elastic.co/maven" + } + } +} diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java new file mode 100644 index 0000000000000..c5091c0d2703b --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/CoreFullClusterRestartIT.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.restart; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.upgrades.FullClusterRestartIT; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; + +public class CoreFullClusterRestartIT extends FullClusterRestartIT { + + @Override + protected Settings restClientSettings() { + String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } +} diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java new file mode 100644 index 0000000000000..48a8ba7e2281a --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -0,0 +1,496 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.restart; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.Version; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.StreamsUtils; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils; +import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; +import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.elasticsearch.xpack.security.support.IndexLifecycleManager; +import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; +import org.elasticsearch.xpack.watcher.actions.logging.LoggingAction; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; +import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; +import static org.hamcrest.Matchers.anyOf; 
+import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasItems; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.startsWith; + +public class FullClusterRestartIT extends ESRestTestCase { + private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster")); + private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); + + @Before + public void waitForMlTemplates() throws Exception { + XPackRestTestHelper.waitForMlTemplates(client()); + } + + @Override + protected boolean preserveIndicesUponCompletion() { + return true; + } + + @Override + protected boolean preserveSnapshotsUponCompletion() { + return true; + } + + @Override + protected boolean preserveReposUponCompletion() { + return true; + } + + @Override + protected boolean preserveTemplatesUponCompletion() { + return true; + } + + @Override + protected Settings restClientSettings() { + String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + // we increase the timeout here to 90 seconds to handle long waits for a green + // cluster health. the waits for green need to be longer than a minute to + // account for delayed shards + .put(ESRestTestCase.CLIENT_RETRY_TIMEOUT, "90s") + .put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "90s") + .build(); + } + + /** + * Tests that a single document survives. Super basic smoke test. + */ + public void testSingleDoc() throws IOException { + String docLocation = "/testsingledoc/doc/1"; + String doc = "{\"test\": \"test\"}"; + + if (runningAgainstOldCluster) { + client().performRequest("PUT", docLocation, singletonMap("refresh", "true"), + new StringEntity(doc, ContentType.APPLICATION_JSON)); + } + + assertThat(toStr(client().performRequest("GET", docLocation)), containsString(doc)); + } + + @SuppressWarnings("unchecked") + public void testSecurityNativeRealm() throws Exception { + if (runningAgainstOldCluster) { + createUser("preupgrade_user"); + createRole("preupgrade_role"); + } else { + waitForYellow(".security"); + Response settingsResponse = client().performRequest("GET", "/.security/_settings/index.format"); + Map settingsResponseMap = toMap(settingsResponse); + logger.info("settings response map {}", settingsResponseMap); + final boolean needsUpgrade; + final String concreteSecurityIndex; + if (settingsResponseMap.isEmpty()) { + needsUpgrade = true; + concreteSecurityIndex = ".security"; + } else { + concreteSecurityIndex = settingsResponseMap.keySet().iterator().next(); + Map indexSettingsMap = + (Map) settingsResponseMap.get(concreteSecurityIndex); + Map settingsMap = (Map) indexSettingsMap.get("settings"); + logger.info("settings map {}", settingsMap); + if (settingsMap.containsKey("index")) { + int format = Integer.parseInt(String.valueOf(((Map)settingsMap.get("index")).get("format"))); + needsUpgrade = format == IndexLifecycleManager.INTERNAL_INDEX_FORMAT ? 
false : true; + } else { + needsUpgrade = true; + } + } + + if (needsUpgrade) { + logger.info("upgrading security index {}", concreteSecurityIndex); + // without upgrade, an error should be thrown + try { + createUser("postupgrade_user"); + fail("should not be able to add a user when upgrade hasn't taken place"); + } catch (ResponseException e) { + assertThat(e.getMessage(), containsString("Security index is not on the current version - " + + "the native realm will not be operational until the upgrade API is run on the security index")); + } + // run upgrade API + Response upgradeResponse = client().performRequest("POST", "_xpack/migration/upgrade/" + concreteSecurityIndex); + logger.info("upgrade response:\n{}", toStr(upgradeResponse)); + } + + // create additional user and role + createUser("postupgrade_user"); + createRole("postupgrade_role"); + } + + assertUserInfo("preupgrade_user"); + assertRoleInfo("preupgrade_role"); + if (!runningAgainstOldCluster) { + assertUserInfo("postupgrade_user"); + assertRoleInfo("postupgrade_role"); + } + } + + public void testWatcher() throws Exception { + if (runningAgainstOldCluster) { + logger.info("Adding a watch on old cluster {}", oldClusterVersion); + client().performRequest("PUT", "_xpack/watcher/watch/bwc_watch", emptyMap(), + new StringEntity(loadWatch("simple-watch.json"), ContentType.APPLICATION_JSON)); + + logger.info("Adding a watch with \"fun\" throttle periods on old cluster"); + client().performRequest("PUT", "_xpack/watcher/watch/bwc_throttle_period", emptyMap(), + new StringEntity(loadWatch("throttle-period-watch.json"), ContentType.APPLICATION_JSON)); + + logger.info("Adding a watch with \"fun\" read timeout on old cluster"); + client().performRequest("PUT", "_xpack/watcher/watch/bwc_funny_timeout", emptyMap(), + new StringEntity(loadWatch("funny-timeout-watch.json"), ContentType.APPLICATION_JSON)); + + logger.info("Waiting for watch results index to fill up..."); + waitForYellow(".watches,bwc_watch_index,.watcher-history*"); + waitForHits("bwc_watch_index", 2); + waitForHits(".watcher-history*", 2); + logger.info("Done creating watcher-related indices"); + } else { + logger.info("testing against {}", oldClusterVersion); + waitForYellow(".watches,bwc_watch_index,.watcher-history*"); + + logger.info("checking if the upgrade procedure on the new cluster is required"); + Map response = toMap(client().performRequest("GET", "/_xpack/migration/assistance")); + logger.info(response); + + @SuppressWarnings("unchecked") Map indices = (Map) response.get("indices"); + if (indices.containsKey(".watches")) { + logger.info("upgrade procedure is required for watcher"); + assertThat(indices.entrySet().size(), greaterThanOrEqualTo(1)); + assertThat(indices.get(".watches"), notNullValue()); + @SuppressWarnings("unchecked") Map index = (Map) indices.get(".watches"); + assertThat(index.get("action_required"), equalTo("upgrade")); + + logger.info("starting upgrade procedure on the new cluster"); + + Map params = Collections.singletonMap("error_trace", "true"); + Map upgradeResponse = toMap(client().performRequest("POST", "_xpack/migration/upgrade/.watches", params)); + assertThat(upgradeResponse.get("timed_out"), equalTo(Boolean.FALSE)); + // we posted 3 watches, but monitoring can post a few more + assertThat((int) upgradeResponse.get("total"), greaterThanOrEqualTo(3)); + + logger.info("checking that upgrade procedure on the new cluster is no longer required"); + Map responseAfter = toMap(client().performRequest("GET", "/_xpack/migration/assistance")); + 
@SuppressWarnings("unchecked") Map indicesAfter = (Map) responseAfter.get("indices"); + assertNull(indicesAfter.get(".watches")); + } else { + logger.info("upgrade procedure is not required for watcher"); + } + + // Wait for watcher to actually start.... + Map startWatchResponse = toMap(client().performRequest("POST", "_xpack/watcher/_start")); + assertThat(startWatchResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + assertBusy(() -> { + Map statsWatchResponse = toMap(client().performRequest("GET", "_xpack/watcher/stats")); + @SuppressWarnings("unchecked") + List states = ((List) statsWatchResponse.get("stats")) + .stream().map(o -> ((Map) o).get("watcher_state")).collect(Collectors.toList()); + assertThat(states, everyItem(is("started"))); + }); + + try { + assertOldTemplatesAreDeleted(); + assertWatchIndexContentsWork(); + assertBasicWatchInteractions(); + } finally { + /* Shut down watcher after every test because watcher can be a bit finicky about shutting down when the node shuts + * down. This makes super sure it shuts down *and* causes the test to fail in a sensible spot if it doesn't shut down. + */ + Map stopWatchResponse = toMap(client().performRequest("POST", "_xpack/watcher/_stop")); + assertThat(stopWatchResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + assertBusy(() -> { + Map statsStoppedWatchResponse = toMap(client().performRequest("GET", "_xpack/watcher/stats")); + @SuppressWarnings("unchecked") + List states = ((List) statsStoppedWatchResponse.get("stats")) + .stream().map(o -> ((Map) o).get("watcher_state")).collect(Collectors.toList()); + assertThat(states, everyItem(is("stopped"))); + }); + } + } + } + + public void testSqlFailsOnIndexWithTwoTypes() throws IOException { + // TODO this isn't going to trigger until we backport to 6.1 + assumeTrue("It is only possible to build an index that sql doesn't like before 6.0.0", + oldClusterVersion.before(Version.V_6_0_0_alpha1)); + if (runningAgainstOldCluster) { + client().performRequest("POST", "/testsqlfailsonindexwithtwotypes/type1", emptyMap(), + new StringEntity("{}", ContentType.APPLICATION_JSON)); + client().performRequest("POST", "/testsqlfailsonindexwithtwotypes/type2", emptyMap(), + new StringEntity("{}", ContentType.APPLICATION_JSON)); + return; + } + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest("POST", "/_xpack/sql", emptyMap(), + new StringEntity("{\"query\":\"SELECT * FROM testsqlfailsonindexwithtwotypes\"}", ContentType.APPLICATION_JSON))); + assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); + assertThat(e.getMessage(), containsString( + "[testsqlfailsonindexwithtwotypes] contains more than one type [type1, type2] so it is incompatible with sql")); + } + + private String loadWatch(String watch) throws IOException { + return StreamsUtils.copyToStringFromClasspath("/org/elasticsearch/xpack/restart/" + watch); + } + + @SuppressWarnings("unchecked") + private void assertOldTemplatesAreDeleted() throws IOException { + Map templates = toMap(client().performRequest("GET", "/_template")); + assertThat(templates.keySet(), not(hasItems(is("watches"), startsWith("watch-history"), is("triggered_watches")))); + } + + @SuppressWarnings("unchecked") + private void assertWatchIndexContentsWork() throws Exception { + // Fetch a basic watch + Map bwcWatch = toMap(client().performRequest("GET", "_xpack/watcher/watch/bwc_watch")); + + logger.error("-----> {}", bwcWatch); + + assertThat(bwcWatch.get("found"), equalTo(true)); + Map source = (Map) 
bwcWatch.get("watch"); + assertEquals(1000, source.get("throttle_period_in_millis")); + int timeout = (int) timeValueSeconds(100).millis(); + assertThat(ObjectPath.eval("input.search.timeout_in_millis", source), equalTo(timeout)); + assertThat(ObjectPath.eval("actions.index_payload.transform.search.timeout_in_millis", source), equalTo(timeout)); + assertThat(ObjectPath.eval("actions.index_payload.index.index", source), equalTo("bwc_watch_index")); + assertThat(ObjectPath.eval("actions.index_payload.index.doc_type", source), equalTo("bwc_watch_type")); + assertThat(ObjectPath.eval("actions.index_payload.index.timeout_in_millis", source), equalTo(timeout)); + + // Fetch a watch with "fun" throttle periods + bwcWatch = toMap(client().performRequest("GET", "_xpack/watcher/watch/bwc_throttle_period")); + assertThat(bwcWatch.get("found"), equalTo(true)); + source = (Map) bwcWatch.get("watch"); + assertEquals(timeout, source.get("throttle_period_in_millis")); + assertThat(ObjectPath.eval("actions.index_payload.throttle_period_in_millis", source), equalTo(timeout)); + + /* + * Fetch a watch with a funny timeout to verify loading fractional time + * values. + */ + bwcWatch = toMap(client().performRequest("GET", "_xpack/watcher/watch/bwc_funny_timeout")); + assertThat(bwcWatch.get("found"), equalTo(true)); + source = (Map) bwcWatch.get("watch"); + + + Map attachments = ObjectPath.eval("actions.work.email.attachments", source); + Map attachment = (Map) attachments.get("test_report.pdf"); + Map request = ObjectPath.eval("http.request", attachment); + assertEquals(timeout, request.get("read_timeout_millis")); + assertEquals("https", request.get("scheme")); + assertEquals("example.com", request.get("host")); + assertEquals("{{ctx.metadata.report_url}}", request.get("path")); + assertEquals(8443, request.get("port")); + Map basic = ObjectPath.eval("auth.basic", request); + assertThat(basic, hasEntry("username", "Aladdin")); + // password doesn't come back because it is hidden + assertThat(basic, hasEntry(is("password"), anyOf(startsWith("::es_encrypted::"), is("::es_redacted::")))); + + Map history = toMap(client().performRequest("GET", ".watcher-history*/_search")); + Map hits = (Map) history.get("hits"); + assertThat((int) (hits.get("total")), greaterThanOrEqualTo(2)); + } + + private void assertBasicWatchInteractions() throws Exception { + + String watch = new WatchSourceBuilder() + .condition(InternalAlwaysCondition.INSTANCE) + .trigger(ScheduleTrigger.builder(new IntervalSchedule(IntervalSchedule.Interval.seconds(1)))) + .addAction("awesome", LoggingAction.builder(new TextTemplate("test"))).buildAsBytes(XContentType.JSON).utf8ToString(); + Map put = toMap(client().performRequest("PUT", "_xpack/watcher/watch/new_watch", emptyMap(), + new StringEntity(watch, ContentType.APPLICATION_JSON))); + + logger.info(put); + + assertThat(put.get("created"), equalTo(true)); + assertThat(put.get("_version"), equalTo(1)); + + put = toMap(client().performRequest("PUT", "_xpack/watcher/watch/new_watch", emptyMap(), + new StringEntity(watch, ContentType.APPLICATION_JSON))); + assertThat(put.get("created"), equalTo(false)); + assertThat(put.get("_version"), equalTo(2)); + + Map get = toMap(client().performRequest("GET", "_xpack/watcher/watch/new_watch")); + assertThat(get.get("found"), equalTo(true)); + @SuppressWarnings("unchecked") Map source = (Map) get.get("watch"); + Map logging = ObjectPath.eval("actions.awesome.logging", source); + assertEquals("info", logging.get("level")); + assertEquals("test", 
logging.get("text")); + } + + private void waitForYellow(String indexName) throws IOException { + Map params = new HashMap<>(); + params.put("wait_for_status", "yellow"); + params.put("timeout", "30s"); + params.put("wait_for_no_relocating_shards", "true"); + if (oldClusterVersion.onOrAfter(Version.V_6_2_0)) { + params.put("wait_for_no_initializing_shards", "true"); + } + Map response = toMap(client().performRequest("GET", "/_cluster/health/" + indexName, params)); + assertThat(response.get("timed_out"), equalTo(Boolean.FALSE)); + } + + @SuppressWarnings("unchecked") + private void waitForHits(String indexName, int expectedHits) throws Exception { + Map params = singletonMap("size", "0"); + assertBusy(() -> { + try { + Map response = toMap(client().performRequest("GET", "/" + indexName + "/_search", params)); + Map hits = (Map) response.get("hits"); + int total = (int) hits.get("total"); + assertThat(total, greaterThanOrEqualTo(expectedHits)); + } catch (IOException ioe) { + if (ioe instanceof ResponseException) { + Response response = ((ResponseException) ioe).getResponse(); + if (RestStatus.fromCode(response.getStatusLine().getStatusCode()) == RestStatus.SERVICE_UNAVAILABLE) { + fail("shards are not yet active"); + } + } + throw ioe; + } + }, 30, TimeUnit.SECONDS); + } + + @SuppressWarnings("unchecked") + private void waitForMonitoringTemplates() throws Exception { + assertBusy(() -> { + final Map templates = toMap(client().performRequest("GET", "/_template/.monitoring-*")); + + // in earlier versions, we published legacy templates in addition to the current ones to support transitioning + assertThat(templates.size(), greaterThanOrEqualTo(MonitoringTemplateUtils.TEMPLATE_IDS.length)); + + // every template should be updated to whatever the current version is + for (final String templateId : MonitoringTemplateUtils.TEMPLATE_IDS) { + final String templateName = MonitoringTemplateUtils.templateName(templateId); + final Map template = (Map) templates.get(templateName); + + assertThat(template.get("version"), is(MonitoringTemplateUtils.LAST_UPDATED_VERSION)); + } + }, 30, TimeUnit.SECONDS); + } + + @SuppressWarnings("unchecked") + private void waitForClusterStats(final String expectedVersion) throws Exception { + assertBusy(() -> { + final Map params = new HashMap<>(3); + params.put("q", "type:cluster_stats"); + params.put("size", "1"); + params.put("sort", "timestamp:desc"); + + final Map response = toMap(client().performRequest("GET", "/.monitoring-es-*/_search", params)); + final Map hits = (Map) response.get("hits"); + + assertThat("No cluster_stats documents found.", (int)hits.get("total"), greaterThanOrEqualTo(1)); + + final Map hit = (Map) ((List) hits.get("hits")).get(0); + final Map source = (Map) hit.get("_source"); + assertThat(source.get("version"), is(expectedVersion)); + }, 30, TimeUnit.SECONDS); + } + + static Map toMap(Response response) throws IOException { + return toMap(EntityUtils.toString(response.getEntity())); + } + + static Map toMap(String response) throws IOException { + return XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false); + } + + static String toStr(Response response) throws IOException { + return EntityUtils.toString(response.getEntity()); + } + + private void createUser(final String id) throws Exception { + final String userJson = + "{\n" + + " \"password\" : \"j@rV1s\",\n" + + " \"roles\" : [ \"admin\", \"other_role1\" ],\n" + + " \"full_name\" : \"" + randomAlphaOfLength(5) + "\",\n" + + " \"email\" : \"" + id + "@example.com\",\n" + + 
" \"enabled\": true\n" + + "}"; + + client().performRequest("PUT", "/_xpack/security/user/" + id, emptyMap(), + new StringEntity(userJson, ContentType.APPLICATION_JSON)); + } + + private void createRole(final String id) throws Exception { + final String roleJson = + "{\n" + + " \"run_as\": [ \"abc\" ],\n" + + " \"cluster\": [ \"monitor\" ],\n" + + " \"indices\": [\n" + + " {\n" + + " \"names\": [ \"events-*\" ],\n" + + " \"privileges\": [ \"read\" ],\n" + + " \"field_security\" : {\n" + + " \"grant\" : [ \"category\", \"@timestamp\", \"message\" ]\n" + + " },\n" + + " \"query\": \"{\\\"match\\\": {\\\"category\\\": \\\"click\\\"}}\"\n" + + " }\n" + + " ]\n" + + "}"; + + client().performRequest("PUT", "/_xpack/security/role/" + id, emptyMap(), + new StringEntity(roleJson, ContentType.APPLICATION_JSON)); + } + + private void assertUserInfo(final String user) throws Exception { + Map response = toMap(client().performRequest("GET", "/_xpack/security/user/" + user)); + @SuppressWarnings("unchecked") Map userInfo = (Map) response.get(user); + assertEquals(user + "@example.com", userInfo.get("email")); + assertNotNull(userInfo.get("full_name")); + assertNotNull(userInfo.get("roles")); + } + + private void assertRoleInfo(final String role) throws Exception { + @SuppressWarnings("unchecked") Map response = (Map) + toMap(client().performRequest("GET", "/_xpack/security/role/" + role)).get(role); + assertNotNull(response.get("run_as")); + assertNotNull(response.get("cluster")); + assertNotNull(response.get("indices")); + } +} diff --git a/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json b/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json new file mode 100644 index 0000000000000..b5f1902afc6d0 --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/funny-timeout-watch.json @@ -0,0 +1,38 @@ +{ + "trigger" : { + "schedule": { + "interval": "100s" + } + }, + "condition": { + "never": {} + }, + "actions": { + "work": { + "email": { + "to": "email@domain.com", + "subject": "Test Kibana PDF report", + "attachments": { + "test_report.pdf": { + "http": { + "content_type": "application/pdf", + "request": { + "read_timeout": "100s", + "scheme": "https", + "host": "example.com", + "path":"{{ctx.metadata.report_url}}", + "port": 8443, + "auth": { + "basic": { + "username": "Aladdin", + "password": "open sesame" + } + } + } + } + } + } + } + } + } +} \ No newline at end of file diff --git a/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/simple-watch.json b/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/simple-watch.json new file mode 100644 index 0000000000000..ceeb128dac890 --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/simple-watch.json @@ -0,0 +1,40 @@ +{ + "trigger" : { + "schedule": { + "interval": "1s" + } + }, + "input" : { + "search" : { + "timeout": "100s", + "request" : { + "indices" : [ ".watches" ], + "body" : { + "query" : { "match_all" : {}}, + "size": 1 + } + } + } + }, + "condition" : { + "always" : {} + }, + "throttle_period": "1s", + "actions" : { + "index_payload" : { + "transform" : { + "search" : { + "request" : { + "body" : { "size": 1, "query" : { "match_all" : {} }} + }, + "timeout": "100s" + } + }, + "index" : { + "index" : "bwc_watch_index", + "doc_type" : "bwc_watch_type", + "timeout": "100s" + } + } + } +} 
\ No newline at end of file diff --git a/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json b/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json new file mode 100644 index 0000000000000..736cf79ad7384 --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/test/resources/org/elasticsearch/xpack/restart/throttle-period-watch.json @@ -0,0 +1,27 @@ +{ + "trigger" : { + "schedule": { + "interval": "1s" + } + }, + "condition" : { + "never" : {} + }, + "throttle_period": "100s", + "actions" : { + "index_payload" : { + "throttle_period": "100s", + "transform" : { + "search" : { + "request" : { + "body" : { "size": 1, "query" : { "match_all" : {} }} + } + } + }, + "index" : { + "index" : "bwc_watch_index", + "doc_type" : "bwc_watch_type" + } + } + } +} \ No newline at end of file diff --git a/x-pack/qa/full-cluster-restart/src/test/resources/system_key b/x-pack/qa/full-cluster-restart/src/test/resources/system_key new file mode 100644 index 0000000000000..a72e0d6e77632 --- /dev/null +++ b/x-pack/qa/full-cluster-restart/src/test/resources/system_key @@ -0,0 +1 @@ +�{�����+�dTI;f����̭�l���|�}�j���D�vYW�V5��K�h�8��ΪP� z~��Ճa�),$j��.����^��w�ɴȐ38�v �}��|�^[ �F�����"ԑ�Ǘ�� \ No newline at end of file diff --git a/x-pack/qa/full-cluster-restart/with-system-key/build.gradle b/x-pack/qa/full-cluster-restart/with-system-key/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/qa/full-cluster-restart/without-system-key/build.gradle b/x-pack/qa/full-cluster-restart/without-system-key/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/qa/ml-basic-multi-node/build.gradle b/x-pack/qa/ml-basic-multi-node/build.gradle new file mode 100644 index 0000000000000..d396d38b22354 --- /dev/null +++ b/x-pack/qa/ml-basic-multi-node/build.gradle @@ -0,0 +1,16 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('ml'), configuration: 'runtime') +} + +integTestCluster { + setting 'xpack.security.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.watcher.enabled', 'false' + setting 'xpack.ml.enabled', 'true' + setting 'xpack.license.self_generated.type', 'trial' + numNodes = 3 +} diff --git a/x-pack/qa/ml-basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java b/x-pack/qa/ml-basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java new file mode 100644 index 0000000000000..3b84994f5acca --- /dev/null +++ b/x-pack/qa/ml-basic-multi-node/src/test/java/org/elasticsearch/xpack/ml/integration/MlBasicMultiNodeIT.java @@ -0,0 +1,328 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.integration; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.ml.MachineLearning; + +import java.io.IOException; +import java.net.URLEncoder; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.common.xcontent.XContentType.JSON; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class MlBasicMultiNodeIT extends ESRestTestCase { + + @SuppressWarnings("unchecked") + public void testMachineLearningInstalled() throws Exception { + Response response = client().performRequest("get", "/_xpack"); + assertEquals(200, response.getStatusLine().getStatusCode()); + Map features = (Map) responseEntityToMap(response).get("features"); + Map ml = (Map) features.get("ml"); + assertNotNull(ml); + assertTrue((Boolean) ml.get("available")); + assertTrue((Boolean) ml.get("enabled")); + } + + public void testInvalidJob() throws Exception { + // The job name is invalid because it contains a space + String jobId = "invalid job"; + ResponseException e = expectThrows(ResponseException.class, () -> createFarequoteJob(jobId)); + assertTrue(e.getMessage(), e.getMessage().contains("can contain lowercase alphanumeric (a-z and 0-9), hyphens or underscores")); + // If validation of the invalid job is not done until after transportation to the master node then the + // root cause gets reported as a remote_transport_exception. The code in PutJobAction is supposed to + // validate before transportation to avoid this. This test must be done in a multi-node cluster to have + // a chance of catching a problem, hence it is here rather than in the single node integration tests.
+ assertFalse(e.getMessage(), e.getMessage().contains("remote_transport_exception")); + } + + public void testMiniFarequote() throws Exception { + String jobId = "mini-farequote-job"; + createFarequoteJob(jobId); + + Response response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open"); + assertEquals(200, response.getStatusLine().getStatusCode()); + assertEquals(Collections.singletonMap("opened", true), responseEntityToMap(response)); + + String postData = + "{\"airline\":\"AAL\",\"responsetime\":\"132.2046\",\"sourcetype\":\"farequote\",\"time\":\"1403481600\"}\n" + + "{\"airline\":\"JZA\",\"responsetime\":\"990.4628\",\"sourcetype\":\"farequote\",\"time\":\"1403481700\"}"; + response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data", + Collections.emptyMap(), + new StringEntity(postData, randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); + assertEquals(202, response.getStatusLine().getStatusCode()); + Map responseBody = responseEntityToMap(response); + assertEquals(2, responseBody.get("processed_record_count")); + assertEquals(4, responseBody.get("processed_field_count")); + assertEquals(177, responseBody.get("input_bytes")); + assertEquals(6, responseBody.get("input_field_count")); + assertEquals(0, responseBody.get("invalid_date_count")); + assertEquals(0, responseBody.get("missing_field_count")); + assertEquals(0, responseBody.get("out_of_order_timestamp_count")); + assertEquals(0, responseBody.get("bucket_count")); + assertEquals(1403481600000L, responseBody.get("earliest_record_timestamp")); + assertEquals(1403481700000L, responseBody.get("latest_record_timestamp")); + + response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_flush"); + assertEquals(200, response.getStatusLine().getStatusCode()); + assertFlushResponse(response, true, 1403481600000L); + + response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close", + Collections.singletonMap("timeout", "20s")); + assertEquals(200, response.getStatusLine().getStatusCode()); + assertEquals(Collections.singletonMap("closed", true), responseEntityToMap(response)); + + response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); + assertEquals(200, response.getStatusLine().getStatusCode()); + @SuppressWarnings("unchecked") + Map dataCountsDoc = (Map) + ((Map)((List) responseEntityToMap(response).get("jobs")).get(0)).get("data_counts"); + assertEquals(2, dataCountsDoc.get("processed_record_count")); + assertEquals(4, dataCountsDoc.get("processed_field_count")); + assertEquals(177, dataCountsDoc.get("input_bytes")); + assertEquals(6, dataCountsDoc.get("input_field_count")); + assertEquals(0, dataCountsDoc.get("invalid_date_count")); + assertEquals(0, dataCountsDoc.get("missing_field_count")); + assertEquals(0, dataCountsDoc.get("out_of_order_timestamp_count")); + assertEquals(0, dataCountsDoc.get("bucket_count")); + assertEquals(1403481600000L, dataCountsDoc.get("earliest_record_timestamp")); + assertEquals(1403481700000L, dataCountsDoc.get("latest_record_timestamp")); + + response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + assertEquals(200, response.getStatusLine().getStatusCode()); + } + + public void testMiniFarequoteWithDatafeeder() throws Exception { + String mappings = "{" + + " \"mappings\": 
{" + + " \"response\": {" + + " \"properties\": {" + + " \"time\": { \"type\":\"date\"}," + + " \"airline\": { \"type\":\"keyword\"}," + + " \"responsetime\": { \"type\":\"float\"}" + + " }" + + " }" + + " }" + + "}"; + client().performRequest("put", "airline-data", Collections.emptyMap(), new StringEntity(mappings, ContentType.APPLICATION_JSON)); + client().performRequest("put", "airline-data/response/1", Collections.emptyMap(), + new StringEntity("{\"time\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}", + ContentType.APPLICATION_JSON)); + client().performRequest("put", "airline-data/response/2", Collections.emptyMap(), + new StringEntity("{\"time\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}", + ContentType.APPLICATION_JSON)); + + // Ensure all data is searchable + client().performRequest("post", "_refresh"); + + String jobId = "mini-farequote-with-data-feeder-job"; + createFarequoteJob(jobId); + String datafeedId = "bar"; + createDatafeed(datafeedId, jobId); + + Response response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open"); + assertEquals(200, response.getStatusLine().getStatusCode()); + assertEquals(Collections.singletonMap("opened", true), responseEntityToMap(response)); + + response = client().performRequest("post", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start", + Collections.singletonMap("start", "0")); + assertEquals(200, response.getStatusLine().getStatusCode()); + assertEquals(Collections.singletonMap("started", true), responseEntityToMap(response)); + + assertBusy(() -> { + try { + Response statsResponse = + client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); + assertEquals(200, statsResponse.getStatusLine().getStatusCode()); + @SuppressWarnings("unchecked") + Map dataCountsDoc = (Map) + ((Map)((List) responseEntityToMap(statsResponse).get("jobs")).get(0)).get("data_counts"); + assertEquals(2, dataCountsDoc.get("input_record_count")); + assertEquals(2, dataCountsDoc.get("processed_record_count")); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + + response = client().performRequest("post", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_stop"); + assertEquals(200, response.getStatusLine().getStatusCode()); + assertEquals(Collections.singletonMap("stopped", true), responseEntityToMap(response)); + + response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close", + Collections.singletonMap("timeout", "20s")); + assertEquals(200, response.getStatusLine().getStatusCode()); + assertEquals(Collections.singletonMap("closed", true), responseEntityToMap(response)); + + response = client().performRequest("delete", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId); + assertEquals(200, response.getStatusLine().getStatusCode()); + + response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + assertEquals(200, response.getStatusLine().getStatusCode()); + } + + public void testMiniFarequoteReopen() throws Exception { + String jobId = "mini-farequote-reopen"; + createFarequoteJob(jobId); + + Response response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open"); + assertEquals(200, response.getStatusLine().getStatusCode()); + assertEquals(Collections.singletonMap("opened", true), responseEntityToMap(response)); + + String postData = + 
"{\"airline\":\"AAL\",\"responsetime\":\"132.2046\",\"sourcetype\":\"farequote\",\"time\":\"1403481600\"}\n" + + "{\"airline\":\"JZA\",\"responsetime\":\"990.4628\",\"sourcetype\":\"farequote\",\"time\":\"1403481700\"}\n" + + "{\"airline\":\"JBU\",\"responsetime\":\"877.5927\",\"sourcetype\":\"farequote\",\"time\":\"1403481800\"}\n" + + "{\"airline\":\"KLM\",\"responsetime\":\"1355.4812\",\"sourcetype\":\"farequote\",\"time\":\"1403481900\"}\n" + + "{\"airline\":\"NKS\",\"responsetime\":\"9991.3981\",\"sourcetype\":\"farequote\",\"time\":\"1403482000\"}"; + response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data", + Collections.emptyMap(), + new StringEntity(postData, randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); + assertEquals(202, response.getStatusLine().getStatusCode()); + Map responseBody = responseEntityToMap(response); + assertEquals(5, responseBody.get("processed_record_count")); + assertEquals(10, responseBody.get("processed_field_count")); + assertEquals(446, responseBody.get("input_bytes")); + assertEquals(15, responseBody.get("input_field_count")); + assertEquals(0, responseBody.get("invalid_date_count")); + assertEquals(0, responseBody.get("missing_field_count")); + assertEquals(0, responseBody.get("out_of_order_timestamp_count")); + assertEquals(0, responseBody.get("bucket_count")); + assertEquals(1403481600000L, responseBody.get("earliest_record_timestamp")); + assertEquals(1403482000000L, responseBody.get("latest_record_timestamp")); + + response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_flush"); + assertEquals(200, response.getStatusLine().getStatusCode()); + assertFlushResponse(response, true, 1403481600000L); + + response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close", + Collections.singletonMap("timeout", "20s")); + assertEquals(200, response.getStatusLine().getStatusCode()); + assertEquals(Collections.singletonMap("closed", true), responseEntityToMap(response)); + + response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); + assertEquals(200, response.getStatusLine().getStatusCode()); + + response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open", + Collections.singletonMap("timeout", "20s")); + assertEquals(200, response.getStatusLine().getStatusCode()); + assertEquals(Collections.singletonMap("opened", true), responseEntityToMap(response)); + + // feed some more data points + postData = + "{\"airline\":\"AAL\",\"responsetime\":\"136.2361\",\"sourcetype\":\"farequote\",\"time\":\"1407081600\"}\n" + + "{\"airline\":\"VRD\",\"responsetime\":\"282.9847\",\"sourcetype\":\"farequote\",\"time\":\"1407081700\"}\n" + + "{\"airline\":\"JAL\",\"responsetime\":\"493.0338\",\"sourcetype\":\"farequote\",\"time\":\"1407081800\"}\n" + + "{\"airline\":\"UAL\",\"responsetime\":\"8.4275\",\"sourcetype\":\"farequote\",\"time\":\"1407081900\"}\n" + + "{\"airline\":\"FFT\",\"responsetime\":\"221.8693\",\"sourcetype\":\"farequote\",\"time\":\"1407082000\"}"; + response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data", + Collections.emptyMap(), + new StringEntity(postData, randomFrom(ContentType.APPLICATION_JSON, ContentType.create("application/x-ndjson")))); + assertEquals(202, response.getStatusLine().getStatusCode()); + 
responseBody = responseEntityToMap(response); + assertEquals(5, responseBody.get("processed_record_count")); + assertEquals(10, responseBody.get("processed_field_count")); + assertEquals(442, responseBody.get("input_bytes")); + assertEquals(15, responseBody.get("input_field_count")); + assertEquals(0, responseBody.get("invalid_date_count")); + assertEquals(0, responseBody.get("missing_field_count")); + assertEquals(0, responseBody.get("out_of_order_timestamp_count")); + assertEquals(0, responseBody.get("bucket_count")); + + // unintuitive: should return the earliest record timestamp of this feed??? + assertEquals(null, responseBody.get("earliest_record_timestamp")); + assertEquals(1407082000000L, responseBody.get("latest_record_timestamp")); + + response = client().performRequest("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close", + Collections.singletonMap("timeout", "20s")); + assertEquals(200, response.getStatusLine().getStatusCode()); + assertEquals(Collections.singletonMap("closed", true), responseEntityToMap(response)); + + // counts should be summed up + response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); + assertEquals(200, response.getStatusLine().getStatusCode()); + + @SuppressWarnings("unchecked") + Map dataCountsDoc = (Map) + ((Map)((List) responseEntityToMap(response).get("jobs")).get(0)).get("data_counts"); + assertEquals(10, dataCountsDoc.get("processed_record_count")); + assertEquals(20, dataCountsDoc.get("processed_field_count")); + assertEquals(888, dataCountsDoc.get("input_bytes")); + assertEquals(30, dataCountsDoc.get("input_field_count")); + assertEquals(0, dataCountsDoc.get("invalid_date_count")); + assertEquals(0, dataCountsDoc.get("missing_field_count")); + assertEquals(0, dataCountsDoc.get("out_of_order_timestamp_count")); + assertEquals(0, dataCountsDoc.get("bucket_count")); + assertEquals(1403481600000L, dataCountsDoc.get("earliest_record_timestamp")); + assertEquals(1407082000000L, dataCountsDoc.get("latest_record_timestamp")); + + response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + assertEquals(200, response.getStatusLine().getStatusCode()); + } + + private Response createDatafeed(String datafeedId, String jobId) throws Exception { + XContentBuilder xContentBuilder = jsonBuilder(); + xContentBuilder.startObject(); + xContentBuilder.field("job_id", jobId); + xContentBuilder.array("indexes", "airline-data"); + xContentBuilder.array("types", "response"); + xContentBuilder.field("_source", true); + xContentBuilder.endObject(); + return client().performRequest("put", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId, + Collections.emptyMap(), new StringEntity(Strings.toString(xContentBuilder), ContentType.APPLICATION_JSON)); + } + + private Response createFarequoteJob(String jobId) throws Exception { + XContentBuilder xContentBuilder = jsonBuilder(); + xContentBuilder.startObject(); + xContentBuilder.field("job_id", jobId); + xContentBuilder.field("description", "Analysis of response time by airline"); + + xContentBuilder.startObject("analysis_config"); + xContentBuilder.field("bucket_span", "3600s"); + xContentBuilder.startArray("detectors"); + xContentBuilder.startObject(); + xContentBuilder.field("function", "metric"); + xContentBuilder.field("field_name", "responsetime"); + xContentBuilder.field("by_field_name", "airline"); + xContentBuilder.endObject(); + xContentBuilder.endArray(); + xContentBuilder.endObject(); + + 
xContentBuilder.startObject("data_description"); + xContentBuilder.field("format", "xcontent"); + xContentBuilder.field("time_field", "time"); + xContentBuilder.field("time_format", "epoch"); + xContentBuilder.endObject(); + xContentBuilder.endObject(); + + return client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + URLEncoder.encode(jobId, "UTF-8"), + Collections.emptyMap(), new StringEntity(Strings.toString(xContentBuilder), ContentType.APPLICATION_JSON)); + } + + private static Map responseEntityToMap(Response response) throws IOException { + return XContentHelper.convertToMap(JSON.xContent(), response.getEntity().getContent(), false); + } + + private static void assertFlushResponse(Response response, boolean expectedFlushed, long expectedLastFinalizedBucketEnd) + throws IOException { + Map asMap = responseEntityToMap(response); + assertThat(asMap.size(), equalTo(2)); + assertThat(asMap.get("flushed"), is(true)); + assertThat(asMap.get("last_finalized_bucket_end"), equalTo(expectedLastFinalizedBucketEnd)); + } +} diff --git a/x-pack/qa/ml-disabled/build.gradle b/x-pack/qa/ml-disabled/build.gradle new file mode 100644 index 0000000000000..22a7dfc74ac73 --- /dev/null +++ b/x-pack/qa/ml-disabled/build.gradle @@ -0,0 +1,13 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('ml'), configuration: 'runtime') +} + +integTestCluster { + setting 'xpack.security.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + numNodes = 1 +} diff --git a/x-pack/qa/ml-disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java b/x-pack/qa/ml-disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java new file mode 100644 index 0000000000000..e7a0a6028d4ec --- /dev/null +++ b/x-pack/qa/ml-disabled/src/test/java/org/elasticsearch/xpack/ml/integration/MlPluginDisabledIT.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.integration; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.ml.MachineLearning; + +import java.util.Collections; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; + +public class MlPluginDisabledIT extends ESRestTestCase { + + /** + * Check that when the ml plugin is disabled, you cannot create a job as the + * rest handler is not registered + */ + public void testActionsFail() throws Exception { + XContentBuilder xContentBuilder = jsonBuilder(); + xContentBuilder.startObject(); + xContentBuilder.field("actions-fail-job", "foo"); + xContentBuilder.field("description", "Analysis of response time by airline"); + + xContentBuilder.startObject("analysis_config"); + xContentBuilder.field("bucket_span", "3600s"); + xContentBuilder.startArray("detectors"); + xContentBuilder.startObject(); + xContentBuilder.field("function", "metric"); + xContentBuilder.field("field_name", "responsetime"); + xContentBuilder.field("by_field_name", "airline"); + xContentBuilder.endObject(); + xContentBuilder.endArray(); + xContentBuilder.endObject(); + + xContentBuilder.startObject("data_description"); + xContentBuilder.field("format", "xcontent"); + xContentBuilder.field("time_field", "time"); + xContentBuilder.field("time_format", "epoch"); + xContentBuilder.endObject(); + xContentBuilder.endObject(); + + ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("put", + MachineLearning.BASE_PATH + "anomaly_detectors/foo", Collections.emptyMap(), + new StringEntity(Strings.toString(xContentBuilder), ContentType.APPLICATION_JSON))); + assertThat(exception.getMessage(), containsString("No handler found for uri [/_xpack/ml/anomaly_detectors/foo] and method [PUT]")); + } +} diff --git a/x-pack/qa/ml-native-tests/build.gradle b/x-pack/qa/ml-native-tests/build.gradle new file mode 100644 index 0000000000000..94b7be3a44d4d --- /dev/null +++ b/x-pack/qa/ml-native-tests/build.gradle @@ -0,0 +1,83 @@ +import org.elasticsearch.gradle.LoggedExec + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('ml'), configuration: 'runtime') + testCompile project(path: xpackModule('ml'), configuration: 'testArtifacts') +} + +integTestRunner { + /* + * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each + * other if we allow them to set the number of available processors as it's set-once in Netty. 
+ */ + systemProperty 'es.set.netty.runtime.available.processors', 'false' +} + +// location of generated keystores and certificates +File keystoreDir = new File(project.buildDir, 'keystore') + +// Generate the node's keystore +File nodeKeystore = new File(keystoreDir, 'test-node.jks') +task createNodeKeyStore(type: LoggedExec) { + doFirst { + if (nodeKeystore.parentFile.exists() == false) { + nodeKeystore.parentFile.mkdirs() + } + if (nodeKeystore.exists()) { + delete nodeKeystore + } + } + executable = new File(project.runtimeJavaHome, 'bin/keytool') + standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8')) + args '-genkey', + '-alias', 'test-node', + '-keystore', nodeKeystore, + '-keyalg', 'RSA', + '-keysize', '2048', + '-validity', '712', + '-dname', 'CN=smoke-test-plugins-ssl', + '-keypass', 'keypass', + '-storepass', 'keypass' +} + +// Add keystores to test classpath: it expects it there +sourceSets.test.resources.srcDir(keystoreDir) +processTestResources.dependsOn(createNodeKeyStore) + +integTestCluster { + dependsOn createNodeKeyStore + setting 'xpack.security.enabled', 'true' + setting 'xpack.ml.enabled', 'true' + setting 'logger.org.elasticsearch.xpack.ml.datafeed', 'TRACE' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.security.authc.token.enabled', 'true' + setting 'xpack.security.transport.ssl.enabled', 'true' + setting 'xpack.security.transport.ssl.keystore.path', nodeKeystore.name + setting 'xpack.security.transport.ssl.verification_mode', 'certificate' + setting 'xpack.security.audit.enabled', 'true' + setting 'xpack.license.self_generated.type', 'trial' + + keystoreSetting 'bootstrap.password', 'x-pack-test-password' + keystoreSetting 'xpack.security.transport.ssl.keystore.secure_password', 'keypass' + + setupCommand 'setupDummyUser', + 'bin/elasticsearch-users', 'useradd', 'x_pack_rest_user', '-p', 'x-pack-test-password', '-r', 'superuser' + + extraConfigFile nodeKeystore.name, nodeKeystore + + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'x_pack_rest_user', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java new file mode 100644 index 0000000000000..c46b1d1c8689b --- /dev/null +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java @@ -0,0 +1,251 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisLimits; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; +import org.junit.After; + +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.lessThan; + +/** + * A set of tests that ensure we comply to the model memory limit + */ +public class AutodetectMemoryLimitIT extends MlNativeAutodetectIntegTestCase { + + @After + public void cleanUpTest() throws Exception { + cleanUp(); + } + + public void testTooManyPartitions() throws Exception { + Detector.Builder detector = new Detector.Builder("count", null); + detector.setPartitionFieldName("user"); + + TimeValue bucketSpan = TimeValue.timeValueHours(1); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + analysisConfig.setBucketSpan(bucketSpan); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeFormat("epoch"); + Job.Builder job = new Job.Builder("autodetect-memory-limit-test-too-many-partitions"); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(dataDescription); + + // Set the memory limit to 30MB + AnalysisLimits limits = new AnalysisLimits(30L, null); + job.setAnalysisLimits(limits); + + registerJob(job); + putJob(job); + openJob(job.getId()); + + long now = Instant.now().getEpochSecond(); + long timestamp = now - 8 * bucketSpan.seconds(); + List data = new ArrayList<>(); + while (timestamp < now) { + for (int i = 0; i < 10000; i++) { + // It's important that the values used here are either always represented in less than 16 UTF-8 bytes or + // always represented in more than 22 UTF-8 bytes. Otherwise platform differences in when the small string + // optimisation is used will make the results of this test very different for the different platforms. 
+ data.add(createJsonRecord(createRecord(timestamp, String.valueOf(i), ""))); + } + timestamp += bucketSpan.seconds(); + } + + postData(job.getId(), data.stream().collect(Collectors.joining())); + closeJob(job.getId()); + + // Assert we haven't violated the limit too much + // and a balance of partitions/by fields were created + GetJobsStatsAction.Response.JobStats jobStats = getJobStats(job.getId()).get(0); + ModelSizeStats modelSizeStats = jobStats.getModelSizeStats(); + assertThat(modelSizeStats.getModelBytes(), lessThan(35000000L)); + assertThat(modelSizeStats.getModelBytes(), greaterThan(30000000L)); + + // it is important to check that while we rejected partitions, we still managed + // to create some by fields; it shows we utilize memory in a meaningful way + // rather than creating empty partitions + assertThat(modelSizeStats.getTotalPartitionFieldCount(), lessThan(900L)); + assertThat(modelSizeStats.getTotalPartitionFieldCount(), greaterThan(650L)); + assertThat(modelSizeStats.getTotalByFieldCount(), lessThan(900L)); + assertThat(modelSizeStats.getTotalByFieldCount(), greaterThan(650L)); + assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT)); + } + + public void testTooManyByFields() throws Exception { + Detector.Builder detector = new Detector.Builder("count", null); + detector.setByFieldName("user"); + + TimeValue bucketSpan = TimeValue.timeValueHours(1); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + analysisConfig.setBucketSpan(bucketSpan); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeFormat("epoch"); + Job.Builder job = new Job.Builder("autodetect-memory-limit-test-too-many-by-fields"); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(dataDescription); + + // Set the memory limit to 30MB + AnalysisLimits limits = new AnalysisLimits(30L, null); + job.setAnalysisLimits(limits); + + registerJob(job); + putJob(job); + openJob(job.getId()); + + long now = Instant.now().getEpochSecond(); + long timestamp = now - 8 * bucketSpan.seconds(); + List data = new ArrayList<>(); + while (timestamp < now) { + for (int i = 0; i < 10000; i++) { + // It's important that the values used here are either always represented in less than 16 UTF-8 bytes or + // always represented in more than 22 UTF-8 bytes. Otherwise platform differences in when the small string + // optimisation is used will make the results of this test very different for the different platforms. 
+ data.add(createJsonRecord(createRecord(timestamp, String.valueOf(i), ""))); + } + timestamp += bucketSpan.seconds(); + } + + postData(job.getId(), data.stream().collect(Collectors.joining())); + closeJob(job.getId()); + + // Assert we haven't violated the limit too much + GetJobsStatsAction.Response.JobStats jobStats = getJobStats(job.getId()).get(0); + ModelSizeStats modelSizeStats = jobStats.getModelSizeStats(); + assertThat(modelSizeStats.getModelBytes(), lessThan(36000000L)); + assertThat(modelSizeStats.getModelBytes(), greaterThan(30000000L)); + assertThat(modelSizeStats.getTotalByFieldCount(), lessThan(1900L)); + assertThat(modelSizeStats.getTotalByFieldCount(), greaterThan(1600L)); + assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT)); + } + + public void testTooManyByAndOverFields() throws Exception { + Detector.Builder detector = new Detector.Builder("count", null); + detector.setByFieldName("department"); + detector.setOverFieldName("user"); + + TimeValue bucketSpan = TimeValue.timeValueHours(1); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + analysisConfig.setBucketSpan(bucketSpan); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeFormat("epoch"); + Job.Builder job = new Job.Builder("autodetect-memory-limit-test-too-many-by-and-over-fields"); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(dataDescription); + + // Set the memory limit to 30MB + AnalysisLimits limits = new AnalysisLimits(30L, null); + job.setAnalysisLimits(limits); + + registerJob(job); + putJob(job); + openJob(job.getId()); + + long now = Instant.now().getEpochSecond(); + long timestamp = now - 8 * bucketSpan.seconds(); + while (timestamp < now) { + for (int department = 0; department < 10; department++) { + List data = new ArrayList<>(); + for (int user = 0; user < 10000; user++) { + // It's important that the values used here are either always represented in less than 16 UTF-8 bytes or + // always represented in more than 22 UTF-8 bytes. Otherwise platform differences in when the small string + // optimisation is used will make the results of this test very different for the different platforms. 
+ data.add(createJsonRecord(createRecord( + timestamp, String.valueOf(department) + "_" + String.valueOf(user), String.valueOf(department)))); + } + postData(job.getId(), data.stream().collect(Collectors.joining())); + } + timestamp += bucketSpan.seconds(); + } + + closeJob(job.getId()); + + // Assert we haven't violated the limit too much + GetJobsStatsAction.Response.JobStats jobStats = getJobStats(job.getId()).get(0); + ModelSizeStats modelSizeStats = jobStats.getModelSizeStats(); + assertThat(modelSizeStats.getModelBytes(), lessThan(36000000L)); + assertThat(modelSizeStats.getModelBytes(), greaterThan(24000000L)); + assertThat(modelSizeStats.getTotalByFieldCount(), equalTo(7L)); + assertThat(modelSizeStats.getTotalOverFieldCount(), greaterThan(40000L)); + assertThat(modelSizeStats.getTotalOverFieldCount(), lessThan(50000L)); + assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT)); + } + + public void testManyDistinctOverFields() throws Exception { + Detector.Builder detector = new Detector.Builder("sum", "value"); + detector.setOverFieldName("user"); + + TimeValue bucketSpan = TimeValue.timeValueHours(1); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + analysisConfig.setBucketSpan(bucketSpan); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeFormat("epoch"); + Job.Builder job = new Job.Builder("autodetect-memory-limit-test-too-many-distinct-over-fields"); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(dataDescription); + + // Set the memory limit to 110MB + AnalysisLimits limits = new AnalysisLimits(110L, null); + job.setAnalysisLimits(limits); + + registerJob(job); + putJob(job); + openJob(job.getId()); + + long now = Instant.now().getEpochSecond(); + long timestamp = now - 15 * bucketSpan.seconds(); + int user = 0; + while (timestamp < now) { + List data = new ArrayList<>(); + for (int i = 0; i < 10000; i++) { + // It's important that the values used here are either always represented in less than 16 UTF-8 bytes or + // always represented in more than 22 UTF-8 bytes. Otherwise platform differences in when the small string + // optimisation is used will make the results of this test very different for the different platforms. 
+ Map record = new HashMap<>(); + record.put("time", timestamp); + record.put("user", user++); + record.put("value", 42.0); + data.add(createJsonRecord(record)); + } + postData(job.getId(), data.stream().collect(Collectors.joining())); + timestamp += bucketSpan.seconds(); + } + + closeJob(job.getId()); + + // Assert we haven't violated the limit too much + GetJobsStatsAction.Response.JobStats jobStats = getJobStats(job.getId()).get(0); + ModelSizeStats modelSizeStats = jobStats.getModelSizeStats(); + assertThat(modelSizeStats.getModelBytes(), lessThan(90000000L)); + assertThat(modelSizeStats.getModelBytes(), greaterThan(75000000L)); + assertThat(modelSizeStats.getTotalOverFieldCount(), greaterThan(140000L)); + assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.OK)); + } + + private static Map createRecord(long timestamp, String user, String department) { + Map record = new HashMap<>(); + record.put("time", timestamp); + record.put("user", user); + record.put("department", department); + return record; + } +} diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java new file mode 100644 index 0000000000000..80afdeff82ad8 --- /dev/null +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/BasicRenormalizationIT.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; +import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; +import org.junit.After; + +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +/** + * This is a minimal test to ensure renormalization takes place + */ +public class BasicRenormalizationIT extends MlNativeAutodetectIntegTestCase { + + @After + public void tearDownData() throws Exception { + cleanUp(); + } + + public void testDefaultRenormalization() throws Exception { + String jobId = "basic-renormalization-it-test-default-renormalization-job"; + createAndRunJob(jobId, null); + + List records = getRecords(jobId); + assertThat(records.size(), equalTo(2)); + AnomalyRecord laterRecord = records.get(0); + assertThat(laterRecord.getActual().get(0), equalTo(100.0)); + AnomalyRecord earlierRecord = records.get(1); + assertThat(earlierRecord.getActual().get(0), equalTo(10.0)); + assertThat(laterRecord.getRecordScore(), greaterThan(earlierRecord.getRecordScore())); + + // This is the key assertion: if renormalization never happened then the record_score would + // be the same as the initial_record_score on the anomaly record that happened earlier + assertThat(earlierRecord.getInitialRecordScore(), 
greaterThan(earlierRecord.getRecordScore())); + + // Since this job ran for 50 buckets, it's a good place to assert + // that established model memory matches model memory in the job stats + assertBusy(() -> { + GetJobsStatsAction.Response.JobStats jobStats = getJobStats(jobId).get(0); + ModelSizeStats modelSizeStats = jobStats.getModelSizeStats(); + Job updatedJob = getJob(jobId).get(0); + assertThat(updatedJob.getEstablishedModelMemory(), equalTo(modelSizeStats.getModelBytes())); + }); + } + + public void testRenormalizationDisabled() throws Exception { + String jobId = "basic-renormalization-it-test-renormalization-disabled-job"; + createAndRunJob(jobId, 0L); + + List records = getRecords(jobId); + for (AnomalyRecord record : records) { + assertThat(record.getInitialRecordScore(), equalTo(record.getRecordScore())); + } + } + + private void createAndRunJob(String jobId, Long renormalizationWindow) throws Exception { + TimeValue bucketSpan = TimeValue.timeValueHours(1); + long startTime = 1491004800000L; + + Job.Builder job = buildAndRegisterJob(jobId, bucketSpan, renormalizationWindow); + openJob(job.getId()); + postData(job.getId(), generateData(startTime, bucketSpan, 50, + bucketIndex -> { + if (bucketIndex == 35) { + // First anomaly is 10 events + return 10; + } else if (bucketIndex == 45) { + // Second anomaly is 100, should get the highest score and should bring the first score down + return 100; + } else { + return 1; + } + }).stream().collect(Collectors.joining())); + closeJob(job.getId()); + } + + private Job.Builder buildAndRegisterJob(String jobId, TimeValue bucketSpan, Long renormalizationWindow) throws Exception { + Detector.Builder detector = new Detector.Builder("count", null); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Arrays.asList(detector.build())); + analysisConfig.setBucketSpan(bucketSpan); + Job.Builder job = new Job.Builder(jobId); + job.setAnalysisConfig(analysisConfig); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + job.setDataDescription(dataDescription); + if (renormalizationWindow != null) { + job.setRenormalizationWindowDays(renormalizationWindow); + } + registerJob(job); + putJob(job); + return job; + } +} diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java new file mode 100644 index 0000000000000..33a611fcb57e2 --- /dev/null +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CategorizationIT.java @@ -0,0 +1,227 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.results.CategoryDefinition; +import org.junit.After; +import org.junit.Before; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Locale; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +/** + * A fast integration test for categorization + */ +public class CategorizationIT extends MlNativeAutodetectIntegTestCase { + + private static final String DATA_INDEX = "log-data"; + private static final String DATA_TYPE = "log"; + + private long nowMillis; + + @Before + public void setUpData() { + client().admin().indices().prepareCreate(DATA_INDEX) + .addMapping(DATA_TYPE, "time", "type=date,format=epoch_millis", + "msg", "type=text") + .get(); + + nowMillis = System.currentTimeMillis(); + + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); + IndexRequest indexRequest = new IndexRequest(DATA_INDEX, DATA_TYPE); + indexRequest.source("time", nowMillis - TimeValue.timeValueHours(2).millis(), + "msg", "Node 1 started"); + bulkRequestBuilder.add(indexRequest); + indexRequest = new IndexRequest(DATA_INDEX, DATA_TYPE); + indexRequest.source("time", nowMillis - TimeValue.timeValueHours(2).millis() + 1, + "msg", "Failed to shutdown [error org.aaaa.bbbb.Cccc line 54 caused " + + "by foo exception]"); + bulkRequestBuilder.add(indexRequest); + indexRequest = new IndexRequest(DATA_INDEX, DATA_TYPE); + indexRequest.source("time", nowMillis - TimeValue.timeValueHours(1).millis(), + "msg", "Node 2 started"); + bulkRequestBuilder.add(indexRequest); + indexRequest = new IndexRequest(DATA_INDEX, DATA_TYPE); + indexRequest.source("time", nowMillis - TimeValue.timeValueHours(1).millis() + 1, + "msg", "Failed to shutdown [error but this time completely different]"); + bulkRequestBuilder.add(indexRequest); + indexRequest = new IndexRequest(DATA_INDEX, DATA_TYPE); + indexRequest.source("time", nowMillis, "msg", "Node 3 started"); + bulkRequestBuilder.add(indexRequest); + + BulkResponse bulkResponse = bulkRequestBuilder + .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL) + .get(); + assertThat(bulkResponse.hasFailures(), is(false)); + } + + @After + public void tearDownData() { + cleanUp(); + client().admin().indices().prepareDelete(DATA_INDEX).get(); + client().admin().indices().prepareRefresh("*").get(); + } + + public void testBasicCategorization() throws Exception { + Job.Builder job = newJobBuilder("categorization", Collections.emptyList()); + registerJob(job); + putJob(job); + openJob(job.getId()); + + String datafeedId = job.getId() + "-feed"; + DatafeedConfig.Builder datafeedConfig = new DatafeedConfig.Builder(datafeedId, job.getId()); + datafeedConfig.setIndices(Collections.singletonList(DATA_INDEX)); + 
datafeedConfig.setTypes(Collections.singletonList(DATA_TYPE)); + DatafeedConfig datafeed = datafeedConfig.build(); + registerDatafeed(datafeed); + putDatafeed(datafeed); + startDatafeed(datafeedId, 0, nowMillis); + waitUntilJobIsClosed(job.getId()); + + List<CategoryDefinition> categories = getCategories(job.getId()); + assertThat(categories.size(), equalTo(3)); + + CategoryDefinition category1 = categories.get(0); + assertThat(category1.getRegex(), equalTo(".*?Node.+?started.*")); + assertThat(category1.getExamples(), + equalTo(Arrays.asList("Node 1 started", "Node 2 started"))); + + CategoryDefinition category2 = categories.get(1); + assertThat(category2.getRegex(), equalTo(".*?Failed.+?to.+?shutdown.+?error.+?" + + "org\\.aaaa\\.bbbb\\.Cccc.+?line.+?caused.+?by.+?foo.+?exception.*")); + assertThat(category2.getExamples(), equalTo(Collections.singletonList( + "Failed to shutdown [error org.aaaa.bbbb.Cccc line 54 caused by foo exception]"))); + + CategoryDefinition category3 = categories.get(2); + assertThat(category3.getRegex(), equalTo(".*?Failed.+?to.+?shutdown.+?error.+?but.+?" + + "this.+?time.+?completely.+?different.*")); + assertThat(category3.getExamples(), equalTo(Collections.singletonList( + "Failed to shutdown [error but this time completely different]"))); + + openJob("categorization"); + startDatafeed(datafeedId, 0, nowMillis + 1); + waitUntilJobIsClosed(job.getId()); + + categories = getCategories(job.getId()); + assertThat(categories.size(), equalTo(3)); + assertThat(categories.get(0).getExamples(), + equalTo(Arrays.asList("Node 1 started", "Node 2 started", "Node 3 started"))); + } + + public void testCategorizationWithFilters() throws Exception { + Job.Builder job = newJobBuilder("categorization-with-filters", Collections.singletonList("\\[.*\\]")); + registerJob(job); + putJob(job); + openJob(job.getId()); + + String datafeedId = job.getId() + "-feed"; + DatafeedConfig.Builder datafeedConfig = new DatafeedConfig.Builder(datafeedId, job.getId()); + datafeedConfig.setIndices(Collections.singletonList(DATA_INDEX)); + datafeedConfig.setTypes(Collections.singletonList(DATA_TYPE)); + DatafeedConfig datafeed = datafeedConfig.build(); + registerDatafeed(datafeed); + putDatafeed(datafeed); + startDatafeed(datafeedId, 0, nowMillis); + waitUntilJobIsClosed(job.getId()); + + List<CategoryDefinition> categories = getCategories(job.getId()); + assertThat(categories.size(), equalTo(2)); + + CategoryDefinition category1 = categories.get(0); + assertThat(category1.getRegex(), equalTo(".*?Node.+?started.*")); + assertThat(category1.getExamples(), + equalTo(Arrays.asList("Node 1 started", "Node 2 started"))); + + CategoryDefinition category2 = categories.get(1); + assertThat(category2.getRegex(), equalTo(".*?Failed.+?to.+?shutdown.*")); + assertThat(category2.getExamples(), equalTo(Arrays.asList( + "Failed to shutdown [error but this time completely different]", + "Failed to shutdown [error org.aaaa.bbbb.Cccc line 54 caused by foo exception]"))); + } + + public void testCategorizationPerformance() { + // To compare Java/C++ tokenization performance: + // 1. Change false to true in this assumption + // 2. Run the test several times + // 3. Change MachineLearning.CATEGORIZATION_TOKENIZATION_IN_JAVA to false + // 4. Run the test several more times + // 5. Check the timings that get logged + // 6.
Revert the changes to this assumption and MachineLearning.CATEGORIZATION_TOKENIZATION_IN_JAVA + assumeTrue("This is time consuming to run on every build - it should be run manually when comparing Java/C++ tokenization", + false); + + int testBatchSize = 1000; + int testNumBatches = 1000; + String[] possibleMessages = new String[] { + " Source LOTS on 33080:817 has shut down.", + " P2PS failed to connect to the hrm server. " + + "Reason: Failed to connect to hrm server - No ACK from SIPC", + " Did not receive an image data for IDN_SELECTFEED:7630.T on 493. " + + "Recalling item. ", + " " + + "RRCP STATUS MSG: RRCP_REBOOT: node 33191 has rebooted", + " Source PRISM_VOBr on 33069:757 has shut down.", + " Service PRISM_VOB has shut down." + }; + + String jobId = "categorization-performance"; + Job.Builder job = newJobBuilder(jobId, Collections.emptyList()); + registerJob(job); + putJob(job); + openJob(job.getId()); + + long startTime = System.currentTimeMillis(); + + for (int batchNum = 0; batchNum < testNumBatches; ++batchNum) { + StringBuilder json = new StringBuilder(testBatchSize * 100); + for (int docNum = 0; docNum < testBatchSize; ++docNum) { + json.append(String.format(Locale.ROOT, "{\"time\":1000000,\"msg\":\"%s\"}\n", + possibleMessages[docNum % possibleMessages.length])); + } + postData(jobId, json.toString()); + } + flushJob(jobId, false); + + long duration = System.currentTimeMillis() - startTime; + Loggers.getLogger(CategorizationIT.class).info("Performance test with tokenization in " + + (MachineLearning.CATEGORIZATION_TOKENIZATION_IN_JAVA ? "Java" : "C++") + " took " + duration + "ms"); + } + + private static Job.Builder newJobBuilder(String id, List categorizationFilters) { + Detector.Builder detector = new Detector.Builder(); + detector.setFunction("count"); + detector.setByFieldName("mlcategory"); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder( + Collections.singletonList(detector.build())); + analysisConfig.setBucketSpan(TimeValue.timeValueHours(1)); + analysisConfig.setCategorizationFieldName("msg"); + analysisConfig.setCategorizationFilters(categorizationFilters); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeField("time"); + Job.Builder jobBuilder = new Job.Builder(id); + jobBuilder.setAnalysisConfig(analysisConfig); + jobBuilder.setDataDescription(dataDescription); + return jobBuilder; + } +} diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java new file mode 100644 index 0000000000000..23bd9dc849561 --- /dev/null +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsIT.java @@ -0,0 +1,316 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads; +import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; +import org.elasticsearch.xpack.core.ml.action.DeleteDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; +import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; +import org.elasticsearch.xpack.core.ml.action.KillProcessAction; +import org.elasticsearch.xpack.core.ml.action.PutJobAction; +import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; +import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.config.JobState; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; +import org.junit.After; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createDatafeed; +import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createDatafeedBuilder; +import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createScheduledJob; +import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.getDataCounts; +import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.indexDocs; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +public class DatafeedJobsIT extends MlNativeAutodetectIntegTestCase { + + @After + public void cleanup() throws Exception { + cleanUp(); + } + + public void testLookbackOnly() throws Exception { + client().admin().indices().prepareCreate("data-1") + .addMapping("type", "time", "type=date") + .get(); + long numDocs = randomIntBetween(32, 2048); + long now = System.currentTimeMillis(); + long oneWeekAgo = now - 604800000; + long twoWeeksAgo = oneWeekAgo - 604800000; + indexDocs(logger, "data-1", numDocs, twoWeeksAgo, oneWeekAgo); + + client().admin().indices().prepareCreate("data-2") + .addMapping("type", "time", "type=date") + .get(); + client().admin().cluster().prepareHealth("data-1", "data-2").setWaitForYellowStatus().get(); + long numDocs2 = randomIntBetween(32, 2048); + indexDocs(logger, "data-2", numDocs2, oneWeekAgo, now); + + Job.Builder job = createScheduledJob("lookback-job"); + registerJob(job); + PutJobAction.Response putJobResponse = putJob(job); + assertThat(putJobResponse.getResponse().getJobVersion(), equalTo(Version.CURRENT)); + openJob(job.getId()); + assertBusy(() -> assertEquals(getJobStats(job.getId()).get(0).getState(), JobState.OPENED)); + + List t = new ArrayList<>(2); + t.add("data-1"); + t.add("data-2"); + DatafeedConfig datafeedConfig = createDatafeed(job.getId() + "-datafeed", job.getId(), t); + registerDatafeed(datafeedConfig); + putDatafeed(datafeedConfig); + + 
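+ // Lookback-only run: the datafeed is started with an end time of 'now', so it stops itself once all existing data has been processed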
startDatafeed(datafeedConfig.getId(), 0L, now); + assertBusy(() -> { + DataCounts dataCounts = getDataCounts(job.getId()); + assertThat(dataCounts.getProcessedRecordCount(), equalTo(numDocs + numDocs2)); + assertThat(dataCounts.getOutOfOrderTimeStampCount(), equalTo(0L)); + + GetDatafeedsStatsAction.Request request = new GetDatafeedsStatsAction.Request(datafeedConfig.getId()); + GetDatafeedsStatsAction.Response response = client().execute(GetDatafeedsStatsAction.INSTANCE, request).actionGet(); + assertThat(response.getResponse().results().get(0).getDatafeedState(), equalTo(DatafeedState.STOPPED)); + }, 60, TimeUnit.SECONDS); + + waitUntilJobIsClosed(job.getId()); + + // Since this job ran for 168 buckets, it's a good place to assert + // that established model memory matches model memory in the job stats + assertBusy(() -> { + GetJobsStatsAction.Response.JobStats jobStats = getJobStats(job.getId()).get(0); + ModelSizeStats modelSizeStats = jobStats.getModelSizeStats(); + Job updatedJob = getJob(job.getId()).get(0); + assertThat(updatedJob.getEstablishedModelMemory(), equalTo(modelSizeStats.getModelBytes())); + }); + } + + public void testRealtime() throws Exception { + String jobId = "realtime-job"; + String datafeedId = jobId + "-datafeed"; + startRealtime(jobId); + + try { + StopDatafeedAction.Response stopJobResponse = stopDatafeed(datafeedId); + assertTrue(stopJobResponse.isStopped()); + } catch (Exception e) { + NodesHotThreadsResponse nodesHotThreadsResponse = client().admin().cluster().prepareNodesHotThreads().get(); + int i = 0; + for (NodeHotThreads nodeHotThreads : nodesHotThreadsResponse.getNodes()) { + logger.info(i++ + ":\n" +nodeHotThreads.getHotThreads()); + } + throw e; + } + assertBusy(() -> { + GetDatafeedsStatsAction.Request request = new GetDatafeedsStatsAction.Request(datafeedId); + GetDatafeedsStatsAction.Response response = client().execute(GetDatafeedsStatsAction.INSTANCE, request).actionGet(); + assertThat(response.getResponse().results().get(0).getDatafeedState(), equalTo(DatafeedState.STOPPED)); + }); + } + + public void testRealtime_multipleStopCalls() throws Exception { + String jobId = "realtime-job-multiple-stop"; + final String datafeedId = jobId + "-datafeed"; + startRealtime(jobId); + + ConcurrentMapLong exceptions = ConcurrentCollections.newConcurrentMapLong(); + + // It's practically impossible to assert that a stop request has waited + // for a concurrently executing request to finish before returning. + // But we can assert the data feed has stopped after the request returns. + Runnable stopDataFeed = () -> { + StopDatafeedAction.Response stopJobResponse = stopDatafeed(datafeedId); + if (stopJobResponse.isStopped() == false) { + exceptions.put(Thread.currentThread().getId(), new AssertionError("Job is not stopped")); + } + + GetDatafeedsStatsAction.Request request = new GetDatafeedsStatsAction.Request(datafeedId); + GetDatafeedsStatsAction.Response response = client().execute(GetDatafeedsStatsAction.INSTANCE, request).actionGet(); + if (response.getResponse().results().get(0).getDatafeedState() != DatafeedState.STOPPED) { + exceptions.put(Thread.currentThread().getId(), + new AssertionError("Expected STOPPED datafeed state got " + + response.getResponse().results().get(0).getDatafeedState())); + } + }; + + // The idea is to hit the situation where one request waits for + // the other to complete. 
This is difficult to schedule but + // hopefully it will happen in CI + int numThreads = 5; + Thread [] threads = new Thread[numThreads]; + for (int i=0; i<numThreads; i++) { + threads[i] = new Thread(stopDataFeed); + threads[i].start(); + } + for (int i=0; i<numThreads; i++) { + threads[i].join(); + } + + assertThat(exceptions.size(), equalTo(0)); + } + + public void testRealtime_GivenSimultaneousStopAndForceDelete() throws Throwable { + String jobId = "realtime-job-stop-and-force-delete"; + String datafeedId = jobId + "-datafeed"; + startRealtime(jobId); + + AtomicReference<Throwable> exception = new AtomicReference<>(); + + // The UI now force deletes datafeeds, which means they can be deleted while running. + // The first step is to isolate the datafeed. But if it was already being stopped then + // the datafeed may not be running by the time the isolate action is executed. This + // test will sometimes (depending on thread scheduling) achieve this situation and ensure + // the code is robust to it. + Thread deleteDatafeedThread = new Thread(() -> { + try { + DeleteDatafeedAction.Request request = new DeleteDatafeedAction.Request(datafeedId); + request.setForce(true); + DeleteDatafeedAction.Response response = client().execute(DeleteDatafeedAction.INSTANCE, request).actionGet(); + if (response.isAcknowledged()) { + GetDatafeedsStatsAction.Request statsRequest = new GetDatafeedsStatsAction.Request(datafeedId); + expectThrows(ResourceNotFoundException.class, + () -> client().execute(GetDatafeedsStatsAction.INSTANCE, statsRequest).actionGet()); + } else { + exception.set(new AssertionError("Job is not deleted")); + } + } catch (AssertionError | Exception e) { + exception.set(e); + } + }); + deleteDatafeedThread.start(); + + try { + stopDatafeed(datafeedId); + } catch (ResourceNotFoundException e) { + // This is OK - it means the thread running the delete fully completed before the stop started to execute + } finally { + deleteDatafeedThread.join(); + } + + if (exception.get() != null) { + throw exception.get(); + } + } + + public void testRealtime_GivenProcessIsKilled() throws Exception { + String jobId = "realtime-job-given-process-is-killed"; + String datafeedId = jobId + "-datafeed"; + startRealtime(jobId); + + KillProcessAction.Request killRequest = new KillProcessAction.Request(jobId); + client().execute(KillProcessAction.INSTANCE, killRequest).actionGet(); + + assertBusy(() -> { + GetDatafeedsStatsAction.Request request = new GetDatafeedsStatsAction.Request(datafeedId); + GetDatafeedsStatsAction.Response response = client().execute(GetDatafeedsStatsAction.INSTANCE, request).actionGet(); + assertThat(response.getResponse().results().get(0).getDatafeedState(), equalTo(DatafeedState.STOPPED)); + }); + } + + /** + * Stopping a lookback closes the associated job _after_ the stop call returns. + * This test ensures that a kill request submitted during this close doesn't + * put the job into the "failed" state.
+ */ + public void testStopLookbackFollowedByProcessKill() throws Exception { + client().admin().indices().prepareCreate("data") + .addMapping("type", "time", "type=date") + .get(); + long numDocs = randomIntBetween(1024, 2048); + long now = System.currentTimeMillis(); + long oneWeekAgo = now - 604800000; + long twoWeeksAgo = oneWeekAgo - 604800000; + indexDocs(logger, "data", numDocs, twoWeeksAgo, oneWeekAgo); + + Job.Builder job = createScheduledJob("lookback-job-stopped-then-killed"); + registerJob(job); + PutJobAction.Response putJobResponse = putJob(job); + assertThat(putJobResponse.getResponse().getJobVersion(), equalTo(Version.CURRENT)); + openJob(job.getId()); + assertBusy(() -> assertEquals(getJobStats(job.getId()).get(0).getState(), JobState.OPENED)); + + List t = Collections.singletonList("data"); + DatafeedConfig.Builder datafeedConfigBuilder = createDatafeedBuilder(job.getId() + "-datafeed", job.getId(), t); + // Use lots of chunks so we have time to stop the lookback before it completes + datafeedConfigBuilder.setChunkingConfig(ChunkingConfig.newManual(new TimeValue(1, TimeUnit.SECONDS))); + DatafeedConfig datafeedConfig = datafeedConfigBuilder.build(); + registerDatafeed(datafeedConfig); + putDatafeed(datafeedConfig); + startDatafeed(datafeedConfig.getId(), 0L, now); + assertBusy(() -> { + DataCounts dataCounts = getDataCounts(job.getId()); + assertThat(dataCounts.getProcessedRecordCount(), greaterThan(0L)); + }, 60, TimeUnit.SECONDS); + + stopDatafeed(datafeedConfig.getId()); + + // At this point, stopping the datafeed will have submitted a request for the job to close. + // Depending on thread scheduling, the following kill request might overtake it. The Thread.sleep() + // call here makes it more likely; to make it inevitable for testing also add a Thread.sleep(10) + // immediately before the checkProcessIsAlive() call in AutodetectCommunicator.close(). + Thread.sleep(randomIntBetween(1, 9)); + + KillProcessAction.Request killRequest = new KillProcessAction.Request(job.getId()); + client().execute(KillProcessAction.INSTANCE, killRequest).actionGet(); + + // This should close very quickly, as we killed the process. If the job goes into the "failed" + // state that's wrong and this test will fail. 
+ waitUntilJobIsClosed(job.getId(), TimeValue.timeValueSeconds(2)); + } + + private void startRealtime(String jobId) throws Exception { + client().admin().indices().prepareCreate("data") + .addMapping("type", "time", "type=date") + .get(); + long numDocs1 = randomIntBetween(32, 2048); + long now = System.currentTimeMillis(); + long lastWeek = now - 604800000; + indexDocs(logger, "data", numDocs1, lastWeek, now); + + Job.Builder job = createScheduledJob(jobId); + registerJob(job); + putJob(job); + openJob(job.getId()); + assertBusy(() -> assertEquals(getJobStats(job.getId()).get(0).getState(), JobState.OPENED)); + + DatafeedConfig datafeedConfig = createDatafeed(job.getId() + "-datafeed", job.getId(), Collections.singletonList("data")); + registerDatafeed(datafeedConfig); + putDatafeed(datafeedConfig); + startDatafeed(datafeedConfig.getId(), 0L, null); + assertBusy(() -> { + DataCounts dataCounts = getDataCounts(job.getId()); + assertThat(dataCounts.getProcessedRecordCount(), equalTo(numDocs1)); + assertThat(dataCounts.getOutOfOrderTimeStampCount(), equalTo(0L)); + }); + + long numDocs2 = randomIntBetween(2, 64); + now = System.currentTimeMillis(); + indexDocs(logger, "data", numDocs2, now + 5000, now + 6000); + assertBusy(() -> { + DataCounts dataCounts = getDataCounts(job.getId()); + assertThat(dataCounts.getProcessedRecordCount(), equalTo(numDocs1 + numDocs2)); + assertThat(dataCounts.getOutOfOrderTimeStampCount(), equalTo(0L)); + }, 30, TimeUnit.SECONDS); + } +} diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java new file mode 100644 index 0000000000000..6731e27aaac19 --- /dev/null +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java @@ -0,0 +1,866 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.integration; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.ml.integration.MlRestTestStateCleaner; +import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.core.ml.notifications.AuditorField; +import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; +import org.junit.After; +import org.junit.Before; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.Locale; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class DatafeedJobsRestIT extends ESRestTestCase { + + private static final String BASIC_AUTH_VALUE_SUPER_USER = + basicAuthHeaderValue("x_pack_rest_user", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); + private static final String BASIC_AUTH_VALUE_ML_ADMIN = + basicAuthHeaderValue("ml_admin", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); + private static final String BASIC_AUTH_VALUE_ML_ADMIN_WITH_SOME_DATA_ACCESS = + basicAuthHeaderValue("ml_admin_plus_data", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); + + @Override + protected Settings restClientSettings() { + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE_SUPER_USER).build(); + } + + @Override + protected boolean preserveTemplatesUponCompletion() { + return true; + } + + private void setupDataAccessRole(String index) throws IOException { + String json = "{" + + " \"indices\" : [" + + " { \"names\": [\"" + index + "\"], \"privileges\": [\"read\"] }" + + " ]" + + "}"; + + client().performRequest("put", "_xpack/security/role/test_data_access", Collections.emptyMap(), + new StringEntity(json, ContentType.APPLICATION_JSON)); + } + + private void setupUser(String user, List roles) throws IOException { + String password = new String(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING.getChars()); + + String json = "{" + + " \"password\" : \"" + password + "\"," + + " \"roles\" : [ " + roles.stream().map(unquoted -> "\"" + unquoted + "\"").collect(Collectors.joining(", ")) + " ]" + + "}"; + + client().performRequest("put", "_xpack/security/user/" + user, Collections.emptyMap(), + new StringEntity(json, ContentType.APPLICATION_JSON)); + } + + @Before + public void setUpData() throws Exception { + setupDataAccessRole("network-data"); + // This user has admin rights on machine learning, but (importantly for the tests) no rights + // on any of the data indexes + setupUser("ml_admin", Collections.singletonList("machine_learning_admin")); + // This user has admin rights on machine learning, and read access to the network-data index + setupUser("ml_admin_plus_data", 
Arrays.asList("machine_learning_admin", "test_data_access")); + addAirlineData(); + addNetworkData("network-data"); + } + + private void addAirlineData() throws IOException { + String mappings = "{" + + " \"mappings\": {" + + " \"response\": {" + + " \"properties\": {" + + " \"time stamp\": { \"type\":\"date\"}," // space in 'time stamp' is intentional + + " \"airline\": { \"type\":\"keyword\"}," + + " \"responsetime\": { \"type\":\"float\"}" + + " }" + + " }" + + " }" + + "}"; + client().performRequest("put", "airline-data-empty", Collections.emptyMap(), + new StringEntity(mappings, ContentType.APPLICATION_JSON)); + + // Create index with source = enabled, doc_values = enabled, stored = false + multi-field + mappings = "{" + + " \"mappings\": {" + + " \"response\": {" + + " \"properties\": {" + + " \"time stamp\": { \"type\":\"date\"}," // space in 'time stamp' is intentional + + " \"airline\": {" + + " \"type\":\"text\"," + + " \"fields\":{" + + " \"text\":{\"type\":\"text\"}," + + " \"keyword\":{\"type\":\"keyword\"}" + + " }" + + " }," + + " \"responsetime\": { \"type\":\"float\"}" + + " }" + + " }" + + " }" + + "}"; + client().performRequest("put", "airline-data", Collections.emptyMap(), new StringEntity(mappings, ContentType.APPLICATION_JSON)); + + client().performRequest("put", "airline-data/response/1", Collections.emptyMap(), + new StringEntity("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}", + ContentType.APPLICATION_JSON)); + client().performRequest("put", "airline-data/response/2", Collections.emptyMap(), + new StringEntity("{\"time stamp\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}", + ContentType.APPLICATION_JSON)); + + // Create index with source = enabled, doc_values = disabled (except time), stored = false + mappings = "{" + + " \"mappings\": {" + + " \"response\": {" + + " \"properties\": {" + + " \"time stamp\": { \"type\":\"date\"}," + + " \"airline\": { \"type\":\"keyword\", \"doc_values\":false}," + + " \"responsetime\": { \"type\":\"float\", \"doc_values\":false}" + + " }" + + " }" + + " }" + + "}"; + client().performRequest("put", "airline-data-disabled-doc-values", Collections.emptyMap(), + new StringEntity(mappings, ContentType.APPLICATION_JSON)); + + client().performRequest("put", "airline-data-disabled-doc-values/response/1", Collections.emptyMap(), + new StringEntity("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}", + ContentType.APPLICATION_JSON)); + client().performRequest("put", "airline-data-disabled-doc-values/response/2", Collections.emptyMap(), + new StringEntity("{\"time stamp\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}", + ContentType.APPLICATION_JSON)); + + // Create index with source = disabled, doc_values = enabled (except time), stored = true + mappings = "{" + + " \"mappings\": {" + + " \"response\": {" + + " \"_source\":{\"enabled\":false}," + + " \"properties\": {" + + " \"time stamp\": { \"type\":\"date\", \"store\":true}," + + " \"airline\": { \"type\":\"keyword\", \"store\":true}," + + " \"responsetime\": { \"type\":\"float\", \"store\":true}" + + " }" + + " }" + + " }" + + "}"; + client().performRequest("put", "airline-data-disabled-source", Collections.emptyMap(), + new StringEntity(mappings, ContentType.APPLICATION_JSON)); + + client().performRequest("put", "airline-data-disabled-source/response/1", Collections.emptyMap(), + new StringEntity("{\"time 
stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":135.22}", + ContentType.APPLICATION_JSON)); + client().performRequest("put", "airline-data-disabled-source/response/2", Collections.emptyMap(), + new StringEntity("{\"time stamp\":\"2016-06-01T01:59:00Z\",\"airline\":\"AAA\",\"responsetime\":541.76}", + ContentType.APPLICATION_JSON)); + + // Create index with nested documents + mappings = "{" + + " \"mappings\": {" + + " \"response\": {" + + " \"properties\": {" + + " \"time\": { \"type\":\"date\"}" + + " }" + + " }" + + " }" + + "}"; + client().performRequest("put", "nested-data", Collections.emptyMap(), new StringEntity(mappings, ContentType.APPLICATION_JSON)); + + client().performRequest("put", "nested-data/response/1", Collections.emptyMap(), + new StringEntity("{\"time\":\"2016-06-01T00:00:00Z\", \"responsetime\":{\"millis\":135.22}}", + ContentType.APPLICATION_JSON)); + client().performRequest("put", "nested-data/response/2", Collections.emptyMap(), + new StringEntity("{\"time\":\"2016-06-01T01:59:00Z\",\"responsetime\":{\"millis\":222.0}}", + ContentType.APPLICATION_JSON)); + + // Create index with multiple docs per time interval for aggregation testing + mappings = "{" + + " \"mappings\": {" + + " \"response\": {" + + " \"properties\": {" + + " \"time stamp\": { \"type\":\"date\"}," // space in 'time stamp' is intentional + + " \"airline\": { \"type\":\"keyword\"}," + + " \"responsetime\": { \"type\":\"float\"}" + + " }" + + " }" + + " }" + + "}"; + client().performRequest("put", "airline-data-aggs", Collections.emptyMap(), + new StringEntity(mappings, ContentType.APPLICATION_JSON)); + + client().performRequest("put", "airline-data-aggs/response/1", Collections.emptyMap(), + new StringEntity("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"AAA\",\"responsetime\":100.0}", + ContentType.APPLICATION_JSON)); + client().performRequest("put", "airline-data-aggs/response/2", Collections.emptyMap(), + new StringEntity("{\"time stamp\":\"2016-06-01T00:01:00Z\",\"airline\":\"AAA\",\"responsetime\":200.0}", + ContentType.APPLICATION_JSON)); + client().performRequest("put", "airline-data-aggs/response/3", Collections.emptyMap(), + new StringEntity("{\"time stamp\":\"2016-06-01T00:00:00Z\",\"airline\":\"BBB\",\"responsetime\":1000.0}", + ContentType.APPLICATION_JSON)); + client().performRequest("put", "airline-data-aggs/response/4", Collections.emptyMap(), + new StringEntity("{\"time stamp\":\"2016-06-01T00:01:00Z\",\"airline\":\"BBB\",\"responsetime\":2000.0}", + ContentType.APPLICATION_JSON)); + client().performRequest("put", "airline-data-aggs/response/5", Collections.emptyMap(), + new StringEntity("{\"time stamp\":\"2016-06-01T01:00:00Z\",\"airline\":\"AAA\",\"responsetime\":300.0}", + ContentType.APPLICATION_JSON)); + client().performRequest("put", "airline-data-aggs/response/6", Collections.emptyMap(), + new StringEntity("{\"time stamp\":\"2016-06-01T01:01:00Z\",\"airline\":\"AAA\",\"responsetime\":400.0}", + ContentType.APPLICATION_JSON)); + client().performRequest("put", "airline-data-aggs/response/7", Collections.emptyMap(), + new StringEntity("{\"time stamp\":\"2016-06-01T01:00:00Z\",\"airline\":\"BBB\",\"responsetime\":3000.0}", + ContentType.APPLICATION_JSON)); + client().performRequest("put", "airline-data-aggs/response/8", Collections.emptyMap(), + new StringEntity("{\"time stamp\":\"2016-06-01T01:01:00Z\",\"airline\":\"BBB\",\"responsetime\":4000.0}", + ContentType.APPLICATION_JSON)); + + // Ensure all data is searchable + client().performRequest("post", 
"_refresh"); + } + + private void addNetworkData(String index) throws IOException { + + // Create index with source = enabled, doc_values = enabled, stored = false + multi-field + String mappings = "{" + + " \"mappings\": {" + + " \"doc\": {" + + " \"properties\": {" + + " \"timestamp\": { \"type\":\"date\"}," + + " \"host\": {" + + " \"type\":\"text\"," + + " \"fields\":{" + + " \"text\":{\"type\":\"text\"}," + + " \"keyword\":{\"type\":\"keyword\"}" + + " }" + + " }," + + " \"network_bytes_out\": { \"type\":\"long\"}" + + " }" + + " }" + + " }" + + "}"; + client().performRequest("put", index, Collections.emptyMap(), new StringEntity(mappings, ContentType.APPLICATION_JSON)); + + String docTemplate = "{\"timestamp\":%d,\"host\":\"%s\",\"network_bytes_out\":%d}"; + Date date = new Date(1464739200735L); + for (int i=0; i<120; i++) { + long byteCount = randomNonNegativeLong(); + String jsonDoc = String.format(Locale.ROOT, docTemplate, date.getTime(), "hostA", byteCount); + client().performRequest("post", index + "/doc", Collections.emptyMap(), + new StringEntity(jsonDoc, ContentType.APPLICATION_JSON)); + + byteCount = randomNonNegativeLong(); + jsonDoc = String.format(Locale.ROOT, docTemplate, date.getTime(), "hostB", byteCount); + client().performRequest("post", index + "/doc", Collections.emptyMap(), + new StringEntity(jsonDoc, ContentType.APPLICATION_JSON)); + + date = new Date(date.getTime() + 10_000); + } + + // Ensure all data is searchable + client().performRequest("post", "_refresh"); + } + + public void testLookbackOnlyWithMixedTypes() throws Exception { + new LookbackOnlyTestHelper("test-lookback-only-with-mixed-types", "airline-data") + .setShouldSucceedProcessing(true).execute(); + } + + public void testLookbackOnlyWithKeywordMultiField() throws Exception { + new LookbackOnlyTestHelper("test-lookback-only-with-keyword-multi-field", "airline-data") + .setAirlineVariant("airline.keyword").setShouldSucceedProcessing(true).execute(); + } + + public void testLookbackOnlyWithTextMultiField() throws Exception { + new LookbackOnlyTestHelper("test-lookback-only-with-keyword-multi-field", "airline-data") + .setAirlineVariant("airline.text").setShouldSucceedProcessing(true).execute(); + } + + public void testLookbackOnlyWithDocValuesDisabled() throws Exception { + new LookbackOnlyTestHelper("test-lookback-only-with-doc-values-disabled", "airline-data-disabled-doc-values").execute(); + } + + public void testLookbackOnlyWithSourceDisabled() throws Exception { + new LookbackOnlyTestHelper("test-lookback-only-with-source-disabled", "airline-data-disabled-source").execute(); + } + + @AwaitsFix(bugUrl = "This test uses painless which is not available in the integTest phase") + public void testLookbackOnlyWithScriptFields() throws Exception { + new LookbackOnlyTestHelper("test-lookback-only-with-script-fields", "airline-data-disabled-source") + .setAddScriptedFields(true).execute(); + } + + public void testLookbackOnlyWithNestedFields() throws Exception { + String jobId = "test-lookback-only-with-nested-fields"; + String job = "{\"description\":\"Nested job\", \"analysis_config\" : {\"bucket_span\":\"1h\",\"detectors\" :" + + "[{\"function\":\"mean\",\"field_name\":\"responsetime.millis\"}]}, \"data_description\" : {\"time_field\":\"time\"}" + + "}"; + client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(), + new StringEntity(job, ContentType.APPLICATION_JSON)); + + String datafeedId = jobId + "-datafeed"; + new DatafeedBuilder(datafeedId, 
jobId, "nested-data", "response").build(); + openJob(client(), jobId); + + startDatafeedAndWaitUntilStopped(datafeedId); + waitUntilJobIsClosed(jobId); + Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); + String jobStatsResponseAsString = responseEntityToString(jobStatsResponse); + assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":2")); + assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":2")); + assertThat(jobStatsResponseAsString, containsString("\"missing_field_count\":0")); + } + + public void testLookbackOnlyGivenEmptyIndex() throws Exception { + new LookbackOnlyTestHelper("test-lookback-only-given-empty-index", "airline-data-empty") + .setShouldSucceedInput(false).setShouldSucceedProcessing(false).execute(); + } + + public void testInsufficientSearchPrivilegesOnPut() throws Exception { + String jobId = "privs-put-job"; + String job = "{\"description\":\"Aggs job\",\"analysis_config\" :{\"bucket_span\":\"1h\"," + + "\"summary_count_field_name\":\"doc_count\"," + + "\"detectors\":[{\"function\":\"mean\"," + + "\"field_name\":\"responsetime\",\"by_field_name\":\"airline\"}]}," + + "\"data_description\" : {\"time_field\":\"time stamp\"}" + + "}"; + client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, + Collections.emptyMap(), new StringEntity(job, ContentType.APPLICATION_JSON)); + + String datafeedId = "datafeed-" + jobId; + // This should be disallowed, because even though the ml_admin user has permission to + // create a datafeed they DON'T have permission to search the index the datafeed is + // configured to read + ResponseException e = expectThrows(ResponseException.class, () -> + new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs", "response") + .setAuthHeader(BASIC_AUTH_VALUE_ML_ADMIN) + .build()); + + assertThat(e.getMessage(), containsString("Cannot create datafeed")); + assertThat(e.getMessage(), + containsString("user ml_admin lacks permissions on the indices to be searched")); + } + + public void testInsufficientSearchPrivilegesOnPreview() throws Exception { + String jobId = "privs-preview-job"; + String job = "{\"description\":\"Aggs job\",\"analysis_config\" :{\"bucket_span\":\"1h\"," + + "\"summary_count_field_name\":\"doc_count\"," + + "\"detectors\":[{\"function\":\"mean\"," + + "\"field_name\":\"responsetime\",\"by_field_name\":\"airline\"}]}," + + "\"data_description\" : {\"time_field\":\"time stamp\"}" + + "}"; + client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, + Collections.emptyMap(), new StringEntity(job, ContentType.APPLICATION_JSON)); + + String datafeedId = "datafeed-" + jobId; + new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs", "response").build(); + + // This should be disallowed, because ml_admin is trying to preview a datafeed created by + // by another user (x_pack_rest_user in this case) that will reveal the content of an index they + // don't have permission to search directly + ResponseException e = expectThrows(ResponseException.class, () -> + client().performRequest("get", + MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_preview", + new BasicHeader("Authorization", BASIC_AUTH_VALUE_ML_ADMIN))); + + assertThat(e.getMessage(), + containsString("[indices:data/read/field_caps] is unauthorized for user [ml_admin]")); + } + + public void testLookbackOnlyGivenAggregationsWithHistogram() throws Exception { + String jobId = 
"aggs-histogram-job"; + String job = "{\"description\":\"Aggs job\",\"analysis_config\" :{\"bucket_span\":\"1h\"," + + "\"summary_count_field_name\":\"doc_count\"," + + "\"detectors\":[{\"function\":\"mean\",\"field_name\":\"responsetime\",\"by_field_name\":\"airline\"}]}," + + "\"data_description\" : {\"time_field\":\"time stamp\"}" + + "}"; + client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(), + new StringEntity(job, ContentType.APPLICATION_JSON)); + + String datafeedId = "datafeed-" + jobId; + String aggregations = "{\"buckets\":{\"histogram\":{\"field\":\"time stamp\",\"interval\":3600000}," + + "\"aggregations\":{" + + "\"time stamp\":{\"max\":{\"field\":\"time stamp\"}}," + + "\"airline\":{\"terms\":{\"field\":\"airline\",\"size\":10}," + + " \"aggregations\":{\"responsetime\":{\"avg\":{\"field\":\"responsetime\"}}}}}}}"; + new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs", "response").setAggregations(aggregations).build(); + openJob(client(), jobId); + + startDatafeedAndWaitUntilStopped(datafeedId); + waitUntilJobIsClosed(jobId); + Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); + String jobStatsResponseAsString = responseEntityToString(jobStatsResponse); + assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":4")); + assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":4")); + assertThat(jobStatsResponseAsString, containsString("\"missing_field_count\":0")); + } + + public void testLookbackOnlyGivenAggregationsWithDateHistogram() throws Exception { + String jobId = "aggs-date-histogram-job"; + String job = "{\"description\":\"Aggs job\",\"analysis_config\" :{\"bucket_span\":\"3600s\"," + + "\"summary_count_field_name\":\"doc_count\"," + + "\"detectors\":[{\"function\":\"mean\",\"field_name\":\"responsetime\",\"by_field_name\":\"airline\"}]}," + + "\"data_description\" : {\"time_field\":\"time stamp\"}" + + "}"; + client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(), + new StringEntity(job, ContentType.APPLICATION_JSON)); + + String datafeedId = "datafeed-" + jobId; + String aggregations = "{\"time stamp\":{\"date_histogram\":{\"field\":\"time stamp\",\"interval\":\"1h\"}," + + "\"aggregations\":{" + + "\"time stamp\":{\"max\":{\"field\":\"time stamp\"}}," + + "\"airline\":{\"terms\":{\"field\":\"airline\",\"size\":10}," + + " \"aggregations\":{\"responsetime\":{\"avg\":{\"field\":\"responsetime\"}}}}}}}"; + new DatafeedBuilder(datafeedId, jobId, "airline-data-aggs", "response").setAggregations(aggregations).build(); + openJob(client(), jobId); + + startDatafeedAndWaitUntilStopped(datafeedId); + waitUntilJobIsClosed(jobId); + Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); + String jobStatsResponseAsString = responseEntityToString(jobStatsResponse); + assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":4")); + assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":4")); + assertThat(jobStatsResponseAsString, containsString("\"missing_field_count\":0")); + } + + public void testLookbackUsingDerivativeAggWithLargerHistogramBucketThanDataRate() throws Exception { + String jobId = "derivative-agg-network-job"; + String job = "{\"analysis_config\" :{\"bucket_span\":\"300s\"," + + 
"\"summary_count_field_name\":\"doc_count\"," + + "\"detectors\":[{\"function\":\"mean\",\"field_name\":\"bytes-delta\",\"by_field_name\":\"hostname\"}]}," + + "\"data_description\" : {\"time_field\":\"timestamp\"}" + + "}"; + client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(), + new StringEntity(job, ContentType.APPLICATION_JSON)); + + String datafeedId = "datafeed-" + jobId; + String aggregations = + "{\"hostname\": {\"terms\" : {\"field\": \"host.keyword\", \"size\":10}," + + "\"aggs\": {\"buckets\": {\"date_histogram\":{\"field\":\"timestamp\",\"interval\":\"60s\"}," + + "\"aggs\": {\"timestamp\":{\"max\":{\"field\":\"timestamp\"}}," + + "\"bytes-delta\":{\"derivative\":{\"buckets_path\":\"avg_bytes_out\"}}," + + "\"avg_bytes_out\":{\"avg\":{\"field\":\"network_bytes_out\"}} }}}}}"; + new DatafeedBuilder(datafeedId, jobId, "network-data", "doc") + .setAggregations(aggregations) + .setChunkingTimespan("300s") + .build(); + + openJob(client(), jobId); + + startDatafeedAndWaitUntilStopped(datafeedId); + waitUntilJobIsClosed(jobId); + Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); + String jobStatsResponseAsString = responseEntityToString(jobStatsResponse); + assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":40")); + assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":40")); + assertThat(jobStatsResponseAsString, containsString("\"out_of_order_timestamp_count\":0")); + assertThat(jobStatsResponseAsString, containsString("\"bucket_count\":3")); + // The derivative agg won't have values for the first bucket of each host + assertThat(jobStatsResponseAsString, containsString("\"missing_field_count\":2")); + } + + public void testLookbackUsingDerivativeAggWithSmallerHistogramBucketThanDataRate() throws Exception { + String jobId = "derivative-agg-network-job"; + String job = "{\"analysis_config\" :{\"bucket_span\":\"300s\"," + + "\"summary_count_field_name\":\"doc_count\"," + + "\"detectors\":[{\"function\":\"mean\",\"field_name\":\"bytes-delta\",\"by_field_name\":\"hostname\"}]}," + + "\"data_description\" : {\"time_field\":\"timestamp\"}" + + "}"; + client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(), + new StringEntity(job, ContentType.APPLICATION_JSON)); + + String datafeedId = "datafeed-" + jobId; + String aggregations = + "{\"hostname\": {\"terms\" : {\"field\": \"host.keyword\", \"size\":10}," + + "\"aggs\": {\"buckets\": {\"date_histogram\":{\"field\":\"timestamp\",\"interval\":\"5s\"}," + + "\"aggs\": {\"timestamp\":{\"max\":{\"field\":\"timestamp\"}}," + + "\"bytes-delta\":{\"derivative\":{\"buckets_path\":\"avg_bytes_out\"}}," + + "\"avg_bytes_out\":{\"avg\":{\"field\":\"network_bytes_out\"}} }}}}}"; + new DatafeedBuilder(datafeedId, jobId, "network-data", "doc") + .setAggregations(aggregations) + .setChunkingTimespan("300s") + .build(); + + openJob(client(), jobId); + + startDatafeedAndWaitUntilStopped(datafeedId); + waitUntilJobIsClosed(jobId); + Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); + String jobStatsResponseAsString = responseEntityToString(jobStatsResponse); + assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":240")); + assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":240")); + } + 
+ public void testLookbackWithoutPermissions() throws Exception { + String jobId = "permission-test-network-job"; + String job = "{\"analysis_config\" :{\"bucket_span\":\"300s\"," + + "\"summary_count_field_name\":\"doc_count\"," + + "\"detectors\":[{\"function\":\"mean\",\"field_name\":\"bytes-delta\",\"by_field_name\":\"hostname\"}]}," + + "\"data_description\" : {\"time_field\":\"timestamp\"}" + + "}"; + client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(), + new StringEntity(job, ContentType.APPLICATION_JSON)); + + String datafeedId = "datafeed-" + jobId; + String aggregations = + "{\"hostname\": {\"terms\" : {\"field\": \"host.keyword\", \"size\":10}," + + "\"aggs\": {\"buckets\": {\"date_histogram\":{\"field\":\"timestamp\",\"interval\":\"5s\"}," + + "\"aggs\": {\"timestamp\":{\"max\":{\"field\":\"timestamp\"}}," + + "\"bytes-delta\":{\"derivative\":{\"buckets_path\":\"avg_bytes_out\"}}," + + "\"avg_bytes_out\":{\"avg\":{\"field\":\"network_bytes_out\"}} }}}}}"; + + // At the time we create the datafeed the user can access the network-data index that we have access to + new DatafeedBuilder(datafeedId, jobId, "network-data", "doc") + .setAggregations(aggregations) + .setChunkingTimespan("300s") + .setAuthHeader(BASIC_AUTH_VALUE_ML_ADMIN_WITH_SOME_DATA_ACCESS) + .build(); + + // Change the role so that the user can no longer access network-data + setupDataAccessRole("some-other-data"); + + openJob(client(), jobId); + + startDatafeedAndWaitUntilStopped(datafeedId, BASIC_AUTH_VALUE_ML_ADMIN_WITH_SOME_DATA_ACCESS); + waitUntilJobIsClosed(jobId); + Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); + String jobStatsResponseAsString = responseEntityToString(jobStatsResponse); + // We expect that no data made it through to the job + assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":0")); + assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":0")); + + // There should be a notification saying that there was a problem extracting data + client().performRequest("post", "_refresh"); + Response notificationsResponse = client().performRequest("get", AuditorField.NOTIFICATIONS_INDEX + "/_search?q=job_id:" + jobId); + String notificationsResponseAsString = responseEntityToString(notificationsResponse); + assertThat(notificationsResponseAsString, containsString("\"message\":\"Datafeed is encountering errors extracting data: " + + "action [indices:data/read/search] is unauthorized for user [ml_admin_plus_data]\"")); + } + + public void testLookbackWithPipelineBucketAgg() throws Exception { + String jobId = "pipeline-bucket-agg-job"; + String job = "{\"analysis_config\" :{\"bucket_span\":\"1h\"," + + "\"summary_count_field_name\":\"doc_count\"," + + "\"detectors\":[{\"function\":\"mean\",\"field_name\":\"percentile95_airlines_count\"}]}," + + "\"data_description\" : {\"time_field\":\"time stamp\"}" + + "}"; + client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(), + new StringEntity(job, ContentType.APPLICATION_JSON)); + + String datafeedId = "datafeed-" + jobId; + String aggregations = "{\"buckets\":{\"date_histogram\":{\"field\":\"time stamp\",\"interval\":\"15m\"}," + + "\"aggregations\":{" + + "\"time stamp\":{\"max\":{\"field\":\"time stamp\"}}," + + "\"airlines\":{\"terms\":{\"field\":\"airline.keyword\",\"size\":10}}," + + 
"\"percentile95_airlines_count\":{\"percentiles_bucket\":" + + "{\"buckets_path\":\"airlines._count\", \"percents\": [95]}}}}}"; + new DatafeedBuilder(datafeedId, jobId, "airline-data", "response").setAggregations(aggregations).build(); + + openJob(client(), jobId); + + startDatafeedAndWaitUntilStopped(datafeedId); + waitUntilJobIsClosed(jobId); + Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); + String jobStatsResponseAsString = responseEntityToString(jobStatsResponse); + assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":2")); + assertThat(jobStatsResponseAsString, containsString("\"input_field_count\":4")); + assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":2")); + assertThat(jobStatsResponseAsString, containsString("\"processed_field_count\":4")); + assertThat(jobStatsResponseAsString, containsString("\"out_of_order_timestamp_count\":0")); + assertThat(jobStatsResponseAsString, containsString("\"missing_field_count\":0")); + } + + public void testRealtime() throws Exception { + String jobId = "job-realtime-1"; + createJob(jobId, "airline"); + String datafeedId = jobId + "-datafeed"; + new DatafeedBuilder(datafeedId, jobId, "airline-data", "response").build(); + openJob(client(), jobId); + + Response response = client().performRequest("post", + MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start?start=2016-06-01T00:00:00Z"); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + assertThat(responseEntityToString(response), equalTo("{\"started\":true}")); + assertBusy(() -> { + try { + Response getJobResponse = client().performRequest("get", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); + String responseAsString = responseEntityToString(getJobResponse); + assertThat(responseAsString, containsString("\"processed_record_count\":2")); + assertThat(responseAsString, containsString("\"state\":\"opened\"")); + } catch (Exception e1) { + throw new RuntimeException(e1); + } + }); + + // Model state should be persisted at the end of lookback + // test a model snapshot is present + assertBusy(() -> { + try { + Response getJobResponse = client().performRequest("get", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/model_snapshots"); + String responseAsString = responseEntityToString(getJobResponse); + assertThat(responseAsString, containsString("\"count\":1")); + } catch (Exception e1) { + throw new RuntimeException(e1); + } + }); + + ResponseException e = expectThrows(ResponseException.class, + () -> client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); + response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(409)); + assertThat(responseEntityToString(response), containsString("Cannot delete job [" + jobId + "] because datafeed [" + datafeedId + + "] refers to it")); + + response = client().performRequest("post", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_stop"); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + assertThat(responseEntityToString(response), equalTo("{\"stopped\":true}")); + + client().performRequest("POST", "/_xpack/ml/anomaly_detectors/" + jobId + "/_close"); + + response = client().performRequest("delete", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + 
assertThat(responseEntityToString(response), equalTo("{\"acknowledged\":true}")); + + response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + assertThat(responseEntityToString(response), equalTo("{\"acknowledged\":true}")); + } + + public void testForceDeleteWhileDatafeedIsRunning() throws Exception { + String jobId = "job-realtime-2"; + createJob(jobId, "airline"); + String datafeedId = jobId + "-datafeed"; + new DatafeedBuilder(datafeedId, jobId, "airline-data", "response").build(); + openJob(client(), jobId); + + Response response = client().performRequest("post", + MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start?start=2016-06-01T00:00:00Z"); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + assertThat(responseEntityToString(response), equalTo("{\"started\":true}")); + + ResponseException e = expectThrows(ResponseException.class, + () -> client().performRequest("delete", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId)); + response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(409)); + assertThat(responseEntityToString(response), containsString("Cannot delete datafeed [" + datafeedId + + "] while its status is started")); + + response = client().performRequest("delete", + MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "?force=true"); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + assertThat(responseEntityToString(response), equalTo("{\"acknowledged\":true}")); + + expectThrows(ResponseException.class, + () -> client().performRequest("get", "/_xpack/ml/datafeeds/" + datafeedId)); + } + + private class LookbackOnlyTestHelper { + private String jobId; + private String airlineVariant; + private String dataIndex; + private boolean addScriptedFields; + private boolean shouldSucceedInput; + private boolean shouldSucceedProcessing; + + LookbackOnlyTestHelper(String jobId, String dataIndex) { + this.jobId = jobId; + this.dataIndex = dataIndex; + this.shouldSucceedInput = true; + this.shouldSucceedProcessing = true; + this.airlineVariant = "airline"; + } + + public LookbackOnlyTestHelper setAddScriptedFields(boolean value) { + addScriptedFields = value; + return this; + } + + public LookbackOnlyTestHelper setAirlineVariant(String airlineVariant) { + this.airlineVariant = airlineVariant; + return this; + } + + + public LookbackOnlyTestHelper setShouldSucceedInput(boolean value) { + shouldSucceedInput = value; + return this; + } + + public LookbackOnlyTestHelper setShouldSucceedProcessing(boolean value) { + shouldSucceedProcessing = value; + return this; + } + + public void execute() throws Exception { + createJob(jobId, airlineVariant); + String datafeedId = "datafeed-" + jobId; + new DatafeedBuilder(datafeedId, jobId, dataIndex, "response") + .setScriptedFields(addScriptedFields ? 
+ "{\"airline\":{\"script\":{\"lang\":\"painless\",\"inline\":\"doc['airline'].value\"}}}" : null) + .build(); + openJob(client(), jobId); + + startDatafeedAndWaitUntilStopped(datafeedId); + waitUntilJobIsClosed(jobId); + + Response jobStatsResponse = client().performRequest("get", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); + String jobStatsResponseAsString = responseEntityToString(jobStatsResponse); + if (shouldSucceedInput) { + assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":2")); + } else { + assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":0")); + } + if (shouldSucceedProcessing) { + assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":2")); + } else { + assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":0")); + } + assertThat(jobStatsResponseAsString, containsString("\"missing_field_count\":0")); + } + } + private void startDatafeedAndWaitUntilStopped(String datafeedId) throws Exception { + startDatafeedAndWaitUntilStopped(datafeedId, BASIC_AUTH_VALUE_SUPER_USER); + } + + private void startDatafeedAndWaitUntilStopped(String datafeedId, String authHeader) throws Exception { + Response startDatafeedRequest = client().performRequest("post", + MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start?start=2016-06-01T00:00:00Z&end=2016-06-02T00:00:00Z", + new BasicHeader("Authorization", authHeader)); + assertThat(startDatafeedRequest.getStatusLine().getStatusCode(), equalTo(200)); + assertThat(responseEntityToString(startDatafeedRequest), equalTo("{\"started\":true}")); + assertBusy(() -> { + try { + Response datafeedStatsResponse = client().performRequest("get", + MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_stats"); + assertThat(responseEntityToString(datafeedStatsResponse), containsString("\"state\":\"stopped\"")); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + } + + private void waitUntilJobIsClosed(String jobId) throws Exception { + assertBusy(() -> { + try { + Response jobStatsResponse = client().performRequest("get", + MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"); + assertThat(responseEntityToString(jobStatsResponse), containsString("\"state\":\"closed\"")); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + } + + private Response createJob(String id, String airlineVariant) throws Exception { + String job = "{\n" + " \"description\":\"Analysis of response time by airline\",\n" + + " \"analysis_config\" : {\n" + " \"bucket_span\":\"1h\",\n" + + " \"detectors\" :[\n" + + " {\"function\":\"mean\",\"field_name\":\"responsetime\",\"by_field_name\":\"" + airlineVariant + "\"}]\n" + + " },\n" + " \"data_description\" : {\n" + + " \"format\":\"xcontent\",\n" + + " \"time_field\":\"time stamp\",\n" + " \"time_format\":\"yyyy-MM-dd'T'HH:mm:ssX\"\n" + " }\n" + + "}"; + return client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + id, + Collections.emptyMap(), new StringEntity(job, ContentType.APPLICATION_JSON)); + } + + private static String responseEntityToString(Response response) throws Exception { + try (BufferedReader reader = new BufferedReader(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8))) { + return reader.lines().collect(Collectors.joining("\n")); + } + } + + public static void openJob(RestClient client, String jobId) throws IOException { + Response response = client.performRequest("post", 
MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open"); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + } + + @After + public void clearMlState() throws Exception { + new MlRestTestStateCleaner(logger, adminClient(), this).clearMlMetadata(); + XPackRestTestHelper.waitForPendingTasks(adminClient()); + } + + private static class DatafeedBuilder { + String datafeedId; + String jobId; + String index; + String type; + boolean source; + String scriptedFields; + String aggregations; + String authHeader = BASIC_AUTH_VALUE_SUPER_USER; + String chunkingTimespan; + + DatafeedBuilder(String datafeedId, String jobId, String index, String type) { + this.datafeedId = datafeedId; + this.jobId = jobId; + this.index = index; + this.type = type; + } + + DatafeedBuilder setSource(boolean enableSource) { + this.source = enableSource; + return this; + } + + DatafeedBuilder setScriptedFields(String scriptedFields) { + this.scriptedFields = scriptedFields; + return this; + } + + DatafeedBuilder setAggregations(String aggregations) { + this.aggregations = aggregations; + return this; + } + + DatafeedBuilder setAuthHeader(String authHeader) { + this.authHeader = authHeader; + return this; + } + + DatafeedBuilder setChunkingTimespan(String timespan) { + chunkingTimespan = timespan; + return this; + } + + Response build() throws IOException { + String datafeedConfig = "{" + + "\"job_id\": \"" + jobId + "\",\"indexes\":[\"" + index + "\"],\"types\":[\"" + type + "\"]" + + (source ? ",\"_source\":true" : "") + + (scriptedFields == null ? "" : ",\"script_fields\":" + scriptedFields) + + (aggregations == null ? "" : ",\"aggs\":" + aggregations) + + (chunkingTimespan == null ? "" : + ",\"chunking_config\":{\"mode\":\"MANUAL\",\"time_span\":\"" + chunkingTimespan + "\"}") + + "}"; + return client().performRequest("put", MachineLearning.BASE_PATH + "datafeeds/" + datafeedId, Collections.emptyMap(), + new StringEntity(datafeedConfig, ContentType.APPLICATION_JSON), + new BasicHeader("Authorization", authHeader)); + } + } +} diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java new file mode 100644 index 0000000000000..3a1fc2b0f6d4a --- /dev/null +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java @@ -0,0 +1,244 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.update.UpdateAction; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction; +import org.elasticsearch.xpack.core.ml.action.UpdateModelSnapshotAction; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; +import org.elasticsearch.xpack.core.ml.job.results.Bucket; +import org.elasticsearch.xpack.core.ml.job.results.ForecastRequestStats; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class DeleteExpiredDataIT extends MlNativeAutodetectIntegTestCase { + + private static final String DATA_INDEX = "delete-expired-data-test-data"; + private static final String DATA_TYPE = "my_type"; + + @Before + public void setUpData() throws IOException { + client().admin().indices().prepareCreate(DATA_INDEX) + .addMapping(DATA_TYPE, "time", "type=date,format=epoch_millis") + .get(); + + // We are going to create data for the last 3 days (72 hourly buckets) + long nowMillis = System.currentTimeMillis(); + int totalBuckets = 3 * 24; + int normalRate = 10; + int anomalousRate = 100; + int anomalousBucket = 30; + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); + for (int bucket = 0; bucket < totalBuckets; bucket++) { + long timestamp = nowMillis - TimeValue.timeValueHours(totalBuckets - bucket).getMillis(); + int bucketRate = bucket == anomalousBucket ? 
anomalousRate : normalRate; + for (int point = 0; point < bucketRate; point++) { + IndexRequest indexRequest = new IndexRequest(DATA_INDEX, DATA_TYPE); + indexRequest.source("time", timestamp); + bulkRequestBuilder.add(indexRequest); + } + } + + BulkResponse bulkResponse = bulkRequestBuilder + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + assertThat(bulkResponse.hasFailures(), is(false)); + + // Ensure all data is searchable + client().admin().indices().prepareRefresh(DATA_INDEX).get(); + } + + @After + public void tearDownData() throws Exception { + client().admin().indices().prepareDelete(DATA_INDEX).get(); + cleanUp(); + } + + public void testDeleteExpiredData() throws Exception { + registerJob(newJobBuilder("no-retention").setResultsRetentionDays(null).setModelSnapshotRetentionDays(null)); + registerJob(newJobBuilder("results-retention").setResultsRetentionDays(1L).setModelSnapshotRetentionDays(null)); + registerJob(newJobBuilder("snapshots-retention").setResultsRetentionDays(null).setModelSnapshotRetentionDays(2L)); + registerJob(newJobBuilder("snapshots-retention-with-retain").setResultsRetentionDays(null).setModelSnapshotRetentionDays(2L)); + registerJob(newJobBuilder("results-and-snapshots-retention").setResultsRetentionDays(1L).setModelSnapshotRetentionDays(2L)); + + List shortExpiryForecastIds = new ArrayList<>(); + + long now = System.currentTimeMillis(); + long oneDayAgo = now - TimeValue.timeValueHours(48).getMillis() - 1; + for (Job.Builder job : getJobs()) { + putJob(job); + + String datafeedId = job.getId() + "-feed"; + DatafeedConfig.Builder datafeedConfig = new DatafeedConfig.Builder(datafeedId, job.getId()); + datafeedConfig.setIndices(Arrays.asList(DATA_INDEX)); + datafeedConfig.setTypes(Arrays.asList(DATA_TYPE)); + DatafeedConfig datafeed = datafeedConfig.build(); + registerDatafeed(datafeed); + putDatafeed(datafeed); + + // Run up to a day ago + openJob(job.getId()); + startDatafeed(datafeedId, 0, now - TimeValue.timeValueHours(24).getMillis()); + waitUntilJobIsClosed(job.getId()); + assertThat(getBuckets(job.getId()).size(), is(greaterThanOrEqualTo(47))); + assertThat(getRecords(job.getId()).size(), equalTo(1)); + List modelSnapshots = getModelSnapshots(job.getId()); + assertThat(modelSnapshots.size(), equalTo(1)); + String snapshotDocId = ModelSnapshot.documentId(modelSnapshots.get(0)); + + // Update snapshot timestamp to force it out of snapshot retention window + String snapshotUpdate = "{ \"timestamp\": " + oneDayAgo + "}"; + UpdateRequest updateSnapshotRequest = new UpdateRequest(".ml-anomalies-" + job.getId(), "doc", snapshotDocId); + updateSnapshotRequest.doc(snapshotUpdate.getBytes(StandardCharsets.UTF_8), XContentType.JSON); + client().execute(UpdateAction.INSTANCE, updateSnapshotRequest).get(); + + // Now let's create some forecasts + openJob(job.getId()); + + // We must set a very small value for expires_in to keep this testable as the deletion cutoff point is the moment + // the DeleteExpiredDataAction is called. 
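+ // Three 3-hour forecasts are requested for each job: one that expires almost immediately (1s), + // one with the default expiry (null expires_in) and one that never expires (expires_in of zero). + // Only the short-expiry forecasts should be removed when DeleteExpiredDataAction runs later in this test.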
+ String forecastShortExpiryId = forecast(job.getId(), TimeValue.timeValueHours(3), TimeValue.timeValueSeconds(1)); + shortExpiryForecastIds.add(forecastShortExpiryId); + String forecastDefaultExpiryId = forecast(job.getId(), TimeValue.timeValueHours(3), null); + String forecastNoExpiryId = forecast(job.getId(), TimeValue.timeValueHours(3), TimeValue.ZERO); + waitForecastToFinish(job.getId(), forecastShortExpiryId); + waitForecastToFinish(job.getId(), forecastDefaultExpiryId); + waitForecastToFinish(job.getId(), forecastNoExpiryId); + } + // Refresh to ensure the snapshot timestamp updates are visible + client().admin().indices().prepareRefresh("*").get(); + + // We need to wait a second to ensure the second time around model snapshots will have a different ID (it depends on epoch seconds) + awaitBusy(() -> false, 1, TimeUnit.SECONDS); + + for (Job.Builder job : getJobs()) { + // Run up to now + startDatafeed(job.getId() + "-feed", 0, now); + waitUntilJobIsClosed(job.getId()); + assertThat(getBuckets(job.getId()).size(), is(greaterThanOrEqualTo(70))); + assertThat(getRecords(job.getId()).size(), equalTo(1)); + List modelSnapshots = getModelSnapshots(job.getId()); + assertThat(modelSnapshots.size(), equalTo(2)); + } + + retainAllSnapshots("snapshots-retention-with-retain"); + + long totalModelSizeStatsBeforeDelete = client().prepareSearch("*") + .setQuery(QueryBuilders.termQuery("result_type", "model_size_stats")) + .get().getHits().totalHits; + long totalNotificationsCountBeforeDelete = client().prepareSearch(".ml-notifications").get().getHits().totalHits; + assertThat(totalModelSizeStatsBeforeDelete, greaterThan(0L)); + assertThat(totalNotificationsCountBeforeDelete, greaterThan(0L)); + + // Verify forecasts were created + List forecastStats = getForecastStats(); + assertThat(forecastStats.size(), equalTo(getJobs().size() * 3)); + for (ForecastRequestStats forecastStat : forecastStats) { + assertThat(countForecastDocs(forecastStat.getJobId(), forecastStat.getForecastId()), equalTo(forecastStat.getRecordCount())); + } + + client().execute(DeleteExpiredDataAction.INSTANCE, new DeleteExpiredDataAction.Request()).get(); + + // We need to refresh to ensure the deletion is visible + client().admin().indices().prepareRefresh("*").get(); + + // no-retention job should have kept all data + assertThat(getBuckets("no-retention").size(), is(greaterThanOrEqualTo(70))); + assertThat(getRecords("no-retention").size(), equalTo(1)); + assertThat(getModelSnapshots("no-retention").size(), equalTo(2)); + + List buckets = getBuckets("results-retention"); + assertThat(buckets.size(), is(lessThanOrEqualTo(24))); + assertThat(buckets.size(), is(greaterThanOrEqualTo(22))); + assertThat(buckets.get(0).getTimestamp().getTime(), greaterThanOrEqualTo(oneDayAgo)); + assertThat(getRecords("results-retention").size(), equalTo(0)); + assertThat(getModelSnapshots("results-retention").size(), equalTo(2)); + + assertThat(getBuckets("snapshots-retention").size(), is(greaterThanOrEqualTo(70))); + assertThat(getRecords("snapshots-retention").size(), equalTo(1)); + assertThat(getModelSnapshots("snapshots-retention").size(), equalTo(1)); + + assertThat(getBuckets("snapshots-retention-with-retain").size(), is(greaterThanOrEqualTo(70))); + assertThat(getRecords("snapshots-retention-with-retain").size(), equalTo(1)); + assertThat(getModelSnapshots("snapshots-retention-with-retain").size(), equalTo(2)); + + buckets = getBuckets("results-and-snapshots-retention"); + assertThat(buckets.size(), is(lessThanOrEqualTo(24))); + 
assertThat(buckets.size(), is(greaterThanOrEqualTo(22))); + assertThat(buckets.get(0).getTimestamp().getTime(), greaterThanOrEqualTo(oneDayAgo)); + assertThat(getRecords("results-and-snapshots-retention").size(), equalTo(0)); + assertThat(getModelSnapshots("results-and-snapshots-retention").size(), equalTo(1)); + + long totalModelSizeStatsAfterDelete = client().prepareSearch("*") + .setQuery(QueryBuilders.termQuery("result_type", "model_size_stats")) + .get().getHits().totalHits; + long totalNotificationsCountAfterDelete = client().prepareSearch(".ml-notifications").get().getHits().totalHits; + assertThat(totalModelSizeStatsAfterDelete, equalTo(totalModelSizeStatsBeforeDelete)); + assertThat(totalNotificationsCountAfterDelete, greaterThanOrEqualTo(totalNotificationsCountBeforeDelete)); + + // Verify short expiry forecasts were deleted only + forecastStats = getForecastStats(); + assertThat(forecastStats.size(), equalTo(getJobs().size() * 2)); + for (ForecastRequestStats forecastStat : forecastStats) { + assertThat(countForecastDocs(forecastStat.getJobId(), forecastStat.getForecastId()), equalTo(forecastStat.getRecordCount())); + } + for (Job.Builder job : getJobs()) { + for (String forecastId : shortExpiryForecastIds) { + assertThat(countForecastDocs(job.getId(), forecastId), equalTo(0L)); + } + } + } + + private static Job.Builder newJobBuilder(String id) { + Detector.Builder detector = new Detector.Builder(); + detector.setFunction("count"); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Arrays.asList(detector.build())); + analysisConfig.setBucketSpan(TimeValue.timeValueHours(1)); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeField("time"); + Job.Builder jobBuilder = new Job.Builder(id); + jobBuilder.setAnalysisConfig(analysisConfig); + jobBuilder.setDataDescription(dataDescription); + return jobBuilder; + } + + private void retainAllSnapshots(String jobId) throws Exception { + List modelSnapshots = getModelSnapshots(jobId); + for (ModelSnapshot modelSnapshot : modelSnapshots) { + UpdateModelSnapshotAction.Request request = new UpdateModelSnapshotAction.Request(jobId, modelSnapshot.getSnapshotId()); + request.setRetain(true); + client().execute(UpdateModelSnapshotAction.INSTANCE, request).get(); + } + // We need to refresh to ensure the updates are visible + client().admin().indices().prepareRefresh("*").get(); + } +} diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java new file mode 100644 index 0000000000000..28565c5923c38 --- /dev/null +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java @@ -0,0 +1,277 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.xpack.core.ml.action.GetRecordsAction; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.Condition; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.DetectionRule; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; +import org.elasticsearch.xpack.core.ml.job.config.MlFilter; +import org.elasticsearch.xpack.core.ml.job.config.Operator; +import org.elasticsearch.xpack.core.ml.job.config.RuleCondition; +import org.elasticsearch.xpack.core.ml.job.config.RuleConditionType; +import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; +import org.junit.After; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.isOneOf; + +/** + * An integration test for detection rules + */ +public class DetectionRulesIT extends MlNativeAutodetectIntegTestCase { + + @After + public void cleanUpTest() throws Exception { + cleanUp(); + } + + public void testNumericalRule() throws Exception { + RuleCondition condition1 = RuleCondition.createNumerical( + RuleConditionType.NUMERICAL_ACTUAL, + "by_field", + "by_field_value_1", + new Condition(Operator.LT, "1000")); + RuleCondition condition2 = RuleCondition.createNumerical( + RuleConditionType.NUMERICAL_ACTUAL, + "by_field", + "by_field_value_2", + new Condition(Operator.LT, "500")); + RuleCondition condition3 = RuleCondition.createNumerical( + RuleConditionType.NUMERICAL_ACTUAL, + "by_field", + "by_field_value_3", + new Condition(Operator.LT, "100")); + DetectionRule rule = new DetectionRule.Builder(Arrays.asList(condition1, condition2, condition3)).build(); + + Detector.Builder detector = new Detector.Builder("max", "value"); + detector.setRules(Arrays.asList(rule)); + detector.setByFieldName("by_field"); + + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder( + Arrays.asList(detector.build())); + analysisConfig.setBucketSpan(TimeValue.timeValueHours(1)); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + Job.Builder job = new Job.Builder("detection-rule-numeric-test"); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(dataDescription); + + registerJob(job); + putJob(job); + openJob(job.getId()); + + long timestamp = 1491004800000L; + int totalBuckets = 2 * 24; + // each half of the buckets contains one anomaly for each by field value + Set anomalousBuckets = new HashSet<>(Arrays.asList(20, 44)); + List byFieldValues = Arrays.asList("by_field_value_1", "by_field_value_2", "by_field_value_3"); + Map anomalousValues = new HashMap<>(); + anomalousValues.put("by_field_value_1", 800); + anomalousValues.put("by_field_value_2", 
400); + anomalousValues.put("by_field_value_3", 400); + int normalValue = 1; + List data = new ArrayList<>(); + for (int bucket = 0; bucket < totalBuckets; bucket++) { + for (String byFieldValue : byFieldValues) { + Map record = new HashMap<>(); + record.put("time", timestamp); + record.put("value", anomalousBuckets.contains(bucket) ? anomalousValues.get(byFieldValue) : normalValue); + record.put("by_field", byFieldValue); + data.add(createJsonRecord(record)); + } + timestamp += TimeValue.timeValueHours(1).getMillis(); + } + + // push the data for the first half buckets + postData(job.getId(), joinBetween(0, data.size() / 2, data)); + closeJob(job.getId()); + + List records = getRecords(job.getId()); + assertThat(records.size(), equalTo(1)); + assertThat(records.get(0).getByFieldValue(), equalTo("by_field_value_3")); + long firstRecordTimestamp = records.get(0).getTimestamp().getTime(); + + { + // Update rules so that the anomalies suppression is inverted + RuleCondition newCondition1 = RuleCondition.createNumerical( + RuleConditionType.NUMERICAL_ACTUAL, + "by_field", + "by_field_value_1", + new Condition(Operator.GT, "1000")); + RuleCondition newCondition2 = RuleCondition.createNumerical( + RuleConditionType.NUMERICAL_ACTUAL, + "by_field", + "by_field_value_2", + new Condition(Operator.GT, "500")); + RuleCondition newCondition3 = RuleCondition.createNumerical( + RuleConditionType.NUMERICAL_ACTUAL, + "by_field", + "by_field_value_3", + new Condition(Operator.GT, "0")); + DetectionRule newRule = new DetectionRule.Builder(Arrays.asList(newCondition1, newCondition2, newCondition3)).build(); + JobUpdate.Builder update = new JobUpdate.Builder(job.getId()); + update.setDetectorUpdates(Arrays.asList(new JobUpdate.DetectorUpdate(0, null, Arrays.asList(newRule)))); + updateJob(job.getId(), update.build()); + } + + // push second half + openJob(job.getId()); + postData(job.getId(), joinBetween(data.size() / 2, data.size(), data)); + closeJob(job.getId()); + + GetRecordsAction.Request recordsAfterFirstHalf = new GetRecordsAction.Request(job.getId()); + recordsAfterFirstHalf.setStart(String.valueOf(firstRecordTimestamp + 1)); + records = getRecords(recordsAfterFirstHalf); + assertThat(records.size(), equalTo(2)); + Set secondHaldRecordByFieldValues = records.stream().map(AnomalyRecord::getByFieldValue).collect(Collectors.toSet()); + assertThat(secondHaldRecordByFieldValues, contains("by_field_value_1", "by_field_value_2")); + } + + public void testCategoricalRule() throws Exception { + MlFilter safeIps = new MlFilter("safe_ips", Arrays.asList("111.111.111.111", "222.222.222.222")); + assertThat(putMlFilter(safeIps), is(true)); + + RuleCondition condition = RuleCondition.createCategorical("ip", safeIps.getId()); + DetectionRule rule = new DetectionRule.Builder(Collections.singletonList(condition)).build(); + + Detector.Builder detector = new Detector.Builder("count", null); + detector.setRules(Arrays.asList(rule)); + detector.setOverFieldName("ip"); + + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + analysisConfig.setBucketSpan(TimeValue.timeValueHours(1)); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + Job.Builder job = new Job.Builder("detection-rule-categorical-test"); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(dataDescription); + + registerJob(job); + putJob(job); + openJob(job.getId()); + + long timestamp = 1509062400000L; + List data = new ArrayList<>(); + + // Let's send a 
bunch of random IPs with counts of 1 + for (int bucket = 0; bucket < 20; bucket++) { + for (int i = 0; i < 5; i++) { + data.add(createIpRecord(timestamp, randomAlphaOfLength(10))); + } + timestamp += TimeValue.timeValueHours(1).getMillis(); + } + + // Now send anomalous counts for our filtered IPs plus 333.333.333.333 + List namedIps = Arrays.asList("111.111.111.111", "222.222.222.222", "333.333.333.333"); + long firstAnomalyTime = timestamp; + for (int i = 0; i < 10; i++) { + for (String ip : namedIps) { + data.add(createIpRecord(timestamp, ip)); + } + } + + // Some more normal buckets + for (int bucket = 0; bucket < 3; bucket++) { + for (int i = 0; i < 5; i++) { + data.add(createIpRecord(timestamp, randomAlphaOfLength(10))); + } + timestamp += TimeValue.timeValueHours(1).getMillis(); + } + + postData(job.getId(), joinBetween(0, data.size(), data)); + data = new ArrayList<>(); + flushJob(job.getId(), false); + + List records = getRecords(job.getId()); + assertThat(records.size(), equalTo(1)); + assertThat(records.get(0).getTimestamp().getTime(), equalTo(firstAnomalyTime)); + assertThat(records.get(0).getOverFieldValue(), equalTo("333.333.333.333")); + + // Now let's update the filter + MlFilter updatedFilter = new MlFilter(safeIps.getId(), Collections.singletonList("333.333.333.333")); + assertThat(putMlFilter(updatedFilter), is(true)); + + // Wait until the notification that the process was updated is indexed + assertBusy(() -> { + SearchResponse searchResponse = client().prepareSearch(".ml-notifications") + .setSize(1) + .addSort("timestamp", SortOrder.DESC) + .setQuery(QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery("job_id", job.getId())) + .filter(QueryBuilders.termQuery("level", "info")) + ).get(); + SearchHit[] hits = searchResponse.getHits().getHits(); + assertThat(hits.length, equalTo(1)); + assertThat(hits[0].getSourceAsMap().get("message"), equalTo("Updated filter [safe_ips] in running process")); + }); + + long secondAnomalyTime = timestamp; + // Send another anomalous bucket + for (int i = 0; i < 10; i++) { + for (String ip : namedIps) { + data.add(createIpRecord(timestamp, ip)); + } + } + + // Some more normal buckets + for (int bucket = 0; bucket < 3; bucket++) { + for (int i = 0; i < 5; i++) { + data.add(createIpRecord(timestamp, randomAlphaOfLength(10))); + } + timestamp += TimeValue.timeValueHours(1).getMillis(); + } + + postData(job.getId(), joinBetween(0, data.size(), data)); + flushJob(job.getId(), false); + + GetRecordsAction.Request getRecordsRequest = new GetRecordsAction.Request(job.getId()); + getRecordsRequest.setStart(Long.toString(firstAnomalyTime + 1)); + records = getRecords(getRecordsRequest); + assertThat(records.size(), equalTo(2)); + for (AnomalyRecord record : records) { + assertThat(record.getTimestamp().getTime(), equalTo(secondAnomalyTime)); + assertThat(record.getOverFieldValue(), isOneOf("111.111.111.111", "222.222.222.222")); + } + + closeJob(job.getId()); + } + + private String createIpRecord(long timestamp, String ip) throws IOException { + Map record = new HashMap<>(); + record.put("time", timestamp); + record.put("ip", ip); + return createJsonRecord(record); + } + + private String joinBetween(int start, int end, List input) { + StringBuilder result = new StringBuilder(); + for (int i = start; i < end; i++) { + result.append(input.get(i)); + } + return result.toString(); + } +} diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java 
b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java new file mode 100644 index 0000000000000..a5fc1575f484a --- /dev/null +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java @@ -0,0 +1,258 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisLimits; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.results.Bucket; +import org.elasticsearch.xpack.core.ml.job.results.Forecast; +import org.elasticsearch.xpack.core.ml.job.results.ForecastRequestStats; +import org.junit.After; + +import java.io.IOException; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.equalTo; + +public class ForecastIT extends MlNativeAutodetectIntegTestCase { + + @After + public void tearDownData() throws Exception { + cleanUp(); + } + + public void testSingleSeries() throws Exception { + Detector.Builder detector = new Detector.Builder("mean", "value"); + + TimeValue bucketSpan = TimeValue.timeValueHours(1); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + analysisConfig.setBucketSpan(bucketSpan); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeFormat("epoch"); + Job.Builder job = new Job.Builder("forecast-it-test-single-series"); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(dataDescription); + + registerJob(job); + putJob(job); + openJob(job.getId()); + + long now = Instant.now().getEpochSecond(); + long timestamp = now - 50 * bucketSpan.seconds(); + List data = new ArrayList<>(); + while (timestamp < now) { + data.add(createJsonRecord(createRecord(timestamp, 10.0))); + data.add(createJsonRecord(createRecord(timestamp, 30.0))); + timestamp += bucketSpan.seconds(); + } + + postData(job.getId(), data.stream().collect(Collectors.joining())); + flushJob(job.getId(), false); + + // Now we can start doing forecast requests + + String forecastIdDefaultDurationDefaultExpiry = forecast(job.getId(), null, null); + String forecastIdDuration1HourNoExpiry = forecast(job.getId(), TimeValue.timeValueHours(1), TimeValue.ZERO); + String forecastIdDuration3HoursExpiresIn24Hours = forecast(job.getId(), TimeValue.timeValueHours(3), TimeValue.timeValueHours(24)); + + waitForecastToFinish(job.getId(), forecastIdDefaultDurationDefaultExpiry); + waitForecastToFinish(job.getId(), forecastIdDuration1HourNoExpiry); + waitForecastToFinish(job.getId(), forecastIdDuration3HoursExpiresIn24Hours); + closeJob(job.getId()); + + List buckets = getBuckets(job.getId()); + Bucket lastBucket = buckets.get(buckets.size() - 1); + long 
lastBucketTime = lastBucket.getTimestamp().getTime(); + + // Now let's verify forecasts + double expectedForecastValue = 20.0; + + List forecastStats = getForecastStats(); + assertThat(forecastStats.size(), equalTo(3)); + Map idToForecastStats = new HashMap<>(); + forecastStats.stream().forEach(f -> idToForecastStats.put(f.getForecastId(), f)); + + { + ForecastRequestStats forecastDefaultDurationDefaultExpiry = idToForecastStats.get(forecastIdDefaultDurationDefaultExpiry); + assertThat(forecastDefaultDurationDefaultExpiry.getExpiryTime().toEpochMilli(), + equalTo(forecastDefaultDurationDefaultExpiry.getCreateTime().toEpochMilli() + + TimeValue.timeValueHours(14 * 24).getMillis())); + List forecasts = getForecasts(job.getId(), forecastDefaultDurationDefaultExpiry); + assertThat(forecastDefaultDurationDefaultExpiry.getRecordCount(), equalTo(24L)); + assertThat(forecasts.size(), equalTo(24)); + assertThat(forecasts.get(0).getTimestamp().getTime(), equalTo(lastBucketTime)); + for (int i = 0; i < forecasts.size(); i++) { + Forecast forecast = forecasts.get(i); + assertThat(forecast.getTimestamp().getTime(), equalTo(lastBucketTime + i * bucketSpan.getMillis())); + assertThat(forecast.getBucketSpan(), equalTo(bucketSpan.getSeconds())); + assertThat(forecast.getForecastPrediction(), closeTo(expectedForecastValue, 0.01)); + } + } + + { + ForecastRequestStats forecastDuration1HourNoExpiry = idToForecastStats.get(forecastIdDuration1HourNoExpiry); + assertThat(forecastDuration1HourNoExpiry.getExpiryTime(), equalTo(Instant.EPOCH)); + List forecasts = getForecasts(job.getId(), forecastDuration1HourNoExpiry); + assertThat(forecastDuration1HourNoExpiry.getRecordCount(), equalTo(1L)); + assertThat(forecasts.size(), equalTo(1)); + assertThat(forecasts.get(0).getTimestamp().getTime(), equalTo(lastBucketTime)); + for (int i = 0; i < forecasts.size(); i++) { + Forecast forecast = forecasts.get(i); + assertThat(forecast.getTimestamp().getTime(), equalTo(lastBucketTime + i * bucketSpan.getMillis())); + assertThat(forecast.getBucketSpan(), equalTo(bucketSpan.getSeconds())); + assertThat(forecast.getForecastPrediction(), closeTo(expectedForecastValue, 0.01)); + } + } + + { + ForecastRequestStats forecastDuration3HoursExpiresIn24Hours = idToForecastStats.get(forecastIdDuration3HoursExpiresIn24Hours); + assertThat(forecastDuration3HoursExpiresIn24Hours.getExpiryTime().toEpochMilli(), + equalTo(forecastDuration3HoursExpiresIn24Hours.getCreateTime().toEpochMilli() + + TimeValue.timeValueHours(24).getMillis())); + List forecasts = getForecasts(job.getId(), forecastDuration3HoursExpiresIn24Hours); + assertThat(forecastDuration3HoursExpiresIn24Hours.getRecordCount(), equalTo(3L)); + assertThat(forecasts.size(), equalTo(3)); + assertThat(forecasts.get(0).getTimestamp().getTime(), equalTo(lastBucketTime)); + for (int i = 0; i < forecasts.size(); i++) { + Forecast forecast = forecasts.get(i); + assertThat(forecast.getTimestamp().getTime(), equalTo(lastBucketTime + i * bucketSpan.getMillis())); + assertThat(forecast.getBucketSpan(), equalTo(bucketSpan.getSeconds())); + assertThat(forecast.getForecastPrediction(), closeTo(expectedForecastValue, 0.01)); + } + } + } + + public void testDurationCannotBeLessThanBucketSpan() throws Exception { + Detector.Builder detector = new Detector.Builder("mean", "value"); + + TimeValue bucketSpan = TimeValue.timeValueHours(1); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + analysisConfig.setBucketSpan(bucketSpan); + 
DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeFormat("epoch"); + Job.Builder job = new Job.Builder("forecast-it-test-duration-bucket-span"); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(dataDescription); + + registerJob(job); + putJob(job); + openJob(job.getId()); + ElasticsearchException e = expectThrows(ElasticsearchException.class,() -> forecast(job.getId(), + TimeValue.timeValueMinutes(10), null)); + assertThat(e.getMessage(), + equalTo("[duration] must be greater or equal to the bucket span: [10m/1h]")); + } + + public void testNoData() throws Exception { + Detector.Builder detector = new Detector.Builder("mean", "value"); + + TimeValue bucketSpan = TimeValue.timeValueMinutes(1); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + analysisConfig.setBucketSpan(bucketSpan); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeFormat("epoch"); + Job.Builder job = new Job.Builder("forecast-it-test-no-data"); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(dataDescription); + + registerJob(job); + putJob(job); + openJob(job.getId()); + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> forecast(job.getId(), TimeValue.timeValueMinutes(120), null)); + assertThat(e.getMessage(), + equalTo("Cannot run forecast: Forecast cannot be executed as job requires data to have been processed and modeled")); + } + + public void testMemoryStatus() throws Exception { + Detector.Builder detector = new Detector.Builder("mean", "value"); + detector.setByFieldName("clientIP"); + + TimeValue bucketSpan = TimeValue.timeValueHours(1); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + analysisConfig.setBucketSpan(bucketSpan); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeFormat("epoch"); + Job.Builder job = new Job.Builder("forecast-it-test-memory-status"); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(dataDescription); + + // Set the memory limit to 30MB + AnalysisLimits limits = new AnalysisLimits(30L, null); + job.setAnalysisLimits(limits); + + registerJob(job); + putJob(job); + openJob(job.getId()); + createDataWithLotsOfClientIps(bucketSpan, job); + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> forecast(job.getId(), TimeValue.timeValueMinutes(120), null)); + assertThat(e.getMessage(), equalTo("Cannot run forecast: Forecast cannot be executed as model memory status is not OK")); + } + + public void testMemoryLimit() throws Exception { + Detector.Builder detector = new Detector.Builder("mean", "value"); + detector.setByFieldName("clientIP"); + + TimeValue bucketSpan = TimeValue.timeValueHours(1); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + analysisConfig.setBucketSpan(bucketSpan); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeFormat("epoch"); + Job.Builder job = new Job.Builder("forecast-it-test-memory-limit"); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(dataDescription); + + registerJob(job); + putJob(job); + openJob(job.getId()); + createDataWithLotsOfClientIps(bucketSpan, job); + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> 
forecast(job.getId(), TimeValue.timeValueMinutes(120), null)); + assertThat(e.getMessage(), + equalTo("Cannot run forecast: Forecast cannot be executed as forecast memory usage is predicted to exceed 20MB")); + } + + private void createDataWithLotsOfClientIps(TimeValue bucketSpan, Job.Builder job) throws IOException { + long now = Instant.now().getEpochSecond(); + long timestamp = now - 50 * bucketSpan.seconds(); + while (timestamp < now) { + for (int i = 1; i < 256; i++) { + List data = new ArrayList<>(); + for (int j = 1; j < 100; j++) { + Map record = new HashMap<>(); + record.put("time", timestamp); + record.put("value", 10.0); + record.put("clientIP", String.format(Locale.ROOT, "192.168.%d.%d", i, j)); + data.add(createJsonRecord(record)); + } + postData(job.getId(), data.stream().collect(Collectors.joining())); + timestamp += bucketSpan.seconds(); + } + } + flushJob(job.getId(), false); + } + + private static Map createRecord(long timestamp, double value) { + Map record = new HashMap<>(); + record.put("time", timestamp); + record.put("value", value); + return record; + } +} diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsDeletedAfterReopeningJobIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsDeletedAfterReopeningJobIT.java new file mode 100644 index 0000000000000..add0b9e8a93a3 --- /dev/null +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/InterimResultsDeletedAfterReopeningJobIT.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; +import org.junit.After; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +/** + * This tests that interim results created before a job was reopened get + * deleted after new buckets are created. 
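+ * + * The test flushes the job with calc_interim enabled so interim records exist, closes and reopens the + * job, and then posts more data; once final results are written for those buckets, no interim results + * should remain.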
+ */ +public class InterimResultsDeletedAfterReopeningJobIT extends MlNativeAutodetectIntegTestCase { + + @After + public void cleanUpTest() throws Exception { + cleanUp(); + } + + public void test() throws Exception { + Detector.Builder detector = new Detector.Builder("mean", "value"); + detector.setByFieldName("by_field"); + + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder( + Arrays.asList(detector.build())); + analysisConfig.setBucketSpan(TimeValue.timeValueHours(1)); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + Job.Builder job = new Job.Builder("interim-results-deleted-after-reopening-job-test"); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(dataDescription); + + registerJob(job); + putJob(job); + openJob(job.getId()); + + long timestamp = 1491004800000L; + int totalBuckets = 2 * 24; + List byFieldValues = Arrays.asList("foo", "bar"); + int normalValue = 1000; + List data = new ArrayList<>(); + for (int bucket = 0; bucket < totalBuckets; bucket++) { + for (String byFieldValue : byFieldValues) { + data.add(createJsonRecord(createRecord(timestamp, byFieldValue, normalValue))); + } + timestamp += TimeValue.timeValueHours(1).getMillis(); + } + + data.add(createJsonRecord(createRecord(timestamp, "foo", 1))); + data.add(createJsonRecord(createRecord(timestamp, "bar", 1))); + postData(job.getId(), data.stream().collect(Collectors.joining())); + flushJob(job.getId(), true); + closeJob(job.getId()); + + // We should have 2 interim records + List records = getRecords(job.getId()); + assertThat(records.size(), equalTo(2)); + assertThat(records.stream().allMatch(AnomalyRecord::isInterim), is(true)); + + // Second batch + data = new ArrayList<>(); + + // This should fix the mean for 'foo' + data.add(createJsonRecord(createRecord(timestamp, "foo", 2000))); + + // Then advance time and send normal data to force creating final results for previous bucket + timestamp += TimeValue.timeValueHours(1).getMillis(); + data.add(createJsonRecord(createRecord(timestamp, "foo", normalValue))); + data.add(createJsonRecord(createRecord(timestamp, "bar", normalValue))); + + openJob(job.getId()); + postData(job.getId(), data.stream().collect(Collectors.joining())); + closeJob(job.getId()); + + records = getRecords(job.getId()); + assertThat(records.size(), equalTo(1)); + assertThat(records.stream().allMatch(AnomalyRecord::isInterim), is(false)); + + // No other interim results either + assertNoInterimResults(job.getId()); + } + + private static Map createRecord(long timestamp, String byFieldValue, int value) { + Map record = new HashMap<>(); + record.put("time", timestamp); + record.put("by_field", byFieldValue); + record.put("value", value); + return record; + } + + private void assertNoInterimResults(String jobId) { + String indexName = AnomalyDetectorsIndex.jobResultsAliasedName(jobId); + SearchResponse search = client().prepareSearch(indexName).setTypes("result").setSize(1000) + .setQuery(QueryBuilders.termQuery("is_interim", true)).get(); + assertThat(search.getHits().getTotalHits(), equalTo(0L)); + } +} diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java new file mode 100644 index 0000000000000..114fbdd4e5dd3 --- /dev/null +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java @@ -0,0 +1,682 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.integration; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.ml.integration.MlRestTestStateCleaner; +import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; +import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; +import org.junit.After; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.Locale; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +public class MlJobIT extends ESRestTestCase { + + private static final String BASIC_AUTH_VALUE = basicAuthHeaderValue("x_pack_rest_user", + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); + + @Override + protected Settings restClientSettings() { + return Settings.builder().put(super.restClientSettings()).put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE).build(); + } + + @Override + protected boolean preserveTemplatesUponCompletion() { + return true; + } + + public void testPutJob_GivenFarequoteConfig() throws Exception { + Response response = createFarequoteJob("given-farequote-config-job"); + + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + String responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString("\"job_id\":\"given-farequote-config-job\"")); + } + + public void testGetJob_GivenNoSuchJob() throws Exception { + ResponseException e = expectThrows(ResponseException.class, + () -> client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/non-existing-job/_stats")); + + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(404)); + assertThat(e.getMessage(), containsString("No known job with id 'non-existing-job'")); + } + + public void testGetJob_GivenJobExists() throws Exception { + createFarequoteJob("get-job_given-job-exists-job"); + + Response response = client().performRequest("get", + MachineLearning.BASE_PATH + "anomaly_detectors/get-job_given-job-exists-job/_stats"); + + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + String responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString("\"count\":1")); + assertThat(responseAsString, 
containsString("\"job_id\":\"get-job_given-job-exists-job\"")); + } + + public void testGetJobs_GivenSingleJob() throws Exception { + String jobId = "get-jobs_given-single-job-job"; + createFarequoteJob(jobId); + + // Explicit _all + Response response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/_all"); + + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + String responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString("\"count\":1")); + assertThat(responseAsString, containsString("\"job_id\":\"" + jobId + "\"")); + + // Implicit _all + response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors"); + + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString("\"count\":1")); + assertThat(responseAsString, containsString("\"job_id\":\"" + jobId + "\"")); + } + + public void testGetJobs_GivenMultipleJobs() throws Exception { + createFarequoteJob("given-multiple-jobs-job-1"); + createFarequoteJob("given-multiple-jobs-job-2"); + createFarequoteJob("given-multiple-jobs-job-3"); + + // Explicit _all + Response response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/_all"); + + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + String responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString("\"count\":3")); + assertThat(responseAsString, containsString("\"job_id\":\"given-multiple-jobs-job-1\"")); + assertThat(responseAsString, containsString("\"job_id\":\"given-multiple-jobs-job-2\"")); + assertThat(responseAsString, containsString("\"job_id\":\"given-multiple-jobs-job-3\"")); + + // Implicit _all + response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors"); + + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString("\"count\":3")); + assertThat(responseAsString, containsString("\"job_id\":\"given-multiple-jobs-job-1\"")); + assertThat(responseAsString, containsString("\"job_id\":\"given-multiple-jobs-job-2\"")); + assertThat(responseAsString, containsString("\"job_id\":\"given-multiple-jobs-job-3\"")); + } + + private Response createFarequoteJob(String jobId) throws IOException { + String job = "{\n" + " \"description\":\"Analysis of response time by airline\",\n" + + " \"analysis_config\" : {\n" + " \"bucket_span\": \"3600s\",\n" + + " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"responsetime\",\"by_field_name\":\"airline\"}]\n" + + " },\n" + " \"data_description\" : {\n" + " \"field_delimiter\":\",\",\n" + " " + + "\"time_field\":\"time\",\n" + + " \"time_format\":\"yyyy-MM-dd HH:mm:ssX\"\n" + " }\n" + "}"; + + return client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, + Collections.emptyMap(), new StringEntity(job, ContentType.APPLICATION_JSON)); + } + + public void testCantCreateJobWithSameID() throws Exception { + String jobTemplate = "{\n" + + " \"analysis_config\" : {\n" + + " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"responsetime\"}]\n" + + " },\n" + + " \"data_description\": {},\n" + + " \"results_index_name\" : \"%s\"}"; + + String jobConfig = String.format(Locale.ROOT, jobTemplate, "index-1"); + + String jobId = "cant-create-job-with-same-id-job"; + 
Response response = client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId , + Collections.emptyMap(), + new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); + assertEquals(200, response.getStatusLine().getStatusCode()); + + final String jobConfig2 = String.format(Locale.ROOT, jobTemplate, "index-2"); + ResponseException e = expectThrows(ResponseException.class, + () ->client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, + Collections.emptyMap(), new StringEntity(jobConfig2, ContentType.APPLICATION_JSON))); + + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400)); + assertThat(e.getMessage(), containsString("The job cannot be created with the Id '" + jobId + "'. The Id is already used.")); + } + + public void testCreateJobsWithIndexNameOption() throws Exception { + String jobTemplate = "{\n" + + " \"analysis_config\" : {\n" + + " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"responsetime\"}]\n" + + " },\n" + + " \"data_description\": {},\n" + + " \"results_index_name\" : \"%s\"}"; + + String jobId1 = "create-jobs-with-index-name-option-job-1"; + String indexName = "non-default-index"; + String jobConfig = String.format(Locale.ROOT, jobTemplate, indexName); + + Response response = client().performRequest("put", MachineLearning.BASE_PATH + + "anomaly_detectors/" + jobId1, Collections.emptyMap(), new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); + assertEquals(200, response.getStatusLine().getStatusCode()); + + String jobId2 = "create-jobs-with-index-name-option-job-2"; + response = client().performRequest("put", MachineLearning.BASE_PATH + + "anomaly_detectors/" + jobId2, Collections.emptyMap(), new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); + assertEquals(200, response.getStatusLine().getStatusCode()); + + response = client().performRequest("get", "_aliases"); + assertEquals(200, response.getStatusLine().getStatusCode()); + String responseAsString = responseEntityToString(response); + + assertThat(responseAsString, + containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName("custom-" + indexName) + "\":{\"aliases\":{")); + assertThat(responseAsString, containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + + "\":{\"filter\":{\"term\":{\"job_id\":{\"value\":\"" + jobId1 + "\",\"boost\":1.0}}}}")); + assertThat(responseAsString, containsString("\"" + AnomalyDetectorsIndex.resultsWriteAlias(jobId1) + "\":{}")); + assertThat(responseAsString, containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName(jobId2) + + "\":{\"filter\":{\"term\":{\"job_id\":{\"value\":\"" + jobId2 + "\",\"boost\":1.0}}}}")); + assertThat(responseAsString, containsString("\"" + AnomalyDetectorsIndex.resultsWriteAlias(jobId2) + "\":{}")); + + response = client().performRequest("get", "_cat/indices"); + assertEquals(200, response.getStatusLine().getStatusCode()); + responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName)); + assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId1)))); + assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId2)))); + + String bucketResult = String.format(Locale.ROOT, + "{\"job_id\":\"%s\", \"timestamp\": \"%s\", \"result_type\":\"bucket\", \"bucket_span\": \"%s\"}", + jobId1, "1234", 1); + String id = String.format(Locale.ROOT, 
"%s_bucket_%s_%s", jobId1, "1234", 300); + response = client().performRequest("put", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/doc/" + id, + Collections.emptyMap(), new StringEntity(bucketResult, ContentType.APPLICATION_JSON)); + assertEquals(201, response.getStatusLine().getStatusCode()); + + bucketResult = String.format(Locale.ROOT, + "{\"job_id\":\"%s\", \"timestamp\": \"%s\", \"result_type\":\"bucket\", \"bucket_span\": \"%s\"}", + jobId1, "1236", 1); + id = String.format(Locale.ROOT, "%s_bucket_%s_%s", jobId1, "1236", 300); + response = client().performRequest("put", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/doc/" + id, + Collections.emptyMap(), new StringEntity(bucketResult, ContentType.APPLICATION_JSON)); + assertEquals(201, response.getStatusLine().getStatusCode()); + + client().performRequest("post", "_refresh"); + + response = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1 + "/results/buckets"); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString("\"count\":2")); + + response = client().performRequest("get", AnomalyDetectorsIndex.jobResultsAliasedName(jobId1) + "/_search"); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString("\"total\":2")); + + response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId1); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + + // check that indices still exist, but are empty and aliases are gone + response = client().performRequest("get", "_aliases"); + assertEquals(200, response.getStatusLine().getStatusCode()); + responseAsString = responseEntityToString(response); + assertThat(responseAsString, not(containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId1)))); + assertThat(responseAsString, containsString(AnomalyDetectorsIndex.jobResultsAliasedName(jobId2))); //job2 still exists + + response = client().performRequest("get", "_cat/indices"); + assertEquals(200, response.getStatusLine().getStatusCode()); + responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName)); + + client().performRequest("post", "_refresh"); + + response = client().performRequest("get", AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-" + indexName + "/_count"); + assertEquals(200, response.getStatusLine().getStatusCode()); + responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString("\"count\":0")); + } + + public void testCreateJobInSharedIndexUpdatesMapping() throws Exception { + String jobTemplate = "{\n" + + " \"analysis_config\" : {\n" + + " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"metric\", \"by_field_name\":\"%s\"}]\n" + + " },\n" + + " \"data_description\": {}\n" + + "}"; + + String jobId1 = "create-job-in-shared-index-updates-mapping-job-1"; + String byFieldName1 = "responsetime"; + String jobId2 = "create-job-in-shared-index-updates-mapping-job-2"; + String byFieldName2 = "cpu-usage"; + String jobConfig = String.format(Locale.ROOT, jobTemplate, byFieldName1); + + Response response = client().performRequest("put", MachineLearning.BASE_PATH + + "anomaly_detectors/" + jobId1, Collections.emptyMap(), new 
StringEntity(jobConfig, ContentType.APPLICATION_JSON)); + assertEquals(200, response.getStatusLine().getStatusCode()); + + // Check the index mapping contains the first by_field_name + response = client().performRequest("get", AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT + "/_mapping?pretty"); + assertEquals(200, response.getStatusLine().getStatusCode()); + String responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString(byFieldName1)); + assertThat(responseAsString, not(containsString(byFieldName2))); + + jobConfig = String.format(Locale.ROOT, jobTemplate, byFieldName2); + response = client().performRequest("put", MachineLearning.BASE_PATH + + "anomaly_detectors/" + jobId2, Collections.emptyMap(), new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); + assertEquals(200, response.getStatusLine().getStatusCode()); + + // Check the index mapping now contains both fields + response = client().performRequest("get", AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT + "/_mapping?pretty"); + assertEquals(200, response.getStatusLine().getStatusCode()); + responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString(byFieldName1)); + assertThat(responseAsString, containsString(byFieldName2)); + } + + public void testCreateJobInCustomSharedIndexUpdatesMapping() throws Exception { + String jobTemplate = "{\n" + + " \"analysis_config\" : {\n" + + " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"metric\", \"by_field_name\":\"%s\"}]\n" + + " },\n" + + " \"data_description\": {},\n" + + " \"results_index_name\" : \"shared-index\"}"; + + String jobId1 = "create-job-in-custom-shared-index-updates-mapping-job-1"; + String byFieldName1 = "responsetime"; + String jobId2 = "create-job-in-custom-shared-index-updates-mapping-job-2"; + String byFieldName2 = "cpu-usage"; + String jobConfig = String.format(Locale.ROOT, jobTemplate, byFieldName1); + + Response response = client().performRequest("put", MachineLearning.BASE_PATH + + "anomaly_detectors/" + jobId1, Collections.emptyMap(), new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); + assertEquals(200, response.getStatusLine().getStatusCode()); + + // Check the index mapping contains the first by_field_name + response = client().performRequest("get", + AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-shared-index" + "/_mapping?pretty"); + assertEquals(200, response.getStatusLine().getStatusCode()); + String responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString(byFieldName1)); + assertThat(responseAsString, not(containsString(byFieldName2))); + + jobConfig = String.format(Locale.ROOT, jobTemplate, byFieldName2); + response = client().performRequest("put", MachineLearning.BASE_PATH + + "anomaly_detectors/" + jobId2, Collections.emptyMap(), new StringEntity(jobConfig, ContentType.APPLICATION_JSON)); + assertEquals(200, response.getStatusLine().getStatusCode()); + + // Check the index mapping now contains both fields + response = client().performRequest("get", + AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-shared-index" + "/_mapping?pretty"); + assertEquals(200, response.getStatusLine().getStatusCode()); + responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString(byFieldName1)); + assertThat(responseAsString, containsString(byFieldName2)); + } + + 
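+ // In the shared results index, "response" and "response.time" cannot both be mapped (one needs + // "response" to be an object, the other a leaf field), so the second job creation below should fail + // with advice to assign a dedicated results index.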
public void testCreateJob_WithClashingFieldMappingsFails() throws Exception {
+        String jobTemplate = "{\n" +
+                " \"analysis_config\" : {\n" +
+                " \"detectors\" :[{\"function\":\"metric\",\"field_name\":\"metric\", \"by_field_name\":\"%s\"}]\n" +
+                " },\n" +
+                " \"data_description\": {}\n" +
+                "}";
+
+        String jobId1 = "job-with-response-field";
+        String byFieldName1;
+        String jobId2 = "job-will-fail-with-mapping-error-on-response-field";
+        String byFieldName2;
+        // we should get the friendly advice no matter which way around the clashing fields are seen
+        if (randomBoolean()) {
+            byFieldName1 = "response";
+            byFieldName2 = "response.time";
+        } else {
+            byFieldName1 = "response.time";
+            byFieldName2 = "response";
+        }
+        String jobConfig = String.format(Locale.ROOT, jobTemplate, byFieldName1);
+
+        Response response = client().performRequest("put", MachineLearning.BASE_PATH +
+                "anomaly_detectors/" + jobId1, Collections.emptyMap(), new StringEntity(jobConfig, ContentType.APPLICATION_JSON));
+        assertEquals(200, response.getStatusLine().getStatusCode());
+
+        final String failingJobConfig = String.format(Locale.ROOT, jobTemplate, byFieldName2);
+        ResponseException e = expectThrows(ResponseException.class,
+                () -> client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId2,
+                        Collections.emptyMap(), new StringEntity(failingJobConfig, ContentType.APPLICATION_JSON)));
+
+        assertThat(e.getMessage(),
+                containsString("This job would cause a mapping clash with existing field [response] - " +
+                        "avoid the clash by assigning a dedicated results index"));
+    }
+
+    public void testDeleteJob() throws Exception {
+        String jobId = "delete-job-job";
+        String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT;
+        createFarequoteJob(jobId);
+
+        Response response = client().performRequest("get", "_cat/indices");
+        assertEquals(200, response.getStatusLine().getStatusCode());
+        String responseAsString = responseEntityToString(response);
+        assertThat(responseAsString, containsString(indexName));
+
+        response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId);
+        assertThat(response.getStatusLine().getStatusCode(), equalTo(200));
+
+        // check that the index still exists (it's shared by default)
+        response = client().performRequest("get", "_cat/indices");
+        assertEquals(200, response.getStatusLine().getStatusCode());
+        responseAsString = responseEntityToString(response);
+        assertThat(responseAsString, containsString(indexName));
+
+        assertBusy(() -> {
+            try {
+                Response r = client().performRequest("get", indexName + "/_count");
+                assertEquals(200, r.getStatusLine().getStatusCode());
+                String responseString = responseEntityToString(r);
+                assertThat(responseString, containsString("\"count\":0"));
+            } catch (Exception e) {
+                fail(e.getMessage());
+            }
+
+        });
+
+        // check that the job itself is gone
+        expectThrows(ResponseException.class, () ->
+                client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"));
+    }
+
+    public void testDeleteJobAfterMissingIndex() throws Exception {
+        String jobId = "delete-job-after-missing-index-job";
+        String aliasName = AnomalyDetectorsIndex.jobResultsAliasedName(jobId);
+        String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT;
+        createFarequoteJob(jobId);
+
+        Response response = client().performRequest("get", "_cat/indices");
+        assertEquals(200,
response.getStatusLine().getStatusCode()); + String responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString(indexName)); + + // Manually delete the index so that we can test that deletion proceeds + // normally anyway + response = client().performRequest("delete", indexName); + assertEquals(200, response.getStatusLine().getStatusCode()); + + response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + + // check index was deleted + response = client().performRequest("get", "_cat/indices"); + assertEquals(200, response.getStatusLine().getStatusCode()); + responseAsString = responseEntityToString(response); + assertThat(responseAsString, not(containsString(aliasName))); + assertThat(responseAsString, not(containsString(indexName))); + + expectThrows(ResponseException.class, () -> + client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + } + + public void testDeleteJobAfterMissingAliases() throws Exception { + String jobId = "delete-job-after-missing-alias-job"; + String readAliasName = AnomalyDetectorsIndex.jobResultsAliasedName(jobId); + String writeAliasName = AnomalyDetectorsIndex.resultsWriteAlias(jobId); + String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; + createFarequoteJob(jobId); + + Response response = client().performRequest("get", "_cat/aliases"); + assertEquals(200, response.getStatusLine().getStatusCode()); + String responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString(readAliasName)); + assertThat(responseAsString, containsString(writeAliasName)); + + // Manually delete the aliases so that we can test that deletion proceeds + // normally anyway + response = client().performRequest("delete", indexName + "/_alias/" + readAliasName); + assertEquals(200, response.getStatusLine().getStatusCode()); + response = client().performRequest("delete", indexName + "/_alias/" + writeAliasName); + assertEquals(200, response.getStatusLine().getStatusCode()); + + // check aliases were deleted + expectThrows(ResponseException.class, () -> client().performRequest("get", indexName + "/_alias/" + readAliasName)); + expectThrows(ResponseException.class, () -> client().performRequest("get", indexName + "/_alias/" + writeAliasName)); + + response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + } + + public void testMultiIndexDelete() throws Exception { + String jobId = "multi-index-delete-job"; + String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; + createFarequoteJob(jobId); + + Response response = client().performRequest("put", indexName + "-001"); + assertEquals(200, response.getStatusLine().getStatusCode()); + + response = client().performRequest("put", indexName + "-002"); + assertEquals(200, response.getStatusLine().getStatusCode()); + + response = client().performRequest("get", "_cat/indices"); + assertEquals(200, response.getStatusLine().getStatusCode()); + String responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString(indexName)); + assertThat(responseAsString, containsString(indexName + "-001")); + assertThat(responseAsString, 
containsString(indexName + "-002")); + + // Add some documents to each index to make sure the DBQ clears them out + String recordResult = + String.format(Locale.ROOT, + "{\"job_id\":\"%s\", \"timestamp\": \"%s\", \"bucket_span\":%d, \"result_type\":\"record\"}", + jobId, 123, 1); + client().performRequest("put", indexName + "/doc/" + 123, + Collections.singletonMap("refresh", "true"), new StringEntity(recordResult, ContentType.APPLICATION_JSON)); + client().performRequest("put", indexName + "-001/doc/" + 123, + Collections.singletonMap("refresh", "true"), new StringEntity(recordResult, ContentType.APPLICATION_JSON)); + client().performRequest("put", indexName + "-002/doc/" + 123, + Collections.singletonMap("refresh", "true"), new StringEntity(recordResult, ContentType.APPLICATION_JSON)); + + // Also index a few through the alias for the first job + client().performRequest("put", indexName + "/doc/" + 456, + Collections.singletonMap("refresh", "true"), new StringEntity(recordResult, ContentType.APPLICATION_JSON)); + + + client().performRequest("post", "_refresh"); + + // check for the documents + response = client().performRequest("get", indexName+ "/_count"); + assertEquals(200, response.getStatusLine().getStatusCode()); + responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString("\"count\":2")); + + response = client().performRequest("get", indexName + "-001/_count"); + assertEquals(200, response.getStatusLine().getStatusCode()); + responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString("\"count\":1")); + + response = client().performRequest("get", indexName + "-002/_count"); + assertEquals(200, response.getStatusLine().getStatusCode()); + responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString("\"count\":1")); + + // Delete + response = client().performRequest("delete", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + + client().performRequest("post", "_refresh"); + + // check that the indices still exist but are empty + response = client().performRequest("get", "_cat/indices"); + assertEquals(200, response.getStatusLine().getStatusCode()); + responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString(indexName)); + assertThat(responseAsString, containsString(indexName + "-001")); + assertThat(responseAsString, containsString(indexName + "-002")); + + response = client().performRequest("get", indexName + "/_count"); + assertEquals(200, response.getStatusLine().getStatusCode()); + responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString("\"count\":0")); + + response = client().performRequest("get", indexName + "-001/_count"); + assertEquals(200, response.getStatusLine().getStatusCode()); + responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString("\"count\":0")); + + response = client().performRequest("get", indexName + "-002/_count"); + assertEquals(200, response.getStatusLine().getStatusCode()); + responseAsString = responseEntityToString(response); + assertThat(responseAsString, containsString("\"count\":0")); + + + expectThrows(ResponseException.class, () -> + client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); + } + + public void testDelete_multipleRequest() throws Exception { + String jobId = 
"delete-job-mulitple-times"; + createFarequoteJob(jobId); + + ConcurrentMapLong responses = ConcurrentCollections.newConcurrentMapLong(); + ConcurrentMapLong responseExceptions = ConcurrentCollections.newConcurrentMapLong(); + AtomicReference ioe = new AtomicReference<>(); + AtomicInteger recreationGuard = new AtomicInteger(0); + AtomicReference recreationResponse = new AtomicReference<>(); + AtomicReference recreationException = new AtomicReference<>(); + + Runnable deleteJob = () -> { + try { + boolean forceDelete = randomBoolean(); + String url = MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId; + if (forceDelete) { + url += "?force=true"; + } + Response response = client().performRequest("delete", url); + responses.put(Thread.currentThread().getId(), response); + } catch (ResponseException re) { + responseExceptions.put(Thread.currentThread().getId(), re); + } catch (IOException e) { + ioe.set(e); + } + + // Immediately after the first deletion finishes, recreate the job. This should pick up + // race conditions where another delete request deletes part of the newly created job. + if (recreationGuard.getAndIncrement() == 0) { + try { + recreationResponse.set(createFarequoteJob(jobId)); + } catch (ResponseException re) { + recreationException.set(re); + } catch (IOException e) { + ioe.set(e); + } + } + }; + + // The idea is to hit the situation where one request waits for + // the other to complete. This is difficult to schedule but + // hopefully it will happen in CI + int numThreads = 5; + Thread [] threads = new Thread[numThreads]; + for (int i=0; i jobs = new ArrayList<>(); + private List datafeeds = new ArrayList<>(); + @Override + protected Collection> nodePlugins() { + return Arrays.asList(LocalStateCompositeXPackPlugin.class, Netty4Plugin.class); + } + + @Override + protected Collection> transportClientPlugins() { + return Arrays.asList(XPackClientPlugin.class, Netty4Plugin.class); + } + + @Override + protected Settings externalClusterClientSettings() { + Path keyStore; + try { + keyStore = PathUtils.get(getClass().getResource("/test-node.jks").toURI()); + } catch (URISyntaxException e) { + throw new IllegalStateException("error trying to get keystore path", e); + } + Settings.Builder builder = Settings.builder(); + builder.put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4); + builder.put(SecurityField.USER_SETTING.getKey(), "x_pack_rest_user:" + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); + builder.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), true); + builder.put("xpack.security.transport.ssl.enabled", true); + builder.put("xpack.security.transport.ssl.keystore.path", keyStore.toAbsolutePath().toString()); + builder.put("xpack.security.transport.ssl.keystore.password", "keypass"); + builder.put("xpack.security.transport.ssl.verification_mode", "certificate"); + return builder.build(); + } + + protected void cleanUp() { + cleanUpDatafeeds(); + cleanUpJobs(); + waitForPendingTasks(); + } + + private void cleanUpDatafeeds() { + for (DatafeedConfig datafeed : datafeeds) { + try { + stopDatafeed(datafeed.getId()); + } catch (Exception e) { + // ignore + } + try { + deleteDatafeed(datafeed.getId()); + } catch (Exception e) { + // ignore + } + } + } + + private void cleanUpJobs() { + for (Job.Builder job : jobs) { + try { + closeJob(job.getId()); + } catch (Exception e) { + // ignore + } + try { + deleteJob(job.getId()); + } catch (Exception e) { + // ignore + } + } + } + + private void waitForPendingTasks() { + ListTasksRequest listTasksRequest = 
new ListTasksRequest(); + listTasksRequest.setWaitForCompletion(true); + listTasksRequest.setDetailed(true); + listTasksRequest.setTimeout(TimeValue.timeValueSeconds(10)); + try { + admin().cluster().listTasks(listTasksRequest).get(); + } catch (Exception e) { + throw new AssertionError("Failed to wait for pending tasks to complete", e); + } + } + + protected void registerJob(Job.Builder job) { + if (jobs.add(job) == false) { + throw new IllegalArgumentException("job [" + job.getId() + "] is already registered"); + } + } + + protected void registerDatafeed(DatafeedConfig datafeed) { + if (datafeeds.add(datafeed) == false) { + throw new IllegalArgumentException("datafeed [" + datafeed.getId() + "] is already registered"); + } + } + + protected List getJobs() { + return jobs; + } + + protected PutJobAction.Response putJob(Job.Builder job) { + PutJobAction.Request request = new PutJobAction.Request(job); + return client().execute(PutJobAction.INSTANCE, request).actionGet(); + } + + protected OpenJobAction.Response openJob(String jobId) { + OpenJobAction.Request request = new OpenJobAction.Request(jobId); + return client().execute(OpenJobAction.INSTANCE, request).actionGet(); + } + + protected CloseJobAction.Response closeJob(String jobId) { + CloseJobAction.Request request = new CloseJobAction.Request(jobId); + return client().execute(CloseJobAction.INSTANCE, request).actionGet(); + } + + protected FlushJobAction.Response flushJob(String jobId, boolean calcInterim) { + FlushJobAction.Request request = new FlushJobAction.Request(jobId); + request.setCalcInterim(calcInterim); + return client().execute(FlushJobAction.INSTANCE, request).actionGet(); + } + + protected PutJobAction.Response updateJob(String jobId, JobUpdate update) { + UpdateJobAction.Request request = new UpdateJobAction.Request(jobId, update); + return client().execute(UpdateJobAction.INSTANCE, request).actionGet(); + } + + protected DeleteJobAction.Response deleteJob(String jobId) { + DeleteJobAction.Request request = new DeleteJobAction.Request(jobId); + return client().execute(DeleteJobAction.INSTANCE, request).actionGet(); + } + + protected PutDatafeedAction.Response putDatafeed(DatafeedConfig datafeed) { + PutDatafeedAction.Request request = new PutDatafeedAction.Request(datafeed); + return client().execute(PutDatafeedAction.INSTANCE, request).actionGet(); + } + + protected StopDatafeedAction.Response stopDatafeed(String datafeedId) { + StopDatafeedAction.Request request = new StopDatafeedAction.Request(datafeedId); + return client().execute(StopDatafeedAction.INSTANCE, request).actionGet(); + } + + protected DeleteDatafeedAction.Response deleteDatafeed(String datafeedId) { + DeleteDatafeedAction.Request request = new DeleteDatafeedAction.Request(datafeedId); + return client().execute(DeleteDatafeedAction.INSTANCE, request).actionGet(); + } + + protected StartDatafeedAction.Response startDatafeed(String datafeedId, long start, Long end) { + StartDatafeedAction.Request request = new StartDatafeedAction.Request(datafeedId, start); + request.getParams().setEndTime(end); + return client().execute(StartDatafeedAction.INSTANCE, request).actionGet(); + } + + protected void waitUntilJobIsClosed(String jobId) throws Exception { + waitUntilJobIsClosed(jobId, TimeValue.timeValueSeconds(30)); + } + + protected void waitUntilJobIsClosed(String jobId, TimeValue waitTime) throws Exception { + assertBusy(() -> assertThat(getJobStats(jobId).get(0).getState(), equalTo(JobState.CLOSED)), + waitTime.getMillis(), TimeUnit.MILLISECONDS); + } + + 
protected List getJob(String jobId) { + GetJobsAction.Request request = new GetJobsAction.Request(jobId); + return client().execute(GetJobsAction.INSTANCE, request).actionGet().getResponse().results(); + } + + protected List getJobStats(String jobId) { + GetJobsStatsAction.Request request = new GetJobsStatsAction.Request(jobId); + GetJobsStatsAction.Response response = client().execute(GetJobsStatsAction.INSTANCE, request).actionGet(); + return response.getResponse().results(); + } + + protected List getBuckets(String jobId) { + GetBucketsAction.Request request = new GetBucketsAction.Request(jobId); + return getBuckets(request); + } + + protected List getBuckets(GetBucketsAction.Request request) { + GetBucketsAction.Response response = client().execute(GetBucketsAction.INSTANCE, request).actionGet(); + return response.getBuckets().results(); + } + + protected List getRecords(String jobId) { + GetRecordsAction.Request request = new GetRecordsAction.Request(jobId); + return getRecords(request); + } + + protected List getRecords(GetRecordsAction.Request request) { + GetRecordsAction.Response response = client().execute(GetRecordsAction.INSTANCE, request).actionGet(); + return response.getRecords().results(); + } + + protected List getModelSnapshots(String jobId) { + GetModelSnapshotsAction.Request request = new GetModelSnapshotsAction.Request(jobId, null); + GetModelSnapshotsAction.Response response = client().execute(GetModelSnapshotsAction.INSTANCE, request).actionGet(); + return response.getPage().results(); + } + + protected RevertModelSnapshotAction.Response revertModelSnapshot(String jobId, String snapshotId) { + RevertModelSnapshotAction.Request request = new RevertModelSnapshotAction.Request(jobId, snapshotId); + return client().execute(RevertModelSnapshotAction.INSTANCE, request).actionGet(); + } + + protected List getCategories(String jobId) { + GetCategoriesAction.Request getCategoriesRequest = + new GetCategoriesAction.Request(jobId); + getCategoriesRequest.setPageParams(new PageParams()); + GetCategoriesAction.Response categoriesResponse = client().execute(GetCategoriesAction.INSTANCE, getCategoriesRequest).actionGet(); + return categoriesResponse.getResult().results(); + } + + protected DataCounts postData(String jobId, String data) { + logger.debug("Posting data to job [{}]:\n{}", jobId, data); + PostDataAction.Request request = new PostDataAction.Request(jobId); + request.setContent(new BytesArray(data), XContentType.JSON); + return client().execute(PostDataAction.INSTANCE, request).actionGet().getDataCounts(); + } + + protected String forecast(String jobId, TimeValue duration, TimeValue expiresIn) { + ForecastJobAction.Request request = new ForecastJobAction.Request(jobId); + if (duration != null) { + request.setDuration(duration.getStringRep()); + } + if (expiresIn != null) { + request.setExpiresIn(expiresIn.getStringRep()); + } + return client().execute(ForecastJobAction.INSTANCE, request).actionGet().getForecastId(); + } + + protected void waitForecastToFinish(String jobId, String forecastId) throws Exception { + assertBusy(() -> { + ForecastRequestStats forecastRequestStats = getForecastStats(jobId, forecastId); + assertThat(forecastRequestStats, is(notNullValue())); + assertThat(forecastRequestStats.getStatus(), equalTo(ForecastRequestStats.ForecastRequestStatus.FINISHED)); + }, 30, TimeUnit.SECONDS); + } + + protected ForecastRequestStats getForecastStats(String jobId, String forecastId) { + SearchResponse searchResponse = 
client().prepareSearch(AnomalyDetectorsIndex.jobResultsAliasedName(jobId)) + .setQuery(QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery(Result.RESULT_TYPE.getPreferredName(), ForecastRequestStats.RESULT_TYPE_VALUE)) + .filter(QueryBuilders.termQuery(Job.ID.getPreferredName(), jobId)) + .filter(QueryBuilders.termQuery(ForecastRequestStats.FORECAST_ID.getPreferredName(), forecastId))) + .execute().actionGet(); + SearchHits hits = searchResponse.getHits(); + if (hits.getTotalHits() == 0) { + return null; + } + assertThat(hits.getTotalHits(), equalTo(1L)); + try { + XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser( + NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + hits.getHits()[0].getSourceRef().streamInput()); + return ForecastRequestStats.STRICT_PARSER.apply(parser, null); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + protected List getForecastStats() { + List forecastStats = new ArrayList<>(); + + SearchResponse searchResponse = client().prepareSearch(AnomalyDetectorsIndex.jobResultsIndexPrefix() + "*") + .setSize(1000) + .setQuery(QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery(Result.RESULT_TYPE.getPreferredName(), ForecastRequestStats.RESULT_TYPE_VALUE))) + .execute().actionGet(); + SearchHits hits = searchResponse.getHits(); + for (SearchHit hit : hits) { + try { + XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser( + NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, hit.getSourceRef().streamInput()); + forecastStats.add(ForecastRequestStats.STRICT_PARSER.apply(parser, null)); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + return forecastStats; + } + + protected long countForecastDocs(String jobId, String forecastId) { + SearchResponse searchResponse = client().prepareSearch(AnomalyDetectorsIndex.jobResultsIndexPrefix() + "*") + .setQuery(QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery(Result.RESULT_TYPE.getPreferredName(), Forecast.RESULT_TYPE_VALUE)) + .filter(QueryBuilders.termQuery(Job.ID.getPreferredName(), jobId)) + .filter(QueryBuilders.termQuery(Forecast.FORECAST_ID.getPreferredName(), forecastId))) + .execute().actionGet(); + return searchResponse.getHits().getTotalHits(); + } + + protected List getForecasts(String jobId, ForecastRequestStats forecastRequestStats) { + List forecasts = new ArrayList<>(); + + SearchResponse searchResponse = client().prepareSearch(AnomalyDetectorsIndex.jobResultsIndexPrefix() + "*") + .setSize((int) forecastRequestStats.getRecordCount()) + .setQuery(QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery(Result.RESULT_TYPE.getPreferredName(), Forecast.RESULT_TYPE_VALUE)) + .filter(QueryBuilders.termQuery(Job.ID.getPreferredName(), jobId)) + .filter(QueryBuilders.termQuery(Forecast.FORECAST_ID.getPreferredName(), forecastRequestStats.getForecastId()))) + .addSort(SortBuilders.fieldSort(Result.TIMESTAMP.getPreferredName()).order(SortOrder.ASC)) + .execute().actionGet(); + SearchHits hits = searchResponse.getHits(); + for (SearchHit hit : hits) { + try { + XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser( + NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + hit.getSourceRef().streamInput()); + forecasts.add(Forecast.STRICT_PARSER.apply(parser, null)); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + return forecasts; + } + + protected boolean 
putMlFilter(MlFilter filter) { + PutFilterAction.Response response = client().execute(PutFilterAction.INSTANCE, new PutFilterAction.Request(filter)).actionGet(); + return response.isAcknowledged(); + } + + protected PutCalendarAction.Response putCalendar(String calendarId, List jobIds, String description) { + PutCalendarAction.Request request = new PutCalendarAction.Request(new Calendar(calendarId, jobIds, description)); + return client().execute(PutCalendarAction.INSTANCE, request).actionGet(); + } + + protected PostCalendarEventsAction.Response postScheduledEvents(String calendarId, List events) { + PostCalendarEventsAction.Request request = new PostCalendarEventsAction.Request(calendarId, events); + return client().execute(PostCalendarEventsAction.INSTANCE, request).actionGet(); + } + + protected PersistJobAction.Response persistJob(String jobId) { + PersistJobAction.Request request = new PersistJobAction.Request(jobId); + return client().execute(PersistJobAction.INSTANCE, request).actionGet(); + } + + @Override + protected void ensureClusterStateConsistency() throws IOException { + if (cluster() != null && cluster().size() > 0) { + List entries = new ArrayList<>(ClusterModule.getNamedWriteables()); + entries.addAll(new SearchModule(Settings.EMPTY, true, Collections.emptyList()).getNamedWriteables()); + entries.add(new NamedWriteableRegistry.Entry(MetaData.Custom.class, "ml", MlMetadata::new)); + entries.add(new NamedWriteableRegistry.Entry(MetaData.Custom.class, PersistentTasksCustomMetaData.TYPE, + PersistentTasksCustomMetaData::new)); + entries.add(new NamedWriteableRegistry.Entry(PersistentTaskParams.class, StartDatafeedAction.TASK_NAME, + StartDatafeedAction.DatafeedParams::new)); + entries.add(new NamedWriteableRegistry.Entry(PersistentTaskParams.class, OpenJobAction.TASK_NAME, + OpenJobAction.JobParams::new)); + entries.add(new NamedWriteableRegistry.Entry(Task.Status.class, PersistentTasksNodeService.Status.NAME, + PersistentTasksNodeService.Status::new)); + entries.add(new NamedWriteableRegistry.Entry(Task.Status.class, JobTaskStatus.NAME, JobTaskStatus::new)); + entries.add(new NamedWriteableRegistry.Entry(Task.Status.class, DatafeedState.NAME, DatafeedState::fromStream)); + entries.add(new NamedWriteableRegistry.Entry(ClusterState.Custom.class, TokenMetaData.TYPE, TokenMetaData::new)); + final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries); + ClusterState masterClusterState = client().admin().cluster().prepareState().all().get().getState(); + byte[] masterClusterStateBytes = ClusterState.Builder.toBytes(masterClusterState); + // remove local node reference + masterClusterState = ClusterState.Builder.fromBytes(masterClusterStateBytes, null, namedWriteableRegistry); + Map masterStateMap = convertToMap(masterClusterState); + int masterClusterStateSize = ClusterState.Builder.toBytes(masterClusterState).length; + String masterId = masterClusterState.nodes().getMasterNodeId(); + for (Client client : cluster().getClients()) { + ClusterState localClusterState = client.admin().cluster().prepareState().all().setLocal(true).get().getState(); + byte[] localClusterStateBytes = ClusterState.Builder.toBytes(localClusterState); + // remove local node reference + localClusterState = ClusterState.Builder.fromBytes(localClusterStateBytes, null, namedWriteableRegistry); + final Map localStateMap = convertToMap(localClusterState); + final int localClusterStateSize = ClusterState.Builder.toBytes(localClusterState).length; + // Check that the non-master node has 
the same version of the cluster state as the master and + // that the master node matches the master (otherwise there is no requirement for the cluster state to match) + if (masterClusterState.version() == localClusterState.version() && + masterId.equals(localClusterState.nodes().getMasterNodeId())) { + try { + assertEquals("clusterstate UUID does not match", masterClusterState.stateUUID(), localClusterState.stateUUID()); + // We cannot compare serialization bytes since serialization order of maps is not guaranteed + // but we can compare serialization sizes - they should be the same + assertEquals("clusterstate size does not match", masterClusterStateSize, localClusterStateSize); + // Compare JSON serialization + assertNull("clusterstate JSON serialization does not match", + differenceBetweenMapsIgnoringArrayOrder(masterStateMap, localStateMap)); + } catch (AssertionError error) { + logger.error("Cluster state from master:\n{}\nLocal cluster state:\n{}", + masterClusterState.toString(), localClusterState.toString()); + throw error; + } + } + } + } + } + + protected List generateData(long timestamp, TimeValue bucketSpan, int bucketCount, + Function timeToCountFunction) throws IOException { + List data = new ArrayList<>(); + long now = timestamp; + for (int bucketIndex = 0; bucketIndex < bucketCount; bucketIndex++) { + for (int count = 0; count < timeToCountFunction.apply(bucketIndex); count++) { + Map record = new HashMap<>(); + record.put("time", now); + data.add(createJsonRecord(record)); + } + now += bucketSpan.getMillis(); + } + return data; + } + + protected static String createJsonRecord(Map keyValueMap) throws IOException { + return Strings.toString(JsonXContent.contentBuilder().map(keyValueMap)) + "\n"; + } +} diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/OverallBucketsIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/OverallBucketsIT.java new file mode 100644 index 0000000000000..785c3c6f67742 --- /dev/null +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/OverallBucketsIT.java @@ -0,0 +1,119 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.ml.action.GetBucketsAction; +import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; +import org.elasticsearch.xpack.core.ml.action.GetOverallBucketsAction; +import org.elasticsearch.xpack.core.ml.action.util.PageParams; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; +import org.elasticsearch.xpack.core.ml.job.results.OverallBucket; +import org.junit.After; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; + +/** + * Tests that overall bucket results are calculated correctly + * for jobs that have many buckets. 
+ */ +public class OverallBucketsIT extends MlNativeAutodetectIntegTestCase { + + private static final String JOB_ID = "overall-buckets-test"; + private static final long BUCKET_SPAN_SECONDS = 3600; + + @After + public void cleanUpTest() throws Exception { + cleanUp(); + } + + public void test() throws Exception { + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder( + Collections.singletonList(new Detector.Builder("count", null).build())); + analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(BUCKET_SPAN_SECONDS)); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeFormat("epoch"); + Job.Builder job = new Job.Builder(JOB_ID); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(dataDescription); + + registerJob(job); + putJob(job); + openJob(job.getId()); + + long timestamp = 1483228800L; // 2017-01-01T00:00:00Z + List data = new ArrayList<>(); + for (int i = 0; i < 3000; i++) { + data.add(createJsonRecord(createRecord(timestamp))); + if (i % 1000 == 0) { + data.add(createJsonRecord(createRecord(timestamp))); + data.add(createJsonRecord(createRecord(timestamp))); + data.add(createJsonRecord(createRecord(timestamp))); + } + timestamp += BUCKET_SPAN_SECONDS; + } + + postData(job.getId(), data.stream().collect(Collectors.joining())); + flushJob(job.getId(), true); + closeJob(job.getId()); + + GetBucketsAction.Request request = new GetBucketsAction.Request(job.getId()); + request.setPageParams(new PageParams(0, 3000)); + assertThat(client().execute(GetBucketsAction.INSTANCE, request).actionGet().getBuckets().count(), equalTo(3000L)); + + { + // Check we get equal number of overall buckets on a default request + GetOverallBucketsAction.Request overallBucketsRequest = new GetOverallBucketsAction.Request(job.getId()); + GetOverallBucketsAction.Response overallBucketsResponse = client().execute( + GetOverallBucketsAction.INSTANCE, overallBucketsRequest).actionGet(); + assertThat(overallBucketsResponse.getOverallBuckets().count(), equalTo(3000L)); + } + + { + // Check overall buckets are half when the bucket_span is set to double the job bucket span + GetOverallBucketsAction.Request aggregatedOverallBucketsRequest = new GetOverallBucketsAction.Request(job.getId()); + aggregatedOverallBucketsRequest.setBucketSpan(TimeValue.timeValueSeconds(2 * BUCKET_SPAN_SECONDS)); + GetOverallBucketsAction.Response aggregatedOverallBucketsResponse = client().execute( + GetOverallBucketsAction.INSTANCE, aggregatedOverallBucketsRequest).actionGet(); + assertThat(aggregatedOverallBucketsResponse.getOverallBuckets().count(), equalTo(1500L)); + } + + { + // Check overall score filtering works when chunking takes place + GetOverallBucketsAction.Request filteredOverallBucketsRequest = new GetOverallBucketsAction.Request(job.getId()); + filteredOverallBucketsRequest.setOverallScore(0.1); + GetOverallBucketsAction.Response filteredOverallBucketsResponse = client().execute( + GetOverallBucketsAction.INSTANCE, filteredOverallBucketsRequest).actionGet(); + assertThat(filteredOverallBucketsResponse.getOverallBuckets().count(), equalTo(2L)); + } + + // Since this job ran for 3000 buckets, it's a good place to assert + // that established model memory matches model memory in the job stats + assertBusy(() -> { + GetJobsStatsAction.Response.JobStats jobStats = getJobStats(job.getId()).get(0); + ModelSizeStats modelSizeStats = jobStats.getModelSizeStats(); + Job updatedJob = getJob(job.getId()).get(0); + 
assertThat(updatedJob.getEstablishedModelMemory(), equalTo(modelSizeStats.getModelBytes())); + }); + } + + private static Map createRecord(long timestamp) { + Map record = new HashMap<>(); + record.put("time", timestamp); + return record; + } +} diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/PersistJobIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/PersistJobIT.java new file mode 100644 index 0000000000000..6f885744b21a4 --- /dev/null +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/PersistJobIT.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.ml.action.PersistJobAction; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; +import org.junit.After; + +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +public class PersistJobIT extends MlNativeAutodetectIntegTestCase { + + @After + public void cleanUpJobs() { + cleanUp(); + } + + public void testPersistJob() throws Exception { + String jobId = "persist-job-test"; + runJob(jobId); + + PersistJobAction.Response r = persistJob(jobId); + assertTrue(r.isPersisted()); + + // Persisting the job will create a model snapshot + assertBusy(() -> { + List snapshots = getModelSnapshots(jobId); + assertFalse(snapshots.isEmpty()); + }); + } + + private void runJob(String jobId) throws Exception { + TimeValue bucketSpan = TimeValue.timeValueMinutes(5); + Detector.Builder detector = new Detector.Builder("count", null); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Arrays.asList(detector.build())); + analysisConfig.setBucketSpan(bucketSpan); + Job.Builder job = new Job.Builder(jobId); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(new DataDescription.Builder()); + registerJob(job); + putJob(job); + + openJob(job.getId()); + List data = generateData(System.currentTimeMillis(), bucketSpan, 10, bucketIndex -> randomIntBetween(10, 20)); + postData(job.getId(), data.stream().collect(Collectors.joining())); + } +} diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RestoreModelSnapshotIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RestoreModelSnapshotIT.java new file mode 100644 index 0000000000000..c5bc7c4ed1426 --- /dev/null +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RestoreModelSnapshotIT.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.ml.integration;
+
+import org.elasticsearch.ElasticsearchStatusException;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction;
+import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig;
+import org.elasticsearch.xpack.core.ml.job.config.DataDescription;
+import org.elasticsearch.xpack.core.ml.job.config.Detector;
+import org.elasticsearch.xpack.core.ml.job.config.Job;
+import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats;
+import org.elasticsearch.xpack.core.ml.job.results.ForecastRequestStats;
+import org.junit.After;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * This test aims to catch regressions where,
+ * when a job is reopened, it is not restored
+ * with its model snapshot. To achieve this we
+ * leverage the forecast API: requesting a forecast
+ * when there is no model state results in an error.
+ * Thus, we create a job, send it some data and close it.
+ * Then we reopen it and request a forecast, asserting
+ * that the forecast succeeds.
+ */
+public class RestoreModelSnapshotIT extends MlNativeAutodetectIntegTestCase {
+
+    @After
+    public void tearDownData() {
+        cleanUp();
+    }
+
+    public void test() throws Exception {
+        TimeValue bucketSpan = TimeValue.timeValueHours(1);
+        int bucketCount = 72;
+
+        List<String> data = new ArrayList<>();
+        long now = System.currentTimeMillis();
+        long timestamp = now - bucketCount * bucketSpan.getMillis();
+        for (int i = 0; i < bucketCount; i++) {
+            Map<String, Object> record = new HashMap<>();
+            record.put("time", timestamp);
+            data.add(createJsonRecord(record));
+            timestamp += bucketSpan.getMillis();
+        }
+
+        // Create the job, post the data and close the job
+        Job.Builder job = buildAndRegisterJob("restore-model-snapshot-job", bucketSpan);
+        openJob(job.getId());
+        // Forecast should fail when the model has seen no data, i.e. model state is not initialized
+        expectThrows(ElasticsearchStatusException.class, () -> forecast(job.getId(), TimeValue.timeValueHours(3), null));
+        postData(job.getId(), data.stream().collect(Collectors.joining()));
+        closeJob(job.getId());
+
+        // Reopen the job and check forecast works
+        openJob(job.getId());
+        String forecastId = forecast(job.getId(), TimeValue.timeValueHours(3), null);
+        waitForecastToFinish(job.getId(), forecastId);
+        ForecastRequestStats forecastStats = getForecastStats(job.getId(), forecastId);
+        assertThat(forecastStats.getStatus(), equalTo(ForecastRequestStats.ForecastRequestStatus.FINISHED));
+
+        closeJob(job.getId());
+
+        // Since this job ran for 72 buckets, it's a good place to assert
+        // that established model memory matches model memory in the job stats
+        assertBusy(() -> {
+            GetJobsStatsAction.Response.JobStats jobStats = getJobStats(job.getId()).get(0);
+            ModelSizeStats modelSizeStats = jobStats.getModelSizeStats();
+            Job updatedJob = getJob(job.getId()).get(0);
+            assertThat(updatedJob.getEstablishedModelMemory(), equalTo(modelSizeStats.getModelBytes()));
+        });
+    }
+
+    private Job.Builder buildAndRegisterJob(String jobId, TimeValue bucketSpan) throws Exception {
+        Detector.Builder detector = new Detector.Builder("count", null);
+        AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(detector.build()));
+
analysisConfig.setBucketSpan(bucketSpan); + Job.Builder job = new Job.Builder(jobId); + job.setAnalysisConfig(analysisConfig); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + job.setDataDescription(dataDescription); + registerJob(job); + putJob(job); + return job; + } +} diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RevertModelSnapshotIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RevertModelSnapshotIT.java new file mode 100644 index 0000000000000..3400d09ee75aa --- /dev/null +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RevertModelSnapshotIT.java @@ -0,0 +1,173 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles; +import org.elasticsearch.xpack.core.ml.job.results.Bucket; +import org.junit.After; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.function.BiFunction; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; + +/** + * This test pushes data through a job in 2 runs creating + * 2 model snapshots. It then reverts to the earlier snapshot + * and asserts the reversion worked as expected. + */ +public class RevertModelSnapshotIT extends MlNativeAutodetectIntegTestCase { + + @After + public void tearDownData() throws Exception { + cleanUp(); + } + + public void test() throws Exception { + TimeValue bucketSpan = TimeValue.timeValueHours(1); + long startTime = 1491004800000L; + + Job.Builder job = buildAndRegisterJob("revert-model-snapshot-it-job", bucketSpan); + openJob(job.getId()); + postData(job.getId(), generateData(startTime, bucketSpan, 10, Arrays.asList("foo"), + (bucketIndex, series) -> bucketIndex == 5 ? 
100.0 : 10.0).stream().collect(Collectors.joining())); + flushJob(job.getId(), true); + closeJob(job.getId()); + + ModelSizeStats modelSizeStats1 = getJobStats(job.getId()).get(0).getModelSizeStats(); + Quantiles quantiles1 = getQuantiles(job.getId()); + + List midwayBuckets = getBuckets(job.getId()); + Bucket revertPointBucket = midwayBuckets.get(midwayBuckets.size() - 1); + assertThat(revertPointBucket.isInterim(), is(true)); + + // We need to wait a second to ensure the second time around model snapshot will have a different ID (it depends on epoch seconds) + awaitBusy(() -> false, 1, TimeUnit.SECONDS); + + openJob(job.getId()); + postData(job.getId(), generateData(startTime + 10 * bucketSpan.getMillis(), bucketSpan, 10, Arrays.asList("foo", "bar"), + (bucketIndex, series) -> 10.0).stream().collect(Collectors.joining())); + closeJob(job.getId()); + + ModelSizeStats modelSizeStats2 = getJobStats(job.getId()).get(0).getModelSizeStats(); + Quantiles quantiles2 = getQuantiles(job.getId()); + + // Check model has grown since a new series was introduced + assertThat(modelSizeStats2.getModelBytes(), greaterThan(modelSizeStats1.getModelBytes())); + + // Check quantiles have changed + assertThat(quantiles2, not(equalTo(quantiles1))); + + List finalPreRevertBuckets = getBuckets(job.getId()); + Bucket finalPreRevertPointBucket = finalPreRevertBuckets.get(midwayBuckets.size() - 1); + assertThat(finalPreRevertPointBucket.isInterim(), is(false)); + + List modelSnapshots = getModelSnapshots(job.getId()); + assertThat(modelSnapshots.size(), equalTo(2)); + + // Snapshots are sorted in descending timestamp order so we revert to the last of the list/earliest. + assertThat(modelSnapshots.get(0).getTimestamp().getTime(), greaterThan(modelSnapshots.get(1).getTimestamp().getTime())); + assertThat(getJob(job.getId()).get(0).getModelSnapshotId(), equalTo(modelSnapshots.get(0).getSnapshotId())); + ModelSnapshot revertSnapshot = modelSnapshots.get(1); + + assertThat(revertModelSnapshot(job.getId(), revertSnapshot.getSnapshotId()).status(), equalTo(RestStatus.OK)); + + // Check model_size_stats has been reverted + assertThat(getJobStats(job.getId()).get(0).getModelSizeStats().getModelBytes(), equalTo(modelSizeStats1.getModelBytes())); + + // Check quantiles have been reverted + assertThat(getQuantiles(job.getId()).getTimestamp(), equalTo(revertSnapshot.getLatestResultTimeStamp())); + + // Re-run 2nd half of data + openJob(job.getId()); + postData(job.getId(), generateData(startTime + 10 * bucketSpan.getMillis(), bucketSpan, 10, Arrays.asList("foo", "bar"), + (bucketIndex, series) -> 10.0).stream().collect(Collectors.joining())); + closeJob(job.getId()); + + List finalPostRevertBuckets = getBuckets(job.getId()); + Bucket finalPostRevertPointBucket = finalPostRevertBuckets.get(midwayBuckets.size() - 1); + assertThat(finalPostRevertPointBucket.getTimestamp(), equalTo(finalPreRevertPointBucket.getTimestamp())); + assertThat(finalPostRevertPointBucket.getAnomalyScore(), equalTo(finalPreRevertPointBucket.getAnomalyScore())); + assertThat(finalPostRevertPointBucket.getEventCount(), equalTo(finalPreRevertPointBucket.getEventCount())); + } + + private Job.Builder buildAndRegisterJob(String jobId, TimeValue bucketSpan) throws Exception { + Detector.Builder detector = new Detector.Builder("mean", "value"); + detector.setPartitionFieldName("series"); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Arrays.asList(detector.build())); + analysisConfig.setBucketSpan(bucketSpan); + Job.Builder job = new 
Job.Builder(jobId); + job.setAnalysisConfig(analysisConfig); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + job.setDataDescription(dataDescription); + registerJob(job); + putJob(job); + return job; + } + + private static List generateData(long timestamp, TimeValue bucketSpan, int bucketCount, List series, + BiFunction timeAndSeriesToValueFunction) throws IOException { + List data = new ArrayList<>(); + long now = timestamp; + for (int i = 0; i < bucketCount; i++) { + for (String field : series) { + Map record = new HashMap<>(); + record.put("time", now); + record.put("value", timeAndSeriesToValueFunction.apply(i, field)); + record.put("series", field); + data.add(createJsonRecord(record)); + + record = new HashMap<>(); + record.put("time", now + bucketSpan.getMillis() / 2); + record.put("value", timeAndSeriesToValueFunction.apply(i, field)); + record.put("series", field); + data.add(createJsonRecord(record)); + } + now += bucketSpan.getMillis(); + } + return data; + } + + private Quantiles getQuantiles(String jobId) { + SearchResponse response = client().prepareSearch(".ml-state") + .setQuery(QueryBuilders.idsQuery().addIds(Quantiles.documentId(jobId))) + .setSize(1) + .get(); + SearchHits hits = response.getHits(); + assertThat(hits.getTotalHits(), equalTo(1L)); + try { + XContentParser parser = JsonXContent.jsonXContent + .createParser(null, LoggingDeprecationHandler.INSTANCE, hits.getAt(0).getSourceAsString()); + return Quantiles.LENIENT_PARSER.apply(parser, null); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } +} diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java new file mode 100644 index 0000000000000..6703e4ef2365b --- /dev/null +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java @@ -0,0 +1,281 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.xpack.core.ml.action.GetBucketsAction; +import org.elasticsearch.xpack.core.ml.action.GetRecordsAction; +import org.elasticsearch.xpack.core.ml.calendars.ScheduledEvent; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; +import org.elasticsearch.xpack.core.ml.job.results.Bucket; +import org.junit.After; + +import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class ScheduledEventsIT extends MlNativeAutodetectIntegTestCase { + + @After + public void cleanUpTest() { + cleanUp(); + } + + public void testScheduledEvents() throws IOException { + + TimeValue bucketSpan = TimeValue.timeValueMinutes(30); + Job.Builder job = createJob("scheduled-events", bucketSpan); + String calendarId = "test-calendar"; + putCalendar(calendarId, Collections.singletonList(job.getId()), "testScheduledEvents calendar"); + + long startTime = 1514764800000L; + + List events = new ArrayList<>(); + long firstEventStartTime = 1514937600000L; + long firstEventEndTime = firstEventStartTime + 2 * 60 * 60 * 1000; + events.add(new ScheduledEvent.Builder().description("1st event (2hr)") + .startTime(ZonedDateTime.ofInstant(Instant.ofEpochMilli(firstEventStartTime), ZoneOffset.UTC)) + .endTime(ZonedDateTime.ofInstant(Instant.ofEpochMilli(firstEventEndTime), ZoneOffset.UTC)) + .calendarId(calendarId).build()); + // add 10 min event smaller than the bucket + long secondEventStartTime = 1515067200000L; + long secondEventEndTime = secondEventStartTime + 10 * 60 * 1000; + events.add(new ScheduledEvent.Builder().description("2nd event with period smaller than bucketspan") + .startTime(ZonedDateTime.ofInstant(Instant.ofEpochMilli(secondEventStartTime), ZoneOffset.UTC)) + .endTime(ZonedDateTime.ofInstant(Instant.ofEpochMilli(secondEventEndTime), ZoneOffset.UTC)) + .calendarId(calendarId).build()); + long thirdEventStartTime = 1515088800000L; + long thirdEventEndTime = thirdEventStartTime + 3 * 60 * 60 * 1000; + events.add(new ScheduledEvent.Builder().description("3rd event 3hr") + .startTime(ZonedDateTime.ofInstant(Instant.ofEpochMilli(thirdEventStartTime), ZoneOffset.UTC)) + .endTime(ZonedDateTime.ofInstant(Instant.ofEpochMilli(thirdEventEndTime), ZoneOffset.UTC)) + .calendarId(calendarId).build()); + + postScheduledEvents(calendarId, events); + + // Run 6 days of data + runJob(job, startTime, bucketSpan, 2 * 24 * 6); + + // Check tags on the buckets during the first event + GetBucketsAction.Request getBucketsRequest = new GetBucketsAction.Request(job.getId()); + getBucketsRequest.setStart(Long.toString(firstEventStartTime)); + getBucketsRequest.setEnd(Long.toString(firstEventEndTime)); + List buckets = 
getBuckets(getBucketsRequest);
+        for (Bucket bucket : buckets) {
+            assertEquals(1, bucket.getScheduledEvents().size());
+            assertEquals("1st event (2hr)", bucket.getScheduledEvents().get(0));
+            assertEquals(0.0, bucket.getAnomalyScore(), 0.00001);
+        }
+
+        // Following buckets have 0 events
+        getBucketsRequest = new GetBucketsAction.Request(job.getId());
+        getBucketsRequest.setStart(Long.toString(firstEventEndTime));
+        getBucketsRequest.setEnd(Long.toString(secondEventStartTime));
+        buckets = getBuckets(getBucketsRequest);
+        for (Bucket bucket : buckets) {
+            assertEquals(0, bucket.getScheduledEvents().size());
+        }
+
+        // The second event bucket
+        getBucketsRequest.setStart(Long.toString(secondEventStartTime));
+        getBucketsRequest.setEnd(Long.toString(secondEventEndTime));
+        buckets = getBuckets(getBucketsRequest);
+        assertEquals(1, buckets.size());
+        for (Bucket bucket : buckets) {
+            assertEquals(1, bucket.getScheduledEvents().size());
+            assertEquals("2nd event with period smaller than bucketspan", bucket.getScheduledEvents().get(0));
+            assertEquals(0.0, bucket.getAnomalyScore(), 0.00001);
+        }
+
+        // Following buckets have 0 events
+        getBucketsRequest.setStart(Long.toString(secondEventEndTime));
+        getBucketsRequest.setEnd(Long.toString(thirdEventStartTime));
+        buckets = getBuckets(getBucketsRequest);
+        for (Bucket bucket : buckets) {
+            assertEquals(0, bucket.getScheduledEvents().size());
+        }
+
+        // The 3rd event buckets
+        getBucketsRequest.setStart(Long.toString(thirdEventStartTime));
+        getBucketsRequest.setEnd(Long.toString(thirdEventEndTime));
+        buckets = getBuckets(getBucketsRequest);
+        for (Bucket bucket : buckets) {
+            assertEquals(1, bucket.getScheduledEvents().size());
+            assertEquals("3rd event 3hr", bucket.getScheduledEvents().get(0));
+            assertEquals(0.0, bucket.getAnomalyScore(), 0.00001);
+        }
+
+        // Following buckets have 0 events
+        getBucketsRequest.setStart(Long.toString(thirdEventEndTime));
+        getBucketsRequest.setEnd(null);
+        buckets = getBuckets(getBucketsRequest);
+        for (Bucket bucket : buckets) {
+            assertEquals(0, bucket.getScheduledEvents().size());
+        }
+
+        // It is unlikely any anomaly records have been created but
+        // ensure there are none present anyway
+        GetRecordsAction.Request getRecordsRequest = new GetRecordsAction.Request(job.getId());
+        getRecordsRequest.setStart(Long.toString(firstEventStartTime));
+        getRecordsRequest.setEnd(Long.toString(firstEventEndTime));
+        List<AnomalyRecord> records = getRecords(getRecordsRequest);
+        assertThat(records, is(empty()));
+
+        getRecordsRequest.setStart(Long.toString(secondEventStartTime));
+        getRecordsRequest.setEnd(Long.toString(secondEventEndTime));
+        records = getRecords(getRecordsRequest);
+        assertThat(records, is(empty()));
+
+        getRecordsRequest.setStart(Long.toString(thirdEventStartTime));
+        getRecordsRequest.setEnd(Long.toString(thirdEventEndTime));
+        records = getRecords(getRecordsRequest);
+        assertThat(records, is(empty()));
+    }
+
+    public void testScheduledEventWithInterimResults() throws IOException {
+        TimeValue bucketSpan = TimeValue.timeValueMinutes(30);
+        Job.Builder job = createJob("scheduled-events-interim-results", bucketSpan);
+        String calendarId = "test-calendar";
+        putCalendar(calendarId, Collections.singletonList(job.getId()), "testScheduledEventWithInterimResults calendar");
+
+        long startTime = 1514764800000L;
+
+        List<ScheduledEvent> events = new ArrayList<>();
+        // The event starts 10 buckets in and lasts for 2
+        int bucketCount = 10;
+        long firstEventStartTime = startTime + bucketSpan.millis() * bucketCount;
+        long firstEventEndTime =
firstEventStartTime + bucketSpan.millis() * 2; + events.add(new ScheduledEvent.Builder().description("1st event 2hr") + .startTime(ZonedDateTime.ofInstant(Instant.ofEpochMilli(firstEventStartTime), ZoneOffset.UTC)) + .endTime(ZonedDateTime.ofInstant(Instant.ofEpochMilli(firstEventEndTime), ZoneOffset.UTC)) + .calendarId(calendarId).build()); + postScheduledEvents(calendarId, events); + + + openJob(job.getId()); + // write data up to and including the event + postData(job.getId(), generateData(startTime, bucketSpan, bucketCount + 1, bucketIndex -> randomIntBetween(100, 200)) + .stream().collect(Collectors.joining())); + + // flush the job and get the interim result during the event + flushJob(job.getId(), true); + + GetBucketsAction.Request getBucketsRequest = new GetBucketsAction.Request(job.getId()); + getBucketsRequest.setStart(Long.toString(firstEventStartTime)); + List buckets = getBuckets(getBucketsRequest); + assertEquals(1, buckets.size()); + assertTrue(buckets.get(0).isInterim()); + assertEquals(1, buckets.get(0).getScheduledEvents().size()); + assertEquals("1st event 2hr", buckets.get(0).getScheduledEvents().get(0)); + assertEquals(0.0, buckets.get(0).getAnomalyScore(), 0.00001); + } + + /** + * Test an open job picks up changes to scheduled events/calendars + */ + public void testOnlineUpdate() throws Exception { + TimeValue bucketSpan = TimeValue.timeValueMinutes(30); + Job.Builder job = createJob("scheduled-events-online-update", bucketSpan); + + long startTime = 1514764800000L; + final int bucketCount = 5; + + // Open the job + openJob(job.getId()); + + // write some buckets of data + postData(job.getId(), generateData(startTime, bucketSpan, bucketCount, bucketIndex -> randomIntBetween(100, 200)) + .stream().collect(Collectors.joining())); + + // Now create a calendar and events for the job while it is open + String calendarId = "test-calendar-online-update"; + putCalendar(calendarId, Collections.singletonList(job.getId()), "testOnlineUpdate calendar"); + + List events = new ArrayList<>(); + long eventStartTime = startTime + (bucketCount + 1) * bucketSpan.millis(); + long eventEndTime = eventStartTime + (long)(1.5 * bucketSpan.millis()); + events.add(new ScheduledEvent.Builder().description("Some Event") + .startTime(ZonedDateTime.ofInstant(Instant.ofEpochMilli(eventStartTime), ZoneOffset.UTC)) + .endTime(ZonedDateTime.ofInstant(Instant.ofEpochMilli(eventEndTime), ZoneOffset.UTC)) + .calendarId(calendarId).build()); + + postScheduledEvents(calendarId, events); + + // Wait until the notification that the process was updated is indexed + assertBusy(() -> { + SearchResponse searchResponse = client().prepareSearch(".ml-notifications") + .setSize(1) + .addSort("timestamp", SortOrder.DESC) + .setQuery(QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery("job_id", job.getId())) + .filter(QueryBuilders.termQuery("level", "info")) + ).get(); + SearchHit[] hits = searchResponse.getHits().getHits(); + assertThat(hits.length, equalTo(1)); + assertThat(hits[0].getSourceAsMap().get("message"), equalTo("Updated calendars in running process")); + }); + + // write some more buckets of data that cover the scheduled event period + postData(job.getId(), generateData(startTime + bucketCount * bucketSpan.millis(), bucketSpan, 5, + bucketIndex -> randomIntBetween(100, 200)) + .stream().collect(Collectors.joining())); + // and close + closeJob(job.getId()); + + GetBucketsAction.Request getBucketsRequest = new GetBucketsAction.Request(job.getId()); + List buckets = 
getBuckets(getBucketsRequest); + + // the first buckets have no events + for (int i=0; i<=bucketCount; i++) { + assertEquals(0, buckets.get(i).getScheduledEvents().size()); + } + // 7th and 8th buckets have the event + assertEquals(1, buckets.get(6).getScheduledEvents().size()); + assertEquals("Some Event", buckets.get(6).getScheduledEvents().get(0)); + assertEquals(1, buckets.get(7).getScheduledEvents().size()); + assertEquals("Some Event", buckets.get(7).getScheduledEvents().get(0)); + assertEquals(0, buckets.get(8).getScheduledEvents().size()); + } + + private Job.Builder createJob(String jobId, TimeValue bucketSpan) { + Detector.Builder detector = new Detector.Builder("count", null); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(detector.build())); + analysisConfig.setBucketSpan(bucketSpan); + Job.Builder job = new Job.Builder(jobId); + job.setAnalysisConfig(analysisConfig); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + job.setDataDescription(dataDescription); + putJob(job); + // register for clean up + registerJob(job); + + return job; + } + + private void runJob(Job.Builder job, long startTime, TimeValue bucketSpan, int bucketCount) throws IOException { + openJob(job.getId()); + postData(job.getId(), generateData(startTime, bucketSpan, bucketCount, bucketIndex -> randomIntBetween(100, 200)) + .stream().collect(Collectors.joining())); + closeJob(job.getId()); + } +} diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java new file mode 100644 index 0000000000000..3d5533fed08ed --- /dev/null +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.ml.action.GetBucketsAction; +import org.elasticsearch.xpack.core.ml.action.util.PageParams; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.results.Bucket; +import org.junit.After; + +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThan; + +/** + * Tests that interim results get updated correctly + */ +public class UpdateInterimResultsIT extends MlNativeAutodetectIntegTestCase { + + private static final String JOB_ID = "update-interim-test"; + private static final long BUCKET_SPAN_SECONDS = 1000; + + private long time; + + @After + public void cleanUpTest() throws Exception { + cleanUp(); + } + + public void test() throws Exception { + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder( + Collections.singletonList(new Detector.Builder("max", "value").build())); + analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(BUCKET_SPAN_SECONDS)); + analysisConfig.setOverlappingBuckets(true); + DataDescription.Builder dataDescription = new DataDescription.Builder(); + dataDescription.setTimeFormat("epoch"); + Job.Builder job = new Job.Builder(JOB_ID); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(dataDescription); + + registerJob(job); + putJob(job); + openJob(job.getId()); + + time = 1400000000; + + // push some data, flush job, verify no interim results + assertThat(postData(job.getId(), createData(50)).getProcessedRecordCount(), equalTo(50L)); + flushJob(job.getId(), false); + assertThat(getInterimResults(job.getId()).isEmpty(), is(true)); + + // push some more data, flush job, verify no interim results + assertThat(postData(job.getId(), createData(30)).getProcessedRecordCount(), equalTo(30L)); + flushJob(job.getId(), false); + assertThat(getInterimResults(job.getId()).isEmpty(), is(true)); + assertThat(time, equalTo(1400040000L)); + + // push some data up to a 1/4 bucket boundary, flush (with interim), check interim results + String data = "{\"time\":1400040000,\"value\":14}\n" + + "{\"time\":1400040500,\"value\":12}\n" + + "{\"time\":1400040510,\"value\":16}\n"; + assertThat(postData(job.getId(), data).getProcessedRecordCount(), equalTo(3L)); + flushJob(job.getId(), true); + + // We might need to retry this while waiting for a refresh + assertBusy(() -> { + List firstInterimBuckets = getInterimResults(job.getId()); + assertThat("interim buckets were: " + firstInterimBuckets, firstInterimBuckets.size(), equalTo(2)); + assertThat(firstInterimBuckets.get(0).getTimestamp().getTime(), equalTo(1400039000000L)); + assertThat(firstInterimBuckets.get(1).getTimestamp().getTime(), equalTo(1400040000000L)); + assertThat(firstInterimBuckets.get(1).getRecords().get(0).getActual().get(0), equalTo(16.0)); + }); + + // push 1 more record, flush (with interim), check same interim result + data = "{\"time\":1400040520,\"value\":15}\n"; + assertThat(postData(job.getId(), data).getProcessedRecordCount(), equalTo(1L)); + flushJob(job.getId(), true); + + assertBusy(() -> { + 
List secondInterimBuckets = getInterimResults(job.getId()); + assertThat(secondInterimBuckets.get(0).getTimestamp().getTime(), equalTo(1400039000000L)); + assertThat(secondInterimBuckets.get(1).getTimestamp().getTime(), equalTo(1400040000000L)); + assertThat(secondInterimBuckets.get(1).getRecords().get(0).getActual().get(0), equalTo(16.0)); + }); + + // push rest of data, close, verify no interim results + time += BUCKET_SPAN_SECONDS; + assertThat(postData(job.getId(), createData(30)).getProcessedRecordCount(), equalTo(30L)); + closeJob(job.getId()); + assertThat(getInterimResults(job.getId()).isEmpty(), is(true)); + + // Verify interim results have been replaced with finalized results + GetBucketsAction.Request bucketRequest = new GetBucketsAction.Request(job.getId()); + bucketRequest.setTimestamp("1400039500000"); + bucketRequest.setExpand(true); + List bucket = client().execute(GetBucketsAction.INSTANCE, bucketRequest).get().getBuckets().results(); + assertThat(bucket.size(), equalTo(1)); + assertThat(bucket.get(0).getRecords().get(0).getActual().get(0), equalTo(14.0)); + } + + private String createData(int halfBuckets) { + StringBuilder data = new StringBuilder(); + for (int i = 0; i < halfBuckets; i++) { + int value = randomIntBetween(1, 3); + data.append("{\"time\":").append(time).append(", \"value\":").append(value).append("}\n"); + time += BUCKET_SPAN_SECONDS / 2; + } + return data.toString(); + } + + private List getInterimResults(String jobId) { + GetBucketsAction.Request request = new GetBucketsAction.Request(jobId); + request.setExpand(true); + request.setPageParams(new PageParams(0, 1500)); + GetBucketsAction.Response response = client().execute(GetBucketsAction.INSTANCE, request).actionGet(); + assertThat(response.getBuckets().count(), lessThan(1500L)); + List buckets = response.getBuckets().results(); + assertThat(buckets.size(), greaterThan(0)); + return buckets.stream().filter(Bucket::isInterim).collect(Collectors.toList()); + } +} diff --git a/x-pack/qa/ml-no-bootstrap-tests/build.gradle b/x-pack/qa/ml-no-bootstrap-tests/build.gradle new file mode 100644 index 0000000000000..cad5201a67b6a --- /dev/null +++ b/x-pack/qa/ml-no-bootstrap-tests/build.gradle @@ -0,0 +1,7 @@ +apply plugin: 'elasticsearch.standalone-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('ml'), configuration: 'runtime') +} + diff --git a/x-pack/qa/ml-no-bootstrap-tests/src/test/java/org/elasticsearch/xpack/ml/utils/NamedPipeHelperNoBootstrapTests.java b/x-pack/qa/ml-no-bootstrap-tests/src/test/java/org/elasticsearch/xpack/ml/utils/NamedPipeHelperNoBootstrapTests.java new file mode 100644 index 0000000000000..6701a27a361ff --- /dev/null +++ b/x-pack/qa/ml-no-bootstrap-tests/src/test/java/org/elasticsearch/xpack/ml/utils/NamedPipeHelperNoBootstrapTests.java @@ -0,0 +1,340 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.utils; + +import org.apache.lucene.util.Constants; +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.monitor.jvm.JvmInfo; + +import com.sun.jna.IntegerType; +import com.sun.jna.Native; +import com.sun.jna.Pointer; +import com.sun.jna.WString; +import com.sun.jna.ptr.IntByReference; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.StandardOpenOption; +import java.time.Duration; + + +/** + * Covers positive test cases for creating named pipes, which are not possible in Java with + * the Elasticsearch security manager configuration or seccomp. This is why the class extends + * LuceneTestCase rather than ESTestCase. + * + * The way that pipes are managed in this class, e.g. using the mkfifo shell command, is + * not suitable for production, but adequate for this test. + */ +public class NamedPipeHelperNoBootstrapTests extends LuceneTestCase { + + private static final NamedPipeHelper NAMED_PIPE_HELPER = new NamedPipeHelper(); + + private static final String HELLO_WORLD = "Hello, world!"; + private static final String GOODBYE_WORLD = "Goodbye, world!"; + + private static final int BUFFER_SIZE = 4096; + + private static final long PIPE_ACCESS_OUTBOUND = 2; + private static final long PIPE_ACCESS_INBOUND = 1; + private static final long PIPE_TYPE_BYTE = 0; + private static final long PIPE_WAIT = 0; + private static final long PIPE_REJECT_REMOTE_CLIENTS = 8; + private static final long NMPWAIT_USE_DEFAULT_WAIT = 0; + + private static final int ERROR_PIPE_CONNECTED = 535; + + private static final Pointer INVALID_HANDLE_VALUE = Pointer.createConstant(Pointer.SIZE == 8 ? 
-1 : 0xFFFFFFFFL); + + static { + // Have to use JNA for Windows named pipes + if (Constants.WINDOWS) { + Native.register("kernel32"); + } + } + + public static class DWord extends IntegerType { + + public DWord() { + super(4, 0, true); + } + + public DWord(long val) { + super(4, val, true); + } + } + + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365150(v=vs.85).aspx + private static native Pointer CreateNamedPipeW(WString name, DWord openMode, DWord pipeMode, DWord maxInstances, DWord outBufferSize, + DWord inBufferSize, DWord defaultTimeOut, Pointer securityAttributes); + + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365146(v=vs.85).aspx + private static native boolean ConnectNamedPipe(Pointer handle, Pointer overlapped); + + // https://msdn.microsoft.com/en-us/library/windows/desktop/ms724211(v=vs.85).aspx + private static native boolean CloseHandle(Pointer handle); + + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365467(v=vs.85).aspx + private static native boolean ReadFile(Pointer handle, Pointer buffer, DWord numberOfBytesToRead, IntByReference numberOfBytesRead, + Pointer overlapped); + + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365747(v=vs.85).aspx + private static native boolean WriteFile(Pointer handle, Pointer buffer, DWord numberOfBytesToWrite, IntByReference numberOfBytesWritten, + Pointer overlapped); + + private static Pointer createPipe(String pipeName, boolean forWrite) throws IOException, InterruptedException { + if (Constants.WINDOWS) { + return createPipeWindows(pipeName, forWrite); + } + createPipeUnix(pipeName); + // This won't be used in the *nix version + return INVALID_HANDLE_VALUE; + } + + private static void createPipeUnix(String pipeName) throws IOException, InterruptedException { + if (Runtime.getRuntime().exec("mkfifo " + pipeName).waitFor() != 0) { + throw new IOException("mkfifo failed for pipe " + pipeName); + } + } + + private static Pointer createPipeWindows(String pipeName, boolean forWrite) throws IOException { + Pointer handle = CreateNamedPipeW(new WString(pipeName), new DWord(forWrite ? 
PIPE_ACCESS_OUTBOUND : PIPE_ACCESS_INBOUND), + new DWord(PIPE_TYPE_BYTE | PIPE_WAIT | PIPE_REJECT_REMOTE_CLIENTS), new DWord(1), + new DWord(BUFFER_SIZE), new DWord(BUFFER_SIZE), new DWord(NMPWAIT_USE_DEFAULT_WAIT), Pointer.NULL); + if (INVALID_HANDLE_VALUE.equals(handle)) { + throw new IOException("CreateNamedPipeW failed for pipe " + pipeName + " with error " + Native.getLastError()); + } + return handle; + } + + private static String readLineFromPipe(String pipeName, Pointer handle) throws IOException { + if (Constants.WINDOWS) { + return readLineFromPipeWindows(pipeName, handle); + } + return readLineFromPipeUnix(pipeName); + } + + private static String readLineFromPipeUnix(String pipeName) throws IOException { + return Files.readAllLines(PathUtils.get(pipeName), StandardCharsets.UTF_8).get(0); + } + + private static String readLineFromPipeWindows(String pipeName, Pointer handle) throws IOException { + if (!ConnectNamedPipe(handle, Pointer.NULL)) { + // ERROR_PIPE_CONNECTED means the pipe was already connected so + // there was no need to connect it again - not a problem + if (Native.getLastError() != ERROR_PIPE_CONNECTED) { + throw new IOException("ConnectNamedPipe failed for pipe " + pipeName + " with error " + Native.getLastError()); + } + } + IntByReference numberOfBytesRead = new IntByReference(); + ByteBuffer buf = ByteBuffer.allocateDirect(BUFFER_SIZE); + if (!ReadFile(handle, Native.getDirectBufferPointer(buf), new DWord(BUFFER_SIZE), numberOfBytesRead, Pointer.NULL)) { + throw new IOException("ReadFile failed for pipe " + pipeName + " with error " + Native.getLastError()); + } + byte[] content = new byte[numberOfBytesRead.getValue()]; + buf.get(content); + String line = new String(content, StandardCharsets.UTF_8); + int newlinePos = line.indexOf('\n'); + if (newlinePos == -1) { + return line; + } + return line.substring(0, newlinePos); + } + + private static void writeLineToPipe(String pipeName, Pointer handle, String line) throws IOException { + if (Constants.WINDOWS) { + writeLineToPipeWindows(pipeName, handle, line); + } else { + writeLineToPipeUnix(pipeName, line); + } + } + + private static void writeLineToPipeUnix(String pipeName, String line) throws IOException { + Files.write(PathUtils.get(pipeName), (line + '\n').getBytes(StandardCharsets.UTF_8), StandardOpenOption.WRITE); + } + + private static void writeLineToPipeWindows(String pipeName, Pointer handle, String line) throws IOException { + if (!ConnectNamedPipe(handle, Pointer.NULL)) { + // ERROR_PIPE_CONNECTED means the pipe was already connected so + // there was no need to connect it again - not a problem + if (Native.getLastError() != ERROR_PIPE_CONNECTED) { + throw new IOException("ConnectNamedPipe failed for pipe " + pipeName + " with error " + Native.getLastError()); + } + } + IntByReference numberOfBytesWritten = new IntByReference(); + ByteBuffer buf = ByteBuffer.allocateDirect(BUFFER_SIZE); + buf.put((line + '\n').getBytes(StandardCharsets.UTF_8)); + if (!WriteFile(handle, Native.getDirectBufferPointer(buf), new DWord(buf.position()), numberOfBytesWritten, Pointer.NULL)) { + throw new IOException("WriteFile failed for pipe " + pipeName + " with error " + Native.getLastError()); + } + } + + private static void deletePipe(String pipeName, Pointer handle) throws IOException { + if (Constants.WINDOWS) { + deletePipeWindows(pipeName, handle); + } else { + deletePipeUnix(pipeName); + } + } + + private static void deletePipeUnix(String pipeName) throws IOException { + Files.delete(PathUtils.get(pipeName)); + } + + 
private static void deletePipeWindows(String pipeName, Pointer handle) throws IOException { + if (!CloseHandle(handle)) { + throw new IOException("CloseHandle failed for pipe " + pipeName + " with error " + Native.getLastError()); + } + } + + private static class PipeReaderServer extends Thread { + + private String pipeName; + private String line; + private Exception exception; + + PipeReaderServer(String pipeName) { + this.pipeName = pipeName; + } + + @Override + public void run() { + Pointer handle = INVALID_HANDLE_VALUE; + try { + handle = createPipe(pipeName, false); + line = readLineFromPipe(pipeName, handle); + } + catch (IOException | InterruptedException e) { + exception = e; + } + try { + deletePipe(pipeName, handle); + } catch (IOException e) { + // Ignore it if the previous block caught an exception, as this probably means we failed to create the pipe + if (exception == null) { + exception = e; + } + } + } + + public String getLine() { + return line; + } + + public Exception getException() { + return exception; + } + } + + private static class PipeWriterServer extends Thread { + + private String pipeName; + private String line; + private Exception exception; + + PipeWriterServer(String pipeName, String line) { + this.pipeName = pipeName; + this.line = line; + } + + @Override + public void run() { + Pointer handle = INVALID_HANDLE_VALUE; + try { + handle = createPipe(pipeName, true); + writeLineToPipe(pipeName, handle, line); + } catch (IOException | InterruptedException e) { + exception = e; + } + try { + deletePipe(pipeName, handle); + } catch (IOException e) { + // Ignore it if the previous block caught an exception, as this probably means we failed to create the pipe + if (exception == null) { + exception = e; + } + } + } + + public Exception getException() { + return exception; + } + } + + public void testOpenForInput() throws IOException, InterruptedException { + Environment env = TestEnvironment.newEnvironment( + Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build()); + String pipeName = NAMED_PIPE_HELPER.getDefaultPipeDirectoryPrefix(env) + "inputPipe" + JvmInfo.jvmInfo().pid(); + + PipeWriterServer server = new PipeWriterServer(pipeName, HELLO_WORLD); + server.start(); + try { + // Timeout is 10 seconds for the very rare case of Amazon EBS volumes created from snapshots + // being slow the first time a particular disk block is accessed. The same problem as + // https://github.com/elastic/x-pack-elasticsearch/issues/922, which was fixed by + // https://github.com/elastic/x-pack-elasticsearch/pull/987, has been observed in CI tests. + InputStream is = NAMED_PIPE_HELPER.openNamedPipeInputStream(pipeName, Duration.ofSeconds(10)); + assertNotNull(is); + + try (BufferedReader reader = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8))) { + String line = reader.readLine(); + assertEquals(HELLO_WORLD, line); + } + } catch (IOException e) { + server.interrupt(); + throw e; + } finally { + // If this doesn't join quickly then the server thread is probably deadlocked so there's no + // point waiting a long time. 
+ server.join(1000); + } + + assertNull(server.getException()); + } + + public void testOpenForOutput() throws IOException, InterruptedException { + Environment env = TestEnvironment.newEnvironment( + Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build()); + String pipeName = NAMED_PIPE_HELPER.getDefaultPipeDirectoryPrefix(env) + "outputPipe" + JvmInfo.jvmInfo().pid(); + + PipeReaderServer server = new PipeReaderServer(pipeName); + server.start(); + try { + // Timeout is 10 seconds for the very rare case of Amazon EBS volumes created from snapshots + // being slow the first time a particular disk block is accessed. The same problem as + // https://github.com/elastic/x-pack-elasticsearch/issues/922, which was fixed by + // https://github.com/elastic/x-pack-elasticsearch/pull/987, has been observed in CI tests. + OutputStream os = NAMED_PIPE_HELPER.openNamedPipeOutputStream(pipeName, Duration.ofSeconds(10)); + assertNotNull(os); + + try (OutputStreamWriter writer = new OutputStreamWriter(os, StandardCharsets.UTF_8)) { + writer.write(GOODBYE_WORLD); + writer.write('\n'); + } + } catch (IOException e) { + server.interrupt(); + throw e; + } finally { + // If this doesn't join quickly then the server thread is probably deadlocked so there's no + // point waiting a long time. + server.join(1000); + } + + assertNull(server.getException()); + assertEquals(GOODBYE_WORLD, server.getLine()); + } +} diff --git a/x-pack/qa/ml-single-node-tests/build.gradle b/x-pack/qa/ml-single-node-tests/build.gradle new file mode 100644 index 0000000000000..9fd4a8d44d23f --- /dev/null +++ b/x-pack/qa/ml-single-node-tests/build.gradle @@ -0,0 +1,12 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('ml'), configuration: 'runtime') +} + +integTestCluster { + setting 'xpack.security.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' +} diff --git a/x-pack/qa/ml-single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java b/x-pack/qa/ml-single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java new file mode 100644 index 0000000000000..79e9a81831fc8 --- /dev/null +++ b/x-pack/qa/ml-single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java @@ -0,0 +1,366 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.transforms; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Response; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.ml.MachineLearning; +import org.elasticsearch.xpack.ml.utils.DomainSplitFunction; +import org.joda.time.DateTime; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; + +public class PainlessDomainSplitIT extends ESRestTestCase { + + static class TestConfiguration { + public String subDomainExpected; + public String domainExpected; + public String hostName; + + TestConfiguration(String subDomainExpected, String domainExpected, String hostName) { + this.subDomainExpected = subDomainExpected; + this.domainExpected = domainExpected; + this.hostName = hostName; + } + } + + public static final ArrayList tests; + + static { + tests = new ArrayList<>(); + + tests.add(new TestConfiguration("", "", "")); + tests.add(new TestConfiguration("", "", ".")); + + // Test cases from https://github.com/john-kurkowski/tldextract/tree/master/tldextract/tests + + tests.add(new TestConfiguration("www", "google.com", "www.google.com")); + tests.add(new TestConfiguration("www.maps", "google.co.uk", "www.maps.google.co.uk")); + tests.add(new TestConfiguration("www", "theregister.co.uk", "www.theregister.co.uk")); + tests.add(new TestConfiguration("", "gmail.com", "gmail.com")); + tests.add(new TestConfiguration("media.forums", "theregister.co.uk", "media.forums.theregister.co.uk")); + tests.add(new TestConfiguration("www", "www.com", "www.www.com")); + tests.add(new TestConfiguration("", "www.com", "www.com")); + tests.add(new TestConfiguration("", "internalunlikelyhostname", "internalunlikelyhostname")); + tests.add(new TestConfiguration("internalunlikelyhostname", "bizarre", "internalunlikelyhostname.bizarre")); + tests.add(new TestConfiguration("", "internalunlikelyhostname.info", "internalunlikelyhostname.info")); // .info is a valid TLD + tests.add(new TestConfiguration("internalunlikelyhostname", "information", "internalunlikelyhostname.information")); + tests.add(new TestConfiguration("", "216.22.0.192", "216.22.0.192")); + tests.add(new TestConfiguration("", "::1", "::1")); + tests.add(new TestConfiguration("", "FE80:0000:0000:0000:0202:B3FF:FE1E:8329", "FE80:0000:0000:0000:0202:B3FF:FE1E:8329")); + tests.add(new TestConfiguration("216.22", "project.coop", "216.22.project.coop")); + tests.add(new TestConfiguration("www", "xn--h1alffa9f.xn--p1ai", "www.xn--h1alffa9f.xn--p1ai")); + tests.add(new TestConfiguration("", "", "")); + tests.add(new TestConfiguration("www", "parliament.uk", "www.parliament.uk")); + tests.add(new TestConfiguration("www", "parliament.co.uk", "www.parliament.co.uk")); + tests.add(new TestConfiguration("www.a", "cgs.act.edu.au", "www.a.cgs.act.edu.au")); + tests.add(new TestConfiguration("www", "google.com.au", "www.google.com.au")); + tests.add(new TestConfiguration("www", "metp.net.cn", "www.metp.net.cn")); + tests.add(new 
TestConfiguration("www", "waiterrant.blogspot.com", "www.waiterrant.blogspot.com")); + tests.add(new TestConfiguration("", "kittens.blogspot.co.uk", "kittens.blogspot.co.uk")); + tests.add(new TestConfiguration("", "prelert.s3.amazonaws.com", "prelert.s3.amazonaws.com")); + tests.add(new TestConfiguration("daves_bucket", "prelert.s3.amazonaws.com", "daves_bucket.prelert.s3.amazonaws.com")); + tests.add(new TestConfiguration("example", "example", "example.example")); + tests.add(new TestConfiguration("b.example", "example", "b.example.example")); + tests.add(new TestConfiguration("a.b.example", "example", "a.b.example.example")); + tests.add(new TestConfiguration("example", "local", "example.local")); + tests.add(new TestConfiguration("b.example", "local", "b.example.local")); + tests.add(new TestConfiguration("a.b.example", "local", "a.b.example.local")); + tests.add(new TestConfiguration("r192494180984795-1-1041782-channel-live.ums", "ustream.tv", "r192494180984795-1-1041782-cha" + + "nnel-live.ums.ustream.tv")); + tests.add(new TestConfiguration("192.168.62.9", "prelert.com", "192.168.62.9.prelert.com")); + + // These are not a valid DNS names + tests.add(new TestConfiguration("kerberos.http.192.168", "62.222", "kerberos.http.192.168.62.222")); + //tests.add(new TestConfiguration("192.168", "62.9\143\127", "192.168.62.9\143\127")); + + // no part of the DNS name can be longer than 63 octets + /* + String dnsLongerThan254Chars = "davesbucketdavesbucketdavesbucketdavesbucketdavesbucketdaves.bucketdavesbucketdavesbuc" + + "ketdavesbucketdavesbucketdaves.bucketdavesbucketdavesbucketdavesbucketdavesbucket.davesbucketdavesbucketdaves" + + "bucketdavesbucket.davesbucketdavesbucket.prelert.s3.amazonaws.com"; + String hrd = "prelert.s3.amazonaws.com"; + tests.add(new TestConfiguration(dnsLongerThan254Chars.substring(0, dnsLongerThan254Chars.length() - (hrd.length() + 1)), + hrd, dnsLongerThan254Chars)); + */ + + // [Zach] This breaks the script's JSON encoding, skipping for now + //String bad = "0u1aof\209\1945\188hI4\236\197\205J\244\188\247\223\190F\2135\229gVE7\230i\215\231\205Qzay\225UJ\192 + // pw\216\231\204\194\216\193QV4g\196\207Whpvx.fVxl\194BjA\245kbYk\211XG\235\198\218B\252\219\225S\197\217I\2538n\229 + // \244\213\252\215Ly\226NW\242\248\244Q\220\245\221c\207\189\205Hxq5\224\240.\189Jt4\243\245t\244\198\199p\210\1987 + // r\2050L\239sR0M\190w\238\223\234L\226\2242D\233\210\206\195h\199\206tA\214J\192C\224\191b\188\201\251\198M\244h + // \206.\198\242l\2114\191JBU\198h\207\215w\243\228R\1924\242\208\191CV\208p\197gDW\198P\217\195X\191Fp\196\197J\193 + // \245\2070\196zH\197\243\253g\239.adz.beacon.base.net"; + //hrd = "base.net"; + //tests.add(new TestConfiguration(bad.substring(0, bad.length() - (hrd.length() + 1)), hrd, bad)); + + + tests.add(new TestConfiguration("_example", "local", "_example.local")); + tests.add(new TestConfiguration("www._maps", "google.co.uk", "www._maps.google.co.uk")); + tests.add(new TestConfiguration("-forum", "theregister.co.uk", "-forum.theregister.co.uk")); + tests.add(new TestConfiguration("www._yourmp", "parliament.uk", "www._yourmp.parliament.uk")); + tests.add(new TestConfiguration("www.-a", "cgs.act.edu.au", "www.-a.cgs.act.edu.au")); + tests.add(new TestConfiguration("", "-foundation.org", "-foundation.org")); + tests.add(new TestConfiguration("www", "-foundation.org", "www.-foundation.org")); + tests.add(new TestConfiguration("", "_nfsv4idmapdomain", "_nfsv4idmapdomain")); + tests.add(new TestConfiguration("_nfsv4idmapdomain", "prelert.com", 
"_nfsv4idmapdomain.prelert.com")); + + // checkHighestRegisteredDomain() tests + tests.add(new TestConfiguration(null, "example.com", "example.COM")); + tests.add(new TestConfiguration(null, "example.com", "WwW.example.COM")); + + // TLD with only 1 rule. + tests.add(new TestConfiguration(null, "domain.biz", "domain.biz" )); + tests.add(new TestConfiguration(null, "domain.biz", "b.domain.biz")); + tests.add(new TestConfiguration(null, "domain.biz", "a.b.domain.biz")); + + // TLD with some 2-level rules. + tests.add(new TestConfiguration(null, "example.com", "example.com")); + tests.add(new TestConfiguration(null, "example.com", "b.example.com")); + tests.add(new TestConfiguration(null, "example.com", "a.b.example.com")); + tests.add(new TestConfiguration(null, "example.uk.com", "example.uk.com")); + tests.add(new TestConfiguration(null, "example.uk.com", "b.example.uk.com")); + tests.add(new TestConfiguration(null, "example.uk.com", "a.b.example.uk.com")); + tests.add(new TestConfiguration(null, "test.ac", "test.ac")); + tests.add(new TestConfiguration(null, "c.gov.cy", "c.gov.cy")); + tests.add(new TestConfiguration(null, "c.gov.cy", "b.c.gov.cy")); + tests.add(new TestConfiguration(null, "c.gov.cy", "a.b.c.gov.cy")); + + // more complex TLD + tests.add(new TestConfiguration(null, "test.jp", "test.jp")); + tests.add(new TestConfiguration(null, "test.jp", "www.test.jp")); + tests.add(new TestConfiguration(null, "test.ac.jp", "test.ac.jp")); + tests.add(new TestConfiguration(null, "test.ac.jp", "www.test.ac.jp")); + tests.add(new TestConfiguration(null, "test.kyoto.jp", "test.kyoto.jp")); + tests.add(new TestConfiguration(null, "b.ide.kyoto.jp", "b.ide.kyoto.jp")); + tests.add(new TestConfiguration(null, "b.ide.kyoto.jp", "a.b.ide.kyoto.jp")); + //tests.add(new TestConfiguration(null, "b.c.kobe.jp", "b.c.kobe.jp")); + //tests.add(new TestConfiguration(null, "b.c.kobe.jp", "a.b.c.kobe.jp")); + tests.add(new TestConfiguration(null, "city.kobe.jp", "city.kobe.jp")); + tests.add(new TestConfiguration(null, "city.kobe.jp", "www.city.kobe.jp")); + tests.add(new TestConfiguration(null, "test.us", "test.us")); + tests.add(new TestConfiguration(null, "test.us", "www.test.us")); + tests.add(new TestConfiguration(null, "test.ak.us", "test.ak.us")); + tests.add(new TestConfiguration(null, "test.ak.us", "www.test.ak.us")); + tests.add(new TestConfiguration(null, "test.k12.ak.us", "test.k12.ak.us")); + tests.add(new TestConfiguration(null, "test.k12.ak.us", "www.test.k12.ak.us")); + //tests.add(new TestConfiguration(null, "食狮.com.cn", "食狮.com.cn")); + //tests.add(new TestConfiguration(null, "食狮.公司.cn", "食狮.公司.cn")); + //tests.add(new TestConfiguration(null, "食狮.公司.cn", "www.食狮.公司.cn")); + //tests.add(new TestConfiguration(null, "shishi.公司.cn", "shishi.公司.cn")); + //tests.add(new TestConfiguration(null, "食狮.中国", "食狮.中国")); + //tests.add(new TestConfiguration(null, "食狮.中国", "www.食狮.中国")); + //tests.add(new TestConfiguration(null, "shishi.中国", "shishi.中国")); + + tests.add(new TestConfiguration(null, "xn--85x722f.com.cn", "xn--85x722f.com.cn")); + tests.add(new TestConfiguration(null, "xn--85x722f.xn--55qx5d.cn", "xn--85x722f.xn--55qx5d.cn")); + tests.add(new TestConfiguration(null, "xn--85x722f.xn--55qx5d.cn", "www.xn--85x722f.xn--55qx5d.cn")); + tests.add(new TestConfiguration(null, "shishi.xn--55qx5d.cn", "shishi.xn--55qx5d.cn")); + tests.add(new TestConfiguration(null, "xn--85x722f.xn--fiqs8s", "xn--85x722f.xn--fiqs8s")); + tests.add(new TestConfiguration(null, "xn--85x722f.xn--fiqs8s", 
"www.xn--85x722f.xn--fiqs8s")); + tests.add(new TestConfiguration(null, "shishi.xn--fiqs8s","shishi.xn--fiqs8s")); + } + + public void testIsolated() throws Exception { + Settings.Builder settings = Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0); + + createIndex("painless", settings.build()); + client().performRequest("PUT", "painless/test/1", Collections.emptyMap(), + new StringEntity("{\"test\": \"test\"}", ContentType.APPLICATION_JSON)); + client().performRequest("POST", "painless/_refresh"); + + Pattern pattern = Pattern.compile("domain_split\":\\[(.*?),(.*?)\\]"); + + Map params = new HashMap<>(DomainSplitFunction.params.size() + 1); + params.putAll(DomainSplitFunction.params); + for (TestConfiguration testConfig : tests) { + params.put("host", testConfig.hostName); + String mapAsJson = Strings.toString(jsonBuilder().map(params)); + logger.info("params={}", mapAsJson); + + StringEntity body = new StringEntity("{\n" + + " \"query\" : {\n" + + " \"match_all\": {}\n" + + " },\n" + + " \"script_fields\" : {\n" + + " \"domain_split\" : {\n" + + " \"script\" : {\n" + + " \"lang\": \"painless\",\n" + + " \"inline\": \"" + DomainSplitFunction.function + + " return domainSplit(params['host'], params); \",\n" + + " \"params\": " + mapAsJson + "\n" + + " }\n" + + " }\n" + + " }\n" + + "}", ContentType.APPLICATION_JSON); + + Response response = client().performRequest("GET", "painless/test/_search", Collections.emptyMap(), body); + String responseBody = EntityUtils.toString(response.getEntity()); + Matcher m = pattern.matcher(responseBody); + + String actualSubDomain = ""; + String actualDomain = ""; + if (m.find()) { + actualSubDomain = m.group(1).replace("\"", ""); + actualDomain = m.group(2).replace("\"", ""); + } + + String expectedTotal = "[" + testConfig.subDomainExpected + "," + testConfig.domainExpected + "]"; + String actualTotal = "[" + actualSubDomain + "," + actualDomain + "]"; + + // domainSplit() tests had subdomain, testHighestRegisteredDomainCases() do not + if (testConfig.subDomainExpected != null) { + assertThat("Expected subdomain [" + testConfig.subDomainExpected + "] but found [" + actualSubDomain + + "]. Actual " + actualTotal + " vs Expected " + expectedTotal, actualSubDomain, + equalTo(testConfig.subDomainExpected)); + } + + assertThat("Expected domain [" + testConfig.domainExpected + "] but found [" + actualDomain + "]. 
Actual " + + actualTotal + " vs Expected " + expectedTotal, actualDomain, equalTo(testConfig.domainExpected)); + } + } + + public void testHRDSplit() throws Exception { + + // Create job + String job = "{\n" + + " \"description\":\"Domain splitting\",\n" + + " \"analysis_config\" : {\n" + + " \"bucket_span\":\"3600s\",\n" + + " \"detectors\" :[{\"function\":\"count\", \"by_field_name\" : \"domain_split\"}]\n" + + " },\n" + + " \"data_description\" : {\n" + + " \"field_delimiter\":\",\",\n" + + " \"time_field\":\"time\"\n" + + " \n" + + " }\n" + + " }"; + + client().performRequest("PUT", MachineLearning.BASE_PATH + "anomaly_detectors/hrd-split-job", Collections.emptyMap(), + new StringEntity(job, ContentType.APPLICATION_JSON)); + client().performRequest("POST", MachineLearning.BASE_PATH + "anomaly_detectors/hrd-split-job/_open"); + + // Create index to hold data + Settings.Builder settings = Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0); + + createIndex("painless", settings.build(), "\"test\": { \"properties\": { \"domain\": { \"type\": \"keyword\" }," + + "\"time\": { \"type\": \"date\" } } }"); + + // Index some data + DateTime baseTime = new DateTime().minusYears(1); + TestConfiguration test = tests.get(randomInt(tests.size()-1)); + + // domainSplit() tests had subdomain, testHighestRegisteredDomainCases() did not, so we need a special case for sub + String expectedSub = test.subDomainExpected == null ? ".*" : test.subDomainExpected.replace(".", "\\."); + String expectedHRD = test.domainExpected.replace(".", "\\."); + Pattern pattern = Pattern.compile("domain_split\":\\[\"(" + expectedSub + "),(" + expectedHRD +")\"[,\\]]"); + + for (int i = 0; i < 100; i++) { + + DateTime time = baseTime.plusHours(i); + if (i == 64) { + // Anomaly has 100 docs, but we don't care about the value + for (int j = 0; j < 100; j++) { + client().performRequest("PUT", "painless/test/" + time.toDateTimeISO() + "_" + j, + Collections.emptyMap(), + new StringEntity("{\"domain\": \"" + "bar.bar.com\", \"time\": \"" + time.toDateTimeISO() + + "\"}", ContentType.APPLICATION_JSON)); + } + } else { + // Non-anomalous values will be what's seen when the anomaly is reported + client().performRequest("PUT", "painless/test/" + time.toDateTimeISO(), + Collections.emptyMap(), + new StringEntity("{\"domain\": \"" + test.hostName + "\", \"time\": \"" + time.toDateTimeISO() + + "\"}", ContentType.APPLICATION_JSON)); + } + } + + client().performRequest("POST", "painless/_refresh"); + + // Create and start datafeed + String body = "{\n" + + " \"job_id\":\"hrd-split-job\",\n" + + " \"indexes\":[\"painless\"],\n" + + " \"types\":[\"test\"],\n" + + " \"script_fields\": {\n" + + " \"domain_split\": {\n" + + " \"script\": \"return domainSplit(doc['domain'].value, params);\"\n" + + " }\n" + + " }\n" + + " }"; + + client().performRequest("PUT", MachineLearning.BASE_PATH + "datafeeds/hrd-split-datafeed", Collections.emptyMap(), + new StringEntity(body, ContentType.APPLICATION_JSON)); + client().performRequest("POST", MachineLearning.BASE_PATH + "datafeeds/hrd-split-datafeed/_start"); + + boolean passed = awaitBusy(() -> { + try { + client().performRequest("POST", "/_refresh"); + + Response response = client().performRequest("GET", + MachineLearning.BASE_PATH + "anomaly_detectors/hrd-split-job/results/records"); + String responseBody = EntityUtils.toString(response.getEntity()); + + if (responseBody.contains("\"count\":2")) { + Matcher 
m = pattern.matcher(responseBody); + + String actualSubDomain = ""; + String actualDomain = ""; + if (m.find()) { + actualSubDomain = m.group(1).replace("\"", ""); + actualDomain = m.group(2).replace("\"", ""); + } + + String expectedTotal = "[" + test.subDomainExpected + "," + test.domainExpected + "]"; + String actualTotal = "[" + actualSubDomain + "," + actualDomain + "]"; + + // domainSplit() tests had subdomain, testHighestRegisteredDomainCases() do not + if (test.subDomainExpected != null) { + assertThat("Expected subdomain [" + test.subDomainExpected + "] but found [" + actualSubDomain + + "]. Actual " + actualTotal + " vs Expected " + expectedTotal, actualSubDomain, + equalTo(test.subDomainExpected)); + } + + assertThat("Expected domain [" + test.domainExpected + "] but found [" + actualDomain + "]. Actual " + + actualTotal + " vs Expected " + expectedTotal, actualDomain, equalTo(test.domainExpected)); + + return true; + } else { + logger.error(responseBody); + return false; + } + + } catch (Exception e) { + logger.error(e.getMessage()); + return false; + } + + }, 5, TimeUnit.SECONDS); + + if (!passed) { + fail("Anomaly records were not found within 5 seconds"); + } + } +} diff --git a/x-pack/qa/multi-cluster-search-security/build.gradle b/x-pack/qa/multi-cluster-search-security/build.gradle new file mode 100644 index 0000000000000..f5265466965c1 --- /dev/null +++ b/x-pack/qa/multi-cluster-search-security/build.gradle @@ -0,0 +1,77 @@ +import org.elasticsearch.gradle.test.RestIntegTestTask + +apply plugin: 'elasticsearch.standalone-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') +} + +task remoteClusterTest(type: RestIntegTestTask) { + mustRunAfter(precommit) +} + +remoteClusterTestCluster { + numNodes = 2 + clusterName = 'remote-cluster' + setting 'search.remote.connect', false + setting 'xpack.security.enabled', 'true' + setting 'xpack.watcher.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + setupCommand 'setupDummyUser', + 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'test_user', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} + +remoteClusterTestRunner { + systemProperty 'tests.rest.suite', 'remote_cluster' +} + +task mixedClusterTest(type: RestIntegTestTask) {} + +mixedClusterTestCluster { + dependsOn remoteClusterTestRunner + setting 'xpack.security.enabled', 'true' + setting 'xpack.watcher.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + setupCommand 'setupDummyUser', + 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'test_user', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + 
return tmpFile.exists() + } + setting 'search.remote.my_remote_cluster.seeds', "\"${-> remoteClusterTest.nodes.get(0).transportUri()}\"" + setting 'search.remote.connections_per_cluster', 1 + setting 'search.remote.connect', true +} + +mixedClusterTestRunner { + systemProperty 'tests.rest.suite', 'multi_cluster' + finalizedBy 'remoteClusterTestCluster#node0.stop','remoteClusterTestCluster#node1.stop' +} + +task integTest { + dependsOn = [mixedClusterTest] +} + +test.enabled = false // no unit tests for multi-cluster-search, only the rest integration test +check.dependsOn(integTest) diff --git a/x-pack/qa/multi-cluster-search-security/src/test/java/org/elasticsearch/xpack/security/MultiClusterSearchWithSecurityYamlTestSuiteIT.java b/x-pack/qa/multi-cluster-search-security/src/test/java/org/elasticsearch/xpack/security/MultiClusterSearchWithSecurityYamlTestSuiteIT.java new file mode 100644 index 0000000000000..e61ff9062d171 --- /dev/null +++ b/x-pack/qa/multi-cluster-search-security/src/test/java/org/elasticsearch/xpack/security/MultiClusterSearchWithSecurityYamlTestSuiteIT.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; + +public class MultiClusterSearchWithSecurityYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + private static final String USER = "test_user"; + private static final String PASS = "x-pack-test-password"; + + @Override + protected boolean preserveIndicesUponCompletion() { + return true; + } + + public MultiClusterSearchWithSecurityYamlTestSuiteIT( + @Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } +} + diff --git a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml new file mode 100644 index 0000000000000..89db4df927e5b --- /dev/null +++ b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml @@ -0,0 +1,217 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "password": "s3krit", + "roles" : [ "x_cluster_role" ] + } + - do: + xpack.security.put_role: + name: "x_cluster_role" + body: > + { + "cluster": ["all"], + "indices": [ + { + 
"names": ["local_index", "my_remote_cluster:test_i*", "my_remote_cluster:aliased_test_index", "test_remote_cluster:test_i*", "my_remote_cluster:secure_alias"], + "privileges": ["read"] + } + ] + } +--- +teardown: + - do: + xpack.security.delete_user: + username: "joe" + ignore: 404 + - do: + xpack.security.delete_role: + name: "x_cluster_role" + ignore: 404 +--- +"Index data and search on the mixed cluster": + + - do: + indices.create: + index: local_index + body: + settings: + index: + number_of_shards: 2 + number_of_replicas: 0 + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "local_index", "_type": "test_type"}}' + - '{"f1": "local_cluster", "filter_field": 0}' + - '{"index": {"_index": "local_index", "_type": "test_type"}}' + - '{"f1": "local_cluster", "filter_field": 1}' + - '{"index": {"_index": "local_index", "_type": "test_type"}}' + - '{"f1": "local_cluster", "filter_field": 0}' + - '{"index": {"_index": "local_index", "_type": "test_type"}}' + - '{"f1": "local_cluster", "filter_field": 1}' + - '{"index": {"_index": "local_index", "_type": "test_type"}}' + - '{"f1": "local_cluster", "filter_field": 0}' + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + search: + index: local_index,my_remote_cluster:test_index + body: + aggs: + cluster: + terms: + field: f1.keyword + + - match: { _shards.total: 5 } + - match: { hits.total: 11 } + - length: { aggregations.cluster.buckets: 2 } + - match: { aggregations.cluster.buckets.0.key: "remote_cluster" } + - match: { aggregations.cluster.buckets.0.doc_count: 6 } + - match: { aggregations.cluster.buckets.1.key: "local_cluster" } + - match: { aggregations.cluster.buckets.1.doc_count: 5 } + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + search: + index: local_index,my_remote_cluster:test_index + body: + query: + term: + f1: remote_cluster + aggs: + cluster: + terms: + field: f1.keyword + + - match: { _shards.total: 5 } + - match: { hits.total: 6} + - match: { hits.hits.0._index: "my_remote_cluster:test_index"} + - length: { aggregations.cluster.buckets: 1 } + - match: { aggregations.cluster.buckets.0.key: "remote_cluster" } + - match: { aggregations.cluster.buckets.0.doc_count: 6 } + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + search: + index: my_remote_cluster:test_index + body: + aggs: + cluster: + terms: + field: f1.keyword + + - match: { _shards.total: 3 } + - match: { hits.total: 6} + - match: { hits.hits.0._index: "my_remote_cluster:test_index"} + - length: { aggregations.cluster.buckets: 1 } + - match: { aggregations.cluster.buckets.0.key: "remote_cluster" } + - match: { aggregations.cluster.buckets.0.doc_count: 6 } + + # Test wildcard in cluster name + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + search: + index: "my_*:test_index" + body: + aggs: + cluster: + terms: + field: f1.keyword + + - match: { _shards.total: 3 } + - match: { hits.total: 6} + - match: { hits.hits.0._index: "my_remote_cluster:test_index"} + - length: { aggregations.cluster.buckets: 1 } + - match: { aggregations.cluster.buckets.0.key: "remote_cluster" } + - match: { aggregations.cluster.buckets.0.doc_count: 6 } + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + search: + index: local_index + body: + aggs: + cluster: + terms: + field: f1.keyword + + - match: { _shards.total: 2 } + - match: { hits.total: 5} + - match: { hits.hits.0._index: "local_index"} + - length: { aggregations.cluster.buckets: 1 } + - match: { aggregations.cluster.buckets.0.key: "local_cluster" 
} + - match: { aggregations.cluster.buckets.0.doc_count: 5 } + +--- +"Add transient remote cluster based on the preset cluster": + - do: + cluster.get_settings: + include_defaults: true + + - set: { defaults.search.remote.my_remote_cluster.seeds.0: remote_ip } + + - do: + cluster.put_settings: + flat_settings: true + body: + transient: + search.remote.test_remote_cluster.seeds: $remote_ip + + - match: {transient: {search.remote.test_remote_cluster.seeds: $remote_ip}} + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + search: + index: test_remote_cluster:test_index + + - match: { _shards.total: 3 } + - match: { hits.total: 6 } + - match: { hits.hits.0._index: "test_remote_cluster:test_index" } + + # Test wildcard that matches multiple (two) cluster names + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + search: + index: "*_remote_cluster:test_ind*" + + - match: { _shards.total: 6 } + - match: { hits.total: 12 } + +--- +"Search an filtered alias on the remote cluster": + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + search: + index: my_remote_cluster:aliased_test_index + + - match: { _shards.total: 3 } + - match: { hits.total: 2 } + - match: { hits.hits.0._source.filter_field: 1 } + - match: { hits.hits.0._index: "my_remote_cluster:test_index" } + +--- +"Search across clusters via a secured alias": + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + search: + index: my_remote_cluster:secure_alias # TODO make this a wildcard once + + - match: { _shards.total: 2 } + - match: { hits.total: 1 } + - is_true: hits.hits.0._source.secure + - match: { hits.hits.0._index: "my_remote_cluster:secured_via_alias" } + diff --git a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml new file mode 100644 index 0000000000000..5ff92df69b863 --- /dev/null +++ b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml @@ -0,0 +1,84 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "password": "s3krit", + "roles" : [ "x_cluster_role" ] + } + - do: + xpack.security.put_role: + name: "x_cluster_role" + body: > + { + "cluster": ["monitor"] + } +--- +teardown: + - do: + xpack.security.delete_user: + username: "joe" + ignore: 404 + - do: + xpack.security.delete_role: + name: "monitor_role" + ignore: 404 +--- +"Fetch remote cluster info for existing cluster": + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + cluster.remote_info: {} + - match: { my_remote_cluster.connected: true } + - match: { my_remote_cluster.num_nodes_connected: 1} + - match: { my_remote_cluster.max_connections_per_cluster: 1} + - match: { my_remote_cluster.initial_connect_timeout: "30s" } + +--- +"Add transient remote cluster based on the preset cluster and check remote info": + - do: + cluster.get_settings: + include_defaults: true + + - set: { defaults.search.remote.my_remote_cluster.seeds.0: remote_ip } + + - do: + cluster.put_settings: + flat_settings: true + body: + transient: + search.remote.test_remote_cluster.seeds: $remote_ip + + - match: {transient: {search.remote.test_remote_cluster.seeds: $remote_ip}} + + # we do another search here since this will enforce the connection to be established + # otherwise the cluster might not have been 
connected yet. + - do: + search: + index: test_remote_cluster:test_index + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + cluster.remote_info: {} + - match: { test_remote_cluster.connected: true } + - match: { my_remote_cluster.connected: true } + + - match: { test_remote_cluster.seeds.0: $remote_ip } + - match: { my_remote_cluster.seeds.0: $remote_ip } + + - match: { my_remote_cluster.max_connections_per_cluster: 1} + - match: { test_remote_cluster.max_connections_per_cluster: 1} + + - match: { my_remote_cluster.num_nodes_connected: 1} + - match: { test_remote_cluster.num_nodes_connected: 1} + + - match: { my_remote_cluster.initial_connect_timeout: "30s" } + - match: { test_remote_cluster.initial_connect_timeout: "30s" } diff --git a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yml b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yml new file mode 100644 index 0000000000000..937c0ddec9a86 --- /dev/null +++ b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yml @@ -0,0 +1,106 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "password": "s3krit", + "roles" : [ "x_cluster_role" ] + } + - do: + xpack.security.put_role: + name: "x_cluster_role" + body: > + { + "cluster": ["all"], + "indices": [ + { + "names": ["field_caps_index_2", "my_remote_cluster:field_caps_index_1", "my_remote_cluster:field_caps_index_3"], + "privileges": ["read"] + } + ] + } +--- +teardown: + - do: + xpack.security.delete_user: + username: "joe" + ignore: 404 + - do: + xpack.security.delete_role: + name: "x_cluster_role" + ignore: 404 +--- +"Get simple field caps from remote cluster": + - skip: + version: " - 5.4.99" + reason: this uses a new API that has been added in 5.5.0 + + - do: + indices.create: + index: field_caps_index_2 + body: + mappings: + t: + properties: + text: + type: text + keyword: + type: keyword + number: + type: double + geo: + type: geo_point + object: + type: object + properties: + nested1 : + type : text + index: true + nested2: + type: float + doc_values: true + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + field_caps: + index: 'field_caps_index_2,my_remote_cluster:field_caps_index_1,my_remote_cluster:field_caps_index_3' + fields: [text, keyword, number, geo] + + - match: {fields.text.text.searchable: true} + - match: {fields.text.text.aggregatable: false} + - is_false: fields.text.text.indices + - is_false: fields.text.text.non_searchable_indices + - is_false: fields.text.text.non_aggregatable_indices + - match: {fields.keyword.keyword.searchable: true} + - match: {fields.keyword.keyword.aggregatable: true} + - is_false: fields.text.keyword.indices + - is_false: fields.text.keyword.non_searchable_indices + - is_false: fields.text.keyword.non_aggregatable_indices + - match: {fields.number.double.searchable: true} + - match: {fields.number.double.aggregatable: true} + - match: {fields.number.double.indices: ["field_caps_index_2", "my_remote_cluster:field_caps_index_1"]} + - is_false: fields.number.double.non_searchable_indices + - is_false: fields.number.double.non_aggregatable_indices + - match: {fields.number.long.searchable: true} + - match: {fields.number.long.aggregatable: true} + - match: {fields.number.long.indices: ["my_remote_cluster:field_caps_index_3"]} + - 
is_false: fields.number.long.non_searchable_indices + - is_false: fields.number.long.non_aggregatable_indices + - match: {fields.geo.geo_point.searchable: true} + - match: {fields.geo.geo_point.aggregatable: true} + - match: {fields.geo.geo_point.indices: ["field_caps_index_2", "my_remote_cluster:field_caps_index_1"]} + - is_false: fields.geo.geo_point.non_searchable_indices + - is_false: fields.geo.geo_point.non_aggregatable_indices + - match: {fields.geo.keyword.searchable: true} + - match: {fields.geo.keyword.aggregatable: true} + - match: {fields.geo.keyword.indices: ["my_remote_cluster:field_caps_index_3"]} + - is_false: fields.geo.keyword.non_searchable_indices + - is_false: fields.geo.keyword.on_aggregatable_indices diff --git a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/40_scroll.yml b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/40_scroll.yml new file mode 100644 index 0000000000000..97e0cfab862cc --- /dev/null +++ b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/40_scroll.yml @@ -0,0 +1,117 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "password": "s3krit", + "roles" : [ "x_cluster_role" ] + } + - do: + xpack.security.put_role: + name: "x_cluster_role" + body: > + { + "cluster": ["all"], + "indices": [ + { + "names": ["local_index", "my_remote_cluster:test_i*", "my_remote_cluster:aliased_test_index", "test_remote_cluster:test_i*", "my_remote_cluster:secure_alias"], + "privileges": ["read"] + } + ] + } +--- +teardown: + - do: + xpack.security.delete_user: + username: "joe" + ignore: 404 + - do: + xpack.security.delete_role: + name: "x_cluster_role" + ignore: 404 +--- +"Scroll on the mixed cluster": + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + search: + index: my_remote_cluster:test_index + size: 4 + scroll: 1m + sort: filter_field + body: + query: + match_all: {} + + - set: {_scroll_id: scroll_id} + - match: {hits.total: 6 } + - length: {hits.hits: 4 } + - match: {hits.hits.0._source.filter_field: 0 } + - match: {hits.hits.1._source.filter_field: 0 } + - match: {hits.hits.2._source.filter_field: 0 } + - match: {hits.hits.3._source.filter_field: 0 } + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + scroll: + body: { "scroll_id": "$scroll_id", "scroll": "1m"} + + - match: {hits.total: 6 } + - length: {hits.hits: 2 } + - match: {hits.hits.0._source.filter_field: 1 } + - match: {hits.hits.1._source.filter_field: 1 } + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + scroll: + scroll_id: $scroll_id + scroll: 1m + + - match: {hits.total: 6 } + - length: {hits.hits: 0 } + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + clear_scroll: + scroll_id: $scroll_id + +--- +"Steal Scroll ID on the mixed cluster": + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + search: + index: my_remote_cluster:test_index + size: 4 + scroll: 1m + sort: filter_field + body: + query: + match_all: {} + + - set: {_scroll_id: scroll_id} + - match: {hits.total: 6 } + - length: {hits.hits: 4 } + - match: {hits.hits.0._source.filter_field: 0 } + - match: {hits.hits.1._source.filter_field: 0 } + - match: {hits.hits.2._source.filter_field: 0 } + - match: {hits.hits.3._source.filter_field: 0 } + + - do: # steal the scroll ID cross cluster and make sure it fails + catch: 
/search_context_missing_exception/ + scroll: + body: { "scroll_id": "$scroll_id", "scroll": "1m"} + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + catch: missing + clear_scroll: + scroll_id: $scroll_id + - match: { num_freed : 0 } diff --git a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/50_missing.yml b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/50_missing.yml new file mode 100644 index 0000000000000..9c445f418daf2 --- /dev/null +++ b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/50_missing.yml @@ -0,0 +1,68 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "password": "s3krit", + "roles" : [ "x_cluster_role" ] + } + - do: + xpack.security.put_role: + name: "x_cluster_role" + body: > + { + "cluster": ["all"], + "indices": [ + { + "names": ["local_index", "my_remote_cluster:test_i*", "my_remote_cluster:aliased_test_index", "test_remote_cluster:test_i*", "my_remote_cluster:secure_alias"], + "privileges": ["read"] + } + ] + } +--- +teardown: + - do: + xpack.security.delete_user: + username: "joe" + ignore: 404 + - do: + xpack.security.delete_role: + name: "x_cluster_role" + ignore: 404 +--- +"Search with missing remote index pattern": + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + search: + index: "*:foo-*" + + - match: { _shards.total: 0 } + - match: { hits.total: 0 } + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + search: + index: "my_remote_cluster:foo-*" + + - match: { _shards.total: 0 } + - match: { hits.total: 0 } + + - do: + catch: "request" + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + search: + index: "*:foo-bar" + + - do: + catch: "request" + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + search: + index: "my_remote_cluster:foo-bar" diff --git a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/60_skip_shards.yml b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/60_skip_shards.yml new file mode 100644 index 0000000000000..ad27f58567a94 --- /dev/null +++ b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/60_skip_shards.yml @@ -0,0 +1,96 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "password": "s3krit", + "roles" : [ "x_cluster_role" ] + } + - do: + xpack.security.put_role: + name: "x_cluster_role" + body: > + { + "cluster": ["all"], + "indices": [ + { + "names": ["skip_shards_index", "my_remote_cluster:single_doc_index"], + "privileges": ["read"] + } + ] + } +--- +teardown: + - do: + xpack.security.delete_user: + username: "joe" + ignore: 404 + - do: + xpack.security.delete_role: + name: "x_cluster_role" + ignore: 404 +--- +"Test that remote indices are subject to shard skipping": + + - do: + indices.create: + index: skip_shards_index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + test_type: + properties: + created_at: + type: date + format: "yyyy-MM-dd" + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "skip_shards_index", "_type": "test_type"}}' + - '{"f1": "local_cluster", "sort_field": 0, "created_at" : "2017-01-01"}' + + # check that we skip 
the remote shard + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + search: + index: "skip_shards_index,my_remote_cluster:single_doc_index" + pre_filter_shard_size: 1 + body: { "size" : 10, "query" : { "range" : { "created_at" : { "gte" : "2016-02-01", "lt": "2018-02-01"} } } } + + - match: { hits.total: 1 } + - match: { hits.hits.0._index: "skip_shards_index"} + - match: { _shards.total: 2 } + - match: { _shards.successful: 2 } + - match: { _shards.skipped : 1} + - match: { _shards.failed: 0 } + - match: { hits.total: 1 } + + # check that we skip the local shard + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + search: + index: "skip_shards_index,my_remote_cluster:single_doc_index" + pre_filter_shard_size: 1 + body: { "size" : 10, "query" : { "range" : { "created_at" : { "gte" : "2015-02-01", "lt": "2016-02-01"} } } } + + - match: { hits.total: 1 } + - match: { hits.hits.0._index: "my_remote_cluster:single_doc_index"} + - match: { _shards.total: 2 } + - match: { _shards.successful: 2 } + - match: { _shards.skipped : 1} + - match: { _shards.failed: 0 } + - match: { hits.total: 1 } + diff --git a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml new file mode 100644 index 0000000000000..6fa2b1e31a152 --- /dev/null +++ b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml @@ -0,0 +1,198 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "password": "s3krit", + "roles" : [ "x_cluster_role" ] + } + - do: + xpack.security.put_role: + name: "x_cluster_role" + body: > + { + "cluster": ["monitor"], + "indices": [ + { + "names": ["single_doc_index", "secure_alias", "test_index", "aliased_test_index", "field_caps_index_1", + "field_caps_index_3"], + "privileges": ["read", "read_cross_cluster"] + } + ] + } +--- +"Index data and search on the remote cluster": + + - do: + indices.create: + index: single_doc_index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + test_type: + properties: + created_at: + type: date + format: "yyyy-MM-dd" + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "single_doc_index", "_type": "test_type"}}' + - '{"f1": "remote_cluster", "sort_field": 1, "created_at" : "2016-01-01"}' + + - do: + indices.create: + index: field_caps_index_1 + body: + mappings: + t: + properties: + text: + type: text + keyword: + type: keyword + number: + type: double + geo: + type: geo_point + object: + type: object + properties: + nested1 : + type : text + index: false + nested2: + type: float + doc_values: false + - do: + indices.create: + index: field_caps_index_3 + body: + mappings: + t: + properties: + text: + type: text + keyword: + type: keyword + number: + type: long + geo: + type: keyword + object: + type: object + properties: + nested1 : + type : long + index: false + nested2: + type: keyword + doc_values: false + + - do: + indices.create: + index: test_index + body: + settings: + index: + number_of_shards: 3 + number_of_replicas: 0 + aliases: + aliased_test_index: # we use this alias in the multi cluster test to verify filtered aliases work + filter: + term: + filter_field : 1 + - do: + indices.create: + index: secured_via_alias + body: + settings: + index: + number_of_shards: 
2 + number_of_replicas: 0 + aliases: + secure_alias: {} # we use this alias in the multi cluster test to verify permissions via aliases work + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "remote_cluster", "filter_field": 0}' + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "remote_cluster", "filter_field": 1}' + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "remote_cluster", "filter_field": 0}' + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "remote_cluster", "filter_field": 1}' + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "remote_cluster", "filter_field": 0}' + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "remote_cluster", "filter_field": 0}' + - '{"index": {"_index": "secured_via_alias", "_type": "test_type"}}' + - '{"f1": "remote_cluster", "secure": true}' + + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + search: + index: test_index + body: + aggs: + cluster: + terms: + field: f1.keyword + + - match: { _shards.total: 3 } + - match: { hits.total: 6 } + - length: { aggregations.cluster.buckets: 1 } + - match: { aggregations.cluster.buckets.0.key: "remote_cluster" } + - match: { aggregations.cluster.buckets.0.doc_count: 6 } + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + search: + index: aliased_test_index + + - match: { _shards.total: 3 } + - match: { hits.total: 2 } + - match: { hits.hits.0._source.filter_field: 1 } + - match: { hits.hits.0._index: "test_index" } + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdA==" } + search: + index: secure_alias + + - match: { _shards.total: 2 } + - match: { hits.total: 1 } + - is_true: hits.hits.0._source.secure + - match: { hits.hits.0._index: "secured_via_alias" } + +# The user is updated to remove its role mappings to show that we do not +# need the user to be assigned to a role on the remote cluster and that the +# roles sent with the user from the other cluster are used. 
The put user +# request clears the cached reference to the user so we do not need to do +# that manually + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "password": "s3krit", + "roles" : [ ] + } + - match: { user: { created: false } } diff --git a/x-pack/qa/multi-node/build.gradle b/x-pack/qa/multi-node/build.gradle new file mode 100644 index 0000000000000..bff9d8652b915 --- /dev/null +++ b/x-pack/qa/multi-node/build.gradle @@ -0,0 +1,28 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') +} + +integTestCluster { + numNodes = 2 + clusterName = 'multi-node' + setting 'xpack.security.enabled', 'true' + setting 'xpack.watcher.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + extraConfigFile 'roles.yml', 'roles.yml' + setupCommand 'setup-test-user', 'bin/elasticsearch-users', 'useradd', 'test-user', '-p', 'x-pack-test-password', '-r', 'test' + setupCommand 'setup-super-user', 'bin/elasticsearch-users', 'useradd', 'super-user', '-p', 'x-pack-super-password', '-r', 'superuser' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'super-user', + password: 'x-pack-super-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} diff --git a/x-pack/qa/multi-node/roles.yml b/x-pack/qa/multi-node/roles.yml new file mode 100644 index 0000000000000..b5c9fb10238fb --- /dev/null +++ b/x-pack/qa/multi-node/roles.yml @@ -0,0 +1,14 @@ +test: + # this privileges must be kept minimal or the user could inadvertently be + # granted permissions that would enable the global checkpoint sync to run + # as this user leading to tests that should fail if the global checkpoint + # sync does not run as the system user to pass for when they should not + cluster: + - monitor + indices: + - names: [ 'test-index' ] + privileges: + - create_index + - monitor + - write + diff --git a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/GlobalCheckpointSyncActionIT.java b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/GlobalCheckpointSyncActionIT.java new file mode 100644 index 0000000000000..abc784b4cb286 --- /dev/null +++ b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/GlobalCheckpointSyncActionIT.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.multi_node; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.yaml.ObjectPath; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.equalTo; + +public class GlobalCheckpointSyncActionIT extends ESRestTestCase { + + @Override + protected Settings restClientSettings() { + return getClientSettings("test-user", "x-pack-test-password"); + } + + @Override + protected Settings restAdminSettings() { + return getClientSettings("super-user", "x-pack-super-password"); + } + + private Settings getClientSettings(final String username, final String password) { + final String token = basicAuthHeaderValue(username, new SecureString(password.toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + + /* + * The post-operation global checkpoint sync runs privileged as the system user otherwise the sync would be denied to restricted users. + * This test ensures that these post-operation syncs are successful otherwise the global checkpoint would not have advanced on the + * replica. + */ + public void testGlobalCheckpointSyncActionRunsAsPrivilegedUser() throws Exception { + // create the test-index index + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.startObject("settings"); + { + builder.field("index.number_of_shards", 1); + builder.field("index.number_of_replicas", 1); + } + builder.endObject(); + } + builder.endObject(); + final StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); + client().performRequest("PUT", "test-index", Collections.emptyMap(), entity); + } + + // wait for the replica to recover + client().performRequest("GET", "/_cluster/health", Collections.singletonMap("wait_for_status", "green")); + + // index some documents + final int numberOfDocuments = randomIntBetween(0, 128); + for (int i = 0; i < numberOfDocuments; i++) { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.field("foo", i); + } + builder.endObject(); + final StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); + client().performRequest("PUT", "/test-index/test-type/" + i, Collections.emptyMap(), entity); + } + } + + // we have to wait for the post-operation global checkpoint sync to propagate to the replica + assertBusy(() -> { + final Map params = new HashMap<>(2); + params.put("level", "shards"); + params.put("filter_path", "**.seq_no"); + final Response response = client().performRequest("GET", "/test-index/_stats", params); + final ObjectPath path = ObjectPath.createFromResponse(response); + // int looks funny here since global checkpoints are longs but the response parser does not know enough to treat them as long + final int shard0GlobalCheckpoint = 
path.evaluate("indices.test-index.shards.0.0.seq_no.global_checkpoint"); + assertThat(shard0GlobalCheckpoint, equalTo(numberOfDocuments - 1)); + final int shard1GlobalCheckpoint = path.evaluate("indices.test-index.shards.0.1.seq_no.global_checkpoint"); + assertThat(shard1GlobalCheckpoint, equalTo(numberOfDocuments - 1)); + }); + } + +} diff --git a/x-pack/qa/openldap-tests/build.gradle b/x-pack/qa/openldap-tests/build.gradle new file mode 100644 index 0000000000000..6aef9fa28921f --- /dev/null +++ b/x-pack/qa/openldap-tests/build.gradle @@ -0,0 +1,34 @@ +Project idpFixtureProject = xpackProject("test:idp-fixture") +evaluationDependsOn(idpFixtureProject.path) + +apply plugin: 'elasticsearch.standalone-test' +apply plugin: 'elasticsearch.vagrantsupport' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') +} + +task openLdapFixture { + dependsOn "vagrantCheckVersion", "virtualboxCheckVersion", idpFixtureProject.up +} + +String outputDir = "generated-resources/${project.name}" +task copyIdpTrust(type: Copy) { + from idpFixtureProject.file('src/main/resources/certs/idptrust.jks'); + into outputDir +} +if (project.rootProject.vagrantSupported) { + project.sourceSets.test.output.dir(outputDir, builtBy: copyIdpTrust) + test.dependsOn openLdapFixture + test.finalizedBy idpFixtureProject.halt +} else { + test.enabled = false +} + +namingConventions { + // integ tests use Tests instead of IT + skipIntegTestInDisguise = true +} + diff --git a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java new file mode 100644 index 0000000000000..eced8a1b39ae8 --- /dev/null +++ b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java @@ -0,0 +1,290 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.test; + +import com.unboundid.ldap.sdk.LDAPConnection; +import com.unboundid.ldap.sdk.LDAPException; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; +import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.core.ssl.VerificationMode; +import org.elasticsearch.xpack.security.authc.ldap.LdapSessionFactory; +import org.elasticsearch.xpack.security.authc.ldap.LdapTestUtils; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapMetaDataResolver; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapTestCase; +import org.elasticsearch.xpack.security.authc.ldap.support.SessionFactory; +import org.junit.After; +import org.junit.Before; + +import java.nio.file.Path; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; + +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.startsWith; + +public class OpenLdapTests extends ESTestCase { + + public static final String OPEN_LDAP_DNS_URL = "ldaps://localhost:60636"; + public static final String OPEN_LDAP_IP_URL = "ldaps://127.0.0.1:60636"; + + public static final String PASSWORD = "NickFuryHeartsES"; + private static final String HAWKEYE_DN = "uid=hawkeye,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + public static final String LDAPTRUST_PATH = "/idptrust.jks"; + private static final SecureString PASSWORD_SECURE_STRING = new SecureString(PASSWORD.toCharArray()); + + private boolean useGlobalSSL; + private SSLService sslService; + private ThreadPool threadPool; + private Settings globalSettings; + + @Before + public void init() throws Exception { + threadPool = new TestThreadPool("OpenLdapTests thread pool"); + } + + @After + public void shutdown() throws InterruptedException { + terminate(threadPool); + } + + @Override + public boolean enableWarningsCheck() { + return false; + } + + @Before + public void initializeSslSocketFactory() throws Exception { + Path truststore = getDataPath(LDAPTRUST_PATH); + /* + * Prior to each test we reinitialize the socket factory with a new SSLService so that we get a new SSLContext. + * If we re-use a SSLContext, previously connected sessions can get re-established which breaks hostname + * verification tests since a re-established connection does not perform hostname verification. 
+ */ + useGlobalSSL = randomBoolean(); + MockSecureSettings mockSecureSettings = new MockSecureSettings(); + Settings.Builder builder = Settings.builder().put("path.home", createTempDir()); + if (useGlobalSSL) { + builder.put("xpack.ssl.truststore.path", truststore); + mockSecureSettings.setString("xpack.ssl.truststore.secure_password", "changeit"); + + // fake realm to load config with certificate verification mode + builder.put("xpack.security.authc.realms.bar.ssl.truststore.path", truststore); + mockSecureSettings.setString("xpack.security.authc.realms.bar.ssl.truststore.secure_password", "changeit"); + builder.put("xpack.security.authc.realms.bar.ssl.verification_mode", VerificationMode.CERTIFICATE); + } else { + // fake realms so ssl will get loaded + builder.put("xpack.security.authc.realms.foo.ssl.truststore.path", truststore); + mockSecureSettings.setString("xpack.security.authc.realms.foo.ssl.truststore.secure_password", "changeit"); + builder.put("xpack.security.authc.realms.foo.ssl.verification_mode", VerificationMode.FULL); + builder.put("xpack.security.authc.realms.bar.ssl.truststore.path", truststore); + mockSecureSettings.setString("xpack.security.authc.realms.bar.ssl.truststore.secure_password", "changeit"); + builder.put("xpack.security.authc.realms.bar.ssl.verification_mode", VerificationMode.CERTIFICATE); + } + globalSettings = builder.setSecureSettings(mockSecureSettings).build(); + Environment environment = TestEnvironment.newEnvironment(globalSettings); + sslService = new SSLService(globalSettings, environment); + } + + public void testConnect() throws Exception { + //openldap does not use cn as naming attributes by default + String groupSearchBase = "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + String userTemplate = "uid={0},ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + RealmConfig config = new RealmConfig("oldap-test", buildLdapSettings(OPEN_LDAP_DNS_URL, userTemplate, groupSearchBase, + LdapSearchScope.ONE_LEVEL), globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(Settings.EMPTY)); + LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool); + + String[] users = new String[] { "blackwidow", "cap", "hawkeye", "hulk", "ironman", "thor" }; + for (String user : users) { + logger.info("testing connect as user [{}]", user); + try (LdapSession ldap = session(sessionFactory, user, PASSWORD_SECURE_STRING)) { + assertThat(groups(ldap), hasItem(containsString("Avengers"))); + } + } + } + + public void testGroupSearchScopeBase() throws Exception { + //base search on a groups means that the user can be in just one group + + String groupSearchBase = "cn=Avengers,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + String userTemplate = "uid={0},ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + RealmConfig config = new RealmConfig("oldap-test", buildLdapSettings(OPEN_LDAP_DNS_URL, userTemplate, groupSearchBase, + LdapSearchScope.BASE), globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); + LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool); + + String[] users = new String[] { "blackwidow", "cap", "hawkeye", "hulk", "ironman", "thor" }; + for (String user : users) { + try (LdapSession ldap = session(sessionFactory, user, PASSWORD_SECURE_STRING)) { + assertThat(groups(ldap), hasItem(containsString("Avengers"))); + } + } + } + + public void testCustomFilter() throws Exception { + String groupSearchBase = 
"ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + String userTemplate = "uid={0},ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + Settings settings = Settings.builder() + .put(buildLdapSettings(OPEN_LDAP_DNS_URL, userTemplate, groupSearchBase, LdapSearchScope.ONE_LEVEL)) + .put("group_search.filter", "(&(objectclass=posixGroup)(memberUid={0}))") + .put("group_search.user_attribute", "uid") + .build(); + RealmConfig config = new RealmConfig("oldap-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(Settings.EMPTY)); + LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool); + + try (LdapSession ldap = session(sessionFactory, "selvig", PASSWORD_SECURE_STRING)) { + assertThat(groups(ldap), hasItem(containsString("Geniuses"))); + } + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29758") + public void testTcpTimeout() throws Exception { + String groupSearchBase = "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + String userTemplate = "uid={0},ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + Settings settings = Settings.builder() + .put(buildLdapSettings(OPEN_LDAP_DNS_URL, userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE)) + .put("group_search.filter", "(objectClass=*)") + .put("ssl.verification_mode", VerificationMode.CERTIFICATE) + .put(SessionFactorySettings.TIMEOUT_TCP_READ_SETTING, "1ms") //1 millisecond + .build(); + RealmConfig config = new RealmConfig("oldap-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(Settings.EMPTY)); + LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool); + + LDAPException expected = expectThrows(LDAPException.class, + () -> session(sessionFactory, "thor", PASSWORD_SECURE_STRING).groups(new PlainActionFuture<>())); + assertThat(expected.getMessage(), containsString("A client-side timeout was encountered while waiting")); + } + + public void testStandardLdapConnectionHostnameVerificationFailure() throws Exception { + //openldap does not use cn as naming attributes by default + String groupSearchBase = "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + String userTemplate = "uid={0},ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + Settings settings = Settings.builder() + // The certificate used in the vagrant box is valid for "localhost", but not for "127.0.0.1" + .put(buildLdapSettings(OPEN_LDAP_IP_URL, userTemplate, groupSearchBase, LdapSearchScope.ONE_LEVEL)) + .put("ssl.verification_mode", VerificationMode.FULL) + .build(); + + RealmConfig config = new RealmConfig("oldap-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(Settings.EMPTY)); + LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool); + + String user = "blackwidow"; + UncategorizedExecutionException e = expectThrows(UncategorizedExecutionException.class, + () -> session(sessionFactory, user, PASSWORD_SECURE_STRING)); + assertThat(e.getCause(), instanceOf(ExecutionException.class)); + assertThat(e.getCause().getCause(), instanceOf(LDAPException.class)); + assertThat(e.getCause().getCause().getMessage(), + anyOf(containsString("Hostname verification failed"), containsString("peer not authenticated"))); + } + + public void testStandardLdapConnectionHostnameVerificationSuccess() throws Exception { + //openldap does not use cn as naming attributes by default + String 
groupSearchBase = "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + String userTemplate = "uid={0},ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + Settings settings = Settings.builder() + // The certificate used in the vagrant box is valid for "localhost" (but not for "127.0.0.1") + .put(buildLdapSettings(OPEN_LDAP_DNS_URL, userTemplate, groupSearchBase, LdapSearchScope.ONE_LEVEL)) + .put("ssl.verification_mode", VerificationMode.FULL) + .build(); + + RealmConfig config = new RealmConfig("oldap-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(Settings.EMPTY)); + LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool); + + final String user = "blackwidow"; + try (LdapSession ldap = session(sessionFactory, user, PASSWORD_SECURE_STRING)) { + assertThat(ldap, notNullValue()); + assertThat(ldap.userDn(), startsWith("uid=" + user + ",")); + } + } + + public void testResolveSingleValuedAttributeFromConnection() throws Exception { + LdapMetaDataResolver resolver = new LdapMetaDataResolver(Settings.builder().putList("metadata", "cn", "sn").build(), true); + try (LDAPConnection ldapConnection = setupOpenLdapConnection()) { + final Map map = resolve(ldapConnection, resolver); + assertThat(map.size(), equalTo(2)); + assertThat(map.get("cn"), equalTo("Clint Barton")); + assertThat(map.get("sn"), equalTo("Clint Barton")); + } + } + + public void testResolveMultiValuedAttributeFromConnection() throws Exception { + LdapMetaDataResolver resolver = new LdapMetaDataResolver(Settings.builder().putList("metadata", "objectClass").build(), true); + try (LDAPConnection ldapConnection = setupOpenLdapConnection()) { + final Map map = resolve(ldapConnection, resolver); + assertThat(map.size(), equalTo(1)); + assertThat(map.get("objectClass"), instanceOf(List.class)); + assertThat((List) map.get("objectClass"), contains("top", "posixAccount", "inetOrgPerson")); + } + } + + public void testResolveMissingAttributeFromConnection() throws Exception { + LdapMetaDataResolver resolver = new LdapMetaDataResolver(Settings.builder().putList("metadata", "alias").build(), true); + try (LDAPConnection ldapConnection = setupOpenLdapConnection()) { + final Map map = resolve(ldapConnection, resolver); + assertThat(map.size(), equalTo(0)); + } + } + + private Settings buildLdapSettings(String ldapUrl, String userTemplate, String groupSearchBase, LdapSearchScope scope) { + Settings.Builder builder = Settings.builder() + .put(LdapTestCase.buildLdapSettings(ldapUrl, userTemplate, groupSearchBase, scope)); + builder.put("group_search.user_attribute", "uid"); + if (useGlobalSSL) { + return builder.build(); + } + return builder + .put("ssl.truststore.path", getDataPath(LDAPTRUST_PATH)) + .put("ssl.truststore.password", "changeit") + .build(); + } + + private LdapSession session(SessionFactory factory, String username, SecureString password) { + PlainActionFuture future = new PlainActionFuture<>(); + factory.session(username, password, future); + return future.actionGet(); + } + + private List groups(LdapSession ldapSession) { + PlainActionFuture> future = new PlainActionFuture<>(); + ldapSession.groups(future); + return future.actionGet(); + } + + private LDAPConnection setupOpenLdapConnection() throws Exception { + Path truststore = getDataPath(LDAPTRUST_PATH); + return LdapTestUtils.openConnection(OpenLdapTests.OPEN_LDAP_DNS_URL, HAWKEYE_DN, OpenLdapTests.PASSWORD, truststore); + } + + private Map resolve(LDAPConnection connection, 
LdapMetaDataResolver resolver) throws Exception { + final PlainActionFuture> future = new PlainActionFuture<>(); + resolver.resolve(connection, HAWKEYE_DN, TimeValue.timeValueSeconds(1), logger, null, future); + return future.get(); + } +} diff --git a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java new file mode 100644 index 0000000000000..c008b5260f82b --- /dev/null +++ b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java @@ -0,0 +1,151 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.ldap; + +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.OpenLdapTests; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapTestCase; +import org.elasticsearch.xpack.security.authc.ldap.support.SessionFactory; +import org.junit.After; +import org.junit.Before; + +import java.nio.file.Path; +import java.text.MessageFormat; +import java.util.List; +import java.util.Locale; +import java.util.Objects; + +import static org.elasticsearch.test.OpenLdapTests.LDAPTRUST_PATH; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.is; + +public class OpenLdapUserSearchSessionFactoryTests extends ESTestCase { + + private Settings globalSettings; + private ThreadPool threadPool; + private MockSecureSettings globalSecureSettings; + + @Before + public void init() throws Exception { + Path keystore = getDataPath(LDAPTRUST_PATH); + /* + * Prior to each test we reinitialize the socket factory with a new SSLService so that we get a new SSLContext. + * If we re-use a SSLContext, previously connected sessions can get re-established which breaks hostname + * verification tests since a re-established connection does not perform hostname verification. 
+ */ + globalSecureSettings = newSecureSettings("xpack.ssl.truststore.secure_password", "changeit"); + globalSettings = Settings.builder() + .put("path.home", createTempDir()) + .put("xpack.ssl.truststore.path", keystore) + .setSecureSettings(globalSecureSettings) + .build(); + threadPool = new TestThreadPool("LdapUserSearchSessionFactoryTests"); + } + + @After + public void shutdown() throws InterruptedException { + terminate(threadPool); + } + + public void testUserSearchWithBindUserOpenLDAP() throws Exception { + final boolean useSecureBindPassword = randomBoolean(); + String groupSearchBase = "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + String userSearchBase = "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + final Settings.Builder realmSettings = Settings.builder() + .put(LdapTestCase.buildLdapSettings(new String[]{OpenLdapTests.OPEN_LDAP_DNS_URL}, Strings.EMPTY_ARRAY, groupSearchBase, + LdapSearchScope.ONE_LEVEL)) + .put("user_search.base_dn", userSearchBase) + .put("group_search.user_attribute", "uid") + .put("bind_dn", "uid=blackwidow,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com") + .put("user_search.pool.enabled", randomBoolean()); + if (useSecureBindPassword) { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("secure_bind_password", OpenLdapTests.PASSWORD); + realmSettings.setSecureSettings(secureSettings); + } else { + realmSettings.put("bind_password", OpenLdapTests.PASSWORD); + } + RealmConfig config = new RealmConfig("oldap-test", realmSettings.build(), globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + Settings.Builder builder = Settings.builder() + .put(globalSettings, false); + builder.put(Settings.builder().put(config.settings(), false).normalizePrefix("xpack.security.authc.realms.ldap.").build()); + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.merge(globalSecureSettings); + if (useSecureBindPassword) { + secureSettings.setString("xpack.security.authc.realms.ldap.secure_bind_password", OpenLdapTests.PASSWORD); + } + builder.setSecureSettings(secureSettings); + Settings settings = builder.build(); + SSLService sslService = new SSLService(settings, TestEnvironment.newEnvironment(settings)); + + + String[] users = new String[]{"cap", "hawkeye", "hulk", "ironman", "thor"}; + try (LdapUserSearchSessionFactory sessionFactory = new LdapUserSearchSessionFactory(config, sslService, threadPool)) { + for (String user : users) { + //auth + try (LdapSession ldap = session(sessionFactory, user, new SecureString(OpenLdapTests.PASSWORD))) { + assertThat(ldap.userDn(), is(equalTo(new MessageFormat("uid={0},ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com", + Locale.ROOT).format(new Object[]{user}, new StringBuffer(), null).toString()))); + assertThat(groups(ldap), hasItem(containsString("Avengers"))); + } + + //lookup + try (LdapSession ldap = unauthenticatedSession(sessionFactory, user)) { + assertThat(ldap.userDn(), is(equalTo(new MessageFormat("uid={0},ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com", + Locale.ROOT).format(new Object[]{user}, new StringBuffer(), null).toString()))); + assertThat(groups(ldap), hasItem(containsString("Avengers"))); + } + } + } + + if (useSecureBindPassword == false) { + assertSettingDeprecationsAndWarnings(new Setting[]{PoolingSessionFactorySettings.LEGACY_BIND_PASSWORD}); + } + } + + private MockSecureSettings newSecureSettings(String key, String value) { + MockSecureSettings 
secureSettings = new MockSecureSettings(); + secureSettings.setString(key, value); + return secureSettings; + } + + private LdapSession session(SessionFactory factory, String username, SecureString password) { + PlainActionFuture future = new PlainActionFuture<>(); + factory.session(username, password, future); + return future.actionGet(); + } + + private List groups(LdapSession ldapSession) { + Objects.requireNonNull(ldapSession); + PlainActionFuture> future = new PlainActionFuture<>(); + ldapSession.groups(future); + return future.actionGet(); + } + + private LdapSession unauthenticatedSession(SessionFactory factory, String username) { + PlainActionFuture future = new PlainActionFuture<>(); + factory.unauthenticatedSession(username, future); + return future.actionGet(); + } +} diff --git a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverTests.java b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverTests.java new file mode 100644 index 0000000000000..f277f5d84b371 --- /dev/null +++ b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/SearchGroupsResolverTests.java @@ -0,0 +1,180 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.ldap; + +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.OpenLdapTests; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; +import org.elasticsearch.xpack.core.security.support.NoOpLogger; + +import java.util.List; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +@SuppressWarnings("unchecked") +public class SearchGroupsResolverTests extends GroupsResolverTestCase { + + private static final String BRUCE_BANNER_DN = "uid=hulk,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; + + public void testResolveSubTree() throws Exception { + Settings settings = Settings.builder() + .put("group_search.base_dn", "dc=oldap,dc=test,dc=elasticsearch,dc=com") + .put("group_search.user_attribute", "uid") + .build(); + + SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + List groups = + resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(10), NoOpLogger.INSTANCE, null); + assertThat(groups, containsInAnyOrder( + containsString("Avengers"), + containsString("SHIELD"), + containsString("Geniuses"), + containsString("Philanthropists"))); + } + + public void testResolveOneLevel() throws Exception { + Settings settings = Settings.builder() + .put("group_search.base_dn", "ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com") + .put("group_search.scope", LdapSearchScope.ONE_LEVEL) + .put("group_search.user_attribute", "uid") + .build(); + + SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + List groups = + resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(10), NoOpLogger.INSTANCE, null); + assertThat(groups, containsInAnyOrder( + 
containsString("Avengers"), + containsString("SHIELD"), + containsString("Geniuses"), + containsString("Philanthropists"))); + } + + public void testResolveBase() throws Exception { + Settings settings = Settings.builder() + .put("group_search.base_dn", "cn=Avengers,ou=People,dc=oldap,dc=test,dc=elasticsearch,dc=com") + .put("group_search.scope", LdapSearchScope.BASE) + .put("group_search.user_attribute", "uid") + .build(); + + SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + List groups = + resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(10), NoOpLogger.INSTANCE, null); + assertThat(groups, hasItem(containsString("Avengers"))); + } + + public void testResolveCustomFilter() throws Exception { + Settings settings = Settings.builder() + .put("group_search.base_dn", "dc=oldap,dc=test,dc=elasticsearch,dc=com") + .put("group_search.filter", "(&(objectclass=posixGroup)(memberUID={0}))") + .put("group_search.user_attribute", "uid") + .build(); + + SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + List groups = + resolveBlocking(resolver, ldapConnection, "uid=selvig,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com", + TimeValue.timeValueSeconds(10), NoOpLogger.INSTANCE, null); + assertThat(groups, hasItem(containsString("Geniuses"))); + } + + public void testFilterIncludesPosixGroups() throws Exception { + Settings settings = Settings.builder() + .put("group_search.base_dn", "dc=oldap,dc=test,dc=elasticsearch,dc=com") + .put("group_search.user_attribute", "uid") + .build(); + + SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + List groups = + resolveBlocking(resolver, ldapConnection, "uid=selvig,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com", + TimeValue.timeValueSeconds(10), NoOpLogger.INSTANCE, null); + assertThat(groups, hasItem(containsString("Geniuses"))); + } + + public void testCreateWithoutSpecifyingBaseDN() throws Exception { + Settings settings = Settings.builder() + .put("group_search.scope", LdapSearchScope.SUB_TREE) + .build(); + + try { + new SearchGroupsResolver(settings); + fail("base_dn must be specified and an exception should have been thrown"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("base_dn must be specified")); + } + } + + public void testReadUserAttributeUid() throws Exception { + Settings settings = Settings.builder() + .put("group_search.base_dn", "dc=oldap,dc=test,dc=elasticsearch,dc=com") + .put("group_search.user_attribute", "uid").build(); + SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + PlainActionFuture future = new PlainActionFuture<>(); + resolver.readUserAttribute(ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(5), future); + assertThat(future.actionGet(), is("hulk")); + } + + public void testReadUserAttributeCn() throws Exception { + Settings settings = Settings.builder() + .put("group_search.base_dn", "dc=oldap,dc=test,dc=elasticsearch,dc=com") + .put("group_search.user_attribute", "cn") + .build(); + SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + + PlainActionFuture future = new PlainActionFuture<>(); + resolver.readUserAttribute(ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(5), future); + assertThat(future.actionGet(), is("Bruce Banner")); + } + + public void testReadNonExistentUserAttribute() throws Exception { + Settings settings = Settings.builder() + .put("group_search.base_dn", "dc=oldap,dc=test,dc=elasticsearch,dc=com") + 
.put("group_search.user_attribute", "doesntExists") + .build(); + SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + + PlainActionFuture future = new PlainActionFuture<>(); + resolver.readUserAttribute(ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(5), future); + assertNull(future.actionGet()); + } + + public void testReadBinaryUserAttribute() throws Exception { + Settings settings = Settings.builder() + .put("group_search.base_dn", "dc=oldap,dc=test,dc=elasticsearch,dc=com") + .put("group_search.user_attribute", "userPassword") + .build(); + SearchGroupsResolver resolver = new SearchGroupsResolver(settings); + + PlainActionFuture future = new PlainActionFuture<>(); + resolver.readUserAttribute(ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(5), future); + String attribute = future.actionGet(); + assertThat(attribute, is(notNullValue())); + } + + @Override + protected String ldapUrl() { + return OpenLdapTests.OPEN_LDAP_DNS_URL; + } + + @Override + protected String bindDN() { + return BRUCE_BANNER_DN; + } + + @Override + protected String bindPassword() { + return OpenLdapTests.PASSWORD; + } + + @Override + protected String trustPath() { + return "/idptrust.jks"; + } +} \ No newline at end of file diff --git a/x-pack/qa/reindex-tests-with-security/build.gradle b/x-pack/qa/reindex-tests-with-security/build.gradle new file mode 100644 index 0000000000000..ddf72f7d45833 --- /dev/null +++ b/x-pack/qa/reindex-tests-with-security/build.gradle @@ -0,0 +1,40 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path: ':modules:reindex') +} + +integTestCluster { + // Whitelist reindexing from the local node so we can test it. + setting 'reindex.remote.whitelist', '127.0.0.1:*' + setting 'xpack.security.enabled', 'true' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + extraConfigFile 'roles.yml', 'roles.yml' + [ + test_admin: 'superuser', + powerful_user: 'superuser', + minimal_user: 'minimal', + readonly_user: 'readonly', + dest_only_user: 'dest_only', + can_not_see_hidden_docs_user: 'can_not_see_hidden_docs', + can_not_see_hidden_fields_user: 'can_not_see_hidden_fields', + ].each { String user, String role -> + setupCommand 'setupUser#' + user, + 'bin/elasticsearch-users', 'useradd', user, '-p', 'x-pack-test-password', '-r', role + } + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'test_admin', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} diff --git a/x-pack/qa/reindex-tests-with-security/roles.yml b/x-pack/qa/reindex-tests-with-security/roles.yml new file mode 100644 index 0000000000000..ce45f980670fd --- /dev/null +++ b/x-pack/qa/reindex-tests-with-security/roles.yml @@ -0,0 +1,91 @@ +# All cluster rights +# All operations on all indices +# Run as all users +admin: + cluster: + - all + indices: + - names: '*' + privileges: [ all ] + run_as: + - '*' + +# Search and write on both source and destination indices. 
It should work if you could just search on the source and +# write to the destination but that isn't how security works. +minimal: + cluster: + - cluster:monitor/main + indices: + - names: source + privileges: + - read + - write + - create_index + - indices:admin/refresh + - names: dest + privileges: + - read + - write + - create_index + - indices:admin/refresh + +# Read only operations on indices +readonly: + cluster: + - cluster:monitor/main + indices: + - names: '*' + privileges: [ read ] + +# Write operations on destination index, none on source index +dest_only: + cluster: + - cluster:monitor/main + indices: + - names: dest + privileges: [ write ] + +# Search and write on both source and destination indices with document level security filtering out some docs. +can_not_see_hidden_docs: + cluster: + - cluster:monitor/main + indices: + - names: source + privileges: + - read + - write + - create_index + - indices:admin/refresh + query: + bool: + must_not: + match: + hidden: true + - names: dest + privileges: + - read + - write + - create_index + - indices:admin/refresh + +# Search and write on both source and destination indices with field level security. +can_not_see_hidden_fields: + cluster: + - cluster:monitor/main + indices: + - names: source + privileges: + - read + - write + - create_index + - indices:admin/refresh + field_security: + grant: + - foo + - bar + - names: dest + privileges: + - read + - write + - create_index + - indices:admin/refresh diff --git a/x-pack/qa/reindex-tests-with-security/src/test/java/org/elasticsearch/xpack/security/ReindexWithSecurityClientYamlTestSuiteIT.java b/x-pack/qa/reindex-tests-with-security/src/test/java/org/elasticsearch/xpack/security/ReindexWithSecurityClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..67ebf16f426ed --- /dev/null +++ b/x-pack/qa/reindex-tests-with-security/src/test/java/org/elasticsearch/xpack/security/ReindexWithSecurityClientYamlTestSuiteIT.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; + +public class ReindexWithSecurityClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + private static final String USER = "test_admin"; + private static final String PASS = "x-pack-test-password"; + + public ReindexWithSecurityClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + + /** + * All tests run as a an administrative user but use es-security-runas-user to become a less privileged user. 
+ */ + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } +} + diff --git a/x-pack/qa/reindex-tests-with-security/src/test/java/org/elasticsearch/xpack/security/ReindexWithSecurityIT.java b/x-pack/qa/reindex-tests-with-security/src/test/java/org/elasticsearch/xpack/security/ReindexWithSecurityIT.java new file mode 100644 index 0000000000000..d21900ba47a5b --- /dev/null +++ b/x-pack/qa/reindex-tests-with-security/src/test/java/org/elasticsearch/xpack/security/ReindexWithSecurityIT.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security; + +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.reindex.DeleteByQueryAction; +import org.elasticsearch.index.reindex.ReindexAction; +import org.elasticsearch.index.reindex.UpdateByQueryAction; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.xpack.core.security.SecurityField; + +import java.util.Collection; +import java.util.stream.Collectors; + +import static org.hamcrest.core.IsCollectionContaining.hasItem; + +public class ReindexWithSecurityIT extends SecurityIntegTestCase { + + @Override + protected Settings externalClusterClientSettings() { + Settings.Builder builder = Settings.builder().put(super.externalClusterClientSettings()); + builder.put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4); + builder.put(SecurityField.USER_SETTING.getKey(), "test_admin:x-pack-test-password"); + return builder.build(); + } + + /** + * TODO: this entire class should be removed. 
SecurityIntegTestCase is meant for tests, but we run against real xpack + */ + @Override + public void doAssertXPackIsInstalled() { + // this assertion doesn't make sense with a real distribution, since there is not currently a way + // from nodes info to see which modules are loaded + } + + public void testDeleteByQuery() { + createIndicesWithRandomAliases("test1", "test2", "test3"); + + BulkByScrollResponse response = DeleteByQueryAction.INSTANCE.newRequestBuilder(client()) + .source("test1", "test2") + .filter(QueryBuilders.matchAllQuery()) + .get(); + assertNotNull(response); + + response = DeleteByQueryAction.INSTANCE.newRequestBuilder(client()) + .source("test*") + .filter(QueryBuilders.matchAllQuery()) + .get(); + assertNotNull(response); + + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, + () -> DeleteByQueryAction.INSTANCE.newRequestBuilder(client()) + .source("test1", "index1") + .filter(QueryBuilders.matchAllQuery()) + .get()); + assertEquals("no such index", e.getMessage()); + } + + public void testUpdateByQuery() { + createIndicesWithRandomAliases("test1", "test2", "test3"); + + BulkByScrollResponse response = UpdateByQueryAction.INSTANCE.newRequestBuilder(client()).source("test1", "test2").get(); + assertNotNull(response); + + response = UpdateByQueryAction.INSTANCE.newRequestBuilder(client()).source("test*").get(); + assertNotNull(response); + + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, + () -> UpdateByQueryAction.INSTANCE.newRequestBuilder(client()).source("test1", "index1").get()); + assertEquals("no such index", e.getMessage()); + } + + public void testReindex() { + createIndicesWithRandomAliases("test1", "test2", "test3", "dest"); + + BulkByScrollResponse response = ReindexAction.INSTANCE.newRequestBuilder(client()).source("test1", "test2") + .destination("dest").get(); + assertNotNull(response); + + response = ReindexAction.INSTANCE.newRequestBuilder(client()).source("test*").destination("dest").get(); + assertNotNull(response); + + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, + () -> ReindexAction.INSTANCE.newRequestBuilder(client()).source("test1", "index1").destination("dest").get()); + assertEquals("no such index", e.getMessage()); + } +} diff --git a/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/10_reindex.yml b/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/10_reindex.yml new file mode 100644 index 0000000000000..407a1c1849526 --- /dev/null +++ b/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/10_reindex.yml @@ -0,0 +1,315 @@ +--- +"Reindex as same user works": + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + reindex: + body: + source: + index: source + dest: + index: dest + - match: {created: 1} + +--- +"Reindex with runas user works": + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + headers: {es-security-runas-user: powerful_user} + reindex: + refresh: true + body: + source: + index: source + dest: + index: dest + - match: {created: 1} + + - do: + search: + index: dest + body: + query: + match: + text: test + - match: { hits.total: 1 } + + +--- +"Reindex with runas user with minimal privileges works": + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + headers: 
{es-security-runas-user: minimal_user} + reindex: + refresh: true + body: + source: + index: source + dest: + index: dest + - match: {created: 1} + + - do: + search: + index: dest + body: + query: + match: + text: test + - match: { hits.total: 1 } + + +--- +"Reindex as readonly user is forbidden": + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + headers: {es-security-runas-user: readonly_user} + catch: forbidden + reindex: + body: + source: + index: source + dest: + index: dest + +--- +"Reindex as user that can't read from the source is forbidden": + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + headers: {es-security-runas-user: dest_only_user} + catch: forbidden + reindex: + body: + source: + index: source + dest: + index: dest + +--- +"Using a script to write to an index to which you don't have access is forbidden": + - do: + index: + index: source + type: tweet + id: 1 + body: { "user": "kimchy" } + - do: + index: + index: source + type: tweet + id: 2 + body: { "user": "another" } + - do: + indices.refresh: {} + + - do: + headers: {es-security-runas-user: minimal_user} + catch: forbidden + reindex: + body: + source: + index: source + dest: + index: dest + script: + source: if (ctx._source.user == "kimchy") {ctx._index = 'other_dest'} + + - do: + indices.refresh: {} + + # The index to which the user tried the unauthorized write didn't even get created + - do: + catch: missing + search: + index: other_dest + + - do: + search: + index: dest + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: dest } + +--- +"Reindex misses hidden docs": + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + index: + index: source + type: foo + id: 2 + body: { "text": "test", "hidden": true } + - do: + indices.refresh: {} + + - do: + headers: {es-security-runas-user: can_not_see_hidden_docs_user} + reindex: + refresh: true + body: + source: + index: source + dest: + index: dest + - match: {created: 1} + + # We copied just one doc, presumably the one without the hidden field + - do: + search: + index: dest + body: + query: + match: + text: test + - match: { hits.total: 1 } + + # We didn't copy the doc with the hidden field + - do: + search: + index: dest + body: + query: + match: + hidden: true + - match: { hits.total: 0 } + +--- +"Reindex misses hidden fields": + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test", "foo": "z", "bar": "z" } + - do: + indices.refresh: {} + + - do: + headers: {es-security-runas-user: can_not_see_hidden_fields_user} + reindex: + refresh: true + body: + source: + index: source + dest: + index: dest + - match: {created: 1} + + - do: + search: + index: dest + body: + query: + match: + foo: z + - match: { hits.total: 1 } + + - do: + search: + index: dest + body: + query: + match: + bar: z + - match: { hits.total: 1 } + + - do: + search: + index: dest + body: + query: + match: + text: test + - match: { hits.total: 0 } + +--- +"Reindex to index with document level security is forbidden": + + - do: + index: + index: dest + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + headers: {es-security-runas-user: can_not_see_hidden_docs_user} + reindex: + body: + source: + index: dest + dest: + index: source + +--- +"Reindex to index with field level security is forbidden": + + - do: + index: + index: dest + type: foo + id: 1 + body: { 
"text": "test" } + - do: + indices.refresh: {} + + - do: + headers: {es-security-runas-user: can_not_see_hidden_fields_user} + reindex: + body: + source: + index: dest + dest: + index: source diff --git a/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/15_reindex_from_remote.yml b/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/15_reindex_from_remote.yml new file mode 100644 index 0000000000000..3c31b8cc5b039 --- /dev/null +++ b/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/15_reindex_from_remote.yml @@ -0,0 +1,417 @@ +--- +"Reindex from remote as superuser works": + - skip: + features: catch_unauthorized + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + # Fetch the http host. We use the host of the master because we know there will always be a master. + - do: + cluster.state: {} + - set: { master_node: master } + - do: + nodes.info: + metric: [ http ] + - is_true: nodes.$master.http.publish_address + - set: {nodes.$master.http.publish_address: host} + - do: + reindex: + body: + source: + remote: + host: http://${host} + username: test_admin + password: x-pack-test-password + index: source + dest: + index: dest + - match: {created: 1} + +--- +"Reindex from remote searching as user with minimal privileges works": + - skip: + features: catch_unauthorized + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + # Fetch the http host. We use the host of the master because we know there will always be a master. + - do: + cluster.state: {} + - set: { master_node: master } + - do: + nodes.info: + metric: [ http ] + - is_true: nodes.$master.http.publish_address + - set: {nodes.$master.http.publish_address: host} + - do: + reindex: + refresh: true + body: + source: + remote: + host: http://${host} + username: minimal_user + password: x-pack-test-password + index: source + dest: + index: dest + - match: {created: 1} + + - do: + search: + index: dest + body: + query: + match: + text: test + - match: { hits.total: 1 } + +--- +"Reindex from remote reading as readonly user works when the indexing user is allowed to index": + - skip: + features: catch_unauthorized + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + # Fetch the http host. We use the host of the master because we know there will always be a master. + - do: + cluster.state: {} + - set: { master_node: master } + - do: + nodes.info: + metric: [ http ] + - is_true: nodes.$master.http.publish_address + - set: {nodes.$master.http.publish_address: host} + - do: + reindex: + refresh: true + body: + source: + remote: + host: http://${host} + username: readonly_user + password: x-pack-test-password + index: source + dest: + index: dest + + - do: + search: + index: dest + body: + query: + match: + text: test + - match: { hits.total: 1 } + +--- +"Reindex from remote as user that can't read from the source is forbidden": + - skip: + features: catch_unauthorized + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + # Fetch the http host. We use the host of the master because we know there will always be a master. 
+ - do: + cluster.state: {} + - set: { master_node: master } + - do: + nodes.info: + metric: [ http ] + - is_true: nodes.$master.http.publish_address + - set: {nodes.$master.http.publish_address: host} + - do: + catch: forbidden + reindex: + body: + source: + remote: + host: http://${host} + username: dest_only_user + password: x-pack-test-password + index: source + dest: + index: dest + +--- +"Using a script to write to an index to which you don't have access is forbidden even if you read as a superuser": + - do: + index: + index: source + type: tweet + id: 1 + body: { "user": "kimchy" } + - do: + index: + index: source + type: tweet + id: 2 + body: { "user": "another" } + - do: + indices.refresh: {} + + # Fetch the http host. We use the host of the master because we know there will always be a master. + - do: + cluster.state: {} + - set: { master_node: master } + - do: + nodes.info: + metric: [ http ] + - is_true: nodes.$master.http.publish_address + - set: {nodes.$master.http.publish_address: host} + - do: + headers: {es-security-runas-user: minimal_user} + catch: forbidden + reindex: + body: + source: + remote: + host: http://${host} + username: test_admin + password: x-pack-test-password + index: source + dest: + index: dest + script: + source: if (ctx._source.user == "kimchy") {ctx._index = 'other_dest'} + + - do: + indices.refresh: {} + + # The index to which the user tried the unauthorized write didn't even get created + - do: + catch: missing + search: + index: other_dest + + - do: + search: + index: dest + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: dest } + +--- +"Reindex from remote misses hidden docs": + - skip: + features: catch_unauthorized + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + index: + index: source + type: foo + id: 2 + body: { "text": "test", "hidden": true } + - do: + indices.refresh: {} + + # Fetch the http host. We use the host of the master because we know there will always be a master. + - do: + cluster.state: {} + - set: { master_node: master } + - do: + nodes.info: + metric: [ http ] + - is_true: nodes.$master.http.publish_address + - set: {nodes.$master.http.publish_address: host} + - do: + reindex: + refresh: true + body: + source: + remote: + host: http://${host} + username: can_not_see_hidden_docs_user + password: x-pack-test-password + index: source + dest: + index: dest + - match: {created: 1} + + # We copied just one doc, presumably the one without the hidden field + - do: + search: + index: dest + body: + query: + match: + text: test + - match: { hits.total: 1 } + + # We didn't copy the doc with the hidden field + - do: + search: + index: dest + body: + query: + match: + hidden: true + - match: { hits.total: 0 } + +--- +"Reindex misses hidden fields": + - skip: + features: catch_unauthorized + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test", "foo": "z", "bar": "z" } + - do: + indices.refresh: {} + + # Fetch the http host. We use the host of the master because we know there will always be a master. 
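+  # The publish address is stashed into $host below and reused as the remote endpoint (http://${host}) in the reindex request.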
+ - do: + cluster.state: {} + - set: { master_node: master } + - do: + nodes.info: + metric: [ http ] + - is_true: nodes.$master.http.publish_address + - set: {nodes.$master.http.publish_address: host} + - do: + reindex: + refresh: true + body: + source: + remote: + host: http://${host} + username: can_not_see_hidden_fields_user + password: x-pack-test-password + index: source + dest: + index: dest + - match: {created: 1} + + - do: + search: + index: dest + body: + query: + match: + foo: z + - match: { hits.total: 1 } + + - do: + search: + index: dest + body: + query: + match: + bar: z + - match: { hits.total: 1 } + + - do: + search: + index: dest + body: + query: + match: + text: test + - match: { hits.total: 0 } + + +--- +"Reindex from remote with bad password is unauthorized": + - skip: + features: catch_unauthorized + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + # Fetch the http host. We use the host of the master because we know there will always be a master. + - do: + cluster.state: {} + - set: { master_node: master } + - do: + nodes.info: + metric: [ http ] + - is_true: nodes.$master.http.publish_address + - set: {nodes.$master.http.publish_address: host} + - do: + catch: unauthorized + reindex: + body: + source: + remote: + host: http://${host} + username: test_admin + password: badpass + index: source + dest: + index: dest + +--- +"Reindex from remote with no username or password is unauthorized": + - skip: + features: catch_unauthorized + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + # Fetch the http host. We use the host of the master because we know there will always be a master. + - do: + cluster.state: {} + - set: { master_node: master } + - do: + nodes.info: + metric: [ http ] + - is_true: nodes.$master.http.publish_address + - set: {nodes.$master.http.publish_address: host} + - do: + catch: unauthorized + reindex: + body: + source: + remote: + host: http://${host} + index: source + dest: + index: dest diff --git a/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/20_update_by_query.yml b/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/20_update_by_query.yml new file mode 100644 index 0000000000000..627b29ea8b529 --- /dev/null +++ b/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/20_update_by_query.yml @@ -0,0 +1,213 @@ +--- +"Update_by_query as same user works": + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + update_by_query: + refresh: true + index: source + body: + script: + source: ctx._source['hi'] = 'there' + - match: {updated: 1} + + - do: + search: + index: source + body: + query: + match: + hi: there + - match: { hits.total: 1 } + +--- +"Update_by_query with runas user works": + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + headers: {es-security-runas-user: powerful_user} + update_by_query: + refresh: true + index: source + body: + script: + source: ctx._source['hi'] = 'there' + - match: {updated: 1} + + - do: + search: + index: source + body: + query: + match: + hi: there + - match: { hits.total: 1 } + +--- +"Update_by_query with runas user with minimal privileges works": + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + 
headers: {es-security-runas-user: minimal_user} + update_by_query: + refresh: true + index: source + body: + script: + source: ctx._source['hi'] = 'there' + - match: {updated: 1} + + - do: + search: + index: source + body: + query: + match: + hi: there + - match: { hits.total: 1 } + +--- +"Update_by_query as readonly user is forbidden": + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + headers: {es-security-runas-user: readonly_user} + catch: forbidden + update_by_query: + index: source + +--- +"Update_by_query as user that can't read from the source is forbidden": + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + headers: {es-security-runas-user: dest_only_user} + catch: forbidden + update_by_query: + index: source + +--- +"Update_by_query misses hidden docs": + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + index: + index: source + type: foo + id: 2 + body: { "text": "test", "hidden": true } + - do: + indices.refresh: {} + + - do: + headers: {es-security-runas-user: can_not_see_hidden_docs_user} + update_by_query: + refresh: true + index: source + body: + script: + source: ctx._source['hi'] = 'there' + - match: {updated: 1} + + # We only updated one doc, presumably the one without the hidden field + - do: + search: + index: source + body: + query: + match: + hi: there + - match: { hits.total: 1 } + + # We didn't update the doc with the hidden field + - do: + search: + index: source + body: + query: + bool: + must: + - match: + hi: there + - match: + hidden: true + - match: { hits.total: 0 } + +--- +"Reindex misses hidden fields": + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test", "foo": "z", "bar": "z" } + - do: + indices.refresh: {} + + - do: + headers: {es-security-runas-user: can_not_see_hidden_fields_user} + update_by_query: + index: source + body: + script: + source: ctx._source['hi'] = ctx._source['text'] + ';' + ctx._source['foo'] + - match: {updated: 1} + + - do: + get: + index: source + type: foo + id: 1 + # These were visible to the user running the update_by_query so they stayed. + - match: { _source.foo: z } + - match: { _source.bar: z } + # This wasn't visible to the update_by_query-ing user so it is gone. + - is_false: _source.text + # The reindexing user tried to sneak an invisible field using a script and got a null for their trouble. 
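+  # Concatenating the unreadable 'text' field yields the string "null", so 'hi' becomes the literal "null;z".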
+ - match: { _source.hi: null;z } diff --git a/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/30_delete_by_query.yml b/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/30_delete_by_query.yml new file mode 100644 index 0000000000000..59b6f2b7792a6 --- /dev/null +++ b/x-pack/qa/reindex-tests-with-security/src/test/resources/rest-api-spec/test/30_delete_by_query.yml @@ -0,0 +1,230 @@ +--- +"Delete_by_query as same user works": + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + delete_by_query: + refresh: true + index: source + body: + query: + match_all: {} + - match: {deleted: 1} + + - do: + count: + index: source + - match: {count: 0} + +--- +"Delete_by_query with runas user works": + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + headers: {es-security-runas-user: powerful_user} + delete_by_query: + refresh: true + index: source + body: + query: + match_all: {} + - match: {deleted: 1} + + - do: + count: + index: source + - match: {count: 0} + +--- +"Delete_by_query with runas user with minimal privileges works": + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + headers: {es-security-runas-user: minimal_user} + delete_by_query: + refresh: true + index: source + body: + query: + match_all: {} + - match: {deleted: 1} + + - do: + count: + index: source + - match: {count: 0} + +--- +"Delete_by_query as readonly user is forbidden": + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + headers: {es-security-runas-user: readonly_user} + catch: forbidden + delete_by_query: + refresh: true + index: source + body: + query: + match_all: {} + + - do: + count: + index: source + - match: {count: 1} + +--- +"Delete_by_query as user that can't read from the source is forbidden": + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + + - do: + headers: {es-security-runas-user: dest_only_user} + catch: forbidden + delete_by_query: + refresh: true + index: source + body: + query: + match_all: {} + + - do: + count: + index: source + - match: {count: 1} + +--- +"Delete_by_query misses hidden docs": + + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test", "hidden": false } + - do: + index: + index: source + type: foo + id: 2 + body: { "text": "test", "hidden": true } + - do: + indices.refresh: {} + + - do: + headers: {es-security-runas-user: can_not_see_hidden_docs_user} + delete_by_query: + refresh: true + index: source + body: + query: + match: + text: "test" + - match: {deleted: 1} + + # We only deleted one doc, presumably the one without the hidden field + - do: + search: + index: source + body: + query: + match: + text: "test" + - match: { hits.total: 1 } + + # We didn't delete the doc with the hidden field set to "true" + - do: + search: + index: source + body: + query: + bool: + must: + - match: + text: "test" + - match: + hidden: true + - match: { hits.total: 1 } + + # But the doc with the hidden field set to "false" must have been deleted + - do: + search: + index: source + body: + query: + bool: + must: + - match: + text: "test" + - match: + hidden: false + - match: { hits.total: 0 } + +--- +"Delete_by_query misses hidden fields": + + - do: + index: + index: source + type: foo + 
id: 1 + body: { "text": "test", "foo": "z", "bar": "z" } + - do: + indices.refresh: {} + + - do: + headers: {es-security-runas-user: can_not_see_hidden_fields_user} + delete_by_query: + refresh: true + index: source + body: + query: + match: + text: "test" + - match: {deleted: 0} + + # The "text" field was not visible to the user running the delete_by_query + # so the document survived. + - do: + count: + index: source + - match: {count: 1} diff --git a/x-pack/qa/rolling-upgrade-basic/build.gradle b/x-pack/qa/rolling-upgrade-basic/build.gradle new file mode 100644 index 0000000000000..bb1b5c58c4aa5 --- /dev/null +++ b/x-pack/qa/rolling-upgrade-basic/build.gradle @@ -0,0 +1,203 @@ +import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.test.NodeInfo +import org.elasticsearch.gradle.test.RestIntegTestTask + +import java.nio.charset.StandardCharsets + +apply plugin: 'elasticsearch.standalone-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('security'), configuration: 'runtime') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // to be moved in a later commit +} + +Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> + File tmpFile = new File(node.cwd, 'wait.success') + + // wait up to two minutes + final long stopTime = System.currentTimeMillis() + (2 * 60000L); + Exception lastException = null; + int lastResponseCode = 0 + + while (System.currentTimeMillis() < stopTime) { + + lastException = null; + // we use custom wait logic here as the elastic user is not available immediately and ant.get will fail when a 401 is returned + HttpURLConnection httpURLConnection = null; + try { + // TODO this sucks having to hardcode number of nodes, but node.config.numNodes isn't necessarily accurate for rolling + httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health?wait_for_nodes=2&wait_for_status=yellow").openConnection(); + httpURLConnection.setRequestProperty("Authorization", "Basic " + + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8))); + httpURLConnection.setRequestMethod("GET"); + httpURLConnection.setConnectTimeout(1000); + httpURLConnection.setReadTimeout(30000); // read needs to wait for nodes! + httpURLConnection.connect(); + lastResponseCode = httpURLConnection.getResponseCode() + if (lastResponseCode == 200) { + tmpFile.withWriter StandardCharsets.UTF_8.name(), { + it.write(httpURLConnection.getInputStream().getText(StandardCharsets.UTF_8.name())) + } + break; + } + } catch (Exception e) { + logger.debug("failed to call cluster health", e) + lastException = e + } finally { + if (httpURLConnection != null) { + httpURLConnection.disconnect(); + } + } + + // did not start, so wait a bit before trying again + Thread.sleep(500L); + } + if (tmpFile.exists() == false) { + final String message = "final attempt of calling cluster health failed [lastResponseCode=${lastResponseCode}]" + if (lastException != null) { + logger.error(message, lastException) + } else { + logger.error(message + " [no exception]") + } + } + return tmpFile.exists() +} + +// This is a top level task which we will add dependencies to below. +// It is a single task that can be used to backcompat tests against all versions. +task bwcTest { + description = 'Runs backwards compatibility tests.' 
+ group = 'verification' +} + +String outputDir = "${buildDir}/generated-resources/${project.name}" + +for (Version version : bwcVersions.wireCompatible) { + String baseName = "v${version}" + + Task oldClusterTest = tasks.create(name: "${baseName}#oldClusterTest", type: RestIntegTestTask) { + mustRunAfter(precommit) + } + + configure(extensions.findByName("${baseName}#oldClusterTestCluster")) { + if (version.before('6.3.0')) { + plugin xpackProject('plugin').path + } + bwcVersion = version + numBwcNodes = 2 + numNodes = 2 + minimumMasterNodes = { 2 } + clusterName = 'rolling-upgrade-basic' + waitCondition = waitWithAuth + setting 'xpack.security.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.watcher.enabled', 'false' + setting 'xpack.license.self_generated.type', 'basic' + } + + Task oldClusterTestRunner = tasks.getByName("${baseName}#oldClusterTestRunner") + oldClusterTestRunner.configure { + systemProperty 'tests.rest.suite', 'old_cluster' + } + + Task mixedClusterTest = tasks.create(name: "${baseName}#mixedClusterTest", type: RestIntegTestTask) + + configure(extensions.findByName("${baseName}#mixedClusterTestCluster")) { + dependsOn oldClusterTestRunner, "${baseName}#oldClusterTestCluster#node1.stop" + clusterName = 'rolling-upgrade-basic' + unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() } + minimumMasterNodes = { 2 } + dataDir = { nodeNumber -> oldClusterTest.nodes[1].dataDir } + waitCondition = waitWithAuth + setting 'xpack.security.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.watcher.enabled', 'false' + setting 'xpack.license.self_generated.type', 'basic' + setting 'node.name', 'mixed-node-0' + } + + Task mixedClusterTestRunner = tasks.getByName("${baseName}#mixedClusterTestRunner") + mixedClusterTestRunner.configure { + systemProperty 'tests.rest.suite', 'mixed_cluster' + finalizedBy "${baseName}#oldClusterTestCluster#node0.stop" + } + + Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask) + + configure(extensions.findByName("${baseName}#upgradedClusterTestCluster")) { + dependsOn(mixedClusterTestRunner, "${baseName}#oldClusterTestCluster#node0.stop") + clusterName = 'rolling-upgrade-basic' + unicastTransportUri = { seedNode, node, ant -> mixedClusterTest.nodes.get(0).transportUri() } + minimumMasterNodes = { 2 } + dataDir = { nodeNumber -> oldClusterTest.nodes[0].dataDir } + waitCondition = waitWithAuth + setting 'xpack.security.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.watcher.enabled', 'false' + setting 'xpack.license.self_generated.type', 'basic' + setting 'node.name', 'upgraded-node-0' + } + + Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") + upgradedClusterTestRunner.configure { + systemProperty 'tests.rest.suite', 'upgraded_cluster' + // only need to kill the mixed cluster tests node here because we explicitly told it to not stop nodes upon completion + finalizedBy "${baseName}#mixedClusterTestCluster#stop" + } + + Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") { + dependsOn = [upgradedClusterTest] + } + + if (project.bwc_tests_enabled) { + bwcTest.dependsOn(versionBwcTest) + } +} + +test.enabled = false // no unit tests for rolling upgrades, only the rest integration test + +// basic integ tests includes 
testing bwc against the most recent version +task integTest { + if (project.bwc_tests_enabled) { + for (final def version : bwcVersions.snapshotsWireCompatible) { + dependsOn "v${version}#bwcTest" + } + } +} +check.dependsOn(integTest) + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') +} + +compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked" + +// copy x-pack plugin info so it is on the classpath and security manager has the right permissions +task copyXPackRestSpec(type: Copy) { + dependsOn(project.configurations.restSpec, 'processTestResources') + from project(xpackProject('plugin').path).sourceSets.test.resources + include 'rest-api-spec/api/**' + into project.sourceSets.test.output.resourcesDir +} + +task copyXPackPluginProps(type: Copy) { + dependsOn(copyXPackRestSpec) + from project(xpackModule('core')).file('src/main/plugin-metadata') + from project(xpackModule('core')).tasks.pluginProperties + into outputDir +} +project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) + +repositories { + maven { + url "https://artifacts.elastic.co/maven" + } + maven { + url "https://snapshots.elastic.co/maven" + } +} diff --git a/x-pack/qa/rolling-upgrade-basic/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java b/x-pack/qa/rolling-upgrade-basic/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java new file mode 100644 index 0000000000000..ebe7530e732c5 --- /dev/null +++ b/x-pack/qa/rolling-upgrade-basic/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.upgrades; + +import org.elasticsearch.test.rest.ESRestTestCase; + +public abstract class AbstractUpgradeTestCase extends ESRestTestCase { + + @Override + protected boolean preserveIndicesUponCompletion() { + return true; + } + + @Override + protected boolean preserveReposUponCompletion() { + return true; + } + + @Override + protected boolean preserveTemplatesUponCompletion() { + return true; + } + + enum CLUSTER_TYPE { + OLD, + MIXED, + UPGRADED; + + public static CLUSTER_TYPE parse(String value) { + switch (value) { + case "old_cluster": + return OLD; + case "mixed_cluster": + return MIXED; + case "upgraded_cluster": + return UPGRADED; + default: + throw new AssertionError("unknown cluster type: " + value); + } + } + } + + protected final CLUSTER_TYPE clusterType = CLUSTER_TYPE.parse(System.getProperty("tests.rest.suite")); +} diff --git a/x-pack/qa/rolling-upgrade-basic/src/test/java/org/elasticsearch/upgrades/BasicLicenseUpgradeIT.java b/x-pack/qa/rolling-upgrade-basic/src/test/java/org/elasticsearch/upgrades/BasicLicenseUpgradeIT.java new file mode 100644 index 0000000000000..9ed7f561ba30b --- /dev/null +++ b/x-pack/qa/rolling-upgrade-basic/src/test/java/org/elasticsearch/upgrades/BasicLicenseUpgradeIT.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.upgrades; + +import org.elasticsearch.client.Response; +import org.elasticsearch.license.LicenseService; + +import java.util.Map; + +public class BasicLicenseUpgradeIT extends AbstractUpgradeTestCase { + + public void testOldAndMixedClusterHaveActiveBasic() throws Exception { + assumeTrue("only runs against old or mixed cluster", clusterType == CLUSTER_TYPE.OLD || clusterType == CLUSTER_TYPE.MIXED); + assertBusy(this::checkBasicLicense); + } + + public void testNewClusterHasActiveNonExpiringBasic() throws Exception { + assumeTrue("only runs against upgraded cluster", clusterType == CLUSTER_TYPE.UPGRADED); + assertBusy(this::checkNonExpiringBasicLicense); + } + + private void checkBasicLicense() throws Exception { + Response licenseResponse = client().performRequest("GET", "/_xpack/license"); + Map licenseResponseMap = entityAsMap(licenseResponse); + Map licenseMap = (Map) licenseResponseMap.get("license"); + assertEquals("basic", licenseMap.get("type")); + assertEquals("active", licenseMap.get("status")); + } + + private void checkNonExpiringBasicLicense() throws Exception { + Response licenseResponse = client().performRequest("GET", "/_xpack/license"); + Map licenseResponseMap = entityAsMap(licenseResponse); + Map licenseMap = (Map) licenseResponseMap.get("license"); + assertEquals("basic", licenseMap.get("type")); + assertEquals("active", licenseMap.get("status")); + assertNull(licenseMap.get("expiry_date_in_millis")); + } +} diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle new file mode 100644 index 0000000000000..433dc08e1f39f --- /dev/null +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -0,0 +1,311 @@ +import org.elasticsearch.gradle.test.NodeInfo +import org.elasticsearch.gradle.test.RestIntegTestTask +import org.elasticsearch.gradle.Version + +import java.nio.charset.StandardCharsets +import java.util.regex.Matcher + +// Apply the java plugin to this project so the sources can be edited in an IDE +apply plugin: 'elasticsearch.build' +test.enabled = false + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('security'), configuration: 'runtime') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // to be moved in a later commit +} + +Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> + File tmpFile = new File(node.cwd, 'wait.success') + + // wait up to two minutes + final long stopTime = System.currentTimeMillis() + (2 * 60000L); + Exception lastException = null; + int lastResponseCode = 0 + + while (System.currentTimeMillis() < stopTime) { + + lastException = null; + // we use custom wait logic here as the elastic user is not available immediately and ant.get will fail when a 401 is returned + HttpURLConnection httpURLConnection = null; + try { + // TODO this sucks having to hardcode number of nodes, but node.config.numNodes isn't necessarily accurate for rolling + httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health?wait_for_nodes=2&wait_for_status=yellow").openConnection(); + httpURLConnection.setRequestProperty("Authorization", "Basic " + + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8))); + httpURLConnection.setRequestMethod("GET"); + httpURLConnection.setConnectTimeout(1000); + httpURLConnection.setReadTimeout(30000); // read needs to wait for nodes! 
+ httpURLConnection.connect(); + lastResponseCode = httpURLConnection.getResponseCode() + if (lastResponseCode == 200) { + tmpFile.withWriter StandardCharsets.UTF_8.name(), { + it.write(httpURLConnection.getInputStream().getText(StandardCharsets.UTF_8.name())) + } + break; + } + } catch (Exception e) { + logger.debug("failed to call cluster health", e) + lastException = e + } finally { + if (httpURLConnection != null) { + httpURLConnection.disconnect(); + } + } + + // did not start, so wait a bit before trying again + Thread.sleep(500L); + } + if (tmpFile.exists() == false) { + final String message = "final attempt of calling cluster health failed [lastResponseCode=${lastResponseCode}]" + if (lastException != null) { + logger.error(message, lastException) + } else { + logger.error(message + " [no exception]") + } + } + return tmpFile.exists() +} + +Project mainProject = project + +compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked" + +/** + * Subdirectories of this project are test rolling upgrades with various + * configuration options based on their name. + */ +subprojects { + Matcher m = project.name =~ /with(out)?-system-key/ + if (false == m.matches()) { + throw new InvalidUserDataException("Invalid project name [${project.name}]") + } + boolean withSystemKey = m.group(1) == null + + apply plugin: 'elasticsearch.standalone-test' + + // Use resources from the rolling-upgrade project in subdirectories + sourceSets { + test { + java { + srcDirs = ["${mainProject.projectDir}/src/test/java"] + } + resources { + srcDirs = ["${mainProject.projectDir}/src/test/resources"] + } + } + } + + String outputDir = "${buildDir}/generated-resources/${project.name}" + + // This is a top level task which we will add dependencies to below. + // It is a single task that can be used to backcompat tests against all versions. + task bwcTest { + description = 'Runs backwards compatibility tests.' + group = 'verification' + } + + String output = "${buildDir}/generated-resources/${project.name}" + task copyTestNodeKeystore(type: Copy) { + from project(xpackModule('core')) + .file('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks') + into outputDir + } + + for (Version version : bwcVersions.wireCompatible) { + String baseName = "v${version}" + + Task oldClusterTest = tasks.create(name: "${baseName}#oldClusterTest", type: RestIntegTestTask) { + mustRunAfter(precommit) + } + + Object extension = extensions.findByName("${baseName}#oldClusterTestCluster") + configure(extensions.findByName("${baseName}#oldClusterTestCluster")) { + dependsOn copyTestNodeKeystore + if (version.before('6.3.0')) { + plugin xpackProject('plugin').path + } + String usersCli = version.before('6.3.0') ? 
'bin/x-pack/users' : 'bin/elasticsearch-users' + setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + bwcVersion = version + numBwcNodes = 2 + numNodes = 2 + minimumMasterNodes = { 2 } + clusterName = 'rolling-upgrade' + waitCondition = waitWithAuth + setting 'xpack.monitoring.exporters._http.type', 'http' + setting 'xpack.monitoring.exporters._http.enabled', 'false' + setting 'xpack.monitoring.exporters._http.auth.username', 'test_user' + setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password' + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.transport.ssl.enabled', 'true' + setting 'xpack.security.authc.token.enabled', 'true' + setting 'xpack.security.audit.enabled', 'true' + setting 'xpack.security.audit.outputs', 'index' + setting 'xpack.ssl.keystore.path', 'testnode.jks' + setting 'xpack.ssl.keystore.password', 'testnode' + dependsOn copyTestNodeKeystore + extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') + if (withSystemKey) { + if (version.onOrAfter('5.1.0') && version.before('6.0.0')) { + // The setting didn't exist until 5.1.0 + setting 'xpack.security.system_key.required', 'true' + } + if (version.onOrAfter('6.0.0')) { + keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" + } else { + String systemKeyFile = version.before('6.3.0') ? 'x-pack/system_key' : 'system_key' + extraConfigFile systemKeyFile, "${mainProject.projectDir}/src/test/resources/system_key" + } + setting 'xpack.watcher.encrypt_sensitive_data', 'true' + } + } + + Task oldClusterTestRunner = tasks.getByName("${baseName}#oldClusterTestRunner") + oldClusterTestRunner.configure { + systemProperty 'tests.rest.suite', 'old_cluster' + } + + Task mixedClusterTest = tasks.create(name: "${baseName}#mixedClusterTest", type: RestIntegTestTask) + + configure(extensions.findByName("${baseName}#mixedClusterTestCluster")) { + dependsOn oldClusterTestRunner, "${baseName}#oldClusterTestCluster#node1.stop" + setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + clusterName = 'rolling-upgrade' + unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() } + minimumMasterNodes = { 2 } + dataDir = { nodeNumber -> oldClusterTest.nodes[1].dataDir } + waitCondition = waitWithAuth + setting 'xpack.monitoring.exporters._http.type', 'http' + setting 'xpack.monitoring.exporters._http.enabled', 'false' + setting 'xpack.monitoring.exporters._http.auth.username', 'test_user' + setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password' + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.transport.ssl.enabled', 'true' + setting 'xpack.ssl.keystore.path', 'testnode.jks' + keystoreSetting 'xpack.ssl.keystore.secure_password', 'testnode' + setting 'node.attr.upgraded', 'first' + setting 'xpack.security.authc.token.enabled', 'true' + setting 'xpack.security.audit.enabled', 'true' + setting 'xpack.security.audit.outputs', 'index' + setting 'node.name', 'mixed-node-0' + dependsOn copyTestNodeKeystore + extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') + if (withSystemKey) { + setting 'xpack.watcher.encrypt_sensitive_data', 'true' + keystoreFile 'xpack.watcher.encryption_key', 
"${mainProject.projectDir}/src/test/resources/system_key" + } + } + + Task mixedClusterTestRunner = tasks.getByName("${baseName}#mixedClusterTestRunner") + mixedClusterTestRunner.configure { + systemProperty 'tests.rest.suite', 'mixed_cluster' + finalizedBy "${baseName}#oldClusterTestCluster#node0.stop" + } + + Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask) + + configure(extensions.findByName("${baseName}#upgradedClusterTestCluster")) { + dependsOn(mixedClusterTestRunner, "${baseName}#oldClusterTestCluster#node0.stop") + setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + clusterName = 'rolling-upgrade' + unicastTransportUri = { seedNode, node, ant -> mixedClusterTest.nodes.get(0).transportUri() } + minimumMasterNodes = { 2 } + dataDir = { nodeNumber -> oldClusterTest.nodes[0].dataDir } + waitCondition = waitWithAuth + setting 'xpack.monitoring.exporters._http.type', 'http' + setting 'xpack.monitoring.exporters._http.enabled', 'false' + setting 'xpack.monitoring.exporters._http.auth.username', 'test_user' + setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password' + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.transport.ssl.enabled', 'true' + setting 'xpack.ssl.keystore.path', 'testnode.jks' + keystoreSetting 'xpack.ssl.keystore.secure_password', 'testnode' + setting 'xpack.security.authc.token.enabled', 'true' + setting 'xpack.security.audit.enabled', 'true' + setting 'xpack.security.audit.outputs', 'index' + setting 'node.name', 'upgraded-node-0' + dependsOn copyTestNodeKeystore + extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') + if (withSystemKey) { + setting 'xpack.watcher.encrypt_sensitive_data', 'true' + keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" + } + } + + Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") + upgradedClusterTestRunner.configure { + systemProperty 'tests.rest.suite', 'upgraded_cluster' + + // migration tests should only run when the original/old cluster nodes where versions < 5.2.0. 
+ // this stinks but we do the check here since our rest tests do not support conditionals + // otherwise we could check the index created version + String versionStr = project.extensions.findByName("${baseName}#oldClusterTestCluster").properties.get('bwcVersion') + String[] versionParts = versionStr.split('\\.') + if (versionParts[0].equals("5")) { + Integer minor = Integer.parseInt(versionParts[1]) + if (minor >= 2) { + systemProperty 'tests.rest.blacklist', '/20_security/Verify default password migration results in upgraded cluster' + } + } + // only need to kill the mixed cluster tests node here because we explicitly told it to not stop nodes upon completion + finalizedBy "${baseName}#mixedClusterTestCluster#stop" + } + + Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") { + dependsOn = [upgradedClusterTest] + } + + if (project.bwc_tests_enabled) { + bwcTest.dependsOn(versionBwcTest) + } + } + + test.enabled = false // no unit tests for rolling upgrades, only the rest integration test + + // basic integ tests includes testing bwc against the most recent version + task integTest { + if (project.bwc_tests_enabled) { + for (final def version : bwcVersions.snapshotsWireCompatible) { + dependsOn "v${version}#bwcTest" + } + } + } + check.dependsOn(integTest) + + dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('watcher')) + } + + compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked" + + // copy x-pack plugin info so it is on the classpath and security manager has the right permissions + task copyXPackRestSpec(type: Copy) { + dependsOn(project.configurations.restSpec, 'processTestResources') + from project(xpackProject('plugin').path).sourceSets.test.resources + include 'rest-api-spec/api/**' + into project.sourceSets.test.output.resourcesDir + } + + task copyXPackPluginProps(type: Copy) { + dependsOn(copyXPackRestSpec) + from project(xpackModule('core')).file('src/main/plugin-metadata') + from project(xpackModule('core')).tasks.pluginProperties + into outputDir + } + project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) + + repositories { + maven { + url "https://artifacts.elastic.co/maven" + } + maven { + url "https://snapshots.elastic.co/maven" + } + } +} diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java new file mode 100644 index 0000000000000..a9e1ccba614ef --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.upgrades; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.security.SecurityLifecycleServiceField; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; + +public abstract class AbstractUpgradeTestCase extends ESRestTestCase { + + private static final String BASIC_AUTH_VALUE = + basicAuthHeaderValue("test_user", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); + + @Override + protected boolean preserveIndicesUponCompletion() { + return true; + } + + @Override + protected boolean preserveReposUponCompletion() { + return true; + } + + @Override + protected boolean preserveTemplatesUponCompletion() { + return true; + } + + enum CLUSTER_TYPE { + OLD, + MIXED, + UPGRADED; + + public static CLUSTER_TYPE parse(String value) { + switch (value) { + case "old_cluster": + return OLD; + case "mixed_cluster": + return MIXED; + case "upgraded_cluster": + return UPGRADED; + default: + throw new AssertionError("unknown cluster type: " + value); + } + } + } + + protected final CLUSTER_TYPE clusterType = CLUSTER_TYPE.parse(System.getProperty("tests.rest.suite")); + + @Override + protected Settings restClientSettings() { + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE) + .build(); + } + + protected Collection templatesToWaitFor() { + return Collections.singletonList(SecurityLifecycleServiceField.SECURITY_TEMPLATE_NAME); + } + + @Before + public void setupForTests() throws Exception { + awaitBusy(() -> { + boolean success = true; + for (String template : templatesToWaitFor()) { + try { + final boolean exists = + adminClient().performRequest("HEAD", "_template/" + template).getStatusLine().getStatusCode() == 200; + success &= exists; + logger.debug("template [{}] exists [{}]", template, exists); + } catch (IOException e) { + logger.warn("error calling template api", e); + } + } + return success; + }); + } +} diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java new file mode 100644 index 0000000000000..ffdf0a39a909a --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.upgrades;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.entity.ContentType;
+import org.apache.http.entity.StringEntity;
+import org.elasticsearch.client.Response;
+import org.hamcrest.Matchers;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+public class IndexAuditUpgradeIT extends AbstractUpgradeTestCase {
+
+    public void testDocsAuditedInOldCluster() throws Exception {
+        assumeTrue("only runs against old cluster", clusterType == CLUSTER_TYPE.OLD);
+        assertBusy(() -> {
+            assertAuditDocsExist();
+            assertNumUniqueNodeNameBuckets(2);
+        });
+    }
+
+    public void testDocsAuditedInMixedCluster() throws Exception {
+        assumeTrue("only runs against mixed cluster", clusterType == CLUSTER_TYPE.MIXED);
+        assertBusy(() -> {
+            assertAuditDocsExist();
+            assertNumUniqueNodeNameBuckets(2);
+        });
+    }
+
+    public void testDocsAuditedInUpgradedCluster() throws Exception {
+        assumeTrue("only runs against upgraded cluster", clusterType == CLUSTER_TYPE.UPGRADED);
+        assertBusy(() -> {
+            assertAuditDocsExist();
+            assertNumUniqueNodeNameBuckets(4);
+        });
+    }
+
+    private void assertAuditDocsExist() throws Exception {
+        Response response = client().performRequest("GET", "/.security_audit_log*/doc/_count");
+        assertEquals(200, response.getStatusLine().getStatusCode());
+        Map<String, Object> responseMap = entityAsMap(response);
+        assertNotNull(responseMap.get("count"));
+        assertThat((Integer) responseMap.get("count"), Matchers.greaterThanOrEqualTo(1));
+    }
+
+    private void assertNumUniqueNodeNameBuckets(int numBuckets) throws Exception {
+        // call API that will hit all nodes
+        assertEquals(200, client().performRequest("GET", "/_nodes").getStatusLine().getStatusCode());
+
+        HttpEntity httpEntity = new StringEntity(
+            "{\n" +
+                "    \"aggs\" : {\n" +
+                "        \"nodes\" : {\n" +
+                "            \"terms\" : { \"field\" : \"node_name\" }\n" +
+                "        }\n" +
+                "    }\n" +
+                "}", ContentType.APPLICATION_JSON);
+        Response aggResponse = client().performRequest("GET", "/.security_audit_log*/_search",
+            Collections.singletonMap("pretty", "true"), httpEntity);
+        Map<String, Object> aggResponseMap = entityAsMap(aggResponse);
+        logger.debug("aggResponse {}", aggResponseMap);
+        Map<String, Object> aggregations = (Map<String, Object>) aggResponseMap.get("aggregations");
+        assertNotNull(aggregations);
+        Map<String, Object> nodesAgg = (Map<String, Object>) aggregations.get("nodes");
+        assertNotNull(nodesAgg);
+        List<Map<String, Object>> buckets = (List<Map<String, Object>>) nodesAgg.get("buckets");
+        assertNotNull(buckets);
+        assertEquals(numBuckets, buckets.size());
+    }
+}
diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java
new file mode 100644
index 0000000000000..4fa0c9a535f6c
--- /dev/null
+++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java
@@ -0,0 +1,214 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.upgrades; + +import org.apache.http.HttpHeaders; +import org.apache.http.HttpHost; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.message.BasicHeader; +import org.elasticsearch.Version; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.test.rest.yaml.ObjectPath; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase { + + public void testGeneratingTokenInOldCluster() throws Exception { + assumeTrue("this test should only run against the old cluster", clusterType == CLUSTER_TYPE.OLD); + final StringEntity tokenPostBody = new StringEntity("{\n" + + " \"username\": \"test_user\",\n" + + " \"password\": \"x-pack-test-password\",\n" + + " \"grant_type\": \"password\"\n" + + "}", ContentType.APPLICATION_JSON); + Response response = client().performRequest("POST", "_xpack/security/oauth2/token", Collections.emptyMap(), tokenPostBody); + assertOK(response); + Map responseMap = entityAsMap(response); + String token = (String) responseMap.get("access_token"); + assertNotNull(token); + assertTokenWorks(token); + + StringEntity oldClusterToken = new StringEntity("{\n" + + " \"token\": \"" + token + "\"\n" + + "}", ContentType.APPLICATION_JSON); + Response indexResponse = client().performRequest("PUT", "token_backwards_compatibility_it/doc/old_cluster_token1", + Collections.emptyMap(), oldClusterToken); + assertOK(indexResponse); + + response = client().performRequest("POST", "_xpack/security/oauth2/token", Collections.emptyMap(), tokenPostBody); + assertOK(response); + responseMap = entityAsMap(response); + token = (String) responseMap.get("access_token"); + assertNotNull(token); + assertTokenWorks(token); + oldClusterToken = new StringEntity("{\n" + + " \"token\": \"" + token + "\"\n" + + "}", ContentType.APPLICATION_JSON); + indexResponse = client().performRequest("PUT", "token_backwards_compatibility_it/doc/old_cluster_token2", + Collections.emptyMap(), oldClusterToken); + assertOK(indexResponse); + } + + public void testTokenWorksInMixedOrUpgradedCluster() throws Exception { + assumeTrue("this test should only run against the mixed or upgraded cluster", + clusterType == CLUSTER_TYPE.MIXED || clusterType == CLUSTER_TYPE.UPGRADED); + Response getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token1"); + assertOK(getResponse); + Map source = (Map) entityAsMap(getResponse).get("_source"); + assertTokenWorks((String) source.get("token")); + } + + public void testMixedCluster() throws Exception { + assumeTrue("this test should only run against the mixed cluster", clusterType == CLUSTER_TYPE.MIXED); + assumeTrue("the master must be on the latest version before we can write", isMasterOnLatestVersion()); + Response getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token2"); + assertOK(getResponse); + Map source = (Map) entityAsMap(getResponse).get("_source"); + final String token = (String) source.get("token"); + assertTokenWorks(token); + + final StringEntity body = new StringEntity("{\"token\": \"" + token + "\"}", ContentType.APPLICATION_JSON); + Response invalidationResponse = client().performRequest("DELETE", "_xpack/security/oauth2/token", 
Collections.emptyMap(), body); + assertOK(invalidationResponse); + assertTokenDoesNotWork(token); + + // create token and refresh on version that supports it + final StringEntity tokenPostBody = new StringEntity("{\n" + + " \"username\": \"test_user\",\n" + + " \"password\": \"x-pack-test-password\",\n" + + " \"grant_type\": \"password\"\n" + + "}", ContentType.APPLICATION_JSON); + try (RestClient client = getRestClientForCurrentVersionNodesOnly()) { + Response response = client.performRequest("POST", "_xpack/security/oauth2/token", Collections.emptyMap(), tokenPostBody); + assertOK(response); + Map responseMap = entityAsMap(response); + String accessToken = (String) responseMap.get("access_token"); + String refreshToken = (String) responseMap.get("refresh_token"); + assertNotNull(accessToken); + assertNotNull(refreshToken); + assertTokenWorks(accessToken); + + final StringEntity tokenRefresh = new StringEntity("{\n" + + " \"refresh_token\": \"" + refreshToken + "\",\n" + + " \"grant_type\": \"refresh_token\"\n" + + "}", ContentType.APPLICATION_JSON); + response = client.performRequest("POST", "_xpack/security/oauth2/token", Collections.emptyMap(), tokenRefresh); + assertOK(response); + responseMap = entityAsMap(response); + String updatedAccessToken = (String) responseMap.get("access_token"); + String updatedRefreshToken = (String) responseMap.get("refresh_token"); + assertNotNull(updatedAccessToken); + assertNotNull(updatedRefreshToken); + assertTokenWorks(updatedAccessToken); + assertTokenWorks(accessToken); + assertNotEquals(accessToken, updatedAccessToken); + assertNotEquals(refreshToken, updatedRefreshToken); + } + } + + public void testUpgradedCluster() throws Exception { + assumeTrue("this test should only run against the mixed cluster", clusterType == CLUSTER_TYPE.UPGRADED); + Response getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token2"); + assertOK(getResponse); + Map source = (Map) entityAsMap(getResponse).get("_source"); + final String token = (String) source.get("token"); + + // invalidate again since this may not have been invalidated in the mixed cluster + final StringEntity body = new StringEntity("{\"token\": \"" + token + "\"}", ContentType.APPLICATION_JSON); + Response invalidationResponse = client().performRequest("DELETE", "_xpack/security/oauth2/token", + Collections.singletonMap("error_trace", "true"), body); + assertOK(invalidationResponse); + assertTokenDoesNotWork(token); + + getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token1"); + assertOK(getResponse); + source = (Map) entityAsMap(getResponse).get("_source"); + final String workingToken = (String) source.get("token"); + assertTokenWorks(workingToken); + + final StringEntity tokenPostBody = new StringEntity("{\n" + + " \"username\": \"test_user\",\n" + + " \"password\": \"x-pack-test-password\",\n" + + " \"grant_type\": \"password\"\n" + + "}", ContentType.APPLICATION_JSON); + Response response = client().performRequest("POST", "_xpack/security/oauth2/token", Collections.emptyMap(), tokenPostBody); + assertOK(response); + Map responseMap = entityAsMap(response); + String accessToken = (String) responseMap.get("access_token"); + String refreshToken = (String) responseMap.get("refresh_token"); + assertNotNull(accessToken); + assertNotNull(refreshToken); + assertTokenWorks(accessToken); + + final StringEntity tokenRefresh = new StringEntity("{\n" + + " \"refresh_token\": \"" + refreshToken + "\",\n" + + " 
\"grant_type\": \"refresh_token\"\n" + + "}", ContentType.APPLICATION_JSON); + response = client().performRequest("POST", "_xpack/security/oauth2/token", Collections.emptyMap(), tokenRefresh); + assertOK(response); + responseMap = entityAsMap(response); + String updatedAccessToken = (String) responseMap.get("access_token"); + String updatedRefreshToken = (String) responseMap.get("refresh_token"); + assertNotNull(updatedAccessToken); + assertNotNull(updatedRefreshToken); + assertTokenWorks(updatedAccessToken); + assertTokenWorks(accessToken); + assertNotEquals(accessToken, updatedAccessToken); + assertNotEquals(refreshToken, updatedRefreshToken); + } + + private void assertTokenWorks(String token) throws IOException { + Response authenticateResponse = client().performRequest("GET", "_xpack/security/_authenticate", Collections.emptyMap(), + new BasicHeader(HttpHeaders.AUTHORIZATION, "Bearer " + token)); + assertOK(authenticateResponse); + assertEquals("test_user", entityAsMap(authenticateResponse).get("username")); + } + + private void assertTokenDoesNotWork(String token) { + ResponseException e = expectThrows(ResponseException.class, + () -> client().performRequest("GET", "_xpack/security/_authenticate", Collections.emptyMap(), + new BasicHeader(HttpHeaders.AUTHORIZATION, "Bearer " + token))); + assertEquals(401, e.getResponse().getStatusLine().getStatusCode()); + Response response = e.getResponse(); + assertEquals("Bearer realm=\"security\", error=\"invalid_token\", error_description=\"The access token expired\"", + response.getHeader("WWW-Authenticate")); + } + + private boolean isMasterOnLatestVersion() throws Exception { + Response response = client().performRequest("GET", "_cluster/state"); + assertOK(response); + final String masterNodeId = ObjectPath.createFromResponse(response).evaluate("master_node"); + response = client().performRequest("GET", "_nodes"); + assertOK(response); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + return Version.CURRENT.equals(Version.fromString(objectPath.evaluate("nodes." + masterNodeId + ".version"))); + } + + private RestClient getRestClientForCurrentVersionNodesOnly() throws IOException { + Response response = client().performRequest("GET", "_nodes"); + assertOK(response); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + Map nodesAsMap = objectPath.evaluate("nodes"); + List hosts = new ArrayList<>(); + for (Map.Entry entry : nodesAsMap.entrySet()) { + Map nodeDetails = (Map) entry.getValue(); + Version version = Version.fromString((String) nodeDetails.get("version")); + if (Version.CURRENT.equals(version)) { + Map httpInfo = (Map) nodeDetails.get("http"); + hosts.add(HttpHost.create((String) httpInfo.get("publish_address"))); + } + } + + return buildClient(restClientSettings(), hosts.toArray(new HttpHost[0])); + } +} diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..c9ad4b3053cbe --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; + +import org.apache.lucene.util.TimeUnits; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; +import org.junit.Before; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.HashMap; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.is; + +@TimeoutSuite(millis = 5 * TimeUnits.MINUTE) // to account for slow as hell VMs +public class UpgradeClusterClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + /** + * Waits for the Machine Learning templates to be created by {@link org.elasticsearch.plugins.MetaDataUpgrader} + */ + @Before + public void waitForTemplates() throws Exception { + XPackRestTestHelper.waitForMlTemplates(client()); + } + + @Override + protected boolean preserveIndicesUponCompletion() { + return true; + } + + @Override + protected boolean preserveTemplatesUponCompletion() { + return true; + } + + public UpgradeClusterClientYamlTestSuiteIT(ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return createParameters(); + } + + @Override + protected Settings restClientSettings() { + String token = "Basic " + Base64.getEncoder().encodeToString(("test_user:x-pack-test-password").getBytes(StandardCharsets.UTF_8)); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + // we increase the timeout here to 90 seconds to handle long waits for a green + // cluster health. the waits for green need to be longer than a minute to + // account for delayed shards + .put(ESRestTestCase.CLIENT_RETRY_TIMEOUT, "90s") + .put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "90s") + .build(); + } +} diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml new file mode 100644 index 0000000000000..93db3996a6ba9 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml @@ -0,0 +1,201 @@ +--- +setup: + - do: + cluster.health: + # if the primary shard of an index with (number_of_replicas > 0) ends up on the new node, the replica cannot be + # allocated to the old node (see NodeVersionAllocationDecider). x-pack automatically creates indices with + # replicas, for example monitoring-data-*. 
+ wait_for_status: yellow + wait_for_nodes: 2 + +--- +"Index data and search on the mixed cluster": + - do: + search: + index: test_index + + - match: { hits.total: 5 } # no new indexed data, so expect the original 5 documents from the old cluster + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "v1_mixed", "f2": 5}' + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "v2_mixed", "f2": 6}' + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "v3_mixed", "f2": 7}' + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "v4_mixed", "f2": 8}' + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "v5_mixed", "f2": 9}' + + - do: + index: + index: test_index + type: test_type + id: d10 + body: {"f1": "v6_mixed", "f2": 10} + + - do: + index: + index: test_index + type: test_type + id: d11 + body: {"f1": "v7_mixed", "f2": 11} + + - do: + index: + index: test_index + type: test_type + id: d12 + body: {"f1": "v8_mixed", "f2": 12} + + - do: + indices.refresh: + index: test_index + + - do: + search: + index: test_index + + - match: { hits.total: 13 } # 5 docs from old cluster, 8 docs from mixed cluster + + - do: + delete: + index: test_index + type: test_type + id: d10 + + - do: + delete: + index: test_index + type: test_type + id: d11 + + - do: + delete: + index: test_index + type: test_type + id: d12 + + - do: + indices.refresh: + index: test_index + +--- +"Basic scroll mixed": + - do: + indices.create: + index: test_scroll + - do: + index: + index: test_scroll + type: test + id: 42 + body: { foo: 1 } + + - do: + index: + index: test_scroll + type: test + id: 43 + body: { foo: 2 } + + - do: + indices.refresh: {} + + - do: + search: + index: test_scroll + size: 1 + scroll: 1m + sort: foo + body: + query: + match_all: {} + + - set: {_scroll_id: scroll_id} + - match: {hits.total: 2 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._id: "42" } + + - do: + index: + index: test_scroll + type: test + id: 44 + body: { foo: 3 } + + - do: + indices.refresh: {} + + - do: + scroll: + body: { "scroll_id": "$scroll_id", "scroll": "1m"} + + - match: {hits.total: 2 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._id: "43" } + + - do: + scroll: + scroll_id: $scroll_id + scroll: 1m + + - match: {hits.total: 2 } + - length: {hits.hits: 0 } + + - do: + clear_scroll: + scroll_id: $scroll_id + +--- +"Start scroll in mixed cluster for upgraded": + - do: + indices.create: + index: upgraded_scroll + wait_for_active_shards: all + body: + settings: + number_of_replicas: "0" + index.routing.allocation.include.upgraded: "first" + + - do: + index: + index: upgraded_scroll + type: test + id: 42 + body: { foo: 1 } + + - do: + index: + index: upgraded_scroll + type: test + id: 43 + body: { foo: 2 } + + - do: + indices.refresh: {} + + - do: + search: + index: upgraded_scroll + size: 1 + scroll: 5m + sort: foo + body: + query: + match_all: {} + - set: {_scroll_id: scroll_id} + - match: {hits.total: 2 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._id: "42" } + + - do: + index: + index: scroll_index + type: doc + id: 1 + body: { value: $scroll_id } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_security.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_security.yml new file mode 100644 index 0000000000000..e9189a916bba5 --- /dev/null +++ 
b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_security.yml @@ -0,0 +1,44 @@ +--- +"Verify user and role in mixed cluster": + - do: + headers: + Authorization: "Basic bmF0aXZlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" + cluster.health: + wait_for_status: yellow + wait_for_nodes: 2 + - match: { timed_out: false } + + - do: + xpack.security.get_user: + username: "native_user" + - match: { native_user.username: "native_user" } + - match: { native_user.roles.0: "native_role" } + + - do: + xpack.security.get_role: + name: "native_role" + - match: { native_role.cluster.0: "all" } + - match: { native_role.indices.0.names.0: "test_index" } + - match: { native_role.indices.0.privileges.0: "all" } + + - do: + xpack.security.clear_cached_roles: + name: "native_role" + + - do: + xpack.security.clear_cached_realms: + realms: "_all" + +--- +"verify users for default password migration in mixed cluster": + - skip: + version: " - 5.1.1" + reason: "the rest enabled action used by the old cluster test trips an assertion. see https://github.com/elastic/x-pack/pull/4443" + - do: + xpack.security.get_user: + username: "kibana,logstash_system" + - match: { kibana.enabled: false } + - match: { logstash_system.enabled: true } + + + diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml new file mode 100644 index 0000000000000..7bfbb5ad8c4d6 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml @@ -0,0 +1,96 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + wait_for_nodes: 2 + +--- +"Test get old cluster job": + - do: + xpack.ml.get_jobs: + job_id: old-cluster-job + - match: { count: 1 } + + - do: + xpack.ml.get_job_stats: + job_id: old-cluster-job + - match: { jobs.0.state: "closed" } + - match: { jobs.0.data_counts.processed_record_count: 2 } + - is_true: jobs.0.model_size_stats + - is_false: node + + - do: + xpack.ml.open_job: + job_id: old-cluster-job + + - do: + xpack.ml.get_job_stats: + job_id: old-cluster-job + - match: { jobs.0.state: "opened" } + - match: { jobs.0.data_counts.processed_record_count: 2 } + - is_true: jobs.0.model_size_stats + - is_true: jobs.0.node + - is_true: jobs.0.open_time + + - do: + xpack.ml.close_job: + job_id: old-cluster-job + + - do: + xpack.ml.get_buckets: + job_id: old-cluster-job + - match: { count: 1 } + +--- +"Create a job in the mixed cluster and write some data": + - do: + xpack.ml.put_job: + job_id: mixed-cluster-job + body: > + { + "description":"Mixed Cluster", + "analysis_config" : { + "bucket_span": "60s", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "analysis_limits" : { + "model_memory_limit": "50mb" + }, + "data_description" : { + "format":"xcontent", + "time_field":"time", + "time_format":"epoch" + } + } + - match: { job_id: mixed-cluster-job } + + - do: + xpack.ml.open_job: + job_id: mixed-cluster-job + + - do: + xpack.ml.post_data: + job_id: mixed-cluster-job + body: + - airline: AAL + responsetime: 132.2046 + sourcetype: post-data-job + time: 1403481600 + - airline: JZA + responsetime: 990.4628 + sourcetype: post-data-job + time: 1403481700 + - match: { processed_record_count: 2 } + + - do: + xpack.ml.close_job: + job_id: mixed-cluster-job + +--- +"Test get job with rules": + + - do: + xpack.ml.get_jobs: + job_id: 
old-cluster-job-with-rules + - match: { count: 1 } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml new file mode 100644 index 0000000000000..8a06c91cc8a01 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml @@ -0,0 +1,60 @@ +setup: + - do: + cluster.health: + wait_for_status: yellow + wait_for_nodes: 2 + +--- +"Test old cluster datafeed": + - do: + xpack.ml.get_datafeeds: + datafeed_id: old-cluster-datafeed + - match: { datafeeds.0.datafeed_id: "old-cluster-datafeed"} + - length: { datafeeds.0.indices: 1 } + - length: { datafeeds.0.types: 1 } + - gte: { datafeeds.0.scroll_size: 2000 } + + - do: + xpack.ml.get_datafeed_stats: + datafeed_id: old-cluster-datafeed + - match: { datafeeds.0.state: "stopped"} + - is_false: datafeeds.0.node + +--- +"Put job and datafeed in mixed cluster": + + - do: + xpack.ml.put_job: + job_id: mixed-cluster-datafeed-job + body: > + { + "description":"Cluster upgrade", + "analysis_config" : { + "bucket_span": "60s", + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "50mb" + }, + "data_description" : { + "format":"xcontent", + "time_field":"time" + } + } + + - do: + xpack.ml.put_datafeed: + datafeed_id: mixed-cluster-datafeed + body: > + { + "job_id":"mixed-cluster-datafeed-job", + "indices":["airline-data"], + "types":["response"], + "scroll_size": 2000 + } + + - do: + xpack.ml.get_datafeed_stats: + datafeed_id: mixed-cluster-datafeed + - match: { datafeeds.0.state: stopped} + - is_false: datafeeds.0.node diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/50_token_auth.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/50_token_auth.yml new file mode 100644 index 0000000000000..093902f8d0af0 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/50_token_auth.yml @@ -0,0 +1,50 @@ +--- +"Get the indexed token and use if to authenticate": + - skip: + features: headers + + - do: + get: + index: token_index + type: doc + id: "6" + + - match: { _index: token_index } + - match: { _type: doc } + - match: { _id: "6" } + - is_true: _source.token + - set: { _source.token : token } + + - do: + headers: + Authorization: Bearer ${token} + xpack.security.authenticate: {} + + - match: { username: "token_user" } + - match: { roles.0: "superuser" } + - match: { full_name: "Token User" } + + - do: + headers: + Authorization: Bearer ${token} + search: + index: token_index + + - match: { hits.total: 6 } + + - do: + headers: + Authorization: Bearer ${token} + search: + index: token_index + + - match: { hits.total: 6 } + + - do: + headers: + Authorization: Bearer ${token} + search: + index: token_index + + - match: { hits.total: 6 } + diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml new file mode 100644 index 0000000000000..a780709400a29 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml @@ -0,0 +1,31 @@ +--- +"Index data and search on the old cluster": + - do: + indices.create: + index: test_index + wait_for_active_shards : all + body: + settings: + index: + number_of_replicas: 1 + + - do: + 
bulk: + refresh: true + body: + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "v1_old", "f2": 0}' + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "v2_old", "f2": 1}' + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "v3_old", "f2": 2}' + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "v4_old", "f2": 3}' + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "v5_old", "f2": 4}' + + - do: + search: + index: test_index + + - match: { hits.total: 5 } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_security.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_security.yml new file mode 100644 index 0000000000000..119f6f4874960 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_security.yml @@ -0,0 +1,75 @@ +--- +"Verify native store security actions": + # create native user and role + - do: + xpack.security.put_user: + username: "native_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "native_role" ] + } + - match: { user: { created: true } } + + - do: + xpack.security.put_role: + name: "native_role" + body: > + { + "cluster": ["all"], + "indices": [ + { + "names": "test_index", + "privileges": ["all"] + } + ] + } + - match: { role: { created: true } } + + # validate that the user and role work in the cluster by executing a health request and getting a valid response back + - do: + headers: + Authorization: "Basic bmF0aXZlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" + cluster.health: {} + - match: { timed_out: false } + + - do: + xpack.security.clear_cached_roles: + name: "native_role" + + - do: + xpack.security.clear_cached_realms: + realms: "_all" + + - do: + cluster.health: + index: ".security" + wait_for_active_shards: 2 # 1 primary and 1 replica since we have two nodes + + # Check that enabling a user in old cluster will not prevent the user from having a "default password" in the new cluster. 
+ # See: org.elasticsearch.xpack.security.authc.esnative.NativeRealmMigrator.doConvertDefaultPasswords + - do: + xpack.security.disable_user: + username: "kibana" + + - do: + xpack.security.get_user: + username: "kibana" + - match: { kibana.enabled: false } + + - do: + xpack.security.change_password: + username: "logstash_system" + body: > + { + "password" : "changed-it" + } + + - do: + xpack.security.enable_user: + username: "logstash_system" + + - do: + xpack.security.get_user: + username: "logstash_system" + - match: { logstash_system.enabled: true } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml new file mode 100644 index 0000000000000..61f39107a2d92 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml @@ -0,0 +1,209 @@ +--- +"Put job on the old cluster and post some data": + + - do: + xpack.ml.put_job: + job_id: old-cluster-job + body: > + { + "description":"Cluster upgrade", + "analysis_config" : { + "bucket_span": "60s", + "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}] + }, + "analysis_limits" : { + "model_memory_limit": "50mb" + }, + "data_description" : { + "format":"xcontent", + "time_field":"time", + "time_format":"epoch" + } + } + - match: { job_id: old-cluster-job } + + - do: + xpack.ml.open_job: + job_id: old-cluster-job + + - do: + xpack.ml.post_data: + job_id: old-cluster-job + body: + - airline: AAL + responsetime: 132.2046 + sourcetype: post-data-job + time: 1403481600 + - airline: JZA + responsetime: 990.4628 + sourcetype: post-data-job + time: 1403481700 + - match: { processed_record_count: 2 } + + - do: + xpack.ml.close_job: + job_id: old-cluster-job + + - do: + xpack.ml.get_buckets: + job_id: old-cluster-job + - match: { count: 1 } + +--- +"Put job on the old cluster with the default model memory limit and post some data": + - do: + xpack.ml.put_job: + job_id: no-model-memory-limit-job + body: > + { + "analysis_config" : { + "bucket_span": "60s", + "detectors" :[{"function":"count"}] + }, + "data_description" : { + "time_field":"time", + "time_format":"epoch" + } + } + - match: { job_id: no-model-memory-limit-job } + + - do: + xpack.ml.open_job: + job_id: no-model-memory-limit-job + + - do: + xpack.ml.post_data: + job_id: no-model-memory-limit-job + body: + - sourcetype: post-data-job + time: 1403481600 + - sourcetype: post-data-job + time: 1403484700 + - sourcetype: post-data-job + time: 1403487700 + - sourcetype: post-data-job + time: 1403490700 + - sourcetype: post-data-job + time: 1403493700 + - match: { processed_record_count: 5 } + + - do: + xpack.ml.close_job: + job_id: no-model-memory-limit-job + + - do: + xpack.ml.get_buckets: + job_id: no-model-memory-limit-job + - match: { count: 201 } + +--- +"Put job with empty strings in the configuration": + - do: + xpack.ml.put_job: + job_id: old-cluster-job-empty-fields + body: > + { + "description": "Cluster upgrade bad config", + "analysis_config" : { + "influencers": "", + "bucket_span": "60s", + "detectors" :[{"function":"count","field_name":""}] + }, + "analysis_limits" : { + "model_memory_limit": "50mb" + }, + "data_description" : { + "format":"xcontent", + "time_field":"time", + "time_format":"epoch" + } + } + +--- +"Test job with pre 6.2 rules": + + - skip: + version: "6.2.0 - " + reason: "Rules fields were renamed on 6.2.0" + + - do: + 
xpack.ml.put_job: + job_id: old-cluster-job-with-rules + body: > + { + "analysis_config": { + "detectors": [ + { + "function": "count", + "by_field_name": "country", + "detector_rules": [ + { + "rule_action": "filter_results", + "rule_conditions": [ + { + "condition_type":"numerical_actual", + "field_name":"country", + "field_value": "uk", + "condition": {"operator":"lt","value":"33.3"} + }, + {"condition_type":"categorical", "field_name":"country", "value_filter": "foo"} + ] + } + ] + } + ] + }, + "data_description" : {} + } + +--- +"Test job with post 6.2 rules": + + - skip: + version: " - 6.1.99" + reason: "Rules fields were renamed on 6.2.0" + + - do: + xpack.ml.put_job: + job_id: old-cluster-job-with-rules + body: > + { + "analysis_config": { + "detectors": [ + { + "function": "count", + "by_field_name": "country", + "rules": [ + { + "actions": ["filter_results"], + "conditions": [ + { + "type":"numerical_actual", + "field_name":"country", + "field_value": "uk", + "condition": {"operator":"lt","value":"33.3"} + }, + {"type":"categorical", "field_name":"country", "filter_id": "foo"} + ] + } + ] + } + ] + }, + "data_description" : {} + } + +--- +"Test function shortcut expansion": + - do: + xpack.ml.put_job: + job_id: old-cluster-function-shortcut-expansion + body: > + { + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"nzc","by_field_name":"airline"}] + }, + "data_description" : {} + } + - match: { job_id: "old-cluster-function-shortcut-expansion" } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml new file mode 100644 index 0000000000000..c1317bdf3d660 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml @@ -0,0 +1,39 @@ +--- +"Put job and datafeed in old cluster": + + - do: + xpack.ml.put_job: + job_id: old-cluster-datafeed-job + body: > + { + "description":"Cluster upgrade", + "analysis_config" : { + "bucket_span": "60s", + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "50mb" + }, + "data_description" : { + "format":"xcontent", + "time_field":"time" + } + } + - match: { job_id: old-cluster-datafeed-job } + + - do: + xpack.ml.put_datafeed: + datafeed_id: old-cluster-datafeed + body: > + { + "job_id":"old-cluster-datafeed-job", + "indices":["airline-data"], + "types":["response"], + "scroll_size": 2000 + } + + - do: + xpack.ml.get_datafeed_stats: + datafeed_id: old-cluster-datafeed + - match: { datafeeds.0.state: stopped} + - is_false: datafeeds.0.node diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/50_token_auth.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/50_token_auth.yml new file mode 100644 index 0000000000000..864332ecd336a --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/50_token_auth.yml @@ -0,0 +1,85 @@ +--- +"Create a token and reuse it across the upgrade": + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.security.put_user: + username: "token_user" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "superuser" ], + "full_name" : "Token User" + } + + - do: + xpack.security.get_token: + body: + grant_type: "password" + username: "token_user" + password: "x-pack-test-password" + + - match: { 
type: "Bearer" } + - is_true: access_token + - set: { access_token: token } + - match: { expires_in: 1200 } + - is_false: scope + + - do: + headers: + Authorization: Bearer ${token} + xpack.security.authenticate: {} + + - match: { username: "token_user" } + - match: { roles.0: "superuser" } + - match: { full_name: "Token User" } + + - do: + indices.create: + index: token_index + wait_for_active_shards : all + body: + settings: + index: + number_of_replicas: 1 + + - do: + headers: + Authorization: Bearer ${token} + bulk: + refresh: true + body: + - '{"index": {"_index": "token_index", "_type": "doc", "_id" : "1"}}' + - '{"f1": "v1_old", "f2": 0}' + - '{"index": {"_index": "token_index", "_type": "doc", "_id" : "2"}}' + - '{"f1": "v2_old", "f2": 1}' + - '{"index": {"_index": "token_index", "_type": "doc", "_id" : "3"}}' + - '{"f1": "v3_old", "f2": 2}' + - '{"index": {"_index": "token_index", "_type": "doc", "_id" : "4"}}' + - '{"f1": "v4_old", "f2": 3}' + - '{"index": {"_index": "token_index", "_type": "doc", "_id" : "5"}}' + - '{"f1": "v5_old", "f2": 4}' + + - do: + headers: + Authorization: Bearer ${token} + search: + index: token_index + + - match: { hits.total: 5 } + + # we do store the token in the index such that we can reuse it down the road once + # the cluster is upgraded + - do: + headers: + Authorization: Bearer ${token} + index: + index: token_index + type: doc + id: "6" + body: { "token" : "${token}"} diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml new file mode 100644 index 0000000000000..9c3443339a7cd --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml @@ -0,0 +1,67 @@ +--- +"Index data and search on the upgraded cluster": + - do: + cluster.health: + wait_for_status: green + wait_for_nodes: 2 + # wait for long enough that we give delayed unassigned shards to stop being delayed + timeout: 70s + level: shards + + - do: + search: + index: test_index + + - match: { hits.total: 10 } # no new indexed data, so expect the original 10 documents from the old and mixed clusters + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "v1_upgraded", "f2": 10}' + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "v2_upgraded", "f2": 11}' + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "v3_upgraded", "f2": 12}' + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "v4_upgraded", "f2": 13}' + - '{"index": {"_index": "test_index", "_type": "test_type"}}' + - '{"f1": "v5_upgraded", "f2": 14}' + + - do: + search: + index: test_index + + - match: { hits.total: 15 } # 10 docs from previous clusters plus 5 new docs + +--- +"Get indexed scroll and execute scroll": + - do: + get: + index: scroll_index + type: doc + id: 1 + + - set: {_source.value: scroll_id} + + - do: + scroll: + scroll_id: $scroll_id + scroll: 1m + + - match: {hits.total: 2 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._id: "43" } + + - do: + scroll: + scroll_id: $scroll_id + scroll: 1m + + - match: {hits.total: 2 } + - length: {hits.hits: 0 } + + - do: + clear_scroll: + scroll_id: $scroll_id diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_security.yml 
b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_security.yml new file mode 100644 index 0000000000000..9c7097483914b --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_security.yml @@ -0,0 +1,25 @@ +--- +"Verify user and role in upgraded cluster": + - do: + headers: + Authorization: "Basic bmF0aXZlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" + cluster.health: + wait_for_status: green + wait_for_nodes: 2 + # wait for long enough that we give delayed unassigned shards to stop being delayed + timeout: 70s + - match: { timed_out: false } + + - do: + xpack.security.get_user: + username: "native_user" + - match: { native_user.username: "native_user" } + - match: { native_user.roles.0: "native_role" } + + - do: + xpack.security.get_role: + name: "native_role" + - match: { native_role.cluster.0: "all" } + - match: { native_role.indices.0.names.0: "test_index" } + - match: { native_role.indices.0.privileges.0: "all" } + diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml new file mode 100644 index 0000000000000..69b1cdcdac00e --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml @@ -0,0 +1,129 @@ +setup: + - do: + cluster.health: + wait_for_status: green + wait_for_nodes: 2 + # wait for long enough that we give delayed unassigned shards to stop being delayed + timeout: 70s + +--- +"Test open old jobs": + + - do: + xpack.ml.open_job: + job_id: old-cluster-job + + - do: + xpack.ml.get_job_stats: + job_id: old-cluster-job + - match: { jobs.0.state: "opened" } + - match: { jobs.0.data_counts.processed_record_count: 2 } + - is_true: jobs.0.model_size_stats + - is_true: jobs.0.node + - is_true: jobs.0.open_time + + - do: + xpack.ml.open_job: + job_id: mixed-cluster-job + + - do: + xpack.ml.get_job_stats: + job_id: mixed-cluster-job + - match: { jobs.0.state: "opened" } + - match: { jobs.0.data_counts.processed_record_count: 2 } + - is_true: jobs.0.model_size_stats + - is_true: jobs.0.node + - is_true: jobs.0.open_time + + - do: + xpack.ml.close_job: + job_id: old-cluster-job + + - do: + xpack.ml.close_job: + job_id: mixed-cluster-job + + - do: + xpack.ml.get_buckets: + job_id: old-cluster-job + - match: { count: 1 } + + - do: + xpack.ml.get_buckets: + job_id: mixed-cluster-job + - match: { count: 1 } + + - do: + xpack.ml.delete_job: + job_id: old-cluster-job + - match: { acknowledged: true } + + - do: + catch: missing + xpack.ml.get_jobs: + job_id: old-cluster-job + + - do: + xpack.ml.delete_job: + job_id: mixed-cluster-job + - match: { acknowledged: true } + + - do: + catch: missing + xpack.ml.get_jobs: + job_id: mixed-cluster-job + +--- +"Test job with no model memory limit has established model memory after reopening": + - do: + xpack.ml.open_job: + job_id: no-model-memory-limit-job + + - do: + xpack.ml.get_jobs: + job_id: no-model-memory-limit-job + - is_true: jobs.0.established_model_memory + - lt: { jobs.0.established_model_memory: 100000 } + + - do: + xpack.ml.close_job: + job_id: no-model-memory-limit-job + + - do: + xpack.ml.delete_job: + job_id: no-model-memory-limit-job + - match: { acknowledged: true } + +--- +"Test get job with rules": + + - do: + xpack.ml.get_jobs: + job_id: old-cluster-job-with-rules + - match: { count: 1 } + - match: { + jobs.0.analysis_config.detectors.0.rules: 
[ + { + "actions": ["filter_results"], + "conditions_connective": "or", + "conditions": [ + { + "type":"numerical_actual", + "field_name":"country", + "field_value": "uk", + "condition": {"operator":"lt","value":"33.3"} + }, + {"type":"categorical", "field_name":"country", "filter_id": "foo"} + ] + } + ] + } + +--- +"Test get job with function shortcut should expand": + + - do: + xpack.ml.get_jobs: + job_id: old-cluster-function-shortcut-expansion + - match: { count: 1 } + - match: { jobs.0.analysis_config.detectors.0.function: "non_zero_count" } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml new file mode 100644 index 0000000000000..ed6a66ae1a51f --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml @@ -0,0 +1,100 @@ +setup: + - do: + cluster.health: + wait_for_status: green + wait_for_nodes: 2 + # wait for long enough that we give delayed unassigned shards to stop being delayed + timeout: 70s + + - do: + indices.create: + index: airline-data + body: + mappings: + response: + properties: + time: + type: date + +--- +"Test old and mixed cluster datafeeds": + - do: + xpack.ml.get_datafeeds: + datafeed_id: old-cluster-datafeed + - match: { datafeeds.0.datafeed_id: "old-cluster-datafeed"} + - length: { datafeeds.0.indices: 1 } + - length: { datafeeds.0.types: 1 } + - gte: { datafeeds.0.scroll_size: 2000 } + + - do: + xpack.ml.get_datafeed_stats: + datafeed_id: old-cluster-datafeed + - match: { datafeeds.0.state: "stopped"} + - is_false: datafeeds.0.node + + - do: + xpack.ml.get_datafeeds: + datafeed_id: mixed-cluster-datafeed + - match: { datafeeds.0.datafeed_id: "mixed-cluster-datafeed"} + - length: { datafeeds.0.indices: 1 } + - length: { datafeeds.0.types: 1 } + - gte: { datafeeds.0.scroll_size: 2000 } + + - do: + xpack.ml.get_datafeed_stats: + datafeed_id: mixed-cluster-datafeed + - match: { datafeeds.0.state: "stopped"} + - is_false: datafeeds.0.node + + - do: + xpack.ml.open_job: + job_id: old-cluster-datafeed-job + + - do: + xpack.ml.start_datafeed: + datafeed_id: old-cluster-datafeed + start: 0 + + - do: + xpack.ml.stop_datafeed: + datafeed_id: old-cluster-datafeed + + - do: + xpack.ml.close_job: + job_id: old-cluster-datafeed-job + + - do: + xpack.ml.delete_datafeed: + datafeed_id: old-cluster-datafeed + + - do: + xpack.ml.delete_job: + job_id: old-cluster-datafeed-job + - match: { acknowledged: true } + + - do: + xpack.ml.open_job: + job_id: mixed-cluster-datafeed-job + + - do: + xpack.ml.start_datafeed: + datafeed_id: mixed-cluster-datafeed + start: 0 + + - do: + xpack.ml.stop_datafeed: + datafeed_id: mixed-cluster-datafeed + + - do: + xpack.ml.close_job: + job_id: mixed-cluster-datafeed-job + + - do: + xpack.ml.delete_datafeed: + datafeed_id: mixed-cluster-datafeed + + - do: + xpack.ml.delete_job: + job_id: mixed-cluster-datafeed-job + - match: { acknowledged: true } + diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/50_token_auth.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/50_token_auth.yml new file mode 100644 index 0000000000000..9f576512fc70b --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/50_token_auth.yml @@ -0,0 +1,40 @@ +--- +"Get the indexed token and use if to authenticate": + - skip: + 
features: headers + - do: + get: + index: token_index + type: doc + id: "6" + + - match: { _index: token_index } + - match: { _type: doc } + - match: { _id: "6" } + - is_true: _source.token + - set: { _source.token : token } + + - do: + headers: + Authorization: Bearer ${token} + xpack.security.authenticate: {} + + - match: { username: "token_user" } + - match: { roles.0: "superuser" } + - match: { full_name: "Token User" } + + - do: + headers: + Authorization: Bearer ${token} + search: + index: token_index + + - match: { hits.total: 6 } + + # counter example that we are really checking this + - do: + headers: + Authorization: Bearer boom + catch: /missing authentication token/ + search: + index: token_index diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/system_key b/x-pack/qa/rolling-upgrade/src/test/resources/system_key new file mode 100644 index 0000000000000..a72e0d6e77632 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/system_key @@ -0,0 +1 @@ +�{�����+�dTI;f����̭�l���|�}�j���D�vYW�V5��K�h�8��ΪP� z~��Ճa�),$j��.����^��w�ɴȐ38�v �}��|�^[ �F�����"ԑ�Ǘ�� \ No newline at end of file diff --git a/x-pack/qa/rolling-upgrade/with-system-key/build.gradle b/x-pack/qa/rolling-upgrade/with-system-key/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/qa/rolling-upgrade/without-system-key/build.gradle b/x-pack/qa/rolling-upgrade/without-system-key/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/qa/saml-idp-tests/build.gradle b/x-pack/qa/saml-idp-tests/build.gradle new file mode 100644 index 0000000000000..251311fe8cd0a --- /dev/null +++ b/x-pack/qa/saml-idp-tests/build.gradle @@ -0,0 +1,87 @@ +Project idpFixtureProject = xpackProject("test:idp-fixture") +evaluationDependsOn(idpFixtureProject.path) + +apply plugin: 'elasticsearch.vagrantsupport' +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') + testCompile 'com.google.jimfs:jimfs:1.1' +} + +task idpFixture { + dependsOn "vagrantCheckVersion", "virtualboxCheckVersion", idpFixtureProject.up +} + +String outputDir = "generated-resources/${project.name}" +task copyIdpCertificate(type: Copy) { + from idpFixtureProject.file('src/main/resources/certs/ca.crt'); + into outputDir +} +if (project.rootProject.vagrantSupported) { + project.sourceSets.test.output.dir(outputDir, builtBy: copyIdpCertificate) + integTestCluster.dependsOn idpFixture, copyIdpCertificate + integTest.finalizedBy idpFixtureProject.halt +} else { + integTest.enabled = false +} + +integTestCluster { + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.http.ssl.enabled', 'false' + setting 'xpack.security.authc.token.enabled', 'true' + setting 'xpack.security.authc.realms.file.type', 'file' + setting 'xpack.security.authc.realms.file.order', '0' + setting 'xpack.security.authc.realms.shibboleth.type', 'saml' + setting 'xpack.security.authc.realms.shibboleth.order', '1' + setting 'xpack.security.authc.realms.shibboleth.idp.entity_id', 'https://test.shibboleth.elastic.local/' + setting 'xpack.security.authc.realms.shibboleth.idp.metadata.path', 'idp-metadata.xml' + setting 
'xpack.security.authc.realms.shibboleth.sp.entity_id', 'http://mock.http.elastic.local/' + // The port in the ACS URL is fake - the test will bind the mock webserver + // to a random port and then whenever it needs to connect to a URL on the + // mock webserver it will replace 54321 with the real port + setting 'xpack.security.authc.realms.shibboleth.sp.acs', 'http://localhost:54321/saml/acs' + setting 'xpack.security.authc.realms.shibboleth.attributes.principal', 'uid' + setting 'xpack.security.authc.realms.shibboleth.attributes.name', 'urn:oid:2.5.4.3' + setting 'xpack.ml.enabled', 'false' + + extraConfigFile 'idp-metadata.xml', idpFixtureProject.file("src/main/resources/provision/generated/idp-metadata.xml") + + setupCommand 'setupTestAdmin', + 'bin/elasticsearch-users', 'useradd', "test_admin", '-p', 'x-pack-test-password', '-r', "superuser" + + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'test_admin', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} + +forbiddenPatterns { + exclude '**/*.der' + exclude '**/*.p12' + exclude '**/*.key' +} + +thirdPartyAudit.excludes = [ + // uses internal java api: sun.misc.Unsafe + 'com.google.common.cache.Striped64', + 'com.google.common.cache.Striped64$1', + 'com.google.common.cache.Striped64$Cell', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', + // missing + 'com.ibm.icu.lang.UCharacter' +] + diff --git a/x-pack/qa/saml-idp-tests/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java b/x-pack/qa/saml-idp-tests/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java new file mode 100644 index 0000000000000..67d338ab2db56 --- /dev/null +++ b/x-pack/qa/saml-idp-tests/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java @@ -0,0 +1,574 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.saml; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.apache.http.Header; +import org.apache.http.HttpEntity; +import org.apache.http.HttpHost; +import org.apache.http.HttpResponse; +import org.apache.http.NameValuePair; +import org.apache.http.StatusLine; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.entity.UrlEncodedFormEntity; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpRequestBase; +import org.apache.http.client.utils.URLEncodedUtils; +import org.apache.http.cookie.Cookie; +import org.apache.http.cookie.CookieOrigin; +import org.apache.http.cookie.MalformedCookieException; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.impl.cookie.DefaultCookieSpec; +import org.apache.http.message.BasicHeader; +import org.apache.http.message.BasicNameValuePair; +import org.apache.http.protocol.BasicHttpContext; +import org.apache.http.protocol.HTTP; +import org.apache.http.protocol.HttpContext; +import org.apache.http.protocol.HttpCoreContext; +import org.apache.http.util.CharArrayBuffer; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cli.SuppressForbidden; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.mocksocket.MockHttpServer; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.common.socket.SocketAccess; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.ssl.CertUtils; +import org.hamcrest.Matchers; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import javax.net.ssl.KeyManager; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManager; +import javax.net.ssl.X509ExtendedTrustManager; + +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.file.Path; +import java.security.SecureRandom; +import java.security.cert.Certificate; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static java.util.Collections.emptyMap; +import static org.elasticsearch.common.xcontent.XContentHelper.convertToMap; +import 
static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.iterableWithSize; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.startsWith; + +/** + * An integration test for validating SAML authentication against a real Identity Provider (Shibboleth) + */ +@SuppressForbidden(reason = "uses sun http server") +public class SamlAuthenticationIT extends ESRestTestCase { + + private static final String SP_LOGIN_PATH = "/saml/login"; + private static final String SP_ACS_PATH = "/saml/acs"; + private static final String SAML_RESPONSE_FIELD = "SAMLResponse"; + private static final String REQUEST_ID_COOKIE = "saml-request-id"; + + private static final String KIBANA_PASSWORD = "K1b@na K1b@na K1b@na"; + private static HttpServer httpServer; + + @BeforeClass + public static void setupHttpServer() throws IOException { + InetSocketAddress address = new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0); + httpServer = MockHttpServer.createHttp(address, 0); + httpServer.start(); + } + + @AfterClass + public static void shutdownHttpServer() { + final Executor executor = httpServer.getExecutor(); + if (executor instanceof ExecutorService) { + try { + terminate((ExecutorService) executor); + } catch (InterruptedException e) { + // oh well + } + } + httpServer.stop(0); + httpServer = null; + } + + @Before + public void setupHttpContext() { + httpServer.createContext(SP_LOGIN_PATH, wrapFailures(this::httpLogin)); + httpServer.createContext(SP_ACS_PATH, wrapFailures(this::httpAcs)); + } + + /** + * Wraps a {@code HttpHandler} in a {@code try-catch} block that returns a + * 500 server error if an exception or an {@link AssertionError} occurs. + */ + private HttpHandler wrapFailures(HttpHandler handler) { + return http -> { + try { + handler.handle(http); + } catch (AssertionError | Exception e) { + logger.warn(new ParameterizedMessage("Failure while handling {}", http.getRequestURI()), e); + http.getResponseHeaders().add("x-test-failure", e.toString()); + http.sendResponseHeaders(500, 0); + http.close(); + throw e; + } + }; + } + + @After + public void clearHttpContext() { + httpServer.removeContext(SP_LOGIN_PATH); + httpServer.removeContext(SP_ACS_PATH); + } + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue("test_admin", new SecureString("x-pack-test-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + /** + * We perform all requests to Elasticsearch as the "kibana" user, as this is the user that will be used + * in a typical SAML deployment (where Kibana is providing the UI for the SAML Web SSO interactions). + * Before we can use the Kibana user, we need to set its password to something we know. + */ + @Before + public void setKibanaPassword() throws IOException { + final HttpEntity json = new StringEntity("{ \"password\" : \"" + KIBANA_PASSWORD + "\" }", ContentType.APPLICATION_JSON); + final Response response = adminClient().performRequest("PUT", "/_xpack/security/user/kibana/_password", emptyMap(), json); + assertOK(response); + } + + /** + * This is a simple mapping that maps the "thor" user in the "shibboleth" realm to the "kibana_users" role. 
+ * We could do something more complex, but we have unit tests for role-mapping - this is just to verify that + * the mapping runs OK in a real environment. + */ + @Before + public void setupRoleMapping() throws IOException { + final StringEntity json = new StringEntity(Strings // top-level + .toString(XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .array("roles", new String[] { "kibana_user"} ) + .field("enabled", true) + .startObject("rules") + .startArray("all") + .startObject().startObject("field").field("username", "thor").endObject().endObject() + .startObject().startObject("field").field("realm.name", "shibboleth").endObject().endObject() + .endArray() // "all" + .endObject() // "rules" + .endObject()), ContentType.APPLICATION_JSON); + + final Response response = adminClient().performRequest("PUT", "/_xpack/security/role_mapping/thor-kibana", emptyMap(), json); + assertOK(response); + } + + /** + * Tests that a user can login via a SAML idp: + * It uses: + *
<ul>
 + * <li>A real IdP (Shibboleth, running locally)</li>
 + * <li>A fake UI, running in this JVM, that roughly mimics Kibana (see {@link #httpLogin}, {@link #httpAcs})</li>
 + * <li>A fake web browser (apache http client)</li>
 + * </ul>
 + * It takes the following steps:
 + * <ol>
 + * <li>Requests a "login" on the local UI</li>
 + * <li>Walks through the login process at the IdP</li>
 + * <li>Receives a JSON response from the local UI that has a Bearer token</li>
 + * <li>Uses that token to verify the user details</li>
 + * </ol>
    + */ + public void testLoginUser() throws Exception { + final BasicHttpContext context = new BasicHttpContext(); + try (CloseableHttpClient client = getHttpClient()) { + final URI loginUri = goToLoginPage(client, context); + final URI consentUri = submitLoginForm(client, context, loginUri); + final Tuple tuple = submitConsentForm(context, client, consentUri); + final Map result = submitSamlResponse(context, client, tuple.v1(), tuple.v2()); + assertThat(result.get("username"), equalTo("thor")); + + final Object expiresIn = result.get("expires_in"); + assertThat(expiresIn, instanceOf(Number.class)); + assertThat(((Number) expiresIn).longValue(), greaterThan(TimeValue.timeValueMinutes(15).seconds())); + + final Object accessToken = result.get("access_token"); + assertThat(accessToken, notNullValue()); + assertThat(accessToken, instanceOf(String.class)); + verifyElasticsearchAccessToken((String) accessToken); + + final Object refreshToken = result.get("refresh_token"); + assertThat(refreshToken, notNullValue()); + assertThat(refreshToken, instanceOf(String.class)); + verifyElasticsearchRefreshToken((String) refreshToken); + } + } + + /** + * Verifies that the provided "Access Token" (see {@link org.elasticsearch.xpack.security.authc.TokenService}) + * is for the expected user with the expected name and roles. + */ + private void verifyElasticsearchAccessToken(String accessToken) throws IOException { + final BasicHeader authorization = new BasicHeader("Authorization", "Bearer " + accessToken); + final Response response = client().performRequest("GET", "/_xpack/security/_authenticate", authorization); + assertOK(response); + final Map map = parseResponseAsMap(response.getEntity()); + assertThat(map.get("username"), equalTo("thor")); + assertThat(map.get("full_name"), equalTo("Thor Odinson")); + assertSingletonList(map.get("roles"), "kibana_user"); + + assertThat(map.get("metadata"), instanceOf(Map.class)); + final Map metadata = (Map) map.get("metadata"); + assertSingletonList(metadata.get("saml_uid"), "thor"); + assertSingletonList(metadata.get("saml(urn:oid:0.9.2342.19200300.100.1.1)"), "thor"); + assertSingletonList(metadata.get("saml_displayName"), "Thor Odinson"); + assertSingletonList(metadata.get("saml(urn:oid:2.5.4.3)"), "Thor Odinson"); + } + + /** + * Verifies that the provided "Refresh Token" (see {@link org.elasticsearch.xpack.security.authc.TokenService}) + * can be used to get a new valid access token and refresh token. + */ + private void verifyElasticsearchRefreshToken(String refreshToken) throws IOException { + final String body = "{ \"grant_type\":\"refresh_token\", \"refresh_token\":\"" + refreshToken + "\" }"; + final Response response = client().performRequest("POST", "/_xpack/security/oauth2/token", + emptyMap(), new StringEntity(body, ContentType.APPLICATION_JSON), kibanaAuth()); + assertOK(response); + + final Map result = parseResponseAsMap(response.getEntity()); + final Object newRefreshToken = result.get("refresh_token"); + assertThat(newRefreshToken, notNullValue()); + assertThat(newRefreshToken, instanceOf(String.class)); + + final Object accessToken = result.get("access_token"); + assertThat(accessToken, notNullValue()); + assertThat(accessToken, instanceOf(String.class)); + verifyElasticsearchAccessToken((String) accessToken); + } + + /** + * Navigates to the login page on the local (in memory) HTTP UI. + * + * @return A URI to which the "login form" should be submitted. 
+ */ + private URI goToLoginPage(CloseableHttpClient client, BasicHttpContext context) throws IOException { + HttpGet login = new HttpGet(getUrl(SP_LOGIN_PATH)); + String target = execute(client, login, context, response -> { + assertHttpOk(response.getStatusLine()); + return getFormTarget(response.getEntity().getContent()); + }); + + assertThat("Cannot find form target", target, Matchers.notNullValue()); + assertThat("Target must be an absolute path", target, startsWith("/")); + final Object host = context.getAttribute(HttpCoreContext.HTTP_TARGET_HOST); + assertThat(host, instanceOf(HttpHost.class)); + + final String uri = ((HttpHost) host).toURI() + target; + return toUri(uri); + } + + /** + * Submits a Shibboleth login form to the provided URI. + * + * @return A URI to which the "consent form" should be submitted. + */ + private URI submitLoginForm(CloseableHttpClient client, BasicHttpContext context, URI formUri) throws IOException { + final HttpPost form = new HttpPost(formUri); + List params = new ArrayList<>(); + params.add(new BasicNameValuePair("j_username", "Thor")); + params.add(new BasicNameValuePair("j_password", "NickFuryHeartsES")); + params.add(new BasicNameValuePair("_eventId_proceed", "")); + form.setEntity(new UrlEncodedFormEntity(params)); + + final String redirect = execute(client, form, context, response -> { + assertThat(response.getStatusLine().getStatusCode(), equalTo(302)); + return response.getFirstHeader("Location").getValue(); + }); + assertThat(redirect, startsWith("/")); + + String target = execute(client, new HttpGet(formUri.resolve(redirect)), context, response -> { + assertHttpOk(response.getStatusLine()); + return getFormTarget(response.getEntity().getContent()); + }); + assertThat("Cannot find form target", target, Matchers.notNullValue()); + return formUri.resolve(target); + } + + /** + * Submits a Shibboleth consent form to the provided URI. + * The consent form is a step that Shibboleth inserts into the login flow to confirm that the user is willing to send their + * personal details to the application (SP) that they are logging in to. + * + * @return A tuple of ( URI to SP's Assertion-Consumer-Service, SAMLResponse to post to the service ) + */ + private Tuple submitConsentForm(BasicHttpContext context, CloseableHttpClient client, URI consentUri) throws IOException { + final HttpPost form = new HttpPost(consentUri); + List params = new ArrayList<>(); + params.add(new BasicNameValuePair("_shib_idp_consentOptions", "_shib_idp_globalConsent")); + params.add(new BasicNameValuePair("_eventId_proceed", "Accept")); + form.setEntity(new UrlEncodedFormEntity(params)); + + return execute(client, form, context, + response -> parseSamlSubmissionForm(response.getEntity().getContent())); + } + + /** + * Submits a SAML assertion to the ACS URI. + * + * @param acs The URI to the Service Provider's Assertion-Consumer-Service. + * @param saml The (deflated + base64 encoded) {@code SAMLResponse} parameter to post the ACS + */ + private Map submitSamlResponse(BasicHttpContext context, CloseableHttpClient client, URI acs, String saml) + throws IOException { + assertThat("SAML submission target", acs, notNullValue()); + assertThat(acs.getPath(), equalTo(SP_ACS_PATH)); + assertThat("SAML submission content", saml, notNullValue()); + + // The ACS url provided from the SP is going to be wrong because the gradle + // build doesn't know what the web server's port is, so it uses a fake one. 
+ final HttpPost form = new HttpPost(getUrl(SP_ACS_PATH)); + List params = new ArrayList<>(); + params.add(new BasicNameValuePair(SAML_RESPONSE_FIELD, saml)); + form.setEntity(new UrlEncodedFormEntity(params)); + + return execute(client, form, context, response -> { + assertHttpOk(response.getStatusLine()); + return parseResponseAsMap(response.getEntity()); + }); + } + + /** + * Finds the target URL for the HTML form within the provided content. + */ + private String getFormTarget(InputStream content) throws IOException { + // Yes this is seriously bad - but would you prefer I run a headless browser for this? + return findLine(Streams.readAllLines(content), "
<form action=\"([^\"]+)\""); + } + + private Tuple<URI, String> parseSamlSubmissionForm(InputStream content) throws IOException { + final List<String> lines = Streams.readAllLines(content); + return new Tuple<>( + toUri(htmlDecode(findLine(lines, "<form action=\"([^\"]+)\""))), + findLine(lines, "name=\"SAMLResponse\" value=\"([^\"]+)\"")); + } + + private String findLine(List<String> lines, String regex) { + Pattern pattern = Pattern.compile(regex); + for (String line : lines) { + final Matcher matcher = pattern.matcher(line); + if (matcher.find()) { + return matcher.group(1); + } + } + return null; + } + + private String htmlDecode(String text) { + final Pattern hexEntity = Pattern.compile("&#x([0-9a-f]{2});"); + while (true) { + final Matcher matcher = hexEntity.matcher(text); + if (matcher.find() == false) { + return text; + } + char ch = (char) Integer.parseInt(matcher.group(1), 16); + text = matcher.replaceFirst(Character.toString(ch)); + } + } + + private URI toUri(String uri) { + try { + return new URI(uri); + } catch (URISyntaxException e) { + fail("Cannot parse URI " + uri + " - " + e); + return null; + } + } + + private Map<String, Object> parseResponseAsMap(HttpEntity entity) throws IOException { + return convertToMap(XContentType.JSON.xContent(), entity.getContent(), false); + } + + private <T> T execute(CloseableHttpClient client, HttpRequestBase request, + HttpContext context, CheckedFunction<HttpResponse, T, IOException> body) + throws IOException { + final int timeout = (int) TimeValue.timeValueSeconds(90).millis(); + RequestConfig requestConfig = RequestConfig.custom() + .setConnectionRequestTimeout(timeout) + .setConnectTimeout(timeout) + .setSocketTimeout(timeout) + .build(); + request.setConfig(requestConfig); + logger.info("Execute HTTP " + request.getMethod() + ' ' + request.getURI()); + try (CloseableHttpResponse response = SocketAccess.doPrivileged(() -> client.execute(request, context))) { + return body.apply(response); + } catch (Exception e) { + logger.warn(new ParameterizedMessage("HTTP Request [{}] failed", request.getURI()), e); + throw e; + } + } + + private String getUrl(String path) { + return getWebServerUri().resolve(path).toString(); + } + + /** + * Provides the "login" handler for the fake WebApp. + * This interacts with Elasticsearch (using the rest client) to find the login page for the IdP, and then + * sends a redirect to that page. + */ + private void httpLogin(HttpExchange http) throws IOException { + final Response prepare = client().performRequest("POST", "/_xpack/security/saml/prepare", + emptyMap(), new StringEntity("{}", ContentType.APPLICATION_JSON), kibanaAuth()); + assertOK(prepare); + final Map<String, Object> body = parseResponseAsMap(prepare.getEntity()); + logger.info("Created SAML authentication request {}", body); + http.getResponseHeaders().add("Set-Cookie", REQUEST_ID_COOKIE + "=" + body.get("id")); + http.getResponseHeaders().add("Location", (String) body.get("redirect")); + http.sendResponseHeaders(302, 0); + http.close(); + } + + /** + * Provides the "Assertion-Consumer-Service" handler for the fake WebApp. + * This interacts with Elasticsearch (using the rest client) to perform a SAML login, and just + * forwards the JSON response back to the client. 
+ */ + private void httpAcs(HttpExchange http) throws IOException { + final Response saml = samlAuthenticate(http); + assertOK(saml); + final byte[] content = Streams.copyToString(new InputStreamReader(saml.getEntity().getContent())).getBytes(); + http.getResponseHeaders().add("Content-Type", "application/json"); + http.sendResponseHeaders(200, content.length); + http.getResponseBody().write(content); + http.close(); + } + + private Response samlAuthenticate(HttpExchange http) throws IOException { + final List pairs = parseRequestForm(http); + assertThat(pairs, iterableWithSize(1)); + final String saml = pairs.stream() + .filter(p -> SAML_RESPONSE_FIELD.equals(p.getName())) + .map(p -> p.getValue()) + .findFirst() + .orElseGet(() -> { + fail("Cannot find " + SAML_RESPONSE_FIELD + " in form fields"); + return null; + }); + + final String id = getCookie(REQUEST_ID_COOKIE, http); + assertThat(id, notNullValue()); + + final String body = "{ \"content\" : \"" + saml + "\", \"ids\": [\"" + id + "\"] }"; + return client().performRequest("POST", "/_xpack/security/saml/authenticate", + emptyMap(), new StringEntity(body, ContentType.APPLICATION_JSON), kibanaAuth()); + } + + private List parseRequestForm(HttpExchange http) throws IOException { + String reqContent = Streams.copyToString(new InputStreamReader(http.getRequestBody())); + final CharArrayBuffer buffer = new CharArrayBuffer(reqContent.length()); + buffer.append(reqContent); + return URLEncodedUtils.parse(buffer, HTTP.DEF_CONTENT_CHARSET, '&'); + } + + private String getCookie(String name, HttpExchange http) throws IOException { + try { + final String cookies = http.getRequestHeaders().getFirst("Cookie"); + if (cookies == null) { + return null; + } + Header header = new BasicHeader("Cookie", cookies); + final URI serverUri = getWebServerUri(); + final URI requestURI = http.getRequestURI(); + final CookieOrigin origin = new CookieOrigin(serverUri.getHost(), serverUri.getPort(), requestURI.getPath(), false); + final List parsed = new DefaultCookieSpec().parse(header, origin); + return parsed.stream().filter(c -> name.equals(c.getName())).map(c -> c.getValue()).findFirst().orElse(null); + } catch (MalformedCookieException e) { + throw new IOException("Cannot read cookies", e); + } + } + + private void assertHttpOk(StatusLine status) { + assertThat("Unexpected HTTP Response status: " + status, status.getStatusCode(), Matchers.equalTo(200)); + } + + private static void assertSingletonList(Object value, String expectedElement) { + assertThat(value, instanceOf(List.class)); + assertThat(((List) value), contains(expectedElement)); + } + + private static BasicHeader kibanaAuth() { + final String auth = UsernamePasswordToken.basicAuthHeaderValue("kibana", new SecureString(KIBANA_PASSWORD.toCharArray())); + return new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, auth); + } + + private CloseableHttpClient getHttpClient() throws Exception { + return HttpClients.custom().setSSLContext(getClientSslContext()).build(); + } + + private SSLContext getClientSslContext() throws Exception { + final Path pem = getDataPath("/ca.crt"); + final Certificate[] certificates = CertUtils.readCertificates(Collections.singletonList(pem)); + final X509ExtendedTrustManager trustManager = CertUtils.trustManager(certificates); + SSLContext context = SSLContext.getInstance("TLS"); + context.init(new KeyManager[0], new TrustManager[] { trustManager }, new SecureRandom()); + return context; + } + + private URI getWebServerUri() { + final InetSocketAddress address = 
httpServer.getAddress(); + final String host = address.getHostString(); + final int port = address.getPort(); + try { + return new URI("http", null, host, port, "/", null, null); + } catch (URISyntaxException e) { + throw new ElasticsearchException("Cannot construct URI for httpServer @ {}:{}", e, host, port); + } + } + +} diff --git a/x-pack/qa/security-client-tests/build.gradle b/x-pack/qa/security-client-tests/build.gradle new file mode 100644 index 0000000000000..4e517f4d3633e --- /dev/null +++ b/x-pack/qa/security-client-tests/build.gradle @@ -0,0 +1,39 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime') +} + +String outputDir = "${buildDir}/generated-resources/${project.name}" +task copyXPackPluginProps(type: Copy) { + from project(xpackModule('core')).file('src/main/plugin-metadata') + from project(xpackModule('core')).tasks.pluginProperties + into outputDir +} +project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) + +integTestRunner { + systemProperty 'tests.security.manager', 'false' +} + +integTestCluster { + setting 'xpack.security.enabled', 'true' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + setupCommand 'setupDummyUser', + 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + setupCommand 'setupTransportClientUser', + 'bin/elasticsearch-users', 'useradd', 'transport', '-p', 'x-pack-test-password', '-r', 'transport_client' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'test_user', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} diff --git a/x-pack/qa/security-client-tests/src/test/java/org/elasticsearch/xpack/security/qa/SecurityTransportClientIT.java b/x-pack/qa/security-client-tests/src/test/java/org/elasticsearch/xpack/security/qa/SecurityTransportClientIT.java new file mode 100644 index 0000000000000..519f365d515a0 --- /dev/null +++ b/x-pack/qa/security-client-tests/src/test/java/org/elasticsearch/xpack/security/qa/SecurityTransportClientIT.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.qa; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xpack.core.XPackClientPlugin; +import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; +import org.elasticsearch.xpack.core.security.SecurityField; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; + +/** + * Integration tests that test a transport client with security being loaded that connect to an external cluster + */ +public class SecurityTransportClientIT extends ESIntegTestCase { + static final String ADMIN_USER_PW = "test_user:x-pack-test-password"; + static final String TRANSPORT_USER_PW = "transport:x-pack-test-password"; + + @Override + protected Settings externalClusterClientSettings() { + return Settings.builder() + .put(SecurityField.USER_SETTING.getKey(), ADMIN_USER_PW) + .put(NetworkModule.TRANSPORT_TYPE_KEY, "security4") + .build(); + } + + @Override + protected Collection> transportClientPlugins() { + return Collections.singletonList(XPackClientPlugin.class); + } + + public void testThatTransportClientWithoutAuthenticationDoesNotWork() throws Exception { + try (TransportClient client = transportClient(Settings.EMPTY)) { + boolean connected = awaitBusy(() -> { + return client.connectedNodes().size() > 0; + }, 5L, TimeUnit.SECONDS); + + assertThat(connected, is(false)); + } + } + + public void testThatTransportClientAuthenticationWithTransportClientRole() throws Exception { + Settings settings = Settings.builder() + .put(SecurityField.USER_SETTING.getKey(), TRANSPORT_USER_PW) + .build(); + try (TransportClient client = transportClient(settings)) { + boolean connected = awaitBusy(() -> { + return client.connectedNodes().size() > 0; + }, 5L, TimeUnit.SECONDS); + + assertThat(connected, is(true)); + + // this checks that the transport client is really running in a limited state + try { + client.admin().cluster().prepareHealth().get(); + fail("the transport user should not be be able to get health!"); + } catch (ElasticsearchSecurityException e) { + assertThat(e.toString(), containsString("unauthorized")); + } + } + } + + public void testTransportClientWithAdminUser() throws Exception { + final boolean useTransportUser = randomBoolean(); + Settings settings = Settings.builder() + .put(SecurityField.USER_SETTING.getKey(), useTransportUser ? 
TRANSPORT_USER_PW : ADMIN_USER_PW) + .build(); + try (TransportClient client = transportClient(settings)) { + boolean connected = awaitBusy(() -> { + return client.connectedNodes().size() > 0; + }, 5L, TimeUnit.SECONDS); + + assertThat(connected, is(true)); + + // this checks that the transport client is really running in a limited state + ClusterHealthResponse response; + if (useTransportUser) { + response = client.filterWithHeader(Collections.singletonMap("Authorization", + basicAuthHeaderValue("test_user", new SecureString("x-pack-test-password".toCharArray())))) + .admin().cluster().prepareHealth().get(); + } else { + response = client.admin().cluster().prepareHealth().get(); + } + + assertThat(response.isTimedOut(), is(false)); + } + } + + TransportClient transportClient(Settings extraSettings) { + NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get(); + List nodes = nodeInfos.getNodes(); + assertTrue(nodes.isEmpty() == false); + TransportAddress publishAddress = randomFrom(nodes).getTransport().address().publishAddress(); + String clusterName = nodeInfos.getClusterName().value(); + + Settings settings = Settings.builder() + .put(extraSettings) + .put("cluster.name", clusterName) + .build(); + + TransportClient client = new PreBuiltXPackTransportClient(settings); + client.addTransportAddress(publishAddress); + return client; + } +} diff --git a/x-pack/qa/security-example-spi-extension/build.gradle b/x-pack/qa/security-example-spi-extension/build.gradle new file mode 100644 index 0000000000000..b2fac075cb315 --- /dev/null +++ b/x-pack/qa/security-example-spi-extension/build.gradle @@ -0,0 +1,52 @@ +apply plugin: 'elasticsearch.esplugin' + +esplugin { + name 'spi-extension' + description 'An example spi extension pluing for xpack security' + classname 'org.elasticsearch.example.SpiExtensionPlugin' + extendedPlugins = ['x-pack-security'] +} + +dependencies { + compileOnly project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime') +} + + +integTestRunner { + systemProperty 'tests.security.manager', 'false' +} + +integTestCluster { + dependsOn buildZip + setting 'xpack.security.authc.realms.custom.order', '0' + setting 'xpack.security.authc.realms.custom.type', 'custom' + setting 'xpack.security.authc.realms.custom.filtered_setting', 'should be filtered' + setting 'xpack.security.authc.realms.esusers.order', '1' + setting 'xpack.security.authc.realms.esusers.type', 'file' + setting 'xpack.security.authc.realms.native.type', 'native' + setting 'xpack.security.authc.realms.native.order', '2' + setting 'xpack.security.enabled', 'true' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + + // This is important, so that all the modules are available too. + // There are index templates that use token filters that are in analysis-module and + // processors are being used that are in ingest-common module. 
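 + // (The full 'zip' distribution bundles those modules; the minimal integration-test distribution that
 + // test clusters otherwise use does not.)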
+ distribution = 'zip' + + setupCommand 'setupDummyUser', + 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'test_user', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} +check.dependsOn integTest diff --git a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/ExampleSecurityExtension.java b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/ExampleSecurityExtension.java new file mode 100644 index 0000000000000..e426265c8a467 --- /dev/null +++ b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/ExampleSecurityExtension.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.example; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.example.realm.CustomAuthenticationFailureHandler; +import org.elasticsearch.example.realm.CustomRealm; +import org.elasticsearch.example.role.CustomInMemoryRolesProvider; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.security.authc.AuthenticationFailureHandler; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.SecurityExtension; + +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.BiConsumer; + +import static org.elasticsearch.example.role.CustomInMemoryRolesProvider.ROLE_A; +import static org.elasticsearch.example.role.CustomInMemoryRolesProvider.ROLE_B; + +/** + * An example x-pack extension for testing custom realms and custom role providers. + */ +public class ExampleSecurityExtension implements SecurityExtension { + + static { + // check that the extension's policy works. 
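 + // (checkPrintJobAccess requires the RuntimePermission "queuePrintJob", which this extension's
 + // plugin-security.policy grants, so the call below only succeeds if that policy has been applied.)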
+ AccessController.doPrivileged((PrivilegedAction) () -> { + System.getSecurityManager().checkPrintJobAccess(); + return null; + }); + } + + @Override + public Map getRealms(ResourceWatcherService resourceWatcherService) { + return Collections.singletonMap(CustomRealm.TYPE, CustomRealm::new); + } + + @Override + public AuthenticationFailureHandler getAuthenticationFailureHandler() { + return new CustomAuthenticationFailureHandler(); + } + + + @Override + public List, ActionListener>>> + getRolesProviders(Settings settings, ResourceWatcherService resourceWatcherService) { + CustomInMemoryRolesProvider rp1 = new CustomInMemoryRolesProvider(settings, Collections.singletonMap(ROLE_A, "read")); + Map roles = new HashMap<>(); + roles.put(ROLE_A, "all"); + roles.put(ROLE_B, "all"); + CustomInMemoryRolesProvider rp2 = new CustomInMemoryRolesProvider(settings, roles); + return Arrays.asList(rp1, rp2); + } +} diff --git a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/SpiExtensionPlugin.java b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/SpiExtensionPlugin.java new file mode 100644 index 0000000000000..07f769849d5d0 --- /dev/null +++ b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/SpiExtensionPlugin.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.example; + +import org.elasticsearch.example.realm.CustomRealm; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +/** + * An example x-pack extension for testing custom realms and custom role providers. + */ +public class SpiExtensionPlugin extends Plugin implements ActionPlugin { + + @Override + public Collection getRestHeaders() { + return Arrays.asList(CustomRealm.USER_HEADER, CustomRealm.PW_HEADER); + } + + @Override + public List getSettingsFilter() { + return Collections.singletonList("xpack.security.authc.realms.*.filtered_setting"); + } +} diff --git a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomAuthenticationFailureHandler.java b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomAuthenticationFailureHandler.java new file mode 100644 index 0000000000000..73f71c8fe584a --- /dev/null +++ b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomAuthenticationFailureHandler.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.example.realm; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.transport.TransportMessage; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.authc.DefaultAuthenticationFailureHandler; + +public class CustomAuthenticationFailureHandler extends DefaultAuthenticationFailureHandler { + + @Override + public ElasticsearchSecurityException failedAuthentication(RestRequest request, AuthenticationToken token, + ThreadContext context) { + ElasticsearchSecurityException e = super.failedAuthentication(request, token, context); + // set a custom header + e.addHeader("WWW-Authenticate", "custom-challenge"); + return e; + } + + @Override + public ElasticsearchSecurityException failedAuthentication(TransportMessage message, AuthenticationToken token, String action, + ThreadContext context) { + ElasticsearchSecurityException e = super.failedAuthentication(message, token, action, context); + // set a custom header + e.addHeader("WWW-Authenticate", "custom-challenge"); + return e; + } + + @Override + public ElasticsearchSecurityException missingToken(RestRequest request, ThreadContext context) { + ElasticsearchSecurityException e = super.missingToken(request, context); + // set a custom header + e.addHeader("WWW-Authenticate", "custom-challenge"); + return e; + } + + @Override + public ElasticsearchSecurityException missingToken(TransportMessage message, String action, ThreadContext context) { + ElasticsearchSecurityException e = super.missingToken(message, action, context); + // set a custom header + e.addHeader("WWW-Authenticate", "custom-challenge"); + return e; + } +} diff --git a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java new file mode 100644 index 0000000000000..19ef9d2eb0d72 --- /dev/null +++ b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.example.realm; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.support.CharArrays; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.user.User; + +public class CustomRealm extends Realm { + + public static final String TYPE = "custom"; + + public static final String USER_HEADER = "User"; + public static final String PW_HEADER = "Password"; + + public static final String KNOWN_USER = "custom_user"; + public static final SecureString KNOWN_PW = new SecureString("x-pack-test-password".toCharArray()); + static final String[] ROLES = new String[] { "superuser" }; + + public CustomRealm(RealmConfig config) { + super(TYPE, config); + } + + @Override + public boolean supports(AuthenticationToken token) { + return token instanceof UsernamePasswordToken; + } + + @Override + public UsernamePasswordToken token(ThreadContext threadContext) { + String user = threadContext.getHeader(USER_HEADER); + if (user != null) { + String password = threadContext.getHeader(PW_HEADER); + if (password != null) { + return new UsernamePasswordToken(user, new SecureString(password.toCharArray())); + } + } + return null; + } + + @Override + public void authenticate(AuthenticationToken authToken, ActionListener listener) { + UsernamePasswordToken token = (UsernamePasswordToken)authToken; + final String actualUser = token.principal(); + if (KNOWN_USER.equals(actualUser)) { + if (CharArrays.constantTimeEquals(token.credentials().getChars(), KNOWN_PW.getChars())) { + listener.onResponse(AuthenticationResult.success(new User(actualUser, ROLES))); + } else { + listener.onResponse(AuthenticationResult.unsuccessful("Invalid password for user " + actualUser, null)); + } + } else { + listener.onResponse(AuthenticationResult.notHandled()); + } + } + + @Override + public void lookupUser(String username, ActionListener listener) { + listener.onResponse(null); + } +} diff --git a/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/role/CustomInMemoryRolesProvider.java b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/role/CustomInMemoryRolesProvider.java new file mode 100644 index 0000000000000..df9d3b5a6b875 --- /dev/null +++ b/x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/role/CustomInMemoryRolesProvider.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.example.role; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.function.BiConsumer; + +/** + * A custom roles provider implementation for testing that serves + * static roles from memory. + */ +public class CustomInMemoryRolesProvider + extends AbstractComponent + implements BiConsumer, ActionListener>> { + + public static final String INDEX = "foo"; + public static final String ROLE_A = "roleA"; + public static final String ROLE_B = "roleB"; + + private final Map rolePermissionSettings; + + public CustomInMemoryRolesProvider(Settings settings, Map rolePermissionSettings) { + super(settings); + this.rolePermissionSettings = rolePermissionSettings; + } + + @Override + public void accept(Set roles, ActionListener> listener) { + Set roleDescriptors = new HashSet<>(); + for (String role : roles) { + if (rolePermissionSettings.containsKey(role)) { + roleDescriptors.add( + new RoleDescriptor(role, new String[] { "all" }, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder() + .privileges(rolePermissionSettings.get(role)) + .indices(INDEX) + .grantedFields("*") + .build() + }, null) + ); + } + } + + listener.onResponse(roleDescriptors); + } +} diff --git a/x-pack/qa/security-example-spi-extension/src/main/plugin-metadata/plugin-security.policy b/x-pack/qa/security-example-spi-extension/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..a3b647b316eec --- /dev/null +++ b/x-pack/qa/security-example-spi-extension/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,3 @@ +grant { + permission java.lang.RuntimePermission "queuePrintJob"; +}; diff --git a/x-pack/qa/security-example-spi-extension/src/main/resources/META-INF/services/org.elasticsearch.xpack.core.security.SecurityExtension b/x-pack/qa/security-example-spi-extension/src/main/resources/META-INF/services/org.elasticsearch.xpack.core.security.SecurityExtension new file mode 100644 index 0000000000000..cba4e93a292ae --- /dev/null +++ b/x-pack/qa/security-example-spi-extension/src/main/resources/META-INF/services/org.elasticsearch.xpack.core.security.SecurityExtension @@ -0,0 +1 @@ +org.elasticsearch.example.ExampleSecurityExtension \ No newline at end of file diff --git a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmIT.java b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmIT.java new file mode 100644 index 0000000000000..65ec595a0d409 --- /dev/null +++ b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmIT.java @@ -0,0 +1,121 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.example.realm; + +import org.apache.http.message.BasicHeader; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.transport.NoNodeAvailableException; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; +import org.elasticsearch.xpack.core.XPackClientPlugin; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import static org.hamcrest.Matchers.is; + +/** + * Integration test to test authentication with the custom realm + */ +public class CustomRealmIT extends ESIntegTestCase { + + @Override + protected Settings externalClusterClientSettings() { + return Settings.builder() + .put(ThreadContext.PREFIX + "." + CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER) + .put(ThreadContext.PREFIX + "." + CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW.toString()) + .put(NetworkModule.TRANSPORT_TYPE_KEY, "security4") + .build(); + } + + @Override + protected Collection> transportClientPlugins() { + return Collections.>singleton(XPackClientPlugin.class); + } + + public void testHttpConnectionWithNoAuthentication() throws Exception { + try { + getRestClient().performRequest("GET", "/"); + fail("request should have failed"); + } catch(ResponseException e) { + Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), is(401)); + String value = response.getHeader("WWW-Authenticate"); + assertThat(value, is("custom-challenge")); + } + } + + public void testHttpAuthentication() throws Exception { + Response response = getRestClient().performRequest("GET", "/", + new BasicHeader(CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER), + new BasicHeader(CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW.toString())); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + } + + public void testTransportClient() throws Exception { + NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get(); + List nodes = nodeInfos.getNodes(); + assertTrue(nodes.isEmpty() == false); + TransportAddress publishAddress = randomFrom(nodes).getTransport().address().publishAddress(); + String clusterName = nodeInfos.getClusterName().value(); + + Settings settings = Settings.builder() + .put("cluster.name", clusterName) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) + .put(ThreadContext.PREFIX + "." + CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER) + .put(ThreadContext.PREFIX + "." 
+ CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW.toString()) + .build(); + try (TransportClient client = new PreBuiltXPackTransportClient(settings)) { + client.addTransportAddress(publishAddress); + ClusterHealthResponse response = client.admin().cluster().prepareHealth().execute().actionGet(); + assertThat(response.isTimedOut(), is(false)); + } + } + + public void testTransportClientWrongAuthentication() throws Exception { + NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get(); + List nodes = nodeInfos.getNodes(); + assertTrue(nodes.isEmpty() == false); + TransportAddress publishAddress = randomFrom(nodes).getTransport().address().publishAddress(); + String clusterName = nodeInfos.getClusterName().value(); + + Settings settings = Settings.builder() + .put("cluster.name", clusterName) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) + .put(ThreadContext.PREFIX + "." + CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER + randomAlphaOfLength(1)) + .put(ThreadContext.PREFIX + "." + CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW.toString()) + .build(); + try (TransportClient client = new PreBuiltXPackTransportClient(settings)) { + client.addTransportAddress(publishAddress); + client.admin().cluster().prepareHealth().execute().actionGet(); + fail("authentication failure should have resulted in a NoNodesAvailableException"); + } catch (NoNodeAvailableException e) { + // expected + } + } + + public void testSettingsFiltering() throws Exception { + NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().clear().setSettings(true).get(); + for(NodeInfo info : nodeInfos.getNodes()) { + Settings settings = info.getSettings(); + assertNotNull(settings); + assertNull(settings.get("xpack.security.authc.realms.custom.filtered_setting")); + assertEquals(CustomRealm.TYPE, settings.get("xpack.security.authc.realms.custom.type")); + } + } +} diff --git a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmTests.java b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmTests.java new file mode 100644 index 0000000000000..d1435ebaa3c28 --- /dev/null +++ b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmTests.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.example.realm; + +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.user.User; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + +public class CustomRealmTests extends ESTestCase { + public void testAuthenticate() { + Settings globalSettings = Settings.builder().put("path.home", createTempDir()).build(); + CustomRealm realm = new CustomRealm(new RealmConfig("test", Settings.EMPTY, globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings))); + SecureString password = CustomRealm.KNOWN_PW.clone(); + UsernamePasswordToken token = new UsernamePasswordToken(CustomRealm.KNOWN_USER, password); + PlainActionFuture plainActionFuture = new PlainActionFuture<>(); + realm.authenticate(token, plainActionFuture); + User user = plainActionFuture.actionGet().getUser(); + assertThat(user, notNullValue()); + assertThat(user.roles(), equalTo(CustomRealm.ROLES)); + assertThat(user.principal(), equalTo(CustomRealm.KNOWN_USER)); + } + + public void testAuthenticateBadUser() { + Settings globalSettings = Settings.builder().put("path.home", createTempDir()).build(); + CustomRealm realm = new CustomRealm(new RealmConfig("test", Settings.EMPTY, globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings))); + SecureString password = CustomRealm.KNOWN_PW.clone(); + UsernamePasswordToken token = new UsernamePasswordToken(CustomRealm.KNOWN_USER + "1", password); + PlainActionFuture plainActionFuture = new PlainActionFuture<>(); + realm.authenticate(token, plainActionFuture); + final AuthenticationResult result = plainActionFuture.actionGet(); + assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.CONTINUE)); + } +} diff --git a/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/role/CustomRolesProviderIT.java b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/role/CustomRolesProviderIT.java new file mode 100644 index 0000000000000..4e1fb72256086 --- /dev/null +++ b/x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/role/CustomRolesProviderIT.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.example.role; + +import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.example.realm.CustomRealm; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xpack.core.XPackClientPlugin; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.client.SecurityClient; + +import java.util.Collection; +import java.util.Collections; + +import static org.elasticsearch.example.role.CustomInMemoryRolesProvider.INDEX; +import static org.elasticsearch.example.role.CustomInMemoryRolesProvider.ROLE_A; +import static org.elasticsearch.example.role.CustomInMemoryRolesProvider.ROLE_B; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.is; + +/** + * Integration test for custom roles providers. + */ +public class CustomRolesProviderIT extends ESIntegTestCase { + + private static final String TEST_USER = "test_user"; + private static final String TEST_PWD = "change_me"; + + @Override + protected Settings externalClusterClientSettings() { + return Settings.builder() + .put(ThreadContext.PREFIX + "." + CustomRealm.USER_HEADER, CustomRealm.KNOWN_USER) + .put(ThreadContext.PREFIX + "." + CustomRealm.PW_HEADER, CustomRealm.KNOWN_PW.toString()) + .put(NetworkModule.TRANSPORT_TYPE_KEY, "security4") + .build(); + } + + @Override + protected Collection> transportClientPlugins() { + return Collections.singleton(XPackClientPlugin.class); + } + + public void setupTestUser(String role) { + SecurityClient securityClient = new SecurityClient(client()); + securityClient.preparePutUser(TEST_USER, TEST_PWD.toCharArray(), role).get(); + } + + public void testAuthorizedCustomRoleSucceeds() throws Exception { + setupTestUser(ROLE_B); + // roleB has all permissions on index "foo", so creating "foo" should succeed + Response response = getRestClient().performRequest("PUT", "/" + INDEX, authHeader()); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + } + + public void testFirstResolvedRoleTakesPrecedence() throws Exception { + // the first custom roles provider has set ROLE_A to only have read permission on the index, + // the second custom roles provider has set ROLE_A to have all permissions, but since + // the first custom role provider appears first in order, it should take precedence and deny + // permission to create the index + setupTestUser(ROLE_A); + // roleB has all permissions on index "foo", so creating "foo" should succeed + try { + getRestClient().performRequest("PUT", "/" + INDEX, authHeader()); + fail(ROLE_A + " should not be authorized to create index " + INDEX); + } catch (ResponseException e) { + assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); + } + } + + public void testUnresolvedRoleDoesntSucceed() throws Exception { + setupTestUser("unknown"); + // roleB has all permissions on index "foo", so creating "foo" should succeed + try { + getRestClient().performRequest("PUT", "/" + INDEX, authHeader()); + fail(ROLE_A + " should not be authorized to create index " + INDEX); + } catch 
(ResponseException e) { + assertThat(e.getResponse().getStatusLine().getStatusCode(), is(403)); + } + } + + private BasicHeader authHeader() { + return new BasicHeader(UsernamePasswordToken.BASIC_AUTH_HEADER, + basicAuthHeaderValue(TEST_USER, new SecureString(TEST_PWD.toCharArray()))); + } +} diff --git a/x-pack/qa/security-migrate-tests/build.gradle b/x-pack/qa/security-migrate-tests/build.gradle new file mode 100644 index 0000000000000..7ccf6d2349b8b --- /dev/null +++ b/x-pack/qa/security-migrate-tests/build.gradle @@ -0,0 +1,33 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('security'), configuration: 'runtime') + testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime') +} + +integTestCluster { + setting 'xpack.security.enabled', 'true' + setting 'xpack.license.self_generated.type', 'trial' + extraConfigFile 'roles.yml', 'roles.yml' + [ + test_admin: 'superuser', + transport_user: 'superuser', + existing: 'superuser', + bob: 'actual_role' + ].each { String user, String role -> + setupCommand 'setupUser#' + user, + 'bin/elasticsearch-users', 'useradd', user, '-p', 'x-pack-test-password', '-r', role + } + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'test_admin', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} diff --git a/x-pack/qa/security-migrate-tests/roles.yml b/x-pack/qa/security-migrate-tests/roles.yml new file mode 100644 index 0000000000000..6e997383f8a5a --- /dev/null +++ b/x-pack/qa/security-migrate-tests/roles.yml @@ -0,0 +1,22 @@ +# A role that has all sorts of configuration: +# - it can monitor the cluster +# - for index1 and index2 it can do CRUD things and refresh +# - for other indices it has search-only privileges +actual_role: + run_as: [ "joe" ] + cluster: + - monitor + indices: + - names: [ "index1", "index2" ] + privileges: [ "read", "write", "create_index", "indices:admin/refresh" ] + field_security: + grant: + - foo + - bar + query: + bool: + must_not: + match: + hidden: true + - names: "*" + privileges: [ "read" ] diff --git a/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolIT.java b/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolIT.java new file mode 100644 index 0000000000000..719971bf8a829 --- /dev/null +++ b/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolIT.java @@ -0,0 +1,129 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security; + +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.Requests; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.security.action.role.GetRolesResponse; +import org.elasticsearch.xpack.core.security.action.user.GetUsersResponse; +import org.elasticsearch.xpack.core.security.action.user.PutUserResponse; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool; +import org.junit.Before; + +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Collections; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.containsString; + +/** + * Integration tests for the {@code elasticsearch-migrate} shell command + */ +public class MigrateToolIT extends MigrateToolTestCase { + + @Before + public void setupUpTest() throws Exception { + Client client = getClient(); + SecurityClient c = new SecurityClient(client); + + // Add an existing user so the tool will skip it + PutUserResponse pur = c.preparePutUser("existing", "s3kirt".toCharArray(), "role1", "user").get(); + assertTrue(pur.created()); + } + + public void testRunMigrateTool() throws Exception { + final String testConfigDir = System.getProperty("tests.config.dir"); + logger.info("--> CONF: {}", testConfigDir); + final Path configPath = PathUtils.get(testConfigDir); + Settings settings = Settings.builder().put("path.home", configPath.getParent()).build(); + // Cluster should already be up + String url = "http://" + getHttpURL(); + logger.info("--> using URL: {}", url); + MockTerminal t = new MockTerminal(); + ESNativeRealmMigrateTool.MigrateUserOrRoles muor = new ESNativeRealmMigrateTool.MigrateUserOrRoles(); + OptionParser parser = muor.getParser(); + + OptionSet options = parser.parse("-u", "test_admin", "-p", "x-pack-test-password", "-U", url); + muor.execute(t, options, new Environment(settings, configPath)); + + logger.info("--> output:\n{}", t.getOutput()); + + Client client = getClient(); + SecurityClient c = new SecurityClient(client); + + // Check that the migrated user can be retrieved + GetUsersResponse resp = c.prepareGetUsers("bob").get(); + assertTrue("user 'bob' should exist", resp.hasUsers()); + User bob = resp.users()[0]; + assertEquals(bob.principal(), "bob"); + assertArrayEquals(bob.roles(), new String[]{"actual_role"}); + + // Make sure the existing user did not change + resp = c.prepareGetUsers("existing").get(); + assertTrue("user should exist", resp.hasUsers()); + User existing = resp.users()[0]; + assertEquals(existing.principal(), "existing"); + assertArrayEquals(existing.roles(), new String[]{"role1", "user"}); + + // Make sure 
the "actual_role" made it in and is correct + GetRolesResponse roleResp = c.prepareGetRoles().names("actual_role").get(); + assertTrue("role should exist", roleResp.hasRoles()); + RoleDescriptor rd = roleResp.roles()[0]; + assertNotNull(rd); + assertEquals(rd.getName(), "actual_role"); + assertArrayEquals(rd.getClusterPrivileges(), new String[]{"monitor"}); + assertArrayEquals(rd.getRunAs(), new String[]{"joe"}); + RoleDescriptor.IndicesPrivileges[] ips = rd.getIndicesPrivileges(); + assertEquals(ips.length, 2); + for (RoleDescriptor.IndicesPrivileges ip : ips) { + final FieldPermissions fieldPermissions = new FieldPermissions( + new FieldPermissionsDefinition(ip.getGrantedFields(), ip.getDeniedFields())); + if (Arrays.equals(ip.getIndices(), new String[]{"index1", "index2"})) { + assertArrayEquals(ip.getPrivileges(), new String[]{"read", "write", "create_index", "indices:admin/refresh"}); + assertTrue(fieldPermissions.hasFieldLevelSecurity()); + assertTrue(fieldPermissions.grantsAccessTo("bar")); + assertTrue(fieldPermissions.grantsAccessTo("foo")); + assertNotNull(ip.getQuery()); + assertThat(ip.getQuery().iterator().next().utf8ToString(), + containsString("{\"bool\":{\"must_not\":{\"match\":{\"hidden\":true}}}}")); + } else { + assertArrayEquals(ip.getIndices(), new String[]{"*"}); + assertArrayEquals(ip.getPrivileges(), new String[]{"read"}); + assertFalse(fieldPermissions.hasFieldLevelSecurity()); + assertNull(ip.getQuery()); + } + } + + // Check that bob can access the things the "actual_role" says he can + String token = basicAuthHeaderValue("bob", new SecureString("x-pack-test-password".toCharArray())); + // Create "index1" index and try to search from it as "bob" + client.filterWithHeader(Collections.singletonMap("Authorization", token)).admin().indices().prepareCreate("index1").get(); + // Wait for the index to be ready so it doesn't fail if no shards are initialized + client.admin().cluster().health(Requests.clusterHealthRequest("index1") + .timeout(TimeValue.timeValueSeconds(30)) + .waitForYellowStatus() + .waitForEvents(Priority.LANGUID) + .waitForNoRelocatingShards(true)) + .actionGet(); + SearchResponse searchResp = client.filterWithHeader(Collections.singletonMap("Authorization", token)).prepareSearch("index1").get(); + } +} diff --git a/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolTestCase.java b/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolTestCase.java new file mode 100644 index 0000000000000..2987c1afc8daf --- /dev/null +++ b/x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolTestCase.java @@ -0,0 +1,167 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; +import org.elasticsearch.xpack.core.security.SecurityField; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.nio.file.Path; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.hamcrest.Matchers.notNullValue; + +/** + * {@link MigrateToolTestCase} is an abstract base class to run integration + * tests against an external Elasticsearch Cluster. + *
<p>
+ * You can define a list of transport addresses from which you can reach your cluster
+ * by setting the "tests.cluster" system property. It defaults to "localhost:9300".
+ * <p>
+ * All tests can be run from Maven using mvn install, as Maven will start an external cluster first.
+ * <p>
    + * If you want to debug this module from your IDE, then start an external cluster by yourself + * then run JUnit. If you changed the default port, set "tests.cluster=localhost:PORT" when running + * your test. + */ +@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") +public abstract class MigrateToolTestCase extends LuceneTestCase { + + /** + * Key used to eventually switch to using an external cluster and provide its transport addresses + */ + public static final String TESTS_CLUSTER = "tests.cluster"; + + /** + * Key used to eventually switch to using an external cluster and provide its transport addresses + */ + public static final String TESTS_HTTP_CLUSTER = "tests.rest.cluster"; + + /** + * Defaults to localhost:9300 + */ + public static final String TESTS_CLUSTER_DEFAULT = "localhost:9300"; + + protected static final Logger logger = ESLoggerFactory.getLogger(MigrateToolTestCase.class.getName()); + + private static final AtomicInteger counter = new AtomicInteger(); + private static Client client; + private static String clusterAddresses; + private static String clusterHttpAddresses; + + private static Client startClient(Path tempDir, TransportAddress... transportAddresses) { + logger.info("--> Starting Elasticsearch Java TransportClient {}, {}", transportAddresses, tempDir); + + Settings clientSettings = Settings.builder() + .put("cluster.name", "qa_migrate_tests_" + counter.getAndIncrement()) + .put("client.transport.ignore_cluster_name", true) + .put("path.home", tempDir) + .put(SecurityField.USER_SETTING.getKey(), "transport_user:x-pack-test-password") + .build(); + + TransportClient client = new PreBuiltXPackTransportClient(clientSettings).addTransportAddresses(transportAddresses); + Exception clientException = null; + try { + logger.info("--> Elasticsearch Java TransportClient started"); + ClusterHealthResponse health = client.admin().cluster().prepareHealth().get(); + logger.info("--> connected to [{}] cluster which is running [{}] node(s).", + health.getClusterName(), health.getNumberOfNodes()); + } catch (Exception e) { + clientException = e; + } + + assumeNoException("Sounds like your cluster is not running at " + clusterAddresses, clientException); + + return client; + } + + private static Client startClient() throws UnknownHostException { + String[] stringAddresses = clusterAddresses.split(","); + TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length]; + int i = 0; + for (String stringAddress : stringAddresses) { + int lastColon = stringAddress.lastIndexOf(":"); + if (lastColon == -1) { + throw new IllegalArgumentException("address [" + clusterAddresses + "] not valid"); + } + String ip = stringAddress.substring(0, lastColon); + String port = stringAddress.substring(lastColon + 1); + try { + transportAddresses[i++] = new TransportAddress(InetAddress.getByName(ip), Integer.valueOf(port)); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("port is not valid, expected number but was [" + port + "]"); + } + } + return startClient(createTempDir(), transportAddresses); + } + + public static Client getClient() { + if (client == null) { + try { + client = startClient(); + } catch (UnknownHostException e) { + logger.error("could not start the client", e); + } + assertThat(client, notNullValue()); + } + return client; + } + + public static String getHttpURL() { + return clusterHttpAddresses; + } + + @BeforeClass + public static void initializeSettings() throws UnknownHostException { + String port = 
System.getProperty("integ.http.port"); + clusterAddresses = System.getProperty(TESTS_CLUSTER); + clusterHttpAddresses = System.getProperty(TESTS_HTTP_CLUSTER); + if (clusterAddresses == null || clusterAddresses.isEmpty()) { + throw new UnknownHostException("unable to get a cluster address"); + } + } + + @AfterClass + public static void stopTransportClient() { + if (client != null) { + client.close(); + client = null; + } + } + + @Before + public void defineIndexName() { + doClean(); + } + + @After + public void cleanIndex() { + doClean(); + } + + private void doClean() { + if (client != null) { + try { + client.admin().indices().prepareDelete("_all").get(); + } catch (Exception e) { + // We ignore this cleanup exception + } + } + } +} diff --git a/x-pack/qa/security-setup-password-tests/build.gradle b/x-pack/qa/security-setup-password-tests/build.gradle new file mode 100644 index 0000000000000..16accc87a9476 --- /dev/null +++ b/x-pack/qa/security-setup-password-tests/build.gradle @@ -0,0 +1,29 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('security'), configuration: 'runtime') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') +} + +integTestRunner { + systemProperty 'tests.security.manager', 'false' +} + +integTestCluster { + setupCommand 'setupTestAdmin', + 'bin/elasticsearch-users', 'useradd', "test_admin", '-p', 'x-pack-test-password', '-r', "superuser" + setting 'xpack.security.enabled', 'true' + setting 'xpack.license.self_generated.type', 'trial' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'test_admin', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} diff --git a/x-pack/qa/security-setup-password-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolIT.java b/x-pack/qa/security-setup-password-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolIT.java new file mode 100644 index 0000000000000..74f1223f4a6a1 --- /dev/null +++ b/x-pack/qa/security-setup-password-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolIT.java @@ -0,0 +1,121 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.esnative.tool; + +import org.apache.http.message.BasicHeader; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.InetAddress; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.Arrays; +import java.util.Base64; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; + +public class SetupPasswordToolIT extends ESRestTestCase { + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("test_admin", new SecureString("x-pack-test-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @SuppressWarnings("unchecked") + public void testSetupPasswordToolAutoSetup() throws Exception { + final String testConfigDir = System.getProperty("tests.config.dir"); + logger.info("--> CONF: {}", testConfigDir); + final Path configPath = PathUtils.get(testConfigDir); + setSystemPropsForTool(configPath); + + Response nodesResponse = client().performRequest("GET", "/_nodes/http"); + Map nodesMap = entityAsMap(nodesResponse); + + Map nodes = (Map) nodesMap.get("nodes"); + Map firstNode = (Map) nodes.entrySet().iterator().next().getValue(); + Map firstNodeHttp = (Map) firstNode.get("http"); + String nodePublishAddress = (String) firstNodeHttp.get("publish_address"); + final int lastColonIndex = nodePublishAddress.lastIndexOf(':'); + InetAddress actualPublishAddress = InetAddresses.forString(nodePublishAddress.substring(0, lastColonIndex)); + InetAddress expectedPublishAddress = new NetworkService(Collections.emptyList()).resolvePublishHostAddresses(Strings.EMPTY_ARRAY); + final int port = Integer.valueOf(nodePublishAddress.substring(lastColonIndex + 1)); + + List lines = Files.readAllLines(configPath.resolve("elasticsearch.yml")); + lines = lines.stream().filter(s -> s.startsWith("http.port") == false && s.startsWith("http.publish_port") == false) + .collect(Collectors.toList()); + lines.add(randomFrom("http.port", "http.publish_port") + ": " + port); + if (expectedPublishAddress.equals(actualPublishAddress) == false) { + lines.add("http.publish_address: " + InetAddresses.toAddrString(actualPublishAddress)); + } + Files.write(configPath.resolve("elasticsearch.yml"), lines, StandardCharsets.UTF_8, StandardOpenOption.TRUNCATE_EXISTING); + + MockTerminal mockTerminal = new MockTerminal(); + SetupPasswordTool tool = new SetupPasswordTool(); + final int status; + if (randomBoolean()) { + mockTerminal.addTextInput("y"); // answer yes to continue prompt + status = tool.main(new String[] { "auto" }, mockTerminal); + } else { + status = tool.main(new String[] { "auto", 
"--batch" }, mockTerminal); + } + assertEquals(0, status); + String output = mockTerminal.getOutput(); + logger.info("CLI TOOL OUTPUT:\n{}", output); + String[] outputLines = output.split("\\n"); + Map userPasswordMap = new HashMap<>(); + Arrays.asList(outputLines).forEach(line -> { + if (line.startsWith("PASSWORD ")) { + String[] pieces = line.split(" "); + String user = pieces[1]; + String password = pieces[pieces.length - 1]; + logger.info("user [{}] password [{}]", user, password); + userPasswordMap.put(user, password); + } + }); + + assertEquals(4, userPasswordMap.size()); + userPasswordMap.entrySet().forEach(entry -> { + final String basicHeader = "Basic " + + Base64.getEncoder().encodeToString((entry.getKey() + ":" + entry.getValue()).getBytes(StandardCharsets.UTF_8)); + try { + Response authenticateResponse = client().performRequest("GET", "/_xpack/security/_authenticate", + new BasicHeader("Authorization", basicHeader)); + assertEquals(200, authenticateResponse.getStatusLine().getStatusCode()); + Map userInfoMap = entityAsMap(authenticateResponse); + assertEquals(entry.getKey(), userInfoMap.get("username")); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }); + } + + @SuppressForbidden(reason = "need to set sys props for CLI tool") + private void setSystemPropsForTool(Path configPath) { + System.setProperty("es.path.conf", configPath.toString()); + System.setProperty("es.path.home", configPath.getParent().toString()); + } +} diff --git a/x-pack/qa/security-tools-tests/build.gradle b/x-pack/qa/security-tools-tests/build.gradle new file mode 100644 index 0000000000000..5df22c557db3c --- /dev/null +++ b/x-pack/qa/security-tools-tests/build.gradle @@ -0,0 +1,21 @@ +apply plugin: 'elasticsearch.standalone-test' + +dependencies { + testCompile project(xpackModule('security')) + testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') + testCompile 'com.google.jimfs:jimfs:1.1' + testCompile 'com.google.guava:guava:16.0.1' +} + +// add test resources from security, so certificate tool tests can use example certs +sourceSets.test.resources.srcDirs(project(xpackModule('security')).sourceSets.test.resources.srcDirs) + +// we have to repeate these patterns because the security test resources are effectively in the src of this project +forbiddenPatterns { + exclude '**/*.key' + exclude '**/*.p12' + exclude '**/*.der' +} + +// these are just tests, no need to audit +thirdPartyAudit.enabled = false diff --git a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/core/ssl/CertificateGenerateToolTests.java b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/core/ssl/CertificateGenerateToolTests.java new file mode 100644 index 0000000000000..e6685e9c6cb68 --- /dev/null +++ b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/core/ssl/CertificateGenerateToolTests.java @@ -0,0 +1,542 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ssl; + +import com.google.common.jimfs.Configuration; +import com.google.common.jimfs.Jimfs; +import org.elasticsearch.core.internal.io.IOUtils; +import org.bouncycastle.asn1.ASN1ObjectIdentifier; +import org.bouncycastle.asn1.ASN1Sequence; +import org.bouncycastle.asn1.ASN1String; +import org.bouncycastle.asn1.DEROctetString; +import org.bouncycastle.asn1.DERTaggedObject; +import org.bouncycastle.asn1.pkcs.Attribute; +import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers; +import org.bouncycastle.asn1.x509.Extension; +import org.bouncycastle.asn1.x509.Extensions; +import org.bouncycastle.asn1.x509.GeneralName; +import org.bouncycastle.asn1.x509.GeneralNames; +import org.bouncycastle.cert.X509CertificateHolder; +import org.bouncycastle.openssl.PEMEncryptedKeyPair; +import org.bouncycastle.openssl.PEMParser; +import org.bouncycastle.pkcs.PKCS10CertificationRequest; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.xpack.core.ssl.CertificateGenerateTool.CAInfo; +import org.elasticsearch.xpack.core.ssl.CertificateGenerateTool.CertificateInformation; +import org.elasticsearch.xpack.core.ssl.CertificateGenerateTool.Name; +import org.hamcrest.Matchers; +import org.junit.After; + +import javax.security.auth.x500.X500Principal; + +import java.io.IOException; +import java.io.InputStream; +import java.io.Reader; +import java.net.InetAddress; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileSystem; +import java.nio.file.FileSystems; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.attribute.PosixFilePermission; +import java.security.Key; +import java.security.KeyPair; +import java.security.KeyStore; +import java.security.PrivateKey; +import java.security.cert.Certificate; +import java.security.cert.CertificateFactory; +import java.security.cert.X509Certificate; +import java.security.interfaces.RSAKey; +import java.time.temporal.ChronoUnit; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.elasticsearch.test.TestMatchers.pathExists; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; + +/** + * Unit tests for the tool used to simplify SSL certificate generation + */ +// TODO baz - fix this to work in intellij+java9, its complaining about java.sql.Date not being on the classpath +public class CertificateGenerateToolTests extends ESTestCase { + + private FileSystem jimfs; + + private Path initTempDir() throws Exception { + Configuration conf = Configuration.unix().toBuilder().setAttributeViews("posix").build(); + jimfs = Jimfs.newFileSystem(conf); + Path tempDir = 
jimfs.getPath("temp"); + IOUtils.rm(tempDir); + Files.createDirectories(tempDir); + return tempDir; + } + + @After + public void tearDown() throws Exception { + IOUtils.close(jimfs); + super.tearDown(); + } + + public void testOutputDirectory() throws Exception { + Path outputDir = createTempDir(); + Path outputFile = outputDir.resolve("certs.zip"); + MockTerminal terminal = new MockTerminal(); + + // test with a user provided dir + Path resolvedOutputFile = CertificateGenerateTool.getOutputFile(terminal, outputFile.toString(), null); + assertEquals(outputFile, resolvedOutputFile); + assertTrue(terminal.getOutput().isEmpty()); + + // test without a user provided directory + Path userPromptedOutputFile = outputDir.resolve("csr"); + assertFalse(Files.exists(userPromptedOutputFile)); + terminal.addTextInput(userPromptedOutputFile.toString()); + resolvedOutputFile = CertificateGenerateTool.getOutputFile(terminal, null, "out.zip"); + assertEquals(userPromptedOutputFile, resolvedOutputFile); + assertTrue(terminal.getOutput().isEmpty()); + + // test with empty user input + String defaultFilename = randomAlphaOfLengthBetween(1, 10); + Path expectedDefaultPath = resolvePath(defaultFilename); + terminal.addTextInput(""); + resolvedOutputFile = CertificateGenerateTool.getOutputFile(terminal, null, defaultFilename); + assertEquals(expectedDefaultPath, resolvedOutputFile); + assertTrue(terminal.getOutput().isEmpty()); + } + + public void testPromptingForInstanceInformation() throws Exception { + final int numberOfInstances = scaledRandomIntBetween(1, 12); + Map> instanceInput = new HashMap<>(numberOfInstances); + for (int i = 0; i < numberOfInstances; i++) { + final String name; + while (true) { + String randomName = getValidRandomInstanceName(); + if (instanceInput.containsKey(randomName) == false) { + name = randomName; + break; + } + } + Map instanceInfo = new HashMap<>(); + instanceInput.put(name, instanceInfo); + instanceInfo.put("ip", randomFrom("127.0.0.1", "::1", "192.168.1.1,::1", "")); + instanceInfo.put("dns", randomFrom("localhost", "localhost.localdomain", "localhost,myhost", "")); + logger.info("instance [{}] name [{}] [{}]", i, name, instanceInfo); + } + + int count = 0; + MockTerminal terminal = new MockTerminal(); + for (Entry> entry : instanceInput.entrySet()) { + terminal.addTextInput(entry.getKey()); + terminal.addTextInput(""); + terminal.addTextInput(entry.getValue().get("ip")); + terminal.addTextInput(entry.getValue().get("dns")); + count++; + if (count == numberOfInstances) { + terminal.addTextInput("n"); + } else { + terminal.addTextInput("y"); + } + } + + Collection certInfos = CertificateGenerateTool.getCertificateInformationList(terminal, null); + logger.info("certificate tool output:\n{}", terminal.getOutput()); + assertEquals(numberOfInstances, certInfos.size()); + for (CertificateInformation certInfo : certInfos) { + String name = certInfo.name.originalName; + Map instanceInfo = instanceInput.get(name); + assertNotNull("did not find map for " + name, instanceInfo); + List expectedIps = Arrays.asList(Strings.commaDelimitedListToStringArray(instanceInfo.get("ip"))); + List expectedDns = Arrays.asList(Strings.commaDelimitedListToStringArray(instanceInfo.get("dns"))); + assertEquals(expectedIps, certInfo.ipAddresses); + assertEquals(expectedDns, certInfo.dnsNames); + instanceInput.remove(name); + } + assertEquals(0, instanceInput.size()); + final String output = terminal.getOutput(); + assertTrue("Output: " + output, output.isEmpty()); + } + + public void testParsingFile() 
throws Exception { + Path tempDir = initTempDir(); + Path instanceFile = writeInstancesTo(tempDir.resolve("instances.yml")); + Collection certInfos = CertificateGenerateTool.parseFile(instanceFile); + assertEquals(4, certInfos.size()); + + Map certInfosMap = + certInfos.stream().collect(Collectors.toMap((c) -> c.name.originalName, Function.identity())); + CertificateInformation certInfo = certInfosMap.get("node1"); + assertEquals(Collections.singletonList("127.0.0.1"), certInfo.ipAddresses); + assertEquals(Collections.singletonList("localhost"), certInfo.dnsNames); + assertEquals(Collections.emptyList(), certInfo.commonNames); + assertEquals("node1", certInfo.name.filename); + + certInfo = certInfosMap.get("node2"); + assertEquals(Collections.singletonList("::1"), certInfo.ipAddresses); + assertEquals(Collections.emptyList(), certInfo.dnsNames); + assertEquals(Collections.singletonList("node2.elasticsearch"), certInfo.commonNames); + assertEquals("node2", certInfo.name.filename); + + certInfo = certInfosMap.get("node3"); + assertEquals(Collections.emptyList(), certInfo.ipAddresses); + assertEquals(Collections.emptyList(), certInfo.dnsNames); + assertEquals(Collections.emptyList(), certInfo.commonNames); + assertEquals("node3", certInfo.name.filename); + + certInfo = certInfosMap.get("CN=different value"); + assertEquals(Collections.emptyList(), certInfo.ipAddresses); + assertEquals(Collections.singletonList("node4.mydomain.com"), certInfo.dnsNames); + assertEquals(Collections.emptyList(), certInfo.commonNames); + assertEquals("different file", certInfo.name.filename); + } + + public void testGeneratingCsr() throws Exception { + Path tempDir = initTempDir(); + Path outputFile = tempDir.resolve("out.zip"); + Path instanceFile = writeInstancesTo(tempDir.resolve("instances.yml")); + Collection certInfos = CertificateGenerateTool.parseFile(instanceFile); + assertEquals(4, certInfos.size()); + + assertFalse(Files.exists(outputFile)); + CertificateGenerateTool.generateAndWriteCsrs(outputFile, certInfos, randomFrom(1024, 2048)); + assertTrue(Files.exists(outputFile)); + + Set perms = Files.getPosixFilePermissions(outputFile); + assertTrue(perms.toString(), perms.contains(PosixFilePermission.OWNER_READ)); + assertTrue(perms.toString(), perms.contains(PosixFilePermission.OWNER_WRITE)); + assertEquals(perms.toString(), 2, perms.size()); + + FileSystem fileSystem = FileSystems.newFileSystem(new URI("jar:" + outputFile.toUri()), Collections.emptyMap()); + Path zipRoot = fileSystem.getPath("/"); + + assertFalse(Files.exists(zipRoot.resolve("ca"))); + for (CertificateInformation certInfo : certInfos) { + String filename = certInfo.name.filename; + assertTrue(Files.exists(zipRoot.resolve(filename))); + final Path csr = zipRoot.resolve(filename + "/" + filename + ".csr"); + assertTrue(Files.exists(csr)); + assertTrue(Files.exists(zipRoot.resolve(filename + "/" + filename + ".key"))); + PKCS10CertificationRequest request = readCertificateRequest(csr); + assertEquals(certInfo.name.x500Principal.getName(), request.getSubject().toString()); + Attribute[] extensionsReq = request.getAttributes(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest); + if (certInfo.ipAddresses.size() > 0 || certInfo.dnsNames.size() > 0) { + assertEquals(1, extensionsReq.length); + Extensions extensions = Extensions.getInstance(extensionsReq[0].getAttributeValues()[0]); + GeneralNames subjAltNames = GeneralNames.fromExtensions(extensions, Extension.subjectAlternativeName); + assertSubjAltNames(subjAltNames, certInfo); + } else { + 
assertEquals(0, extensionsReq.length); + } + } + } + + public void testGeneratingSignedCertificates() throws Exception { + Path tempDir = initTempDir(); + Path outputFile = tempDir.resolve("out.zip"); + Path instanceFile = writeInstancesTo(tempDir.resolve("instances.yml")); + Collection certInfos = CertificateGenerateTool.parseFile(instanceFile); + assertEquals(4, certInfos.size()); + + final int keysize = randomFrom(1024, 2048); + final int days = randomIntBetween(1, 1024); + KeyPair keyPair = CertUtils.generateKeyPair(keysize); + X509Certificate caCert = CertUtils.generateCACertificate(new X500Principal("CN=test ca"), keyPair, days); + + final boolean generatedCa = randomBoolean(); + final char[] keyPassword = randomBoolean() ? SecuritySettingsSourceField.TEST_PASSWORD.toCharArray() : null; + final char[] pkcs12Password = randomBoolean() ? randomAlphaOfLengthBetween(1, 12).toCharArray() : null; + assertFalse(Files.exists(outputFile)); + CAInfo caInfo = new CAInfo(caCert, keyPair.getPrivate(), generatedCa, keyPassword); + CertificateGenerateTool.generateAndWriteSignedCertificates(outputFile, certInfos, caInfo, keysize, days, pkcs12Password); + assertTrue(Files.exists(outputFile)); + + Set perms = Files.getPosixFilePermissions(outputFile); + assertTrue(perms.toString(), perms.contains(PosixFilePermission.OWNER_READ)); + assertTrue(perms.toString(), perms.contains(PosixFilePermission.OWNER_WRITE)); + assertEquals(perms.toString(), 2, perms.size()); + + FileSystem fileSystem = FileSystems.newFileSystem(new URI("jar:" + outputFile.toUri()), Collections.emptyMap()); + Path zipRoot = fileSystem.getPath("/"); + + if (generatedCa) { + assertTrue(Files.exists(zipRoot.resolve("ca"))); + assertTrue(Files.exists(zipRoot.resolve("ca").resolve("ca.crt"))); + assertTrue(Files.exists(zipRoot.resolve("ca").resolve("ca.key"))); + // check the CA cert + try (Reader reader = Files.newBufferedReader(zipRoot.resolve("ca").resolve("ca.crt"))) { + X509Certificate parsedCaCert = readX509Certificate(reader); + assertThat(parsedCaCert.getSubjectX500Principal().getName(), containsString("test ca")); + assertEquals(caCert, parsedCaCert); + long daysBetween = ChronoUnit.DAYS.between(caCert.getNotBefore().toInstant(), caCert.getNotAfter().toInstant()); + assertEquals(days, (int) daysBetween); + } + + // check the CA key + if (keyPassword != null) { + try (Reader reader = Files.newBufferedReader(zipRoot.resolve("ca").resolve("ca.key"))) { + PEMParser pemParser = new PEMParser(reader); + Object parsed = pemParser.readObject(); + assertThat(parsed, instanceOf(PEMEncryptedKeyPair.class)); + char[] zeroChars = new char[keyPassword.length]; + Arrays.fill(zeroChars, (char) 0); + assertArrayEquals(zeroChars, keyPassword); + } + } + + try (Reader reader = Files.newBufferedReader(zipRoot.resolve("ca").resolve("ca.key"))) { + PrivateKey privateKey = CertUtils.readPrivateKey(reader, () -> keyPassword != null ? 
+ SecuritySettingsSourceField.TEST_PASSWORD.toCharArray() : null); + assertEquals(caInfo.privateKey, privateKey); + } + } else { + assertFalse(Files.exists(zipRoot.resolve("ca"))); + } + + for (CertificateInformation certInfo : certInfos) { + String filename = certInfo.name.filename; + assertTrue(Files.exists(zipRoot.resolve(filename))); + final Path cert = zipRoot.resolve(filename + "/" + filename + ".crt"); + assertTrue(Files.exists(cert)); + assertTrue(Files.exists(zipRoot.resolve(filename + "/" + filename + ".key"))); + final Path p12 = zipRoot.resolve(filename + "/" + filename + ".p12"); + try (Reader reader = Files.newBufferedReader(cert)) { + X509Certificate certificate = readX509Certificate(reader); + assertEquals(certInfo.name.x500Principal.toString(), certificate.getSubjectX500Principal().getName()); + final int sanCount = certInfo.ipAddresses.size() + certInfo.dnsNames.size() + certInfo.commonNames.size(); + if (sanCount == 0) { + assertNull(certificate.getSubjectAlternativeNames()); + } else { + X509CertificateHolder x509CertHolder = new X509CertificateHolder(certificate.getEncoded()); + GeneralNames subjAltNames = + GeneralNames.fromExtensions(x509CertHolder.getExtensions(), Extension.subjectAlternativeName); + assertSubjAltNames(subjAltNames, certInfo); + } + if (pkcs12Password != null) { + assertThat(p12, pathExists(p12)); + try (InputStream in = Files.newInputStream(p12)) { + final KeyStore ks = KeyStore.getInstance("PKCS12"); + ks.load(in, pkcs12Password); + final Certificate p12Certificate = ks.getCertificate(certInfo.name.originalName); + assertThat("Certificate " + certInfo.name, p12Certificate, notNullValue()); + assertThat(p12Certificate, equalTo(certificate)); + final Key key = ks.getKey(certInfo.name.originalName, pkcs12Password); + assertThat(key, notNullValue()); + } + } else { + assertThat(p12, not(pathExists(p12))); + } + } + } + } + + public void testGetCAInfo() throws Exception { + Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); + Path testNodeCertPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"); + Path testNodeKeyPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem"); + final boolean passwordPrompt = randomBoolean(); + MockTerminal terminal = new MockTerminal(); + if (passwordPrompt) { + terminal.addSecretInput("testnode"); + } + + final int days = randomIntBetween(1, 1024); + CAInfo caInfo = CertificateGenerateTool.getCAInfo(terminal, "CN=foo", testNodeCertPath.toString(), testNodeKeyPath.toString(), + passwordPrompt ? 
null : "testnode".toCharArray(), passwordPrompt, env, randomFrom(1024, 2048), days); + assertTrue(terminal.getOutput().isEmpty()); + assertEquals(caInfo.caCert.getSubjectX500Principal().getName(), + "CN=Elasticsearch Test Node,OU=elasticsearch,O=org"); + assertThat(caInfo.privateKey.getAlgorithm(), containsString("RSA")); + assertEquals(2048, ((RSAKey) caInfo.privateKey).getModulus().bitLength()); + assertFalse(caInfo.generated); + long daysBetween = ChronoUnit.DAYS.between(caInfo.caCert.getNotBefore().toInstant(), caInfo.caCert.getNotAfter().toInstant()); + assertEquals(1460L, daysBetween); + + // test generation + final boolean passwordProtected = randomBoolean(); + final char[] password; + if (passwordPrompt && passwordProtected) { + password = null; + terminal.addSecretInput("testnode"); + } else { + password = "testnode".toCharArray(); + } + final int keysize = randomFrom(1024, 2048); + caInfo = CertificateGenerateTool.getCAInfo(terminal, "CN=foo bar", null, null, password, passwordProtected && passwordPrompt, env, + keysize, days); + assertTrue(terminal.getOutput().isEmpty()); + assertThat(caInfo.caCert, instanceOf(X509Certificate.class)); + assertEquals(caInfo.caCert.getSubjectX500Principal().getName(), "CN=foo bar"); + assertThat(caInfo.privateKey.getAlgorithm(), containsString("RSA")); + assertTrue(caInfo.generated); + assertEquals(keysize, ((RSAKey) caInfo.privateKey).getModulus().bitLength()); + daysBetween = ChronoUnit.DAYS.between(caInfo.caCert.getNotBefore().toInstant(), caInfo.caCert.getNotAfter().toInstant()); + assertEquals(days, (int) daysBetween); + } + + public void testNameValues() throws Exception { + // good name + Name name = Name.fromUserProvidedName("my instance", "my instance"); + assertEquals("my instance", name.originalName); + assertNull(name.error); + assertEquals("CN=my instance", name.x500Principal.getName()); + assertEquals("my instance", name.filename); + + // too long + String userProvidedName = randomAlphaOfLength(CertificateGenerateTool.MAX_FILENAME_LENGTH + 1); + name = Name.fromUserProvidedName(userProvidedName, userProvidedName); + assertEquals(userProvidedName, name.originalName); + assertThat(name.error, containsString("valid filename")); + + // too short + name = Name.fromUserProvidedName("", ""); + assertEquals("", name.originalName); + assertThat(name.error, containsString("valid filename")); + assertEquals("CN=", name.x500Principal.getName()); + assertNull(name.filename); + + // invalid characters only + userProvidedName = "<>|<>*|?\"\\"; + name = Name.fromUserProvidedName(userProvidedName, userProvidedName); + assertEquals(userProvidedName, name.originalName); + assertThat(name.error, containsString("valid DN")); + assertNull(name.x500Principal); + assertNull(name.filename); + + // invalid for file but DN ok + userProvidedName = "*"; + name = Name.fromUserProvidedName(userProvidedName, userProvidedName); + assertEquals(userProvidedName, name.originalName); + assertThat(name.error, containsString("valid filename")); + assertEquals("CN=" + userProvidedName, name.x500Principal.getName()); + assertNull(name.filename); + + // invalid with valid chars for filename + userProvidedName = "*.mydomain.com"; + name = Name.fromUserProvidedName(userProvidedName, userProvidedName); + assertEquals(userProvidedName, name.originalName); + assertThat(name.error, containsString("valid filename")); + assertEquals("CN=" + userProvidedName, name.x500Principal.getName()); + + // valid but could create hidden file/dir so it is not allowed + userProvidedName = 
".mydomain.com"; + name = Name.fromUserProvidedName(userProvidedName, userProvidedName); + assertEquals(userProvidedName, name.originalName); + assertThat(name.error, containsString("valid filename")); + assertEquals("CN=" + userProvidedName, name.x500Principal.getName()); + } + + private PKCS10CertificationRequest readCertificateRequest(Path path) throws Exception { + try (Reader reader = Files.newBufferedReader(path); + PEMParser pemParser = new PEMParser(reader)) { + Object object = pemParser.readObject(); + assertThat(object, instanceOf(PKCS10CertificationRequest.class)); + return (PKCS10CertificationRequest) object; + } + } + + private X509Certificate readX509Certificate(Reader reader) throws Exception { + List list = new ArrayList<>(1); + CertUtils.readCertificates(reader, list, CertificateFactory.getInstance("X.509")); + assertEquals(1, list.size()); + assertThat(list.get(0), instanceOf(X509Certificate.class)); + return (X509Certificate) list.get(0); + } + + private void assertSubjAltNames(GeneralNames subjAltNames, CertificateInformation certInfo) throws Exception { + final int expectedCount = certInfo.ipAddresses.size() + certInfo.dnsNames.size() + certInfo.commonNames.size(); + assertEquals(expectedCount, subjAltNames.getNames().length); + Collections.sort(certInfo.dnsNames); + Collections.sort(certInfo.ipAddresses); + for (GeneralName generalName : subjAltNames.getNames()) { + if (generalName.getTagNo() == GeneralName.dNSName) { + String dns = ((ASN1String) generalName.getName()).getString(); + assertTrue(certInfo.dnsNames.stream().anyMatch(dns::equals)); + } else if (generalName.getTagNo() == GeneralName.iPAddress) { + byte[] ipBytes = DEROctetString.getInstance(generalName.getName()).getOctets(); + String ip = NetworkAddress.format(InetAddress.getByAddress(ipBytes)); + assertTrue(certInfo.ipAddresses.stream().anyMatch(ip::equals)); + } else if (generalName.getTagNo() == GeneralName.otherName) { + ASN1Sequence seq = ASN1Sequence.getInstance(generalName.getName()); + assertThat(seq.size(), equalTo(2)); + assertThat(seq.getObjectAt(0), instanceOf(ASN1ObjectIdentifier.class)); + assertThat(seq.getObjectAt(0).toString(), equalTo(CertUtils.CN_OID)); + assertThat(seq.getObjectAt(1), instanceOf(DERTaggedObject.class)); + DERTaggedObject taggedName = (DERTaggedObject) seq.getObjectAt(1); + assertThat(taggedName.getTagNo(), equalTo(0)); + assertThat(taggedName.getObject(), instanceOf(ASN1String.class)); + assertThat(taggedName.getObject().toString(), Matchers.isIn(certInfo.commonNames)); + } else { + fail("unknown general name with tag " + generalName.getTagNo()); + } + } + } + + /** + * Gets a random name that is valid for certificate generation. There are some cases where the random value could match one of the + * reserved names like ca, so this method allows us to avoid these issues. 
+ */ + private String getValidRandomInstanceName() { + String name; + boolean valid; + do { + name = randomAlphaOfLengthBetween(1, 32); + valid = Name.fromUserProvidedName(name, name).error == null; + } while (valid == false); + return name; + } + + /** + * Writes the description of instances to a given {@link Path} + */ + private Path writeInstancesTo(Path path) throws IOException { + Iterable instances = Arrays.asList( + "instances:", + " - name: \"node1\"", + " ip:", + " - \"127.0.0.1\"", + " dns: \"localhost\"", + " - name: \"node2\"", + " filename: \"node2\"", + " ip: \"::1\"", + " cn:", + " - \"node2.elasticsearch\"", + " - name: \"node3\"", + " filename: \"node3\"", + " - name: \"CN=different value\"", + " filename: \"different file\"", + " dns:", + " - \"node4.mydomain.com\""); + + return Files.write(path, instances, StandardCharsets.UTF_8); + } + + @SuppressForbidden(reason = "resolve paths against CWD for a CLI tool") + private static Path resolvePath(String path) { + return PathUtils.get(path).toAbsolutePath(); + } +} diff --git a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/core/ssl/CertificateToolTests.java b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/core/ssl/CertificateToolTests.java new file mode 100644 index 0000000000000..cad254207a65c --- /dev/null +++ b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/core/ssl/CertificateToolTests.java @@ -0,0 +1,928 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ssl; + +import com.google.common.jimfs.Configuration; +import com.google.common.jimfs.Jimfs; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.bouncycastle.asn1.ASN1ObjectIdentifier; +import org.bouncycastle.asn1.ASN1Sequence; +import org.bouncycastle.asn1.ASN1String; +import org.bouncycastle.asn1.ASN1TaggedObject; +import org.bouncycastle.asn1.DEROctetString; +import org.bouncycastle.asn1.pkcs.Attribute; +import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers; +import org.bouncycastle.asn1.x509.Extension; +import org.bouncycastle.asn1.x509.Extensions; +import org.bouncycastle.asn1.x509.GeneralName; +import org.bouncycastle.asn1.x509.GeneralNames; +import org.bouncycastle.cert.X509CertificateHolder; +import org.bouncycastle.openssl.PEMEncryptedKeyPair; +import org.bouncycastle.openssl.PEMParser; +import org.bouncycastle.pkcs.PKCS10CertificationRequest; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.test.TestMatchers; +import org.elasticsearch.xpack.core.ssl.CertificateTool.CAInfo; +import org.elasticsearch.xpack.core.ssl.CertificateTool.CertificateAuthorityCommand; +import 
org.elasticsearch.xpack.core.ssl.CertificateTool.CertificateCommand; +import org.elasticsearch.xpack.core.ssl.CertificateTool.CertificateInformation; +import org.elasticsearch.xpack.core.ssl.CertificateTool.GenerateCertificateCommand; +import org.elasticsearch.xpack.core.ssl.CertificateTool.Name; +import org.hamcrest.Matchers; +import org.junit.After; + +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.TrustManagerFactory; +import javax.net.ssl.X509ExtendedKeyManager; +import javax.net.ssl.X509ExtendedTrustManager; +import javax.security.auth.x500.X500Principal; +import java.io.IOException; +import java.io.Reader; +import java.net.InetAddress; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileSystem; +import java.nio.file.FileSystems; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.attribute.PosixFilePermission; +import java.security.Key; +import java.security.KeyPair; +import java.security.KeyStore; +import java.security.Principal; +import java.security.PrivateKey; +import java.security.cert.Certificate; +import java.security.cert.CertificateFactory; +import java.security.cert.X509Certificate; +import java.security.interfaces.RSAKey; +import java.time.temporal.ChronoUnit; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.nullValue; + +/** + * Unit tests for the tool used to simplify SSL certificate generation + */ +public class CertificateToolTests extends ESTestCase { + + private FileSystem jimfs; + + private Path initTempDir() throws Exception { + Configuration conf = Configuration.unix().toBuilder().setAttributeViews("posix").build(); + jimfs = Jimfs.newFileSystem(conf); + Path tempDir = jimfs.getPath("temp"); + IOUtils.rm(tempDir); + Files.createDirectories(tempDir); + return tempDir; + } + + @After + public void tearDown() throws Exception { + IOUtils.close(jimfs); + super.tearDown(); + } + + public void testOutputDirectory() throws Exception { + Path outputDir = createTempDir(); + Path outputFile = outputDir.resolve("certs.zip"); + MockTerminal terminal = new MockTerminal(); + + // test with a user provided file + Path resolvedOutputFile = CertificateCommand.resolveOutputPath(terminal, outputFile.toString(), "something"); + assertEquals(outputFile, resolvedOutputFile); + assertTrue(terminal.getOutput().isEmpty()); + + // test without a user provided file, with user input (prompted) + Path userPromptedOutputFile = outputDir.resolve("csr"); + assertFalse(Files.exists(userPromptedOutputFile)); + terminal.addTextInput(userPromptedOutputFile.toString()); + resolvedOutputFile = CertificateCommand.resolveOutputPath(terminal, (String) null, "default.zip"); + assertEquals(userPromptedOutputFile, resolvedOutputFile); + assertTrue(terminal.getOutput().isEmpty()); + + // test with empty user input + String defaultFilename = randomAlphaOfLengthBetween(1, 10); + Path expectedDefaultPath = resolvePath(defaultFilename); + terminal.addTextInput(""); + resolvedOutputFile 
= CertificateCommand.resolveOutputPath(terminal, (String) null, defaultFilename); + assertEquals(expectedDefaultPath, resolvedOutputFile); + assertTrue(terminal.getOutput().isEmpty()); + } + + public void testPromptingForInstanceInformation() throws Exception { + final int numberOfInstances = scaledRandomIntBetween(1, 12); + Map> instanceInput = new HashMap<>(numberOfInstances); + for (int i = 0; i < numberOfInstances; i++) { + final String name; + while (true) { + String randomName = getValidRandomInstanceName(); + if (instanceInput.containsKey(randomName) == false) { + name = randomName; + break; + } + } + Map instanceInfo = new HashMap<>(); + instanceInput.put(name, instanceInfo); + instanceInfo.put("ip", randomFrom("127.0.0.1", "::1", "192.168.1.1,::1", "")); + instanceInfo.put("dns", randomFrom("localhost", "localhost.localdomain", "localhost,myhost", "")); + logger.info("instance [{}] name [{}] [{}]", i, name, instanceInfo); + } + + int count = 0; + MockTerminal terminal = new MockTerminal(); + for (Entry> entry : instanceInput.entrySet()) { + terminal.addTextInput(entry.getKey()); + terminal.addTextInput(""); + terminal.addTextInput(entry.getValue().get("ip")); + terminal.addTextInput(entry.getValue().get("dns")); + count++; + if (count == numberOfInstances) { + terminal.addTextInput("n"); + } else { + terminal.addTextInput("y"); + } + } + + Collection certInfos = CertificateCommand.readMultipleCertificateInformation(terminal); + logger.info("certificate tool output:\n{}", terminal.getOutput()); + assertEquals(numberOfInstances, certInfos.size()); + for (CertificateInformation certInfo : certInfos) { + String name = certInfo.name.originalName; + Map instanceInfo = instanceInput.get(name); + assertNotNull("did not find map for " + name, instanceInfo); + List expectedIps = Arrays.asList(Strings.commaDelimitedListToStringArray(instanceInfo.get("ip"))); + List expectedDns = Arrays.asList(Strings.commaDelimitedListToStringArray(instanceInfo.get("dns"))); + assertEquals(expectedIps, certInfo.ipAddresses); + assertEquals(expectedDns, certInfo.dnsNames); + instanceInput.remove(name); + } + assertEquals(0, instanceInput.size()); + final String output = terminal.getOutput(); + assertTrue("Output: " + output, output.isEmpty()); + } + + public void testParsingFile() throws Exception { + Path tempDir = initTempDir(); + Path instanceFile = writeInstancesTo(tempDir.resolve("instances.yml")); + Collection certInfos = CertificateTool.parseFile(instanceFile); + assertEquals(4, certInfos.size()); + + Map certInfosMap = + certInfos.stream().collect(Collectors.toMap((c) -> c.name.originalName, Function.identity())); + CertificateInformation certInfo = certInfosMap.get("node1"); + assertEquals(Collections.singletonList("127.0.0.1"), certInfo.ipAddresses); + assertEquals(Collections.singletonList("localhost"), certInfo.dnsNames); + assertEquals(Collections.emptyList(), certInfo.commonNames); + assertEquals("node1", certInfo.name.filename); + + certInfo = certInfosMap.get("node2"); + assertEquals(Collections.singletonList("::1"), certInfo.ipAddresses); + assertEquals(Collections.emptyList(), certInfo.dnsNames); + assertEquals(Collections.singletonList("node2.elasticsearch"), certInfo.commonNames); + assertEquals("node2", certInfo.name.filename); + + certInfo = certInfosMap.get("node3"); + assertEquals(Collections.emptyList(), certInfo.ipAddresses); + assertEquals(Collections.emptyList(), certInfo.dnsNames); + assertEquals(Collections.emptyList(), certInfo.commonNames); + assertEquals("node3", 
certInfo.name.filename); + + certInfo = certInfosMap.get("CN=different value"); + assertEquals(Collections.emptyList(), certInfo.ipAddresses); + assertEquals(Collections.singletonList("node4.mydomain.com"), certInfo.dnsNames); + assertEquals(Collections.emptyList(), certInfo.commonNames); + assertEquals("different file", certInfo.name.filename); + } + + public void testParsingFileWithInvalidDetails() throws Exception { + Path tempDir = initTempDir(); + Path instanceFile = writeInvalidInstanceInformation(tempDir.resolve("instances-invalid.yml")); + final MockTerminal terminal = new MockTerminal(); + final UserException exception = expectThrows(UserException.class, + () -> CertificateTool.parseAndValidateFile(terminal, instanceFile)); + assertThat(exception.getMessage(), containsString("invalid configuration")); + assertThat(exception.getMessage(), containsString(instanceFile.toString())); + assertThat(terminal.getOutput(), containsString("THIS=not a,valid DN")); + assertThat(terminal.getOutput(), containsString("could not be converted to a valid DN")); + } + + public void testGeneratingCsr() throws Exception { + Path tempDir = initTempDir(); + Path outputFile = tempDir.resolve("out.zip"); + Path instanceFile = writeInstancesTo(tempDir.resolve("instances.yml")); + Collection certInfos = CertificateTool.parseFile(instanceFile); + assertEquals(4, certInfos.size()); + + assertFalse(Files.exists(outputFile)); + int keySize = randomFrom(1024, 2048); + + new CertificateTool.SigningRequestCommand().generateAndWriteCsrs(outputFile, keySize, certInfos); + assertTrue(Files.exists(outputFile)); + + Set perms = Files.getPosixFilePermissions(outputFile); + assertTrue(perms.toString(), perms.contains(PosixFilePermission.OWNER_READ)); + assertTrue(perms.toString(), perms.contains(PosixFilePermission.OWNER_WRITE)); + assertEquals(perms.toString(), 2, perms.size()); + + FileSystem fileSystem = FileSystems.newFileSystem(new URI("jar:" + outputFile.toUri()), Collections.emptyMap()); + Path zipRoot = fileSystem.getPath("/"); + + assertFalse(Files.exists(zipRoot.resolve("ca"))); + for (CertificateInformation certInfo : certInfos) { + String filename = certInfo.name.filename; + assertTrue(Files.exists(zipRoot.resolve(filename))); + final Path csr = zipRoot.resolve(filename + "/" + filename + ".csr"); + assertTrue(Files.exists(csr)); + assertTrue(Files.exists(zipRoot.resolve(filename + "/" + filename + ".key"))); + PKCS10CertificationRequest request = readCertificateRequest(csr); + assertEquals(certInfo.name.x500Principal.getName(), request.getSubject().toString()); + Attribute[] extensionsReq = request.getAttributes(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest); + if (certInfo.ipAddresses.size() > 0 || certInfo.dnsNames.size() > 0) { + assertEquals(1, extensionsReq.length); + Extensions extensions = Extensions.getInstance(extensionsReq[0].getAttributeValues()[0]); + GeneralNames subjAltNames = GeneralNames.fromExtensions(extensions, Extension.subjectAlternativeName); + assertSubjAltNames(subjAltNames, certInfo); + } else { + assertEquals(0, extensionsReq.length); + } + } + } + + public void testGeneratingSignedPemCertificates() throws Exception { + Path tempDir = initTempDir(); + Path outputFile = tempDir.resolve("out.zip"); + Path instanceFile = writeInstancesTo(tempDir.resolve("instances.yml")); + Collection certInfos = CertificateTool.parseFile(instanceFile); + assertEquals(4, certInfos.size()); + + int keySize = randomFrom(1024, 2048); + int days = randomIntBetween(1, 1024); + + KeyPair keyPair = 
CertUtils.generateKeyPair(keySize); + X509Certificate caCert = CertUtils.generateCACertificate(new X500Principal("CN=test ca"), keyPair, days); + + final boolean generatedCa = randomBoolean(); + final boolean keepCaKey = generatedCa && randomBoolean(); + final String keyPassword = randomBoolean() ? SecuritySettingsSourceField.TEST_PASSWORD : null; + + assertFalse(Files.exists(outputFile)); + CAInfo caInfo = new CAInfo(caCert, keyPair.getPrivate(), generatedCa, keyPassword == null ? null : keyPassword.toCharArray()); + final GenerateCertificateCommand command = new GenerateCertificateCommand(); + List args = CollectionUtils.arrayAsArrayList("-keysize", String.valueOf(keySize), "-days", String.valueOf(days), "-pem"); + if (keyPassword != null) { + args.add("-pass"); + args.add(keyPassword); + } + if (keepCaKey) { + args.add("-keep-ca-key"); + } + final OptionSet options = command.getParser().parse(Strings.toStringArray(args)); + + command.generateAndWriteSignedCertificates(outputFile, true, options, certInfos, caInfo, null); + assertTrue(Files.exists(outputFile)); + + Set perms = Files.getPosixFilePermissions(outputFile); + assertTrue(perms.toString(), perms.contains(PosixFilePermission.OWNER_READ)); + assertTrue(perms.toString(), perms.contains(PosixFilePermission.OWNER_WRITE)); + assertEquals(perms.toString(), 2, perms.size()); + + FileSystem fileSystem = FileSystems.newFileSystem(new URI("jar:" + outputFile.toUri()), Collections.emptyMap()); + Path zipRoot = fileSystem.getPath("/"); + + if (generatedCa) { + assertTrue(Files.exists(zipRoot.resolve("ca"))); + assertTrue(Files.exists(zipRoot.resolve("ca").resolve("ca.crt"))); + // check the CA cert + try (Reader reader = Files.newBufferedReader(zipRoot.resolve("ca").resolve("ca.crt"))) { + X509Certificate parsedCaCert = readX509Certificate(reader); + assertThat(parsedCaCert.getSubjectX500Principal().getName(), containsString("test ca")); + assertEquals(caCert, parsedCaCert); + long daysBetween = getDurationInDays(caCert); + assertEquals(days, (int) daysBetween); + } + + if (keepCaKey) { + assertTrue(Files.exists(zipRoot.resolve("ca").resolve("ca.key"))); + // check the CA key + if (keyPassword != null) { + try (Reader reader = Files.newBufferedReader(zipRoot.resolve("ca").resolve("ca.key"))) { + PEMParser pemParser = new PEMParser(reader); + Object parsed = pemParser.readObject(); + assertThat(parsed, instanceOf(PEMEncryptedKeyPair.class)); + char[] zeroChars = new char[caInfo.password.length]; + Arrays.fill(zeroChars, (char) 0); + assertArrayEquals(zeroChars, caInfo.password); + } + } + + try (Reader reader = Files.newBufferedReader(zipRoot.resolve("ca").resolve("ca.key"))) { + PrivateKey privateKey = CertUtils.readPrivateKey(reader, () -> keyPassword != null ? 
keyPassword.toCharArray() : null); + assertEquals(caInfo.certAndKey.key, privateKey); + } + } + } else { + assertFalse(Files.exists(zipRoot.resolve("ca"))); + } + + for (CertificateInformation certInfo : certInfos) { + String filename = certInfo.name.filename; + assertTrue(Files.exists(zipRoot.resolve(filename))); + final Path cert = zipRoot.resolve(filename + "/" + filename + ".crt"); + assertTrue(Files.exists(cert)); + assertTrue(Files.exists(zipRoot.resolve(filename + "/" + filename + ".key"))); + final Path p12 = zipRoot.resolve(filename + "/" + filename + ".p12"); + try (Reader reader = Files.newBufferedReader(cert)) { + X509Certificate certificate = readX509Certificate(reader); + assertEquals(certInfo.name.x500Principal.toString(), certificate.getSubjectX500Principal().getName()); + final int sanCount = certInfo.ipAddresses.size() + certInfo.dnsNames.size() + certInfo.commonNames.size(); + if (sanCount == 0) { + assertNull(certificate.getSubjectAlternativeNames()); + } else { + X509CertificateHolder x509CertHolder = new X509CertificateHolder(certificate.getEncoded()); + GeneralNames subjAltNames = + GeneralNames.fromExtensions(x509CertHolder.getExtensions(), Extension.subjectAlternativeName); + assertSubjAltNames(subjAltNames, certInfo); + } + assertThat(p12, Matchers.not(TestMatchers.pathExists(p12))); + } + } + } + + public void testGetCAInfo() throws Exception { + Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); + Path testNodeCertPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"); + Path testNodeKeyPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem"); + final boolean passwordPrompt = randomBoolean(); + MockTerminal terminal = new MockTerminal(); + if (passwordPrompt) { + terminal.addSecretInput("testnode"); + } + + final int keySize = randomFrom(1024, 2048); + final int days = randomIntBetween(1, 1024); + String caPassword = passwordPrompt ? 
null : "testnode"; + + List args = CollectionUtils.arrayAsArrayList( + "-keysize", String.valueOf(keySize), + "-days", String.valueOf(days), + "-pem", + "-ca-cert", testNodeCertPath.toString(), + "-ca-key", testNodeKeyPath.toString()); + + args.add("-ca-pass"); + if (caPassword != null) { + args.add(caPassword); + } + + final GenerateCertificateCommand command = new GenerateCertificateCommand(); + + OptionSet options = command.getParser().parse(Strings.toStringArray(args)); + CAInfo caInfo = command.getCAInfo(terminal, options, env); + + assertTrue(terminal.getOutput().isEmpty()); + CertificateTool.CertificateAndKey caCK = caInfo.certAndKey; + assertEquals(caCK.cert.getSubjectX500Principal().getName(), "CN=Elasticsearch Test Node,OU=elasticsearch,O=org"); + assertThat(caCK.key.getAlgorithm(), containsString("RSA")); + assertEquals(2048, ((RSAKey) caCK.key).getModulus().bitLength()); + assertFalse(caInfo.generated); + long daysBetween = getDurationInDays(caCK.cert); + assertEquals(1460L, daysBetween); + + // test generation + args = CollectionUtils.arrayAsArrayList( + "-keysize", String.valueOf(keySize), + "-days", String.valueOf(days), + "-pem", + "-ca-dn", "CN=foo bar"); + + final boolean passwordProtected = randomBoolean(); + if (passwordProtected) { + args.add("-ca-pass"); + if (passwordPrompt) { + terminal.addSecretInput("testnode"); + } else { + args.add(caPassword); + } + } + + options = command.getParser().parse(Strings.toStringArray(args)); + caInfo = command.getCAInfo(terminal, options, env); + caCK = caInfo.certAndKey; + + assertTrue(terminal.getOutput().isEmpty()); + assertThat(caCK.cert, instanceOf(X509Certificate.class)); + assertEquals(caCK.cert.getSubjectX500Principal().getName(), "CN=foo bar"); + assertThat(caCK.key.getAlgorithm(), containsString("RSA")); + assertTrue(caInfo.generated); + assertEquals(keySize, getKeySize(caCK.key)); + assertEquals(days, getDurationInDays(caCK.cert)); + } + + public void testNameValues() throws Exception { + // good name + Name name = Name.fromUserProvidedName("my instance", "my instance"); + assertEquals("my instance", name.originalName); + assertNull(name.error); + assertEquals("CN=my instance", name.x500Principal.getName()); + assertEquals("my instance", name.filename); + + // null + name = Name.fromUserProvidedName(null, ""); + assertEquals("", name.originalName); + assertThat(name.error, containsString("null")); + assertNull(name.x500Principal); + assertNull(name.filename); + + // too long + String userProvidedName = randomAlphaOfLength(CertificateTool.MAX_FILENAME_LENGTH + 1); + name = Name.fromUserProvidedName(userProvidedName, userProvidedName); + assertEquals(userProvidedName, name.originalName); + assertThat(name.error, containsString("valid filename")); + + // too short + name = Name.fromUserProvidedName("", ""); + assertEquals("", name.originalName); + assertThat(name.error, containsString("valid filename")); + assertEquals("CN=", String.valueOf(name.x500Principal)); + assertNull(name.filename); + + // invalid characters only + userProvidedName = "<>|<>*|?\"\\"; + name = Name.fromUserProvidedName(userProvidedName, userProvidedName); + assertEquals(userProvidedName, name.originalName); + assertThat(name.error, containsString("valid DN")); + assertNull(name.x500Principal); + assertNull(name.filename); + + // invalid for file but DN ok + userProvidedName = "*"; + name = Name.fromUserProvidedName(userProvidedName, userProvidedName); + assertEquals(userProvidedName, name.originalName); + assertThat(name.error, containsString("valid 
filename")); + assertEquals("CN=" + userProvidedName, name.x500Principal.getName()); + assertNull(name.filename); + + // invalid with valid chars for filename + userProvidedName = "*.mydomain.com"; + name = Name.fromUserProvidedName(userProvidedName, userProvidedName); + assertEquals(userProvidedName, name.originalName); + assertThat(name.error, containsString("valid filename")); + assertEquals("CN=" + userProvidedName, name.x500Principal.getName()); + + // valid but could create hidden file/dir so it is not allowed + userProvidedName = ".mydomain.com"; + name = Name.fromUserProvidedName(userProvidedName, userProvidedName); + assertEquals(userProvidedName, name.originalName); + assertThat(name.error, containsString("valid filename")); + assertEquals("CN=" + userProvidedName, name.x500Principal.getName()); + } + + /** + * A multi-stage test that: + * - Create a new CA + * - Uses that CA to create 2 node certificates + * - Creates a 3rd node certificate using an auto-generated CA + * - Checks that the first 2 node certificates trust one another + * - Checks that the 3rd node certificate is _not_ trusted + * - Checks that all 3 certificates have the right values based on the command line options provided during generation + */ + public void testCreateCaAndMultipleInstances() throws Exception { + final Path tempDir = initTempDir(); + + final Terminal terminal = new MockTerminal(); + Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", tempDir).build()); + + final Path caFile = tempDir.resolve("ca.p12"); + final Path node1File = tempDir.resolve("node1.p12").toAbsolutePath(); + final Path node2File = tempDir.resolve("node2.p12").toAbsolutePath(); + final Path node3File = tempDir.resolve("node3.p12").toAbsolutePath(); + + final int caKeySize = randomIntBetween(4, 8) * 512; + final int node1KeySize = randomIntBetween(2, 6) * 512; + final int node2KeySize = randomIntBetween(2, 6) * 512; + final int node3KeySize = randomIntBetween(1, 4) * 512; + + final int days = randomIntBetween(7, 1500); + + final String caPassword = randomAlphaOfLengthBetween(4, 16); + final String node1Password = randomAlphaOfLengthBetween(4, 16); + final String node2Password = randomAlphaOfLengthBetween(4, 16); + final String node3Password = randomAlphaOfLengthBetween(4, 16); + + final String node1Ip = "200.181." + randomIntBetween(1, 250) + "." + randomIntBetween(1, 250); + final String node2Ip = "200.182." + randomIntBetween(1, 250) + "." + randomIntBetween(1, 250); + final String node3Ip = "200.183." + randomIntBetween(1, 250) + "." 
+ randomIntBetween(1, 250); + + final CertificateAuthorityCommand caCommand = new CertificateAuthorityCommand() { + @Override + Path resolveOutputPath(Terminal terminal, OptionSet options, String defaultFilename) throws IOException { + // Needed to work within the security manager + return caFile; + } + }; + final OptionSet caOptions = caCommand.getParser().parse( + "-ca-dn", "CN=My ElasticSearch Cluster", + "-pass", caPassword, + "-out", caFile.toString(), + "-keysize", String.valueOf(caKeySize), + "-days", String.valueOf(days) + ); + caCommand.execute(terminal, caOptions, env); + + assertThat(caFile, TestMatchers.pathExists(caFile)); + + final GenerateCertificateCommand gen1Command = new PathAwareGenerateCertificateCommand(caFile, node1File); + final OptionSet gen1Options = gen1Command.getParser().parse( + "-ca", "", + "-ca-pass", caPassword, + "-pass", node1Password, + "-out", "", + "-keysize", String.valueOf(node1KeySize), + "-days", String.valueOf(days), + "-dns", "node01.cluster1.es.internal.corp.net", + "-ip", node1Ip, + "-name", "node01"); + gen1Command.execute(terminal, gen1Options, env); + + assertThat(node1File, TestMatchers.pathExists(node1File)); + + final GenerateCertificateCommand gen2Command = new PathAwareGenerateCertificateCommand(caFile, node2File); + final OptionSet gen2Options = gen2Command.getParser().parse( + "-ca", "", + "-ca-pass", caPassword, + "-pass", node2Password, + "-out", "", + "-keysize", String.valueOf(node2KeySize), + "-days", String.valueOf(days), + "-dns", "node02.cluster1.es.internal.corp.net", + "-ip", node2Ip, + "-name", "node02"); + gen2Command.execute(terminal, gen2Options, env); + + assertThat(node2File, TestMatchers.pathExists(node2File)); + + // Node 3 uses an auto generated CA, and therefore should not be trusted by the other nodes. 
+ final GenerateCertificateCommand gen3Command = new PathAwareGenerateCertificateCommand(null, node3File); + final OptionSet gen3Options = gen3Command.getParser().parse( + "-ca-dn", "CN=My ElasticSearch Cluster 2", + "-pass", node3Password, + "-out", "", + "-keysize", String.valueOf(node3KeySize), + "-days", String.valueOf(days), + "-dns", "node03.cluster2.es.internal.corp.net", + "-ip", node3Ip); + gen3Command.execute(terminal, gen3Options, env); + + assertThat(node3File, TestMatchers.pathExists(node3File)); + + final KeyStore node1KeyStore = CertUtils.readKeyStore(node1File, "PKCS12", node1Password.toCharArray()); + final KeyStore node2KeyStore = CertUtils.readKeyStore(node2File, "PKCS12", node2Password.toCharArray()); + final KeyStore node3KeyStore = CertUtils.readKeyStore(node3File, "PKCS12", node3Password.toCharArray()); + + checkTrust(node1KeyStore, node1Password.toCharArray(), node1KeyStore, true); + checkTrust(node1KeyStore, node1Password.toCharArray(), node2KeyStore, true); + checkTrust(node2KeyStore, node2Password.toCharArray(), node2KeyStore, true); + checkTrust(node2KeyStore, node2Password.toCharArray(), node1KeyStore, true); + checkTrust(node1KeyStore, node1Password.toCharArray(), node3KeyStore, false); + checkTrust(node3KeyStore, node3Password.toCharArray(), node2KeyStore, false); + checkTrust(node3KeyStore, node3Password.toCharArray(), node3KeyStore, true); + + final Certificate node1Cert = node1KeyStore.getCertificate("node01"); + assertThat(node1Cert, instanceOf(X509Certificate.class)); + assertSubjAltNames(node1Cert, node1Ip, "node01.cluster1.es.internal.corp.net"); + assertThat(getDurationInDays((X509Certificate) node1Cert), equalTo(days)); + final Key node1Key = node1KeyStore.getKey("node01", node1Password.toCharArray()); + assertThat(getKeySize(node1Key), equalTo(node1KeySize)); + + final Certificate node2Cert = node2KeyStore.getCertificate("node02"); + assertThat(node2Cert, instanceOf(X509Certificate.class)); + assertSubjAltNames(node2Cert, node2Ip, "node02.cluster1.es.internal.corp.net"); + assertThat(getDurationInDays((X509Certificate) node2Cert), equalTo(days)); + final Key node2Key = node2KeyStore.getKey("node02", node2Password.toCharArray()); + assertThat(getKeySize(node2Key), equalTo(node2KeySize)); + + final Certificate node3Cert = node3KeyStore.getCertificate(CertificateTool.DEFAULT_CERT_NAME); + assertThat(node3Cert, instanceOf(X509Certificate.class)); + assertSubjAltNames(node3Cert, node3Ip, "node03.cluster2.es.internal.corp.net"); + assertThat(getDurationInDays((X509Certificate) node3Cert), equalTo(days)); + final Key node3Key = node3KeyStore.getKey(CertificateTool.DEFAULT_CERT_NAME, node3Password.toCharArray()); + assertThat(getKeySize(node3Key), equalTo(node3KeySize)); + } + + + /** + * A multi-stage test that: + * - Creates a ZIP of a PKCS12 cert, with an auto-generated CA + * - Uses the generated CA to create a PEM certificate + * - Checks that the PKCS12 certificate and the PEM certificate trust one another + */ + public void testTrustBetweenPEMandPKCS12() throws Exception { + final Path tempDir = initTempDir(); + + final MockTerminal terminal = new MockTerminal(); + Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", tempDir).build()); + + final Path pkcs12Zip = tempDir.resolve("p12.zip"); + final Path pemZip = tempDir.resolve("pem.zip"); + + final int keySize = randomIntBetween(4, 8) * 512; + final int days = randomIntBetween(500, 1500); + + final String caPassword = randomAlphaOfLengthBetween(4, 16); + final String 
node1Password = randomAlphaOfLengthBetween(4, 16); + + final GenerateCertificateCommand gen1Command = new PathAwareGenerateCertificateCommand(null, pkcs12Zip); + final OptionSet gen1Options = gen1Command.getParser().parse( + "-keep-ca-key", + "-out", "", + "-keysize", String.valueOf(keySize), + "-days", String.valueOf(days), + "-dns", "node01.cluster1.es.internal.corp.net", + "-name", "node01" + ); + + terminal.addSecretInput(caPassword); + terminal.addSecretInput(node1Password); + gen1Command.execute(terminal, gen1Options, env); + + assertThat(pkcs12Zip, TestMatchers.pathExists(pkcs12Zip)); + + FileSystem zip1FS = FileSystems.newFileSystem(new URI("jar:" + pkcs12Zip.toUri()), Collections.emptyMap()); + Path zip1Root = zip1FS.getPath("/"); + + final Path caP12 = zip1Root.resolve("ca/ca.p12"); + assertThat(caP12, TestMatchers.pathExists(caP12)); + + final Path node1P12 = zip1Root.resolve("node01/node01.p12"); + assertThat(node1P12, TestMatchers.pathExists(node1P12)); + + final GenerateCertificateCommand gen2Command = new PathAwareGenerateCertificateCommand(caP12, pemZip); + final OptionSet gen2Options = gen2Command.getParser().parse( + "-ca", "", + "-out", "", + "-keysize", String.valueOf(keySize), + "-days", String.valueOf(days), + "-dns", "node02.cluster1.es.internal.corp.net", + "-name", "node02", + "-pem" + ); + + terminal.addSecretInput(caPassword); + gen2Command.execute(terminal, gen2Options, env); + + assertThat(pemZip, TestMatchers.pathExists(pemZip)); + + FileSystem zip2FS = FileSystems.newFileSystem(new URI("jar:" + pemZip.toUri()), Collections.emptyMap()); + Path zip2Root = zip2FS.getPath("/"); + + final Path ca2 = zip2Root.resolve("ca/ca.p12"); + assertThat(ca2, Matchers.not(TestMatchers.pathExists(ca2))); + + final Path node2Cert = zip2Root.resolve("node02/node02.crt"); + assertThat(node2Cert, TestMatchers.pathExists(node2Cert)); + final Path node2Key = zip2Root.resolve("node02/node02.key"); + assertThat(node2Key, TestMatchers.pathExists(node2Key)); + + final KeyStore node1KeyStore = CertUtils.readKeyStore(node1P12, "PKCS12", node1Password.toCharArray()); + final KeyStore node1TrustStore = node1KeyStore; + + final KeyStore node2KeyStore = CertUtils.getKeyStoreFromPEM(node2Cert, node2Key, new char[0]); + final KeyStore node2TrustStore = CertUtils.readKeyStore(caP12, "PKCS12", caPassword.toCharArray()); + + checkTrust(node1KeyStore, node1Password.toCharArray(), node2TrustStore, true); + checkTrust(node2KeyStore, new char[0], node1TrustStore, true); + } + + public void testZipOutputFromCommandLineOptions() throws Exception { + final Path tempDir = initTempDir(); + + final MockTerminal terminal = new MockTerminal(); + Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", tempDir).build()); + + final Path zip = tempDir.resolve("pem.zip"); + + final AtomicBoolean isZip = new AtomicBoolean(false); + final GenerateCertificateCommand genCommand = new PathAwareGenerateCertificateCommand(null, zip) { + @Override + void generateAndWriteSignedCertificates(Path output, boolean writeZipFile, OptionSet options, + Collection certs, CAInfo caInfo, + Terminal terminal) throws Exception { + isZip.set(writeZipFile); + // do nothing, all we care about is the "zip" flag + } + + @Override + Collection getCertificateInformationList(Terminal terminal, OptionSet options) throws Exception { + // Regardless of the commandline options, just work with a single cert + return Collections.singleton(new CertificateInformation("node", "node", + Collections.emptyList(), 
Collections.emptyList(), Collections.emptyList())); + } + }; + + final String optionThatTriggersZip = randomFrom("-pem", "-keep-ca-key", "-multiple", "-in=input.yml"); + final OptionSet genOptions = genCommand.getParser().parse( + "-out", "", + optionThatTriggersZip + ); + genCommand.execute(terminal, genOptions, env); + + assertThat("For command line option " + optionThatTriggersZip, isZip.get(), equalTo(true)); + } + + private int getKeySize(Key node1Key) { + assertThat(node1Key, instanceOf(RSAKey.class)); + return ((RSAKey) node1Key).getModulus().bitLength(); + } + + private int getDurationInDays(X509Certificate cert) { + return (int) ChronoUnit.DAYS.between(cert.getNotBefore().toInstant(), cert.getNotAfter().toInstant()); + } + + private void assertSubjAltNames(Certificate certificate, String ip, String dns) throws Exception { + final X509CertificateHolder holder = new X509CertificateHolder(certificate.getEncoded()); + final GeneralNames names = GeneralNames.fromExtensions(holder.getExtensions(), Extension.subjectAlternativeName); + final CertificateInformation certInfo = new CertificateInformation("n", "n", Collections.singletonList(ip), + Collections.singletonList(dns), Collections.emptyList()); + assertSubjAltNames(names, certInfo); + } + + /** + * Checks whether there are keys in {@code keyStore} that are trusted by {@code trustStore}. + */ + private void checkTrust(KeyStore keyStore, char[] keyPassword, KeyStore trustStore, boolean trust) throws Exception { + final X509ExtendedKeyManager keyManager = CertUtils.keyManager(keyStore, keyPassword, KeyManagerFactory.getDefaultAlgorithm()); + final X509ExtendedTrustManager trustManager = CertUtils.trustManager(trustStore, TrustManagerFactory.getDefaultAlgorithm()); + + final X509Certificate[] node1CertificateIssuers = trustManager.getAcceptedIssuers(); + final Principal[] trustedPrincipals = new Principal[node1CertificateIssuers.length]; + for (int i = 0; i < node1CertificateIssuers.length; i++) { + trustedPrincipals[i] = node1CertificateIssuers[i].getIssuerX500Principal(); + } + final String[] keyAliases = keyManager.getClientAliases("RSA", trustedPrincipals); + if (trust) { + assertThat(keyAliases, arrayWithSize(1)); + trustManager.checkClientTrusted(keyManager.getCertificateChain(keyAliases[0]), "RSA"); + } else { + assertThat(keyAliases, nullValue()); + } + } + + private PKCS10CertificationRequest readCertificateRequest(Path path) throws Exception { + try (Reader reader = Files.newBufferedReader(path); + PEMParser pemParser = new PEMParser(reader)) { + Object object = pemParser.readObject(); + assertThat(object, instanceOf(PKCS10CertificationRequest.class)); + return (PKCS10CertificationRequest) object; + } + } + + private X509Certificate readX509Certificate(Reader reader) throws Exception { + List list = new ArrayList<>(1); + CertUtils.readCertificates(reader, list, CertificateFactory.getInstance("X.509")); + assertEquals(1, list.size()); + assertThat(list.get(0), instanceOf(X509Certificate.class)); + return (X509Certificate) list.get(0); + } + + private void assertSubjAltNames(GeneralNames subjAltNames, CertificateInformation certInfo) throws Exception { + final int expectedCount = certInfo.ipAddresses.size() + certInfo.dnsNames.size() + certInfo.commonNames.size(); + assertEquals(expectedCount, subjAltNames.getNames().length); + Collections.sort(certInfo.dnsNames); + Collections.sort(certInfo.ipAddresses); + for (GeneralName generalName : subjAltNames.getNames()) { + if (generalName.getTagNo() == GeneralName.dNSName) { + String 
dns = ((ASN1String) generalName.getName()).getString(); + assertTrue(certInfo.dnsNames.stream().anyMatch(dns::equals)); + } else if (generalName.getTagNo() == GeneralName.iPAddress) { + byte[] ipBytes = DEROctetString.getInstance(generalName.getName()).getOctets(); + String ip = NetworkAddress.format(InetAddress.getByAddress(ipBytes)); + assertTrue(certInfo.ipAddresses.stream().anyMatch(ip::equals)); + } else if (generalName.getTagNo() == GeneralName.otherName) { + ASN1Sequence seq = ASN1Sequence.getInstance(generalName.getName()); + assertThat(seq.size(), equalTo(2)); + assertThat(seq.getObjectAt(0), instanceOf(ASN1ObjectIdentifier.class)); + assertThat(seq.getObjectAt(0).toString(), equalTo(CertUtils.CN_OID)); + assertThat(seq.getObjectAt(1), instanceOf(ASN1TaggedObject.class)); + ASN1TaggedObject tagged = (ASN1TaggedObject) seq.getObjectAt(1); + assertThat(tagged.getObject(), instanceOf(ASN1String.class)); + assertThat(tagged.getObject().toString(), Matchers.isIn(certInfo.commonNames)); + } else { + fail("unknown general name with tag " + generalName.getTagNo()); + } + } + } + + /** + * Gets a random name that is valid for certificate generation. There are some cases where the random value could match one of the + * reserved names like ca, so this method allows us to avoid these issues. + */ + private String getValidRandomInstanceName() { + String name; + boolean valid; + do { + name = randomAlphaOfLengthBetween(1, 32); + valid = Name.fromUserProvidedName(name, name).error == null; + } while (valid == false); + return name; + } + + /** + * Writes the description of instances to a given {@link Path} + */ + private Path writeInstancesTo(Path path) throws IOException { + Iterable instances = Arrays.asList( + "instances:", + " - name: \"node1\"", + " ip:", + " - \"127.0.0.1\"", + " dns: \"localhost\"", + " - name: \"node2\"", + " filename: \"node2\"", + " ip: \"::1\"", + " cn:", + " - \"node2.elasticsearch\"", + " - name: \"node3\"", + " filename: \"node3\"", + " - name: \"CN=different value\"", + " filename: \"different file\"", + " dns:", + " - \"node4.mydomain.com\""); + + return Files.write(path, instances, StandardCharsets.UTF_8); + } + + /** + * Writes the description of instances to a given {@link Path} + */ + private Path writeInvalidInstanceInformation(Path path) throws IOException { + Iterable instances = Arrays.asList( + "instances:", + " - name: \"THIS=not a,valid DN\"", + " ip: \"127.0.0.1\""); + return Files.write(path, instances, StandardCharsets.UTF_8); + } + + @SuppressForbidden(reason = "resolve paths against CWD for a CLI tool") + private static Path resolvePath(String path) { + return PathUtils.get(path).toAbsolutePath(); + } + + /** + * Converting jimfs Paths into strings and back to paths doesn't work with the security manager. 
+ * This class works around that by sticking with the original path objects + */ + private static class PathAwareGenerateCertificateCommand extends GenerateCertificateCommand { + private final Path caFile; + private final Path outFile; + + PathAwareGenerateCertificateCommand(Path caFile, Path outFile) { + this.caFile = caFile; + this.outFile = outFile; + } + + @Override + protected Path resolvePath(OptionSet options, OptionSpec spec) { + if (spec.options().contains("ca")) { + return caFile; + } + return super.resolvePath(options, spec); + } + + @Override + Path resolveOutputPath(Terminal terminal, OptionSet options, String defaultFilename) throws IOException { + return outFile; + } + } +} diff --git a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/authc/file/tool/UsersToolTests.java b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/authc/file/tool/UsersToolTests.java new file mode 100644 index 0000000000000..672db68ad9ac4 --- /dev/null +++ b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/authc/file/tool/UsersToolTests.java @@ -0,0 +1,540 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.file.tool; + +import com.google.common.jimfs.Configuration; +import com.google.common.jimfs.Jimfs; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.CommandTestCase; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.PathUtilsForTesting; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; +import org.elasticsearch.xpack.core.security.user.ElasticUser; +import org.elasticsearch.xpack.core.security.user.KibanaUser; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileSystem; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.hamcrest.Matchers.containsString; + +public class UsersToolTests extends CommandTestCase { + + // the mock filesystem we use so permissions/users/groups can be modified + static FileSystem jimfs; + String pathHomeParameter; + String fileTypeParameter; + + // the config dir for each test to use + Path confDir; + + // settings used to create an Environment for tools + Settings settings; + + @BeforeClass + public static void setupJimfs() throws IOException { + String view = randomFrom("basic", "posix"); + Configuration conf = Configuration.unix().toBuilder().setAttributeViews(view).build(); + jimfs = Jimfs.newFileSystem(conf); + 
PathUtilsForTesting.installMock(jimfs); + } + + @Before + public void setupHome() throws IOException { + Path homeDir = jimfs.getPath("eshome"); + IOUtils.rm(homeDir); + confDir = homeDir.resolve("config"); + Files.createDirectories(confDir); + String defaultPassword = SecuritySettingsSourceField.TEST_PASSWORD; + Files.write(confDir.resolve("users"), Arrays.asList( + "existing_user:" + new String(Hasher.BCRYPT.hash(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)), + "existing_user2:" + new String(Hasher.BCRYPT.hash(new SecureString((defaultPassword + "2").toCharArray()))), + "existing_user3:" + new String(Hasher.BCRYPT.hash(new SecureString((defaultPassword + "3").toCharArray()))) + ), StandardCharsets.UTF_8); + Files.write(confDir.resolve("users_roles"), Arrays.asList( + "test_admin:existing_user,existing_user2", + "test_r1:existing_user2" + ), StandardCharsets.UTF_8); + Files.write(confDir.resolve("roles.yml"), Arrays.asList( + "test_admin:", + " cluster: all", + "test_r1:", + " cluster: all", + "test_r2:", + " cluster: all" + ), StandardCharsets.UTF_8); + settings = + Settings.builder() + .put("path.home", homeDir) + .put("xpack.security.authc.realms.file.type", "file") + .build(); + pathHomeParameter = "-Epath.home=" + homeDir; + fileTypeParameter = "-Expack.security.authc.realms.file.type=file"; + } + + @AfterClass + public static void closeJimfs() throws IOException { + if (jimfs != null) { + jimfs.close(); + jimfs = null; + } + } + + @Override + protected Command newCommand() { + return new UsersTool() { + @Override + protected AddUserCommand newAddUserCommand() { + return new AddUserCommand() { + @Override + protected Environment createEnv(Map settings) throws UserException { + return new Environment(UsersToolTests.this.settings, confDir); + } + }; + } + + @Override + protected DeleteUserCommand newDeleteUserCommand() { + return new DeleteUserCommand() { + @Override + protected Environment createEnv(Map settings) throws UserException { + return new Environment(UsersToolTests.this.settings, confDir); + } + }; + } + + @Override + protected PasswordCommand newPasswordCommand() { + return new PasswordCommand() { + @Override + protected Environment createEnv(Map settings) throws UserException { + return new Environment(UsersToolTests.this.settings, confDir); + } + }; + } + + @Override + protected RolesCommand newRolesCommand() { + return new RolesCommand() { + @Override + protected Environment createEnv(Map settings) throws UserException { + return new Environment(UsersToolTests.this.settings, confDir); + } + }; + } + + @Override + protected ListCommand newListCommand() { + return new ListCommand() { + @Override + protected Environment createEnv(Map settings) throws UserException { + return new Environment(UsersToolTests.this.settings, confDir); + } + }; + } + }; + } + + /** checks the user exists with the given password */ + void assertUser(String username, String password) throws IOException { + List lines = Files.readAllLines(confDir.resolve("users"), StandardCharsets.UTF_8); + for (String line : lines) { + String[] usernameHash = line.split(":", 2); + if (usernameHash.length != 2) { + fail("Corrupted users file, line: " + line); + } + if (username.equals(usernameHash[0]) == false) { + continue; + } + String gotHash = usernameHash[1]; + SecureString expectedHash = new SecureString(password); + assertTrue("Expected hash " + expectedHash + " for password " + password + " but got " + gotHash, + Hasher.BCRYPT.verify(expectedHash, gotHash.toCharArray())); + return; + } + 
fail("Could not find username " + username + " in users file:\n" + lines.toString()); + } + + /** Checks the user does not exist in the users or users_roles files*/ + void assertNoUser(String username) throws IOException { + List lines = Files.readAllLines(confDir.resolve("users"), StandardCharsets.UTF_8); + for (String line : lines) { + String[] usernameHash = line.split(":", 2); + if (usernameHash.length != 2) { + fail("Corrupted users file, line: " + line); + } + assertNotEquals(username, usernameHash[0]); + } + lines = Files.readAllLines(confDir.resolve("users_roles"), StandardCharsets.UTF_8); + for (String line : lines) { + String[] roleUsers = line.split(":", 2); + if (roleUsers.length != 2) { + fail("Corrupted users_roles file, line: " + line); + } + String[] users = roleUsers[1].split(","); + for (String user : users) { + assertNotEquals(user, username); + } + } + + } + + /** checks the role has the given users, or that the role does not exist if not users are passed. */ + void assertRole(String role, String... users) throws IOException { + List lines = Files.readAllLines(confDir.resolve("users_roles"), StandardCharsets.UTF_8); + for (String line : lines) { + String[] roleUsers = line.split(":", 2); + if (roleUsers.length != 2) { + fail("Corrupted users_roles file, line: " + line); + } + if (role.equals(roleUsers[0]) == false) { + continue; + } + if (users.length == 0) { + fail("Found role " + role + " in users_roles file with users [" + roleUsers[1] + "]"); + } + List gotUsers = Arrays.asList(roleUsers[1].split(",")); + for (String user : users) { + if (gotUsers.contains(user) == false) { + fail("Expected users [" + Arrays.toString(users) + "] for role " + role + + " but found [" + gotUsers.toString() + "]"); + } + } + return; + } + if (users.length != 0) { + fail("Could not find role " + role + " in users_roles file:\n" + lines.toString()); + } + } + + public void testParseInvalidUsername() throws Exception { + UserException e = expectThrows(UserException.class, () -> { + UsersTool.parseUsername(Collections.singletonList("áccented"), Settings.EMPTY); + }); + assertEquals(ExitCodes.DATA_ERROR, e.exitCode); + assertTrue(e.getMessage(), e.getMessage().contains("Invalid username")); + } + + public void testParseReservedUsername() throws Exception { + final String name = randomFrom(ElasticUser.NAME, KibanaUser.NAME); + UserException e = expectThrows(UserException.class, () -> { + UsersTool.parseUsername(Collections.singletonList(name), Settings.EMPTY); + }); + assertEquals(ExitCodes.DATA_ERROR, e.exitCode); + assertTrue(e.getMessage(), e.getMessage().contains("Invalid username")); + + Settings settings = Settings.builder().put(XPackSettings.RESERVED_REALM_ENABLED_SETTING.getKey(), false).build(); + UsersTool.parseUsername(Collections.singletonList(name), settings); + } + + public void testParseUsernameMissing() throws Exception { + UserException e = expectThrows(UserException.class, () -> { + UsersTool.parseUsername(Collections.emptyList(), Settings.EMPTY); + }); + assertEquals(ExitCodes.USAGE, e.exitCode); + assertTrue(e.getMessage(), e.getMessage().contains("Missing username argument")); + } + + public void testParseUsernameExtraArgs() throws Exception { + UserException e = expectThrows(UserException.class, () -> { + UsersTool.parseUsername(Arrays.asList("username", "extra"), Settings.EMPTY); + }); + assertEquals(ExitCodes.USAGE, e.exitCode); + assertTrue(e.getMessage(), e.getMessage().contains("Expected a single username argument")); + } + + public void 
testParseInvalidPasswordOption() throws Exception { + UserException e = expectThrows(UserException.class, () -> { + UsersTool.parsePassword(terminal, "123"); + }); + assertEquals(ExitCodes.DATA_ERROR, e.exitCode); + assertTrue(e.getMessage(), e.getMessage().contains("Invalid password")); + } + + public void testParseInvalidPasswordInput() throws Exception { + terminal.addSecretInput("123"); + UserException e = expectThrows(UserException.class, () -> { + UsersTool.parsePassword(terminal, null); + }); + assertEquals(ExitCodes.DATA_ERROR, e.exitCode); + assertTrue(e.getMessage(), e.getMessage().contains("Invalid password")); + } + + public void testParseMismatchPasswordInput() throws Exception { + terminal.addSecretInput("password1"); + terminal.addSecretInput("password2"); + UserException e = expectThrows(UserException.class, () -> { + UsersTool.parsePassword(terminal, null); + }); + assertEquals(ExitCodes.DATA_ERROR, e.exitCode); + assertTrue(e.getMessage(), e.getMessage().contains("Password mismatch")); + } + + public void testParseUnknownRole() throws Exception { + UsersTool.parseRoles(terminal, TestEnvironment.newEnvironment(settings), "test_r1,r2,r3"); + String output = terminal.getOutput(); + assertTrue(output, output.contains("The following roles [r2,r3] are not in the [")); + } + + public void testParseReservedRole() throws Exception { + final String reservedRoleName = randomFrom(ReservedRolesStore.names().toArray(Strings.EMPTY_ARRAY)); + String rolesArg = randomBoolean() ? "test_r1," + reservedRoleName : reservedRoleName; + UsersTool.parseRoles(terminal, TestEnvironment.newEnvironment(settings), rolesArg); + String output = terminal.getOutput(); + assertTrue(output, output.isEmpty()); + } + + public void testParseInvalidRole() throws Exception { + UserException e = expectThrows(UserException.class, () -> { + UsersTool.parseRoles(terminal, TestEnvironment.newEnvironment(settings), "fóóbár"); + }); + assertEquals(ExitCodes.DATA_ERROR, e.exitCode); + assertTrue(e.getMessage(), e.getMessage().contains("Invalid role [fóóbár]")); + } + + public void testParseMultipleRoles() throws Exception { + String[] roles = UsersTool.parseRoles(terminal, TestEnvironment.newEnvironment(settings), "test_r1,test_r2"); + assertEquals(Objects.toString(roles), 2, roles.length); + assertEquals("test_r1", roles[0]); + assertEquals("test_r2", roles[1]); + } + + public void testUseraddNoPassword() throws Exception { + terminal.addSecretInput(SecuritySettingsSourceField.TEST_PASSWORD); + terminal.addSecretInput(SecuritySettingsSourceField.TEST_PASSWORD); + execute("useradd", pathHomeParameter, fileTypeParameter, "username"); + assertUser("username", SecuritySettingsSourceField.TEST_PASSWORD); + } + + public void testUseraddPasswordOption() throws Exception { + execute("useradd", pathHomeParameter, fileTypeParameter, "username", "-p", SecuritySettingsSourceField.TEST_PASSWORD); + assertUser("username", SecuritySettingsSourceField.TEST_PASSWORD); + } + + public void testUseraddUserExists() throws Exception { + UserException e = expectThrows(UserException.class, () -> { + execute("useradd", pathHomeParameter, fileTypeParameter, "existing_user", "-p", SecuritySettingsSourceField.TEST_PASSWORD); + }); + assertEquals(ExitCodes.CODE_ERROR, e.exitCode); + assertEquals("User [existing_user] already exists", e.getMessage()); + } + + public void testUseraddReservedUser() throws Exception { + final String name = randomFrom(ElasticUser.NAME, KibanaUser.NAME); + UserException e = expectThrows(UserException.class, () -> { + 
execute("useradd", pathHomeParameter, fileTypeParameter, name, "-p", SecuritySettingsSourceField.TEST_PASSWORD); + }); + assertEquals(ExitCodes.DATA_ERROR, e.exitCode); + assertEquals("Invalid username [" + name + "]... Username [" + name + "] is reserved and may not be used.", e.getMessage()); + } + + public void testUseraddNoRoles() throws Exception { + Files.delete(confDir.resolve("users_roles")); + Files.createFile(confDir.resolve("users_roles")); + execute("useradd", pathHomeParameter, fileTypeParameter, "username", "-p", SecuritySettingsSourceField.TEST_PASSWORD); + List lines = Files.readAllLines(confDir.resolve("users_roles"), StandardCharsets.UTF_8); + assertTrue(lines.toString(), lines.isEmpty()); + } + + public void testUserdelUnknownUser() throws Exception { + UserException e = expectThrows(UserException.class, () -> { + execute("userdel", pathHomeParameter, fileTypeParameter, "unknown"); + }); + assertEquals(ExitCodes.NO_USER, e.exitCode); + assertTrue(e.getMessage(), e.getMessage().contains("User [unknown] doesn't exist")); + } + + public void testUserdel() throws Exception { + execute("userdel", pathHomeParameter, fileTypeParameter, "existing_user"); + assertNoUser("existing_user"); + } + + public void testPasswdUnknownUser() throws Exception { + UserException e = expectThrows(UserException.class, () -> { + execute("passwd", pathHomeParameter, fileTypeParameter, "unknown", "-p", SecuritySettingsSourceField.TEST_PASSWORD); + }); + assertEquals(ExitCodes.NO_USER, e.exitCode); + assertTrue(e.getMessage(), e.getMessage().contains("User [unknown] doesn't exist")); + } + + public void testPasswdNoPasswordOption() throws Exception { + terminal.addSecretInput("newpassword"); + terminal.addSecretInput("newpassword"); + execute("passwd", pathHomeParameter, fileTypeParameter, "existing_user"); + assertUser("existing_user", "newpassword"); + assertRole("test_admin", "existing_user", "existing_user2"); // roles unchanged + } + + public void testPasswd() throws Exception { + execute("passwd", pathHomeParameter, fileTypeParameter, "existing_user", "-p", "newpassword"); + assertUser("existing_user", "newpassword"); + assertRole("test_admin", "existing_user"); // roles unchanged + } + + public void testRolesUnknownUser() throws Exception { + UserException e = expectThrows(UserException.class, () -> { + execute("roles", pathHomeParameter, fileTypeParameter, "unknown"); + }); + assertEquals(ExitCodes.NO_USER, e.exitCode); + assertTrue(e.getMessage(), e.getMessage().contains("User [unknown] doesn't exist")); + } + + public void testRolesAdd() throws Exception { + execute("roles", pathHomeParameter, fileTypeParameter, "existing_user", "-a", "test_r1"); + assertRole("test_admin", "existing_user"); + assertRole("test_r1", "existing_user"); + } + + public void testRolesRemove() throws Exception { + execute("roles", pathHomeParameter, fileTypeParameter, "existing_user", "-r", "test_admin"); + assertRole("test_admin", "existing_user2"); + } + + public void testRolesAddAndRemove() throws Exception { + execute("roles", pathHomeParameter, fileTypeParameter, "existing_user", "-a", "test_r1", "-r", "test_admin"); + assertRole("test_admin", "existing_user2"); + assertRole("test_r1", "existing_user"); + } + + public void testRolesRemoveLeavesExisting() throws Exception { + execute("useradd", pathHomeParameter, fileTypeParameter, "username", "-p", SecuritySettingsSourceField.TEST_PASSWORD, + "-r", "test_admin"); + execute("roles", pathHomeParameter, fileTypeParameter, "existing_user", "-r", "test_admin"); + 
assertRole("test_admin", "username"); + } + + public void testRolesNoAddOrRemove() throws Exception { + String output = execute("roles", pathHomeParameter, fileTypeParameter, "existing_user"); + assertTrue(output, output.contains("existing_user")); + assertTrue(output, output.contains("test_admin")); + } + + public void testListUnknownUser() throws Exception { + UserException e = expectThrows(UserException.class, () -> { + execute("list", pathHomeParameter, fileTypeParameter, "unknown"); + }); + assertEquals(ExitCodes.NO_USER, e.exitCode); + assertTrue(e.getMessage(), e.getMessage().contains("User [unknown] doesn't exist")); + } + + public void testListAllUsers() throws Exception { + String output = execute("list", pathHomeParameter, fileTypeParameter); + assertTrue(output, output.contains("existing_user")); + assertTrue(output, output.contains("test_admin")); + assertTrue(output, output.contains("existing_user2")); + assertTrue(output, output.contains("test_r1")); + + // output should not contain '*' which indicates unknown role + assertFalse(output, output.contains("*")); + } + + public void testListSingleUser() throws Exception { + String output = execute("list", pathHomeParameter, fileTypeParameter, "existing_user"); + assertTrue(output, output.contains("existing_user")); + assertTrue(output, output.contains("test_admin")); + assertFalse(output, output.contains("existing_user2")); + assertFalse(output, output.contains("test_r1")); + + // output should not contain '*' which indicates unknown role + assertFalse(output, output.contains("*")); + } + + public void testListUnknownRoles() throws Exception { + execute("useradd", pathHomeParameter, fileTypeParameter, "username", "-p", SecuritySettingsSourceField.TEST_PASSWORD, + "-r", "test_r1,r2,r3"); + String output = execute("list", pathHomeParameter, fileTypeParameter, "username"); + assertTrue(output, output.contains("username")); + assertTrue(output, output.contains("r2*,r3*,test_r1")); + } + + public void testListNoUsers() throws Exception { + Files.delete(confDir.resolve("users")); + Files.createFile(confDir.resolve("users")); + Files.delete(confDir.resolve("users_roles")); + Files.createFile(confDir.resolve("users_roles")); + String output = execute("list", pathHomeParameter, fileTypeParameter); + assertTrue(output, output.contains("No users found")); + } + + public void testListUserWithoutRoles() throws Exception { + String output = execute("list", pathHomeParameter, fileTypeParameter, "existing_user3"); + assertTrue(output, output.contains("existing_user3")); + output = execute("list", pathHomeParameter, fileTypeParameter); + assertTrue(output, output.contains("existing_user3")); + + // output should not contain '*' which indicates unknown role + assertFalse(output, output.contains("*")); + } + + public void testUserAddNoConfig() throws Exception { + Path homeDir = jimfs.getPath("eshome"); + IOUtils.rm(confDir.resolve("users")); + pathHomeParameter = "-Epath.home=" + homeDir; + fileTypeParameter = "-Expack.security.authc.realms.file.type=file"; + UserException e = expectThrows(UserException.class, () -> { + execute("useradd", pathHomeParameter, fileTypeParameter, "username", "-p", SecuritySettingsSourceField.TEST_PASSWORD); + }); + assertEquals(ExitCodes.CONFIG, e.exitCode); + assertThat(e.getMessage(), containsString("Configuration file [users] is missing")); + } + + public void testUserListNoConfig() throws Exception { + Path homeDir = jimfs.getPath("eshome"); + IOUtils.rm(confDir.resolve("users")); + pathHomeParameter = 
"-Epath.home=" + homeDir; + fileTypeParameter = "-Expack.security.authc.realms.file.type=file"; + UserException e = expectThrows(UserException.class, () -> { + execute("list", pathHomeParameter, fileTypeParameter); + }); + assertEquals(ExitCodes.CONFIG, e.exitCode); + assertThat(e.getMessage(), containsString("Configuration file [users] is missing")); + } + + public void testUserDelNoConfig() throws Exception { + Path homeDir = jimfs.getPath("eshome"); + IOUtils.rm(confDir.resolve("users")); + pathHomeParameter = "-Epath.home=" + homeDir; + fileTypeParameter = "-Expack.security.authc.realms.file.type=file"; + UserException e = expectThrows(UserException.class, () -> { + execute("userdel", pathHomeParameter, fileTypeParameter, "username"); + }); + assertEquals(ExitCodes.CONFIG, e.exitCode); + assertThat(e.getMessage(), containsString("Configuration file [users] is missing")); + } + + public void testListUserRolesNoConfig() throws Exception { + Path homeDir = jimfs.getPath("eshome"); + IOUtils.rm(confDir.resolve("users_roles")); + pathHomeParameter = "-Epath.home=" + homeDir; + fileTypeParameter = "-Expack.security.authc.realms.file.type=file"; + UserException e = expectThrows(UserException.class, () -> { + execute("roles", pathHomeParameter, fileTypeParameter, "username"); + }); + assertEquals(ExitCodes.CONFIG, e.exitCode); + assertThat(e.getMessage(), containsString("Configuration file [users_roles] is missing")); + } +} diff --git a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/crypto/tool/SystemKeyToolTests.java b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/crypto/tool/SystemKeyToolTests.java new file mode 100644 index 0000000000000..3bc861c2efcdd --- /dev/null +++ b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/crypto/tool/SystemKeyToolTests.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.crypto.tool; + +import com.google.common.jimfs.Configuration; +import com.google.common.jimfs.Jimfs; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.CommandTestCase; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.io.PathUtilsForTesting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.xpack.core.XPackField; +import org.junit.After; + +import java.nio.file.FileSystem; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.attribute.PosixFilePermission; +import java.util.Map; +import java.util.Set; + +public class SystemKeyToolTests extends CommandTestCase { + + private FileSystem jimfs; + + private Path initFileSystem(boolean needsPosix) throws Exception { + String view = needsPosix ? 
"posix" : randomFrom("basic", "posix"); + Configuration conf = Configuration.unix().toBuilder().setAttributeViews(view).build(); + jimfs = Jimfs.newFileSystem(conf); + PathUtilsForTesting.installMock(jimfs); + return jimfs.getPath("eshome"); + } + + @After + public void tearDown() throws Exception { + IOUtils.close(jimfs); + super.tearDown(); + } + + @Override + protected Command newCommand() { + return new SystemKeyTool() { + + @Override + protected Environment createEnv(Map settings) throws UserException { + Settings.Builder builder = Settings.builder(); + settings.forEach((k,v) -> builder.put(k, v)); + return TestEnvironment.newEnvironment(builder.build()); + } + + }; + } + + public void testGenerate() throws Exception { + final Path homeDir = initFileSystem(true); + + Path path = jimfs.getPath(randomAlphaOfLength(10)).resolve("key"); + Files.createDirectory(path.getParent()); + + execute("-Epath.home=" + homeDir, path.toString()); + byte[] bytes = Files.readAllBytes(path); + // TODO: maybe we should actually check the key is...i dunno...valid? + assertEquals(SystemKeyTool.KEY_SIZE / 8, bytes.length); + + Set perms = Files.getPosixFilePermissions(path); + assertTrue(perms.toString(), perms.contains(PosixFilePermission.OWNER_READ)); + assertTrue(perms.toString(), perms.contains(PosixFilePermission.OWNER_WRITE)); + assertEquals(perms.toString(), 2, perms.size()); + } + + public void testGeneratePathInSettings() throws Exception { + final Path homeDir = initFileSystem(false); + + Path xpackConf = homeDir.resolve("config"); + Files.createDirectories(xpackConf); + execute("-Epath.home=" + homeDir.toString()); + byte[] bytes = Files.readAllBytes(xpackConf.resolve("system_key")); + assertEquals(SystemKeyTool.KEY_SIZE / 8, bytes.length); + } + + public void testGenerateDefaultPath() throws Exception { + final Path homeDir = initFileSystem(false); + Path keyPath = homeDir.resolve("config/system_key"); + Files.createDirectories(keyPath.getParent()); + execute("-Epath.home=" + homeDir.toString()); + byte[] bytes = Files.readAllBytes(keyPath); + assertEquals(SystemKeyTool.KEY_SIZE / 8, bytes.length); + } + + public void testThatSystemKeyMayOnlyBeReadByOwner() throws Exception { + final Path homeDir = initFileSystem(true); + + Path path = jimfs.getPath(randomAlphaOfLength(10)).resolve("key"); + Files.createDirectories(path.getParent()); + + execute("-Epath.home=" + homeDir, path.toString()); + Set perms = Files.getPosixFilePermissions(path); + assertTrue(perms.toString(), perms.contains(PosixFilePermission.OWNER_READ)); + assertTrue(perms.toString(), perms.contains(PosixFilePermission.OWNER_WRITE)); + assertEquals(perms.toString(), 2, perms.size()); + } + +} diff --git a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/support/FileAttributesCheckerTests.java b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/support/FileAttributesCheckerTests.java new file mode 100644 index 0000000000000..12ae440e8f7e4 --- /dev/null +++ b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/support/FileAttributesCheckerTests.java @@ -0,0 +1,114 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.support; + +import java.nio.file.FileSystem; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.attribute.GroupPrincipal; +import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.attribute.PosixFilePermission; +import java.nio.file.attribute.UserPrincipal; +import java.util.HashSet; +import java.util.Set; + +import com.google.common.jimfs.Configuration; +import com.google.common.jimfs.Jimfs; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.test.ESTestCase; + +public class FileAttributesCheckerTests extends ESTestCase { + + public void testNonExistentFile() throws Exception { + Path path = createTempDir().resolve("dne"); + FileAttributesChecker checker = new FileAttributesChecker(path); + MockTerminal terminal = new MockTerminal(); + checker.check(terminal); + assertTrue(terminal.getOutput(), terminal.getOutput().isEmpty()); + } + + public void testNoPosix() throws Exception { + Configuration conf = Configuration.unix().toBuilder().setAttributeViews("basic").build(); + try (FileSystem fs = Jimfs.newFileSystem(conf)) { + Path path = fs.getPath("temp"); + FileAttributesChecker checker = new FileAttributesChecker(path); + MockTerminal terminal = new MockTerminal(); + checker.check(terminal); + assertTrue(terminal.getOutput(), terminal.getOutput().isEmpty()); + } + } + + public void testNoChanges() throws Exception { + Configuration conf = Configuration.unix().toBuilder().setAttributeViews("posix").build(); + try (FileSystem fs = Jimfs.newFileSystem(conf)) { + Path path = fs.getPath("temp"); + Files.createFile(path); + FileAttributesChecker checker = new FileAttributesChecker(path); + + MockTerminal terminal = new MockTerminal(); + checker.check(terminal); + assertTrue(terminal.getOutput(), terminal.getOutput().isEmpty()); + } + } + + public void testPermissionsChanged() throws Exception { + Configuration conf = Configuration.unix().toBuilder().setAttributeViews("posix").build(); + try (FileSystem fs = Jimfs.newFileSystem(conf)) { + Path path = fs.getPath("temp"); + Files.createFile(path); + + PosixFileAttributeView attrs = Files.getFileAttributeView(path, PosixFileAttributeView.class); + Set perms = new HashSet<>(attrs.readAttributes().permissions()); + perms.remove(PosixFilePermission.GROUP_READ); + attrs.setPermissions(perms); + + FileAttributesChecker checker = new FileAttributesChecker(path); + perms.add(PosixFilePermission.GROUP_READ); + attrs.setPermissions(perms); + + MockTerminal terminal = new MockTerminal(); + checker.check(terminal); + String output = terminal.getOutput(); + assertTrue(output, output.contains("permissions of [" + path + "] have changed")); + } + } + + public void testOwnerChanged() throws Exception { + Configuration conf = Configuration.unix().toBuilder().setAttributeViews("posix").build(); + try (FileSystem fs = Jimfs.newFileSystem(conf)) { + Path path = fs.getPath("temp"); + Files.createFile(path); + FileAttributesChecker checker = new FileAttributesChecker(path); + + UserPrincipal newOwner = fs.getUserPrincipalLookupService().lookupPrincipalByName("randomuser"); + PosixFileAttributeView attrs = Files.getFileAttributeView(path, PosixFileAttributeView.class); + attrs.setOwner(newOwner); + + MockTerminal terminal = new MockTerminal(); + checker.check(terminal); + String output = terminal.getOutput(); + assertTrue(output, output.contains("Owner of file [" + path + "] used to be")); + } + } + + public void testGroupChanged() throws Exception { + 
Configuration conf = Configuration.unix().toBuilder().setAttributeViews("posix").build(); + try (FileSystem fs = Jimfs.newFileSystem(conf)) { + Path path = fs.getPath("temp"); + Files.createFile(path); + FileAttributesChecker checker = new FileAttributesChecker(path); + + GroupPrincipal newGroup = fs.getUserPrincipalLookupService().lookupPrincipalByGroupName("randomgroup"); + PosixFileAttributeView attrs = Files.getFileAttributeView(path, PosixFileAttributeView.class); + attrs.setGroup(newGroup); + + MockTerminal terminal = new MockTerminal(); + checker.check(terminal); + String output = terminal.getOutput(); + assertTrue(output, output.contains("Group of file [" + path + "] used to be")); + } + } +} diff --git a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/support/SecurityFilesTests.java b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/support/SecurityFilesTests.java new file mode 100644 index 0000000000000..a52ca49d6c091 --- /dev/null +++ b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/support/SecurityFilesTests.java @@ -0,0 +1,157 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.support; + +import com.google.common.jimfs.Configuration; +import com.google.common.jimfs.Jimfs; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileSystem; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; +import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.attribute.PosixFilePermission; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.concurrent.atomic.AtomicBoolean; + +import static java.nio.file.attribute.PosixFilePermission.GROUP_EXECUTE; +import static java.nio.file.attribute.PosixFilePermission.OTHERS_EXECUTE; +import static java.nio.file.attribute.PosixFilePermission.OWNER_EXECUTE; +import static java.nio.file.attribute.PosixFilePermission.OWNER_READ; +import static java.nio.file.attribute.PosixFilePermission.OWNER_WRITE; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; + +public class SecurityFilesTests extends ESTestCase { + public void testThatOriginalPermissionsAreKept() throws Exception { + assumeTrue("test cannot run with security manager enabled", System.getSecurityManager() == null); + Path path = createTempFile(); + + // no posix file permissions, nothing to test, done here + boolean supportsPosixPermissions = Environment.getFileStore(path).supportsFileAttributeView(PosixFileAttributeView.class); + assumeTrue("Ignoring because posix file attributes are not supported", supportsPosixPermissions); + + Files.write(path, "foo".getBytes(StandardCharsets.UTF_8)); + + Set 
perms = Sets.newHashSet(OWNER_READ, OWNER_WRITE); + if (randomBoolean()) perms.add(OWNER_EXECUTE); + if (randomBoolean()) perms.add(GROUP_EXECUTE); + if (randomBoolean()) perms.add(OTHERS_EXECUTE); + + Files.setPosixFilePermissions(path, perms); + + final Map map = new TreeMap<>(); + map.put("This is the first", "line"); + map.put("This is the second", "line"); + SecurityFiles.writeFileAtomically(path, map, e -> e.getKey() + " " + e.getValue()); + + final List lines = Files.readAllLines(path); + assertThat(lines, hasSize(2)); + assertThat(lines.get(0), equalTo("This is the first line")); + assertThat(lines.get(1), equalTo("This is the second line")); + + Set permissionsAfterWrite = Files.getPosixFilePermissions(path); + assertThat(permissionsAfterWrite, is(perms)); + } + + public void testFailure() throws IOException { + final Path path = createTempFile("existing", "file"); + + Files.write(path, "foo".getBytes(StandardCharsets.UTF_8)); + + final Visitor innerVisitor = new Visitor(path); + final RuntimeException re = expectThrows(RuntimeException.class, () -> SecurityFiles.writeFileAtomically( + path, + Collections.singletonMap("foo", "bar"), + e -> { + try { + Files.walkFileTree(path.getParent(), innerVisitor); + } catch (final IOException inner) { + throw new UncheckedIOException(inner); + } + throw new RuntimeException(e.getKey() + " " + e.getValue()); + } + )); + + assertThat(re, hasToString(containsString("foo bar"))); + + // assert the temporary file was created while trying to write the file + assertTrue(innerVisitor.found()); + + final Visitor visitor = new Visitor(path); + Files.walkFileTree(path.getParent(), visitor); + + // now assert the temporary file was cleaned up after the write failed + assertFalse(visitor.found()); + + // finally, assert the original file contents remain + final List lines = Files.readAllLines(path); + assertThat(lines, hasSize(1)); + assertThat(lines.get(0), equalTo("foo")); + } + + static final class Visitor extends SimpleFileVisitor { + + private final Path path; + private final AtomicBoolean found = new AtomicBoolean(); + + Visitor(final Path path) { + this.path = path; + } + + public boolean found() { + return found.get(); + } + + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + if (file.getFileName().toString().startsWith(path.getFileName().toString()) && file.getFileName().toString().endsWith("tmp")) { + found.set(true); + return FileVisitResult.TERMINATE; + } + return FileVisitResult.CONTINUE; + } + } + + public void testThatOwnerAndGroupAreChanged() throws Exception { + Configuration jimFsConfiguration = Configuration.unix().toBuilder().setAttributeViews("basic", "owner", "posix", "unix").build(); + try (FileSystem fs = Jimfs.newFileSystem(jimFsConfiguration)) { + Path path = fs.getPath("foo"); + Path tempPath = fs.getPath("bar"); + Files.write(path, "foo".getBytes(StandardCharsets.UTF_8)); + Files.write(tempPath, "bar".getBytes(StandardCharsets.UTF_8)); + + PosixFileAttributeView view = Files.getFileAttributeView(path, PosixFileAttributeView.class); + view.setGroup(fs.getUserPrincipalLookupService().lookupPrincipalByGroupName(randomAlphaOfLength(10))); + view.setOwner(fs.getUserPrincipalLookupService().lookupPrincipalByName(randomAlphaOfLength(10))); + + PosixFileAttributeView tempPathView = Files.getFileAttributeView(tempPath, PosixFileAttributeView.class); + assertThat(tempPathView.getOwner(), not(equalTo(view.getOwner()))); + assertThat(tempPathView.readAttributes().group(), 
not(equalTo(view.readAttributes().group()))); + + SecurityFiles.setPosixAttributesOnTempFile(path, tempPath); + + assertThat(tempPathView.getOwner(), equalTo(view.getOwner())); + assertThat(tempPathView.readAttributes().group(), equalTo(view.readAttributes().group())); + } + } +} diff --git a/x-pack/qa/smoke-test-graph-with-security/build.gradle b/x-pack/qa/smoke-test-graph-with-security/build.gradle new file mode 100644 index 0000000000000..d3f788d0b06e4 --- /dev/null +++ b/x-pack/qa/smoke-test-graph-with-security/build.gradle @@ -0,0 +1,36 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') +} + +// bring in graph rest test suite +task copyGraphRestTests(type: Copy) { + into project.sourceSets.test.output.resourcesDir + from project(xpackProject('plugin').path).sourceSets.test.resources.srcDirs + include 'rest-api-spec/test/graph/**' +} + +integTestCluster { + dependsOn copyGraphRestTests + extraConfigFile 'roles.yml', 'roles.yml' + setupCommand 'setupTestAdminUser', + 'bin/elasticsearch-users', 'useradd', 'test_admin', '-p', 'x-pack-test-password', '-r', 'superuser' + setupCommand 'setupGraphExplorerUser', + 'bin/elasticsearch-users', 'useradd', 'graph_explorer', '-p', 'x-pack-test-password', '-r', 'graph_explorer' + setupCommand 'setupPowerlessUser', + 'bin/elasticsearch-users', 'useradd', 'no_graph_explorer', '-p', 'x-pack-test-password', '-r', 'no_graph_explorer' + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'test_admin', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} diff --git a/x-pack/qa/smoke-test-graph-with-security/roles.yml b/x-pack/qa/smoke-test-graph-with-security/roles.yml new file mode 100644 index 0000000000000..5551ce4e0bb64 --- /dev/null +++ b/x-pack/qa/smoke-test-graph-with-security/roles.yml @@ -0,0 +1,33 @@ +admin: + cluster: + - all + indices: + - names: '*' + privileges: + - all + +graph_explorer: + cluster: + - cluster:monitor/health + - cluster:monitor/main + indices: + - names: '*' + privileges: + - read + - write + - indices:admin/refresh + - indices:admin/create + + +no_graph_explorer: + cluster: + - cluster:monitor/health + - cluster:monitor/main + indices: + - names: '*' + privileges: + - indices:data/read/search + - indices:data/write/index + - indices:data/write/bulk + - indices:admin/refresh + - indices:admin/create diff --git a/x-pack/qa/smoke-test-graph-with-security/src/test/java/org/elasticsearch/smoketest/GraphWithSecurityIT.java b/x-pack/qa/smoke-test-graph-with-security/src/test/java/org/elasticsearch/smoketest/GraphWithSecurityIT.java new file mode 100644 index 0000000000000..5650f407dd310 --- /dev/null +++ b/x-pack/qa/smoke-test-graph-with-security/src/test/java/org/elasticsearch/smoketest/GraphWithSecurityIT.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
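The `SecurityFilesTests` above assert three behaviours of `SecurityFiles.writeFileAtomically`: the rewritten file keeps its original POSIX permissions, a failed write removes the temporary file and leaves the original contents untouched, and `setPosixAttributesOnTempFile` copies owner and group onto the temporary file before the swap. The snippet below is only a minimal sketch of that behaviour, not the actual `SecurityFiles` implementation; the class name is invented for illustration, and it assumes the target file already exists on a POSIX file system and has a parent directory:

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.attribute.PosixFileAttributeView;
import java.nio.file.attribute.PosixFileAttributes;

public final class AtomicWriteSketch {

    // Write new content next to "target", copy the target's posix attributes onto the
    // temporary file, then swap it in atomically. On failure the temporary file is
    // removed and the original file is left as it was.
    static void writeAtomically(Path target, String content) throws IOException {
        // temp file name starts with the target name and ends with "tmp", which is the
        // pattern the Visitor in the test above looks for
        Path tmp = Files.createTempFile(target.getParent(), target.getFileName().toString(), "tmp");
        boolean success = false;
        try {
            Files.write(tmp, content.getBytes(StandardCharsets.UTF_8));

            PosixFileAttributes attributes =
                    Files.getFileAttributeView(target, PosixFileAttributeView.class).readAttributes();
            PosixFileAttributeView tmpView = Files.getFileAttributeView(tmp, PosixFileAttributeView.class);
            tmpView.setPermissions(attributes.permissions());
            tmpView.setOwner(attributes.owner());
            tmpView.setGroup(attributes.group());

            Files.move(tmp, target, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE);
            success = true;
        } finally {
            if (success == false) {
                Files.deleteIfExists(tmp);
            }
        }
    }
}
```

The real method additionally takes the entries to write plus a line formatter; the sketch collapses those into a plain string for brevity.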
+ */ +package org.elasticsearch.smoketest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; + + +public class GraphWithSecurityIT extends ESClientYamlSuiteTestCase { + + private static final String TEST_ADMIN_USERNAME = "test_admin"; + private static final String TEST_ADMIN_PASSWORD = "x-pack-test-password"; + + public GraphWithSecurityIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + + protected String[] getCredentials() { + return new String[]{"graph_explorer", "x-pack-test-password"}; + } + + + @Override + protected Settings restClientSettings() { + String[] creds = getCredentials(); + String token = basicAuthHeaderValue(creds[0], new SecureString(creds[1].toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue(TEST_ADMIN_USERNAME, new SecureString(TEST_ADMIN_PASSWORD.toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } +} + diff --git a/x-pack/qa/smoke-test-graph-with-security/src/test/java/org/elasticsearch/smoketest/GraphWithSecurityInsufficientRoleIT.java b/x-pack/qa/smoke-test-graph-with-security/src/test/java/org/elasticsearch/smoketest/GraphWithSecurityInsufficientRoleIT.java new file mode 100644 index 0000000000000..5034ba8eff26f --- /dev/null +++ b/x-pack/qa/smoke-test-graph-with-security/src/test/java/org/elasticsearch/smoketest/GraphWithSecurityInsufficientRoleIT.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
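`GraphWithSecurityIT` above authenticates its REST client by placing a pre-computed `Authorization` header value into the client settings via `basicAuthHeaderValue(...)`. The value being passed around is simply an HTTP Basic credential; the standalone sketch below (class name invented here, credentials taken from the cluster setup above) shows the equivalent construction, though the real x-pack helper works on a `SecureString` rather than a plain `String`:

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public final class BasicAuthHeaderSketch {

    // "Basic " followed by base64("user:password") - the standard HTTP Basic scheme.
    static String basicAuthHeaderValue(String user, String password) {
        String credentials = user + ":" + password;
        return "Basic " + Base64.getEncoder().encodeToString(credentials.getBytes(StandardCharsets.UTF_8));
    }

    public static void main(String[] args) {
        // header value the graph_explorer user from the tests above would send
        System.out.println(basicAuthHeaderValue("graph_explorer", "x-pack-test-password"));
    }
}
```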
+ */ +package org.elasticsearch.smoketest; + +import com.carrotsearch.randomizedtesting.annotations.Name; + +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; + +public class GraphWithSecurityInsufficientRoleIT extends GraphWithSecurityIT { + + public GraphWithSecurityInsufficientRoleIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + public void test() throws IOException { + try { + super.test(); + fail("should have failed because of missing role"); + } catch(AssertionError ae) { + assertThat(ae.getMessage(), containsString("action [indices:data/read/xpack/graph/explore")); + assertThat(ae.getMessage(), containsString("returned [403 Forbidden]")); + assertThat(ae.getMessage(), containsString("is unauthorized for user [no_graph_explorer]")); + } + } + + @Override + protected String[] getCredentials() { + return new String[]{"no_graph_explorer", "x-pack-test-password"}; + } +} + diff --git a/x-pack/qa/smoke-test-ml-with-security/build.gradle b/x-pack/qa/smoke-test-ml-with-security/build.gradle new file mode 100644 index 0000000000000..ebe55c2b7ef29 --- /dev/null +++ b/x-pack/qa/smoke-test-ml-with-security/build.gradle @@ -0,0 +1,118 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path: xpackProject('plugin').path, configuration: 'testArtifacts') +} + +// bring in machine learning rest test suite +task copyMlRestTests(type: Copy) { + into project.sourceSets.test.output.resourcesDir + from project(xpackProject('plugin').path).sourceSets.test.resources.srcDirs + include 'rest-api-spec/test/ml/**' +} + +integTestRunner { + systemProperty 'tests.rest.blacklist', [ + // Remove this test because it doesn't call an ML endpoint and we don't want + // to grant extra permissions to the users used in this test suite + 'ml/ml_classic_analyze/Test analyze API with an analyzer that does what we used to do in native code', + // Remove tests that are expected to throw an exception, because we cannot then + // know whether to expect an authorization exception or a validation exception + 'ml/calendar_crud/Test get calendar given missing', + 'ml/calendar_crud/Test cannot create calendar with name _all', + 'ml/calendar_crud/Test PageParams with ID is invalid', + 'ml/calendar_crud/Test post calendar events given empty events', + 'ml/calendar_crud/Test put calendar given id contains invalid chars', + 'ml/calendar_crud/Test delete event from non existing calendar', + 'ml/calendar_crud/Test delete job from non existing calendar', + 'ml/custom_all_field/Test querying custom all field', + 'ml/datafeeds_crud/Test delete datafeed with missing id', + 'ml/datafeeds_crud/Test put datafeed referring to missing job_id', + 'ml/datafeeds_crud/Test put datafeed with invalid query', + 'ml/datafeeds_crud/Test put datafeed with security headers in the body', + 'ml/datafeeds_crud/Test update datafeed with missing id', + 'ml/delete_job_force/Test cannot force delete a non-existent job', + 'ml/delete_model_snapshot/Test delete snapshot missing snapshotId', + 'ml/delete_model_snapshot/Test delete snapshot missing job_id', + 'ml/delete_model_snapshot/Test delete with in-use model', + 'ml/filter_crud/Test create filter api with mismatching body ID', + 
'ml/filter_crud/Test get filter API with bad ID', + 'ml/filter_crud/Test invalid param combinations', + 'ml/filter_crud/Test non-existing filter', + 'ml/get_datafeed_stats/Test get datafeed stats given missing datafeed_id', + 'ml/get_datafeeds/Test get datafeed given missing datafeed_id', + 'ml/jobs_crud/Test cannot create job with existing categorizer state document', + 'ml/jobs_crud/Test cannot create job with existing quantiles document', + 'ml/jobs_crud/Test cannot create job with existing result document', + 'ml/jobs_crud/Test cannot create job with model snapshot id set', + 'ml/jobs_crud/Test cannot decrease model_memory_limit below current usage', + 'ml/jobs_crud/Test get job API with non existing job id', + 'ml/jobs_crud/Test put job after closing results index', + 'ml/jobs_crud/Test put job after closing state index', + 'ml/jobs_crud/Test put job with inconsistent body/param ids', + 'ml/jobs_crud/Test put job with time field in analysis_config', + 'ml/jobs_crud/Test job with categorization_analyzer and categorization_filters', + 'ml/jobs_get/Test get job given missing job_id', + 'ml/jobs_get_result_buckets/Test mutually-exclusive params', + 'ml/jobs_get_result_buckets/Test mutually-exclusive params via body', + 'ml/jobs_get_result_categories/Test with invalid param combinations', + 'ml/jobs_get_result_categories/Test with invalid param combinations via body', + 'ml/jobs_get_result_overall_buckets/Test overall buckets given missing job', + 'ml/jobs_get_result_overall_buckets/Test overall buckets given non-matching expression and not allow_no_jobs', + 'ml/jobs_get_result_overall_buckets/Test overall buckets given top_n is 0', + 'ml/jobs_get_result_overall_buckets/Test overall buckets given top_n is negative', + 'ml/jobs_get_result_overall_buckets/Test overall buckets given invalid start param', + 'ml/jobs_get_result_overall_buckets/Test overall buckets given invalid end param', + 'ml/jobs_get_result_overall_buckets/Test overall buckets given bucket_span is smaller than max job bucket_span', + 'ml/jobs_get_stats/Test get job stats given missing job', + 'ml/jobs_get_stats/Test no exception on get job stats with missing index', + 'ml/job_groups/Test put job with empty group', + 'ml/job_groups/Test put job with group that matches an job id', + 'ml/job_groups/Test put job with group that matches its id', + 'ml/job_groups/Test put job with id that matches an existing group', + 'ml/job_groups/Test put job with invalid group', + 'ml/ml_info/Test ml info', + 'ml/post_data/Test Flush data with invalid parameters', + 'ml/post_data/Test flushing and posting a closed job', + 'ml/post_data/Test open and close with non-existent job id', + 'ml/post_data/Test POST data with invalid parameters', + 'ml/preview_datafeed/Test preview missing datafeed', + 'ml/revert_model_snapshot/Test revert model with invalid snapshotId', + 'ml/start_stop_datafeed/Test start datafeed job, but not open', + 'ml/start_stop_datafeed/Test start non existing datafeed', + 'ml/start_stop_datafeed/Test stop non existing datafeed', + 'ml/update_model_snapshot/Test without description', + 'ml/validate/Test invalid job config', + 'ml/validate/Test job config is invalid because model snapshot id set', + 'ml/validate/Test job config that is invalid only because of the job ID', + 'ml/validate_detector/Test invalid detector' + ].join(',') +} + +integTestCluster { + dependsOn copyMlRestTests + extraConfigFile 'roles.yml', 'roles.yml' + setupCommand 'setupTestAdminUser', + 'bin/elasticsearch-users', 'useradd', 'x_pack_rest_user', '-p', 
'x-pack-test-password', '-r', 'superuser' + setupCommand 'setupMlAdminUser', + 'bin/elasticsearch-users', 'useradd', 'ml_admin', '-p', 'x-pack-test-password', '-r', 'minimal,machine_learning_admin' + setupCommand 'setupMlUserUser', + 'bin/elasticsearch-users', 'useradd', 'ml_user', '-p', 'x-pack-test-password', '-r', 'minimal,machine_learning_user' + setupCommand 'setupPowerlessUser', + 'bin/elasticsearch-users', 'useradd', 'no_ml', '-p', 'x-pack-test-password', '-r', 'minimal' + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'x_pack_rest_user', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} diff --git a/x-pack/qa/smoke-test-ml-with-security/roles.yml b/x-pack/qa/smoke-test-ml-with-security/roles.yml new file mode 100644 index 0000000000000..e47fe40a120cd --- /dev/null +++ b/x-pack/qa/smoke-test-ml-with-security/roles.yml @@ -0,0 +1,17 @@ +minimal: + cluster: + # This is always required because the REST client uses it to find the version of + # Elasticsearch it's talking to + - cluster:monitor/main + indices: + # Give all users involved in these tests access to the indices where the data to + # be analyzed is stored, because the ML roles alone do not provide access to + # non-ML indices + - names: [ 'airline-data', 'index-*', 'unavailable-data', 'utopia' ] + privileges: + - indices:admin/create + - indices:admin/refresh + - indices:data/read/field_caps + - indices:data/read/search + - indices:data/write/bulk + - indices:data/write/index diff --git a/x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityIT.java b/x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityIT.java new file mode 100644 index 0000000000000..fc6cb92791fac --- /dev/null +++ b/x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityIT.java @@ -0,0 +1,70 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
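The `waitCondition` blocks in the two `build.gradle` files above poll the cluster health endpoint with the admin credentials, via Ant's `get` task and ten retries, until the node reports at least yellow status. As a plain-Java illustration of the same readiness check, here is a rough sketch; the host, retry count and sleep interval are assumptions that merely mirror the Gradle configuration, and the class name is invented:

```java
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public final class ClusterHealthWaitSketch {

    // Polls the cluster health endpoint until it answers 200 or the retries run out.
    static boolean waitForYellow(String httpUri, String user, String password) throws InterruptedException {
        String token = "Basic " + Base64.getEncoder()
                .encodeToString((user + ":" + password).getBytes(StandardCharsets.UTF_8));
        for (int attempt = 0; attempt < 10; attempt++) {
            try {
                HttpURLConnection connection = (HttpURLConnection) new URL(
                        "http://" + httpUri + "/_cluster/health?wait_for_status=yellow").openConnection();
                connection.setRequestProperty("Authorization", token);
                connection.setRequestMethod("GET");
                if (connection.getResponseCode() == 200) {
                    return true;
                }
            } catch (IOException e) {
                // node not accepting connections yet, fall through and retry
            }
            Thread.sleep(500L);
        }
        return false;
    }

    public static void main(String[] args) throws InterruptedException {
        System.out.println(waitForYellow("localhost:9200", "x_pack_rest_user", "x-pack-test-password"));
    }
}
```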
+ */ +package org.elasticsearch.smoketest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.xpack.test.rest.XPackRestIT; + +import java.util.Collections; +import java.util.Map; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; + + +public class MlWithSecurityIT extends XPackRestIT { + + private static final String TEST_ADMIN_USERNAME = "x_pack_rest_user"; + + public MlWithSecurityIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + protected String[] getCredentials() { + return new String[]{"ml_admin", "x-pack-test-password"}; + } + + @Override + protected Settings restClientSettings() { + String[] creds = getCredentials(); + String token = basicAuthHeaderValue(creds[0], new SecureString(creds[1].toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue(TEST_ADMIN_USERNAME, SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + protected Map getApiCallHeaders() { + return Collections.singletonMap("Authorization", basicAuthHeaderValue(TEST_ADMIN_USERNAME, + SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)); + } + + @Override + protected boolean isMonitoringTest() { + return false; + } + + @Override + protected boolean isWatcherTest() { + return false; + } + + @Override + protected boolean isMachineLearningTest() { + return true; + } +} diff --git a/x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityInsufficientRoleIT.java b/x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityInsufficientRoleIT.java new file mode 100644 index 0000000000000..7d66c7debebae --- /dev/null +++ b/x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityInsufficientRoleIT.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
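`MlWithSecurityIT` above deliberately splits credentials: `restClientSettings()` carries the restricted user under test, while `restAdminSettings()` and `getApiCallHeaders()` carry the `x_pack_rest_user` superuser, so setup and teardown calls are unaffected by the role being exercised. Outside the YAML test framework, that split amounts to two low-level REST clients with different default `Authorization` headers. The sketch below is only an illustration under assumed host settings (class name and port are mine, credentials mirror the cluster setup above):

```java
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.message.BasicHeader;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public final class TwoClientsSketch {

    private static String basicAuth(String user, String password) {
        return "Basic " + Base64.getEncoder()
                .encodeToString((user + ":" + password).getBytes(StandardCharsets.UTF_8));
    }

    // Build a low-level REST client that always sends the given user's credentials.
    private static RestClient clientFor(String user, String password) {
        return RestClient.builder(new HttpHost("localhost", 9200, "http"))
                .setDefaultHeaders(new Header[] { new BasicHeader("Authorization", basicAuth(user, password)) })
                .build();
    }

    public static void main(String[] args) throws IOException {
        try (RestClient adminClient = clientFor("x_pack_rest_user", "x-pack-test-password");
             RestClient mlUserClient = clientFor("ml_user", "x-pack-test-password")) {
            // setup-style call as the admin, test-style call as the restricted user
            Response health = adminClient.performRequest("GET", "/_cluster/health");
            Response rootInfo = mlUserClient.performRequest("GET", "/");
            System.out.println(health.getStatusLine() + " / " + rootInfo.getStatusLine());
        }
    }
}
```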
+ */ +package org.elasticsearch.smoketest; + +import com.carrotsearch.randomizedtesting.annotations.Name; + +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.either; + +public class MlWithSecurityInsufficientRoleIT extends MlWithSecurityIT { + + public MlWithSecurityInsufficientRoleIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @Override + public void test() throws IOException { + try { + // Cannot use expectThrows here because blacklisted tests will throw an + // InternalAssumptionViolatedException rather than an AssertionError + super.test(); + fail("should have failed because of missing role"); + } catch (AssertionError ae) { + // Some tests assert on searches of wildcarded ML indices rather than on ML endpoints. For these we expect no hits. + if (ae.getMessage().contains("hits.total didn't match expected value")) { + assertThat(ae.getMessage(), containsString("but was [0]")); + } else { + assertThat(ae.getMessage(), + either(containsString("action [cluster:monitor/xpack/ml")).or(containsString("action [cluster:admin/xpack/ml"))); + assertThat(ae.getMessage(), containsString("returned [403 Forbidden]")); + assertThat(ae.getMessage(), containsString("is unauthorized for user [no_ml]")); + } + } + } + + @Override + protected String[] getCredentials() { + return new String[]{"no_ml", "x-pack-test-password"}; + } +} + diff --git a/x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java b/x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java new file mode 100644 index 0000000000000..b103d30f282e2 --- /dev/null +++ b/x-pack/qa/smoke-test-ml-with-security/src/test/java/org/elasticsearch/smoketest/MlWithSecurityUserRoleIT.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.smoketest; + +import com.carrotsearch.randomizedtesting.annotations.Name; + +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.section.DoSection; +import org.elasticsearch.test.rest.yaml.section.ExecutableSection; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.either; + +public class MlWithSecurityUserRoleIT extends MlWithSecurityIT { + + private final ClientYamlTestCandidate testCandidate; + + public MlWithSecurityUserRoleIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + this.testCandidate = testCandidate; + } + + @Override + public void test() throws IOException { + try { + super.test(); + + // We should have got here if and only if the only ML endpoints in the test were GETs + for (ExecutableSection section : testCandidate.getTestSection().getExecutableSections()) { + if (section instanceof DoSection) { + if (((DoSection) section).getApiCallSection().getApi().startsWith("xpack.ml.") && + ((DoSection) section).getApiCallSection().getApi().startsWith("xpack.ml.get_") == false) { + fail("should have failed because of missing role"); + } + } + } + } catch (AssertionError ae) { + assertThat(ae.getMessage(), + either(containsString("action [cluster:monitor/xpack/ml")).or(containsString("action [cluster:admin/xpack/ml"))); + assertThat(ae.getMessage(), containsString("returned [403 Forbidden]")); + assertThat(ae.getMessage(), containsString("is unauthorized for user [ml_user]")); + } + } + + @Override + protected String[] getCredentials() { + return new String[]{"ml_user", "x-pack-test-password"}; + } +} + diff --git a/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle b/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle new file mode 100644 index 0000000000000..4e079430562a7 --- /dev/null +++ b/x-pack/qa/smoke-test-monitoring-with-watcher/build.gradle @@ -0,0 +1,27 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core')) + testCompile project(path: xpackModule('watcher')) + testCompile project(path: xpackModule('monitoring')) +} + +integTestCluster { + setting 'xpack.monitoring.enabled', 'true' + setting 'xpack.watcher.enabled', 'true' + setting 'xpack.security.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + + // exporter settings are configured dynamically in our tests + // configure a local exporter, the HTTP exporter is configured via dynamic settings change + //setting 'xpack.monitoring.exporters.my_local.type', 'local' + //setting 'xpack.monitoring.exporters.my_local.index.name.time_format', 'YYYY' + //setting 'xpack.monitoring.exporters.my_http.type', 'http' + //setting 'xpack.monitoring.exporters.my_http.host', 'http' + //setting 'xpack.monitoring.exporters.my_http.index.name.time_format', 'YYYY-MM' + // one of the exporters should configure cluster alerts + // setting 'xpack.monitoring.exporters.my_http.cluster_alerts.management.enabled', 'true' +} + diff --git a/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java b/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java new file mode 100644 index 0000000000000..d89d558f02fae --- /dev/null +++ 
b/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/MonitoringWithWatcherRestIT.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.smoketest; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.lucene.util.LuceneTestCase.AwaitsFix; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.yaml.ObjectPath; +import org.elasticsearch.xpack.monitoring.exporter.ClusterAlertsUtil; +import org.elasticsearch.xpack.watcher.actions.ActionBuilders; +import org.elasticsearch.xpack.watcher.client.WatchSourceBuilders; +import org.elasticsearch.xpack.watcher.trigger.TriggerBuilders; +import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; +import org.junit.After; + +import java.io.IOException; +import java.util.Collections; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; +import static org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule.Interval.Unit.MINUTES; +import static org.hamcrest.Matchers.is; + +@TestLogging("org.elasticsearch.client:TRACE,tracer:TRACE") +@AwaitsFix(bugUrl = "flaky tests") +public class MonitoringWithWatcherRestIT extends ESRestTestCase { + + @After + public void cleanExporters() throws Exception { + String body = Strings.toString(jsonBuilder().startObject().startObject("transient") + .nullField("xpack.monitoring.exporters.*") + .endObject().endObject()); + assertOK(adminClient().performRequest("PUT", "_cluster/settings", Collections.emptyMap(), + new StringEntity(body, ContentType.APPLICATION_JSON))); + + assertOK(adminClient().performRequest("DELETE", ".watch*", Collections.emptyMap())); + } + + public void testThatLocalExporterAddsWatches() throws Exception { + String watchId = createMonitoringWatch(); + + String body = BytesReference.bytes(jsonBuilder().startObject().startObject("transient") + .field("xpack.monitoring.exporters.my_local_exporter.type", "local") + .field("xpack.monitoring.exporters.my_local_exporter.cluster_alerts.management.enabled", true) + .endObject().endObject()).utf8ToString(); + + adminClient().performRequest("PUT", "_cluster/settings", Collections.emptyMap(), + new StringEntity(body, ContentType.APPLICATION_JSON)); + + assertTotalWatchCount(ClusterAlertsUtil.WATCH_IDS.length); + + assertMonitoringWatchHasBeenOverWritten(watchId); + } + + public void testThatHttpExporterAddsWatches() throws Exception { + String watchId = createMonitoringWatch(); + String httpHost = getHttpHost(); + + String body = BytesReference.bytes(jsonBuilder().startObject().startObject("transient") + .field("xpack.monitoring.exporters.my_http_exporter.type", "http") + .field("xpack.monitoring.exporters.my_http_exporter.host", httpHost) + .field("xpack.monitoring.exporters.my_http_exporter.cluster_alerts.management.enabled", true) + .endObject().endObject()).utf8ToString(); + + adminClient().performRequest("PUT", 
"_cluster/settings", Collections.emptyMap(), + new StringEntity(body, ContentType.APPLICATION_JSON)); + + assertTotalWatchCount(ClusterAlertsUtil.WATCH_IDS.length); + + assertMonitoringWatchHasBeenOverWritten(watchId); + } + + private void assertMonitoringWatchHasBeenOverWritten(String watchId) throws Exception { + ObjectPath path = ObjectPath.createFromResponse(client().performRequest("GET", "_xpack/watcher/watch/" + watchId)); + String interval = path.evaluate("watch.trigger.schedule.interval"); + assertThat(interval, is("1m")); + } + + private void assertTotalWatchCount(int expectedWatches) throws Exception { + assertBusy(() -> { + assertOK(client().performRequest("POST", ".watches/_refresh")); + ObjectPath path = ObjectPath.createFromResponse(client().performRequest("POST", ".watches/_count")); + int count = path.evaluate("count"); + assertThat(count, is(expectedWatches)); + }); + } + + private String createMonitoringWatch() throws Exception { + String clusterUUID = getClusterUUID(); + String watchId = clusterUUID + "_kibana_version_mismatch"; + String sampleWatch = WatchSourceBuilders.watchBuilder() + .trigger(TriggerBuilders.schedule(new IntervalSchedule(new IntervalSchedule.Interval(1000, MINUTES)))) + .input(simpleInput()) + .addAction("logme", ActionBuilders.loggingAction("foo")) + .buildAsBytes(XContentType.JSON).utf8ToString(); + client().performRequest("PUT", "_xpack/watcher/watch/" + watchId, Collections.emptyMap(), + new StringEntity(sampleWatch, ContentType.APPLICATION_JSON)); + return watchId; + } + + private String getClusterUUID() throws Exception { + Response response = client().performRequest("GET", "_cluster/state/metadata", Collections.emptyMap()); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + String clusterUUID = objectPath.evaluate("metadata.cluster_uuid"); + return clusterUUID; + } + + public String getHttpHost() throws IOException { + ObjectPath path = ObjectPath.createFromResponse(client().performRequest("GET", "_cluster/state", Collections.emptyMap())); + String masterNodeId = path.evaluate("master_node"); + + ObjectPath nodesPath = ObjectPath.createFromResponse(client().performRequest("GET", "_nodes", Collections.emptyMap())); + String httpHost = nodesPath.evaluate("nodes." + masterNodeId + ".http.publish_address"); + return httpHost; + } +} diff --git a/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/PreventFailingBuildIT.java b/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/PreventFailingBuildIT.java new file mode 100644 index 0000000000000..2c2cdd044aab7 --- /dev/null +++ b/x-pack/qa/smoke-test-monitoring-with-watcher/src/test/java/org/elasticsearch/smoketest/PreventFailingBuildIT.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.smoketest; + +import org.elasticsearch.test.ESIntegTestCase; + +public class PreventFailingBuildIT extends ESIntegTestCase { + + public void testSoThatTestsDoNotFail() { + // Noop + + // This is required because if tests are not enable no + // tests will be run in the entire project and all tests will fail. 
+ } +} diff --git a/x-pack/qa/smoke-test-plugins-ssl/build.gradle b/x-pack/qa/smoke-test-plugins-ssl/build.gradle new file mode 100644 index 0000000000000..28fd4d2db49ed --- /dev/null +++ b/x-pack/qa/smoke-test-plugins-ssl/build.gradle @@ -0,0 +1,407 @@ +import org.elasticsearch.gradle.LoggedExec +import org.elasticsearch.gradle.MavenFilteringHack +import org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin +import org.elasticsearch.gradle.plugin.PluginBuildPlugin +import org.elasticsearch.gradle.test.NodeInfo + +import javax.net.ssl.HttpsURLConnection +import javax.net.ssl.KeyManagerFactory +import javax.net.ssl.SSLContext +import javax.net.ssl.TrustManagerFactory +import java.nio.charset.StandardCharsets +import java.security.KeyStore +import java.security.SecureRandom + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') +} + +String outputDir = "${buildDir}/generated-resources/${project.name}" +task copyXPackPluginProps(type: Copy) { + from project(xpackModule('core')).file('src/main/plugin-metadata') + from project(xpackModule('core')).tasks.pluginProperties + into outputDir +} +project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) + +// needed to be consistent with ssl host checking +Object san = new SanEvaluator() + +// location of generated keystores and certificates +File keystoreDir = new File(project.buildDir, 'keystore') + +// Generate the node's keystore +File nodeKeystore = new File(keystoreDir, 'test-node.jks') +task createNodeKeyStore(type: LoggedExec) { + doFirst { + if (nodeKeystore.parentFile.exists() == false) { + nodeKeystore.parentFile.mkdirs() + } + if (nodeKeystore.exists()) { + delete nodeKeystore + } + } + executable = new File(project.runtimeJavaHome, 'bin/keytool') + standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8')) + args '-genkey', + '-alias', 'test-node', + '-keystore', nodeKeystore, + '-keyalg', 'RSA', + '-keysize', '2048', + '-validity', '712', + '-dname', 'CN=smoke-test-plugins-ssl', + '-keypass', 'keypass', + '-storepass', 'keypass', + '-ext', san +} + +// Generate the client's keystore +File clientKeyStore = new File(keystoreDir, 'test-client.jks') +task createClientKeyStore(type: LoggedExec) { + doFirst { + if (clientKeyStore.parentFile.exists() == false) { + clientKeyStore.parentFile.mkdirs() + } + if (clientKeyStore.exists()) { + delete clientKeyStore + } + } + executable = new File(project.runtimeJavaHome, 'bin/keytool') + standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8')) + args '-genkey', + '-alias', 'test-client', + '-keystore', clientKeyStore, + '-keyalg', 'RSA', + '-keysize', '2048', + '-validity', '712', + '-dname', 'CN=smoke-test-plugins-ssl', + '-keypass', 'keypass', + '-storepass', 'keypass', + '-ext', san +} + +// Export the node's certificate +File nodeCertificate = new File(keystoreDir, 'test-node.cert') +task exportNodeCertificate(type: LoggedExec) { + dependsOn createNodeKeyStore + doFirst { + if (nodeCertificate.parentFile.exists() == false) { + nodeCertificate.parentFile.mkdirs() + } + if (nodeCertificate.exists()) { + delete nodeCertificate + } + } + executable = new File(project.runtimeJavaHome, 'bin/keytool') + args '-export', + '-alias', 'test-node', + '-keystore', nodeKeystore, + '-storepass', 'keypass', + '-file', 
nodeCertificate +} + +// Import the node certificate in the client's keystore +task importNodeCertificateInClientKeyStore(type: LoggedExec) { + dependsOn exportNodeCertificate + executable = new File(project.runtimeJavaHome, 'bin/keytool') + args '-import', + '-alias', 'test-node', + '-keystore', clientKeyStore, + '-storepass', 'keypass', + '-file', nodeCertificate, + '-noprompt' +} + +// Export the client's certificate +File clientCertificate = new File(keystoreDir, 'test-client.cert') +task exportClientCertificate(type: LoggedExec) { + dependsOn createClientKeyStore + doFirst { + if (clientCertificate.parentFile.exists() == false) { + clientCertificate.parentFile.mkdirs() + } + if (clientCertificate.exists()) { + delete clientCertificate + } + } + executable = new File(project.runtimeJavaHome, 'bin/keytool') + args '-export', + '-alias', 'test-client', + '-keystore', clientKeyStore, + '-storepass', 'keypass', + '-file', clientCertificate +} + +// Import the client certificate in the node's keystore +task importClientCertificateInNodeKeyStore(type: LoggedExec) { + dependsOn exportClientCertificate + executable = new File(project.runtimeJavaHome, 'bin/keytool') + args '-import', + '-alias', 'test-client', + '-keystore', nodeKeystore, + '-storepass', 'keypass', + '-file', clientCertificate, + '-noprompt' +} + +forbiddenPatterns { + exclude '**/*.cert' +} + +// Add keystores to test classpath: it expects it there +sourceSets.test.resources.srcDir(keystoreDir) +processTestResources.dependsOn( + createNodeKeyStore, createClientKeyStore, + importNodeCertificateInClientKeyStore, importClientCertificateInNodeKeyStore +) + +integTestCluster.dependsOn(importClientCertificateInNodeKeyStore, importNodeCertificateInClientKeyStore) + + +ext.pluginsCount = 0 +project(':plugins').getChildProjects().each { pluginName, pluginProject -> + // need to get a non-decorated project object, so must re-lookup the project by path + integTestCluster.plugin(pluginProject.path) + pluginsCount += 1 +} + +integTestCluster { + setting 'xpack.monitoring.collection.interval', '1s' + setting 'xpack.monitoring.exporters._http.type', 'http' + setting 'xpack.monitoring.exporters._http.enabled', 'false' + setting 'xpack.monitoring.exporters._http.ssl.truststore.path', clientKeyStore.name + setting 'xpack.monitoring.exporters._http.ssl.truststore.password', 'keypass' + setting 'xpack.monitoring.exporters._http.auth.username', 'monitoring_agent' + setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password' + setting 'xpack.monitoring.exporters._http.ssl.verification_mode', 'full' + + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.http.ssl.enabled', 'true' + setting 'xpack.security.http.ssl.keystore.path', nodeKeystore.name + keystoreSetting 'xpack.security.http.ssl.keystore.secure_password', 'keypass' + + setting 'xpack.ml.enabled', 'false' + + // copy keystores into config/ + extraConfigFile nodeKeystore.name, nodeKeystore + extraConfigFile clientKeyStore.name, clientKeyStore + + setupCommand 'setupTestUser', + 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + setupCommand 'setupMonitoringUser', + 'bin/elasticsearch-users', 'useradd', 'monitoring_agent', '-p', 'x-pack-test-password', '-r', 'remote_monitoring_agent' + + waitCondition = { NodeInfo node, AntBuilder ant -> + File tmpFile = new File(node.cwd, 'wait.success') + KeyStore keyStore = KeyStore.getInstance("JKS"); + keyStore.load(clientKeyStore.newInputStream(), 
'keypass'.toCharArray()); + KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + kmf.init(keyStore, 'keypass'.toCharArray()); + TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(keyStore); + SSLContext sslContext = SSLContext.getInstance("TLSv1.2"); + sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom()); + for (int i = 0; i < 10; i++) { + // we use custom wait logic here for HTTPS + HttpsURLConnection httpURLConnection = null; + try { + httpURLConnection = (HttpsURLConnection) new URL("https://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}&wait_for_status=yellow").openConnection(); + httpURLConnection.setSSLSocketFactory(sslContext.getSocketFactory()); + httpURLConnection.setRequestProperty("Authorization", "Basic " + + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8))); + httpURLConnection.setRequestMethod("GET"); + httpURLConnection.connect(); + if (httpURLConnection.getResponseCode() == 200) { + tmpFile.withWriter StandardCharsets.UTF_8.name(), { + it.write(httpURLConnection.getInputStream().getText(StandardCharsets.UTF_8.name())) + } + } + } catch (IOException e) { + if (i == 9) { + logger.error("final attempt of calling cluster health failed", e) + } else { + logger.debug("failed to call cluster health", e) + } + } finally { + if (httpURLConnection != null) { + httpURLConnection.disconnect(); + } + } + + // did not start, so wait a bit before trying again + Thread.sleep(500L); + } + return tmpFile.exists() + } +} + +ext.expansions = [ + 'expected.plugins.count': pluginsCount +] + +processTestResources { + from(sourceSets.test.resources.srcDirs) { + include '**/*.yml' + inputs.properties(expansions) + MavenFilteringHack.filter(it, expansions) + } +} + +/** A lazy evaluator to find the san to use for certificate generation. */ +class SanEvaluator { + + private static String san = null + + String toString() { + synchronized (SanEvaluator.class) { + if (san == null) { + san = getSubjectAlternativeNameString() + } + } + return san + } + + // Code stolen from NetworkUtils/InetAddresses/NetworkAddress to support SAN + /** Return all interfaces (and subinterfaces) on the system */ + private static List getInterfaces() throws SocketException { + List all = new ArrayList<>(); + addAllInterfaces(all, Collections.list(NetworkInterface.getNetworkInterfaces())); + Collections.sort(all, new Comparator() { + @Override + public int compare(NetworkInterface left, NetworkInterface right) { + return Integer.compare(left.getIndex(), right.getIndex()); + } + }); + return all; + } + + /** Helper for getInterfaces, recursively adds subinterfaces to {@code target} */ + private static void addAllInterfaces(List target, List level) { + if (!level.isEmpty()) { + target.addAll(level); + for (NetworkInterface intf : level) { + addAllInterfaces(target, Collections.list(intf.getSubInterfaces())); + } + } + } + + private static String getSubjectAlternativeNameString() { + List list = new ArrayList<>(); + for (NetworkInterface intf : getInterfaces()) { + if (intf.isUp()) { + // NOTE: some operating systems (e.g. BSD stack) assign a link local address to the loopback interface + // while technically not a loopback address, some of these treat them as one (e.g. OS X "localhost") so we must too, + // otherwise things just won't work out of box. So we include all addresses from loopback interfaces. 
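The HTTPS `waitCondition` above cannot use a plain `ant.get`, because the node only speaks TLS with a self-signed certificate; it therefore builds an `SSLContext` from the generated client keystore (which, through the import tasks above, also contains the node's certificate) and drives `HttpsURLConnection` by hand. For reference, the same construction in standalone Java looks roughly like the following; the keystore path, password and URL are assumptions mirroring the build script, and the class name is invented:

```java
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManagerFactory;
import java.io.InputStream;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.KeyStore;
import java.security.SecureRandom;

public final class KeystoreHttpsSketch {

    // Load the JKS keystore, use it both as key material and trust store, then issue one HTTPS GET.
    static int getOverHttps(Path keystorePath, char[] password, String url) throws Exception {
        KeyStore keyStore = KeyStore.getInstance("JKS");
        try (InputStream in = Files.newInputStream(keystorePath)) {
            keyStore.load(in, password);
        }
        KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
        kmf.init(keyStore, password);
        TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
        tmf.init(keyStore);
        SSLContext sslContext = SSLContext.getInstance("TLSv1.2");
        sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom());

        HttpsURLConnection connection = (HttpsURLConnection) new URL(url).openConnection();
        connection.setSSLSocketFactory(sslContext.getSocketFactory());
        connection.setRequestMethod("GET");
        return connection.getResponseCode();
    }

    public static void main(String[] args) throws Exception {
        System.out.println(getOverHttps(Paths.get("build/keystore/test-client.jks"),
                "keypass".toCharArray(), "https://localhost:9200/_cluster/health"));
    }
}
```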
+ for (InetAddress address : Collections.list(intf.getInetAddresses())) { + if (intf.isLoopback() || address.isLoopbackAddress()) { + list.add(address); + } + } + } + } + if (list.isEmpty()) { + throw new IllegalArgumentException("no up-and-running loopback addresses found, got " + getInterfaces()); + } + + StringBuilder builder = new StringBuilder("san="); + for (int i = 0; i < list.size(); i++) { + InetAddress address = list.get(i); + String hostAddress; + if (address instanceof Inet6Address) { + hostAddress = compressedIPV6Address((Inet6Address)address); + } else { + hostAddress = address.getHostAddress(); + } + builder.append("ip:").append(hostAddress); + String hostname = address.getHostName(); + if (hostname.equals(address.getHostAddress()) == false) { + builder.append(",dns:").append(hostname); + } + + if (i != (list.size() - 1)) { + builder.append(","); + } + } + + return builder.toString(); + } + + private static String compressedIPV6Address(Inet6Address inet6Address) { + byte[] bytes = inet6Address.getAddress(); + int[] hextets = new int[8]; + for (int i = 0; i < hextets.length; i++) { + hextets[i] = (bytes[2 * i] & 255) << 8 | bytes[2 * i + 1] & 255; + } + compressLongestRunOfZeroes(hextets); + return hextetsToIPv6String(hextets); + } + + /** + * Identify and mark the longest run of zeroes in an IPv6 address. + * + *
<p>
    Only runs of two or more hextets are considered. In case of a tie, the + * leftmost run wins. If a qualifying run is found, its hextets are replaced + * by the sentinel value -1. + * + * @param hextets {@code int[]} mutable array of eight 16-bit hextets + */ + private static void compressLongestRunOfZeroes(int[] hextets) { + int bestRunStart = -1; + int bestRunLength = -1; + int runStart = -1; + for (int i = 0; i < hextets.length + 1; i++) { + if (i < hextets.length && hextets[i] == 0) { + if (runStart < 0) { + runStart = i; + } + } else if (runStart >= 0) { + int runLength = i - runStart; + if (runLength > bestRunLength) { + bestRunStart = runStart; + bestRunLength = runLength; + } + runStart = -1; + } + } + if (bestRunLength >= 2) { + Arrays.fill(hextets, bestRunStart, bestRunStart + bestRunLength, -1); + } + } + + /** + * Convert a list of hextets into a human-readable IPv6 address. + * + *
<p>
    In order for "::" compression to work, the input should contain negative + * sentinel values in place of the elided zeroes. + * + * @param hextets {@code int[]} array of eight 16-bit hextets, or -1s + */ + private static String hextetsToIPv6String(int[] hextets) { + /* + * While scanning the array, handle these state transitions: + * start->num => "num" start->gap => "::" + * num->num => ":num" num->gap => "::" + * gap->num => "num" gap->gap => "" + */ + StringBuilder buf = new StringBuilder(39); + boolean lastWasNumber = false; + for (int i = 0; i < hextets.length; i++) { + boolean thisIsNumber = hextets[i] >= 0; + if (thisIsNumber) { + if (lastWasNumber) { + buf.append(':'); + } + buf.append(Integer.toHexString(hextets[i])); + } else { + if (i == 0 || lastWasNumber) { + buf.append("::"); + } + } + lastWasNumber = thisIsNumber; + } + return buf.toString(); + } +} diff --git a/x-pack/qa/smoke-test-plugins-ssl/src/test/java/org/elasticsearch/smoketest/SmokeTestMonitoringWithSecurityIT.java b/x-pack/qa/smoke-test-plugins-ssl/src/test/java/org/elasticsearch/smoketest/SmokeTestMonitoringWithSecurityIT.java new file mode 100644 index 0000000000000..f8d1dd5e2b717 --- /dev/null +++ b/x-pack/qa/smoke-test-plugins-ssl/src/test/java/org/elasticsearch/smoketest/SmokeTestMonitoringWithSecurityIT.java @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.smoketest; + +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.action.XPackUsageRequestBuilder; +import org.elasticsearch.xpack.core.action.XPackUsageResponse; +import org.elasticsearch.xpack.core.monitoring.MonitoringFeatureSetUsage; +import org.elasticsearch.xpack.core.security.SecurityField; +import org.junit.After; +import org.junit.Before; + +import java.net.InetSocketAddress; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Optional; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; + +/** + * This test checks that a Monitoring's HTTP exporter correctly exports to a monitoring cluster + * protected by security with HTTPS/SSL. + * + * It sets up a cluster with Monitoring and Security configured with SSL. Once started, + * an HTTP exporter is activated and it exports data locally over HTTPS/SSL. The test + * then uses a transport client to check that the data have been correctly received and + * indexed in the cluster. 
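To make the two IPv6 helpers in `SanEvaluator` above concrete, here is a small standalone walk-through for one example address (the address itself is an illustrative assumption, not something the build script uses). Marking indices 2 through 5 with the `-1` sentinel is what `compressLongestRunOfZeroes` would compute for this input, and the string-building loop repeats the num/gap state transitions described in `hextetsToIPv6String`:

```java
public final class Ipv6CompressionExample {
    public static void main(String[] args) {
        // hextets of the address 2001:db8:0:0:0:0:2:1
        int[] hextets = {0x2001, 0xdb8, 0, 0, 0, 0, 0x2, 0x1};

        // the longest run of zeroes spans indices 2..5, so those get the -1 sentinel
        for (int i = 2; i <= 5; i++) {
            hextets[i] = -1;
        }

        // num->num emits ":", the first step into a gap emits "::", gap->gap emits nothing
        StringBuilder buf = new StringBuilder();
        boolean lastWasNumber = false;
        for (int i = 0; i < hextets.length; i++) {
            boolean thisIsNumber = hextets[i] >= 0;
            if (thisIsNumber) {
                if (lastWasNumber) {
                    buf.append(':');
                }
                buf.append(Integer.toHexString(hextets[i]));
            } else if (i == 0 || lastWasNumber) {
                buf.append("::");
            }
            lastWasNumber = thisIsNumber;
        }
        System.out.println(buf); // prints 2001:db8::2:1
    }
}
```

An address with no run of at least two zero hextets would come out without any "::", because no sentinel is ever written for it.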
+ */ +public class SmokeTestMonitoringWithSecurityIT extends ESIntegTestCase { + private static final String USER = "test_user"; + private static final String PASS = "x-pack-test-password"; + private static final String MONITORING_PATTERN = ".monitoring-*"; + + @Override + protected Collection> transportClientPlugins() { + return Collections.singletonList(XPackPlugin.class); + } + + @Override + protected Settings externalClusterClientSettings() { + return Settings.builder() + .put(SecurityField.USER_SETTING.getKey(), USER + ":" + PASS) + .put(NetworkModule.TRANSPORT_TYPE_KEY, SecurityField.NAME4).build(); + } + + @Before + public void enableExporter() throws Exception { + Settings exporterSettings = Settings.builder() + .put("xpack.monitoring.collection.enabled", true) + .put("xpack.monitoring.exporters._http.enabled", true) + .put("xpack.monitoring.exporters._http.host", "https://" + randomNodeHttpAddress()) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(exporterSettings)); + } + + @After + public void disableExporter() { + Settings exporterSettings = Settings.builder() + .putNull("xpack.monitoring.collection.enabled") + .putNull("xpack.monitoring.exporters._http.enabled") + .putNull("xpack.monitoring.exporters._http.host") + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(exporterSettings)); + } + + private boolean getMonitoringUsageExportersDefined() throws Exception { + final XPackUsageResponse usageResponse = new XPackUsageRequestBuilder(client()).execute().get(); + final Optional monitoringUsage = + usageResponse.getUsages() + .stream() + .filter(usage -> usage instanceof MonitoringFeatureSetUsage) + .map(usage -> (MonitoringFeatureSetUsage)usage) + .findFirst(); + + assertThat("Monitoring feature set does not exist", monitoringUsage.isPresent(), is(true)); + + return monitoringUsage.get().getExporters().isEmpty() == false; + } + + public void testHTTPExporterWithSSL() throws Exception { + // Ensures that the exporter is actually on + assertBusy(() -> assertThat("[_http] exporter is not defined", getMonitoringUsageExportersDefined(), is(true))); + + // Checks that the monitoring index templates have been installed + assertBusy(() -> { + GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates(MONITORING_PATTERN).get(); + assertThat(response.getIndexTemplates().size(), greaterThanOrEqualTo(2)); + }); + + // Waits for monitoring indices to be created + assertBusy(() -> { + try { + assertThat(client().admin().indices().prepareExists(MONITORING_PATTERN).get().isExists(), equalTo(true)); + } catch (Exception e) { + fail("exception when checking for monitoring documents: " + e.getMessage()); + } + }); + + // Waits for indices to be ready + ensureYellowAndNoInitializingShards(MONITORING_PATTERN); + + // Checks that the HTTP exporter has successfully exported some data + assertBusy(() -> { + try { + assertThat(client().prepareSearch(MONITORING_PATTERN).setSize(0).get().getHits().getTotalHits(), greaterThan(0L)); + } catch (Exception e) { + fail("exception when checking for monitoring documents: " + e.getMessage()); + } + }); + } + + private String randomNodeHttpAddress() { + List nodes = client().admin().cluster().prepareNodesInfo().clear().setHttp(true).get().getNodes(); + assertThat(nodes.size(), greaterThan(0)); + + InetSocketAddress[] httpAddresses = new InetSocketAddress[nodes.size()]; + for (int i = 0; i < nodes.size(); i++) { + httpAddresses[i] = 
nodes.get(i).getHttp().address().publishAddress().address(); + } + return NetworkAddress.format(randomFrom(httpAddresses)); + } +} diff --git a/x-pack/qa/smoke-test-plugins-ssl/src/test/java/org/elasticsearch/smoketest/SmokeTestPluginsSslClientYamlTestSuiteIT.java b/x-pack/qa/smoke-test-plugins-ssl/src/test/java/org/elasticsearch/smoketest/SmokeTestPluginsSslClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..8411a7eb5a4e7 --- /dev/null +++ b/x-pack/qa/smoke-test-plugins-ssl/src/test/java/org/elasticsearch/smoketest/SmokeTestPluginsSslClientYamlTestSuiteIT.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.smoketest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.net.URISyntaxException; +import java.nio.file.Files; +import java.nio.file.Path; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; + +public class SmokeTestPluginsSslClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + private static final String USER = "test_user"; + private static final String PASS = "x-pack-test-password"; + private static final String KEYSTORE_PASS = "keypass"; + + public SmokeTestPluginsSslClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + + static Path keyStore; + + @BeforeClass + public static void getKeyStore() { + try { + keyStore = PathUtils.get(SmokeTestPluginsSslClientYamlTestSuiteIT.class.getResource("/test-node.jks").toURI()); + } catch (URISyntaxException e) { + throw new ElasticsearchException("exception while reading the store", e); + } + if (!Files.exists(keyStore)) { + throw new IllegalStateException("Keystore file [" + keyStore + "] does not exist."); + } + } + + @AfterClass + public static void clearKeyStore() { + keyStore = null; + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .put(ESRestTestCase.TRUSTSTORE_PATH, keyStore) + .put(ESRestTestCase.TRUSTSTORE_PASSWORD, KEYSTORE_PASS) + .build(); + } + + @Override + protected String getProtocol() { + return "https"; + } +} diff --git a/x-pack/qa/smoke-test-plugins-ssl/src/test/resources/rest-api-spec/test/smoke_test_plugins_ssl/10_basic.yml b/x-pack/qa/smoke-test-plugins-ssl/src/test/resources/rest-api-spec/test/smoke_test_plugins_ssl/10_basic.yml new file mode 100644 index 0000000000000..32ff292d4715f --- /dev/null +++ 
b/x-pack/qa/smoke-test-plugins-ssl/src/test/resources/rest-api-spec/test/smoke_test_plugins_ssl/10_basic.yml @@ -0,0 +1,13 @@ +# Integration tests for smoke testing plugins +# +"Plugins are actually installed": + - do: + cluster.state: {} + + # Get master node id + - set: { master_node: master } + + - do: + nodes.info: {} + + - length: { nodes.$master.plugins: ${expected.plugins.count} } diff --git a/x-pack/qa/smoke-test-plugins-ssl/src/test/resources/rest-api-spec/test/smoke_test_plugins_ssl/20_settings_filter.yml b/x-pack/qa/smoke-test-plugins-ssl/src/test/resources/rest-api-spec/test/smoke_test_plugins_ssl/20_settings_filter.yml new file mode 100644 index 0000000000000..2eb851ab19d00 --- /dev/null +++ b/x-pack/qa/smoke-test-plugins-ssl/src/test/resources/rest-api-spec/test/smoke_test_plugins_ssl/20_settings_filter.yml @@ -0,0 +1,20 @@ +# Integration tests for smoke testing plugins +# +"Secret settings are correctly filtered": + - do: + cluster.state: {} + + - set: {master_node: master} + + - do: + nodes.info: + metric: [ settings ] + + - is_true: nodes + - is_true: nodes.$master.settings.xpack.monitoring.exporters._http.type + + - is_false: nodes.$master.settings.xpack.monitoring.exporters._http.auth.username + - is_false: nodes.$master.settings.xpack.monitoring.exporters._http.auth.password + - is_false: nodes.$master.settings.xpack.monitoring.exporters._http.ssl.truststore.path + - is_false: nodes.$master.settings.xpack.monitoring.exporters._http.ssl.truststore.password + - is_false: nodes.$master.settings.xpack.monitoring.exporters._http.ssl.verification_mode diff --git a/x-pack/qa/smoke-test-plugins/build.gradle b/x-pack/qa/smoke-test-plugins/build.gradle new file mode 100644 index 0000000000000..207fa8204db00 --- /dev/null +++ b/x-pack/qa/smoke-test-plugins/build.gradle @@ -0,0 +1,42 @@ +import org.elasticsearch.gradle.MavenFilteringHack +import org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin +import org.elasticsearch.gradle.plugin.PluginBuildPlugin + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') +} + +ext.pluginsCount = 0 +project(':plugins').getChildProjects().each { pluginName, pluginProject -> + // need to get a non-decorated project object, so must re-lookup the project by path + integTestCluster.plugin(pluginProject.path) + pluginsCount += 1 +} + +integTestCluster { + setting 'xpack.security.enabled', 'true' + setupCommand 'setupDummyUser', + 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'test_user', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} + +ext.expansions = [ + 'expected.plugins.count': pluginsCount +] + +processTestResources { + inputs.properties(expansions) + MavenFilteringHack.filter(it, expansions) +} diff --git a/x-pack/qa/smoke-test-plugins/src/test/java/org/elasticsearch/smoketest/XSmokeTestPluginsClientYamlTestSuiteIT.java b/x-pack/qa/smoke-test-plugins/src/test/java/org/elasticsearch/smoketest/XSmokeTestPluginsClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..29671386f5ba0 --- /dev/null +++ 
b/x-pack/qa/smoke-test-plugins/src/test/java/org/elasticsearch/smoketest/XSmokeTestPluginsClientYamlTestSuiteIT.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.smoketest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; + +public class XSmokeTestPluginsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + private static final String USER = "test_user"; + private static final String PASS = "x-pack-test-password"; + + public XSmokeTestPluginsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + + @Override + protected Settings restClientSettings() { + + String token = basicAuthHeaderValue(USER, new SecureString(PASS.toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } +} + diff --git a/x-pack/qa/smoke-test-plugins/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_basic.yml b/x-pack/qa/smoke-test-plugins/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_basic.yml new file mode 100644 index 0000000000000..b40101a819e46 --- /dev/null +++ b/x-pack/qa/smoke-test-plugins/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_basic.yml @@ -0,0 +1,14 @@ +# Integration tests for smoke testing plugins +# +"Plugins are actually installed": + - do: + cluster.state: {} + + # Get master node id + - set: { master_node: master } + + - do: + nodes.info: {} + + - length: { nodes.$master.plugins: ${expected.plugins.count} } + # TODO: check that every plugin is installed diff --git a/x-pack/qa/smoke-test-security-with-mustache/build.gradle b/x-pack/qa/smoke-test-security-with-mustache/build.gradle new file mode 100644 index 0000000000000..1c43db0b63e34 --- /dev/null +++ b/x-pack/qa/smoke-test-security-with-mustache/build.gradle @@ -0,0 +1,27 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path: ':modules:lang-mustache', configuration: 'runtime') +} + +integTestCluster { + setting 'xpack.watcher.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.security.enabled', 'true' + setting 'xpack.license.self_generated.type', 'trial' + setupCommand 'setupDummyUser', + 'bin/elasticsearch-users', 'useradd', 'test_admin', '-p', 'x-pack-test-password', '-r', 'superuser' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: 
"http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'test_admin', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} diff --git a/x-pack/qa/smoke-test-security-with-mustache/src/test/java/org/elasticsearch/smoketest/SmokeTestSecurityWithMustacheClientYamlTestSuiteIT.java b/x-pack/qa/smoke-test-security-with-mustache/src/test/java/org/elasticsearch/smoketest/SmokeTestSecurityWithMustacheClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..46daddae62b0d --- /dev/null +++ b/x-pack/qa/smoke-test-security-with-mustache/src/test/java/org/elasticsearch/smoketest/SmokeTestSecurityWithMustacheClientYamlTestSuiteIT.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.smoketest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; + +public class SmokeTestSecurityWithMustacheClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + private static final String BASIC_AUTH_VALUE = basicAuthHeaderValue("test_admin", + new SecureString("x-pack-test-password".toCharArray())); + + public SmokeTestSecurityWithMustacheClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + + @Override + protected Settings restClientSettings() { + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE) + .build(); + } +} diff --git a/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/10_templated_role_query.yml b/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/10_templated_role_query.yml new file mode 100644 index 0000000000000..30284ab1645a3 --- /dev/null +++ b/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/10_templated_role_query.yml @@ -0,0 +1,189 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.security.put_user: + username: "inline_template_user" + body: > + { + "password": "x-pack-test-password", + "roles" : [ "inline_template_role" ] + } + - do: + xpack.security.put_user: + username: "stored_template_user" + body: > + { + "password": "x-pack-test-password", + "roles" : [ "stored_template_role" ] + } + + - do: + xpack.security.put_user: + username: "terms_template_user" + body: > + { + "password": "x-pack-test-password", + "roles" : [ "terms_template_role" ], + "metadata": { + "groups": [ "inline_template_user"] + } + } + + - do: + xpack.security.put_role: + name: "inline_template_role" + body: > + { + "indices": [ + { + "names": "foobar", + 
"privileges": ["all"], + "query" : { + "template" : { + "source" : { + "term" : { "username" : "{{_user.username}}" } + } + } + } + } + ] + } + + - do: + xpack.security.put_role: + name: "terms_template_role" + body: > + { + "indices": [ + { + "names": "foobar", + "privileges": ["all"], + "query" : { + "template" : { + "source" : "{\"terms\" : { \"username\" : {{#toJson}}_user.metadata.groups{{/toJson}} } }" + } + } + } + ] + } + + - do: + xpack.security.put_role: + name: "stored_template_role" + body: > + { + "indices": [ + { + "names": "foobar", + "privileges": ["all"], + "query" : { + "template" : { + "id" : "1" + } + } + } + ] + } + + - do: + put_script: + id: "1" + body: > + { + "script": { + "lang": "mustache", + "source": { + "term" : { + "username" : "{{_user.username}}" + } + } + } + } + + - do: + index: + index: foobar + type: type + id: 1 + body: > + { + "username": "inline_template_user" + } + - do: + index: + index: foobar + type: type + id: 2 + body: > + { + "username": "stored_template_user" + } + + - do: + indices.refresh: {} + +--- +teardown: + - do: + xpack.security.delete_user: + username: "inline_template_user" + ignore: 404 + - do: + xpack.security.delete_user: + username: "stored_template_user" + ignore: 404 + - do: + xpack.security.delete_user: + username: "terms_template_user" + ignore: 404 + - do: + xpack.security.delete_role: + name: "inline_template_role" + ignore: 404 + - do: + xpack.security.delete_role: + name: "stored_template_role" + ignore: 404 + - do: + xpack.security.delete_role: + name: "terms_template_role" + ignore: 404 +--- +"Test inline template": + - do: + headers: + Authorization: "Basic aW5saW5lX3RlbXBsYXRlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ" + search: + index: foobar + body: { "query" : { "match_all" : {} } } + - match: { hits.total: 1} + - match: { hits.hits.0._source.username: inline_template_user} + +--- +"Test stored template": + - do: + headers: + Authorization: "Basic c3RvcmVkX3RlbXBsYXRlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" + search: + index: foobar + body: { "query" : { "match_all" : {} } } + - match: { hits.total: 1} + - match: { hits.hits.0._source.username: stored_template_user} + +--- +"Test terms template": + - do: + headers: + Authorization: "Basic dGVybXNfdGVtcGxhdGVfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" + search: + index: foobar + body: { "query" : { "match_all" : {} } } + - match: { hits.total: 1} + - match: { hits.hits.0._source.username: inline_template_user} diff --git a/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/11_templated_role_query_runas.yml b/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/11_templated_role_query_runas.yml new file mode 100644 index 0000000000000..0c6bd4cbdac67 --- /dev/null +++ b/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/11_templated_role_query_runas.yml @@ -0,0 +1,189 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.security.put_user: + username: "inline_template_user" + body: > + { + "password": "x-pack-test-password", + "roles" : [ "inline_template_role" ] + } + - do: + xpack.security.put_user: + username: "stored_template_user" + body: > + { + "password": "x-pack-test-password", + "roles" : [ "stored_template_role" ] + } + + - do: + xpack.security.put_user: + username: "terms_template_user" + body: > + { + "password": "x-pack-test-password", + "roles" : [ "terms_template_role" ], + "metadata": { + "groups": [ 
"inline_template_user" ] + } + } + + - do: + xpack.security.put_role: + name: "inline_template_role" + body: > + { + "indices": [ + { + "names": "foobar", + "privileges": ["all"], + "query" : { + "template" : { + "source" : { + "term" : { "username" : "{{_user.username}}" } + } + } + } + } + ] + } + + - do: + xpack.security.put_role: + name: "terms_template_role" + body: > + { + "indices": [ + { + "names": "foobar", + "privileges": ["all"], + "query" : { + "template" : { + "source" : "{\"terms\" : { \"username\" : {{#toJson}}_user.metadata.groups{{/toJson}} } }" + } + } + } + ] + } + + - do: + xpack.security.put_role: + name: "stored_template_role" + body: > + { + "indices": [ + { + "names": "foobar", + "privileges": ["all"], + "query" : { + "template" : { + "id" : "1" + } + } + } + ] + } + + - do: + put_script: + id: "1" + body: > + { + "script": { + "lang": "mustache", + "source": { + "term" : { + "username" : "{{_user.username}}" + } + } + } + } + + - do: + index: + index: foobar + type: type + id: 1 + body: > + { + "username": "inline_template_user" + } + - do: + index: + index: foobar + type: type + id: 2 + body: > + { + "username": "stored_template_user" + } + + - do: + indices.refresh: {} + +--- +teardown: + - do: + xpack.security.delete_user: + username: "inline_template_user" + ignore: 404 + - do: + xpack.security.delete_user: + username: "stored_template_user" + ignore: 404 + - do: + xpack.security.delete_user: + username: "terms_template_user" + ignore: 404 + - do: + xpack.security.delete_role: + name: "inline_template_role" + ignore: 404 + - do: + xpack.security.delete_role: + name: "stored_template_role" + ignore: 404 + - do: + xpack.security.delete_role: + name: "terms_template_role" + ignore: 404 +--- +"Test inline template with run as": + - do: + headers: + es-security-runas-user: "inline_template_user" + search: + index: foobar + body: { "query" : { "match_all" : {} } } + - match: { hits.total: 1} + - match: { hits.hits.0._source.username: inline_template_user} + +--- +"Test stored template with run as": + - do: + headers: + es-security-runas-user: "stored_template_user" + search: + index: foobar + body: { "query" : { "match_all" : {} } } + - match: { hits.total: 1} + - match: { hits.hits.0._source.username: stored_template_user} + +--- +"Test terms template with run as": + - do: + headers: + es-security-runas-user: "terms_template_user" + search: + index: foobar + body: { "query" : { "match_all" : {} } } + - match: { hits.total: 1} + - match: { hits.hits.0._source.username: inline_template_user} diff --git a/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/20_small_users_one_index.yml b/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/20_small_users_one_index.yml new file mode 100644 index 0000000000000..a015a88a315fa --- /dev/null +++ b/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/20_small_users_one_index.yml @@ -0,0 +1,198 @@ +--- +setup: + - skip: + features: headers + + - do: + indices.create: + index: shared_logs + + - do: + cluster.health: + wait_for_status: yellow + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "processors": [ + { + "set_security_user" : { + "field" : "user" + } + } + ] + } + - do: + xpack.security.put_user: + username: "joe" + body: > + { + "password": "x-pack-test-password", + "roles" : [ "small_companies_role" ], + "metadata" : { + "customer_id" : "1" + } + } + - do: + xpack.security.put_user: + username: "john" + body: > + { 
+ "password": "x-pack-test-password", + "roles" : [ "small_companies_role" ], + "metadata" : { + "customer_id" : "2" + } + } + +--- +teardown: + - do: + xpack.security.delete_user: + username: "joe" + ignore: 404 + - do: + xpack.security.delete_user: + username: "john" + ignore: 404 + - do: + xpack.security.delete_role: + name: "small_companies_role" + ignore: 404 + +--- +"Test shared index seperating user by using DLS role query with user's username": + - do: + xpack.security.put_role: + name: "small_companies_role" + body: > + { + "indices": [ + { + "names": "shared_logs", + "privileges": ["read", "create"], + "query" : { + "template" : { + "source" : { + "term" : { "user.username" : "{{_user.username}}" } + } + } + } + } + ] + } + + - do: + headers: + Authorization: "Basic am9lOngtcGFjay10ZXN0LXBhc3N3b3Jk" + index: + index: shared_logs + type: type + pipeline: "my_pipeline" + body: > + { + "log": "Joe's first log entry" + } + - do: + headers: + Authorization: "Basic am9objp4LXBhY2stdGVzdC1wYXNzd29yZA==" + index: + index: shared_logs + type: type + pipeline: "my_pipeline" + body: > + { + "log": "John's first log entry" + } + + - do: + indices.refresh: {} + + # Joe searches: + - do: + headers: + Authorization: "Basic am9lOngtcGFjay10ZXN0LXBhc3N3b3Jk" + search: + index: shared_logs + body: { "query" : { "match_all" : {} } } + - match: { hits.total: 1} + - match: { hits.hits.0._source.user.username: joe} + + # John searches: + - do: + headers: + Authorization: "Basic am9objp4LXBhY2stdGVzdC1wYXNzd29yZA==" + search: + index: shared_logs + body: { "query" : { "match_all" : {} } } + - match: { hits.total: 1} + - match: { hits.hits.0._source.user.username: john} + +--- +"Test shared index seperating user by using DLS role query with user's metadata": + - do: + xpack.security.put_role: + name: "small_companies_role" + body: > + { + "indices": [ + { + "names": "shared_logs", + "privileges": ["read", "create"], + "query" : { + "template" : { + "source" : { + "term" : { "user.metadata.customer_id" : "{{_user.metadata.customer_id}}" } + } + } + } + } + ] + } + + - do: + headers: + Authorization: "Basic am9lOngtcGFjay10ZXN0LXBhc3N3b3Jk" + index: + index: shared_logs + type: type + pipeline: "my_pipeline" + body: > + { + "log": "Joe's first log entry" + } + - do: + headers: + Authorization: "Basic am9objp4LXBhY2stdGVzdC1wYXNzd29yZA==" + index: + index: shared_logs + type: type + pipeline: "my_pipeline" + body: > + { + "log": "John's first log entry" + } + + - do: + indices.refresh: {} + + # Joe searches: + - do: + headers: + Authorization: "Basic am9lOngtcGFjay10ZXN0LXBhc3N3b3Jk" + search: + index: shared_logs + body: { "query" : { "match_all" : {} } } + - match: { hits.total: 1} + - match: { hits.hits.0._source.user.username: joe} + + # John searches: + - do: + headers: + Authorization: "Basic am9objp4LXBhY2stdGVzdC1wYXNzd29yZA==" + search: + index: shared_logs + body: { "query" : { "match_all" : {} } } + - match: { hits.total: 1} + - match: { hits.hits.0._source.user.username: john} diff --git a/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/30_search_template.yml b/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/30_search_template.yml new file mode 100644 index 0000000000000..e6e71b74b602f --- /dev/null +++ b/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/30_search_template.yml @@ -0,0 +1,169 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + + - 
do: + xpack.security.put_user: + username: "inline_template_user" + body: > + { + "password": "x-pack-test-password", + "roles" : [ "role" ] + } + + - do: + xpack.security.put_role: + name: "role" + body: > + { + "indices": [ + { + "names": "foobar", + "privileges": ["read"] + } + ] + } + + - do: + index: + index: foobar + type: type + id: 1 + body: + title: "contains some words" + + - do: + index: + index: unauthorized_index + type: type + id: 2 + body: + title: "contains some words too" + + - do: + indices.refresh: {} + +--- +teardown: + - do: + xpack.security.delete_user: + username: "inline_template_user" + ignore: 404 + +--- +"Test inline template against specific index": + - do: + headers: + Authorization: "Basic aW5saW5lX3RlbXBsYXRlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ" + search_template: + index: foobar + body: + source: + query: + match: + title: "{{query_string}}" + params: + query_string: "search for these words" + + - match: { hits.total: 1} + - match: { hits.hits.0._id: "1"} + +--- +"Test inline template against all indices": + - do: + headers: + Authorization: "Basic aW5saW5lX3RlbXBsYXRlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ" + search_template: + body: + source: + query: + match: + title: "{{query_string}}" + params: + query_string: "search for these words" + + - match: { hits.total: 1} + - match: { hits.hits.0._id: "1"} + +--- +"Test inline template against wildcard expression": + - do: + headers: + Authorization: "Basic aW5saW5lX3RlbXBsYXRlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ" + search_template: + index: foo* + body: + source: + query: + match: + title: "{{query_string}}" + params: + query_string: "search for these words" + + - match: { hits.total: 1} + - match: { hits.hits.0._id: "1"} +--- +"Test unauthorized inline template against wildcard expression": + + - do: + headers: + Authorization: "Basic aW5saW5lX3RlbXBsYXRlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ" + search_template: + index: unauthorized* + body: + source: + query: + match: + title: "{{query_string}}" + params: + query_string: "search for these words" + + - match: { hits.total: 0} + +--- +"Basic multi-search template": + + - do: + headers: + Authorization: "Basic aW5saW5lX3RlbXBsYXRlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ" + msearch_template: + body: + - index: foobar + - source: + query: + match: + title: "{{query_string}}" + params: + query_string: "search for these words" + - index: unauthorized* + - source: + query: + match: + title: "{{query_string}}" + params: + query_string: "search for these words" + + - match: { responses.0.hits.total: 1 } + - match: { responses.1.hits.total: 0 } + +--- +"Test render template": + - do: + headers: + Authorization: "Basic aW5saW5lX3RlbXBsYXRlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ" + render_search_template: + body: + source: + query: + match: + title: "{{query_string}}" + params: + query_string: "search for these words" + + - match: { template_output.query.match.title: "search for these words" } + diff --git a/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/templates/query.mustache b/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/templates/query.mustache new file mode 100644 index 0000000000000..34a93aa2cd788 --- /dev/null +++ b/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/templates/query.mustache @@ -0,0 +1,5 @@ +{ + "term" : { + "username" : "{{_user.username}}" + } +} \ No newline at end of file diff --git a/x-pack/qa/smoke-test-watcher-with-security/build.gradle b/x-pack/qa/smoke-test-watcher-with-security/build.gradle new file mode 
100644 index 0000000000000..0f052074bfbf6 --- /dev/null +++ b/x-pack/qa/smoke-test-watcher-with-security/build.gradle @@ -0,0 +1,45 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') +} + +// bring in watcher rest test suite +task copyWatcherRestTests(type: Copy) { + into project.sourceSets.test.output.resourcesDir + from project(xpackProject('plugin').path).sourceSets.test.resources.srcDirs + include 'rest-api-spec/test/watcher/**' +} + +integTestCluster { + dependsOn copyWatcherRestTests + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.security.enabled', 'true' + // settings to test settings filtering on + setting 'xpack.notification.email.account._email.smtp.host', 'host.domain' + setting 'xpack.notification.email.account._email.smtp.port', '587' + setting 'xpack.notification.email.account._email.smtp.user', '_user' + setting 'xpack.notification.email.account._email.smtp.password', '_passwd' + setting 'xpack.license.self_generated.type', 'trial' + extraConfigFile 'roles.yml', 'roles.yml' + setupCommand 'setupTestAdminUser', + 'bin/elasticsearch-users', 'useradd', 'test_admin', '-p', 'x-pack-test-password', '-r', 'superuser' + setupCommand 'setupXpackUserForTests', + 'bin/elasticsearch-users', 'useradd', 'x_pack_rest_user', '-p', 'x-pack-test-password', '-r', 'watcher_manager' + setupCommand 'setupWatcherManagerUser', + 'bin/elasticsearch-users', 'useradd', 'watcher_manager', '-p', 'x-pack-test-password', '-r', 'watcher_manager' + setupCommand 'setupPowerlessUser', + 'bin/elasticsearch-users', 'useradd', 'powerless_user', '-p', 'x-pack-test-password', '-r', 'crappy_role' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'test_admin', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} diff --git a/x-pack/qa/smoke-test-watcher-with-security/roles.yml b/x-pack/qa/smoke-test-watcher-with-security/roles.yml new file mode 100644 index 0000000000000..bebfa883fcb15 --- /dev/null +++ b/x-pack/qa/smoke-test-watcher-with-security/roles.yml @@ -0,0 +1,39 @@ +admin: + cluster: + - all + indices: + - names: '*' + privileges: + - all + +watcher_manager: + cluster: + - manage + indices: + - names: '.watch*' + privileges: + - all + # this index gets created by one of the watcher yaml tests + # and is needed for a search transform, so we have to create it as part of the test + - names: 'my_test_index' + privileges: + - all + run_as: + - powerless_user + - watcher_manager + +watcher_monitor: + cluster: + - monitor + indices: + - names: '.watcher-history-*' + privileges: + - read + +crappy_role: + cluster: + - cluster:monitor/nodes/info + - cluster:monitor/health + - cluster:monitor/nodes/liveness + - cluster:monitor/main + diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..a989bb476118f --- /dev/null +++ 
b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java @@ -0,0 +1,132 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.smoketest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField; +import org.junit.After; +import org.junit.Before; + +import java.util.Collections; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.is; + +public class SmokeTestWatcherWithSecurityClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + private static final String TEST_ADMIN_USERNAME = "test_admin"; + private static final String TEST_ADMIN_PASSWORD = "x-pack-test-password"; + + public SmokeTestWatcherWithSecurityClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + + @Before + public void startWatcher() throws Exception { + // delete the watcher history to not clutter with entries from other test + getAdminExecutionContext().callApi("indices.delete", Collections.singletonMap("index", ".watcher-history-*"), + emptyList(), emptyMap()); + + // create one document in this index, so we can test in the YAML tests, that the index cannot be accessed + Response resp = adminClient().performRequest("PUT", "/index_not_allowed_to_read/doc/1", Collections.emptyMap(), + new StringEntity("{\"foo\":\"bar\"}", ContentType.APPLICATION_JSON)); + assertThat(resp.getStatusLine().getStatusCode(), is(201)); + + assertBusy(() -> { + ClientYamlTestResponse response = + getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + String state = (String) response.evaluate("stats.0.watcher_state"); + + switch (state) { + case "stopped": + ClientYamlTestResponse startResponse = + getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + boolean isAcknowledged = (boolean) startResponse.evaluate("acknowledged"); + assertThat(isAcknowledged, is(true)); + break; + case "stopping": + throw new AssertionError("waiting until stopping state reached stopped state to start again"); + case "starting": + throw new AssertionError("waiting until starting state reached started state"); + case "started": + // all good here, we are done + break; + default: + throw new 
AssertionError("unknown state[" + state + "]"); + } + }); + + assertBusy(() -> { + for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) { + ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template", + singletonMap("name", template), emptyList(), emptyMap()); + assertThat(templateExistsResponse.getStatusCode(), is(200)); + } + }); + } + + @After + public void stopWatcher() throws Exception { + assertBusy(() -> { + ClientYamlTestResponse response = + getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + String state = (String) response.evaluate("stats.0.watcher_state"); + + switch (state) { + case "stopped": + // all good here, we are done + break; + case "stopping": + throw new AssertionError("waiting until stopping state reached stopped state"); + case "starting": + throw new AssertionError("waiting until starting state reached started state to stop"); + case "started": + ClientYamlTestResponse stopResponse = + getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap()); + boolean isAcknowledged = (boolean) stopResponse.evaluate("acknowledged"); + assertThat(isAcknowledged, is(true)); + break; + default: + throw new AssertionError("unknown state[" + state + "]"); + } + }); + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("watcher_manager", new SecureString("x-pack-test-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue(TEST_ADMIN_USERNAME, new SecureString(TEST_ADMIN_PASSWORD.toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } +} + diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java new file mode 100644 index 0000000000000..529a1aaec2725 --- /dev/null +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java @@ -0,0 +1,337 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.smoketest; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.yaml.ObjectPath; +import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; + +public class SmokeTestWatcherWithSecurityIT extends ESRestTestCase { + + private static final String TEST_ADMIN_USERNAME = "test_admin"; + private static final String TEST_ADMIN_PASSWORD = "x-pack-test-password"; + + @Before + public void startWatcher() throws Exception { + StringEntity entity = new StringEntity("{ \"value\" : \"15\" }", ContentType.APPLICATION_JSON); + assertOK(adminClient().performRequest("PUT", "my_test_index/doc/1", Collections.singletonMap("refresh", "true"), entity)); + + // delete the watcher history to not clutter with entries from other test + adminClient().performRequest("DELETE", ".watcher-history-*", Collections.emptyMap()); + + // create one document in this index, so we can test in the YAML tests, that the index cannot be accessed + Response resp = adminClient().performRequest("PUT", "/index_not_allowed_to_read/doc/1", Collections.emptyMap(), + new StringEntity("{\"foo\":\"bar\"}", ContentType.APPLICATION_JSON)); + assertThat(resp.getStatusLine().getStatusCode(), is(201)); + + assertBusy(() -> { + try { + Response statsResponse = adminClient().performRequest("GET", "_xpack/watcher/stats"); + ObjectPath objectPath = ObjectPath.createFromResponse(statsResponse); + String state = objectPath.evaluate("stats.0.watcher_state"); + + switch (state) { + case "stopped": + Response startResponse = adminClient().performRequest("POST", "_xpack/watcher/_start"); + assertOK(startResponse); + String body = EntityUtils.toString(startResponse.getEntity()); + assertThat(body, containsString("\"acknowledged\":true")); + break; + case "stopping": + throw new AssertionError("waiting until stopping state reached stopped state to start again"); + case "starting": + throw new AssertionError("waiting until starting state reached started state"); + case "started": + // all good here, we are done + break; + default: + throw new AssertionError("unknown state[" + state + "]"); + } + } catch (IOException e) { + throw new AssertionError(e); + } + }); + + assertBusy(() -> { + for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) { + assertOK(adminClient().performRequest("HEAD", "_template/" + template)); + } + }); + } + + @After + public void stopWatcher() throws Exception { + adminClient().performRequest("DELETE", 
"_xpack/watcher/watch/my_watch"); + assertOK(adminClient().performRequest("DELETE", "my_test_index")); + + assertBusy(() -> { + try { + Response statsResponse = adminClient().performRequest("GET", "_xpack/watcher/stats"); + ObjectPath objectPath = ObjectPath.createFromResponse(statsResponse); + String state = objectPath.evaluate("stats.0.watcher_state"); + + switch (state) { + case "stopped": + // all good here, we are done + break; + case "stopping": + throw new AssertionError("waiting until stopping state reached stopped state"); + case "starting": + throw new AssertionError("waiting until starting state reached started state to stop"); + case "started": + Response stopResponse = adminClient().performRequest("POST", "_xpack/watcher/_stop", Collections.emptyMap()); + assertOK(stopResponse); + String body = EntityUtils.toString(stopResponse.getEntity()); + assertThat(body, containsString("\"acknowledged\":true")); + break; + default: + throw new AssertionError("unknown state[" + state + "]"); + } + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("watcher_manager", new SecureString("x-pack-test-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue(TEST_ADMIN_USERNAME, new SecureString(TEST_ADMIN_PASSWORD.toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + + public void testSearchInputHasPermissions() throws Exception { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + builder.startObject("trigger").startObject("schedule").field("interval", "1s").endObject().endObject(); + builder.startObject("input").startObject("search").startObject("request") + .startArray("indices").value("my_test_index").endArray() + .startObject("body").startObject("query").startObject("match_all").endObject().endObject().endObject() + .endObject().endObject().endObject(); + builder.startObject("condition").startObject("compare").startObject("ctx.payload.hits.total").field("gte", 1) + .endObject().endObject().endObject(); + builder.startObject("actions").startObject("logging").startObject("logging") + .field("text", "successfully ran my_watch to test for search inpput").endObject().endObject().endObject(); + builder.endObject(); + + indexWatch("my_watch", builder); + } + + // check history, after watch has fired + ObjectPath objectPath = getWatchHistoryEntry("my_watch", "executed"); + boolean conditionMet = objectPath.evaluate("hits.hits.0._source.result.condition.met"); + assertThat(conditionMet, is(true)); + } + + public void testSearchInputWithInsufficientPrivileges() throws Exception { + String indexName = "index_not_allowed_to_read"; + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + builder.startObject("trigger").startObject("schedule").field("interval", "1s").endObject().endObject(); + builder.startObject("input").startObject("search").startObject("request") + .startArray("indices").value(indexName).endArray() + .startObject("body").startObject("query").startObject("match_all").endObject().endObject().endObject() + .endObject().endObject().endObject(); + builder.startObject("condition").startObject("compare").startObject("ctx.payload.hits.total").field("gte", 1) + .endObject().endObject().endObject(); + 
builder.startObject("actions").startObject("logging").startObject("logging") + .field("text", "this should never be logged").endObject().endObject().endObject(); + builder.endObject(); + + indexWatch("my_watch", builder); + } + + // check history, after watch has fired + ObjectPath objectPath = getWatchHistoryEntry("my_watch"); + String state = objectPath.evaluate("hits.hits.0._source.state"); + assertThat(state, is("execution_not_needed")); + boolean conditionMet = objectPath.evaluate("hits.hits.0._source.result.condition.met"); + assertThat(conditionMet, is(false)); + } + + public void testSearchTransformHasPermissions() throws Exception { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + builder.startObject("trigger").startObject("schedule").field("interval", "1s").endObject().endObject(); + builder.startObject("input").startObject("simple").field("foo", "bar").endObject().endObject(); + builder.startObject("transform").startObject("search").startObject("request") + .startArray("indices").value("my_test_index").endArray() + .startObject("body").startObject("query").startObject("match_all").endObject().endObject().endObject() + .endObject().endObject().endObject(); + builder.startObject("actions").startObject("index").startObject("index") + .field("index", "my_test_index") + .field("doc_type", "doc") + .field("doc_id", "my-id") + .endObject().endObject().endObject(); + builder.endObject(); + + indexWatch("my_watch", builder); + } + + // check history, after watch has fired + ObjectPath objectPath = getWatchHistoryEntry("my_watch", "executed"); + boolean conditionMet = objectPath.evaluate("hits.hits.0._source.result.condition.met"); + assertThat(conditionMet, is(true)); + + ObjectPath getObjectPath = ObjectPath.createFromResponse(client().performRequest("GET", "my_test_index/doc/my-id")); + String value = getObjectPath.evaluate("_source.hits.hits.0._source.value"); + assertThat(value, is("15")); + } + + public void testSearchTransformInsufficientPermissions() throws Exception { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + builder.startObject("trigger").startObject("schedule").field("interval", "1s").endObject().endObject(); + builder.startObject("input").startObject("simple").field("foo", "bar").endObject().endObject(); + builder.startObject("transform").startObject("search").startObject("request") + .startArray("indices").value("index_not_allowed_to_read").endArray() + .startObject("body").startObject("query").startObject("match_all").endObject().endObject().endObject() + .endObject().endObject().endObject(); + builder.startObject("condition").startObject("compare").startObject("ctx.payload.hits.total").field("gte", 1) + .endObject().endObject().endObject(); + builder.startObject("actions").startObject("index").startObject("index") + .field("index", "my_test_index") + .field("doc_type", "doc") + .field("doc_id", "some-id") + .endObject().endObject().endObject(); + builder.endObject(); + + indexWatch("my_watch", builder); + } + + getWatchHistoryEntry("my_watch"); + + Response response = adminClient().performRequest("GET", "my_test_index/doc/some-id", + Collections.singletonMap("ignore", "404")); + assertThat(response.getStatusLine().getStatusCode(), is(404)); + } + + public void testIndexActionHasPermissions() throws Exception { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + builder.startObject("trigger").startObject("schedule").field("interval", "1s").endObject().endObject(); + 
builder.startObject("input").startObject("simple").field("spam", "eggs").endObject().endObject(); + builder.startObject("actions").startObject("index").startObject("index") + .field("index", "my_test_index") + .field("doc_type", "doc") + .field("doc_id", "my-id") + .endObject().endObject().endObject(); + builder.endObject(); + + indexWatch("my_watch", builder); + } + + ObjectPath objectPath = getWatchHistoryEntry("my_watch", "executed"); + boolean conditionMet = objectPath.evaluate("hits.hits.0._source.result.condition.met"); + assertThat(conditionMet, is(true)); + + ObjectPath getObjectPath = ObjectPath.createFromResponse(client().performRequest("GET", "my_test_index/doc/my-id")); + String spam = getObjectPath.evaluate("_source.spam"); + assertThat(spam, is("eggs")); + } + + public void testIndexActionInsufficientPrivileges() throws Exception { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + builder.startObject("trigger").startObject("schedule").field("interval", "1s").endObject().endObject(); + builder.startObject("input").startObject("simple").field("spam", "eggs").endObject().endObject(); + builder.startObject("actions").startObject("index").startObject("index") + .field("index", "index_not_allowed_to_read") + .field("doc_type", "doc") + .field("doc_id", "my-id") + .endObject().endObject().endObject(); + builder.endObject(); + + indexWatch("my_watch", builder); + } + + ObjectPath objectPath = getWatchHistoryEntry("my_watch", "executed"); + boolean conditionMet = objectPath.evaluate("hits.hits.0._source.result.condition.met"); + assertThat(conditionMet, is(true)); + + Response response = adminClient().performRequest("GET", "index_not_allowed_to_read/doc/my-id", + Collections.singletonMap("ignore", "404")); + assertThat(response.getStatusLine().getStatusCode(), is(404)); + } + + private void indexWatch(String watchId, XContentBuilder builder) throws Exception { + StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); + + Response response = client().performRequest("PUT", "_xpack/watcher/watch/my_watch", Collections.emptyMap(), entity); + assertOK(response); + Map responseMap = entityAsMap(response); + assertThat(responseMap, hasEntry("_id", watchId)); + } + + private ObjectPath getWatchHistoryEntry(String watchId) throws Exception { + return getWatchHistoryEntry(watchId, null); + } + + private ObjectPath getWatchHistoryEntry(String watchId, String state) throws Exception { + final AtomicReference objectPathReference = new AtomicReference<>(); + assertBusy(() -> { + client().performRequest("POST", ".watcher-history-*/_refresh"); + + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + builder.startObject("query").startObject("bool").startArray("must"); + builder.startObject().startObject("term").startObject("watch_id").field("value", watchId).endObject().endObject() + .endObject(); + if (Strings.isNullOrEmpty(state) == false) { + builder.startObject().startObject("term").startObject("state").field("value", state).endObject().endObject() + .endObject(); + } + builder.endArray().endObject().endObject(); + builder.startArray("sort").startObject().startObject("trigger_event.triggered_time").field("order", "desc").endObject() + .endObject().endArray(); + builder.endObject(); + + StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); + Response response = client().performRequest("POST", ".watcher-history-*/_search", Collections.emptyMap(), entity); + ObjectPath 
objectPath = ObjectPath.createFromResponse(response); + int totalHits = objectPath.evaluate("hits.total"); + assertThat(totalHits, is(greaterThanOrEqualTo(1))); + String watchid = objectPath.evaluate("hits.hits.0._source.watch_id"); + assertThat(watchid, is(watchId)); + objectPathReference.set(objectPath); + } + }); + return objectPathReference.get(); + } +} diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/10_insufficient_privs.yml b/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/10_insufficient_privs.yml new file mode 100644 index 0000000000000..0b74ebb0c0058 --- /dev/null +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/10_insufficient_privs.yml @@ -0,0 +1,12 @@ +--- +"Test watcher is protected by security": + - do: + headers: { es-security-runas-user: powerless_user } + catch: forbidden + xpack.watcher.stats: {} + # there seems to be a bug in the yaml parser we use, where a single element list + # has the END_LIST token skipped...so here we just rerun the same request without + # the impersonation to show it works + - do: + xpack.watcher.stats: {} + - match: { stats.0.watcher_state: started } diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_settings_filter.yml b/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_settings_filter.yml new file mode 100644 index 0000000000000..4899c3040d650 --- /dev/null +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_settings_filter.yml @@ -0,0 +1,13 @@ +--- +"Test watcher settings filter is applied": + - do: + cluster.state: {} + - set: { master_node: master } + + - do: + nodes.info: + metric: settings + - is_true: nodes.$master.settings.xpack.notification.email.account._email.smtp.host + - is_true: nodes.$master.settings.xpack.notification.email.account._email.smtp.port + - is_true: nodes.$master.settings.xpack.notification.email.account._email.smtp.user + - is_false: nodes.$master.settings.xpack.notification.email.account._email.smtp.password diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_test_run_as_execute_watch.yml b/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_test_run_as_execute_watch.yml new file mode 100644 index 0000000000000..9bc7724b2c0f4 --- /dev/null +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_test_run_as_execute_watch.yml @@ -0,0 +1,330 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + + # user watcher_user is allowed to write into this index + - do: + index: + index: my_test_index + type: type + id: 1 + refresh: true + body: > + { + "value" : "15" + } + +--- +teardown: + - do: + xpack.watcher.delete_watch: + id: "my_watch" + ignore: 404 + + + + +--- +"Test watch search input is run as user who added the watch": + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 
2099" } + }, + "input": { + "search" : { + "request" : { + "indices" : [ "my_test_index" ], + "body" :{ + "query" : { "match_all": {} } + } + } + } + }, + "condition" : { + "compare" : { + "ctx.payload.hits.total" : { + "gte" : 1 + } + } + }, + "actions": { + "logging": { + "logging": { + "text": "Successfully ran my_watch to test for search input" + } + } + } + } + - match: { _id: "my_watch" } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + - match: { _id: "my_watch" } + - is_false: watch.status.headers + + - do: + xpack.watcher.execute_watch: + id: "my_watch" + - match: { watch_record.watch_id: "my_watch" } + - match: { watch_record.state: "executed" } + + + + +--- +"Test watch search input does not work against index user is not allowed to read": + + - do: + # by impersonating this request as powerless user we cannot query the my_test_index + # headers: { es-security-runas-user: powerless_user } + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input": { + "search" : { + "request" : { + "indices" : [ "index_not_allowed_to_read" ], + "body" :{ + "query" : { "match_all": {} } + } + } + } + }, + "condition" : { + "compare" : { + "ctx.payload.hits.total" : { + "gte" : 1 + } + } + }, + "actions": { + "logging": { + "logging": { + "text": "This message should never occur in the logs as the search above should not have returned any hits" + } + } + } + } + - match: { _id: "my_watch" } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + - match: { _id: "my_watch" } + - is_false: watch.status.headers + + - do: + xpack.watcher.execute_watch: + id: "my_watch" + - match: { watch_record.watch_id: "my_watch" } + # because we are not allowed to read the index, there wont be any data + - match: { watch_record.state: "execution_not_needed" } + + +--- +"Test watch search transform is run as user who added the watch": + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input": { + "simple" : { + "foo" : "bar" + } + }, + "transform" : { + "search" : { + "request" : { + "indices" : [ "my_test_index" ], + "body" :{ + "query" : { "match_all": {} } + } + } + } + }, + "actions": { + "index": { + "index": { + "index" : "my_test_index", + "doc_type" : "type", + "doc_id": "my-id" + } + } + } + } + - match: { _id: "my_watch" } + + - do: + xpack.watcher.execute_watch: + id: "my_watch" + - match: { watch_record.watch_id: "my_watch" } + + - do: + get: + index: my_test_index + type: type + id: my-id + # this value is from the document in the my_text_index index, see the setup + - match: { _source.hits.hits.0._source.value: "15" } + + +--- +"Test watch search transform does not work without permissions": + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 
2099" } + }, + "input": { + "simple" : { + "foo" : "bar" + } + }, + "transform" : { + "search" : { + "request" : { + "indices" : [ "index_not_allowed_to_read" ], + "body" :{ + "query" : { "match_all": {} } + } + } + } + }, + "actions": { + "index": { + "index": { + "index" : "my_test_index", + "doc_type" : "type", + "doc_id": "my-id" + } + } + } + } + - match: { _id: "my_watch" } + + - do: + xpack.watcher.execute_watch: + id: "my_watch" + - match: { watch_record.watch_id: "my_watch" } + + - do: + get: + index: my_test_index + type: type + id: my-id + - match: { _source.hits.total: 0 } + + + +--- +"Test watch index action requires permission to write to an index": + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input": { + "simple" : { + "foo" : "bar" + } + }, + "actions": { + "index": { + "index": { + "index" : "my_test_index", + "doc_type" : "type", + "doc_id": "my-id" + } + } + } + } + - match: { _id: "my_watch" } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + - match: { _id: "my_watch" } + - is_false: watch.status.headers + + - do: + xpack.watcher.execute_watch: + id: "my_watch" + - match: { watch_record.watch_id: "my_watch" } + - match: { watch_record.state: "executed" } + + - do: + get: + index: my_test_index + type: type + id: 1 + - match: { _id: "1" } + + + +--- +"Test watch index action does not work without permissions": + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input": { + "simple" : { + "foo" : "bar" + } + }, + "actions": { + "index": { + "index": { + "index" : "index_not_allowed_to_read", + "doc_type" : "type", + "doc_id": "my-id" + } + } + } + } + - match: { _id: "my_watch" } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + - match: { _id: "my_watch" } + - is_false: watch.status.headers + + - do: + xpack.watcher.execute_watch: + id: "my_watch" + - match: { watch_record.watch_id: "my_watch" } + - match: { watch_record.state: "executed" } + + - do: + get: + index: index_not_allowed_to_read + type: type + id: 1 + catch: forbidden + diff --git a/x-pack/qa/smoke-test-watcher/build.gradle b/x-pack/qa/smoke-test-watcher/build.gradle new file mode 100644 index 0000000000000..abfd27e729b6d --- /dev/null +++ b/x-pack/qa/smoke-test-watcher/build.gradle @@ -0,0 +1,22 @@ +import groovy.json.JsonSlurper + +import javax.net.ssl.HttpsURLConnection +import java.nio.charset.StandardCharsets + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('watcher'), configuration: 'runtime') + testCompile project(path: ':modules:lang-mustache', configuration: 'runtime') + testCompile project(path: ':modules:lang-painless', configuration: 'runtime') +} + +integTestCluster { + setting 'xpack.security.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + setting 'logger.org.elasticsearch.xpack.watcher', 'DEBUG' +} diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java new file mode 100644 index 0000000000000..6581de8fa26fb --- /dev/null +++ 
b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java @@ -0,0 +1,183 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.smoketest; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.yaml.ObjectPath; +import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class SmokeTestWatcherTestSuiteIT extends ESRestTestCase { + + private static final String TEST_ADMIN_USERNAME = "test_admin"; + private static final String TEST_ADMIN_PASSWORD = "x-pack-test-password"; + + @Before + public void startWatcher() throws Exception { + assertBusy(() -> { + adminClient().performRequest("POST", "_xpack/watcher/_start"); + + for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) { + assertOK(adminClient().performRequest("HEAD", "_template/" + template)); + } + + Response statsResponse = adminClient().performRequest("GET", "_xpack/watcher/stats"); + ObjectPath objectPath = ObjectPath.createFromResponse(statsResponse); + String state = objectPath.evaluate("stats.0.watcher_state"); + assertThat(state, is("started")); + }); + } + + @After + public void stopWatcher() throws Exception { + assertBusy(() -> { + adminClient().performRequest("POST", "_xpack/watcher/_stop", Collections.emptyMap()); + Response statsResponse = adminClient().performRequest("GET", "_xpack/watcher/stats"); + ObjectPath objectPath = ObjectPath.createFromResponse(statsResponse); + String state = objectPath.evaluate("stats.0.watcher_state"); + assertThat(state, is("stopped")); + }); + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("watcher_manager", new SecureString("x-pack-test-password".toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue(TEST_ADMIN_USERNAME, new SecureString(TEST_ADMIN_PASSWORD.toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + + public void testMonitorClusterHealth() throws Exception { + String watchId = "cluster_health_watch"; + + // get master publish address + Response clusterStateResponse = adminClient().performRequest("GET", 
"_cluster/state"); + ObjectPath clusterState = ObjectPath.createFromResponse(clusterStateResponse); + String masterNode = clusterState.evaluate("master_node"); + assertThat(masterNode, is(notNullValue())); + + Response statsResponse = adminClient().performRequest("GET", "_nodes"); + ObjectPath stats = ObjectPath.createFromResponse(statsResponse); + String address = stats.evaluate("nodes." + masterNode + ".http.publish_address"); + assertThat(address, is(notNullValue())); + String[] splitAddress = address.split(":", 2); + String host = splitAddress[0]; + int port = Integer.valueOf(splitAddress[1]); + + // put watch + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + // trigger + builder.startObject("trigger").startObject("schedule").field("interval", "1s").endObject().endObject(); + // input + builder.startObject("input").startObject("http").startObject("request").field("host", host).field("port", port) + .field("path", "/_cluster/health") + .field("scheme", "http") + .startObject("auth").startObject("basic") + .field("username", TEST_ADMIN_USERNAME).field("password", TEST_ADMIN_PASSWORD) + .endObject().endObject() + .endObject().endObject().endObject(); + // condition + builder.startObject("condition").startObject("compare").startObject("ctx.payload.number_of_data_nodes").field("lt", 10) + .endObject().endObject().endObject(); + // actions + builder.startObject("actions").startObject("log").startObject("logging").field("text", "executed").endObject().endObject() + .endObject(); + + builder.endObject(); + + indexWatch(watchId, builder); + } + + // check watch count + assertWatchCount(1); + + // check watch history + ObjectPath objectPath = getWatchHistoryEntry(watchId); + boolean conditionMet = objectPath.evaluate("hits.hits.0._source.result.condition.met"); + assertThat(conditionMet, is(true)); + + deleteWatch(watchId); + assertWatchCount(0); + } + + private void indexWatch(String watchId, XContentBuilder builder) throws Exception { + StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); + + Response response = client().performRequest("PUT", "_xpack/watcher/watch/" + watchId, Collections.emptyMap(), entity); + assertOK(response); + Map responseMap = entityAsMap(response); + assertThat(responseMap, hasEntry("_id", watchId)); + } + + private void deleteWatch(String watchId) throws IOException { + Response response = client().performRequest("DELETE", "_xpack/watcher/watch/" + watchId); + assertOK(response); + ObjectPath path = ObjectPath.createFromResponse(response); + boolean found = path.evaluate("found"); + assertThat(found, is(true)); + } + + private ObjectPath getWatchHistoryEntry(String watchId) throws Exception { + final AtomicReference objectPathReference = new AtomicReference<>(); + assertBusy(() -> { + client().performRequest("POST", ".watcher-history-*/_refresh"); + + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + builder.startObject("query").startObject("bool").startArray("must"); + builder.startObject().startObject("term").startObject("watch_id").field("value", watchId).endObject().endObject() + .endObject(); + builder.endArray().endObject().endObject(); + builder.startArray("sort").startObject().startObject("trigger_event.triggered_time").field("order", "desc").endObject() + .endObject().endArray(); + builder.endObject(); + + StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); + Response response = client().performRequest("POST", 
".watcher-history-*/_search", Collections.emptyMap(), entity); + ObjectPath objectPath = ObjectPath.createFromResponse(response); + int totalHits = objectPath.evaluate("hits.total"); + assertThat(totalHits, is(greaterThanOrEqualTo(1))); + String watchid = objectPath.evaluate("hits.hits.0._source.watch_id"); + assertThat(watchid, is(watchId)); + objectPathReference.set(objectPath); + } + }); + return objectPathReference.get(); + } + + private void assertWatchCount(int expectedWatches) throws IOException { + Response watcherStatsResponse = adminClient().performRequest("GET", "_xpack/watcher/stats"); + ObjectPath objectPath = ObjectPath.createFromResponse(watcherStatsResponse); + int watchCount = objectPath.evaluate("stats.0.watch_count"); + assertThat(watchCount, is(expectedWatches)); + } +} diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java new file mode 100644 index 0000000000000..86df806531144 --- /dev/null +++ b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.smoketest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField; +import org.junit.After; +import org.junit.Before; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.is; + +/** Runs rest tests against external cluster */ +public class WatcherRestIT extends ESClientYamlSuiteTestCase { + + public WatcherRestIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + + @Before + public void startWatcher() throws Exception { + assertBusy(() -> { + ClientYamlTestResponse response = + getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + String state = (String) response.evaluate("stats.0.watcher_state"); + + switch (state) { + case "stopped": + ClientYamlTestResponse startResponse = + getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + boolean isAcknowledged = (boolean) startResponse.evaluate("acknowledged"); + assertThat(isAcknowledged, is(true)); + break; + case "stopping": + throw new AssertionError("waiting until stopping state reached stopped state to start again"); + case "starting": + throw new AssertionError("waiting until starting state reached started state"); + case "started": + // all good here, we are done + break; + default: + throw new AssertionError("unknown state[" + state + "]"); + } + }); + + assertBusy(() -> { + for (String template : WatcherIndexTemplateRegistryField.TEMPLATE_NAMES) { + 
ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template", + singletonMap("name", template), emptyList(), emptyMap()); + assertThat(templateExistsResponse.getStatusCode(), is(200)); + } + }); + } + + @After + public void stopWatcher() throws Exception { + assertBusy(() -> { + ClientYamlTestResponse response = + getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + String state = (String) response.evaluate("stats.0.watcher_state"); + + switch (state) { + case "stopped": + // all good here, we are done + break; + case "stopping": + throw new AssertionError("waiting until stopping state reached stopped state"); + case "starting": + throw new AssertionError("waiting until starting state reached started state to stop"); + case "started": + ClientYamlTestResponse stopResponse = + getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap()); + boolean isAcknowledged = (boolean) stopResponse.evaluate("acknowledged"); + assertThat(isAcknowledged, is(true)); + break; + default: + throw new AssertionError("unknown state[" + state + "]"); + } + }); + } +} diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherTemplateIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherTemplateIT.java new file mode 100644 index 0000000000000..feeea24871240 --- /dev/null +++ b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherTemplateIT.java @@ -0,0 +1,169 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.smoketest; + +import com.fasterxml.jackson.core.io.JsonStringEncoder; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.mustache.MustacheScriptEngine; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.watcher.Watcher; +import org.elasticsearch.xpack.watcher.common.text.TextTemplate; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.junit.Before; + +import java.io.IOException; +import java.io.StringWriter; +import java.io.Writer; +import java.util.Collections; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + +public class WatcherTemplateIT extends ESTestCase { + + private TextTemplateEngine textTemplateEngine; + + @Before + public void init() throws Exception { + MustacheScriptEngine engine = new MustacheScriptEngine(); + Map<String, ScriptEngine> engines = Collections.singletonMap(engine.getType(), engine); + Map<String, ScriptContext<?>> contexts = + Collections.singletonMap(Watcher.SCRIPT_TEMPLATE_CONTEXT.name, Watcher.SCRIPT_TEMPLATE_CONTEXT); + ScriptService scriptService = new ScriptService(Settings.EMPTY, engines, contexts); + textTemplateEngine = new TextTemplateEngine(Settings.EMPTY, scriptService); + } + + public void testEscaping() throws Exception { + XContentType contentType = randomFrom(XContentType.values()); + if (rarely()) { + contentType = null; + } + Character[] specialChars = new Character[]{'\f', '\n', '\r', '"', '\\', (char) 11, '\t', '\b' }; + int iters = scaledRandomIntBetween(100, 1000); + for (int i = 0; i < iters; i++) { + int rounds = scaledRandomIntBetween(1, 20); + StringWriter escaped = new StringWriter(); //This will be escaped as it is constructed + StringWriter unescaped = new StringWriter(); //This will be escaped at the end + + for (int j = 0; j < rounds; j++) { + String s = getChars(); + unescaped.write(s); + if (contentType == XContentType.JSON) { + escaped.write(JsonStringEncoder.getInstance().quoteAsString(s)); + } else { + escaped.write(s); + } + + char c = randomFrom(specialChars); + unescaped.append(c); + + if (contentType == XContentType.JSON) { + escaped.write(JsonStringEncoder.getInstance().quoteAsString("" + c)); + } else { + escaped.append(c); + } + } + + if (contentType == XContentType.JSON) { + assertThat(escaped.toString(), equalTo(new String(JsonStringEncoder.getInstance().quoteAsString(unescaped.toString())))); + } + else { + assertThat(escaped.toString(), equalTo(unescaped.toString())); + } + + String template = prepareTemplate("{{data}}", contentType); + + Map<String, Object> dataMap = new HashMap<>(); + dataMap.put("data", unescaped.toString()); + String renderedTemplate = textTemplateEngine.render(new TextTemplate(template), dataMap); + assertThat(renderedTemplate, notNullValue()); + + if (contentType == XContentType.JSON) { + if (!escaped.toString().equals(renderedTemplate)) { + String escapedString = escaped.toString(); + for (int l = 0; l < renderedTemplate.length() && l < escapedString.length(); ++l) { + if (renderedTemplate.charAt(l) != escapedString.charAt(l)) { + logger.error("at [{}] expected [{}] but got [{}]", l, renderedTemplate.charAt(l), escapedString.charAt(l)); + } + } + } + 
assertThat(escaped.toString(), equalTo(renderedTemplate)); + } else { + assertThat(unescaped.toString(), equalTo(renderedTemplate)); + } + } + } + + public void testSimpleParameterReplace() { + { + String template = "__json__::GET _search {\"query\": " + "{\"boosting\": {" + "\"positive\": {\"match\": {\"body\": \"gift\"}}," + + "\"negative\": {\"term\": {\"body\": {\"value\": \"solr\"}" + "}}, \"negative_boost\": {{boost_val}} } }}"; + Map vars = new HashMap<>(); + vars.put("boost_val", "0.3"); + String result = textTemplateEngine.render(new TextTemplate(template), vars); + assertEquals("GET _search {\"query\": {\"boosting\": {\"positive\": {\"match\": {\"body\": \"gift\"}}," + + "\"negative\": {\"term\": {\"body\": {\"value\": \"solr\"}}}, \"negative_boost\": 0.3 } }}", + result); + } + { + String template = "__json__::GET _search {\"query\": " + "{\"boosting\": {" + "\"positive\": {\"match\": {\"body\": \"gift\"}}," + + "\"negative\": {\"term\": {\"body\": {\"value\": \"{{body_val}}\"}" + "}}, \"negative_boost\": {{boost_val}} } }}"; + Map vars = new HashMap<>(); + vars.put("boost_val", "0.3"); + vars.put("body_val", "\"quick brown\""); + String result = textTemplateEngine.render(new TextTemplate(template), vars); + assertEquals("GET _search {\"query\": {\"boosting\": {\"positive\": {\"match\": {\"body\": \"gift\"}}," + + "\"negative\": {\"term\": {\"body\": {\"value\": \"\\\"quick brown\\\"\"}}}, \"negative_boost\": 0.3 } }}", + result); + } + } + + public void testInvalidPrefixes() throws Exception { + String[] specialStrings = new String[]{"\f", "\n", "\r", "\"", "\\", "\t", "\b", "__::", "__" }; + String prefix = randomFrom("", "__", "____::", "___::", "____", "::", "++json__::", "__json__", "+_json__::", "__json__:"); + String template = prefix + " {{test_var1}} {{test_var2}}"; + Map vars = new HashMap<>(); + Writer var1Writer = new StringWriter(); + Writer var2Writer = new StringWriter(); + + for(int i = 0; i < scaledRandomIntBetween(10,1000); ++i) { + var1Writer.write(randomRealisticUnicodeOfCodepointLengthBetween(0, 10)); + var2Writer.write(randomRealisticUnicodeOfCodepointLengthBetween(0, 10)); + var1Writer.append(randomFrom(specialStrings)); + var2Writer.append(randomFrom(specialStrings)); + } + + vars.put("test_var1", var1Writer.toString()); + vars.put("test_var2", var2Writer.toString()); + String s1 = textTemplateEngine.render(new TextTemplate(template), vars); + String s2 = prefix + " " + var1Writer.toString() + " " + var2Writer.toString(); + assertThat(s1, equalTo(s2)); + } + + static String getChars() throws IOException { + return randomRealisticUnicodeOfCodepointLengthBetween(0, 10); + } + + static String prepareTemplate(String template, @Nullable XContentType contentType) { + if (contentType == null) { + return template; + } + return new StringBuilder("__") + .append(contentType.shortName().toLowerCase(Locale.ROOT)) + .append("__::") + .append(template) + .toString(); + } + +} diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/10_webhook.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/10_webhook.yml new file mode 100644 index 0000000000000..50ee1f6eafdb9 --- /dev/null +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/10_webhook.yml @@ -0,0 +1,72 @@ +--- +"Test webhook action with mustache integration": + - do: + cluster.health: + wait_for_status: yellow + + # extract http host and port from master node + - do: + cluster.state: {} + - set: { master_node: master } + + - 
do: + nodes.info: + metric: [ http ] + - is_true: nodes.$master.http.publish_address + - set: { nodes.$master.http.publish_address: host } + + - do: + ingest.simulate: + body: + pipeline: + description: _description + processors: [ grok: { field: host, patterns : ["%{IPORHOST:hostname}:%{NUMBER:port:int}"]} ] + docs: [ { _index: index, _type: type, _id: id, _source: { host: $host } } ] + - set: { docs.0.doc._source.hostname: hostname } + - set: { docs.0.doc._source.port: port } + + - do: + xpack.watcher.put_watch: + id: "test_watch" + body: + trigger: + schedule: + interval: 1s + input: + simple: + key: value + condition: + always: {} + actions: + output: + webhook: + method: PUT + host: $hostname + port: $port + path: "/my_index/my_type/{{ctx.watch_id}}" + body: + source: + watch_id: "{{ctx.watch_id}}" + params: {} + + - match: { _id: "test_watch" } + - match: { created: true } + + - do: + xpack.watcher.execute_watch: + id: test_watch + body: + record_execution: true + action_modes: + _all: execute + + - match: { watch_record.state: "executed" } + - match: { watch_record.result.actions.0.status: "success" } + + - do: + get: + index: my_index + type: my_type + id: test_watch + + - match: { _source: { watch_id: "test_watch" } } diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/20_array_access.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/20_array_access.yml new file mode 100644 index 0000000000000..bb52ee7f8d176 --- /dev/null +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/20_array_access.yml @@ -0,0 +1,42 @@ +--- +"Test array access": + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.watcher.execute_watch: + body: > + { + "watch": { + "trigger": { + "schedule": { + "interval": "1s" + } + }, + "input": { + "simple" : { + "objects" : [ + { + "field": "value1" + }, + { + "field": "value2" + } + ] + } + }, + "condition": { + "always" : {} + }, + "actions": { + "output": { + "logging" : { + "text" : "{{ctx.payload.objects.0.field}} {{ctx.payload.objects.1.field}}" + } + } + } + } + } + + - match: { watch_record.result.actions.0.logging.logged_text: "value1 value2" } diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/25_array_compare.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/25_array_compare.yml new file mode 100644 index 0000000000000..cb5e6d6b1f90c --- /dev/null +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/25_array_compare.yml @@ -0,0 +1,90 @@ +--- +"Basic array_compare watch": + + - do: + cluster.health: + wait_for_status: yellow + + - do: + index: + index: test_1 + type: test + id: 1 + body: { level: 0 } + + - do: + index: + index: test_1 + type: test + id: 2 + body: { level: 0 } + + - do: + index: + index: test_1 + type: test + id: 3 + body: { level: 0 } + + - do: + index: + index: test_1 + type: test + id: 4 + body: { level: 1 } + - do: + indices.refresh: {} + + - do: + xpack.watcher.execute_watch: + body: > + { + "watch" : { + "trigger": { + "schedule": { + "interval": "5m" + } + }, + "input": { + "search": { + "request": { + "indices": [ "test_1" ], + "body": { + "aggs": { + "top_levels": { + "terms": { + "field": "level", + "size": 1 + } + } + } + } + } + } + }, + "condition": { + "array_compare": { + "ctx.payload.aggregations.top_levels.buckets": { + "path": "doc_count", + "gte": { + "value": 3, + "quantifier": "some" + } + } + } + }, + 
"actions": { + "log": { + "logging": { + "text": "executed at {{ctx.execution_time}}" + } + } + } + } + } + + + - match: { "watch_record.result.input.status": "success" } + - match: { "watch_record.result.input.payload.hits.total": 4 } + - match: { "watch_record.result.condition.status": "success" } + - match: { "watch_record.result.condition.met": true } diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/30_search_input.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/30_search_input.yml new file mode 100644 index 0000000000000..28e2788fedbbc --- /dev/null +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/30_search_input.yml @@ -0,0 +1,165 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + - do: {xpack.watcher.stats:{}} + - do: + index: + index: idx + type: type + id: 1 + body: > + { + "date" : "2015-01-01T00:00:00", + "value" : "val_1" + } + - do: + index: + index: idx + type: type + id: 2 + body: > + { + "date" : "2015-01-02T00:00:00", + "value" : "val_2" + } + - do: + index: + index: idx + type: type + id: 3 + body: > + { + "date" : "2015-01-03T00:00:00", + "value" : "val_3" + } + - do: + index: + index: idx + type: type + id: 4 + body: > + { + "date" : "2015-01-04T00:00:00", + "value" : "val_4" + } + - do: + indices.refresh: + index: idx + +--- +"Test search input mustache integration (using request body)": + - do: + xpack.watcher.execute_watch: + body: > + { + "trigger_data" : { + "scheduled_time" : "2015-01-04T00:00:00" + }, + "watch" : { + "trigger" : { "schedule" : { "interval" : "10s" } }, + "actions" : { + "dummy" : { + "logging" : { + "text" : "executed!" + } + } + }, + "input" : { + "search" : { + "request" : { + "indices" : "idx", + "body" : { + "query" : { + "bool" : { + "filter" : [ + { + "range" : { + "date" : { + "lte" : "{{ctx.trigger.scheduled_time}}", + "gte" : "{{ctx.trigger.scheduled_time}}||-3d" + } + } + } + ] + } + } + } + } + } + } + } + } + - match: { "watch_record.result.input.type": "search" } + - match: { "watch_record.result.input.status": "success" } + - match: { "watch_record.result.input.payload.hits.total": 4 } + # makes sure that the mustache template snippets have been resolved correctly: + - match: { "watch_record.result.input.search.request.body.query.bool.filter.0.range.date.gte": "2015-01-04T00:00:00.000Z||-3d" } + - match: { "watch_record.result.input.search.request.body.query.bool.filter.0.range.date.lte": "2015-01-04T00:00:00.000Z" } + +--- +"Test search input mustache integration (using request template)": + + - do: + put_script: + id: "search-template" + body: { + "script": { + "lang": "mustache", + "source": { + "query" : { + "bool" : { + "must" : [ + { + "term" : { + "value" : "val_{{num}}" + } + } + ] + } + } + } + } + } + - match: { acknowledged: true } + + - do: + xpack.watcher.execute_watch: + body: > + { + "trigger_data" : { + "scheduled_time" : "2015-01-04T00:00:00" + }, + "watch" : { + "trigger" : { "schedule" : { "interval" : "10s" } }, + "actions" : { + "dummy" : { + "logging" : { + "text" : "executed!" 
+ } + } + }, + "input" : { + "search" : { + "request" : { + "indices" : "idx", + "template" : { + "id": "search-template", + "params": { + "num": 2 + } + } + } + } + } + } + } + - match: { "watch_record.result.input.type": "search" } + - match: { "watch_record.result.input.status": "success" } + - match: { "watch_record.result.input.payload.hits.total": 1 } + - match: { "watch_record.result.input.payload.hits.hits.0._id": "2" } + # makes sure that the mustache template snippets have been resolved correctly: + - match: { "watch_record.result.input.search.request.body.query.bool.must.0.term.value": "val_2" } + - match: { "watch_record.result.input.search.request.template.id": "search-template" } + - match: { "watch_record.result.input.search.request.template.params.num": 2 } diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/40_search_transform.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/40_search_transform.yml new file mode 100644 index 0000000000000..b4dd70759be71 --- /dev/null +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/40_search_transform.yml @@ -0,0 +1,146 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + - do: {xpack.watcher.stats:{}} + - do: + index: + index: idx + type: type + id: 1 + body: > + { + "date" : "2015-01-01T00:00:00", + "value" : "val_1" + } + - do: + index: + index: idx + type: type + id: 2 + body: > + { + "date" : "2015-01-02T00:00:00", + "value" : "val_2" + } + - do: + index: + index: idx + type: type + id: 3 + body: > + { + "date" : "2015-01-03T00:00:00", + "value" : "val_3" + } + - do: + index: + index: idx + type: type + id: 4 + body: > + { + "date" : "2015-01-04T00:00:00", + "value" : "val_4" + } + - do: + indices.refresh: + index: idx + +--- +"Test search transform mustache integration (using request body)": + - do: + xpack.watcher.execute_watch: + body: > + { + "trigger_data" : { + "scheduled_time" : "2015-01-04T00:00:00" + }, + "watch" : { + "trigger" : { "schedule" : { "interval" : "10s" } }, + "input" : { "simple" : { "value" : "val_3" } }, + "actions" : { + "dummy" : { + "logging" : { + "text" : "executed!" 
+ } + } + }, + "transform" : { + "search" : { + "request" : { + "indices" : "idx", + "body" : { + "query" : { + "bool" : { + "filter" : [ + { + "range" : { + "date" : { + "lte" : "{{ctx.trigger.scheduled_time}}", + "gte" : "{{ctx.trigger.scheduled_time}}||-1d" + } + } + }, + { + "term" : { + "value" : "{{ctx.payload.value}}" + } + } + ] + } + } + } + } + } + } + } + } + - match: { "watch_record.result.transform.type": "search" } + - match: { "watch_record.result.transform.status": "success" } + - match: { "watch_record.result.transform.payload.hits.total": 1 } + - match: { "watch_record.result.transform.payload.hits.hits.0._id": "3" } + # makes sure that the mustache template snippets have been resolved correctly: + - match: { "watch_record.result.transform.search.request.body.query.bool.filter.0.range.date.gte": "2015-01-04T00:00:00.000Z||-1d" } + - match: { "watch_record.result.transform.search.request.body.query.bool.filter.0.range.date.lte": "2015-01-04T00:00:00.000Z" } + - match: { "watch_record.result.transform.search.request.body.query.bool.filter.1.term.value": "val_3" } + +--- +"Test search transform mustache integration (using request template)": + - do: + xpack.watcher.execute_watch: + body: > + { + "trigger_data" : { + "scheduled_time" : "2015-01-04T00:00:00" + }, + "watch" : { + "trigger" : { "schedule" : { "interval" : "10s" } }, + "input" : { "simple" : { "number" : 2 } }, + "actions" : { + "dummy" : { + "logging" : { + "text" : "executed!" + } + } + }, + "transform" : { + "search" : { + "request" : { + "indices" : "idx", + "template" : { + "source" : "{\"query\": {\"bool\" : { \"must\": [{\"term\": {\"value\": \"val_{{ctx.payload.number}}\"}}]}}}" + } + } + } + } + } + } + - match: { "watch_record.result.transform.type": "search" } + - match: { "watch_record.result.transform.status": "success" } + - match: { "watch_record.result.transform.payload.hits.total": 1 } + - match: { "watch_record.result.transform.payload.hits.hits.0._id": "2" } + # makes sure that the mustache template snippets have been resolved correctly: + - match: { "watch_record.result.transform.search.request.body.query.bool.must.0.term.value": "val_2" } + - match: { "watch_record.result.transform.search.request.template.lang": "mustache" } diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml new file mode 100644 index 0000000000000..9952741d4ed74 --- /dev/null +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/mustache/50_webhook_url_escaping.yml @@ -0,0 +1,83 @@ +--- +"Test url escaping with url mustache function": + - do: + cluster.health: + wait_for_status: yellow + + - do: + index: + index: + type: log + id: 1 + refresh: true + body: { foo: bar } + + - do: {xpack.watcher.stats:{}} + - match: { "stats.0.watcher_state": "started" } + - match: { "stats.0.watch_count": 0 } + + # extract http host and port from master node + - do: + cluster.state: {} + - set: { master_node: master } + + - do: + nodes.info: + metric: [ http ] + - is_true: nodes.$master.http.publish_address + - set: { nodes.$master.http.publish_address: host } + + - do: + ingest.simulate: + body: + pipeline: + description: _description + processors: [ grok: { field: host, patterns : ["%{IPORHOST:hostname}:%{NUMBER:port:int}"] } ] + docs: [ { _index: index, _type: type, _id: id, _source: { host: $host } } ] + - set: { docs.0.doc._source.hostname: hostname 
} + - set: { docs.0.doc._source.port: port } + + - do: + xpack.watcher.put_watch: + id: "test_watch" + body: + metadata: + index: "" + trigger: + schedule: + interval: 1h + input: + http: + request: + host: $hostname + port: $port + path: "/{{#url}}{{ctx.metadata.index}}{{/url}}/_search" + condition: + compare: + "ctx.payload.hits.total": + eq: 1 + actions: + output: + webhook: + method: PUT + host: $hostname + port: $port + path: "/{{#url}}{{ctx.metadata.index}}{{/url}}/log/2" + params: + refresh: "true" + body: "{ \"foo\": \"bar\" }" + + - match: { _id: "test_watch" } + - match: { created: true } + + - do: + xpack.watcher.execute_watch: + id: "test_watch" + + - do: + count: + index: + type: log + + - match: {count : 2} + diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml new file mode 100644 index 0000000000000..5996273435ec6 --- /dev/null +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/10_basic.yml @@ -0,0 +1,121 @@ +--- +"Test execute watch api": + - do: + cluster.health: + wait_for_status: green + + - do: + xpack.watcher.put_watch: + id: "my_exe_watch" + body: > + { + "trigger" : { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input" : { + "chain" : { + "inputs" : [ + { + "first" : { + "search" : { + "request" : { + "indices" : [ "logstash*" ], + "body" : { + "query" : { + "bool": { + "must" : { + "match": { + "response": 404 + } + }, + "filter": { + "range": { + "@timestamp" : { + "from": "{{ctx.trigger.scheduled_time}}||-5m", + "to": "{{ctx.trigger.triggered_time}}" + } + } + } + } + } + } + } + } + } + }, + { + "second" : { + "transform" : { + "script" : { + "source": "return [ 'hits' : [ 'total' : ctx.payload.first.hits.total ]]" + } + } + } + } + ] + } + }, + "condition" : { + "script" : { + "source" : "ctx.payload.hits.total > 1", + "lang" : "painless" + } + }, + "actions" : { + "email_admin" : { + "transform" : { + "script" : { + "source" : "return ['foo': 'bar']", + "lang" : "painless" + } + }, + "email" : { + "to" : "someone@domain.host.com", + "subject" : "404 recently encountered" + } + } + } + } + - match: { _id: "my_exe_watch" } + + - do: + xpack.watcher.get_watch: + id: "my_exe_watch" + + - match: { _id: "my_exe_watch" } + - match: { watch.actions.email_admin.transform.script.source: "return ['foo': 'bar']" } + - match: { watch.input.chain.inputs.1.second.transform.script.source: "return [ 'hits' : [ 'total' : ctx.payload.first.hits.total ]]" } + + - do: + xpack.watcher.execute_watch: + id: "my_exe_watch" + body: > + { + "trigger_data" : { + "scheduled_time" : "2015-05-05T20:58:02.443Z", + "triggered_time" : "2015-05-05T20:58:02.443Z" + }, + "alternative_input" : { + "foo" : "bar" + }, + "ignore_condition" : true, + "action_modes" : { + "_all" : "force_simulate" + }, + "record_execution" : true + } + - match: { "watch_record.watch_id": "my_exe_watch" } + - match: { "watch_record.state": "executed" } + - match: { "watch_record.trigger_event.type": "manual" } + - match: { "watch_record.trigger_event.triggered_time": "2015-05-05T20:58:02.443Z" } + - match: { "watch_record.trigger_event.manual.schedule.scheduled_time": "2015-05-05T20:58:02.443Z" } + - match: { "watch_record.result.input.type": "simple" } + - match: { "watch_record.result.input.status": "success" } + - match: { "watch_record.result.input.payload.foo": "bar" } + - match: { "watch_record.result.condition.type": "always" } + - 
match: { "watch_record.result.condition.status": "success" } + - match: { "watch_record.result.condition.met": true } + - match: { "watch_record.result.actions.0.id" : "email_admin" } + - match: { "watch_record.result.actions.0.status" : "simulated" } + - match: { "watch_record.result.actions.0.type" : "email" } + - match: { "watch_record.result.actions.0.email.message.subject" : "404 recently encountered" } diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/20_minimal_body.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/20_minimal_body.yml new file mode 100644 index 0000000000000..f5f0a6cd04b0d --- /dev/null +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/20_minimal_body.yml @@ -0,0 +1,51 @@ +--- +"Test execute watch api with minimal body": + - do: + cluster.health: + wait_for_status: green + + - do: + xpack.watcher.put_watch: + id: "my_logging_watch" + body: > + { + "trigger" : { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input" : { + "simple" : { + "count" : 1 + } + }, + "condition" : { + "script" : { + "source" : "ctx.payload.count == 1", + "lang" : "painless" + } + }, + "actions" : { + "logging" : { + "logging" : { + "text" : "foobar" + } + } + } + } + - match: { _id: "my_logging_watch" } + + - do: + xpack.watcher.execute_watch: + id: "my_logging_watch" + + - match: { "watch_record.watch_id": "my_logging_watch" } + - match: { "watch_record.state": "executed" } + - match: { "watch_record.result.input.type": "simple" } + - match: { "watch_record.result.input.status": "success" } + - match: { "watch_record.result.input.payload.count": 1 } + - match: { "watch_record.result.condition.type": "script" } + - match: { "watch_record.result.condition.status": "success" } + - match: { "watch_record.result.condition.met": true } + - match: { "watch_record.result.actions.0.id" : "logging" } + - match: { "watch_record.result.actions.0.type" : "logging" } + - match: { "watch_record.result.actions.0.status" : "success" } + - match: { "watch_record.result.actions.0.logging.logged_text" : "foobar" } diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/30_inline_watch.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/30_inline_watch.yml new file mode 100644 index 0000000000000..ce3dd7bb7317c --- /dev/null +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/30_inline_watch.yml @@ -0,0 +1,75 @@ +--- +"Test execute watch api with an inline watch": + - do: + cluster.health: + wait_for_status: green + + - do: + xpack.watcher.execute_watch: + body: > + { + "trigger_data" : { + "scheduled_time" : "2015-05-05T20:58:02.443Z", + "triggered_time" : "2015-05-05T20:58:02.443Z" + }, + "alternative_input" : { + "foo" : "bar" + }, + "ignore_condition" : true, + "action_modes" : { + "_all" : "force_simulate" + }, + "watch" : { + "trigger" : { + "schedule" : { "cron" : "0 0 0 1 * ? 
2099" } + }, + "input" : { + "search" : { + "request" : { + "indices" : [ "logstash*" ], + "body" : { + "query" : { + "bool" : { + "must": { + "match": { + "response": 404 + } + }, + "filter": { + "range": { + "@timestamp" : { + "from": "{{ctx.trigger.scheduled_time}}||-5m", + "to": "{{ctx.trigger.triggered_time}}" + } + } + } + } + } + } + } + } + }, + "condition" : { + "script" : { + "source" : "ctx.payload.hits.total > 1", + "lang" : "painless" + } + }, + "actions" : { + "email_admin" : { + "email" : { + "to" : "someone@domain.host.com", + "subject" : "404 recently encountered" + } + } + } + } + } + - match: { "watch_record.state": "executed" } + - match: { "watch_record.trigger_event.manual.schedule.scheduled_time": "2015-05-05T20:58:02.443Z" } + - match: { "watch_record.result.input.type": "simple" } + - match: { "watch_record.result.input.payload.foo": "bar" } + - match: { "watch_record.result.condition.met": true } + - match: { "watch_record.result.actions.0.id" : "email_admin" } + - match: { "watch_record.result.actions.0.status" : "simulated" } + - match: { "watch_record.result.actions.0.email.message.subject" : "404 recently encountered" } diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/40_exception.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/40_exception.yml new file mode 100644 index 0000000000000..a794316e97d9b --- /dev/null +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/40_exception.yml @@ -0,0 +1,110 @@ +--- +"Test awesome painless exceptions are returned including the script_stack field": + - do: + cluster.health: + wait_for_status: green + + - do: + catch: request + xpack.watcher.put_watch: + id: "my_exe_watch" + body: > + { + "trigger" : { + "schedule" : { "cron" : "0 0 0 1 * ? 
2099" } + }, + "input" : { + "simple" : {} + }, + "condition" : { + "script" : { + "source" : "FOO == 1", + "lang" : "painless" + } + }, + "actions" : { + "email_admin" : { + "email" : { + "to" : "someone@domain.host.com", + "subject" : "404 recently encountered" + } + } + } + } + + - is_true: error.script_stack + - match: { status: 500 } + +--- +"Test painless exceptions are returned when logging a broken response": + - do: + cluster.health: + wait_for_status: green + + - do: + xpack.watcher.execute_watch: + body: > + { + "watch" : { + "trigger": { + "schedule": { + "interval": "1d" + } + }, + "input": { + "simple": { + "foo": "bar" + } + }, + "actions": { + "my-logging": { + "transform": { + "script": { + "source": "def x = [:] ; def y = [:] ; x.a = y ; y.a = x ; return x" + } + }, + "logging": { + "text": "{{ctx}}" + } + } + } + } + } + + - match: { watch_record.watch_id: "_inlined_" } + - match: { watch_record.trigger_event.type: "manual" } + - match: { watch_record.state: "executed" } + - match: { watch_record.result.actions.0.status: "failure" } + - match: { watch_record.result.actions.0.error.caused_by.caused_by.type: "illegal_argument_exception" } + - match: { watch_record.result.actions.0.error.caused_by.caused_by.reason: "Iterable object is self-referencing itself" } + + - do: + catch: bad_request + xpack.watcher.execute_watch: + body: > + { + "watch": { + "trigger": { + "schedule": { + "interval": "10s" + } + }, + "input": { + "simple": { + "foo": "bar" + } + }, + "actions": { + "my-logging": { + "transform": { + "script": { + "source": "def x = [:] ; def y = [:] ; x.a = y ; y.a = x ; return x" + } + }, + "logging": { + "text": "{{#join}}ctx.payload{{/join}}" + } + } + } + } + } diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/50_update_scripts.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/50_update_scripts.yml new file mode 100644 index 0000000000000..89e6602035c2a --- /dev/null +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/50_update_scripts.yml @@ -0,0 +1,152 @@ +# When a script is specified in a watch, updates should be taken into account +# See https://github.com/elastic/x-plugins/issues/4237 +--- +"Test transform scripts are updated on execution": + - skip: + features: warnings + + - do: + cluster.health: + wait_for_status: yellow + + - do: + put_script: + id: transform-script + body: > + { + "script":{ + "lang": "painless", + "source":"return [ 'email': 'foo@bar.org' ]" + } + } + + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "interval": "1h" + } + }, + "input": { + "simple": {} + }, + "transform": { + "script": { + "id": "transform-script" + } + }, + "actions": { + "my_log": { + "logging": { + "text": "{{ctx}}" + } + } + } + } + + - match: { _id: "my_watch" } + + - do: + xpack.watcher.execute_watch: + id: "my_watch" + + - match: { "watch_record.watch_id": "my_watch" } + - match: { "watch_record.result.transform.payload.email": "foo@bar.org" } + + - do: + put_script: + id: transform-script + body: > + { + "script": + { + "lang": "painless", + "source":"return [ 'email': 'foo@example.org' ]" + } + } + + - do: + xpack.watcher.execute_watch: + id: "my_watch" + + - match: { "watch_record.watch_id": "my_watch" } + - match: { "watch_record.result.transform.payload.email": "foo@example.org" } + +--- +"Test condition scripts are updated on execution": + - skip: + features: warnings + + - do: + cluster.health: + 
wait_for_status: yellow + + - do: + put_script: + id: condition-script + body: > + { + "script": + { + "lang": "painless", + "source": "return false" + } + } + + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "interval": "1h" + } + }, + "input": { + "simple": {} + }, + "condition": { + "script": { + "id": "condition-script" + } + }, + "actions": { + "my_log": { + "logging": { + "text": "{{ctx}}" + } + } + } + } + + - match: { _id: "my_watch" } + + - do: + xpack.watcher.execute_watch: + id: "my_watch" + + - match: { "watch_record.watch_id": "my_watch" } + - match: { "watch_record.result.condition.met": false } + + - do: + put_script: + id: condition-script + body: > + { + "script": { + "lang": "painless", + "source": "return true" + } + } + + - do: + xpack.watcher.execute_watch: + id: "my_watch" + + - match: { "watch_record.watch_id": "my_watch" } + - match: { "watch_record.result.condition.met": true } + diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/60_chain_input_with_transform.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/60_chain_input_with_transform.yml new file mode 100644 index 0000000000000..08097e0cace17 --- /dev/null +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/60_chain_input_with_transform.yml @@ -0,0 +1,54 @@ +--- +"Test chained input with transform": + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.watcher.execute_watch: + body: > + { + "watch" : { + "trigger" : { + "schedule" : { "cron" : "0 0 0 1 * ? 2099" } + }, + "input" : { + "chain" : { + "inputs" : [ + { + "first" : { + "simple" : { "foo" : "bar" } + } + }, + { + "second" : { + "transform": { + "script" : "def value = ctx.payload.first.foo + ' baz' ; return [ 'foo' : value ]" + } + } + } + ] + } + }, + "actions" : { + "index" : { + "index" : { + "index" : "my-index", + "doc_type" : "my-type", + "doc_id" : "my-id" + } + } + } + } + } + - match: { "watch_record.state": "executed" } + - match: { "watch_record.result.input.status": "success" } + + - do: + get: + index: my-index + type: my-type + id: my-id + + - match: { _source.first.foo: "bar" } + - match: { _source.second.foo: "bar baz" } diff --git a/x-pack/qa/sql/build.gradle b/x-pack/qa/sql/build.gradle new file mode 100644 index 0000000000000..18ad4067805a6 --- /dev/null +++ b/x-pack/qa/sql/build.gradle @@ -0,0 +1,120 @@ +import org.elasticsearch.gradle.precommit.PrecommitTasks +import org.elasticsearch.gradle.test.RunTask + +description = 'Integration tests for SQL' +apply plugin: 'elasticsearch.build' +archivesBaseName = 'qa-sql' + +dependencies { + compile "org.elasticsearch.test:framework:${version}" + + // JDBC testing dependencies + compile xpackProject('plugin:sql:jdbc') + compile "net.sourceforge.csvjdbc:csvjdbc:1.0.34" + + // CLI testing dependencies + compile project(path: xpackModule('sql:sql-cli'), configuration: 'nodeps') + compile "org.jline:jline:3.6.0" +} + +/* disable unit tests because these are all integration tests used + * other qa projects. 
*/ +test.enabled = false + +dependencyLicenses.enabled = false + +// the main files are actually test files, so use the appropriate forbidden api sigs +forbiddenApisMain { + signaturesURLs = [PrecommitTasks.getResource('/forbidden/es-all-signatures.txt'), + PrecommitTasks.getResource('/forbidden/es-test-signatures.txt')] +} + +thirdPartyAudit.excludes = [ + // jLine's optional dependencies + 'org.apache.sshd.client.SshClient', + 'org.apache.sshd.client.auth.keyboard.UserInteraction', + 'org.apache.sshd.client.channel.ChannelShell', + 'org.apache.sshd.client.channel.ClientChannel', + 'org.apache.sshd.client.channel.ClientChannelEvent', + 'org.apache.sshd.client.future.AuthFuture', + 'org.apache.sshd.client.future.ConnectFuture', + 'org.apache.sshd.client.future.OpenFuture', + 'org.apache.sshd.client.session.ClientSession', + 'org.apache.sshd.common.Factory', + 'org.apache.sshd.common.channel.PtyMode', + 'org.apache.sshd.common.config.keys.FilePasswordProvider', + 'org.apache.sshd.common.util.io.NoCloseInputStream', + 'org.apache.sshd.common.util.io.NoCloseOutputStream', + 'org.apache.sshd.server.Command', + 'org.apache.sshd.server.Environment', + 'org.apache.sshd.server.ExitCallback', + 'org.apache.sshd.server.SessionAware', + 'org.apache.sshd.server.Signal', + 'org.apache.sshd.server.SshServer', + 'org.apache.sshd.server.keyprovider.SimpleGeneratorHostKeyProvider', + 'org.apache.sshd.server.scp.ScpCommandFactory$Builder', + 'org.apache.sshd.server.session.ServerSession', + 'org.apache.sshd.server.subsystem.sftp.SftpSubsystemFactory$Builder', + 'org.fusesource.jansi.Ansi', + 'org.fusesource.jansi.internal.CLibrary$Termios', + 'org.fusesource.jansi.internal.CLibrary$WinSize', + 'org.fusesource.jansi.internal.CLibrary', + 'org.fusesource.jansi.internal.Kernel32$CHAR_INFO', + 'org.fusesource.jansi.internal.Kernel32$CONSOLE_SCREEN_BUFFER_INFO', + 'org.fusesource.jansi.internal.Kernel32$COORD', + 'org.fusesource.jansi.internal.Kernel32$FOCUS_EVENT_RECORD', + 'org.fusesource.jansi.internal.Kernel32$INPUT_RECORD', + 'org.fusesource.jansi.internal.Kernel32$KEY_EVENT_RECORD', + 'org.fusesource.jansi.internal.Kernel32$MOUSE_EVENT_RECORD', + 'org.fusesource.jansi.internal.Kernel32$SMALL_RECT', + 'org.fusesource.jansi.internal.Kernel32', + 'org.fusesource.jansi.internal.WindowsSupport', + 'org.mozilla.universalchardet.UniversalDetector', +] + +subprojects { + apply plugin: 'elasticsearch.standalone-rest-test' + dependencies { + /* Since we're a standalone rest test we actually get transitive + * dependencies but we don't really want them because they cause + * all kinds of trouble with the jar hell checks. So we suppress + * them explicitly for non-es projects. 
*/ + testCompile(xpackProject('qa:sql')) { + transitive = false + } + testCompile "org.elasticsearch.test:framework:${version}" + + // JDBC testing dependencies + testRuntime "net.sourceforge.csvjdbc:csvjdbc:1.0.34" + testRuntime "com.h2database:h2:1.4.197" + testRuntime xpackProject('plugin:sql:jdbc') + + // TODO check if needed + testRuntime("org.antlr:antlr4-runtime:4.5.3") { + transitive = false + } + + // CLI testing dependencies + testRuntime project(path: xpackModule('sql:sql-cli'), configuration: 'nodeps') + testRuntime "org.jline:jline:3.6.0" + } + + if (project.name != 'security') { + // The security project just configures its subprojects + apply plugin: 'elasticsearch.rest-test' + + integTestCluster { + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.watcher.enabled', 'false' + setting 'script.max_compilations_rate', '1000/1m' + } + + task runqa(type: RunTask) { + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.watcher.enabled', 'false' + setting 'script.max_compilations_rate', '1000/1m' + } + } +} diff --git a/x-pack/qa/sql/multinode/build.gradle b/x-pack/qa/sql/multinode/build.gradle new file mode 100644 index 0000000000000..ce7616ad1d1ae --- /dev/null +++ b/x-pack/qa/sql/multinode/build.gradle @@ -0,0 +1,13 @@ +description = 'Run a subset of SQL tests against multiple nodes' + +/* + * We try to pick a small subset of the SQL tests so it'll + * run quickly but still exercise the bits of SQL that we + * feel should need to be tested against more than one node. + */ + +integTestCluster { + numNodes = 2 + setting 'xpack.security.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' +} diff --git a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliSelectIT.java b/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliSelectIT.java new file mode 100644 index 0000000000000..af6f986e1bc46 --- /dev/null +++ b/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliSelectIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.elasticsearch.xpack.qa.sql.cli.SelectTestCase; + +public class CliSelectIT extends SelectTestCase { +} diff --git a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliShowIT.java b/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliShowIT.java new file mode 100644 index 0000000000000..07e544094d547 --- /dev/null +++ b/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/CliShowIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.elasticsearch.xpack.qa.sql.cli.ShowTestCase; + +public class CliShowIT extends ShowTestCase { +} diff --git a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcDatabaseMetaDataIT.java b/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcDatabaseMetaDataIT.java new file mode 100644 index 0000000000000..9bb663f190aff --- /dev/null +++ b/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcDatabaseMetaDataIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.elasticsearch.xpack.qa.sql.jdbc.DatabaseMetaDataTestCase; + +public class JdbcDatabaseMetaDataIT extends DatabaseMetaDataTestCase { +} diff --git a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcErrorsIT.java b/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcErrorsIT.java new file mode 100644 index 0000000000000..946a9b6c73165 --- /dev/null +++ b/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcErrorsIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.elasticsearch.xpack.qa.sql.jdbc.ErrorsTestCase; + +public class JdbcErrorsIT extends ErrorsTestCase { +} diff --git a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcPreparedStatementIT.java b/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcPreparedStatementIT.java new file mode 100644 index 0000000000000..155e9bf161b1c --- /dev/null +++ b/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcPreparedStatementIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.elasticsearch.xpack.qa.sql.jdbc.PreparedStatementTestCase; + +public class JdbcPreparedStatementIT extends PreparedStatementTestCase { +} diff --git a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcShowTablesIT.java b/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcShowTablesIT.java new file mode 100644 index 0000000000000..7af41a5c9d8ef --- /dev/null +++ b/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/JdbcShowTablesIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.elasticsearch.xpack.qa.sql.jdbc.ShowTablesTestCase; + +public class JdbcShowTablesIT extends ShowTablesTestCase { +} diff --git a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlIT.java b/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlIT.java new file mode 100644 index 0000000000000..231cee1f343bb --- /dev/null +++ b/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlIT.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase; + +/** + * Integration test for the rest sql action. The one that speaks json directly to a + * user rather than to the JDBC driver or CLI. + */ +public class RestSqlIT extends RestSqlTestCase { +} diff --git a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlMultinodeIT.java b/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlMultinodeIT.java new file mode 100644 index 0000000000000..efd426439e0ab --- /dev/null +++ b/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlMultinodeIT.java @@ -0,0 +1,126 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.multinode; + +import org.apache.http.HttpHost; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.NotEqualMessageBuilder; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.UnsupportedCharsetException; +import java.sql.JDBCType; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.columnInfo; +import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.randomMode; + +/** + * Tests specific to multiple nodes. + */ +public class RestSqlMultinodeIT extends ESRestTestCase { + /** + * Tests count of index run across multiple nodes. + */ + public void testIndexSpread() throws IOException { + int documents = between(10, 100); + createTestData(documents); + assertCount(client(), documents); + } + + /** + * Tests count against index on a node that doesn't have any shards of the index. 
+     */
+    public void testIndexOnWrongNode() throws IOException {
+        HttpHost firstHost = getClusterHosts().get(0);
+        String firstHostName = null;
+
+        String match = firstHost.getHostName() + ":" + firstHost.getPort();
+        Map<String, Object> nodesInfo = responseToMap(client().performRequest("GET", "/_nodes"));
+        @SuppressWarnings("unchecked")
+        Map<String, Object> nodes = (Map<String, Object>) nodesInfo.get("nodes");
+        for (Map.Entry<String, Object> node : nodes.entrySet()) {
+            String name = node.getKey();
+            Map<?, ?> nodeEntries = (Map<?, ?>) node.getValue();
+            Map<?, ?> http = (Map<?, ?>) nodeEntries.get("http");
+            List<?> boundAddress = (List<?>) http.get("bound_address");
+            if (boundAddress.contains(match)) {
+                firstHostName = name;
+                break;
+            }
+        }
+        assertNotNull("Didn't find first host among published addresses", firstHostName);
+
+        XContentBuilder index = JsonXContent.contentBuilder().prettyPrint().startObject();
+        index.startObject("settings"); {
+            index.field("routing.allocation.exclude._name", firstHostName);
+        }
+        index.endObject();
+        index.endObject();
+        client().performRequest("PUT", "/test", emptyMap(), new StringEntity(Strings.toString(index), ContentType.APPLICATION_JSON));
+        int documents = between(10, 100);
+        createTestData(documents);
+
+        try (RestClient firstNodeClient = buildClient(restClientSettings(), new HttpHost[] {firstHost})) {
+            assertCount(firstNodeClient, documents);
+        }
+    }
+
+    private void createTestData(int documents) throws UnsupportedCharsetException, IOException {
+        StringBuilder bulk = new StringBuilder();
+        for (int i = 0; i < documents; i++) {
+            int a = 3 * i;
+            int b = a + 1;
+            int c = b + 1;
+            bulk.append("{\"index\":{\"_id\":\"" + i + "\"}}\n");
+            bulk.append("{\"a\": " + a + ", \"b\": " + b + ", \"c\": " + c + "}\n");
+        }
+        client().performRequest("PUT", "/test/test/_bulk", singletonMap("refresh", "true"),
+                new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
+    }
+
+    private Map<String, Object> responseToMap(Response response) throws IOException {
+        try (InputStream content = response.getEntity().getContent()) {
+            return XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false);
+        }
+    }
+
+    private void assertCount(RestClient client, int count) throws IOException {
+        Map<String, Object> expected = new HashMap<>();
+        String mode = randomMode();
+        expected.put("columns", singletonList(columnInfo(mode, "COUNT(1)", "long", JDBCType.BIGINT, 20)));
+        expected.put("rows", singletonList(singletonList(count)));
+
+        Map<String, String> params = new TreeMap<>();
+        params.put("format", "json"); // JSON is easier to parse than a table
+        if (Strings.hasText(mode)) {
+            params.put("mode", mode); // JDBC or PLAIN mode
+        }
+
+        Map<String, Object> actual = responseToMap(client.performRequest("POST", "/_xpack/sql", params,
+                new StringEntity("{\"query\": \"SELECT COUNT(*) FROM test\"}", ContentType.APPLICATION_JSON)));
+
+        if (false == expected.equals(actual)) {
+            NotEqualMessageBuilder message = new NotEqualMessageBuilder();
+            message.compareMaps(actual, expected);
+            fail("Response does not match:\n" + message.toString());
+        }
+    }
+}
diff --git a/x-pack/qa/sql/no-security/build.gradle b/x-pack/qa/sql/no-security/build.gradle
new file mode 100644
index 0000000000000..3a8b0ffde0ac5
--- /dev/null
+++ b/x-pack/qa/sql/no-security/build.gradle
@@ -0,0 +1,9 @@
+integTestCluster {
+  setting 'xpack.security.enabled', 'false'
+  setting 'xpack.license.self_generated.type', 'trial'
+}
+
+runqa {
+  setting 'xpack.security.enabled', 'false'
+  setting 'xpack.license.self_generated.type', 'trial'
+}
diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliErrorsIT.java
b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliErrorsIT.java new file mode 100644 index 0000000000000..be6c003c7460c --- /dev/null +++ b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliErrorsIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.cli.ErrorsTestCase; + +public class CliErrorsIT extends ErrorsTestCase { +} diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliExplainIT.java b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliExplainIT.java new file mode 100644 index 0000000000000..f913395759c47 --- /dev/null +++ b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliExplainIT.java @@ -0,0 +1,160 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.cli.CliIntegrationTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.startsWith; + +public class CliExplainIT extends CliIntegrationTestCase { + public void testExplainBasic() throws IOException { + index("test", body -> body.field("test_field", "test_value")); + + assertThat(command("EXPLAIN (PLAN PARSED) SELECT * FROM test"), containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("With[{}]")); + assertThat(readLine(), startsWith("\\_Project[[?*]]")); + assertThat(readLine(), startsWith(" \\_UnresolvedRelation[[][index=test],null,Unknown index [test]]")); + assertEquals("", readLine()); + + assertThat(command("EXPLAIN " + (randomBoolean() ? "" : "(PLAN ANALYZED) ") + "SELECT * FROM test"), containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("Project[[test_field{f}#")); + assertThat(readLine(), startsWith("\\_SubQueryAlias[test]")); + assertThat(readLine(), startsWith(" \\_EsRelation[test][test_field{f}#")); + assertEquals("", readLine()); + + assertThat(command("EXPLAIN (PLAN OPTIMIZED) SELECT * FROM test"), containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("Project[[test_field{f}#")); + assertThat(readLine(), startsWith("\\_EsRelation[test][test_field{f}#")); + assertEquals("", readLine()); + + // TODO in this case we should probably remove the source filtering entirely. Right? It costs but we don't need it. 
+ assertThat(command("EXPLAIN (PLAN EXECUTABLE) SELECT * FROM test"), containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("EsQueryExec[test,{")); + assertThat(readLine(), startsWith(" \"_source\" : {")); + assertThat(readLine(), startsWith(" \"includes\" : [")); + assertThat(readLine(), startsWith(" \"test_field\"")); + assertThat(readLine(), startsWith(" ],")); + assertThat(readLine(), startsWith(" \"excludes\" : [ ]")); + assertThat(readLine(), startsWith(" },")); + assertThat(readLine(), startsWith(" \"sort\" : [")); + assertThat(readLine(), startsWith(" {")); + assertThat(readLine(), startsWith(" \"_doc\" :")); + assertThat(readLine(), startsWith(" \"order\" : \"asc\"")); + assertThat(readLine(), startsWith(" }")); + assertThat(readLine(), startsWith(" }")); + assertThat(readLine(), startsWith(" ]")); + assertThat(readLine(), startsWith("}]")); + assertEquals("", readLine()); + } + + public void testExplainWithWhere() throws IOException { + index("test", body -> body.field("test_field", "test_value1").field("i", 1)); + index("test", body -> body.field("test_field", "test_value2").field("i", 2)); + + assertThat(command("EXPLAIN (PLAN PARSED) SELECT * FROM test WHERE i = 2"), containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("With[{}]")); + assertThat(readLine(), startsWith("\\_Project[[?*]]")); + assertThat(readLine(), startsWith(" \\_Filter[?i = 2]")); + assertThat(readLine(), startsWith(" \\_UnresolvedRelation[[][index=test],null,Unknown index [test]]")); + assertEquals("", readLine()); + + assertThat(command("EXPLAIN " + (randomBoolean() ? "" : "(PLAN ANALYZED) ") + "SELECT * FROM test WHERE i = 2"), + containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("Project[[i{f}#")); + assertThat(readLine(), startsWith("\\_Filter[i{f}#")); + assertThat(readLine(), startsWith(" \\_SubQueryAlias[test]")); + assertThat(readLine(), startsWith(" \\_EsRelation[test][i{f}#")); + assertEquals("", readLine()); + + assertThat(command("EXPLAIN (PLAN OPTIMIZED) SELECT * FROM test WHERE i = 2"), containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("Project[[i{f}#")); + assertThat(readLine(), startsWith("\\_Filter[i{f}#")); + assertThat(readLine(), startsWith(" \\_EsRelation[test][i{f}#")); + assertEquals("", readLine()); + + assertThat(command("EXPLAIN (PLAN EXECUTABLE) SELECT * FROM test WHERE i = 2"), containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("EsQueryExec[test,{")); + assertThat(readLine(), startsWith(" \"query\" : {")); + assertThat(readLine(), startsWith(" \"term\" : {")); + assertThat(readLine(), startsWith(" \"i\" : {")); + assertThat(readLine(), startsWith(" \"value\" : 2,")); + assertThat(readLine(), startsWith(" \"boost\" : 1.0")); + assertThat(readLine(), startsWith(" }")); + assertThat(readLine(), startsWith(" }")); + assertThat(readLine(), startsWith(" },")); + assertThat(readLine(), startsWith(" \"_source\" : {")); + assertThat(readLine(), startsWith(" \"includes\" : [")); + assertThat(readLine(), startsWith(" \"test_field\"")); + assertThat(readLine(), startsWith(" ],")); + assertThat(readLine(), startsWith(" \"excludes\" : [ ]")); + assertThat(readLine(), startsWith(" },")); + assertThat(readLine(), startsWith(" \"docvalue_fields\" : [")); + assertThat(readLine(), startsWith(" 
\"i\"")); + assertThat(readLine(), startsWith(" ],")); + assertThat(readLine(), startsWith(" \"sort\" : [")); + assertThat(readLine(), startsWith(" {")); + assertThat(readLine(), startsWith(" \"_doc\" :")); + assertThat(readLine(), startsWith(" \"order\" : \"asc\"")); + assertThat(readLine(), startsWith(" }")); + assertThat(readLine(), startsWith(" }")); + assertThat(readLine(), startsWith(" ]")); + assertThat(readLine(), startsWith("}]")); + assertEquals("", readLine()); + } + + public void testExplainWithCount() throws IOException { + index("test", body -> body.field("test_field", "test_value1").field("i", 1)); + index("test", body -> body.field("test_field", "test_value2").field("i", 2)); + + assertThat(command("EXPLAIN (PLAN PARSED) SELECT COUNT(*) FROM test"), containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("With[{}]")); + assertThat(readLine(), startsWith("\\_Project[[?COUNT(?*)]]")); + assertThat(readLine(), startsWith(" \\_UnresolvedRelation[[][index=test],null,Unknown index [test]]")); + assertEquals("", readLine()); + + assertThat(command("EXPLAIN " + (randomBoolean() ? "" : "(PLAN ANALYZED) ") + "SELECT COUNT(*) FROM test"), + containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("Aggregate[[],[COUNT(1)#")); + assertThat(readLine(), startsWith("\\_SubQueryAlias[test]")); + assertThat(readLine(), startsWith(" \\_EsRelation[test][i{f}#")); + assertEquals("", readLine()); + + assertThat(command("EXPLAIN (PLAN OPTIMIZED) SELECT COUNT(*) FROM test"), containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("Aggregate[[],[COUNT(1)#")); + assertThat(readLine(), startsWith("\\_EsRelation[test][i{f}#")); + assertEquals("", readLine()); + + assertThat(command("EXPLAIN (PLAN EXECUTABLE) SELECT COUNT(*) FROM test"), containsString("plan")); + assertThat(readLine(), startsWith("----------")); + assertThat(readLine(), startsWith("EsQueryExec[test,{")); + assertThat(readLine(), startsWith(" \"size\" : 0,")); + assertThat(readLine(), startsWith(" \"_source\" : false,")); + assertThat(readLine(), startsWith(" \"stored_fields\" : \"_none_\",")); + assertThat(readLine(), startsWith(" \"sort\" : [")); + assertThat(readLine(), startsWith(" {")); + assertThat(readLine(), startsWith(" \"_doc\" :")); + assertThat(readLine(), startsWith(" \"order\" : \"asc\"")); + assertThat(readLine(), startsWith(" }")); + assertThat(readLine(), startsWith(" }")); + assertThat(readLine(), startsWith(" ]")); + assertThat(readLine(), startsWith("}]")); + assertEquals("", readLine()); + } +} diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliFetchSizeIT.java b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliFetchSizeIT.java new file mode 100644 index 0000000000000..e4d2ef1a0e2ca --- /dev/null +++ b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliFetchSizeIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.cli.FetchSizeTestCase; + +public class CliFetchSizeIT extends FetchSizeTestCase { +} diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliSelectIT.java b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliSelectIT.java new file mode 100644 index 0000000000000..56a112df02135 --- /dev/null +++ b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliSelectIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.cli.SelectTestCase; + +public class CliSelectIT extends SelectTestCase { +} diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliShowIT.java b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliShowIT.java new file mode 100644 index 0000000000000..2720b45d2a72e --- /dev/null +++ b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/CliShowIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.cli.ShowTestCase; + +public class CliShowIT extends ShowTestCase { +} diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcConnectionIT.java b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcConnectionIT.java new file mode 100644 index 0000000000000..e75cf6d059dfe --- /dev/null +++ b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcConnectionIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.jdbc.ConnectionTestCase; + +public class JdbcConnectionIT extends ConnectionTestCase { +} diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcCsvSpecIT.java b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcCsvSpecIT.java new file mode 100644 index 0000000000000..a245e6c85ef24 --- /dev/null +++ b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcCsvSpecIT.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.jdbc.CsvSpecTestCase; +import org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.CsvTestCase; + +public class JdbcCsvSpecIT extends CsvSpecTestCase { + public JdbcCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + super(fileName, groupName, testName, lineNumber, testCase); + } +} diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDatabaseMetaDataIT.java b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDatabaseMetaDataIT.java new file mode 100644 index 0000000000000..f653049b9a1ae --- /dev/null +++ b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDatabaseMetaDataIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.jdbc.DatabaseMetaDataTestCase; + +public class JdbcDatabaseMetaDataIT extends DatabaseMetaDataTestCase { +} diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcErrorsIT.java b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcErrorsIT.java new file mode 100644 index 0000000000000..21a52b609bbb3 --- /dev/null +++ b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcErrorsIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.jdbc.ErrorsTestCase; + +public class JdbcErrorsIT extends ErrorsTestCase { +} diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcFetchSizeIT.java b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcFetchSizeIT.java new file mode 100644 index 0000000000000..b64290957c088 --- /dev/null +++ b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcFetchSizeIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.jdbc.FetchSizeTestCase; + +public class JdbcFetchSizeIT extends FetchSizeTestCase { +} diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcPreparedStatementIT.java b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcPreparedStatementIT.java new file mode 100644 index 0000000000000..0d711c2798608 --- /dev/null +++ b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcPreparedStatementIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.jdbc.PreparedStatementTestCase; + +public class JdbcPreparedStatementIT extends PreparedStatementTestCase { +} diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcShowTablesIT.java b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcShowTablesIT.java new file mode 100644 index 0000000000000..f68b022f6adae --- /dev/null +++ b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcShowTablesIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.jdbc.ShowTablesTestCase; + +public class JdbcShowTablesIT extends ShowTablesTestCase { +} diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcSimpleExampleIT.java b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcSimpleExampleIT.java new file mode 100644 index 0000000000000..08539667cf92d --- /dev/null +++ b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcSimpleExampleIT.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.jdbc.SimpleExampleTestCase; + +public class JdbcSimpleExampleIT extends SimpleExampleTestCase { +} diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcSqlSpecIT.java b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcSqlSpecIT.java new file mode 100644 index 0000000000000..fb658270729d3 --- /dev/null +++ b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcSqlSpecIT.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.jdbc.SqlSpecTestCase; + +public class JdbcSqlSpecIT extends SqlSpecTestCase { + public JdbcSqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, String query) { + super(fileName, groupName, testName, lineNumber, query); + } +} \ No newline at end of file diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/RestSqlIT.java b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/RestSqlIT.java new file mode 100644 index 0000000000000..e22c8fb085210 --- /dev/null +++ b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/RestSqlIT.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.nosecurity; + +import org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase; + +/** + * Integration test for the rest sql action. The one that speaks json directly to a + * user rather than to the JDBC driver or CLI. + */ +public class RestSqlIT extends RestSqlTestCase { +} diff --git a/x-pack/qa/sql/security/build.gradle b/x-pack/qa/sql/security/build.gradle new file mode 100644 index 0000000000000..35434b60c1774 --- /dev/null +++ b/x-pack/qa/sql/security/build.gradle @@ -0,0 +1,55 @@ +dependencies { + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" +} + +Project mainProject = project + +subprojects { + // Use resources from the parent project in subprojects + sourceSets { + test { + java { + srcDirs = ["${mainProject.projectDir}/src/test/java"] + } + resources { + srcDirs = ["${mainProject.projectDir}/src/test/resources"] + } + } + } + + dependencies { + testCompile "org.elasticsearch.plugin:x-pack-core:${version}" + } + + integTestCluster { + // Setup auditing so we can use it in some tests + setting 'xpack.security.audit.enabled', 'true' + setting 'xpack.security.audit.outputs', 'logfile' + setting 'xpack.security.enabled', 'true' + // Setup roles used by tests + extraConfigFile 'roles.yml', '../roles.yml' + /* Setup the one admin user that we run the tests as. + * Tests use "run as" to get different users. */ + setupCommand 'setupUser#test_admin', + 'bin/elasticsearch-users', 'useradd', 'test_admin', '-p', 'x-pack-test-password', '-r', 'superuser' + // Subprojects override the wait condition to work properly with security + } + + integTestRunner { + systemProperty 'tests.audit.logfile', + "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_access.log" + } + + runqa { + // Setup auditing so we can use it in some tests + setting 'xpack.security.audit.enabled', 'true' + setting 'xpack.security.audit.outputs', 'logfile' + setting 'xpack.security.enabled', 'true' + // Setup roles used by tests + extraConfigFile 'roles.yml', '../roles.yml' + /* Setup the one admin user that we run the tests as. + * Tests use "run as" to get different users. 
*/ + setupCommand 'setupUser#test_admin', + 'bin/elasticsearch-users', 'useradd', 'test_admin', '-p', 'x-pack-test-password', '-r', 'superuser' + } +} diff --git a/x-pack/qa/sql/security/no-ssl/build.gradle b/x-pack/qa/sql/security/no-ssl/build.gradle new file mode 100644 index 0000000000000..ac748527da371 --- /dev/null +++ b/x-pack/qa/sql/security/no-ssl/build.gradle @@ -0,0 +1,17 @@ +integTestRunner { + systemProperty 'tests.ssl.enabled', 'false' +} + +integTestCluster { + setting 'xpack.license.self_generated.type', 'trial' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'test_admin', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} diff --git a/x-pack/qa/sql/security/roles.yml b/x-pack/qa/sql/security/roles.yml new file mode 100644 index 0000000000000..1759c972d34bf --- /dev/null +++ b/x-pack/qa/sql/security/roles.yml @@ -0,0 +1,87 @@ +# tag::rest +rest_minimal: + indices: + - names: test + privileges: [read, "indices:admin/get"] + - names: bort + privileges: [read, "indices:admin/get"] +# end::rest + +# tag::cli_jdbc +cli_or_jdbc_minimal: + cluster: + - "cluster:monitor/main" + indices: + - names: test + privileges: [read, "indices:admin/get"] + - names: bort + privileges: [read, "indices:admin/get"] +# end::cli_jdbc + +read_something_else: + cluster: + - "cluster:monitor/main" + indices: + - names: something_that_isnt_test + privileges: [read, "indices:admin/get"] + +read_test_a: + cluster: + - "cluster:monitor/main" + indices: + - names: test + privileges: [read, "indices:admin/get"] + field_security: + grant: [a] + +read_test_a_and_b: + cluster: + - "cluster:monitor/main" + indices: + - names: test + privileges: [read, "indices:admin/get"] + field_security: + grant: ["*"] + except: [c] + +read_test_without_c_3: + cluster: + - "cluster:monitor/main" + indices: + - names: test + privileges: [read, "indices:admin/get"] + query: | + { + "bool": { + "must_not": [ + { + "match": { + "c": 3 + } + } + ] + } + } + +read_bort: + cluster: + - "cluster:monitor/main" + indices: + - names: bort + privileges: [read, "indices:admin/get"] + +no_monitor_main: + indices: + - names: test + privileges: [read, "indices:admin/get"] + - names: bort + privileges: [read, "indices:admin/get"] + +no_get_index: + cluster: + - "cluster:monitor/main" + indices: + - names: test + privileges: [read] + - names: bort + privileges: [read] diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliErrorsIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliErrorsIT.java new file mode 100644 index 0000000000000..92d9608a527f1 --- /dev/null +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliErrorsIT.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.cli.ErrorsTestCase; +import org.elasticsearch.xpack.qa.sql.cli.EmbeddedCli.SecurityConfig; + +public class CliErrorsIT extends ErrorsTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } + + @Override + protected SecurityConfig securityConfig() { + return CliSecurityIT.adminSecurityConfig(); + } +} diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliFetchSizeIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliFetchSizeIT.java new file mode 100644 index 0000000000000..c8ca7db71d1e2 --- /dev/null +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliFetchSizeIT.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.cli.FetchSizeTestCase; +import org.elasticsearch.xpack.qa.sql.cli.EmbeddedCli.SecurityConfig; + +public class CliFetchSizeIT extends FetchSizeTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } + + @Override + protected SecurityConfig securityConfig() { + return CliSecurityIT.adminSecurityConfig(); + } +} diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliSecurityIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliSecurityIT.java new file mode 100644 index 0000000000000..5e8aa4ec6ad8b --- /dev/null +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliSecurityIT.java @@ -0,0 +1,202 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.xpack.qa.sql.cli.ErrorsTestCase; +import org.elasticsearch.xpack.qa.sql.cli.EmbeddedCli; +import org.elasticsearch.xpack.qa.sql.cli.EmbeddedCli.SecurityConfig; +import java.io.IOException; +import java.net.URISyntaxException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.qa.sql.cli.CliIntegrationTestCase.elasticsearchAddress; +import static org.hamcrest.Matchers.both; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.startsWith; + +public class CliSecurityIT extends SqlSecurityTestCase { + static SecurityConfig adminSecurityConfig() { + String keystoreLocation; + String keystorePassword; + if (RestSqlIT.SSL_ENABLED) { + Path keyStore; + try { + keyStore = PathUtils.get(RestSqlIT.class.getResource("/test-node.jks").toURI()); + } catch (URISyntaxException e) { + throw new RuntimeException("exception while reading the store", e); + } + if (!Files.exists(keyStore)) { + throw new IllegalStateException("Keystore file [" + keyStore + "] does not exist."); + } + keystoreLocation = keyStore.toAbsolutePath().toString(); + keystorePassword = "keypass"; + } else { + keystoreLocation = null; + keystorePassword = null; + } + return new SecurityConfig(RestSqlIT.SSL_ENABLED, "test_admin", "x-pack-test-password", keystoreLocation, keystorePassword); + } + + /** + * Perform security test actions using the CLI. + */ + private static class CliActions implements Actions { + @Override + public String minimalPermissionsForAllActions() { + return "cli_or_jdbc_minimal"; + } + + private SecurityConfig userSecurity(String user) { + SecurityConfig admin = adminSecurityConfig(); + if (user == null) { + return admin; + } + return new SecurityConfig(RestSqlIT.SSL_ENABLED, user, "testpass", admin.keystoreLocation(), admin.keystorePassword()); + } + + @Override + public void queryWorksAsAdmin() throws Exception { + try (EmbeddedCli cli = new EmbeddedCli(elasticsearchAddress(), true, adminSecurityConfig())) { + assertThat(cli.command("SELECT * FROM test ORDER BY a"), containsString("a | b | c")); + assertEquals("---------------+---------------+---------------", cli.readLine()); + assertThat(cli.readLine(), containsString("1 |2 |3")); + assertThat(cli.readLine(), containsString("4 |5 |6")); + assertEquals("", cli.readLine()); + } + } + + @Override + public void expectMatchesAdmin(String adminSql, String user, String userSql) throws Exception { + expectMatchesAdmin(adminSql, user, userSql, cli -> {}); + } + + @Override + public void expectScrollMatchesAdmin(String adminSql, String user, String userSql) throws Exception { + expectMatchesAdmin(adminSql, user, userSql, cli -> { + assertEquals("[?1l>[?1000l[?2004lfetch size set to [90m1[0m", cli.command("fetch size = 1")); + assertEquals("[?1l>[?1000l[?2004lfetch separator set to \"[90m -- fetch sep -- [0m\"", + cli.command("fetch separator = \" -- fetch sep -- \"")); + }); + } + + public void expectMatchesAdmin(String adminSql, String user, String userSql, + CheckedConsumer customizer) throws Exception { + List adminResult = new ArrayList<>(); + try (EmbeddedCli cli = new EmbeddedCli(elasticsearchAddress(), true, adminSecurityConfig())) { + customizer.accept(cli); + adminResult.add(cli.command(adminSql)); + 
String line; + do { + line = cli.readLine(); + adminResult.add(line); + } while (false == (line.equals("[0m") || line.equals(""))); + adminResult.add(line); + } + + Iterator expected = adminResult.iterator(); + try (EmbeddedCli cli = new EmbeddedCli(elasticsearchAddress(), true, userSecurity(user))) { + customizer.accept(cli); + assertTrue(expected.hasNext()); + assertEquals(expected.next(), cli.command(userSql)); + String line; + do { + line = cli.readLine(); + assertTrue(expected.hasNext()); + assertEquals(expected.next(), line); + } while (false == (line.equals("[0m") || line.equals(""))); + assertTrue(expected.hasNext()); + assertEquals(expected.next(), line); + assertFalse(expected.hasNext()); + } + } + + @Override + public void expectDescribe(Map columns, String user) throws Exception { + try (EmbeddedCli cli = new EmbeddedCli(elasticsearchAddress(), true, userSecurity(user))) { + assertThat(cli.command("DESCRIBE test"), containsString("column | type")); + assertEquals("---------------+---------------", cli.readLine()); + for (Map.Entry column : columns.entrySet()) { + assertThat(cli.readLine(), both(startsWith(column.getKey())).and(containsString("|" + column.getValue()))); + } + assertEquals("", cli.readLine()); + } + } + + @Override + public void expectShowTables(List tables, String user) throws Exception { + try (EmbeddedCli cli = new EmbeddedCli(elasticsearchAddress(), true, userSecurity(user))) { + String tablesOutput = cli.command("SHOW TABLES"); + assertThat(tablesOutput, containsString("name")); + assertThat(tablesOutput, containsString("type")); + assertEquals("---------------+---------------", cli.readLine()); + for (String table : tables) { + String line = null; + /* + * Security automatically creates either a `.security` or a + * `.security6` index but it might not have created the index + * by the time the test runs. + */ + while (line == null || line.startsWith(".security")) { + line = cli.readLine(); + } + assertThat(line, containsString(table)); + } + assertEquals("", cli.readLine()); + } + } + + @Override + public void expectUnknownIndex(String user, String sql) throws Exception { + try (EmbeddedCli cli = new EmbeddedCli(elasticsearchAddress(), true, userSecurity(user))) { + ErrorsTestCase.assertFoundOneProblem(cli.command(sql)); + assertThat(cli.readLine(), containsString("Unknown index")); + } + } + + @Override + public void expectForbidden(String user, String sql) throws Exception { + /* + * Cause the CLI to skip its connection test on startup so we + * can get a forbidden exception when we run the query. 
+ */ + try (EmbeddedCli cli = new EmbeddedCli(elasticsearchAddress(), false, userSecurity(user))) { + assertThat(cli.command(sql), containsString("is unauthorized for user [" + user + "]")); + } + } + + @Override + public void expectUnknownColumn(String user, String sql, String column) throws Exception { + try (EmbeddedCli cli = new EmbeddedCli(elasticsearchAddress(), true, userSecurity(user))) { + ErrorsTestCase.assertFoundOneProblem(cli.command(sql)); + assertThat(cli.readLine(), containsString("Unknown column [" + column + "]" + ErrorsTestCase.END)); + } + } + + @Override + public void checkNoMonitorMain(String user) throws Exception { + // Building the cli will attempt the connection and run the assertion + @SuppressWarnings("resource") // forceClose will close it + EmbeddedCli cli = new EmbeddedCli(elasticsearchAddress(), true, userSecurity(user)) { + @Override + protected void assertConnectionTest() throws IOException { + assertThat(readLine(), containsString("action [cluster:monitor/main] is unauthorized for user [" + user + "]")); + } + }; + cli.forceClose(); + } + } + + public CliSecurityIT() { + super(new CliActions()); + } +} diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliSelectIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliSelectIT.java new file mode 100644 index 0000000000000..596fd1e723644 --- /dev/null +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliSelectIT.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.cli.EmbeddedCli.SecurityConfig; +import org.elasticsearch.xpack.qa.sql.cli.SelectTestCase; + +public class CliSelectIT extends SelectTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } + + @Override + protected SecurityConfig securityConfig() { + return CliSecurityIT.adminSecurityConfig(); + } +} diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliShowIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliShowIT.java new file mode 100644 index 0000000000000..c05dbcc3d1369 --- /dev/null +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/CliShowIT.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.cli.EmbeddedCli.SecurityConfig; +import org.elasticsearch.xpack.qa.sql.cli.ShowTestCase; + +public class CliShowIT extends ShowTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? 
"https" : "http"; + } + + @Override + protected SecurityConfig securityConfig() { + return CliSecurityIT.adminSecurityConfig(); + } +} diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcConnectionIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcConnectionIT.java new file mode 100644 index 0000000000000..08aa73f68b9ea --- /dev/null +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcConnectionIT.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.jdbc.ConnectionTestCase; + +import java.util.Properties; + +public class JdbcConnectionIT extends ConnectionTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } + + @Override + protected Properties connectionProperties() { + Properties properties = super.connectionProperties(); + properties.putAll(JdbcSecurityIT.adminProperties()); + return properties; + } +} diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcCsvSpecIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcCsvSpecIT.java new file mode 100644 index 0000000000000..e5fdf0baf452c --- /dev/null +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcCsvSpecIT.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.jdbc.CsvSpecTestCase; +import org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.CsvTestCase; + +import java.util.Properties; + +public class JdbcCsvSpecIT extends CsvSpecTestCase { + public JdbcCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + super(fileName, groupName, testName, lineNumber, testCase); + } + + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } + + @Override + protected Properties connectionProperties() { + Properties sp = super.connectionProperties(); + sp.putAll(JdbcSecurityIT.adminProperties()); + return sp; + } +} diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcDatabaseMetaDataIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcDatabaseMetaDataIT.java new file mode 100644 index 0000000000000..f4aafe4090b95 --- /dev/null +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcDatabaseMetaDataIT.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.jdbc.DatabaseMetaDataTestCase; + +import java.util.Properties; + +public class JdbcDatabaseMetaDataIT extends DatabaseMetaDataTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } + + @Override + protected Properties connectionProperties() { + Properties properties = super.connectionProperties(); + properties.putAll(JdbcSecurityIT.adminProperties()); + return properties; + } +} diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcErrorsIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcErrorsIT.java new file mode 100644 index 0000000000000..2ed8ac7941ff0 --- /dev/null +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcErrorsIT.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.jdbc.ErrorsTestCase; + +import java.util.Properties; + +public class JdbcErrorsIT extends ErrorsTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } + + @Override + protected Properties connectionProperties() { + Properties properties = super.connectionProperties(); + properties.putAll(JdbcSecurityIT.adminProperties()); + return properties; + } +} diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcFetchSizeIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcFetchSizeIT.java new file mode 100644 index 0000000000000..ac239193e9938 --- /dev/null +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcFetchSizeIT.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.jdbc.FetchSizeTestCase; + +import java.util.Properties; + +public class JdbcFetchSizeIT extends FetchSizeTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? 
"https" : "http"; + } + + @Override + protected Properties connectionProperties() { + Properties properties = super.connectionProperties(); + properties.putAll(JdbcSecurityIT.adminProperties()); + return properties; + } +} diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcPreparedStatementIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcPreparedStatementIT.java new file mode 100644 index 0000000000000..3ecb0d388c204 --- /dev/null +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcPreparedStatementIT.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.jdbc.PreparedStatementTestCase; + +import java.util.Properties; + +public class JdbcPreparedStatementIT extends PreparedStatementTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } + + @Override + protected Properties connectionProperties() { + Properties sp = super.connectionProperties(); + sp.putAll(JdbcSecurityIT.adminProperties()); + return sp; + } +} diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSecurityIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSecurityIT.java new file mode 100644 index 0000000000000..48b850d0acf21 --- /dev/null +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSecurityIT.java @@ -0,0 +1,344 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.qa.sql.security;
+
+import org.elasticsearch.common.CheckedConsumer;
+import org.elasticsearch.common.CheckedFunction;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.xpack.qa.sql.jdbc.LocalH2;
+
+import java.net.URISyntaxException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.SQLInvalidAuthorizationSpecException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import static org.elasticsearch.xpack.qa.sql.jdbc.JdbcAssert.assertResultSets;
+import static org.elasticsearch.xpack.qa.sql.jdbc.JdbcIntegrationTestCase.elasticsearchAddress;
+import static org.elasticsearch.xpack.qa.sql.jdbc.JdbcIntegrationTestCase.randomKnownTimeZone;
+import static org.elasticsearch.xpack.qa.sql.security.RestSqlIT.SSL_ENABLED;
+import static org.hamcrest.Matchers.containsString;
+
+public class JdbcSecurityIT extends SqlSecurityTestCase {
+    static Properties adminProperties() {
+        // tag::admin_properties
+        Properties properties = new Properties();
+        properties.put("user", "test_admin");
+        properties.put("password", "x-pack-test-password");
+        // end::admin_properties
+        addSslPropertiesIfNeeded(properties);
+        return properties;
+    }
+
+    static Connection es(Properties properties) throws SQLException {
+        Properties props = new Properties();
+        props.put("timezone", randomKnownTimeZone());
+        props.putAll(properties);
+        String scheme = SSL_ENABLED ? "https" : "http";
+        return DriverManager.getConnection("jdbc:es://" + scheme + "://" + elasticsearchAddress(), props);
+    }
+
+    static Properties userProperties(String user) {
+        if (user == null) {
+            return adminProperties();
+        }
+        Properties prop = new Properties();
+        prop.put("user", user);
+        prop.put("password", "testpass");
+        addSslPropertiesIfNeeded(prop);
+        return prop;
+    }
+
+    private static void addSslPropertiesIfNeeded(Properties properties) {
+        if (false == SSL_ENABLED) {
+            return;
+        }
+        Path keyStore;
+        try {
+            keyStore = PathUtils.get(RestSqlIT.class.getResource("/test-node.jks").toURI());
+        } catch (URISyntaxException e) {
+            throw new RuntimeException("exception while reading the store", e);
+        }
+        if (!Files.exists(keyStore)) {
+            throw new IllegalStateException("Keystore file [" + keyStore + "] does not exist.");
+        }
+        String keyStoreStr = keyStore.toAbsolutePath().toString();
+
+        properties.put("ssl", "true");
+        properties.put("ssl.keystore.location", keyStoreStr);
+        properties.put("ssl.keystore.pass", "keypass");
+        properties.put("ssl.truststore.location", keyStoreStr);
+        properties.put("ssl.truststore.pass", "keypass");
+    }
+
+    static void expectActionMatchesAdmin(CheckedFunction<Connection, ResultSet, SQLException> adminAction,
+            String user, CheckedFunction<Connection, ResultSet, SQLException> userAction) throws Exception {
+        try (Connection adminConnection = es(adminProperties());
+                Connection userConnection = es(userProperties(user))) {
+            assertResultSets(adminAction.apply(adminConnection), userAction.apply(userConnection));
+        }
+    }
+
+    static void expectForbidden(String user, CheckedConsumer<Connection, SQLException> action) throws Exception {
+        expectError(user, action, "is unauthorized for user [" + user + "]");
+    }
+
+    static void expectUnknownIndex(String user, CheckedConsumer<Connection, SQLException> action) throws Exception {
+        expectError(user, action, "Unknown index");
+    }
+
+    static void expectError(String user, CheckedConsumer<Connection, SQLException> action, String errorMessage) throws Exception
{ + SQLException e; + try (Connection connection = es(userProperties(user))) { + e = expectThrows(SQLException.class, () -> action.accept(connection)); + } + assertThat(e.getMessage(), containsString(errorMessage)); + } + + static void expectActionThrowsUnknownColumn(String user, + CheckedConsumer action, String column) throws Exception { + SQLException e; + try (Connection connection = es(userProperties(user))) { + e = expectThrows(SQLException.class, () -> action.accept(connection)); + } + assertThat(e.getMessage(), containsString("Unknown column [" + column + "]")); + } + + private static class JdbcActions implements Actions { + @Override + public String minimalPermissionsForAllActions() { + return "cli_or_jdbc_minimal"; + } + + @Override + public void queryWorksAsAdmin() throws Exception { + try (Connection h2 = LocalH2.anonymousDb(); + Connection es = es(adminProperties())) { + h2.createStatement().executeUpdate("CREATE TABLE test (a BIGINT, b BIGINT, c BIGINT)"); + h2.createStatement().executeUpdate("INSERT INTO test (a, b, c) VALUES (1, 2, 3), (4, 5, 6)"); + + ResultSet expected = h2.createStatement().executeQuery("SELECT * FROM test ORDER BY a"); + assertResultSets(expected, es.createStatement().executeQuery("SELECT * FROM test ORDER BY a")); + } + } + + @Override + public void expectMatchesAdmin(String adminSql, String user, String userSql) throws Exception { + expectActionMatchesAdmin( + con -> con.createStatement().executeQuery(adminSql), + user, + con -> con.createStatement().executeQuery(userSql)); + } + + @Override + public void expectScrollMatchesAdmin(String adminSql, String user, String userSql) throws Exception { + expectActionMatchesAdmin( + con -> { + Statement st = con.createStatement(); + st.setFetchSize(1); + return st.executeQuery(adminSql); + }, + user, + con -> { + Statement st = con.createStatement(); + st.setFetchSize(1); + return st.executeQuery(userSql); + }); + } + + @Override + public void expectDescribe(Map columns, String user) throws Exception { + try (Connection h2 = LocalH2.anonymousDb(); + Connection es = es(userProperties(user))) { + // h2 doesn't have the same sort of DESCRIBE that we have so we emulate it + h2.createStatement().executeUpdate("CREATE TABLE mock (column VARCHAR, type VARCHAR)"); + if (columns.size() > 0) { + StringBuilder insert = new StringBuilder(); + insert.append("INSERT INTO mock (column, type) VALUES "); + boolean first = true; + for (Map.Entry column : columns.entrySet()) { + if (first) { + first = false; + } else { + insert.append(", "); + } + insert.append("('").append(column.getKey()).append("', '").append(column.getValue()).append("')"); + } + h2.createStatement().executeUpdate(insert.toString()); + } + + ResultSet expected = h2.createStatement().executeQuery("SELECT * FROM mock"); + assertResultSets(expected, es.createStatement().executeQuery("DESCRIBE test")); + } + } + + @Override + public void expectShowTables(List tables, String user) throws Exception { + try (Connection es = es(userProperties(user))) { + ResultSet actual = es.createStatement().executeQuery("SHOW TABLES"); + + /* + * Security automatically creates either a `.security` or a + * `.security6` index but it might not have created the index + * by the time the test runs. 
+ */ + List actualList = new ArrayList<>(); + + while (actual.next()) { + String name = actual.getString("name"); + if (!name.startsWith(".security")) { + actualList.add(name); + } + } + + assertEquals(tables, actualList); + } + } + + @Override + public void expectForbidden(String user, String sql) throws Exception { + JdbcSecurityIT.expectForbidden(user, con -> con.createStatement().executeQuery(sql)); + } + + @Override + public void expectUnknownIndex(String user, String sql) throws Exception { + JdbcSecurityIT.expectUnknownIndex(user, con -> con.createStatement().executeQuery(sql)); + } + + @Override + public void expectUnknownColumn(String user, String sql, String column) throws Exception { + expectActionThrowsUnknownColumn( + user, + con -> con.createStatement().executeQuery(sql), + column); + } + + @Override + public void checkNoMonitorMain(String user) throws Exception { + // Most SQL actually works fine without monitor/main + expectMatchesAdmin("SELECT * FROM test", user, "SELECT * FROM test"); + expectMatchesAdmin("SHOW TABLES LIKE 'test'", user, "SHOW TABLES LIKE 'test'"); + expectMatchesAdmin("DESCRIBE test", user, "DESCRIBE test"); + + // But there are a few things that don't work + try (Connection es = es(userProperties(user))) { + expectUnauthorized("cluster:monitor/main", user, () -> es.getMetaData().getDatabaseMajorVersion()); + expectUnauthorized("cluster:monitor/main", user, () -> es.getMetaData().getDatabaseMinorVersion()); + } + } + + private void expectUnauthorized(String action, String user, ThrowingRunnable r) { + SQLInvalidAuthorizationSpecException e = expectThrows(SQLInvalidAuthorizationSpecException.class, r); + assertEquals("action [" + action + "] is unauthorized for user [" + user + "]", e.getMessage()); + } + } + + public JdbcSecurityIT() { + super(new JdbcActions()); + } + + // Metadata methods only available to JDBC + public void testMetaDataGetTablesWithFullAccess() throws Exception { + createUser("full_access", "cli_or_jdbc_minimal"); + + expectActionMatchesAdmin( + con -> con.getMetaData().getTables("%", "%", "%t", null), + "full_access", + con -> con.getMetaData().getTables("%", "%", "%", null)); + } + + public void testMetaDataGetTablesWithNoAccess() throws Exception { + createUser("no_access", "read_nothing"); + + expectForbidden("no_access", con -> con.getMetaData().getTables("%", "%", "%", null)); + } + + public void testMetaDataGetTablesWithLimitedAccess() throws Exception { + createUser("read_bort", "read_bort"); + + expectActionMatchesAdmin( + con -> con.getMetaData().getTables("%", "%", "bort", null), + "read_bort", + con -> con.getMetaData().getTables("%", "%", "%", null)); + } + + public void testMetaDataGetTablesWithInAccessibleIndex() throws Exception { + createUser("read_bort", "read_bort"); + + expectActionMatchesAdmin( + con -> con.getMetaData().getTables("%", "%", "not_created", null), + "read_bort", + con -> con.getMetaData().getTables("%", "%", "test", null)); + } + + public void testMetaDataGetColumnsWorksAsFullAccess() throws Exception { + createUser("full_access", "cli_or_jdbc_minimal"); + + expectActionMatchesAdmin( + con -> con.getMetaData().getColumns(null, "%", "%t", "%"), + "full_access", + con -> con.getMetaData().getColumns(null, "%", "%", "%")); + } + + public void testMetaDataGetColumnsWithNoAccess() throws Exception { + createUser("no_access", "read_nothing"); + + expectForbidden("no_access", con -> con.getMetaData().getColumns("%", "%", "%", "%")); + } + + public void testMetaDataGetColumnsWithWrongAccess() throws 
Exception { + createUser("wrong_access", "read_something_else"); + + expectActionMatchesAdmin( + con -> con.getMetaData().getColumns(null, "%", "not_created", "%"), + "wrong_access", + con -> con.getMetaData().getColumns(null, "%", "test", "%")); + } + + public void testMetaDataGetColumnsSingleFieldGranted() throws Exception { + createUser("only_a", "read_test_a"); + + expectActionMatchesAdmin( + con -> con.getMetaData().getColumns(null, "%", "test", "a"), + "only_a", + con -> con.getMetaData().getColumns(null, "%", "test", "%")); + } + + public void testMetaDataGetColumnsSingleFieldExcepted() throws Exception { + createUser("not_c", "read_test_a_and_b"); + + /* Since there is no easy way to get a result from the admin side with + * both 'a' and 'b' we'll have to roll our own assertion here, but we + * are intentionally much less restrictive than the tests elsewhere. */ + try (Connection con = es(userProperties("not_c"))) { + ResultSet result = con.getMetaData().getColumns(null, "%", "test", "%"); + assertTrue(result.next()); + String columnName = result.getString(4); + assertEquals("a", columnName); + assertTrue(result.next()); + columnName = result.getString(4); + assertEquals("b", columnName); + assertFalse(result.next()); + } + } + + public void testMetaDataGetColumnsDocumentExcluded() throws Exception { + createUser("no_3s", "read_test_without_c_3"); + + expectActionMatchesAdmin( + con -> con.getMetaData().getColumns(null, "%", "test", "%"), + "no_3s", + con -> con.getMetaData().getColumns(null, "%", "test", "%")); + } +} diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcShowTablesIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcShowTablesIT.java new file mode 100644 index 0000000000000..ab76b3f33a132 --- /dev/null +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcShowTablesIT.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.jdbc.ShowTablesTestCase; + +import java.util.Properties; + +public class JdbcShowTablesIT extends ShowTablesTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } + + @Override + protected Properties connectionProperties() { + Properties sp = super.connectionProperties(); + sp.putAll(JdbcSecurityIT.adminProperties()); + return sp; + } +} diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSimpleExampleIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSimpleExampleIT.java new file mode 100644 index 0000000000000..b01fe72333b64 --- /dev/null +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSimpleExampleIT.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.jdbc.SimpleExampleTestCase; + +import java.util.Properties; + +public class JdbcSimpleExampleIT extends SimpleExampleTestCase { + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } + + @Override + protected Properties connectionProperties() { + Properties properties = super.connectionProperties(); + properties.putAll(JdbcSecurityIT.adminProperties()); + return properties; + } +} diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSqlSpecIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSqlSpecIT.java new file mode 100644 index 0000000000000..609847f513e3a --- /dev/null +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/JdbcSqlSpecIT.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.qa.sql.jdbc.SqlSpecTestCase; + +import java.util.Properties; + +public class JdbcSqlSpecIT extends SqlSpecTestCase { + public JdbcSqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, String query) { + super(fileName, groupName, testName, lineNumber, query); + } + + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } + + @Override + protected Properties connectionProperties() { + Properties sp = super.connectionProperties(); + sp.putAll(JdbcSecurityIT.adminProperties()); + return sp; + } +} diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlIT.java new file mode 100644 index 0000000000000..e5408e48dac02 --- /dev/null +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlIT.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; + +import java.net.URISyntaxException; +import java.nio.file.Files; +import java.nio.file.Path; + +/** + * Integration test for the rest sql action. The one that speaks json directly to a + * user rather than to the JDBC driver or CLI. 
+ */ +public class RestSqlIT extends RestSqlTestCase { + static final boolean SSL_ENABLED = Booleans.parseBoolean(System.getProperty("tests.ssl.enabled")); + + static Settings securitySettings() { + String token = basicAuthHeaderValue("test_admin", new SecureString("x-pack-test-password".toCharArray())); + Settings.Builder builder = Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token); + if (SSL_ENABLED) { + Path keyStore; + try { + keyStore = PathUtils.get(RestSqlIT.class.getResource("/test-node.jks").toURI()); + } catch (URISyntaxException e) { + throw new RuntimeException("exception while reading the store", e); + } + if (!Files.exists(keyStore)) { + throw new IllegalStateException("Keystore file [" + keyStore + "] does not exist."); + } + builder.put(ESRestTestCase.TRUSTSTORE_PATH, keyStore) + .put(ESRestTestCase.TRUSTSTORE_PASSWORD, "keypass"); + } + return builder.build(); + } + + @Override + protected Settings restClientSettings() { + return securitySettings(); + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } +} diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java new file mode 100644 index 0000000000000..6ac1c2c11ea9b --- /dev/null +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java @@ -0,0 +1,267 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.apache.http.Header; +import org.apache.http.HttpEntity; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.NotEqualMessageBuilder; +import org.elasticsearch.xpack.qa.sql.security.SqlSecurityTestCase.AuditLogAsserter; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.io.InputStream; +import java.sql.JDBCType; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.columnInfo; +import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.randomMode; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; + +public class RestSqlSecurityIT extends SqlSecurityTestCase { + private static class RestActions implements Actions { + @Override + public String minimalPermissionsForAllActions() { + return "rest_minimal"; + } + + @Override + public void queryWorksAsAdmin() throws Exception { + String mode = randomMode(); + Map expected = new HashMap<>(); + expected.put("columns", Arrays.asList( + columnInfo(mode, "a", "long", JDBCType.BIGINT, 20), + 
columnInfo(mode, "b", "long", JDBCType.BIGINT, 20), + columnInfo(mode, "c", "long", JDBCType.BIGINT, 20))); + expected.put("rows", Arrays.asList( + Arrays.asList(1, 2, 3), + Arrays.asList(4, 5, 6))); + + assertResponse(expected, runSql(null, mode, "SELECT * FROM test ORDER BY a")); + } + + @Override + public void expectMatchesAdmin(String adminSql, String user, String userSql) throws Exception { + String mode = randomMode(); + assertResponse(runSql(null, mode, adminSql), runSql(user, mode, userSql)); + } + + @Override + public void expectScrollMatchesAdmin(String adminSql, String user, String userSql) throws Exception { + String mode = randomMode(); + Map adminResponse = runSql(null, mode, + new StringEntity("{\"query\": \"" + adminSql + "\", \"fetch_size\": 1}", ContentType.APPLICATION_JSON)); + Map otherResponse = runSql(user, mode, + new StringEntity("{\"query\": \"" + adminSql + "\", \"fetch_size\": 1}", ContentType.APPLICATION_JSON)); + + String adminCursor = (String) adminResponse.remove("cursor"); + String otherCursor = (String) otherResponse.remove("cursor"); + assertNotNull(adminCursor); + assertNotNull(otherCursor); + assertResponse(adminResponse, otherResponse); + while (true) { + adminResponse = runSql(null, mode, + new StringEntity("{\"cursor\": \"" + adminCursor + "\"}", ContentType.APPLICATION_JSON)); + otherResponse = runSql(user, mode, + new StringEntity("{\"cursor\": \"" + otherCursor + "\"}", ContentType.APPLICATION_JSON)); + adminCursor = (String) adminResponse.remove("cursor"); + otherCursor = (String) otherResponse.remove("cursor"); + assertResponse(adminResponse, otherResponse); + if (adminCursor == null) { + assertNull(otherCursor); + return; + } + assertNotNull(otherCursor); + } + } + + @Override + public void expectDescribe(Map columns, String user) throws Exception { + String mode = randomMode(); + Map expected = new HashMap<>(3); + expected.put("columns", Arrays.asList( + columnInfo(mode, "column", "keyword", JDBCType.VARCHAR, 0), + columnInfo(mode, "type", "keyword", JDBCType.VARCHAR, 0))); + List> rows = new ArrayList<>(columns.size()); + for (Map.Entry column : columns.entrySet()) { + rows.add(Arrays.asList(column.getKey(), column.getValue())); + } + expected.put("rows", rows); + + assertResponse(expected, runSql(user, mode, "DESCRIBE test")); + } + + @Override + public void expectShowTables(List tables, String user) throws Exception { + String mode = randomMode(); + List columns = new ArrayList<>(); + columns.add(columnInfo(mode, "name", "keyword", JDBCType.VARCHAR, 0)); + columns.add(columnInfo(mode, "type", "keyword", JDBCType.VARCHAR, 0)); + Map expected = new HashMap<>(); + expected.put("columns", columns); + List> rows = new ArrayList<>(); + for (String table : tables) { + List fields = new ArrayList<>(); + fields.add(table); + fields.add("BASE TABLE"); + rows.add(fields); + } + expected.put("rows", rows); + + Map actual = runSql(user, mode, "SHOW TABLES"); + /* + * Security automatically creates either a `.security` or a + * `.security6` index but it might not have created the index + * by the time the test runs. 
+ */ + @SuppressWarnings("unchecked") + List<List<String>> rowsNoSecurity = ((List<List<String>>) actual.get("rows")) + .stream() + .filter(ls -> ls.get(0).startsWith(".security") == false) + .collect(Collectors.toList()); + actual.put("rows", rowsNoSecurity); + assertResponse(expected, actual); + } + + @Override + public void expectForbidden(String user, String sql) { + ResponseException e = expectThrows(ResponseException.class, () -> runSql(user, randomMode(), sql)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(403)); + assertThat(e.getMessage(), containsString("unauthorized")); + } + + @Override + public void expectUnknownIndex(String user, String sql) { + ResponseException e = expectThrows(ResponseException.class, () -> runSql(user, randomMode(), sql)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400)); + assertThat(e.getMessage(), containsString("Unknown index")); + } + + @Override + public void expectUnknownColumn(String user, String sql, String column) throws Exception { + ResponseException e = expectThrows(ResponseException.class, () -> runSql(user, randomMode(), sql)); + assertThat(e.getMessage(), containsString("Unknown column [" + column + "]")); + } + + @Override + public void checkNoMonitorMain(String user) throws Exception { + // Without monitor/main everything should work just fine + expectMatchesAdmin("SELECT * FROM test", user, "SELECT * FROM test"); + expectMatchesAdmin("SHOW TABLES LIKE 'test'", user, "SHOW TABLES LIKE 'test'"); + expectMatchesAdmin("DESCRIBE test", user, "DESCRIBE test"); + } + + private static Map<String, Object> runSql(@Nullable String asUser, String mode, String sql) throws IOException { + return runSql(asUser, mode, new StringEntity("{\"query\": \"" + sql + "\"}", ContentType.APPLICATION_JSON)); + } + + private static Map<String, Object> runSql(@Nullable String asUser, String mode, HttpEntity entity) throws IOException { + Map<String, String> params = new TreeMap<>(); + params.put("format", "json"); // JSON is easier to parse than a table + if (Strings.hasText(mode)) { + params.put("mode", mode); // JDBC or PLAIN mode + } + Header[] headers = asUser == null ? new Header[0] : new Header[] {new BasicHeader("es-security-runas-user", asUser)}; + Response response = client().performRequest("POST", "/_xpack/sql", params, entity, headers); + return toMap(response); + } + + private static void assertResponse(Map<String, Object> expected, Map<String, Object> actual) { + if (false == expected.equals(actual)) { + NotEqualMessageBuilder message = new NotEqualMessageBuilder(); + message.compareMaps(actual, expected); + fail("Response does not match:\n" + message.toString()); + } + } + + private static Map<String, Object> toMap(Response response) throws IOException { + try (InputStream content = response.getEntity().getContent()) { + return XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false); + } + } + } + + public RestSqlSecurityIT() { + super(new RestActions()); + } + + @Override + protected AuditLogAsserter createAuditLogAsserter() { + return new RestAuditLogAsserter(); + } + + /** + * Test that hijacking a scroll fails. This test is only implemented for + * REST because it is the only API where it is simple to hijack a scroll. + * It should exercise the same code as the other APIs but if we were truly + * paranoid we'd hack together something to test the others as well.
+ */ + public void testHijackScrollFails() throws Exception { + createUser("full_access", "rest_minimal"); + + Map adminResponse = RestActions.runSql(null, randomMode(), + new StringEntity("{\"query\": \"SELECT * FROM test\", \"fetch_size\": 1}", ContentType.APPLICATION_JSON)); + + String cursor = (String) adminResponse.remove("cursor"); + assertNotNull(cursor); + + ResponseException e = expectThrows(ResponseException.class, () -> RestActions.runSql("full_access", randomMode(), + new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON))); + // TODO return a better error message for bad scrolls + assertThat(e.getMessage(), containsString("No search context found for id")); + assertEquals(404, e.getResponse().getStatusLine().getStatusCode()); + + createAuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + .expect(true, SQL_ACTION_NAME, "full_access", empty()) + // One scroll access denied per shard + .expect("access_denied", SQL_ACTION_NAME, "full_access", "default_native", empty(), "InternalScrollSearchRequest") + .expect("access_denied", SQL_ACTION_NAME, "full_access", "default_native", empty(), "InternalScrollSearchRequest") + .expect("access_denied", SQL_ACTION_NAME, "full_access", "default_native", empty(), "InternalScrollSearchRequest") + .expect("access_denied", SQL_ACTION_NAME, "full_access", "default_native", empty(), "InternalScrollSearchRequest") + .expect("access_denied", SQL_ACTION_NAME, "full_access", "default_native", empty(), "InternalScrollSearchRequest") + .assertLogs(); + } + + protected class RestAuditLogAsserter extends AuditLogAsserter { + @Override + public AuditLogAsserter expect(String eventType, String action, String principal, String realm, + Matcher> indicesMatcher, String request) { + final Matcher runByPrincipalMatcher = principal.equals("test_admin") ? Matchers.nullValue(String.class) + : Matchers.is("test_admin"); + final Matcher runByRealmMatcher = realm.equals("default_file") ? Matchers.nullValue(String.class) + : Matchers.is("default_file"); + logCheckers.add( + m -> eventType.equals(m.get("event_type")) + && action.equals(m.get("action")) + && principal.equals(m.get("principal")) + && realm.equals(m.get("realm")) + && runByPrincipalMatcher.matches(m.get("run_by_principal")) + && runByRealmMatcher.matches(m.get("run_by_realm")) + && indicesMatcher.matches(m.get("indices")) + && request.equals(m.get("request"))); + return this; + } + + } +} diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/SqlSecurityTestCase.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/SqlSecurityTestCase.java new file mode 100644 index 0000000000000..205cd479dde1b --- /dev/null +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/SqlSecurityTestCase.java @@ -0,0 +1,645 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.security; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.lucene.util.SuppressForbidden; +import org.elasticsearch.SpecialPermission; +import org.elasticsearch.action.admin.indices.get.GetIndexAction; +import org.elasticsearch.action.admin.indices.get.GetIndexRequest; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; +import org.junit.AfterClass; +import org.junit.Before; + +import java.io.BufferedReader; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.function.Function; +import java.util.regex.Pattern; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasItems; + +public abstract class SqlSecurityTestCase extends ESRestTestCase { + /** + * Actions taken by this test. + *
<p>
+ * For methods that take {@code user}, a {@code null} user means "use the admin". + */ + protected interface Actions { + String minimalPermissionsForAllActions(); + void queryWorksAsAdmin() throws Exception; + /** + * Assert that running some sql as a user returns the same result as running it as + * the administrator. + */ + void expectMatchesAdmin(String adminSql, String user, String userSql) throws Exception; + /** + * Same as {@link #expectMatchesAdmin(String, String, String)} but sets the scroll size + * to 1 and completely scrolls the results. + */ + void expectScrollMatchesAdmin(String adminSql, String user, String userSql) throws Exception; + void expectDescribe(Map<String, String> columns, String user) throws Exception; + void expectShowTables(List<String> tables, String user) throws Exception; + void expectForbidden(String user, String sql) throws Exception; + void expectUnknownIndex(String user, String sql) throws Exception; + void expectUnknownColumn(String user, String sql, String column) throws Exception; + void checkNoMonitorMain(String user) throws Exception; + } + + protected static final String SQL_ACTION_NAME = "indices:data/read/sql"; + /** + * Location of the audit log file. We could technically figure this out by reading the admin + * APIs but it isn't worth doing because we also have to give ourselves permission to read + * the file and that must be done by setting a system property and reading it in + * {@code plugin-security.policy}. So we may as well have gradle set the property. + */ + private static final Path AUDIT_LOG_FILE = lookupAuditLog(); + + @SuppressForbidden(reason="security doesn't work with mock filesystem") + private static Path lookupAuditLog() { + String auditLogFileString = System.getProperty("tests.audit.logfile"); + if (null == auditLogFileString) { + throw new IllegalStateException("tests.audit.logfile must be set to run this test. It is automatically " + + "set by gradle. If you must set it yourself then it should be the absolute path to the audit " + + "log file generated by running x-pack with audit logging enabled."); + } + return Paths.get(auditLogFileString); + } + + private static boolean oneTimeSetup = false; + private static boolean auditFailure = false; + + /** + * The actions taken by this test. + */ + private final Actions actions; + + /** + * How much of the audit log was written before the test started. + */ + private long auditLogWrittenBeforeTestStart; + + public SqlSecurityTestCase(Actions actions) { + this.actions = actions; + } + + /** + * All tests run as an administrative user but use + * es-security-runas-user to become a less privileged user when needed. + */ + @Override + protected Settings restClientSettings() { + return RestSqlIT.securitySettings(); + } + + @Override + protected boolean preserveIndicesUponCompletion() { + /* We can't wipe the cluster between tests because that nukes the audit + * trail index which makes the auditing flaky. Instead we wipe all + * indices after the entire class is finished. */ + return true; + } + + @Before + public void oneTimeSetup() throws Exception { + if (oneTimeSetup) { + /* Since we don't wipe the cluster between tests we only need to + * write the test data once.
*/ + return; + } + StringBuilder bulk = new StringBuilder(); + bulk.append("{\"index\":{\"_index\": \"test\", \"_type\": \"doc\", \"_id\":\"1\"}\n"); + bulk.append("{\"a\": 1, \"b\": 2, \"c\": 3}\n"); + bulk.append("{\"index\":{\"_index\": \"test\", \"_type\": \"doc\", \"_id\":\"2\"}\n"); + bulk.append("{\"a\": 4, \"b\": 5, \"c\": 6}\n"); + bulk.append("{\"index\":{\"_index\": \"bort\", \"_type\": \"doc\", \"_id\":\"1\"}\n"); + bulk.append("{\"a\": \"test\"}\n"); + client().performRequest("PUT", "/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + oneTimeSetup = true; + } + + @Before + public void setInitialAuditLogOffset() { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(new SpecialPermission()); + } + AccessController.doPrivileged((PrivilegedAction) () -> { + if (false == Files.exists(AUDIT_LOG_FILE)) { + auditLogWrittenBeforeTestStart = 0; + return null; + } + if (false == Files.isRegularFile(AUDIT_LOG_FILE)) { + throw new IllegalStateException("expected tests.audit.logfile [" + AUDIT_LOG_FILE + "]to be a plain file but wasn't"); + } + try { + auditLogWrittenBeforeTestStart = Files.size(AUDIT_LOG_FILE); + } catch (IOException e) { + throw new RuntimeException(e); + } + return null; + }); + } + + @AfterClass + public static void wipeIndicesAfterTests() throws IOException { + try { + adminClient().performRequest("DELETE", "*"); + } catch (ResponseException e) { + // 404 here just means we had no indexes + if (e.getResponse().getStatusLine().getStatusCode() != 404) { + throw e; + } + } finally { + // Clear the static state so other subclasses can reuse it later + oneTimeSetup = false; + auditFailure = false; + } + } + + @Override + protected String getProtocol() { + return RestSqlIT.SSL_ENABLED ? "https" : "http"; + } + + public void testQueryWorksAsAdmin() throws Exception { + actions.queryWorksAsAdmin(); + createAuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + .assertLogs(); + } + + public void testQueryWithFullAccess() throws Exception { + createUser("full_access", actions.minimalPermissionsForAllActions()); + + actions.expectMatchesAdmin("SELECT * FROM test ORDER BY a", "full_access", "SELECT * FROM test ORDER BY a"); + createAuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + .expectSqlCompositeAction("full_access", "test") + .assertLogs(); + } + + public void testScrollWithFullAccess() throws Exception { + createUser("full_access", actions.minimalPermissionsForAllActions()); + + actions.expectScrollMatchesAdmin("SELECT * FROM test ORDER BY a", "full_access", "SELECT * FROM test ORDER BY a"); + createAuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + /* Scrolling doesn't have to access the index again, at least not through sql. + * If we asserted query and scroll logs then we would see the scroll. 
*/ + .expect(true, SQL_ACTION_NAME, "test_admin", empty()) + .expect(true, SQL_ACTION_NAME, "test_admin", empty()) + .expectSqlCompositeAction("full_access", "test") + .expect(true, SQL_ACTION_NAME, "full_access", empty()) + .expect(true, SQL_ACTION_NAME, "full_access", empty()) + .assertLogs(); + } + + public void testQueryNoAccess() throws Exception { + createUser("no_access", "read_nothing"); + + actions.expectForbidden("no_access", "SELECT * FROM test"); + createAuditLogAsserter() + .expect(false, SQL_ACTION_NAME, "no_access", empty()) + .assertLogs(); + } + + public void testQueryWrongAccess() throws Exception { + createUser("wrong_access", "read_something_else"); + + actions.expectUnknownIndex("wrong_access", "SELECT * FROM test"); + createAuditLogAsserter() + //This user has permission to run sql queries so they are given preliminary authorization + .expect(true, SQL_ACTION_NAME, "wrong_access", empty()) + //the following get index is granted too but against the no indices placeholder, as ignore_unavailable=true + .expect(true, GetIndexAction.NAME, "wrong_access", hasItems("*", "-*")) + .assertLogs(); + } + + public void testQuerySingleFieldGranted() throws Exception { + createUser("only_a", "read_test_a"); + + actions.expectMatchesAdmin("SELECT a FROM test ORDER BY a", "only_a", "SELECT * FROM test ORDER BY a"); + createAuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + .expectSqlCompositeAction("only_a", "test") + .assertLogs(); + } + + public void testScrollWithSingleFieldGranted() throws Exception { + createUser("only_a", "read_test_a"); + + actions.expectScrollMatchesAdmin("SELECT a FROM test ORDER BY a", "only_a", "SELECT * FROM test ORDER BY a"); + createAuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + /* Scrolling doesn't have to access the index again, at least not through sql. + * If we asserted query and scroll logs then we would see the scoll. */ + .expect(true, SQL_ACTION_NAME, "test_admin", empty()) + .expect(true, SQL_ACTION_NAME, "test_admin", empty()) + .expectSqlCompositeAction("only_a", "test") + .expect(true, SQL_ACTION_NAME, "only_a", empty()) + .expect(true, SQL_ACTION_NAME, "only_a", empty()) + .assertLogs(); + } + + public void testQueryStringSingeFieldGrantedWrongRequested() throws Exception { + createUser("only_a", "read_test_a"); + + actions.expectUnknownColumn("only_a", "SELECT c FROM test", "c"); + /* The user has permission to query the index but one of the + * columns that they explicitly mention is hidden from them + * by field level access control. This *looks* like a successful + * query from the audit side because all the permissions checked + * out but it failed in SQL because it couldn't compile the + * query without the metadata for the missing field. 
*/ + createAuditLogAsserter() + .expectSqlCompositeAction("only_a", "test") + .assertLogs(); + } + + public void testQuerySingleFieldExcepted() throws Exception { + createUser("not_c", "read_test_a_and_b"); + + actions.expectMatchesAdmin("SELECT a, b FROM test ORDER BY a", "not_c", "SELECT * FROM test ORDER BY a"); + createAuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + .expectSqlCompositeAction("not_c", "test") + .assertLogs(); + } + + public void testScrollWithSingleFieldExcepted() throws Exception { + createUser("not_c", "read_test_a_and_b"); + + actions.expectScrollMatchesAdmin("SELECT a, b FROM test ORDER BY a", "not_c", "SELECT * FROM test ORDER BY a"); + createAuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + /* Scrolling doesn't have to access the index again, at least not through sql. + * If we asserted query and scroll logs then we would see the scroll. */ + .expect(true, SQL_ACTION_NAME, "test_admin", empty()) + .expect(true, SQL_ACTION_NAME, "test_admin", empty()) + .expectSqlCompositeAction("not_c", "test") + .expect(true, SQL_ACTION_NAME, "not_c", empty()) + .expect(true, SQL_ACTION_NAME, "not_c", empty()) + .assertLogs(); + } + + public void testQuerySingleFieldExceptionedWrongRequested() throws Exception { + createUser("not_c", "read_test_a_and_b"); + + actions.expectUnknownColumn("not_c", "SELECT c FROM test", "c"); + /* The user has permission to query the index but one of the + * columns that they explicitly mention is hidden from them + * by field level access control. This *looks* like a successful + * query from the audit side because all the permissions checked + * out but it failed in SQL because it couldn't compile the + * query without the metadata for the missing field. */ + createAuditLogAsserter() + .expectSqlCompositeAction("not_c", "test") + .assertLogs(); + } + + public void testQueryDocumentExcluded() throws Exception { + createUser("no_3s", "read_test_without_c_3"); + + actions.expectMatchesAdmin("SELECT * FROM test WHERE c != 3 ORDER BY a", "no_3s", "SELECT * FROM test ORDER BY a"); + createAuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + .expectSqlCompositeAction("no_3s", "test") + .assertLogs(); + } + + public void testShowTablesWorksAsAdmin() throws Exception { + actions.expectShowTables(Arrays.asList("bort", "test"), null); + createAuditLogAsserter() + .expectSqlCompositeAction("test_admin", "bort", "test") + .assertLogs(); + } + + public void testShowTablesWorksAsFullAccess() throws Exception { + createUser("full_access", actions.minimalPermissionsForAllActions()); + + actions.expectMatchesAdmin("SHOW TABLES LIKE '%t'", "full_access", "SHOW TABLES"); + createAuditLogAsserter() + .expectSqlCompositeAction("test_admin", "bort", "test") + .expectSqlCompositeAction("full_access", "bort", "test") + .assertLogs(); + } + + public void testShowTablesWithNoAccess() throws Exception { + createUser("no_access", "read_nothing"); + + actions.expectForbidden("no_access", "SHOW TABLES"); + createAuditLogAsserter() + .expect(false, SQL_ACTION_NAME, "no_access", empty()) + .assertLogs(); + } + + public void testShowTablesWithLimitedAccess() throws Exception { + createUser("read_bort", "read_bort"); + + actions.expectMatchesAdmin("SHOW TABLES LIKE 'bort'", "read_bort", "SHOW TABLES"); + createAuditLogAsserter() + .expectSqlCompositeAction("test_admin", "bort") + .expectSqlCompositeAction("read_bort", "bort") + .assertLogs(); + } + + public void testShowTablesWithLimitedAccessUnaccessableIndex() throws 
Exception { + createUser("read_bort", "read_bort"); + + actions.expectMatchesAdmin("SHOW TABLES LIKE 'not-created'", "read_bort", "SHOW TABLES LIKE 'test'"); + createAuditLogAsserter() + .expect(true, SQL_ACTION_NAME, "test_admin", empty()) + .expect(true, GetIndexAction.NAME, "test_admin", contains("*", "-*")) + .expect(true, SQL_ACTION_NAME, "read_bort", empty()) + .expect(true, GetIndexAction.NAME, "read_bort", contains("*", "-*")) + .assertLogs(); + } + + public void testDescribeWorksAsAdmin() throws Exception { + Map expected = new TreeMap<>(); + expected.put("a", "BIGINT"); + expected.put("b", "BIGINT"); + expected.put("c", "BIGINT"); + actions.expectDescribe(expected, null); + createAuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + .assertLogs(); + } + + public void testDescribeWorksAsFullAccess() throws Exception { + createUser("full_access", actions.minimalPermissionsForAllActions()); + + actions.expectMatchesAdmin("DESCRIBE test", "full_access", "DESCRIBE test"); + createAuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + .expectSqlCompositeAction("full_access", "test") + .assertLogs(); + } + + public void testDescribeWithNoAccess() throws Exception { + createUser("no_access", "read_nothing"); + + actions.expectForbidden("no_access", "DESCRIBE test"); + createAuditLogAsserter() + .expect(false, SQL_ACTION_NAME, "no_access", empty()) + .assertLogs(); + } + + public void testDescribeWithWrongAccess() throws Exception { + createUser("wrong_access", "read_something_else"); + + actions.expectDescribe(Collections.emptyMap(), "wrong_access"); + createAuditLogAsserter() + //This user has permission to run sql queries so they are given preliminary authorization + .expect(true, SQL_ACTION_NAME, "wrong_access", empty()) + //the following get index is granted too but against the no indices placeholder, as ignore_unavailable=true + .expect(true, GetIndexAction.NAME, "wrong_access", hasItems("*", "-*")) + .assertLogs(); + } + + public void testDescribeSingleFieldGranted() throws Exception { + createUser("only_a", "read_test_a"); + + actions.expectDescribe(singletonMap("a", "BIGINT"), "only_a"); + createAuditLogAsserter() + .expectSqlCompositeAction("only_a", "test") + .assertLogs(); + } + + public void testDescribeSingleFieldExcepted() throws Exception { + createUser("not_c", "read_test_a_and_b"); + + Map expected = new TreeMap<>(); + expected.put("a", "BIGINT"); + expected.put("b", "BIGINT"); + actions.expectDescribe(expected, "not_c"); + createAuditLogAsserter() + .expectSqlCompositeAction("not_c", "test") + .assertLogs(); + } + + public void testDescribeDocumentExcluded() throws Exception { + createUser("no_3s", "read_test_without_c_3"); + + actions.expectMatchesAdmin("DESCRIBE test", "no_3s", "DESCRIBE test"); + createAuditLogAsserter() + .expectSqlCompositeAction("test_admin", "test") + .expectSqlCompositeAction("no_3s", "test") + .assertLogs(); + } + + public void testNoMonitorMain() throws Exception { + createUser("no_monitor_main", "no_monitor_main"); + actions.checkNoMonitorMain("no_monitor_main"); + } + + public void testNoGetIndex() throws Exception { + createUser("no_get_index", "no_get_index"); + + actions.expectForbidden("no_get_index", "SELECT * FROM test"); + actions.expectForbidden("no_get_index", "SHOW TABLES LIKE 'test'"); + actions.expectForbidden("no_get_index", "DESCRIBE test"); + } + + protected static void createUser(String name, String role) throws IOException { + XContentBuilder user = 
JsonXContent.contentBuilder().prettyPrint().startObject(); { + user.field("password", "testpass"); + user.field("roles", role); + } + user.endObject(); + client().performRequest("PUT", "/_xpack/security/user/" + name, emptyMap(), + new StringEntity(Strings.toString(user), ContentType.APPLICATION_JSON)); + } + + protected AuditLogAsserter createAuditLogAsserter() { + return new AuditLogAsserter(); + } + + /** + * Used to assert audit logs. Logs are asserted to match in any order because + * we don't always scroll in the same order but each log checker must match a + * single log and all logs must be matched. + */ + protected class AuditLogAsserter { + protected final List, Boolean>> logCheckers = new ArrayList<>(); + + public AuditLogAsserter expectSqlCompositeAction(String user, String... indices) { + expect(true, SQL_ACTION_NAME, user, empty()); + expect(true, GetIndexAction.NAME, user, hasItems(indices)); + return this; + } + + public AuditLogAsserter expect(boolean granted, String action, String principal, + Matcher> indicesMatcher) { + String request; + switch (action) { + case SQL_ACTION_NAME: + request = "SqlQueryRequest"; + break; + case GetIndexAction.NAME: + request = GetIndexRequest.class.getSimpleName(); + break; + default: + throw new IllegalArgumentException("Unknown action [" + action + "]"); + } + final String eventType = granted ? "access_granted" : "access_denied"; + final String realm = principal.equals("test_admin") ? "default_file" : "default_native"; + return expect(eventType, action, principal, realm, indicesMatcher, request); + } + + public AuditLogAsserter expect(String eventType, String action, String principal, String realm, + Matcher> indicesMatcher, String request) { + logCheckers.add(m -> eventType.equals(m.get("event_type")) + && action.equals(m.get("action")) + && principal.equals(m.get("principal")) + && realm.equals(m.get("realm")) + && Matchers.nullValue(String.class).matches(m.get("run_by_principal")) + && Matchers.nullValue(String.class).matches(m.get("run_by_realm")) + && indicesMatcher.matches(m.get("indices")) + && request.equals(m.get("request")) + ); + return this; + } + + public void assertLogs() throws Exception { + assertFalse("Previous test had an audit-related failure. All subsequent audit related assertions are bogus because we can't " + + "guarantee that we fully cleaned up after the last test.", auditFailure); + try { + assertBusy(() -> { + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPermission(new SpecialPermission()); + } + BufferedReader logReader = AccessController.doPrivileged((PrivilegedAction) () -> { + try { + return Files.newBufferedReader(AUDIT_LOG_FILE, StandardCharsets.UTF_8); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + logReader.skip(auditLogWrittenBeforeTestStart); + + List> logs = new ArrayList<>(); + String line; + Pattern logPattern = Pattern.compile( + ("PART PART PART PART origin_type=PART, origin_address=PART, principal=PART, realm=PART, " + + "(?:run_as_principal=IGN, )?(?:run_as_realm=IGN, )?(?:run_by_principal=PART, )?(?:run_by_realm=PART, )?" 
+ + "roles=PART, action=\\[(.*?)\\], (?:indices=PART, )?request=PART") + .replace(" ", "\\s+").replace("PART", "\\[([^\\]]*)\\]").replace("IGN", "\\[[^\\]]*\\]")); + // fail(logPattern.toString()); + while ((line = logReader.readLine()) != null) { + java.util.regex.Matcher m = logPattern.matcher(line); + if (false == m.matches()) { + throw new IllegalArgumentException("Unrecognized log: " + line); + } + int i = 1; + Map log = new HashMap<>(); + /* We *could* parse the date but leaving it in the original format makes it + * easier to find the lines in the file that this log comes from. */ + log.put("time", m.group(i++)); + log.put("node", m.group(i++)); + log.put("origin", m.group(i++)); + String eventType = m.group(i++); + if (false == ("access_denied".equals(eventType) || "access_granted".equals(eventType))) { + continue; + } + log.put("event_type", eventType); + log.put("origin_type", m.group(i++)); + log.put("origin_address", m.group(i++)); + String principal = m.group(i++); + log.put("principal", principal); + log.put("realm", m.group(i++)); + log.put("run_by_principal", m.group(i++)); + log.put("run_by_realm", m.group(i++)); + log.put("roles", m.group(i++)); + String action = m.group(i++); + if (false == (SQL_ACTION_NAME.equals(action) || GetIndexAction.NAME.equals(action))) { + //TODO we may want to extend this and the assertions to SearchAction.NAME as well + continue; + } + log.put("action", action); + // Use a sorted list for indices for consistent error reporting + List indices = new ArrayList<>(Strings.tokenizeByCommaToSet(m.group(i++))); + Collections.sort(indices); + if ("test_admin".equals(principal)) { + /* Sometimes we accidentally sneak access to the security tables. This is fine, SQL + * drops them from the interface. So we might have access to them, but we don't show + * them. */ + indices.remove(".security"); + indices.remove(".security-6"); + } + log.put("indices", indices); + log.put("request", m.group(i)); + logs.add(log); + } + List> allLogs = new ArrayList<>(logs); + List notMatching = new ArrayList<>(); + checker: for (int c = 0; c < logCheckers.size(); c++) { + Function, Boolean> logChecker = logCheckers.get(c); + for (Iterator> logsItr = logs.iterator(); logsItr.hasNext();) { + Map log = logsItr.next(); + if (logChecker.apply(log)) { + logsItr.remove(); + continue checker; + } + } + notMatching.add(c); + } + if (false == notMatching.isEmpty()) { + fail("Some checkers " + notMatching + " didn't match any logs. All logs:" + logsMessage(allLogs) + + "\nRemaining logs:" + logsMessage(logs)); + } + if (false == logs.isEmpty()) { + fail("Not all logs matched. Unmatched logs:" + logsMessage(logs)); + } + }); + } catch (AssertionError e) { + auditFailure = true; + logger.warn("Failed to find an audit log. 
Skipping remaining tests in this class after this the missing audit" + + "logs could turn up later."); + throw e; + } + } + + private String logsMessage(List> logs) { + if (logs.isEmpty()) { + return " none!"; + } + StringBuilder logsMessage = new StringBuilder(); + for (Map log : logs) { + logsMessage.append('\n').append(log); + } + return logsMessage.toString(); + } + } +} diff --git a/x-pack/qa/sql/security/src/test/resources/plugin-security.policy b/x-pack/qa/sql/security/src/test/resources/plugin-security.policy new file mode 100644 index 0000000000000..d013547b9fd5f --- /dev/null +++ b/x-pack/qa/sql/security/src/test/resources/plugin-security.policy @@ -0,0 +1,8 @@ +grant { + // Needed to read the audit log file + permission java.io.FilePermission "${tests.audit.logfile}", "read"; + + //// Required by ssl subproject: + // Required for the net client to setup ssl rather than use global ssl. + permission java.lang.RuntimePermission "setFactory"; +}; diff --git a/x-pack/qa/sql/security/ssl/build.gradle b/x-pack/qa/sql/security/ssl/build.gradle new file mode 100644 index 0000000000000..8c19ba0303f78 --- /dev/null +++ b/x-pack/qa/sql/security/ssl/build.gradle @@ -0,0 +1,368 @@ +import org.elasticsearch.gradle.LoggedExec +import org.elasticsearch.gradle.MavenFilteringHack +import org.elasticsearch.gradle.test.NodeInfo + +import javax.net.ssl.HttpsURLConnection +import javax.net.ssl.KeyManagerFactory +import javax.net.ssl.SSLContext +import javax.net.ssl.TrustManagerFactory +import java.nio.charset.StandardCharsets +import java.security.KeyStore +import java.security.SecureRandom + +// Tell the tests we're running with ssl enabled +integTestRunner { + systemProperty 'tests.ssl.enabled', 'true' +} + +// needed to be consistent with ssl host checking +Object san = new SanEvaluator() + +// location of generated keystores and certificates +File keystoreDir = new File(project.buildDir, 'keystore') + +// Generate the node's keystore +File nodeKeystore = new File(keystoreDir, 'test-node.jks') +task createNodeKeyStore(type: LoggedExec) { + doFirst { + if (nodeKeystore.parentFile.exists() == false) { + nodeKeystore.parentFile.mkdirs() + } + if (nodeKeystore.exists()) { + delete nodeKeystore + } + } + executable = new File(project.runtimeJavaHome, 'bin/keytool') + standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8')) + args '-genkey', + '-alias', 'test-node', + '-keystore', nodeKeystore, + '-keyalg', 'RSA', + '-keysize', '2048', + '-validity', '712', + '-dname', 'CN=smoke-test-plugins-ssl', + '-keypass', 'keypass', + '-storepass', 'keypass', + '-ext', san +} + +// Generate the client's keystore +File clientKeyStore = new File(keystoreDir, 'test-client.jks') +task createClientKeyStore(type: LoggedExec) { + doFirst { + if (clientKeyStore.parentFile.exists() == false) { + clientKeyStore.parentFile.mkdirs() + } + if (clientKeyStore.exists()) { + delete clientKeyStore + } + } + executable = new File(project.runtimeJavaHome, 'bin/keytool') + standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8')) + args '-genkey', + '-alias', 'test-client', + '-keystore', clientKeyStore, + '-keyalg', 'RSA', + '-keysize', '2048', + '-validity', '712', + '-dname', 'CN=smoke-test-plugins-ssl', + '-keypass', 'keypass', + '-storepass', 'keypass', + '-ext', san +} + +// Export the node's certificate +File nodeCertificate = new File(keystoreDir, 'test-node.cert') +task 
exportNodeCertificate(type: LoggedExec) { + doFirst { + if (nodeCertificate.parentFile.exists() == false) { + nodeCertificate.parentFile.mkdirs() + } + if (nodeCertificate.exists()) { + delete nodeCertificate + } + } + executable = new File(project.runtimeJavaHome, 'bin/keytool') + args '-export', + '-alias', 'test-node', + '-keystore', nodeKeystore, + '-storepass', 'keypass', + '-file', nodeCertificate +} + +// Import the node certificate in the client's keystore +task importNodeCertificateInClientKeyStore(type: LoggedExec) { + dependsOn exportNodeCertificate + executable = new File(project.runtimeJavaHome, 'bin/keytool') + args '-import', + '-alias', 'test-node', + '-keystore', clientKeyStore, + '-storepass', 'keypass', + '-file', nodeCertificate, + '-noprompt' +} + +// Export the client's certificate +File clientCertificate = new File(keystoreDir, 'test-client.cert') +task exportClientCertificate(type: LoggedExec) { + doFirst { + if (clientCertificate.parentFile.exists() == false) { + clientCertificate.parentFile.mkdirs() + } + if (clientCertificate.exists()) { + delete clientCertificate + } + } + executable = new File(project.runtimeJavaHome, 'bin/keytool') + args '-export', + '-alias', 'test-client', + '-keystore', clientKeyStore, + '-storepass', 'keypass', + '-file', clientCertificate +} + +// Import the client certificate in the node's keystore +task importClientCertificateInNodeKeyStore(type: LoggedExec) { + dependsOn exportClientCertificate + executable = new File(project.runtimeJavaHome, 'bin/keytool') + args '-import', + '-alias', 'test-client', + '-keystore', nodeKeystore, + '-storepass', 'keypass', + '-file', clientCertificate, + '-noprompt' +} + +forbiddenPatterns { + exclude '**/*.cert' +} + +// Add keystores to test classpath: it expects it there +sourceSets.test.resources.srcDir(keystoreDir) +processTestResources.dependsOn( + createNodeKeyStore, createClientKeyStore, + importNodeCertificateInClientKeyStore, importClientCertificateInNodeKeyStore +) + +integTestCluster.dependsOn(importClientCertificateInNodeKeyStore) + + +integTestCluster { + // The setup that we actually want + setting 'xpack.security.http.ssl.enabled', 'true' + setting 'xpack.security.transport.ssl.enabled', 'true' + + // ceremony to set up ssl + setting 'xpack.ssl.keystore.path', 'test-node.jks' + keystoreSetting 'xpack.ssl.keystore.secure_password', 'keypass' + + setting 'xpack.license.self_generated.type', 'trial' + + // copy keystores into config/ + extraConfigFile nodeKeystore.name, nodeKeystore + extraConfigFile clientKeyStore.name, clientKeyStore + + // Override the wait condition to work properly with security and SSL + waitCondition = { NodeInfo node, AntBuilder ant -> + File tmpFile = new File(node.cwd, 'wait.success') + KeyStore keyStore = KeyStore.getInstance("JKS"); + keyStore.load(clientKeyStore.newInputStream(), 'keypass'.toCharArray()); + KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + kmf.init(keyStore, 'keypass'.toCharArray()); + TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(keyStore); + SSLContext sslContext = SSLContext.getInstance("TLSv1.2"); + sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom()); + for (int i = 0; i < 10; i++) { + // we use custom wait logic here for HTTPS + HttpsURLConnection httpURLConnection = null; + try { + httpURLConnection = (HttpsURLConnection) new 
URL("https://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}&wait_for_status=yellow").openConnection(); + httpURLConnection.setSSLSocketFactory(sslContext.getSocketFactory()); + httpURLConnection.setRequestProperty("Authorization", "Basic " + + Base64.getEncoder().encodeToString("test_admin:x-pack-test-password".getBytes(StandardCharsets.UTF_8))); + httpURLConnection.setRequestMethod("GET"); + httpURLConnection.connect(); + if (httpURLConnection.getResponseCode() == 200) { + tmpFile.withWriter StandardCharsets.UTF_8.name(), { + it.write(httpURLConnection.getInputStream().getText(StandardCharsets.UTF_8.name())) + } + } + } catch (IOException e) { + if (i == 9) { + logger.error("final attempt of calling cluster health failed", e) + } else { + logger.debug("failed to call cluster health", e) + } + } finally { + if (httpURLConnection != null) { + httpURLConnection.disconnect(); + } + } + + // did not start, so wait a bit before trying again + Thread.sleep(500L); + } + + return tmpFile.exists() + } +} + + + + +/** A lazy evaluator to find the san to use for certificate generation. */ +class SanEvaluator { + + private static String san = null + + String toString() { + synchronized (SanEvaluator.class) { + if (san == null) { + san = getSubjectAlternativeNameString() + } + } + return san + } + + // Code stolen from NetworkUtils/InetAddresses/NetworkAddress to support SAN + /** Return all interfaces (and subinterfaces) on the system */ + private static List getInterfaces() throws SocketException { + List all = new ArrayList<>(); + addAllInterfaces(all, Collections.list(NetworkInterface.getNetworkInterfaces())); + Collections.sort(all, new Comparator() { + @Override + public int compare(NetworkInterface left, NetworkInterface right) { + return Integer.compare(left.getIndex(), right.getIndex()); + } + }); + return all; + } + + /** Helper for getInterfaces, recursively adds subinterfaces to {@code target} */ + private static void addAllInterfaces(List target, List level) { + if (!level.isEmpty()) { + target.addAll(level); + for (NetworkInterface intf : level) { + addAllInterfaces(target, Collections.list(intf.getSubInterfaces())); + } + } + } + + private static String getSubjectAlternativeNameString() { + List list = new ArrayList<>(); + for (NetworkInterface intf : getInterfaces()) { + if (intf.isUp()) { + // NOTE: some operating systems (e.g. BSD stack) assign a link local address to the loopback interface + // while technically not a loopback address, some of these treat them as one (e.g. OS X "localhost") so we must too, + // otherwise things just won't work out of box. So we include all addresses from loopback interfaces. 
+ for (InetAddress address : Collections.list(intf.getInetAddresses())) { + if (intf.isLoopback() || address.isLoopbackAddress()) { + list.add(address); + } + } + } + } + if (list.isEmpty()) { + throw new IllegalArgumentException("no up-and-running loopback addresses found, got " + getInterfaces()); + } + + StringBuilder builder = new StringBuilder("san="); + for (int i = 0; i < list.size(); i++) { + InetAddress address = list.get(i); + String hostAddress; + if (address instanceof Inet6Address) { + hostAddress = compressedIPV6Address((Inet6Address)address); + } else { + hostAddress = address.getHostAddress(); + } + builder.append("ip:").append(hostAddress); + String hostname = address.getHostName(); + if (hostname.equals(address.getHostAddress()) == false) { + builder.append(",dns:").append(hostname); + } + + if (i != (list.size() - 1)) { + builder.append(","); + } + } + + return builder.toString(); + } + + private static String compressedIPV6Address(Inet6Address inet6Address) { + byte[] bytes = inet6Address.getAddress(); + int[] hextets = new int[8]; + for (int i = 0; i < hextets.length; i++) { + hextets[i] = (bytes[2 * i] & 255) << 8 | bytes[2 * i + 1] & 255; + } + compressLongestRunOfZeroes(hextets); + return hextetsToIPv6String(hextets); + } + + /** + * Identify and mark the longest run of zeroes in an IPv6 address. + * + *
    Only runs of two or more hextets are considered. In case of a tie, the + * leftmost run wins. If a qualifying run is found, its hextets are replaced + * by the sentinel value -1. + * + * @param hextets {@code int[]} mutable array of eight 16-bit hextets + */ + private static void compressLongestRunOfZeroes(int[] hextets) { + int bestRunStart = -1; + int bestRunLength = -1; + int runStart = -1; + for (int i = 0; i < hextets.length + 1; i++) { + if (i < hextets.length && hextets[i] == 0) { + if (runStart < 0) { + runStart = i; + } + } else if (runStart >= 0) { + int runLength = i - runStart; + if (runLength > bestRunLength) { + bestRunStart = runStart; + bestRunLength = runLength; + } + runStart = -1; + } + } + if (bestRunLength >= 2) { + Arrays.fill(hextets, bestRunStart, bestRunStart + bestRunLength, -1); + } + } + + /** + * Convert a list of hextets into a human-readable IPv6 address. + * + *
    In order for "::" compression to work, the input should contain negative + * sentinel values in place of the elided zeroes. + * + * @param hextets {@code int[]} array of eight 16-bit hextets, or -1s + */ + private static String hextetsToIPv6String(int[] hextets) { + /* + * While scanning the array, handle these state transitions: + * start->num => "num" start->gap => "::" + * num->num => ":num" num->gap => "::" + * gap->num => "num" gap->gap => "" + */ + StringBuilder buf = new StringBuilder(39); + boolean lastWasNumber = false; + for (int i = 0; i < hextets.length; i++) { + boolean thisIsNumber = hextets[i] >= 0; + if (thisIsNumber) { + if (lastWasNumber) { + buf.append(':'); + } + buf.append(Integer.toHexString(hextets[i])); + } else { + if (i == 0 || lastWasNumber) { + buf.append("::"); + } + } + lastWasNumber = thisIsNumber; + } + return buf.toString(); + } +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/ErrorsTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/ErrorsTestCase.java new file mode 100644 index 0000000000000..8421ec9631150 --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/ErrorsTestCase.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql; + +/** + * Interface implemented once per SQL access method to ensure that we + * test the same minimal set of error cases. Note that this does not + * include security related failures, those are tracked in another test. + */ +public interface ErrorsTestCase { + void testSelectInvalidSql() throws Exception; + void testSelectFromMissingIndex() throws Exception; + void testSelectFromIndexWithoutTypes() throws Exception; + void testSelectMissingField() throws Exception; + void testSelectMissingFunction() throws Exception; + void testSelectProjectScoreInAggContext() throws Exception; + void testSelectOrderByScoreInAggContext() throws Exception; + void testSelectGroupByScore() throws Exception; + void testSelectScoreSubField() throws Exception; + void testSelectScoreInScalar() throws Exception; +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/CliIntegrationTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/CliIntegrationTestCase.java new file mode 100644 index 0000000000000..63795edecf855 --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/CliIntegrationTestCase.java @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.cli; + +import org.apache.http.HttpEntity; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.qa.sql.cli.EmbeddedCli.SecurityConfig; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearchContexts; + +public abstract class CliIntegrationTestCase extends ESRestTestCase { + /** + * Read an address for Elasticsearch suitable for the CLI from the system properties. + */ + public static String elasticsearchAddress() { + String cluster = System.getProperty("tests.rest.cluster"); + // CLI only supports a single node at a time so we just give it one. + return cluster.split(",")[0]; + } + + private EmbeddedCli cli; + + /** + * Asks the CLI Fixture to start a CLI instance. + */ + @Before + public void startCli() throws IOException { + cli = new EmbeddedCli(CliIntegrationTestCase.elasticsearchAddress(), true, securityConfig()); + } + + @After + public void orderlyShutdown() throws Exception { + if (cli == null) { + // failed to connect to the cli so there is nothing to do here + return; + } + cli.close(); + assertNoSearchContexts(); + } + + /** + * Override to add security configuration to the cli. + */ + protected SecurityConfig securityConfig() { + return null; + } + + protected void index(String index, CheckedConsumer body) throws IOException { + XContentBuilder builder = JsonXContent.contentBuilder().startObject(); + body.accept(builder); + builder.endObject(); + HttpEntity doc = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); + client().performRequest("PUT", "/" + index + "/doc/1", singletonMap("refresh", "true"), doc); + } + + public String command(String command) throws IOException { + return cli.command(command); + } + + /** + * Read a line produced by the CLI. + * Note that these lines will contain {@code xterm-256color} + * escape sequences. + */ + public String readLine() throws IOException { + return cli.readLine(); + } + +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/EmbeddedCli.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/EmbeddedCli.java new file mode 100644 index 0000000000000..89184edec0ad7 --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/EmbeddedCli.java @@ -0,0 +1,354 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.cli; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.xpack.sql.cli.Cli; +import org.elasticsearch.xpack.sql.cli.CliTerminal; +import org.elasticsearch.xpack.sql.cli.JLineTerminal; +import org.jline.terminal.impl.ExternalTerminal; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; +import java.io.PipedInputStream; +import java.io.PipedOutputStream; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Predicate; + +import static org.elasticsearch.test.ESTestCase.randomBoolean; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.endsWith; +import static org.hamcrest.Matchers.not; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.fail; + +/** + * Wraps a CLI in as "real" a way as it can get without forking the CLI + * subprocess with the goal being integration testing of the CLI without + * breaking our security model by forking. We test the script that starts + * the CLI using packaging tests which is super "real" but not super fast + * and doesn't run super frequently. + */ +public class EmbeddedCli implements Closeable { + private static final Logger logger = Loggers.getLogger(EmbeddedCli.class); + + private final Thread exec; + private final Cli cli; + private final AtomicInteger returnCode = new AtomicInteger(Integer.MIN_VALUE); + private final AtomicReference failure = new AtomicReference<>(); + private final BufferedWriter out; + private final BufferedReader in; + /** + * Has the client already been closed? 
+ */ + private boolean closed = false; + + public EmbeddedCli(String elasticsearchAddress, boolean checkConnectionOnStartup, + @Nullable SecurityConfig security) throws IOException { + PipedOutputStream outgoing = new PipedOutputStream(); + PipedInputStream cliIn = new PipedInputStream(outgoing); + PipedInputStream incoming = new PipedInputStream(); + PipedOutputStream cliOut = new PipedOutputStream(incoming); + CliTerminal cliTerminal = new JLineTerminal( + new ExternalTerminal("test", "xterm-256color", cliIn, cliOut, StandardCharsets.UTF_8), + false); + cli = new Cli(cliTerminal) { + @Override + protected boolean addShutdownHook() { + return false; + } + }; + out = new BufferedWriter(new OutputStreamWriter(outgoing, StandardCharsets.UTF_8)); + in = new BufferedReader(new InputStreamReader(incoming, StandardCharsets.UTF_8)); + + List args = new ArrayList<>(); + if (security == null) { + args.add(elasticsearchAddress); + } else { + String address = security.user + "@" + elasticsearchAddress; + if (security.https) { + address = "https://" + address; + } else if (randomBoolean()) { + address = "http://" + address; + } + args.add(address); + if (security.keystoreLocation != null) { + args.add("-keystore_location"); + args.add(security.keystoreLocation); + } + } + if (false == checkConnectionOnStartup) { + args.add("-check"); + args.add("false"); + } + args.add("-debug"); + exec = new Thread(() -> { + try { + /* + * We don't really interact with the terminal because we're + * trying to test our interaction with jLine which doesn't + * support Elasticsearch's Terminal abstraction. + */ + Terminal terminal = new MockTerminal(); + int exitCode = cli.main(args.toArray(new String[0]), terminal); + returnCode.set(exitCode); + logger.info("cli exited with code [{}]", exitCode); + } catch (Exception e) { + failure.set(e); + } + }); + exec.start(); + + try { + // Feed it passwords if needed + if (security != null) { + String passwordPrompt = "[?1h=[?2004hpassword: "; + if (security.keystoreLocation != null) { + assertEquals("[?1h=[?2004hkeystore password: ", readUntil(s -> s.endsWith(": "))); + out.write(security.keystorePassword + "\n"); + out.flush(); + logger.info("out: {}", security.keystorePassword); + // Read the newline echoed after the password prompt + assertEquals("", readLine()); + /* + * And for some reason jLine adds a second one so + * consume that too. I'm not sure why it does this + * but it looks right when a use runs the cli. + */ + assertEquals("", readLine()); + /* + * If we read the keystore password the console will + * emit some state reset escape sequences before the + * prompt for the password. + */ + passwordPrompt = "[?1l>[?1000l[?2004l[?1h=[?2004hpassword: "; + } + assertEquals(passwordPrompt, readUntil(s -> s.endsWith(": "))); + out.write(security.password + "\n"); + out.flush(); + logger.info("out: {}", security.password); + // Read the newline echoed after the password prompt + assertEquals("", readLine()); + } + + // Throw out the logo + while (false == readLine().contains("SQL")); + assertConnectionTest(); + } catch (IOException e) { + try { + forceClose(); + } catch (Exception closeException) { + e.addSuppressed(closeException); + throw e; + } + } + } + + /** + * Assert that result of the connection test. Default implementation + * asserts that the test passes but overridden to check places where + * we want to assert that it fails. 
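The constructor above drives the real CLI in-process by wiring two pipe pairs into a jLine `ExternalTerminal`: whatever the test writes into `outgoing` arrives as the CLI's console input, and whatever the CLI prints travels back through `incoming`. A minimal sketch of that piped-console pattern, using only JDK streams, invented names, and a trivial echo thread standing in for the CLI, is:

```java
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.nio.charset.StandardCharsets;

// Hypothetical stand-in for the CLI side: reads one line from its "console" and echoes it back.
public class PipedConsoleExample {
    public static void main(String[] args) throws Exception {
        PipedOutputStream outgoing = new PipedOutputStream();            // test writes here...
        PipedInputStream consoleIn = new PipedInputStream(outgoing);     // ...console reads here
        PipedInputStream incoming = new PipedInputStream();              // test reads here...
        PipedOutputStream consoleOut = new PipedOutputStream(incoming);  // ...console writes here

        Thread console = new Thread(() -> {
            try (BufferedReader in = new BufferedReader(new InputStreamReader(consoleIn, StandardCharsets.UTF_8));
                    BufferedWriter out = new BufferedWriter(new OutputStreamWriter(consoleOut, StandardCharsets.UTF_8))) {
                out.write("echo: " + in.readLine() + "\n");
                out.flush();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
        console.start();

        BufferedWriter testOut = new BufferedWriter(new OutputStreamWriter(outgoing, StandardCharsets.UTF_8));
        BufferedReader testIn = new BufferedReader(new InputStreamReader(incoming, StandardCharsets.UTF_8));
        testOut.write("hello\n");
        testOut.flush();
        System.out.println(testIn.readLine()); // prints "echo: hello"
        console.join();
    }
}
```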
+ */ + protected void assertConnectionTest() throws IOException { + // After the connection test passess we emit an empty line and then the prompt + assertEquals("", readLine()); + } + + /** + * Attempts an orderly shutdown of the CLI, reporting any unconsumed lines as errors. + */ + @Override + public void close() throws IOException { + if (closed) { + return; + } + try { + // Try and shutdown the client normally + + /* + * Don't use command here because we want want + * to collect all the responses and report them + * as failures if there is a problem rather than + * failing on the first bad response. + */ + out.write("quit;\n"); + out.flush(); + List nonQuit = new ArrayList<>(); + String line; + while (true) { + line = readLine(); + if (line == null) { + fail("got EOF before [Bye!]. Extras " + nonQuit); + } + if (line.contains("quit;")) { + continue; + } + if (line.contains("Bye!")) { + break; + } + if (false == line.isEmpty()) { + nonQuit.add(line); + } + } + assertThat("unconsumed lines", nonQuit, empty()); + } finally { + forceClose(); + } + assertEquals(0, returnCode.get()); + } + + /** + * Shutdown the connection to the remote CLI without attempting to shut + * the remote down in an orderly way. + */ + public void forceClose() throws IOException { + closed = true; + IOUtils.close(out, in, cli); + try { + exec.join(TimeUnit.SECONDS.toMillis(10)); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException(e); + } + Exception e = failure.get(); + if (e != null) { + throw new RuntimeException("CLI thread failed", e); + } + } + + /** + * Send a command and assert the echo. + */ + public String command(String command) throws IOException { + assertThat("; automatically added", command, not(endsWith(";"))); + logger.info("out: {};", command); + out.write(command + ";\n"); + out.flush(); + for (String echo : expectedCommandEchos(command)) { + assertEquals(echo, readLine()); + } + return readLine(); + } + /** + * Create the "echo" that we expect jLine to send to the terminal + * while we're typing a command. + */ + private List expectedCommandEchos(String command) { + List commandLines = Strings.splitSmart(command, "\n", false); + List result = new ArrayList<>(commandLines.size() * 2); + result.add("[?1h=[?2004h[33msql> [0m" + commandLines.get(0)); + // Every line gets an extra new line because, I dunno, but it looks right in the CLI + result.add(""); + for (int i = 1; i < commandLines.size(); i++) { + result.add("[?1l>[?1000l[?2004l[?1h=[?2004h[33m | [0m" + commandLines.get(i)); + // Every line gets an extra new line because, I dunno, but it looks right in the CLI + result.add(""); + } + result.set(result.size() - 2, result.get(result.size() - 2) + ";"); + return result; + } + + public String readLine() throws IOException { + /* + * Since we can't *see* esc in the error messages we just + * remove it here and pretend it isn't required. Hopefully + * `[` is enough for us to assert on. + * + * `null` means EOF so we should just pass that back through. + */ + String line = in.readLine(); + line = line == null ? 
null : line.replace("\u001B", ""); + logger.info("in : {}", line); + return line; + } + + private String readUntil(Predicate end) throws IOException { + StringBuilder b = new StringBuilder(); + String result; + while (true) { + int c = in.read(); + if (c == -1) { + throw new IOException("got eof before end"); + } + if (c == '\u001B') { + /* + * Since we can't *see* esc in the error messages we just + * remove it here and pretend it isn't required. Hopefully + * `[` is enough for us to assert on. + */ + continue; + } + b.append((char) c); + result = b.toString(); + if (end.test(result)) { + break; + } + } + logger.info("in : {}", result); + return result; + } + + public static class SecurityConfig { + private final boolean https; + private final String user; + private final String password; + @Nullable + private final String keystoreLocation; + @Nullable + private final String keystorePassword; + + public SecurityConfig(boolean https, String user, String password, + @Nullable String keystoreLocation, @Nullable String keystorePassword) { + if (user == null) { + throw new IllegalArgumentException( + "[user] is required. Send [null] instead of a SecurityConfig to run without security."); + } + if (password == null) { + throw new IllegalArgumentException( + "[password] is required. Send [null] instead of a SecurityConfig to run without security."); + } + if (keystoreLocation == null) { + if (keystorePassword != null) { + throw new IllegalArgumentException("[keystorePassword] cannot be specified if [keystoreLocation] is not specified"); + } + } else { + if (keystorePassword == null) { + throw new IllegalArgumentException("[keystorePassword] is required if [keystoreLocation] is specified"); + } + } + + this.https = https; + this.user = user; + this.password = password; + this.keystoreLocation = keystoreLocation; + this.keystorePassword = keystorePassword; + } + + public String keystoreLocation() { + return keystoreLocation; + } + + public String keystorePassword() { + return keystorePassword; + } + } +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ErrorsTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ErrorsTestCase.java new file mode 100644 index 0000000000000..9a5d5b9c3eaca --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ErrorsTestCase.java @@ -0,0 +1,103 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.cli; + +import java.io.IOException; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; + +import static java.util.Collections.emptyMap; + +import static org.hamcrest.Matchers.startsWith; + +/** + * Tests for error messages. + */ +public abstract class ErrorsTestCase extends CliIntegrationTestCase implements org.elasticsearch.xpack.qa.sql.ErrorsTestCase { + /** + * Starting sequence commons to lots of errors. + */ + public static final String START = "[?1l>[?1000l[?2004l[31;1m"; + /** + * Ending sequence common to lots of errors. 
+ */ + public static final String END = "[23;31;1m][0m"; + + @Override + public void testSelectInvalidSql() throws Exception { + assertFoundOneProblem(command("SELECT * FRO")); + assertEquals("line 1:8: Cannot determine columns for *" + END, readLine()); + } + + @Override + public void testSelectFromMissingIndex() throws IOException { + assertFoundOneProblem(command("SELECT * FROM test")); + assertEquals("line 1:15: Unknown index [test]" + END, readLine()); + } + + @Override + public void testSelectFromIndexWithoutTypes() throws Exception { + // Create an index without any types + client().performRequest("PUT", "/test", emptyMap(), new StringEntity("{}", ContentType.APPLICATION_JSON)); + + assertFoundOneProblem(command("SELECT * FROM test")); + assertEquals("line 1:15: [test] doesn't have any types so it is incompatible with sql" + END, readLine()); + } + + @Override + public void testSelectMissingField() throws IOException { + index("test", body -> body.field("test", "test")); + assertFoundOneProblem(command("SELECT missing FROM test")); + assertEquals("line 1:8: Unknown column [missing]" + END, readLine()); + } + + @Override + public void testSelectMissingFunction() throws Exception { + index("test", body -> body.field("foo", 1)); + assertFoundOneProblem(command("SELECT missing(foo) FROM test")); + assertEquals("line 1:8: Unknown function [missing]" + END, readLine()); + } + + @Override + public void testSelectProjectScoreInAggContext() throws Exception { + index("test", body -> body.field("foo", 1)); + assertFoundOneProblem(command("SELECT foo, SCORE(), COUNT(*) FROM test GROUP BY foo")); + assertEquals("line 1:13: Cannot use non-grouped column [SCORE()], expected [foo]" + END, readLine()); + } + + @Override + public void testSelectOrderByScoreInAggContext() throws Exception { + index("test", body -> body.field("foo", 1)); + assertFoundOneProblem(command("SELECT foo, COUNT(*) FROM test GROUP BY foo ORDER BY SCORE()")); + assertEquals("line 1:54: Cannot order by non-grouped column [SCORE()], expected [foo]" + END, readLine()); + } + + @Override + public void testSelectGroupByScore() throws Exception { + index("test", body -> body.field("foo", 1)); + assertFoundOneProblem(command("SELECT COUNT(*) FROM test GROUP BY SCORE()")); + assertEquals("line 1:36: Cannot use [SCORE()] for grouping" + END, readLine()); + } + + @Override + public void testSelectScoreSubField() throws Exception { + index("test", body -> body.field("foo", 1)); + assertThat(command("SELECT SCORE().bar FROM test"), + startsWith(START + "Bad request [[3;33;22mline 1:15: extraneous input '.' expecting {, ',',")); + } + + @Override + public void testSelectScoreInScalar() throws Exception { + index("test", body -> body.field("foo", 1)); + assertFoundOneProblem(command("SELECT SIN(SCORE()) FROM test")); + assertEquals("line 1:12: [SCORE()] cannot be an argument to a function" + END, readLine()); + } + + public static void assertFoundOneProblem(String commandResult) { + assertEquals(START + "Bad request [[3;33;22mFound 1 problem(s)", commandResult); + } + +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/FetchSizeTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/FetchSizeTestCase.java new file mode 100644 index 0000000000000..dc34b9c1101c7 --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/FetchSizeTestCase.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.cli; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; + +import java.io.IOException; + +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.containsString; + +/** + * Test for setting the fetch size. + */ +public abstract class FetchSizeTestCase extends CliIntegrationTestCase { + public void testSelect() throws IOException { + StringBuilder bulk = new StringBuilder(); + for (int i = 0; i < 20; i++) { + bulk.append("{\"index\":{}}\n"); + bulk.append("{\"test_field\":" + i + "}\n"); + } + client().performRequest("PUT", "/test/doc/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + assertEquals("[?1l>[?1000l[?2004lfetch size set to [90m4[0m", command("fetch size = 4")); + assertEquals("[?1l>[?1000l[?2004lfetch separator set to \"[90m -- fetch sep -- [0m\"", + command("fetch separator = \" -- fetch sep -- \"")); + assertThat(command("SELECT * FROM test ORDER BY test_field ASC"), containsString("test_field")); + assertThat(readLine(), containsString("----------")); + int i = 0; + while (i < 20) { + assertThat(readLine(), containsString(Integer.toString(i++))); + assertThat(readLine(), containsString(Integer.toString(i++))); + assertThat(readLine(), containsString(Integer.toString(i++))); + assertThat(readLine(), containsString(Integer.toString(i++))); + assertThat(readLine(), containsString(" -- fetch sep -- ")); + } + assertEquals("", readLine()); + } + + public void testInvalidFetchSize() throws IOException { + assertEquals(ErrorsTestCase.START + "Invalid fetch size [[3;33;22mcat" + ErrorsTestCase.END, command("fetch size = cat")); + assertEquals(ErrorsTestCase.START + "Invalid fetch size [[3;33;22m0[23;31;1m]. Must be > 0.[0m", command("fetch size = 0")); + assertEquals(ErrorsTestCase.START + "Invalid fetch size [[3;33;22m-1231[23;31;1m]. Must be > 0.[0m", command("fetch size = -1231")); + assertEquals(ErrorsTestCase.START + "Invalid fetch size [[3;33;22m" + Long.MAX_VALUE + ErrorsTestCase.END, + command("fetch size = " + Long.MAX_VALUE)); + } +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/SelectTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/SelectTestCase.java new file mode 100644 index 0000000000000..fffa8e0f72a1d --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/SelectTestCase.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.cli; + +import org.elasticsearch.test.hamcrest.RegexMatcher; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; + +public abstract class SelectTestCase extends CliIntegrationTestCase { + public void testSelect() throws IOException { + index("test", body -> body.field("test_field", "test_value")); + assertThat(command("SELECT * FROM test"), containsString("test_field")); + assertThat(readLine(), containsString("----------")); + assertThat(readLine(), containsString("test_value")); + assertEquals("", readLine()); + } + public void testMultiLineSelect() throws IOException { + index("test", body -> body.field("test_field", "test_value")); + assertThat(command("SELECT *\nFROM\ntest"), containsString("test_field")); + assertThat(readLine(), containsString("----------")); + assertThat(readLine(), containsString("test_value")); + assertEquals("", readLine()); + } + + public void testSelectWithWhere() throws IOException { + index("test", body -> body.field("test_field", "test_value1").field("i", 1)); + index("test", body -> body.field("test_field", "test_value2").field("i", 2)); + assertThat(command("SELECT * FROM test WHERE i = 2"), RegexMatcher.matches("\\s*i\\s*\\|\\s*test_field\\s*")); + assertThat(readLine(), containsString("----------")); + assertThat(readLine(), RegexMatcher.matches("\\s*2\\s*\\|\\s*test_value2\\s*")); + assertEquals("", readLine()); + } +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ShowTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ShowTestCase.java new file mode 100644 index 0000000000000..2605f6c27ce9b --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ShowTestCase.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.cli; + +import org.elasticsearch.test.hamcrest.RegexMatcher; + +import java.io.IOException; +import java.util.regex.Pattern; + +import static org.hamcrest.Matchers.containsString; + +public abstract class ShowTestCase extends CliIntegrationTestCase { + public void testShowTables() throws IOException { + index("test1", body -> body.field("test_field", "test_value")); + index("test2", body -> body.field("test_field", "test_value")); + assertThat(command("SHOW TABLES"), RegexMatcher.matches("\\s*name\\s*")); + assertThat(readLine(), containsString("----------")); + assertThat(readLine(), RegexMatcher.matches("\\s*test[12]\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*test[12]\\s*")); + assertEquals("", readLine()); + } + + public void testShowFunctions() throws IOException { + assertThat(command("SHOW FUNCTIONS"), RegexMatcher.matches("\\s*name\\s*\\|\\s*type\\s*")); + assertThat(readLine(), containsString("----------")); + assertThat(readLine(), RegexMatcher.matches("\\s*AVG\\s*\\|\\s*AGGREGATE\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*COUNT\\s*\\|\\s*AGGREGATE\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*MAX\\s*\\|\\s*AGGREGATE\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*MIN\\s*\\|\\s*AGGREGATE\\s*")); + String line = readLine(); + Pattern aggregateFunction = Pattern.compile("\\s*[A-Z0-9_~]+\\s*\\|\\s*AGGREGATE\\s*"); + while (aggregateFunction.matcher(line).matches()) { + line = readLine(); + } + Pattern scalarFunction = Pattern.compile("\\s*[A-Z0-9_~]+\\s*\\|\\s*SCALAR\\s*"); + while (scalarFunction.matcher(line).matches()) { + line = readLine(); + } + assertThat(line, RegexMatcher.matches("\\s*SCORE\\s*\\|\\s*SCORE\\s*")); + assertEquals("", readLine()); + } + + public void testShowFunctionsLikePrefix() throws IOException { + assertThat(command("SHOW FUNCTIONS LIKE 'L%'"), RegexMatcher.matches("\\s*name\\s*\\|\\s*type\\s*")); + assertThat(readLine(), containsString("----------")); + assertThat(readLine(), RegexMatcher.matches("\\s*LOG\\s*\\|\\s*SCALAR\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*LOG10\\s*\\|\\s*SCALAR\\s*")); + assertEquals("", readLine()); + } + + public void testShowFunctionsLikeInfix() throws IOException { + assertThat(command("SHOW FUNCTIONS LIKE '%DAY%'"), RegexMatcher.matches("\\s*name\\s*\\|\\s*type\\s*")); + assertThat(readLine(), containsString("----------")); + assertThat(readLine(), RegexMatcher.matches("\\s*DAY_OF_MONTH\\s*\\|\\s*SCALAR\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*DAY\\s*\\|\\s*SCALAR\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*DAY_OF_WEEK\\s*\\|\\s*SCALAR\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*DAY_OF_YEAR\\s*\\|\\s*SCALAR\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*HOUR_OF_DAY\\s*\\|\\s*SCALAR\\s*")); + assertThat(readLine(), RegexMatcher.matches("\\s*MINUTE_OF_DAY\\s*\\|\\s*SCALAR\\s*")); + assertEquals("", readLine()); + } +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/package-info.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/package-info.java new file mode 100644 index 0000000000000..b77e050cc3422 --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/package-info.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Support for integration tests for the Elasticsearch SQL CLI client + * and integration tests shared between multiple qa projects. + */ +package org.elasticsearch.xpack.qa.sql.cli; diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ConnectionTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ConnectionTestCase.java new file mode 100644 index 0000000000000..444142b7138b0 --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ConnectionTestCase.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import org.elasticsearch.Version; + +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.SQLException; + +/** + * Test the jdbc {@link Connection} implementation. + */ +public abstract class ConnectionTestCase extends JdbcIntegrationTestCase { + public void testConnectionProperties() throws SQLException { + try (Connection c = esJdbc()) { + assertFalse(c.isClosed()); + assertTrue(c.isReadOnly()); + DatabaseMetaData md = c.getMetaData(); + assertEquals(Version.CURRENT.major, md.getDatabaseMajorVersion()); + assertEquals(Version.CURRENT.minor, md.getDatabaseMinorVersion()); + } + } + + public void testIsValid() throws SQLException { + try (Connection c = esJdbc()) { + assertTrue(c.isValid(10)); + } + } + + /** + * Tests that we throw report no transaction isolation and throw sensible errors if you ask for any. + */ + public void testTransactionIsolation() throws Exception { + try (Connection c = esJdbc()) { + assertEquals(Connection.TRANSACTION_NONE, c.getTransactionIsolation()); + SQLException e = expectThrows(SQLException.class, () -> c.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE)); + assertEquals("Transactions not supported", e.getMessage()); + assertEquals(Connection.TRANSACTION_NONE, c.getTransactionIsolation()); + } + } +} \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvSpecTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvSpecTestCase.java new file mode 100644 index 0000000000000..e37688eb90465 --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvSpecTestCase.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.CsvTestCase; +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; + +import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.csvConnection; +import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.executeCsvQuery; +import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.specParser; + +/** + * Tests comparing sql queries executed against our jdbc client + * with hard coded result sets. + */ +public abstract class CsvSpecTestCase extends SpecBaseIntegrationTestCase { + private final CsvTestCase testCase; + + @ParametersFactory(argumentFormatting = PARAM_FORMATTING) + public static List readScriptSpec() throws Exception { + Parser parser = specParser(); + List tests = new ArrayList<>(); + tests.addAll(readScriptSpec("/command.csv-spec", parser)); + tests.addAll(readScriptSpec("/fulltext.csv-spec", parser)); + tests.addAll(readScriptSpec("/agg.csv-spec", parser)); + tests.addAll(readScriptSpec("/columns.csv-spec", parser)); + tests.addAll(readScriptSpec("/datetime.csv-spec", parser)); + tests.addAll(readScriptSpec("/alias.csv-spec", parser)); + tests.addAll(readScriptSpec("/nulls.csv-spec", parser)); + tests.addAll(readScriptSpec("/nested.csv-spec", parser)); + return tests; + } + + public CsvSpecTestCase(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + super(fileName, groupName, testName, lineNumber); + this.testCase = testCase; + } + + @Override + protected final void doTest() throws Throwable { + try (Connection csv = csvConnection(testCase.expectedResults); + Connection es = esJdbc()) { + + // pass the testName as table for debugging purposes (in case the underlying reader is missing) + ResultSet expected = executeCsvQuery(csv, testName); + ResultSet elasticResults = executeJdbcQuery(es, testCase.query); + assertResults(expected, elasticResults); + } + } + + // make sure ES uses UTC (otherwise JDBC driver picks up the JVM timezone per spec/convention) + @Override + protected Properties connectionProperties() { + Properties connectionProperties = new Properties(); + connectionProperties.setProperty(JdbcConfiguration.TIME_ZONE, "UTC"); + return connectionProperties; + } + +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvTestUtils.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvTestUtils.java new file mode 100644 index 0000000000000..fbbc2285ed123 --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvTestUtils.java @@ -0,0 +1,196 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.Streams; +import org.relique.io.TableReader; +import org.relique.jdbc.csv.CsvConnection; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.IOException; +import java.io.Reader; +import java.io.StringReader; +import java.io.StringWriter; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.List; +import java.util.Locale; +import java.util.Properties; + +import static org.hamcrest.Matchers.arrayWithSize; +import static org.junit.Assert.assertThat; + +/** + * Utility functions for CSV testing + */ +public final class CsvTestUtils { + + private CsvTestUtils() { + + } + + /** + * Executes a query on provided CSV connection. + *
    + * The supplied table name is only used for the test identification. + */ + public static ResultSet executeCsvQuery(Connection csv, String csvTableName) throws SQLException { + ResultSet expected = csv.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY) + .executeQuery("SELECT * FROM " + csvTableName); + // trigger data loading for type inference + expected.beforeFirst(); + return expected; + } + + /** + * Wraps CSV in the expectedResults into CSV Connection. + * + * Use {@link #executeCsvQuery} to obtain ResultSet from this connection + */ + public static Connection csvConnection(String expectedResults) throws IOException, SQLException { + Properties csvProperties = new Properties(); + csvProperties.setProperty("charset", "UTF-8"); + csvProperties.setProperty("separator", "|"); + csvProperties.setProperty("trimValues", "true"); + Tuple resultsAndTypes = extractColumnTypesAndStripCli(expectedResults); + csvProperties.setProperty("columnTypes", resultsAndTypes.v2()); + Reader reader = new StringReader(resultsAndTypes.v1()); + TableReader tableReader = new TableReader() { + @Override + public Reader getReader(Statement statement, String tableName) throws SQLException { + return reader; + } + + @Override + public List getTableNames(Connection connection) throws SQLException { + throw new UnsupportedOperationException(); + } + }; + return new CsvConnection(tableReader, csvProperties, "") { + }; + } + + private static Tuple extractColumnTypesAndStripCli(String expectedResults) throws IOException { + try (StringReader reader = new StringReader(expectedResults); + BufferedReader bufferedReader = new BufferedReader(reader); + StringWriter writer = new StringWriter(); + BufferedWriter bufferedWriter = new BufferedWriter(writer)) { + + String header = bufferedReader.readLine(); + Tuple headerAndTypes; + + if (header.contains(":")) { + headerAndTypes = extractColumnTypesFromHeader(header); + } else { + // No type information in headers, no need to parse columns - trigger auto-detection + headerAndTypes = new Tuple<>(header, ""); + } + bufferedWriter.write(headerAndTypes.v1()); + bufferedWriter.newLine(); + + /* Read the next line. It might be a separator designed to look like the cli. + * If it is, then throw it out. If it isn't then keep it. 
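The expected results handled here are laid out the way the CLI prints them: a header row whose columns may carry `name:type` hints, an optional `----` separator row that is discarded, then the data rows. As an illustration with invented rows (not taken from any .csv-spec file), an input like the following would yield the column names `emp_no|first_name` and, via `resolveColumnType`, the column types `integer,string`:

```java
// Hypothetical expected-results block for a csv-spec test: '|' separates columns and the
// optional ':x' suffix in the header carries the column type (i = integer, s = string).
String expectedResults =
    "emp_no:i       |first_name:s\n" +
    "---------------+-------------\n" +  // CLI-style separator row; the parser drops it
    "10001          |Georgi\n" +
    "10002          |Bezalel\n";
```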
+ */ + String maybeSeparator = bufferedReader.readLine(); + if (maybeSeparator != null && false == maybeSeparator.startsWith("----")) { + bufferedWriter.write(maybeSeparator); + bufferedWriter.newLine(); + } + + bufferedWriter.flush(); + // Copy the rest of test + Streams.copy(bufferedReader, bufferedWriter); + return new Tuple<>(writer.toString(), headerAndTypes.v2()); + } + } + + private static Tuple extractColumnTypesFromHeader(String header) { + String[] columnTypes = Strings.delimitedListToStringArray(header, "|", " \t"); + StringBuilder types = new StringBuilder(); + StringBuilder columns = new StringBuilder(); + for (String column : columnTypes) { + String[] nameType = Strings.delimitedListToStringArray(column, ":"); + assertThat("If at least one column has a type associated with it, all columns should have types", nameType, arrayWithSize(2)); + if (types.length() > 0) { + types.append(","); + columns.append("|"); + } + columns.append(nameType[0]); + types.append(resolveColumnType(nameType[1])); + } + return new Tuple<>(columns.toString(), types.toString()); + } + + private static String resolveColumnType(String type) { + switch (type.toLowerCase(Locale.ROOT)) { + case "s": + return "string"; + case "b": + return "boolean"; + case "i": + return "integer"; + case "l": + return "long"; + case "f": + return "float"; + case "d": + return "double"; + case "ts": + return "timestamp"; + default: + return type; + } + } + + /** + * Returns an instance of a parser for csv-spec tests. + */ + public static CsvSpecParser specParser() { + return new CsvSpecParser(); + } + + private static class CsvSpecParser implements SpecBaseIntegrationTestCase.Parser { + private final StringBuilder data = new StringBuilder(); + private CsvTestCase testCase; + + @Override + public Object parse(String line) { + // beginning of the section + if (testCase == null) { + // pick up the query + testCase = new CsvTestCase(); + testCase.query = line.endsWith(";") ? line.substring(0, line.length() - 1) : line; + } + else { + // read data + if (line.startsWith(";")) { + testCase.expectedResults = data.toString(); + // clean-up and emit + CsvTestCase result = testCase; + testCase = null; + data.setLength(0); + return result; + } + else { + data.append(line); + data.append("\r\n"); + } + } + + return null; + } + } + + public static class CsvTestCase { + String query; + String expectedResults; + } +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java new file mode 100644 index 0000000000000..9137e2028aa50 --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java @@ -0,0 +1,188 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import org.apache.http.HttpHost; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; + +public class DataLoader { + + public static void main(String[] args) throws Exception { + try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) { + loadDatasetIntoEs(client); + Loggers.getLogger(DataLoader.class).info("Data loaded"); + } + } + + protected static void loadDatasetIntoEs(RestClient client) throws Exception { + loadDatasetIntoEs(client, "test_emp"); + loadDatasetIntoEs(client, "test_emp_copy"); + makeAlias(client, "test_alias", "test_emp", "test_emp_copy"); + makeAlias(client, "test_alias_emp", "test_emp", "test_emp_copy"); + } + + private static void createString(String name, XContentBuilder builder) throws Exception { + builder.startObject(name).field("type", "text") + .startObject("fields") + .startObject("keyword").field("type", "keyword").endObject() + .endObject() + .endObject(); + } + protected static void loadDatasetIntoEs(RestClient client, String index) throws Exception { + XContentBuilder createIndex = JsonXContent.contentBuilder().startObject(); + createIndex.startObject("settings"); + { + createIndex.field("number_of_shards", 1); + } + createIndex.endObject(); + createIndex.startObject("mappings"); + { + createIndex.startObject("emp"); + { + createIndex.startObject("properties"); + { + createIndex.startObject("emp_no").field("type", "integer").endObject(); + createString("first_name", createIndex); + createString("last_name", createIndex); + createIndex.startObject("gender").field("type", "keyword").endObject(); + createIndex.startObject("birth_date").field("type", "date").endObject(); + createIndex.startObject("hire_date").field("type", "date").endObject(); + createIndex.startObject("salary").field("type", "integer").endObject(); + createIndex.startObject("languages").field("type", "byte").endObject(); + { + createIndex.startObject("dep").field("type", "nested"); + createIndex.startObject("properties"); + createIndex.startObject("dep_id").field("type", "keyword").endObject(); + createString("dep_name", createIndex); + createIndex.startObject("from_date").field("type", "date").endObject(); + createIndex.startObject("to_date").field("type", "date").endObject(); + createIndex.endObject(); + createIndex.endObject(); + } + } + createIndex.endObject(); + } + createIndex.endObject(); + } + createIndex.endObject().endObject(); + + client.performRequest("PUT", "/" + index, emptyMap(), new StringEntity(Strings.toString(createIndex), + ContentType.APPLICATION_JSON)); + + + Map deps = new LinkedHashMap<>(); + csvToLines("departments", (titles, fields) -> deps.put(fields.get(0), fields.get(1))); 
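The loader below flattens each employees row into Elasticsearch's newline-delimited `_bulk` format: an empty `{"index":{}}` action line followed by the document source, one pair per row. A stripped-down sketch of the payload shape, using two invented rows and omitting the nested `dep` array the real loader appends:

```java
// Minimal illustration of the newline-delimited _bulk payload built below;
// every document is preceded by an empty index action and each line ends with '\n'.
StringBuilder bulkBody = new StringBuilder();
bulkBody.append("{\"index\":{}}\n");
bulkBody.append("{\"emp_no\":\"10001\",\"first_name\":\"Georgi\"}\n");
bulkBody.append("{\"index\":{}}\n");
bulkBody.append("{\"emp_no\":\"10002\",\"first_name\":\"Bezalel\"}\n");
```

The body is then POSTed to `/<index>/emp/_bulk` with `refresh=true`, exactly as the `performRequest` call further down does.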
+ + Map>> dep_emp = new LinkedHashMap<>(); + csvToLines("dep_emp", (titles, fields) -> { + String emp_no = fields.get(0); + List> list = dep_emp.get(emp_no); + if (list == null) { + list = new ArrayList<>(); + dep_emp.put(emp_no, list); + } + List dep = new ArrayList<>(); + // dep_id + dep.add(fields.get(1)); + // dep_name (from departments) + dep.add(deps.get(fields.get(1))); + // from + dep.add(fields.get(2)); + // to + dep.add(fields.get(3)); + list.add(dep); + }); + + StringBuilder bulk = new StringBuilder(); + csvToLines("employees", (titles, fields) -> { + bulk.append("{\"index\":{}}\n"); + bulk.append('{'); + String emp_no = fields.get(1); + for (int f = 0; f < fields.size(); f++) { + if (f != 0) { + bulk.append(','); + } + bulk.append('"').append(titles.get(f)).append("\":\"").append(fields.get(f)).append('"'); + } + // append department + List> list = dep_emp.get(emp_no); + if (!list.isEmpty()) { + bulk.append(", \"dep\" : ["); + for (List dp : list) { + bulk.append("{"); + bulk.append("\"dep_id\":\"" + dp.get(0) + "\","); + bulk.append("\"dep_name\":\"" + dp.get(1) + "\","); + bulk.append("\"from_date\":\"" + dp.get(2) + "\","); + bulk.append("\"to_date\":\"" + dp.get(3) + "\""); + bulk.append("},"); + } + // remove last , + bulk.setLength(bulk.length() - 1); + bulk.append("]"); + } + + bulk.append("}\n"); + }); + + client.performRequest("POST", "/" + index + "/emp/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + } + + protected static void makeAlias(RestClient client, String aliasName, String... indices) throws Exception { + for (String index : indices) { + client.performRequest("POST", "/" + index + "/_alias/" + aliasName); + } + } + + private static void csvToLines(String name, CheckedBiConsumer, List, Exception> consumeLine) throws Exception { + String location = "/" + name + ".csv"; + URL dataSet = SqlSpecTestCase.class.getResource(location); + if (dataSet == null) { + throw new IllegalArgumentException("Can't find [" + location + "]"); + } + + try (BufferedReader reader = new BufferedReader(new InputStreamReader(readFromJarUrl(dataSet), StandardCharsets.UTF_8))) { + String titlesString = reader.readLine(); + if (titlesString == null) { + throw new IllegalArgumentException("[" + location + "] must contain at least a title row"); + } + List titles = Arrays.asList(titlesString.split(",")); + + String line; + while ((line = reader.readLine()) != null) { + consumeLine.accept(titles, Arrays.asList(line.split(","))); + } + } + } + + @SuppressForbidden(reason = "test reads from jar") + public static InputStream readFromJarUrl(URL source) throws IOException { + return source.openStream(); + } +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DatabaseMetaDataTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DatabaseMetaDataTestCase.java new file mode 100644 index 0000000000000..a5f3a5f83643d --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DatabaseMetaDataTestCase.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import org.elasticsearch.common.CheckedSupplier; + +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.SQLException; + +import static org.elasticsearch.xpack.qa.sql.jdbc.JdbcAssert.assertResultSets; + +/** + * Tests for our implementation of {@link DatabaseMetaData}. + */ +public class DatabaseMetaDataTestCase extends JdbcIntegrationTestCase { + /** + * We do not support procedures so we return an empty set for + * {@link DatabaseMetaData#getProcedures(String, String, String)}. + */ + public void testGetProcedures() throws Exception { + try (Connection h2 = LocalH2.anonymousDb(); + Connection es = esJdbc()) { + h2.createStatement().executeUpdate("RUNSCRIPT FROM 'classpath:/setup_mock_metadata_get_procedures.sql'"); + + ResultSet expected = h2.createStatement().executeQuery("SELECT * FROM mock"); + assertResultSets(expected, es.getMetaData().getProcedures( + randomBoolean() ? null : randomAlphaOfLength(5), + randomBoolean() ? null : randomAlphaOfLength(5), + randomBoolean() ? null : randomAlphaOfLength(5))); + } + } + + /** + * We do not support procedures so we return an empty set for + * {@link DatabaseMetaData#getProcedureColumns(String, String, String, String)}. + */ + public void testGetProcedureColumns() throws Exception { + try (Connection h2 = LocalH2.anonymousDb(); + Connection es = esJdbc()) { + h2.createStatement().executeUpdate("RUNSCRIPT FROM 'classpath:/setup_mock_metadata_get_procedure_columns.sql'"); + + ResultSet expected = h2.createStatement().executeQuery("SELECT * FROM mock"); + assertResultSets(expected, es.getMetaData().getProcedureColumns( + randomBoolean() ? null : randomAlphaOfLength(5), + randomBoolean() ? null : randomAlphaOfLength(5), + randomBoolean() ? null : randomAlphaOfLength(5), + randomBoolean() ? 
null : randomAlphaOfLength(5))); + } + } + + public void testGetTables() throws Exception { + index("test1", body -> body.field("name", "bob")); + index("test2", body -> body.field("name", "bob")); + + try (Connection h2 = LocalH2.anonymousDb(); + Connection es = esJdbc()) { + h2.createStatement().executeUpdate("RUNSCRIPT FROM 'classpath:/setup_mock_metadata_get_tables.sql'"); + + CheckedSupplier all = () -> + h2.createStatement().executeQuery("SELECT '" + clusterName() + "' AS TABLE_CAT, * FROM mock"); + assertResultSets(all.get(), es.getMetaData().getTables("%", "%", "%", null)); + assertResultSets(all.get(), es.getMetaData().getTables("%", "%", "te%", null)); + assertResultSets( + h2.createStatement().executeQuery("SELECT '" + clusterName() + "' AS TABLE_CAT, * FROM mock WHERE TABLE_NAME = 'test1'"), + es.getMetaData().getTables("%", "%", "test1", null)); + } + } + + public void testGetTableTypes() throws Exception { + index("test1", body -> body.field("name", "bob")); + index("test2", body -> body.field("name", "bob")); + + try (Connection h2 = LocalH2.anonymousDb(); Connection es = esJdbc()) { + h2.createStatement().executeUpdate("RUNSCRIPT FROM 'classpath:/setup_mock_metadata_get_table_types.sql'"); + + CheckedSupplier all = () -> h2.createStatement() + .executeQuery("SELECT '" + clusterName() + "' AS TABLE_CAT, * FROM mock"); + assertResultSets(all.get(), es.getMetaData().getTables("%", "%", "%", new String[] { "BASE TABLE" })); + assertResultSets( + h2.createStatement() + .executeQuery("SELECT '" + clusterName() + "' AS TABLE_CAT, * FROM mock WHERE TABLE_NAME = 'test1'"), + es.getMetaData().getTables("%", "%", "test1", new String[] { "BASE TABLE" })); + } + } + + public void testColumns() throws Exception { + index("test1", body -> body.field("name", "bob")); + index("test2", body -> { + body.field("number", 7); + body.field("date", "2017-01-01T01:01:01Z"); + body.field("float", 42.0); + }); + + try (Connection h2 = LocalH2.anonymousDb(); + Connection es = esJdbc()) { + h2.createStatement().executeUpdate("RUNSCRIPT FROM 'classpath:/setup_mock_metadata_get_columns.sql'"); + + ResultSet expected = h2.createStatement().executeQuery("SELECT '" + clusterName() + "' AS TABLE_CAT, * FROM mock"); + assertResultSets(expected, es.getMetaData().getColumns(null, "%", "%", null)); + } + } +} \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugCsvSpec.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugCsvSpec.java new file mode 100644 index 0000000000000..c0d3db026d8bd --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugCsvSpec.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.CsvTestCase; +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.List; +import java.util.Properties; + +import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.csvConnection; +import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.executeCsvQuery; +import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.specParser; + +@TestLogging(JdbcTestUtils.SQL_TRACE) +public abstract class DebugCsvSpec extends SpecBaseIntegrationTestCase { + private final CsvTestCase testCase; + + @ParametersFactory(shuffle = false, argumentFormatting = SqlSpecTestCase.PARAM_FORMATTING) + public static List readScriptSpec() throws Exception { + Parser parser = specParser(); + return readScriptSpec("/debug.csv-spec", parser); + } + + public DebugCsvSpec(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + super(fileName, groupName, testName, lineNumber); + this.testCase = testCase; + } + + @Override + protected void assertResults(ResultSet expected, ResultSet elastic) throws SQLException { + Logger log = logEsResultSet() ? logger : null; + + // + // uncomment this to printout the result set and create new CSV tests + // + JdbcTestUtils.logResultSetMetadata(elastic, log); + JdbcTestUtils.logResultSetData(elastic, log); + //JdbcAssert.assertResultSets(expected, elastic, log); + } + + @Override + protected boolean logEsResultSet() { + return true; + } + + @Override + protected final void doTest() throws Throwable { + try (Connection csv = csvConnection(testCase.expectedResults); + Connection es = esJdbc()) { + + // pass the testName as table for debugging purposes (in case the underlying reader is missing) + ResultSet expected = executeCsvQuery(csv, testName); + ResultSet elasticResults = executeJdbcQuery(es, testCase.query); + assertResults(expected, elasticResults); + } + } + + // make sure ES uses UTC (otherwise JDBC driver picks up the JVM timezone per spec/convention) + @Override + protected Properties connectionProperties() { + Properties connectionProperties = new Properties(); + connectionProperties.setProperty(JdbcConfiguration.TIME_ZONE, "UTC"); + return connectionProperties; + } +} \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugSqlSpec.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugSqlSpec.java new file mode 100644 index 0000000000000..8726d8ddb9cf5 --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugSqlSpec.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.junit.annotations.TestLogging; + +import java.util.List; + +@TestLogging(JdbcTestUtils.SQL_TRACE) +public abstract class DebugSqlSpec extends SqlSpecTestCase { + @ParametersFactory(shuffle = false, argumentFormatting = PARAM_FORMATTING) + public static List readScriptSpec() throws Exception { + Parser parser = specParser(); + return readScriptSpec("/debug.sql-spec", parser); + } + + public DebugSqlSpec(String fileName, String groupName, String testName, Integer lineNumber, String query) { + super(fileName, groupName, testName, lineNumber, query); + } + + @Override + protected boolean logEsResultSet() { + return true; + } +} \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ErrorsTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ErrorsTestCase.java new file mode 100644 index 0000000000000..0fffb0dac4c3b --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ErrorsTestCase.java @@ -0,0 +1,115 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import java.sql.Connection; +import java.sql.SQLException; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; + +import static java.util.Collections.emptyMap; + +import static org.hamcrest.Matchers.startsWith; + +/** + * Tests for exceptions and their messages. + */ +public class ErrorsTestCase extends JdbcIntegrationTestCase implements org.elasticsearch.xpack.qa.sql.ErrorsTestCase { + @Override + public void testSelectInvalidSql() throws Exception { + try (Connection c = esJdbc()) { + SQLException e = expectThrows(SQLException.class, () -> c.prepareStatement("SELECT * FRO").executeQuery()); + assertEquals("Found 1 problem(s)\nline 1:8: Cannot determine columns for *", e.getMessage()); + } + } + + @Override + public void testSelectFromMissingIndex() throws SQLException { + try (Connection c = esJdbc()) { + SQLException e = expectThrows(SQLException.class, () -> c.prepareStatement("SELECT * FROM test").executeQuery()); + assertEquals("Found 1 problem(s)\nline 1:15: Unknown index [test]", e.getMessage()); + } + } + + @Override + public void testSelectFromIndexWithoutTypes() throws Exception { + // Create an index without any types + client().performRequest("PUT", "/test", emptyMap(), new StringEntity("{}", ContentType.APPLICATION_JSON)); + + try (Connection c = esJdbc()) { + SQLException e = expectThrows(SQLException.class, () -> c.prepareStatement("SELECT * FROM test").executeQuery()); + assertEquals("Found 1 problem(s)\nline 1:15: [test] doesn't have any types so it is incompatible with sql", e.getMessage()); + } + } + + @Override + public void testSelectMissingField() throws Exception { + index("test", body -> body.field("test", "test")); + try (Connection c = esJdbc()) { + SQLException e = expectThrows(SQLException.class, () -> c.prepareStatement("SELECT missing FROM test").executeQuery()); + assertEquals("Found 1 problem(s)\nline 1:8: Unknown column [missing]", e.getMessage()); + } + } + + @Override + public void testSelectMissingFunction() throws Exception { + index("test", body -> body.field("foo", 
1)); + try (Connection c = esJdbc()) { + SQLException e = expectThrows(SQLException.class, () -> c.prepareStatement("SELECT missing(foo) FROM test").executeQuery()); + assertEquals("Found 1 problem(s)\nline 1:8: Unknown function [missing]", e.getMessage()); + } + } + + @Override + public void testSelectProjectScoreInAggContext() throws Exception { + index("test", body -> body.field("foo", 1)); + try (Connection c = esJdbc()) { + SQLException e = expectThrows(SQLException.class, () -> + c.prepareStatement("SELECT foo, SCORE(), COUNT(*) FROM test GROUP BY foo").executeQuery()); + assertEquals("Found 1 problem(s)\nline 1:13: Cannot use non-grouped column [SCORE()], expected [foo]", e.getMessage()); + } + } + + @Override + public void testSelectOrderByScoreInAggContext() throws Exception { + index("test", body -> body.field("foo", 1)); + try (Connection c = esJdbc()) { + SQLException e = expectThrows(SQLException.class, () -> + c.prepareStatement("SELECT foo, COUNT(*) FROM test GROUP BY foo ORDER BY SCORE()").executeQuery()); + assertEquals("Found 1 problem(s)\nline 1:54: Cannot order by non-grouped column [SCORE()], expected [foo]", e.getMessage()); + } + } + + @Override + public void testSelectGroupByScore() throws Exception { + index("test", body -> body.field("foo", 1)); + try (Connection c = esJdbc()) { + SQLException e = expectThrows(SQLException.class, () -> + c.prepareStatement("SELECT COUNT(*) FROM test GROUP BY SCORE()").executeQuery()); + assertEquals("Found 1 problem(s)\nline 1:36: Cannot use [SCORE()] for grouping", e.getMessage()); + } + } + + @Override + public void testSelectScoreSubField() throws Exception { + index("test", body -> body.field("foo", 1)); + try (Connection c = esJdbc()) { + SQLException e = expectThrows(SQLException.class, () -> + c.prepareStatement("SELECT SCORE().bar FROM test").executeQuery()); + assertThat(e.getMessage(), startsWith("line 1:15: extraneous input '.' expecting {, ','")); + } + } + + @Override + public void testSelectScoreInScalar() throws Exception { + index("test", body -> body.field("foo", 1)); + try (Connection c = esJdbc()) { + SQLException e = expectThrows(SQLException.class, () -> + c.prepareStatement("SELECT SIN(SCORE()) FROM test").executeQuery()); + assertThat(e.getMessage(), startsWith("Found 1 problem(s)\nline 1:12: [SCORE()] cannot be an argument to a function")); + } + } +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/FetchSizeTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/FetchSizeTestCase.java new file mode 100644 index 0000000000000..de7cf465acacf --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/FetchSizeTestCase.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.junit.Before; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearchContexts; + +/** + * Tests for setting {@link Statement#setFetchSize(int)} and + * {@link ResultSet#getFetchSize()}. 
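+ * Both the scroll-backed {@code SELECT} tests and the aggregation test below assert that the
+ * requested fetch size is reported back unchanged by {@link ResultSet#getFetchSize()} on every row.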
+ */ +public class FetchSizeTestCase extends JdbcIntegrationTestCase { + @Before + public void createTestIndex() throws IOException { + StringBuilder bulk = new StringBuilder(); + for (int i = 0; i < 20; i++) { + bulk.append("{\"index\":{}}\n"); + bulk.append("{\"test_field\":" + i + "}\n"); + } + client().performRequest("PUT", "/test/doc/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + } + + /** + * Test for {@code SELECT} that is implemented as a scroll query. + * In this case the fetch size should be entirely respected. + */ + public void testScroll() throws SQLException { + try (Connection c = esJdbc(); + Statement s = c.createStatement()) { + s.setFetchSize(4); + try (ResultSet rs = s.executeQuery("SELECT * FROM test ORDER BY test_field ASC")) { + for (int i = 0; i < 20; i++) { + assertEquals(4, rs.getFetchSize()); + assertTrue("No more entries left after " + i, rs.next()); + assertEquals(i, rs.getInt(1)); + } + assertFalse(rs.next()); + } + } + } + + /** + * Test for {@code SELECT} that is implemented as a scroll query. + * In this test we don't retrieve all records and rely on close() to clean the cursor + */ + public void testIncompleteScroll() throws Exception { + try (Connection c = esJdbc(); + Statement s = c.createStatement()) { + s.setFetchSize(4); + try (ResultSet rs = s.executeQuery("SELECT * FROM test ORDER BY test_field ASC")) { + for (int i = 0; i < 10; i++) { + assertEquals(4, rs.getFetchSize()); + assertTrue("No more entries left after " + i, rs.next()); + assertEquals(i, rs.getInt(1)); + } + assertTrue(rs.next()); + } + } + assertNoSearchContexts(); + } + + + /** + * Test for {@code SELECT} that is implemented as an aggregation. + */ + public void testAggregation() throws SQLException { + try (Connection c = esJdbc(); + Statement s = c.createStatement()) { + s.setFetchSize(4); + try (ResultSet rs = s.executeQuery("SELECT test_field, COUNT(*) FROM test GROUP BY test_field")) { + for (int i = 0; i < 20; i++) { + assertEquals(4, rs.getFetchSize()); + assertTrue("No more entries left at " + i, rs.next()); + assertEquals(i, rs.getInt(1)); + assertEquals("Incorrect count returned", 1, rs.getInt(2)); + } + assertFalse(rs.next()); + } + } + } +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java new file mode 100644 index 0000000000000..801f40639fad1 --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java @@ -0,0 +1,164 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import org.apache.logging.log4j.Logger; +import org.relique.jdbc.csv.CsvResultSet; + +import java.sql.JDBCType; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.List; +import java.util.Locale; +import java.util.TimeZone; + +import static java.lang.String.format; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public class JdbcAssert { + private static final Calendar UTC_CALENDAR = Calendar.getInstance(TimeZone.getTimeZone("UTC"), Locale.ROOT); + + public static void assertResultSets(ResultSet expected, ResultSet actual) throws SQLException { + assertResultSets(expected, actual, null); + } + + public static void assertResultSets(ResultSet expected, ResultSet actual, Logger logger) throws SQLException { + try (ResultSet ex = expected; ResultSet ac = actual) { + assertResultSetMetadata(ex, ac, logger); + assertResultSetData(ex, ac, logger); + } + } + + // metadata doesn't consume a ResultSet thus it shouldn't close it + public static void assertResultSetMetadata(ResultSet expected, ResultSet actual, Logger logger) throws SQLException { + ResultSetMetaData expectedMeta = expected.getMetaData(); + ResultSetMetaData actualMeta = actual.getMetaData(); + + if (logger != null) { + JdbcTestUtils.logResultSetMetadata(actual, logger); + } + + if (expectedMeta.getColumnCount() != actualMeta.getColumnCount()) { + List expectedCols = new ArrayList<>(); + for (int i = 1; i <= expectedMeta.getColumnCount(); i++) { + expectedCols.add(expectedMeta.getColumnName(i)); + + } + + List actualCols = new ArrayList<>(); + for (int i = 1; i <= actualMeta.getColumnCount(); i++) { + actualCols.add(actualMeta.getColumnName(i)); + } + + assertEquals(format(Locale.ROOT, "Different number of columns returned (expected %d but was %d);", + expectedMeta.getColumnCount(), actualMeta.getColumnCount()), + expectedCols.toString(), actualCols.toString()); + } + + for (int column = 1; column <= expectedMeta.getColumnCount(); column++) { + String expectedName = expectedMeta.getColumnName(column); + String actualName = actualMeta.getColumnName(column); + + if (!expectedName.equals(actualName)) { + // to help debugging, indicate the previous column (which also happened to match and thus was correct) + String expectedSet = expectedName; + String actualSet = actualName; + if (column > 1) { + expectedSet = expectedMeta.getColumnName(column - 1) + "," + expectedName; + actualSet = actualMeta.getColumnName(column - 1) + "," + actualName; + } + + assertEquals("Different column name [" + column + "]", expectedSet, actualSet); + } + + // use the type not the name (timestamp with timezone returns spaces for example) + int expectedType = expectedMeta.getColumnType(column); + int actualType = actualMeta.getColumnType(column); + + // since H2 cannot use a fixed timezone, the data is stored in UTC (and thus with timezone) + if (expectedType == Types.TIMESTAMP_WITH_TIMEZONE) { + expectedType = Types.TIMESTAMP; + } + // since csv doesn't support real, we use float instead..... 
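+            // (the CSV-backed expected results declare FLOAT where the ES driver reports REAL, so normalize the expected type before comparing)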
+ if (expectedType == Types.FLOAT && expected instanceof CsvResultSet) { + expectedType = Types.REAL; + } + assertEquals("Different column type for column [" + expectedName + "] (" + JDBCType.valueOf(expectedType) + " != " + + JDBCType.valueOf(actualType) + ")", expectedType, actualType); + } + } + + // The ResultSet is consumed and thus it should be closed + public static void assertResultSetData(ResultSet expected, ResultSet actual, Logger logger) throws SQLException { + try (ResultSet ex = expected; ResultSet ac = actual) { + doAssertResultSetData(ex, ac, logger); + } + } + + private static void doAssertResultSetData(ResultSet expected, ResultSet actual, Logger logger) throws SQLException { + ResultSetMetaData metaData = expected.getMetaData(); + int columns = metaData.getColumnCount(); + + long count = 0; + try { + for (count = 0; expected.next(); count++) { + assertTrue("Expected more data but no more entries found after [" + count + "]", actual.next()); + + if (logger != null) { + logger.info(JdbcTestUtils.resultSetCurrentData(actual)); + } + + for (int column = 1; column <= columns; column++) { + Object expectedObject = expected.getObject(column); + Object actualObject = actual.getObject(column); + + int type = metaData.getColumnType(column); + + String msg = format(Locale.ROOT, "Different result for column [" + metaData.getColumnName(column) + "], " + + "entry [" + (count + 1) + "]"); + + // handle nulls first + if (expectedObject == null || actualObject == null) { + assertEquals(msg, expectedObject, actualObject); + } + // then timestamp + else if (type == Types.TIMESTAMP || type == Types.TIMESTAMP_WITH_TIMEZONE) { + assertEquals(msg, expected.getTimestamp(column), actual.getTimestamp(column)); + } + // and floats/doubles + else if (type == Types.DOUBLE) { + // the 1d/1f difference is used due to rounding/flooring + assertEquals(msg, (double) expectedObject, (double) actualObject, 1d); + } else if (type == Types.FLOAT) { + assertEquals(msg, (float) expectedObject, (float) actualObject, 1f); + } + // finally the actual comparison + else { + assertEquals(msg, expectedObject, actualObject); + } + } + } + } catch (AssertionError ae) { + if (logger != null && actual.next()) { + logger.info("^^^ Assertion failure ^^^"); + logger.info(JdbcTestUtils.resultSetCurrentData(actual)); + } + throw ae; + } + + if (actual.next()) { + fail("Elasticsearch [" + actual + "] still has data after [" + count + "] entries:\n" + + JdbcTestUtils.resultSetCurrentData(actual)); + } + } + +} \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcIntegrationTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcIntegrationTestCase.java new file mode 100644 index 0000000000000..aa5dc5c0ac2b6 --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcIntegrationTestCase.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import org.apache.http.HttpEntity; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; +import org.elasticsearch.xpack.sql.jdbc.jdbcx.JdbcDataSource; +import org.joda.time.DateTimeZone; +import org.junit.After; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Properties; +import java.util.Set; +import java.util.TimeZone; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearchContexts; + +public abstract class JdbcIntegrationTestCase extends ESRestTestCase { + @After + public void checkSearchContent() throws Exception { + // Some context might linger due to fire and forget nature of scroll cleanup + assertNoSearchContexts(); + } + + /** + * Read an address for Elasticsearch suitable for the JDBC driver from the system properties. + */ + public static String elasticsearchAddress() { + String cluster = System.getProperty("tests.rest.cluster"); + // JDBC only supports a single node at a time so we just give it one. + return cluster.split(",")[0]; + /* This doesn't include "jdbc:es://" because we want the example in + * esJdbc to be obvious and because we want to use getProtocol to add + * https if we are running against https. */ + } + + public Connection esJdbc() throws SQLException { + return randomBoolean() ? 
useDriverManager() : useDataSource(); + } + + protected Connection useDriverManager() throws SQLException { + String elasticsearchAddress = getProtocol() + "://" + elasticsearchAddress(); + // tag::connect-dm + String address = "jdbc:es://" + elasticsearchAddress; // <1> + Properties connectionProperties = connectionProperties(); // <2> + Connection connection = DriverManager.getConnection(address, connectionProperties); + // end::connect-dm + assertNotNull("The timezone should be specified", connectionProperties.getProperty(JdbcConfiguration.TIME_ZONE)); + return connection; + } + + protected Connection useDataSource() throws SQLException { + String elasticsearchAddress = getProtocol() + "://" + elasticsearchAddress(); + // tag::connect-ds + JdbcDataSource dataSource = new JdbcDataSource(); + String address = "jdbc:es://" + elasticsearchAddress; // <1> + dataSource.setUrl(address); + Properties connectionProperties = connectionProperties(); // <2> + dataSource.setProperties(connectionProperties); + Connection connection = dataSource.getConnection(); + // end::connect-ds + assertNotNull("The timezone should be specified", connectionProperties.getProperty(JdbcConfiguration.TIME_ZONE)); + return connection; + } + + public static void index(String index, CheckedConsumer body) throws IOException { + XContentBuilder builder = JsonXContent.contentBuilder().startObject(); + body.accept(builder); + builder.endObject(); + HttpEntity doc = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); + client().performRequest("PUT", "/" + index + "/doc/1", singletonMap("refresh", "true"), doc); + } + + protected String clusterName() { + try { + String response = EntityUtils.toString(client().performRequest("GET", "/").getEntity()); + return XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false).get("cluster_name").toString(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * The properties used to build the connection. + */ + protected Properties connectionProperties() { + Properties connectionProperties = new Properties(); + connectionProperties.put(JdbcConfiguration.TIME_ZONE, randomKnownTimeZone()); + return connectionProperties; + } + + public static String randomKnownTimeZone() { + // We use system default timezone for the connection that is selected randomly by TestRuleSetupAndRestoreClassEnv + // from all available JDK timezones. While Joda and JDK are generally in sync, some timezones might not be known + // to the current version of Joda and in this case the test might fail. To avoid that, we specify a timezone + // known for both Joda and JDK + Set timeZones = new HashSet<>(DateTimeZone.getAvailableIDs()); + timeZones.retainAll(Arrays.asList(TimeZone.getAvailableIDs())); + List ids = new ArrayList<>(timeZones); + Collections.sort(ids); + return randomFrom(ids); + } +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcTestUtils.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcTestUtils.java new file mode 100644 index 0000000000000..5062525f2b31e --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcTestUtils.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import org.apache.logging.log4j.Logger; + +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; + +public abstract class JdbcTestUtils { + + public static final String SQL_TRACE = "org.elasticsearch.xpack.sql:TRACE"; + + public static void logResultSetMetadata(ResultSet rs, Logger logger) throws SQLException { + ResultSetMetaData metaData = rs.getMetaData(); + // header + StringBuilder sb = new StringBuilder(); + StringBuilder column = new StringBuilder(); + + int columns = metaData.getColumnCount(); + for (int i = 1; i <= columns; i++) { + if (i > 1) { + sb.append(" | "); + } + column.setLength(0); + column.append(metaData.getColumnName(i)); + column.append("("); + column.append(metaData.getColumnTypeName(i)); + column.append(")"); + + sb.append(trimOrPad(column)); + } + + int l = sb.length(); + logger.info(sb.toString()); + sb.setLength(0); + for (int i = 0; i < l; i++) { + sb.append("-"); + } + + logger.info(sb.toString()); + } + + private static final int MAX_WIDTH = 20; + + public static void logResultSetData(ResultSet rs, Logger log) throws SQLException { + ResultSetMetaData metaData = rs.getMetaData(); + StringBuilder sb = new StringBuilder(); + StringBuilder column = new StringBuilder(); + + int columns = metaData.getColumnCount(); + + while (rs.next()) { + sb.setLength(0); + for (int i = 1; i <= columns; i++) { + column.setLength(0); + if (i > 1) { + sb.append(" | "); + } + sb.append(trimOrPad(column.append(rs.getString(i)))); + } + log.info(sb); + } + } + + public static String resultSetCurrentData(ResultSet rs) throws SQLException { + ResultSetMetaData metaData = rs.getMetaData(); + StringBuilder column = new StringBuilder(); + + int columns = metaData.getColumnCount(); + + StringBuilder sb = new StringBuilder(); + for (int i = 1; i <= columns; i++) { + column.setLength(0); + if (i > 1) { + sb.append(" | "); + } + sb.append(trimOrPad(column.append(rs.getString(i)))); + } + return sb.toString(); + } + + private static StringBuilder trimOrPad(StringBuilder buffer) { + if (buffer.length() > MAX_WIDTH) { + buffer.setLength(MAX_WIDTH - 1); + buffer.append("~"); + } + else { + for (int i = buffer.length(); i < MAX_WIDTH; i++) { + buffer.append(" "); + } + } + return buffer; + } +} \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/LocalH2.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/LocalH2.java new file mode 100644 index 0000000000000..e807a2dbf2e8e --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/LocalH2.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.CheckedSupplier; +import org.junit.rules.ExternalResource; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.Properties; + +public class LocalH2 extends ExternalResource implements CheckedSupplier { + + static { + try { + // Initialize h2 so we can use it for testing + Class.forName("org.h2.Driver"); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } + } + + /** + * Creates an in memory anonymous database and returns the only connection to it. + * Closing the connection will remove the db. + */ + public static Connection anonymousDb() throws SQLException { + return DriverManager.getConnection("jdbc:h2:mem:;DATABASE_TO_UPPER=false;ALIAS_COLUMN_NAME=true"); + } + + private static final Properties DEFAULTS = new Properties(); + + private final String url; + // H2 in-memory will keep the db alive as long as this connection is opened + private Connection keepAlive; + + private CheckedConsumer initializer; + + /* + * The syntax on the connection string is fairly particular: + * mem:; creates an anonymous database in memory. The `;` is + * technically the separator that comes after the name. + * DATABASE_TO_UPPER=false turns *off* H2's Oracle-like habit + * of upper-casing everything that isn't quoted. + * ALIAS_COLUMN_NAME=true turn *on* returning alias names in + * result set metadata which is what most DBs do except + * for MySQL and, by default, H2. Our jdbc driver does it. + */ + // http://www.h2database.com/html/features.html#in_memory_databases + public LocalH2(CheckedConsumer initializer) { + this.url = "jdbc:h2:mem:essql;DATABASE_TO_UPPER=false;ALIAS_COLUMN_NAME=true"; + this.initializer = initializer; + } + + @Override + protected void before() throws Throwable { + keepAlive = get(); + initializer.accept(keepAlive); + } + + @Override + protected void after() { + try { + keepAlive.close(); + } catch (SQLException ex) { + // close + } + } + + @Override + public Connection get() throws SQLException { + return DriverManager.getConnection(url, DEFAULTS); + } +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/PreparedStatementTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/PreparedStatementTestCase.java new file mode 100644 index 0000000000000..c4ac31120a3bd --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/PreparedStatementTestCase.java @@ -0,0 +1,202 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import org.elasticsearch.common.collect.Tuple; + +import java.sql.Connection; +import java.sql.JDBCType; +import java.sql.ParameterMetaData; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.SQLSyntaxErrorException; + +import static org.hamcrest.Matchers.startsWith; + +public class PreparedStatementTestCase extends JdbcIntegrationTestCase { + + public void testSupportedTypes() throws Exception { + index("library", builder -> { + builder.field("name", "Don Quixote"); + builder.field("page_count", 1072); + }); + + String stringVal = randomAlphaOfLength(randomIntBetween(0, 1000)); + int intVal = randomInt(); + long longVal = randomLong(); + double doubleVal = randomDouble(); + float floatVal = randomFloat(); + boolean booleanVal = randomBoolean(); + byte byteVal = randomByte(); + short shortVal = randomShort(); + + try (Connection connection = esJdbc()) { + try (PreparedStatement statement = connection.prepareStatement( + "SELECT ?, ?, ?, ?, ?, ?, ?, ?, ?, name FROM library WHERE page_count=?")) { + statement.setString(1, stringVal); + statement.setInt(2, intVal); + statement.setLong(3, longVal); + statement.setFloat(4, floatVal); + statement.setDouble(5, doubleVal); + statement.setNull(6, JDBCType.DOUBLE.getVendorTypeNumber()); + statement.setBoolean(7, booleanVal); + statement.setByte(8, byteVal); + statement.setShort(9, shortVal); + statement.setInt(10, 1072); + + try (ResultSet results = statement.executeQuery()) { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + ParameterMetaData parameterMetaData = statement.getParameterMetaData(); + assertEquals(resultSetMetaData.getColumnCount(), parameterMetaData.getParameterCount()); + for (int i = 1; i < resultSetMetaData.getColumnCount(); i++) { + // Makes sure that column types survived the round trip + assertEquals(parameterMetaData.getParameterType(i), resultSetMetaData.getColumnType(i)); + } + assertTrue(results.next()); + assertEquals(stringVal, results.getString(1)); + assertEquals(intVal, results.getInt(2)); + assertEquals(longVal, results.getLong(3)); + assertEquals(floatVal, results.getFloat(4), 0.00001f); + assertEquals(doubleVal, results.getDouble(5), 0.00001f); + assertNull(results.getObject(6)); + assertEquals(booleanVal, results.getBoolean(7)); + assertEquals(byteVal, results.getByte(8)); + assertEquals(shortVal, results.getShort(9)); + assertEquals("Don Quixote", results.getString(10)); + assertFalse(results.next()); + } + } + } + } + + public void testUnsupportedParameterUse() throws Exception { + index("library", builder -> { + builder.field("name", "Don Quixote"); + builder.field("page_count", 1072); + }); + + try (Connection connection = esJdbc()) { + // This is the current limitation of JDBC parser that it cannot detect improper use of '?' + try (PreparedStatement statement = connection.prepareStatement("SELECT name FROM ? WHERE page_count=?")) { + statement.setString(1, "library"); + statement.setInt(2, 1072); + SQLSyntaxErrorException exception = expectThrows(SQLSyntaxErrorException.class, statement::executeQuery); + assertThat(exception.getMessage(), startsWith("line 1:18: mismatched input '?' 
expecting ")); + + } + } + } + + public void testTooMayParameters() throws Exception { + index("library", builder -> { + builder.field("name", "Don Quixote"); + builder.field("page_count", 1072); + }); + + try (Connection connection = esJdbc()) { + try (PreparedStatement statement = connection.prepareStatement("SELECT name FROM library WHERE page_count=?")) { + statement.setInt(1, 1072); + int tooBig = randomIntBetween(2, 10); + SQLException tooBigEx = expectThrows(SQLException.class, () -> statement.setInt(tooBig, 1072)); + assertThat(tooBigEx.getMessage(), startsWith("Invalid parameter index [")); + int tooSmall = randomIntBetween(-10, 0); + SQLException tooSmallEx = expectThrows(SQLException.class, () -> statement.setInt(tooSmall, 1072)); + assertThat(tooSmallEx.getMessage(), startsWith("Invalid parameter index [")); + } + } + } + + public void testStringEscaping() throws Exception { + try (Connection connection = esJdbc()) { + try (PreparedStatement statement = connection.prepareStatement( + "SELECT ?, ?, ?, ?")) { + statement.setString(1, "foo --"); + statement.setString(2, "/* foo */"); + statement.setString(3, "\"foo"); + statement.setString(4, "'foo'"); + try (ResultSet results = statement.executeQuery()) { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + assertEquals(4, resultSetMetaData.getColumnCount()); + for (int i = 1; i < resultSetMetaData.getColumnCount(); i++) { + assertEquals(JDBCType.VARCHAR.getVendorTypeNumber().intValue(), resultSetMetaData.getColumnType(i)); + } + assertTrue(results.next()); + assertEquals("foo --", results.getString(1)); + assertEquals("/* foo */", results.getString(2)); + assertEquals("\"foo", results.getString(3)); + assertEquals("'foo'", results.getString(4)); + assertFalse(results.next()); + } + } + } + } + + public void testCommentsHandling() throws Exception { + try (Connection connection = esJdbc()) { + try (PreparedStatement statement = connection.prepareStatement( + "SELECT ?, /* ?, */ ? 
-- ?")) { + assertEquals(2, statement.getParameterMetaData().getParameterCount()); + statement.setString(1, "foo"); + statement.setString(2, "bar"); + try (ResultSet results = statement.executeQuery()) { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + assertEquals(2, resultSetMetaData.getColumnCount()); + assertTrue(results.next()); + assertEquals("foo", results.getString(1)); + assertEquals("bar", results.getString(2)); + assertFalse(results.next()); + } + } + } + } + + public void testSingleParameterMultipleTypes() throws Exception { + String stringVal = randomAlphaOfLength(randomIntBetween(0, 1000)); + int intVal = randomInt(); + long longVal = randomLong(); + double doubleVal = randomDouble(); + float floatVal = randomFloat(); + boolean booleanVal = randomBoolean(); + byte byteVal = randomByte(); + short shortVal = randomShort(); + + try (Connection connection = esJdbc()) { + try (PreparedStatement statement = connection.prepareStatement("SELECT ?")) { + + statement.setString(1, stringVal); + assertEquals(new Tuple<>(JDBCType.VARCHAR.getVendorTypeNumber(), stringVal), execute(statement)); + statement.setInt(1, intVal); + assertEquals(new Tuple<>(JDBCType.INTEGER.getVendorTypeNumber(), intVal), execute(statement)); + statement.setLong(1, longVal); + assertEquals(new Tuple<>(JDBCType.BIGINT.getVendorTypeNumber(), longVal), execute(statement)); + statement.setFloat(1, floatVal); + assertEquals(new Tuple<>(JDBCType.REAL.getVendorTypeNumber(), floatVal), execute(statement)); + statement.setDouble(1, doubleVal); + assertEquals(new Tuple<>(JDBCType.DOUBLE.getVendorTypeNumber(), doubleVal), execute(statement)); + statement.setNull(1, JDBCType.DOUBLE.getVendorTypeNumber()); + assertEquals(new Tuple<>(JDBCType.DOUBLE.getVendorTypeNumber(), null), execute(statement)); + statement.setBoolean(1, booleanVal); + assertEquals(new Tuple<>(JDBCType.BOOLEAN.getVendorTypeNumber(), booleanVal), execute(statement)); + statement.setByte(1, byteVal); + assertEquals(new Tuple<>(JDBCType.TINYINT.getVendorTypeNumber(), byteVal), execute(statement)); + statement.setShort(1, shortVal); + assertEquals(new Tuple<>(JDBCType.SMALLINT.getVendorTypeNumber(), shortVal), execute(statement)); + } + } + } + + private Tuple execute(PreparedStatement statement) throws SQLException { + try (ResultSet results = statement.executeQuery()) { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + assertTrue(results.next()); + Tuple result = new Tuple<>(resultSetMetaData.getColumnType(1), results.getObject(1)); + assertFalse(results.next()); + return result; + } + } +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ShowTablesTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ShowTablesTestCase.java new file mode 100644 index 0000000000000..aa250628f7361 --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ShowTablesTestCase.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.util.Locale; + +import static org.elasticsearch.xpack.qa.sql.jdbc.JdbcAssert.assertResultSets; + +public class ShowTablesTestCase extends JdbcIntegrationTestCase { + public void testShowTablesWithoutAnyIndexes() throws Exception { + try (Connection h2 = LocalH2.anonymousDb(); + Connection es = esJdbc()) { + h2.createStatement().executeUpdate("RUNSCRIPT FROM 'classpath:/setup_mock_show_tables.sql'"); + + ResultSet expected = h2.createStatement().executeQuery("SELECT * FROM mock"); + assertResultSets(expected, es.createStatement().executeQuery("SHOW TABLES")); + } + } + + public void testShowTablesWithManyIndices() throws Exception { + try (Connection h2 = LocalH2.anonymousDb(); + Connection es = esJdbc()) { + h2.createStatement().executeUpdate("RUNSCRIPT FROM 'classpath:/setup_mock_show_tables.sql'"); + int indices = between(2, 20); + for (int i = 0; i < indices; i++) { + String index = String.format(Locale.ROOT, "test%02d", i); + index(index, builder -> builder.field("name", "bob")); + h2.createStatement().executeUpdate("INSERT INTO mock VALUES ('" + index + "', 'BASE TABLE');"); + } + + ResultSet expected = h2.createStatement().executeQuery("SELECT * FROM mock ORDER BY name"); + assertResultSets(expected, es.createStatement().executeQuery("SHOW TABLES")); + } + } +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java new file mode 100644 index 0000000000000..7621743481a4f --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SimpleExampleTestCase.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +public class SimpleExampleTestCase extends JdbcIntegrationTestCase { + public void testSimpleExample() throws Exception { + index("library", builder -> { + builder.field("name", "Don Quixote"); + builder.field("page_count", 1072); + }); + try (Connection connection = esJdbc()) { + // tag::simple_example + try (Statement statement = connection.createStatement(); + ResultSet results = statement.executeQuery( + "SELECT name, page_count FROM library ORDER BY page_count DESC LIMIT 1")) { + assertTrue(results.next()); + assertEquals("Don Quixote", results.getString(1)); + assertEquals(1072, results.getInt(2)); + SQLException e = expectThrows(SQLException.class, () -> results.getInt(1)); + assertTrue(e.getMessage(), e.getMessage().contains("unable to convert column 1 to an int")); + assertFalse(results.next()); + } + // end::simple_example + } + } +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java new file mode 100644 index 0000000000000..5a589f94d28d4 --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java @@ -0,0 +1,176 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.junit.AfterClass; +import org.junit.Before; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Tests that compare the Elasticsearch JDBC client to some other JDBC client + * after loading a specific set of test data. + */ +public abstract class SpecBaseIntegrationTestCase extends JdbcIntegrationTestCase { + protected static final String PARAM_FORMATTING = "%2$s.test%3$s"; + + protected final String fileName; + protected final String groupName; + protected final String testName; + protected final Integer lineNumber; + + public SpecBaseIntegrationTestCase(String fileName, String groupName, String testName, Integer lineNumber) { + this.fileName = fileName; + this.groupName = groupName; + this.testName = testName; + this.lineNumber = lineNumber; + } + + @Before + public void setupTestDataIfNeeded() throws Exception { + if (client().performRequest("HEAD", "/test_emp").getStatusLine().getStatusCode() == 404) { + DataLoader.loadDatasetIntoEs(client()); + } + } + + @Override + protected boolean preserveIndicesUponCompletion() { + return true; + } + + @AfterClass + public static void wipeTestData() throws IOException { + try { + adminClient().performRequest("DELETE", "/*"); + } catch (ResponseException e) { + // 404 here just means we had no indexes + if (e.getResponse().getStatusLine().getStatusCode() != 404) { + throw e; + } + } + } + + public final void test() throws Throwable { + try { + doTest(); + } catch (AssertionError ae) { + throw reworkException(ae); + } + } + + /** + * Implementations should pay attention on using + * {@link #executeJdbcQuery(Connection, String)} (typically for + * ES connections) and {@link #assertResults(ResultSet, ResultSet)} + * which takes into account logging/debugging results (through + * {@link #logEsResultSet()}. + */ + protected abstract void doTest() throws Throwable; + + protected ResultSet executeJdbcQuery(Connection con, String query) throws SQLException { + Statement statement = con.createStatement(); + statement.setFetchSize(between(1, 500)); + return statement.executeQuery(query); + } + + protected boolean logEsResultSet() { + return false; + } + + protected void assertResults(ResultSet expected, ResultSet elastic) throws SQLException { + Logger log = logEsResultSet() ? 
logger : null; + JdbcAssert.assertResultSets(expected, elastic, log); + } + + private Throwable reworkException(Throwable th) { + StackTraceElement[] stackTrace = th.getStackTrace(); + StackTraceElement[] redone = new StackTraceElement[stackTrace.length + 1]; + System.arraycopy(stackTrace, 0, redone, 1, stackTrace.length); + redone[0] = new StackTraceElement(getClass().getName(), groupName + ".test" + testName, fileName, lineNumber); + + th.setStackTrace(redone); + return th; + } + + // + // spec reader + // + + // returns source file, groupName, testName, its line location, and the custom object (based on each test parser) + protected static List readScriptSpec(String url, Parser parser) throws Exception { + URL source = SpecBaseIntegrationTestCase.class.getResource(url); + Objects.requireNonNull(source, "Cannot find resource " + url); + + String fileName = source.getFile().substring(source.getFile().lastIndexOf("/") + 1); + String groupName = fileName.substring(fileName.lastIndexOf('/') + 1, fileName.lastIndexOf(".")); + + Map testNames = new LinkedHashMap<>(); + List testCases = new ArrayList<>(); + + String testName = null; + try (BufferedReader reader = new BufferedReader(new InputStreamReader(readFromJarUrl(source), StandardCharsets.UTF_8))) { + String line; + int lineNumber = 1; + while ((line = reader.readLine()) != null) { + line = line.trim(); + // ignore comments + if (!line.isEmpty() && !line.startsWith("//")) { + // parse test name + if (testName == null) { + if (testNames.keySet().contains(line)) { + throw new IllegalStateException("Duplicate test name '" + line + "' at line " + lineNumber + + " (previously seen at line " + testNames.get(line) + ")"); + } else { + testName = Strings.capitalize(line); + testNames.put(testName, Integer.valueOf(lineNumber)); + } + } else { + Object result = parser.parse(line); + // only if the parser is ready, add the object - otherwise keep on serving it lines + if (result != null) { + testCases.add(new Object[] { fileName, groupName, testName, Integer.valueOf(lineNumber), result }); + testName = null; + } + } + } + lineNumber++; + } + if (testName != null) { + throw new IllegalStateException("Read a test without a body at the end of [" + fileName + "]."); + } + } + assertNull("Cannot find spec for test " + testName, testName); + + return testCases; + } + + public interface Parser { + Object parse(String line); + } + + @SuppressForbidden(reason = "test reads from jar") + public static InputStream readFromJarUrl(URL source) throws IOException { + return source.openStream(); + } +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java new file mode 100644 index 0000000000000..f1bcef6f750fc --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; +import org.junit.ClassRule; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; + +/** + * Tests comparing sql queries executed against our jdbc client + * with those executed against H2's jdbc client. + */ +public abstract class SqlSpecTestCase extends SpecBaseIntegrationTestCase { + private String query; + + @ClassRule + public static LocalH2 H2 = new LocalH2((c) -> c.createStatement().execute("RUNSCRIPT FROM 'classpath:/setup_test_emp.sql'")); + + @ParametersFactory(argumentFormatting = PARAM_FORMATTING) + public static List readScriptSpec() throws Exception { + Parser parser = specParser(); + List tests = new ArrayList<>(); + tests.addAll(readScriptSpec("/select.sql-spec", parser)); + tests.addAll(readScriptSpec("/filter.sql-spec", parser)); + tests.addAll(readScriptSpec("/datetime.sql-spec", parser)); + tests.addAll(readScriptSpec("/math.sql-spec", parser)); + tests.addAll(readScriptSpec("/agg.sql-spec", parser)); + tests.addAll(readScriptSpec("/arithmetic.sql-spec", parser)); + return tests; + } + + private static class SqlSpecParser implements Parser { + @Override + public Object parse(String line) { + return line.endsWith(";") ? line.substring(0, line.length() - 1) : line; + } + } + + static SqlSpecParser specParser() { + return new SqlSpecParser(); + } + + public SqlSpecTestCase(String fileName, String groupName, String testName, Integer lineNumber, String query) { + super(fileName, groupName, testName, lineNumber); + this.query = query; + } + + @Override + protected final void doTest() throws Throwable { + try (Connection h2 = H2.get(); + Connection es = esJdbc()) { + + ResultSet expected, elasticResults; + expected = executeJdbcQuery(h2, query); + elasticResults = executeJdbcQuery(es, query); + + assertResults(expected, elasticResults); + } + } + + // TODO: use UTC for now until deciding on a strategy for handling date extraction + @Override + protected Properties connectionProperties() { + Properties connectionProperties = new Properties(); + connectionProperties.setProperty(JdbcConfiguration.TIME_ZONE, "UTC"); + return connectionProperties; + } +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/package-info.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/package-info.java new file mode 100644 index 0000000000000..1825d9033c83f --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/package-info.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Support for integration tests for the Elasticsearch SQL JDBC client + * and integration tests shared between multiple qa projects. 
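+ * The spec-based suites in this package replay shared query files (*.sql-spec and *.csv-spec) against
+ * both Elasticsearch and a reference JDBC source (H2 or CSV-backed result sets) and compare the result sets.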
+ */ +package org.elasticsearch.xpack.qa.sql.jdbc; diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java new file mode 100644 index 0000000000000..8062d7af497de --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java @@ -0,0 +1,630 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.rest; + +import com.fasterxml.jackson.core.io.JsonStringEncoder; + +import org.apache.http.Header; +import org.apache.http.HttpEntity; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.NotEqualMessageBuilder; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.qa.sql.ErrorsTestCase; +import org.hamcrest.Matcher; + +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.sql.JDBCType; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.TreeMap; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static java.util.Collections.unmodifiableMap; +import static org.hamcrest.Matchers.containsString; + +/** + * Integration test for the rest sql action. The one that speaks json directly to a + * user rather than to the JDBC driver or CLI. + */ +public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTestCase { + /** + * Builds that map that is returned in the header for each column. 
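+ * For example, columnInfo("jdbc", "number", "long", JDBCType.BIGINT, 20) yields the name and type plus
+ * jdbc_type (-5, BIGINT's vendor type number) and display_size; in plain mode only name and type are kept.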
+ */ + public static Map columnInfo(String mode, String name, String type, JDBCType jdbcType, int size) { + Map column = new HashMap<>(); + column.put("name", name); + column.put("type", type); + if ("jdbc".equals(mode)) { + column.put("jdbc_type", jdbcType.getVendorTypeNumber()); + column.put("display_size", size); + } + return unmodifiableMap(column); + } + + public void testBasicQuery() throws IOException { + index("{\"test\":\"test\"}", + "{\"test\":\"test\"}"); + + Map expected = new HashMap<>(); + String mode = randomMode(); + expected.put("columns", singletonList(columnInfo(mode, "test", "text", JDBCType.VARCHAR, 0))); + expected.put("rows", Arrays.asList(singletonList("test"), singletonList("test"))); + assertResponse(expected, runSql(mode, "SELECT * FROM test")); + } + + public void testNextPage() throws IOException { + String mode = randomMode(); + StringBuilder bulk = new StringBuilder(); + for (int i = 0; i < 20; i++) { + bulk.append("{\"index\":{\"_id\":\"" + i + "\"}}\n"); + bulk.append("{\"text\":\"text" + i + "\", \"number\":" + i + "}\n"); + } + client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + + String request = "{\"query\":\"" + + " SELECT text, number, SQRT(number) AS s, SCORE()" + + " FROM test" + + " ORDER BY number, SCORE()\", " + + "\"mode\":\"" + mode + "\", " + + "\"fetch_size\":2}"; + + String cursor = null; + for (int i = 0; i < 20; i += 2) { + Map response; + if (i == 0) { + response = runSql(mode, new StringEntity(request, ContentType.APPLICATION_JSON)); + } else { + response = runSql(mode, new StringEntity("{\"cursor\":\"" + cursor + "\"}", + ContentType.APPLICATION_JSON)); + } + + Map expected = new HashMap<>(); + if (i == 0) { + expected.put("columns", Arrays.asList( + columnInfo(mode, "text", "text", JDBCType.VARCHAR, 0), + columnInfo(mode, "number", "long", JDBCType.BIGINT, 20), + columnInfo(mode, "s", "double", JDBCType.DOUBLE, 25), + columnInfo(mode, "SCORE()", "float", JDBCType.REAL, 15))); + } + expected.put("rows", Arrays.asList( + Arrays.asList("text" + i, i, Math.sqrt(i), 1.0), + Arrays.asList("text" + (i + 1), i + 1, Math.sqrt(i + 1), 1.0))); + cursor = (String) response.remove("cursor"); + assertResponse(expected, response); + assertNotNull(cursor); + } + Map expected = new HashMap<>(); + expected.put("rows", emptyList()); + assertResponse(expected, runSql(mode, new StringEntity("{ \"cursor\":\"" + cursor + "\"}", + ContentType.APPLICATION_JSON))); + } + + @AwaitsFix(bugUrl = "Unclear status, https://github.com/elastic/x-pack-elasticsearch/issues/2074") + public void testTimeZone() throws IOException { + String mode = randomMode(); + index("{\"test\":\"2017-07-27 00:00:00\"}", + "{\"test\":\"2017-07-27 01:00:00\"}"); + + Map expected = new HashMap<>(); + expected.put("columns", singletonMap("test", singletonMap("type", "text"))); + expected.put("rows", Arrays.asList(singletonMap("test", "test"), singletonMap("test", "test"))); + expected.put("size", 2); + + // Default TimeZone is UTC + assertResponse(expected, runSql(mode, new StringEntity("{\"query\":\"SELECT DAY_OF_YEAR(test), COUNT(*) FROM test\"}", + ContentType.APPLICATION_JSON))); + } + + public void testScoreWithFieldNamedScore() throws IOException { + String mode = randomMode(); + StringBuilder bulk = new StringBuilder(); + bulk.append("{\"index\":{\"_id\":\"1\"}}\n"); + bulk.append("{\"name\":\"test\", \"score\":10}\n"); + client().performRequest("POST", "/test/test/_bulk", 
singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + + Map expected = new HashMap<>(); + expected.put("columns", Arrays.asList( + columnInfo(mode, "name", "text", JDBCType.VARCHAR, 0), + columnInfo(mode, "score", "long", JDBCType.BIGINT, 20), + columnInfo(mode, "SCORE()", "float", JDBCType.REAL, 15))); + expected.put("rows", singletonList(Arrays.asList( + "test", 10, 1.0))); + + assertResponse(expected, runSql(mode, "SELECT *, SCORE() FROM test ORDER BY SCORE()")); + assertResponse(expected, runSql(mode, "SELECT name, \\\"score\\\", SCORE() FROM test ORDER BY SCORE()")); + } + + public void testSelectWithJoinFails() throws Exception { + // Normal join not supported + expectBadRequest(() -> runSql(randomMode(), "SELECT * FROM test JOIN other"), + containsString("line 1:21: Queries with JOIN are not yet supported")); + // Neither is a self join + expectBadRequest(() -> runSql(randomMode(), "SELECT * FROM test JOIN test"), + containsString("line 1:21: Queries with JOIN are not yet supported")); + // Nor fancy stuff like CTEs + expectBadRequest(() -> runSql(randomMode(), + " WITH evil" + + " AS (SELECT *" + + " FROM foo)" + + "SELECT *" + + " FROM test" + + " JOIN evil"), + containsString("line 1:67: Queries with JOIN are not yet supported")); + } + + public void testSelectDistinctFails() throws Exception { + index("{\"name\":\"test\"}"); + expectBadRequest(() -> runSql(randomMode(), "SELECT DISTINCT name FROM test"), + containsString("line 1:8: SELECT DISTINCT is not yet supported")); + } + + public void testSelectGroupByAllFails() throws Exception { + index("{\"foo\":1}", "{\"foo\":2}"); + expectBadRequest(() -> runSql(randomMode(), "SELECT foo FROM test GROUP BY ALL foo"), + containsString("line 1:32: GROUP BY ALL is not supported")); + } + + public void testSelectWhereExistsFails() throws Exception { + index("{\"foo\":1}", "{\"foo\":2}"); + expectBadRequest(() -> runSql(randomMode(), "SELECT foo FROM test WHERE EXISTS (SELECT * FROM test t WHERE t.foo = test.foo)"), + containsString("line 1:28: EXISTS is not yet supported")); + } + + + @Override + public void testSelectInvalidSql() { + String mode = randomFrom("jdbc", "plain"); + expectBadRequest(() -> runSql(mode, "SELECT * FRO"), containsString("1:8: Cannot determine columns for *")); + } + + @Override + public void testSelectFromMissingIndex() { + String mode = randomFrom("jdbc", "plain"); + expectBadRequest(() -> runSql(mode, "SELECT * FROM missing"), containsString("1:15: Unknown index [missing]")); + } + + @Override + public void testSelectFromIndexWithoutTypes() throws Exception { + // Create an index without any types + client().performRequest("PUT", "/test", emptyMap(), new StringEntity("{}", ContentType.APPLICATION_JSON)); + String mode = randomFrom("jdbc", "plain"); + expectBadRequest(() -> runSql(mode, "SELECT * FROM test"), + containsString("1:15: [test] doesn't have any types so it is incompatible with sql")); + } + + @Override + public void testSelectMissingField() throws IOException { + index("{\"test\":\"test\"}"); + String mode = randomFrom("jdbc", "plain"); + expectBadRequest(() -> runSql(mode, "SELECT foo FROM test"), containsString("1:8: Unknown column [foo]")); + } + + @Override + public void testSelectMissingFunction() throws Exception { + index("{\"foo\":1}"); + expectBadRequest(() -> runSql(randomMode(), "SELECT missing(foo) FROM test"), + containsString("1:8: Unknown function [missing]")); + } + + private void index(String... 
docs) throws IOException { + StringBuilder bulk = new StringBuilder(); + for (String doc : docs) { + bulk.append("{\"index\":{}\n"); + bulk.append(doc + "\n"); + } + client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + } + + @Override + public void testSelectProjectScoreInAggContext() throws Exception { + StringBuilder bulk = new StringBuilder(); + bulk.append("{\"index\":{\"_id\":\"1\"}}\n"); + bulk.append("{\"foo\":1}\n"); + client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + + expectBadRequest(() -> runSql(randomMode(), + " SELECT foo, SCORE(), COUNT(*)" + + " FROM test" + + " GROUP BY foo"), + containsString("Cannot use non-grouped column [SCORE()], expected [foo]")); + } + + @Override + public void testSelectOrderByScoreInAggContext() throws Exception { + StringBuilder bulk = new StringBuilder(); + bulk.append("{\"index\":{\"_id\":\"1\"}}\n"); + bulk.append("{\"foo\":1}\n"); + client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + + expectBadRequest(() -> runSql(randomMode(), + " SELECT foo, COUNT(*)" + + " FROM test" + + " GROUP BY foo" + + " ORDER BY SCORE()"), + containsString("Cannot order by non-grouped column [SCORE()], expected [foo]")); + } + + @Override + public void testSelectGroupByScore() throws Exception { + StringBuilder bulk = new StringBuilder(); + bulk.append("{\"index\":{\"_id\":\"1\"}}\n"); + bulk.append("{\"foo\":1}\n"); + client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + + expectBadRequest(() -> runSql(randomMode(), "SELECT COUNT(*) FROM test GROUP BY SCORE()"), + containsString("Cannot use [SCORE()] for grouping")); + } + + @Override + public void testSelectScoreSubField() throws Exception { + StringBuilder bulk = new StringBuilder(); + bulk.append("{\"index\":{\"_id\":\"1\"}}\n"); + bulk.append("{\"foo\":1}\n"); + client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + + expectBadRequest(() -> runSql(randomMode(), "SELECT SCORE().bar FROM test"), + containsString("line 1:15: extraneous input '.' 
expecting {, ','")); + } + + @Override + public void testSelectScoreInScalar() throws Exception { + StringBuilder bulk = new StringBuilder(); + bulk.append("{\"index\":{\"_id\":\"1\"}}\n"); + bulk.append("{\"foo\":1}\n"); + client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + + expectBadRequest(() -> runSql(randomMode(), "SELECT SIN(SCORE()) FROM test"), + containsString("line 1:12: [SCORE()] cannot be an argument to a function")); + } + + private void expectBadRequest(CheckedSupplier, Exception> code, Matcher errorMessageMatcher) { + try { + Map result = code.get(); + fail("expected ResponseException but got " + result); + } catch (ResponseException e) { + if (400 != e.getResponse().getStatusLine().getStatusCode()) { + String body; + try { + body = Streams.copyToString(new InputStreamReader( + e.getResponse().getEntity().getContent(), StandardCharsets.UTF_8)); + } catch (IOException bre) { + throw new RuntimeException("error reading body after remote sent bad status", bre); + } + fail("expected [400] response but get [" + e.getResponse().getStatusLine().getStatusCode() + "] with body:\n" + body); + } + assertThat(e.getMessage(), errorMessageMatcher); + } catch (Exception e) { + throw new AssertionError("expected ResponseException but got [" + e.getClass() + "]", e); + } + } + + private Map runSql(String mode, String sql) throws IOException { + return runSql(mode, sql, ""); + } + + private Map runSql(String mode, String sql, String suffix) throws IOException { + return runSql(mode, new StringEntity("{\"query\":\"" + sql + "\"}", ContentType.APPLICATION_JSON), suffix); + } + + private Map runSql(String mode, HttpEntity sql) throws IOException { + return runSql(mode, sql, ""); + } + + private Map runSql(String mode, HttpEntity sql, String suffix) throws IOException { + Map params = new TreeMap<>(); + params.put("error_trace", "true"); // Helps with debugging in case something crazy happens on the server. 
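As a usage sketch of the helpers in this file (index, columnInfo, runSql, assertResponse), the hypothetical test below restates testBasicQuery with the mode pinned to "jdbc", which is the case where columnInfo adds the jdbc_type and display_size entries to each expected column. It is an illustration only, not part of this change.

// Hypothetical test, shown for illustration only.
// Pinning the mode to "jdbc" makes columnInfo include jdbc_type and display_size.
public void testJdbcModeColumnMetadata() throws IOException {
    index("{\"test\":\"test\"}");

    Map<String, Object> expected = new HashMap<>();
    expected.put("columns", singletonList(columnInfo("jdbc", "test", "text", JDBCType.VARCHAR, 0)));
    expected.put("rows", singletonList(singletonList("test")));
    assertResponse(expected, runSql("jdbc", "SELECT test FROM test"));
}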
+ params.put("pretty", "true"); // Improves error reporting readability + if (randomBoolean()) { + // We default to JSON but we force it randomly for extra coverage + params.put("format", "json"); + } + if (Strings.hasText(mode)) { + params.put("mode", mode); // JDBC or PLAIN mode + } + Header[] headers = randomFrom( + new Header[] {}, + new Header[] {new BasicHeader("Accept", "*/*")}, + new Header[] {new BasicHeader("Accpet", "application/json")}); + Response response = client().performRequest("POST", "/_xpack/sql" + suffix, params, sql); + try (InputStream content = response.getEntity().getContent()) { + return XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false); + } + } + + public void testBasicTranslateQuery() throws IOException { + StringBuilder bulk = new StringBuilder(); + bulk.append("{\"index\":{\"_id\":\"1\"}}\n"); + bulk.append("{\"test\":\"test\"}\n"); + bulk.append("{\"index\":{\"_id\":\"2\"}}\n"); + bulk.append("{\"test\":\"test\"}\n"); + client().performRequest("POST", "/test_translate/test/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + + Map response = runSql(randomMode(), "SELECT * FROM test_translate", "/translate/"); + assertEquals(response.get("size"), 1000); + @SuppressWarnings("unchecked") + Map source = (Map) response.get("_source"); + assertNotNull(source); + assertEquals(emptyList(), source.get("excludes")); + assertEquals(singletonList("test"), source.get("includes")); + } + + public void testBasicQueryWithFilter() throws IOException { + String mode = randomMode(); + index("{\"test\":\"foo\"}", + "{\"test\":\"bar\"}"); + + Map expected = new HashMap<>(); + expected.put("columns", singletonList(columnInfo(mode, "test", "text", JDBCType.VARCHAR, 0))); + expected.put("rows", singletonList(singletonList("foo"))); + assertResponse(expected, runSql(mode, new StringEntity("{\"query\":\"SELECT * FROM test\", " + + "\"filter\":{\"match\": {\"test\": \"foo\"}}}", + ContentType.APPLICATION_JSON))); + } + + public void testBasicQueryWithParameters() throws IOException { + String mode = randomMode(); + index("{\"test\":\"foo\"}", + "{\"test\":\"bar\"}"); + + Map expected = new HashMap<>(); + expected.put("columns", Arrays.asList( + columnInfo(mode, "test", "text", JDBCType.VARCHAR, 0), + columnInfo(mode, "param", "integer", JDBCType.INTEGER, 11) + )); + expected.put("rows", singletonList(Arrays.asList("foo", 10))); + assertResponse(expected, runSql(mode, new StringEntity("{\"query\":\"SELECT test, ? 
param FROM test WHERE test = ?\", " + + "\"params\":[{\"type\": \"integer\", \"value\": 10}, {\"type\": \"keyword\", \"value\": \"foo\"}]}", + ContentType.APPLICATION_JSON))); + } + + public void testBasicTranslateQueryWithFilter() throws IOException { + index("{\"test\":\"foo\"}", + "{\"test\":\"bar\"}"); + + Map response = runSql("", + new StringEntity("{\"query\":\"SELECT * FROM test\", \"filter\":{\"match\": {\"test\": \"foo\"}}}", + ContentType.APPLICATION_JSON), "/translate/" + ); + + assertEquals(response.get("size"), 1000); + @SuppressWarnings("unchecked") + Map source = (Map) response.get("_source"); + assertNotNull(source); + assertEquals(emptyList(), source.get("excludes")); + assertEquals(singletonList("test"), source.get("includes")); + + @SuppressWarnings("unchecked") + Map query = (Map) response.get("query"); + assertNotNull(query); + + @SuppressWarnings("unchecked") + Map constantScore = (Map) query.get("constant_score"); + assertNotNull(constantScore); + + @SuppressWarnings("unchecked") + Map filter = (Map) constantScore.get("filter"); + assertNotNull(filter); + + @SuppressWarnings("unchecked") + Map match = (Map) filter.get("match"); + assertNotNull(match); + + @SuppressWarnings("unchecked") + Map matchQuery = (Map) match.get("test"); + assertNotNull(matchQuery); + assertEquals("foo", matchQuery.get("query")); + } + + public void testBasicQueryText() throws IOException { + index("{\"test\":\"test\"}", + "{\"test\":\"test\"}"); + + String expected = + " test \n" + + "---------------\n" + + "test \n" + + "test \n"; + Tuple response = runSqlAsText("SELECT * FROM test", "text/plain"); + assertEquals(expected, response.v1()); + } + + public void testNextPageText() throws IOException { + StringBuilder bulk = new StringBuilder(); + for (int i = 0; i < 20; i++) { + bulk.append("{\"index\":{\"_id\":\"" + i + "\"}}\n"); + bulk.append("{\"text\":\"text" + i + "\", \"number\":" + i + "}\n"); + } + client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"), + new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + + String request = "{\"query\":\"SELECT text, number, number + 5 AS sum FROM test ORDER BY number\", \"fetch_size\":2}"; + + String cursor = null; + for (int i = 0; i < 20; i += 2) { + Tuple response; + if (i == 0) { + response = runSqlAsText("", new StringEntity(request, ContentType.APPLICATION_JSON), "text/plain"); + } else { + response = runSqlAsText("", new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON), + "text/plain"); + } + + StringBuilder expected = new StringBuilder(); + if (i == 0) { + expected.append(" text | number | sum \n"); + expected.append("---------------+---------------+---------------\n"); + } + expected.append(String.format(Locale.ROOT, "%-15s|%-15d|%-15d\n", "text" + i, i, i + 5)); + expected.append(String.format(Locale.ROOT, "%-15s|%-15d|%-15d\n", "text" + (i + 1), i + 1, i + 6)); + cursor = response.v2(); + assertEquals(expected.toString(), response.v1()); + assertNotNull(cursor); + } + Map expected = new HashMap<>(); + expected.put("rows", emptyList()); + assertResponse(expected, runSql("", new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON))); + + Map response = runSql("", new StringEntity("{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON), + "/close"); + assertEquals(true, response.get("succeeded")); + + assertEquals(0, getNumberOfSearchContexts("test")); + } + + // CSV/TSV tests + + private static String toJson(String value) { + return 
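The plain-text paging test above also touches the /close endpoint. A hypothetical test built on the same helpers (index, runSql, getNumberOfSearchContexts) isolates that flow: request a small first page, then abandon the remaining pages by closing the cursor, after which no search context should stay open. Again, this is an illustration, not part of this change.

// Hypothetical test, shown for illustration only.
// A cursor can be released before the result set is exhausted by POSTing it to /close.
public void testCloseCursorEarly() throws IOException {
    index("{\"test\":\"one\"}", "{\"test\":\"two\"}");

    Map<String, Object> firstPage = runSql("", new StringEntity(
            "{\"query\":\"SELECT * FROM test\", \"fetch_size\":1}", ContentType.APPLICATION_JSON));
    String cursor = (String) firstPage.get("cursor");
    assertNotNull(cursor);

    Map<String, Object> closed = runSql("", new StringEntity(
            "{\"cursor\":\"" + cursor + "\"}", ContentType.APPLICATION_JSON), "/close");
    assertEquals(true, closed.get("succeeded"));
    assertEquals(0, getNumberOfSearchContexts("test"));
}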
"\"" + new String(JsonStringEncoder.getInstance().quoteAsString(value)) + "\""; + } + + public void testDefaultQueryInCSV() throws IOException { + index("{\"name\":" + toJson("first") + ", \"number\" : 1 }", + "{\"name\":" + toJson("second\t") + ", \"number\": 2 }", + "{\"name\":" + toJson("\"third,\"") + ", \"number\": 3 }"); + + String expected = + "name,number\r\n" + + "first,1\r\n" + + "second\t,2\r\n" + + "\"\"\"third,\"\"\",3\r\n"; + + String query = "SELECT * FROM test ORDER BY number"; + Tuple response = runSqlAsText(query, "text/csv"); + assertEquals(expected, response.v1()); + + response = runSqlAsTextFormat(query, "csv"); + assertEquals(expected, response.v1()); + } + + public void testQueryWithoutHeaderInCSV() throws IOException { + index("{\"name\":" + toJson("first") + ", \"number\" : 1 }", + "{\"name\":" + toJson("second\t") + ", \"number\": 2 }", + "{\"name\":" + toJson("\"third,\"") + ", \"number\": 3 }"); + + String expected = + "first,1\r\n" + + "second\t,2\r\n" + + "\"\"\"third,\"\"\",3\r\n"; + + String query = "SELECT * FROM test ORDER BY number"; + Tuple response = runSqlAsText(query, "text/csv; header=absent"); + assertEquals(expected, response.v1()); + } + + public void testQueryInTSV() throws IOException { + index("{\"name\":" + toJson("first") + ", \"number\" : 1 }", + "{\"name\":" + toJson("second\t") + ", \"number\": 2 }", + "{\"name\":" + toJson("\"third,\"") + ", \"number\": 3 }"); + + String expected = + "name\tnumber\n" + + "first\t1\n" + + "second\\t\t2\n" + + "\"third,\"\t3\n"; + + String query = "SELECT * FROM test ORDER BY number"; + Tuple response = runSqlAsText(query, "text/tab-separated-values"); + assertEquals(expected, response.v1()); + response = runSqlAsTextFormat(query, "tsv"); + assertEquals(expected, response.v1()); + } + + private Tuple runSqlAsText(String sql, String accept) throws IOException { + return runSqlAsText("", new StringEntity("{\"query\":\"" + sql + "\"}", ContentType.APPLICATION_JSON), accept); + } + + private Tuple runSqlAsText(String suffix, HttpEntity entity, String accept) throws IOException { + Response response = client().performRequest("POST", "/_xpack/sql" + suffix, singletonMap("error_trace", "true"), + entity, new BasicHeader("Accept", accept)); + return new Tuple<>( + Streams.copyToString(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8)), + response.getHeader("Cursor") + ); + } + + private Tuple runSqlAsTextFormat(String sql, String format) throws IOException { + StringEntity entity = new StringEntity("{\"query\":\"" + sql + "\"}", ContentType.APPLICATION_JSON); + + Map params = new HashMap<>(); + params.put("error_trace", "true"); + params.put("format", format); + + Response response = client().performRequest("POST", "/_xpack/sql", params, entity); + return new Tuple<>( + Streams.copyToString(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8)), + response.getHeader("Cursor") + ); + } + + private void assertResponse(Map expected, Map actual) { + if (false == expected.equals(actual)) { + NotEqualMessageBuilder message = new NotEqualMessageBuilder(); + message.compareMaps(actual, expected); + fail("Response does not match:\n" + message.toString()); + } + } + + public static int getNumberOfSearchContexts(String index) throws IOException { + Response response = client().performRequest("GET", "/_stats/search"); + Map stats; + try (InputStream content = response.getEntity().getContent()) { + stats = XContentHelper.convertToMap(JsonXContent.jsonXContent, content, 
false); + } + return getOpenContexts(stats, index); + } + + public static void assertNoSearchContexts() throws IOException { + Response response = client().performRequest("GET", "/_stats/search"); + Map stats; + try (InputStream content = response.getEntity().getContent()) { + stats = XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false); + } + @SuppressWarnings("unchecked") + Map indexStats = (Map) stats.get("indices"); + for (String index : indexStats.keySet()) { + if (index.startsWith(".") == false) { // We are not interested in internal indices + assertEquals(index + " should have no search contexts", 0, getOpenContexts(stats, index)); + } + } + } + + @SuppressWarnings("unchecked") + public static int getOpenContexts(Map indexStats, String index) { + return (int) ((Map) ((Map) ((Map) ((Map) + indexStats.get("indices")).get(index)).get("total")).get("search")).get("open_contexts"); + } + + public static String randomMode() { + return randomFrom("", "jdbc", "plain"); + } +} diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/package-info.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/package-info.java new file mode 100644 index 0000000000000..1a061730c60bf --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/package-info.java @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +/** + * Integration tests shared between multiple qa projects. + */ +package org.elasticsearch.xpack.qa.sql.rest; diff --git a/x-pack/qa/sql/src/main/resources/agg.csv-spec b/x-pack/qa/sql/src/main/resources/agg.csv-spec new file mode 100644 index 0000000000000..0d7b0e1476020 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/agg.csv-spec @@ -0,0 +1,93 @@ +// +// Aggs not supported by H2 / traditional SQL stores +// + +singlePercentileWithoutComma +SELECT gender, PERCENTILE(emp_no, 97) p1 FROM test_emp GROUP BY gender; + +gender:s | p1:d +F | 10099.1936 +M | 10095.6112 +; + +singlePercentileWithComma +SELECT gender, PERCENTILE(emp_no, 97.76) p1 FROM test_emp GROUP BY gender; + +gender:s | p1:d +F | 10099.1936 +M | 10095.6112 +; + +multiplePercentilesOneWithCommaOneWithout +SELECT gender, PERCENTILE(emp_no, 92.45) p1, PERCENTILE(emp_no, 91) p2 FROM test_emp GROUP BY gender; + +gender:s | p1:d | p2:d +F | 10096.826000000001 | 10094.68 +M | 10090.319 | 10089.320000000002 +; + +multiplePercentilesWithoutComma +SELECT gender, PERCENTILE(emp_no, 91) p1, PERCENTILE(emp_no, 89) p2 FROM test_emp GROUP BY gender; + +gender:s | p1:d | p2:d +F | 10094.68 | 10092.08 +M | 10089.320000000002 | 10085.18 +; + +multiplePercentilesWithComma +SELECT gender, PERCENTILE(emp_no, 85.7) p1, PERCENTILE(emp_no, 94.3) p2 FROM test_emp GROUP BY gender; + +gender:s | p1:d | p2:d +F | 10088.852 | 10097.792 +M | 10083.134 | 10091.932 +; + +percentileRank +SELECT gender, PERCENTILE_RANK(emp_no, 10025) rank FROM test_emp GROUP BY gender; + +gender:s | rank:d +F | 26.351351351351347 +M | 23.41269841269841 +; + +multiplePercentileRanks +SELECT gender, PERCENTILE_RANK(emp_no, 10030.0) rank1, PERCENTILE_RANK(emp_no, 10025) rank2 FROM test_emp GROUP BY gender; + +gender:s | rank1:d | rank2:d +F | 29.93762993762994 | 26.351351351351347 +M | 29.365079365079367 | 23.41269841269841 +; + +multiplePercentilesAndPercentileRank +SELECT gender, 
PERCENTILE(emp_no, 97.76) p1, PERCENTILE(emp_no, 93.3) p2, PERCENTILE_RANK(emp_no, 10025) rank FROM test_emp GROUP BY gender; + +gender:s | p1:d | p2:d | rank:d +F | 10099.1936 | 10098.021 | 26.351351351351347 +M | 10095.6112 | 10090.846 | 23.41269841269841 +; + +// Simple sum used in documentation +sum +// tag::sum +SELECT SUM(salary) FROM test_emp; +// end::sum + SUM(salary) +--------------- +4824855 +; + +kurtosisAndSkewnessNoGroup +SELECT KURTOSIS(emp_no) k, SKEWNESS(salary) s FROM test_emp; + +k:d | s:d +1.7997599759975997 | 0.2707722118423227 +; + +kurtosisAndSkewnessGroup +SELECT gender, KURTOSIS(salary) k, SKEWNESS(salary) s FROM test_emp GROUP BY gender; + +gender:s | k:d | s:d + +F | 1.8427808415250482 | 0.04517149340491813 +M | 2.259327644285826 | 0.40268950715550333 +; \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/resources/agg.sql-spec b/x-pack/qa/sql/src/main/resources/agg.sql-spec new file mode 100644 index 0000000000000..f778458dfe2bf --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/agg.sql-spec @@ -0,0 +1,283 @@ +// +// Group-By +// + +// +// H2 seems to return data in DESCending order while composite returns it in ASCending +// hence while all queries have the ORDER specified +// + +groupByOnText +SELECT gender g FROM "test_emp" GROUP BY gender ORDER BY gender ASC; +groupByOnTextWithWhereClause +SELECT gender g FROM "test_emp" WHERE emp_no < 10020 GROUP BY gender ORDER BY gender; +groupByOnTextWithWhereAndLimit +SELECT gender g FROM "test_emp" WHERE emp_no < 10020 GROUP BY gender ORDER BY gender LIMIT 1; +groupByOnTextOnAlias +SELECT gender g FROM "test_emp" WHERE emp_no < 10020 GROUP BY g ORDER BY gender; +groupByOnTextOnAliasOrderDesc +SELECT gender g FROM "test_emp" WHERE emp_no < 10020 GROUP BY g ORDER BY g DESC; + +groupByOnDate +SELECT birth_date b FROM "test_emp" GROUP BY birth_date ORDER BY birth_date DESC; +groupByOnDateWithWhereClause +SELECT birth_date b FROM "test_emp" WHERE emp_no < 10020 GROUP BY birth_date ORDER BY birth_date DESC; +groupByOnDateWithWhereAndLimit +SELECT birth_date b FROM "test_emp" WHERE emp_no < 10020 GROUP BY birth_date ORDER BY birth_date DESC LIMIT 1; +groupByOnDateOnAlias +SELECT birth_date b FROM "test_emp" WHERE emp_no < 10020 GROUP BY b ORDER BY birth_date DESC; + +groupByOnNumber +SELECT emp_no e FROM "test_emp" GROUP BY emp_no ORDER BY emp_no ASC; +groupByOnNumberWithWhereClause +SELECT emp_no e FROM "test_emp" WHERE emp_no < 10020 GROUP BY emp_no ORDER BY emp_no DESC; +groupByOnNumberWithWhereAndLimit +SELECT emp_no e FROM "test_emp" WHERE emp_no < 10020 GROUP BY emp_no ORDER BY emp_no DESC LIMIT 1; +groupByOnNumberOnAlias +SELECT emp_no e FROM "test_emp" WHERE emp_no < 10020 GROUP BY e ORDER BY emp_no DESC; + +// group by scalar +groupByAddScalar +SELECT emp_no + 1 AS e FROM test_emp GROUP BY e ORDER BY e; +groupByMinScalarDesc +SELECT emp_no - 1 AS e FROM test_emp GROUP BY e ORDER BY e DESC; +groupByAddScalarDesc +SELECT emp_no % 2 AS e FROM test_emp GROUP BY e ORDER BY e DESC; +groupByMulScalar +SELECT emp_no * 2 AS e FROM test_emp GROUP BY e ORDER BY e; +groupByModScalar +SELECT (emp_no % 3) + 1 AS e FROM test_emp GROUP BY e ORDER BY e; + +// +// Aggregate Functions +// + +// COUNT +aggCountImplicit +// tag::countStar +SELECT COUNT(*) AS count FROM test_emp; +// end::countStar +aggCountImplicitWithCast +SELECT CAST(COUNT(*) AS INT) c FROM "test_emp"; +aggCountImplicitWithConstant +SELECT COUNT(1) FROM "test_emp"; +aggCountImplicitWithConstantAndFilter +SELECT COUNT(1) FROM "test_emp" WHERE emp_no < 
10010; +aggCountAliasAndWhereClause +SELECT gender g, COUNT(*) c FROM "test_emp" WHERE emp_no < 10020 GROUP BY gender ORDER BY gender; +aggCountAliasAndWhereClauseAndLimit +SELECT gender g, COUNT(*) c FROM "test_emp" WHERE emp_no < 10020 GROUP BY gender ORDER BY gender LIMIT 1; +aggCountAliasWithCastAndFilter +SELECT gender g, CAST(COUNT(*) AS INT) c FROM "test_emp" WHERE emp_no < 10020 GROUP BY gender ORDER BY gender; +aggCountWithAlias +SELECT gender g, COUNT(*) c FROM "test_emp" GROUP BY g ORDER BY gender; +countDistinct +// tag::countDistinct +SELECT COUNT(DISTINCT hire_date) AS count FROM test_emp; +// end::countDistinct + + +// Conditional COUNT +aggCountAndHaving +SELECT gender g, COUNT(*) c FROM "test_emp" GROUP BY g HAVING COUNT(*) > 10 ORDER BY gender; +aggCountOnColumnAndHaving +SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING COUNT(gender) > 10 ORDER BY gender; +// NOT supported yet since Having introduces a new agg +aggCountOnColumnAndWildcardAndHaving +SELECT gender g, COUNT(*) c FROM "test_emp" GROUP BY g HAVING COUNT(gender) > 10 ORDER BY gender; +aggCountAndHavingOnAlias +SELECT gender g, COUNT(*) c FROM "test_emp" GROUP BY g HAVING c > 10 ORDER BY gender; +aggCountOnColumnAndHavingOnAlias +SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c > 10 ORDER BY gender; +aggCountOnColumnAndMultipleHaving +SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c > 10 AND c < 70 ORDER BY gender ; +aggCountOnColumnAndMultipleHavingWithLimit +SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c > 10 AND c < 70 ORDER BY gender LIMIT 1; +aggCountOnColumnAndHavingBetween +SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c BETWEEN 10 AND 70 ORDER BY gender; +aggCountOnColumnAndHavingBetweenWithLimit +SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c BETWEEN 10 AND 70 ORDER BY gender LIMIT 1; + +aggCountOnColumnAndHavingOnAliasAndFunction +SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c > 10 AND COUNT(gender) < 70 ORDER BY gender; +// NOT supported yet since Having introduces a new agg +aggCountOnColumnAndHavingOnAliasAndFunctionWildcard -> COUNT(*/1) vs COUNT(gender) +SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c > 10 AND COUNT(*) < 70 ORDER BY gender; +aggCountOnColumnAndHavingOnAliasAndFunctionConstant +SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c > 10 AND COUNT(1) < 70 ORDER BY gender; + + +// MIN +aggMinImplicit +// tag::min +SELECT MIN(emp_no) AS min FROM test_emp; +// end::min +aggMinImplicitWithCast +SELECT CAST(MIN(emp_no) AS SMALLINT) m FROM "test_emp"; +aggMin +SELECT gender g, MIN(emp_no) m FROM "test_emp" GROUP BY gender ORDER BY gender; +aggMinWithCast +SELECT CAST(MIN(emp_no) AS SMALLINT) m FROM "test_emp" GROUP BY gender ORDER BY gender; +aggMinAndCount +SELECT MIN(emp_no) m, COUNT(1) c FROM "test_emp" GROUP BY gender ORDER BY gender; +aggMinAndCountWithFilter +SELECT MIN(emp_no) m, COUNT(1) c FROM "test_emp" WHERE emp_no < 10020 GROUP BY gender ORDER BY gender; +aggMinAndCountWithFilterAndLimit +SELECT MIN(emp_no) m, COUNT(1) c FROM "test_emp" WHERE emp_no < 10020 GROUP BY gender ORDER BY gender LIMIT 1; +aggMinWithCastAndFilter +SELECT gender g, CAST(MIN(emp_no) AS SMALLINT) m, COUNT(1) c FROM "test_emp" WHERE emp_no < 10020 GROUP BY gender ORDER BY gender; +aggMinWithAlias +SELECT gender g, MIN(emp_no) m FROM "test_emp" GROUP BY g ORDER BY gender; + +// Conditional MIN +aggMinWithHaving +SELECT gender g, 
MIN(emp_no) m FROM "test_emp" GROUP BY g HAVING MIN(emp_no) > 10 ORDER BY gender; +aggMinWithHavingOnAlias +SELECT gender g, MIN(emp_no) m FROM "test_emp" GROUP BY g HAVING m > 10 ORDER BY gender; +aggMinWithMultipleHaving +SELECT gender g, MIN(emp_no) m FROM "test_emp" GROUP BY g HAVING m > 10 AND m < 99999 ORDER BY gender; +aggMinWithMultipleHavingBetween +SELECT gender g, MIN(emp_no) m FROM "test_emp" GROUP BY g HAVING m BETWEEN 10 AND 99999 ORDER BY gender; +aggMinWithMultipleHavingWithLimit +SELECT gender g, MIN(emp_no) m FROM "test_emp" GROUP BY g HAVING m > 10 AND m < 99999 ORDER BY g LIMIT 1; +aggMinWithMultipleHavingBetween +SELECT gender g, MIN(emp_no) m FROM "test_emp" GROUP BY g HAVING m BETWEEN 10 AND 99999 ORDER BY g LIMIT 1; +aggMinWithMultipleHavingOnAliasAndFunction +SELECT gender g, MIN(emp_no) m FROM "test_emp" GROUP BY g HAVING m > 10 AND MIN(emp_no) < 99999 ORDER BY gender; + +// MAX +aggMaxImplicit +// tag::max +SELECT MAX(salary) AS max FROM test_emp; +// end::max +aggMaxImplicitWithCast +SELECT CAST(MAX(emp_no) AS SMALLINT) c FROM "test_emp"; +aggMax +SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY gender ORDER BY gender; +aggMaxWithCast +SELECT gender g, CAST(MAX(emp_no) AS SMALLINT) m FROM "test_emp" GROUP BY gender ORDER BY gender; +aggMaxAndCount +SELECT MAX(emp_no) m, COUNT(1) c FROM "test_emp" GROUP BY gender ORDER BY gender; +aggMaxAndCountWithFilter +SELECT gender g, MAX(emp_no) m, COUNT(1) c FROM "test_emp" WHERE emp_no > 10000 GROUP BY gender ORDER BY gender ; +aggMaxAndCountWithFilterAndLimit +SELECT gender g, MAX(emp_no) m, COUNT(1) c FROM "test_emp" WHERE emp_no > 10000 GROUP BY gender ORDER BY gender LIMIT 1; +aggMaxWithAlias +SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY g ORDER BY gender; + +// Conditional MAX +aggMaxWithHaving +SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY g HAVING MAX(emp_no) > 10 ORDER BY g ; +aggMaxWithHavingOnAlias +SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY g HAVING m > 10 ORDER BY g ; +aggMaxWithMultipleHaving +SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY g HAVING m > 10 AND m < 99999 ORDER BY gender; +aggMaxWithMultipleHavingBetween +SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY g HAVING m BETWEEN 10 AND 99999 ORDER BY g ; +aggMaxWithMultipleHavingWithLimit +SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY g HAVING m > 10 AND m < 99999 ORDER BY g LIMIT 1; +aggMaxWithMultipleHavingBetweenWithLimit +SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY g HAVING m BETWEEN 10 AND 99999 ORDER BY g LIMIT 1; +aggMaxWithMultipleHavingOnAliasAndFunction +SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY g HAVING m > 10 AND MAX(emp_no) < 99999 ORDER BY gender; + +// SUM +aggSumImplicitWithCast +SELECT CAST(SUM(emp_no) AS BIGINT) s FROM "test_emp"; +aggSumWithCast +SELECT gender g, CAST(SUM(emp_no) AS BIGINT) s FROM "test_emp" GROUP BY gender ORDER BY gender; +aggSumWithCastAndCount +SELECT gender g, CAST(SUM(emp_no) AS BIGINT) s, COUNT(1) c FROM "test_emp" GROUP BY g ORDER BY gender; +aggSumWithCastAndCountWithFilter +SELECT gender g, CAST(SUM(emp_no) AS BIGINT) s, COUNT(1) c FROM "test_emp" WHERE emp_no > 10000 GROUP BY g ORDER BY gender; +aggSumWithCastAndCountWithFilterAndLimit +SELECT gender g, CAST(SUM(emp_no) AS BIGINT) s, COUNT(1) c FROM "test_emp" WHERE emp_no > 10000 GROUP BY g ORDER BY g LIMIT 1; +aggSumWithAlias +SELECT gender g, CAST(SUM(emp_no) AS BIGINT) s FROM "test_emp" GROUP BY g ORDER BY gender; + +// Conditional SUM +aggSumWithHaving +SELECT 
gender g, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY g HAVING SUM(emp_no) > 10 ORDER BY gender; +aggSumWithHavingOnAlias +SELECT gender g, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY g HAVING s > 10 ORDER BY gender; +aggSumWithMultipleHaving +SELECT gender g, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY g HAVING s > 10 AND s < 10000000 ORDER BY gender; +aggSumWithMultipleHavingBetween +SELECT gender g, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY g HAVING s BETWEEN 10 AND 10000000 ORDER BY gender; +aggSumWithMultipleHavingWithLimit +SELECT gender g, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY g HAVING s > 10 AND s < 10000000 ORDER BY g LIMIT 1; +aggSumWithMultipleHavingBetweenWithLimit +SELECT gender g, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY g HAVING s BETWEEN 10 AND 10000000 ORDER BY g LIMIT 1; +aggSumWithMultipleHavingOnAliasAndFunction +SELECT gender g, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY g HAVING s > 10 AND SUM(emp_no) > 10000000 ORDER BY gender; + +// AVG +aggAvgImplicitWithCast +SELECT CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp"; +aggAvgWithCastToFloat +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY gender ORDER BY gender; +// casting to an exact type - varchar, bigint, etc... will likely fail due to rounding error +aggAvgWithCastToDouble +SELECT gender g, CAST(AVG(emp_no) AS DOUBLE) a FROM "test_emp" GROUP BY gender ORDER BY gender; +aggAvg +// tag::avg +SELECT AVG(salary) AS avg FROM test_emp; +// end::avg +aggAvgWithCastAndCount +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a, COUNT(1) c FROM "test_emp" GROUP BY gender ORDER BY gender; +aggAvgWithCastAndCountWithFilter +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a, COUNT(1) c FROM "test_emp" WHERE emp_no > 10000 GROUP BY gender ORDER BY gender; +aggAvgWithCastAndCountWithFilterAndLimit +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a, COUNT(1) c FROM "test_emp" WHERE emp_no > 10000 GROUP BY gender ORDER BY gender LIMIT 1; +aggAvgWithAlias +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY g ORDER BY gender; + +// Conditional AVG +aggAvgWithHaving +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY g HAVING AVG(emp_no) > 10 ORDER BY g ; +aggAvgWithHavingOnAlias +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY g HAVING a > 10 ORDER BY gender; +aggAvgWithMultipleHaving +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY g HAVING a > 10 AND a < 10000000 ORDER BY g ; +aggAvgWithMultipleHavingBetween +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY g HAVING a BETWEEN 10 AND 10000000 ORDER BY g ; +aggAvgWithMultipleHavingWithLimit +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY g HAVING a > 10 AND a < 10000000 ORDER BY g LIMIT 1; +aggAvgWithMultipleHavingBetweenWithLimit +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY g HAVING a BETWEEN 10 AND 10000000 ORDER BY g LIMIT 1; +aggAvgWithMultipleHavingOnAliasAndFunction +SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY g HAVING a > 10 AND AVG(emp_no) > 10000000 ORDER BY g ; + +// +// GroupBy on Scalar plus Having +// +aggGroupByOnScalarWithHaving +SELECT emp_no + 1 AS e FROM test_emp GROUP BY e HAVING AVG(salary) BETWEEN 1 AND 10010 ORDER BY e; + +// +// Mixture of Aggs that triggers promotion of aggs to stats +// +aggMultiIncludingScalarFunction +SELECT MIN(salary) mi, MAX(salary) ma, MAX(salary) - MIN(salary) AS d FROM test_emp 
GROUP BY languages ORDER BY languages; +aggHavingWithAggNotInGroupBy +SELECT MIN(salary) mi, MAX(salary) ma, MAX(salary) - MIN(salary) AS d FROM test_emp GROUP BY languages HAVING AVG(salary) > 30000 ORDER BY languages; +aggHavingWithAliasOnScalarFromGroupBy +SELECT MIN(salary) mi, MAX(salary) ma, MAX(salary) - MIN(salary) AS d FROM test_emp GROUP BY languages HAVING d BETWEEN 50 AND 10000 AND AVG(salary) > 30000 ORDER BY languages; +aggHavingWithScalarFunctionBasedOnAliasFromGroupBy +SELECT MIN(salary) mi, MAX(salary) ma, MAX(salary) - MIN(salary) AS d FROM test_emp GROUP BY languages HAVING ma % mi > 1 AND AVG(salary) > 30000 ORDER BY languages; +aggHavingWithMultipleScalarFunctionsBasedOnAliasFromGroupBy +SELECT MIN(salary) mi, MAX(salary) ma, MAX(salary) - MIN(salary) AS d FROM test_emp GROUP BY languages HAVING d - ma % mi > 0 AND AVG(salary) > 30000 ORDER BY languages; +aggHavingWithMultipleScalarFunctionsBasedOnAliasFromGroupByAndAggNotInGroupBy +SELECT MIN(salary) mi, MAX(salary) ma, MAX(salary) - MIN(salary) AS d FROM test_emp GROUP BY languages HAVING ROUND(d - ABS(ma % mi)) + AVG(salary) > 0 AND AVG(salary) > 30000 ORDER BY languages; +aggHavingScalarOnAggFunctionsWithoutAliasesInAndNotInGroupBy +SELECT MIN(salary) mi, MAX(salary) ma, MAX(salary) - MIN(salary) AS d FROM test_emp GROUP BY languages HAVING MAX(salary) % MIN(salary) + AVG(salary) > 3000 ORDER BY languages; + +// +// Mixture of aggs that get promoted plus filtering on one of them +// +aggMultiWithHaving +SELECT MIN(salary) min, MAX(salary) max, gender g, COUNT(*) c FROM "test_emp" WHERE languages > 0 GROUP BY g HAVING max > 74600 ORDER BY gender; + +// filter on count (which is a special agg) +aggMultiWithHavingOnCount +SELECT MIN(salary) min, MAX(salary) max, gender g, COUNT(*) c FROM "test_emp" WHERE languages > 0 GROUP BY g HAVING c > 40 ORDER BY gender; \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/resources/alias.csv-spec b/x-pack/qa/sql/src/main/resources/alias.csv-spec new file mode 100644 index 0000000000000..839d2cba79451 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/alias.csv-spec @@ -0,0 +1,107 @@ +tableAlias +SELECT emp_no, first_name FROM test_alias ORDER BY emp_no LIMIT 6; + +emp_no:i | first_name:s + +10001 | Georgi +10001 | Georgi +10002 | Bezalel +10002 | Bezalel +10003 | Parto +10003 | Parto +; + +tablePattern +SELECT emp_no, first_name FROM test_alias ORDER BY emp_no LIMIT 6; +emp_no:i | first_name:s + +10001 | Georgi +10001 | Georgi +10002 | Bezalel +10002 | Bezalel +10003 | Parto +10003 | Parto +; + +describeAlias +DESCRIBE test_alias; + +column:s | type:s + +birth_date | TIMESTAMP +dep | STRUCT +dep.dep_id | VARCHAR +dep.dep_name | VARCHAR +dep.dep_name.keyword | VARCHAR +dep.from_date | TIMESTAMP +dep.to_date | TIMESTAMP +emp_no | INTEGER +first_name | VARCHAR +first_name.keyword | VARCHAR +gender | VARCHAR +hire_date | TIMESTAMP +languages | TINYINT +last_name | VARCHAR +last_name.keyword | VARCHAR +salary | INTEGER +; + +describePattern +DESCRIBE test_*; + +column:s | type:s + +birth_date | TIMESTAMP +dep | STRUCT +dep.dep_id | VARCHAR +dep.dep_name | VARCHAR +dep.dep_name.keyword | VARCHAR +dep.from_date | TIMESTAMP +dep.to_date | TIMESTAMP +emp_no | INTEGER +first_name | VARCHAR +first_name.keyword | VARCHAR +gender | VARCHAR +hire_date | TIMESTAMP +languages | TINYINT +last_name | VARCHAR +last_name.keyword | VARCHAR +salary | INTEGER +; + +showAlias +SHOW TABLES LIKE 'test\_alias' ESCAPE '\'; + +name:s | type:s + +test_alias | ALIAS +; + +showPattern +SHOW TABLES 
LIKE 'test_%'; + +name:s | type:s + +test_alias | ALIAS +test_alias_emp | ALIAS +test_emp | BASE TABLE +test_emp_copy | BASE TABLE +; + +testGroupByOnAlias +SELECT gender g, PERCENTILE(emp_no, 97) p1 FROM test_alias GROUP BY g ORDER BY g DESC; + +g:s | p1:d + +M | 10095.75 +F | 10099.28 +; + +testGroupByOnPattern +SELECT gender, PERCENTILE(emp_no, 97) p1 FROM test_* GROUP BY gender; + +gender:s | p1:d + +F | 10099.28 +M | 10095.75 +; \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/resources/arithmetic.csv-spec b/x-pack/qa/sql/src/main/resources/arithmetic.csv-spec new file mode 100644 index 0000000000000..4d8a9fc3fc2cf --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/arithmetic.csv-spec @@ -0,0 +1,13 @@ +// +// Arithmetic tests outside H2 +// + +// the standard behavior here is to return the constant for each element +// the weird thing is that an actual query needs to be ran +arithmeticWithFrom +SELECT 5 - 2 x FROM test_emp; + +x +3 +; + diff --git a/x-pack/qa/sql/src/main/resources/arithmetic.sql-spec b/x-pack/qa/sql/src/main/resources/arithmetic.sql-spec new file mode 100644 index 0000000000000..c9ff79dca0d70 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/arithmetic.sql-spec @@ -0,0 +1,87 @@ +// +// Arithmetic tests +// + +unaryMinus +// tag::unaryMinus +SELECT - 1 AS x; +// end::unaryMinus +plus +// tag::plus +SELECT 1 + 1 AS x; +// end::plus +minus +// tag::minus +SELECT 1 - 1 AS x; +// end::minus +divide +// tag::divide +SELECT 6 / 3 AS x; +// end::divide +multiply +// tag::multiply +SELECT 2 * 3 AS x; +// end::multiply +mod +// tag::mod +SELECT 5 % 2 AS x; +// end::mod +operatorsPriority +SELECT 1 + 3 * 4 / 2 - 2 AS x; +operatorsPriorityWithParanthesis +SELECT ((1 + 3) * 2 / (3 - 1)) * 2 AS x; +literalAliasing +SELECT 2 + 3 AS x, 'foo' y; + +// variable scalar arithmetic +scalarVariablePlus +SELECT emp_no + 10000 AS x FROM test_emp; +scalarVariableMinus +SELECT emp_no - 10000 AS x FROM test_emp; +scalarVariableMul +SELECT emp_no * 10000 AS x FROM test_emp; +scalarVariableDiv +SELECT emp_no / 10000 AS x FROM test_emp; +scalarVariableMod +SELECT emp_no % 10000 AS x FROM test_emp; +scalarVariableMultipleInputs +SELECT (emp_no % 10000) + YEAR(hire_date) AS x FROM test_emp; +scalarVariableTwoInputs +SELECT (emp_no % 10000) + YEAR(hire_date) AS x FROM test_emp; +scalarVariableThreeInputs +SELECT ((emp_no % 10000) + YEAR(hire_date)) / MONTH(birth_date) AS x FROM test_emp; +scalarVariableArithmeticAndEntry +SELECT emp_no, emp_no % 10000 AS x FROM test_emp; +scalarVariableTwoInputsAndEntry +SELECT emp_no, (emp_no % 10000) + YEAR(hire_date) AS x FROM test_emp; +scalarVariableThreeInputsAndEntry +SELECT emp_no, ((emp_no % 10000) + YEAR(hire_date)) / MONTH(birth_date) AS x FROM test_emp; + + +// variable scalar agg +aggVariablePlus +SELECT COUNT(*) + 10000 AS x FROM test_emp GROUP BY gender ORDER BY gender; +aggVariableMinus +SELECT COUNT(*) - 10000 AS x FROM test_emp GROUP BY gender ORDER BY gender; +aggVariableMul +SELECT COUNT(*) * 2 AS x FROM test_emp GROUP BY gender ORDER BY gender; +aggVariableDiv +SELECT COUNT(*) / 5000 AS x FROM test_emp GROUP BY gender ORDER BY gender; +aggVariableMod +SELECT COUNT(*) % 10000 AS x FROM test_emp GROUP BY gender ORDER BY gender; +aggVariableTwoInputs +SELECT MAX(emp_no) - MIN(emp_no) AS x FROM test_emp GROUP BY gender ORDER BY gender; +aggVariableThreeInputs +SELECT (MAX(emp_no) - MIN(emp_no)) + AVG(emp_no) AS x FROM test_emp GROUP BY gender ORDER BY gender; + +// ordering +orderByPlus +SELECT emp_no FROM test_emp ORDER BY 
emp_no + 2 LIMIT 10; +orderByNegative +SELECT emp_no FROM test_emp ORDER BY -emp_no LIMIT 10; +orderByMinusDesc +SELECT emp_no FROM test_emp ORDER BY -emp_no DESC LIMIT 10; +orderByModulo +SELECT emp_no FROM test_emp ORDER BY emp_no % 10000 LIMIT 10; +orderByMul +SELECT emp_no FROM test_emp ORDER BY emp_no * 2 LIMIT 10; diff --git a/x-pack/qa/sql/src/main/resources/columns.csv-spec b/x-pack/qa/sql/src/main/resources/columns.csv-spec new file mode 100644 index 0000000000000..a88d509a5a6ee --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/columns.csv-spec @@ -0,0 +1,15 @@ +// +// Test of explicit column types +// the columns can be specified as or as +// if at least one column has an explicit column type, all columns should have an explicit type +// type might be missing in which case it will be autodetected or can be one of the following +// d - double, f - float, i - int, b - byte, l - long, t - timestamp, date + + +columnDetectionOverride +SELECT gender, FLOOR(PERCENTILE(emp_no, 97.76)) p1 FROM test_emp GROUP BY gender ORDER BY gender DESC; + +gender:s | p1:l +M | 10096 +F | 10099 +; \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/resources/command-sys.csv-spec b/x-pack/qa/sql/src/main/resources/command-sys.csv-spec new file mode 100644 index 0000000000000..38f3772f76c32 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/command-sys.csv-spec @@ -0,0 +1,11 @@ +// +// Sys Commands +// + +sysTableTypes +SYS TABLE TYPES; + + TABLE_TYPE:s +BASE TABLE +ALIAS +; diff --git a/x-pack/qa/sql/src/main/resources/command.csv-spec b/x-pack/qa/sql/src/main/resources/command.csv-spec new file mode 100644 index 0000000000000..d54fb6bf1554c --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/command.csv-spec @@ -0,0 +1,144 @@ +// +// Commands +// + +// SHOW_FUNCTIONS +showFunctions +SHOW FUNCTIONS; + + name:s | type:s +AVG |AGGREGATE +COUNT |AGGREGATE +MAX |AGGREGATE +MIN |AGGREGATE +SUM |AGGREGATE +STDDEV_POP |AGGREGATE +VAR_POP |AGGREGATE +PERCENTILE |AGGREGATE +PERCENTILE_RANK |AGGREGATE +SUM_OF_SQUARES |AGGREGATE +SKEWNESS |AGGREGATE +KURTOSIS |AGGREGATE +DAY_OF_MONTH |SCALAR +DAY |SCALAR +DOM |SCALAR +DAY_OF_WEEK |SCALAR +DOW |SCALAR +DAY_OF_YEAR |SCALAR +DOY |SCALAR +HOUR_OF_DAY |SCALAR +HOUR |SCALAR +MINUTE_OF_DAY |SCALAR +MINUTE_OF_HOUR |SCALAR +MINUTE |SCALAR +SECOND_OF_MINUTE|SCALAR +SECOND |SCALAR +MONTH_OF_YEAR |SCALAR +MONTH |SCALAR +YEAR |SCALAR +WEEK_OF_YEAR |SCALAR +WEEK |SCALAR +ABS |SCALAR +ACOS |SCALAR +ASIN |SCALAR +ATAN |SCALAR +ATAN2 |SCALAR +CBRT |SCALAR +CEIL |SCALAR +CEILING |SCALAR +COS |SCALAR +COSH |SCALAR +COT |SCALAR +DEGREES |SCALAR +E |SCALAR +EXP |SCALAR +EXPM1 |SCALAR +FLOOR |SCALAR +LOG |SCALAR +LOG10 |SCALAR +MOD |SCALAR +PI |SCALAR +POWER |SCALAR +RADIANS |SCALAR +RANDOM |SCALAR +RAND |SCALAR +ROUND |SCALAR +SIGN |SCALAR +SIGNUM |SCALAR +SIN |SCALAR +SINH |SCALAR +SQRT |SCALAR +TAN |SCALAR +SCORE |SCORE +; + +showFunctionsWithExactMatch +SHOW FUNCTIONS LIKE 'ABS'; + + name:s | type:s +ABS |SCALAR +; + + +showFunctionsWithPatternWildcard +SHOW FUNCTIONS LIKE 'A%'; + + name:s | type:s +AVG |AGGREGATE +ABS |SCALAR +ACOS |SCALAR +ASIN |SCALAR +ATAN |SCALAR +ATAN2 |SCALAR +; + +showFunctionsWithPatternChar +SHOW FUNCTIONS LIKE 'A__'; + + name:s | type:s +AVG |AGGREGATE +ABS |SCALAR +; + +showFunctionsWithLeadingPattern +SHOW FUNCTIONS '%DAY%'; + + name:s | type:s +DAY_OF_MONTH |SCALAR +DAY |SCALAR +DAY_OF_WEEK |SCALAR +DAY_OF_YEAR |SCALAR +HOUR_OF_DAY |SCALAR +MINUTE_OF_DAY |SCALAR +; + +showTables +SHOW TABLES 'test_emp'; + + name:s | type:s +test_emp 
|BASE TABLE +; + +// DESCRIBE + +describe +DESCRIBE "test_emp"; + + column:s | type:s +birth_date | TIMESTAMP +dep | STRUCT +dep.dep_id | VARCHAR +dep.dep_name | VARCHAR +dep.dep_name.keyword | VARCHAR +dep.from_date | TIMESTAMP +dep.to_date | TIMESTAMP +emp_no | INTEGER +first_name | VARCHAR +first_name.keyword | VARCHAR +gender | VARCHAR +hire_date | TIMESTAMP +languages | TINYINT +last_name | VARCHAR +last_name.keyword | VARCHAR +salary | INTEGER +; diff --git a/x-pack/qa/sql/src/main/resources/datetime.csv-spec b/x-pack/qa/sql/src/main/resources/datetime.csv-spec new file mode 100644 index 0000000000000..d5984d7671010 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/datetime.csv-spec @@ -0,0 +1,275 @@ +// +// DateTime +// + +// +// Time (H2 doesn't support these for Timezone with timestamp) +// +// + +dateTimeSecond +SELECT SECOND(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; + +d:i | l:s +0 | Facello +0 | Simmel +0 | Bamford +0 | Koblick +0 | Maliniak +0 | Preusig +0 | Zielinski +0 | Kalloufi +0 | Peac +; + +dateTimeMinute +SELECT MINUTE(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; + +d:i | l:s +0 | Facello +0 | Simmel +0 | Bamford +0 | Koblick +0 | Maliniak +0 | Preusig +0 | Zielinski +0 | Kalloufi +0 | Peac +; + +dateTimeHour +SELECT HOUR(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; + +d:i | l:s +0 | Facello +0 | Simmel +0 | Bamford +0 | Koblick +0 | Maliniak +0 | Preusig +0 | Zielinski +0 | Kalloufi +0 | Peac + +; + +// +// Date (in H2 these start at 0 instead of 1...) +// +dateTimeDayOfWeek +SELECT DAY_OF_WEEK(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY DAY_OF_WEEK(birth_date); + +d:i | l:s +1 | Preusig +2 | Simmel +3 | Facello +3 | Kalloufi +4 | Bamford +4 | Zielinski +5 | Maliniak +6 | Koblick +6 | Peac +; + +dateTimeDayOfYear +SELECT DAY_OF_YEAR(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; + +d:i | l:s +245 | Facello +154 | Simmel +337 | Bamford +121 | Koblick +21 | Maliniak +110 | Preusig +143 | Zielinski +50 | Kalloufi +110 | Peac +; + +// +// Aggregate +// + +dateTimeAggByYear +SELECT YEAR(birth_date) AS d, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY YEAR(birth_date) ORDER BY YEAR(birth_date) LIMIT 13; + +d:i | s:i +1952 | 90472 +1953 | 110398 +1954 | 80447 +1955 | 40240 +1956 | 60272 +1957 | 50280 +1958 | 70225 +1959 | 110517 +1960 | 100501 +1961 | 100606 +1962 | 60361 +1963 | 80372 +1964 | 40264 +; + +dateTimeAggByMonth +SELECT MONTH(birth_date) AS d, COUNT(*) AS c, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY MONTH(birth_date) ORDER BY MONTH(birth_date) DESC; + +d:i | c:l | s:i +12 | 7 | 70325 +11 | 8 | 80439 +10 | 9 | 90517 +9 | 13 | 130688 +8 | 8 | 80376 +7 | 11 | 110486 +6 | 8 | 80314 +5 | 10 | 100573 +4 | 9 | 90450 +3 | 2 | 20164 +2 | 9 | 90430 +1 | 6 | 60288 +; + +dateTimeAggByDayOfMonth +SELECT DAY_OF_MONTH(birth_date) AS d, COUNT(*) AS c, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY DAY_OF_MONTH(birth_date) ORDER BY DAY_OF_MONTH(birth_date) DESC; + +d:i | c:l | s:i +31 | 1 | 10025 +30 | 2 | 20147 +29 | 3 | 30104 +28 | 2 | 20125 +27 | 3 | 30169 +26 | 4 | 40190 +25 | 5 | 50443 +24 | 2 | 20069 +23 | 7 | 70413 +22 | 1 | 10037 +21 | 6 | 60359 +20 | 4 | 40135 +19 | 8 | 80299 +18 | 2 | 20169 +17 | 1 | 10081 +16 | 1 | 10096 +15 | 2 | 20132 +14 | 4 | 40173 +13 | 5 | 50264 +12 | 1 | 10014 +11 | 2 | 20141 +10 | 2 | 20063 +9 | 3 | 30189 +8 | 2 | 20057 +7 | 5 | 50240 +6 | 4 | 40204 
+5 | 2 | 20103 +4 | 3 | 30157 +3 | 4 | 40204 +2 | 4 | 40081 +1 | 5 | 50167 +; + +constantYear +// tag::year +SELECT YEAR(CAST('2018-02-19T10:23:27Z' AS TIMESTAMP)) AS year; + + year +--------------- +2018 +// end::year +; + +constantMonthOfYear +// tag::monthOfYear +SELECT MONTH_OF_YEAR(CAST('2018-02-19T10:23:27Z' AS TIMESTAMP)) AS month; + + month +--------------- +2 +// end::monthOfYear +; + +constantWeekOfYear +// tag::weekOfYear +SELECT WEEK_OF_YEAR(CAST('2018-02-19T10:23:27Z' AS TIMESTAMP)) AS week; + + week +--------------- +8 +// end::weekOfYear +; + +constantDayOfYear +// tag::dayOfYear +SELECT DAY_OF_YEAR(CAST('2018-02-19T10:23:27Z' AS TIMESTAMP)) AS day; + + day +--------------- +50 +// end::dayOfYear +; + +extractDayOfYear +// tag::extractDayOfYear +SELECT EXTRACT(DAY_OF_YEAR FROM CAST('2018-02-19T10:23:27Z' AS TIMESTAMP)) AS day; + + day +--------------- +50 +// end::extractDayOfYear +; + +constantDayOfMonth +// tag::dayOfMonth +SELECT DAY_OF_MONTH(CAST('2018-02-19T10:23:27Z' AS TIMESTAMP)) AS day; + + day +--------------- +19 +// end::dayOfMonth +; + +constantDayOfWeek +// tag::dayOfWeek +SELECT DAY_OF_WEEK(CAST('2018-02-19T10:23:27Z' AS TIMESTAMP)) AS day; + + day +--------------- +1 +// end::dayOfWeek +; + +constantHourOfDay +// tag::hourOfDay +SELECT HOUR_OF_DAY(CAST('2018-02-19T10:23:27Z' AS TIMESTAMP)) AS hour; + + hour +--------------- +10 +// end::hourOfDay +; + +constantMinuteOfDay +// tag::minuteOfDay +SELECT MINUTE_OF_DAY(CAST('2018-02-19T10:23:27Z' AS TIMESTAMP)) AS minute; + + minute +--------------- +623 +// end::minuteOfDay +; + +constantMinuteOfHour +// tag::minuteOfHour +SELECT MINUTE_OF_HOUR(CAST('2018-02-19T10:23:27Z' AS TIMESTAMP)) AS minute; + + minute +--------------- +23 +// end::minuteOfHour +; + +constantSecondOfMinute +// tag::secondOfMinute +SELECT SECOND_OF_MINUTE(CAST('2018-02-19T10:23:27Z' AS TIMESTAMP)) AS second; + + second +--------------- +27 +// end::secondOfMinute +; diff --git a/x-pack/qa/sql/src/main/resources/datetime.sql-spec b/x-pack/qa/sql/src/main/resources/datetime.sql-spec new file mode 100644 index 0000000000000..20ea8329c8f4d --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/datetime.sql-spec @@ -0,0 +1,45 @@ +// +// DateTime +// + +// +// Time NOT IMPLEMENTED in H2 on TIMESTAMP WITH TIME ZONE - hence why these are moved to CSV +// + +// +// Date +// + +dateTimeDay +SELECT DAY(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +dateTimeDayOfMonth +SELECT DAY_OF_MONTH(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +dateTimeMonth +SELECT MONTH(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +dateTimeYear +SELECT YEAR(birth_date) d, last_name l FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; + +// +// Filter +// +dateTimeFilterDayOfMonth +SELECT DAY_OF_MONTH(birth_date) AS d, last_name l FROM "test_emp" WHERE DAY_OF_MONTH(birth_date) <= 10 ORDER BY emp_no LIMIT 5; +dateTimeFilterMonth +SELECT MONTH(birth_date) AS d, last_name l FROM "test_emp" WHERE MONTH(birth_date) <= 5 ORDER BY emp_no LIMIT 5; +dateTimeFilterYear +SELECT YEAR(birth_date) AS d, last_name l FROM "test_emp" WHERE YEAR(birth_date) <= 1960 ORDER BY emp_no LIMIT 5; + + +// +// Aggregate +// + + +dateTimeAggByYear +SELECT YEAR(birth_date) AS d, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY YEAR(birth_date) ORDER BY YEAR(birth_date) LIMIT 13; + +dateTimeAggByMonth +SELECT MONTH(birth_date) AS d, COUNT(*) AS c, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY 
MONTH(birth_date) ORDER BY MONTH(birth_date) DESC; + +dateTimeAggByDayOfMonth +SELECT DAY_OF_MONTH(birth_date) AS d, COUNT(*) AS c, CAST(SUM(emp_no) AS INT) s FROM "test_emp" GROUP BY DAY_OF_MONTH(birth_date) ORDER BY DAY_OF_MONTH(birth_date) DESC; diff --git a/x-pack/qa/sql/src/main/resources/debug.csv-spec b/x-pack/qa/sql/src/main/resources/debug.csv-spec new file mode 100644 index 0000000000000..a76b656cfa07b --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/debug.csv-spec @@ -0,0 +1,20 @@ +// +// Spec used for debugging a certain test (without having to alter the spec suite of which it might be part of) +// + +debug +SELECT first_name f, last_name l, dep.from_date d FROM test_emp WHERE dep.dep_name = 'Production' ORDER BY f LIMIT 5; + +f:s | l:s | d:ts + +Alain | Chappelet | 589420800000 +Chirstian | Koblick | 533779200000 +Duangkaew | Piveteau | 848793600000 +Elvis | Demeyer | 761443200000 +Gino | Leonhardt | 607996800000 +; + +//SELECT YEAR(dep.from_date) start FROM test_emp WHERE dep.dep_name = 'Production' GROUP BY start LIMIT 5; +//table:s +//test_emp +//; \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/resources/debug.sql-spec b/x-pack/qa/sql/src/main/resources/debug.sql-spec new file mode 100644 index 0000000000000..cd03b4764b73d --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/debug.sql-spec @@ -0,0 +1,6 @@ +// +// Spec used for debugging a certain test (without having to alter the spec suite of which it might be part of) +// + +debug +SELECT 5 + 2 AS a; diff --git a/x-pack/qa/sql/src/main/resources/dep_emp.csv b/x-pack/qa/sql/src/main/resources/dep_emp.csv new file mode 100644 index 0000000000000..ece933b394130 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/dep_emp.csv @@ -0,0 +1,111 @@ +emp_no,dep_id,from_date,to_date +10001,d005,1986-06-26,9999-01-01 +10002,d007,1996-08-03,9999-01-01 +10003,d004,1995-12-03,9999-01-01 +10004,d004,1986-12-01,9999-01-01 +10005,d003,1989-09-12,9999-01-01 +10006,d005,1990-08-05,9999-01-01 +10007,d008,1989-02-10,9999-01-01 +10008,d005,1998-03-11,2000-07-31 +10009,d006,1985-02-18,9999-01-01 +10010,d004,1996-11-24,2000-06-26 +10010,d006,2000-06-26,9999-01-01 +10011,d009,1990-01-22,1996-11-09 +10012,d005,1992-12-18,9999-01-01 +10013,d003,1985-10-20,9999-01-01 +10014,d005,1993-12-29,9999-01-01 +10015,d008,1992-09-19,1993-08-22 +10016,d007,1998-02-11,9999-01-01 +10017,d001,1993-08-03,9999-01-01 +10018,d004,1992-07-29,9999-01-01 +10018,d005,1987-04-03,1992-07-29 +10019,d008,1999-04-30,9999-01-01 +10020,d004,1997-12-30,9999-01-01 +10021,d005,1988-02-10,2002-07-15 +10022,d005,1999-09-03,9999-01-01 +10023,d005,1999-09-27,9999-01-01 +10024,d004,1998-06-14,9999-01-01 +10025,d005,1987-08-17,1997-10-15 +10026,d004,1995-03-20,9999-01-01 +10027,d005,1995-04-02,9999-01-01 +10028,d005,1991-10-22,1998-04-06 +10029,d004,1991-09-18,1999-07-08 +10029,d006,1999-07-08,9999-01-01 +10030,d004,1994-02-17,9999-01-01 +10031,d005,1991-09-01,9999-01-01 +10032,d004,1990-06-20,9999-01-01 +10033,d006,1987-03-18,1993-03-24 +10034,d007,1995-04-12,1999-10-31 +10035,d004,1988-09-05,9999-01-01 +10036,d003,1992-04-28,9999-01-01 +10037,d005,1990-12-05,9999-01-01 +10038,d009,1989-09-20,9999-01-01 +10039,d003,1988-01-19,9999-01-01 +10040,d005,1993-02-14,2002-01-22 +10040,d008,2002-01-22,9999-01-01 +10041,d007,1989-11-12,9999-01-01 +10042,d002,1993-03-21,2000-08-10 +10043,d005,1990-10-20,9999-01-01 +10044,d004,1994-05-21,9999-01-01 +10045,d004,1996-11-16,9999-01-01 +10046,d008,1992-06-20,9999-01-01 +10047,d004,1989-03-31,9999-01-01 
+10048,d005,1985-02-24,1987-01-27 +10049,d009,1992-05-04,9999-01-01 +10050,d002,1990-12-25,1992-11-05 +10050,d007,1992-11-05,9999-01-01 +10051,d004,1992-10-15,9999-01-01 +10052,d008,1997-01-31,9999-01-01 +10053,d007,1994-11-13,9999-01-01 +10054,d003,1995-07-29,9999-01-01 +10055,d001,1992-04-27,1995-07-22 +10056,d005,1990-02-01,9999-01-01 +10057,d005,1992-01-15,9999-01-01 +10058,d001,1988-04-25,9999-01-01 +10059,d002,1991-06-26,9999-01-01 +10060,d007,1989-05-28,1992-11-11 +10060,d009,1992-11-11,9999-01-01 +10061,d007,1989-12-02,9999-01-01 +10062,d005,1991-08-30,9999-01-01 +10063,d004,1989-04-08,9999-01-01 +10064,d008,1985-11-20,1992-03-02 +10065,d005,1998-05-24,9999-01-01 +10066,d005,1986-02-26,9999-01-01 +10067,d006,1987-03-04,9999-01-01 +10068,d007,1987-08-07,9999-01-01 +10069,d004,1992-06-14,9999-01-01 +10070,d005,1985-10-14,1995-10-18 +10070,d008,1995-10-18,9999-01-01 +10071,d003,1995-08-05,9999-01-01 +10072,d005,1989-05-21,9999-01-01 +10073,d006,1998-02-02,1998-02-22 +10074,d005,1990-08-13,9999-01-01 +10075,d005,1988-05-17,2001-01-15 +10076,d005,1996-07-15,9999-01-01 +10077,d003,1994-12-23,9999-01-01 +10078,d005,1994-09-29,9999-01-01 +10079,d005,1995-12-13,9999-01-01 +10080,d002,1994-09-28,1997-07-09 +10080,d003,1997-07-09,9999-01-01 +10081,d004,1986-10-30,9999-01-01 +10082,d008,1990-01-03,1990-01-15 +10083,d004,1987-03-31,9999-01-01 +10084,d004,1995-12-15,9999-01-01 +10085,d004,1994-04-09,9999-01-01 +10086,d003,1992-02-19,9999-01-01 +10087,d007,1997-05-08,2001-01-09 +10088,d007,1988-09-02,1992-03-21 +10088,d009,1992-03-21,9999-01-01 +10089,d007,1989-01-10,9999-01-01 +10090,d005,1986-03-14,1999-05-07 +10091,d005,1992-11-18,9999-01-01 +10092,d005,1996-04-22,9999-01-01 +10093,d007,1997-06-08,9999-01-01 +10094,d008,1987-04-18,1997-11-08 +10095,d007,1994-03-10,9999-01-01 +10096,d004,1999-01-23,9999-01-01 +10097,d008,1990-09-15,9999-01-01 +10098,d004,1985-05-13,1989-06-29 +10098,d009,1989-06-29,1992-12-11 +10099,d007,1988-10-18,9999-01-01 +10100,d003,1987-09-21,9999-01-01 diff --git a/x-pack/qa/sql/src/main/resources/departments.csv b/x-pack/qa/sql/src/main/resources/departments.csv new file mode 100644 index 0000000000000..8d86313fce163 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/departments.csv @@ -0,0 +1,10 @@ +dep_id,dep_name +d001,Marketing +d002,Finance +d003,Human Resources +d004,Production +d005,Development +d006,Quality Management +d007,Sales +d008,Research +d009,Customer Service diff --git a/x-pack/qa/sql/src/main/resources/employees.csv b/x-pack/qa/sql/src/main/resources/employees.csv new file mode 100644 index 0000000000000..4425a4b592f70 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/employees.csv @@ -0,0 +1,101 @@ +birth_date,emp_no,first_name,gender,hire_date,languages,last_name,salary +1953-09-02T00:00:00Z,10001,Georgi,M,1986-06-26T00:00:00Z,2,Facello,57305 +1964-06-02T00:00:00Z,10002,Bezalel,F,1985-11-21T00:00:00Z,5,Simmel,56371 +1959-12-03T00:00:00Z,10003,Parto,M,1986-08-28T00:00:00Z,4,Bamford,61805 +1954-05-01T00:00:00Z,10004,Chirstian,M,1986-12-01T00:00:00Z,5,Koblick,36174 +1955-01-21T00:00:00Z,10005,Kyoichi,M,1989-09-12T00:00:00Z,1,Maliniak,63528 +1953-04-20T00:00:00Z,10006,Anneke,F,1989-06-02T00:00:00Z,3,Preusig,60335 +1957-05-23T00:00:00Z,10007,Tzvetan,F,1989-02-10T00:00:00Z,4,Zielinski,74572 +1958-02-19T00:00:00Z,10008,Saniya,M,1994-09-15T00:00:00Z,2,Kalloufi,43906 +1952-04-19T00:00:00Z,10009,Sumant,F,1985-02-18T00:00:00Z,1,Peac,66174 +1963-06-01T00:00:00Z,10010,Duangkaew,F,1989-08-24T00:00:00Z,4,Piveteau,45797 
+1953-11-07T00:00:00Z,10011,Mary,F,1990-01-22T00:00:00Z,5,Sluis,31120 +1960-10-04T00:00:00Z,10012,Patricio,M,1992-12-18T00:00:00Z,5,Bridgland,48942 +1963-06-07T00:00:00Z,10013,Eberhardt,M,1985-10-20T00:00:00Z,1,Terkki,48735 +1956-02-12T00:00:00Z,10014,Berni,M,1987-03-11T00:00:00Z,5,Genin,37137 +1959-08-19T00:00:00Z,10015,Guoxiang,M,1987-07-02T00:00:00Z,5,Nooteboom,25324 +1961-05-02T00:00:00Z,10016,Kazuhito,M,1995-01-27T00:00:00Z,2,Cappelletti,61358 +1958-07-06T00:00:00Z,10017,Cristinel,F,1993-08-03T00:00:00Z,2,Bouloucos,58715 +1954-06-19T00:00:00Z,10018,Kazuhide,F,1987-04-03T00:00:00Z,2,Peha,56760 +1953-01-23T00:00:00Z,10019,Lillian,M,1999-04-30T00:00:00Z,1,Haddadi,73717 +1952-12-24T00:00:00Z,10020,Mayuko,M,1991-01-26T00:00:00Z,3,Warwick,40031 +1960-02-20T00:00:00Z,10021,Ramzi,M,1988-02-10T00:00:00Z,5,Erde,60408 +1952-07-08T00:00:00Z,10022,Shahaf,M,1995-08-22T00:00:00Z,3,Famili,48233 +1953-09-29T00:00:00Z,10023,Bojan,F,1989-12-17T00:00:00Z,2,Montemayor,47896 +1958-09-05T00:00:00Z,10024,Suzette,F,1997-05-19T00:00:00Z,3,Pettey,64675 +1958-10-31T00:00:00Z,10025,Prasadram,M,1987-08-17T00:00:00Z,5,Heyers,47411 +1953-04-03T00:00:00Z,10026,Yongqiao,M,1995-03-20T00:00:00Z,3,Berztiss,28336 +1962-07-10T00:00:00Z,10027,Divier,F,1989-07-07T00:00:00Z,5,Reistad,73851 +1963-11-26T00:00:00Z,10028,Domenick,M,1991-10-22T00:00:00Z,1,Tempesti,39356 +1956-12-13T00:00:00Z,10029,Otmar,M,1985-11-20T00:00:00Z,3,Herbst,74999 +1958-07-14T00:00:00Z,10030,Elvis,M,1994-02-17T00:00:00Z,3,Demeyer,67492 +1959-01-27T00:00:00Z,10031,Karsten,M,1991-09-01T00:00:00Z,4,Joslin,37716 +1960-08-09T00:00:00Z,10032,Jeong,F,1990-06-20T00:00:00Z,3,Reistad,62233 +1956-11-14T00:00:00Z,10033,Arif,M,1987-03-18T00:00:00Z,1,Merlo,70011 +1962-12-29T00:00:00Z,10034,Bader,M,1988-09-21T00:00:00Z,1,Swan,39878 +1953-02-08T00:00:00Z,10035,Alain,M,1988-09-05T00:00:00Z,5,Chappelet,25945 +1959-08-10T00:00:00Z,10036,Adamantios,M,1992-01-03T00:00:00Z,4,Portugali,60781 +1963-07-22T00:00:00Z,10037,Pradeep,M,1990-12-05T00:00:00Z,2,Makrucki,37691 +1960-07-20T00:00:00Z,10038,Huan,M,1989-09-20T00:00:00Z,4,Lortz,35222 +1959-10-01T00:00:00Z,10039,Alejandro,M,1988-01-19T00:00:00Z,2,Brender,36051 +1959-09-13T00:00:00Z,10040,Weiyi,F,1993-02-14T00:00:00Z,4,Meriste,37112 +1959-08-27T00:00:00Z,10041,Uri,F,1989-11-12T00:00:00Z,1,Lenart,56415 +1956-02-26T00:00:00Z,10042,Magy,F,1993-03-21T00:00:00Z,3,Stamatiou,30404 +1960-09-19T00:00:00Z,10043,Yishay,M,1990-10-20T00:00:00Z,1,Tzvieli,34341 +1961-09-21T00:00:00Z,10044,Mingsen,F,1994-05-21T00:00:00Z,1,Casley,39728 +1957-08-14T00:00:00Z,10045,Moss,M,1989-09-02T00:00:00Z,3,Shanbhogue,74970 +1960-07-23T00:00:00Z,10046,Lucien,M,1992-06-20T00:00:00Z,4,Rosenbaum,50064 +1952-06-29T00:00:00Z,10047,Zvonko,M,1989-03-31T00:00:00Z,4,Nyanchama,42716 +1963-07-11T00:00:00Z,10048,Florian,M,1985-02-24T00:00:00Z,3,Syrotiuk,26436 +1961-04-24T00:00:00Z,10049,Basil,F,1992-05-04T00:00:00Z,5,Tramer,37853 +1958-05-21T00:00:00Z,10050,Yinghua,M,1990-12-25T00:00:00Z,2,Dredge,43026 +1953-07-28T00:00:00Z,10051,Hidefumi,M,1992-10-15T00:00:00Z,3,Caine,58121 +1961-02-26T00:00:00Z,10052,Heping,M,1988-05-21T00:00:00Z,1,Nitsch,55360 +1954-09-13T00:00:00Z,10053,Sanjiv,F,1986-02-04T00:00:00Z,3,Zschoche,54462 +1957-04-04T00:00:00Z,10054,Mayumi,M,1995-03-13T00:00:00Z,4,Schueller,65367 +1956-06-06T00:00:00Z,10055,Georgy,M,1992-04-27T00:00:00Z,5,Dredge,49281 +1961-09-01T00:00:00Z,10056,Brendon,F,1990-02-01T00:00:00Z,2,Bernini,33370 +1954-05-30T00:00:00Z,10057,Ebbe,F,1992-01-15T00:00:00Z,4,Callaway,27215 
+1954-10-01T00:00:00Z,10058,Berhard,M,1987-04-13T00:00:00Z,3,McFarlin,38376 +1953-09-19T00:00:00Z,10059,Alejandro,F,1991-06-26T00:00:00Z,2,McAlpine,44307 +1961-10-15T00:00:00Z,10060,Breannda,M,1987-11-02T00:00:00Z,2,Billingsley,29175 +1962-10-19T00:00:00Z,10061,Tse,M,1985-09-17T00:00:00Z,1,Herber,49095 +1961-11-02T00:00:00Z,10062,Anoosh,M,1991-08-30T00:00:00Z,3,Peyn,65030 +1952-08-06T00:00:00Z,10063,Gino,F,1989-04-08T00:00:00Z,3,Leonhardt,52121 +1959-04-07T00:00:00Z,10064,Udi,M,1985-11-20T00:00:00Z,5,Jansch,33956 +1963-04-14T00:00:00Z,10065,Satosi,M,1988-05-18T00:00:00Z,2,Awdeh,50249 +1952-11-13T00:00:00Z,10066,Kwee,M,1986-02-26T00:00:00Z,5,Schusler,31897 +1953-01-07T00:00:00Z,10067,Claudi,M,1987-03-04T00:00:00Z,2,Stavenow,52044 +1962-11-26T00:00:00Z,10068,Charlene,M,1987-08-07T00:00:00Z,3,Brattka,28941 +1960-09-06T00:00:00Z,10069,Margareta,F,1989-11-05T00:00:00Z,5,Bierman,41933 +1955-08-20T00:00:00Z,10070,Reuven,M,1985-10-14T00:00:00Z,3,Garigliano,54329 +1958-01-21T00:00:00Z,10071,Hisao,M,1987-10-01T00:00:00Z,2,Lipner,40612 +1952-05-15T00:00:00Z,10072,Hironoby,F,1988-07-21T00:00:00Z,5,Sidou,54518 +1954-02-23T00:00:00Z,10073,Shir,M,1991-12-01T00:00:00Z,4,McClurg,32568 +1955-08-28T00:00:00Z,10074,Mokhtar,F,1990-08-13T00:00:00Z,5,Bernatsky,38992 +1960-03-09T00:00:00Z,10075,Gao,F,1987-03-19T00:00:00Z,5,Dolinsky,51956 +1952-06-13T00:00:00Z,10076,Erez,F,1985-07-09T00:00:00Z,3,Ritzmann,62405 +1964-04-18T00:00:00Z,10077,Mona,M,1990-03-02T00:00:00Z,5,Azuma,46595 +1959-12-25T00:00:00Z,10078,Danel,F,1987-05-26T00:00:00Z,2,Mondadori,69904 +1961-10-05T00:00:00Z,10079,Kshitij,F,1986-03-27T00:00:00Z,2,Gils,32263 +1957-12-03T00:00:00Z,10080,Premal,M,1985-11-19T00:00:00Z,5,Baek,52833 +1960-12-17T00:00:00Z,10081,Zhongwei,M,1986-10-30T00:00:00Z,2,Rosen,50128 +1963-09-09T00:00:00Z,10082,Parviz,M,1990-01-03T00:00:00Z,4,Lortz,49818 +1959-07-23T00:00:00Z,10083,Vishv,M,1987-03-31T00:00:00Z,1,Zockler,39110 +1960-05-25T00:00:00Z,10084,Tuval,M,1995-12-15T00:00:00Z,1,Kalloufi,28035 +1962-11-07T00:00:00Z,10085,Kenroku,M,1994-04-09T00:00:00Z,5,Malabarba,35742 +1962-11-19T00:00:00Z,10086,Somnath,M,1990-02-16T00:00:00Z,1,Foote,68547 +1959-07-23T00:00:00Z,10087,Xinglin,F,1986-09-08T00:00:00Z,5,Eugenio,32272 +1954-02-25T00:00:00Z,10088,Jungsoon,F,1988-09-02T00:00:00Z,5,Syrzycki,39638 +1963-03-21T00:00:00Z,10089,Sudharsan,F,1986-08-12T00:00:00Z,4,Flasterstein,43602 +1961-05-30T00:00:00Z,10090,Kendra,M,1986-03-14T00:00:00Z,2,Hofting,44956 +1955-10-04T00:00:00Z,10091,Amabile,M,1992-11-18T00:00:00Z,3,Gomatam,38645 +1964-10-18T00:00:00Z,10092,Valdiodio,F,1989-09-22T00:00:00Z,1,Niizuma,25976 +1964-06-11T00:00:00Z,10093,Sailaja,M,1996-11-05T00:00:00Z,3,Desikan,45656 +1957-05-25T00:00:00Z,10094,Arumugam,F,1987-04-18T00:00:00Z,5,Ossenbruggen,66817 +1965-01-03T00:00:00Z,10095,Hilari,M,1986-07-15T00:00:00Z,4,Morton,37702 +1954-09-16T00:00:00Z,10096,Jayson,M,1990-01-14T00:00:00Z,4,Mandell,43889 +1952-02-27T00:00:00Z,10097,Remzi,M,1990-09-15T00:00:00Z,3,Waschkowski,71165 +1961-09-23T00:00:00Z,10098,Sreekrishna,F,1985-05-13T00:00:00Z,4,Servieres,44817 +1956-05-25T00:00:00Z,10099,Valter,F,1988-10-18T00:00:00Z,2,Sullins,73578 +1953-04-21T00:00:00Z,10100,Hironobu,F,1987-09-21T00:00:00Z,4,Haraldson,68431 diff --git a/x-pack/qa/sql/src/main/resources/example.csv-spec b/x-pack/qa/sql/src/main/resources/example.csv-spec new file mode 100644 index 0000000000000..a7964cec8d238 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/example.csv-spec @@ -0,0 +1,22 @@ +// some comment + +// name of the test - translated into 'testName' +name + +// ES 
SQL query +SELECT COUNT(*) FROM "emp"; + +// +// expected result in CSV format +// + +// list of +// type might be missing in which case it will be autodetected or can be one of the following +// d - double, f - float, i - int, b - byte, l - long, t - timestamp, date +A,B:d,C:i +// actual values +foo,2.5,3 +bar,3.5,4 +tar,4.5,5 +; +// repeat the above \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/resources/example.sql-spec b/x-pack/qa/sql/src/main/resources/example.sql-spec new file mode 100644 index 0000000000000..8408dc58b1aed --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/example.sql-spec @@ -0,0 +1,8 @@ +// some comment + +// name of the test - translated into 'testName' +name +// SQL query to be executed against H2 and ES +SELECT COUNT(*) FROM "emp"; + +// repeat the above \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/resources/filter.sql-spec b/x-pack/qa/sql/src/main/resources/filter.sql-spec new file mode 100644 index 0000000000000..5112fbc15511d --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/filter.sql-spec @@ -0,0 +1,80 @@ +// +// Filter +// + +whereFieldEquality +// tag::whereFieldEquality +SELECT last_name l FROM "test_emp" WHERE emp_no = 10000 LIMIT 5; +// end::whereFieldEquality +whereFieldNonEquality +// tag::whereFieldNonEquality +SELECT last_name l FROM "test_emp" WHERE emp_no <> 10000 ORDER BY emp_no LIMIT 5; +// end::whereFieldNonEquality +whereFieldNonEqualityJavaSyntax +SELECT last_name l FROM "test_emp" WHERE emp_no != 10000 ORDER BY emp_no LIMIT 5; +whereFieldLessThan +// tag::whereFieldLessThan +SELECT last_name l FROM "test_emp" WHERE emp_no < 10003 ORDER BY emp_no LIMIT 5; +// end::whereFieldLessThan +whereFieldAndComparison +// tag::whereFieldAndComparison +SELECT last_name l FROM "test_emp" WHERE emp_no > 10000 AND emp_no < 10005 ORDER BY emp_no LIMIT 5; +// end::whereFieldAndComparison +whereFieldOrComparison +// tag::whereFieldOrComparison +SELECT last_name l FROM "test_emp" WHERE emp_no < 10003 OR emp_no = 10005 ORDER BY emp_no LIMIT 5; +// end::whereFieldOrComparison + + +whereFieldEqualityNot +// tag::whereFieldEqualityNot +SELECT last_name l FROM "test_emp" WHERE NOT emp_no = 10000 LIMIT 5; +// end::whereFieldEqualityNot +whereFieldNonEqualityNot +SELECT last_name l FROM "test_emp" WHERE NOT emp_no <> 10000 ORDER BY emp_no LIMIT 5; +whereFieldNonEqualityJavaSyntaxNot +SELECT last_name l FROM "test_emp" WHERE NOT emp_no != 10000 ORDER BY emp_no LIMIT 5; +whereFieldLessThanNot +SELECT last_name l FROM "test_emp" WHERE NOT emp_no < 10003 ORDER BY emp_no LIMIT 5; +whereFieldAndComparisonNot +SELECT last_name l FROM "test_emp" WHERE NOT (emp_no > 10000 AND emp_no < 10005) ORDER BY emp_no LIMIT 5; +whereFieldOrComparisonNot +SELECT last_name l FROM "test_emp" WHERE NOT (emp_no < 10003 OR emp_no = 10005) ORDER BY emp_no LIMIT 5; + +whereFieldWithOrder +SELECT last_name l FROM "test_emp" WHERE emp_no < 10003 ORDER BY emp_no; +whereFieldWithExactMatchOnString +SELECT last_name l FROM "test_emp" WHERE emp_no < 10003 AND gender = 'M'; +whereFieldWithNotEqualsOnString +SELECT last_name l FROM "test_emp" WHERE emp_no < 10003 AND gender <> 'M'; +whereFieldWithLikeMatch +SELECT last_name l FROM "test_emp" WHERE emp_no < 10003 AND last_name LIKE 'K%'; + +whereFieldWithOrderNot +SELECT last_name l FROM "test_emp" WHERE NOT emp_no < 10003 ORDER BY emp_no LIMIT 5; +whereFieldWithExactMatchOnStringNot +SELECT last_name l FROM "test_emp" WHERE NOT (emp_no < 10003 AND gender = 'M') ORDER BY emp_no LIMIT 5; 
+whereFieldWithNotEqualsOnStringNot +SELECT last_name l FROM "test_emp" WHERE NOT (emp_no < 10003 AND gender <> 'M') ORDER BY emp_no LIMIT 5; +whereFieldWithLikeMatchNot +SELECT last_name l FROM "test_emp" WHERE NOT (emp_no < 10003 AND last_name NOT LIKE 'K%') ORDER BY emp_no LIMIT 5; + +whereFieldOnMatchWithAndAndOr +SELECT last_name l FROM "test_emp" WHERE emp_no < 10003 AND (gender = 'M' AND NOT FALSE OR last_name LIKE 'K%') ORDER BY emp_no; + +// TODO: (NOT) RLIKE in particular and more NOT queries in general + +whereIsNotNullAndComparison +SELECT last_name l FROM "test_emp" WHERE emp_no IS NOT NULL AND emp_no < 10005 ORDER BY emp_no; +whereIsNull +SELECT last_name l FROM "test_emp" WHERE emp_no IS NULL; +whereIsNotNullAndIsNull +// tag::whereIsNotNullAndIsNull +SELECT last_name l FROM "test_emp" WHERE emp_no IS NOT NULL AND gender IS NULL; +// end::whereIsNotNullAndIsNull +whereBetween +// tag::whereBetween +SELECT last_name l FROM "test_emp" WHERE emp_no BETWEEN 9990 AND 10003 ORDER BY emp_no; +// end::whereBetween +whereNotBetween +SELECT last_name l FROM "test_emp" WHERE emp_no NOT BETWEEN 10010 AND 10020 ORDER BY emp_no LIMIT 5; diff --git a/x-pack/qa/sql/src/main/resources/fulltext.csv-spec b/x-pack/qa/sql/src/main/resources/fulltext.csv-spec new file mode 100644 index 0000000000000..5c032917ff153 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/fulltext.csv-spec @@ -0,0 +1,66 @@ +// +// Full-text +// + +simpleQueryAllFields +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE QUERY('Baek fox') LIMIT 3; + + emp_no:i | first_name:s | gender:s | last_name:s +10080 |Premal |M |Baek +; + +simpleQueryDedicatedField +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE QUERY('Man*', 'default_field=last_name') LIMIT 5; + + emp_no:i | first_name:s | gender:s | last_name:s +10096 |Jayson |M |Mandell +; + +simpleQueryOptions +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE QUERY('Man*', 'default_field=last_name;lenient=true;fuzzy_rewrite=scoring_boolean') LIMIT 5; + + emp_no:i | first_name:s | gender:s | last_name:s +10096 |Jayson |M |Mandell +; + +matchQuery +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH(first_name, 'Erez'); + + emp_no:i | first_name:s | gender:s | last_name:s +10076 |Erez |F |Ritzmann +; + +matchQueryWithOptions +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH(first_name, 'Erez', 'lenient=true;cutoff_frequency=2;fuzzy_rewrite=scoring_boolean;minimum_should_match=1;operator=AND;max_expansions=30;prefix_length=1;analyzer=english;auto_generate_synonyms_phrase_query=true;fuzzy_transpositions=true'); + + emp_no:i | first_name:s | gender:s | last_name:s +10076 |Erez |F |Ritzmann +; + +multiMatchQuery +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH('first_name,last_name', 'Morton', 'type=best_fields;operator=OR'); + + emp_no:i | first_name:s | gender:s | last_name:s +10095 |Hilari |M |Morton +; + +multiMatchQueryAllOptions +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH('first_name,last_name', 'Morton', 'slop=1;lenient=true;cutoff_frequency=2;tie_breaker=0.1;use_dis_max=true;fuzzy_rewrite=scoring_boolean;minimum_should_match=1;operator=AND;max_expansions=30;prefix_length=1;analyzer=english;type=best_fields;auto_generate_synonyms_phrase_query=true;fuzzy_transpositions=true'); + + emp_no:i | first_name:s | gender:s | last_name:s +10095 |Hilari |M |Morton +; + +score +SELECT emp_no, first_name, SCORE() FROM test_emp WHERE MATCH(first_name, 
'Erez') ORDER BY SCORE(); + + emp_no:i | first_name:s | SCORE():f +10076 |Erez |4.2096553 +; + +scoreAsSomething +SELECT emp_no, first_name, SCORE() as s FROM test_emp WHERE MATCH(first_name, 'Erez') ORDER BY SCORE(); + + emp_no:i | first_name:s | s:f +10076 |Erez |4.2096553 +; diff --git a/x-pack/qa/sql/src/main/resources/math.sql-spec b/x-pack/qa/sql/src/main/resources/math.sql-spec new file mode 100644 index 0000000000000..e38de2aa6bcbf --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/math.sql-spec @@ -0,0 +1,134 @@ +// +// Math +// + +mathAbs +// tag::abs +SELECT ABS(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +// end::abs +mathACos +// tag::acos +SELECT ACOS(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +// end::acos +mathASin +// tag::asin +SELECT ASIN(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +// end::asin +mathATan +// tag::atan +SELECT ATAN(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +// end::atan +//mathCbrt +//SELECT CBRT(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathCeil +// H2 returns CEIL as a double despite the value being an integer; we return a long as the other DBs +SELECT CAST(CEIL(emp_no) AS INT) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathCos +// tag::cos +SELECT COS(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +// end::cos +mathCosh +// tag::cosh +SELECT COSH(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +// end::cosh +mathCot +// tag::cot +SELECT COT(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +// end::cot +mathDegrees +// tag::degrees +SELECT DEGREES(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +// end::degrees +mathExp +// tag::exp +SELECT EXP(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +// end::exp +mathExpm1 +// tag::expm1 +SELECT EXP(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +// end::expm1 +mathFloor +SELECT CAST(FLOOR(emp_no) AS INT) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathLog +// tag::log +SELECT LOG(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +// end::log +mathLog10 +// tag::log10 +SELECT LOG10(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +// end::log10 +mathRadians +// tag::radians +SELECT RADIANS(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +// end::radians +mathRound +SELECT CAST(ROUND(emp_no) AS INT) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathSign +// tag::sign +SELECT SIGN(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +// end::sign +mathSin +// tag::sin +SELECT SIN(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +// end::sin +mathSinH +// tag::sinh +SELECT SINH(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +// end::sinh +mathSqrt +// tag::sqrt +SELECT SQRT(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +// end::sqrt +mathTan +// tag::tan +SELECT TAN(emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +// end::tan + +// +// Combined methods +// + +mathAbsOfSin +SELECT ABS(SIN(emp_no)) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathAbsOfCeilOfSin +SELECT 
EXP(ABS(CEIL(SIN(DEGREES(emp_no))))) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathAbsOfCeilOfSinWithFilter +SELECT EXP(ABS(CEIL(SIN(DEGREES(emp_no))))) m, first_name FROM "test_emp" WHERE EXP(ABS(CEIL(SIN(DEGREES(emp_no))))) < 10 ORDER BY emp_no; + +// +// Filter by Scalar +// +mathAbsFilterAndOrder +SELECT emp_no, ABS(emp_no) m, first_name FROM "test_emp" WHERE ABS(emp_no) < 10010 ORDER BY ABS(emp_no); +mathACosFilterAndOrder +SELECT emp_no, ACOS(emp_no) m, first_name FROM "test_emp" WHERE ACOS(emp_no) < 10010 ORDER BY ACOS(emp_no); +mathASinFilterAndOrder +SELECT emp_no, ASIN(emp_no) m, first_name FROM "test_emp" WHERE ASIN(emp_no) < 10010 ORDER BY ASIN(emp_no); +//mathATanFilterAndOrder +//SELECT emp_no, ATAN(emp_no) m, first_name FROM "test_emp" WHERE ATAN(emp_no) < 10010 ORDER BY ATAN(emp_no); +mathCeilFilterAndOrder +SELECT emp_no, CAST(CEIL(emp_no) AS INT) m, first_name FROM "test_emp" WHERE CEIL(emp_no) < 10010 ORDER BY CEIL(emp_no); +//mathCosFilterAndOrder +//SELECT emp_no, COS(emp_no) m, first_name FROM "test_emp" WHERE COS(emp_no) < 10010 ORDER BY COS(emp_no); +//mathCoshFilterAndOrder +//SELECT emp_no, COSH(emp_no) m, first_name FROM "test_emp" WHERE COSH(emp_no) < 10010 ORDER BY COSH(emp_no); + +// +// constants +// +mathConstantPI +SELECT ABS(emp_no) m, PI() as pi, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +mathConstant +SELECT 5 + 2 * 3 / 2 % 2 AS c, PI() as e, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; + +// +// binary functions +// +mathATan2 +// tag::atan2 +SELECT ATAN2(emp_no, emp_no) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +// end::atan2 +mathPower +// tag::power +SELECT POWER(emp_no, 2) m, first_name FROM "test_emp" WHERE emp_no < 10010 ORDER BY emp_no; +// end::power diff --git a/x-pack/qa/sql/src/main/resources/nested.csv-spec b/x-pack/qa/sql/src/main/resources/nested.csv-spec new file mode 100644 index 0000000000000..7f7b97896ed35 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/nested.csv-spec @@ -0,0 +1,112 @@ +// +// Nested documents +// +// CsvJdbc has issues with foo.bar so most fields are aliases or wrapped inside a function + +describeParent +DESCRIBE test_emp; + +column | type + +birth_date | TIMESTAMP +dep | STRUCT +dep.dep_id | VARCHAR +dep.dep_name | VARCHAR +dep.dep_name.keyword | VARCHAR +dep.from_date | TIMESTAMP +dep.to_date | TIMESTAMP +emp_no | INTEGER +first_name | VARCHAR +first_name.keyword | VARCHAR +gender | VARCHAR +hire_date | TIMESTAMP +languages | TINYINT +last_name | VARCHAR +last_name.keyword | VARCHAR +salary | INTEGER +; + +// disable until we figure out how to use field names with . 
in their name +//nestedStar +//SELECT dep.* FROM test_emp ORDER BY dep.dep_id LIMIT 5; + +//dep.dep_id:s | dep.dep_name:s | dep.from_date:ts | dep.to_date:ts + +//d001 | Marketing | 744336000000 | 253370764800000 +//d001 | Marketing | 704332800000 | 806371200000 +//d001 | Marketing | 577929600000 | 253370764800000 +//d002 | Finance | 732672000000 | 965865600000 +//d007 | Sales | 720921600000 | 253370764800000 +//; + +filterPerNestedWithOrderByTopLevel +SELECT first_name f, last_name l, YEAR(dep.from_date) d FROM test_emp WHERE dep.dep_name = 'Production' ORDER BY f LIMIT 5; + +f:s | l:s | d:i + +Alain | Chappelet | 1988 +Chirstian | Koblick | 1986 +Duangkaew | Piveteau | 1996 +Elvis | Demeyer | 1994 +Gino | Leonhardt | 1989 +; + +filterPerNestedWithOrderByNested +SELECT first_name f, last_name l, YEAR(dep.from_date) d FROM test_emp WHERE dep.dep_name = 'Production' ORDER BY dep.from_date LIMIT 5; + +f:s | l:s | d:i + +Sreekrishna | Servieres | 1985 +Zhongwei | Rosen | 1986 +Chirstian | Koblick | 1986 +Vishv | Zockler | 1987 +Alain | Chappelet | 1988 +; + +filterPerNestedWithOrderByNestedWithAlias +SELECT first_name f, dep.dep_id i, MONTH(dep.from_date) d FROM test_emp WHERE dep.dep_name = 'Production' ORDER BY i LIMIT 5; + +f:s | i:s | d:i + +Parto | d004 | 12 +Chirstian | d004 | 12 +Duangkaew | d004 | 11 +Kazuhide | d004 | 7 +Mayuko | d004 | 12 +; + +filterPerNestedWithOrderByNestedWithoutProjection +SELECT first_name f, MONTH(dep.from_date) d FROM test_emp WHERE dep.dep_name = 'Production' ORDER BY dep.dep_id LIMIT 5; + +f:s | d:i + +Parto | 12 +Chirstian | 12 +Duangkaew | 11 +Kazuhide | 7 +Mayuko | 12 +; + +selectWithScalarOnNested +SELECT first_name f, last_name l, YEAR(dep.from_date) start FROM test_emp WHERE dep.dep_name = 'Production' AND languages > 1 ORDER BY start LIMIT 5; + +f:s | l:s | start:i + +Sreekrishna | Servieres | 1985 +Zhongwei | Rosen | 1986 +Chirstian | Koblick | 1986 +Alain | Chappelet | 1988 +Zvonko | Nyanchama | 1989 +; + +selectWithScalarOnNestedWithoutProjection +SELECT first_name f, last_name l FROM test_emp WHERE dep.dep_name = 'Production' AND languages > 1 ORDER BY YEAR(dep.from_date) LIMIT 5; + +f:s | l:s + +Sreekrishna | Servieres +Zhongwei | Rosen +Chirstian | Koblick +Alain | Chappelet +Zvonko | Nyanchama +; diff --git a/x-pack/qa/sql/src/main/resources/nulls.csv-spec b/x-pack/qa/sql/src/main/resources/nulls.csv-spec new file mode 100644 index 0000000000000..1cb9a1ed7f319 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/nulls.csv-spec @@ -0,0 +1,25 @@ +// +// Null expressions +// + +nullDate +SELECT YEAR(CAST(NULL AS DATE)) d; + +d:i +null +; + +nullAdd +SELECT CAST(NULL AS INT) + CAST(NULL AS FLOAT) AS n; + +n:d +null +; + + +nullDiv +SELECT 5 / CAST(NULL AS FLOAT) + 10 AS n; + +n:d +null +; diff --git a/x-pack/qa/sql/src/main/resources/plugin-security.policy b/x-pack/qa/sql/src/main/resources/plugin-security.policy new file mode 100644 index 0000000000000..bb58eb4270ddf --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/plugin-security.policy @@ -0,0 +1,4 @@ +grant { + // Policy is required for tests to connect to testing Elasticsearch instances. 
+ permission java.net.SocketPermission "*", "connect,resolve"; +}; diff --git a/x-pack/qa/sql/src/main/resources/select.sql-spec b/x-pack/qa/sql/src/main/resources/select.sql-spec new file mode 100644 index 0000000000000..76562a07c86f7 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/select.sql-spec @@ -0,0 +1,58 @@ +// +// Basic SELECT +// + +wildcardWithOrder +// tag::wildcardWithOrder +SELECT * FROM test_emp ORDER BY emp_no; +// end::wildcardWithOrder +column +SELECT last_name FROM "test_emp" ORDER BY emp_no; +columnWithAlias +SELECT last_name AS l FROM "test_emp" ORDER BY emp_no; +columnWithAliasNoAs +SELECT last_name l FROM "test_emp" ORDER BY emp_no; +multipleColumnsNoAlias +SELECT first_name, last_name FROM "test_emp" ORDER BY emp_no; +multipleColumnWithAliasWithAndWithoutAs +SELECT first_name f, last_name AS l FROM "test_emp" ORDER BY emp_no; + +// +// SELECT with LIMIT +// + +wildcardWithLimit +SELECT * FROM "test_emp" ORDER BY emp_no LIMIT 5; +wildcardWithOrderWithLimit +SELECT * FROM "test_emp" ORDER BY emp_no LIMIT 5; +columnWithLimit +SELECT last_name FROM "test_emp" ORDER BY emp_no LIMIT 5; +columnWithAliasWithLimit +SELECT last_name AS l FROM "test_emp" ORDER BY emp_no LIMIT 5; +columnWithAliasNoAsWithLimit +SELECT last_name l FROM "test_emp" ORDER BY emp_no LIMIT 5; +multipleColumnsNoAliasWithLimit +SELECT first_name, last_name FROM "test_emp" ORDER BY emp_no LIMIT 5; +multipleColumnWithAliasWithAndWithoutAsWithLimit +SELECT first_name f, last_name AS l FROM "test_emp" ORDER BY emp_no LIMIT 5; + + +// +// SELECT with CAST +// +//castWithLiteralToInt +//SELECT CAST(1 AS INT); +castOnColumnNumberToVarchar +SELECT CAST(emp_no AS VARCHAR) AS emp_no_cast FROM "test_emp" ORDER BY emp_no LIMIT 5; +castOnColumnNumberToLong +SELECT CAST(emp_no AS BIGINT) AS emp_no_cast FROM "test_emp" ORDER BY emp_no LIMIT 5; +castOnColumnNumberToSmallint +SELECT CAST(emp_no AS SMALLINT) AS emp_no_cast FROM "test_emp" ORDER BY emp_no LIMIT 5; +castOnColumnNumberWithAliasToInt +SELECT CAST(emp_no AS INT) AS emp_no_cast FROM "test_emp" ORDER BY emp_no LIMIT 5; +castOnColumnNumberToReal +SELECT CAST(emp_no AS REAL) AS emp_no_cast FROM "test_emp" ORDER BY emp_no LIMIT 5; +castOnColumnNumberToDouble +SELECT CAST(emp_no AS DOUBLE) AS emp_no_cast FROM "test_emp" ORDER BY emp_no LIMIT 5; +castOnColumnNumberToBoolean +SELECT CAST(emp_no AS BOOL) AS emp_no_cast FROM "test_emp" ORDER BY emp_no LIMIT 5; diff --git a/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_columns.sql b/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_columns.sql new file mode 100644 index 0000000000000..3d8cf4708945e --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_columns.sql @@ -0,0 +1,50 @@ +CREATE TABLE mock ( + TABLE_SCHEM VARCHAR, + TABLE_NAME VARCHAR, + COLUMN_NAME VARCHAR, + DATA_TYPE INTEGER, + TYPE_NAME VARCHAR, + COLUMN_SIZE INTEGER, + BUFFER_LENGTH INTEGER, + DECIMAL_DIGITS INTEGER, + NUM_PREC_RADIX INTEGER, + NULLABLE INTEGER, + REMARKS VARCHAR, + COLUMN_DEF VARCHAR, + SQL_DATA_TYPE INTEGER, + SQL_DATETIME_SUB INTEGER, + CHAR_OCTET_LENGTH INTEGER, + ORDINAL_POSITION INTEGER, + IS_NULLABLE VARCHAR, + SCOPE_CATALOG VARCHAR, + SCOPE_SCHEMA VARCHAR, + SCOPE_TABLE VARCHAR, + SOURCE_DATA_TYPE SMALLINT, + IS_AUTOINCREMENT VARCHAR, + IS_GENERATEDCOLUMN VARCHAR +) AS +SELECT null, 'test1', 'name', 12, 'TEXT', 0, 2147483647, null, null, + 1, -- columnNullable + null, null, 12, null, 2147483647, 1, 'YES', null, null, null, null, 'NO', 'NO' +FROM DUAL +UNION ALL +SELECT null, 'test1', 
'name.keyword', 12, 'KEYWORD', 0, 2147483647, null, null, + 1, -- columnNullable + null, null, 12, null, 2147483647, 1, 'YES', null, null, null, null, 'NO', 'NO' +FROM DUAL +UNION ALL +SELECT null, 'test2', 'date', 93, 'DATE', 20, 8, null, null, + 1, -- columnNullable + null, null, 93, null, null, 1, 'YES', null, null, null, null, 'NO', 'NO' +FROM DUAL +UNION ALL +SELECT null, 'test2', 'float', 7, 'FLOAT', 15, 4, null, 2, + 1, -- columnNullable + null, null, 7, null, null, 2, 'YES', null, null, null, null, 'NO', 'NO' +FROM DUAL +UNION ALL +SELECT null, 'test2', 'number', -5, 'LONG', 20, 8, null, 10, + 1, -- columnNullable + null, null, -5, null, null, 3, 'YES', null, null, null, null, 'NO', 'NO' +FROM DUAL +; diff --git a/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_procedure_columns.sql b/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_procedure_columns.sql new file mode 100644 index 0000000000000..bf2b741c1cdf7 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_procedure_columns.sql @@ -0,0 +1,22 @@ +CREATE TABLE mock ( + PROCEDURE_CAT VARCHAR, + PROCEDURE_SCHEM VARCHAR, + PROCEDURE_NAME VARCHAR, + COLUMN_NAME VARCHAR, + COLUMN_TYPE SMALLINT, + DATA_TYPE INTEGER, + TYPE_NAME VARCHAR, + PRECISION INTEGER, + LENGTH INTEGER, + SCALE SMALLINT, + RADIX SMALLINT, + NULLABLE SMALLINT, + REMARKS VARCHAR, + COLUMN_DEF VARCHAR, + SQL_DATA_TYPE INTEGER, + SQL_DATETIME_SUB INTEGER, + CHAR_OCTET_LENGTH INTEGER, + ORDINAL_POSITION INTEGER, + IS_NULLABLE VARCHAR, + SPECIFIC_NAME VARCHAR +); diff --git a/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_procedures.sql b/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_procedures.sql new file mode 100644 index 0000000000000..72bc81259e4aa --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_procedures.sql @@ -0,0 +1,11 @@ +CREATE TABLE mock ( + PROCEDURE_CAT VARCHAR, + PROCEDURE_SCHEM VARCHAR, + PROCEDURE_NAME VARCHAR, + NUM_INPUT_PARAMS INTEGER, + NUM_OUTPUT_PARAMS INTEGER, + NUM_RESULT_SETS INTEGER, + REMARKS VARCHAR, + PROCEDURE_TYPE SMALLINT, + SPECIFIC_NAME VARCHAR +); diff --git a/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_table_types.sql b/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_table_types.sql new file mode 100644 index 0000000000000..db40c6b90865f --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_table_types.sql @@ -0,0 +1,15 @@ +CREATE TABLE mock ( + TABLE_SCHEM VARCHAR, + TABLE_NAME VARCHAR, + TABLE_TYPE VARCHAR, + REMARKS VARCHAR, + TYPE_CAT VARCHAR, + TYPE_SCHEM VARCHAR, + TYPE_NAME VARCHAR, + SELF_REFERENCING_COL_NAME VARCHAR, + REF_GENERATION VARCHAR +) AS +SELECT '', 'test1', 'BASE TABLE', '', null, null, null, null, null FROM DUAL +UNION ALL +SELECT '', 'test2', 'BASE TABLE', '', null, null, null, null, null FROM DUAL +; diff --git a/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_tables.sql b/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_tables.sql new file mode 100644 index 0000000000000..db40c6b90865f --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_tables.sql @@ -0,0 +1,15 @@ +CREATE TABLE mock ( + TABLE_SCHEM VARCHAR, + TABLE_NAME VARCHAR, + TABLE_TYPE VARCHAR, + REMARKS VARCHAR, + TYPE_CAT VARCHAR, + TYPE_SCHEM VARCHAR, + TYPE_NAME VARCHAR, + SELF_REFERENCING_COL_NAME VARCHAR, + REF_GENERATION VARCHAR +) AS +SELECT '', 'test1', 'BASE TABLE', '', null, null, null, null, null FROM DUAL +UNION ALL +SELECT '', 'test2', 'BASE TABLE', '', null, 
null, null, null, null FROM DUAL +; diff --git a/x-pack/qa/sql/src/main/resources/setup_mock_show_tables.sql b/x-pack/qa/sql/src/main/resources/setup_mock_show_tables.sql new file mode 100644 index 0000000000000..b65be73066e41 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/setup_mock_show_tables.sql @@ -0,0 +1,4 @@ +CREATE TABLE mock ( + "name" VARCHAR, + "type" VARCHAR +); diff --git a/x-pack/qa/sql/src/main/resources/setup_test_emp.sql b/x-pack/qa/sql/src/main/resources/setup_test_emp.sql new file mode 100644 index 0000000000000..3b79b3037f11c --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/setup_test_emp.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS "test_emp"; +CREATE TABLE "test_emp" ( + "birth_date" TIMESTAMP WITH TIME ZONE, + "emp_no" INT, + "first_name" VARCHAR(50), + "gender" VARCHAR(1), + "hire_date" TIMESTAMP WITH TIME ZONE, + "languages" TINYINT, + "last_name" VARCHAR(50), + "salary" INT + ) + AS SELECT * FROM CSVREAD('classpath:/employees.csv'); \ No newline at end of file diff --git a/x-pack/qa/third-party/active-directory/build.gradle b/x-pack/qa/third-party/active-directory/build.gradle new file mode 100644 index 0000000000000..c9fa55652108d --- /dev/null +++ b/x-pack/qa/third-party/active-directory/build.gradle @@ -0,0 +1,47 @@ +Project smbFixtureProject = xpackProject("test:smb-fixture") +evaluationDependsOn(smbFixtureProject.path) + +apply plugin: 'elasticsearch.vagrantsupport' +apply plugin: 'elasticsearch.standalone-test' + +dependencies { + testCompile project(xpackModule('security')) + testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') +} + +// add test resources from security, so tests can use example certs +sourceSets.test.resources.srcDirs(project(xpackModule('security')).sourceSets.test.resources.srcDirs) +compileTestJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try,-unchecked" + +// we have to repeat these patterns because the security test resources are effectively in the src of this project +forbiddenPatterns { + exclude '**/*.key' + exclude '**/*.p12' + exclude '**/*.der' +} + +test { + /* + * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each + * other if we allow them to set the number of available processors as it's set-once in Netty. + */ + systemProperty 'es.set.netty.runtime.available.processors', 'false' + include '**/*IT.class' + include '**/*Tests.class' +} + +// these are just tests, no need to audit +thirdPartyAudit.enabled = false + +task smbFixture { + dependsOn "vagrantCheckVersion", "virtualboxCheckVersion", smbFixtureProject.up +} + +if (project.rootProject.vagrantSupported) { + if (project.hasProperty('useExternalAD') == false) { + test.dependsOn smbFixture + test.finalizedBy smbFixtureProject.halt + } +} else { + test.enabled = false +} diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java new file mode 100644 index 0000000000000..98594917129f2 --- /dev/null +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java @@ -0,0 +1,151 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.ldap; + +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapTestCase; +import org.elasticsearch.xpack.security.authc.ldap.support.SessionFactory; +import org.junit.After; +import org.junit.Before; + +import java.nio.file.Path; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.security.authc.ldap.LdapUserSearchSessionFactoryTests.getLdapUserSearchSessionFactory; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; + +public class ADLdapUserSearchSessionFactoryTests extends AbstractActiveDirectoryTestCase { + + private SSLService sslService; + private Settings globalSettings; + private ThreadPool threadPool; + + @Before + public void init() throws Exception { + Path keystore = getDataPath("support/ADtrust.jks"); + Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); + /* + * Prior to each test we reinitialize the socket factory with a new SSLService so that we get a new SSLContext. + * If we re-use a SSLContext, previously connected sessions can get re-established which breaks hostname + * verification tests since a re-established connection does not perform hostname verification. 
+ */ + + globalSettings = Settings.builder() + .put("path.home", createTempDir()) + .put("xpack.ssl.truststore.path", keystore) + .setSecureSettings(newSecureSettings("xpack.ssl.truststore.secure_password", "changeit")) + .build(); + sslService = new SSLService(globalSettings, env); + threadPool = new TestThreadPool("ADLdapUserSearchSessionFactoryTests"); + } + + @After + public void shutdown() throws InterruptedException { + terminate(threadPool); + } + + private MockSecureSettings newSecureSettings(String key, String value) { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(key, value); + return secureSettings; + } + + public void testUserSearchWithActiveDirectory() throws Exception { + String groupSearchBase = "DC=ad,DC=test,DC=elasticsearch,DC=com"; + String userSearchBase = "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; + Settings settings = Settings.builder() + .put(LdapTestCase.buildLdapSettings( + new String[] { ActiveDirectorySessionFactoryTests.AD_LDAP_URL }, + Strings.EMPTY_ARRAY, groupSearchBase, LdapSearchScope.SUB_TREE, null, + true)) + .put("user_search.base_dn", userSearchBase) + .put("bind_dn", "ironman@ad.test.elasticsearch.com") + .put("bind_password", ActiveDirectorySessionFactoryTests.PASSWORD) + .put("user_search.filter", "(cn={0})") + .put("user_search.pool.enabled", randomBoolean()) + .put("follow_referrals", ActiveDirectorySessionFactoryTests.FOLLOW_REFERRALS) + .build(); + Settings.Builder builder = Settings.builder() + .put(globalSettings); + settings.keySet().forEach(k -> { + builder.copy("xpack.security.authc.realms.ldap." + k, k, settings); + + }); + Settings fullSettings = builder.build(); + sslService = new SSLService(fullSettings, TestEnvironment.newEnvironment(fullSettings)); + RealmConfig config = new RealmConfig("ad-as-ldap-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)); + LdapUserSearchSessionFactory sessionFactory = getLdapUserSearchSessionFactory(config, sslService, threadPool); + + String user = "Bruce Banner"; + try { + //auth + try (LdapSession ldap = session(sessionFactory, user, new SecureString(ActiveDirectorySessionFactoryTests.PASSWORD))) { + assertConnectionCanReconnect(ldap.getConnection()); + List groups = groups(ldap); + + assertThat(groups, containsInAnyOrder( + containsString("Avengers"), + containsString("SHIELD"), + containsString("Geniuses"), + containsString("Philanthropists"))); + } + + //lookup + try (LdapSession ldap = unauthenticatedSession(sessionFactory, user)) { + assertConnectionCanReconnect(ldap.getConnection()); + List groups = groups(ldap); + + assertThat(groups, containsInAnyOrder( + containsString("Avengers"), + containsString("SHIELD"), + containsString("Geniuses"), + containsString("Philanthropists"))); + } + } finally { + sessionFactory.close(); + } + } + + @Override + protected boolean enableWarningsCheck() { + return false; + } + + private LdapSession session(SessionFactory factory, String username, SecureString password) { + PlainActionFuture future = new PlainActionFuture<>(); + factory.session(username, password, future); + return future.actionGet(); + } + + private List groups(LdapSession ldapSession) { + Objects.requireNonNull(ldapSession); + PlainActionFuture> future = new PlainActionFuture<>(); + ldapSession.groups(future); + return future.actionGet(); + } + + private LdapSession unauthenticatedSession(SessionFactory factory, String username) { + PlainActionFuture future = new 
PlainActionFuture<>(); + factory.unauthenticatedSession(username, future); + return future.actionGet(); + } +} diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java new file mode 100644 index 0000000000000..7ef1bd674a32b --- /dev/null +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.ldap; + +import com.unboundid.ldap.sdk.LDAPConnection; +import com.unboundid.ldap.sdk.LDAPConnectionPool; +import com.unboundid.ldap.sdk.LDAPException; +import com.unboundid.ldap.sdk.LDAPInterface; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.ldap.ActiveDirectorySessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; +import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.core.ssl.VerificationMode; +import org.junit.Before; + +import java.nio.file.Path; +import java.security.AccessController; +import java.security.PrivilegedAction; + +public abstract class AbstractActiveDirectoryTestCase extends ESTestCase { + + // follow referrals defaults to false here which differs from the default value of the setting + // this is needed to prevent test logs being filled by errors as the default configuration of + // the tests run against a vagrant samba4 instance configured as a domain controller with the + // ports mapped into the ephemeral port range and there is the possibility of incorrect results + // as we cannot control the URL of the referral which may contain a non-resolvable DNS name as + // this name would be served by the samba4 instance + public static final Boolean FOLLOW_REFERRALS = Booleans.parseBoolean(getFromEnv("TESTS_AD_FOLLOW_REFERRALS", "false")); + public static final String AD_LDAP_URL = getFromEnv("TESTS_AD_LDAP_URL", "ldaps://localhost:61636"); + public static final String AD_LDAP_GC_URL = getFromEnv("TESTS_AD_LDAP_GC_URL", "ldaps://localhost:63269"); + public static final String PASSWORD = getFromEnv("TESTS_AD_USER_PASSWORD", "Passw0rd"); + public static final String AD_LDAP_PORT = getFromEnv("TESTS_AD_LDAP_PORT", "61389"); + public static final String AD_LDAPS_PORT = getFromEnv("TESTS_AD_LDAPS_PORT", "61636"); + public static final String AD_GC_LDAP_PORT = getFromEnv("TESTS_AD_GC_LDAP_PORT", "63268"); + public static final String AD_GC_LDAPS_PORT = getFromEnv("TESTS_AD_GC_LDAPS_PORT", "63269"); + public static final String AD_DOMAIN = "ad.test.elasticsearch.com"; + + protected SSLService sslService; + protected Settings globalSettings; + protected boolean useGlobalSSL; + + @Before + public void initializeSslSocketFactory() throws 
Exception { + useGlobalSSL = randomBoolean(); + Path truststore = getDataPath("../ldap/support/ADtrust.jks"); + /* + * Prior to each test we reinitialize the socket factory with a new SSLService so that we get a new SSLContext. + * If we re-use a SSLContext, previously connected sessions can get re-established which breaks hostname + * verification tests since a re-established connection does not perform hostname verification. + */ + Settings.Builder builder = Settings.builder().put("path.home", createTempDir()); + if (useGlobalSSL) { + builder.put("xpack.ssl.truststore.path", truststore) + .put("xpack.ssl.truststore.password", "changeit"); + + // fake realm to load config with certificate verification mode + builder.put("xpack.security.authc.realms.bar.ssl.truststore.path", truststore); + builder.put("xpack.security.authc.realms.bar.ssl.truststore.password", "changeit"); + builder.put("xpack.security.authc.realms.bar.ssl.verification_mode", VerificationMode.CERTIFICATE); + } else { + // fake realms so ssl will get loaded + builder.put("xpack.security.authc.realms.foo.ssl.truststore.path", truststore); + builder.put("xpack.security.authc.realms.foo.ssl.truststore.password", "changeit"); + builder.put("xpack.security.authc.realms.foo.ssl.verification_mode", VerificationMode.FULL); + builder.put("xpack.security.authc.realms.bar.ssl.truststore.path", truststore); + builder.put("xpack.security.authc.realms.bar.ssl.truststore.password", "changeit"); + builder.put("xpack.security.authc.realms.bar.ssl.verification_mode", VerificationMode.CERTIFICATE); + } + globalSettings = builder.build(); + Environment environment = TestEnvironment.newEnvironment(globalSettings); + sslService = new SSLService(globalSettings, environment); + } + + Settings buildAdSettings(String ldapUrl, String adDomainName, String userSearchDN, LdapSearchScope scope, + boolean hostnameVerification) { + Settings.Builder builder = Settings.builder() + .putList(SessionFactorySettings.URLS_SETTING, ldapUrl) + .put(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING, adDomainName) + .put(ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_BASEDN_SETTING, userSearchDN) + .put(ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_SCOPE_SETTING, scope) + .put(ActiveDirectorySessionFactorySettings.AD_LDAP_PORT_SETTING.getKey(), AD_LDAP_PORT) + .put(ActiveDirectorySessionFactorySettings.AD_LDAPS_PORT_SETTING.getKey(), AD_LDAPS_PORT) + .put(ActiveDirectorySessionFactorySettings.AD_GC_LDAP_PORT_SETTING.getKey(), AD_GC_LDAP_PORT) + .put(ActiveDirectorySessionFactorySettings.AD_GC_LDAPS_PORT_SETTING.getKey(), AD_GC_LDAPS_PORT) + .put("follow_referrals", FOLLOW_REFERRALS); + if (randomBoolean()) { + builder.put("ssl.verification_mode", hostnameVerification ? 
VerificationMode.FULL : VerificationMode.CERTIFICATE); + } else { + builder.put(SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING, hostnameVerification); + } + if (useGlobalSSL == false) { + builder.put("ssl.truststore.path", getDataPath("../ldap/support/ADtrust.jks")) + .put("ssl.truststore.password", "changeit"); + } + return builder.build(); + } + + protected static void assertConnectionCanReconnect(LDAPInterface conn) { + AccessController.doPrivileged(new PrivilegedAction() { + @Override + public Void run() { + try { + if (conn instanceof LDAPConnection) { + ((LDAPConnection) conn).reconnect(); + } else if (conn instanceof LDAPConnectionPool) { + try (LDAPConnection c = ((LDAPConnectionPool) conn).getConnection()) { + c.reconnect(); + } + } + } catch (LDAPException e) { + fail("Connection is not valid. It will not work on follow referral flow." + + System.lineSeparator() + ExceptionsHelper.stackTrace(e)); + } + return null; + } + }); + } + + private static String getFromEnv(String envVar, String defaultValue) { + final String value = System.getenv(envVar); + return value == null ? defaultValue : value; + } +} diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java new file mode 100644 index 0000000000000..1d73d1f0d2979 --- /dev/null +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java @@ -0,0 +1,461 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.ldap; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.xpack.core.security.SecurityLifecycleServiceField; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequestBuilder; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse; +import org.elasticsearch.xpack.core.security.authc.ldap.ActiveDirectorySessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope.ONE_LEVEL; +import static org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope.SUB_TREE; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; +import static org.elasticsearch.xpack.security.authc.ldap.AbstractActiveDirectoryTestCase.AD_GC_LDAPS_PORT; +import static org.elasticsearch.xpack.security.authc.ldap.AbstractActiveDirectoryTestCase.AD_GC_LDAP_PORT; +import static org.elasticsearch.xpack.security.authc.ldap.AbstractActiveDirectoryTestCase.AD_LDAPS_PORT; +import static org.elasticsearch.xpack.security.authc.ldap.AbstractActiveDirectoryTestCase.AD_LDAP_PORT; +import static org.elasticsearch.xpack.security.test.SecurityTestUtils.writeFile; +import static org.hamcrest.Matchers.equalTo; + +/** + * This test assumes all subclass tests will be of type SUITE. It picks a random realm configuration for the tests, and + * writes a group to role mapping file for each node. 
+ */ +public abstract class AbstractAdLdapRealmTestCase extends SecurityIntegTestCase { + + public static final String XPACK_SECURITY_AUTHC_REALMS_EXTERNAL = "xpack.security.authc.realms.external"; + public static final String PASSWORD = AbstractActiveDirectoryTestCase.PASSWORD; + public static final String ASGARDIAN_INDEX = "gods"; + public static final String PHILANTHROPISTS_INDEX = "philanthropists"; + public static final String SECURITY_INDEX = "security"; + + private static final RoleMappingEntry[] AD_ROLE_MAPPING = new RoleMappingEntry[] { + new RoleMappingEntry( + "SHIELD: [ \"CN=SHIELD,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com\" ]", + "{ \"roles\":[\"SHIELD\"], \"enabled\":true, \"rules\":" + + "{\"field\": {\"groups\": \"CN=SHIELD,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com\"} } }" + ), + new RoleMappingEntry( + "Avengers: [ \"CN=Avengers,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com\" ]", + "{ \"roles\":[\"Avengers\"], \"enabled\":true, \"rules\":" + + "{ \"field\": { \"groups\" : \"CN=Avengers,CN=Users,*\" } } }" + ), + new RoleMappingEntry( + "Gods: [ \"CN=Gods,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com\" ]", + "{ \"roles\":[\"Gods\"], \"enabled\":true, \"rules\":{\"any\": [" + + " { \"field\":{ \"groups\": \"CN=Gods,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com\" } }," + + " { \"field\":{ \"groups\": \"CN=Deities,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com\" } } " + + "] } }" + ), + new RoleMappingEntry( + "Philanthropists: [ \"CN=Philanthropists,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com\" ]", + "{ \"roles\":[\"Philanthropists\"], \"enabled\":true, \"rules\": { \"all\": [" + + " { \"field\": { \"groups\" : \"CN=Philanthropists,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com\" } }," + + " { \"field\": { \"realm.name\" : \"external\" } } " + + "] } }" + ) + }; + + protected static final String TESTNODE_KEYSTORE = "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks"; + protected static RealmConfig realmConfig; + protected static List roleMappings; + protected static boolean useGlobalSSL; + + @BeforeClass + public static void setupRealm() { + realmConfig = randomFrom(RealmConfig.values()); + roleMappings = realmConfig.selectRoleMappings(ESTestCase::randomBoolean); + useGlobalSSL = randomBoolean(); + ESLoggerFactory.getLogger("test").info("running test with realm configuration [{}], with direct group to role mapping [{}]. " + + "Settings [{}]", realmConfig, realmConfig.mapGroupsAsRoles, realmConfig.settings); + } + + @AfterClass + public static void cleanupRealm() { + realmConfig = null; + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + final RealmConfig realm = AbstractAdLdapRealmTestCase.realmConfig; + Path store = getDataPath(TESTNODE_KEYSTORE); + Settings.Builder builder = Settings.builder(); + // don't use filter since it returns a prefixed secure setting instead of mock! 
+ Settings settingsToAdd = super.nodeSettings(nodeOrdinal); + builder.put(settingsToAdd.filter(k -> k.startsWith("xpack.ssl.") == false), false); + MockSecureSettings mockSecureSettings = (MockSecureSettings) Settings.builder().put(settingsToAdd).getSecureSettings(); + if (mockSecureSettings != null) { + MockSecureSettings filteredSecureSettings = new MockSecureSettings(); + builder.setSecureSettings(filteredSecureSettings); + for (String secureSetting : mockSecureSettings.getSettingNames()) { + if (secureSetting.startsWith("xpack.ssl.") == false) { + SecureString secureString = mockSecureSettings.getString(secureSetting); + if (secureString == null) { + final byte[] fileBytes; + try (InputStream in = mockSecureSettings.getFile(secureSetting); + ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream()) { + int numRead; + byte[] bytes = new byte[1024]; + while ((numRead = in.read(bytes)) != -1) { + byteArrayOutputStream.write(bytes, 0, numRead); + } + byteArrayOutputStream.flush(); + fileBytes = byteArrayOutputStream.toByteArray(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + + filteredSecureSettings.setFile(secureSetting, fileBytes); + } else { + filteredSecureSettings.setString(secureSetting, new String(secureString.getChars())); + } + } + } + } + addSslSettingsForStore(builder, store, "testnode"); + builder.put(buildRealmSettings(realm, roleMappings, store)); + return builder.build(); + } + + protected Settings buildRealmSettings(RealmConfig realm, List roleMappingEntries, Path store) { + Settings.Builder builder = Settings.builder(); + builder.put(realm.buildSettings(store, "testnode")); + configureFileRoleMappings(builder, roleMappingEntries); + return builder.build(); + } + + @Before + public void setupRoleMappings() throws Exception { + assertSecurityIndexActive(); + + List content = getRoleMappingContent(RoleMappingEntry::getNativeContent); + if (content.isEmpty()) { + return; + } + SecurityClient securityClient = securityClient(); + Map> futures = new LinkedHashMap<>(content.size()); + for (int i = 0; i < content.size(); i++) { + final String name = "external_" + i; + final PutRoleMappingRequestBuilder builder = securityClient.preparePutRoleMapping( + name, new BytesArray(content.get(i)), XContentType.JSON); + futures.put(name, builder.execute()); + } + for (String mappingName : futures.keySet()) { + final PutRoleMappingResponse response = futures.get(mappingName).get(); + logger.info("Created native role-mapping {} : {}", mappingName, response.isCreated()); + } + } + + @After + public void cleanupSecurityIndex() throws Exception { + super.deleteSecurityIndex(); + } + + @Override + public Set excludeTemplates() { + Set templates = Sets.newHashSet(super.excludeTemplates()); + templates.add(SecurityLifecycleServiceField.SECURITY_TEMPLATE_NAME); // don't remove the security index template + return templates; + } + + private List getRoleMappingContent(Function contentFunction) { + return getRoleMappingContent(contentFunction, AbstractAdLdapRealmTestCase.roleMappings); + } + + private List getRoleMappingContent(Function contentFunction, List mappings) { + return mappings.stream() + .map(contentFunction) + .filter(Objects::nonNull) + .collect(Collectors.toList()); + } + + @Override + protected Settings transportClientSettings() { + if (useGlobalSSL) { + Path store = getDataPath(TESTNODE_KEYSTORE); + Settings.Builder builder = Settings.builder() + .put(super.transportClientSettings().filter((s) -> s.startsWith("xpack.ssl.") == false)); + 
addSslSettingsForStore(builder, store, "testnode"); + return builder.build(); + } else { + return super.transportClientSettings(); + } + } + + @Override + protected boolean transportSSLEnabled() { + return useGlobalSSL; + } + + protected final void configureFileRoleMappings(Settings.Builder builder, List mappings) { + String content = getRoleMappingContent(RoleMappingEntry::getFileContent, mappings).stream().collect(Collectors.joining("\n")); + Path nodeFiles = createTempDir(); + String file = writeFile(nodeFiles, "role_mapping.yml", content); + builder.put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".files.role_mapping", file); + } + + @Override + protected String configRoles() { + return super.configRoles() + + "\n" + + "Avengers:\n" + + " cluster: [ NONE ]\n" + + " indices:\n" + + " - names: 'avengers'\n" + + " privileges: [ all ]\n" + + "SHIELD:\n" + + " cluster: [ NONE ]\n" + + " indices:\n" + + " - names: '" + SECURITY_INDEX + "'\n" + + " privileges: [ all ]\n" + + "Gods:\n" + + " cluster: [ NONE ]\n" + + " indices:\n" + + " - names: '" + ASGARDIAN_INDEX + "'\n" + + " privileges: [ all ]\n" + + "Philanthropists:\n" + + " cluster: [ NONE ]\n" + + " indices:\n" + + " - names: '" + PHILANTHROPISTS_INDEX + "'\n" + + " privileges: [ all ]\n"; + } + + protected void assertAccessAllowed(String user, String index) throws IOException { + Client client = client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, userHeader(user, PASSWORD))); + IndexResponse indexResponse = client.prepareIndex(index, "type"). + setSource(jsonBuilder() + .startObject() + .field("name", "value") + .endObject()) + .execute().actionGet(); + + assertEquals("user " + user + " should have write access to index " + index, + DocWriteResponse.Result.CREATED, indexResponse.getResult()); + + refresh(); + + GetResponse getResponse = client.prepareGet(index, "type", indexResponse.getId()) + .get(); + + assertThat("user " + user + " should have read access to index " + index, getResponse.getId(), equalTo(indexResponse.getId())); + } + + protected void assertAccessDenied(String user, String index) throws IOException { + try { + client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, userHeader(user, PASSWORD))) + .prepareIndex(index, "type"). 
+ setSource(jsonBuilder() + .startObject() + .field("name", "value") + .endObject()) + .execute().actionGet(); + fail("Write access to index " + index + " should not be allowed for user " + user); + } catch (ElasticsearchSecurityException e) { + // expected + } + refresh(); + } + + protected static String userHeader(String username, String password) { + return UsernamePasswordToken.basicAuthHeaderValue(username, new SecureString(password.toCharArray())); + } + + private void addSslSettingsForStore(Settings.Builder builder, Path store, String password) { + SecuritySettingsSource.addSecureSettings(builder, secureSettings -> { + secureSettings.setString("xpack.ssl.keystore.secure_password", password); + secureSettings.setString("xpack.ssl.truststore.secure_password", password); + }); + builder.put("xpack.ssl.keystore.path", store) + .put("xpack.ssl.verification_mode", "certificate") + .put("xpack.ssl.truststore.path", store); + } + + static class RoleMappingEntry { + @Nullable + public final String fileContent; + @Nullable + public final String nativeContent; + + RoleMappingEntry(@Nullable String fileContent, @Nullable String nativeContent) { + this.fileContent = fileContent; + this.nativeContent = nativeContent; + } + + String getFileContent() { + return fileContent; + } + + String getNativeContent() { + return nativeContent; + } + + RoleMappingEntry pickEntry(Supplier shouldPickFileContent) { + if (nativeContent == null) { + return new RoleMappingEntry(fileContent, null); + } + if (fileContent == null) { + return new RoleMappingEntry(null, nativeContent); + } + if (shouldPickFileContent.get()) { + return new RoleMappingEntry(fileContent, null); + } else { + return new RoleMappingEntry(null, nativeContent); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + final RoleMappingEntry that = (RoleMappingEntry) o; + return Objects.equals(this.fileContent, that.fileContent) + && Objects.equals(this.nativeContent, that.nativeContent); + } + + @Override + public int hashCode() { + int result = Objects.hashCode(fileContent); + result = 31 * result + Objects.hashCode(nativeContent); + return result; + } + } + + /** + * Represents multiple possible configurations for active directory and ldap + */ + enum RealmConfig { + + AD(false, AD_ROLE_MAPPING, + Settings.builder() + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".type", LdapRealmSettings.AD_TYPE) + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".domain_name", ActiveDirectorySessionFactoryTests.AD_DOMAIN) + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + + ".group_search.base_dn", "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com") + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".group_search.scope", randomBoolean() ? SUB_TREE : ONE_LEVEL) + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".url", ActiveDirectorySessionFactoryTests.AD_LDAP_URL) + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".follow_referrals", + ActiveDirectorySessionFactoryTests.FOLLOW_REFERRALS) + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + "." + + ActiveDirectorySessionFactorySettings.AD_LDAP_PORT_SETTING.getKey(), AD_LDAP_PORT) + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + "." + + ActiveDirectorySessionFactorySettings.AD_LDAPS_PORT_SETTING.getKey(), AD_LDAPS_PORT) + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + "." + + ActiveDirectorySessionFactorySettings.AD_GC_LDAP_PORT_SETTING.getKey(), AD_GC_LDAP_PORT) + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + "." 
+ + ActiveDirectorySessionFactorySettings.AD_GC_LDAPS_PORT_SETTING.getKey(), AD_GC_LDAPS_PORT) + .build()), + + AD_LDAP_GROUPS_FROM_SEARCH(true, AD_ROLE_MAPPING, + Settings.builder() + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".type", LdapRealmSettings.LDAP_TYPE) + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".url", ActiveDirectorySessionFactoryTests.AD_LDAP_URL) + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + + ".group_search.base_dn", "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com") + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".group_search.scope", randomBoolean() ? SUB_TREE : ONE_LEVEL) + .putList(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".user_dn_templates", + "cn={0},CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com") + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".follow_referrals", + ActiveDirectorySessionFactoryTests.FOLLOW_REFERRALS) + .build()), + + AD_LDAP_GROUPS_FROM_ATTRIBUTE(true, AD_ROLE_MAPPING, + Settings.builder() + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".type", LdapRealmSettings.LDAP_TYPE) + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".url", ActiveDirectorySessionFactoryTests.AD_LDAP_URL) + .putList(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".user_dn_templates", + "cn={0},CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com") + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".follow_referrals", + ActiveDirectorySessionFactoryTests.FOLLOW_REFERRALS) + .build()); + + final boolean mapGroupsAsRoles; + final boolean loginWithCommonName; + private final RoleMappingEntry[] roleMappings; + final Settings settings; + + RealmConfig(boolean loginWithCommonName, RoleMappingEntry[] roleMappings, Settings settings) { + this.settings = settings; + this.loginWithCommonName = loginWithCommonName; + this.roleMappings = roleMappings; + this.mapGroupsAsRoles = randomBoolean(); + } + + public Settings buildSettings(Path store, String password) { + return buildSettings(store, password, 1); + } + + protected Settings buildSettings(Path store, String password, int order) { + Settings.Builder builder = Settings.builder() + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".order", order) + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".hostname_verification", false) + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".unmapped_groups_as_roles", mapGroupsAsRoles) + .put(this.settings); + if (useGlobalSSL == false) { + builder.put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".ssl.truststore.path", store) + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".ssl.truststore.password", password); + } + + return builder.build(); + } + + public List selectRoleMappings(Supplier shouldPickFileContent) { + // if mapGroupsAsRoles is turned on we use empty role mapping + if (mapGroupsAsRoles) { + return Collections.emptyList(); + } else { + return Arrays.stream(this.roleMappings) + .map(e -> e.pickEntry(shouldPickFileContent)) + .collect(Collectors.toList()); + } + } + } +} diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolverTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolverTests.java new file mode 100644 index 0000000000000..fb7ea6c5dd754 --- /dev/null +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolverTests.java @@ -0,0 +1,135 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.ldap; + +import com.unboundid.ldap.sdk.Filter; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; +import org.elasticsearch.xpack.core.security.support.NoOpLogger; +import org.junit.Before; + +import java.util.List; +import java.util.regex.Pattern; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.is; + +public class ActiveDirectoryGroupsResolverTests extends GroupsResolverTestCase { + + private static final String BRUCE_BANNER_DN = + "cn=Bruce Banner,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; + + @Before + public void setReferralFollowing() { + ldapConnection.getConnectionOptions().setFollowReferrals(AbstractActiveDirectoryTestCase.FOLLOW_REFERRALS); + } + + @SuppressWarnings("unchecked") + public void testResolveSubTree() throws Exception { + Settings settings = Settings.builder() + .put("group_search.scope", LdapSearchScope.SUB_TREE) + .put("group_search.base_dn", "DC=ad,DC=test,DC=elasticsearch,DC=com") + .put("domain_name", "ad.test.elasticsearch.com") + .build(); + ActiveDirectoryGroupsResolver resolver = new ActiveDirectoryGroupsResolver(settings); + List groups = resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, + TimeValue.timeValueSeconds(10), NoOpLogger.INSTANCE, null); + assertThat(groups, containsInAnyOrder( + containsString("Avengers"), + containsString("SHIELD"), + containsString("Geniuses"), + containsString("Philanthropists"), + containsString("CN=Users,CN=Builtin"), + containsString("Domain Users"), + containsString("Supers"))); + } + + public void testResolveOneLevel() throws Exception { + Settings settings = Settings.builder() + .put("scope", LdapSearchScope.ONE_LEVEL) + .put("group_search.base_dn", "CN=Builtin, DC=ad, DC=test, DC=elasticsearch,DC=com") + .put("domain_name", "ad.test.elasticsearch.com") + .build(); + ActiveDirectoryGroupsResolver resolver = new ActiveDirectoryGroupsResolver(settings); + List groups = resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, + TimeValue.timeValueSeconds(10), NoOpLogger.INSTANCE, null); + assertThat(groups, hasItem(containsString("Users"))); + } + + public void testResolveBaseLevel() throws Exception { + Settings settings = Settings.builder() + .put("group_search.scope", LdapSearchScope.BASE) + .put("group_search.base_dn", "CN=Users, CN=Builtin, DC=ad, DC=test, DC=elasticsearch, DC=com") + .put("domain_name", "ad.test.elasticsearch.com") + .build(); + ActiveDirectoryGroupsResolver resolver = new ActiveDirectoryGroupsResolver(settings); + List groups = resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, + TimeValue.timeValueSeconds(10), NoOpLogger.INSTANCE, null); + assertThat(groups, hasItem(containsString("CN=Users,CN=Builtin"))); + } + + public void testBuildGroupQuery() throws Exception { + //test a user with no assigned groups, other than the default groups + { + String[] expectedSids = new String[]{ + "S-1-5-32-545", //Default Users group + }; + final String dn = "CN=Jarvis, CN=Users, DC=ad, DC=test, DC=elasticsearch, DC=com"; + PlainActionFuture future = new PlainActionFuture<>(); + 
ActiveDirectoryGroupsResolver.buildGroupQuery(ldapConnection, dn, + TimeValue.timeValueSeconds(10), false, future); + Filter query = future.actionGet(); + assertValidSidQuery(query, expectedSids); + } + + //test a user with one group, other than the default groups + { + String[] expectedSids = new String[]{ + "S-1-5-32-545" //Default Users group + }; + final String dn = "CN=Odin, CN=Users, DC=ad, DC=test, DC=elasticsearch, DC=com"; + PlainActionFuture<Filter> future = new PlainActionFuture<>(); + ActiveDirectoryGroupsResolver.buildGroupQuery(ldapConnection, dn, + TimeValue.timeValueSeconds(10), false, future); + Filter query = future.actionGet(); + assertValidSidQuery(query, expectedSids); + } + } + + private void assertValidSidQuery(Filter query, String[] expectedSids) { + String queryString = query.toString(); + Pattern sidQueryPattern = Pattern.compile("\\(\\|(\\(objectSid=S(-\\d+)+\\))+\\)"); + assertThat("[" + queryString + "] didn't match the search filter pattern", + sidQueryPattern.matcher(queryString).matches(), is(true)); + for (String sid: expectedSids) { + assertThat(queryString, containsString(sid)); + } + } + + @Override + protected String ldapUrl() { + return ActiveDirectorySessionFactoryTests.AD_LDAP_URL; + } + + @Override + protected String bindDN() { + return BRUCE_BANNER_DN; + } + + @Override + protected String bindPassword() { + return ActiveDirectorySessionFactoryTests.PASSWORD; + } + + @Override + protected String trustPath() { + return "/org/elasticsearch/xpack/security/authc/ldap/support/ADtrust.jks"; + } +} \ No newline at end of file diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRunAsIT.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRunAsIT.java new file mode 100644 index 0000000000000..2aabe2a464b94 --- /dev/null +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRunAsIT.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.security.authc.ldap; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; +import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse; +import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; +import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.user.ElasticUser; +import org.hamcrest.Matchers; +import org.junit.BeforeClass; + +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; + +/** + * This tests that "run-as" works on LDAP/AD realms + */ +public class ActiveDirectoryRunAsIT extends AbstractAdLdapRealmTestCase { + + private static boolean useLegacyBindPassword; + + @BeforeClass + public static void selectRealmConfig() { + realmConfig = RealmConfig.AD; + useLegacyBindPassword = randomBoolean(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + useLegacyBindPassword = randomBoolean(); + final Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)); + switch (realmConfig) { + case AD: + builder.put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".bind_dn", "ironman@ad.test.elasticsearch.com") + .put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".user_search.pool.enabled", false); + if (useLegacyBindPassword) { + builder.put(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".bind_password", ActiveDirectorySessionFactoryTests.PASSWORD); + } else { + SecuritySettingsSource.addSecureSettings(builder, secureSettings -> { + secureSettings.setString(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + ".secure_bind_password", + ActiveDirectorySessionFactoryTests.PASSWORD); + }); + } + break; + default: + throw new IllegalStateException("Unknown realm config " + realmConfig); + } + return builder.build(); + } + + public void testRunAs() throws Exception { + String avenger = realmConfig.loginWithCommonName ? 
"Natasha Romanoff" : "blackwidow"; + final AuthenticateRequest request = new AuthenticateRequest(avenger); + final ActionFuture future = runAsClient(avenger).execute(AuthenticateAction.INSTANCE, request); + final AuthenticateResponse response = future.get(30, TimeUnit.SECONDS); + assertThat(response.user().principal(), Matchers.equalTo(avenger)); + } + + protected Client runAsClient(String user) { + final Map headers = MapBuilder.newMapBuilder() + .put(BASIC_AUTH_HEADER, UsernamePasswordToken.basicAuthHeaderValue(ElasticUser.NAME, BOOTSTRAP_PASSWORD)) + .put(AuthenticationServiceField.RUN_AS_USER_HEADER, user) + .map(); + return client().filterWithHeader(headers); + } + +} diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java new file mode 100644 index 0000000000000..a319578072def --- /dev/null +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java @@ -0,0 +1,458 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.ldap; + +import com.unboundid.ldap.sdk.LDAPException; +import com.unboundid.ldap.sdk.ResultCode; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.ldap.ActiveDirectorySessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; +import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.core.ssl.VerificationMode; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapSession; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapTestCase; +import org.elasticsearch.xpack.security.authc.ldap.support.SessionFactory; +import org.junit.After; +import org.junit.Before; + +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.ExecutionException; + +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +public class ActiveDirectorySessionFactoryTests extends AbstractActiveDirectoryTestCase { + + private final SecureString SECURED_PASSWORD = new SecureString(PASSWORD); + private ThreadPool threadPool; + + @Before + public void init() throws Exception { + threadPool = new TestThreadPool("ActiveDirectorySessionFactoryTests thread pool"); + } + + @After + public void shutdown() 
throws InterruptedException { + terminate(threadPool); + } + + @Override + public boolean enableWarningsCheck() { + return false; + } + + @SuppressWarnings("unchecked") + public void testAdAuth() throws Exception { + RealmConfig config = new RealmConfig("ad-test", + buildAdSettings(AD_LDAP_URL, AD_DOMAIN, false), + globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); + try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) { + + String userName = "ironman"; + try (LdapSession ldap = session(sessionFactory, userName, SECURED_PASSWORD)) { + assertConnectionCanReconnect(ldap.getConnection()); + List groups = groups(ldap); + assertThat(groups, containsInAnyOrder( + containsString("Geniuses"), + containsString("Billionaire"), + containsString("Playboy"), + containsString("Philanthropists"), + containsString("Avengers"), + containsString("SHIELD"), + containsString("CN=Users,CN=Builtin"), + containsString("Domain Users"), + containsString("Supers"))); + } + } + } + + @SuppressWarnings("unchecked") + public void testNetbiosAuth() throws Exception { + final String adUrl = randomFrom(AD_LDAP_URL, AD_LDAP_GC_URL); + RealmConfig config = new RealmConfig("ad-test", buildAdSettings(adUrl, AD_DOMAIN, false), globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) { + + String userName = "ades\\ironman"; + try (LdapSession ldap = session(sessionFactory, userName, SECURED_PASSWORD)) { + assertConnectionCanReconnect(ldap.getConnection()); + List groups = groups(ldap); + assertThat(groups, containsInAnyOrder( + containsString("Geniuses"), + containsString("Billionaire"), + containsString("Playboy"), + containsString("Philanthropists"), + containsString("Avengers"), + containsString("SHIELD"), + containsString("CN=Users,CN=Builtin"), + containsString("Domain Users"), + containsString("Supers"))); + } + } + } + + public void testAdAuthAvengers() throws Exception { + RealmConfig config = new RealmConfig("ad-test", buildAdSettings(AD_LDAP_URL, AD_DOMAIN, false), globalSettings, + TestEnvironment.newEnvironment(globalSettings), new ThreadContext(globalSettings)); + try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) { + + String[] users = new String[]{"cap", "hawkeye", "hulk", "ironman", "thor", "blackwidow"}; + for (String user : users) { + try (LdapSession ldap = session(sessionFactory, user, SECURED_PASSWORD)) { + assertConnectionCanReconnect(ldap.getConnection()); + assertThat("group avenger test for user " + user, groups(ldap), hasItem(containsString("Avengers"))); + } + } + } + } + + @SuppressWarnings("unchecked") + public void testAuthenticate() throws Exception { + Settings settings = buildAdSettings(AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", + LdapSearchScope.ONE_LEVEL, false); + RealmConfig config = new RealmConfig("ad-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)); + try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) { + + String userName = "hulk"; + try (LdapSession ldap = session(sessionFactory, userName, SECURED_PASSWORD)) { + assertConnectionCanReconnect(ldap.getConnection()); + List groups = 
groups(ldap); + + assertThat(groups, containsInAnyOrder( + containsString("Avengers"), + containsString("SHIELD"), + containsString("Geniuses"), + containsString("Philanthropists"), + containsString("CN=Users,CN=Builtin"), + containsString("Domain Users"), + containsString("Supers"))); + } + } + } + + @SuppressWarnings("unchecked") + public void testAuthenticateBaseUserSearch() throws Exception { + Settings settings = buildAdSettings(AD_LDAP_URL, AD_DOMAIN, "CN=Bruce Banner, CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", + LdapSearchScope.BASE, false); + RealmConfig config = new RealmConfig("ad-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)); + try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) { + + String userName = "hulk"; + try (LdapSession ldap = session(sessionFactory, userName, SECURED_PASSWORD)) { + assertConnectionCanReconnect(ldap.getConnection()); + List groups = groups(ldap); + + assertThat(groups, containsInAnyOrder( + containsString("Avengers"), + containsString("SHIELD"), + containsString("Geniuses"), + containsString("Philanthropists"), + containsString("CN=Users,CN=Builtin"), + containsString("Domain Users"), + containsString("Supers"))); + } + } + } + + public void testAuthenticateBaseGroupSearch() throws Exception { + Settings settings = Settings.builder() + .put(buildAdSettings(AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", + LdapSearchScope.ONE_LEVEL, false)) + .put(ActiveDirectorySessionFactorySettings.AD_GROUP_SEARCH_BASEDN_SETTING, + "CN=Avengers,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com") + .put(ActiveDirectorySessionFactorySettings.AD_GROUP_SEARCH_SCOPE_SETTING, LdapSearchScope.BASE) + .build(); + RealmConfig config = new RealmConfig("ad-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)); + try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) { + + String userName = "hulk"; + try (LdapSession ldap = session(sessionFactory, userName, SECURED_PASSWORD)) { + assertConnectionCanReconnect(ldap.getConnection()); + List groups = groups(ldap); + + assertThat(groups, hasItem(containsString("Avengers"))); + } + } + } + + @SuppressWarnings("unchecked") + public void testAuthenticateWithUserPrincipalName() throws Exception { + Settings settings = buildAdSettings(AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", + LdapSearchScope.ONE_LEVEL, false); + RealmConfig config = new RealmConfig("ad-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)); + try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) { + + //Login with the UserPrincipalName + String userDN = "CN=Erik Selvig,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; + try (LdapSession ldap = session(sessionFactory, "erik.selvig", SECURED_PASSWORD)) { + assertConnectionCanReconnect(ldap.getConnection()); + List groups = groups(ldap); + assertThat(ldap.userDn(), is(userDN)); + assertThat(groups, containsInAnyOrder( + containsString("Geniuses"), + containsString("CN=Users,CN=Builtin"), + containsString("Domain Users"))); + } + } + } + + @SuppressWarnings("unchecked") + public void testAuthenticateWithSAMAccountName() throws Exception { + Settings settings = 
buildAdSettings(AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", + LdapSearchScope.ONE_LEVEL, false); + RealmConfig config = new RealmConfig("ad-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)); + try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) { + + //login with sAMAccountName + String userDN = "CN=Erik Selvig,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; + try (LdapSession ldap = session(sessionFactory, "selvig", SECURED_PASSWORD)) { + assertConnectionCanReconnect(ldap.getConnection()); + assertThat(ldap.userDn(), is(userDN)); + + List groups = groups(ldap); + assertThat(groups, containsInAnyOrder( + containsString("Geniuses"), + containsString("CN=Users,CN=Builtin"), + containsString("Domain Users"))); + } + } + } + + @SuppressWarnings("unchecked") + public void testCustomUserFilter() throws Exception { + Settings settings = Settings.builder() + .put(buildAdSettings(AD_LDAP_URL, AD_DOMAIN, "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com", + LdapSearchScope.SUB_TREE, false)) + .put(ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_FILTER_SETTING, + "(&(objectclass=user)(userPrincipalName={0}@ad.test.elasticsearch.com))") + .build(); + RealmConfig config = new RealmConfig("ad-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)); + try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) { + + //Login with the UserPrincipalName + try (LdapSession ldap = session(sessionFactory, "erik.selvig", SECURED_PASSWORD)) { + assertConnectionCanReconnect(ldap.getConnection()); + List groups = groups(ldap); + assertThat(groups, containsInAnyOrder( + containsString("CN=Geniuses"), + containsString("CN=Domain Users"), + containsString("CN=Users,CN=Builtin"))); + } + } + } + + + @SuppressWarnings("unchecked") + public void testStandardLdapConnection() throws Exception { + String groupSearchBase = "DC=ad,DC=test,DC=elasticsearch,DC=com"; + String userTemplate = "CN={0},CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; + Settings settings = Settings.builder() + .put(LdapTestCase.buildLdapSettings( + new String[] { AD_LDAP_URL }, + new String[] { userTemplate }, + groupSearchBase, + LdapSearchScope.SUB_TREE, + null, + true)) + .put("follow_referrals", FOLLOW_REFERRALS) + .build(); + if (useGlobalSSL == false) { + settings = Settings.builder() + .put(settings) + .put("ssl.truststore.path", getDataPath("../ldap/support/ADtrust.jks")) + .put("ssl.truststore.password", "changeit") + .build(); + } + RealmConfig config = new RealmConfig("ad-as-ldap-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)); + LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool); + + String user = "Bruce Banner"; + try (LdapSession ldap = session(sessionFactory, user, SECURED_PASSWORD)) { + assertConnectionCanReconnect(ldap.getConnection()); + List groups = groups(ldap); + + assertThat(groups, containsInAnyOrder( + containsString("Avengers"), + containsString("SHIELD"), + containsString("Geniuses"), + containsString("Philanthropists"))); + } + } + + @SuppressWarnings("unchecked") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29840") + public void testHandlingLdapReferralErrors() throws Exception { + String 
groupSearchBase = "DC=ad,DC=test,DC=elasticsearch,DC=com"; + String userTemplate = "CN={0},CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; + final boolean ignoreReferralErrors = false; + Settings settings = LdapTestCase.buildLdapSettings( + new String[] { AD_LDAP_URL }, + new String[] { userTemplate }, + groupSearchBase, + LdapSearchScope.SUB_TREE, + null, + ignoreReferralErrors); + if (useGlobalSSL == false) { + settings = Settings.builder() + .put(settings) + .put("ssl.truststore.path", getDataPath("../ldap/support/ADtrust.jks")) + .put("ssl.truststore.password", "changeit") + .build(); + } + RealmConfig config = new RealmConfig("ad-as-ldap-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)); + LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool); + + String user = "Bruce Banner"; + try (LdapSession ldap = session(sessionFactory, user, SECURED_PASSWORD)) { + final UncategorizedExecutionException exception = expectThrows( + UncategorizedExecutionException.class, + () -> groups(ldap) + ); + final Throwable cause = exception.getCause(); + assertThat(cause, instanceOf(ExecutionException.class)); + assertThat(cause.getCause(), instanceOf(LDAPException.class)); + final LDAPException ldapException = (LDAPException) cause.getCause(); + assertThat(ldapException.getResultCode(), is(ResultCode.INVALID_CREDENTIALS)); + } + } + + @SuppressWarnings("unchecked") + public void testStandardLdapWithAttributeGroups() throws Exception { + String userTemplate = "CN={0},CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; + Settings settings = LdapTestCase.buildLdapSettings(new String[] { AD_LDAP_URL }, userTemplate, false); + if (useGlobalSSL == false) { + settings = Settings.builder() + .put(settings) + .put("ssl.truststore.path", getDataPath("../ldap/support/ADtrust.jks")) + .put("ssl.truststore.password", "changeit") + .build(); + } + RealmConfig config = new RealmConfig("ad-as-ldap-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), + new ThreadContext(globalSettings)); + LdapSessionFactory sessionFactory = new LdapSessionFactory(config, sslService, threadPool); + + String user = "Bruce Banner"; + try (LdapSession ldap = session(sessionFactory, user, SECURED_PASSWORD)) { + assertConnectionCanReconnect(ldap.getConnection()); + List groups = groups(ldap); + + assertThat(groups, containsInAnyOrder( + containsString("Avengers"), + containsString("SHIELD"), + containsString("Geniuses"), + containsString("Philanthropists"))); + } + } + + public void testADLookup() throws Exception { + RealmConfig config = new RealmConfig("ad-test", + buildAdSettings(AD_LDAP_URL, AD_DOMAIN, false, true), + globalSettings, TestEnvironment.newEnvironment(globalSettings), new ThreadContext(Settings.EMPTY)); + try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) { + + List users = randomSubsetOf(Arrays.asList("cap", "hawkeye", "hulk", "ironman", "thor", "blackwidow", + "cap@ad.test.elasticsearch.com", "hawkeye@ad.test.elasticsearch.com", "hulk@ad.test.elasticsearch.com", + "ironman@ad.test.elasticsearch.com", "thor@ad.test.elasticsearch.com", "blackwidow@ad.test.elasticsearch.com", + "ADES\\cap", "ADES\\hawkeye", "ADES\\hulk", "ADES\\ironman", "ADES\\thor", "ADES\\blackwidow")); + for (String user : users) { + try (LdapSession ldap = unauthenticatedSession(sessionFactory, user)) { + assertConnectionCanReconnect(ldap.getConnection()); + 
assertNotNull("ldap session was null for user " + user, ldap); + assertThat("group avenger test for user " + user, groups(ldap), hasItem(containsString("Avengers"))); + } + } + } + } + + private Settings buildAdSettings(String ldapUrl, String adDomainName, boolean hostnameVerification) { + return buildAdSettings(ldapUrl, adDomainName, hostnameVerification, randomBoolean()); + } + + private Settings buildAdSettings(String ldapUrl, String adDomainName, boolean hostnameVerification, boolean useBindUser) { + Settings.Builder builder = Settings.builder() + .put(SessionFactorySettings.URLS_SETTING, ldapUrl) + .put(ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING, adDomainName) + .put(ActiveDirectorySessionFactorySettings.AD_LDAP_PORT_SETTING.getKey(), AD_LDAP_PORT) + .put(ActiveDirectorySessionFactorySettings.AD_LDAPS_PORT_SETTING.getKey(), AD_LDAPS_PORT) + .put(ActiveDirectorySessionFactorySettings.AD_GC_LDAP_PORT_SETTING.getKey(), AD_GC_LDAP_PORT) + .put(ActiveDirectorySessionFactorySettings.AD_GC_LDAPS_PORT_SETTING.getKey(), AD_GC_LDAPS_PORT) + .put("follow_referrals", FOLLOW_REFERRALS); + if (randomBoolean()) { + builder.put("ssl.verification_mode", hostnameVerification ? VerificationMode.FULL : VerificationMode.CERTIFICATE); + } else { + builder.put(SessionFactorySettings.HOSTNAME_VERIFICATION_SETTING, hostnameVerification); + } + + if (useGlobalSSL == false) { + builder.put("ssl.truststore.path", getDataPath("../ldap/support/ADtrust.jks")) + .put("ssl.truststore.password", "changeit"); + } + + if (useBindUser) { + final String user = randomFrom("cap", "hawkeye", "hulk", "ironman", "thor", "blackwidow", "cap@ad.test.elasticsearch.com", + "hawkeye@ad.test.elasticsearch.com", "hulk@ad.test.elasticsearch.com", "ironman@ad.test.elasticsearch.com", + "thor@ad.test.elasticsearch.com", "blackwidow@ad.test.elasticsearch.com", "ADES\\cap", "ADES\\hawkeye", "ADES\\hulk", + "ADES\\ironman", "ADES\\thor", "ADES\\blackwidow", "CN=Bruce Banner,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"); + final boolean poolingEnabled = randomBoolean(); + builder.put("bind_dn", user) + .put("bind_password", PASSWORD) + .put("user_search.pool.enabled", poolingEnabled); + logger.info("using bind user [{}] with pooling enabled [{}]", user, poolingEnabled); + } + return builder.build(); + } + + private LdapSession session(SessionFactory factory, String username, SecureString password) { + PlainActionFuture future = new PlainActionFuture<>(); + factory.session(username, password, future); + return future.actionGet(); + } + + private LdapSession unauthenticatedSession(SessionFactory factory, String username) { + PlainActionFuture future = new PlainActionFuture<>(); + factory.unauthenticatedSession(username, future); + return future.actionGet(); + } + + private List groups(LdapSession ldapSession) { + PlainActionFuture> future = new PlainActionFuture<>(); + ldapSession.groups(future); + return future.actionGet(); + } + + static ActiveDirectorySessionFactory getActiveDirectorySessionFactory(RealmConfig config, SSLService sslService, ThreadPool threadPool) + throws LDAPException { + ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); + if (sessionFactory.getConnectionPool() != null) { + // don't use this in production + // used here to catch bugs that might get masked by an automatic retry + sessionFactory.getConnectionPool().setRetryFailedOperationsDueToInvalidConnections(false); + } + return sessionFactory; + } +} diff --git 
a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/GroupMappingIT.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/GroupMappingIT.java new file mode 100644 index 0000000000000..a56e1fefcba63 --- /dev/null +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/GroupMappingIT.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.ldap; + +import java.io.IOException; + +/** + * This tests the group to role mappings from LDAP sources provided by the super class - available from super.realmConfig. + * The super class will provide appropriate group mappings via configGroupMappings() + */ +public class GroupMappingIT extends AbstractAdLdapRealmTestCase { + public void testAuthcAuthz() throws IOException { + String avenger = realmConfig.loginWithCommonName ? "Natasha Romanoff" : "blackwidow"; + assertAccessAllowed(avenger, "avengers"); + } + + public void testGroupMapping() throws IOException { + String asgardian = "odin"; + String securityPhilanthropist = realmConfig.loginWithCommonName ? "Bruce Banner" : "hulk"; + String securityMappedUser = realmConfig.loginWithCommonName ? "Phil Coulson" : "phil"; + String securityAsgardianPhilanthropist = "thor"; + String noGroupUser = "jarvis"; + + assertAccessAllowed(asgardian, ASGARDIAN_INDEX); + assertAccessAllowed(securityAsgardianPhilanthropist, ASGARDIAN_INDEX); + assertAccessDenied(securityPhilanthropist, ASGARDIAN_INDEX); + assertAccessDenied(securityMappedUser, ASGARDIAN_INDEX); + assertAccessDenied(noGroupUser, ASGARDIAN_INDEX); + + assertAccessAllowed(securityPhilanthropist, PHILANTHROPISTS_INDEX); + assertAccessAllowed(securityAsgardianPhilanthropist, PHILANTHROPISTS_INDEX); + assertAccessDenied(asgardian, PHILANTHROPISTS_INDEX); + assertAccessDenied(securityMappedUser, PHILANTHROPISTS_INDEX); + assertAccessDenied(noGroupUser, PHILANTHROPISTS_INDEX); + + assertAccessAllowed(securityMappedUser, SECURITY_INDEX); + assertAccessAllowed(securityPhilanthropist, SECURITY_INDEX); + assertAccessAllowed(securityAsgardianPhilanthropist, SECURITY_INDEX); + assertAccessDenied(asgardian, SECURITY_INDEX); + assertAccessDenied(noGroupUser, SECURITY_INDEX); + } +} diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/MultiGroupMappingIT.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/MultiGroupMappingIT.java new file mode 100644 index 0000000000000..5e0dda8fe2da5 --- /dev/null +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/MultiGroupMappingIT.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.security.authc.ldap; + +import org.junit.BeforeClass; + +import java.io.IOException; +import java.util.ArrayList; + +/** + * This tests the mapping of multiple groups to a role in a file based role-mapping + */ +public class MultiGroupMappingIT extends AbstractAdLdapRealmTestCase { + + @BeforeClass + public static void setRoleMappingType() { + final String extraContent = "MarvelCharacters:\n" + + " - \"CN=SHIELD,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com\"\n" + + " - \"CN=Avengers,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com\"\n" + + " - \"CN=Gods,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com\"\n" + + " - \"CN=Philanthropists,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com\"\n" + + " - \"cn=SHIELD,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com\"\n" + + " - \"cn=Avengers,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com\"\n" + + " - \"cn=Gods,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com\"\n" + + " - \"cn=Philanthropists,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com\"\n"; + roleMappings = new ArrayList<>(roleMappings); + roleMappings.add(new RoleMappingEntry(extraContent, null)); + } + + @Override + protected String configRoles() { + return super.configRoles() + + "\n" + + "MarvelCharacters:\n" + + " cluster: [ NONE ]\n" + + " indices:\n" + + " - names: 'marvel_comics'\n" + + " privileges: [ all ]\n"; + } + + public void testGroupMapping() throws IOException { + String asgardian = "odin"; + String securityPhilanthropist = realmConfig.loginWithCommonName ? "Bruce Banner" : "hulk"; + String security = realmConfig.loginWithCommonName ? "Phil Coulson" : "phil"; + String securityAsgardianPhilanthropist = "thor"; + String noGroupUser = "jarvis"; + + assertAccessAllowed(asgardian, "marvel_comics"); + assertAccessAllowed(securityAsgardianPhilanthropist, "marvel_comics"); + assertAccessAllowed(securityPhilanthropist, "marvel_comics"); + assertAccessAllowed(security, "marvel_comics"); + assertAccessDenied(noGroupUser, "marvel_comics"); + } +} diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/MultipleAdRealmIT.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/MultipleAdRealmIT.java new file mode 100644 index 0000000000000..c4e07a846fd56 --- /dev/null +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/MultipleAdRealmIT.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.security.authc.ldap; + +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.settings.Settings; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +/** + * This tests that configurations that contain two AD realms work correctly. 
+ * The required behaviour is that users from both realms (directory servers) can be authenticated using + just their userid (the AuthenticationService tries them in order). + */ +public class MultipleAdRealmIT extends AbstractAdLdapRealmTestCase { + + private static RealmConfig secondaryRealmConfig; + + @BeforeClass + public static void setupSecondaryRealm() { + // Pick a secondary realm that has the inverse value for 'loginWithCommonName' compared with the primary realm + final List<RealmConfig> configs = Arrays.stream(RealmConfig.values()) + .filter(config -> config.loginWithCommonName != AbstractAdLdapRealmTestCase.realmConfig.loginWithCommonName) + .filter(config -> config.name().startsWith("AD")) + .collect(Collectors.toList()); + secondaryRealmConfig = randomFrom(configs); + ESLoggerFactory.getLogger("test") + .info("running test with secondary realm configuration [{}], with direct group to role mapping [{}]. Settings [{}]", + secondaryRealmConfig, secondaryRealmConfig.mapGroupsAsRoles, secondaryRealmConfig.settings); + + // It's easier to test 2 realms when using file-based role mapping, and for the purposes of + // this test, there's no need to test native mappings. + AbstractAdLdapRealmTestCase.roleMappings = realmConfig.selectRoleMappings(() -> true); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder builder = Settings.builder(); + builder.put(super.nodeSettings(nodeOrdinal)); + + Path store = getDataPath(TESTNODE_KEYSTORE); + final List<RoleMappingEntry> secondaryRoleMappings = secondaryRealmConfig.selectRoleMappings(() -> true); + final Settings secondarySettings = super.buildRealmSettings(secondaryRealmConfig, secondaryRoleMappings, store); + secondarySettings.keySet().forEach(name -> { + String newName = name.replace(XPACK_SECURITY_AUTHC_REALMS_EXTERNAL, XPACK_SECURITY_AUTHC_REALMS_EXTERNAL + "2"); + builder.copy(newName, name, secondarySettings); + }); + + return builder.build(); + } + + /** + * Test that both realms support user login. Implementation-wise, this means that if the first realm rejects the authentication attempt, + * then the second realm will be tried. + * Because one realm is using "common name" (cn) for login, and the other uses the "userid" (sAMAccountName) [see + * {@link #setupSecondaryRealm()}], this is simply a matter of checking that we can authenticate with both identifiers. + */ + public void testCanAuthenticateAgainstBothRealms() throws IOException { + assertAccessAllowed("Natasha Romanoff", "avengers"); + assertAccessAllowed("blackwidow", "avengers"); + } + +} diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java new file mode 100644 index 0000000000000..d6fc22a5cf579 --- /dev/null +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.security.authc.ldap; + +import com.unboundid.ldap.sdk.Attribute; +import com.unboundid.ldap.sdk.SearchRequest; +import com.unboundid.ldap.sdk.SearchScope; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.security.support.NoOpLogger; +import org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils; + +import java.util.Collection; +import java.util.List; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasItems; + +public class UserAttributeGroupsResolverTests extends GroupsResolverTestCase { + + public static final String BRUCE_BANNER_DN = "cn=Bruce Banner,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"; + + @SuppressWarnings("unchecked") + public void testResolve() throws Exception { + //falling back on the 'memberOf' attribute + UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(Settings.EMPTY); + List<String> groups = + resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(20), NoOpLogger.INSTANCE, null); + assertThat(groups, containsInAnyOrder( + containsString("Avengers"), + containsString("SHIELD"), + containsString("Geniuses"), + containsString("Philanthropists"))); + } + + @SuppressWarnings("unchecked") + public void testResolveFromPreloadedAttributes() throws Exception { + SearchRequest preSearch = new SearchRequest(BRUCE_BANNER_DN, SearchScope.BASE, LdapUtils.OBJECT_CLASS_PRESENCE_FILTER, "memberOf"); + final Collection<Attribute> attributes = ldapConnection.searchForEntry(preSearch).getAttributes(); + + UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(Settings.EMPTY); + List<String> groups = + resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(20), NoOpLogger.INSTANCE, attributes); + assertThat(groups, containsInAnyOrder( + containsString("Avengers"), + containsString("SHIELD"), + containsString("Geniuses"), + containsString("Philanthropists"))); + } + + @SuppressWarnings("unchecked") + public void testResolveCustomGroupAttribute() throws Exception { + Settings settings = Settings.builder() + .put("user_group_attribute", "seeAlso") + .build(); + UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(settings); + List<String> groups = + resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(20), NoOpLogger.INSTANCE, null); + assertThat(groups, hasItems(containsString("Avengers"))); //seeAlso only has Avengers + } + + public void testResolveInvalidGroupAttribute() throws Exception { + Settings settings = Settings.builder() + .put("user_group_attribute", "doesntExist") + .build(); + UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(settings); + List<String> groups = + resolveBlocking(resolver, ldapConnection, BRUCE_BANNER_DN, TimeValue.timeValueSeconds(20), NoOpLogger.INSTANCE, null); + assertThat(groups, empty()); + } + + @Override + protected String ldapUrl() { + return ActiveDirectorySessionFactoryTests.AD_LDAP_URL; + } + + @Override + protected String bindDN() { + return BRUCE_BANNER_DN; + } + + @Override + protected String bindPassword() { + return ActiveDirectorySessionFactoryTests.PASSWORD; + } + + @Override + protected String trustPath() { + return "/org/elasticsearch/xpack/security/authc/ldap/support/ADtrust.jks"; + } +} diff --git a/x-pack/qa/third-party/build.gradle
b/x-pack/qa/third-party/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/qa/third-party/hipchat/build.gradle b/x-pack/qa/third-party/hipchat/build.gradle new file mode 100644 index 0000000000000..cd37d6e738e64 --- /dev/null +++ b/x-pack/qa/third-party/hipchat/build.gradle @@ -0,0 +1,32 @@ +import org.elasticsearch.gradle.LoggedExec + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('watcher'), configuration: 'runtime') +} + +String integrationAccount = System.getenv('hipchat_auth_token_integration') +String userAccount = System.getenv('hipchat_auth_token_user') +String v1Account = System.getenv('hipchat_auth_token_v1') + +integTestCluster { + setting 'xpack.security.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + setting 'logger.org.elasticsearch.xpack.watcher', 'DEBUG' + setting 'xpack.notification.hipchat.account.integration_account.profile', 'integration' + setting 'xpack.notification.hipchat.account.integration_account.room', 'test-watcher' + setting 'xpack.notification.hipchat.account.user_account.profile', 'user' + setting 'xpack.notification.hipchat.account.v1_account.profile', 'v1' + keystoreSetting 'xpack.notification.hipchat.account.integration_account.secure_auth_token', integrationAccount + keystoreSetting 'xpack.notification.hipchat.account.user_account.secure_auth_token', userAccount + keystoreSetting 'xpack.notification.hipchat.account.v1_account.secure_auth_token', v1Account +} + +if (!integrationAccount && !userAccount && !v1Account) { + integTest.enabled = false +} diff --git a/x-pack/qa/third-party/hipchat/src/test/java/org/elasticsearch/smoketest/WatcherHipchatYamlTestSuiteIT.java b/x-pack/qa/third-party/hipchat/src/test/java/org/elasticsearch/smoketest/WatcherHipchatYamlTestSuiteIT.java new file mode 100644 index 0000000000000..785b9d3a89249 --- /dev/null +++ b/x-pack/qa/third-party/hipchat/src/test/java/org/elasticsearch/smoketest/WatcherHipchatYamlTestSuiteIT.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.smoketest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.is; + +/** Runs rest tests against external cluster */ +public class WatcherHipchatYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + public WatcherHipchatYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + + @Before + public void startWatcher() throws Exception { + final List watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES); + assertBusy(() -> { + try { + getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + + for (String template : watcherTemplates) { + ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template", + singletonMap("name", template), emptyList(), emptyMap()); + assertThat(templateExistsResponse.getStatusCode(), is(200)); + } + + ClientYamlTestResponse response = + getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + String state = (String) response.evaluate("stats.0.watcher_state"); + assertThat(state, is("started")); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + + @After + public void stopWatcher() throws Exception { + assertBusy(() -> { + try { + getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap()); + ClientYamlTestResponse response = + getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + String state = (String) response.evaluate("stats.0.watcher_state"); + assertThat(state, is("stopped")); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } +} diff --git a/x-pack/qa/third-party/hipchat/src/test/resources/rest-api-spec/test/hipchat/10_hipchat.yml b/x-pack/qa/third-party/hipchat/src/test/resources/rest-api-spec/test/hipchat/10_hipchat.yml new file mode 100644 index 0000000000000..9277ddae6c6e9 --- /dev/null +++ b/x-pack/qa/third-party/hipchat/src/test/resources/rest-api-spec/test/hipchat/10_hipchat.yml @@ -0,0 +1,276 @@ +--- +"Test Hipchat v1 account Action": + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.watcher.put_watch: + id: "hipchat_v1_watch" + body: > + { + "trigger": { + "schedule": { + "interval": "1d" + } + }, + "input": { + "simple": { + "foo": "something from input" + } + }, + "actions": { + "my_hipchat_action": { + "hipchat": { + "account": "v1_account", + "message": { + "from" : "watcher-tests", + "room" : ["test-watcher", "test-watcher-2", "test watcher with spaces"], + "body": "From input {{ctx.payload.foo}}, and some tests (facepalm) in the v1 account", + "format": "text", + 
"color": "red", + "notify": true + } + } + } + } + } + + - do: + xpack.watcher.execute_watch: + id: "hipchat_v1_watch" + body: > + { + "record_execution": true + } + + - match: { watch_record.trigger_event.type: "manual" } + - match: { watch_record.state: "executed" } + - match: { watch_record.result.actions.0.id: "my_hipchat_action" } + - match: { watch_record.result.actions.0.type: "hipchat" } + - match: { watch_record.result.actions.0.status: "success" } + - match: { watch_record.result.actions.0.hipchat.account: "v1_account" } + - match: { watch_record.result.actions.0.hipchat.sent_messages.0.room: "test-watcher" } + - match: { watch_record.result.actions.0.hipchat.sent_messages.1.room: "test-watcher-2" } + - match: { watch_record.result.actions.0.hipchat.sent_messages.2.room: "test watcher with spaces" } + - match: { watch_record.result.actions.0.hipchat.sent_messages.0.status: "success" } + - match: { watch_record.result.actions.0.hipchat.sent_messages.1.status: "success" } + - match: { watch_record.result.actions.0.hipchat.sent_messages.2.status: "success" } + + # Waits for the watcher history index to be available + - do: + cluster.health: + index: ".watcher-history-*" + wait_for_no_relocating_shards: true + timeout: 60s + + - do: + indices.refresh: {} + + - do: + search: + index: ".watcher-history-*" + body: > + { + "query" : { + "term" : { + "watch_id" : "hipchat_v1_watch" + } + } + } + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.state: "executed" } + - match: { hits.hits.0._source.result.actions.0.id: "my_hipchat_action" } + - match: { hits.hits.0._source.result.actions.0.type: "hipchat" } + - match: { hits.hits.0._source.result.actions.0.status: "success" } + - match: { hits.hits.0._source.result.actions.0.hipchat.account: "v1_account" } + - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.0.room: "test-watcher" } + - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.1.room: "test-watcher-2" } + - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.2.room: "test watcher with spaces" } + - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.0.status: "success" } + - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.1.status: "success" } + - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.2.status: "success" } + +--- +"Test Hipchat integration account Action": + - do: + cluster.health: + wait_for_status: yellow + + # custom rooms, custom users and custom from are not allowed for this account type to be configured + - do: + xpack.watcher.put_watch: + id: "hipchat_integration_account_watch" + body: > + { + "trigger": { + "schedule": { + "interval": "1d" + } + }, + "input": { + "simple": { + "foo": "something from input" + } + }, + "actions": { + "my_hipchat_action": { + "hipchat": { + "account": "integration_account", + "message": { + "body": "From input {{ctx.payload.foo}}, and some tests (facepalm) in the integration account", + "format": "text", + "color": "red", + "notify": true + } + } + } + } + } + + - do: + xpack.watcher.execute_watch: + id: "hipchat_integration_account_watch" + body: > + { + "record_execution": true + } + + - match: { watch_record.trigger_event.type: "manual" } + - match: { watch_record.state: "executed" } + - match: { watch_record.result.actions.0.id: "my_hipchat_action" } + - match: { watch_record.result.actions.0.type: "hipchat" } + - match: { watch_record.result.actions.0.status: "success" } + - match: { 
watch_record.result.actions.0.hipchat.account: "integration_account" } + - match: { watch_record.result.actions.0.hipchat.sent_messages.0.status: "success" } + - match: { watch_record.result.actions.0.hipchat.sent_messages.0.room: "test-watcher" } + + # Waits for the watcher history index to be available + - do: + cluster.health: + index: ".watcher-history-*" + wait_for_no_relocating_shards: true + timeout: 60s + + - do: + indices.refresh: {} + + - do: + search: + index: ".watcher-history-*" + body: > + { + "query" : { + "term" : { + "watch_id" : "hipchat_integration_account_watch" + } + } + } + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.state: "executed" } + - match: { hits.hits.0._source.result.actions.0.id: "my_hipchat_action" } + - match: { hits.hits.0._source.result.actions.0.type: "hipchat" } + - match: { hits.hits.0._source.result.actions.0.status: "success" } + - match: { hits.hits.0._source.result.actions.0.hipchat.account: "integration_account" } + - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.0.room: "test-watcher" } + - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.0.status: "success" } + +--- +"Test Hipchat user account Action": + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.watcher.put_watch: + id: "hipchat_user_account_watch" + body: > + { + "trigger": { + "schedule": { + "interval": "1d" + } + }, + "input": { + "simple": { + "foo": "something from input" + } + }, + "actions": { + "my_hipchat_action": { + "hipchat": { + "account": "user_account", + "message": { + "user" : [ "watcher@elastic.co" ], + "room" : ["test-watcher", "test-watcher-2", "test watcher with spaces"], + "body": "From input {{ctx.payload.foo}}, and some tests (facepalm) in the user_account test. 
bold", + "format": "html" + } + } + } + } + } + + - do: + xpack.watcher.execute_watch: + id: "hipchat_user_account_watch" + body: > + { + "record_execution": true + } + + - match: { watch_record.trigger_event.type: "manual" } + - match: { watch_record.state: "executed" } + - match: { watch_record.result.actions.0.id: "my_hipchat_action" } + - match: { watch_record.result.actions.0.type: "hipchat" } + - match: { watch_record.result.actions.0.status: "success" } + - match: { watch_record.result.actions.0.hipchat.account: "user_account" } + - match: { watch_record.result.actions.0.hipchat.sent_messages.0.room: "test-watcher" } + - match: { watch_record.result.actions.0.hipchat.sent_messages.1.room: "test-watcher-2" } + - match: { watch_record.result.actions.0.hipchat.sent_messages.2.room: "test watcher with spaces" } + - match: { watch_record.result.actions.0.hipchat.sent_messages.3.user: "watcher@elastic.co" } + - match: { watch_record.result.actions.0.hipchat.sent_messages.0.status: "success" } + - match: { watch_record.result.actions.0.hipchat.sent_messages.1.status: "success" } + - match: { watch_record.result.actions.0.hipchat.sent_messages.2.status: "success" } + - match: { watch_record.result.actions.0.hipchat.sent_messages.3.status: "success" } + + # Waits for the watcher history index to be available + - do: + cluster.health: + index: ".watcher-history-*" + wait_for_no_relocating_shards: true + timeout: 60s + + - do: + indices.refresh: {} + + - do: + search: + index: ".watcher-history-*" + body: > + { + "query" : { + "term" : { + "watch_id" : "hipchat_user_account_watch" + } + } + } + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.state: "executed" } + - match: { hits.hits.0._source.result.actions.0.id: "my_hipchat_action" } + - match: { hits.hits.0._source.result.actions.0.type: "hipchat" } + - match: { hits.hits.0._source.result.actions.0.status: "success" } + - match: { hits.hits.0._source.result.actions.0.hipchat.account: "user_account" } + - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.0.room: "test-watcher" } + - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.1.room: "test-watcher-2" } + - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.2.room: "test watcher with spaces" } + - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.3.user: "watcher@elastic.co" } + - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.0.status: "success" } + - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.1.status: "success" } + - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.2.status: "success" } + - match: { hits.hits.0._source.result.actions.0.hipchat.sent_messages.1.status: "success" } + + diff --git a/x-pack/qa/third-party/jira/build.gradle b/x-pack/qa/third-party/jira/build.gradle new file mode 100644 index 0000000000000..078fed4dd36e2 --- /dev/null +++ b/x-pack/qa/third-party/jira/build.gradle @@ -0,0 +1,88 @@ +import groovy.json.JsonSlurper + +import javax.net.ssl.HttpsURLConnection +import java.nio.charset.StandardCharsets + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('watcher'), configuration: 'runtime') +} + +ext { + jiraUrl = System.getenv('jira_url') + jiraUser = System.getenv('jira_user') + jiraPassword = System.getenv('jira_password') + jiraProject = 
System.getenv('jira_project') +} + +integTestCluster { + setting 'xpack.security.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + setting 'logger.org.elasticsearch.xpack.watcher', 'DEBUG' + keystoreSetting 'xpack.notification.jira.account.test.secure_url', jiraUrl + keystoreSetting 'xpack.notification.jira.account.test.secure_user', jiraUser + keystoreSetting 'xpack.notification.jira.account.test.secure_password', jiraPassword + setting 'xpack.notification.jira.account.test.issue_defaults.project.key', jiraProject + setting 'xpack.notification.jira.account.test.issue_defaults.issuetype.name', 'Bug' + setting 'xpack.notification.jira.account.test.issue_defaults.labels.0', 'integration-tests' +} + +task cleanJira(type: DefaultTask) { + doLast { + List issues = jiraIssues(jiraProject) + assert issues instanceof List + issues.forEach { + // See https://docs.atlassian.com/jira/REST/cloud/#api/2/issue-deleteIssue + logger.debug("Deleting JIRA issue [${it}]") + jiraHttpRequest("issue/${it}", "DELETE", 204) + } + } +} + +// require network access for this one, exit early instead of starting up the cluster if we dont have network +if (!jiraUrl && !jiraUser && !jiraPassword && !jiraProject) { + integTest.enabled = false +} else { + integTestRunner.finalizedBy cleanJira +} + +/** List all issues associated to a given Jira project **/ +def jiraIssues(projectKey) { + // See https://docs.atlassian.com/jira/REST/cloud/#api/2/search-search + def response = jiraHttpRequest("search?maxResults=100&fields=id,self,key&jql=project%3D${projectKey}", "GET", 200) + assert response.issues instanceof List + return response.issues.findAll {it.key.startsWith(projectKey)}.collect {it.key} +} + +/** Execute a HTTP request against the Jira server instance **/ +def jiraHttpRequest(String endpoint, String method, int successCode) { + HttpsURLConnection connection = null; + try { + byte[] credentials = "${jiraUser}:${jiraPassword}".getBytes(StandardCharsets.UTF_8); + connection = (HttpsURLConnection) new URL("${jiraUrl}/rest/api/2/${endpoint}").openConnection(); + connection.setRequestProperty("Authorization", "Basic " + Base64.getEncoder().encodeToString(credentials)); + connection.setRequestMethod(method); + connection.connect(); + + if (connection.getResponseCode() == successCode) { + String response = connection.getInputStream().getText(StandardCharsets.UTF_8.name()); + if (response != null && response.length() > 0) { + return new JsonSlurper().parseText(response) + } + } else { + throw new GradleException("Unexpected response code for [${endpoint}]: got ${connection.getResponseCode()} but expected ${successCode}") + } + } catch (Exception e) { + logger.error("Failed to delete JIRA issues after test execution", e) + } finally { + if (connection != null) { + connection.disconnect(); + } + } + return null +} diff --git a/x-pack/qa/third-party/jira/src/test/java/org/elasticsearch/smoketest/WatcherJiraYamlTestSuiteIT.java b/x-pack/qa/third-party/jira/src/test/java/org/elasticsearch/smoketest/WatcherJiraYamlTestSuiteIT.java new file mode 100644 index 0000000000000..8218d0e18f67b --- /dev/null +++ b/x-pack/qa/third-party/jira/src/test/java/org/elasticsearch/smoketest/WatcherJiraYamlTestSuiteIT.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.smoketest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.is; + +/** Runs rest tests against external cluster */ +public class WatcherJiraYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + public WatcherJiraYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + + @Before + public void startWatcher() throws Exception { + final List watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES); + assertBusy(() -> { + try { + getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + + for (String template : watcherTemplates) { + ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template", + singletonMap("name", template), emptyList(), emptyMap()); + assertThat(templateExistsResponse.getStatusCode(), is(200)); + } + + ClientYamlTestResponse response = + getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + String state = (String) response.evaluate("stats.0.watcher_state"); + assertThat(state, is("started")); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + + @After + public void stopWatcher() throws Exception { + assertBusy(() -> { + try { + getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap()); + ClientYamlTestResponse response = + getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + String state = (String) response.evaluate("stats.0.watcher_state"); + assertThat(state, is("stopped")); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } +} diff --git a/x-pack/qa/third-party/jira/src/test/resources/rest-api-spec/test/jira/10_jira.yml b/x-pack/qa/third-party/jira/src/test/resources/rest-api-spec/test/jira/10_jira.yml new file mode 100644 index 0000000000000..cc44c43349265 --- /dev/null +++ b/x-pack/qa/third-party/jira/src/test/resources/rest-api-spec/test/jira/10_jira.yml @@ -0,0 +1,343 @@ +--- +"Test Jira Action": + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.watcher.put_watch: + id: "jira_watch" + body: > + { + "metadata": { + "custom_title": "Hello from" + }, + "trigger": { + "schedule": { + "interval": "1d" + } + }, + "input": { + "simple": { + } + }, + "condition": { + "always": {} + }, + "actions": { + "create_jira_issue": { + "jira": { + "account": "test", + "fields": { + "summary": "{{ctx.metadata.custom_title}} {{ctx.watch_id}}", + "description": "Issue created by the REST 
integration test [/watcher/actions/20_jira.yaml]", + "issuetype" : { + "name": "Bug" + } + } + } + } + } + } + - match: { _id: "jira_watch" } + - match: { created: true } + + - do: + xpack.watcher.execute_watch: + id: "jira_watch" + body: > + { + "trigger_data" : { + "triggered_time" : "2012-12-12T12:12:12.120Z", + "scheduled_time" : "2000-12-12T12:12:12.120Z" + }, + "record_execution": true + } + + - match: { watch_record.watch_id: "jira_watch" } + - match: { watch_record.trigger_event.type: "manual" } + - match: { watch_record.trigger_event.triggered_time: "2012-12-12T12:12:12.120Z" } + - match: { watch_record.trigger_event.manual.schedule.scheduled_time: "2000-12-12T12:12:12.120Z" } + - match: { watch_record.state: "executed" } + + # Waits for the watcher history index to be available + - do: + cluster.health: + index: ".watcher-history-*" + wait_for_no_relocating_shards: true + timeout: 60s + + - do: + indices.refresh: {} + + - do: + search: + index: ".watcher-history-*" + - match: { hits.total: 1 } + + - match: { hits.hits.0._type: "doc" } + - match: { hits.hits.0._source.watch_id: "jira_watch" } + - match: { hits.hits.0._source.state: "executed" } + + - match: { hits.hits.0._source.result.actions.0.id: "create_jira_issue" } + - match: { hits.hits.0._source.result.actions.0.type: "jira" } + - match: { hits.hits.0._source.result.actions.0.status: "success" } + - match: { hits.hits.0._source.result.actions.0.jira.account: "test" } + - match: { hits.hits.0._source.result.actions.0.jira.fields.summary: "Hello from jira_watch" } + - match: { hits.hits.0._source.result.actions.0.jira.fields.issuetype.name: "Bug" } + - match: { hits.hits.0._source.result.actions.0.jira.fields.project.key: "XWT" } + - match: { hits.hits.0._source.result.actions.0.jira.fields.labels.0: "integration-tests" } + - match: { hits.hits.0._source.result.actions.0.jira.result.id: /\d+/ } + - match: { hits.hits.0._source.result.actions.0.jira.result.key: /XWT-\d+/ } + - match: { hits.hits.0._source.result.actions.0.jira.result.self: /http(.)*/ } + + - set: { hits.hits.0._id: id } + - set: { hits.hits.0._source.result.actions.0.jira.result.self: self } + + - do: + search: + index: ".watcher-history-*" + body: + query: + match: + result.actions.jira.fields.project.key: "XWT" + - match: { hits.total: 1 } + - match: { hits.hits.0._id: $id } + - match: { hits.hits.0._source.result.actions.0.jira.result.self: $self } + + - do: + search: + index: ".watcher-history-*" + body: + query: + match: + result.actions.jira.fields.summary: "hello jira_watch" + - match: { hits.total: 1 } + - match: { hits.hits.0._id: $id } + - match: { hits.hits.0._source.result.actions.0.jira.result.self: $self } + +--- +"Test Jira Action with Error": + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.watcher.put_watch: + id: "wrong_jira_watch" + body: > + { + "trigger": { + "schedule": { + "interval": "1d" + } + }, + "input": { + "simple": { + } + }, + "condition": { + "always": {} + }, + "actions": { + "fail_to_create_jira_issue": { + "jira": { + "account": "test", + "fields": { + "summary": "Hello from {{ctx.watch_id}}", + "description": "This Jira issue does not have a type (see below) so it won't be created at all", + "issuetype" : { + "name": null + } + } + } + } + } + } + - match: { _id: "wrong_jira_watch" } + - match: { created: true } + + - do: + xpack.watcher.execute_watch: + id: "wrong_jira_watch" + body: > + { + "trigger_data" : { + "triggered_time" : "2012-12-12T12:12:12.120Z", + "scheduled_time" : 
"2000-12-12T12:12:12.120Z" + }, + "record_execution": true + } + + - match: { watch_record.watch_id: "wrong_jira_watch" } + - match: { watch_record.trigger_event.type: "manual" } + - match: { watch_record.trigger_event.triggered_time: "2012-12-12T12:12:12.120Z" } + - match: { watch_record.trigger_event.manual.schedule.scheduled_time: "2000-12-12T12:12:12.120Z" } + - match: { watch_record.state: "executed" } + + # Waits for the watcher history index to be available + - do: + cluster.health: + index: ".watcher-history-*" + wait_for_no_relocating_shards: true + timeout: 60s + + - do: + indices.refresh: {} + + - do: + search: + index: ".watcher-history-*" + body: + query: + match: + result.actions.status: "failure" + + - match: { hits.total: 1 } + + - match: { hits.hits.0._type: "doc" } + - match: { hits.hits.0._source.watch_id: "wrong_jira_watch" } + - match: { hits.hits.0._source.state: "executed" } + + - match: { hits.hits.0._source.result.actions.0.id: "fail_to_create_jira_issue" } + - match: { hits.hits.0._source.result.actions.0.type: "jira" } + - match: { hits.hits.0._source.result.actions.0.status: "failure" } + - match: { hits.hits.0._source.result.actions.0.jira.account: "test" } + - match: { hits.hits.0._source.result.actions.0.jira.fields.summary: "Hello from wrong_jira_watch" } + - is_false: hits.hits.0._source.result.actions.0.jira.fields.issuetype.name + - match: { hits.hits.0._source.result.actions.0.jira.fields.project.key: "XWT" } + - match: { hits.hits.0._source.result.actions.0.jira.fields.labels.0: "integration-tests" } + - match: { hits.hits.0._source.result.actions.0.jira.reason: "Bad Request - Field [issuetype] has error [issue type is required]\n" } + - match: { hits.hits.0._source.result.actions.0.jira.request.method: "post" } + - match: { hits.hits.0._source.result.actions.0.jira.request.path: "/rest/api/2/issue" } + - is_true: hits.hits.0._source.result.actions.0.jira.request.auth.basic.username + - match: { hits.hits.0._source.result.actions.0.jira.request.auth.basic.password: "::es_redacted::" } + - match: { hits.hits.0._source.result.actions.0.jira.response.body: "{\"errorMessages\":[],\"errors\":{\"issuetype\":\"issue type is required\"}}" } + +--- +"Test Jira action with custom fields of different types": + + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.watcher.put_watch: + id: "jira_watch_with_custom_field_one" + body: > + { + "trigger": { + "schedule": { + "interval": "1d" + } + }, + "input": { + "simple": { + } + }, + "condition": { + "always": {} + }, + "actions": { + "create_jira_issue": { + "jira": { + "account": "test", + "fields": { + "summary": "Jira watch with custom field of string type", + "description": "Issue created by the REST integration test [/watcher/actions/20_jira.yaml]", + "issuetype" : { + "name": "Bug" + }, + "customfield_70000": "jira-software-users" + } + } + } + } + } + - match: { _id: "jira_watch_with_custom_field_one" } + - match: { created: true } + + - do: + xpack.watcher.execute_watch: + id: "jira_watch_with_custom_field_one" + body: > + { + "trigger_data" : { + "triggered_time" : "2012-12-12T12:12:12.120Z", + "scheduled_time" : "2000-12-12T12:12:12.120Z" + }, + "record_execution": true + } + + - match: { watch_record.watch_id: "jira_watch_with_custom_field_one" } + - match: { watch_record.state: "executed" } + + - do: + xpack.watcher.put_watch: + id: "jira_watch_with_custom_field_two" + body: > + { + "trigger": { + "schedule": { + "interval": "1d" + } + }, + "input": { + "simple": { + } + }, + "condition": 
{ + "always": {} + }, + "actions": { + "create_jira_issue": { + "jira": { + "account": "test", + "fields": { + "summary": "Jira watch with custom field of object (Jira's CascadingSelectField) type", + "description": "Issue created by the REST integration test [/watcher/actions/20_jira.yaml]", + "issuetype" : { + "name": "Bug" + }, + "customfield_70000": { + "value": "green", + "child": { + "value":"blue" + } + } + } + } + } + } + } + - match: { _id: "jira_watch_with_custom_field_two" } + - match: { created: true } + + - do: + xpack.watcher.execute_watch: + id: "jira_watch_with_custom_field_two" + body: > + { + "trigger_data" : { + "triggered_time" : "2012-12-12T12:12:12.120Z", + "scheduled_time" : "2000-12-12T12:12:12.120Z" + }, + "record_execution": true + } + + - match: { watch_record.watch_id: "jira_watch_with_custom_field_two" } + - match: { watch_record.state: "executed" } + + - do: + indices.refresh: + index: ".watcher-history-*" + + - do: + search: + index: ".watcher-history-*" + body: + query: + match: + result.actions.status: "failure" + + - match: { hits.total: 2 } diff --git a/x-pack/qa/third-party/pagerduty/build.gradle b/x-pack/qa/third-party/pagerduty/build.gradle new file mode 100644 index 0000000000000..683e18caa1c7e --- /dev/null +++ b/x-pack/qa/third-party/pagerduty/build.gradle @@ -0,0 +1,22 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('watcher'), configuration: 'runtime') +} + +String pagerDutyServiceKey = System.getenv('pagerduty_service_api_key') + +integTestCluster { + setting 'xpack.security.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + setting 'logger.org.elasticsearch.xpack.watcher', 'DEBUG' + keystoreSetting 'xpack.notification.pagerduty.account.test_account.secure_service_api_key', pagerDutyServiceKey +} + +if (!pagerDutyServiceKey) { + integTest.enabled = false +} diff --git a/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java b/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java new file mode 100644 index 0000000000000..019609793e38c --- /dev/null +++ b/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.smoketest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.is; + +/** Runs rest tests against external cluster */ +public class WatcherPagerDutyYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + public WatcherPagerDutyYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + + @Before + public void startWatcher() throws Exception { + final List watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES); + assertBusy(() -> { + try { + getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + + for (String template : watcherTemplates) { + ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template", + singletonMap("name", template), emptyList(), emptyMap()); + assertThat(templateExistsResponse.getStatusCode(), is(200)); + } + + ClientYamlTestResponse response = + getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + String state = (String) response.evaluate("stats.0.watcher_state"); + assertThat(state, is("started")); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + + @After + public void stopWatcher() throws Exception { + assertBusy(() -> { + try { + getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap()); + ClientYamlTestResponse response = + getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + String state = (String) response.evaluate("stats.0.watcher_state"); + assertThat(state, is("stopped")); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } +} diff --git a/x-pack/qa/third-party/pagerduty/src/test/resources/rest-api-spec/test/pagerduty/10_pagerduty.yml b/x-pack/qa/third-party/pagerduty/src/test/resources/rest-api-spec/test/pagerduty/10_pagerduty.yml new file mode 100644 index 0000000000000..47706e5f1e7da --- /dev/null +++ b/x-pack/qa/third-party/pagerduty/src/test/resources/rest-api-spec/test/pagerduty/10_pagerduty.yml @@ -0,0 +1,103 @@ +--- +"Test PagerDuty Action": + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.watcher.put_watch: + id: "pagerduty_watch" + body: > + { + "trigger": { + "schedule": { + "interval": "1d" + } + }, + "input": { + "simple": { + } + }, + "actions": { + "my_pagerduty_action": { + "pagerduty": { + "description": "#testIncidentEvent()", + "client": "PagerDutyServiceTests", + "client_url": "_client_url", + "account" : "_account", + "contexts" : [ + { + "type": "link", + "href": "https://www.elastic.co/products/x-pack/alerting", + "text": "Go to the Elastic.co 
Alerting website" + }, + { + "type": "image", + "src": "https://www.elastic.co/assets/blte5d899fd0b0e6808/icon-alerting-bb.svg", + "href": "https://www.elastic.co/assets/blte5d899fd0b0e6808/icon-alerting-bb.svg", + "alt": "X-Pack-Alerting website link with log" + } + ] + } + } + } + } + + - do: + xpack.watcher.execute_watch: + id: "pagerduty_watch" + body: > + { + "record_execution": true + } + + - match: { watch_record.trigger_event.type: "manual" } + - match: { watch_record.state: "executed" } + - match: { watch_record.result.actions.0.id: "my_pagerduty_action" } + - match: { watch_record.result.actions.0.type: "pagerduty" } + - match: { watch_record.result.actions.0.status: "success" } + - match: { watch_record.result.actions.0.pagerduty.sent_event.event.account: "_account" } + - match: { watch_record.result.actions.0.pagerduty.sent_event.event.incident_key: "pagerduty_watch" } + - match: { watch_record.result.actions.0.pagerduty.sent_event.event.description: "#testIncidentEvent()" } + - match: { watch_record.result.actions.0.pagerduty.sent_event.event.client: "PagerDutyServiceTests" } + - match: { watch_record.result.actions.0.pagerduty.sent_event.event.client_url: "_client_url" } + - match: { watch_record.result.actions.0.pagerduty.sent_event.event.attach_payload: false } + - match: { watch_record.result.actions.0.pagerduty.sent_event.event.contexts.0.type: "link" } + - match: { watch_record.result.actions.0.pagerduty.sent_event.event.contexts.1.type: "image" } + + # Waits for the watcher history index to be available + - do: + cluster.health: + index: ".watcher-history-*" + wait_for_no_relocating_shards: true + timeout: 60s + + - do: + indices.refresh: {} + + - do: + search: + index: ".watcher-history-*" + body: > + { + "query" : { + "term" : { + "watch_id" : "pagerduty_watch" + } + } + } + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.state: "executed" } + - match: { hits.hits.0._source.result.actions.0.id: "my_pagerduty_action" } + - match: { hits.hits.0._source.result.actions.0.type: "pagerduty" } + - match: { hits.hits.0._source.result.actions.0.status: "success" } + - match: { hits.hits.0._source.result.actions.0.pagerduty.sent_event.event.account: "_account" } + - match: { hits.hits.0._source.result.actions.0.pagerduty.sent_event.event.incident_key: "pagerduty_watch" } + - match: { hits.hits.0._source.result.actions.0.pagerduty.sent_event.event.description: "#testIncidentEvent()" } + - match: { hits.hits.0._source.result.actions.0.pagerduty.sent_event.event.client: "PagerDutyServiceTests" } + - match: { hits.hits.0._source.result.actions.0.pagerduty.sent_event.event.client_url: "_client_url" } + - match: { hits.hits.0._source.result.actions.0.pagerduty.sent_event.event.attach_payload: false } + - match: { hits.hits.0._source.result.actions.0.pagerduty.sent_event.event.contexts.0.type: "link" } + - match: { hits.hits.0._source.result.actions.0.pagerduty.sent_event.event.contexts.1.type: "image" } + diff --git a/x-pack/qa/third-party/slack/build.gradle b/x-pack/qa/third-party/slack/build.gradle new file mode 100644 index 0000000000000..abcdad0e096e1 --- /dev/null +++ b/x-pack/qa/third-party/slack/build.gradle @@ -0,0 +1,25 @@ +import org.elasticsearch.gradle.test.NodeInfo +import org.elasticsearch.gradle.LoggedExec + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackModule('watcher'), 
configuration: 'runtime') +} + +String slackUrl = System.getenv('slack_url') + +integTestCluster { + setting 'xpack.security.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' + setting 'logger.org.elasticsearch.xpack.watcher', 'DEBUG' + keystoreSetting 'xpack.notification.slack.account.test_account.secure_url', slackUrl +} + +if (!slackUrl) { + integTest.enabled = false +} diff --git a/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java b/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java new file mode 100644 index 0000000000000..f6e8222ea73a0 --- /dev/null +++ b/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.smoketest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.is; + +/** Runs rest tests against external cluster */ +public class WatcherSlackYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + public WatcherSlackYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + + @Before + public void startWatcher() throws Exception { + final List watcherTemplates = Arrays.asList(WatcherIndexTemplateRegistryField.TEMPLATE_NAMES); + assertBusy(() -> { + try { + getAdminExecutionContext().callApi("xpack.watcher.start", emptyMap(), emptyList(), emptyMap()); + + for (String template : watcherTemplates) { + ClientYamlTestResponse templateExistsResponse = getAdminExecutionContext().callApi("indices.exists_template", + singletonMap("name", template), emptyList(), emptyMap()); + assertThat(templateExistsResponse.getStatusCode(), is(200)); + } + + ClientYamlTestResponse response = + getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + String state = (String) response.evaluate("stats.0.watcher_state"); + assertThat(state, is("started")); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + + @After + public void stopWatcher() throws Exception { + assertBusy(() -> { + try { + getAdminExecutionContext().callApi("xpack.watcher.stop", emptyMap(), emptyList(), emptyMap()); + ClientYamlTestResponse response = + getAdminExecutionContext().callApi("xpack.watcher.stats", emptyMap(), emptyList(), emptyMap()); + String state = (String) 
response.evaluate("stats.0.watcher_state"); + assertThat(state, is("stopped")); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } +} diff --git a/x-pack/qa/third-party/slack/src/test/resources/rest-api-spec/test/slack/10_slack.yml b/x-pack/qa/third-party/slack/src/test/resources/rest-api-spec/test/slack/10_slack.yml new file mode 100644 index 0000000000000..3b04ba716759a --- /dev/null +++ b/x-pack/qa/third-party/slack/src/test/resources/rest-api-spec/test/slack/10_slack.yml @@ -0,0 +1,109 @@ +--- +"Test Slack Action": + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.watcher.put_watch: + id: "slack_watch" + body: > + { + "trigger": { + "schedule": { + "interval": "1d" + } + }, + "input": { + "simple": { + "foo" : "something from input" + } + }, + "actions": { + "my_slack_action": { + "slack": { + "account": "test_account", + "message": { + "from": "SlackServiceTests", + "to": [ + "#watcher-test", "#watcher-test-2" + ], + "text": "slack integration test {{ctx.trigger.triggered_time}}", + "attachments": [ + { + "title": "title あいうえお", + "text": "From input: {{ctx.payload.foo}} - other testing: :facepalm: also `code` and *bold*", + "color": "warning", + "author_name" : "your friendly slack integration test" + }, + { + "title" : "attachment with action", + "text" : "button test", + "actions" : [ + { + "name" : "action name", + "style" : "danger", + "type" : "button", + "text" : "Button to go to elastic.co", + "url" : "https://elastic.co" + } + ] + } + ] + } + } + } + } + } + + - do: + xpack.watcher.execute_watch: + id: "slack_watch" + body: > + { + "record_execution": true + } + + - match: { watch_record.trigger_event.type: "manual" } + - match: { watch_record.state: "executed" } + - match: { watch_record.result.actions.0.id: "my_slack_action" } + - match: { watch_record.result.actions.0.type: "slack" } + - match: { watch_record.result.actions.0.status: "success" } + - match: { watch_record.result.actions.0.slack.account: "test_account" } + - match: { watch_record.result.actions.0.slack.sent_messages.0.to: "#watcher-test" } + - match: { watch_record.result.actions.0.slack.sent_messages.1.to: "#watcher-test-2" } + - match: { watch_record.result.actions.0.slack.sent_messages.0.status: "success" } + - match: { watch_record.result.actions.0.slack.sent_messages.1.status: "success" } + + # Waits for the watcher history index to be available + - do: + cluster.health: + index: ".watcher-history-*" + wait_for_no_relocating_shards: true + timeout: 60s + + - do: + indices.refresh: {} + + - do: + search: + index: ".watcher-history-*" + body: > + { + "query" : { + "term" : { + "watch_id" : "slack_watch" + } + } + } + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.state: "executed" } + - match: { hits.hits.0._source.result.actions.0.id: "my_slack_action" } + - match: { hits.hits.0._source.result.actions.0.type: "slack" } + - match: { hits.hits.0._source.result.actions.0.status: "success" } + - match: { hits.hits.0._source.result.actions.0.slack.account: "test_account" } + - match: { hits.hits.0._source.result.actions.0.slack.sent_messages.0.to: "#watcher-test" } + - match: { hits.hits.0._source.result.actions.0.slack.sent_messages.1.to: "#watcher-test-2" } + - match: { hits.hits.0._source.result.actions.0.slack.sent_messages.0.status: "success" } + - match: { hits.hits.0._source.result.actions.0.slack.sent_messages.1.status: "success" } diff --git a/x-pack/qa/transport-client-tests/build.gradle b/x-pack/qa/transport-client-tests/build.gradle new file 
mode 100644 index 0000000000000..c864a9084cba8 --- /dev/null +++ b/x-pack/qa/transport-client-tests/build.gradle @@ -0,0 +1,12 @@ +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'runtime') + testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime') +} + +integTestCluster { + setting 'xpack.security.enabled', 'false' + setting 'xpack.license.self_generated.type', 'trial' +} diff --git a/x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/ESXPackSmokeClientTestCase.java b/x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/ESXPackSmokeClientTestCase.java new file mode 100644 index 0000000000000..c77715431ec5e --- /dev/null +++ b/x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/ESXPackSmokeClientTestCase.java @@ -0,0 +1,154 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.client; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.URL; +import java.nio.file.Path; +import java.util.Locale; +import java.util.concurrent.atomic.AtomicInteger; + +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength; +import static org.hamcrest.Matchers.notNullValue; + +/** + * An abstract base class to run integration tests against an Elasticsearch + * cluster running outside of the test process. + *
    + * You can define a list of transport addresses from which you can reach your + * cluster by setting the "tests.cluster" system property. It defaults to + * "localhost:9300". If you run this from `gradle integTest` then it will start + * the cluster for you and set up the property. + *
    + * If you want to debug this module from your IDE, then start an external + * cluster by yourself, maybe with `gradle run`, then run JUnit. If you changed + * the default port, set "-Dtests.cluster=localhost:PORT" when running your + * test. + */ +@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") +public abstract class ESXPackSmokeClientTestCase extends LuceneTestCase { + + /** + * Key used to eventually switch to using an external cluster and provide + * its transport addresses + */ + public static final String TESTS_CLUSTER = "tests.cluster"; + + protected static final Logger logger = ESLoggerFactory + .getLogger(ESXPackSmokeClientTestCase.class.getName()); + + private static final AtomicInteger counter = new AtomicInteger(); + private static Client client; + private static String clusterAddresses; + protected String index; + + private static Client startClient(Path tempDir, TransportAddress... transportAddresses) { + Settings.Builder builder = Settings.builder() + .put("node.name", "qa_xpack_smoke_client_" + counter.getAndIncrement()) + .put("client.transport.ignore_cluster_name", true) + .put("xpack.security.enabled", false) + .put(Environment.PATH_HOME_SETTING.getKey(), tempDir); + TransportClient client = new PreBuiltXPackTransportClient(builder.build()) + .addTransportAddresses(transportAddresses); + + logger.info("--> Elasticsearch Java TransportClient started"); + + Exception clientException = null; + try { + ClusterHealthResponse health = client.admin().cluster().prepareHealth().get(); + logger.info("--> connected to [{}] cluster which is running [{}] node(s).", + health.getClusterName(), health.getNumberOfNodes()); + } catch (Exception e) { + logger.error("Error getting cluster health", e); + clientException = e; + } + + assumeNoException("Sounds like your cluster is not running at " + clusterAddresses, + clientException); + + return client; + } + + private static Client startClient() throws IOException { + String[] stringAddresses = clusterAddresses.split(","); + TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length]; + int i = 0; + for (String stringAddress : stringAddresses) { + URL url = new URL("http://" + stringAddress); + InetAddress inetAddress = InetAddress.getByName(url.getHost()); + transportAddresses[i++] = new TransportAddress( + new InetSocketAddress(inetAddress, url.getPort())); + } + return startClient(createTempDir(), transportAddresses); + } + + public static Client getClient() { + if (client == null) { + try { + client = startClient(); + } catch (IOException e) { + logger.error("can not start the client", e); + } + assertThat(client, notNullValue()); + } + return client; + } + + @BeforeClass + public static void initializeSettings() { + clusterAddresses = System.getProperty(TESTS_CLUSTER); + if (clusterAddresses == null || clusterAddresses.isEmpty()) { + fail("Must specify " + TESTS_CLUSTER + " for smoke client test"); + } + } + + @AfterClass + public static void stopTransportClient() { + if (client != null) { + client.close(); + client = null; + } + } + + @Before + public void defineIndexName() { + doClean(); + index = "qa-xpack-smoke-test-client-" + + randomAsciiOfLength(10).toLowerCase(Locale.getDefault()); + } + + @After + public void cleanIndex() { + doClean(); + } + + private void doClean() { + if (client != null) { + try { + client.admin().indices().prepareDelete(index).get(); + } catch (Exception e) { + // We ignore this cleanup exception + } + } + } +} diff --git 
a/x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/MLTransportClientIT.java b/x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/MLTransportClientIT.java new file mode 100644 index 0000000000000..406b354e57439 --- /dev/null +++ b/x-pack/qa/transport-client-tests/src/test/java/org/elasticsearch/xpack/ml/client/MLTransportClientIT.java @@ -0,0 +1,179 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.client; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.XPackClient; +import org.elasticsearch.xpack.core.ml.action.CloseJobAction; +import org.elasticsearch.xpack.core.ml.action.DeleteJobAction; +import org.elasticsearch.xpack.core.ml.action.FlushJobAction; +import org.elasticsearch.xpack.core.ml.action.GetBucketsAction; +import org.elasticsearch.xpack.core.ml.action.GetDatafeedsAction; +import org.elasticsearch.xpack.core.ml.action.GetJobsAction; +import org.elasticsearch.xpack.core.ml.action.GetModelSnapshotsAction; +import org.elasticsearch.xpack.core.ml.action.OpenJobAction; +import org.elasticsearch.xpack.core.ml.action.PostDataAction; +import org.elasticsearch.xpack.core.ml.action.PutDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.PutJobAction; +import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.UpdateModelSnapshotAction; +import org.elasticsearch.xpack.core.ml.action.ValidateDetectorAction; +import org.elasticsearch.xpack.core.ml.action.ValidateJobConfigAction; +import org.elasticsearch.xpack.core.ml.client.MachineLearningClient; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; +import org.elasticsearch.xpack.core.ml.job.config.DataDescription; +import org.elasticsearch.xpack.core.ml.job.config.Detector; +import org.elasticsearch.xpack.core.ml.job.config.Job; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + +public class MLTransportClientIT extends ESXPackSmokeClientTestCase { + + public void testMLTransportClient_JobActions() { + Client client = getClient(); + XPackClient xPackClient = new XPackClient(client); + MachineLearningClient mlClient = xPackClient.machineLearning(); + + String jobId = "ml-transport-client-it-job"; + Job.Builder job = createJob(jobId); + + PutJobAction.Response putJobResponse = mlClient.putJob(new PutJobAction.Request(job)).actionGet(); + assertThat(putJobResponse, notNullValue()); + + GetJobsAction.Response getJobResponse = mlClient.getJobs(new GetJobsAction.Request(jobId)).actionGet(); + assertThat(getJobResponse, notNullValue()); + assertThat(getJobResponse.getResponse(), notNullValue()); + assertThat(getJobResponse.getResponse().count(), equalTo(1L)); + + // Open job POST data, flush, close and check a result + OpenJobAction.Response openJobResponse = 
mlClient.openJob(new OpenJobAction.Request(jobId)).actionGet(); + assertThat(openJobResponse.isAcknowledged(), equalTo(true)); + + String content = "{\"time\":1000, \"msg\": \"some categorical message\"}\n" + + "{\"time\":11000, \"msg\": \"some categorical message in the second bucket\"}\n" + + "{\"time\":21000, \"msg\": \"some categorical message in the third bucket\"}\n"; + PostDataAction.Request postRequest = new PostDataAction.Request(jobId); + postRequest.setContent(new BytesArray(content), XContentType.JSON); + PostDataAction.Response postResponse = mlClient.postData(postRequest).actionGet(); + assertThat(postResponse.getDataCounts(), notNullValue()); + assertThat(postResponse.getDataCounts().getInputFieldCount(), equalTo(3L)); + + FlushJobAction.Response flushResponse = mlClient.flushJob(new FlushJobAction.Request(jobId)).actionGet(); + assertThat(flushResponse.isFlushed(), equalTo(true)); + + CloseJobAction.Response closeResponse = mlClient.closeJob(new CloseJobAction.Request(jobId)).actionGet(); + assertThat(closeResponse.isClosed(), equalTo(true)); + + GetBucketsAction.Response getBucketsResponse = mlClient.getBuckets(new GetBucketsAction.Request(jobId)).actionGet(); + assertThat(getBucketsResponse.getBuckets().count(), equalTo(1L)); + + // Update a model snapshot + GetModelSnapshotsAction.Response getModelSnapshotResponse = + mlClient.getModelSnapshots(new GetModelSnapshotsAction.Request(jobId, null)).actionGet(); + assertThat(getModelSnapshotResponse.getPage().count(), equalTo(1L)); + String snapshotId = getModelSnapshotResponse.getPage().results().get(0).getSnapshotId(); + + UpdateModelSnapshotAction.Request updateModelSnapshotRequest = new UpdateModelSnapshotAction.Request(jobId, snapshotId); + updateModelSnapshotRequest.setDescription("Changed description"); + UpdateModelSnapshotAction.Response updateModelSnapshotResponse = + mlClient.updateModelSnapshot(updateModelSnapshotRequest).actionGet(); + assertThat(updateModelSnapshotResponse.getModel(), notNullValue()); + assertThat(updateModelSnapshotResponse.getModel().getDescription(), equalTo("Changed description")); + + // and delete the job + DeleteJobAction.Response deleteJobResponse = mlClient.deleteJob(new DeleteJobAction.Request(jobId)).actionGet(); + assertThat(deleteJobResponse, notNullValue()); + assertThat(deleteJobResponse.isAcknowledged(), equalTo(true)); + } + + public void testMLTransportClient_ValidateActions() { + Client client = getClient(); + XPackClient xPackClient = new XPackClient(client); + MachineLearningClient mlClient = xPackClient.machineLearning(); + + Detector.Builder detector = new Detector.Builder(); + detector.setFunction("count"); + ValidateDetectorAction.Request validateDetectorRequest = new ValidateDetectorAction.Request(detector.build()); + ValidateDetectorAction.Response validateDetectorResponse = mlClient.validateDetector(validateDetectorRequest).actionGet(); + assertThat(validateDetectorResponse.isAcknowledged(), equalTo(true)); + + Job.Builder job = createJob("ml-transport-client-it-validate-job"); + ValidateJobConfigAction.Request validateJobRequest = new ValidateJobConfigAction.Request(job.build(new Date())); + ValidateJobConfigAction.Response validateJobResponse = mlClient.validateJobConfig(validateJobRequest).actionGet(); + assertThat(validateJobResponse.isAcknowledged(), equalTo(true)); + } + + + public void testMLTransportClient_DateFeedActions() { + Client client = getClient(); + XPackClient xPackClient = new XPackClient(client); + MachineLearningClient mlClient = 
xPackClient.machineLearning(); + + String jobId = "ml-transport-client-it-datafeed-job"; + Job.Builder job = createJob(jobId); + + PutJobAction.Response putJobResponse = mlClient.putJob(new PutJobAction.Request(job)).actionGet(); + assertThat(putJobResponse, notNullValue()); + + String datafeedId = "ml-transport-client-it-datafeed"; + DatafeedConfig.Builder datafeed = new DatafeedConfig.Builder(datafeedId, jobId); + String datafeedIndex = "ml-transport-client-test"; + String datatype = "type-bar"; + datafeed.setIndices(Collections.singletonList(datafeedIndex)); + datafeed.setTypes(Collections.singletonList("type-bar")); + + mlClient.putDatafeed(new PutDatafeedAction.Request(datafeed.build())).actionGet(); + + GetDatafeedsAction.Response getDatafeedResponse = mlClient.getDatafeeds(new GetDatafeedsAction.Request(datafeedId)).actionGet(); + assertThat(getDatafeedResponse.getResponse(), notNullValue()); + + // Open job before starting the datafeed + OpenJobAction.Response openJobResponse = mlClient.openJob(new OpenJobAction.Request(jobId)).actionGet(); + assertThat(openJobResponse.isAcknowledged(), equalTo(true)); + + // create the index for the data feed + Map source = new HashMap<>(); + source.put("time", new Date()); + source.put("message", "some message"); + client.prepareIndex(datafeedIndex, datatype).setSource(source).get(); + + StartDatafeedAction.Request startDatafeedRequest = new StartDatafeedAction.Request(datafeedId, new Date().getTime()); + StartDatafeedAction.Response startDataFeedResponse = mlClient.startDatafeed(startDatafeedRequest).actionGet(); + assertThat(startDataFeedResponse.isAcknowledged(), equalTo(true)); + + StopDatafeedAction.Response stopDataFeedResponse = mlClient.stopDatafeed(new StopDatafeedAction.Request(datafeedId)).actionGet(); + assertThat(stopDataFeedResponse.isStopped(), equalTo(true)); + } + + private Job.Builder createJob(String jobId) { + Job.Builder job = new Job.Builder(); + job.setId(jobId); + + List detectors = new ArrayList<>(); + Detector.Builder detector = new Detector.Builder(); + detector.setFunction("count"); + detectors.add(detector.build()); + + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(detectors); + analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(10L)); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(new DataDescription.Builder()); + return job; + } +} diff --git a/x-pack/qa/vagrant/build.gradle b/x-pack/qa/vagrant/build.gradle new file mode 100644 index 0000000000000..0c3428f258c0e --- /dev/null +++ b/x-pack/qa/vagrant/build.gradle @@ -0,0 +1,51 @@ +import org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin +import org.elasticsearch.gradle.plugin.MetaPluginPropertiesExtension +import org.elasticsearch.gradle.plugin.PluginBuildPlugin +import org.elasticsearch.gradle.plugin.PluginPropertiesExtension + +apply plugin: 'elasticsearch.vagrantsupport' +apply plugin: 'elasticsearch.vagrant' + +esvagrant { + inheritTestUtils true +} + +dependencies { + // Packaging tests use the x-pack meta plugin + packaging project(path: xpackProject('plugin').path, configuration: 'zip') + + // Inherit Bats test utils from :qa:vagrant project + packaging project(path: ':qa:vagrant', configuration: 'packaging') +} + +Map> metaPlugins = [:] +for (Project metaPlugin : project.rootProject.subprojects) { + if (metaPlugin.plugins.hasPlugin(MetaPluginBuildPlugin)) { + MetaPluginPropertiesExtension extension = metaPlugin.extensions.findByName('es_meta_plugin') + if (extension != null) { + List plugins = [] + 
metaPlugin.subprojects.each { + if (extension.plugins.contains(it.name)) { + Project plugin = (Project) it + if (plugin.plugins.hasPlugin(PluginBuildPlugin)) { + PluginPropertiesExtension esplugin = plugin.extensions.findByName('esplugin') + if (esplugin != null) { + plugins.add(esplugin.name) + } + } + } + } + metaPlugins.put(extension.name, plugins.toSorted()) + } + } +} + +setupPackagingTest { + doLast { + metaPlugins.each{ name, plugins -> + File expectedMetaPlugins = file("build/plugins/${name}.expected") + expectedMetaPlugins.parentFile.mkdirs() + expectedMetaPlugins.setText(plugins.join('\n'), 'UTF-8') + } + } +} diff --git a/x-pack/qa/vagrant/src/test/resources/packaging/tests/10_basic.bats b/x-pack/qa/vagrant/src/test/resources/packaging/tests/10_basic.bats new file mode 100644 index 0000000000000..898fedbff794e --- /dev/null +++ b/x-pack/qa/vagrant/src/test/resources/packaging/tests/10_basic.bats @@ -0,0 +1,46 @@ +#!/usr/bin/env bats + +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License; +# you may not use this file except in compliance with the Elastic License. + +# This file is used to test the X-Pack package. + +# WARNING: This testing file must be executed as root and can +# dramatically change your system. It removes the 'elasticsearch' +# user/group and also many directories. Do not execute this file +# unless you know exactly what you are doing. + +load $BATS_UTILS/utils.bash +load $BATS_UTILS/tar.bash +load $BATS_UTILS/plugins.bash +load $BATS_UTILS/xpack.bash + +setup() { + skip_not_tar_gz + export ESHOME=/tmp/elasticsearch + export PACKAGE_NAME="elasticsearch" + export_elasticsearch_paths + export ESPLUGIN_COMMAND_USER=elasticsearch +} + +@test "[X-PACK] install default distribution" { + # Cleans everything for the 1st execution + clean_before_test + + # Install the archive + install_archive +} + +@test "[X-PACK] verify x-pack installation" { + verify_xpack_installation +} + +@test "[X-PACK] verify croneval works" { + run $ESHOME/bin/elasticsearch-croneval "0 0 20 ? * MON-THU" -c 2 + [ "$status" -eq 0 ] + [[ "$output" == *"Valid!"* ]] || { + echo "Expected output message to contain [Valid!] 
but found: $output" + false + } +} diff --git a/x-pack/qa/vagrant/src/test/resources/packaging/tests/20_tar_bootstrap_password.bats b/x-pack/qa/vagrant/src/test/resources/packaging/tests/20_tar_bootstrap_password.bats new file mode 120000 index 0000000000000..58a968aa3e14c --- /dev/null +++ b/x-pack/qa/vagrant/src/test/resources/packaging/tests/20_tar_bootstrap_password.bats @@ -0,0 +1 @@ +bootstrap_password.bash \ No newline at end of file diff --git a/x-pack/qa/vagrant/src/test/resources/packaging/tests/25_package_bootstrap_password.bats b/x-pack/qa/vagrant/src/test/resources/packaging/tests/25_package_bootstrap_password.bats new file mode 120000 index 0000000000000..58a968aa3e14c --- /dev/null +++ b/x-pack/qa/vagrant/src/test/resources/packaging/tests/25_package_bootstrap_password.bats @@ -0,0 +1 @@ +bootstrap_password.bash \ No newline at end of file diff --git a/x-pack/qa/vagrant/src/test/resources/packaging/tests/30_tar_setup_passwords.bats b/x-pack/qa/vagrant/src/test/resources/packaging/tests/30_tar_setup_passwords.bats new file mode 120000 index 0000000000000..74d1204b3f9e7 --- /dev/null +++ b/x-pack/qa/vagrant/src/test/resources/packaging/tests/30_tar_setup_passwords.bats @@ -0,0 +1 @@ +setup_passwords.bash \ No newline at end of file diff --git a/x-pack/qa/vagrant/src/test/resources/packaging/tests/35_package_setup_passwords.bats b/x-pack/qa/vagrant/src/test/resources/packaging/tests/35_package_setup_passwords.bats new file mode 120000 index 0000000000000..74d1204b3f9e7 --- /dev/null +++ b/x-pack/qa/vagrant/src/test/resources/packaging/tests/35_package_setup_passwords.bats @@ -0,0 +1 @@ +setup_passwords.bash \ No newline at end of file diff --git a/x-pack/qa/vagrant/src/test/resources/packaging/tests/40_tar_certgen.bats b/x-pack/qa/vagrant/src/test/resources/packaging/tests/40_tar_certgen.bats new file mode 120000 index 0000000000000..c9a929d829edd --- /dev/null +++ b/x-pack/qa/vagrant/src/test/resources/packaging/tests/40_tar_certgen.bats @@ -0,0 +1 @@ +certgen.bash \ No newline at end of file diff --git a/x-pack/qa/vagrant/src/test/resources/packaging/tests/45_package_certgen.bats b/x-pack/qa/vagrant/src/test/resources/packaging/tests/45_package_certgen.bats new file mode 120000 index 0000000000000..c9a929d829edd --- /dev/null +++ b/x-pack/qa/vagrant/src/test/resources/packaging/tests/45_package_certgen.bats @@ -0,0 +1 @@ +certgen.bash \ No newline at end of file diff --git a/x-pack/qa/vagrant/src/test/resources/packaging/tests/bootstrap_password.bash b/x-pack/qa/vagrant/src/test/resources/packaging/tests/bootstrap_password.bash new file mode 100644 index 0000000000000..043a8911ac492 --- /dev/null +++ b/x-pack/qa/vagrant/src/test/resources/packaging/tests/bootstrap_password.bash @@ -0,0 +1,169 @@ +#!/usr/bin/env bats + +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License; +# you may not use this file except in compliance with the Elastic License. 
+ +load $BATS_UTILS/utils.bash +load $BATS_UTILS/plugins.bash +load $BATS_UTILS/xpack.bash + +setup() { + if [ $BATS_TEST_NUMBER == 1 ]; then + export PACKAGE_NAME="elasticsearch" + clean_before_test + install + + generate_trial_license + verify_xpack_installation + fi +} + +if [[ "$BATS_TEST_FILENAME" =~ 20_tar_bootstrap_password.bats$ ]]; then + load $BATS_UTILS/tar.bash + GROUP='TAR BOOTSTRAP PASSWORD' + install() { + install_archive + verify_archive_installation + } + export ESHOME=/tmp/elasticsearch + export_elasticsearch_paths + export ESPLUGIN_COMMAND_USER=elasticsearch +else + load $BATS_UTILS/packages.bash + if is_rpm; then + GROUP='RPM BOOTSTRAP PASSWORD' + elif is_dpkg; then + GROUP='DEB BOOTSTRAP PASSWORD' + fi + export_elasticsearch_paths + export ESPLUGIN_COMMAND_USER=root + install() { + install_package + verify_package_installation + } +fi + +@test "[$GROUP] add bootstrap.password setting" { + if [[ -f /tmp/bootstrap.password ]]; then + sudo rm -f /tmp/bootstrap.password + fi + + run sudo -E -u $ESPLUGIN_COMMAND_USER bash <<"NEW_PASS" +if [[ ! -f $ESCONFIG/elasticsearch.keystore ]]; then + $ESHOME/bin/elasticsearch-keystore create +fi +cat /dev/urandom | tr -dc "[a-zA-Z0-9]" | fold -w 20 | head -n 1 > /tmp/bootstrap.password +cat /tmp/bootstrap.password | $ESHOME/bin/elasticsearch-keystore add --stdin bootstrap.password +NEW_PASS + [ "$status" -eq 0 ] || { + echo "Expected elasticsearch-keystore tool exit code to be zero but got [$status]" + echo "$output" + false + } + assert_file_exist "/tmp/bootstrap.password" +} + +@test "[$GROUP] test bootstrap.password is in setting list" { + run sudo -E -u $ESPLUGIN_COMMAND_USER bash <<"NODE_SETTINGS" +cat >> $ESCONFIG/elasticsearch.yml <<- EOF +network.host: 127.0.0.1 +http.port: 9200 +EOF +NODE_SETTINGS + + run_elasticsearch_service 0 + wait_for_xpack 127.0.0.1 9200 + + sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-keystore" list | grep "bootstrap.password" + + password=$(cat /tmp/bootstrap.password) + clusterHealth=$(sudo curl -u "elastic:$password" -H "Content-Type: application/json" \ + -XGET "http://127.0.0.1:9200/_cluster/health?wait_for_status=green&timeout=30s") + echo "$clusterHealth" | grep '"status":"green"' || { + echo "Expected cluster health to be green but got:" + echo "$clusterHealth" + false + } +} + +@test "[$GROUP] test auto generated passwords with modified bootstrap.password" { + if [[ -f /tmp/setup-passwords-output-with-bootstrap ]]; then + sudo rm -f /tmp/setup-passwords-output-with-bootstrap + fi + + run sudo -E -u $ESPLUGIN_COMMAND_USER bash <<"SETUP_OK" +echo 'y' | $ESHOME/bin/elasticsearch-setup-passwords auto +SETUP_OK + echo "$output" > /tmp/setup-passwords-output-with-bootstrap + [ "$status" -eq 0 ] || { + echo "Expected x-pack elasticsearch-setup-passwords tool exit code to be zero but got [$status]" + cat /tmp/setup-passwords-output-with-bootstrap + debug_collect_logs + false + } + + curl -s -XGET 'http://127.0.0.1:9200' | grep "missing authentication token for REST" + + # Disable bash history expansion because passwords can contain "!" 
+ set +H + + users=( elastic kibana logstash_system ) + for user in "${users[@]}"; do + grep "Changed password for user $user" /tmp/setup-passwords-output-with-bootstrap || { + echo "Expected x-pack elasticsearch-setup-passwords tool to change password for user [$user]:" + cat /tmp/setup-passwords-output-with-bootstrap + false + } + + password=$(grep "PASSWORD $user = " /tmp/setup-passwords-output-with-bootstrap | sed "s/PASSWORD $user = //") + curl -u "$user:$password" -XGET 'http://127.0.0.1:9200' | grep "You Know, for Search" + + basic=$(echo -n "$user:$password" | base64) + curl -H "Authorization: Basic $basic" -XGET 'http://127.0.0.1:9200' | grep "You Know, for Search" + done + set -H +} + +@test "[$GROUP] test elasticsearch-sql-cli" { + password=$(grep "PASSWORD elastic = " /tmp/setup-passwords-output-with-bootstrap | sed "s/PASSWORD elastic = //") + curl -s -u "elastic:$password" -H "Content-Type: application/json" -XPUT 'localhost:9200/library/book/1?refresh&pretty' -d'{ + "name": "Ender'"'"'s Game", + "author": "Orson Scott Card", + "release_date": "1985-06-01", + "page_count": 324 + }' + + password=$(grep "PASSWORD elastic = " /tmp/setup-passwords-output-with-bootstrap | sed "s/PASSWORD elastic = //") + + run $ESHOME/bin/elasticsearch-sql-cli --debug "http://elastic@127.0.0.1:9200" < /tmp/instances.yml <<- EOF +instances: + - name: "node-master" + ip: + - "127.0.0.1" + - name: "node-data" + ip: + - "127.0.0.1" +EOF +CREATE_INSTANCES_FILE + + [ "$status" -eq 0 ] || { + echo "Failed to create instances file [$instances]: $output" + false + } +} + +@test "[$GROUP] create certificates" { + if [[ -f "$certificates" ]]; then + sudo rm -f "$certificates" + fi + + run sudo -E -u $MASTER_USER "$MASTER_HOME/bin/elasticsearch-certgen" --in "$instances" --out "$certificates" + [ "$status" -eq 0 ] || { + echo "Expected elasticsearch-certgen tool exit code to be zero" + echo "$output" + false + } + + echo "$output" | grep "Certificates written to $certificates" + assert_file "$certificates" f $MASTER_USER $MASTER_USER 600 +} + +@test "[$GROUP] install certificates on master node" { + load $MASTER_UTILS + export ESHOME="$MASTER_HOME" + export_elasticsearch_paths + + certs="$ESCONFIG/certs" + if [[ -d "$certs" ]]; then + sudo rm -rf "$certs" + fi + + run sudo -E -u $MASTER_USER "unzip" $certificates -d $certs + [ "$status" -eq 0 ] || { + echo "Failed to unzip certificates in $certs: $output" + false + } + + assert_file "$certs/ca/ca.key" f $MASTER_USER $MASTER_GROUP 644 + assert_file "$certs/ca/ca.crt" f $MASTER_USER $MASTER_GROUP 644 + + assert_file "$certs/node-master" d $MASTER_USER $MASTER_GROUP $MASTER_DPERMS + assert_file "$certs/node-master/node-master.key" f $MASTER_USER $MASTER_GROUP 644 + assert_file "$certs/node-master/node-master.crt" f $MASTER_USER $MASTER_GROUP 644 + + assert_file "$certs/node-data" d $MASTER_USER $MASTER_GROUP $MASTER_DPERMS + assert_file "$certs/node-data/node-data.key" f $MASTER_USER $MASTER_GROUP 644 + assert_file "$certs/node-data/node-data.crt" f $MASTER_USER $MASTER_GROUP 644 +} + +@test "[$GROUP] update master node settings" { + load $MASTER_UTILS + export ESHOME="$MASTER_HOME" + export_elasticsearch_paths + + run sudo -E -u $MASTER_USER bash <<"MASTER_SETTINGS" +cat >> $ESCONFIG/elasticsearch.yml <<- EOF +node.name: "node-master" +node.master: true +node.data: false +discovery.zen.ping.unicast.hosts: ["127.0.0.1:9301"] + +xpack.ssl.key: $ESCONFIG/certs/node-master/node-master.key +xpack.ssl.certificate: $ESCONFIG/certs/node-master/node-master.crt 
+xpack.ssl.certificate_authorities: ["$ESCONFIG/certs/ca/ca.crt"] + +xpack.security.transport.ssl.enabled: true +transport.tcp.port: 9300 + +xpack.security.http.ssl.enabled: true +http.port: 9200 + +EOF +MASTER_SETTINGS + + start_master_node + wait_for_xpack 127.0.0.1 9200 +} + +@test "[$GROUP] test connection to master node using HTTPS" { + load $MASTER_UTILS + export ESHOME="$MASTER_HOME" + export_elasticsearch_paths + + run sudo -E -u $MASTER_USER curl -u "elastic:changeme" --cacert "$ESCONFIG/certs/ca/ca.crt" -XGET "https://127.0.0.1:9200" + [ "$status" -eq 0 ] || { + echo "Failed to connect to master node using HTTPS:" + echo "$output" + debug_collect_logs + false + } + echo "$output" | grep "node-master" +} + +@test "[$GROUP] install data node" { + install_data_node +} + +@test "[$GROUP] install certificates on data node" { + load $DATA_UTILS + export ESHOME="$DATA_HOME" + export_elasticsearch_paths + + sudo chown $DATA_USER:$DATA_USER "$certificates" + [ -f "$certificates" ] || { + echo "Could not find certificates: $certificates" + false + } + + certs="$ESCONFIG/certs" + if [[ -d "$certs" ]]; then + sudo rm -rf "$certs" + fi + + run sudo -E -u $DATA_USER "unzip" $certificates -d $certs + [ "$status" -eq 0 ] || { + echo "Failed to unzip certificates in $certs: $output" + false + } + + assert_file "$certs/ca" d $DATA_USER $DATA_GROUP + assert_file "$certs/ca/ca.key" f $DATA_USER $DATA_GROUP 644 + assert_file "$certs/ca/ca.crt" f $DATA_USER $DATA_GROUP 644 + + assert_file "$certs/node-master" d $DATA_USER $DATA_GROUP + assert_file "$certs/node-master/node-master.key" f $DATA_USER $DATA_GROUP 644 + assert_file "$certs/node-master/node-master.crt" f $DATA_USER $DATA_GROUP 644 + + assert_file "$certs/node-data" d $DATA_USER $DATA_GROUP + assert_file "$certs/node-data/node-data.key" f $DATA_USER $DATA_GROUP 644 + assert_file "$certs/node-data/node-data.crt" f $DATA_USER $DATA_GROUP 644 +} + +@test "[$GROUP] update data node settings" { + load $DATA_UTILS + export ESHOME="$DATA_HOME" + export_elasticsearch_paths + + run sudo -E -u $DATA_USER bash <<"DATA_SETTINGS" +cat >> $ESCONFIG/elasticsearch.yml <<- EOF +node.name: "node-data" +node.master: false +node.data: true +discovery.zen.ping.unicast.hosts: ["127.0.0.1:9300"] + +xpack.ssl.key: $ESCONFIG/certs/node-data/node-data.key +xpack.ssl.certificate: $ESCONFIG/certs/node-data/node-data.crt +xpack.ssl.certificate_authorities: ["$ESCONFIG/certs/ca/ca.crt"] + +xpack.security.transport.ssl.enabled: true +transport.tcp.port: 9301 + +xpack.security.http.ssl.enabled: true +http.port: 9201 + +EOF +DATA_SETTINGS + + start_data_node + wait_for_xpack 127.0.0.1 9201 +} + +@test "[$GROUP] test connection to data node using HTTPS" { + load $DATA_UTILS + export ESHOME="$DATA_HOME" + export_elasticsearch_paths + + run sudo -E -u $DATA_USER curl --cacert "$ESCONFIG/certs/ca/ca.crt" -XGET "https://127.0.0.1:9201" + [ "$status" -eq 0 ] || { + echo "Failed to connect to data node using HTTPS:" + echo "$output" + false + } + echo "$output" | grep "missing authentication token" +} + +@test "[$GROUP] test node to node communication" { + load $MASTER_UTILS + export ESHOME="$MASTER_HOME" + export_elasticsearch_paths + + testIndex=$(sudo curl -u "elastic:changeme" \ + -H "Content-Type: application/json" \ + --cacert "$ESCONFIG/certs/ca/ca.crt" \ + -XPOST "https://127.0.0.1:9200/books/book/0?refresh" \ + -d '{"title": "Elasticsearch The Definitive Guide"}') + + debug_collect_logs + echo "$testIndex" | grep '"result":"created"' + + masterSettings=$(sudo curl -u 
"elastic:changeme" \ + -H "Content-Type: application/json" \ + --cacert "$ESCONFIG/certs/ca/ca.crt" \ + -XGET "https://127.0.0.1:9200/_nodes/node-master?filter_path=nodes.*.settings.xpack,nodes.*.settings.http.type,nodes.*.settings.transport.type") + + echo "$masterSettings" | grep '"http":{"ssl":{"enabled":"true"}' + echo "$masterSettings" | grep '"http":{"type":"security4"}' + echo "$masterSettings" | grep '"transport":{"ssl":{"enabled":"true"}' + echo "$masterSettings" | grep '"transport":{"type":"security4"}' + + load $DATA_UTILS + export ESHOME="$DATA_HOME" + export_elasticsearch_paths + + dataSettings=$(curl -u "elastic:changeme" \ + -H "Content-Type: application/json" \ + --cacert "$ESCONFIG/certs/ca/ca.crt" \ + -XGET "https://127.0.0.1:9200/_nodes/node-data?filter_path=nodes.*.settings.xpack,nodes.*.settings.http.type,nodes.*.settings.transport.type") + + echo "$dataSettings" | grep '"http":{"ssl":{"enabled":"true"}' + echo "$dataSettings" | grep '"http":{"type":"security4"}' + echo "$dataSettings" | grep '"transport":{"ssl":{"enabled":"true"}' + echo "$dataSettings" | grep '"transport":{"type":"security4"}' + + testSearch=$(curl -u "elastic:changeme" \ + -H "Content-Type: application/json" \ + --cacert "$ESCONFIG/certs/ca/ca.crt" \ + -XGET "https://127.0.0.1:9200/_search?q=title:guide") + + echo "$testSearch" | grep '"_index":"books"' + echo "$testSearch" | grep '"_id":"0"' +} diff --git a/x-pack/qa/vagrant/src/test/resources/packaging/tests/setup_passwords.bash b/x-pack/qa/vagrant/src/test/resources/packaging/tests/setup_passwords.bash new file mode 100644 index 0000000000000..14bd8be682693 --- /dev/null +++ b/x-pack/qa/vagrant/src/test/resources/packaging/tests/setup_passwords.bash @@ -0,0 +1,85 @@ + +#!/usr/bin/env bats + +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License; +# you may not use this file except in compliance with the Elastic License. + +load $BATS_UTILS/utils.bash +load $BATS_UTILS/plugins.bash +load $BATS_UTILS/xpack.bash + +setup() { + if [ $BATS_TEST_NUMBER == 1 ]; then + export PACKAGE_NAME="elasticsearch" + clean_before_test + install + + generate_trial_license + verify_xpack_installation + fi +} + + +if [[ "$BATS_TEST_FILENAME" =~ 30_tar_setup_passwords.bats$ ]]; then + load $BATS_UTILS/tar.bash + GROUP='TAR SETUP PASSWORD' + install() { + install_archive + verify_archive_installation + } + export ESHOME=/tmp/elasticsearch + export_elasticsearch_paths + export ESPLUGIN_COMMAND_USER=elasticsearch +else + load $BATS_UTILS/packages.bash + if is_rpm; then + GROUP='RPM SETUP PASSWORD' + elif is_dpkg; then + GROUP='DEB SETUP PASSWORD' + fi + export_elasticsearch_paths + export ESPLUGIN_COMMAND_USER=root + install() { + install_package + verify_package_installation + } +fi + +@test "[$GROUP] test auto generated passwords" { + run_elasticsearch_service 0 + wait_for_xpack + + run sudo -E -u $ESPLUGIN_COMMAND_USER bash <<"SETUP_AUTO" +echo 'y' | $ESHOME/bin/elasticsearch-setup-passwords auto +SETUP_AUTO + echo "$output" > /tmp/setup-passwords-output + [ "$status" -eq 0 ] || { + echo "Expected x-pack elasticsearch-setup-passwords tool exit code to be zero" + cat /tmp/setup-passwords-output + false + } + + curl -s -XGET localhost:9200 | grep "missing authentication token for REST" + + # Disable bash history expansion because passwords can contain "!" 
+ set +H + + users=( elastic kibana logstash_system ) + for user in "${users[@]}"; do + grep "Changed password for user $user" /tmp/setup-passwords-output || { + echo "Expected x-pack elasticsearch-setup-passwords tool to change password for user [$user]:" + cat /tmp/setup-passwords-output + false + } + + password=$(grep "PASSWORD $user = " /tmp/setup-passwords-output | sed "s/PASSWORD $user = //") + curl -u "$user:$password" -XGET localhost:9200 | grep "You Know, for Search" + + basic=$(echo -n "$user:$password" | base64) + curl -H "Authorization: Basic $basic" -XGET localhost:9200 | grep "You Know, for Search" + done + set -H + + stop_elasticsearch_service +} diff --git a/x-pack/qa/vagrant/src/test/resources/packaging/utils/xpack.bash b/x-pack/qa/vagrant/src/test/resources/packaging/utils/xpack.bash new file mode 100644 index 0000000000000..95ab2a08d3e57 --- /dev/null +++ b/x-pack/qa/vagrant/src/test/resources/packaging/utils/xpack.bash @@ -0,0 +1,121 @@ +#!/bin/bash + +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License; +# you may not use this file except in compliance with the Elastic License. + +# Checks that X-Pack files are correctly installed +verify_xpack_installation() { + local name="x-pack" + local user="$ESPLUGIN_COMMAND_USER" + local group="$ESPLUGIN_COMMAND_USER" + + # Verify binary files + # nocommit: already verified by "main" package verification + #assert_file "$ESHOME/bin" d $user $group 755 + local binaryFiles=( + 'elasticsearch-certgen' + 'elasticsearch-certgen.bat' + 'elasticsearch-certutil' + 'elasticsearch-certutil.bat' + 'elasticsearch-croneval' + 'elasticsearch-croneval.bat' + 'elasticsearch-migrate' + 'elasticsearch-migrate.bat' + 'elasticsearch-saml-metadata' + 'elasticsearch-saml-metadata.bat' + 'elasticsearch-setup-passwords' + 'elasticsearch-setup-passwords.bat' + 'elasticsearch-sql-cli' + 'elasticsearch-sql-cli.bat' + "elasticsearch-sql-cli-$(cat version).jar" # This jar is executable so we pitch it in bin so folks will find it + 'elasticsearch-syskeygen' + 'elasticsearch-syskeygen.bat' + 'elasticsearch-users' + 'elasticsearch-users.bat' + 'x-pack-env' + 'x-pack-env.bat' + 'x-pack-security-env' + 'x-pack-security-env.bat' + 'x-pack-watcher-env' + 'x-pack-watcher-env.bat' + ) + + local binaryFilesCount=5 # start with oss distro number + for binaryFile in ${binaryFiles[@]}; do + echo "checking for bin file ${binaryFile}" + assert_file "$ESHOME/bin/${binaryFile}" f $user $group 755 + binaryFilesCount=$(( binaryFilesCount + 1 )) + done + ls "$ESHOME/bin/" + # nocommit: decide whether to check the files added by the distribution, not part of xpack... + #assert_number_of_files "$ESHOME/bin/" $binaryFilesCount + + # Verify config files + # nocommit: already verified by "main" package verification + #assert_file "$ESCONFIG" d $user elasticsearch 755 + local configFiles=( + 'users' + 'users_roles' + 'roles.yml' + 'role_mapping.yml' + 'log4j2.properties' + ) + + local configFilesCount=2 # start with ES files, excluding log4j2 + for configFile in ${configFiles[@]}; do + assert_file "$ESCONFIG/${configFile}" f $user elasticsearch 660 + configFilesCount=$(( configFilesCount + 1 )) + done + # nocommit: decide whether to check the files added by the distribution, not part of xpack... 
+ #assert_number_of_files "$ESCONFIG/" $configFilesCount + + # Read the $name.expected file that contains all the expected + # plugins for the meta plugin + while read plugin; do + assert_module_or_plugin_directory "$ESMODULES/$name/$plugin" + assert_file_exist "$ESMODULES/$name/$plugin/$plugin"*".jar" + assert_file_exist "$ESMODULES/$name/$plugin/plugin-descriptor.properties" + assert_file_exist "$ESMODULES/$name/$plugin/plugin-security.policy" + done > $ESCONFIG/elasticsearch.yml <<- EOF +xpack.license.self_generated.type: trial +xpack.security.enabled: true +EOF +NODE_SETTINGS +} + +wait_for_xpack() { + local host=${1:-localhost} + local port=${2:-9200} + local listening=1 + for i in {1..60}; do + if test_port "$host" "$port"; then + listening=0 + break + else + sleep 1 + fi + done + + [ "$listening" -eq 0 ] || { + echo "Looks like elasticsearch with x-pack never started." + debug_collect_logs + false + } +} diff --git a/x-pack/test/build.gradle b/x-pack/test/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/x-pack/test/idp-fixture/.gitignore b/x-pack/test/idp-fixture/.gitignore new file mode 100644 index 0000000000000..114dd0b46e381 --- /dev/null +++ b/x-pack/test/idp-fixture/.gitignore @@ -0,0 +1 @@ +src/main/resources/provision/generated/* diff --git a/x-pack/test/idp-fixture/README.txt b/x-pack/test/idp-fixture/README.txt new file mode 100644 index 0000000000000..fff29f38797c1 --- /dev/null +++ b/x-pack/test/idp-fixture/README.txt @@ -0,0 +1,6 @@ +Provisions OpenLDAP + shibboleth IDP 3.3.1 . +Uses ansible on the guest. + +Run: `vagrant up --provision` + +Any issues: albert.zaharovits@elastic.co diff --git a/x-pack/test/idp-fixture/Vagrantfile b/x-pack/test/idp-fixture/Vagrantfile new file mode 100644 index 0000000000000..c05fa33ba1a7a --- /dev/null +++ b/x-pack/test/idp-fixture/Vagrantfile @@ -0,0 +1,24 @@ +Vagrant.configure("2") do |config| + + config.vm.define "test.shibboleth.elastic.local" do |config| + config.vm.box = "elastic/ubuntu-16.04-x86_64" + end + + config.vm.hostname = "localhost" + + if Vagrant.has_plugin?("vagrant-cachier") + config.cache.scope = :box + end + + config.vm.network "forwarded_port", guest: 389, host: 60389, protocol: "tcp" + config.vm.network "forwarded_port", guest: 636, host: 60636, protocol: "tcp" + config.vm.network "forwarded_port", guest: 8080, host: 60080, protocol: "tcp" + config.vm.network "forwarded_port", guest: 8443, host: 60443, protocol: "tcp" + + config.vm.provision "ansible_local" do |ansible| + ansible.verbose = "v" + ansible.playbook = "src/main/resources/provision/playbook.yml" + ansible.install_mode = "pip" + end + +end diff --git a/x-pack/test/idp-fixture/build.gradle b/x-pack/test/idp-fixture/build.gradle new file mode 100644 index 0000000000000..c62ac0409ba95 --- /dev/null +++ b/x-pack/test/idp-fixture/build.gradle @@ -0,0 +1,43 @@ +apply plugin: 'elasticsearch.build' + +Map vagrantEnvVars = [ + 'VAGRANT_CWD' : "${project.projectDir.absolutePath}", + 'VAGRANT_VAGRANTFILE' : 'Vagrantfile', + 'VAGRANT_PROJECT_DIR' : "${project.projectDir.absolutePath}" +] + +String box = "test.shibboleth.elastic.local" + +task update(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { + command 'box' + subcommand 'update' + boxName box + environmentVars vagrantEnvVars +} + +task up(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { + command 'up' + args '--provision', '--provider', 'virtualbox' + boxName box + environmentVars vagrantEnvVars + dependsOn update +} + +task halt(type: 
org.elasticsearch.gradle.vagrant.VagrantCommandTask) { + command 'halt' + boxName box + environmentVars vagrantEnvVars +} + +task destroy(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { + command 'destroy' + args '-f' + boxName box + environmentVars vagrantEnvVars + dependsOn halt +} + +thirdPartyAudit.enabled = false +licenseHeaders.enabled = false +test.enabled = false +jarHell.enabled = false diff --git a/x-pack/test/idp-fixture/src/main/resources/certs/README.txt b/x-pack/test/idp-fixture/src/main/resources/certs/README.txt new file mode 100644 index 0000000000000..0cf738156a7f0 --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/certs/README.txt @@ -0,0 +1,15 @@ +File in this directory are: + +idp-ca.crt +idp-ca.key + Description: A CA for the IdP + Generated Date: 2018-02-07 + Command: bin/elasticsearch-certutil ca --ca-dn 'CN=idp-fixture,OU=elasticsearch,DC=elastic,DC=co' --days 5000 -keysize 1024 --out idp-ca.zip --pem + X-Pack Version: 6.2.0 + +idptrust.jks + Description: Java Keystore Format of CA cert + Generated Date: 2018-02-07 + Command: keytool -importcert -file ca.crt -alias idp-fixture-ca -keystore idptrust.jks -noprompt -storepass changeit + Java Version: Java(TM) SE Runtime Environment (build 9.0.1+11) + diff --git a/x-pack/test/idp-fixture/src/main/resources/certs/ca.crt b/x-pack/test/idp-fixture/src/main/resources/certs/ca.crt new file mode 100644 index 0000000000000..1ab8e866c1785 --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/certs/ca.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDEDCCAnmgAwIBAgIVAOLlDV8Lvg17LwKqchYKcsog1SyKMA0GCSqGSIb3DQEB +CwUAMFsxEjAQBgoJkiaJk/IsZAEZFgJjbzEXMBUGCgmSJomT8ixkARkWB2VsYXN0 +aWMxFjAUBgNVBAsTDWVsYXN0aWNzZWFyY2gxFDASBgNVBAMTC2lkcC1maXh0dXJl +MB4XDTE4MDIwNzAzMjAwNloXDTMxMTAxNzAzMjAwNlowWzESMBAGCgmSJomT8ixk +ARkWAmNvMRcwFQYKCZImiZPyLGQBGRYHZWxhc3RpYzEWMBQGA1UECxMNZWxhc3Rp +Y3NlYXJjaDEUMBIGA1UEAxMLaWRwLWZpeHR1cmUwgZ8wDQYJKoZIhvcNAQEBBQAD +gY0AMIGJAoGBALWf8R7uGnrrmuQ26khwQ/81f+x57RgE1cHQGp0sBkwsijzZPpuU +8ZkqYMNXG/LU2hNfAv4LeCsighgo4Le+TkBKncbucQcNM+dLINvhAfgYp9QAdGjk +89hxWEQ6p/Tr98TG0Qd7jZa6bu8azMf7+bmjKpHaffIMpxDnkPZsaxodAgMBAAGj +gc8wgcwwHQYDVR0OBBYEFDsd63fpzLH1G+aduhypBPctWuNNMIGZBgNVHSMEgZEw +gY6AFDsd63fpzLH1G+aduhypBPctWuNNoV+kXTBbMRQwEgYDVQQDEwtpZHAtZml4 +dHVyZTEWMBQGA1UECxMNZWxhc3RpY3NlYXJjaDEXMBUGCgmSJomT8ixkARkWB2Vs +YXN0aWMxEjAQBgoJkiaJk/IsZAEZFgJjb4IVAOLlDV8Lvg17LwKqchYKcsog1SyK +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADgYEAi1bfK31u7deMDLXv +Axrg1nJjEzMjkb6F/tqA2hJCokvWz2sdKPLHfrfOu2edHm4qQABAdnmRtE/1xsYm +xVuZA+O7khEkXv5ql65HIgCHL0hEvFWfKzMDCjgm+1rvNTMbgsRj2RGzEQeu/Aqg +Nv2mnc0Vjk3kaAQ0JtmCI8k6fM0= +-----END CERTIFICATE----- diff --git a/x-pack/test/idp-fixture/src/main/resources/certs/ca.key b/x-pack/test/idp-fixture/src/main/resources/certs/ca.key new file mode 100644 index 0000000000000..9f93ff3b7ceab --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/certs/ca.key @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQC1n/Ee7hp665rkNupIcEP/NX/see0YBNXB0BqdLAZMLIo82T6b +lPGZKmDDVxvy1NoTXwL+C3grIoIYKOC3vk5ASp3G7nEHDTPnSyDb4QH4GKfUAHRo +5PPYcVhEOqf06/fExtEHe42Wum7vGszH+/m5oyqR2n3yDKcQ55D2bGsaHQIDAQAB +AoGACfOsm5xCWS/ludGEftmf8DD3RHbd1e4V5FvJyYjrA2uBW5ovwwijQFhBGxL/ +1gtxs5QGLvNj70Ehzb8XqRnFYcrSUxkABCcO9vJf8wuamtPeaQzlSVSVM9myjkBu +2EhegkFXSgFiVX6A/sxm8e8bqxxouz46Upa2/YLKhcb5oiECQQDb3HhP0hIx0oDj +h1FXLACtbTlYUg8gGylD17RsWSPB765tOTt65/KztyH8BmdlTAKxIC5BHEQLYiug +u3KwPEk5AkEA03qFxj/quoH6l0y7i8kah97KCtiM0kg4oXYDuSDIzt4NqdNw/UWx 
+p3DGiIPpY5errR1ytyPiiuM2j+c5oUcMBQJAfC4SZkMos6tJ0Tlk3++iklHWyePP +VzsAG6mB5pCSeb9+rYJd7hWEJ62QLGERlU1RV+ntNilY5XUVXzuAk7n5QQJBANLg +31q0S9WVXRPYUT/v1kPcVi6Ah9P8bnQa4VWOqo8WABvzmz0DbUahf2eL2oQULv3e +WpDi+Lk0HylaEi6PUR0CQQDHTzjyjuTLmnPw5AvZw7oQgilZxTUhOapw3Ihcq/KA +T8oFnLwmnMs+kZOO6e2QcagXaFXufH1w/MvxhSjHj8SO +-----END RSA PRIVATE KEY----- diff --git a/x-pack/test/idp-fixture/src/main/resources/certs/idptrust.jks b/x-pack/test/idp-fixture/src/main/resources/certs/idptrust.jks new file mode 100644 index 0000000000000..fbd3135095f9b Binary files /dev/null and b/x-pack/test/idp-fixture/src/main/resources/certs/idptrust.jks differ diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/playbook.yml b/x-pack/test/idp-fixture/src/main/resources/provision/playbook.yml new file mode 100644 index 0000000000000..24474a832d208 --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/playbook.yml @@ -0,0 +1,11 @@ +- hosts: all + + vars_files: + - vars/default.yml + + roles: + - { role: java-8-openjdk, become: yes } + - { role: certs, become: yes } + - { role: openldap, become: yes } + - { role: tomcat-8, become: yes } + - { role: shibboleth, become: yes } diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/README b/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/README new file mode 100644 index 0000000000000..748d912bd2c5f --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/README @@ -0,0 +1,2 @@ +Generates CA and OpenLDAP Service Key/Certificate +See: https://www.digitalocean.com/community/tutorials/how-to-encrypt-openldap-connections-using-starttls diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/files/ca_server.conf b/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/files/ca_server.conf new file mode 100644 index 0000000000000..3a412ac90760d --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/files/ca_server.conf @@ -0,0 +1,3 @@ +cn = Vagrant Server CA +ca +cert_signing_key diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/tasks/main.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/tasks/main.yml new file mode 100644 index 0000000000000..553b9eff5d703 --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/tasks/main.yml @@ -0,0 +1,106 @@ +--- + +- name: Install GNUtls + apt: + name: "{{ item }}" + state: installed + update_cache: yes + with_items: + - gnutls-bin + - ssl-cert + +- name: Create GNUtls templates dir + file: + path: "{{ ssl_dir_templates }}" + state: directory + group: ssl-cert + mode: 0777 + +- name: Copy server cert template + template: + src: cert_server.conf.j2 + dest: "{{ ssl_dir_templates }}/cert_server.conf" + mode: 0666 + +- name: Copy server keystore template + template: + src: keystore_server.conf.j2 + dest: "{{ ssl_dir_templates }}/keystore_server.conf" + mode: 0666 + +- name: Copy CA Cert + copy: + src: "../certs/ca.crt" + dest: "{{ ssl_dir_certs }}/ca_server.pem" + mode: 0666 + register: copy_ca + +- name: Copy CA Key + copy: + src: "../certs/ca.key" + dest: "{{ ssl_dir_private }}/ca_server.key" + mode: 0600 + +- name: Create Key for LDAP Service + command: "certtool -p --sec-param high --outfile {{ ssl_dir_private }}/{{ openldap_key_name }}" + args: + creates: "{{ ssl_dir_private }}/{{ openldap_key_name }}" + +- name: Set group for LDAP Key + file: + path: "{{ ssl_dir_private }}/{{ openldap_key_name }}" + group: 
ssl-cert + mode: 0640 + +- name: Delete old LDAP cert + file: + path: "{{ ssl_dir_certs }}/{{ openldap_cert_name}}" + state: absent + when: copy_ca.changed + +- name: Create Cert for LDAP + command: "certtool -c --load-privkey {{ ssl_dir_private }}/{{ openldap_key_name }} --load-ca-certificate {{ ssl_dir_certs }}/ca_server.pem --load-ca-privkey {{ ssl_dir_private }}/ca_server.key --template {{ ssl_dir_templates }}/cert_server.conf --outfile {{ ssl_dir_certs }}/{{ openldap_cert_name}}" + args: + creates: "{{ ssl_dir_certs }}/{{ openldap_cert_name}}" + +- name: Create Key for Tomcat Service + command: "certtool -p --sec-param high --outfile {{ ssl_dir_private }}/{{ tomcat_key_name }}" + args: + creates: "{{ ssl_dir_private }}/{{ tomcat_key_name }}" + +- name: Set group for Tomcat Key + file: + path: "{{ ssl_dir_private }}/{{ tomcat_key_name }}" + group: ssl-cert + mode: 0640 + +- name: Delete old Tomcat cert + file: + path: "{{ ssl_dir_certs }}/{{ tomcat_cert_name }}" + state: absent + when: copy_ca.changed + +- name: Create Cert for Tomcat + command: "certtool -c --load-privkey {{ ssl_dir_private }}/{{ tomcat_key_name }} --load-ca-certificate {{ ssl_dir_certs }}/ca_server.pem --load-ca-privkey {{ ssl_dir_private }}/ca_server.key --template {{ ssl_dir_templates }}/cert_server.conf --outfile {{ ssl_dir_certs }}/{{ tomcat_cert_name}}" + args: + creates: "{{ ssl_dir_certs }}/{{ tomcat_cert_name}}" + register: tomcat_cert + +- name: Delete old Tomcat Keystore + file: + path: "{{ ssl_dir_private }}/{{ tomcat_keystore_name }}" + state: absent + when: tomcat_cert.changed + +- name: Create Keystore for Tomcat + command: "certtool --load-ca-certificate {{ ssl_dir_certs }}/ca_server.pem --load-privkey {{ ssl_dir_private }}/{{ tomcat_key_name }} --load-certificate {{ ssl_dir_certs }}/{{ tomcat_cert_name }} --template {{ ssl_dir_templates }}/keystore_server.conf --outder --to-p12 --outfile {{ ssl_dir_private }}/{{ tomcat_keystore_name }}" + args: + creates: "{{ ssl_dir_private }}/{{ tomcat_keystore_name }}" + notify: Restart Tomcat Service + +- name: Set group for Tomcat Keystore + file: + path: "{{ ssl_dir_private }}/{{ tomcat_keystore_name }}" + group: ssl-cert + mode: 0640 +... 
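The certs role above drives GnuTLS `certtool`: it copies the checked-in CA into the guest, generates service keys, issues CA-signed certificates for OpenLDAP and Tomcat, and bundles the Tomcat key pair into a PKCS#12 keystore. For anyone poking at the fixture by hand, a rough manual sanity check of the generated material might look like the sketch below. This is illustrative only and not part of the role; the paths and the keystore password follow the defaults in provision/vars/default.yml (ssl_dir=/etc/ssl, tomcat_keystore_pass=secret).

```sh
# Sketch only -- run inside the guest after provisioning, assuming the default
# variable values from provision/vars/default.yml.
certtool --certificate-info --infile /etc/ssl/certs/ldap_server.pem | head -n 20

# Verify both issued certificates against the CA that was copied in as ca_server.pem
certtool --verify --load-ca-certificate /etc/ssl/certs/ca_server.pem \
         --infile /etc/ssl/certs/ldap_server.pem
certtool --verify --load-ca-certificate /etc/ssl/certs/ca_server.pem \
         --infile /etc/ssl/certs/tomcat_server.pem

# The Tomcat keystore is PKCS#12; openssl can list its contents
# (password is the role's tomcat_keystore_pass, "secret" by default)
openssl pkcs12 -in /etc/ssl/private/tomcat_server.p12 -nokeys -info -passin pass:secret
```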
diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/templates/cert_server.conf.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/templates/cert_server.conf.j2 new file mode 100644 index 0000000000000..448264d368198 --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/templates/cert_server.conf.j2 @@ -0,0 +1,7 @@ +organization = Elastic.co +cn = {{ ansible_fqdn }} +dns_name = {{ ansible_fqdn }} +tls_www_server +encryption_key +signing_key +expiration_days = 3652 diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/templates/keystore_server.conf.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/templates/keystore_server.conf.j2 new file mode 100644 index 0000000000000..ee7eeaf52398e --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/templates/keystore_server.conf.j2 @@ -0,0 +1,2 @@ +pkcs12_key_name = {{ tomcat_key_alias }} +password = {{ tomcat_keystore_pass }} diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/vars/main.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/vars/main.yml new file mode 100644 index 0000000000000..a364fc641d2ad --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/certs/vars/main.yml @@ -0,0 +1 @@ +ssl_dir_templates: "{{ ssl_dir }}/templates" diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/java-8-openjdk/tasks/main.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/java-8-openjdk/tasks/main.yml new file mode 100644 index 0000000000000..1951a6f31a086 --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/java-8-openjdk/tasks/main.yml @@ -0,0 +1,16 @@ +--- + +- name: Install openjdk-8 + apt: + name: openjdk-8-jdk-headless + state: latest + update_cache: true + +- name: Capture JAVA_HOME + shell: readlink -e /etc/alternatives/java | sed 's|/jre/bin/java||' + register: java_home + +- set_fact: + java_home: "{{ java_home.stdout }}" + +... 
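The java-8-openjdk role above captures JAVA_HOME by resolving the `java` alternative and stripping the `/jre/bin/java` suffix. Run by hand on a stock Ubuntu 16.04 guest with `openjdk-8-jdk-headless` installed, the same pipeline resolves roughly as follows (illustrative; the exact multiarch directory can differ by platform):

```sh
$ readlink -e /etc/alternatives/java
/usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
$ readlink -e /etc/alternatives/java | sed 's|/jre/bin/java||'
/usr/lib/jvm/java-8-openjdk-amd64
```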
diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/handlers/main.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/handlers/main.yml new file mode 100644 index 0000000000000..cad57b49f6a26 --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/handlers/main.yml @@ -0,0 +1,3 @@ +- name: restart slapd + become: yes + service: name=slapd state=restarted diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/configure.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/configure.yml new file mode 100644 index 0000000000000..00a06b319b12f --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/configure.yml @@ -0,0 +1,29 @@ +- name: Hash Password + shell: "slappasswd -s {{ ldap_password }}" + register: passwd + +- name: Setting default db + ldap_attr: + dn: "{{ default_db_dn }}" + name: "{{ item.key }}" + values: "{{ item.val }}" + state: exact + with_items: + - { key: olcSuffix, val: "{{ ldap_domain_dn }}" } + - { key: olcRootDN, val: "cn=admin,{{ ldap_domain_dn }}" } + - { key: olcRootPW, val: "{{ passwd.stdout }}" } + - { key: olcAccess, val: "{0}to attrs=userPassword,shadowLastChange\n + by self write\n + by anonymous auth\n + by dn=\"cn=admin,{{ ldap_domain_dn }}\" write\n + by * none" } + - { key: olcAccess, val: "{1}to dn.base=\"\" by * read" } + - { key: olcAccess, val: "{2}to * \n + by self write\n + by dn.base=\"gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth\" write\n + by dn=\"cn=admin,{{ ldap_domain_dn }}\" write\n + by * read" } + # - { key: olcAccess, val: "{0}to *\n + # by dn.base=\"gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth\" write\n + # by dn.base=\"cn=admin,{{ ldap_domain_dn }}\" write\n + # by * none"} diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/install.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/install.yml new file mode 100644 index 0000000000000..ccf1d5c7794f4 --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/install.yml @@ -0,0 +1,79 @@ +--- + +- name: Install openldap + apt: name={{ item }} state=installed update_cache=yes + with_items: + - slapd + - ldap-utils + +- name: Create service user for openldap + user: + name: "{{ openldap_server_user }}" + group: "{{ openldap_server_user }}" + createhome: no + system: yes + +- name: Add openldap service user to the ssl-cert group + user: + name: "{{ openldap_server_user }}" + append: yes + groups: ssl-cert + +- name: Copy slapd defaults + template: + src: slapd.j2 + dest: /etc/default/slapd + owner: "{{ openldap_server_user }}" + mode: 0644 + +- name: Restart slapd + service: name=slapd state=restarted enabled=yes + +# bug: https://github.com/ansible/ansible/issues/25665 +# When solved the commented section should replace the copying +# and execution of the subsequent 2 ldif files +#- name: Configure openldap server tls/ssl - 1 +# become: yes +# ignore_errors: yes +# ldap_attr: +# dn: cn=config +# name: "{{ item.0 }}" +# values: "{{ item.1 }}" +# state: exact +# with_together: +# - [ "olcTLSCertificateFile", "olcTLSCertificateKeyFile" ] +# - [ "{{ openldap_ssl_dir }}/cert.pem", "{{ openldap_ssl_dir }}/key.pem" ] + +- name: Copy TLS ldif add config + template: + src: tls_add.ldif.j2 + dest: /tmp/tls_add.ldif + +- name: Run TLS ldif add config + shell: ldapmodify -Y EXTERNAL -H ldapi:/// -f 
/tmp/tls_add.ldif + notify: + - restart slapd + ignore_errors: yes + register: addTLS + +- name: Copy TLS ldif replace config + template: + src: tls_replace.ldif.j2 + dest: /tmp/tls_replace.ldif + when: addTLS|failed + +- name: Run TLS ldif replace config + shell: ldapmodify -Y EXTERNAL -H ldapi:/// -f /tmp/tls_replace.ldif + notify: + - restart slapd + when: addTLS|failed + +- name: Restart slapd + service: name=slapd state=restarted enabled=yes + +- name: Configure local clients to trust slapd cert + template: + src: ldap.conf.j2 + dest: /etc/ldap/ldap.conf + backup: yes +... diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/main.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/main.yml new file mode 100644 index 0000000000000..a836c535bf6ae --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- include: provision_deps.yml +- include: install.yml +- include: configure.yml +- include: populate.yml +... diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/populate.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/populate.yml new file mode 100644 index 0000000000000..0c1200042e391 --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/populate.yml @@ -0,0 +1,77 @@ +--- + +- name: Hash Password + shell: "slappasswd -s {{ ldap_password }}" + register: passwd + +- name: Load top level entry + ldap_entry: + dn: "{{ ldap_domain_dn }}" + objectClass: domain + attributes: + dc: "{{ ldap_domain.split('.')[0] }}" + +- name: Add people root entry + ldap_entry: + dn: "ou=people,{{ ldap_domain_dn }}" + objectClass: organizationalUnit + attributes: + ou: people + +- name: Add people entries + ldap_entry: + dn: "uid={{ item.uid }},ou=people,{{ ldap_domain_dn }}" + objectClass: + - top + - posixAccount + - inetOrgPerson + attributes: + userPassword: "{{ passwd.stdout }}" + uid: "{{ item.uid }}" + uidNumber: "{{ item.uidNumber }}" + gidNumber: "{{ item.uidNumber }}" + homeDirectory: "/home/{{ item.uidNumber }}" + mail: "{{ item.name }}@{{ ldap_domain }}" + cn: "{{ item.name }}" + sn: "{{ item.name }}" + with_items: + - { uid: kraken, uidNumber: 1000, name: "Commander Kraken" } + - { uid: hulk, uidNumber: 1001, name: "Bruce Banner" } + - { uid: hawkeye, uidNumber: 1002, name: "Clint Barton" } + - { uid: jarvis, uidNumber: 1003, name: "Jarvis" } + - { uid: blackwidow, uidNumber: 1004, name: "Natasha Romanova" } + - { uid: fury, uidNumber: 1005, name: "Nick Fury" } + - { uid: phil, uidNumber: 1006, name: "Phil Colson" } + - { uid: cap, uidNumber: 1007, name: "Steve Rogers" } + - { uid: thor, uidNumber: 1008, name: "Thor Odinson" } + - { uid: ironman, uidNumber: 1009, name: "Tony Stark" } + - { uid: odin, uidNumber: 1010, name: "Gods" } + - { uid: selvig, uidNumber: 1011, name: "Erik Selvig" } + +- name: Add group entries + ldap_entry: + dn: "cn={{ item.name }},ou=people,{{ ldap_domain_dn }}" + objectClass: + - top + - posixGroup + attributes: + cn: "{{ item.name }}" + gidNumber: "{{ item.gid }}" + with_items: + - "{{ group_members }}" + +- name: Populate group entries + ldap_attr: + dn: "cn={{ item.0.name }},ou=people,{{ ldap_domain_dn }}" + name: memberUid + values: "{{ item.1 }}" + with_subelements: + - "{{ group_members }}" + - memberUid + +# print users: +# sudo ldapsearch -H ldapi:// -Y EXTERNAL -LLL -b "{{ ldap_domain_dn }}" '(objectClass=person)' cn mail uid +# 
print groups: +# sudo ldapsearch -H ldapi:// -Y EXTERNAL -LLL -b "{{ ldap_domain_dn }}" '(objectClass=posixGroup)' + +... diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/provision_deps.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/provision_deps.yml new file mode 100644 index 0000000000000..2c1874c488aa6 --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/tasks/provision_deps.yml @@ -0,0 +1,10 @@ +- name: Install python-ldap dependecies + apt: name={{ item }} state=installed update_cache=yes + with_items: + - libsasl2-dev + - python-dev + - libldap2-dev + - libssl-dev + +- name: Install python-ldap + pip: name=python-ldap diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/ldap.conf.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/ldap.conf.j2 new file mode 100644 index 0000000000000..e3b49df61eeca --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/ldap.conf.j2 @@ -0,0 +1,17 @@ +# +# LDAP Defaults +# + +# See ldap.conf(5) for details +# This file should be world readable but not world writable. + +#BASE dc=example,dc=com +#URI ldap://ldap.example.com ldap://ldap-master.example.com:666 + +#SIZELIMIT 12 +#TIMELIMIT 15 +#DEREF never + +# TLS certificates (needed for GnuTLS) +#TLS_CACERT /etc/ssl/certs/ca-certificates.crt +TLS_CACERT {{ ssl_dir_certs }}/ca_server.pem diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/slapd.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/slapd.j2 new file mode 100644 index 0000000000000..5a202fa9fe6a4 --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/slapd.j2 @@ -0,0 +1,46 @@ +# Default location of the slapd.conf file or slapd.d cn=config directory. If +# empty, use the compiled-in default (/etc/ldap/slapd.d with a fallback to +# /etc/ldap/slapd.conf). +SLAPD_CONF= + +# System account to run the slapd server under. If empty the server +# will run as root. + +SLAPD_USER="{{ openldap_server_user }}" + +# System group to run the slapd server under. If empty the server will +# run in the primary group of its user. +SLAPD_GROUP="{{ openldap_server_user }}" + +# Path to the pid file of the slapd server. If not set the init.d script +# will try to figure it out from $SLAPD_CONF (/etc/ldap/slapd.d by +# default) +SLAPD_PIDFILE= + +# slapd normally serves ldap only on all TCP-ports 389. slapd can also +# service requests on TCP-port 636 (ldaps) and requests via unix +# sockets. +# Example usage: +# SLAPD_SERVICES="ldap://127.0.0.1:389/ ldaps:/// ldapi:///" +SLAPD_SERVICES="ldap:/// ldapi:/// ldaps:///" + +# If SLAPD_NO_START is set, the init script will not start or restart +# slapd (but stop will still work). Uncomment this if you are +# starting slapd via some other means or if you don't want slapd normally +# started at boot. +#SLAPD_NO_START=1 + +# If SLAPD_SENTINEL_FILE is set to path to a file and that file exists, +# the init script will not start or restart slapd (but stop will still +# work). Use this for temporarily disabling startup of slapd (when doing +# maintenance, for example, or through a configuration management system) +# when you don't want to edit a configuration file. 
+SLAPD_SENTINEL_FILE=/etc/ldap/noslapd + +# For Kerberos authentication (via SASL), slapd by default uses the system +# keytab file (/etc/krb5.keytab). To use a different keytab file, +# uncomment this line and change the path. +#export KRB5_KTNAME=/etc/krb5.keytab + +# Additional options to pass to slapd +SLAPD_OPTIONS="" diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/tls_add.ldif.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/tls_add.ldif.j2 new file mode 100644 index 0000000000000..f2bbefd3aee0c --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/tls_add.ldif.j2 @@ -0,0 +1,10 @@ +dn: cn=config +changetype: modify +add: olcTLSCACertificateFile +olcTLSCACertificateFile: {{ ssl_dir_certs }}/ca_server.pem +- +add: olcTLSCertificateFile +olcTLSCertificateFile: {{ ssl_dir_certs }}/{{ openldap_cert_name }} +- +add: olcTLSCertificateKeyFile +olcTLSCertificateKeyFile: {{ ssl_dir_private }}/{{ openldap_key_name }} diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/tls_replace.ldif.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/tls_replace.ldif.j2 new file mode 100644 index 0000000000000..40af5a77bec51 --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/templates/tls_replace.ldif.j2 @@ -0,0 +1,10 @@ +dn: cn=config +changetype: modify +replace: olcTLSCACertificateFile +olcTLSCACertificateFile: {{ ssl_dir_certs }}/ca_server.pem +- +replace: olcTLSCertificateFile +olcTLSCertificateFile: {{ ssl_dir_certs }}/{{ openldap_cert_name }} +- +replace: olcTLSCertificateKeyFile +olcTLSCertificateKeyFile: {{ ssl_dir_private }}/{{ openldap_key_name }} diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/vars/main.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/vars/main.yml new file mode 100644 index 0000000000000..dd61e48ca2bd1 --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/openldap/vars/main.yml @@ -0,0 +1,10 @@ +default_db_dn: "olcDatabase={1}mdb,cn=config" +group_members: + - {name: Hydra, gid: 101, memberUid: [kraken] } + - {name: Geniuses, gid: 102, memberUid: [hulk, ironman, selvig] } + - {name: SHIELD, gid: 103, memberUid: [hulk, hawkeye, blackwidow, fury, phil, cap, thor, ironman] } + - {name: Philanthropists, gid: 104, memberUid: [hulk, thor, ironman] } + - {name: Avengers, gid: 105, memberUid: [hulk, hawkeye, blackwidow, fury, cap, thor, ironman] } + - {name: Gods, gid: 106, memberUid: [thor, odin] } + - {name: Playboys, gid: 107, memberUid: [ironman] } + - {name: Billionaries, gid: 108, memberUid: [ironman] } diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/files/attribute-filter.xml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/files/attribute-filter.xml new file mode 100644 index 0000000000000..09892d65f6edb --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/files/attribute-filter.xml @@ -0,0 +1,38 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/files/attribute-resolver.xml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/files/attribute-resolver.xml new file mode 100644 index 0000000000000..9ee1bd3f17818 --- /dev/null +++ 
b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/files/attribute-resolver.xml @@ -0,0 +1,158 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + uid mail cn + + + + + + + + cn + + + + + + diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/files/relying-party.xml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/files/relying-party.xml new file mode 100644 index 0000000000000..40b9e5df211e7 --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/files/relying-party.xml @@ -0,0 +1,70 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/handlers/main.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/handlers/main.yml new file mode 100644 index 0000000000000..f3d67396f503a --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/handlers/main.yml @@ -0,0 +1,3 @@ +- name: Restart Tomcat Service + become: yes + service: name=tomcat8 state=restarted enabled=yes diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/tasks/main.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/tasks/main.yml new file mode 100644 index 0000000000000..275a7ca9d9feb --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/tasks/main.yml @@ -0,0 +1,88 @@ +--- +- name: Download Shibboleth IdP + get_url: + url: "http://shibboleth.net/downloads/identity-provider/{{ shib_idp_version }}/shibboleth-identity-provider-{{ shib_idp_version }}.tar.gz" + timeout: 30 + dest: "/tmp/shibboleth-identity-provider-{{ shib_idp_version }}.tar.gz" + sha256sum: 8bd852dcdc7e6729ee645c0374a3c476b152fa24506fb86ffec33dfd190e607c + +- name: Create base directory for shibboleth idp + file: + path: "{{ shib_installdir }}" + state: directory + owner: "{{ ansible_ssh_user }}" + +- name: Extract shibboleth + unarchive: + src: "/tmp/shibboleth-identity-provider-{{ shib_idp_version }}.tar.gz" + dest: "{{ shib_installdir }}" + remote_src: yes + +- name: Copying shibboleth idp install files + template: + src: idp.properties.j2 + dest: "{{ shib_installdir }}/shibboleth-identity-provider-{{ shib_idp_version }}/bin/idp.properties" + +- name: Copying shibboleth ldap properties + template: + src: ldap.properties.j2 + dest: "{{ shib_installdir }}/shibboleth-identity-provider-{{ shib_idp_version }}/bin/ldap.properties" + +- name: Copy silent installation file + template: + src: idp.silent.params.j2 + dest: "{{ shib_installdir }}/shibboleth-identity-provider-{{ shib_idp_version }}/bin/idp.silent.params" + +- name: Install Shibboleth IdP + shell: "{{ shib_installdir }}/shibboleth-identity-provider-{{ shib_idp_version }}/bin/install.sh -propertyfile {{ shib_installdir }}/shibboleth-identity-provider-{{ shib_idp_version }}/bin/idp.silent.params" + environment: + JAVA_HOME: "{{ java_home }}" + args: + creates: "{{ shib_home }}" + +- name: Turn on log debugging level + ignore_errors: yes + replace: + path: "{{ shib_home }}/conf/logback.xml" + regexp: '' + replace: "" + +# HACK! 
+- name: Fix metadata error because of port after hostname + replace: + path: "{{ shib_home }}/metadata/idp-metadata.xml" + regexp: "({{ shib_dns_name }}):8443" + replace: '\1' + +- name: Fetch idp metadata + fetch: + src: "{{ shib_home }}/metadata/idp-metadata.xml" + dest: "generated/" + flat: yes + +- name: Setting attr resolve/release policy and enable unverified RPs + copy: + src: "{{ item }}" + dest: "{{ shib_home }}/conf/{{ item }}" + mode: 0600 + with_items: + - attribute-resolver.xml + - attribute-filter.xml + - relying-party.xml + +- name: Change owner and group for Shibboleth IdP + file: + path: "{{ shib_home }}" + recurse: yes + owner: tomcat8 + group: tomcat8 + +- name: Copy shib war descriptor to Catalina localhost + template: + src: idp.xml.j2 + dest: /etc/tomcat8/Catalina/localhost/idp.xml + owner: tomcat8 + group: tomcat8 + notify: Restart Tomcat Service + +... diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/idp.properties.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/idp.properties.j2 new file mode 100644 index 0000000000000..0b7f0f76f2a45 --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/idp.properties.j2 @@ -0,0 +1,4 @@ +idp.entityID=https://test.shibboleth.elastic.local/ +idp.scope=test.elastic.co +idp.sealer.storePassword={{ idp_sealer_password }} +idp.sealer.keyPassword={{ idp_sealer_password }} diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/idp.silent.params.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/idp.silent.params.j2 new file mode 100644 index 0000000000000..dd919887ad951 --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/idp.silent.params.j2 @@ -0,0 +1,8 @@ +idp.noprompt="true" +idp.host.name={{ shib_dns_name }} +idp.src.dir={{ shib_installdir }}/shibboleth-identity-provider-{{ shib_idp_version }}/ +idp.target.dir={{ shib_installdir }}/shibboleth-idp/ +idp.sealer.password={{ idp_sealer_password }} +idp.keystore.password={{ idp_keystore_password }} +idp.merge.properties={{ shib_installdir }}/shibboleth-identity-provider-{{ shib_idp_version }}/bin/idp.properties +ldap.merge.properties={{ shib_installdir }}/shibboleth-identity-provider-{{ shib_idp_version }}/bin/ldap.properties diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/idp.xml.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/idp.xml.j2 new file mode 100644 index 0000000000000..db75a533f8dbe --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/idp.xml.j2 @@ -0,0 +1,11 @@ + + + + + + + + diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/ldap.properties.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/ldap.properties.j2 new file mode 100644 index 0000000000000..939effe589f2f --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/shibboleth/templates/ldap.properties.j2 @@ -0,0 +1,11 @@ +idp.authn.LDAP.authenticator = bindSearchAuthenticator +idp.authn.LDAP.ldapURL = ldap://{{ ansible_fqdn }}:389 +idp.authn.LDAP.useStartTLS = true +idp.authn.LDAP.sslConfig = certificateTrust +idp.authn.LDAP.trustCertificates= {{ ssl_dir_certs }}/ca_server.pem +idp.authn.LDAP.baseDN = ou=people,{{ ldap_domain_dn }} 
+idp.authn.LDAP.subtreeSearch = true +idp.authn.LDAP.userFilter = (uid={user}) +idp.authn.LDAP.bindDN = cn=admin,{{ ldap_domain_dn }} +idp.authn.LDAP.bindDNCredential = {{ ldap_password }} +idp.authn.LDAP.dnFormat = uid=%s,ou=people,{{ ldap_domain_dn }} diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/tomcat-8/handlers/main.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/tomcat-8/handlers/main.yml new file mode 100644 index 0000000000000..f3d67396f503a --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/tomcat-8/handlers/main.yml @@ -0,0 +1,3 @@ +- name: Restart Tomcat Service + become: yes + service: name=tomcat8 state=restarted enabled=yes diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/tomcat-8/tasks/main.yml b/x-pack/test/idp-fixture/src/main/resources/provision/roles/tomcat-8/tasks/main.yml new file mode 100644 index 0000000000000..e018e58f9ba7a --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/tomcat-8/tasks/main.yml @@ -0,0 +1,22 @@ +--- + +- name: Install Tomcat 8 + apt: + name: tomcat8 + update_cache: yes + state: latest + +- name: Add tomcat8 service user to the ssl-cert group + user: + name: tomcat8 + append: yes + groups: ssl-cert + +- name: Enable HTTPS connector + template: + src: server.xml.j2 + dest: /etc/tomcat8/server.xml + group: tomcat8 + mode: 0640 + notify: Restart Tomcat Service +... diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/roles/tomcat-8/templates/server.xml.j2 b/x-pack/test/idp-fixture/src/main/resources/provision/roles/tomcat-8/templates/server.xml.j2 new file mode 100644 index 0000000000000..62ba3233223b1 --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/roles/tomcat-8/templates/server.xml.j2 @@ -0,0 +1,147 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/x-pack/test/idp-fixture/src/main/resources/provision/vars/default.yml b/x-pack/test/idp-fixture/src/main/resources/provision/vars/default.yml new file mode 100644 index 0000000000000..de2c85f4ea31d --- /dev/null +++ b/x-pack/test/idp-fixture/src/main/resources/provision/vars/default.yml @@ -0,0 +1,26 @@ +--- +ssl_dir: /etc/ssl +ssl_dir_private: "{{ ssl_dir }}/private" +ssl_dir_certs: "{{ ssl_dir }}/certs" + +openldap_server_user: openldap +openldap_key_name: ldap_server.key +openldap_cert_name: ldap_server.pem + +ldap_password: NickFuryHeartsES +ldap_domain: oldap.test.elasticsearch.com +ldap_domain_dn: "dc={{ ldap_domain.split('.')[0] }},dc={{ ldap_domain.split('.')[1] }},dc={{ ldap_domain.split('.')[2] }},dc={{ ldap_domain.split('.')[3] }}" + +tomcat_key_name: tomcat_server.key +tomcat_cert_name: tomcat_server.pem +tomcat_keystore_name: tomcat_server.p12 +tomcat_keystore_pass: secret +tomcat_key_alias: tomcat + +shib_idp_version: 3.3.1 +shib_installdir: /opt +shib_home: "{{ shib_installdir }}/shibboleth-idp" +shib_dns_name: "{{ ansible_fqdn }}:60443" +idp_sealer_password: secret +idp_keystore_password: secret1 +... 
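For reference, the `ldap_domain_dn` expression in vars/default.yml above maps each dot-separated label of `ldap_domain` to a `dc=` component (the template hard-codes exactly four labels, which matches the default domain). A plain-shell equivalent of the same split, shown only as an illustration of what the playbooks end up using as the base DN:

```sh
domain="oldap.test.elasticsearch.com"
# Prints: dc=oldap,dc=test,dc=elasticsearch,dc=com
echo "$domain" | awk -F. '{for (i = 1; i <= NF; i++) printf "dc=%s%s", $i, (i < NF ? "," : "\n")}'
```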
diff --git a/x-pack/test/smb-fixture/Vagrantfile b/x-pack/test/smb-fixture/Vagrantfile new file mode 100644 index 0000000000000..e3c8d807e2761 --- /dev/null +++ b/x-pack/test/smb-fixture/Vagrantfile @@ -0,0 +1,20 @@ +Vagrant.configure("2") do |config| + + config.vm.define "test.ad.elastic.local" do |config| + config.vm.box = "elastic/ubuntu-16.04-x86_64" + end + + config.vm.hostname = "ad.test.elastic.local" + + if Vagrant.has_plugin?("vagrant-cachier") + config.cache.scope = :box + end + + config.vm.network "forwarded_port", guest: 389, host: 61389, protocol: "tcp" + config.vm.network "forwarded_port", guest: 636, host: 61636, protocol: "tcp" + config.vm.network "forwarded_port", guest: 3268, host: 63268, protocol: "tcp" + config.vm.network "forwarded_port", guest: 3269, host: 63269, protocol: "tcp" + + config.vm.provision "shell", path: "src/main/resources/provision/installsmb.sh" + +end diff --git a/x-pack/test/smb-fixture/build.gradle b/x-pack/test/smb-fixture/build.gradle new file mode 100644 index 0000000000000..233b289b295db --- /dev/null +++ b/x-pack/test/smb-fixture/build.gradle @@ -0,0 +1,43 @@ +apply plugin: 'elasticsearch.build' + +Map vagrantEnvVars = [ + 'VAGRANT_CWD' : "${project.projectDir.absolutePath}", + 'VAGRANT_VAGRANTFILE' : 'Vagrantfile', + 'VAGRANT_PROJECT_DIR' : "${project.projectDir.absolutePath}" +] + +String box = "test.ad.elastic.local" + +task update(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { + command 'box' + subcommand 'update' + boxName box + environmentVars vagrantEnvVars +} + +task up(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { + command 'up' + args '--provision', '--provider', 'virtualbox' + boxName box + environmentVars vagrantEnvVars + dependsOn update +} + +task halt(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { + command 'halt' + boxName box + environmentVars vagrantEnvVars +} + +task destroy(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { + command 'destroy' + args '-f' + boxName box + environmentVars vagrantEnvVars + dependsOn halt +} + +thirdPartyAudit.enabled = false +licenseHeaders.enabled = false +test.enabled = false +jarHell.enabled = false diff --git a/x-pack/test/smb-fixture/src/main/resources/certs/ca.key b/x-pack/test/smb-fixture/src/main/resources/certs/ca.key new file mode 100644 index 0000000000000..b755b758914ee --- /dev/null +++ b/x-pack/test/smb-fixture/src/main/resources/certs/ca.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEApAA/+F2xZ27cKT9m16iyK3KeZclMKeR5wFhIu2OBiH2s8MWm +mP0p2xnv7k/0W/Eu/+3hjDzVgjbhOseTQKcysP9AFpcOI8Z5P1vpP+BZZWbXNCPb +/nruAfZRoiQh9/LurT0Ej0gJdIoMFFdEwuBVHX2KImTBZvdvlNW9pAjgzywSYkgd +d6Lb9x8QpVHmEGmu3N4qS+Dqj0NfH6cO/ZkRRnuK3mkQ3kCMPm3RLfKa2VRTmWqe +vx6vr9Byu5rYRfETUfI8SSk38LCXu0XUgIx2C5++CG3mqXX8+TjdWYEy6Uj/rPl7 +7sZnX7Hv1gDXuBpu0Yn6icHTMnGAMGPS22jsfwIDAQABAoIBAAYdrEUK2W7OB4/S +OXeZZuuP3rBVDW4SgyfVIwE5+L6qUSS5ejkCV+k/0l7ExIwZNnN834hnTF8KxON4 +RdmHYrCPFEjDYVecMzFVsCEdsLfDWgsruyyGURHpqamuR0YD3TrAp8bgHNonu8OW +bY4G56Wt5NTbhQrd919JiUTwv9F59+6TnP9cubdt2GHDD2M6TkUNpgQS0hnM578X +zrkiQlakAi+rlC2ZQkH94wxKlm53okBliiAykUmbCOGkLUT/GaQLMoN+MZ7Wv6Ib +nsH8lC0KDcH4T9VGmxjlScIJtxGUMO+dNWx6Kg7E/MSwEasUAJOCqIofRtpDUTr4 +QJNo4eECgYEA32l+vZLm7OMFxhbqGnueiZXbnc/v0kveeFTDt4OWcUozMBez5H3W +AFFILTRADNbvgEAwuK5oC1hEOH5zoRfnaGXcmWayurD8ibK/t23gE5Xf6rL/vCBN +LMS6WoKXXgCKOwQ0Ke5AoaPmca0Iq8bHFmb4pBF9C/0Z1mc1fc+RWxUCgYEAu+xD +w0zhh5Ktob8Q8eNiqVMrSa9jq0MUS1ljx6qCeIGxbuvQARkJxqms3SXwR4JjEwf3 +BAzetYCTFvkqrne9jhoVyZGGS0gLXSG9v3iOaP6GIa51GZwtYhBrzDuGao+UL/Cc 
+ke4hXpC9S7TSoprW8WWevXVa4dy1kaoFUbrTPkMCgYAxRrx8pcUnZJ9mZLF36+I4 +6IPLGA0GblOAaPnOJUjubfZCWkgEUrj70vG/frHN4y5qND5KzbUHI43QhBuO4Y3Z +2fXBJASx5s2ctX9RvvtYdosv4hFD9j/vaujLg9hNFINopvG2eeVpgZQXaJnsAWjy +CP44ed8B4O5s+tCykjC2TQKBgCMxJqt/TUjnRg7hShoSXBqbkaK17rNW14kYz1/H +5bENkJ3WGVjrSHJkuhOcFDhACa+5sR+YDWjuEB2gQcb0c5IV/niGASE996rUM8WU +nQ66g4HxOsq1/aW8r4NKrmxsQPMNWzTU5HjiICD6VuvOlWwVfLm8LW3YuEP0FBTv +KLojAoGAW2EKM7SnstY4khKnC+029aZNuSy/VE6LDcn+E5vwEUgpBN5UTqOWweTv +krlEbD1uAI6aI0Ybc4jM6uyo5LSBzw1TZRS5u3prLZxyyG10JvRD0/f/QTOI21TS +LubgfTc+LXbvUpv6F29lIxHZcIe9lX7cUzHK3Wwo24QOCsXYeqU= +-----END RSA PRIVATE KEY----- diff --git a/x-pack/test/smb-fixture/src/main/resources/certs/ca.pem b/x-pack/test/smb-fixture/src/main/resources/certs/ca.pem new file mode 100644 index 0000000000000..082882be57f79 --- /dev/null +++ b/x-pack/test/smb-fixture/src/main/resources/certs/ca.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDmzCCAoOgAwIBAgIUdwsnIxjgSneHNVKT6JNCCsrQ3T0wDQYJKoZIhvcNAQEL +BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l +cmF0ZWQgQ0EwHhcNMTgwMjE1MTc0ODQ2WhcNMjEwMjE0MTc0ODQ2WjA0MTIwMAYD +VQQDEylFbGFzdGljIENlcnRpZmljYXRlIFRvb2wgQXV0b2dlbmVyYXRlZCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKQAP/hdsWdu3Ck/Zteosity +nmXJTCnkecBYSLtjgYh9rPDFppj9KdsZ7+5P9FvxLv/t4Yw81YI24TrHk0CnMrD/ +QBaXDiPGeT9b6T/gWWVm1zQj2/567gH2UaIkIffy7q09BI9ICXSKDBRXRMLgVR19 +iiJkwWb3b5TVvaQI4M8sEmJIHXei2/cfEKVR5hBprtzeKkvg6o9DXx+nDv2ZEUZ7 +it5pEN5AjD5t0S3ymtlUU5lqnr8er6/Qcrua2EXxE1HyPEkpN/Cwl7tF1ICMdguf +vght5ql1/Pk43VmBMulI/6z5e+7GZ1+x79YA17gabtGJ+onB0zJxgDBj0tto7H8C +AwEAAaOBpDCBoTAdBgNVHQ4EFgQUZo2Y3maL2NoxbbkwRZiC37k6QMEwbwYDVR0j +BGgwZoAUZo2Y3maL2NoxbbkwRZiC37k6QMGhOKQ2MDQxMjAwBgNVBAMTKUVsYXN0 +aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2VuZXJhdGVkIENBghR3CycjGOBKd4c1 +UpPok0IKytDdPTAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBf +mkc4bvUR5+We/2rRqCmP4LFnl/LxfbZ9/pUPRdcxuowuK7YfxN8i44VXGpJvLtec +izhA8gvlj6GbYB/GNlHMogqEORbrMlu2o5Cev4HE/pcWpoqtVaDJqI5Hq4763EmJ +p2dXGMmU04H4LtkcCEt3xQfLQ+QIP4Dl2yEsNd248BKSsscCGm9V3vgzFzbdgndo +zUWv9hQCaEsKNtqvnkTqDy2uFjnf+xNoXFr/bI94gvD9HlZHnIC+g0TL5jjtSfCH +gjeXhC2bBKFtlSt4ClIdZTXWievYs6YDRREfaOi4F0757A/gf+hT0fjZ+9WWnUeM +UuvUnl71CNRnJ5JlNKBA +-----END CERTIFICATE----- diff --git a/x-pack/test/smb-fixture/src/main/resources/certs/cert.pem b/x-pack/test/smb-fixture/src/main/resources/certs/cert.pem new file mode 100644 index 0000000000000..34f12d8605184 --- /dev/null +++ b/x-pack/test/smb-fixture/src/main/resources/certs/cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDoDCCAoigAwIBAgIUMVGoHuyNTjTFaoRmqFELz75jzDEwDQYJKoZIhvcNAQEL +BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l +cmF0ZWQgQ0EwHhcNMTgwMjE1MTc0OTExWhcNMjEwMjE0MTc0OTExWjARMQ8wDQYD +VQQDEwZzYW1iYTQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtGBwa +n+7JN2vweSUsYh4zPmh8RPIE+nEVjK1lx/rADUBY7UVjfTYC+MVKKiezZe7gYCNT +7JNKazPpgVI9e3ZFKw/UxomLqRuuvn5bTh+1tMs3afY5+GGzi7oPmEbBO3ceg0Hi +rNSTDa1rfroZnRYK8uIeSZacQnAW90plITI7rBBt9jq+W9albFbDybfDgNv+yS/C +rzIsofm4rbFC3SMRYfrT6HvwDhjOmmYKZci5x7tsn0T+3tSiR44Bw5/DgiN5kX3m +/kl9qg1eoYWbCUy1dKmQlb4Nb4uNcxrIugLB3zjBkfhMZ0OHoveKh/lJASTWik9k +xQ9rEYbpsRbuXpsHAgMBAAGjgcwwgckwHQYDVR0OBBYEFJOLa7UXKtLPibgKeFh7 +Kq1+rS0/MG8GA1UdIwRoMGaAFGaNmN5mi9jaMW25MEWYgt+5OkDBoTikNjA0MTIw +MAYDVQQDEylFbGFzdGljIENlcnRpZmljYXRlIFRvb2wgQXV0b2dlbmVyYXRlZCBD +QYIUdwsnIxjgSneHNVKT6JNCCsrQ3T0wLAYDVR0RBCUwI4IJbG9jYWxob3N0hwR/ +AAABhxAAAAAAAAAAAAAAAAAAAAABMAkGA1UdEwQCMAAwDQYJKoZIhvcNAQELBQAD +ggEBAEHqT1WHkcF8DuOgyIBx7wKcUVQ5H1qYYlJ1xgMGrKFFZLUzouLcON7oadEu 
+HLIJ4Z3AKD3bqWpcls5XJ9MTECGR48tou67x9cXqTV7jR3Rh0H/VGwzwhR85vbpu +o8ielOPL8XAQOfnAFESJii5sfCU4ZwLg+3evmGZdKfhU6rqQtLimgG/Gm96vOJne +y0a/TZTWrfAarithkOHHXSSAhEI5SdW5SlZAytF4AmYqFvafwxe1+NyFwfCRy0Xl +H40WgVsq+z84psU+WyORb3THX5rgB4au9nuMXOqFKAtrJSI/uApncYraaqU28rqB +gYd8XrtjhKOLw+6viqAKu8l7/cs= +-----END CERTIFICATE----- diff --git a/x-pack/test/smb-fixture/src/main/resources/certs/key.pem b/x-pack/test/smb-fixture/src/main/resources/certs/key.pem new file mode 100644 index 0000000000000..4d5c3a2026733 --- /dev/null +++ b/x-pack/test/smb-fixture/src/main/resources/certs/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEArRgcGp/uyTdr8HklLGIeMz5ofETyBPpxFYytZcf6wA1AWO1F +Y302AvjFSions2Xu4GAjU+yTSmsz6YFSPXt2RSsP1MaJi6kbrr5+W04ftbTLN2n2 +Ofhhs4u6D5hGwTt3HoNB4qzUkw2ta366GZ0WCvLiHkmWnEJwFvdKZSEyO6wQbfY6 +vlvWpWxWw8m3w4Db/skvwq8yLKH5uK2xQt0jEWH60+h78A4YzppmCmXIuce7bJ9E +/t7UokeOAcOfw4IjeZF95v5JfaoNXqGFmwlMtXSpkJW+DW+LjXMayLoCwd84wZH4 +TGdDh6L3iof5SQEk1opPZMUPaxGG6bEW7l6bBwIDAQABAoIBAFkjr2Vus3PgHLAs +Ux52MQNGwlwszU4PAymL1sgxokpBCMBDAJbppmUFY+R7rRJQDiJyn/7aOEf8yTEZ +LhcHe7LHKFH1JGRN5DmrVDsFEoNq5bRV1z2nUfk6ncjmLJnaW8/U3Js1Ugug4YwY +KRKDuRROXHAoiW1TMZJCK4fE/q+HZeG/lz110C+GJfhtCH6PzowC4eVPeh1/FExl +TFGRFh2qnN/d5IfGWaZwazTR16OGOoZf/WYydBilcugxQFNx5osR+4nAFdMW0xD2 +x2diukMf+WBjIWlO3vt9GMs8PBQsU2Ix3/+MUxC5MCUafL1GBqUSphB4YWTrNWQ0 +izFEYZECgYEA1Ue8S0xL6Qxy8F1cZj+tvoGlblCTxHL65JeMl6LwMHzuNHX27s8Q +Ax9j8Z2MTWH7IBATmA56CYlA09FWRiIr38k7cNUC8KdjvjPeqHKJeTIhluiBWGLl +AE2XbEkOdjBXXg7ipF0tcBWb+/WrobzJ2T9dpMzZZOnWEB+7wsN2fy0CgYEAz8PG +TJ0u/+2q+RfeAxo0zdC/dwx8T5rJfZD0AiJpEu69oqMZdiX2M1JDabh/CFdYWyAm +AWQdSw0ugeUHeiZ0i2gujtbxDLAhpCsQ1jleJpJm7VWvgCKY7FotmXhVxA8Acmm0 +slv280ezNJMJFKIONuuOtDATdX1b+MXoh5D2A4MCgYAuadwKLuJeJv1kXYzcG4N9 +78zGgvaFS9hZorlPzn+ira1Q8VL5iUocw9oGHJkJxgbWZWk+L/hS1vGqpuW1gX42 +xx4OYeyv3l2QaM1Nrw9Htqckphhv2aWoOTp4sDVbdw2sRGUCC9z1hV5aqI3fNqxe +gLGqSYINue2BuMYtjkfdSQKBgC0uefU3SX1GhiPdWN572HfZqYmOIYp+Mssntqiw +KwF/AaZYqbTT1JKclSRshtOdiw1mFF3BE826dB6zW8joi/e1FErj20/TDb3Rz7uG +hj8FH3UFaUEIRRFBGyGA1cXpLUO0USNodG+7a/FG+HaQN18iIsp0mga22EVlZIf2 +sklZAoGAOJTQtMJfVk5KvqaZWnFou1De5BLnTq7LEvuNHlddIlXG0QQe9QcyB82G +UFqbpBR8QBTFsHdIQEA4LNQE7L1WrKR2Qj087QXHaUnlo7x2WPoSyziPet0nUn3E +gE5dgnkzX/MENKjG90wJFWNiJqz3JXIbVZKZLzkbM+u+X2+oRvs= +-----END RSA PRIVATE KEY----- diff --git a/x-pack/test/smb-fixture/src/main/resources/provision/installsmb.sh b/x-pack/test/smb-fixture/src/main/resources/provision/installsmb.sh new file mode 100644 index 0000000000000..6c7425da3c0b4 --- /dev/null +++ b/x-pack/test/smb-fixture/src/main/resources/provision/installsmb.sh @@ -0,0 +1,95 @@ +#! /bin/bash + +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License; +# you may not use this file except in compliance with the Elastic License. + +set -ex + +MARKER_FILE=/etc/marker + +if [ -f $MARKER_FILE ]; then + echo "Already provisioned..." 
+  exit 0;
+fi
+
+VDIR=/vagrant
+RESOURCES=$VDIR/src/main/resources
+CERTS_DIR=$RESOURCES/certs
+SSL_DIR=/var/lib/samba/private/tls
+
+# Update package manager
+apt-get update -qqy
+
+# Install samba and LDAP client utilities
+apt-get install -qqy samba ldap-utils
+
+# Install SSL certs
+mkdir -p $SSL_DIR
+cp $CERTS_DIR/*.pem $SSL_DIR
+chmod 600 $SSL_DIR/key.pem
+
+cat $SSL_DIR/ca.pem >> /etc/ssl/certs/ca-certificates.crt
+
+mv /etc/samba/smb.conf /etc/samba/smb.conf.orig
+
+samba-tool domain provision --server-role=dc --use-rfc2307 --dns-backend=SAMBA_INTERNAL --realm=AD.TEST.ELASTICSEARCH.COM --domain=ADES --adminpass=Passw0rd
+
+cp /var/lib/samba/private/krb5.conf /etc/krb5.conf
+
+service samba-ad-dc restart
+
+# Add users
+samba-tool user add ironman Passw0rd --surname=Stark --given-name=Tony --job-title=CEO
+samba-tool user add hulk Passw0rd --surname=Banner --given-name=Bruce
+samba-tool user add phil Passw0rd --surname=Coulson --given-name=Phil
+samba-tool user add cap Passw0rd --surname=Rogers --given-name=Steve
+samba-tool user add blackwidow Passw0rd --surname=Romanoff --given-name=Natasha
+samba-tool user add hawkeye Passw0rd --surname=Barton --given-name=Clint
+samba-tool user add Thor Passw0rd
+samba-tool user add selvig Passw0rd --surname=Selvig --given-name=Erik
+samba-tool user add Odin Passw0rd
+samba-tool user add Jarvis Passw0rd
+samba-tool user add kraken Passw0rd --surname=Kraken --given-name=Commander
+samba-tool user add fury Passw0rd --surname=Fury --given-name=Nick
+
+# Add groups
+samba-tool group add SHIELD
+samba-tool group add Avengers
+samba-tool group add Supers
+samba-tool group add Geniuses
+samba-tool group add Playboys
+samba-tool group add Philanthropists
+samba-tool group add Gods
+samba-tool group add Billionaires
+samba-tool group add "World Security Council"
+samba-tool group add Hydra
+
+# Group membership
+samba-tool group addmembers "SHIELD" Thor,hawkeye,blackwidow,cap,phil,hulk,ironman
+samba-tool group addmembers "Avengers" Thor,hawkeye,blackwidow,cap,hulk,ironman
+samba-tool group addmembers "Supers" Avengers
+samba-tool group addmembers "Geniuses" selvig,hulk,ironman
+samba-tool group addmembers "Playboys" ironman
+samba-tool group addmembers "Philanthropists" Thor,hulk,ironman
+samba-tool group addmembers "Gods" Thor,Odin
+samba-tool group addmembers "Billionaires" ironman
+samba-tool group addmembers "World Security Council" fury
+samba-tool group addmembers "Hydra" kraken
+
+# Update UPN
+cat > /tmp/entrymods << EOL
+dn: CN=Erik Selvig,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com
+changetype: modify
+replace: userPrincipalName
+userPrincipalName: erik.selvig@ad.test.elasticsearch.com
+
+dn: CN=Bruce Banner,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com
+changetype: modify
+add: seeAlso
+seeAlso: CN=Avengers,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com
+EOL
+
+ldapmodify -D Administrator@ad.test.elasticsearch.com -w Passw0rd -H ldaps://127.0.0.1:636 -f /tmp/entrymods -v
+
+touch $MARKER_FILE
diff --git a/x-pack/transport-client/build.gradle b/x-pack/transport-client/build.gradle
new file mode 100644
index 0000000000000..31c05569274de
--- /dev/null
+++ b/x-pack/transport-client/build.gradle
@@ -0,0 +1,41 @@
+import org.elasticsearch.gradle.precommit.PrecommitTasks
+
+apply plugin: 'elasticsearch.build'
+apply plugin: 'nebula.maven-base-publish'
+apply plugin: 'nebula.maven-scm'
+
+group = 'org.elasticsearch.client'
+archivesBaseName = 'x-pack-transport'
+
+dependencies {
+    // this "api" dependency looks weird, but it is correct, as it contains
+    // all of x-pack for now, and transport client will be going away in the future.
+    compile "org.elasticsearch.plugin:x-pack-core:${version}"
+    compile "org.elasticsearch.client:transport:${version}"
+    testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
+    testCompile "junit:junit:${versions.junit}"
+    testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
+}
+
+dependencyLicenses.enabled = false
+
+forbiddenApisTest {
+    // we don't use the core test-framework, no lucene classes present so we don't want the es-test-signatures to
+    // be pulled in
+    signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'),
+                      PrecommitTasks.getResource('/forbidden/es-all-signatures.txt')]
+}
+
+namingConventions {
+    testClass = 'com.carrotsearch.randomizedtesting.RandomizedTest'
+    //we don't have integration tests
+    skipIntegTestInDisguise = true
+}
+
+publishing {
+    publications {
+        nebula(MavenPublication) {
+            artifactId = archivesBaseName
+        }
+    }
+}
diff --git a/x-pack/transport-client/src/main/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClient.java b/x-pack/transport-client/src/main/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClient.java
new file mode 100644
index 0000000000000..cf4e5db92b00e
--- /dev/null
+++ b/x-pack/transport-client/src/main/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClient.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.client;
+
+import io.netty.util.ThreadDeathWatcher;
+import io.netty.util.concurrent.GlobalEventExecutor;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.transport.client.PreBuiltTransportClient;
+import org.elasticsearch.xpack.core.XPackClientPlugin;
+import org.elasticsearch.xpack.core.XPackPlugin;
+import org.elasticsearch.xpack.core.security.SecurityField;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A builder to create an instance of {@link TransportClient} that pre-installs
+ * all of the plugins installed by the {@link PreBuiltTransportClient} and the
+ * {@link XPackPlugin} so that the client may be used with an x-pack enabled
+ * cluster.
+ *
+ * @deprecated {@link TransportClient} is deprecated in favour of the high-level REST client and will be removed in Elasticsearch 8.0
+ */
+@SuppressWarnings({"unchecked","varargs"})
+@Deprecated
+public class PreBuiltXPackTransportClient extends PreBuiltTransportClient {
+
+    @SafeVarargs
+    public PreBuiltXPackTransportClient(Settings settings, Class<? extends Plugin>... plugins) {
+        this(settings, Arrays.asList(plugins));
+    }
+
+    public PreBuiltXPackTransportClient(Settings settings, Collection<Class<? extends Plugin>> plugins) {
+        this(settings, plugins, null);
+    }
+
+    public PreBuiltXPackTransportClient(Settings settings, Collection<Class<? extends Plugin>> plugins,
+                                        HostFailureListener hostFailureListener) {
+        super(settings, addPlugins(plugins, Collections.singletonList(XPackClientPlugin.class)), hostFailureListener);
+    }
+
+    @Override
+    public void close() {
+        super.close();
+        if (NetworkModule.TRANSPORT_TYPE_SETTING.get(settings).equals(SecurityField.NAME4)) {
+            try {
+                GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS);
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+            }
+            try {
+                ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS);
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+            }
+        }
+    }
+}
diff --git a/x-pack/transport-client/src/test/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClientTests.java b/x-pack/transport-client/src/test/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClientTests.java
new file mode 100644
index 0000000000000..f9808ce54faac
--- /dev/null
+++ b/x-pack/transport-client/src/test/java/org/elasticsearch/xpack/client/PreBuiltXPackTransportClientTests.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.client;
+
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.xpack.core.security.SecurityField;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Unit tests for the {@link PreBuiltXPackTransportClient}
+ */
+public class PreBuiltXPackTransportClientTests extends RandomizedTest {
+
+    @Test
+    public void testPluginInstalled() {
+        try (TransportClient client = new PreBuiltXPackTransportClient(Settings.EMPTY)) {
+            Settings settings = client.settings();
+            assertEquals(SecurityField.NAME4, NetworkModule.TRANSPORT_TYPE_SETTING.get(settings));
+        }
+    }
+
+}
\ No newline at end of file
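
For reviewers who have not used the transport client before, the sketch below shows roughly how the `PreBuiltXPackTransportClient` added above would be used against a security-enabled cluster. It is not part of the change itself; the cluster name, host, port, and the `elastic:changeme` credentials are illustrative assumptions only.

```java
// Hypothetical usage sketch (not part of this change). The cluster name, host,
// port 9300, and the elastic:changeme credentials below are assumed values.
import java.net.InetAddress;

import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.xpack.client.PreBuiltXPackTransportClient;

public class TransportClientExample {
    public static void main(String[] args) throws Exception {
        Settings settings = Settings.builder()
                .put("cluster.name", "my-cluster")              // assumed cluster name
                .put("xpack.security.user", "elastic:changeme") // assumed credentials
                .build();
        // The no-argument plugin constructor still installs XPackClientPlugin via addPlugins(...)
        try (TransportClient client = new PreBuiltXPackTransportClient(settings)) {
            client.addTransportAddress(
                    new TransportAddress(InetAddress.getByName("localhost"), 9300));
            // The client is now ready for requests, e.g. client.prepareSearch().get()
        }
    }
}
```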